static bool
prefer_and_bit_test (machine_mode mode, int bitnum)
{
  bool speed_p;
  wide_int mask = wi::set_bit_in_zero (bitnum, GET_MODE_PRECISION (mode));

  if (and_test == 0)
    {
      /* Set up rtxes for the two variations.  Use NULL as a placeholder
         for the BITNUM-based constants.  */
      and_reg = gen_rtx_REG (mode, LAST_VIRTUAL_REGISTER + 1);
      and_test = gen_rtx_AND (mode, and_reg, NULL);
      shift_test = gen_rtx_AND (mode, gen_rtx_ASHIFTRT (mode, and_reg, NULL),
                                const1_rtx);
    }
  else
    {
      /* Change the mode of the previously-created rtxes.  */
      PUT_MODE (and_reg, mode);
      PUT_MODE (and_test, mode);
      PUT_MODE (shift_test, mode);
      PUT_MODE (XEXP (shift_test, 0), mode);
    }

  /* Fill in the integers.  */
  XEXP (and_test, 1) = immed_wide_int_const (mask, mode);
  XEXP (XEXP (shift_test, 0), 1) = GEN_INT (bitnum);

  speed_p = optimize_insn_for_speed_p ();
  return (rtx_cost (and_test, mode, IF_THEN_ELSE, 0, speed_p)
          <= rtx_cost (shift_test, mode, IF_THEN_ELSE, 0, speed_p));
}
static bool
prefer_and_bit_test (enum machine_mode mode, int bitnum)
{
  if (and_test == 0)
    {
      /* Set up rtxes for the two variations.  Use NULL as a placeholder
         for the BITNUM-based constants.  */
      and_reg = gen_rtx_REG (mode, FIRST_PSEUDO_REGISTER);
      and_test = gen_rtx_AND (mode, and_reg, NULL);
      shift_test = gen_rtx_AND (mode, gen_rtx_ASHIFTRT (mode, and_reg, NULL),
                                const1_rtx);
    }
  else
    {
      /* Change the mode of the previously-created rtxes.  */
      PUT_MODE (and_reg, mode);
      PUT_MODE (and_test, mode);
      PUT_MODE (shift_test, mode);
      PUT_MODE (XEXP (shift_test, 0), mode);
    }

  /* Fill in the integers.  */
  XEXP (and_test, 1)
    = immed_double_int_const (double_int_setbit (double_int_zero, bitnum),
                              mode);
  XEXP (XEXP (shift_test, 0), 1) = GEN_INT (bitnum);

  return (rtx_cost (and_test, IF_THEN_ELSE, optimize_insn_for_speed_p ())
          <= rtx_cost (shift_test, IF_THEN_ELSE,
                       optimize_insn_for_speed_p ()));
}
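Both versions above weigh the same trade-off: whether a single-bit conditional test is cheaper as an AND with a one-bit mask (and_test) or as a right shift followed by an AND with 1 (shift_test). The following is a minimal standalone C sketch of those two source-level shapes, not GCC API; BITNUM is an assumed example value.

/* Illustrative only: the two equivalent forms of testing bit BITNUM that
   prefer_and_bit_test prices against each other.  */

#include <stdio.h>

#define BITNUM 5		/* assumed example bit position */

static int test_with_and (unsigned int x)
{
  /* The "and_test" shape: x & (1 << BITNUM).  */
  return (x & (1u << BITNUM)) != 0;
}

static int test_with_shift (unsigned int x)
{
  /* The "shift_test" shape: (x >> BITNUM) & 1.  */
  return (x >> BITNUM) & 1;
}

int main (void)
{
  unsigned int x = 0x20;
  printf ("%d %d\n", test_with_and (x), test_with_shift (x));
  return 0;
}

On targets where wide mask constants are expensive but shifts are cheap, the shift form tends to win; routing the decision through rtx_cost lets each backend's cost model make that call per mode and bit position.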
static unsigned int arm_pertask_ssp_rtl_execute(void)
{
	rtx_insn *insn;

	for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
		const char *sym;
		rtx body;
		rtx mask, masked_sp;

		/*
		 * Find a SET insn involving a SYMBOL_REF to __stack_chk_guard
		 */
		if (!INSN_P(insn))
			continue;
		body = PATTERN(insn);
		if (GET_CODE(body) != SET ||
		    GET_CODE(SET_SRC(body)) != SYMBOL_REF)
			continue;
		sym = XSTR(SET_SRC(body), 0);
		if (strcmp(sym, "__stack_chk_guard"))
			continue;

		/*
		 * Replace the source of the SET insn with an expression that
		 * produces the address of the copy of the stack canary value
		 * stored in struct thread_info
		 */
		mask = GEN_INT(sext_hwi(sp_mask, GET_MODE_PRECISION(Pmode)));
		masked_sp = gen_reg_rtx(Pmode);

		emit_insn_before(gen_rtx_set(masked_sp,
					     gen_rtx_AND(Pmode,
							 stack_pointer_rtx,
							 mask)),
				 insn);

		SET_SRC(body) = gen_rtx_PLUS(Pmode, masked_sp,
					     GEN_INT(canary_offset));
	}
	return 0;
}
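The net effect of this RTL rewrite is that every reference to the global __stack_chk_guard is replaced by a per-task address computed from the stack pointer: mask SP down to the base of the current stack, then add the offset of the canary copy. A hedged, illustrative C sketch of that address computation follows; THREAD_SIZE and CANARY_OFFSET are assumed stand-ins for the values the plugin derives from sp_mask and canary_offset, and are not part of the plugin source.

/* Illustrative only: the address the pass substitutes for a load of
 * __stack_chk_guard, written out by hand.
 */

#include <stdint.h>

#define THREAD_SIZE	8192UL	/* assumed kernel stack size */
#define CANARY_OFFSET	0x10UL	/* assumed offset of the canary copy */

static inline unsigned long *per_task_canary_addr(uintptr_t sp)
{
	/* sp & sp_mask: round the stack pointer down to the base of the
	 * current stack, where the per-task data holding the canary lives.
	 */
	uintptr_t base = sp & ~(THREAD_SIZE - 1);

	/* ... + canary_offset: step to the canary copy itself. */
	return (unsigned long *)(base + CANARY_OFFSET);
}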
static void
emit_case_bit_tests (gswitch *swtch, tree index_expr,
		     tree minval, tree range, tree maxval)
{
  struct case_bit_test test[MAX_CASE_BIT_TESTS];
  unsigned int i, j, k;
  unsigned int count;

  basic_block switch_bb = gimple_bb (swtch);
  basic_block default_bb, new_default_bb, new_bb;
  edge default_edge;
  bool update_dom = dom_info_available_p (CDI_DOMINATORS);

  vec<basic_block> bbs_to_fix_dom = vNULL;

  tree index_type = TREE_TYPE (index_expr);
  tree unsigned_index_type = unsigned_type_for (index_type);
  unsigned int branch_num = gimple_switch_num_labels (swtch);

  gimple_stmt_iterator gsi;
  gassign *shift_stmt;

  tree idx, tmp, csui;
  tree word_type_node = lang_hooks.types.type_for_mode (word_mode, 1);
  tree word_mode_zero = fold_convert (word_type_node, integer_zero_node);
  tree word_mode_one = fold_convert (word_type_node, integer_one_node);
  int prec = TYPE_PRECISION (word_type_node);
  wide_int wone = wi::one (prec);

  memset (&test, 0, sizeof (test));

  /* Get the edge for the default case.  */
  tmp = gimple_switch_default_label (swtch);
  default_bb = label_to_block (CASE_LABEL (tmp));
  default_edge = find_edge (switch_bb, default_bb);

  /* Go through all case labels, and collect the case labels, profile
     counts, and other information we need to build the branch tests.  */
  count = 0;
  for (i = 1; i < branch_num; i++)
    {
      unsigned int lo, hi;
      tree cs = gimple_switch_label (swtch, i);
      tree label = CASE_LABEL (cs);
      edge e = find_edge (switch_bb, label_to_block (label));
      for (k = 0; k < count; k++)
	if (e == test[k].target_edge)
	  break;

      if (k == count)
	{
	  gcc_checking_assert (count < MAX_CASE_BIT_TESTS);
	  test[k].mask = wi::zero (prec);
	  test[k].target_edge = e;
	  test[k].label = label;
	  test[k].bits = 1;
	  count++;
	}
      else
	test[k].bits++;

      lo = tree_to_uhwi (int_const_binop (MINUS_EXPR,
					  CASE_LOW (cs), minval));
      if (CASE_HIGH (cs) == NULL_TREE)
	hi = lo;
      else
	hi = tree_to_uhwi (int_const_binop (MINUS_EXPR,
					    CASE_HIGH (cs), minval));

      for (j = lo; j <= hi; j++)
	test[k].mask |= wi::lshift (wone, j);
    }

  qsort (test, count, sizeof (*test), case_bit_test_cmp);

  /* If all values are in the 0 .. BITS_PER_WORD-1 range, we can get rid of
     the minval subtractions, but it might make the mask constants more
     expensive.  So, compare the costs.  */
  if (compare_tree_int (minval, 0) > 0
      && compare_tree_int (maxval, GET_MODE_BITSIZE (word_mode)) < 0)
    {
      int cost_diff;
      HOST_WIDE_INT m = tree_to_uhwi (minval);
      rtx reg = gen_raw_REG (word_mode, 10000);
      bool speed_p = optimize_bb_for_speed_p (gimple_bb (swtch));
      cost_diff = set_rtx_cost (gen_rtx_PLUS (word_mode, reg,
					      GEN_INT (-m)), speed_p);
      for (i = 0; i < count; i++)
	{
	  rtx r = immed_wide_int_const (test[i].mask, word_mode);
	  cost_diff += set_src_cost (gen_rtx_AND (word_mode, reg, r),
				     word_mode, speed_p);
	  r = immed_wide_int_const (wi::lshift (test[i].mask, m), word_mode);
	  cost_diff -= set_src_cost (gen_rtx_AND (word_mode, reg, r),
				     word_mode, speed_p);
	}
      if (cost_diff > 0)
	{
	  for (i = 0; i < count; i++)
	    test[i].mask = wi::lshift (test[i].mask, m);
	  minval = build_zero_cst (TREE_TYPE (minval));
	  range = maxval;
	}
    }

  /* We generate two jumps to the default case label.
     Split the default edge, so that we don't have to do any PHI node
     updating.  */
  new_default_bb = split_edge (default_edge);

  if (update_dom)
    {
      bbs_to_fix_dom.create (10);
      bbs_to_fix_dom.quick_push (switch_bb);
      bbs_to_fix_dom.quick_push (default_bb);
      bbs_to_fix_dom.quick_push (new_default_bb);
    }

  /* Now build the test-and-branch code.  */

  gsi = gsi_last_bb (switch_bb);

  /* idx = (unsigned)x - minval.  */
  idx = fold_convert (unsigned_index_type, index_expr);
  idx = fold_build2 (MINUS_EXPR, unsigned_index_type, idx,
		     fold_convert (unsigned_index_type, minval));
  idx = force_gimple_operand_gsi (&gsi, idx,
				  /*simple=*/true, NULL_TREE,
				  /*before=*/true, GSI_SAME_STMT);

  /* if (idx > range) goto default */
  range = force_gimple_operand_gsi (&gsi,
				    fold_convert (unsigned_index_type, range),
				    /*simple=*/true, NULL_TREE,
				    /*before=*/true, GSI_SAME_STMT);
  tmp = fold_build2 (GT_EXPR, boolean_type_node, idx, range);
  new_bb = hoist_edge_and_branch_if_true (&gsi, tmp, default_edge,
					  update_dom);
  if (update_dom)
    bbs_to_fix_dom.quick_push (new_bb);
  gcc_assert (gimple_bb (swtch) == new_bb);
  gsi = gsi_last_bb (new_bb);

  /* Any blocks dominated by the GIMPLE_SWITCH, but that are not successors
     of NEW_BB, are still immediately dominated by SWITCH_BB.  Make it so.  */
  if (update_dom)
    {
      vec<basic_block> dom_bbs;
      basic_block dom_son;

      dom_bbs = get_dominated_by (CDI_DOMINATORS, new_bb);
      FOR_EACH_VEC_ELT (dom_bbs, i, dom_son)
	{
	  edge e = find_edge (new_bb, dom_son);
	  if (e && single_pred_p (e->dest))
	    continue;
	  set_immediate_dominator (CDI_DOMINATORS, dom_son, switch_bb);
	  bbs_to_fix_dom.safe_push (dom_son);
	}
      dom_bbs.release ();
    }
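For orientation, this is roughly the shape of code the pass builds, written here as a hand-written C sketch rather than GIMPLE. The vowel switch and the character constants are made-up example data, and the shift into csui belongs to a part of the function not shown above; only the idx computation, the range check, and the mask construction correspond directly to the statements visible in the listing.

/* Illustrative only: a switch over a small, dense set of case values
   rewritten as a single bit-mask test.  */

int classify (unsigned int c)
{
  /* Original form:
       switch (c) {
       case 'a': case 'e': case 'i': case 'o': case 'u': return 1;
       default: return 0;
       }  */
  unsigned int idx = c - 'a';			/* idx = (unsigned)x - minval */
  if (idx > 'u' - 'a')				/* if (idx > range) goto default */
    return 0;
  unsigned long csui = 1UL << idx;		/* csui = 1 << idx (assumed, built later) */
  unsigned long mask = (1UL << ('a' - 'a')) | (1UL << ('e' - 'a'))
		       | (1UL << ('i' - 'a')) | (1UL << ('o' - 'a'))
		       | (1UL << ('u' - 'a'));	/* test[k].mask from the loop above */
  if (csui & mask)				/* if (csui & mask) goto case label */
    return 1;
  return 0;
}

The cost comparison in the middle of the function decides whether to keep subtracting minval at run time or to fold it into wider mask constants instead, which is why the sketch normalizes to 'a' before shifting.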