/* Folds EXPR + ELT * SCALE into a tree of type TYPE and returns it.
   SCALE is extended to the precision of the combination COMB.  */

static tree
add_elt_to_tree (tree expr, tree type, tree elt, double_int scale,
                 aff_tree *comb)
{
  enum tree_code code;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  scale = double_int_ext_for_comb (scale, comb);
  elt = fold_convert (type1, elt);

  if (scale.is_one ())
    {
      if (!expr)
        return fold_convert (type, elt);

      if (POINTER_TYPE_P (type))
        return fold_build_pointer_plus (expr, elt);
      return fold_build2 (PLUS_EXPR, type, expr, elt);
    }

  if (scale.is_minus_one ())
    {
      if (!expr)
        return fold_convert (type, fold_build1 (NEGATE_EXPR, type1, elt));

      if (POINTER_TYPE_P (type))
        {
          elt = fold_build1 (NEGATE_EXPR, type1, elt);
          return fold_build_pointer_plus (expr, elt);
        }
      return fold_build2 (MINUS_EXPR, type, expr, elt);
    }

  if (!expr)
    return fold_convert (type,
                         fold_build2 (MULT_EXPR, type1, elt,
                                      double_int_to_tree (type1, scale)));

  if (scale.is_negative ())
    {
      code = MINUS_EXPR;
      scale = -scale;
    }
  else
    code = PLUS_EXPR;

  elt = fold_build2 (MULT_EXPR, type1, elt,
                     double_int_to_tree (type1, scale));
  if (POINTER_TYPE_P (type))
    {
      if (code == MINUS_EXPR)
        elt = fold_build1 (NEGATE_EXPR, type1, elt);
      return fold_build_pointer_plus (expr, elt);
    }
  return fold_build2 (code, type, expr, elt);
}
/* Multiplies the affine combination COMB by the constant SCALE.  */

void
aff_combination_scale (aff_tree *comb, double_int scale)
{
  unsigned i, j;

  scale = double_int_ext_for_comb (scale, comb);
  if (scale.is_one ())
    return;

  if (scale.is_zero ())
    {
      aff_combination_zero (comb, comb->type);
      return;
    }

  comb->offset = double_int_ext_for_comb (scale * comb->offset, comb);
  for (i = 0, j = 0; i < comb->n; i++)
    {
      double_int new_coef;

      new_coef = double_int_ext_for_comb (scale * comb->elts[i].coef, comb);
      /* A coefficient may become zero due to overflow.  Remove the zero
         elements.  */
      if (new_coef.is_zero ())
        continue;
      comb->elts[j].coef = new_coef;
      comb->elts[j].val = comb->elts[i].val;
      j++;
    }
  comb->n = j;

  if (comb->rest)
    {
      tree type = comb->type;
      if (POINTER_TYPE_P (type))
        type = sizetype;
      if (comb->n < MAX_AFF_ELTS)
        {
          comb->elts[comb->n].coef = scale;
          comb->elts[comb->n].val = comb->rest;
          comb->rest = NULL_TREE;
          comb->n++;
        }
      else
        comb->rest = fold_build2 (MULT_EXPR, type, comb->rest,
                                  double_int_to_tree (type, scale));
    }
}
/* Check whether the single-width fixed-point value A fits MODE.  If SAT_P,
   saturate *F to the maximum or minimum representable value instead of
   signalling overflow.  Return true iff overflow occurred.  */

static bool
fixed_saturate1 (machine_mode mode, double_int a, double_int *f, bool sat_p)
{
  bool overflow_p = false;
  bool unsigned_p = UNSIGNED_FIXED_POINT_MODE_P (mode);
  int i_f_bits = GET_MODE_IBIT (mode) + GET_MODE_FBIT (mode);

  if (unsigned_p) /* Unsigned type.  */
    {
      double_int max;

      max.low = -1;
      max.high = -1;
      max = max.zext (i_f_bits);
      if (a.ugt (max))
        {
          if (sat_p)
            *f = max;
          else
            overflow_p = true;
        }
    }
  else /* Signed type.  */
    {
      double_int max, min;

      max.high = -1;
      max.low = -1;
      max = max.zext (i_f_bits);
      min.high = 0;
      min.low = 1;
      min = min.alshift (i_f_bits, HOST_BITS_PER_DOUBLE_INT);
      min = min.sext (1 + i_f_bits);
      if (a.sgt (max))
        {
          if (sat_p)
            *f = max;
          else
            overflow_p = true;
        }
      else if (a.slt (min))
        {
          if (sat_p)
            *f = min;
          else
            overflow_p = true;
        }
    }
  return overflow_p;
}
/* Construct a fixed-point value of mode MODE from the bit payload PAYLOAD,
   sign- or zero-extending it according to the signedness of MODE.  */

FIXED_VALUE_TYPE
fixed_from_double_int (double_int payload, machine_mode mode)
{
  FIXED_VALUE_TYPE value;

  gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT);

  if (SIGNED_SCALAR_FIXED_POINT_MODE_P (mode))
    value.data = payload.sext (1 + GET_MODE_IBIT (mode) + GET_MODE_FBIT (mode));
  else if (UNSIGNED_SCALAR_FIXED_POINT_MODE_P (mode))
    value.data = payload.zext (GET_MODE_IBIT (mode) + GET_MODE_FBIT (mode));
  else
    gcc_unreachable ();

  value.mode = mode;
  return value;
}
/* Set the range information of SSA name NAME to [MIN, MAX] of kind
   RANGE_TYPE, allocating the range info if necessary, and refine the
   recorded nonzero bits from MIN and MAX when the kind is VR_RANGE.  */

void
set_range_info (tree name, enum value_range_type range_type,
                double_int min, double_int max)
{
  gcc_assert (!POINTER_TYPE_P (TREE_TYPE (name)));
  gcc_assert (range_type == VR_RANGE || range_type == VR_ANTI_RANGE);
  range_info_def *ri = SSA_NAME_RANGE_INFO (name);

  /* Allocate if not available.  */
  if (ri == NULL)
    {
      ri = ggc_alloc_cleared_range_info_def ();
      SSA_NAME_RANGE_INFO (name) = ri;
      ri->nonzero_bits = double_int::mask (TYPE_PRECISION (TREE_TYPE (name)));
    }

  /* Record the range type.  */
  if (SSA_NAME_RANGE_TYPE (name) != range_type)
    SSA_NAME_ANTI_RANGE_P (name) = (range_type == VR_ANTI_RANGE);

  /* Set the values.  */
  ri->min = min;
  ri->max = max;

  /* If it is a range, try to improve nonzero_bits from the min/max.  */
  if (range_type == VR_RANGE)
    {
      int prec = TYPE_PRECISION (TREE_TYPE (name));
      double_int xorv;

      min = min.zext (prec);
      max = max.zext (prec);
      xorv = min ^ max;
      if (xorv.high)
        xorv = double_int::mask (2 * HOST_BITS_PER_WIDE_INT
                                 - clz_hwi (xorv.high));
      else if (xorv.low)
        xorv = double_int::mask (HOST_BITS_PER_WIDE_INT
                                 - clz_hwi (xorv.low));
      ri->nonzero_bits = ri->nonzero_bits & (min | xorv);
    }
}
/* Check whether the double-width fixed-point value A_HIGH:A_LOW fits MODE.
   If SAT_P, saturate *F to the maximum or minimum representable value
   instead of signalling overflow.  Return true iff overflow occurred.  */

static bool
fixed_saturate2 (machine_mode mode, double_int a_high, double_int a_low,
                 double_int *f, bool sat_p)
{
  bool overflow_p = false;
  bool unsigned_p = UNSIGNED_FIXED_POINT_MODE_P (mode);
  int i_f_bits = GET_MODE_IBIT (mode) + GET_MODE_FBIT (mode);

  if (unsigned_p) /* Unsigned type.  */
    {
      double_int max_r, max_s;

      max_r.high = 0;
      max_r.low = 0;
      max_s.high = -1;
      max_s.low = -1;
      max_s = max_s.zext (i_f_bits);
      if (a_high.ugt (max_r)
          || (a_high == max_r && a_low.ugt (max_s)))
        {
          if (sat_p)
            *f = max_s;
          else
            overflow_p = true;
        }
    }
  else /* Signed type.  */
    {
      double_int max_r, max_s, min_r, min_s;

      max_r.high = 0;
      max_r.low = 0;
      max_s.high = -1;
      max_s.low = -1;
      max_s = max_s.zext (i_f_bits);
      min_r.high = -1;
      min_r.low = -1;
      min_s.high = 0;
      min_s.low = 1;
      min_s = min_s.alshift (i_f_bits, HOST_BITS_PER_DOUBLE_INT);
      min_s = min_s.sext (1 + i_f_bits);
      if (a_high.sgt (max_r)
          || (a_high == max_r && a_low.ugt (max_s)))
        {
          if (sat_p)
            *f = max_s;
          else
            overflow_p = true;
        }
      else if (a_high.slt (min_r)
               || (a_high == min_r && a_low.ult (min_s)))
        {
          if (sat_p)
            *f = min_s;
          else
            overflow_p = true;
        }
    }
  return overflow_p;
}
/* Extend the constant CST as appropriate for the affine combination COMB,
   i.e. sign-extend it to the precision of COMB's type.  */

double_int
double_int_ext_for_comb (double_int cst, aff_tree *comb)
{
  return cst.sext (TYPE_PRECISION (comb->type));
}
/* Adds ELT * SCALE to the affine combination COMB.  */

void
aff_combination_add_elt (aff_tree *comb, tree elt, double_int scale)
{
  unsigned i;
  tree type;

  scale = double_int_ext_for_comb (scale, comb);
  if (scale.is_zero ())
    return;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, elt, 0))
      {
        double_int new_coef;

        new_coef = comb->elts[i].coef + scale;
        new_coef = double_int_ext_for_comb (new_coef, comb);
        if (!new_coef.is_zero ())
          {
            comb->elts[i].coef = new_coef;
            return;
          }

        comb->n--;
        comb->elts[i] = comb->elts[comb->n];

        if (comb->rest)
          {
            gcc_assert (comb->n == MAX_AFF_ELTS - 1);
            comb->elts[comb->n].coef = double_int_one;
            comb->elts[comb->n].val = comb->rest;
            comb->rest = NULL_TREE;
            comb->n++;
          }
        return;
      }
  if (comb->n < MAX_AFF_ELTS)
    {
      comb->elts[comb->n].coef = scale;
      comb->elts[comb->n].val = elt;
      comb->n++;
      return;
    }

  type = comb->type;
  if (POINTER_TYPE_P (type))
    type = sizetype;

  if (scale.is_one ())
    elt = fold_convert (type, elt);
  else
    elt = fold_build2 (MULT_EXPR, type,
                       fold_convert (type, elt),
                       double_int_to_tree (type, scale));

  if (comb->rest)
    comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest, elt);
  else
    comb->rest = elt;
}