/* Returns EXPR + ELT * SCALE, folded to TYPE.  EXPR may be NULL, in
   which case just ELT * SCALE is returned; SCALE is extended to the
   precision of the combination COMB.  */

static tree
add_elt_to_tree (tree expr, tree type, tree elt, double_int scale,
                 aff_tree *comb)
{
  enum tree_code code;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  scale = double_int_ext_for_comb (scale, comb);
  elt = fold_convert (type1, elt);

  if (scale.is_one ())
    {
      if (!expr)
        return fold_convert (type, elt);

      if (POINTER_TYPE_P (type))
        return fold_build_pointer_plus (expr, elt);
      return fold_build2 (PLUS_EXPR, type, expr, elt);
    }

  if (scale.is_minus_one ())
    {
      if (!expr)
        return fold_convert (type, fold_build1 (NEGATE_EXPR, type1, elt));

      if (POINTER_TYPE_P (type))
        {
          elt = fold_build1 (NEGATE_EXPR, type1, elt);
          return fold_build_pointer_plus (expr, elt);
        }
      return fold_build2 (MINUS_EXPR, type, expr, elt);
    }

  if (!expr)
    return fold_convert (type,
                         fold_build2 (MULT_EXPR, type1, elt,
                                      double_int_to_tree (type1, scale)));

  if (scale.is_negative ())
    {
      code = MINUS_EXPR;
      scale = -scale;
    }
  else
    code = PLUS_EXPR;

  elt = fold_build2 (MULT_EXPR, type1, elt,
                     double_int_to_tree (type1, scale));
  if (POINTER_TYPE_P (type))
    {
      if (code == MINUS_EXPR)
        elt = fold_build1 (NEGATE_EXPR, type1, elt);
      return fold_build_pointer_plus (expr, elt);
    }
  return fold_build2 (code, type, expr, elt);
}
/* Splits the address described by the affine combination ADDR (of TYPE)
   into PARTS (symbol, base, index, step and offset).  IV_CAND is the
   selected induction-variable candidate, BASE_HINT a guess at the base
   of the reference, and SPEED says whether we optimize for speed.  */

static void
addr_to_parts (tree type, aff_tree *addr, tree iv_cand, tree base_hint,
               struct mem_address *parts, bool speed)
{
  tree part;
  unsigned i;

  parts->symbol = NULL_TREE;
  parts->base = NULL_TREE;
  parts->index = NULL_TREE;
  parts->step = NULL_TREE;

  if (!double_int_zero_p (addr->offset))
    parts->offset = double_int_to_tree (sizetype, addr->offset);
  else
    parts->offset = NULL_TREE;

  /* Try to find a symbol.  */
  move_fixed_address_to_symbol (parts, addr);

  /* No need to do address parts reassociation if the number of parts
     is <= 2 -- in that case, no loop invariant code motion can be
     exposed.  */
  if (!base_hint && (addr->n > 2))
    move_variant_to_index (parts, addr, iv_cand);

  /* First move the most expensive feasible multiplication to index.  */
  if (!parts->index)
    most_expensive_mult_to_index (type, parts, addr, speed);

  /* Try to find a base of the reference.  Since at the moment there is
     no reliable way how to distinguish between pointer and its offset,
     this is just a guess.  */
  if (!parts->symbol && base_hint)
    move_hint_to_base (type, parts, base_hint, addr);
  if (!parts->symbol && !parts->base)
    move_pointer_to_base (parts, addr);

  /* Then try to process the remaining elements.  */
  for (i = 0; i < addr->n; i++)
    {
      part = fold_convert (sizetype, addr->elts[i].val);
      if (!double_int_one_p (addr->elts[i].coef))
        part = fold_build2 (MULT_EXPR, sizetype, part,
                            double_int_to_tree (sizetype,
                                                addr->elts[i].coef));
      add_to_parts (parts, part);
    }
  if (addr->rest)
    add_to_parts (parts, fold_convert (sizetype, addr->rest));
}
/* Makes tree from the affine combination COMB.  */

tree
aff_combination_to_tree (aff_tree *comb)
{
  tree type = comb->type;
  tree expr = NULL_TREE;
  unsigned i;
  double_int off, sgn;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  gcc_assert (comb->n == MAX_AFF_ELTS || comb->rest == NULL_TREE);

  for (i = 0; i < comb->n; i++)
    expr = add_elt_to_tree (expr, type, comb->elts[i].val,
                            comb->elts[i].coef, comb);

  if (comb->rest)
    expr = add_elt_to_tree (expr, type, comb->rest, double_int_one, comb);

  /* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is
     unsigned.  */
  if (comb->offset.is_negative ())
    {
      off = -comb->offset;
      sgn = double_int_minus_one;
    }
  else
    {
      off = comb->offset;
      sgn = double_int_one;
    }
  return add_elt_to_tree (expr, type, double_int_to_tree (type1, off),
                          sgn, comb);
}
/* An earlier revision of aff_combination_to_tree, using the
   function-style double_int API and folding COMB->rest in directly as
   the initial expression.  */

tree
aff_combination_to_tree (aff_tree *comb)
{
  tree type = comb->type;
  tree expr = comb->rest;
  unsigned i;
  double_int off, sgn;

  gcc_assert (comb->n == MAX_AFF_ELTS || comb->rest == NULL_TREE);

  for (i = 0; i < comb->n; i++)
    expr = add_elt_to_tree (expr, type, comb->elts[i].val,
                            comb->elts[i].coef, comb);

  /* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is
     unsigned.  */
  if (double_int_negative_p (comb->offset))
    {
      off = double_int_neg (comb->offset);
      sgn = double_int_minus_one;
    }
  else
    {
      off = comb->offset;
      sgn = double_int_one;
    }
  return add_elt_to_tree (expr, type, double_int_to_tree (type, off),
                          sgn, comb);
}
/* An earlier revision of add_elt_to_tree, predating the special
   handling of pointer types: returns EXPR + ELT * SCALE folded to
   TYPE, with EXPR possibly NULL.  */

static tree
add_elt_to_tree (tree expr, tree type, tree elt, double_int scale,
                 aff_tree *comb)
{
  enum tree_code code;

  scale = double_int_ext_for_comb (scale, comb);
  elt = fold_convert (type, elt);

  if (double_int_one_p (scale))
    {
      if (!expr)
        return elt;

      return fold_build2 (PLUS_EXPR, type, expr, elt);
    }

  if (double_int_minus_one_p (scale))
    {
      if (!expr)
        return fold_build1 (NEGATE_EXPR, type, elt);

      return fold_build2 (MINUS_EXPR, type, expr, elt);
    }

  if (!expr)
    return fold_build2 (MULT_EXPR, type, elt,
                        double_int_to_tree (type, scale));

  if (double_int_negative_p (scale))
    {
      code = MINUS_EXPR;
      scale = double_int_neg (scale);
    }
  else
    code = PLUS_EXPR;

  elt = fold_build2 (MULT_EXPR, type, elt,
                     double_int_to_tree (type, scale));
  return fold_build2 (code, type, expr, elt);
}
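/* Illustration (not GCC code): a minimal stand-alone sketch of the scale
   case analysis performed by add_elt_to_tree above, over plain strings
   and a 'long' scale instead of trees and double_int.  It shows why the
   special cases exist: they produce "x + y", "x - y" and "x - 4*y"
   rather than "x + 1*y", "x + -1*y" or "x + -4*y".  All names here are
   hypothetical.  */

#include <stdio.h>

static void
print_elt_addition (const char *expr, const char *elt, long scale)
{
  if (scale == 1)          /* just elt, not 1 * elt */
    printf ("%s + %s\n", expr, elt);
  else if (scale == -1)    /* subtract, rather than add -1 * elt */
    printf ("%s - %s\n", expr, elt);
  else if (scale < 0)      /* negative scale becomes a MINUS_EXPR */
    printf ("%s - %ld*%s\n", expr, -scale, elt);
  else
    printf ("%s + %ld*%s\n", expr, scale, elt);
}

int
main (void)
{
  print_elt_addition ("x", "y", 1);    /* x + y */
  print_elt_addition ("x", "y", -1);   /* x - y */
  print_elt_addition ("x", "y", -4);   /* x - 4*y */
  print_elt_addition ("x", "y", 4);    /* x + 4*y */
  return 0;
}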
/* Scales COMB by SCALE.  */

void
aff_combination_scale (aff_tree *comb, double_int scale)
{
  unsigned i, j;

  scale = double_int_ext_for_comb (scale, comb);
  if (double_int_one_p (scale))
    return;

  if (double_int_zero_p (scale))
    {
      aff_combination_zero (comb, comb->type);
      return;
    }

  comb->offset
    = double_int_ext_for_comb (double_int_mul (scale, comb->offset), comb);
  for (i = 0, j = 0; i < comb->n; i++)
    {
      double_int new_coef;

      new_coef
        = double_int_ext_for_comb (double_int_mul (scale,
                                                   comb->elts[i].coef),
                                   comb);
      /* A coefficient may become zero due to overflow.  Remove the zero
         elements.  */
      if (double_int_zero_p (new_coef))
        continue;
      comb->elts[j].coef = new_coef;
      comb->elts[j].val = comb->elts[i].val;
      j++;
    }
  comb->n = j;

  if (comb->rest)
    {
      tree type = comb->type;
      if (POINTER_TYPE_P (type))
        type = sizetype;
      if (comb->n < MAX_AFF_ELTS)
        {
          comb->elts[comb->n].coef = scale;
          comb->elts[comb->n].val = comb->rest;
          comb->rest = NULL_TREE;
          comb->n++;
        }
      else
        comb->rest = fold_build2 (MULT_EXPR, type, comb->rest,
                                  double_int_to_tree (type, scale));
    }
}
/* Converts the GMP constant VAL to a tree of type TYPE (defaulting to
   integer_type_node when TYPE is NULL).  */

static tree
gmp_cst_to_tree (tree type, mpz_t val)
{
  tree t = type ? type : integer_type_node;
  mpz_t tmp;
  double_int di;

  mpz_init (tmp);
  mpz_set (tmp, val);
  di = mpz_get_double_int (t, tmp, true);
  mpz_clear (tmp);

  return double_int_to_tree (t, di);
}
/* Moves the loop variant part V in linear address ADDR to be the index
   of PARTS.  */

static void
move_variant_to_index (struct mem_address *parts, aff_tree *addr, tree v)
{
  unsigned i;
  tree val = NULL_TREE;

  gcc_assert (!parts->index);
  for (i = 0; i < addr->n; i++)
    {
      val = addr->elts[i].val;
      if (operand_equal_p (val, v, 0))
        break;
    }

  if (i == addr->n)
    return;

  parts->index = fold_convert (sizetype, val);
  parts->step = double_int_to_tree (sizetype, addr->elts[i].coef);
  aff_combination_remove_elt (addr, i);
}
/* Converts a GMP integer I into a backend tree node of the integer type
   with kind KIND.  */

tree
gfc_conv_mpz_to_tree (mpz_t i, int kind)
{
  double_int val = mpz_get_double_int (gfc_get_int_type (kind), i, true);
  return double_int_to_tree (gfc_get_int_type (kind), val);
}
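/* Illustration (not GCC code): mpz_get_double_int above reduces an
   arbitrary-precision GMP value modulo the precision of the target
   type.  A stand-alone sketch of that wrapping for a 64-bit signed
   target type, using only public GMP calls; the helper name is made
   up.  */

#include <gmp.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Reduce VAL modulo 2^64 and reinterpret the result as a
   two's-complement int64_t.  The value is assembled from two 32-bit
   halves so the sketch does not assume the width of 'unsigned long'.  */
static int64_t
mpz_wrap_to_int64 (const mpz_t val)
{
  mpz_t low, high;
  uint64_t bits;

  mpz_init (low);
  mpz_init (high);
  mpz_fdiv_r_2exp (low, val, 32);     /* non-negative low 32 bits */
  mpz_fdiv_r_2exp (high, val, 64);    /* non-negative low 64 bits...  */
  mpz_tdiv_q_2exp (high, high, 32);   /* ...shifted down to bits 32-63 */
  bits = ((uint64_t) mpz_get_ui (high) << 32) | mpz_get_ui (low);
  mpz_clear (low);
  mpz_clear (high);
  return (int64_t) bits;
}

int
main (void)
{
  mpz_t big;

  mpz_init_set_str (big, "-1", 10);
  /* -1 wraps to 0xffff...ff, which reads back as -1.  */
  printf ("%" PRId64 "\n", mpz_wrap_to_int64 (big));
  mpz_clear (big);
  return 0;
}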
/* Finds the most expensive multiplication in ADDR that can be used in
   an addressing mode and moves it to PARTS->index, with the multiplier
   in PARTS->step.  */

static void
most_expensive_mult_to_index (tree type, struct mem_address *parts,
                              aff_tree *addr, bool speed)
{
  addr_space_t as = TYPE_ADDR_SPACE (type);
  enum machine_mode address_mode = targetm.addr_space.address_mode (as);
  HOST_WIDE_INT coef;
  double_int best_mult, amult, amult_neg;
  unsigned best_mult_cost = 0, acost;
  tree mult_elt = NULL_TREE, elt;
  unsigned i, j;
  enum tree_code op_code;

  best_mult = double_int_zero;
  for (i = 0; i < addr->n; i++)
    {
      if (!double_int_fits_in_shwi_p (addr->elts[i].coef))
        continue;

      coef = double_int_to_shwi (addr->elts[i].coef);
      if (coef == 1
          || !multiplier_allowed_in_address_p (coef, TYPE_MODE (type), as))
        continue;

      acost = multiply_by_cost (coef, address_mode, speed);

      if (acost > best_mult_cost)
        {
          best_mult_cost = acost;
          best_mult = addr->elts[i].coef;
        }
    }

  if (!best_mult_cost)
    return;

  /* Collect elements multiplied by best_mult.  */
  for (i = j = 0; i < addr->n; i++)
    {
      amult = addr->elts[i].coef;
      amult_neg = double_int_ext_for_comb (double_int_neg (amult), addr);

      if (double_int_equal_p (amult, best_mult))
        op_code = PLUS_EXPR;
      else if (double_int_equal_p (amult_neg, best_mult))
        op_code = MINUS_EXPR;
      else
        {
          addr->elts[j] = addr->elts[i];
          j++;
          continue;
        }

      elt = fold_convert (sizetype, addr->elts[i].val);
      if (mult_elt)
        mult_elt = fold_build2 (op_code, sizetype, mult_elt, elt);
      else if (op_code == PLUS_EXPR)
        mult_elt = elt;
      else
        mult_elt = fold_build1 (NEGATE_EXPR, sizetype, elt);
    }
  addr->n = j;

  parts->index = mult_elt;
  parts->step = double_int_to_tree (sizetype, best_mult);
}
/* Compute the offset of EXPR within VAR.  Return error_mark_node if the
   offset is unknown.  */

static tree
compute_object_offset (const_tree expr, const_tree var)
{
  enum tree_code code = PLUS_EXPR;
  tree base, off, t;

  if (expr == var)
    return size_zero_node;

  switch (TREE_CODE (expr))
    {
    case COMPONENT_REF:
      base = compute_object_offset (TREE_OPERAND (expr, 0), var);
      if (base == error_mark_node)
        return base;

      t = TREE_OPERAND (expr, 1);
      off = size_binop (PLUS_EXPR, DECL_FIELD_OFFSET (t),
                        size_int (tree_low_cst (DECL_FIELD_BIT_OFFSET (t), 1)
                                  / BITS_PER_UNIT));
      break;

    case REALPART_EXPR:
    CASE_CONVERT:
    case VIEW_CONVERT_EXPR:
    case NON_LVALUE_EXPR:
      return compute_object_offset (TREE_OPERAND (expr, 0), var);

    case IMAGPART_EXPR:
      base = compute_object_offset (TREE_OPERAND (expr, 0), var);
      if (base == error_mark_node)
        return base;

      off = TYPE_SIZE_UNIT (TREE_TYPE (expr));
      break;

    case ARRAY_REF:
      base = compute_object_offset (TREE_OPERAND (expr, 0), var);
      if (base == error_mark_node)
        return base;

      t = TREE_OPERAND (expr, 1);
      if (TREE_CODE (t) == INTEGER_CST && tree_int_cst_sgn (t) < 0)
        {
          code = MINUS_EXPR;
          t = fold_build1 (NEGATE_EXPR, TREE_TYPE (t), t);
        }
      t = fold_convert (sizetype, t);
      off = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (TREE_TYPE (expr)), t);
      break;

    case MEM_REF:
      gcc_assert (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR);
      return double_int_to_tree (sizetype, mem_ref_offset (expr));

    default:
      return error_mark_node;
    }

  return size_binop (code, base, off);
}
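/* Illustration (not GCC code): compute_object_offset answers "how many
   bytes into VAR does EXPR live?".  At the source level the same number
   falls out of ordinary offset arithmetic, as this stand-alone sketch
   with hypothetical struct types shows (a COMPONENT_REF inside an
   ARRAY_REF inside a COMPONENT_REF).  */

#include <stdio.h>
#include <stddef.h>

struct inner { char pad[3]; int field; };
struct outer { double d; struct inner arr[4]; };

int
main (void)
{
  struct outer o;

  /* Offset of o.arr[2].field within o, built up the way the function
     does it: field offset, plus index times element size, plus the
     offset of the innermost field.  */
  size_t off = offsetof (struct outer, arr)
               + 2 * sizeof (struct inner)
               + offsetof (struct inner, field);

  printf ("%zu\n", off);
  /* Cross-check against direct address arithmetic.  */
  printf ("%zu\n", (size_t) ((char *) &o.arr[2].field - (char *) &o));
  return 0;
}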
/* Adds ELT * SCALE to COMB.  */

void
aff_combination_add_elt (aff_tree *comb, tree elt, double_int scale)
{
  unsigned i;
  tree type;

  scale = double_int_ext_for_comb (scale, comb);
  if (scale.is_zero ())
    return;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, elt, 0))
      {
        double_int new_coef;

        new_coef = comb->elts[i].coef + scale;
        new_coef = double_int_ext_for_comb (new_coef, comb);
        if (!new_coef.is_zero ())
          {
            comb->elts[i].coef = new_coef;
            return;
          }

        comb->n--;
        comb->elts[i] = comb->elts[comb->n];

        if (comb->rest)
          {
            gcc_assert (comb->n == MAX_AFF_ELTS - 1);
            comb->elts[comb->n].coef = double_int_one;
            comb->elts[comb->n].val = comb->rest;
            comb->rest = NULL_TREE;
            comb->n++;
          }
        return;
      }
  if (comb->n < MAX_AFF_ELTS)
    {
      comb->elts[comb->n].coef = scale;
      comb->elts[comb->n].val = elt;
      comb->n++;
      return;
    }

  type = comb->type;
  if (POINTER_TYPE_P (type))
    type = sizetype;

  if (scale.is_one ())
    elt = fold_convert (type, elt);
  else
    elt = fold_build2 (MULT_EXPR, type,
                       fold_convert (type, elt),
                       double_int_to_tree (type, scale));

  if (comb->rest)
    comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest, elt);
  else
    comb->rest = elt;
}
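/* Illustration (not GCC code): aff_combination_add_elt maintains a small
   fixed-size table of (value, coefficient) pairs with an overflow "rest"
   slot.  A stand-alone sketch of the same merge logic over plain
   identifiers; all names are hypothetical and the table is kept tiny on
   purpose so the overflow path is easy to reach.  */

#include <stdio.h>
#include <string.h>

#define MAX_ELTS 2

struct combo
{
  unsigned n;
  const char *val[MAX_ELTS];
  long coef[MAX_ELTS];
  long spilled;   /* stand-in for the symbolic comb->rest tree */
};

static void
combo_add_elt (struct combo *c, const char *elt, long scale)
{
  unsigned i;

  if (scale == 0)
    return;

  /* Merge with an existing entry for the same value; drop the entry
     entirely if the coefficients cancel.  */
  for (i = 0; i < c->n; i++)
    if (strcmp (c->val[i], elt) == 0)
      {
        c->coef[i] += scale;
        if (c->coef[i] == 0)
          {
            c->n--;
            c->val[i] = c->val[c->n];
            c->coef[i] = c->coef[c->n];
          }
        return;
      }

  if (c->n < MAX_ELTS)
    {
      c->val[c->n] = elt;
      c->coef[c->n] = scale;
      c->n++;
    }
  else
    c->spilled++;   /* GCC instead folds the term into comb->rest */
}

int
main (void)
{
  struct combo c = { 0 };

  combo_add_elt (&c, "x", 4);
  combo_add_elt (&c, "y", 1);
  combo_add_elt (&c, "x", -4);   /* cancels the first entry */
  printf ("n = %u, first = %ld*%s\n", c.n, c.coef[0], c.val[0]);
  return 0;
}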
/* A target-port variant of add_elt_to_tree (returns EXPR + ELT * SCALE
   folded to TYPE) for targets such as dsPIC, where pointers in some
   address spaces are wider than sizetype.  */

static tree
add_elt_to_tree (tree expr, tree type, tree elt, double_int scale,
                 aff_tree *comb)
{
  enum tree_code code;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
#ifdef TARGET_POINTER_SIZETYPE
    {
      /* sizetype is not good enough for pointers in ADDRESS_SPACES
         on dsPIC; some pointers are larger than 'sizetype' (CAW) */
      type1 = TARGET_POINTER_SIZETYPE (type);
    }
#else
    type1 = sizetype;
#endif

  scale = double_int_ext_for_comb (scale, comb);
  elt = fold_convert (type1, elt);

  if (double_int_one_p (scale))
    {
      if (!expr)
        return fold_convert (type, elt);

      if (POINTER_TYPE_P (type))
        return fold_build2 (POINTER_PLUS_EXPR, type, expr, elt);
      return fold_build2 (PLUS_EXPR, type, expr, elt);
    }

  if (double_int_minus_one_p (scale))
    {
      if (!expr)
        return fold_convert (type, fold_build1 (NEGATE_EXPR, type1, elt));

      if (POINTER_TYPE_P (type))
        {
          elt = fold_build1 (NEGATE_EXPR, type1, elt);
          return fold_build2 (POINTER_PLUS_EXPR, type, expr, elt);
        }
      return fold_build2 (MINUS_EXPR, type, expr, elt);
    }

  if (!expr)
    return fold_convert (type,
                         fold_build2 (MULT_EXPR, type1, elt,
                                      double_int_to_tree (type1, scale)));

  if (double_int_negative_p (scale))
    {
      code = MINUS_EXPR;
      scale = double_int_neg (scale);
    }
  else
    code = PLUS_EXPR;

  elt = fold_build2 (MULT_EXPR, type1, elt,
                     double_int_to_tree (type1, scale));
  if (POINTER_TYPE_P (type))
    {
      if (code == MINUS_EXPR)
        elt = fold_build1 (NEGATE_EXPR, type1, elt);
      return fold_build2 (POINTER_PLUS_EXPR, type, expr, elt);
    }
  return fold_build2 (code, type, expr, elt);
}
/* Return the marking bitmap for the class TYPE.  For now this is a
   single word describing the type.  */

tree
get_boehm_type_descriptor (tree type)
{
  unsigned int count, log2_size, ubit;
  int bit;
  int all_bits_set = 1;
  int last_set_index = 0;
  HOST_WIDE_INT last_view_index = -1;
  int pointer_after_end = 0;
  double_int mask;
  tree field, value, value_type;

  mask = double_int_zero;

  /* If the GC wasn't requested, just use a null pointer.  */
  if (! flag_use_boehm_gc)
    return null_pointer_node;

  value_type = java_type_for_mode (ptr_mode, 1);

  /* If we have a type of unknown size, use a proc.  */
  if (int_size_in_bytes (type) == -1)
    goto procedure_object_descriptor;

  bit = POINTER_SIZE / BITS_PER_UNIT;
  /* The size of this node has to be known.  And, we only support 32
     and 64 bit targets, so we need to know that the log2 is one of our
     values.  */
  log2_size = exact_log2 (bit);
  if (bit == -1 || (log2_size != 2 && log2_size != 3))
    {
      /* This means the GC isn't supported.  We should probably abort
         or give an error.  Instead, for now, we just silently revert.
         FIXME.  */
      return null_pointer_node;
    }
  bit *= BITS_PER_UNIT;

  /* Warning avoidance.  */
  ubit = (unsigned int) bit;

  if (type == class_type_node)
    goto procedure_object_descriptor;

  field = TYPE_FIELDS (type);
  mark_reference_fields (field, &mask, ubit,
                         &pointer_after_end, &all_bits_set,
                         &last_set_index, &last_view_index);

  /* If the object is all pointers, or if the part with pointers fits
     in our bitmap, then we are ok.  Otherwise we have to allocate it a
     different way.  */
  if (all_bits_set != -1 || (pointer_after_end && flag_reduced_reflection))
    {
      /* In this case the initial part of the object is all reference
         fields, and the end of the object is all non-reference fields.
         We represent the mark as a count of the fields, shifted.  In
         the GC the computation looks something like this:
         value = DS_LENGTH | WORDS_TO_BYTES (last_set_index + 1);
         DS_LENGTH is 0.
         WORDS_TO_BYTES shifts by log2(bytes-per-pointer).

         In the case of flag_reduced_reflection and the bitmap would
         overflow, we tell the gc that the object is all pointers so
         that we don't have to emit reflection data for run time
         marking.  */
      count = 0;
      mask = double_int_zero;
      ++last_set_index;
      while (last_set_index)
        {
          if ((last_set_index & 1))
            mask = mask.set_bit (log2_size + count);
          last_set_index >>= 1;
          ++count;
        }
      value = double_int_to_tree (value_type, mask);
    }
  else if (! pointer_after_end)
    {
      /* Bottom two bits for bitmap mark type are 01.  */
      mask = mask.set_bit (0);
      value = double_int_to_tree (value_type, mask);
    }
  else
    {
    procedure_object_descriptor:
      value = null_pointer_node;
    }

  return value;
}