static int crx_addr_reg_p (rtx addr_reg)
{
  rtx reg;

  if (REG_P (addr_reg))
    {
      reg = addr_reg;
    }
  else if (GET_CODE (addr_reg) == SUBREG
	   && REG_P (SUBREG_REG (addr_reg))
	   && GET_MODE_SIZE (GET_MODE (SUBREG_REG (addr_reg)))
	   <= UNITS_PER_WORD)
    {
      reg = SUBREG_REG (addr_reg);
    }
  else
    return FALSE;

  if (GET_MODE (addr_reg) != Pmode)
    {
      return FALSE;
    }

  return TRUE;
}
Example #2
0
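/* Kill the value tracked for X in VD.  If X is a SUBREG, first reduce it to
   the register it refers to (falling back to the inner register when
   simplify_subreg fails), then invalidate the recorded data for every hard
   register that X occupies.  */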
static void
kill_value (const_rtx x, struct value_data *vd)
{
  if (GET_CODE (x) == SUBREG)
    {
      rtx tmp = simplify_subreg (GET_MODE (x), SUBREG_REG (x),
				 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      x = tmp ? tmp : SUBREG_REG (x);
    }
  if (REG_P (x))
    kill_value_regno (REGNO (x), REG_NREGS (x), vd);
}
Example #3
0
/* Transform (subreg (plus reg const)) to (plus (subreg reg) const)
   when it is possible.  Return the transformation result if the
   transformation is done, otherwise return X unchanged.  */
static rtx
move_plus_up (rtx x)
{
  rtx subreg_reg;
  enum machine_mode x_mode, subreg_reg_mode;
  
  if (GET_CODE (x) != SUBREG || !subreg_lowpart_p (x))
    return x;
  subreg_reg = SUBREG_REG (x);
  x_mode = GET_MODE (x);
  subreg_reg_mode = GET_MODE (subreg_reg);
  if (GET_CODE (x) == SUBREG && GET_CODE (subreg_reg) == PLUS
      && GET_MODE_SIZE (x_mode) <= GET_MODE_SIZE (subreg_reg_mode)
      && CONSTANT_P (XEXP (subreg_reg, 1))
      && GET_MODE_CLASS (x_mode) == MODE_INT
      && GET_MODE_CLASS (subreg_reg_mode) == MODE_INT)
    {
      rtx cst = simplify_subreg (x_mode, XEXP (subreg_reg, 1), subreg_reg_mode,
				 subreg_lowpart_offset (x_mode,
							subreg_reg_mode));
      if (cst && CONSTANT_P (cst))
	return gen_rtx_PLUS (x_mode, lowpart_subreg (x_mode,
						     XEXP (subreg_reg, 0),
						     subreg_reg_mode), cst);
    }
  return x;
}
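/* For example, on a little-endian target (lowpart byte offset 0), with an
   arbitrarily numbered pseudo 100,
     (subreg:SI (plus:DI (reg:DI 100) (const_int 8)) 0)
   is rewritten by move_plus_up into
     (plus:SI (subreg:SI (reg:DI 100) 0) (const_int 8)).  */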
static inline int
q_regs_operand_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
#line 49 "../.././gcc/config/i386/predicates.md"
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  return ANY_QI_REG_P (op);
}
Example #5
0
static void
kill_value (const_rtx x, struct value_data *vd)
{
  if (GET_CODE (x) == SUBREG)
    {
      rtx tmp = simplify_subreg (GET_MODE (x), SUBREG_REG (x),
				 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      x = tmp ? tmp : SUBREG_REG (x);
    }
  if (REG_P (x))
    {
      unsigned int regno = REGNO (x);
      unsigned int n = hard_regno_nregs[regno][GET_MODE (x)];

      kill_value_regno (regno, n, vd);
    }
}
Example #6
0
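/* Try to replace every occurrence of OLD_RTX within X by NEW_RTX and
   simplify the result to MODE.  SPEED selects the cost model used when
   deciding whether to replace an address.  Return the simplified rtx, or
   NULL_RTX if nothing changed, the result could not be handled, or NEW_RTX
   is a hard register.  */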
static rtx
propagate_rtx (rtx x, machine_mode mode, rtx old_rtx, rtx new_rtx,
	       bool speed)
{
  rtx tem;
  bool collapsed;
  int flags;

  if (REG_P (new_rtx) && REGNO (new_rtx) < FIRST_PSEUDO_REGISTER)
    return NULL_RTX;

  flags = 0;
  if (REG_P (new_rtx)
      || CONSTANT_P (new_rtx)
      || (GET_CODE (new_rtx) == SUBREG
	  && REG_P (SUBREG_REG (new_rtx))
	  && (GET_MODE_SIZE (mode)
	      <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (new_rtx))))))
    flags |= PR_CAN_APPEAR;
  if (!varying_mem_p (new_rtx))
    flags |= PR_HANDLE_MEM;

  if (speed)
    flags |= PR_OPTIMIZE_FOR_SPEED;

  tem = x;
  collapsed = propagate_rtx_1 (&tem, old_rtx, copy_rtx (new_rtx), flags);
  if (tem == x || !collapsed)
    return NULL_RTX;

  /* gen_lowpart_common will not be able to process VOIDmode entities other
     than CONST_INTs.  */
  if (GET_MODE (tem) == VOIDmode && !CONST_INT_P (tem))
    return NULL_RTX;

  if (GET_MODE (tem) == VOIDmode)
    tem = rtl_hooks.gen_lowpart_no_emit (mode, tem);
  else
    gcc_assert (GET_MODE (tem) == mode);

  return tem;
}
Example #7
0
static void
kill_value (rtx x, struct value_data *vd)
{
  rtx orig_rtx = x;

  if (GET_CODE (x) == SUBREG)
    {
      x = simplify_subreg (GET_MODE (x), SUBREG_REG (x),
			   GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      if (x == NULL_RTX)
	x = SUBREG_REG (orig_rtx);
    }
  if (REG_P (x))
    {
      unsigned int regno = REGNO (x);
      unsigned int n = hard_regno_nregs[regno][GET_MODE (x)];

      kill_value_regno (regno, n, vd);
    }
}
static inline int
register_no_elim_operand_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
#line 513 "../.././gcc/config/i386/predicates.md"
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  return !(op == arg_pointer_rtx
	   || op == frame_pointer_rtx
	   || IN_RANGE (REGNO (op),
			FIRST_PSEUDO_REGISTER, LAST_VIRTUAL_REGISTER));
}
static inline int
index_register_operand_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
#line 542 "../.././gcc/config/i386/predicates.md"
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (reload_in_progress || reload_completed)
    return REG_OK_FOR_INDEX_STRICT_P (op);
  else
    return REG_OK_FOR_INDEX_NONSTRICT_P (op);
}
static inline int
call_register_no_elim_operand_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
#line 528 "../.././gcc/config/i386/predicates.md"
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (!TARGET_64BIT && op == stack_pointer_rtx)
    return 0;

  return register_no_elim_operand (op, mode);
}
static inline int
ext_register_operand_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
#line 58 "../.././gcc/config/i386/predicates.md"
{
  if ((!TARGET_64BIT || GET_MODE (op) != DImode)
      && GET_MODE (op) != SImode && GET_MODE (op) != HImode)
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  /* Be careful to accept only registers having upper parts.  */
  return REGNO (op) > LAST_VIRTUAL_REGISTER || REGNO (op) < 4;
}
Example #12
0
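/* Mark the hard register(s) underlying REG as live in the HARD_REG_SET
   passed through LIVE.  Look through a SUBREG and ignore pseudo
   registers.  */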
static void
reg_becomes_live (rtx reg, const_rtx setter ATTRIBUTE_UNUSED, void *live)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (!REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    add_to_hard_reg_set ((HARD_REG_SET *) live, GET_MODE (reg), regno);
}
Example #13
0
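/* Append a concise textual form of the rtx value X to BUF, recursing into
   nested values; VERBOSE is passed down to print_exp.  */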
static void
print_value (char *buf, rtx x, int verbose)
{
  char t[BUF_LEN];
  char *cur = buf;

  switch (GET_CODE (x))
    {
    case CONST_INT:
      sprintf (t, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
      cur = safe_concat (buf, cur, t);
      break;
    case CONST_DOUBLE:
      if (FLOAT_MODE_P (GET_MODE (x)))
	real_to_decimal (t, CONST_DOUBLE_REAL_VALUE (x), sizeof (t), 0, 1);
      else
	sprintf (t, "<0x%lx,0x%lx>", (long) CONST_DOUBLE_LOW (x), (long) CONST_DOUBLE_HIGH (x));
      cur = safe_concat (buf, cur, t);
      break;
    case CONST_STRING:
      cur = safe_concat (buf, cur, "\"");
      cur = safe_concat (buf, cur, XSTR (x, 0));
      cur = safe_concat (buf, cur, "\"");
      break;
    case SYMBOL_REF:
      cur = safe_concat (buf, cur, "`");
      cur = safe_concat (buf, cur, XSTR (x, 0));
      cur = safe_concat (buf, cur, "'");
      break;
    case LABEL_REF:
      sprintf (t, "L%d", INSN_UID (XEXP (x, 0)));
      cur = safe_concat (buf, cur, t);
      break;
    case CONST:
      print_value (t, XEXP (x, 0), verbose);
      cur = safe_concat (buf, cur, "const(");
      cur = safe_concat (buf, cur, t);
      cur = safe_concat (buf, cur, ")");
      break;
    case HIGH:
      print_value (t, XEXP (x, 0), verbose);
      cur = safe_concat (buf, cur, "high(");
      cur = safe_concat (buf, cur, t);
      cur = safe_concat (buf, cur, ")");
      break;
    case REG:
      if (REGNO (x) < FIRST_PSEUDO_REGISTER)
	{
	  int c = reg_names[REGNO (x)][0];
	  if (ISDIGIT (c))
	    cur = safe_concat (buf, cur, "%");

	  cur = safe_concat (buf, cur, reg_names[REGNO (x)]);
	}
      else
	{
	  sprintf (t, "r%d", REGNO (x));
	  cur = safe_concat (buf, cur, t);
	}
      break;
    case SUBREG:
      print_value (t, SUBREG_REG (x), verbose);
      cur = safe_concat (buf, cur, t);
      sprintf (t, "#%d", SUBREG_BYTE (x));
      cur = safe_concat (buf, cur, t);
      break;
    case SCRATCH:
      cur = safe_concat (buf, cur, "scratch");
      break;
    case CC0:
      cur = safe_concat (buf, cur, "cc0");
      break;
    case PC:
      cur = safe_concat (buf, cur, "pc");
      break;
    case MEM:
      print_value (t, XEXP (x, 0), verbose);
      cur = safe_concat (buf, cur, "[");
      cur = safe_concat (buf, cur, t);
      cur = safe_concat (buf, cur, "]");
      break;
    default:
      print_exp (t, x, verbose);
      cur = safe_concat (buf, cur, t);
      break;
    }
}				/* print_value */
Example #14
0
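/* Replace OLD_RTX with NEW_RTX everywhere inside *PX, re-simplifying each
   enclosing expression and storing the result back into *PX.  FLAGS says
   whether NEW_RTX may appear in the final result (PR_CAN_APPEAR) and whether
   MEMs may be touched (PR_HANDLE_MEM).  Return true if the replacement made
   so far is valid.  */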
static bool
propagate_rtx_1 (rtx *px, rtx old_rtx, rtx new_rtx, int flags)
{
  rtx x = *px, tem = NULL_RTX, op0, op1, op2;
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  bool can_appear = (flags & PR_CAN_APPEAR) != 0;
  bool valid_ops = true;

  if (!(flags & PR_HANDLE_MEM) && MEM_P (x) && !MEM_READONLY_P (x))
    {
      /* If unsafe, change MEMs to CLOBBERs or SCRATCHes (to preserve whether
	 they have side effects or not).  */
      *px = (side_effects_p (x)
	     ? gen_rtx_CLOBBER (GET_MODE (x), const0_rtx)
	     : gen_rtx_SCRATCH (GET_MODE (x)));
      return false;
    }

  /* If X is OLD_RTX, return NEW_RTX.  But not if replacing only within an
     address, and we are *not* inside one.  */
  if (x == old_rtx)
    {
      *px = new_rtx;
      return can_appear;
    }

  /* If this is an expression, try recursive substitution.  */
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      valid_ops &= propagate_rtx_1 (&op0, old_rtx, new_rtx, flags);
      if (op0 == XEXP (x, 0))
	return true;
      tem = simplify_gen_unary (code, mode, op0, op_mode);
      break;

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      valid_ops &= propagate_rtx_1 (&op0, old_rtx, new_rtx, flags);
      valid_ops &= propagate_rtx_1 (&op1, old_rtx, new_rtx, flags);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return true;
      tem = simplify_gen_binary (code, mode, op0, op1);
      break;

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      valid_ops &= propagate_rtx_1 (&op0, old_rtx, new_rtx, flags);
      valid_ops &= propagate_rtx_1 (&op1, old_rtx, new_rtx, flags);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return true;
      tem = simplify_gen_relational (code, mode, op_mode, op0, op1);
      break;

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op2 = XEXP (x, 2);
      op_mode = GET_MODE (op0);
      valid_ops &= propagate_rtx_1 (&op0, old_rtx, new_rtx, flags);
      valid_ops &= propagate_rtx_1 (&op1, old_rtx, new_rtx, flags);
      valid_ops &= propagate_rtx_1 (&op2, old_rtx, new_rtx, flags);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return true;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      tem = simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
      break;

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
          op0 = XEXP (x, 0);
	  valid_ops &= propagate_rtx_1 (&op0, old_rtx, new_rtx, flags);
          if (op0 == XEXP (x, 0))
	    return true;
	  tem = simplify_gen_subreg (mode, op0, GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	}
      break;

    case RTX_OBJ:
      if (code == MEM && x != new_rtx)
	{
	  rtx new_op0;
	  op0 = XEXP (x, 0);

	  /* There are some addresses that we cannot work on.  */
	  if (!can_simplify_addr (op0))
	    return true;

	  op0 = new_op0 = targetm.delegitimize_address (op0);
	  valid_ops &= propagate_rtx_1 (&new_op0, old_rtx, new_rtx,
					flags | PR_CAN_APPEAR);

	  /* Dismiss transformations that we do not want to carry out.  */
	  if (!valid_ops
	      || new_op0 == op0
	      || !(GET_MODE (new_op0) == GET_MODE (op0)
		   || GET_MODE (new_op0) == VOIDmode))
	    return true;

	  canonicalize_address (new_op0);

	  /* Copy propagations are always ok.  Otherwise check the costs.  */
	  if (!(REG_P (old_rtx) && REG_P (new_rtx))
	      && !should_replace_address (op0, new_op0, GET_MODE (x),
					  MEM_ADDR_SPACE (x),
	      			 	  flags & PR_OPTIMIZE_FOR_SPEED))
	    return true;

	  tem = replace_equiv_address_nv (x, new_op0);
	}

      else if (code == LO_SUM)
	{
          op0 = XEXP (x, 0);
          op1 = XEXP (x, 1);

	  /* The only simplification we do attempts to remove references to op0
	     or make it constant -- in both cases, op0's invalidity will not
	     make the result invalid.  */
	  propagate_rtx_1 (&op0, old_rtx, new_rtx, flags | PR_CAN_APPEAR);
	  valid_ops &= propagate_rtx_1 (&op1, old_rtx, new_rtx, flags);
          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return true;

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    tem = op1;
	  else
	    tem = gen_rtx_LO_SUM (mode, op0, op1);

	  /* OP1 is likely not a legitimate address, otherwise there would have
	     been no LO_SUM.  We want it to disappear if it is invalid, so
	     return false in that case.  */
	  return memory_address_p (mode, tem);
	}

      else if (code == REG)
	{
	  if (rtx_equal_p (x, old_rtx))
	    {
              *px = new_rtx;
              return can_appear;
	    }
	}
      break;

    default:
      break;
    }

  /* No change, no trouble.  */
  if (tem == NULL_RTX)
    return true;

  *px = tem;

  /* Allow replacements that simplify operations on a vector or complex
     value to a component.  The most prominent case is
     (subreg ([vec_]concat ...)).   */
  if (REG_P (tem) && !HARD_REGISTER_P (tem)
      && (VECTOR_MODE_P (GET_MODE (new_rtx))
	  || COMPLEX_MODE_P (GET_MODE (new_rtx)))
      && GET_MODE (tem) == GET_MODE_INNER (GET_MODE (new_rtx)))
    return true;

  /* The replacement we made so far is valid if all of the recursive
     replacements were valid, or if we could simplify everything to
     a constant.  */
  return valid_ops || can_appear || CONSTANT_P (tem);
}
Example #15
0
/* Scan X and replace any eliminable registers (such as fp) with a
   replacement (such as sp) if SUBST_P, plus an offset.  The offset is
   a change in the offset between the eliminable register and its
   substitution if UPDATE_P, or the full offset if FULL_P, or
   otherwise zero.  If FULL_P, we also use the SP offsets for
   elimination to SP.  If UPDATE_P, use UPDATE_SP_OFFSET for updating
   offsets of registers eliminable to SP.  If UPDATE_SP_OFFSET is
   non-zero, don't use the difference of the offset and the previous
   offset.

   MEM_MODE is the mode of an enclosing MEM.  We need this to know how
   much to adjust a register for, e.g., PRE_DEC.  Also, if we are
   inside a MEM, we are allowed to replace a sum of a hard register
   and the constant zero with the hard register, which we cannot do
   outside a MEM.  In addition, we need to record the fact that a
   hard register is referenced outside a MEM.

   If we make full substitution to SP for non-null INSN, add the insn
   sp offset.  */
rtx
lra_eliminate_regs_1 (rtx_insn *insn, rtx x, machine_mode mem_mode,
		      bool subst_p, bool update_p,
		      HOST_WIDE_INT update_sp_offset, bool full_p)
{
  enum rtx_code code = GET_CODE (x);
  struct lra_elim_table *ep;
  rtx new_rtx;
  int i, j;
  const char *fmt;
  int copied = 0;

  lra_assert (!update_p || !full_p);
  lra_assert (update_sp_offset == 0 || (!subst_p && update_p && !full_p));
  if (! current_function_decl)
    return x;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
    case ASM_INPUT:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case RETURN:
      return x;

    case REG:
      /* First handle the case where we encounter a bare hard register
	 that is eliminable.  Replace it with a PLUS.  */
      if ((ep = get_elimination (x)) != NULL)
	{
	  rtx to = subst_p ? ep->to_rtx : ep->from_rtx;

	  if (update_sp_offset != 0)
	    {
	      if (ep->to_rtx == stack_pointer_rtx)
		return plus_constant (Pmode, to, update_sp_offset);
	      return to;
	    }
	  else if (update_p)
	    return plus_constant (Pmode, to, ep->offset - ep->previous_offset);
	  else if (full_p)
	    return plus_constant (Pmode, to,
				  ep->offset
				  - (insn != NULL_RTX
				     && ep->to_rtx == stack_pointer_rtx
				     ? lra_get_insn_recog_data (insn)->sp_offset
				     : 0));
	  else
	    return to;
	}
      return x;

    case PLUS:
      /* If this is the sum of an eliminable register and a constant, rework
	 the sum.  */
      if (REG_P (XEXP (x, 0)) && CONSTANT_P (XEXP (x, 1)))
	{
	  if ((ep = get_elimination (XEXP (x, 0))) != NULL)
	    {
	      HOST_WIDE_INT offset;
	      rtx to = subst_p ? ep->to_rtx : ep->from_rtx;

	      if (! update_p && ! full_p)
		return gen_rtx_PLUS (Pmode, to, XEXP (x, 1));
	      
	      if (update_sp_offset != 0)
		offset = ep->to_rtx == stack_pointer_rtx ? update_sp_offset : 0;
	      else
		offset = (update_p
			  ? ep->offset - ep->previous_offset : ep->offset);
	      if (full_p && insn != NULL_RTX && ep->to_rtx == stack_pointer_rtx)
		offset -= lra_get_insn_recog_data (insn)->sp_offset;
	      if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) == -offset)
		return to;
	      else
		return gen_rtx_PLUS (Pmode, to,
				     plus_constant (Pmode,
						    XEXP (x, 1), offset));
	    }

	  /* If the hard register is not eliminable, we are done since
	     the other operand is a constant.  */
	  return x;
	}

      /* If this is part of an address, we want to bring any constant
	 to the outermost PLUS.  We will do this by doing hard
	 register replacement in our operands and seeing if a constant
	 shows up in one of them.

	 Note that there is no risk of modifying the structure of the
	 insn, since we only get called for its operands, thus we are
	 either modifying the address inside a MEM, or something like
	 an address operand of a load-address insn.  */

      {
	rtx new0 = lra_eliminate_regs_1 (insn, XEXP (x, 0), mem_mode,
					 subst_p, update_p,
					 update_sp_offset, full_p);
	rtx new1 = lra_eliminate_regs_1 (insn, XEXP (x, 1), mem_mode,
					 subst_p, update_p,
					 update_sp_offset, full_p);

	new0 = move_plus_up (new0);
	new1 = move_plus_up (new1);
	if (new0 != XEXP (x, 0) || new1 != XEXP (x, 1))
	  return form_sum (new0, new1);
      }
      return x;

    case MULT:
      /* If this is the product of an eliminable hard register and a
	 constant, apply the distribute law and move the constant out
	 so that we have (plus (mult ..) ..).  This is needed in order
	 to keep load-address insns valid.  This case is pathological.
	 We ignore the possibility of overflow here.  */
      if (REG_P (XEXP (x, 0)) && CONST_INT_P (XEXP (x, 1))
	  && (ep = get_elimination (XEXP (x, 0))) != NULL)
	{
	  rtx to = subst_p ? ep->to_rtx : ep->from_rtx;

	  if (update_sp_offset != 0)
	    {
	      if (ep->to_rtx == stack_pointer_rtx)
		return plus_constant (Pmode,
				      gen_rtx_MULT (Pmode, to, XEXP (x, 1)),
				      update_sp_offset * INTVAL (XEXP (x, 1)));
	      return gen_rtx_MULT (Pmode, to, XEXP (x, 1));
	    }
	  else if (update_p)
	    return plus_constant (Pmode,
				  gen_rtx_MULT (Pmode, to, XEXP (x, 1)),
				  (ep->offset - ep->previous_offset)
				  * INTVAL (XEXP (x, 1)));
	  else if (full_p)
	    {
	      HOST_WIDE_INT offset = ep->offset;

	      if (insn != NULL_RTX && ep->to_rtx == stack_pointer_rtx)
		offset -= lra_get_insn_recog_data (insn)->sp_offset;
	      return
		plus_constant (Pmode,
			       gen_rtx_MULT (Pmode, to, XEXP (x, 1)),
			       offset * INTVAL (XEXP (x, 1)));
	    }
	  else
	    return gen_rtx_MULT (Pmode, to, XEXP (x, 1));
	}

      /* fall through */

    case CALL:
    case COMPARE:
    /* See comments before PLUS about handling MINUS.  */
    case MINUS:
    case DIV:	   case UDIV:
    case MOD:	   case UMOD:
    case AND:	   case IOR:	  case XOR:
    case ROTATERT: case ROTATE:
    case ASHIFTRT: case LSHIFTRT: case ASHIFT:
    case NE:	   case EQ:
    case GE:	   case GT:	  case GEU:    case GTU:
    case LE:	   case LT:	  case LEU:    case LTU:
      {
	rtx new0 = lra_eliminate_regs_1 (insn, XEXP (x, 0), mem_mode,
					 subst_p, update_p, 
					 update_sp_offset, full_p);
	rtx new1 = XEXP (x, 1)
		   ? lra_eliminate_regs_1 (insn, XEXP (x, 1), mem_mode,
					   subst_p, update_p,
					   update_sp_offset, full_p) : 0;

	if (new0 != XEXP (x, 0) || new1 != XEXP (x, 1))
	  return gen_rtx_fmt_ee (code, GET_MODE (x), new0, new1);
      }
      return x;

    case EXPR_LIST:
      /* If we have something in XEXP (x, 0), the usual case,
	 eliminate it.	*/
      if (XEXP (x, 0))
	{
	  new_rtx = lra_eliminate_regs_1 (insn, XEXP (x, 0), mem_mode,
					  subst_p, update_p,
					  update_sp_offset, full_p);
	  if (new_rtx != XEXP (x, 0))
	    {
	      /* If this is a REG_DEAD note, it is not valid anymore.
		 Using the eliminated version could result in creating a
		 REG_DEAD note for the stack or frame pointer.	*/
	      if (REG_NOTE_KIND (x) == REG_DEAD)
		return (XEXP (x, 1)
			? lra_eliminate_regs_1 (insn, XEXP (x, 1), mem_mode,
						subst_p, update_p,
						update_sp_offset, full_p)
			: NULL_RTX);

	      x = alloc_reg_note (REG_NOTE_KIND (x), new_rtx, XEXP (x, 1));
	    }
	}

      /* fall through */

    case INSN_LIST:
    case INT_LIST:
      /* Now do eliminations in the rest of the chain.	If this was
	 an EXPR_LIST, this might result in allocating more memory than is
	 strictly needed, but it simplifies the code.  */
      if (XEXP (x, 1))
	{
	  new_rtx = lra_eliminate_regs_1 (insn, XEXP (x, 1), mem_mode,
					  subst_p, update_p,
					  update_sp_offset, full_p);
	  if (new_rtx != XEXP (x, 1))
	    return
	      gen_rtx_fmt_ee (GET_CODE (x), GET_MODE (x),
			      XEXP (x, 0), new_rtx);
	}
      return x;

    case PRE_INC:
    case POST_INC:
    case PRE_DEC:
    case POST_DEC:
      /* We do not support elimination of a register that is modified.
	 elimination_effects has already made sure that this does not
	 happen.  */
      return x;

    case PRE_MODIFY:
    case POST_MODIFY:
      /* We do not support elimination of a hard register that is
	 modified.  LRA has already made sure that this does not
	 happen. The only remaining case we need to consider here is
	 that the increment value may be an eliminable register.  */
      if (GET_CODE (XEXP (x, 1)) == PLUS
	  && XEXP (XEXP (x, 1), 0) == XEXP (x, 0))
	{
	  rtx new_rtx = lra_eliminate_regs_1 (insn, XEXP (XEXP (x, 1), 1),
					      mem_mode, subst_p, update_p,
					      update_sp_offset, full_p);

	  if (new_rtx != XEXP (XEXP (x, 1), 1))
	    return gen_rtx_fmt_ee (code, GET_MODE (x), XEXP (x, 0),
				   gen_rtx_PLUS (GET_MODE (x),
						 XEXP (x, 0), new_rtx));
	}
      return x;

    case STRICT_LOW_PART:
    case NEG:	       case NOT:
    case SIGN_EXTEND:  case ZERO_EXTEND:
    case TRUNCATE:     case FLOAT_EXTEND: case FLOAT_TRUNCATE:
    case FLOAT:	       case FIX:
    case UNSIGNED_FIX: case UNSIGNED_FLOAT:
    case ABS:
    case SQRT:
    case FFS:
    case CLZ:
    case CTZ:
    case POPCOUNT:
    case PARITY:
    case BSWAP:
      new_rtx = lra_eliminate_regs_1 (insn, XEXP (x, 0), mem_mode,
				      subst_p, update_p,
				      update_sp_offset, full_p);
      if (new_rtx != XEXP (x, 0))
	return gen_rtx_fmt_e (code, GET_MODE (x), new_rtx);
      return x;

    case SUBREG:
      new_rtx = lra_eliminate_regs_1 (insn, SUBREG_REG (x), mem_mode,
				      subst_p, update_p,
				      update_sp_offset, full_p);

      if (new_rtx != SUBREG_REG (x))
	{
	  int x_size = GET_MODE_SIZE (GET_MODE (x));
	  int new_size = GET_MODE_SIZE (GET_MODE (new_rtx));

	  if (MEM_P (new_rtx) && x_size <= new_size)
	    {
	      SUBREG_REG (x) = new_rtx;
	      alter_subreg (&x, false);
	      return x;
	    }
	  else if (! subst_p)
	    {
	      /* LRA can transform subregs itself.  So don't call
		 simplify_gen_subreg until LRA transformations are
		 finished.  Function simplify_gen_subreg can do
		 non-trivial transformations (like truncation) which
		 might cause LRA to fail.  */
	      SUBREG_REG (x) = new_rtx;
	      return x;
	    }
	  else
	    return simplify_gen_subreg (GET_MODE (x), new_rtx,
					GET_MODE (new_rtx), SUBREG_BYTE (x));
	}

      return x;

    case MEM:
      /* Our only special processing is to pass the mode of the MEM to our
	 recursive call and copy the flags.  While we are here, handle this
	 case more efficiently.	 */
      return
	replace_equiv_address_nv
	(x,
	 lra_eliminate_regs_1 (insn, XEXP (x, 0), GET_MODE (x),
			       subst_p, update_p, update_sp_offset, full_p));

    case USE:
      /* Handle insn_list USE that a call to a pure function may generate.  */
      new_rtx = lra_eliminate_regs_1 (insn, XEXP (x, 0), VOIDmode,
				      subst_p, update_p, update_sp_offset, full_p);
      if (new_rtx != XEXP (x, 0))
	return gen_rtx_USE (GET_MODE (x), new_rtx);
      return x;

    case CLOBBER:
    case SET:
      gcc_unreachable ();

    default:
      break;
    }

  /* Process each of our operands recursively.	If any have changed, make a
     copy of the rtx.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; i < GET_RTX_LENGTH (code); i++, fmt++)
    {
      if (*fmt == 'e')
	{
	  new_rtx = lra_eliminate_regs_1 (insn, XEXP (x, i), mem_mode,
					  subst_p, update_p,
					  update_sp_offset, full_p);
	  if (new_rtx != XEXP (x, i) && ! copied)
	    {
	      x = shallow_copy_rtx (x);
	      copied = 1;
	    }
	  XEXP (x, i) = new_rtx;
	}
      else if (*fmt == 'E')
	{
	  int copied_vec = 0;
	  for (j = 0; j < XVECLEN (x, i); j++)
	    {
	      new_rtx = lra_eliminate_regs_1 (insn, XVECEXP (x, i, j), mem_mode,
					      subst_p, update_p,
					      update_sp_offset, full_p);
	      if (new_rtx != XVECEXP (x, i, j) && ! copied_vec)
		{
		  rtvec new_v = gen_rtvec_v (XVECLEN (x, i),
					     XVEC (x, i)->elem);
		  if (! copied)
		    {
		      x = shallow_copy_rtx (x);
		      copied = 1;
		    }
		  XVEC (x, i) = new_v;
		  copied_vec = 1;
		}
	      XVECEXP (x, i, j) = new_rtx;
	    }
	}
    }

  return x;
}
Example #16
0
void
print_value (pretty_printer *pp, const_rtx x, int verbose)
{
  char tmp[1024];

  if (!x)
    {
      pp_string (pp, "(nil)");
      return;
    }
  switch (GET_CODE (x))
    {
    case CONST_INT:
      pp_scalar (pp, HOST_WIDE_INT_PRINT_HEX,
		 (unsigned HOST_WIDE_INT) INTVAL (x));
      break;
    case CONST_DOUBLE:
      if (FLOAT_MODE_P (GET_MODE (x)))
	{
	  real_to_decimal (tmp, CONST_DOUBLE_REAL_VALUE (x),
			   sizeof (tmp), 0, 1);
	  pp_string (pp, tmp);
	}
      else
	pp_printf (pp, "<%wx,%wx>",
		   (unsigned HOST_WIDE_INT) CONST_DOUBLE_LOW (x),
		   (unsigned HOST_WIDE_INT) CONST_DOUBLE_HIGH (x));
      break;
    case CONST_FIXED:
      fixed_to_decimal (tmp, CONST_FIXED_VALUE (x), sizeof (tmp));
      pp_string (pp, tmp);
      break;
    case CONST_STRING:
      pp_printf (pp, "\"%s\"", XSTR (x, 0));
      break;
    case SYMBOL_REF:
      pp_printf (pp, "`%s'", XSTR (x, 0));
      break;
    case LABEL_REF:
      pp_printf (pp, "L%d", INSN_UID (XEXP (x, 0)));
      break;
    case CONST:
    case HIGH:
    case STRICT_LOW_PART:
      pp_printf (pp, "%s(", GET_RTX_NAME (GET_CODE (x)));
      print_value (pp, XEXP (x, 0), verbose);
      pp_right_paren (pp);
      break;
    case REG:
      if (REGNO (x) < FIRST_PSEUDO_REGISTER)
	{
	  if (ISDIGIT (reg_names[REGNO (x)][0]))
	    pp_modulo (pp);
	  pp_string (pp, reg_names[REGNO (x)]);
	}
      else
	pp_printf (pp, "r%d", REGNO (x));
      if (verbose)
	pp_printf (pp, ":%s", GET_MODE_NAME (GET_MODE (x)));
      break;
    case SUBREG:
      print_value (pp, SUBREG_REG (x), verbose);
      pp_printf (pp, "#%d", SUBREG_BYTE (x));
      break;
    case SCRATCH:
    case CC0:
    case PC:
      pp_string (pp, GET_RTX_NAME (GET_CODE (x)));
      break;
    case MEM:
      pp_left_bracket (pp);
      print_value (pp, XEXP (x, 0), verbose);
      pp_right_bracket (pp);
      break;
    case DEBUG_EXPR:
      pp_printf (pp, "D#%i", DEBUG_TEMP_UID (DEBUG_EXPR_TREE_DECL (x)));
      break;
    default:
      print_exp (pp, x, verbose);
      break;
    }
}				/* print_value */
Example #17
0
void
print_value (char *buf, const_rtx x, int verbose)
{
  char t[BUF_LEN];
  char *cur = buf;

  if (!x)
    {
      safe_concat (buf, buf, "(nil)");
      return;
    }
  switch (GET_CODE (x))
    {
    case CONST_INT:
      sprintf (t, HOST_WIDE_INT_PRINT_HEX,
	       (unsigned HOST_WIDE_INT) INTVAL (x));
      cur = safe_concat (buf, cur, t);
      break;
    case CONST_DOUBLE:
      if (FLOAT_MODE_P (GET_MODE (x)))
	real_to_decimal (t, CONST_DOUBLE_REAL_VALUE (x), sizeof (t), 0, 1);
      else
	sprintf (t,
		 "<" HOST_WIDE_INT_PRINT_HEX "," HOST_WIDE_INT_PRINT_HEX ">",
		 (unsigned HOST_WIDE_INT) CONST_DOUBLE_LOW (x),
		 (unsigned HOST_WIDE_INT) CONST_DOUBLE_HIGH (x));
      cur = safe_concat (buf, cur, t);
      break;
    case CONST_FIXED:
      fixed_to_decimal (t, CONST_FIXED_VALUE (x), sizeof (t));
      cur = safe_concat (buf, cur, t);
      break;
    case CONST_STRING:
      cur = safe_concat (buf, cur, "\"");
      cur = safe_concat (buf, cur, XSTR (x, 0));
      cur = safe_concat (buf, cur, "\"");
      break;
    case SYMBOL_REF:
      cur = safe_concat (buf, cur, "`");
      cur = safe_concat (buf, cur, XSTR (x, 0));
      cur = safe_concat (buf, cur, "'");
      break;
    case LABEL_REF:
      sprintf (t, "L%d", INSN_UID (XEXP (x, 0)));
      cur = safe_concat (buf, cur, t);
      break;
    case CONST:
      print_value (t, XEXP (x, 0), verbose);
      cur = safe_concat (buf, cur, "const(");
      cur = safe_concat (buf, cur, t);
      cur = safe_concat (buf, cur, ")");
      break;
    case HIGH:
      print_value (t, XEXP (x, 0), verbose);
      cur = safe_concat (buf, cur, "high(");
      cur = safe_concat (buf, cur, t);
      cur = safe_concat (buf, cur, ")");
      break;
    case REG:
      if (REGNO (x) < FIRST_PSEUDO_REGISTER)
	{
	  int c = reg_names[REGNO (x)][0];
	  if (ISDIGIT (c))
	    cur = safe_concat (buf, cur, "%");

	  cur = safe_concat (buf, cur, reg_names[REGNO (x)]);
	}
      else
	{
	  sprintf (t, "r%d", REGNO (x));
	  cur = safe_concat (buf, cur, t);
	}
      if (verbose
#ifdef INSN_SCHEDULING
	  && !current_sched_info
#endif
	 )
	{
	  sprintf (t, ":%s", GET_MODE_NAME (GET_MODE (x)));
	  cur = safe_concat (buf, cur, t);
	}
      break;
    case SUBREG:
      print_value (t, SUBREG_REG (x), verbose);
      cur = safe_concat (buf, cur, t);
      sprintf (t, "#%d", SUBREG_BYTE (x));
      cur = safe_concat (buf, cur, t);
      break;
    case STRICT_LOW_PART:
      print_value (t, XEXP (x, 0), verbose);
      cur = safe_concat (buf, cur, "strict_low_part(");
      cur = safe_concat (buf, cur, t);
      cur = safe_concat (buf, cur, ")");
      break;
    case SCRATCH:
      cur = safe_concat (buf, cur, "scratch");
      break;
    case CC0:
      cur = safe_concat (buf, cur, "cc0");
      break;
    case PC:
      cur = safe_concat (buf, cur, "pc");
      break;
    case MEM:
      print_value (t, XEXP (x, 0), verbose);
      cur = safe_concat (buf, cur, "[");
      cur = safe_concat (buf, cur, t);
      cur = safe_concat (buf, cur, "]");
      break;
    case DEBUG_EXPR:
      sprintf (t, "D#%i", DEBUG_TEMP_UID (DEBUG_EXPR_TREE_DECL (x)));
      cur = safe_concat (buf, cur, t);
      break;
    default:
      print_exp (t, x, verbose);
      cur = safe_concat (buf, cur, t);
      break;
    }
}				/* print_value */
Example #18
0
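/* Output SDB (COFF) debugging information for the symbol described by DECL.
   LOCAL is nonzero for symbols that are local rather than file-scope.  */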
void
sdbout_symbol (tree decl, int local)
{
  tree type = TREE_TYPE (decl);
  tree context = NULL_TREE;
  rtx value;
  int regno = -1;
  const char *name;

  /* If we are called before sdbout_init is run, just save the symbol
     for later.  */
  if (!sdbout_initialized)
    {
      preinit_symbols = tree_cons (0, decl, preinit_symbols);
      return;
    }

  sdbout_one_type (type);

  switch (TREE_CODE (decl))
    {
    case CONST_DECL:
      /* Enum values are defined by defining the enum type.  */
      return;

    case FUNCTION_DECL:
      /* Don't mention a nested function under its parent.  */
      context = decl_function_context (decl);
      if (context == current_function_decl)
	return;
      /* Check DECL_INITIAL to distinguish declarations from definitions.
	 Don't output debug info here for declarations; they will have
	 a DECL_INITIAL value of 0.  */
      if (! DECL_INITIAL (decl))
	return;
      if (!MEM_P (DECL_RTL (decl))
	  || GET_CODE (XEXP (DECL_RTL (decl), 0)) != SYMBOL_REF)
	return;
      PUT_SDB_DEF (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)));
      PUT_SDB_VAL (XEXP (DECL_RTL (decl), 0));
      PUT_SDB_SCL (TREE_PUBLIC (decl) ? C_EXT : C_STAT);
      break;

    case TYPE_DECL:
      /* Done with tagged types.  */
      if (DECL_NAME (decl) == 0)
	return;
      if (DECL_IGNORED_P (decl))
	return;
      /* Don't output intrinsic types.  GAS chokes on SDB .def
	 statements that contain identifiers with embedded spaces
	 (eg "unsigned long").  */
      if (DECL_IS_BUILTIN (decl))
	return;

      /* Output typedef name.  */
      if (template_name_p (DECL_NAME (decl)))
	PUT_SDB_DEF (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)));
      else
	PUT_SDB_DEF (IDENTIFIER_POINTER (DECL_NAME (decl)));
      PUT_SDB_SCL (C_TPDEF);
      break;

    case PARM_DECL:
      /* Parm decls go in their own separate chains
	 and are output by sdbout_reg_parms and sdbout_parms.  */
      gcc_unreachable ();

    case VAR_DECL:
      /* Don't mention a variable that is external.
	 Let the file that defines it describe it.  */
      if (DECL_EXTERNAL (decl))
	return;

      /* Ignore __FUNCTION__, etc.  */
      if (DECL_IGNORED_P (decl))
	return;

      /* If there was an error in the declaration, don't dump core
	 if there is no RTL associated with the variable.  */
      if (!DECL_RTL_SET_P (decl))
	return;

      SET_DECL_RTL (decl,
		    eliminate_regs (DECL_RTL (decl), VOIDmode, NULL_RTX));
#ifdef LEAF_REG_REMAP
      if (crtl->uses_only_leaf_regs)
	leaf_renumber_regs_insn (DECL_RTL (decl));
#endif
      value = DECL_RTL (decl);

      /* Don't mention a variable at all
	 if it was completely optimized into nothingness.

	 If DECL was from an inline function, then its rtl
	 is not identically the rtl that was used in this
	 particular compilation.  */
      if (REG_P (value))
	{
	  regno = REGNO (value);
	  if (regno >= FIRST_PSEUDO_REGISTER)
	    return;
	}
      else if (GET_CODE (value) == SUBREG)
	{
	  while (GET_CODE (value) == SUBREG)
	    value = SUBREG_REG (value);
	  if (REG_P (value))
	    {
	      if (REGNO (value) >= FIRST_PSEUDO_REGISTER)
		return;
	    }
	  regno = REGNO (alter_subreg (&value));
	  SET_DECL_RTL (decl, value);
	}
      /* Don't output anything if an auto variable
	 gets RTL that is static.
	 GAS version 2.2 can't handle such output.  */
      else if (MEM_P (value) && CONSTANT_P (XEXP (value, 0))
	       && ! TREE_STATIC (decl))
	return;

      /* Emit any structure, union, or enum type that has not been output.
	 This occurs for tag-less structs (et al) used to declare variables
	 within functions.  */
      if (TREE_CODE (type) == ENUMERAL_TYPE
	  || TREE_CODE (type) == RECORD_TYPE
	  || TREE_CODE (type) == UNION_TYPE
	  || TREE_CODE (type) == QUAL_UNION_TYPE)
	{
	  if (COMPLETE_TYPE_P (type)		/* not a forward reference */
	      && KNOWN_TYPE_TAG (type) == 0)	/* not yet declared */
	    sdbout_one_type (type);
	}

      /* Defer SDB information for top-level initialized variables! */
      if (! local
	  && MEM_P (value)
	  && DECL_INITIAL (decl))
	return;

      /* C++ in 2.3 makes nameless symbols.  That will be fixed later.
	 For now, avoid crashing.  */
      if (DECL_NAME (decl) == NULL_TREE)
	return;

      /* Record the name to use, starting a symtab entry.  */
      if (local)
	name = IDENTIFIER_POINTER (DECL_NAME (decl));
      else
	name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));

      if (MEM_P (value)
	  && GET_CODE (XEXP (value, 0)) == SYMBOL_REF)
	{
	  PUT_SDB_DEF (name);
	  if (TREE_PUBLIC (decl))
	    {
	      PUT_SDB_VAL (XEXP (value, 0));
	      PUT_SDB_SCL (C_EXT);
	    }
	  else
	    {
	      PUT_SDB_VAL (XEXP (value, 0));
	      PUT_SDB_SCL (C_STAT);
	    }
	}
      else if (regno >= 0)
	{
	  PUT_SDB_DEF (name);
	  PUT_SDB_INT_VAL (DBX_REGISTER_NUMBER (regno));
	  PUT_SDB_SCL (C_REG);
	}
      else if (MEM_P (value)
	       && (MEM_P (XEXP (value, 0))
		   || (REG_P (XEXP (value, 0))
		       && REGNO (XEXP (value, 0)) != HARD_FRAME_POINTER_REGNUM
		       && REGNO (XEXP (value, 0)) != STACK_POINTER_REGNUM)))
	/* If the value is indirect by memory or by a register
	   that isn't the frame pointer, then the object is
	   variable-sized and addressed through that register or
	   stack slot.  COFF has no way to represent this, so all we
	   can do is output the variable as a pointer.  */
	{
	  PUT_SDB_DEF (name);
	  if (REG_P (XEXP (value, 0)))
	    {
	      PUT_SDB_INT_VAL (DBX_REGISTER_NUMBER (REGNO (XEXP (value, 0))));
	      PUT_SDB_SCL (C_REG);
	    }
	  else
	    {
	      /* DECL_RTL looks like (MEM (MEM (PLUS (REG...)
		 (CONST_INT...)))).
		 We want the value of that CONST_INT.  */
	      /* Encore compiler hates a newline in a macro arg, it seems.  */
	      PUT_SDB_INT_VAL (DEBUGGER_AUTO_OFFSET
			       (XEXP (XEXP (value, 0), 0)));
	      PUT_SDB_SCL (C_AUTO);
	    }

	  /* Effectively do build_pointer_type, but don't cache this type,
	     since it might be temporary whereas the type it points to
	     might have been saved for inlining.  */
	  /* Don't use REFERENCE_TYPE because dbx can't handle that.  */
	  type = make_node (POINTER_TYPE);
	  TREE_TYPE (type) = TREE_TYPE (decl);
	}
      else if (MEM_P (value)
	       && ((GET_CODE (XEXP (value, 0)) == PLUS
		    && REG_P (XEXP (XEXP (value, 0), 0))
		    && CONST_INT_P (XEXP (XEXP (value, 0), 1)))
		   /* This is for variables which are at offset zero from
		      the frame pointer.  This happens on the Alpha.
		      Non-frame pointer registers are excluded above.  */
		   || (REG_P (XEXP (value, 0)))))
	{
	  /* DECL_RTL looks like (MEM (PLUS (REG...) (CONST_INT...)))
	     or (MEM (REG...)).  We want the value of that CONST_INT
	     or zero.  */
	  PUT_SDB_DEF (name);
	  PUT_SDB_INT_VAL (DEBUGGER_AUTO_OFFSET (XEXP (value, 0)));
	  PUT_SDB_SCL (C_AUTO);
	}
      else
	{
	  /* It is something we don't know how to represent for SDB.  */
	  return;
	}
      break;

    default:
      break;
    }
  PUT_SDB_TYPE (plain_type (type));
  PUT_SDB_ENDEF;
}
Example #19
0
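/* Return TRUE if ADDR is a valid memory address.  The address is decomposed
   into base, index, scale and displacement parts, and each part is checked;
   STRICT selects the strict register checks.  */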
int
legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
{
    struct avm2_address parts;
    rtx base, index, disp;
    HOST_WIDE_INT scale;
    const char *reason = NULL;
    rtx reason_rtx = NULL_RTX;

    if (avm2_decompose_address (addr, &parts) <= 0)
    {
        reason = "decomposition failed";
        goto report_error;
    }

    base = parts.base;
    index = parts.index;
    disp = parts.disp;
    scale = parts.scale;

    /* Validate base register.

       Don't allow SUBREG's that span more than a word here.  It can lead to spill
       failures when the base is one word out of a two word structure, which is
       represented internally as a DImode int.  */

    if (base)
    {
        rtx reg;
        reason_rtx = base;

        if (REG_P (base))
            reg = base;
        else if (GET_CODE (base) == SUBREG
                 && REG_P (SUBREG_REG (base))
                 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
                 <= UNITS_PER_WORD)
            reg = SUBREG_REG (base);
        else
        {
            reason = "base is not a register";
            goto report_error;
        }

        if (GET_MODE (base) != Pmode)
        {
            reason = "base is not in Pmode";
            goto report_error;
        }

        if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
                || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
        {
            reason = "base is not valid";
            goto report_error;
        }
    }

    /* Validate index register.

       Don't allow SUBREG's that span more than a word here -- same as above.  */

    if (index)
    {
        rtx reg;
        reason_rtx = index;

        if (REG_P (index))
            reg = index;
        else if (GET_CODE (index) == SUBREG
                 && REG_P (SUBREG_REG (index))
                 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
                 <= UNITS_PER_WORD)
            reg = SUBREG_REG (index);
        else
        {
            reason = "index is not a register";
            goto report_error;
        }

        if (GET_MODE (index) != Pmode)
        {
            reason = "index is not in Pmode";
            goto report_error;
        }

        if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
                || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
        {
            reason = "index is not valid";
            goto report_error;
        }
    }

    /* Validate scale factor.  */
    if (scale != 1)
    {
        reason_rtx = GEN_INT (scale);
        if (!index)
        {
            reason = "scale without index";
            goto report_error;
        }

        if (scale != 2 && scale != 4 && scale != 8)
        {
            reason = "scale is not a valid multiplier";
            goto report_error;
        }
    }

    /* Validate displacement.  */
    if (disp)
    {
        reason_rtx = disp;

        if (GET_CODE (disp) == CONST
                && GET_CODE (XEXP (disp, 0)) == UNSPEC)
            switch (XINT (XEXP (disp, 0), 1))
            {
            default:
                reason = "invalid address unspec";
                goto report_error;
            }

        else if (SYMBOLIC_CONST (disp)
                 && (flag_pic))
        {
            /* APPLE LOCAL end dynamic-no-pic */

            /* This code used to verify that a symbolic pic displacement
            includes the pic_offset_table_rtx register.

            While this is a good idea, unfortunately these constructs may
            be created by the "adds using lea" optimization for incorrect
            code like:

            int a;
            int foo(int i)
             {
               return *(&a+i);
             }

            This code is nonsensical, but results in addressing the
            GOT table with a pic_offset_table_rtx base.  We can't
            just refuse it easily, since it gets matched by the
            "addsi3" pattern, which later gets split to lea when the
            output register differs from the input.  While this
            could be handled by a separate addsi pattern for this case
            that never results in lea, disabling this test seems to be
            the easier and correct fix for the crash.  */
        }
        else if (GET_CODE (disp) != LABEL_REF
                 && GET_CODE (disp) != CONST_INT
                 && (GET_CODE (disp) != CONST
                     || !legitimate_constant_p (disp))
                 && (GET_CODE (disp) != SYMBOL_REF
                     || !legitimate_constant_p (disp)))
        {
            reason = "displacement is not constant";
            goto report_error;
        }
    }
    return TRUE;

report_error:
    return FALSE;
}
Example #20
0
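/* Perform register elimination in INSN.  If REPLACE_P, substitute the
   replacement registers destructively; otherwise only adjust elimination
   offsets, using the insn's stack pointer offset on the first pass (FIRST_P)
   or UPDATE_SP_OFFSET on later updates.  A single SET whose source is an
   eliminable register plus a constant is handled specially.  */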
void
eliminate_regs_in_insn (rtx_insn *insn, bool replace_p, bool first_p,
			HOST_WIDE_INT update_sp_offset)
{
  int icode = recog_memoized (insn);
  rtx old_set = single_set (insn);
  bool validate_p;
  int i;
  rtx substed_operand[MAX_RECOG_OPERANDS];
  rtx orig_operand[MAX_RECOG_OPERANDS];
  struct lra_elim_table *ep;
  rtx plus_src, plus_cst_src;
  lra_insn_recog_data_t id;
  struct lra_static_insn_data *static_id;

  if (icode < 0 && asm_noperands (PATTERN (insn)) < 0 && ! DEBUG_INSN_P (insn))
    {
      lra_assert (GET_CODE (PATTERN (insn)) == USE
		  || GET_CODE (PATTERN (insn)) == CLOBBER
		  || GET_CODE (PATTERN (insn)) == ASM_INPUT);
      return;
    }

  /* Check for setting an eliminable register.	*/
  if (old_set != 0 && REG_P (SET_DEST (old_set))
      && (ep = get_elimination (SET_DEST (old_set))) != NULL)
    {
      for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
	if (ep->from_rtx == SET_DEST (old_set) && ep->can_eliminate)
	  {
	    bool delete_p = replace_p;
	    
#ifdef HARD_FRAME_POINTER_REGNUM
	    if (ep->from == FRAME_POINTER_REGNUM
		&& ep->to == HARD_FRAME_POINTER_REGNUM)
	      /* If this is setting the frame pointer register to the
		 hardware frame pointer register and this is an
		 elimination that will be done (tested above), this
		 insn is really adjusting the frame pointer downward
		 to compensate for the adjustment done before a
		 nonlocal goto.  */
	      {
		rtx src = SET_SRC (old_set);
		rtx off = remove_reg_equal_offset_note (insn, ep->to_rtx);
		
		/* We should never process such an insn with a non-zero
		   UPDATE_SP_OFFSET.  */
		lra_assert (update_sp_offset == 0);
		
		if (off != NULL_RTX
		    || src == ep->to_rtx
		    || (GET_CODE (src) == PLUS
			&& XEXP (src, 0) == ep->to_rtx
			&& CONST_INT_P (XEXP (src, 1))))
		  {
		    HOST_WIDE_INT offset;
		    
		    if (replace_p)
		      {
			SET_DEST (old_set) = ep->to_rtx;
			lra_update_insn_recog_data (insn);
			return;
		      }
		    offset = (off != NULL_RTX ? INTVAL (off)
			      : src == ep->to_rtx ? 0 : INTVAL (XEXP (src, 1)));
		    offset -= (ep->offset - ep->previous_offset);
		    src = plus_constant (Pmode, ep->to_rtx, offset);
		    
		    /* First see if this insn remains valid when we
		       make the change.  If not, keep the INSN_CODE
		       the same and let the constraint pass fix it
		       up.  */
		    validate_change (insn, &SET_SRC (old_set), src, 1);
		    validate_change (insn, &SET_DEST (old_set),
				     ep->from_rtx, 1);
		    if (! apply_change_group ())
		      {
			SET_SRC (old_set) = src;
			SET_DEST (old_set) = ep->from_rtx;
		      }
		    lra_update_insn_recog_data (insn);
		    /* Add offset note for future updates.  */
		    add_reg_note (insn, REG_EQUAL, src);
		    return;
		  }
	      }
#endif
	    
	    /* This insn isn't serving a useful purpose.  We delete it
	       when REPLACE_P is set.  */
	    if (delete_p)
	      lra_delete_dead_insn (insn);
	    return;
	  }
    }

  /* We allow one special case which happens to work on all machines we
     currently support: a single set with the source or a REG_EQUAL
     note being a PLUS of an eliminable register and a constant.  */
  plus_src = plus_cst_src = 0;
  if (old_set && REG_P (SET_DEST (old_set)))
    {
      if (GET_CODE (SET_SRC (old_set)) == PLUS)
	plus_src = SET_SRC (old_set);
      /* First see if the source is of the form (plus (...) CST).  */
      if (plus_src
	  && CONST_INT_P (XEXP (plus_src, 1)))
	plus_cst_src = plus_src;
      /* Check that the first operand of the PLUS is a hard reg or
	 the lowpart subreg of one.  */
      if (plus_cst_src)
	{
	  rtx reg = XEXP (plus_cst_src, 0);

	  if (GET_CODE (reg) == SUBREG && subreg_lowpart_p (reg))
	    reg = SUBREG_REG (reg);

	  if (!REG_P (reg) || REGNO (reg) >= FIRST_PSEUDO_REGISTER)
	    plus_cst_src = 0;
	}
    }
  if (plus_cst_src)
    {
      rtx reg = XEXP (plus_cst_src, 0);
      HOST_WIDE_INT offset = INTVAL (XEXP (plus_cst_src, 1));

      if (GET_CODE (reg) == SUBREG)
	reg = SUBREG_REG (reg);

      if (REG_P (reg) && (ep = get_elimination (reg)) != NULL)
	{
	  rtx to_rtx = replace_p ? ep->to_rtx : ep->from_rtx;

	  if (! replace_p)
	    {
	      if (update_sp_offset == 0)
		offset += (ep->offset - ep->previous_offset);
	      if (ep->to_rtx == stack_pointer_rtx)
		{
		  if (first_p)
		    offset -= lra_get_insn_recog_data (insn)->sp_offset;
		  else
		    offset += update_sp_offset;
		}
	      offset = trunc_int_for_mode (offset, GET_MODE (plus_cst_src));
	    }

	  if (GET_CODE (XEXP (plus_cst_src, 0)) == SUBREG)
	    to_rtx = gen_lowpart (GET_MODE (XEXP (plus_cst_src, 0)), to_rtx);
	  /* If we have a nonzero offset, and the source is already a
	     simple REG, the following transformation would increase
	     the cost of the insn by replacing a simple REG with (plus
	     (reg sp) CST).  So try only when we already had a PLUS
	     before.  */
	  if (offset == 0 || plus_src)
	    {
	      rtx new_src = plus_constant (GET_MODE (to_rtx), to_rtx, offset);

	      old_set = single_set (insn);

	      /* First see if this insn remains valid when we make the
		 change.  If not, try to replace the whole pattern
		 with a simple set (this may help if the original insn
		 was a PARALLEL that was only recognized as single_set
		 due to REG_UNUSED notes).  If this isn't valid
		 either, keep the INSN_CODE the same and let the
		 constraint pass fix it up.  */
	      if (! validate_change (insn, &SET_SRC (old_set), new_src, 0))
		{
		  rtx new_pat = gen_rtx_SET (SET_DEST (old_set), new_src);

		  if (! validate_change (insn, &PATTERN (insn), new_pat, 0))
		    SET_SRC (old_set) = new_src;
		}
	      lra_update_insn_recog_data (insn);
	      /* This can't have an effect on elimination offsets, so skip
		 right to the end.  */
	      return;
	    }
	}
    }

  /* Eliminate all eliminable registers occurring in operands that
     can be handled by the constraint pass.  */
  id = lra_get_insn_recog_data (insn);
  static_id = id->insn_static_data;
  validate_p = false;
  for (i = 0; i < static_id->n_operands; i++)
    {
      orig_operand[i] = *id->operand_loc[i];
      substed_operand[i] = *id->operand_loc[i];

      /* For an asm statement, every operand is eliminable.  */
      if (icode < 0 || insn_data[icode].operand[i].eliminable)
	{
	  /* Check for setting a hard register that we know about.  */
	  if (static_id->operand[i].type != OP_IN
	      && REG_P (orig_operand[i]))
	    {
	      /* If we are assigning to a hard register that can be
		 eliminated, it must be as part of a PARALLEL, since
		 the code above handles single SETs.  This reg can not
		 be longer eliminated -- it is forced by
		 mark_not_eliminable.  */
	      for (ep = reg_eliminate;
		   ep < &reg_eliminate[NUM_ELIMINABLE_REGS];
		   ep++)
		lra_assert (ep->from_rtx != orig_operand[i]
			    || ! ep->can_eliminate);
	    }

	  /* Companion to the above plus substitution, we can allow
	     invariants as the source of a plain move.	*/
	  substed_operand[i]
	    = lra_eliminate_regs_1 (insn, *id->operand_loc[i], VOIDmode,
				    replace_p, ! replace_p && ! first_p,
				    update_sp_offset, first_p);
	  if (substed_operand[i] != orig_operand[i])
	    validate_p = true;
	}
    }

  if (! validate_p)
    return;

  /* Substitute the operands; the new values are in the substed_operand
     array.  */
  for (i = 0; i < static_id->n_operands; i++)
    *id->operand_loc[i] = substed_operand[i];
  for (i = 0; i < static_id->n_dups; i++)
    *id->dup_loc[i] = substed_operand[(int) static_id->dup_num[i]];

  /* If we had a move insn but now we don't, re-recognize it.
     This will cause spurious re-recognition if the old move had a
     PARALLEL since the new one still will, but we can't call
     single_set without having put new body into the insn and the
     re-recognition won't hurt in this rare case.  */
  id = lra_update_insn_recog_data (insn);
  static_id = id->insn_static_data;
}
Example #21
0
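/* Replace registers used in the address *LOC with the oldest equivalent
   register recorded in VD, using class CL for the address as a whole and
   the index/base register classes for the operands of a PLUS.  Return true
   if any replacement was made.  */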
static bool
replace_oldest_value_addr (rtx *loc, enum reg_class cl,
			   enum machine_mode mode, addr_space_t as,
			   rtx insn, struct value_data *vd)
{
  rtx x = *loc;
  RTX_CODE code = GET_CODE (x);
  const char *fmt;
  int i, j;
  bool changed = false;

  switch (code)
    {
    case PLUS:
      if (DEBUG_INSN_P (insn))
	break;

      {
	rtx orig_op0 = XEXP (x, 0);
	rtx orig_op1 = XEXP (x, 1);
	RTX_CODE code0 = GET_CODE (orig_op0);
	RTX_CODE code1 = GET_CODE (orig_op1);
	rtx op0 = orig_op0;
	rtx op1 = orig_op1;
	rtx *locI = NULL;
	rtx *locB = NULL;
	enum rtx_code index_code = SCRATCH;

	if (GET_CODE (op0) == SUBREG)
	  {
	    op0 = SUBREG_REG (op0);
	    code0 = GET_CODE (op0);
	  }

	if (GET_CODE (op1) == SUBREG)
	  {
	    op1 = SUBREG_REG (op1);
	    code1 = GET_CODE (op1);
	  }

	if (code0 == MULT || code0 == SIGN_EXTEND || code0 == TRUNCATE
	    || code0 == ZERO_EXTEND || code1 == MEM)
	  {
	    locI = &XEXP (x, 0);
	    locB = &XEXP (x, 1);
	    index_code = GET_CODE (*locI);
	  }
	else if (code1 == MULT || code1 == SIGN_EXTEND || code1 == TRUNCATE
		 || code1 == ZERO_EXTEND || code0 == MEM)
	  {
	    locI = &XEXP (x, 1);
	    locB = &XEXP (x, 0);
	    index_code = GET_CODE (*locI);
	  }
	else if (code0 == CONST_INT || code0 == CONST
		 || code0 == SYMBOL_REF || code0 == LABEL_REF)
	  {
	    locB = &XEXP (x, 1);
	    index_code = GET_CODE (XEXP (x, 0));
	  }
	else if (code1 == CONST_INT || code1 == CONST
		 || code1 == SYMBOL_REF || code1 == LABEL_REF)
	  {
	    locB = &XEXP (x, 0);
	    index_code = GET_CODE (XEXP (x, 1));
	  }
	else if (code0 == REG && code1 == REG)
	  {
	    int index_op;
	    unsigned regno0 = REGNO (op0), regno1 = REGNO (op1);

	    if (REGNO_OK_FOR_INDEX_P (regno1)
		&& regno_ok_for_base_p (regno0, mode, as, PLUS, REG))
	      index_op = 1;
	    else if (REGNO_OK_FOR_INDEX_P (regno0)
		     && regno_ok_for_base_p (regno1, mode, as, PLUS, REG))
	      index_op = 0;
	    else if (regno_ok_for_base_p (regno0, mode, as, PLUS, REG)
		     || REGNO_OK_FOR_INDEX_P (regno1))
	      index_op = 1;
	    else if (regno_ok_for_base_p (regno1, mode, as, PLUS, REG))
	      index_op = 0;
	    else
	      index_op = 1;

	    locI = &XEXP (x, index_op);
	    locB = &XEXP (x, !index_op);
	    index_code = GET_CODE (*locI);
	  }
	else if (code0 == REG)
	  {
	    locI = &XEXP (x, 0);
	    locB = &XEXP (x, 1);
	    index_code = GET_CODE (*locI);
	  }
	else if (code1 == REG)
	  {
	    locI = &XEXP (x, 1);
	    locB = &XEXP (x, 0);
	    index_code = GET_CODE (*locI);
	  }

	if (locI)
	  changed |= replace_oldest_value_addr (locI, INDEX_REG_CLASS,
						mode, as, insn, vd);
	if (locB)
	  changed |= replace_oldest_value_addr (locB,
						base_reg_class (mode, as, PLUS,
								index_code),
						mode, as, insn, vd);
	return changed;
      }

    case POST_INC:
    case POST_DEC:
    case POST_MODIFY:
    case PRE_INC:
    case PRE_DEC:
    case PRE_MODIFY:
      return false;

    case MEM:
      return replace_oldest_value_mem (x, insn, vd);

    case REG:
      return replace_oldest_value_reg (loc, cl, insn, vd);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	changed |= replace_oldest_value_addr (&XEXP (x, i), cl, mode, as,
					      insn, vd);
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  changed |= replace_oldest_value_addr (&XVECEXP (x, i, j), cl,
						mode, as, insn, vd);
    }

  return changed;
}
Example #22
0
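/* Decompose the address ADDR into base, index, displacement and scale parts
   and store them in *OUT.  Return 0 if ADDR cannot be decomposed, 1 on
   success, and -1 when an ASHIFT was matched and converted into a scale.  */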
int
avm2_decompose_address (rtx addr, struct avm2_address *out)
{
    rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
    rtx base_reg, index_reg;
    HOST_WIDE_INT scale = 1;
    rtx scale_rtx = NULL_RTX;
    int retval = 1;
    enum avm2_address_seg seg = SEG_DEFAULT;

    if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
        base = addr;
    else if (GET_CODE (addr) == PLUS)
    {
        rtx addends[4], op;
        int n = 0, i;

        op = addr;
        do
        {
            if (n >= 4)
                return 0;
            addends[n++] = XEXP (op, 1);
            op = XEXP (op, 0);
        }
        while (GET_CODE (op) == PLUS);
        if (n >= 4)
            return 0;
        addends[n] = op;

        for (i = n; i >= 0; --i)
        {
            op = addends[i];
            switch (GET_CODE (op))
            {
            case MULT:
                if (index)
                    return 0;
                index = XEXP (op, 0);
                scale_rtx = XEXP (op, 1);
                break;

            case UNSPEC:
                return 0;
                break;

            case REG:
            case SUBREG:
                if (!base)
                    base = op;
                else if (!index)
                    index = op;
                else
                    return 0;
                break;

            case CONST:
            case CONST_INT:
            case SYMBOL_REF:
            case LABEL_REF:
                if (disp)
                    return 0;
                disp = op;
                break;

            default:
                return 0;
            }
        }
    }
    else if (GET_CODE (addr) == MULT)
    {
        index = XEXP (addr, 0);		/* index*scale */
        scale_rtx = XEXP (addr, 1);
    }
    else if (GET_CODE (addr) == ASHIFT)
    {
        rtx tmp;

        /* We're called for lea too, which implements ashift on occasion.  */
        index = XEXP (addr, 0);
        tmp = XEXP (addr, 1);
        if (GET_CODE (tmp) != CONST_INT)
            return 0;
        scale = INTVAL (tmp);
        if ((unsigned HOST_WIDE_INT) scale > 3)
            return 0;
        scale = 1 << scale;
        retval = -1;
    }
    else
        disp = addr;			/* displacement */

    /* Extract the integral value of scale.  */
    if (scale_rtx)
    {
        if (GET_CODE (scale_rtx) != CONST_INT)
            return 0;
        scale = INTVAL (scale_rtx);
    }

    base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
    index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;

    /* Allow arg pointer and stack pointer as index if there is no scaling.  */
    if (base_reg && index_reg && scale == 1
            && (index_reg == arg_pointer_rtx
                || index_reg == frame_pointer_rtx
                || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
    {
        rtx tmp;
        tmp = base, base = index, index = tmp;
        tmp = base_reg, base_reg = index_reg, index_reg = tmp;
    }

    /* Special case: %ebp cannot be encoded as a base without a displacement.  */
    if ((base_reg == hard_frame_pointer_rtx
            || base_reg == frame_pointer_rtx
            || base_reg == arg_pointer_rtx) && !disp)
        disp = const0_rtx;

    /* Special case inherited from the x86 version: on K6, [%esi] makes the
       instruction vector decoded, which was avoided by transforming it to
       [%esi+0].  The check is disabled for this target.  */
    if (false)
        disp = const0_rtx;

    /* Special case: encode reg+reg instead of reg*2.  */
    if (!base && index && scale == 2)
        base = index, base_reg = index_reg, scale = 1;

    /* Special case: scaling cannot be encoded without base or displacement.  */
    if (!base && !disp && index && scale != 1)
        disp = const0_rtx;

    out->base = base;
    out->index = index;
    out->disp = disp;
    out->scale = scale;
    out->seg = seg;

    return retval;
}
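/* A minimal usage sketch (hypothetical helper, not part of the port above):
   decompose an address with avm2_decompose_address and report whether it
   needs an index register.  Only the struct avm2_address interface shown
   above is assumed.  */

static int
avm2_address_needs_index_p (rtx addr)
{
    struct avm2_address parts;

    if (!avm2_decompose_address (addr, &parts))
        return 0;               /* Not an address we can decompose.  */

    /* PARTS now describes ADDR in base + index * scale + disp form; e.g.
       for (plus (mult (reg A) (const_int 4)) (reg B)) we would get
       base = B, index = A, scale = 4 and disp = NULL_RTX.  */
    return parts.index != NULL_RTX;
}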
Example #23
static basic_block
create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
{
  edge eg;
  edge_iterator ei;
  basic_block pre_exit;

  /* The only non-call predecessor at this stage is a block with a
     fallthrough edge; there can be at most one, but there could be
     none at all, e.g. when exit is called.  */
  pre_exit = 0;
  FOR_EACH_EDGE (eg, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
    if (eg->flags & EDGE_FALLTHRU)
      {
	basic_block src_bb = eg->src;
	rtx_insn *last_insn;
	rtx ret_reg;

	gcc_assert (!pre_exit);
	/* If this function returns a value at the end, we have to
	   insert the final mode switch before the return value copy
	   to its hard register.  */
	if (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == 1
	    && NONJUMP_INSN_P ((last_insn = BB_END (src_bb)))
	    && GET_CODE (PATTERN (last_insn)) == USE
	    && GET_CODE ((ret_reg = XEXP (PATTERN (last_insn), 0))) == REG)
	  {
	    int ret_start = REGNO (ret_reg);
	    int nregs = hard_regno_nregs[ret_start][GET_MODE (ret_reg)];
	    int ret_end = ret_start + nregs;
	    bool short_block = false;
	    bool multi_reg_return = false;
	    bool forced_late_switch = false;
	    rtx_insn *before_return_copy;

	    do
	      {
		rtx_insn *return_copy = PREV_INSN (last_insn);
		rtx return_copy_pat, copy_reg;
		int copy_start, copy_num;
		int j;

		if (NONDEBUG_INSN_P (return_copy))
		  {
		    /* When using SJLJ exceptions, the call to the
		       unregister function is inserted between the
		       clobber of the return value and the copy.
		       We do not want to split the block before this
		       or any other call; if we have not found the
		       copy yet, the copy must have been deleted.  */
		    if (CALL_P (return_copy))
		      {
			short_block = true;
			break;
		      }
		    return_copy_pat = PATTERN (return_copy);
		    switch (GET_CODE (return_copy_pat))
		      {
		      case USE:
			/* Skip USEs of multiple return registers.
			   __builtin_apply pattern is also handled here.  */
			if (GET_CODE (XEXP (return_copy_pat, 0)) == REG
			    && (targetm.calls.function_value_regno_p
				(REGNO (XEXP (return_copy_pat, 0)))))
			  {
			    multi_reg_return = true;
			    last_insn = return_copy;
			    continue;
			  }
			break;

		      case ASM_OPERANDS:
			/* Skip barrier insns.  */
			if (!MEM_VOLATILE_P (return_copy_pat))
			  break;

			/* Fall through.  */

		      case ASM_INPUT:
		      case UNSPEC_VOLATILE:
			last_insn = return_copy;
			continue;

		      default:
			break;
		      }

		    /* If the return register is not (in its entirety)
		       likely spilled, the return copy might be
		       partially or completely optimized away.  */
		    return_copy_pat = single_set (return_copy);
		    if (!return_copy_pat)
		      {
			return_copy_pat = PATTERN (return_copy);
			if (GET_CODE (return_copy_pat) != CLOBBER)
			  break;
			else if (!optimize)
			  {
			    /* This might be (clobber (reg [<result>]))
			       when not optimizing.  Then check if
			       the previous insn is the clobber for
			       the return register.  */
			    copy_reg = SET_DEST (return_copy_pat);
			    if (GET_CODE (copy_reg) == REG
				&& !HARD_REGISTER_NUM_P (REGNO (copy_reg)))
			      {
				if (INSN_P (PREV_INSN (return_copy)))
				  {
				    return_copy = PREV_INSN (return_copy);
				    return_copy_pat = PATTERN (return_copy);
				    if (GET_CODE (return_copy_pat) != CLOBBER)
				      break;
				  }
			      }
			  }
		      }
		    copy_reg = SET_DEST (return_copy_pat);
		    if (GET_CODE (copy_reg) == REG)
		      copy_start = REGNO (copy_reg);
		    else if (GET_CODE (copy_reg) == SUBREG
			     && GET_CODE (SUBREG_REG (copy_reg)) == REG)
		      copy_start = REGNO (SUBREG_REG (copy_reg));
		    else
		      {
			/* When control reaches the end of a non-void
			   function, there are no return copy insns at all.
			   This avoids an ICE on that invalid function.  */
			if (ret_start + nregs == ret_end)
			  short_block = true;
			break;
		      }
		    if (!targetm.calls.function_value_regno_p (copy_start))
		      copy_num = 0;
		    else
		      copy_num
			= hard_regno_nregs[copy_start][GET_MODE (copy_reg)];

		    /* If the return register is not likely spilled, as is
		       the case for floating point on SH4, then it might
		       be set by an arithmetic operation that needs a
		       different mode than the exit block.  */
		    for (j = n_entities - 1; j >= 0; j--)
		      {
			int e = entity_map[j];
			int mode =
			  targetm.mode_switching.needed (e, return_copy);

			if (mode != num_modes[e]
			    && mode != targetm.mode_switching.exit (e))
			  break;
		      }
		    if (j >= 0)
		      {
			/* __builtin_return emits a sequence of loads to all
			   return registers.  One of them might require
			   another mode than MODE_EXIT, even if it is
			   unrelated to the return value, so we want to put
			   the final mode switch after it.  */
			if (multi_reg_return
			    && targetm.calls.function_value_regno_p
			        (copy_start))
			  forced_late_switch = true;

			/* For the SH4, floating point loads depend on fpscr,
			   thus we might need to put the final mode switch
			   after the return value copy.  That is still OK,
			   because a floating point return value does not
			   conflict with address reloads.  */
			if (copy_start >= ret_start
			    && copy_start + copy_num <= ret_end
			    && OBJECT_P (SET_SRC (return_copy_pat)))
			  forced_late_switch = true;
			break;
		      }
		    if (copy_num == 0)
		      {
			last_insn = return_copy;
			continue;
		      }

		    if (copy_start >= ret_start
			&& copy_start + copy_num <= ret_end)
		      nregs -= copy_num;
		    else if (!multi_reg_return
			     || !targetm.calls.function_value_regno_p
				 (copy_start))
		      break;
		    last_insn = return_copy;
		  }
		/* ??? Exception handling can lead to the return value
		   copy being already separated from the return value use,
		   as in unwind-dw2.c.
		   Similarly, conditionally returning without a value,
		   and conditionally using builtin_return can lead to an
		   isolated use.  */
		if (return_copy == BB_HEAD (src_bb))
		  {
		    short_block = true;
		    break;
		  }
		last_insn = return_copy;
	      }
	    while (nregs);

	    /* If we didn't see a full return value copy, verify that there
	       is a plausible reason for this.  If some, but not all of the
	       return register is likely spilled, we can expect that there
	       is a copy for the likely spilled part.  */
	    gcc_assert (!nregs
			|| forced_late_switch
			|| short_block
			|| !(targetm.class_likely_spilled_p
			     (REGNO_REG_CLASS (ret_start)))
			|| (nregs
			    != hard_regno_nregs[ret_start][GET_MODE (ret_reg)])
			/* For multi-hard-register floating point
		   	   values, sometimes the likely-spilled part
		   	   is ordinarily copied first, then the other
		   	   part is set with an arithmetic operation.
		   	   This doesn't actually cause reload
		   	   failures, so let it pass.  */
			|| (GET_MODE_CLASS (GET_MODE (ret_reg)) != MODE_INT
			    && nregs != 1));

	    if (!NOTE_INSN_BASIC_BLOCK_P (last_insn))
	      {
		before_return_copy
		  = emit_note_before (NOTE_INSN_DELETED, last_insn);
		/* Instructions preceding LAST_INSN in the same block might
		   require a different mode than MODE_EXIT, so if we might
		   have such instructions, keep them in a separate block
		   from pre_exit.  */
		src_bb = split_block (src_bb,
				      PREV_INSN (before_return_copy))->dest;
	      }
	    else
	      before_return_copy = last_insn;
	    pre_exit = split_block (src_bb, before_return_copy)->src;
	  }
	else
	  {
	    pre_exit = split_edge (eg);
	  }
      }

  return pre_exit;
}
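/* A minimal sketch (hypothetical helper, not part of the pass above) of the
   edge walk that opens create_pre_exit: locate the fallthrough predecessor
   of the exit block, or return NULL when there is none (e.g. when exit is
   called on every path).  Only the CFG accessors used above are assumed.  */

static basic_block
find_fallthru_pred_of_exit (void)
{
  edge eg;
  edge_iterator ei;

  FOR_EACH_EDGE (eg, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
    if (eg->flags & EDGE_FALLTHRU)
      return eg->src;

  return NULL;
}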
Example #24
/* Check if *XP is equivalent to Y.  Until an irreconcilable difference is
   found, use in-group changes with validate_change on *XP to make register
   assignments agree.  It is the (not necessarily direct) caller's
   responsibility to verify / confirm / cancel these changes, as appropriate.
   RVALUE indicates whether the processed piece of rtl is used as a
   destination, in which case we can't accept different registers as an
   input.  Returns nonzero if the two blocks have been identified as
   equivalent, zero otherwise.
   RVALUE == 0: destination
   RVALUE == 1: source
   RVALUE == -1: source, ignore SET_DEST of SET / clobber.  */
bool
rtx_equiv_p (rtx *xp, rtx y, int rvalue, struct equiv_info *info)
{
  rtx x = *xp;
  enum rtx_code code;
  int length;
  const char *format;
  int i;

  if (!y || !x)
    return x == y;
  code = GET_CODE (y);
  if (code != REG && x == y)
    return true;
  if (GET_CODE (x) != code
      || GET_MODE (x) != GET_MODE (y))
    return false;

  /* ??? could extend to allow CONST_INT inputs.  */
  switch (code)
    {
    case REG:
      {
	unsigned x_regno = REGNO (x);
	unsigned y_regno = REGNO (y);
	int x_common_live, y_common_live;

	if (reload_completed
	    && (x_regno >= FIRST_PSEUDO_REGISTER
		|| y_regno >= FIRST_PSEUDO_REGISTER))
	  {
	    /* We should only see this in REG_NOTEs.  */
	    gcc_assert (!info->live_update);
	    /* Returning false will cause us to remove the notes.  */
	    return false;
	  }
#ifdef STACK_REGS
	/* After reg-stack, can only accept literal matches of stack regs.  */
	if (info->mode & CLEANUP_POST_REGSTACK
	    && (IN_RANGE (x_regno, FIRST_STACK_REG, LAST_STACK_REG)
		|| IN_RANGE (y_regno, FIRST_STACK_REG, LAST_STACK_REG)))
	  return x_regno == y_regno;
#endif

	/* If the register is a locally live one in one block, the
	   corresponding one must be locally live in the other, too, and
	   match of identical regnos doesn't apply.  */
	if (REGNO_REG_SET_P (info->x_local_live, x_regno))
	  {
	    if (!REGNO_REG_SET_P (info->y_local_live, y_regno))
	      return false;
	  }
	else if (REGNO_REG_SET_P (info->y_local_live, y_regno))
	  return false;
	else if (x_regno == y_regno)
	  {
	    if (!rvalue && info->cur.input_valid
		&& (reg_overlap_mentioned_p (x, info->x_input)
		    || reg_overlap_mentioned_p (x, info->y_input)))
	      return false;

	    /* Update liveness information.  */
	    if (info->live_update
		&& assign_reg_reg_set (info->common_live, x, rvalue))
	      info->cur.version++;

	    return true;
	  }

	x_common_live = REGNO_REG_SET_P (info->common_live, x_regno);
	y_common_live = REGNO_REG_SET_P (info->common_live, y_regno);
	if (x_common_live != y_common_live)
	  return false;
	else if (x_common_live)
	  {
	    if (! rvalue || info->input_cost < 0 || no_new_pseudos)
	      return false;
	    /* If info->live_update is not set, we are processing notes.
	       We then allow a match with x_input / y_input found in a
	       previous pass.  */
	    if (info->live_update && !info->cur.input_valid)
	      {
		info->cur.input_valid = true;
		info->x_input = x;
		info->y_input = y;
		info->cur.input_count += optimize_size ? 2 : 1;
		if (info->input_reg
		    && GET_MODE (info->input_reg) != GET_MODE (info->x_input))
		  info->input_reg = NULL_RTX;
		if (!info->input_reg)
		  info->input_reg = gen_reg_rtx (GET_MODE (info->x_input));
	      }
	    else if ((info->live_update
		      ? ! info->cur.input_valid : ! info->x_input)
		     || ! rtx_equal_p (x, info->x_input)
		     || ! rtx_equal_p (y, info->y_input))
	      return false;
	    validate_change (info->cur.x_start, xp, info->input_reg, 1);
	  }
	else
	  {
	    int x_nregs = (x_regno >= FIRST_PSEUDO_REGISTER
			   ? 1 : hard_regno_nregs[x_regno][GET_MODE (x)]);
	    int y_nregs = (y_regno >= FIRST_PSEUDO_REGISTER
			   ? 1 : hard_regno_nregs[y_regno][GET_MODE (y)]);
	    int size = GET_MODE_SIZE (GET_MODE (x));
	    enum machine_mode x_mode = GET_MODE (x);
	    unsigned x_regno_i, y_regno_i;
	    int x_nregs_i, y_nregs_i, size_i;
	    int local_count = info->cur.local_count;

	    /* This might be a register local to each block.  See if we have
	       it already registered.  */
	    for (i = local_count - 1; i >= 0; i--)
	      {
		x_regno_i = REGNO (info->x_local[i]);
		x_nregs_i = (x_regno_i >= FIRST_PSEUDO_REGISTER
			     ? 1 : hard_regno_nregs[x_regno_i][GET_MODE (x)]);
		y_regno_i = REGNO (info->y_local[i]);
		y_nregs_i = (y_regno_i >= FIRST_PSEUDO_REGISTER
			     ? 1 : hard_regno_nregs[y_regno_i][GET_MODE (y)]);
		size_i = GET_MODE_SIZE (GET_MODE (info->x_local[i]));

		/* If we have a new pair of registers that is wider than an
		   old pair and encloses it with matching offsets,
		   remove the old pair.  If we find a matching, wider, old
		   pair, use the old one.  If the width is the same, use the
		   old one if the modes match, but the new if they don't.
		   We don't want to get too fancy with subreg_regno_offset
		   here, so we just test two straightforward cases each.  */
		if (info->live_update
		    && (x_mode != GET_MODE (info->x_local[i])
			? size >= size_i : size > size_i))
		  {
		    /* If the new pair is fully enclosing a matching
		       existing pair, remove the old one.  N.B. because
		       we are removing one entry here, the check below
		       if we have space for a new entry will succeed.  */
		    if ((x_regno <= x_regno_i
			 && x_regno + x_nregs >= x_regno_i + x_nregs_i
			 && x_nregs == y_nregs && x_nregs_i == y_nregs_i
			 && x_regno - x_regno_i == y_regno - y_regno_i)
			|| (x_regno == x_regno_i && y_regno == y_regno_i
			    && x_nregs >= x_nregs_i && y_nregs >= y_nregs_i))
		      {
			info->cur.local_count = --local_count;
			info->x_local[i] = info->x_local[local_count];
			info->y_local[i] = info->y_local[local_count];
			continue;
		      }
		  }
		else
		  {

		    /* If the new pair is fully enclosed within a matching
		       existing pair, succeed.  */
		    if (x_regno >= x_regno_i
			&& x_regno + x_nregs <= x_regno_i + x_nregs_i
			&& x_nregs == y_nregs && x_nregs_i == y_nregs_i
			&& x_regno - x_regno_i == y_regno - y_regno_i)
		      break;
		    if (x_regno == x_regno_i && y_regno == y_regno_i
			&& x_nregs <= x_nregs_i && y_nregs <= y_nregs_i)
		      break;
		  }

		/* Any other overlap causes a match failure.  */
		if (x_regno + x_nregs > x_regno_i
		    && x_regno_i + x_nregs_i > x_regno)
		  return false;
		if (y_regno + y_nregs > y_regno_i
		    && y_regno_i + y_nregs_i > y_regno)
		  return false;
	      }
	    if (i < 0)
	      {
		/* Not found.  Create a new entry if possible.  */
		if (!info->live_update
		    || info->cur.local_count >= STRUCT_EQUIV_MAX_LOCAL)
		  return false;
		info->x_local[info->cur.local_count] = x;
		info->y_local[info->cur.local_count] = y;
		info->cur.local_count++;
		info->cur.version++;
	      }
	    note_local_live (info, x, y, rvalue);
	  }
	return true;
      }
    case SET:
      gcc_assert (rvalue < 0);
      /* Ignore the destination's role as a destination.  Still, we have
	 to consider input registers embedded in the addresses of a MEM.
	 N.B., we process the rvalue aspect of STRICT_LOW_PART /
	 ZERO_EXTEND / SIGN_EXTEND along with their lvalue aspect.  */
      if (!set_dest_addr_equiv_p (SET_DEST (x), SET_DEST (y), info))
	return false;
      /* Process source.  */
      return rtx_equiv_p (&SET_SRC (x), SET_SRC (y), 1, info);
    case PRE_MODIFY:
      /* Process destination.  */
      if (!rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), 0, info))
	return false;
      /* Process source.  */
      return rtx_equiv_p (&XEXP (x, 1), XEXP (y, 1), 1, info);
    case POST_MODIFY:
      {
	rtx x_dest0, x_dest1;

	/* Process destination.  */
	x_dest0 = XEXP (x, 0);
	gcc_assert (REG_P (x_dest0));
	if (!rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), 0, info))
	  return false;
	x_dest1 = XEXP (x, 0);
	/* validate_change might have changed the destination.  Put it back
	   so that we can do a proper match for its role as an input.  */
	XEXP (x, 0) = x_dest0;
	if (!rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), 1, info))
	  return false;
	gcc_assert (x_dest1 == XEXP (x, 0));
	/* Process source.  */
	return rtx_equiv_p (&XEXP (x, 1), XEXP (y, 1), 1, info);
      }
    case CLOBBER:
      gcc_assert (rvalue < 0);
      return true;
    /* Some special forms are also rvalues when they appear in lvalue
       positions.  However, we must not try to match a register after we
       have already altered it with validate_change, so we consider the
       rvalue aspect while we process the lvalue.  */
    case STRICT_LOW_PART:
    case ZERO_EXTEND:
    case SIGN_EXTEND:
      {
	rtx x_inner, y_inner;
	enum rtx_code code;
	int change;

	if (rvalue)
	  break;
	x_inner = XEXP (x, 0);
	y_inner = XEXP (y, 0);
	if (GET_MODE (x_inner) != GET_MODE (y_inner))
	  return false;
	code = GET_CODE (x_inner);
	if (code != GET_CODE (y_inner))
	  return false;
	/* The address of a MEM is an input that will be processed during
	   rvalue == -1 processing.  */
	if (code == SUBREG)
	  {
	    if (SUBREG_BYTE (x_inner) != SUBREG_BYTE (y_inner))
	      return false;
	    x = x_inner;
	    x_inner = SUBREG_REG (x_inner);
	    y_inner = SUBREG_REG (y_inner);
	    if (GET_MODE (x_inner) != GET_MODE (y_inner))
	      return false;
	    code = GET_CODE (x_inner);
	    if (code != GET_CODE (y_inner))
	      return false;
	  }
	if (code == MEM)
	  return true;
	gcc_assert (code == REG);
	if (! rtx_equiv_p (&XEXP (x, 0), y_inner, rvalue, info))
	  return false;
	if (REGNO (x_inner) == REGNO (y_inner))
	  {
	    change = assign_reg_reg_set (info->common_live, x_inner, 1);
	    info->cur.version++;
	  }
	else
	  change = note_local_live (info, x_inner, y_inner, 1);
	gcc_assert (change);
	return true;
      }
    /* The AUTO_INC / POST_MODIFY / PRE_MODIFY sets are modelled to take
       place during input processing; however, that is benign, since they
       are paired with reads.  */
    case MEM:
      return !rvalue || rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), rvalue, info);
    case POST_INC: case POST_DEC: case PRE_INC: case PRE_DEC:
      return (rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), 0, info)
	      && rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), 1, info));
    case PARALLEL:
      /* If this is a top-level PATTERN PARALLEL, we expect the caller to 
	 have handled the SET_DESTs.  A complex or vector PARALLEL can be
	 identified by having a mode.  */
      gcc_assert (rvalue < 0 || GET_MODE (x) != VOIDmode);
      break;
    case LABEL_REF:
      /* Check special tablejump match case.  */
      if (XEXP (y, 0) == info->y_label)
	return (XEXP (x, 0) == info->x_label);
      /* We can't assume nonlocal labels have their following insns yet.  */
      if (LABEL_REF_NONLOCAL_P (x) || LABEL_REF_NONLOCAL_P (y))
	return XEXP (x, 0) == XEXP (y, 0);

      /* Two label-refs are equivalent if they point at labels
	 in the same position in the instruction stream.  */
      return (next_real_insn (XEXP (x, 0))
	      == next_real_insn (XEXP (y, 0)));
    case SYMBOL_REF:
      return XSTR (x, 0) == XSTR (y, 0);
    /* Some rtl is guaranteed to be shared, or unique; if the x == y check
       above didn't match them, they aren't the same.  */
    case CONST_INT:
    case CODE_LABEL:
      return false;
    default:
      break;
    }

  /* For commutative operations, the RTXs match if the operands match in
     either order.  */
  if (targetm.commutative_p (x, UNKNOWN))
    return ((rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), rvalue, info)
	     && rtx_equiv_p (&XEXP (x, 1), XEXP (y, 1), rvalue, info))
	    || (rtx_equiv_p (&XEXP (x, 0), XEXP (y, 1), rvalue, info)
		&& rtx_equiv_p (&XEXP (x, 1), XEXP (y, 0), rvalue, info)));

  /* Process subexpressions - this is similar to rtx_equal_p.  */
  length = GET_RTX_LENGTH (code);
  format = GET_RTX_FORMAT (code);

  for (i = 0; i < length; ++i)
    {
      switch (format[i])
	{
	case 'w':
	  if (XWINT (x, i) != XWINT (y, i))
	    return false;
	  break;
	case 'n':
	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return false;
	  break;
	case 'V':
	case 'E':
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return false;
	  if (XVEC (x, i) != 0)
	    {
	      int j;
	      for (j = 0; j < XVECLEN (x, i); ++j)
		{
		  if (! rtx_equiv_p (&XVECEXP (x, i, j), XVECEXP (y, i, j),
				     rvalue, info))
		    return false;
		}
	    }
	  break;
	case 'e':
	  if (! rtx_equiv_p (&XEXP (x, i), XEXP (y, i), rvalue, info))
	    return false;
	  break;
	case 'S':
	case 's':
	  if ((XSTR (x, i) || XSTR (y, i))
	      && (! XSTR (x, i) || ! XSTR (y, i)
		  || strcmp (XSTR (x, i), XSTR (y, i))))
	    return false;
	  break;
	case 'u':
	  /* These are just backpointers, so they don't matter.  */
	  break;
	case '0':
	case 't':
	  break;
	  /* It is believed that rtx's at this level will never
	     contain anything but integers and other rtx's,
	     except for within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  gcc_unreachable ();
	}
    }
  return true;
}
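/* A hypothetical caller sketch (not taken from the original pass): match the
   patterns of two insns with rtx_equiv_p and then either commit or cancel the
   in-group replacements it queued, as the comment above rtx_equiv_p requires.
   The change-group routines apply_change_group / cancel_changes from recog.c
   are assumed; the helper name is made up.  */

static bool
try_match_insn_patterns (rtx x_insn, rtx y_insn, struct equiv_info *info)
{
  /* RVALUE == -1: treat the whole pattern as a source and ignore the
     SET_DESTs, which the caller is assumed to have matched already.  */
  if (rtx_equiv_p (&PATTERN (x_insn), PATTERN (y_insn), -1, info)
      && apply_change_group ())
    return true;

  /* Either the patterns differ or a queued replacement was rejected;
     drop any pending changes.  */
  cancel_changes (0);
  return false;
}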