Example #1
0
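/* Replace the division at USE_P with a multiplication by the reciprocal
   already computed for this basic block, provided the block is being
   optimized for speed and the use is not the statement that defines the
   reciprocal itself.  */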
static inline void
replace_reciprocal (use_operand_p use_p)
{
  gimple use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb)
      && occ->recip_def && use_stmt != occ->recip_def_stmt)
    {
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      SET_USE (use_p, occ->recip_def);
      fold_stmt_inplace (use_stmt);
      update_stmt (use_stmt);
    }
}
Example #2
0
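/* Expand the switch statement SWTCH as a series of bit tests.  INDEX_EXPR
   is the value being switched on; MINVAL and MAXVAL are the lowest and
   highest case values and RANGE is MAXVAL - MINVAL.  Case labels that
   branch to the same target edge are merged into a single word-mode bit
   mask, the default edge is split, and the rebased index
   ((unsigned) INDEX_EXPR - MINVAL) is first checked against RANGE so that
   out-of-range values go to the default label.  */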
static void
emit_case_bit_tests (gswitch *swtch, tree index_expr,
		     tree minval, tree range, tree maxval)
{
  struct case_bit_test test[MAX_CASE_BIT_TESTS];
  unsigned int i, j, k;
  unsigned int count;

  basic_block switch_bb = gimple_bb (swtch);
  basic_block default_bb, new_default_bb, new_bb;
  edge default_edge;
  bool update_dom = dom_info_available_p (CDI_DOMINATORS);

  vec<basic_block> bbs_to_fix_dom = vNULL;

  tree index_type = TREE_TYPE (index_expr);
  tree unsigned_index_type = unsigned_type_for (index_type);
  unsigned int branch_num = gimple_switch_num_labels (swtch);

  gimple_stmt_iterator gsi;
  gassign *shift_stmt;

  tree idx, tmp, csui;
  tree word_type_node = lang_hooks.types.type_for_mode (word_mode, 1);
  tree word_mode_zero = fold_convert (word_type_node, integer_zero_node);
  tree word_mode_one = fold_convert (word_type_node, integer_one_node);
  int prec = TYPE_PRECISION (word_type_node);
  wide_int wone = wi::one (prec);

  memset (&test, 0, sizeof (test));

  /* Get the edge for the default case.  */
  tmp = gimple_switch_default_label (swtch);
  default_bb = label_to_block (CASE_LABEL (tmp));
  default_edge = find_edge (switch_bb, default_bb);

  /* Go through all case labels and collect, for each target edge, the bit
     mask and other information we need to build the branch tests.  */
  count = 0;
  for (i = 1; i < branch_num; i++)
    {
      unsigned int lo, hi;
      tree cs = gimple_switch_label (swtch, i);
      tree label = CASE_LABEL (cs);
      edge e = find_edge (switch_bb, label_to_block (label));
      for (k = 0; k < count; k++)
	if (e == test[k].target_edge)
	  break;

      if (k == count)
	{
	  gcc_checking_assert (count < MAX_CASE_BIT_TESTS);
	  test[k].mask = wi::zero (prec);
	  test[k].target_edge = e;
	  test[k].label = label;
	  test[k].bits = 1;
	  count++;
	}
      else
        test[k].bits++;

      lo = tree_to_uhwi (int_const_binop (MINUS_EXPR,
					  CASE_LOW (cs), minval));
      if (CASE_HIGH (cs) == NULL_TREE)
	hi = lo;
      else
	hi = tree_to_uhwi (int_const_binop (MINUS_EXPR,
					    CASE_HIGH (cs), minval));

      for (j = lo; j <= hi; j++)
	test[k].mask |= wi::lshift (wone, j);
    }

  qsort (test, count, sizeof (*test), case_bit_test_cmp);

  /* If all values are in the 0 .. BITS_PER_WORD-1 range, we can get rid of
     the minval subtractions, but it might make the mask constants more
     expensive.  So, compare the costs.  */
  if (compare_tree_int (minval, 0) > 0
      && compare_tree_int (maxval, GET_MODE_BITSIZE (word_mode)) < 0)
    {
      int cost_diff;
      HOST_WIDE_INT m = tree_to_uhwi (minval);
      rtx reg = gen_raw_REG (word_mode, 10000);
      bool speed_p = optimize_bb_for_speed_p (gimple_bb (swtch));
      cost_diff = set_rtx_cost (gen_rtx_PLUS (word_mode, reg,
					      GEN_INT (-m)), speed_p);
      for (i = 0; i < count; i++)
	{
	  rtx r = immed_wide_int_const (test[i].mask, word_mode);
	  cost_diff += set_src_cost (gen_rtx_AND (word_mode, reg, r),
				     word_mode, speed_p);
	  r = immed_wide_int_const (wi::lshift (test[i].mask, m), word_mode);
	  cost_diff -= set_src_cost (gen_rtx_AND (word_mode, reg, r),
				     word_mode, speed_p);
	}
      if (cost_diff > 0)
	{
	  for (i = 0; i < count; i++)
	    test[i].mask = wi::lshift (test[i].mask, m);
	  minval = build_zero_cst (TREE_TYPE (minval));
	  range = maxval;
	}
    }

  /* We generate two jumps to the default case label.
     Split the default edge, so that we don't have to do any PHI node
     updating.  */
  new_default_bb = split_edge (default_edge);

  if (update_dom)
    {
      bbs_to_fix_dom.create (10);
      bbs_to_fix_dom.quick_push (switch_bb);
      bbs_to_fix_dom.quick_push (default_bb);
      bbs_to_fix_dom.quick_push (new_default_bb);
    }

  /* Now build the test-and-branch code.  */

  gsi = gsi_last_bb (switch_bb);

  /* idx = (unsigned)x - minval.  */
  idx = fold_convert (unsigned_index_type, index_expr);
  idx = fold_build2 (MINUS_EXPR, unsigned_index_type, idx,
		     fold_convert (unsigned_index_type, minval));
  idx = force_gimple_operand_gsi (&gsi, idx,
				  /*simple=*/true, NULL_TREE,
				  /*before=*/true, GSI_SAME_STMT);

  /* if (idx > range) goto default */
  range = force_gimple_operand_gsi (&gsi,
				    fold_convert (unsigned_index_type, range),
				    /*simple=*/true, NULL_TREE,
				    /*before=*/true, GSI_SAME_STMT);
  tmp = fold_build2 (GT_EXPR, boolean_type_node, idx, range);
  new_bb = hoist_edge_and_branch_if_true (&gsi, tmp, default_edge, update_dom);
  if (update_dom)
    bbs_to_fix_dom.quick_push (new_bb);
  gcc_assert (gimple_bb (swtch) == new_bb);
  gsi = gsi_last_bb (new_bb);

  /* Any blocks dominated by the GIMPLE_SWITCH, but that are not successors
     of NEW_BB, are still immediately dominated by SWITCH_BB.  Make it so.  */
  if (update_dom)
    {
      vec<basic_block> dom_bbs;
      basic_block dom_son;

      dom_bbs = get_dominated_by (CDI_DOMINATORS, new_bb);
      FOR_EACH_VEC_ELT (dom_bbs, i, dom_son)
	{
	  edge e = find_edge (new_bb, dom_son);
	  if (e && single_pred_p (e->dest))
	    continue;
	  set_immediate_dominator (CDI_DOMINATORS, dom_son, switch_bb);
	  bbs_to_fix_dom.safe_push (dom_son);
	}
      dom_bbs.release ();
    }

  /* The remainder of this function, which emits the per-mask bit tests
     and the final jumps, is not shown in this example.  */
}
Example #3
0
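/* Try to change the memory reference in MEM_INSN so that it uses the
   auto-inc/dec address NEW_ADDR built around INC_REG, in place of the
   separate add or inc instruction described by INC_INSN.  Commit the
   change and return true if it is valid and no more expensive than the
   original sequence; otherwise return false.  */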
static bool
attempt_change (rtx new_addr, rtx inc_reg)
{
  /* There are four cases: for the two cases that involve an add
     instruction, we are going to have to delete the add and insert a
     mov.  We are going to assume that the mov is free.  This is
     fairly early in the backend and there are a lot of opportunities
     for removing that move later.  In particular, the move may turn
     out to be dead; that is what the dead code elimination passes are
     for.  The two cases where we have an inc insn will be handled
     without inserting a mov.  */

  basic_block bb = BLOCK_FOR_INSN (mem_insn.insn);
  rtx mov_insn = NULL;
  int regno;
  rtx mem = *mem_insn.mem_loc;
  enum machine_mode mode = GET_MODE (mem);
  rtx new_mem;
  int old_cost = 0;
  int new_cost = 0;
  bool speed = optimize_bb_for_speed_p (bb);

  PUT_MODE (mem_tmp, mode);
  XEXP (mem_tmp, 0) = new_addr;

  old_cost = (set_src_cost (mem, speed)
	      + set_rtx_cost (PATTERN (inc_insn.insn), speed));
  new_cost = set_src_cost (mem_tmp, speed);

  /* The first item of business is to see if this is profitable.  */
  if (old_cost < new_cost)
    {
      if (dump_file)
	fprintf (dump_file, "cost failure old=%d new=%d\n", old_cost, new_cost);
      return false;
    }

  /* Jump through a lot of hoops to keep the attributes up to date.  We
     do not want to call one of the change address variants that take
     an offset even though we know the offset in many cases.  These
     assume you are changing where the address is pointing by the
     offset.  */
  new_mem = replace_equiv_address_nv (mem, new_addr);
  if (! validate_change (mem_insn.insn, mem_insn.mem_loc, new_mem, 0))
    {
      if (dump_file)
	fprintf (dump_file, "validation failure\n");
      return false;
    }

  /* From here to the end of the function we are committed to the
     change, i.e. nothing fails.  Generate any necessary movs, move
     any regnotes, and fix up the reg_next_{use,inc_use,def}.  */
  switch (inc_insn.form)
    {
    case FORM_PRE_ADD:
      /* Replace the addition with a move.  Do it at the location of
	 the addition since the operand of the addition may change
	 before the memory reference.  */
      mov_insn = insert_move_insn_before (inc_insn.insn,
					  inc_insn.reg_res, inc_insn.reg0);
      move_dead_notes (mov_insn, inc_insn.insn, inc_insn.reg0);

      regno = REGNO (inc_insn.reg_res);
      reg_next_def[regno] = mov_insn;
      reg_next_use[regno] = NULL;
      regno = REGNO (inc_insn.reg0);
      reg_next_use[regno] = mov_insn;
      df_recompute_luids (bb);
      break;

    case FORM_POST_INC:
      regno = REGNO (inc_insn.reg_res);
      if (reg_next_use[regno] == reg_next_inc_use[regno])
	reg_next_inc_use[regno] = NULL;

      /* Fallthru.  */
    case FORM_PRE_INC:
      regno = REGNO (inc_insn.reg_res);
      reg_next_def[regno] = mem_insn.insn;
      reg_next_use[regno] = NULL;

      break;

    case FORM_POST_ADD:
      mov_insn = insert_move_insn_before (mem_insn.insn,
					  inc_insn.reg_res, inc_insn.reg0);
      move_dead_notes (mov_insn, inc_insn.insn, inc_insn.reg0);

      /* Do not move anything to the mov insn because the instruction
	 pointer for the main iteration has not yet hit that.  It is
	 still pointing to the mem insn. */
      regno = REGNO (inc_insn.reg_res);
      reg_next_def[regno] = mem_insn.insn;
      reg_next_use[regno] = NULL;

      regno = REGNO (inc_insn.reg0);
      reg_next_use[regno] = mem_insn.insn;
      if ((reg_next_use[regno] == reg_next_inc_use[regno])
	  || (reg_next_inc_use[regno] == inc_insn.insn))
	reg_next_inc_use[regno] = NULL;
      df_recompute_luids (bb);
      break;

    case FORM_last:
    default:
      gcc_unreachable ();
    }

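  /* If the increment amount was a register rather than a constant, its
     next-use information must be updated as well.  */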
  if (!inc_insn.reg1_is_const)
    {
      regno = REGNO (inc_insn.reg1);
      reg_next_use[regno] = mem_insn.insn;
      if ((reg_next_use[regno] == reg_next_inc_use[regno])
	  || (reg_next_inc_use[regno] == inc_insn.insn))
	reg_next_inc_use[regno] = NULL;
    }

  delete_insn (inc_insn.insn);

  if (dump_file && mov_insn)
    {
      fprintf (dump_file, "inserting mov ");
      dump_insn_slim (dump_file, mov_insn);
    }

  /* Record that this insn has an implicit side effect.  */
  add_reg_note (mem_insn.insn, REG_INC, inc_reg);

  if (dump_file)
    {
      fprintf (dump_file, "****success ");
      dump_insn_slim (dump_file, mem_insn.insn);
    }

  return true;
}
Example #4
0
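/* Try to replace operands of INSN with hard registers that cselib knows to
   hold equivalent values.  For each constraint alternative, record which
   registers it could accept, score the alternatives by their '?'/'!'
   reject weights and by how many registers they would let us substitute,
   and apply the substitutions of the best alternative.  TESTREG is a
   scratch REG rtx used to test whether a candidate register fits a
   constraint's register class.  Return nonzero if any change was made.  */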
static int
reload_cse_simplify_operands (rtx insn, rtx testreg)
{
  int i, j;

  /* For each operand, all registers that are equivalent to it.  */
  HARD_REG_SET equiv_regs[MAX_RECOG_OPERANDS];

  const char *constraints[MAX_RECOG_OPERANDS];

  /* Vector recording how bad an alternative is.  */
  int *alternative_reject;
  /* Vector recording how many registers can be introduced by choosing
     this alternative.  */
  int *alternative_nregs;
  /* Array of vectors recording, for each operand and each alternative,
     which hard register to substitute, or -1 if the operand should be
     left as it is.  */
  int *op_alt_regno[MAX_RECOG_OPERANDS];
  /* Array of alternatives, sorted in order of decreasing desirability.  */
  int *alternative_order;

  extract_insn (insn);

  if (recog_data.n_alternatives == 0 || recog_data.n_operands == 0)
    return 0;

  /* Figure out which alternative currently matches.  */
  if (! constrain_operands (1))
    fatal_insn_not_found (insn);

  alternative_reject = XALLOCAVEC (int, recog_data.n_alternatives);
  alternative_nregs = XALLOCAVEC (int, recog_data.n_alternatives);
  alternative_order = XALLOCAVEC (int, recog_data.n_alternatives);
  memset (alternative_reject, 0, recog_data.n_alternatives * sizeof (int));
  memset (alternative_nregs, 0, recog_data.n_alternatives * sizeof (int));

  /* For each operand, find out which regs are equivalent.  */
  for (i = 0; i < recog_data.n_operands; i++)
    {
      cselib_val *v;
      struct elt_loc_list *l;
      rtx op;
      enum machine_mode mode;

      CLEAR_HARD_REG_SET (equiv_regs[i]);

      /* cselib blows up on CODE_LABELs.  Trying to fix that doesn't seem
	 right, so avoid the problem here.  Likewise if we have a constant
         and the insn pattern doesn't tell us the mode we need.  */
      if (LABEL_P (recog_data.operand[i])
	  || (CONSTANT_P (recog_data.operand[i])
	      && recog_data.operand_mode[i] == VOIDmode))
	continue;

      op = recog_data.operand[i];
      mode = GET_MODE (op);
#ifdef LOAD_EXTEND_OP
      if (MEM_P (op)
	  && GET_MODE_BITSIZE (mode) < BITS_PER_WORD
	  && LOAD_EXTEND_OP (mode) != UNKNOWN)
	{
	  rtx set = single_set (insn);

	  /* We might have multiple sets, some of which do implicit
	     extension.  Punt on this for now.  */
	  if (! set)
	    continue;
	  /* If the destination is also a MEM or a STRICT_LOW_PART, no
	     extension applies.
	     Also, if there is an explicit extension, we don't have to
	     worry about an implicit one.  */
	  else if (MEM_P (SET_DEST (set))
		   || GET_CODE (SET_DEST (set)) == STRICT_LOW_PART
		   || GET_CODE (SET_SRC (set)) == ZERO_EXTEND
		   || GET_CODE (SET_SRC (set)) == SIGN_EXTEND)
	    ; /* Continue ordinary processing.  */
#ifdef CANNOT_CHANGE_MODE_CLASS
	  /* If the register cannot change mode to word_mode, it follows that
	     it cannot have been used in word_mode.  */
	  else if (REG_P (SET_DEST (set))
		   && CANNOT_CHANGE_MODE_CLASS (GET_MODE (SET_DEST (set)),
						word_mode,
						REGNO_REG_CLASS (REGNO (SET_DEST (set)))))
	    ; /* Continue ordinary processing.  */
#endif
	  /* If this is a straight load, make the extension explicit.  */
	  else if (REG_P (SET_DEST (set))
		   && recog_data.n_operands == 2
		   && SET_SRC (set) == op
		   && SET_DEST (set) == recog_data.operand[1-i])
	    {
	      validate_change (insn, recog_data.operand_loc[i],
			       gen_rtx_fmt_e (LOAD_EXTEND_OP (mode),
					      word_mode, op),
			       1);
	      validate_change (insn, recog_data.operand_loc[1-i],
			       gen_rtx_REG (word_mode, REGNO (SET_DEST (set))),
			       1);
	      if (! apply_change_group ())
		return 0;
	      return reload_cse_simplify_operands (insn, testreg);
	    }
	  else
	    /* ??? There might be arithmetic operations with memory that are
	       safe to optimize, but is it worth the trouble?  */
	    continue;
	}
#endif /* LOAD_EXTEND_OP */
      v = cselib_lookup (op, recog_data.operand_mode[i], 0);
      if (! v)
	continue;

      for (l = v->locs; l; l = l->next)
	if (REG_P (l->loc))
	  SET_HARD_REG_BIT (equiv_regs[i], REGNO (l->loc));
    }

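  /* For each operand and each constraint alternative, look for a hard
     register from EQUIV_REGS that the alternative would accept and record
     it in OP_ALT_REGNO.  Also accumulate the '?' and '!' reject weights of
     each alternative.  */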
  for (i = 0; i < recog_data.n_operands; i++)
    {
      enum machine_mode mode;
      int regno;
      const char *p;

      op_alt_regno[i] = XALLOCAVEC (int, recog_data.n_alternatives);
      for (j = 0; j < recog_data.n_alternatives; j++)
	op_alt_regno[i][j] = -1;

      p = constraints[i] = recog_data.constraints[i];
      mode = recog_data.operand_mode[i];

      /* Add the reject values for each alternative given by the constraints
	 for this operand.  */
      j = 0;
      while (*p != '\0')
	{
	  char c = *p++;
	  if (c == ',')
	    j++;
	  else if (c == '?')
	    alternative_reject[j] += 3;
	  else if (c == '!')
	    alternative_reject[j] += 300;
	}

      /* We won't change operands which are already registers.  We
	 also don't want to modify output operands.  */
      regno = true_regnum (recog_data.operand[i]);
      if (regno >= 0
	  || constraints[i][0] == '='
	  || constraints[i][0] == '+')
	continue;

      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
	{
	  int rclass = (int) NO_REGS;

	  if (! TEST_HARD_REG_BIT (equiv_regs[i], regno))
	    continue;

	  SET_REGNO (testreg, regno);
	  PUT_MODE (testreg, mode);

	  /* We found a register equal to this operand.  Now look for all
	     alternatives that can accept this register and have not been
	     assigned a register they can use yet.  */
	  j = 0;
	  p = constraints[i];
	  for (;;)
	    {
	      char c = *p;

	      switch (c)
		{
		case '=':  case '+':  case '?':
		case '#':  case '&':  case '!':
		case '*':  case '%':
		case '0':  case '1':  case '2':  case '3':  case '4':
		case '5':  case '6':  case '7':  case '8':  case '9':
		case '<':  case '>':  case 'V':  case 'o':
		case 'E':  case 'F':  case 'G':  case 'H':
		case 's':  case 'i':  case 'n':
		case 'I':  case 'J':  case 'K':  case 'L':
		case 'M':  case 'N':  case 'O':  case 'P':
		case 'p':  case 'X':  case TARGET_MEM_CONSTRAINT:
		  /* These don't say anything we care about.  */
		  break;

		case 'g': case 'r':
		  rclass = reg_class_subunion[(int) rclass][(int) GENERAL_REGS];
		  break;

		default:
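		  /* Any other constraint letter is mapped to a register
		     class (possibly NO_REGS) by REG_CLASS_FROM_CONSTRAINT;
		     accumulate the classes a candidate register must fit.  */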
		  rclass
		    = (reg_class_subunion
		       [(int) rclass]
		       [(int) REG_CLASS_FROM_CONSTRAINT ((unsigned char) c, p)]);
		  break;

		case ',': case '\0':
		  /* See if REGNO fits this alternative, and set it up as the
		     replacement register if we don't have one for this
		     alternative yet and the operand being replaced is not
		     a cheap CONST_INT.  */
		  if (op_alt_regno[i][j] == -1
		      && reg_fits_class_p (testreg, rclass, 0, mode)
		      && (GET_CODE (recog_data.operand[i]) != CONST_INT
			  || (rtx_cost (recog_data.operand[i], SET,
			  		optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn)))
			      > rtx_cost (testreg, SET,
			  		optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn))))))
		    {
		      alternative_nregs[j]++;
		      op_alt_regno[i][j] = regno;
		    }
		  j++;
		  rclass = (int) NO_REGS;
		  break;
		}
	      p += CONSTRAINT_LEN (c, p);

	      if (c == '\0')
		break;
	    }
	}
    }

  /* Record all alternatives which are better or equal to the currently
     matching one in the alternative_order array.  */
  for (i = j = 0; i < recog_data.n_alternatives; i++)
    if (alternative_reject[i] <= alternative_reject[which_alternative])
      alternative_order[j++] = i;
  recog_data.n_alternatives = j;

  /* Sort it.  Given a small number of alternatives, a dumb algorithm
     won't hurt too much.  */
  for (i = 0; i < recog_data.n_alternatives - 1; i++)
    {
      int best = i;
      int best_reject = alternative_reject[alternative_order[i]];
      int best_nregs = alternative_nregs[alternative_order[i]];
      int tmp;

      for (j = i + 1; j < recog_data.n_alternatives; j++)
	{
	  int this_reject = alternative_reject[alternative_order[j]];
	  int this_nregs = alternative_nregs[alternative_order[j]];

	  if (this_reject < best_reject
	      || (this_reject == best_reject && this_nregs > best_nregs))
	    {
	      best = j;
	      best_reject = this_reject;
	      best_nregs = this_nregs;
	    }
	}

      tmp = alternative_order[best];
      alternative_order[best] = alternative_order[i];
      alternative_order[i] = tmp;
    }

  /* Substitute the operands as determined by op_alt_regno for the best
     alternative.  */
  j = alternative_order[0];

  for (i = 0; i < recog_data.n_operands; i++)
    {
      enum machine_mode mode = recog_data.operand_mode[i];
      if (op_alt_regno[i][j] == -1)
	continue;

      validate_change (insn, recog_data.operand_loc[i],
		       gen_rtx_REG (mode, op_alt_regno[i][j]), 1);
    }

  for (i = recog_data.n_dups - 1; i >= 0; i--)
    {
      int op = recog_data.dup_num[i];
      enum machine_mode mode = recog_data.operand_mode[op];

      if (op_alt_regno[op][j] == -1)
	continue;

      validate_change (insn, recog_data.dup_loc[i],
		       gen_rtx_REG (mode, op_alt_regno[op][j]), 1);
    }

  return apply_change_group ();
}
Example #5
0
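/* Try to simplify the single SET in INSN by replacing its source with a
   cheaper equivalent found via cselib, either a constant or a hard
   register, taking LOAD_EXTEND_OP into account for sub-word memory loads.
   Return nonzero if the source was replaced.  */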
static int
reload_cse_simplify_set (rtx set, rtx insn)
{
  int did_change = 0;
  int dreg;
  rtx src;
  enum reg_class dclass;
  int old_cost;
  cselib_val *val;
  struct elt_loc_list *l;
#ifdef LOAD_EXTEND_OP
  enum rtx_code extend_op = UNKNOWN;
#endif
  bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn));

  dreg = true_regnum (SET_DEST (set));
  if (dreg < 0)
    return 0;

  src = SET_SRC (set);
  if (side_effects_p (src) || true_regnum (src) >= 0)
    return 0;

  dclass = REGNO_REG_CLASS (dreg);

#ifdef LOAD_EXTEND_OP
  /* When replacing a memory with a register, we need to honor assumptions
     that combine made wrt the contents of sign bits.  We'll do this by
     generating an extend instruction instead of a reg->reg copy.  Thus
     the destination must be a register that we can widen.  */
  if (MEM_P (src)
      && GET_MODE_BITSIZE (GET_MODE (src)) < BITS_PER_WORD
      && (extend_op = LOAD_EXTEND_OP (GET_MODE (src))) != UNKNOWN
      && !REG_P (SET_DEST (set)))
    return 0;
#endif

  val = cselib_lookup (src, GET_MODE (SET_DEST (set)), 0);
  if (! val)
    return 0;

  /* If memory loads are cheaper than register copies, don't change them.  */
  if (MEM_P (src))
    old_cost = MEMORY_MOVE_COST (GET_MODE (src), dclass, 1);
  else if (REG_P (src))
    old_cost = REGISTER_MOVE_COST (GET_MODE (src),
				   REGNO_REG_CLASS (REGNO (src)), dclass);
  else
    old_cost = rtx_cost (src, SET, speed);

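  /* Walk every location cselib knows to be equivalent to SRC and look for
     a constant or hard register that is cheaper than the current source.  */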
  for (l = val->locs; l; l = l->next)
    {
      rtx this_rtx = l->loc;
      int this_cost;

      if (CONSTANT_P (this_rtx) && ! references_value_p (this_rtx, 0))
	{
#ifdef LOAD_EXTEND_OP
	  if (extend_op != UNKNOWN)
	    {
	      HOST_WIDE_INT this_val;

	      /* ??? I'm lazy and don't wish to handle CONST_DOUBLE.  Other
		 constants, such as SYMBOL_REF, cannot be extended.  */
	      if (GET_CODE (this_rtx) != CONST_INT)
		continue;

	      this_val = INTVAL (this_rtx);
	      switch (extend_op)
		{
		case ZERO_EXTEND:
		  this_val &= GET_MODE_MASK (GET_MODE (src));
		  break;
		case SIGN_EXTEND:
		  /* ??? In theory we're already extended.  */
		  if (this_val == trunc_int_for_mode (this_val, GET_MODE (src)))
		    break;
		default:
		  gcc_unreachable ();
		}
	      this_rtx = GEN_INT (this_val);
	    }
#endif
	  this_cost = rtx_cost (this_rtx, SET, speed);
	}
      else if (REG_P (this_rtx))
	{
#ifdef LOAD_EXTEND_OP
	  if (extend_op != UNKNOWN)
	    {
	      this_rtx = gen_rtx_fmt_e (extend_op, word_mode, this_rtx);
	      this_cost = rtx_cost (this_rtx, SET, speed);
	    }
	  else
#endif
	    this_cost = REGISTER_MOVE_COST (GET_MODE (this_rtx),
					    REGNO_REG_CLASS (REGNO (this_rtx)),
					    dclass);
	}
      else
	continue;

      /* If equal costs, prefer registers over anything else.  That
	 tends to lead to smaller instructions on some machines.  */
      if (this_cost < old_cost
	  || (this_cost == old_cost
	      && REG_P (this_rtx)
	      && !REG_P (SET_SRC (set))))
	{
#ifdef LOAD_EXTEND_OP
	  if (GET_MODE_BITSIZE (GET_MODE (SET_DEST (set))) < BITS_PER_WORD
	      && extend_op != UNKNOWN
#ifdef CANNOT_CHANGE_MODE_CLASS
	      && !CANNOT_CHANGE_MODE_CLASS (GET_MODE (SET_DEST (set)),
					    word_mode,
					    REGNO_REG_CLASS (REGNO (SET_DEST (set))))
#endif
	      )
	    {
	      rtx wide_dest = gen_rtx_REG (word_mode, REGNO (SET_DEST (set)));
	      ORIGINAL_REGNO (wide_dest) = ORIGINAL_REGNO (SET_DEST (set));
	      validate_change (insn, &SET_DEST (set), wide_dest, 1);
	    }
#endif

	  validate_unshare_change (insn, &SET_SRC (set), this_rtx, 1);
	  old_cost = this_cost, did_change = 1;
	}
    }

  return did_change;
}