Example #1
rtx
nds32_expand_store_multiple (int base_regno, int count,
			     rtx base_addr, rtx basemem,
			     bool update_base_reg_p,
			     rtx *update_base_reg)
{
  int par_index;
  int offset;
  int start_idx;
  rtx result;
  rtx new_addr, mem, reg;

  if (count == 1)
    {
      reg = gen_rtx_REG (SImode, base_regno);
      if (update_base_reg_p)
	{
	  *update_base_reg = gen_reg_rtx (SImode);
	  return gen_unaligned_store_update_base_w (*update_base_reg, base_addr, reg);
	}
      else
	return gen_unaligned_store_w (gen_rtx_MEM (SImode, base_addr), reg);
    }

  /* Create the pattern that is presented in nds32-multiple.md.  */

  if (update_base_reg_p)
    {
      result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + 1));
      start_idx = 1;
    }
  else
    {
      result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
      start_idx = 0;
    }

  if (update_base_reg_p)
    {
      offset           = count * 4;
      new_addr         = plus_constant (Pmode, base_addr, offset);
      *update_base_reg = gen_reg_rtx (SImode);

      XVECEXP (result, 0, 0) = gen_rtx_SET (*update_base_reg, new_addr);
    }

  for (par_index = 0; par_index < count; par_index++)
    {
      offset   = par_index * 4;
      /* 4-byte for storing data to memory.  */
      new_addr = plus_constant (Pmode, base_addr, offset);
      mem      = adjust_automodify_address_nv (basemem, SImode,
					       new_addr, offset);
      reg      = gen_rtx_REG (SImode, base_regno + par_index);

      XVECEXP (result, 0, par_index + start_idx) = gen_rtx_SET (mem, reg);
    }

  return result;
}
Example #2
static bool
can_eliminate_compare (rtx compare, rtx eh_note, struct comparison *cmp)
{
  /* Take care that it's in the same EH region.  */
  if (cfun->can_throw_non_call_exceptions
      && !rtx_equal_p (eh_note, cmp->eh_note))
    return false;

  /* Make sure the compare is redundant with the previous.  */
  if (!rtx_equal_p (XEXP (compare, 0), cmp->in_a)
      || !rtx_equal_p (XEXP (compare, 1), cmp->in_b))
    return false;

  /* New mode must be compatible with the previous compare mode.  */
  enum machine_mode new_mode
    = targetm.cc_modes_compatible (GET_MODE (compare), cmp->orig_mode);

  if (new_mode == VOIDmode)
    return false;

  if (cmp->orig_mode != new_mode)
    {
      /* Generate new comparison for substitution.  */
      rtx flags = gen_rtx_REG (new_mode, targetm.flags_regnum);
      rtx x = gen_rtx_COMPARE (new_mode, cmp->in_a, cmp->in_b);
      x = gen_rtx_SET (flags, x);

      if (!validate_change (cmp->insn, &PATTERN (cmp->insn), x, false))
	return false;

      cmp->orig_mode = new_mode;
    }

  return true;
}
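A side note that is not part of the excerpted sources: the examples on this page mix two spellings of gen_rtx_SET. Current GCC defines it as a two-argument macro that always builds the SET in VOIDmode (as in Example #2 above), while older releases took an explicit mode as the first argument, conventionally VOIDmode (as in Example #7 below). A minimal sketch of the current spelling; make_copy_pattern, dest and src are illustrative names only:

static rtx
make_copy_pattern (rtx dest, rtx src)
{
  /* Current GCC: gen_rtx_SET (DEST, SRC) expands to a SET rtx in VOIDmode.
     Older releases spelled the same thing gen_rtx_SET (VOIDmode, dest, src).  */
  return gen_rtx_SET (dest, src);
}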
Example #3
static void
test_uncond_jump ()
{
  rtx_insn *label = gen_label_rtx ();
  rtx jump_pat = gen_rtx_SET (pc_rtx,
			      gen_rtx_LABEL_REF (VOIDmode,
						 label));
  ASSERT_EQ (SET, jump_pat->code);
  ASSERT_EQ (LABEL_REF, SET_SRC (jump_pat)->code);
  ASSERT_EQ (label, label_ref_label (SET_SRC (jump_pat)));
  ASSERT_EQ (PC, SET_DEST (jump_pat)->code);

  verify_print_pattern ("pc=L0", jump_pat);

  ASSERT_RTL_DUMP_EQ ("(set (pc)\n"
		      "    (label_ref 0))",
		      jump_pat);

  rtx_insn *jump_insn = emit_jump_insn (jump_pat);
  ASSERT_FALSE (any_condjump_p (jump_insn));
  ASSERT_TRUE (any_uncondjump_p (jump_insn));
  ASSERT_TRUE (pc_set (jump_insn));
  ASSERT_TRUE (simplejump_p (jump_insn));
  ASSERT_TRUE (onlyjump_p (jump_insn));
  ASSERT_TRUE (control_flow_insn_p (jump_insn));

  ASSERT_RTL_DUMP_EQ ("(cjump_insn 1 (set (pc)\n"
		      "        (label_ref 0)))\n",
		      jump_insn);
}
Example #4
rtx
nds32_expand_store_multiple (int base_regno, int count,
			     rtx base_addr, rtx basemem)
{
  int par_index;
  int offset;
  rtx result;
  rtx new_addr, mem, reg;

  /* Create the pattern that is presented in nds32-multiple.md.  */

  result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));

  for (par_index = 0; par_index < count; par_index++)
    {
      offset   = par_index * 4;
      /* 4-byte for storing data to memory.  */
      new_addr = plus_constant (Pmode, base_addr, offset);
      mem      = adjust_automodify_address_nv (basemem, SImode,
					       new_addr, offset);
      reg      = gen_rtx_REG (SImode, base_regno + par_index);

      XVECEXP (result, 0, par_index) = gen_rtx_SET (mem, reg);
    }

  return result;
}
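The PARALLEL built by this variant is simply a vector of (set (mem) (reg)) pairs, one per word stored. A hedged usage sketch, assuming the function above is available; the wrapper emit_store_multiple_sketch and its operand names are hypothetical:

static void
emit_store_multiple_sketch (int base_regno, int count,
			    rtx base_addr, rtx basemem)
{
  /* For count == 2 this builds roughly
       (parallel [(set (mem:SI base_addr)          (reg:SI base_regno))
		  (set (mem:SI (plus base_addr 4)) (reg:SI base_regno+1))])
     and emits it as a single insn.  */
  rtx par = nds32_expand_store_multiple (base_regno, count,
					 base_addr, basemem);
  emit_insn (par);
}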
Example #5
static void
compute_init_costs (void)
{
  rtx rtx_jump, rtx_store, rtx_return, reg, label;
  basic_block bb;

  FOR_EACH_BB (bb)
    if (BB_HEAD (bb))
      break;

  label = block_label (bb);
  reg = gen_rtx_REG (Pmode, 0);

  /* Pattern for indirect jump.  */
  rtx_jump = gen_indirect_jump (reg);

  /* Pattern for storing address.  */
  rtx_store = gen_rtx_SET (VOIDmode, reg, gen_symbol_ref_rtx_for_label (label));

  /* Pattern for return insn.  */
  rtx_return = gen_jump (label);

  /* The cost of jump.  */
  seq_jump_cost = compute_rtx_cost (make_jump_insn_raw (rtx_jump));

  /* The cost of calling sequence.  */
  seq_call_cost = seq_jump_cost + compute_rtx_cost (make_insn_raw (rtx_store));

  /* The cost of return.  */
  seq_return_cost = compute_rtx_cost (make_jump_insn_raw (rtx_return));

  /* Simple heuristic for minimal sequence cost.  */
  seq_call_cost   = (int)(seq_call_cost * (double)SEQ_CALL_COST_MULTIPLIER);
}
Example #6
static void
test_single_set ()
{
  /* A label is not a SET.  */
  ASSERT_EQ (NULL_RTX, single_set (gen_label_rtx ()));

  /* An unconditional jump insn is a single SET.  */
  rtx set_pc = gen_rtx_SET (pc_rtx,
			    gen_rtx_LABEL_REF (VOIDmode,
					       gen_label_rtx ()));
  rtx_insn *jump_insn = emit_jump_insn (set_pc);
  ASSERT_EQ (set_pc, single_set (jump_insn));

  /* etc */
}
Example #7
File: ree.c Project: aixoss/gcc
static bool
transform_ifelse (ext_cand *cand, rtx def_insn)
{
  rtx set_insn = PATTERN (def_insn);
  rtx srcreg, dstreg, srcreg2;
  rtx map_srcreg, map_dstreg, map_srcreg2;
  rtx ifexpr;
  rtx cond;
  rtx new_set;

  gcc_assert (GET_CODE (set_insn) == SET);

  cond = XEXP (SET_SRC (set_insn), 0);
  dstreg = SET_DEST (set_insn);
  srcreg = XEXP (SET_SRC (set_insn), 1);
  srcreg2 = XEXP (SET_SRC (set_insn), 2);
  /* If the conditional move already has the right or wider mode,
     there is nothing to do.  */
  if (GET_MODE_SIZE (GET_MODE (dstreg)) >= GET_MODE_SIZE (cand->mode))
    return true;

  map_srcreg = gen_rtx_REG (cand->mode, REGNO (srcreg));
  map_srcreg2 = gen_rtx_REG (cand->mode, REGNO (srcreg2));
  map_dstreg = gen_rtx_REG (cand->mode, REGNO (dstreg));
  ifexpr = gen_rtx_IF_THEN_ELSE (cand->mode, cond, map_srcreg, map_srcreg2);
  new_set = gen_rtx_SET (VOIDmode, map_dstreg, ifexpr);

  if (validate_change (def_insn, &PATTERN (def_insn), new_set, true)
      && update_reg_equal_equiv_notes (def_insn, cand->mode, GET_MODE (dstreg),
				       cand->code))
    {
      if (dump_file)
        {
          fprintf (dump_file,
		   "Mode of conditional move instruction extended:\n");
          print_rtl_single (dump_file, def_insn);
        }
      return true;
    }

  return false;
}
Example #8
rtx
fr30_move_double (rtx * operands)
{
  rtx src  = operands[1];
  rtx dest = operands[0];
  enum rtx_code src_code = GET_CODE (src);
  enum rtx_code dest_code = GET_CODE (dest);
  enum machine_mode mode = GET_MODE (dest);
  rtx val;

  start_sequence ();

  if (dest_code == REG)
    {
      if (src_code == REG)
	{
	  int reverse = (REGNO (dest) == REGNO (src) + 1);
	  
	  /* We normally copy the low-numbered register first.  However, if
	     the first register of operand 0 is the same as the second register
	     of operand 1, we must copy in the opposite order.  */
	  emit_insn (gen_rtx_SET (VOIDmode,
				  operand_subword (dest, reverse, TRUE, mode),
				  operand_subword (src,  reverse, TRUE, mode)));
	  
	  emit_insn (gen_rtx_SET (VOIDmode,
			      operand_subword (dest, !reverse, TRUE, mode),
			      operand_subword (src,  !reverse, TRUE, mode)));
	}
      else if (src_code == MEM)
	{
	  rtx addr = XEXP (src, 0);
	  int dregno = REGNO (dest);
	  rtx dest0;
	  rtx dest1;
	  rtx new_mem;
	  
	  /* If the high-address word is used in the address, we
	     must load it last.  Otherwise, load it first.  */
	  int reverse = (refers_to_regno_p (dregno, dregno + 1, addr, 0) != 0);

	  gcc_assert (GET_CODE (addr) == REG);
	  
	  dest0 = operand_subword (dest, reverse, TRUE, mode);
	  dest1 = operand_subword (dest, !reverse, TRUE, mode);

	  if (reverse)
	    {
	      emit_insn (gen_rtx_SET (VOIDmode, dest1,
				      adjust_address (src, SImode, 0)));
	      emit_insn (gen_rtx_SET (SImode, dest0,
				      gen_rtx_REG (SImode, REGNO (addr))));
	      emit_insn (gen_rtx_SET (SImode, dest0,
				      plus_constant (dest0, UNITS_PER_WORD)));

	      new_mem = gen_rtx_MEM (SImode, dest0);
	      MEM_COPY_ATTRIBUTES (new_mem, src);
	      
	      emit_insn (gen_rtx_SET (VOIDmode, dest0, new_mem));
	    }
	  else
	    {
	      emit_insn (gen_rtx_SET (VOIDmode, dest0,
				      adjust_address (src, SImode, 0)));
	      emit_insn (gen_rtx_SET (SImode, dest1,
				      gen_rtx_REG (SImode, REGNO (addr))));
	      emit_insn (gen_rtx_SET (SImode, dest1,
				      plus_constant (dest1, UNITS_PER_WORD)));

	      new_mem = gen_rtx_MEM (SImode, dest1);
	      MEM_COPY_ATTRIBUTES (new_mem, src);
	      
	      emit_insn (gen_rtx_SET (VOIDmode, dest1, new_mem));
	    }
	}
      else if (src_code == CONST_INT || src_code == CONST_DOUBLE)
	{
	  rtx words[2];
	  split_double (src, &words[0], &words[1]);
	  emit_insn (gen_rtx_SET (VOIDmode,
				  operand_subword (dest, 0, TRUE, mode),
				  words[0]));
      
	  emit_insn (gen_rtx_SET (VOIDmode,
				  operand_subword (dest, 1, TRUE, mode),
				  words[1]));
	}
    }
  else if (src_code == REG && dest_code == MEM)
    {
      rtx addr = XEXP (dest, 0);
      rtx src0;
      rtx src1;

      gcc_assert (GET_CODE (addr) == REG);
      
      src0 = operand_subword (src, 0, TRUE, mode);
      src1 = operand_subword (src, 1, TRUE, mode);
      
      emit_insn (gen_rtx_SET (VOIDmode, adjust_address (dest, SImode, 0),
			      src0));

      if (REGNO (addr) == STACK_POINTER_REGNUM
	  || REGNO (addr) == FRAME_POINTER_REGNUM)
	emit_insn (gen_rtx_SET (VOIDmode,
				adjust_address (dest, SImode, UNITS_PER_WORD),
				src1));
      else
	{
	  rtx new_mem;
	  
	  /* We need a scratch register to hold the value of 'address + 4'.
	     We ought to allow gcc to find one for us, but for now, just
	     push one of the source registers.  */
	  emit_insn (gen_movsi_push (src0));
	  emit_insn (gen_movsi_internal (src0, addr));
	  emit_insn (gen_addsi_small_int (src0, src0, GEN_INT (UNITS_PER_WORD)));
	  
	  new_mem = gen_rtx_MEM (SImode, src0);
	  MEM_COPY_ATTRIBUTES (new_mem, dest);
	  
	  emit_insn (gen_rtx_SET (VOIDmode, new_mem, src1));
	  emit_insn (gen_movsi_pop (src0));
	}
    }
  else
    /* This should have been prevented by the constraints on movdi_insn.  */
    gcc_unreachable ();
  
  val = get_insns ();
  end_sequence ();

  return val;
}
Example #9
File: ree.c Project: aixoss/gcc
static bool
combine_set_extension (ext_cand *cand, rtx curr_insn, rtx *orig_set)
{
  rtx orig_src = SET_SRC (*orig_set);
  enum machine_mode orig_mode = GET_MODE (SET_DEST (*orig_set));
  rtx new_reg = gen_rtx_REG (cand->mode, REGNO (SET_DEST (*orig_set)));
  rtx new_set;

  /* Merge constants by directly moving the constant into the register under
     some conditions.  Recall that RTL constants are sign-extended.  */
  if (GET_CODE (orig_src) == CONST_INT
      && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (cand->mode))
    {
      if (INTVAL (orig_src) >= 0 || cand->code == SIGN_EXTEND)
	new_set = gen_rtx_SET (VOIDmode, new_reg, orig_src);
      else
	{
	  /* Zero-extend the negative constant by masking out the bits outside
	     the source mode.  */
	  rtx new_const_int
	    = GEN_INT (INTVAL (orig_src) & GET_MODE_MASK (orig_mode));
	  new_set = gen_rtx_SET (VOIDmode, new_reg, new_const_int);
	}
    }
  else if (GET_MODE (orig_src) == VOIDmode)
    {
      /* This is mostly due to a call insn that should not be optimized.  */
      return false;
    }
  else if (GET_CODE (orig_src) == cand->code)
    {
      /* Here is a sequence of two extensions.  Try to merge them.  */
      rtx temp_extension
	= gen_rtx_fmt_e (cand->code, cand->mode, XEXP (orig_src, 0));
      rtx simplified_temp_extension = simplify_rtx (temp_extension);
      if (simplified_temp_extension)
        temp_extension = simplified_temp_extension;
      new_set = gen_rtx_SET (VOIDmode, new_reg, temp_extension);
    }
  else if (GET_CODE (orig_src) == IF_THEN_ELSE)
    {
      /* Only IF_THEN_ELSE of phi-type copies are combined.  Otherwise,
         in general, IF_THEN_ELSE should not be combined.  */
      return false;
    }
  else
    {
      /* This is the normal case.  */
      rtx temp_extension
	= gen_rtx_fmt_e (cand->code, cand->mode, orig_src);
      rtx simplified_temp_extension = simplify_rtx (temp_extension);
      if (simplified_temp_extension)
        temp_extension = simplified_temp_extension;
      new_set = gen_rtx_SET (VOIDmode, new_reg, temp_extension);
    }

  /* This change is a part of a group of changes.  Hence,
     validate_change will not try to commit the change.  */
  if (validate_change (curr_insn, orig_set, new_set, true)
      && update_reg_equal_equiv_notes (curr_insn, cand->mode, orig_mode,
				       cand->code))
    {
      if (dump_file)
        {
          fprintf (dump_file,
		   "Tentatively merged extension with definition:\n");
          print_rtl_single (dump_file, curr_insn);
        }
      return true;
    }

  return false;
}
Example #10
void
init_caller_save ()
{
  char *first_obj = (char *) oballoc (0);
  rtx addr_reg;
  int offset;
  rtx address;
  int i, j;

  /* First find all the registers that we need to deal with and all
     the modes that they can have.  If we can't find a mode to use,
     we can't have the register live over calls.  */

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      if (call_used_regs[i] && ! call_fixed_regs[i])
	{
	  for (j = 1; j <= MOVE_MAX / UNITS_PER_WORD; j++)
	    {
	      regno_save_mode[i][j] = choose_hard_reg_mode (i, j);
	      if (regno_save_mode[i][j] == VOIDmode && j == 1)
		{
		  call_fixed_regs[i] = 1;
		  SET_HARD_REG_BIT (call_fixed_reg_set, i);
		}
	    }
	}
      else
	regno_save_mode[i][1] = VOIDmode;
    }

  /* The following code tries to approximate the conditions under which
     we can easily save and restore a register without scratch registers or
     other complexities.  It will usually work, except under conditions where
     the validity of an insn operand is dependent on the address offset.
     No such cases are currently known.

     We first find a typical offset from some BASE_REG_CLASS register.
     This address is chosen by finding the first register in the class
     and by finding the smallest power of two that is a valid offset from
     that register in every mode we will use to save registers.  */

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (TEST_HARD_REG_BIT (reg_class_contents[(int) BASE_REG_CLASS], i))
      break;

  if (i == FIRST_PSEUDO_REGISTER)
    abort ();

  addr_reg = gen_rtx_REG (Pmode, i);

  for (offset = 1 << (HOST_BITS_PER_INT / 2); offset; offset >>= 1)
    {
      address = gen_rtx_PLUS (Pmode, addr_reg, GEN_INT (offset));

      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	if (regno_save_mode[i][1] != VOIDmode
	  && ! strict_memory_address_p (regno_save_mode[i][1], address))
	  break;

      if (i == FIRST_PSEUDO_REGISTER)
	break;
    }

  /* If we didn't find a valid address, we must use register indirect.  */
  if (offset == 0)
    address = addr_reg;

  /* Next we try to form an insn to save and restore the register.  We
     see if such an insn is recognized and meets its constraints.  */

  start_sequence ();

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    for (j = 1; j <= MOVE_MAX / UNITS_PER_WORD; j++)
      if (regno_save_mode[i][j] != VOIDmode)
        {
	  rtx mem = gen_rtx_MEM (regno_save_mode[i][j], address);
	  rtx reg = gen_rtx_REG (regno_save_mode[i][j], i);
	  rtx savepat = gen_rtx_SET (VOIDmode, mem, reg);
	  rtx restpat = gen_rtx_SET (VOIDmode, reg, mem);
	  rtx saveinsn = emit_insn (savepat);
	  rtx restinsn = emit_insn (restpat);
	  int ok;

	  reg_save_code[i][j] = recog_memoized (saveinsn);
	  reg_restore_code[i][j] = recog_memoized (restinsn);

	  /* Now extract both insns and see if we can meet their
             constraints.  */
	  ok = (reg_save_code[i][j] != -1 && reg_restore_code[i][j] != -1);
	  if (ok)
	    {
	      insn_extract (saveinsn);
	      ok = constrain_operands (reg_save_code[i][j], 1);
	      insn_extract (restinsn);
	      ok &= constrain_operands (reg_restore_code[i][j], 1);
	    }

	  if (! ok)
	    {
	      regno_save_mode[i][j] = VOIDmode;
	      if (j == 1)
		{
		  call_fixed_regs[i] = 1;
		  SET_HARD_REG_BIT (call_fixed_reg_set, i);
		}
	    }
      }

  end_sequence ();

  obfree (first_obj);
}
Example #11
void
find_comparison_dom_walker::before_dom_children (basic_block bb)
{
  struct comparison *last_cmp;
  rtx insn, next, last_clobber;
  bool last_cmp_valid;
  bool need_purge = false;
  bitmap killed;

  killed = BITMAP_ALLOC (NULL);

  /* The last comparison that was made.  Will be reset to NULL
     once the flags are clobbered.  */
  last_cmp = NULL;

  /* True iff the last comparison has not been clobbered, nor
     have its inputs.  Used to eliminate duplicate compares.  */
  last_cmp_valid = false;

  /* The last insn that clobbered the flags, if that insn is of
     a form that may be valid for eliminating a following compare.
     To be reset to NULL once the flags are set otherwise.  */
  last_clobber = NULL;

  /* Propagate the last live comparison throughout the extended basic block. */
  if (single_pred_p (bb))
    {
      last_cmp = (struct comparison *) single_pred (bb)->aux;
      if (last_cmp)
	last_cmp_valid = last_cmp->inputs_valid;
    }

  for (insn = BB_HEAD (bb); insn; insn = next)
    {
      rtx src;

      next = (insn == BB_END (bb) ? NULL_RTX : NEXT_INSN (insn));
      if (!NONDEBUG_INSN_P (insn))
	continue;

      /* Compute the set of registers modified by this instruction.  */
      bitmap_clear (killed);
      df_simulate_find_defs (insn, killed);

      src = conforming_compare (insn);
      if (src)
	{
	  enum machine_mode src_mode = GET_MODE (src);
	  rtx eh_note = NULL;

	  if (flag_non_call_exceptions)
	    eh_note = find_reg_note (insn, REG_EH_REGION, NULL);

	  if (!last_cmp_valid)
	    goto dont_delete;

	  /* Take care that it's in the same EH region.  */
	  if (flag_non_call_exceptions
	      && !rtx_equal_p (eh_note, last_cmp->eh_note))
	    goto dont_delete;

	  /* Make sure the compare is redundant with the previous.  */
	  if (!rtx_equal_p (last_cmp->in_a, XEXP (src, 0))
	      || !rtx_equal_p (last_cmp->in_b, XEXP (src, 1)))
	    goto dont_delete;

	  /* New mode must be compatible with the previous compare mode.  */
	  {
	    enum machine_mode new_mode
	      = targetm.cc_modes_compatible (last_cmp->orig_mode, src_mode);
	    if (new_mode == VOIDmode)
	      goto dont_delete;

	    if (new_mode != last_cmp->orig_mode)
	      {
		rtx x, flags = gen_rtx_REG (src_mode, targetm.flags_regnum);

		/* Generate new comparison for substitution.  */
		x = gen_rtx_COMPARE (new_mode, XEXP (src, 0), XEXP (src, 1));
		x = gen_rtx_SET (VOIDmode, flags, x);

		if (!validate_change (last_cmp->insn,
				      &PATTERN (last_cmp->insn), x, false))
		  goto dont_delete;

		last_cmp->orig_mode = new_mode;
	      }
	  }

	  /* All tests and substitutions succeeded!  */
	  if (eh_note)
	    need_purge = true;
	  delete_insn (insn);
	  continue;

	dont_delete:
	  last_cmp = XCNEW (struct comparison);
	  last_cmp->insn = insn;
	  last_cmp->prev_clobber = last_clobber;
	  last_cmp->in_a = XEXP (src, 0);
	  last_cmp->in_b = XEXP (src, 1);
	  last_cmp->eh_note = eh_note;
	  last_cmp->orig_mode = src_mode;
	  all_compares.safe_push (last_cmp);

	  /* It's unusual, but be prepared for comparison patterns that
	     also clobber an input, or perhaps a scratch.  */
	  last_clobber = NULL;
	  last_cmp_valid = true;
	}

      /* Notice if this instruction kills the flags register.  */
      else if (bitmap_bit_p (killed, targetm.flags_regnum))
	{
	  /* See if this insn could be the "clobber" that eliminates
	     a future comparison.   */
	  last_clobber = (arithmetic_flags_clobber_p (insn) ? insn : NULL);

	  /* In either case, the previous compare is no longer valid.  */
	  last_cmp = NULL;
	  last_cmp_valid = false;
	  continue;
	}

      /* Notice if this instruction uses the flags register.  */
      else if (last_cmp)
	find_flags_uses_in_insn (last_cmp, insn);

      /* Notice if any of the inputs to the comparison have changed.  */
      if (last_cmp_valid
	  && (bitmap_bit_p (killed, REGNO (last_cmp->in_a))
	      || (REG_P (last_cmp->in_b)
		  && bitmap_bit_p (killed, REGNO (last_cmp->in_b)))))
	last_cmp_valid = false;
    }
Example #12
void
gen_int_relational (enum rtx_code code, /* relational test (EQ, etc) */
                    rtx result,		/* result to store comp. or 0 if branch */
                    rtx cmp0,		/* first operand to compare */
                    rtx cmp1,		/* second operand to compare */
                    rtx destination)	/* destination of the branch, or 0 if compare */
{
    enum machine_mode mode;
    int branch_p;

    mode = GET_MODE (cmp0);
    if (mode == VOIDmode)
        mode = GET_MODE (cmp1);

    /* Is this a branch or compare */
    branch_p = (destination != 0);

    /* Instruction set doesn't support LE or LT, so swap operands and use GE, GT */
    switch (code)
    {
    case LE:
    case LT:
    case LEU:
    case LTU:
        code = swap_condition (code);
        rtx temp = cmp0;
        cmp0 = cmp1;
        cmp1 = temp;
        break;
    default:
        break;
    }

    if (branch_p)
    {
        rtx insn;

        /* Operands must be in registers */
        if (!register_operand (cmp0, mode))
            cmp0 = force_reg (mode, cmp0);
        if (!register_operand (cmp1, mode))
            cmp1 = force_reg (mode, cmp1);

        /* Generate conditional branch instruction */
        rtx cond = gen_rtx_fmt_ee (code, mode, cmp0, cmp1);
        rtx label = gen_rtx_LABEL_REF (VOIDmode, destination);
        insn = gen_rtx_SET (VOIDmode, pc_rtx,
                            gen_rtx_IF_THEN_ELSE (VOIDmode,
                                    cond, label, pc_rtx));
        emit_jump_insn (insn);
    }
    else
    {
        /* We can't have const_ints in cmp0, other than 0 */
        if ((GET_CODE (cmp0) == CONST_INT) && (INTVAL (cmp0) != 0))
            cmp0 = force_reg (mode, cmp0);

        /* If the comparison is against an int not in legal range
           move it into a register */
        if (GET_CODE (cmp1) == CONST_INT)
        {
            HOST_WIDE_INT value = INTVAL (cmp1);
            switch (code)
            {
            case EQ:
            case NE:
            case LE:
            case LT:
            case GE:
            case GT:
                if (!MEDIUM_INT(value))
                    cmp1 = force_reg (mode, cmp1);
                break;
            case LEU:
            case LTU:
            case GEU:
            case GTU:
                if (!MEDIUM_UINT(value))
                    cmp1 = force_reg (mode, cmp1);
                break;
            default:
                abort ();
            }
        }

        /* Generate compare instruction */
        emit_move_insn (result, gen_rtx_fmt_ee (code, mode, cmp0, cmp1));
    }
}
Example #13
rtx
fr30_move_double (rtx * operands)
{
  rtx src  = operands[1];
  rtx dest = operands[0];
  enum rtx_code src_code = GET_CODE (src);
  enum rtx_code dest_code = GET_CODE (dest);
  enum machine_mode mode = GET_MODE (dest);
  rtx val;

  start_sequence ();

  if (dest_code == REG)
    {
      if (src_code == REG)
	{
	  int reverse = (REGNO (dest) == REGNO (src) + 1);
	  
	  /* We normally copy the low-numbered register first.  However, if
	     the first register of operand 0 is the same as the second register
	     of operand 1, we must copy in the opposite order.  */
	  emit_insn (gen_rtx_SET (VOIDmode,
				  operand_subword (dest, reverse, TRUE, mode),
				  operand_subword (src,  reverse, TRUE, mode)));
	  
	  emit_insn (gen_rtx_SET (VOIDmode,
			      operand_subword (dest, !reverse, TRUE, mode),
			      operand_subword (src,  !reverse, TRUE, mode)));
	}
      else if (src_code == MEM)
	{
	  rtx addr = XEXP (src, 0);
	  int dregno = REGNO (dest);
	  rtx dest0 = operand_subword (dest, 0, TRUE, mode);
	  rtx dest1 = operand_subword (dest, 1, TRUE, mode);
	  rtx new_mem;
	  
	  gcc_assert (GET_CODE (addr) == REG);
	  
	  /* Copy the address before clobbering it.  See PR 34174.  */
	  emit_insn (gen_rtx_SET (SImode, dest1, addr));
	  emit_insn (gen_rtx_SET (VOIDmode, dest0,
				  adjust_address (src, SImode, 0)));
	  emit_insn (gen_rtx_SET (SImode, dest1,
				  plus_constant (dest1, UNITS_PER_WORD)));

	  new_mem = gen_rtx_MEM (SImode, dest1);
	  MEM_COPY_ATTRIBUTES (new_mem, src);
	      
	  emit_insn (gen_rtx_SET (VOIDmode, dest1, new_mem));
	}
      else if (src_code == CONST_INT || src_code == CONST_DOUBLE)
	{
	  rtx words[2];
	  split_double (src, &words[0], &words[1]);
	  emit_insn (gen_rtx_SET (VOIDmode,
				  operand_subword (dest, 0, TRUE, mode),
				  words[0]));
      
	  emit_insn (gen_rtx_SET (VOIDmode,
				  operand_subword (dest, 1, TRUE, mode),
				  words[1]));
	}
    }
  else if (src_code == REG && dest_code == MEM)
    {
      rtx addr = XEXP (dest, 0);
      rtx src0;
      rtx src1;

      gcc_assert (GET_CODE (addr) == REG);

      src0 = operand_subword (src, 0, TRUE, mode);
      src1 = operand_subword (src, 1, TRUE, mode);

      emit_move_insn (adjust_address (dest, SImode, 0), src0);

      if (REGNO (addr) == STACK_POINTER_REGNUM
	  || REGNO (addr) == FRAME_POINTER_REGNUM)
	emit_insn (gen_rtx_SET (VOIDmode,
				adjust_address (dest, SImode, UNITS_PER_WORD),
				src1));
      else
	{
	  rtx new_mem;
	  rtx scratch_reg_r0 = gen_rtx_REG (SImode, 0);

	  /* We need a scratch register to hold the value of 'address + 4'.
	     We use r0 for this purpose. It is used for example for long
	     jumps and is already marked to not be used by normal register
	     allocation.  */
	  emit_insn (gen_movsi_internal (scratch_reg_r0, addr));
	  emit_insn (gen_addsi_small_int (scratch_reg_r0, scratch_reg_r0,
					  GEN_INT (UNITS_PER_WORD)));
	  new_mem = gen_rtx_MEM (SImode, scratch_reg_r0);
	  MEM_COPY_ATTRIBUTES (new_mem, dest);
	  emit_move_insn (new_mem, src1);
	  emit_insn (gen_blockage ());
	}
    }
  else
    /* This should have been prevented by the constraints on movdi_insn.  */
    gcc_unreachable ();

  val = get_insns ();
  end_sequence ();

  return val;
}
Example #14
void
eliminate_regs_in_insn (rtx_insn *insn, bool replace_p, bool first_p,
			HOST_WIDE_INT update_sp_offset)
{
  int icode = recog_memoized (insn);
  rtx old_set = single_set (insn);
  bool validate_p;
  int i;
  rtx substed_operand[MAX_RECOG_OPERANDS];
  rtx orig_operand[MAX_RECOG_OPERANDS];
  struct lra_elim_table *ep;
  rtx plus_src, plus_cst_src;
  lra_insn_recog_data_t id;
  struct lra_static_insn_data *static_id;

  if (icode < 0 && asm_noperands (PATTERN (insn)) < 0 && ! DEBUG_INSN_P (insn))
    {
      lra_assert (GET_CODE (PATTERN (insn)) == USE
		  || GET_CODE (PATTERN (insn)) == CLOBBER
		  || GET_CODE (PATTERN (insn)) == ASM_INPUT);
      return;
    }

  /* Check for setting an eliminable register.	*/
  if (old_set != 0 && REG_P (SET_DEST (old_set))
      && (ep = get_elimination (SET_DEST (old_set))) != NULL)
    {
      for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
	if (ep->from_rtx == SET_DEST (old_set) && ep->can_eliminate)
	  {
	    bool delete_p = replace_p;
	    
#ifdef HARD_FRAME_POINTER_REGNUM
	    if (ep->from == FRAME_POINTER_REGNUM
		&& ep->to == HARD_FRAME_POINTER_REGNUM)
	      /* If this is setting the frame pointer register to the
		 hardware frame pointer register and this is an
		 elimination that will be done (tested above), this
		 insn is really adjusting the frame pointer downward
		 to compensate for the adjustment done before a
		 nonlocal goto.  */
	      {
		rtx src = SET_SRC (old_set);
		rtx off = remove_reg_equal_offset_note (insn, ep->to_rtx);
		
		/* We should never process such insn with non-zero
		   UPDATE_SP_OFFSET.  */
		lra_assert (update_sp_offset == 0);
		
		if (off != NULL_RTX
		    || src == ep->to_rtx
		    || (GET_CODE (src) == PLUS
			&& XEXP (src, 0) == ep->to_rtx
			&& CONST_INT_P (XEXP (src, 1))))
		  {
		    HOST_WIDE_INT offset;
		    
		    if (replace_p)
		      {
			SET_DEST (old_set) = ep->to_rtx;
			lra_update_insn_recog_data (insn);
			return;
		      }
		    offset = (off != NULL_RTX ? INTVAL (off)
			      : src == ep->to_rtx ? 0 : INTVAL (XEXP (src, 1)));
		    offset -= (ep->offset - ep->previous_offset);
		    src = plus_constant (Pmode, ep->to_rtx, offset);
		    
		    /* First see if this insn remains valid when we
		       make the change.  If not, keep the INSN_CODE
		       the same and let the constraint pass fix it
		       up.  */
		    validate_change (insn, &SET_SRC (old_set), src, 1);
		    validate_change (insn, &SET_DEST (old_set),
				     ep->from_rtx, 1);
		    if (! apply_change_group ())
		      {
			SET_SRC (old_set) = src;
			SET_DEST (old_set) = ep->from_rtx;
		      }
		    lra_update_insn_recog_data (insn);
		    /* Add offset note for future updates.  */
		    add_reg_note (insn, REG_EQUAL, src);
		    return;
		  }
	      }
#endif
	    
	    /* This insn isn't serving a useful purpose.  We delete it
	       when REPLACE is set.  */
	    if (delete_p)
	      lra_delete_dead_insn (insn);
	    return;
	  }
    }

  /* We allow one special case which happens to work on all machines we
     currently support: a single set with the source or a REG_EQUAL
     note being a PLUS of an eliminable register and a constant.  */
  plus_src = plus_cst_src = 0;
  if (old_set && REG_P (SET_DEST (old_set)))
    {
      if (GET_CODE (SET_SRC (old_set)) == PLUS)
	plus_src = SET_SRC (old_set);
      /* First see if the source is of the form (plus (...) CST).  */
      if (plus_src
	  && CONST_INT_P (XEXP (plus_src, 1)))
	plus_cst_src = plus_src;
      /* Check that the first operand of the PLUS is a hard reg or
	 the lowpart subreg of one.  */
      if (plus_cst_src)
	{
	  rtx reg = XEXP (plus_cst_src, 0);

	  if (GET_CODE (reg) == SUBREG && subreg_lowpart_p (reg))
	    reg = SUBREG_REG (reg);

	  if (!REG_P (reg) || REGNO (reg) >= FIRST_PSEUDO_REGISTER)
	    plus_cst_src = 0;
	}
    }
  if (plus_cst_src)
    {
      rtx reg = XEXP (plus_cst_src, 0);
      HOST_WIDE_INT offset = INTVAL (XEXP (plus_cst_src, 1));

      if (GET_CODE (reg) == SUBREG)
	reg = SUBREG_REG (reg);

      if (REG_P (reg) && (ep = get_elimination (reg)) != NULL)
	{
	  rtx to_rtx = replace_p ? ep->to_rtx : ep->from_rtx;

	  if (! replace_p)
	    {
	      if (update_sp_offset == 0)
		offset += (ep->offset - ep->previous_offset);
	      if (ep->to_rtx == stack_pointer_rtx)
		{
		  if (first_p)
		    offset -= lra_get_insn_recog_data (insn)->sp_offset;
		  else
		    offset += update_sp_offset;
		}
	      offset = trunc_int_for_mode (offset, GET_MODE (plus_cst_src));
	    }

	  if (GET_CODE (XEXP (plus_cst_src, 0)) == SUBREG)
	    to_rtx = gen_lowpart (GET_MODE (XEXP (plus_cst_src, 0)), to_rtx);
	  /* If we have a nonzero offset, and the source is already a
	     simple REG, the following transformation would increase
	     the cost of the insn by replacing a simple REG with (plus
	     (reg sp) CST).  So try only when we already had a PLUS
	     before.  */
	  if (offset == 0 || plus_src)
	    {
	      rtx new_src = plus_constant (GET_MODE (to_rtx), to_rtx, offset);

	      old_set = single_set (insn);

	      /* First see if this insn remains valid when we make the
		 change.  If not, try to replace the whole pattern
		 with a simple set (this may help if the original insn
		 was a PARALLEL that was only recognized as single_set
		 due to REG_UNUSED notes).  If this isn't valid
		 either, keep the INSN_CODE the same and let the
		 constraint pass fix it up.  */
	      if (! validate_change (insn, &SET_SRC (old_set), new_src, 0))
		{
		  rtx new_pat = gen_rtx_SET (SET_DEST (old_set), new_src);

		  if (! validate_change (insn, &PATTERN (insn), new_pat, 0))
		    SET_SRC (old_set) = new_src;
		}
	      lra_update_insn_recog_data (insn);
	      /* This can't have an effect on elimination offsets, so skip
		 right to the end.  */
	      return;
	    }
	}
    }

  /* Eliminate all eliminable registers occurring in operands that
     can be handled by the constraint pass.  */
  id = lra_get_insn_recog_data (insn);
  static_id = id->insn_static_data;
  validate_p = false;
  for (i = 0; i < static_id->n_operands; i++)
    {
      orig_operand[i] = *id->operand_loc[i];
      substed_operand[i] = *id->operand_loc[i];

      /* For an asm statement, every operand is eliminable.  */
      if (icode < 0 || insn_data[icode].operand[i].eliminable)
	{
	  /* Check for setting a hard register that we know about.  */
	  if (static_id->operand[i].type != OP_IN
	      && REG_P (orig_operand[i]))
	    {
	      /* If we are assigning to a hard register that can be
		 eliminated, it must be as part of a PARALLEL, since
		 the code above handles single SETs.  This reg can no
		 longer be eliminated -- it is forced by
		 mark_not_eliminable.  */
	      for (ep = reg_eliminate;
		   ep < &reg_eliminate[NUM_ELIMINABLE_REGS];
		   ep++)
		lra_assert (ep->from_rtx != orig_operand[i]
			    || ! ep->can_eliminate);
	    }

	  /* Companion to the above plus substitution, we can allow
	     invariants as the source of a plain move.	*/
	  substed_operand[i]
	    = lra_eliminate_regs_1 (insn, *id->operand_loc[i], VOIDmode,
				    replace_p, ! replace_p && ! first_p,
				    update_sp_offset, first_p);
	  if (substed_operand[i] != orig_operand[i])
	    validate_p = true;
	}
    }

  if (! validate_p)
    return;

  /* Substitute the operands; the new values are in the substed_operand
     array.  */
  for (i = 0; i < static_id->n_operands; i++)
    *id->operand_loc[i] = substed_operand[i];
  for (i = 0; i < static_id->n_dups; i++)
    *id->dup_loc[i] = substed_operand[(int) static_id->dup_num[i]];

  /* If we had a move insn but now we don't, re-recognize it.
     This will cause spurious re-recognition if the old move had a
     PARALLEL since the new one still will, but we can't call
     single_set without having put new body into the insn and the
     re-recognition won't hurt in this rare case.  */
  id = lra_update_insn_recog_data (insn);
  static_id = id->insn_static_data;
}
Example #15
/* Functions to expand load_multiple and store_multiple.
   They are auxiliary extern functions that help create rtx templates.
   Check nds32-multiple.md file for the patterns.  */
rtx
nds32_expand_load_multiple (int base_regno, int count,
			    rtx base_addr, rtx basemem,
			    bool update_base_reg_p,
			    rtx *update_base_reg)
{
  int par_index;
  int offset;
  int start_idx;
  rtx result;
  rtx new_addr, mem, reg;

  /* Generate an unaligned load to prevent the load instruction from being
     pulled out of the parallel; otherwise it would become lwi and the
     unaligned access would be lost.  */
  if (count == 1)
    {
      reg = gen_rtx_REG (SImode, base_regno);
      if (update_base_reg_p)
	{
	  *update_base_reg = gen_reg_rtx (SImode);
	  return gen_unaligned_load_update_base_w (*update_base_reg, reg, base_addr);
	}
      else
	return gen_unaligned_load_w (reg, gen_rtx_MEM (SImode, base_addr));
    }

  /* Create the pattern that is presented in nds32-multiple.md.  */
  if (update_base_reg_p)
    {
      result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + 1));
      start_idx = 1;
    }
  else
    {
      result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
      start_idx = 0;
    }

  if (update_base_reg_p)
    {
      offset           = count * 4;
      new_addr         = plus_constant (Pmode, base_addr, offset);
      *update_base_reg = gen_reg_rtx (SImode);

      XVECEXP (result, 0, 0) = gen_rtx_SET (*update_base_reg, new_addr);
    }

  for (par_index = 0; par_index < count; par_index++)
    {
      offset   = par_index * 4;
      /* 4-byte for loading data to each register.  */
      new_addr = plus_constant (Pmode, base_addr, offset);
      mem      = adjust_automodify_address_nv (basemem, SImode,
					       new_addr, offset);
      reg      = gen_rtx_REG (SImode, base_regno + par_index);

      XVECEXP (result, 0, (par_index + start_idx)) = gen_rtx_SET (reg, mem);
    }

  return result;
}
Example #16
File: lm32.c Project: boomeer/gcc
static void
gen_int_relational (enum rtx_code code,	
		    rtx result,	
		    rtx cmp0,	
		    rtx cmp1,	
		    rtx destination)	
{
  machine_mode mode;
  int branch_p;

  mode = GET_MODE (cmp0);
  if (mode == VOIDmode)
    mode = GET_MODE (cmp1);

  /* Is this a branch or compare.  */
  branch_p = (destination != 0);

  /* Instruction set doesn't support LE or LT, so swap operands and use 
     GE, GT.  */
  switch (code)
    {
    case LE:
    case LT:
    case LEU:
    case LTU:
      {
	rtx temp;

	code = swap_condition (code);
	temp = cmp0;
	cmp0 = cmp1;
	cmp1 = temp;
	break;
      }
    default:
      break;
    }

  if (branch_p)
    {
      rtx insn, cond, label;

      /* Operands must be in registers.  */
      if (!register_operand (cmp0, mode))
	cmp0 = force_reg (mode, cmp0);
      if (!register_operand (cmp1, mode))
	cmp1 = force_reg (mode, cmp1);

      /* Generate conditional branch instruction.  */
      cond = gen_rtx_fmt_ee (code, mode, cmp0, cmp1);
      label = gen_rtx_LABEL_REF (VOIDmode, destination);
      insn = gen_rtx_SET (pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode,
							cond, label, pc_rtx));
      emit_jump_insn (insn);
    }
  else
    {
      /* We can't have const_ints in cmp0, other than 0.  */
      if ((GET_CODE (cmp0) == CONST_INT) && (INTVAL (cmp0) != 0))
	cmp0 = force_reg (mode, cmp0);

      /* If the comparison is against an int not in legal range
         move it into a register.  */
      if (GET_CODE (cmp1) == CONST_INT)
	{
	  switch (code)
	    {
	    case EQ:
	    case NE:
	    case LE:
	    case LT:
	    case GE:
	    case GT:
	      if (!satisfies_constraint_K (cmp1))
		cmp1 = force_reg (mode, cmp1);
	      break;
	    case LEU:
	    case LTU:
	    case GEU:
	    case GTU:
	      if (!satisfies_constraint_L (cmp1))
		cmp1 = force_reg (mode, cmp1);
	      break;
	    default:
	      gcc_unreachable ();
	    }
	}

      /* Generate compare instruction.  */
      emit_move_insn (result, gen_rtx_fmt_ee (code, mode, cmp0, cmp1));
    }
}
Example #17
static bool
combine_set_extension (ext_cand *cand, rtx_insn *curr_insn, rtx *orig_set)
{
  rtx orig_src = SET_SRC (*orig_set);
  machine_mode orig_mode = GET_MODE (SET_DEST (*orig_set));
  rtx new_set;
  rtx cand_pat = PATTERN (cand->insn);

  /* If the extension's source/destination registers are not the same
     then we need to change the original load to reference the destination
     of the extension.  Then we need to emit a copy from that destination
     to the original destination of the load.  */
  rtx new_reg;
  bool copy_needed
    = (REGNO (SET_DEST (cand_pat)) != REGNO (XEXP (SET_SRC (cand_pat), 0)));
  if (copy_needed)
    new_reg = gen_rtx_REG (cand->mode, REGNO (SET_DEST (cand_pat)));
  else
    new_reg = gen_rtx_REG (cand->mode, REGNO (SET_DEST (*orig_set)));

#if 0
  /* Rethinking test.  Temporarily disabled.  */
  /* We're going to be widening the result of DEF_INSN, ensure that doing so
     doesn't change the number of hard registers needed for the result.  */
  if (HARD_REGNO_NREGS (REGNO (new_reg), cand->mode)
      != HARD_REGNO_NREGS (REGNO (SET_DEST (*orig_set)),
			   GET_MODE (SET_DEST (*orig_set))))
	return false;
#endif

  /* Merge constants by directly moving the constant into the register under
     some conditions.  Recall that RTL constants are sign-extended.  */
  if (GET_CODE (orig_src) == CONST_INT
      && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (cand->mode))
    {
      if (INTVAL (orig_src) >= 0 || cand->code == SIGN_EXTEND)
	new_set = gen_rtx_SET (new_reg, orig_src);
      else
	{
	  /* Zero-extend the negative constant by masking out the bits outside
	     the source mode.  */
	  rtx new_const_int
	    = gen_int_mode (INTVAL (orig_src) & GET_MODE_MASK (orig_mode),
			    GET_MODE (new_reg));
	  new_set = gen_rtx_SET (new_reg, new_const_int);
	}
    }
  else if (GET_MODE (orig_src) == VOIDmode)
    {
      /* This is mostly due to a call insn that should not be optimized.  */
      return false;
    }
  else if (GET_CODE (orig_src) == cand->code)
    {
      /* Here is a sequence of two extensions.  Try to merge them.  */
      rtx temp_extension
	= gen_rtx_fmt_e (cand->code, cand->mode, XEXP (orig_src, 0));
      rtx simplified_temp_extension = simplify_rtx (temp_extension);
      if (simplified_temp_extension)
        temp_extension = simplified_temp_extension;
      new_set = gen_rtx_SET (new_reg, temp_extension);
    }
  else if (GET_CODE (orig_src) == IF_THEN_ELSE)
    {
      /* Only IF_THEN_ELSE of phi-type copies are combined.  Otherwise,
         in general, IF_THEN_ELSE should not be combined.  */
      return false;
    }
  else
    {
      /* This is the normal case.  */
      rtx temp_extension
	= gen_rtx_fmt_e (cand->code, cand->mode, orig_src);
      rtx simplified_temp_extension = simplify_rtx (temp_extension);
      if (simplified_temp_extension)
        temp_extension = simplified_temp_extension;
      new_set = gen_rtx_SET (new_reg, temp_extension);
    }

  /* This change is a part of a group of changes.  Hence,
     validate_change will not try to commit the change.  */
  if (validate_change (curr_insn, orig_set, new_set, true)
      && update_reg_equal_equiv_notes (curr_insn, cand->mode, orig_mode,
				       cand->code))
    {
      if (dump_file)
        {
          fprintf (dump_file,
		   "Tentatively merged extension with definition %s:\n",
		   (copy_needed) ? "(copy needed)" : "");
          print_rtl_single (dump_file, curr_insn);
        }
      return true;
    }

  return false;
}