Example #1
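/* A post-reload load-elimination pass (apparently from a GCC patch):
   track loads per basic block in a hash table, and when a second load
   from the same location is seen while the first value is still
   available in a register, replace it with a register-register move.  */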
static unsigned int
postreload_load (void)
{
  basic_block bb;

  init_alias_analysis ();

  FOR_EACH_BB (bb)
    {
      rtx insn;

      htab_load = htab_create (10, load_htab_hash, load_htab_eq, NULL);

      FOR_BB_INSNS (bb, insn)
	{
	  rtx set;
	  struct load **load;

	  /* Set reg_kill, invalidate entries if there is an
	     aliasing store or if the registers making up the address
	     change.  */
	  htab_traverse_noresize
	    (htab_load, find_reg_kill_and_mem_invalidate, insn);	

	  set = single_set (insn);
	  if (interesting_second_load (set, &load, insn))
	    {
	      rtx move;

	      move = gen_move_insn (SET_DEST (set), (*load)->reg);
	      /* Make sure we can generate a move.  */
	      extract_insn (move);
	      if (! constrain_operands (1))
		continue;

	      move = emit_insn_before (move, (*load)->reg_kill);

	      if (dump_file)
		{
		  fputs ("Replaced this load:\n  ", dump_file);
		  print_inline_rtx (dump_file, insn, 2);
		  fputs ("\n  with this move:\n  ", dump_file);
		  print_inline_rtx (dump_file, move, 2);
		  fputs ("\n\n", dump_file);
		}

	      /* Delete the load only after dumping it, so the dump shows
		 the original insn rather than a deleted-insn note.  */
	      delete_insn (insn);
	    }
	  else if (interesting_load (set))
	    alloc_load (set);
	  else if (CALL_P (insn))
	    htab_empty (htab_load);
	}

      /* Free the table created for this block rather than merely
	 emptying it, since a fresh one is created per block.  */
      htab_delete (htab_load);
    }

  end_alias_analysis ();
  return 0;
}
Example #2
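/* Emit a DEST_REG := SRC_REG move immediately before NEXT_INSN and
   return the emitted insn sequence.  */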
static rtx
insert_move_insn_before (rtx next_insn, rtx dest_reg, rtx src_reg)
{
  rtx insns;

  start_sequence ();
  emit_move_insn (dest_reg, src_reg);
  insns = get_insns ();
  end_sequence ();
  emit_insn_before (insns, next_insn);
  return insns;
}
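
A minimal, hypothetical call site for the helper above (the names
`val' and `insn' are illustrative, not from the original project):

  /* Copy VAL into a fresh pseudo of the same mode, just before INSN.  */
  rtx copy = gen_reg_rtx (GET_MODE (val));
  insert_move_insn_before (insn, copy, val);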
Example #3
File: sibcall.c Project: robinsonb5/zpugcc
void
replace_call_placeholder (rtx insn, sibcall_use_t use)
{
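  /* A CALL_PLACEHOLDER pattern carries alternative insn sequences:
     operand 0 is the normal call, 1 the sibcall, 2 the tail-recursion
     sequence, and 3 the tail recursion label.  Emit the one USE selects.  */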
  if (use == sibcall_use_tail_recursion)
    emit_insn_before (XEXP (PATTERN (insn), 2), insn);
  else if (use == sibcall_use_sibcall)
    emit_insn_before (XEXP (PATTERN (insn), 1), insn);
  else if (use == sibcall_use_normal)
    emit_insn_before (XEXP (PATTERN (insn), 0), insn);
  else
    abort ();

  /* Turn off LABEL_PRESERVE_P for the tail recursion label if it
     exists.  We only had to set it long enough to keep the jump
     pass above from deleting it as unused.  */
  if (XEXP (PATTERN (insn), 3))
    LABEL_PRESERVE_P (XEXP (PATTERN (insn), 3)) = 0;

  /* "Delete" the placeholder insn.  */
  remove_insn (insn);
}
Example #4
// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
static void kernexec_instrument_retaddr_or(rtx insn)
{
	rtx orq;
	rtvec argvec, constraintvec, labelvec;
	int line;

	// create asm volatile("orq %%r10,(%%rsp)":::)
	argvec = rtvec_alloc(0);
	constraintvec = rtvec_alloc(0);
	labelvec = rtvec_alloc(0);
	line = expand_location(RTL_LOCATION(insn)).line;
	orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
	MEM_VOLATILE_P(orq) = 1;
//	RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
	emit_insn_before(orq, insn);
}
Example #5
static unsigned int arm_pertask_ssp_rtl_execute(void)
{
	rtx_insn *insn;

	for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
		const char *sym;
		rtx body;
		rtx mask, masked_sp;

		/*
		 * Find a SET insn involving a SYMBOL_REF to __stack_chk_guard
		 */
		if (!INSN_P(insn))
			continue;
		body = PATTERN(insn);
		if (GET_CODE(body) != SET ||
		    GET_CODE(SET_SRC(body)) != SYMBOL_REF)
			continue;
		sym = XSTR(SET_SRC(body), 0);
		if (strcmp(sym, "__stack_chk_guard"))
			continue;

		/*
		 * Replace the source of the SET insn with an expression that
		 * produces the address of the copy of the stack canary value
		 * stored in struct thread_info
		 */
		mask = GEN_INT(sext_hwi(sp_mask, GET_MODE_PRECISION(Pmode)));
		masked_sp = gen_reg_rtx(Pmode);

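		/* gen_rtx_set is presumably the compatibility wrapper from
		   the kernel's gcc-common.h, expanding to GCC's gen_rtx_SET.  */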
		emit_insn_before(gen_rtx_set(masked_sp,
					     gen_rtx_AND(Pmode,
							 stack_pointer_rtx,
							 mask)),
				 insn);

		SET_SRC(body) = gen_rtx_PLUS(Pmode, masked_sp,
					     GEN_INT(canary_offset));
	}
	return 0;
}
Example #6
static int
optimize_mode_switching (void)
{
  int e;
  basic_block bb;
  bool need_commit = false;
  static const int num_modes[] = NUM_MODES_FOR_MODE_SWITCHING;
#define N_ENTITIES ARRAY_SIZE (num_modes)
  int entity_map[N_ENTITIES];
  struct bb_info *bb_info[N_ENTITIES];
  int i, j;
  int n_entities = 0;
  int max_num_modes = 0;
  bool emitted ATTRIBUTE_UNUSED = false;
  basic_block post_entry = 0;
  basic_block pre_exit = 0;
  struct edge_list *edge_list = 0;

  /* These bitmaps are used for the LCM algorithm.  */
  sbitmap *kill, *del, *insert, *antic, *transp, *comp;
  sbitmap *avin, *avout;

  for (e = N_ENTITIES - 1; e >= 0; e--)
    if (OPTIMIZE_MODE_SWITCHING (e))
      {
	int entry_exit_extra = 0;

	/* Create the list of segments within each basic block.
	   If NORMAL_MODE is defined, allow for extra blocks split
	   from the entry and exit block.  */
	if (targetm.mode_switching.entry && targetm.mode_switching.exit)
	  entry_exit_extra = 3;

	bb_info[n_entities]
	  = XCNEWVEC (struct bb_info,
		      last_basic_block_for_fn (cfun) + entry_exit_extra);
	entity_map[n_entities++] = e;
	if (num_modes[e] > max_num_modes)
	  max_num_modes = num_modes[e];
      }

  if (! n_entities)
    return 0;

  /* Make sure if MODE_ENTRY is defined MODE_EXIT is defined.  */
  gcc_assert ((targetm.mode_switching.entry && targetm.mode_switching.exit)
	      || (!targetm.mode_switching.entry
		  && !targetm.mode_switching.exit));

  if (targetm.mode_switching.entry && targetm.mode_switching.exit)
    {
      /* Split the edge from the entry block, so that we can note that
	 NORMAL_MODE is supplied there.  */
      post_entry = split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
      pre_exit = create_pre_exit (n_entities, entity_map, num_modes);
    }

  df_analyze ();

  /* Create the bitmap vectors.  */
  antic = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
				n_entities * max_num_modes);
  transp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
				 n_entities * max_num_modes);
  comp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
			       n_entities * max_num_modes);
  avin = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
			       n_entities * max_num_modes);
  avout = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
				n_entities * max_num_modes);
  kill = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
			       n_entities * max_num_modes);

  bitmap_vector_ones (transp, last_basic_block_for_fn (cfun));
  bitmap_vector_clear (antic, last_basic_block_for_fn (cfun));
  bitmap_vector_clear (comp, last_basic_block_for_fn (cfun));

  for (j = n_entities - 1; j >= 0; j--)
    {
      int e = entity_map[j];
      int no_mode = num_modes[e];
      struct bb_info *info = bb_info[j];
      rtx_insn *insn;

      /* Determine what mode (if any) the first use of entity E in each
	 block needs; that mode is the one that is anticipatable for the
	 block.  Also compute the initial transparency settings.  */
      FOR_EACH_BB_FN (bb, cfun)
	{
	  struct seginfo *ptr;
	  int last_mode = no_mode;
	  bool any_set_required = false;
	  HARD_REG_SET live_now;

	  info[bb->index].mode_out = info[bb->index].mode_in = no_mode;

	  REG_SET_TO_HARD_REG_SET (live_now, df_get_live_in (bb));

	  /* Pretend the mode is clobbered across abnormal edges.  */
	  {
	    edge_iterator ei;
	    edge eg;
	    FOR_EACH_EDGE (eg, ei, bb->preds)
	      if (eg->flags & EDGE_COMPLEX)
		break;
	    if (eg)
	      {
		rtx_insn *ins_pos = BB_HEAD (bb);
		if (LABEL_P (ins_pos))
		  ins_pos = NEXT_INSN (ins_pos);
		gcc_assert (NOTE_INSN_BASIC_BLOCK_P (ins_pos));
		if (ins_pos != BB_END (bb))
		  ins_pos = NEXT_INSN (ins_pos);
		ptr = new_seginfo (no_mode, ins_pos, bb->index, live_now);
		add_seginfo (info + bb->index, ptr);
		for (i = 0; i < no_mode; i++)
		  clear_mode_bit (transp[bb->index], j, i);
	      }
	  }

	  FOR_BB_INSNS (bb, insn)
	    {
	      if (INSN_P (insn))
		{
		  int mode = targetm.mode_switching.needed (e, insn);
		  rtx link;

		  if (mode != no_mode && mode != last_mode)
		    {
		      any_set_required = true;
		      last_mode = mode;
		      ptr = new_seginfo (mode, insn, bb->index, live_now);
		      add_seginfo (info + bb->index, ptr);
		      for (i = 0; i < no_mode; i++)
			clear_mode_bit (transp[bb->index], j, i);
		    }

		  if (targetm.mode_switching.after)
		    last_mode = targetm.mode_switching.after (e, last_mode,
							      insn);

		  /* Update LIVE_NOW.  */
		  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
		    if (REG_NOTE_KIND (link) == REG_DEAD)
		      reg_dies (XEXP (link, 0), &live_now);

		  note_stores (PATTERN (insn), reg_becomes_live, &live_now);
		  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
		    if (REG_NOTE_KIND (link) == REG_UNUSED)
		      reg_dies (XEXP (link, 0), &live_now);
		}
	    }

	  info[bb->index].computing = last_mode;
	  /* Check for blocks without ANY mode requirements.
	     N.B. because of MODE_AFTER, last_mode might still
	     be different from no_mode, in which case we need to
	     mark the block as nontransparent.  */
	  if (!any_set_required)
	    {
	      ptr = new_seginfo (no_mode, BB_END (bb), bb->index, live_now);
	      add_seginfo (info + bb->index, ptr);
	      if (last_mode != no_mode)
		for (i = 0; i < no_mode; i++)
		  clear_mode_bit (transp[bb->index], j, i);
	    }
	}
      if (targetm.mode_switching.entry && targetm.mode_switching.exit)
	{
	  int mode = targetm.mode_switching.entry (e);

	  info[post_entry->index].mode_out =
	    info[post_entry->index].mode_in = no_mode;
	  if (pre_exit)
	    {
	      info[pre_exit->index].mode_out =
		info[pre_exit->index].mode_in = no_mode;
	    }

	  if (mode != no_mode)
	    {
	      bb = post_entry;

	      /* By always making this nontransparent, we save
		 an extra check in make_preds_opaque.  We also
		 need this to avoid confusing pre_edge_lcm when
		 antic is cleared but transp and comp are set.  */
	      for (i = 0; i < no_mode; i++)
		clear_mode_bit (transp[bb->index], j, i);

	      /* Insert a fake computing definition of MODE into entry
		 blocks which compute no mode. This represents the mode on
		 entry.  */
	      info[bb->index].computing = mode;

	      if (pre_exit)
		info[pre_exit->index].seginfo->mode =
		  targetm.mode_switching.exit (e);
	    }
	}

      /* Set the anticipatable and computing arrays.  */
      for (i = 0; i < no_mode; i++)
	{
	  int m = targetm.mode_switching.priority (entity_map[j], i);

	  FOR_EACH_BB_FN (bb, cfun)
	    {
	      if (info[bb->index].seginfo->mode == m)
		set_mode_bit (antic[bb->index], j, m);

	      if (info[bb->index].computing == m)
		set_mode_bit (comp[bb->index], j, m);
	    }
	}
    }

  /* Calculate the optimal locations for placing mode switches to
     modes with priority I.  */

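  /* KILL is just the complement of transparency: a mode is killed in
     every block that is not transparent for it.  */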
  FOR_EACH_BB_FN (bb, cfun)
    bitmap_not (kill[bb->index], transp[bb->index]);

  edge_list = pre_edge_lcm_avs (n_entities * max_num_modes, transp, comp, antic,
				kill, avin, avout, &insert, &del);

  for (j = n_entities - 1; j >= 0; j--)
    {
      int no_mode = num_modes[entity_map[j]];

      /* Insert all mode sets that have been inserted by lcm.  */

      for (int ed = NUM_EDGES (edge_list) - 1; ed >= 0; ed--)
	{
	  edge eg = INDEX_EDGE (edge_list, ed);

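	  /* AUX of -1 means no mode needs to be inserted on this edge.  */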
	  eg->aux = (void *)(intptr_t)-1;

	  for (i = 0; i < no_mode; i++)
	    {
	      int m = targetm.mode_switching.priority (entity_map[j], i);
	      if (mode_bit_p (insert[ed], j, m))
		{
		  eg->aux = (void *)(intptr_t)m;
		  break;
		}
	    }
	}

      FOR_EACH_BB_FN (bb, cfun)
	{
	  struct bb_info *info = bb_info[j];
	  int last_mode = no_mode;

	  /* Initialize mode-out availability for bb.  */
	  for (i = 0; i < no_mode; i++)
	    if (mode_bit_p (avout[bb->index], j, i))
	      {
		if (last_mode == no_mode)
		  last_mode = i;
		if (last_mode != i)
		  {
		    last_mode = no_mode;
		    break;
		  }
	      }
	  info[bb->index].mode_out = last_mode;

	  /* Initialize mode-in availability for bb.  */
	  last_mode = no_mode;
	  for (i = 0; i < no_mode; i++)
	    if (mode_bit_p (avin[bb->index], j, i))
	      {
		if (last_mode == no_mode)
		  last_mode = i;
		if (last_mode != i)
		  {
		    last_mode = no_mode;
		    break;
		  }
	      }
	  info[bb->index].mode_in = last_mode;

	  for (i = 0; i < no_mode; i++)
	    if (mode_bit_p (del[bb->index], j, i))
	      info[bb->index].seginfo->mode = no_mode;
	}

      /* Now output the remaining mode sets in all the segments.  */

      /* In case no mode was inserted, the mode information on the edges
	 might not be complete.
	 Update mode info on edges and commit pending mode sets.  */
      need_commit |= commit_mode_sets (edge_list, entity_map[j], bb_info[j]);

      /* Reset modes for next entity.  */
      clear_aux_for_edges ();

      FOR_EACH_BB_FN (bb, cfun)
	{
	  struct seginfo *ptr, *next;
	  int cur_mode = bb_info[j][bb->index].mode_in;

	  for (ptr = bb_info[j][bb->index].seginfo; ptr; ptr = next)
	    {
	      next = ptr->next;
	      if (ptr->mode != no_mode)
		{
		  rtx_insn *mode_set;

		  rtl_profile_for_bb (bb);
		  start_sequence ();

		  targetm.mode_switching.emit (entity_map[j], ptr->mode,
					       cur_mode, ptr->regs_live);
		  mode_set = get_insns ();
		  end_sequence ();

		  /* modes kill each other inside a basic block.  */
		  cur_mode = ptr->mode;

		  /* Insert MODE_SET only if it is nonempty.  */
		  if (mode_set != NULL_RTX)
		    {
		      emitted = true;
		      if (NOTE_INSN_BASIC_BLOCK_P (ptr->insn_ptr))
			/* We need to emit the insns in a FIFO-like manner,
			   i.e. the first to be emitted at our insertion
			   point ends up first in the instruction stream.
			   Because we made sure that NOTE_INSN_BASIC_BLOCK is
			   only used for initially empty basic blocks, we
			   can achieve this by appending at the end of
			   the block.  */
			emit_insn_after
			  (mode_set, BB_END (NOTE_BASIC_BLOCK (ptr->insn_ptr)));
		      else
			emit_insn_before (mode_set, ptr->insn_ptr);
		    }

		  default_rtl_profile ();
		}

	      free (ptr);
	    }
	}

      free (bb_info[j]);
    }

  free_edge_list (edge_list);

  /* Finished. Free up all the things we've allocated.  */
  sbitmap_vector_free (del);
  sbitmap_vector_free (insert);
  sbitmap_vector_free (kill);
  sbitmap_vector_free (antic);
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);
  sbitmap_vector_free (avin);
  sbitmap_vector_free (avout);

  if (need_commit)
    commit_edge_insertions ();

  if (targetm.mode_switching.entry && targetm.mode_switching.exit)
    cleanup_cfg (CLEANUP_NO_INSN_DEL);
  else if (!need_commit && !emitted)
    return 0;

  return 1;
}
Example #7
static void
force_move_args_size_note (basic_block bb, rtx prev, rtx insn)
{
    rtx note, test, next_candidate, prev_candidate;

    /* If PREV exists, tail-call to the logic in the other function.  */
    if (prev)
    {
        maybe_move_args_size_note (prev, insn, false);
        return;
    }

    /* First, make sure there's something that needs doing.  */
    note = find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX);
    if (note == NULL)
        return;

    /* We need to find a spot between the previous and next exception points
       where we can place the note and "properly" deallocate the arguments.  */
    next_candidate = prev_candidate = NULL;

    /* It is often the case that we have insns in the order:
           call
           add sp  (previous deallocation)
           sub sp  (align for next arglist)
           push arg
       and the add/sub cancel.  Therefore we begin by searching forward.  */

    test = insn;
    while ((test = next_active_insn_bb (bb, test)) != NULL)
    {
        /* Found an existing note: nothing to do.  */
        if (find_reg_note (test, REG_ARGS_SIZE, NULL_RTX))
            return;
        /* Found something that affects unwinding.  Stop searching.  */
        if (CALL_P (test) || !insn_nothrow_p (test))
            break;
        if (next_candidate == NULL)
            next_candidate = test;
    }

    test = insn;
    while ((test = prev_active_insn_bb (bb, test)) != NULL)
    {
        rtx tnote;
        /* Found a place that seems logical to adjust the stack.  */
        tnote = find_reg_note (test, REG_ARGS_SIZE, NULL_RTX);
        if (tnote)
        {
            XEXP (tnote, 0) = XEXP (note, 0);
            return;
        }
        if (prev_candidate == NULL)
            prev_candidate = test;
        /* Found something that affects unwinding.  Stop searching.  */
        if (CALL_P (test) || !insn_nothrow_p (test))
            break;
    }

    if (prev_candidate)
        test = prev_candidate;
    else if (next_candidate)
        test = next_candidate;
    else
    {
        /* ??? We *must* have a place, lest we ICE on the lost adjustment.
           Options are: dummy clobber insn, nop, or prevent the removal
           of the sp += 0 insn.  */
        /* TODO: Find another way to indicate to the dwarf2 code that we
           have not in fact lost an adjustment.  */
        test = emit_insn_before (gen_rtx_CLOBBER (VOIDmode, const0_rtx), insn);
    }
    add_reg_note (test, REG_ARGS_SIZE, XEXP (note, 0));
}
Example #8
File: init-regs.c Project: alcides/gcc
static void
initialize_uninitialized_regs (void)
{
  basic_block bb;
  bitmap already_genned = BITMAP_ALLOC (NULL);

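  /* At -O1 the DF_LIVE problem is not computed by default, so add it
     and mark it dirty before the df_analyze call below.  */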
  if (optimize == 1)
    {
      df_live_add_problem ();
      df_live_set_all_dirty ();
    }

  df_analyze ();

  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn;
      bitmap lr = DF_LR_IN (bb);
      bitmap ur = DF_LIVE_IN (bb);
      bitmap_clear (already_genned);

      FOR_BB_INSNS (bb, insn)
	{
	  df_ref use;
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  FOR_EACH_INSN_USE (use, insn)
	    {
	      unsigned int regno = DF_REF_REGNO (use);

	      /* Only do this for the pseudos.  */
	      if (regno < FIRST_PSEUDO_REGISTER)
		continue;

	      /* Do not generate multiple moves for the same regno.
		 This is common for sequences of subreg operations.
		 They would be deleted during combine but there is no
		 reason to churn the system.  */
	      if (bitmap_bit_p (already_genned, regno))
		continue;

	      /* A use is MUST uninitialized if it reaches the top of
		 the block from the inside of the block (the lr test)
		 and no def for it reaches the top of the block from
		 outside of the block (the ur test).  */
	      if (bitmap_bit_p (lr, regno)
		  && (!bitmap_bit_p (ur, regno)))
		{
		  rtx_insn *move_insn;
		  rtx reg = DF_REF_REAL_REG (use);

		  bitmap_set_bit (already_genned, regno);

		  start_sequence ();
		  emit_move_insn (reg, CONST0_RTX (GET_MODE (reg)));
		  move_insn = get_insns ();
		  end_sequence ();
		  emit_insn_before (move_insn, insn);
		  if (dump_file)
		    fprintf (dump_file,
			     "adding initialization in %s of reg %d in block %d for insn %d.\n",
			     current_function_name (), regno, bb->index,
			     INSN_UID (insn));
		}
	    }
	}
    }

  BITMAP_FREE (already_genned);
}