Example #1
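The following examples are C functions, apparently taken from GDB's branch-trace (btrace) recording support. This first helper, ftrace_update_lines, widens a function segment's source-line range (lbegin/lend) to cover the line of the instruction at PC: it looks the line up with find_pc_line and skips entries that belong to a different file, such as a macro expanded from another source file, so they do not distort the range.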
static void
ftrace_update_lines (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct symtab_and_line sal;
  const char *fullname;

  sal = find_pc_line (pc, 0);
  if (sal.symtab == NULL || sal.line == 0)
    {
      DEBUG_FTRACE ("no lines at %s", core_addr_to_string_nz (pc));
      return;
    }

  /* Check if we switched files.  This could happen if, say, a macro that
     is defined in another file is expanded here.  */
  fullname = symtab_to_fullname (sal.symtab);
  if (ftrace_skip_file (bfun, fullname))
    {
      DEBUG_FTRACE ("ignoring file at %s, file=%s",
		    core_addr_to_string_nz (pc), fullname);
      return;
    }

  /* Update the line range.  */
  bfun->lbegin = min (bfun->lbegin, sal.line);
  bfun->lend = max (bfun->lend, sal.line);

  if (record_debug > 1)
    ftrace_debug (bfun, "update lines");
}
Example #2
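record_btrace_frame_this_id computes the frame ID for a frame unwound from the recorded branch trace. It walks back to the first segment of the function, uses the frame's function start address as the code address and the segment number as the special address, and builds an "unavailable stack" frame ID from the two.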
static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
Example #3
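ftrace_print_insn_addr is a small debug helper that renders a branch-trace instruction's address as a string, returning "<nil>" when no instruction is given.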
static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}
Example #4
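record_btrace_frame_prev_register unwinds a register value from the recorded call structure. Only the PC can be reconstructed: when BFUN_UP_LINKS_TO_RET is set, the unwound PC is the first instruction of the up-linked segment; otherwise it is the address just past that segment's last instruction, i.e. the return address after the call.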
static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
Example #5
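record_btrace_step_thread carries out one stepping request for a thread that may be replaying its branch trace. It dispatches on the BTHR_STEP, BTHR_RSTEP, BTHR_CONT, and BTHR_RCONT flags, advances or rewinds the replay iterator accordingly, stops at breakpoints while continuing, and reports "no history" once it runs off either end of the recorded trace.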
static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;
  struct address_space *aspace;
  struct inferior *inf;
  enum btrace_thread_flag flags;
  unsigned int steps;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  flags = btinfo->flags & BTHR_MOVE;
  btinfo->flags &= ~BTHR_MOVE;

  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STEP:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* We are always able to step at least once.  */
      steps = btrace_insn_next (replay, 1);
      gdb_assert (steps == 1);

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* We stop replaying if we reached the end of the trace.  */
      if (btrace_insn_cmp (replay, &end) == 0)
	record_btrace_stop_replaying (tp);

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      /* If we can't step any further, we reached the end of the history.  */
      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* We are always able to step at least once.  */
	  steps = btrace_insn_next (replay, 1);
	  gdb_assert (steps == 1);

	  /* We stop replaying if we reached the end of the trace.  */
	  if (btrace_insn_cmp (replay, &end) == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}

    case BTHR_RCONT:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* If we can't step any further, we're done.  */
	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    return btrace_step_no_history ();

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}
    }
}
Example #6
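btrace_stitch_bts attaches a freshly read BTS delta trace to the trace already recorded for the thread. It handles two early-out cases (the old trace ends in a gap, or the new trace contains only the partial block around the current PC) by dropping the chronologically first new block; otherwise it rewrites that block to start at the last recorded instruction and pops that instruction so the normal trace computation re-adds it. If popping that instruction would empty the only remaining function segment, the whole old trace is cleared to avoid starting with a gap.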
static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  btinfo = &tp->btrace;
  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
  if (VEC_empty (btrace_insn_s, last_bfun->insn))
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
	 core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
		 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
	 ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
    btrace_clear (tp);

  return 0;
}
Example #7
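btrace_compute_ftrace_bts turns raw BTS blocks into the function-level trace. It walks the blocks from the chronologically oldest to the most recent, decodes them instruction by instruction, records gaps when decoding runs past the end of a block or when an instruction's size cannot be determined, and maintains the level offset so that all function levels are normalized to start at zero.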
static void
btrace_compute_ftrace_bts (struct thread_info *tp,
			   const struct btrace_data_bts *btrace)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk, ngaps;
  int level;

  gdbarch = target_gdbarch ();
  btinfo = &tp->btrace;
  begin = btinfo->begin;
  end = btinfo->end;
  ngaps = btinfo->ngaps;
  level = begin != NULL ? -btinfo->level : INT_MAX;
  blk = VEC_length (btrace_block_s, btrace->blocks);

  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace->blocks, blk);
      pc = block->begin;

      for (;;)
	{
	  struct btrace_insn insn;
	  int size;

	  /* We should hit the end of the block.  Warn if we went too far.  */
	  if (block->end < pc)
	    {
	      /* Indicate the gap in the trace - unless we're at the
		 beginning.  */
	      if (begin != NULL)
		{
		  warning (_("Recorded trace may be corrupted around %s."),
			   core_addr_to_string_nz (pc));

		  end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
		  ngaps += 1;
		}
	      break;
	    }

	  end = ftrace_update_function (end, pc);
	  if (begin == NULL)
	    begin = end;

	  /* Maintain the function level offset.
	     For all but the last block, we do it here.  */
	  if (blk != 0)
	    level = min (level, end->level);

	  size = 0;
	  TRY
	    {
	      size = gdb_insn_length (gdbarch, pc);
	    }
	  CATCH (error, RETURN_MASK_ERROR)
	    {
	    }
	  END_CATCH

	  insn.pc = pc;
	  insn.size = size;
	  insn.iclass = ftrace_classify_insn (gdbarch, pc);

	  ftrace_update_insns (end, &insn);

	  /* We're done once we pushed the instruction at the end.  */
	  if (block->end == pc)
	    break;

	  /* We can't continue if we fail to compute the size.  */
	  if (size <= 0)
	    {
	      warning (_("Recorded trace may be incomplete around %s."),
		       core_addr_to_string_nz (pc));

	      /* Indicate the gap in the trace.  We just added INSN so we're
		 not at the beginning.  */
	      end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
	      ngaps += 1;

	      break;
	    }

	  pc += size;

	  /* Maintain the function level offset.
	     For the last block, we do it here to not consider the last
	     instruction.
	     Since the last instruction corresponds to the current instruction
	     and is not really part of the execution history, it shouldn't
	     affect the level.  */
	  if (blk == 0)
	    level = min (level, end->level);
	}
    }

  btinfo->begin = begin;
  btinfo->end = end;
  btinfo->ngaps = ngaps;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}
Example #8
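ftrace_update_function decides whether the instruction at PC continues the current function segment or starts a new one. This variant classifies the segment's last instruction by its stored iclass: a return normally starts a return segment (with a special case treating a return from _dl_runtime_resolve as a tail call), a call that does not merely target the next instruction starts a call segment, and a jump to a function start (or to an address whose function cannot be determined) is treated as a tail call; any other change of symbol is recorded as a plain function switch.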
static struct btrace_function *
ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function or if we had a gap before, we create one.  */
  if (bfun == NULL || bfun->errcode != 0)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      switch (last->iclass)
	{
	case BTRACE_INSN_RETURN:
	  {
	    const char *fname;

	    /* On some systems, _dl_runtime_resolve returns to the resolved
	       function instead of jumping to it.  From our perspective,
	       however, this is a tailcall.
	       If we treated it as return, we wouldn't be able to find the
	       resolved function in our stack back trace.  Hence, we would
	       lose the current stack back trace and start anew with an empty
	       back trace.  When the resolved function returns, we would then
	       create a stack back trace with the same function names but
	       different frame id's.  This will confuse stepping.  */
	    fname = ftrace_print_function_name (bfun);
	    if (strcmp (fname, "_dl_runtime_resolve") == 0)
	      return ftrace_new_tailcall (bfun, mfun, fun);

	    return ftrace_new_return (bfun, mfun, fun);
	  }

	case BTRACE_INSN_CALL:
	  /* Ignore calls to the next instruction.  They are used for PIC.  */
	  if (last->pc + last->size == pc)
	    break;

	  return ftrace_new_call (bfun, mfun, fun);

	case BTRACE_INSN_JUMP:
	  {
	    CORE_ADDR start;

	    start = get_pc_function_start (pc);

	    /* If we can't determine the function for PC, we treat a jump at
	       the end of the block as tail call.  */
	    if (start == 0 || start == pc)
	      return ftrace_new_tailcall (bfun, mfun, fun);
	  }
	}
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
		    ftrace_print_insn_addr (last),
		    ftrace_print_function_name (bfun),
		    ftrace_print_filename (bfun));

      return ftrace_new_switch (bfun, mfun, fun);
    }

  return bfun;
}
Example #9
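A second variant of ftrace_update_function, apparently from a different revision, that queries gdbarch directly (gdbarch_insn_is_ret, gdbarch_insn_is_call, gdbarch_insn_is_jump) instead of relying on a stored instruction class; here a tail call is only detected when a function switch coincides with a jump to a function start.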
static struct btrace_function *
ftrace_update_function (struct gdbarch *gdbarch,
			struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function before, we create one.  */
  if (bfun == NULL)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      CORE_ADDR lpc;

      lpc = last->pc;

      /* Check for returns.  */
      if (gdbarch_insn_is_ret (gdbarch, lpc))
	return ftrace_new_return (gdbarch, bfun, mfun, fun);

      /* Check for calls.  */
      if (gdbarch_insn_is_call (gdbarch, lpc))
	{
	  int size;

	  size = gdb_insn_length (gdbarch, lpc);

	  /* Ignore calls to the next instruction.  They are used for PIC.  */
	  if (lpc + size != pc)
	    return ftrace_new_call (bfun, mfun, fun);
	}
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
		    ftrace_print_insn_addr (last),
		    ftrace_print_function_name (bfun),
		    ftrace_print_filename (bfun));

      if (last != NULL)
	{
	  CORE_ADDR start, lpc;

	  start = get_pc_function_start (pc);

	  /* If we can't determine the function for PC, we treat a jump at
	     the end of the block as tail call.  */
	  if (start == 0)
	    start = pc;

	  lpc = last->pc;

	  /* Jumps indicate optimized tail calls.  */
	  if (start == pc && gdbarch_insn_is_jump (gdbarch, lpc))
	    return ftrace_new_tailcall (bfun, mfun, fun);
	}

      return ftrace_new_switch (bfun, mfun, fun);
    }

  return bfun;
}