static struct value * record_btrace_frame_prev_register (struct frame_info *this_frame, void **this_cache, int regnum) { const struct btrace_frame_cache *cache; const struct btrace_function *bfun, *caller; const struct btrace_insn *insn; struct gdbarch *gdbarch; CORE_ADDR pc; int pcreg; gdbarch = get_frame_arch (this_frame); pcreg = gdbarch_pc_regnum (gdbarch); if (pcreg < 0 || regnum != pcreg) throw_error (NOT_AVAILABLE_ERROR, _("Registers are not available in btrace record history")); cache = *this_cache; bfun = cache->bfun; gdb_assert (bfun != NULL); caller = bfun->up; if (caller == NULL) throw_error (NOT_AVAILABLE_ERROR, _("No caller in btrace record history")); if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0) { insn = VEC_index (btrace_insn_s, caller->insn, 0); pc = insn->pc; } else { insn = VEC_last (btrace_insn_s, caller->insn); pc = insn->pc; pc += gdb_insn_length (gdbarch, pc); } DEBUG ("[frame] unwound PC in %s on level %d: %s", btrace_get_bfun_name (bfun), bfun->level, core_addr_to_string_nz (pc)); return frame_unwind_got_address (this_frame, regnum, pc); }
static void btrace_compute_ftrace_bts (struct thread_info *tp, const struct btrace_data_bts *btrace) { struct btrace_thread_info *btinfo; struct btrace_function *begin, *end; struct gdbarch *gdbarch; unsigned int blk, ngaps; int level; gdbarch = target_gdbarch (); btinfo = &tp->btrace; begin = btinfo->begin; end = btinfo->end; ngaps = btinfo->ngaps; level = begin != NULL ? -btinfo->level : INT_MAX; blk = VEC_length (btrace_block_s, btrace->blocks); while (blk != 0) { btrace_block_s *block; CORE_ADDR pc; blk -= 1; block = VEC_index (btrace_block_s, btrace->blocks, blk); pc = block->begin; for (;;) { struct btrace_insn insn; int size; /* We should hit the end of the block. Warn if we went too far. */ if (block->end < pc) { /* Indicate the gap in the trace - unless we're at the beginning. */ if (begin != NULL) { warning (_("Recorded trace may be corrupted around %s."), core_addr_to_string_nz (pc)); end = ftrace_new_gap (end, BDE_BTS_OVERFLOW); ngaps += 1; } break; } end = ftrace_update_function (end, pc); if (begin == NULL) begin = end; /* Maintain the function level offset. For all but the last block, we do it here. */ if (blk != 0) level = min (level, end->level); size = 0; TRY { size = gdb_insn_length (gdbarch, pc); } CATCH (error, RETURN_MASK_ERROR) { } END_CATCH insn.pc = pc; insn.size = size; insn.iclass = ftrace_classify_insn (gdbarch, pc); ftrace_update_insns (end, &insn); /* We're done once we pushed the instruction at the end. */ if (block->end == pc) break; /* We can't continue if we fail to compute the size. */ if (size <= 0) { warning (_("Recorded trace may be incomplete around %s."), core_addr_to_string_nz (pc)); /* Indicate the gap in the trace. We just added INSN so we're not at the beginning. */ end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE); ngaps += 1; break; } pc += size; /* Maintain the function level offset. For the last block, we do it here to not consider the last instruction. 
Since the last instruction corresponds to the current instruction and is not really part of the execution history, it shouldn't affect the level. */ if (blk == 0) level = min (level, end->level); } } btinfo->begin = begin; btinfo->end = end; btinfo->ngaps = ngaps; /* LEVEL is the minimal function level of all btrace function segments. Define the global level offset to -LEVEL so all function levels are normalized to start at zero. */ btinfo->level = -level; }
/* Update the function trace for an instruction at PC and return the
   current function segment.

   GDBARCH is used to classify the previously recorded instruction
   (return, call, jump).  BFUN is the current tail segment of the trace,
   or NULL at the very beginning.  Depending on the classification and on
   the symbols found at PC, either BFUN is returned unchanged or a new
   segment is created via one of the ftrace_new_* helpers.  */

static struct btrace_function *
ftrace_update_function (struct gdbarch *gdbarch, struct btrace_function *bfun,
			CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function before, we create one.  */
  if (bfun == NULL)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      CORE_ADDR lpc;

      lpc = last->pc;

      /* Check for returns.  */
      if (gdbarch_insn_is_ret (gdbarch, lpc))
	return ftrace_new_return (gdbarch, bfun, mfun, fun);

      /* Check for calls.  */
      if (gdbarch_insn_is_call (gdbarch, lpc))
	{
	  int size;

	  size = gdb_insn_length (gdbarch, lpc);

	  /* Ignore calls to the next instruction.  They are used for PIC.  */
	  if (lpc + size != pc)
	    return ftrace_new_call (bfun, mfun, fun);
	}
    }

  /* Check if we're switching functions for some other reason, e.g. the
     symbol at PC no longer matches BFUN's.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      /* NOTE(review): LAST may be NULL here - presumably
	 ftrace_print_insn_addr tolerates a NULL argument; confirm.  */
      DEBUG_FTRACE ("switching from %s in %s at %s",
		    ftrace_print_insn_addr (last),
		    ftrace_print_function_name (bfun),
		    ftrace_print_filename (bfun));

      if (last != NULL)
	{
	  CORE_ADDR start, lpc;

	  start = get_pc_function_start (pc);

	  /* If we can't determine the function for PC, we treat a jump at
	     the end of the block as tail call.  */
	  if (start == 0)
	    start = pc;

	  lpc = last->pc;

	  /* Jumps indicate optimized tail calls.  */
	  if (start == pc && gdbarch_insn_is_jump (gdbarch, lpc))
	    return ftrace_new_tailcall (bfun, mfun, fun);
	}

      return ftrace_new_switch (bfun, mfun, fun);
    }

  /* Still within the same function segment.  */
  return bfun;
}