static void
cli_text (struct ui_out *uiout, const char *string)
{
  cli_out_data *data = ui_out_data (uiout);
  struct ui_file *stream;

  if (data->suppress_output)
    return;

  stream = VEC_last (ui_filep, data->streams);
  fputs_filtered (string, stream);
}
static void
cli_spaces (struct ui_out *uiout, int numspaces)
{
  cli_out_data *data = ui_out_data (uiout);
  struct ui_file *stream;

  if (data->suppress_output)
    return;

  stream = VEC_last (ui_filep, data->streams);
  print_spaces_filtered (numspaces, stream);
}
/* VARARGS */
static void
out_field_fmt (struct ui_out *uiout, int fldno,
               const char *fldname,
               const char *format, ...)
{
  cli_out_data *data = ui_out_data (uiout);
  struct ui_file *stream = VEC_last (ui_filep, data->streams);
  va_list args;

  va_start (args, format);
  vfprintf_filtered (stream, format, args);
  va_end (args);
}
static void
cli_message (struct ui_out *uiout, int verbosity,
             const char *format, va_list args)
{
  cli_out_data *data = ui_out_data (uiout);

  if (data->suppress_output)
    return;

  if (ui_out_get_verblvl (uiout) >= verbosity)
    {
      struct ui_file *stream = VEC_last (ui_filep, data->streams);

      vfprintf_unfiltered (stream, format, args);
    }
}
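/* The cli_* routines above all write to whichever stream sits on top of
   data->streams.  The sketch below is illustrative only: it shows the same
   pointer-vector-as-a-stack idiom using GDB's vec.h, with a made-up
   my_file_p element type standing in for ui_filep.  */

#include "vec.h"

typedef struct my_file *my_file_p;
DEF_VEC_P (my_file_p);

static VEC (my_file_p) *stream_stack;

/* Redirect output by pushing a new stream onto the stack.  */

static void
push_stream (my_file_p stream)
{
  VEC_safe_push (my_file_p, stream_stack, stream);
}

/* The current output target is the most recently pushed stream.
   VEC_last asserts that the vector is non-empty.  */

static my_file_p
current_stream (void)
{
  return VEC_last (my_file_p, stream_stack);
}

/* Undo the most recent redirection.  */

static void
pop_stream (void)
{
  VEC_pop (my_file_p, stream_stack);
}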
static void
dse_initialize_block_local_data (struct dom_walk_data *walk_data,
                                 basic_block bb ATTRIBUTE_UNUSED,
                                 bool recycled)
{
  struct dse_block_local_data *bd
    = VEC_last (void_p, walk_data->block_data_stack);

  /* If we are given a recycled block local data structure, ensure any
     bitmap associated with the block is cleared.  */
  if (recycled)
    {
      if (bd->stores)
        bitmap_clear (bd->stores);
    }
}
static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
static void
cli_field_fmt (struct ui_out *uiout, int fldno,
               int width, enum ui_align align,
               const char *fldname,
               const char *format, va_list args)
{
  cli_out_data *data = ui_out_data (uiout);
  struct ui_file *stream;

  if (data->suppress_output)
    return;

  stream = VEC_last (ui_filep, data->streams);
  vfprintf_filtered (stream, format, args);

  if (align != ui_noalign)
    field_separator ();
}
static struct btrace_function *
ftrace_find_call (struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      struct btrace_insn *last;

      /* Skip gaps.  */
      if (bfun->errcode != 0)
        continue;

      last = VEC_last (btrace_insn_s, bfun->insn);

      if (last->iclass == BTRACE_INSN_CALL)
        break;
    }

  return bfun;
}
static struct btrace_function *
ftrace_find_call (struct gdbarch *gdbarch, struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      struct btrace_insn *last;
      CORE_ADDR pc;

      /* We do not allow empty function segments.  */
      gdb_assert (!VEC_empty (btrace_insn_s, bfun->insn));

      last = VEC_last (btrace_insn_s, bfun->insn);
      pc = last->pc;

      if (gdbarch_insn_is_call (gdbarch, pc))
        break;
    }

  return bfun;
}
  return NULL;
}

#else /* HAVE_LIBEXPAT */

#include "xml-support.h"

/* Handle the start of a <segment> element.  */

static void
library_list_start_segment (struct gdb_xml_parser *parser,
                            const struct gdb_xml_element *element,
                            void *user_data, VEC(gdb_xml_value_s) *attributes)
{
  VEC(lm_info_p) **list = user_data;
  struct lm_info *last = VEC_last (lm_info_p, *list);
  ULONGEST *address_p = xml_find_attribute (attributes, "address")->value;
  CORE_ADDR address = (CORE_ADDR) *address_p;

  if (last->section_bases != NULL)
    gdb_xml_error (parser,
                   _("Library list with both segments and sections"));

  VEC_safe_push (CORE_ADDR, last->segment_bases, &address);
}

static void
library_list_start_section (struct gdb_xml_parser *parser,
                            const struct gdb_xml_element *element,
                            void *user_data, VEC(gdb_xml_value_s) *attributes)
{
static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  btinfo = &tp->btrace;
  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
  if (VEC_empty (btrace_insn_s, last_bfun->insn))
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
         core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
                 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
         ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun == btinfo->begin
      && VEC_empty (btrace_insn_s, last_bfun->insn))
    btrace_clear (tp);

  return 0;
}
static struct btrace_function *
ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function or if we had a gap before, we create one.  */
  if (bfun == NULL || bfun->errcode != 0)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      switch (last->iclass)
        {
        case BTRACE_INSN_RETURN:
          {
            const char *fname;

            /* On some systems, _dl_runtime_resolve returns to the resolved
               function instead of jumping to it.  From our perspective,
               however, this is a tailcall.
               If we treated it as return, we wouldn't be able to find the
               resolved function in our stack back trace.  Hence, we would
               lose the current stack back trace and start anew with an empty
               back trace.  When the resolved function returns, we would then
               create a stack back trace with the same function names but
               different frame id's.  This will confuse stepping.  */
            fname = ftrace_print_function_name (bfun);
            if (strcmp (fname, "_dl_runtime_resolve") == 0)
              return ftrace_new_tailcall (bfun, mfun, fun);

            return ftrace_new_return (bfun, mfun, fun);
          }

        case BTRACE_INSN_CALL:
          /* Ignore calls to the next instruction.  They are used for PIC.  */
          if (last->pc + last->size == pc)
            break;

          return ftrace_new_call (bfun, mfun, fun);

        case BTRACE_INSN_JUMP:
          {
            CORE_ADDR start;

            start = get_pc_function_start (pc);

            /* If we can't determine the function for PC, we treat a jump at
               the end of the block as tail call.  */
            if (start == 0 || start == pc)
              return ftrace_new_tailcall (bfun, mfun, fun);
          }
        }
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
                    ftrace_print_insn_addr (last),
                    ftrace_print_function_name (bfun),
                    ftrace_print_filename (bfun));

      return ftrace_new_switch (bfun, mfun, fun);
    }

  return bfun;
}
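/* The btrace functions above use the object-vector flavour of the same
   API: btrace_insn_s elements are stored by value, and VEC_last returns a
   pointer to the final element in place.  The sketch below is illustrative
   only, assuming GDB's vec.h is available; the point_s type and the
   function name are made up.  */

#include "vec.h"

typedef struct point
{
  int x;
  int y;
} point_s;
DEF_VEC_O (point_s);

static void
object_vector_example (void)
{
  VEC (point_s) *points = NULL;
  point_s p = { 1, 2 };
  point_s *last;

  /* Object vectors copy the element into the vector's own storage;
     safe_push grows the vector from NULL on first use.  */
  VEC_safe_push (point_s, points, &p);

  /* VEC_last returns a pointer into that storage, valid until the next
     reallocation, so the element can be read or updated in place.  */
  last = VEC_last (point_s, points);
  last->x = 42;

  VEC_free (point_s, points);
}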
struct lto_out_decl_state *
lto_get_out_decl_state (void)
{
  return VEC_last (lto_out_decl_state_ptr, decl_state_stack);
}
static struct btrace_function *
ftrace_update_function (struct gdbarch *gdbarch,
                        struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function before, we create one.  */
  if (bfun == NULL)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      CORE_ADDR lpc;

      lpc = last->pc;

      /* Check for returns.  */
      if (gdbarch_insn_is_ret (gdbarch, lpc))
        return ftrace_new_return (gdbarch, bfun, mfun, fun);

      /* Check for calls.  */
      if (gdbarch_insn_is_call (gdbarch, lpc))
        {
          int size;

          size = gdb_insn_length (gdbarch, lpc);

          /* Ignore calls to the next instruction.  They are used for PIC.  */
          if (lpc + size != pc)
            return ftrace_new_call (bfun, mfun, fun);
        }
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
                    ftrace_print_insn_addr (last),
                    ftrace_print_function_name (bfun),
                    ftrace_print_filename (bfun));

      if (last != NULL)
        {
          CORE_ADDR start, lpc;

          start = get_pc_function_start (pc);

          /* If we can't determine the function for PC, we treat a jump at
             the end of the block as tail call.  */
          if (start == 0)
            start = pc;

          lpc = last->pc;

          /* Jumps indicate optimized tail calls.  */
          if (start == pc && gdbarch_insn_is_jump (gdbarch, lpc))
            return ftrace_new_tailcall (bfun, mfun, fun);
        }

      return ftrace_new_switch (bfun, mfun, fun);
    }

  return bfun;
}