/* Identify the data model (ILP32 vs. LP64) of the debuggee on behalf of
   libthread_db.  Stores one of the PR_MODEL_* constants through
   DATA_MODEL; PR_MODEL_UNKNOWN when no executable is loaded.  Always
   succeeds (returns PS_OK).  PH is unused.  */

ps_err_e
ps_pdmodel (gdb_ps_prochandle_t ph, int *data_model)
{
  int model = PR_MODEL_UNKNOWN;

  if (exec_bfd != 0)
    model = (bfd_get_arch_size (exec_bfd) == 32
	     ? PR_MODEL_ILP32
	     : PR_MODEL_LP64);

  *data_model = model;
  return PS_OK;
}
/* Return the size in bytes of a DWARF EH pointer with the given
   encoding; 0 for DW_EH_PE_omit.  Only the low three bits of ENCODING
   select the size (signedness and application bits are ignored); an
   unsupported size encoding aborts.  */

static offsetT
encoding_size (unsigned char encoding)
{
  offsetT size;

  if (encoding == DW_EH_PE_omit)
    return 0;

  switch (encoding & 0x7)
    {
    case 0:
      /* Absolute pointer: use the address size of the output BFD.  */
      size = (bfd_get_arch_size (stdoutput) == 64) ? 8 : 4;
      break;
    case DW_EH_PE_udata2:
      size = 2;
      break;
    case DW_EH_PE_udata4:
      size = 4;
      break;
    case DW_EH_PE_udata8:
      size = 8;
      break;
    default:
      abort ();
    }

  return size;
}
/* Hook run after all input files are opened.  Performs the standard ELF
   processing, then, when --embedded-relocs is in effect (and we are not
   doing a relocatable link), creates a .rel.sdata section in each input
   BFD whose .sdata has relocations, sized for the runtime relocation
   records the BFD backend will emit.

   Fixes vs. previous revision: declare the function with a proper
   (void) prototype instead of an old-style empty parameter list, and
   repair a doubled word in a comment.  */

static void
mips_elf32_after_open (void)
{
  /* Call the standard elf routine.  */
  gldelf32ltsmip_after_open ();

#ifdef SUPPORT_EMBEDDED_RELOCS
  if (command_line.embedded_relocs
      && (! link_info.relocateable))
    {
      bfd *abfd;

      /* In the embedded relocs mode we create a .rel.sdata section for
	 each input file with a .sdata section which has relocations.
	 The BFD backend will fill in these sections with magic numbers
	 which can be used to relocate the data section at run time.  */
      for (abfd = link_info.input_bfds; abfd != NULL; abfd = abfd->link_next)
	{
	  asection *datasec;

	  /* As first-order business, make sure that each input BFD is
	     ELF.  We need to call a special BFD backend function to
	     generate the embedded relocs, and we have that function
	     only for ELF.  */
	  if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
	    einfo ("%F%B: all input objects must be ELF for --embedded-relocs\n");

	  if (bfd_get_arch_size (abfd) != 32)
	    einfo ("%F%B: all input objects must be 32-bit ELF for --embedded-relocs\n");

	  datasec = bfd_get_section_by_name (abfd, ".sdata");

	  /* Note that we assume that the reloc_count field has already
	     been set up.  We could call bfd_get_reloc_upper_bound, but
	     that returns the size of a memory buffer rather than a reloc
	     count.  We do not want to call bfd_canonicalize_reloc,
	     because although it would always work it would force us to
	     read in the relocs into BFD canonical form, which would
	     waste a significant amount of time and memory.  */
	  if (datasec != NULL && datasec->reloc_count > 0)
	    {
	      asection *relsec;

	      /* NOTE(review): the "(32 == 32)" and "(32 / 8)" constants
		 below look like expansions of a size-parameterized ld
		 emulation template (ELFSIZE = 32); kept verbatim so the
		 generated code stays in sync with the template.  */
	      relsec = bfd_make_section (abfd, ".rel.sdata");
	      if (relsec == NULL
		  || ! bfd_set_section_flags (abfd, relsec,
					      (SEC_ALLOC
					       | SEC_LOAD
					       | SEC_HAS_CONTENTS
					       | SEC_IN_MEMORY))
		  || ! bfd_set_section_alignment (abfd, relsec,
						  (32 == 32) ? 2 : 3)
		  || ! bfd_set_section_size (abfd, relsec,
					     datasec->reloc_count
					     * ((32 / 8) + 8)))
		einfo ("%F%B: cannot create .rel.sdata section: %E\n");
	    }

	  /* Double check that all other data sections have no relocs,
	     as is required for embedded PIC code.  */
	  bfd_map_over_sections (abfd, mips_elf32_check_sections,
				 (PTR) datasec);
	}
    }
#endif /* SUPPORT_EMBEDDED_RELOCS */
}
static struct gdbarch * tilegx_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches) { struct gdbarch *gdbarch; int arch_size = 64; /* Handle arch_size == 32 or 64. Default to 64. */ if (info.abfd) arch_size = bfd_get_arch_size (info.abfd); /* Try to find a pre-existing architecture. */ for (arches = gdbarch_list_lookup_by_info (arches, &info); arches != NULL; arches = gdbarch_list_lookup_by_info (arches->next, &info)) { /* We only have two flavors -- just make sure arch_size matches. */ if (gdbarch_ptr_bit (arches->gdbarch) == arch_size) return (arches->gdbarch); } gdbarch = gdbarch_alloc (&info, NULL); /* Basic register fields and methods, datatype sizes and stuff. */ /* There are 64 physical registers which can be referenced by instructions (although only 56 of them can actually be debugged) and 1 magic register (the PC). The other three magic registers (ex1, syscall, orig_r0) which are known to "ptrace" are ignored by "gdb". Note that we simply pretend that there are 65 registers, and no "pseudo registers". */ set_gdbarch_num_regs (gdbarch, TILEGX_NUM_REGS); set_gdbarch_num_pseudo_regs (gdbarch, 0); set_gdbarch_sp_regnum (gdbarch, TILEGX_SP_REGNUM); set_gdbarch_pc_regnum (gdbarch, TILEGX_PC_REGNUM); set_gdbarch_register_name (gdbarch, tilegx_register_name); set_gdbarch_register_type (gdbarch, tilegx_register_type); set_gdbarch_short_bit (gdbarch, 2 * TARGET_CHAR_BIT); set_gdbarch_int_bit (gdbarch, 4 * TARGET_CHAR_BIT); set_gdbarch_long_bit (gdbarch, arch_size); set_gdbarch_long_long_bit (gdbarch, 8 * TARGET_CHAR_BIT); set_gdbarch_float_bit (gdbarch, 4 * TARGET_CHAR_BIT); set_gdbarch_double_bit (gdbarch, 8 * TARGET_CHAR_BIT); set_gdbarch_long_double_bit (gdbarch, 8 * TARGET_CHAR_BIT); set_gdbarch_ptr_bit (gdbarch, arch_size); set_gdbarch_addr_bit (gdbarch, arch_size); set_gdbarch_cannot_fetch_register (gdbarch, tilegx_cannot_reference_register); set_gdbarch_cannot_store_register (gdbarch, tilegx_cannot_reference_register); /* Stack grows down. 
*/ set_gdbarch_inner_than (gdbarch, core_addr_lessthan); /* Frame Info. */ set_gdbarch_unwind_sp (gdbarch, tilegx_unwind_sp); set_gdbarch_unwind_pc (gdbarch, tilegx_unwind_pc); set_gdbarch_dummy_id (gdbarch, tilegx_unwind_dummy_id); set_gdbarch_frame_align (gdbarch, tilegx_frame_align); frame_base_set_default (gdbarch, &tilegx_frame_base); set_gdbarch_skip_prologue (gdbarch, tilegx_skip_prologue); set_gdbarch_stack_frame_destroyed_p (gdbarch, tilegx_stack_frame_destroyed_p); /* Map debug registers into internal register numbers. */ set_gdbarch_dwarf2_reg_to_regnum (gdbarch, tilegx_dwarf2_reg_to_regnum); /* These values and methods are used when gdb calls a target function. */ set_gdbarch_push_dummy_call (gdbarch, tilegx_push_dummy_call); set_gdbarch_get_longjmp_target (gdbarch, tilegx_get_longjmp_target); set_gdbarch_write_pc (gdbarch, tilegx_write_pc); set_gdbarch_breakpoint_from_pc (gdbarch, tilegx_breakpoint_from_pc); set_gdbarch_return_value (gdbarch, tilegx_return_value); set_gdbarch_print_insn (gdbarch, print_insn_tilegx); gdbarch_init_osabi (info, gdbarch); dwarf2_append_unwinders (gdbarch); frame_unwind_append_unwinder (gdbarch, &tilegx_frame_unwind); return gdbarch; }
/* Scan ABFD's .dynamic section for the entry tagged DYNTAG.  Returns 1
   when found (and, if PTR is non-NULL, stores the entry's pointer value
   through it -- preferring the value read from target memory over the
   one in the file image); returns 0 when not found or on any failure.
   Handles both 32- and 64-bit ELF layouts.  */

static int
scan_dyntag (int dyntag, bfd *abfd, CORE_ADDR *ptr)
{
  int arch_size, step, sect_size;
  long dyn_tag;
  CORE_ADDR dyn_ptr, dyn_addr;
  gdb_byte *bufend, *bufstart, *buf;
  Elf32_External_Dyn *x_dynp_32;
  Elf64_External_Dyn *x_dynp_64;
  struct bfd_section *sect;
  struct target_section *target_section;

  if (abfd == NULL)
    return 0;

  if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
    return 0;

  /* bfd_get_arch_size returns -1 when the address size is unknown.  */
  arch_size = bfd_get_arch_size (abfd);
  if (arch_size == -1)
    return 0;

  /* Find the start address of the .dynamic section.  */
  sect = bfd_get_section_by_name (abfd, ".dynamic");
  if (sect == NULL)
    return 0;

  /* Prefer the section's relocated in-memory address when ABFD is one
     of the target's loaded sections.  */
  for (target_section = current_target_sections->sections;
       target_section < current_target_sections->sections_end;
       target_section++)
    if (sect == target_section->the_bfd_section)
      break;
  if (target_section < current_target_sections->sections_end)
    dyn_addr = target_section->addr;
  else
    {
      /* ABFD may come from OBJFILE acting only as a symbol file without
	 being loaded into the target (see add_symbol_file_command).
	 This case is such fallback to the file VMA address without the
	 possibility of having the section relocated to its actual
	 in-memory address.  */
      dyn_addr = bfd_section_vma (abfd, sect);
    }

  /* Read in .dynamic from the BFD.  We will get the actual value from
     memory later.  */
  sect_size = bfd_section_size (abfd, sect);
  buf = bufstart = alloca (sect_size);
  if (!bfd_get_section_contents (abfd, sect,
				 buf, 0, sect_size))
    return 0;

  /* Iterate over BUF and scan for DYNTAG.  If found, set PTR and
     return.  STEP is the size of one on-disk dynamic entry.  */
  step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
			   : sizeof (Elf64_External_Dyn);
  for (bufend = buf + sect_size;
       buf < bufend;
       buf += step)
    {
      if (arch_size == 32)
	{
	  x_dynp_32 = (Elf32_External_Dyn *) buf;
	  dyn_tag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
	  dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
	}
      else
	{
	  x_dynp_64 = (Elf64_External_Dyn *) buf;
	  dyn_tag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
	  dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
	}
      /* DT_NULL terminates the dynamic array.  */
      if (dyn_tag == DT_NULL)
	return 0;
      if (dyn_tag == dyntag)
	{
	  /* If requested, try to read the runtime value of this
	     .dynamic entry.  */
	  if (ptr)
	    {
	      struct type *ptr_type;
	      gdb_byte ptr_buf[8];
	      CORE_ADDR ptr_addr;

	      ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
	      /* Address of the d_un field: entry start plus the size of
		 the d_tag word (arch_size / 8 bytes).  */
	      ptr_addr = dyn_addr + (buf - bufstart) + arch_size / 8;
	      /* If the memory read fails, fall back silently to the
		 file-image value already in DYN_PTR.  */
	      if (target_read_memory (ptr_addr, ptr_buf, arch_size / 8) == 0)
		dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
	      *ptr = dyn_ptr;
	    }
	  return 1;
	}
    }

  return 0;
}
/* Common routine used by the ps_* memory access callbacks to read
   (DOWRITE == 0) or write (DOWRITE != 0) SIZE bytes between BUF and
   target address ADDR.  Retargets the transfer at a live lwp if
   inferior_ptid names a thread or a dead lwp.  Returns PS_OK on
   success, PS_ERR on any error or zero-length transfer.  PH is unused.

   Fix vs. previous revision: on a partial transfer the loop advanced
   BUF and decremented SIZE but never advanced ADDR, so the remainder
   was transferred from/to the wrong (original) target address.  ADDR
   is now advanced in step with BUF.  */

static ps_err_e
rw_common (int dowrite, const struct ps_prochandle *ph, gdb_ps_addr_t addr,
	   char *buf, int size)
{
  struct cleanup *old_chain;

  old_chain = save_inferior_ptid ();

  if (is_thread (inferior_ptid) ||	/* A thread.  */
      !target_thread_alive (inferior_ptid))	/* An lwp, but not alive.  */
    inferior_ptid = procfs_first_available ();	/* Find any live lwp.  */

  /* Note: don't need to call switch_to_thread; we're just reading
     memory.  */

#if defined (__sparcv9)
  /* For Sparc64 cross Sparc32, make sure the address has not been
     accidentally sign-extended (or whatever) to beyond 32 bits.  */
  if (bfd_get_arch_size (exec_bfd) == 32)
    addr &= 0xffffffff;
#endif

  /* to_xfer_memory may move fewer than SIZE bytes per call; loop until
     everything has been transferred.  */
  while (size > 0)
    {
      int cc;

      /* FIXME: passing 0 as attrib argument.  */
      if (target_has_execution)
	cc = procfs_ops.to_xfer_memory (addr, buf, size,
					dowrite, 0, &procfs_ops);
      else
	cc = orig_core_ops.to_xfer_memory (addr, buf, size,
					   dowrite, 0, &core_ops);

      if (cc < 0)
	{
	  if (dowrite == 0)
	    print_sys_errmsg ("rw_common (): read", errno);
	  else
	    print_sys_errmsg ("rw_common (): write", errno);

	  do_cleanups (old_chain);
	  return PS_ERR;
	}
      else if (cc == 0)
	{
	  if (dowrite == 0)
	    warning ("rw_common (): unable to read at addr 0x%lx",
		     (long) addr);
	  else
	    warning ("rw_common (): unable to write at addr 0x%lx",
		     (long) addr);

	  do_cleanups (old_chain);
	  return PS_ERR;
	}

      size -= cc;
      buf += cc;
      addr += cc;	/* Was missing: keep target address in sync.  */
    }

  do_cleanups (old_chain);
  return PS_OK;
}