static void debug_reloc(Elf32_Sym *symtab, char *strtab, ELF_RELOC *rpnt)
{
    if (_dl_debug_reloc) {
        int symtab_index;
        const char *sym;

        symtab_index = ELF32_R_SYM(rpnt->r_info);
        sym = symtab_index ? strtab + symtab[symtab_index].st_name : "sym=0x0";

        if (_dl_debug_symbols)
            _dl_dprintf(_dl_debug_file, "\n\t");
        else
            _dl_dprintf(_dl_debug_file, "\n%s\n\t", sym);

#ifdef ELF_USES_RELOCA
        _dl_dprintf(_dl_debug_file, "%s\toffset=%x\taddend=%x",
                    _dl_reltypes(ELF32_R_TYPE(rpnt->r_info)),
                    rpnt->r_offset, rpnt->r_addend);
#else
        _dl_dprintf(_dl_debug_file, "%s\toffset=%x\n",
                    _dl_reltypes(ELF32_R_TYPE(rpnt->r_info)),
                    rpnt->r_offset);
#endif
    }
}
static int _dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope,
                        Elf32_Rela *rpnt, Elf32_Sym *symtab, char *strtab)
{
    int reloc_type;
    int symtab_index;
    char *symname;
    unsigned long *reloc_addr;
    unsigned long symbol_addr;
#if defined(__SUPPORT_LD_DEBUG__)
    unsigned long old_val;
#endif

    reloc_addr = (unsigned long *)(tpnt->loadaddr + rpnt->r_offset);
    reloc_type = ELF32_R_TYPE(rpnt->r_info);
    symtab_index = ELF32_R_SYM(rpnt->r_info);
    symbol_addr = 0;
    symname = strtab + symtab[symtab_index].st_name;

    if (symtab_index) {
        symbol_addr = (unsigned long) _dl_find_hash(symname,
                        tpnt->symbol_scope, tpnt,
                        elf_machine_type_class(reloc_type));
        /* Allow undefined references to weak symbols */
        if (!symbol_addr &&
            ELF32_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK) {
            _dl_dprintf(2, "%s: can't resolve symbol '%s'\n",
                        _dl_progname, symname);
            return 0;
        }
    }

#if defined(__SUPPORT_LD_DEBUG__)
    old_val = *reloc_addr;
#endif

    switch (reloc_type) {
    case R_AVR32_NONE:
        break;
    case R_AVR32_GLOB_DAT:
    case R_AVR32_JMP_SLOT:
        *reloc_addr = symbol_addr + rpnt->r_addend;
        break;
    case R_AVR32_RELATIVE:
        *reloc_addr = (unsigned long)tpnt->loadaddr + rpnt->r_addend;
        break;
    default:
        return -1;
    }

#if defined(__SUPPORT_LD_DEBUG__)
    if (_dl_debug_reloc && _dl_debug_detail)
        _dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x\n",
                    old_val, *reloc_addr, reloc_addr);
#endif

    return 0;
}
unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry)
{
    int reloc_type;
    int symtab_index;
    char *strtab;
    char *symname;
    char *new_addr;
    char *rel_addr;
    char **got_addr;
    Elf32_Sym *symtab;
    ELF_RELOC *this_reloc;
    unsigned long instr_addr;

    rel_addr = (char *)tpnt->dynamic_info[DT_JMPREL];
    this_reloc = (ELF_RELOC *)(intptr_t)(rel_addr + reloc_entry);
    reloc_type = ELF32_R_TYPE(this_reloc->r_info);
    symtab_index = ELF32_R_SYM(this_reloc->r_info);

    symtab = (Elf32_Sym *)(intptr_t)tpnt->dynamic_info[DT_SYMTAB];
    strtab = (char *)tpnt->dynamic_info[DT_STRTAB];
    symname = strtab + symtab[symtab_index].st_name;

    if (unlikely(reloc_type != R_CRIS_JUMP_SLOT)) {
        _dl_dprintf(2, "%s: Incorrect relocation type in jump relocations\n",
                    _dl_progname);
        _dl_exit(1);
    }

    /* Address of the jump instruction to fix up. */
    instr_addr = ((unsigned long)this_reloc->r_offset +
                  (unsigned long)tpnt->loadaddr);
    got_addr = (char **)instr_addr;

    /* Get the address of the GOT entry. */
    new_addr = _dl_find_hash(symname, tpnt->symbol_scope, tpnt,
                             ELF_RTYPE_CLASS_PLT);
    if (unlikely(!new_addr)) {
        _dl_dprintf(2, "%s: Can't resolve symbol '%s'\n",
                    _dl_progname, symname);
        _dl_exit(1);
    }

#if defined (__SUPPORT_LD_DEBUG__)
    if (_dl_debug_bindings) {
        _dl_dprintf(_dl_debug_file, "\nresolve function: %s", symname);
        if (_dl_debug_detail)
            _dl_dprintf(_dl_debug_file, "\n\tpatched: %x ==> %x @ %x",
                        *got_addr, new_addr, got_addr);
    }
    if (!_dl_debug_nofixups) {
        *got_addr = new_addr;
    }
#else
    *got_addr = new_addr;
#endif

    return (unsigned long)new_addr;
}
struct funcdesc_value volatile *
_dl_linux_resolver (struct elf_resolve *tpnt, int reloc_entry)
{
    ELF_RELOC *this_reloc;
    char *strtab;
    ElfW(Sym) *symtab;
    int symtab_index;
    char *rel_addr;
    struct elf_resolve *new_tpnt;
    char *new_addr;
    struct funcdesc_value funcval;
    struct funcdesc_value volatile *got_entry;
    char *symname;

    rel_addr = (char *)tpnt->dynamic_info[DT_JMPREL];
    this_reloc = (ELF_RELOC *)(intptr_t)(rel_addr + reloc_entry);
    symtab_index = ELF_R_SYM(this_reloc->r_info);

    symtab = (Elf32_Sym *) tpnt->dynamic_info[DT_SYMTAB];
    strtab = (char *) tpnt->dynamic_info[DT_STRTAB];
    symname = strtab + symtab[symtab_index].st_name;

    /* Address of the GOT entry to fix up. */
    got_entry = (struct funcdesc_value *)
        DL_RELOC_ADDR(tpnt->loadaddr, this_reloc->r_offset);

    /* Get the address to be used to fill in the GOT entry. */
    new_addr = _dl_lookup_hash(symname, tpnt->symbol_scope, NULL, 0, &new_tpnt);
    if (!new_addr) {
        new_addr = _dl_lookup_hash(symname, NULL, NULL, 0, &new_tpnt);
        if (!new_addr) {
            _dl_dprintf(2, "%s: can't resolve symbol '%s'\n",
                        _dl_progname, symname);
            _dl_exit(1);
        }
    }

    funcval.entry_point = new_addr;
    funcval.got_value = new_tpnt->loadaddr.got_value;

#if defined (__SUPPORT_LD_DEBUG__)
    if (_dl_debug_bindings) {
        _dl_dprintf(_dl_debug_file, "\nresolve function: %s", symname);
        if (_dl_debug_detail)
            _dl_dprintf(_dl_debug_file, "\n\tpatched (%x,%x) ==> (%x,%x) @ %x\n",
                        got_entry->entry_point, got_entry->got_value,
                        funcval.entry_point, funcval.got_value, got_entry);
    }
    if (1 || !_dl_debug_nofixups) {
        *got_entry = funcval;
    }
#else
    *got_entry = funcval;
#endif

    return got_entry;
}
static int _dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope,
                     unsigned long rel_addr, unsigned long rel_size,
                     int (*reloc_fnc)(struct elf_resolve *tpnt, struct dyn_elf *scope,
                                      ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab))
{
    int symtab_index;
    unsigned int i;
    char *strtab;
    Elf32_Sym *symtab;
    ELF_RELOC *rpnt;

    /* Parse the relocation information. */
    rpnt = (ELF_RELOC *)(intptr_t)rel_addr;
    rel_size /= sizeof(ELF_RELOC);

    symtab = (Elf32_Sym *)(intptr_t)tpnt->dynamic_info[DT_SYMTAB];
    strtab = (char *)tpnt->dynamic_info[DT_STRTAB];

    for (i = 0; i < rel_size; i++, rpnt++) {
        int res;

        symtab_index = ELF32_R_SYM(rpnt->r_info);

        debug_sym(symtab, strtab, symtab_index);
        debug_reloc(symtab, strtab, rpnt);

        /* Pass over to actual relocation function. */
        res = reloc_fnc(tpnt, scope, rpnt, symtab, strtab);
        if (res == 0)
            continue;

        _dl_dprintf(2, "\n%s: ", _dl_progname);
        if (symtab_index)
            _dl_dprintf(2, "symbol '%s': ",
                        strtab + symtab[symtab_index].st_name);

        if (unlikely(res < 0)) {
            int reloc_type = ELF32_R_TYPE(rpnt->r_info);
#if defined (__SUPPORT_LD_DEBUG__)
            _dl_dprintf(2, "can't handle reloc type %s\n",
                        _dl_reltypes(reloc_type));
#else
            _dl_dprintf(2, "can't handle reloc type %x\n", reloc_type);
#endif
            _dl_exit(-res);
        } else if (unlikely(res > 0)) {
            _dl_dprintf(2, "can't resolve symbol\n");
            return res;
        }
    }
    return 0;
}
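/*
 * Sketch of how the per-port entry points typically hand a relocation
 * section to _dl_parse(); the exact wrapper signatures and scope arguments
 * vary between uClibc versions, so this is illustrative rather than
 * canonical (guarded out on purpose).
 */
#if 0
void _dl_parse_lazy_relocation_information(struct dyn_elf *rpnt,
                                           unsigned long rel_addr,
                                           unsigned long rel_size)
{
    /* Lazy pass: no symbol lookup needed, just prime the PLT slots. */
    (void) _dl_parse(rpnt->dyn, NULL, rel_addr, rel_size, _dl_do_lazy_reloc);
}

int _dl_parse_relocation_information(struct dyn_elf *rpnt,
                                     unsigned long rel_addr,
                                     unsigned long rel_size)
{
    /* Full pass: resolve symbols through the module's search scope. */
    return _dl_parse(rpnt->dyn, rpnt->dyn->symbol_scope, rel_addr, rel_size,
                     _dl_do_reloc);
}
#endif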
unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry)
{
    ELF_RELOC *this_reloc;
    char *strtab;
    char *symname;
    Elf32_Sym *symtab;
    ELF_RELOC *rel_addr;
    int symtab_index;
    unsigned long new_addr;
    char **got_addr;
    unsigned long instr_addr;

    rel_addr = (ELF_RELOC *) tpnt->dynamic_info[DT_JMPREL];
    this_reloc = rel_addr + reloc_entry;
    symtab_index = ELF32_R_SYM(this_reloc->r_info);

    symtab = (Elf32_Sym *) tpnt->dynamic_info[DT_SYMTAB];
    strtab = (char *) tpnt->dynamic_info[DT_STRTAB];
    symname = strtab + symtab[symtab_index].st_name;

    /* Address of the jump instruction to fix up. */
    instr_addr = ((unsigned long) this_reloc->r_offset +
                  (unsigned long) tpnt->loadaddr);
    got_addr = (char **) instr_addr;

    /* Get the address of the GOT entry. */
    new_addr = (unsigned long) _dl_find_hash(symname,
                    &_dl_loaded_modules->symbol_scope, tpnt,
                    ELF_RTYPE_CLASS_PLT, NULL);
    if (unlikely(!new_addr)) {
        _dl_dprintf(2, "%s: can't resolve symbol '%s'\n",
                    _dl_progname, symname);
        _dl_exit(1);
    }

#if defined (__SUPPORT_LD_DEBUG__)
#if !defined __SUPPORT_LD_DEBUG_EARLY__
    if ((unsigned long) got_addr < 0x40000000)
#endif
    {
        if (_dl_debug_bindings) {
            _dl_dprintf(_dl_debug_file, "\nresolve function: %s", symname);
            if (_dl_debug_detail)
                _dl_dprintf(_dl_debug_file, "\tpatch %x ==> %x @ %x",
                            *got_addr, new_addr, got_addr);
        }
    }
    if (!_dl_debug_nofixups) {
        *got_addr = (char *)new_addr;
    }
#else
    *got_addr = (char *)new_addr;
#endif

    return new_addr;
}
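/*
 * Rough picture of the lazy-binding path that ends in the resolver above
 * (the trampoline itself is per-arch assembly, so treat this as a sketch):
 *
 *   1. The first call through a PLT slot reaches a stub that passes the
 *      module's struct elf_resolve and the relocation offset/index to the
 *      assembly entry point _dl_linux_resolve.
 *   2. That entry point saves the argument registers and calls
 *      _dl_linux_resolver(tpnt, reloc_entry).
 *   3. The resolved address is written into the GOT slot, so later calls
 *      go straight to the target, and is also returned so the trampoline
 *      can jump to it and complete the original call.
 */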
static int _dl_do_lazy_reloc (struct elf_resolve *tpnt, struct dyn_elf *scope,
                              ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab)
{
    int reloc_type;
    unsigned long *reloc_addr;

    (void)scope;
    (void)symtab;
    (void)strtab;

    reloc_addr = (unsigned long *)(intptr_t)
        (tpnt->loadaddr + (unsigned long) rpnt->r_offset);
    reloc_type = ELF32_R_TYPE(rpnt->r_info);

#if defined (__SUPPORT_LD_DEBUG__)
    {
        unsigned long old_val = *reloc_addr;
#endif
        switch (reloc_type) {
        case R_SH_NONE:
            break;
        case R_SH_JMP_SLOT:
            *reloc_addr += (unsigned long) tpnt->loadaddr;
            break;
        default:
            return -1;  /* call _dl_exit(1) */
        }
#if defined (__SUPPORT_LD_DEBUG__)
        if (_dl_debug_reloc && _dl_debug_detail)
            _dl_dprintf(_dl_debug_file, "\tpatch: %x ==> %x @ %x",
                        old_val, *reloc_addr, reloc_addr);
    }
#endif
    return 0;
}
/* Initialize static TLS area and DTV for current (only) thread.
   libpthread implementations should provide their own hook
   to handle all threads.  */
void attribute_hidden __attribute_noinline__
_dl_nothread_init_static_tls (struct link_map *map)
{
# ifdef TLS_TCB_AT_TP
    void *dest = (char *) THREAD_SELF - map->l_tls_offset;
# elif defined(TLS_DTV_AT_TP)
    void *dest = (char *) THREAD_SELF + map->l_tls_offset + TLS_PRE_TCB_SIZE;
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

    /* Fill in the DTV slot so that a later LD/GD access will find it. */
    dtv_t *dtv = THREAD_DTV ();
    if (!(map->l_tls_modid <= dtv[-1].counter)) {
        _dl_dprintf(2, "map->l_tls_modid <= dtv[-1].counter FAILED!\n");
        _dl_exit(30);
    }
    dtv[map->l_tls_modid].pointer.val = dest;
    dtv[map->l_tls_modid].pointer.is_static = true;

    /* Initialize the memory. */
    _dl_memcpy(dest, map->l_tls_initimage, map->l_tls_initimage_size);
    _dl_memset((dest + map->l_tls_initimage_size), '\0',
               map->l_tls_blocksize - map->l_tls_initimage_size);
}
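/*
 * Shape of the dtv entries manipulated above (illustrative sketch only,
 * guarded out: the real dtv_t comes from the arch TLS headers).
 */
#if 0
typedef union dtv {
    size_t counter;          /* dtv[-1].counter: number of usable slots */
    struct {
        void *val;           /* address of the module's TLS block */
        bool is_static;      /* true if it lives in the static TLS area */
    } pointer;
} dtv_t;
#endif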
/*
 * This function intentionally does not return any value but signals error
 * directly, as static TLS should be rare and the code handling it should
 * stay out of line as much as possible.
 */
void internal_function __attribute_noinline__
_dl_allocate_static_tls (struct link_map *map)
{
    if (_dl_try_allocate_static_tls (map)) {
        _dl_dprintf(2, "cannot allocate memory in static TLS block");
        _dl_exit(30);
    }
}
void _dlinfo(void)
{
    struct elf_resolve *tpnt;
    struct dyn_elf *rpnt, *hpnt;

    _dl_dprintf(2, "List of loaded modules\n");
    /* First start with a complete list of all of the loaded files. */
    for (tpnt = _dl_loaded_modules; tpnt; tpnt = tpnt->next) {
        _dl_dprintf(2, "\t%x %x %x %s %d %s\n",
                    (unsigned) tpnt->loadaddr, (unsigned) tpnt,
                    (unsigned) tpnt->symbol_scope, type[tpnt->libtype],
                    tpnt->usage_count, tpnt->libname);
    }

    /* Next dump the module list for the application itself */
    _dl_dprintf(2, "\nModules for application (%x):\n",
                (unsigned) _dl_symbol_tables);
    for (rpnt = _dl_symbol_tables; rpnt; rpnt = rpnt->next)
        _dl_dprintf(2, "\t%x %s\n", (unsigned) rpnt->dyn, rpnt->dyn->libname);

    for (hpnt = _dl_handles; hpnt; hpnt = hpnt->next_handle) {
        _dl_dprintf(2, "Modules for handle %x\n", (unsigned) hpnt);
        for (rpnt = hpnt; rpnt; rpnt = rpnt->next)
            _dl_dprintf(2, "\t%x %s\n", (unsigned) rpnt->dyn,
                        rpnt->dyn->libname);
    }
}
void _dl_protect_relro (struct elf_resolve *l)
{
    ElfW(Addr) base = (ElfW(Addr)) DL_RELOC_ADDR(l->loadaddr, l->relro_addr);
    ElfW(Addr) start = (base & PAGE_ALIGN);
    ElfW(Addr) end = ((base + l->relro_size) & PAGE_ALIGN);

    _dl_if_debug_dprint("RELRO protecting %s: start:%x, end:%x\n",
                        l->libname, start, end);
    if (start != end &&
        _dl_mprotect ((void *) start, end - start, PROT_READ) < 0) {
        _dl_dprintf(2, "%s: cannot apply additional memory protection after relocation",
                    l->libname);
        _dl_exit(0);
    }
}
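/*
 * Where relro_addr/relro_size come from (illustrative sketch only, guarded
 * out: the real assignment sits in the program-header scan and the exact
 * code differs between loader versions):
 */
#if 0
    if (ppnt->p_type == PT_GNU_RELRO) {
        tpnt->relro_addr = ppnt->p_vaddr;   /* start of the to-be-read-only span */
        tpnt->relro_size = ppnt->p_memsz;   /* length of that span */
    }
#endif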
static void debug_sym(Elf32_Sym *symtab, char *strtab, int symtab_index)
{
    if (_dl_debug_symbols) {
        if (symtab_index) {
            _dl_dprintf(_dl_debug_file,
                        "\n%s\tvalue=%x\tsize=%x\tinfo=%x\tother=%x\tshndx=%x",
                        strtab + symtab[symtab_index].st_name,
                        symtab[symtab_index].st_value,
                        symtab[symtab_index].st_size,
                        symtab[symtab_index].st_info,
                        symtab[symtab_index].st_other,
                        symtab[symtab_index].st_shndx);
        }
    }
}
static void *
allocate_and_init (struct link_map *map)
{
    void *newp;

    newp = _dl_memalign (map->l_tls_align, map->l_tls_blocksize);
    if (newp == NULL) {
        _dl_dprintf(2, "%s:%d: Out of memory!!!\n", __FUNCTION__, __LINE__);
        _dl_exit(1);
    }

    /* Initialize the memory. */
    _dl_memcpy (newp, map->l_tls_initimage, map->l_tls_initimage_size);
    _dl_memset ((newp + map->l_tls_initimage_size), '\0',
                map->l_tls_blocksize - map->l_tls_initimage_size);

    return newp;
}
static int _dl_do_lazy_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope,
                             ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab)
{
    int reloc_type;
    unsigned long *reloc_addr;
#if defined (__SUPPORT_LD_DEBUG__)
    unsigned long old_val;
#endif

    /* Don't care about these, just keep the compiler happy. */
    (void)scope;
    (void)symtab;
    (void)strtab;

    reloc_addr = (unsigned long *)(intptr_t)
        (tpnt->loadaddr + (unsigned long)rpnt->r_offset);
    reloc_type = ELF32_R_TYPE(rpnt->r_info);

#if defined (__SUPPORT_LD_DEBUG__)
    old_val = *reloc_addr;
#endif

    switch (reloc_type) {
    case R_CRIS_NONE:
        break;
    case R_CRIS_JUMP_SLOT:
        *reloc_addr += (unsigned long)tpnt->loadaddr;
        break;
    default:
        return -1;  /* Calls _dl_exit(1). */
    }

#if defined (__SUPPORT_LD_DEBUG__)
    if (_dl_debug_reloc && _dl_debug_detail)
        _dl_dprintf(_dl_debug_file, "\n\tpatched: %x ==> %x @ %x",
                    old_val, *reloc_addr, reloc_addr);
#endif

    return 0;
}
/* No, there are cases where the SVr4 linker fails to emit COPY relocs at all */
static int _dl_do_copy (struct elf_resolve *tpnt, struct dyn_elf *scope,
                        ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab)
{
    int reloc_type;
    int symtab_index;
    unsigned long *reloc_addr;
    unsigned long symbol_addr;
    int goof = 0;

    reloc_addr = (unsigned long *)(intptr_t)
        (tpnt->loadaddr + (unsigned long) rpnt->r_offset);
    reloc_type = ELF32_R_TYPE(rpnt->r_info);
    if (reloc_type != R_SH_COPY)
        return 0;
    symtab_index = ELF32_R_SYM(rpnt->r_info);
    symbol_addr = 0;

    if (symtab_index) {
        symbol_addr = (unsigned long)
            _dl_find_hash(strtab + symtab[symtab_index].st_name,
                          scope, NULL, copyrel);
        if (!symbol_addr)
            goof++;
    }

    if (!goof) {
#if defined (__SUPPORT_LD_DEBUG__)
        if (_dl_debug_move)
            _dl_dprintf(_dl_debug_file, "\n%s move %x bytes from %x to %x",
                        strtab + symtab[symtab_index].st_name,
                        symtab[symtab_index].st_size,
                        symbol_addr, symtab[symtab_index].st_value);
#endif
        _dl_memcpy((char *) symtab[symtab_index].st_value,
                   (char *) symbol_addr, symtab[symtab_index].st_size);
    }

    return goof;
}
int _dl_map_cache(void)
{
    int fd;
    struct stat st;
    header_t *header;
    libentry_t *libent;
    int i, strtabsize;

    if (_dl_cache_addr == MAP_FAILED)
        return -1;
    else if (_dl_cache_addr != NULL)
        return 0;

    if (_dl_stat(LDSO_CACHE, &st)
        || (fd = _dl_open(LDSO_CACHE, O_RDONLY|O_CLOEXEC, 0)) < 0) {
        _dl_cache_addr = MAP_FAILED;    /* so we won't try again */
        return -1;
    }

    _dl_cache_size = st.st_size;
    _dl_cache_addr = _dl_mmap(0, _dl_cache_size, PROT_READ,
                              LDSO_CACHE_MMAP_FLAGS, fd, 0);
    _dl_close(fd);
    if (_dl_mmap_check_error(_dl_cache_addr)) {
        _dl_dprintf(2, "%s:%i: can't map '%s'\n",
                    _dl_progname, __LINE__, LDSO_CACHE);
        return -1;
    }

    header = (header_t *) _dl_cache_addr;

    if (_dl_cache_size < sizeof(header_t)
        || _dl_memcmp(header->magic, LDSO_CACHE_MAGIC, LDSO_CACHE_MAGIC_LEN)
        || _dl_memcmp(header->version, LDSO_CACHE_VER, LDSO_CACHE_VER_LEN)
        || _dl_cache_size < (sizeof(header_t) + header->nlibs * sizeof(libentry_t))
        || _dl_cache_addr[_dl_cache_size - 1] != '\0') {
        _dl_dprintf(2, "%s: cache '%s' is corrupt\n", _dl_progname, LDSO_CACHE);
        goto fail;
    }

    strtabsize = _dl_cache_size - sizeof(header_t)
        - header->nlibs * sizeof(libentry_t);
    libent = (libentry_t *) &header[1];

    for (i = 0; i < header->nlibs; i++) {
        if (libent[i].sooffset >= strtabsize
            || libent[i].liboffset >= strtabsize) {
            _dl_dprintf(2, "%s: cache '%s' is corrupt\n",
                        _dl_progname, LDSO_CACHE);
            goto fail;
        }
    }

    return 0;

fail:
    _dl_munmap(_dl_cache_addr, _dl_cache_size);
    _dl_cache_addr = MAP_FAILED;
    return -1;
}
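/*
 * On-disk layout the sanity checks above assume (illustrative sketch only,
 * guarded out: the real definitions live in the ld.so cache headers).  The
 * file is a fixed header, an array of nlibs entries, then a string table;
 * sooffset/liboffset index into that string table and must stay below
 * strtabsize.
 */
#if 0
typedef struct {
    char magic[LDSO_CACHE_MAGIC_LEN];      /* cache magic, checked above */
    char version[LDSO_CACHE_VER_LEN];      /* cache format version */
    int nlibs;                             /* number of libentry_t records */
} header_t;

typedef struct {
    int flags;                             /* library type/ABI flags */
    int sooffset;                          /* soname offset into string table */
    int liboffset;                         /* full path offset into string table */
} libentry_t;
#endif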
void _dl_add_to_slotinfo (struct link_map *l)
{
    /* Now that we know the object is loaded successfully add
       modules containing TLS data to the dtv info table.  We
       might have to increase its size.  */
    struct dtv_slotinfo_list *listp;
    struct dtv_slotinfo_list *prevp;
    size_t idx = l->l_tls_modid;

    _dl_debug_early("Adding to slotinfo for %s\n", l->l_name);

    /* Find the place in the dtv slotinfo list.  */
    listp = _dl_tls_dtv_slotinfo_list;
    prevp = NULL;       /* Needed to shut up gcc.  */
    do {
        /* Does it fit in the array of this list element?  */
        if (idx < listp->len)
            break;
        idx -= listp->len;
        prevp = listp;
        listp = listp->next;
    } while (listp != NULL);

    if (listp == NULL) {
        /* When we come here it means we have to add a new element
           to the slotinfo list.  And the new module must be in
           the first slot.  */
        _dl_assert (idx == 0);

        listp = prevp->next = (struct dtv_slotinfo_list *)
            _dl_malloc (sizeof (struct dtv_slotinfo_list)
                        + TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
        if (listp == NULL) {
            /* We ran out of memory.  We will simply fail this call but
               don't undo anything we did so far.  The application will
               crash or be terminated anyway very soon.  */

            /* We have to do this since some entries in the dtv slotinfo
               array might already point to this generation.  */
            ++_dl_tls_generation;

            _dl_dprintf (_dl_debug_file,
                         "cannot create TLS data structures: ABORT\n");
            _dl_exit (127);
        }

        listp->len = TLS_SLOTINFO_SURPLUS;
        listp->next = NULL;
        _dl_memset (listp->slotinfo, '\0',
                    TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
    }

    /* Add the information into the slotinfo data structure.  */
    listp->slotinfo[idx].map = l;
    listp->slotinfo[idx].gen = _dl_tls_generation + 1;

    /* ??? ideally this would be done once per call to dlopen.  However there's
       no easy way to indicate whether a library used TLS, so do it here
       instead. */
    /* Bump the TLS generation number. */
    _dl_tls_generation++;
}
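/*
 * The list structure being grown above (illustrative sketch only, guarded
 * out: the real definitions come from the TLS support headers).  Each chunk
 * carries a fixed-size array of slots plus a link to the next chunk.
 */
#if 0
struct dtv_slotinfo {
    size_t gen;                      /* generation counter at registration */
    struct link_map *map;            /* module owning this TLS block */
};

struct dtv_slotinfo_list {
    size_t len;                      /* number of slots in this chunk */
    struct dtv_slotinfo_list *next;  /* next chunk, or NULL */
    struct dtv_slotinfo slotinfo[];
};
#endif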
/*
 * We are trying to perform a static TLS relocation in MAP, but it was
 * dynamically loaded.  This can only work if there is enough surplus in
 * the static TLS area already allocated for each running thread.  If this
 * object's TLS segment is too big to fit, we fail.  If it fits,
 * we set MAP->l_tls_offset and return.
 * This function intentionally does not return any value but signals error
 * directly, as static TLS should be rare and the code handling it should
 * stay out of line as much as possible.
 */
void internal_function __attribute_noinline__
_dl_allocate_static_tls (struct link_map *map)
{
    /* If the alignment requirements are too high fail.  */
    if (map->l_tls_align > _dl_tls_static_align) {
fail:
        _dl_dprintf(2, "cannot allocate memory in static TLS block");
        _dl_exit(30);
    }

# ifdef TLS_TCB_AT_TP
    size_t freebytes;
    size_t n;
    size_t blsize;

    freebytes = _dl_tls_static_size - _dl_tls_static_used - TLS_TCB_SIZE;

    blsize = map->l_tls_blocksize + map->l_tls_firstbyte_offset;
    if (freebytes < blsize)
        goto fail;

    n = (freebytes - blsize) & ~(map->l_tls_align - 1);

    size_t offset = _dl_tls_static_used
        + (freebytes - n - map->l_tls_firstbyte_offset);

    map->l_tls_offset = _dl_tls_static_used = offset;
# elif defined(TLS_DTV_AT_TP)
    size_t used;
    size_t check;
    size_t offset = roundup_pow2 (_dl_tls_static_used, map->l_tls_align);

    used = offset + map->l_tls_blocksize;
    check = used;

    /* dl_tls_static_used includes the TCB at the beginning. */
    if (check > _dl_tls_static_size)
        goto fail;

    map->l_tls_offset = offset;
    _dl_tls_static_used = used;
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

    /*
     * If the object is not yet relocated we cannot initialize the
     * static TLS region.  Delay it.
     */
    if (((struct elf_resolve *) map)->init_flag & RELOCS_DONE) {
#ifdef SHARED
        /*
         * Update the slot information data for at least the generation of
         * the DSO we are allocating data for.
         */
        if (__builtin_expect (THREAD_DTV()[0].counter != _dl_tls_generation, 0))
            (void) _dl_update_slotinfo (map->l_tls_modid);
#endif
        _dl_init_static_tls (map);
    } else
        map->l_need_tls_init = 1;
}
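/*
 * Worked example for the TLS_TCB_AT_TP branch above, with hypothetical
 * numbers: _dl_tls_static_size = 2048, _dl_tls_static_used = 0,
 * TLS_TCB_SIZE = 512, l_tls_blocksize = 64, l_tls_align = 16,
 * l_tls_firstbyte_offset = 0.
 *
 *   freebytes = 2048 - 0 - 512        = 1536
 *   blsize    = 64 + 0                = 64
 *   n         = (1536 - 64) & ~15     = 1472
 *   offset    = 0 + (1536 - 1472 - 0) = 64
 *
 * So l_tls_offset becomes 64 (a multiple of the 16-byte alignment) and the
 * module's block occupies [TP - 64, TP), directly below the TCB.
 */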
static int _dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope,
                        ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab)
{
    int reloc_type;
    int symtab_index;
    char *symname;
    unsigned long *reloc_addr;
    unsigned long symbol_addr;
#if defined (__SUPPORT_LD_DEBUG__)
    unsigned long old_val;
#endif

    reloc_addr = (unsigned long *)(intptr_t)
        (tpnt->loadaddr + (unsigned long)rpnt->r_offset);
    reloc_type = ELF32_R_TYPE(rpnt->r_info);
    symtab_index = ELF32_R_SYM(rpnt->r_info);
    symbol_addr = 0;
    symname = strtab + symtab[symtab_index].st_name;

    if (symtab_index) {
        symbol_addr = (unsigned long)_dl_find_hash(symname, scope, tpnt,
                        elf_machine_type_class(reloc_type));

        /*
         * We want to allow undefined references to weak symbols - this
         * might have been intentional.  We should not be linking local
         * symbols here, so all bases should be covered.
         */
        if (unlikely(!symbol_addr &&
                     ELF32_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK))
            return 1;
    }

#if defined (__SUPPORT_LD_DEBUG__)
    old_val = *reloc_addr;
#endif

    switch (reloc_type) {
    case R_386_NONE:
        break;
    case R_386_32:
        *reloc_addr += symbol_addr;
        break;
    case R_386_PC32:
        *reloc_addr += symbol_addr - (unsigned long)reloc_addr;
        break;
    case R_386_GLOB_DAT:
    case R_386_JMP_SLOT:
        *reloc_addr = symbol_addr;
        break;
    case R_386_RELATIVE:
        *reloc_addr += (unsigned long)tpnt->loadaddr;
        break;
    case R_386_COPY:
        if (symbol_addr) {
#if defined (__SUPPORT_LD_DEBUG__)
            if (_dl_debug_move)
                _dl_dprintf(_dl_debug_file, "\n%s move %d bytes from %x to %x",
                            symname, symtab[symtab_index].st_size,
                            symbol_addr, reloc_addr);
#endif
            _dl_memcpy((char *)reloc_addr, (char *)symbol_addr,
                       symtab[symtab_index].st_size);
        }
        break;
    default:
        return -1;
    }

#if defined (__SUPPORT_LD_DEBUG__)
    if (_dl_debug_reloc && _dl_debug_detail)
        _dl_dprintf(_dl_debug_file, "\n\tpatched: %x ==> %x @ %x",
                    old_val, *reloc_addr, reloc_addr);
#endif

    return 0;
}
static int _dl_do_reloc (struct elf_resolve *tpnt, struct dyn_elf *scope,
                         ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab)
{
    int reloc_type;
    int symtab_index;
    unsigned long *reloc_addr;
    unsigned long symbol_addr;
    int goof = 0;

    reloc_addr = (unsigned long *)(intptr_t)
        (tpnt->loadaddr + (unsigned long) rpnt->r_offset);
    reloc_type = ELF32_R_TYPE(rpnt->r_info);
    symtab_index = ELF32_R_SYM(rpnt->r_info);
    symbol_addr = 0;

    if (symtab_index) {
        symbol_addr = (unsigned long)
            _dl_find_hash(strtab + symtab[symtab_index].st_name, scope,
                          (reloc_type == R_SH_JMP_SLOT ? tpnt : NULL),
                          symbolrel);

        /*
         * We want to allow undefined references to weak symbols - this might
         * have been intentional.  We should not be linking local symbols
         * here, so all bases should be covered.
         */
        if (!symbol_addr &&
            ELF32_ST_BIND(symtab[symtab_index].st_info) == STB_GLOBAL) {
            goof++;
        }
    }

#if defined (__SUPPORT_LD_DEBUG__)
    {
        unsigned long old_val = *reloc_addr;
#endif
        switch (reloc_type) {
        case R_SH_NONE:
            break;
        case R_SH_COPY:
            /* handled later on */
            break;
        case R_SH_DIR32:
        case R_SH_GLOB_DAT:
        case R_SH_JMP_SLOT:
            *reloc_addr = symbol_addr + rpnt->r_addend;
            break;
        case R_SH_REL32:
            *reloc_addr = symbol_addr + rpnt->r_addend -
                (unsigned long) reloc_addr;
            break;
        case R_SH_RELATIVE:
            *reloc_addr = (unsigned long) tpnt->loadaddr + rpnt->r_addend;
            break;
        default:
            return -1;  /* call _dl_exit(1) */
        }
#if defined (__SUPPORT_LD_DEBUG__)
        if (_dl_debug_reloc && _dl_debug_detail)
            _dl_dprintf(_dl_debug_file, "\tpatch: %x ==> %x @ %x",
                        old_val, *reloc_addr, reloc_addr);
    }
#endif

    return goof;
}
int _dladdr(void *__address, Dl_info *__dlip)
{
    struct elf_resolve *pelf;
    struct elf_resolve *rpnt;

#ifdef USE_CACHE
    _dl_map_cache();
#endif

    /*
     * Try and locate the module the address is in.
     */
    pelf = NULL;

#if 0
    _dl_dprintf(2, "dladdr( 0x%p, 0x%p )\n", __address, __dlip);
#endif

    for (rpnt = _dl_loaded_modules; rpnt; rpnt = rpnt->next) {
        struct elf_resolve *tpnt;

        tpnt = rpnt;
#if 0
        _dl_dprintf(2, "Module \"%s\" at 0x%p\n", tpnt->libname, tpnt->loadaddr);
#endif
        if (tpnt->loadaddr < (char *) __address
            && (pelf == NULL || pelf->loadaddr < tpnt->loadaddr)) {
            pelf = tpnt;
        }
    }

    if (!pelf) {
        return 0;
    }

    /*
     * Try and locate the nearest symbol at or below the address.
     */
    {
        char *strtab;
        Elf32_Sym *symtab;
        int hn, si;
        int sf;
        int sn = 0;
        void *sa = 0;

        symtab = (Elf32_Sym *) (pelf->dynamic_info[DT_SYMTAB] + pelf->loadaddr);
        strtab = (char *) (pelf->dynamic_info[DT_STRTAB] + pelf->loadaddr);

        sf = 0;
        for (hn = 0; hn < pelf->nbucket; hn++) {
            for (si = pelf->elf_buckets[hn]; si; si = pelf->chains[si]) {
                void *symbol_addr;

                symbol_addr = pelf->loadaddr + symtab[si].st_value;
                if (symbol_addr <= __address && (!sf || sa < symbol_addr)) {
                    sa = symbol_addr;
                    sn = si;
                    sf = 1;
                }
#if 0
                _dl_dprintf(2, "Symbol \"%s\" at 0x%p\n",
                            strtab + symtab[si].st_name, symbol_addr);
#endif
            }
        }

        if (sf) {
            __dlip->dli_fname = pelf->libname;
            __dlip->dli_fbase = pelf->loadaddr;
            __dlip->dli_sname = strtab + symtab[sn].st_name;
            __dlip->dli_saddr = sa;
        }
        return 1;
    }
}
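/*
 * Hypothetical standalone caller showing what gets filled in on success;
 * it uses the public dladdr() wrapper from <dlfcn.h>, not _dladdr()
 * directly, and is guarded out since it does not belong in the loader
 * build.
 */
#if 0
#include <dlfcn.h>
#include <stdio.h>

static void who_owns(void *addr)
{
    Dl_info info;

    if (dladdr(addr, &info) && info.dli_sname)
        printf("%p: %s+%#lx in %s\n", addr, info.dli_sname,
               (unsigned long)((char *)addr - (char *)info.dli_saddr),
               info.dli_fname);
    else
        printf("%p: no symbol information found\n", addr);
}
#endif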
static int _dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope,
                        ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab)
{
    int reloc_type;
    int symtab_index;
    char *symname;
    unsigned long *reloc_addr;
    unsigned long symbol_addr;
#if defined (__SUPPORT_LD_DEBUG__)
    unsigned long old_val;
#endif

    reloc_addr = (unsigned long *)(intptr_t)
        (tpnt->loadaddr + (unsigned long)rpnt->r_offset);
    reloc_type = ELF32_R_TYPE(rpnt->r_info);
    symtab_index = ELF32_R_SYM(rpnt->r_info);
    symbol_addr = 0;
    symname = strtab + symtab[symtab_index].st_name;

    if (symtab_index) {
        if (symtab[symtab_index].st_shndx != SHN_UNDEF &&
            ELF32_ST_BIND(symtab[symtab_index].st_info) == STB_LOCAL) {
            symbol_addr = (unsigned long)tpnt->loadaddr;
        } else {
            symbol_addr = (unsigned long)_dl_find_hash(symname, scope, tpnt,
                            elf_machine_type_class(reloc_type));
        }
        if (unlikely(!symbol_addr &&
                     ELF32_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK)) {
            _dl_dprintf(2, "%s: can't resolve symbol '%s'\n",
                        _dl_progname, symname);
            _dl_exit(1);
        }
        symbol_addr += rpnt->r_addend;
    }

#if defined (__SUPPORT_LD_DEBUG__)
    old_val = *reloc_addr;
#endif

    switch (reloc_type) {
    case R_CRIS_NONE:
        break;
    case R_CRIS_GLOB_DAT:
    case R_CRIS_JUMP_SLOT:
    case R_CRIS_32:
        *reloc_addr = symbol_addr;
        break;
    case R_CRIS_COPY:
#if defined (__SUPPORT_LD_DEBUG__)
        if (_dl_debug_move)
            _dl_dprintf(_dl_debug_file, "\n%s move %d bytes from %x to %x",
                        symname, symtab[symtab_index].st_size,
                        symbol_addr, reloc_addr);
#endif
        _dl_memcpy((char *)reloc_addr, (char *)symbol_addr,
                   symtab[symtab_index].st_size);
        break;
    case R_CRIS_RELATIVE:
        *reloc_addr = (unsigned long)tpnt->loadaddr + rpnt->r_addend;
        break;
    default:
        return -1;  /* Calls _dl_exit(1). */
    }

#if defined (__SUPPORT_LD_DEBUG__)
    if (_dl_debug_reloc && _dl_debug_detail)
        _dl_dprintf(_dl_debug_file, "\n\tpatched: %x ==> %x @ %x",
                    old_val, *reloc_addr, reloc_addr);
#endif

    return 0;
}
static int _dl_do_reloc (struct elf_resolve *tpnt, struct dyn_elf *scope,
                         ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab)
{
    int reloc_type;
    int symtab_index;
    char *symname;
    unsigned long *reloc_addr;
    unsigned long symbol_addr;
#if defined (__SUPPORT_LD_DEBUG__)
    unsigned long old_val;
#endif

    reloc_addr = (unsigned long *)(intptr_t)
        (tpnt->loadaddr + (unsigned long) rpnt->r_offset);
    reloc_type = ELF32_R_TYPE(rpnt->r_info);
    symtab_index = ELF32_R_SYM(rpnt->r_info);
    symbol_addr = 0;
    symname = strtab + symtab[symtab_index].st_name;

    if (symtab_index) {
        symbol_addr = (unsigned long) _dl_find_hash(symname, scope, tpnt,
                        elf_machine_type_class(reloc_type));

        /*
         * We want to allow undefined references to weak symbols - this might
         * have been intentional.  We should not be linking local symbols
         * here, so all bases should be covered.
         */
        if (!symbol_addr &&
            ELF32_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK) {
            _dl_dprintf (2, "%s: can't resolve symbol '%s'\n",
                         _dl_progname, strtab + symtab[symtab_index].st_name);
            _dl_exit (1);
        }
    }

#if defined (__SUPPORT_LD_DEBUG__)
    old_val = *reloc_addr;
#endif

    switch (reloc_type) {
    case R_SH_NONE:
        break;
    case R_SH_COPY:
        if (symbol_addr) {
#if defined (__SUPPORT_LD_DEBUG__)
            if (_dl_debug_move)
                _dl_dprintf(_dl_debug_file, "\n%s move %x bytes from %x to %x",
                            symname, symtab[symtab_index].st_size,
                            symbol_addr, reloc_addr);
#endif
            _dl_memcpy((char *) reloc_addr, (char *) symbol_addr,
                       symtab[symtab_index].st_size);
        }
        return 0;   /* no further LD_DEBUG messages for copy relocs */
    case R_SH_DIR32:
    case R_SH_GLOB_DAT:
    case R_SH_JMP_SLOT:
        *reloc_addr = symbol_addr + rpnt->r_addend;
        break;
    case R_SH_REL32:
        *reloc_addr = symbol_addr + rpnt->r_addend -
            (unsigned long) reloc_addr;
        break;
    case R_SH_RELATIVE:
        *reloc_addr = (unsigned long) tpnt->loadaddr + rpnt->r_addend;
        break;
    default:
        return -1;  /* call _dl_exit(1) */
    }

#if defined (__SUPPORT_LD_DEBUG__)
    if (_dl_debug_reloc && _dl_debug_detail)
        _dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x",
                    old_val, *reloc_addr, reloc_addr);
#endif

    return 0;
}
static int _dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope,
                     unsigned long rel_addr, unsigned long rel_size,
                     int (*reloc_fnc) (struct elf_resolve *tpnt, struct dyn_elf *scope,
                                       ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab))
{
    unsigned int i;
    char *strtab;
    int goof = 0;
    Elf32_Sym *symtab;
    ELF_RELOC *rpnt;
    int symtab_index;

    /* Now parse the relocation information */
    rpnt = (ELF_RELOC *)(intptr_t) (rel_addr + tpnt->loadaddr);
    rel_size = rel_size / sizeof(ELF_RELOC);

    symtab = (Elf32_Sym *)(intptr_t) (tpnt->dynamic_info[DT_SYMTAB] + tpnt->loadaddr);
    strtab = (char *) (tpnt->dynamic_info[DT_STRTAB] + tpnt->loadaddr);

    for (i = 0; i < rel_size; i++, rpnt++) {
        int res;

        symtab_index = ELF32_R_SYM(rpnt->r_info);

        /* When the dynamic linker bootstrapped itself, it resolved some
           symbols.  Make sure we do not do them again. */
        if (!symtab_index && tpnt->libtype == program_interpreter)
            continue;
        if (symtab_index && tpnt->libtype == program_interpreter &&
            _dl_symbol(strtab + symtab[symtab_index].st_name))
            continue;

#if defined (__SUPPORT_LD_DEBUG__)
        debug_sym(symtab, strtab, symtab_index);
        debug_reloc(symtab, strtab, rpnt);
#endif

        res = reloc_fnc(tpnt, scope, rpnt, symtab, strtab);

        if (res == 0)
            continue;

        _dl_dprintf(2, "\n%s: ", _dl_progname);

        if (symtab_index)
            _dl_dprintf(2, "symbol '%s': ",
                        strtab + symtab[symtab_index].st_name);

        if (res < 0) {
            int reloc_type = ELF32_R_TYPE(rpnt->r_info);
#if defined (__SUPPORT_LD_DEBUG__)
            _dl_dprintf(2, "can't handle reloc type %s\n",
                        _dl_reltypes(reloc_type));
#else
            _dl_dprintf(2, "can't handle reloc type %x\n", reloc_type);
#endif
            _dl_exit(-res);
        } else if (res > 0) {
            _dl_dprintf(2, "can't resolve symbol\n");
            goof += res;
        }
    }
    return goof;
}
/* This function's behavior must exactly match that
 * in uClibc/ldso/util/ldd.c */
static struct elf_resolve *
search_for_named_library(const char *name, int secure, const char *path_list,
                         struct dyn_elf **rpnt)
{
    char *path, *path_n, *mylibname;
    struct elf_resolve *tpnt;
    int done;

    if (path_list == NULL)
        return NULL;

    /* We need a writable copy of this string, but we don't
     * need this allocated permanently since we don't want
     * to leak memory, so use alloca to put path on the stack */
    done = _dl_strlen(path_list);
    path = alloca(done + 1);

    /* another bit of local storage */
    mylibname = alloca(2050);

    /* gcc inlines alloca using a single instruction adjusting
     * the stack pointer and no stack overflow check and thus
     * no NULL error return.  No point leaving in dead code... */
#if 0
    if (!path || !mylibname) {
        _dl_dprintf(2, "Out of memory!\n");
        _dl_exit(0);
    }
#endif
    _dl_memcpy(path, path_list, done + 1);

    /* Unlike ldd.c, don't bother to eliminate double //s */

    /* Replace colons with zeros in path_list */
    /* : at the beginning or end of path maps to CWD */
    /* :: anywhere maps CWD */
    /* "" maps to CWD */
    done = 0;
    path_n = path;
    do {
        if (*path == 0) {
            *path = ':';
            done = 1;
        }
        if (*path == ':') {
            *path = 0;
            if (*path_n)
                _dl_strcpy(mylibname, path_n);
            else
                _dl_strcpy(mylibname, ".");  /* Assume current dir if empty path */
            _dl_strcat(mylibname, "/");
            _dl_strcat(mylibname, name);
            if ((tpnt = _dl_load_elf_shared_library(secure, rpnt, mylibname)) != NULL)
                return tpnt;
            path_n = path + 1;
        }
        path++;
    } while (!done);
    return NULL;
}
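/*
 * Example of how the colon handling above expands a search list
 * (illustrative; "libfoo.so" stands in for the requested name):
 *
 *   path_list = "/opt/lib::/usr/local/lib:"
 *
 *   candidates tried, in order:
 *     /opt/lib/libfoo.so
 *     ./libfoo.so                (empty entry between the two colons -> CWD)
 *     /usr/local/lib/libfoo.so
 *     ./libfoo.so                (trailing colon maps to CWD as well)
 */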
static int _dl_do_reloc (struct elf_resolve *tpnt, struct dyn_elf *scope,
                         ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab)
{
    int reloc_type;
    int symtab_index;
    char *symname;
    unsigned long *reloc_addr;
    unsigned long symbol_addr;
#if defined (__SUPPORT_LD_DEBUG__)
    unsigned long old_val;
#endif
    struct elf_resolve *tls_tpnt = NULL;
    struct symbol_ref sym_ref;

    reloc_addr = (unsigned long *)(intptr_t)
        (tpnt->loadaddr + (unsigned long) rpnt->r_offset);
    reloc_type = ELF32_R_TYPE(rpnt->r_info);
    symtab_index = ELF32_R_SYM(rpnt->r_info);
    symbol_addr = 0;
    sym_ref.sym = &symtab[symtab_index];
    sym_ref.tpnt = NULL;

    if (symtab_index) {
        symname = strtab + symtab[symtab_index].st_name;
        symbol_addr = (unsigned long) _dl_find_hash(symname, scope, tpnt,
                        elf_machine_type_class(reloc_type), &sym_ref);

        /*
         * We want to allow undefined references to weak symbols - this might
         * have been intentional.  We should not be linking local symbols
         * here, so all bases should be covered.
         */
        if (!symbol_addr &&
            (ELF_ST_TYPE(symtab[symtab_index].st_info) != STT_TLS) &&
            (ELF32_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK)) {
            _dl_dprintf(2, "%s: can't resolve symbol '%s'\n",
                        _dl_progname, symname);

            /* Let the caller handle the error: it may be non-fatal
               if called from dlopen. */
            return 1;
        }
        tls_tpnt = sym_ref.tpnt;
    }

#if defined (__SUPPORT_LD_DEBUG__)
    old_val = *reloc_addr;
#endif

#if defined USE_TLS && USE_TLS
    /* In the case of a TLS reloc, a NULL tls_tpnt means we have an
       'anonymous' symbol.  This is the case for a static TLS variable, so
       the lookup module is simply the one referencing the TLS variable. */
    if (!tls_tpnt)
        tls_tpnt = tpnt;
#endif

    switch (reloc_type) {
    case R_SH_NONE:
        break;
    case R_SH_COPY:
        if (symbol_addr) {
#if defined (__SUPPORT_LD_DEBUG__)
            if (_dl_debug_move)
                _dl_dprintf(_dl_debug_file, "\n%s move %x bytes from %x to %x",
                            symname, symtab[symtab_index].st_size,
                            symbol_addr, reloc_addr);
#endif
            _dl_memcpy((char *) reloc_addr, (char *) symbol_addr,
                       symtab[symtab_index].st_size);
        }
        return 0;   /* no further LD_DEBUG messages for copy relocs */
    case R_SH_DIR32:
    case R_SH_GLOB_DAT:
    case R_SH_JMP_SLOT:
        *reloc_addr = symbol_addr + rpnt->r_addend;
        break;
    case R_SH_REL32:
        *reloc_addr = symbol_addr + rpnt->r_addend -
            (unsigned long) reloc_addr;
        break;
    case R_SH_RELATIVE:
        *reloc_addr = (unsigned long) tpnt->loadaddr + rpnt->r_addend;
        break;
#if defined USE_TLS && USE_TLS
    case R_SH_TLS_DTPMOD32:
        *reloc_addr = tls_tpnt->l_tls_modid;
        break;
    case R_SH_TLS_DTPOFF32:
        *reloc_addr = symbol_addr;
        break;
    case R_SH_TLS_TPOFF32:
        CHECK_STATIC_TLS ((struct link_map *) tls_tpnt);
        *reloc_addr = tls_tpnt->l_tls_offset + symbol_addr + rpnt->r_addend;
        break;
#endif
    default:
        return -1;
    }

#if defined (__SUPPORT_LD_DEBUG__)
    if (_dl_debug_reloc && _dl_debug_detail)
        _dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x\n",
                    old_val, *reloc_addr, reloc_addr);
#endif

    return 0;
}
static int _dl_do_reloc(struct elf_resolve *tpnt, struct r_scope_elem *scope,
                        ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab)
{
    int reloc_type;
    int symtab_index;
    char *symname;
    struct elf_resolve *tls_tpnt = NULL;
    unsigned long *reloc_addr;
    unsigned long symbol_addr;
#if defined (__SUPPORT_LD_DEBUG__)
    unsigned long old_val;
#endif
    struct symbol_ref sym_ref;

    reloc_addr = (unsigned long *)(intptr_t)
        (tpnt->loadaddr + (unsigned long)rpnt->r_offset);
    reloc_type = ELF32_R_TYPE(rpnt->r_info);
    symtab_index = ELF32_R_SYM(rpnt->r_info);
    symbol_addr = 0;
    sym_ref.sym = &symtab[symtab_index];
    sym_ref.tpnt = NULL;
    symname = strtab + symtab[symtab_index].st_name;

    if (symtab_index) {
        symbol_addr = (unsigned long)_dl_find_hash(symname, scope, tpnt,
                        elf_machine_type_class(reloc_type), &sym_ref);

        /*
         * We want to allow undefined references to weak symbols - this
         * might have been intentional.  We should not be linking local
         * symbols here, so all bases should be covered.
         */
        if (unlikely(!symbol_addr &&
                     (ELF_ST_TYPE(symtab[symtab_index].st_info) != STT_TLS) &&
                     ELF32_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK))
            return 1;

        if (_dl_trace_prelink)
            _dl_debug_lookup (symname, tpnt, &symtab[symtab_index],
                              &sym_ref, elf_machine_type_class(reloc_type));
        tls_tpnt = sym_ref.tpnt;
    } else {
        symbol_addr = symtab[symtab_index].st_value;
        tls_tpnt = tpnt;
    }

#if defined (__SUPPORT_LD_DEBUG__)
    old_val = *reloc_addr;
#endif

    switch (reloc_type) {
    case R_386_NONE:
        break;
    case R_386_32:
        *reloc_addr += symbol_addr;
        break;
    case R_386_PC32:
        *reloc_addr += symbol_addr - (unsigned long)reloc_addr;
        break;
    case R_386_GLOB_DAT:
    case R_386_JMP_SLOT:
        *reloc_addr = symbol_addr;
        break;
    case R_386_RELATIVE:
        *reloc_addr += (unsigned long)tpnt->loadaddr;
        break;
    case R_386_COPY:
        if (symbol_addr) {
#if defined (__SUPPORT_LD_DEBUG__)
            if (_dl_debug_move)
                _dl_dprintf(_dl_debug_file, "\n%s move %d bytes from %x to %x",
                            symname, symtab[symtab_index].st_size,
                            symbol_addr, reloc_addr);
#endif
            _dl_memcpy((char *)reloc_addr, (char *)symbol_addr,
                       symtab[symtab_index].st_size);
        }
        break;
#if defined USE_TLS && USE_TLS
    case R_386_TLS_DTPMOD32:
        *reloc_addr = tls_tpnt->l_tls_modid;
        break;
    case R_386_TLS_DTPOFF32:
        /* During relocation all TLS symbols are defined and used.
         * Therefore the offset is already correct. */
        *reloc_addr = symbol_addr;
        break;
    case R_386_TLS_TPOFF32:
        /* The offset is positive, backward from the thread pointer. */
        CHECK_STATIC_TLS((struct link_map *) tls_tpnt);
        *reloc_addr += tls_tpnt->l_tls_offset - symbol_addr;
        break;
    case R_386_TLS_TPOFF:
        /* The offset is negative, forward from the thread pointer. */
        CHECK_STATIC_TLS((struct link_map *) tls_tpnt);
        *reloc_addr += symbol_addr - tls_tpnt->l_tls_offset;
        break;
#endif
    default:
        return -1;
    }

#if defined (__SUPPORT_LD_DEBUG__)
    if (_dl_debug_reloc && _dl_debug_detail)
        _dl_dprintf(_dl_debug_file, "\n\tpatched: %x ==> %x @ %x\n",
                    old_val, *reloc_addr, reloc_addr);
#endif

    return 0;
}
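/*
 * Worked example for the two thread-pointer-relative forms above, with
 * hypothetical numbers and a zero in-place addend: the module's static
 * block sits at TP - l_tls_offset, say l_tls_offset = 16, and the symbol
 * lives 4 bytes into the block (symbol_addr = 4):
 *
 *   R_386_TLS_TPOFF  : *reloc_addr = 4 - 16  = -12  (access via %gs:-12)
 *   R_386_TLS_TPOFF32: *reloc_addr = 16 - 4  =  12  (positive value the
 *                      consumer subtracts from the thread pointer)
 */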
static int _dl_do_reloc (struct elf_resolve *tpnt, struct r_scope_elem *scope,
                         ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab)
{
    int reloc_type;
    int symtab_index;
    char *symname;
    unsigned long *reloc_addr;
    unsigned long symbol_addr;
    struct symbol_ref sym_ref;
    struct elf_resolve *def_mod = 0;
    int goof = 0;

    reloc_addr = (unsigned long *) (tpnt->loadaddr + (unsigned long) rpnt->r_offset);
    reloc_type = ELF32_R_TYPE(rpnt->r_info);
    symtab_index = ELF32_R_SYM(rpnt->r_info);
    symbol_addr = 0;
    sym_ref.sym = &symtab[symtab_index];
    sym_ref.tpnt = NULL;
    symname = strtab + symtab[symtab_index].st_name;

    if (symtab_index) {
        symbol_addr = (unsigned long) _dl_find_hash(symname, scope, tpnt,
                        elf_machine_type_class(reloc_type), &sym_ref);

        /*
         * We want to allow undefined references to weak symbols - this might
         * have been intentional.  We should not be linking local symbols
         * here, so all bases should be covered.
         */
        if (!symbol_addr &&
            (ELF_ST_TYPE(symtab[symtab_index].st_info) != STT_TLS) &&
            (ELF32_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK)) {
            /* This may be non-fatal if called from dlopen. */
            return 1;
        }
        if (_dl_trace_prelink) {
            _dl_debug_lookup (symname, tpnt, &symtab[symtab_index],
                              &sym_ref, elf_machine_type_class(reloc_type));
        }
        def_mod = sym_ref.tpnt;
    } else {
        /*
         * Relocs against STN_UNDEF are usually treated as using a
         * symbol value of zero, and using the module containing the
         * reloc itself.
         */
        symbol_addr = symtab[symtab_index].st_value;
        def_mod = tpnt;
    }

#if defined (__SUPPORT_LD_DEBUG__)
    {
        unsigned long old_val = *reloc_addr;
#endif
        switch (reloc_type) {
        case R_ARM_NONE:
            break;
        case R_ARM_ABS32:
            *reloc_addr += symbol_addr;
            break;
        case R_ARM_PC24:
#if 0
        {
            unsigned long addend;
            long newvalue, topbits;

            addend = *reloc_addr & 0x00ffffff;
            if (addend & 0x00800000)
                addend |= 0xff000000;

            newvalue = symbol_addr - (unsigned long)reloc_addr + (addend << 2);
            topbits = newvalue & 0xfe000000;
            if (topbits != 0xfe000000 && topbits != 0x00000000) {
                newvalue = fix_bad_pc24(reloc_addr, symbol_addr)
                    - (unsigned long)reloc_addr + (addend << 2);
                topbits = newvalue & 0xfe000000;
                if (unlikely(topbits != 0xfe000000 && topbits != 0x00000000)) {
                    _dl_dprintf(2, "symbol '%s': R_ARM_PC24 relocation out of range.",
                                strtab + symtab[symtab_index].st_name);
                    _dl_exit(1);
                }
            }
            newvalue >>= 2;
            symbol_addr = (*reloc_addr & 0xff000000) | (newvalue & 0x00ffffff);
            *reloc_addr = symbol_addr;
            break;
        }
#else
            _dl_dprintf(2, "R_ARM_PC24: Compile shared libraries with -fPIC!\n");
            _dl_exit(1);
#endif
        case R_ARM_GLOB_DAT:
        case R_ARM_JUMP_SLOT:
            *reloc_addr = symbol_addr;
            break;
        case R_ARM_RELATIVE:
            *reloc_addr += (unsigned long) tpnt->loadaddr;
            break;
        case R_ARM_COPY:
            _dl_memcpy((void *) reloc_addr, (void *) symbol_addr,
                       symtab[symtab_index].st_size);
            break;
#if defined USE_TLS && USE_TLS
        case R_ARM_TLS_DTPMOD32:
            *reloc_addr = def_mod->l_tls_modid;
            break;
        case R_ARM_TLS_DTPOFF32:
            *reloc_addr += symbol_addr;
            break;
        case R_ARM_TLS_TPOFF32:
            CHECK_STATIC_TLS ((struct link_map *) def_mod);
            *reloc_addr += (symbol_addr + def_mod->l_tls_offset);
            break;
#endif
        default:
            return -1;  /* call _dl_exit(1) */
        }
#if defined (__SUPPORT_LD_DEBUG__)
        if (_dl_debug_reloc && _dl_debug_detail)
            _dl_dprintf(_dl_debug_file, "\tpatch: %x ==> %x @ %x",
                        old_val, *reloc_addr, reloc_addr);
    }
#endif

    return goof;
}
__attribute__((__visibility__("hidden")))
struct funcdesc_value volatile *
_dl_linux_resolver (struct elf_resolve *tpnt, int reloc_entry)
{
    int reloc_type;
    ELF_RELOC *this_reloc;
    char *strtab;
    ElfW(Sym) *symtab;
    int symtab_index;
    char *rel_addr;
    struct elf_resolve *new_tpnt;
    char *new_addr;
    struct funcdesc_value funcval;
    struct funcdesc_value volatile *got_entry;
    char *symname;

    rel_addr = (char *)tpnt->dynamic_info[DT_JMPREL];
    this_reloc = (ELF_RELOC *)(intptr_t)(rel_addr + reloc_entry);
    reloc_type = ELF_R_TYPE(this_reloc->r_info);
    symtab_index = ELF_R_SYM(this_reloc->r_info);

    symtab = (Elf32_Sym *) tpnt->dynamic_info[DT_SYMTAB];
    strtab = (char *) tpnt->dynamic_info[DT_STRTAB];
    symname = strtab + symtab[symtab_index].st_name;

    if (reloc_type != R_UBICOM32_FUNCDESC_VALUE) {
        _dl_dprintf(2, "%s: Incorrect relocation type in jump relocations\n",
                    _dl_progname);
        _dl_exit(1);
    }

    /* Address of the GOT entry to fix up. */
    got_entry = (struct funcdesc_value *)
        DL_RELOC_ADDR(tpnt->loadaddr, this_reloc->r_offset);

    /* Get the address to be used to fill in the GOT entry. */
    new_addr = _dl_lookup_hash(symname, tpnt->symbol_scope, NULL, 0, &new_tpnt);
    if (!new_addr) {
        new_addr = _dl_lookup_hash(symname, NULL, NULL, 0, &new_tpnt);
        if (!new_addr) {
            _dl_dprintf(2, "_dl_linux_resolver: %s: can't resolve symbol '%s'\n",
                        _dl_progname, symname);
            _dl_exit(1);
        }
    }

    funcval.entry_point = new_addr;
    funcval.got_value = new_tpnt->loadaddr.got_value;

#if defined (__SUPPORT_LD_DEBUG__)
    if (_dl_debug_bindings) {
        _dl_dprintf(_dl_debug_file, "\nresolve function: %s", symname);
        if (_dl_debug_detail)
            _dl_dprintf(_dl_debug_file, "\n\tpatched (%x,%x) ==> (%x,%x) @ %x\n",
                        got_entry->entry_point, got_entry->got_value,
                        funcval.entry_point, funcval.got_value, got_entry);
    }
    if (1 || !_dl_debug_nofixups) {
        got_entry->entry_point = ((void *)&_dl_ubicom32_resolve_pending);
        got_entry->got_value = funcval.got_value;
        got_entry->entry_point = funcval.entry_point;
    }
#else
    /*
     * Initially point the entry at the pending resolver before starting
     * the update.  This has the effect of putting all other requests in a
     * holding pattern until the resolution is completed.
     */
    got_entry->entry_point = ((void *)&_dl_ubicom32_resolve_pending);
    got_entry->got_value = funcval.got_value;
    got_entry->entry_point = funcval.entry_point;
#endif

    return got_entry;
}