/* This function's behavior must exactly match that
 * in uClibc/ldso/util/ldd.c */
/* Walk the colon-separated PATH_LIST looking for NAME, expanding a leading
 * "$ORIGIN" component from ORIGIN, and try to load each candidate path.
 * Returns the loaded module on success, NULL if no directory yielded it.
 * rflags  - load flags passed through to _dl_load_elf_shared_library;
 *           __RTLD_SECURE suppresses $ORIGIN expansion (see below).
 * rpnt    - chain of loaded modules, updated by the loader on success.
 * origin  - path of the requesting object; only its directory part is used. */
static struct elf_resolve *
search_for_named_library(const char *name, unsigned int rflags, const char *path_list,
	struct dyn_elf **rpnt, const char* origin)
{
	char *mylibname;
	struct elf_resolve *tpnt;
	const char *p, *pn;
	int plen;

	if (path_list==NULL)
		return NULL;

	/* another bit of local storage */
	/* NOTE(review): fixed 2050-byte scratch buffer; a path component plus
	 * NAME longer than that would overflow — assumed bounded by callers.
	 * TODO confirm against the loader's path-length limits. */
	mylibname = alloca(2050);

	/* Unlike ldd.c, don't bother to eliminate double //s */

	/* Replace colons with zeros in path_list */
	/* : at the beginning or end of path maps to CWD */
	/* :: anywhere maps CWD */
	/* "" maps to CWD */
	for (p = path_list; p != NULL; p = pn) {
		/* plen = length of the current component; pn = start of next. */
		pn = _dl_strchr(p + 1, ':');
		if (pn != NULL) {
			plen = pn - p;
			pn++;
		} else
			plen = _dl_strlen(p);

		if (plen >= 7 && _dl_memcmp(p, "$ORIGIN", 7) == 0) {
			int olen;
			/* $ORIGIN is not expanded for SUID/GUID programs
			   (except if it is $ORIGIN alone) */
			if ((rflags & __RTLD_SECURE) && plen != 7)
				continue;
			if (origin == NULL)
				continue;
			/* Find the last '/' in origin: keep only its directory part. */
			for (olen = _dl_strlen(origin) - 1; olen >= 0 && origin[olen] != '/'; olen--)
				;
			if (olen <= 0)
				continue;
			/* Directory of origin + remainder of the component after "$ORIGIN". */
			_dl_memcpy(&mylibname[0], origin, olen);
			_dl_memcpy(&mylibname[olen], p + 7, plen - 7);
			mylibname[olen + plen - 7] = 0;
		} else if (plen != 0) {
			_dl_memcpy(mylibname, p, plen);
			mylibname[plen] = 0;
		} else {
			/* Empty component maps to the current directory. */
			_dl_strcpy(mylibname, ".");
		}
		_dl_strcat(mylibname, "/");
		_dl_strcat(mylibname, name);
#ifdef __LDSO_SAFE_RUNPATH__
		/* Only absolute candidate paths are tried in safe-runpath mode. */
		if (*mylibname == '/')
#endif
			if ((tpnt = _dl_load_elf_shared_library(rflags, rpnt, mylibname)) != NULL)
				return tpnt;
	}
	return NULL;
}
/* Initialize static TLS area and DTV for current (only) thread.
   libpthread implementations should provide their own hook
   to handle all threads.
   Computes the static TLS block address for MAP from the thread pointer
   (direction depends on the TLS variant), records it in the current DTV
   slot, and copies in the module's TLS initialization image.
   Aborts via _dl_exit(30) if the DTV is too small for MAP's module id.  */
void
attribute_hidden __attribute_noinline__
_dl_nothread_init_static_tls (struct link_map *map)
{
# ifdef TLS_TCB_AT_TP
  /* Variant II (e.g. x86): TLS blocks sit below the thread pointer.  */
  void *dest = (char *) THREAD_SELF - map->l_tls_offset;
# elif defined(TLS_DTV_AT_TP)
  /* Variant I: TLS blocks sit above the TCB, past the pre-TCB area.  */
  void *dest = (char *) THREAD_SELF + map->l_tls_offset + TLS_PRE_TCB_SIZE;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /* Fill in the DTV slot so that a later LD/GD access will find it.  */
  /* dtv[-1].counter holds the number of slots in this DTV.  */
  dtv_t *dtv = THREAD_DTV ();
  if (!(map->l_tls_modid <= dtv[-1].counter)) {
	_dl_dprintf(2, "map->l_tls_modid <= dtv[-1].counter FAILED!\n");
	_dl_exit(30);
  }
  dtv[map->l_tls_modid].pointer.val = dest;
  dtv[map->l_tls_modid].pointer.is_static = true;

  /* Initialize the memory: init image first, then zero the TLS BSS part.  */
  _dl_memcpy(dest, map->l_tls_initimage, map->l_tls_initimage_size);
  _dl_memset((dest + map->l_tls_initimage_size), '\0',
	     map->l_tls_blocksize - map->l_tls_initimage_size);
}
/* This function's behavior must exactly match that
 * in uClibc/ldso/util/ldd.c */
/* Search the colon-separated PATH_LIST for NAME by splitting a writable
 * copy of the list in place (colons become NULs) and trying to load each
 * "<component>/<name>" candidate.  Empty components map to ".".
 * Returns the loaded module, or NULL if none of the directories worked. */
static struct elf_resolve *
search_for_named_library(const char *name, unsigned rflags, const char *path_list,
	struct dyn_elf **rpnt)
{
	char *path, *path_n, *mylibname;
	struct elf_resolve *tpnt;
	int done;

	if (path_list==NULL)
		return NULL;

	/* We need a writable copy of this string, but we don't
	 * need this allocated permanently since we don't want
	 * to leak memory, so use alloca to put path on the stack */
	done = _dl_strlen(path_list);
	path = alloca(done + 1);

	/* another bit of local storage */
	/* NOTE(review): fixed 2050-byte buffer; assumes component + name fit. */
	mylibname = alloca(2050);

	_dl_memcpy(path, path_list, done+1);

	/* Unlike ldd.c, don't bother to eliminate double //s */

	/* Replace colons with zeros in path_list */
	/* : at the beginning or end of path maps to CWD */
	/* :: anywhere maps CWD */
	/* "" maps to CWD */
	done = 0;
	path_n = path;		/* path_n marks the start of the current component */
	do {
		/* Treat the terminating NUL as a final ':' and stop after it. */
		if (*path == 0) {
			*path = ':';
			done = 1;
		}
		if (*path == ':') {
			*path = 0;
			if (*path_n)
				_dl_strcpy(mylibname, path_n);
			else
				_dl_strcpy(mylibname, ".");	/* Assume current dir if empty path */
			_dl_strcat(mylibname, "/");
			_dl_strcat(mylibname, name);
#ifdef __LDSO_SAFE_RUNPATH__
			/* Only absolute candidate paths are tried in safe-runpath mode. */
			if (*mylibname == '/')
#endif
			if ((tpnt = _dl_load_elf_shared_library(rflags, rpnt, mylibname)) != NULL)
				return tpnt;
			path_n = path+1;
		}
		path++;
	} while (!done);
	return NULL;
}
/* Allocate a TLS block for MAP (aligned to l_tls_align, l_tls_blocksize
 * bytes), seed it with the module's TLS init image, and zero the rest.
 * Aborts the process via _dl_exit(1) on allocation failure, so the
 * return value is always a valid block. */
static void *
allocate_and_init (struct link_map *map)
{
	void *block = _dl_memalign (map->l_tls_align, map->l_tls_blocksize);

	if (block == NULL) {
		_dl_dprintf(2, "%s:%d: Out of memory!!!\n", __FUNCTION__, __LINE__);
		_dl_exit(1);
	}

	/* Initialize the memory: init image first, then clear the TLS BSS. */
	_dl_memcpy (block, map->l_tls_initimage, map->l_tls_initimage_size);
	_dl_memset ((block + map->l_tls_initimage_size), '\0',
		    map->l_tls_blocksize - map->l_tls_initimage_size);

	return block;
}
/* No, there are cases where the SVr4 linker fails to emit COPY relocs at all */
/* Process a single relocation entry if (and only if) it is an R_SH_COPY:
 * resolve the symbol in SCOPE and copy st_size bytes from the resolved
 * address into the executable's copy at st_value.
 * Returns 0 on success or for non-COPY relocs, 1 if the symbol could
 * not be resolved. */
static int
_dl_do_copy (struct elf_resolve *tpnt, struct dyn_elf *scope,
	     ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab)
{
	unsigned long *dst;
	unsigned long src;
	int sym_idx;

	if (ELF32_R_TYPE(rpnt->r_info) != R_SH_COPY)
		return 0;

	/* Computed for parity with the relocation layout; the copy itself
	 * targets the symbol's st_value, not the reloc offset. */
	dst = (unsigned long *)(intptr_t) (tpnt->loadaddr + (unsigned long) rpnt->r_offset);

	sym_idx = ELF32_R_SYM(rpnt->r_info);
	src = 0;
	if (sym_idx)
		src = (unsigned long) _dl_find_hash(strtab + symtab[sym_idx].st_name,
						    scope, NULL, copyrel);
	/* An unresolved named symbol is reported to the caller.  */
	if (sym_idx && !src)
		return 1;

#if defined (__SUPPORT_LD_DEBUG__)
	if (_dl_debug_move)
		_dl_dprintf(_dl_debug_file,"\n%s move %x bytes from %x to %x",
			    strtab + symtab[sym_idx].st_name,
			    symtab[sym_idx].st_size, src, symtab[sym_idx].st_value);
#endif
	_dl_memcpy((char *) symtab[sym_idx].st_value,
		   (char *) src, symtab[sym_idx].st_size);
	return 0;
}
/* Bring the current thread's DTV up to date with the global slotinfo
   list, far enough to cover module REQ_MODID.  Frees DTV entries for
   unloaded modules, grows the DTV when needed, and marks entries for
   newly loaded modules as deferred (TLS_DTV_UNALLOCATED).
   Returns the link map for REQ_MODID if its memory allocation was
   deferred by this update, otherwise NULL.  */
struct link_map *
_dl_update_slotinfo (unsigned long int req_modid)
{
  struct link_map *the_map = NULL;
  dtv_t *dtv = THREAD_DTV ();

  /* The global dl_tls_dtv_slotinfo array contains for each module
     index the generation counter current when the entry was created.
     This array never shrinks so that all module indices which were
     valid at some time can be used to access it.  Before the first
     use of a new module index in this function the array was extended
     appropriately.  Access also does not have to be guarded against
     modifications of the array.  It is assumed that pointer-size
     values can be read atomically even in SMP environments.  It is
     possible that other threads at the same time dynamically load
     code and therefore add to the slotinfo list.  This is a problem
     since we must not pick up any information about incomplete work.
     The solution to this is to ignore all dtv slots which were
     created after the one we are currently interested.  We know that
     dynamic loading for this module is completed and this is the last
     load operation we know finished.  */
  unsigned long int idx = req_modid;
  struct dtv_slotinfo_list *listp = _dl_tls_dtv_slotinfo_list;

  _dl_debug_early ("Updating slotinfo for module %d\n", req_modid);

  /* Find the slotinfo list chunk containing req_modid.  */
  while (idx >= listp->len)
    {
      idx -= listp->len;
      listp = listp->next;
    }

  if (dtv[0].counter < listp->slotinfo[idx].gen)
    {
      /* The generation counter for the slot is higher than what the
	 current dtv implements.  We have to update the whole dtv but
	 only those entries with a generation counter <= the one for
	 the entry we need.  */
      size_t new_gen = listp->slotinfo[idx].gen;
      size_t total = 0;

      /* We have to look through the entire dtv slotinfo list.  */
      listp = _dl_tls_dtv_slotinfo_list;
      do
	{
	  size_t cnt;

	  /* Module ids start at 1, so skip slot 0 of the first chunk.  */
	  for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
	    {
	      size_t gen = listp->slotinfo[cnt].gen;

	      if (gen > new_gen)
		/* This is a slot for a generation younger than the
		   one we are handling now.  It might be incompletely
		   set up so ignore it.  */
		continue;

	      /* If the entry is older than the current dtv layout we
		 know we don't have to handle it.  */
	      if (gen <= dtv[0].counter)
		continue;

	      /* If there is no map this means the entry is empty.  */
	      struct link_map *map = listp->slotinfo[cnt].map;
	      if (map == NULL)
		{
		  /* If this modid was used at some point the memory
		     might still be allocated.  */
		  if (! dtv[total + cnt].pointer.is_static
		      && dtv[total + cnt].pointer.val != TLS_DTV_UNALLOCATED)
		    {
		      _dl_free (dtv[total + cnt].pointer.val);
		      dtv[total + cnt].pointer.val = TLS_DTV_UNALLOCATED;
		    }
		  continue;
		}

	      /* Check whether the current dtv array is large enough.  */
	      size_t modid = map->l_tls_modid;
	      _dl_assert (total + cnt == modid);
	      if (dtv[-1].counter < modid)
		{
		  /* Reallocate the dtv.  */
		  dtv_t *newp;
		  size_t newsize = _dl_tls_max_dtv_idx + DTV_SURPLUS;
		  size_t oldsize = dtv[-1].counter;

		  _dl_assert (map->l_tls_modid <= newsize);

		  if (dtv == _dl_initial_dtv)
		    {
		      /* This is the initial dtv that was allocated
			 during rtld startup using the dl-minimal.c
			 malloc instead of the real malloc.  We can't
			 free it, we have to abandon the old storage.  */
		      newp = _dl_malloc ((2 + newsize) * sizeof (dtv_t));
		      if (newp == NULL)
			oom ();
		      /* NOTE(review): copies oldsize entries starting at the
			 length slot dtv[-1]; verify the generation counter and
			 last data slot are covered as intended.  */
		      _dl_memcpy (newp, &dtv[-1], oldsize * sizeof (dtv_t));
		    }
		  else
		    {
		      newp = _dl_realloc (&dtv[-1],
					  (2 + newsize) * sizeof (dtv_t));
		      if (newp == NULL)
			oom ();
		    }

		  newp[0].counter = newsize;

		  /* Clear the newly allocated part.  */
		  _dl_memset (newp + 2 + oldsize, '\0',
			      (newsize - oldsize) * sizeof (dtv_t));

		  /* Point dtv to the generation counter.  */
		  dtv = &newp[1];

		  /* Install this new dtv in the thread data
		     structures.  */
		  INSTALL_NEW_DTV (dtv);
		}

	      /* If there is currently memory allocate for this
		 dtv entry free it.  */
	      /* XXX Ideally we will at some point create a memory
		 pool.  */
	      if (! dtv[modid].pointer.is_static
		  && dtv[modid].pointer.val != TLS_DTV_UNALLOCATED)
		/* Note that free is called for NULL is well.  We
		   deallocate even if it is this dtv entry we are
		   supposed to load.  The reason is that we call
		   memalign and not malloc.  */
		_dl_free (dtv[modid].pointer.val);

	      /* This module is loaded dynamically- We defer memory
		 allocation.  */
	      dtv[modid].pointer.is_static = false;
	      dtv[modid].pointer.val = TLS_DTV_UNALLOCATED;

	      if (modid == req_modid)
		the_map = map;
	    }

	  total += listp->len;
	}
      while ((listp = listp->next) != NULL);

      /* This will be the new maximum generation counter.  */
      dtv[0].counter = new_gen;
    }

  return the_map;
}
/* Fill in the DTV of the thread described by RESULT for all currently
   loaded TLS-using modules: static-TLS modules get their block address
   computed relative to RESULT and their init image copied in; dynamic
   modules are marked TLS_DTV_UNALLOCATED for deferred allocation.
   Returns RESULT, or NULL if RESULT is NULL.
   NOTE(review): the return type (void *) appears to be on a line outside
   this view, before `internal_function'.  */
internal_function
_dl_allocate_tls_init (void *result)
{
  if (result == NULL)
    /* The memory allocation failed.  */
    return NULL;

  dtv_t *dtv = GET_DTV (result);
  struct dtv_slotinfo_list *listp;
  size_t total = 0;
  size_t maxgen = 0;

  /* We have to prepare the dtv for all currently loaded modules using
     TLS.  For those which are dynamically loaded we add the values
     indicating deferred allocation.  */
  listp = _dl_tls_dtv_slotinfo_list;
  while (1)
    {
      size_t cnt;

      /* Module ids start at 1, so skip slot 0 of the first chunk.  */
      for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
	{
	  struct link_map *map;
	  void *dest;

	  /* Check for the total number of used slots.  */
	  if (total + cnt > _dl_tls_max_dtv_idx)
	    break;

	  map = listp->slotinfo[cnt].map;
	  if (map == NULL)
	    /* Unused entry.  */
	    continue;

	  /* Keep track of the maximum generation number.  This might
	     not be the generation counter.  */
	  maxgen = MAX (maxgen, listp->slotinfo[cnt].gen);

	  if (map->l_tls_offset == NO_TLS_OFFSET)
	    {
	      /* For dynamically loaded modules we simply store
		 the value indicating deferred allocation.  */
	      dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
	      dtv[map->l_tls_modid].pointer.is_static = false;
	      continue;
	    }

	  _dl_assert (map->l_tls_modid == cnt);
	  _dl_assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
# ifdef TLS_TCB_AT_TP
	  /* Variant II: static TLS blocks live below the TCB.  */
	  _dl_assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize);
	  dest = (char *) result - map->l_tls_offset;
# elif defined(TLS_DTV_AT_TP)
	  /* Variant I: static TLS blocks live above the TCB.  */
	  dest = (char *) result + map->l_tls_offset;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

	  /* Copy the initialization image and clear the BSS part.  */
	  dtv[map->l_tls_modid].pointer.val = dest;
	  dtv[map->l_tls_modid].pointer.is_static = true;
	  _dl_memcpy(dest, map->l_tls_initimage, map->l_tls_initimage_size);
	  _dl_memset((dest + map->l_tls_initimage_size), '\0',
		     map->l_tls_blocksize - map->l_tls_initimage_size);
	}

      total += cnt;
      if (total >= _dl_tls_max_dtv_idx)
	break;

      listp = listp->next;
      _dl_assert (listp != NULL);
    }

  /* The DTV version is up-to-date now.  */
  dtv[0].counter = maxgen;

  return result;
}
/* Apply one SH relocation entry RPNT for module TPNT, resolving any
   referenced symbol through SCOPE.  Returns 0 on success, 1 if a
   required symbol could not be resolved (caller decides fatality,
   e.g. dlopen), and -1 for an unknown relocation type.  */
static int
_dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope,
	      ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab)
{
	int reloc_type;
	int symtab_index;
	char *symname;
	unsigned long *reloc_addr;
	unsigned long symbol_addr;
#if defined (__SUPPORT_LD_DEBUG__)
	unsigned long old_val;
#endif
	struct elf_resolve *tls_tpnt = NULL;
	struct symbol_ref sym_ref;

	/* Address being patched = module load address + reloc offset.  */
	reloc_addr = (unsigned long *)(intptr_t) (tpnt->loadaddr + (unsigned long) rpnt->r_offset);
	reloc_type = ELF32_R_TYPE(rpnt->r_info);
	symtab_index = ELF32_R_SYM(rpnt->r_info);
	symbol_addr = 0;
	sym_ref.sym = &symtab[symtab_index];
	sym_ref.tpnt = NULL;

	if (symtab_index) {
		symname = strtab + symtab[symtab_index].st_name;
		symbol_addr = (unsigned long) _dl_find_hash(symname, scope, tpnt,
				elf_machine_type_class(reloc_type), &sym_ref);

		/*
		 * We want to allow undefined references to weak symbols - this might
		 * have been intentional. We should not be linking local symbols
		 * here, so all bases should be covered.
		 */
		if (!symbol_addr && (ELF_ST_TYPE(symtab[symtab_index].st_info) != STT_TLS)
			&& (ELF32_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK)) {
			_dl_dprintf(2, "%s: can't resolve symbol '%s'\n", _dl_progname, symname);

			/* Let the caller to handle the error: it may be non fatal if called from dlopen */
			return 1;
		}
		tls_tpnt = sym_ref.tpnt;
	}

#if defined (__SUPPORT_LD_DEBUG__)
	/* Capture the pre-patch value for the debug trace below.  */
	old_val = *reloc_addr;
#endif

#if defined USE_TLS && USE_TLS
	/* In case of a TLS reloc, tls_tpnt NULL means we have an 'anonymous'
	   symbol.  This is the case for a static tls variable, so the lookup
	   module is just that one is referencing the tls variable.  */
	if (!tls_tpnt)
		tls_tpnt = tpnt;
#endif
	switch (reloc_type) {
	case R_SH_NONE:
		break;
	case R_SH_COPY:
		if (symbol_addr) {
#if defined (__SUPPORT_LD_DEBUG__)
			if (_dl_debug_move)
				_dl_dprintf(_dl_debug_file,"\n%s move %x bytes from %x to %x",
					    symname, symtab[symtab_index].st_size,
					    symbol_addr, reloc_addr);
#endif
			_dl_memcpy((char *) reloc_addr, (char *) symbol_addr, symtab[symtab_index].st_size);
		}
		return 0; /* no further LD_DEBUG messages for copy relocs */
	case R_SH_DIR32:
	case R_SH_GLOB_DAT:
	case R_SH_JMP_SLOT:
		*reloc_addr = symbol_addr + rpnt->r_addend;
		break;
	case R_SH_REL32:
		/* PC-relative: subtract the address being patched.  */
		*reloc_addr = symbol_addr + rpnt->r_addend -
			(unsigned long) reloc_addr;
		break;
	case R_SH_RELATIVE:
		*reloc_addr = (unsigned long) tpnt->loadaddr + rpnt->r_addend;
		break;
#if defined USE_TLS && USE_TLS
	case R_SH_TLS_DTPMOD32:
		*reloc_addr = tls_tpnt->l_tls_modid;
		break;
	case R_SH_TLS_DTPOFF32:
		*reloc_addr = symbol_addr;
		break;
	case R_SH_TLS_TPOFF32:
		CHECK_STATIC_TLS ((struct link_map *) tls_tpnt);
		*reloc_addr = tls_tpnt->l_tls_offset + symbol_addr + rpnt->r_addend;
		break;
#endif
	default:
		return -1;
	}
#if defined (__SUPPORT_LD_DEBUG__)
	if (_dl_debug_reloc && _dl_debug_detail)
		_dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x\n",
			    old_val, *reloc_addr, reloc_addr);
#endif

	return 0;
}
/* This function's behavior must exactly match that
 * in uClibc/ldso/util/ldd.c */
/* Search the colon-separated PATH_LIST for NAME by splitting a writable
 * copy of the list in place (colons become NULs) and trying to load each
 * "<component>/<name>" candidate.  Empty components map to ".".
 * secure - flag passed through to _dl_load_elf_shared_library.
 * Returns the loaded module, or NULL if none of the directories worked. */
static struct elf_resolve *
search_for_named_library(const char *name, int secure, const char *path_list,
	struct dyn_elf **rpnt)
{
	char *path, *path_n, *mylibname;
	struct elf_resolve *tpnt;
	int done;

	if (path_list==NULL)
		return NULL;

	/* We need a writable copy of this string, but we don't
	 * need this allocated permanently since we don't want
	 * to leak memory, so use alloca to put path on the stack */
	done = _dl_strlen(path_list);
	path = alloca(done + 1);

	/* another bit of local storage */
	/* NOTE(review): fixed 2050-byte buffer; assumes component + name fit. */
	mylibname = alloca(2050);

	/* gcc inlines alloca using a single instruction adjusting
	 * the stack pointer and no stack overflow check and thus
	 * no NULL error return.  No point leaving in dead code... */
#if 0
	if (!path || !mylibname) {
		_dl_dprintf(2, "Out of memory!\n");
		_dl_exit(0);
	}
#endif

	_dl_memcpy(path, path_list, done+1);

	/* Unlike ldd.c, don't bother to eliminate double //s */

	/* Replace colons with zeros in path_list */
	/* : at the beginning or end of path maps to CWD */
	/* :: anywhere maps CWD */
	/* "" maps to CWD */
	done = 0;
	path_n = path;		/* path_n marks the start of the current component */
	do {
		/* Treat the terminating NUL as a final ':' and stop after it. */
		if (*path == 0) {
			*path = ':';
			done = 1;
		}
		if (*path == ':') {
			*path = 0;
			if (*path_n)
				_dl_strcpy(mylibname, path_n);
			else
				_dl_strcpy(mylibname, ".");	/* Assume current dir if empty path */
			_dl_strcat(mylibname, "/");
			_dl_strcat(mylibname, name);
			if ((tpnt = _dl_load_elf_shared_library(secure, rpnt, mylibname)) != NULL)
				return tpnt;
			path_n = path+1;
		}
		path++;
	} while (!done);
	return NULL;
}
/* Apply one ARM relocation entry RPNT for module TPNT, resolving any
   referenced symbol through SCOPE.  Returns 0 (goof) on success, 1 if
   a required symbol could not be resolved (may be non-fatal for
   dlopen), and -1 for an unknown relocation type.
   NOTE: under __SUPPORT_LD_DEBUG__ a brace block opens inside one #if
   and closes inside a later #if — the preprocessor structure must be
   kept exactly as-is.  */
static int
_dl_do_reloc (struct elf_resolve *tpnt,struct r_scope_elem *scope,
	      ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab)
{
	int reloc_type;
	int symtab_index;
	char *symname;
	unsigned long *reloc_addr;
	unsigned long symbol_addr;
	struct symbol_ref sym_ref;
	struct elf_resolve *def_mod = 0;
	int goof = 0;

	/* Address being patched = module load address + reloc offset.  */
	reloc_addr = (unsigned long *) (tpnt->loadaddr + (unsigned long) rpnt->r_offset);
	reloc_type = ELF32_R_TYPE(rpnt->r_info);
	symtab_index = ELF32_R_SYM(rpnt->r_info);
	symbol_addr = 0;
	sym_ref.sym = &symtab[symtab_index];
	sym_ref.tpnt = NULL;
	symname = strtab + symtab[symtab_index].st_name;

	if (symtab_index) {
		symbol_addr = _dl_find_hash(symname, scope, tpnt,
					    elf_machine_type_class(reloc_type), &sym_ref);

		/*
		 * We want to allow undefined references to weak symbols - this might
		 * have been intentional. We should not be linking local symbols
		 * here, so all bases should be covered.
		 */
		if (!symbol_addr && (ELF_ST_TYPE(symtab[symtab_index].st_info) != STT_TLS)
		    && (ELF32_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK)) {
			/* This may be non-fatal if called from dlopen.  */
			return 1;
		}
		if (_dl_trace_prelink) {
			_dl_debug_lookup (symname, tpnt, &symtab[symtab_index],
					  &sym_ref, elf_machine_type_class(reloc_type));
		}
		def_mod = sym_ref.tpnt;
	} else {
		/*
		 * Relocs against STN_UNDEF are usually treated as using a
		 * symbol value of zero, and using the module containing the
		 * reloc itself.
		 */
		symbol_addr = symtab[symtab_index].st_value;
		def_mod = tpnt;
	}

#if defined (__SUPPORT_LD_DEBUG__)
	{
		/* Capture the pre-patch value for the debug trace below.  */
		unsigned long old_val = *reloc_addr;
#endif

		switch (reloc_type) {
			case R_ARM_NONE:
				break;
			case R_ARM_ABS32:
				*reloc_addr += symbol_addr;
				break;
			case R_ARM_PC24:
#if 0
				/* Dead code: in-range fixup for branch relocs,
				   kept for reference only.  */
				{
					unsigned long addend;
					long newvalue, topbits;

					addend = *reloc_addr & 0x00ffffff;
					if (addend & 0x00800000) addend |= 0xff000000;

					newvalue = symbol_addr - (unsigned long)reloc_addr + (addend << 2);
					topbits = newvalue & 0xfe000000;
					if (topbits != 0xfe000000 && topbits != 0x00000000)
					{
						newvalue = fix_bad_pc24(reloc_addr, symbol_addr)
							- (unsigned long)reloc_addr + (addend << 2);
						topbits = newvalue & 0xfe000000;
						if (unlikely(topbits != 0xfe000000 && topbits != 0x00000000))
						{
							_dl_dprintf(2,"symbol '%s': R_ARM_PC24 relocation out of range.",
								symtab[symtab_index].st_name);
							_dl_exit(1);
						}
					}
					newvalue >>= 2;
					symbol_addr = (*reloc_addr & 0xff000000) | (newvalue & 0x00ffffff);
					*reloc_addr = symbol_addr;
					break;
				}
#else
				/* R_ARM_PC24 is not supported at runtime; abort.  */
				_dl_dprintf(2,"R_ARM_PC24: Compile shared libraries with -fPIC!\n");
				_dl_exit(1);
#endif
			case R_ARM_GLOB_DAT:
			case R_ARM_JUMP_SLOT:
				*reloc_addr = symbol_addr;
				break;
			case R_ARM_RELATIVE:
				*reloc_addr += (unsigned long) tpnt->loadaddr;
				break;
			case R_ARM_COPY:
				_dl_memcpy((void *) reloc_addr, (void *) symbol_addr,
					   symtab[symtab_index].st_size);
				break;
#if defined USE_TLS && USE_TLS
			case R_ARM_TLS_DTPMOD32:
				*reloc_addr = def_mod->l_tls_modid;
				break;
			case R_ARM_TLS_DTPOFF32:
				*reloc_addr += symbol_addr;
				break;
			case R_ARM_TLS_TPOFF32:
				CHECK_STATIC_TLS ((struct link_map *) def_mod);
				*reloc_addr += (symbol_addr + def_mod->l_tls_offset);
				break;
#endif
			default:
				return -1; /*call _dl_exit(1) */
		}
#if defined (__SUPPORT_LD_DEBUG__)
		if (_dl_debug_reloc && _dl_debug_detail)
			_dl_dprintf(_dl_debug_file, "\tpatch: %x ==> %x @ %x",
				    old_val, *reloc_addr, reloc_addr);
	}
#endif

	return goof;
}
/* Apply one CRIS relocation entry RPNT for module TPNT, resolving any
   referenced symbol through SCOPE.  Local defined symbols resolve to
   the module's own load address (CRIS-specific shortcut); unresolved
   non-weak symbols are fatal here (_dl_exit(1)).  Returns 0 on
   success, -1 for an unknown relocation type.  */
static int
_dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope,
	     ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab)
{
	int reloc_type;
	int symtab_index;
	char *symname;
	unsigned long *reloc_addr;
	unsigned long symbol_addr;
#if defined (__SUPPORT_LD_DEBUG__)
	unsigned long old_val;
#endif

	/* Address being patched = module load address + reloc offset.  */
	reloc_addr = (unsigned long *)(intptr_t)(tpnt->loadaddr + (unsigned long)rpnt->r_offset);
	reloc_type = ELF32_R_TYPE(rpnt->r_info);
	symtab_index = ELF32_R_SYM(rpnt->r_info);
	symbol_addr = 0;
	symname = strtab + symtab[symtab_index].st_name;

	if (symtab_index) {
		if (symtab[symtab_index].st_shndx != SHN_UNDEF &&
		    ELF32_ST_BIND(symtab[symtab_index].st_info) == STB_LOCAL) {
			/* Defined local symbols bind within this module.  */
			symbol_addr = (unsigned long)tpnt->loadaddr;
		} else {
			symbol_addr = (unsigned long)_dl_find_hash(symname, scope, tpnt,
					elf_machine_type_class(reloc_type));
		}

		/* Undefined weak references are allowed; anything else
		   unresolved is fatal.  */
		if (unlikely(!symbol_addr &&
			     ELF32_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK)) {
			_dl_dprintf(2, "%s: can't resolve symbol '%s'\n", _dl_progname, symname);
			_dl_exit(1);
		}	/* fixed: removed stray ';' that formed an empty statement */
		symbol_addr += rpnt->r_addend;
	}

#if defined (__SUPPORT_LD_DEBUG__)
	/* Capture the pre-patch value for the debug trace below.  */
	old_val = *reloc_addr;
#endif

	switch (reloc_type) {
	case R_CRIS_NONE:
		break;
	case R_CRIS_GLOB_DAT:
	case R_CRIS_JUMP_SLOT:
	case R_CRIS_32:
		*reloc_addr = symbol_addr;
		break;
	case R_CRIS_COPY:
#if defined (__SUPPORT_LD_DEBUG__)
		if (_dl_debug_move)
			_dl_dprintf(_dl_debug_file, "\n%s move %d bytes from %x to %x",
				    symname, symtab[symtab_index].st_size,
				    symbol_addr, reloc_addr);
#endif
		_dl_memcpy((char *)reloc_addr, (char *)symbol_addr,
			   symtab[symtab_index].st_size);
		break;
	case R_CRIS_RELATIVE:
		*reloc_addr = (unsigned long)tpnt->loadaddr + rpnt->r_addend;
		break;
	default:
		return -1;	/* Calls _dl_exit(1). */
	}

#if defined (__SUPPORT_LD_DEBUG__)
	if (_dl_debug_reloc && _dl_debug_detail)
		_dl_dprintf(_dl_debug_file, "\n\tpatched: %x ==> %x @ %x",
			    old_val, *reloc_addr, reloc_addr);
#endif

	return 0;
}
/* Apply one i386 relocation entry RPNT for module TPNT, resolving any
   referenced symbol through SCOPE.  Returns 0 on success, 1 if a
   required symbol could not be resolved (caller decides fatality),
   and -1 for an unknown relocation type.  */
static int
_dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope,
	     ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab)
{
	int reloc_type;
	int symtab_index;
	char *symname;
	unsigned long *reloc_addr;
	unsigned long symbol_addr;
#if defined (__SUPPORT_LD_DEBUG__)
	unsigned long old_val;
#endif

	/* Address being patched = module load address + reloc offset.  */
	reloc_addr = (unsigned long *)(intptr_t)(tpnt->loadaddr + (unsigned long)rpnt->r_offset);
	reloc_type = ELF32_R_TYPE(rpnt->r_info);
	symtab_index = ELF32_R_SYM(rpnt->r_info);
	symbol_addr = 0;
	symname = strtab + symtab[symtab_index].st_name;

	if (symtab_index) {
		symbol_addr = (unsigned long)_dl_find_hash(symname, scope, tpnt,
				elf_machine_type_class(reloc_type));

		/*
		 * We want to allow undefined references to weak symbols - this
		 * might have been intentional. We should not be linking local
		 * symbols here, so all bases should be covered.
		 */
		if (unlikely(!symbol_addr &&
			     ELF32_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK))
			return 1;
	}

#if defined (__SUPPORT_LD_DEBUG__)
	/* Capture the pre-patch value for the debug trace below.  */
	old_val = *reloc_addr;
#endif

	switch (reloc_type) {
	case R_386_NONE:
		break;
	case R_386_32:
		*reloc_addr += symbol_addr;
		break;
	case R_386_PC32:
		/* PC-relative: subtract the address being patched.  */
		*reloc_addr += symbol_addr - (unsigned long)reloc_addr;
		break;
	case R_386_GLOB_DAT:
	case R_386_JMP_SLOT:
		*reloc_addr = symbol_addr;
		break;
	case R_386_RELATIVE:
		*reloc_addr += (unsigned long)tpnt->loadaddr;
		break;
	case R_386_COPY:
		if (symbol_addr) {
#if defined (__SUPPORT_LD_DEBUG__)
			if (_dl_debug_move)
				_dl_dprintf(_dl_debug_file, "\n%s move %d bytes from %x to %x",
					    symname, symtab[symtab_index].st_size,
					    symbol_addr, reloc_addr);
#endif
			_dl_memcpy((char *)reloc_addr, (char *)symbol_addr,
				   symtab[symtab_index].st_size);
		}
		break;
	default:
		return -1;
	}

#if defined (__SUPPORT_LD_DEBUG__)
	if (_dl_debug_reloc && _dl_debug_detail)
		_dl_dprintf(_dl_debug_file, "\n\tpatched: %x ==> %x @ %x",
			    old_val, *reloc_addr, reloc_addr);
#endif

	return 0;
}
/* Apply one i386 relocation entry RPNT (TLS-aware variant) for module
   TPNT, resolving any referenced symbol through SCOPE.  Returns 0 on
   success, 1 if a required symbol could not be resolved (caller
   decides fatality), and -1 for an unknown relocation type.  */
static int
_dl_do_reloc(struct elf_resolve *tpnt, struct r_scope_elem *scope,
	     ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab)
{
	int reloc_type;
	int symtab_index;
	char *symname;
	struct elf_resolve *tls_tpnt = NULL;
	unsigned long *reloc_addr;
	unsigned long symbol_addr;
#if defined (__SUPPORT_LD_DEBUG__)
	unsigned long old_val;
#endif
	struct symbol_ref sym_ref;

	/* Address being patched = module load address + reloc offset.  */
	reloc_addr = (unsigned long *)(intptr_t)(tpnt->loadaddr + (unsigned long)rpnt->r_offset);
	reloc_type = ELF32_R_TYPE(rpnt->r_info);
	symtab_index = ELF32_R_SYM(rpnt->r_info);
	symbol_addr = 0;
	sym_ref.sym = &symtab[symtab_index];
	sym_ref.tpnt = NULL;
	symname = strtab + symtab[symtab_index].st_name;

	if (symtab_index) {
		symbol_addr = (unsigned long)_dl_find_hash(symname, scope, tpnt,
				elf_machine_type_class(reloc_type), &sym_ref);

		/*
		 * We want to allow undefined references to weak symbols - this
		 * might have been intentional. We should not be linking local
		 * symbols here, so all bases should be covered.
		 */
		if (unlikely(!symbol_addr &&
			     (ELF_ST_TYPE(symtab[symtab_index].st_info) != STT_TLS) &&
			     ELF32_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK))
			return 1;
		if (_dl_trace_prelink)
			_dl_debug_lookup (symname, tpnt, &symtab[symtab_index],
					  &sym_ref, elf_machine_type_class(reloc_type));
		tls_tpnt = sym_ref.tpnt;
	} else {
		/* STN_UNDEF: use the symbol value and the module containing
		   the reloc itself.  */
		symbol_addr = symtab[symtab_index].st_value;
		tls_tpnt = tpnt;
	}

#if defined (__SUPPORT_LD_DEBUG__)
	/* Capture the pre-patch value for the debug trace below.  */
	old_val = *reloc_addr;
#endif

	switch (reloc_type) {
	case R_386_NONE:
		break;
	case R_386_32:
		*reloc_addr += symbol_addr;
		break;
	case R_386_PC32:
		/* PC-relative: subtract the address being patched.  */
		*reloc_addr += symbol_addr - (unsigned long)reloc_addr;
		break;
	case R_386_GLOB_DAT:
	case R_386_JMP_SLOT:
		*reloc_addr = symbol_addr;
		break;
	case R_386_RELATIVE:
		*reloc_addr += (unsigned long)tpnt->loadaddr;
		break;
	case R_386_COPY:
		if (symbol_addr) {
#if defined (__SUPPORT_LD_DEBUG__)
			if (_dl_debug_move)
				_dl_dprintf(_dl_debug_file, "\n%s move %d bytes from %x to %x",
					    symname, symtab[symtab_index].st_size,
					    symbol_addr, reloc_addr);
#endif
			_dl_memcpy((char *)reloc_addr,
				   (char *)symbol_addr,
				   symtab[symtab_index].st_size);
		}
		break;
#if defined USE_TLS && USE_TLS
	case R_386_TLS_DTPMOD32:
		*reloc_addr = tls_tpnt->l_tls_modid;
		break;
	case R_386_TLS_DTPOFF32:
		/* During relocation all TLS symbols are defined and used.
		 * Therefore the offset is already correct. */
		*reloc_addr = symbol_addr;
		break;
	case R_386_TLS_TPOFF32:
		/* The offset is positive, backward from the thread pointer. */
		CHECK_STATIC_TLS((struct link_map*) tls_tpnt);
		*reloc_addr += tls_tpnt->l_tls_offset - symbol_addr;
		break;
	case R_386_TLS_TPOFF:
		/* The offset is negative, forward from the thread pointer. */
		CHECK_STATIC_TLS((struct link_map*) tls_tpnt);
		*reloc_addr += symbol_addr - tls_tpnt->l_tls_offset;
		break;
#endif
	default:
		return -1;
	}

#if defined (__SUPPORT_LD_DEBUG__)
	if (_dl_debug_reloc && _dl_debug_detail)
		_dl_dprintf(_dl_debug_file, "\n\tpatched: %x ==> %x @ %x\n",
			    old_val, *reloc_addr, reloc_addr);
#endif

	return 0;
}
/* Apply one SH relocation entry RPNT (pre-TLS variant) for module
   TPNT, resolving any referenced symbol through SCOPE.  Unresolved
   non-weak symbols are fatal here (_dl_exit(1)).  Returns 0 on
   success, -1 for an unknown relocation type.  */
static int
_dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope,
	      ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab)
{
	int reloc_type;
	int symtab_index;
	char *symname;
	unsigned long *reloc_addr;
	unsigned long symbol_addr;
#if defined (__SUPPORT_LD_DEBUG__)
	unsigned long old_val;
#endif

	/* Address being patched = module load address + reloc offset.  */
	reloc_addr = (unsigned long *)(intptr_t) (tpnt->loadaddr + (unsigned long) rpnt->r_offset);
	reloc_type = ELF32_R_TYPE(rpnt->r_info);
	symtab_index = ELF32_R_SYM(rpnt->r_info);
	symbol_addr = 0;
	symname = strtab + symtab[symtab_index].st_name;

	if (symtab_index) {
		symbol_addr = (unsigned long) _dl_find_hash(symname, scope, tpnt,
				elf_machine_type_class(reloc_type));

		/*
		 * We want to allow undefined references to weak symbols - this might
		 * have been intentional. We should not be linking local symbols
		 * here, so all bases should be covered.
		 */
		if (!symbol_addr && ELF32_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK) {
			/* Consistency: use the symname computed above rather
			   than recomputing strtab + st_name.  */
			_dl_dprintf (2, "%s: can't resolve symbol '%s'\n",
				     _dl_progname, symname);
			_dl_exit (1);
		}
	}

#if defined (__SUPPORT_LD_DEBUG__)
	/* Capture the pre-patch value for the debug trace below.  */
	old_val = *reloc_addr;
#endif

	switch (reloc_type) {
	case R_SH_NONE:
		break;
	case R_SH_COPY:
		if (symbol_addr) {
#if defined (__SUPPORT_LD_DEBUG__)
			if(_dl_debug_move)
				_dl_dprintf(_dl_debug_file,"\n%s move %x bytes from %x to %x",
					    symname, symtab[symtab_index].st_size,
					    symbol_addr, reloc_addr);
#endif
			_dl_memcpy((char *) reloc_addr, (char *) symbol_addr,
				   symtab[symtab_index].st_size);
		}
		return 0; /* no further LD_DEBUG messages for copy relocs */
	case R_SH_DIR32:
	case R_SH_GLOB_DAT:
	case R_SH_JMP_SLOT:
		*reloc_addr = symbol_addr + rpnt->r_addend;
		break;
	case R_SH_REL32:
		/* PC-relative: subtract the address being patched.  */
		*reloc_addr = symbol_addr + rpnt->r_addend -
			(unsigned long) reloc_addr;
		break;
	case R_SH_RELATIVE:
		*reloc_addr = (unsigned long) tpnt->loadaddr + rpnt->r_addend;
		break;
	default:
		return -1; /*call _dl_exit(1) */
	}

#if defined (__SUPPORT_LD_DEBUG__)
	if(_dl_debug_reloc && _dl_debug_detail)
		_dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x", old_val,
			    *reloc_addr, reloc_addr);
#endif

	return 0;
}