/*
 * Process a shared object's program header.  This is used only for the
 * main program, when the kernel has already loaded the main program
 * into memory before calling the dynamic linker.  It creates and
 * returns an Obj_Entry structure.
 */
Obj_Entry *
_rtld_digest_phdr(const Elf_Phdr *phdr, int phnum, caddr_t entry)
{
	Obj_Entry *obj;
	const Elf_Phdr *phlimit = phdr + phnum;
	const Elf_Phdr *ph;
	int nsegs = 0;
	Elf_Addr vaddr;

	obj = _rtld_obj_new();

	/*
	 * First pass: locate the PT_PHDR entry.  The difference between
	 * the address the headers actually occupy (phdr) and the virtual
	 * address they claim (p_vaddr) is the relocation base.
	 */
	for (ph = phdr; ph < phlimit; ++ph) {
		if (ph->p_type != PT_PHDR)
			continue;
		/*
		 * Fix: read the PT_PHDR entry's own fields (ph), not the
		 * first program header's (phdr).  The old code only worked
		 * when PT_PHDR happened to be the first entry.
		 */
		obj->phdr = (void *)(uintptr_t)ph->p_vaddr;
		obj->phsize = ph->p_memsz;
		obj->relocbase = (caddr_t)((uintptr_t)phdr -
		    (uintptr_t)ph->p_vaddr);
		dbg(("headers: phdr %p phsize %zu relocbase %lx",
		    obj->phdr, obj->phsize, (long)obj->relocbase));
		break;
	}

	/* Main program must be loaded at its link-time address here. */
	assert(obj->phdr == phdr);

	/*
	 * Second pass: digest the remaining entries.  All addresses are
	 * adjusted by the relocation base computed above.
	 */
	for (ph = phdr; ph < phlimit; ++ph) {
		vaddr = (Elf_Addr)(uintptr_t)(obj->relocbase + ph->p_vaddr);
		switch (ph->p_type) {
		case PT_INTERP:
			obj->interp = (const char *)(uintptr_t)vaddr;
			break;

		case PT_LOAD:
			/* We rely on exactly two load segments: text, data. */
			assert(nsegs < 2);
			if (nsegs == 0) {	/* First load segment */
				obj->vaddrbase = round_down(vaddr);
				obj->mapbase =
				    (caddr_t)(uintptr_t)obj->vaddrbase;
				obj->textsize =
				    round_up(vaddr + ph->p_memsz) -
				    obj->vaddrbase;
			} else {		/* Last load segment */
				obj->mapsize =
				    round_up(vaddr + ph->p_memsz) -
				    obj->vaddrbase;
			}
			++nsegs;
			break;

		case PT_DYNAMIC:
			obj->dynamic = (Elf_Dyn *)(uintptr_t)vaddr;
			break;
		}
	}
	assert(nsegs == 2);

	obj->entry = entry;
	return obj;
}
/*
 * Map a shared object into memory.  The argument is a file descriptor,
 * which must be open on the object and positioned at its beginning.
 *
 * The return value is a pointer to a newly-allocated Obj_Entry structure
 * for the shared object.  Returns NULL on failure.
 *
 * path - pathname of the object (copied into the Obj_Entry).
 * fd   - open file descriptor positioned at the start of the object.
 * sb   - optional stat of the object; supplies dev/ino identity when
 *        non-NULL, may be NULL.
 */
Obj_Entry *
_rtld_map_object(const char *path, int fd, const struct stat *sb)
{
	Obj_Entry *obj;
	Elf_Ehdr *ehdr;
	Elf_Phdr *phdr;
	size_t phsize;
	Elf_Phdr *phlimit;
	Elf_Phdr *segs[2];	/* the text and data PT_LOAD entries */
	int nsegs;
	caddr_t mapbase = MAP_FAILED;
	size_t mapsize = 0;
	size_t bsssize = 0;
	int mapflags;
	Elf_Off base_offset;
#ifdef MAP_ALIGNED
	Elf_Addr base_alignment;
#endif
	Elf_Addr base_vaddr;
	Elf_Addr base_vlimit;
	Elf_Addr text_vlimit;
	int text_flags;
	caddr_t base_addr;
	Elf_Off data_offset;
	Elf_Addr data_vaddr;
	Elf_Addr data_vlimit;
	int data_flags;
	caddr_t data_addr;
	Elf_Addr phdr_vaddr;
	size_t phdr_memsz;
	caddr_t gap_addr;
	size_t gap_size;
	int i;
#ifdef RTLD_LOADER
	Elf_Addr clear_vaddr;
	caddr_t clear_addr;
	size_t nclear;
#endif

	/* A file too small to hold an ELF header cannot be an object. */
	if (sb != NULL && sb->st_size < (off_t)sizeof (Elf_Ehdr)) {
		_rtld_error("%s: unrecognized file format1", path);
		return NULL;
	}

	obj = _rtld_obj_new();
	obj->path = xstrdup(path);
	obj->pathlen = strlen(path);
	if (sb != NULL) {
		/* dev/ino identify the object for duplicate detection. */
		obj->dev = sb->st_dev;
		obj->ino = sb->st_ino;
	}

	/* Map the first page so we can inspect the ELF and program headers. */
	ehdr = mmap(NULL, _rtld_pagesz, PROT_READ, MAP_FILE | MAP_SHARED, fd,
	    (off_t)0);
	obj->ehdr = ehdr;
	if (ehdr == MAP_FAILED) {
		_rtld_error("%s: read error: %s", path, xstrerror(errno));
		goto bad;
	}
	/* Make sure the file is valid */
	if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0 ||
	    ehdr->e_ident[EI_CLASS] != ELFCLASS) {
		_rtld_error("%s: unrecognized file format2 [%x != %x]", path,
		    ehdr->e_ident[EI_CLASS], ELFCLASS);
		goto bad;
	}
	/* Elf_e_ident includes class */
	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr->e_version != EV_CURRENT ||
	    ehdr->e_ident[EI_DATA] != ELFDEFNNAME(MACHDEP_ENDIANNESS)) {
		_rtld_error("%s: unsupported file version", path);
		goto bad;
	}
	if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN) {
		_rtld_error("%s: unsupported file type", path);
		goto bad;
	}
	switch (ehdr->e_machine) {
		ELFDEFNNAME(MACHDEP_ID_CASES)
	default:
		_rtld_error("%s: unsupported machine", path);
		goto bad;
	}

	/*
	 * We rely on the program header being in the first page.  This is
	 * not strictly required by the ABI specification, but it seems to
	 * always true in practice.  And, it simplifies things considerably.
	 */
	assert(ehdr->e_phentsize == sizeof(Elf_Phdr));
	assert(ehdr->e_phoff + ehdr->e_phnum * sizeof(Elf_Phdr) <=
	    _rtld_pagesz);

	/*
	 * Scan the program header entries, and save key information.
	 *
	 * We rely on there being exactly two load segments, text and data,
	 * in that order.
	 */
	phdr = (Elf_Phdr *) ((caddr_t)ehdr + ehdr->e_phoff);
	phsize = ehdr->e_phnum * sizeof(phdr[0]);
	obj->phdr = NULL;
	phdr_vaddr = EA_UNDEF;
	phdr_memsz = 0;
	phlimit = phdr + ehdr->e_phnum;
	nsegs = 0;
	while (phdr < phlimit) {
		switch (phdr->p_type) {
		case PT_INTERP:
			/* p_vaddr is unrelocated here; fixed up later. */
			obj->interp = (void *)(uintptr_t)phdr->p_vaddr;
			dbg(("%s: PT_INTERP %p", obj->path, obj->interp));
			break;

		case PT_LOAD:
			/* Remember only the first two; excess is an error
			 * diagnosed after the scan. */
			if (nsegs < 2)
				segs[nsegs] = phdr;
			++nsegs;
			dbg(("%s: PT_LOAD %p", obj->path, phdr));
			break;

		case PT_PHDR:
			phdr_vaddr = phdr->p_vaddr;
			phdr_memsz = phdr->p_memsz;
			dbg(("%s: PT_PHDR %p phsize %zu", obj->path,
			    (void *)(uintptr_t)phdr_vaddr, phdr_memsz));
			break;

		case PT_DYNAMIC:
			obj->dynamic = (void *)(uintptr_t)phdr->p_vaddr;
			dbg(("%s: PT_DYNAMIC %p", obj->path, obj->dynamic));
			break;
		}

		++phdr;
	}
	/* Reset to the first entry for the copy fallback below. */
	phdr = (Elf_Phdr *) ((caddr_t)ehdr + ehdr->e_phoff);
	obj->entry = (void *)(uintptr_t)ehdr->e_entry;
	if (!obj->dynamic) {
		_rtld_error("%s: not dynamically linked", path);
		goto bad;
	}
	if (nsegs != 2) {
		_rtld_error("%s: wrong number of segments (%d != 2)", path,
		    nsegs);
		goto bad;
	}

	/*
	 * Map the entire address space of the object as a file
	 * region to stake out our contiguous region and establish a
	 * base for relocation.  We use a file mapping so that
	 * the kernel will give us whatever alignment is appropriate
	 * for the platform we're running on.
	 *
	 * We map it using the text protection, map the data segment
	 * into the right place, then map an anon segment for the bss
	 * and unmap the gaps left by padding to alignment.
	 */
#ifdef MAP_ALIGNED
	base_alignment = segs[0]->p_align;
#endif
	base_offset = round_down(segs[0]->p_offset);
	base_vaddr = round_down(segs[0]->p_vaddr);
	base_vlimit = round_up(segs[1]->p_vaddr + segs[1]->p_memsz);
	text_vlimit = round_up(segs[0]->p_vaddr + segs[0]->p_memsz);
	text_flags = protflags(segs[0]->p_flags);
	data_offset = round_down(segs[1]->p_offset);
	data_vaddr = round_down(segs[1]->p_vaddr);
	data_vlimit = round_up(segs[1]->p_vaddr + segs[1]->p_filesz);
	data_flags = protflags(segs[1]->p_flags);
#ifdef RTLD_LOADER
	/* Region past the file-backed data that must be zeroed (bss start). */
	clear_vaddr = segs[1]->p_vaddr + segs[1]->p_filesz;
#endif

	obj->textsize = text_vlimit - base_vaddr;
	obj->vaddrbase = base_vaddr;
	obj->isdynamic = ehdr->e_type == ET_DYN;

	/*
	 * Decide whether the program headers will be covered by a load
	 * segment (so they can be referenced in place) or must be copied.
	 */
	obj->phdr_loaded = false;
	for (i = 0; i < nsegs; i++) {
		/* Preferred: PT_PHDR told us where the headers land. */
		if (phdr_vaddr != EA_UNDEF &&
		    segs[i]->p_vaddr <= phdr_vaddr &&
		    segs[i]->p_memsz >= phdr_memsz) {
			obj->phdr_loaded = true;
			break;
		}
		/* Fallback: segment covers the headers' file offset. */
		if (segs[i]->p_offset <= ehdr->e_phoff &&
		    segs[i]->p_memsz >= phsize) {
			phdr_vaddr = segs[i]->p_vaddr + ehdr->e_phoff;
			phdr_memsz = phsize;
			obj->phdr_loaded = true;
			break;
		}
	}
	if (obj->phdr_loaded) {
		obj->phdr = (void *)(uintptr_t)phdr_vaddr;
		obj->phsize = phdr_memsz;
	} else {
		/* Not mapped anywhere: keep a private heap copy. */
		Elf_Phdr *buf;
		buf = xmalloc(phsize);
		if (buf == NULL) {
			_rtld_error("%s: cannot allocate program header",
			    path);
			goto bad;
		}
		memcpy(buf, phdr, phsize);
		obj->phdr = buf;
		obj->phsize = phsize;
	}
	dbg(("%s: phdr %p phsize %zu (%s)", obj->path, obj->phdr, obj->phsize,
	    obj->phdr_loaded ? "loaded" : "allocated"));

	/* Unmap header if it overlaps the first load section. */
	if (base_offset < _rtld_pagesz) {
		munmap(ehdr, _rtld_pagesz);
		obj->ehdr = MAP_FAILED;
	}

	/*
	 * Calculate log2 of the base section alignment.
	 */
	mapflags = 0;
#ifdef MAP_ALIGNED
	if (base_alignment > _rtld_pagesz) {
		unsigned int log2 = 0;
		for (; base_alignment > 1; base_alignment >>= 1)
			log2++;
		mapflags = MAP_ALIGNED(log2);
	}
/*
 * Process a shared object's program header.  This is used only for the
 * main program, when the kernel has already loaded the main program
 * into memory before calling the dynamic linker.  It creates and
 * returns an Obj_Entry structure.
 *
 * phdr  - address of the program header table, already in memory.
 * phnum - number of entries in the table.
 * entry - the program's entry point, recorded in the Obj_Entry.
 */
Obj_Entry *
_rtld_digest_phdr(const Elf_Phdr *phdr, int phnum, caddr_t entry)
{
	Obj_Entry *obj;
	const Elf_Phdr *phlimit = phdr + phnum;
	const Elf_Phdr *ph;
	int nsegs = 0;
	Elf_Addr vaddr;

	obj = _rtld_obj_new();

	/*
	 * First pass: find PT_PHDR.  The relocation base is the
	 * difference between where the headers actually are (phdr) and
	 * the virtual address they were linked at (ph->p_vaddr); it is
	 * zero for a non-relocated executable.
	 */
	for (ph = phdr; ph < phlimit; ++ph) {
		if (ph->p_type != PT_PHDR)
			continue;
		obj->phdr = (void *)(uintptr_t)ph->p_vaddr;
		obj->phsize = ph->p_memsz;
		obj->relocbase = (caddr_t)((uintptr_t)phdr -
		    (uintptr_t)ph->p_vaddr);
		dbg(("headers: phdr %p (%p) phsize %zu relocbase %p",
		    obj->phdr, phdr, obj->phsize, obj->relocbase));
		break;
	}

	/*
	 * Second pass: digest the remaining entries, relocating every
	 * virtual address by the base computed above.
	 */
	for (ph = phdr; ph < phlimit; ++ph) {
		vaddr = (Elf_Addr)(uintptr_t)(obj->relocbase + ph->p_vaddr);
		switch (ph->p_type) {
		case PT_INTERP:
			obj->interp = (const char *)(uintptr_t)vaddr;
			dbg(("headers: %s %p phsize %" PRImemsz,
			    "PT_INTERP", (void *)(uintptr_t)vaddr,
			    ph->p_memsz));
			break;

		case PT_LOAD:
			/* Exactly two load segments expected: text, data. */
			assert(nsegs < 2);
			if (nsegs == 0) {	/* First load segment */
				obj->vaddrbase = round_down(vaddr);
				obj->mapbase =
				    (caddr_t)(uintptr_t)obj->vaddrbase;
				obj->textsize =
				    round_up(vaddr + ph->p_memsz) -
				    obj->vaddrbase;
			} else {		/* Last load segment */
				obj->mapsize =
				    round_up(vaddr + ph->p_memsz) -
				    obj->vaddrbase;
			}
			++nsegs;
			dbg(("headers: %s %p phsize %" PRImemsz,
			    "PT_LOAD", (void *)(uintptr_t)vaddr,
			    ph->p_memsz));
			break;

		case PT_DYNAMIC:
			obj->dynamic = (Elf_Dyn *)(uintptr_t)vaddr;
			dbg(("headers: %s %p phsize %" PRImemsz,
			    "PT_DYNAMIC", (void *)(uintptr_t)vaddr,
			    ph->p_memsz));
			break;

#if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
		case PT_TLS:
			/* Main program's TLS block always gets index 1. */
			obj->tlsindex = 1;
			obj->tlssize = ph->p_memsz;
			obj->tlsalign = ph->p_align;
			obj->tlsinitsize = ph->p_filesz;
			/* NOTE: unrelocated p_vaddr, unlike the other
			 * cases which use the adjusted vaddr. */
			obj->tlsinit = (void *)(uintptr_t)ph->p_vaddr;
			dbg(("headers: %s %p phsize %" PRImemsz,
			    "PT_TLS", (void *)(uintptr_t)vaddr,
			    ph->p_memsz));
			break;
#endif
#ifdef __ARM_EABI__
		case PT_ARM_EXIDX:
			/* ARM EABI exception-index table bounds. */
			obj->exidx_start = (void *)(uintptr_t)vaddr;
			obj->exidx_sz = ph->p_memsz;
			dbg(("headers: %s %p phsize %" PRImemsz,
			    "PT_ARM_EXIDX", (void *)(uintptr_t)vaddr,
			    ph->p_memsz));
			break;
#endif
		}
	}
	assert(nsegs == 2);

	obj->entry = entry;
	return obj;
}