/* * Open a directory. */ DIR * _dl_opendir(const char *name) { DIR *dirp; int fd; struct stat sb; if ((fd = _dl_open(name, O_RDONLY | O_NONBLOCK)) < 0) return (NULL); if (_dl_fstat(fd, &sb) || !S_ISDIR(sb.st_mode)) { _dl_close(fd); return (NULL); } if (_dl_fcntl(fd, F_SETFD, FD_CLOEXEC) < 0 || (dirp = (DIR *)_dl_malloc(sizeof(DIR))) == NULL) { _dl_close(fd); return (NULL); } dirp->dd_len = _dl_round_page(sb.st_blksize); dirp->dd_buf = _dl_malloc(dirp->dd_len); if (dirp->dd_buf == NULL) { _dl_free(dirp); _dl_close (fd); return (NULL); } dirp->dd_seek = 0; dirp->dd_loc = 0; dirp->dd_fd = fd; return (dirp); }
/*
 * close a directory.
 *
 * Releases the read buffer and the DIR handle itself, then closes
 * the underlying descriptor.  Returns the result of _dl_close().
 */
int
_dl_closedir(DIR *dirp)
{
	int saved_fd;

	/* Grab the fd before the handle is torn down. */
	saved_fd = dirp->dd_fd;
	dirp->dd_fd = -1;
	dirp->dd_loc = 0;

	_dl_free(dirp->dd_buf);
	_dl_free(dirp);

	return (_dl_close(saved_fd));
}
/*
 * Map the ld.so library cache file (LDSO_CACHE) read-only into memory
 * and validate it.
 *
 * _dl_cache_addr acts as a tri-state: NULL means "not tried yet",
 * MAP_FAILED means "tried and failed, don't retry", anything else is
 * the live mapping.  Returns 0 on success (cache mapped and sane),
 * -1 on any failure.
 */
int _dl_map_cache(void)
{
	int fd;
	struct stat st;
	header_t *header;
	libentry_t *libent;
	int i, strtabsize;

	/* Previous attempt failed permanently — don't retry. */
	if (_dl_cache_addr == MAP_FAILED)
		return -1;
	/* Already mapped. */
	else if (_dl_cache_addr != NULL)
		return 0;

	if (_dl_stat(LDSO_CACHE, &st)
	    || (fd = _dl_open(LDSO_CACHE, O_RDONLY|O_CLOEXEC, 0)) < 0) {
		_dl_cache_addr = MAP_FAILED;	/* so we won't try again */
		return -1;
	}

	_dl_cache_size = st.st_size;
	_dl_cache_addr = _dl_mmap(0, _dl_cache_size, PROT_READ,
	    LDSO_CACHE_MMAP_FLAGS, fd, 0);
	/* The mapping keeps the cache alive; the fd is no longer needed. */
	_dl_close(fd);
	if (_dl_mmap_check_error(_dl_cache_addr)) {
		_dl_dprintf(2, "%s:%i: can't map '%s'\n",
		    _dl_progname, __LINE__, LDSO_CACHE);
		return -1;
	}

	/*
	 * Sanity-check the cache: big enough for the header, correct
	 * magic and version, big enough for the advertised library
	 * table, and NUL-terminated (so string-table reads can't run
	 * off the end of the mapping).
	 */
	header = (header_t *) _dl_cache_addr;
	if (_dl_cache_size < sizeof(header_t) ||
	    _dl_memcmp(header->magic, LDSO_CACHE_MAGIC, LDSO_CACHE_MAGIC_LEN)
	    || _dl_memcmp(header->version, LDSO_CACHE_VER, LDSO_CACHE_VER_LEN)
	    || _dl_cache_size <
	    (sizeof(header_t) + header->nlibs * sizeof(libentry_t))
	    || _dl_cache_addr[_dl_cache_size - 1] != '\0') {
		_dl_dprintf(2, "%s: cache '%s' is corrupt\n", _dl_progname,
		    LDSO_CACHE);
		goto fail;
	}

	/* Every entry's string offsets must land inside the string table. */
	strtabsize = _dl_cache_size - sizeof(header_t) -
	    header->nlibs * sizeof(libentry_t);
	libent = (libentry_t *) & header[1];
	for (i = 0; i < header->nlibs; i++) {
		if (libent[i].sooffset >= strtabsize ||
		    libent[i].liboffset >= strtabsize) {
			_dl_dprintf(2, "%s: cache '%s' is corrupt\n",
			    _dl_progname, LDSO_CACHE);
			goto fail;
		}
	}

	return 0;

fail:
	_dl_munmap(_dl_cache_addr, _dl_cache_size);
	_dl_cache_addr = MAP_FAILED;	/* remember the failure */
	return -1;
}
/*
 * dlclose(3) entry point.
 *
 * Hands the handle to _dl_close() and reports unconditional success;
 * any result from _dl_close() is discarded.
 * NOTE(review): here `map' is an object handle, not a file descriptor —
 * presumably this _dl_close is the object unloader, unlike the
 * fd-closing helper used elsewhere in this file; confirm against the
 * linker's internal headers.
 */
int __libc_dlclose (void *map)
{
	_dl_close (map);
	return 0;
}
elf_object_t * _dl_tryload_shlib(const char *libname, int type, int flags) { int libfile, i; struct load_list *next_load, *load_list = NULL; Elf_Addr maxva = 0, minva = ELFDEFNNAME(NO_ADDR); Elf_Addr libaddr, loff, align = _dl_pagesz - 1; elf_object_t *object; char hbuf[4096]; Elf_Dyn *dynp = 0; Elf_Ehdr *ehdr; Elf_Phdr *phdp; struct stat sb; void *prebind_data; #define ROUND_PG(x) (((x) + align) & ~(align)) #define TRUNC_PG(x) ((x) & ~(align)) libfile = _dl_open(libname, O_RDONLY); if (libfile < 0) { _dl_errno = DL_CANT_OPEN; return(0); } if ( _dl_fstat(libfile, &sb) < 0) { _dl_errno = DL_CANT_OPEN; return(0); } for (object = _dl_objects; object != NULL; object = object->next) { if (object->dev == sb.st_dev && object->inode == sb.st_ino) { object->obj_flags |= flags & DF_1_GLOBAL; _dl_close(libfile); if (_dl_loading_object == NULL) _dl_loading_object = object; if (object->load_object != _dl_objects && object->load_object != _dl_loading_object) { _dl_link_grpref(object->load_object, _dl_loading_object); } return(object); } } _dl_read(libfile, hbuf, sizeof(hbuf)); ehdr = (Elf_Ehdr *)hbuf; if (ehdr->e_ident[0] != ELFMAG0 || ehdr->e_ident[1] != ELFMAG1 || ehdr->e_ident[2] != ELFMAG2 || ehdr->e_ident[3] != ELFMAG3 || ehdr->e_type != ET_DYN || ehdr->e_machine != MACHID) { _dl_close(libfile); _dl_errno = DL_NOT_ELF; return(0); } /* * Alright, we might have a winner! * Figure out how much VM space we need. 
*/ phdp = (Elf_Phdr *)(hbuf + ehdr->e_phoff); for (i = 0; i < ehdr->e_phnum; i++, phdp++) { switch (phdp->p_type) { case PT_LOAD: if (phdp->p_vaddr < minva) minva = phdp->p_vaddr; if (phdp->p_vaddr + phdp->p_memsz > maxva) maxva = phdp->p_vaddr + phdp->p_memsz; break; case PT_DYNAMIC: dynp = (Elf_Dyn *)phdp->p_vaddr; break; case PT_TLS: _dl_printf("%s: unsupported TLS program header in %s\n", _dl_progname, libname); _dl_close(libfile); _dl_errno = DL_CANT_LOAD_OBJ; return(0); default: break; } } minva = TRUNC_PG(minva); maxva = ROUND_PG(maxva); /* * We map the entire area to see that we can get the VM * space required. Map it unaccessible to start with. * * We must map the file we'll map later otherwise the VM * system won't be able to align the mapping properly * on VAC architectures. */ libaddr = (Elf_Addr)_dl_mmap(0, maxva - minva, PROT_NONE, MAP_PRIVATE|MAP_FILE, libfile, 0); if (_dl_mmap_error(libaddr)) { _dl_printf("%s: rtld mmap failed mapping %s.\n", _dl_progname, libname); _dl_close(libfile); _dl_errno = DL_CANT_MMAP; return(0); } loff = libaddr - minva; phdp = (Elf_Phdr *)(hbuf + ehdr->e_phoff); for (i = 0; i < ehdr->e_phnum; i++, phdp++) { switch (phdp->p_type) { case PT_LOAD: { char *start = (char *)(TRUNC_PG(phdp->p_vaddr)) + loff; Elf_Addr off = (phdp->p_vaddr & align); Elf_Addr size = off + phdp->p_filesz; void *res; if (size != 0) { res = _dl_mmap(start, ROUND_PG(size), PFLAGS(phdp->p_flags), MAP_FIXED|MAP_PRIVATE, libfile, TRUNC_PG(phdp->p_offset)); } else res = NULL; /* silence gcc */ next_load = _dl_malloc(sizeof(struct load_list)); next_load->next = load_list; load_list = next_load; next_load->start = start; next_load->size = size; next_load->prot = PFLAGS(phdp->p_flags); if (size != 0 && _dl_mmap_error(res)) { _dl_printf("%s: rtld mmap failed mapping %s.\n", _dl_progname, libname); _dl_close(libfile); _dl_errno = DL_CANT_MMAP; _dl_munmap((void *)libaddr, maxva - minva); _dl_load_list_free(load_list); return(0); } if (phdp->p_flags & PF_W) { /* 
Zero out everything past the EOF */ if ((size & align) != 0) _dl_memset(start + size, 0, _dl_pagesz - (size & align)); if (ROUND_PG(size) == ROUND_PG(off + phdp->p_memsz)) continue; start = start + ROUND_PG(size); size = ROUND_PG(off + phdp->p_memsz) - ROUND_PG(size); res = _dl_mmap(start, size, PFLAGS(phdp->p_flags), MAP_FIXED|MAP_PRIVATE|MAP_ANON, -1, 0); if (_dl_mmap_error(res)) { _dl_printf("%s: rtld mmap failed mapping %s.\n", _dl_progname, libname); _dl_close(libfile); _dl_errno = DL_CANT_MMAP; _dl_munmap((void *)libaddr, maxva - minva); _dl_load_list_free(load_list); return(0); } } break; } case PT_OPENBSD_RANDOMIZE: _dl_randombuf((char *)(phdp->p_vaddr + loff), phdp->p_memsz); break; default: break; } } prebind_data = prebind_load_fd(libfile, libname); _dl_close(libfile); dynp = (Elf_Dyn *)((unsigned long)dynp + loff); object = _dl_finalize_object(libname, dynp, (Elf_Phdr *)((char *)libaddr + ehdr->e_phoff), ehdr->e_phnum,type, libaddr, loff); if (object) { object->prebind_data = prebind_data; object->load_size = maxva - minva; /*XXX*/ object->load_list = load_list; /* set inode, dev from stat info */ object->dev = sb.st_dev; object->inode = sb.st_ino; object->obj_flags |= flags; _dl_set_sod(object->load_name, &object->sod); } else { _dl_munmap((void *)libaddr, maxva - minva); _dl_load_list_free(load_list); } return(object); }
elf_object_t * _dl_tryload_shlib(const char *libname, int type, int flags) { int libfile, i; struct load_list *ld, *lowld = NULL; elf_object_t *object; Elf_Dyn *dynp = 0; Elf_Ehdr *ehdr; Elf_Phdr *phdp; Elf_Addr load_end = 0; Elf_Addr align = _dl_pagesz - 1, off, size; struct stat sb; void *prebind_data; char hbuf[4096]; #define ROUND_PG(x) (((x) + align) & ~(align)) #define TRUNC_PG(x) ((x) & ~(align)) libfile = _dl_open(libname, O_RDONLY); if (libfile < 0) { _dl_errno = DL_CANT_OPEN; return(0); } if ( _dl_fstat(libfile, &sb) < 0) { _dl_errno = DL_CANT_OPEN; return(0); } for (object = _dl_objects; object != NULL; object = object->next) { if (object->dev == sb.st_dev && object->inode == sb.st_ino) { object->obj_flags |= flags & RTLD_GLOBAL; _dl_close(libfile); if (_dl_loading_object == NULL) _dl_loading_object = object; if (object->load_object != _dl_objects && object->load_object != _dl_loading_object) { _dl_link_grpref(object->load_object, _dl_loading_object); } return(object); } } _dl_read(libfile, hbuf, sizeof(hbuf)); ehdr = (Elf_Ehdr *)hbuf; if (ehdr->e_ident[0] != ELFMAG0 || ehdr->e_ident[1] != ELFMAG1 || ehdr->e_ident[2] != ELFMAG2 || ehdr->e_ident[3] != ELFMAG3 || ehdr->e_type != ET_DYN || ehdr->e_machine != MACHID) { _dl_close(libfile); _dl_errno = DL_NOT_ELF; return(0); } /* Insertion sort */ #define LDLIST_INSERT(ld) do { \ struct load_list **_ld; \ for (_ld = &lowld; *_ld != NULL; _ld = &(*_ld)->next) \ if ((*_ld)->moff > ld->moff) \ break; \ ld->next = *_ld; \ *_ld = ld; \ } while (0) /* * Alright, we might have a winner! * Figure out how much VM space we need and set up the load * list that we'll use to find free VM space. 
*/ phdp = (Elf_Phdr *)(hbuf + ehdr->e_phoff); for (i = 0; i < ehdr->e_phnum; i++, phdp++) { switch (phdp->p_type) { case PT_LOAD: off = (phdp->p_vaddr & align); size = off + phdp->p_filesz; ld = _dl_malloc(sizeof(struct load_list)); ld->start = NULL; ld->size = size; ld->moff = TRUNC_PG(phdp->p_vaddr); ld->foff = TRUNC_PG(phdp->p_offset); ld->prot = PFLAGS(phdp->p_flags); LDLIST_INSERT(ld); if ((ld->prot & PROT_WRITE) == 0 || ROUND_PG(size) == ROUND_PG(off + phdp->p_memsz)) break; /* This phdr has a zfod section */ ld = _dl_malloc(sizeof(struct load_list)); ld->start = NULL; ld->size = ROUND_PG(off + phdp->p_memsz) - ROUND_PG(size); ld->moff = TRUNC_PG(phdp->p_vaddr) + ROUND_PG(size); ld->foff = -1; ld->prot = PFLAGS(phdp->p_flags); LDLIST_INSERT(ld); break; case PT_DYNAMIC: dynp = (Elf_Dyn *)phdp->p_vaddr; break; default: break; } } #define LOFF ((Elf_Addr)lowld->start - lowld->moff) retry: for (ld = lowld; ld != NULL; ld = ld->next) { off_t foff; int fd, flags; /* * We don't want to provide the fd/off hint for anything * but the first mapping, all other might have * cache-incoherent aliases and will cause this code to * loop forever. */ if (ld == lowld) { fd = libfile; foff = ld->foff; flags = 0; } else { fd = -1; foff = 0; flags = MAP_FIXED; } ld->start = (void *)(LOFF + ld->moff); /* * Magic here. * The first mquery is done with MAP_FIXED to see if * the mapping we want is free. If it's not, we redo the * mquery without MAP_FIXED to get the next free mapping, * adjust the base mapping address to match this free mapping * and restart the process again. 
*/ ld->start = _dl_mquery(ld->start, ROUND_PG(ld->size), ld->prot, flags, fd, foff); if (_dl_mmap_error(ld->start)) { ld->start = (void *)(LOFF + ld->moff); ld->start = _dl_mquery(ld->start, ROUND_PG(ld->size), ld->prot, flags & ~MAP_FIXED, fd, foff); if (_dl_mmap_error(ld->start)) goto fail; } if (ld->start != (void *)(LOFF + ld->moff)) { lowld->start = ld->start - ld->moff + lowld->moff; goto retry; } /* * XXX - we need some kind of boundary condition here, * or fix mquery to not run into the stack */ } for (ld = lowld; ld != NULL; ld = ld->next) { int fd, flags; off_t foff; void *res; if (ld->foff < 0) { fd = -1; foff = 0; flags = MAP_FIXED|MAP_PRIVATE|MAP_ANON; } else { fd = libfile; foff = ld->foff; flags = MAP_FIXED|MAP_PRIVATE; } res = _dl_mmap(ld->start, ROUND_PG(ld->size), ld->prot, flags, fd, foff); if (_dl_mmap_error(res)) goto fail; /* Zero out everything past the EOF */ if ((ld->prot & PROT_WRITE) != 0 && (ld->size & align) != 0) _dl_memset((char *)ld->start + ld->size, 0, _dl_pagesz - (ld->size & align)); load_end = (Elf_Addr)ld->start + ROUND_PG(ld->size); } prebind_data = prebind_load_fd(libfile, libname); _dl_close(libfile); dynp = (Elf_Dyn *)((unsigned long)dynp + LOFF); object = _dl_finalize_object(libname, dynp, (Elf_Phdr *)((char *)lowld->start + ehdr->e_phoff), ehdr->e_phnum, type, (Elf_Addr)lowld->start, LOFF); if (object) { object->prebind_data = prebind_data; object->load_size = (Elf_Addr)load_end - (Elf_Addr)lowld->start; object->load_list = lowld; /* set inode, dev from stat info */ object->dev = sb.st_dev; object->inode = sb.st_ino; object->obj_flags |= flags; _dl_build_sod(object->load_name, &object->sod); } else { /* XXX no point. object is never returned NULL */ _dl_load_list_free(lowld); } return(object); fail: _dl_printf("%s: rtld mmap failed mapping %s.\n", _dl_progname, libname); _dl_close(libfile); _dl_errno = DL_CANT_MMAP; _dl_load_list_free(lowld); return(0); }