Example #1
/* view_size can be the size of the first mapping, to handle non-contiguous
 * modules -- we'll update the module's size here
 */
void
os_module_area_init(module_area_t *ma, app_pc base, size_t view_size, bool at_map,
                    const char *filepath, uint64 inode HEAPACCT(which_heap_t which))
{
    app_pc mod_base, mod_end;
    ptr_int_t load_delta;
    char *soname = NULL;
    ASSERT(module_is_header(base, view_size));

    /* i#1589: use privload data if it exists (for client lib) */
    if (!privload_fill_os_module_info(base, &mod_base, &mod_end, &soname, &ma->os_data)) {
        /* XXX i#1860: on Android we'll fail to fill in info from .dynamic, so
         * we'll have incomplete data until the loader maps the segment with .dynamic.
         * ma->os_data.have_dynamic_info indicates whether we have the info.
         */
        module_walk_program_headers(base, view_size, at_map,
                                    !at_map, /* i#1589: ld.so relocates .dynamic */
                                    &mod_base, NULL, &mod_end, &soname, &ma->os_data);
    }
    if (ma->os_data.contiguous) {
        app_pc map_end = ma->os_data.segments[ma->os_data.num_segments - 1].end;
        module_list_add_mapping(ma, base, map_end);
        /* update, since may just be 1st segment size */
        ma->end = map_end;
    } else {
        /* Add the non-contiguous segments (i#160/PR 562667).  We could just add
         * them all separately but vmvectors are more efficient with fewer
         * entries so we merge.  We don't want general merging in our vector
         * either.
         */
        app_pc seg_base;
        uint i;
        ASSERT(ma->os_data.num_segments > 0 && ma->os_data.segments != NULL);
        seg_base = ma->os_data.segments[0].start;
        for (i = 1; i < ma->os_data.num_segments; i++) {
            if (ma->os_data.segments[i].start > ma->os_data.segments[i - 1].end ||
                /* XXX: for shared we just add the first one.  But if the first
                 * module is unloaded we'll be missing an entry for the others.
                 * We assume this won't happen b/c our only use of this now is
                 * the MacOS dyld shared cache's shared __LINKEDIT segment.  If
                 * it could happen we should switch to a refcount in the vector.
                 */
                ma->os_data.segments[i - 1].shared) {
                if (!ma->os_data.segments[i - 1].shared ||
                    !vmvector_overlap(loaded_module_areas, seg_base,
                                      ma->os_data.segments[i - 1].end)) {
                    module_list_add_mapping(ma, seg_base,
                                            ma->os_data.segments[i - 1].end);
                }
                seg_base = ma->os_data.segments[i].start;
            }
        }
        if (!ma->os_data.segments[i - 1].shared ||
            !vmvector_overlap(loaded_module_areas, seg_base,
                              ma->os_data.segments[i - 1].end))
            module_list_add_mapping(ma, seg_base, ma->os_data.segments[i - 1].end);
        DOLOG(2, LOG_VMAREAS, {
            LOG(GLOBAL, LOG_INTERP | LOG_VMAREAS, 2, "segment list\n");
            for (i = 0; i < ma->os_data.num_segments; i++) {
                LOG(GLOBAL, LOG_INTERP | LOG_VMAREAS, 2,
                    "\tsegment %d: [" PFX "," PFX ") prot=%x\n", i,
                    ma->os_data.segments[i].start, ma->os_data.segments[i].end,
                    ma->os_data.segments[i].prot);
            }
        });
    }
}
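
A standalone sketch of the merge loop above, for reference: it coalesces adjacent segments so that one mapping is added per contiguous run, keeping the vmvector small. The toy_segment_t type, add_mapping(), merge_segments(), and the sample addresses are hypothetical stand-ins rather than DynamoRIO structures, and the shared-segment special case is omitted.

#include <stddef.h>
#include <stdio.h>

typedef struct {
    size_t start;
    size_t end;
} toy_segment_t;

static void
add_mapping(size_t start, size_t end)
{
    printf("mapping [0x%zx, 0x%zx)\n", start, end);
}

static void
merge_segments(const toy_segment_t *segs, size_t n)
{
    size_t i;
    size_t run_start = segs[0].start;
    for (i = 1; i < n; i++) {
        if (segs[i].start > segs[i - 1].end) {
            /* Gap: flush the current contiguous run and start a new one. */
            add_mapping(run_start, segs[i - 1].end);
            run_start = segs[i].start;
        }
    }
    add_mapping(run_start, segs[n - 1].end);
}

int
main(void)
{
    /* Two adjacent segments followed by a discontiguous one: two mappings. */
    toy_segment_t segs[] = { { 0x1000, 0x3000 }, { 0x3000, 0x4000 }, { 0x8000, 0x9000 } };
    merge_segments(segs, sizeof(segs) / sizeof(segs[0]));
    return 0;
}
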
Example #2
static int
dl_iterate_get_path_cb(struct dl_phdr_info *info, size_t size, void *data)
{
    dl_iterate_data_t *iter_data = (dl_iterate_data_t *)data;
    /* info->dlpi_addr is the offset from the preferred base, so we need to
     * calculate the absolute address of the base.
     * We can calculate the absolute address of the first segment, but ELF
     * doesn't seem to guarantee that either the ELF header (base of the
     * file) or the program headers (info->dlpi_phdr) are later than
     * min_vaddr, so it's unclear what would appear in the maps file and
     * thus be the base we're looking to match: for now we assume the page
     * with min_vaddr is that base.
     * If the ELF header, program headers, and 1st segment could all be on
     * separate pages, I don't see any way to find the ELF header in such
     * cases short of walking backward and looking for the magic numbers.
     */
    app_pc pref_start, pref_end;
    app_pc min_vaddr = module_vaddr_from_prog_header((app_pc)info->dlpi_phdr,
                                                     info->dlpi_phnum, NULL, NULL);
    app_pc base = info->dlpi_addr + min_vaddr;
    /* Note that dl_iterate_phdr doesn't give a name for the executable or
     * ld-linux.so presumably b/c those are mapped by the kernel so the
     * user-space loader doesn't need to know their file paths.
     */
    LOG(GLOBAL, LOG_VMAREAS, 2,
        "dl_iterate_get_path_cb: addr=" PFX " hdrs=" PFX " base=" PFX " name=%s\n",
        info->dlpi_addr, info->dlpi_phdr, base, info->dlpi_name);
    /* all we have is an addr somewhere in the module, so we need the end */
    if (module_walk_program_headers(base,
                                    /* FIXME: don't have view size: but
                                     * anything larger than header sizes works
                                     */
                                    PAGE_SIZE, false,
                                    true, /* i#1589: ld.so relocated .dynamic */
                                    &pref_start, NULL, &pref_end, NULL, NULL)) {
        /* we're passed back start,end of preferred base */
        if ((iter_data->target_addr != NULL && iter_data->target_addr >= base &&
             iter_data->target_addr < base + (pref_end - pref_start)) ||
            (iter_data->target_path != NULL &&
             /* if we're passed an ambiguous name, we return first hit.
              * if passed full path, should normally be what was used to
              * load, so should match.
              */
             strstr(info->dlpi_name, iter_data->target_path) != NULL)) {
            if (iter_data->path_size > 0) {
                /* We want just the path, not the filename. */
                char *slash = strrchr(info->dlpi_name, '/');
                size_t copy_len;
                ASSERT_CURIOSITY(slash != NULL);
                ASSERT_CURIOSITY((slash - info->dlpi_name) < iter_data->path_size);
                copy_len =
                    MIN(iter_data->path_size - 1, (size_t)(slash - info->dlpi_name));
                strncpy(iter_data->path_out, info->dlpi_name, copy_len);
                /* strncpy does not null-terminate when the source is longer. */
                iter_data->path_out[copy_len] = '\0';
            }
            iter_data->mod_start = base;
            iter_data->mod_end = base + (pref_end - pref_start);
            return 1; /* done iterating */
        }
    } else {
        ASSERT_NOT_REACHED();
    }
    return 0; /* keep looking */
}
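
For context on the comment above: dlpi_addr is the load delta relative to the preferred base, so the in-memory base is dlpi_addr plus the lowest PT_LOAD p_vaddr. The standalone sketch below reproduces that computation using only the glibc dl_iterate_phdr() API; it is an assumed approximation of what module_vaddr_from_prog_header() provides, and a real implementation would also truncate the result to the containing page.

#define _GNU_SOURCE
#include <link.h>
#include <stdio.h>

static int
print_base_cb(struct dl_phdr_info *info, size_t size, void *data)
{
    ElfW(Addr) min_vaddr = (ElfW(Addr))-1;
    int i;
    (void)size;
    (void)data;
    /* The lowest PT_LOAD p_vaddr is the module's preferred base. */
    for (i = 0; i < info->dlpi_phnum; i++) {
        if (info->dlpi_phdr[i].p_type == PT_LOAD &&
            info->dlpi_phdr[i].p_vaddr < min_vaddr)
            min_vaddr = info->dlpi_phdr[i].p_vaddr;
    }
    if (min_vaddr == (ElfW(Addr))-1)
        min_vaddr = 0; /* no PT_LOAD segments */
    /* dlpi_addr is the relocation offset, so this is the actual load base. */
    printf("%s: base=%p\n",
           info->dlpi_name[0] == '\0' ? "<unnamed (exe or vdso)>" : info->dlpi_name,
           (void *)(info->dlpi_addr + min_vaddr));
    return 0; /* keep iterating */
}

int
main(void)
{
    dl_iterate_phdr(print_base_cb, NULL);
    return 0;
}
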
Example #3
/* callback for dl_iterate_phdr() for adding existing modules to our lists */
static int
dl_iterate_get_areas_cb(struct dl_phdr_info *info, size_t size, void *data)
{
    int *count = (int *)data;
    uint i;
    /* see comments in dl_iterate_get_path_cb() */
    app_pc modend;
    app_pc min_vaddr = module_vaddr_from_prog_header((app_pc)info->dlpi_phdr,
                                                     info->dlpi_phnum, NULL, &modend);
    app_pc modbase = info->dlpi_addr + min_vaddr;
    size_t modsize = modend - min_vaddr;
    LOG(GLOBAL, LOG_VMAREAS, 2,
        "dl_iterate_get_areas_cb: addr=" PFX " hdrs=" PFX " base=" PFX " name=%s\n",
        info->dlpi_addr, info->dlpi_phdr, modbase, info->dlpi_name);
    ASSERT(info->dlpi_phnum == module_num_program_headers(modbase));

    ASSERT(count != NULL);
    if (*count == 0) {
        /* since we don't get a name for the executable, for now we
         * assume that the first iter is the executable itself.
         * XXX: this seems to hold, but there's no guarantee: can we do better?
         */
        executable_start = modbase;
    }

#ifndef X64
    if (modsize == PAGE_SIZE && info->dlpi_name[0] == '\0') {
        /* Candidate for VDSO.  Xref PR 289138 on using AT_SYSINFO to locate. */
        /* Xref VSYSCALL_PAGE_START_HARDCODED but later linuxes randomize */
        char *soname;
        if (module_walk_program_headers(modbase, modsize, false,
                                        true, /* i#1589: ld.so relocated .dynamic */
                                        NULL, NULL, NULL, &soname, NULL) &&
            strncmp(soname, VSYSCALL_PAGE_SO_NAME, strlen(VSYSCALL_PAGE_SO_NAME)) == 0) {
            ASSERT(!dynamo_initialized); /* .data should be +w */
            ASSERT(vsyscall_page_start == NULL);
            vsyscall_page_start = modbase;
            LOG(GLOBAL, LOG_VMAREAS, 1, "found vsyscall page @ " PFX "\n",
                vsyscall_page_start);
        }
    }
#endif
    if (modbase != vsyscall_page_start)
        module_list_add(modbase, modsize, false, info->dlpi_name, 0 /*don't have inode*/);

    for (i = 0; i < info->dlpi_phnum; i++) {
        app_pc start, end;
        uint prot;
        size_t align;
        if (module_read_program_header(modbase, i, &start, &end, &prot, &align)) {
            start += info->dlpi_addr;
            end += info->dlpi_addr;
            LOG(GLOBAL, LOG_VMAREAS, 2, "\tsegment %d: " PFX "-" PFX " %s align=%d\n", i,
                start, end, memprot_string(prot), align);
            start = (app_pc)ALIGN_BACKWARD(start, PAGE_SIZE);
            end = (app_pc)ALIGN_FORWARD(end, PAGE_SIZE);
            LOG(GLOBAL, LOG_VMAREAS, 4,
                "find_executable_vm_areas: adding: " PFX "-" PFX " prot=%d\n", start, end,
                prot);
            all_memory_areas_lock();
            update_all_memory_areas(start, end, prot, DR_MEMTYPE_IMAGE);
            all_memory_areas_unlock();
            if (app_memory_allocation(NULL, start, end - start, prot,
                                      true /*image*/
                                      _IF_DEBUG("ELF SO")))
                (*count)++;
        }
    }
    return 0; /* keep iterating */
}
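
The segment loop above rounds each segment's bounds out to page boundaries before registering it. ALIGN_BACKWARD and ALIGN_FORWARD are DynamoRIO macros not shown in this excerpt; the self-contained sketch below shows the power-of-two rounding they are assumed to perform, using a hypothetical page size and sample addresses.

#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SIZE 0x1000u /* hypothetical; the real code uses PAGE_SIZE */

/* Round down to a power-of-two alignment. */
static uintptr_t
align_backward(uintptr_t x, uintptr_t align)
{
    return x & ~(align - 1);
}

/* Round up to a power-of-two alignment. */
static uintptr_t
align_forward(uintptr_t x, uintptr_t align)
{
    return (x + align - 1) & ~(align - 1);
}

int
main(void)
{
    uintptr_t start = 0x400123, end = 0x402f01; /* sample segment bounds */
    printf("[0x%lx, 0x%lx) -> [0x%lx, 0x%lx)\n", (unsigned long)start,
           (unsigned long)end, (unsigned long)align_backward(start, TOY_PAGE_SIZE),
           (unsigned long)align_forward(end, TOY_PAGE_SIZE));
    return 0;
}
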
Example #4
/* See memquery.h for full interface specs, which are identical to
 * memquery_library_bounds().
 */
int
memquery_library_bounds_by_iterator(const char *name, app_pc *start/*IN/OUT*/,
                                    app_pc *end/*OUT*/,
                                    char *fullpath/*OPTIONAL OUT*/, size_t path_size)
{
    int count = 0;
    bool found_library = false;
    char libname[MAXIMUM_PATH];
    const char *name_cmp = name;
    memquery_iter_t iter;
    app_pc last_base = NULL;
    app_pc last_end = NULL;
    size_t image_size = 0;
    app_pc cur_end = NULL;
    app_pc mod_start = NULL;
    ASSERT(name != NULL || start != NULL);

    /* If name is non-NULL, start can be NULL, so we have to walk the whole
     * address space even when we have syscalls for memquery (e.g., on Mac).
     * Even if start is non-NULL, it could be in the middle of the library.
     */
    memquery_iterator_start(&iter, NULL,
                            /* We're never called from a fragile place like a
                             * signal handler, so as long as it's not real early
                             * it's ok to alloc.
                             */
                            dynamo_heap_initialized);
    libname[0] = '\0';
    while (memquery_iterator_next(&iter)) {
        LOG(GLOBAL, LOG_VMAREAS, 5, "start="PFX" end="PFX" prot=%x comment=%s\n",
            iter.vm_start, iter.vm_end, iter.prot, iter.comment);

        /* Record the base of each differently-named set of entries up until
         * we find our target, when we'll clobber libpath
         */
        if (!found_library &&
            strncmp(libname, iter.comment, BUFFER_SIZE_ELEMENTS(libname)) != 0) {
            last_base = iter.vm_start;
            /* last_end is used to know what's readable beyond last_base */
            if (TEST(MEMPROT_READ, iter.prot))
                last_end = iter.vm_end;
            else
                last_end = last_base;
            /* remember name so we can find the base of a multiply-mapped so */
            strncpy(libname, iter.comment, BUFFER_SIZE_ELEMENTS(libname));
            NULL_TERMINATE_BUFFER(libname);
        }

        if ((name_cmp != NULL &&
             (strstr(iter.comment, name_cmp) != NULL ||
              /* For Linux, include mid-library (non-.bss) anonymous mappings.
               * Our private loader
               * fills mapping holes with anonymous memory instead of a
               * MEMPROT_NONE mapping from the original file.
               * For Mac, this includes mid-library .bss.
               */
              (found_library && iter.comment[0] == '\0' && image_size != 0 &&
               iter.vm_end - mod_start < image_size))) ||
            (name == NULL && *start >= iter.vm_start && *start < iter.vm_end)) {
            if (!found_library) {
                size_t mod_readable_sz;
                char *dst = (fullpath != NULL) ? fullpath : libname;
                size_t dstsz = (fullpath != NULL) ? path_size :
                    BUFFER_SIZE_ELEMENTS(libname);
                char *slash = strrchr(iter.comment, '/');
                size_t copy_len;
                ASSERT_CURIOSITY(slash != NULL);
                ASSERT_CURIOSITY((slash - iter.comment) < dstsz);
                /* We keep the last '/' at the end. */
                ++slash;
                copy_len = MIN(dstsz - 1, (size_t)(slash - iter.comment));
                strncpy(dst, iter.comment, copy_len);
                /* strncpy does not null-terminate when the source is longer. */
                dst[copy_len] = '\0';
                if (name == NULL)
                    name_cmp = dst;
                found_library = true;
                /* Most libraries have multiple segments, and some have the
                 * ELF header repeated in a later mapping, so we can't rely
                 * on is_elf_so_header() and header walking.
                 * We use the name tracking to remember the first entry
                 * that had this name.
                 */
                if (last_base == NULL) {
                    mod_start = iter.vm_start;
                    mod_readable_sz = iter.vm_end - iter.vm_start;
                } else {
                    mod_start = last_base;
                    mod_readable_sz = last_end - last_base;
                }
                if (module_is_header(mod_start, mod_readable_sz)) {
                    app_pc mod_base, mod_end;
                    if (module_walk_program_headers(mod_start, mod_readable_sz, false,
                                                    true, /* i#1589: ld.so relocated .dynamic */
                                                    &mod_base, NULL, &mod_end, NULL,
                                                    NULL)) {
                        image_size = mod_end - mod_base;
                        LOG(GLOBAL, LOG_VMAREAS, 4, "%s: image size is "PIFX"\n",
                            __FUNCTION__, image_size);
                        ASSERT_CURIOSITY(image_size != 0);
                    } else {
                        ASSERT_NOT_REACHED();
                    }
                } else {
                    ASSERT(false && "expected elf header");
                }
            }
            count++;
            cur_end = iter.vm_end;
        } else if (found_library) {
            /* hit non-matching, we expect module segments to be adjacent */
            break;
        }
    }

    /* Xref PR 208443: .bss sections are anonymous (no file name listed in
     * maps file), but not every library has one.  We have to parse the ELF
     * header to know since we can't assume that a subsequent anonymous
     * region is .bss. */
    if (image_size != 0 && cur_end - mod_start < image_size) {
        /* Found a .bss section.  Check the current mapping (note that it might
         * be only part of the mapping, due to OS region merging?  FIXME: investigate).
         */
        ASSERT_CURIOSITY(iter.vm_start == cur_end /* no gaps; FIXME: might there be
                                                   * a gap if the file has large
                                                   * alignment and no data section?
                                                   * curiosity for now */);
        ASSERT_CURIOSITY(iter.inode == 0); /* .bss is anonymous */
        ASSERT_CURIOSITY(iter.vm_end - mod_start >= image_size); /* should be big enough */
        count++;
        cur_end = mod_start + image_size;
    } else {
        /* Shouldn't have more mapped than the size of the module, unless it's a
         * second adjacent separate map of the same file.  Curiosity for now. */
        ASSERT_CURIOSITY(image_size == 0 || cur_end - mod_start == image_size);
    }
    memquery_iterator_stop(&iter);

    if (start != NULL)
        *start = mod_start;
    if (end != NULL)
        *end = cur_end;
    return count;
}
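
Both dl_iterate_get_path_cb() above and this routine strip the filename from a path by locating the last '/'; here the trailing slash is kept and only the directory portion is copied, always null-terminated. The self-contained sketch below illustrates that idiom; copy_dir_prefix(), the buffer size, and the sample path are hypothetical.

#include <stdio.h>
#include <string.h>

/* Copy the directory portion of path (including the trailing '/') into dst,
 * truncating if needed and always null-terminating. */
static void
copy_dir_prefix(char *dst, size_t dstsz, const char *path)
{
    const char *slash = strrchr(path, '/');
    size_t len;
    if (dstsz == 0)
        return;
    if (slash == NULL) {
        dst[0] = '\0';
        return;
    }
    len = (size_t)(slash - path) + 1; /* keep the trailing '/' */
    if (len >= dstsz)
        len = dstsz - 1; /* truncate to fit */
    memcpy(dst, path, len);
    dst[len] = '\0';
}

int
main(void)
{
    char dir[64];
    copy_dir_prefix(dir, sizeof(dir), "/usr/lib/x86_64-linux-gnu/libm.so.6");
    printf("%s\n", dir); /* prints /usr/lib/x86_64-linux-gnu/ */
    return 0;
}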