Example #1
bool
memquery_from_os(const byte *pc, OUT dr_mem_info_t *info, OUT bool *have_type)
{
    memquery_iter_t iter;
    app_pc last_end = NULL;
    app_pc next_start = (app_pc) POINTER_MAX;
    bool found = false;
    ASSERT(info != NULL);
    memquery_iterator_start(&iter, (app_pc) pc, false/*won't alloc*/);
    while (memquery_iterator_next(&iter)) {
        if (pc >= iter.vm_start && pc < iter.vm_end) {
            info->base_pc = iter.vm_start;
            info->size = (iter.vm_end - iter.vm_start);
            info->prot = iter.prot;
            /* On early (pre-Fedora 2) kernels the vsyscall page is listed
             * with no permissions at all in the maps file.  Here's RHEL4:
             *   ffffe000-fffff000 ---p 00000000 00:00 0
             * We return "rx" as the permissions in that case.
             */
            if (vsyscall_page_start != NULL &&
                pc >= vsyscall_page_start && pc < vsyscall_page_start+PAGE_SIZE) {
                /* i#1583: recent kernels have 2-page vdso, which can be split,
                 * but we don't expect to come here b/c they won't have zero
                 * permissions.
                 */
                ASSERT(iter.vm_start == vsyscall_page_start);
                ASSERT(iter.vm_end - iter.vm_start == PAGE_SIZE ||
                       /* i386 Ubuntu 14.04:
                        * 0xb77bc000-0xb77be000   0x2000    0x0 [vvar]
                        * 0xb77be000-0xb77c0000   0x2000    0x0 [vdso]
                        */
                       iter.vm_end - iter.vm_start == 2*PAGE_SIZE);
                info->prot = (MEMPROT_READ|MEMPROT_EXEC|MEMPROT_VDSO);
            } else if (strcmp(iter.comment, "[vvar]") == 0) {
                /* The VVAR pages were added in kernel 3.0 but not labeled until
                 * 3.15.  We document that we do not label prior to 3.15.
                 * DrMem#1778 seems to only happen on 3.19+ in any case.
                 */
                info->prot |= MEMPROT_VDSO;
            }
            found = true;
            break;
        } else if (pc < iter.vm_start) {
            next_start = iter.vm_start;
            break;
        }
        last_end = iter.vm_end;
    }
    memquery_iterator_stop(&iter);
    if (!found) {
        info->base_pc = last_end;
        info->size = (next_start - last_end);
        info->prot = MEMPROT_NONE;
        info->type = DR_MEMTYPE_FREE;
        *have_type = true;
    }
    return true;
}
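A minimal caller sketch may help illustrate the contract above. The wrapper name is_address_mapped is hypothetical (not part of the original), and the sketch assumes the DynamoRIO-internal declarations from memquery.h; note that memquery_from_os() writes *have_type only on the not-found path, so the caller initializes it.

/* Hypothetical caller sketch: classify an address via memquery_from_os().
 * Assumes the DynamoRIO-internal declarations from memquery.h.
 */
static bool
is_address_mapped(const byte *pc)
{
    dr_mem_info_t info;
    bool have_type = false; /* set by memquery_from_os only on the free path */
    if (!memquery_from_os(pc, &info, &have_type))
        return false;
    /* An unmapped address comes back as the free gap between neighboring
     * mappings: MEMPROT_NONE prot and DR_MEMTYPE_FREE type.
     */
    return !(have_type && info.type == DR_MEMTYPE_FREE);
}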
Example #2
/* See memquery.h for full interface specs, which are identical to
 * memquery_library_bounds().
 */
int
memquery_library_bounds_by_iterator(const char *name, app_pc *start/*IN/OUT*/,
                                    app_pc *end/*OUT*/,
                                    char *fullpath/*OPTIONAL OUT*/, size_t path_size)
{
    int count = 0;
    bool found_library = false;
    char libname[MAXIMUM_PATH];
    const char *name_cmp = name;
    memquery_iter_t iter;
    app_pc last_base = NULL;
    app_pc last_end = NULL;
    size_t image_size = 0;
    app_pc cur_end = NULL;
    app_pc mod_start = NULL;
    ASSERT(name != NULL || start != NULL);

    /* If name is non-NULL, start can be NULL, so we have to walk the whole
     * address space even when we have syscalls for memquery (e.g., on Mac).
     * Even if start is non-NULL, it could be in the middle of the library.
     */
    memquery_iterator_start(&iter, NULL,
                            /* We're never called from a fragile place like a
                             * signal handler, so as long as it's not real early
                             * it's ok to alloc.
                             */
                            dynamo_heap_initialized);
    libname[0] = '\0';
    while (memquery_iterator_next(&iter)) {
        LOG(GLOBAL, LOG_VMAREAS, 5, "start="PFX" end="PFX" prot=%x comment=%s\n",
            iter.vm_start, iter.vm_end, iter.prot, iter.comment);

        /* Record the base of each differently-named set of entries up until
         * we find our target, at which point we clobber libname.
         */
        if (!found_library &&
            strncmp(libname, iter.comment, BUFFER_SIZE_ELEMENTS(libname)) != 0) {
            last_base = iter.vm_start;
            /* last_end is used to know what's readable beyond last_base */
            if (TEST(MEMPROT_READ, iter.prot))
                last_end = iter.vm_end;
            else
                last_end = last_base;
            /* remember the name so we can find the base of a multiply-mapped .so */
            strncpy(libname, iter.comment, BUFFER_SIZE_ELEMENTS(libname));
            NULL_TERMINATE_BUFFER(libname);
        }

        if ((name_cmp != NULL &&
             (strstr(iter.comment, name_cmp) != NULL ||
              /* For Linux, include mid-library (non-.bss) anonymous mappings.
               * Our private loader fills mapping holes with anonymous memory
               * instead of a MEMPROT_NONE mapping from the original file.
               * For Mac, this includes mid-library .bss.
               */
              (found_library && iter.comment[0] == '\0' && image_size != 0 &&
               iter.vm_end - mod_start < image_size))) ||
            (name == NULL && *start >= iter.vm_start && *start < iter.vm_end)) {
            if (!found_library) {
                size_t mod_readable_sz;
                char *dst = (fullpath != NULL) ? fullpath : libname;
                size_t dstsz = (fullpath != NULL) ? path_size :
                    BUFFER_SIZE_ELEMENTS(libname);
                char *slash = strrchr(iter.comment, '/');
                ASSERT_CURIOSITY(slash != NULL);
                ASSERT_CURIOSITY((slash - iter.comment) < dstsz);
                /* we keep the last '/' at end */
                ++slash;
                strncpy(dst, iter.comment, MIN(dstsz, (slash - iter.comment)));
                /* strncpy does not null-terminate on truncation */
                dst[dstsz - 1] = '\0';
                if (name == NULL)
                    name_cmp = dst;
                found_library = true;
                /* Most libraries have multiple segments, and some have the
                 * ELF header repeated in a later mapping, so we can't rely
                 * on is_elf_so_header() and header walking.
                 * We use the name tracking to remember the first entry
                 * that had this name.
                 */
                if (last_base == NULL) {
                    mod_start = iter.vm_start;
                    mod_readable_sz = iter.vm_end - iter.vm_start;
                } else {
                    mod_start = last_base;
                    mod_readable_sz = last_end - last_base;
                }
                if (module_is_header(mod_start, mod_readable_sz)) {
                    app_pc mod_base, mod_end;
                    if (module_walk_program_headers(mod_start, mod_readable_sz, false,
                                                    &mod_base, NULL, &mod_end, NULL,
                                                    NULL)) {
                        image_size = mod_end - mod_base;
                        LOG(GLOBAL, LOG_VMAREAS, 4, "%s: image size is "PIFX"\n",
                            __FUNCTION__, image_size);
                        ASSERT_CURIOSITY(image_size != 0);
                    } else {
                        ASSERT_NOT_REACHED();
                    }
                } else {
                    ASSERT(false && "expected elf header");
                }
            }
            count++;
            cur_end = iter.vm_end;
        } else if (found_library) {
            /* hit a non-matching entry; we expect module segments to be adjacent */
            break;
        }
    }

    /* Xref PR 208443: .bss sections are anonymous (no file name listed in
     * maps file), but not every library has one.  We have to parse the ELF
     * header to know since we can't assume that a subsequent anonymous
     * region is .bss. */
    if (image_size != 0 && cur_end - mod_start < image_size) {
        /* Found a .bss section.  Check the current mapping: note that it might
         * be only part of the mapping (due to OS region merging? FIXME: investigate).
         */
        ASSERT_CURIOSITY(iter.vm_start == cur_end /* no gaps; FIXME: might there be
                                                   * a gap if the file has large
                                                   * alignment and no data section?
                                                   * curiosity for now */);
        ASSERT_CURIOSITY(iter.inode == 0); /* .bss is anonymous */
        ASSERT_CURIOSITY(iter.vm_end - mod_start >= image_size);/* should be big enough */
        count++;
        cur_end = mod_start + image_size;
    } else {
        /* Shouldn't have more mapped than the size of the module, unless it's a
         * second adjacent separate map of the same file.  Curiosity for now. */
        ASSERT_CURIOSITY(image_size == 0 || cur_end - mod_start == image_size);
    }
    memquery_iterator_stop(&iter);

    if (start != NULL)
        *start = mod_start;
    if (end != NULL)
        *end = cur_end;
    return count;
}
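A hedged usage sketch (the wrapper name and log message are illustrative, not from the original): with a non-NULL name substring, start may be NULL; with name == NULL, *start must already point inside the target library. On success, fullpath receives the directory prefix kept through the trailing '/'.

/* Hypothetical usage sketch, assuming the memquery.h declarations above. */
static void
log_library_bounds(const char *name_substr)
{
    app_pc start = NULL, end = NULL;
    char path[MAXIMUM_PATH];
    int segs = memquery_library_bounds_by_iterator(name_substr, &start, &end,
                                                   path, BUFFER_SIZE_ELEMENTS(path));
    if (segs > 0) {
        /* path holds the directory portion, up to and including the last '/'. */
        LOG(GLOBAL, LOG_VMAREAS, 1, "%s*: "PFX"-"PFX" in %d segments\n",
            path, start, end, segs);
    }
}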
Example #3
bool
memquery_iterator_next(memquery_iter_t *iter)
{
    maps_iter_t *mi = (maps_iter_t *) &iter->internal;
    char perm[16];
    char *line;
    int len;
    app_pc prev_start = iter->vm_start;
    ASSERT((iter->may_alloc && OWN_MUTEX(&maps_iter_buf_lock)) ||
           (!iter->may_alloc && OWN_MUTEX(&memory_info_buf_lock)));
    if (mi->newline == NULL) {
        mi->bufwant = BUFSIZE-1;
        mi->bufread = os_read(mi->maps, mi->buf, mi->bufwant);
        ASSERT(mi->bufread <= mi->bufwant);
        LOG(GLOBAL, LOG_VMAREAS, 6,
            "get_memory_info_from_os: bytes read %d/want %d\n",
            mi->bufread, mi->bufwant);
        if (mi->bufread <= 0)
            return false;
        mi->buf[mi->bufread] = '\0';
        mi->newline = strchr(mi->buf, '\n');
        line = mi->buf;
    } else {
        line = mi->newline + 1;
        mi->newline = strchr(line, '\n');
        if (mi->newline == NULL) {
            /* FIXME clean up: factor out repetitive code */
            /* shift 1st part of line to start of buf, then read in rest */
            /* the memory for the processed part can be reused  */
            mi->bufwant = line - mi->buf;
            ASSERT(mi->bufwant <= mi->bufread);
            len = mi->bufread - mi->bufwant; /* what is left from last time */
            /* since strings may overlap, should use memmove, not strncpy */
            /* FIXME corner case: if len == 0, nothing to move */
            memmove(mi->buf, line, len);
            mi->bufread = os_read(mi->maps, mi->buf+len, mi->bufwant);
            ASSERT(mi->bufread <= mi->bufwant);
            if (mi->bufread <= 0)
                return false;
            mi->bufread += len; /* bufread is total in buf */
            mi->buf[mi->bufread] = '\0';
            mi->newline = strchr(mi->buf, '\n');
            line = mi->buf;
        }
    }
    LOG(GLOBAL, LOG_VMAREAS, 6,
        "\nget_memory_info_from_os: newline=[%s]\n",
        mi->newline ? mi->newline : "(null)");

    /* Buffer is big enough to hold at least one line: if not, the file changed
     * underneath us after we hit the end.  Just bail.
     */
    if (mi->newline == NULL)
        return false;
    *mi->newline = '\0';
    LOG(GLOBAL, LOG_VMAREAS, 6,
        "\nget_memory_info_from_os: line=[%s]\n", line);
    mi->comment_buffer[0] = '\0';
    len = sscanf(line,
#ifdef IA32_ON_IA64
                 MAPS_LINE_FORMAT8, /* cross-compiling! */
#else
                 sizeof(void*) == 4 ? MAPS_LINE_FORMAT4 : MAPS_LINE_FORMAT8,
#endif
                 (unsigned long*)&iter->vm_start, (unsigned long*)&iter->vm_end,
                 perm, (unsigned long*)&iter->offset, &iter->inode,
                 mi->comment_buffer);
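    /* len is the number of fields sscanf() assigned.  For a maps line such as
     * (a hypothetical example in the style of the i386 lines quoted above):
     *   b77be000-b77c0000 r-xp 00000000 08:01 1234  [vdso]
     * that is 6: start, end, perms, offset, inode, and comment; the device
     * column is not among the stored fields.  Anonymous mappings carry no
     * comment and yield len == 5, which the len < 6 check near the end of
     * this function handles by clearing the comment buffer.
     */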
    if (iter->vm_start == iter->vm_end) {
        /* i#366 & i#599: Merge an empty region caused by stack guard pages
         * into the stack region if the stack region is less than one page away.
         * Otherwise skip it.  Some Linux kernels (2.6.32 has been observed)
         * have empty entries for the stack guard page.  We drop the permissions
         * on the guard page, because Linux always insists that it has rwxp
         * perms, no matter how we change the protections.  The actual stack
         * region has the perms we expect.
         * XXX: We could get more accurate info if we looked at
         * /proc/self/smaps, which has a Size: 4k line for these "empty"
         * regions.
         */
        app_pc empty_start = iter->vm_start;
        bool r;
        LOG(GLOBAL, LOG_VMAREAS, 2,
            "maps_iterator_next: skipping or merging empty region "PFX"\n",
            iter->vm_start);
        /* Don't trigger the maps-file-changed check below.  There is a slight
         * risk of a race where we'll pass back an earlier/overlapping region:
         * we'll live with it.
         */
        iter->vm_start = NULL;
        r = memquery_iterator_next(iter);
        /* We could check to see if we're combining with the [stack] section,
         * but that doesn't work if there are multiple stacks or the stack is
         * split into multiple maps entries, so we merge any empty region within
         * one page of the next region.
         */
        if (empty_start <= iter->vm_start &&
            iter->vm_start <= empty_start + PAGE_SIZE) {
            /* Merge regions if the next region was zero or one page away. */
            iter->vm_start = empty_start;
        }
        return r;
    }
    if (iter->vm_start <= prev_start) {
        /* the maps file has expanded underneath us (presumably due to our
         * own committing while iterating): skip ahead */
        LOG(GLOBAL, LOG_VMAREAS, 2,
            "maps_iterator_next: maps file changed: skipping "PFX"\n", prev_start);
        iter->vm_start = prev_start;
        return memquery_iterator_next(iter);
    }
    if (len < 6)
        mi->comment_buffer[0] = '\0';
    iter->prot = permstr_to_memprot(perm);
#ifdef ANDROID
    /* i#1861: the Android kernel supports custom region comments, which
     * prevent adjacent regions from merging
     */
    if (iter->comment[0] != '\0')
        iter->prot |= MEMPROT_HAS_COMMENT;
#endif
    return true;
}
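All three examples share the same iterator protocol: start, repeated next, then stop. Below is a minimal sketch of that protocol; the wrapper log_all_mappings is hypothetical and assumes a call site where allocation is safe (cf. the dynamo_heap_initialized argument in Example #2).

/* Hypothetical sketch of the start/next/stop protocol used above. */
static void
log_all_mappings(void)
{
    memquery_iter_t iter;
    /* Pass true for may_alloc only outside fragile contexts such as signal
     * handlers; a NULL pc walks the whole address space, as in Example #2.
     */
    memquery_iterator_start(&iter, NULL, true /*may alloc*/);
    while (memquery_iterator_next(&iter)) {
        LOG(GLOBAL, LOG_VMAREAS, 3, PFX"-"PFX" prot=%x %s\n",
            iter.vm_start, iter.vm_end, iter.prot, iter.comment);
    }
    memquery_iterator_stop(&iter);
}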