Example #1
bool
wingdi_process_syscall_arg(drsys_arg_t *arg)
{
    if (arg->containing_type == DRSYS_TYPE_LARGE_STRING) {
        /* i#489: LARGE_STRING.MaximumLength and LARGE_STRING.bAnsi end
         * up initialized by a series of bit manips that fool us.
         */
        LARGE_STRING ls;
        if (strcmp(arg->arg_name, "LARGE_STRING.MaximumLength") == 0) {
            /* can't take offsetof of a bit-field so we assume no padding */
            byte *start = ((byte*) arg->start_addr) - sizeof(ls.Length) -
                offsetof(LARGE_STRING, Length);
            ASSERT(arg->pre, "LARGE_STRING non-buffer fields are always IN");
            if (safe_read((void*) start, sizeof(ls), &ls)) {
                LOG(SYSCALL_VERBOSE,
                    "LARGE_STRING Buffer="PFX" Length=%d MaximumLength=%d\n",
                    (byte *)ls.Buffer, ls.Length, ls.MaximumLength);
                /* Check for undef if looks "suspicious": weak,
                 * but simpler and more efficient than pattern match on every bb.
                 */
                if (ls.MaximumLength > ls.Length &&
                    ls.MaximumLength > 1024 /* suspicious */) {
                    check_sysmem(MEMREF_CHECK_DEFINEDNESS, arg->sysnum, arg->start_addr,
                                 sizeof(ULONG/*+bAnsi*/), arg->mc, arg->arg_name);
                } else {
                    shadow_set_range(arg->start_addr, (byte *)arg->start_addr +
                                     sizeof(ULONG), SHADOW_DEFINED);
                }
            } else
                WARN("WARNING: unable to read syscall param\n");
            return true; /* handled */
        }
    }
    return false; /* not handled */
}
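
A standalone sketch (not from the sources above; the struct is a stand-in for LARGE_STRING) of the pointer arithmetic in Example #1: offsetof() cannot be applied to a bit-field such as MaximumLength, so the code assumes there is no padding after Length and recovers the struct start by subtracting Length's size and offset from the field's address.

#include <stddef.h>
#include <stdio.h>

struct ls_like {                     /* stand-in for LARGE_STRING */
    unsigned int Length;
    unsigned int MaximumLength:31;   /* bit-field: offsetof() is not allowed here */
    unsigned int bAnsi:1;
    void *Buffer;
};

int
main(void)
{
    struct ls_like ls = { 4, 16, 0, NULL };
    /* All we have is the address where MaximumLength starts.  Assuming no padding
     * after Length, that address is &ls + offsetof(Length) + sizeof(Length). */
    unsigned char *field = (unsigned char *) &ls +
        offsetof(struct ls_like, Length) + sizeof(ls.Length);
    /* Same arithmetic as the example: back up over Length to the struct start. */
    unsigned char *start = field - sizeof(ls.Length) - offsetof(struct ls_like, Length);
    printf("recovered start == &ls: %d\n", start == (unsigned char *) &ls);
    return 0;
}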
Example #2
/* The basic algorithm is to have each read/write set the shadow metadata,
 * and the periodic sweep then sets the timestamp if an alloc's metadata
 * is set and subsequently clears the metadata.
 */
static bool
alloc_itercb_sweep(malloc_info_t *info, void *iter_data)
{
    /* we don't care much about synch: ok to not be perfectly accurate */
    /* FIXME: ignore pre_us? option-controlled? */
    byte *end = info->base + info->request_size;
    if (shadow_val_in_range(info->base, end, 1)) {
        stale_per_alloc_t *spa = (stale_per_alloc_t *) info->client_data;
        uint64 stamp = *((uint64 *)iter_data);
        LOG(3, "\t"PFX"-"PFX" was accessed @%"INT64_FORMAT"u\n", info->base, end, stamp);
        spa->last_access = stamp;
        shadow_set_range(info->base, end, 0);
    }
    return true;
}
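
A minimal sketch of how a periodic sweep might drive the callback in Example #2. The helper names stale_get_timestamp() and malloc_iterate() are assumptions for illustration (the real staleness code may name them differently): one timestamp is taken per pass and handed through iter_data, so every allocation whose shadow metadata was set since the last sweep is stamped with the same time before the metadata is cleared.

static void
staleness_sweep_pass(void)
{
    /* one timestamp for the whole pass; the callback reads it via iter_data */
    uint64 stamp = stale_get_timestamp();       /* assumed time source */
    malloc_iterate(alloc_itercb_sweep, &stamp); /* assumed allocation iterator */
}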
Example #3
static void
handle_clone(void *drcontext, dr_mcontext_t *mc)
{
    uint flags = (uint) dr_syscall_get_param(drcontext, 0);
    app_pc newsp = (app_pc) dr_syscall_get_param(drcontext, 1);

    if (!options.shadowing)
        return;

    /* PR 418629: we need to change the stack from defined (marked when it
     * was allocated) to unaddressable.  Originally we couldn't get the stack
     * bounds in the thread init event (xref PR 395156) so we watch here:
     * we could move this code now but not worth it.
     * FIXME: should we watch SYS_exit and put stack back to defined
     * in case it's re-used?  Seems better to leave it unaddressable
     * since may be more common to have racy accesses we want to flag
     * rather than legitimate re-use?
     */
    if (TEST(CLONE_VM, flags) && newsp != NULL) {
        app_pc stack_base = NULL;
        size_t stack_size;
        /* newsp is TOS */
        ASSERT(options.track_heap, "now relying on -track_heap in general");
        if (is_in_heap_region(newsp)) {
            /* How do we find the base of the malloc chunk, to then find its size?
             * Don't want to store all mallocs in an interval data structure
             * (shown to be slow in PR 535568).
             * Maybe hardcode knowledge of how far from upper address
             * glibc clone() sets newsp?
             * Actually, should just walk shadow memory until hit
             * unaddressable.
             */
            /* FIXME: NEVER TESTED! */
            app_pc pc;
            ssize_t sz;
            /* PR 525807 added an interval tree of "large mallocs" */
            if (malloc_large_lookup(newsp, &pc, (size_t*)&sz)) {
                stack_base = pc;
                stack_size = sz;
            } else {
                /* Should be rare so we just do brute force and slow */
                pc = shadow_prev_dword(newsp, newsp - options.stack_swap_threshold,
                                       SHADOW_UNADDRESSABLE);
                sz = malloc_chunk_size(pc+1);
                if (sz > 0) { /* returns -1 on failure */
                    stack_base = pc + 1;
                    stack_size = sz;
                }
            }
        } else {
            /* On linux a pre-adjacent mmap w/ same prot will be merged into the
             * same region as returned by dr_query_memory() and we'll mark it as
             * unaddressable => many false positives (on FC10, adding a printf
             * to suite/tests/linux/clone.c between the stack mmap and the clone
             * call resulted in the merge).  My solution is to track mmaps and
             * assume a stack will be a single mmap (maybe separate guard page
             * but that should be noprot so it's ok to not mark it unaddressable: xref PR
             * 406328).
             */
            if (!mmap_anon_lookup(newsp, &stack_base, &stack_size)) {
                /* Fall back to a query */
                LOG(2, "thread stack "PFX" not in mmap table, querying\n", newsp);
                if (!dr_query_memory(newsp - 1, &stack_base, &stack_size, NULL)) {
                    /* We can estimate the stack end by assuming that clone()
                     * puts less than a page on the stack, but the base is harder:
                     * instead we rely on PR 525807 handle_push_addressable() to
                     * mark the stack unaddr one page at a time.
                     */
                    stack_base = NULL;
                }
            }
        }
        if (stack_base != NULL) {
            LOG(2, "changing thread stack "PFX"-"PFX" -"PFX" to unaddressable\n",
                stack_base, stack_base + stack_size, newsp);
            ASSERT(stack_base + stack_size >= newsp,
                   "new thread's stack alloc messed up");
            if (options.check_stack_bounds) {
                /* assume that above newsp should stay defined */
                shadow_set_range(stack_base, newsp, SHADOW_UNADDRESSABLE);
                check_stack_size_vs_threshold(drcontext, stack_size);
                if (BEYOND_TOS_REDZONE_SIZE > 0) {
                    size_t redzone_sz = BEYOND_TOS_REDZONE_SIZE;
                    if (newsp - BEYOND_TOS_REDZONE_SIZE < stack_base)
                        redzone_sz = newsp - stack_base;
                    shadow_set_range(newsp - redzone_sz, newsp, SHADOW_UNDEFINED);
                }
            }
        } else {
            LOG(0, "ERROR: cannot find bounds of new thread's stack "PFX"\n",
                newsp);
            ASSERT(false, "can't find bounds of thread's stack");
        }
    }
}
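
A standalone sketch (not Dr. Memory code; a toy byte-granular shadow model) of the brute-force fallback in Example #3: scan backwards from the new thread's stack pointer until an unaddressable shadow value is found, and treat the next byte up as the base of the allocation containing newsp.

#include <stddef.h>

enum { TOY_SHADOW_UNADDR = 0, TOY_SHADOW_DEFINED = 2 };

/* shadow[] holds one shadow value per application byte in this toy model */
static ptrdiff_t
toy_find_alloc_base(const unsigned char *shadow, ptrdiff_t newsp_idx,
                    ptrdiff_t scan_limit)
{
    ptrdiff_t i = newsp_idx;
    while (i > scan_limit && shadow[i] != TOY_SHADOW_UNADDR)
        i--;
    /* mirror the malloc_chunk_size() failure path above: report failure if no
     * unaddressable byte was found before hitting the scan limit */
    return (shadow[i] == TOY_SHADOW_UNADDR) ? i + 1 : -1;
}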