Example 1
static bool
check_and_mark_native_exec(module_area_t *ma, bool add)
{
    bool is_native = false;
    const char *name = GET_MODULE_NAME(&ma->names);
    ASSERT(os_get_module_info_locked());
    if (DYNAMO_OPTION(native_exec) && name != NULL &&
        on_native_exec_list(name)) {
        LOG(GLOBAL, LOG_INTERP|LOG_VMAREAS, 1,
            "module %s is on native_exec list\n", name);
        is_native = true;
    }

    if (add && is_native) {
        RSTATS_INC(num_native_module_loads);
        vmvector_add(native_exec_areas, ma->start, ma->end, NULL);
    } else if (!add) {
        /* If we're removing and it's native, it should be on there already.  If
         * it's not native, then it shouldn't be present, but we'll remove
         * whatever is there.
         */
        DEBUG_DECLARE(bool present =)
            vmvector_remove(native_exec_areas, ma->start, ma->end);
        ASSERT_CURIOSITY((is_native && present) || (!is_native && !present));
    }
    return is_native;
}
Example 2
void
ksynch_init(void)
{
    /* Determine whether the kernel supports the SYS_futex syscall.
     * From futex(2): initial futex support was merged in 2.5.7, and the current
     * six-argument format has been in place since 2.6.7.
     */
    volatile int futex_for_test = 0;
    ptr_int_t res = dynamorio_syscall(SYS_futex, 6, &futex_for_test, FUTEX_WAKE, 1,
                                      NULL, NULL, 0);
    kernel_futex_support = (res >= 0);
    ASSERT_CURIOSITY(kernel_futex_support);
}
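
A minimal standalone sketch of the same probe, assuming a Linux host with glibc: issue a six-argument FUTEX_WAKE on a local word via syscall(2) and treat a non-error return as evidence of futex support, just as ksynch_init does above.

#include <linux/futex.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int
main(void)
{
    volatile int futex_word = 0;
    /* FUTEX_WAKE wakes up to 1 waiter; with no waiters it simply returns 0. */
    long res = syscall(SYS_futex, &futex_word, FUTEX_WAKE, 1, NULL, NULL, 0);
    printf("futex supported: %s (res=%ld)\n", res >= 0 ? "yes" : "no", res);
    return 0;
}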
Example 3
/* HACK to get recursive write lock for internal and external use
 * FIXME: code blatantly copied from dynamo_vm_areas_{un}lock(); eliminate duplication!
 */
void
memcache_lock(void)
{
    /* ok to ask for locks or mark stale before all_memory_areas is allocated,
     * during heap init and before we can allocate it.  no lock needed then.
     */
    ASSERT(all_memory_areas != NULL ||
           get_num_threads() <= 1 /* must be only DR thread */);
    if (all_memory_areas == NULL)
        return;
    if (self_owns_write_lock(&all_memory_areas->lock)) {
        all_memory_areas_recursion++;
        /* we have a 5-deep path:
         *   global_heap_alloc | heap_create_unit | get_guarded_real_memory |
         *   heap_low_on_memory | release_guarded_real_memory
         */
        ASSERT_CURIOSITY(all_memory_areas_recursion <= 4);
    } else
        write_lock(&all_memory_areas->lock);
}
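
The recursion counter only balances if the unlock path mirrors it. A minimal sketch of the counterpart, assuming the same globals shown above; the actual memcache_unlock in the codebase may differ in its assertions:

void
memcache_unlock(void)
{
    if (all_memory_areas == NULL)
        return;
    if (all_memory_areas_recursion > 0) {
        /* Unwind one level of a recursive acquire without releasing the lock. */
        all_memory_areas_recursion--;
    } else
        write_unlock(&all_memory_areas->lock);
}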
Example 4
void
tls_thread_init(os_local_state_t *os_tls, byte *segment)
{
    /* We have four different ways to obtain TLS, each with its own limitations:
     *
     * 1) Piggyback on the threading system (like we do on Windows): here that would
     *    be pthreads, which uses a segment since at least RH9, and uses gdt-based
     *    segments for NPTL.  The advantage is we won't run out of ldt or gdt entries
     *    (except when the app itself would).  The disadvantage is we're stealing
     *    application slots and we rely on user mode interfaces.
     *
     * 2) Steal an ldt entry via SYS_modify_ldt.  This suffers from the 8K ldt entry
     *    limit and requires that we update manually on a new thread.  For 64-bit
     *    we're limited here to a 32-bit base.  (Strangely, the kernel's
     *    include/asm-x86_64/ldt.h implies that the base is ignored: but it doesn't
     *    seem to be.)
     *
     * 3) Steal a gdt entry via SYS_set_thread_area.  There is a 3rd unused entry
     *    (after pthreads and wine) we could use.  The kernel swaps for us, and with
     *    CLONE_TLS the kernel will set up the entry for a new thread for us.  Xref
     *    PR 192231 and PR 285898.  This system call is disabled on 64-bit 2.6
     *    kernels (though the man page for arch_prctl implies it isn't for 2.5
     *    kernels?!?)
     *
     * 4) Use SYS_arch_prctl.  This is only implemented on 64-bit kernels, and can
     *    only be used to set the gdt entries that fs and gs select for.  It is
     *    faster with a base below 4GB (obtained with mmap MAP_32BIT) since the
     *    kernel can then use the gdt; otherwise it has to use wrmsr.  The man
     *    pages say "ARCH_SET_GS is disabled in some kernels".
     */
    uint selector;
    int index = -1;
    int res;
#ifdef X64
    /* First choice is gdt, which means arch_prctl.  Since this may fail
     * on some kernels, we require -heap_in_lower_4GB so we can fall back
     * on modify_ldt.
     */
    byte *cur_gs;
    res = dynamorio_syscall(SYS_arch_prctl, 2, ARCH_GET_GS, &cur_gs);
    if (res >= 0) {
        LOG(GLOBAL, LOG_THREADS, 1, "os_tls_init: cur gs base is "PFX"\n", cur_gs);
        /* If we're a non-initial thread, gs will be set to the parent thread's value */
        if (cur_gs == NULL || is_dynamo_address(cur_gs) ||
            /* By resolving i#107, we can handle gs conflicts between app and dr. */
            INTERNAL_OPTION(mangle_app_seg)) {
            res = dynamorio_syscall(SYS_arch_prctl, 2, ARCH_SET_GS, segment);
            if (res >= 0) {
                os_tls->tls_type = TLS_TYPE_ARCH_PRCTL;
                LOG(GLOBAL, LOG_THREADS, 1,
                    "os_tls_init: arch_prctl successful for base "PFX"\n", segment);
                /* Kernel should have written %gs for us if using GDT */
                if (!dynamo_initialized && read_thread_register(SEG_TLS) == 0) {
                    LOG(GLOBAL, LOG_THREADS, 1, "os_tls_init: using MSR\n");
                    tls_using_msr = true;
                }
                if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
                    res = dynamorio_syscall(SYS_arch_prctl, 2, ARCH_SET_FS,
                                            os_tls->os_seg_info.priv_lib_tls_base);
                    /* We assume that setting fs succeeds if setting gs succeeded. */
                    ASSERT(res >= 0);
                }
            } else {
                /* we've found a kernel where ARCH_SET_GS is disabled */
                ASSERT_CURIOSITY(false && "arch_prctl failed on set but not get");
                LOG(GLOBAL, LOG_THREADS, 1,
                    "os_tls_init: arch_prctl failed: error %d\n", res);
            }
        } else {
            /* FIXME PR 205276: we don't currently handle it: fall back on ldt, but
             * we'll have the same conflict w/ the selector...
             */
            ASSERT_BUG_NUM(205276, cur_gs == NULL);
        }
    }
#endif

    if (os_tls->tls_type == TLS_TYPE_NONE) {
        /* Second choice is set_thread_area */
        /* PR 285898: if we added CLONE_SETTLS to all clone calls (and emulated vfork
         * with clone) we could avoid having to set tls up for each thread (as well
         * as solve race PR 207903), at least for kernel 2.5.32+.  For now we stick
         * w/ manual setup.
         */
        our_modify_ldt_t desc;

        /* Pick which GDT slots we'll use for DR TLS and for library TLS if
         * using the private loader.
         */
        choose_gdt_slots(os_tls);

        if (tls_gdt_index > -1) {
            /* Now that we know which GDT slot to use, install the per-thread base
             * into it.
             */
            /* Base here must be 32-bit */
            IF_X64(ASSERT(DYNAMO_OPTION(heap_in_lower_4GB) &&
                          segment <= (byte*)UINT_MAX));
            initialize_ldt_struct(&desc, segment, PAGE_SIZE, tls_gdt_index);
            res = dynamorio_syscall(SYS_set_thread_area, 1, &desc);
            LOG(GLOBAL, LOG_THREADS, 3,
                "%s: set_thread_area %d => %d res, %d index\n",
                __FUNCTION__, tls_gdt_index, res, desc.entry_number);
            ASSERT(res < 0 || desc.entry_number == tls_gdt_index);
        } else {
            res = -1;  /* fall back on LDT */
        }

        if (res >= 0) {
            LOG(GLOBAL, LOG_THREADS, 1,
                "os_tls_init: set_thread_area successful for base "PFX" @index %d\n",
                segment, tls_gdt_index);
            os_tls->tls_type = TLS_TYPE_GDT;
            index = tls_gdt_index;
            selector = GDT_SELECTOR(index);
            WRITE_DR_SEG(selector); /* macro needs lvalue! */
        } else {
            IF_VMX86(ASSERT_NOT_REACHED()); /* since no modify_ldt */
            LOG(GLOBAL, LOG_THREADS, 1,
                "os_tls_init: set_thread_area failed: error %d\n", res);
        }

#ifdef CLIENT_INTERFACE
        /* Install the library TLS base. */
        if (INTERNAL_OPTION(private_loader) && res >= 0) {
            app_pc base = os_tls->os_seg_info.priv_lib_tls_base;
            /* lib_tls_gdt_index is picked in choose_gdt_slots. */
            ASSERT(lib_tls_gdt_index >= gdt_entry_tls_min);
            initialize_ldt_struct(&desc, base, GDT_NO_SIZE_LIMIT,
                                  lib_tls_gdt_index);
            res = dynamorio_syscall(SYS_set_thread_area, 1, &desc);
            LOG(GLOBAL, LOG_THREADS, 3,
                "%s: set_thread_area %d => %d res, %d index\n",
                __FUNCTION__, lib_tls_gdt_index, res, desc.entry_number);
            if (res >= 0) {
                /* i#558: update the lib seg reg so the segment change takes effect */
                selector = GDT_SELECTOR(lib_tls_gdt_index);
                LOG(GLOBAL, LOG_THREADS, 2, "%s: setting %s to selector 0x%x\n",
                    __FUNCTION__, reg_names[LIB_SEG_TLS], selector);
                WRITE_LIB_SEG(selector);
            }
        }
#endif
    }

    if (os_tls->tls_type == TLS_TYPE_NONE) {
        /* Third choice: modify_ldt, which should be available on kernel 2.3.99+ */
        /* Base here must be 32-bit */
        IF_X64(ASSERT(DYNAMO_OPTION(heap_in_lower_4GB) && segment <= (byte*)UINT_MAX));
        /* we have the thread_initexit_lock so no race here */
        index = find_unused_ldt_index();
        selector = LDT_SELECTOR(index);
        ASSERT(index != -1);
        create_ldt_entry((void *)segment, PAGE_SIZE, index);
        os_tls->tls_type = TLS_TYPE_LDT;
        WRITE_DR_SEG(selector); /* macro needs lvalue! */
        LOG(GLOBAL, LOG_THREADS, 1,
            "os_tls_init: modify_ldt successful for base "PFX" w/ index %d\n",
            segment, index);
    }

    os_tls->ldt_index = index;
}
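
A minimal standalone sketch of the first probe above, assuming an x86-64 Linux host: query the current gs base via SYS_arch_prctl, the same check tls_thread_init performs before deciding whether it is safe to repoint gs.

#include <asm/prctl.h> /* ARCH_GET_GS */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int
main(void)
{
    unsigned long gs_base = 0;
    long res = syscall(SYS_arch_prctl, ARCH_GET_GS, &gs_base);
    if (res == 0)
        printf("current gs base: %#lx\n", gs_base);
    else
        perror("arch_prctl(ARCH_GET_GS)");
    return 0;
}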
Example 5
/* Queries the set of available GDT slots, and initializes:
 * - tls_gdt_index
 * - gdt_entry_tls_min on ia32
 * - lib_tls_gdt_index if using private loader
 * GDT slots are initialized with a base and limit of zero.  The caller is
 * responsible for setting them to a real base.
 */
static void
choose_gdt_slots(os_local_state_t *os_tls)
{
    our_modify_ldt_t desc;
    int i;
    int avail_index[GDT_NUM_TLS_SLOTS];
    our_modify_ldt_t clear_desc;
    int res;

    /* using local static b/c dynamo_initialized is not set for a client thread
     * when created in client's dr_init routine
     */
    /* FIXME: Could be racy if we have multiple threads initializing during
     * startup.
     */
    if (tls_global_init)
        return;
    tls_global_init = true;

    /* We don't want to break the assumptions of pthreads or wine,
     * so we try to take the last slot.  We don't want to hardcode
     * the index b/c the kernel will let us clobber entries so we want
     * to only pass in -1.
     */
    ASSERT(!dynamo_initialized);
    ASSERT(tls_gdt_index == -1);
    for (i = 0; i < GDT_NUM_TLS_SLOTS; i++)
        avail_index[i] = -1;
    for (i = 0; i < GDT_NUM_TLS_SLOTS; i++) {
        /* We use a base and limit of 0 for testing what's available. */
        initialize_ldt_struct(&desc, NULL, 0, -1);
        res = dynamorio_syscall(SYS_set_thread_area, 1, &desc);
        LOG(GLOBAL, LOG_THREADS, 4,
            "%s: set_thread_area -1 => %d res, %d index\n",
            __FUNCTION__, res, desc.entry_number);
        if (res >= 0) {
            /* We assume monotonic increases */
            avail_index[i] = desc.entry_number;
            ASSERT(avail_index[i] > tls_gdt_index);
            tls_gdt_index = desc.entry_number;
        } else
            break;
    }

#ifndef X64
    /* In x86-64's ia32 emulation,
     * set_thread_area(6 <= entry_number && entry_number <= 8) fails
     * with EINVAL (22) because x86-64 only accepts GDT indices 12 to 14
     * for TLS entries.
     */
    if (tls_gdt_index > (gdt_entry_tls_min + GDT_NUM_TLS_SLOTS))
        gdt_entry_tls_min = GDT_ENTRY_TLS_MIN_64;  /* The kernel is x64. */
#endif

    /* Now give up the earlier slots */
    for (i = 0; i < GDT_NUM_TLS_SLOTS; i++) {
        if (avail_index[i] > -1 &&
            avail_index[i] != tls_gdt_index) {
            LOG(GLOBAL, LOG_THREADS, 4,
                "clearing set_thread_area index %d\n", avail_index[i]);
            clear_ldt_struct(&clear_desc, avail_index[i]);
            res = dynamorio_syscall(SYS_set_thread_area, 1, &clear_desc);
            ASSERT(res >= 0);
        }
    }

#ifndef VMX86_SERVER
    ASSERT_CURIOSITY(tls_gdt_index ==
                     (kernel_is_64bit() ? GDT_64BIT : GDT_32BIT));
#endif

#ifdef CLIENT_INTERFACE
    if (INTERNAL_OPTION(private_loader) && tls_gdt_index != -1) {
        /* Use the app's selector with our own TLS base for libraries.  app_fs
         * and app_gs are initialized by the caller in os_tls_app_seg_init().
         */
        int index = SELECTOR_INDEX(os_tls->app_lib_tls_reg);
        if (index == 0) {
            /* An index of zero means the app has no TLS (yet), and happens
             * during early injection.  We use -1 to grab a new entry.  When the
             * app asks for its first table entry with set_thread_area, we give
             * it this one and emulate its usage of the segment.
             */
            ASSERT_CURIOSITY(DYNAMO_OPTION(early_inject) && "app has "
                             "no TLS, but we used non-early injection");
            initialize_ldt_struct(&desc, NULL, 0, -1);
            res = dynamorio_syscall(SYS_set_thread_area, 1, &desc);
            LOG(GLOBAL, LOG_THREADS, 4,
                "%s: set_thread_area -1 => %d res, %d index\n",
                __FUNCTION__, res, desc.entry_number);
            ASSERT(res >= 0);
            if (res >= 0) {
                return_stolen_lib_tls_gdt = true;
                index = desc.entry_number;
            }
        }
        lib_tls_gdt_index = index;
    }
#endif
}
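
A minimal standalone sketch of the slot probe above, assuming a Linux host with GDT-based TLS (32-bit, or x86-64's ia32 emulation): an entry_number of -1 asks the kernel to pick a free TLS slot and write the chosen index back into the descriptor, which is how choose_gdt_slots discovers what is available.

#include <asm/ldt.h> /* struct user_desc */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int
main(void)
{
    struct user_desc desc;
    memset(&desc, 0, sizeof(desc));
    /* Base and limit of zero, but still marked present, which claims the slot. */
    desc.entry_number = (unsigned int)-1; /* let the kernel choose */
    long res = syscall(SYS_set_thread_area, &desc);
    if (res == 0)
        printf("kernel picked GDT TLS slot %u\n", desc.entry_number);
    else
        perror("set_thread_area");
    return 0;
}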
Example 6
void
native_module_unhook(module_area_t *ma)
{
    ASSERT_CURIOSITY(false && "native_module_unhook NYI");
}
Example 7
void
native_module_hook(module_area_t *ma, bool at_map)
{
    ASSERT_CURIOSITY(false && "native_module_hook NYI");
}
Example 8
bool
module_walk_program_headers(app_pc base, size_t view_size, bool at_map, bool dyn_reloc,
                            OUT app_pc *out_base /* relative pc */,
                            OUT app_pc *out_first_end /* relative pc */,
                            OUT app_pc *out_max_end /* relative pc */,
                            OUT char **out_soname, OUT os_module_data_t *out_data)
{
    mach_header_t *hdr = (mach_header_t *)base;
    struct load_command *cmd, *cmd_stop;
    app_pc seg_min_start = (app_pc)POINTER_MAX;
    app_pc seg_max_end = NULL, seg_first_end = NULL;
    bool found_seg = false;
    size_t linkedit_file_off = 0, linkedit_mem_off, exports_file_off = 0;
    ASSERT(is_macho_header(base, view_size));
    cmd = (struct load_command *)(hdr + 1);
    cmd_stop = (struct load_command *)((byte *)cmd + hdr->sizeofcmds);
    while (cmd < cmd_stop) {
        if (cmd->cmd == LC_SEGMENT) {
            segment_command_t *seg = (segment_command_t *)cmd;
            found_seg = true;
            LOG(GLOBAL, LOG_VMAREAS, 4, "%s: segment %s addr=0x%x sz=0x%x file=0x%x\n",
                __FUNCTION__, seg->segname, seg->vmaddr, seg->vmsize, seg->fileoff);
            if ((app_pc)seg->vmaddr + seg->vmsize > seg_max_end)
                seg_max_end = (app_pc)seg->vmaddr + seg->vmsize;
            if (strcmp(seg->segname, "__PAGEZERO") == 0 && seg->initprot == 0) {
                /* Skip it: zero page for executable, and it's hard to identify
                 * that page as part of the module.
                 */
            } else if ((app_pc)seg->vmaddr < seg_min_start) {
                seg_min_start = (app_pc)seg->vmaddr;
                seg_first_end = (app_pc)seg->vmaddr + seg->vmsize;
            }
            if (strcmp(seg->segname, "__LINKEDIT") == 0) {
                linkedit_file_off = seg->fileoff;
                linkedit_mem_off = seg->vmaddr;
            }
        } else if (cmd->cmd == LC_DYLD_INFO || cmd->cmd == LC_DYLD_INFO_ONLY) {
            struct dyld_info_command *di = (struct dyld_info_command *)cmd;
            LOG(GLOBAL, LOG_VMAREAS, 4, "%s: exports addr=0x%x sz=0x%x\n", __FUNCTION__,
                di->export_off, di->export_size);
            exports_file_off = di->export_off;
            if (out_data != NULL)
                out_data->exports_sz = di->export_size;
        } else if (cmd->cmd == LC_ID_DYLIB) {
            struct dylib_command *dy = (struct dylib_command *)cmd;
            char *soname = (char *)cmd + dy->dylib.name.offset;
            /* XXX: we assume these strings are always null-terminated */
            /* They seem to have full paths on Mac.  We drop to basename, as
             * that's what many clients expect for module_name.
             */
            char *slash = strrchr(soname, '/');
            if (slash != NULL)
                soname = slash + 1;
            LOG(GLOBAL, LOG_VMAREAS, 4, "%s: lib identity %s\n", __FUNCTION__, soname);
            if (out_soname != NULL)
                *out_soname = soname;
            if (out_data != NULL) {
                out_data->timestamp = dy->dylib.timestamp;
                out_data->current_version = dy->dylib.current_version;
                out_data->compatibility_version = dy->dylib.compatibility_version;
            }
        }
        cmd = (struct load_command *)((byte *)cmd + cmd->cmdsize);
    }
    if (found_seg) {
        ptr_int_t load_delta = base - seg_min_start;
        ptr_int_t linkedit_delta = 0;
        if (linkedit_file_off > 0) {
            linkedit_delta = ((ssize_t)linkedit_mem_off - linkedit_file_off);
        }
        LOG(GLOBAL, LOG_VMAREAS, 4, "%s: bounds " PFX "-" PFX "\n", __FUNCTION__,
            seg_min_start, seg_max_end);
        if (out_base != NULL)
            *out_base = seg_min_start;
        if (out_first_end != NULL)
            *out_first_end = seg_first_end;
        if (out_max_end != NULL)
            *out_max_end = seg_max_end;
        if (out_data != NULL) {
            app_pc shared_start, shared_end;
            bool have_shared = module_dyld_shared_region(&shared_start, &shared_end);
            if (have_shared && base >= shared_start && base < shared_end) {
                /* These should have had their segment bounds updated */
                ASSERT_CURIOSITY(seg_min_start == base);
                out_data->in_shared_cache = true;
            }
            /* Now that we have the load delta, we can add the abs addr segments */
            cmd = (struct load_command *)(hdr + 1);
            while (cmd < cmd_stop) {
                if (cmd->cmd == LC_SEGMENT) {
                    segment_command_t *seg = (segment_command_t *)cmd;
                    if (strcmp(seg->segname, "__PAGEZERO") == 0 && seg->initprot == 0) {
                        /* skip */
                    } else {
                        app_pc seg_start = (app_pc)seg->vmaddr + load_delta;
                        size_t seg_size = seg->vmsize;
                        bool shared = false;
                        if (strcmp(seg->segname, "__LINKEDIT") == 0 && have_shared &&
                            seg_start >= shared_start && seg_start < shared_end) {
                            /* We assume that all __LINKEDIT segments in the
                             * dyld cache are shared as one single segment.
                             */
                            shared = true;
                            /* XXX: seg->vmsize is too large for these: it extends
                             * off the end of the mapping.  I have no idea why.
                             * So we truncate it.  We leave max_end above.
                             */
                            if (seg_start + seg->vmsize > shared_end) {
                                LOG(GLOBAL, LOG_VMAREAS, 4,
                                    "%s: truncating __LINKEDIT size from " PIFX
                                    " to " PIFX "\n",
                                    __FUNCTION__, seg->vmsize, shared_end - seg_start);
                                seg_size = shared_end - seg_start;
                            }
                        }
                        module_add_segment_data(
                            out_data, 0 /*don't know*/, seg_start, seg_size,
                            /* we want initprot, not maxprot, right? */
                            vmprot_to_memprot(seg->initprot),
                            /* XXX: alignment is specified per section --
                             * ignoring for now
                             */
                            PAGE_SIZE, shared, seg->fileoff);
                    }
                } else if (cmd->cmd == LC_SYMTAB) {
                    /* even if stripped, dynamic symbols are in this table */
                    struct symtab_command *symtab = (struct symtab_command *)cmd;
                    out_data->symtab =
                        (app_pc)symtab->symoff + load_delta + linkedit_delta;
                    out_data->num_syms = symtab->nsyms;
                    out_data->strtab =
                        (app_pc)symtab->stroff + load_delta + linkedit_delta;
                    out_data->strtab_sz = symtab->strsize;
                } else if (cmd->cmd == LC_UUID) {
                    memcpy(out_data->uuid, ((struct uuid_command *)cmd)->uuid,
                           sizeof(out_data->uuid));
                }
                cmd = (struct load_command *)((byte *)cmd + cmd->cmdsize);
            }
            /* FIXME i#58: we need to fill in more of out_data, like preferred
             * base.  For alignment: it's per-section, so how handle it?
             */
            out_data->base_address = seg_min_start;
            out_data->alignment = PAGE_SIZE; /* FIXME i#58: need min section align? */
            if (linkedit_file_off > 0 && exports_file_off > 0) {
                out_data->exports =
                    (app_pc)load_delta + exports_file_off + linkedit_delta;
            } else
                out_data->exports = NULL;
        }
    }
    return found_seg;
}
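
A minimal standalone sketch of the same load-command walk, assuming a 64-bit macOS host: iterate the images dyld has already mapped into the current process and print each LC_SEGMENT_64 (the function above uses width-abstracted typedefs such as segment_command_t and LC_SEGMENT; this sketch hardcodes the 64-bit variants).

#include <mach-o/dyld.h>
#include <mach-o/loader.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint32_t i, c;
    for (i = 0; i < _dyld_image_count(); i++) {
        const struct mach_header_64 *hdr =
            (const struct mach_header_64 *)_dyld_get_image_header(i);
        const struct load_command *cmd = (const struct load_command *)(hdr + 1);
        printf("%s\n", _dyld_get_image_name(i));
        for (c = 0; c < hdr->ncmds; c++) {
            if (cmd->cmd == LC_SEGMENT_64) {
                const struct segment_command_64 *seg =
                    (const struct segment_command_64 *)cmd;
                /* segname is a fixed 16-byte field, not necessarily
                 * null-terminated, hence %.16s.
                 */
                printf("  %.16s vmaddr=0x%llx vmsize=0x%llx\n", seg->segname,
                       seg->vmaddr, seg->vmsize);
            }
            cmd = (const struct load_command *)((const char *)cmd + cmd->cmdsize);
        }
    }
    return 0;
}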
Example 9
static int
dl_iterate_get_path_cb(struct dl_phdr_info *info, size_t size, void *data)
{
    dl_iterate_data_t *iter_data = (dl_iterate_data_t *)data;
    /* info->dlpi_addr is offset from preferred so we need to calculate the
     * absolute address of the base.
     * we can calculate the absolute address of the first segment, but ELF
     * doesn't seem to guarantee that either the elf header (base of
     * file) or the program headers (info->dlpi_phdr) are later than
     * the min_vaddr, so it's a little confusing as to what would be
     * in the maps file or whatever and would thus be the base we're looking
     * to match: for now we assume the page with min_vaddr is that base.
     * If elf header, program header, and 1st segment could all be on
     * separate pages, I don't see any way to find the elf header in
     * such cases short of walking backward and looking for the magic #s.
     */
    app_pc pref_start, pref_end;
    app_pc min_vaddr = module_vaddr_from_prog_header((app_pc)info->dlpi_phdr,
                                                     info->dlpi_phnum, NULL, NULL);
    app_pc base = info->dlpi_addr + min_vaddr;
    /* Note that dl_iterate_phdr doesn't give a name for the executable or
     * ld-linux.so presumably b/c those are mapped by the kernel so the
     * user-space loader doesn't need to know their file paths.
     */
    LOG(GLOBAL, LOG_VMAREAS, 2,
        "dl_iterate_get_path_cb: addr=" PFX " hdrs=" PFX " base=" PFX " name=%s\n",
        info->dlpi_addr, info->dlpi_phdr, base, info->dlpi_name);
    /* all we have is an addr somewhere in the module, so we need the end */
    if (module_walk_program_headers(base,
                                    /* FIXME: don't have view size: but
                                     * anything larger than header sizes works
                                     */
                                    PAGE_SIZE, false,
                                    true, /* i#1589: ld.so relocated .dynamic */
                                    &pref_start, NULL, &pref_end, NULL, NULL)) {
        /* we're passed back start,end of preferred base */
        if ((iter_data->target_addr != NULL && iter_data->target_addr >= base &&
             iter_data->target_addr < base + (pref_end - pref_start)) ||
            (iter_data->target_path != NULL &&
             /* if we're passed an ambiguous name, we return first hit.
              * if passed full path, should normally be what was used to
              * load, so should match.
              */
             strstr(info->dlpi_name, iter_data->target_path) != NULL)) {
            if (iter_data->path_size > 0) {
                /* We want just the path, not the filename */
                char *slash = strrchr(info->dlpi_name, '/');
                size_t copy_len;
                ASSERT_CURIOSITY(slash != NULL);
                ASSERT_CURIOSITY((slash - info->dlpi_name) < iter_data->path_size);
                copy_len = MIN(iter_data->path_size - 1,
                               (size_t)(slash - info->dlpi_name));
                strncpy(iter_data->path_out, info->dlpi_name, copy_len);
                /* strncpy does not null-terminate on truncation */
                iter_data->path_out[copy_len] = '\0';
            }
            iter_data->mod_start = base;
            iter_data->mod_end = base + (pref_end - pref_start);
            return 1; /* done iterating */
        }
    } else {
        ASSERT_NOT_REACHED();
    }
    return 0; /* keep looking */
}
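
A minimal standalone sketch of driving a callback like the one above, assuming a glibc host: dl_iterate_phdr walks every loaded module and hands the callback its load bias, program headers, and name.

#define _GNU_SOURCE
#include <link.h>
#include <stdio.h>

static int
print_module_cb(struct dl_phdr_info *info, size_t size, void *data)
{
    (void)size;
    (void)data;
    /* dlpi_name is empty for the main executable, as noted above. */
    printf("load bias=%#lx phnum=%u name=%s\n", (unsigned long)info->dlpi_addr,
           (unsigned int)info->dlpi_phnum,
           info->dlpi_name[0] == '\0' ? "(main executable)" : info->dlpi_name);
    return 0; /* returning non-zero would stop the iteration */
}

int
main(void)
{
    dl_iterate_phdr(print_module_cb, NULL);
    return 0;
}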
Example 10
#ifdef WINDOWS
static
#endif
void
handle_nudge(dcontext_t *dcontext, nudge_arg_t *arg)
{
    uint nudge_action_mask = arg->nudge_action_mask;

    /* Future version checks would go here. */
    ASSERT_CURIOSITY(arg->version == NUDGE_ARG_CURRENT_VERSION);

    /* Nudge shouldn't start with any locks held.  Do this assert after the
     * dynamo_exited check; otherwise the locks may be deleted. */
    ASSERT_OWN_NO_LOCKS();

    STATS_INC(num_nudges);

#ifdef WINDOWS
    /* Linux does this in signal.c */
    SYSLOG_INTERNAL_INFO("received nudge mask=0x%x id=0x%08x arg=0x"ZHEX64_FORMAT_STRING,
                         arg->nudge_action_mask, arg->client_id, arg->client_arg);
#endif

    if (nudge_action_mask == 0) {
        ASSERT_CURIOSITY(false && "Nudge: no action specified");
        return;
    } else if (nudge_action_mask >= NUDGE_GENERIC(PARAMETRIZED_END)) {
        ASSERT(false && "Nudge: unknown nudge action");
        return;
    }

    /* In -thin_client mode only detach and process_control nudges are allowed;
     * case 8888. */
#define VALID_THIN_CLIENT_NUDGES (NUDGE_GENERIC(process_control)|NUDGE_GENERIC(detach))
    if (DYNAMO_OPTION(thin_client)) {
        if (TEST(VALID_THIN_CLIENT_NUDGES, nudge_action_mask)) {
            /* If it is a valid thin client nudge, then disable all others. */
            nudge_action_mask &= VALID_THIN_CLIENT_NUDGES;
        } else {
            return;   /* invalid nudge for thin_client, so mute it */
        }
    }

    /* FIXME: NYI action handlers.  Once implemented, move each to the desired order. */
    if (TEST(NUDGE_GENERIC(upgrade), nudge_action_mask)) {
        /* FIXME: watch out for flushed clean-call fragment */
        nudge_action_mask &= ~NUDGE_GENERIC(upgrade);
        ASSERT_NOT_IMPLEMENTED(false && "case 4179");
    }
    if (TEST(NUDGE_GENERIC(kstats), nudge_action_mask)) {
        nudge_action_mask &= ~NUDGE_GENERIC(kstats);
        ASSERT_NOT_IMPLEMENTED(false);
    }
#ifdef INTERNAL
    if (TEST(NUDGE_GENERIC(stats), nudge_action_mask)) {
        nudge_action_mask &= ~NUDGE_GENERIC(stats);
        ASSERT_NOT_IMPLEMENTED(false);
    }
    if (TEST(NUDGE_GENERIC(invalidate), nudge_action_mask)) {
        /* FIXME: watch out for flushed clean-call fragment  */
        nudge_action_mask &= ~NUDGE_GENERIC(invalidate);
        ASSERT_NOT_IMPLEMENTED(false);
    }
    if (TEST(NUDGE_GENERIC(recreate_pc), nudge_action_mask)) {
        nudge_action_mask &= ~NUDGE_GENERIC(recreate_pc);
        ASSERT_NOT_IMPLEMENTED(false);
    }
    if (TEST(NUDGE_GENERIC(recreate_state), nudge_action_mask)) {
        nudge_action_mask &= ~NUDGE_GENERIC(recreate_state);
        ASSERT_NOT_IMPLEMENTED(false);
    }
    if (TEST(NUDGE_GENERIC(reattach), nudge_action_mask)) {
        /* FIXME: watch out for flushed clean-call fragment */
        nudge_action_mask &= ~NUDGE_GENERIC(reattach);
        ASSERT_NOT_IMPLEMENTED(false);
    }
#endif /* INTERNAL */
    if (TEST(NUDGE_GENERIC(diagnose), nudge_action_mask)) {
        nudge_action_mask &= ~NUDGE_GENERIC(diagnose);
        ASSERT_NOT_IMPLEMENTED(false);
    }

    /* Implemented action handlers */
    if (TEST(NUDGE_GENERIC(opt), nudge_action_mask)) {
        nudge_action_mask &= ~NUDGE_GENERIC(opt);
        synchronize_dynamic_options();
    }
    if (TEST(NUDGE_GENERIC(ldmp), nudge_action_mask)) {
        nudge_action_mask &= ~NUDGE_GENERIC(ldmp);
        os_dump_core("Nudge triggered ldmp.");
    }
    if (TEST(NUDGE_GENERIC(freeze), nudge_action_mask)) {
        nudge_action_mask &= ~NUDGE_GENERIC(freeze);
        coarse_units_freeze_all(true/*in-place: FIXME: separate nudge for non?*/);
    }
    if (TEST(NUDGE_GENERIC(persist), nudge_action_mask)) {
        nudge_action_mask &= ~NUDGE_GENERIC(persist);
        coarse_units_freeze_all(false/*!in-place==persist*/);
    }
#ifdef CLIENT_INTERFACE
    if (TEST(NUDGE_GENERIC(client), nudge_action_mask)) {
        nudge_action_mask &= ~NUDGE_GENERIC(client);
        instrument_nudge(dcontext, arg->client_id, arg->client_arg);
    }
#endif
#ifdef PROCESS_CONTROL
    if (TEST(NUDGE_GENERIC(process_control), nudge_action_mask)) {  /* Case 8594 */
        nudge_action_mask &= ~NUDGE_GENERIC(process_control);
        /* Need to synchronize because process control can be switched between
         * on (white or black list) & off.  FIXME - the nudge mask should specify this,
         * but doesn't hurt to do it again. */
        synchronize_dynamic_options();
        if (IS_PROCESS_CONTROL_ON())
            process_control();

        /* If process control is enforced then control won't come back.  If
         * either -detect_mode is on or if there was nothing to enforce, control
         * comes back in which case it is safe to let remaining nudges be
         * processed because no core state would have been changed. */
    }
#endif
#ifdef HOTPATCHING
    if (DYNAMO_OPTION(hot_patching) && DYNAMO_OPTION(liveshields) &&
        TEST_ANY(NUDGE_GENERIC(policy)|NUDGE_GENERIC(mode)|NUDGE_GENERIC(lstats),
                 nudge_action_mask)) {
        hotp_nudge_update(nudge_action_mask &
                          (NUDGE_GENERIC(policy)|NUDGE_GENERIC(mode)|NUDGE_GENERIC(lstats)));
        nudge_action_mask &= ~(NUDGE_GENERIC(policy)|NUDGE_GENERIC(mode)|NUDGE_GENERIC(lstats));
    }
#endif
#ifdef PROGRAM_SHEPHERDING
    if (TEST(NUDGE_GENERIC(violation), nudge_action_mask)) {
        nudge_action_mask &= ~NUDGE_GENERIC(violation);
        /* Use nudge mechanism to trigger a security violation at an
         * arbitrary time. Note - is only useful for testing kill process attack
         * handling as this is not an app thread (we injected it). */
        /* see bug 652 for planned improvements */
        security_violation(dcontext, dcontext->next_tag,
                           ATTACK_SIM_NUDGE_VIOLATION, OPTION_BLOCK|OPTION_REPORT);
    }
#endif
    if (TEST(NUDGE_GENERIC(reset), nudge_action_mask)) {
        nudge_action_mask &= ~NUDGE_GENERIC(reset);
        if (DYNAMO_OPTION(enable_reset)) {
            mutex_lock(&reset_pending_lock);
            /* fcache_reset_all_caches_proactively() will unlock */
            fcache_reset_all_caches_proactively(RESET_ALL);
            /* NOTE - reset is safe since we won't return to the code cache below (we
             * will in fact not return at all). */
        } else {
            SYSLOG_INTERNAL_WARNING("nudge reset ignored since resets are disabled");
        }
    }
#ifdef WINDOWS
    /* The detach handler is last since in the common case it doesn't return. */
    if (TEST(NUDGE_GENERIC(detach), nudge_action_mask)) {
        dcontext->free_app_stack = false;
        nudge_action_mask &= ~NUDGE_GENERIC(detach);
        detach_helper(DETACH_NORMAL_TYPE);
    }
#endif
}
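
Every handler above follows one shape: test a bit in the mask, clear it, act. A sketch of the bit-test helpers this assumes; the names match DynamoRIO's conventions, but the exact definitions are not shown in these examples.

/* TESTALL: all mask bits set; TESTANY: at least one mask bit set. */
#define TESTALL(mask, var) (((mask) & (var)) == (mask))
#define TESTANY(mask, var) (((mask) & (var)) != 0)
#define TEST TESTANY /* TEST(flag, mask): is the flag bit set? */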
Example 11
/* See memquery.h for full interface specs, which are identical to
 * memquery_library_bounds().
 */
int
memquery_library_bounds_by_iterator(const char *name, app_pc *start/*IN/OUT*/,
                                    app_pc *end/*OUT*/,
                                    char *fullpath/*OPTIONAL OUT*/, size_t path_size)
{
    int count = 0;
    bool found_library = false;
    char libname[MAXIMUM_PATH];
    const char *name_cmp = name;
    memquery_iter_t iter;
    app_pc last_base = NULL;
    app_pc last_end = NULL;
    size_t image_size = 0;
    app_pc cur_end = NULL;
    app_pc mod_start = NULL;
    ASSERT(name != NULL || start != NULL);

    /* If name is non-NULL, start can be NULL, so we have to walk the whole
     * address space even when we have syscalls for memquery (e.g., on Mac).
     * Even if start is non-NULL, it could be in the middle of the library.
     */
    memquery_iterator_start(&iter, NULL,
                            /* We're never called from a fragile place like a
                             * signal handler, so as long as it's not real early
                             * it's ok to alloc.
                             */
                            dynamo_heap_initialized);
    libname[0] = '\0';
    while (memquery_iterator_next(&iter)) {
        LOG(GLOBAL, LOG_VMAREAS, 5, "start="PFX" end="PFX" prot=%x comment=%s\n",
            iter.vm_start, iter.vm_end, iter.prot, iter.comment);

        /* Record the base of each differently-named set of entries up until
         * we find our target, when we'll clobber libpath
         */
        if (!found_library &&
            strncmp(libname, iter.comment, BUFFER_SIZE_ELEMENTS(libname)) != 0) {
            last_base = iter.vm_start;
            /* last_end is used to know what's readable beyond last_base */
            if (TEST(MEMPROT_READ, iter.prot))
                last_end = iter.vm_end;
            else
                last_end = last_base;
            /* remember name so we can find the base of a multiply-mapped so */
            strncpy(libname, iter.comment, BUFFER_SIZE_ELEMENTS(libname));
            NULL_TERMINATE_BUFFER(libname);
        }

        if ((name_cmp != NULL &&
             (strstr(iter.comment, name_cmp) != NULL ||
              /* For Linux, include mid-library (non-.bss) anonymous mappings.
               * Our private loader
               * fills mapping holes with anonymous memory instead of a
               * MEMPROT_NONE mapping from the original file.
               * For Mac, this includes mid-library .bss.
               */
              (found_library && iter.comment[0] == '\0' && image_size != 0 &&
               iter.vm_end - mod_start < image_size))) ||
            (name == NULL && *start >= iter.vm_start && *start < iter.vm_end)) {
            if (!found_library) {
                size_t mod_readable_sz;
                char *dst = (fullpath != NULL) ? fullpath : libname;
                size_t dstsz = (fullpath != NULL) ? path_size :
                    BUFFER_SIZE_ELEMENTS(libname);
                char *slash = strrchr(iter.comment, '/');
                ASSERT_CURIOSITY(slash != NULL);
                ASSERT_CURIOSITY((slash - iter.comment) < dstsz);
                /* we keep the last '/' at end */
                ++slash;
                strncpy(dst, iter.comment, MIN(dstsz, (slash - iter.comment)));
                /* strncpy does not null-terminate if it copies the max */
                dst[dstsz - 1] = '\0';
                if (name == NULL)
                    name_cmp = dst;
                found_library = true;
                /* Most libraries have multiple segments, and some have the
                 * ELF header repeated in a later mapping, so we can't rely
                 * on is_elf_so_header() and header walking.
                 * We use the name tracking to remember the first entry
                 * that had this name.
                 */
                if (last_base == NULL) {
                    mod_start = iter.vm_start;
                    mod_readable_sz = iter.vm_end - iter.vm_start;
                } else {
                    mod_start = last_base;
                    mod_readable_sz = last_end - last_base;
                }
                if (module_is_header(mod_start, mod_readable_sz)) {
                    app_pc mod_base, mod_end;
                    if (module_walk_program_headers(mod_start, mod_readable_sz, false,
                                                    &mod_base, NULL, &mod_end, NULL,
                                                    NULL)) {
                        image_size = mod_end - mod_base;
                        LOG(GLOBAL, LOG_VMAREAS, 4, "%s: image size is "PIFX"\n",
                            __FUNCTION__, image_size);
                        ASSERT_CURIOSITY(image_size != 0);
                    } else {
                        ASSERT_NOT_REACHED();
                    }
                } else {
                    ASSERT(false && "expected elf header");
                }
            }
            count++;
            cur_end = iter.vm_end;
        } else if (found_library) {
            /* hit non-matching, we expect module segments to be adjacent */
            break;
        }
    }

    /* Xref PR 208443: .bss sections are anonymous (no file name listed in
     * maps file), but not every library has one.  We have to parse the ELF
     * header to know since we can't assume that a subsequent anonymous
     * region is .bss. */
    if (image_size != 0 && cur_end - mod_start < image_size) {
        /* Found a .bss section.  Check the current mapping (note: the .bss might
         * be only part of the mapping, due to os region merging? FIXME investigate). */
        ASSERT_CURIOSITY(iter.vm_start == cur_end /* no gaps, FIXME might there be
                                                   * a gap if the file has large
                                                   * alignment and no data section?
                                                   * curiosity for now*/);
        ASSERT_CURIOSITY(iter.inode == 0); /* .bss is anonymous */
        ASSERT_CURIOSITY(iter.vm_end - mod_start >= image_size);/* should be big enough */
        count++;
        cur_end = mod_start + image_size;
    } else {
        /* Shouldn't have more mapped than the size of the module, unless it's a
         * second adjacent separate map of the same file.  Curiosity for now. */
        ASSERT_CURIOSITY(image_size == 0 || cur_end - mod_start == image_size);
    }
    memquery_iterator_stop(&iter);

    if (start != NULL)
        *start = mod_start;
    if (end != NULL)
        *end = cur_end;
    return count;
}
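
A minimal standalone sketch of the kind of iteration the memquery iterator wraps, assuming a Linux host: walk /proc/self/maps and print each region's bounds, protection, and comment, mirroring the iter.vm_start/vm_end/prot/comment fields used above.

#include <stdio.h>

int
main(void)
{
    char line[512];
    FILE *f = fopen("/proc/self/maps", "r");
    if (f == NULL)
        return 1;
    while (fgets(line, sizeof(line), f) != NULL) {
        unsigned long start, end;
        char prot[5] = "";
        char comment[256] = "";
        /* Fields: start-end perms offset dev inode pathname (optional) */
        if (sscanf(line, "%lx-%lx %4s %*x %*x:%*x %*d %255s", &start, &end, prot,
                   comment) >= 3) {
            printf("start=0x%lx end=0x%lx prot=%s comment=%s\n", start, end, prot,
                   comment);
        }
    }
    fclose(f);
    return 0;
}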