Example #1
void
native_exec_init(void)
{
    native_module_init();
    if (!DYNAMO_OPTION(native_exec) || DYNAMO_OPTION(thin_client))
        return;
    VMVECTOR_ALLOC_VECTOR(native_exec_areas, GLOBAL_DCONTEXT, VECTOR_SHARED,
                          native_exec_areas);
    ASSERT(retstub_end == retstub_start +
           MAX_NATIVE_RETSTACK * BACK_FROM_NATIVE_RETSTUB_SIZE);
}
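The vector set up here is consulted later to decide whether execution at a given PC should go native. A self-contained sketch of that kind of range lookup follows; it stands in for vmvector queries on native_exec_areas and is not DynamoRIO's actual implementation.

#include <stdbool.h>
#include <stddef.h>

typedef unsigned char byte;
typedef struct { byte *start; byte *end; } area_t;

/* stands in for the shared vmvector native_exec_areas */
static area_t native_areas[64];
static size_t num_native_areas;

static bool
is_native_pc(byte *pc)
{
    for (size_t i = 0; i < num_native_areas; i++) {
        /* vmvector ranges are half-open: [start, end) */
        if (pc >= native_areas[i].start && pc < native_areas[i].end)
            return true;
    }
    return false;
}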
Example #2
File: stats.c Project: AVGirl/dynamorio
void
kstat_thread_init(dcontext_t *dcontext)
{
    thread_kstats_t *new_thread_kstats;
    if (!DYNAMO_OPTION(kstats))
        return;                 /* dcontext->thread_kstats stays NULL */

    /* allocated on thread heap - use global if timing initialization matters */
    new_thread_kstats = HEAP_TYPE_ALLOC(dcontext, thread_kstats_t, ACCT_STATS, UNPROTECTED);
    LOG(THREAD, LOG_STATS, 2, "thread_kstats="PFX" size=%d\n", new_thread_kstats,
        sizeof(thread_kstats_t));
    /* initialize any thread stats bookkeeping fields before assigning to dcontext */
    kstat_init_variables(&new_thread_kstats->vars_kstats);
    /* add a dummy node to save one branch in UPDATE_CURRENT_COUNTER */
    new_thread_kstats->stack_kstats.depth = 1;

    new_thread_kstats->thread_id = get_thread_id();
#ifdef DEBUG
    new_thread_kstats->outfile_kstats = THREAD;
#else
    new_thread_kstats->outfile_kstats =
        open_log_file(kstats_thread_logfile_name(), NULL, 0);
#endif
    dcontext->thread_kstats = new_thread_kstats;

    /* need to do this in a thread after it's initialized */
    kstat_calibrate();

    KSTART_DC(dcontext, thread_measured);

    LOG(THREAD, LOG_STATS, 2, "threads_started\n");
}
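The depth = 1 dummy node is a branch-elimination trick: the bottom slot absorbs updates so the hot-path macro never has to test for an empty stack. A minimal sketch under assumed, simplified types (not the real kstats structures):

#include <stdint.h>

#define MAX_KSTACK 16

typedef struct { uint64_t subpath_time; } kstat_node_t;

typedef struct {
    unsigned depth;                  /* starts at 1: slot 0 is a dummy sink */
    kstat_node_t stack[MAX_KSTACK];
} kstat_stack_t;

static inline void
update_current_counter(kstat_stack_t *ks, uint64_t cycles)
{
    /* no "is the stack empty?" branch: the dummy node absorbs stray time */
    ks->stack[ks->depth - 1].subpath_time += cycles;
}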
Example #3
File: stats.c Project: AVGirl/dynamorio
void
kstat_init()
{
    kstat_frequency_per_msec = get_timer_frequency();
    kstat_ignore_context_switch = KSTAT_OUTLIER_THRESHOLD_MS * kstat_frequency_per_msec;

    LOG(GLOBAL, LOG_STATS, 1, "Processor speed: "UINT64_FORMAT_STRING"MHz\n",
        kstat_frequency_per_msec/1000);

    /* FIXME: There is no check for TSC feature and whether CR4.TSD is set
     * so we can read it at CPL 3
     */

    if (!DYNAMO_OPTION(kstats))
        return;

    kstat_init_variables(&process_kstats);
#ifdef DEBUG
    process_kstats_outfile = GLOBAL;
#else
    /* Open a process-wide kstats file. open_thread_private_file() does the
     * job when passed the appropriate basename (2nd arg).
     */
    process_kstats_outfile =
        open_log_file(kstats_main_logfile_name(), NULL, 0);
#endif
}
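kstat_frequency_per_msec ties raw timestamp-counter ticks to wall-clock milliseconds, which is why dividing by 1000 yields MHz in the LOG call above. An illustrative, self-contained way to derive such a figure with rdtsc (the real get_timer_frequency() is DR's own calibration; this is only a sketch):

#include <stdint.h>
#include <time.h>
#include <x86intrin.h>   /* __rdtsc */

static uint64_t
ticks_per_msec(void)
{
    struct timespec ts = { 0, 100 * 1000 * 1000 };   /* sleep 100 ms */
    uint64_t start = __rdtsc();
    nanosleep(&ts, NULL);
    return (__rdtsc() - start) / 100;   /* TSC ticks per millisecond */
}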
Example #4
static bool
check_and_mark_native_exec(module_area_t *ma, bool add)
{
    bool is_native = false;
    const char *name = GET_MODULE_NAME(&ma->names);
    ASSERT(os_get_module_info_locked());
    if (DYNAMO_OPTION(native_exec) && name != NULL &&
        on_native_exec_list(name)) {
        LOG(GLOBAL, LOG_INTERP|LOG_VMAREAS, 1,
            "module %s is on native_exec list\n", name);
        is_native = true;
    }

    if (add && is_native) {
        RSTATS_INC(num_native_module_loads);
        vmvector_add(native_exec_areas, ma->start, ma->end, NULL);
    } else if (!add) {
        /* If we're removing and it's native, it should be on there already.  If
         * it's not native, then it shouldn't be present, but we'll remove
         * whatever is there.
         */
        DEBUG_DECLARE(bool present =)
            vmvector_remove(native_exec_areas, ma->start, ma->end);
        ASSERT_CURIOSITY((is_native && present) || (!is_native && !present));
    }
    return is_native;
}
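The DEBUG_DECLARE wrapper above keeps `present` out of release builds, where ASSERT_CURIOSITY compiles away and the variable would otherwise be unused. Its conventional definition, as a sketch:

#ifdef DEBUG
# define DEBUG_DECLARE(declaration) declaration
#else
# define DEBUG_DECLARE(declaration) /* nothing in release builds */
#endif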
Example #5
static bool
on_native_exec_list(const char *modname)
{
    bool onlist = false;

    ASSERT(!DYNAMO_OPTION(thin_client));
    if (!DYNAMO_OPTION(native_exec))
        return false;

    if (!IS_STRING_OPTION_EMPTY(native_exec_default_list)) {
        string_option_read_lock();
        LOG(THREAD_GET, LOG_INTERP|LOG_VMAREAS, 4,
            "on_native_exec_list: module %s vs default list %s\n",
            (modname == NULL ? "null" : modname),
            DYNAMO_OPTION(native_exec_default_list));
        onlist = check_filter(DYNAMO_OPTION(native_exec_default_list), modname);
        string_option_read_unlock();
    }
    if (!onlist &&
        !IS_STRING_OPTION_EMPTY(native_exec_list)) {
        string_option_read_lock();
        LOG(THREAD_GET, LOG_INTERP|LOG_VMAREAS, 4,
            "on_native_exec_list: module %s vs append list %s\n",
            modname==NULL?"null":modname, DYNAMO_OPTION(native_exec_list));
        onlist = check_filter(DYNAMO_OPTION(native_exec_list), modname);
        string_option_read_unlock();
    }
    return onlist;
}
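check_filter matches the module name against the option string. A simplified stand-in, assuming a ';'-separated, case-insensitive list (the real routine also handles wildcards and platform-specific separators):

#include <stdbool.h>
#include <string.h>
#include <strings.h>   /* strncasecmp */

static bool
check_filter_sketch(const char *list, const char *modname)
{
    if (list == NULL || modname == NULL)
        return false;
    size_t len = strlen(modname);
    const char *p = list;
    while (*p != '\0') {
        const char *sep = strchr(p, ';');
        size_t entry_len = (sep == NULL) ? strlen(p) : (size_t)(sep - p);
        if (entry_len == len && strncasecmp(p, modname, len) == 0)
            return true;
        if (sep == NULL)
            break;
        p = sep + 1;
    }
    return false;
}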
Example #6
/* Returns whether it had to change page protections */
static bool
patch_coarse_branch(cache_pc stub, cache_pc tgt, bool hot_patch,
                    coarse_info_t *info /*OPTIONAL*/)
{
    bool stubs_readonly = false;
    bool stubs_restore = false;
    if (DYNAMO_OPTION(persist_protect_stubs)) {
        if (info == NULL)
            info = get_stub_coarse_info(stub);
        ASSERT(info != NULL);
        if (info->stubs_readonly) {
            stubs_readonly = true;
            stubs_restore = true;
            /* if we don't preserve mapped-in COW state the protection change
             * will fail (case 10570)
             */
            make_copy_on_writable((byte *)PAGE_START(entrance_stub_jmp(stub)),
                                  /* stub jmp can't cross page boundary (can't
                                   * cross cache line in fact) */
                                  PAGE_SIZE);
            if (DYNAMO_OPTION(persist_protect_stubs_limit) > 0) {
                info->stubs_write_count++;
                if (info->stubs_write_count >
                    DYNAMO_OPTION(persist_protect_stubs_limit)) {
                    SYSLOG_INTERNAL_WARNING_ONCE("pcache stubs over write limit");
                    STATS_INC(pcache_unprot_over_limit);
                    stubs_restore = false;
                    info->stubs_readonly = false;
                }
            }
        }
    }
    patch_branch(entrance_stub_jmp(stub), tgt, hot_patch);
    if (stubs_restore)
        make_unwritable((byte *)PAGE_START(entrance_stub_jmp(stub)), PAGE_SIZE);
    return stubs_readonly;
}
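On Linux, preserving copy-on-write while making a stub page writable boils down to an mprotect on the containing page; since the stub jmp cannot cross a page (or even a cache line), one page suffices. A sketch of what make_copy_on_writable() amounts to, assuming 4K pages (the real function goes through DR's memory layer):

#include <stdint.h>
#include <sys/mman.h>

typedef uintptr_t ptr_uint_t;
#ifndef PAGE_SIZE
# define PAGE_SIZE 4096   /* assumed 4K pages for the sketch */
#endif
#define PAGE_START(pc) ((ptr_uint_t)(pc) & ~((ptr_uint_t)PAGE_SIZE - 1))

static int
make_writable_page(void *pc)
{
    /* a private mapping stays copy-on-write when re-protected writable */
    return mprotect((void *)PAGE_START(pc), PAGE_SIZE,
                    PROT_READ | PROT_WRITE | PROT_EXEC);
}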
Example #7
File: jit_opt.c Project: AVGirl/dynamorio
void
jitopt_init()
{
    if (DYNAMO_OPTION(opt_jit)) {
        fragment_tree = fragment_tree_create();

#ifdef ANNOTATIONS
        dr_annotation_register_call(DYNAMORIO_ANNOTATE_MANAGE_CODE_AREA_NAME,
                                    (void *) annotation_manage_code_area, false, 2,
                                    DR_ANNOTATION_CALL_TYPE_FASTCALL);
        dr_annotation_register_call(DYNAMORIO_ANNOTATE_UNMANAGE_CODE_AREA_NAME,
                                    (void *) annotation_unmanage_code_area, false, 2,
                                    DR_ANNOTATION_CALL_TYPE_FASTCALL);
#endif /* ANNOTATIONS */
    }
}
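On the application side, a JIT built against DR's annotation header would mark its generated-code regions so jitopt can track them. A sketch, with the macro spellings assumed from the *_NAME constants registered above:

#include <stddef.h>
#include "dr_annotations.h"   /* DR's app-side annotation support */

void
my_jit_commit(void *code, size_t size)
{
    /* assumed 2-argument form, matching the arg count registered above */
    DYNAMORIO_ANNOTATE_MANAGE_CODE_AREA(code, size);
}

void
my_jit_free(void *code, size_t size)
{
    DYNAMORIO_ANNOTATE_UNMANAGE_CODE_AREA(code, size);
}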
Example #8
File: stats.c Project: AVGirl/dynamorio
void
kstat_exit()
{
    if (!DYNAMO_OPTION(kstats))
        return;

    /* report merged process statistics */
    mutex_lock(&process_kstats_lock);
    print_file(process_kstats_outfile, "Process KSTATS:\n");
    kstat_report(process_kstats_outfile, &process_kstats);
    mutex_unlock(&process_kstats_lock);

    DELETE_LOCK(process_kstats_lock);

#ifndef DEBUG
    os_close(process_kstats_outfile);
#endif
}
Example #9
/* Passing in stub's info avoids a vmvector lookup */
void
unlink_entrance_stub(dcontext_t *dcontext, cache_pc stub, uint flags,
                     coarse_info_t *info /*OPTIONAL*/)
{
    cache_pc tgt;
    ASSERT(DYNAMO_OPTION(coarse_units));
    ASSERT(coarse_is_entrance_stub(stub));
    ASSERT(self_owns_recursive_lock(&change_linking_lock));
    LOG(THREAD, LOG_LINKS, 5,
        "unlink_entrance_stub "PFX"\n", stub);
    if (TESTANY(FRAG_IS_TRACE_HEAD|FRAG_IS_TRACE, flags))
        tgt = trace_head_return_coarse_prefix(stub, info);
    else
        tgt = fcache_return_coarse_prefix(stub, info);
    if (patch_coarse_branch(stub, tgt, HOT_PATCHABLE, info))
        STATS_INC(pcache_unprot_unlink);
}
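The self_owns_recursive_lock assertion documents the calling contract: the unlink happens under change_linking_lock. A sketch of a conforming call site, using DR's recursive-lock helpers (names are DR internals; this caller is illustrative):

static void
unlink_stub_example(dcontext_t *dcontext, cache_pc stub, uint flags)
{
    acquire_recursive_lock(&change_linking_lock);
    unlink_entrance_stub(dcontext, stub, flags, NULL/*info: let it look up*/);
    release_recursive_lock(&change_linking_lock);
}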
Example #10
File: jit_opt.c Project: AVGirl/dynamorio
uint
jitopt_clear_span(app_pc start, app_pc end)
{
    bb_node_t *overlap;
    uint removal_count = 0;

    ASSERT(DYNAMO_OPTION(opt_jit));

    do {
        /* XXX i#1114: maybe more efficient to delete deepest overlapping node first */
        overlap = fragment_tree_overlap_lookup(fragment_tree, start, end);
        if (overlap == fragment_tree->nil)
            break;
        fragment_tree_delete(fragment_tree, overlap);
        removal_count++;
    } while (true);

    return removal_count;
}
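Each fragment_tree_delete can restructure the tree, so the loop above re-queries for an overlap after every removal rather than iterating. Over a flat array the same span-clearing logic can scan once; a self-contained sketch:

#include <stdbool.h>
#include <stddef.h>

typedef struct { unsigned char *start, *end; bool live; } node_t;

static unsigned
clear_span_sketch(node_t *nodes, size_t n, unsigned char *start, unsigned char *end)
{
    unsigned removed = 0;
    for (size_t i = 0; i < n; i++) {
        /* half-open overlap: [start, end) intersects [node.start, node.end) */
        if (nodes[i].live && nodes[i].start < end && nodes[i].end > start) {
            nodes[i].live = false;   /* the tree version must re-lookup here */
            removed++;
        }
    }
    return removed;
}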
Example #11
void
tls_thread_init(os_local_state_t *os_tls, byte *segment)
{
    /* We have four different ways to obtain TLS, each with its own limitations:
     *
     * 1) Piggyback on the threading system (like we do on Windows): here that would
     *    be pthreads, which uses a segment since at least RH9, and uses gdt-based
     *    segments for NPTL.  The advantage is we won't run out of ldt or gdt entries
     *    (except when the app itself would).  The disadvantage is we're stealing
     *    application slots and we rely on user mode interfaces.
     *
     * 2) Steal an ldt entry via SYS_modify_ldt.  This suffers from the 8K ldt entry
     *    limit and requires that we update manually on a new thread.  For 64-bit
     *    we're limited here to a 32-bit base.  (Strangely, the kernel's
     *    include/asm-x86_64/ldt.h implies that the base is ignored: but it doesn't
     *    seem to be.)
     *
     * 3) Steal a gdt entry via SYS_set_thread_area.  There is a 3rd unused entry
     *    (after pthreads and wine) we could use.  The kernel swaps for us, and with
     *    CLONE_TLS the kernel will set up the entry for a new thread for us.  Xref
     *    PR 192231 and PR 285898.  This system call is disabled on 64-bit 2.6
     *    kernels (though the man page for arch_prctl implies it isn't for 2.5
     *    kernels?!?)
     *
     * 4) Use SYS_arch_prctl.  This is only implemented on 64-bit kernels, and can
     *    only be used to set the gdt entries that fs and gs select for.  Faster to
     *    use <4GB base (obtain with mmap MAP_32BIT) since can use gdt; else have to
     *    use wrmsr.  The man pages say "ARCH_SET_GS is disabled in some kernels".
     */
    uint selector;
    int index = -1;
    int res;
#ifdef X64
    /* First choice is gdt, which means arch_prctl.  Since this may fail
     * on some kernels, we require -heap_in_lower_4GB so we can fall back
     * on modify_ldt.
     */
    byte *cur_gs;
    res = dynamorio_syscall(SYS_arch_prctl, 2, ARCH_GET_GS, &cur_gs);
    if (res >= 0) {
        LOG(GLOBAL, LOG_THREADS, 1, "os_tls_init: cur gs base is "PFX"\n", cur_gs);
        /* If we're a non-initial thread, gs will be set to the parent thread's value */
        if (cur_gs == NULL || is_dynamo_address(cur_gs) ||
            /* By resolving i#107, we can handle gs conflicts between app and dr. */
            INTERNAL_OPTION(mangle_app_seg)) {
            res = dynamorio_syscall(SYS_arch_prctl, 2, ARCH_SET_GS, segment);
            if (res >= 0) {
                os_tls->tls_type = TLS_TYPE_ARCH_PRCTL;
                LOG(GLOBAL, LOG_THREADS, 1,
                    "os_tls_init: arch_prctl successful for base "PFX"\n", segment);
                /* Kernel should have written %gs for us if using GDT */
                if (!dynamo_initialized && read_thread_register(SEG_TLS) == 0) {
                    LOG(GLOBAL, LOG_THREADS, 1, "os_tls_init: using MSR\n");
                    tls_using_msr = true;
                }
                if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
                    res = dynamorio_syscall(SYS_arch_prctl, 2, ARCH_SET_FS,
                                            os_tls->os_seg_info.priv_lib_tls_base);
                    /* Assuming set fs must be successful if set gs succeeded. */
                    ASSERT(res >= 0);
                }
            } else {
                /* we've found a kernel where ARCH_SET_GS is disabled */
                ASSERT_CURIOSITY(false && "arch_prctl failed on set but not get");
                LOG(GLOBAL, LOG_THREADS, 1,
                    "os_tls_init: arch_prctl failed: error %d\n", res);
            }
        } else {
            /* FIXME PR 205276: we don't currently handle it: fall back on ldt, but
             * we'll have the same conflict w/ the selector...
             */
            ASSERT_BUG_NUM(205276, cur_gs == NULL);
        }
    }
#endif

    if (os_tls->tls_type == TLS_TYPE_NONE) {
        /* Second choice is set_thread_area */
        /* PR 285898: if we added CLONE_SETTLS to all clone calls (and emulated vfork
         * with clone) we could avoid having to set tls up for each thread (as well
         * as solve race PR 207903), at least for kernel 2.5.32+.  For now we stick
         * w/ manual setup.
         */
        our_modify_ldt_t desc;

        /* Pick which GDT slots we'll use for DR TLS and for library TLS if
         * using the private loader.
         */
        choose_gdt_slots(os_tls);

        if (tls_gdt_index > -1) {
            /* Now that we know which GDT slot to use, install the per-thread base
             * into it.
             */
            /* Base here must be 32-bit */
            IF_X64(ASSERT(DYNAMO_OPTION(heap_in_lower_4GB) &&
                          segment <= (byte*)UINT_MAX));
            initialize_ldt_struct(&desc, segment, PAGE_SIZE, tls_gdt_index);
            res = dynamorio_syscall(SYS_set_thread_area, 1, &desc);
            LOG(GLOBAL, LOG_THREADS, 3,
                "%s: set_thread_area %d => %d res, %d index\n",
                __FUNCTION__, tls_gdt_index, res, desc.entry_number);
            ASSERT(res < 0 || desc.entry_number == tls_gdt_index);
        } else {
            res = -1;  /* fall back on LDT */
        }

        if (res >= 0) {
            LOG(GLOBAL, LOG_THREADS, 1,
                "os_tls_init: set_thread_area successful for base "PFX" @index %d\n",
                segment, tls_gdt_index);
            os_tls->tls_type = TLS_TYPE_GDT;
            index = tls_gdt_index;
            selector = GDT_SELECTOR(index);
            WRITE_DR_SEG(selector); /* macro needs lvalue! */
        } else {
            IF_VMX86(ASSERT_NOT_REACHED()); /* since no modify_ldt */
            LOG(GLOBAL, LOG_THREADS, 1,
                "os_tls_init: set_thread_area failed: error %d\n", res);
        }

#ifdef CLIENT_INTERFACE
        /* Install the library TLS base. */
        if (INTERNAL_OPTION(private_loader) && res >= 0) {
            app_pc base = os_tls->os_seg_info.priv_lib_tls_base;
            /* lib_tls_gdt_index is picked in choose_gdt_slots. */
            ASSERT(lib_tls_gdt_index >= gdt_entry_tls_min);
            initialize_ldt_struct(&desc, base, GDT_NO_SIZE_LIMIT,
                                  lib_tls_gdt_index);
            res = dynamorio_syscall(SYS_set_thread_area, 1, &desc);
            LOG(GLOBAL, LOG_THREADS, 3,
                "%s: set_thread_area %d => %d res, %d index\n",
                __FUNCTION__, lib_tls_gdt_index, res, desc.entry_number);
            if (res >= 0) {
                /* i558 update lib seg reg to enforce the segment changes */
                selector = GDT_SELECTOR(lib_tls_gdt_index);
                LOG(GLOBAL, LOG_THREADS, 2, "%s: setting %s to selector 0x%x\n",
                    __FUNCTION__, reg_names[LIB_SEG_TLS], selector);
                WRITE_LIB_SEG(selector);
            }
        }
#endif
    }

    if (os_tls->tls_type == TLS_TYPE_NONE) {
        /* Third choice: modify_ldt, which should be available on kernel 2.3.99+ */
        /* Base here must be 32-bit */
        IF_X64(ASSERT(DYNAMO_OPTION(heap_in_lower_4GB) && segment <= (byte*)UINT_MAX));
        /* we have the thread_initexit_lock so no race here */
        index = find_unused_ldt_index();
        selector = LDT_SELECTOR(index);
        ASSERT(index != -1);
        create_ldt_entry((void *)segment, PAGE_SIZE, index);
        os_tls->tls_type = TLS_TYPE_LDT;
        WRITE_DR_SEG(selector); /* macro needs lvalue! */
        LOG(GLOBAL, LOG_THREADS, 1,
            "os_tls_init: modify_ldt successful for base "PFX" w/ index %d\n",
            segment, index);
    }

    os_tls->ldt_index = index;
}
Example #12
/* Queries the set of available GDT slots, and initializes:
 * - tls_gdt_index
 * - gdt_entry_tls_min on ia32
 * - lib_tls_gdt_index if using private loader
 * GDT slots are initialized with a base and limit of zero.  The caller is
 * responsible for setting them to a real base.
 */
static void
choose_gdt_slots(os_local_state_t *os_tls)
{
    our_modify_ldt_t desc;
    int i;
    int avail_index[GDT_NUM_TLS_SLOTS];
    our_modify_ldt_t clear_desc;
    int res;

    /* using local static b/c dynamo_initialized is not set for a client thread
     * when created in client's dr_init routine
     */
    /* FIXME: Could be racy if we have multiple threads initializing during
     * startup.
     */
    if (tls_global_init)
        return;
    tls_global_init = true;

    /* We don't want to break the assumptions of pthreads or wine,
     * so we try to take the last slot.  We don't want to hardcode
     * the index b/c the kernel will let us clobber entries so we want
     * to only pass in -1.
     */
    ASSERT(!dynamo_initialized);
    ASSERT(tls_gdt_index == -1);
    for (i = 0; i < GDT_NUM_TLS_SLOTS; i++)
        avail_index[i] = -1;
    for (i = 0; i < GDT_NUM_TLS_SLOTS; i++) {
        /* We use a base and limit of 0 for testing what's available. */
        initialize_ldt_struct(&desc, NULL, 0, -1);
        res = dynamorio_syscall(SYS_set_thread_area, 1, &desc);
        LOG(GLOBAL, LOG_THREADS, 4,
            "%s: set_thread_area -1 => %d res, %d index\n",
            __FUNCTION__, res, desc.entry_number);
        if (res >= 0) {
            /* We assume monotonic increases */
            avail_index[i] = desc.entry_number;
            ASSERT(avail_index[i] > tls_gdt_index);
            tls_gdt_index = desc.entry_number;
        } else
            break;
    }

#ifndef X64
    /* In x86-64's ia32 emulation,
     * set_thread_area(6 <= entry_number && entry_number <= 8) fails
     * with EINVAL (22) because x86-64 only accepts GDT indices 12 to 14
     * for TLS entries.
     */
    if (tls_gdt_index > (gdt_entry_tls_min + GDT_NUM_TLS_SLOTS))
        gdt_entry_tls_min = GDT_ENTRY_TLS_MIN_64;  /* The kernel is x64. */
#endif

    /* Now give up the earlier slots */
    for (i = 0; i < GDT_NUM_TLS_SLOTS; i++) {
        if (avail_index[i] > -1 &&
            avail_index[i] != tls_gdt_index) {
            LOG(GLOBAL, LOG_THREADS, 4,
                "clearing set_thread_area index %d\n", avail_index[i]);
            clear_ldt_struct(&clear_desc, avail_index[i]);
            res = dynamorio_syscall(SYS_set_thread_area, 1, &clear_desc);
            ASSERT(res >= 0);
        }
    }

#ifndef VMX86_SERVER
    ASSERT_CURIOSITY(tls_gdt_index ==
                     (kernel_is_64bit() ? GDT_64BIT : GDT_32BIT));
#endif

#ifdef CLIENT_INTERFACE
    if (INTERNAL_OPTION(private_loader) && tls_gdt_index != -1) {
        /* Use the app's selector with our own TLS base for libraries.  app_fs
         * and app_gs are initialized by the caller in os_tls_app_seg_init().
         */
        int index = SELECTOR_INDEX(os_tls->app_lib_tls_reg);
        if (index == 0) {
            /* An index of zero means the app has no TLS (yet), and happens
             * during early injection.  We use -1 to grab a new entry.  When the
             * app asks for its first table entry with set_thread_area, we give
             * it this one and emulate its usage of the segment.
             */
            ASSERT_CURIOSITY(DYNAMO_OPTION(early_inject) && "app has "
                             "no TLS, but we used non-early injection");
            initialize_ldt_struct(&desc, NULL, 0, -1);
            res = dynamorio_syscall(SYS_set_thread_area, 1, &desc);
            LOG(GLOBAL, LOG_THREADS, 4,
                "%s: set_thread_area -1 => %d res, %d index\n",
                __FUNCTION__, res, desc.entry_number);
            ASSERT(res >= 0);
            if (res >= 0) {
                return_stolen_lib_tls_gdt = true;
                index = desc.entry_number;
            }
        }
        lib_tls_gdt_index = index;
    }
#endif
}
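The probing trick relies on the kernel interface directly: passing entry_number == -1 in a struct user_desc asks set_thread_area to pick the next free GDT TLS slot and write the chosen index back. A standalone illustration for 32-bit x86 Linux (raw syscall, not DR's wrappers; the descriptor is made non-empty so repeated probes advance, matching the monotonic-increase assumption above):

#include <asm/ldt.h>        /* struct user_desc */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int
main(void)
{
    struct user_desc desc = {0};
    desc.entry_number = -1;   /* ask the kernel for the next free TLS slot */
    desc.seg_32bit = 1;       /* non-empty descriptor so the slot stays taken */
    if (syscall(SYS_set_thread_area, &desc) == 0)
        printf("kernel chose GDT index %u\n", desc.entry_number);
    else
        perror("set_thread_area");
    return 0;
}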
Example #13
#ifdef WINDOWS
static
#endif
void
handle_nudge(dcontext_t *dcontext, nudge_arg_t *arg)
{
    uint nudge_action_mask = arg->nudge_action_mask;

    /* Future version checks would go here. */
    ASSERT_CURIOSITY(arg->version == NUDGE_ARG_CURRENT_VERSION);

    /* Nudge shouldn't start with any locks held.  Do this assert after the
     * dynamo_exited check, otherwise the locks may be deleted. */
    ASSERT_OWN_NO_LOCKS();

    STATS_INC(num_nudges);

#ifdef WINDOWS
    /* Linux does this in signal.c */
    SYSLOG_INTERNAL_INFO("received nudge mask=0x%x id=0x%08x arg=0x"ZHEX64_FORMAT_STRING,
                         arg->nudge_action_mask, arg->client_id, arg->client_arg);
#endif

    if (nudge_action_mask == 0) {
        ASSERT_CURIOSITY(false && "Nudge: no action specified");
        return;
    } else if (nudge_action_mask >= NUDGE_GENERIC(PARAMETRIZED_END)) {
        ASSERT(false && "Nudge: unknown nudge action");
        return;
    }

    /* In -thin_client mode only detach and process_control nudges are allowed;
     * case 8888. */
#define VALID_THIN_CLIENT_NUDGES (NUDGE_GENERIC(process_control)|NUDGE_GENERIC(detach))
    if (DYNAMO_OPTION(thin_client)) {
        if (TEST(VALID_THIN_CLIENT_NUDGES, nudge_action_mask)) {
            /* If it is a valid thin client nudge, then disable all others. */
            nudge_action_mask &= VALID_THIN_CLIENT_NUDGES;
        } else {
            return;   /* invalid nudge for thin_client, so mute it */
        }
    }

    /* FIXME: NYI action handlers. As implemented move to desired order. */
    if (TEST(NUDGE_GENERIC(upgrade), nudge_action_mask)) {
        /* FIXME: watch out for flushed clean-call fragment */
        nudge_action_mask &= ~NUDGE_GENERIC(upgrade);
        ASSERT_NOT_IMPLEMENTED(false && "case 4179");
    }
    if (TEST(NUDGE_GENERIC(kstats), nudge_action_mask)) {
        nudge_action_mask &= ~NUDGE_GENERIC(kstats);
        ASSERT_NOT_IMPLEMENTED(false);
    }
#ifdef INTERNAL
    if (TEST(NUDGE_GENERIC(stats), nudge_action_mask)) {
        nudge_action_mask &= ~NUDGE_GENERIC(stats);
        ASSERT_NOT_IMPLEMENTED(false);
    }
    if (TEST(NUDGE_GENERIC(invalidate), nudge_action_mask)) {
        /* FIXME: watch out for flushed clean-call fragment  */
        nudge_action_mask &= ~NUDGE_GENERIC(invalidate);
        ASSERT_NOT_IMPLEMENTED(false);
    }
    if (TEST(NUDGE_GENERIC(recreate_pc), nudge_action_mask)) {
        nudge_action_mask &= ~NUDGE_GENERIC(recreate_pc);
        ASSERT_NOT_IMPLEMENTED(false);
    }
    if (TEST(NUDGE_GENERIC(recreate_state), nudge_action_mask)) {
        nudge_action_mask &= ~NUDGE_GENERIC(recreate_state);
        ASSERT_NOT_IMPLEMENTED(false);
    }
    if (TEST(NUDGE_GENERIC(reattach), nudge_action_mask)) {
        /* FIXME: watch out for flushed clean-call fragment */
        nudge_action_mask &= ~NUDGE_GENERIC(reattach);
        ASSERT_NOT_IMPLEMENTED(false);
    }
#endif /* INTERNAL */
    if (TEST(NUDGE_GENERIC(diagnose), nudge_action_mask)) {
        nudge_action_mask &= ~NUDGE_GENERIC(diagnose);
        ASSERT_NOT_IMPLEMENTED(false);
    }

    /* Implemented action handlers */
    if (TEST(NUDGE_GENERIC(opt), nudge_action_mask)) {
        nudge_action_mask &= ~NUDGE_GENERIC(opt);
        synchronize_dynamic_options();
    }
    if (TEST(NUDGE_GENERIC(ldmp), nudge_action_mask)) {
        nudge_action_mask &= ~NUDGE_GENERIC(ldmp);
        os_dump_core("Nudge triggered ldmp.");
    }
    if (TEST(NUDGE_GENERIC(freeze), nudge_action_mask)) {
        nudge_action_mask &= ~NUDGE_GENERIC(freeze);
        coarse_units_freeze_all(true/*in-place: FIXME: separate nudge for non?*/);
    }
    if (TEST(NUDGE_GENERIC(persist), nudge_action_mask)) {
        nudge_action_mask &= ~NUDGE_GENERIC(persist);
        coarse_units_freeze_all(false/*!in-place==persist*/);
    }
#ifdef CLIENT_INTERFACE
    if (TEST(NUDGE_GENERIC(client), nudge_action_mask)) {
        nudge_action_mask &= ~NUDGE_GENERIC(client);
        instrument_nudge(dcontext, arg->client_id, arg->client_arg);
    }
#endif
#ifdef PROCESS_CONTROL
    if (TEST(NUDGE_GENERIC(process_control), nudge_action_mask)) {  /* Case 8594 */
        nudge_action_mask &= ~NUDGE_GENERIC(process_control);
        /* Need to synchronize because process control can be switched between
         * on (white or black list) & off.  FIXME - the nudge mask should specify this,
         * but doesn't hurt to do it again. */
        synchronize_dynamic_options();
        if (IS_PROCESS_CONTROL_ON())
            process_control();

        /* If process control is enforced then control won't come back.  If
         * either -detect_mode is on or if there was nothing to enforce, control
         * comes back in which case it is safe to let remaining nudges be
         * processed because no core state would have been changed. */
    }
#endif
#ifdef HOTPATCHING
    if (DYNAMO_OPTION(hot_patching) && DYNAMO_OPTION(liveshields) &&
        TESTANY(NUDGE_GENERIC(policy)|NUDGE_GENERIC(mode)|NUDGE_GENERIC(lstats),
                 nudge_action_mask)) {
        hotp_nudge_update(nudge_action_mask &
                          (NUDGE_GENERIC(policy)|NUDGE_GENERIC(mode)|NUDGE_GENERIC(lstats)));
        nudge_action_mask &= ~(NUDGE_GENERIC(policy)|NUDGE_GENERIC(mode)|NUDGE_GENERIC(lstats));
    }
#endif
#ifdef PROGRAM_SHEPHERDING
    if (TEST(NUDGE_GENERIC(violation), nudge_action_mask)) {
        nudge_action_mask &= ~NUDGE_GENERIC(violation);
        /* Use nudge mechanism to trigger a security violation at an
         * arbitrary time. Note - is only useful for testing kill process attack
         * handling as this is not an app thread (we injected it). */
        /* see bug 652 for planned improvements */
        security_violation(dcontext, dcontext->next_tag,
                           ATTACK_SIM_NUDGE_VIOLATION, OPTION_BLOCK|OPTION_REPORT);
    }
#endif
    if (TEST(NUDGE_GENERIC(reset), nudge_action_mask)) {
        nudge_action_mask &= ~NUDGE_GENERIC(reset);
        if (DYNAMO_OPTION(enable_reset)) {
            mutex_lock(&reset_pending_lock);
            /* fcache_reset_all_caches_proactively() will unlock */
            fcache_reset_all_caches_proactively(RESET_ALL);
            /* NOTE - reset is safe since we won't return to the code cache below (we
             * will in fact not return at all). */
        } else {
            SYSLOG_INTERNAL_WARNING("nudge reset ignored since resets are disabled");
        }
    }
#ifdef WINDOWS
    /* The detach handler is last since in the common case it doesn't return. */
    if (TEST(NUDGE_GENERIC(detach), nudge_action_mask)) {
        dcontext->free_app_stack = false;
        nudge_action_mask &= ~NUDGE_GENERIC(detach);
        detach_helper(DETACH_NORMAL_TYPE);
    }
#endif
}
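The dispatch above leans on DR's bit-mask helpers; their conventional definitions, as a sketch:

#define TESTANY(mask, var) (((mask) & (var)) != 0)
#define TESTALL(mask, var) (((mask) & (var)) == (mask))
#define TEST               TESTANY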
Example #14
File: jit_opt.c Project: AVGirl/dynamorio
void
jitopt_add_dgc_bb(app_pc start, app_pc end, bool is_trace_head)
{
    ASSERT(DYNAMO_OPTION(opt_jit));
    fragment_tree_insert(fragment_tree, start, end);
}
Example #15
File: jit_opt.c Project: AVGirl/dynamorio
void
jitopt_exit()
{
    if (DYNAMO_OPTION(opt_jit))
        fragment_tree_destroy(fragment_tree);
}