/* Initializes the drcov coverage library.  Reference-counted: only the first
 * balanced call performs setup; later calls return DRCOVLIB_SUCCESS at once.
 * Validates the caller-supplied options, copies them into the module-level
 * `options`, resolves the log directory, and registers the thread/bb/syscall
 * events needed to collect coverage.
 *
 * Returns DRCOVLIB_SUCCESS, DRCOVLIB_ERROR_INVALID_PARAMETER for a bad
 * options struct, DRCOVLIB_ERROR_INVALID_SETUP when thread-private mode is
 * requested without all-private caches, or DRCOVLIB_ERROR on TLS failure.
 */
drcovlib_status_t
drcovlib_init(drcovlib_options_t *ops)
{
    int count = dr_atomic_add32_return_sum(&drcovlib_init_count, 1);
    if (count > 1)
        return DRCOVLIB_SUCCESS;
    /* Fix: guard against a NULL options pointer before dereferencing it.
     * The struct_size check also rejects callers built against a different
     * layout of drcovlib_options_t.
     */
    if (ops == NULL || ops->struct_size != sizeof(options))
        return DRCOVLIB_ERROR_INVALID_PARAMETER;
    /* Reject any flag bits outside the supported set. */
    if ((ops->flags & (~(DRCOVLIB_DUMP_AS_TEXT|DRCOVLIB_THREAD_PRIVATE))) != 0)
        return DRCOVLIB_ERROR_INVALID_PARAMETER;
    if (TEST(DRCOVLIB_THREAD_PRIVATE, ops->flags)) {
        /* Per-thread coverage requires thread-private code caches. */
        if (!dr_using_all_private_caches())
            return DRCOVLIB_ERROR_INVALID_SETUP;
        drcov_per_thread = true;
    }
    options = *ops;
    /* Copy the log directory into our own buffer (default: cwd) so we do not
     * keep a pointer into caller-owned memory.
     */
    if (options.logdir != NULL)
        dr_snprintf(logdir, BUFFER_SIZE_ELEMENTS(logdir), "%s", ops->logdir);
    else /* default */
        dr_snprintf(logdir, BUFFER_SIZE_ELEMENTS(logdir), ".");
    NULL_TERMINATE_BUFFER(logdir);
    options.logdir = logdir;
    if (options.native_until_thread > 0)
        go_native = true;

    drmgr_init();
    drx_init();

    /* We follow a simple model of the caller requesting the coverage dump,
     * either via calling the exit routine, using its own soft_kills nudge, or
     * an explicit dump call for unusual cases.  This means that drx's
     * soft_kills remains inside the outer later, i.e., the drcov client.  This
     * is the easiest approach for coordinating soft_kills among many libraries.
     * Thus, we do *not* register for an exit event here.
     */

    drmgr_register_thread_init_event(event_thread_init);
    drmgr_register_thread_exit_event(event_thread_exit);
    drmgr_register_bb_instrumentation_event(event_basic_block_analysis, NULL, NULL);
    dr_register_filter_syscall_event(event_filter_syscall);
    drmgr_register_pre_syscall_event(event_pre_syscall);
#ifdef UNIX
    dr_register_fork_init_event(event_fork);
#endif

    tls_idx = drmgr_register_tls_field();
    if (tls_idx == -1)
        return DRCOVLIB_ERROR;

    return event_init();
}
/* Tears down drx.  Reference-counted against drx_init(): only the call that
 * brings the count back to zero releases soft-kills state and drmgr.
 */
DR_EXPORT
void drx_exit()
{
    /* Any remaining (or unbalanced) references mean no teardown yet. */
    if (dr_atomic_add32_return_sum(&drx_init_count, -1) != 0)
        return;

    if (soft_kills_enabled)
        soft_kills_exit();

    drmgr_exit();
}
/* Initializes drx.  Supports nested init/exit pairs: only the first caller
 * performs real setup (drmgr init plus reserving the drx note range); later
 * callers simply bump the reference count.  Always returns true.
 */
DR_EXPORT
bool drx_init(void)
{
    int refs = dr_atomic_add32_return_sum(&drx_init_count, 1);
    if (refs > 1)
        return true; /* already initialized by an earlier caller */

    drmgr_init();
    note_base = drmgr_reserve_note_range(DRX_NOTE_COUNT);
    ASSERT(note_base != DRMGR_NOTE_NONE, "failed to reserve note range");
    return true;
}
/* Per-thread init event.  Two duties:
 * 1) When options.native_until_thread is set, counts new threads and, once
 *    the threshold thread arrives, flips go_native off and re-takes over all
 *    threads that were running natively (suspend all, retakeover each native
 *    one, resume all).  The suspend/retakeover/resume ordering is critical.
 * 2) Allocates this thread's coverage data and stashes it in our TLS slot.
 */
static void
event_thread_init(void *drcontext)
{
    per_thread_t *data;
    /* File-scope-lifetime counter of threads seen; updated atomically below. */
    static volatile int thread_count;

    if (options.native_until_thread > 0) {
        int local_count = dr_atomic_add32_return_sum(&thread_count, 1);
        NOTIFY(1, "@@@@@@@@@@@@@ new thread #%d "TIDFMT"\n", local_count,
               dr_get_thread_id(drcontext));
        if (go_native && local_count == options.native_until_thread) {
            void **drcontexts = NULL;
            uint num_threads, i;
            /* Clear the flag first so newly created threads are not left native. */
            go_native = false;
            NOTIFY(1, "thread "TIDFMT" suspending all threads\n",
                   dr_get_thread_id(drcontext));
            /* DR_SUSPEND_NATIVE includes natively-running threads in the set. */
            if (dr_suspend_all_other_threads_ex(&drcontexts, &num_threads, NULL,
                                                DR_SUSPEND_NATIVE)) {
                NOTIFY(1, "suspended %d threads\n", num_threads);
                for (i = 0; i < num_threads; i++) {
                    if (dr_is_thread_native(drcontexts[i])) {
                        /* Bring this native thread back under DR control. */
                        NOTIFY(2, "\txxx taking over thread #%d "TIDFMT"\n", i,
                               dr_get_thread_id(drcontexts[i]));
                        dr_retakeover_suspended_native_thread(drcontexts[i]);
                    } else {
                        NOTIFY(2, "\tthread #%d "TIDFMT" under DR\n", i,
                               dr_get_thread_id(drcontexts[i]));
                    }
                }
                if (!dr_resume_all_other_threads(drcontexts, num_threads)) {
                    ASSERT(false, "failed to resume threads");
                }
            } else {
                ASSERT(false, "failed to suspend threads");
            }
        }
    }
    /* allocate thread private data for per-thread cache */
    if (drcov_per_thread)
        data = thread_data_create(drcontext);
    else
        data = thread_data_copy(drcontext);
    drmgr_set_tls_field(drcontext, tls_idx, data);
}
/* Tears down drfuzz.  Reference-counted against drfuzz_init(): intermediate
 * exits are no-ops, the final balanced exit frees the callback struct, shuts
 * down drmgr/drwrap, and deletes the fuzz-target table.  An exit without a
 * matching init (count goes negative) yields DRMF_ERROR.
 */
DR_EXPORT drmf_status_t
drfuzz_exit(void)
{
    int remaining = dr_atomic_add32_return_sum(&drfuzz_init_count, -1);
    if (remaining > 0)
        return DRMF_SUCCESS; /* other users still active: nothing to do */
    if (remaining < 0)
        return DRMF_ERROR; /* unbalanced exit without a matching init */

    global_free(callbacks, sizeof(drfuzz_callbacks_t), HEAPSTAT_MISC);

    drmgr_exit();
    drwrap_exit();
    hashtable_delete(&fuzz_target_htable);

    return DRMF_SUCCESS;
}
/* Initializes drfuzz.  Reference-counted: only the first balanced call sets
 * up.  Verifies the DRMF version, allocates the zeroed callback struct,
 * brings up drmgr/drwrap, registers fault + thread + bb events, reserves a
 * TLS slot, and creates the synchronized fuzz-target hashtable.
 */
DR_EXPORT drmf_status_t
drfuzz_init(client_id_t client_id)
{
    drmf_status_t version_res;
    int refs = dr_atomic_add32_return_sum(&drfuzz_init_count, 1);
    if (refs > 1)
        return DRMF_SUCCESS; /* nested init: first caller already set up */

    version_res = drmf_check_version(client_id);
    if (version_res != DRMF_SUCCESS)
        return version_res;

    callbacks = global_alloc(sizeof(drfuzz_callbacks_t), HEAPSTAT_MISC);
    memset(callbacks, 0, sizeof(drfuzz_callbacks_t));

    drmgr_init();
    drwrap_init();

    /* Faults arrive as signals on UNIX and as exceptions on Windows. */
#ifdef UNIX
    drmgr_register_signal_event(fault_handler);
#else /* WINDOWS */
    drmgr_register_exception_event(fault_handler);
#endif
    drmgr_register_thread_init_event(thread_init);
    drmgr_register_thread_exit_event(thread_exit);
    drmgr_register_bb_app2app_event(bb_event, NULL);

    tls_idx_fuzzer = drmgr_register_tls_field();
    if (tls_idx_fuzzer < 0) {
        DRFUZZ_ERROR("drfuzz failed to reserve TLS slot--initialization failed\n");
        /* NOTE(review): on this failure path `callbacks` and the registered
         * events are not torn down — confirm whether callers are expected to
         * invoke drfuzz_exit() after a failed init. */
        return DRMF_ERROR;
    }

    /* Synchronized to allow addition and removal of fuzz targets during execution
     * of the target program, e.g. to explore control flow paths.
     */
    hashtable_init_ex(&fuzz_target_htable, 3, HASH_INTPTR, false/*no strdup*/,
                      true/*synchronized*/, free_fuzz_target,
                      NULL/*no custom hash*/, NULL/*no custom comparator*/);

    return DRMF_SUCCESS;
}
drcovlib_status_t drcovlib_exit(void) { int count = dr_atomic_add32_return_sum(&drcovlib_init_count, -1); if (count != 0) return DRCOVLIB_SUCCESS; if (!drcov_per_thread) { dump_drcov_data(NULL, global_data); global_data_destroy(global_data); } /* destroy module table */ drmodtrack_exit(); drmgr_unregister_tls_field(tls_idx); drx_exit(); drmgr_exit(); return DRCOVLIB_SUCCESS; }
drcovlib_status_t drmodtrack_init(void) { int count = dr_atomic_add32_return_sum(&drmodtrack_init_count, 1); if (count > 1) return DRCOVLIB_SUCCESS; if (!drmgr_init() || !drmgr_register_thread_init_event(event_thread_init) || !drmgr_register_thread_exit_event(event_thread_exit) || !drmgr_register_module_load_event(event_module_load) || !drmgr_register_module_unload_event(event_module_unload)) return DRCOVLIB_ERROR; tls_idx = drmgr_register_tls_field(); if (tls_idx == -1) return DRCOVLIB_ERROR; memset(module_table.cache, 0, sizeof(module_table.cache)); drvector_init(&module_table.vector, 16, false, module_table_entry_free); return DRCOVLIB_SUCCESS; }
/* Signal event: bumps the global signal counter and delivers the signal to
 * the application unchanged.
 */
static dr_signal_action_t
event_signal(void *drcontext, dr_siginfo_t *info)
{
    (void)drcontext; /* unused */
    (void)info;      /* unused */
    dr_atomic_add32_return_sum(&num_signals, 1);
    return DR_SIGNAL_DELIVER;
}
/* Pre-syscall event.  Counts every syscall, then (only when SHOW_RESULTS is
 * defined) intercepts write syscalls: writes to stderr are suppressed with a
 * faked success result, and writes to stdout are redirected to stderr and
 * repeated once (the repeat flag toggles per call).  On UNIX it also dumps
 * stats just before an execve, since they are reset afterward.
 * Returns true to execute the syscall normally, false to skip it.
 */
static bool
event_pre_syscall(void *drcontext, int sysnum)
{
    bool modify_write = (sysnum == write_sysnum);
    dr_atomic_add32_return_sum(&num_syscalls, 1);
#ifdef UNIX
    if (sysnum == SYS_execve) {
        /* our stats will be re-set post-execve so display now */
        show_results();
# ifdef SHOW_RESULTS
        dr_fprintf(STDERR, "<---- execve ---->\n");
# endif
    }
#endif
#ifndef SHOW_RESULTS
    /* for sanity tests that don't show results we don't change the app's output */
    modify_write = false;
#endif
    if (modify_write) {
        /* store params for access post-syscall */
        int i;
        per_thread_t *data = (per_thread_t *)
            drmgr_get_cls_field(drcontext, tcls_idx);
#ifdef WINDOWS
        /* stderr and stdout are identical in our cygwin rxvt shell so for
         * our example we suppress output starting with 'H' instead */
        byte *output = (byte *) dr_syscall_get_param(drcontext, 5);
        byte first;
        size_t read;
        bool ok = dr_safe_read(output, 1, &first, &read);
        if (!ok || read != 1)
            return true; /* data unreadable: execute normally */
        if (dr_is_wow64()) {
            /* store the xcx emulation parameter for wow64 */
            dr_mcontext_t mc = {sizeof(mc),DR_MC_INTEGER/*only need xcx*/};
            dr_get_mcontext(drcontext, &mc);
            data->xcx = mc.xcx;
        }
#endif
        /* Save all params so the post-syscall handler can inspect them. */
        for (i = 0; i < SYS_MAX_ARGS; i++)
            data->param[i] = dr_syscall_get_param(drcontext, i);
        /* suppress stderr */
        if (dr_syscall_get_param(drcontext, 0) == (reg_t) STDERR
#ifdef WINDOWS
            && first == 'H'
#endif
            ) {
            /* pretend it succeeded */
#ifdef UNIX
            /* return the #bytes == 3rd param */
            dr_syscall_result_info_t info = { sizeof(info), };
            info.succeeded = true;
            info.value = dr_syscall_get_param(drcontext, 2);
            dr_syscall_set_result_ex(drcontext, &info);
#else
            /* XXX: we should also set the IO_STATUS_BLOCK.Information field */
            dr_syscall_set_result(drcontext, 0);
#endif
#ifdef SHOW_RESULTS
            dr_fprintf(STDERR, "<---- skipping write to stderr ---->\n");
#endif
            return false; /* skip syscall */
        } else if (dr_syscall_get_param(drcontext, 0) == (reg_t) STDOUT) {
            if (!data->repeat) {
                /* redirect stdout to stderr (unless it's our repeat) */
#ifdef SHOW_RESULTS
                dr_fprintf(STDERR, "<---- changing stdout to stderr ---->\n");
#endif
                dr_syscall_set_param(drcontext, 0, (reg_t) STDERR);
            }
            /* we're going to repeat this syscall once */
            data->repeat = !data->repeat;
        }
    }
    return true; /* execute normally */
}
/* Initializes the symbol cache.  Reference-counted: a second init returns
 * DRMF_WARNING_ALREADY_INITIALIZED without re-running setup.  Registers
 * module load/unload events at fixed drmgr priorities (read on load, save on
 * load after other modload handlers, cleanup on unload), sets up the master
 * hashtable and its lock, copies the cache directory path into our buffer,
 * and creates the directory if needed (aborting only if creation fails and
 * the directory still does not exist, which tolerates a creation race).
 * On Windows, ntdll is pre-loaded into the cache since tools commonly query
 * it during their own init.
 */
DR_EXPORT drmf_status_t
drsymcache_init(client_id_t client_id,
                const char *symcache_dir_in,
                size_t modsize_cache_threshold)
{
#ifdef WINDOWS
    module_data_t *mod;
#endif
    drmf_status_t res;
    /* Priorities pin our handlers' ordering relative to other drmgr users. */
    drmgr_priority_t pri_mod_load_cache =
        {sizeof(pri_mod_load_cache), DRMGR_PRIORITY_NAME_DRSYMCACHE,
         NULL, NULL, DRMGR_PRIORITY_MODLOAD_DRSYMCACHE_READ};
    drmgr_priority_t pri_mod_unload_cache =
        {sizeof(pri_mod_unload_cache), DRMGR_PRIORITY_NAME_DRSYMCACHE,
         NULL, NULL, DRMGR_PRIORITY_MODUNLOAD_DRSYMCACHE};
    drmgr_priority_t pri_mod_save_cache =
        {sizeof(pri_mod_save_cache), DRMGR_PRIORITY_NAME_DRSYMCACHE_SAVE,
         NULL, NULL, DRMGR_PRIORITY_MODLOAD_DRSYMCACHE_SAVE};

    /* handle multiple sets of init/exit calls */
    int count = dr_atomic_add32_return_sum(&symcache_init_count, 1);
    if (count > 1)
        return DRMF_WARNING_ALREADY_INITIALIZED;

    res = drmf_check_version(client_id);
    if (res != DRMF_SUCCESS)
        return res;

    drmgr_init();
    drmgr_register_module_load_event_ex(symcache_module_load,
                                        &pri_mod_load_cache);
    drmgr_register_module_unload_event_ex(symcache_module_unload,
                                          &pri_mod_unload_cache);
    drmgr_register_module_load_event_ex(symcache_module_load_save,
                                        &pri_mod_save_cache);

    initialized = true;

    op_modsize_cache_threshold = modsize_cache_threshold;

    hashtable_init_ex(&symcache_table, SYMCACHE_MASTER_TABLE_HASH_BITS,
                      IF_WINDOWS_ELSE(HASH_STRING_NOCASE, HASH_STRING),
                      true/*strdup*/, false/*!synch*/,
                      symcache_free_entry, NULL, NULL);
    symcache_lock = dr_mutex_create();

    /* Copy the path so we do not retain caller-owned memory.  NOTE(review):
     * symcache_dir_in is not NULL-checked before use — confirm callers
     * always pass a valid path. */
    dr_snprintf(symcache_dir, BUFFER_SIZE_ELEMENTS(symcache_dir),
                "%s", symcache_dir_in);
    NULL_TERMINATE_BUFFER(symcache_dir);
    if (!dr_directory_exists(symcache_dir)) {
        if (!dr_create_dir(symcache_dir)) {
            /* check again in case of a race (i#616) */
            if (!dr_directory_exists(symcache_dir)) {
                NOTIFY_ERROR("Unable to create symcache dir %s"NL,
                             symcache_dir);
                ASSERT(false, "unable to create symcache dir");
                dr_abort();
            }
        }
    }

#ifdef WINDOWS
    /* It's common for tools to query ntdll in their init routines so we add it
     * early here */
    mod = dr_lookup_module_by_name("ntdll.dll");
    if (mod != NULL) {
        symcache_module_load(dr_get_current_drcontext(), mod, true);
        dr_free_module_data(mod);
    }
#endif

    return DRMF_SUCCESS;
}
/* Trace-buffer callback: only tallies how many times it fired; the buffer
 * contents are not inspected.
 */
static void
verify_trace_buffer(void *drcontext, void *buf_base, size_t size)
{
    (void)drcontext; /* unused */
    (void)buf_base;  /* unused */
    (void)size;      /* unused */
    dr_atomic_add32_return_sum(&num_faults, 1);
}