static void
pre_pwrite(void *drcontext)
{
    int fd;
    struct per_thread_journal_state *jstate;
    struct journal_descriptor *jd;

    /* only care about pwrites that target one of the journal file descriptors */
    fd = (int)dr_syscall_get_param(drcontext, 0);
    if (fd != jfile_fds[0] && fd != jfile_fds[1])
        return;

    jstate = (struct per_thread_journal_state *)
        drmgr_get_tls_field(drcontext, tls_idx);
    fi_printf("writing journal\n");
    jstate->using_fd = fd;
    jstate->state = THREAD_STATE_WRITING_JFILE;

    /* the second syscall argument (the pwrite buffer) starts with a journal descriptor */
    jd = (struct journal_descriptor *)dr_syscall_get_param(drcontext, 1);
    DR_ASSERT(jd->magic == JOURNAL_DESC_MAGIC);

    if (jd->flag == JF_STORE)
        jstate->pwrite_state = PWRITE_WRITING_STORE;
    else if (jd->flag == JF_REMOVE_OBJ)
        fi_printf("FIXME: testing object removal is not supported yet");
    else
        die("unknown journal flag: %d\n", jd->flag);
}

static bool
event_pre_syscall(void *drcontext, int sysnum)
{
    per_thread_t *data = drmgr_get_tls_field(drcontext, tls_index);
    //dr_fprintf(STDERR, "pre sysnum %d\n", sysnum);
    /* record the memory-management syscall arguments in per-thread data for later use */
    switch (sysnum) {
    case BRK_SYSCALL:
        data->param[0] = dr_syscall_get_param(drcontext, 0);
        // dr_fprintf(STDERR, " brk %u\n", nm);
        break;
    case MMAP_SYSCALL:
        data->param[1] = dr_syscall_get_param(drcontext, 1);
        // dr_fprintf(STDERR, " mmap %u\n", nm);
        break;
    case MUNMAP_SYSCALL:
        data->param[0] = dr_syscall_get_param(drcontext, 0);
        data->param[1] = dr_syscall_get_param(drcontext, 1);
        // dr_fprintf(STDERR, " mmap %u\n", nm);
        break;
    }
    return true;
}

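The handler above only stashes the raw arguments; the matching post-syscall callback is not part of this section. A plausible sketch of one, reusing the snippet's per_thread_t and tls_index and assuming BRK_SYSCALL/MMAP_SYSCALL/MUNMAP_SYSCALL are defined as in the snippet, might look like the following (dr_syscall_get_result() is only meaningful in the post-syscall event; the dr_fprintf reporting is purely illustrative):

/* hypothetical post-syscall counterpart to the handler above */
static void
event_post_syscall(void *drcontext, int sysnum)
{
    per_thread_t *data = drmgr_get_tls_field(drcontext, tls_index);
    reg_t result = dr_syscall_get_result(drcontext);
    switch (sysnum) {
    case BRK_SYSCALL:
        /* result is the new program break; param[0] was the requested break */
        dr_fprintf(STDERR, "brk("PFX") -> "PFX"\n", data->param[0], result);
        break;
    case MMAP_SYSCALL:
        /* result is the mapped base; param[1] was the requested length */
        dr_fprintf(STDERR, "mmap len="PFX" -> "PFX"\n", data->param[1], result);
        break;
    case MUNMAP_SYSCALL:
        dr_fprintf(STDERR, "munmap("PFX", "PFX") -> %d\n",
                   data->param[0], data->param[1], (int) result);
        break;
    }
}

/* registered at client init time alongside the pre-syscall handler: */
/* drmgr_register_post_syscall_event(event_post_syscall); */
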
static bool
event_pre_syscall(void *drcontext, int sysnum)
{
#ifdef LINUX
    if (sysnum == SYS_clone) {
        per_thread_t *data = (per_thread_t *)
            drmgr_get_cls_field(drcontext, cls_idx);
        data->saved_param = dr_syscall_get_param(drcontext, 0);
    }
#else
    if (sysnum == sysnum_CreateProcess || sysnum == sysnum_CreateProcessEx ||
        sysnum == sysnum_CreateUserProcess) {
        per_thread_t *data = (per_thread_t *)
            drmgr_get_cls_field(drcontext, cls_idx);
        data->saved_param = dr_syscall_get_param(drcontext, 0);
    }
#endif
    return true;
}

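The callback above assumes a callback-local storage (CLS) slot has already been reserved. A minimal sketch of how that slot and its lifetime callbacks might be wired up with drmgr is shown below; the drmgr calls are standard API, but the wiring, the callback names, and the reuse of the snippet's per_thread_t and cls_idx are assumptions (dr_api.h, drmgr.h, and string.h are assumed to be included):

/* hypothetical CLS setup for the clone/CreateProcess tracking above */
static void
event_thread_context_init(void *drcontext, bool new_depth)
{
    per_thread_t *data;
    if (new_depth) {
        /* entering a new callback depth: allocate a fresh slot */
        data = (per_thread_t *) dr_thread_alloc(drcontext, sizeof(per_thread_t));
        drmgr_set_cls_field(drcontext, cls_idx, data);
    } else {
        /* returning to an existing depth: reuse the prior allocation */
        data = (per_thread_t *) drmgr_get_cls_field(drcontext, cls_idx);
    }
    memset(data, 0, sizeof(*data));
}

static void
event_thread_context_exit(void *drcontext, bool thread_exit)
{
    if (thread_exit) {
        per_thread_t *data = (per_thread_t *) drmgr_get_cls_field(drcontext, cls_idx);
        dr_thread_free(drcontext, data, sizeof(per_thread_t));
    }
    /* otherwise leave the field in place for reuse at the same depth */
}

/* at client init time: */
/* cls_idx = drmgr_register_cls_field(event_thread_context_init,
                                      event_thread_context_exit); */
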
static void
handle_pre_prctl(void *drcontext, dr_mcontext_t *mc)
{
    uint request = (uint) dr_syscall_get_param(drcontext, 0);
    ptr_int_t arg1 = (ptr_int_t) dr_syscall_get_param(drcontext, 1);
    /* They all use param #0, which is checked via table specifying 1 arg.
     * Officially it's a 5-arg syscall but so far nothing using beyond 2 args.
     */
    /* XXX: could use SYSINFO_SECONDARY_TABLE instead */
    switch (request) {
    case PR_SET_NAME:
    case PR_GET_NAME:
        if (request == PR_SET_NAME && options.prctl_whitelist[0] != '\0')
            check_prctl_whitelist((byte *)arg1);
        break;
    }
}

static void
pre_open(void *drcontext)
{
    const char *path;
    struct per_thread_journal_state *jstate;

    jstate = (struct per_thread_journal_state *)
        drmgr_get_tls_field(drcontext, tls_idx);
    path = (const char *)dr_syscall_get_param(drcontext, 0);

    if (strstr(path, "journal_file0")) {
        fi_printf("journal_file0 is opened\n");
        DR_ASSERT(jstate->state == THREAD_STATE_DEFAULT);
        jstate->state = THREAD_STATE_OPENING_JFILE_0;
    } else if (strstr(path, "journal_file1")) {
        fi_printf("journal_file1 is opened\n");
        DR_ASSERT(jstate->state == THREAD_STATE_DEFAULT);
        jstate->state = THREAD_STATE_OPENING_JFILE_1;
    }
}

static void
handle_pre_execve(void *drcontext)
{
#ifndef USE_DRSYMS
    /* PR 453867: tell postprocess.pl to watch for new logdir and
     * fork a new copy.
     * FIXME: what if syscall fails?  Punting on that for now.
     * Note that if it fails and then a later one succeeds, postprocess.pl
     * will replace the first with the last.
     */
    char logdir[MAXIMUM_PATH]; /* one reason we're not inside os_post_syscall() */
    size_t bytes_read = 0;
    /* Not using safe_read() since we want a partial read if hits page boundary */
    dr_safe_read((void *) dr_syscall_get_param(drcontext, 0),
                 BUFFER_SIZE_BYTES(logdir), logdir, &bytes_read);
    if (bytes_read < BUFFER_SIZE_BYTES(logdir))
        logdir[bytes_read] = '\0';
    NULL_TERMINATE_BUFFER(logdir);
    ELOGF(0, f_fork, "EXEC path=%s\n", logdir);
#endif
}

static bool
event_pre_syscall(void *drcontext, int sysnum)
{
    bool modify_write = (sysnum == write_sysnum);
    dr_atomic_add32_return_sum(&num_syscalls, 1);
#ifdef UNIX
    if (sysnum == SYS_execve) {
        /* our stats will be re-set post-execve so display now */
        show_results();
#    ifdef SHOW_RESULTS
        dr_fprintf(STDERR, "<---- execve ---->\n");
#    endif
    }
#endif
#ifndef SHOW_RESULTS
    /* for sanity tests that don't show results we don't change the app's output */
    modify_write = false;
#endif
    if (modify_write) {
        /* store params for access post-syscall */
        int i;
        per_thread_t *data = (per_thread_t *) drmgr_get_cls_field(drcontext, tcls_idx);
#ifdef WINDOWS
        /* stderr and stdout are identical in our cygwin rxvt shell so for
         * our example we suppress output starting with 'H' instead
         */
        byte *output = (byte *) dr_syscall_get_param(drcontext, 5);
        byte first;
        size_t read;
        bool ok = dr_safe_read(output, 1, &first, &read);
        if (!ok || read != 1)
            return true; /* data unreadable: execute normally */
        if (dr_is_wow64()) {
            /* store the xcx emulation parameter for wow64 */
            dr_mcontext_t mc = { sizeof(mc), DR_MC_INTEGER /* only need xcx */ };
            dr_get_mcontext(drcontext, &mc);
            data->xcx = mc.xcx;
        }
#endif
        for (i = 0; i < SYS_MAX_ARGS; i++)
            data->param[i] = dr_syscall_get_param(drcontext, i);
        /* suppress stderr */
        if (dr_syscall_get_param(drcontext, 0) == (reg_t) STDERR
#ifdef WINDOWS
            && first == 'H'
#endif
            ) {
            /* pretend it succeeded */
#ifdef UNIX
            /* return the #bytes == 3rd param */
            dr_syscall_result_info_t info = { sizeof(info), };
            info.succeeded = true;
            info.value = dr_syscall_get_param(drcontext, 2);
            dr_syscall_set_result_ex(drcontext, &info);
#else
            /* XXX: we should also set the IO_STATUS_BLOCK.Information field */
            dr_syscall_set_result(drcontext, 0);
#endif
#ifdef SHOW_RESULTS
            dr_fprintf(STDERR, "<---- skipping write to stderr ---->\n");
#endif
            return false; /* skip syscall */
        } else if (dr_syscall_get_param(drcontext, 0) == (reg_t) STDOUT) {
            if (!data->repeat) {
                /* redirect stdout to stderr (unless it's our repeat) */
#ifdef SHOW_RESULTS
                dr_fprintf(STDERR, "<---- changing stdout to stderr ---->\n");
#endif
                dr_syscall_set_param(drcontext, 0, (reg_t) STDERR);
            }
            /* we're going to repeat this syscall once */
            data->repeat = !data->repeat;
        }
    }
    return true; /* execute normally */
}

static bool
event_pre_syscall(void *drcontext, int sysnum)
{
    ATOMIC_INC(num_syscalls);
#ifdef LINUX
    if (sysnum == SYS_execve) {
        /* our stats will be re-set post-execve so display now */
        show_results();
# ifdef SHOW_RESULTS
        dr_fprintf(STDERR, "<---- execve ---->\n");
# endif
    }
#endif
#ifdef SHOW_RESULTS
    dr_fprintf(STDERR, "[%d] "PFX" "PFX" "PFX"\n", sysnum,
               dr_syscall_get_param(drcontext, 0),
               dr_syscall_get_param(drcontext, 1),
               dr_syscall_get_param(drcontext, 2));
#endif
    if (sysnum == write_sysnum) {
        /* store params for access post-syscall */
        int i;
        per_thread_t *data = (per_thread_t *) drmgr_get_cls_field(drcontext, tcls_idx);
#ifdef WINDOWS
        /* stderr and stdout are identical in our cygwin rxvt shell so for
         * our example we suppress output starting with 'H' instead
         */
        byte *output = (byte *) dr_syscall_get_param(drcontext, 5);
        byte first;
        size_t read;
        bool ok = dr_safe_read(output, 1, &first, &read);
        if (!ok || read != 1)
            return true; /* data unreadable: execute normally */
        if (dr_is_wow64()) {
            /* store the xcx emulation parameter for wow64 */
            dr_mcontext_t mc = { sizeof(mc), DR_MC_INTEGER /* only need xcx */ };
            dr_get_mcontext(drcontext, &mc);
            data->xcx = mc.xcx;
        }
#endif
        for (i = 0; i < SYS_MAX_ARGS; i++)
            data->param[i] = dr_syscall_get_param(drcontext, i);
        /* suppress stderr */
        if (dr_syscall_get_param(drcontext, 0) == (reg_t) STDERR
#ifdef WINDOWS
            && first == 'H'
#endif
            ) {
            /* pretend it succeeded */
#ifdef LINUX
            /* return the #bytes == 3rd param */
            dr_syscall_set_result(drcontext, dr_syscall_get_param(drcontext, 2));
#else
            /* we should also set the IO_STATUS_BLOCK.Information field */
            dr_syscall_set_result(drcontext, 0);
#endif
#ifdef SHOW_RESULTS
            dr_fprintf(STDERR, " [%d] => skipped\n", sysnum);
#endif
            return false; /* skip syscall */
        } else if (dr_syscall_get_param(drcontext, 0) == (reg_t) STDOUT) {
            if (!data->repeat) {
                /* redirect stdout to stderr (unless it's our repeat) */
#ifdef SHOW_RESULTS
                dr_fprintf(STDERR, " [%d] STDOUT => STDERR\n", sysnum);
#endif
                dr_syscall_set_param(drcontext, 0, (reg_t) STDERR);
            }
            /* we're going to repeat this syscall once */
            data->repeat = !data->repeat;
        }
    }
    return true; /* execute normally */
}

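For context, the two write-redirecting callbacks above are presumably registered from the client's entry point roughly as follows. The sketch below assumes the usual DR/drmgr headers (and the UNIX syscall header for SYS_execve), reuses the snippets' write_sysnum, and uses an illustrative event_filter_syscall name; write_sysnum lookup, the CLS registration (see the CLS sketch earlier in this section), and cleanup are omitted:

/* hypothetical syscall-event wiring for the sample callbacks above */
static bool
event_filter_syscall(void *drcontext, int sysnum)
{
    /* only request pre/post events for the syscalls the handlers inspect */
#ifdef UNIX
    if (sysnum == SYS_execve)
        return true;
#endif
    return sysnum == write_sysnum;
}

DR_EXPORT void
dr_client_main(client_id_t id, int argc, const char *argv[])
{
    drmgr_init();
    dr_register_filter_syscall_event(event_filter_syscall);
    drmgr_register_pre_syscall_event(event_pre_syscall);
}
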
static void
handle_clone(void *drcontext, dr_mcontext_t *mc)
{
    uint flags = (uint) dr_syscall_get_param(drcontext, 0);
    app_pc newsp = (app_pc) dr_syscall_get_param(drcontext, 1);

    if (!options.shadowing)
        return;

    /* PR 418629: we need to change the stack from defined (marked when it
     * was allocated) to unaddressable. Originally we couldn't get the stack
     * bounds in the thread init event (xref PR 395156) so we watch here:
     * we could move this code now but not worth it.
     * FIXME: should we watch SYS_exit and put stack back to defined
     * in case it's re-used?  Seems better to leave it unaddressable
     * since may be more common to have racy accesses we want to flag
     * rather than legitimate re-use?
     */
    if (TEST(CLONE_VM, flags) && newsp != NULL) {
        app_pc stack_base = NULL;
        size_t stack_size;
        /* newsp is TOS */
        ASSERT(options.track_heap, "now relying on -track_heap in general");
        if (is_in_heap_region(newsp)) {
            /* How find base of malloc chunk to then find size?
             * Don't want to store all mallocs in an interval data structure
             * (shown to be slow in PR 535568).
             * Maybe hardcode knowledge of how far from upper address
             * glibc clone() sets newsp?
             * Actually, should just walk shadow memory until hit
             * unaddressable.
             */
            /* FIXME: NEVER TESTED! */
            app_pc pc;
            ssize_t sz;
            /* PR 525807 added an interval tree of "large mallocs" */
            if (malloc_large_lookup(newsp, &pc, (size_t*)&sz)) {
                stack_base = pc;
                stack_size = sz;
            } else {
                /* Should be rare so we just do brute force and slow */
                pc = shadow_prev_dword(newsp, newsp - options.stack_swap_threshold,
                                       SHADOW_UNADDRESSABLE);
                sz = malloc_chunk_size(pc + 1);
                if (sz > 0) { /* returns -1 on failure */
                    stack_base = pc + 1;
                    stack_size = sz;
                }
            }
        } else {
            /* On linux a pre-adjacent mmap w/ same prot will be merged into the
             * same region as returned by dr_query_memory() and we'll mark it as
             * unaddressable => many false positives (on FC10, adding a printf
             * to suite/tests/linux/clone.c between the stack mmap and the clone
             * call resulted in the merge).  My solution is to track mmaps and
             * assume a stack will be a single mmap (maybe separate guard page
             * but that should be noprot so ok to not mark unaddress: xref PR
             * 406328).
             */
            if (!mmap_anon_lookup(newsp, &stack_base, &stack_size)) {
                /* Fall back to a query */
                LOG(2, "thread stack "PFX" not in mmap table, querying\n", newsp);
                if (!dr_query_memory(newsp - 1, &stack_base, &stack_size, NULL)) {
                    /* We can estimate the stack end by assuming that clone()
                     * puts less than a page on the stack, but the base is harder:
                     * instead we rely on PR 525807 handle_push_addressable() to
                     * mark the stack unaddr one page at a time.
                     */
                    stack_base = NULL;
                }
            }
        }
        if (stack_base != NULL) {
            LOG(2, "changing thread stack "PFX"-"PFX" -"PFX" to unaddressable\n",
                stack_base, stack_base + stack_size, newsp);
            ASSERT(stack_base + stack_size >= newsp,
                   "new thread's stack alloc messed up");
            if (options.check_stack_bounds) {
                /* assume that above newsp should stay defined */
                shadow_set_range(stack_base, newsp, SHADOW_UNADDRESSABLE);
                check_stack_size_vs_threshold(drcontext, stack_size);
                if (BEYOND_TOS_REDZONE_SIZE > 0) {
                    size_t redzone_sz = BEYOND_TOS_REDZONE_SIZE;
                    if (newsp - BEYOND_TOS_REDZONE_SIZE < stack_base)
                        redzone_sz = newsp - stack_base;
                    shadow_set_range(newsp - redzone_sz, newsp, SHADOW_UNDEFINED);
                }
            }
        } else {
            LOG(0, "ERROR: cannot find bounds of new thread's stack "PFX"\n",
                newsp);
            ASSERT(false, "can't find bounds of thread's stack");
        }
    }
}