/* Final stage of nudge-thread teardown: runs on the dstack (after
 * call_switch_stack()) so the app stack can be reclaimed.  Exits either
 * the whole process, using the exit code stashed in the dcontext, or just
 * this thread.  Never returns.
 */
static void
nudge_terminate_on_dstack(dcontext_t *dcontext)
{
    ASSERT(dcontext == get_thread_private_dcontext());
    if (!dcontext->nudge_terminate_process) {
        os_terminate(dcontext, TERMINATE_THREAD|TERMINATE_CLEANUP);
    } else {
        os_terminate_with_code(dcontext, TERMINATE_PROCESS|TERMINATE_CLEANUP,
                               dcontext->nudge_exit_code);
    }
    ASSERT_NOT_REACHED();
}
/* Initializes the private library loader: allocates the module-area vector,
 * runs the OS-specific prologue, finalizes (resolves imports for) any client
 * libraries that were loaded early via privmod_static[], and runs the
 * OS-specific epilogue.  Terminates the process if a client library's imports
 * cannot be located.
 * Synchronization: holds privload_lock for the entire routine.
 */
void loader_init(void)
{
    uint i;
    privmod_t *mod;

    acquire_recursive_lock(&privload_lock);
    VMVECTOR_ALLOC_VECTOR(modlist_areas, GLOBAL_DCONTEXT,
                          VECTOR_SHARED | VECTOR_NEVER_MERGE
                          /* protected by privload_lock */
                          | VECTOR_NO_LOCK,
                          modlist_areas);
    /* os specific loader initialization prologue before finalize the load */
    os_loader_init_prologue();

    /* Process client libs we loaded early but did not finalize */
    for (i = 0; i < privmod_static_idx; i++) {
        /* Transfer to real list so we can do normal processing */
        char name_copy[MAXIMUM_PATH];
        mod = privload_insert(NULL,
                              privmod_static[i].base,
                              privmod_static[i].size,
                              privmod_static[i].name,
                              privmod_static[i].path);
        LOG(GLOBAL, LOG_LOADER, 1, "%s: processing imports for %s\n",
            __FUNCTION__, mod->name);
        /* save a copy for error msg, b/c mod will be unloaded (i#643) */
        snprintf(name_copy, BUFFER_SIZE_ELEMENTS(name_copy), "%s", mod->name);
        NULL_TERMINATE_BUFFER(name_copy);
        if (!privload_load_finalize(mod)) {
            mod = NULL; /* it's been unloaded! */
#ifdef CLIENT_INTERFACE
            SYSLOG(SYSLOG_ERROR, CLIENT_LIBRARY_UNLOADABLE, 5,
                   get_application_name(), get_application_pid(), name_copy,
                   "\n\tUnable to locate imports of client library");
#endif
            /* fatal: a requested client library is unusable */
            os_terminate(NULL, TERMINATE_PROCESS);
            ASSERT_NOT_REACHED();
        }
    }
    /* os specific loader initialization epilogue after finalize the load */
    os_loader_init_epilogue();
    /* FIXME i#338: call loader_thread_init here once get
     * loader_init called after dynamo_thread_init but in a way that
     * works with Windows */
    release_recursive_lock(&privload_lock);
}
int Window::EventLoop(void *_window) { Window *window = (Window*) _window; while (true) { Event event; window->WaitEvent(&event); if (event.what == EVT_QUIT) { delete window; os_terminate(0); } window->Lock(); window->DispatchEvent(&event); window->Unlock(); } }
// // Test malloc() // int malloc_thread(void*) { for (int i = 0; i < 50000; i++){ #if LOOK_ITS_A_RACE sem_acquire(malloc_sem); #endif free(malloc(10000)); #if LOOK_ITS_A_RACE sem_release(malloc_sem); #endif } printf("malloc thread finished\n"); os_terminate(0); return 0; }
/* This is the target for all nudge threads.
 * CAUTION: generic_nudge_target is added to global_rct_ind_targets table. If this
 * function is renamed or cloned, update rct_known_targets_init accordingly.
 *
 * Hands the nudge argument to generic_nudge_handler(), which is expected to
 * terminate this thread; the trailing os_terminate() is a safety net only.
 */
void generic_nudge_target(nudge_arg_t *arg)
{
    /* Fix for case 5130; volatile forces a 'call' instruction to be generated
     * rather than 'jmp' during optimization. FIXME: need a standardized &
     * better way of stopping core from emulating itself.
     */
    volatile bool nudge_result;

    /* needed to make sure dr has a specific target to lookup and avoid
     * interpreting when taking over new threads; see leave_call_native(). */
    nudge_result = generic_nudge_handler(arg);

    /* Should never return. */
    ASSERT_NOT_REACHED();
    os_terminate(NULL, TERMINATE_THREAD); /* just in case */
}
void console_thread(void) { int l, src; uint32 code; char data[257]; #ifdef CONSOLE_DEBUG vprintf(active, "console: " FG_GREEN "listener ready" FG_WHITE " (port %d)\n",console_port); #endif while((l = port_recv(console_port, &src, data, 256, &code)) >= 0){ if(code == 0) { remote_port = src; } else { data[l] = 0; vputs(active, data); } } vprintf(active, "console: output listener has left the building (%d)\n",l); os_terminate(0); }
/* init: bootstrap entry point.  Phase 1 reads /boot/rc.boot from the cloned
 * boot-image area and spawns each listed bootstrap server; phase 2 runs the
 * commands listed in /boot/rc via execve.
 * NOTE(review): malloc() results (line, boot_servers, params[...]) are never
 * checked for NULL or freed -- presumably acceptable at early boot, but
 * worth confirming.  Also printf(copyright) passes a non-literal format
 * string; safe only if `copyright` contains no '%' -- verify.
 */
int main (void)
{
    int area, sarea;
    char *c, *rcboot, *line, *boot_servers, **params;
    int i, space, p_argc, boot, fd, len, total, prog, filenum;
    void *ptr;
    boot_dir *dir;

    line = malloc (256);
    boot_servers = malloc (256);

    if (!(boot = area_clone (3, 0, (void **) &dir, 0))) {
        /* cannot map the boot image: nothing more we can do */
        os_console ("no uberarea; giving up");
        os_debug ();
        for (;;) ; /* fatal */
        return 0;
    } else if ((filenum = boot_get_num (dir, "rc.boot")) < 0) {
        os_console ("no /boot/rc.boot; do you know what you're doing?");
        os_debug ();
    } else {
        *line = *boot_servers = len = total = 0;
        rcboot = boot_get_data (dir, filenum);
        /* walk rc.boot byte by byte, handling each newline-terminated line */
        while (total < dir->bd_entry[filenum].be_vsize) {
            line[len++] = *rcboot++;
            total++;
            if (line[len - 1] == '\n') {
                line[len-- - 1] = 0; /* strip newline; len is now line length */
                /* count words to size the argv array
                 * (p_argc starts at 2: first word + NULL terminator slot) */
                for (i = space = 0, p_argc = 2; i < len; i++)
                    if ((line[i] == ' ') && !space)
                        space = 1;
                    else if ((line[i] != ' ') && space) {
                        p_argc++;
                        space = 0;
                    }
                if ((*line != '#') && *line) { /* skip comments/blank lines */
                    params = malloc (sizeof (char *) * p_argc);
                    c = line;
                    /* split into NUL-terminated words
                     * (note: deliberately reuses `len` as the word length;
                     * it is reset to 0 after the line is handled) */
                    for (i = 0; i < p_argc - 1; i++) {
                        for (len = 0; c[len] && (c[len] != ' '); len++)
                            ;
                        params[i] = malloc (len + 1);
                        strlcpy (params[i], c, len + 1);
                        c += len + 1;
                    }
                    params[i] = NULL;
                    if (!strcmp (params[0], "exit"))
                        os_terminate (1);
                    /* copy the server image into a fresh area, give it a
                     * stack area, and spawn it */
                    prog = boot_get_num (dir, params[0]);
                    area = area_create (dir->bd_entry[prog].be_vsize, 0, &ptr, 0);
                    memcpy (ptr, boot_get_data (dir, prog),
                            dir->bd_entry[prog].be_vsize);
                    sarea = area_create (0x1000, 0, &ptr, 0);
                    strlcat (boot_servers, " ", 256);
                    strlcat (boot_servers, params[0], 256);
                    /* NOTE(review): 0x1074/0x3ffffd/0x3ff000 look like fixed
                     * entry/stack addresses for spawned servers -- confirm
                     * against thr_spawn's contract. */
                    thr_wait (thr_spawn (0x1074, 0x3ffffd, area, 0x1000,
                                         sarea, 0x3ff000, params[0]));
                }
                len = 0;
            }
        }
    }

    /* say hello */
    __libc_init_fdl ();
    __libc_init_console ();
    __libc_init_vfs ();
    printf (copyright);
    printf ("init: bootstrap servers started. [ %s ]\n", boot_servers + 1);

    /* if we one day pass arguments to init, we will parse them here. */

    /* do some more normal stuff */
    printf ("init: beginning automatic boot.\n\n");
    fd = open ("/boot/rc", O_RDONLY, 0);
    if (fd < 0)
        printf ("error opening /boot/rc\n");
    else {
        *line = len = 0;
        /* read /boot/rc one byte at a time, handling each complete line */
        while (read (fd, line + len++, 1) > 0) {
            if (line[len - 1] == '\n') {
                /* line[len - 1] = 0;
                if ((*line != '#') && *line) {
                    // printf ("execing `%s'\n", line);
                    params = malloc (sizeof (char *) * 2);
                    params[0] = malloc (strlen (line) + 1);
                    strcpy (params[0], line);
                    params[1] = NULL;
                    thr_join (thr_detach (run2), 0);
                }
                len = 0; */
                line[len-- - 1] = 0;
                /* same word-count / word-split logic as the rc.boot pass */
                for (i = space = 0, p_argc = 2; i < len; i++)
                    if ((line[i] == ' ') && !space)
                        space = 1;
                    else if ((line[i] != ' ') && space) {
                        p_argc++;
                        space = 0;
                    }
                if ((*line != '#') && *line) {
                    params = malloc (sizeof (char *) * p_argc);
                    c = line;
                    for (i = 0; i < p_argc - 1; i++) {
                        for (len = 0; c[len] && (c[len] != ' '); len++)
                            ;
                        params[i] = malloc (len + 1);
                        strlcpy (params[i], c, len + 1);
                        c += len + 1;
                    }
                    params[i] = NULL;
                    if (!strcmp (params[0], "exit"))
                        os_terminate (1);
                    /* NOTE(review): execve here apparently returns a thread
                     * id (>0) rather than replacing this image -- confirm
                     * this OS's execve semantics. */
                    i = execve (params[0], params, NULL);
                    if (i > 0)
                        thr_wait (i);
                    else
                        printf ("cannot execute \"%s\"\n", params[0]);
                }
                len = 0;
            }
        }
        close (fd);
    }
    printf ("init: nothing left to do\n");
    return 0;
}
/* Installs the per-thread DR TLS segment `segment` for the current thread,
 * trying the available mechanisms in order (arch_prctl on x64, then
 * set_thread_area, then modify_ldt) and recording the chosen mechanism in
 * os_tls->tls_type and the gdt/ldt slot in os_tls->ldt_index (-1 when no
 * slot is used, e.g. arch_prctl).
 */
void tls_thread_init(os_local_state_t *os_tls, byte *segment)
{
    /* We have four different ways to obtain TLS, each with its own limitations:
     *
     * 1) Piggyback on the threading system (like we do on Windows): here that would
     *    be pthreads, which uses a segment since at least RH9, and uses gdt-based
     *    segments for NPTL.  The advantage is we won't run out of ldt or gdt entries
     *    (except when the app itself would).  The disadvantage is we're stealing
     *    application slots and we rely on user mode interfaces.
     *
     * 2) Steal an ldt entry via SYS_modify_ldt.  This suffers from the 8K ldt entry
     *    limit and requires that we update manually on a new thread.  For 64-bit
     *    we're limited here to a 32-bit base.  (Strangely, the kernel's
     *    include/asm-x86_64/ldt.h implies that the base is ignored: but it doesn't
     *    seem to be.)
     *
     * 3) Steal a gdt entry via SYS_set_thread_area.  There is a 3rd unused entry
     *    (after pthreads and wine) we could use.  The kernel swaps for us, and with
     *    CLONE_TLS the kernel will set up the entry for a new thread for us.  Xref
     *    PR 192231 and PR 285898.  This system call is disabled on 64-bit 2.6
     *    kernels (though the man page for arch_prctl implies it isn't for 2.5
     *    kernels?!?)
     *
     * 4) Use SYS_arch_prctl.  This is only implemented on 64-bit kernels, and can
     *    only be used to set the gdt entries that fs and gs select for.  Faster to
     *    use <4GB base (obtain with mmap MAP_32BIT) since can use gdt; else have to
     *    use wrmsr.  The man pages say "ARCH_SET_GS is disabled in some kernels".
     */
    uint selector;
    int index = -1;
    int res;
#ifdef X64
    /* First choice is gdt, which means arch_prctl.  Since this may fail
     * on some kernels, we require -heap_in_lower_4GB so we can fall back
     * on modify_ldt.
     */
    byte *cur_gs;
    res = dynamorio_syscall(SYS_arch_prctl, 2, ARCH_GET_GS, &cur_gs);
    if (res >= 0) {
        LOG(GLOBAL, LOG_THREADS, 1, "os_tls_init: cur gs base is "PFX"\n", cur_gs);
        /* If we're a non-initial thread, gs will be set to the parent thread's value */
        if (cur_gs == NULL || is_dynamo_address(cur_gs) ||
            /* By resolving i#107, we can handle gs conflicts between app and dr. */
            INTERNAL_OPTION(mangle_app_seg)) {
            res = dynamorio_syscall(SYS_arch_prctl, 2, ARCH_SET_GS, segment);
            if (res >= 0) {
                os_tls->tls_type = TLS_TYPE_ARCH_PRCTL;
                LOG(GLOBAL, LOG_THREADS, 1,
                    "os_tls_init: arch_prctl successful for base "PFX"\n", segment);
                /* Read the base back to detect kernels (WSL) where
                 * ARCH_SET_GS did not take effect as reported. */
                res = dynamorio_syscall(SYS_arch_prctl, 2, ARCH_GET_GS, &cur_gs);
                if (res >= 0 && cur_gs != segment && !on_WSL) {
                    /* XXX i#1896: on WSL, ARCH_GET_GS is broken and does not return
                     * the true value.  (Plus, fs and gs start out equal to ss (0x2b)
                     * and are not set by ARCH_SET_*).  i#2089's safe read TLS
                     * solution solves this, but we still warn as we haven't fixed
                     * later issues.  Without the safe read we have to abort.
                     */
                    on_WSL = true;
                    LOG(GLOBAL, LOG_THREADS, 1, "os_tls_init: running on WSL\n");
                    if (INTERNAL_OPTION(safe_read_tls_init)) {
                        SYSLOG_INTERNAL_WARNING
                            ("Support for the Windows Subsystem for Linux is still "
                             "preliminary, due to missing kernel features. "
                             "Continuing, but please report any problems encountered.");
                    } else {
                        SYSLOG(SYSLOG_ERROR, WSL_UNSUPPORTED_FATAL, 2,
                               get_application_name(), get_application_pid());
                        os_terminate(NULL, TERMINATE_PROCESS);
                        ASSERT_NOT_REACHED();
                    }
                }
                /* Kernel should have written %gs for us if using GDT */
                if (!dynamo_initialized &&
                    /* We assume that WSL is using MSR */
                    (on_WSL || read_thread_register(SEG_TLS) == 0)) {
                    LOG(GLOBAL, LOG_THREADS, 1, "os_tls_init: using MSR\n");
                    tls_using_msr = true;
                }
                if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
                    res = dynamorio_syscall(SYS_arch_prctl, 2, ARCH_SET_FS,
                                            os_tls->os_seg_info.priv_lib_tls_base);
                    /* Assuming set fs must be successful if set gs succeeded. */
                    ASSERT(res >= 0);
                }
            } else {
                /* we've found a kernel where ARCH_SET_GS is disabled */
                ASSERT_CURIOSITY(false && "arch_prctl failed on set but not get");
                LOG(GLOBAL, LOG_THREADS, 1,
                    "os_tls_init: arch_prctl failed: error %d\n", res);
            }
        } else {
            /* FIXME PR 205276: we don't currently handle it: fall back on ldt, but
             * we'll have the same conflict w/ the selector...
             */
            ASSERT_BUG_NUM(205276, cur_gs == NULL);
        }
    }
#endif
    if (os_tls->tls_type == TLS_TYPE_NONE) {
        /* Second choice is set_thread_area */
        /* PR 285898: if we added CLONE_SETTLS to all clone calls (and emulated vfork
         * with clone) we could avoid having to set tls up for each thread (as well
         * as solve race PR 207903), at least for kernel 2.5.32+.  For now we stick
         * w/ manual setup.
         */
        our_modify_ldt_t desc;
        /* Pick which GDT slots we'll use for DR TLS and for library TLS if
         * using the private loader. */
        choose_gdt_slots(os_tls);
        if (tls_gdt_index > -1) {
            /* Now that we know which GDT slot to use, install the per-thread base
             * into it.
             */
            /* Base here must be 32-bit */
            IF_X64(ASSERT(DYNAMO_OPTION(heap_in_lower_4GB) &&
                          segment <= (byte*)UINT_MAX));
            initialize_ldt_struct(&desc, segment, PAGE_SIZE, tls_gdt_index);
            res = dynamorio_syscall(SYS_set_thread_area, 1, &desc);
            LOG(GLOBAL, LOG_THREADS, 3,
                "%s: set_thread_area %d => %d res, %d index\n",
                __FUNCTION__, tls_gdt_index, res, desc.entry_number);
            ASSERT(res < 0 || desc.entry_number == tls_gdt_index);
        } else {
            res = -1; /* fall back on LDT */
        }
        if (res >= 0) {
            LOG(GLOBAL, LOG_THREADS, 1,
                "os_tls_init: set_thread_area successful for base "PFX" @index %d\n",
                segment, tls_gdt_index);
            os_tls->tls_type = TLS_TYPE_GDT;
            index = tls_gdt_index;
            selector = GDT_SELECTOR(index);
            WRITE_DR_SEG(selector); /* macro needs lvalue! */
        } else {
            IF_VMX86(ASSERT_NOT_REACHED()); /* since no modify_ldt */
            LOG(GLOBAL, LOG_THREADS, 1,
                "os_tls_init: set_thread_area failed: error %d\n", res);
        }
#ifdef CLIENT_INTERFACE
        /* Install the library TLS base. */
        if (INTERNAL_OPTION(private_loader) && res >= 0) {
            app_pc base = os_tls->os_seg_info.priv_lib_tls_base;
            /* lib_tls_gdt_index is picked in choose_gdt_slots. */
            ASSERT(lib_tls_gdt_index >= gdt_entry_tls_min);
            initialize_ldt_struct(&desc, base, GDT_NO_SIZE_LIMIT, lib_tls_gdt_index);
            res = dynamorio_syscall(SYS_set_thread_area, 1, &desc);
            LOG(GLOBAL, LOG_THREADS, 3,
                "%s: set_thread_area %d => %d res, %d index\n",
                __FUNCTION__, lib_tls_gdt_index, res, desc.entry_number);
            if (res >= 0) {
                /* i558 update lib seg reg to enforce the segment changes */
                selector = GDT_SELECTOR(lib_tls_gdt_index);
                LOG(GLOBAL, LOG_THREADS, 2, "%s: setting %s to selector 0x%x\n",
                    __FUNCTION__, reg_names[LIB_SEG_TLS], selector);
                WRITE_LIB_SEG(selector);
            }
        }
#endif
    }
    if (os_tls->tls_type == TLS_TYPE_NONE) {
        /* Third choice: modify_ldt, which should be available on kernel 2.3.99+ */
        /* Base here must be 32-bit */
        IF_X64(ASSERT(DYNAMO_OPTION(heap_in_lower_4GB) && segment <= (byte*)UINT_MAX));
        /* we have the thread_initexit_lock so no race here */
        index = find_unused_ldt_index();
        selector = LDT_SELECTOR(index);
        ASSERT(index != -1);
        create_ldt_entry((void *)segment, PAGE_SIZE, index);
        os_tls->tls_type = TLS_TYPE_LDT;
        WRITE_DR_SEG(selector); /* macro needs lvalue! */
        LOG(GLOBAL, LOG_THREADS, 1,
            "os_tls_init: modify_ldt successful for base "PFX" w/ index %d\n",
            segment, index);
    }
    os_tls->ldt_index = index;
}
/* Writes the given reason string to stdout, then terminates. */
void dr_terminate(const char* reason)
{
    size_t reason_len = strlen(reason);
    os_write(1 /* stdout */, reason, reason_len);
    os_terminate(NULL, 0);
}
/* This is the actual nudge handler
 * Notes: This function returns a boolean mainly to fix case 5130; it is not
 * really necessary.
 *
 * Safe-reads the nudge argument, validates that the thread entered via
 * generic_nudge_target, dispatches the action mask via handle_nudge(), and
 * finishes by terminating this thread through nudge_thread_cleanup().
 */
bool generic_nudge_handler(nudge_arg_t *arg_dont_use)
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    nudge_arg_t safe_arg = {0};
    uint nudge_action_mask = 0;

#ifdef WINDOWS
    /* this routine is run natively via leave_call_native() so there's no
     * cxt switch that swapped for us */
    if (dcontext != NULL)
        swap_peb_pointer(dcontext, true/*to priv*/);
#endif

    /* To be extra safe we use safe_read() to access the nudge argument, though once
     * we get past the checks below we are trusting its content. */
    ASSERT(arg_dont_use != NULL && "invalid nudge argument");
    if (!safe_read(arg_dont_use, sizeof(nudge_arg_t), &safe_arg)) {
        ASSERT(false && "invalid nudge argument");
        goto nudge_finished;
    }
    nudge_action_mask = safe_arg.nudge_action_mask;

    /* if needed tell thread exit to free the application stack */
    /* NOTE(review): dcontext is dereferenced here but its NULL check only
     * happens further below -- a NULL dcontext on this path would crash;
     * confirm whether that is possible before the dynamo_initialized check. */
    if (!TEST(NUDGE_NUDGER_FREE_STACK, safe_arg.flags)) {
        dcontext->free_app_stack = true;
    } else {
        ASSERT_NOT_TESTED();
    }

    /* FIXME - would be nice to inform nudge creator if we need to nop the nudge. */

    /* Fix for case 5702.  If a nudge thread comes in during process exit,
     * don't process it, i.e., nop it.  FIXME - this leaks the app stack and nudge arg
     * if the nudge was supposed to free them. */
    if (dynamo_exited)
        goto nudge_finished;

    /* Node manager will not be able to nudge before reading the drmarker and
     * the dr_marker isn't available before callback_interception_init().
     * Since after callback_interception_init() new threads won't be allowed
     * to progress till dynamo_initialized is set, by the time a nudge thread
     * reaches here dynamo_initialized should be set. */
    ASSERT(dynamo_initialized);
    if (!dynamo_initialized)
        goto nudge_finished;

    /* We should always have a dcontext. */
    ASSERT(dcontext != NULL);
    if (dcontext == NULL)
        goto nudge_finished;

    ENTERING_DR();

    /* Xref case 552, the nudge_target value provides a reasonable measure
     * of security against an attacker leveraging this routine. */
    if (dcontext->nudge_target != (void *)generic_nudge_target) {
        /* FIXME - should we report this likely attempt to attack us? need
         * a unit test for this (though will then have to tone this down). */
        ASSERT(false && "unauthorized thread tried to nudge");
        /* If we really are under attack we should terminate immediately and
         * proceed no further.  Note we are leaking the app stack and nudge arg if we
         * were supposed to free them. */
        os_terminate(dcontext, TERMINATE_THREAD);
        ASSERT_NOT_REACHED();
    }

    /* Free the arg if requested. */
    if (TEST(NUDGE_FREE_ARG, safe_arg.flags)) {
        ASSERT_NOT_TESTED();
        nt_free_virtual_memory(arg_dont_use);
    }

    handle_nudge(dcontext, &safe_arg);

 nudge_finished:
    return nudge_thread_cleanup(dcontext, false/*just thread*/, 0/*unused*/);
}
/* exit_process is only honored if dcontext != NULL, and exit_code is only honored
 * if exit_process is true
 *
 * Terminates the current nudge thread (or the whole process), switching to
 * the dstack first when the app stack must be freed during thread exit.
 * Never returns to the caller.
 */
bool nudge_thread_cleanup(dcontext_t *dcontext, bool exit_process, uint exit_code)
{
    /* Note - for supporting detach with CLIENT_INTERFACE and nudge threads we need that
     * no lock grabbing or other actions that would interfere with the detaching process
     * occur in the cleanup path here. */

    /* Case 8901: this routine is currently called from the code cache, which may have
     * been reset underneath us, so we can't just blindly return.  This also gives us
     * consistent behavior for handling stack freeing. */

    /* Case 9020: no EXITING_DR() as os_terminate will do that for us */

    /* FIXME - these nudge threads do hit dll mains for thread attach so app may have
     * allocated some TLS memory which won't end up being freed since this won't go
     * through dll main thread detach.  The app may also object to unbalanced attach to
     * detach ratio though we haven't seen that in practice.  Long term we should take
     * over and redirect the thread at the init apc so it doesn't go through the
     * DllMains to start with. */

    /* We have a general problem on how to free the application stack for nudges.
     * Currently the app/os will never free a nudge thread's app stack:
     *  On NT and 2k ExitThread would normally free the app stack, but we always
     *  terminate nudge threads instead of allowing them to return and exit normally.
     *  On XP and 2k3 none of our nudge creation routines inform csrss of the new thread
     *  (which is who typically frees the stacks).
     *  On Vista we don't use NtCreateThreadEx to create the nudge threads so the kernel
     *  doesn't free the stack.
     * As such we are left with two options: free the app stack here (nudgee free) or
     * have the nudge thread creator free the app stack (nudger free).  Going with
     * nudgee free means we leak exit race nudge stacks whereas if we go with nudger free
     * for external nudges then we'll leak timed out nudge stacks (for internal nudges
     * we pretty much have to do nudgee free).  A nudge_arg_t flag is used to specify
     * which model we use, but currently we always nudgee free.
     *
     * dynamo_thread_exit_common() is where the app stack is actually freed, not here.
     */
    if (dynamo_exited || !dynamo_initialized || dcontext == NULL) {
        /* FIXME - no cleanup so we'll leak any memory allocated for this thread
         * including the application's stack and arg if we were supposed to free them.
         * We only expect to get here in rare races where the nudge thread was created
         * before dr exited (i.e. before drmarker was freed) but didn't end up getting
         * scheduled till after dr exited. */
        ASSERT(!exit_process); /* shouldn't happen */
#ifdef WINDOWS
        if (dcontext != NULL)
            swap_peb_pointer(dcontext, false/*to app*/);
#endif
        os_terminate(dcontext, TERMINATE_THREAD);
    } else {
        /* Nudge threads should exit without holding any locks. */
        ASSERT_OWN_NO_LOCKS();

#ifdef WINDOWS
        /* if exiting the process, os_loader_exit will swap to app, and we want to
         * remain private during exit (esp client exit) */
        if (!exit_process && dcontext != NULL)
            swap_peb_pointer(dcontext, false/*to app*/);
#endif

        /* if freeing the app stack we must be on the dstack when we cleanup */
        if (dcontext->free_app_stack && !is_currently_on_dstack(dcontext)) {
            if (exit_process) {
                /* XXX: wasteful to use two dcontext fields just for this.
                 * Extend call_switch_stack to support extra args or sthg?
                 */
                dcontext->nudge_terminate_process = true;
                dcontext->nudge_exit_code = exit_code;
            }
            /* switch to the dstack and terminate from there */
            call_switch_stack(dcontext, dcontext->dstack,
                              (void(*)(void*))nudge_terminate_on_dstack,
                              NULL /* not on initstack */,
                              false /* don't return */);
        } else {
            /* Already on dstack or nudge creator will free app stack. */
            if (exit_process) {
                os_terminate_with_code(dcontext, TERMINATE_PROCESS|TERMINATE_CLEANUP,
                                       exit_code);
            } else {
                os_terminate(dcontext, TERMINATE_THREAD|TERMINATE_CLEANUP);
            }
        }
    }
    ASSERT_NOT_REACHED(); /* we should never return */
    return true;
}