/* Handle an LC_UNIXTHREAD load command.  Decodes the initial thread
   state via load_genericthread(), maps in a default-sized client stack
   when the executable did not provide a custom one, and reports the
   stack bounds and entry point through the out parameters.
   Returns 0 on success, -1 on any failure. */
static int load_unixthread(vki_uint8_t **out_stack_start,
                           vki_uint8_t **out_stack_end,
                           vki_uint8_t **out_entry,
                           struct thread_command *threadcmd)
{
   vki_uint8_t *sp_top;        /* highest stack address from thread state */
   int          has_custom_stack;

   if (load_genericthread(&sp_top, &has_custom_stack, out_entry, threadcmd))
      return -1;

   if (sp_top == NULL) {
      print("bad executable (no thread stack)\n");
      return -1;
   }

   if (has_custom_stack) {
      /* Custom stack: already mapped via a __UNIXTHREAD segment,
         so there is nothing to allocate here. */
   } else {
      /* No custom stack: map a default-sized stack whose top is sp_top. */
      vki_size_t   size = VG_PGROUNDUP(default_stack_size());
      vm_address_t base = VG_PGROUNDDN(sp_top - size);
      SysRes sres = VG_(am_mmap_anon_fixed_client)
                       (base, size,
                        VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC);
      check_mmap(sres, base, size, "load_unixthread1");
      if (out_stack_start)
         *out_stack_start = (vki_uint8_t *)base;
   }

   if (out_stack_end)
      *out_stack_end = sp_top;

   return 0;
}
/* Handle an LC_MAIN load command: allocate the client's main stack.
   If requested_size is 0 the platform default stack size is used.
   The mapping is deliberately over-allocated by a 64MB slack region
   which is unmapped again straight away, leaving an unmapped guard
   gap just below the usable stack.  On success *out_stack_start and
   *out_stack_end delimit the usable stack and 0 is returned; on
   munmap failure -1 is returned. */
static int handle_lcmain ( vki_uint8_t **out_stack_start,
                           vki_uint8_t **out_stack_end,
                           vki_size_t requested_size )
{
   /* Extra slack mapped below the stack, removed again below. */
   const vki_size_t slack = 64 * 1024 * 1024;

   vki_size_t size = requested_size ? requested_size : default_stack_size();
   size = VG_PGROUNDUP(size) + slack;

   SysRes sres = VG_(am_mmap_anon_float_client)
                    (size, VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC);
   check_mmap_float(sres, size, "handle_lcmain");
   vg_assert(!sr_isError(sres));

   vki_uint8_t *lo = (vki_uint8_t*)sr_Res(sres);
   *out_stack_start = lo;
   *out_stack_end   = lo + size - 1;

   /* Drop the slack region at the low end of the mapping, then move
      the reported stack start up past the resulting guard gap. */
   Bool discard = False;
   sres = VG_(am_munmap_client)(&discard, (Addr)lo, slack);
   if (sr_isError(sres))
      return -1;
   vg_assert(!discard);   /* True would be unexpected for a fresh mapping */
   *out_stack_start = lo + slack;

   return 0;
}