extern "C" void upcall_cmp_type(int8_t *result, const type_desc *tydesc, const type_desc **subtydescs, uint8_t *data_0, uint8_t *data_1, uint8_t cmp_type) { s_cmp_type_args args = {result, tydesc, subtydescs, data_0, data_1, cmp_type}; SWITCH_STACK(&args, upcall_s_cmp_type); }
extern "C" CDECL type_desc * upcall_get_type_desc(void *curr_crate, // ignored, legacy compat. size_t size, size_t align, size_t n_descs, type_desc const **descs, uintptr_t n_obj_params) { s_get_type_desc_args args = {0,size,align,n_descs,descs,n_obj_params}; SWITCH_STACK(&args, upcall_s_get_type_desc); return args.retval; }
/**
 * The exception handling personality function. It figures out what to do
 * with each landing pad. Just a stack-switching wrapper around the C++
 * personality function.
 */
extern "C" _Unwind_Reason_Code
upcall_rust_personality(int version,
                        _Unwind_Action actions,
                        uint64_t exception_class,
                        _Unwind_Exception *ue_header,
                        _Unwind_Context *context) {
    s_rust_personality_args args = {(_Unwind_Reason_Code)0, version, actions,
                                    exception_class, ue_header, context};
    SWITCH_STACK(&args, upcall_s_rust_personality);
    return args.retval;
}
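/*
 * Since the wrapper above only switches stacks, the stack-switched half is
 * expected to simply forward to the C++ personality routine.  A sketch of
 * what upcall_s_rust_personality() presumably looks like, assuming the
 * system unwinder's __gxx_personality_v0 (the real body lives elsewhere in
 * the runtime):
 *
 *     extern "C" void
 *     upcall_s_rust_personality(s_rust_personality_args *args) {
 *         args->retval = __gxx_personality_v0(args->version,
 *                                             args->actions,
 *                                             args->exception_class,
 *                                             args->ue_header,
 *                                             args->context);
 *     }
 */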
int shim_do_execve_rtld (struct shim_handle * hdl, const char ** argv,
                         const char ** envp)
{
    BEGIN_PROFILE_INTERVAL();

    struct shim_thread * cur_thread = get_cur_thread();
    int ret;

    if ((ret = close_cloexec_handle(cur_thread->handle_map)) < 0)
        return ret;

    SAVE_PROFILE_INTERVAL(close_CLOEXEC_files_for_exec);

    void * tcb = malloc(sizeof(__libc_tcb_t));
    if (!tcb)
        return -ENOMEM;

    populate_tls(tcb, false);
    __disable_preempt(&((__libc_tcb_t *) tcb)->shim_tcb); // Temporarily disable preemption
                                                          // during execve().
    debug("set tcb to %p\n", tcb);

    put_handle(cur_thread->exec);
    get_handle(hdl);
    cur_thread->exec = hdl;

    old_stack_top = cur_thread->stack_top;
    old_stack     = cur_thread->stack;
    old_stack_red = cur_thread->stack_red;
    cur_thread->stack_top = NULL;
    cur_thread->stack     = NULL;
    cur_thread->stack_red = NULL;

    initial_envp = NULL;
    new_argc = 0;
    for (const char ** a = argv ; *a ; a++, new_argc++);

    new_argcp = &new_argc;
    if ((ret = init_stack(argv, envp, &new_argcp, &new_argp,
                          REQUIRED_ELF_AUXV, &new_auxp)) < 0)
        return ret;

    SAVE_PROFILE_INTERVAL(alloc_new_stack_for_exec);

    SWITCH_STACK(new_argp);
    cur_thread = get_cur_thread();
    UPDATE_PROFILE_INTERVAL();

    DkVirtualMemoryFree(old_stack, old_stack_top - old_stack);
    DkVirtualMemoryFree(old_stack_red, old_stack - old_stack_red);

    if (bkeep_munmap(old_stack, old_stack_top - old_stack, 0) < 0 ||
        bkeep_munmap(old_stack_red, old_stack - old_stack_red, 0) < 0)
        BUG();

    remove_loaded_libraries();
    clean_link_map_list();
    SAVE_PROFILE_INTERVAL(unmap_loaded_binaries_for_exec);

    reset_brk();

    size_t count = DEFAULT_VMA_COUNT;
    struct shim_vma_val * vmas = malloc(sizeof(struct shim_vma_val) * count);
    if (!vmas)
        return -ENOMEM;

retry_dump_vmas:
    ret = dump_all_vmas(vmas, count);

    if (ret == -EOVERFLOW) {
        struct shim_vma_val * new_vmas
                = malloc(sizeof(struct shim_vma_val) * count * 2);
        if (!new_vmas) {
            free(vmas);
            return -ENOMEM;
        }
        free(vmas);
        vmas = new_vmas;
        count *= 2;
        goto retry_dump_vmas;
    }

    if (ret < 0) {
        free(vmas);
        return ret;
    }

    count = ret;
    for (struct shim_vma_val * vma = vmas ; vma < vmas + count ; vma++) {
        /* Don't free the current stack */
        if (vma->addr == cur_thread->stack)
            continue;

        /* Free all the mapped VMAs */
        if (!(vma->flags & VMA_UNMAPPED))
            DkVirtualMemoryFree(vma->addr, vma->length);

        /* Remove the VMAs */
        bkeep_munmap(vma->addr, vma->length, vma->flags);
    }

    free_vma_val_array(vmas, count);

    SAVE_PROFILE_INTERVAL(unmap_all_vmas_for_exec);

    if ((ret = load_elf_object(cur_thread->exec, NULL, 0)) < 0)
        shim_terminate(ret);

    init_brk_from_executable(cur_thread->exec);
    load_elf_interp(cur_thread->exec);

    SAVE_PROFILE_INTERVAL(load_new_executable_for_exec);

    cur_thread->robust_list = NULL;

#ifdef PROFILE
    if (ENTER_TIME)
        SAVE_PROFILE_INTERVAL_SINCE(syscall_execve, ENTER_TIME);
#endif

    debug("execve: start execution\n");
    execute_elf_object(cur_thread->exec, new_argcp, new_argp,
                       REQUIRED_ELF_AUXV, new_auxp);

    return 0;
}
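/*
 * The dump_all_vmas() call above uses a grow-and-retry idiom: when the
 * snapshot buffer is too small the call reports -EOVERFLOW, the buffer is
 * doubled, and the dump is retried.  The same idiom written as a
 * stand-alone loop (a sketch only; struct shim_vma_val, DEFAULT_VMA_COUNT
 * and dump_all_vmas are used exactly as above):
 *
 *     size_t count = DEFAULT_VMA_COUNT;
 *     struct shim_vma_val * vmas = malloc(sizeof(*vmas) * count);
 *     if (!vmas)
 *         return -ENOMEM;
 *
 *     while ((ret = dump_all_vmas(vmas, count)) == -EOVERFLOW) {
 *         struct shim_vma_val * bigger = malloc(sizeof(*bigger) * count * 2);
 *         if (!bigger) {
 *             free(vmas);
 *             return -ENOMEM;
 *         }
 *         free(vmas);
 *         vmas = bigger;
 *         count *= 2;
 *     }
 */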
extern "C" void upcall_log_type(const type_desc *tydesc, uint8_t *data, uint32_t level) { s_log_type_args args = {tydesc, data, level}; SWITCH_STACK(&args, upcall_s_log_type); }
/**
 * Frees space in the dynamic stack.
 */
extern "C" CDECL void
upcall_dynastack_free(void *ptr) {
    s_dynastack_free_args args = {ptr};
    SWITCH_STACK(&args, upcall_s_dynastack_free);
}
/**
 * Allocates space associated with a type descriptor in the dynamic stack and
 * returns it.
 */
extern "C" CDECL void *
upcall_dynastack_alloc_2(size_t sz, type_desc *ty) {
    s_dynastack_alloc_2_args args = {0, sz, ty};
    SWITCH_STACK(&args, upcall_s_dynastack_alloc_2);
    return args.retval;
}
/**
 * Allocates space in the dynamic stack and returns it.
 *
 * FIXME: Deprecated since dynamic stacks need to be self-describing for GC.
 */
extern "C" CDECL void *
upcall_dynastack_alloc(size_t sz) {
    s_dynastack_alloc_args args = {0, sz};
    SWITCH_STACK(&args, upcall_s_dynastack_alloc);
    return args.retval;
}
/**
 * Returns a token that can be used to deallocate all of the allocated space
 * in the dynamic stack.
 */
extern "C" CDECL void *
upcall_dynastack_mark() {
    s_dynastack_mark_args args = {0};
    SWITCH_STACK(&args, upcall_s_dynastack_mark);
    return args.retval;
}
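/*
 * Taken together, the dynamic-stack upcalls form a mark/alloc/free protocol
 * for generated code.  A hypothetical call sequence (illustration only; the
 * compiler emits the real calls, and passing the mark token back to
 * upcall_dynastack_free() is an assumption based on the comments above):
 *
 *     void *mark = upcall_dynastack_mark();           // remember the current position
 *     void *a = upcall_dynastack_alloc(32);           // untyped scratch space
 *     void *b = upcall_dynastack_alloc_2(sz, tydesc); // space described by a type_desc
 *     // ... use a and b ...
 *     upcall_dynastack_free(mark);                    // release everything since the mark
 */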
extern "C" CDECL void upcall_vec_grow(rust_vec** vp, size_t new_sz) { s_vec_grow_args args = {vp, new_sz}; SWITCH_STACK(&args, upcall_s_vec_grow); }
/**
 * Called whenever an object's ref count drops to zero.
 */
extern "C" CDECL void
upcall_shared_free(void* ptr) {
    s_shared_free_args args = {ptr};
    SWITCH_STACK(&args, upcall_s_shared_free);
}
extern "C" CDECL uintptr_t upcall_shared_malloc(size_t nbytes, type_desc *td) { s_shared_malloc_args args = {0, nbytes, td}; SWITCH_STACK(&args, upcall_s_shared_malloc); return args.retval; }
/**
 * Called whenever an object's ref count drops to zero.
 */
extern "C" CDECL void
upcall_free(void* ptr, uintptr_t is_gc) {
    s_free_args args = {ptr, is_gc};
    SWITCH_STACK(&args, upcall_s_free);
}
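/*
 * A sketch of the call site the comment above describes: when generated code
 * drops the last reference to a heap box it calls upcall_free() (or,
 * presumably, upcall_shared_free() for memory obtained through
 * upcall_shared_malloc()).  The box layout below is hypothetical pseudo-code,
 * not an actual runtime structure:
 *
 *     if (--box->ref_count == 0) {
 *         // run the drop glue for the contents first, then release the memory
 *         upcall_free(box, is_gc);
 *     }
 */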