/*
 * Handler for the exit_group(2) syscall: terminate every thread in the
 * current process, then exit the process itself.  Never returns — control
 * leaves through switch_dummy_thread() or DkThreadExit().
 *
 * error_code: exit status to report (forwarded to try_process_exit()).
 */
noreturn int shim_do_exit_group (int error_code)
{
    /* NOTE(review): this bumps the syscall_use_ipc counter, not a
     * syscall_exit_group-specific one — looks like a copy-paste; confirm
     * the intended profile counter. */
    INC_PROFILE_OCCURENCE(syscall_use_ipc);
    struct shim_thread * cur_thread = get_cur_thread();
    /* Internal (shim-owned) threads must never issue exit_group. */
    assert(!is_internal(cur_thread));

    if (debug_handle)
        sysparser_printf("---- shim_exit_group (returning %d)\n", error_code);

    /* A "dummy" thread presumably stands in for a thread whose real
     * context lives elsewhere (TODO confirm): clear its pending term
     * signal, tear it down, and jump back to the real context — the
     * switch_dummy_thread() call does not return here. */
    if (cur_thread->dummy) {
        cur_thread->term_signal = 0;
        thread_exit(cur_thread, true);
        switch_dummy_thread(cur_thread);
    }

    /* exit_group kills the whole thread group: signal every thread in
     * this tgid before exiting the process itself. */
    debug("now kill other threads in the process\n");
    do_kill_proc(cur_thread->tgid, cur_thread->tgid, SIGKILL, false);

    debug("now exit the process\n");
    try_process_exit(error_code, 0);

#ifdef PROFILE
    if (ENTER_TIME)
        SAVE_PROFILE_INTERVAL_SINCE(syscall_exit_group, ENTER_TIME);
#endif

    /* Hand the current (last) thread back to the PAL; does not return. */
    DkThreadExit();
}
int ipc_cld_profile_callback (IPC_CALLBACK_ARGS) { struct shim_ipc_cld_profile * msgin = (struct shim_ipc_cld_profile *) &msg->msg; debug("ipc callback from %u: IPC_CLD_PROFILE\n", msg->src); for (int i = 0 ; i < msgin->nprofile ; i++) { int idx = msgin->profile[i].idx; if (idx == 0) break; idx--; switch (PROFILES[idx].type) { case OCCURENCE: debug("receive %s: %u times\n", PROFILES[idx].name, msgin->profile[i].val.occurence.count); atomic_add(msgin->profile[i].val.occurence.count, &PROFILES[idx].val.occurence.count); break; case INTERVAL: debug("receive %s: %u times, %lu msec\n", PROFILES[idx].name, msgin->profile[i].val.interval.count, msgin->profile[i].val.interval.time); atomic_add(msgin->profile[i].val.interval.count, &PROFILES[idx].val.interval.count); atomic_add(msgin->profile[i].val.interval.time, &PROFILES[idx].val.interval.time); break; case CATAGORY: break; } } SAVE_PROFILE_INTERVAL_SINCE(ipc_send_profile, msgin->time); return 0; }
/*
 * Handler for the exit(2) syscall: terminate only the calling thread
 * (unlike exit_group, sibling threads are left running).  Never returns —
 * control leaves through switch_dummy_thread() or DkThreadExit().
 *
 * error_code: exit status to report (forwarded to try_process_exit()).
 */
noreturn int shim_do_exit (int error_code)
{
    /* NOTE(review): counts under syscall_use_ipc rather than a
     * syscall_exit-specific counter — likely copy-paste; verify. */
    INC_PROFILE_OCCURENCE(syscall_use_ipc);
    struct shim_thread * cur_thread = get_cur_thread();
    /* Internal (shim-owned) threads must never issue exit. */
    assert(!is_internal(cur_thread));

    if (debug_handle)
        sysparser_printf("---- shim_exit (returning %d)\n", error_code);

    /* Dummy-thread path: presumably a placeholder for a context that
     * lives elsewhere (TODO confirm).  Tear it down and switch back;
     * switch_dummy_thread() does not return here. */
    if (cur_thread->dummy) {
        cur_thread->term_signal = 0;
        thread_exit(cur_thread, true);
        switch_dummy_thread(cur_thread);
    }

    /* May exit the whole process if this was the last thread. */
    try_process_exit(error_code, 0);

#ifdef PROFILE
    if (ENTER_TIME)
        SAVE_PROFILE_INTERVAL_SINCE(syscall_exit, ENTER_TIME);
#endif

    /* Hand the thread back to the PAL; does not return. */
    DkThreadExit();
}
/*
 * In-place execve: replace the current process image with the executable
 * in `hdl`, without spawning a new host process.  Tears down the old
 * stack, VMAs and loaded libraries, then loads and enters the new ELF.
 *
 * hdl:  handle of the new executable (reference taken via get_handle).
 * argv: NULL-terminated argument vector for the new image.
 * envp: NULL-terminated environment for the new image.
 *
 * Returns a negative errno on early failure; on success it never returns
 * (execute_elf_object jumps into the new image).
 *
 * NOTE(review): this function switches stacks mid-body (SWITCH_STACK).
 * State that must survive the switch is deliberately kept in what appear
 * to be file-scope globals (old_stack*, initial_envp, new_argc/new_argcp,
 * new_argp, new_auxp) — their definitions are outside this chunk; confirm
 * before touching.  Locals declared before the switch must not be relied
 * on afterwards.
 */
int shim_do_execve_rtld (struct shim_handle * hdl, const char ** argv,
                         const char ** envp)
{
    BEGIN_PROFILE_INTERVAL();
    struct shim_thread * cur_thread = get_cur_thread();
    int ret;

    /* POSIX: close all FD_CLOEXEC handles across execve. */
    if ((ret = close_cloexec_handle(cur_thread->handle_map)) < 0)
        return ret;

    SAVE_PROFILE_INTERVAL(close_CLOEXEC_files_for_exec);

    /* Fresh TLS control block for the new image.  Ownership passes to
     * the thread via populate_tls(), so it is not freed on later error
     * paths here — presumably reclaimed with the thread; verify. */
    void * tcb = malloc(sizeof(__libc_tcb_t));
    if (!tcb)
        return -ENOMEM;
    populate_tls(tcb, false);
    __disable_preempt(&((__libc_tcb_t *) tcb)->shim_tcb); // Temporarily disable preemption
                                                          // during execve().
    debug("set tcb to %p\n", tcb);

    /* Swap the thread's executable handle for the new one. */
    put_handle(cur_thread->exec);
    get_handle(hdl);
    cur_thread->exec = hdl;

    /* Stash the old stack bounds in globals: they must be readable
     * after we have switched onto the new stack. */
    old_stack_top = cur_thread->stack_top;
    old_stack     = cur_thread->stack;
    old_stack_red = cur_thread->stack_red;
    cur_thread->stack_top = NULL;
    cur_thread->stack     = NULL;
    cur_thread->stack_red = NULL;

    initial_envp = NULL;
    new_argc = 0;
    /* Count argv entries (terminator excluded). */
    for (const char ** a = argv ; *a ; a++, new_argc++);

    new_argcp = &new_argc;
    /* Build the new user stack (argv/envp/auxv laid out on it). */
    if ((ret = init_stack(argv, envp, &new_argcp, &new_argp,
                          REQUIRED_ELF_AUXV, &new_auxp)) < 0)
        return ret;

    SAVE_PROFILE_INTERVAL(alloc_new_stack_for_exec);

    /* Point of no return: from here on we run on the new stack.
     * Re-fetch cur_thread; pre-switch locals are no longer trusted. */
    SWITCH_STACK(new_argp);
    cur_thread = get_cur_thread();
    UPDATE_PROFILE_INTERVAL();

    /* Release the old stack and its red (guard) zone, then drop the
     * bookkeeping; a bookkeeping failure here is unrecoverable. */
    DkVirtualMemoryFree(old_stack, old_stack_top - old_stack);
    DkVirtualMemoryFree(old_stack_red, old_stack - old_stack_red);

    if (bkeep_munmap(old_stack, old_stack_top - old_stack, 0) < 0 ||
        bkeep_munmap(old_stack_red, old_stack - old_stack_red, 0) < 0)
        BUG();

    remove_loaded_libraries();
    clean_link_map_list();
    SAVE_PROFILE_INTERVAL(unmap_loaded_binaries_for_exec);

    reset_brk();

    /* Snapshot all current VMAs, growing the buffer geometrically until
     * dump_all_vmas() stops reporting -EOVERFLOW. */
    size_t count = DEFAULT_VMA_COUNT;
    struct shim_vma_val * vmas = malloc(sizeof(struct shim_vma_val) * count);
    if (!vmas)
        return -ENOMEM;

retry_dump_vmas:
    ret = dump_all_vmas(vmas, count);

    if (ret == -EOVERFLOW) {
        struct shim_vma_val * new_vmas =
            malloc(sizeof(struct shim_vma_val) * count * 2);
        if (!new_vmas) {
            free(vmas);
            return -ENOMEM;
        }
        free(vmas);
        vmas = new_vmas;
        count *= 2;
        goto retry_dump_vmas;
    }

    if (ret < 0) {
        free(vmas);
        return ret;
    }

    count = ret;
    for (struct shim_vma_val * vma = vmas ; vma < vmas + count ; vma++) {
        /* Don't free the current stack */
        /* NOTE(review): matches on the exact base address only — a VMA
         * merely overlapping the stack would not be skipped; presumably
         * the stack is always its own VMA, but verify. */
        if (vma->addr == cur_thread->stack)
            continue;

        /* Free all the mapped VMAs */
        if (!(vma->flags & VMA_UNMAPPED))
            DkVirtualMemoryFree(vma->addr, vma->length);

        /* Remove the VMAs */
        bkeep_munmap(vma->addr, vma->length, vma->flags);
    }

    free_vma_val_array(vmas, count);

    SAVE_PROFILE_INTERVAL(unmap_all_vmas_for_exec);

    /* Load the new executable; past the point of no return a load
     * failure can only end the process. */
    if ((ret = load_elf_object(cur_thread->exec, NULL, 0)) < 0)
        shim_terminate(ret);

    init_brk_from_executable(cur_thread->exec);
    load_elf_interp(cur_thread->exec);

    SAVE_PROFILE_INTERVAL(load_new_executable_for_exec);

    /* The old image's robust-futex list does not survive execve. */
    cur_thread->robust_list = NULL;

#ifdef PROFILE
    if (ENTER_TIME)
        SAVE_PROFILE_INTERVAL_SINCE(syscall_execve, ENTER_TIME);
#endif

    debug("execve: start execution\n");
    /* Jumps into the new image; does not return on success. */
    execute_elf_object(cur_thread->exec, new_argcp, new_argp,
                       REQUIRED_ELF_AUXV, new_auxp);
    return 0;
}