int main (int argc, char ** argv, char ** envp)
{
    /* Allocate two anonymous regions of 4 allocation units each and one of 2 units. */
    void * p1 = (void *) DkVirtualMemoryAlloc(NULL, pal_control.alloc_align * 4, 0,
                                              PAL_PROT_READ|PAL_PROT_WRITE);
    void * p2 = (void *) DkVirtualMemoryAlloc(NULL, pal_control.alloc_align * 4, 0,
                                              PAL_PROT_READ|PAL_PROT_WRITE);
    void * p3 = (void *) DkVirtualMemoryAlloc(NULL, pal_control.alloc_align * 2, 0,
                                              PAL_PROT_READ|PAL_PROT_WRITE);

    /* Request an allocation at the midpoint between p1 and p2 (which may collide with
     * existing mappings), and re-request the range already backing p3. */
    DkVirtualMemoryAlloc((void *) (((uint64_t) p1 + (uint64_t) p2) / 2),
                         pal_control.alloc_align * 4, 0,
                         PAL_PROT_READ|PAL_PROT_WRITE);
    DkVirtualMemoryAlloc(p3, pal_control.alloc_align * 2, 0,
                         PAL_PROT_READ|PAL_PROT_WRITE);

    /* Free only the first allocation unit of p3. */
    DkVirtualMemoryFree(p3, pal_control.alloc_align);

    return 0;
}
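/* Context sketch (an assumption, not part of the original snippets): the PAL test
 * above and the one that follows are standalone programs that rely on the PAL API
 * and the exported pal_control structure. A minimal preamble for such a test could
 * look like the following; the exact header names are assumed for illustration. */
#include "pal.h"
#include "pal_debug.h"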
int main (int argc, char ** argv, char ** envp)
{
    volatile int c;

    /* handler counts memory faults (PAL_EVENT_MEMFAULT); the test compares
     * count before and after each access to detect whether it faulted. */
    DkSetExceptionHandler(handler, PAL_EVENT_MEMFAULT, 0);

    void * mem1 = (void *) DkVirtualMemoryAlloc(NULL, UNIT * 4, 0,
                                                PAL_PROT_READ|PAL_PROT_WRITE);
    if (mem1)
        pal_printf("Memory Allocation OK\n");

    void * mem2 = (void *) DkVirtualMemoryAlloc(NULL, UNIT, 0,
                                                PAL_PROT_READ|PAL_PROT_WRITE);
    if (mem2) {
        c = count;
        *(volatile int *) mem2 = 0;
        pal_printf("(int *) %p = %d\n", mem2, *(volatile int *) mem2);
        if (c == count)
            pal_printf("Memory Allocation Protection (RW) OK\n");

        DkVirtualMemoryProtect(mem2, UNIT, PAL_PROT_READ);
        c = count;
        *(volatile int *) mem2 = 0;
        asm volatile("nop");
        if (c == count - 1)
            pal_printf("Memory Protection (R) OK\n");

        DkVirtualMemoryFree(mem2, UNIT);
        c = count;
        *(volatile int *) mem2 = 0;
        asm volatile("nop");
        if (c == count - 1)
            pal_printf("Memory Deallocation OK\n");
    }

    /* Allocate at fixed addresses: the bottom and the top of the user address
     * range, steering clear of the executable range. */
    void * mem3 = (void *) pal_control.user_address.start;
    void * mem4 = (void *) pal_control.user_address.end - UNIT;

    if (mem3 >= pal_control.executable_range.start &&
        mem3 <  pal_control.executable_range.end)
        mem3 = (void *) (((PAL_NUM) pal_control.executable_range.end + UNIT - 1)
                         & ~(UNIT - 1));

    mem3 = (void *) DkVirtualMemoryAlloc(mem3, UNIT, 0,
                                         PAL_PROT_READ|PAL_PROT_WRITE);
    mem4 = (void *) DkVirtualMemoryAlloc(mem4, UNIT, 0,
                                         PAL_PROT_READ|PAL_PROT_WRITE);

    if (mem3 && mem4)
        pal_printf("Memory Allocation with Address OK\n");

    /* total memory */
    pal_printf("Total Memory: %llu\n", pal_control.mem_info.mem_total);

    unsigned long before = DkMemoryAvailableQuota();

    void * mem5 = (void *) DkVirtualMemoryAlloc(NULL, UNIT * 1000, 0,
                                                PAL_PROT_READ|PAL_PROT_WRITE);

    if (mem5) {
        unsigned long after = before;

        /* Repeatedly touch every page and record the lowest available quota seen. */
        for (int i = 0 ; i < 10000 ; i++) {
            for (void * ptr = mem5 ; ptr < mem5 + UNIT * 1000 ; ptr += UNIT)
                *(volatile int *) ptr = 0;
            unsigned long quota = DkMemoryAvailableQuota();
            if (quota < after)
                after = quota;
        }

        pal_printf("Memory Quota Before Allocation: %ld\n", before);
        pal_printf("Memory Quota After Allocation: %ld\n", after);

        /* chances are some pages get evicted, so require only 80% accuracy */
        if (before >= after + UNIT * 800)
            pal_printf("Get Memory Available Quota OK\n");
    }

    return 0;
}
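/* The test above relies on three definitions that are not shown here: UNIT, count,
 * and handler. A minimal sketch follows, assuming the classic PAL exception API
 * (DkExceptionReturn, a PAL_CONTEXT with an x86-64 rip field) and that UNIT is one
 * allocation unit. The idea is that the fault handler bumps count and then resumes
 * execution at the nop placed right after each possibly-faulting store. Names and
 * details here are assumptions for illustration, not taken from the original. */

#define UNIT pal_control.alloc_align   /* assumed: one allocation unit */

static volatile int count = 0;         /* assumed: incremented on each memory fault */

static void handler (PAL_PTR event, PAL_NUM arg, PAL_CONTEXT * context)
{
    count++;
    pal_printf("Memory Fault %d\n", count);

    /* Skip forward to the nop (0x90) that follows the faulting store, so the
     * test can continue past a write that was expected to fault. */
    while (*(unsigned char *) context->rip != 0x90)
        context->rip++;

    DkExceptionReturn(event);
}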
int shim_do_execve_rtld (struct shim_handle * hdl, const char ** argv,
                         const char ** envp)
{
    BEGIN_PROFILE_INTERVAL();

    struct shim_thread * cur_thread = get_cur_thread();
    int ret;

    if ((ret = close_cloexec_handle(cur_thread->handle_map)) < 0)
        return ret;

    SAVE_PROFILE_INTERVAL(close_CLOEXEC_files_for_exec);

    void * tcb = malloc(sizeof(__libc_tcb_t));
    if (!tcb)
        return -ENOMEM;

    populate_tls(tcb, false);
    /* Temporarily disable preemption during execve(). */
    __disable_preempt(&((__libc_tcb_t *) tcb)->shim_tcb);
    debug("set tcb to %p\n", tcb);

    put_handle(cur_thread->exec);
    get_handle(hdl);
    cur_thread->exec = hdl;

    /* Stash the old stack bounds in file-scope variables; the locals of this
     * frame are no longer usable after the stack switch below. */
    old_stack_top = cur_thread->stack_top;
    old_stack     = cur_thread->stack;
    old_stack_red = cur_thread->stack_red;
    cur_thread->stack_top = NULL;
    cur_thread->stack     = NULL;
    cur_thread->stack_red = NULL;

    initial_envp = NULL;
    new_argc = 0;
    for (const char ** a = argv ; *a ; a++, new_argc++);

    new_argcp = &new_argc;
    if ((ret = init_stack(argv, envp, &new_argcp, &new_argp,
                          REQUIRED_ELF_AUXV, &new_auxp)) < 0)
        return ret;

    SAVE_PROFILE_INTERVAL(alloc_new_stack_for_exec);

    SWITCH_STACK(new_argp);
    cur_thread = get_cur_thread();
    UPDATE_PROFILE_INTERVAL();

    DkVirtualMemoryFree(old_stack, old_stack_top - old_stack);
    DkVirtualMemoryFree(old_stack_red, old_stack - old_stack_red);

    if (bkeep_munmap(old_stack, old_stack_top - old_stack, 0) < 0 ||
        bkeep_munmap(old_stack_red, old_stack - old_stack_red, 0) < 0)
        BUG();

    remove_loaded_libraries();
    clean_link_map_list();
    SAVE_PROFILE_INTERVAL(unmap_loaded_binaries_for_exec);

    reset_brk();

    /* Dump the current VMA list, doubling the buffer until it fits. */
    size_t count = DEFAULT_VMA_COUNT;
    struct shim_vma_val * vmas = malloc(sizeof(struct shim_vma_val) * count);
    if (!vmas)
        return -ENOMEM;

retry_dump_vmas:
    ret = dump_all_vmas(vmas, count);

    if (ret == -EOVERFLOW) {
        struct shim_vma_val * new_vmas
                = malloc(sizeof(struct shim_vma_val) * count * 2);
        if (!new_vmas) {
            free(vmas);
            return -ENOMEM;
        }
        free(vmas);
        vmas = new_vmas;
        count *= 2;
        goto retry_dump_vmas;
    }

    if (ret < 0) {
        free(vmas);
        return ret;
    }

    count = ret;
    for (struct shim_vma_val * vma = vmas ; vma < vmas + count ; vma++) {
        /* Don't free the current stack */
        if (vma->addr == cur_thread->stack)
            continue;

        /* Free all the mapped VMAs */
        if (!(vma->flags & VMA_UNMAPPED))
            DkVirtualMemoryFree(vma->addr, vma->length);

        /* Remove the VMAs */
        bkeep_munmap(vma->addr, vma->length, vma->flags);
    }

    free_vma_val_array(vmas, count);

    SAVE_PROFILE_INTERVAL(unmap_all_vmas_for_exec);

    if ((ret = load_elf_object(cur_thread->exec, NULL, 0)) < 0)
        shim_terminate(ret);

    init_brk_from_executable(cur_thread->exec);
    load_elf_interp(cur_thread->exec);

    SAVE_PROFILE_INTERVAL(load_new_executable_for_exec);

    cur_thread->robust_list = NULL;

#ifdef PROFILE
    if (ENTER_TIME)
        SAVE_PROFILE_INTERVAL_SINCE(syscall_execve, ENTER_TIME);
#endif

    debug("execve: start execution\n");
    execute_elf_object(cur_thread->exec, new_argcp, new_argp,
                       REQUIRED_ELF_AUXV, new_auxp);

    return 0;
}
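/* The function above (and the variant below) keeps the old stack bounds and the
 * rebuilt argv/auxv in file-scope variables so they survive the stack switch.
 * A plausible sketch of those declarations, assuming they sit in the same LibOS
 * source file next to init_stack(); the exact types are assumptions: */
static void * old_stack_top, * old_stack, * old_stack_red;
static const char ** new_argp;
static int           new_argc;
static int *         new_argcp;   /* only used by the newer variant above */
static elf_auxv_t *  new_auxp;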
int shim_do_execve_rtld (struct shim_handle * hdl, const char ** argv,
                         const char ** envp)
{
    BEGIN_PROFILE_INTERVAL();

    struct shim_thread * cur_thread = get_cur_thread();
    int ret;

    if ((ret = close_cloexec_handle(cur_thread->handle_map)) < 0)
        return ret;

    SAVE_PROFILE_INTERVAL(close_CLOEXEC_files_for_exec);

    void * tcb = malloc(sizeof(__libc_tcb_t));
    if (!tcb)
        return -ENOMEM;

    populate_tls(tcb);

    put_handle(cur_thread->exec);
    get_handle(hdl);
    cur_thread->exec = hdl;

    old_stack_top = cur_thread->stack_top;
    old_stack     = cur_thread->stack;
    old_stack_red = cur_thread->stack_red;
    cur_thread->stack_top = NULL;
    cur_thread->stack     = NULL;
    cur_thread->stack_red = NULL;

    initial_envp = NULL;
    new_argc = 0;
    for (const char ** a = argv ; *a ; a++, new_argc++);

    if ((ret = init_stack(argv, envp, &new_argp,
                          REQUIRED_ELF_AUXV, &new_auxp)) < 0)
        return ret;

    SAVE_PROFILE_INTERVAL(alloc_new_stack_for_exec);

    switch_stack(new_argp);
    cur_thread = get_cur_thread();
    UPDATE_PROFILE_INTERVAL();

    DkVirtualMemoryFree(old_stack, old_stack_top - old_stack);
    DkVirtualMemoryFree(old_stack_red, old_stack - old_stack_red);

    int flags = VMA_INTERNAL;
    bkeep_munmap(old_stack, old_stack_top - old_stack, &flags);
    bkeep_munmap(old_stack_red, old_stack - old_stack_red, &flags);

    remove_loaded_libraries();
    clean_link_map_list();
    SAVE_PROFILE_INTERVAL(unmap_loaded_binaries_for_exec);

    init_brk();
    unmap_all_vmas();
    SAVE_PROFILE_INTERVAL(unmap_all_vmas_for_exec);

    if ((ret = load_elf_object(cur_thread->exec, NULL, 0)) < 0)
        shim_terminate();

    load_elf_interp(cur_thread->exec);

    SAVE_PROFILE_INTERVAL(load_new_executable_for_exec);

    debug("execve: start execution\n");
    execute_elf_object(cur_thread->exec, new_argc, new_argp,
                       REQUIRED_ELF_AUXV, new_auxp);

    return 0;
}