long AllocateKernelMemory(long inSize) { long addr; if (gImageLastKernelAddr == 0) { gImageLastKernelAddr = RoundPage(bootArgs->kaddr + bootArgs->ksize); } addr = gImageLastKernelAddr; gImageLastKernelAddr += RoundPage(inSize); if ( gImageLastKernelAddr >= (KERNEL_ADDR + KERNEL_LEN) ) { stop ("AllocateKernelMemory error"); } bootArgs->ksize = gImageLastKernelAddr - bootArgs->kaddr; return addr; }
long AllocateKernelMemory(long inSize) { if (gPlatform.LastKernelAddr == 0) { gPlatform.LastKernelAddr = RoundPage(bootArgs->kaddr + bootArgs->ksize); } long address = gPlatform.LastKernelAddr; gPlatform.LastKernelAddr += RoundPage(inSize); if (gPlatform.LastKernelAddr >= (KERNEL_ADDR + KERNEL_LEN)) { stop ("AllocateKernelMemory error"); } bootArgs->ksize = gPlatform.LastKernelAddr - bootArgs->kaddr; #if DEBUG printf("AllocateKernelMemory: 0x%lx - 0x%lx\n", address, inSize); getchar(); #endif return address; }
/*
 * page_fault_handler - exception handler for page faults.
 *
 * @int_num:       interrupt vector number of this exception
 * @reg:           saved register context at the time of the fault
 * @error_code:    page-fault error code (bits tested below: ERROR_USER,
 *                 ERROR_PRESENT, ERROR_WRITE — presumably the x86 #PF
 *                 error-code bits; confirm against the arch definitions)
 * @fault_address: linear address that caused the fault
 *
 * Handles two legitimate cases for user-space addresses:
 *   1. growing the current process's stack downward (within RLIMIT_STACK),
 *   2. activating a not-yet-present page of a known VM region
 *      (demand paging / copy-on-write).
 * Anything else is treated as a fatal error: registers and page tables
 * are dumped and the CPU is parked in an infinite loop (debug aid).
 */
EXPHANDLER void page_fault_handler(uint32_t int_num, struct ctx_reg *reg,
				   uint32_t error_code, unsigned long fault_address)
{
	/* Count of unexpected faults already reported (static: survives calls). */
	static int count = 0;
	struct process *current = get_current();
	struct memory_space *mspace = current->mspace;
	/* Address of the previous fault; a repeat means we made no progress. */
	static unsigned long before = 0;

	/* Same address faulting twice in a row: the last attempt to fix it
	 * had no effect, so dump state and hang rather than loop forever. */
	if (before == fault_address) {
		printf("same page fault address!!![pid=%d]\n", current->pid);
		//show_regs(reg, error_code, fault_address);
		show_pagetables(get_current(), fault_address, fault_address + 0x1000);
		show_vm_list(current);
		for(;;);
	}
	before = fault_address;

	/* Only user-space addresses faulted from user mode, or faults on a
	 * non-present page, are candidates for demand paging. */
	if ((fault_address < PROCESS_SIZE) &&
	    ((error_code & ERROR_USER) || (error_code & ERROR_PRESENT) == 0)) {
		// error_code & ERROR_USER) {
		unsigned long stack_rlim = current->rlimits[RLIMIT_STACK].rlim_cur;
		struct vm *vm;
		int err;

		/* ------------------------------------------------------------ */
		/* extend stack: fault lies within RLIMIT_STACK below the       */
		/* current stack region                                         */
		/* ------------------------------------------------------------ */
		if (mspace->end_stack && ((mspace->end_stack - stack_rlim) < fault_address)) {
			unsigned long extend_size;

			/* Fault is inside the already-mapped stack range. */
			if (mspace->start_stack < fault_address) {
				//printf("COW stack\n");
				//printf("activate stack 0x%08X [%d]\n", fault_address, is_vm_cow(fault_address));
				/* -------------------------------------------- */
				/* write to a copy-on-write stack page          */
				/* inherited from fork(): materialize it        */
				/* -------------------------------------------- */
				if ((error_code & ERROR_WRITE) && is_vm_cow(fault_address)) {
					goto activate_page;
				}
				/* Present stack page faulting for another reason:
				 * unrecoverable — dump and hang. */
				printf("cow stack fault <pid>:%d\n", get_current()->pid);
				show_regs(reg, error_code, fault_address);
				show_pagetables(get_current(), fault_address, fault_address + 0x1000);
				//show_pagetables(current, mspace->start_stack, mspace->end_stack);
				//panic("1:unexpected error at %s [stack top:0x%08X, fault address:0x%08X]\n",
				//      __func__, mspace->start_stack, fault_address);
				for(;;);
			}

			/* Grow the stack down to cover the fault, by at least
			 * PROC_STACK_SIZE, but never beyond RLIMIT_STACK. */
			extend_size = mspace->start_stack - fault_address;
			extend_size = RoundPage(extend_size);
			if (extend_size < PROC_STACK_SIZE) {
				extend_size = PROC_STACK_SIZE;
			}
			if (stack_rlim < ((mspace->end_stack - mspace->start_stack) + extend_size)) {
				extend_size = stack_rlim - (mspace->end_stack - mspace->start_stack);
			}
			//printf("extend stack [0x%08X]\n", extend_size);
			//err = vm_extend_stack(current, PROC_STACK_SIZE);
			err = vm_extend_stack(current, extend_size);
			if (err) {
				show_regs(reg, error_code, fault_address);
				panic("2:unexpected error:vm_extend_stack at %s[0x%08X]\n",
				      __func__, fault_address);
				goto failed_extend_stack;
			}
			/* stack expanded successfully */
			return;
		}

		/* Fault above the stack top but not an extension candidate:
		 * should not happen — report and hang. */
		if (mspace->start_stack < fault_address) {
			show_regs(reg, error_code, fault_address);
			printf("5:unexpected error\n");
			panic("5:unexpected error %s[0x%08X] 0x%08X\n",
			      __func__, fault_address, mspace->start_stack);
			for(;;);
		}

activate_page:
		/* ------------------------------------------------------------ */
		/* activate vm page: map in the page backing fault_address      */
		/* (demand paging / COW materialization)                        */
		/* ------------------------------------------------------------ */
		vm = get_address_vm(current, fault_address, fault_address);
		if (UNLIKELY(!vm)) {
			/* No VM region covers this address: genuine bad access. */
			show_regs(reg, error_code, fault_address);
			show_pagetables(current, fault_address & PAGE_MASK,
					(fault_address + 0x1000) & PAGE_MASK);
			//show_vm_list(current);
			printf("6:cannot find get_address_vm in page fault[0x%08X]\n", fault_address);
			//*(int*)0xFFFFFFFF = 0;
			for(;;);
			goto failed_activate_vm; /* unreachable: kept for the label's sake */
		}
		err = activate_vm_page(current, vm, fault_address, error_code);
		if (UNLIKELY(err)) {
			show_regs(reg, error_code, fault_address);
			show_vm_list(current);
			panic("7:page fault panic\n");
			goto failed_activate_vm;
		}
		return;
	}

failed_activate_vm:
failed_extend_stack:
	/* Unexpected fault: report the first occurrence, hang on repeats. */
	if (1 <= count) {
		for(;;);
	} else {
		printf("8:unexpected page fault\n");
		show_regs(reg, error_code, fault_address);
		count++;
	}
}
// Allocate `size` bytes of read/write/execute memory for JIT code emission.
// When `low` is true the caller wants the block in low address space
// (presumably so 32-bit displacements can reach it — confirm against the
// JIT backends).  Returns the mapping, or nullptr on failure (after
// raising a PanicAlert).
void* AllocateExecutableMemory(size_t size, bool low)
{
#if defined(_WIN32)
	void* ptr = VirtualAlloc(0, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
#else
	// Persistent bump hint so successive low allocations don't collide.
	static char* map_hint = nullptr;
#if defined(_M_X86_64) && !defined(MAP_32BIT)
	// This OS has no flag to enforce allocation below the 4 GB boundary,
	// but if we hint that we want a low address it is very likely we will
	// get one.
	// An older version of this code used MAP_FIXED, but that has the side
	// effect of discarding already mapped pages that happen to be in the
	// requested virtual memory range (such as the emulated RAM, sometimes).
	if (low && (!map_hint))
		map_hint = (char*)RoundPage(512 * 1024 * 1024); /* 0.5 GB rounded up to the next page */
#endif
	void* ptr = mmap(map_hint, size, PROT_READ | PROT_WRITE | PROT_EXEC,
			 MAP_ANON | MAP_PRIVATE
#if defined(_M_X86_64) && defined(MAP_32BIT)
			 // Kernel-enforced low mapping where available.
			 | (low ? MAP_32BIT : 0)
#endif
			 , -1, 0);
#endif /* defined(_WIN32) */

	// NOTE: the failure test and the cleanup differ per platform, so the
	// `if` below is opened inside each preprocessor branch and shared.
#ifdef _WIN32
	if (ptr == nullptr)
	{
#else
	if (ptr == MAP_FAILED)
	{
		ptr = nullptr;  // normalize the POSIX sentinel to nullptr
#endif
		PanicAlert("Failed to allocate executable memory. If you are running Dolphin in Valgrind, try "
		           "'#undef MAP_32BIT'.");
	}
#if !defined(_WIN32) && defined(_M_X86_64) && !defined(MAP_32BIT)
	else
	{
		if (low)
		{
			// Advance the hint past this block for the next caller.
			map_hint += size;
			map_hint = (char*)RoundPage((uintptr_t)map_hint); /* round up to the next page */
		}
	}
#endif

#if _M_X86_64
	// Sanity check: a "low" request that landed at or above 2 GB defeats
	// its purpose, so complain loudly.
	if ((u64)ptr >= 0x80000000 && low == true)
		PanicAlert("Executable memory ended up above 2GB!");
#endif

	return ptr;
}

// Allocate `size` bytes of plain read/write page-backed memory.
// Returns the mapping, or nullptr on failure (after raising a PanicAlert).
void* AllocateMemoryPages(size_t size)
{
#ifdef _WIN32
	void* ptr = VirtualAlloc(0, size, MEM_COMMIT, PAGE_READWRITE);
#else
	void* ptr = mmap(nullptr, size, PROT_READ | PROT_WRITE,
			 MAP_ANON | MAP_PRIVATE, -1, 0);

	if (ptr == MAP_FAILED)
		ptr = nullptr;  // normalize the POSIX sentinel to nullptr
#endif

	if (ptr == nullptr)
		PanicAlert("Failed to allocate raw memory");

	return ptr;
}