/* Copies the given kernel stack page table into the region 0 page table.
   Does not flush the TLB. */
void UseKernelStackForProc(PCB *pcb) {
    unsigned int kernel_stack_base_page = ADDR_TO_PAGE(KERNEL_STACK_BASE);
    unsigned int i;
    for (i = 0; i < NUM_KERNEL_PAGES; i++) {
        region_0_page_table[kernel_stack_base_page + i] = pcb->kernel_stack_page_table[i];
    }
}
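/* For reference: every snippet below leans on ADDR_TO_PAGE (and its inverse).
   A minimal sketch of what such macros usually look like, assuming a
   power-of-two page size exposed through PAGESHIFT. These are hypothetical
   reconstructions; each project defines the real versions in its headers. */
#define PAGESHIFT 12  /* 4 KB pages, for illustration only */
#define ADDR_TO_PAGE(addr) ((unsigned int)(addr) >> PAGESHIFT)
#define PAGE_TO_ADDR(page) ((unsigned int)(page) << PAGESHIFT)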
/**
 * Free an allocated memory block.
 *
 * Whenever a block is freed, the allocator checks its buddy. If the buddy is
 * free as well, then the two buddies are combined to form a bigger block. This
 * process continues until one of the buddies is not free.
 *
 * @param addr memory block address to be freed
 */
void buddy_free(void *addr) {
    int page = ADDR_TO_PAGE(addr);
    Node* node = find_page(page);
    node->free = 1;

    /* Walk upward, coalescing as long as both children of the parent are free. */
    Node* parent = node->parent;
    while (parent != 0 && (parent->left->free + parent->right->free == 2)) {
        free(parent->left);
        free(parent->right);
        parent->left = 0;
        parent->right = 0;
        page = parent->pageIndex;
        node = parent;
        parent = node->parent;
        node->free = 1;
    }
}
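/* A sketch of the tree node and lookup that buddy_free() appears to assume.
   The field names and find_page_sketch() are hypothetical reconstructions;
   the real Node and find_page() live elsewhere in this allocator. */
typedef struct Node {
    struct Node *parent, *left, *right;
    int order;      /* block size is (1 << order) bytes */
    int pageIndex;  /* first page of the block */
    int free;       /* nonzero if the block is unallocated */
} Node;

/* Depth-first search for the leaf block that starts at `page`. */
static Node *find_page_sketch(Node *root, int page) {
    if (root == 0) return 0;
    if (root->left == 0 && root->right == 0)
        return (root->pageIndex == page) ? root : 0;
    Node *hit = find_page_sketch(root->left, page);
    return hit ? hit : find_page_sketch(root->right, page);
}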
/**
 * free_pagedir - frees a page directory, its page tables, and their pages
 * @phys_pd: physical page number of the page directory
 **/
void free_pagedir(uint_t phys_pd) {
    page_entry_t *pd;
    page_entry_t *pt;
    int i, j;
    if (phys_pd == KERNEL_PAGE_DIRECTORY) {
        panic("free_pagedir: freeing kernel!");
    }
    if (phys_pd == ADDR_TO_PAGE(asm_get_cr3())) {
        panic("free_pagedir: freeing active pages! O_o");
    }
    pd = temp_phys_page(0, phys_pd);
    for (i = KMEM_PDE_END; i < MEMORY_PE_COUNT; ++i) {
        if (!pd[i].pagenum) {
            continue;
        }
        pt = temp_phys_page(1, pd[i].pagenum);
        for (j = 0; j < MEMORY_PE_COUNT; ++j) {
            if (!pt[j].pagenum) {
                continue;
            }
            free_phys_page(pt[j].pagenum);
            pt[j] = NULL_PE;
        }
        temp_phys_page(1, 0);
        free_phys_page(pd[i].pagenum);
        pd[i] = NULL_PE;
    }
    temp_phys_page(0, 0);
    free_phys_page(phys_pd);
}
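/* A rough sketch of the temp_phys_page(slot, physpage) helper the code above
   relies on. Everything here is a hypothetical reconstruction (kernel_pt,
   TEMP_PE_BASE, and asm_set_cr3 are assumed names): map physical page
   `physpage` into a reserved kernel PTE slot and return its virtual address;
   passing physpage == 0 releases the slot. Assumes kernel_pt maps virtual
   page n at PTE index n. */
static void *temp_phys_page_sketch(int slot, uint_t physpage) {
    kernel_pt[TEMP_PE_BASE + slot].pagenum = physpage;
    kernel_pt[TEMP_PE_BASE + slot].present = (physpage != 0);
    asm_set_cr3(asm_get_cr3());  /* reload CR3 to flush stale TLB entries */
    return (void *) PAGE_TO_ADDR(TEMP_PE_BASE + slot);
}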
int SetKernelBrk(void *addr) {
    TracePrintf(TRACE_LEVEL_FUNCTION_INFO, ">>> SetKernelBrk(%p)\n", addr);
    unsigned int new_kernel_brk_page = ADDR_TO_PAGE(addr - 1) + 1;

    // Ensure we aren't imposing on kernel stack limits.
    if (((unsigned int) addr) > KERNEL_STACK_BASE) {
        TracePrintf(TRACE_LEVEL_NON_TERMINAL_PROBLEM,
                "Address passed to SetKernelBrk() (%p) is greater than kernel stack base (%p).\n",
                addr, KERNEL_STACK_BASE);
        return -1;
    }

    // If virtual memory is enabled, give the kernel heap more frames or take some away.
    if (virtual_memory_enabled) {
        unsigned int kernel_stack_base_frame = ADDR_TO_PAGE(KERNEL_STACK_BASE);
        if (new_kernel_brk_page > kernel_brk_page) { // Heap should grow.
            unsigned int new_page;
            for (new_page = kernel_brk_page;
                    new_page < new_kernel_brk_page && new_page < kernel_stack_base_frame;
                    new_page++) {
                int rc = MapNewRegion0Page(new_page);
                if (rc == ERROR) {
                    TracePrintf(TRACE_LEVEL_NON_TERMINAL_PROBLEM,
                            "MapNewRegion0Page(%u) failed.\n", new_page);
                    return -1;
                }
            }
        } else if (new_kernel_brk_page < kernel_brk_page) { // Heap should shrink.
            unsigned int page_to_free;
            for (page_to_free = kernel_brk_page - 1; page_to_free >= new_kernel_brk_page;
                    page_to_free--) {
                if (page_to_free < kernel_stack_base_frame) {
                    UnmapUsedRegion0Page(page_to_free);
                }
            }
        }
        // new_kernel_brk_page == kernel_brk_page: nothing to do.
    }

    TracePrintf(TRACE_LEVEL_FUNCTION_INFO, "<<< SetKernelBrk()\n\n");
    kernel_brk_page = new_kernel_brk_page;
    return 0;
}
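/* Hypothetical sketches of the two helpers used above. The pte fields and
   GetUnusedFrame() match their usage in the other snippets here, but
   ReleaseUsedFrame is an assumed name, not the project's confirmed API. */
static int MapNewRegion0PageSketch(unsigned int page) {
    /* Grab a free frame and install it at `page` with kernel R/W protection. */
    if (GetUnusedFrame(&region_0_page_table[page]) == ERROR) {
        return ERROR;
    }
    region_0_page_table[page].valid = 1;
    region_0_page_table[page].prot = PROT_READ | PROT_WRITE;
    return 0;
}

static void UnmapUsedRegion0PageSketch(unsigned int page) {
    /* Return the frame to the free pool, invalidate the pte, flush the TLB entry. */
    ReleaseUsedFrame(region_0_page_table[page].pfn);
    region_0_page_table[page].valid = 0;
    WriteRegister(REG_TLB_FLUSH, page << PAGESHIFT);
}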
void page_fault_handler(void) {
    /* (cs & 3) is the privilege level of the faulting code; ring 0 means the
       fault happened inside the kernel itself. */
    if (!has_threading() || (kernel_tasks.tss_for_active_thread.cs & 3) == 0) {
        kprintf("Kernel process = %d, thread = %d\n", active_pid, active_tid);
        kprintf("Trying to access address %p (page %d).\n",
                asm_get_cr2(), ADDR_TO_PAGE(asm_get_cr2()));
        panic("Page fault in kernel!");
        return;
    }
    int i = handle_user_pagefault();
    if (i) {
        /* The fault could not be resolved: schedule away, then kill the thread. */
        tid_t tid = active_tid;
        while (tid == active_tid) {
            scheduler();
        }
        kill_thread(tid);
    }
    return;
}
/* Note: This must be executed in the magic kernel context switch space!!!
   (Indices below count from 0 at the stack base; negative indices count back
   from the top of the stack.)
   First, maps kernel_stack[0] = dest_kernel_stack[-1] and copies
   kernel_stack[0] <-- kernel_stack[-1] = source_kernel_stack[-1].
   Then, maps kernel_stack[0] = source_kernel_stack[0].
   Then, for i = -2 down to 0, maps kernel_stack[i+1] = dest_kernel_stack[i] and
   copies kernel_stack[i+1] <-- kernel_stack[i] = source_kernel_stack[i]. */
void CopyKernelStackPageTableAndData(PCB *source, PCB *dest) {
    unsigned int kernel_stack_base_page = ADDR_TO_PAGE(KERNEL_STACK_BASE);
    int i;

    // First, map kernel_stack[0] = dest_kernel_stack[-1] and copy
    // kernel_stack[0] <-- kernel_stack[-1] = source_kernel_stack[-1].
    region_0_page_table[kernel_stack_base_page] =
            dest->kernel_stack_page_table[NUM_KERNEL_PAGES - 1];
    WriteRegister(REG_TLB_FLUSH, kernel_stack_base_page << PAGESHIFT);
    CopyRegion0PageData(kernel_stack_base_page + NUM_KERNEL_PAGES - 1, kernel_stack_base_page);

    // Then, map kernel_stack[0] = source_kernel_stack[0].
    region_0_page_table[kernel_stack_base_page] = source->kernel_stack_page_table[0];
    WriteRegister(REG_TLB_FLUSH, kernel_stack_base_page << PAGESHIFT);

    // Then, for i = -2 down to 0, map kernel_stack[i+1] = dest_kernel_stack[i] and copy
    // kernel_stack[i+1] <-- kernel_stack[i] = source_kernel_stack[i].
    for (i = NUM_KERNEL_PAGES - 2; i >= 0; i--) {
        region_0_page_table[kernel_stack_base_page + i + 1] = dest->kernel_stack_page_table[i];
        WriteRegister(REG_TLB_FLUSH, (kernel_stack_base_page + i + 1) << PAGESHIFT);
        CopyRegion0PageData(kernel_stack_base_page + i, kernel_stack_base_page + i + 1);
    }
}
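/* A minimal sketch of CopyRegion0PageData(src_page, dst_page), assuming both
   pages are currently mapped in region 0 so their contents are reachable
   through their virtual addresses (hypothetical reconstruction, not the
   project's confirmed implementation): */
static void CopyRegion0PageDataSketch(unsigned int src_page, unsigned int dst_page) {
    memcpy((void *)(dst_page << PAGESHIFT), (void *)(src_page << PAGESHIFT), PAGESIZE);
}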
void TrapMemory(UserContext *user_context) {
    TracePrintf(TRACE_LEVEL_FUNCTION_INFO, ">>> TrapMemory(%p)\n", user_context);
    unsigned int addr_int = (unsigned int) user_context->addr;

    // Check whether addr is outside of region 1. Illegal mem addr, so kill.
    if (addr_int >= VMEM_1_LIMIT || addr_int < VMEM_1_BASE) {
        char *err_str = calloc(TERMINAL_MAX_LINE, sizeof(char));
        sprintf(err_str, "Out of range memory access at %p by proc %d\n",
                user_context->addr, current_proc->pid);
        KernelTtyWriteInternal(0, err_str, strnlen(err_str, TERMINAL_MAX_LINE), user_context);
        free(err_str);
        KernelExit(ERROR, user_context);
    }

    // Get the appropriate page in region 1.
    int addr_page = ADDR_TO_PAGE(addr_int - VMEM_1_BASE);
    if (current_proc->region_1_page_table[addr_page].valid != 1) { // Address not mapped.
        bool below_current_stack = (addr_page < current_proc->lowest_user_stack_page);
        bool above_heap = (addr_page > current_proc->user_brk_page);
        if (below_current_stack && above_heap) { // Valid stack growth.
            TracePrintf(TRACE_LEVEL_DETAIL_INFO, "Growing User stack\n");

            // Allocate every page from right below the current lowest user stack
            // page down to the memory address hit.
            unsigned int page_to_alloc = current_proc->lowest_user_stack_page - 1;
            while (page_to_alloc >= (unsigned int) addr_page) {
                // Try to get a new frame, and handle the error case.
                if (GetUnusedFrame(&(current_proc->region_1_page_table[page_to_alloc])) == ERROR) {
                    TracePrintf(TRACE_LEVEL_NON_TERMINAL_PROBLEM, "GetUnusedFrame() failed.\n");
                    char *err_str = calloc(TERMINAL_MAX_LINE, sizeof(char));
                    sprintf(err_str, "Proc %d tried to grow stack, but out of free frames\n",
                            current_proc->pid);
                    KernelTtyWriteInternal(0, err_str, strnlen(err_str, TERMINAL_MAX_LINE),
                            user_context);
                    free(err_str);
                    KernelExit(ERROR, user_context);
                }
                assert(!current_proc->region_1_page_table[page_to_alloc].valid);

                // Set the pte data.
                current_proc->region_1_page_table[page_to_alloc].valid = 1;
                current_proc->region_1_page_table[page_to_alloc].prot = PROT_READ | PROT_WRITE;
                --page_to_alloc;
            }

            // Update the pcb to reflect the change.
            current_proc->lowest_user_stack_page = addr_page;
        } else if (!above_heap) { // Stack grew into the heap! OOM!
            TracePrintf(TRACE_LEVEL_NON_TERMINAL_PROBLEM,
                    "Out of mem on stack growth at %p\n", user_context->addr);
            char *err_str = calloc(TERMINAL_MAX_LINE, sizeof(char));
            sprintf(err_str, "Proc %d tried to grow its stack into its heap\n",
                    current_proc->pid);
            KernelTtyWriteInternal(0, err_str, strnlen(err_str, TERMINAL_MAX_LINE), user_context);
            free(err_str);
            KernelExit(ERROR, user_context);
        } else { // Not below the user stack? Should not happen!
            TracePrintf(TRACE_LEVEL_NON_TERMINAL_PROBLEM,
                    "Somehow unmapped addr is above the bottom of the stack\n");
            char *err_str = calloc(TERMINAL_MAX_LINE, sizeof(char));
            sprintf(err_str, "Proc %d found an unmapped page in its stack. Sorry.\n",
                    current_proc->pid);
            KernelTtyWriteInternal(0, err_str, strnlen(err_str, TERMINAL_MAX_LINE), user_context);
            free(err_str);
            KernelExit(ERROR, user_context);
        }
    } else { // Page was mapped and in range, so must be invalid permissions.
        TracePrintf(TRACE_LEVEL_NON_TERMINAL_PROBLEM,
                "Proc %d accessed mem with invalid permissions\n", current_proc->pid);
        char *err_str = calloc(TERMINAL_MAX_LINE, sizeof(char));
        sprintf(err_str, "Proc %d accessed %p with invalid permissions\n",
                current_proc->pid, user_context->addr);
        KernelTtyWriteInternal(0, err_str, strnlen(err_str, TERMINAL_MAX_LINE), user_context);
        free(err_str);
        KernelExit(ERROR, user_context);
    }
    TracePrintf(TRACE_LEVEL_FUNCTION_INFO, "<<< TrapMemory()\n\n");
}
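/* A hypothetical sketch of GetUnusedFrame(): pop a free frame number off a
   free-frame stack and record it in the pte's pfn field. The free_frames
   array and free_frame_count are assumed names, not the project's real
   bookkeeping structures. */
static int GetUnusedFrameSketch(struct pte *pte) {
    if (free_frame_count == 0) {
        return ERROR;  /* no physical frames left */
    }
    pte->pfn = free_frames[--free_frame_count];
    return SUCCESS;
}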
#include <types.h>
#include <mmu.h>

memory_layout_entry memory_padr_layout[] = {
    {ADDR_TO_PAGE(0x48002000), ADDR_TO_PAGE(0x48003000), MLT_IO_RO_REG, MLF_READABLE},                   /* SYSTEM CONTROL MODULE 4K, preferably RO */
    {ADDR_TO_PAGE(0x48004000), ADDR_TO_PAGE(0x48006000), MLT_IO_RO_REG, MLF_READABLE},                   /* CLOCKS 16K (only 8K needed in Linux port) */
    {ADDR_TO_PAGE(0x4806a000), ADDR_TO_PAGE(0x4806b000), MLT_IO_RW_REG, MLF_READABLE | MLF_WRITEABLE},   /* UART1 4K */
    {ADDR_TO_PAGE(0x4806c000), ADDR_TO_PAGE(0x4806d000), MLT_IO_RW_REG, MLF_READABLE | MLF_WRITEABLE},   /* UART2 4K */
    {ADDR_TO_PAGE(0x48200000), ADDR_TO_PAGE(0x48201000), MLT_IO_HYP_REG, MLF_READABLE | MLF_WRITEABLE},  /* INTERRUPT CONTROLLER BASE 16KB (only 4K needed in Linux port) */
    {ADDR_TO_PAGE(0x48304000), ADDR_TO_PAGE(0x48305000), MLT_IO_RO_REG, MLF_READABLE},                   /* L4-Wakeup (gp-timer in reserved) 4KB */
    {ADDR_TO_PAGE(0x48306000), ADDR_TO_PAGE(0x48308000), MLT_IO_RO_REG, MLF_READABLE},                   /* L4-Wakeup (power-reset manager) module A 8KB; can be RO, OMAP reads the HW register to set up clocks */
    {ADDR_TO_PAGE(0x48320000), ADDR_TO_PAGE(0x48321000), MLT_IO_RO_REG, MLF_READABLE},                   /* L4-Wakeup (32K timer module) 4KB RO */
    {ADDR_TO_PAGE(0x4830A000), ADDR_TO_PAGE(0x4830B000), MLT_IO_RO_REG, MLF_READABLE},                   /* CONTROL MODULE ID CODE 4KB RO */
    {ADDR_TO_PAGE(0x49020000), ADDR_TO_PAGE(0x49021000), MLT_IO_RW_REG, MLF_READABLE | MLF_WRITEABLE},   /* UART3 */
    {ADDR_TO_PAGE(0x80100000), ADDR_TO_PAGE(0x80500000), MLT_HYPER_RAM, MLF_READABLE | MLF_WRITEABLE},   /* hypervisor ram */
    {ADDR_TO_PAGE(0x80500000), ADDR_TO_PAGE(0x80600000), MLT_TRUSTED_RAM, MLF_READABLE | MLF_WRITEABLE}, /* trusted ram */
    {ADDR_TO_PAGE(0x81000000), ADDR_TO_PAGE(0x81000000 + 0x00500000), MLT_USER_RAM, MLF_READABLE | MLF_WRITEABLE | MLF_LAST}, /* user ram */
};
#include <types.h>
#include <mmu.h>

memory_layout_entry memory_padr_layout[] = {
    {ADDR_TO_PAGE(0x00000000), ADDR_TO_PAGE(0x000fffff), MLT_HYPER_RAM, MLF_READABLE | MLF_WRITEABLE},       /* hypervisor ram */
    {ADDR_TO_PAGE(0x00100000), ADDR_TO_PAGE(0x001fffff), MLT_USER_RAM, MLF_READABLE | MLF_WRITEABLE},        /* user ram */
    {ADDR_TO_PAGE(0xA0000000), ADDR_TO_PAGE(0xA00FFFFF), MLT_IO_REG, MLF_READABLE | MLF_WRITEABLE},          /* IO */
    {ADDR_TO_PAGE(0x80000000), ADDR_TO_PAGE(0x8FFFFFFF), MLT_IO_REG, MLF_READABLE | MLF_WRITEABLE | MLF_LAST} /* IO; MLF_LAST terminates the table, as in the OMAP layout above */
};
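/* A hypothetical sketch of the entry type both tables above assume. The real
   struct is defined in mmu.h, as is whether the end page is inclusive or
   exclusive (the OMAP table uses exclusive ends; this table, inclusive ones). */
typedef struct {
    uint32_t page_start; /* first physical page of the region */
    uint32_t page_end;   /* end page of the region */
    uint32_t type;       /* MLT_* classification */
    uint32_t flags;      /* MLF_* access bits; MLF_LAST marks the final entry */
} memory_layout_entry_sketch;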
/**
 * Allocate a memory block.
 *
 * On a memory request, the allocator returns the head of a free-list of the
 * matching size (i.e., the smallest block that satisfies the request). If the
 * free-list of the matching block size is empty, then a larger block size will
 * be selected. The selected (large) block is then split into two smaller
 * blocks. Of the two, the left block will be used for allocation or split
 * further, while the right block will be added to the appropriate free-list.
 *
 * @param size size in bytes
 * @return memory block address
 */
void *buddy_alloc(int size) {
    if (size > (1 << MAX_ORDER)) {
        printf("Size too big for memory space\n");
        return NULL;
    }

    // Find the smallest order whose block size covers the request.
    int newOrder = MIN_ORDER;
    while (size > (1 << newOrder)) {
        newOrder++;
    }

    // Search for a free block of that order; if none exists, look for
    // successively larger blocks, counting how many splits will be needed.
    int splits = 0;
    Node* temp = find_order(newOrder);
    while (temp == 0) {
        newOrder++;
        temp = find_order(newOrder);
        splits++;
    }
    if (splits == 0) {
        temp->free = 0;
        return PAGE_TO_ADDR(temp->pageIndex);
    }

    // Split the larger block down to the requested order, always descending
    // into the left child and leaving the right child free.
    while (splits > 0) {
        Node* tempLeft = init_node(temp, temp->pageIndex);
        int pageRight = ADDR_TO_PAGE(BUDDY_ADDR(PAGE_TO_ADDR((unsigned long)temp->pageIndex),
                (temp->order - 1)));
        Node* tempRight = init_node(temp, pageRight);
        temp->left = tempLeft;
        temp->right = tempRight;
        temp->free = 0;
        temp = temp->left;
        splits--;
    }
    temp->free = 0;
    return PAGE_TO_ADDR(temp->pageIndex);
}
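/* The buddy-address arithmetic the allocator above leans on: the buddy of a
   block of order o differs from it only in bit o of its address. A minimal
   sketch, assuming the managed region starts at address 0 (hypothetical
   definitions with a _SKETCH suffix; the lab's real macros also offset by the
   base address of the managed region): */
#define PAGE_TO_ADDR_SKETCH(page)  ((void *)((unsigned long)(page) << MIN_ORDER))
#define ADDR_TO_PAGE_SKETCH(addr)  ((unsigned long)(addr) >> MIN_ORDER)
#define BUDDY_ADDR_SKETCH(addr, o) ((void *)(((unsigned long)(addr)) ^ (1UL << (o))))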
int handle_user_pagefault(void) {
    phys_pagedir = active_process->mem.phys_pd;

    /* Data side: the faulting address from CR2. */
    cr2 = asm_get_cr2();
    dpage = ADDR_TO_PAGE(cr2);
    doffset = (char*) cr2 - (char*) PAGE_TO_ADDR(dpage);
    dpde = dpage / MEMORY_PE_COUNT;
    dpte = dpage % MEMORY_PE_COUNT;

    /* Code side: the faulting instruction pointer. */
    eip = (void*) kernel_tasks.tss_for_active_thread.eip;
    cpage = ADDR_TO_PAGE(eip);
    coffset = (char*) eip - (char*) PAGE_TO_ADDR(cpage);
    cpde = cpage / MEMORY_PE_COUNT;
    cpte = cpage % MEMORY_PE_COUNT;

    const char *panic_msg;
    uint_t new_location;
    page_entry_t *pd, *pt;

    if (!(pd = temp_page_directory(phys_pagedir))) {
        goto no_pd_got;
    }
    if (!pd[dpde].pagenum) {
        goto no_pt_page;
    }
    if ((dpde >= KMEM_PDE_END) && !pd[dpde].user) {
        printf("User process = %d, thread = %d\n", active_pid, active_tid);
        printf("Trying to access address %p (page %d).\n", asm_get_cr2(), ADDR_TO_PAGE(asm_get_cr2()));
        printf("(!pd[dpde].user)\n");
        panic("Bug in memory handling!");
    }
    if (!pd[dpde].present) {
        /* The page table itself has been swapped out; bring it back. */
        new_location = swap_in(phys_pagedir, pd[dpde].pagenum);
        if (!new_location) {
            goto no_pt_page_swapped;
        }
        pd[dpde].pagenum = new_location;
        pd[dpde].present = 1;
    }
    if (!(pt = temp_page_table(pd[dpde].pagenum))) {
        goto no_pt_got;
    }
    if (dpde && dpte && !pt[dpte].pagenum) {
        goto no_cr2_page;
    }
    if (!pt[dpte].user) {
        return user_tries_kernel();
    }
    if (dpde < KMEM_PDE_END) {
        printf("User process = %d, thread = %d\n", active_pid, active_tid);
        printf("Trying to access address %p (page %d).\n", asm_get_cr2(), ADDR_TO_PAGE(asm_get_cr2()));
        printf("((dpde < KMEM_PDE_END) && pt[dpte].user)\n");
        panic("Bug in memory handling!");
        return user_tries_kernel();
    }
    if (!pt[dpte].present) {
        /* The data page has been swapped out; bring it back. */
        new_location = swap_in(phys_pagedir, pt[dpte].pagenum);
        if (!new_location) {
            goto no_cr2_page_swapped;
        }
        pt[dpte].pagenum = new_location;
        pt[dpte].present = 1;
    }
    return 0; // Resolved. :)

no_pd_got:
    panic_msg = "Page fault: failed getting PD from RAM!";
    goto fail;
no_pt_page:
    panic_msg = "Page fault; page missing from PD!";
    goto fail;
no_pt_page_swapped:
    panic_msg = "Page fault; failed swapping PT to RAM!";
    goto fail;
no_pt_got:
    panic_msg = "Page fault; failed getting PT from RAM!";
    goto fail;
no_cr2_page:
    panic_msg = "Page fault; page missing from PT!";
    goto fail;
no_cr2_page_swapped:
    panic_msg = "Page fault; failed swapping page to RAM!";
    goto fail;
fail:
    printf("Page Fault!\nThread %i, process %i\n", active_tid, active_pid);
    printf("Trying to access address %p (page %d).\n", cr2, dpage);
    printf("%s\n", panic_msg);
    return -1;
}
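/* Worked example of the page/PDE/PTE split above, assuming 4 KB pages and
   1024-entry tables (MEMORY_PE_COUNT == 1024), i.e. classic two-level x86
   paging. The address 0x0804a123 is just an illustrative value. */
#include <stdio.h>

int main(void) {
    unsigned int cr2 = 0x0804a123;     /* example faulting address */
    unsigned int page = cr2 >> 12;     /* ADDR_TO_PAGE: 0x804a */
    printf("pde=%u pte=%u offset=0x%03x\n",
           page / 1024,                /* 32: which page table */
           page % 1024,                /* 74: which entry in that table */
           cr2 & 0xfff);               /* 0x123: offset within the page */
    return 0;
}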
/**
 * cur_phys_pd - fetches the page directory's page number (not its address) from CR3
 **/
uint_t cur_phys_pd(void) {
    return ADDR_TO_PAGE(asm_get_cr3());
}
void KernelStart(char *cmd_args[], unsigned int pmem_size, UserContext *uctxt) {
    virtual_memory_enabled = false;
    next_synch_resource_id = 1;

    // Initialize the interrupt vector table and write the base address
    // to the REG_VECTOR_BASE register.
    TrapTableInit();

    // Create the idle proc.
    UserContext model_user_context = *uctxt;
    idle_proc = NewBlankPCB(model_user_context);

    // Perform the malloc for the idle proc's kernel stack page table before making page tables.
    idle_proc->kernel_stack_page_table =
            (struct pte *) calloc(KERNEL_STACK_MAXSIZE / PAGESIZE, sizeof(struct pte));

    // Build the initial page table for region 0 such that page = frame for all valid pages.
    region_0_page_table = (struct pte *) calloc(VMEM_0_SIZE / PAGESIZE, sizeof(struct pte));

    // Create the idle proc's page table for region 1.
    CreateRegion1PageTable(idle_proc);

    // Create the PTEs for the kernel text and data with the proper protections.
    unsigned int i;
    for (i = 0; i < kernel_brk_page; i++) {
        region_0_page_table[i].valid = 1;
        region_0_page_table[i].pfn = i;
        if (i < kernel_data_start_page) { // Text section.
            region_0_page_table[i].prot = PROT_READ | PROT_EXEC;
        } else { // Data section.
            region_0_page_table[i].prot = PROT_READ | PROT_WRITE;
        }
    }

    // Create the PTEs for the idle proc's kernel stack with page = frame and the proper protections.
    unsigned int kernel_stack_base_page = ADDR_TO_PAGE(KERNEL_STACK_BASE);
    for (i = 0; i < NUM_KERNEL_PAGES; i++) {
        idle_proc->kernel_stack_page_table[i].valid = 1;
        idle_proc->kernel_stack_page_table[i].pfn = i + kernel_stack_base_page;
        idle_proc->kernel_stack_page_table[i].prot = PROT_READ | PROT_WRITE;
    }

    // Load this new page table.
    UseKernelStackForProc(idle_proc);
    idle_proc->kernel_context_initialized = true;

    // Set the TLB registers for the region 0 page table.
    WriteRegister(REG_PTBR0, (unsigned int) region_0_page_table);
    WriteRegister(REG_PTLR0, VMEM_0_SIZE / PAGESIZE);

    // Set the TLB registers for the region 1 page table.
    WriteRegister(REG_PTBR1, (unsigned int) idle_proc->region_1_page_table);
    WriteRegister(REG_PTLR1, VMEM_1_SIZE / PAGESIZE);

    // Enable virtual memory. Wooooo!
    TracePrintf(TRACE_LEVEL_DETAIL_INFO, "Enabling virtual memory. Wooooo!\n");
    virtual_memory_enabled = true;
    WriteRegister(REG_VM_ENABLE, 1);

    // Initialize the physical memory management data structures. Then, initialize the
    // kernel bookkeeping structs. Make idle the current proc since it has a region 1
    // page table that this call can use.
    current_proc = idle_proc;
    InitializePhysicalMemoryManagement(pmem_size);
    InitBookkeepingStructs();

    int rc = LoadProgram("idle", NULL, idle_proc);
    if (rc != SUCCESS) {
        TracePrintf(TRACE_LEVEL_TERMINAL_PROBLEM, "KernelStart: FAILED TO LOAD IDLE!!\n");
        Halt();
    }

    // Determine the name of the init program.
    char *init_program_name = "init";
    if (cmd_args[0]) {
        init_program_name = cmd_args[0];
    }

    // Load the init program, but first make sure we are pointing to its region 1 page table.
    PCB *init_proc = NewBlankPCBWithPageTables(model_user_context);
    WriteRegister(REG_PTBR1, (unsigned int) init_proc->region_1_page_table);
    WriteRegister(REG_TLB_FLUSH, TLB_FLUSH_1);
    rc = LoadProgram(init_program_name, cmd_args, init_proc);
    if (rc != SUCCESS) {
        TracePrintf(TRACE_LEVEL_TERMINAL_PROBLEM, "KernelStart: FAILED TO LOAD INIT!!\n");
        Halt();
    }

    // Make idle the current proc.
    current_proc = idle_proc;
    WriteRegister(REG_PTBR1, (unsigned int) idle_proc->region_1_page_table);
    WriteRegister(REG_TLB_FLUSH, TLB_FLUSH_1);

    // Place the init proc in the ready queue. On the first clock tick, the init
    // process will be initialized and run.
    ListAppend(ready_queue, init_proc, init_proc->pid);

    // Use the idle proc's user context after returning from KernelStart().
    *uctxt = idle_proc->user_context;
}
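/* For context: the pte fields set in KernelStart() (valid, prot, pfn) come
   from the hardware-defined page table entry. A rough sketch of its shape;
   the field widths here are assumed for illustration, not confirmed against
   the hardware header. */
struct pte_sketch {
    unsigned int valid : 1;  /* entry maps a frame */
    unsigned int prot : 3;   /* PROT_READ | PROT_WRITE | PROT_EXEC bits */
    unsigned int pfn : 24;   /* physical frame number */
};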
void SetKernelData(void *_KernelDataStart, void *_KernelDataEnd) {
    // Round the break up to the first page boundary at or above _KernelDataEnd.
    kernel_brk_page = ADDR_TO_PAGE(((unsigned int) _KernelDataEnd) - 1) + 1;
    kernel_data_start_page = ADDR_TO_PAGE(_KernelDataStart);
}
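/* Worked example of the round-up above, assuming PAGESIZE == 0x1000
   (PAGESHIFT == 12); the addresses are illustrative only:
     _KernelDataEnd == 0x24001 -> ADDR_TO_PAGE(0x24000) + 1 == 0x24 + 1 == 0x25
     _KernelDataEnd == 0x24000 -> ADDR_TO_PAGE(0x23fff) + 1 == 0x23 + 1 == 0x24
   That is, kernel_brk_page is the first page entirely above the kernel data,
   and an end address already on a page boundary does not waste an extra page. */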