// Setup a two-level page table: // boot_pgdir is the linear address of the root // boot_cr3 is the physical address of the root // After that turn on paging. void i386_vm_init(void) { pde_t * pgdir; int i; // init physical memory allocation and deallocation spin lock initlock(&phy_mem_lock, "phy_mem"); // create initial page directory , no need to acquire spin lock because // no other processors are running pgdir = (pde_t *)alloc_page(); memset(pgdir, 0, PAGE); boot_pgdir = pgdir; boot_cr3 = (uint)pgdir; // turn the page directory into a page table so that all page table // entries containing mappings for the entire space will be mapped // into the 4 Meg region at VPT pgdir[PDX(VPT)] = boot_cr3 | PTE_W | PTE_P; pgdir[PDX(UVPT)] = boot_cr3 | PTE_U | PTE_P; // map the range of memory from 0~0x100000 to itself boot_map_segment(pgdir, 0, 0, 0x100000, PTE_U | PTE_W); for (i = 0; i < e820_memmap->nr_map; i ++) { uint addr = (uint)e820_memmap->map[i].addr; uint size = (uint)e820_memmap->map[i].size; if (addr >= 0x100000) { cprintf("map : %x,%x with size %x\n",(addr >> 16),(addr & 0xffff),size); boot_map_segment(pgdir, addr, addr, size, PTE_U | PTE_W); } }
void mips_vm_init() { extern char end[]; extern int mCONTEXT; extern struct Env *envs; Pde *pgdir; u_int n; pgdir = alloc(BY2PG, BY2PG, 1); mCONTEXT = (int)pgdir; boot_pgdir = pgdir; // printf("pmap.c:\tinit()\tKVPT:%x\tend:%x\n",(int)(pgdir),(int)(&end)); // printf("pmap.c:\tinit()\talloc %d * %d \n",npage,sizeof(struct Page)); pages = (struct Page *)alloc(npage * sizeof(struct Page), BY2PG, 1); n = ROUND(npage * sizeof(struct Page), BY2PG); boot_map_segment(pgdir, UPAGES, n, PADDR(pages), PTE_R); envs = (struct Env *)alloc(NENV * sizeof(struct Env), BY2PG, 1); boot_map_segment(pgdir, UENVS, NENV * sizeof(struct Env), PADDR(envs), PTE_R); //panic("-------------------init not finish-------------"); }
// // Initialize the kernel virtual memory layout for environment e. // Allocate a page directory, set e->env_pgdir and e->env_cr3 accordingly, // and initialize the kernel portion of the new environment's address space. // Do NOT (yet) map anything into the user portion // of the environment's virtual address space. // // Returns 0 on success, < 0 on error. Errors include: // -E_NO_MEM if page directory or table could not be allocated. // static int env_setup_vm(struct Env *e) { int i, r; struct Page *p = NULL; // Allocate a page for the page directory if ((r = page_alloc(&p)) < 0) return r; // Now, set e->env_pgdir and e->env_cr3, // and initialize the page directory. // // Hint: // - Remember that page_alloc doesn't zero the page. // - The VA space of all envs is identical above UTOP // (except at VPT and UVPT, which we've set below). // See inc/memlayout.h for permissions and layout. // Can you use boot_pgdir as a template? Hint: Yes. // (Make sure you got the permissions right in Lab 2.) // - The initial VA below UTOP is empty. // - You do not need to make any more calls to page_alloc. // - Note: In general, pp_ref is not maintained for // physical pages mapped only above UTOP, but env_pgdir // is an exception -- you need to increment env_pgdir's // pp_ref for env_free to work correctly. // LAB 3: Your code here. 
memset(page2kva(p), 0, PGSIZE); e->env_pgdir = page2kva(p); e->env_cr3 = page2pa(p); p->pp_ref ++; #if 0 boot_map_segment(e->env_pgdir, UPAGES, ROUNDUP(npage*sizeof(struct Page), PGSIZE), (physaddr_t)PADDR(pages), PTE_U); boot_map_segment(e->env_pgdir, UENVS, ROUNDUP(NENV*sizeof(struct Env), PGSIZE), (physaddr_t)PADDR(envs), PTE_U); boot_map_segment(e->env_pgdir, KSTACKTOP-KSTKSIZE, KSTKSIZE, (physaddr_t)PADDR(bootstack), PTE_W); boot_map_segment(e->env_pgdir, KSTACKTOP-PTSIZE, PTSIZE-KSTKSIZE, 0, 0); boot_map_segment(e->env_pgdir, KERNBASE, 0xffffffff-KERNBASE+1, 0, PTE_W); #else for (i=PDX(UTOP); i<NPDENTRIES; i++) e->env_pgdir[i] = boot_pgdir[i]; #endif // VPT and UVPT map the env's own page table, with // different permissions. e->env_pgdir[PDX(VPT)] = e->env_cr3 | PTE_P | PTE_W; e->env_pgdir[PDX(UVPT)] = e->env_cr3 | PTE_P | PTE_U; return 0; }
//pmm_init - setup a pmm to manage physical memory, build PDT&PT to setup paging mechanism // - check the correctness of pmm & paging mechanism, print PDT&PT void pmm_init(void) { init_pmm_manager (); page_init (); #ifndef NOCHECK //check_alloc_page(); #endif boot_pgdir = boot_alloc_page (); memset (boot_pgdir, 0, PGSIZE); boot_pgdir_pa = PADDR (boot_pgdir); current_pgdir_pa = boot_pgdir_pa; #ifndef NOCHECK //check_pgdir (); #endif static_assert(KERNBASE % PTSIZE == 0 && KERNTOP % PTSIZE == 0); boot_pgdir[PDX(VPT)] = PADDR(boot_pgdir) | PTE_P | PTE_SPR_R | PTE_SPR_W | PTE_A | PTE_D; boot_map_segment(boot_pgdir, KERNBASE, RAM_SIZE, 0, PTE_SPR_R | PTE_SPR_W | PTE_A | PTE_D); enable_paging (); #ifndef NOCHECK //check_boot_pgdir (); #endif print_pgdir (kprintf); slab_init (); }
// Modify mappings in boot_pml4e to support SMP // - Map the per-CPU stacks in the region [KSTACKTOP-PTSIZE, KSTACKTOP) // static void mem_init_mp(void) { // Map per-CPU stacks starting at KSTACKTOP, for up to 'NCPU' CPUs. // // For CPU i, use the physical memory that 'percpu_kstacks[i]' refers // to as its kernel stack. CPU i's kernel stack grows down from virtual // address kstacktop_i = KSTACKTOP - i * (KSTKSIZE + KSTKGAP), and is // divided into two pieces, just like the single stack you set up in // mem_init: // * [kstacktop_i - KSTKSIZE, kstacktop_i) // -- backed by physical memory // * [kstacktop_i - (KSTKSIZE + KSTKGAP), kstacktop_i - KSTKSIZE) // -- not backed; so if the kernel overflows its stack, // it will fault rather than overwrite another CPU's stack. // Known as a "guard page". // Permissions: kernel RW, user NONE // // LAB 4: Your code here: int i = 0; for (i=0; i<NCPU; i++) { uint32_t kstacktop = KSTACKTOP - i * (KSTKSIZE + KSTKGAP); boot_map_segment(boot_pml4e, kstacktop-KSTKSIZE, KSTKSIZE, PADDR(percpu_kstacks[i]), PTE_W); //boot_map_segment(boot_pml4e, kstacktop, KSTKSIZE + KSTKGAP, PADDR(percpu_kstacks[i]), PTE_W); } }
//pmm_init - setup a pmm to manage physical memory, build PDT&PT to setup paging mechanism
//         - check the correctness of pmm & paging mechanism, print PDT&PT
void
pmm_init(void) {
    //We need to alloc/free the physical memory (granularity is 4KB or other size).
    //So a framework of physical memory manager (struct pmm_manager)is defined in pmm.h
    //First we should init a physical memory manager(pmm) based on the framework.
    //Then pmm can alloc/free the physical memory.
    //Now the first_fit/best_fit/worst_fit/buddy_system pmm are available.
    init_pmm_manager();

    // detect physical memory space, reserve already used memory,
    // then use pmm->init_memmap to create free page list
    page_init();

    //use pmm->check to verify the correctness of the alloc/free function in a pmm
    check_alloc_page();

    // create boot_pgdir, an initial page directory(Page Directory Table, PDT)
    boot_pgdir = boot_alloc_page();
    memset(boot_pgdir, 0, PGSIZE);
    boot_cr3 = PADDR(boot_pgdir);

    check_pgdir();

    static_assert(KERNBASE % PTSIZE == 0 && KERNTOP % PTSIZE == 0);

    // recursively insert boot_pgdir in itself
    // to form a virtual page table at virtual address VPT
    // cprintf("haah1\n");

    // map all physical memory to linear memory with base linear addr KERNBASE
    //linear_addr KERNBASE~KERNBASE+KMEMSIZE = phy_addr 0~KMEMSIZE
    //But shouldn't use this map until enable_paging() & gdt_init() finished.
    // NOTE(review): this maps from linear address 0, not KERNBASE — presumably
    // the kernel runs identity-mapped on this platform; confirm against the
    // linker script / memlayout.
    boot_map_segment(boot_pgdir, 0, KMEMSIZE, 0, PTE_TYPE_URWX_SRWX | PTE_R | PTE_V);

    // self-map the page directory at VPT as a table entry
    boot_pgdir[PDX(VPT)] = PADDR(boot_pgdir) | PTE_TYPE_TABLE | PTE_R | PTE_V;
    // pgdir_alloc_page(boot_pgdir, USTACKTOP-PGSIZE , PTE_TYPE_URW_SRW);
    //cprintf("haha2\n");

    //temporary map:
    //virtual_addr 3G~3G+4M = linear_addr 0~4M = linear_addr 3G~3G+4M = phy_addr 0~4M
    //boot_pgdir[0] = boot_pgdir[PDX(KERNBASE)];
    //cprintf("OK!\n");
    enable_paging();
    // cprintf("haah\n");

    //reload gdt(third time,the last time) to map all physical memory
    //virtual_addr 0~4G=liear_addr 0~4G
    //then set kernel stack(ss:esp) in TSS, setup TSS in gdt, load TSS
    //gdt_init();

    //disable the map of virtual_addr 0~4M
    //boot_pgdir[0] = 0;

    //now the basic virtual memory map(see memalyout.h) is established.
    //check the correctness of the basic virtual memory map.
    check_boot_pgdir();

    print_pgdir();

    kmalloc_init();
}
// // Reserve size bytes in the MMIO region and map [pa,pa+size) at this // location. Return the base of the reserved region. size does *not* // have to be multiple of PGSIZE. // void * mmio_map_region(physaddr_t pa, size_t size) { // Where to start the next region. Initially, this is the // beginning of the MMIO region. Because this is static, its // value will be preserved between calls to mmio_map_region // (just like nextfree in boot_alloc). static uintptr_t base = MMIOBASE; // Reserve size bytes of virtual memory starting at base and // map physical pages [pa,pa+size) to virtual addresses // [base,base+size). Since this is device memory and not // regular DRAM, you'll have to tell the CPU that it isn't // safe to cache access to this memory. Luckily, the page // tables provide bits for this purpose; simply create the // mapping with PTE_PCD|PTE_PWT (cache-disable and // write-through) in addition to PTE_W. (If you're interested // in more details on this, see section 10.5 of IA32 volume // 3A.) // // Be sure to round size up to a multiple of PGSIZE and to // handle if this reservation would overflow MMIOLIM (it's // okay to simply panic if this happens). // // Hint: The staff solution uses boot_map_region. // // Your code here: size = ROUNDUP(size, PGSIZE); if (base+size >= MMIOLIM) { panic("trying to map address beyond MMIO region"); } boot_map_segment(boot_pml4e, base, size, pa, PTE_W|PTE_PCD|PTE_PWT); void *result = (void *)base; base = base + size; return result; /* uintptr_t va_i = base; physaddr_t pa_i = pa; for (; va_i<base+size; va_i+=PGSIZE,pa_i+=PGSIZE) { int result = page_insert(boot_pml4e, pa2page(pa_i), (void *)va_i, PTE_W|PTE_PCD|PTE_PWT); cprintf("Reached, page insert result = %d", result); } */ panic("mmio_map_region not implemented"); }
// // Initializes the kernel virtual memory layout for environment e. // // Allocates a page directory and initializes it. Sets // e->env_cr3 and e->env_pgdir accordingly. // // RETURNS // 0 -- on sucess // <0 -- otherwise // static int env_setup_vm(struct Env *e) { // Hint: int i, r; struct Page *p = NULL; Pde *pgdir; // Allocate a page for the page directory if ((r = page_alloc(&p)) < 0) { panic("env_setup_vm - page_alloc error\n"); return r; } p->pp_ref++; //printf("env.c:env_setup_vm:page_alloc:p\t@page:%x\t@:%x\tcon:%x\n",page2pa(p),(int)&p,(int)p); //printf("env_setup_vm : 1\n"); // Hint: // - The VA space of all envs is identical above UTOP // (except at VPT and UVPT) // - Use boot_pgdir // - Do not make any calls to page_alloc // - Note: pp_refcnt is not maintained for physical pages mapped above UTOP. pgdir = (Pde *)page2kva(p); // printf("env.c:env_setup_vm:\tpgdir\t:con:%x\n",(int)pgdir); for(i=0;i<UTOP; i+=BY2PG) pgdir[PDX(i)] = 0; for(i=PDX(UTOP); i<1024;i++) { //printf("boot_pgdir[%d] = %x\n",i,boot_pgdir[PDX(i)]); pgdir[i] = boot_pgdir[i]; } //printf("env_setup_vm : 2\n"); e->env_pgdir = pgdir; //printf("env_setup_vm : 3\n"); // ...except at VPT and UVPT. These map the env's own page table //e->env_pgdir[PDX(UVPT)] = e->env_cr3 | PTE_P | PTE_U; e->env_cr3 = PADDR(pgdir); boot_map_segment(e->env_pgdir,UVPT,PDMAP,PADDR(pgdir),PTE_R); // printf("env.c:env_setup_vm:\tboot_map_segment(%x,%x,%x,%x,PTE_R)\n",e->env_pgdir,UVPT,PDMAP,PADDR(pgdir)); e->env_pgdir[PDX(UVPT)] = e->env_cr3 | PTE_V | PTE_R; //printf("env_setup_vm : 4\n"); return 0; }
/**
 * Initialize page management mechanism.
 * Parts of no use are deleted, while no extra parts except a check is added.
 * arch/x86/mm/pmm.c should be a good reference.
 */
void
pmm_init (void)
{
    // Sanity-check the virtual/physical mapping assumptions first.
    check_vpm ();

    // Select the physical memory manager implementation.
    init_pmm_manager ();

    // Detect memory, build the free-page list, then verify alloc/free.
    page_init ();
    check_alloc_page ();

    // Create and zero the boot-time page directory, then verify it.
    boot_pgdir = boot_alloc_page();
    memset(boot_pgdir, 0, PGSIZE);
    check_pgdir();

    /* register kernel code and data pages in the table so that it won't raise bad segv. */
    boot_map_segment (boot_pgdir, KERNBASE, mem_size, 0, PTE_W);
    check_boot_pgdir ();

    print_pgdir (kprintf);

    slab_init ();
}
// Set up a four-level page table: // boot_pml4e is its linear (virtual) address of the root // // This function only sets up the kernel part of the address space // (ie. addresses >= UTOP). The user part of the address space // will be setup later. // // From UTOP to ULIM, the user is allowed to read but not write. // Above ULIM the user cannot read or write. void x64_vm_init(void) { pml4e_t* pml4e; uint32_t cr0; int i; size_t n; int r; struct Env *env; i386_detect_memory(); //panic("i386_vm_init: This function is not finished\n"); ////////////////////////////////////////////////////////////////////// // create initial page directory. ///panic("x64_vm_init: this function is not finished\n"); pml4e = boot_alloc(PGSIZE); memset(pml4e, 0, PGSIZE); boot_pml4e = pml4e; boot_cr3 = PADDR(pml4e); ////////////////////////////////////////////////////////////////////// // Allocate an array of npage 'struct Page's and store it in 'pages'. // The kernel uses this array to keep track of physical pages: for // each physical page, there is a corresponding struct Page in this // array. 'npage' is the number of physical pages in memory. // User-level programs will get read-only access to the array as well. // Your code goes here: pages = boot_alloc(npages * sizeof(struct Page)); ////////////////////////////////////////////////////////////////////// // Make 'envs' point to an array of size 'NENV' of 'struct Env'. // LAB 3: Your code here. envs = boot_alloc(NENV * sizeof(struct Env)); ////////////////////////////////////////////////////////////////////// // Now that we've allocated the initial kernel data structures, we set // up the list of free physical pages. Once we've done so, all further // memory management will go through the page_* functions. 
In // particular, we can now map memory using boot_map_segment or page_insert page_init(); check_page_free_list(1); check_page_alloc(); page_check(); ////////////////////////////////////////////////////////////////////// // Now we set up virtual memory ////////////////////////////////////////////////////////////////////// // Map 'pages' read-only by the user at linear address UPAGES // Permissions: // - the new image at UPAGES -- kernel R, user R // (ie. perm = PTE_U | PTE_P) // - pages itself -- kernel RW, user NONE // Your code goes here: ////////////////////////////////////////////////////////////////////// // Map the 'envs' array read-only by the user at linear address UENVS // (ie. perm = PTE_U | PTE_P). // Permissions: // - the new image at UENVS -- kernel R, user R // - envs itself -- kernel RW, user NONE // LAB 3: Your code here. boot_map_segment(boot_pml4e, UPAGES, ROUNDUP(npages*sizeof(struct Page), PGSIZE), PADDR(pages), PTE_U | PTE_P); boot_map_segment(boot_pml4e, (uintptr_t)pages, ROUNDUP(npages *sizeof(struct Page), PGSIZE), PADDR(pages), PTE_P | PTE_W); boot_map_segment(boot_pml4e, UENVS, ROUNDUP(NENV*sizeof(struct Env), PGSIZE), PADDR(envs), PTE_U | PTE_P); boot_map_segment(boot_pml4e, (uintptr_t)envs, ROUNDUP(NENV *sizeof(struct Env), PGSIZE), PADDR(envs), PTE_P | PTE_W); ////////////////////////////////////////////////////////////////////// // Use the physical memory that 'bootstack' refers to as the kernel // stack. The kernel stack grows down from virtual address KSTACKTOP. // We consider the entire range from [KSTACKTOP-PTSIZE, KSTACKTOP) // to be the kernel stack, but break this into two pieces: // * [KSTACKTOP-KSTKSIZE, KSTACKTOP) -- backed by physical memory // * [KSTACKTOP-PTSIZE, KSTACKTOP-KSTKSIZE) -- not backed; so if // the kernel overflows its stack, it will fault rather than // overwrite memory. Known as a "guard page". 
// Permissions: kernel RW, user NONE // Your code goes here: boot_map_segment(boot_pml4e, KSTACKTOP-KSTKSIZE, KSTKSIZE, PADDR(bootstack), PTE_P | PTE_W); /////////////////////////////////////////////////////////////////////// // Map all of physical memory at KERNBASE. // Ie. the VA range [KERNBASE, 2^32) should map to // the PA range [0, 2^32 - KERNBASE) // We might not have 2^32 - KERNBASE bytes of physical memory, but // we just set up the mapping anyway. // Permissions: kernel RW, user NONE // Your code goes here: boot_map_segment(boot_pml4e, KERNBASE, ~(uint32_t)0 - KERNBASE + 1, 0, PTE_P | PTE_W); // Check that the initial page directory has been set up correctly. // Initialize the SMP-related parts of the memory map mem_init_mp(); check_boot_pml4e(boot_pml4e); ////////////////////////////////////////////////////////////////////// // Permissions: kernel RW, user NONE pdpe_t *pdpe = KADDR(PTE_ADDR(pml4e[0])); pde_t *pgdir = KADDR(PTE_ADDR(pdpe[3])); lcr3(boot_cr3); check_page_free_list(0); }