void
check_boot_pgdir(void) {
    pte_t *ptep;
    int i;
    for (i = 0; i < npage; i += PGSIZE) {
        assert((ptep = get_pte(boot_pgdir, (uintptr_t)KADDR(i), 0)) != NULL);
        assert(PTE_ADDR(*ptep) == i);
    }

    assert(PDE_ADDR(boot_pgdir[PDX(VPT)]) == PADDR(boot_pgdir));

    assert(boot_pgdir[0] == 0);

    struct Page *p;
    p = alloc_page();
    assert(page_insert(boot_pgdir, p, 0x100, PTE_W) == 0);
    assert(page_ref(p) == 1);
    assert(page_insert(boot_pgdir, p, 0x100 + PGSIZE, PTE_W) == 0);
    assert(page_ref(p) == 2);

    const char *str = "ucore: Hello world!!";
    strcpy((void *)0x100, str);
    assert(strcmp((void *)0x100, (void *)(0x100 + PGSIZE)) == 0);

    *(char *)(page2kva(p) + 0x100) = '\0';
    assert(strlen((const char *)0x100) == 0);

    free_page(p);
    free_page(pa2page(PDE_ADDR(boot_pgdir[0])));
    boot_pgdir[0] = 0;

    kprintf("check_boot_pgdir() succeeded!\n");
}
// get_pte - get the pte and return the kernel virtual address of this pte for la
//         - if the page table containing this pte doesn't exist, allocate a page for it
// parameter:
//  pgdir:  the kernel virtual base address of the PDT
//  la:     the linear address that needs to be mapped
//  create: a logical value deciding whether to allocate a page for the page table
// return value: the kernel virtual address of this pte
pte_t *
get_pte(pde_t *pgdir, uintptr_t la, bool create) {
    /* LAB2 EXERCISE 2: YOUR CODE
     *
     * If you need to visit a physical address, please use KADDR().
     * Please read pmm.h for useful macros.
     *
     * If you want a hint, the comments below can help you finish the code.
     *
     * Some useful MACROs and DEFINEs you can use in the implementation below:
     * MACROs or functions:
     *   PDX(la)              : the index of the page directory entry of virtual address la.
     *   KADDR(pa)            : takes a physical address and returns the corresponding kernel virtual address.
     *   set_page_ref(page, 1): marks the page as referenced once.
     *   page2pa(page)        : gets the physical address of the memory managed by this (struct Page *) page.
     *   struct Page *alloc_page(): allocates a page.
     *   memset(void *s, char c, size_t n): sets the first n bytes of the memory area pointed to by s
     *                          to the specified value c.
     * DEFINEs:
     *   PTE_V 0x001          // page table/directory entry flags bit : Present
     *   PTE_W 0x002          // page table/directory entry flags bit : Writeable
     *   PTE_U 0x004          // page table/directory entry flags bit : User can access
     */
#if 0
    pde_t *pdep = NULL;   // (1) find page directory entry
    if (0) {              // (2) check if entry is not present
                          // (3) check if creating is needed, then alloc page for page table
                          //     CAUTION: this page is used for page table, not for common data page
                          // (4) set page reference
        uintptr_t pa = 0; // (5) get physical address of page
                          // (6) clear page content using memset
                          // (7) set page directory entry's permission
    }
    return NULL;          // (8) return page table entry
#endif
    pde_t *pdep = &pgdir[PDX(la)];          // find the page directory entry for la
    if (!(*pdep & PTE_V)) {                 // the page table is not present yet
        struct Page *page;
        if (!create || (page = alloc_page()) == NULL) {
            return NULL;
        }
        set_page_ref(page, 1);
        uintptr_t pa = page2pa(page);
        memset(KADDR(pa), 0, PGSIZE);       // clear the newly allocated page table
        *pdep = pa | PTE_TYPE_TABLE | PTE_V | PTE_R;
    }
    return &((pte_t *)KADDR(PDE_ADDR(*pdep)))[PTX(la)];
}
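/*
 * The checks in this section exercise get_pte mostly through page_insert.
 * Below is a minimal sketch of such a caller, not this port's actual
 * implementation: page_ref_inc, page_ref_dec and E_NO_MEM are assumed
 * ucore-style helpers, and the permission handling is simplified to
 * PTE_V | perm (the real RISC-V port encodes permissions in PTE type bits).
 */
int
page_insert_sketch(pde_t *pgdir, struct Page *page, uintptr_t la, uint32_t perm) {
    pte_t *ptep = get_pte(pgdir, la, 1);    // create the page table on demand
    if (ptep == NULL) {
        return -E_NO_MEM;                   // assumed error code
    }
    page_ref_inc(page);                     // assumed helper
    if (*ptep & PTE_V) {                    // la is already mapped
        struct Page *old = pte2page(*ptep);
        if (old == page) {
            page_ref_dec(page);             // same page: undo the extra reference
        } else {
            page_ref_dec(old);              // different page: drop the old mapping's reference
            if (page_ref(old) == 0) {
                free_page(old);
            }
        }
    }
    *ptep = page2pa(page) | PTE_V | perm;   // install the new mapping
    tlb_invalidate(pgdir, la);
    return 0;
}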
static void
check_pgdir(void) {
    assert(npage <= KMEMSIZE / PGSIZE);
    assert(boot_pgdir != NULL && (uint32_t)PGOFF(boot_pgdir) == 0);
    assert(get_page(boot_pgdir, 0x0, NULL) == NULL);

    struct Page *p1, *p2;
    p1 = alloc_page();
    assert(page_insert(boot_pgdir, p1, 0x0, 0) == 0);

    pte_t *ptep;
    assert((ptep = get_pte(boot_pgdir, 0x0, 0)) != NULL);
    assert(pte2page(*ptep) == p1);
    assert(page_ref(p1) == 1);

    ptep = &((pte_t *)KADDR(PDE_ADDR(boot_pgdir[0])))[1];
    assert(get_pte(boot_pgdir, PGSIZE, 0) == ptep);

    p2 = alloc_page();
    assert(page_insert(boot_pgdir, p2, PGSIZE, PTE_TYPE_URW_SRW) == 0);
    assert((ptep = get_pte(boot_pgdir, PGSIZE, 0)) != NULL);
    assert(*ptep & PTE_TYPE_URW_SRW);
    assert(((boot_pgdir[0] & PTE_TYPE) == PTE_TYPE_TABLE) && (boot_pgdir[0] & PTE_V));
    assert(page_ref(p2) == 1);

    assert(page_insert(boot_pgdir, p1, PGSIZE, 0) == 0);
    assert(page_ref(p1) == 2);
    assert(page_ref(p2) == 0);
    assert((ptep = get_pte(boot_pgdir, PGSIZE, 0)) != NULL);
    assert(pte2page(*ptep) == p1);
    assert((*ptep & PTE_TYPE_URW_SRW) == 0);

    page_remove(boot_pgdir, 0x0);
    assert(page_ref(p1) == 1);
    assert(page_ref(p2) == 0);

    page_remove(boot_pgdir, PGSIZE);
    assert(page_ref(p1) == 0);
    assert(page_ref(p2) == 0);

    assert(page_ref(pde2page(boot_pgdir[0])) == 1);
    free_page(pde2page(boot_pgdir[0]));
    boot_pgdir[0] = 0;

    cprintf("check_pgdir() succeeded!\n");
}
static void
check_boot_pgdir(void) {
    pte_t *ptep;
    int i;
    for (i = 0; i < npage; i += PGSIZE) {
        assert((ptep = get_pte(boot_pgdir, (uintptr_t)KADDR(i), 0)) != NULL);
        assert(PTE_ADDR(*ptep) == i);
    }

    assert(PDE_ADDR(boot_pgdir[PDX(VPT)]) == PADDR(boot_pgdir));

    assert(boot_pgdir[256] == 0);

    struct Page *p;
    p = alloc_page();
    assert(page_insert(boot_pgdir, p, 0x40000100, PTE_TYPE_SRW) == 0);
    assert(page_ref(p) == 1);
    assert(page_insert(boot_pgdir, p, 0x40000100 + PGSIZE, PTE_TYPE_SRW) == 0);
    assert(page_ref(p) == 2);

    const char *str = "ucore: Hello world!!";
    strcpy((void *)0x40000100, str);
    assert(strcmp((void *)0x40000100, (void *)(0x40000100 + PGSIZE)) == 0);
    cprintf("%s\n\n", (char *)0x40000100);

    *(char *)(page2kva(p) + 0x100) = '\0';
    assert(strlen((const char *)0x40000100) == 0);

    free_page(p);
    free_page(pde2page(boot_pgdir[256]));
    boot_pgdir[256] = 0;

    cprintf("check_boot_pgdir() succeeded!\n");
}
/**
 * Check that the boot page directory is set up correctly.
 * NOTE: we don't have an mm_struct at present. Since a write to a clean page
 * also raises SIGSEGV and we cannot handle that yet, just mark every page we
 * insert as accessed and dirty.
 */
void
check_boot_pgdir(void) {
    pte_t *ptep;
    int i;
    for (i = 0; i < npage; i += PGSIZE) {
        assert((ptep = get_pte(boot_pgdir, (uintptr_t)KADDR(i), 0)) != NULL);
        assert(PTE_ADDR(*ptep) == i);
    }

    //assert(PDE_ADDR(boot_pgdir[PDX(VPT)]) == PADDR(boot_pgdir));

    assert(boot_pgdir[PDX(TEST_PAGE)] == 0);

    struct Page *p;
    p = alloc_page();
    assert(page_insert(boot_pgdir, p, TEST_PAGE, PTE_W | PTE_D | PTE_A) == 0);
    assert(page_ref(p) == 1);
    assert(page_insert(boot_pgdir, p, TEST_PAGE + PGSIZE, PTE_W | PTE_D | PTE_A) == 0);
    assert(page_ref(p) == 2);

    const char *str = "ucore: Hello world!!";
    strcpy((void *)TEST_PAGE, str);
    assert(strcmp((void *)TEST_PAGE, (void *)(TEST_PAGE + PGSIZE)) == 0);

    *(char *)(page2kva(p)) = '\0';
    assert(strlen((const char *)TEST_PAGE) == 0);

    /*
     * In the um architecture, clearing a page table entry does not make the
     * linear address invalid, so unmap the pages by hand.
     */
    tlb_invalidate(boot_pgdir, TEST_PAGE);
    tlb_invalidate(boot_pgdir, TEST_PAGE + PGSIZE);

    free_page(p);
    free_page(pa2page(PDE_ADDR(boot_pgdir[PDX(TEST_PAGE)])));
    boot_pgdir[PDX(TEST_PAGE)] = 0;

    kprintf("check_boot_pgdir() succeeded.\n");
}
static pgentry_t
pg_clone_page_4m(pgentry_t page) {
    uint32_t flags = (uint32_t)PDE_FLAGS(page);
    paddr_t pgframe = (paddr_t)PDE_ADDR(page);

    if ((flags & PAGE_PRESENT) == 0) {
        // Page not present (though some flags may be set): nothing to clone
        return page;
    }
    if ((flags & PAGE_LINK) != 0) {
        // Linked (shared) mapping: keep pointing at the original frame
        return page;
    }

    // Allocate a new 4 MB frame and copy the original, one page at a time
    paddr_t newpgframe = frame_alloc_4m();
    int num_pages = 1024;
    paddr_t limit = newpgframe + (paddr_t)(num_pages * PAGE_SIZE);
    frame_set_range(newpgframe, limit);
    for (int i = 0; i < num_pages; i++) {
        paddr_t offset = (paddr_t)i * PAGE_SIZE;
        frame_copy(pgframe + offset, newpgframe + offset);
    }
    return PAGE_ENTRY(newpgframe, flags);
}
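/*
 * A caller would typically apply pg_clone_page_4m to every entry of a page
 * directory when duplicating an address space. The loop below is only a
 * hypothetical sketch: PGDIR_ENTRIES and the pgdir_dst/pgdir_src names are
 * illustrative assumptions, not taken from the code above.
 */
#define PGDIR_ENTRIES 1024

static void
pg_clone_dir_4m(pgentry_t *pgdir_dst, const pgentry_t *pgdir_src) {
    for (int i = 0; i < PGDIR_ENTRIES; i++) {
        // Not-present and PAGE_LINK entries come back unchanged;
        // present private entries get a freshly copied 4 MB frame.
        pgdir_dst[i] = pg_clone_page_4m(pgdir_src[i]);
    }
}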
pte_t *
mmap_lookup(pml4e_t *pml4, uint64_t va, bool create) {
    struct page *page4pdp = NULL, *page4pd = NULL, *page4pt = NULL;

    pml4e_t pml4e = pml4[PML4_IDX(va)];
    if ((pml4e & PML4E_P) != 0)
        goto pml4e_found;
    if (create == false)
        return NULL;
    // Prepare a new page directory pointer table
    if ((page4pdp = page_alloc()) == NULL)
        return NULL;
    memset(page2kva(page4pdp), 0, PAGE_SIZE);
    page4pdp->ref = 1;
    // Insert the new PDP into the PML4
    pml4e = pml4[PML4_IDX(va)] = page2pa(page4pdp) | PML4E_P | PML4E_W | PML4E_U;

pml4e_found:
    assert((pml4e & PML4E_P) != 0);
    pdpe_t *pdp = VADDR(PML4E_ADDR(pml4e));
    pdpe_t pdpe = pdp[PDP_IDX(va)];
    if ((pdpe & PDPE_P) != 0)
        goto pdpe_found;
    if (create == false)
        return NULL;
    // Prepare a new page directory
    if ((page4pd = page_alloc()) == NULL)
        return NULL;
    memset(page2kva(page4pd), 0, PAGE_SIZE);
    page4pd->ref = 1;
    // Insert the new page directory into the page directory pointer table
    pdpe = pdp[PDP_IDX(va)] = page2pa(page4pd) | PDPE_P | PDPE_W | PDPE_U;

pdpe_found:
    assert((pdpe & PDPE_P) != 0);
    pde_t *pd = VADDR(PDPE_ADDR(pdpe));
    pde_t pde = pd[PD_IDX(va)];
    if ((pde & PDE_P) != 0)
        goto pde_found;
    if (create == false)
        return NULL;
    // Prepare a new page table
    if ((page4pt = page_alloc()) == NULL)
        return NULL;
    memset(page2kva(page4pt), 0, PAGE_SIZE);
    page4pt->ref = 1;
    // Insert the new page table into the page directory
    pde = pd[PD_IDX(va)] = page2pa(page4pt) | PDE_P | PDE_W | PDE_U;

pde_found:
    assert((pde & PDE_P) != 0);
    pte_t *pt = VADDR(PDE_ADDR(pde));
    return &pt[PT_IDX(va)];
}
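/*
 * Hypothetical helper built on mmap_lookup: map the physical page backing
 * `pg` at virtual address `va`. This is only a sketch; the PTE_P/PTE_W leaf
 * flag names and the -1 error convention are assumptions for illustration,
 * not taken from the code above.
 */
int
mmap_insert(pml4e_t *pml4, struct page *pg, uint64_t va, uint64_t perm) {
    pte_t *pte = mmap_lookup(pml4, va, true);   // walk, creating intermediate tables
    if (pte == NULL)
        return -1;                              // out of memory while building the walk
    if ((*pte & PTE_P) != 0)
        return -1;                              // keep the sketch simple: refuse to remap
    pg->ref++;
    *pte = page2pa(pg) | PTE_P | PTE_W | perm;  // install the leaf mapping
    return 0;
}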
/**
 * Remap the specified address to a new page with new permissions.
 * @param pgdir page directory
 * @param la    linear address
 */
void
tlb_update (pde_t *pgdir, uintptr_t la) {
    la = ROUNDDOWN (la, PGSIZE);
    pte_t *pte = get_pte (pgdir, la, 0);
    if (pte == NULL || (*pte & PTE_P) == 0)
        panic ("invalid tlb flushing\n");
    uint32_t pa = PTE_ADDR(*pte);

    /* A tricky method to keep the page table right under most circumstances.
     * Please consult the internal documentation for details. */
    int r = 1, w = 1, x = 1;
    if (Get_PTE_A(pte) == 0)
        r = w = x = 0;
    else if (Get_PTE_W(pte) == 0 || Get_PTE_D(pte) == 0)
        w = 0;

    /* Make sure that the page is invalid before mapping.
     * It would actually be better to use 'mprotect' here. */
    tlb_invalidate (pgdir, la);

    struct proc_struct *proc = find_proc_by_pgdir (pgdir);
    if (current != NULL && proc != NULL) {
        /* Map the page into the container process found, using the stub code */
        if (host_mmap (proc, (void*)la, PGSIZE,
                       (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) | (x ? PROT_EXEC : 0),
                       MAP_SHARED | MAP_FIXED, ginfo->mem_fd, pa) == MAP_FAILED)
            panic ("map in child failed.\n");
    } else {
        /* Map the page into the host process */
        struct mmap_arg_struct args = {
            .addr = la,
            .len = PGSIZE,
            .prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) | (x ? PROT_EXEC : 0),
            .flags = MAP_SHARED | MAP_FIXED,
            .fd = ginfo->mem_fd,
            .offset = pa,
        };
        syscall1 (__NR_mmap, (long)&args);
    }
}

/**
 * Unmap the page specified by @la in the container process corresponding to @pgdir.
 * @param pgdir page directory
 * @param la    the logical address of the page to be flushed
 */
void
tlb_invalidate (pde_t *pgdir, uintptr_t la) {
    struct proc_struct *proc = find_proc_by_pgdir (pgdir);
    if (current != NULL && proc != NULL) {
        if (host_munmap (proc, (void*)la, PGSIZE) < 0)
            panic ("unmap in child failed\n");
    } else {
        syscall2 (__NR_munmap, la, PGSIZE);
    }
}

/**
 * Invalidate [USERBASE, USERTOP).
 * Used by tests or do_execve when a 'clean' address space is needed (though not necessary).
 */
void
tlb_invalidate_user (void) {
    syscall2 (__NR_munmap, USERBASE, USERTOP - USERBASE);
}
int
cpu_pte_manipulate(struct mm_pte_manipulate *data) {
    pte_t               **pdep;
    pte_t               *pdep_l2base;
    pte_t               *ptep;
    pte_t               pte;
    pte_t               orig_pte;
    uintptr_t           l2_vaddr;
    unsigned            bits;
    unsigned            l1bits;
    pte_t               **l1pagetable;
    struct pa_quantum   *pq;
    unsigned            pa_status;
    int                 flushed;
    ADDRESS             *adp;
    int                 r;
    part_id_t           mpid;
    PROCESS             *prp;

    // Assume alignment has been checked by the caller
    adp = data->adp;
    if(adp == NULL) crash();
    l1pagetable = adp->cpu.pgdir;

    l1bits = 0x1;   // validity bit
    if(data->op & PTE_OP_BAD) {
        bits = PPC800_RPN_CI;
    } else if(data->prot & (PROT_READ|PROT_WRITE|PROT_EXEC)) {
        bits = 0xf1;
        //RUSH3: if PTE_OP_TEMP, mark PTE as accessible from procnto only if possible
        if(data->prot & PROT_WRITE) {
            bits |= (0x2<<PPC800_RPN_PP1_SHIFT) | (0x1<<PPC800_RPN_PP2_SHIFT);
        } else if(data->prot & (PROT_READ|PROT_EXEC)) {
            bits |= 0x3<<PPC800_RPN_PP1_SHIFT;
        }
        if(data->shmem_flags & SHMCTL_HAS_SPECIAL) {
            if(data->special & ~PPC_SPECIAL_MASK) {
                return EINVAL;
            }
            //RUSH1: If PPC_SPECIAL_E/W/M/G is on, should I report an error?
            if(data->special & PPC_SPECIAL_I) {
                bits |= PPC800_RPN_CI;
            }
        }
        if(data->prot & PROT_NOCACHE) {
            bits |= PPC800_RPN_CI;
            l1bits |= PPC800_TWC_G;
        }
    } else {
        bits = 0;
    }

    r = EOK;
    flushed = 0;
    prp = adp ? object_from_data(adp, address_cookie) : NULL;
    mpid = mempart_getid(prp, sys_memclass_id);

    for( ;; ) {
        if(data->start >= data->end) break;

        pdep = &l1pagetable[L1PAGEIDX(data->start)];
        l2_vaddr = (uintptr_t)*pdep;
        if(l2_vaddr == 0) {
            memsize_t resv = 0;

            if(!(data->op & (PTE_OP_MAP|PTE_OP_PREALLOC|PTE_OP_BAD))) {
                // Move vaddr to the next page directory
                data->start = (data->start + PDE_SIZE) & ~(PDE_SIZE - 1);
                if(data->start == 0) data->start = ~0;
                continue;
            }
            if(MEMPART_CHK_and_INCR(mpid, __PAGESIZE, &resv) != EOK) {
                return ENOMEM;
            }
            pq = pa_alloc(__PAGESIZE, __PAGESIZE, 0, 0, &pa_status, restrict_proc, resv);
            if(pq == NULL) {
                MEMPART_UNDO_INCR(mpid, __PAGESIZE, resv);
                return ENOMEM;
            }
            MEMCLASS_PID_USE(prp, mempart_get_classid(mpid), __PAGESIZE);
            pq->flags |= PAQ_FLAG_SYSTEM;
            pq->u.inuse.next = adp->cpu.l2_list;
            adp->cpu.l2_list = pq;
            l2_vaddr = pa_quantum_to_paddr(pq);
            if(pa_status & PAA_STATUS_NOT_ZEROED) {
                zero_page((uint32_t *)l2_vaddr, __PAGESIZE, NULL);
            }
        }
        *pdep = (pte_t *)(l2_vaddr | l1bits);

        if(data->op & PTE_OP_PREALLOC) {
            // Move vaddr to the next page directory
            data->start = (data->start + PDE_SIZE) & ~(PDE_SIZE - 1);
            if(data->start == 0) data->start = ~0;
            continue;
        }

        pdep_l2base = PDE_ADDR(*pdep);
        ptep = &(pdep_l2base[L2PAGEIDX(data->start)]);
        orig_pte = *ptep;

        if(data->op & (PTE_OP_MAP|PTE_OP_BAD)) {
            pte = data->paddr | bits;
        } else if(data->op & PTE_OP_UNMAP) {
            pte = 0;
        } else if(orig_pte & (0xfff & ~PPC800_RPN_CI)) {
            // PTE_OP_PROT
            pte = (orig_pte & ~0xfff) | bits;
        } else {
            // We don't change PTE permissions if we haven't mapped the page yet...
            pte = orig_pte;
        }
        *ptep = pte;

        if((orig_pte != 0) && (pte != orig_pte)) {
            flushed = 1;
            ppc_tlbie(data->start);
        }

        data->start += __PAGESIZE;
        data->paddr += __PAGESIZE;

        if((data->op & PTE_OP_PREEMPT) && KerextNeedPreempt()) {
            r = EINTR;
            break;
        }
    }
    if(flushed) {
        ppc_isync();
    }
    return r;
}