/*
 * set_page_data - record data id 'id' for the page containing virtual
 * address 'va' in the 4-level lookup table 'tbl', allocating any missing
 * intermediate table levels on demand.
 *
 * Returns 0 on success, -1 if an intermediate table could not be allocated.
 *
 * FIX: the original tested each slot for NULL *before* taking tbl->lock,
 * so two racing threads could both observe an empty slot and both allocate
 * a table (leaking one, and losing the entries written into the loser's
 * copy). The emptiness test is now repeated under the lock (double-checked
 * locking). malloc results were also used unchecked; calloc (which zeroes
 * and overflow-checks the size product) replaces malloc+memset, and
 * failures are reported instead of dereferenced.
 */
int set_page_data(struct DataTable *tbl, void *va, data_t id)
{
	pge_t *pge = &(tbl->table)[PGX(va)];
	if (!*pge) {
		pthread_mutex_lock(&tbl->lock);
		if (!*pge)                      /* re-check under the lock */
			*pge = calloc(TBLENTRIES, sizeof(pue_t));
		pthread_mutex_unlock(&tbl->lock);
		if (!*pge)
			return -1;
	}
	pue_t *pue = &(*pge)[PUX(va)];
	if (!*pue) {
		pthread_mutex_lock(&tbl->lock);
		if (!*pue)                      /* re-check under the lock */
			*pue = calloc(TBLENTRIES, sizeof(pme_t));
		pthread_mutex_unlock(&tbl->lock);
		if (!*pue)
			return -1;
	}
	pme_t *pme = &(*pue)[PMX(va)];
	if (!*pme) {
		pthread_mutex_lock(&tbl->lock);
		if (!*pme)                      /* re-check under the lock */
			*pme = calloc(TBLENTRIES, sizeof(data_t));
		pthread_mutex_unlock(&tbl->lock);
		if (!*pme)
			return -1;
	}
	DEBUG_LOG("set_page_data of %p from %lu to %lu", va, (*pme)[PTX(va)], id);
	(*pme)[PTX(va)] = id;
	return 0;
}
/*
 * checkmmu - sanity-check that the hardware page table maps va -> pa for
 * the current process (up).  Prints a diagnostic when the installed PTE
 * disagrees; an empty page-directory entry is fine (it will fault in
 * later and be filled on demand).
 */
void checkmmu(ulong va, ulong pa) {
	ulong *pdb, *pte;
	int pdbx;

	/* no address space installed yet -> nothing to check */
	if(up->mmupdb == 0)
		return;
	pdb = mmupdb(up->mmupdb, va);
	pdbx = PDX(va);
	if(MAPPN(pdb[pdbx]) == 0){
		/* okay to be empty - will fault and get filled */
		return;
	}
	pte = KADDR(MAPPN(pdb[pdbx]));
	if(MAPPN(pte[PTX(va)]) != pa){
		/* PAE entries are 64-bit wide, hence the second print format */
		if(!paemode)
			print("%ld %s: va=0x%08lux pa=0x%08lux pte=0x%08lux (0x%08lux)\n",
				up->pid, up->text, va, pa, pte[PTX(va)], MAPPN(pte[PTX(va)]));
		else
			print("%ld %s: va=0x%08lux pa=0x%08lux pte=0x%16llux (0x%08lux)\n",
				up->pid, up->text, va, pa, *(uvlong*)&pte[PTX(va)], MAPPN(pte[PTX(va)]));
	}
}
/*************************************************************************
 * LandZo Electronics Technology Co., Ltd.
 *
 * Function:   gpio_Interrupt_init
 * Purpose:    initialize a GPIO pin with a pin-change interrupt
 * Parameters: ptxn  port/pin number (PORTA .. PORTD)
 *             cfg   pin direction/pull config: 0 = input, 1 = output
 *                   (note: only input modes take effect in this function;
 *                   otherwise the direction register is left unchanged)
 *             mode  interrupt trigger mode
 * Returns:    none
 * Modified:   2012-9-15, tested
 *************************************************************************/
void gpio_Interrupt_init(PTxn ptxn, GPIO_CFG cfg, GPIO_INP mode)
{
	ASSERT( (PTn(ptxn) < 32u) );   // assert the pin index fits in a 32-bit port

	// Select the pin function: each pin has its own PORTx_PCRx register.
	// Program mux = GPIO (ALT1), the pull/filter config, and the IRQ trigger.
	PORT_PCR_REG(PORTX_BASE(ptxn), PTn(ptxn)) = (0 | PORT_PCR_MUX(1) | cfg | PORT_PCR_IRQC(mode) );

	// Digital filter left at its reset state:
	// PORT_DFER_REG(PORTX_BASE(ptxn)) = PORT_DFER_DFE( 1<<PTn(ptxn));

	// Pin direction: configure as input for any of the input variants
	// (lowest bit 0 = plain input || pull-up || pull-up w/ passive filter
	//  || pull-down || pull-down w/ passive filter)
	if( ( (cfg & 0x01) == GPI) || (cfg == GPI_UP) || (cfg == GPI_UP_PF) || (cfg == GPI_DOWN) || (cfg == GPI_DOWN_PF) )
	{
		GPIO_PDDR_REG(GPIOX_BASE(ptxn)) &= ~(1 << PTn(ptxn));   // set pin direction to input
	}
	// Enable the port-level interrupt in the NVIC
	// (only PORTA (index 0) and PORTD (index 3) are handled here)
	if(PTX(ptxn)==0)
		enable_irq(PortA_irq_no);
	else if(PTX(ptxn)==3)
		enable_irq(PortD_irq_no);
}
/*************************************************************************
 * LandZo Electronics Studio
 *
 * Function:   gpio_set
 * Purpose:    drive a GPIO pin to the requested level
 * Parameters: ptxn   port/pin number (macros in gpio.h / gpio_cfg.h)
 *             state  output level: 0 = low, 1 = high
 * Returns:    none
 * Modified:   2012-1-16, tested
 *************************************************************************/
void gpio_set(PTxn ptxn, uint8_t state)
{
	// Any value other than exactly 1 drives the pin low, as before.
	GPIO_SET(PTX(ptxn), PTn(ptxn), (state == 1) ? 1 : 0);
}
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns // a pointer to the page table entry (PTE) for linear address 'va'. // This requires walking the two-level page table structure. // // The relevant page table page might not exist yet. // If this is true, and create == false, then pgdir_walk returns NULL. // Otherwise, pgdir_walk allocates a new page table page with page_alloc. // - If the allocation fails, pgdir_walk returns NULL. // - Otherwise, the new page's reference count is incremented, // the page is cleared, // and pgdir_walk returns a pointer into the new page table page. // // Hint 1: you can turn a Page * into the physical address of the // page it refers to with page2pa() from kern/pmap.h. // // Hint 2: the x86 MMU checks permission bits in both the page directory // and the page table, so it's safe to leave permissions in the page // directory more permissive than strictly necessary. // // Hint 3: look at inc/mmu.h for useful macros that mainipulate page // table and page directory entries. // pte_t * pgdir_walk(pde_t *pgdir, const void *va, int create) { // Fill this function in pte_t * pte; if ((pgdir[PDX(va)] & PTE_P) != 0) { pte =(pte_t *) KADDR(PTE_ADDR(pgdir[PDX(va)])); return pte + PTX(va); } if(create != 0) { struct PageInfo *tmp; tmp = page_alloc(1); if(tmp != NULL) { tmp->pp_ref += 1; tmp->pp_link = NULL; pgdir[PDX(va)] = page2pa(tmp) | PTE_U | PTE_W | PTE_P; pte = (pte_t *)KADDR(page2pa(tmp)); return pte+PTX(va); } } return NULL; }
// pgdir_walk: return a pointer to the PTE for linear address 'va',
// allocating (zeroed, refcounted) the page-table page on demand when
// 'create' is nonzero.  Returns NULL when the table is absent and
// create is false, or when the allocation fails.
//
// FIX: the local that held the new page table's kernel-virtual base was
// declared pde_t* although it points at page-table entries; it is now
// pte_t* (same representation, but the type no longer misleads).
pte_t*
pgdir_walk(pde_t *pgdir, const void *va, int create)
{
	if (!pgdir) {
		cprintf("ERROR!!\n");
	}
	// Index of the page-directory entry covering 'va'.
	uintptr_t pgdir_offset = (uintptr_t)PDX(va);
	pte_t *ptentry;

	// Page-table page not present yet.
	if (!(pgdir[pgdir_offset] & PTE_P)) {
		if (!create)
			return NULL;
		struct PageInfo *new_page = page_alloc(ALLOC_ZERO);
		if (!new_page)
			return NULL;
		new_page->pp_ref++;
		// Directory permissions are left permissive; leaf PTEs decide.
		pgdir[pgdir_offset] = (page2pa(new_page)) | PTE_P | PTE_W | PTE_U;
		pte_t *new_table = page2kva(new_page);
		return &new_table[PTX(va)];
	} else {
		ptentry = KADDR(PTE_ADDR(pgdir[pgdir_offset]));
		return &(ptentry[PTX(va)]);
	}
}
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns // a pointer to the page table entry (PTE) for linear address 'va'. // This requires walking the two-level page table structure. // // The relevant page table page might not exist yet. // If this is true, and create == false, then pgdir_walk returns NULL. // Otherwise, pgdir_walk allocates a new page table page with page_alloc. // - If the allocation fails, pgdir_walk returns NULL. // - Otherwise, the new page's reference count is incremented, // the page is cleared, // and pgdir_walk returns a pointer into the new page table page. // // Hint 1: you can turn a Page * into the physical address of the // page it refers to with page2pa() from kern/pmap.h. // // Hint 2: the x86 MMU checks permission bits in both the page directory // and the page table, so it's safe to leave permissions in the page // directory more permissive than strictly necessary. // // Hint 3: look at inc/mmu.h for useful macros that mainipulate page // table and page directory entries. // pte_t * pgdir_walk(pde_t *pgdir, const void *va, int create) { pte_t * pgtbl = NULL; if(!(pgdir[PDX(va)] & PTE_P)) { if(!create) { return NULL; } else { struct Page * new_pgtbl = page_alloc(ALLOC_ZERO); if(new_pgtbl){ new_pgtbl->pp_ref += 1; pgdir[PDX(va)] = (physaddr_t) page2pa(new_pgtbl) | PTE_P | PTE_W | PTE_U; pgtbl = (pte_t *) KADDR(PTE_ADDR(pgdir[PDX(va)])); return &pgtbl[PTX(va)]; } else return NULL; } } else pgtbl = (pte_t *) KADDR(PTE_ADDR(pgdir[PDX(va)])); return &pgtbl[PTX(va)]; }
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns // a pointer to the page table entry (PTE) for linear address 'va'. // This requires walking the two-level page table structure. // // The relevant page table page might not exist yet. // If this is true, and create == false, then pgdir_walk returns NULL. // Otherwise, pgdir_walk allocates a new page table page with page_alloc. // - If the allocation fails, pgdir_walk returns NULL. // - Otherwise, the new page's reference count is incremented, // the page is cleared, // and pgdir_walk returns a pointer into the new page table page. // // Hint 1: you can turn a Page * into the physical address of the // page it refers to with page2pa() from kern/pmap.h. // // Hint 2: the x86 MMU checks permission bits in both the page directory // and the page table, so it's safe to leave permissions in the page // more permissive than strictly necessary. // // Hint 3: look at inc/mmu.h for useful macros that mainipulate page // table and page directory entries. // pte_t * pgdir_walk(pde_t *pgdir, const void *va, int create) { // Fill this function in struct Page* new_page; pde_t* pde = pgdir + PDX(va); pte_t* pte; // has created if (*pde & PTE_P) { pte = (pte_t*)KADDR(PTE_ADDR(*pde)); return pte + PTX(va); } // need create if (create == 0) { return NULL; } else { new_page = page_alloc(ALLOC_ZERO); if (new_page == NULL) { return NULL; } else { new_page->pp_ref++; *pde = page2pa(new_page) | PTE_P | PTE_W | PTE_U; pte = (pte_t*)KADDR(PTE_ADDR(*pde)); return pte + PTX(va); } } }
/* hint from check
ptep = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(PGSIZE)]));
assert(pgdir_walk(kern_pgdir, (void*)PGSIZE, 0) == ptep+PTX(PGSIZE));
*/
// pgdir_walk: return the kernel-virtual address of the PTE for 'va',
// allocating the page-table page on demand when 'create' is true.
// Returns NULL when the table is absent and create is false, or when
// the allocation fails.
pte_t *
pgdir_walk(pde_t *pgdir, const void *va, int create)
{
	pde_t *pde = pgdir + PDX(va);
	pte_t *ptep = NULL;

	if (*pde & PTE_P) {
		/* page table already present */
		ptep = KADDR(PTE_ADDR(*pde));
		return ptep + PTX(va);
	}
	if (create == false) {
		return NULL;
	}
	struct PageInfo *new_ptep = page_alloc(ALLOC_ZERO);
	if (!new_ptep) {
		return NULL;
	}
	new_ptep->pp_ref = 1;
	// FIX: also set PTE_W on the directory entry.  The x86 MMU ANDs the
	// permissions of both levels, so the original (PTE_P | PTE_U only)
	// silently made every page under this table read-only regardless of
	// the leaf PTE's own write bit.  Directory entries should stay at
	// least as permissive as any PTE below them.
	*pde = page2pa(new_ptep) | PTE_P | PTE_W | PTE_U;
	ptep = page2kva(new_ptep);
	return ptep + PTX(va);
}
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns // a pointer to the page table entry (PTE) for linear address 'va'. // This requires walking the two-level page table structure. // // The relevant page table page might not exist yet. // If this is true, and create == false, then pgdir_walk returns NULL. // Otherwise, pgdir_walk allocates a new page table page with page_alloc. // - If the allocation fails, pgdir_walk returns NULL. // - Otherwise, the new page's reference count is incremented, // the page is cleared, // and pgdir_walk returns a pointer into the new page table page. // // Hint 1: you can turn a Page * into the physical address of the // page it refers to with page2pa() from kern/pmap.h. // // Hint 2: the x86 MMU checks permission bits in both the page directory // and the page table, so it's safe to leave permissions in the page // more permissive than strictly necessary. // // Hint 3: look at inc/mmu.h for useful macros that mainipulate page // table and page directory entries. // pte_t * pgdir_walk(pde_t *pgdir, const void *va, int create) { // Fill this function in /*stone's solution for lab2*/ pde_t* pde = pgdir + PDX(va);//stone: get pde if (*pde & PTE_P){//stone:if present pte_t *pte = PTX(va) + (pte_t *)KADDR(PTE_ADDR(*pde)); return pte; } else if (create == 0) return NULL; else{ struct Page* pp = page_alloc(ALLOC_ZERO); if (pp == NULL) return NULL; else{ pp->pp_ref = 1; physaddr_t physaddr = page2pa(pp); *pde = physaddr | PTE_U | PTE_W | PTE_P; pte_t *pte = PTX(va) + (pte_t *)KADDR(PTE_ADDR(*pde)); return pte; } } //return NULL; }
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns // a pointer to the page table entry (PTE) for linear address 'va'. // This requires walking the two-level page table structure. // // The relevant page table page might not exist yet. // If this is true, and create == false, then pgdir_walk returns NULL. // Otherwise, pgdir_walk allocates a new page table page with page_alloc. // - If the allocation fails, pgdir_walk returns NULL. // - Otherwise, the new page's reference count is incremented, // the page is cleared, // and pgdir_walk returns a pointer into the new page table page. // // Hint 1: you can turn a Page * into the physical address of the // page it refers to with page2pa() from kern/pmap.h. // // Hint 2: the x86 MMU checks permission bits in both the page directory // and the page table, so it's safe to leave permissions in the page // more permissive than strictly necessary. // // Hint 3: look at inc/mmu.h for useful macros that mainipulate page // table and page directory entries. // pte_t * pgdir_walk(pde_t *pgdir, const void *va, int create) { if (pgdir == NULL) { panic("pgdir_walk: pgdir is NULL\n"); } pde_t pde = pgdir[PDX(va)]; // If page table page is present if (pde & PTE_P) { pte_t* pgt = KADDR(PTE_ADDR(pgdir[PDX(va)])); return &pgt[PTX(va)]; } // Otherwise page table page doesn't exist if (!create) return NULL; // Allocate new page table page (clear it as well) struct Page* pp; if (!(pp = page_alloc(ALLOC_ZERO))) return NULL; pp->pp_ref++; // increment ref count // set pde, for now permissions more permissive pgdir[PDX(va)] = page2pa(pp) | PTE_P | PTE_W | PTE_U; return &((pte_t*) page2kva(pp))[PTX(va)]; }
/*
 * kmm_pgfault - page-fault handler for the kernel's sliding physical-
 * access window [PBASE, PBASE + PSIZE).
 *
 * On a fault inside the window it first tears down the mapping built for
 * the previous faulting address (tracked in last_pgd/last_addr), then
 * wires the shared scratch tables temp_pud/temp_pmd/temp_ptd into the
 * current page table so the faulting address resolves.  Faults outside
 * the window are ignored here.
 */
void kmm_pgfault(struct trapframe *tf) {
	// uint64_t err = tf->tf_err;
	uintptr_t addr = rcr2();	/* faulting virtual address */

	if (addr >= PBASE && addr < PBASE + PSIZE) {
		pgd_t *pgd = KADDR_DIRECT(PTE_ADDR(rcr3()));	/* active top-level table */
		pud_t *pud;
		pmd_t *pmd;
		pte_t *ptd;
		/* PHYSICAL ADDRRESS ACCESSING */
		/* Undo the mapping created for the previous fault, if any. */
		if (last_pgd != NULL) {
			pud = KADDR_DIRECT(PGD_ADDR(last_pgd[PGX(last_addr)]));
			pmd = KADDR_DIRECT(PUD_ADDR(pud[PUX(last_addr)]));
			ptd = KADDR_DIRECT(PMD_ADDR(pmd[PMX(last_addr)]));
			ptd[PTX(last_addr)] = 0;
			if (ptd == temp_ptd) {
				/* NOTE(review): this clears pmd[PUX(last_addr)] -- indexing
				 * a pmd with PUX looks like it should be PMX(last_addr);
				 * confirm against the upstream uCore x86-64 port. */
				pmd[PUX(last_addr)] = 0;
				if (pmd == temp_pmd) {
					pud[PUX(last_addr)] = 0;
					if (pud == temp_pud)
						last_pgd[PGX(last_addr)] = 0;
				}
				if (last_pgd == pgd) {
					invlpg((void *)last_addr);
				}
			}
		}
		/* NOTE(review): the rebuild below indexes every level with
		 * last_addr even though it installs the mapping for the new
		 * fault address 'addr' (last_addr is only updated afterwards);
		 * verify this is intentional before changing it. */
		if (pgd[PGX(last_addr)] == 0)
			pgd[PGX(last_addr)] = PADDR_DIRECT(temp_pud) | PTE_W | PTE_P;
		pud = KADDR_DIRECT(PGD_ADDR(pgd[PGX(last_addr)]));
		if (pud[PUX(last_addr)] == 0)
			pud[PUX(last_addr)] = PADDR_DIRECT(temp_pmd) | PTE_W | PTE_P;
		pmd = KADDR_DIRECT(PUD_ADDR(pud[PUX(last_addr)]));
		if (pmd[PMX(last_addr)] == 0)
			pmd[PMX(last_addr)] = PADDR_DIRECT(temp_ptd) | PTE_W | PTE_P;
		ptd = KADDR_DIRECT(PMD_ADDR(pmd[PMX(last_addr)]));
		ptd[PTX(last_addr)] = PADDR_DIRECT(addr) | PTE_W | PTE_P;

		last_pgd = pgd;
		last_addr = addr;

		/* XXX? */
		// invlpg((void *)addr);
	}
}
// check_va2pa: translate 'va' through 'pgdir' by hand and return the
// physical address it maps to, or ~0 if either level is not present.
static physaddr_t
check_va2pa(pde_t *pgdir, uintptr_t va)
{
	pde_t entry = pgdir[PDX(va)];
	if (!(entry & PTE_P))
		return ~0;
	pte_t *table = (pte_t *) KADDR(PTE_ADDR(entry));
	if (!(table[PTX(va)] & PTE_P))
		return ~0;
	return PTE_ADDR(table[PTX(va)]);
}
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns // a pointer to the page table entry (PTE) for linear address 'va'. // This requires walking the two-level page table structure. // // The relevant page table page might not exist yet. // If this is true, and create == false, then pgdir_walk returns NULL. // Otherwise, pgdir_walk allocates a new page table page with page_alloc. // - If the allocation fails, pgdir_walk returns NULL. // - Otherwise, the new page's reference count is incremented, // the page is cleared, // and pgdir_walk returns a pointer into the new page table page. // // Hint 1: you can turn a Page * into the physical address of the // page it refers to with page2pa() from kern/pmap.h. // // Hint 2: the x86 MMU checks permission bits in both the page directory // and the page table, so it's safe to leave permissions in the page // directory more permissive than strictly necessary. // // Hint 3: look at inc/mmu.h for useful macros that mainipulate page // table and page directory entries. // pte_t * pgdir_walk(pde_t *pgdir, const void *va, int create) { // Fill this function in pde_t* pd = pgdir + (unsigned int) PDX(va); //PTE_ADDR used for both pte and pde if (*pd & PTE_P) return (pte_t*) KADDR(PTE_ADDR(*pd)) + (unsigned)PTX(va); // if page doesn't exist if (create == 0) return NULL; struct PageInfo* newpt = page_alloc(1); if (newpt == NULL) return NULL; newpt -> pp_ref = 1; *pd = page2pa(newpt) | PTE_P | PTE_U | PTE_W; return (pte_t*)page2kva(newpt) + (unsigned) PTX(va); }
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns // a pointer to the page table entry (PTE) for linear address 'va'. // This requires walking the two-level page table structure. // // The relevant page table page might not exist yet. // If this is true, and create == false, then pgdir_walk returns NULL. // Otherwise, pgdir_walk allocates a new page table page with page_alloc. // - If the allocation fails, pgdir_walk returns NULL. // - Otherwise, the new page's reference count is incremented, // the page is cleared, // and pgdir_walk returns a pointer into the new page table page. // // Hint 1: you can turn a Page * into the physical address of the // page it refers to with page2pa() from kern/pmap.h. // // Hint 2: the x86 MMU checks permission bits in both the page directory // and the page table, so it's safe to leave permissions in the page // more permissive than strictly necessary. // // Hint 3: look at inc/mmu.h for useful macros that mainipulate page // table and page directory entries. // pte_t * pgdir_walk(pde_t *pgdir, const void *va, int create) { pde_t *pde; pte_t *pgtab; struct PageInfo *pp; pde = &pgdir[PDX(va)]; if (*pde & PTE_P) { // KADDR(pa) : get the corresponding va of this pa. // ( reversed va->pa mapping ) // understanding why we need KADDR and PADDR is very important : // note : // 1. dereference, uintptr_t, physaddr_t // 2. the kernel, like any other software, cannot bypass virtual // memory translation and thus cannot directly load and store // to physical addresses. // 3. the kernel has set up some page table that has the direct // mapping of va -> pa. // 4. all pointers in c are virtual address. 
// read this : // http://pdos.csail.mit.edu/6.828/2012/labs/lab2/#Virtual--Linear--and-Physical-Addresses pgtab = (pte_t*)KADDR(PTE_ADDR(*pde)); } else { if (!create || (pp = page_alloc(ALLOC_ZERO)) == 0) return 0; pp->pp_ref = 1; pgtab = (pte_t*)KADDR(page2pa(pp)); *pde = PADDR(pgtab) | PTE_P | PTE_W | PTE_U; } return &pgtab[PTX(va)]; }
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns // a pointer to the page table entry (PTE) for linear address 'va'. // This requires walking the two-level page table structure. // // The relevant page table page might not exist yet. // If this is true, and create == false, then pgdir_walk returns NULL. // Otherwise, pgdir_walk allocates a new page table page with page_alloc. // - If the allocation fails, pgdir_walk returns NULL. // - Otherwise, the new page's reference count is incremented, // the page is cleared, // and pgdir_walk returns a pointer into the new page table page. // // Hint: Check out page2pa() and page2kva() in kern/pmap.h. // // Hint 2: the x86 MMU checks permission bits in both the page directory // and the page table. pte_t * pgdir_walk(pde_t *pgdir, uintptr_t va, bool create) { // Use the page directory index to look up the page table entry physaddr_t page_table = pgdir[PDX(va)]; // Check whether the entry is present if (!(page_table & PTE_P)) { // The page table doesn't exist yet if (!create) return NULL; // Try creating a new page table Page *page = page_alloc(); // If a page couldn't be allocated, return NULL if (page == NULL) return NULL; page_table = (physaddr_t) page2pa(page); // Should be 4KB aligned assert(PGALIGNED(page_table)); // Increment the new page's reference count page->pp_ref++; // Clear the page memset((void *)KADDR(page_table), 0, PGSIZE); // Set the present bit of this entry page_table = page_table | PTE_W | PTE_U | PTE_P; // Install it in the directory pgdir[PDX(va)] = page_table; } // Page table address is the top 20 bits of the entry + 12 zeros return (pte_t *) &((pte_t *) KADDR(PTE_ADDR(page_table)))[PTX(va)]; }
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns // a pointer to the page table entry (PTE) for linear address 'va'. // This requires walking the two-level page table structure. // // The relevant page table page might not exist yet. // If this is true, and create == false, then pgdir_walk returns NULL. // Otherwise, pgdir_walk allocates a new page table page with page_alloc. // - If the allocation fails, pgdir_walk returns NULL. // - Otherwise, the new page's reference count is incremented, // the page is cleared, // and pgdir_walk returns a pointer into the new page table page. // // Hint 1: you can turn a Page * into the physical address of the // page it refers to with page2pa() from kern/pmap.h. // // Hint 2: the x86 MMU checks permission bits in both the page directory // and the page table, so it's safe to leave permissions in the page // more permissive than strictly necessary. // // Hint 3: look at inc/mmu.h for useful macros that mainipulate page // table and page directory entries. // pte_t * pgdir_walk(pde_t *pgdir, const void *va, int create) { // Fill this function in physaddr_t pt_addr; struct PageInfo * new_page; pte_t * pagetable; assert(pgdir != NULL); if ((pgdir[PDX(va)] & PTE_P) == 0) { if (!create) { return NULL; } new_page = page_alloc(ALLOC_ZERO); if (new_page == NULL) { return NULL; } new_page->pp_ref++; pgdir[PDX(va)] = page2pa(new_page) | PTE_U | PTE_W | PTE_P; } pt_addr = PTE_ADDR(pgdir[PDX(va)]); pagetable = KADDR(pt_addr); return &pagetable[PTX(va)]; }
/*
 * get_pte - return the kernel virtual address of the PTE for linear
 * address 'la', optionally ('create') allocating the page-table page
 * that should contain it.
 *
 * When PTXSHIFT == PMXSHIFT the PT level coincides with the PMD level,
 * so the walk stops one level earlier and delegates entirely to get_pmd.
 */
pte_t *
get_pte(pgd_t *pgdir, uintptr_t la, bool create) {
#if PTXSHIFT == PMXSHIFT
	/* folded levels: the PMD entry *is* the PTE */
	return get_pmd(pgdir, la, create);
#else /* PTXSHIFT == PMXSHIFT */
	pmd_t *pmdp;
	/* walk (and possibly build) the upper levels first */
	if ((pmdp = get_pmd(pgdir, la, create)) == NULL) {
		return NULL;
	}
	if (! ptep_present(pmdp)) {
		struct Page *page;
		if (!create || (page = alloc_page()) == NULL) {
			return NULL;
		}
		set_page_ref(page, 1);
		uintptr_t pa = page2pa(page);
		/* a new page-table page must start out empty */
		memset(KADDR(pa), 0, PGSIZE);
#ifdef ARCH_ARM
		/* ARM: this entry is a section/coarse descriptor, mapped as a PDE */
		pdep_map(pmdp, pa);
#else
		ptep_map(pmdp, pa);
#endif
#ifndef ARCH_ARM
		/* keep the mid-level entry permissive; leaf PTEs carry the
		 * effective permissions */
		ptep_set_u_write(pmdp);
		ptep_set_accessed(pmdp);
		ptep_set_dirty(pmdp);
#else
#warning ARM9 PDE does not have access field
#endif
	}
	return &((pte_t *)KADDR(PMD_ADDR(*pmdp)))[PTX(la)];
#endif /* PTXSHIFT == PMXSHIFT */
}
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns // a pointer to the page table entry (PTE) for linear address 'va'. // This requires walking the two-level page table structure. // // The relevant page table page might not exist yet. // If this is true, and create == false, then pgdir_walk returns NULL. // Otherwise, pgdir_walk allocates a new page table page with page_alloc. // - If the allocation fails, pgdir_walk returns NULL. // - Otherwise, the new page's reference count is incremented, // the page is cleared, // and pgdir_walk returns a pointer into the new page table page. // // Hint 1: you can turn a Page * into the physical address of the // page it refers to with page2pa() from kern/pmap.h. // // Hint 2: the x86 MMU checks permission bits in both the page directory // and the page table, so it's safe to leave permissions in the page // more permissive than strictly necessary. // // Hint 3: look at inc/mmu.h for useful macros that mainipulate page // table and page directory entries. // pte_t * pgdir_walk(pde_t *pgdir, const void *va, int create) { // Fill this function in pte_t* p1 = 0; pte_t* p2 = 0; pte_t* result; struct Page* pg; p1=pgdir + PDX(va); if(!((*p1) & PTE_P)) { if(!create) return 0; if( !(pg=page_alloc(ALLOC_ZERO)) ) { return 0; } pg->pp_ref++; *p1=PTE_P | PTE_U | PTE_W | page2pa(pg) ; p2=KADDR( PTE_ADDR(*p1) ); } else{ p2=KADDR( PTE_ADDR(*p1) ); } result=p2+PTX(va); return result; }
/*
 * mmumapcpu0 - early boot MMU setup for CPU0 under Xen: detect PAE mode
 * from the start-of-day magic string, record the phys<->machine
 * translation tables, and fill in the single page-table entry that maps
 * CPU0MACH at MACHADDR in the pre-built boot page table.
 */
void mmumapcpu0(void)
{
	ulong *pdb, *pte, va, pa, pdbx;

	/* Xen advertises PAE guests with an "x86_32p" magic suffix */
	if(strstr(xenstart->magic, "x86_32p"))
		paemode = 1;
	hypervisor_virt_start = paemode ? 0xF5800000 : 0xFC000000;
	patomfn = (ulong*)xenstart->mfn_list;		/* pseudo-phys -> machine frame */
	matopfn = (ulong*)hypervisor_virt_start;	/* machine -> pseudo-phys frame */
	/* Xen bug ? can't touch top entry in PDPT */
	if(paemode)
		hypervisor_virt_start = 0xC0000000;
	/*
	 * map CPU0MACH at MACHADDR.
	 * When called the pagedir and page table exist, we just
	 * need to fill in a page table entry.
	 */
	pdb = (ulong*)xenstart->pt_base;
	va = MACHADDR;
	pa = PADDR(CPU0MACH) | PTEVALID|PTEWRITE;
	pdbx = PDX(va);
	pdb = PDB(pdb, va);
	pte = KADDR(MAPPN(pdb[pdbx]));
	/* page-table writes must go through the hypervisor interface */
	xenupdate(&pte[PTX(va)], pa);
}
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns // a pointer to the page table entry (PTE) for linear address 'va'. // This requires walking the two-level page table structure. // // The relevant page table page might not exist yet. // If this is true, and create == false, then pgdir_walk returns NULL. // Otherwise, pgdir_walk allocates a new page table page with page_alloc. // - If the allocation fails, pgdir_walk returns NULL. // - Otherwise, the new page's reference count is incremented, // the page is cleared, // and pgdir_walk returns a pointer into the new page table page. // // Hint 1: you can turn a Page * into the physical address of the // page it refers to with page2pa() from kern/pmap.h. // // Hint 2: the x86 MMU checks permission bits in both the page directory // and the page table, so it's safe to leave permissions in the page // more permissive than strictly necessary. // // Hint 3: look at inc/mmu.h for useful macros that mainipulate page // table and page directory entries. // pte_t * pgdir_walk(pde_t *pgdir, const void *va, int create) { int dindex = PDX(va), tindex = PTX(va); //dir index, table index if (!(pgdir[dindex] & PTE_P)) { //if pde not exist if (create) { struct PageInfo *pg = page_alloc(ALLOC_ZERO); //alloc a zero page if (!pg) return NULL; //allocation fails pg->pp_ref++; pgdir[dindex] = page2pa(pg) | PTE_P | PTE_U | PTE_W; } else return NULL; } pte_t *p = KADDR(PTE_ADDR(pgdir[dindex])); //THESE CODE COMMENTED IS NOT NEEDED // if (!(p[tindex] & PTE_P)) //if pte not exist // if (create) { // struct PageInfo *pg = page_alloc(ALLOC_ZERO); //alloc a zero page // pg->pp_ref++; // p[tindex] = page2pa(pg) | PTE_P; // } else return NULL; return p+tindex; }
/*!
 *  @brief      PORT initialization
 *  @param      ptxn    port/pin
 *  @param      cfg     port attribute configuration, e.g. interrupt trigger and pull-up/pull-down options
 *  @since      v5.0
 *  @note       Unlike port_init_NoALT, this function must also configure the MUX (pin function) field; otherwise MUX = ALT0
 *  Sample usage:    port_init (PTA8, IRQ_RISING | PF | ALT1 | PULLUP );
 *                   // init PTA8: rising-edge interrupt, passive filter, mux = GPIO (ALT1), pull-up
 */
void port_init(PTXn_e ptxn, uint32 cfg )
{
	SIM_SCGC5 |= (SIM_SCGC5_PORTA_MASK << PTX(ptxn));	// enable the clock gate for PORTx
	PORT_ISFR_REG(PORTX_BASE(ptxn)) = (1<<PTn(ptxn));	// clear the interrupt status flag
	PORT_PCR_REG(PORTX_BASE(ptxn), PTn(ptxn)) = cfg;	// set mux function, trigger mode, pull-up/pull-down
}
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns // a pointer to the page table entry (PTE) for linear address 'va'. // This requires walking the two-level page table structure. // // The relevant page table page might not exist yet. // If this is true, and create == false, then pgdir_walk returns NULL. // Otherwise, pgdir_walk allocates a new page table page with page_alloc. // - If the allocation fails, pgdir_walk returns NULL. // - Otherwise, the new page's reference count is incremented, // the page is cleared, // and pgdir_walk returns a pointer into the new page table page. // // Hint 1: you can turn a Page * into the physical address of the // page it refers to with page2pa() from kern/pmap.h. // // Hint 2: the x86 MMU checks permission bits in both the page directory // and the page table, so it's safe to leave permissions in the page // more permissive than strictly necessary. // // Hint 3: look at inc/mmu.h for useful macros that mainipulate page // table and page directory entries. // pte_t * pgdir_walk(pde_t *pgdir, const void *va, int create) { // cprintf("pgdir_walk: va:%x pgdir[PDX(va)]:%x\n",va,pgdir[PDX(va)]); if (pgdir[PDX(va)] & PTE_P) { return PTX(va)+(pte_t*)(KADDR(PTE_ADDR(pgdir[PDX(va)]))); }else{ if (create == 0) return NULL; struct Page *newp = page_alloc(ALLOC_ZERO); if (newp == NULL) return NULL; newp->pp_ref++; pgdir[PDX(va)] = page2pa(newp) | PTE_P | PTE_W | PTE_U; return PTX(va)+(pte_t*)page2kva(newp); } return NULL; }
/**
 * dump information about virtual-to-physical address mapping
 *
 * @param pgdir
 * @param va
 */
void dump_va_mapping(pde_t *pgdir, uintptr_t va)
{
	dprintk("dump: pgdir=%p, va=%p\n", pgdir, va);
	pde_t *pde = &pgdir[PDX(va)];
	if (!(*pde & PTE_P)) {
		dprintk(" page directory entry not present.\n");
		return;
	}
	pte_t *table = (pte_t*) KADDR(PTE_ADDR(*pde));
	if (!(table[PTX(va)] & PTE_P)) {
		dprintk(" page table entry not present.\n");
		return;
	}
	dprintk(" pde=%p, pte=%p\n", *pde, table[PTX(va)]);
}
//get_pte - get pte and return the kernel virtual address of this pte for la // - if the PT contians this pte didn't exist, alloc a page for PT // parameter: // pgdir: the kernel virtual base address of PDT // la: the linear address need to map // create: a logical value to decide if alloc a page for PT // return vaule: the kernel virtual address of this pte pte_t * get_pte(pde_t *pgdir, uintptr_t la, bool create) { /* LAB2 EXERCISE 2: YOUR CODE * * If you need to visit a physical address, please use KADDR() * please read pmm.h for useful macros * * Maybe you want help comment, BELOW comments can help you finish the code * * Some Useful MACROs and DEFINEs, you can use them in below implementation. * MACROs or Functions: * PDX(la) = the index of page directory entry of VIRTUAL ADDRESS la. * KADDR(pa) : takes a physical address and returns the corresponding kernel virtual address. * set_page_ref(page,1) : means the page be referenced by one time * page2pa(page): get the physical address of memory which this (struct Page *) page manages * struct Page * alloc_page() : allocation a page * memset(void *s, char c, size_t n) : sets the first n bytes of the memory area pointed by s * to the specified value c. * DEFINEs: * PTE_V 0x001 // page table/directory entry flags bit : Present * PTE_W 0x002 // page table/directory entry flags bit : Writeable * PTE_U 0x004 // page table/directory entry flags bit : User can access */ #if 0 pde_t *pdep = NULL; // (1) find page directory entry if (0) { // (2) check if entry is not present // (3) check if creating is needed, then alloc page for page table // CAUTION: this page is used for page table, not for common data page // (4) set page reference uintptr_t pa = 0; // (5) get linear address of page // (6) clear page content using memset // (7) set page directory entry's permission } return NULL; // (8) return page table entry #endif pde_t *pdep = &pgdir[PDX(la)]; //right!!! 
// if(la==0x50000000) // cprintf("pdep=%08x\n",pdep); if (!(*pdep & PTE_V)) { struct Page *page; // cprintf("haha\n"); // cprintf("create=%d\n",create); if (!create || (page = alloc_page()) == NULL) { return NULL; } //cprintf("hahapaget_pte\n"); set_page_ref(page, 1); uintptr_t pa = page2pa(page); memset(KADDR(pa), 0, PGSIZE); *pdep = pa | PTE_TYPE_TABLE | PTE_V | PTE_R; } //cprintf("%08x\n",&((pte_t *)KADDR(PDE_ADDR(*pdep)))[PTX(la)]); // if(la==0x50000000) // cprintf("get_pte return=%08x\n",&((pte_t *)KADDR(PDE_ADDR(*pdep)))[PTX(la)]); return &((pte_t *)KADDR(PDE_ADDR(*pdep)))[PTX(la)]; }
// // Frees env e and all memory it uses. // void env_free(struct Env *e) { pte_t *pt; uint32_t pdeno, pteno; physaddr_t pa; // If freeing the current environment, switch to boot_pgdir // before freeing the page directory, just in case the page // gets reused. if (e == curenv) lcr3(boot_cr3); // Note the environment's demise. // cprintf("[%08x] free env %08x\n", curenv ? curenv->env_id : 0, e->env_id); // Flush all mapped pages in the user portion of the address space static_assert(UTOP % PTSIZE == 0); for (pdeno = 0; pdeno < PDX(UTOP); pdeno++) { // only look at mapped page tables if (!(e->env_pgdir[pdeno] & PTE_P)) continue; // find the pa and va of the page table pa = PTE_ADDR(e->env_pgdir[pdeno]); pt = (pte_t*) KADDR(pa); // unmap all PTEs in this page table for (pteno = 0; pteno <= PTX(~0); pteno++) { if (pt[pteno] & PTE_P) page_remove(e->env_pgdir, PGADDR(pdeno, pteno, 0)); } // free the page table itself e->env_pgdir[pdeno] = 0; page_decref(pa2page(pa)); } // free the page directory pa = e->env_cr3; e->env_pgdir = 0; e->env_cr3 = 0; page_decref(pa2page(pa)); // return the environment to the free list e->env_status = ENV_FREE; LIST_INSERT_HEAD(&env_free_list, e, env_link); }
/*
 * unmap_range_pte - remove every present PTE in the page table 'pte'
 * covering the half-open virtual range [base+start, base+end), releasing
 * the mapped pages via page_remove_pte.
 *
 * Preconditions: 0 < end <= PTSIZE, start < end, both page-aligned.
 *
 * FIX: dropped the tautological 'start >= 0' from the first assert --
 * 'start' is uintptr_t (unsigned), so the comparison was always true and
 * draws a -Wtype-limits warning; the remaining conditions are unchanged.
 */
static void
unmap_range_pte(pgd_t *pgdir, pte_t *pte, uintptr_t base, uintptr_t start, uintptr_t end) {
	assert(start < end && end <= PTSIZE);
	assert(start % PGSIZE == 0 && end % PGSIZE == 0);
	do {
		pte_t *ptep = &pte[PTX(start)];
		if (*ptep != 0) {
			page_remove_pte(pgdir, base + start, ptep);
		}
		start += PGSIZE;
		/* 'start != 0' guards against wrap-around when end == PTSIZE
		 * and the index space is exhausted */
	} while (start != 0 && start < end);
}
/*!
 *  @brief      PORT initialization (MUX preserved)
 *  @param      ptxn    port/pin
 *  @param      cfg     port attribute configuration, e.g. interrupt trigger and pull-up/pull-down options
 *  @since      v5.0
 *  @note       Unlike port_init, this function does not need the MUX (pin function) field configured
 *              (it is ignored even if supplied); the MUX value already in the register is kept.
 *  Sample usage:    port_init_NoALT (PTA8, IRQ_RISING | PF | PULLUP );
 *                   // init PTA8: rising-edge interrupt, passive filter, keep existing mux, pull-up
 */
void port_init_NoALT(PTXn_e ptxn, uint32 cfg)
{
	SIM_SCGC5 |= (SIM_SCGC5_PORTA_MASK << PTX(ptxn));	// enable the clock gate for PORTx
	PORT_ISFR_REG(PORTX_BASE(ptxn)) = (1<<PTn(ptxn));	// clear the interrupt status flag

	// Strip any MUX bits from cfg, then merge in the MUX currently
	// programmed in the register (i.e. do not change the pin function).
	cfg &= ~PORT_PCR_MUX_MASK;	// clear the MUX field in cfg (keep the existing ALT)
	cfg |= (PORT_PCR_REG(PORTX_BASE(ptxn), PTn(ptxn)) & PORT_PCR_MUX_MASK);	// read back the configured MUX

	PORT_PCR_REG(PORTX_BASE(ptxn), PTn(ptxn)) = cfg;	// set trigger mode and pull-up/pull-down, MUX preserved
}
// check_va2pa: translate 'va' through the 4-level table rooted at
// 'pml4e' by hand.  Returns the mapped physical address, or ~0 at the
// first level whose entry is not present.
static physaddr_t
check_va2pa(pml4e_t *pml4e, uintptr_t va)
{
	pml4e_t top = pml4e[PML4(va)];
	if (!(top & PTE_P))
		return ~0;

	pdpe_t *pdp = (pdpe_t *) KADDR(PTE_ADDR(top));
	if (!(pdp[PDPE(va)] & PTE_P))
		return ~0;

	pde_t *pd = (pde_t *) KADDR(PTE_ADDR(pdp[PDPE(va)]));
	if (!(pd[PDX(va)] & PTE_P))
		return ~0;

	pte_t *pt = (pte_t *) KADDR(PTE_ADDR(pd[PDX(va)]));
	if (!(pt[PTX(va)] & PTE_P))
		return ~0;

	return PTE_ADDR(pt[PTX(va)]);
}
// // Check that an environment is allowed to access the range of memory // [va, va+len) with permissions 'perm | PTE_P'. // Normally 'perm' will contain PTE_U at least, but this is not required. // 'va' and 'len' need not be page-aligned; you must test every page that // contains any of that range. You will test either 'len/PGSIZE', // 'len/PGSIZE + 1', or 'len/PGSIZE + 2' pages. // // A user program can access a virtual address if (1) the address is below // ULIM, and (2) the page table gives it permission. These are exactly // the tests you should implement here. // // If there is an error, set the 'user_mem_check_addr' variable to the first // erroneous virtual address. // // Returns 0 if the user program can access this range of addresses, // and -E_FAULT otherwise. // int user_mem_check(struct Env *env, const void *va, size_t len, int perm) { // LAB 3: Your code here. int returnVal = 0; uint32_t va_int = (uint32_t) va; pte_t * current_te = pgdir_walk(env->env_pgdir, (void *) va_int, 0); if(!current_te) { user_mem_check_addr = va_int; return -E_FAULT; } else if(va_int >= ULIM || !((* current_te) & (PTE_P | perm))) { user_mem_check_addr = va_int; return -E_FAULT; } uint32_t va_top = va_int + len; if(va_top % PGSIZE != 0) { va_top = va_top + (PGSIZE - (va_top%PGSIZE)); } va_int = va_int + PGSIZE; for(va_int; va_int < va_top; va_int += PGSIZE) { current_te = pgdir_walk(env->env_pgdir, (void *) va_int, 0); if(!current_te) { user_mem_check_addr = PTX(va_int)<<12; returnVal = -E_FAULT; break; } else if(va_int >= ULIM || !((* current_te) & (PTE_P | perm))) { user_mem_check_addr = PTX(va_int)<<12; returnVal = -E_FAULT; break; } } return returnVal; }