Esempio n. 1
0
/* Map the physical page 'pp' at virtual address 'va' with permission
 * bits 'perm' (PTE_V is OR'd in unconditionally).
 *
 * Returns 0 on success, -E_NO_MEM if a page table could not be
 * allocated.  pp->pp_ref is incremented only when a new mapping is
 * created; re-inserting the same page at the same va keeps the count
 * unchanged. */
int
page_insert(Pde *pgdir, struct Page *pp, u_long va, u_int perm)
{
	u_int PERM;
	Pte *pgtable_entry;
	PERM = perm | PTE_V;

	// First walk without creating: probe for an existing mapping only.
	pgdir_walk(pgdir, va, 0, &pgtable_entry);

	if (pgtable_entry != 0 && (*pgtable_entry & PTE_V) != 0) {
		if (pa2page(*pgtable_entry) != pp) {
			// A different page is mapped at va; unmap it first
			// (page_remove also drops its refcount).
			page_remove(pgdir, va);
		} else	{
			// Same page re-inserted: refresh the permissions in
			// place without touching pp_ref.
			tlb_invalidate(pgdir, va);
			*pgtable_entry = (page2pa(pp) | PERM);

			return 0;
		}
	}

	tlb_invalidate(pgdir, va);

	// Walk again with create=1 so a page table is allocated on demand.
	if (pgdir_walk(pgdir, va, 1, &pgtable_entry) != 0) {
		return -E_NO_MEM;    // panic("page insert wrong.\n");
	}

	*pgtable_entry = (page2pa(pp) | PERM);
	pp->pp_ref++;
	return 0;
}
Esempio n. 2
0
/* Given a page table entry, return the page.
 * We pass in the page directory index and page table index 
 * so we can re-create the virtual address if we need to.
 *
 * If the page was swapped out on disk, swap it in before returning. */
struct page *
get_page(int pdi, int pti, struct page_table* pt)
{
	// Decode the location bits of the PTE: PTE_SWAPPING means the page
	// is mid-transfer (to or from disk); PTE_SWAP means it is on disk.
	int swapped = PTE_TO_LOCATION(pt->table[pti]);
	
	// Wait for page to finish moving to disk, or moving to memory.
	while(swapped == PTE_SWAPPING)
	{
		// Let other threads run so the in-flight swap can complete,
		// then re-read the PTE's location bits.
		thread_yield();
		swapped = PTE_TO_LOCATION(pt->table[pti]);
	}
	
	// Pointer into the caller-supplied table; also used after the swap-in
	// below, which re-reads the (possibly updated) entry through it.
	int* pte = &(pt->table[pti]);
	if(swapped == PTE_SWAP)
	{
		// Recover address space, virtual address, and permissions so we
		// can allocate a frame and pull the page back from disk.
		// NOTE(review): assumes the page belongs to the *current*
		// thread's address space -- confirm callers never pass another
		// process's page table here.
		struct addrspace *as = curthread->t_addrspace;
		vaddr_t va = PD_INDEX_TO_VA(pdi) | PT_INDEX_TO_VA(pti);
		int permissions = PTE_TO_PERMISSIONS(*pte);
		//Allocate a page
		struct page *page = page_alloc(as,va,permissions);
		// NOTE(review): this local 'pt' shadows the 'pt' parameter.
		// Presumably pgdir_walk returns the same table, but 'pte' above
		// still points into the caller's table -- verify they agree.
		struct page_table *pt = pgdir_walk(as,va,false);
		KASSERT(page->state == LOCKED);
		/* Page now has a home in RAM. But set the swap bit to 1 so we can swap the page in*/
		pt->table[pti] |= PTE_SWAP;
		//Swap the page in
	
		swapin_page(as,va,page);
		/* Page was swapped back in. Re-translate */
		pt = pgdir_walk(as,va,false);
		*pte = pt->table[pti];
	}
	// Convert the PTE's physical frame number into a core-map index.
	int page_num = PTE_TO_PFN(*pte) / PAGE_SIZE;
	return &core_map[page_num];
}
Esempio n. 3
0
//
// Map the physical page 'pp' at virtual address 'va'.
// The permissions (the low 12 bits) of the page table entry
// should be set to 'perm|PTE_P'.
//
// Requirements
//   - If there is already a page mapped at 'va', it should be page_remove()d.
//   - If necessary, on demand, a page table should be allocated and inserted
//     into 'pgdir'.
//   - pp->pp_ref should be incremented if the insertion succeeds.
//   - The TLB must be invalidated if a page was formerly present at 'va'.
//
// Corner-case hint: Make sure to consider what happens when the same
// pp is re-inserted at the same virtual address in the same pgdir.
// However, try not to distinguish this case in your code, as this
// frequently leads to subtle bugs; there's an elegant way to handle
// everything in one code path.
//
// RETURNS:
//   0 on success
//   -E_NO_MEM, if page table couldn't be allocated
//
// Hint: The TA solution is implemented using pgdir_walk, page_remove,
// and page2pa.
//
//
// Map the physical page 'pp' at virtual address 'va' with permissions
// 'perm | PTE_P'.  Returns 0 on success, -E_NO_MEM if a page table
// could not be allocated.
//
// BUG FIX: the old code reached into 'page_free_list' and unlinked pp
// when it happened to be the list head.  Free-list maintenance belongs
// to page_alloc/page_free; doing it here only handled the head case and
// silently corrupted the allocator's invariants otherwise.  It also
// walked the page directory twice.  This version uses the single
// elegant code path: create the PTE on demand, bump the refcount, then
// remove any old mapping.
//
int
page_insert(pde_t *pgdir, struct PageInfo *pp, void *va, int perm)
{
    // Walk with create=1 so a page table is allocated on demand.
    pte_t *pte = pgdir_walk(pgdir, va, 1);
    if (pte == NULL)
        return -E_NO_MEM;

    // Increment the refcount BEFORE removing the old mapping: if the
    // same pp is re-inserted at the same va, page_remove would
    // otherwise drop the count to zero and free the page under us.
    pp->pp_ref++;
    if (*pte & PTE_P)
        page_remove(pgdir, va);     // unmaps old page, invalidates TLB

    *pte = page2pa(pp) | perm | PTE_P;
    tlb_invalidate(pgdir, va);
    return 0;
}
Esempio n. 4
0
//
// Check that an environment is allowed to access the range of memory
// [va, va+len) with permissions 'perm | PTE_P'.
// Normally 'perm' will contain PTE_U at least, but this is not required.
// 'va' and 'len' need not be page-aligned; you must test every page that
// contains any of that range.  You will test either 'len/PGSIZE',
// 'len/PGSIZE + 1', or 'len/PGSIZE + 2' pages.
//
// A user program can access a virtual address if (1) the address is below
// ULIM, and (2) the page table gives it permission.  These are exactly
// the tests you should implement here.
//
// If there is an error, set the 'user_mem_check_addr' variable to the first
// erroneous virtual address.
//
// Returns 0 if the user program can access this range of addresses,
// and -E_FAULT otherwise.
//
//
// Check that 'env' may access [va, va+len) with permissions
// 'perm | PTE_P'.  On failure sets 'user_mem_check_addr' to the first
// erroneous virtual address and returns -E_FAULT; returns 0 otherwise.
//
// BUG FIXES vs. the old version:
//  - it iterated byte by byte (one pgdir_walk per *byte*); permissions
//    are per page, so stepping per page is sufficient and O(len/PGSIZE);
//  - 'ia > ULIM' allowed the address ULIM itself, but the spec requires
//    every accessed address to be strictly below ULIM.
// When the very first page fails, the reported address is 'va' itself,
// not the rounded-down page base.
//
int
user_mem_check(struct Env *env, const void *va, size_t len, int perm)
{
	uintptr_t start = (uintptr_t)va;
	uintptr_t end = start + len;
	uintptr_t ia;

	perm |= PTE_P;
	for (ia = ROUNDDOWN(start, PGSIZE); ia < end; ia += PGSIZE) {
		pte_t *ptep;
		// ULIM is page-aligned, so checking the page base is exact.
		if (ia >= ULIM ||
		    (ptep = pgdir_walk(env->env_pgdir, (void *)ia, 0)) == NULL ||
		    (*ptep & perm) != perm) {
			// First bad byte is va itself when the failing page
			// contains va.
			user_mem_check_addr = (ia < start) ? start : ia;
			return -E_FAULT;
		}
	}
	return 0;
}
Esempio n. 5
0
//
// Return the page mapped at virtual address 'va'.
// If pte_store is not zero, then we store in it the address
// of the pte for this page.  This is used by page_remove and
// can be used to verify page permissions for syscall arguments,
// but should not be used by most callers.
//
// Return NULL if there is no page mapped at va.
//
// Hint: the TA solution uses pgdir_walk and pa2page.
//
struct PageInfo *
page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
	// Walk without creating: a lookup must never allocate page tables.
	pte_t *pte_entry;

	pte_entry = pgdir_walk(pgdir, va, 0);
	if (pte_entry == NULL)
		return NULL;		// no page table covers va
	// BUG FIX: test the present bit, not '*pte == 0'.  A non-present
	// PTE may carry nonzero software bits (e.g. swap bookkeeping) and
	// must not be treated as a live mapping.
	if (!(*pte_entry & PTE_P))
		return NULL;

	if (pte_store != NULL)
		*pte_store = pte_entry;	// hand the PTE address back

	return pa2page(PTE_ADDR(*pte_entry));
}
Esempio n. 6
0
/* Allocate a page in a user address space */
/* Allocate a page in a user address space.
 *
 * Claims free frame 'page_num' for (as, va), installs a PTE pointing at
 * it with 'permissions', zeroes the frame, and updates the free count. */
static
void
allocate_nonfixed_page(size_t page_num, struct addrspace *as, vaddr_t va, int permissions)
{
	KASSERT(core_map[page_num].state == FREE);

	/* Claim the frame and record its owner/mapping in the core map. */
	paddr_t frame_pa = page_num * PAGE_SIZE;
	core_map[page_num].state = LOCKED;
	core_map[page_num].pa = frame_pa;
	core_map[page_num].va = va;
	core_map[page_num].as = as;

	/* Locate (creating on demand) the page table covering va. */
	struct page_table *table = pgdir_walk(as, va, true);
	KASSERT(table != NULL);

	/* Point the PTE at the new frame and attach the permission bits. */
	size_t idx = VA_TO_PT_INDEX(va);
	table->table[idx] = PAGEVA_TO_PTE(PADDR_TO_KVADDR(frame_pa));
	table->table[idx] |= permissions;

	zero_page(page_num);
	free_pages--;
}
Esempio n. 7
0
//
// Map the physical page 'pp' at virtual address 'va'.
// The permissions (the low 12 bits) of the page table entry
// should be set to 'perm|PTE_P'.
//
// Requirements
//   - If there is already a page mapped at 'va', it should be page_remove()d.
//   - If necessary, on demand, a page table should be allocated and inserted
//     into 'pgdir'.
//   - pp->pp_ref should be incremented if the insertion succeeds.
//   - The TLB must be invalidated if a page was formerly present at 'va'.
//
// Corner-case hint: Make sure to consider what happens when the same
// pp is re-inserted at the same virtual address in the same pgdir.
// However, try not to distinguish this case in your code, as this
// frequently leads to subtle bugs; there's an elegant way to handle
// everything in one code path.
//
// RETURNS:
//   0 on success
//   -E_NO_MEM, if page table couldn't be allocated
//
// Hint: The TA solution is implemented using pgdir_walk, page_remove,
// and page2pa.
//
//
// Map the physical page 'pp' at virtual address 'va' with permissions
// 'perm | PTE_P'.  Returns 0 on success, -E_NO_MEM if a page table
// could not be allocated.
//
// BUG FIX: the old code called pa2page(*ptep) without first checking
// PTE_P.  An empty PTE (value 0) made pa2page return &pages[0], so
// inserting page 0 into an unmapped slot skipped the pp_ref++; a stale
// non-present entry with a large frame number could even make pa2page
// panic.  Only interpret the old PTE when it is actually present.
//
int
page_insert(pde_t *pgdir, struct PageInfo *pp, void *va, int perm)
{
	// Allocate a page table on demand (create = true).
	pte_t* ptep = pgdir_walk(pgdir, va, true);
	if(!ptep) {
		return -E_NO_MEM;
	}

	// Increment first so removing an identical old mapping cannot drop
	// pp's refcount to zero and free it.
	pp->pp_ref++;
	if (*ptep & PTE_P)
		page_remove(pgdir, va);	// unmaps old page, invalidates TLB

	*ptep = page2pa(pp) | perm | PTE_P;
	tlb_invalidate(pgdir, va);

	/* we should also change pde's perm */
	pde_t *pde = pgdir + PDX(va);
	*pde = *pde | perm;

	return 0;
}
Esempio n. 8
0
File: pmap.c Progetto: yuki252111/os
//
// Check that an environment is allowed to access the range of memory
// [va, va+len) with permissions 'perm | PTE_P'.
// Normally 'perm' will contain PTE_U at least, but this is not required.
// 'va' and 'len' need not be page-aligned; you must test every page that
// contains any of that range.  You will test either 'len/PGSIZE',
// 'len/PGSIZE + 1', or 'len/PGSIZE + 2' pages.
//
// A user program can access a virtual address if (1) the address is below
// ULIM, and (2) the page table gives it permission.  These are exactly
// the tests you should implement here.
//
// If there is an error, set the 'user_mem_check_addr' variable to the first
// erroneous virtual address.
//
// Returns 0 if the user program can access this range of addresses,
// and -E_FAULT otherwise.
//
//
// Check that 'env' may access [va, va+len) with permissions
// 'perm | PTE_P'.  Sets 'user_mem_check_addr' to the first erroneous
// address and returns -E_FAULT on failure, 0 on success.
//
// BUG FIXES vs. the old version:
//  - it forced PTE_U into 'perm', but the spec explicitly says PTE_U is
//    NOT required -- kernel-only checks were wrongly rejected;
//  - it walked the range byte by byte (one pgdir_walk per byte);
//    permissions are per page, so one check per page suffices.
//
int
user_mem_check(struct Env *env, const void *va, size_t len, int perm)
{
	uintptr_t start = (uintptr_t)va;
	uintptr_t end = start + len;
	uintptr_t addr;

	perm |= PTE_P;	// presence is always required; PTE_U only if asked
	for (addr = ROUNDDOWN(start, PGSIZE); addr < end; addr += PGSIZE) {
		pte_t *pte;
		// Check ULIM before walking: kernel mappings may carry the
		// requested bits but are still off limits.
		if (addr >= (uintptr_t)ULIM ||
		    (pte = pgdir_walk(env->env_pgdir, (void *)addr, 0)) == NULL ||
		    (*pte & perm) != perm) {
			// Report va itself when the failure is in va's page.
			user_mem_check_addr = (addr < start) ? start : addr;
			return -E_FAULT;
		}
	}
	return 0;
}
Esempio n. 9
0
//
// Map the physical page 'pp' at virtual address 'va'.
// The permissions (the low 12 bits) of the page table entry
// should be set to 'perm|PTE_P'.
//
// Requirements
//   - If there is already a page mapped at 'va', it should be page_remove()d.
//   - If necessary, on demand, a page table should be allocated and inserted
//     into 'pgdir'.
//   - pp->pp_ref should be incremented if the insertion succeeds.
//   - The TLB must be invalidated if a page was formerly present at 'va'.
//
// Corner-case hint: Make sure to consider what happens when the same
// pp is re-inserted at the same virtual address in the same pgdir.
//
// RETURNS:
//   0 on success
//   -E_NO_MEM, if page table couldn't be allocated
//
// Hint: Check out pgdir_walk, page_remove, page2pa, and similar functions.
//
//
// Map the physical page 'pp' at virtual address 'va' with permissions
// 'perm | PTE_P', allocating a page table on demand.
//
// Returns 0 on success, -E_NO_MEM if a page table couldn't be allocated.
// Re-inserting the frame already mapped at 'va' just refreshes the
// permission bits (refcount unchanged); a different frame is first
// page_remove()d.
//
int
page_insert(pde_t *pgdir, struct Page *pp, uintptr_t va, pte_t perm)
{
	/* Ensure a page table exists for va, creating one if needed. */
	pte_t *entry = pgdir_walk(pgdir, va, true);
	if (entry == NULL)
		return -E_NO_MEM;

	physaddr_t new_pa = page2pa(pp);
	Page *old_page = page_lookup(pgdir, va, &entry);

	if (old_page && (*entry & PTE_P)) {
		if (PTE_ADDR(*entry) == new_pa) {
			/* Same frame re-inserted: refresh permissions only,
			 * flush the stale TLB entry, and keep the refcount. */
			*entry = new_pa | perm | PTE_P;
			tlb_invalidate(pgdir, va);
			return 0;
		}
		/* A different frame was mapped here; evict it first. */
		page_remove(pgdir, va);
	}

	/* No mapping remains at va: install the new one. */
	*entry = new_pa | perm | PTE_P;
	pp->pp_ref++;
	assert(page_lookup(pgdir, va, &entry));
	assert(PTE_ADDR(*entry) == new_pa);
	return 0;
}
Esempio n. 10
0
//
// Check that an environment is allowed to access the range of memory
// [va, va+len) with permissions 'perm | PTE_P'.
// Normally 'perm' will contain PTE_U at least, but this is not required.
// 'va' and 'len' need not be page-aligned; you must test every page that
// contains any of that range.  You will test either 'len/PGSIZE',
// 'len/PGSIZE + 1', or 'len/PGSIZE + 2' pages.
//
// A user program can access a virtual address if (1) the address is below
// ULIM, and (2) the page table gives it permission.  These are exactly
// the tests you should implement here.
//
// If there is an error, set the 'user_mem_check_addr' variable to the first
// erroneous virtual address.
//
// Returns 0 if the user program can access this range of addresses,
// and -E_FAULT otherwise.
//
//
// Check that 'env' may access [va, va+len) with permissions
// 'perm | PTE_P'.  Sets 'user_mem_check_addr' to the first erroneous
// address and returns -E_FAULT on failure, 0 on success.
//
// BUG FIX: the old test was 'start > ULIM', which let the address ULIM
// itself pass; the spec requires every accessed address to be strictly
// *below* ULIM.
//
int
user_mem_check(struct Env *env, const void *va, size_t len, int perm)
{
	/*stone's solution for lab3-B*/
	uintptr_t start = (uintptr_t)va;
	uintptr_t end = (uintptr_t)va + len;
	perm |= PTE_P;
	int r = 0;
	while (start < end){
		if (start >= ULIM){
			user_mem_check_addr = start;
			r = -E_FAULT;
			break;
		}
		pte_t* pte = pgdir_walk(env->env_pgdir, (void*)start, 0);
		if (pte == NULL || (*pte & perm) != perm){
			user_mem_check_addr = start;
			r = -E_FAULT;
			break;
		}
		// Advance to the next page boundary; the first iteration
		// starts at va itself so the reported address is exact.
		start = ROUNDDOWN(start+PGSIZE, PGSIZE);
	}
	return r;
}
Esempio n. 11
0
File: pmap.c Progetto: yahu/JOS
//
// Map [va, va+size) of virtual address space to physical [pa, pa+size)
// in the page table rooted at pgdir.  Size is a multiple of PGSIZE.
// Use permission bits perm|PTE_P for the entries.
//
// This function is only intended to set up the ``static'' mappings
// above UTOP. As such, it should *not* change the pp_ref field on the
// mapped pages.
//
// Hint: the TA solution uses pgdir_walk
// Map [va, va+size) of virtual address space to physical [pa, pa+size)
// in the page table rooted at pgdir, with permissions perm|PTE_P.
// Size, va and pa must all be PGSIZE-aligned.  Does not touch pp_ref
// (intended only for static mappings above UTOP).
//
// BUG FIXES:
//  - the diagnostic strings contained a bare '%' before "PGSIZE" which
//    cprintf parsed as a conversion specifier, printing garbage; '%'
//    must be escaped as "%%";
//  - with size == 0 and va == 0, 'va + (size - PGSIZE)' wrapped around
//    and the loop mapped (almost) the whole address space.
void
boot_map_region(pde_t *pgdir, uintptr_t va, size_t size, physaddr_t pa, int perm)
{
    // Fill this function in
    pte_t * pte;
    uintptr_t vtop ;
    uintptr_t initva=va;

    if( (va%PGSIZE) || (size%PGSIZE) || (pa%PGSIZE) ){
        cprintf("va=%x \n ",va);
        cprintf("va%%PGSIZE=%x \n ",va%PGSIZE);
        cprintf("size%%PGSIZE=%x \n ",size%PGSIZE);
        cprintf("pa%%PGSIZE=%x \n ",pa%PGSIZE);
        panic("in boot_map_region ,va or size or pa is not aligned \n ");

    }

    // Nothing to map; avoid the vtop underflow below.
    if (size == 0)
        return;

    vtop=va +(size-PGSIZE);

    // 'va >= initva' stops the loop if va overflows past the top of
    // the address space.
    while(va<=vtop && va>=initva){

       if((pte = pgdir_walk(pgdir, (void *)va, 1))==0)
            panic("get pte fail !");
        *pte=(pa& (~0xfff) ) | perm | PTE_P;

        va+=PGSIZE;
        pa+=PGSIZE;
    }

}
Esempio n. 12
0
// Monitor command: change the permission bits of the PTE mapping a
// virtual address.  Usage: modpageperms va perms.  Always returns 0
// (monitor convention: keep the monitor running).
int
mon_modpageperms(int argc, char **argv, struct Trapframe *tf)
{
	uintptr_t va_arg;
	int perms;
	pde_t *pgdir;
	pte_t *pte;

	if (argc != 3) {
		cprintf("usage: modpageperms va perms\n");
		return 0;
	}

	va_arg = (uintptr_t) strtol(argv[1], NULL, 0);

	// Permissions live in the low 12 bits; any address bits mean the
	// argument is not a pure permission mask.
	perms = (uintptr_t) strtol(argv[2], NULL, 0);
	if (PTE_ADDR(perms)) {
		cprintf("perms 0x%03x is invalid\n", perms);
		return 0;
	}

	// Walk the *current* page directory (from CR3), without creating.
	pgdir = (pde_t *) KADDR(rcr3());
	pte = pgdir_walk(pgdir, (void *) va_arg, 0);
	if (!pte) {
		cprintf("va 0x%x is not mapped\n", va_arg);
		return 0;
	}

	// BUG FIX: these two cprintf calls had a "%x" with no matching
	// argument and printed stack garbage; pass va_arg explicitly.
	cprintf("va 0x%x existing pte ", va_arg);
	pte_show(*pte);
	*pte = PTE_ADDR(*pte) | perms;
	cprintf("va 0x%x changed pte ", va_arg);
	pte_show(*pte);
	return 0;
}
Esempio n. 13
0
/* Return the page mapped at virtual address 'va', or 0 if none.
 * If 'ppte' is non-null, also store the address of va's PTE there
 * (only when a valid mapping exists). */
struct Page *
page_lookup(Pde *pgdir, u_long va, Pte **ppte)
{
	Pte *entry;

	/* Probe only: never allocate a page table during a lookup. */
	pgdir_walk(pgdir, va, 0, &entry);

	if (entry == 0 || (*entry & PTE_V) == 0) {
		return 0;	/* unmapped, or mapping not valid in memory */
	}

	if (ppte) {
		*ppte = entry;
	}

	return pa2page(*entry);
}
Esempio n. 14
0
int
mon_showvm(int argc, char **argv, struct Trapframe *tf)
{
	uintptr_t begin_va, end_va, va, next_va;
	pde_t *pgdir;
	pte_t *pte;
	int i, j;

	if (argc != 3) {
		cprintf("usage: dumpvm begin_va end_va\n");
		return 0;
	}

	begin_va = (uintptr_t) strtol(argv[1], NULL, 0);
	end_va = (uintptr_t) strtol(argv[2], NULL, 0);
	if (begin_va > end_va) {
		cprintf("begin va (0x%x) is greater than end va (0x%x)\n", begin_va, end_va);
		return 0;
	}

	pgdir = (pde_t *) KADDR(rcr3());
	for (i = j = 0, va = begin_va; va <= end_va; va += PGSIZE) {
		pte = pgdir_walk(pgdir, (void *) va, 0);
		next_va = MIN(ROUNDDOWN(va + PGSIZE, PGSIZE) - 1, end_va);
		show_page(pgdir, va, next_va, *pte && (*pte & PTE_P), &i, &j);
	}
	if (i)
		cprintf("\n");
	return 0;
}
Esempio n. 15
0
//
// Allocate len bytes of physical memory for environment env,
// and map it at virtual address va in the environment's address space.
// Does not zero or otherwise initialize the mapped pages in any way.
// Pages should be writable by user and kernel.
// Panic if any allocation attempt fails.
//
//
// Allocate len bytes of physical memory for environment e and map them
// at virtual address va (rounded to page boundaries).  Pages are
// writable by user and kernel.  Panics if any allocation fails.
// Per the spec, the mapped pages are NOT zeroed or initialized.
//
// BUG FIXES:
//  - the old code did memset(p, 0, sizeof(struct Page)), which zeroes
//    the page *metadata* (clobbering pp_link/pp_ref bookkeeping), not
//    the frame -- and the contract says not to zero anything anyway;
//  - pgdir_walk's result was dereferenced without a NULL check, so a
//    failed page-table allocation crashed instead of panicking cleanly;
//  - removed the unused/shadowed 'i' and 'j' locals.
//
static void
segment_alloc(struct Env *e, void *va, size_t len)
{
    pde_t *pgdir = e->env_pgdir;

    if (pgdir == NULL)
        return;

    // Round va down and (va + len) up so partial pages are covered.
    uint32_t saddr = (uint32_t)ROUNDDOWN(va, PGSIZE);
    uint32_t eaddr = (uint32_t)ROUNDUP(va + len, PGSIZE);
    uint32_t addr;

    for (addr = saddr; addr < eaddr; addr += PGSIZE) {
        pte_t *pte = pgdir_walk(pgdir, (void *)addr, 1);
        if (pte == NULL)
            panic("Out of memory");         // page-table allocation failed
        if (!(*pte & PTE_P)) {
            struct Page *p;
            if (page_alloc(&p)) {
                panic("Out of memory");
            }
            p->pp_ref += 1;
            *pte = page2pa(p) | PTE_P | PTE_W | PTE_U;
        }
        // Ensure both the PTE and the PDE carry user/write/present.
        *pte = *pte | PTE_P | PTE_W | PTE_U;
        pgdir[PDX(addr)] = pgdir[PDX(addr)] | PTE_P | PTE_W | PTE_U;
    }
}
Esempio n. 16
0
// Set envid's trap frame to 'tf'.
// tf is modified to make sure that user environments always run at code
// protection level 3 (CPL 3) with interrupts enabled.
//
// Returns 0 on success, < 0 on error.  Errors are:
//	-E_BAD_ENV if environment envid doesn't currently exist,
//		or the caller doesn't have permission to change envid.
// Set envid's trap frame to 'tf', forcing user environments to run at
// CPL 3 with interrupts enabled and no I/O privilege.
//
// Returns 0 on success, -E_BAD_ENV if envid doesn't exist or the caller
// lacks permission, -E_INVAL if 'tf' isn't mapped in the caller.
static int
sys_env_set_trapframe(envid_t envid, struct Trapframe *tf)
{
	struct Env *target;
	pte_t *pte;

	/* Refuse to read a trap frame from memory the caller can't access. */
	user_mem_assert(curenv, tf, sizeof(struct Trapframe), 0);
	if (envid2env(envid, &target, 1) != 0)
		return -E_BAD_ENV;

	pte = pgdir_walk(curenv->env_pgdir, (void *)tf, 0);
	if (pte == NULL)
		return -E_INVAL;

	target->env_tf = *tf;

	/* Force ring-3 selectors, enable interrupts, clear IOPL. */
	target->env_tf.tf_ds |= 3;
	target->env_tf.tf_es |= 3;
	target->env_tf.tf_ss |= 3;
	target->env_tf.tf_cs |= 3;
	target->env_tf.tf_eflags |= FL_IF;
	target->env_tf.tf_eflags &= ~(FL_IOPL_MASK);
	return 0;
}
Esempio n. 17
0
//
// Map the physical page 'pp' at virtual address 'va'.
// The permissions (the low 12 bits) of the page table entry
// should be set to 'perm|PTE_P'.
//
// Requirements
//   - If there is already a page mapped at 'va', it should be page_remove()d.
//   - If necessary, on demand, a page table should be allocated and inserted
//     into 'pgdir'.
//   - pp->pp_ref should be incremented if the insertion succeeds.
//   - The TLB must be invalidated if a page was formerly present at 'va'.
//
// Corner-case hint: Make sure to consider what happens when the same
// pp is re-inserted at the same virtual address in the same pgdir.
// Don't be tempted to write special-case code to handle this
// situation, though; there's an elegant way to address it.
//
// RETURNS:
//   0 on success
//   -E_NO_MEM, if page table couldn't be allocated
//
// Hint: The TA solution is implemented using pgdir_walk, page_remove,
// and page2pa.
//
//
// Map the physical page 'pp' at virtual address 'va' with permissions
// 'perm | PTE_P', allocating a page table on demand.
//
// Returns 0 on success, -E_NO_MEM if a page table couldn't be
// allocated.  Re-inserting the page already mapped at 'va' keeps its
// refcount unchanged; a different page is page_remove()d first.
//
int
page_insert(pde_t *pgdir, struct Page *pp, void *va, int perm)
{
	pte_t *entry = pgdir_walk(pgdir, va, 1);
	if (entry == NULL)
		return -E_NO_MEM;

	bool same_page = 0;
	if (*entry & PTE_P) {
		if (page_lookup(pgdir, va, 0) == pp) {
			/* Same frame re-inserted: flush the stale TLB entry
			 * and keep the refcount as-is. */
			tlb_invalidate(pgdir, va);
			same_page = 1;
		} else {
			/* A different frame lives here; evict it first. */
			page_remove(pgdir, va);
		}
	}

	*entry = page2pa(pp) | perm | PTE_P;
	if (!same_page)
		pp->pp_ref++;
	return 0;
}
Esempio n. 18
0
//
// Check that an environment is allowed to access the range of memory
// [va, va+len) with permissions 'perm | PTE_P'.
// Normally 'perm' will contain PTE_U at least, but this is not required.
// 'va' and 'len' need not be page-aligned; you must test every page that
// contains any of that range.  You will test either 'len/PGSIZE',
// 'len/PGSIZE + 1', or 'len/PGSIZE + 2' pages.
//
// A user program can access a virtual address if (1) the address is below
// ULIM, and (2) the page table gives it permission.  These are exactly
// the tests you should implement here.
//
// If there is an error, set the 'user_mem_check_addr' variable to the first
// erroneous virtual address.
//
// Returns 0 if the user program can access this range of addresses,
// and -E_FAULT otherwise.
//
//
// Check that 'env' may access [va, va+len) with permissions
// 'perm | PTE_P'.  Sets 'user_mem_check_addr' to the FIRST erroneous
// address and returns -E_FAULT on failure, 0 on success.
//
// BUG FIX: the old version checked 'top >= ULIM' up front and reported
// ULIM before scanning, so (a) a permission failure at a *lower*
// address was never reported as the first error, and (b) a range ending
// exactly at ULIM (all bytes below ULIM, which is legal) was rejected
// because ROUNDUP made top == ULIM.  The ULIM test now runs inside the
// ascending per-page scan.
//
int
user_mem_check(struct Env *env, const void *va, size_t len, int perm)
{
	uint32_t bot = (uint32_t)ROUNDDOWN(va,PGSIZE);
	uint32_t top = (uint32_t)ROUNDUP(va+len,PGSIZE);
	uint32_t i;

	for (i = bot; i < top; i += PGSIZE)
	{
		pte_t* pte;
		// Check ULIM first: kernel pages may carry the requested bits
		// but are still off limits.  ULIM is page-aligned, so testing
		// the page base is exact.
		if (i >= (uint32_t)ULIM ||
		    (pte = pgdir_walk(env->env_pgdir,(void*)i,0)) == NULL ||
		    (*pte & (perm | PTE_P)) != (perm | PTE_P))
		{
			// Report va itself when the failing page contains va.
			if (i > (uint32_t)va)
				user_mem_check_addr = i;
			else
				user_mem_check_addr = (uint32_t)va;
			return -E_FAULT;
		}
	}
	return 0;
}
Esempio n. 19
0
//
// Map the physical page 'pp' at virtual address 'va'.
// The permissions (the low 12 bits) of the page table entry
// should be set to 'perm|PTE_P'.
//
// Requirements
//   - If there is already a page mapped at 'va', it should be page_remove()d.
//   - If necessary, on demand, a page table should be allocated and inserted
//     into 'pgdir'.
//   - pp->pp_ref should be incremented if the insertion succeeds.
//   - The TLB must be invalidated if a page was formerly present at 'va'.
//
// Corner-case hint: Make sure to consider what happens when the same
// pp is re-inserted at the same virtual address in the same pgdir.
// However, try not to distinguish this case in your code, as this
// frequently leads to subtle bugs; there's an elegant way to handle
// everything in one code path.
//
// RETURNS:
//   0 on success
//   -E_NO_MEM, if page table couldn't be allocated
//
// Hint: The TA solution is implemented using pgdir_walk, page_remove,
// and page2pa.
//
//
// Map the physical page 'pp' at virtual address 'va' with permissions
// 'perm | PTE_P', allocating a page table on demand.
//
// Returns 0 on success, -E_NO_MEM if a page table couldn't be allocated.
//
// BUG FIX: when the same page was re-inserted at the same va, the old
// code rewrote the PTE with new permissions but never called
// tlb_invalidate, leaving a stale TLB entry with the OLD permissions.
// This version uses the single elegant code path (ref++ before
// page_remove) so the TLB is always flushed and the same-page case
// cannot free the page prematurely.
//
int
page_insert(pde_t *pgdir, struct PageInfo *pp, void *va, int perm)
{
	pte_t *entry = pgdir_walk(pgdir, va, 1);
	if (entry == NULL)
		return -E_NO_MEM;

	// Increment first: if the same pp is already mapped here,
	// page_remove below would otherwise drop the count to 0 and free it.
	pp->pp_ref++;
	if (*entry & PTE_P)
		page_remove(pgdir, va);	// drops old ref, invalidates TLB

	*entry = page2pa(pp) | perm | PTE_P;
	tlb_invalidate(pgdir, va);
	return 0;
}
Esempio n. 20
0
//
// Map [va, va+size) of virtual address space to physical [pa, pa+size)
// in the page table rooted at pgdir.  Size is a multiple of PGSIZE.
// Use permission bits perm|PTE_P for the entries.
//
// This function is only intended to set up the ``static'' mappings
// above UTOP. As such, it should *not* change the pp_ref field on the
// mapped pages.
//
// Hint: the TA solution uses pgdir_walk
//
// Map [va, va+size) of virtual address space to physical [pa, pa+size)
// in the page table rooted at pgdir, with permissions perm|PTE_P.
// size, va and pa must be PGSIZE-aligned.  Does not touch pp_ref
// (intended only for static mappings above UTOP).
//
void
boot_map_region(pde_t *pgdir, uintptr_t va, size_t size, physaddr_t pa, int perm)
{
	assert(size % PGSIZE == 0);
	assert(va % PGSIZE == 0);
	assert(pa % PGSIZE == 0);

	uintptr_t first_va = va;

	while (size > 0) {
		/* Stop if va wrapped past the top of the address space. */
		if (va < first_va)
			break;

		/* create=1: build intermediate page tables on demand. */
		pte_t *entry = pgdir_walk(pgdir, (void *)va, 1);
		if (entry == NULL)
			return;	/* could not allocate a page table */

		*entry = pa | perm | PTE_P;

		va += PGSIZE;
		pa += PGSIZE;
		size -= PGSIZE;
	}
}
Esempio n. 21
0
//
// Map the physical page 'pp' at virtual address 'va'.
// The permissions (the low 12 bits) of the page table entry
// should be set to 'perm|PTE_P'.
//
// Requirements
//   - If there is already a page mapped at 'va', it should be page_remove()d.
//   - If necessary, on demand, a page table should be allocated and inserted
//     into 'pgdir'.
//   - pp->pp_ref should be incremented if the insertion succeeds.
//   - The TLB must be invalidated if a page was formerly present at 'va'.
//
// Corner-case hint: Make sure to consider what happens when the same
// pp is re-inserted at the same virtual address in the same pgdir.
// However, try not to distinguish this case in your code, as this
// frequently leads to subtle bugs; there's an elegant way to handle
// everything in one code path.
//
// RETURNS:
//   0 on success
//   -E_NO_MEM, if page table couldn't be allocated
//
// Hint: The TA solution is implemented using pgdir_walk, page_remove,
// and page2pa.
//
//
// Map the physical page 'pp' at virtual address 'va' with permissions
// 'perm | PTE_P', allocating a page table on demand.
//
// Returns 0 on success, -E_NO_MEM if pgdir/pp is NULL or a page table
// couldn't be allocated.
//
// BUG FIX: when the same page was re-inserted at the same va, the old
// code rewrote the PTE with new permissions and returned WITHOUT
// invalidating the TLB, leaving a stale entry with the old permissions.
// The single elegant code path below (ref++ before page_remove) flushes
// the TLB in every case and cannot free pp prematurely.
//
int
page_insert(pde_t *pgdir, struct PageInfo *pp, void *va, int perm)
{
	if(!pgdir)
		return -E_NO_MEM;
	if(!pp)
		return -E_NO_MEM;
	pte_t *pt = pgdir_walk(pgdir, va, 1);
	if(!pt)
		return -E_NO_MEM;

	// Increment first so removing an identical old mapping cannot drop
	// the refcount to zero and free the page.
	pp->pp_ref++;
	if (pt[0] & PTE_P)
		page_remove(pgdir, va);	// unmaps and invalidates the TLB

	pt[0] = page2pa(pp) | perm | PTE_P;
	tlb_invalidate(pgdir, va);
	return 0;
}
Esempio n. 22
0
File: pmap.c Progetto: ajsbu/cse506
// Given a pdpe i.e page directory pointer pdpe_walk returns the pointer to page table entry
// The programming logic in this function is similar to pml4e_walk.
// It calls the pgdir_walk which returns the page_table entry pointer.
// Hints are the same as in pml4e_walk
pte_t *
pdpe_walk(pdpe_t *pdpe,const void *va,int create){

	struct Page *newPage = NULL;
	// A zero PDPE means no page directory covers this region yet.
	if (!pdpe[PDPE(va)]) {

		if (!create)
			return NULL;
		else {
			// Allocate a fresh page-directory page.
			newPage = page_alloc(0);

			if (newPage == 0) {
				return NULL;
			} else {
				newPage->pp_ref++;
				// Install it user/write/present; the final
				// access rights are governed by the leaf PTE.
				pdpe[PDPE(va)] = page2pa(newPage) | PTE_U | PTE_W | PTE_P;
				memset(page2kva(newPage), 0x00, PGSIZE);
			}
		}
	}

	// Descend into the page directory and continue the walk below.
	pde_t *pde = (pde_t *)(KADDR(PTE_ADDR(pdpe[PDPE(va)])));
	pte_t *result = pgdir_walk(pde, va, create);

	// Roll back: if the lower-level walk failed AND we allocated the
	// page directory ourselves, undo the allocation so an empty table
	// doesn't leak (pp_ref is reset so page_free accepts the page).
	if (!result && newPage) {
		pdpe[PDPE(va)] = 0;
		newPage->pp_ref = 0;
		page_free(newPage);
	}

	return result;
}
Esempio n. 23
0
//
// Map the physical page 'pp' at virtual address 'va'.
// The permissions (the low 12 bits) of the page table entry
// should be set to 'perm|PTE_P'.
//
// Requirements
//   - If there is already a page mapped at 'va', it should be page_remove()d.
//   - If necessary, on demand, a page table should be allocated and inserted
//     into 'pgdir'.
//   - pp->pp_ref should be incremented if the insertion succeeds.
//   - The TLB must be invalidated if a page was formerly present at 'va'.
//
// Corner-case hint: Make sure to consider what happens when the same
// pp is re-inserted at the same virtual address in the same pgdir.
// However, try not to distinguish this case in your code, as this
// frequently leads to subtle bugs; there's an elegant way to handle
// everything in one code path.
//
// RETURNS:
//   0 on success
//   -E_NO_MEM, if page table couldn't be allocated
//
// Hint: The TA solution is implemented using pgdir_walk, page_remove,
// and page2pa.
//
int
page_insert(pde_t *pgdir, struct PageInfo *pp, void *va, int perm)
{
	// Fill this function in
  pte_t * target_pte;
  int mapped;
  physaddr_t old_paddr, new_paddr;

  target_pte = pgdir_walk(pgdir, va, 1);

  if (target_pte == NULL) {
    return -E_NO_MEM;
  }

  old_paddr = PTE_ADDR(*target_pte);
  new_paddr = page2pa(pp);
  mapped = *target_pte & PTE_P;

  if (mapped && old_paddr != new_paddr) {
    page_remove(pgdir, va);
  }

  if (!(mapped && old_paddr == new_paddr)) {
    pp->pp_ref++;
  }

  *target_pte = new_paddr | perm | PTE_P;
  
  return 0;
}
Esempio n. 24
0
//
// Return the page mapped at virtual address 'va'.
// If pte_store is not zero, then we store in it the address
// of the pte for this page.  This is used by page_remove and
// can be used to verify page permissions for syscall arguments,
// but should not be used by most callers.
//
// Return NULL if there is no page mapped at va.
//
// Hint: the TA solution uses pgdir_walk and pa2page.
//
struct PageInfo *
page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
	// Probe only (create = 0): a lookup must not allocate page tables.
	pte_t *entry = pgdir_walk(pgdir, va, 0);
	if (entry == NULL)
		return NULL;		// no page table covers va
	if (!(*entry & PTE_P))
		return NULL;		// table exists, but va is unmapped
	if (pte_store)
		*pte_store = entry;	// hand the PTE address back
	return pa2page(PTE_ADDR(*entry));
}
Esempio n. 25
0
//
// Return the page mapped at virtual address 'va'.
// If pte_store is not zero, then we store in it the address
// of the pte for this page.  This is used by page_remove and
// can be used to verify page permissions for syscall arguments,
// but should not be used by most callers.
//
// Return NULL if there is no page mapped at va.
//
// Hint: the TA solution uses pgdir_walk and pa2page.
//
struct PageInfo *
page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
   /* Find va's PTE without allocating; NULL means no page table. */
   pte_t *p = pgdir_walk(pgdir, va, 0);
   struct PageInfo *result = NULL;
   if (p != NULL && (*p & PTE_P)) {
       /* A present mapping exists: optionally expose the PTE address. */
       if (pte_store != NULL)
           *pte_store = p;
       result = pa2page(PTE_ADDR(*p));
   }
   return result;
}
Esempio n. 26
0
//
// Return the page mapped at virtual address 'va'.
// If pte_store is not zero, then we store in it the address
// of the pte for this page.  This is used by page_remove and
// can be used to verify page permissions for syscall arguments,
// but should not be used by most callers.
//
// Return NULL if there is no page mapped at va.
//
// Hint: the TA solution uses pgdir_walk and pa2page.
//
struct PageInfo *
page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
	// Look up va's PTE without creating page tables (create = 0).
	pte_t *entry = pgdir_walk(pgdir, va, 0);
	// Note: the PTE pointer is handed back even when no page is mapped
	// (it may be NULL) -- callers must check the return value first.
	if (pte_store != NULL)
		*pte_store = entry;
	if (entry == NULL)
		return NULL;
	if ((*entry & PTE_P) == 0)
		return NULL;
	return pa2page(PTE_ADDR(*entry));
}
Esempio n. 27
0
/**
 * @brief Return the page mapped at virtual address 'va' in 
 * page directory 'pgdir'.
 *
 * If pte_store is not NULL, then we store in it the address
 * of the pte for this page.  This is used by page_remove
 * but should not be used by other callers.
 *
 * For jumbos, right now this returns the first Page* in the 4MB range
 *
 * @param[in]  pgdir     the page directory from which we should do the lookup
 * @param[in]  va        the virtual address of the page we are looking up
 * @param[out] pte_store the address of the page table entry for the returned page
 *
 * @return PAGE the page mapped at virtual address 'va'
 * @return NULL No mapping exists at virtual address 'va', or it's paged out
 */
page_t *page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
	/* create = 0: never grow the paging structures during a lookup. */
	pte_t *entry = pgdir_walk(pgdir, va, 0);
	if (entry == 0)
		return 0;		/* no page table covers va */
	if (!PAGE_PRESENT(*entry))
		return 0;		/* table exists, but va is unmapped */
	if (pte_store)
		*pte_store = entry;	/* expose the PTE to the caller */
	return pa2page(PTE_ADDR(*entry));
}
Esempio n. 28
0
File: pmap.c Progetto: yuki252111/os
//
// Return the page mapped at virtual address 'va'.
// If pte_store is not zero, then we store in it the address
// of the pte for this page.  This is used by page_remove and
// can be used to verify page permissions for syscall arguments,
// but should not be used by most callers.
//
// Return NULL if there is no page mapped at va.
//
// Hint: the TA solution uses pgdir_walk and pa2page.
//
struct Page *
page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
	pte_t *entry = pgdir_walk(pgdir, va, 0);

	/* The caller gets the PTE pointer back even if it is NULL. */
	if (pte_store)
		*pte_store = entry;

	/* Only a present mapping yields a page. */
	if (entry == NULL || !(*entry & PTE_P))
		return NULL;

	return pa2page(PTE_ADDR(*entry));
}
Esempio n. 29
0
//
// Map [va, va+size) of virtual address space to physical [pa, pa+size)
// in the page table rooted at pgdir.  Size is a multiple of PGSIZE, and
// va and pa are both page-aligned.
// Use permission bits perm|PTE_P for the entries.
//
// This function is only intended to set up the ``static'' mappings
// above UTOP. As such, it should *not* change the pp_ref field on the
// mapped pages.
//
// Hint: the TA solution uses pgdir_walk
// Map [va, va+size) to physical [pa, pa+size) in pgdir with permissions
// perm|PTE_P.  size is a multiple of PGSIZE; va and pa are page-aligned.
// Does not touch pp_ref (static mappings above UTOP only).
//
// BUG FIX: the old code dereferenced pgdir_walk's result without
// checking for NULL, so a failed page-table allocation crashed with a
// page fault instead of a diagnostic; it also used an 'int' counter for
// a size_t-derived bound.
static void
boot_map_region(pde_t *pgdir, uintptr_t va, size_t size, physaddr_t pa, int perm)
{
  size_t off;
  for (off = 0; off < size; off += PGSIZE) {
      // create=1: build intermediate page tables on demand.
      pte_t *pte = pgdir_walk(pgdir, (void *)(va + off), 1);
      if (pte == NULL)
          panic("boot_map_region: out of memory allocating page table");
      *pte = (pa + off) | perm | PTE_P;
  }
}
Esempio n. 30
0
/* Pin the user page backing 'uvastart' in process 'p', storing it in
 * plist[0].  Returns 1 on success, -1 on failure.
 *
 * If no page is mapped yet, one is allocated and installed RW/user.
 * Pagemap (file-backed) pages are refused, as are read-only mappings
 * when 'write' is requested.
 *
 * NOTE(review): 'force' is currently unused -- see the TODO below. */
int get_user_page(struct proc *p, unsigned long uvastart, int write, int force,
    struct page **plist)
{
	pte_t		pte;
	int		ret = -1;
	struct page	*pp;

	spin_lock(&p->pte_lock);

	/* Walk with create=TRUE so missing intermediate tables are built. */
	pte = pgdir_walk(p->env_pgdir, (void*)uvastart, TRUE);

	if (!pte_walk_okay(pte))
		goto err1;

	if (!pte_is_present(pte)) {
		/* No page mapped yet: install a fresh one, RW + user. */
		unsigned long prot = PTE_P | PTE_U | PTE_A | PTE_W | PTE_D;
#if 0
		printk("[akaros]: get_user_page() uva=0x%llx pte absent\n",
		    uvastart);
#endif
		/*
		 * TODO: ok to allocate with pte_lock? "prot" needs to be
		 * based on VMR writability, refer to pgprot_noncached().
		 */
		if (upage_alloc(p, &pp, 0))
			goto err1;
		pte_write(pte, page2pa(pp), prot);
	} else {
		pp = pa2page(pte_get_paddr(pte));

		/* __vmr_free_pgs() refcnt's pagemap pages differently */
		if (atomic_read(&pp->pg_flags) & PG_PAGEMAP) {
			printk("[akaros]: get_user_page(): uva=0x%llx\n",
			    uvastart);
			goto err1;
		}
	}

	/* Writers need a user-RW mapping; refuse read-only pages. */
	if (write && (!pte_has_perm_urw(pte))) {
		/* TODO: How is Linux using the "force" parameter */
		printk("[akaros]: get_user_page() uva=0x%llx pte ro\n",
		    uvastart);
		goto err1;
	}

	/* TODO (GUP): change the interface such that devices provide the memory and
	 * the user mmaps it, instead of trying to pin arbitrary user memory. */
	warn_once("Extremely unsafe, unpinned memory mapped!  If your process dies, you might scribble on RAM!");

	plist[0] = pp;
	ret = 1;
err1:
	spin_unlock(&p->pte_lock);
	return ret;
}