Example 1
File: pmap.c Project: yahu/JOS
//
// Check that the pages on the page_free_list are reasonable.
//
static void
check_page_free_list(bool only_low_memory)
{

	struct Page *pp;
	unsigned pdx_limit = only_low_memory ? 1 : NPDENTRIES;
	int nfree_basemem = 0, nfree_extmem = 0;
	char *first_free_page;

	if (!page_free_list)
		panic("'page_free_list' is a null pointer!");

	if (only_low_memory) {
		// Move pages with lower addresses first in the free
		// list, since entry_pgdir does not map all pages.
		struct Page *pp1, *pp2;
		struct Page **tp[2] = { &pp1, &pp2 };
		for (pp = page_free_list; pp; pp = pp->pp_link) {
			int pagetype = PDX(page2pa(pp)) >= pdx_limit;
			*tp[pagetype] = pp;
			tp[pagetype] = &pp->pp_link;
		}
		*tp[1] = 0;
		*tp[0] = pp2;
		page_free_list = pp1;
	}

	// if there's a page that shouldn't be on the free list,
	// try to make sure it eventually causes trouble.
	for (pp = page_free_list; pp; pp = pp->pp_link)
		if (PDX(page2pa(pp)) < pdx_limit)
			memset(page2kva(pp), 0x97, 128);

	first_free_page = (char *) boot_alloc(0);
	for (pp = page_free_list; pp; pp = pp->pp_link) {
		// check that we didn't corrupt the free list itself
		assert(pp >= pages);
		assert(pp < pages + npages);
		assert(((char *) pp - (char *) pages) % sizeof(*pp) == 0);

		// check a few pages that shouldn't be on the free list
		assert(page2pa(pp) != 0);
		assert(page2pa(pp) != IOPHYSMEM);
		assert(page2pa(pp) != EXTPHYSMEM - PGSIZE);
		assert(page2pa(pp) != EXTPHYSMEM);
		assert(page2pa(pp) < EXTPHYSMEM || (char *) page2kva(pp) >= first_free_page);
		// (new test for lab 4)
		assert(page2pa(pp) != MPENTRY_PADDR);

		if (page2pa(pp) < EXTPHYSMEM)
			++nfree_basemem;
		else
			++nfree_extmem;
	}

	assert(nfree_basemem > 0);
	assert(nfree_extmem > 0);

}
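The two-element tp array above is the tail-pointer idiom for a stable, single-pass partition of a singly linked list. A minimal self-contained sketch of the same idiom, using a hypothetical node type rather than JOS's struct Page:

struct node { int val; struct node *next; };

// Stable partition: even values first, then odd values, preserving the
// relative order within each class -- exactly how check_page_free_list()
// moves low-memory pages to the front of the free list.
static struct node *
partition_even_odd(struct node *head)
{
	struct node *even = 0, *odd = 0;
	struct node **tail[2] = { &even, &odd };
	struct node *n;

	for (n = head; n; n = n->next) {
		int cls = n->val & 1;		// 0 = even, 1 = odd
		*tail[cls] = n;			// append n to its class's list
		tail[cls] = &n->next;		// advance that class's tail
	}
	*tail[1] = 0;				// terminate the second list
	*tail[0] = odd;				// splice: evens, then odds
	return even;
}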
Example 2
File: pmap.c Project: yahu/JOS
static void
check_kern_pgdir(void)
{

	uint32_t i, n;
	pde_t *pgdir;

	pgdir = kern_pgdir;

	// check pages array
	n = ROUNDUP(npages*sizeof(struct Page), PGSIZE);
	for (i = 0; i < n; i += PGSIZE)
		assert(check_va2pa(pgdir, UPAGES + i) == PADDR(pages) + i);

	// check envs array (new test for lab 3)
	n = ROUNDUP(NENV*sizeof(struct Env), PGSIZE);
	for (i = 0; i < n; i += PGSIZE)
		assert(check_va2pa(pgdir, UENVS + i) == PADDR(envs) + i);

	// check phys mem
	for (i = 0; i < npages * PGSIZE; i += PGSIZE)
		assert(check_va2pa(pgdir, KERNBASE + i) == i);

	// check IO mem (new in lab 4)
	for (i = IOMEMBASE; i < -PGSIZE; i += PGSIZE)	// -PGSIZE wraps to 0xfffff000: walk to the top of the VA space
		assert(check_va2pa(pgdir, i) == i);

	// check kernel stack
	// (updated in lab 4 to check per-CPU kernel stacks)
	for (n = 0; n < NCPU; n++) {
		uint32_t base = KSTACKTOP - (KSTKSIZE + KSTKGAP) * (n + 1);
		for (i = 0; i < KSTKSIZE; i += PGSIZE)
			assert(check_va2pa(pgdir, base + KSTKGAP + i)
				== PADDR(percpu_kstacks[n]) + i);
		for (i = 0; i < KSTKGAP; i += PGSIZE)
			assert(check_va2pa(pgdir, base + i) == ~0);
	}

	// check PDE permissions
	for (i = 0; i < NPDENTRIES; i++) {
		switch (i) {
		case PDX(UVPT):
		case PDX(KSTACKTOP-1):
		case PDX(UPAGES):
		case PDX(UENVS):
			assert(pgdir[i] & PTE_P);
			break;
		default:
			if (i >= PDX(KERNBASE)) {
				assert(pgdir[i] & PTE_P);
				assert(pgdir[i] & PTE_W);
			} else
				assert(pgdir[i] == 0);
			break;
		}
	}
	cprintf("check_kern_pgdir() succeeded!\n");

}
Example 3
File: pmm.c Project: TySag/project
//pmm_init - setup a pmm to manage physical memory, build PDT&PT to setup paging mechanism 
//         - check the correctness of pmm & paging mechanism, print PDT&PT
void pmm_init(void)
{
	//We need to alloc/free the physical memory (granularity is 4KB or other size). 
	//So a framework for a physical memory manager (struct pmm_manager) is defined in pmm.h.
	//First we should init a physical memory manager(pmm) based on the framework.
	//Then pmm can alloc/free the physical memory. 
	//Now the first_fit/best_fit/worst_fit/buddy_system pmm are available.
	init_pmm_manager();

	// detect physical memory space, reserve already used memory,
	// then use pmm->init_memmap to create free page list
	page_init();

	//use pmm->check to verify the correctness of the alloc/free function in a pmm
	check_alloc_page();

	// create boot_pgdir, an initial page directory(Page Directory Table, PDT)
	boot_pgdir = boot_alloc_page();
	memset(boot_pgdir, 0, PGSIZE);
	boot_cr3 = PADDR(boot_pgdir);

	check_pgdir();

	static_assert(KERNBASE % PTSIZE == 0 && KERNTOP % PTSIZE == 0);

	// recursively insert boot_pgdir in itself
	// to form a virtual page table at virtual address VPT
	boot_pgdir[PDX(VPT)] = PADDR(boot_pgdir) | PTE_P | PTE_W;

	// map all physical memory to linear memory with base linear addr KERNBASE
	//linear_addr KERNBASE~KERNBASE+KMEMSIZE = phy_addr 0~KMEMSIZE
	//But shouldn't use this map until enable_paging() & gdt_init() finished.
	boot_map_segment(boot_pgdir, KERNBASE, KMEMSIZE, 0, PTE_W);

	//temporary map: 
	//virtual_addr 3G~3G+4M = linear_addr 0~4M = linear_addr 3G~3G+4M = phy_addr 0~4M   
	boot_pgdir[0] = boot_pgdir[PDX(KERNBASE)];
	boot_pgdir[1] = boot_pgdir[PDX(KERNBASE) + 1];

	enable_paging();

	//reload gdt (third time, the last time) to map all physical memory
	//virtual_addr 0~4G = linear_addr 0~4G
	//then set kernel stack(ss:esp) in TSS, setup TSS in gdt, load TSS
	gdt_init();

	//disable the map of virtual_addr 0~4M
	boot_pgdir[0] = boot_pgdir[1] = 0;

	//now the basic virtual memory map (see memlayout.h) is established.
	//check the correctness of the basic virtual memory map.
	check_boot_pgdir();

	print_pgdir(kprintf);

	slab_init();
}
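The recursive slot installed at PDX(VPT) above is what makes the later self-map checks work (see check_boot_pgdir in Example 22): because the directory doubles as a page table for the VPT region, every PTE becomes readable through one flat window. A sketch of the resulting layout, assuming 32-bit two-level paging; the vpt/vpd names are the usual convention, not taken from this file:

// With boot_pgdir[PDX(VPT)] = PADDR(boot_pgdir) | PTE_P | PTE_W,
// translating an address in [VPT, VPT + 4MB) walks through the
// directory twice, so:
pte_t *vpt = (pte_t *)VPT;                        // every PTE, as a flat array
pde_t *vpd = (pde_t *)(VPT + PDX(VPT) * PGSIZE);  // the PDEs themselves

// The PTE for any va (valid only if its page table is present):
//	if (vpd[PDX(va)] & PTE_P)
//		pte = vpt[va / PGSIZE];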
Example 4
static int
env_setup_vm(struct Env *e)
{
	int i, r;
	struct Page *p = NULL;

	// Allocate a page for the page directory
	if ((r = page_alloc(&p)) < 0)
		return r;


	// Now, set e->env_pgdir and e->env_cr3,
	// and initialize the page directory.
	//
	// Hint:
	//    - Remember that page_alloc doesn't zero the page.
	//    - The VA space of all envs is identical above UTOP
	//	(except at VPT and UVPT, which we've set below).
	//	See inc/memlayout.h for permissions and layout.
	//	Can you use boot_pgdir as a template?  Hint: Yes.
	//	(Make sure you got the permissions right in Lab 2.)
	//    - The initial VA below UTOP is empty.
	//    - You do not need to make any more calls to page_alloc.
	//    - Note: In general, pp_ref is not maintained for
	//	physical pages mapped only above UTOP, but env_pgdir
	//	is an exception -- you need to increment env_pgdir's
	//	pp_ref for env_free to work correctly.
	//    - The functions in kern/pmap.h are handy.

	// LAB 3: Your code here.
	p->pp_ref++;
	e->env_pgdir = page2kva(p);
	e->env_cr3 = page2pa(p);

	// page_alloc doesn't zero the page, so clear the directory first,
	// then copy the kernel mappings (everything at or above UTOP)
	// from boot_pgdir; everything below UTOP stays empty.
	memset(e->env_pgdir, 0, PGSIZE);
	for (i = PDX(UTOP); i < NPDENTRIES; i++)
		e->env_pgdir[i] = boot_pgdir[i];

	// VPT and UVPT map the env's own page table, with
	// different permissions.
	e->env_pgdir[PDX(VPT)]  = e->env_cr3 | PTE_P | PTE_W;
	e->env_pgdir[PDX(UVPT)] = e->env_cr3 | PTE_P | PTE_U;

	return 0;
}
Example 5
int dune_vm_lookup(ptent_t *root, void *va, int create, ptent_t **pte_out)
{
	// XXX: Using PA == VA
	int i, j, k, l;
	ptent_t *pml4 = root, *pdpte, *pde, *pte;

	i = PDX(3, va);
	j = PDX(2, va);
	k = PDX(1, va);
	l = PDX(0, va);

	if (!pte_present(pml4[i])) {
		if (!create)
			return -ENOENT;

		pdpte = alloc_page();
		memset(pdpte, 0, PGSIZE);

		pml4[i] = PTE_ADDR(pdpte) | PTE_DEF_FLAGS;
	} else
		pdpte = (ptent_t*) PTE_ADDR(pml4[i]);

	if (!pte_present(pdpte[j])) {
		if (!create)
			return -ENOENT;

		pde = alloc_page();
		memset(pde, 0, PGSIZE);

		pdpte[j] = PTE_ADDR(pde) | PTE_DEF_FLAGS;
	} else if (pte_big(pdpte[j])) {
		*pte_out = &pdpte[j];
		return 0;
	} else
		pde = (ptent_t*) PTE_ADDR(pdpte[j]);

	if (!pte_present(pde[k])) {
		if (!create)
			return -ENOENT;

		pte = alloc_page();
		memset(pte, 0, PGSIZE);

		pde[k] = PTE_ADDR(pte) | PTE_DEF_FLAGS;
	} else if (pte_big(pde[k])) {
		*pte_out = &pde[k];
		return 0;
	} else
		pte = (ptent_t*) PTE_ADDR(pde[k]);

	*pte_out = &pte[l];
	return 0;
}
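A typical call site, sketched under the snippet's own PA == VA assumption; the helper below is illustrative and not part of the Dune sources:

// Map one 4KB page at va, creating intermediate levels on demand.
static int map_one_page(ptent_t *root, void *va, unsigned long pa)
{
	ptent_t *pte;
	int ret = dune_vm_lookup(root, va, 1 /* create */, &pte);
	if (ret < 0)
		return ret;	// -ENOENT is only possible with create == 0
	*pte = PTE_ADDR(pa) | PTE_DEF_FLAGS;
	return 0;
}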
Example 6
File: mmu.c Project: Shamar/harvey
static PTE
pdeget(uintptr_t va)
{
    PTE *pdp;

    if(va < 0xffffffffc0000000ull)
        panic("pdeget(%#p)", va);

    pdp = (PTE*)(PDMAP+PDX(PDMAP)*4096);

    return pdp[PDX(va)];
}
Example 7
/**
 * Check page table
 */
void check_pgdir(void)
{
	assert(npage <= KMEMSIZE / PGSIZE);
	assert(boot_pgdir != NULL && (uint32_t) PGOFF(boot_pgdir) == 0);
	assert(get_page(boot_pgdir, TEST_PAGE, NULL) == NULL);

	struct Page *p1, *p2;
	p1 = alloc_page();
	assert(page_insert(boot_pgdir, p1, TEST_PAGE, 0) == 0);

	pte_t *ptep, perm;
	assert((ptep = get_pte(boot_pgdir, TEST_PAGE, 0)) != NULL);
	assert(pa2page(*ptep) == p1);
	assert(page_ref(p1) == 1);

	ptep = &((pte_t *) KADDR(PTE_ADDR(boot_pgdir[PDX(TEST_PAGE)])))[1];
	assert(get_pte(boot_pgdir, TEST_PAGE + PGSIZE, 0) == ptep);

	p2 = alloc_page();
	ptep_unmap(&perm);
	ptep_set_u_read(&perm);
	ptep_set_u_write(&perm);
	assert(page_insert(boot_pgdir, p2, TEST_PAGE + PGSIZE, perm) == 0);
	assert((ptep = get_pte(boot_pgdir, TEST_PAGE + PGSIZE, 0)) != NULL);
	assert(ptep_u_read(ptep));
	assert(ptep_u_write(ptep));
	assert(ptep_u_read(&(boot_pgdir[PDX(TEST_PAGE)])));
	assert(page_ref(p2) == 1);

	assert(page_insert(boot_pgdir, p1, TEST_PAGE + PGSIZE, 0) == 0);
	assert(page_ref(p1) == 2);
	assert(page_ref(p2) == 0);
	assert((ptep = get_pte(boot_pgdir, TEST_PAGE + PGSIZE, 0)) != NULL);
	assert(pa2page(*ptep) == p1);
	assert(!ptep_u_read(ptep));

	page_remove(boot_pgdir, TEST_PAGE);
	assert(page_ref(p1) == 1);
	assert(page_ref(p2) == 0);

	page_remove(boot_pgdir, TEST_PAGE + PGSIZE);
	assert(page_ref(p1) == 0);
	assert(page_ref(p2) == 0);

	assert(page_ref(pa2page(boot_pgdir[PDX(TEST_PAGE)])) == 1);
	free_page(pa2page(boot_pgdir[PDX(TEST_PAGE)]));
	boot_pgdir[PDX(TEST_PAGE)] = 0;
	exit_range(boot_pgdir, TEST_PAGE, TEST_PAGE + PGSIZE);

	kprintf("check_pgdir() succeeded.\n");
}
Example 8
File: env.c Project: kay21s/JOS-Lab
//
// Initialize the kernel virtual memory layout for environment e.
// Allocate a page directory, set e->env_pgdir and e->env_cr3 accordingly,
// and initialize the kernel portion of the new environment's address space.
// Do NOT (yet) map anything into the user portion
// of the environment's virtual address space.
//
// Returns 0 on success, < 0 on error.  Errors include:
//	-E_NO_MEM if page directory or table could not be allocated.
//
static int
env_setup_vm(struct Env *e)
{
	int i, r;
	struct Page *p = NULL;

	// Allocate a page for the page directory
	if ((r = page_alloc(&p)) < 0)
		return r;

	// Now, set e->env_pgdir and e->env_cr3,
	// and initialize the page directory.
	//
	// Hint:
	//    - Remember that page_alloc doesn't zero the page.
	//    - The VA space of all envs is identical above UTOP
	//	(except at VPT and UVPT, which we've set below).
	//	See inc/memlayout.h for permissions and layout.
	//	Can you use boot_pgdir as a template?  Hint: Yes.
	//	(Make sure you got the permissions right in Lab 2.)
	//    - The initial VA below UTOP is empty.
	//    - You do not need to make any more calls to page_alloc.
	//    - Note: In general, pp_ref is not maintained for
	//	physical pages mapped only above UTOP, but env_pgdir
	//	is an exception -- you need to increment env_pgdir's
	//	pp_ref for env_free to work correctly.

	// LAB 3: Your code here.
	memset(page2kva(p), 0, PGSIZE);
	e->env_pgdir = page2kva(p);
	e->env_cr3 = page2pa(p);
	p->pp_ref++;

#if 0
	boot_map_segment(e->env_pgdir, UPAGES, ROUNDUP(npage*sizeof(struct Page), PGSIZE), (physaddr_t)PADDR(pages), PTE_U);
	boot_map_segment(e->env_pgdir, UENVS, ROUNDUP(NENV*sizeof(struct Env), PGSIZE), (physaddr_t)PADDR(envs), PTE_U);
	boot_map_segment(e->env_pgdir, KSTACKTOP-KSTKSIZE, KSTKSIZE, (physaddr_t)PADDR(bootstack), PTE_W);
	boot_map_segment(e->env_pgdir, KSTACKTOP-PTSIZE, PTSIZE-KSTKSIZE, 0, 0);
	boot_map_segment(e->env_pgdir, KERNBASE, 0xffffffff-KERNBASE+1, 0, PTE_W);
#else
	for (i=PDX(UTOP); i<NPDENTRIES; i++)
		e->env_pgdir[i] = boot_pgdir[i];
#endif

	// VPT and UVPT map the env's own page table, with
	// different permissions.
	e->env_pgdir[PDX(VPT)]  = e->env_cr3 | PTE_P | PTE_W;
	e->env_pgdir[PDX(UVPT)] = e->env_cr3 | PTE_P | PTE_U;

	return 0;
}
Example 9
//
// Initializes the kernel virtual memory layout for environment e.
//
// Allocates a page directory and initializes it.  Sets
// e->env_cr3 and e->env_pgdir accordingly.
//
// RETURNS
//   0 -- on success
//   <0 -- otherwise 
//
static int
env_setup_vm(struct Env *e)
{
	// Hint:

	int i, r;
	struct Page *p = NULL;

	Pde *pgdir;
	// Allocate a page for the page directory
	if ((r = page_alloc(&p)) < 0)
	{
		panic("env_setup_vm - page_alloc error\n");
		return r;
	}
	p->pp_ref++;
	// Hint:
	//    - The VA space of all envs is identical above UTOP
	//      (except at VPT and UVPT) 
	//    - Use boot_pgdir
	//    - Do not make any calls to page_alloc 
	//    - Note: pp_refcnt is not maintained for physical pages mapped above UTOP.
	pgdir = (Pde *)page2kva(p);
	for (i = 0; i < PDX(UTOP); i++)
		pgdir[i] = 0;
	for (i = PDX(UTOP); i < 1024; i++)
		pgdir[i] = boot_pgdir[i];

	e->env_pgdir = pgdir;
	e->env_cr3 = PADDR(pgdir);

	// ...except at VPT and UVPT, which map the env's own page table.
	boot_map_segment(e->env_pgdir, UVPT, PDMAP, PADDR(pgdir), PTE_R);
	e->env_pgdir[PDX(UVPT)] = e->env_cr3 | PTE_V | PTE_R;
	return 0;
}
Example 10
/* from ../pc: */
void
reboot(void *entry, void *code, ulong size)
{
	// writeconf();		// pass kernel environment to next kernel
	shutdown(0);

	/*
	 * should be the only processor running now
	 */
	print("shutting down...\n");
	delay(200);

	splhi();

	/* turn off buffered serial console */
	serialoq = nil;

	/* shutdown devices */
	chandevshutdown();

#ifdef FUTURE
{
	ulong *pdb;
	/*
	 * Modify the machine page table to directly map the low 4MB of memory
	 * This allows the reboot code to turn off the page mapping
	 */
	pdb = m->pdb;
	pdb[PDX(0)] = pdb[PDX(KZERO)];
	mmuflushtlb(PADDR(pdb));
}
	/* setup reboot trampoline function */
{
	void (*f)(ulong, ulong, ulong) = (void*)REBOOTADDR;

	memmove(f, rebootcode, sizeof(rebootcode));
#else
	USED(entry, code, size);
#endif

	print("rebooting...\n");
#ifdef FUTURE
	/* off we go - never to return */
	(*f)(PADDR(entry), PADDR(code), size);
}
#endif
	setupboot(0);		// reboot, don't halt
	exit(0);
}
Example 11
static void
check_kern_pgdir(void)
{
	uint32_t i, n;
	pde_t *pgdir;

	pgdir = kern_pgdir;

	// check pages array
	n = ROUNDUP(npages*sizeof(struct PageInfo), PGSIZE);
	for (i = 0; i < n; i += PGSIZE)
		assert(check_va2pa(pgdir, UPAGES + i) == PADDR(pages) + i);

	// check envs array (new test for lab 3)
	n = ROUNDUP(NENV*sizeof(struct Env), PGSIZE);
	for (i = 0; i < n; i += PGSIZE)
		assert(check_va2pa(pgdir, UENVS + i) == PADDR(envs) + i);

	// check phys mem
	for (i = 0; i < npages * PGSIZE; i += PGSIZE)
		assert(check_va2pa(pgdir, KERNBASE + i) == i);

	// check kernel stack
	for (i = 0; i < KSTKSIZE; i += PGSIZE)
		assert(check_va2pa(pgdir, KSTACKTOP - KSTKSIZE + i) == PADDR(bootstack) + i);
	assert(check_va2pa(pgdir, KSTACKTOP - PTSIZE) == ~0);

	// check PDE permissions
	for (i = 0; i < NPDENTRIES; i++) {
		switch (i) {
		case PDX(UVPT):
		case PDX(KSTACKTOP-1):
		case PDX(UPAGES):
		case PDX(UENVS):
			assert(pgdir[i] & PTE_P);
			break;
		default:
			if (i >= PDX(KERNBASE)) {
				assert(pgdir[i] & PTE_P);
				assert(pgdir[i] & PTE_W);
			} else {
				assert(pgdir[i] == 0);
			}
			break;
		}
	}
	cprintf("check_kern_pgdir() succeeded!\n");
}
Example 12
// Copy the mappings for shared pages into the child address space.
static int
copy_shared_pages(envid_t child)
{
	// LAB 7: Your code here.

	int dir_index, i, pn;

	for (dir_index = 0; dir_index < PDX(UTOP); dir_index++) {
		// skip page directory entries whose table isn't present
		if (!(vpd[dir_index] & PTE_P))
			continue;
		// duppage every page in this present 4MB region
		for (i = 0; i < NPTENTRIES; i++) {
			pn = dir_index * NPTENTRIES + i;
			duppage(child, pn);
		}
	}

	return 0;
}
Example 13
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns
// a pointer to the page table entry (PTE) for linear address 'va'.
// This requires walking the two-level page table structure.
//
// The relevant page table page might not exist yet.
// If this is true, and create == false, then pgdir_walk returns NULL.
// Otherwise, pgdir_walk allocates a new page table page with page_alloc.
//    - If the allocation fails, pgdir_walk returns NULL.
//    - Otherwise, the new page's reference count is incremented,
//	the page is cleared,
//	and pgdir_walk returns a pointer into the new page table page.
//
// Hint 1: you can turn a Page * into the physical address of the
// page it refers to with page2pa() from kern/pmap.h.
//
// Hint 2: the x86 MMU checks permission bits in both the page directory
// and the page table, so it's safe to leave permissions in the page
// more permissive than strictly necessary.
//
// Hint 3: look at inc/mmu.h for useful macros that manipulate page
// table and page directory entries.
//
pte_t *
pgdir_walk(pde_t *pgdir, const void *va, int create)
{
	// Fill this function in
	struct Page* new_page;
	pde_t* pde = pgdir + PDX(va);
	pte_t* pte;

	// page table already present
	if (*pde & PTE_P) {
		pte = (pte_t*)KADDR(PTE_ADDR(*pde));
		return pte + PTX(va);
	}
	
	// page table absent; create one only if requested
	if (create == 0) {
		return NULL;
	} else {
		new_page = page_alloc(ALLOC_ZERO);
		if (new_page == NULL) {
			return NULL;
		} else {
			new_page->pp_ref++;
			*pde = page2pa(new_page) | PTE_P | PTE_W | PTE_U;
			pte = (pte_t*)KADDR(PTE_ADDR(*pde));
			return pte + PTX(va);
		}
	}
}
Example 14
//pmm_init - setup a pmm to manage physical memory, build PDT&PT to setup paging mechanism 
//         - check the correctness of pmm & paging mechanism, print PDT&PT
void
pmm_init(void) {
	init_pmm_manager ();
	page_init ();
	
#ifndef NOCHECK
	//check_alloc_page();
#endif

	boot_pgdir = boot_alloc_page ();
	memset (boot_pgdir, 0, PGSIZE);
	boot_pgdir_pa = PADDR (boot_pgdir);
	current_pgdir_pa = boot_pgdir_pa;

#ifndef NOCHECK
	//check_pgdir ();
#endif

	static_assert(KERNBASE % PTSIZE == 0 && KERNTOP % PTSIZE == 0);

	boot_pgdir[PDX(VPT)] = PADDR(boot_pgdir) | PTE_P | PTE_SPR_R | PTE_SPR_W | PTE_A | PTE_D;
	boot_map_segment(boot_pgdir, KERNBASE, RAM_SIZE, 0, PTE_SPR_R | PTE_SPR_W | PTE_A | PTE_D);

	enable_paging ();
#ifndef NOCHECK
	//check_boot_pgdir ();
#endif
	
	print_pgdir (kprintf);

	slab_init ();
}
Example 15
// Copy the mappings for shared pages into the child address space.
static int
copy_shared_pages(envid_t child)
{
	int pn, perm;
	int retval = 0;

	// Step through each page below UTOP. If the page is PTE_SHARE,
	//  then copy the mapping of that page to the child environment.
	for(pn = 0; pn < PGNUM(UTOP); pn++) {
		// Check to see if the page directory entry and page table
		//  entry for this page exist, and if the page is marked
		//  PTE_SHARE.
		if((uvpd[PDX(pn*PGSIZE)]&PTE_P) == 0 ||
		   (uvpt[pn]&PTE_P) == 0 ||
		   (uvpt[pn]&PTE_SHARE) == 0)
			continue;

		// Grab the permissions for the page
		perm = uvpt[pn]&PTE_SYSCALL;

		// Copy the current page number over
		if((retval = sys_page_map(0, (void *)(pn*PGSIZE), child, (void *)(pn*PGSIZE), perm)) != 0)
			break;
	}

	return retval;
}
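The uvpd/uvpt pair used here is the user-level, read-only view of the recursive page-directory mapping from the earlier examples: on 32-bit x86 with 1024 PTEs per table, page number pn's governing PDE is uvpd[pn >> 10] and its PTE is uvpt[pn]. A minimal sketch of the presence test these loops depend on, with JOS-style names assumed:

// Nonzero iff page number pn is mapped.  The PDE must be checked
// first: reading uvpt[pn] faults if the page table page is absent.
static int
page_present(unsigned pn)
{
	return (uvpd[pn >> 10] & PTE_P) && (uvpt[pn] & PTE_P);
}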
Example 16
File: fd.c Project: rbowden91/fp261
// Make file descriptor 'newfdnum' a duplicate of file descriptor 'oldfdnum'.
// For instance, writing onto either file descriptor will affect the
// file and the file offset of the other.
// Closes any previously open file descriptor at 'newfdnum'.
// This is implemented using virtual memory tricks (of course!).
int
dup(int oldfdnum, int newfdnum)
{
	int r;
	char *ova, *nva;
	struct Fd *oldfd, *newfd;

	if ((r = fd_lookup(oldfdnum, &oldfd, true)) < 0
	    || (r = fd_lookup(newfdnum, &newfd, false)) < 0)
		return r;
	close(newfdnum);

	ova = fd2data(oldfd);
	nva = fd2data(newfd);

	if ((vpd[PDX(ova)] & PTE_P) && (vpt[PGNUM(ova)] & PTE_P))
		if ((r = sys_page_map(0, ova, 0, nva, vpt[PGNUM(ova)] & PTE_SYSCALL)) < 0)
			goto err;
	if ((r = sys_page_map(0, oldfd, 0, newfd, vpt[PGNUM(oldfd)] & PTE_SYSCALL)) < 0)
		goto err;

	return newfdnum;

err:
	sys_page_unmap(0, newfd);
	sys_page_unmap(0, nva);
	return r;
}
Example 17
File: fd.c Project: Hisham-A/JOS
// Make file descriptor 'newfdnum' a duplicate of file descriptor 'oldfdnum'.
// For instance, writing onto either file descriptor will affect the
// file and the file offset of the other.
// Closes any previously open file descriptor at 'newfdnum'.
// This is implemented using virtual memory tricks (of course!).
int
dup(int oldfdnum, int newfdnum)
{
	int r;
	char *ova, *nva;
	struct Fd *oldfd, *newfd;

	if ((r = fd_lookup(oldfdnum, &oldfd)) < 0)
		return r;
	close(newfdnum);

	newfd = INDEX2FD(newfdnum);
	ova = fd2data(oldfd);
	nva = fd2data(newfd);

	if ((vpd[PDX(ova)] & PTE_P) && (vpt[VPN(ova)] & PTE_P))
		if ((r = sys_page_map(0, ova, 0, nva, vpt[VPN(ova)] & PTE_USER)) < 0)
			goto err;
	if ((r = sys_page_map(0, oldfd, 0, newfd, vpt[VPN(oldfd)] & PTE_USER)) < 0)
		goto err;

	return newfdnum;

err:
	sys_page_unmap(0, newfd);
	sys_page_unmap(0, nva);
	return r;
}
Example 18
//
// User-level fork with copy-on-write.
// Set up our page fault handler appropriately.
// Create a child.
// Copy our address space and page fault handler setup to the child.
// Then mark the child as runnable and return.
//
// Returns: child's envid to the parent, 0 to the child, < 0 on error.
// It is also OK to panic on error.
//
// Hint:
//   Use uvpd, uvpt, and duppage.
//   Remember to fix "thisenv" in the child process.
//   Neither user exception stack should ever be marked copy-on-write,
//   so you must allocate a new page for the child's user exception stack.
//
envid_t
fork(void)
{
	// LAB 4: Your code here.
    set_pgfault_handler(pgfault);
    int r, childid;
    childid = sys_exofork();
    if (childid < 0)
        panic("exofork error in fork()!\n");
    else if (childid == 0) {
        thisenv = &envs[ENVX(sys_getenvid())];
        return 0;
    } else {
        // uintptr_t, not int: UXSTACKTOP is above INT_MAX, so an int
        // loop counter would overflow
        uintptr_t addr;
        for (addr = UTEXT; addr < UXSTACKTOP - PGSIZE; addr += PGSIZE) {
            int pn = PGNUM(addr);
            if ((uvpd[PDX(addr)] & PTE_P) &&
                (uvpt[pn] & PTE_P) &&
                (uvpt[pn] & PTE_U))
                duppage(childid, pn);
        }
        extern void _pgfault_upcall();
        if ((r = sys_page_alloc(childid, (void *)(UXSTACKTOP - PGSIZE), PTE_U|PTE_W|PTE_P)) < 0)
            panic("sys_page_alloc: %e", r);
        sys_env_set_pgfault_upcall(childid, _pgfault_upcall);
        sys_env_set_status(childid, ENV_RUNNABLE);
        return childid;
    }
}
Example 19
//
// User-level fork with copy-on-write.
// Set up our page fault handler appropriately.
// Create a child.
// Copy our address space and page fault handler setup to the child.
// Then mark the child as runnable and return.
//
// Returns: child's envid to the parent, 0 to the child, < 0 on error.
// It is also OK to panic on error.
//
// Hint:
//   Use uvpd, uvpt, and duppage.
//   Remember to fix "thisenv" in the child process.
//   Neither user exception stack should ever be marked copy-on-write,
//   so you must allocate a new page for the child's user exception stack.
//
envid_t
fork(void)
{
  extern void _pgfault_upcall();
  // LAB 4: Your code here.
  set_pgfault_handler(pgfault);

  envid_t envid = sys_exofork();
  if (envid == 0) {
    thisenv = &envs[ENVX(sys_getenvid())];
    return 0;
  }
  if (envid < 0) {
    panic("sys_exofork failed: %e", envid);
  }

  uint32_t addr;
  for (addr = 0; addr < USTACKTOP; addr += PGSIZE) {
    if ((uvpd[PDX(addr)] & PTE_P) && (uvpt[PGNUM(addr)] & PTE_P) && (uvpt[PGNUM(addr)] & PTE_U)) {
      duppage(envid, PGNUM(addr));
    }
  }
  int alloc_err = sys_page_alloc(envid, (void *)(UXSTACKTOP-PGSIZE), PTE_U|PTE_W|PTE_P);
  if (alloc_err) {
    panic("sys_page_alloc failed with error: %e", alloc_err);
  }
  sys_env_set_pgfault_upcall(envid, _pgfault_upcall);
  int set_status_err = sys_env_set_status(envid, ENV_RUNNABLE);
  if (set_status_err) {
    panic("sys_env_set_status");
  }
  return envid;
}
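None of these examples defines duppage itself; a common shape for it in JOS lab 4, sketched from the uvpt/uvpd conventions above rather than taken from any one listed project:

// Map page pn into envid's address space.  Writable or copy-on-write
// pages must become copy-on-write in BOTH address spaces.
static int
duppage(envid_t envid, unsigned pn)
{
	void *va = (void *)(pn * PGSIZE);
	pte_t pte = uvpt[pn];
	int r;

	if (pte & PTE_SHARE) {
		// shared pages keep their full permissions in both spaces
		return sys_page_map(0, va, envid, va, pte & PTE_SYSCALL);
	}
	if (pte & (PTE_W | PTE_COW)) {
		// Child first, then parent: if the parent's own mapping were
		// re-marked COW first, a fault in the parent between the two
		// calls would give it a private writable copy, and the child
		// would then share that still-writable page.
		if ((r = sys_page_map(0, va, envid, va, PTE_P|PTE_U|PTE_COW)) < 0)
			return r;
		return sys_page_map(0, va, 0, va, PTE_P|PTE_U|PTE_COW);
	}
	// read-only page: share it directly
	return sys_page_map(0, va, envid, va, PTE_P|PTE_U);
}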
Example 20
void
mmumapcpu0(void)
{
	ulong *pdb, *pte, va, pa, pdbx;

	if(strstr(xenstart->magic, "x86_32p"))
		paemode = 1;
	hypervisor_virt_start = paemode ? 0xF5800000 : 0xFC000000;
	patomfn = (ulong*)xenstart->mfn_list;
	matopfn = (ulong*)hypervisor_virt_start;
	/* Xen bug ? can't touch top entry in PDPT */
	if(paemode)
		hypervisor_virt_start = 0xC0000000;

	/* 
	 * map CPU0MACH at MACHADDR.
	 * When called the pagedir and page table exist, we just
	 * need to fill in a page table entry.
	 */
	pdb = (ulong*)xenstart->pt_base;
	va = MACHADDR;
	pa = PADDR(CPU0MACH) | PTEVALID|PTEWRITE;
	pdbx = PDX(va);
	pdb = PDB(pdb, va);
	pte = KADDR(MAPPN(pdb[pdbx]));
	xenupdate(&pte[PTX(va)], pa);
}
Example 21
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns
// a pointer to the page table entry (PTE) for linear address 'va'.
// This requires walking the two-level page table structure.
//
// The relevant page table page might not exist yet.
// If this is true, and create == false, then pgdir_walk returns NULL.
// Otherwise, pgdir_walk allocates a new page table page with page_alloc.
//    - If the allocation fails, pgdir_walk returns NULL.
//    - Otherwise, the new page's reference count is incremented,
//	the page is cleared,
//	and pgdir_walk returns a pointer into the new page table page.
//
// Hint 1: you can turn a Page * into the physical address of the
// page it refers to with page2pa() from kern/pmap.h.
//
// Hint 2: the x86 MMU checks permission bits in both the page directory
// and the page table, so it's safe to leave permissions in the page
// more permissive than strictly necessary.
//
// Hint 3: look at inc/mmu.h for useful macros that manipulate page
// table and page directory entries.
//
pte_t *
pgdir_walk(pde_t *pgdir, const void *va, int create)
{
	int dindex = PDX(va), tindex = PTX(va);
	//dir index, table index
	if (!(pgdir[dindex] & PTE_P)) {	//if pde not exist
		if (create) {
			struct PageInfo *pg = page_alloc(ALLOC_ZERO);	//alloc a zero page
			if (!pg) return NULL;	//allocation fails
			pg->pp_ref++;
			pgdir[dindex] = page2pa(pg) | PTE_P | PTE_U | PTE_W;
		} else return NULL;
	}
	pte_t *p = KADDR(PTE_ADDR(pgdir[dindex]));

	// The commented-out block below is unnecessary: pgdir_walk creates
	// page-table pages, not the data pages themselves.
	// if (!(p[tindex] & PTE_P))	//if pte not exist
	// 	if (create) {
	// 		struct PageInfo *pg = page_alloc(ALLOC_ZERO);	//alloc a zero page
	// 		pg->pp_ref++;
	// 		p[tindex] = page2pa(pg) | PTE_P;
	// 	} else return NULL;

	return p+tindex;
}
Example 22
File: pmm.c Project: TySag/project
void check_boot_pgdir(void)
{
	pte_t *ptep;
	int i;
	for (i = 0; i < npage * PGSIZE; i += PGSIZE) {	// i walks physical addresses, one page at a time
		assert((ptep =
			get_pte(boot_pgdir, (uintptr_t) KADDR(i), 0)) != NULL);
		assert(PTE_ADDR(*ptep) == i);
	}

	assert(PDE_ADDR(boot_pgdir[PDX(VPT)]) == PADDR(boot_pgdir));

	assert(boot_pgdir[0] == 0);

	struct Page *p;
	p = alloc_page();
	assert(page_insert(boot_pgdir, p, 0x100, PTE_W) == 0);
	assert(page_ref(p) == 1);
	assert(page_insert(boot_pgdir, p, 0x100 + PGSIZE, PTE_W) == 0);
	assert(page_ref(p) == 2);

	const char *str = "ucore: Hello world!!";
	strcpy((void *)0x100, str);
	assert(strcmp((void *)0x100, (void *)(0x100 + PGSIZE)) == 0);

	*(char *)(page2kva(p) + 0x100) = '\0';
	assert(strlen((const char *)0x100) == 0);

	free_page(p);
	free_page(pa2page(PDE_ADDR(boot_pgdir[0])));
	boot_pgdir[0] = 0;

	kprintf("check_boot_pgdir() succeeded!\n");
}
Example 23
//
// Map the physical page 'pp' at virtual address 'va'.
// The permissions (the low 12 bits) of the page table entry
// should be set to 'perm|PTE_P'.
//
// Requirements
//   - If there is already a page mapped at 'va', it should be page_remove()d.
//   - If necessary, on demand, a page table should be allocated and inserted
//     into 'pgdir'.
//   - pp->pp_ref should be incremented if the insertion succeeds.
//   - The TLB must be invalidated if a page was formerly present at 'va'.
//
// Corner-case hint: Make sure to consider what happens when the same
// pp is re-inserted at the same virtual address in the same pgdir.
// However, try not to distinguish this case in your code, as this
// frequently leads to subtle bugs; there's an elegant way to handle
// everything in one code path.
//
// RETURNS:
//   0 on success
//   -E_NO_MEM, if page table couldn't be allocated
//
// Hint: The TA solution is implemented using pgdir_walk, page_remove,
// and page2pa.
//
int
page_insert(pde_t *pgdir, struct PageInfo *pp, void *va, int perm)
{
	// Fill this function in
	pte_t *ptep = pgdir_walk(pgdir, va, true);
	if (!ptep)
		return -E_NO_MEM;

	if (pa2page(*ptep) != pp) {
		page_remove(pgdir, va);
		assert(*ptep == 0);
		assert(pp->pp_ref >= 0);
		pp->pp_ref++;
	} else {
		// same pp re-inserted at the same va: skip the ref-count
		// dance and just flush the stale translation
		tlb_invalidate(pgdir, va);
	}

	*ptep = page2pa(pp) | perm | PTE_P;
	/* we should also update the pde's permissions */
	pde_t *pde = pgdir + PDX(va);
	*pde = *pde | perm;

	return 0;
}
Example 24
//
// Custom page fault handler - if faulting page is copy-on-write,
// map in our own private writable copy.
//
static void
pgfault(struct UTrapframe *utf)
{
	void *addr = (void *) utf->utf_fault_va;
	uint32_t err = utf->utf_err;
	int r;

	// Check that the faulting access was (1) a write, and (2) to a
	// copy-on-write page.  If not, panic.
	// Hint:
	//   Use the read-only page table mappings at uvpt
	//   (see <inc/memlayout.h>).

	if ((err & FEC_WR) == 0 || (uvpd[PDX(addr)] & PTE_P) == 0 ||
	    (uvpt[PGNUM(addr)] & PTE_COW) == 0)
		panic("Page fault in lib/fork.c!\n");
	// LAB 4: Your code here.

	// Allocate a new page, map it at a temporary location (PFTEMP),
	// copy the data from the old page to the new page, then move the new
	// page to the old page's address.
	// Hint:
	//   You should make three system calls.

	if ((r = sys_page_alloc(0, (void *)PFTEMP, PTE_U|PTE_P|PTE_W)) < 0)
		panic("alloc page error in lib/fork.c\n");
	addr = ROUNDDOWN(addr, PGSIZE);
	memcpy(PFTEMP, addr, PGSIZE);
	if ((r = sys_page_map(0, PFTEMP, 0, addr, PTE_U|PTE_P|PTE_W)) < 0)
		panic("page map error in lib/fork.c\n");
	if ((r = sys_page_unmap(0, PFTEMP)) < 0)
		panic("page unmap error in lib/fork.c\n");
// LAB 4: Your code here.

}
//
// User-level fork with copy-on-write.
// Set up our page fault handler appropriately.
// Create a child.
// Copy our address space and page fault handler setup to the child.
// Then mark the child as runnable and return.
//
// Returns: child's envid to the parent, 0 to the child, < 0 on error.
// It is also OK to panic on error.
//
// Hint:
//   Use uvpd, uvpt, and duppage.
//   Remember to fix "thisenv" in the child process.
//   Neither user exception stack should ever be marked copy-on-write,
//   so you must allocate a new page for the child's user exception stack.
//
envid_t
fork(void)
{
    // LAB 4: Your code here.
    //panic("fork not implemented");
    void *addr;
    set_pgfault_handler(pgfault);
    envid_t forkid = sys_exofork();
    if (forkid < 0)
        panic("sys_exofork: %e", forkid);
    if(forkid == 0) {
        thisenv = &envs[ENVX(sys_getenvid())];
        return 0;
    }
    for (addr = (uint8_t*) UTEXT; addr < (void *) USTACKTOP; addr += PGSIZE)
        if( (uvpd[PDX(addr)] & PTE_P) && (uvpt[PGNUM(addr)] & PTE_P) )
            duppage(forkid, PGNUM(addr));
    if (sys_page_alloc(forkid, (void *)(UXSTACKTOP-PGSIZE), PTE_U|PTE_W|PTE_P) < 0)
        panic("user stack alloc failure\n");
    extern void _pgfault_upcall();
    if(sys_env_set_pgfault_upcall(forkid, _pgfault_upcall) < 0)
        panic("set pgfault upcall fails %d\n", forkid);
    if(sys_env_set_status(forkid, ENV_RUNNABLE) < 0)
        panic("set %d runnable fails\n", forkid);
    return forkid;
}
Example 26
/*
	hint from check
	ptep = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(PGSIZE)]));
	assert(pgdir_walk(kern_pgdir, (void*)PGSIZE, 0) == ptep+PTX(PGSIZE));
*/
pte_t *
pgdir_walk(pde_t *pgdir, const void *va, int create)
{
	// Fill this function in
	pde_t *pde = pgdir + PDX(va);
	pte_t *ptep = NULL;
	if(*pde & PTE_P) { /* present */
		ptep = KADDR(PTE_ADDR(*pde));
		return ptep + PTX(va);
	}

	if(create == false) {
		return NULL;
	}

	struct PageInfo *new_ptep = page_alloc(ALLOC_ZERO);
	if(!new_ptep){
		return NULL;
	}
	//assert( new_ptep != NULL );
	//assert( new_ptep->pp_ref == 0 );
	new_ptep->pp_ref = 1;
	*pde = page2pa(new_ptep) | PTE_P | PTE_U;
	ptep = page2kva(new_ptep);
	return ptep + PTX(va);
}
Example 27
void
checkmmu(ulong va, ulong pa)
{
	ulong *pdb, *pte;
	int pdbx;
	
	if(up->mmupdb == 0)
		return;

	pdb = mmupdb(up->mmupdb, va);
	pdbx = PDX(va);
	if(MAPPN(pdb[pdbx]) == 0){
		/* okay to be empty - will fault and get filled */
		return;
	}
	
	pte = KADDR(MAPPN(pdb[pdbx]));
	if(MAPPN(pte[PTX(va)]) != pa){
		if(!paemode)
		  print("%ld %s: va=0x%08lux pa=0x%08lux pte=0x%08lux (0x%08lux)\n",
			up->pid, up->text,
			va, pa, pte[PTX(va)], MAPPN(pte[PTX(va)]));
		else
		  print("%ld %s: va=0x%08lux pa=0x%08lux pte=0x%16llux (0x%08lux)\n",
			up->pid, up->text,
			va, pa, *(uvlong*)&pte[PTX(va)], MAPPN(pte[PTX(va)]));
	}
}
Example 28
pte_t* pgdir_walk(pde_t *pgdir, const void *va, int create)
{
	// Fill this function in
	if (!pgdir) {
		cprintf("pgdir_walk: null pgdir\n");
	}
	//Get the entry id in the page directory	
	uintptr_t pgdir_offset = (uintptr_t)PDX(va);
	pte_t *ptentry;
	//if the page directory entry does not exist.
	if(!(pgdir[pgdir_offset] & PTE_P)) {
		if(!create)
			return NULL;
		struct PageInfo *new_page = page_alloc(ALLOC_ZERO);
		if(!new_page)
			return NULL;
		new_page->pp_ref++;
		pgdir[pgdir_offset] = (page2pa(new_page)) | PTE_P | PTE_W | PTE_U;
		
		//Returning pointer to page table base.
		//return (pte_t*) (page2kva(new_page));
		//Returning pointer to page table entry
		pde_t *ret_arr = page2kva(new_page); 
		return &ret_arr[PTX(va)];
		
	} else {
		ptentry = KADDR(PTE_ADDR(pgdir[pgdir_offset]));
		return &(ptentry[PTX(va)]);
	}
}
Example 29
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns
// a pointer to the page table entry (PTE) for linear address 'va'.
// This requires walking the two-level page table structure.
//
// The relevant page table page might not exist yet.
// If this is true, and create == false, then pgdir_walk returns NULL.
// Otherwise, pgdir_walk allocates a new page table page with page_alloc.
//    - If the allocation fails, pgdir_walk returns NULL.
//    - Otherwise, the new page's reference count is incremented,
//	the page is cleared,
//	and pgdir_walk returns a pointer into the new page table page.
//
// Hint 1: you can turn a Page * into the physical address of the
// page it refers to with page2pa() from kern/pmap.h.
//
// Hint 2: the x86 MMU checks permission bits in both the page directory
// and the page table, so it's safe to leave permissions in the page
// more permissive than strictly necessary.
//
// Hint 3: look at inc/mmu.h for useful macros that manipulate page
// table and page directory entries.
//
pte_t *
pgdir_walk(pde_t *pgdir, const void *va, int create)
{
	// Fill this function in
	/*stone's solution for lab2*/
	pde_t* pde = pgdir + PDX(va);//stone: get pde
	if (*pde & PTE_P){//stone:if present
		pte_t *pte = PTX(va) + (pte_t *)KADDR(PTE_ADDR(*pde));
		return pte;
	}
	else if (create == 0)
		return NULL;
	else{
		struct Page* pp = page_alloc(ALLOC_ZERO);
		if (pp == NULL)
			return NULL;
		else{
			pp->pp_ref = 1;
			physaddr_t physaddr = page2pa(pp);
			*pde = physaddr | PTE_U | PTE_W | PTE_P;
			pte_t *pte = PTX(va) + (pte_t *)KADDR(PTE_ADDR(*pde));
			return pte;
		}
	}	  
	//return NULL;
}
Example 30
/* Copy the page tables and set both PTEs' flags.
 * Note: for copy-on-write, both the parent and the child are marked
 * write-protected, and the reference count of each shared physical
 * page is increased.
 */
int pt_copy(struct pde *npgd, struct pde *opgd){
    struct pde *opde, *npde;
    struct pte *opte, *npte, *old_pt, *new_pt;
    struct page *pg;
    uint pdn, pn;

    for(pdn=PDX(KMEM_END); pdn<1024; pdn++) {
        opde = &opgd[pdn];
        npde = &npgd[pdn];
        npde->pd_flag = opde->pd_flag;
        if (opde->pd_flag & PTE_P) {
            old_pt = (struct pte*)(opde->pd_off * PAGE);
            new_pt = (struct pte*)kmalloc(PAGE);
            npde->pd_off = PPN(new_pt);
            for(pn=0; pn<1024; pn++){
                opte = &old_pt[pn];
                npte = &new_pt[pn];
                npte->pt_off  = opte->pt_off;
                npte->pt_flag = opte->pt_flag;
                if (opte->pt_flag & PTE_P) {
                    // turn off each pte's PTE_W
                    npte->pt_flag &= ~PTE_W;
                    opte->pt_flag &= ~PTE_W;
                    // increase the ref count
                    pg = pgfind(opte->pt_off);
                    pg->pg_count++;
                }
            }
        }
    }
    return 0;
}
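The matching write-fault path is not shown in this snippet; a heavily hedged sketch of what it typically looks like, reusing pgfind/PPN/PAGE from above and assuming the same identity mapping plus a flush_tlb() helper (both assumptions, not from this project):

// Resolve a write fault on a write-protected copy-on-write page.
int pt_wfault(struct pte *pte)
{
	struct page *pg = pgfind(pte->pt_off);

	if (pg->pg_count > 1) {
		// still shared: copy the frame and drop one reference
		char *new_frame = kmalloc(PAGE);
		memcpy(new_frame, (char *)(pte->pt_off * PAGE), PAGE);
		pg->pg_count--;
		pte->pt_off = PPN(new_frame);
	}
	pte->pt_flag |= PTE_W;	// sole owner again: restore write access
	flush_tlb();
	return 0;
}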