Example #1
// Set up a two-level page table:
//    kern_pgdir is the linear (virtual) address of the root
// Then turn on paging.  Then effectively turn off segmentation.
// (i.e., the segment base addrs are set to zero).
//
// This function only sets up the kernel part of the address space
// (i.e., addresses >= UTOP).  The user part of the address space
// will be set up later.
//
// From UTOP to ULIM, the user is allowed to read but not write.
// Above ULIM the user cannot read or write.
void
mem_init(void)
{
	uint32_t cr0;
	size_t n;

	// Ensure user & kernel struct Pages agree.
	static_assert(sizeof(struct Page) == sizeof(struct UserPage));

	// Find out how much memory the machine has (npages & npages_basemem).
	i386_detect_memory();

	// Remove this line when you're ready to test this function.
	//panic("mem_init: This function is not finished\n");

	//////////////////////////////////////////////////////////////////////
	// create initial page directory.
	kern_pgdir = (pde_t *) boot_alloc(PGSIZE);
	memset(kern_pgdir, 0, PGSIZE);

	//////////////////////////////////////////////////////////////////////
	// Recursively insert PD in itself as a page table, to form
	// a virtual page table at virtual address UVPT.
	// (For now, you don't have to understand the greater purpose of the
	// following line.)
	// Permissions: kernel R, user R
	kern_pgdir[PDX(UVPT)] = PADDR(kern_pgdir) | PTE_U | PTE_P;

	//////////////////////////////////////////////////////////////////////
	// Allocate an array of npages 'struct Page's and store it in 'pages'.
	// The kernel uses this array to keep track of physical pages: for
	// each physical page, there is a corresponding struct Page in this
	// array.  'npages' is the number of physical pages in memory.
	pages = (Page *) boot_alloc(npages * sizeof(struct Page));


	//////////////////////////////////////////////////////////////////////
	// Make 'envs' point to an array of size 'NENV' of 'struct Env'.
	// LAB 3: Your code here.
	envs = (Env *) boot_alloc(NENV * sizeof(struct Env));

	//////////////////////////////////////////////////////////////////////
	// Now that we've allocated the initial kernel data structures, we set
	// up the list of free physical pages. Once we've done so, all further
	// memory management will go through the page_* functions. In
	// particular, we can now map memory using page_map_segment
	// or page_insert
	page_init();

	check_page_free_list(true);
	check_page_alloc();
	check_page();

	//////////////////////////////////////////////////////////////////////
	// Now we set up virtual memory

	//////////////////////////////////////////////////////////////////////
	// Use the physical memory that 'entry_stack' refers to as the kernel
	// stack.  The kernel stack grows down from virtual address KSTACKTOP.
	// We consider the entire range from [KSTACKTOP-PTSIZE, KSTACKTOP)
	// to be the kernel stack, but break this into two pieces:
	//     * [KSTACKTOP-KSTKSIZE, KSTACKTOP) -- backed by physical memory
	//     * [KSTACKTOP-PTSIZE, KSTACKTOP-KSTKSIZE) -- not backed; so if
	//       the kernel overflows its stack, it will fault rather than
	//       overwrite memory.  Known as a "guard page".
	//     Permissions: kernel RW, user NONE
	Page *pp = pa2page((physaddr_t)entry_stack-KERNBASE);
	for (uintptr_t ptr = KSTACKTOP-KSTKSIZE; ptr < KSTACKTOP; ptr += PGSIZE) {
		if (page_insert(kern_pgdir, pp, ptr, PTE_W | PTE_P) < 0)
			panic("Couldn't create page table entries for stack.\n");
		pp++;
	}

	//////////////////////////////////////////////////////////////////////
	// Map all of physical memory at KERNBASE.
	// Ie.  the VA range [KERNBASE, 2^32) should map to
	//      the PA range [0, 2^32 - KERNBASE)
	// We might not have 2^32 - KERNBASE bytes of physical memory, but
	// we just set up the mapping anyway.
	// Permissions: kernel RW, user NONE
	page_map_segment(kern_pgdir, KERNBASE, 0xFFFFFFFF-KERNBASE, 0x0, PTE_W | PTE_P);
     
	//print_page_table(kern_pgdir, false, false);

	//////////////////////////////////////////////////////////////////////
	// Map the 'envs' array read-only by the user at linear address UENVS.
	// Permissions: kernel R, user R
	// (That's the UENVS version; 'envs' itself is kernel RW, user NONE.)
	// LAB 3: Your code here.
	page_map_segment(kern_pgdir, (uintptr_t) UENVS, ROUNDUP(NENV*sizeof(struct Env), PGSIZE), PADDR(envs), PTE_U | PTE_P);

	//////////////////////////////////////////////////////////////////////
	// Map 'pages' read-only by the user at linear address UPAGES.
	// Permissions: kernel R, user R
	// (That's the UPAGES version; 'pages' itself is kernel RW, user NONE.)
	// LAB 3: Your code here.
	page_map_segment(kern_pgdir, UPAGES, ROUNDUP(npages*sizeof(struct Page), PGSIZE), PADDR(pages), PTE_U | PTE_P);

	// Check that the initial page directory has been set up correctly.
	check_kern_pgdir();

	// Switch from the minimal entry page directory to the full kern_pgdir
	// page table we just created.	Our instruction pointer should be
	// somewhere between KERNBASE and KERNBASE+4MB right now, which is
	// mapped the same way by both page tables.
	//
	// If the machine reboots at this point, you've probably set up your
	// kern_pgdir wrong.
	lcr3(PADDR(kern_pgdir));

	// entry.S set the really important flags in cr0 (including enabling
	// paging).  Here we configure the rest of the flags we need.
	cr0 = rcr0();
	cr0 |= CR0_PE|CR0_PG|CR0_AM|CR0_WP|CR0_NE|CR0_MP;
	cr0 &= ~(CR0_TS|CR0_EM);
	lcr0(cr0);

	// Some more checks, only possible after kern_pgdir is installed.
	check_page_installed_pgdir();
}
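
The self-mapping line above (kern_pgdir[PDX(UVPT)] = PADDR(kern_pgdir) | PTE_U | PTE_P) deserves a closer look. Below is a minimal sketch of the page-table window it creates, assuming the usual JOS macros (UVPT, PGSHIFT, pte_t); the function name is illustrative, not from the source.

// Because kern_pgdir[PDX(UVPT)] points back at kern_pgdir itself, the
// hardware page walk for any address in [UVPT, UVPT+PTSIZE) uses the
// page directory as a page table, so the PTE for every virtual address
// appears at a fixed virtual address.
static pte_t
uvpt_pte_of(uintptr_t va)
{
	volatile pte_t *uvpt = (volatile pte_t *) UVPT;
	// This read faults if va's page table is not present, so real
	// callers check the directory entry first.
	return uvpt[va >> PGSHIFT];
}
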
Example #2
// do_pgfault - interrupt handler to process the page fault exception
int
do_pgfault(struct mm_struct *mm, uint32_t error_code, uintptr_t addr) {
    if (mm == NULL) {
        assert(current != NULL);
        panic("page fault in kernel thread: pid = %d, %d %08x.\n",
              current->pid, error_code, addr);
    }

    bool need_unlock = 1;
    if (!try_lock_mm(mm)) {
        if (current != NULL && mm->locked_by == current->pid) {
            need_unlock = 0;
        }
        else {
            lock_mm(mm);
        }
    }

    int ret = -E_INVAL;
    struct vma_struct *vma = find_vma(mm, addr);
    if (vma == NULL || vma->vm_start > addr) {
        goto failed;
    }
    if (vma->vm_flags & VM_STACK) {
        if (addr < vma->vm_start + PGSIZE) {
            goto failed;
        }
    }

    switch (error_code & 3) {
    default:
    /* default is 3: write, present */
    case 2: /* write, not present */
        if (!(vma->vm_flags & VM_WRITE)) {
            goto failed;
        }
        break;
    case 1: /* read, present */
        goto failed;
    case 0: /* read, not present */
        if (!(vma->vm_flags & (VM_READ | VM_EXEC))) {
            goto failed;
        }
    }

    uint32_t perm = PTE_U;
    if (vma->vm_flags & VM_WRITE) {
        perm |= PTE_W;
    }
    addr = ROUNDDOWN(addr, PGSIZE);

    ret = -E_NO_MEM;
    pte_t *ptep;

    if ((ptep = get_pte(mm->pgdir, addr, 1)) == NULL) {
        goto failed;
    }
    if (*ptep == 0) {
        if (!(vma->vm_flags & VM_SHARE)) {
            if (pgdir_alloc_page(mm->pgdir, addr, perm) == NULL) {
                goto failed;
            }
        }
        else {
            lock_shmem(vma->shmem);
            uintptr_t shmem_addr = addr - vma->vm_start + vma->shmem_off;
            pte_t *sh_ptep = shmem_get_entry(vma->shmem, shmem_addr, 1);
            if (sh_ptep == NULL || *sh_ptep == 0) {
                unlock_shmem(vma->shmem);
                goto failed;
            }
            unlock_shmem(vma->shmem);
            if (*sh_ptep & PTE_P) {
                page_insert(mm->pgdir, pa2page(*sh_ptep), addr, perm);
            }
            else {
                swap_duplicate(*sh_ptep);
                *ptep = *sh_ptep;
            }
        }
    }
    else {
        struct Page *page, *newpage = NULL;
        bool cow = ((vma->vm_flags & (VM_SHARE | VM_WRITE)) == VM_WRITE), may_copy = 1;

        assert(!(*ptep & PTE_P) || ((error_code & 2) && !(*ptep & PTE_W) && cow));
        if (cow) {
            newpage = alloc_page();
        }
        if (*ptep & PTE_P) {
            page = pte2page(*ptep);
        }
        else {
            if ((ret = swap_in_page(*ptep, &page)) != 0) {
                if (newpage != NULL) {
                    free_page(newpage);
                }
                goto failed;
            }
            if (!(error_code & 2) && cow) {
                perm &= ~PTE_W;
                may_copy = 0;
            }
        }

        if (cow && may_copy) {
            if (page_ref(page) + swap_page_count(page) > 1) {
                if (newpage == NULL) {
                    goto failed;
                }
                memcpy(page2kva(newpage), page2kva(page), PGSIZE);
                page = newpage, newpage = NULL;
            }
        }
        page_insert(mm->pgdir, page, addr, perm);
        if (newpage != NULL) {
            free_page(newpage);
        }
    }
    ret = 0;

failed:
    if (need_unlock) {
        unlock_mm(mm);
    }
    return ret;
}
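
The switch on (error_code & 3) above decodes the two low bits of the x86 page-fault error code: bit 0 is P (the page was present) and bit 1 is W/R (the access was a write). A hedged sketch of an equivalent predicate (the helper name is mine, not from the source):

/* Equivalent of the "switch (error_code & 3)" dispatch above. */
static bool
fault_is_legal(uint32_t error_code, uint32_t vm_flags)
{
    bool write   = (error_code & 2) != 0;
    bool present = (error_code & 1) != 0;
    if (write)
        return (vm_flags & VM_WRITE) != 0;        /* cases 2 and 3 */
    if (present)
        return 0;                                 /* case 1: read of a present page */
    return (vm_flags & (VM_READ | VM_EXEC)) != 0; /* case 0: demand load */
}
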
Example #3
void*
frame_evict (void* uaddr)
{

  /* 1. Choose a frame to evict, using your page replacement algorithm.
        The "accessed" and "dirty" bits in the page table, described below, 
        will come in handy. */
  struct frame_table_entry *fte = NULL;
  switch (PAGE_EVICTION_ALGORITHM)
  {
    /* First in first out */
    case PAGE_EVICTION_FIFO:
      fte = frame_evict_choose_fifo ();
      break;

    /* Second chance */
    case PAGE_EVICTION_SECONDCHANCE:
      fte = frame_evict_choose_secondchance ();
      break;

    default:
      PANIC ("Invalid eviction algorithm choice.");
  }
  ASSERT (fte != NULL);


  /* 2. Remove references to the frame from any page table that refers to it.
        Unless you have implemented sharing, only a single page should refer to
        a frame at any given time. */
  pagedir_clear_page (fte->owner->pagedir, pg_round_down (fte->uaddr));


  /* 3. If necessary, write the page to the file system or to swap.
        The evicted frame may then be used to store a different page. */
  struct page *p_evict = 
      page_lookup (fte->owner->pages, pg_round_down (fte->uaddr));
  if (p_evict == NULL)
        PANIC ("Failed to get supp page for existing page.");

  /* Page to be evicted is backed by a file in the file system */
  if (p_evict->page_location_option == FILESYS)
    {
      if (p_evict->writable)
        {
          file_write_at (p_evict->file, fte->kaddr, p_evict->page_read_bytes,
              p_evict->ofs);
        }
    }
  else if (p_evict->page_location_option == ALLZERO)
    {
      /* All zero, so can just be overwritten */
    }
  else
    {
      /* Anonymous page (e.g., stack): write it out to swap */
      int index = swap_to_disk (pg_round_down (fte->uaddr));
      
      /* Creates a supp page and insert it into pages. */
      struct page *p = page_create ();

      if (p == NULL)
        PANIC ("Failed to get supp page for swap slot.");

      p->addr = fte->uaddr;
      p->page_location_option = SWAPSLOT;
      p->swap_index = index;
      page_insert (fte->owner->pages, &p->hash_elem);
    }

  /* Replace virtual address with new virtual address */
  fte->owner = thread_current ();
  fte->uaddr = uaddr;

  /* Reinsert the frame table entry into the frame table */
  lock_acquire (&frame_table_lock);
  list_remove (&fte->elem);
  list_push_front (&frame_table, &fte->elem);
  lock_release (&frame_table_lock);

  return fte->kaddr;
}
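
frame_evict_choose_secondchance() is referenced above but not shown. A hedged sketch of one plausible shape, reusing this file's frame_table and frame_table_lock together with the Pintos accessed-bit accessors pagedir_is_accessed()/pagedir_set_accessed():

static struct frame_table_entry *
frame_evict_choose_secondchance (void)
{
  struct frame_table_entry *fte;
  lock_acquire (&frame_table_lock);
  for (;;)
    {
      /* frame_evict pushes entries to the front, so the oldest frame
         sits at the back of the list. */
      struct list_elem *e = list_back (&frame_table);
      fte = list_entry (e, struct frame_table_entry, elem);
      if (!pagedir_is_accessed (fte->owner->pagedir,
                                pg_round_down (fte->uaddr)))
        break;
      /* Referenced since we last looked: clear the bit and give the
         frame a second chance at the front of the list. */
      pagedir_set_accessed (fte->owner->pagedir,
                            pg_round_down (fte->uaddr), false);
      list_remove (e);
      list_push_front (&frame_table, e);
    }
  lock_release (&frame_table_lock);
  return fte;
}
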
Example #4
/* do_pgfault - interrupt handler to process the page fault exception
 * @mm         : the control struct for a set of vma using the same PDT
 * @error_code : the error code recorded in trapframe->tf_err, which is set by the x86 hardware
 * @addr       : the addr which causes a memory access exception, (the contents of the CR2 register)
 *
 * CALL GRAPH: trap--> trap_dispatch-->pgfault_handler-->do_pgfault
 * The processor provides ucore's do_pgfault function with two items of information to aid in diagnosing
 * the exception and recovering from it.
 *   (1) The contents of the CR2 register. The processor loads the CR2 register with the
 *       32-bit linear address that generated the exception. The do_pgfault function can
 *       use this address to locate the corresponding page directory and page-table
 *       entries.
 *   (2) An error code on the kernel stack. The error code for a page fault has a format different from
 *       that for other exceptions. The error code tells the exception handler three things:
 *         -- The P flag   (bit 0) indicates whether the exception was due to a not-present page (0)
 *            or to either an access rights violation or the use of a reserved bit (1).
 *         -- The W/R flag (bit 1) indicates whether the memory access that caused the exception
 *            was a read (0) or write (1).
 *         -- The U/S flag (bit 2) indicates whether the processor was executing at user mode (1)
 *            or supervisor mode (0) at the time of the exception.
 */
int
do_pgfault(struct mm_struct *mm, uint32_t error_code, uintptr_t addr) {
    int ret = -E_INVAL;
    //try to find a vma which include addr
    struct vma_struct *vma = find_vma(mm, addr);

    pgfault_num++;
    //If the addr is in the range of a mm's vma?
    if (vma == NULL || vma->vm_start > addr) {
        cprintf("not valid addr %x, and  can not find it in vma\n", addr);
        goto failed;
    }
    //check the error_code
    switch (error_code & 3) {
    default:
            /* error code flag : default is 3 ( W/R=1, P=1): write, present */
    case 2: /* error code flag : (W/R=1, P=0): write, not present */
        if (!(vma->vm_flags & VM_WRITE)) {
            cprintf("do_pgfault failed: error code flag = write AND not present, but the addr's vma cannot write\n");
            goto failed;
        }
        break;
    case 1: /* error code flag : (W/R=0, P=1): read, present */
        cprintf("do_pgfault failed: error code flag = read AND present\n");
        goto failed;
    case 0: /* error code flag : (W/R=0, P=0): read, not present */
        if (!(vma->vm_flags & (VM_READ | VM_EXEC))) {
            cprintf("do_pgfault failed: error code flag = read AND not present, but the addr's vma cannot read or exec\n");
            goto failed;
        }
    }
    /* IF (write an existed addr ) OR
     *    (write an non_existed addr && addr is writable) OR
     *    (read  an non_existed addr && addr is readable)
     * THEN
     *    continue process
     */
    uint32_t perm = PTE_U;
    if (vma->vm_flags & VM_WRITE) {
        perm |= PTE_W;
    }
    addr = ROUNDDOWN(addr, PGSIZE);

    ret = -E_NO_MEM;

    pte_t *ptep=NULL;
    /*LAB3 EXERCISE 1: 2013012213
    * The comments below may help you finish the code
    *
    * Some Useful MACROs and DEFINEs, you can use them in below implementation.
    * MACROs or Functions:
    *   get_pte : get a pte and return the kernel virtual address of this pte for la;
    *             if the page table containing this pte doesn't exist, allocate a page for it (note the third parameter '1')
    *   pgdir_alloc_page : call alloc_page & page_insert functions to allocate a page size memory & setup
    *             an addr map pa<--->la with linear address la and the PDT pgdir
    * DEFINES:
    *   VM_WRITE  : If vma->vm_flags & VM_WRITE == 1/0, then the vma is writable/non writable
    *   PTE_W           0x002                   // page table/directory entry flags bit : Writeable
    *   PTE_U           0x004                   // page table/directory entry flags bit : User can access
    * VARIABLES:
    *   mm->pgdir : the PDT of these vma
    *
    */

    /*LAB3 EXERCISE 1: 2013012213*/

    if ((ptep = get_pte(mm->pgdir, addr, 1)) == NULL) {
        cprintf("get_pte in do_pgfault failed\n");
        goto failed;
    }
    if (*ptep == 0) {
        if (pgdir_alloc_page(mm->pgdir, addr, perm) == NULL) {
            cprintf("pgdir_alloc_page in do_pgfault failed\n");
            goto failed;
        }
    }
    else {
    /*LAB3 EXERCISE 2: 2013012213
    * Now we know this pte is a swap entry: we should load the data from disk into a page with a physical address,
    * map that physical address to the logical address, and trigger the swap manager to record this page's access situation.
    *
    *  Some useful MACROs and DEFINEs you can use in the implementation below.
    *  MACROs or Functions:
    *    swap_in(mm, addr, &page) : allocate a memory page, then, according to the swap entry in the PTE for addr,
    *                               find the disk page and read its contents into this memory page
    *    page_insert : build the mapping between the physical address of a Page and the linear address la
    *    swap_map_swappable : mark the page swappable
    */
    /*
     * LAB5 CHALLENGE (the implementation of Copy on Write)
		There are 2 situations when the code gets here.
		  1) *ptep & PTE_P == 1: a process tried to write a read-only page.
		     If the vma that includes this addr is writable, then we can make the page writable by rewriting *ptep.
		     This method can be used to implement the Copy on Write (COW) technique (a fast way to fork a process).
		  2) *ptep & PTE_P == 0 but *ptep != 0: this pte is a swap entry.
		     We should add LAB3's results here.
     */
        struct Page *page = NULL;
        if (*ptep & PTE_P)
            panic("error write a non-writable pte");
        else {
           if(swap_init_ok) {
               if ((ret = swap_in(mm, addr, &page)) != 0) {
                   cprintf("swap_in in do_pgfault failed\n");
                   goto failed;
               }
           }
           else {
            cprintf("no swap_init_ok but ptep is %x, failed\n", *ptep);
            goto failed;
           }
        }
        page_insert(mm->pgdir, page, addr, perm);
        swap_map_swappable(mm, addr, page, 1);
        page->pra_vaddr = addr;
    }
    ret = 0;
failed:
    return ret;
}
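
get_pte(mm->pgdir, addr, 1) is the workhorse of the handler above. A sketch of what it does on x86, following ucore's conventions (PDX/PTX, PDE_ADDR, KADDR, alloc_page, set_page_ref); treat it as illustrative rather than the canonical source:

static pte_t *
get_pte_sketch(pde_t *pgdir, uintptr_t la, bool create) {
    pde_t *pdep = &pgdir[PDX(la)];
    if (!(*pdep & PTE_P)) {           // page table not present
        struct Page *page;
        if (!create || (page = alloc_page()) == NULL) {
            return NULL;
        }
        set_page_ref(page, 1);
        uintptr_t pa = page2pa(page);
        memset(KADDR(pa), 0, PGSIZE); // new page tables must start zeroed
        *pdep = pa | PTE_U | PTE_W | PTE_P;
    }
    return &((pte_t *)KADDR(PDE_ADDR(*pdep)))[PTX(la)];
}
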
Example #5
// check page_insert, page_remove, &c
static void
check_page(void)
{
	struct PageInfo *pp, *pp0, *pp1, *pp2;
	struct PageInfo *fl;
	pte_t *ptep, *ptep1;
	void *va;
	uintptr_t mm1, mm2;
	int i;
	extern pde_t entry_pgdir[];

	// should be able to allocate three pages
	pp0 = pp1 = pp2 = 0;
	assert((pp0 = page_alloc(0)));
	assert((pp1 = page_alloc(0)));
	assert((pp2 = page_alloc(0)));

	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);

	// temporarily steal the rest of the free pages
	fl = page_free_list;
	page_free_list = 0;

	// should be no free memory
	assert(!page_alloc(0));

	// there is no page allocated at address 0
	assert(page_lookup(kern_pgdir, (void *) 0x0, &ptep) == NULL);

	// there is no free memory, so we can't allocate a page table
	assert(page_insert(kern_pgdir, pp1, 0x0, PTE_W) < 0);

	// free pp0 and try again: pp0 should be used for page table
	page_free(pp0);
	assert(page_insert(kern_pgdir, pp1, 0x0, PTE_W) == 0);
	assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
	assert(check_va2pa(kern_pgdir, 0x0) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp0->pp_ref == 1);

	// should be able to map pp2 at PGSIZE because pp0 is already allocated for page table
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);

	// should be no free memory
	assert(!page_alloc(0));

	// should be able to map pp2 at PGSIZE because it's already there
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);

	// pp2 should NOT be on the free list
	// could happen if ref counts are handled sloppily in page_insert
	//cprintf("p2: %p, free_list %p, p2 ref: %d", pp2, page_free_list, (int)pp2->pp_ref);
	assert(!page_alloc(0));

	// check that pgdir_walk returns a pointer to the pte
	// (From this we can also infer what pgdir_walk does, resolving the
	// ambiguity in "page table entry": does it return a pointer to the
	// page table, or a pointer to the entry within the page table?)
	// Given a virtual address va, kern_pgdir[PDX(va)] holds the physical
	// address of va's second-level page table. Applying KADDR turns that
	// into the page table's virtual address, i.e. ptep.
	// ptep + PTX(va) is then the virtual address of va's entry within
	// that page table, which is exactly what pgdir_walk must return.
	ptep = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(PGSIZE)]));
	assert(pgdir_walk(kern_pgdir, (void*)PGSIZE, 0) == ptep+PTX(PGSIZE));

	// should be able to change permissions too.
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W|PTE_U) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);
	assert(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U);
	assert(kern_pgdir[0] & PTE_U);

	// should be able to remap with fewer permissions
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
	assert(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_W);
	assert(!(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U));

	// should not be able to map at PTSIZE because need free page for page table
	assert(page_insert(kern_pgdir, pp0, (void*) PTSIZE, PTE_W) < 0);

	// insert pp1 at PGSIZE (replacing pp2)
	assert(page_insert(kern_pgdir, pp1, (void*) PGSIZE, PTE_W) == 0);
	assert(!(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U));

	// should have pp1 at both 0 and PGSIZE, pp2 nowhere, ...
	assert(check_va2pa(kern_pgdir, 0) == page2pa(pp1));
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp1));
	// ... and ref counts should reflect this
	assert(pp1->pp_ref == 2);
	assert(pp2->pp_ref == 0);

	// pp2 should be returned by page_alloc
	assert((pp = page_alloc(0)) && pp == pp2);

	// unmapping pp1 at 0 should keep pp1 at PGSIZE
	page_remove(kern_pgdir, 0x0);
	assert(check_va2pa(kern_pgdir, 0x0) == ~0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp2->pp_ref == 0);

	// unmapping pp1 at PGSIZE should free it
	page_remove(kern_pgdir, (void*) PGSIZE);
	assert(check_va2pa(kern_pgdir, 0x0) == ~0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == ~0);
	assert(pp1->pp_ref == 0);
	assert(pp2->pp_ref == 0);

	// so it should be returned by page_alloc
	assert((pp = page_alloc(0)) && pp == pp1);

	// should be no free memory
	assert(!page_alloc(0));

	// forcibly take pp0 back
	assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
	kern_pgdir[0] = 0;
	assert(pp0->pp_ref == 1);
	pp0->pp_ref = 0;

	// check pointer arithmetic in pgdir_walk
	page_free(pp0);
	va = (void*)(PGSIZE * NPDENTRIES + PGSIZE);
	ptep = pgdir_walk(kern_pgdir, va, 1);
	ptep1 = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(va)]));
	assert(ptep == ptep1 + PTX(va));
	kern_pgdir[PDX(va)] = 0;
	pp0->pp_ref = 0;

	// check that new page tables get cleared
	memset(page2kva(pp0), 0xFF, PGSIZE);
	page_free(pp0);
	pgdir_walk(kern_pgdir, 0x0, 1);
	ptep = (pte_t *) page2kva(pp0);
	for(i=0; i<NPTENTRIES; i++)
		assert((ptep[i] & PTE_P) == 0);
	kern_pgdir[0] = 0;
	pp0->pp_ref = 0;

	// give free list back
	page_free_list = fl;

	// free the pages we took
	page_free(pp0);
	page_free(pp1);
	page_free(pp2);

	// test mmio_map_region
	mm1 = (uintptr_t) mmio_map_region(0, 4097);
	mm2 = (uintptr_t) mmio_map_region(0, 4096);
	// check that they're in the right region
	assert(mm1 >= MMIOBASE && mm1 + 8192 < MMIOLIM);
	assert(mm2 >= MMIOBASE && mm2 + 8192 < MMIOLIM);
	// check that they're page-aligned
	assert(mm1 % PGSIZE == 0 && mm2 % PGSIZE == 0);
	// check that they don't overlap
	assert(mm1 + 8192 <= mm2);
	// check page mappings
	assert(check_va2pa(kern_pgdir, mm1) == 0);
	assert(check_va2pa(kern_pgdir, mm1+PGSIZE) == PGSIZE);
	assert(check_va2pa(kern_pgdir, mm2) == 0);
	assert(check_va2pa(kern_pgdir, mm2+PGSIZE) == ~0);
	// check permissions
	assert(*pgdir_walk(kern_pgdir, (void*) mm1, 0) & (PTE_W|PTE_PWT|PTE_PCD));
	assert(!(*pgdir_walk(kern_pgdir, (void*) mm1, 0) & PTE_U));
	// clear the mappings
	*pgdir_walk(kern_pgdir, (void*) mm1, 0) = 0;
	*pgdir_walk(kern_pgdir, (void*) mm1 + PGSIZE, 0) = 0;
	*pgdir_walk(kern_pgdir, (void*) mm2, 0) = 0;

	cprintf("check_page() succeeded!\n");
}
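
For reference, the check_va2pa helper these asserts rely on is essentially the following (as in JOS's kern/pmap.c; ~0 is its "not mapped" sentinel):

static physaddr_t
check_va2pa(pde_t *pgdir, uintptr_t va)
{
	pte_t *p;

	pgdir = &pgdir[PDX(va)];
	if (!(*pgdir & PTE_P))
		return ~0;
	p = (pte_t *) KADDR(PTE_ADDR(*pgdir));
	if (!(p[PTX(va)] & PTE_P))
		return ~0;
	return PTE_ADDR(p[PTX(va)]);
}
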
Example #6
int do_pgfault(struct mm_struct *mm, machine_word_t error_code, uintptr_t addr)
{
	if (mm == NULL) {
		assert(current != NULL);
		/* Chen Yuheng 
		 * give handler a chance to deal with it 
		 */
		kprintf
		    ("page fault in kernel thread: pid = %d, name = %s, %d %08x.\n",
		     current->pid, current->name, error_code, addr);
		return -E_KILLED;
	}

	bool need_unlock = 1;
	if (!try_lock_mm(mm)) {
		if (current != NULL && mm->locked_by == current->pid) {
			need_unlock = 0;
		} else {
			lock_mm(mm);
		}
	}

	int ret = -E_INVAL;
	struct vma_struct *vma = find_vma(mm, addr);
	if (vma == NULL || vma->vm_start > addr) {
		goto failed;
	}
	if (vma->vm_flags & VM_STACK) {
		if (addr < vma->vm_start + PGSIZE) {
			goto failed;
		}
	}
	//kprintf("@ %x %08x\n", vma->vm_flags, vma->vm_start);
	//assert((vma->vm_flags & VM_IO)==0);
	if (vma->vm_flags & VM_IO) {
		ret = -E_INVAL;
		goto failed;
	}
	switch (error_code & 3) {
	default:
		/* default is 3: write, present */
	case 2:		/* write, not present */
		if (!(vma->vm_flags & VM_WRITE)) {
			goto failed;
		}
		break;
	case 1:		/* read, present */
		goto failed;
	case 0:		/* read, not present */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC))) {
			goto failed;
		}
	}

	pte_perm_t perm, nperm;
#ifdef ARCH_ARM
	/* ARM9 software emulated PTE_xxx */
	perm = PTE_P | PTE_U;
	if (vma->vm_flags & VM_WRITE) {
		perm |= PTE_W;
	}
#else
	ptep_unmap(&perm);
	ptep_set_u_read(&perm);
	if (vma->vm_flags & VM_WRITE) {
		ptep_set_u_write(&perm);
	}
#endif
	addr = ROUNDDOWN(addr, PGSIZE);

	ret = -E_NO_MEM;

	pte_t *ptep;
	if ((ptep = get_pte(mm->pgdir, addr, 1)) == NULL) {
		goto failed;
	}
	if (ptep_invalid(ptep)) {
#ifdef UCONFIG_BIONIC_LIBC
		if (vma->mfile.file != NULL) {
			struct file *file = vma->mfile.file;
			off_t old_pos = file->pos, new_pos =
			    vma->mfile.offset + addr - vma->vm_start;
#ifdef SHARE_MAPPED_FILE
			struct mapped_addr *maddr =
			    find_maddr(file, new_pos, NULL);
			if (maddr == NULL) {
#endif // SHARE_MAPPED_FILE
				struct Page *page;
				if ((page = alloc_page()) == NULL) {
					assert(false);
					goto failed;
				}
				nperm = perm;
#ifdef ARCH_ARM
				/* ARM9 software emulated PTE_xxx */
				nperm &= ~PTE_W;
#else
				ptep_unset_s_write(&nperm);
#endif
				page_insert_pte(mm->pgdir, page, ptep, addr,
						nperm);

				if ((ret =
				     filestruct_setpos(file, new_pos)) != 0) {
					assert(false);
					goto failed;
				}
				filestruct_read(file, page2kva(page), PGSIZE);
				if ((ret =
				     filestruct_setpos(file, old_pos)) != 0) {
					assert(false);
					goto failed;
				}
#ifdef SHARE_MAPPED_FILE
				if ((maddr = (struct mapped_addr *)
				     kmalloc(sizeof(struct mapped_addr))) !=
				    NULL) {
					maddr->page = page;
					maddr->offset = new_pos;
					page->maddr = maddr;
					list_add(&
						 (file->node->mapped_addr_list),
						 &(maddr->list));
				} else {
					assert(false);
				}
			} else {
				nperm = perm;
#ifdef ARCH_ARM
				/* ARM9 software emulated PTE_xxx */
				nperm &= ~PTE_W;
#else
				ptep_unset_s_write(&nperm);
#endif
				page_insert_pte(mm->pgdir, maddr->page, ptep,
						addr, nperm);
			}
#endif //SHARE_MAPPED_FILE

		} else
#endif //UCONFIG_BIONIC_LIBC
		if (!(vma->vm_flags & VM_SHARE)) {
			if (pgdir_alloc_page(mm->pgdir, addr, perm) == NULL) {
				goto failed;
			}
#ifdef UCONFIG_BIONIC_LIBC
			if (vma->vm_flags & VM_ANONYMOUS) {
				memset((void *)addr, 0, PGSIZE);
			}
#endif //UCONFIG_BIONIC_LIBC
		} else {	//shared mem
			lock_shmem(vma->shmem);
			uintptr_t shmem_addr =
			    addr - vma->vm_start + vma->shmem_off;
			pte_t *sh_ptep =
			    shmem_get_entry(vma->shmem, shmem_addr, 1);
			if (sh_ptep == NULL || ptep_invalid(sh_ptep)) {
				unlock_shmem(vma->shmem);
				goto failed;
			}
			unlock_shmem(vma->shmem);
			if (ptep_present(sh_ptep)) {
				page_insert(mm->pgdir, pa2page(*sh_ptep), addr,
					    perm);
			} else {
#ifdef UCONFIG_SWAP
				swap_duplicate(*sh_ptep);
				ptep_copy(ptep, sh_ptep);
#else
				panic("NO SWAP\n");
#endif
			}
		}
	} else {		//a present page, handle copy-on-write (cow) 
		struct Page *page, *newpage = NULL;
		bool cow =
		    ((vma->vm_flags & (VM_SHARE | VM_WRITE)) == VM_WRITE),
		    may_copy = 1;

#if 1
		if (!(!ptep_present(ptep)
		      || ((error_code & 2) && !ptep_u_write(ptep) && cow))) {
			//assert(PADDR(mm->pgdir) == rcr3());
			kprintf("%p %p %d %d %x\n", *ptep, addr, error_code,
				cow, vma->vm_flags);
			assert(0);
		}
#endif

		if (cow) {
			newpage = alloc_page();
		}
		if (ptep_present(ptep)) {
			page = pte2page(*ptep);
		} else {
#ifdef UCONFIG_SWAP
			if ((ret = swap_in_page(*ptep, &page)) != 0) {
				if (newpage != NULL) {
					free_page(newpage);
				}
				goto failed;
			}
#else
			assert(0);
#endif
			if (!(error_code & 2) && cow) {
#ifdef ARCH_ARM
//#warning ARM9 software emulated PTE_xxx
				perm &= ~PTE_W;
#else
				ptep_unset_s_write(&perm);
#endif
				may_copy = 0;
			}
		}

		if (cow && may_copy) {
#ifdef UCONFIG_SWAP
			if (page_ref(page) + swap_page_count(page) > 1) {
#else
			if (page_ref(page) > 1) {
#endif
				if (newpage == NULL) {
					goto failed;
				}
				memcpy(page2kva(newpage), page2kva(page),
				       PGSIZE);
				//kprintf("COW!\n");
				page = newpage, newpage = NULL;
			}
		}
#ifdef UCONFIG_BIONIC_LIBC
		else if (vma->mfile.file != NULL) {
#ifdef UCONFIG_SWAP
			assert(page_ref(page) + swap_page_count(page) == 1);
#else
			assert(page_ref(page) == 1);
#endif

#ifdef SHARE_MAPPED_FILE
			off_t offset = vma->mfile.offset + addr - vma->vm_start;
			struct mapped_addr *maddr =
			    find_maddr(vma->mfile.file, offset, page);
			if (maddr != NULL) {
				list_del(&(maddr->list));
				kfree(maddr);
				page->maddr = NULL;
				assert(find_maddr(vma->mfile.file, offset, page)
				       == NULL);
			} else {
			}
#endif //SHARE_MAPPED_FILE
		}
#endif //UCONFIG_BIONIC_LIBC
		else {
		}
		page_insert(mm->pgdir, page, addr, perm);
		if (newpage != NULL) {
			free_page(newpage);
		}
	}
	ret = 0;

failed:
	if (need_unlock) {
		unlock_mm(mm);
	}
	return ret;
}
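
Stripped of the swap accounting, the ARM path, and the mapped-file cases, the copy-on-write resolution above reduces to the following core (an illustrative x86 sketch using ucore's page helpers; the function name is mine):

static int
cow_resolve(pde_t *pgdir, pte_t *ptep, uintptr_t addr, uint32_t perm)
{
	struct Page *page = pte2page(*ptep);
	if (page_ref(page) > 1) {
		/* The frame is still shared with someone else: copy it
		 * and map the private copy instead. */
		struct Page *npage = alloc_page();
		if (npage == NULL)
			return -E_NO_MEM;
		memcpy(page2kva(npage), page2kva(page), PGSIZE);
		page = npage;
	}
	/* Sole owner (or a fresh copy): (re)map the page writable. */
	return page_insert(pgdir, page, addr, perm | PTE_W);
}
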
Example #7
/* ucore uses copy-on-write when forking a new process;
 * thus copy_range only copies pdt/pte entries and sets their permission to
 * READONLY, and a write will be handled in pgfault
 */
int
copy_range(pgd_t *to, pgd_t *from, uintptr_t start, uintptr_t end, bool share) {
    assert(start % PGSIZE == 0 && end % PGSIZE == 0);
    assert(USER_ACCESS(start, end));

    do { 
        pte_t *ptep = get_pte(from, start, 0), *nptep;
        if (ptep == NULL) {
            if (get_pud(from, start, 0) == NULL) {
                start = ROUNDDOWN(start + PUSIZE, PUSIZE);
            }
            else if (get_pmd(from, start, 0) == NULL) {
                start = ROUNDDOWN(start + PMSIZE, PMSIZE);
            }
            else {
                start = ROUNDDOWN(start + PTSIZE, PTSIZE);
            }
            continue ;
        }
        if (*ptep != 0) {
            if ((nptep = get_pte(to, start, 1)) == NULL) {
                return -E_NO_MEM;
            }
            int ret;
            //kprintf("%08x %08x %08x\n", nptep, *nptep, start);
            assert(*ptep != 0 && *nptep == 0);
#ifdef ARCH_ARM
            //TODO  add code to handle swap 
            if (ptep_present(ptep)){ 
              //nobody should be able to write this page
              //before a W-pgfault
              pte_perm_t perm = PTE_P;
              if(ptep_u_read(ptep))
                perm |= PTE_U;
              if(!share){
                //Original page should be set to readonly!
                //because Copy-on-write may happen
                //after the current process modifies its page
                ptep_set_perm(ptep, perm);
              }else{
                if(ptep_u_write(ptep)){
                  perm |= PTE_W;
                }
              }
              struct Page *page = pte2page(*ptep);
              ret = page_insert(to, page, start, perm);

            }
#else /* ARCH_ARM */
            if (ptep_present(ptep)) {
              pte_perm_t perm = ptep_get_perm(ptep, PTE_USER);
              struct Page *page = pte2page(*ptep);
              if (!share && ptep_s_write(ptep)) {
                ptep_unset_s_write(&perm);
                pte_perm_t perm_with_swap_stat = ptep_get_perm(ptep, PTE_SWAP);
                ptep_set_perm(&perm_with_swap_stat, perm);
                page_insert(from, page, start, perm_with_swap_stat);
              }
              ret = page_insert(to, page, start, perm);
              assert(ret == 0);
            }
#endif /* ARCH_ARM */
            else {
#ifdef CONFIG_NO_SWAP
              assert(0);
#endif
              swap_entry_t entry;
              ptep_copy(&entry, ptep);
              swap_duplicate(entry);
              ptep_copy(nptep, &entry);
            }
        }
        start += PGSIZE;
    } while (start != 0 && start < end);
#ifdef ARCH_ARM
    /* we have modified the PTE of the original
     * process, so invalidate TLB */
    tlb_invalidate_all();
#endif
    return 0;
}
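
For context, the fork path drives copy_range roughly as follows (a hedged sketch modeled on ucore's dup_mmap; helper names follow ucore but details vary between versions). Passing share = 0 leaves both parent and child read-only, so the first write on either side lands in the copy-on-write page-fault path shown earlier:

static int
dup_mmap_sketch(struct mm_struct *to, struct mm_struct *from) {
    list_entry_t *list = &(from->mmap_list), *le = list;
    while ((le = list_prev(le)) != list) {
        struct vma_struct *vma = le2vma(le, list_link);
        struct vma_struct *nvma = vma_create(vma->vm_start, vma->vm_end, vma->vm_flags);
        if (nvma == NULL) {
            return -E_NO_MEM;
        }
        insert_vma_struct(to, nvma);
        if (copy_range(to->pgdir, from->pgdir, vma->vm_start, vma->vm_end, 0) != 0) {
            return -E_NO_MEM;
        }
    }
    return 0;
}
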
Example #8
//
// Set up the initial program binary, stack, and processor flags
// for a user process.
// This function is ONLY called during kernel initialization,
// before running the first user-mode environment.
//
// This function loads all loadable segments from the ELF binary image
// into the environment's user memory, starting at the appropriate
// virtual addresses indicated in the ELF program header.
// At the same time it clears to zero any portions of these segments
// that are marked in the program header as being mapped
// but not actually present in the ELF file - i.e., the program's bss section.
//
// All this is very similar to what our boot loader does, except the boot
// loader also needs to read the code from disk.  Take a look at
// boot/main.c to get ideas.
//
// Finally, this function maps one page for the program's initial stack.
//
// load_icode panics if it encounters problems.
//  - How might load_icode fail?  What might be wrong with the given input?
//
static void
load_icode(struct Env *e, uint8_t *binary, size_t size)
{
	// Hints: 
	//  Load each program segment into virtual memory
	//  at the address specified in the ELF section header.
	//  You should only load segments with ph->p_type == ELF_PROG_LOAD.
	//  Each segment's virtual address can be found in ph->p_va
	//  and its size in memory can be found in ph->p_memsz.
	//  The ph->p_filesz bytes from the ELF binary, starting at
	//  'binary + ph->p_offset', should be copied to virtual address
	//  ph->p_va.  Any remaining memory bytes should be cleared to zero.
	//  (The ELF header should have ph->p_filesz <= ph->p_memsz.)
	//  Use functions from the previous lab to allocate and map pages.
	//
	//  All page protection bits should be user read/write for now.
	//  ELF segments are not necessarily page-aligned, but you can
	//  assume for this function that no two segments will touch
	//  the same virtual page.
	//
	//  You may find a function like segment_alloc useful.
	//
	//  Loading the segments is much simpler if you can move data
	//  directly into the virtual addresses stored in the ELF binary.
	//  So which page directory should be in force during
	//  this function?
	//
	// Hint:
	//  You must also do something with the program's entry point,
	//  to make sure that the environment starts executing there.
	//  What?  (See env_run() and env_pop_tf() below.)

	// LAB 3: Your code here.
        struct Elf *env_elf;
        struct Proghdr *ph;
        struct Page *pg;
        int i;
        unsigned int old_cr3;
        env_elf = (struct Elf *)binary;
        old_cr3 = rcr3();
        lcr3(PADDR(e->env_pgdir));
        if (env_elf->e_magic != ELF_MAGIC)
                panic("load_icode: invalid ELF magic");
        ph = (struct Proghdr*)((unsigned int)env_elf + env_elf->e_phoff);
        for(i=0; i < env_elf->e_phnum;i++){
                if(ph->p_type == ELF_PROG_LOAD){
                        segment_alloc(e,(void *)ph->p_va, ph->p_memsz);
                        memset((void *)ph->p_va, 0, ph->p_memsz);
                        memmove((void *)ph->p_va, (void *)((unsigned int)env_elf + ph->p_offset), ph->p_filesz);
                }
                ph++;
        }

        e->env_tf.tf_eip = env_elf->e_entry;

	// Now map one page for the program's initial stack
	// at virtual address USTACKTOP - PGSIZE.

	// LAB 3: Your code here.
        if (page_alloc(&pg) != 0)
                panic("load_icode: page_alloc failed");

        page_insert(e->env_pgdir, pg, (void *)(USTACKTOP - PGSIZE), PTE_U | PTE_W);

        lcr3(old_cr3);
}
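
The segment_alloc helper used above allocates and maps the pages covering a segment. A sketch consistent with this example's page_alloc(&pg)/page_insert API (illustrative; real solutions differ in details such as permissions and rounding):

static void
segment_alloc(struct Env *e, void *va, size_t len)
{
        uintptr_t start = ROUNDDOWN((uintptr_t) va, PGSIZE);
        uintptr_t end = ROUNDUP((uintptr_t) va + len, PGSIZE);
        struct Page *pg;
        for (; start < end; start += PGSIZE) {
                if (page_alloc(&pg) != 0)
                        panic("segment_alloc: out of free memory");
                if (page_insert(e->env_pgdir, pg, (void *) start, PTE_U | PTE_W) != 0)
                        panic("segment_alloc: page_insert failed");
        }
}
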
Example #9
// Try to send 'value' to the target env 'envid'.
// If srcva < UTOP, then also send page currently mapped at 'srcva',
// so that receiver gets a duplicate mapping of the same page.
//
// The send fails with a return value of -E_IPC_NOT_RECV if the
// target is not blocked, waiting for an IPC.
//
// The send also can fail for the other reasons listed below.
//
// Otherwise, the send succeeds, and the target's ipc fields are
// updated as follows:
//    env_ipc_recving is set to 0 to block future sends;
//    env_ipc_from is set to the sending envid;
//    env_ipc_value is set to the 'value' parameter;
//    env_ipc_perm is set to 'perm' if a page was transferred, 0 otherwise.
// The target environment is marked runnable again, returning 0
// from the paused sys_ipc_recv system call.  (Hint: does the
// sys_ipc_recv function ever actually return?)
//
// If the sender wants to send a page but the receiver isn't asking for one,
// then no page mapping is transferred, but no error occurs.
// The ipc only happens when no errors occur.
//
// Returns 0 on success, < 0 on error.
// Errors are:
//	-E_BAD_ENV if environment envid doesn't currently exist.
//		(No need to check permissions.)
//	-E_IPC_NOT_RECV if envid is not currently blocked in sys_ipc_recv,
//		or another environment managed to send first.
//	-E_INVAL if srcva < UTOP but srcva is not page-aligned.
//	-E_INVAL if srcva < UTOP and perm is inappropriate
//		(see sys_page_alloc).
//	-E_INVAL if srcva < UTOP but srcva is not mapped in the caller's
//		address space.
//	-E_INVAL if (perm & PTE_W), but srcva is read-only in the
//		current environment's address space.
//	-E_NO_MEM if there's not enough memory to map srcva in envid's
//		address space.
static int
sys_ipc_try_send(envid_t envid, uint32_t value, void *srcva, unsigned perm)
{
	// LAB 4: Your code here.
    /* lj */
    struct Env *denv = NULL;
    struct Page *pg = NULL;
    pte_t *pte = NULL;
    int ret = 0;
    //cprintf("%x try_send to %x [%d]\n", curenv->env_id, envid, value);

    if((ret = envid2env(envid, &denv, 0)) < 0) {
        return -E_BAD_ENV;
    }
    else if(!denv->env_ipc_recving) {
        //cprintf("00\n");
        return -E_IPC_NOT_RECV;
    }
    if((void *)-1 != srcva) {
        if((size_t)srcva > UTOP) {
            return -E_INVAL;
        }
        else if((size_t)srcva & (PGSIZE-1)) {
            return -E_INVAL;
        }
        else if((PTE_U | PTE_P) != (perm & (PTE_U | PTE_P))) {
            return -E_INVAL;
        }
        else if(perm & ~(PTE_U | PTE_P | PTE_W | PTE_AVAIL)) {
            return -E_INVAL;
        }
        else if(NULL == (pg = page_lookup(curenv->env_pgdir, srcva, &pte))) {
            return -E_INVAL;
        }
        else if((perm & PTE_W) != (*pte & PTE_W)) {
            return -E_INVAL;
        }
    }

    denv->env_ipc_from = curenv->env_id;
    denv->env_ipc_value = value;
    if((void *)-1 != srcva) {
        if((void *)-1 == denv->env_ipc_dstva) {
            denv->env_ipc_perm = 0;
        }
        else if((ret = page_insert(denv->env_pgdir, pg, denv->env_ipc_dstva, perm)) < 0) {
        }
        else {
            denv->env_ipc_perm = perm;
        }
    }
    /* After an env sends an IPC to denv,
     * mark denv runnable whether or not the send succeeded
     * */
    denv->env_ipc_recving = 0;
    denv->env_tf.tf_regs.reg_eax = ret;
    denv->env_status = ENV_RUNNABLE;

    //panic("sys_ipc_try_send not implemented");
    //cprintf("ret %d\n",ret);
    //cprintf("%x try_send to %x [%d] awake %x\n", curenv->env_id, envid, value, denv->env_id);
    return ret;
}
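
Note that this variant uses (void *) -1, rather than any address at or above UTOP, as its "no page" sentinel, so the matching user-level wrapper must pass the same value. A value-only send under this convention looks like (hedged usage sketch):

    r = sys_ipc_try_send(to_env, value, (void *) -1, 0);
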
Example #10
// Try to send 'value' to the target env 'envid'.
// If srcva < UTOP, then also send page currently mapped at 'srcva',
// so that receiver gets a duplicate mapping of the same page.
//
// The send fails with a return value of -E_IPC_NOT_RECV if the
// target is not blocked, waiting for an IPC.
//
// The send also can fail for the other reasons listed below.
//
// Otherwise, the send succeeds, and the target's ipc fields are
// updated as follows:
//    env_ipc_recving is set to 0 to block future sends;
//    env_ipc_from is set to the sending envid;
//    env_ipc_value is set to the 'value' parameter;
//    env_ipc_perm is set to 'perm' if a page was transferred, 0 otherwise.
// The target environment is marked runnable again, returning 0
// from the paused sys_ipc_recv system call.  (Hint: does the
// sys_ipc_recv function ever actually return?)
//
// If the sender wants to send a page but the receiver isn't asking for one,
// then no page mapping is transferred, but no error occurs.
// The ipc only happens when no errors occur.
//
// Returns 0 on success, < 0 on error.
// Errors are:
//	-E_BAD_ENV if environment envid doesn't currently exist.
//		(No need to check permissions.)
//	-E_IPC_NOT_RECV if envid is not currently blocked in sys_ipc_recv,
//		or another environment managed to send first.
//	-E_INVAL if srcva < UTOP but srcva is not page-aligned.
//	-E_INVAL if srcva < UTOP and perm is inappropriate
//		(see sys_page_alloc).
//	-E_INVAL if srcva < UTOP but srcva is not mapped in the caller's
//		address space.
//	-E_INVAL if (perm & PTE_W), but srcva is read-only in the
//		current environment's address space.
//	-E_NO_MEM if there's not enough memory to map srcva in envid's
//		address space.
static int
sys_ipc_try_send(envid_t envid, uint32_t value, void *srcva, unsigned perm)
{
	// LAB 4: Your code here.
    struct Env *e;
    int ret = envid2env(envid, &e, 0);
    if (ret)
        return ret;
    if (!e->env_ipc_recving)
        return -E_IPC_NOT_RECV;
    if (srcva < (void*)UTOP)
    {
        pte_t *pte;
        struct PageInfo *pg = page_lookup(curenv->env_pgdir, srcva, &pte);
        if (!pg) return -E_INVAL;
        //if ((*pte & perm) != perm) return -E_INVAL;
        if ((perm & PTE_W) && !(*pte & PTE_W)) return -E_INVAL;
        if (srcva != ROUNDDOWN(srcva, PGSIZE)) return -E_INVAL;
        if (e->env_ipc_dstva < (void*)UTOP)
        {
            ret = page_insert(e->env_pgdir, pg, e->env_ipc_dstva, perm);
            if (ret) return ret;
            e->env_ipc_perm = perm;
        }
    }
    e->env_ipc_recving = 0;
    e->env_ipc_from = curenv->env_id;
    e->env_ipc_value = value; 
    e->env_status = ENV_RUNNABLE;
    e->env_tf.tf_regs.reg_eax = 0;
    return 0;
/*
struct Env *e;
	struct PageInfo *page;
	pte_t *pte;

	if (envid2env(envid, &e, 0) != 0) 
		return -E_BAD_ENV;

	if (e->env_ipc_recving == 0)
		return -E_IPC_NOT_RECV;

	e->env_ipc_recving = 0;
	e->env_ipc_from = curenv->env_id;
	e->env_ipc_value = value;

	if ((uint32_t) srcva < UTOP) { 
		if ((uint32_t) srcva % PGSIZE != 0)
			return -E_INVAL;

		if ((perm & PTE_U) == 0
			|| (perm & PTE_P) == 0
			|| (perm & ~PTE_SYSCALL) != 0) 
			return -E_INVAL;

		if ((page = page_lookup(curenv->env_pgdir, srcva, &pte)) == NULL)
			return -E_INVAL;

		if ((perm & PTE_W) != 0 && (*pte & PTE_W) == 0) 
			return -E_INVAL;

		if ((page_insert(e->env_pgdir, page, e->env_ipc_dstva, perm))!=0) {
			return -E_NO_MEM;
		}

		cprintf("sys_ipc_try_send: from 0x%x\n", e->env_ipc_from);
		e->env_ipc_perm = perm;
	} else {
		e->env_ipc_perm = 0;
		//cprintf("sys_ipc_try_send: perm=0 from 0x%x\n", e->env_ipc_from);
	}

	e->env_status = ENV_RUNNABLE;
	return 0;
*/
//	panic("sys_ipc_try_send not implemented");
}
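
On the user side, JOS wraps this system call in a spin-until-ready helper. A hedged sketch of the typical lib/ipc.c ipc_send (the exact loop shape varies by solution); pg == NULL is turned into an address at UTOP, which this kernel treats as "no page":

void
ipc_send(envid_t to_env, uint32_t val, void *pg, int perm)
{
    int r;
    if (pg == NULL)
        pg = (void *) UTOP;   // >= UTOP means "don't send a page"
    while ((r = sys_ipc_try_send(to_env, val, pg, perm)) == -E_IPC_NOT_RECV)
        sys_yield();          // receiver not ready yet; try again later
    if (r < 0)
        panic("ipc_send: %e", r);
}
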
Example #11
//
// Set up the initial program binary, stack, and processor flags
// for a user process.
//
// This function loads all loadable segments from the ELF binary image
// into the environment's user memory, starting at the appropriate
// virtual addresses indicated in the ELF program header.
// It also clears to zero any portions of these segments
// that are marked in the program header as being mapped
// but not actually present in the ELF file -- i.e., the program's bss section.
//
// Finally, this function maps one page for the program's initial stack.
//
// load_elf panics if it encounters problems.
//  - How might load_elf fail?  What might be wrong with the given input?
//
static void
load_elf(struct Env *e, uint8_t *binary, size_t size)
{
	struct Elf *elf = (struct Elf *) binary;
	// Load each program segment into environment 'e's virtual memory
	// at the address specified in the ELF section header.
	// Only load segments with ph->p_type == ELF_PROG_LOAD.
	// Each segment's virtual address can be found in ph->p_va
	// and its size in memory can be found in ph->p_memsz.
	// The ph->p_filesz bytes from the ELF binary, starting at
	// 'binary + ph->p_offset', should be copied to virtual address
	// ph->p_va.  Any remaining memory bytes should be cleared to zero.
	// (The ELF header should have ph->p_filesz <= ph->p_memsz.)
	// Use functions from the previous lab to allocate and map pages.
	//
	// All page protection bits should be user read/write for now.
	// ELF segments are not necessarily page-aligned, but you can
	// assume for this function that no two segments will touch
	// the same virtual page.
	//
	// You may find a function like segment_alloc useful.
	//
	// Loading the segments is much simpler if you can move data
	// directly into the virtual addresses stored in the ELF binary.
	// So which page directory should be in force during
	// this function?
	//
	// All this is very similar to what our boot loader does, except the
	// boot loader reads the code from disk and doesn't check whether
	// segments are loadable.  Take a look at boot/main.c to get ideas.
	//
	// You must also store the program's entry point somewhere,
	// to make sure that the environment starts executing at that point.
	// See env_run() and env_iret() below.
    

	// LAB 3: Your code here.
    lcr3(PADDR(e->env_pgdir));

    if (elf->e_magic != ELF_MAGIC)
        panic("Invalid Elf Magic");
    struct Proghdr *ph = (struct Proghdr *) ((uint8_t *) elf + elf->e_phoff);
    int ph_num = elf->e_phnum;

    // iterate over all program headers
    for (; --ph_num >= 0; ph++) 
        if (ph->p_type == ELF_PROG_LOAD)
        {
            segment_alloc(e, ph->p_va, ph->p_memsz);
            // copy data from binary to address space
            memmove((void *)ph->p_va, binary + ph->p_offset, ph->p_filesz);
        }

    // set entry point for new env
    e->env_tf.tf_eip = elf->e_entry;
    e->env_tf.tf_esp = USTACKTOP;
	// Now map one page for the program's initial stack
	// at virtual address USTACKTOP - PGSIZE.
	// (What should the permissions be?)
    struct Page *p;
    if ((p = page_alloc()) == NULL || page_insert(e->env_pgdir, p, USTACKTOP-PGSIZE, PTE_U|PTE_W|PTE_P))
        panic("segment_alloc: Can't allocate page");
    memset(page2kva(p), 0, PGSIZE);
}
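
For reference, the program-header fields this loader relies on (struct Proghdr as declared in JOS's inc/elf.h):

struct Proghdr {
	uint32_t p_type;     // segment type; ELF_PROG_LOAD means loadable
	uint32_t p_offset;   // byte offset of the segment data in the file
	uint32_t p_va;       // virtual address to load the segment at
	uint32_t p_pa;       // physical address (unused by this loader)
	uint32_t p_filesz;   // bytes of data stored in the file
	uint32_t p_memsz;    // bytes occupied in memory; the excess over p_filesz is bss
	uint32_t p_flags;
	uint32_t p_align;
};
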
Example #12
// Block until a value is ready.  Record that you want to receive
// using the env_ipc_recving and env_ipc_dstva fields of struct Env,
// mark yourself not runnable, and then give up the CPU.
//
// If 'dstva' is < UTOP, then you are willing to receive a page of data.
// 'dstva' is the virtual address at which the sent page should be mapped.
//
// This function only returns on error, but the system call will eventually
// return 0 on success.
// Return < 0 on error.  Errors are:
//	-E_INVAL if dstva < UTOP but dstva is not page-aligned.
static int
sys_ipc_recv(void *dstva)
{
	// LAB 4: Your code here.
	
	if ((uint32_t)dstva < UTOP)
	{
		if((uint32_t)dstva % PGSIZE != 0)
		{
			return -E_INVAL;
		}
		curenv->env_ipc_dstva = dstva;
	}
	else
	{
		curenv->env_ipc_dstva = (void*)0xFFFFFFFF;
	}
	
	
	// LAB 4 CHALLENGE: Check if another env has queued a send to us
	// If they do, receive it and return, short circuiting on the first
	// received
	
	// i keeps track of the last index received from
	static int i = 0;
	static int recvs = RECV_LIMIT;
	// k is the counting index, 
	int k = i;
	// j is the stop index, when j == k we end
	int j;

	// Loop over all envs circularly starting with the last one we received
	// from
	// Every RECV_LIMIT receives from the same env, start one after the
	// last one we received from.
	// This should eliminate livelock when one env is repeatedly queueing
	// messages to this env faster than we receive, but it still provides
	// good performance when we only want to receive from a small number of
	// envs
	int shift_j = 1;
	for(j = k-1; k != j ; k = (k+1)%NENV)
	{
		// We need to shift j up 1 to that envs[i] is checked
		// This solves the problem of finding the end of the ring
		if(shift_j)
		{
			j = (j+1) % NENV;
			shift_j = 0;
		}

		struct Env *env = &envs[k];
		if(env->env_status == ENV_NOT_RUNNABLE &&
			 env->env_ipc_send_to == curenv->env_id)
		{
			// This environment has a send waiting for us!
			curenv->env_ipc_value = env->env_ipc_send_value;
			curenv->env_ipc_from = env->env_id;

			// Map the page iff they sent one AND we want one
			if((uint32_t)env->env_ipc_send_srcva < UTOP &&
					(uint32_t)curenv->env_ipc_dstva < UTOP)
			{
				int r;
				struct Page *page = page_lookup(env->env_pgdir, 
															env->env_ipc_send_srcva, NULL);
				r = page_insert(curenv->env_pgdir, page, curenv->env_ipc_dstva, 
																			env->env_ipc_send_perm);
				if(r < 0)
				{
					// Error mapping, we need to make the send call receive the
					// error and ignore it here as if it never happened
					env->env_ipc_send_to = 0;
					env->env_tf.tf_regs.reg_eax = r;
					env->env_status = ENV_RUNNABLE;
					continue;
				}

				curenv->env_ipc_perm = env->env_ipc_send_perm;
			}
			else
			{
				// If we do not map, clear perm
				curenv->env_ipc_perm = 0;
			}

			// We have received it, check that we do not need to increment i
			if(k == i)
			{
				if (--recvs == 0)
				{
					// If we have received RECV_LIMIT messages in a row from the same
					// env, start checking one past that env next time to prevent
					// livelock
					recvs = RECV_LIMIT;
					k = (k + 1) % NENV;
				}
			}
			else
			{
				recvs = RECV_LIMIT;
			}

			i = k;

			// Queued send has been received, ready the other env and return
			env->env_ipc_send_to = 0;
			env->env_tf.tf_regs.reg_eax = 0;
			env->env_status = ENV_RUNNABLE;
			return 0;
		}
	}
	

	curenv->env_ipc_recving = 1;
	curenv->env_status = ENV_NOT_RUNNABLE;
	sched_yield();

	
	return 0;
}
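
The user-level counterpart pulls the results out of the env structure once the system call unblocks. A hedged sketch of the usual lib/ipc.c ipc_recv (thisenv is JOS's per-environment pointer):

int32_t
ipc_recv(envid_t *from_env_store, void *pg, int *perm_store)
{
	if (pg == NULL)
		pg = (void *) UTOP;   // we are not willing to receive a page
	int r = sys_ipc_recv(pg);
	if (r < 0) {
		if (from_env_store)
			*from_env_store = 0;
		if (perm_store)
			*perm_store = 0;
		return r;
	}
	if (from_env_store)
		*from_env_store = thisenv->env_ipc_from;
	if (perm_store)
		*perm_store = thisenv->env_ipc_perm;
	return thisenv->env_ipc_value;
}
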
Example #13
// Try to send 'value' to the target env 'envid'.
// If srcva < UTOP, then also send page currently mapped at 'srcva',
// so that receiver gets a duplicate mapping of the same page.
//
// The send fails with a return value of -E_IPC_NOT_RECV if the
// target is not blocked, waiting for an IPC.
//
// The send also can fail for the other reasons listed below.
//
// Otherwise, the send succeeds, and the target's ipc fields are
// updated as follows:
//    env_ipc_recving is set to 0 to block future sends;
//    env_ipc_from is set to the sending envid;
//    env_ipc_value is set to the 'value' parameter;
//    env_ipc_perm is set to 'perm' if a page was transferred, 0 otherwise.
// The target environment is marked runnable again, returning 0
// from the paused sys_ipc_recv system call.  (Hint: does the
// sys_ipc_recv function ever actually return?)
//
// If the sender wants to send a page but the receiver isn't asking for one,
// then no page mapping is transferred, but no error occurs.
// The ipc only happens when no errors occur.
//
// Returns 0 on success, < 0 on error.
// Errors are:
//	-E_BAD_ENV if environment envid doesn't currently exist.
//		(No need to check permissions.)
//	-E_IPC_NOT_RECV if envid is not currently blocked in sys_ipc_recv,
//		or another environment managed to send first.
//	-E_INVAL if srcva < UTOP but srcva is not page-aligned.
//	-E_INVAL if srcva < UTOP and perm is inappropriate
//		(see sys_page_alloc).
//	-E_INVAL if srcva < UTOP but srcva is not mapped in the caller's
//		address space.
//	-E_INVAL if (perm & PTE_W), but srcva is read-only in the
//		current environment's address space.
//	-E_NO_MEM if there's not enough memory to map srcva in envid's
//		address space.
static int
sys_ipc_try_send(envid_t envid, uint32_t value, void *srcva, unsigned perm)
{
	// LAB 4: Your code here.
	struct Env *env;
	int r;
	uint32_t srcint = (uint32_t)srcva;
	struct Page *srcpage;

	// Make sure env exists
	if ((r = envid2env(envid, &env, 0)) < 0)
		return r;

	int map_page = 0; // By default, we do not map a page

	// First check that our input makes sense
	if (srcint < UTOP)
	{
		// Check if srcva < UTOP but not page-aligned
		if(srcint % PGSIZE != 0)
		{
			return -E_INVAL;
		}

		// Check that permissions are appropriate
		if (!( (perm & PTE_P) &&
			  	 (perm & PTE_U) &&
			 		!(perm & ~(PTE_P | PTE_U | PTE_AVAIL | PTE_W))))
		{
			return -E_INVAL;
		}

		// Make sure that srcva is mapped in current env
		pte_t *pte;
		if ((srcpage = page_lookup(curenv->env_pgdir, srcva, &pte)) == NULL)
		{
			return -E_INVAL;
		}
		
		// Ensure that source and perm are either both RO or W
		if ((perm & PTE_W) != (*pte & PTE_W))
		{
			return -E_INVAL;
		}

		// Page mapping checks out, we are mapping a page if 
		// they want it
		map_page = 1;

		
	}

	// Make sure env receiving
	if (!env->env_ipc_recving)
	{
		// CHALLENGE: If the env is not receiving, set up
		// send fields in our env and sleep for response.
		// Note: The receiver now has the responsibility to
		// map its own page based off of what we have entered in
		// our struct (the va is guaranteed to be valid if it is < UTOP)
		// It must also set our EAX to the error code for the mapping.
		curenv->env_ipc_send_to = envid;
		curenv->env_ipc_send_value = value;
		curenv->env_ipc_send_srcva = srcva;
		curenv->env_ipc_send_perm = perm;


		// Sleep and yield
		curenv->env_status = ENV_NOT_RUNNABLE;
		sched_yield();
	}

	// The env is receiving, set their fields
	
	// Only map the page if our checks passed AND the destination VA
	// is < UTOP
	if(map_page && (uint32_t) env->env_ipc_dstva < UTOP)
	{
		// Map the page
		r = page_insert(env->env_pgdir, srcpage, env->env_ipc_dstva, perm);
		if(r < 0)
			return r;

		// Update perm
		env->env_ipc_perm = perm;
	}



	// If we made it here, there were no errors, update fields
	env->env_ipc_recving = 0;
	env->env_ipc_from = curenv->env_id;
	env->env_ipc_value = value;
	env->env_status = ENV_RUNNABLE;
	env->env_tf.tf_regs.reg_eax = 0;

	return 0;
}
Example #14
/** @brief Handles a page fault.
 *
 *  Resolves copy-on-write and zero-fill-on-demand (ZFOD) faults, and
 *  otherwise defers to an installed swexn handler or kills the task.
 *
 *  @param  void
 *  @return void
 */
void page_fault_handler(void){

	uint32_t fault_addr = get_cr2();
	
	mutex_lock(&cur_task->pcb_mutex);

	uint32_t align_addr = fault_addr & PGALIGN_MASK;
	uint32_t *ptep = NULL;
	Page *phy_page = NULL;
	phy_page = page_lookup(cur_task->task_pgdir, align_addr, &ptep);
	uint32_t pte = (ptep != NULL) ? *ptep : 0;
	
	/** Catch COW page fault */
	if((ptep != NULL) &&(pte & PTE_P) && (pte & PTE_COW))
	{	
		mutex_lock(&mtx_m.frame_mutex);
		if(phy_page->pp_ref == 1)
		{
			*ptep = (pte | PTE_W) &(~PTE_COW);
			mutex_unlock(&mtx_m.frame_mutex);
		}
		else{
			mutex_unlock(&mtx_m.frame_mutex);
			if(pte & PTE_RWMARK){
				lprintf("ERROR: Cannot access TXT or ro_data area!");
				mutex_unlock(&cur_task->pcb_mutex);
				sys_vanish();
				return;
			}
			Page *new_page = page_alloc();
			if(new_page == NULL){
	 		lprintf("ERROR: No pages for COW in page_fault_handler");
				mutex_unlock(&cur_task->pcb_mutex);
				sys_vanish();
				return;
			}
		  
			uint32_t *temp_page = smemalign(PAGE_SIZE, PAGE_SIZE);
			if (temp_page == NULL){
			lprintf("ERROR: No memory for temp_page in page_fault_handler!");
				mutex_unlock(&cur_task->pcb_mutex);
				sys_vanish();
				return;
			}

			/* Copy the physical page to a temporary page in kernel space */
			memcpy((void*)temp_page, (void*)align_addr, PAGE_SIZE);
			
			if(page_insert(cur_task->task_pgdir, new_page, align_addr, 
				PTE_P | PTE_U | PTE_W) < 0)
			{
			lprintf("ERROR: No memory for COW in page_fault_handler");
				mutex_unlock(&cur_task->pcb_mutex);
				sys_vanish();
				return;
			}
			/* Copy the content to the new mapped physical page */
			memcpy((void*)align_addr, (void*)temp_page, PAGE_SIZE);

			/* Free the temp physical page */
			sfree(temp_page, PAGE_SIZE);
			
			mutex_lock(&mtx_m.frame_mutex);
			phy_page->pp_ref--;
			mutex_unlock(&mtx_m.frame_mutex);
		}
	}
	/** Catch the ZFOD */
	else if ((ptep != NULL) &&(pte & PTE_P) && (pte & PTE_ZFOD))
	{
		Page *pg = page_alloc();
		if(pg == NULL){
	 	lprintf("ERROR: No pages for ZFOD in page_fault_handler");
			mutex_unlock(&cur_task->pcb_mutex);
			sys_vanish();
			return;
		}
		uint32_t perm = PTE_P | PTE_U | PTE_W;
		if(page_insert(cur_task->task_pgdir, pg, align_addr, perm) < 0)
		{
		lprintf("ERROR: No memory for ZFOD in page_fault_handler");	
			mutex_unlock(&cur_task->pcb_mutex);
			sys_vanish();
			return;
		}
		bzero((void*)align_addr, PAGE_SIZE);
	}
	/* Check if installed swexn handler can fix this up */
	else if(cur_thread->swexn_eip != NULL)
	{
		mutex_unlock(&cur_task->pcb_mutex);
		swexn_handler(HAS_ERROR_CODE, SWEXN_CAUSE_PAGEFAULT);
		return;
	}
	else{
		mutex_unlock(&cur_task->pcb_mutex);
		sys_vanish();
		return;
	}
	mutex_unlock(&cur_task->pcb_mutex);
	return;
}
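// For context: the COW branch above only fires if a fork-style path first
// downgraded a writable mapping.  A hypothetical sketch of that counterpart,
// assuming PTE_COW is a software-available PTE bit and invalidate_page() is
// this kernel's TLB-invalidation helper (both names are assumptions here):
static void mark_cow(uint32_t *ptep, uint32_t vaddr)
{
	*ptep = (*ptep & ~PTE_W) | PTE_COW;	/* read-only + COW flag */
	invalidate_page(vaddr);			/* drop the stale TLB entry */
}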
Exemple #15
0
// check page_insert, page_remove, &c
static void
check_page(void)
{
	struct Page *pp, *pp0, *pp1, *pp2;
	struct Page *fl;
	pte_t *ptep, *ptep1;
	uintptr_t va;
	int i;

	// should be able to allocate three pages
	pp0 = pp1 = pp2 = 0;
	assert((pp0 = page_alloc()));
	assert((pp1 = page_alloc()));
	assert((pp2 = page_alloc()));

	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);


	// temporarily steal the rest of the free pages
	fl = page_free_list;
	page_free_list = 0;

	// should be no free memory
	assert(!page_alloc());

	// there is no page allocated at address 0
	assert(page_lookup(kern_pgdir, 0x0, &ptep) == NULL);

	// there is no free memory, so we can't allocate a page table
	assert(page_insert(kern_pgdir, pp1, 0x0, PTE_W) < 0);

	// free pp0 and try again: pp0 should be used for page table
	page_free(pp0);
	assert(page_insert(kern_pgdir, pp1, 0x0, PTE_W) == 0);
	assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
	assert(check_va2pa(kern_pgdir, 0x0) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp0->pp_ref == 1);

	// should be able to map pp2 at PGSIZE because pp0 is already allocated for page table
	assert(page_insert(kern_pgdir, pp2, PGSIZE, PTE_W) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);

	// should be no free memory
	assert(!page_alloc());

	// should be able to map pp2 at PGSIZE because it's already there
	assert(page_insert(kern_pgdir, pp2, PGSIZE, PTE_W) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);

	// pp2 should NOT be on the free list
	// could happen if ref counts are handled sloppily in page_insert
	assert(!page_alloc());

	// check that pgdir_walk returns a pointer to the pte
	ptep = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(PGSIZE)]));
	assert(pgdir_walk(kern_pgdir, PGSIZE, false) == ptep+PTX(PGSIZE));

	// should be able to change permissions too.
	assert(page_insert(kern_pgdir, pp2, PGSIZE, PTE_W|PTE_U) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);
	assert(*pgdir_walk(kern_pgdir, PGSIZE, false) & PTE_U);
	assert(kern_pgdir[0] & PTE_U);

	// should not be able to map at PTSIZE because we'd need a free page for a page table
	assert(page_insert(kern_pgdir, pp0, PTSIZE, PTE_W) < 0);

	// insert pp1 at PGSIZE (replacing pp2)
	assert(page_insert(kern_pgdir, pp1, PGSIZE, PTE_W) == 0);
	assert(!(*pgdir_walk(kern_pgdir, PGSIZE, false) & PTE_U));

	// should have pp1 at both 0 and PGSIZE, pp2 nowhere, ...
	assert(check_va2pa(kern_pgdir, 0) == page2pa(pp1));
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp1));
	// ... and ref counts should reflect this
	assert(pp1->pp_ref == 2);
	assert(pp2->pp_ref == 0);

	// pp2 should be returned by page_alloc
	assert((pp = page_alloc()) && pp == pp2);

	// unmapping pp1 at 0 should keep pp1 at PGSIZE
	page_remove(kern_pgdir, 0x0);
	assert(check_va2pa(kern_pgdir, 0x0) == (physaddr_t) ~0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp2->pp_ref == 0);

	// unmapping pp1 at PGSIZE should free it
	page_remove(kern_pgdir, PGSIZE);
	assert(check_va2pa(kern_pgdir, 0x0) == (physaddr_t) ~0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == (physaddr_t) ~0);
	assert(pp1->pp_ref == 0);
	assert(pp2->pp_ref == 0);

	// so it should be returned by page_alloc
	assert((pp = page_alloc()) && pp == pp1);

	// should be no free memory
	assert(!page_alloc());

	// forcibly take pp0 back
	assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
	kern_pgdir[0] = 0;
	assert(pp0->pp_ref == 1);
	pp0->pp_ref = 0;

	// check pointer arithmetic in pgdir_walk
	page_free(pp0);
	va = PGSIZE * NPDENTRIES + PGSIZE;
	ptep = pgdir_walk(kern_pgdir, va, true);
	ptep1 = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(va)]));
	assert(ptep == ptep1 + PTX(va));
	kern_pgdir[PDX(va)] = 0;
	pp0->pp_ref = 0;

	// check that new page tables get cleared
	memset(page2kva(pp0), 0xFF, PGSIZE);
	page_free(pp0);
	pgdir_walk(kern_pgdir, 0x0, true);
	ptep = (pte_t *) page2kva(pp0);
	for(i=0; i<NPTENTRIES; i++)
		assert((ptep[i] & PTE_P) == 0);
	kern_pgdir[0] = 0;
	pp0->pp_ref = 0;

	// give free list back
	page_free_list = fl;

	// free the pages we took
	page_free(pp0);
	page_free(pp1);
	page_free(pp2);

	cprintf("check_page() succeeded!\n");
}
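// For reference, the check_va2pa() helper these asserts rely on is typically
// implemented as below in the JOS labs: walk the two-level table by hand and
// return ~0 if the address is unmapped (a sketch, assuming the standard
// PDX/PTX/KADDR/PTE_ADDR macros):
static physaddr_t
check_va2pa(pde_t *pgdir, uintptr_t va)
{
	pte_t *p;

	pgdir = &pgdir[PDX(va)];
	if (!(*pgdir & PTE_P))
		return ~0;
	p = (pte_t *) KADDR(PTE_ADDR(*pgdir));
	if (!(p[PTX(va)] & PTE_P))
		return ~0;
	return PTE_ADDR(p[PTX(va)]);
}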
Exemple #16
0
// check_swap - check the correctness of swap & page replacement algorithm
static void
check_swap(void) {
    size_t nr_free_pages_store = nr_free_pages();
    size_t slab_allocated_store = slab_allocated();

    size_t offset;
    for (offset = 2; offset < max_swap_offset; offset ++) {
        mem_map[offset] = 1;
    }

    struct mm_struct *mm = mm_create();
    assert(mm != NULL);

    extern struct mm_struct *check_mm_struct;
    assert(check_mm_struct == NULL);

    check_mm_struct = mm;

    pgd_t *pgdir = mm->pgdir = boot_pgdir;
    assert(pgdir[0] == 0);

    struct vma_struct *vma = vma_create(0, PTSIZE, VM_WRITE | VM_READ);
    assert(vma != NULL);

    insert_vma_struct(mm, vma);

    struct Page *rp0 = alloc_page(), *rp1 = alloc_page();
    assert(rp0 != NULL && rp1 != NULL);

    uint32_t perm = PTE_U | PTE_W;
    int ret = page_insert(pgdir, rp1, 0, perm);
    assert(ret == 0 && page_ref(rp1) == 1);

    page_ref_inc(rp1);
    ret = page_insert(pgdir, rp0, 0, perm);
    assert(ret == 0 && page_ref(rp1) == 1 && page_ref(rp0) == 1);

    // check try_alloc_swap_entry

    swap_entry_t entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1);
    mem_map[1] = 1;
    assert(try_alloc_swap_entry() == 0);

    // set rp1, Swap, Active, add to hash_list, active_list

    swap_page_add(rp1, entry);
    swap_active_list_add(rp1);
    assert(PageSwap(rp1));

    mem_map[1] = 0;
    entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1);
    assert(!PageSwap(rp1));

    // check swap_remove_entry

    assert(swap_hash_find(entry) == NULL);
    mem_map[1] = 2;
    swap_remove_entry(entry);
    assert(mem_map[1] == 1);

    swap_page_add(rp1, entry);
    swap_inactive_list_add(rp1);
    swap_remove_entry(entry);
    assert(PageSwap(rp1));
    assert(rp1->index == entry && mem_map[1] == 0);

    // check page_launder, move page from inactive_list to active_list

    assert(page_ref(rp1) == 1);
    assert(nr_active_pages == 0 && nr_inactive_pages == 1);
    assert(list_next(&(inactive_list.swap_list)) == &(rp1->swap_link));

    page_launder();
    assert(nr_active_pages == 1 && nr_inactive_pages == 0);
    assert(PageSwap(rp1) && PageActive(rp1));

    entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1);
    assert(!PageSwap(rp1) && nr_active_pages == 0);
    assert(list_empty(&(active_list.swap_list)));

    // set rp1 inactive again

    assert(page_ref(rp1) == 1);
    swap_page_add(rp1, 0);
    assert(PageSwap(rp1) && swap_offset(rp1->index) == 1);
    swap_inactive_list_add(rp1);
    mem_map[1] = 1;
    assert(nr_inactive_pages == 1);
    page_ref_dec(rp1);

    size_t count = nr_free_pages();
    swap_remove_entry(entry);
    assert(nr_inactive_pages == 0 && nr_free_pages() == count + 1);

    // check swap_out_mm

    pte_t *ptep0 = get_pte(pgdir, 0, 0), *ptep1;
    assert(ptep0 != NULL && pte2page(*ptep0) == rp0);

    ret = swap_out_mm(mm, 0);
    assert(ret == 0);

    ret = swap_out_mm(mm, 10);
    assert(ret == 1 && mm->swap_address == PGSIZE);

    ret = swap_out_mm(mm, 10);
    assert(ret == 0 && *ptep0 == entry && mem_map[1] == 1);
    assert(PageDirty(rp0) && PageActive(rp0) && page_ref(rp0) == 0);
    assert(nr_active_pages == 1 && list_next(&(active_list.swap_list)) == &(rp0->swap_link));

    // check refill_inactive_scan()

    refill_inactive_scan();
    assert(!PageActive(rp0) && page_ref(rp0) == 0);
    assert(nr_inactive_pages == 1 && list_next(&(inactive_list.swap_list)) == &(rp0->swap_link));

    page_ref_inc(rp0);
    page_launder();
    assert(PageActive(rp0) && page_ref(rp0) == 1);
    assert(nr_active_pages == 1 && list_next(&(active_list.swap_list)) == &(rp0->swap_link));

    page_ref_dec(rp0);
    refill_inactive_scan();
    assert(!PageActive(rp0));

    // save data in rp0

    int i;
    for (i = 0; i < PGSIZE; i ++) {
        ((char *)page2kva(rp0))[i] = (char)i;
    }

    page_launder();
    assert(nr_inactive_pages == 0 && list_empty(&(inactive_list.swap_list)));
    assert(mem_map[1] == 1);

    rp1 = alloc_page();
    assert(rp1 != NULL);
    ret = swapfs_read(entry, rp1);
    assert(ret == 0);

    for (i = 0; i < PGSIZE; i ++) {
        assert(((char *)page2kva(rp1))[i] == (char)i);
    }

    // page fault now

    *(char *)0 = 0xEF;

    rp0 = pte2page(*ptep0);
    assert(page_ref(rp0) == 1);
    assert(PageSwap(rp0) && PageActive(rp0));

    entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1 && mem_map[1] == SWAP_UNUSED);
    assert(!PageSwap(rp0) && nr_active_pages == 0 && nr_inactive_pages == 0);

    // clear accessed flag

    assert(rp0 == pte2page(*ptep0));
    assert(!PageSwap(rp0));

    ret = swap_out_mm(mm, 10);
    assert(ret == 0);
    assert(!PageSwap(rp0) && (*ptep0 & PTE_P));

    // change page table

    ret = swap_out_mm(mm, 10);
    assert(ret == 1);
    assert(*ptep0 == entry && page_ref(rp0) == 0 && mem_map[1] == 1);

    count = nr_free_pages();
    refill_inactive_scan();
    page_launder();
    assert(count + 1 == nr_free_pages());

    ret = swapfs_read(entry, rp1);
    assert(ret == 0 && *(char *)(page2kva(rp1)) == (char)0xEF);
    free_page(rp1);

    // duplicate *ptep0

    ptep1 = get_pte(pgdir, PGSIZE, 0);
    assert(ptep1 != NULL && *ptep1 == 0);
    swap_duplicate(*ptep0);
    *ptep1 = *ptep0;

    // page fault again
    // update for copy on write

    *(char *)1 = 0x88;
    *(char *)(PGSIZE) = 0x8F;
    *(char *)(PGSIZE + 1) = 0xFF;
    assert(pte2page(*ptep0) != pte2page(*ptep1));
    assert(*(char *)0 == (char)0xEF);
    assert(*(char *)1 == (char)0x88);
    assert(*(char *)(PGSIZE) == (char)0x8F);
    assert(*(char *)(PGSIZE + 1) == (char)0xFF);

    rp0 = pte2page(*ptep0);
    rp1 = pte2page(*ptep1);
    assert(!PageSwap(rp0) && PageSwap(rp1) && PageActive(rp1));

    entry = try_alloc_swap_entry();
    assert(!PageSwap(rp0) && !PageSwap(rp1));
    assert(swap_offset(entry) == 1 && mem_map[1] == SWAP_UNUSED);
    assert(list_empty(&(active_list.swap_list)));
    assert(list_empty(&(inactive_list.swap_list)));

    page_insert(pgdir, rp0, PGSIZE, perm | PTE_A);

    // check swap_out_mm

    *(char *)0 = *(char *)PGSIZE = 0xEE;
    mm->swap_address = PGSIZE * 2;
    ret = swap_out_mm(mm, 2);
    assert(ret == 0);
    assert((*ptep0 & PTE_P) && !(*ptep0 & PTE_A));
    assert((*ptep1 & PTE_P) && !(*ptep1 & PTE_A));

    ret = swap_out_mm(mm, 2);
    assert(ret == 2);
    assert(mem_map[1] == 2 && page_ref(rp0) == 0);

    refill_inactive_scan();
    page_launder();
    assert(mem_map[1] == 2 && swap_hash_find(entry) == NULL);

    // check copy entry

    swap_remove_entry(entry);
    *ptep1 = 0;
    assert(mem_map[1] == 1);

    swap_entry_t store;
    ret = swap_copy_entry(entry, &store);
    assert(ret == -E_NO_MEM);
    mem_map[2] = SWAP_UNUSED;

    ret = swap_copy_entry(entry, &store);
    assert(ret == 0 && swap_offset(store) == 2 && mem_map[2] == 0);
    mem_map[2] = 1;
    *ptep1 = store;

    assert(*(char *)PGSIZE == (char)0xEE && *(char *)(PGSIZE + 1)== (char)0x88);

    *(char *)PGSIZE = 1, *(char *)(PGSIZE + 1) = 2;
    assert(*(char *)0 == (char)0xEE && *(char *)1 == (char)0x88);

    ret = swap_in_page(entry, &rp0);
    assert(ret == 0);
    ret = swap_in_page(store, &rp1);
    assert(ret == 0);
    assert(rp1 != rp0);

    // free memory

    swap_list_del(rp0), swap_list_del(rp1);
    swap_page_del(rp0), swap_page_del(rp1);

    assert(page_ref(rp0) == 1 && page_ref(rp1) == 1);
    assert(nr_active_pages == 0 && list_empty(&(active_list.swap_list)));
    assert(nr_inactive_pages == 0 && list_empty(&(inactive_list.swap_list)));

    for (i = 0; i < HASH_LIST_SIZE; i ++) {
        assert(list_empty(hash_list + i));
    }

    page_remove(pgdir, 0);
    page_remove(pgdir, PGSIZE);

    free_page(pa2page(PMD_ADDR(*get_pmd(pgdir, 0, 0))));
    free_page(pa2page(PUD_ADDR(*get_pud(pgdir, 0, 0))));
    free_page(pa2page(PGD_ADDR(*get_pgd(pgdir, 0, 0))));
    pgdir[0] = 0;

    mm->pgdir = NULL;
    mm_destroy(mm);
    check_mm_struct = NULL;

    assert(nr_active_pages == 0 && nr_inactive_pages == 0);
    for (offset = 0; offset < max_swap_offset; offset ++) {
        mem_map[offset] = SWAP_UNUSED;
    }

    assert(nr_free_pages_store == nr_free_pages());
    assert(slab_allocated_store == slab_allocated());

    cprintf("check_swap() succeeded.\n");
}
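// The test above treats mem_map[offset] as a per-slot reference count with
// SWAP_UNUSED as the free sentinel.  A sketch of swap_duplicate() consistent
// with those assertions (the actual ucore bookkeeping may differ in detail):
void
swap_duplicate(swap_entry_t entry) {
    size_t offset = swap_offset(entry);
    assert(offset > 0 && offset < max_swap_offset);
    assert(mem_map[offset] != SWAP_UNUSED);
    mem_map[offset] ++;         // one more PTE now refers to this slot
}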
Exemple #17
0
// Map the page of memory at 'srcva' in srcenvid's address space
// at 'dstva' in dstenvid's address space with permission 'perm'.
// Perm has the same restrictions as in sys_page_alloc, except
// that it also must not grant write access to a read-only
// page.
//
// Return 0 on success, < 0 on error.  Errors are:
//	-E_BAD_ENV if srcenvid and/or dstenvid doesn't currently exist,
//		or the caller doesn't have permission to change one of them.
//	-E_INVAL if srcva >= UTOP or srcva is not page-aligned,
//		or dstva >= UTOP or dstva is not page-aligned.
//	-E_INVAL is srcva is not mapped in srcenvid's address space.
//	-E_INVAL if perm is inappropriate (see sys_page_alloc).
//	-E_INVAL if (perm & PTE_W), but srcva is read-only in srcenvid's
//		address space.
//	-E_NO_MEM if there's no memory to allocate any necessary page tables.
static int
sys_page_map(envid_t srcenvid, void *srcva,
	     envid_t dstenvid, void *dstva, int perm)
{
	// Hint: This function is a wrapper around page_lookup() and
	//   page_insert() from kern/pmap.c.
	//   Again, most of the new code you write should be to check the
	//   parameters for correctness.
	//   Use the third argument to page_lookup() to
	//   check the current permissions on the page.

	// LAB 4: Your code here.

	struct Env* srcenv;
	struct Env*dstenv;
	struct Page * pp;

	pte_t *srcpte;

	if(envid2env(srcenvid, &srcenv,1) < 0)
        return -E_BAD_ENV;

    if(envid2env(dstenvid, &dstenv,1) < 0)
        return -E_BAD_ENV;

    if((uint32_t)srcva >= UTOP || ((uint32_t)srcva &(PGSIZE-1)) != 0 )
        return -E_INVAL;

    if((uint32_t)dstva >= UTOP || ((uint32_t)dstva &(PGSIZE-1)) != 0 )
        return -E_INVAL;


    pp=page_lookup(srcenv->env_pgdir,srcva,0);
    if(!pp)
        return -E_INVAL;

    srcpte = pgdir_walk(srcenv->env_pgdir,srcva,0);
    if(!srcpte || !(*srcpte & PTE_P))
        return -E_INVAL;

    if((perm & (PTE_P | PTE_U)) != (PTE_P | PTE_U))
        return -E_INVAL;
    if( !(*srcpte & PTE_W) && (perm & PTE_W) )
        return -E_INVAL;

    if(page_insert(dstenv->env_pgdir, pp, dstva, perm) < 0)
        return -E_NO_MEM;

    return 0;

	//panic("sys_page_map not implemented");
}
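// A sketch of how user space might use this syscall once wrapped: a
// fork-style duplication that shares one page read-only with a child,
// assuming envid2env() treats envid 0 as "the current environment" per the
// JOS convention (duppage_shared is a hypothetical name):
static int
duppage_shared(envid_t dstenv, void *addr)
{
	// same physical page, mapped read-only in the child
	return sys_page_map(0, addr, dstenv, addr, PTE_P | PTE_U);
}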
Exemple #18
0
File : env.c Project : gzs715/JOS
//
// Set up the initial program binary, stack, and processor flags
// for a user process.
// This function is ONLY called during kernel initialization,
// before running the first user-mode environment.
//
// This function loads all loadable segments from the ELF binary image
// into the environment's user memory, starting at the appropriate
// virtual addresses indicated in the ELF program header.
// At the same time it clears to zero any portions of these segments
// that are marked in the program header as being mapped
// but not actually present in the ELF file - i.e., the program's bss section.
//
// All this is very similar to what our boot loader does, except the boot
// loader also needs to read the code from disk.  Take a look at
// boot/main.c to get ideas.
//
// Finally, this function maps one page for the program's initial stack.
//
// load_icode panics if it encounters problems.
//  - How might load_icode fail?  What might be wrong with the given input?
//
static void
load_icode(struct Env *e, uint8_t *binary, size_t size)
{
	// Hints: 
	//  Load each program segment into virtual memory
	//  at the address specified in the ELF section header.
	//  You should only load segments with ph->p_type == ELF_PROG_LOAD.
	//  Each segment's virtual address can be found in ph->p_va
	//  and its size in memory can be found in ph->p_memsz.
	//  The ph->p_filesz bytes from the ELF binary, starting at
	//  'binary + ph->p_offset', should be copied to virtual address
	//  ph->p_va.  Any remaining memory bytes should be cleared to zero.
	//  (The ELF header should have ph->p_filesz <= ph->p_memsz.)
	//  Use functions from the previous lab to allocate and map pages.
	//
	//  All page protection bits should be user read/write for now.
	//  ELF segments are not necessarily page-aligned, but you can
	//  assume for this function that no two segments will touch
	//  the same virtual page.
	//
	//  You may find a function like segment_alloc useful.
	//
	//  Loading the segments is much simpler if you can move data
	//  directly into the virtual addresses stored in the ELF binary.
	//  So which page directory should be in force during
	//  this function?
	//
	// Hint:
	//  You must also do something with the program's entry point,
	//  to make sure that the environment starts executing there.
	//  What?  (See env_run() and env_pop_tf() below.)

	// LAB 3: Your code here.
	//cprintf("Begin to load icode\n");
	struct Proghdr *ph, *eph;
	ph = (struct Proghdr *) (binary + ((struct Elf *)binary)->e_phoff);
	eph = ph + ((struct Elf *)binary)->e_phnum;
	lcr3(e->env_cr3);
	while (ph < eph)
	{
		if(ph->p_type == ELF_PROG_LOAD)
		{
			segment_alloc(e, (void*)ph->p_va,ph->p_memsz);			
			memcpy((void *)ph->p_va, binary + ph->p_offset, ph->p_filesz);
			memset((void *)(ph->p_va + ph->p_filesz), 0x0, ph->p_memsz - ph->p_filesz);
		}
		ph++;
	}	
	lcr3(boot_cr3);		// switch back to the kernel page directory

	// Now map one page for the program's initial stack
	// at virtual address USTACKTOP - PGSIZE.

	// LAB 3: Your code here.
	struct Page *user_stack;
	if(page_alloc(&user_stack) == -E_NO_MEM)
		panic("No memory to alloc for user stack");
	if(page_insert(e->env_pgdir, user_stack, (void *)(USTACKTOP - PGSIZE), PTE_W|PTE_U|PTE_P) < 0)
		panic("No memory to map user stack");
	e->env_tf.tf_eip = ((struct Elf *)binary)->e_entry;
}
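// segment_alloc() is assumed by both load_icode variants in this listing.
// A minimal sketch consistent with the page_alloc(&pp) API used above:
// round the region to page boundaries and wire user-writable pages into
// the environment's page directory.
static void
segment_alloc(struct Env *e, void *va, size_t len)
{
	struct Page *pp;
	uintptr_t start = ROUNDDOWN((uintptr_t) va, PGSIZE);
	uintptr_t end = ROUNDUP((uintptr_t) va + len, PGSIZE);
	for (; start < end; start += PGSIZE) {
		if (page_alloc(&pp) < 0)
			panic("segment_alloc: out of memory");
		if (page_insert(e->env_pgdir, pp, (void *) start, PTE_U | PTE_W) < 0)
			panic("segment_alloc: page_insert failed");
	}
}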
Exemple #19
0
// Try to send 'value' to the target env 'envid'.
// If srcva < UTOP, then also send page currently mapped at 'srcva',
// so that receiver gets a duplicate mapping of the same page.
//
// The send fails with a return value of -E_IPC_NOT_RECV if the
// target is not blocked, waiting for an IPC.
//
// The send also can fail for the other reasons listed below.
//
// Otherwise, the send succeeds, and the target's ipc fields are
// updated as follows:
//    env_ipc_recving is set to 0 to block future sends;
//    env_ipc_from is set to the sending envid;
//    env_ipc_value is set to the 'value' parameter;
//    env_ipc_perm is set to 'perm' if a page was transferred, 0 otherwise.
// The target environment is marked runnable again, returning 0
// from the paused sys_ipc_recv system call.  (Hint: does the
// sys_ipc_recv function ever actually return?)
//
// If the sender wants to send a page but the receiver isn't asking for one,
// then no page mapping is transferred, but no error occurs.
// The ipc only happens when no errors occur.
//
// Returns 0 on success, < 0 on error.
// Errors are:
//	-E_BAD_ENV if environment envid doesn't currently exist.
//		(No need to check permissions.)
//	-E_IPC_NOT_RECV if envid is not currently blocked in sys_ipc_recv,
//		or another environment managed to send first.
//	-E_INVAL if srcva < UTOP but srcva is not page-aligned.
//	-E_INVAL if srcva < UTOP and perm is inappropriate
//		(see sys_page_alloc).
//	-E_INVAL if srcva < UTOP but srcva is not mapped in the caller's
//		address space.
//	-E_INVAL if (perm & PTE_W), but srcva is read-only in the
//		current environment's address space.
//	-E_NO_MEM if there's not enough memory to map srcva in envid's
//		address space.
static int
sys_ipc_try_send(envid_t envid, uint32_t value, void *srcva, unsigned perm)
{
	// LAB 4: Your code here.
	struct Env *rcv;
	pte_t *pte;
	struct Page *pp;

	envid_t jdos_client = 0;
	struct Env *e;
	int i, r;

	if (curenv->env_alien && 
	     ((curenv->env_hosteid & 0xfff00000) == 
	      (envid & 0xfff00000))) {
		goto djos_send;
	}

	// Is receiver valid?
	if (envid2env(envid, &rcv, 0) < 0) {
		return -E_BAD_ENV;
	}

	if (rcv->env_status == ENV_SUSPENDED) {
		return -E_IPC_NOT_RECV;
	}

	if (rcv->env_status == ENV_LEASED) { // is leased?
	djos_send:
		for (i = 0; i < NENV; i++) {
			if (envs[i].env_type == ENV_TYPE_JDOSC) {
				jdos_client = envs[i].env_id;
				break;
			}
		}

		// jdos client running?
		if (!jdos_client) return -E_BAD_ENV; 

		if ((r = envid2env(jdos_client, &e, 0)) < 0) return r;

		// Mark suspended and try to send ipc
		curenv->env_status = ENV_SUSPENDED; 

		sys_page_alloc(curenv->env_id, (void *) IPCSND, 
			       PTE_U|PTE_P|PTE_W);

		*((envid_t *) IPCSND) = envid;
		*((uint32_t *)(IPCSND + sizeof(envid_t))) = value;
		*((unsigned *)(IPCSND + sizeof(envid_t) +
				   sizeof(uint32_t))) = perm;

		// map read-only: the jdos client must not write this page
		r = sys_ipc_try_send(jdos_client, CLIENT_SEND_IPC, 
				     (void *) IPCSND, PTE_U|PTE_P); 

		sys_page_unmap(curenv->env_id, (void *) IPCSND);

		// Failed to send ipc, back to running!
		if (r < 0) {
			cprintf("sys_send_ipc: failed to send ipc %d\n", r);
			curenv->env_status = ENV_RUNNABLE;
			return r;
		}
	}
	else {
		// Is receiver waiting?
		if (!rcv->env_ipc_recving) {
			return -E_IPC_NOT_RECV;
		}
		
		// Try mapping page from sender to receiver (if receiver 
		// wants it, and sender wants to send it)
		// NOTE: Can't use sys_map_page as it checks for env perms
		if ((uint32_t) rcv->env_ipc_dstva < UTOP && 
		    (uint32_t) srcva < UTOP) {
			if (!(pp = page_lookup(curenv->env_pgdir, 
					       srcva, &pte)))
				return -E_INVAL;
			
			if ((perm & PTE_W) && !(*pte & PTE_W))
				return -E_INVAL;
			
			if (page_insert(rcv->env_pgdir, pp, 
					rcv->env_ipc_dstva, perm) < 0)
				return -E_NO_MEM;
		}
		
		// Set fields which mark receiver as not waiting
		rcv->env_ipc_recving = 0;
		rcv->env_ipc_dstva = (void *) UTOP; // invalid dstva
		
		// Set received data fields of receiver
		rcv->env_ipc_value = value;
		rcv->env_ipc_from = curenv->env_id;	
		rcv->env_ipc_perm = perm;
		
		// Mark receiver as RUNNABLE
		rcv->env_status = ENV_RUNNABLE;
	}
	
	return 0;
}
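// The receiving half this sender pairs with, sketched for context: record
// where an incoming page may go, block, and let the sender's code above set
// the return value and wake us (standard JOS lab 4 shape; sched_yield() is
// assumed not to return):
static int
sys_ipc_recv(void *dstva)
{
	if ((uint32_t) dstva < UTOP && ((uint32_t) dstva % PGSIZE) != 0)
		return -E_INVAL;
	curenv->env_ipc_recving = 1;
	curenv->env_ipc_dstva = dstva;
	curenv->env_status = ENV_NOT_RUNNABLE;
	curenv->env_tf.tf_regs.reg_eax = 0;	// value seen when we resume
	sched_yield();
	return 0;	// not reached
}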
Exemple #20
0
//
// Set up the initial program binary, stack, and processor flags
// for a user process.
// This function is ONLY called during kernel initialization,
// before running the first user-mode environment.
//
// This function loads all loadable segments from the ELF binary image
// into the environment's user memory, starting at the appropriate
// virtual addresses indicated in the ELF program header.
// At the same time it clears to zero any portions of these segments
// that are marked in the program header as being mapped
// but not actually present in the ELF file - i.e., the program's bss section.
//
// All this is very similar to what our boot loader does, except the boot
// loader also needs to read the code from disk.  Take a look at
// boot/main.c to get ideas.
//
// Finally, this function maps one page for the program's initial stack.
//
// load_icode panics if it encounters problems.
//  - How might load_icode fail?  What might be wrong with the given input?
//
//  
static void
load_icode(struct Env *e, uint8_t *binary, size_t size)
{
	// Hints: 
	//  Load each program segment into virtual memory
	//  at the address specified in the ELF section header.
	//  You should only load segments with ph->p_type == ELF_PROG_LOAD.
	//  Each segment's virtual address can be found in ph->p_va
	//  and its size in memory can be found in ph->p_memsz.
	//  The ph->p_filesz bytes from the ELF binary, starting at
	//  'binary + ph->p_offset', should be copied to virtual address
	//  ph->p_va.  Any remaining memory bytes should be cleared to zero.
	//  (The ELF header should have ph->p_filesz <= ph->p_memsz.)
	//  Use functions from the previous lab to allocate and map pages.
	//
	//  All page protection bits should be user read/write for now.
	//  ELF segments are not necessarily page-aligned, but you can
	//  assume for this function that no two segments will touch
	//  the same virtual page.
	//
	//  You may find a function like segment_alloc useful.
	//
	//  Loading the segments is much simpler if you can move data
	//  directly into the virtual addresses stored in the ELF binary.
	//  So which page directory should be in force during
	//  this function?
	//
	//  You must also do something with the program's entry point,
	//  to make sure that the environment starts executing there.
	//  What?  (See env_run() and env_pop_tf() below.)

	// LAB 3: Your code here.

    struct Proghdr *ph,*eph;
	struct Elf *env_elf;
	struct Page *pstack;
	env_elf = (struct Elf *)binary;
	assert(env_elf->e_magic == ELF_MAGIC);
	ph = (struct Proghdr *)((uint8_t *)binary + env_elf->e_phoff);
	eph = ph +  env_elf->e_phnum;
	lcr3(e->env_cr3); // we will use env_cr3 for a little while :D
	for( ; ph < eph ; ph++)
	{
		if(ph->p_type == ELF_PROG_LOAD)
		{
			segment_alloc(e, (void *)ph->p_va, ph->p_memsz);
			memmove((void *)ph->p_va, (void *)(binary + ph->p_offset), ph->p_filesz);
			memset(((void *)ph->p_va + ph->p_filesz), 0, (ph->p_memsz - ph->p_filesz)); // zero the .bss portion
		}
	}
	lcr3(boot_cr3); // restore boot_cr3 
	e->env_tf.tf_eip = env_elf->e_entry;
	// Now map one page for the program's initial stack
	// at virtual address USTACKTOP - PGSIZE.
	// LAB 3: Your code here.
	//DEC 10,2010,sunus
	if(page_alloc(&pstack) != 0)
		panic("load_icode: no memory for user stack");
	if(page_insert(e->env_pgdir, pstack, (void *)(USTACKTOP - PGSIZE), PTE_U|PTE_W) != 0)
		panic("load_icode: cannot map user stack");
	return;
}
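// Both load_icode variants resolve the "do something with the entry point"
// hint by setting e->env_tf.tf_eip = e_entry; env_pop_tf() later restores
// that trapframe with iret.  For reference, the standard JOS shape of that
// tail:
void
env_pop_tf(struct Trapframe *tf)
{
	__asm __volatile("movl %0,%%esp\n"
		"\tpopal\n"
		"\tpopl %%es\n"
		"\tpopl %%ds\n"
		"\taddl $0x8,%%esp\n" /* skip tf_trapno and tf_errcode */
		"\tiret"
		: : "g" (tf) : "memory");
	panic("iret failed");  /* mostly to placate the compiler */
}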
Exemple #21
0
/* do_pgfault - interrupt handler to process the page fault exception
 * @mm         : the control struct for a set of vma using the same PDT
 * @error_code : the error code recorded in trapframe->tf_err, which is set by the x86 hardware
 * @addr       : the addr which causes a memory access exception, (the contents of the CR2 register)
 *
 * CALL GRAPH: trap--> trap_dispatch-->pgfault_handler-->do_pgfault
 * The processor provides ucore's do_pgfault function with two items of information to aid in diagnosing
 * the exception and recovering from it.
 *   (1) The contents of the CR2 register. The processor loads the CR2 register with the
 *       32-bit linear address that generated the exception. The do_pgfault fun can
 *       use this address to locate the corresponding page directory and page-table
 *       entries.
 *   (2) An error code on the kernel stack. The error code for a page fault has a format different from
 *       that for other exceptions. The error code tells the exception handler three things:
 *         -- The P flag   (bit 0) indicates whether the exception was due to a not-present page (0)
 *            or to either an access rights violation or the use of a reserved bit (1).
 *         -- The W/R flag (bit 1) indicates whether the memory access that caused the exception
 *            was a read (0) or write (1).
 *         -- The U/S flag (bit 2) indicates whether the processor was executing at user mode (1)
 *            or supervisor mode (0) at the time of the exception.
 */
int
do_pgfault(struct mm_struct *mm, uint32_t error_code, uintptr_t addr) {
    int ret = -E_INVAL;
    //try to find a vma which includes addr
    struct vma_struct *vma = find_vma(mm, addr);

    pgfault_num++;
    //Is the addr within the range of one of mm's vmas?
    if (vma == NULL || vma->vm_start > addr) {
        cprintf("not valid addr %x, and  can not find it in vma\n", addr);
        goto failed;
    }
    //check the error_code
    switch (error_code & 3) {
    default:
            /* error code flag : default is 3 ( W/R=1, P=1): write, present */
    case 2: /* error code flag : (W/R=1, P=0): write, not present */
        if (!(vma->vm_flags & VM_WRITE)) {
            cprintf("do_pgfault failed: error code flag = write AND not present, but the addr's vma cannot write\n");
            goto failed;
        }
        break;
    case 1: /* error code flag : (W/R=0, P=1): read, present */
        cprintf("do_pgfault failed: error code flag = read AND present\n");
        goto failed;
    case 0: /* error code flag : (W/R=0, P=0): read, not present */
        if (!(vma->vm_flags & (VM_READ | VM_EXEC))) {
            cprintf("do_pgfault failed: error code flag = read AND not present, but the addr's vma cannot read or exec\n");
            goto failed;
        }
    }
    /* IF (write to an existing addr) OR
     *    (write to a non-existent addr && the vma is writable) OR
     *    (read from a non-existent addr && the vma is readable)
     * THEN
     *    continue processing
     */
    uint32_t perm = PTE_U;
    if (vma->vm_flags & VM_WRITE) {
        perm |= PTE_W;
    }
    addr = ROUNDDOWN(addr, PGSIZE);

    ret = -E_NO_MEM;

    pte_t *ptep=NULL;
    /*LAB3 EXERCISE 1: YOUR CODE
    * If you need help, the comments below can guide you through the code.
    *
    * Some useful MACROs and DEFINEs; you can use them in the implementation below.
    * MACROs or Functions:
    *   get_pte : get a pte and return the kernel virtual address of this pte for la;
    *             if the PT containing this pte doesn't exist, alloc a page for the PT (notice the 3rd parameter '1')
    *   pgdir_alloc_page : call alloc_page & page_insert to allocate a page-sized piece of memory & set up
    *             an addr map pa<--->la between linear address la and the PDT pgdir
    * DEFINES:
    *   VM_WRITE  : If vma->vm_flags & VM_WRITE == 1/0, then the vma is writable/non-writable
    *   PTE_W           0x002                   // page table/directory entry flags bit : Writeable
    *   PTE_U           0x004                   // page table/directory entry flags bit : User can access
    * VARIABLES:
    *   mm->pgdir : the PDT of these vma
    *
    */
#if 0
    /*LAB3 EXERCISE 1: 2013010617*/
    ptep = ???              //(1) try to find a pte; if the pte's PT (Page Table) doesn't exist, then create a PT.
    if (*ptep == 0) {
                            //(2) if the phy addr doesn't exist, then alloc a page & map the phy addr with the logical addr

    }
    else {
    /*LAB3 EXERCISE 2: 2013010617
    * Now we treat this pte as a swap entry: we should load data from disk into a page with a phy addr,
    * map the phy addr with the logical addr, and trigger the swap manager to record the access situation of this page.
    *
    *  Some useful MACROs and DEFINEs; you can use them in the implementation below.
    *  MACROs or Functions:
    *    swap_in(mm, addr, &page) : alloc a memory page, then according to the swap entry in the PTE for addr,
    *                               find the addr of the disk page and read its content into this memory page
    *    page_insert : build the map of the phy addr of a Page with the linear addr la
    *    swap_map_swappable : set the page swappable
    */
        if(swap_init_ok) {
            struct Page *page=NULL;
                                    //(1) According to mm AND addr, load the content of the right disk page
                                    //    into the memory that 'page' manages.
                                    //(2) According to mm, addr AND page, set up the map of phy addr <---> logical addr
                                    //(3) make the page swappable.
        }
        else {
            cprintf("no swap_init_ok but ptep is %x, failed\n",*ptep);
            goto failed;
        }
   }
#endif
    // try to find a pte, if pte's PT(Page Table) isn't existed, then create a PT.
    // (notice the 3th parameter '1')
    if ((ptep = get_pte(mm->pgdir, addr, 1)) == NULL) {
        cprintf("get_pte in do_pgfault failed\n");
        goto failed;
    }
    
    if (*ptep == 0) { // if the phy addr doesn't exist, alloc a page & map the phy addr with the logical addr
        if (pgdir_alloc_page(mm->pgdir, addr, perm) == NULL) {
            cprintf("pgdir_alloc_page in do_pgfault failed\n");
            goto failed;
        }
    }
    else { // if this pte is a swap entry, then load data from disk to a page with phy addr
           // and call page_insert to map the phy addr with logical addr
        if(swap_init_ok) {
            struct Page *page=NULL;
            if ((ret = swap_in(mm, addr, &page)) != 0) {
                cprintf("swap_in in do_pgfault failed\n");
                goto failed;
            }    
            page_insert(mm->pgdir, page, addr, perm);
            swap_map_swappable(mm, addr, page, 1);
            page->pra_vaddr = addr;
        }
        else {
            cprintf("no swap_init_ok but ptep is %x, failed\n",*ptep);
            goto failed;
        }
   }
   ret = 0;
failed:
    return ret;
}
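// pgdir_alloc_page(), referenced in the hint block above, is essentially a
// thin wrapper over alloc_page() + page_insert().  A sketch of its core
// (later ucore labs also register the new page with the swap manager here):
struct Page *
pgdir_alloc_page(pde_t *pgdir, uintptr_t la, uint32_t perm)
{
    struct Page *page = alloc_page();
    if (page != NULL) {
        if (page_insert(pgdir, page, la, perm) != 0) {
            free_page(page);    // undo the allocation on failure
            return NULL;
        }
    }
    return page;
}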
Exemple #22
0
// Map the page of memory at 'srcva' in srcenvid's address space
// at 'dstva' in dstenvid's address space with permission 'perm'.
// Perm has the same restrictions as in sys_page_alloc, except
// that it also must not grant write access to a read-only
// page.
//
// Return 0 on success, < 0 on error.  Errors are:
//	-E_BAD_ENV if srcenvid and/or dstenvid doesn't currently exist,
//		or the caller doesn't have permission to change one of them.
//	-E_INVAL if srcva >= UTOP or srcva is not page-aligned,
//		or dstva >= UTOP or dstva is not page-aligned.
//	-E_INVAL is srcva is not mapped in srcenvid's address space.
//	-E_INVAL if perm is inappropriate (see sys_page_alloc).
//	-E_INVAL if (perm & PTE_W), but srcva is read-only in srcenvid's
//		address space.
//	-E_NO_MEM if there's no memory to allocate any necessary page tables.
static int
sys_page_map(envid_t srcenvid, void *srcva,
	     envid_t dstenvid, void *dstva, int perm)
{
	// Hint: This function is a wrapper around page_lookup() and
	//   page_insert() from kern/pmap.c.
	//   Again, most of the new code you write should be to check the
	//   parameters for correctness.
	//   Use the third argument to page_lookup() to
	//   check the current permissions on the page.

	// LAB 4: Your code here.

	cprintf("DEBUG sys_page_map() called\n");
	struct Env *se, *de;

	int err = envid2env(srcenvid, &se, 1);
	if (err)
		return err;	// bad environment

	err = envid2env(dstenvid, &de, 1);
	if (err)
		return err;	// bad environment

	// -E_INVAL if srcva >= UTOP or srcva is not page-aligned,
	// or dstva >= UTOP or dstva is not page-aligned.
	if (srcva>=(void*)UTOP || dstva>=(void*)UTOP || ROUNDDOWN(srcva,PGSIZE)!=srcva || ROUNDDOWN(dstva,PGSIZE)!=dstva)
		return -E_INVAL;

	// -E_INVAL is srcva is not mapped in srcenvid's address space.
	pte_t *pte;
	struct PageInfo *pag = page_lookup(se->env_pgdir, srcva, &pte);
	if (!pag)
		return -E_INVAL;

	// -E_INVAL if perm is inappropriate (see sys_page_alloc).
	int flag = PTE_U | PTE_P;
	if ((perm & flag) != flag)
		return -E_INVAL;

	// -E_INVAL if (perm & PTE_W), but srcva is read-only in srcenvid's
	// address space.
	if (((*pte&PTE_W) == 0) && (perm&PTE_W))
		return -E_INVAL;

	// -E_NO_MEM if there's no memory to allocate any necessary page tables.
	return page_insert(de->env_pgdir, pag, dstva, perm);
}