Example #1
0
/*
 * This function implements the brk(2) system call.
 *
 * This routine manages the calling process's "break" -- the ending address
 * of the process's "dynamic" region (often also referred to as the "heap").
 * The current value of a process's break is maintained in the 'p_brk' member
 * of the proc_t structure that represents the process in question.
 *
 * The 'p_brk' and 'p_start_brk' members of a proc_t struct are initialized
 * by the loader. 'p_start_brk' is subsequently never modified; it always
 * holds the initial value of the break. Note that the starting break is
 * not necessarily page aligned!
 *
 * 'p_start_brk' is the lower limit of 'p_brk' (that is, setting the break
 * to any value less than 'p_start_brk' should be disallowed).
 *
 * The upper limit of 'p_brk' is defined by the minimum of (1) the
 * starting address of the next occurring mapping or (2) USER_MEM_HIGH.
 * That is, growth of the process break is limited only in that it cannot
 * overlap with/expand into an existing mapping or beyond the region of
 * the address space allocated for use by userland. (note the presence of
 * the 'vmmap_is_range_empty' function).
 *
 * The dynamic region should always be represented by at most ONE vmarea.
 * Note that vmareas only have page granularity, you will need to take this
 * into account when deciding how to set the mappings if p_brk or p_start_brk
 * is not page aligned.
 *
 * You are guaranteed that the process data/bss region is non-empty.
 * That is, if the starting brk is not page-aligned, its page has
 * read/write permissions.
 *
 * If addr is NULL, you should NOT fail as the man page says. Instead,
 * "return" the current break. We use this to implement sbrk(0) without writing
 * a separate syscall. Look in user/libc/syscall.c if you're curious.
 *
 * Also, despite the statement on the manpage, you MUST support combined use
 * of brk and mmap in the same process.
 *
 * Note that this function "returns" the new break through the "ret" argument.
 * Return 0 on success, -errno on failure.
 */
int
do_brk(void *addr, void **ret)
{
    /* sbrk(0): a NULL addr means "report the current break, change nothing". */
    if (addr == NULL) {
        *ret = curproc->p_brk;
        return 0;
    }

    uintptr_t start_brk = (uintptr_t)curproc->p_start_brk;
    uintptr_t brk = (uintptr_t)curproc->p_brk;
    uintptr_t vaddr = (uintptr_t)addr;
    /* Page containing the starting break; the dynamic region's vmarea
     * always covers this page since the data/bss region is non-empty. */
    uint32_t lopage = ADDR_TO_PN(PAGE_ALIGN_DOWN(start_brk));

    /* The break may never move below its initial value. */
    if (vaddr < start_brk) {
        return -ENOMEM;
    }

    /* The break may grow up to, but not beyond, USER_MEM_HIGH.
     * (The original rejected vaddr == USER_MEM_HIGH, but USER_MEM_HIGH
     * itself is a legal break value: it is the first address past the
     * last usable userland byte.) */
    if (vaddr > USER_MEM_HIGH) {
        return -ENOMEM;
    }

    /* No change requested. */
    if (vaddr == brk) {
        *ret = addr;
        return 0;
    }

    KASSERT(start_brk <= brk);

    vmarea_t *area = vmmap_lookup(curproc->p_vmmap, lopage);
    if (area == NULL) {
        /* The data/bss region is guaranteed non-empty, so the page of
         * start_brk should always be mapped; fail cleanly instead of
         * panicking if that invariant is ever violated. */
        return -ENOMEM;
    }

    /* Last page the dynamic region must cover for the new break. */
    uint32_t hipage = ADDR_TO_PN(vaddr - 1);

    if (hipage < area->vma_end) {
        /* Shrinking (or staying within the pages already mapped):
         * trim the vmarea.
         * NOTE(review): pages above the new end are left mapped in the
         * page table; consider unmapping them and flushing the TLB. */
        area->vma_end = hipage + 1;
        *ret = addr;
        curproc->p_brk = addr;
        return 0;
    }

    /* Growing: the new pages must not collide with any other mapping. */
    if (vmmap_is_range_empty(curproc->p_vmmap, area->vma_end,
                             hipage - area->vma_end + 1)) {
        area->vma_end = hipage + 1;
        *ret = addr;
        curproc->p_brk = addr;
        return 0;
    }

    return -ENOMEM;
}
Example #2
0
/*
 * This function implements the munmap(2) syscall.
 *
 * As with do_mmap() it should perform the required error checking,
 * before calling upon vmmap_remove() to do most of the work.
 * Remember to clear the TLB.
 */
/*
 * Validate the requested range, remove the mappings via vmmap_remove(),
 * and flush the TLB so stale translations do not survive.
 * Returns 0 on success, -EINVAL for a bad range, or the error from
 * vmmap_remove().
 */
int
do_munmap(void *addr, size_t len)
{
	/* addr must fall on a page boundary. */
	if ((uint32_t)addr % PAGE_SIZE) {
		return -EINVAL;
	}
	/* len is unsigned, so "len <= 0" only ever meant "len == 0";
	 * also reject a length larger than the whole user range
	 * (the original used the magic constant 0xc0000000 here). */
	if (len == 0 || len > USER_MEM_HIGH - USER_MEM_LOW) {
		return -EINVAL;
	}
	KASSERT(PAGE_ALIGNED(addr));
	/* The entire range must lie inside userland.  The subtraction form
	 * avoids the unsigned overflow in "(uint32_t)addr + len", and the
	 * ">= USER_MEM_HIGH" test fixes the original's off-by-one that
	 * accepted addr == USER_MEM_HIGH. */
	if ((uint32_t)addr < USER_MEM_LOW || (uint32_t)addr >= USER_MEM_HIGH ||
	    len > USER_MEM_HIGH - (uint32_t)addr) {
		return -EINVAL;
	}

	/* (len - 1)/PAGE_SIZE + 1 rounds the byte length up to whole pages. */
	int ret = vmmap_remove(curproc->p_vmmap, ADDR_TO_PN(addr),
	                       (len - 1) / PAGE_SIZE + 1);
	if (ret < 0) {
		/* The original ignored vmmap_remove's result. */
		return ret;
	}

	tlb_flush_all();

	KASSERT(NULL != curproc->p_pagedir);
	return 0;
}
Example #3
0
/*
 * addr_perm checks to see if the address vaddr in the process p is valid
 * for all the operations specified in perm. (A combination of one or more
 * of PROT_READ, PROT_WRITE, and PROT_EXEC).  You need to find the process's
 * vm_area that contains that virtual address, and verify that the protections
 * allow access.  The page protections need not match the specified permissions
 * exactly, as long as at least the specified permissions are satisfied.  This
 * function should return 1 on success, and 0 on failure (think of it as
 * answering the question "does process p have permission perm on address vaddr?")
 */
/*
 * Answers "does process p have permission perm on address vaddr?".
 * Returns 1 when the vmarea containing vaddr grants at least every bit
 * requested in perm, 0 when the address is unmapped or any requested
 * bit is missing.
 */
int addr_perm(struct proc *p, const void *vaddr, int perm) {
	/* Find the vmarea mapping the page that contains vaddr. */
	vmarea_t *vm_area = vmmap_lookup(p->p_vmmap, ADDR_TO_PN(vaddr));
	if (vm_area == NULL) {
		return 0;
	}

	/* Every bit requested in perm must be granted by the area's
	 * protections; extra bits in vma_prot are fine.  (The original
	 * had all of these checks commented out and therefore granted
	 * any permission on any mapped address.) */
	if ((vm_area->vma_prot & perm) != perm) {
		return 0;
	}

	return 1;
}
Example #4
0
/*
 * This function implements the munmap(2) syscall.
 *
 * As with do_mmap() it should perform the required error checking,
 * before calling upon vmmap_remove() to do most of the work.
 * Remember to clear the TLB.
 */
/*
 * Validate the range, remove the mappings with vmmap_remove(), and
 * flush the TLB entries that covered the range.
 * Returns 0 on success, -EINVAL for a bad range, or the error from
 * vmmap_remove().
 */
int
do_munmap(void *addr, size_t len)
{
    /* The start of the range must be a userland address. */
    if ((size_t)addr < USER_MEM_LOW || (size_t)addr >= USER_MEM_HIGH) {
        return -EINVAL;
    }

    if (!PAGE_ALIGNED(addr)) {
        return -EINVAL;
    }

    /* len is unsigned, so only len == 0 needs rejecting; the range must
     * also not extend past USER_MEM_HIGH -- the subtraction form avoids
     * the wraparound in "(size_t)addr + len".  (A leftover debugger
     * hook keyed on len == PAGE_SIZE * 15 was removed.) */
    if (len == 0 || len > USER_MEM_HIGH - (size_t)addr) {
        return -EINVAL;
    }

    vmmap_t *map = curproc->p_vmmap;
    uint32_t lopage = ADDR_TO_PN(addr);
    /* One past the page containing the last byte of the range. */
    uint32_t hipage = ADDR_TO_PN((size_t)addr + len - 1) + 1;
    uint32_t npages = hipage - lopage;

    int retval = vmmap_remove(map, lopage, npages);
    if (retval < 0) {
        return retval;
    }

    /* Stale translations for the unmapped pages must not survive. */
    tlb_flush_range((uintptr_t)addr, npages);

    return 0;
}
Example #5
0
/*
 * This gets called by _pt_fault_handler in mm/pagetable.c The
 * calling function has already done a lot of error checking for
 * us. In particular it has checked that we are not page faulting
 * while in kernel mode. Make sure you understand why an
 * unexpected page fault in kernel mode is bad in Weenix. You
 * should probably read the _pt_fault_handler function to get a
 * sense of what it is doing.
 *
 * Before you can do anything you need to find the vmarea that
 * contains the address that was faulted on. Make sure to check
 * the permissions on the area to see if the process has
 * permission to do [cause]. If either of these checks does not
 * pass kill the offending process, setting its exit status to
 * EFAULT (normally we would send the SIGSEGV signal, however
 * Weenix does not support signals).
 *
 * Now it is time to find the correct page (don't forget
 * about shadow objects, especially copy-on-write magic!). Make
 * sure that if the user writes to the page it will be handled
 * correctly.
 *
 * Finally call pt_map to have the new mapping placed into the
 * appropriate page table.
 *
 * @param vaddr the address that was accessed to cause the fault
 *
 * @param cause this is the type of operation on the memory
 *              address which caused the fault, possible values
 *              can be found in pagefault.h
 */
void
handle_pagefault(uintptr_t vaddr, uint32_t cause)
{
	vmmap_t *map = curproc->p_vmmap;

	/* Find the vmarea containing the faulting address; a fault on an
	 * unmapped address, or one not originating from user mode, is
	 * fatal for the process. */
	vmarea_t *vma = vmmap_lookup(map, ADDR_TO_PN(vaddr));
	if (vma == NULL || !(cause & FAULT_USER)) {
		/* (The original had no return here and went on to
		 * dereference vma even when it was NULL.) */
		proc_kill(curproc, EFAULT);
		return;
	}

	/* A write fault requires write permission on the area. */
	if ((cause & FAULT_WRITE) && !(vma->vma_prot & PROT_WRITE)) {
		proc_kill(curproc, EFAULT);
		return;
	}

	int forwrite = (cause & FAULT_WRITE) ? 1 : 0;

	/* Page number within the backing object: vma_off is the object
	 * page at which this area starts. */
	uintptr_t pagenum = ADDR_TO_PN(vaddr) - vma->vma_start + vma->vma_off;

	/* pframe_lookup traverses any shadow chain and, when forwrite is
	 * set, performs the copy-on-write duplication. */
	pframe_t *pf;
	if (pframe_lookup(vma->vma_obj, pagenum, forwrite, &pf) < 0) {
		proc_kill(curproc, EFAULT);
		return;
	}
	if (forwrite) {
		/* Mark the frame dirty so modified data is written back. */
		pframe_dirty(pf);
	}

	uintptr_t paddr = pt_virt_to_phys((uintptr_t)pf->pf_addr);

	/* Only grant the write bit for a permitted write fault; the
	 * original always set PD_WRITE/PT_WRITE, which defeats
	 * copy-on-write for read faults. */
	uintptr_t pdflags = PD_PRESENT | PD_USER;
	uintptr_t ptflags = PT_PRESENT | PT_USER;
	if (forwrite) {
		pdflags |= PD_WRITE;
		ptflags |= PT_WRITE;
	}
	pt_map(curproc->p_pagedir, (uintptr_t)PAGE_ALIGN_DOWN(vaddr), paddr,
	       pdflags, ptflags);
}
Example #6
0
/*
 * This function implements the munmap(2) syscall.
 *
 * As with do_mmap() it should perform the required error checking,
 * before calling upon vmmap_remove() to do most of the work.
 * Remember to clear the TLB.
 */
/*
 * Validate the range and delegate to vmmap_remove(); the TLB flush and
 * page-table unmap happen inside vmmap_remove().
 * Returns 0 on success, -EINVAL for a bad range, or vmmap_remove()'s error.
 */
int
do_munmap(void *addr, size_t len)
{
    /* The entire range must lie inside userland.  The explicit
     * "addr >= USER_MEM_HIGH" test is new: without it the unsigned
     * subtraction USER_MEM_HIGH - addr wraps for addresses above the
     * user region, defeating the length check. */
    if ((uintptr_t) addr < USER_MEM_LOW || (uintptr_t) addr >= USER_MEM_HIGH ||
            USER_MEM_HIGH - (uint32_t) addr < len){
        return -EINVAL;
    }

    /* A zero-length unmap is invalid. */
    if (len == 0){
        return -EINVAL;
    }

    /* addr must fall on a page boundary. */
    if (!PAGE_ALIGNED(addr)){
        return -EINVAL;
    }

    int ret = vmmap_remove(curproc->p_vmmap, ADDR_TO_PN(addr),
            (uint32_t) PAGE_ALIGN_UP(len) / PAGE_SIZE);

    /* no need to unmap range or flush the tlb, since this is done in
     * vmmap_remove() */

    return ret;
}
Example #7
0
/*
 * This function takes care of pages which are not in kas or need to be
 * taken care of in a special way.  For example, panicbuf pages are not
 * in kas and their pages are allocated via prom_retain().
 */
/*
 * Count (and, when bitfunc == cpr_setbit, tag) pages that are not in kas
 * or need special handling; panicbuf pages, for example, live outside
 * kas and were allocated via prom_retain().
 */
pgcnt_t
i_cpr_count_special_kpages(int mapflag, bitfunc_t bitfunc)
{
	struct cpr_map_info *pri;
	struct cpr_map_info *limit = &cpr_prom_retain[CPR_PROM_RETAIN_CNT];
	pgcnt_t total = 0;

	/*
	 * When tagging, record the prom-retained panicbuf info first.
	 */
	if (bitfunc == cpr_setbit) {
		struct cpr_map_info *panic_pri = &cpr_prom_retain[CPR_PANICBUF];
		panic_pri->virt = (cpr_ptr)panicbuf;
		panic_pri->phys = va_to_pa(panicbuf);
		panic_pri->size = sizeof (panicbuf);
	}

	/*
	 * Walk every prom_retain entry and count/tag its memory pages.
	 */
	for (pri = cpr_prom_retain; pri < limit; pri++) {
		pfn_t pfn = ADDR_TO_PN(pri->phys);
		pgcnt_t remaining = mmu_btopr(pri->size);

		for (; remaining != 0; remaining--, pfn++) {
			if (!pf_is_memory(pfn))
				continue;
			/* Counting mode counts every memory page; tagging
			 * mode counts only pages whose bit was not set. */
			if (bitfunc != cpr_setbit ||
			    (*bitfunc)(pfn, mapflag) == 0)
				total++;
		}
	}

	return (total);
}
Example #8
0
/*
 * This gets called by _pt_fault_handler in mm/pagetable.c The
 * calling function has already done a lot of error checking for
 * us. In particular it has checked that we are not page faulting
 * while in kernel mode. Make sure you understand why an
 * unexpected page fault in kernel mode is bad in Weenix. You
 * should probably read the _pt_fault_handler function to get a
 * sense of what it is doing.
 *
 * Before you can do anything you need to find the vmarea that
 * contains the address that was faulted on. Make sure to check
 * the permissions on the area to see if the process has
 * permission to do [cause]. If either of these checks does not
 * pass kill the offending process, setting its exit status to
 * EFAULT (normally we would send the SIGSEGV signal, however
 * Weenix does not support signals).
 *
 * Now it is time to find the correct page (don't forget
 * about shadow objects, especially copy-on-write magic!). Make
 * sure that if the user writes to the page it will be handled
 * correctly.
 *
 * Finally call pt_map to have the new mapping placed into the
 * appropriate page table.
 *
 * @param vaddr the address that was accessed to cause the fault
 *
 * @param cause this is the type of operation on the memory
 *              address which caused the fault, possible values
 *              can be found in pagefault.h
 */
void
handle_pagefault(uintptr_t vaddr, uint32_t cause)
{
        /* Reject addresses outside the userland range outright. */
        if (vaddr < USER_MEM_LOW || vaddr >= USER_MEM_HIGH) {
                do_exit(EFAULT);
                return;
        }

        /* The faulting address must belong to some mapping. */
        vmarea_t *container = vmmap_lookup(curproc->p_vmmap, ADDR_TO_PN(vaddr));
        if (container == NULL) {
                do_exit(EFAULT);
                return;
        }
        if (container->vma_prot == PROT_NONE) {
                do_exit(EFAULT);
                return;
        }
        /* A write fault needs write permission on the area. */
        if ((cause & FAULT_WRITE) && !(container->vma_prot & PROT_WRITE)) {
                do_exit(EFAULT);
                return;
        }
        /* NOTE(review): this demands PROT_READ on every fault, which also
         * kills processes faulting on exec-only mappings -- confirm that
         * is intended before relying on it. */
        if (!(container->vma_prot & PROT_READ)) {
                do_exit(EFAULT);
                return;
        }

        /* Page number within the backing object. */
        int pagenum = ADDR_TO_PN(vaddr) - container->vma_start
                      + container->vma_off;
        int forwrite = (cause & FAULT_WRITE) ? 1 : 0;
        pframe_t *pf;

        /* pframe_lookup with forwrite set performs the copy-on-write
         * duplication through any shadow objects. */
        if (pframe_lookup(container->vma_obj, pagenum, forwrite, &pf) < 0) {
                do_exit(EFAULT);
                /* The original fell through here; if do_exit ever returns,
                 * pf would have been used uninitialized. */
                return;
        }
        if (forwrite) {
                /* Mark the frame dirty so the object writes it back. */
                pframe_dirty(pf);
        }

        KASSERT(pf);
        KASSERT(pf->pf_addr);

        uint32_t pdflags = PD_PRESENT | PD_USER;
        uint32_t ptflags = PT_PRESENT | PT_USER;
        if (forwrite) {
                pdflags |= PD_WRITE;
                ptflags |= PT_WRITE;
        }
        /* The original stored pt_map's result in an unused variable;
         * treat a mapping failure like any other fault failure. */
        if (pt_map(curproc->p_pagedir, (uintptr_t)PAGE_ALIGN_DOWN(vaddr),
                   pt_virt_to_phys((uintptr_t)pf->pf_addr),
                   pdflags, ptflags) < 0) {
                do_exit(EFAULT);
        }
}
Example #9
0
/*
 * This gets called by _pt_fault_handler in mm/pagetable.c The
 * calling function has already done a lot of error checking for
 * us. In particular it has checked that we are not page faulting
 * while in kernel mode. Make sure you understand why an
 * unexpected page fault in kernel mode is bad in Weenix. You
 * should probably read the _pt_fault_handler function to get a
 * sense of what it is doing.
 *
 * Before you can do anything you need to find the vmarea that
 * contains the address that was faulted on. Make sure to check
 * the permissions on the area to see if the process has
 * permission to do [cause]. If either of these checks does not
 * pass kill the offending process, setting its exit status to
 * EFAULT (normally we would send the SIGSEGV signal, however
 * Weenix does not support signals).
 *
 * Now it is time to find the correct page (don't forget
 * about shadow objects, especially copy-on-write magic!). Make
 * sure that if the user writes to the page it will be handled
 * correctly.
 *
 * Finally call pt_map to have the new mapping placed into the
 * appropriate page table.
 *
 * @param vaddr the address that was accessed to cause the fault
 *
 * @param cause this is the type of operation on the memory
 *              address which caused the fault, possible values
 *              can be found in pagefault.h
 */
void handle_pagefault(uintptr_t vaddr, uint32_t cause) {
	vmarea_t *vma;
	pframe_t *pf;
	/* PD_* and PT_* flag values coincide in this kernel, so a single
	 * flags word is passed for both levels of pt_map below. */
	int pflags = PD_PRESENT | PD_USER;
	int writeflag = 0;

	/* The faulting address must belong to some mapping. */
	if ((vma = vmmap_lookup(curproc->p_vmmap, ADDR_TO_PN(vaddr))) == NULL) {
		proc_kill(curproc, EFAULT);
		return;
	}
	/* A plain read fault (neither write nor exec) needs PROT_READ. */
	if (!((cause & FAULT_WRITE) || (cause & FAULT_EXEC))
			&& !(vma->vma_prot & PROT_READ)) {
		proc_kill(curproc, EFAULT);
		return;
	}
	/* A write fault needs PROT_WRITE. */
	if ((cause & FAULT_WRITE) && !(vma->vma_prot & PROT_WRITE)) {
		proc_kill(curproc, EFAULT);
		return;
	}

	if (cause & FAULT_WRITE) {
		writeflag = 1;
	}

	/* Look up the page in the backing object; with writeflag set this
	 * traverses shadow objects and performs copy-on-write. */
	if (pframe_lookup(vma->vma_obj,
	ADDR_TO_PN(vaddr) - vma->vma_start + vma->vma_off, writeflag, &pf) < 0) {
		proc_kill(curproc, EFAULT);
		return;
	}
	if (cause & FAULT_WRITE) {
		/* Pin across the dirty call so the frame cannot be evicted.
		 * NOTE(review): pframe_dirty's return value is ignored --
		 * confirm a dirty failure need not kill the process. */
		pframe_pin(pf);
		pframe_dirty(pf);
		pframe_unpin(pf);
		pflags |= PD_WRITE;
	}

	/* Install the translation for the faulting page. */
	pt_map(curproc->p_pagedir, (uintptr_t) PAGE_ALIGN_DOWN(vaddr),
			pt_virt_to_phys((uintptr_t) pf->pf_addr), pflags, pflags);
}
Example #10
0
/*
 * This function implements the mmap(2) syscall, but only
 * supports the MAP_SHARED, MAP_PRIVATE, MAP_FIXED, and
 * MAP_ANON flags.
 *
 * Add a mapping to the current process's address space.
 * You need to do some error checking; see the ERRORS section
 * of the manpage for the problems you should anticipate.
 * After error checking most of the work of this function is
 * done by vmmap_map(), but remember to clear the TLB.
 */
int
do_mmap(void *addr, size_t len, int prot, int flags,
        int fd, off_t off, void **ret)
{
    /* A fixed mapping must lie inside the userland address range. */
    if (((flags & MAP_FIXED) == MAP_FIXED) &&
        ((size_t)addr < USER_MEM_LOW || (size_t)addr >= USER_MEM_HIGH)) {
        return (int)MAP_FAILED;
    }

    /* addr and the file offset must be page aligned (len need not be). */
    if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(off)) {
        return (int)MAP_FAILED;
    }

    /* len is unsigned: "len <= 0" only ever meant "len == 0". */
    if (len == 0 || len >= USER_MEM_HIGH - USER_MEM_LOW) {
        return (int)MAP_FAILED;
    }

    /* One of MAP_PRIVATE / MAP_SHARED is required. */
    if (!(((flags & MAP_PRIVATE) == MAP_PRIVATE) ||
          ((flags & MAP_SHARED) == MAP_SHARED))) {
        return (int)MAP_FAILED;
    }

    /* File-backed mappings need an in-range descriptor. */
    if (((fd >= NFILES) || (fd < 0)) && ((flags & MAP_ANON) != MAP_ANON)) {
        return (int)MAP_FAILED;
    }

    file_t *file = NULL;
    if ((flags & MAP_ANON) != MAP_ANON) {
        file = fget(fd);
        if (file == NULL) {
            return (int)MAP_FAILED;
        }
        /* MAP_PRIVATE requires the file to be readable. */
        if (((flags & MAP_PRIVATE) == MAP_PRIVATE) &&
            ((file->f_mode & FMODE_READ) != FMODE_READ)) {
            fput(file);
            return (int)MAP_FAILED;
        }
        /* A writable shared mapping requires a writable descriptor. */
        if (((flags & MAP_SHARED) == MAP_SHARED) &&
            ((prot & PROT_WRITE) == PROT_WRITE) &&
            ((file->f_mode & FMODE_WRITE) != FMODE_WRITE)) {
            fput(file);
            return (int)MAP_FAILED;
        }
        /* PROT_WRITE on an append-mode file is refused.  f_mode is a
         * bit mask: the original compared with ==, which only matched
         * when APPEND was the sole mode bit. */
        if (((prot & PROT_WRITE) == PROT_WRITE) &&
            ((file->f_mode & FMODE_APPEND) == FMODE_APPEND)) {
            fput(file);
            return (int)MAP_FAILED;
        }
        /* vn_flags is likewise a bit mask; test with &, not ==. */
        if (file->f_vnode->vn_flags & VN_BUSY) {
            fput(file);
            return (int)MAP_FAILED;
        }
    }

    *ret = NULL;
    vmmap_t *map = curproc->p_vmmap;
    uint32_t lopage = ADDR_TO_PN(addr);
    /* One past the page containing the last byte of the request. */
    uint32_t hipage = ADDR_TO_PN((size_t)addr + len - 1) + 1;
    uint32_t npages = hipage - lopage;
    int dir = VMMAP_DIR_HILO; /* see elf32.c */
    vmarea_t *vma;

    int retval;
    if ((flags & MAP_ANON) != MAP_ANON) {
        retval = vmmap_map(map, file->f_vnode, lopage, npages, prot, flags,
                           off, dir, &vma);
    } else {
        retval = vmmap_map(map, NULL, lopage, npages, prot, flags,
                           off, dir, &vma);
    }

    if (retval < 0) {
        if ((flags & MAP_ANON) != MAP_ANON) {
            fput(file);
        }
        return (int)MAP_FAILED;
    }

    *ret = PN_TO_ADDR(vma->vma_start);
    /* Drop stale translations for the newly mapped range. */
    tlb_flush_range((uintptr_t)(*ret), npages);

    if ((flags & MAP_ANON) != MAP_ANON) {
        fput(file);
    }

    return 0;
}
Example #11
0
/*
 * This function implements the mmap(2) syscall, but only
 * supports the MAP_SHARED, MAP_PRIVATE, MAP_FIXED, and
 * MAP_ANON flags.
 *
 * Add a mapping to the current process's address space.
 * You need to do some error checking; see the ERRORS section
 * of the manpage for the problems you should anticipate.
 * After error checking most of the work of this function is
 * done by vmmap_map(), but remember to clear the TLB.
 */
int
do_mmap(void *addr, size_t len, int prot, int flags,
        int fd, off_t off, void **ret)
{
    /* A zero-length mapping is invalid. */
    if (len == 0){
        return -EINVAL;
    }

    if (!valid_map_type(flags)){
        return -EINVAL;
    }

    /* The file offset must be page aligned. */
    if (!PAGE_ALIGNED(off)){
        return -EINVAL;
    }

    /* MAP_FIXED demands a page-aligned address.  (The original only
     * enforced this for file-backed mappings, so an unaligned
     * MAP_ANON | MAP_FIXED request slipped through.) */
    if ((flags & MAP_FIXED) && !PAGE_ALIGNED(addr)){
        return -EINVAL;
    }

    if (addr != NULL && (uint32_t) addr < USER_MEM_LOW){
        return -EINVAL;
    }

    if (len > USER_MEM_HIGH){
        return -EINVAL;
    }

    /* The range must not extend past userland; the subtraction form
     * avoids overflow in "addr + len". */
    if (addr != NULL && len > USER_MEM_HIGH - (uint32_t) addr){
        return -EINVAL;
    }

    /* MAP_FIXED at address zero is never valid for userland. */
    if (addr == 0 && (flags & MAP_FIXED)){
        return -EINVAL;
    }

    vnode_t *vnode;

    if (!(flags & MAP_ANON)){
        if (!valid_fd(fd) || curproc->p_files[fd] == NULL){
            return -EBADF;
        }

        file_t *f = curproc->p_files[fd];
        vnode = f->f_vnode;

        /* MAP_PRIVATE needs read access to the file. */
        if ((flags & MAP_PRIVATE) && !(f->f_mode & FMODE_READ)){
            return -EACCES;
        }

        /* A writable shared mapping needs a read/write descriptor. */
        if ((flags & MAP_SHARED) && (prot & PROT_WRITE) &&
                !((f->f_mode & FMODE_READ) && (f->f_mode & FMODE_WRITE)))
        {
            return -EACCES;
        }
    } else {
        vnode = NULL;
    }

    vmarea_t *vma;

    int retval = vmmap_map(curproc->p_vmmap, vnode, ADDR_TO_PN(addr),
            (uint32_t) PAGE_ALIGN_UP(len) / PAGE_SIZE, prot, flags, off,
            VMMAP_DIR_HILO, &vma);

    KASSERT(retval == 0 || retval == -ENOMEM);

    if (ret != NULL && retval >= 0){
        *ret = PN_TO_ADDR(vma->vma_start);

        /* Remove any old translations for the range, then flush the TLB. */
        pt_unmap_range(curproc->p_pagedir, (uintptr_t) PN_TO_ADDR(vma->vma_start),
               (uintptr_t) PN_TO_ADDR(vma->vma_start)
               + (uintptr_t) PAGE_ALIGN_UP(len));

        tlb_flush_range((uintptr_t) PN_TO_ADDR(vma->vma_start),
                (uint32_t) PAGE_ALIGN_UP(len) / PAGE_SIZE);
    }

    return retval;
}
Example #12
0
/* Helper function for the ELF loader. Maps the specified segment
 * of the program header from the given file in to the given address
 * space with the given memory offset (in pages). On success returns 0, otherwise
 * returns a negative error code for the ELF loader to return.
 * Note that since any error returned by this function should
 * cause the ELF loader to give up, it is acceptable for the
 * address space to be modified after returning an error.
 * Note that memoff can be negative */
static int _elf32_map_segment(vmmap_t *map, vnode_t *file, int32_t memoff, const Elf32_Phdr *segment)
{
        uintptr_t addr;
        /* Apply the page-granular load offset; memoff may be negative,
         * in which case the segment is shifted down in the address space. */
        if (memoff < 0) {
                KASSERT(ADDR_TO_PN(segment->p_vaddr) > (uint32_t) -memoff);
                addr = (uintptr_t)segment->p_vaddr - (uintptr_t)PN_TO_ADDR(-memoff);
        } else {
                addr = (uintptr_t)segment->p_vaddr + (uintptr_t)PN_TO_ADDR(memoff);
        }
        uint32_t off = segment->p_offset;       /* byte offset of the segment within the file */
        uint32_t memsz = segment->p_memsz;      /* size of the segment in memory */
        uint32_t filesz = segment->p_filesz;    /* bytes of the segment stored in the file */

        dbg(DBG_ELF, "Mapping program segment: type %#x, offset %#08x,"
            " vaddr %#08x, filesz %#x, memsz %#x, flags %#x, align %#x\n",
            segment->p_type, segment->p_offset, segment->p_vaddr,
            segment->p_filesz, segment->p_memsz, segment->p_flags,
            segment->p_align);

        /* check for bad data in the segment header */
        if (PAGE_SIZE != segment->p_align) {
                dbg(DBG_ELF, "ERROR: segment does not have correct alignment\n");
                return -ENOEXEC;
        } else if (filesz > memsz) {
                dbg(DBG_ELF, "ERROR: segment file size is greater than memory size\n");
                return -ENOEXEC;
        } else if (PAGE_OFFSET(addr) != PAGE_OFFSET(off)) {
                dbg(DBG_ELF, "ERROR: segment address and offset are not aligned correctly\n");
                return -ENOEXEC;
        }

        /* translate the ELF segment flags into mmap protection bits */
        int perms = 0;
        if (PF_R & segment->p_flags) {
                perms |= PROT_READ;
        }
        if (PF_W & segment->p_flags) {
                perms |= PROT_WRITE;
        }
        if (PF_X & segment->p_flags) {
                perms |= PROT_EXEC;
        }

        if (0 < filesz) {
                /* something needs to be mapped from the file */
                /* start from the starting address and include enough pages to
                 * map all filesz bytes of the file */
                uint32_t lopage = ADDR_TO_PN(addr);
                uint32_t npages = ADDR_TO_PN(addr + filesz - 1) - lopage + 1;
                off_t fileoff = (off_t)PAGE_ALIGN_DOWN(off);

                int ret;
                if (!vmmap_is_range_empty(map, lopage, npages)) {
                        dbg(DBG_ELF, "ERROR: ELF file contains overlapping segments\n");
                        return -ENOEXEC;
                } else if (0 > (ret = vmmap_map(map, file, lopage, npages, perms,
                                                MAP_PRIVATE | MAP_FIXED, fileoff,
                                                0, NULL))) {
                        return ret;
                }
        }

        if (memsz > filesz) {
                /* there is left over memory in the segment which must
                 * be initialized to 0 (anonymously mapped) */
                uint32_t lopage = ADDR_TO_PN(addr + filesz);
                uint32_t npages = ADDR_TO_PN(PAGE_ALIGN_UP(addr + memsz)) - lopage;

                int ret;
                /* lopage itself may overlap the tail of the file mapping
                 * above, so only the pages after it are checked for overlap */
                if (npages > 1 && !vmmap_is_range_empty(map, lopage + 1, npages - 1)) {
                        dbg(DBG_ELF, "ERROR: ELF file contains overlapping segments\n");
                        return -ENOEXEC;
                } else if (0 > (ret = vmmap_map(map, NULL, lopage, npages, perms,
                                                MAP_PRIVATE | MAP_FIXED, 0, 0, NULL))) {
                        return ret;
                } else if (!PAGE_ALIGNED(addr + filesz) && filesz > 0) {
                        /* In this case, we have accidentally zeroed too much of memory, as
                         * we zeroed all memory in the page containing addr + filesz.
                         * However, the remaining part of the data is not a full page, so we
                         * should not just map in another page (as there could be garbage
                         * after addr+filesz). For instance, consider the data-bss boundary
                         * (c.f. Intel x86 ELF supplement pp. 82).
                         * To fix this, we need to read in the contents of the file manually
                         * and put them at that user space addr in the anon map we just
                         * added. */
                        void *buf;
                        if (NULL == (buf = page_alloc()))
                                return -ENOMEM;
                        if (!(0 > (ret = file->vn_ops->read(file, (off_t) PAGE_ALIGN_DOWN(off + filesz),
                                                            buf, PAGE_OFFSET(addr + filesz))))) {
                                ret = vmmap_write(map, PAGE_ALIGN_DOWN(addr + filesz),
                                                  buf, PAGE_OFFSET(addr + filesz));
                        }
                        page_free(buf);
                        return ret;
                }
        }
        return 0;
}
Example #13
0
/*
 * This function implements the mmap(2) syscall, but only
 * supports the MAP_SHARED, MAP_PRIVATE, MAP_FIXED, and
 * MAP_ANON flags.
 *
 * Add a mapping to the current process's address space.
 * You need to do some error checking; see the ERRORS section
 * of the manpage for the problems you should anticipate.
 * After error checking most of the work of this function is
 * done by vmmap_map(), but remember to clear the TLB.
 */
int
do_mmap(void *addr, size_t len, int prot, int flags,
        int fd, off_t off, void **ret)
{
	file_t *file = NULL;

	/* The file offset must fall on a page boundary. */
	if (!PAGE_ALIGNED(off)) {
		return -EINVAL;
	}

	/* len is unsigned: reject zero and anything larger than the whole
	 * user range (the original used the magic constant 0xc0000000). */
	if (len == 0 || len > USER_MEM_HIGH - USER_MEM_LOW) {
		return -EINVAL;
	}

	/* A fixed mapping must lie inside userland; ">= USER_MEM_HIGH"
	 * fixes the original's off-by-one that allowed addr == HIGH. */
	if ((flags & MAP_FIXED) &&
	    ((uint32_t)addr < USER_MEM_LOW || (uint32_t)addr >= USER_MEM_HIGH)) {
		return -EINVAL;
	}

	/* One of MAP_SHARED / MAP_PRIVATE is required. */
	if (!(flags & MAP_SHARED || flags & MAP_PRIVATE)) {
		return -EINVAL;
	}

	vnode_t *vn = NULL;
	uint32_t lopages = 0;
	/* Round the byte length up to whole pages. */
	size_t npages = (len - 1) / PAGE_SIZE + 1;
	vmarea_t *newvma = NULL;

	if (flags & MAP_FIXED) {
		lopages = ADDR_TO_PN(addr);
	}

	if (!(flags & MAP_ANON)) {
		/* fd == NFILES is also out of range (was "fd > NFILES"). */
		if (fd < 0 || fd >= NFILES) {
			return -EBADF;
		}
		file = fget(fd);
		/* Check for NULL before dereferencing: the original read
		 * file->f_mode first and NULL-checked afterwards. */
		if (file == NULL) {
			return -EBADF;
		}
		/* A writable shared mapping needs a writable descriptor. */
		if ((prot & PROT_WRITE && MAP_SHARED & flags) &&
		    (file->f_mode == FMODE_READ)) {
			fput(file);
			return -EACCES;
		}
		vn = file->f_vnode;
	}

	int status = vmmap_map(curproc->p_vmmap, vn, lopages, npages, prot,
	                       flags, off, VMMAP_DIR_HILO, &newvma);
	if (file != NULL) {
		fput(file);
	}
	if (newvma != NULL) {
		*ret = PN_TO_ADDR(newvma->vma_start);
	}
	if (status < 0) {
		/* (The original asserted file == NULL here, which would
		 * panic whenever a file-backed mapping failed.) */
		return status;
	}

	/* Drop any stale translations for the newly mapped range. */
	tlb_flush_all();
	KASSERT(curproc->p_pagedir != NULL);
	return 0;
}
Example #14
0
/*
 * This gets called by _pt_fault_handler in mm/pagetable.c The
 * calling function has already done a lot of error checking for
 * us. In particular it has checked that we are not page faulting
 * while in kernel mode. Make sure you understand why an
 * unexpected page fault in kernel mode is bad in Weenix. You
 * should probably read the _pt_fault_handler function to get a
 * sense of what it is doing.
 *
 * Before you can do anything you need to find the vmarea that
 * contains the address that was faulted on. Make sure to check
 * the permissions on the area to see if the process has
 * permission to do [cause]. If either of these checks does not
 * pass kill the offending process, setting its exit status to
 * EFAULT (normally we would send the SIGSEGV signal, however
 * Weenix does not support signals).
 *
 * Now it is time to find the correct page (don't forget
 * about shadow objects, especially copy-on-write magic!). Make
 * sure that if the user writes to the page it will be handled
 * correctly.
 *
 * Finally call pt_map to have the new mapping placed into the
 * appropriate page table.
 *
 * @param vaddr the address that was accessed to cause the fault
 *
 * @param cause this is the type of operation on the memory
 *              address which caused the fault, possible values
 *              can be found in pagefault.h
 */
/*
 * Resolve a user-mode page fault at vaddr: find the covering vmarea,
 * verify the faulting access is permitted, pull in the backing page
 * frame (pframe_lookup walks any shadow chain and performs the
 * copy-on-write when forwrite is set), and install the translation.
 * Any violation kills the process with EFAULT (Weenix has no signals,
 * so no SIGSEGV is delivered).
 */
void
handle_pagefault(uintptr_t vaddr, uint32_t cause)
{
        /*NOT_YET_IMPLEMENTED("VM: handle_pagefault");*/
        uint32_t vfn = ADDR_TO_PN(vaddr);
        vmarea_t *vma = vmmap_lookup(curproc->p_vmmap, vfn);
        int forwrite = (cause & FAULT_WRITE) ? 1 : 0;

        /* No mapping covers the faulting address. */
        if (vma == NULL) {
                proc_kill(curproc, EFAULT);
                return;
        }
        /* A present-page fault on an unreadable area is a protection
         * violation. */
        if ((cause & FAULT_PRESENT) && !(vma->vma_prot & PROT_READ)) {
                proc_kill(curproc, EFAULT);
                return;
        }
        /* NOTE(review): PROT_NONE is conventionally 0, so this test
         * kills on every FAULT_RESERVED fault — same behavior as the
         * original code; confirm that is the intent. */
        if ((cause & FAULT_RESERVED) && !(vma->vma_prot & PROT_NONE)) {
                proc_kill(curproc, EFAULT);
                return;
        }
        if ((cause & FAULT_EXEC) && !(vma->vma_prot & PROT_EXEC)) {
                proc_kill(curproc, EFAULT);
                return;
        }
        /* Writes need PROT_WRITE; everything else needs PROT_READ.
         * (proc_kill on curproc exits the process, matching the
         * do_exit(EFAULT) calls this replaces for consistency.) */
        if (forwrite && !(vma->vma_prot & PROT_WRITE)) {
                proc_kill(curproc, EFAULT);
                return;
        }
        if (!forwrite && !(vma->vma_prot & PROT_READ)) {
                proc_kill(curproc, EFAULT);
                return;
        }

        /* Translate the faulting vfn into a page number within the
         * vmarea's backing memory object. */
        uint32_t pagenum = vma->vma_off + vfn - vma->vma_start;
        pframe_t *pf = NULL;
        if (pframe_lookup(vma->vma_obj, pagenum, forwrite, &pf) < 0) {
                proc_kill(curproc, EFAULT);
                return;
        }

        uint32_t pdflags = PD_PRESENT | PD_USER;
        uint32_t ptflags = PT_PRESENT | PT_USER;
        if (forwrite) {
                /* FIX: mark the frame dirty so a written page is flushed
                 * back to its object; the original omitted this. */
                pframe_dirty(pf);
                pdflags |= PD_WRITE;
                ptflags |= PT_WRITE;
        }

        uintptr_t paddr = pt_virt_to_phys((uintptr_t)pf->pf_addr);
        pt_map(curproc->p_pagedir, (uintptr_t)PN_TO_ADDR(vfn), paddr,
               pdflags, ptflags);
}
Example #15
0
/*
 * This function implements the brk(2) system call.
 *
 * This routine manages the calling process's "break" -- the ending address
 * of the process's "dynamic" region (often also referred to as the "heap").
 * The current value of a process's break is maintained in the 'p_brk' member
 * of the proc_t structure that represents the process in question.
 *
 * The 'p_brk' and 'p_start_brk' members of a proc_t struct are initialized
 * by the loader. 'p_start_brk' is subsequently never modified; it always
 * holds the initial value of the break. Note that the starting break is
 * not necessarily page aligned!
 *
 * 'p_start_brk' is the lower limit of 'p_brk' (that is, setting the break
 * to any value less than 'p_start_brk' should be disallowed).
 *
 * The upper limit of 'p_brk' is defined by the minimum of (1) the
 * starting address of the next occurring mapping or (2) USER_MEM_HIGH.
 * That is, growth of the process break is limited only in that it cannot
 * overlap with/expand into an existing mapping or beyond the region of
 * the address space allocated for use by userland. (note the presence of
 * the 'vmmap_is_range_empty' function).
 *
 * The dynamic region should always be represented by at most ONE vmarea.
 * Note that vmareas only have page granularity, you will need to take this
 * into account when deciding how to set the mappings if p_brk or p_start_brk
 * is not page aligned.
 *
 * You are guaranteed that the process data/bss region is non-empty.
 * That is, if the starting brk is not page-aligned, its page has
 * read/write permissions.
 *
 * If addr is NULL, you should NOT fail as the man page says. Instead,
 * "return" the current break. We use this to implement sbrk(0) without writing
 * a separate syscall. Look in user/libc/syscall.c if you're curious.
 *
 * Also, despite the statement on the manpage, you MUST support combined use
 * of brk and mmap in the same process.
 *
 * Note that this function "returns" the new break through the "ret" argument.
 * Return 0 on success, -errno on failure.
 */
int do_brk(void *addr, void **ret) {
	/*NOT_YET_IMPLEMENTED("VM: do_brk");*/
	vmarea_t *vmarea;
	/*If addr is NULL, "return" the current break.*/
	dbg(DBG_PRINT, "(GRADING3D 3)\n");
	if (addr == NULL || addr == curproc->p_brk) {
		dbg(DBG_PRINT, "(GRADING3D 3)\n");
		*ret = curproc->p_brk;
		return 0;
	}

	/*check for the address range*/
	if (((uint32_t) addr > USER_MEM_HIGH) || (curproc->p_start_brk > addr)) {
		dbg(DBG_PRINT, "(GRADING3D 3)\n");
		return -ENOMEM;
	}

	/*if p_brk and addr are not page aligned*/
	if (ADDR_TO_PN(
			PAGE_ALIGN_UP(curproc->p_brk)) != ADDR_TO_PN(PAGE_ALIGN_UP(addr))) {
		dbg(DBG_PRINT, "(GRADING3D 3)\n");
		if (addr > curproc->p_brk) {
			dbg(DBG_PRINT, "(GRADING3D 3)\n");
			if (!vmmap_is_range_empty(curproc->p_vmmap,
					ADDR_TO_PN(PAGE_ALIGN_UP(curproc->p_brk)),
					ADDR_TO_PN(
							PAGE_ALIGN_UP(addr)) -ADDR_TO_PN(PAGE_ALIGN_UP(curproc->p_brk)))) {
				dbg(DBG_PRINT, "(GRADING3D 3)\n");
				return -ENOMEM;
			}
			vmarea = vmmap_lookup(curproc->p_vmmap,
					ADDR_TO_PN(PAGE_ALIGN_UP(curproc->p_start_brk)));
			if (vmarea != NULL) {
				dbg(DBG_PRINT, "(GRADING3D 3)\n");
				vmarea->vma_end = ADDR_TO_PN(PAGE_ALIGN_UP(addr));
			} else {
				dbg(DBG_PRINT, "(GRADING3D 3)\n");
				vmmap_map(curproc->p_vmmap,
				NULL, ADDR_TO_PN(PAGE_ALIGN_UP(curproc->p_start_brk)),
						ADDR_TO_PN(
								PAGE_ALIGN_UP(addr)) - ADDR_TO_PN(PAGE_ALIGN_UP(curproc->p_start_brk)),
						PROT_READ | PROT_WRITE, MAP_PRIVATE, 0, VMMAP_DIR_LOHI,
						&vmarea);
			}
		} else {
			dbg(DBG_PRINT, "(GRADING3D 3)\n");
			vmmap_remove(curproc->p_vmmap, ADDR_TO_PN(PAGE_ALIGN_UP(addr)),
					ADDR_TO_PN(
							PAGE_ALIGN_UP(curproc->p_brk)) -ADDR_TO_PN(PAGE_ALIGN_UP(addr)));
		}

		curproc->p_brk = addr;
		*ret = addr;
	} else {
		dbg(DBG_PRINT, "(GRADING3D 3)\n");
		curproc->p_brk = addr;
		*ret = addr;
		return 0;
	}
	return 0;
}
Example #16
0
/*
 * Resolve a user-mode page fault at vaddr. Looks up the covering
 * vmarea, checks that the faulting access (read or write, per cause)
 * is permitted, fetches the backing page frame, and maps it into the
 * process page table. Permission or lookup failure kills the process
 * with EFAULT. The read and write paths of the original differed only
 * in the permission bit checked, the dirtying of the frame, and the
 * PD/PT write flags — they are unified here.
 */
void
handle_pagefault(uintptr_t vaddr, uint32_t cause)
{
    pframe_t *pf = NULL;
    int ret_val;
    vmarea_t *vma = vmmap_lookup(curproc->p_vmmap, ADDR_TO_PN(vaddr));

    if (vma == NULL) {
        dbg(DBG_PRINT, "(GRADING3D 1): No vmarea found\n");
        proc_kill(curproc, EFAULT);
        return;
    }

    /* Writes require PROT_WRITE, everything else PROT_READ. */
    int forwrite = (cause & FAULT_WRITE) ? 1 : 0;
    int required = forwrite ? PROT_WRITE : PROT_READ;
    if (!(vma->vma_prot & required)) {
        dbg(DBG_PRINT, "(GRADING3D 1),Vmarea lacks required permission\n");
        proc_kill(curproc, EFAULT);
        return;
    }

    /* Page number within the backing object for the faulting vfn. */
    uint32_t pagenum = ADDR_TO_PN(vaddr) - vma->vma_start + vma->vma_off;
    ret_val = pframe_lookup(vma->vma_obj, pagenum, forwrite, &pf);
    if (ret_val < 0) {
        dbg(DBG_PRINT, "(GRADING3D 1),pframe could not be found\n");
        proc_kill(curproc, EFAULT);
        return;
    }

    KASSERT(pf);
    dbg(DBG_PRINT, "(GRADING3A 5.a),pframe is not NULL\n");
    KASSERT(pf->pf_addr);
    dbg(DBG_PRINT, "(GRADING3A 5.a),pf->pf_addr is not NULL\n");

    uint32_t pdflags = PD_PRESENT | PD_USER;
    uint32_t ptflags = PT_PRESENT | PT_USER;
    if (forwrite) {
        /* NOTE(review): pframe_dirty's return value was ignored in the
         * original as well — confirm failure cannot occur here. */
        pframe_dirty(pf);
        pdflags |= PD_WRITE;
        ptflags |= PT_WRITE;
    }

    dbg(DBG_PRINT, "(GRADING3D 1),Calling pt_map\n");
    pt_map(curproc->p_pagedir, (uintptr_t)PAGE_ALIGN_DOWN(vaddr),
           pt_virt_to_phys((uintptr_t)pf->pf_addr), pdflags, ptflags);
}
Example #17
0
File: brk.c Project: lee4sj/brown
/*
 * This function implements the brk(2) system call.
 *
 * This routine manages the calling process's "break" -- the ending address
 * of the process's "dynamic" region (often also referred to as the "heap").
 * The current value of a process's break is maintained in the 'p_brk' member
 * of the proc_t structure that represents the process in question.
 *
 * The 'p_brk' and 'p_start_brk' members of a proc_t struct are initialized
 * by the loader. 'p_start_brk' is subsequently never modified; it always
 * holds the initial value of the break. Note that the starting break is
 * not necessarily page aligned!
 *
 * 'p_start_brk' is the lower limit of 'p_brk' (that is, setting the break
 * to any value less than 'p_start_brk' should be disallowed).
 *
 * The upper limit of 'p_brk' is defined by the minimum of (1) the
 * starting address of the next occurring mapping or (2) USER_MEM_HIGH.
 * That is, growth of the process break is limited only in that it cannot
 * overlap with/expand into an existing mapping or beyond the region of
 * the address space allocated for use by userland. (note the presence of
 * the 'vmmap_is_range_empty' function).
 *
 * The dynamic region should always be represented by at most ONE vmarea.
 * Note that vmareas only have page granularity, you will need to take this
 * into account when deciding how to set the mappings if p_brk or p_start_brk
 * is not page aligned.
 *
 * You are guaranteed that the process data/bss region is non-empty.
 * That is, if the starting brk is not page-aligned, its page has
 * read/write permissions.
 *
 * If addr is NULL, you should NOT fail as the man page says. Instead,
 * "return" the current break. We use this to implement sbrk(0) without writing
 * a separate syscall. Look in user/libc/syscall.c if you're curious.
 *
 * Also, despite the statement on the manpage, you MUST support combined use
 * of brk and mmap in the same process.
 *
 * Note that this function "returns" the new break through the "ret" argument.
 * Return 0 on success, -errno on failure.
 */
/*
 * brk(2): set the process break to addr and report the new break
 * through *ret. addr == NULL implements sbrk(0): report the current
 * break without failing. Returns 0 on success, -errno on failure.
 */
int
do_brk(void *addr, void **ret)
{
	dbg(DBG_VM, "\n");

	void *cur_sbrk = curproc->p_start_brk;
	vmarea_t *vma;
	vmarea_t *nvma = NULL;

	/* sbrk(0): just report the current break. */
	if (addr == NULL) {
		*ret = curproc->p_brk;
		return 0;
	}

	KASSERT((uintptr_t)curproc->p_brk >= (uintptr_t)curproc->p_start_brk);

	/* The break may never drop below the initial break. */
	if ((uintptr_t)cur_sbrk > (uintptr_t)addr) {
		return -ENOMEM;
	}

	/* ...nor leave the userland address space. */
	if ((uintptr_t)addr >= USER_MEM_HIGH)
		return -ENOMEM;

	/* Page numbers one past the old and new break, rounded up.
	 * (The original expressed this with conditional expressions whose
	 * true branch had no effect; plain statements are clearer.) */
	uintptr_t pbrk_pg = ADDR_TO_PN(curproc->p_brk);
	uintptr_t addr_pg = ADDR_TO_PN(addr);
	if (!PAGE_ALIGNED(addr))
		addr_pg++;
	if (!PAGE_ALIGNED(curproc->p_brk))
		pbrk_pg++;

	/* Same page: vmareas have page granularity, so only p_brk moves. */
	if (pbrk_pg == addr_pg) {
		curproc->p_brk = addr;
		*ret = addr;
		return 0;
	}

	/* Get dynamic region's vmarea. */
	vma = vmmap_lookup(curproc->p_vmmap, ADDR_TO_PN(cur_sbrk));
	KASSERT(vma && "vmarea for the dynamic region is not found!");

	/* Find the next vmarea after the heap, if any, to bound growth. */
	if (vma->vma_plink.l_next != &curproc->p_vmmap->vmm_list) {
		nvma = list_item(vma->vma_plink.l_next, vmarea_t, vma_plink);
		KASSERT(nvma &&
			"next vmarea should exist but could not be found!");
	}

	/* Growth must not overlap the next mapping. */
	if (nvma && addr_pg > nvma->vma_start)
		return -ENOMEM;

	/* Shrink removes (and TLB-invalidates) the dropped pages; growth
	 * just extends the single heap vmarea. */
	if (pbrk_pg > addr_pg) {
		vmmap_remove(curproc->p_vmmap, addr_pg, (pbrk_pg - addr_pg));
		tlb_flush_range((uintptr_t)PN_TO_ADDR(addr_pg), pbrk_pg - addr_pg);
	} else {
		vma->vma_end = addr_pg;
	}

	curproc->p_brk = addr;
	*ret = addr;

	return 0;
}
Example #18
0
/*
 * Load an ELF32 executable (and, if it requests one, its interpreter)
 * into a fresh vmmap, build the argument/environment/auxv stack image,
 * swap the new map into the current process, and report the initial
 * eip/esp. Returns 0 on success, -errno on failure. All intermediate
 * resources are released through the goto-cleanup chain at 'done';
 * on success 'map' holds the process's OLD vmmap so the same chain
 * destroys it.
 */
static int _elf32_load(const char *filename, int fd, char *const argv[],
                       char *const envp[], uint32_t *eip, uint32_t *esp)
{
        int err = 0;
        Elf32_Ehdr header;
        Elf32_Ehdr interpheader;

        /* variables to clean up on failure */
        vmmap_t *map = NULL;
        file_t *file = NULL;
        char *pht = NULL;
        char *interpname = NULL;
        int interpfd = -1;
        file_t *interpfile = NULL;
        char *interppht = NULL;
        Elf32_auxv_t *auxv = NULL;
        char *argbuf = NULL;

        uintptr_t entry;

        file = fget(fd);
        KASSERT(NULL != file);

        /* Load and verify the ELF header */
        if (0 > (err = _elf32_load_ehdr(fd, &header, 0))) {
                goto done;
        }

        if (NULL == (map = vmmap_create())) {
                err = -ENOMEM;
                goto done;
        }

        size_t phtsize = header.e_phentsize * header.e_phnum;
        if (NULL == (pht = kmalloc(phtsize))) {
                err = -ENOMEM;
                goto done;
        }
        /* Read in the program header table */
        if (0 > (err = _elf32_load_phtable(fd, &header, pht, phtsize))) {
                goto done;
        }
        /* Load the segments in the program header table */
        if (0 > (err = _elf32_map_progsegs(file->f_vnode, map, &header, pht, 0))) {
                goto done;
        }

        Elf32_Phdr *phinterp = NULL;
        /* Check if program requires an interpreter */
        if (0 > (err = _elf32_find_phinterp(&header, pht, &phinterp))) {
                goto done;
        }

        /* Calculate program bounds for future reference */
        void *proglow;
        void *proghigh;
        _elf32_calc_progbounds(&header, pht, &proglow, &proghigh);

        entry = (uintptr_t) header.e_entry;

        /* if an interpreter was requested load it */
        if (NULL != phinterp) {
                /* read the file name of the interpreter from the binary */
                if (0 > (err = do_lseek(fd, phinterp->p_offset, SEEK_SET))) {
                        goto done;
                } else if (NULL == (interpname = kmalloc(phinterp->p_filesz))) {
                        err = -ENOMEM;
                        goto done;
                } else if (0 > (err = do_read(fd, interpname, phinterp->p_filesz))) {
                        goto done;
                }
                /* A short read means a truncated/corrupt binary. */
                if (err != (int)phinterp->p_filesz) {
                        err = -ENOEXEC;
                        goto done;
                }

                /* open the interpreter */
                dbgq(DBG_ELF, "ELF Interpreter: %*s\n", phinterp->p_filesz, interpname);
                if (0 > (interpfd = do_open(interpname, O_RDONLY))) {
                        err = interpfd;
                        goto done;
                }
                kfree(interpname);
                interpname = NULL;

                interpfile = fget(interpfd);
                KASSERT(NULL != interpfile);

                /* Load and verify the interpreter ELF header */
                if (0 > (err = _elf32_load_ehdr(interpfd, &interpheader, 1))) {
                        goto done;
                }
                size_t interpphtsize = interpheader.e_phentsize * interpheader.e_phnum;
                if (NULL == (interppht = kmalloc(interpphtsize))) {
                        err = -ENOMEM;
                        goto done;
                }
                /* Read in the program header table */
                if (0 > (err = _elf32_load_phtable(interpfd, &interpheader, interppht, interpphtsize))) {
                        goto done;
                }

                /* Interpreter shouldn't itself need an interpreter */
                Elf32_Phdr *interpphinterp;
                if (0 > (err = _elf32_find_phinterp(&interpheader, interppht, &interpphinterp))) {
                        goto done;
                }
                if (NULL != interpphinterp) {
                        err = -EINVAL;
                        goto done;
                }

                /* Calculate the interpreter program size */
                void *interplow;
                void *interphigh;
                _elf32_calc_progbounds(&interpheader, interppht, &interplow, &interphigh);
                uint32_t interpnpages = ADDR_TO_PN(PAGE_ALIGN_UP(interphigh)) - ADDR_TO_PN(interplow);

                /* Find space for the interpreter */
                /* This is the first pn at which the interpreter will be mapped */
                uint32_t interppagebase = (uint32_t) vmmap_find_range(map, interpnpages, VMMAP_DIR_HILO);
                if ((uint32_t) - 1 == interppagebase) {
                        err = -ENOMEM;
                        goto done;
                }

                /* Base address at which the interpreter begins on that page */
                void *interpbase = (void *)((uintptr_t)PN_TO_ADDR(interppagebase) + PAGE_OFFSET(interplow));

                /* Offset from "expected base" in number of pages */
                int32_t interpoff = (int32_t) interppagebase - (int32_t) ADDR_TO_PN(interplow);

                entry = (uintptr_t) interpbase + ((uintptr_t) interpheader.e_entry - (uintptr_t) interplow);

                /* Load the interpreter program header and map in its segments */
                if (0 > (err = _elf32_map_progsegs(interpfile->f_vnode, map, &interpheader, interppht, interpoff))) {
                        goto done;
                }

                /* Build the ELF aux table */
                /* Need to hold AT_PHDR, AT_PHENT, AT_PHNUM, AT_ENTRY, AT_BASE,
                 * AT_PAGESZ, AT_NULL */
                if (NULL == (auxv = (Elf32_auxv_t *) kmalloc(7 * sizeof(Elf32_auxv_t)))) {
                        err = -ENOMEM;
                        goto done;
                }
                Elf32_auxv_t *auxvent = auxv;

                /* Add all the necessary entries */
                auxvent->a_type = AT_PHDR;
                auxvent->a_un.a_ptr = pht;
                auxvent++;

                auxvent->a_type = AT_PHENT;
                auxvent->a_un.a_val = header.e_phentsize;
                auxvent++;

                auxvent->a_type = AT_PHNUM;
                auxvent->a_un.a_val = header.e_phnum;
                auxvent++;

                auxvent->a_type = AT_ENTRY;
                auxvent->a_un.a_ptr = (void *) header.e_entry;
                auxvent++;

                auxvent->a_type = AT_BASE;
                auxvent->a_un.a_ptr = interpbase;
                auxvent++;

                auxvent->a_type = AT_PAGESZ;
                auxvent->a_un.a_val = PAGE_SIZE;
                auxvent++;

                auxvent->a_type = AT_NULL;

        } else {
                /* Just put AT_NULL (we don't really need this at all) */
                if (NULL == (auxv = (Elf32_auxv_t *) kmalloc(sizeof(Elf32_auxv_t)))) {
                        err = -ENOMEM;
                        goto done;
                }
                auxv->a_type = AT_NULL;
        }

        /* Allocate a stack. We put the stack immediately below the program text.
         * (in the Intel x86 ELF supplement pp 59 "example stack", that is where the
         * stack is located). I suppose we can add this "extra page for magic data" too */
        uint32_t stack_lopage = ADDR_TO_PN(proglow) - (DEFAULT_STACK_SIZE / PAGE_SIZE) - 1;
        err = vmmap_map(map, NULL, stack_lopage, (DEFAULT_STACK_SIZE / PAGE_SIZE) + 1,
                        PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, 0, 0, NULL);
        KASSERT(0 == err);
        dbg(DBG_ELF, "Mapped stack at low addr 0x%p, size %#x\n",
            PN_TO_ADDR(stack_lopage), DEFAULT_STACK_SIZE + PAGE_SIZE);


        /* Copy out arguments onto the user stack */
        int argc, envc, auxc;
        size_t argsize = _elf32_calc_argsize(argv, envp, auxv, phtsize, &argc, &envc, &auxc);
        /* Make sure it fits on the stack */
        if (argsize >= DEFAULT_STACK_SIZE) {
                err = -E2BIG;
                goto done;
        }
        /* Copy arguments into kernel buffer */
        if (NULL == (argbuf = (char *) kmalloc(argsize))) {
                err = -ENOMEM;
                goto done;
        }
        /* Calculate where in user space we start putting the args. */
        void *arglow = (void *)((uintptr_t)(((char *) proglow) - argsize) & ~PTR_MASK);
        /* Copy everything into the user address space, modifying addresses in
         * argv, envp, and auxv to be user addresses as we go. */
        _elf32_load_args(map, arglow, argsize, argbuf, argv, envp, auxv, argc, envc, auxc, phtsize);

        dbg(DBG_ELF, "Past the point of no return. Swapping to map at 0x%p, setting brk to 0x%p\n", map, proghigh);
        /* the final threshold / What warm unspoken secrets will we learn? / Beyond
         * the point of no return ... */

        /* Give the process the new mappings. */
        vmmap_t *tempmap = curproc->p_vmmap;
        curproc->p_vmmap = map;
        map = tempmap; /* So the old maps are cleaned up */
        curproc->p_vmmap->vmm_proc = curproc;
        map->vmm_proc = NULL;

        /* Flush the process pagetables and TLB */
        pt_unmap_range(curproc->p_pagedir, USER_MEM_LOW, USER_MEM_HIGH);
        tlb_flush_all();

        /* Set the process break and starting break (immediately after the mapped-in
         * text/data/bss from the executable) */
        curproc->p_brk = proghigh;
        curproc->p_start_brk = proghigh;

        /* FIX: strncpy does not NUL-terminate when filename is at least
         * PROC_NAME_LEN characters long (CERT STR32-C); terminate
         * explicitly so p_comm is always a valid string. */
        strncpy(curproc->p_comm, filename, PROC_NAME_LEN);
        curproc->p_comm[PROC_NAME_LEN - 1] = '\0';

        /* Tell the caller the correct stack pointer and instruction
         * pointer to begin execution in user space */
        *eip = (uint32_t) entry;
        *esp = ((uint32_t) arglow) - 4; /* Space on the user stack for the (garbage) return address */
        /* Note that the return address will be fixed by the userland entry code,
         * whether in static or dynamic */

        /* And we're done */
        err = 0;

done:
        if (NULL != map) {
                vmmap_destroy(map);
        }
        if (NULL != file) {
                fput(file);
        }
        if (NULL != pht) {
                kfree(pht);
        }
        if (NULL != interpname) {
                kfree(interpname);
        }
        if (0 <= interpfd) {
                do_close(interpfd);
        }
        if (NULL != interpfile) {
                fput(interpfile);
        }
        if (NULL != interppht) {
                kfree(interppht);
        }
        if (NULL != auxv) {
                kfree(auxv);
        }
        if (NULL != argbuf) {
                kfree(argbuf);
        }
        return err;
}