Example #1
int
as_prepare_load(struct addrspace *as)
{
	/* Back each segment and the user stack with physical pages. */
	int result;

	result = getppages(as, as->as_vbase1, as->as_npages1);
	if (result) {
		return result;
	}

	result = getppages(as, as->as_vbase2, as->as_npages2);
	if (result) {
		return result;
	}

	result = getppages(as, USERSTACK - 1, 1);
	if (result) {
		return result;
	}

	return 0;
}
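This variant assumes a getppages that takes the address space, a virtual base, and a page count, and that returns an error code rather than a physical address. A prototype consistent with those calls (a hypothetical signature, not shown in the source) would be:

/* Hypothetical prototype implied by the calls above: back npages pages
 * starting at vaddr in as with physical memory; returns 0 or an errno. */
int getppages(struct addrspace *as, vaddr_t vaddr, unsigned long npages);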
Example #2
int
as_prepare_load(struct addrspace *as)
{
	/* dumbvm-style implementation: back each segment and the stack
	 * with freshly allocated physical pages. */
	assert(as->as_pbase1 == 0);
	assert(as->as_pbase2 == 0);
	assert(as->as_stackpbase == 0);

	as->as_pbase1 = getppages(as->as_npages1);
	if (as->as_pbase1 == 0) {
		return ENOMEM;
	}

	as->as_pbase2 = getppages(as->as_npages2);
	if (as->as_pbase2 == 0) {
		return ENOMEM;
	}

	as->as_stackpbase = getppages(DUMBVM_STACKPAGES);
	if (as->as_stackpbase == 0) {
		return ENOMEM;
	}

	return 0;
}
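Most of the examples below call the stock dumbvm physical-page allocator. For reference, the base OS/161 dumbvm getppages is roughly the following sketch, which simply steals contiguous frames from the boot-time RAM allocator:

static
paddr_t
getppages(unsigned long npages)
{
	int spl;
	paddr_t addr;

	/* ram_stealmem is not interrupt-safe, so raise the priority level */
	spl = splhigh();
	addr = ram_stealmem(npages);
	splx(spl);

	return addr;
}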
Example #3
/* Allocate/free some kernel-space virtual pages */
vaddr_t alloc_kpages(int npages)
{
	paddr_t pa;
	if (vm_initialized) {
		if (npages == 1) {
			pa = page_alloc();
		}
		else {
			pa = alloc_npages(npages);
		}

		if (pa == 0) {
			panic("alloc_npages could not find an empty page\n");
			return 0;
		}
	}
	else {
		pa = getppages(npages);
		if (pa == 0) {
			panic("getppages could not find an empty page\n");
			return 0;
		}
	}
	return PADDR_TO_KVADDR(pa);
}
Example #4
paddr_t pte_load(struct pte *pte){
	/* Obtain a paddr for the newly loaded page */
	paddr_t paddr = getppages(1);
	if (paddr == 0) {
		/* out of physical memory; hand back 0 so the caller can fail */
		return 0;
	}
	pte->paddr = paddr;
	pte->valid = 1;
	return paddr;
}
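Example #4 stores into a struct pte that is not shown. A minimal definition consistent with the two fields the snippet touches (the struct layout and field widths are assumptions) might be:

/* Minimal page-table entry assumed by pte_load above (hypothetical layout) */
struct pte {
	paddr_t paddr;      /* physical frame backing this page, 0 if none */
	unsigned valid : 1; /* set once the page has been allocated/loaded */
};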
Example #5
/* Allocate/free some kernel-space virtual pages */
vaddr_t
alloc_kpages(int npages)
{
        paddr_t pa;
        pa = getppages(npages);
        if (pa==0) return 0;
        return PADDR_TO_KVADDR(pa);
}
Example #6
/* Allocate/free some kernel-space virtual pages */
vaddr_t 
alloc_kpages(int npages)
{
	paddr_t pa;
	pa = getppages(npages);
	if (pa==0) {
		return 0;
	}
	return PADDR_TO_KVADDR(pa);
}
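The matching free side in the base dumbvm code is a no-op, because ram_stealmem never returns memory; several of the alloc_kpages variants above keep that behaviour:

void
free_kpages(vaddr_t addr)
{
	/* nothing - without a real coremap, stolen pages are never reclaimed */
	(void)addr;
}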
Example #7
/* kmalloc routines */
vaddr_t
alloc_kpages(unsigned npages)
{
	paddr_t pa = getppages(npages);

	if (pa == 0) {
		return 0;
	}
	return PADDR_TO_KVADDR(pa);
}
Example #8
paddr_t
alloc_upages(int npages)
{
	paddr_t pa;
	pa = getppages(npages);

	if (pa==0) {
		return 0;
	}
	//	return PADDR_TO_UVADDR(pa);
	return pa;
}
Example #9
/* Allocate/free some kernel-space pages */
paddr_t 
alloc_kpages(int npages)
{
	paddr_t pa;

	if (vm_has_bootstrapped) {
		pa = getkpages(npages);
	} else {
		pa = getppages(npages);
	}

	if (pa==0)
		return 0;
	else
		return PADDR_TO_KVADDR(pa);
}
Example #10
/* Allocate/free some kernel-space virtual pages */
vaddr_t 
alloc_kpages(int npages)	/* called from kmalloc, which is in lib/kheap.c */
{
	paddr_t pa;

	pa = getppages(npages);
	if (pa == 0) {
		/* could not find enough contiguous pages */
		return 0;
	}
	return PADDR_TO_KVADDR(pa);
}
Example #11
vaddr_t 
alloc_kpages(int npages)
{
	/* virtually no change here; only the implementation of getppages differs */
	paddr_t pa;
	vaddr_t va;

	pa = getppages(npages);
	if (pa == 0) {
		return 0;
	}
	va = PADDR_TO_KVADDR(pa);

	if (va <= USERTOP) {
		assert(curthread != NULL);
		assert(curthread->t_process != NULL);
		coremap_insertpid(pa, curthread->t_process->PID);
	}
	return va;
}
Example #12
/* Allocate kernel heap pages (called by kmalloc) */
vaddr_t alloc_kpages(int npages)
{
	if(!vm_initialized) {
		/* Use dumbvm if VM not initialized yet */
		paddr_t pa;
		pa = getppages(npages);
		if (pa==0) {
			return 0;
		}
		return PADDR_TO_KVADDR(pa);
	}
	if(npages == 1) {
		struct page *kern_page = page_alloc(0x0,0x0,0);
		return PADDR_TO_KVADDR(kern_page->pa);
	}
	else if(npages > 1) {
		return page_nalloc(npages);
	}
	else {
		panic("alloc_kpages called with a negative page count!");
	}
	return 0;
}
Example #13
int
vm_fault(int faulttype, vaddr_t faultaddress)
{
	vaddr_t vbase1, vtop1, vbase2, vtop2, stackbase, stacktop;
	paddr_t paddr;
	int i;
	u_int32_t ehi, elo;
	struct addrspace *as = curthread->t_vmspace;
	int spl;
    int result;
    int probe;
    
    if (first_v != faultaddress)
    {
        first_v = faultaddress;
        first_read = 0;
    } else {
        first_read = 1;
    }
    
    int p_i;
    
	spl = splhigh();
    
    
	faultaddress &= PAGE_FRAME;
    
	DEBUG(DB_VM, "dumbvm: fault: 0x%x\n", faultaddress);
    
    
	if (as == NULL) {
		/*
		 * No address space set up. This is probably a kernel
		 * fault early in boot. Return EFAULT so as to panic
		 * instead of getting into an infinite faulting loop.
		 */
		return EFAULT;
	}
    
	vbase1 = as->as_vbase1;
	vtop1 = vbase1 + as->as_npages1 * PAGE_SIZE;
	vbase2 = as->as_vbase2;
	vtop2 = vbase2 + as->as_npages2 * PAGE_SIZE;
	stackbase = USERSTACK - DUMBVM_STACKPAGES * PAGE_SIZE;
	stacktop = USERSTACK;
    
    p_i = faultaddress/PAGE_SIZE;
    
    int segment; // -1 = invalid, 0 = code, 1 = data, 2 = stack
    size_t seg_size;
    size_t f_size;
    off_t offset;
    int flags;
    
    // Determine in which segment page lies,
    // Set file properties
    struct page *pg;
    if (p_i < 0)    {
        panic("addrspace: invalid page table index\n"); // need exception handling
        
    } else if (faultaddress >= vbase1 && faultaddress < vtop1)   { // look in code
        pg = (struct page *)array_getguy(as->useg1, (faultaddress-vbase1)/PAGE_SIZE);
        segment=0;
        seg_size=as->as_npages1;
        f_size = as->as_filesz1;
        offset = as->as_off1;
        flags=as->flag1;
        p_i = (faultaddress-vbase1)/PAGE_SIZE;
    } else if (faultaddress >= vbase2 && faultaddress < vtop2)    { // look in data
        pg = (struct page *)array_getguy(as->useg2, (faultaddress-vbase2)/PAGE_SIZE);
        segment =1;
        seg_size=as->as_npages2;
        f_size = as->as_filesz2;
        offset = as->as_off2;
        flags=as->flag2;
        p_i = (faultaddress-vbase2)/PAGE_SIZE;
    } else if (faultaddress >= stackbase && faultaddress < stacktop) { // look in stack
        pg = (struct page *)array_getguy(as->usegs, ((faultaddress - stackbase)/PAGE_SIZE));
        segment = 2;
        seg_size = DUMBVM_STACKPAGES;
        f_size = 0;
        offset = 0;
        flags = RWE;
        p_i = (faultaddress - stackbase)/PAGE_SIZE;
    } else {
        /* fault address lies outside every segment */
        splx(spl);
        return EFAULT;
    }
    
    int wr_to = 0;
    int err;
    int f_amount = PAGE_SIZE;
    
    // Handling TLB miss fault type
	switch (faulttype) {
	    case VM_FAULT_READONLY:
                /* write to a read-only page: kill the current process */
                err = EFAULT;
                _exit(0, &err);
                break;
	    case VM_FAULT_READ:
                if (!pg->valid) // page is not in memory
                {
                    pg->valid = 1;
                    pg->vaddr = faultaddress;
                    
                    
                    if(f_size != 0) {
                        
                        paddr = getppages(1);
                        if (paddr == 0)
                        {
                            splx(spl);
                            return ENOMEM;
                        }
                        
                        code_write_nread = 1; // reading from code
                        pg->paddr = paddr;
                        if (segment != 2) {
                            
                            if (f_size < f_amount)
                                f_amount = f_size;
                            
                            splx(spl);
                                result = load_each_segment(as->v, offset+(p_i*PAGE_SIZE), faultaddress, paddr, PAGE_SIZE, f_amount, flags & E_ONLY, 0);
                            spl = splhigh();

                            if (result) {
                                    splx(spl);
                                    return result;
                            }
                            _vmstats_inc(VMSTAT_PAGE_FAULT_DISK);
                            _vmstats_inc(VMSTAT_ELF_FILE_READ);
                            
                        } else {
                            _vmstats_inc(VMSTAT_PAGE_FAULT_ZERO); /* STATS */
                        }
                        
                        if (segment == 0 && first_code_read == 0)
                            first_code_read = 1;
                    
                    } else {
                        paddr = getppages(1);
                        if (paddr == 0)
                        {
                            splx(spl);
                            return ENOMEM;
                        }
                        if (segment == 0 && first_code_read == 0)
                        first_code_read = 1;
                        
                        _vmstats_inc(VMSTAT_PAGE_FAULT_ZERO); /* STATS */
                    }
                    
                    pg->paddr = paddr;

                }
                else // page is in memory
                {
                    if (segment == 0 && first_code_read == 0)
                        first_code_read = 1;
                    wr_to = 1;
                    paddr = pg->paddr;
                    
                    _vmstats_inc(VMSTAT_TLB_RELOAD); /* STATS */
                }
    
            break;
            
	    case VM_FAULT_WRITE:
                
                wr_to = 1;
                
                if (!pg->valid)
                {
                    pg->valid = 1;
                    pg->vaddr = faultaddress;
                    
                    
                    if(f_size != 0) {
                        
                        if (second_write == 0 && segment == 1) {
                            first_read = 0;
                            paddr = getppages(1);
                            second_write = 1;
                            wr_to = 0;
                        } else {
                            first_read = 1;
                            paddr = getppages(1);
                        }
                        
                        if (paddr == 0)
                        {
                            splx(spl);
                            return ENOMEM;
                        }
                        pg->paddr = paddr;
                        
                        if (segment != 2) {
                            
                            if (f_size < f_amount)
                                f_amount = f_size;
                            
                            splx(spl);

                            result = load_each_segment(as->v, offset+(p_i*PAGE_SIZE), faultaddress, paddr, PAGE_SIZE, f_amount, flags & E_ONLY, first_read);
                            spl = splhigh();

                            if (result) {
                                splx(spl);
                                return result;
                            }
                                
                            _vmstats_inc(VMSTAT_PAGE_FAULT_DISK);
                            _vmstats_inc(VMSTAT_ELF_FILE_READ);
                            
                        } else {
                            _vmstats_inc(VMSTAT_PAGE_FAULT_ZERO); /* STATS */
                        }
                    } else {
                        paddr = getppages(1);
                        if (paddr == 0)
                        {
                            splx(spl);
                            return ENOMEM;
                        }
                        _vmstats_inc(VMSTAT_PAGE_FAULT_ZERO); /* STATS */
                    }
                    pg->paddr = paddr;
                }
                else
                {
                    paddr = pg->paddr;
                    
                    _vmstats_inc(VMSTAT_TLB_RELOAD); /* STATS */
                      
                }
		break;
	    default:
		splx(spl);
		return EINVAL;
	}

        splx(spl);

        _vmstats_inc(VMSTAT_TLB_FAULT);

        if (wr_to == 1 || (segment == 2) || (first_code_read == 1)) {
            
            lock_acquire(tlb.tlb_lock);
        
            if (first_code_read && faultaddress >= vbase1 && faultaddress < vtop1) {

               for (i=0; i<NUM_TLB; i++) {
                    TLB_Read(&ehi, &elo, i);

                    if (!(elo & TLBLO_VALID)) {
                            continue;
                    }

                    if (ehi >= vbase1 && ehi < vtop1) {
                        elo &= ~TLBLO_DIRTY;
                    }
                    TLB_Write(ehi, elo, i);
                }

               probe = TLB_Probe(faultaddress,0);
               if (probe >= 0) {
                    first_code_read = 0;
                    code_write_nread = 0;

                    lock_release(tlb.tlb_lock);

                    vmstats_inc(VMSTAT_TLB_FAULT_FREE); /* STATS */

                    return 0;
               }
            }
            for (i=0; i<NUM_TLB; i++) {
                
                    TLB_Read(&ehi, &elo, i);
                    
                    
                    if (elo & TLBLO_VALID) {
                            continue;
                    }
                    ehi = faultaddress;

                    
                    if ((first_code_read && faultaddress >= vbase1 && faultaddress < vtop1) ||
                            ((code_write_nread == 0) && faultaddress >= vbase1 && faultaddress < vtop1)) {
                        
                            elo = paddr | TLBLO_VALID;

                        first_code_read = 0;
                        code_write_nread = 0;
                    }
                    else {
                        elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
                    }
                        TLB_Write(ehi, elo, i);
                    

                    DEBUG(DB_VM, "dumbvm: 0x%x -> 0x%x\n", faultaddress, paddr);
                    
                    lock_release(tlb.tlb_lock);
                    vmstats_inc(VMSTAT_TLB_FAULT_FREE); /* STATS */
                    return 0;
            }

            int victim = tlb_get_rr_victim();
            //kprintf("vm fault: got our victim, %d \n",victim);
            if (victim < 0 || victim >= NUM_TLB)
            {
                 lock_release(tlb.tlb_lock);
                return EFAULT;
            }

            ehi = faultaddress;
            
            if ((first_code_read && faultaddress >= vbase1 && faultaddress < vtop1) ||
                            ((code_write_nread == 0) && faultaddress >= vbase1 && faultaddress < vtop1)) {
                elo = paddr | TLBLO_VALID;

                first_code_read = 0;
                code_write_nread = 0;
            } else {
                elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
            }
                 TLB_Write(ehi, elo, victim);          
            
            
            lock_release(tlb.tlb_lock);
            vmstats_inc(VMSTAT_TLB_FAULT_REPLACE); /* STATS */
    } else {
        vmstats_inc(VMSTAT_TLB_FAULT_REPLACE);
    }
                return 0;

}
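Example #13 relies on a tlb_get_rr_victim() helper that is not shown. In the usual OS/161 assignment code this round-robin victim selector looks roughly like the sketch below (the static counter name is an assumption):

int
tlb_get_rr_victim(void)
{
	int victim;
	static unsigned int next_victim = 0;

	/* hand out TLB slots in strict round-robin order */
	victim = next_victim;
	next_victim = (next_victim + 1) % NUM_TLB;
	return victim;
}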