Example #1
int vm_fault(int faulttype, vaddr_t faultaddress)
{
	if (faultaddress == 0)
		return EFAULT;

	struct page_table_entry *pt_entry_temp2 =
		(struct page_table_entry *)kmalloc(sizeof(struct page_table_entry));
	if (pt_entry_temp2 == NULL)
		return ENOMEM;

	lock_acquire(coremap_lock);
	struct addrspace *curaddrspace = curthread->t_addrspace;
	faultaddress &= PAGE_FRAME;

	/* Check whether the fault address falls inside a valid region. */
	int i;
	for (i = 0; i < N_REGIONS; i++) {
		if (curaddrspace->regions[i] != NULL) {
			vaddr_t region_end = curaddrspace->regions[i]->region_start +
					(PAGE_SIZE * curaddrspace->regions[i]->npages);
			if (faultaddress >= curaddrspace->regions[i]->region_start &&
					faultaddress < region_end) {
				break;
			}
		}
	}

	if (i == N_REGIONS) {
		/* Not in any region: clean up before returning. */
		lock_release(coremap_lock);
		kfree(pt_entry_temp2);
		return EINVAL;
	}

	/* Check whether this is a write to a read-only region
	 * (0x4 = "r--", 0x5 = "r-x"; see the note below). */
	if ((faulttype == VM_FAULT_WRITE) &&
			(curaddrspace->regions[i]->permissions == 0x4 ||
			 curaddrspace->regions[i]->permissions == 0x5)) {
		lock_release(coremap_lock);
		kfree(pt_entry_temp2);
		return EINVAL;
	}

	uint32_t ehi, elo;
	ehi = faultaddress;
	paddr_t pa = page_fault(faultaddress, pt_entry_temp2);
	elo = pa | TLBLO_DIRTY | TLBLO_VALID;

	/* Disable interrupts while writing the TLB entry. */
	int spl = splhigh();
	tlb_random(ehi, elo);
	splx(spl);

	lock_release(coremap_lock);
	/* state 1000 is presumably a sentinel set by page_fault() when the
	 * temporary entry was not linked into the page table. */
	if (pt_entry_temp2->state == 1000)
		kfree(pt_entry_temp2);
	return 0;
}
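The magic numbers 0x4 and 0x5 in the read-only check above line up with ELF segment permission bits: in OS/161's elf.h, PF_R is 0x4, PF_W is 0x2, and PF_X is 0x1, so 0x4 is an "r--" region and 0x5 is "r-x". A minimal sketch of the same test with symbolic names, assuming the region's permissions field stores the raw ELF p_flags (that storage convention is an assumption, not something this example states):

#include <types.h>	/* bool */
#include <elf.h>	/* PF_R, PF_W, PF_X segment-flag bits */

/* Sketch: a write fault is a protection violation whenever the faulting
 * region's flags lack PF_W. This subsumes the 0x4 ("r--") and 0x5
 * ("r-x") cases tested above, plus any other write-protected combination. */
static inline bool region_is_readonly(int permissions)
{
	return (permissions & PF_W) == 0;
}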
Example #2
/* Fault handling function called by trap code */
int vm_fault(int faulttype, vaddr_t faultaddress) {
	struct addrspace* as = proc_getas();
	if (as == NULL) {
		return EFAULT;
	}
	struct region* reg = findRegionForFaultAddress(as, faultaddress);
	if (reg == NULL) {
		return EFAULT;
	}
	// TODO Check whether this is a permission violation and return an
	// appropriate error code in that case.

	// Get the page, allocating one on first touch.
	struct page* pg = findPageForFaultAddress(as, faultaddress);
	if (pg == NULL) {
		pg = page_create(as, faultaddress);
	}
	if (pg == NULL) {
		// page_create failed
		return EFAULT;
	}
	if (pg->pt_state == PT_STATE_SWAPPED) {
		// Page is on disk; swapout() reloads it into physical memory
		// and updates pt_pagebase before we map it below.
		swapout(as, pg);
	}

	// Load the translation into the TLB with interrupts disabled.
	int spl = splhigh();
	tlb_random(pg->pt_virtbase * PAGE_SIZE,
			(pg->pt_pagebase * PAGE_SIZE) | TLBLO_DIRTY | TLBLO_VALID);
	splx(spl);
	(void) faulttype;	// read and write faults are handled identically
	return 0;
}
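Example #2 leans on a findRegionForFaultAddress() helper that is not shown. A minimal sketch under assumed names (the struct fields and the as_regions list head are placeholders, not the project's real definitions); the logic mirrors the inline region loop from Example #1:

/* Hypothetical region layout, reconstructed from the call site. */
struct region {
	vaddr_t r_base;			/* page-aligned start of the region */
	size_t r_npages;		/* region length in pages */
	struct region *r_next;		/* next region in this address space */
};

static struct region *
findRegionForFaultAddress(struct addrspace *as, vaddr_t vaddr)
{
	/* Walk the region list the same way Example #1 scans its array. */
	for (struct region *r = as->as_regions; r != NULL; r = r->r_next) {
		if (vaddr >= r->r_base &&
				vaddr < r->r_base + r->r_npages * PAGE_SIZE) {
			return r;
		}
	}
	return NULL;	/* unmapped address: the caller returns EFAULT */
}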
Example #3
// handle page faults
int vm_fault(int faulttype, vaddr_t faultaddress) {
	
	(void)faulttype;

	uint32_t tlbhi;
	uint32_t tlblo;

	if (curthread->t_addrspace == NULL)	// kernel has page faulted, so return EFAULT, which will cause a panic (as it should)
		return EFAULT;

	faultaddress &= PAGE_FRAME;	// page-align the fault address

	struct page* pg = as_fault(curthread->t_addrspace, faultaddress);
	if (pg == NULL) {
		return EFAULT;
	}

	spinlock_acquire(&pg->pg_lock);
	int stat = pg->status;
	spinlock_release(&pg->pg_lock);

	if (stat == NOT_ALLOCD) {
		int err = page_alloc(pg);
		if (err)
			return err;
	}

	KASSERT((pg->ram_addr & PAGE_FRAME) == pg->ram_addr);
	KASSERT(pg->status == IN_MEM);

	spinlock_acquire(&pg->pg_lock);
	pg->is_dirty = 1;	// every entry is inserted writable, so assume dirty
	tlblo = (pg->ram_addr & TLBLO_PPAGE) | TLBLO_VALID | TLBLO_DIRTY;
	spinlock_release(&pg->pg_lock);

	tlbhi = faultaddress & TLBHI_VPAGE;

	spinlock_acquire(&tlb_lock);	// only one thread should be messing with the TLB at a time

	tlb_random(tlbhi, tlblo);

	// verify that the entry we just wrote actually landed in the TLB
	int probe = tlb_probe(tlbhi, 0);
	KASSERT(probe >= 0);

	spinlock_release(&tlb_lock);

	return 0;
}
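Example #3 never shows its struct page, but the fields it dereferences pin down the rough shape. A reconstruction from usage (the layout, types, and comments are assumptions, not the project's definition):

/* Reconstructed from the accesses in Example #3. */
struct page {
	struct spinlock pg_lock;	/* guards status, is_dirty, ram_addr */
	int status;			/* NOT_ALLOCD until page_alloc(), then IN_MEM */
	int is_dirty;			/* set on every fault, since entries are
					   always inserted with TLBLO_DIRTY */
	paddr_t ram_addr;		/* page-aligned physical address */
};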
Example #4
int
vm_fault(int faulttype, vaddr_t faultaddress)
{
	struct pte *target;
	uint32_t tlbhi, tlblo;
	int spl;

	struct addrspace *as = curproc->p_addrspace;

	int permission = as_check_region(as, faultaddress);

	/* The fault must land in a defined region, the stack, or the heap. */
	if (permission < 0				/* not in a region */
			&& as_check_stack(as, faultaddress)	/* not in the stack */
			&& as_check_heap(as, faultaddress))	/* not in the heap */
		return EFAULT;

	/* Stack and heap pages default to read/write. */
	if (permission < 0)
		permission = READ | WRITE;

	target = pte_get(as, faultaddress & PAGE_FRAME);
	if (target == NULL) {
		/* pt_alloc_page creates the page table entry if necessary and
		 * also allocates the page through the coremap. */
		target = pt_alloc_page(as, faultaddress & PAGE_FRAME);
	}

	/* Lock the page table entry. */
	if (swap_enabled == true)
		lock_acquire(target->pte_lock);

	if (target->in_memory == 0) {
		/* Page is allocated but not in memory; load it back in. */
		target = pt_load_page(as, faultaddress & PAGE_FRAME);
	}

	KASSERT(target->in_memory != 0);
	KASSERT(target->paddr != 0);
	KASSERT(target->paddr != 1);

	tlbhi = faultaddress & PAGE_FRAME;
	tlblo = (target->paddr & PAGE_FRAME) | TLBLO_VALID;

	/* Record in the coremap which CPU's TLB will hold this mapping. */
	coremap[PADDR_TO_CM(target->paddr)].cpu = curcpu->c_number;
	coremap[PADDR_TO_CM(target->paddr)].page = target;
	coremap[PADDR_TO_CM(target->paddr)].accessed = 1;
	int index;

	spl = splhigh();

	// TODO enforce the region permissions computed above
	switch (faulttype) {
	case VM_FAULT_READ:
	case VM_FAULT_WRITE:
		/* Entries are inserted without TLBLO_DIRTY, so the first store
		 * to the page raises VM_FAULT_READONLY, handled below. */
		tlb_random(tlbhi, tlblo);
		break;
	case VM_FAULT_READONLY:
		/* First write to a clean page: mark the physical page dirty
		 * and rewrite the existing TLB entry with the dirty bit set. */
		tlblo |= TLBLO_DIRTY;
		index = PADDR_TO_CM(target->paddr);
		KASSERT(target->in_memory != 0); /* someone swapped me out; synchronization is broken */
		coremap[index].state = DIRTY;

		index = tlb_probe(faultaddress & PAGE_FRAME, 0);
		tlb_write(tlbhi, tlblo, index);
		break;
	default:
		splx(spl);
		if (swap_enabled == true)
			lock_release(target->pte_lock);
		return EINVAL;
	}

	splx(spl);
	if (swap_enabled == true)
		lock_release(target->pte_lock);

	return 0;
}
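Example #4 computes permission but leaves enforcement as a TODO. A minimal sketch of the missing check, assuming READ and WRITE are the bit flags that as_check_region() returns (that flag encoding is an assumption); it would sit after the page table entry is locked and before the TLB is touched:

/* Sketch: fail a write fault against a read-only mapping. */
if (faulttype == VM_FAULT_WRITE && (permission & WRITE) == 0) {
	if (swap_enabled == true)
		lock_release(target->pte_lock);
	return EFAULT;
}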
Example #5
int
vm_fault(int faulttype, vaddr_t faultaddress)
{
	vaddr_t stackbase, stacktop;
	paddr_t paddr = 0;
	int i;
	uint32_t ehi, elo;
	struct addrspace *as;
	int spl;

	faultaddress &= PAGE_FRAME;

	DEBUG(DB_VM, "dumbvm: fault: 0x%x\n", faultaddress);

	switch (faulttype) {
	case VM_FAULT_READONLY:
		/* We always create pages read-write, so we can't get this */
		panic("dumbvm: got VM_FAULT_READONLY\n");
	case VM_FAULT_READ:
	case VM_FAULT_WRITE:
		break;
	default:
		return EINVAL;
	}

	as = curthread->t_addrspace;
	if (as == NULL) {
		/*
		 * No address space set up. This is probably a kernel
		 * fault early in boot. Return EFAULT so as to panic
		 * instead of getting into an infinite faulting loop.
		 */
		return EFAULT;
	}

	/* Assert that the address space has been set up properly. */
	KASSERT(as->heap_end != 0);
	KASSERT(as->heap_start != 0);
	KASSERT(as->pages != NULL);
	KASSERT(as->stack != NULL);
	KASSERT(as->heap != NULL);
	KASSERT(as->regions != NULL);
	KASSERT((as->pages->vaddr & PAGE_FRAME) == as->pages->vaddr);

	stackbase = USERSTACK - DUMBVM_STACKPAGES * PAGE_SIZE;
	stacktop = USERSTACK;

	struct page_table_entry *pte;

	// TODO: heap addresses should be checked here as well
	if (faultaddress >= stackbase && faultaddress < stacktop) {
		pte = as->stack;
		while(pte!=NULL){
			if(faultaddress >= pte->vaddr && faultaddress < (pte->vaddr + PAGE_SIZE)){
				paddr = (faultaddress - pte->vaddr) + pte->paddr; /* offset is 0: both addresses are page-aligned */
				break;
			}

			pte = pte->next;
		}

	}else {
		pte = as->pages;
		while(pte!=NULL){

			if(faultaddress >= pte->vaddr && faultaddress < (pte->vaddr + PAGE_SIZE)){
				paddr = (faultaddress - pte->vaddr) + pte->paddr;
				break;
			}

			pte = pte->next;
		}
	}

	if(paddr==0){
		return EFAULT;
	}

	/* make sure it's page-aligned */
	KASSERT((paddr & PAGE_FRAME) == paddr);

	/* Disable interrupts on this CPU while frobbing the TLB. */
	spl = splhigh();

	for (i=0; i<NUM_TLB; i++) {
		tlb_read(&ehi, &elo, i);
		if (elo & TLBLO_VALID) {
			continue;
		}
		ehi = faultaddress;
		elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
		DEBUG(DB_VM, "dumbvm: 0x%x -> 0x%x\n", faultaddress, paddr);
		tlb_write(ehi, elo, i);
		splx(spl);
		return 0;
	}

	ehi = faultaddress;
	elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
	DEBUG(DB_VM, "dumbvm: 0x%x -> 0x%x\n", faultaddress, paddr);
	tlb_random(ehi, elo);
	splx(spl);
	return 0;
}
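The page_table_entry that Example #5 walks is used through only three fields; its implied shape (an assumption based on those accesses, not the project's definition) is:

/* Implied by the list walks above. */
struct page_table_entry {
	vaddr_t vaddr;			/* page-aligned user virtual address */
	paddr_t paddr;			/* physical page backing it */
	struct page_table_entry *next;	/* singly linked chain */
};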