Example #1
void
tlb_flush(void)
{
    int i;

    /* Record one full-TLB invalidation (stat code 3, presumably VMSTAT_TLB_INVALIDATE). */
    vmstats_inc(3);
    for (i = 0; i < NUM_TLB; ++i) {
        tlb_invalidate(i);
    }
}
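The numeric arguments passed to vmstats_inc in Examples #1-#3 appear to correspond to the named VMSTAT_* constants used in Examples #5 and #7 (the comments in Example #8 suggest 1 = VMSTAT_TLB_FAULT_FREE and 2 = VMSTAT_TLB_FAULT_REPLACE, which would make the flush here 3 = VMSTAT_TLB_INVALIDATE). Assuming that mapping, a sketch of the same flush with the named constant is more self-documenting:

void
tlb_flush(void)
{
    int i;

    /* Assumed mapping: stat code 3 == VMSTAT_TLB_INVALIDATE. */
    vmstats_inc(VMSTAT_TLB_INVALIDATE);
    for (i = 0; i < NUM_TLB; ++i) {
        tlb_invalidate(i);
    }
}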
Example #2
int
tlb_get_rr_victim(void)
{
    int victim;
    static unsigned int next_victim = 0;

    /* Record a TLB replacement (stat code 2). */
    vmstats_inc(2);

    victim = next_victim;
    next_victim = (next_victim + 1) % NUM_TLB;
    return victim;
}
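The round-robin policy is easy to sanity-check outside the kernel. The following stand-alone harness is not part of the original code; it assumes NUM_TLB == 64 and re-implements the same rotation to verify that successive calls cycle through 0, 1, ..., NUM_TLB-1 and wrap around:

#include <assert.h>

#define NUM_TLB 64

static int
rr_victim(void)
{
    static unsigned int next_victim = 0;
    int victim = next_victim;

    next_victim = (next_victim + 1) % NUM_TLB;
    return victim;
}

int
main(void)
{
    int i;

    for (i = 0; i < 2 * NUM_TLB; i++) {
        assert(rr_victim() == i % NUM_TLB);
    }
    return 0;
}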
Example #3
/*
 * Callers should TLB_Probe() for the address first; only if the probe
 * returns -1 (no existing mapping) should this function be used to pick
 * a slot for the new entry.
 */
int
tlb_getslot(void)
{  
   int i;
   int victim;
   u_int32_t ehi, elo;

   /* Prefer a free (invalid) TLB entry. */
   for (i = 0; i < NUM_TLB; ++i) {
      TLB_Read(&ehi, &elo, i);
      if (elo & TLBLO_VALID) {
         continue;
      }
      vmstats_inc(1);   /* fault handled with a free entry (stat code 1) */
      return i;
   }

   /* TLB is full: evict a round-robin victim. */
   victim = tlb_get_rr_victim();
   tlb_invalidate(victim);
   return victim;
}
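A typical caller combines tlb_getslot with TLB_Write. The helper below is only a sketch of such a caller (tlb_insert, page_vaddr, frame_paddr and writeable are hypothetical names; the TLBLO_VALID/TLBLO_DIRTY usage follows the other examples in this collection):

static void
tlb_insert(vaddr_t page_vaddr, paddr_t frame_paddr, int writeable)
{
   int slot = tlb_getslot();   /* a free slot, or a freshly invalidated victim */
   u_int32_t ehi = page_vaddr & PAGE_FRAME;
   u_int32_t elo = frame_paddr | TLBLO_VALID;

   if (writeable) {
      elo |= TLBLO_DIRTY;      /* on the MIPS TLB, DIRTY acts as the write-enable bit */
   }
   TLB_Write(ehi, elo, slot);
}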
Example #4
void
as_activate(struct addrspace *as)
{
	int i, spl;

	/* Remember the previously activated address space so the TLB flush
	 * can be skipped when the same space is re-activated. */
	static struct addrspace *old_as = NULL;

	spl = splhigh();
#if OPT_A3
	/* Only flush when switching to a different address space. */
	if (old_as != as) {
		for (i=0; i<NUM_TLB; i++) {
			TLB_Write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
		}
		vmstats_inc(3);	/* full-TLB invalidation */
	}
#else
	for (i=0; i<NUM_TLB; i++) {
		TLB_Write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
	}
#endif

	
	/* Remember this address space for the next activation. */
	old_as = as;

	splx(spl);
}
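The effect of the static old_as check in the OPT_A3 branch is that re-activating the same address space does not flush the TLB. This stand-alone harness (not OS/161 code; flush_count and the fake address-space pointers are purely illustrative) demonstrates that behaviour:

#include <assert.h>
#include <stddef.h>

static int flush_count = 0;
static void *old_as = NULL;

static void
activate(void *as)
{
    if (old_as != as) {
        flush_count++;      /* stands in for the NUM_TLB invalidation loop */
    }
    old_as = as;
}

int
main(void)
{
    int a, b;               /* stand-ins for two address spaces */

    activate(&a);
    activate(&a);           /* same space again: no flush */
    activate(&b);
    assert(flush_count == 2);
    return 0;
}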
Example #5
int
vm_fault(int faulttype, vaddr_t faultaddress)
{
	vaddr_t vbase1, vtop1, vbase2, vtop2, stackbase, stacktop;
	paddr_t paddr;
	int i;
	u_int32_t ehi, elo;
	struct addrspace *as = curthread->t_vmspace;
	int spl;
    int result;
    int probe;
    int p_i;

    /* first_read is nonzero when this fault repeats the previous fault address. */
    if (first_v != faultaddress) {
        first_v = faultaddress;
        first_read = 0;
    } else {
        first_read = 1;
    }

	spl = splhigh();
    
    
	faultaddress &= PAGE_FRAME;
    
	DEBUG(DB_VM, "dumbvm: fault: 0x%x\n", faultaddress);
    
    
	if (as == NULL) {
		/*
		 * No address space set up. This is probably a kernel
		 * fault early in boot. Return EFAULT so as to panic
		 * instead of getting into an infinite faulting loop.
		 */
		return EFAULT;
	}
    
	vbase1 = as->as_vbase1;
	vtop1 = vbase1 + as->as_npages1 * PAGE_SIZE;
	vbase2 = as->as_vbase2;
	vtop2 = vbase2 + as->as_npages2 * PAGE_SIZE;
	stackbase = USERSTACK - DUMBVM_STACKPAGES * PAGE_SIZE;
	stacktop = USERSTACK;
    
    p_i = faultaddress/PAGE_SIZE;
    
    int segment;    /* -1 = invalid, 0 = code, 1 = data, 2 = stack */
    size_t seg_size;
    size_t f_size;
    off_t offset;
    int flags;
    
    // Determine in which segment page lies,
    // Set file properties
    struct page *pg;
    if (p_i < 0) {
        panic("addrspace: invalid page table index\n"); /* needs proper exception handling */
    } else if (faultaddress >= vbase1 && faultaddress < vtop1) {         /* code segment */
        pg = (struct page *)array_getguy(as->useg1, (faultaddress - vbase1)/PAGE_SIZE);
        segment = 0;
        seg_size = as->as_npages1;
        f_size = as->as_filesz1;
        offset = as->as_off1;
        flags = as->flag1;
        p_i = (faultaddress - vbase1)/PAGE_SIZE;
    } else if (faultaddress >= vbase2 && faultaddress < vtop2) {         /* data segment */
        pg = (struct page *)array_getguy(as->useg2, (faultaddress - vbase2)/PAGE_SIZE);
        segment = 1;
        seg_size = as->as_npages2;
        f_size = as->as_filesz2;
        offset = as->as_off2;
        flags = as->flag2;
        p_i = (faultaddress - vbase2)/PAGE_SIZE;
    } else if (faultaddress >= stackbase && faultaddress < stacktop) {   /* stack segment */
        pg = (struct page *)array_getguy(as->usegs, (faultaddress - stackbase)/PAGE_SIZE);
        segment = 2;
        seg_size = DUMBVM_STACKPAGES;
        f_size = 0;
        flags = RWE;
        p_i = (faultaddress - stackbase)/PAGE_SIZE;  /* index relative to stackbase, matching the lookup above */
    } else {
        segment = -1;
        splx(spl);
        return EFAULT;
    }
    
    int wr_to = 0;              /* set when the new TLB entry should be writeable */
    int err;
    int f_amount = PAGE_SIZE;   /* number of bytes to read from the executable for this page */
    
    // Handling TLB miss fault type
	switch (faulttype) {
	    case VM_FAULT_READONLY:
                /* Write attempt to a read-only page: kill the current process. */
                err = EFAULT;
                _exit(0, &err);
                break;

	    case VM_FAULT_READ:
                if (!pg->valid) // page is not in memory
                {
                    pg->valid = 1;
                    pg->vaddr = faultaddress;
                    
                    
                    if(f_size != 0) {
                        
                        paddr = getppages(1);
                        if (paddr == 0) {
                            splx(spl);
                            return ENOMEM;
                        }
                        
                        code_write_nread = 1; // reading from code
                        pg->paddr = paddr;
                        if (segment != 2) {
                            
                            if (f_size < f_amount)
                                f_amount = f_size;
                            
                            splx(spl);
                                result = load_each_segment(as->v, offset+(p_i*PAGE_SIZE), faultaddress, paddr, PAGE_SIZE, f_amount, flags & E_ONLY, 0);
                            spl = splhigh();

                            if (result) {
                                splx(spl);
                                return result;
                            }
                            _vmstats_inc(VMSTAT_PAGE_FAULT_DISK);
                            _vmstats_inc(VMSTAT_ELF_FILE_READ);
                            
                        } else {
                            _vmstats_inc(VMSTAT_PAGE_FAULT_ZERO); /* STATS */
                        }
                        
                        if (segment == 0 && first_code_read == 0)
                            first_code_read = 1;
                    
                    } else {
                        paddr = getppages(1);
                        if (paddr == 0) {
                            splx(spl);
                            return ENOMEM;
                        }
                        if (segment == 0 && first_code_read == 0)
                            first_code_read = 1;
                        
                        _vmstats_inc(VMSTAT_PAGE_FAULT_ZERO); /* STATS */
                    }
                    
                    pg->paddr = paddr;

                }
                else // page is in memory
                {
                    if (segment == 0 && first_code_read == 0)
                        first_code_read = 1;
                    wr_to = 1;
                    paddr = pg->paddr;
                    
                    _vmstats_inc(VMSTAT_TLB_RELOAD); /* STATS */
                }
    
            break;
            
	    case VM_FAULT_WRITE:
                
                wr_to = 1;
                
                if (!pg->valid)
                {
                    pg->valid = 1;
                    pg->vaddr = faultaddress;
                    
                    
                    if(f_size != 0) {
                        
                        if (second_write == 0 && segment == 1) {
                            first_read = 0;
                            paddr = getppages(1);
                            second_write = 1;
                            wr_to = 0;
                        } else {
                            first_read = 1;
                            paddr = getppages(1);
                        }
                        
                        if (paddr == 0) {
                            splx(spl);
                            return ENOMEM;
                        }
                        pg->paddr = paddr;
                        
                        if (segment != 2) {
                            
                            if (f_size < f_amount)
                                f_amount = f_size;
                            
                            splx(spl);

                            result = load_each_segment(as->v, offset+(p_i*PAGE_SIZE), faultaddress, paddr, PAGE_SIZE, f_amount, flags & E_ONLY, first_read);
                            spl = splhigh();

                            if (result) {
                                /* tlb_lock is not held yet at this point */
                                splx(spl);
                                return result;
                            }
                                
                            _vmstats_inc(VMSTAT_PAGE_FAULT_DISK);
                            _vmstats_inc(VMSTAT_ELF_FILE_READ);
                            
                        } else {
                            _vmstats_inc(VMSTAT_PAGE_FAULT_ZERO); /* STATS */
                        }
                    } else {
                        paddr = getppages(1);
                        if (paddr == 0) {
                            splx(spl);
                            return ENOMEM;
                        }
                        _vmstats_inc(VMSTAT_PAGE_FAULT_ZERO); /* STATS */
                    }
                    pg->paddr = paddr;
                }
                else
                {
                    paddr = pg->paddr;
                    
                    _vmstats_inc(VMSTAT_TLB_RELOAD); /* STATS */
                      
                }
		break;
	    default:
		return EINVAL;
	}

        splx(spl);

        _vmstats_inc(VMSTAT_TLB_FAULT);

        if (wr_to == 1 || (segment == 2) || (first_code_read == 1)) {
            
            lock_acquire(tlb.tlb_lock);
        
            if (first_code_read && faultaddress >= vbase1 && faultaddress < vtop1) {

               for (i=0; i<NUM_TLB; i++) {
                    TLB_Read(&ehi, &elo, i);

                    if (!(elo & TLBLO_VALID)) {
                            continue;
                    }

                    if (ehi >= vbase1 && ehi < vtop1) {
                        elo &= ~TLBLO_DIRTY;
                    }
                    TLB_Write(ehi, elo, i);
                }

               probe = TLB_Probe(faultaddress,0);
               if (probe >= 0) {
                    first_code_read = 0;
                    code_write_nread = 0;

                    lock_release(tlb.tlb_lock);

                    vmstats_inc(VMSTAT_TLB_FAULT_FREE); /* STATS */

                    return 0;
               }
            }
            /* Look for a free (invalid) TLB entry. */
            for (i=0; i<NUM_TLB; i++) {
                TLB_Read(&ehi, &elo, i);
                if (elo & TLBLO_VALID) {
                    continue;
                }
                ehi = faultaddress;

                /* Text-segment pages are mapped read-only (no DIRTY bit). */
                if ((first_code_read && faultaddress >= vbase1 && faultaddress < vtop1) ||
                        ((code_write_nread == 0) && faultaddress >= vbase1 && faultaddress < vtop1)) {
                    elo = paddr | TLBLO_VALID;
                    first_code_read = 0;
                    code_write_nread = 0;
                }
                else {
                    elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
                }
                TLB_Write(ehi, elo, i);

                DEBUG(DB_VM, "dumbvm: 0x%x -> 0x%x\n", faultaddress, paddr);

                lock_release(tlb.tlb_lock);
                vmstats_inc(VMSTAT_TLB_FAULT_FREE); /* STATS */
                return 0;
            }

            int victim = tlb_get_rr_victim();
            //kprintf("vm fault: got our victim, %d \n",victim);
            if (victim < 0 || victim >= NUM_TLB)
            {
                 lock_release(tlb.tlb_lock);
                return EFAULT;
            }

            ehi = faultaddress;

            if ((first_code_read && faultaddress >= vbase1 && faultaddress < vtop1) ||
                    ((code_write_nread == 0) && faultaddress >= vbase1 && faultaddress < vtop1)) {
                elo = paddr | TLBLO_VALID;
                first_code_read = 0;
                code_write_nread = 0;
            } else {
                elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
            }
            TLB_Write(ehi, elo, victim);

            lock_release(tlb.tlb_lock);
            vmstats_inc(VMSTAT_TLB_FAULT_REPLACE); /* STATS */
        } else {
            vmstats_inc(VMSTAT_TLB_FAULT_REPLACE);
        }

        return 0;
}
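The entry construction above (VALID always set, DIRTY withheld for read-only code pages) is repeated at three TLB_Write sites in this vm_fault. A small helper makes the intent explicit; this is only a sketch, not part of the original code, and make_tlb_elo/read_only are hypothetical names:

static u_int32_t
make_tlb_elo(paddr_t paddr, int read_only)
{
    u_int32_t elo = paddr | TLBLO_VALID;

    if (!read_only) {
        elo |= TLBLO_DIRTY;   /* DIRTY is the hardware write-enable bit */
    }
    return elo;
}

Each TLB_Write site could then compute a single read_only flag from its segment checks and call make_tlb_elo(paddr, read_only) instead of duplicating the flag logic.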
Example #6
paddr_t
pte_reload(struct pte *pte, int offset)
{
	paddr_t paddr;

	vmstats_inc(VMSTAT_TLB_RELOAD);

	/* Translate the address based on the offset within the page */
	paddr = pte->paddr + offset;
	return paddr;
}
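pte_reload expects the byte offset within the page, which a caller would normally derive by masking the page-frame bits off the faulting address. A minimal usage sketch (pte_translate is a hypothetical name; PAGE_FRAME is the page mask used throughout these examples):

static paddr_t
pte_translate(struct pte *pte, vaddr_t faultaddress)
{
	int offset = faultaddress & ~PAGE_FRAME;   /* low bits: offset within the page */

	return pte_reload(pte, offset);
}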
Example #7
/* Each thread makes some calls to vmstats functions */
static
void
vmstats_thread(void *junk, unsigned long num)
{
	int i;
	int j;
	(void)num;
	(void)junk;

	for (i=0; i<NTESTLOOPS; i++) {
    for (j=0; j<VMSTAT_COUNT; j++) {
        /* NOTE: The number of calls to vmstats_inc below have been manipulated
         * so the checks during printing add up properly and pass the various tests
         */
        switch(j) {
          /* Need twice as many TLB faults */
          case VMSTAT_TLB_FAULT:
            vmstats_inc(j);
            vmstats_inc(j);
            break;

          case VMSTAT_TLB_FAULT_FREE:
            vmstats_inc(j);
            break;

          case VMSTAT_TLB_FAULT_REPLACE:
            vmstats_inc(j);
            break;

          /* Just reduce these to compare (not necessary) */
          case VMSTAT_TLB_INVALIDATE:
            if (i % 2 == 0) {
               vmstats_inc(j);
            }
            break;

          case VMSTAT_TLB_RELOAD:
            vmstats_inc(j);
            break;

          /* VMSTAT_TLB_FAULT = VMSTAT_TLB_RELOAD + VMSTAT_PAGE_FAULT_DISK + VMSTAT_PAGE_FAULT_ZERO */
          case VMSTAT_PAGE_FAULT_ZERO:
            if (i % 2 == 0) {
               vmstats_inc(j);
            }
            break;

          /* VMSTAT_PAGE_FAULT_DISK = VMSTAT_ELF_FILE_READ + VMSTAT_SWAP_FILE_READ */
          case VMSTAT_PAGE_FAULT_DISK:
            if (i % 2 == 0) {
               vmstats_inc(j);
            }
            break;

          case VMSTAT_ELF_FILE_READ:
            if (i % 4 == 0) {
               vmstats_inc(j);
            }
            break;

          case VMSTAT_SWAP_FILE_READ:
            if (i % 4 == 0) {
               vmstats_inc(j);
            }
            break;

          case VMSTAT_SWAP_FILE_WRITE:
            if (i % 8 == 0) {
               vmstats_inc(j);
            }
            break;

          default:
            kprintf("Unknown stat %d\n", j);
            break;
      }
    }
	}

	V(donesem);
	thread_exit();
}
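This worker assumes a driver that creates donesem, forks the threads, and then waits for all of them. A typical OS/161-style driver would look roughly like the sketch below (vmstats_test and NTESTTHREADS are placeholder names; sem_create, thread_fork, P/V and strerror are the usual OS/161 primitives):

static
void
vmstats_test(void)
{
	int i, err;

	donesem = sem_create("vmstats_donesem", 0);
	if (donesem == NULL) {
		panic("vmstats_test: sem_create failed\n");
	}
	for (i = 0; i < NTESTTHREADS; i++) {
		err = thread_fork("vmstats_thread", NULL, i, vmstats_thread, NULL);
		if (err) {
			panic("vmstats_test: thread_fork failed: %s\n", strerror(err));
		}
	}
	/* Wait for every worker to finish before printing the stats. */
	for (i = 0; i < NTESTTHREADS; i++) {
		P(donesem);
	}
	sem_destroy(donesem);
}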
Example #8
int
vm_fault(int faulttype, vaddr_t faultaddress)
{

	vaddr_t vbase1, vtop1, vbase2, vtop2, stackbase, stacktop;
	paddr_t paddr;
	int i;
	u_int32_t ehi, elo;
	struct addrspace *as;
	int spl;
#if OPT_A3
	int flag = 0; // stays 0 if no free TLB entry is found (i.e. the TLB is full)
#endif
	spl = splhigh();

	faultaddress &= PAGE_FRAME;

	DEBUG(DB_VM, "dumbvm: fault: 0x%x\n", faultaddress);

	switch (faulttype) {
	    case VM_FAULT_READONLY:
		/* We always create pages read-write, so we can't get this */
		panic("dumbvm: got VM_FAULT_READONLY\n");
	    case VM_FAULT_READ:
	    case VM_FAULT_WRITE:
		break;
	    default:
		splx(spl);
		return EINVAL;
	}

	as = curthread->t_vmspace;
	if (as == NULL) {
		/*
		 * No address space set up. This is probably a kernel
		 * fault early in boot. Return EFAULT so as to panic
		 * instead of getting into an infinite faulting loop.
		 */
		return EFAULT;
	}

	/* Assert that the address space has been set up properly. */
	assert(as->as_vbase1 != 0);
	assert(as->as_pbase1 != 0);
	assert(as->as_npages1 != 0);
	assert(as->as_vbase2 != 0);
	assert(as->as_pbase2 != 0);
	assert(as->as_npages2 != 0);
	assert(as->as_stackpbase != 0);
	assert((as->as_vbase1 & PAGE_FRAME) == as->as_vbase1);
	assert((as->as_pbase1 & PAGE_FRAME) == as->as_pbase1);
	assert((as->as_vbase2 & PAGE_FRAME) == as->as_vbase2);
	assert((as->as_pbase2 & PAGE_FRAME) == as->as_pbase2);
	assert((as->as_stackpbase & PAGE_FRAME) == as->as_stackpbase);

	vbase1 = as->as_vbase1;
	vtop1 = vbase1 + as->as_npages1 * PAGE_SIZE;
	vbase2 = as->as_vbase2;
	vtop2 = vbase2 + as->as_npages2 * PAGE_SIZE;
	stackbase = USERSTACK - DUMBVM_STACKPAGES * PAGE_SIZE;
	stacktop = USERSTACK;
	int test = 0; // set for faults in the text segment, which is mapped without TLBLO_DIRTY
	
	if (faultaddress >= vbase1 && faultaddress < vtop1) {
		paddr = (faultaddress - vbase1) + as->as_pbase1;
		test=1;
	}
	else if (faultaddress >= vbase2 && faultaddress < vtop2) {
		paddr = (faultaddress - vbase2) + as->as_pbase2;
		
	}
	else if (faultaddress >= stackbase && faultaddress < stacktop) {
		paddr = (faultaddress - stackbase) + as->as_stackpbase;
	}
	else {
		splx(spl);
		return EFAULT;
	}

	/* make sure it's page-aligned */
	assert((paddr & PAGE_FRAME)==paddr);
	
	for (i=0; i<NUM_TLB; i++) {
		TLB_Read(&ehi, &elo, i);
		if (elo & TLBLO_VALID) {
			continue;
		}
		ehi = faultaddress;
		if (test == 1) {
			elo = paddr | TLBLO_VALID;	/* text segment: read-only mapping */
		}
		else {
			elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
		}
		DEBUG(DB_VM, "dumbvm: 0x%x -> 0x%x\n", faultaddress, paddr);
#if OPT_A3
		flag = 1;
#endif
		TLB_Write(ehi, elo, i);
		vmstats_inc(1); // TLB fault handled with a free entry
		vmstats_inc(0); // total TLB faults
		splx(spl);
		return 0;
	}
	
#if OPT_A3
	/* No free entry was found, so evict a round-robin victim. */
	if (flag == 0) {
		ehi = faultaddress;
		if (test == 1) {
			elo = paddr | TLBLO_VALID;
		}
		else {
			elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
		}
		DEBUG(DB_VM, "dumbvm: 0x%x -> 0x%x\n", faultaddress, paddr);

		TLB_Write(ehi, elo, tlb_get_rr_victim());
		vmstats_inc(2); // TLB fault handled by replacement
		vmstats_inc(0); // total TLB faults
		splx(spl);
		return 0;
	}

	kprintf("dumbvm: Ran out of TLB entries - cannot handle page fault\n");
	splx(spl);
	return EFAULT;

#else 
	kprintf("dumbvm: Ran out of TLB entries - cannot handle page fault\n");
	splx(spl);
	return EFAULT;
	
#endif
}
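Because the loop above returns as soon as it writes a free entry, control only reaches the code after it when the TLB is full; flag is therefore always 0 there and the trailing "Ran out of TLB entries" path under OPT_A3 is unreachable. A trimmed sketch of the replacement path, keeping the original numeric stat codes and assuming the same OPT_A3 setup:

#if OPT_A3
	/* TLB full: overwrite a round-robin victim. */
	ehi = faultaddress;
	if (test == 1) {
		elo = paddr | TLBLO_VALID;               /* text segment: read-only */
	} else {
		elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
	}
	TLB_Write(ehi, elo, tlb_get_rr_victim());
	vmstats_inc(2); /* replacement */
	vmstats_inc(0); /* total TLB faults */
	splx(spl);
	return 0;
#else
	kprintf("dumbvm: Ran out of TLB entries - cannot handle page fault\n");
	splx(spl);
	return EFAULT;
#endif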