void
as_activate(void)
{
    struct addrspace *as;
    int i, spl;

    as = proc_getas();
    if (as == NULL) {
        /*
         * Kernel thread without an address space; leave the
         * prior address space in place.
         */
        return;
    }

    /*
     * There is an address space: invalidate the whole TLB, since
     * entries from the previously active address space may remain.
     */
    spl = splhigh();

    for (i=0; i<NUM_TLB; i++) {
        tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
    }

    splx(spl);
}
Example #2
/*
 * tlb_invalidate: marks a given tlb entry as invalid.
 *
 * Synchronization: assumes we hold coremap_spinlock. Does not block.
 */
static
void
tlb_invalidate(int tlbix)
{
	uint32_t elo, ehi;
	paddr_t pa;
	unsigned cmix;

	KASSERT(spinlock_do_i_hold(&coremap_spinlock));

	tlb_read(&ehi, &elo, tlbix);
	if (elo & TLBLO_VALID) {
		pa = elo & TLBLO_PPAGE;
		cmix = PADDR_TO_COREMAP(pa);
		KASSERT(cmix < num_coremap_entries);
		KASSERT(coremap[cmix].cm_tlbix == tlbix);
		KASSERT(coremap[cmix].cm_cpunum == curcpu->c_number);
		coremap[cmix].cm_tlbix = -1;
		coremap[cmix].cm_cpunum = 0;
		DEBUG(DB_TLB, "... pa 0x%05lx --> tlb --\n", 
			(unsigned long) COREMAP_TO_PADDR(cmix));
	}

	tlb_write(TLBHI_INVALID(tlbix), TLBLO_INVALID(), tlbix);
	DEBUG(DB_TLB, "... pa ------- <-- tlb %d\n", tlbix);
}
Example #3
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	int oldpid, newpid;
	signed long idx;

	if (!cpu_context(cpu, vma->vm_mm))
		return;

	newpid = cpu_asid(cpu, vma->vm_mm);
	page &= PAGE_MASK;
	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	write_c0_vaddr(page);
	write_c0_entryhi(newpid);
	tlb_probe();
	idx = read_c0_tlbset();
	if (idx < 0)
		goto finish;

	write_c0_entrylo(0);
	write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
	tlb_write();

finish:
	write_c0_entryhi(oldpid);
	local_irq_restore(flags);
}
Example #4
/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what is needed.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int pid;

	/*
	 * Handle the debugger faulting in pages for the debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	pid = read_c0_entryhi() & ASID_MASK;

	local_irq_save(flags);
	address &= PAGE_MASK;
	write_c0_vaddr(address);
	write_c0_entryhi(pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	pmdp = pmd_offset(pgdp, address);
	ptep = pte_offset_map(pmdp, address);
	tlb_probe();

	write_c0_entrylo(pte_val(*ptep++) >> 6);
	tlb_write();

	write_c0_entryhi(pid);
	local_irq_restore(flags);
}
Example #5
void
vm_tlbshootdown_all(void)
{
	int i, spl;

	/* Disable interrupts on this CPU while frobbing the TLB. */
	spl = splhigh();
	for (i=0; i<NUM_TLB; i++) {
		tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
	}
	splx(spl);
}
Example #6
void vm_flushtlb(void) {
	int i;

	spinlock_acquire(&tlb_lock);	// lock the TLB while we mess with it
	for (i=0; i<NUM_TLB; i++) {
		tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);	// invalidate all TLB entries
	}
	spinlock_release(&tlb_lock);
}
Example #7
void
vm_tlbflush_all(void)
{
	int i, spl;

	spl = splhigh();

	for (i=0; i<NUM_TLB; i++) {
		tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
	}
	splx(spl);
}
Example #8
void
vm_tlbflush(vaddr_t target)
{
    int spl;
    int index;

    spl = splhigh();
    index = tlb_probe(target & PAGE_FRAME, 0);
    if (index >= 0) {
        tlb_write(TLBHI_INVALID(index), TLBLO_INVALID(), index);
    }
    splx(spl);
}
Example #9
void vm_tlbshootdown_all(void)
{
	int i, spl;

	/* Disable interrupts on this CPU while frobbing the TLB. */
	spl = splhigh();

	for (i=0; i<NUM_TLB; i++) {
		tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
	}

	splx(spl);
}
Example #10
void freePage(struct page* page) {
	if(page->pt_state == PT_STATE_MAPPED) {
		coremap_freeuserpages(page->pt_pagebase * PAGE_SIZE);
		int spl = splhigh();
		int tlbpos = tlb_probe(page->pt_pagebase * PAGE_SIZE, 0);
		if (tlbpos >= 0) {
			tlb_write(TLBHI_INVALID(tlbpos), TLBLO_INVALID(), tlbpos);
		}
		splx(spl);
	} else if(page->pt_state == PT_STATE_SWAPPED) {
		swapfree(page);
	}
}
Example #11
/*
 * mmu_map: Enter a translation into the MMU. (This is the end result
 * of fault handling.)
 *
 * Synchronization: Takes coremap_spinlock. Does not block.
 */
void
mmu_map(struct addrspace *as, vaddr_t va, paddr_t pa, int writable)
{
	int tlbix;
	uint32_t ehi, elo;
	unsigned cmix;
	
	KASSERT(pa/PAGE_SIZE >= base_coremap_page);
	KASSERT(pa/PAGE_SIZE - base_coremap_page < num_coremap_entries);
	
	spinlock_acquire(&coremap_spinlock);

	KASSERT(as == curcpu->c_vm.cvm_lastas);

	cmix = PADDR_TO_COREMAP(pa);
	KASSERT(cmix < num_coremap_entries);

	/* Page must be pinned. */
	KASSERT(coremap[cmix].cm_pinned);

	tlbix = tlb_probe(va, 0);
	if (tlbix < 0) {
		KASSERT(coremap[cmix].cm_tlbix == -1);
		KASSERT(coremap[cmix].cm_cpunum == 0);
		tlbix = mipstlb_getslot();
		KASSERT(tlbix>=0 && tlbix<NUM_TLB);
		coremap[cmix].cm_tlbix = tlbix;
		coremap[cmix].cm_cpunum = curcpu->c_number;
		DEBUG(DB_TLB, "... pa 0x%05lx <-> tlb %d\n", 
			(unsigned long) COREMAP_TO_PADDR(cmix), tlbix);
	}
	else {
		KASSERT(tlbix>=0 && tlbix<NUM_TLB);
		KASSERT(coremap[cmix].cm_tlbix == tlbix);
		KASSERT(coremap[cmix].cm_cpunum == curcpu->c_number);
	}

	ehi = va & TLBHI_VPAGE;
	elo = (pa & TLBLO_PPAGE) | TLBLO_VALID;
	if (writable) {
		elo |= TLBLO_DIRTY;
	}

	tlb_write(ehi, elo, tlbix);

	/* Unpin the page. */
	coremap[cmix].cm_pinned = 0;
	wchan_wakeall(coremap_pinchan);

	spinlock_release(&coremap_spinlock);
}
Example #12
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();
	unsigned long flags;
	int oldpid, newpid, size;

	if (!cpu_context(cpu, mm))
		return;

	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;

	local_irq_save(flags);

	if (size > TFP_TLB_SIZE / 2) {
		drop_mmu_context(mm, cpu);
		goto out_restore;
	}

	oldpid = read_c0_entryhi();
	newpid = cpu_asid(cpu, mm);

	write_c0_entrylo(0);

	start &= PAGE_MASK;
	end += (PAGE_SIZE - 1);
	end &= PAGE_MASK;
	while (start < end) {
		signed long idx;

		write_c0_vaddr(start);
		write_c0_entryhi(start);
		start += PAGE_SIZE;
		tlb_probe();
		idx = read_c0_tlbset();
		if (idx < 0)
			continue;

		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
		tlb_write();
	}
	write_c0_entryhi(oldpid);

out_restore:
	local_irq_restore(flags);
}
Example #13
/* Shoot down the TLB entry, if any, for a given virtual address. */
void
tlb_shootdown_page_table_entry(vaddr_t va) {
  int i;
  uint32_t ehi, elo;
  KASSERT((va & PAGE_FRAME) == va); // va must be page-aligned
  spinlock_acquire(&stealmem_lock);
  for (i=0; i < NUM_TLB; i++) {
    tlb_read(&ehi, &elo, i);
    if ((ehi & TLBHI_VPAGE) == va) {
      tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
      break;
    }
  }
  spinlock_release(&stealmem_lock);
}
Example #14
/* TLB shootdown handling called from interprocessor_interrupt */
void vm_tlbshootdown_all(void)
{
	int spl;
	bool lock = get_coremap_spinlock();

	/* Disable interrupts on this CPU while frobbing the TLB. */
	spl = splhigh();

	/* Shoot down all the TLB entries. */
	for (int i = 0; i < NUM_TLB; i++) {
		tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
	}

	splx(spl);
	release_coremap_spinlock(lock);

}
Example #15
void vm_tlbshootdown(const struct tlbshootdown *ts)
{
	int i, spl;
	uint32_t ehi, elo;

	/* Disable interrupts on this CPU while frobbing the TLB. */
	spl = splhigh();

	for (i=0; i<NUM_TLB; i++) {
		tlb_read(&ehi, &elo, i);
		if (ehi == ts->ts_vaddr) {
			tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
			break;
		}
	}
	splx(spl);
}
Example #16
static void swaponepagein(int idx, struct addrspace* as) {
	(void) as;
	// 3. clear the page's TLB entry if present
	cm_setEntryDirtyState(COREMAP(idx),true);
	struct page* pg = findPageFromCoreMap(COREMAP(idx), idx);
	int spl = splhigh();
	int tlbpos = tlb_probe(pg->pt_pagebase * PAGE_SIZE, 0);
	if (tlbpos >= 0) {
		tlb_write(TLBHI_INVALID(tlbpos), TLBLO_INVALID(), tlbpos);
	}
	splx(spl);

	int swapPageindex = getOneSwapPage();
	kprintf("Swap out:\tswap= %x,\tpage=%x \n", swapPageindex, pg->pt_virtbase);
	struct iovec iov;
	struct uio kuio;
	iov.iov_kbase = (void*) PADDR_TO_KVADDR(cm_getEntryPaddr(idx));
	iov.iov_len = PAGE_SIZE; // length of the memory space
	kuio.uio_iov = &iov;
	kuio.uio_iovcnt = 1;
	kuio.uio_resid = PAGE_SIZE; // amount to write to the file
	kuio.uio_space = NULL;
	kuio.uio_offset = swap_map[swapPageindex].se_paddr * PAGE_SIZE;
	kuio.uio_segflg = UIO_SYSSPACE;
	kuio.uio_rw = UIO_WRITE;
	//kprintf("before write \n");
	// 4. write them to disk
	spinlock_release(&coremap_lock);
	int result = VOP_WRITE(swap_vnode, &kuio);
	spinlock_acquire(&coremap_lock);
	if (result) {
		panic("WRITE FAILED!\n");
	}
	cm_setEntryDirtyState(COREMAP(idx),false);
	//kprintf("write complete\n");

	pg->pt_state = PT_STATE_SWAPPED;
	pg->pt_pagebase = swap_map[swapPageindex].se_paddr;
}
Example #17
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo(0);

	for (entry = 0; entry < TFP_TLB_SIZE; entry++) {
		write_c0_tlbset(entry >> TFP_TLB_SET_SHIFT);
		write_c0_vaddr(entry << PAGE_SHIFT);
		write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
		mtc0_tlbw_hazard();
		tlb_write();
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}
Example #18
/* Usable for KV1 addresses only! */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;

	if (size > TFP_TLB_SIZE / 2) {
		local_flush_tlb_all();
		return;
	}

	local_irq_save(flags);

	write_c0_entrylo(0);

	start &= PAGE_MASK;
	end += (PAGE_SIZE - 1);
	end &= PAGE_MASK;
	while (start < end) {
		signed long idx;

		write_c0_vaddr(start);
		write_c0_entryhi(start);
		start += PAGE_SIZE;
		tlb_probe();
		idx = read_c0_tlbset();
		if (idx < 0)
			continue;

		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
		tlb_write();
	}

	local_irq_restore(flags);
}
Example #19
void vm_tlbshootdown(const struct tlbshootdown *ts)
{
	int tlb_entry, spl;

	/* Disable interrupts on this CPU while frobbing the TLB. */
	bool lock = get_coremap_spinlock();
	spl = splhigh();

	// Probe the TLB to see if the particular VA is present.
	tlb_entry = tlb_probe(VA_TO_VPF(ts->ts_vaddr), 0);
	if(tlb_entry < 0) {
		// No entry found, so the shootdown has already succeeded.
		splx(spl);
		release_coremap_spinlock(lock);	// don't leak the coremap lock on this path
		return;
	}

	// Invalidate the particular TLB entry.
	tlb_write(TLBHI_INVALID(tlb_entry), TLBLO_INVALID(), tlb_entry);

	splx(spl);
	release_coremap_spinlock(lock);
}
Example #20
void tlb_map(uint32_t epn, uint32_t rpn, uint32_t erpn, uint32_t flags,
    uint32_t perms)
{

	tlb_write(++tlb_static_entries, epn, rpn, erpn, 0, flags, perms);
}
Example #21
void *
sys_sbrk(intptr_t amount, int *err){

    vaddr_t retval = curproc->p_addrspace->heap_end;

    /* Reject absurdly large shrink requests (more than 1 GB). */
    if (amount <= -4096*1024*256) {
        *err = EINVAL;
        return (void *)-1;
    }

    if (curproc->p_addrspace->heap_end + amount < curproc->p_addrspace->heap_start)  {
        *err = EINVAL;
        return (void *)-1;
    }

    if (curproc->p_addrspace->heap_start + amount >= USERSTACK - 1024 * PAGE_SIZE)  {
        *err = ENOMEM;
        return (void *)-1;
    }

    /* amount must be a multiple of 4 */
    if (amount % 4 != 0) {
        *err = EINVAL;
        return (void *)-1;
    }

    int num_pages = (amount < 0) ? (amount * -1)/PAGE_SIZE : amount/PAGE_SIZE;

    if (amount % PAGE_SIZE != 0)
        num_pages++;

    struct page_table *proc_pg_table = curproc->p_addrspace->page_table_entry;
    struct page_table *prev_proc_pg_table;
    struct page_table *free_proc_pg_table;

    if(amount < 0) {
        for (int i = 1; i <= num_pages; i++) {

            // Page-table removal: walk the page table and free entries past the new heap end.

            proc_pg_table = curproc->p_addrspace->page_table_entry;
            prev_proc_pg_table = proc_pg_table;

            while(proc_pg_table != NULL) {
                if (proc_pg_table->vpn < USERSTACK - 1024 * PAGE_SIZE &&
                    proc_pg_table->vpn >= (curproc->p_addrspace->heap_end + amount)) {

                    prev_proc_pg_table->next = proc_pg_table->next;

                    free_proc_pg_table = proc_pg_table;
                    kfree((void *)PADDR_TO_KVADDR(free_proc_pg_table->ppn));
                    kfree(free_proc_pg_table);

                    break;
                }

                prev_proc_pg_table = proc_pg_table;
                proc_pg_table = proc_pg_table->next;
            }

        }

        // TLB cleanup code
        int spl;

        spl = splhigh();

        for (int i=0; i < NUM_TLB; i++) {
            tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
        }

        splx(spl);
    }

    curproc->p_addrspace->heap_end += amount;

    if (amount < 0)
        as_activate();

    return (void *)retval;
}
Example #22
int vm_fault(int faulttype, vaddr_t faultaddress) 
{
	bool lock = false;	// true if the coremap lock was acquired in this function
	struct addrspace *as = curthread->t_addrspace;
	// We always load TLB entries with writable bits set as soon as permissions
	// allow, so a read-only fault here is a genuine protection violation.
	if(faulttype == VM_FAULT_READONLY && as->use_permissions)
	{
		return EFAULT;
	}
	//Null pointer dereference
	if(faultaddress == 0x0)
	{
		return EFAULT;
	}
	//Align the fault address to a page (4k) boundary.
	faultaddress &= PAGE_FRAME;
	
	//Make sure the address is a valid user address (below KSEG0)
	if(faultaddress >= 0x80000000)
	{
		return EFAULT;
	}
	/*If we're trying to access a region after the end of the heap but 
	 * before the stack, that's invalid (unless load_elf is running) */
	if(as->loadelf_done && faultaddress < USER_STACK_LIMIT && faultaddress > as->heap_end)
	{
		return EFAULT;
	}
	
	//Translate....
	struct page_table *pt = pgdir_walk(as,faultaddress,false);
	int pt_index = VA_TO_PT_INDEX(faultaddress);
	int pfn = PTE_TO_PFN(pt->table[pt_index]);
	int permissions = PTE_TO_PERMISSIONS(pt->table[pt_index]);
	int swapped = PTE_TO_LOCATION(pt->table[pt_index]);
	struct page *page = NULL;

	/*If the PFN is 0, we might need to dynamically allocate
	on the stack or the heap */
	if(pfn == 0)
	{
		//Stack
		if(faultaddress < as->stack && faultaddress > USER_STACK_LIMIT)
		{
			as->stack -= PAGE_SIZE;
			lock = get_coremap_lock();
			page = page_alloc(as,as->stack, PF_RW);
			release_coremap_lock(lock);
		}
		//Heap
		else if(faultaddress < as->heap_end && faultaddress >= as->heap_start)
		{
			lock = get_coremap_lock();
			page = page_alloc(as,faultaddress, PF_RW);
			release_coremap_lock(lock);
		}
		//Static Segment(s)
		else if(faultaddress < as->heap_start && faultaddress >= as->static_start)
		{
			panic("code not loaded: %p",(void*) faultaddress);
			//TODO
			// page = page_alloc(as,faultaddress,PF_)
		}
		else
		{
			return EFAULT;
		}
	}

	/*We grew the stack and/or heap dynamically. Try translating again */
	pt = pgdir_walk(as,faultaddress,false);
	pt_index = VA_TO_PT_INDEX(faultaddress);
	pfn = PTE_TO_PFN(pt->table[pt_index]);
	permissions = PTE_TO_PERMISSIONS(pt->table[pt_index]);
	swapped = PTE_TO_LOCATION(pt->table[pt_index]);

	/* If we're swapped out, time to do some extra stuff. */
	while(swapped == PTE_SWAPPING)
	{
		// Busy wait for the swap to complete, since we cannot sleep in an interrupt
		thread_yield();
		pfn = PTE_TO_PFN(pt->table[pt_index]);
		permissions = PTE_TO_PERMISSIONS(pt->table[pt_index]);
		swapped = PTE_TO_LOCATION(pt->table[pt_index]);
	}

	// Swap completed; the page is on disk, so bring it back into memory
	if(swapped == PTE_SWAP)
	{
		lock = get_coremap_lock();
		page = page_alloc(as,faultaddress,permissions);
		/* Page now has a home in RAM. But set the swap bit to 1 so we can swap the page in*/
		pt->table[pt_index] |= PTE_SWAP;
		swapin_page(as,faultaddress,page);

		release_coremap_lock(lock);

		/* Page was swapped back in. Re-translate */
		pt = pgdir_walk(as,faultaddress,false);
		pt_index = VA_TO_PT_INDEX(faultaddress);
		pfn = PTE_TO_PFN(pt->table[pt_index]);
		permissions = PTE_TO_PERMISSIONS(pt->table[pt_index]);
		swapped = PTE_TO_LOCATION(pt->table[pt_index]);
	}

	//Page is writable if permissions say so or if we're ignoring permissions.
	bool writable = (permissions & PF_W) || !(as->use_permissions);

	//After the retranslation above, the PFN must be valid
	KASSERT(pfn > 0);
	KASSERT(pfn <= PAGE_SIZE * (int) page_count);

	uint32_t ehi,elo;

	/* Disable interrupts on this CPU while frobbing the TLB. */

	lock = get_coremap_spinlock();
	int spl = splhigh();

	// page is non-NULL only if we allocated or swapped it in above
	if(page != NULL)
	{
		KASSERT(page->state != SWAPPINGOUT);
		page->state = DIRTY;
	}

	for (int i=0; i<NUM_TLB; i++) {
		tlb_read(&ehi, &elo, i);

		if (elo & TLBLO_VALID) {
			// kprintf("Index %d in use\n",i);
			continue;
		}

		ehi = faultaddress;
		elo = pfn | TLBLO_VALID;

		if(writable)
		{
			elo |= TLBLO_DIRTY;
		}

		// kprintf("Writing TLB Index %d\n",i); 
		// DEBUG(DB_VM, "dumbvm: 0x%x -> 0x%x\n", faultaddress, pfn);
		tlb_write(ehi, elo, i);

		splx(spl);
		release_coremap_spinlock(lock);
		return 0;
	}

	/*If we get here, TLB was full. Kill an entry, round robin style*/
	ehi = faultaddress;
	elo = pfn | TLBLO_VALID;
	if(writable)
	{
		elo |= TLBLO_DIRTY;
	}
	tlb_write(ehi,elo,tlb_offering);
	tlb_offering++;
	if(tlb_offering == NUM_TLB)
	{
		//At the end of the TLB. Start back at 0 again.
		tlb_offering = 0;
	}

	splx(spl);
	release_coremap_spinlock(lock);
	return 0;
}
Example #23
int
vm_fault(int faulttype, vaddr_t faultaddress)
{
	vaddr_t stackbase, stacktop;
	paddr_t paddr = 0;
	int i;
	uint32_t ehi, elo;
	struct addrspace *as;
	int spl;

	faultaddress &= PAGE_FRAME;

	DEBUG(DB_VM, "dumbvm: fault: 0x%x\n", faultaddress);

	switch (faulttype) {
	case VM_FAULT_READONLY:
		/* We always create pages read-write, so we can't get this */
		panic("dumbvm: got VM_FAULT_READONLY\n");
	case VM_FAULT_READ:
	case VM_FAULT_WRITE:
		break;
	default:
		return EINVAL;
	}

	as = curthread->t_addrspace;
	if (as == NULL) {
		/*
		 * No address space set up. This is probably a kernel
		 * fault early in boot. Return EFAULT so as to panic
		 * instead of getting into an infinite faulting loop.
		 */
		return EFAULT;
	}

	/* Assert that the address space has been set up properly. */
	KASSERT(as->heap_end != 0);
	KASSERT(as->heap_start != 0);
	KASSERT(as->pages != NULL);
	KASSERT(as->stack != NULL);
	KASSERT(as->heap != NULL);
	KASSERT(as->regions != NULL);
	//KASSERT((as->heap_start & PAGE_FRAME) == as->heap_start);
	//KASSERT((as->heap_end & PAGE_FRAME) == as->heap_end);
	KASSERT((as->pages->vaddr & PAGE_FRAME) == as->pages->vaddr);

	stackbase = USERSTACK - DUMBVM_STACKPAGES * PAGE_SIZE;
	stacktop = USERSTACK;

	struct page_table_entry *pte;

	// TODO: also check heap addresses here
	if (faultaddress >= stackbase && faultaddress < stacktop) {
		pte = as->stack;
		while(pte!=NULL){
			if(faultaddress >= pte->vaddr && faultaddress < (pte->vaddr + PAGE_SIZE)){
				paddr = (faultaddress - pte->vaddr) + pte->paddr; // CHECK THIS
				break;
			}

			pte = pte->next;
		}

	}else {
		pte = as->pages;
		while(pte!=NULL){

			if(faultaddress >= pte->vaddr && faultaddress < (pte->vaddr + PAGE_SIZE)){
				paddr = (faultaddress - pte->vaddr) + pte->paddr;
				break;
			}

			pte = pte->next;
		}
	}

	if(paddr==0){
		return EFAULT;
	}

	/* make sure it's page-aligned */
	KASSERT((paddr & PAGE_FRAME) == paddr);

	/* Disable interrupts on this CPU while frobbing the TLB. */
	spl = splhigh();

	for (i=0; i<NUM_TLB; i++) {
		tlb_read(&ehi, &elo, i);
		if (elo & TLBLO_VALID) {
			continue;
		}
		ehi = faultaddress;
		elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
		DEBUG(DB_VM, "dumbvm: 0x%x -> 0x%x\n", faultaddress, paddr);
		tlb_write(ehi, elo, i);
		splx(spl);
		return 0;
	}

	ehi = faultaddress;
	elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
	DEBUG(DB_VM, "dumbvm: 0x%x -> 0x%x\n", faultaddress, paddr);
	tlb_random(ehi, elo);
	splx(spl);
	return 0;

	//	kprintf("dumbvm: Ran out of TLB entries - cannot handle page fault\n");
	//	splx(spl);
	//	return EFAULT;
	//(void)faulttype;
	//(void)faultaddress;
	//return 0;
}
Example #24
int
vm_fault(int faulttype, vaddr_t faultaddress)
{

	struct pte *target;
    uint32_t tlbhi, tlblo;
    int spl;

    struct addrspace* as = curproc->p_addrspace;

    int permission = as_check_region(as, faultaddress);
    

    if (permission < 0 // check if not in region
    	&& as_check_stack(as,faultaddress) // check if not in stack
    	&& as_check_heap(as,faultaddress)) // check if not in heap
        	return EFAULT;
    
    if(permission<0)
    	permission = READ | WRITE;


    target = pte_get(as,faultaddress & PAGE_FRAME);

    if(target==NULL) {
    	target = pt_alloc_page(as,faultaddress & PAGE_FRAME);
    }
	

    // Lock pagetable entry
    if(swap_enabled == true)
    	lock_acquire(target->pte_lock);


    
    if(target->in_memory == 0) {
    	// Page is allocated but not resident: pt_load_page() brings it back
    	// from swap, allocating a physical frame via the coremap.
    	target = pt_load_page(as, faultaddress & PAGE_FRAME);
    }

	KASSERT(target->in_memory != 0);
	KASSERT(target->paddr != 0);
	KASSERT(target->paddr != 1);
	
    tlbhi = faultaddress & PAGE_FRAME;
    tlblo = (target->paddr & PAGE_FRAME) | TLBLO_VALID;

    /* Adding CPU index to corresponding TLB entry */

    coremap[PADDR_TO_CM(target->paddr)].cpu = curcpu->c_number;    
    coremap[PADDR_TO_CM(target->paddr)].page = target;    
    coremap[PADDR_TO_CM(target->paddr)].accessed = 1;    
    int index;

    spl = splhigh();

    // TODO: apply region permissions to the TLB entry
    switch (faulttype) {
        case VM_FAULT_READ:
        case VM_FAULT_WRITE:
            tlb_random(tlbhi, tlblo);
            break;
        case VM_FAULT_READONLY:
            // A write hit an entry without TLBLO_DIRTY: mark the physical
            // page dirty and re-enter the mapping with write enabled.
            tlblo |= TLBLO_DIRTY;
            index = PADDR_TO_CM(target->paddr);
            KASSERT(target->in_memory != 0); // Someone swapped me out. Synchronization is broken.
            coremap[index].state = DIRTY;
            index = tlb_probe(faultaddress & PAGE_FRAME, 0);
            tlb_write(tlbhi, tlblo, index);
    }

    splx(spl);
    if(swap_enabled == true)
    	lock_release(target->pte_lock);

    return 0;

}