/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and works around it.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);

	local_irq_save(flags);
	address &= PAGE_MASK;
	write_c0_vaddr(address);
	write_c0_entryhi(pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	pmdp = pmd_offset(pgdp, address);
	ptep = pte_offset_map(pmdp, address);
	tlb_probe();

	write_c0_entrylo(pte_val(*ptep++) >> 6);
	tlb_write();

	write_c0_entryhi(pid);
	local_irq_restore(flags);
}
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	int oldpid, newpid;
	signed long idx;

	if (!cpu_context(cpu, vma->vm_mm))
		return;

	newpid = cpu_asid(cpu, vma->vm_mm);
	page &= PAGE_MASK;
	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	write_c0_vaddr(page);
	write_c0_entryhi(newpid);
	tlb_probe();
	idx = read_c0_tlbset();
	if (idx < 0)
		goto finish;

	write_c0_entrylo(0);
	write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
	tlb_write();

finish:
	write_c0_entryhi(oldpid);
	local_irq_restore(flags);
}
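The two functions above use the R8000/TFP-style coprocessor-0 interface (write_c0_vaddr(), read_c0_tlbset()); most of the later examples follow the generic R4k-style idiom instead: probe for the VPN/ASID pair, then overwrite the matching slot or let the hardware pick one. A minimal sketch of that idiom, assuming the standard Linux/MIPS CP0 accessors and hazard macros (the helper name and the simplified EntryLo handling are illustrative, not taken from any example here):

/*
 * Sketch only: probe for (address, asid) and either replace the matching
 * TLB slot in place or let the hardware choose a slot when there is no
 * match.  The caller is expected to have interrupts disabled.
 */
static void sketch_update_tlb(unsigned long address, unsigned long asid,
			      unsigned long lo0, unsigned long lo1)
{
	int idx;

	write_c0_entryhi((address & (PAGE_MASK << 1)) | asid);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	write_c0_entrylo0(lo0);
	write_c0_entrylo1(lo1);
	mtc0_tlbw_hazard();
	if (idx < 0)
		tlb_write_random();	/* no match: take any slot */
	else
		tlb_write_indexed();	/* match: replace it in place */
	tlbw_use_hazard();
}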
Example #3
void 
vm_tlbflush(vaddr_t target) 
{
    int spl;
    int index;

    spl = splhigh();
    index = tlb_probe(target & PAGE_FRAME, 0);
    if (index >= 0)
        tlb_write(TLBHI_INVALID(index), TLBLO_INVALID(), index);
    splx(spl);
}
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;
		unsigned long config6_flags;

		ENTER_CRITICAL(flags);
		disable_pgwalker(config6_flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= current_cpu_data.tlbsize/2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
#ifndef CONFIG_NLM_VMIPS
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
#else
				__write_64bit_c0_register($10, 0, (UNIQUE_VMIPS_ENTRYHI(idx)));
#endif
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		FLUSH_ITLB;
		enable_pgwalker(config6_flags);
		EXIT_CRITICAL(flags);
	}
}
void freePage(struct page* page) {
	if(page->pt_state == PT_STATE_MAPPED) {
		coremap_freeuserpages(page->pt_pagebase * PAGE_SIZE);
		int spl = splhigh();
		int tlbpos = tlb_probe(page->pt_virtbase * PAGE_SIZE, 0);
		if (tlbpos >= 0) {
			tlb_write(TLBHI_INVALID(tlbpos), TLBLO_INVALID(), tlbpos);
		}
		splx(spl);
	} else if(page->pt_state == PT_STATE_SWAPPED) {
		swapfree(page);
	}
}
Example #6
/*
 * mmu_map: Enter a translation into the MMU. (This is the end result
 * of fault handling.)
 *
 * Synchronization: Takes coremap_spinlock. Does not block.
 */
void
mmu_map(struct addrspace *as, vaddr_t va, paddr_t pa, int writable)
{
	int tlbix;
	uint32_t ehi, elo;
	unsigned cmix;
	
	KASSERT(pa/PAGE_SIZE >= base_coremap_page);
	KASSERT(pa/PAGE_SIZE - base_coremap_page < num_coremap_entries);
	
	spinlock_acquire(&coremap_spinlock);

	KASSERT(as == curcpu->c_vm.cvm_lastas);

	cmix = PADDR_TO_COREMAP(pa);
	KASSERT(cmix < num_coremap_entries);

	/* Page must be pinned. */
	KASSERT(coremap[cmix].cm_pinned);

	tlbix = tlb_probe(va, 0);
	if (tlbix < 0) {
		KASSERT(coremap[cmix].cm_tlbix == -1);
		KASSERT(coremap[cmix].cm_cpunum == 0);
		tlbix = mipstlb_getslot();
		KASSERT(tlbix>=0 && tlbix<NUM_TLB);
		coremap[cmix].cm_tlbix = tlbix;
		coremap[cmix].cm_cpunum = curcpu->c_number;
		DEBUG(DB_TLB, "... pa 0x%05lx <-> tlb %d\n", 
			(unsigned long) COREMAP_TO_PADDR(cmix), tlbix);
	}
	else {
		KASSERT(tlbix>=0 && tlbix<NUM_TLB);
		KASSERT(coremap[cmix].cm_tlbix == tlbix);
		KASSERT(coremap[cmix].cm_cpunum == curcpu->c_number);
	}

	ehi = va & TLBHI_VPAGE;
	elo = (pa & TLBLO_PPAGE) | TLBLO_VALID;
	if (writable) {
		elo |= TLBLO_DIRTY;
	}

	tlb_write(ehi, elo, tlbix);

	/* Unpin the page. */
	coremap[cmix].cm_pinned = 0;
	wchan_wakeall(coremap_pinchan);

	spinlock_release(&coremap_spinlock);
}
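The comment above says mmu_map() is the end result of fault handling. A hypothetical caller might look like the sketch below; curproc->p_addrspace, PAGE_FRAME, and the fault-type constants are stock OS/161, while as_lookup_page() and its "pinned on success" contract are assumptions made purely for illustration.

/*
 * Sketch only: a fault handler that has already resolved faultaddress to
 * a pinned physical page hands the translation to mmu_map(), which unpins
 * the page and releases coremap_spinlock itself.
 */
static int
vm_fault_sketch(int faulttype, vaddr_t faultaddress)
{
	struct addrspace *as = curproc->p_addrspace;
	paddr_t pa;
	int writable = (faulttype != VM_FAULT_READ);

	faultaddress &= PAGE_FRAME;
	if (as == NULL)
		return EFAULT;

	/* Assumed helper: fills in pa and leaves the page pinned on success. */
	if (as_lookup_page(as, faultaddress, &pa, writable))
		return EFAULT;

	/* The address space must already be activated on this CPU (see the KASSERT above). */
	mmu_map(as, faultaddress, pa, writable);
	return 0;
}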
Example #7
void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
				unsigned long end)
{
	if (CPU_CONTEXT(smp_processor_id(), mm) != 0) {
		unsigned long flags;
		int size;

#ifdef DEBUG_TLB
		printk("[tlbrange<%02x,%08lx,%08lx>]", (mm->context & 0xff),
		       start, end);
#endif
		__save_and_cli(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if(size <= mips_cpu.tlbsize/2) {
			int oldpid = (get_entryhi() & 0xff);
			int newpid = (CPU_CONTEXT(smp_processor_id(), mm) &
				      0xff);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while(start < end) {
				int idx;

				set_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				BARRIER;
				tlb_probe();
				BARRIER;
				idx = get_index();
				set_entrylo0(0);
				set_entrylo1(0);
				if(idx < 0)
					continue;
				/* Make sure all entries differ. */
				set_entryhi(KSEG0+idx*0x2000);
				BARRIER;
				tlb_write_indexed();
				BARRIER;
			}
			set_entryhi(oldpid);
		} else {
			get_new_mmu_context(mm, smp_processor_id());
			if (mm == current->active_mm)
				set_entryhi(CPU_CONTEXT(smp_processor_id(),
							mm) & 0xff);
		}
		__restore_flags(flags);
	}
}
Example #8
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();
	unsigned long flags;
	int oldpid, newpid, size;

	if (!cpu_context(cpu, mm))
		return;

	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;

	local_irq_save(flags);

	if (size > TFP_TLB_SIZE / 2) {
		drop_mmu_context(mm, cpu);
		goto out_restore;
	}

	oldpid = read_c0_entryhi();
	newpid = cpu_asid(cpu, mm);

	write_c0_entrylo(0);

	start &= PAGE_MASK;
	end += (PAGE_SIZE - 1);
	end &= PAGE_MASK;
	while (start < end) {
		signed long idx;

		write_c0_vaddr(start);
		write_c0_entryhi(start);
		start += PAGE_SIZE;
		tlb_probe();
		idx = read_c0_tlbset();
		if (idx < 0)
			continue;

		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
		tlb_write();
	}
	write_c0_entryhi(oldpid);

out_restore:
	local_irq_restore(flags);
}
void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
				unsigned long end)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

#ifdef DEBUG_TLB
		printk("[tlbrange<%02x,%08lx,%08lx>]", (mm->context & ASID_MASK),
		       start, end);
#endif
		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if(size <= current_cpu_data.tlbsize/2) {
			int oldpid = read_c0_entryhi() & ASID_MASK;
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while(start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				BARRIER;
				tlb_probe();
				BARRIER;
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if(idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(XKPHYS+idx*0x2000);
				BARRIER;
				tlb_write_indexed();
				BARRIER;
			}
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
}
Example #10
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			htw_stop();
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
			htw_start();
		} else {
			drop_mmu_context(mm, cpu);
		}
		flush_micro_tlb();
		local_irq_restore(flags);
	}
}
Example #11
static void refill_tbl_to(struct km_walk_ctx * ctx, unsigned int asid, int write, int pos)
{	
	unsigned long entry, oldl1, oldl2;
	unsigned long G_FLAG;
	int idx;
	int oldpid;

	/* Sanity check ASID consistency: the current ASID must match the given ASID; kernel processes do not obey this rule. */
	oldpid = read_c0_entryhi();

	/* Entry HI */	
	asid = asid & CPU_PAGE_FALG_ASID_MASK;
	entry = get_vpn2(ctx->current_virtual_address);
	entry |= asid;
	write_c0_entryhi(entry);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	oldl1 = read_c0_entrylo0();
	oldl2 = read_c0_entrylo1();
	/* Add the G_FLAG if ASID == 0, because the entry comes from the kernel and is shared by all processes */
	G_FLAG = (ctx->mem == &kp_get_system()->mem_ctx)? 1 : 0;

	/* Entry Low0 and Low1 */
	WRITE_LO;

	/* Write by type: the write is random if the TLB entry was flushed because its R/W flags changed */
	mtc0_tlbw_hazard();
	if (unlikely(idx < 0))
		tlb_write_random();
	else
	{
		if (write == 2)
		{
			printk("Write is forced index for %x, pos %d, idx %d,asid %d, %x %x.\n", ctx->current_virtual_address, pos, idx, asid, oldl1, oldl2);
		}
		
		tlb_write_indexed();
	}
	tlbw_use_hazard();

	/* Sanity check: the current ASID must match the given ASID; kernel processes do not obey this rule. */
	if ((oldpid & 0xff) != (asid & 0xff) && asid != 0/*kernel asid*/)
 		printk("Why old = %x, asid = %x. ", oldpid, asid);
}
Example #12
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

		ENTER_CRITICAL(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= current_cpu_data.tlbsize/2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		EXIT_CRITICAL(flags);
	}
}
static void swaponepagein(int idx, struct addrspace* as) {
	(void) as;
	// 3. clear their tlb entries if present TODO
	cm_setEntryDirtyState(COREMAP(idx),true);
	struct page* pg = findPageFromCoreMap(COREMAP(idx), idx);
	int spl = splhigh();
	int tlbpos = tlb_probe(pg->pt_virtbase * PAGE_SIZE, 0);
	if (tlbpos >= 0) {
		tlb_write(TLBHI_INVALID(tlbpos), TLBLO_INVALID(), tlbpos);
	} else {
		//kprintf("was not on tlb\n");
	}
	splx(spl);

	int swapPageindex = getOneSwapPage();
	kprintf("Swap in :\tswap= %x,\tpage=%x \n",swapPageindex,pg->pt_virtbase);
	//kprintf("Swap in page Vaddr = %x\n", pg->pt_virtbase);
	struct iovec iov;
	struct uio kuio;
	iov.iov_kbase = (void*) PADDR_TO_KVADDR(cm_getEntryPaddr(idx));
	iov.iov_len = PAGE_SIZE; // length of the memory space
	kuio.uio_iov = &iov;
	kuio.uio_iovcnt = 1;
	kuio.uio_resid = PAGE_SIZE; // amount to write to the file
	kuio.uio_space = NULL;
	kuio.uio_offset = swap_map[swapPageindex].se_paddr * PAGE_SIZE;
	kuio.uio_segflg = UIO_SYSSPACE;
	kuio.uio_rw = UIO_WRITE;
	//kprintf("before write \n");
	// 4. write them to disk
	spinlock_release(&coremap_lock);
	int result = VOP_WRITE(swap_vnode, &kuio);
	spinlock_acquire(&coremap_lock);
	if (result) {
		// release lock on the vnode
		panic("WRITE FAILED!\n");
		return;
	}
	cm_setEntryDirtyState(COREMAP(idx),false);
	//kprintf("write complete\n");

	pg->pt_state = PT_STATE_SWAPPED;
	pg->pt_pagebase = swap_map[swapPageindex].se_paddr;
}
Example #14
/**
	@brief Flush memory range

	If the memory range is too big, we flush all entries with this ASID
*/
void local_flush_tlb_range(unsigned int asid, unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	ENTER_CRITICAL(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;

	if (size <= current_cpu_data.tlbsize / 2)
	{
		int oldpid = read_c0_entryhi();
		int newpid = asid;

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
		while (start < end) {
			int idx;

			write_c0_entryhi(start | newpid);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(oldpid);
	} 
	else
		local_flush_asid(asid);

	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}
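A quick restatement of the size heuristic that the range-flush variants above share: TLB entries on these cores map even/odd page pairs, so a range covering more than half the TLB is cheaper to handle by dropping the whole ASID than by probing pair by pair. Sketch only; the helper name is illustrative:

/* Returns nonzero if the range is small enough to flush page pair by page pair. */
static inline int flush_range_per_pair(unsigned long start, unsigned long end,
				       unsigned long tlbsize)
{
	unsigned long pages = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	unsigned long pairs = (pages + 1) >> 1;	/* each entry maps a page pair */

	return pairs <= tlbsize / 2;
}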
void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
                           unsigned long end)
{
	int cpu = smp_processor_id();
	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

#ifdef DEBUG_TLB
		printk("[tlbrange<%02x,%08lx,%08lx>]",
		       (mm->context & ASID_MASK), start, end);
#endif
		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= NTLB_ENTRIES_HALF) {
			int oldpid = (read_c0_entryhi() & ASID_MASK);
			int newpid = (cpu_context(smp_processor_id(), mm)
				      & ASID_MASK);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while(start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				tlb_probe();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				write_c0_entryhi(KSEG0);
				if(idx < 0)
					continue;
				tlb_write_indexed();
			}
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
}
Example #16
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

#ifdef DEBUG_TLB
		printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
			cpu_context(cpu, mm) & ASID_MASK, start, end);
#endif
		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size <= current_cpu_data.tlbsize) {
			int oldpid = read_c0_entryhi() & ASID_MASK;
			int newpid = cpu_context(cpu, mm) & ASID_MASK;

			start &= PAGE_MASK;
			end += PAGE_SIZE - 1;
			end &= PAGE_MASK;
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += PAGE_SIZE;	/* BARRIER */
				tlb_probe();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entryhi(KSEG0);
				if (idx < 0)		/* BARRIER */
					continue;
				tlb_write_indexed();
			}
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
}
Example #17
void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
                     unsigned long end)
{
	if (mm->context != 0) {
		unsigned long flags;
		int size;

#ifdef DEBUG_TLB
		printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
			(mm->context & 0xfc0), start, end);
#endif
		save_and_cli(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size <= mips_cpu.tlbsize) {
			int oldpid = (get_entryhi() & 0xfc0);
			int newpid = (mm->context & 0xfc0);

			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;
			while (start < end) {
				int idx;

				set_entryhi(start | newpid);
				start += PAGE_SIZE;
				tlb_probe();
				idx = get_index();
				set_entrylo0(0);
				set_entryhi(KSEG0);
				if (idx < 0)
					continue;
				tlb_write_indexed();
			}
			set_entryhi(oldpid);
		} else {
			get_new_mmu_context(mm, smp_processor_id());
			if (mm == current->active_mm)
				set_entryhi(mm->context & 0xfc0);
		}
		restore_flags(flags);
	}
}
Example #18
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long vma_mm_context = mm->context;
	if (mm->context != 0) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size <= TLBSIZE) {
			int oldpid = pevn_get() & ASID_MASK;
			int newpid = vma_mm_context & ASID_MASK;

			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;
			while (start < end) {
				int idx;

				pevn_set(start | newpid);
				start += PAGE_SIZE;
				barrier();
				tlb_probe();
				idx = tlbpt_get();
				pectx_set(0);
				pevn_set(KSEG1);
				if (idx < 0)
					continue;
				tlb_write_indexed();
			}
			pevn_set(oldpid);
		} else {
			/* Bigger than TLBSIZE, get new ASID directly */
			get_new_mmu_context(mm);
			if (mm == current->active_mm)
				pevn_set(vma_mm_context & ASID_MASK);
		}
		local_irq_restore(flags);
	}
}
void
dump_tlb_addr(unsigned long addr)
{
	unsigned long flags, oldpid;
	int index;

	local_irq_save(flags);
	oldpid = read_c0_entryhi() & 0xff;
	write_c0_entryhi((addr & PAGE_MASK) | oldpid);
	tlb_probe();
	index = read_c0_index();
	write_c0_entryhi(oldpid);
	local_irq_restore(flags);

	if (index < 0) {
		printk("No entry for address 0x%08lx in TLB\n", addr);
		return;
	}

	printk("Entry %d maps address 0x%08lx\n", index, addr);
	dump_tlb(index, index);
}
Example #20
void
dump_tlb_addr(unsigned long addr)
{
	unsigned int flags, oldpid;
	int index;

	__save_and_cli(flags);
	oldpid = get_entryhi() & 0xff;
	set_entryhi((addr & PAGE_MASK) | oldpid);
	tlb_probe();
	index = get_index();
	set_entryhi(oldpid);
	__restore_flags(flags);

	if (index < 0) {
		printk("No entry for address 0x%08lx in TLB\n", addr);
		return;
	}

	printk("Entry %d maps address 0x%08lx\n", index, addr);
	dump_tlb(index, index);
}
Example #21
/* Usable for KV1 addresses only! */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;

	if (size > TFP_TLB_SIZE / 2) {
		local_flush_tlb_all();
		return;
	}

	local_irq_save(flags);

	write_c0_entrylo(0);

	start &= PAGE_MASK;
	end += (PAGE_SIZE - 1);
	end &= PAGE_MASK;
	while (start < end) {
		signed long idx;

		write_c0_vaddr(start);
		write_c0_entryhi(start);
		start += PAGE_SIZE;
		tlb_probe();
		idx = read_c0_tlbset();
		if (idx < 0)
			continue;

		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
		tlb_write();
	}

	local_irq_restore(flags);
}
Example #22
/*
 * tlb_unmap: Searches the TLB for a vaddr translation and invalidates
 * it if it exists.
 *
 * Synchronization: assumes we hold coremap_spinlock. Does not block. 
 */
static
void
tlb_unmap(vaddr_t va)
{
	int i;
	uint32_t elo = 0, ehi = 0;

	KASSERT(spinlock_do_i_hold(&coremap_spinlock));

	KASSERT(va < MIPS_KSEG0);

	i = tlb_probe(va & PAGE_FRAME,0);
	if (i < 0) {
		return;
	}
	
	tlb_read(&ehi, &elo, i);
	
	KASSERT(elo & TLBLO_VALID);
	
	DEBUG(DB_TLB, "invalidating tlb slot %d (va: 0x%x)\n", i, va); 
	
	tlb_invalidate(i);
}
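tlb_invalidate() is not shown in the snippet above; given how the other OS/161 examples here clear entries (tlb_write() with TLBHI_INVALID()/TLBLO_INVALID()), it presumably reduces to something like this sketch:

/* Sketch: overwrite TLB slot i with an invalid entry, as in the examples above. */
static void
tlb_invalidate(int i)
{
	tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
}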
Example #23
void vm_tlbshootdown(const struct tlbshootdown *ts)
{
	//(void)ts;

	int tlb_entry, spl;

	/* Disable interrupts on this CPU while frobbing the TLB. */
	bool lock = get_coremap_spinlock();
	spl = splhigh();
	// Probe TLB so see if particular VA is present.
	tlb_entry = tlb_probe(VA_TO_VPF(ts->ts_vaddr), 0);
	if(tlb_entry < 0) {
		// No entry found, so the shootdown has already succeeded
		splx(spl);
		release_coremap_spinlock(lock);
		return;
	}

	// Invalidate the particular TLB entry
	tlb_write(TLBHI_INVALID(tlb_entry), TLBLO_INVALID(), tlb_entry);

	splx(spl);
	release_coremap_spinlock(lock);

}
Example #24
// handle page faults
int vm_fault(int faulttype, vaddr_t faultaddress) {
	
	(void)faulttype;
//	(void)faultaddress;

	uint32_t tlbhi;
	uint32_t tlblo;

	if (curthread->t_addrspace == NULL)	// kernel has page faulted, so return EFAULT, which will cause a panic (as it should)
		return EFAULT;

	faultaddress &= PAGE_FRAME;	// page-align the fault address

	
	struct page* pg = as_fault(curthread->t_addrspace,faultaddress);

	if (pg==NULL){
		return EFAULT;
	}
	
	spinlock_acquire(&pg->pg_lock);

		int stat = pg->status;

	spinlock_release(&pg->pg_lock);

	if (stat==NOT_ALLOCD) {
		int err = page_alloc(pg);
		if (err)			
			return err;
	}

	KASSERT((pg->ram_addr&PAGE_FRAME)==pg->ram_addr);
	KASSERT(pg->status==IN_MEM);

	spinlock_acquire(&pg->pg_lock);

		pg->is_dirty = 1;
		tlblo = (pg->ram_addr & TLBLO_PPAGE) | TLBLO_VALID | TLBLO_DIRTY;

	spinlock_release(&pg->pg_lock);

	tlbhi = faultaddress & TLBHI_VPAGE;

	spinlock_acquire(&tlb_lock);	// only one thread should be messing with the TLB at a time

		//int probe = tlb_probe(tlbhi,0);
	
		//if (probe<0) 			
			tlb_random(tlbhi,tlblo);
		//else
		//	tlb_write(tlbhi,tlblo,probe);

		int probe = tlb_probe(tlbhi,0);

		KASSERT(probe>=0);

	spinlock_release(&tlb_lock);
		
	return 0;
}
Example #25
/*
 * Handle a TLB miss exception for a page marked as able to trigger the
 * end-of-page errata.
 * Returns nonzero if the exception has been completely serviced, and no
 * further processing in the trap handler is necessary.
 */
int
eop_tlb_miss_handler(struct trap_frame *trapframe, struct cpu_info *ci,
    struct proc *p)
{
	struct pcb *pcb;
	vaddr_t va, faultva;
	struct vmspace *vm;
	vm_map_t map;
	pmap_t pmap;
	pt_entry_t *pte, entry;
	int onfault;
	u_long asid;
	uint i, npairs;
	int64_t tlbidx;

	/*
	 * Check for a valid pte with the `special' bit set (PG_SP)
	 * in order to apply the end-of-page errata workaround.
	 */

	vm = p->p_vmspace;
	map = &vm->vm_map;
	faultva = trunc_page((vaddr_t)trapframe->badvaddr);
	pmap = map->pmap;

	pte = pmap_segmap(pmap, faultva);
	if (pte == NULL)
		return 0;

	pte += uvtopte(faultva);
	entry = *pte;
	if ((entry & PG_SP) == 0)
		return 0;

	pcb = &p->p_addr->u_pcb;
	asid = pmap->pm_asid[ci->ci_cpuid].pma_asid << PG_ASID_SHIFT;

	/*
	 * For now, only allow one EOP vulnerable page to get a wired TLB
	 * entry.  We will aggressively attempt to recycle the wired TLB
	 * entries created for that purpose, as soon as we are no longer
	 * needing the EOP page resident in the TLB.
	 */

	/*
	 * Figure out how many pages to wire in the TLB.
	 */

	if ((faultva & PG_ODDPG) != 0) {
		/* odd page: need two pairs */
		npairs = 2;
	} else {
		/* even page: only need one pair */
		npairs = 1;
	}

	/*
	 * Fault-in the next page.
	 */

	va = faultva + PAGE_SIZE;
	pte = pmap_segmap(pmap, va);
	if (pte != NULL)
		pte += uvtopte(va);

	if (pte == NULL || (*pte & PG_V) == 0) {
		onfault = pcb->pcb_onfault;
		pcb->pcb_onfault = 0;
		KERNEL_LOCK();
		(void)uvm_fault(map, va, 0, PROT_READ | PROT_EXEC);
		KERNEL_UNLOCK();
		pcb->pcb_onfault = onfault;
	}

	/*
	 * Clear possible TLB entries for the pages we're about to wire.
	 */

	for (i = npairs, va = faultva & PG_HVPN; i != 0;
	    i--, va += 2 * PAGE_SIZE) {
		tlbidx = tlb_probe(va | asid);
		if (tlbidx >= 0)
			tlb_update_indexed(CKSEG0_BASE, PG_NV, PG_NV, tlbidx);
	}

	/*
	 * Reserve the extra wired TLB, and fill them with the existing ptes.
	 */

	tlb_set_wired((UPAGES / 2) + npairs);
	for (i = 0, va = faultva & PG_HVPN; i != npairs;
	    i++, va += 2 * PAGE_SIZE) {
		pte = pmap_segmap(pmap, va);
		if (pte == NULL)
			tlb_update_indexed(va | asid,
			    PG_NV, PG_NV, (UPAGES / 2) + i);
		else {
			pte += uvtopte(va);
			tlb_update_indexed(va | asid,
			    pte[0], pte[1], (UPAGES / 2) + i);
		}
	}

	/*
	 * Save the base address of the EOP vulnerable page, to be able to
	 * figure out when the wired entry is no longer necessary.
	 */

	pcb->pcb_nwired = npairs;
	pcb->pcb_wiredva = faultva & PG_HVPN;
	pcb->pcb_wiredpc = faultva;

	return 1;
}
static int dec_kn01_be_backend(struct pt_regs *regs, int is_fixup, int invoker)
{
	volatile u32 *kn01_erraddr = (void *)CKSEG1ADDR(KN01_SLOT_BASE +
							KN01_ERRADDR);

	static const char excstr[] = "exception";
	static const char intstr[] = "interrupt";
	static const char cpustr[] = "CPU";
	static const char mreadstr[] = "memory read";
	static const char readstr[] = "read";
	static const char writestr[] = "write";
	static const char timestr[] = "timeout";
	static const char paritystr[] = "parity error";

	int data = regs->cp0_cause & 4;
	unsigned int __user *pc = (unsigned int __user *)regs->cp0_epc +
				  ((regs->cp0_cause & CAUSEF_BD) != 0);
	union mips_instruction insn;
	unsigned long entrylo, offset;
	long asid, entryhi, vaddr;

	const char *kind, *agent, *cycle, *event;
	unsigned long address;

	u32 erraddr = *kn01_erraddr;
	int action = MIPS_BE_FATAL;

	/* Ack ASAP, so that any subsequent errors get caught. */
	dec_kn01_be_ack();

	kind = invoker ? intstr : excstr;

	agent = cpustr;

	if (invoker)
		address = erraddr;
	else {
		/* Bloody hardware doesn't record the address for reads... */
		if (data) {
			/* This never faults. */
			__get_user(insn.word, pc);
			vaddr = regs->regs[insn.i_format.rs] +
				insn.i_format.simmediate;
		} else
			vaddr = (long)pc;
		if (KSEGX(vaddr) == CKSEG0 || KSEGX(vaddr) == CKSEG1)
			address = CPHYSADDR(vaddr);
		else {
			/* Peek at what physical address the CPU used. */
			asid = read_c0_entryhi();
			entryhi = asid & (PAGE_SIZE - 1);
			entryhi |= vaddr & ~(PAGE_SIZE - 1);
			write_c0_entryhi(entryhi);
			BARRIER;
			tlb_probe();
			/* No need to check for presence. */
			tlb_read();
			entrylo = read_c0_entrylo0();
			write_c0_entryhi(asid);
			offset = vaddr & (PAGE_SIZE - 1);
			address = (entrylo & ~(PAGE_SIZE - 1)) | offset;
		}
	}

	/* Treat low 256MB as memory, high -- as I/O. */
	if (address < 0x10000000) {
		cycle = mreadstr;
		event = paritystr;
	} else {
		cycle = invoker ? writestr : readstr;
		event = timestr;
	}

	if (is_fixup)
		action = MIPS_BE_FIXUP;

	if (action != MIPS_BE_FIXUP)
		printk(KERN_ALERT "Bus error %s: %s %s %s at %#010lx\n",
			kind, agent, cycle, event, address);

	return action;
}
Example #27
int
vm_fault(int faulttype, vaddr_t faultaddress)
{

	struct pte *target;
    uint32_t tlbhi, tlblo;
    int spl;

    struct addrspace* as = curproc->p_addrspace;

    int permission = as_check_region(as, faultaddress);
    

    if (permission < 0 // check if not in region
    	&& as_check_stack(as,faultaddress) // check if not in stack
    	&& as_check_heap(as,faultaddress)) // check if not in heap
        	return EFAULT;
    
    if(permission<0)
    	permission = READ | WRITE;


    target = pte_get(as,faultaddress & PAGE_FRAME);

    if(target==NULL) {
    	target = pt_alloc_page(as,faultaddress & PAGE_FRAME);
    }
	

    // Lock pagetable entry
    if(swap_enabled == true)
    	lock_acquire(target->pte_lock);


    
    if(target->in_memory == 0) { // Page is allocated but not in memory.

    	target = pt_load_page(as, faultaddress & PAGE_FRAME); 	// pt_alloc_page creates the page table entry if neccessary and also
    	    													// allocates it using coremap.    	
    }
    



	KASSERT(target->in_memory != 0);
	KASSERT(target->paddr != 0);
	KASSERT(target->paddr != 1);
	
    tlbhi = faultaddress & PAGE_FRAME;
    tlblo = (target->paddr & PAGE_FRAME) | TLBLO_VALID;

    /* Adding CPU index to corresponding TLB entry */

    coremap[PADDR_TO_CM(target->paddr)].cpu = curcpu->c_number;    
    coremap[PADDR_TO_CM(target->paddr)].page = target;    
    coremap[PADDR_TO_CM(target->paddr)].accessed = 1;    
    int index;

    spl = splhigh();

    // TODO permissions
    //kprintf(" \n %x - %x \n",tlbhi, tlblo);
    switch (faulttype) {
        case VM_FAULT_READ:
        case VM_FAULT_WRITE:


            //index = PADDR_TO_CM(target->paddr);
            //coremap[index].state = DIRTY;
            tlb_random(tlbhi, tlblo);
            break;
        case VM_FAULT_READONLY:

            tlblo |= TLBLO_DIRTY;
            // TODO: Change physical page's state to DIRTY.
            index = PADDR_TO_CM(target->paddr);
            //KASSERT(coremap[index].state!=FIXED);
            //KASSERT(coremap[index].state!=VICTIM);
            KASSERT(target->in_memory != 0); // Someone swapped me out. Synchronization is broken.
            //KASSERT(coremap[index].as ==as);
            coremap[index].state = DIRTY; // Set it to dirty!


            index = tlb_probe(faultaddress & PAGE_FRAME, 0);
            tlb_write(tlbhi, tlblo, index);
    }

    splx(spl);
    if(swap_enabled == true)
    	lock_release(target->pte_lock);

    return 0;

}
static void swapin(int npages, struct addrspace* as) {
	// 1. check if coremap lock is already held, else acquire it

	if (swap_state == SWAP_STATE_NOSWAP) {
		panic("Attempting to swap in when no swap disk is found!\n");
	}
	lock_acquire(swap_lock);
	// 2. select a bunch of non kernel pages.
	unsigned int i;

	for (i = 0; i < page_count; i++) {
		int i_idx = SWAP_IDX(i + swap_prev_write_idx);
		if (cm_isEntryUsed(COREMAP(i_idx)) && cm_getEntryAddrspaceIdent(COREMAP(i_idx)) != NULL) {
			struct page* pg = NULL;
			if (cm_getEntryAddrspaceIdent(COREMAP(i_idx)) == as) {
				pg = findPageFromCoreMap(COREMAP(i_idx), i_idx);
				int spl = splhigh();
				int tlbpos = tlb_probe(pg->pt_virtbase * PAGE_SIZE, 0);
				splx(spl);
				if (tlbpos >= 0) {
					//kprintf("tlb hit at %x %d\n",pg->pt_pagebase, tlbpos);
					continue;
				}
			}
			int j = 0;
			int flag = 1;
			for (j = 0; j < npages; j++) {
				int j_idx = SWAP_IDX(i + j + swap_prev_write_idx);
				if (j_idx >= (int)page_count) {
					kprintf("page count greater than i+j\n");
					flag = 0;
					break;
				}
				if (cm_isEntryUsed(
						COREMAP(j_idx)) && cm_getEntryAddrspaceIdent(COREMAP(j_idx)) == NULL) {
					kprintf("page used by kernel\n");
					flag = 0;
					break;
				}
			}
			if (flag == 1) {
				for (j = 0; j < npages; j++) {
					int j_idx = SWAP_IDX(i + j + swap_prev_write_idx);
					if (cm_isEntryUsed(COREMAP(j_idx)) && cm_getEntryAddrspaceIdent(COREMAP(j_idx)) != NULL) {
						swaponepagein(j_idx, as);
						cm_setEntryUseState(COREMAP(j_idx), false);
						cm_setEntryDirtyState(COREMAP(j_idx), false);
						// let the address space identifier be NULL initially
						cm_setEntryAddrspaceIdent(COREMAP(j_idx), NULL);
						coremap_pages_free++;
						swap_prev_write_idx = j_idx + 1;
						/*if(pg != NULL) {
							kprintf("swapped in page was = %x\n", pg->pt_virtbase);
						} else {
							kprintf("page was nill\n");
						}*/
					} else {
						kprintf("How did this get approved?\n");
					}
				}
				//spinlock_acquire(&coremap_lock);
				lock_release(swap_lock);
				return;
			}
		}
	}
	panic("Out of pages to swap out!\n");
	//spinlock_acquire(&coremap_lock);
	lock_release(swap_lock);

	// 2.5 Maintain a index of last page that was swapped in so that you swap in the one after that

	// 5. free lock if you had acquired it in this method
}