Example 1
/*
 * Flush the TLB (and the hash-table PTEs backing it) for a range of
 * user addresses: walk the PMD entries covering [start, end] and
 * flush the hash pages for each PMD that is present.
 */
static void flush_range(struct mm_struct *mm, unsigned long start,
			unsigned long end)
{
	pmd_t *pmd;
	unsigned long pmd_end;
	int count;
	unsigned int ctx = mm->context.id;

	if (Hash == 0) {
		_tlbia();
		return;
	}
	start &= PAGE_MASK;
	if (start >= end)
		return;
	end = (end - 1) | ~PAGE_MASK;
	pmd = pmd_offset(pgd_offset(mm, start), start);
	for (;;) {
		pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
		if (pmd_end > end)
			pmd_end = end;
		if (!pmd_none(*pmd)) {
			count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
			flush_hash_pages(ctx, start, pmd_val(*pmd), count);
		}
		if (pmd_end == end)
			break;
		start = pmd_end + 1;
		++pmd;
	}
}
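flush_range() does the page-granular work one PMD at a time, so coarser flushes can be layered on top of it. A minimal sketch of a whole-address-space flush built this way (flush_user_space is a hypothetical name, not from the source; TASK_SIZE bounding user addresses is assumed, as in Example 5 below):

/*
 * Sketch only: flush every user mapping of mm by covering the whole
 * user address range with a single flush_range() call.
 * flush_user_space is a hypothetical helper, not from the source.
 */
static void flush_user_space(struct mm_struct *mm)
{
	flush_range(mm, 0, TASK_SIZE);
}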
Example 2
/*
 * Common functions
 */
void smp_message_recv(int msg, struct pt_regs *regs)
{
	atomic_inc(&ipi_recv);

	switch (msg) {
	case PPC_MSG_CALL_FUNCTION:
		smp_call_function_interrupt();
		break;
	case PPC_MSG_RESCHEDULE:
		set_need_resched();
		break;
	case PPC_MSG_INVALIDATE_TLB:
		_tlbia();
		break;
#ifdef CONFIG_XMON
	case PPC_MSG_XMON_BREAK:
		xmon(regs);
		break;
#endif /* CONFIG_XMON */
	default:
		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
		       smp_processor_id(), msg);
		break;
	}
}
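Examples 5 and 6 below end by calling smp_send_tlb_invalidate(), whose IPI lands in the PPC_MSG_INVALIDATE_TLB case above. A hedged sketch of that sending side, assuming the era's smp_message_pass(target, msg, data, wait) primitive and its MSG_ALL_BUT_SELF target:

/*
 * Sketch only: broadcast a TLB-invalidate request to all other CPUs.
 * smp_message_pass() and MSG_ALL_BUT_SELF are assumed from the ppc
 * port of this period; check the actual smp.c before relying on them.
 */
void smp_send_tlb_invalidate(int cpu)
{
	smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_INVALIDATE_TLB, 0, 0);
}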
Example 3
/*
 * Called at the end of a mmu_gather operation to make sure the
 * TLB flush is completely done.
 */
void tlb_flush(struct mmu_gather *tlb)
{
	if (Hash == 0) {
		/*
		 * 603 needs to flush the whole TLB here since
		 * it doesn't use a hash table.
		 */
		_tlbia();
	}
}
Example 4
/*
 * Called at the end of a mmu_gather operation to make sure the
 * TLB flush is completely done.
 */
void tlb_flush(struct mmu_gather *tlb)
{
	if (Hash == 0) {
		/*
		 * 603 needs to flush the whole TLB here since
		 * it doesn't use a hash table.
		 */
		_tlbia();
	}

	/* Push out batch of freed page tables */
	pte_free_finish();
}
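Examples 3 and 4 are two revisions of the same hook; the later one additionally pushes out the batch of freed page tables. The hook fires from the generic mmu_gather teardown. A condensed sketch of that call site, assuming the asm-generic/tlb.h flow of this era (the need_flush field and surrounding logic are recalled from the generic header, shown for orientation only):

/*
 * Sketch only: the generic mmu_gather teardown invokes the arch's
 * tlb_flush() once unmapping is done, before returning freed pages.
 * Fields and names follow asm-generic/tlb.h of this period; treat
 * this as orientation, not the exact source.
 */
static inline void tlb_flush_mmu(struct mmu_gather *tlb,
				 unsigned long start, unsigned long end)
{
	if (!tlb->need_flush)
		return;
	tlb->need_flush = 0;
	tlb_flush(tlb);		/* Examples 3 and 4 plug in here */
	/* ... release the gathered pages ... */
}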
Example 5
/*
 * Flush all the (user) entries for the address space described
 * by mm.  We can't rely on mm->mmap describing all the entries
 * that might be in the hash table.
 */
void
local_flush_tlb_mm(struct mm_struct *mm)
{
	if (Hash == 0) {
		_tlbia();
		return;
	}

	if (mm->map_count) {
		struct vm_area_struct *mp;
		for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
			local_flush_tlb_range(mp, mp->vm_start, mp->vm_end);
	} else {
		struct vm_area_struct vma;
		vma.vm_mm = mm;
		local_flush_tlb_range(&vma, 0, TASK_SIZE);
	}

#ifdef CONFIG_SMP
	smp_send_tlb_invalidate(0);
#endif /* CONFIG_SMP */
}
Example 6
/*
 * For each address in the range, find the pte for the address
 * and check _PAGE_HASHPTE bit; if it is set, find and destroy
 * the corresponding HPTE.
 */
void
local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		      unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pmd_end;
	unsigned int ctx = mm->context;

	if (Hash == 0) {
		_tlbia();
		return;
	}
	start &= PAGE_MASK;
	if (start >= end)
		return;
	pmd = pmd_offset(pgd_offset(mm, start), start);
	do {
		pmd_end = (start + PGDIR_SIZE) & PGDIR_MASK;
		if (!pmd_none(*pmd)) {
			if (!pmd_end || pmd_end > end)
				pmd_end = end;
			pte = pte_offset(pmd, start);
			do {
				if ((pte_val(*pte) & _PAGE_HASHPTE) != 0)
					flush_hash_page(ctx, start, pte);
				start += PAGE_SIZE;
				++pte;
			} while (start && start < pmd_end);
		} else {
			start = pmd_end;
		}
		++pmd;
	} while (start && start < end);

#ifdef CONFIG_SMP
	smp_send_tlb_invalidate(0);
#endif /* CONFIG_SMP */
}
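Since the walk above degenerates cleanly to a single iteration, a one-page flush can be expressed through it. A sketch (the real port special-cases single pages; this only shows the degenerate range):

/*
 * Sketch only: flush one page by handing the range walk exactly one
 * page; the PTE loop in local_flush_tlb_range then runs once.  The
 * real local_flush_tlb_page of this era is implemented differently.
 */
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	vmaddr &= PAGE_MASK;
	local_flush_tlb_range(vma, vmaddr, vmaddr + PAGE_SIZE);
}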