Example 1
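/*
 * Flush the hash-table and TLB entries for the range of user addresses
 * [start, end) in the given mm.  If there is no hash table, just
 * invalidate the whole TLB.
 */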
static void flush_range(struct mm_struct *mm, unsigned long start,
			unsigned long end)
{
	pmd_t *pmd;
	unsigned long pmd_end;
	int count;
	unsigned int ctx = mm->context.id;

	if (Hash == 0) {
		_tlbia();
		return;
	}
	start &= PAGE_MASK;
	if (start >= end)
		return;
	end = (end - 1) | ~PAGE_MASK;
	pmd = pmd_offset(pgd_offset(mm, start), start);
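	/* walk the range one PMD-covered chunk at a time */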
	for (;;) {
		pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
		if (pmd_end > end)
			pmd_end = end;
		if (!pmd_none(*pmd)) {
			count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
			flush_hash_pages(ctx, start, pmd_val(*pmd), count);
		}
		if (pmd_end == end)
			break;
		start = pmd_end + 1;
		++pmd;
	}
}
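For reference, here is a minimal standalone sketch of the range-splitting arithmetic used above, written as ordinary userspace C. The constants (PAGE_SHIFT = 12, PGDIR_SHIFT = 22, i.e. 4 KiB pages and 4 MiB covered per PMD entry) and the helper name show_chunks() are illustrative assumptions, not taken from the kernel source; the loop simply mirrors the chunking and page-count computation in flush_range().

#include <stdio.h>

/* Assumed geometry for illustration only: 4 KiB pages, 4 MiB covered
 * per PMD entry (classic 32-bit values); not taken from the snippet. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))

/* Walk [start, end) one PMD-sized chunk at a time and print how many
 * pages each flush_hash_pages() call in flush_range() would cover. */
static void show_chunks(unsigned long start, unsigned long end)
{
	unsigned long pmd_end;
	int count;

	start &= PAGE_MASK;
	if (start >= end)
		return;
	end = (end - 1) | ~PAGE_MASK;	/* inclusive, page-aligned end */
	for (;;) {
		pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
		if (pmd_end > end)
			pmd_end = end;
		count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
		printf("flush %#lx..%#lx: %d page(s)\n", start, pmd_end, count);
		if (pmd_end == end)
			break;
		start = pmd_end + 1;
	}
}

int main(void)
{
	/* A sample range that straddles a 4 MiB PMD boundary. */
	show_chunks(0x003ff000UL, 0x00402000UL);
	return 0;
}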
Example 2
/*
 * Called when unmapping pages to flush entries from the TLB/hash table.
 */
void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
{
	unsigned long ptephys;

	if (Hash != 0) {
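		/* physical address of the page-table page that contains ptep */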
		ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(mm->context.id, addr, ptephys, 1);
	}
}
Example 3
/*
 * Called by ptep_test_and_clear_young()
 */
void flush_hash_one_pte(pte_t *ptep)
{
	struct page *ptepage;
	struct mm_struct *mm;
	unsigned long ptephys;
	unsigned long addr;

	if (Hash == 0)
		return;

	/*
	 * mapping/index of the page-table page are assumed to hold the
	 * owning mm and the base user address that page table maps.
	 */
	ptepage = virt_to_page(ptep);
	mm = (struct mm_struct *) ptepage->mapping;
	ptephys = __pa(ptep) & PAGE_MASK;
	addr = ptepage->index + (((unsigned long)ptep & ~PAGE_MASK) << 10);
	flush_hash_pages(mm->context, addr, ptephys, 1);
}
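The shift by 10 in the address computation is consistent with 4-byte PTEs and 4 KiB pages: the PTE at byte offset off within its page-table page is entry off / 4, and each entry maps PAGE_SIZE bytes, so the virtual offset is (off / 4) * 4096 = off << 10. Below is a minimal standalone check of that identity under those assumed sizes; the helper pte_to_vaddr() and the sample numbers are hypothetical, not kernel code.

#include <assert.h>
#include <stdio.h>

/* Assumed geometry for illustration only: 4 KiB pages, 4-byte PTEs,
 * so one page-table page covers 1024 * 4 KiB = 4 MiB of virtual space. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PTE_SIZE	4UL

/* Reconstruct the virtual address mapped by the PTE at byte offset
 * pte_off within its page-table page, given the base address (index)
 * covered by that page-table page. */
static unsigned long pte_to_vaddr(unsigned long index, unsigned long pte_off)
{
	/* (pte_off / 4) * PAGE_SIZE == pte_off << 10 for 4-byte entries */
	return index + (pte_off << 10);
}

int main(void)
{
	unsigned long index = 0x10000000UL;	/* hypothetical base VA */
	unsigned long off;

	for (off = 0; off < PAGE_SIZE; off += PTE_SIZE) {
		unsigned long expect = index + (off / PTE_SIZE) * PAGE_SIZE;
		assert(pte_to_vaddr(index, off) == expect);
	}
	printf("<< 10 matches (off / sizeof(pte_t)) * PAGE_SIZE for all entries\n");
	return 0;
}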