Example #1: flush_tlb_pmd_range()
/*
 * Flush the hash entries for every hashed PTE in the page table page
 * mapped by @pmd. @addr may be any address inside that PMD_SIZE
 * region; it is aligned down before the walk starts.
 */
void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;
	pte_t *start_pte;
	unsigned long flags;

	addr = _ALIGN_DOWN(addr, PMD_SIZE);
	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	start_pte = pte_offset_map(pmd, addr);
	for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) {
		unsigned long pteval = pte_val(*pte);

		if (pteval & _PAGE_HASHPTE)
			hpte_need_flush(mm, addr, pte, pteval, 0);
		addr += PAGE_SIZE;
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}
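A minimal sketch of how flush_tlb_pmd_range() might be called; the helper name and the teardown context below are assumptions for illustration, not part of the original code, and a real caller would also handle page table locking and the eventual freeing of the PTE page.

/*
 * Hypothetical helper (an assumption, not from the original source):
 * flush any hash entries still referencing the PTE page under @pmd
 * before that page table page is torn down. Locking and the actual
 * free are assumed to happen in the surrounding code.
 */
static void prepare_pte_page_teardown(struct mm_struct *mm, pmd_t *pmd,
				      unsigned long addr)
{
	/* Any address inside the PMD_SIZE region will do; the flush
	 * routine aligns it down itself. */
	flush_tlb_pmd_range(mm, pmd, addr);
}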
Example #2: __flush_hash_table_range()
/*
 * Flush the hash entries for every hashed PTE of @mm in the range
 * [start, end). The bounds are page aligned here, so callers may pass
 * arbitrary addresses.
 */
void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_linux_pte(mm->pgd, start);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (!(pte & _PAGE_HASHPTE))
			continue;
		hpte_need_flush(mm, start, ptep, pte, 0);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}
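Similarly, a minimal sketch of a range-based caller of __flush_hash_table_range(); the flush_vma_hash() helper and the VMA-wide flush are assumptions for illustration only, assuming the caller already serialises against the VMA being torn down (for instance via the mm's mmap semaphore).

/*
 * Hypothetical caller (an assumption, not from the original source):
 * flush every hashed PTE covering a VMA, e.g. before its linux PTEs
 * are repurposed.
 */
static void flush_vma_hash(struct vm_area_struct *vma)
{
	/* VMA bounds are already page aligned; the flush routine would
	 * align them anyway. */
	__flush_hash_table_range(vma->vm_mm, vma->vm_start, vma->vm_end);
}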