/*
 * Note that this is intended to be called only from the copy_user_page
 * asm code; anything else will require special locking to prevent the
 * mini-cache space being re-used.  (Note: probably preempt unsafe).
 *
 * We rely on the fact that the minicache is 2K, and we'll be pushing
 * 4K of data through it, so we don't actually have to specifically
 * flush the minicache when we change the mapping.
 *
 * Note also: assert(PAGE_OFFSET <= virt < high_memory).
 * Unsafe: preempt, kmap.
 */
unsigned long map_page_minicache(unsigned long virt)
{
	set_pte(minicache_pte, mk_pte_phys(__pa(virt), minicache_pgprot));
	cpu_tlb_invalidate_page(minicache_address, 0);

	return minicache_address;
}
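
/*
 * A minimal sketch (not the actual copy_user_page asm) of how a caller
 * might use map_page_minicache(): remap the source page through the
 * minicache window, then copy from the resulting alias.  The function
 * name 'minicache_copy_user_page' is hypothetical and used here for
 * illustration only.
 */
static void minicache_copy_user_page(void *kto, unsigned long kfrom)
{
	unsigned long from;

	/* Remap the source page at the minicache virtual address. */
	from = map_page_minicache(kfrom);

	/* Copy one page from the minicache alias to the destination. */
	memcpy(kto, (void *)from, PAGE_SIZE);
}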
/*
 * cpd_tlb_flush_page: Ensures coherence of the TLB entry owned by 'vma_p's
 * mm for 'va_page'.
 *
 * Assumptions:
 *  - The mapping is not a kernel one, so the TLB is coherent with respect
 *    to kernel mappings.
 *  - The user CPD entry is coherent, via set_pmd(), and only a single page
 *    is affected.
 *  - The TLB entry mapping 'va_page' is incoherent iff it is covered by a
 *    CPD entry owned by the mm associated with 'vma_p'.
 *
 * Action:
 *  - If the CPD entry covering 'va_page' is owned by 'vma_p's mm,
 *    invalidate the TLB entry.
 *
 * Notes:
 *  - The page is specified by a VA, while the flushing call and CPD access
 *    use an MVA.
 */
void cpd_tlb_flush_page(struct vm_area_struct *vma_p, unsigned long va_page)
{
	pmd_t cpd;
	int domain;
	unsigned long mva_page = va_to_mva(va_page, vma_p->vm_mm);

	/* Does 'vma_p's mm have any incoherencies? */
	if (!cpd_is_mm_tlb_coherent(vma_p->vm_mm)) {
		cpd = *pmd_offset(pgd_offset_k(mva_page), mva_page);
		domain = pmd_domain(cpd);

		/*
		 * Is the CPD entry's domain incoherent and active in
		 * 'vma_p's mm?
		 */
		if (!cpd_is_domain_tlb_coherent(domain) &&
		    domain_active(vma_p->vm_mm->context.dacr, domain)) {
			cpu_tlb_invalidate_page(mva_page,
						vma_p->vm_flags & VM_EXEC);
		}
	}
}
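
/*
 * A minimal sketch of a caller: after a user PTE has been updated via
 * set_pte(), the stale translation for that single page is removed with
 * cpd_tlb_flush_page(), which handles the VA to MVA translation
 * internally.  'example_update_pte' and its argument list are
 * hypothetical, for illustration only.
 */
static void example_update_pte(struct vm_area_struct *vma_p,
			       unsigned long addr, pte_t *pte_p, pte_t entry)
{
	/* Install the new translation for 'addr'. */
	set_pte(pte_p, entry);

	/* Drop any stale TLB entry for 'addr' owned by this mm. */
	cpd_tlb_flush_page(vma_p, addr);
}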