Example no. 1
0
void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page(page_address(page), u_vaddr);
	__flush_dcache_page(page_address(page), page_address(page));
}
Example no. 2
0
/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDRESS: Virtual Address (U0 address)
 */
static void sh7705_flush_cache_page(void *args)
{
	struct flusher_data *data = args;
	unsigned long pfn = data->addr2;

	__flush_dcache_page(pfn << PAGE_SHIFT);
}
Example no. 3
0
/*
 * Handle cache congruency of kernel and userspace mappings of a page when
 * the kernel writes to or reads from it.
 *
 * The idea is to defer flushing of the kernel mapping after a WRITE, which is
 * possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of the page are congruent
 *  -U-mapping doesn't exist yet for the page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where the kernel READs from a userspace mapped
 * page: if the U-mapping is not congruent to the K-mapping, the former needs
 * flushing.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapped(page)) {

		/* kernel reading from page with U-mapping */
		void *paddr = page_address(page);
		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
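Example no. 3 above (and several later examples) tests addr_not_cache_congruent() without showing it. Below is a minimal sketch of what such a check can look like, assuming a two-colour aliasing VIPT D-cache; the macro bodies are illustrative, not necessarily the actual ARC definitions. Two mappings of the same physical page hit the same cache lines only if they agree in the first virtual-address bit above the page offset.

/* Illustrative only: assumes a VIPT D-cache whose way size is 2 * PAGE_SIZE,
 * so a single address bit (bit PAGE_SHIFT) selects the cache colour. */
#define CACHE_COLOR(addr)	(((unsigned long)(addr) >> PAGE_SHIFT) & 1)

#define addr_not_cache_congruent(addr1, addr2)	\
	(CACHE_COLOR(addr1) != CACHE_COLOR(addr2))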
Example no. 4
0
/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh7705_flush_dcache_page(void *arg)
{
	struct page *page = arg;
	struct address_space *mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
		__flush_dcache_page(PHYSADDR(page_address(page)));
}
Example no. 5
0
/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh7705_flush_dcache_page(void *arg)
{
	struct page *page = arg;
	struct address_space *mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &page->flags);
	else
		__flush_dcache_page(__pa(page_address(page)));
}
Example no. 6
0
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long u_vaddr, struct vm_area_struct *vma)
{
	void *kfrom = page_address(from);
	void *kto = page_address(to);
	int clean_src_k_mappings = 0;

	/*
	 * If the SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with the K-mapping, sync the former to the physical
	 * page so that the K-mapping used by the memcpy below sees the right
	 * data.
	 *
	 * Note that while @u_vaddr refers to the DST page's userspace vaddr,
	 * it is equally valid for the SRC page as well.
	 */
	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page(kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * Mark the DST page's K-mapping as dirty for later finalization by
	 * update_mmu_cache(). The finalization could have been done here as
	 * well (both vaddr and paddr are available), but update_mmu_cache()
	 * already has code to do that for other non-copied user pages
	 * (e.g. read faults which wire in a pagecache page directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * If SRC was already usermapped and non-congruent to the kernel
	 * mapping, sync the kernel mapping back to the physical page.
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page(kfrom, kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}
}
Example no. 7
0
void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	unsigned int paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}
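In the two-argument form used by Examples no. 1, 3, 6 and 7, __flush_dcache_page() is given both a physical address and a virtual address: on an aliasing VIPT cache the virtual address selects the set (the cache colour), while the physical address identifies the data being flushed. The following is a rough, illustrative sketch of that shape only; dc_line_flush_inv() is a hypothetical per-line helper, not a real kernel function.

/* Sketch only: walk the page one cache line at a time, indexing the D-cache
 * by @vaddr so the right colour is hit; dc_line_flush_inv() is hypothetical. */
static void sketch_flush_dcache_page(unsigned long paddr, unsigned long vaddr)
{
	unsigned long end = vaddr + PAGE_SIZE;

	for (; vaddr < end; vaddr += L1_CACHE_BYTES, paddr += L1_CACHE_BYTES)
		dc_line_flush_inv(paddr, vaddr);	/* wback + inv one line */
}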
Example no. 8
0
/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping)
			__flush_icache_all();
	}
}
Example no. 9
0
__inline__ void flush_dcache_page_impl(struct page *page)
{
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#if (L1DCACHE_SIZE > PAGE_SIZE)
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}
Example no. 10
0
inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}
Example no. 11
0
static void v6_copy_user_page_aliasing(void *kto, const void *kfrom,
				       unsigned long vaddr)
{
#if 0
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long from, to;
	struct page *page = virt_to_page(kfrom);

	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
		__flush_dcache_page(page_mapping(page), page);

	/*
	 * Discard data in the kernel mapping for the new page.
	 * FIXME: needs this MCRR to be supported.
	 */
	__asm__("mcrr  p15, 0, %1, %0, c6   @ 0xec401f06"
		:
		: "r" (kto),
		  "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
		: "cc");
#endif

	/*
	 * Now copy the page using the same cache colour as the
	 * page's ultimate destination.
	 */
	spin_lock(&v6_lock);

#if 0
	set_pte_ext(TOP_PTE(from_address) + offset,
		    pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL), 0);
	set_pte_ext(TOP_PTE(to_address) + offset,
		    pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL), 0);

	from = from_address + (offset << PAGE_SHIFT);
	to   = to_address + (offset << PAGE_SHIFT);

	flush_tlb_kernel_page(from);
	flush_tlb_kernel_page(to);
#endif

	cpu_flush_tlb_all();

	copy_page(kto, kfrom);

	spin_unlock(&v6_lock);
}
Example no. 12
0
void
update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);

	invalidate_itlb_mapping(addr);
	invalidate_dtlb_mapping(addr);

#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK

	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {

		unsigned long vaddr = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
		unsigned long paddr = (unsigned long) page_address(page);
		unsigned long phys = page_to_phys(page);

		__flush_invalidate_dcache_page(paddr);

		__flush_invalidate_dcache_page_alias(vaddr, phys);
		__invalidate_icache_page_alias(vaddr, phys);

		clear_bit(PG_arch_1, &page->flags);
	}
#else
	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
	    && (vma->vm_flags & VM_EXEC) != 0) {
		unsigned long paddr = (unsigned long) page_address(page);
		__flush_dcache_page(paddr);
		__invalidate_icache_page(paddr);
		set_bit(PG_arch_1, &page->flags);
	}
#endif
}
Example no. 13
0
void
update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);

	/* Invalidate old entry in TLBs */

	flush_tlb_page(vma, addr);

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
		unsigned long phys = page_to_phys(page);
		unsigned long tmp;

		tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(tmp, phys);
		tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(tmp, phys);
		__invalidate_icache_page_alias(tmp, phys);

		clear_bit(PG_arch_1, &page->flags);
	}
#else
	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
	    && (vma->vm_flags & VM_EXEC) != 0) {
		unsigned long paddr = (unsigned long)kmap_atomic(page);
		__flush_dcache_page(paddr);
		__invalidate_icache_page(paddr);
		set_bit(PG_arch_1, &page->flags);
		kunmap_atomic((void *)paddr);
	}
#endif
}
Example no. 14
0
/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
Example no. 15
0
/*
 * Called at the end of a page fault, for a userspace mapped page:
 *  -pre-install the corresponding TLB entry into the MMU
 *  -finalize the delayed D-cache flush of the kernel mapping of the page,
 *   due to flush_dcache_page(), copy_user_page()
 *
 * Note that the flush (when done) involves both WBACK (so the physical page
 * is in sync) and INV (so no non-congruent aliases remain).
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
		      pte_t *ptep)
{
	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
	unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
	struct page *page = pfn_to_page(pte_pfn(*ptep));

	create_tlb(vma, vaddr, ptep);

	if (page == ZERO_PAGE(0)) {
		return;
	}

	/*
	 * Exec page : Independent of aliasing/page-color considerations,
	 *	       since icache doesn't snoop dcache on ARC, any dirty
	 *	       K-mapping of a code page needs to be wback+inv so that
	 *	       icache fetch by userspace sees code correctly.
	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
	 *	       so userspace sees the right data.
	 *  (Avoids the flush for Non-exec + congruent mapping case)
	 */
	if ((vma->vm_flags & VM_EXEC) ||
	     addr_not_cache_congruent(paddr, vaddr)) {

		int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
		if (dirty) {
			/* wback + inv dcache lines */
			__flush_dcache_page(paddr, paddr);

			/* invalidate any existing icache lines */
			if (vma->vm_flags & VM_EXEC)
				__inv_icache_page(paddr, vaddr);
		}
	}
}
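Taken together, Examples no. 3, 6 and 15 implement a lazy write-back scheme around the PG_dc_clean bit. The following is a condensed model of the two halves; the helper names defer_kmap_flush() and finalize_kmap_flush() are made up for illustration, the real logic being inlined in the functions shown above.

/* Writer side (flush_dcache_page / copy_user_highpage): record that the
 * K-mapping may be dirty, but do not flush yet. */
static inline void defer_kmap_flush(struct page *page)
{
	clear_bit(PG_dc_clean, &page->flags);
}

/* Fault side (update_mmu_cache): the first user mapping that needs coherence
 * pays for the write-back + invalidate, exactly once per dirtying. */
static inline void finalize_kmap_flush(struct page *page, unsigned long paddr)
{
	if (!test_and_set_bit(PG_dc_clean, &page->flags))
		__flush_dcache_page(paddr, paddr);
}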
Example no. 16
0
/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDRESS: Virtual Address (U0 address)
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	__flush_dcache_page(pfn << PAGE_SHIFT);
}
Example no. 17
0
/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
void flush_dcache_page(struct page *page)
{
	if (test_bit(PG_mapped, &page->flags))
		__flush_dcache_page(PHYSADDR(page_address(page)));
}