Example 1
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
#ifdef CONFIG_RALINK_SOC
	if (!PageHighMem(page)) {
		unsigned long addr = (unsigned long) page_address(page);

		if (pages_do_alias(addr, vmaddr & PAGE_MASK)) {
			if (page_mapped(page) && !Page_dcache_dirty(page)) {
				void *kaddr;

				kaddr = kmap_coherent(page, vmaddr);
				flush_data_cache_page((unsigned long)kaddr);
				kunmap_coherent();
			} else {
				flush_data_cache_page(addr);
				ClearPageDcacheDirty(page);
			}
		}
	} else {
		/*
		 * Highmem: lowmem_page_address() is not a usable kernel
		 * mapping here; its result feeds only the alias (cache
		 * colour) check below and is never dereferenced.
		 */
		void *laddr = lowmem_page_address(page);

		if (pages_do_alias((unsigned long)laddr, vmaddr & PAGE_MASK)) {
			if (page_mapped(page) && !Page_dcache_dirty(page)) {
				void *kaddr;

				kaddr = kmap_coherent(page, vmaddr);
				flush_data_cache_page((unsigned long)kaddr);
				kunmap_coherent();
			} else {
				void *kaddr;

				/* Legacy two-argument kmap_atomic(); later
				 * kernels dropped the KM_* slot argument. */
				kaddr = kmap_atomic(page, KM_PTE1);
				flush_data_cache_page((unsigned long)kaddr);
				kunmap_atomic(kaddr, KM_PTE1);
				ClearPageDcacheDirty(page);
			}
		}
	}
#else
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapped(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
#endif
}
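
All of these variants turn on pages_do_alias(): on a virtually indexed data
cache, two virtual mappings of the same physical page fall into different
cache sets whenever they differ in the index ("colour") bits above
PAGE_SHIFT, so each mapping can hide a stale copy of the data. A minimal
userspace sketch of the check, assuming a hypothetical 32 KB, 4-way cache
(8 KB indexed per way) in place of the kernel's real alias mask:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))
/* Hypothetical geometry: 32 KB cache, 4 ways -> 8 KB indexed per way. */
#define WAY_SIZE	(32 * 1024 / 4)
#define ALIAS_MASK	((WAY_SIZE - 1) & PAGE_MASK)

/* Two mappings conflict when their cache-colour bits differ. */
static int pages_do_alias(unsigned long addr1, unsigned long addr2)
{
	return ((addr1 ^ addr2) & ALIAS_MASK) != 0;
}

int main(void)
{
	/* Same colour: both mappings index the same cache lines. */
	printf("%d\n", pages_do_alias(0x80001000UL, 0x00401000UL)); /* 0 */
	/* Colour bit 12 differs: each mapping can hide a stale copy. */
	printf("%d\n", pages_do_alias(0x80001000UL, 0x00400000UL)); /* 1 */
	return 0;
}
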
Example 2
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);

	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    test_bit(PG_dcache_clean, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
	    (vma->vm_flags & VM_EXEC))
		__flush_purge_region(vto, PAGE_SIZE);

	kunmap_atomic(vto);
	/* Make sure the copied data is visible to other CPUs before the page is used */
	smp_wmb();
}
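
The branch at the top of copy_user_highpage() encodes a single decision:
read the source through a mapping at the user's cache colour
(kmap_coherent) only when userspace may hold dirty lines that the kernel's
own mapping would miss, and only when the kernel's view is known clean
(PG_dcache_clean), so nothing is lost by reading at the other colour. A
sketch of that predicate with stand-in types; every name below is
hypothetical, not kernel API:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the state copy_user_highpage() consults. */
struct src_state {
	bool has_aliases;	/* boot_cpu_data.dcache.n_aliases != 0 */
	bool user_mapped;	/* page_mapped(from) */
	bool kernel_clean;	/* PG_dcache_clean set on the page */
};

/*
 * Read through a mapping at the user's cache colour only when userspace
 * may have dirtied lines there and the kernel's own view is clean, so a
 * read at the other colour cannot miss anything.
 */
bool use_coherent_mapping(const struct src_state *s)
{
	return s->has_aliases && s->user_mapped && s->kernel_clean;
}

int main(void)
{
	struct src_state s = { true, true, true };

	printf("%s\n", use_coherent_mapping(&s) ? "kmap_coherent"
						: "kmap_atomic");
	return 0;
}

Either way, vto is purged afterwards when its colour conflicts with vaddr
or the VMA is executable, so neither userspace nor the instruction fetch
can see stale lines.
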
Example 3
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	if (pages_do_alias((unsigned long)page_address(page), vmaddr)) {
		void *kaddr;

		kaddr = kmap_coherent(page, vmaddr);
		flush_data_cache_page((unsigned long)kaddr);
		kunmap_coherent();
	}
}
Example 4
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    test_bit(PG_dcache_clean, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent(vfrom);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			clear_bit(PG_dcache_clean, &page->flags);
	}
}
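
The (vaddr & ~PAGE_MASK) arithmetic deserves a note: kmap_coherent() maps
the start of the page at the user's colour, so the byte offset within the
page must be re-applied to reach the data that vaddr actually names. A
standalone illustration with made-up addresses:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))

int main(void)
{
	unsigned long vaddr = 0x00403a4cUL;	/* user address of the data */
	unsigned long map   = 0xc0104000UL;	/* pretend kmap_coherent() result */

	/* vaddr & PAGE_MASK isolates the page; vaddr & ~PAGE_MASK the offset. */
	unsigned long offset = vaddr & ~PAGE_MASK;

	printf("page %#lx, offset %#lx -> read at %#lx\n",
	       vaddr & PAGE_MASK, offset, map + offset);
	return 0;
}
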
Example 5
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapped(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}
Example 6
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    test_bit(PG_dcache_clean, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			/* XXX.. For now kunmap_coherent() does a purge */
			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
			kunmap_coherent(kaddr);
		} else
			__flush_purge_region((void *)addr, PAGE_SIZE);
	}
}
Example 7
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    test_bit(PG_dcache_clean, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(vto);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			clear_bit(PG_dcache_clean, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}
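
The trailing VM_EXEC check keeps the instruction cache coherent when
copy_to_user_page() writes into executable pages, as ptrace does when
planting breakpoints: the new bytes must be written back from the data
cache and the stale ones invalidated from the instruction cache before
they can be fetched. A userspace analogue of that step, using GCC's
__builtin___clear_cache(); patch_code() is a made-up helper, not kernel
code:

#include <string.h>

/*
 * After writing instructions into memory that will later be executed,
 * write the patched range back from the data cache and invalidate the
 * corresponding instruction-cache lines.
 */
void patch_code(char *code, const char *insns, size_t len)
{
	memcpy(code, insns, len);
	__builtin___clear_cache(code, code + len);
}

In the kernel that job falls to the flush_cache_page() call above, or to
flush_icache_range() in generic code.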