Example #1
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapped(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}
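The aliasing test used throughout these examples checks whether two virtual addresses map the same physical page through different cache sets. As a point of reference, MIPS kernels of this era define it roughly as below; treat the exact header location as an assumption, though shm_align_mask really is derived from the D-cache way size at boot:

/* Two virtual addresses alias in a virtually indexed D-cache when they
 * differ in the index bits above the page offset. */
extern unsigned long shm_align_mask;	/* set from the D-cache way size */

static inline int pages_do_alias(unsigned long addr1, unsigned long addr2)
{
	return (addr1 ^ addr2) & shm_align_mask;
}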
Example #2
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to
	 * get faulted into the TLB (and thus flushed) anyway.
	 */
	if (PageHighMem(page))
		addr = (unsigned long)__kmap_atomic(page);
	else
		addr = (unsigned long)page_address(page);

	flush_data_cache_page(addr);

	if (PageHighMem(page))
		__kunmap_atomic((void *)addr);
}
Example #3
void __update_cache(unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (Page_dcache_dirty(page)) {
		if (PageHighMem(page))
			addr = (unsigned long)__kmap_atomic(page);
		else
			addr = (unsigned long)page_address(page);

		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);

		if (PageHighMem(page))
			__kunmap_atomic((void *)addr);

		ClearPageDcacheDirty(page);
	}
}
Example #4
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn))) {
		wmb();
		return;
	}
	page = pfn_to_page(pfn);
	if (page_mapped(page) && Page_dcache_dirty(page)) {
		void *kaddr = NULL;
		if (PageHighMem(page)) {
			addr = (unsigned long)kmap_atomic(page);
			kaddr = (void *)addr;
		} else
			addr = (unsigned long) page_address(page);
		if (exec || (cpu_has_dc_aliases &&
		    pages_do_alias(addr, address & PAGE_MASK))) {
			flush_data_cache_page(addr);
			ClearPageDcacheDirty(page);
		}

		if (kaddr)
			kunmap_atomic((void *)kaddr);
	}
	wmb();  /* finish any outstanding arch cache flushes before ret to user */
}
Example #5
void __kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	/*
	 * Protect against multiple unmaps; we can't cache-flush a page
	 * that is no longer mapped.
	 */
	if (kmap_atomic_maps[smp_processor_id()].map[type].vaddr) {
		kmap_atomic_maps[smp_processor_id()].map[type].page = NULL;
		kmap_atomic_maps[smp_processor_id()].map[type].vaddr = NULL;

		flush_data_cache_page(vaddr);
	}
	}

#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without first remapping it.
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
	local_flush_tlb_one(vaddr);
#endif

	pagefault_enable();
}
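For context, a sketch of the pairing that __kunmap_atomic() above completes, using the two-argument atomic-kmap API of the same era; zero_page_atomic and the KM_USER0 slot choice are ours, not from the original source:

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical caller: map a (possibly highmem) page into a per-CPU
 * fixmap slot, write to it, then unmap. The __kunmap_atomic() above is
 * what flushes the dirtied lines before the slot can be reused. */
static void zero_page_atomic(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	memset(kaddr, 0, PAGE_SIZE);
	kunmap_atomic(kaddr, KM_USER0);
}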
Example #6
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	if (pages_do_alias((unsigned long)page_address(page), vmaddr)) {
		void *kaddr;

		kaddr = kmap_coherent(page, vmaddr);
		flush_data_cache_page((unsigned long)kaddr);
		kunmap_coherent();
	}
}
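kmap_coherent() makes this flush effective by mapping the page at a kernel virtual address with the same cache color as the user address, so the flush hits exactly the lines the user mapping dirtied. A rough illustration of the color computation; the helper name is ours, and shm_align_mask is the same assumed mask as in the sketch after Example #1:

/* Illustration only: an address's "color" is its cache-index bits above
 * the page offset. kmap_coherent(page, vmaddr) picks a mapping whose
 * virtual address matches cache_color(vmaddr), so flushing it reaches
 * the same cache sets the user mapping used. */
static inline unsigned long cache_color(unsigned long addr)
{
	return addr & shm_align_mask & PAGE_MASK;
}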
Example #7
void __flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long addr;

	if (PageHighMem(page))
		return;

	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}
Example #8
void __flush_dcache_page(struct page *page)
{
	unsigned long addr;

	if (PageHighMem(page)) {
		addr = (unsigned long) kmap_atomic_page_address(page);
		if (addr) {
			flush_data_cache_page(addr);
			return;
		}
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to
	 * get faulted into the TLB (and thus flushed) anyway.
	 */
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}
Example #9
void __flush_dcache_page(struct page *page)
{
#ifdef CONFIG_RALINK_SOC
	void *addr;

	if (page_mapping(page) && !page_mapped(page)) {
		SetPageDcacheDirty(page);
		return;
	}
#else
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}
#endif

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to
	 * get faulted into the TLB (and thus flushed) anyway.
	 */
#ifdef CONFIG_RALINK_SOC
	if (PageHighMem(page)) {
		addr = kmap_atomic(page, KM_PTE1);
		flush_data_cache_page((unsigned long)addr);
		kunmap_atomic(addr, KM_PTE1);
	} else {
		addr = (void *) page_address(page);
		flush_data_cache_page((unsigned long)addr);
	}
	ClearPageDcacheDirty(page);
#else
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
#endif
}
Example #10
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	if (!PageHighMem(page)) {
		unsigned long addr = (unsigned long) page_address(page);

		if (pages_do_alias(addr, vmaddr & PAGE_MASK)) {
			if (page_mapped(page) && !Page_dcache_dirty(page)) {
				void *kaddr;

				kaddr = kmap_coherent(page, vmaddr);
				flush_data_cache_page((unsigned long)kaddr);
				kunmap_coherent();
			} else {
				flush_data_cache_page(addr);
				ClearPageDcacheDirty(page);
			}
		}
	} else {
		void *laddr = lowmem_page_address(page);

		if (pages_do_alias((unsigned long)laddr, vmaddr & PAGE_MASK)) {
			if (page_mapped(page) && !Page_dcache_dirty(page)) {
				void *kaddr;

				kaddr = kmap_coherent(page, vmaddr);
				flush_data_cache_page((unsigned long)kaddr);
				kunmap_coherent();
			} else {
				void *kaddr;

				kaddr = kmap_atomic(page);
				flush_data_cache_page((unsigned long)kaddr);
				kunmap_atomic(kaddr);
				ClearPageDcacheDirty(page);
			}
		}
	}
}
Example #11
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}
Example #12
void __flush_dcache_page(struct page *page)
{
	void *addr;

	if (page_mapping(page) && !page_mapped(page)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to
	 * get faulted into the TLB (and thus flushed) anyway.
	 */
	if (PageHighMem(page)) {
		addr = kmap_atomic(page);
		flush_data_cache_page((unsigned long)addr);
		kunmap_atomic(addr);
	} else {
		addr = (void *) page_address(page);
		flush_data_cache_page((unsigned long)addr);
	}
	ClearPageDcacheDirty(page);
}
Example #13
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page) && Page_dcache_dirty(page)) {
		addr = (unsigned long) page_address(page);
		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);
		ClearPageDcacheDirty(page);
	}
}
Example #14
File: cache.c Project: jur/smp86xx
/*
 * We could optimize the case where the cache argument is not BCACHE but
 * that seems very atypical use ...
 */
asmlinkage int sys_cacheflush(unsigned long __user addr,
	unsigned long bytes, unsigned int cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	if (cache & ICACHE)
		flush_icache_range(addr, addr + bytes);
	if (cache & DCACHE) {
		unsigned long start_addr;

		/*
		 * Walk the range a page at a time, aligning the start down
		 * so an unaligned range still covers its final page.
		 */
		for (start_addr = addr & PAGE_MASK;
		     start_addr < (addr + bytes); start_addr += PAGE_SIZE)
			flush_data_cache_page(start_addr);
	}

	return 0;
}
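For reference, a sketch of how userspace reaches this syscall on MIPS Linux; the 4 KiB buffer and the choice of BCACHE are ours, but cacheflush() and the ICACHE/DCACHE/BCACHE flags do come from <sys/cachectl.h>:

#include <stdio.h>
#include <stdlib.h>
#include <sys/cachectl.h>

int main(void)
{
	char *buf = malloc(4096);

	if (!buf)
		return 1;
	/* ... emit freshly generated code into buf here ... */

	/* BCACHE = ICACHE | DCACHE: write back the D-cache and invalidate
	 * the I-cache before jumping into buf. */
	if (cacheflush(buf, 4096, BCACHE) < 0) {
		perror("cacheflush");
		return 1;
	}

	free(buf);
	return 0;
}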
Example #15
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;

	pfn = pte_pfn(pte);
	if (pfn_valid(pfn) && (page = pfn_to_page(pfn), page_mapping(page)) &&
	    Page_dcache_dirty(page)) {
		if (pages_do_alias((unsigned long)page_address(page),
		                   address & PAGE_MASK)) {
			addr = (unsigned long) page_address(page);
			flush_data_cache_page(addr);
		}

		ClearPageDcacheDirty(page);
	}
}
Example #16
static void mips_flush_dcache_from_pte(pte_t pteval, unsigned long address)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pteval);

	if (unlikely(!pfn_valid(pfn)))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping(page) && Page_dcache_dirty(page)) {
		unsigned long page_addr = (unsigned long) page_address(page);

		if (!cpu_has_ic_fills_f_dc ||
		    pages_do_alias(page_addr, address & PAGE_MASK))
			flush_data_cache_page(page_addr);
		ClearPageDcacheDirty(page);
	}
}
Example #17
void __update_cache(struct vm_area_struct *vma, unsigned long address,
        pte_t pte)
{
	unsigned long addr;
	struct page *page;

	if (!cpu_has_dc_aliases)
		return;

	page = pte_page(pte);
	if (VALID_PAGE(page) && page->mapping &&
	    (page->flags & (1UL << PG_dcache_dirty))) {
		if (pages_do_alias((unsigned long)page_address(page),
				   address & PAGE_MASK)) {
			addr = (unsigned long) page_address(page);
			flush_data_cache_page(addr);
		}

		ClearPageDcacheDirty(page);
	}
}
Example #18
void flush_dcache_page(struct page *page)
{
	unsigned long addr;

	if (!cpu_has_dc_aliases)
		return;

	if (page->mapping && page->mapping->i_mmap == NULL &&
	    page->mapping->i_mmap_shared == NULL) {
		SetPageDcacheDirty(page);

		return;
	}

	/*
	 * We could delay the flush for the !page->mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to
	 * get faulted into the TLB (and thus flushed) anyway.
	 */
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}
Example #19
/* write-back and invalidate dcache */
void flush_dcache_range(void __user *userbuf, unsigned int len)
{
	unsigned long start = (unsigned long)userbuf;
	unsigned long addr;

	/* Align the start down so an unaligned buffer still covers its final page. */
	for (addr = start & PAGE_MASK; addr < start + len; addr += PAGE_SIZE)
		flush_data_cache_page(addr);
}