/*
 * copy_to_user_page - write data into a page that is mapped into user space
 * and keep the (aliasing) D-cache and the I-cache consistent.
 *
 * @vma:   user VMA mapping @page; vm_flags tells us whether it is executable
 * @page:  target page
 * @vaddr: user-space virtual address at which @page is mapped
 * @dst:   kernel virtual destination inside the page
 * @src:   kernel source buffer
 * @len:   number of bytes to copy
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	/* Nonzero when the user-space color of vaddr differs from the
	 * kernel-space color of the page (virtually-indexed D-cache alias).
	 */
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/* Flush and invalidate user page if aliased. */

	if (alias) {
		/* Temporary mapping with the same color as the user address. */
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(t, phys);
	}

	/* Copy data */

	memcpy(dst, src, len);

	/*
	 * Flush and invalidate kernel page if aliased and synchronize
	 * data and instruction caches for executable pages.
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		/* Push the freshly written data out of the kernel-color
		 * cache lines so the user color sees it.
		 */
		__flush_invalidate_dcache_range((unsigned long) dst, len);
		if ((vma->vm_flags & VM_EXEC) != 0)
			/* Drop stale instructions seen through the user color. */
			__invalidate_icache_page_alias(t, phys);

	} else if ((vma->vm_flags & VM_EXEC) != 0) {
		/* No alias: D$ writeback + I$ invalidate on the kernel
		 * address is sufficient.
		 */
		__flush_dcache_range((unsigned long)dst,len);
		__invalidate_icache_range((unsigned long) dst, len);
	}
}
void flush_dcache_page(struct page *page) { struct address_space *mapping = page_mapping(page); if (mapping && !mapping_mapped(mapping)) { if (!test_bit(PG_arch_1, &page->flags)) set_bit(PG_arch_1, &page->flags); return; } else { unsigned long phys = page_to_phys(page); unsigned long temp = page->index << PAGE_SHIFT; unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys)); unsigned long virt; if (!alias && !mapping) return; __flush_invalidate_dcache_page((long)page_address(page)); virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK); if (alias) __flush_invalidate_dcache_page_alias(virt, phys); if (mapping) __invalidate_icache_page_alias(virt, phys); } }
void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len) { unsigned long phys = page_to_phys(page); unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys)); if (alias) { unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK); __flush_invalidate_dcache_page_alias(temp, phys); } memcpy(dst, src, len); if (alias) { unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK); __flush_invalidate_dcache_range((unsigned long) dst, len); if ((vma->vm_flags & VM_EXEC) != 0) { __invalidate_icache_page_alias(temp, phys); } } else if ((vma->vm_flags & VM_EXEC) != 0) { __flush_dcache_range((unsigned long)dst,len); __invalidate_icache_range((unsigned long) dst, len); } }
/*
 * Pick a kernel virtual address through which @page can be accessed
 * coherently with a user mapping at @vaddr.
 *
 * Returns the address to use; *@paddr is set to the physical address
 * when the caller must install a temporary (TLBTEMP) mapping at @base,
 * or to 0 when the ordinary kernel linear address is already coherent.
 */
static inline void *coherent_kvaddr(struct page *page, unsigned long base,
				    unsigned long vaddr, unsigned long *paddr)
{
	/* Lowmem page whose kernel color matches the user color:
	 * the linear mapping can be used directly.
	 */
	if (!PageHighMem(page) && DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
		*paddr = 0;
		return page_to_virt(page);
	}

	/* Highmem, or a color mismatch: caller maps a same-color alias. */
	*paddr = page_to_phys(page);
	return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
}
/*
 * Read @len bytes out of a user-mapped page (kernel address @src) into
 * @dst, first making the kernel-color view coherent with any dirty data
 * the user may have written through an aliasing color at @vaddr.
 */
extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
				unsigned long vaddr, void *dst,
				const void *src, unsigned long len)
{
	unsigned long paddr = page_to_phys(page);
	unsigned long colors_differ = !(DCACHE_ALIAS_EQ(vaddr, paddr));

	/*
	 * Write back (and drop) the user-color cache lines so the
	 * kernel-color read below sees current data.
	 */
	if (colors_differ) {
		unsigned long user_alias =
			TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		__flush_invalidate_dcache_page_alias(user_alias, paddr);
	}

	memcpy(dst, src, len);
}
/*
 * flush_dcache_page - make the caches consistent for a page whose
 * kernel-side contents have changed.
 *
 * For pages of a mapping that is not yet visible in user space, the
 * flush is deferred (PG_arch_1 is set and the work happens later, at
 * update_mmu time).  Otherwise both the kernel-color and, if needed,
 * the user-color D-cache views are flushed, and the I-cache is
 * invalidated for mapped pages.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	/*
	 * If we have a mapping but the page is not mapped to user-space
	 * yet, we simply mark this page dirty and defer flushing the
	 * caches until update_mmu().
	 */

	if (mapping && !mapping_mapped(mapping)) {
		if (!test_bit(PG_arch_1, &page->flags))
			set_bit(PG_arch_1, &page->flags);
		return;

	} else {

		unsigned long phys = page_to_phys(page);
		/* User-space offset of the page in its mapping; only its
		 * cache-color bits are used below.
		 */
		unsigned long temp = page->index << PAGE_SHIFT;
		unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
		unsigned long virt;

		/*
		 * Flush the page in kernel space and user space.
		 * Note that we can omit that step if aliasing is not
		 * an issue, but we do have to synchronize I$ and D$
		 * if we have a mapping.
		 */

		if (!alias && !mapping)
			return;

		/* Kernel-color view, flushed through a TLBTEMP alias so
		 * this also works for pages without a kernel mapping.
		 */
		virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(virt, phys);

		/* User-color view. */
		virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);

		if (alias)
			__flush_invalidate_dcache_page_alias(virt, phys);

		if (mapping)
			/* The page may be executed: resync the I-cache. */
			__invalidate_icache_page_alias(virt, phys);
	}

	/* There shouldn't be an entry in the cache for this page anymore. */
}
/*
 * Before kmapping @page at user color @vaddr, drop any stale
 * kernel-color D-cache lines for it so the two views cannot disagree.
 */
static inline void kmap_invalidate_coherent(struct page *page,
					    unsigned long vaddr)
{
	unsigned long kv;

	/* Same color: the kernel view is already coherent, nothing to do. */
	if (DCACHE_ALIAS_EQ(page_to_phys(page), vaddr))
		return;

	if (PageHighMem(page)) {
		/* No permanent kernel mapping: invalidate via a TLBTEMP
		 * alias of the physical page.
		 */
		kv = TLBTEMP_BASE_1 +
			(page_to_phys(page) & DCACHE_ALIAS_MASK);
		__invalidate_dcache_page_alias(kv, page_to_phys(page));
	} else {
		kv = (unsigned long)page_to_virt(page);
		__invalidate_dcache_page(kv);
	}
}
/*
 * copy_from_user_page - read data out of a page that is mapped into
 * user space, making the kernel-color D-cache view coherent first.
 *
 * @vma:   user VMA mapping @page (unused here, part of the generic API)
 * @page:  source page
 * @vaddr: user-space virtual address at which @page is mapped
 * @dst:   kernel destination buffer
 * @src:   kernel virtual source address inside the page
 * @len:   number of bytes to copy
 */
extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
				unsigned long vaddr, void *dst,
				const void *src, unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	/* Nonzero when the user color of vaddr differs from the kernel
	 * color of the page.
	 */
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/*
	 * Flush user page if aliased.
	 * (Note: a simple flush would be sufficient)
	 */

	if (alias) {
		/* Temporary mapping with the same color as the user address. */
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(t, phys);
	}

	memcpy(dst, src, len);
}