Example #1
0
static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len)
{
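	/*
	 * Called for ptrace-style writes into another task's page via the
	 * kernel alias (kaddr): only executable mappings need the I-cache
	 * brought back in sync with the D-cache.
	 */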
	if (vma->vm_flags & VM_EXEC)
		sync_icache_aliases(kaddr, len);
}
Example #2
0
void __sync_icache_dcache(pte_t pte)
{
	struct page *page = pte_page(pte);

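	/*
	 * PG_dcache_clean records that the page has already been made
	 * coherent with the I-cache, so the aliases are synced only the
	 * first time the (possibly compound) page is mapped for execution.
	 */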
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		sync_icache_aliases(page_address(page),
				    PAGE_SIZE << compound_order(page));
}
Example #3
void __sync_icache_dcache(pte_t pte, unsigned long addr)
{
	struct page *page = pte_page(pte);

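	/*
	 * Sync the aliases once per page. An ASID-tagged VIVT (AIVIVT)
	 * I-cache may still hold stale lines for this mapping even when
	 * the page is already marked clean, so invalidate it entirely.
	 */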
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		sync_icache_aliases(page_address(page),
				    PAGE_SIZE << compound_order(page));
	else if (icache_is_aivivt())
		__flush_icache_all();
}
Example #4
0
void __sync_icache_dcache(pte_t pte, unsigned long addr)
{
	struct page *page = pte_page(pte);

	/* no flushing needed for anonymous pages */
	if (!page_mapping(page))
		return;

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		sync_icache_aliases(page_address(page),
				    PAGE_SIZE << compound_order(page));
	else if (icache_is_aivivt())
		__flush_icache_all();
}
Example #5
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
		void *src, unsigned long len)
{
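	/* Map the XOL (execute-out-of-line) page and locate the slot for vaddr */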
	void *xol_page_kaddr = kmap_atomic(page);
	void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK);

	/* Initialize the slot */
	memcpy(dst, src, len);

	/* flush caches (dcache/icache) */
	sync_icache_aliases(dst, len);

	kunmap_atomic(xol_page_kaddr);
}