Example #1
0
/*
 * Copy data into a user page while keeping the data and instruction
 * caches coherent on a machine with a virtually-indexed, aliasing
 * data cache.
 *
 * @vma:   user mapping the page belongs to (checked for VM_EXEC)
 * @page:  target page
 * @vaddr: user virtual address of the destination
 * @dst:   kernel virtual address to write through
 * @src:   source buffer
 * @len:   number of bytes to copy
 *
 * 'alias' is non-zero when the user address and the physical address
 * fall into different cache aliases, i.e. the kernel mapping and the
 * user mapping can hold separate, inconsistent cache lines.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long vaddr, void *dst, const void *src,
		unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/*
	 * Flush and invalidate the user page if aliased: push any dirty
	 * user-side lines to memory before we write through the kernel
	 * mapping, using a temporary mapping (TLBTEMP_BASE_1) that shares
	 * the user address's cache alias.
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(t, phys);
	}

	/* Copy data */

	memcpy(dst, src, len);

	/*
	 * Flush and invalidate kernel page if aliased and synchronize
	 * data and instruction caches for executable pages.
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		/* Write the kernel-side copy back to memory... */
		__flush_invalidate_dcache_range((unsigned long) dst, len);
		/* ...and drop stale instruction lines seen via the user alias. */
		if ((vma->vm_flags & VM_EXEC) != 0)
			__invalidate_icache_page_alias(t, phys);

	} else if ((vma->vm_flags & VM_EXEC) != 0) {
		/* No alias: flush/invalidate directly through dst. */
		__flush_dcache_range((unsigned long)dst,len);
		__invalidate_icache_range((unsigned long) dst, len);
	}
}
/*
 * Copy data into a user page, keeping an aliasing data cache and the
 * instruction cache coherent.
 *
 * @vma:   user mapping of the page (checked for VM_EXEC)
 * @page:  target page
 * @vaddr: user virtual address of the destination
 * @dst:   kernel virtual address to write through
 * @src:   source buffer
 * @len:   number of bytes to copy
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long vaddr, void *dst, const void *src,
		unsigned long len)
{
	unsigned long paddr = page_to_phys(page);
	/* Temporary kernel address that shares the user address's cache alias. */
	unsigned long alias_vaddr = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
	int aliasing = !DCACHE_ALIAS_EQ(vaddr, paddr);
	int executable = (vma->vm_flags & VM_EXEC) != 0;

	/* Push dirty user-side lines out before writing via dst. */
	if (aliasing)
		__flush_invalidate_dcache_page_alias(alias_vaddr, paddr);

	memcpy(dst, src, len);

	if (aliasing) {
		/* Write the kernel-side copy back to memory... */
		__flush_invalidate_dcache_range((unsigned long) dst, len);
		/* ...then drop stale I-cache lines seen via the user alias. */
		if (executable)
			__invalidate_icache_page_alias(alias_vaddr, paddr);
	} else if (executable) {
		/* No alias: synchronize D- and I-cache directly through dst. */
		__flush_dcache_range((unsigned long) dst, len);
		__invalidate_icache_range((unsigned long) dst, len);
	}
}
Example #3
0
/*
 * Make an address range coherent between the data and instruction
 * caches: write back and invalidate the D-cache, then invalidate the
 * I-cache so subsequent instruction fetches refill from memory.
 *
 * @start_addr: first address of the range
 * @size:       length of the range in bytes
 *
 * NOTE(review): assumes both helpers take (start, size), the same
 * convention gen_return_code() uses in this file — confirm for this
 * port, since __invalidate_cache_sigtramp() below appears to pass an
 * end address instead.
 */
void flush_cache(ulong start_addr, ulong size)
{
	/* D-cache first, so the I-cache refill observes the new data. */
	__flush_invalidate_dcache_range(start_addr, size);
	__invalidate_icache_range(start_addr, size);
}
/*
 * Write a 6-byte signal-return trampoline into user memory at
 * @codemem:
 *
 *	movi	a2, <syscall number>	(3 bytes)
 *	syscall				(3 bytes)
 *
 * @codemem:          user-space buffer the trampoline is written to
 * @use_rt_sigreturn: non-zero selects __NR_rt_sigreturn, zero selects
 *                    __NR_sigreturn as the MOVI immediate
 *
 * Returns 0 on success, or a non-zero error if any __put_user faulted.
 */
static int
gen_return_code(unsigned char *codemem, unsigned int use_rt_sigreturn)
{
	unsigned int retcall;
	int err = 0;

#if 0
	/* Ignoring SA_RESTORER for now; it's supposed to be obsolete,
	 * and the xtensa glibc doesn't use it.
	 */
	if (ka->sa.sa_flags & SA_RESTORER) {
		regs->pr = (unsigned long) ka->sa.sa_restorer;
	} else
#endif /* 0 */
	{

#if (__NR_sigreturn > 255) || (__NR_rt_sigreturn > 255)

/* The 12-bit immediate is really split up within the 24-bit MOVI
 * instruction.  As long as the above system call numbers fit within
 * 8-bits, the following code works fine. See the Xtensa ISA for
 * details.
 */

#error Generating the MOVI instruction below breaks!
#endif

		retcall = use_rt_sigreturn ? __NR_rt_sigreturn : __NR_sigreturn;

#ifdef __XTENSA_EB__   /* Big Endian version */
		/* Generate instruction:  MOVI a2, retcall */
		err |= __put_user(0x22, &codemem[0]);
		err |= __put_user(0x0a, &codemem[1]);
		err |= __put_user(retcall, &codemem[2]);
		/* Generate instruction:  SYSCALL */
		err |= __put_user(0x00, &codemem[3]);
		err |= __put_user(0x05, &codemem[4]);
		err |= __put_user(0x00, &codemem[5]);

#elif defined __XTENSA_EL__   /* Little Endian version */
		/* Generate instruction:  MOVI a2, retcall
		 * (same encoding as above with the byte nibbles swapped
		 * for little-endian instruction memory) */
		err |= __put_user(0x22, &codemem[0]);
		err |= __put_user(0xa0, &codemem[1]);
		err |= __put_user(retcall, &codemem[2]);
		/* Generate instruction:  SYSCALL */
		err |= __put_user(0x00, &codemem[3]);
		err |= __put_user(0x50, &codemem[4]);
		err |= __put_user(0x00, &codemem[5]);
#else
#error Must use compiler for Xtensa processors.
#endif
	}

	/* Flush generated code out of the data cache and invalidate the
	 * corresponding I-cache lines, so the CPU fetches the trampoline
	 * we just wrote rather than stale instructions. */

	if (err == 0) {
		__invalidate_icache_range((unsigned long)codemem, 6UL);
		__flush_invalidate_dcache_range((unsigned long)codemem, 6UL);
	}

	return err;
}
Example #5
0
/*
 * Invalidate the instruction cache over the 8-byte signal trampoline
 * at @addr so the CPU fetches the freshly written code.
 *
 * NOTE(review): the second argument here is an END address (addr + 8),
 * whereas gen_return_code() above passes a SIZE (6UL) to
 * __invalidate_icache_range().  Confirm which convention this port's
 * helper uses — one of the two call sites is likely covering the wrong
 * range.
 */
void __invalidate_cache_sigtramp(unsigned long addr)
{
	__invalidate_icache_range(addr, addr + 8);
}