Example #1
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t flag)
{
	unsigned long ret;
	unsigned long uncached = 0;

	/* ignore region specifiers */

	flag &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		flag |= GFP_DMA;
	ret = (unsigned long)__get_free_pages(flag, get_order(size));

	if (ret == 0)
		return NULL;

	/* We currently don't support coherent memory outside KSEG */

	BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
	       ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);

	/* 'ret' is known to be non-zero here: zero the buffer, publish the
	 * bus address through *handle, and return the uncached KSEG alias. */
	memset((void *)ret, 0, size);
	uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
	*handle = virt_to_bus((void *)ret);
	__flush_invalidate_dcache_range(ret, size);

	return (void *)uncached;
}
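For context, a driver would keep the returned (uncached) pointer for CPU accesses and program the device with the bus address returned through *handle. A minimal caller sketch; the function name, device pointer, and buffer size are purely hypothetical:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical caller: allocate one page of coherent DMA memory.
 * The CPU pointer returned is the uncached KSEG alias, so no cache
 * maintenance is needed on later accesses; the device should be
 * programmed with the address written into *bus. */
static void *alloc_ring(struct device *dev, dma_addr_t *bus)
{
	return dma_alloc_coherent(dev, PAGE_SIZE, bus, GFP_KERNEL);
}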
Example #2
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long vaddr, void *dst, const void *src,
		unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/* Flush and invalidate user page if aliased. */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(t, phys);
	}

	/* Copy data */
	
	memcpy(dst, src, len);

	/*
	 * Flush and invalidate kernel page if aliased, and synchronize
	 * data and instruction caches for executable pages.
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		__flush_invalidate_dcache_range((unsigned long) dst, len);
		if ((vma->vm_flags & VM_EXEC) != 0)
			__invalidate_icache_page_alias(t, phys);

	} else if ((vma->vm_flags & VM_EXEC) != 0) {
		__flush_dcache_range((unsigned long) dst, len);
		__invalidate_icache_range((unsigned long) dst, len);
	}
}
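The alias variable above comes from DCACHE_ALIAS_EQ, which asks whether two addresses fall on the same cache color, i.e. whether the virtual-index bits above the page offset agree. As a sketch of the idea (the real mask width depends on the configured cache geometry):

/* Addresses alias to the same dcache lines when their index bits
 * above PAGE_SHIFT match; DCACHE_ALIAS_MASK selects those bits. */
#define DCACHE_ALIAS_EQ(a, b)	((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)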
Example #3
void consistent_sync(void *vaddr, size_t size, int direction)
{
	switch (direction) {
	case PCI_DMA_NONE:
		BUG();
	case PCI_DMA_FROMDEVICE:        /* invalidate only */
		__invalidate_dcache_range((unsigned long)vaddr,
					  (unsigned long)size);
		break;

	case PCI_DMA_TODEVICE:          /* writeback only */
	case PCI_DMA_BIDIRECTIONAL:     /* writeback and invalidate */
		__flush_invalidate_dcache_range((unsigned long)vaddr,
						(unsigned long)size);
		break;
	}
}
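A typical caller brackets a streaming transfer with this: write back before the device reads a buffer, invalidate before the CPU reads what the device wrote. A hypothetical receive-path sketch (names and sizes are illustration only):

/* Hypothetical: prepare a buffer the device is about to fill. */
static void start_rx(void *buf, size_t len)
{
	/* Drop any stale cached lines so the CPU re-reads the
	 * device's data from memory after the transfer completes. */
	consistent_sync(buf, len, PCI_DMA_FROMDEVICE);
	/* ... hand the buffer's bus address to the device ... */
}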
Example #4
void flush_dcache_range(ulong start_addr, ulong end_addr)
{
	/* The underlying primitive takes (address, length), so convert
	 * the half-open [start_addr, end_addr) range into a byte count. */
	__flush_invalidate_dcache_range(start_addr, end_addr - start_addr);
}
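Note the conversion: callers pass an address range, while the primitive underneath wants a length. A hypothetical call site (buf and len are illustration only):

/* Write a freshly filled buffer back to memory before a device
 * (or another bus master) reads it. */
flush_dcache_range((ulong)buf, (ulong)buf + len);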
Example #5
void flush_cache(ulong start_addr, ulong size)
{
	/* Write dirty data lines back to memory, then discard any
	 * stale instruction-cache lines covering the same range. */
	__flush_invalidate_dcache_range(start_addr, size);
	__invalidate_icache_range(start_addr, size);
}
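This writeback-then-icache-invalidate sequence is the usual step after placing code in memory, e.g. a boot loader making a just-loaded image executable. A hypothetical call (load_addr and image_size are illustration only):

/* Make a freshly loaded image safe to fetch as instructions. */
flush_cache(load_addr, image_size);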
Example #6
static int
gen_return_code(unsigned char *codemem, unsigned int use_rt_sigreturn)
{
	unsigned int retcall;
	int err = 0;

#if 0
	/* Ignoring SA_RESTORER for now; it's supposed to be obsolete,
	 * and the xtensa glibc doesn't use it.
	 */
	if (ka->sa.sa_flags & SA_RESTORER) {
		regs->pr = (unsigned long) ka->sa.sa_restorer;
	} else
#endif /* 0 */
	{

#if (__NR_sigreturn > 255) || (__NR_rt_sigreturn > 255)

/* The 12-bit immediate is really split up within the 24-bit MOVI
 * instruction.  As long as the above system call numbers fit within
 * 8 bits, the following code works fine. See the Xtensa ISA for
 * details.
 */

#error Generating the MOVI instruction below breaks!
#endif

		retcall = use_rt_sigreturn ? __NR_rt_sigreturn : __NR_sigreturn;

#ifdef __XTENSA_EB__   /* Big Endian version */
		/* Generate instruction:  MOVI a2, retcall */
		err |= __put_user(0x22, &codemem[0]);
		err |= __put_user(0x0a, &codemem[1]);
		err |= __put_user(retcall, &codemem[2]);
		/* Generate instruction:  SYSCALL */
		err |= __put_user(0x00, &codemem[3]);
		err |= __put_user(0x05, &codemem[4]);
		err |= __put_user(0x00, &codemem[5]);

#elif defined __XTENSA_EL__   /* Little Endian version */
		/* Generate instruction:  MOVI a2, retcall */
		err |= __put_user(0x22, &codemem[0]);
		err |= __put_user(0xa0, &codemem[1]);
		err |= __put_user(retcall, &codemem[2]);
		/* Generate instruction:  SYSCALL */
		err |= __put_user(0x00, &codemem[3]);
		err |= __put_user(0x50, &codemem[4]);
		err |= __put_user(0x00, &codemem[5]);
#else
#error Must use compiler for Xtensa processors.
#endif
	}

	/* Flush the generated code out of the data cache and drop any
	 * stale copy from the instruction cache. */

	if (err == 0) {
		__invalidate_icache_range((unsigned long)codemem, 6UL);
		__flush_invalidate_dcache_range((unsigned long)codemem, 6UL);
	}

	return err;
}
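For reference, the six __put_user() stores above produce the following byte image in the little-endian case; retcall occupies byte 2, which is why the #if guard above insists the syscall numbers fit in 8 bits. A sketch (array name hypothetical):

/* MOVI a2, retcall; SYSCALL (bytes as emitted by the EL branch). */
unsigned char expect[6] = { 0x22, 0xa0, retcall, 0x00, 0x50, 0x00 };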