Example #1
0
int meson_trustzone_efuse(struct efuse_hal_api_arg* arg)
{
	int ret;
	if (!arg) {
		return -1;
	}
	set_cpus_allowed_ptr(current, cpumask_of(0));
	__cpuc_flush_dcache_area(__va(arg->buffer_phy), arg->size);
	outer_clean_range((arg->buffer_phy), (arg->buffer_phy + arg->size));

	__cpuc_flush_dcache_area(__va(arg->retcnt_phy), sizeof(unsigned int));
	outer_clean_range(arg->retcnt_phy, (arg->retcnt_phy + sizeof(unsigned int)));

	__cpuc_flush_dcache_area((void*)arg, sizeof(struct efuse_hal_api_arg));
	outer_clean_range(__pa(arg), __pa(arg + 1));

	ret = meson_smc_hal_api(TRUSTZONE_HAL_API_EFUSE, __pa(arg));

	if (arg->cmd == EFUSE_HAL_API_READ) {
		outer_inv_range((arg->buffer_phy), (arg->buffer_phy + arg->size));
		dmac_unmap_area(__va(arg->buffer_phy), arg->size, DMA_FROM_DEVICE);
	}
	outer_inv_range((arg->retcnt_phy), (arg->retcnt_phy + sizeof(unsigned int)));
	dmac_unmap_area(__va(arg->retcnt_phy), sizeof(unsigned int), DMA_FROM_DEVICE);

	return ret;
}
Example #2
0
/* Check protocol version returned by the PA */
static u32 tf_rpc_init(struct tf_comm *comm)
{
	u32 protocol_version;
	u32 rpc_error = RPC_SUCCESS;

	dpr_info("%s(%p)\n", __func__, comm);

	spin_lock(&(comm->lock));

#if 0
	dmac_flush_range((void *)comm->l1_buffer,
		(void *)(((u32)(comm->l1_buffer)) + PAGE_SIZE));
	outer_inv_range(__pa(comm->l1_buffer),
		__pa(comm->l1_buffer) +  PAGE_SIZE);
#endif
	protocol_version = comm->l1_buffer->protocol_version;

	if ((GET_PROTOCOL_MAJOR_VERSION(protocol_version))
			!= TF_S_PROTOCOL_MAJOR_VERSION) {
		dpr_err("SMC: Unsupported SMC Protocol PA Major "
			"Version (0x%02x, expected 0x%02x)!\n",
			GET_PROTOCOL_MAJOR_VERSION(protocol_version),
			TF_S_PROTOCOL_MAJOR_VERSION);
		rpc_error = RPC_ERROR_CONNECTION_PROTOCOL;
	} else {
		rpc_error = RPC_SUCCESS;
	}

	spin_unlock(&(comm->lock));

	return rpc_error;
}
Example #3
0
/*
 *  ======== MEM_FlushCache ========
 *  Purpose:
 *      Flush cache
 */
void MEM_FlushCache(void *pMemBuf, u32 cBytes, s32 FlushType)
{
	if (cRefs <= 0 || !pMemBuf)
		goto func_end;
	switch (FlushType) {
	/* invalidate only */
	case PROC_INVALIDATE_MEM:
		dmac_inv_range(pMemBuf, pMemBuf + cBytes);
		outer_inv_range(__pa((u32)pMemBuf), __pa((u32)pMemBuf +
				cBytes));
	break;
	/* writeback only */
	case PROC_WRITEBACK_MEM:
		dmac_clean_range(pMemBuf, pMemBuf + cBytes);
		outer_clean_range(__pa((u32)pMemBuf), __pa((u32)pMemBuf +
				  cBytes));
	break;
	/* writeback and invalidate */
	case PROC_WRITEBACK_INVALIDATE_MEM:
		dmac_flush_range(pMemBuf, pMemBuf + cBytes);
		outer_flush_range(__pa((u32)pMemBuf), __pa((u32)pMemBuf +
				  cBytes));
	break;
	default:
		GT_1trace(MEM_debugMask, GT_6CLASS, "MEM_FlushCache: invalid "
			  "FlushMemType 0x%x\n", FlushType);
	break;
	}
func_end:
	return;
}
Example #4
0
void mshci_s3c_dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir, int flush_type)
{
	unsigned long paddr;

	if (dir != DMA_FROM_DEVICE) {
		mshci_s3c_dma_cache_maint_page(page, off, size, dir,
			dmac_map_area, flush_type, 1);

		paddr = page_to_phys(page) + off;
		if (flush_type != 2) {
			outer_clean_range(paddr, paddr + size);
		}
		/* FIXME: non-speculating: flush on bidirectional mappings? */
	} else {
		paddr = page_to_phys(page) + off;

		if (flush_type != 2) {
			outer_inv_range(paddr, paddr + size);
		}
		/* FIXME: non-speculating: flush on bidirectional mappings? */

		mshci_s3c_dma_cache_maint_page(page, off, size, dir,
			dmac_unmap_area, flush_type, 1);
	}
}
Example #5
0
static int __init tzasc_test_init(void)
{
	int ret;

	tzasc_buf_phys = ALIGN(virt_to_phys(tzasc_buf), PAGE_SIZE << CONFIG_TZASC_TEST_PAGE_ORDER);

	__flush_dcache_area(phys_to_virt(tzasc_buf_phys),
			PAGE_SIZE << CONFIG_TZASC_TEST_PAGE_ORDER);
	outer_inv_range(tzasc_buf_phys, tzasc_buf_phys +
			(PAGE_SIZE << CONFIG_TZASC_TEST_PAGE_ORDER));

	ret = misc_register(&tzasc_dev);
	if (ret) {
		pr_err("misc device registration failed\n");
		goto out;
	}

	ret = device_create_file(tzasc_dev.this_device, &dev_attr_physaddr);
	if (ret) {
		pr_err("physaddr sysfs file creation failed\n");
		goto out_deregister;
	}

	return 0;

out_deregister:
	misc_deregister(&tzasc_dev);
out:
	return ret;
}
Example #6
0
/* Check protocol version returned by the PA */
static u32 tf_rpc_init(struct tf_comm *comm)
{
	u32 protocol_version;
	u32 rpc_error = RPC_SUCCESS;

	dprintk(KERN_INFO "tf_rpc_init(%p)\n", comm);

	spin_lock(&(comm->lock));

	dmac_flush_range((void *)comm->init_shared_buffer,
		(void *)(((u32)(comm->init_shared_buffer)) + PAGE_SIZE));
	outer_inv_range(__pa(comm->init_shared_buffer),
		__pa(comm->init_shared_buffer) +  PAGE_SIZE);

	protocol_version = ((struct tf_init_buffer *)
				(comm->init_shared_buffer))->protocol_version;

	if ((GET_PROTOCOL_MAJOR_VERSION(protocol_version))
			!= TF_S_PROTOCOL_MAJOR_VERSION) {
		dprintk(KERN_ERR "SMC: Unsupported SMC Protocol PA Major "
			"Version (0x%02x, expected 0x%02x)!\n",
			GET_PROTOCOL_MAJOR_VERSION(protocol_version),
			TF_S_PROTOCOL_MAJOR_VERSION);
		rpc_error = RPC_ERROR_CONNECTION_PROTOCOL;
	} else {
		rpc_error = RPC_SUCCESS;
	}

	spin_unlock(&(comm->lock));

	register_smc_public_crypto_digest();
	register_smc_public_crypto_aes();

	return rpc_error;
}
Example #7
0
inline void BCMFASTPATH
osl_cache_inv(void *va, uint size)
{
	unsigned long paddr;
	dmac_map_area(va, size, DMA_RX);
	paddr = __pa(va);
	outer_inv_range(paddr, paddr + size);

	/* WAR : Call it once more, to make sure INVALIDATE really happens.
	 * On 4708 ARM platforms, intermittently, we are seeing corrupt/dirty data after
	 * INVALIDATE. Calling outer_inv_range twice seems to solve the problem.
	 */
	dmac_map_area(va, size, DMA_RX);
	paddr = __pa(va);
	outer_inv_range(paddr, paddr + size);
}
Example #8
0
void s5p_mfc_cache_inv(const void *start_addr, unsigned long size)
{
	unsigned long paddr;

	paddr = __pa((unsigned long)start_addr);
	outer_inv_range(paddr, paddr + size);
	dmac_unmap_area(start_addr, size, DMA_FROM_DEVICE);
}
Example #9
0
static int vcm_mem_allocator(vcm_allocator *info, ump_dd_mem *descriptor)
{
	unsigned long num_blocks;
	int i;
	struct vcm_phys *phys;
	struct vcm_phys_part *part;
	int size_total = 0;
	struct ump_vcm *ump_vcm;

	ump_vcm = (struct ump_vcm*)descriptor->backend_info;
	
	ump_vcm->vcm_res =
	    vcm_make_binding(ump_vcm->vcm, descriptor->size_bytes,
	    ump_vcm->dev_id, 0);

	phys = ump_vcm->vcm_res->phys;
	part = phys->parts;
	num_blocks = phys->count;

	DBG_MSG(5,
		("Allocating page array. Size: %lu, VCM Reservation : 0x%x\n",
		 phys->count * sizeof(ump_dd_physical_block),
		 ump_vcm->vcm_res->start));

	/* Now, make a copy of the block information supplied by the user */
	descriptor->block_array =
	    (ump_dd_physical_block *) vmalloc(sizeof(ump_dd_physical_block) *
					      num_blocks);

	if (NULL == descriptor->block_array) {
		vfree(descriptor->block_array);
		DBG_MSG(1, ("Could not allocate a mem handle for function.\n"));
		return 0; /* failure */
	}

	for (i = 0; i < num_blocks; i++) {
		descriptor->block_array[i].addr = part->start;
		descriptor->block_array[i].size = part->size;

		dmac_unmap_area(phys_to_virt(part->start), part->size, DMA_FROM_DEVICE);
		outer_inv_range(part->start, part->start + part->size);

		++part;
		size_total += descriptor->block_array[i].size;
		DBG_MSG(6,
			("UMP memory created with VCM. addr 0x%x, size: 0x%x\n",
			 descriptor->block_array[i].addr,
			 descriptor->block_array[i].size));
	}

	descriptor->size_bytes = size_total;
	descriptor->nr_blocks = num_blocks;
	descriptor->ctx = NULL;

	info->num_vcm_blocks += num_blocks;
	return 1;
}
Example #10
0
int memory_engine_cache(memory_engine_t *engine, uint cmd,
				shm_driver_operation_t op)
{
	int res = 0;
	memory_node_t  *node;
	char tag_clean[] = "clean";
	char tag_invalidate[] = "invalidate";
	char tag_cleanAndinvalidate[] = "clean and invalidate";
	char *ptr_tag;
	if (engine == NULL) {
		return -EINVAL;
	}
	down(&(engine->m_mutex));

	node = memory_engine_lookup_shm_node_for_cache(&(engine->m_shm_root),
						op.m_param3, op.m_param2);
	if ((node == NULL) || (node->m_next_free != NULL)) {
		res = 0;
		if (cmd == SHM_DEVICE_CMD_INVALIDATE) {
			ptr_tag = tag_invalidate;
		} else if (cmd == SHM_DEVICE_CMD_CLEAN) {
			ptr_tag = tag_clean;
		} else {
			ptr_tag = tag_cleanAndinvalidate;
		}

		up(&(engine->m_mutex));
		return res;
	}
	up(&(engine->m_mutex));

	switch (cmd) {
	case SHM_DEVICE_CMD_INVALIDATE:
		dmac_map_area((const void *)op.m_param1,
			      op.m_param2, DMA_FROM_DEVICE);
		outer_inv_range(op.m_param3,
				op.m_param3 + op.m_param2);
		break;
	case SHM_DEVICE_CMD_CLEAN:
		dmac_map_area((const void *)op.m_param1,
			      op.m_param2, DMA_TO_DEVICE);
		outer_clean_range(op.m_param3,
				  op.m_param3 + op.m_param2);
		break;
	case SHM_DEVICE_CMD_CLEANANDINVALIDATE:
		dmac_flush_range((const void *)op.m_param1,
				 (const void *)(op.m_param1 +
						op.m_param2));
		outer_flush_range(op.m_param3,
				  op.m_param3 + op.m_param2);
		break;
	default:
		res = -ENOTTY;
	}

	return res;
}
Example #11
0
void s5p_mfc_cache_inv(const void *start_addr, unsigned long size)
{
	unsigned long paddr;
	void *cur_addr, *end_addr;

	cur_addr = (void *)((unsigned long)start_addr & PAGE_MASK);
	end_addr = cur_addr + PAGE_ALIGN(size);

	while (cur_addr < end_addr) {
		paddr = page_to_pfn(vmalloc_to_page(cur_addr));
		paddr <<= PAGE_SHIFT;
		if (paddr)
			outer_inv_range(paddr, paddr + PAGE_SIZE);
		cur_addr += PAGE_SIZE;
	}

	dmac_unmap_area(start_addr, size, DMA_FROM_DEVICE);

	/* FIXME: L2 operation optimization */
	/*
	unsigned long start, end, unitsize;
	unsigned long cur_addr, remain;

	cur_addr = (unsigned long)start_addr;
	remain = size;

	start = page_to_pfn(vmalloc_to_page(cur_addr));
	start <<= PAGE_SHIFT;
	if (start & PAGE_MASK) {
		unitsize = min((start | PAGE_MASK) - start + 1, remain);
		end = start + unitsize;
		outer_inv_range(start, end);
		remain -= unitsize;
		cur_addr += unitsize;
	}

	while (remain >= PAGE_SIZE) {
		start = page_to_pfn(vmalloc_to_page(cur_addr));
		start <<= PAGE_SHIFT;
		end = start + PAGE_SIZE;
		outer_inv_range(start, end);
		remain -= PAGE_SIZE;
		cur_addr += PAGE_SIZE;
	}

	if (remain) {
		start = page_to_pfn(vmalloc_to_page(cur_addr));
		start <<= PAGE_SHIFT;
		end = start + remain;
		outer_inv_range(start, end);
	}

	dmac_unmap_area(start_addr, size, DMA_FROM_DEVICE);
	*/
}
Example #12
0
int meson_trustzone_memconfig(void)
{
	int ret;
	struct memconfig_hal_api_arg arg;
	arg.memconfigbuf_phy_addr = __pa(memsecure);
	arg.memconfigbuf_count = MEMCONFIG_NUM;

	__cpuc_flush_dcache_area(memsecure, sizeof(memsecure));
	outer_clean_range(__pa(memsecure), (__pa(memsecure + MEMCONFIG_NUM)));
	__cpuc_flush_dcache_area(&arg, sizeof(arg));
	outer_clean_range(__pa(&arg), __pa(&arg) + sizeof(arg));

	ret = meson_smc_hal_api(TRUSTZONE_HAL_API_MEMCONFIG, __pa(&arg));

	outer_inv_range(__pa(&arg), __pa(&arg) + sizeof(arg));
	dmac_unmap_area(&arg, sizeof(arg), DMA_FROM_DEVICE);
	outer_inv_range(__pa(memsecure), __pa(memsecure + MEMCONFIG_NUM));
	dmac_unmap_area(memsecure, sizeof(memsecure), DMA_FROM_DEVICE);

	return ret;
}
Example #13
0
void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
}
Example #14
0
static void cacheperf(void *vbuf, enum cachemaintenance id)
{
	struct timespec beforets;
	struct timespec afterts;
	phys_addr_t pbuf = virt_to_phys(vbuf);
	u32 pbufend, xfer_size, i;
	long timeval;

	xfer_size = START_SIZE;
	while (xfer_size <= END_SIZE) {
		pbufend = pbuf + xfer_size;
		timeval = 0;

		for (i = 0; i < try_cnt; i++) {
			memset(vbuf, i, xfer_size);
			getnstimeofday(&beforets);

			switch (id) {
			case CM_CLEAN:
				if (l1)
					dmac_map_area(vbuf, xfer_size,
							DMA_TO_DEVICE);
				if (l2)
					outer_clean_range(pbuf, pbufend);
				break;
			case CM_INV:
				if (l2)
					outer_inv_range(pbuf, pbufend);
				if (l1)
					dmac_unmap_area(vbuf, xfer_size,
							DMA_FROM_DEVICE);
				break;
			case CM_FLUSH:
				if (l1)
					dmac_flush_range(vbuf,
					(void *)((u32) vbuf + xfer_size));
				if (l2)
					outer_flush_range(pbuf, pbufend);
				break;
			case CM_FLUSHALL:
				if (l1)
					flush_cache_all();
				if (l2)
					outer_flush_all();
				break;
			}
			getnstimeofday(&afterts);
			timeval += update_timeval(beforets, afterts);
		}
		printk(KERN_INFO "%lu\n", timeval/try_cnt);
		xfer_size *= 2;
	}
}
Example #15
0
int omap_tiler_cache_operation(struct ion_buffer *buffer, size_t len,
			unsigned long vaddr, enum cache_operation cacheop)
{
	struct omap_tiler_info *info;
	int n_pages;
	phys_addr_t paddr = tiler_virt2phys(vaddr);

	if (!buffer) {
		pr_err("%s(): buffer is NULL\n", __func__);
		return -EINVAL;
	}
	if (!buffer->cached) {
		pr_err("%s(): buffer not mapped as cacheable\n", __func__);
		return -EINVAL;
	}

	info = buffer->priv_virt;
	if (!info) {
		pr_err("%s(): tiler info of buffer is NULL\n", __func__);
		return -EINVAL;
	}

	n_pages = info->n_tiler_pages;
	if (len > (n_pages * PAGE_SIZE)) {
		pr_err("%s(): size to flush is greater than allocated size\n",
			__func__);
		return -EINVAL;
	}

	if (TILER_PIXEL_FMT_PAGE != info->fmt) {
		pr_err("%s(): only TILER 1D buffers can be cached\n",
			__func__);
		return -EINVAL;
	}

#if 0
	if (len > FULL_CACHE_FLUSH_THRESHOLD) {
		on_each_cpu(per_cpu_cache_flush_arm, NULL, 1);
		outer_flush_all();
		return 0;
	}
#endif

	if (cacheop == CACHE_FLUSH) {
		flush_cache_user_range(vaddr, vaddr + len);
		outer_flush_range(paddr, paddr + len);
	} else {
		outer_inv_range(paddr, paddr + len);
		dmac_map_area((const void*) vaddr, len, DMA_FROM_DEVICE);
	}
	return 0;
}
Example #16
0
void mfc_mem_cache_inv(const void *start_addr, unsigned long size)
{
	unsigned long paddr;

	paddr = __pa((unsigned long)start_addr);
	outer_inv_range(paddr, paddr + size);
	dmac_unmap_area(start_addr, size, DMA_FROM_DEVICE);

	/* OPT#1: kernel provide below function */
	/*
	dma_unmap_single(NULL, (void *)start_addr, size, DMA_FROM_DEVICE);
	*/
}
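The OPT#1 comment above points at dma_unmap_single(), but that helper takes the struct device and the dma_addr_t handle returned by dma_map_single(), not a kernel virtual address. The following is only a sketch of what that alternative could look like; the device pointer and the mapping step are assumptions, not taken from the MFC driver.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical variant that lets the streaming DMA API maintain both cache
 * levels instead of open-coding outer_inv_range()/dmac_unmap_area(). */
static int mfc_mem_inv_via_dma_api(struct device *dev, void *vaddr,
				   unsigned long size)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, vaddr, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... device writes into the buffer ... */

	/* Invalidates the CPU caches before the CPU reads the DMA'd data. */
	dma_unmap_single(dev, handle, size, DMA_FROM_DEVICE);
	return 0;
}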
Example #17
0
void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		unsigned long paddr = __pa(kaddr);
		outer_inv_range(paddr, paddr + size);
	}

	dmac_unmap_area(kaddr, size, dir);
}
Example #18
0
void s5p_mfc_cache_inv(void *alloc_ctx)
{
	struct vb2_cma_phys_buf *buf = (struct vb2_cma_phys_buf *)alloc_ctx;
	void *start_addr;
	unsigned long size;
	unsigned long paddr = (dma_addr_t)buf->paddr;

	start_addr = (dma_addr_t *)phys_to_virt(buf->paddr);
	size = buf->size;

	outer_inv_range(paddr, paddr + size);
	dmac_unmap_area(start_addr, size, DMA_FROM_DEVICE);
}
Example #19
0
static long kgsl_cache_range_op(unsigned long addr, int size,
					unsigned int flags)
{
#ifdef CONFIG_OUTER_CACHE
	unsigned long end;
#endif
	BUG_ON(addr & (KGSL_PAGESIZE - 1));
	BUG_ON(size & (KGSL_PAGESIZE - 1));

	if (flags & KGSL_CACHE_FLUSH)
		dmac_flush_range((const void *)addr,
				(const void *)(addr + size));
	else
		if (flags & KGSL_CACHE_CLEAN)
			dmac_clean_range((const void *)addr,
					(const void *)(addr + size));
		else
			dmac_inv_range((const void *)addr,
					(const void *)(addr + size));

#ifdef CONFIG_OUTER_CACHE
	for (end = addr; end < (addr + size); end += KGSL_PAGESIZE) {
		pte_t *pte_ptr, pte;
		unsigned long physaddr;
		if (flags & KGSL_CACHE_VMALLOC_ADDR)
			physaddr = vmalloc_to_pfn((void *)end);
		else
			if (flags & KGSL_CACHE_USER_ADDR) {
				pte_ptr = kgsl_get_pte_from_vaddr(end);
				if (!pte_ptr)
					return -EINVAL;
				pte = *pte_ptr;
				physaddr = pte_pfn(pte);
				pte_unmap(pte_ptr);
			} else
				return -EINVAL;

		physaddr <<= PAGE_SHIFT;
		if (flags & KGSL_CACHE_FLUSH)
			outer_flush_range(physaddr, physaddr + KGSL_PAGESIZE);
		else
			if (flags & KGSL_CACHE_CLEAN)
				outer_clean_range(physaddr,
					physaddr + KGSL_PAGESIZE);
			else
				outer_inv_range(physaddr,
					physaddr + KGSL_PAGESIZE);
	}
#endif
	return 0;
}
Example #20
0
void BCMFASTPATH_HOST ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
}
Example #21
0
static long kgsl_cache_range_op(unsigned long addr, int size,
					unsigned int flags)
{
#ifdef CONFIG_OUTER_CACHE
	unsigned long end;
#endif
	BUG_ON(addr & (KGSL_PAGESIZE - 1));
	BUG_ON(size & (KGSL_PAGESIZE - 1));

	if (flags & KGSL_MEMFLAGS_CACHE_FLUSH)
		dmac_flush_range((const void *)addr,
				(const void *)(addr + size));
	else
		if (flags & KGSL_MEMFLAGS_CACHE_CLEAN)
			dmac_clean_range((const void *)addr,
					(const void *)(addr + size));
		else if (flags & KGSL_MEMFLAGS_CACHE_INV)
			dmac_inv_range((const void *)addr,
					(const void *)(addr + size));

#ifdef CONFIG_OUTER_CACHE
	for (end = addr; end < (addr + size); end += KGSL_PAGESIZE) {
		unsigned long physaddr;
		if (flags & KGSL_MEMFLAGS_VMALLOC_MEM)
			physaddr = page_to_phys(vmalloc_to_page((void *) end));
		else
			if (flags & KGSL_MEMFLAGS_HOSTADDR) {
				physaddr = kgsl_virtaddr_to_physaddr(end);
				if (!physaddr) {
					KGSL_MEM_ERR
					("Unable to find physaddr for "
					"address: %x\n", (unsigned int)end);
					return -EINVAL;
				}
			} else
				return -EINVAL;

		if (flags & KGSL_MEMFLAGS_CACHE_FLUSH)
			outer_flush_range(physaddr, physaddr + KGSL_PAGESIZE);
		else
			if (flags & KGSL_MEMFLAGS_CACHE_CLEAN)
				outer_clean_range(physaddr,
					physaddr + KGSL_PAGESIZE);
			else if (flags & KGSL_MEMFLAGS_CACHE_INV)
				outer_inv_range(physaddr,
					physaddr + KGSL_PAGESIZE);
	}
#endif
	return 0;
}
Example #22
0
int omap_tiler_cache_operation(struct ion_buffer *buffer, size_t len,
			unsigned long vaddr, enum cache_operation cacheop)
{
	struct omap_tiler_info *info;
	int n_pages;

	if (!buffer) {
		pr_err("%s(): buffer is NULL\n", __func__);
		return -EINVAL;
	}
	if (!buffer->cached) {
		pr_err("%s(): buffer not mapped as cacheable\n", __func__);
		return -EINVAL;
	}

	info = buffer->priv_virt;
	if (!info) {
		pr_err("%s(): tiler info of buffer is NULL\n", __func__);
		return -EINVAL;
	}

	n_pages = info->n_tiler_pages;
	if (len > (n_pages * PAGE_SIZE)) {
		pr_err("%s(): size to flush is greater than allocated size\n",
			__func__);
		return -EINVAL;
	}

	if (TILER_PIXEL_FMT_PAGE != info->fmt) {
		pr_err("%s(): only TILER 1D buffers can be cached\n",
			__func__);
		return -EINVAL;
	}

	if (len > FULL_CACHE_FLUSH_THRESHOLD) {
		on_each_cpu(per_cpu_cache_flush_arm, NULL, 1);
		outer_flush_all();
		return 0;
	}

	flush_cache_user_range(vaddr, vaddr + len);

	if (cacheop == CACHE_FLUSH)
		outer_flush_range(info->tiler_addrs[0],
			info->tiler_addrs[0] + len);
	else
		outer_inv_range(info->tiler_addrs[0],
			info->tiler_addrs[0] + len);
	return 0;
}
Example #23
0
void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
#ifdef CONFIG_OUTER_CACHE
	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		unsigned long paddr = __pa(kaddr);
		outer_inv_range(paddr, paddr + size);
	}
#endif
	dmac_unmap_area(kaddr, size, dir);
}
Example #24
0
void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
Example #25
0
/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	unsigned long paddr;

	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	dmac_map_area(kaddr, size, dir);

	paddr = __pa(kaddr);
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
}
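The comment at the top of this example steers drivers toward the dma-mapping.h helpers rather than calling these internal routines directly. A minimal usage sketch of that recommended path, assuming a hypothetical driver whose buffer was already mapped with dma_map_single() for DMA_FROM_DEVICE; the names dev, handle and len are placeholders, not taken from any example above.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical receive cycle over a streaming mapping obtained elsewhere. */
static void example_rx_sync(struct device *dev, dma_addr_t handle, size_t len)
{
	/* Hand the buffer to the device before it writes into it; on
	 * outer-cache ARM systems this is the sort of path that lands in
	 * ___dma_single_cpu_to_dev(). */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);

	/* ... device DMA completes here ... */

	/* Take ownership back so the CPU sees fresh data; this invalidates
	 * the outer cache and the inner cache (dmac_unmap_area). */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
}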
Example #26
0
/* Function to invalidate the Cache module */
Void Cache_inv(Ptr blockPtr, UInt32 byteCnt, Bits16 type, Bool wait) {
    GT_4trace (curTrace, GT_ENTER, "Cache_inv", blockPtr, byteCnt, type, wait);

#ifdef USE_CACHE_VOID_ARG
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
    dmac_map_area(blockPtr, (size_t)byteCnt, DMA_FROM_DEVICE);
    outer_inv_range(__pa((UInt32)blockPtr),
                    __pa((UInt32)(blockPtr + byteCnt)) );
#else
    dmac_inv_range(blockPtr, (blockPtr + byteCnt) );
#endif
#else
    dmac_inv_range( (UInt32)blockPtr, (UInt32)(blockPtr + byteCnt) );
#endif

    GT_0trace (curTrace, GT_LEAVE, "Cache_inv");
}
Example #27
0
void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);

	/*
	 * Mark the D-cache clean for this page to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
		set_bit(PG_dcache_clean, &page->flags);
}
Example #28
0
void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
#ifdef CONFIG_BCM47XX
	/*
	 * Merged from Linux-2.6.37
	 * Mark the D-cache clean for this page to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
		set_bit(PG_dcache_clean, &page->flags);
#endif /* CONFIG_BCM47XX */
}
Example #29
0
/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
#ifdef CONFIG_OUTER_CACHE
	unsigned long paddr;

	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
#endif

	dmac_map_area(kaddr, size, dir);

#ifdef CONFIG_OUTER_CACHE
	paddr = __pa(kaddr);
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
#endif
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
Example #30
0
int vb2_cma_phys_cache_inv(struct vb2_buffer *vb, u32 num_planes)
{
	struct vb2_cma_phys_buf *buf;
	phys_addr_t start;
	size_t size;
	int i;

	for (i = 0; i < num_planes; i++) {
		buf = vb->planes[i].mem_priv;
		start = buf->paddr;
		size = buf->size;

		if (!buf->cacheable) {
			pr_warning("This is non-cacheable buffer allocator\n");
			return -EINVAL;
		}

		dmac_unmap_area(phys_to_virt(start), size, DMA_FROM_DEVICE);
		outer_inv_range(start, start + size);	/* L2 */
	}

	return 0;
}