Example #1
static void __dma_clear_buffer(struct page *page, size_t size)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (!PageHighMem(page)) {
		void *ptr = page_address(page);
		if (ptr) {
			memset(ptr, 0, size);
			dmac_flush_range(ptr, ptr + size);
			outer_flush_range(__pa(ptr), __pa(ptr) + size);
		}
	} else {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		outer_flush_range(base, end);
	}
}
Example #2
/* Function to write back invalidate the Cache module */
Void Cache_wbInv(Ptr blockPtr, UInt32 byteCnt, Bits16 type, Bool wait) {
    GT_4trace (curTrace, GT_ENTER, "Cache_wbInv", blockPtr, byteCnt, type, wait);
#if 0

/*
 * It appears that this #if 0'ed code doesn't actually perform the
 * invalidate part of the wbInv, and it appears that Cache_wb() and Cache_inv()
 * work properly, so for now we implement wbInv as a combination of the two
 * individual functions.
*/

#ifdef USE_CACHE_VOID_ARG
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)
                dmac_map_area(blockPtr, (size_t)byteCnt, DMA_BIDIRECTIONAL);
                outer_flush_range(__pa((UInt32)blockPtr),
                                  __pa((UInt32)(blockPtr+byteCnt)) );
#else
                dmac_flush_range(blockPtr, (blockPtr+byteCnt) );
#endif
#else
                dmac_flush_range( (UInt32)blockPtr, (UInt32)(blockPtr + byteCnt) );
#endif

#else

    Cache_wb(blockPtr, byteCnt, type, wait);
    Cache_inv(blockPtr, byteCnt, type, wait);

#endif

    GT_0trace (curTrace, GT_LEAVE, "Cache_wbInv");
}
Example #3
static int mb_rproc_start(struct rproc *rproc)
{
	struct device *dev = rproc->dev.parent;
	struct platform_device *pdev = to_platform_device(dev);
	struct mb_rproc_pdata *local = platform_get_drvdata(pdev);
	const struct firmware *fw;
	int ret;

	dev_info(dev, "%s\n", __func__);
	INIT_WORK(&workqueue, handle_event);

	flush_cache_all();
	outer_flush_range(local->mem_start, local->mem_end);

	remoteprocdev = pdev;

	ret = request_firmware(&fw, local->bootloader, &pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "request_firmware failed\n");
		return ret;
	}
	/* Copy bootloader to memory */
	memcpy(local->vbase, fw->data, fw->size);
	release_firmware(fw);

	/* Just for sure synchronize memories */
	dsb();

	/* Release Microblaze from reset */
	gpio_set_value(local->reset_gpio, 0);

	return 0;
}
Example #4
static int32_t isp_binging4awb_buf_alloc(struct isp_k_private *isp_private, uint32_t len)
{
	int32_t ret = 0x00;
#ifndef CONFIG_64BIT
	uint32_t buf = 0x00;
	void *ptr = NULL;
#endif

	if (0x00 < len) {
		isp_private->bing4awb_buf_len = len;
		isp_private->bing4awb_buf_order = get_order(len);
		isp_private->bing4awb_buf_addr = (unsigned long)__get_free_pages(GFP_KERNEL | __GFP_COMP,
			isp_private->bing4awb_buf_order);
		if (NULL == (void *)isp_private->bing4awb_buf_addr) {
			printk("isp_binging4awb_buf_alloc: memory error, addr:0x%lx, len:0x%x, order:0x%x.\n",
				isp_private->bing4awb_buf_addr,
				isp_private->bing4awb_buf_len,
				isp_private->bing4awb_buf_order);
			return -1;
		}
	#ifndef CONFIG_64BIT
		ptr = (void *)isp_private->bing4awb_buf_addr;
		buf = virt_to_phys((volatile void *)isp_private->bing4awb_buf_addr);
		dmac_flush_range(ptr, ptr + len);
		outer_flush_range(__pa(ptr), __pa(ptr) + len);
	#endif
	}

	return ret;
}
Example #5
void nvmap_kunmap(struct nvmap_handle_ref *ref, unsigned int pagenum,
		  void *addr)
{
	struct nvmap_handle *h;
	phys_addr_t paddr;
	pte_t **pte;

	BUG_ON(!addr || !ref);
	h = ref->handle;

	if (nvmap_find_cache_maint_op(h->dev, h)) {
		struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
		/* acquire pin lock to ensure maintenance is done before
		 * handle is pinned */
		mutex_lock(&share->pin_lock);
		nvmap_cache_maint_ops_flush(h->dev, h);
		mutex_unlock(&share->pin_lock);
	}

	if (h->heap_pgalloc)
		paddr = page_to_phys(h->pgalloc.pages[pagenum]);
	else
		paddr = h->carveout->base + pagenum * PAGE_SIZE;

	if (h->flags != NVMAP_HANDLE_UNCACHEABLE &&
	    h->flags != NVMAP_HANDLE_WRITE_COMBINE) {
		dmac_flush_range(addr, addr + PAGE_SIZE);
		outer_flush_range(paddr, paddr + PAGE_SIZE);
	}

	pte = nvmap_vaddr_to_pte(nvmap_dev, (unsigned long)addr);
	nvmap_free_pte(nvmap_dev, pte);
	nvmap_handle_put(h);
}
Example #6
static int omap_tiler_alloc_dynamicpages(struct omap_tiler_info *info)
{
	int i;
	int ret;
	struct page *pg;

	for (i = 0; i < info->n_phys_pages; i++) {
		pg = alloc_page(GFP_KERNEL | GFP_DMA | GFP_HIGHUSER);
		if (!pg) {
			ret = -ENOMEM;
			pr_err("%s: alloc_page failed\n",
				__func__);
			goto err_page_alloc;
		}
		info->phys_addrs[i] = page_to_phys(pg);
		dmac_flush_range((void *)page_address(pg),
			(void *)page_address(pg) + PAGE_SIZE);
		outer_flush_range(info->phys_addrs[i],
			info->phys_addrs[i] + PAGE_SIZE);
	}
	return 0;

err_page_alloc:
	for (i -= 1; i >= 0; i--) {
		pg = phys_to_page(info->phys_addrs[i]);
		__free_page(pg);
	}
	return ret;
}
Example #7
/*
 *  ======== MEM_FlushCache ========
 *  Purpose:
 *      Flush cache
 */
void MEM_FlushCache(void *pMemBuf, u32 cBytes, s32 FlushType)
{
	if (cRefs <= 0 || !pMemBuf)
		goto func_end;
	switch (FlushType) {
	/* invalidate only */
	case PROC_INVALIDATE_MEM:
		dmac_inv_range(pMemBuf, pMemBuf + cBytes);
		outer_inv_range(__pa((u32)pMemBuf), __pa((u32)pMemBuf +
				cBytes));
	break;
	/* writeback only */
	case PROC_WRITEBACK_MEM:
		dmac_clean_range(pMemBuf, pMemBuf + cBytes);
		outer_clean_range(__pa((u32)pMemBuf), __pa((u32)pMemBuf +
				  cBytes));
	break;
	/* writeback and invalidate */
	case PROC_WRITEBACK_INVALIDATE_MEM:
		dmac_flush_range(pMemBuf, pMemBuf + cBytes);
		outer_flush_range(__pa((u32)pMemBuf), __pa((u32)pMemBuf +
				  cBytes));
	break;
	default:
		GT_1trace(MEM_debugMask, GT_6CLASS, "MEM_FlushCache: invalid "
			  "FlushMemType 0x%x\n", FlushType);
	break;
	}
func_end:
	return;
}
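MEM_FlushCache above shows the two-level pattern that recurs throughout these examples: inner (L1) maintenance is done on the kernel virtual range with the dmac_* helpers, and outer (L2) maintenance (e.g. a PL310) is done on the matching physical range with the outer_*_range helpers. A minimal sketch of that pairing for a direct-mapped (lowmem) buffer follows; the function name example_flush_buffer and its parameters are illustrative, header locations vary between kernel versions, and the sketch assumes __pa() is valid for the address (i.e. not highmem or vmalloc memory).

#include <linux/types.h>
#include <asm/cacheflush.h>	/* dmac_flush_range() */
#include <asm/outercache.h>	/* outer_flush_range() */
#include <asm/memory.h>		/* __pa() */

/*
 * Sketch only: write back and invalidate a kernel lowmem buffer through
 * both cache levels.  'buf' must be a direct-mapped address so that
 * __pa() yields the physical range the outer cache operates on.
 */
static void example_flush_buffer(void *buf, size_t len)
{
	phys_addr_t pa = __pa(buf);

	dmac_flush_range(buf, buf + len);	/* inner (L1): virtual addresses */
	outer_flush_range(pa, pa + len);	/* outer (L2): physical addresses */
}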
Example #8
static void tegra114_flush_dcache(struct page *page, unsigned long offset,
				  size_t size)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	void *virt = page_address(page) + offset;

	__cpuc_flush_dcache_area(virt, size);
	outer_flush_range(phys, phys + size);
}
Example #9
int memory_engine_cache(memory_engine_t *engine, uint cmd,
				shm_driver_operation_t op)
{
	int res = 0;
	memory_node_t  *node;
	char tag_clean[] = "clean";
	char tag_invalidate[] = "invalidate";
	char tag_cleanAndinvalidate[] = "clean and invalidate";
	char *ptr_tag;
	if (engine == NULL) {
		return -EINVAL;
	}
	down(&(engine->m_mutex));

	node = memory_engine_lookup_shm_node_for_cache(&(engine->m_shm_root),
						op.m_param3, op.m_param2);
	if ((node == NULL) || (node->m_next_free != NULL)) {
		res = 0;
		if (cmd == SHM_DEVICE_CMD_INVALIDATE) {
			ptr_tag = tag_invalidate;
		} else if (cmd == SHM_DEVICE_CMD_CLEAN) {
			ptr_tag = tag_clean;
		} else {
			ptr_tag = tag_cleanAndinvalidate;
		}

		up(&(engine->m_mutex));
		return res;
	}
	up(&(engine->m_mutex));

	switch (cmd) {
	case SHM_DEVICE_CMD_INVALIDATE:
		dmac_map_area((const void *)op.m_param1,
			      op.m_param2, DMA_FROM_DEVICE);
		outer_inv_range(op.m_param3,
				op.m_param3 + op.m_param2);
		break;
	case SHM_DEVICE_CMD_CLEAN:
		dmac_map_area((const void *)op.m_param1,
			      op.m_param2, DMA_TO_DEVICE);
		outer_clean_range(op.m_param3,
				  op.m_param3 + op.m_param2);
		break;
	case SHM_DEVICE_CMD_CLEANANDINVALIDATE:
		dmac_flush_range((const void *)op.m_param1,
				 (const void *)(op.m_param1 +
						op.m_param2));
		outer_flush_range(op.m_param3,
				  op.m_param3 + op.m_param2);
		break;
	default:
		res = -ENOTTY;
	}

	return res;
}
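The switch above pairs each inner-cache call with the matching outer-cache range operation: dmac_map_area(..., DMA_TO_DEVICE) with outer_clean_range() for a clean, and a DMA_FROM_DEVICE operation with outer_inv_range() for an invalidate. Below is a hedged sketch of those two pairings, using the ordering from Example #14 (outer invalidate before the inner unmap); the helper names are illustrative, header locations vary between kernel versions, and vaddr/paddr are assumed to describe the same lowmem buffer.

#include <linux/dma-mapping.h>	/* DMA_TO_DEVICE, DMA_FROM_DEVICE */
#include <asm/cacheflush.h>	/* dmac_map_area(), dmac_unmap_area() */
#include <asm/outercache.h>	/* outer_clean_range(), outer_inv_range() */

/* Sketch: clean both cache levels before a device reads the buffer. */
static void example_clean_for_device(void *vaddr, phys_addr_t paddr, size_t len)
{
	dmac_map_area(vaddr, len, DMA_TO_DEVICE);	/* inner (L1) clean */
	outer_clean_range(paddr, paddr + len);		/* outer (L2) clean */
}

/* Sketch: invalidate both cache levels before the CPU reads data a device wrote. */
static void example_inv_for_cpu(void *vaddr, phys_addr_t paddr, size_t len)
{
	outer_inv_range(paddr, paddr + len);		/* outer (L2) first */
	dmac_unmap_area(vaddr, len, DMA_FROM_DEVICE);	/* then inner (L1) */
}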
Example #10
int __cpuinit zynq_cpun_start(u32 address, int cpu)
{
	u32 trampoline_code_size = &zynq_secondary_trampoline_end -
						&zynq_secondary_trampoline;

	if (cpu > ncores) {
		pr_warn("CPU No. is not available in the system\n");
		return -1;
	}

	/* MS: Expectation that SLCR are directly map and accessible */
	/* Not possible to jump to non aligned address */
	if (!(address & 3) && (!address || (address >= trampoline_code_size))) {
		/* Store pointer to ioremap area which points to address 0x0 */
		static u8 __iomem *zero;
		u32 trampoline_size = &zynq_secondary_trampoline_jump -
						&zynq_secondary_trampoline;

		zynq_slcr_cpu_stop(cpu);

		if (__pa(PAGE_OFFSET)) {
			zero = ioremap(0, trampoline_code_size);
			if (!zero) {
				pr_warn("BOOTUP jump vectors not accessible\n");
				return -1;
			}
		} else {
			zero = (__force u8 __iomem *)PAGE_OFFSET;
		}

		/*
		 * This is elegant way how to jump to any address
		 * 0x0: Load address at 0x8 to r0
		 * 0x4: Jump by mov instruction
		 * 0x8: Jumping address
		 */
		memcpy((__force void *)zero, &zynq_secondary_trampoline,
						trampoline_size);
		writel(address, zero + trampoline_size);

		flush_cache_all();
		outer_flush_range(0, trampoline_code_size);
		smp_wmb();

		if (__pa(PAGE_OFFSET))
			iounmap(zero);

		zynq_slcr_cpu_start(cpu);

		return 0;
	}

	pr_warn("Can't start CPU%d: Wrong starting address %x\n", cpu, address);

	return -1;
}
Example #11
static void handle_event(struct work_struct *work)
{
	struct mb_rproc_pdata *local = platform_get_drvdata(remoteprocdev);

	flush_cache_all();
	outer_flush_range(local->mem_start, local->mem_end);

	if (rproc_vq_interrupt(local->rproc, 0) == IRQ_NONE)
		dev_info(&remoteprocdev->dev, "no message found in vqid 0\n");
}
Example #12
void dmmupwl_flush_cache(void *virAddr, IM_UINT32 phyAddr, IM_INT32 size)
{
	IM_INFOMSG((IM_STR("%s(virAddr=0x%x, phyAddr=0x%x, size=%d)"), IM_STR(_IM_FUNC_), (IM_INT32)virAddr, phyAddr, size));

	// Flush L1 using virtual address.
	dmac_flush_range(virAddr, (void *)(virAddr + size - 1));

	// Flush L2 using physical address.
	outer_flush_range(phyAddr, phyAddr + size - 1);	
}
Example #13
static void __dma_clear_buffer(struct page *page, size_t size)
{
	void *ptr;
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	memset(ptr, 0, size);
	dmac_flush_range(ptr, ptr + size);
	outer_flush_range(__pa(ptr), __pa(ptr) + size);
}
Example #14
static void cacheperf(void *vbuf, enum cachemaintenance id)
{
	struct timespec beforets;
	struct timespec afterts;
	phys_addr_t pbuf = virt_to_phys(vbuf);
	u32 pbufend, xfer_size, i;
	long timeval;

	xfer_size = START_SIZE;
	while (xfer_size <= END_SIZE) {
		pbufend = pbuf + xfer_size;
		timeval = 0;

		for (i = 0; i < try_cnt; i++) {
			memset(vbuf, i, xfer_size);
			getnstimeofday(&beforets);

			switch (id) {
			case CM_CLEAN:
				if (l1)
					dmac_map_area(vbuf, xfer_size,
							DMA_TO_DEVICE);
				if (l2)
					outer_clean_range(pbuf, pbufend);
				break;
			case CM_INV:
				if (l2)
					outer_inv_range(pbuf, pbufend);
				if (l1)
					dmac_unmap_area(vbuf, xfer_size,
							DMA_FROM_DEVICE);
				break;
			case CM_FLUSH:
				if (l1)
					dmac_flush_range(vbuf,
					(void *)((u32) vbuf + xfer_size));
				if (l2)
					outer_flush_range(pbuf, pbufend);
				break;
			case CM_FLUSHALL:
				if (l1)
					flush_cache_all();
				if (l2)
					outer_flush_all();
				break;
			}
			getnstimeofday(&afterts);
			timeval += update_timeval(beforets, afterts);
		}
		printk(KERN_INFO "%lu\n", timeval/try_cnt);
		xfer_size *= 2;
	}
}
Example #15
int omap_tiler_cache_operation(struct ion_buffer *buffer, size_t len,
			unsigned long vaddr, enum cache_operation cacheop)
{
	struct omap_tiler_info *info;
	int n_pages;
	phys_addr_t paddr = tiler_virt2phys(vaddr);

	if (!buffer) {
		pr_err("%s(): buffer is NULL\n", __func__);
		return -EINVAL;
	}
	if (!buffer->cached) {
		pr_err("%s(): buffer not mapped as cacheable\n", __func__);
		return -EINVAL;
	}

	info = buffer->priv_virt;
	if (!info) {
		pr_err("%s(): tiler info of buffer is NULL\n", __func__);
		return -EINVAL;
	}

	n_pages = info->n_tiler_pages;
	if (len > (n_pages * PAGE_SIZE)) {
		pr_err("%s(): size to flush is greater than allocated size\n",
			__func__);
		return -EINVAL;
	}

	if (TILER_PIXEL_FMT_PAGE != info->fmt) {
		pr_err("%s(): only TILER 1D buffers can be cached\n",
			__func__);
		return -EINVAL;
	}

#if 0
	if (len > FULL_CACHE_FLUSH_THRESHOLD) {
		on_each_cpu(per_cpu_cache_flush_arm, NULL, 1);
		outer_flush_all();
		return 0;
	}
#endif

	if (cacheop == CACHE_FLUSH) {
		flush_cache_user_range(vaddr, vaddr + len);
		outer_flush_range(paddr, paddr + len);
	} else {
		outer_inv_range(paddr, paddr + len);
		dmac_map_area((const void*) vaddr, len, DMA_FROM_DEVICE);
	}
	return 0;
}
Example #16
static void handle_event(struct work_struct *work)
{
	struct virtqueue *vq;

	flush_cache_all();

	outer_flush_range(zynq_rpmsg_p->mem_start, zynq_rpmsg_p->mem_end);

	vq = zynq_rpmsg_p->vrings[0].vq;

	if (vring_interrupt(0, vq) == IRQ_NONE)
		dev_dbg(&zynq_rpmsg_platform->dev, "no message found in vqid 0\n");
}
Example #17
static long kgsl_cache_range_op(unsigned long addr, int size,
					unsigned int flags)
{
#ifdef CONFIG_OUTER_CACHE
	unsigned long end;
#endif
	BUG_ON(addr & (KGSL_PAGESIZE - 1));
	BUG_ON(size & (KGSL_PAGESIZE - 1));

	if (flags & KGSL_CACHE_FLUSH)
		dmac_flush_range((const void *)addr,
				(const void *)(addr + size));
	else
		if (flags & KGSL_CACHE_CLEAN)
			dmac_clean_range((const void *)addr,
					(const void *)(addr + size));
		else
			dmac_inv_range((const void *)addr,
					(const void *)(addr + size));

#ifdef CONFIG_OUTER_CACHE
	for (end = addr; end < (addr + size); end += KGSL_PAGESIZE) {
		pte_t *pte_ptr, pte;
		unsigned long physaddr;
		if (flags & KGSL_CACHE_VMALLOC_ADDR)
			physaddr = vmalloc_to_pfn((void *)end);
		else
			if (flags & KGSL_CACHE_USER_ADDR) {
				pte_ptr = kgsl_get_pte_from_vaddr(end);
				if (!pte_ptr)
					return -EINVAL;
				pte = *pte_ptr;
				physaddr = pte_pfn(pte);
				pte_unmap(pte_ptr);
			} else
				return -EINVAL;

		physaddr <<= PAGE_SHIFT;
		if (flags & KGSL_CACHE_FLUSH)
			outer_flush_range(physaddr, physaddr + KGSL_PAGESIZE);
		else
			if (flags & KGSL_CACHE_CLEAN)
				outer_clean_range(physaddr,
					physaddr + KGSL_PAGESIZE);
			else
				outer_inv_range(physaddr,
					physaddr + KGSL_PAGESIZE);
	}
#endif
	return 0;
}
Example #18
int omap_tiler_cache_operation(struct ion_buffer *buffer, size_t len,
			unsigned long vaddr, enum cache_operation cacheop)
{
	struct omap_tiler_info *info;
	int n_pages;

	if (!buffer) {
		pr_err("%s(): buffer is NULL\n", __func__);
		return -EINVAL;
	}
	if (!buffer->cached) {
		pr_err("%s(): buffer not mapped as cacheable\n", __func__);
		return -EINVAL;
	}

	info = buffer->priv_virt;
	if (!info) {
		pr_err("%s(): tiler info of buffer is NULL\n", __func__);
		return -EINVAL;
	}

	n_pages = info->n_tiler_pages;
	if (len > (n_pages * PAGE_SIZE)) {
		pr_err("%s(): size to flush is greater than allocated size\n",
			__func__);
		return -EINVAL;
	}

	if (TILER_PIXEL_FMT_PAGE != info->fmt) {
		pr_err("%s(): only TILER 1D buffers can be cached\n",
			__func__);
		return -EINVAL;
	}

	if (len > FULL_CACHE_FLUSH_THRESHOLD) {
		on_each_cpu(per_cpu_cache_flush_arm, NULL, 1);
		outer_flush_all();
		return 0;
	}

	flush_cache_user_range(vaddr, vaddr + len);

	if (cacheop == CACHE_FLUSH)
		outer_flush_range(info->tiler_addrs[0],
			info->tiler_addrs[0] + len);
	else
		outer_inv_range(info->tiler_addrs[0],
			info->tiler_addrs[0] + len);
	return 0;
}
Example #19
static long kgsl_cache_range_op(unsigned long addr, int size,
					unsigned int flags)
{
#ifdef CONFIG_OUTER_CACHE
	unsigned long end;
#endif
	BUG_ON(addr & (KGSL_PAGESIZE - 1));
	BUG_ON(size & (KGSL_PAGESIZE - 1));

	if (flags & KGSL_MEMFLAGS_CACHE_FLUSH)
		dmac_flush_range((const void *)addr,
				(const void *)(addr + size));
	else
		if (flags & KGSL_MEMFLAGS_CACHE_CLEAN)
			dmac_clean_range((const void *)addr,
					(const void *)(addr + size));
		else if (flags & KGSL_MEMFLAGS_CACHE_INV)
			dmac_inv_range((const void *)addr,
					(const void *)(addr + size));

#ifdef CONFIG_OUTER_CACHE
	for (end = addr; end < (addr + size); end += KGSL_PAGESIZE) {
		unsigned long physaddr;
		if (flags & KGSL_MEMFLAGS_VMALLOC_MEM)
			physaddr = page_to_phys(vmalloc_to_page((void *) end));
		else
			if (flags & KGSL_MEMFLAGS_HOSTADDR) {
				physaddr = kgsl_virtaddr_to_physaddr(end);
				if (!physaddr) {
					KGSL_MEM_ERR
					("Unable to find physaddr for "
					"address: %x\n", (unsigned int)end);
					return -EINVAL;
				}
			} else
				return -EINVAL;

		if (flags & KGSL_MEMFLAGS_CACHE_FLUSH)
			outer_flush_range(physaddr, physaddr + KGSL_PAGESIZE);
		else
			if (flags & KGSL_MEMFLAGS_CACHE_CLEAN)
				outer_clean_range(physaddr,
					physaddr + KGSL_PAGESIZE);
			else if (flags & KGSL_MEMFLAGS_CACHE_INV)
				outer_inv_range(physaddr,
					physaddr + KGSL_PAGESIZE);
	}
#endif
	return 0;
}
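Because the outer_*_range() helpers take physical addresses, non-linear mappings (vmalloc or user memory) have to be walked page by page, as the CONFIG_OUTER_CACHE loop above does. A minimal sketch of the vmalloc case only; the function name is illustrative, header locations vary between kernel versions, and the range is assumed to be page-aligned and entirely within a vmalloc mapping.

#include <linux/mm.h>		/* vmalloc_to_page() */
#include <asm/memory.h>		/* page_to_phys() */
#include <asm/outercache.h>	/* outer_flush_range() */

/*
 * Sketch: flush the outer cache for a page-aligned vmalloc range by
 * translating each page individually, since the backing physical pages
 * need not be contiguous.
 */
static void example_outer_flush_vmalloc(void *vaddr, size_t len)
{
	void *p;

	for (p = vaddr; p < vaddr + len; p += PAGE_SIZE) {
		phys_addr_t pa = page_to_phys(vmalloc_to_page(p));

		outer_flush_range(pa, pa + PAGE_SIZE);
	}
}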
Example #20
static void __dma_clear_buffer(struct page *page, size_t size)
{
	if (!PageHighMem(page)) {
		void *ptr = page_address(page);
		if (ptr) {
			memset(ptr, 0, size);
			dmac_flush_range(ptr, ptr + size);
			outer_flush_range(__pa(ptr), __pa(ptr) + size);
		}
	} else {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		outer_flush_range(base, end);
	}
}
Example #21
static void _vb2_cma_phys_cache_flush_range(struct vb2_cma_phys_buf *buf,
					    unsigned long size)
{
	phys_addr_t start = buf->paddr;
	phys_addr_t end = start + size - 1;

	if (size > SZ_64K ) {
		flush_cache_all();	/* L1 */
		smp_call_function((smp_call_func_t)__cpuc_flush_kern_all, NULL, 1);
	} else {
		dmac_flush_range(phys_to_virt(start), phys_to_virt(end));
	}

	outer_flush_range(start, end);	/* L2 */
}
Example #22
void kbase_sync_to_cpu(phys_addr_t paddr, void *vaddr, size_t sz)
{
#ifdef CONFIG_ARM
	__cpuc_flush_dcache_area(vaddr, sz);
	outer_flush_range(paddr, paddr + sz);
#elif defined(CONFIG_ARM64)
	/* FIXME (MID64-46): There's no other suitable cache flush function for ARM64 */
	flush_cache_all();
#elif defined(CONFIG_X86)
	struct scatterlist scl = { 0, };
	sg_set_page(&scl, pfn_to_page(PFN_DOWN(paddr)), sz, paddr & (PAGE_SIZE - 1));
	dma_sync_sg_for_cpu(NULL, &scl, 1, DMA_FROM_DEVICE);
#else
#error Implement cache maintenance for your architecture here
#endif
}
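kbase_sync_to_cpu above falls back to per-architecture calls, with a FIXME for ARM64. Where a struct device and a DMA address are available, the streaming DMA API expresses the same intent portably and lets the architecture choose the right cache maintenance. A minimal sketch follows; the helper names are illustrative and the buffer is assumed to have been mapped with dma_map_single().

#include <linux/dma-mapping.h>

/*
 * Sketch: architecture-neutral "sync for CPU" / "sync for device"
 * for a streaming mapping, instead of calling the __cpuc_ and
 * outer_ helpers directly.
 */
static void example_sync_for_cpu(struct device *dev, dma_addr_t handle, size_t len)
{
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
}

static void example_sync_for_device(struct device *dev, dma_addr_t handle, size_t len)
{
	dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);
}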
Example #23
/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev,
	size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;
	void *ptr;
	u64 mask = get_coherent_dma_mask(dev);

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big"
			"(requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order);
		p < e; p++)
		__free_page(p);

	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	memset(ptr, 0, size);
	dmac_flush_range(ptr, ptr + size);
	outer_flush_range(__pa(ptr), __pa(ptr) + size);

	return page;
}
Example #24
/* Function to write back invalidate the Cache module */
Void Cache_wbInv(Ptr blockPtr, UInt32 byteCnt, Bits16 type, Bool wait) {
    GT_4trace (curTrace, GT_ENTER, "Cache_wbInv", blockPtr, byteCnt, type, wait);

#ifdef USE_CACHE_VOID_ARG
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)
                dmac_map_area(blockPtr, (size_t)byteCnt, DMA_BIDIRECTIONAL);
                outer_flush_range(__pa((UInt32)blockPtr),
                                  __pa((UInt32)(blockPtr+byteCnt)) );
#else
                dmac_flush_range(blockPtr, (blockPtr+byteCnt) );
#endif
#else
                dmac_flush_range( (UInt32)blockPtr, (UInt32)(blockPtr + byteCnt) );
#endif

    GT_0trace (curTrace, GT_LEAVE, "Cache_wbInv");
}
Example #25
static int ion_cp_change_mem_v2(unsigned int phy_base, unsigned int size,
			      void *data, int lock)
{
	enum cp_mem_usage usage = (enum cp_mem_usage) data;
	unsigned long *chunk_list;
	int nchunks;
	int ret;
	int i;
	int chunk_list_len;
	phys_addr_t chunk_list_phys;

	if (usage < 0 || usage >= MAX_USAGE)
		return -EINVAL;

	if (!IS_ALIGNED(size, V2_CHUNK_SIZE)) {
		pr_err("%s: heap size is not aligned to %x\n",
			__func__, V2_CHUNK_SIZE);
		return -EINVAL;
	}

	nchunks = size / V2_CHUNK_SIZE;
	chunk_list_len = sizeof(unsigned long)*nchunks;

	chunk_list = kmalloc(chunk_list_len, GFP_KERNEL);
	if (!chunk_list)
		return -ENOMEM;

	chunk_list_phys = virt_to_phys(chunk_list);
	for (i = 0; i < nchunks; i++)
		chunk_list[i] = phy_base + i * V2_CHUNK_SIZE;

	/*
	 * Flush the chunk list out of both cache levels so that the
	 * secure environment sees an up-to-date chunk list in memory
	 * before the chunks change state.
	 */
	dmac_flush_range(chunk_list, chunk_list + chunk_list_len);
	outer_flush_range(chunk_list_phys,
			  chunk_list_phys + chunk_list_len);

	ret = ion_cp_change_chunks_state(chunk_list_phys,
					nchunks, V2_CHUNK_SIZE, usage, lock);

	kfree(chunk_list);
	return ret;
}
Example #26
static void pagemap_flush_page(struct page *page)
{
#ifdef CONFIG_HIGHMEM
    void *km = NULL;

    if (!page_address(page)) {
        km = kmap(page);
        if (!km) {
            pr_err("unable to map high page\n");
            return;
        }
    }
#endif

    __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
    outer_flush_range(page_to_phys(page), page_to_phys(page)+PAGE_SIZE);
    wmb();

#ifdef CONFIG_HIGHMEM
    if (km) kunmap(page);
#endif
}
Example #27
static long ion_sys_cache_sync(struct ion_client *client, ion_sys_cache_sync_param_t* pParam)
{
    ION_FUNC_ENTER;
    if (pParam->sync_type < ION_CACHE_CLEAN_ALL)
    {
        // By range operation
        unsigned int start = (unsigned int) ion_map_kernel(client, pParam->handle);
        size_t size = ion_handle_buffer(pParam->handle)->size;
        unsigned int end;
        unsigned int page_num;
        unsigned int i;
        unsigned int page_start;
        struct page* ppage;
        phys_addr_t phys_addr;
        // Cache line align
        end = start + size;
        start = (start / L1_CACHE_BYTES * L1_CACHE_BYTES);
        size = (end - start + L1_CACHE_BYTES - 1) / L1_CACHE_BYTES * L1_CACHE_BYTES;
        page_num = ((start&(~PAGE_MASK))+size+(~PAGE_MASK))>>PAGE_ORDER;
        page_start = start & PAGE_MASK;
        // L1 cache sync
        if (pParam->sync_type == ION_CACHE_CLEAN_BY_RANGE)
        {
            printk("[ion_sys_cache_sync]: ION cache clean by range. start=0x%08X size=0x%08X\n", start, size);
            dmac_map_area((void*)start, size, DMA_TO_DEVICE);
        }
        else if (pParam->sync_type == ION_CACHE_INVALID_BY_RANGE)
        {
            printk("[ion_sys_cache_sync]: ION cache invalid by range. start=0x%08X size=0x%08X\n", start, size);
            dmac_unmap_area((void*)start, size, DMA_FROM_DEVICE);
        }
        else if (pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE)
        {
            printk("[ion_sys_cache_sync]: ION cache flush by range. start=0x%08X size=0x%08X\n", start, size);
            dmac_flush_range((void*)start, (void*)(start+size-1));
        }
        // L2 cache sync
        printk("[ion_sys_cache_sync]: page_start=0x%08X, page_num=%d\n", page_start, page_num);
        for (i=0; i<page_num; i++, page_start+=DEFAULT_PAGE_SIZE)
        {
            if (page_start>=VMALLOC_START && page_start<=VMALLOC_END)
            {
                ppage = vmalloc_to_page((void*)page_start);
                if (!ppage)
                {
                	printk("[ion_sys_cache_sync]: Cannot get vmalloc page. addr=0x%08X\n", page_start);
                    ion_unmap_kernel(client, pParam->handle);
                    return -EFAULT;
                }
                phys_addr = page_to_phys(ppage);
            }
            else
                phys_addr = virt_to_phys((void*)page_start);
            if (pParam->sync_type == ION_CACHE_CLEAN_BY_RANGE)
                outer_clean_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE);
            else if (pParam->sync_type == ION_CACHE_INVALID_BY_RANGE)
                outer_inv_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE);
            else if (pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE)
                outer_flush_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE);
        }
        ion_unmap_kernel(client, pParam->handle);
    }
Example #28
static void cache_maint_phys(phys_addr_t start, size_t length, enum cacheop op)
{
	size_t left = length;
	phys_addr_t begin = start;

	if (!soc_is_exynos5250() && !soc_is_exynos5210()) {
		if (length > (size_t) L1_FLUSH_ALL) {
			flush_cache_all();
			smp_call_function(
					(smp_call_func_t)__cpuc_flush_kern_all,
					NULL, 1);

			goto outer_cache_ops;
		}
	}

#ifdef CONFIG_HIGHMEM
	do {
		size_t len;
		struct page *page;
		void *vaddr;
		off_t offset;

		page = phys_to_page(start);
		offset = offset_in_page(start);
		len = PAGE_SIZE - offset;

		if (left < len)
			len = left;

		if (PageHighMem(page)) {
			vaddr = kmap(page);
			cache_maint_inner(vaddr + offset, len, op);
			kunmap(page);
		} else {
			vaddr = page_address(page) + offset;
			cache_maint_inner(vaddr, len, op);
		}
		left -= len;
		start += len;
	} while (left);
#else
	cache_maint_inner(phys_to_virt(begin), left, op);
#endif

outer_cache_ops:
	switch (op) {
	case EM_CLEAN:
		outer_clean_range(begin, begin + length);
		break;
	case EM_INV:
		if (length <= L2_FLUSH_ALL) {
			outer_inv_range(begin, begin + length);
			break;
		}
		/* else FALL THROUGH */
	case EM_FLUSH:
		outer_flush_range(begin, begin + length);
		break;
	}
}
Example #29
static int cacheperf(void)
{
	u32 xfer_size;
	int i = 0;
	void *vbuf;
	phys_addr_t pbuf;
	u32 bufend;
	struct timespec beforets;
	struct timespec afterts;
	long timeval;
	vbuf = kmalloc(END_SIZE, GFP_KERNEL);
	pbuf = virt_to_phys(vbuf);

	if (mset) {
		printk(KERN_INFO "## Memset perf (ns)\n");

		xfer_size = START_SIZE;
		while (xfer_size <= END_SIZE) {
			bufend = pbuf + xfer_size;

			getnstimeofday(&beforets);
			for (i = 0; i < try_cnt; i++)
				memset(vbuf, i, xfer_size);
			getnstimeofday(&afterts);
			print_result(xfer_size, beforets, afterts);
			xfer_size *= 2;
		}
	}

	if (cops) {
		printk(KERN_INFO "## Clean perf (ns)\n");

		xfer_size = START_SIZE;
		while (xfer_size <= END_SIZE) {
			bufend = pbuf + xfer_size;
			timeval = 0;

			for (i = 0; i < try_cnt; i++) {
				memset(vbuf, i, xfer_size);
				getnstimeofday(&beforets);
				if (l1)
					dmac_map_area(vbuf, xfer_size,
							DMA_TO_DEVICE);
				if (l2)
					outer_clean_range(pbuf, bufend);
				getnstimeofday(&afterts);
				timeval += update_timeval(beforets, afterts);
				xfer_size *= 2;
			}
			printk(KERN_INFO "%lu\n", timeval);
		}
	}

	if (iops) {
		printk(KERN_INFO "## Invalidate perf (ns)\n");

		xfer_size = START_SIZE;
		while (xfer_size <= END_SIZE) {
			bufend = pbuf + xfer_size;
			timeval = 0;

			for (i = 0; i < try_cnt; i++) {
				memset(vbuf, i, xfer_size);
				getnstimeofday(&beforets);
				if (l2)
					outer_inv_range(pbuf, bufend);
				if (l1)
					dmac_unmap_area(vbuf, xfer_size,
							DMA_FROM_DEVICE);
				getnstimeofday(&afterts);
				timeval += update_timeval(beforets, afterts);
				xfer_size *= 2;
			}
			printk(KERN_INFO "%lu\n", timeval);
		}
	}

	if (fops) {
		printk(KERN_INFO "## Flush perf (ns)\n");

		xfer_size = START_SIZE;
		while (xfer_size <= END_SIZE) {
			bufend = pbuf + xfer_size;
			timeval = 0;

			for (i = 0; i < try_cnt; i++) {
				memset(vbuf, i, xfer_size);
				getnstimeofday(&beforets);
				if (l1)
					dmac_flush_range(vbuf,
					(void *)((u32) vbuf + xfer_size));
				if (l2)
					outer_flush_range(pbuf, bufend);
				getnstimeofday(&afterts);
				timeval += update_timeval(beforets, afterts);
				xfer_size *= 2;
			}
			printk(KERN_INFO "%lu\n", timeval);
		}
	}


	if (all) {
		printk(KERN_INFO "## Flush all perf (ns)\n");

		xfer_size = START_SIZE;
		while (xfer_size <= END_SIZE) {
			bufend = pbuf + xfer_size;
			timeval = 0;

			for (i = 0; i < try_cnt; i++) {
				memset(vbuf, i, xfer_size);
				getnstimeofday(&beforets);
					if (l1)
						flush_cache_all();
					if (l2)
						outer_flush_all();
				getnstimeofday(&afterts);
				timeval += update_timeval(beforets, afterts);
				xfer_size *= 2;
			}
			printk(KERN_INFO "%lu\n", timeval);
		}
	}

	kfree(vbuf);

	return 0;
}
Example #30
/* Initializes the SE (SDP, SRAM resize, RPC handler) */
static int tf_se_init(struct tf_comm *comm,
	u32 sdp_backing_store_addr, u32 sdp_bkext_store_addr)
{
	int error;
	unsigned int crc;

	if (comm->se_initialized) {
		dpr_info("%s: SE already initialized... nothing to do\n",
			__func__);
		return 0;
	}

	/* Secure CRC read */
	dpr_info("%s: Secure CRC Read...\n", __func__);

	crc = omap4_secure_dispatcher(API_HAL_KM_GETSECUREROMCODECRC_INDEX,
		0, 0, 0, 0, 0, 0);
	pr_info("SMC: SecureCRC=0x%08X\n", crc);

	/*
	 * Flush caches before resize, just to be sure there is no
	 * pending public data writes back to SRAM that could trigger a
	 * security violation once their address space is marked as
	 * secure.
	 */
#define OMAP4_SRAM_PA   0x40300000
#define OMAP4_SRAM_SIZE 0xe000
	flush_cache_all();
	outer_flush_range(OMAP4_SRAM_PA,
			OMAP4_SRAM_PA + OMAP4_SRAM_SIZE);
	wmb();

	/* SRAM resize */
	dpr_info("%s: SRAM resize (52KB)...\n", __func__);
	error = omap4_secure_dispatcher(API_HAL_SEC_L3_RAM_RESIZE_INDEX,
		FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 1,
		SEC_RAM_SIZE_52KB, 0, 0, 0);

	if (error == API_HAL_RET_VALUE_OK) {
		dpr_info("%s: SRAM resize OK\n", __func__);
	} else {
		dpr_err("%s: SRAM resize failed [0x%x]\n", __func__, error);
		goto error;
	}

	/* SDP init */
	dpr_info("%s: SDP runtime init..."
		"(sdp_backing_store_addr=%x, sdp_bkext_store_addr=%x)\n",
		__func__,
		sdp_backing_store_addr, sdp_bkext_store_addr);
	error = omap4_secure_dispatcher(API_HAL_SDP_RUNTIMEINIT_INDEX,
		FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 2,
		sdp_backing_store_addr, sdp_bkext_store_addr, 0, 0);

	if (error == API_HAL_RET_VALUE_OK) {
		dpr_info("%s: SDP runtime init OK\n", __func__);
	} else {
		dpr_err("%s: SDP runtime init failed [0x%x]\n",
			__func__, error);
		goto error;
	}

	/* RPC init */
	dpr_info("%s: RPC init...\n", __func__);
	error = omap4_secure_dispatcher(API_HAL_TASK_MGR_RPCINIT_INDEX,
		FLAG_START_HAL_CRITICAL, 1,
		(u32) (u32(*const) (u32, u32, u32, u32)) &rpc_handler, 0, 0, 0);

	if (error == API_HAL_RET_VALUE_OK) {
		dpr_info("%s: RPC init OK\n", __func__);
	} else {
		dpr_err("%s: RPC init failed [0x%x]\n", __func__, error);
		goto error;
	}

	comm->se_initialized = true;

	return 0;

error:
	return -EFAULT;
}