Example #1
void mfc_mem_cache_inv(const void *start_addr, unsigned long size)
{
	unsigned long paddr;

	paddr = __pa((unsigned long)start_addr);
	outer_inv_range(paddr, paddr + size);
	dmac_unmap_area(start_addr, size, DMA_FROM_DEVICE);

	/*
	 * OPT#1: the kernel already provides an equivalent helper. Note that
	 * dma_unmap_single() expects the dma_addr_t returned by
	 * dma_map_single(), not a kernel virtual address:
	 *
	 * dma_unmap_single(NULL, paddr, size, DMA_FROM_DEVICE);
	 */
}
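For reference, the invalidate above can also be expressed through the streaming DMA API that the OPT#1 comment alludes to. This is only a sketch under the assumption that the buffer was mapped with dma_map_single() and that the driver has a real struct device; dev and handle below are placeholders, not names from the original driver.

#include <linux/dma-mapping.h>

/*
 * Hedged sketch, not the driver's actual code: hand a DMA'd buffer back to
 * the CPU so stale cache lines are invalidated before the CPU reads it.
 * `dev` is the owning device, `handle` the dma_addr_t from dma_map_single().
 */
static void mfc_mem_cache_inv_dma(struct device *dev, dma_addr_t handle,
				  size_t size)
{
	dma_sync_single_for_cpu(dev, handle, size, DMA_FROM_DEVICE);
}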
static void cache_maint_inner(void *vaddr, size_t size, enum cacheop op)
{
	switch (op) {
		case EM_CLEAN:
			dmac_map_area(vaddr, size, DMA_TO_DEVICE);
			break;
		case EM_INV:
			/* invalidate: data is coming from the device */
			dmac_unmap_area(vaddr, size, DMA_FROM_DEVICE);
			break;
		case EM_FLUSH:
			dmac_flush_range(vaddr, vaddr + size);
			break;
	}
}
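A short usage sketch of cache_maint_inner() for orientation; vaddr and len are illustrative only. Clean before the device reads a CPU-written buffer, invalidate before the CPU reads device-written data.

	/* Illustrative only: vaddr/len describe a cacheable, kernel-mapped buffer. */
	cache_maint_inner(vaddr, len, EM_CLEAN);	/* CPU wrote it, device will read */
	/* ... device DMA runs here ... */
	cache_maint_inner(vaddr, len, EM_INV);		/* device wrote it, CPU will read */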
void s5p_mfc_cache_inv(void *alloc_ctx)
{
	struct vb2_cma_phys_buf *buf = (struct vb2_cma_phys_buf *)alloc_ctx;
	void *start_addr;
	unsigned long size;
	unsigned long paddr = buf->paddr;

	start_addr = phys_to_virt(buf->paddr);
	size = buf->size;

	outer_inv_range(paddr, paddr + size);
	dmac_unmap_area(start_addr, size, DMA_FROM_DEVICE);
}
/* FIXME: l2 cache clean all should be implemented */
int vb2_cma_phys_cache_clean2(struct vb2_buffer *vb, u32 num_planes)
{
	struct vb2_cma_phys_buf *buf;
	unsigned long t_size = 0;
	phys_addr_t start;
	size_t size;
	int i;

	for (i = 0; i < num_planes; i++) {
		buf = vb->planes[i].mem_priv;
		if (!buf->cacheable) {
			pr_warning("This is non-cacheable buffer allocator\n");
			return -EINVAL;
		}

		t_size += buf->size;
	}

	if (t_size > (unsigned long)SIZE_THRESHOLD) {
		for (i = 0; i < num_planes; i++) {
			buf = vb->planes[i].mem_priv;
			start = buf->paddr;
			size = buf->size;

			dmac_unmap_area(phys_to_virt(start), size, DMA_TO_DEVICE);
		}
	} else {
		for (i = 0; i < num_planes; i++) {
			buf = vb->planes[i].mem_priv;
			start = buf->paddr;
			size = buf->size;

			dmac_unmap_area(phys_to_virt(start), size, DMA_TO_DEVICE);
			outer_clean_range(start, start + size - 1);	/* L2 */
		}
	}

	return 0;
}
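The FIXME above notes that the over-threshold branch should clean the whole L2 rather than walking ranges. A minimal sketch of that branch, assuming a full flush (clean + invalidate) via outer_flush_all() is acceptable where no dedicated clean-all helper exists; the L1 clean uses dmac_map_area() as in the EM_CLEAN case earlier.

	if (t_size > (unsigned long)SIZE_THRESHOLD) {
		for (i = 0; i < num_planes; i++) {
			buf = vb->planes[i].mem_priv;
			/* L1: clean each plane by range */
			dmac_map_area(phys_to_virt(buf->paddr), buf->size,
				      DMA_TO_DEVICE);
		}
		/* L2: one whole-cache operation instead of per-range cleans */
		outer_flush_all();
	}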
Example #5
void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		unsigned long paddr = __pa(kaddr);
		outer_inv_range(paddr, paddr + size);
	}

	dmac_unmap_area(kaddr, size, dir);
}
void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
#ifdef CONFIG_OUTER_CACHE
	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		unsigned long paddr = __pa(kaddr);
		outer_inv_range(paddr, paddr + size);
	}
#endif
	dmac_unmap_area(kaddr, size, dir);
}
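The two copies of ___dma_single_dev_to_cpu() above differ only in the CONFIG_OUTER_CACHE guard around the L2 invalidate. Drivers normally do not call it directly; on the older ARM kernels these snippets come from, the streaming DMA sync calls end up on this path. A hedged usage sketch with dev, handle and len as placeholders:

/* Sketch only: give the buffer back to the CPU after the device wrote it,
 * then back to the device before the next transfer. */
dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
/* ... CPU reads the freshly DMA'd data ... */
dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);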
int vb2_cma_phys_cache_clean(struct vb2_buffer *vb, u32 num_planes)
{
	struct vb2_cma_phys_buf *buf;
	phys_addr_t start;
	size_t size;
	int i;

	for (i = 0; i < num_planes; i++) {
		buf = vb->planes[i].mem_priv;
		start = buf->paddr;
		size = buf->size;

		if (!buf->cacheable) {
			pr_warning("This is non-cacheable buffer allocator\n");
			return -EINVAL;
		}

		dmac_unmap_area(phys_to_virt(start), size, DMA_TO_DEVICE);
		outer_clean_range(start, start + size - 1);	/* L2 */
	}

	return 0;
}
static long ion_sys_cache_sync(struct ion_client *client, ion_sys_cache_sync_param_t* pParam)
{
    ION_FUNC_ENTER;
    if (pParam->sync_type < ION_CACHE_CLEAN_ALL)
    {
        // By range operation
        unsigned int start = (unsigned int) ion_map_kernel(client, pParam->handle);
        size_t size = ion_handle_buffer(pParam->handle)->size;
        unsigned int end;
        unsigned int page_num;
        unsigned int i;
        unsigned int page_start;
        struct page* ppage;
        phys_addr_t phys_addr;
        // Cache line align
        end = start + size;
        start = (start / L1_CACHE_BYTES * L1_CACHE_BYTES);
        size = (end - start + L1_CACHE_BYTES - 1) / L1_CACHE_BYTES * L1_CACHE_BYTES;
        page_num = ((start&(~PAGE_MASK))+size+(~PAGE_MASK))>>PAGE_ORDER;
        page_start = start & PAGE_MASK;
        // L1 cache sync
        if (pParam->sync_type == ION_CACHE_CLEAN_BY_RANGE)
        {
            printk("[ion_sys_cache_sync]: ION cache clean by range. start=0x%08X size=0x%08X\n", start, size);
            dmac_map_area((void*)start, size, DMA_TO_DEVICE);
        }
        else if (pParam->sync_type == ION_CACHE_INVALID_BY_RANGE)
        {
            printk("[ion_sys_cache_sync]: ION cache invalid by range. start=0x%08X size=0x%08X\n", start, size);
            dmac_unmap_area((void*)start, size, DMA_FROM_DEVICE);
        }
        else if (pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE)
        {
            printk("[ion_sys_cache_sync]: ION cache flush by range. start=0x%08X size=0x%08X\n", start, size);
            dmac_flush_range((void*)start, (void*)(start+size-1));
        }
        // L2 cache sync
        printk("[ion_sys_cache_sync]: page_start=0x%08X, page_num=%d\n", page_start, page_num);
        for (i=0; i<page_num; i++, page_start+=DEFAULT_PAGE_SIZE)
        {
            if (page_start>=VMALLOC_START && page_start<=VMALLOC_END)
            {
                ppage = vmalloc_to_page((void*)page_start);
                if (!ppage)
                {
                	printk("[ion_sys_cache_sync]: Cannot get vmalloc page. addr=0x%08X\n", page_start);
                    ion_unmap_kernel(client, pParam->handle);
                    return -EFAULT;
                }
                phys_addr = page_to_phys(ppage);
            }
            else
                phys_addr = virt_to_phys((void*)page_start);
            if (pParam->sync_type == ION_CACHE_CLEAN_BY_RANGE)
                outer_clean_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE);
            else if (pParam->sync_type == ION_CACHE_INVALID_BY_RANGE)
                outer_inv_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE);
            else if (pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE)
                outer_flush_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE);
        }
        ion_unmap_kernel(client, pParam->handle);
    }
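The cache-line and page rounding above is easier to follow with concrete numbers; a worked example, assuming 32-byte cache lines and 4 KiB pages:

/*
 * Worked example (illustrative values only):
 *   start = 0x10000024, size = 0x2000, L1_CACHE_BYTES = 32, PAGE_SIZE = 4096
 *   end        = 0x10002024
 *   start      = 0x10000020        (rounded down to a cache line)
 *   size       = 0x2020            (end - start, rounded up to a cache line)
 *   page_start = 0x10000000        (start & PAGE_MASK)
 *   page_num   = (0x20 + 0x2020 + 0xfff) >> 12 = 3
 * ~PAGE_MASK equals PAGE_SIZE - 1, so page_num is simply the number of pages
 * the line-aligned range spans; the L2 loop then operates page by page.
 */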
Example #9
static int cacheperf(void)
{
	u32 xfer_size;
	int i = 0;
	void *vbuf;
	phys_addr_t pbuf;
	u32 bufend;
	struct timespec beforets;
	struct timespec afterts;
	long timeval;
	vbuf = kmalloc(END_SIZE, GFP_KERNEL);
	if (!vbuf)
		return -ENOMEM;
	pbuf = virt_to_phys(vbuf);

	if (mset) {
		printk(KERN_INFO "## Memset perf (ns)\n");

		xfer_size = START_SIZE;
		while (xfer_size <= END_SIZE) {
			bufend = pbuf + xfer_size;

			getnstimeofday(&beforets);
			for (i = 0; i < try_cnt; i++)
				memset(vbuf, i, xfer_size);
			getnstimeofday(&afterts);
			print_result(xfer_size, beforets, afterts);
			xfer_size *= 2;
		}
	}

	if (cops) {
		printk(KERN_INFO "## Clean perf (ns)\n");

		xfer_size = START_SIZE;
		while (xfer_size <= END_SIZE) {
			bufend = pbuf + xfer_size;
			timeval = 0;

			for (i = 0; i < try_cnt; i++) {
				memset(vbuf, i, xfer_size);
				getnstimeofday(&beforets);
				if (l1)
					dmac_map_area(vbuf, xfer_size,
							DMA_TO_DEVICE);
				if (l2)
					outer_clean_range(pbuf, bufend);
				getnstimeofday(&afterts);
				timeval += update_timeval(beforets, afterts);
			}
			printk(KERN_INFO "%lu\n", timeval);
			xfer_size *= 2;
		}
	}

	if (iops) {
		printk(KERN_INFO "## Invalidate perf (ns)\n");

		xfer_size = START_SIZE;
		while (xfer_size <= END_SIZE) {
			bufend = pbuf + xfer_size;
			timeval = 0;

			for (i = 0; i < try_cnt; i++) {
				memset(vbuf, i, xfer_size);
				getnstimeofday(&beforets);
				if (l2)
					outer_inv_range(pbuf, bufend);
				if (l1)
					dmac_unmap_area(vbuf, xfer_size,
							DMA_FROM_DEVICE);
				getnstimeofday(&afterts);
				timeval += update_timeval(beforets, afterts);
			}
			printk(KERN_INFO "%lu\n", timeval);
			xfer_size *= 2;
		}
	}

	if (fops) {
		printk(KERN_INFO "## Flush perf (ns)\n");

		xfer_size = START_SIZE;
		while (xfer_size <= END_SIZE) {
			bufend = pbuf + xfer_size;
			timeval = 0;

			for (i = 0; i < try_cnt; i++) {
				memset(vbuf, i, xfer_size);
				getnstimeofday(&beforets);
				if (l1)
					dmac_flush_range(vbuf,
					(void *)((u32) vbuf + xfer_size));
				if (l2)
					outer_flush_range(pbuf, bufend);
				getnstimeofday(&afterts);
				timeval += update_timeval(beforets, afterts);
			}
			printk(KERN_INFO "%lu\n", timeval);
			xfer_size *= 2;
		}
	}


	if (all) {
		printk(KERN_INFO "## Flush all perf (ns)\n");

		xfer_size = START_SIZE;
		while (xfer_size <= END_SIZE) {
			bufend = pbuf + xfer_size;
			timeval = 0;

			for (i = 0; i < try_cnt; i++) {
				memset(vbuf, i, xfer_size);
				getnstimeofday(&beforets);
				if (l1)
					flush_cache_all();
				if (l2)
					outer_flush_all();
				getnstimeofday(&afterts);
				timeval += update_timeval(beforets, afterts);
			}
			printk(KERN_INFO "%lu\n", timeval);
			xfer_size *= 2;
		}
	}

	kfree(vbuf);

	return 0;
}
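cacheperf() leans on two helpers the snippet does not show, print_result() and update_timeval(). A minimal sketch of what update_timeval() presumably does (elapsed nanoseconds between two timespecs); this is an assumption about its contract, not the module's actual code:

/* Hypothetical helper, not shown in the snippet above: elapsed nanoseconds
 * between two timespecs captured with getnstimeofday(). */
static long update_timeval(struct timespec before, struct timespec after)
{
	return (after.tv_sec - before.tv_sec) * NSEC_PER_SEC +
	       (after.tv_nsec - before.tv_nsec);
}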
Example #10
static long ion_sys_cache_sync(struct ion_client *client, ion_sys_cache_sync_param_t* pParam, int from_kernel)
{
    ION_FUNC_ENTER;
    if (pParam->sync_type < ION_CACHE_CLEAN_ALL)
    {
        // By range operation
        unsigned int start;
        size_t size;
        unsigned int end, page_num, page_start;
        struct ion_handle *kernel_handle;

        kernel_handle = ion_drv_get_kernel_handle(client,
                        pParam->handle, from_kernel);
        if(IS_ERR(kernel_handle))
        {
            IONMSG("ion cache sync fail! \n");
            return -EINVAL;
        }

#ifdef __ION_CACHE_SYNC_USER_VA_EN__
        if(pParam->sync_type < ION_CACHE_CLEAN_BY_RANGE_USE_VA)
#else
        if(1)
#endif
        {
            start = (unsigned int) ion_map_kernel(client, kernel_handle);
            if(IS_ERR_OR_NULL((void*)start))
            {
                IONMSG("cannot do cachesync, unable to map_kernel: ret=%d\n", start);
                return -EFAULT;
            }
            size = ion_handle_buffer(kernel_handle)->size;
        }
        else
        {
            start = pParam->va;
            size = pParam->size;
        }

        // Cache line align
        end = start + size;
        start = (start / L1_CACHE_BYTES * L1_CACHE_BYTES);
        size = (end - start + L1_CACHE_BYTES - 1) / L1_CACHE_BYTES * L1_CACHE_BYTES;
        page_num = ((start&(~PAGE_MASK))+size+(~PAGE_MASK))>>PAGE_ORDER;
        page_start = start & PAGE_MASK;
        // L1 cache sync
        if((pParam->sync_type==ION_CACHE_CLEAN_BY_RANGE) || (pParam->sync_type==ION_CACHE_CLEAN_BY_RANGE_USE_VA))
        {
            MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_CLEAN_RANGE], MMProfileFlagStart, size, 0);
            //printk("[ion_sys_cache_sync]: ION cache clean by range. start=0x%08X size=0x%08X\n", start, size);
            dmac_map_area((void*)start, size, DMA_TO_DEVICE);
        }
        else if ((pParam->sync_type == ION_CACHE_INVALID_BY_RANGE)||(pParam->sync_type == ION_CACHE_INVALID_BY_RANGE_USE_VA))
        {
            MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_INVALID_RANGE], MMProfileFlagStart, size, 0);
            //printk("[ion_sys_cache_sync]: ION cache invalid by range. start=0x%08X size=0x%08X\n", start, size);
            dmac_unmap_area((void*)start, size, DMA_FROM_DEVICE);
        }
        else if ((pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE)||(pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE_USE_VA))
        {
            MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_FLUSH_RANGE], MMProfileFlagStart, size, 0);
            //printk("[ion_sys_cache_sync]: ION cache flush by range. start=0x%08X size=0x%08X\n", start, size);
            dmac_flush_range((void*)start, (void*)(start+size-1));
        }

#if 0
        // L2 cache sync
        //printk("[ion_sys_cache_sync]: page_start=0x%08X, page_num=%d\n", page_start, page_num);
        for (i=0; i<page_num; i++, page_start+=DEFAULT_PAGE_SIZE)
        {
            phys_addr_t phys_addr;

            if (page_start>=VMALLOC_START && page_start<=VMALLOC_END)
            {
                ppage = vmalloc_to_page((void*)page_start);
                if (!ppage)
                {
                    printk("[ion_sys_cache_sync]: Cannot get vmalloc page. addr=0x%08X\n", page_start);
                    ion_unmap_kernel(client, pParam->handle);
                    return -EFAULT;
                }
                phys_addr = page_to_phys(ppage);
            }
            else
                phys_addr = virt_to_phys((void*)page_start);
            if (pParam->sync_type == ION_CACHE_CLEAN_BY_RANGE)
                outer_clean_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE);
            else if (pParam->sync_type == ION_CACHE_INVALID_BY_RANGE)
                outer_inv_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE);
            else if (pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE)
                outer_flush_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE);
        }
#endif

#ifdef __ION_CACHE_SYNC_USER_VA_EN__
        if(pParam->sync_type < ION_CACHE_CLEAN_BY_RANGE_USE_VA)
#endif
        {
            ion_unmap_kernel(client, kernel_handle);
        }

        if (pParam->sync_type == ION_CACHE_CLEAN_BY_RANGE)
            MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_CLEAN_RANGE], MMProfileFlagEnd, size, 0);
        else if (pParam->sync_type == ION_CACHE_INVALID_BY_RANGE)
            MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_INVALID_RANGE], MMProfileFlagEnd, size, 0);
        else if (pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE)
            MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_FLUSH_RANGE], MMProfileFlagEnd, size, 0);

    }