Example #1
void s5p_mfc_cache_clean(void *alloc_ctx)
{
	struct vb2_cma_phys_buf *buf = (struct vb2_cma_phys_buf *)alloc_ctx;
	void *start_addr;
	unsigned long size;
	unsigned long paddr = (dma_addr_t)buf->paddr;

	start_addr = (dma_addr_t *)phys_to_virt(buf->paddr);
	size = buf->size;

	/* clean the inner (L1) cache by virtual address, then the outer (L2) cache by physical range */
	dmac_map_area(start_addr, size, DMA_TO_DEVICE);
	outer_clean_range(paddr, paddr + size);
}
Example #2
static void cache_maint_inner(void *vaddr, size_t size, enum cacheop op)
{
	switch (op) {
		case EM_CLEAN:
			dmac_map_area(vaddr, size, DMA_TO_DEVICE);
			break;
		case EM_INV:
			dmac_unmap_area(vaddr, size, DMA_TO_DEVICE);
			break;
		case EM_FLUSH:
			dmac_flush_range(vaddr, vaddr + size);
	}
}
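
For reference, the other examples in this list pair these inner-cache (L1) calls with outer-cache (L2) maintenance on the physical range. Below is a minimal sketch of such a counterpart, not part of the original driver: `cache_maint_outer` is a hypothetical helper, and `paddr` is assumed to be the physical address of the same buffer (e.g. from virt_to_phys(vaddr)).

/* Sketch only: outer-cache (L2) counterpart to cache_maint_inner() above. */
static void cache_maint_outer(phys_addr_t paddr, size_t size, enum cacheop op)
{
	switch (op) {
	case EM_CLEAN:
		outer_clean_range(paddr, paddr + size);
		break;
	case EM_INV:
		outer_inv_range(paddr, paddr + size);
		break;
	case EM_FLUSH:
		outer_flush_range(paddr, paddr + size);
		break;
	}
}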
Example #3
/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	unsigned long paddr;

	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	dmac_map_area(kaddr, size, dir);

	paddr = __pa(kaddr);
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
}
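
As the header comment says, drivers are expected to go through the dma_sync_* helpers in dma-mapping.h rather than call this routine directly. A minimal sketch of that usage follows, assuming `dev`, a streaming mapping `handle` obtained earlier from dma_map_single(), and `len` are provided by the calling driver.

/* Sketch only: sync an already-mapped streaming buffer around CPU access.
 * `dev`, `handle` (from a prior dma_map_single()) and `len` are assumed
 * to exist in the calling driver.
 */
dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
/* ... CPU reads the data written by the device ... */
dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);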
Example #4
/* Function to invalidate the Cache module */
Void Cache_inv(Ptr blockPtr, UInt32 byteCnt, Bits16 type, Bool wait) {
    GT_4trace (curTrace, GT_ENTER, "Cache_inv", blockPtr, byteCnt, type, wait);

#ifdef USE_CACHE_VOID_ARG
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
    dmac_map_area(blockPtr, (size_t)byteCnt, DMA_FROM_DEVICE);
    outer_inv_range(__pa((UInt32)blockPtr),
                    __pa((UInt32)(blockPtr + byteCnt)) );
#else
    dmac_inv_range(blockPtr, (blockPtr + byteCnt) );
#endif
#else
    dmac_inv_range( (UInt32)blockPtr, (UInt32)(blockPtr + byteCnt) );
#endif

    GT_0trace (curTrace, GT_LEAVE, "Cache_inv");
}
Example #5
	/* CMA or bootmem(memblock) */
void mfc_mem_cache_clean(const void *start_addr, unsigned long size)
{
	unsigned long paddr;

	dmac_map_area(start_addr, size, DMA_TO_DEVICE);
	/*
	 * virtual & physical addresses are mapped directly, so we can
	 * convert the address just using the offset
	 */
	paddr = __pa((unsigned long)start_addr);
	outer_clean_range(paddr, paddr + size);

	/* OPT#1: kernel provide below function */
	/*
	dma_map_single(NULL, (void *)start_addr, size, DMA_TO_DEVICE);
	*/
}
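
The "OPT#1" note above refers to the generic DMA-mapping API. A minimal sketch of that alternative for the clean-to-device case, assuming the driver can pass its own struct device (here the hypothetical `mfc_dev`) instead of NULL:

/* Sketch only: let the DMA API perform the L1/L2 clean for DMA_TO_DEVICE.
 * `mfc_dev` is a hypothetical struct device owned by the driver.
 */
dma_addr_t handle;

handle = dma_map_single(mfc_dev, (void *)start_addr, size, DMA_TO_DEVICE);
if (dma_mapping_error(mfc_dev, handle))
	return;	/* mapping failed; nothing was cleaned */
/* ... device reads the buffer via `handle` ... */
dma_unmap_single(mfc_dev, handle, size, DMA_TO_DEVICE);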
Example #6
/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
#ifdef CONFIG_OUTER_CACHE
	unsigned long paddr;

	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
#endif

	dmac_map_area(kaddr, size, dir);

#ifdef CONFIG_OUTER_CACHE
	paddr = __pa(kaddr);
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
#endif
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
Example #7
int s3cfb_draw_logo(struct fb_info *fb)
{
	struct fb_fix_screeninfo *fix = &fb->fix;
	struct fb_var_screeninfo *var = &fb->var;
	u32 height = var->yres / 3;
	u32 line = fix->line_length;
	u32 i, j;

	for (i = 0; i < height; i++) {
		for (j = 0; j < var->xres; j++) {
			memset(fb->screen_base + i * line + j * 4 + 0, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 1, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 2, 0xff, 1);
			memset(fb->screen_base + i * line + j * 4 + 3, 0x00, 1);
		}
	}

	for (i = height; i < height * 2; i++) {
		for (j = 0; j < var->xres; j++) {
			memset(fb->screen_base + i * line + j * 4 + 0, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 1, 0xff, 1);
			memset(fb->screen_base + i * line + j * 4 + 2, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 3, 0x00, 1);
		}
	}

	for (i = height * 2; i < height * 3; i++) {
		for (j = 0; j < var->xres; j++) {
			memset(fb->screen_base + i * line + j * 4 + 0, 0xff, 1);
			memset(fb->screen_base + i * line + j * 4 + 1, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 2, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 3, 0x00, 1);
		}
	}
	dmac_map_area((void *)fb->screen_base, fix->smem_len, DMA_TO_DEVICE);
	outer_clean_range(fix->smem_start, fix->smem_start + fix->smem_len);

	return 0;
}
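
The per-pixel work above issues four one-byte memset() calls per pixel. A compact equivalent (sketch only, not taken from the driver) writes one 32-bit word per pixel; `s3cfb_fill_band` is a hypothetical helper, and the assumed byte order matches the code above (byte 0 = blue, 1 = green, 2 = red, 3 = unused) on a little-endian CPU.

/* Sketch only: fill one horizontal band with a single store per pixel.
 * `color` would be 0x00ff0000 for the red band, 0x0000ff00 for green and
 * 0x000000ff for blue, assuming the little-endian layout noted above.
 */
static void s3cfb_fill_band(struct fb_info *fb, u32 y_start, u32 y_end, u32 color)
{
	u32 line = fb->fix.line_length;
	u32 i, j;

	for (i = y_start; i < y_end; i++) {
		u32 *row = (u32 *)(fb->screen_base + i * line);

		for (j = 0; j < fb->var.xres; j++)
			row[j] = color;
	}
}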
Example #8
static long ion_sys_cache_sync(struct ion_client *client, ion_sys_cache_sync_param_t* pParam)
{
    ION_FUNC_ENTER;
    if (pParam->sync_type < ION_CACHE_CLEAN_ALL)
    {
        // By range operation
        unsigned int start = (unsigned int) ion_map_kernel(client, pParam->handle);
        size_t size = ion_handle_buffer(pParam->handle)->size;
        unsigned int end;
        unsigned int page_num;
        unsigned int i;
        unsigned int page_start;
        struct page* ppage;
        phys_addr_t phys_addr;
        // Cache line align
        end = start + size;
        start = (start / L1_CACHE_BYTES * L1_CACHE_BYTES);
        size = (end - start + L1_CACHE_BYTES - 1) / L1_CACHE_BYTES * L1_CACHE_BYTES;
        page_num = ((start&(~PAGE_MASK))+size+(~PAGE_MASK))>>PAGE_ORDER;
        page_start = start & PAGE_MASK;
        // L1 cache sync
        if (pParam->sync_type == ION_CACHE_CLEAN_BY_RANGE)
        {
            printk("[ion_sys_cache_sync]: ION cache clean by range. start=0x%08X size=0x%08X\n", start, size);
            dmac_map_area((void*)start, size, DMA_TO_DEVICE);
        }
        else if (pParam->sync_type == ION_CACHE_INVALID_BY_RANGE)
        {
            printk("[ion_sys_cache_sync]: ION cache invalid by range. start=0x%08X size=0x%08X\n", start, size);
            dmac_unmap_area((void*)start, size, DMA_FROM_DEVICE);
        }
        else if (pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE)
        {
            printk("[ion_sys_cache_sync]: ION cache flush by range. start=0x%08X size=0x%08X\n", start, size);
            dmac_flush_range((void*)start, (void*)(start+size-1));
        }
        // L2 cache sync
        printk("[ion_sys_cache_sync]: page_start=0x%08X, page_num=%d\n", page_start, page_num);
        for (i=0; i<page_num; i++, page_start+=DEFAULT_PAGE_SIZE)
        {
            if (page_start>=VMALLOC_START && page_start<=VMALLOC_END)
            {
                ppage = vmalloc_to_page((void*)page_start);
                if (!ppage)
                {
                    printk("[ion_sys_cache_sync]: Cannot get vmalloc page. addr=0x%08X\n", page_start);
                    ion_unmap_kernel(client, pParam->handle);
                    return -EFAULT;
                }
                phys_addr = page_to_phys(ppage);
            }
            else
                phys_addr = virt_to_phys((void*)page_start);
            if (pParam->sync_type == ION_CACHE_CLEAN_BY_RANGE)
                outer_clean_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE);
            else if (pParam->sync_type == ION_CACHE_INVALID_BY_RANGE)
                outer_inv_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE);
            else if (pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE)
                outer_flush_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE);
        }
        ion_unmap_kernel(client, pParam->handle);
    }
Example #9
static int cacheperf(void)
{
	u32 xfer_size;
	int i = 0;
	void *vbuf;
	phys_addr_t pbuf;
	u32 bufend;
	struct timespec beforets;
	struct timespec afterts;
	long timeval;

	vbuf = kmalloc(END_SIZE, GFP_KERNEL);
	if (!vbuf)
		return -ENOMEM;
	pbuf = virt_to_phys(vbuf);

	if (mset) {
		printk(KERN_INFO "## Memset perf (ns)\n");

		xfer_size = START_SIZE;
		while (xfer_size <= END_SIZE) {
			bufend = pbuf + xfer_size;

			getnstimeofday(&beforets);
			for (i = 0; i < try_cnt; i++)
				memset(vbuf, i, xfer_size);
			getnstimeofday(&afterts);
			print_result(xfer_size, beforets, afterts);
			xfer_size *= 2;
		}
	}

	if (cops) {
		printk(KERN_INFO "## Clean perf (ns)\n");

		xfer_size = START_SIZE;
		while (xfer_size <= END_SIZE) {
			bufend = pbuf + xfer_size;
			timeval = 0;

			for (i = 0; i < try_cnt; i++) {
				memset(vbuf, i, xfer_size);
				getnstimeofday(&beforets);
				if (l1)
					dmac_map_area(vbuf, xfer_size,
							DMA_TO_DEVICE);
				if (l2)
					outer_clean_range(pbuf, bufend);
				getnstimeofday(&afterts);
				timeval += update_timeval(beforets, afterts);
			}
			printk(KERN_INFO "%lu\n", timeval);
			xfer_size *= 2;
		}
	}

	if (iops) {
		printk(KERN_INFO "## Invalidate perf (ns)\n");

		xfer_size = START_SIZE;
		while (xfer_size <= END_SIZE) {
			bufend = pbuf + xfer_size;
			timeval = 0;

			for (i = 0; i < try_cnt; i++) {
				memset(vbuf, i, xfer_size);
				getnstimeofday(&beforets);
				if (l2)
					outer_inv_range(pbuf, bufend);
				if (l1)
					dmac_unmap_area(vbuf, xfer_size,
							DMA_FROM_DEVICE);
				getnstimeofday(&afterts);
				timeval += update_timeval(beforets, afterts);
			}
			printk(KERN_INFO "%lu\n", timeval);
			xfer_size *= 2;
		}
	}

	if (fops) {
		printk(KERN_INFO "## Flush perf (ns)\n");

		xfer_size = START_SIZE;
		while (xfer_size <= END_SIZE) {
			bufend = pbuf + xfer_size;
			timeval = 0;

			for (i = 0; i < try_cnt; i++) {
				memset(vbuf, i, xfer_size);
				getnstimeofday(&beforets);
				if (l1)
					dmac_flush_range(vbuf,
					(void *)((u32) vbuf + xfer_size));
				if (l2)
					outer_flush_range(pbuf, bufend);
				getnstimeofday(&afterts);
				timeval += update_timeval(beforets, afterts);
			}
			printk(KERN_INFO "%lu\n", timeval);
			xfer_size *= 2;
		}
	}

	if (all) {
		printk(KERN_INFO "## Flush all perf (ns)\n");

		xfer_size = START_SIZE;
		while (xfer_size <= END_SIZE) {
			bufend = pbuf + xfer_size;
			timeval = 0;

			for (i = 0; i < try_cnt; i++) {
				memset(vbuf, i, xfer_size);
				getnstimeofday(&beforets);
				if (l1)
					flush_cache_all();
				if (l2)
					outer_flush_all();
				getnstimeofday(&afterts);
				timeval += update_timeval(beforets, afterts);
			}
			printk(KERN_INFO "%lu\n", timeval);
			xfer_size *= 2;
		}
	}

	kfree(vbuf);

	return 0;
}
Example #10
int s3c_mem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long pageFrameNo = 0, size, phys_addr;

#ifdef USE_DMA_ALLOC
	unsigned long virt_addr;
#else
	unsigned long *virt_addr;
#endif

	size = vma->vm_end - vma->vm_start;

	switch (flag) {
	case MEM_ALLOC:
	case MEM_ALLOC_CACHEABLE:

#ifdef USE_DMA_ALLOC
		virt_addr = (unsigned long)dma_alloc_writecombine(NULL, size,
				(unsigned int *) &phys_addr,
				GFP_KERNEL);
#else
		virt_addr = kmalloc(size, GFP_DMA|GFP_ATOMIC);
#endif
		if (!virt_addr) {
			printk(KERN_INFO "kmalloc() failed !\n");
			return -EINVAL;
		}
		DEBUG("MMAP_KMALLOC : virt addr = 0x%08x, size = %d, %d\n",
						virt_addr, size, __LINE__);

#ifndef USE_DMA_ALLOC
		dmac_map_area(virt_addr, size, DMA_FROM_DEVICE);
		phys_addr = virt_to_phys((unsigned long *)virt_addr);
#endif
		physical_address = (unsigned int)phys_addr;

#ifdef USE_DMA_ALLOC
		virtual_address = virt_addr;
#endif
		pageFrameNo = __phys_to_pfn(phys_addr);
		break;

	case MEM_ALLOC_SHARE:
	case MEM_ALLOC_CACHEABLE_SHARE:
		DEBUG("MMAP_KMALLOC_SHARE : phys addr = 0x%08x, %d\n",
						physical_address, __LINE__);

/* page frame number of the address for the physical_address to be shared. */
		pageFrameNo = __phys_to_pfn(physical_address);
		DEBUG("MMAP_KMALLOC_SHARE : vma->end = 0x%08x, "
				"vma->start = 0x%08x, size = %d, %d\n",
				vma->vm_end, vma->vm_start, size, __LINE__);
		break;

	default:
		break;
	}

	if ((flag == MEM_ALLOC) || (flag == MEM_ALLOC_SHARE))
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_flags |= VM_RESERVED;

	if (remap_pfn_range(vma, vma->vm_start, pageFrameNo,
						size, vma->vm_page_prot)) {
		printk(KERN_INFO "s3c_mem_mmap() : remap_pfn_range() failed !\n");
		return -EINVAL;
	}

	return 0;
}
Example #11
static long ion_sys_cache_sync(struct ion_client *client, ion_sys_cache_sync_param_t* pParam, int from_kernel)
{
    ION_FUNC_ENTER;
    if (pParam->sync_type < ION_CACHE_CLEAN_ALL)
    {
        // By range operation
        unsigned int start;
        size_t size;
        unsigned int end, page_num, page_start;
        struct ion_handle *kernel_handle;

        kernel_handle = ion_drv_get_kernel_handle(client,
                        pParam->handle, from_kernel);
        if(IS_ERR(kernel_handle))
        {
            IONMSG("ion cache sync fail! \n");
            return -EINVAL;
        }

#ifdef __ION_CACHE_SYNC_USER_VA_EN__
        if(pParam->sync_type < ION_CACHE_CLEAN_BY_RANGE_USE_VA)
#else
        if(1)
#endif
        {
            start = (unsigned int) ion_map_kernel(client, kernel_handle);
            if(IS_ERR_OR_NULL((void*)start))
            {
                IONMSG("cannot do cachesync, unable to map_kernel: ret=%d\n", start);
                return -EFAULT;
            }
            size = ion_handle_buffer(kernel_handle)->size;
        }
        else
        {
            start = pParam->va;
            size = pParam->size;
        }

        // Cache line align
        end = start + size;
        start = (start / L1_CACHE_BYTES * L1_CACHE_BYTES);
        size = (end - start + L1_CACHE_BYTES - 1) / L1_CACHE_BYTES * L1_CACHE_BYTES;
        page_num = ((start&(~PAGE_MASK))+size+(~PAGE_MASK))>>PAGE_ORDER;
        page_start = start & PAGE_MASK;
        // L1 cache sync
        if((pParam->sync_type==ION_CACHE_CLEAN_BY_RANGE) || (pParam->sync_type==ION_CACHE_CLEAN_BY_RANGE_USE_VA))
        {
            MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_CLEAN_RANGE], MMProfileFlagStart, size, 0);
            //printk("[ion_sys_cache_sync]: ION cache clean by range. start=0x%08X size=0x%08X\n", start, size);
            dmac_map_area((void*)start, size, DMA_TO_DEVICE);
        }
        else if ((pParam->sync_type == ION_CACHE_INVALID_BY_RANGE)||(pParam->sync_type == ION_CACHE_INVALID_BY_RANGE_USE_VA))
        {
            MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_INVALID_RANGE], MMProfileFlagStart, size, 0);
            //printk("[ion_sys_cache_sync]: ION cache invalid by range. start=0x%08X size=0x%08X\n", start, size);
            dmac_unmap_area((void*)start, size, DMA_FROM_DEVICE);
        }
        else if ((pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE)||(pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE_USE_VA))
        {
            MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_FLUSH_RANGE], MMProfileFlagStart, size, 0);
            //printk("[ion_sys_cache_sync]: ION cache flush by range. start=0x%08X size=0x%08X\n", start, size);
            dmac_flush_range((void*)start, (void*)(start+size-1));
        }

#if 0
        // L2 cache sync
        //printk("[ion_sys_cache_sync]: page_start=0x%08X, page_num=%d\n", page_start, page_num);
        for (i=0; i<page_num; i++, page_start+=DEFAULT_PAGE_SIZE)
        {
            phys_addr_t phys_addr;

            if (page_start>=VMALLOC_START && page_start<=VMALLOC_END)
            {
                ppage = vmalloc_to_page((void*)page_start);
                if (!ppage)
                {
                    printk("[ion_sys_cache_sync]: Cannot get vmalloc page. addr=0x%08X\n", page_start);
                    ion_unmap_kernel(client, pParam->handle);
                    return -EFAULT;
                }
                phys_addr = page_to_phys(ppage);
            }
            else
                phys_addr = virt_to_phys((void*)page_start);
            if (pParam->sync_type == ION_CACHE_CLEAN_BY_RANGE)
                outer_clean_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE);
            else if (pParam->sync_type == ION_CACHE_INVALID_BY_RANGE)
                outer_inv_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE);
            else if (pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE)
                outer_flush_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE);
        }
#endif

#ifdef __ION_CACHE_SYNC_USER_VA_EN__
        if(pParam->sync_type < ION_CACHE_CLEAN_BY_RANGE_USE_VA)
#endif
        {
            ion_unmap_kernel(client, kernel_handle);
        }

        if (pParam->sync_type == ION_CACHE_CLEAN_BY_RANGE)
            MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_CLEAN_RANGE], MMProfileFlagEnd, size, 0);
        else if (pParam->sync_type == ION_CACHE_INVALID_BY_RANGE)
            MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_INVALID_RANGE], MMProfileFlagEnd, size, 0);
        else if (pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE)
            MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_FLUSH_RANGE], MMProfileFlagEnd, size, 0);

    }