Example #1
int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int cnt = 0;
	printk(KERN_DEBUG "Starting secondary CPU %d\n", cpu);

	
	/* Tell other CPUs to come out of reset; secondary CPUs are
	 * probably running with caches off, so clean these writes all
	 * the way to memory (normal cache ops only clean to L2). */
	pen_release = cpu;
	dmac_clean_range((void *)&pen_release,
			 (void *)(&pen_release + 1));
	dmac_clean_range((void *)&secondary_data,
			 (void *)(&secondary_data + 1));
	sev();
	dsb();

	
	/* Wait for the secondary CPU to acknowledge by writing back
	 * 0xFFFFFFFF; invalidate before each read so we see fresh data
	 * from memory rather than a stale cache line. */
	while (pen_release != 0xFFFFFFFF) {
		dmac_inv_range((void *)&pen_release,
			       (void *)(&pen_release + 1));
		msleep_interruptible(1);
		if (cnt++ >= SECONDARY_CPU_WAIT_MS)
			break;
	}

	if (pen_release == 0xFFFFFFFF)
		printk(KERN_DEBUG "Secondary CPU start acked %d\n", cpu);
	else
		printk(KERN_ERR "Secondary CPU failed to start..." \
		       "continuing\n");

	return 0;
}
Example #2
int ion_cp_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (cp_heap->has_outer_cache) {
		unsigned long pstart = buffer->priv_phys + offset;
		outer_cache_op(pstart, pstart + length);
	}
	return 0;
}
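For orientation, a hypothetical call site for the routine above; heap, buf and kvaddr are assumed names, not taken from the source:

	/* Usage sketch: clean a CPU-written ION buffer before a device
	 * reads it. The handles are assumed to come from the usual ION
	 * allocate/map path. */
	ion_cp_cache_ops(heap, buf, kvaddr, 0, buf->size, ION_IOC_CLEAN_CACHES);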
Example #3
SDIO_Status SDIO_SyncWrite(SDIO_Handle Handle, SDIO_Request_t *Req)
{
    struct sdio_func *func = (struct sdio_func *)Handle;
    int rc;
    void *src = Req->buffer;

    if (Req->buffer_len >= DMA_THRESHOLD_SIZE) {
#if USE_SKETCHY_WRITES
        if (is_vmalloc_addr(src)) {
            if (!spans_page(src, Req->buffer_len)) {
                src = vmalloc_to_unity(src);
                dmac_clean_range(Req->buffer,
                                 Req->buffer + Req->buffer_len);
            } else {
#endif
                src = sdio_dma_ptr;
                memcpy(src, Req->buffer, Req->buffer_len);
#if USE_SKETCHY_WRITES
            }
        }
#endif
    }

    rc = sdio_memcpy_toio(func, Req->peripheral_addr, src,
                          Req->buffer_len);

    if (!rc)
        return SDIO_SUCCESS;

    printk(KERN_ERR "%s: failed (%d)\n", __func__, rc);
    return SDIO_FAILURE;
}
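The spans_page() helper is not shown in this excerpt. A minimal sketch of what it plausibly checks, stated as an assumption rather than the driver's actual code:

	/* Hypothetical sketch of spans_page(): true if the buffer crosses a
	 * page boundary, in which case a vmalloc'd buffer need not be
	 * physically contiguous and must be bounced through sdio_dma_ptr. */
	static bool spans_page(const void *addr, size_t len)
	{
		return (((unsigned long)addr & ~PAGE_MASK) + len) > PAGE_SIZE;
	}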
Example #4
static inline void unmap_page(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

	if (buf) {
		BUG_ON(buf->size != size);
		BUG_ON(buf->direction != dir);
		BUG_ON(!buf->page);
		BUG_ON(buf->ptr);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, page_address(buf->page),
			page_to_dma(dev, buf->page),
			buf->safe, buf->safe_dma_addr);

		DO_STATS(dev->archdata.dmabounce->bounce_count++);
		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr;
			unsigned long phys;
			ptr = kmap_atomic(buf->page, KM_BOUNCE_READ) + buf->offset;
			phys = page_to_phys(buf->page) + buf->offset;
			memcpy(ptr, buf->safe, size);
			dmac_clean_range(ptr, ptr + size);
			kunmap_atomic(ptr - buf->offset, KM_BOUNCE_READ);
			outer_clean_range(phys, phys + size);
		}
		free_safe_buffer(dev->archdata.dmabounce, buf);
	} else {
		__dma_page_dev_to_cpu(dma_to_page(dev, dma_addr),
			      dma_addr & ~PAGE_MASK, size, dir);
	}
}
Example #5
static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

	if (buf) {
		BUG_ON(buf->size != size);
		BUG_ON(buf->direction != dir);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		DO_STATS(dev->archdata.dmabounce->bounce_count++);

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, ptr, size);
			memcpy(ptr, buf->safe, size);

			
			/* DMA buffers must have the same cache properties as
			 * if they were really used for DMA, so the data must
			 * be written back to RAM; a clean (not flush) is
			 * enough because the cache lines are coherent with
			 * the data just copied. */
			dmac_clean_range(ptr, ptr + size);
			outer_clean_range(__pa(ptr), __pa(ptr) + size);
		}
		free_safe_buffer(dev->archdata.dmabounce, buf);
	}
}
Example #6
/*
 *  ======== MEM_FlushCache ========
 *  Purpose:
 *      Flush cache
 */
void MEM_FlushCache(void *pMemBuf, u32 cBytes, s32 FlushType)
{
	if (cRefs <= 0 || !pMemBuf)
		goto func_end;
	switch (FlushType) {
	/* invalidate only */
	case PROC_INVALIDATE_MEM:
		dmac_inv_range(pMemBuf, pMemBuf + cBytes);
		outer_inv_range(__pa((u32)pMemBuf), __pa((u32)pMemBuf +
				cBytes));
	break;
	/* writeback only */
	case PROC_WRITEBACK_MEM:
		dmac_clean_range(pMemBuf, pMemBuf + cBytes);
		outer_clean_range(__pa((u32)pMemBuf), __pa((u32)pMemBuf +
				  cBytes));
	break;
	/* writeback and invalidate */
	case PROC_WRITEBACK_INVALIDATE_MEM:
		dmac_flush_range(pMemBuf, pMemBuf + cBytes);
		outer_flush_range(__pa((u32)pMemBuf), __pa((u32)pMemBuf +
				  cBytes));
	break;
	default:
		GT_1trace(MEM_debugMask, GT_6CLASS, "MEM_FlushCache: invalid "
			  "FlushMemType 0x%x\n", FlushType);
	break;
	}
func_end:
	return;
}
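A usage sketch for the routine above; msg_buf and msg_len are hypothetical:

	/* Write back and invalidate a message buffer before handing it to
	 * the remote processor. */
	MEM_FlushCache(msg_buf, msg_len, PROC_WRITEBACK_INVALIDATE_MEM);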
Example #7
void mfc_write_shared_mem(unsigned int host_wr_addr, MFC_SHARED_MEM *shared_mem)
{
//	printk("mfc_read_shared_mem() In: MFC Chip Type (0x%08x)\n", mfc_read_shared_mem_item(host_wr_addr, P720_LIMIT_ENABLE));

	//mfc_write_shared_mem_item(host_wr_addr, shared_mem->num_dpb);
	//mfc_write_shared_mem_item(host_wr_addr, allocated_dpb_size);
	mfc_write_shared_mem_item(host_wr_addr, RC_CONTROL_ENABLE, 1);	// RC_CONTROL_CONFIG : enable (1), disable (0)

	mfc_write_shared_mem_item(host_wr_addr, SET_FRAME_TAG, shared_mem->set_frame_tag);
	mfc_write_shared_mem_item(host_wr_addr, START_BYTE_NUM, shared_mem->start_byte_num);
	mfc_write_shared_mem_item(host_wr_addr, EXT_ENC_CONTROL, shared_mem->ext_enc_control);
	mfc_write_shared_mem_item(host_wr_addr, ENC_PARAM_CHANGE, shared_mem->enc_param_change);
	mfc_write_shared_mem_item(host_wr_addr, VOP_TIMING, shared_mem->vop_timing);
	mfc_write_shared_mem_item(host_wr_addr, HEC_PERIOD, shared_mem->hec_period);
	mfc_write_shared_mem_item(host_wr_addr, P_B_FRAME_QP, shared_mem->P_B_frame_qp);
	mfc_write_shared_mem_item(host_wr_addr, METADATA_ENABLE, shared_mem->metadata_enable);
	mfc_write_shared_mem_item(host_wr_addr, EXT_METADATA_START_ADDR, shared_mem->ext_metadata_start_addr);
	mfc_write_shared_mem_item(host_wr_addr, PUT_EXTRADATA, shared_mem->put_extradata);
	mfc_write_shared_mem_item(host_wr_addr, DBG_INFO_INPUT0, shared_mem->dbg_info_input0);
	mfc_write_shared_mem_item(host_wr_addr, DBG_INFO_INPUT1, shared_mem->dbg_info_input1);
	mfc_write_shared_mem_item(host_wr_addr, ALLOCATED_LUMA_DPB_SIZE, shared_mem->allocated_luma_dpb_size);
	mfc_write_shared_mem_item(host_wr_addr, ALLOCATED_CHROMA_DPB_SIZE, shared_mem->allocated_chroma_dpb_size);
	mfc_write_shared_mem_item(host_wr_addr, ALLOCATED_MV_SIZE, shared_mem->allocated_mv_size);
	mfc_write_shared_mem_item(host_wr_addr, P720_LIMIT_ENABLE, shared_mem->p720_limit_enable);

	dmac_clean_range((void *)host_wr_addr, (void *)(host_wr_addr + SHARED_MEM_MAX));

#if	DEBUG_ENABLE	
	mfc_print_shared_mem(host_wr_addr);
#endif
}
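mfc_write_shared_mem_item() is not part of this excerpt. One plausible sketch, assuming each item name is a byte offset into the shared region (an assumption, not the driver's actual helper):

	/* Hypothetical helper: write one 32-bit item at a fixed offset
	 * inside the shared memory window, which is later cleaned to
	 * memory as a whole by the dmac_clean_range() call above. */
	static void mfc_write_shared_mem_item(unsigned int base,
					      unsigned int ofs, unsigned int val)
	{
		*(volatile unsigned int *)(base + ofs) = val;
	}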
Example #8
int ion_cma_cache_ops(struct ion_heap *heap,
			struct ion_buffer *buffer, void *vaddr,
			unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (cma_heap_has_outer_cache) {
		struct ion_cma_buffer_info *info = buffer->priv_virt;

		outer_cache_op(info->handle, info->handle + length);
	}

	return 0;
}
Example #9
int ion_cp_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	unsigned int size_to_vmap, total_size;
	int i, j;
	void *ptr = NULL;
	ion_phys_addr_t buff_phys = buffer->priv_phys;

	if (!vaddr) {
		/*
		 * Split the vmalloc space into smaller regions in
		 * order to clean and/or invalidate the cache.
		 */
		size_to_vmap = (VMALLOC_END - VMALLOC_START)/8;
		total_size = buffer->size;
		for (i = 0; i < total_size; i += size_to_vmap) {
			size_to_vmap = min(size_to_vmap, total_size - i);
			for (j = 0; j < 10 && size_to_vmap; ++j) {
				ptr = ioremap(buff_phys, size_to_vmap);
				if (ptr) {
					switch (cmd) {
					case ION_IOC_CLEAN_CACHES:
						dmac_clean_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_clean_range;
						break;
					case ION_IOC_INV_CACHES:
						dmac_inv_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_inv_range;
						break;
					case ION_IOC_CLEAN_INV_CACHES:
						dmac_flush_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_flush_range;
						break;
					default:
						iounmap(ptr);
						return -EINVAL;
					}
					buff_phys += size_to_vmap;
					break;
				} else {
					size_to_vmap >>= 1;
				}
			}
			if (!ptr) {
				pr_err("Couldn't io-remap the memory\n");
				return -EINVAL;
			}
			iounmap(ptr);
		}
	} else {
Example #10
/* Function to write back the Cache module */
Void Cache_wb(Ptr blockPtr, UInt32 byteCnt, Bits16 type, Bool wait) {
    GT_4trace (curTrace, GT_ENTER, "Cache_wb", blockPtr, byteCnt, type, wait);

#ifdef USE_CACHE_VOID_ARG
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
    dmac_map_area(blockPtr, (size_t)byteCnt, DMA_TO_DEVICE);
    outer_clean_range(__pa((UInt32)blockPtr),
                      __pa((UInt32)(blockPtr+byteCnt)) );
#else
    dmac_clean_range(blockPtr, (blockPtr+byteCnt) );
#endif
#else
    dmac_clean_range( (UInt32)blockPtr, (UInt32)(blockPtr + byteCnt) );
#endif

    GT_0trace (curTrace, GT_LEAVE, "Cache_wb");
}
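A usage sketch; msgBuf is hypothetical, and note that this particular implementation accepts but ignores the type and wait arguments:

	/* Write a 256-byte message back to memory before a remote core
	 * reads it (type/wait are unused by the body above). */
	Cache_wb(msgBuf, 256, 0, TRUE);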
Example #11
/* Executed by primary CPU, brings other CPUs out of reset. Called at boot
   as well as when a CPU is coming out of shutdown induced by echo 0 >
   /sys/devices/.../cpuX.
*/
int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int cnt = 0;
	printk(KERN_DEBUG "Starting secondary CPU %d\n", cpu);

	/* Tell other CPUs to come out of reset.  Note that secondary CPUs
	 * are probably running with caches off, so we'll need to clean to
	 * memory. Normal cache ops will only clean to L2.
	 */
	pen_release = cpu;
	dmac_clean_range((void *)&pen_release,
			 (void *)(&pen_release + 1));
	dmac_clean_range((void *)&secondary_data,
			 (void *)(&secondary_data + 1));
	sev();
	dsb();

	/* Use smp_cross_call() to send a soft interrupt to wake up
	 * the other core.
	 */
	smp_cross_call(cpumask_of(cpu));

	/* Wait for done signal. The cpu receiving the signal does not
	 * have the MMU or caching turned on, so all of its reads and
	 * writes are to/from memory.  Need to ensure that when
	 * reading the value we invalidate the cache line so we see the
	 * fresh data from memory as the normal routines may only
	 * invalidate to POU or L1.
	 */
	while (pen_release != 0xFFFFFFFF) {
		dmac_inv_range((void *)&pen_release,
			       (void *)(&pen_release + 1));
		msleep_interruptible(1);
		if (cnt++ >= SECONDARY_CPU_WAIT_MS)
			break;
	}

	if (pen_release == 0xFFFFFFFF)
		printk(KERN_DEBUG "Secondary CPU start acked %d\n", cpu);
	else
		printk(KERN_ERR "Secondary CPU failed to start..." \
		       "continuing\n");

	return 0;
}
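For completeness, a minimal sketch of the secondary-side half of this handshake, assuming the usual ARM pen_release convention; this counterpart is not part of the excerpt:

	/* Hypothetical secondary-side acknowledgement: the freshly woken
	 * CPU writes the sentinel back and cleans it so the write reaches
	 * memory regardless of its own cache state, letting the primary's
	 * polling loop above observe it. */
	void platform_secondary_init(unsigned int cpu)
	{
		pen_release = 0xFFFFFFFF;
		dmac_clean_range((void *)&pen_release,
				 (void *)(&pen_release + 1));
	}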
Example #12
static long kgsl_cache_range_op(unsigned long addr, int size,
					unsigned int flags)
{
#ifdef CONFIG_OUTER_CACHE
	unsigned long end;
#endif
	BUG_ON(addr & (KGSL_PAGESIZE - 1));
	BUG_ON(size & (KGSL_PAGESIZE - 1));

	if (flags & KGSL_CACHE_FLUSH)
		dmac_flush_range((const void *)addr,
				(const void *)(addr + size));
	else if (flags & KGSL_CACHE_CLEAN)
		dmac_clean_range((const void *)addr,
				(const void *)(addr + size));
	else
		dmac_inv_range((const void *)addr,
				(const void *)(addr + size));

#ifdef CONFIG_OUTER_CACHE
	for (end = addr; end < (addr + size); end += KGSL_PAGESIZE) {
		pte_t *pte_ptr, pte;
		unsigned long physaddr;
		if (flags & KGSL_CACHE_VMALLOC_ADDR)
			physaddr = vmalloc_to_pfn((void *)end);
		else if (flags & KGSL_CACHE_USER_ADDR) {
			pte_ptr = kgsl_get_pte_from_vaddr(end);
			if (!pte_ptr)
				return -EINVAL;
			pte = *pte_ptr;
			physaddr = pte_pfn(pte);
			pte_unmap(pte_ptr);
		} else
			return -EINVAL;

		physaddr <<= PAGE_SHIFT;
		if (flags & KGSL_CACHE_FLUSH)
			outer_flush_range(physaddr, physaddr + KGSL_PAGESIZE);
		else if (flags & KGSL_CACHE_CLEAN)
			outer_clean_range(physaddr,
				physaddr + KGSL_PAGESIZE);
		else
			outer_inv_range(physaddr,
				physaddr + KGSL_PAGESIZE);
	}
#endif
	return 0;
}
Example #13
static long kgsl_cache_range_op(unsigned long addr, int size,
					unsigned int flags)
{
#ifdef CONFIG_OUTER_CACHE
	unsigned long end;
#endif
	BUG_ON(addr & (KGSL_PAGESIZE - 1));
	BUG_ON(size & (KGSL_PAGESIZE - 1));

	if (flags & KGSL_MEMFLAGS_CACHE_FLUSH)
		dmac_flush_range((const void *)addr,
				(const void *)(addr + size));
	else if (flags & KGSL_MEMFLAGS_CACHE_CLEAN)
		dmac_clean_range((const void *)addr,
				(const void *)(addr + size));
	else if (flags & KGSL_MEMFLAGS_CACHE_INV)
		dmac_inv_range((const void *)addr,
				(const void *)(addr + size));

#ifdef CONFIG_OUTER_CACHE
	for (end = addr; end < (addr + size); end += KGSL_PAGESIZE) {
		unsigned long physaddr;
		if (flags & KGSL_MEMFLAGS_VMALLOC_MEM)
			physaddr = page_to_phys(vmalloc_to_page((void *)end));
		else if (flags & KGSL_MEMFLAGS_HOSTADDR) {
			physaddr = kgsl_virtaddr_to_physaddr(end);
			if (!physaddr) {
				KGSL_MEM_ERR
				("Unable to find physaddr for "
				"address: %x\n", (unsigned int)end);
				return -EINVAL;
			}
		} else
			return -EINVAL;

		if (flags & KGSL_MEMFLAGS_CACHE_FLUSH)
			outer_flush_range(physaddr, physaddr + KGSL_PAGESIZE);
		else if (flags & KGSL_MEMFLAGS_CACHE_CLEAN)
			outer_clean_range(physaddr,
				physaddr + KGSL_PAGESIZE);
		else if (flags & KGSL_MEMFLAGS_CACHE_INV)
			outer_inv_range(physaddr,
				physaddr + KGSL_PAGESIZE);
	}
#endif
	return 0;
}
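A usage sketch with the flag names from the code above; vbuf and nbytes are hypothetical and must be KGSL_PAGESIZE aligned per the BUG_ON checks:

	/* Flush a page-aligned vmalloc region before the GPU reads it. */
	kgsl_cache_range_op((unsigned long)vbuf, nbytes,
			    KGSL_MEMFLAGS_CACHE_FLUSH | KGSL_MEMFLAGS_VMALLOC_MEM);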
Example #14
static inline void
unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf = NULL;

	/*
	 * Trying to unmap an invalid mapping
	 */
	if (dma_mapping_error(dma_addr)) {
		dev_err(dev, "Trying to unmap invalid mapping\n");
		return;
	}

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		BUG_ON(buf->size != size);

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, ptr, size);
			memcpy(ptr, buf->safe, size);

			/*
			 * DMA buffers must have the same cache properties
			 * as if they were really used for DMA - which means
			 * data must be written back to RAM.  Note that
			 * we don't use dmac_flush_range() here for the
			 * bidirectional case because we know the cache
			 * lines will be coherent with the data written.
			 */
			dmac_clean_range(ptr, ptr + size);
			outer_clean_range(__pa(ptr), __pa(ptr) + size);
		}
		free_safe_buffer(device_info, buf);
	}
}
Example #15
static void msm_pm_write_boot_vector(unsigned int cpu, unsigned long address)
{
	uint32_t clust_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
	uint32_t cpu_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);
	unsigned long *start_address;
	unsigned long *end_address;

	if (clust_id >= MAX_NUM_CLUSTER || cpu_id >= MAX_CPUS_PER_CLUSTER)
		BUG();

	msm_pm_boot_vector[CPU_INDEX(clust_id, cpu_id)] = address;
	start_address = &msm_pm_boot_vector[CPU_INDEX(clust_id, cpu_id)];
	end_address = &msm_pm_boot_vector[CPU_INDEX(clust_id, cpu_id + 1)];
	dmac_clean_range((void *)start_address, (void *)end_address);
}
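CPU_INDEX is not defined in this excerpt. A plausible definition consistent with the bounds checks above, offered as an assumption:

	/* Hypothetical layout: one boot-vector slot per (cluster, cpu)
	 * pair, which also makes CPU_INDEX(clust_id, cpu_id + 1) the
	 * exclusive end of the slot being cleaned. */
	#define CPU_INDEX(cluster, cpu) ((cluster) * MAX_CPUS_PER_CLUSTER + (cpu))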
Example #16
int ion_cma_cache_ops(struct ion_heap *heap,
			struct ion_buffer *buffer, void *vaddr,
			unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		if (!vaddr)
			dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_TO_DEVICE);
		else
			dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		if (!vaddr)
			dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_FROM_DEVICE);
		else
			dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		if (!vaddr) {
			dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_TO_DEVICE);
			dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_FROM_DEVICE);
		} else {
			dmac_flush_range(vaddr, vaddr + length);
		}
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (cma_heap_has_outer_cache) {
		struct ion_cma_buffer_info *info = buffer->priv_virt;

		outer_cache_op(info->handle, info->handle + length);
	}

	return 0;
}
Example #17
static int g2d_ioctl(struct inode *inode, struct file *file,
		     unsigned int cmd, unsigned long arg)
{
	struct g2d_dma_info dma_info;
	void *vaddr;

	if (cmd == G2D_WAIT_FOR_IRQ) {
		wait_event_timeout(g2d->wq,
				(atomic_read(&g2d->in_use) == 1), 10000);
		atomic_set(&g2d->in_use, 0);
		return 0;
	}

	if (copy_from_user(&dma_info, (struct g2d_dma_info *)arg,
				sizeof(dma_info)))
		return -EFAULT;

	vaddr = phys_to_virt(dma_info.addr);

	switch (cmd) {
	case G2D_DMA_CACHE_INVAL:
		dmac_inv_range(vaddr, vaddr + dma_info.size);
		break;

	case G2D_DMA_CACHE_CLEAN:
		dmac_clean_range(vaddr, vaddr + dma_info.size);
		break;

	case G2D_DMA_CACHE_FLUSH:
		dmac_flush_range(vaddr, vaddr + dma_info.size);
		break;

	case G2D_DMA_CACHE_FLUSH_ALL:
		__cpuc_flush_kern_all();
		break;

	default:
		break;
	}

	return 0;
}
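A user-space usage sketch; fd, phys and len are hypothetical, and the ioctl numbers are assumed to be exported by the driver's header:

	struct g2d_dma_info info = { .addr = phys, .size = len };

	/* Flush CPU caches for the region before the G2D engine reads it. */
	ioctl(fd, G2D_DMA_CACHE_FLUSH, &info);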
Example #18
static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

	if (buf) {
		BUG_ON(buf->size != size);
		BUG_ON(buf->direction != dir);
		BUG_ON(buf->page);
		BUG_ON(!buf->ptr);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		DO_STATS(dev->archdata.dmabounce->bounce_count++);

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, ptr, size);
			memcpy(ptr, buf->safe, size);

			/*
			 * DMA buffers must have the same cache properties
			 * as if they were really used for DMA - which means
			 * data must be written back to RAM.  Note that
			 * we don't use dmac_flush_range() here for the
			 * bidirectional case because we know the cache
			 * lines will be coherent with the data written.
			 */
			dmac_clean_range(ptr, ptr + size);
			outer_clean_range(__pa(ptr), __pa(ptr) + size);
		}
		free_safe_buffer(dev->archdata.dmabounce, buf);
	} else {
		__dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
	}
}
Example #19
int ion_system_contig_heap_cache_ops(struct ion_heap *heap,
			struct ion_buffer *buffer, void *vaddr,
			unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (system_heap_contig_has_outer_cache) {
		unsigned long pstart;

		pstart = virt_to_phys(buffer->priv_virt) + offset;
		if (!pstart) {
			WARN(1, "Could not do virt to phys translation on %p\n",
				buffer->priv_virt);
			return -EINVAL;
		}

		outer_cache_op(pstart, pstart + PAGE_SIZE);
	}

	return 0;
}
Example #20
int ipp_blit(const struct rk29_ipp_req *req)
{
	
	uint32_t rotate;
	uint32_t pre_scale	= 0;
	uint32_t post_scale = 0;
	uint32_t pre_scale_w, pre_scale_h;	//pre_scale parameters
	uint32_t post_scale_w = 0x1000;
	uint32_t post_scale_h = 0x1000;
	uint32_t pre_scale_output_w = 0, pre_scale_output_h = 0;	//pre_scale output width & height
	uint32_t post_scale_input_w, post_scale_input_h;	//post_scale input width & height
	uint32_t dst0_YrgbMst=0,dst0_CbrMst=0;
	uint32_t ret = 0;
	uint32_t deinterlace_config = 0;
	uint32_t src0_w = req->src0.w;
	uint32_t src0_h = req->src0.h;
	
	//printk("ipp_blit\n");
	if (drvdata == NULL) {			/* check that the driver probed successfully */
		printk("%s drvdata is NULL, IPP driver probe failed!\n", __FUNCTION__);
		return -EPERM;
	}

	drvdata->ipp_result = -1;

	//When ipp_blit_async is called from kernel space, req->complete should NOT be NULL; otherwise req->complete should be NULL
	if(req->complete)
	{
		drvdata->ipp_irq_callback = req->complete;
	}
	else
	{
		drvdata->ipp_irq_callback = ipp_blit_complete;
	}

	ret = ipp_check_param(req);
	if (ret == -EINVAL)
	{
		printk("IPP invalid input!\n");
		goto error_input;
	}
	
	rotate = req->flag;
	switch (rotate) {
	case IPP_ROT_90:
		//for rotation 90 degree
		DBG("rotate %d, src0.fmt %d",rotate,req->src0.fmt);
		switch(req->src0.fmt)
		{
		case IPP_XRGB_8888:
			dst0_YrgbMst  =  req->dst0.YrgbMst + req->dst0.w*4;
			dst0_CbrMst  = req->dst0.CbrMst;
			break;

		case IPP_RGB_565:
			dst0_YrgbMst  =  req->dst0.YrgbMst +  req->dst0.w*2;
			dst0_CbrMst  = req->dst0.CbrMst;
			break;

		case IPP_Y_CBCR_H1V1:
			dst0_YrgbMst  =  req->dst0.YrgbMst + req->dst0.w;
			dst0_CbrMst   =  req->dst0.CbrMst + req->dst0.w*2;
			break;

		case IPP_Y_CBCR_H2V1:
			dst0_YrgbMst =	req->dst0.YrgbMst + req->dst0.w;
			dst0_CbrMst  =	req->dst0.CbrMst + req->dst0.w*2;
			break;

		case IPP_Y_CBCR_H2V2:
			dst0_YrgbMst = req->dst0.YrgbMst+ req->dst0.w;
			dst0_CbrMst  = req->dst0.CbrMst + req->dst0.w;
			break;

		default:

			break;
		}
	break;

	case IPP_ROT_180:
		//for rotation 180 degree
		DBG("rotate %d, src0.fmt %d",rotate,req->src0.fmt);
		switch(req->src0.fmt)
		{
		case IPP_XRGB_8888:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w*4+req->dst0.w*4;
			dst0_CbrMst  = req->dst0.CbrMst;
			break;

		case IPP_RGB_565:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w*2+req->dst0.w*2;
			dst0_CbrMst  = req->dst0.CbrMst;
			break;

		case IPP_Y_CBCR_H1V1:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w+req->dst0.w;
			dst0_CbrMst  =	req->dst0.CbrMst+(req->dst0.h-1)*req->dst_vir_w*2+req->dst0.w*2;
			break;

		case IPP_Y_CBCR_H2V1:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w+req->dst0.w;
			dst0_CbrMst  =	req->dst0.CbrMst+(req->dst0.h-1)*req->dst_vir_w+req->dst0.w;
			break;

		case IPP_Y_CBCR_H2V2:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w+req->dst0.w;
			dst0_CbrMst  =	req->dst0.CbrMst+((req->dst0.h/2)-1)*req->dst_vir_w+req->dst0.w;
			break;

		default:
			break;
		}
			break;

	case IPP_ROT_270:
		DBG("rotate %d, src0.fmt %d \n",rotate,req->src0.fmt);
		switch(req->src0.fmt)
		{
		case IPP_XRGB_8888:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w*4;
			dst0_CbrMst  = req->dst0.CbrMst;
			break;

		case IPP_RGB_565:
			dst0_YrgbMst =	req->dst0.YrgbMst +(req->dst0.h-1)*req->dst_vir_w*2;
			dst0_CbrMst  = req->dst0.CbrMst;
			break;

		case IPP_Y_CBCR_H1V1:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w;
			dst0_CbrMst  =	req->dst0.CbrMst+(req->dst0.h-1)*req->dst_vir_w*2;
			break;

		case IPP_Y_CBCR_H2V1:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w;
			dst0_CbrMst  =	req->dst0.CbrMst+(req->dst0.h-1)*req->dst_vir_w*2;
			break;

		case IPP_Y_CBCR_H2V2:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w;
			dst0_CbrMst  =	req->dst0.CbrMst+((req->dst0.h/2)-1)*req->dst_vir_w;
			break;

		default:
			break;
		}
		break;

	case IPP_ROT_X_FLIP:
		DBG("rotate %d, src0.fmt %d",rotate,req->src0.fmt);
		switch(req->src0.fmt)
		{
		case IPP_XRGB_8888:
			dst0_YrgbMst =	req->dst0.YrgbMst+req->dst0.w*4;
			dst0_CbrMst  = req->dst0.CbrMst;
			break;

		case IPP_RGB_565:
			dst0_YrgbMst =	req->dst0.YrgbMst+req->dst0.w*2;
			dst0_CbrMst  = req->dst0.CbrMst;
			break;

		case IPP_Y_CBCR_H1V1:
			dst0_YrgbMst =	req->dst0.YrgbMst+req->dst0.w;
			dst0_CbrMst  =	req->dst0.CbrMst+req->dst0.w*2;
			break;

		case IPP_Y_CBCR_H2V1:
			dst0_YrgbMst =	req->dst0.YrgbMst+req->dst0.w;
			dst0_CbrMst  =	req->dst0.CbrMst+req->dst0.w;
			break;

		case IPP_Y_CBCR_H2V2:
			dst0_YrgbMst =	req->dst0.YrgbMst+req->dst0.w;
			dst0_CbrMst  =	req->dst0.CbrMst+req->dst0.w;
			break;

		default:
			break;
		}

	break;

	case IPP_ROT_Y_FLIP:
		DBG("rotate %d, src0.fmt %d",rotate,req->src0.fmt);
		switch(req->src0.fmt)
		{
		case IPP_XRGB_8888:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w*4;
			dst0_CbrMst  = req->dst0.CbrMst;
			break;

		case IPP_RGB_565:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w*2;
			dst0_CbrMst  = req->dst0.CbrMst;
			break;

		case IPP_Y_CBCR_H1V1:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w;
			dst0_CbrMst = req->dst0.CbrMst+(req->dst0.h-1)*req->dst_vir_w*2;
			break;

		case IPP_Y_CBCR_H2V1:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w;
			dst0_CbrMst = req->dst0.CbrMst+(req->dst0.h-1)*req->dst_vir_w;
			break;

		case IPP_Y_CBCR_H2V2:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w;
			dst0_CbrMst = req->dst0.CbrMst+((req->dst0.h/2)-1)*req->dst_vir_w;
			break;

	default:
		break;
	}

	break;
	case IPP_ROT_0:
		//for  0 degree
		DBG("rotate %d, src0.fmt %d",rotate,req->src0.fmt);
		dst0_YrgbMst =	req->dst0.YrgbMst;
		dst0_CbrMst  = req->dst0.CbrMst;
	break;

	default:
		ERR("ipp is not surpport degree!!\n" );
		break;
	}
	
	//we start to interact with hw now
	wq_condition = 0;

	ipp_power_on();
	
	
	//check if IPP is idle
	if (((ipp_read(IPP_INT)>>6)&0x3) != 0)	// not idle
	{
		printk("IPP status is not idle, cannot set registers\n");
		goto error_status;
	}


	/* Configure source image */
	DBG("src YrgbMst 0x%x , CbrMst0x%x,  %dx%d, fmt = %d\n", req->src0.YrgbMst,req->src0.CbrMst,
					req->src0.w, req->src0.h, req->src0.fmt);

	ipp_write(req->src0.YrgbMst, IPP_SRC0_Y_MST);
	if(IS_YCRCB(req->src0.fmt))
	{
		ipp_write(req->src0.CbrMst, IPP_SRC0_CBR_MST);
	}
	//ipp_write(req->src0.h<<16|req->src0.w, IPP_SRC_IMG_INFO);
	ipp_write((ipp_read(IPP_CONFIG)&(~0x7))|req->src0.fmt, IPP_CONFIG);

	/* Configure destination image */
	DBG("dst YrgbMst 0x%x , CbrMst0x%x, %dx%d\n", dst0_YrgbMst,dst0_CbrMst,
					req->dst0.w, req->dst0.h);

	ipp_write(dst0_YrgbMst, IPP_DST0_Y_MST);

	if(IS_YCRCB(req->src0.fmt))
	{
		ipp_write(dst0_CbrMst, IPP_DST0_CBR_MST);
	}

	ipp_write(req->dst0.h<<16|req->dst0.w, IPP_DST_IMG_INFO);

	/*Configure Pre_scale*/
	if((IPP_ROT_90 == rotate) || (IPP_ROT_270 == rotate))
	{
		pre_scale = ((req->dst0.w <= req->src0.h/2) ? 1 : 0)||((req->dst0.h <= req->src0.w/2) ? 1 : 0);
	}
	else //other degree
	{
		pre_scale = ((req->dst0.w <= req->src0.w/2) ? 1 : 0)||((req->dst0.h <= req->src0.h/2) ? 1 : 0);
	}

	if(pre_scale)
	{
		if(((IPP_ROT_90 == rotate) || (IPP_ROT_270 == rotate)))
		{
			if((req->src0.w>req->dst0.h))
			{
				pre_scale_w = (uint32_t)( req->src0.w/req->dst0.h);//floor
			}
			else
			{
			   pre_scale_w = 1;
			}
			if((req->src0.h>req->dst0.w))
			{
				pre_scale_h = (uint32_t)( req->src0.h/req->dst0.w);//floor
			}
			else
			{
				pre_scale_h = 1;
			}

			DBG("!!!!!pre_scale_h %d,pre_scale_w %d \n",pre_scale_h,pre_scale_w);
		}
		else//0 180 x ,y
		{
			if((req->src0.w>req->dst0.w))
			{
				pre_scale_w = (uint32_t)( req->src0.w/req->dst0.w);//floor
			}
			else
			{
			   pre_scale_w = 1;
			}

			if((req->src0.h>req->dst0.h))
			{
				pre_scale_h = (uint32_t)( req->src0.h/req->dst0.h);//floor
			}
			else
			{
				pre_scale_h = 1;
			}
			DBG("!!!!!pre_scale_h %d,pre_scale_w %d \n",pre_scale_h,pre_scale_w);
		}

		//pre_scale only supports 1/2 to 1/8 downscaling
		if(pre_scale_w > 8)
		{
			if(pre_scale_w < 16)
			{
				pre_scale_w = 8;
			}
			else
			{
				printk("invalid pre_scale operation! Down scaling ratio should not be more than 16!\n");
				goto error_scale;
			}
		}
		if(pre_scale_h > 8)
		{
			if(pre_scale_h < 16)
			{
				pre_scale_h = 8;
			}
			else
			{
				printk("invalid pre_scale operation! Down scaling ratio should not be more than 16!\n");
				goto error_scale;
			}
		}
			
		if((req->src0.w%pre_scale_w)!=0) //ceil
		{
			pre_scale_output_w = req->src0.w/pre_scale_w+1;
		}
		else
		{
			pre_scale_output_w = req->src0.w/pre_scale_w;
		}

		if((req->src0.h%pre_scale_h)!=0)//ceil
		{
			pre_scale_output_h	= req->src0.h/pre_scale_h +1;
		}
		else
		{
			pre_scale_output_h = req->src0.h/pre_scale_h;
		}

		//when fmt is YUV,make sure pre_scale_output_w and pre_scale_output_h are even
		if(IS_YCRCB(req->src0.fmt))
		{
			if((pre_scale_output_w & 0x1) != 0)
			{
				pre_scale_output_w -=1;
				src0_w = pre_scale_output_w * pre_scale_w;
			}
			if((pre_scale_output_h & 0x1) != 0)
			{
				pre_scale_output_h -=1;
				src0_h = pre_scale_output_h * pre_scale_h;
			}
		}
			
		ipp_write((ipp_read(IPP_CONFIG)&0xffffffef)|PRE_SCALE, IPP_CONFIG); 		//enable pre_scale
		ipp_write((pre_scale_h-1)<<3|(pre_scale_w-1),IPP_PRE_SCL_PARA);
		ipp_write(((pre_scale_output_h)<<16)|(pre_scale_output_w), IPP_PRE_IMG_INFO);

	}
	else//no pre_scale
	{
		ipp_write(ipp_read(IPP_CONFIG)&(~PRE_SCALE), IPP_CONFIG); //disable pre_scale
		ipp_write(0,IPP_PRE_SCL_PARA);
		ipp_write((req->src0.h<<16)|req->src0.w, IPP_PRE_IMG_INFO);
	}

	ipp_write(src0_h<<16|src0_w, IPP_SRC_IMG_INFO);
	
	/*Configure Post_scale*/
	if((IPP_ROT_90 == rotate) || (IPP_ROT_270 == rotate))
	{
		if (((req->src0.h%req->dst0.w) != 0) || ((req->src0.w%req->dst0.h) != 0)	//non-integer down-scaling
			|| ((req->src0.h/req->dst0.w) > 8) || ((req->src0.w/req->dst0.h) > 8)	 //down-scaling ratio > 8
			|| (req->dst0.w > req->src0.h) || (req->dst0.h > req->src0.w))	 //up-scaling
							
		{
			post_scale = 1;
		}
		else
		{
			post_scale = 0;
		}
	}
	else  //0 180 x-flip y-flip
	{
		if (((req->src0.w%req->dst0.w) != 0) || ((req->src0.h%req->dst0.h) != 0)	//non-integer down-scaling
			|| ((req->src0.w/req->dst0.w) > 8) || ((req->src0.h/req->dst0.h) > 8)	 //down-scaling ratio > 8
			|| (req->dst0.w > req->src0.w) || (req->dst0.h > req->src0.h))	 //up-scaling
		{
			post_scale = 1;
		}
		else
		{
			post_scale = 0;
		}
	}

	if(post_scale)
	{
		if(pre_scale)
		{
			post_scale_input_w = pre_scale_output_w;
			post_scale_input_h = pre_scale_output_h;
		}
		else
		{
			post_scale_input_w = req->src0.w;
			post_scale_input_h = req->src0.h;
		}
			
		if((IPP_ROT_90 == rotate) || (IPP_ROT_270 == rotate))
		{
			DBG("post_scale_input_w %d ,post_scale_input_h %d !!!\n",post_scale_input_w,post_scale_input_h);

			switch(req->src0.fmt)
			{
			case IPP_XRGB_8888:
			case IPP_RGB_565:
			case IPP_Y_CBCR_H1V1:
				//In the horizontal scale case, the factor must be decremented by 1 if it comes out as an exact integer
				 if(((4096*(post_scale_input_w-1))%(req->dst0.h-1))==0)
				 {
					post_scale_w = (uint32_t)(4096*(post_scale_input_w-1)/(req->dst0.h-1))-1;
				 }
				 else
				 {
					 post_scale_w = (uint32_t)(4096*(post_scale_input_w-1)/(req->dst0.h-1));
				 }
				 break;

			case IPP_Y_CBCR_H2V1:
			case IPP_Y_CBCR_H2V2:
				//In the horizontal scale case, the factor must be decremented by 1 if it comes out as an exact integer
				 if(((4096*(post_scale_input_w/2-1))%(req->dst0.h/2-1))==0)
				 {
					post_scale_w = (uint32_t)(4096*(post_scale_input_w/2-1)/(req->dst0.h/2-1))-1;
				 }
				 else
				 {
					 post_scale_w = (uint32_t)(4096*(post_scale_input_w/2-1)/(req->dst0.h/2-1));
				 }
				 break;

			default:
				break;
			}
			post_scale_h = (uint32_t)(4096*(post_scale_input_h -1)/(req->dst0.w-1));

			DBG("1111 post_scale_w %x,post_scale_h %x!!! \n",post_scale_w,post_scale_h);
		}
		else// 0 180 x-flip y-flip
		{
			switch(req->src0.fmt)
			{
			case IPP_XRGB_8888:
			case IPP_RGB_565:
			case IPP_Y_CBCR_H1V1:
				//In the horizontal scale case, the factor must be decremented by 1 if it comes out as an exact integer
				 if(((4096*(post_scale_input_w-1))%(req->dst0.w-1))==0)
				 {
					post_scale_w = (uint32_t)(4096*(post_scale_input_w-1)/(req->dst0.w-1))-1;
				 }
				 else
				 {
					 post_scale_w = (uint32_t)(4096*(post_scale_input_w-1)/(req->dst0.w-1));
				 }
				 break;

			case IPP_Y_CBCR_H2V1:
			case IPP_Y_CBCR_H2V2:
				//In the horizontal scale case, the factor must be decremented by 1 if it comes out as an exact integer
				 if(((4096*(post_scale_input_w/2-1))%(req->dst0.w/2-1))==0)
				 {
					post_scale_w = (uint32_t)(4096*(post_scale_input_w/2-1)/(req->dst0.w/2-1))-1;
				 }
				 else
				 {
					 post_scale_w = (uint32_t)(4096*(post_scale_input_w/2-1)/(req->dst0.w/2-1));
				 }
				 break;

			default:
				break;
			}
			post_scale_h = (uint32_t)(4096*(post_scale_input_h -1)/(req->dst0.h-1));

		}

		/* only 1/2x to 4x scaling is supported, but two cases are
		   allowed through:
		   1. 176*144 -> 480*800, YUV420
		   2. 128*128 -> 480*800, YUV420
		*/
		if(!((req->src0.fmt == IPP_Y_CBCR_H2V2)&&
			(((req->src0.w == 176)&&(req->src0.h == 144))||((req->src0.w == 128)&&(req->src0.h == 128)))&&
			((req->dst0.w == 480)&&(req->dst0.h == 800))))	
		{	
			
			if(post_scale_w<0x3ff || post_scale_w>0x1fff || post_scale_h<0x400 || post_scale_h>0x2000 )
			{
				printk("invalid post_scale para!\n");
				goto error_scale;
			}
		}
		ipp_write((ipp_read(IPP_CONFIG)&0xfffffff7)|POST_SCALE, IPP_CONFIG); //enable post_scale
		ipp_write((post_scale_h<<16)|post_scale_w, IPP_POST_SCL_PARA);
	}
	else //no post_scale
	{
		DBG("no post_scale !!!!!! \n");
		ipp_write(ipp_read(IPP_CONFIG)&(~POST_SCALE), IPP_CONFIG); //disable post_scale
		ipp_write((post_scale_h<<16)|post_scale_w, IPP_POST_SCL_PARA);
	}

	/* Configure rotation */

	if(IPP_ROT_0 == req->flag)
	{
		ipp_write(ipp_read(IPP_CONFIG)&(~ROT_ENABLE), IPP_CONFIG);
	}
	else
	{
		ipp_write(ipp_read(IPP_CONFIG)|ROT_ENABLE, IPP_CONFIG);
		ipp_write((ipp_read(IPP_CONFIG)&0xffffff1f)|(rotate<<5), IPP_CONFIG);
	}

	/*Configure deinterlace*/
	if(req->deinterlace_enable == 1)
	{
		//only support YUV format
		if(IS_YCRCB(req->src0.fmt))
		{
			//If pre_scale is enable, Deinterlace is done by scale filter
			if(!pre_scale)
			{
				//check the deinterlace parameters
				if((req->deinterlace_para0 < 32) && (req->deinterlace_para1 < 32) && (req->deinterlace_para2 < 32) 
					&& ((req->deinterlace_para0 + req->deinterlace_para1 + req->deinterlace_para2) == 32))
				{
					deinterlace_config = (req->deinterlace_enable<<24) | (req->deinterlace_para0<<19) | (req->deinterlace_para1<<14) | (req->deinterlace_para2<<9);
					DBG("para0 %d, para1 %d, para2 %d,deinterlace_config  %x\n",req->deinterlace_para0,req->deinterlace_para1,req->deinterlace_para2,deinterlace_config);
					#ifdef CONFIG_DEINTERLACE
						ipp_write((ipp_read(IPP_CONFIG)&0xFE0001FF)|deinterlace_config, IPP_CONFIG);
					#else
						printk("does not support deinterlacing!\n");
						ipp_write(ipp_read(IPP_CONFIG)&(~DEINTERLACE_ENABLE), IPP_CONFIG); //disable deinterlace
					#endif
				}
				else
				{
					ERR("invalid deinterlace parameters!\n");
				}
			}
		}
		else
		{
			ERR("only support YUV format!\n");
		}
	}
	else
	{
		ipp_write(ipp_read(IPP_CONFIG)&(~DEINTERLACE_ENABLE), IPP_CONFIG); //disable deinterlace
	}

	/*Configure other*/
	ipp_write((req->dst_vir_w<<16)|req->src_vir_w, IPP_IMG_VIR);

	//store clip mode 
	if(req->store_clip_mode == 1)
	{
		ipp_write(ipp_read(IPP_CONFIG)|STORE_CLIP_MODE, IPP_CONFIG);
	}
	else
	{
		ipp_write(ipp_read(IPP_CONFIG)&(~STORE_CLIP_MODE), IPP_CONFIG);
	}


	/* Start the operation */
	ipp_write(8, IPP_INT);		

	ipp_write(1, IPP_PROCESS_ST);

	dsb();
	dmac_clean_range(drvdata->ipp_base,drvdata->ipp_base+0x54);
#ifdef IPP_TEST
	hw_start = ktime_get(); 
#endif	

    goto error_noerror;


error_status:
error_scale:
	ret = -EINVAL;
	ipp_soft_reset();
	ipp_power_off(NULL);
error_input:
error_noerror:
	drvdata->ipp_result = ret;
	//printk("ipp_blit done\n");
	return ret;
}
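A worked example of the two-stage scaling arithmetic above, with illustrative numbers: scaling a 1280x720 XRGB source down to 200x112 with no rotation.

	/* pre_scale:  1280/200 = 6 and 720/112 = 6 (floor), both <= 8
	 * pre output: ceil(1280/6) = 214, 720/6 = 120
	 * post_scale: maps 214x120 -> 200x112 with 4096-based factors,
	 *             post_scale_w = 4096*(214-1)/(200-1) = 4384 (0x1120)
	 *             post_scale_h = 4096*(120-1)/(112-1) = 4391 (0x1127)
	 *             both inside the 0x3ff..0x1fff / 0x400..0x2000 windows
	 *             checked above. */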
Example #21
static void ipp_test_work_handler(struct work_struct *work)
{

		struct rk29_ipp_req ipp_req;
		int i = 0, j;
		int ret = 0, ret1 = 0, ret2 = 0;

		uint8_t *srcY;
		uint8_t *dstY;
		uint8_t *srcUV;
		uint8_t *dstUV;
		uint32_t src_addr;
		uint32_t dst_addr;
		uint8_t *p;
		pm_message_t pm;
#if 1
//test WIMO bug
#define CAMERA_MEM 0x90400000

		src_addr = CAMERA_MEM;
		dst_addr = CAMERA_MEM + 1280*800*4;
		printk("src_addr: %x-%x, dst_addr: %x-%x\n",srcY,src_addr,dstY,dst_addr);

		ipp_req.src0.YrgbMst = src_addr;
		//ipp_req.src0.CbrMst = src_addr + 1280*800;
		ipp_req.src0.w = 1280;
		ipp_req.src0.h = 800;
		ipp_req.src0.fmt = IPP_XRGB_8888;
		
		ipp_req.dst0.YrgbMst = dst_addr;
		//ipp_req.dst0.CbrMst = dst_addr + 1024*768;
		ipp_req.dst0.w = 1280;
		ipp_req.dst0.h = 800;
	
		ipp_req.src_vir_w = 1280;
		ipp_req.dst_vir_w = 1280;
		ipp_req.timeout = 100;
		ipp_req.flag = IPP_ROT_90;

		while(ret==0)
		{
			ret = ipp_blit_sync_real(&ipp_req);
			msleep(10);
		}
#endif
/*
//test zyc's bug		 

		uint32_t size = 800*480;
		srcY = (uint32_t)__get_free_pages(GFP_KERNEL, 9); //320*240*2*2bytes=300k < 128 pages
		srcUV = srcY + size;
		dstY = srcUV + size;
		dstUV = dstY + size;
		src_addr = virt_to_phys(srcY);
		dst_addr = virt_to_phys(dstY);
		printk("src_addr: %x-%x, dst_addr: %x-%x\n",srcY,srcUV,dstY,dstUV);

		
		ipp_req.src0.YrgbMst = src_addr;
		ipp_req.src0.CbrMst = src_addr + size;
		ipp_req.src0.w = 128;
		ipp_req.src0.h = 128;
		ipp_req.src0.fmt = IPP_Y_CBCR_H2V2;
		
		ipp_req.dst0.YrgbMst = dst_addr;
		ipp_req.dst0.CbrMst = dst_addr + size;
		ipp_req.dst0.w = 480;
		ipp_req.dst0.h = 800;
	
		ipp_req.src_vir_w = 128;
		ipp_req.dst_vir_w = 480;
		ipp_req.timeout = 100;
		ipp_req.flag = IPP_ROT_0;
		
		ipp_req.deinterlace_enable =0;
		ipp_req.deinterlace_para0 = 16;
		ipp_req.deinterlace_para1 = 16;
		ipp_req.deinterlace_para2 = 0;

		ipp_req.complete = ipp_test_complete;
*/
		/*0 test whether IPP_CONFIG is set correctly*/
		/*
		ipp_blit_sync(&ipp_req);
		ipp_req.dst0.w = 480;
		ipp_req.dst0.h = 320;
		ipp_req.dst_vir_w = 480;
		ipp_blit_sync(&ipp_req);
		*/
		
		/*1 test ipp_blit_sync*/
		/*
		while(!ret)
		{
			ipp_req.timeout = 50;
			//ret = ipp_do_blit(&ipp_req);
			ret = ipp_blit_sync(&ipp_req);
		}*/
		

		/*2.test ipp_blit_async*/
		/*
		do
		{
			test_condition = 0;
			ret = ipp_blit_async(&ipp_req);
			if(ret == 0)
				ret = wait_event_interruptible_timeout(test_queue, test_condition, msecs_to_jiffies(ipp_req.timeout));
			
			irq_end = ktime_get(); 
			irq_end = ktime_sub(irq_end,irq_start);
			hw_end = ktime_sub(hw_end,hw_start);
			if((((int)ktime_to_us(hw_end)/1000)>10)||(((int)ktime_to_us(irq_end)/1000)>10))
			{
				printk("hw time: %d ms, irq time: %d ms\n",(int)ktime_to_us(hw_end)/1000,(int)ktime_to_us(irq_end)/1000);
			}
		}while(ret>0);
		printk("test ipp_blit_async over!!!\n");
		*/

		/*3. two async reqs, get the right result respectively*/
		
		/*
	[   10.253532] ipp_blit_async
	[   10.256210] ipp_blit_async2
	[   10.259000] ipp_blit_async
	[   10.261700] ipp_test_complete retval=0
	[   10.282832] ipp_blit_async2
	[   10.284304] ipp_test_complete retval=1        
		*/
		/*
		ret1 = ipp_blit_async(&ipp_req);
		ret2 = ipp_blit_async(&ipp_req);
		*/

		/*4. two sync reqs, get the right result respectively*/
		/*
		[   10.703628] ipp_blit_async
		[   10.711211] wait_event_interruptible waitret= 0
		[   10.712878] ipp_blit_async2
		[   10.720036] ipp_blit_sync done
		[   10.720229] ipp_blit_async
		[   10.722920] wait_event_interruptible waitret= 0
		[   10.727422] ipp_blit_async2
		[   10.790052] hw time: 1 ms, irq time: 48 ms
		[   10.791294] ipp_blit_sync wait_ret=0,wait_event_timeout 
		*/
		/*
		ret1 = ipp_blit_sync(&ipp_req);
		ret2 = ipp_blit_sync(&ipp_req);*/

		/*5*/
		/*
		ret1 = ipp_blit_async(&ipp_req);
		ret2 = ipp_blit_sync(&ipp_req);	
		ret1 = ipp_blit_sync(&ipp_req);
		ret2 = ipp_blit_async(&ipp_req);
		*/

		/*6.call IPP driver in the kernel space and the user space at the same time*/
		/*
			ipp_req.src_vir_w = 280;
		ipp_req.dst_vir_w = 800;
		do
		{
			 ret = ipp_blit_sync(&ipp_req);
			 msleep(40);
		}
		while(ret==0);

		printk("error! ret =%d\n",ret);

		ipp_req.src_vir_w = 480;
		ipp_req.dst_vir_w = 600;

		ipp_blit_sync(&ipp_req);
		*/
		
		/*7.suspand and resume*/
		/*
		//do
		{
			 ret = ipp_blit_async(&ipp_req);
			 ipp_suspend(NULL,pm);
			 msleep(1000);
			 ipp_resume(NULL);
		}
		//while(ret==0);*/
	/*	
		do
		{
			 ret = ipp_blit_async(&ipp_req);
			 if(ret == 0)
				ret = wait_event_interruptible_timeout(test_queue, test_condition, msecs_to_jiffies(ipp_req.timeout));
			 msleep(100);

		}
		while(ret>0);
*/

		/*8 test special up scaling*/
		/*
		ipp_req.src0.fmt = IPP_Y_CBCR_H2V2;
		ipp_req.src0.w = 128;
		ipp_req.src0.h = 128;
		ipp_req.dst0.w = 480;
		ipp_req.dst0.h = 800;
		ipp_req.src_vir_w = 128;
		ipp_req.dst_vir_w = 480;
		ret = -1;
		ret = ipp_blit_sync(&ipp_req);
		printk("128x128->480x800: %d \n",ret);

		ipp_req.src0.w = 160;
		ipp_req.src0.h = 160;
		ipp_req.src_vir_w = 160;
		ret = -1;
		ret = ipp_blit_sync(&ipp_req);
		printk("160x160->480x800: %d \n",ret);

		ipp_req.src0.w = 176;
		ipp_req.src0.h = 144;
		ipp_req.src_vir_w = 176;
		ret = -1;
		ret = ipp_blit_sync(&ipp_req);
		printk("176x144->480x800: %d \n",ret);

		ipp_req.src0.fmt = IPP_Y_CBCR_H2V1;
		ret = -1;
		ret = ipp_blit_sync(&ipp_req);
		printk("fmt:422 176x144->480x800 : %d \n",ret);

		ipp_req.src0.fmt = IPP_Y_CBCR_H2V2;
		ipp_req.dst0.w = 800;
		ipp_req.dst0.h = 480;
		ipp_req.dst_vir_w = 800;
		ret = -1;
		ret = ipp_blit_sync(&ipp_req);
		printk("176x144->800x480: %d \n",ret);
		*/

		/*9 test rotate config*/
/*
		ipp_req.flag = IPP_ROT_180;
		ipp_blit_sync(&ipp_req);
		ipp_req.flag = IPP_ROT_270;
		ipp_blit_sync(&ipp_req);
		
		free_pages(srcY, 9);
*/
//test deinterlace
#if 0
		uint32_t size = 16*16;
		srcY = (uint32_t*)kmalloc(size*2*2,GFP_KERNEL);
		memset(srcY,0,size*2*2);
		srcUV = srcY + size;
		dstY = srcY + size*2;
		dstUV = dstY + size;
		src_addr = virt_to_phys(srcY);
		dst_addr = virt_to_phys(dstY);
		printk("src_addr: %x-%x, dst_addr: %x-%x\n",srcY,srcUV,dstY,dstUV);

		//set srcY
		p=srcY;
		for(i=0;i<256; i++)
		{
			*p++ = i;
		}
		
		//set srcUV
		p=srcUV;
		for(i=0;i<128; i++)
		{
			*p++ = 2*i;
		}
		
		dmac_clean_range(srcY,srcY+size*2);
		
		ipp_req.src0.YrgbMst = src_addr;
		ipp_req.src0.CbrMst = src_addr + size;
		ipp_req.src0.w = 16;
		ipp_req.src0.h = 16;
		ipp_req.src0.fmt = IPP_Y_CBCR_H2V2;
		
		ipp_req.dst0.YrgbMst = dst_addr;
		ipp_req.dst0.CbrMst = dst_addr + size;
		ipp_req.dst0.w = 16;
		ipp_req.dst0.h = 16;
	
		ipp_req.src_vir_w = 16;
		ipp_req.dst_vir_w = 16;
		ipp_req.timeout = 1000;
		ipp_req.flag = IPP_ROT_0;

		ipp_req.deinterlace_enable =1;
		ipp_req.deinterlace_para0 = 16;
		ipp_req.deinterlace_para1 = 16;
		ipp_req.deinterlace_para2 = 0;
		
		ipp_do_blit(&ipp_req);

		
/*
		for(i=0;i<8;i++)
		{
			for(j=0;j<32;j++)
			{
				printk("%4x ",*(srcY+j+i*32));
			}
			printk("\n");
		}

		
		printk("\n\n");
		dmac_inv_range(dstY,dstY+size*2);
		for(i=0;i<8;i++)
		{
			for(j=0;j<32;j++)
			{
				printk("%4x ",*(dstY+j+i*32));
			}
			printk("\n");
		}

		printk("\n\n");
		for(i=0;i<8;i++)
		{
			for(j=0;j<16;j++)
			{
				printk("%4x ",*(srcUV+j+i*16));
			}
			printk("\n");
		}
		printk("\n\n");
		for(i=0;i<8;i++)
		{
			for(j=0;j<16;j++)
			{
				printk("%4x ",*(dstUV+j+i*16));
			}
			printk("\n");
		}
*/		
		kfree(srcY);
#endif

}
Example #22
int s3c_mem_ioctl(struct inode *inode, struct file *file,
		unsigned int cmd, unsigned long arg)
{
#ifdef USE_DMA_ALLOC
	unsigned long virt_addr;
#else
	unsigned long *virt_addr;
#endif
	void *flush_start, *flush_end;

	struct mm_struct *mm = current->mm;
	struct s3c_mem_alloc param;

	switch (cmd) {
	case S3C_MEM_ALLOC:
		mutex_lock(&mem_alloc_lock);
		if (copy_from_user(&param, (struct s3c_mem_alloc *)arg,
					sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_alloc_lock);
			return -EFAULT;
		}
		flag = MEM_ALLOC;
		param.vir_addr = do_mmap(file, 0, param.size,
				(PROT_READ|PROT_WRITE), MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x, %d\n",
						param.vir_addr, __LINE__);
		if (param.vir_addr == -EINVAL) {
			printk(KERN_INFO "S3C_MEM_ALLOC FAILED\n");
			flag = 0;
			mutex_unlock(&mem_alloc_lock);
			return -EFAULT;
		}
		param.phy_addr = physical_address;
#ifdef USE_DMA_ALLOC
		param.kvir_addr = virtual_address;
#endif

		DEBUG("KERNEL MALLOC : param.phy_addr = 0x%X \t "
				"size = %d \t param.vir_addr = 0x%X, %d\n",
				param.phy_addr, param.size, param.vir_addr,
				__LINE__);

		if (copy_to_user((struct s3c_mem_alloc *)arg, &param,
					sizeof(struct s3c_mem_alloc))) {
			flag = 0;
			mutex_unlock(&mem_alloc_lock);
			return -EFAULT;
		}
		flag = 0;
		mutex_unlock(&mem_alloc_lock);

		break;

	case S3C_MEM_CACHEABLE_ALLOC:
		mutex_lock(&mem_cacheable_alloc_lock);
		if (copy_from_user(&param, (struct s3c_mem_alloc *)arg,
					sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_cacheable_alloc_lock);
			return -EFAULT;
		}
		flag = MEM_ALLOC_CACHEABLE;
		param.vir_addr = do_mmap(file, 0, param.size,
				(PROT_READ|PROT_WRITE), MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x, %d\n",
				param.vir_addr, __LINE__);
		if (param.vir_addr == -EINVAL) {
			printk(KERN_INFO "S3C_MEM_ALLOC FAILED\n");
			flag = 0;
			mutex_unlock(&mem_cacheable_alloc_lock);
			return -EFAULT;
		}
		param.phy_addr = physical_address;
		DEBUG("KERNEL MALLOC : param.phy_addr = 0x%X"
				" \t size = %d \t param.vir_addr = 0x%X, %d\n",
				param.phy_addr, param.size, param.vir_addr,
				__LINE__);

		if (copy_to_user((struct s3c_mem_alloc *)arg, &param,
					sizeof(struct s3c_mem_alloc))) {
			flag = 0;
			mutex_unlock(&mem_cacheable_alloc_lock);
			return -EFAULT;
		}
		flag = 0;
		mutex_unlock(&mem_cacheable_alloc_lock);

		break;

	case S3C_MEM_SHARE_ALLOC:
		mutex_lock(&mem_share_alloc_lock);
		if (copy_from_user(&param, (struct s3c_mem_alloc *)arg,
					sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_share_alloc_lock);
			return -EFAULT;
		}
		flag = MEM_ALLOC_SHARE;
		physical_address = param.phy_addr;
		DEBUG("param.phy_addr = %08x, %d\n",
				physical_address, __LINE__);
		param.vir_addr = do_mmap(file, 0, param.size,
				(PROT_READ|PROT_WRITE), MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x, %d\n",
				param.vir_addr, __LINE__);
		if (param.vir_addr == -EINVAL) {
			printk(KERN_INFO "S3C_MEM_SHARE_ALLOC FAILED\n");
			flag = 0;
			mutex_unlock(&mem_share_alloc_lock);
			return -EFAULT;
		}
		DEBUG("MALLOC_SHARE : param.phy_addr = 0x%X \t "
				"size = %d \t param.vir_addr = 0x%X, %d\n",
				param.phy_addr, param.size, param.vir_addr,
				__LINE__);

		if (copy_to_user((struct s3c_mem_alloc *)arg, &param,
					sizeof(struct s3c_mem_alloc))) {
			flag = 0;
			mutex_unlock(&mem_share_alloc_lock);
			return -EFAULT;
		}
		flag = 0;
		mutex_unlock(&mem_share_alloc_lock);

		break;

	case S3C_MEM_CACHEABLE_SHARE_ALLOC:
		mutex_lock(&mem_cacheable_share_alloc_lock);
		if (copy_from_user(&param, (struct s3c_mem_alloc *)arg,
					sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_cacheable_share_alloc_lock);
			return -EFAULT;
		}
		flag = MEM_ALLOC_CACHEABLE_SHARE;
		physical_address = param.phy_addr;
		DEBUG("param.phy_addr = %08x, %d\n",
				physical_address, __LINE__);
		param.vir_addr = do_mmap(file, 0, param.size,
				(PROT_READ|PROT_WRITE), MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x, %d\n",
				param.vir_addr, __LINE__);
		if (param.vir_addr == -EINVAL) {
			printk(KERN_INFO "S3C_MEM_SHARE_ALLOC FAILED\n");
			flag = 0;
			mutex_unlock(&mem_cacheable_share_alloc_lock);
			return -EFAULT;
		}
		DEBUG("MALLOC_SHARE : param.phy_addr = 0x%X \t "
				"size = %d \t param.vir_addr = 0x%X, %d\n",
				param.phy_addr, param.size, param.vir_addr,
				__LINE__);

		if (copy_to_user((struct s3c_mem_alloc *)arg, &param,
					sizeof(struct s3c_mem_alloc))) {
			flag = 0;
			mutex_unlock(&mem_cacheable_share_alloc_lock);
			return -EFAULT;
		}
		flag = 0;
		mutex_unlock(&mem_cacheable_share_alloc_lock);

		break;

	case S3C_MEM_FREE:
		mutex_lock(&mem_free_lock);
		if (copy_from_user(&param, (struct s3c_mem_alloc *)arg,
					sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_free_lock);
			return -EFAULT;
		}

		DEBUG("KERNEL FREE : param.phy_addr = 0x%X \t "
				"size = %d \t param.vir_addr = 0x%X, %d\n",
				param.phy_addr, param.size, param.vir_addr,
				__LINE__);

		if (do_munmap(mm, param.vir_addr, param.size) < 0) {
			printk(KERN_INFO "do_munmap() failed !!\n");
			mutex_unlock(&mem_free_lock);
			return -EINVAL;
		}

#ifdef USE_DMA_ALLOC
		virt_addr = param.kvir_addr;
		dma_free_writecombine(NULL, param.size,
				(unsigned int *) virt_addr, param.phy_addr);
#else
		virt_addr = (unsigned long *)phys_to_virt(param.phy_addr);
		kfree(virt_addr);
#endif
		param.size = 0;
		DEBUG("do_munmap() succeed !!\n");

		if (copy_to_user((struct s3c_mem_alloc *)arg, &param,
					sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_free_lock);
			return -EFAULT;
		}

		mutex_unlock(&mem_free_lock);

		break;

	case S3C_MEM_SHARE_FREE:
		mutex_lock(&mem_share_free_lock);
		if (copy_from_user(&param, (struct s3c_mem_alloc *)arg,
					sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_share_free_lock);
			return -EFAULT; }

		DEBUG("MEM_SHARE_FREE : param.phy_addr = 0x%X \t "
				"size = %d \t param.vir_addr = 0x%X, %d\n",
				param.phy_addr, param.size, param.vir_addr,
				__LINE__);

		if (do_munmap(mm, param.vir_addr, param.size) < 0) {
			printk(KERN_INFO "do_munmap() failed - MEM_SHARE_FREE!!\n");
			mutex_unlock(&mem_share_free_lock);
			return -EINVAL;
		}

		param.vir_addr = 0;
		DEBUG("do_munmap() succeed !! - MEM_SHARE_FREE\n");

		if (copy_to_user((struct s3c_mem_alloc *)arg, &param,
					sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_share_free_lock);
			return -EFAULT;
		}

		mutex_unlock(&mem_share_free_lock);

		break;

	case S3C_MEM_CACHE_FLUSH:
		mutex_lock(&mem_dma_cache_lock);

		if (copy_from_user(&param, (struct s3c_mem_alloc *)arg,
			sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_dma_cache_lock);
			return -EFAULT;
		}

		if (param.size == 0) {
			DEBUG("S3C_MEM_CACHE_FLUSH : param.size == 0, %d\n", __LINE__);
			mutex_unlock(&mem_dma_cache_lock);
			return -EFAULT;
		}
		if (param.vir_addr == 0) {
			if (param.phy_addr == 0) {
				DEBUG("S3C_MEM_CACHE_FLUSH : param.vir_addr == 0 and param.phy_addr == 0, %d\n", __LINE__);
				mutex_unlock(&mem_dma_cache_lock);
				return -EFAULT;
			}
			flush_start = phys_to_virt(param.phy_addr);
		} else
			flush_start = (void *)param.vir_addr;

		flush_end = flush_start + param.size;

		dmac_flush_range(flush_start, flush_end);
		mutex_unlock(&mem_dma_cache_lock);
		break;

	case S3C_MEM_CACHE_INVAL:
		mutex_lock(&mem_dma_cache_lock);

		if (copy_from_user(&param, (struct s3c_mem_alloc *)arg,
			sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_dma_cache_lock);
			return -EFAULT;
		}

		if (param.size == 0) {
			DEBUG("S3C_MEM_CACHE_INVAL : param.size == 0, %d\n", __LINE__);
			mutex_unlock(&mem_dma_cache_lock);
			return -EFAULT;
		}
		if (param.vir_addr == 0) {
			if (param.phy_addr == 0) {
				DEBUG("S3C_MEM_CACHE_INVAL : param.vir_addr == 0 and param.phy_addr == 0, %d\n", __LINE__);
				mutex_unlock(&mem_dma_cache_lock);
				return -EFAULT;
			}
			flush_start = phys_to_virt(param.phy_addr);
		} else
			flush_start = (void *)param.vir_addr;

		flush_end = flush_start + param.size;

		dmac_inv_range(flush_start, flush_end);
		mutex_unlock(&mem_dma_cache_lock);
		break;

	case S3C_MEM_CACHE_CLEAN:
		mutex_lock(&mem_dma_cache_lock);

		if (copy_from_user(&param, (struct s3c_mem_alloc *)arg,
			sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_dma_cache_lock);
			return -EFAULT;
		}

		if (param.size == 0) {
			DEBUG("S3C_MEM_CACHE_CLEAN : param.size == 0, %d\n", __LINE__);
			mutex_unlock(&mem_dma_cache_lock);
			return -EFAULT;
		}
		if (param.vir_addr == 0) {
			if (param.phy_addr == 0) {
				DEBUG("S3C_MEM_CACHE_CLEAN : param.vir_addr == 0 and param.phy_addr == 0, %d\n", __LINE__);
				mutex_unlock(&mem_dma_cache_lock);
				return -EFAULT;
			}
			flush_start = phys_to_virt(param.phy_addr);
		} else
			flush_start = (void *)param.vir_addr;

		flush_end = flush_start + param.size;

		dmac_clean_range(flush_start, flush_end);
		mutex_unlock(&mem_dma_cache_lock);
		break;

	default:
		DEBUG("s3c_mem_ioctl() : default !!\n");
		return -EINVAL;
	}

	return 0;
}
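A user-space sketch of the cache ioctls; fd and buf are hypothetical, with field names taken from struct s3c_mem_alloc as used above:

	struct s3c_mem_alloc p;

	p.vir_addr = (unsigned long)buf;	/* virtual address of the region */
	p.phy_addr = 0;				/* unused here since vir_addr is set */
	p.size = len;

	/* Write dirty lines back to memory before a device reads the buffer. */
	ioctl(fd, S3C_MEM_CACHE_CLEAN, &p);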
Example #23
static int s3c_cmm_ioctl(struct inode *inode, struct file *file, unsigned
		int cmd, unsigned long arg)
{
	int                     ret;
	CODEC_MEM_CTX *         CodecMem;
	CODEC_MEM_ALLOC_ARG     codec_mem_alloc_arg;
	CODEC_CACHE_FLUSH_ARG   codec_cache_flush_arg;
	CODEC_GET_PHY_ADDR_ARG  codec_get_phy_addr_arg;
	int                     result = 0;
	void *                  start;
	void *                  end;
	ALLOC_MEM_T *           node;
	CODEC_MEM_FREE_ARG      codec_mem_free_arg;

	CodecMem = (CODEC_MEM_CTX *)file->private_data;
	if (!CodecMem) {
		LOG_MSG(LOG_ERROR, "s3c_cmm_ioctl", "CMM Invalid Input Handle\n");
		return -1;
	}

	ret = LockCMMMutex();
	if(!ret){
		LOG_MSG(LOG_ERROR, "s3c_cmm_ioctl", "DD::CMM Mutex Lock Fail\r\n");
		return -1;
	}

	switch (cmd) 
	{
		case IOCTL_CODEC_MEM_ALLOC:
			
			LOG_MSG(LOG_TRACE, "s3c_cmm_ioctl", "IOCTL_CODEC_MEM_GET\n");

			copy_from_user(&codec_mem_alloc_arg, (CODEC_MEM_ALLOC_ARG *)arg, sizeof(CODEC_MEM_ALLOC_ARG));

			node = GetCodecVirAddr(CodecMem->inst_no, &codec_mem_alloc_arg);
			if(node == NULL){
				LOG_MSG(LOG_WARNING, "s3c_cmm_ioctl", "GetCodecVirAddr(%d)\r\n", CodecMem->inst_no);
				result = -1;
				break;
			}
			
			ret = copy_to_user((void *)arg, (void *)&codec_mem_alloc_arg, sizeof(CODEC_MEM_ALLOC_ARG));
			break;

		case IOCTL_CODEC_MEM_FREE:

			LOG_MSG(LOG_TRACE, "s3c_cmm_ioctl", "IOCTL_CODEC_MEM_FREE\n");

			copy_from_user(&codec_mem_free_arg, (CODEC_MEM_FREE_ARG *)arg, sizeof(CODEC_MEM_FREE_ARG));

			for(node = AllocMemHead; node != AllocMemTail; node = node->next) {
				if(node->u_addr == (unsigned char *)codec_mem_free_arg.u_addr)
					break;
			}

			if(node  == AllocMemTail){
				LOG_MSG(LOG_ERROR, "s3c_cmm_ioctl", "invalid virtual address(0x%x)\r\n", codec_mem_free_arg.u_addr);
				result = -1;
				break;
			}

			ReleaseAllocMem(node, CodecMem);

			break;

		case IOCTL_CODEC_CACHE_FLUSH:
		
			LOG_MSG(LOG_TRACE, "s3c_cmm_ioctl", "IOCTL_CODEC_CACHE_FLUSH\n");

			copy_from_user(&codec_cache_flush_arg, (CODEC_CACHE_FLUSH_ARG *)arg, sizeof(CODEC_CACHE_FLUSH_ARG));

			for(node = AllocMemHead; node != AllocMemTail; node = node->next) {
				if(node->u_addr == (unsigned char *)codec_cache_flush_arg.u_addr)
					break;
			}

			if(node  == AllocMemTail){
				LOG_MSG(LOG_ERROR, "s3c_cmm_ioctl", "invalid virtual address(0x%x)\r\n", codec_cache_flush_arg.u_addr);
				result = -1;
				break;
			}

			start = node->v_addr;
			end = start + codec_cache_flush_arg.size;
			dmac_clean_range(start, end);
			outer_clean_range(__pa(start), __pa(end));

			break;

		case IOCTL_CODEC_GET_PHY_ADDR:
			if (copy_from_user(&codec_get_phy_addr_arg, (CODEC_GET_PHY_ADDR_ARG *)arg, sizeof(CODEC_GET_PHY_ADDR_ARG))) {
				result = -1;
				break;
			}

			for (node = AllocMemHead; node != AllocMemTail; node = node->next) {
				if (node->u_addr == (unsigned char *)codec_get_phy_addr_arg.u_addr)
					break;
			}

			if (node == AllocMemTail) {
				LOG_MSG(LOG_ERROR, "s3c_cmm_ioctl", "invalid virtual address(0x%x)\r\n", codec_get_phy_addr_arg.u_addr);
				result = -1;
				break;
			}

			if(node->cacheFlag)
				codec_get_phy_addr_arg.p_addr = node->cached_p_addr;
			else
				codec_get_phy_addr_arg.p_addr = node->uncached_p_addr;
			
			if (copy_to_user((void *)arg, (void *)&codec_get_phy_addr_arg, sizeof(CODEC_GET_PHY_ADDR_ARG)))
				result = -1;

			break;
		case IOCTL_CODEC_MERGE_FRAGMENTATION:

			MergeFragmentation(CodecMem->inst_no);

			break;

		case IOCTL_CODEC_CACHE_INVALIDATE:
			
			LOG_MSG(LOG_TRACE, "s3c_cmm_ioctl", "IOCTL_CODEC_CACHE_INVALIDATE\n");

			if (copy_from_user(&codec_cache_flush_arg, (CODEC_CACHE_FLUSH_ARG *)arg, sizeof(CODEC_CACHE_FLUSH_ARG))) {
				result = -1;
				break;
			}

			for (node = AllocMemHead; node != AllocMemTail; node = node->next) {
				if (node->u_addr == (unsigned char *)codec_cache_flush_arg.u_addr)
					break;
			}

			if (node == AllocMemTail) {
				LOG_MSG(LOG_ERROR, "s3c_cmm_ioctl", "invalid virtual address(0x%x)\r\n", codec_cache_flush_arg.u_addr);
				result = -1;
				break;
			}

			start = node->v_addr;
			end = start + codec_cache_flush_arg.size;
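			/*
			 * A full flush (clean + invalidate) is used instead of a
			 * pure invalidate, so dirty lines in the range are not
			 * silently discarded.
			 */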
			dmac_flush_range(start, end);

			break;

		case IOCTL_CODEC_CACHE_CLEAN:

			LOG_MSG(LOG_TRACE, "s3c_cmm_ioctl", "IOCTL_CODEC_CACHE_CLEAN\n");

			if (copy_from_user(&codec_cache_flush_arg, (CODEC_CACHE_FLUSH_ARG *)arg, sizeof(CODEC_CACHE_FLUSH_ARG))) {
				result = -1;
				break;
			}

			for (node = AllocMemHead; node != AllocMemTail; node = node->next) {
				if (node->u_addr == (unsigned char *)codec_cache_flush_arg.u_addr)
					break;
			}

			if (node == AllocMemTail) {
				LOG_MSG(LOG_ERROR, "s3c_cmm_ioctl", "invalid virtual address(0x%x)\r\n", codec_cache_flush_arg.u_addr);
				result = -1;
				break;
			}

			start = node->v_addr;
			end = start + codec_cache_flush_arg.size;
			dmac_clean_range(start, end);

			break;

		case IOCTL_CODEC_CACHE_CLEAN_INVALIDATE:

			LOG_MSG(LOG_TRACE, "s3c_cmm_ioctl", "IOCTL_CODEC_CACHE_CLEAN_INVALIDATE\n");

			if (copy_from_user(&codec_cache_flush_arg, (CODEC_CACHE_FLUSH_ARG *)arg, sizeof(CODEC_CACHE_FLUSH_ARG))) {
				result = -1;
				break;
			}

			for (node = AllocMemHead; node != AllocMemTail; node = node->next) {
				if (node->u_addr == (unsigned char *)codec_cache_flush_arg.u_addr)
					break;
			}

			if (node == AllocMemTail) {
				LOG_MSG(LOG_ERROR, "s3c_cmm_ioctl", "invalid virtual address(0x%x)\r\n", codec_cache_flush_arg.u_addr);
				result = -1;
				break;
			}

			start = node->v_addr;
			end = start + codec_cache_flush_arg.size;
			/* dmac_flush_range() performs clean + invalidate in one pass */
			dmac_flush_range(start, end);
			
			break;
			
		default:
			LOG_MSG(LOG_ERROR, "s3c_cmm_ioctl", "DD::CMM Invalid ioctl : 0x%X\r\n", cmd);
			result = -1;
	}

	UnlockCMMMutex();

	if(result == 0)
		return TRUE;
	else
		return FALSE;
}
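The four cache ioctls above repeat the same list walk to map a user address onto an allocation node. A possible consolidation, sketched against the AllocMemHead/AllocMemTail list and ALLOC_MEM_T type this driver already uses (the helper name is an assumption):

	/* Sketch: shared lookup for the cache-maintenance ioctls above. */
	static ALLOC_MEM_T *find_alloc_node(unsigned int u_addr)
	{
		ALLOC_MEM_T *node;

		for (node = AllocMemHead; node != AllocMemTail; node = node->next) {
			if (node->u_addr == (unsigned char *)u_addr)
				return node;
		}
		return NULL;	/* no allocation covers this address */
	}

Each case then reduces to a lookup, a NULL check, and the appropriate dmac_*_range() call.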
Exemplo n.º 24
0
int xor_mv(unsigned int src_no, unsigned int bytes, void **bh_ptr)
{
	void *bptr = NULL;
	int i;
        u32      *srcAddr;
        int         chan;
        struct xor_channel_t *channel;

	if (src_no <= 1) {
		printk(KERN_ERR "%s: need more than 1 src for XOR\n",
			__func__);
		BUG();
		return bytes;
	}
        if (xor_engine_initialized == 0) {
                printk(KERN_WARNING "%s: xor engines not initialized yet\n", __func__);
                return bytes;
        }

        chan = allocate_channel();
        if (chan == -1) {
                DPRINTK("XOR engines are busy, return\n");
                return bytes;
        }
	DPRINTK("setting up rest of descriptor for channel %d\n", chan);
        channel = &xor_channel[chan];
	// flush the cache to memory before XOR engine touches them
        srcAddr = &(channel->pDescriptor->srcAdd0);
	for(i = src_no-1; i >= 0; i--)
	{
		DPRINTK("flushing source %d\n", i);
		bptr = (bh_ptr[i]);
		/* Buffer 0 is also the destination */
		if (i == 0)
			dmac_flush_range(bptr, bptr + bytes);
		else
			dmac_clean_range(bptr, bptr + bytes);
                srcAddr[i] = virt_to_phys(bh_ptr[i]);
	}

	channel->pDescriptor->phyDestAdd = virt_to_phys(bh_ptr[0]);
        channel->pDescriptor->byteCnt = bytes;
        channel->pDescriptor->phyNextDescPtr = 0;
        channel->pDescriptor->descCommand = (1 << src_no) - 1;
        channel->pDescriptor->status = BIT31;
	channel->chan_active = 1;
        if( mvXorTransfer(chan, MV_XOR, channel->descPhyAddr) != MV_OK )
        {
            printk(KERN_ERR "%s: XOR operation on channel %d failed!\n", __func__, chan);
            print_xor_regs(chan);
            BUG();
            free_channel(channel);
            return bytes;
        }
#ifdef CONFIG_ENABLE_XOR_INTERRUPTS
        wait_event(channel->waitq, (( channel->pDescriptor->status & BIT31) == 0));/*TODO add timeout*/
#else
        xor_waiton_eng(chan);
#endif
	DPRINTK("XOR complete\n");
#if 0
	if (!(channel->pDescriptor->status & BIT30)) {
	    printk(KERN_ERR "%s: XOR operation completed with error!\n", __func__);
            print_xor_regs(chan);            
	    BUG();
            free_channel(channel);
	    return MV_REG_READ(XOR_BYTE_COUNT_REG(chan));
        }
#endif
	DPRINTK("invalidate result in cache\n");
#if 0
	// invalidate the cache region to destination
        bptr = (bh_ptr[0]);
	dmac_inv_range(bptr, bptr + bytes);
#endif
        free_channel(channel);
        xor_hit++;
        return 0;
}
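The "TODO add timeout" on the interrupt wait above can be addressed with a bounded wait. A minimal sketch reusing the waitqueue and status bit from this function (the 100 ms budget is an assumption):

	/* Sketch: bounded wait for XOR completion instead of waiting forever. */
	long remaining = wait_event_timeout(channel->waitq,
			(channel->pDescriptor->status & BIT31) == 0,
			msecs_to_jiffies(100));
	if (remaining == 0)
		printk(KERN_ERR "%s: XOR channel %d timed out\n", __func__, chan);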
Exemplo n.º 25
0
void clean_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	dmac_clean_range((void *)vstart, (void *) (vstart + length));
	outer_clean_range(pstart, pstart + length);
}
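clean_caches() pairs the inner and outer maintenance for the CPU-to-device direction. The device-to-CPU direction needs the invalidating counterpart; a sketch under the same conventions (the name invalidate_caches is an assumption):

	/* Sketch: discard stale lines after a device has written the buffer. */
	void invalidate_caches(unsigned long vstart,
		unsigned long length, unsigned long pstart)
	{
		dmac_inv_range((void *)vstart, (void *)(vstart + length));
		outer_inv_range(pstart, pstart + length);
	}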
int ion_system_heap_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (system_heap_has_outer_cache) {
		unsigned long pstart;
		void *vend;
		void *vtemp;
		unsigned long ln = 0;
		vend = buffer->priv_virt + buffer->size;
		vtemp = buffer->priv_virt + offset;

		if ((vtemp+length) > vend) {
			pr_err("Trying to flush outside of mapped range.\n");
			pr_err("End of mapped range: %p, trying to flush to "
				"address %p\n", vend, vtemp+length);
			WARN(1, "%s: called with heap name %s, buffer size 0x%x, "
				"vaddr 0x%p, offset 0x%x, length: 0x%x\n",
				__func__, heap->name, buffer->size, vaddr,
				offset, length);
			return -EINVAL;
		}
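		/*
		 * The buffer is vmalloc'd and therefore only virtually
		 * contiguous: walk it page by page and translate each page
		 * to a physical address for the outer-cache operation.
		 */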

		for (; ln < length && vtemp < vend;
		      vtemp += PAGE_SIZE, ln += PAGE_SIZE) {
			struct page *page = vmalloc_to_page(vtemp);
			if (!page) {
				WARN(1, "Could not find page for virt. address %p\n",
					vtemp);
				return -EINVAL;
			}
			pstart = page_to_phys(page);
			/*
			 * If page_to_phys() returns 0, something has
			 * really gone wrong...
			 */
			if (!pstart) {
				WARN(1, "Could not translate %p to physical address\n",
					vtemp);
				return -EINVAL;
			}

			outer_cache_op(pstart, pstart + PAGE_SIZE);
		}
	}
	return 0;
}
Exemplo n.º 27
0
/*=======================================================================*/
void *xor_memcpy(void *to, const void *from, __kernel_size_t n)
{
	u32 xor_dma_unaligned_to, xor_dma_unaligned_from;
	void *orig_to = to;
	u32 to_pa, from_pa;
        int ua = 0;
        int         chan;
        struct xor_channel_t *channel;

	DPRINTK("xor_memcpy(0x%x, 0x%x, %lu): entering\n", (u32) to, (u32) from,
		(unsigned long)n);

        if (xor_engine_initialized == 0) {
                DPRINTK(KERN_WARNING "%s: xor engines not initialized yet\n", __func__);
                xor_dma_miss++;
                return asm_memmove(to, from, n);
        }
 	if (!(virt_addr_valid((u32) to) && virt_addr_valid((u32) from))) {
		DPRINTK("xor_memcpy(0x%x, 0x%x, %lu): falling back to memcpy\n",
			(u32) to, (u32) from, (unsigned long)n);
		xor_dma_miss++;
		return asm_memmove(to, from, n);
	}

	/*
	 * We can only handle completely cache-line-aligned transactions
	 * with the DMA engine.  Source and destination must be cache-line
	 * aligned AND the length must be a multiple of the cache line size.
	 */

	to_pa = virt_to_phys(to);
	from_pa = virt_to_phys((void*)from);

	if (((to_pa + n > from_pa) && (to_pa < from_pa)) ||
	    ((from_pa < to_pa) && (from_pa + n > to_pa))) {
		DPRINTK("overlapping copy region (0x%x, 0x%x, %lu), falling back\n",
		     to_pa, from_pa, (unsigned long)n);
		xor_dma_miss++;
		return asm_memmove(to, from, n);
	}
	/*
	 * If the start addresses are not cache-line aligned, copy the
	 * unaligned head with the CPU to make them so.
	 */
	xor_dma_unaligned_to = (u32) to & 31;
	xor_dma_unaligned_from = (u32) from & 31;
	if (xor_dma_unaligned_to | xor_dma_unaligned_from) {
		ua++;
		if (xor_dma_unaligned_from > xor_dma_unaligned_to) {
			asm_memmove(to, from, 32 - xor_dma_unaligned_to);
			to = (void *)((u32)to + 32 - xor_dma_unaligned_to);
			from = (void *)((u32)from + 32 - xor_dma_unaligned_to);
			n -= 32 - xor_dma_unaligned_to;
		} else {
			asm_memmove(to, from, 32 - xor_dma_unaligned_from);
			to = (void *)((u32)to + 32 - xor_dma_unaligned_from);
			from = (void *)((u32)from + 32 - xor_dma_unaligned_from);
			n -= 32 - xor_dma_unaligned_from;
		}
	}

	/*
	 * Ok, we're aligned at the top, now let's check the end
	 * of the buffer and align that. After this we should have
	 * a block that is a multiple of cache line size.
	 */
	xor_dma_unaligned_to = ((u32) to + n) & 31;
	xor_dma_unaligned_from = ((u32) from + n) & 31;
	if (xor_dma_unaligned_to | xor_dma_unaligned_from) {
		ua++;
		if (xor_dma_unaligned_to > xor_dma_unaligned_from) {
			u32 tmp_to = (u32) to + n - xor_dma_unaligned_to;
			u32 tmp_from = (u32) from + n - xor_dma_unaligned_to;

			asm_memmove((void *)tmp_to, (void *)tmp_from,
				   xor_dma_unaligned_to);

			n -= xor_dma_unaligned_to;
		} else {
			u32 tmp_to = (u32) to + n - xor_dma_unaligned_from;
			u32 tmp_from = (u32) from + n - xor_dma_unaligned_from;

			asm_memmove((void *)tmp_to, (void *)tmp_from,
				   xor_dma_unaligned_from);

			n -= xor_dma_unaligned_from;
		}
	}

	/*
	 * OK! We should now be fully aligned on both ends. 
	 */
        chan = allocate_channel();
        if (chan == -1) {
                DPRINTK("XOR engines are busy, return\n");
                xor_dma_miss++;
                return asm_memmove(to, from, n);
        }
	DPRINTK("setting up rest of descriptor for channel %d\n", chan);
        channel = &xor_channel[chan];
	
        /* Ensure that the cache is clean */
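	/* Clean the source so the engine reads current data; invalidate the
	 * destination so the CPU refetches it after the engine writes. */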
	dmac_clean_range(from, from + n);
	dmac_inv_range(to, to + n);

	DPRINTK("setting up rest of descriptor\n");
	// flush the cache to memory before XOR engine touches them
        channel->pDescriptor->srcAdd0 = virt_to_phys((void*)from);
	channel->pDescriptor->phyDestAdd = virt_to_phys(to);
        channel->pDescriptor->byteCnt = n;
        channel->pDescriptor->phyNextDescPtr = 0;
        channel->pDescriptor->status = BIT31;
	channel->chan_active = 1;

        if( mvXorTransfer(chan, MV_DMA, channel->descPhyAddr) != MV_OK)
        {
            printk(KERN_ERR "%s: DMA copy operation on channel %d failed!\n", __func__, chan);
            print_xor_regs(chan);
            BUG();
            free_channel(channel);
       	    return asm_memmove(to, from, n);
        }
        xor_waiton_eng(chan);


        DPRINTK("DMA copy complete\n");
	// check to see if failed
#if 0
	if (!(channel->pDescriptor->status & BIT30))
        {
            printk(KERN_ERR "%s: DMA copy operation completed with error!\n", __func__);
            printk(" srcAdd %x DestAddr %x, count %x\n", channel->pDescriptor->srcAdd0,
                                                channel->pDescriptor->phyDestAdd, n); 
            print_xor_regs(chan);            
	    BUG();
            free_channel(channel);
       	    return asm_memmove(to, from, n);
        }
#endif
        free_channel(channel);
 
	xor_dma_hit++;
	if (ua)
		xor_dma_unaligned++;

	return orig_to;
}
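For short copies the descriptor setup and cache maintenance above cost more than they save, so callers would normally gate xor_memcpy() behind a size threshold. A usage sketch (the threshold value and wrapper name are assumptions):

	#define XOR_MIN_COPY	1024	/* assumed break-even point */

	static void *fast_copy(void *dst, const void *src, __kernel_size_t n)
	{
		if (n >= XOR_MIN_COPY)
			return xor_memcpy(dst, src, n);
		return memcpy(dst, src, n);
	}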
Exemplo n.º 28
0
static int s3c_g3d_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	u32 val;
	DMA_BLOCK_STRUCT dma_block;
	s3c_3d_dma_info dma_info;
	DECLARE_COMPLETION_ONSTACK(complete);

	struct mm_struct *mm = current->mm;
	struct s3c_3d_mem_alloc param;
	struct s3c_3d_pm_status param_pm;

	unsigned int timer;
	
	switch (cmd) {
	case WAIT_FOR_FLUSH:
		//if fifo has already been flushed, return;
		val = __raw_readl(s3c_g3d_base+FGGB_PIPESTATE);
		//printk("read pipestate = 0x%x\n",val);
		if((val & arg) ==0) break;

		// enable interrupt
		interrupt_already_recevied = 0;
		__raw_writel(0x0001171f,s3c_g3d_base+FGGB_PIPEMASK);
		__raw_writel(1,s3c_g3d_base+FGGB_INTMASK);

		//printk("wait for flush (arg=0x%lx)\n",arg);

		timer = 1000000;

		while(timer) {
			wait_event_interruptible_timeout(waitq, (interrupt_already_recevied>0), 1*HZ);

			__raw_writel(0,s3c_g3d_base+FGGB_INTMASK);
			interrupt_already_recevied = 0;
			//if(interrupt_already_recevied==0)interruptible_sleep_on(&waitq);
			val = __raw_readl(s3c_g3d_base+FGGB_PIPESTATE);
			//printk("in while read pipestate = 0x%x\n",val);
			if(val & arg){
			} else{
				break;
			}
			__raw_writel(1,s3c_g3d_base+FGGB_INTMASK);
			timer --;
		}
		break;

	case GET_CONFIG:
		if (copy_to_user((void *)arg,&g3d_config,sizeof(G3D_CONFIG_STRUCT))) {
			printk("G3D: copy_to_user failed to get g3d_config\n");
			return -EFAULT;		
		}
		break;

	case START_DMA_BLOCK:
		if (copy_from_user(&dma_block,(void *)arg,sizeof(DMA_BLOCK_STRUCT))) {
			printk("G3D: copy_to_user failed to get dma_block\n");
			return -EFAULT;		
		}

		if (dma_block.offset % 4 != 0) {
			printk("G3D: dma offset is not word-aligned\n");
			return -EINVAL;
		}
		if (dma_block.size % 4 != 0) {
			printk("G3D: dma size is not word-aligned\n");
			return -EINVAL;
		}
		if (dma_block.offset + dma_block.size > g3d_config.dma_buffer_size) {
			printk("G3D: offset+size exceeds dma buffer\n");
			return -EINVAL;
		}

		dma_info.src = g3d_config.dma_buffer_addr+dma_block.offset;
		dma_info.len = dma_block.size;
		dma_info.dst = s3c_g3d_base_physical+FGGB_HOSTINTERFACE;

		DEBUG(" dma src=0x%x\n", dma_info.src);
		DEBUG(" dma len =%u\n", dma_info.len);
		DEBUG(" dma dst = 0x%x\n", dma_info.dst);

		dma_3d_done = &complete;

		if (s3c2410_dma_request(DMACH_3D_M2M, &s3c6410_3d_dma_client, NULL)) {
			printk(KERN_WARNING "Unable to get DMA channel(DMACH_3D_M2M).\n");
			return -EFAULT;
		}

		s3c2410_dma_set_buffdone_fn(DMACH_3D_M2M, s3c_g3d_dma_finish);
		s3c2410_dma_devconfig(DMACH_3D_M2M, S3C_DMA_MEM2MEM, 1, (u_long) dma_info.src);
		s3c2410_dma_config(DMACH_3D_M2M, 4, 4);
		s3c2410_dma_setflags(DMACH_3D_M2M, S3C2410_DMAF_AUTOSTART);

		//consistent_sync((void *) dma_info.dst, dma_info.len, DMA_FROM_DEVICE);
	//	s3c2410_dma_enqueue(DMACH_3D_M2M, NULL, (dma_addr_t) virt_to_dma(NULL, dma_info.dst), dma_info.len);
		s3c2410_dma_enqueue(DMACH_3D_M2M, NULL, (dma_addr_t) dma_info.dst, dma_info.len);

	//	printk("wait for end of dma operation\n");
		wait_for_completion(&complete);
	//	printk("dma operation is performed\n");

		s3c2410_dma_free(DMACH_3D_M2M, &s3c6410_3d_dma_client);

		break;

	case S3C_3D_MEM_ALLOC:		
		mutex_lock(&mem_alloc_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_alloc_lock);			
			return -EFAULT;
		}
       
		flag = MEM_ALLOC;
		
		param.size = s3c_g3d_available_chunk_size(param.size,(unsigned int)file->private_data);

		if (param.size == 0){
			printk("S3C_3D_MEM_ALLOC FAILED because there is no block memory bigger than you request\n");
			flag = 0;
			mutex_unlock(&mem_alloc_lock);			
			return -EFAULT;
		}			
             
		param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x\n", param.vir_addr);

		if (IS_ERR_VALUE(param.vir_addr)) {
			printk("S3C_3D_MEM_ALLOC FAILED\n");
			flag = 0;
			mutex_unlock(&mem_alloc_lock);			
			return -EFAULT;
		}
		param.phy_addr = physical_address;

       // printk("alloc %d\n", param.size);
		DEBUG("KERNEL MALLOC : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr);

		if(copy_to_user((struct s3c_3d_mem_alloc *)arg, &param, sizeof(struct s3c_3d_mem_alloc))){
			flag = 0;
			mutex_unlock(&mem_alloc_lock);
			return -EFAULT;		
		}

		flag = 0;
		
//		printk("\n\n====Success the malloc from kernel=====\n");
		mutex_unlock(&mem_alloc_lock);
		
		break;

	case S3C_3D_MEM_FREE:	
		mutex_lock(&mem_free_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_free_lock);
			return -EFAULT;
		}

		DEBUG("KERNEL FREE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr);

		/*
		if (do_munmap(mm, param.vir_addr, param.size) < 0) {
			printk("do_munmap() failed !!\n");
			mutex_unlock(&mem_free_lock);
			return -EINVAL;
		}
		*/

		s3c_g3d_release_chunk(param.phy_addr, param.size);
		//printk("KERNEL : virt_addr = 0x%X\n", virt_addr);
		//printk("free %d\n", param.size);


		param.size = 0;
		DEBUG("do_munmap() succeed !!\n");

		if(copy_to_user((struct s3c_3d_mem_alloc *)arg, &param, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_free_lock);
			return -EFAULT;
		}
		
		mutex_unlock(&mem_free_lock);
		
		break;

	case S3C_3D_SFR_LOCK:
		mutex_lock(&mem_sfr_lock);
		mutex_lock_processID = (unsigned int)file->private_data;
		DEBUG("s3c_g3d_ioctl() : You got a muxtex lock !!\n");
		break;

	case S3C_3D_SFR_UNLOCK:
		mutex_lock_processID = 0;
		mutex_unlock(&mem_sfr_lock);
		DEBUG("s3c_g3d_ioctl() : The muxtex unlock called !!\n");
		break;

	case S3C_3D_MEM_ALLOC_SHARE:		
		mutex_lock(&mem_alloc_share_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_alloc_share_lock);
			return -EFAULT;
		}
		flag = MEM_ALLOC_SHARE;

		physical_address = param.phy_addr;

		DEBUG("param.phy_addr = %08x\n", physical_address);

		param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x\n", param.vir_addr);

		if (IS_ERR_VALUE(param.vir_addr)) {
			printk("S3C_3D_MEM_ALLOC_SHARE FAILED\n");
			flag = 0;
			mutex_unlock(&mem_alloc_share_lock);
			return -EFAULT;
		}

		DEBUG("MALLOC_SHARE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr);

		if(copy_to_user((struct s3c_3d_mem_alloc *)arg, &param, sizeof(struct s3c_3d_mem_alloc))){
			flag = 0;
			mutex_unlock(&mem_alloc_share_lock);
			return -EFAULT;		
		}

		flag = 0;
		
		mutex_unlock(&mem_alloc_share_lock);
		
		break;

	case S3C_3D_MEM_SHARE_FREE:	
		mutex_lock(&mem_share_free_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_share_free_lock);
			return -EFAULT;		
		}

		DEBUG("MEM_SHARE_FREE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr);

		if (do_munmap(mm, param.vir_addr, param.size) < 0) {
			printk("do_munmap() failed - MEM_SHARE_FREE!!\n");
			mutex_unlock(&mem_share_free_lock);
			return -EINVAL;
		}

		param.vir_addr = 0;
		DEBUG("do_munmap() succeed !! - MEM_SHARE_FREE\n");

		if(copy_to_user((struct s3c_3d_mem_alloc *)arg, &param, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_share_free_lock);
			return -EFAULT;		
		}

		mutex_unlock(&mem_share_free_lock);
		
		break;

	case S3C_3D_CACHE_INVALID:
		mutex_lock(&cache_invalid_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			printk("ERR: Invalid Cache Error\n");	
			mutex_unlock(&cache_invalid_lock);
			return -EFAULT;	
		}
		dmac_inv_range((void *)param.vir_addr, (void *)(param.vir_addr + param.size));
		mutex_unlock(&cache_invalid_lock);
		break;

	case S3C_3D_CACHE_CLEAN:
		mutex_lock(&cache_clean_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			printk("ERR: Invalid Cache Error\n");	
			mutex_unlock(&cache_clean_lock);
			return -EFAULT;	
		}
		dmac_clean_range((void *)param.vir_addr, (void *)(param.vir_addr + param.size));
		mutex_unlock(&cache_clean_lock);
		break;

	case S3C_3D_CACHE_CLEAN_INVALID:
		mutex_lock(&cache_clean_invalid_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&cache_clean_invalid_lock);
			printk("ERR: Invalid Cache Error\n");	
			return -EFAULT;	
		}
		dmac_flush_range((void *)param.vir_addr, (void *)(param.vir_addr + param.size));
		mutex_unlock(&cache_clean_invalid_lock);
		break;

	case S3C_3D_POWER_INIT:
		if(copy_from_user(&param_pm, (struct s3c_3d_pm_status *)arg, sizeof(struct s3c_3d_pm_status))){
			printk("ERR: Invalid Cache Error\n");	
			return -EFAULT;	
		}
		break;

	case S3C_3D_CRITICAL_SECTION:
#ifdef USE_G3D_DOMAIN_GATING
		mutex_lock(&pm_critical_section_lock);
		if(copy_from_user(&param_pm, (struct s3c_3d_pm_status *)arg, sizeof(struct s3c_3d_pm_status))){
			printk("ERR: Invalid Cache Error\n");	
			mutex_unlock(&pm_critical_section_lock);
			return -EFAULT;	
		}

//		param_pm.memStatus = check_memStatus((unsigned int)file->private_data);

		if(param_pm.criticalSection) g_G3D_CriticalFlag++;
		else g_G3D_CriticalFlag--;

		if(g_G3D_CriticalFlag==0)
		{/*kick power off*/
			/*power off*/
			/*kick timer*/
			mod_timer(&g3d_pm_timer, jiffies + TIMER_INTERVAL);
		}
		else if(g_G3D_CriticalFlag>0)
		{/*kick power on*/
			if(domain_off_check(S3C64XX_DOMAIN_G))
			{/*if powered off*/                        
				if(g_G3D_SelfPowerOFF)
				{/*powered off by 3D PM or by Resume*/
					/*power on*/
					s3c_set_normal_cfg(S3C64XX_DOMAIN_G, S3C64XX_ACTIVE_MODE, S3C64XX_3D);
					if(s3c_wait_blk_pwr_ready(S3C64XX_BLK_G)) {
						printk("[3D] s3c_wait_blk_pwr_ready err\n");
						mutex_unlock(&pm_critical_section_lock);
						return -EFAULT;	
					}
					clk_g3d_enable();
					/*Need here??*/
					softReset_g3d();
					// printk("[3D] Power on\n");  
				}
				else
				{
					/*powered off by the system :: error*/
					printk("Error on the system :: app tries to work during sleep\n");
					mutex_unlock(&pm_critical_section_lock);
					return -EFAULT;	
				}
			}
			else
			{
				/*already powered on : nothing to do*/
				//g_G3D_SelfPowerOFF=0;
			}
		}
		else if(g_G3D_CriticalFlag < 0) 
		{
			printk("Error on the system :: g_G3D_CriticalFlag < 0\n");
		}
//		printk("S3C_3D_CRITICAL_SECTION: param_pm.criticalSection=%d\n",param_pm.criticalSection);

		if (copy_to_user((void *)arg,&param_pm,sizeof(struct s3c_3d_pm_status)))
		{
			printk("G3D: copy_to_user failed to get s3c_3d_pm_status\n");

			mutex_unlock(&pm_critical_section_lock);
			return -EFAULT;		
		}
		mutex_unlock(&pm_critical_section_lock);
#endif /* USE_G3D_DOMAIN_GATING */
		break;

	default:
		DEBUG("s3c_g3d_ioctl() : default !!\n");
		return -EINVAL;
	}
	
	return 0;
}
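A user of the G3D cache ioctls above passes back the virtual address and size of a buffer obtained from S3C_3D_MEM_ALLOC. A hedged user-space sketch (the device node path and header name are assumptions; the ioctl and struct come from this driver):

	/* Sketch: clean a G3D buffer from user space before the GPU reads it. */
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include "s3c_g3d.h"	/* assumed header exporting the ioctl numbers */

	static int clean_g3d_buffer(int fd, struct s3c_3d_mem_alloc *buf)
	{
		/* buf->vir_addr and buf->size were filled in by S3C_3D_MEM_ALLOC */
		return ioctl(fd, S3C_3D_CACHE_CLEAN, buf);
	}

	/* int fd = open("/dev/s3c-g3d", O_RDWR);  -- node name is an assumption */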
static int ion_no_pages_cache_ops(struct ion_client *client,
                                  struct ion_handle *handle,
                                  void *vaddr,
                                  unsigned int offset, unsigned int length,
                                  unsigned int cmd)
{
    unsigned long size_to_vmap, total_size;
    int i, j, ret;
    void *ptr = NULL;
    ion_phys_addr_t buff_phys = 0;
    ion_phys_addr_t buff_phys_start = 0;
    size_t buf_length = 0;

    ret = ion_phys(client, handle, &buff_phys_start, &buf_length);
    if (ret)
        return -EINVAL;

    buff_phys = buff_phys_start;

    if (!vaddr) {
        /*
         * Split the vmalloc space into smaller regions in
         * order to clean and/or invalidate the cache.
         */
        size_to_vmap = ((VMALLOC_END - VMALLOC_START)/8);
        total_size = buf_length;

        for (i = 0; i < total_size; i += size_to_vmap) {
            size_to_vmap = min(size_to_vmap, total_size - i);
            for (j = 0; j < 10 && size_to_vmap; ++j) {
                ptr = ioremap(buff_phys, size_to_vmap);
                if (ptr) {
                    switch (cmd) {
                    case ION_HISI_CLEAN_CACHES:
                        dmac_clean_range(ptr,
                                         ptr + size_to_vmap);
                        break;
                    case ION_HISI_INV_CACHES:
                        dmac_inv_range(ptr,
                                       ptr + size_to_vmap);
                        break;
                    case ION_HISI_CLEAN_INV_CACHES:
                        dmac_flush_range(ptr,
                                         ptr + size_to_vmap);
                        break;
                    default:
                        iounmap(ptr);	/* don't leak the mapping on a bad cmd */
                        return -EINVAL;
                    }
                    buff_phys += size_to_vmap;
                    break;
                } else {
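                    /* ioremap failed: halve the window and let the retry
                     * loop (bounded by j < 10) try the smaller size. */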
                    size_to_vmap >>= 1;
                }
            }
            if (!ptr) {
                pr_err("Couldn't io-remap the memory\n");
                return -EINVAL;
            }
            iounmap(ptr);
        }
    } else {