Example #1
/*
 *  ======== MEM_FlushCache ========
 *  Purpose:
 *      Flush cache
 */
void MEM_FlushCache(void *pMemBuf, u32 cBytes, s32 FlushType)
{
	if (cRefs <= 0 || !pMemBuf)
		goto func_end;
	switch (FlushType) {
	/* invalidate only */
	case PROC_INVALIDATE_MEM:
		dmac_inv_range(pMemBuf, pMemBuf + cBytes);
		outer_inv_range(__pa((u32)pMemBuf), __pa((u32)pMemBuf +
				cBytes));
		break;
	/* writeback only */
	case PROC_WRITEBACK_MEM:
		dmac_clean_range(pMemBuf, pMemBuf + cBytes);
		outer_clean_range(__pa((u32)pMemBuf), __pa((u32)pMemBuf +
				  cBytes));
		break;
	/* writeback and invalidate */
	case PROC_WRITEBACK_INVALIDATE_MEM:
		dmac_flush_range(pMemBuf, pMemBuf + cBytes);
		outer_flush_range(__pa((u32)pMemBuf), __pa((u32)pMemBuf +
				  cBytes));
		break;
	default:
		GT_1trace(MEM_debugMask, GT_6CLASS, "MEM_FlushCache: invalid "
			  "FlushMemType 0x%x\n", FlushType);
		break;
	}
func_end:
	return;
}
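The pairing above recurs throughout these examples: the dmac_* routines maintain the inner (L1) data cache by virtual address, while the outer_* routines maintain the outer cache (e.g. a PL310 L2) by physical address, which is why the outer calls are wrapped in __pa(). A minimal sketch of the idiom, assuming an ARM kernel of this era that still exports dmac_flush_range(); the helper name is invented for illustration:

#include <asm/cacheflush.h>	/* dmac_flush_range(), outer_flush_range() */
#include <asm/memory.h>		/* __pa() */

/* Hypothetical helper: write back and invalidate a kernel-linear
 * buffer all the way to main memory before handing it to a device. */
static void example_flush_for_dma(void *buf, size_t len)
{
	/* Inner (L1) cache ops take virtual addresses... */
	dmac_flush_range(buf, buf + len);
	/* ...outer (L2) cache ops take physical addresses. */
	outer_flush_range(__pa((unsigned long)buf),
			  __pa((unsigned long)buf + len));
}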
int ion_cma_cache_ops(struct ion_heap *heap,
			struct ion_buffer *buffer, void *vaddr,
			unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (cma_heap_has_outer_cache) {
		struct ion_cma_buffer_info *info = buffer->priv_virt;

		outer_cache_op(info->handle, info->handle + length);
	}

	return 0;
}
Example #3
static inline void platform_do_lowpower(unsigned int cpu)
{
	/* Just enter wfi for now. TODO: Properly shut off the cpu. */
	for (;;) {

		msm_pm_cpu_enter_lowpower(cpu);
		if (pen_release == cpu) {
			/*
			 * OK, proper wakeup, we're done
			 */
			pen_release = -1;
			dmac_flush_range((void *)&pen_release,
				(void *)(&pen_release + 1));
			break;
		}

		/*
		 * getting here, means that we have come out of WFI without
		 * having been woken up - this shouldn't happen
		 *
		 * The trouble is, letting people know about this is not really
		 * possible, since we are currently running incoherently, and
		 * therefore cannot safely call printk() or anything else
		 */
		dmac_inv_range((void *)&pen_release,
			       (void *)(&pen_release + 1));
		pr_debug("CPU%u: spurious wakeup call\n", cpu);
	}
}
int ion_cp_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
	struct ion_cp_heap *cp_heap =
	     container_of(heap, struct  ion_cp_heap, heap);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (cp_heap->has_outer_cache) {
		unsigned long pstart = buffer->priv_phys + offset;
		outer_cache_op(pstart, pstart + length);
	}
	return 0;
}
Example #5
int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int cnt = 0;
	printk(KERN_DEBUG "Starting secondary CPU %d\n", cpu);

	
	pen_release = cpu;
	dmac_clean_range((void *)&pen_release,
			 (void *)(&pen_release + 1));
	dmac_clean_range((void *)&secondary_data,
			 (void *)(&secondary_data + 1));
	sev();
	dsb();

	
	while (pen_release != 0xFFFFFFFF) {
		dmac_inv_range((void *)&pen_release,
			       (void *)(&pen_release + 1));
		msleep_interruptible(1);
		if (cnt++ >= SECONDARY_CPU_WAIT_MS)
			break;
	}

	if (pen_release == 0xFFFFFFFF)
		printk(KERN_DEBUG "Secondary CPU start acked %d\n", cpu);
	else
		printk(KERN_ERR "Secondary CPU failed to start..." \
		       "continuing\n");

	return 0;
}
int ion_cp_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct  ion_cp_heap, heap);
	unsigned int size_to_vmap, total_size;
	int i, j;
	void *ptr = NULL;
	ion_phys_addr_t buff_phys = buffer->priv_phys;

	if (!vaddr) {
		/*
		 * Split the vmalloc space into smaller regions in
		 * order to clean and/or invalidate the cache.
		 */
		size_to_vmap = (VMALLOC_END - VMALLOC_START)/8;
		total_size = buffer->size;
		for (i = 0; i < total_size; i += size_to_vmap) {
			size_to_vmap = min(size_to_vmap, total_size - i);
			for (j = 0; j < 10 && size_to_vmap; ++j) {
				ptr = ioremap(buff_phys, size_to_vmap);
				if (ptr) {
					switch (cmd) {
					case ION_IOC_CLEAN_CACHES:
						dmac_clean_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_clean_range;
						break;
					case ION_IOC_INV_CACHES:
						dmac_inv_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_inv_range;
						break;
					case ION_IOC_CLEAN_INV_CACHES:
						dmac_flush_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_flush_range;
						break;
					default:
						return -EINVAL;
					}
					buff_phys += size_to_vmap;
					break;
				} else {
					size_to_vmap >>= 1;
				}
			}
			if (!ptr) {
				pr_err("Couldn't io-remap the memory\n");
				return -EINVAL;
			}
			iounmap(ptr);
		}
	} else {
Example #7
/* Function to invalidate the Cache module */
Void Cache_inv(Ptr blockPtr, UInt32 byteCnt, Bits16 type, Bool wait) {
    GT_4trace (curTrace, GT_ENTER, "Cache_inv", blockPtr, byteCnt, type, wait);

#ifdef USE_CACHE_VOID_ARG
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
    dmac_map_area(blockPtr, (size_t)byteCnt, DMA_FROM_DEVICE);
    outer_inv_range(__pa((UInt32)blockPtr),
                    __pa((UInt32)(blockPtr + byteCnt)) );
#else
    dmac_inv_range(blockPtr, (blockPtr + byteCnt) );
#endif
#else
    dmac_inv_range( (UInt32)blockPtr, (UInt32)(blockPtr + byteCnt) );
#endif

    GT_0trace (curTrace, GT_LEAVE, "Cache_inv");
}
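Example #7 is really about API drift: dmac_inv_range() disappeared from the exported ARM cache API around 2.6.34, replaced by dmac_map_area()/dmac_unmap_area(). A version-portable wrapper in the same spirit might look like the sketch below; the wrapper name is invented, and the __pa() casts follow the example above.

#include <linux/version.h>
#include <linux/dma-mapping.h>	/* DMA_FROM_DEVICE */
#include <asm/cacheflush.h>

/* Hypothetical portable invalidate, mirroring the #ifdef ladder in
 * Cache_inv() above. */
static inline void example_cache_inv(void *start, size_t bytes)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34))
	/* Newer kernels phrase it as "map for device-to-CPU DMA". */
	dmac_map_area(start, bytes, DMA_FROM_DEVICE);
	outer_inv_range(__pa((unsigned long)start),
			__pa((unsigned long)start + bytes));
#else
	/* Older kernels still export the range op directly. */
	dmac_inv_range(start, start + bytes);
#endif
}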
Example #8
/*
 * Helper function to update buffer cache pages
 */
static void isp_af_update_req_buffer(struct isp_af_buffer *buffer)
{
	int size = afstat.stats_buf_size;

	size = PAGE_ALIGN(size);
	/* Update the kernel pages of the requested buffer */
	dmac_inv_range((void *)buffer->addr_align, (void *)buffer->addr_align +
		       size);
}
/* Executed by primary CPU, brings other CPUs out of reset. Called at boot
   as well as when a CPU is coming out of shutdown induced by echo 0 >
   /sys/devices/.../cpuX.
*/
int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	static int cold_boot_done;
	int cnt = 0;
	int ret;

	pr_debug("Starting secondary CPU %d\n", cpu);

	/* Set preset_lpj to avoid subsequent lpj recalculations */
	preset_lpj = loops_per_jiffy;

	if (cold_boot_done == false) {
		ret = scm_set_boot_addr((void *)
					virt_to_phys(msm_secondary_startup),
					SCM_FLAG_COLDBOOT_CPU1);
		if (ret == 0) {
			void *sc1_base_ptr;
			sc1_base_ptr = ioremap_nocache(0x00902000, SZ_4K*2);
			if (sc1_base_ptr) {
				writel(0x0, sc1_base_ptr+0x15A0);
				dmb();
				writel(0x0, sc1_base_ptr+0xD80);
				writel(0x3, sc1_base_ptr+0xE64);
				dsb();
				iounmap(sc1_base_ptr);
			}
		} else
			printk(KERN_DEBUG "Failed to set secondary core boot "
					  "address\n");
		cold_boot_done = true;
	}

	pen_release = cpu;
	dmac_flush_range((void *)&pen_release,
			 (void *)(&pen_release + 1));
	__asm__("sev");
	dsb();

	/* Use smp_cross_call() to send a soft interrupt to wake up
	 * the other core.
	 */
	smp_cross_call(cpumask_of(cpu));

	while (pen_release != 0xFFFFFFFF) {
		dmac_inv_range((void *)&pen_release,
			       (void *)(&pen_release + 1));
		usleep(500);
		if (cnt++ >= 10)
			break;
	}

	return 0;
}
int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int ret;
	int flag = 0;
	unsigned long timeout;

	pr_debug("Starting secondary CPU %d\n", cpu);

	
	preset_lpj = loops_per_jiffy;

	if (cpu > 0 && cpu < ARRAY_SIZE(cold_boot_flags))
		flag = cold_boot_flags[cpu];
	else
		__WARN();

	if (per_cpu(cold_boot_done, cpu) == false) {
		ret = scm_set_boot_addr((void *)
					virt_to_phys(msm_secondary_startup),
					flag);
		if (ret == 0)
			release_secondary(cpu);
		else
			printk(KERN_DEBUG "Failed to set secondary core boot "
					  "address\n");
		per_cpu(cold_boot_done, cpu) = true;
		init_cpu_debug_counter_for_cold_boot();
	}

	spin_lock(&boot_lock);

	pen_release = cpu_logical_map(cpu);
	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));

	gic_raise_softirq(cpumask_of(cpu), 1);

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		dmac_inv_range((void *)&pen_release,
			       (void *)(&pen_release + 1));
		udelay(10);
	}

	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
Example #11
static long kgsl_cache_range_op(unsigned long addr, int size,
					unsigned int flags)
{
#ifdef CONFIG_OUTER_CACHE
	unsigned long end;
#endif
	BUG_ON(addr & (KGSL_PAGESIZE - 1));
	BUG_ON(size & (KGSL_PAGESIZE - 1));

	if (flags & KGSL_CACHE_FLUSH)
		dmac_flush_range((const void *)addr,
				(const void *)(addr + size));
	else if (flags & KGSL_CACHE_CLEAN)
		dmac_clean_range((const void *)addr,
				(const void *)(addr + size));
	else
		dmac_inv_range((const void *)addr,
				(const void *)(addr + size));

#ifdef CONFIG_OUTER_CACHE
	for (end = addr; end < (addr + size); end += KGSL_PAGESIZE) {
		pte_t *pte_ptr, pte;
		unsigned long physaddr;
		if (flags & KGSL_CACHE_VMALLOC_ADDR)
			physaddr = vmalloc_to_pfn((void *)end);
		else if (flags & KGSL_CACHE_USER_ADDR) {
			pte_ptr = kgsl_get_pte_from_vaddr(end);
			if (!pte_ptr)
				return -EINVAL;
			pte = *pte_ptr;
			physaddr = pte_pfn(pte);
			pte_unmap(pte_ptr);
		} else
			return -EINVAL;

		physaddr <<= PAGE_SHIFT;
		if (flags & KGSL_CACHE_FLUSH)
			outer_flush_range(physaddr, physaddr + KGSL_PAGESIZE);
		else if (flags & KGSL_CACHE_CLEAN)
			outer_clean_range(physaddr,
				physaddr + KGSL_PAGESIZE);
		else
			outer_inv_range(physaddr,
				physaddr + KGSL_PAGESIZE);
	}
#endif
	return 0;
}
Example #12
static long kgsl_cache_range_op(unsigned long addr, int size,
					unsigned int flags)
{
#ifdef CONFIG_OUTER_CACHE
	unsigned long end;
#endif
	BUG_ON(addr & (KGSL_PAGESIZE - 1));
	BUG_ON(size & (KGSL_PAGESIZE - 1));

	if (flags & KGSL_MEMFLAGS_CACHE_FLUSH)
		dmac_flush_range((const void *)addr,
				(const void *)(addr + size));
	else if (flags & KGSL_MEMFLAGS_CACHE_CLEAN)
		dmac_clean_range((const void *)addr,
				(const void *)(addr + size));
	else if (flags & KGSL_MEMFLAGS_CACHE_INV)
		dmac_inv_range((const void *)addr,
				(const void *)(addr + size));

#ifdef CONFIG_OUTER_CACHE
	for (end = addr; end < (addr + size); end += KGSL_PAGESIZE) {
		unsigned long physaddr;
		if (flags & KGSL_MEMFLAGS_VMALLOC_MEM)
			physaddr = page_to_phys(vmalloc_to_page((void *) end));
		else if (flags & KGSL_MEMFLAGS_HOSTADDR) {
			physaddr = kgsl_virtaddr_to_physaddr(end);
			if (!physaddr) {
				KGSL_MEM_ERR("Unable to find physaddr for "
					"address: %x\n", (unsigned int)end);
				return -EINVAL;
			}
		} else
			return -EINVAL;

		if (flags & KGSL_MEMFLAGS_CACHE_FLUSH)
			outer_flush_range(physaddr, physaddr + KGSL_PAGESIZE);
		else if (flags & KGSL_MEMFLAGS_CACHE_CLEAN)
			outer_clean_range(physaddr,
				physaddr + KGSL_PAGESIZE);
		else if (flags & KGSL_MEMFLAGS_CACHE_INV)
			outer_inv_range(physaddr,
				physaddr + KGSL_PAGESIZE);
	}
#endif
	return 0;
}
/* Executed by primary CPU, brings other CPUs out of reset. Called at boot
   as well as when a CPU is coming out of shutdown induced by echo 0 >
   /sys/devices/.../cpuX.
*/
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int cnt = 0;
	int ret;
	int flag = 0;

	pr_debug("Starting secondary CPU %d\n", cpu);

	/* Set preset_lpj to avoid subsequent lpj recalculations */
	preset_lpj = loops_per_jiffy;

	if (cpu > 0 && cpu < ARRAY_SIZE(cold_boot_flags))
		flag = cold_boot_flags[cpu];
	else
		__WARN();

	if (per_cpu(cold_boot_done, cpu) == false) {
		ret = scm_set_boot_addr((void *)
					virt_to_phys(msm_secondary_startup),
					flag);
		if (ret == 0)
			release_secondary(cpu);
		else
			printk(KERN_DEBUG "Failed to set secondary core boot "
					  "address\n");
		per_cpu(cold_boot_done, cpu) = true;
	}

	pen_release = cpu;
	dmac_flush_range((void *)&pen_release,
			 (void *)(&pen_release + 1));
	__asm__("sev");
	mb();

	/* Use smp_cross_call() to send a soft interrupt to wake up
	 * the other core.
	 */
	gic_raise_softirq(cpumask_of(cpu), 1);

	while (pen_release != 0xFFFFFFFF) {
		dmac_inv_range((void *)&pen_release,
			       (void *)(&pen_release + 1));
		usleep(500);
		if (cnt++ >= 10)
			break;
	}

	return 0;
}
Example #14
int ion_cma_cache_ops(struct ion_heap *heap,
			struct ion_buffer *buffer, void *vaddr,
			unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		if (!vaddr)
			dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_TO_DEVICE);
		else
			dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		if (!vaddr)
			dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_FROM_DEVICE);
		else
			dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		if (!vaddr) {
			dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_TO_DEVICE);
			dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_FROM_DEVICE);
		} else {
			dmac_flush_range(vaddr, vaddr + length);
		}
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (cma_heap_has_outer_cache) {
		struct ion_cma_buffer_info *info = buffer->priv_virt;

		outer_cache_op(info->handle, info->handle + length);
	}

	return 0;
}
Example #15
/* Executed by primary CPU, brings other CPUs out of reset. Called at boot
   as well as when a CPU is coming out of shutdown induced by echo 0 >
   /sys/devices/.../cpuX.
*/
int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int cnt = 0;
	printk(KERN_DEBUG "Starting secondary CPU %d\n", cpu);

	/* Tell other CPUs to come out or reset.  Note that secondary CPUs
	 * are probably running with caches off, so we'll need to clean to
	 * memory. Normal cache ops will only clean to L2.
	 */
	pen_release = cpu;
	dmac_clean_range((void *)&pen_release,
			 (void *)(&pen_release + 1));
	dmac_clean_range((void *)&secondary_data,
			 (void *)(&secondary_data + 1));
	sev();
	dsb();

	/* Use smp_cross_call() to send a soft interrupt to wake up
	 * the other core.
	 */
	smp_cross_call(cpumask_of(cpu));

	/* Wait for done signal. The cpu receiving the signal does not
	 * have the MMU or caching turned on, so all of its reads and
	 * writes are to/from memory.  Need to ensure that when
	 * reading the value we invalidate the cache line so we see the
	 * fresh data from memory as the normal routines may only
	 * invalidate to POU or L1.
	 */
	while (pen_release != 0xFFFFFFFF) {
		dmac_inv_range((void *)&pen_release,
			       (void *)(&pen_release + 1));
		msleep_interruptible(1);
		if (cnt++ >= SECONDARY_CPU_WAIT_MS)
			break;
	}

	if (pen_release == 0xFFFFFFFF)
		printk(KERN_DEBUG "Secondary CPU start acked %d\n", cpu);
	else
		printk(KERN_ERR "Secondary CPU failed to start..." \
		       "continuing\n");

	return 0;
}
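Example #15's comments describe both ends of the holding-pen handshake but only show the boot CPU's side. For orientation, here is a sketch of what the woken secondary conventionally does; the symbol names are invented, and the real code lives in the platform's secondary-init path.

/* Hypothetical secondary-side acknowledgement. The woken CPU may still
 * have its MMU and caches off, so the store must reach main memory,
 * where the boot CPU's dmac_inv_range() polling loop above will see it. */
static void example_secondary_ack(void)
{
	pen_release = 0xFFFFFFFF;	/* signal "out of the pen" */
	dmac_clean_range((void *)&pen_release,
			 (void *)(&pen_release + 1));
	dsb();				/* complete the clean first */
	sev();				/* wake anyone parked in WFE */
}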
Example #16
static inline void platform_do_lowpower(unsigned int cpu)
{
	
	for (;;) {

		msm_pm_cpu_enter_lowpower(cpu);
		if (pen_release == cpu_logical_map(cpu)) {
			pen_release = -1;
			dmac_flush_range((void *)&pen_release,
				(void *)(&pen_release + 1));
			break;
		}

		dmac_inv_range((void *)&pen_release,
			       (void *)(&pen_release + 1));
		pr_debug("CPU%u: spurious wakeup call\n", cpu);
	}
}
Example #17
static int g2d_ioctl(struct inode *inode, struct file *file,
		     unsigned int cmd, unsigned long arg)
{
	struct g2d_dma_info dma_info;
	void *vaddr;

	if (cmd == G2D_WAIT_FOR_IRQ) {
		wait_event_timeout(g2d->wq,
				(atomic_read(&g2d->in_use) == 1), 10000);
		atomic_set(&g2d->in_use, 0);
		return 0;
	}

	if (copy_from_user(&dma_info, (struct g2d_dma_info *)arg,
				sizeof(dma_info)))
		return -EFAULT;

	vaddr = phys_to_virt(dma_info.addr);

	switch (cmd) {
	case G2D_DMA_CACHE_INVAL:
		dmac_inv_range(vaddr, vaddr + dma_info.size);
		break;

	case G2D_DMA_CACHE_CLEAN:
		dmac_clean_range(vaddr, vaddr + dma_info.size);
		break;

	case G2D_DMA_CACHE_FLUSH:
		dmac_flush_range(vaddr, vaddr + dma_info.size);
		break;

	case G2D_DMA_CACHE_FLUSH_ALL:
		__cpuc_flush_kern_all();
		break;

	default:
		break;
	}

	return 0;
}
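From userspace, Example #17's driver is driven with a physical address and a size. A hypothetical caller might look like the following; the header name is a guess, and G2D_DMA_CACHE_CLEAN plus the addr/size layout of struct g2d_dma_info are inferred from the driver code above.

#include <fcntl.h>
#include <sys/ioctl.h>
#include "g2d_ioctl.h"	/* hypothetical header: G2D_* ioctls, struct g2d_dma_info */

/* Hypothetical userspace wrapper: ask the driver to clean the CPU cache
 * over a physically contiguous buffer before the G2D engine reads it. */
int example_g2d_clean(int fd, unsigned long phys, unsigned int size)
{
	struct g2d_dma_info info = { .addr = phys, .size = size };

	return ioctl(fd, G2D_DMA_CACHE_CLEAN, &info);
}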
int ion_system_contig_heap_cache_ops(struct ion_heap *heap,
			struct ion_buffer *buffer, void *vaddr,
			unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (system_heap_contig_has_outer_cache) {
		unsigned long pstart;

		pstart = virt_to_phys(buffer->priv_virt) + offset;
		if (!pstart) {
			WARN(1, "Could not do virt to phys translation on %p\n",
				buffer->priv_virt);
			return -EINVAL;
		}

		outer_cache_op(pstart, pstart + length);
	}

	return 0;
}
static void q6venc_callback(void *context, void *data, uint32_t len)
{
	struct q6venc_dev *q6venc = context;
	struct q6_frame_type *q6frame = data;
	struct buf_info *rlc_buf;
	unsigned long flags;
	int i;

	pr_debug("%s \n", __func__);

	spin_lock_irqsave(&q6venc->done_lock, flags);
	q6venc->encode_done = true;
	for (i = 0; i < RLC_MAX_BUF_NUM; ++i) {
		rlc_buf = &q6venc->rlc_bufs[i];
		if (rlc_buf->paddr == q6frame->frame_addr)
			goto frame_found;
	}

	pr_err("%s: got incorrect phy address 0x%08x from q6 \n", __func__,
	       q6frame->frame_addr);
	q6venc->done_frame.q6_frame_type.frame_len = 0;
	wake_up_interruptible(&q6venc->encode_wq);
	goto done;

frame_found:
	memcpy(&q6venc->done_frame.frame_addr, &rlc_buf->venc_buf,
	       sizeof(struct venc_buf));
	memcpy(&q6venc->done_frame.q6_frame_type, q6frame,
	       sizeof(struct q6_frame_type));

	dmac_inv_range((const void *)q6venc->rlc_bufs[i].vaddr,
		       (const void *)(q6venc->rlc_bufs[i].vaddr +
				      q6venc->rlc_buf_len));

	wake_up_interruptible(&q6venc->encode_wq);

done:
	spin_unlock_irqrestore(&q6venc->done_lock, flags);
}
void mfc_read_shared_mem(unsigned int host_wr_addr, MFC_SHARED_MEM *shared_mem)
{
	dmac_inv_range((void *)host_wr_addr, (void *)(host_wr_addr + SHARED_MEM_MAX));

	shared_mem->extended_decode_status = mfc_read_shared_mem_item(host_wr_addr, EXTENEDED_DECODE_STATUS);
	shared_mem->get_frame_tag_top      = mfc_read_shared_mem_item(host_wr_addr, GET_FRAME_TAG_TOP);
	shared_mem->get_frame_tag_bot      = mfc_read_shared_mem_item(host_wr_addr, GET_FRAME_TAG_BOT);
	shared_mem->pic_time_top           = mfc_read_shared_mem_item(host_wr_addr, PIC_TIME_TOP);
	shared_mem->pic_time_bot           = mfc_read_shared_mem_item(host_wr_addr, PIC_TIME_BOT);
	shared_mem->start_byte_num         = mfc_read_shared_mem_item(host_wr_addr, START_BYTE_NUM);
	shared_mem->dec_frm_size           = mfc_read_shared_mem_item(host_wr_addr, DEC_FRM_SIZE);
	shared_mem->crop_info1             = mfc_read_shared_mem_item(host_wr_addr, CROP_INFO1);
	shared_mem->crop_info2             = mfc_read_shared_mem_item(host_wr_addr, CROP_INFO2);
	shared_mem->metadata_status        = mfc_read_shared_mem_item(host_wr_addr, METADATA_STATUS);
	shared_mem->metadata_display_index = mfc_read_shared_mem_item(host_wr_addr, METADATA_DISPLAY_INDEX);
	shared_mem->dbg_info_output0       = mfc_read_shared_mem_item(host_wr_addr, DBG_INFO_OUTPUT0);
	shared_mem->dbg_info_output1       = mfc_read_shared_mem_item(host_wr_addr, DBG_INFO_OUTPUT1);

#if DEBUG_ENABLE
	mfc_print_shared_mem(host_wr_addr);
#endif
}
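mfc_read_shared_mem() is the invalidate-before-read half of a shared-memory protocol with the MFC firmware. The mirror-image write path would store through the cached mapping and then clean, so the firmware reads current data. A sketch, assuming a write accessor symmetric to mfc_read_shared_mem_item():

/* Hypothetical write-side counterpart; mfc_write_shared_mem_item() is
 * assumed to mirror the read accessor used above. */
void example_mfc_write_shared_mem_item(unsigned int host_wr_addr,
				       unsigned int item, unsigned int val)
{
	mfc_write_shared_mem_item(host_wr_addr, item, val);

	/* Clean (write back) so the MFC firmware sees the store. */
	dmac_clean_range((void *)host_wr_addr,
			 (void *)(host_wr_addr + SHARED_MEM_MAX));
}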
Example #21
/*=======================================================================*/
void *dma_memcpy(void *to, const void *from, __kernel_size_t n)
{
	u32 phys_from, phys_to;	
	u32 unaligned_to;
	unsigned long flags;

	DPRINTK("dma_memcopy: entering\n");

	/* This is used in the very early stages */
	if (!idma_init)
		return asm_memmove(to, from, n);

	/* Fall back if one or both buffers are not physically contiguous */
	if (!virt_addr_valid(to) || !virt_addr_valid(from)) {
		DPRINTK("Falling back to asm_memmove because of limitations\n");
		return asm_memmove(to, from, n);
	}

	/* Check for overlap */
	if (((to + n > from) && (to < from)) ||
	    ((from < to) && (from + n > to))) {
		DPRINTK("overlapping copy region (0x%x, 0x%x, %lu), falling back\n",
			to, from, (unsigned long)n);
		return asm_memmove(to, from, n);
	}

	++dma_memcpy_cnt;

	/*
	 * Ok, start addr is not cache line-aligned, so we need to make it so.
	 */
	unaligned_to = (u32)to & 31;
	if (unaligned_to) {
		DPRINTK("Fixing up starting address %d bytes\n", 32 - unaligned_to);

		asm_memmove(to, from, 32 - unaligned_to);

		to = (void *)((u32)to + (32 - unaligned_to));
		from = (void *)((u32)from + (32 - unaligned_to));

		/* n is assumed to be greater than 32 bytes at this point */
		n -= (32 - unaligned_to);
	}

	spin_lock_irqsave(&current->mm->page_table_lock, flags);
	if (idma_busy) {
		/*
		 * The IDMA engine is busy. This can happen when
		 * dma_copy_to/from_user calls arch_copy_to/from_user,
		 * which may fault and in turn trigger a memcpy or memzero.
		 */
		DPRINTK(" idma is busy... \n");
		spin_unlock_irqrestore(&current->mm->page_table_lock, flags);
		return asm_memmove(to, from, n);
	}
	idma_busy = 1;

	phys_from = physical_address((u32)from, 0);
	phys_to = physical_address((u32)to, 1);
	
	/*
	 * Prepare the IDMA.
	 */
	if (!phys_from || !phys_to) {
		/* The requested page isn't available, fall back */
		DPRINTK(" no physical address, fall back: from %p , to %p \n", from, to);
		idma_busy = 0;
		spin_unlock_irqrestore(&current->mm->page_table_lock, flags);
		return asm_memmove(to, from, n);
	} else {
		/*
		 * Ensure that the cache is clean:
		 *      - from range must be cleaned
		 *      - to range must be invalidated
		 */
		dmac_flush_range(from, from + n);
		dmac_inv_range(to, to + n);

		/* Start DMA */
		DPRINTK(" activate DMA: channel %d from %x to %x len %x\n",
			CPY_CHAN1, phys_from, phys_to, n);
		mvDmaTransfer(CPY_CHAN1, phys_from, phys_to, n, 0);
#ifdef RT_DEBUG
		dma_activations++;
#endif
	}

	if (wait_for_idma(CPY_CHAN1))
		BUG();

	DPRINTK("dma_memcopy(0x%x, 0x%x, %lu): exiting\n", (u32)to, (u32)from, n);

	idma_busy = 0;
	spin_unlock_irqrestore(&current->mm->page_table_lock, flags);

	return 0;
}
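Example #21 applies the standard coherency rule for a DMA copy on a non-coherent system: the source range is cleaned so the engine reads what the CPU wrote, and the destination range is invalidated so no stale dirty line is later evicted over the DMA result. Reduced to a sketch (the helper name is illustrative, not a kernel API):

/* Minimal statement of the rule used by dma_memcpy() above. */
static void example_prepare_dma_copy(const void *src, void *dst, size_t n)
{
	/* Source: write back dirty lines so the engine reads fresh data. */
	dmac_clean_range(src, src + n);
	/* Destination: drop cached lines so nothing stale is written
	 * back on top of the DMA output. */
	dmac_inv_range(dst, dst + n);
}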
Example #22
/*=======================================================================*/
static unsigned long dma_copy(void *to, const void *from, unsigned long n, unsigned int to_user)
{
	u32 chunk,i;
	u32 k_chunk = 0;
	u32 u_chunk = 0;
	u32 phys_from, phys_to;
	
        unsigned long flags;
	u32 unaligned_to;
	u32 index = 0;
        u32 temp;

        unsigned long uaddr, kaddr;
        unsigned char kaddr_kernel_static = 0;
	DPRINTK("dma_copy: entering\n");


	/*
	 * The unaligned head and tail are handled separately, since the
	 * destination might share a cache line that another process is
	 * changing: we must not invalidate those cache lines, and we cannot
	 * flush them either, since another process (or the exception
	 * handler) might fetch the line before we have copied it.
	 */

	/*
	 * Ok, start addr is not cache line-aligned, so we need to make it so.
	 */
	unaligned_to = (u32)to & 31;
	if(unaligned_to)
	{
		DPRINTK("Fixing up starting address %d bytes\n", 32 - unaligned_to);

		if(to_user)
		{
		    if(__arch_copy_to_user(to, from, 32 - unaligned_to)) 
			goto exit_dma; 
		}
		else
		{
		    if(__arch_copy_from_user(to, from, 32 - unaligned_to)) 
			goto exit_dma;
		}

		temp = (u32)to + (32 - unaligned_to);
		to = (void *)temp;
		temp = (u32)from + (32 - unaligned_to);
		from = (void *)temp;

		/* n is assumed to be greater than 32 bytes at this point */
		n -= (32 - unaligned_to);
	}

	/*
	 * Ok, we're aligned at the top, now let's check the end
	 * of the buffer and align that. After this we should have
	 * a block that is a multiple of cache line size.
	 */
	unaligned_to = ((u32)to + n) & 31;
	if(unaligned_to)
	{	
		u32 tmp_to = (u32)to + (n - unaligned_to);
		u32 tmp_from = (u32)from + (n - unaligned_to);
		DPRINTK("Fixing ending alignment %d bytes\n", unaligned_to);

		if(to_user)
		{
		    if(__arch_copy_to_user((void *)tmp_to, (void *)tmp_from, unaligned_to))
			goto exit_dma;
		}
		else
		{
		    if(__arch_copy_from_user((void *)tmp_to, (void *)tmp_from, unaligned_to))
			goto exit_dma;
		}

		/* n is assumed to be greater than 32 bytes at this point */
		n -= unaligned_to;
	}

        if(to_user)
        {
            uaddr = (unsigned long)to;  
            kaddr = (unsigned long)from;
        }
        else
        {
             uaddr = (unsigned long)from;
             kaddr = (unsigned long)to;
        }
        if(virt_addr_valid(kaddr))
        {
            kaddr_kernel_static = 1;
            k_chunk = n;
        }
	else
	{
		DPRINTK("kernel address is not linear, fall back\n");
		goto exit_dma;
	}
         
        spin_lock_irqsave(&current->mm->page_table_lock, flags);
	if (idma_busy)
	{
	    BUG();
	}
	idma_busy = 1;
     
        i = 0;
	while(n > 0)
	{
	    if(k_chunk == 0)
	    {
                /* virtual address */
	        k_chunk = page_remainder((u32)kaddr);
		DPRINTK("kaddr reminder %d \n",k_chunk);
	    }

	    if(u_chunk == 0)
	    {
                u_chunk = page_remainder((u32)uaddr);
                DPRINTK("uaddr reminder %d \n", u_chunk);
            }
        
            chunk = ((u_chunk < k_chunk) ? u_chunk : k_chunk);
            if(n < chunk)
	    {
		chunk = n;
	    }

	    if(chunk == 0)
	    {
	    	break;
	    }
            phys_from = physical_address((u32)from, 0);
            phys_to = physical_address((u32)to, 1);
	    DPRINTK("choose chunk %d \n",chunk);
	    /* if page doesn't exist go out */
	    if ((!phys_from) || (!phys_to))
	    {
		/* The requested page isn't available, fall back to */
		DPRINTK(" no physical address, fall back: from %p , to %p \n", from, to);
		goto wait_for_idmas;
   
	    }
	    /*
	     *  Prepare the IDMA.
	     */
            if (chunk < IDMA_MIN_COPY_CHUNK)
            {
        	DPRINTK(" chunk %d too small , use memcpy \n",chunk);
                /* the "to" address might cross cache line boundary, so part of the line*/  
                /* may be subject to DMA, so we need to wait to last DMA engine to finish */
                if (index > 0)
                {
                    if(wait_for_idma(PREV_CHANNEL(current_dma_channel)))
                    {
	                BUG();
                    }
                }
                

                if(to_user) 
		{
	       	    if(__arch_copy_to_user((void *)to, (void *)from, chunk)) {
			printk("ERROR: %s %d shouldn't happen\n",__FUNCTION__, __LINE__);	
			goto wait_for_idmas;
		    }
		}
	        else
		{
	            if(__arch_copy_from_user((void *)to, (void *)from, chunk)) {
			printk("ERROR: %s %d shouldn't happen\n",__FUNCTION__, __LINE__);	
			goto wait_for_idmas;	
		    }
		}
           }
            else
            {
                /* 
	 	 * Ensure that the cache is clean:
	 	 *      - from range must be cleaned
        	 *      - to range must be invalidated
	         */
		dmac_flush_range(from, from + chunk);
		dmac_inv_range(to, to + chunk);
               
		if (index > 1) {
			if (wait_for_idma(current_dma_channel)) {
				BUG();
				goto unlock_dma;
			}
		}
		/* Start DMA */
		DPRINTK(" activate DMA: channel %d from %x to %x len %x\n",
			current_dma_channel, phys_from, phys_to, chunk);
		mvDmaTransfer(current_dma_channel, phys_from, phys_to, chunk, 0);
		current_dma_channel = NEXT_CHANNEL(current_dma_channel);
#ifdef RT_DEBUG
		dma_activations++;
#endif
		index++;
            }
                

		/* go to next chunk */
		from += chunk;
		to += chunk;
                kaddr += chunk;
                uaddr += chunk;
		n -= chunk;
		u_chunk -= chunk;
		k_chunk -= chunk;		
	}
        
wait_for_idmas:
        if (index > 1)
        {
	    if(wait_for_idma(current_dma_channel))
            {
	        BUG(); 
            }
        }

        if (index > 0)
        {
            if(wait_for_idma(PREV_CHANNEL(current_dma_channel)))
            {
	        BUG();
            }
        }

unlock_dma:    
	idma_busy = 0;    
        spin_unlock_irqrestore(&current->mm->page_table_lock, flags);
 exit_dma:
        
        DPRINTK("dma_copy(0x%x, 0x%x, %lu): exiting\n", (u32) to,
                (u32) from, n);
       

        if (n != 0) {
		if (to_user)
			return __arch_copy_to_user((void *)to, (void *)from, n);
		else
			return __arch_copy_from_user((void *)to, (void *)from, n);
        }
        return 0;
}
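The alignment handling at the top of dma_copy() is worth isolating: head and tail bytes that share a cache line with other data are copied by the CPU, and only the cache-line-aligned middle is given to the DMA engine. A compact sketch of that split for the 32-byte lines assumed above; cpu_copy() and dma_copy_aligned() are invented stand-ins for the real copy paths.

/* Stand-ins for the CPU and DMA copy paths used above. */
static void cpu_copy(void *to, const void *from, size_t n);
static void dma_copy_aligned(void *to, const void *from, size_t n);

/* Illustrative head/middle/tail split for 32-byte cache lines. */
static void example_split_copy(void *to, const void *from, size_t n)
{
	size_t head = (32 - ((unsigned long)to & 31)) & 31;
	size_t tail;

	if (head > n)
		head = n;
	cpu_copy(to, from, head);			/* unaligned head: CPU */
	to += head;
	from += head;
	n -= head;

	tail = n & 31;					/* "to" is now aligned */
	cpu_copy(to + n - tail, from + n - tail, tail);	/* unaligned tail: CPU */
	n -= tail;

	dma_copy_aligned(to, from, n);			/* aligned middle: DMA */
}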
static int s3c_g3d_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	u32 val;
	DMA_BLOCK_STRUCT dma_block;
	s3c_3d_dma_info dma_info;
	DECLARE_COMPLETION_ONSTACK(complete);

	struct mm_struct *mm = current->mm;
	struct s3c_3d_mem_alloc param;
	struct s3c_3d_pm_status param_pm;

	unsigned int timer;
	
	switch (cmd) {
	case WAIT_FOR_FLUSH:
		//if fifo has already been flushed, return;
		val = __raw_readl(s3c_g3d_base+FGGB_PIPESTATE);
		//printk("read pipestate = 0x%x\n",val);
		if((val & arg) ==0) break;

		// enable interrupt
		interrupt_already_recevied = 0;
		__raw_writel(0x0001171f,s3c_g3d_base+FGGB_PIPEMASK);
		__raw_writel(1,s3c_g3d_base+FGGB_INTMASK);

		//printk("wait for flush (arg=0x%lx)\n",arg);

		timer = 1000000;

		while(timer) {
			wait_event_interruptible_timeout(waitq, (interrupt_already_recevied>0), 1*HZ);

			__raw_writel(0,s3c_g3d_base+FGGB_INTMASK);
			interrupt_already_recevied = 0;
			//if(interrupt_already_recevied==0)interruptible_sleep_on(&waitq);
			val = __raw_readl(s3c_g3d_base+FGGB_PIPESTATE);
			//printk("in while read pipestate = 0x%x\n",val);
			if(val & arg){
			} else{
				break;
			}
			__raw_writel(1,s3c_g3d_base+FGGB_INTMASK);
			timer --;
		}
		break;

	case GET_CONFIG:
		if (copy_to_user((void *)arg,&g3d_config,sizeof(G3D_CONFIG_STRUCT))) {
			printk("G3D: copy_to_user failed to get g3d_config\n");
			return -EFAULT;		
		}
		break;

	case START_DMA_BLOCK:
		if (copy_from_user(&dma_block,(void *)arg,sizeof(DMA_BLOCK_STRUCT))) {
			printk("G3D: copy_to_user failed to get dma_block\n");
			return -EFAULT;		
		}

		if (dma_block.offset%4!=0) {
			printk("G3D: dma offset is not aligned by word\n");
			return -EINVAL;
		}
		if (dma_block.size%4!=0) {
			printk("G3D: dma size is not aligned by word\n");
			return -EINVAL;
		}
		if (dma_block.offset+dma_block.size >g3d_config.dma_buffer_size) {
			printk("G3D: offset+size exceeds dam buffer\n");
			return -EINVAL;
		}

		dma_info.src = g3d_config.dma_buffer_addr+dma_block.offset;
		dma_info.len = dma_block.size;
		dma_info.dst = s3c_g3d_base_physical+FGGB_HOSTINTERFACE;

		DEBUG(" dma src=0x%x\n", dma_info.src);
		DEBUG(" dma len =%u\n", dma_info.len);
		DEBUG(" dma dst = 0x%x\n", dma_info.dst);

		dma_3d_done = &complete;

		if (s3c2410_dma_request(DMACH_3D_M2M, &s3c6410_3d_dma_client, NULL)) {
			printk(KERN_WARNING "Unable to get DMA channel(DMACH_3D_M2M).\n");
			return -EFAULT;
		}

		s3c2410_dma_set_buffdone_fn(DMACH_3D_M2M, s3c_g3d_dma_finish);
		s3c2410_dma_devconfig(DMACH_3D_M2M, S3C_DMA_MEM2MEM, 1, (u_long) dma_info.src);
		s3c2410_dma_config(DMACH_3D_M2M, 4, 4);
		s3c2410_dma_setflags(DMACH_3D_M2M, S3C2410_DMAF_AUTOSTART);

		//consistent_sync((void *) dma_info.dst, dma_info.len, DMA_FROM_DEVICE);
	//	s3c2410_dma_enqueue(DMACH_3D_M2M, NULL, (dma_addr_t) virt_to_dma(NULL, dma_info.dst), dma_info.len);
		s3c2410_dma_enqueue(DMACH_3D_M2M, NULL, (dma_addr_t) dma_info.dst, dma_info.len);

	//	printk("wait for end of dma operation\n");
		wait_for_completion(&complete);
	//	printk("dma operation is performed\n");

		s3c2410_dma_free(DMACH_3D_M2M, &s3c6410_3d_dma_client);

		break;

	case S3C_3D_MEM_ALLOC:		
		mutex_lock(&mem_alloc_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_alloc_lock);			
			return -EFAULT;
		}
       
		flag = MEM_ALLOC;
		
		param.size = s3c_g3d_available_chunk_size(param.size,(unsigned int)file->private_data);

		if (param.size == 0){
			printk("S3C_3D_MEM_ALLOC FAILED because there is no block memory bigger than you request\n");
			flag = 0;
			mutex_unlock(&mem_alloc_lock);			
			return -EFAULT;
		}			
             
		param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x\n", param.vir_addr);

		if(param.vir_addr == -EINVAL) {
			printk("S3C_3D_MEM_ALLOC FAILED\n");
			flag = 0;
			mutex_unlock(&mem_alloc_lock);			
			return -EFAULT;
		}
		param.phy_addr = physical_address;

       // printk("alloc %d\n", param.size);
		DEBUG("KERNEL MALLOC : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr);

		if(copy_to_user((struct s3c_3d_mem_alloc *)arg, &param, sizeof(struct s3c_3d_mem_alloc))){
			flag = 0;
			mutex_unlock(&mem_alloc_lock);
			return -EFAULT;		
		}

		flag = 0;
		
//		printk("\n\n====Success the malloc from kernel=====\n");
		mutex_unlock(&mem_alloc_lock);
		
		break;

	case S3C_3D_MEM_FREE:	
		mutex_lock(&mem_free_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_free_lock);
			return -EFAULT;
		}

		DEBUG("KERNEL FREE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr);

		/*
		if (do_munmap(mm, param.vir_addr, param.size) < 0) {
			printk("do_munmap() failed !!\n");
			mutex_unlock(&mem_free_lock);
			return -EINVAL;
		}
		*/

		s3c_g3d_release_chunk(param.phy_addr, param.size);
		//printk("KERNEL : virt_addr = 0x%X\n", virt_addr);
		//printk("free %d\n", param.size);


		param.size = 0;
		DEBUG("do_munmap() succeed !!\n");

		if(copy_to_user((struct s3c_3d_mem_alloc *)arg, &param, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_free_lock);
			return -EFAULT;
		}
		
		mutex_unlock(&mem_free_lock);
		
		break;

	case S3C_3D_SFR_LOCK:
		mutex_lock(&mem_sfr_lock);
		mutex_lock_processID = (unsigned int)file->private_data;
		DEBUG("s3c_g3d_ioctl() : You got a muxtex lock !!\n");
		break;

	case S3C_3D_SFR_UNLOCK:
		mutex_lock_processID = 0;
		mutex_unlock(&mem_sfr_lock);
		DEBUG("s3c_g3d_ioctl() : The muxtex unlock called !!\n");
		break;

	case S3C_3D_MEM_ALLOC_SHARE:		
		mutex_lock(&mem_alloc_share_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_alloc_share_lock);
			return -EFAULT;
		}
		flag = MEM_ALLOC_SHARE;

		physical_address = param.phy_addr;

		DEBUG("param.phy_addr = %08x\n", physical_address);

		param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x\n", param.vir_addr);

		if(param.vir_addr == -EINVAL) {
			printk("S3C_3D_MEM_ALLOC_SHARE FAILED\n");
			flag = 0;
			mutex_unlock(&mem_alloc_share_lock);
			return -EFAULT;
		}

		DEBUG("MALLOC_SHARE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr);

		if(copy_to_user((struct s3c_3d_mem_alloc *)arg, &param, sizeof(struct s3c_3d_mem_alloc))){
			flag = 0;
			mutex_unlock(&mem_alloc_share_lock);
			return -EFAULT;		
		}

		flag = 0;
		
		mutex_unlock(&mem_alloc_share_lock);
		
		break;

	case S3C_3D_MEM_SHARE_FREE:	
		mutex_lock(&mem_share_free_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_share_free_lock);
			return -EFAULT;		
		}

		DEBUG("MEM_SHARE_FREE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr);

		if (do_munmap(mm, param.vir_addr, param.size) < 0) {
			printk("do_munmap() failed - MEM_SHARE_FREE!!\n");
			mutex_unlock(&mem_share_free_lock);
			return -EINVAL;
		}

		param.vir_addr = 0;
		DEBUG("do_munmap() succeed !! - MEM_SHARE_FREE\n");

		if(copy_to_user((struct s3c_3d_mem_alloc *)arg, &param, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_share_free_lock);
			return -EFAULT;		
		}

		mutex_unlock(&mem_share_free_lock);
		
		break;

	case S3C_3D_CACHE_INVALID:
		mutex_lock(&cache_invalid_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			printk("ERR: Invalid Cache Error\n");	
			mutex_unlock(&cache_invalid_lock);
			return -EFAULT;	
		}
		dmac_inv_range((unsigned int) param.vir_addr,(unsigned int)param.vir_addr + param.size);
		mutex_unlock(&cache_invalid_lock);
		break;

	case S3C_3D_CACHE_CLEAN:
		mutex_lock(&cache_clean_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			printk("ERR: Invalid Cache Error\n");	
			mutex_unlock(&cache_clean_lock);
			return -EFAULT;	
		}
		dmac_clean_range((unsigned int) param.vir_addr,(unsigned int)param.vir_addr + param.size);
		mutex_unlock(&cache_clean_lock);
		break;

	case S3C_3D_CACHE_CLEAN_INVALID:
		mutex_lock(&cache_clean_invalid_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&cache_clean_invalid_lock);
			printk("ERR: Invalid Cache Error\n");	
			return -EFAULT;	
		}
		dmac_flush_range((unsigned int) param.vir_addr,(unsigned int)param.vir_addr + param.size);
		mutex_unlock(&cache_clean_invalid_lock);
		break;

	case S3C_3D_POWER_INIT:
		if(copy_from_user(&param_pm, (struct s3c_3d_pm_status *)arg, sizeof(struct s3c_3d_pm_status))){
			printk("ERR: Invalid Cache Error\n");	
			return -EFAULT;	
		}
		break;

	case S3C_3D_CRITICAL_SECTION:
#ifdef USE_G3D_DOMAIN_GATING
		mutex_lock(&pm_critical_section_lock);
		if(copy_from_user(&param_pm, (struct s3c_3d_pm_status *)arg, sizeof(struct s3c_3d_pm_status))){
			printk("ERR: Invalid Cache Error\n");	
			mutex_unlock(&pm_critical_section_lock);
			return -EFAULT;	
		}

//		param_pm.memStatus = check_memStatus((unsigned int)file->private_data);

		if(param_pm.criticalSection) g_G3D_CriticalFlag++;
		else g_G3D_CriticalFlag--;

		if(g_G3D_CriticalFlag==0)
		{/*kick power off*/
			/*power off*/
			/*kick timer*/
			mod_timer(&g3d_pm_timer, jiffies + TIMER_INTERVAL);
		}
		else if(g_G3D_CriticalFlag>0)
		{/*kick power on*/
			if(domain_off_check(S3C64XX_DOMAIN_G))
			{/*if powered off*/                        
				if(g_G3D_SelfPowerOFF)
				{/*powered off by 3D PM or by Resume*/
					/*power on*/
					s3c_set_normal_cfg(S3C64XX_DOMAIN_G, S3C64XX_ACTIVE_MODE, S3C64XX_3D);
					if(s3c_wait_blk_pwr_ready(S3C64XX_BLK_G)) {
						printk("[3D] s3c_wait_blk_pwr_ready err\n");
						mutex_unlock(&pm_critical_section_lock);
						return -EFAULT;	
					}
					clk_g3d_enable();
					/*Need here??*/
					softReset_g3d();
					// printk("[3D] Power on\n");  
				}
				else
				{
					/*powered off by the system :: error*/
					printk("Error on the system :: app tries to work during sleep\n");
					mutex_unlock(&pm_critical_section_lock);
					return -EFAULT;	
				}
			}
			else
			{
				/*already powered on : nothing to do*/
				//g_G3D_SelfPowerOFF=0;
			}
		}
		else if(g_G3D_CriticalFlag < 0) 
		{
			printk("Error on the system :: g_G3D_CriticalFlag < 0\n");
		}
//		printk("S3C_3D_CRITICAL_SECTION: param_pm.criticalSection=%d\n",param_pm.criticalSection);

		if (copy_to_user((void *)arg,&param_pm,sizeof(struct s3c_3d_pm_status)))
		{
			printk("G3D: copy_to_user failed to get s3c_3d_pm_status\n");

			mutex_unlock(&pm_critical_section_lock);
			return -EFAULT;		
		}
		mutex_unlock(&pm_critical_section_lock);
#endif /* USE_G3D_DOMAIN_GATING */
		break;

	default:
		DEBUG("s3c_g3d_ioctl() : default !!\n");
		return -EINVAL;
	}
	
	return 0;
}
static int q6_config_encode(struct q6venc_dev *q6venc, uint32_t type,
			    struct init_config *init_config)
{
	struct q6_init_config *q6_init_config = &init_config->q6_init_config;
	int ret;
	int i;

	mutex_lock(&q6venc->lock);

	if (q6venc->num_enc_bufs != 0) {
		pr_err("%s: multiple sessions not supported\n", __func__);
		ret = -EBUSY;
		goto err_busy;
	}

	ret = get_buf_info(&q6venc->enc_bufs[0], &init_config->ref_frame_buf1);
	if (ret) {
		pr_err("%s: can't get ref_frame_buf1\n", __func__);
		goto err_get_ref_frame_buf1;
	}

	ret = get_buf_info(&q6venc->enc_bufs[1], &init_config->ref_frame_buf2);
	if (ret) {
		pr_err("%s: can't get ref_frame_buf2\n", __func__);
		goto err_get_ref_frame_buf2;
	}

	ret = get_buf_info(&q6venc->rlc_bufs[0], &init_config->rlc_buf1);
	if (ret) {
		pr_err("%s: can't get rlc_buf1\n", __func__);
		goto err_get_rlc_buf1;
	}

	ret = get_buf_info(&q6venc->rlc_bufs[1], &init_config->rlc_buf2);
	if (ret) {
		pr_err("%s: can't get rlc_buf2\n", __func__);
		goto err_get_rlc_buf2;
	}
	q6venc->rlc_buf_len = 2 * q6_init_config->rlc_buf_length;
	q6venc->num_enc_bufs = 2;

	q6venc->enc_buf_size =
		(q6_init_config->enc_frame_width_inmb * PIXELS_PER_MACROBLOCK) *
		(q6_init_config->enc_frame_height_inmb * PIXELS_PER_MACROBLOCK) *
		BITS_PER_PIXEL / 8;

	q6_init_config->ref_frame_buf1_phy = q6venc->enc_bufs[0].paddr;
	q6_init_config->ref_frame_buf2_phy = q6venc->enc_bufs[1].paddr;
	q6_init_config->rlc_buf1_phy = q6venc->rlc_bufs[0].paddr;
	q6_init_config->rlc_buf2_phy = q6venc->rlc_bufs[1].paddr;

	// The DSP may use the rlc_bufs during initialization, so invalidate
	// them up front before handing them to the DSP.
	for (i = 0; i < RLC_MAX_BUF_NUM; i++) {
		dmac_inv_range((const void *)q6venc->rlc_bufs[i].vaddr,
			(const void *)(q6venc->rlc_bufs[i].vaddr +
				q6venc->rlc_buf_len));
	}

	ret = dal_call_f5(q6venc->venc, type, q6_init_config,
			  sizeof(struct q6_init_config));
	if (ret) {
		pr_err("%s: rpc failed \n", __func__);
		goto err_dal_rpc_init;
	}
	mutex_unlock(&q6venc->lock);
	return 0;

err_dal_rpc_init:
	q6venc->num_enc_bufs = 0;
	put_pmem_file(q6venc->rlc_bufs[1].file);
err_get_rlc_buf2:
	put_pmem_file(q6venc->rlc_bufs[0].file);
err_get_rlc_buf1:
	put_pmem_file(q6venc->enc_bufs[1].file);
err_get_ref_frame_buf2:
	put_pmem_file(q6venc->enc_bufs[0].file);
err_get_ref_frame_buf1:
err_busy:
	mutex_unlock(&q6venc->lock);
	return ret;
}
static int ion_no_pages_cache_ops(struct ion_client *client,
                                  struct ion_handle *handle,
                                  void *vaddr,
                                  unsigned int offset, unsigned int length,
                                  unsigned int cmd)
{
    unsigned long size_to_vmap, total_size;
    int i, j, ret;
    void *ptr = NULL;
    ion_phys_addr_t buff_phys = 0;
    ion_phys_addr_t buff_phys_start = 0;
    size_t buf_length = 0;

    ret = ion_phys(client, handle, &buff_phys_start, &buf_length);
    if (ret)
        return -EINVAL;

    buff_phys = buff_phys_start;

    if (!vaddr) {
        /*
         * Split the vmalloc space into smaller regions in
         * order to clean and/or invalidate the cache.
         */
        size_to_vmap = ((VMALLOC_END - VMALLOC_START)/8);
        total_size = buf_length;

        for (i = 0; i < total_size; i += size_to_vmap) {
            size_to_vmap = min(size_to_vmap, total_size - i);
            for (j = 0; j < 10 && size_to_vmap; ++j) {
                ptr = ioremap(buff_phys, size_to_vmap);
                if (ptr) {
                    switch (cmd) {
                    case ION_HISI_CLEAN_CACHES:
                        dmac_clean_range(ptr,
                                         ptr + size_to_vmap);
                        break;
                    case ION_HISI_INV_CACHES:
                        dmac_inv_range(ptr,
                                       ptr + size_to_vmap);
                        break;
                    case ION_HISI_CLEAN_INV_CACHES:
                        dmac_flush_range(ptr,
                                         ptr + size_to_vmap);
                        break;
                    default:
                        return -EINVAL;
                    }
                    buff_phys += size_to_vmap;
                    break;
                } else {
                    size_to_vmap >>= 1;
                }
            }
            if (!ptr) {
                pr_err("Couldn't io-remap the memory\n");
                return -EINVAL;
            }
            iounmap(ptr);
        }
    } else {
void invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	dmac_inv_range((void *)vstart, (void *) (vstart + length));
	outer_inv_range(pstart, pstart + length);
}
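invalidate_caches() above is the smallest possible inner-plus-outer pairing. Its writeback twin under the same calling convention (virtual start, length, physical start) would plausibly be the following; treat it as an illustrative sketch rather than a quote of the msm tree.

/* Sketch of the symmetric clean (writeback) helper. */
void example_clean_caches(unsigned long vstart, unsigned long length,
			  unsigned long pstart)
{
	dmac_clean_range((void *)vstart, (void *)(vstart + length));
	outer_clean_range(pstart, pstart + length);
}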
int ion_system_heap_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (system_heap_has_outer_cache) {
		unsigned long pstart;
		void *vend;
		void *vtemp;
		unsigned long ln = 0;
		vend = buffer->priv_virt + buffer->size;
		vtemp = buffer->priv_virt + offset;

		if ((vtemp+length) > vend) {
			pr_err("Trying to flush outside of mapped range.\n");
			pr_err("End of mapped range: %p, trying to flush to "
				"address %p\n", vend, vtemp+length);
			WARN(1, "%s: called with heap name %s, buffer size 0x%x, "
				"vaddr 0x%p, offset 0x%x, length: 0x%x\n",
				__func__, heap->name, buffer->size, vaddr,
				offset, length);
			return -EINVAL;
		}

		for (; ln < length && vtemp < vend;
		      vtemp += PAGE_SIZE, ln += PAGE_SIZE) {
			struct page *page = vmalloc_to_page(vtemp);
			if (!page) {
				WARN(1, "Could not find page for virt. address %p\n",
					vtemp);
				return -EINVAL;
			}
			pstart = page_to_phys(page);
			/*
			 * If page -> phys is returning NULL, something
			 * has really gone wrong...
			 */
			if (!pstart) {
				WARN(1, "Could not translate %p to physical address\n",
					vtemp);
				return -EINVAL;
			}

			outer_cache_op(pstart, pstart + PAGE_SIZE);
		}
	}
	return 0;
}
Example #28
int s3c_mem_mmap(struct file* filp, struct vm_area_struct *vma)
{
	unsigned long pageFrameNo=0, size, phys_addr;

#ifdef USE_DMA_ALLOC
	unsigned long virt_addr;
#else
	unsigned long *virt_addr;
#endif

	size = vma->vm_end - vma->vm_start;

	switch (flag) {
	case MEM_ALLOC :
	case MEM_ALLOC_CACHEABLE :

#ifdef USE_DMA_ALLOC
		virt_addr = (unsigned long)dma_alloc_writecombine(NULL, size, (unsigned int *) &phys_addr, GFP_KERNEL);
#else
		virt_addr = (unsigned long *)kmalloc(size, GFP_DMA|GFP_ATOMIC);
#endif
		if (!virt_addr) {
			printk("kmalloc() failed !\n");
			return -EINVAL;
		}
		DEBUG("MMAP_KMALLOC : virt addr = 0x%08x, size = %d, %d\n", virt_addr, size, __LINE__);

#ifndef USE_DMA_ALLOC
		dmac_inv_range(virt_addr, virt_addr + (size / sizeof(unsigned long)));
		phys_addr = virt_to_phys((unsigned long *)virt_addr);
#endif
		physical_address = (unsigned int)phys_addr;

#ifdef USE_DMA_ALLOC
		virtual_address = virt_addr;
#endif
		pageFrameNo = __phys_to_pfn(phys_addr);
		break;

	case MEM_ALLOC_SHARE :
	case MEM_ALLOC_CACHEABLE_SHARE :
		DEBUG("MMAP_KMALLOC_SHARE : phys addr = 0x%08x, %d\n", physical_address, __LINE__);

		// Page frame number of the physical address to be shared.
		pageFrameNo = __phys_to_pfn(physical_address);
		DEBUG("MMAP_KMALLOC_SHARE : vma->end = 0x%08x, vma->start = 0x%08x, size = %d, %d\n", vma->vm_end, vma->vm_start, size, __LINE__);
		break;

	default :
		break;
	}

	if( (flag == MEM_ALLOC) || (flag == MEM_ALLOC_SHARE) )
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_flags |= VM_RESERVED;

	if (remap_pfn_range(vma, vma->vm_start, pageFrameNo, size, vma->vm_page_prot)) {
		printk("s3c_mem_mmap() : remap_pfn_range() failed !\n");
		return -EINVAL;
	}

	return 0;
}
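Example #28 is the kernel half of an mmap()-based allocator: the flag selects how the backing memory is obtained, and the MEM_ALLOC cases are remapped with pgprot_noncached() so userspace bypasses the CPU cache. A hypothetical userspace sequence, with the device node name being a guess:

#include <fcntl.h>
#include <sys/mman.h>

/* Hypothetical userspace view of the s3c_mem mmap path; error
 * handling trimmed for brevity. */
void *example_alloc_noncached(size_t size)
{
	int fd = open("/dev/s3c-mem", O_RDWR);	/* assumed node name */

	/* The driver's mmap handler allocates DMA-able memory and, for
	 * MEM_ALLOC, maps it non-cached into this process. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}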
Example #29
int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int ret;
	int flag = 0;
	unsigned long timeout;

	pr_debug("Starting secondary CPU %d\n", cpu);

	/* Set preset_lpj to avoid subsequent lpj recalculations */
	preset_lpj = loops_per_jiffy;

	if (cpu > 0 && cpu < ARRAY_SIZE(cold_boot_flags))
		flag = cold_boot_flags[cpu];
	else
		__WARN();

	if (per_cpu(cold_boot_done, cpu) == false) {
		ret = scm_set_boot_addr((void *)
					virt_to_phys(msm_secondary_startup),
					flag);
		if (ret == 0)
			release_secondary(cpu);
		else
			printk(KERN_DEBUG "Failed to set secondary core boot "
					  "address\n");
		per_cpu(cold_boot_done, cpu) = true;
		init_cpu_debug_counter_for_cold_boot();
	}

	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	pen_release = cpu_logical_map(cpu);
	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	gic_raise_softirq(cpumask_of(cpu), 1);

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		dmac_inv_range((void *)&pen_release,
			       (void *)(&pen_release + 1));
		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
static int q6_encode(struct q6venc_dev *q6venc, struct encode_param *enc_param)
{
	struct q6_encode_param *q6_param = &enc_param->q6_encode_param;
	struct file *file;
	struct buf_info *buf;
	int i;
	int ret;
	int rlc_buf_index;

	pr_debug("y_addr fd=%d offset=0x%08lx uv_offset=0x%08lx\n",
		 enc_param->y_addr.fd, enc_param->y_addr.offset,
		 enc_param->uv_offset);

	file = fget(enc_param->y_addr.fd);
	if (!file) {
		pr_err("%s: invalid encode buffer fd %d\n", __func__,
		       enc_param->y_addr.fd);
		return -EBADF;
	}

	mutex_lock(&q6venc->lock);

	for (i = 0; i < q6venc->num_enc_bufs; i++) {
		buf = &q6venc->enc_bufs[i];
		if (buf->file == file
		    && buf->venc_buf.offset == enc_param->y_addr.offset)
			break;
	}

	if (i == q6venc->num_enc_bufs) {
		if (q6venc->num_enc_bufs == VENC_MAX_BUF_NUM) {
			pr_err("%s: too many input buffers\n", __func__);
			ret = -ENOMEM;
			goto done;
		}

		buf = &q6venc->enc_bufs[q6venc->num_enc_bufs];
		ret = get_buf_info(buf, &enc_param->y_addr);
		if (ret) {
			pr_err("%s: can't get encode buffer\n", __func__);
			ret = -EINVAL;
			goto done;
		}

		if (!IS_ALIGNED(buf->paddr, PAGE_SIZE)) {
			pr_err("%s: input buffer not 4k aligned\n", __func__);
			put_buf_info(buf);
			ret = -EINVAL;
			goto done;
		}
		q6venc->num_enc_bufs++;
	}

	/*
	 * We must invalidate the buffer that the DSP will write to,
	 * to ensure that a dirty cache line doesn't get flushed on
	 * top of the data that the DSP is writing.
	 * Unfortunately, we have to predict which rlc_buf index the
	 * DSP is going to write to. We assume it will write to buf
	 * 0 the first time we call q6_encode, and alternate afterwards.
	 */
	rlc_buf_index = q6venc->rlc_buf_index;
	dmac_inv_range((const void *)q6venc->rlc_bufs[rlc_buf_index].vaddr,
		       (const void *)(q6venc->rlc_bufs[rlc_buf_index].vaddr +
				      q6venc->rlc_buf_len));
	q6venc->rlc_buf_index = (q6venc->rlc_buf_index + 1) % RLC_MAX_BUF_NUM;

	q6_param->luma_addr = buf->paddr;
	q6_param->chroma_addr = q6_param->luma_addr + enc_param->uv_offset;
	pr_debug("luma_addr=0x%08x chroma_addr=0x%08x\n", q6_param->luma_addr,
		 q6_param->chroma_addr);

	/*
	 * Ideally, each ioctl that passes in a data buffer would include the
	 * size of the input buffer, so we can properly flush the cache on it.
	 * Since userspace does not fill in the size fields, we have to assume
	 * the size based on the encoder configuration for now.
	 */
	flush_pmem_file(buf->file, enc_param->y_addr.offset,
		q6venc->enc_buf_size);

	ret = dal_call_f5(q6venc->venc, VENC_DALRPC_ENCODE, q6_param,
			  sizeof(struct q6_encode_param));
	if (ret) {
		pr_err("%s: encode rpc failed\n", __func__);
		goto done;
	}

	ret = 0;

done:
	mutex_unlock(&q6venc->lock);
	fput(file);
	return ret;
}
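The comment in q6_encode() about predicting the DSP's output buffer states the ordering rule that runs through all of these examples: invalidate before the device writes, because evicting a dirty line afterwards would corrupt the device's output. Boiled down to a sketch, with illustrative names:

/* Minimal restatement of the invalidate-before-device-write rule from
 * q6_encode(); buf and len are illustrative parameters. */
static void example_prepare_device_output(void *buf, size_t len)
{
	/* Drop CPU-cached lines covering the output buffer now; doing it
	 * after the device has written would race with dirty evictions. */
	dmac_inv_range(buf, buf + len);
}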