static int _lzo1x_decompress_single(uint32_t src_addr, uint32_t src_num,
				    uint32_t dst_addr, uint32_t *dst_len)
{
	uint32_t phy_src_addr, phy_dst_addr;

	*dst_len = 0;

	phy_src_addr = lzo1x_virt_to_phys(src_addr);
	phy_dst_addr = lzo1x_virt_to_phys(dst_addr);

	if ((phy_src_addr == (-1UL)) || (phy_dst_addr == (-1UL))) {
		printk("lzo1x: physical address lookup failed\n");
		return -1;
	}

	spin_lock(&zipdec_lock);
	ZipDec_SetSrcCfg(0, phy_src_addr, src_num);
	ZipDec_SetDestCfg(0, phy_dst_addr, ZIP_WORK_LENGTH);

	dmac_flush_range((void *)src_addr, (void *)(src_addr + src_num));
	dmac_flush_range((void *)dst_addr, (void *)(dst_addr + ZIP_WORK_LENGTH));

	ZipDec_Run();
	spin_unlock(&zipdec_lock);

	if (Zip_Dec_Wait(ZIPDEC_DONE_INT))
		return -1;

	*dst_len = ZIP_WORK_LENGTH;
	return 0;
}
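
Most of the examples that follow pair an inner-cache operation keyed by virtual address with an outer-cache operation keyed by physical address. As a reading aid, that recurring pairing can be factored into one helper. The sketch below is illustrative only, not code from any of the drivers shown; it assumes a kernel-linear (lowmem) buffer so that __pa() is valid, and the header locations vary by kernel version:

#include <asm/cacheflush.h>	/* dmac_flush_range() */
#include <asm/outercache.h>	/* outer_flush_range() */

/* Hypothetical helper: clean+invalidate [va, va+len) through both cache
 * levels before handing the buffer to a non-coherent bus master. */
static inline void flush_buf_for_dma(void *va, size_t len)
{
	/* Inner (L1) maintenance is done by virtual address. */
	dmac_flush_range(va, va + len);
	/* Outer (e.g. PL310 L2) maintenance is done by physical address;
	 * this compiles to a no-op when no outer cache is configured. */
	outer_flush_range(__pa(va), __pa(va) + len);
}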
Example #2
static void __dma_clear_buffer(struct page *page, size_t size)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (!PageHighMem(page)) {
		void *ptr = page_address(page);
		if (ptr) {
			memset(ptr, 0, size);
			dmac_flush_range(ptr, ptr + size);
			outer_flush_range(__pa(ptr), __pa(ptr) + size);
		}
	} else {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		outer_flush_range(base, end);
	}
}
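
Note the split in Example #2: lowmem pages are cleared and flushed through the kernel's permanent linear mapping, while highmem pages have no such mapping and must be handled one at a time with kmap_atomic(). The inner flush therefore runs per page, but the outer flush can still cover the whole physically contiguous range with a single outer_flush_range() call.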
Example #3
/* Function to write back invalidate the Cache module */
Void Cache_wbInv(Ptr blockPtr, UInt32 byteCnt, Bits16 type, Bool wait) {
    GT_4trace (curTrace, GT_ENTER, "Cache_wbInv", blockPtr, byteCnt, type, wait);
#if 0

/*
 * It appears that this #if 0'ed code doesn't actually perform the
 * invalidate part of the wbInv, and it appears that Cache_wb() and Cache_inv()
 * work properly, so for now we implement wbInv as a combination of the two
 * individual functions.
*/

#ifdef USE_CACHE_VOID_ARG
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)
                dmac_map_area(blockPtr, (size_t)byteCnt, DMA_BIDIRECTIONAL);
                outer_flush_range(__pa((UInt32)blockPtr),
                                  __pa((UInt32)(blockPtr+byteCnt)) );
#else
                dmac_flush_range(blockPtr, (blockPtr+byteCnt) );
#endif
#else
                dmac_flush_range( (UInt32)blockPtr, (UInt32)(blockPtr + byteCnt) );
#endif

#else

    Cache_wb(blockPtr, byteCnt, type, wait);
    Cache_inv(blockPtr, byteCnt, type, wait);

#endif

    GT_0trace (curTrace, GT_LEAVE, "Cache_wbInv");
}
Example #4
int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt)
{
	struct scm_desc desc = {0};
	int ret;
	char *tzbuf_key = (char *)ice_key;
	char *tzbuf_salt = (char *)ice_salt;

	uint32_t smc_id = 0;
	u32 tzbuflen_key = sizeof(ice_key);
	u32 tzbuflen_salt = sizeof(ice_salt);

	if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX)
		return -EINVAL;

	if (!key || !salt)
		return -EINVAL;

	if (!tzbuf_key || !tzbuf_salt)
		return -ENOMEM;

	memset(tzbuf_key, 0, tzbuflen_key);
	memset(tzbuf_salt, 0, tzbuflen_salt);

	memcpy(ice_key, key, tzbuflen_key);
	memcpy(ice_salt, salt, tzbuflen_salt);

	dmac_flush_range(tzbuf_key, tzbuf_key + tzbuflen_key);
	dmac_flush_range(tzbuf_salt, tzbuf_salt + tzbuflen_salt);

	smc_id = TZ_ES_SET_ICE_KEY_ID;
	pr_debug(" %s , smc_id = 0x%x\n", __func__, smc_id);

	desc.arginfo = TZ_ES_SET_ICE_KEY_PARAM_ID;
	desc.args[0] = index;
	desc.args[1] = virt_to_phys(tzbuf_key);
	desc.args[2] = tzbuflen_key;
	desc.args[3] = virt_to_phys(tzbuf_salt);
	desc.args[4] = tzbuflen_salt;

	ret = scm_call2_atomic(smc_id, &desc);
	pr_debug(" %s , ret = %d\n", __func__, ret);
	if (ret) {
		pr_err("%s: Error: 0x%x\n", __func__, ret);

		smc_id = TZ_ES_INVALIDATE_ICE_KEY_ID;
		desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID;
		desc.args[0] = index;
		scm_call2_atomic(smc_id, &desc);
	}

	return ret;
}
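
Example #4 shows the usual recipe for handing data to TrustZone: the key and salt are staged in driver-owned buffers, pushed out of the CPU caches with dmac_flush_range(), and passed to the secure world by physical address via virt_to_phys(), since the secure side may read the buffers without going through the normal-world caches. The tz_init() routine in Example #27 applies the same recipe to a kzalloc()ed staging buffer.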
Example #5
static int32_t isp_binging4awb_buf_alloc(struct isp_k_private *isp_private, uint32_t len)
{
	int32_t ret = 0x00;
#ifndef CONFIG_64BIT
	uint32_t buf = 0x00;
	void *ptr = NULL;
#endif

	if (0x00 < len) {
		isp_private->bing4awb_buf_len = len;
		isp_private->bing4awb_buf_order = get_order(len);
		isp_private->bing4awb_buf_addr = (unsigned long)__get_free_pages(GFP_KERNEL | __GFP_COMP,
			isp_private->bing4awb_buf_order);
		if (NULL == (void *)isp_private->bing4awb_buf_addr) {
			printk("isp_binging4awb_buf_alloc: memory error, addr:0x%lx, len:0x%x, order:0x%x.\n",
				isp_private->bing4awb_buf_addr,
				isp_private->bing4awb_buf_len,
				isp_private->bing4awb_buf_order);
			return -1;
		}
	#ifndef CONFIG_64BIT
		ptr = (void *)isp_private->bing4awb_buf_addr;
		buf = virt_to_phys((volatile void *)isp_private->bing4awb_buf_addr);
		dmac_flush_range(ptr, ptr + len);
		outer_flush_range(__pa(ptr), __pa(ptr) + len);
	#endif
	}

	return ret;
}
Example #6
int ion_cma_cache_ops(struct ion_heap *heap,
			struct ion_buffer *buffer, void *vaddr,
			unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (cma_heap_has_outer_cache) {
		struct ion_cma_buffer_info *info = buffer->priv_virt;

		outer_cache_op(info->handle, info->handle + length);
	}

	return 0;
}
Example #7
static inline void platform_do_lowpower(unsigned int cpu)
{
	/* Just enter wfi for now. TODO: Properly shut off the cpu. */
	for (;;) {

		msm_pm_cpu_enter_lowpower(cpu);
		if (pen_release == cpu) {
			/*
			 * OK, proper wakeup, we're done
			 */
			pen_release = -1;
			dmac_flush_range((void *)&pen_release,
				(void *)(&pen_release + 1));
			break;
		}

		/*
		 * getting here, means that we have come out of WFI without
		 * having been woken up - this shouldn't happen
		 *
		 * The trouble is, letting people know about this is not really
		 * possible, since we are currently running incoherently, and
		 * therefore cannot safely call printk() or anything else
		 */
		dmac_inv_range((void *)&pen_release,
			       (void *)(&pen_release + 1));
		pr_debug("CPU%u: spurious wakeup call\n", cpu);
	}
}
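
Example #7 is the CPU-offline side of the classic pen_release handshake; Examples #22 and #29 show the boot-time side. In both directions the writer flushes pen_release to main memory with dmac_flush_range() right after updating it, and the poller invalidates its cached copy with dmac_inv_range() before each re-read, because the secondary CPU may be running incoherently at this point (as the comment above notes, it cannot even call printk() safely).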
Example #8
/*
 *  ======== MEM_FlushCache ========
 *  Purpose:
 *      Flush cache
 */
void MEM_FlushCache(void *pMemBuf, u32 cBytes, s32 FlushType)
{
	if (cRefs <= 0 || !pMemBuf)
		goto func_end;
	switch (FlushType) {
	/* invalidate only */
	case PROC_INVALIDATE_MEM:
		dmac_inv_range(pMemBuf, pMemBuf + cBytes);
		outer_inv_range(__pa((u32)pMemBuf), __pa((u32)pMemBuf +
				cBytes));
	break;
	/* writeback only */
	case PROC_WRITEBACK_MEM:
		dmac_clean_range(pMemBuf, pMemBuf + cBytes);
		outer_clean_range(__pa((u32)pMemBuf), __pa((u32)pMemBuf +
				  cBytes));
	break;
	/* writeback and invalidate */
	case PROC_WRITEBACK_INVALIDATE_MEM:
		dmac_flush_range(pMemBuf, pMemBuf + cBytes);
		outer_flush_range(__pa((u32)pMemBuf), __pa((u32)pMemBuf +
				  cBytes));
	break;
	default:
		GT_1trace(MEM_debugMask, GT_6CLASS, "MEM_FlushCache: invalid "
			  "FlushMemType 0x%x\n", FlushType);
	break;
	}
func_end:
	return;
}
Example #9
SDIO_Status SDIO_SyncRead(SDIO_Handle Handle, SDIO_Request_t *Req)
{
    struct sdio_func *func = (struct sdio_func *)Handle;
    int rc;
    void *tgt = Req->buffer;

    if (Req->buffer_len >= DMA_THRESHOLD_SIZE) {
        if (is_vmalloc_addr(tgt)) {
            if (!spans_page(tgt, Req->buffer_len)) {
                tgt = vmalloc_to_unity(tgt);
                dmac_flush_range(Req->buffer,
                                 Req->buffer + Req->buffer_len);
            } else
                tgt = sdio_dma_ptr;
        }
    }

    if ((rc = sdio_memcpy_fromio(func, tgt, Req->peripheral_addr,
                                 Req->buffer_len))) {
        printk(KERN_ERR "%s: failed (%d)\n", __func__, rc);
        return SDIO_FAILURE;
    }

    if (tgt == sdio_dma_ptr)
        memcpy(Req->buffer, sdio_dma_ptr, Req->buffer_len);

    return SDIO_SUCCESS;
}
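
The flush in Example #9 is an aliasing precaution: when a vmalloc buffer does not span a page boundary, the driver substitutes a unity-mapped alias (vmalloc_to_unity()) as the DMA target, so the cache lines of the original vmalloc mapping must be flushed first or the caller could later read stale data through that mapping. Buffers that do span pages are bounced through sdio_dma_ptr and copied back after the transfer.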
Example #10
static int omap_tiler_alloc_dynamicpages(struct omap_tiler_info *info)
{
	int i;
	int ret;
	struct page *pg;

	for (i = 0; i < info->n_phys_pages; i++) {
		pg = alloc_page(GFP_KERNEL | GFP_DMA | GFP_HIGHUSER);
		if (!pg) {
			ret = -ENOMEM;
			pr_err("%s: alloc_page failed\n",
				__func__);
			goto err_page_alloc;
		}
		info->phys_addrs[i] = page_to_phys(pg);
		dmac_flush_range((void *)page_address(pg),
			(void *)page_address(pg) + PAGE_SIZE);
		outer_flush_range(info->phys_addrs[i],
			info->phys_addrs[i] + PAGE_SIZE);
	}
	return 0;

err_page_alloc:
	for (i -= 1; i >= 0; i--) {
		pg = phys_to_page(info->phys_addrs[i]);
		__free_page(pg);
	}
	return ret;
}
Example #11
void nvmap_kunmap(struct nvmap_handle_ref *ref, unsigned int pagenum,
		  void *addr)
{
	struct nvmap_handle *h;
	phys_addr_t paddr;
	pte_t **pte;

	BUG_ON(!addr || !ref);
	h = ref->handle;

	if (nvmap_find_cache_maint_op(h->dev, h)) {
		struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
		/* acquire pin lock to ensure maintenance is done before
		 * handle is pinned */
		mutex_lock(&share->pin_lock);
		nvmap_cache_maint_ops_flush(h->dev, h);
		mutex_unlock(&share->pin_lock);
	}

	if (h->heap_pgalloc)
		paddr = page_to_phys(h->pgalloc.pages[pagenum]);
	else
		paddr = h->carveout->base + pagenum * PAGE_SIZE;

	if (h->flags != NVMAP_HANDLE_UNCACHEABLE &&
	    h->flags != NVMAP_HANDLE_WRITE_COMBINE) {
		dmac_flush_range(addr, addr + PAGE_SIZE);
		outer_flush_range(paddr, paddr + PAGE_SIZE);
	}

	pte = nvmap_vaddr_to_pte(nvmap_dev, (unsigned long)addr);
	nvmap_free_pte(nvmap_dev, pte);
	nvmap_handle_put(h);
}
Example #12
/* Check protocol version returned by the PA */
static u32 tf_rpc_init(struct tf_comm *comm)
{
	u32 protocol_version;
	u32 rpc_error = RPC_SUCCESS;

	dpr_info("%s(%p)\n", __func__, comm);

	spin_lock(&(comm->lock));

#if 0
	dmac_flush_range((void *)comm->l1_buffer,
		(void *)(((u32)(comm->l1_buffer)) + PAGE_SIZE));
	outer_inv_range(__pa(comm->l1_buffer),
		__pa(comm->l1_buffer) +  PAGE_SIZE);
#endif
	protocol_version = comm->l1_buffer->protocol_version;

	if ((GET_PROTOCOL_MAJOR_VERSION(protocol_version))
			!= TF_S_PROTOCOL_MAJOR_VERSION) {
		dpr_err("SMC: Unsupported SMC Protocol PA Major "
			"Version (0x%02x, expected 0x%02x)!\n",
			GET_PROTOCOL_MAJOR_VERSION(protocol_version),
			TF_S_PROTOCOL_MAJOR_VERSION);
		rpc_error = RPC_ERROR_CONNECTION_PROTOCOL;
	} else {
		rpc_error = RPC_SUCCESS;
	}

	spin_unlock(&(comm->lock));

	return rpc_error;
}
Example #13
/* Check protocol version returned by the PA */
static u32 tf_rpc_init(struct tf_comm *comm)
{
	u32 protocol_version;
	u32 rpc_error = RPC_SUCCESS;

	dprintk(KERN_INFO "tf_rpc_init(%p)\n", comm);

	spin_lock(&(comm->lock));

	dmac_flush_range((void *)comm->init_shared_buffer,
		(void *)(((u32)(comm->init_shared_buffer)) + PAGE_SIZE));
	outer_inv_range(__pa(comm->init_shared_buffer),
		__pa(comm->init_shared_buffer) +  PAGE_SIZE);

	protocol_version = ((struct tf_init_buffer *)
				(comm->init_shared_buffer))->protocol_version;

	if ((GET_PROTOCOL_MAJOR_VERSION(protocol_version))
			!= TF_S_PROTOCOL_MAJOR_VERSION) {
		dprintk(KERN_ERR "SMC: Unsupported SMC Protocol PA Major "
			"Version (0x%02x, expected 0x%02x)!\n",
			GET_PROTOCOL_MAJOR_VERSION(protocol_version),
			TF_S_PROTOCOL_MAJOR_VERSION);
		rpc_error = RPC_ERROR_CONNECTION_PROTOCOL;
	} else {
		rpc_error = RPC_SUCCESS;
	}

	spin_unlock(&(comm->lock));

	register_smc_public_crypto_digest();
	register_smc_public_crypto_aes();

	return rpc_error;
}
Example #14
int ion_cp_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
	struct ion_cp_heap *cp_heap =
	     container_of(heap, struct  ion_cp_heap, heap);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (cp_heap->has_outer_cache) {
		unsigned long pstart = buffer->priv_phys + offset;
		outer_cache_op(pstart, pstart + length);
	}
	return 0;
}
Example #15
int ion_cp_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct  ion_cp_heap, heap);
	unsigned int size_to_vmap, total_size;
	int i, j;
	void *ptr = NULL;
	ion_phys_addr_t buff_phys = buffer->priv_phys;

	if (!vaddr) {
		/*
		 * Split the vmalloc space into smaller regions in
		 * order to clean and/or invalidate the cache.
		 */
		size_to_vmap = (VMALLOC_END - VMALLOC_START)/8;
		total_size = buffer->size;
		for (i = 0; i < total_size; i += size_to_vmap) {
			size_to_vmap = min(size_to_vmap, total_size - i);
			for (j = 0; j < 10 && size_to_vmap; ++j) {
				ptr = ioremap(buff_phys, size_to_vmap);
				if (ptr) {
					switch (cmd) {
					case ION_IOC_CLEAN_CACHES:
						dmac_clean_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_clean_range;
						break;
					case ION_IOC_INV_CACHES:
						dmac_inv_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_inv_range;
						break;
					case ION_IOC_CLEAN_INV_CACHES:
						dmac_flush_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_flush_range;
						break;
					default:
						return -EINVAL;
					}
					buff_phys += size_to_vmap;
					break;
				} else {
					size_to_vmap >>= 1;
				}
			}
			if (!ptr) {
				pr_err("Couldn't io-remap the memory\n");
				return -EINVAL;
			}
			iounmap(ptr);
		}
	} else {
Example #16
/* Function to write back invalidate the Cache module */
Void Cache_wbInv(Ptr blockPtr, UInt32 byteCnt, Bits16 type, Bool wait) {
    GT_4trace (curTrace, GT_ENTER, "Cache_wbInv", blockPtr, byteCnt, type, wait);

#ifdef USE_CACHE_VOID_ARG
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)
                dmac_map_area(blockPtr, (size_t)byteCnt, DMA_BIDIRECTIONAL);
                outer_flush_range(__pa((UInt32)blockPtr),
                                  __pa((UInt32)(blockPtr+byteCnt)) );
#else
                dmac_flush_range(blockPtr, (blockPtr+byteCnt) );
#endif
#else
                dmac_flush_range( (UInt32)blockPtr, (UInt32)(blockPtr + byteCnt) );
#endif

    GT_0trace (curTrace, GT_LEAVE, "Cache_wbInv");
}
Example #17
int memory_engine_cache(memory_engine_t *engine, uint cmd,
				shm_driver_operation_t op)
{
	int res = 0;
	memory_node_t  *node;
	char tag_clean[] = "clean";
	char tag_invalidate[] = "invalidate";
	char tag_cleanAndinvalidate[] = "clean and invalidate";
	char *ptr_tag;
	if (engine == NULL) {
		return -EINVAL;
	}
	down(&(engine->m_mutex));

	node = memory_engine_lookup_shm_node_for_cache(&(engine->m_shm_root),
						op.m_param3, op.m_param2);
	if ((node == NULL) || (node->m_next_free != NULL)) {
		res = 0;
		if (cmd == SHM_DEVICE_CMD_INVALIDATE) {
			ptr_tag = tag_invalidate;
		} else if (cmd == SHM_DEVICE_CMD_CLEAN) {
			ptr_tag = tag_clean;
		} else {
			ptr_tag = tag_cleanAndinvalidate;
		}

		up(&(engine->m_mutex));
		return res;
	}
	up(&(engine->m_mutex));

	switch (cmd) {
	case SHM_DEVICE_CMD_INVALIDATE:
		dmac_map_area((const void *)op.m_param1,
			      op.m_param2, DMA_FROM_DEVICE);
		outer_inv_range(op.m_param3,
				op.m_param3 + op.m_param2);
		break;
	case SHM_DEVICE_CMD_CLEAN:
		dmac_map_area((const void *)op.m_param1,
			      op.m_param2, DMA_TO_DEVICE);
		outer_clean_range(op.m_param3,
				  op.m_param3 + op.m_param2);
		break;
	case SHM_DEVICE_CMD_CLEANANDINVALIDATE:
		dmac_flush_range((const void *)op.m_param1,
				 (const void *)(op.m_param1 +
						op.m_param2));
		outer_flush_range(op.m_param3,
				  op.m_param3 + op.m_param2);
		break;
	default:
		res = -ENOTTY;
	}

	return res;
}
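
Examples #17, #21 and #23 mix dmac_map_area()/dmac_unmap_area() with dmac_flush_range(). On ARMv7 the correspondence is: map_area with DMA_TO_DEVICE cleans (writes back); map_area or unmap_area with DMA_FROM_DEVICE invalidates; unmap_area with DMA_TO_DEVICE is a no-op; and dmac_flush_range() does both. A hedged sketch of the resulting inner-cache dispatch, assuming those ARMv7 semantics and reusing the SHM_DEVICE_CMD_* codes from the example above:

/* Sketch only: inner-cache (L1) maintenance selected by command code. */
static void l1_cache_op(const void *va, size_t len, unsigned int cmd)
{
	switch (cmd) {
	case SHM_DEVICE_CMD_CLEAN:			/* writeback only */
		dmac_map_area(va, len, DMA_TO_DEVICE);
		break;
	case SHM_DEVICE_CMD_INVALIDATE:			/* discard only */
		dmac_unmap_area(va, len, DMA_FROM_DEVICE);
		break;
	case SHM_DEVICE_CMD_CLEANANDINVALIDATE:		/* writeback + discard */
		dmac_flush_range(va, va + len);
		break;
	}
}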
Example #18
int mv_dma_init(void)
{
#if defined(CONFIG_MV78200) || defined(CONFIG_MV632X)
	if (MV_FALSE == mvSocUnitIsMappedToThisCpu(IDMA))
	{
		printk(KERN_INFO"IDMA is not mapped to this CPU\n");
		return -ENODEV;
	}
#endif
	printk(KERN_INFO "Use IDMA channels %d and %d for enhancing the following functions:\n",
	       CPY_CHAN1, CPY_CHAN2);
#ifdef CONFIG_MV_IDMA_COPYUSER
	printk(KERN_INFO "  o Copy From/To user space operations.\n");
#endif
#ifdef CONFIG_MV_IDMA_MEMCOPY
	printk(KERN_INFO "  o memcpy() and memmove() operations.\n");
#endif
#ifdef CONFIG_MV_IDMA_MEMZERO
	printk(KERN_INFO "  o memzero() operations.\n");
#endif

#ifdef CONFIG_MV_IDMA_MEMZERO
	DPRINTK(KERN_ERR "ZERO buffer address 0x%08x\n", (u32)dmaMemInitBuff);

	asm_memzero(dmaMemInitBuff, sizeof(dmaMemInitBuff));
	dmac_flush_range(dmaMemInitBuff, dmaMemInitBuff + sizeof(dmaMemInitBuff));
#endif

	MV_REG_WRITE(IDMA_BYTE_COUNT_REG(CPY_CHAN1), 0);
	MV_REG_WRITE(IDMA_CURR_DESC_PTR_REG(CPY_CHAN1), 0);
	MV_REG_WRITE(IDMA_CTRL_HIGH_REG(CPY_CHAN1), ICCHR_ENDIAN_LITTLE
#ifdef MV_CPU_LE
		     | ICCHR_DESC_BYTE_SWAP_EN
#endif
		     );
	MV_REG_WRITE(IDMA_CTRL_LOW_REG(CPY_CHAN1), CPY_IDMA_CTRL_LOW_VALUE);

	MV_REG_WRITE(IDMA_BYTE_COUNT_REG(CPY_CHAN2), 0);
	MV_REG_WRITE(IDMA_CURR_DESC_PTR_REG(CPY_CHAN2), 0);
	MV_REG_WRITE(IDMA_CTRL_HIGH_REG(CPY_CHAN2), ICCHR_ENDIAN_LITTLE
#ifdef MV_CPU_LE
		     | ICCHR_DESC_BYTE_SWAP_EN
#endif
		     );
	MV_REG_WRITE(IDMA_CTRL_LOW_REG(CPY_CHAN2), CPY_IDMA_CTRL_LOW_VALUE);

	current_dma_channel = CPY_CHAN1;
	dma_proc_entry = create_proc_entry("dma_copy", S_IFREG | S_IRUGO, 0);
	if (dma_proc_entry) {
		dma_proc_entry->read_proc = dma_read_proc;
		dma_proc_entry->nlink = 1;
	}

	idma_init = 1;

	return 0;
}
Example #19
void dmmupwl_flush_cache(void *virAddr, IM_UINT32 phyAddr, IM_INT32 size)
{
	IM_INFOMSG((IM_STR("%s(virAddr=0x%x, phyAddr=0x%x, size=%d)"), IM_STR(_IM_FUNC_), (IM_INT32)virAddr, phyAddr, size));

	// Flush L1 using virtual address.
	dmac_flush_range(virAddr, (void *)(virAddr + size - 1));

	// Flush L2 using physical address.
	outer_flush_range(phyAddr, phyAddr + size - 1);	
}
Example #20
static void __dma_clear_buffer(struct page *page, size_t size)
{
	void *ptr;
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	memset(ptr, 0, size);
	dmac_flush_range(ptr, ptr + size);
	outer_flush_range(__pa(ptr), __pa(ptr) + size);
}
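
This is the simple, pre-highmem form of __dma_clear_buffer(); compare Examples #2 and #25, which add a PageHighMem() path that clears and flushes one kmap_atomic() page at a time for allocations without a permanent kernel mapping.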
Example #21
static void cacheperf(void *vbuf, enum cachemaintenance id)
{
	struct timespec beforets;
	struct timespec afterts;
	phys_addr_t pbuf = virt_to_phys(vbuf);
	u32 pbufend, xfer_size, i;
	long timeval;

	xfer_size = START_SIZE;
	while (xfer_size <= END_SIZE) {
		pbufend = pbuf + xfer_size;
		timeval = 0;

		for (i = 0; i < try_cnt; i++) {
			memset(vbuf, i, xfer_size);
			getnstimeofday(&beforets);

			switch (id) {
			case CM_CLEAN:
				if (l1)
					dmac_map_area(vbuf, xfer_size,
							DMA_TO_DEVICE);
				if (l2)
					outer_clean_range(pbuf, pbufend);
				break;
			case CM_INV:
				if (l2)
					outer_inv_range(pbuf, pbufend);
				if (l1)
					dmac_unmap_area(vbuf, xfer_size,
							DMA_FROM_DEVICE);
				break;
			case CM_FLUSH:
				if (l1)
					dmac_flush_range(vbuf,
					(void *)((u32) vbuf + xfer_size));
				if (l2)
					outer_flush_range(pbuf, pbufend);
				break;
			case CM_FLUSHALL:
				if (l1)
					flush_cache_all();
				if (l2)
					outer_flush_all();
				break;
			}
			getnstimeofday(&afterts);
			timeval += update_timeval(beforets, afterts);
		}
		printk(KERN_INFO "%lu\n", timeval/try_cnt);
		xfer_size *= 2;
	}
}
Example #22
/* Executed by primary CPU, brings other CPUs out of reset. Called at boot
   as well as when a CPU is coming out of shutdown induced by echo 0 >
   /sys/devices/.../cpuX.
*/
int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	static int cold_boot_done;
	int cnt = 0;
	int ret;

	pr_debug("Starting secondary CPU %d\n", cpu);

	/* Set preset_lpj to avoid subsequent lpj recalculations */
	preset_lpj = loops_per_jiffy;

	if (cold_boot_done == false) {
		ret = scm_set_boot_addr((void *)
					virt_to_phys(msm_secondary_startup),
					SCM_FLAG_COLDBOOT_CPU1);
		if (ret == 0) {
			void *sc1_base_ptr;
			sc1_base_ptr = ioremap_nocache(0x00902000, SZ_4K*2);
			if (sc1_base_ptr) {
				writel(0x0, sc1_base_ptr+0x15A0);
				dmb();
				writel(0x0, sc1_base_ptr+0xD80);
				writel(0x3, sc1_base_ptr+0xE64);
				dsb();
				iounmap(sc1_base_ptr);
			}
		} else
			printk(KERN_DEBUG "Failed to set secondary core boot "
					  "address\n");
		cold_boot_done = true;
	}

	pen_release = cpu;
	dmac_flush_range((void *)&pen_release,
			 (void *)(&pen_release + 1));
	__asm__("sev");
	dsb();

	/* Use smp_cross_call() to send a soft interrupt to wake up
	 * the other core.
	 */
	smp_cross_call(cpumask_of(cpu));

	while (pen_release != 0xFFFFFFFF) {
		dmac_inv_range((void *)&pen_release,
			       (void *)(&pen_release + 1));
		usleep(500);
		if (cnt++ >= 10)
			break;
	}

	return 0;
}
Example #23
static void cache_maint_inner(void *vaddr, size_t size, enum cacheop op)
{
	switch (op) {
		case EM_CLEAN:
			dmac_map_area(vaddr, size, DMA_TO_DEVICE);
			break;
		case EM_INV:
			dmac_unmap_area(vaddr, size, DMA_FROM_DEVICE);
			break;
		case EM_FLUSH:
			dmac_flush_range(vaddr, vaddr + size);
	}
}
Example #24
static long kgsl_cache_range_op(unsigned long addr, int size,
					unsigned int flags)
{
#ifdef CONFIG_OUTER_CACHE
	unsigned long end;
#endif
	BUG_ON(addr & (KGSL_PAGESIZE - 1));
	BUG_ON(size & (KGSL_PAGESIZE - 1));

	if (flags & KGSL_CACHE_FLUSH)
		dmac_flush_range((const void *)addr,
				(const void *)(addr + size));
	else
		if (flags & KGSL_CACHE_CLEAN)
			dmac_clean_range((const void *)addr,
					(const void *)(addr + size));
		else
			dmac_inv_range((const void *)addr,
					(const void *)(addr + size));

#ifdef CONFIG_OUTER_CACHE
	for (end = addr; end < (addr + size); end += KGSL_PAGESIZE) {
		pte_t *pte_ptr, pte;
		unsigned long physaddr;
		if (flags & KGSL_CACHE_VMALLOC_ADDR)
			physaddr = vmalloc_to_pfn((void *)end);
		else
			if (flags & KGSL_CACHE_USER_ADDR) {
				pte_ptr = kgsl_get_pte_from_vaddr(end);
				if (!pte_ptr)
					return -EINVAL;
				pte = *pte_ptr;
				physaddr = pte_pfn(pte);
				pte_unmap(pte_ptr);
			} else
				return -EINVAL;

		physaddr <<= PAGE_SHIFT;
		if (flags & KGSL_CACHE_FLUSH)
			outer_flush_range(physaddr, physaddr + KGSL_PAGESIZE);
		else
			if (flags & KGSL_CACHE_CLEAN)
				outer_clean_range(physaddr,
					physaddr + KGSL_PAGESIZE);
			else
				outer_inv_range(physaddr,
					physaddr + KGSL_PAGESIZE);
	}
#endif
	return 0;
}
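
The per-page loop in the CONFIG_OUTER_CACHE section of Example #24 exists because the outer_*_range() operations take physical addresses, while vmalloc and user buffers are only virtually contiguous; each KGSL_PAGESIZE chunk is therefore translated separately, via vmalloc_to_pfn() or a PTE walk. Example #26 is the same routine with the translation done through kgsl_virtaddr_to_physaddr().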
Example #25
static void __dma_clear_buffer(struct page *page, size_t size)
{
	if (!PageHighMem(page)) {
		void *ptr = page_address(page);
		if (ptr) {
			memset(ptr, 0, size);
			dmac_flush_range(ptr, ptr + size);
			outer_flush_range(__pa(ptr), __pa(ptr) + size);
		}
	} else {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		outer_flush_range(base, end);
	}
}
Example #26
static long kgsl_cache_range_op(unsigned long addr, int size,
					unsigned int flags)
{
#ifdef CONFIG_OUTER_CACHE
	unsigned long end;
#endif
	BUG_ON(addr & (KGSL_PAGESIZE - 1));
	BUG_ON(size & (KGSL_PAGESIZE - 1));

	if (flags & KGSL_MEMFLAGS_CACHE_FLUSH)
		dmac_flush_range((const void *)addr,
				(const void *)(addr + size));
	else
		if (flags & KGSL_MEMFLAGS_CACHE_CLEAN)
			dmac_clean_range((const void *)addr,
					(const void *)(addr + size));
		else if (flags & KGSL_MEMFLAGS_CACHE_INV)
			dmac_inv_range((const void *)addr,
					(const void *)(addr + size));

#ifdef CONFIG_OUTER_CACHE
	for (end = addr; end < (addr + size); end += KGSL_PAGESIZE) {
		unsigned long physaddr;
		if (flags & KGSL_MEMFLAGS_VMALLOC_MEM)
			physaddr = page_to_phys(vmalloc_to_page((void *) end));
		else
			if (flags & KGSL_MEMFLAGS_HOSTADDR) {
				physaddr = kgsl_virtaddr_to_physaddr(end);
				if (!physaddr) {
					KGSL_MEM_ERR
					("Unable to find physaddr for "
					"address: %x\n", (unsigned int)end);
					return -EINVAL;
				}
			} else
				return -EINVAL;

		if (flags & KGSL_MEMFLAGS_CACHE_FLUSH)
			outer_flush_range(physaddr, physaddr + KGSL_PAGESIZE);
		else
			if (flags & KGSL_MEMFLAGS_CACHE_CLEAN)
				outer_clean_range(physaddr,
					physaddr + KGSL_PAGESIZE);
			else if (flags & KGSL_MEMFLAGS_CACHE_INV)
				outer_inv_range(physaddr,
					physaddr + KGSL_PAGESIZE);
	}
#endif
	return 0;
}
Example #27
static int tz_init(struct devfreq_msm_adreno_tz_data *priv,
			unsigned int *tz_pwrlevels, u32 size_pwrlevels,
			unsigned int *version, u32 size_version)
{
	int ret;
	/* Make sure all CMD IDs are available */
	if (scm_is_call_available(SCM_SVC_DCVS, TZ_INIT_ID)) {
		ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID, tz_pwrlevels,
				size_pwrlevels, NULL, 0);
		*version = 0;

	} else if (scm_is_call_available(SCM_SVC_DCVS, TZ_INIT_ID_64) &&
			scm_is_call_available(SCM_SVC_DCVS, TZ_UPDATE_ID_64) &&
			scm_is_call_available(SCM_SVC_DCVS, TZ_RESET_ID_64)) {
		struct scm_desc desc = {0};
		unsigned int *tz_buf;

		if (!is_scm_armv8()) {
			ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID_64,
				       tz_pwrlevels, size_pwrlevels,
				       version, size_version);
			if (!ret)
				priv->is_64 = true;
			return ret;
		}

		tz_buf = kzalloc(PAGE_ALIGN(size_pwrlevels), GFP_KERNEL);
		if (!tz_buf)
			return -ENOMEM;
		memcpy(tz_buf, tz_pwrlevels, size_pwrlevels);
		/* Ensure memcpy completes execution */
		mb();
		dmac_flush_range(tz_buf, tz_buf + PAGE_ALIGN(size_pwrlevels));

		desc.args[0] = virt_to_phys(tz_buf);
		desc.args[1] = size_pwrlevels;
		desc.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);

		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_DCVS, TZ_V2_INIT_ID_64),
				&desc);
		*version = desc.ret[0];
		if (!ret)
			priv->is_64 = true;
		kzfree(tz_buf);
	} else
		ret = -EINVAL;

	return ret;
}
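
When the platform uses the ARMv8 SMC calling convention (is_scm_armv8()), tz_init() shows the full marshaling sequence: copy the power-level table into a kzalloc()ed staging buffer, order the copy with mb(), push it to main memory with dmac_flush_range(), and pass virt_to_phys() of the buffer as an SCM_RW argument, mirroring the staging pattern of Example #4.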
Example #28
/*
 * Function responsible for formatting parameters to pass from NS world to
 * S world
 */
u32 omap4_secure_dispatcher(u32 app_id, u32 flags, u32 nargs,
	u32 arg1, u32 arg2, u32 arg3, u32 arg4)
{
	u32 ret;
	unsigned long iflags;
	u32 pub2sec_args[5] = {0, 0, 0, 0, 0};

	/*dpr_info("%s: app_id=0x%08x, flags=0x%08x, nargs=%u\n",
		__func__, app_id, flags, nargs);*/

	/*if (nargs != 0)
		dpr_info("%s: args=%08x, %08x, %08x, %08x\n",
			__func__, arg1, arg2, arg3, arg4);*/

	pub2sec_args[0] = nargs;
	pub2sec_args[1] = arg1;
	pub2sec_args[2] = arg2;
	pub2sec_args[3] = arg3;
	pub2sec_args[4] = arg4;

	/* Make sure parameters are visible to the secure world */
	dmac_flush_range((void *)pub2sec_args,
		(void *)(((u32)(pub2sec_args)) + 5*sizeof(u32)));
	outer_clean_range(__pa(pub2sec_args),
		__pa(pub2sec_args) + 5*sizeof(u32));
	wmb();

	/*
	 * Put L4 Secure clock domain to SW_WKUP so that modules are accessible
	 */
	clkdm_wakeup(smc_l4_sec_clkdm);

	local_irq_save(iflags);

	/* proc_id is always 0 */
	ret = schedule_secure_world(app_id, 0, flags, __pa(pub2sec_args));
	local_irq_restore(iflags);

	/* Restore the HW_SUP on L4 Sec clock domain so hardware can idle */
	if ((app_id != API_HAL_HWATURNOFF_INDEX) &&
	    (!timer_pending(&tf_crypto_clock_timer))) {
		(void) tf_crypto_turn_off_clocks();
		clkdm_allow_idle(smc_l4_sec_clkdm);
	}

	/*dpr_info("%s()\n", __func__);*/

	return ret;
}
Example #29
/* Executed by primary CPU, brings other CPUs out of reset. Called at boot
   as well as when a CPU is coming out of shutdown induced by echo 0 >
   /sys/devices/.../cpuX.
*/
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int cnt = 0;
	int ret;
	int flag = 0;

	pr_debug("Starting secondary CPU %d\n", cpu);

	/* Set preset_lpj to avoid subsequent lpj recalculations */
	preset_lpj = loops_per_jiffy;

	if (cpu > 0 && cpu < ARRAY_SIZE(cold_boot_flags))
		flag = cold_boot_flags[cpu];
	else
		__WARN();

	if (per_cpu(cold_boot_done, cpu) == false) {
		ret = scm_set_boot_addr((void *)
					virt_to_phys(msm_secondary_startup),
					flag);
		if (ret == 0)
			release_secondary(cpu);
		else
			printk(KERN_DEBUG "Failed to set secondary core boot "
					  "address\n");
		per_cpu(cold_boot_done, cpu) = true;
	}

	pen_release = cpu;
	dmac_flush_range((void *)&pen_release,
			 (void *)(&pen_release + 1));
	__asm__("sev");
	mb();

	/* Use smp_cross_call() to send a soft interrupt to wake up
	 * the other core.
	 */
	gic_raise_softirq(cpumask_of(cpu), 1);

	while (pen_release != 0xFFFFFFFF) {
		dmac_inv_range((void *)&pen_release,
			       (void *)(&pen_release + 1));
		usleep(500);
		if (cnt++ >= 10)
			break;
	}

	return 0;
}
Example #30
/*
 * Function responsible for formatting parameters to pass from NS world to
 * S world
 */
u32 omap4_secure_dispatcher(u32 app_id, u32 flags, u32 nargs,
	u32 arg1, u32 arg2, u32 arg3, u32 arg4)
{
	u32 ret;
	unsigned long iflags;
	u32 pub2sec_args[5] = {0, 0, 0, 0, 0};

	/*dprintk(KERN_INFO "omap4_secure_dispatcher: "
		"app_id=0x%08x, flags=0x%08x, nargs=%u\n",
		app_id, flags, nargs);*/

	/*if (nargs != 0)
		dprintk(KERN_INFO
		"omap4_secure_dispatcher: args=%08x, %08x, %08x, %08x\n",
		arg1, arg2, arg3, arg4);*/

	pub2sec_args[0] = nargs;
	pub2sec_args[1] = arg1;
	pub2sec_args[2] = arg2;
	pub2sec_args[3] = arg3;
	pub2sec_args[4] = arg4;

	/* Make sure parameters are visible to the secure world */
	dmac_flush_range((void *)pub2sec_args,
		(void *)(((u32)(pub2sec_args)) + 5*sizeof(u32)));
	outer_clean_range(__pa(pub2sec_args),
		__pa(pub2sec_args) + 5*sizeof(u32));
	wmb();

	/*
	 * Put L4 Secure clock domain to SW_WKUP so that modules are accessible
	 */
	tf_l4sec_clkdm_wakeup(false);

	local_irq_save(iflags);
#ifdef DEBUG
	BUG_ON((read_mpidr() & 0x00000003) != 0);
#endif
	/* proc_id is always 0 */
	ret = schedule_secure_world(app_id, 0, flags, __pa(pub2sec_args));
	local_irq_restore(iflags);

	/* Restore the HW_SUP on L4 Sec clock domain so hardware can idle */
	tf_l4sec_clkdm_allow_idle(false);

	/*dprintk(KERN_INFO "omap4_secure_dispatcher()\n");*/

	return ret;
}
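
In both omap4_secure_dispatcher() variants (Examples #28 and #30) the argument block is flushed from the inner cache with dmac_flush_range() but only cleaned from the outer cache with outer_clean_range(); writeback is enough here since the secure world only reads pub2sec_args, and wmb() then orders the maintenance ahead of the secure-world call.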