static int32_t msm_mem_allocate(struct videobuf2_contig_pmem *mem)
{
	int32_t phyaddr;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	int rc, len;
	mem->client = msm_ion_client_create(-1, "camera");
	if (IS_ERR((void *)mem->client)) {
		pr_err("%s Could not create client\n", __func__);
		goto client_failed;
	}
	mem->ion_handle = ion_alloc(mem->client, mem->size, SZ_4K,
		(0x1 << ION_CP_MM_HEAP_ID | 0x1 << ION_IOMMU_HEAP_ID));
	if (IS_ERR((void *)mem->ion_handle)) {
		pr_err("%s Could not allocate\n", __func__);
		goto alloc_failed;
	}
	rc = ion_phys(mem->client, mem->ion_handle, (ion_phys_addr_t *)&phyaddr,
		 (size_t *)&len);
	if (rc < 0) {
		pr_err("%s Could not get physical address\n", __func__);
		goto phys_failed;
	}
#else
	phyaddr = allocate_contiguous_ebi_nomap(mem->size, SZ_4K);
#endif
	return phyaddr;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
phys_failed:
	ion_free(mem->client, mem->ion_handle);
alloc_failed:
	ion_client_destroy(mem->client);
client_failed:
	return 0;
#endif
}
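Most of the snippets on this page follow the same legacy in-kernel ION pattern: create a client, allocate a handle, resolve it to a physical address with ion_phys(), and free in reverse order. A minimal sketch of that lifecycle, assuming the five-argument ion_alloc() (signatures varied between kernel versions) and using ION_SYSTEM_HEAP_ID as a placeholder heap:

/* Sketch only: heap ID and error codes are illustrative, not canonical. */
static struct ion_handle *example_ion_phys_lookup(struct ion_client *client,
		size_t size, ion_phys_addr_t *paddr, size_t *len)
{
	struct ion_handle *handle;

	handle = ion_alloc(client, size, SZ_4K,
			   ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
	if (IS_ERR_OR_NULL(handle))
		return ERR_PTR(-ENOMEM);

	if (ion_phys(client, handle, paddr, len)) {
		ion_free(client, handle);
		return ERR_PTR(-EIO);
	}

	/* caller keeps the handle and pairs it with ion_free() when done */
	return handle;
}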
Example #2
static int get_device_address(struct smem_client *smem_client,
		struct ion_handle *hndl, unsigned long align,
		dma_addr_t *iova, unsigned long *buffer_size,
		u32 flags, enum hal_buffer buffer_type)
{
	int rc = 0;
	int domain, partition;
	struct ion_client *clnt = NULL;

	if (!iova || !buffer_size || !hndl || !smem_client) {
		dprintk(VIDC_ERR, "Invalid params: %pK, %pK, %pK, %pK\n",
				smem_client, hndl, iova, buffer_size);
		return -EINVAL;
	}

	clnt = smem_client->clnt;
	if (!clnt) {
		dprintk(VIDC_ERR, "Invalid client\n");
		return -EINVAL;
	}

	rc = msm_smem_get_domain_partition(smem_client, flags, buffer_type,
			&domain, &partition);
	if (rc) {
		dprintk(VIDC_ERR, "Failed to get domain and partition: %d\n",
				rc);
		goto mem_domain_get_failed;
	}

	if (flags & SMEM_SECURE) {
		rc = msm_ion_secure_buffer(clnt, hndl,
			get_tz_usage(smem_client, buffer_type), 0);
		if (rc) {
			dprintk(VIDC_ERR, "Failed to secure memory\n");
			goto mem_domain_get_failed;
		}
	}
	if (is_iommu_present(smem_client->res)) {
		dprintk(VIDC_DBG,
				"Calling ion_map_iommu - domain: %d, partition: %d\n",
				domain, partition);
		rc = ion_map_iommu(clnt, hndl, domain, partition, align,
				0, iova, buffer_size, 0, 0);
	} else {
		dprintk(VIDC_DBG, "Using physical memory address\n");
		rc = ion_phys(clnt, hndl, iova, (size_t *)buffer_size);
	}
	if (rc) {
		dprintk(VIDC_ERR, "ion memory map failed - %d\n", rc);
		goto mem_map_failed;
	}

	return 0;
mem_map_failed:
	if (flags & SMEM_SECURE)
		msm_ion_unsecure_buffer(clnt, hndl);
mem_domain_get_failed:
	return rc;
}
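A hedged usage sketch for the helper above; smem_client, hndl, and the HAL_BUFFER_INPUT buffer type are illustrative stand-ins for values a real caller would already hold:

	/* hypothetical caller: resolve the device address of a mapped buffer */
	dma_addr_t iova = 0;
	unsigned long buf_size = 0;
	int rc;

	rc = get_device_address(smem_client, hndl, SZ_4K,
			&iova, &buf_size, 0, HAL_BUFFER_INPUT);
	if (rc)
		dprintk(VIDC_ERR, "Failed to get device address: %d\n", rc);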
uint32_t hal_tui_alloc(tuiAllocBuffer_t allocbuffer[MAX_DCI_BUFFER_NUMBER],
		size_t allocsize, uint32_t count)
{
	int ret = TUI_DCI_ERR_INTERNAL_ERROR;
	dma_addr_t buf_addr;
	ion_phys_addr_t phys_addr;
	unsigned long offset = 0;
	unsigned int size;

	size = allocsize * (count + 1);

	client = ion_client_create(ion_exynos, "TUI module");
	handle = ion_alloc(client, size, 0, EXYNOS_ION_HEAP_EXYNOS_CONTIG_MASK,
							ION_EXYNOS_VIDEO_MASK);

	dbuf = ion_share_dma_buf(client, handle);
	buf_addr = decon_map_sec_dma_buf(dbuf, 0);

	ion_phys(client, handle, (unsigned long *)&phys_addr, &dbuf->size);

	/* The TUI frame buffer must be 16M-aligned */
	if (phys_addr % 0x1000000)
		offset = 0x1000000 - (phys_addr % 0x1000000);

	phys_addr = phys_addr + offset;
	va = buf_addr + offset;
	printk("buf_addr : %x\n", va);
	printk("phys_addr : %lx\n", phys_addr);
#if 0 /* test code only; MUST BE REMOVED */
	void *kernel_addr;
	//kernel_addr = (void*)ion_map_kernel(client, handle);
	kernel_addr = phys_to_virt(phys_addr+0x2000000);
	*((u32*)kernel_addr) = va;
	printk("DATA ON phys_addr : addr[%lx] val[%x]\n"
			,phys_addr+0x2000000
			,*((u32*)kernel_addr));
#endif

	g_tuiMemPool.pa = phys_addr;
	g_tuiMemPool.size = allocsize * count;

	if ((size_t)(allocsize * count) <= g_tuiMemPool.size) {
		allocbuffer[0].pa = (uint64_t) g_tuiMemPool.pa;
		allocbuffer[1].pa = (uint64_t) (g_tuiMemPool.pa + g_tuiMemPool.size/2);
	} else {
		/* requested buffer is bigger than the memory pool, return an
		 * error */
		pr_debug("%s(%d): %s\n", __func__, __LINE__, "Memory pool too small");
		ret = TUI_DCI_ERR_INTERNAL_ERROR;
		return ret;
	}
	ret = TUI_DCI_OK;

	return ret;
}
Example #4
int sys_get_phyaddr(unsigned long *phys, ion_user_handle_t p_ion_handle)
{
    int ret;

    ret = ion_phys(ion_fd, p_ion_handle, phys);
    if (ret) {
        printf("ion get phys addr error\n");
        return -1;
    }
    return 0;
}
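Note that this user-space helper depends on a project-specific wrapper: stock libion has no three-argument ion_phys(), so ion_fd and p_ion_handle are assumed to come from that same wrapper library. A hypothetical caller:

    /* hypothetical caller; p_ion_handle came from a prior wrapper ion_alloc() */
    unsigned long phys = 0;

    if (sys_get_phyaddr(&phys, p_ion_handle) == 0)
        printf("phys addr: 0x%lx\n", phys);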
int get_phys_addr(struct vcap_dev *dev, struct vb2_queue *q,
				  struct v4l2_buffer *b)
{
	struct vb2_buffer *vb;
	struct vcap_buffer *buf;
	unsigned long len, offset;
	int rc;

	if (q->fileio) {
		dprintk(1, "%s: file io in progress\n", __func__);
		return -EBUSY;
	}

	if (b->type != q->type) {
		dprintk(1, "%s: invalid buffer type\n", __func__);
		return -EINVAL;
	}

	if (b->index >= q->num_buffers) {
		dprintk(1, "%s: buffer index out of range\n", __func__);
		return -EINVAL;
	}

	vb = q->bufs[b->index];
	if (NULL == vb) {
		dprintk(1, "%s: buffer is NULL\n", __func__);
		return -EINVAL;
	}

	if (vb->state != VB2_BUF_STATE_DEQUEUED) {
		dprintk(1, "%s: buffer already in use\n", __func__);
		return -EINVAL;
	}

	buf = container_of(vb, struct vcap_buffer, vb);

	buf->ion_handle = ion_import_fd(dev->ion_client, b->m.userptr);
	if (IS_ERR((void *)buf->ion_handle)) {
		pr_err("%s: Could not alloc memory\n", __func__);
		buf->ion_handle = NULL;
		return -ENOMEM;
	}
	rc = ion_phys(dev->ion_client, buf->ion_handle,
			&buf->paddr, (size_t *)&len);
	if (rc < 0) {
		pr_err("%s: Could not get phys addr\n", __func__);
		ion_free(dev->ion_client, buf->ion_handle);
		buf->ion_handle = NULL;
		return -EFAULT;
	}

	offset = b->reserved;
	buf->paddr += offset;
	return 0;
}
Example #6
int omap_ion_get_pages(struct ion_client *client, struct ion_handle *handle,
		int *n, unsigned long *ion_addr,
		struct omap_ion_tiler_alloc_data *sAllocData)
{
	size_t len;
	int ret;

	/* validate that the handle exists in this client */
	ret = ion_phys(client, handle, ion_addr, &len);
	if (ret)
		return ret;

	*n = len / PAGE_SIZE; /* number of pages; assumes a page-aligned length */

	return 0;
}
Example #7
static uint32_t tiler_alloc(int size)
{
	int res;

	struct omap_ion_tiler_alloc_data alloc_data = {
		.w = size,
		.h = 1,
		.fmt = TILER_PIXEL_FMT_PAGE,
		.flags = 0,
	};

	struct alloc_t *alloc = get_alloc();
	if (!alloc) {
		return 0;
	}
	
	res = omap_ion_tiler_alloc(imt_ion_client, &alloc_data);

	if (res < 0) {
		ERR("imt: could not alloc %d\n", res);
		return 0;
	} else {
		ion_phys_addr_t phys;
		size_t len;
		ion_phys(imt_ion_client, alloc_data.handle, &phys, &len);
		DBG2("imt: ion: siz %8d  %08X  phys %08lX  len %8d", size, (unsigned int)alloc_data.handle, phys, (int)len );
		alloc->phys   = phys;
		alloc->handle = alloc_data.handle;
		return phys;
	}
}

static void tiler_free(uint32_t data)
{
	struct ion_handle *handle = put_alloc((ion_phys_addr_t)data);
	if (handle) {
		ion_free(imt_ion_client, handle);
	}
}

static void free_all(void)
{
	int i;
	for (i = 0; i < MAX_ALLOC; i++) {
		if( alloc_list[i].handle ) {
			DBG("imt: cleanup[%d] %08X  phys %08lX", i, (unsigned int)alloc_list[i].handle, alloc_list[i].phys);
			ion_free(imt_ion_client, alloc_list[i].handle);
			alloc_list[i].handle = NULL;
			alloc_list[i].phys   = 0;
		}
	}
}
int mdss_mdp_get_img(struct ion_client *iclient, struct msmfb_data *img,
		     struct mdss_mdp_img_data *data)
{
	struct file *file;
	int ret = -EINVAL;
	int fb_num;
	unsigned long *start, *len;

	start = (unsigned long *) &data->addr;
	len = (unsigned long *) &data->len;
	data->flags = img->flags;
	data->p_need = 0;

	if (img->flags & MDP_BLIT_SRC_GEM) {
		data->srcp_file = NULL;
		ret = kgsl_gem_obj_addr(img->memory_id, (int) img->priv,
					start, len);
	} else if (img->flags & MDP_MEMORY_ID_TYPE_FB) {
		file = fget_light(img->memory_id, &data->p_need);
		if (file && FB_MAJOR ==
				MAJOR(file->f_dentry->d_inode->i_rdev)) {
			data->srcp_file = file;
			fb_num = MINOR(file->f_dentry->d_inode->i_rdev);
			ret = mdss_fb_get_phys_info(start, len, fb_num);
		}
	} else if (iclient) {
		data->iclient = iclient;
		data->srcp_ihdl = ion_import_dma_buf(iclient, img->memory_id);
		if (IS_ERR_OR_NULL(data->srcp_ihdl))
			return PTR_ERR(data->srcp_ihdl);
		ret = ion_phys(iclient, data->srcp_ihdl,
			       start, (size_t *) len);
	} else {
		unsigned long vstart;
		ret = get_pmem_file(img->memory_id, start, &vstart, len,
				    &data->srcp_file);
	}

	if (!ret && (img->offset < data->len)) {
		data->addr += img->offset;
		data->len -= img->offset;
	} else {
		mdss_mdp_put_img(data);
		ret = -EINVAL;
	}

	return ret;
}
Example #9
int omap_tiler_pages(struct ion_client *client, struct ion_handle *handle,
		     int *n, u32 **tiler_addrs)
{
	ion_phys_addr_t addr;
	size_t len;
	int ret;
	struct omap_tiler_info *info = ion_handle_buffer(handle)->priv_virt;

	/* validate that the handle exists in this client */
	ret = ion_phys(client, handle, &addr, &len);
	if (ret)
		return ret;

	*n = info->n_tiler_pages;
	*tiler_addrs = info->tiler_addrs;
	return 0;
}
Example #10
long omap_ion_ioctl(struct ion_client *client, unsigned int cmd,
		    unsigned long arg)
{
	switch (cmd) {
	case OMAP_ION_TILER_ALLOC:
	{
		struct omap_ion_tiler_alloc_data data;
		int ret;

		if (!tiler_heap) {
			pr_err("%s: Tiler heap requested but no tiler "
			       "heap exists on this platform\n", __func__);
			return -EINVAL;
		}
		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		ret = omap_ion_tiler_alloc(client, &data);
		if (ret)
			return ret;
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(data)))
			return -EFAULT;
		break;
	}
	case OMAP_ION_PHYS_ADDR:
	{
		struct omap_ion_phys_addr_data data;
		int ret;
		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		ret = ion_phys(client, data.handle,
				&data.phys_addr, &data.size);
		if (ret)
			return ret;
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(data)))
			return -EFAULT;
		break;
	}
	default:
		pr_err("%s: Unknown custom ioctl\n", __func__);
		return -ENOTTY;
	}
	return 0;
}
Example #11
static int alloc_ion_mem(struct smem_client *client, size_t size,
		u32 align, u32 flags, struct msm_smem *mem)
{
	struct ion_handle *hndl;
	size_t len;
	unsigned long ionflags = 0;
	unsigned long heap_mask = 0;
	int rc = 0;
	if (size == 0)
		goto skip_mem_alloc;
	if (flags == SMEM_CACHED)
		ionflags = ION_SET_CACHED(ionflags);
	else
		ionflags = ION_SET_UNCACHED(ionflags);

	heap_mask = ION_HEAP(ION_CP_MM_HEAP_ID);
	hndl = ion_alloc(client->clnt, size, align, heap_mask, ionflags);
	if (IS_ERR_OR_NULL(hndl)) {
		pr_err("Failed to allocate shared memory = %p, %d, %d, 0x%x\n",
				client, size, align, ionflags);
		rc = -ENOMEM;
		goto fail_shared_mem_alloc;
	}
	mem->mem_type = client->mem_type;
	mem->smem_priv = hndl;
	if (ion_phys(client->clnt, hndl, &mem->paddr, &len)) {
		pr_err("Failed to get physical address\n");
		rc = -EIO;
		goto fail_map;
	}
	mem->device_addr = mem->paddr;
	mem->size = size;
	mem->kvaddr = ion_map_kernel(client->clnt, hndl);
	if (!mem->kvaddr) {
		pr_err("Failed to map shared mem in kernel\n");
		rc = -EIO;
		goto fail_map;
	}
	return rc;
fail_map:
	ion_free(client->clnt, hndl);
fail_shared_mem_alloc:
skip_mem_alloc:
	return rc;
}
Example #12
static long hisi_ion_custom_ioctl(struct ion_client *client,
				unsigned int cmd,
				unsigned long arg)
{
	int ret = 0;

	switch (cmd) {
	case ION_HISI_CUSTOM_PHYS:
	{
		struct ion_phys_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg,
				sizeof(data))) {
			return -EFAULT;
		}

		handle = ion_import_dma_buf(client, data.fd_buffer);
		if (IS_ERR(handle)) {
			/* import failed: there is no valid handle to free */
			return PTR_ERR(handle);
		}

		ret = ion_phys(client, handle, &data.phys, &data.size);
		if (ret) {
			ion_free(client, handle);
			return ret;
		}
		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, handle);
			return -EFAULT;
		}
		ion_free(client, handle);

		break;
	}
	default:
		return -ENOTTY;
	}

	return ret;
}
/**
 * Allocate memory for channel output of specific TSIF.
 *
 * @tsif: The TSIF id for which memory should be allocated.
 *
 * Return: error status
 */
static int mpq_dmx_channel_mem_alloc(int tsif)
{
	int result;
	size_t len;

	MPQ_DVB_DBG_PRINT("%s(%d)\n", __func__, tsif);

	mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle =
		ion_alloc(mpq_dmx_tspp_info.ion_client,
		 (mpq_dmx_tspp_info.tsif[tsif].buffer_count *
		  TSPP_DESCRIPTOR_SIZE),
		 SZ_4K,
		 ION_HEAP(tspp_out_ion_heap),
		 0); /* non-cached */

	if (IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle)) {
		MPQ_DVB_ERR_PRINT("%s: ion_alloc() failed\n", __func__);
		mpq_dmx_channel_mem_free(tsif);
		return -ENOMEM;
	}

	/* save virtual base address of heap */
	mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_virt_base =
		ion_map_kernel(mpq_dmx_tspp_info.ion_client,
			mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle);
	if (IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].
				ch_mem_heap_virt_base)) {
		MPQ_DVB_ERR_PRINT("%s: ion_map_kernel() failed\n", __func__);
		mpq_dmx_channel_mem_free(tsif);
		return -ENOMEM;
	}

	/* save physical base address of heap */
	result = ion_phys(mpq_dmx_tspp_info.ion_client,
		mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle,
		&(mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base), &len);
	if (result < 0) {
		MPQ_DVB_ERR_PRINT("%s: ion_phys() failed\n", __func__);
		mpq_dmx_channel_mem_free(tsif);
		return -ENOMEM;
	}

	return 0;
}
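mpq_dmx_channel_mem_free() is only referenced above; a plausible teardown, assuming it simply pairs each successful step with its inverse (a sketch, not the driver's actual implementation):

/* Hypothetical teardown pairing the allocation above. */
static void example_channel_mem_free(int tsif)
{
	struct ion_client *client = mpq_dmx_tspp_info.ion_client;
	struct ion_handle *handle =
		mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle;

	if (IS_ERR_OR_NULL(handle))
		return;

	if (!IS_ERR_OR_NULL(
		mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_virt_base))
		ion_unmap_kernel(client, handle);

	ion_free(client, handle);
	mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle = NULL;
	mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_virt_base = NULL;
}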
Example #14
static int ion_user_to_kernel(struct smem_client *client,
			int fd, u32 offset, struct msm_smem *mem)
{
	struct ion_handle *hndl;
	unsigned long ionflag;
	size_t len;
	int rc = 0;
	hndl = ion_import_dma_buf(client->clnt, fd);
	if (IS_ERR_OR_NULL(hndl)) {
		pr_err("Failed to get handle: %p, %d, %d, %p\n",
				client, fd, offset, hndl);
		rc = -ENOMEM;
		goto fail_import_fd;
	}
	rc = ion_handle_get_flags(client->clnt, hndl, &ionflag);
	if (rc) {
		pr_err("Failed to get ion flags: %d", rc);
		goto fail_map;
	}
	rc = ion_phys(client->clnt, hndl, &mem->paddr, &len);
	if (rc) {
		pr_err("Failed to get physical address\n");
		goto fail_map;
	}
	mem->kvaddr = ion_map_kernel(client->clnt, hndl);
	if (!mem->kvaddr) {
		pr_err("Failed to map shared mem in kernel\n");
		rc = -EIO;
		goto fail_map;
	}

	mem->kvaddr += offset;
	mem->paddr += offset;
	mem->mem_type = client->mem_type;
	mem->smem_priv = hndl;
	mem->device_addr = mem->paddr;
	mem->size = len;
	return rc;
fail_map:
	ion_free(client->clnt, hndl);
fail_import_fd:
	return rc;
}
Example #15
int mdp_mmap(struct v4l2_subdev *sd, void *arg)
{
	int rc = 0, domain = -1;
	struct mem_region_map *mmap = arg;
	struct mem_region *mregion;
	bool use_iommu = true;
	struct mdp_instance *inst = NULL;

	if (!mmap || !mmap->mregion || !mmap->cookie) {
		WFD_MSG_ERR("Invalid argument\n");
		return -EINVAL;
	}

	inst = mmap->cookie;
	mregion = mmap->mregion;
	if (mregion->size % SZ_4K != 0) {
		WFD_MSG_ERR("Memregion not aligned to %d\n", SZ_4K);
		return -EINVAL;
	}

	if (inst->uses_iommu_split_domain) {
		if (inst->secure)
			use_iommu = false;
		else
			domain = DISPLAY_WRITE_DOMAIN;
	} else {
		domain = DISPLAY_READ_DOMAIN;
	}

	if (use_iommu) {
		rc = ion_map_iommu(mmap->ion_client, mregion->ion_handle,
				domain, GEN_POOL, SZ_4K, 0,
				(unsigned long *)&mregion->paddr,
				(unsigned long *)&mregion->size,
				0, 0);
	} else {
		rc = ion_phys(mmap->ion_client,	mregion->ion_handle,
				(unsigned long *)&mregion->paddr,
				(size_t *)&mregion->size);
	}

	return rc;
}
Example #16
static int smcmod_ion_fd_to_phys(int32_t fd, struct ion_client *ion_clientp,
	struct ion_handle **ion_handlep, uint32_t *phys_addrp, size_t *sizep)
{
	int ret = 0;

	/* sanity check args */
	if ((fd < 0) || IS_ERR_OR_NULL(ion_clientp) ||
		IS_ERR_OR_NULL(ion_handlep) || IS_ERR_OR_NULL(phys_addrp) ||
		IS_ERR_OR_NULL(sizep))
		return -EINVAL;

	/* import the buffer fd */
	*ion_handlep = ion_import_dma_buf(ion_clientp, fd);

	/* sanity check the handle */
	if (IS_ERR_OR_NULL(*ion_handlep))
		return -EINVAL;

	/* get the physical address */
	ret = ion_phys(ion_clientp, *ion_handlep, (ion_phys_addr_t *)phys_addrp,
		sizep);

	return ret;
}
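On success the imported handle is passed back through ion_handlep and ownership stays with the caller; a hedged usage sketch (my_client and fd are hypothetical):

	struct ion_handle *hndl = NULL;
	uint32_t phys = 0;
	size_t size = 0;

	if (!smcmod_ion_fd_to_phys(fd, my_client, &hndl, &phys, &size)) {
		/* ... use phys/size ... */
		ion_free(my_client, hndl);	/* caller releases the import */
	}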
Example #17
static void *res_trk_pmem_alloc
	(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size;
	struct ddl_context *ddl_context;
	int rc = -EINVAL;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	res_trk_set_mem_type(addr->mem_type);
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			DDL_MSG_ERROR("%s() :DDL ION Client Invalid handle\n",
						 __func__);
			goto bail_out;
		}
		addr->alloc_handle = ion_alloc(
		ddl_context->video_ion_client, alloc_size, SZ_4K,
			res_trk_get_mem_type());
		if (IS_ERR_OR_NULL(addr->alloc_handle)) {
			DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
						 __func__);
			goto bail_out;
		}
		rc = ion_phys(ddl_context->video_ion_client,
				addr->alloc_handle, &phyaddr,
				 &len);
		if (rc || !phyaddr) {
			DDL_MSG_ERROR("%s():DDL ION client physical failed\n",
						 __func__);
			goto free_acm_ion_alloc;
		}
		addr->alloced_phys_addr = phyaddr;
	} else {
		addr->alloced_phys_addr = (phys_addr_t)
		allocate_contiguous_memory_nomap(alloc_size,
			res_trk_get_mem_type(), SZ_4K);
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n",
					 __func__, alloc_size);
			goto bail_out;
		}
	}

	addr->buffer_size = sz;
	return (void *)addr->alloced_phys_addr;

free_acm_ion_alloc:
	if (ddl_context->video_ion_client) {
		if (addr->alloc_handle) {
			ion_free(ddl_context->video_ion_client,
				addr->alloc_handle);
			addr->alloc_handle = NULL;
		}
	}
bail_out:
	return NULL;
}
static int register_memory(void)
{
	int			result;
	unsigned long		paddr;
	void                    *kvptr;
	unsigned long		kvaddr;
	unsigned long		mem_len;

	mutex_lock(&acdb_data.acdb_mutex);
	acdb_data.ion_client =
		msm_ion_client_create(UINT_MAX, "audio_acdb_client");
	if (IS_ERR_OR_NULL(acdb_data.ion_client)) {
		pr_err("%s: Could not register ION client!!!\n", __func__);
		result = PTR_ERR(acdb_data.ion_client);
		goto err;
	}

	acdb_data.ion_handle = ion_import_dma_buf(acdb_data.ion_client,
		atomic_read(&acdb_data.map_handle));
	if (IS_ERR_OR_NULL(acdb_data.ion_handle)) {
		pr_err("%s: Could not import map handle!!!\n", __func__);
		result = PTR_ERR(acdb_data.ion_handle);
		goto err_ion_client;
	}

	result = ion_phys(acdb_data.ion_client, acdb_data.ion_handle,
				&paddr, (size_t *)&mem_len);
	if (result != 0) {
		pr_err("%s: Could not get phys addr!!!\n", __func__);
		goto err_ion_handle;
	}

	kvptr = ion_map_kernel(acdb_data.ion_client,
		acdb_data.ion_handle, 0);
	if (IS_ERR_OR_NULL(kvptr)) {
		pr_err("%s: Could not get kernel virt addr!!!\n", __func__);
		result = PTR_ERR(kvptr);
		goto err_ion_handle;
	}
	kvaddr = (unsigned long)kvptr;
	atomic64_set(&acdb_data.paddr, paddr);
	atomic64_set(&acdb_data.kvaddr, kvaddr);
	atomic64_set(&acdb_data.mem_len, mem_len);
	mutex_unlock(&acdb_data.acdb_mutex);

	pr_debug("%s done! paddr = 0x%lx, "
		"kvaddr = 0x%lx, len = x%lx\n",
		 __func__,
		(long)atomic64_read(&acdb_data.paddr),
		(long)atomic64_read(&acdb_data.kvaddr),
		(long)atomic64_read(&acdb_data.mem_len));

	return result;
err_ion_handle:
	ion_free(acdb_data.ion_client, acdb_data.ion_handle);
err_ion_client:
	ion_client_destroy(acdb_data.ion_client);
err:
	atomic64_set(&acdb_data.mem_len, 0);
	mutex_unlock(&acdb_data.acdb_mutex);
	return result;
}
void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size, offset = 0, flags = 0;
	u32 index = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	int rc = -EINVAL;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			DDL_MSG_ERROR("%s() :DDL ION Client Invalid handle\n",
						 __func__);
			goto bail_out;
		}
		addr->alloc_handle = ion_alloc(
		ddl_context->video_ion_client, alloc_size, SZ_4K,
			(1<<res_trk_get_mem_type()));
		if (IS_ERR_OR_NULL(addr->alloc_handle)) {
			DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
						 __func__);
			goto bail_out;
		}
		rc = ion_phys(ddl_context->video_ion_client,
				addr->alloc_handle, &phyaddr,
				 &len);
		if (rc || !phyaddr) {
			DDL_MSG_ERROR("%s():DDL ION client physical failed\n",
						 __func__);
			goto free_acm_ion_alloc;
		}
		addr->alloced_phys_addr = phyaddr;
	} else {
		addr->alloced_phys_addr = (phys_addr_t)
		allocate_contiguous_memory_nomap(alloc_size,
			res_trk_get_mem_type(), SZ_4K);
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n",
					 __func__, alloc_size);
			goto bail_out;
		}
	}
	flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
	if (alignment == DDL_KILO_BYTE(128))
			index = 1;
	else if (alignment > SZ_4K)
		flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;

	addr->mapped_buffer =
	msm_subsystem_map_buffer((unsigned long)addr->alloced_phys_addr,
	alloc_size, flags, &vidc_mmu_subsystem[index],
	sizeof(vidc_mmu_subsystem[index])/sizeof(unsigned int));
	if (IS_ERR(addr->mapped_buffer)) {
		pr_err(" %s() buffer map failed", __func__);
		goto free_acm_ion_alloc;
	}
	mapped_buffer = addr->mapped_buffer;
	if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
		pr_err("%s() map buffers failed\n", __func__);
		goto free_map_buffers;
	}
	addr->physical_base_addr = (u8 *)mapped_buffer->iova[0];
	addr->virtual_base_addr = mapped_buffer->vaddr;
	addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
		addr->physical_base_addr, alignment);
	offset = (u32)(addr->align_physical_addr -
			addr->physical_base_addr);
	addr->align_virtual_addr = addr->virtual_base_addr + offset;
	addr->buffer_size = sz;
	return addr->virtual_base_addr;

free_map_buffers:
	msm_subsystem_unmap_buffer(addr->mapped_buffer);
	addr->mapped_buffer = NULL;
free_acm_ion_alloc:
	if (ddl_context->video_ion_client) {
		if (addr->alloc_handle) {
			ion_free(ddl_context->video_ion_client,
				addr->alloc_handle);
			addr->alloc_handle = NULL;
		}
	} else {
		free_contiguous_memory_by_paddr(
			(unsigned long)addr->alloced_phys_addr);
		addr->alloced_phys_addr = (phys_addr_t)NULL;
	}
bail_out:
	return NULL;
}
Example #20
static int audpcm_in_open(struct inode *inode, struct file *file)
{
	struct audio_in *audio = &the_audio_in;
	int rc;
	size_t len = 0;
	unsigned long ionflag = 0;
	ion_phys_addr_t addr = 0;
	struct ion_handle *handle = NULL;
	struct ion_client *client = NULL;

	int encid;
	mutex_lock(&audio->lock);
	if (audio->opened) {
		rc = -EBUSY;
		goto done;
	}

	/* Settings will be re-config at AUDIO_SET_CONFIG,
	 * but at least we need to have initial config
	 */
	audio->mode = MSM_AUD_ENC_MODE_TUNNEL;
	audio->samp_rate = RPC_AUD_DEF_SAMPLE_RATE_11025;
	audio->samp_rate_index = AUDREC_CMD_SAMP_RATE_INDX_11025;
	audio->channel_mode = AUDREC_CMD_STEREO_MODE_MONO;
	audio->buffer_size = MONO_DATA_SIZE;
	audio->enc_type = AUDREC_CMD_TYPE_0_INDEX_WAV | audio->mode;

	rc = audmgr_open(&audio->audmgr);
	if (rc)
		goto done;
	encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name,
			&audio->queue_ids);
	if (encid < 0) {
		MM_ERR("No free encoder available\n");
		rc = -ENODEV;
		goto done;
	}
	audio->enc_id = encid;

	rc = msm_adsp_get(audio->module_name, &audio->audrec,
			   &audrec_adsp_ops, audio);
	if (rc) {
		audpreproc_aenc_free(audio->enc_id);
		goto done;
	}

	rc = msm_adsp_get("AUDPREPROCTASK", &audio->audpre,
				&audpre_adsp_ops, audio);
	if (rc) {
		msm_adsp_put(audio->audrec);
		audpreproc_aenc_free(audio->enc_id);
		goto done;
	}

	audio->dsp_cnt = 0;
	audio->stopped = 0;

	audpcm_in_flush(audio);

	client = msm_ion_client_create(UINT_MAX, "Audio_PCM_in_client");
	if (IS_ERR_OR_NULL(client)) {
		MM_ERR("Unable to create ION client\n");
		rc = -ENOMEM;
		goto client_create_error;
	}
	audio->client = client;

	MM_DBG("allocating mem sz = %d\n", DMASZ);
	handle = ion_alloc(client, DMASZ, SZ_4K,
		ION_HEAP(ION_AUDIO_HEAP_ID), 0);
	if (IS_ERR_OR_NULL(handle)) {
		MM_ERR("Unable to create allocate O/P buffers\n");
		rc = -ENOMEM;
		goto output_buff_alloc_error;
	}

	audio->output_buff_handle = handle;

	rc = ion_phys(client, handle, &addr, &len);
	if (rc) {
		MM_ERR("O/P buffers: invalid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
		rc = -ENOMEM;
		goto output_buff_get_phys_error;
	} else {
		MM_INFO("O/P buffers: valid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
	}
	audio->phys = (int32_t)addr;

	rc = ion_handle_get_flags(client, handle, &ionflag);
	if (rc) {
		MM_ERR("could not get flags for the handle\n");
		rc = -ENOMEM;
		goto output_buff_get_flags_error;
	}

	audio->data = ion_map_kernel(client, handle);
	if (IS_ERR(audio->data)) {
		MM_ERR("could not map read buffers,freeing instance 0x%08x\n",
				(int)audio);
		rc = -ENOMEM;
		goto output_buff_map_error;
	}
	MM_DBG("read buf: phy addr 0x%08x kernel addr 0x%08x\n",
		audio->phys, (int)audio->data);

	file->private_data = audio;
	audio->opened = 1;
	rc = 0;
done:
	mutex_unlock(&audio->lock);
	return rc;
output_buff_map_error:
output_buff_get_phys_error:
output_buff_get_flags_error:
	ion_free(client, audio->output_buff_handle);
output_buff_alloc_error:
	ion_client_destroy(client);
client_create_error:
	msm_adsp_put(audio->audrec);
	msm_adsp_put(audio->audpre);
	audpreproc_aenc_free(audio->enc_id);
	mutex_unlock(&audio->lock);
	return rc;
}
Example #21
void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size, offset = 0;
	u32 index = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	unsigned long iova = 0;
	unsigned long buffer_size = 0;
	unsigned long *kernel_vaddr = NULL;
	unsigned long ionflag = 0;
	unsigned long flags = 0;
	int ret = 0;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	int rc = 0;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	res_trk_set_mem_type(addr->mem_type);
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			DDL_MSG_ERROR("%s() :DDL ION Client Invalid handle\n",
						 __func__);
			goto bail_out;
		}
		alloc_size = (alloc_size+4095) & ~4095;
		addr->alloc_handle = ion_alloc(
		ddl_context->video_ion_client, alloc_size, SZ_4K,
			res_trk_get_mem_type());
		if (IS_ERR_OR_NULL(addr->alloc_handle)) {
			DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
						 __func__);
			goto bail_out;
		}
		if (res_trk_check_for_sec_session() ||
			addr->mem_type == DDL_FW_MEM)
			ionflag = UNCACHED;
		else
			ionflag = CACHED;
		kernel_vaddr = (unsigned long *) ion_map_kernel(
					ddl_context->video_ion_client,
					addr->alloc_handle, ionflag);
		if (IS_ERR_OR_NULL(kernel_vaddr)) {
				DDL_MSG_ERROR("%s() :DDL ION map failed\n",
							 __func__);
				goto free_ion_alloc;
		}
		addr->virtual_base_addr = (u8 *) kernel_vaddr;
		if (res_trk_check_for_sec_session()) {
			rc = ion_phys(ddl_context->video_ion_client,
				addr->alloc_handle, &phyaddr,
					&len);
			if (rc || !phyaddr) {
				DDL_MSG_ERROR(
				"%s():DDL ION client physical failed\n",
				__func__);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = phyaddr;
		} else {
			ret = ion_map_iommu(ddl_context->video_ion_client,
					addr->alloc_handle,
					VIDEO_DOMAIN,
					VIDEO_MAIN_POOL,
					SZ_4K,
					0,
					&iova,
					&buffer_size,
					UNCACHED, 0);
			if (ret || !iova) {
				DDL_MSG_ERROR(
				"%s():DDL ION ion map iommu failed, ret = %d iova = 0x%lx\n",
					__func__, ret, iova);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = (phys_addr_t) iova;
		}
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s():DDL ION client physical failed\n",
						__func__);
			goto unmap_ion_alloc;
		}
		addr->mapped_buffer = NULL;
		addr->physical_base_addr = (u8 *) addr->alloced_phys_addr;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = alloc_size;
	} else {
		addr->alloced_phys_addr = (phys_addr_t)
		allocate_contiguous_memory_nomap(alloc_size,
			res_trk_get_mem_type(), SZ_4K);
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n",
					 __func__, alloc_size);
			goto bail_out;
		}
		flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
		if (alignment == DDL_KILO_BYTE(128))
				index = 1;
		else if (alignment > SZ_4K)
			flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;

		addr->mapped_buffer =
		msm_subsystem_map_buffer((unsigned long)addr->alloced_phys_addr,
			alloc_size, flags, &vidc_mmu_subsystem[index],
			sizeof(vidc_mmu_subsystem[index])/sizeof(unsigned int));
		if (IS_ERR(addr->mapped_buffer)) {
			pr_err(" %s() buffer map failed", __func__);
			goto free_acm_alloc;
		}
		mapped_buffer = addr->mapped_buffer;
		if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
			pr_err("%s() map buffers failed\n", __func__);
			goto free_map_buffers;
		}
		addr->physical_base_addr = (u8 *)mapped_buffer->iova[0];
		addr->virtual_base_addr = mapped_buffer->vaddr;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = sz;
	}
	return addr->virtual_base_addr;
free_map_buffers:
	msm_subsystem_unmap_buffer(addr->mapped_buffer);
	addr->mapped_buffer = NULL;
free_acm_alloc:
	free_contiguous_memory_by_paddr(
		(unsigned long)addr->alloced_phys_addr);
	addr->alloced_phys_addr = (phys_addr_t)NULL;
	return NULL;
unmap_ion_alloc:
	ion_unmap_kernel(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->virtual_base_addr = NULL;
	addr->alloced_phys_addr = (phys_addr_t)NULL;
free_ion_alloc:
	ion_free(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->alloc_handle = NULL;
bail_out:
	return NULL;
}
Example #22
static void *vb2_ion_get_userptr(void *alloc_ctx, unsigned long vaddr,
				 unsigned long size, int write)
{
	struct vb2_ion_conf *conf = alloc_ctx;
	struct vb2_ion_buf *buf = NULL;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	size_t len;
	int ret = 0;
	bool malloced = false;
	struct scatterlist *sg;

	/* Create vb2_ion_buf */
	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf) {
		pr_err("kzalloc failed\n");
		return ERR_PTR(-ENOMEM);
	}

	/* Getting handle, client from DVA */
	buf->handle = ion_import_uva(conf->client, vaddr);
	if (IS_ERR(buf->handle)) {
		if ((PTR_ERR(buf->handle) == -ENXIO) && conf->use_mmu) {
			int flags = ION_HEAP_EXYNOS_USER_MASK;

			if (write)
				flags |= ION_EXYNOS_WRITE_MASK;

			buf->handle = ion_exynos_get_user_pages(conf->client,
							vaddr, size, flags);
			if (IS_ERR(buf->handle))
				ret = PTR_ERR(buf->handle);
		} else {
			ret = -EINVAL;
		}

		if (ret) {
			pr_err("%s: Failed to retrieving non-ion user buffer @ "
				"0x%lx (size:0x%lx, dev:%s, errno %ld)\n",
				__func__, vaddr, size, dev_name(conf->dev),
					PTR_ERR(buf->handle));
			goto err_import_uva;
		}

		malloced = true;
	}

	/* TODO: check whether a DVA has already been created */

	buf->sg = ion_map_dma(conf->client, buf->handle);
	if (IS_ERR(buf->sg)) {
		ret = -ENOMEM;
		goto err_map_dma;
	}
	dbg(6, "PA(0x%x) size(%x)\n", buf->sg->dma_address, buf->sg->length);

	sg = buf->sg;
	do {
		buf->nents++;
	} while ((sg = sg_next(sg)));

	/* Map DVA */
	if (conf->use_mmu) {
		buf->dva = iovmm_map(conf->dev, buf->sg);
		if (!buf->dva) {
			pr_err("iovmm_map: conf->name(%s)\n", conf->name);
			goto err_ion_map_dva;
		}
		dbg(6, "DVA(0x%x)\n", buf->dva);
	} else {
		ret = ion_phys(conf->client, buf->handle,
			       (unsigned long *)&buf->dva, &len);
		if (ret) {
			pr_err("ion_phys: conf->name(%s)\n", conf->name);
			goto err_ion_map_dva;
		}
	}

	if (!malloced) {
		/* Get offset from the start */
		down_read(&mm->mmap_sem);
		vma = find_vma(mm, vaddr);
		if (vma == NULL) {
			pr_err("Failed acquiring VMA to get offset 0x%08lx\n",
					vaddr);
			up_read(&mm->mmap_sem);

			if (conf->use_mmu)
				iovmm_unmap(conf->dev, buf->dva);

			goto err_get_vma;
		}
		buf->offset = vaddr - vma->vm_start;
		up_read(&mm->mmap_sem);
	}
	dbg(6, "dva(0x%x), size(0x%x), offset(0x%x)\n",
			(u32)buf->dva, (u32)size, (u32)buf->offset);

	/* Set vb2_ion_buf */
	ret = _vb2_ion_get_vma(vaddr, size, &vma);
	if (ret) {
		pr_err("Failed acquiring VMA 0x%08lx\n", vaddr);

		if (conf->use_mmu)
			iovmm_unmap(conf->dev, buf->dva);

		goto err_get_vma;
	}

	buf->vma = vma;
	buf->conf = conf;
	buf->size = size;
	buf->cacheable = conf->cacheable;

	return buf;

err_get_vma:	/* fall through */
err_ion_map_dva:
	ion_unmap_dma(conf->client, buf->handle);

err_map_dma:
	ion_free(conf->client, buf->handle);

err_import_uva:
	kfree(buf);

	return ERR_PTR(ret);
}
Example #23
static void *vb2_ion_alloc(void *alloc_ctx, unsigned long size)
{
	struct vb2_ion_conf	*conf = alloc_ctx;
	struct vb2_ion_buf	*buf;
	struct scatterlist	*sg;
	size_t	len;
	u32 heap = 0;
	int ret = 0;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf) {
		pr_err("no memory for vb2_ion_conf\n");
		return ERR_PTR(-ENOMEM);
	}

	/* Set vb2_ion_buf */
	buf->conf = conf;
	buf->size = size;
	buf->cacheable = conf->cacheable;

	/* Allocate: physical memory */
	if (conf->contig)
		heap = ION_HEAP_EXYNOS_CONTIG_MASK;
	else
		heap = ION_HEAP_EXYNOS_MASK;

	buf->handle = ion_alloc(conf->client, size, conf->align, heap);
	if (IS_ERR(buf->handle)) {
		pr_err("ion_alloc of size %ld\n", size);
		ret = -ENOMEM;
		goto err_alloc;
	}

	/* Getting scatterlist */
	buf->sg = ion_map_dma(conf->client, buf->handle);
	if (IS_ERR(buf->sg)) {
		pr_err("ion_map_dma conf->name(%s)\n", conf->name);
		ret = -ENOMEM;
		goto err_map_dma;
	}
	dbg(6, "PA(0x%x), SIZE(%x)\n", buf->sg->dma_address, buf->sg->length);

	sg = buf->sg;
	do {
		buf->nents++;
	} while ((sg = sg_next(sg)));
	dbg(6, "buf->nents(0x%x)\n", buf->nents);

	/* Map DVA */
	if (conf->use_mmu) {
		buf->dva = iovmm_map(conf->dev, buf->sg);
		if (!buf->dva) {
			pr_err("iovmm_map: conf->name(%s)\n", conf->name);
			goto err_ion_map_dva;
		}
		dbg(6, "DVA(0x%x)\n", buf->dva);
	} else {
		ret = ion_phys(conf->client, buf->handle,
			       (unsigned long *)&buf->dva, &len);
		if (ret) {
			pr_err("ion_phys: conf->name(%s)\n", conf->name);
			goto err_ion_map_dva;
		}
	}

	/* Set struct vb2_vmarea_handler */
	buf->handler.refcount = &buf->ref;
	buf->handler.put = vb2_ion_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->ref);

	return buf;

err_ion_map_dva:
	ion_unmap_dma(conf->client, buf->handle);

err_map_dma:
	ion_free(conf->client, buf->handle);

err_alloc:
	kfree(buf);

	return ERR_PTR(ret);
}
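vb2_ion_put() is only wired up as the handler above; a plausible release path that unwinds the same mappings (a sketch under the same assumptions, not the allocator's actual code):

/* Hypothetical release path pairing vb2_ion_alloc() above. */
static void example_vb2_ion_release(struct vb2_ion_buf *buf)
{
	struct vb2_ion_conf *conf = buf->conf;

	if (conf->use_mmu)
		iovmm_unmap(conf->dev, buf->dva);

	ion_unmap_dma(conf->client, buf->handle);
	ion_free(conf->client, buf->handle);
	kfree(buf);
}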
Example #24
static PVRSRV_ERROR InitBltFBsMapTiler2D(OMAPLFB_DEVINFO *psDevInfo)
{
	OMAPLFB_FBINFO *psPVRFBInfo = &psDevInfo->sFBInfo;
	struct fb_info *psLINFBInfo = psDevInfo->psLINFBInfo;
	struct bvbuffdesc *pBvDesc;
	struct bvphysdesc *pBvPhysDesc;
	struct bventry *pBvEntry;
	enum bverror eBvErr;
	int iFB;
	ion_phys_addr_t phys;
	size_t size;
	int res = PVRSRV_OK;

	pBvEntry = &gsBvInterface;
	ion_phys(gpsIONClient, psPVRFBInfo->psBltFBsIonHndl, &phys, &size);

	for (iFB = 0; iFB < psPVRFBInfo->psBltFBsNo; iFB++)
	{
		unsigned long *pPageList;

		struct tiler_view_t view;
		int wpages = psPVRFBInfo->uiBltFBsByteStride >> PAGE_SHIFT;
		int h = psLINFBInfo->var.yres;
		int x, y;

		phys += psPVRFBInfo->uiBltFBsByteStride * iFB;
		pPageList = kzalloc(wpages * h * sizeof(*pPageList),
				GFP_KERNEL);
		if (!pPageList) {
			printk(KERN_WARNING DRIVER_PREFIX
					": %s: Could not allocate page list\n",
					__FUNCTION__);
			return OMAPLFB_ERROR_INIT_FAILURE;
		}
		tilview_create(&view, phys, psLINFBInfo->var.xres, h);
		for (y = 0; y < h; y++) {
			for (x = 0; x < wpages; x++) {
				pPageList[y * wpages + x] = phys + view.v_inc * y
						+ (x << PAGE_SHIFT);
			}
		}
		pBvDesc = kzalloc(sizeof(*pBvDesc), GFP_KERNEL);
		pBvPhysDesc = kzalloc(sizeof(*pBvPhysDesc), GFP_KERNEL);
		if (!pBvDesc || !pBvPhysDesc) {
			kfree(pBvDesc);
			kfree(pBvPhysDesc);
			kfree(pPageList);
			return OMAPLFB_ERROR_INIT_FAILURE;
		}
		pBvDesc->structsize = sizeof(*pBvDesc);
		pBvDesc->auxtype = BVAT_PHYSDESC;

		pBvPhysDesc->pagesize = PAGE_SIZE;
		pBvPhysDesc->pagearray = pPageList;
		pBvPhysDesc->pagecount = wpages * h;

		pBvDesc->auxptr = pBvPhysDesc;

		eBvErr = pBvEntry->bv_map(pBvDesc);

		pBvPhysDesc->pagearray = NULL;

		if (eBvErr)
		{
			WARN(1, "%s: BV map blt buffer failed %d\n", __func__, eBvErr);
			psPVRFBInfo->psBltFBsBvHndl[iFB] = NULL;
			kfree(pBvDesc);
			kfree(pBvPhysDesc);
			res = PVRSRV_ERROR_OUT_OF_MEMORY;
		}
		else
		{
			psPVRFBInfo->psBltFBsBvHndl[iFB] = pBvDesc;
			psPVRFBInfo->psBltFBsBvPhys[iFB] = pPageList[0];
		}
	}

	return res;
}
Example #25
void ddl_pmem_alloc(struct ddl_buf_addr *buff_addr, size_t sz, u32 align)
{
	u32 guard_bytes, align_mask;
	u32 physical_addr;
	u32 align_offset;
	u32 alloc_size, flags = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	unsigned long *kernel_vaddr = NULL;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	int ret = -EINVAL;

	if (!buff_addr) {
		ERR("\n%s() Invalid Parameters\n", __func__);
		return;
	}
	if (align == DDL_LINEAR_BUFFER_ALIGN_BYTES) {
		guard_bytes = 31;
		align_mask = 0xFFFFFFE0U;
	} else {
		guard_bytes = DDL_TILE_BUF_ALIGN_GUARD_BYTES;
		align_mask = DDL_TILE_BUF_ALIGN_MASK;
	}
	ddl_context = ddl_get_context();
	alloc_size = sz + guard_bytes;
	if (res_trk_get_enable_ion()) {
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			ERR("\n%s(): DDL ION Client Invalid handle\n",
				__func__);
			goto bailout;
		}
		buff_addr->mem_type = res_trk_get_mem_type();
		buff_addr->alloc_handle = ion_alloc(
					ddl_context->video_ion_client,
					alloc_size,
					SZ_4K,
					buff_addr->mem_type);
		if (IS_ERR_OR_NULL(buff_addr->alloc_handle)) {
			ERR("\n%s(): DDL ION alloc failed\n",
					__func__);
			goto bailout;
		}
		ret = ion_phys(ddl_context->video_ion_client,
					buff_addr->alloc_handle,
					&phyaddr,
					&len);
		if (ret || !phyaddr) {
			ERR("\n%s(): DDL ION client physical failed\n",
					__func__);
			goto free_ion_buffer;
		}
		buff_addr->physical_base_addr = (u32 *)phyaddr;
		kernel_vaddr = (unsigned long *) ion_map_kernel(
					ddl_context->video_ion_client,
					buff_addr->alloc_handle,
					UNCACHED);
		if (IS_ERR_OR_NULL(kernel_vaddr)) {
			ERR("\n%s(): DDL ION map failed\n", __func__);
			goto unmap_ion_buffer;
		}
		buff_addr->virtual_base_addr = (u32 *)kernel_vaddr;
		DBG("ddl_ion_alloc: handle(0x%x), mem_type(0x%x), "\
			"phys(0x%x), virt(0x%x), size(%u), align(%u), "\
			"alloced_len(%u)", (u32)buff_addr->alloc_handle,
			(u32)buff_addr->mem_type,
			(u32)buff_addr->physical_base_addr,
			(u32)buff_addr->virtual_base_addr,
			alloc_size, align, len);
	} else {
		physical_addr = (u32)
			allocate_contiguous_memory_nomap(alloc_size,
						ddl_context->memtype, SZ_4K);
		if (!physical_addr) {
			ERR("\n%s(): DDL pmem allocate failed\n",
			       __func__);
			goto bailout;
		}
		buff_addr->physical_base_addr = (u32 *) physical_addr;
		flags = MSM_SUBSYSTEM_MAP_KADDR;
		buff_addr->mapped_buffer =
		msm_subsystem_map_buffer((unsigned long)physical_addr,
		alloc_size, flags, NULL, 0);
		if (IS_ERR(buff_addr->mapped_buffer)) {
			ERR("\n%s() buffer map failed\n", __func__);
			goto free_pmem_buffer;
		}
		mapped_buffer = buff_addr->mapped_buffer;
		if (!mapped_buffer->vaddr) {
			ERR("\n%s() mapped virtual address is NULL\n",
				__func__);
			goto unmap_pmem_buffer;
		}
		buff_addr->virtual_base_addr = mapped_buffer->vaddr;
		DBG("ddl_pmem_alloc: mem_type(0x%x), phys(0x%x),"\
			" virt(0x%x), sz(%u), align(%u)",
			(u32)buff_addr->mem_type,
			(u32)buff_addr->physical_base_addr,
			(u32)buff_addr->virtual_base_addr,
			alloc_size, SZ_4K);
	}

	memset(buff_addr->virtual_base_addr, 0, sz + guard_bytes);
	buff_addr->buffer_size = sz;
	buff_addr->align_physical_addr = (u32 *)
		(((u32)buff_addr->physical_base_addr + guard_bytes) &
		align_mask);
	align_offset = (u32) (buff_addr->align_physical_addr) -
		(u32)buff_addr->physical_base_addr;
	buff_addr->align_virtual_addr =
	    (u32 *) ((u32) (buff_addr->virtual_base_addr)
		     + align_offset);
	DBG("%s(): phys(0x%x) align_phys(0x%x), virt(0x%x),"\
		" align_virt(0x%x)", __func__,
		(u32)buff_addr->physical_base_addr,
		(u32)buff_addr->align_physical_addr,
		(u32)buff_addr->virtual_base_addr,
		(u32)buff_addr->align_virtual_addr);
	return;

unmap_pmem_buffer:
	if (buff_addr->mapped_buffer)
		msm_subsystem_unmap_buffer(buff_addr->mapped_buffer);
free_pmem_buffer:
	if (buff_addr->physical_base_addr)
		free_contiguous_memory_by_paddr((unsigned long)
			buff_addr->physical_base_addr);
	memset(buff_addr, 0, sizeof(struct ddl_buf_addr));
	return;

unmap_ion_buffer:
	if (ddl_context->video_ion_client) {
		if (buff_addr->alloc_handle)
			ion_unmap_kernel(ddl_context->video_ion_client,
				buff_addr->alloc_handle);
	}
free_ion_buffer:
	if (ddl_context->video_ion_client) {
		if (buff_addr->alloc_handle)
			ion_free(ddl_context->video_ion_client,
				buff_addr->alloc_handle);
	}
bailout:
	memset(buff_addr, 0, sizeof(struct ddl_buf_addr));
}
Example #26
static int audpcm_in_open(struct inode *inode, struct file *file)
{
	struct audio_in *audio = &the_audio_in;
	int rc;
	size_t len = 0;
	unsigned long ionflag = 0;
	ion_phys_addr_t addr = 0;
	struct ion_handle *handle = NULL;
	struct ion_client *client = NULL;

	int encid;
	struct timespec ts;
	struct rtc_time tm;

	mutex_lock(&audio->lock);
	if (audio->opened) {
		rc = -EBUSY;
		goto done;
	}

	audio->mode = MSM_AUD_ENC_MODE_TUNNEL;
	audio->samp_rate = RPC_AUD_DEF_SAMPLE_RATE_11025;
	audio->samp_rate_index = AUDREC_CMD_SAMP_RATE_INDX_11025;
	audio->channel_mode = AUDREC_CMD_STEREO_MODE_MONO;
	audio->buffer_size = MONO_DATA_SIZE;
	audio->enc_type = AUDREC_CMD_TYPE_0_INDEX_WAV | audio->mode;

	rc = audmgr_open(&audio->audmgr);
	if (rc)
		goto done;
	encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name,
			&audio->queue_ids);
	if (encid < 0) {
		MM_AUD_ERR("No free encoder available\n");
		rc = -ENODEV;
		goto done;
	}
	audio->enc_id = encid;

	rc = msm_adsp_get(audio->module_name, &audio->audrec,
			   &audrec_adsp_ops, audio);
	if (rc) {
		audpreproc_aenc_free(audio->enc_id);
		goto done;
	}

	audio->dsp_cnt = 0;
	audio->stopped = 0;

	audpcm_in_flush(audio);

	client = msm_ion_client_create(UINT_MAX, "Audio_PCM_in_client");
	if (IS_ERR_OR_NULL(client)) {
		MM_ERR("Unable to create ION client\n");
		rc = -ENOMEM;
		goto client_create_error;
	}
	audio->client = client;

	MM_DBG("allocating mem sz = %d\n", DMASZ);
	handle = ion_alloc(client, DMASZ, SZ_4K,
		ION_HEAP(ION_AUDIO_HEAP_ID));
	if (IS_ERR_OR_NULL(handle)) {
		MM_ERR("Unable to create allocate O/P buffers\n");
		rc = -ENOMEM;
		goto output_buff_alloc_error;
	}

	audio->output_buff_handle = handle;

	rc = ion_phys(client, handle, &addr, &len);
	if (rc) {
		MM_ERR("O/P buffers: invalid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
		rc = -ENOMEM;
		goto output_buff_get_phys_error;
	} else {
		MM_INFO("O/P buffers: valid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
	}
	audio->phys = (int32_t)addr;

	rc = ion_handle_get_flags(client, handle, &ionflag);
	if (rc) {
		MM_ERR("could not get flags for the handle\n");
		rc = -ENOMEM;
		goto output_buff_get_flags_error;
	}

	audio->data = ion_map_kernel(client, handle, ionflag);
	if (IS_ERR(audio->data)) {
		MM_ERR("could not map read buffers,freeing instance 0x%08x\n",
				(int)audio);
		rc = -ENOMEM;
		goto output_buff_map_error;
	}
	MM_DBG("read buf: phy addr 0x%08x kernel addr 0x%08x\n",
		audio->phys, (int)audio->data);

	file->private_data = audio;
	audio->opened = 1;
	rc = 0;
done:
	mutex_unlock(&audio->lock);
	getnstimeofday(&ts);
	rtc_time_to_tm(ts.tv_sec, &tm);
	pr_aud_info1("[ATS][start_recording][successful] at %lld \
		(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n",
		ktime_to_ns(ktime_get()),
		tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
	return rc;
output_buff_map_error:
output_buff_get_phys_error:
output_buff_get_flags_error:
	ion_free(client, audio->output_buff_handle);
output_buff_alloc_error:
	ion_client_destroy(client);
client_create_error:
	msm_adsp_put(audio->audrec);
	audpreproc_aenc_free(audio->enc_id);
	mutex_unlock(&audio->lock);
	return rc;
}
static int exynos_secure_mem_enable(struct kbase_device *kbdev, int ion_fd, u64 flags, struct kbase_va_region *reg)
{
	/* enable secure world mode : TZASC */
	int ret = 0;

	if (!kbdev)
		goto secure_out;

	if (!kbdev->secure_mode_support) {
		GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: wrong operation! DDK cannot support Secure Rendering\n", __func__);
		ret = -EINVAL;
		goto secure_out;
	}

	if (!reg) {
		GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: wrong input argument, reg %p\n",
			__func__, reg);
		goto secure_out;
	}
#if defined(CONFIG_ION) && defined(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION)
#if MALI_SEC_ASP_SECURE_BUF_CTRL
	{
		struct ion_client *client;
		struct ion_handle *ion_handle;
		size_t len = 0;
		ion_phys_addr_t phys = 0;

		flush_all_cpu_caches();

		if ((flags & kbdev->sec_sr_info.secure_flags_crc_asp) == kbdev->sec_sr_info.secure_flags_crc_asp) {
			reg->flags |= KBASE_REG_SECURE_CRC | KBASE_REG_SECURE;
		} else {

			client = ion_client_create(ion_exynos, "G3D");
			if (IS_ERR(client)) {
				GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: Failed to get ion_client of G3D\n",
						__func__);
				goto secure_out;
			}

			ion_handle = ion_import_dma_buf(client, ion_fd);

			if (IS_ERR(ion_handle)) {
				GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: Failed to get ion_handle of G3D\n",
						__func__);
				ion_client_destroy(client);
				goto secure_out;
			}

			if (ion_phys(client, ion_handle, &phys, &len)) {
				GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: Failed to get phys. addr of G3D\n",
						__func__);
				ion_free(client, ion_handle);
				ion_client_destroy(client);
				goto secure_out;
			}

			ion_free(client, ion_handle);
			ion_client_destroy(client);

			ret = exynos_smc(SMC_DRM_SECBUF_CFW_PROT, phys, len, PROT_G3D);
			if (ret != DRMDRV_OK) {
				GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: failed to set secure buffer region of G3D buffer, phy 0x%08x, error 0x%x\n",
					__func__, (unsigned int)phys, ret);
				BUG();
			}

			reg->flags |= KBASE_REG_SECURE;
		}

		reg->phys_by_ion = phys;
		reg->len_by_ion = len;
	}
#else
	reg->flags |= KBASE_REG_SECURE;

	reg->phys_by_ion = 0;
	reg->len_by_ion = 0;
#endif
#else
	GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: wrong operation! DDK cannot support Secure Rendering\n", __func__);
	ret = -EINVAL;
#endif // defined(CONFIG_ION) && defined(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION)

	return ret;
secure_out:
	ret = -EINVAL;
	return ret;
}
void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size, offset = 0;
	struct ddl_context *ddl_context;
	unsigned long iova = 0;
	unsigned long buffer_size = 0;
	unsigned long *kernel_vaddr = NULL;
	int ret = 0;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	int rc = 0;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	res_trk_set_mem_type(addr->mem_type);
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			DDL_MSG_ERROR("%s() :DDL ION Client Invalid handle\n",
						 __func__);
			goto bail_out;
		}
		alloc_size = (alloc_size+4095) & ~4095;
		addr->alloc_handle = ion_alloc(
		ddl_context->video_ion_client, alloc_size, SZ_4K,
			res_trk_get_mem_type(), res_trk_get_ion_flags());
		if (IS_ERR_OR_NULL(addr->alloc_handle)) {
			DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
						 __func__);
			goto bail_out;
		}
		kernel_vaddr = (unsigned long *) ion_map_kernel(
					ddl_context->video_ion_client,
					addr->alloc_handle);
		if (IS_ERR_OR_NULL(kernel_vaddr)) {
				DDL_MSG_ERROR("%s() :DDL ION map failed\n",
							 __func__);
				goto free_ion_alloc;
		}
		addr->virtual_base_addr = (u8 *) kernel_vaddr;
		if (res_trk_check_for_sec_session()) {
			rc = ion_phys(ddl_context->video_ion_client,
				addr->alloc_handle, &phyaddr,
					&len);
			if (rc || !phyaddr) {
				DDL_MSG_ERROR(
				"%s():DDL ION client physical failed\n",
				__func__);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = phyaddr;
		} else {
			ret = ion_map_iommu(ddl_context->video_ion_client,
					addr->alloc_handle,
					VIDEO_DOMAIN,
					VIDEO_MAIN_POOL,
					SZ_4K,
					0,
					&iova,
					&buffer_size,
					0, 0);
			if (ret || !iova) {
				DDL_MSG_ERROR(
				"%s():DDL ION ion map iommu failed, ret = %d iova = 0x%lx\n",
					__func__, ret, iova);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = (phys_addr_t) iova;
		}
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s():DDL ION client physical failed\n",
						__func__);
			goto unmap_ion_alloc;
		}
		addr->mapped_buffer = NULL;
		addr->physical_base_addr = (u8 *) addr->alloced_phys_addr;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = alloc_size;
	} else {
		pr_err("ION must be enabled.");
		goto bail_out;
	}
	return addr->virtual_base_addr;
unmap_ion_alloc:
	ion_unmap_kernel(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->virtual_base_addr = NULL;
	addr->alloced_phys_addr = (phys_addr_t)NULL;
free_ion_alloc:
	ion_free(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->alloc_handle = NULL;
bail_out:
	return NULL;
}
int _IOGetPhyMem(int which, vpu_mem_desc *buff)
{
#ifdef BUILD_FOR_ANDROID
	const size_t pagesize = getpagesize();
	int err, fd;
#ifdef USE_ION
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
	ion_user_handle_t handle;
#else
	struct ion_handle *handle;
#endif
	int share_fd, ret = -1;
	unsigned char *ptr;
#elif USE_GPU
	struct g2d_buf *gbuf;
	int bytes;
#else
	/* Get memory from pmem space for android */
	struct pmem_region region;
#endif

	if ((!buff) || (!buff->size)) {
		err_msg("Error!_IOGetPhyMem:Invalid parameters");
		return -1;
	}

	buff->cpu_addr = 0;
	buff->phy_addr = 0;
	buff->virt_uaddr = 0;

	if (which == VPU_IOC_GET_WORK_ADDR) {
		if (ioctl(vpu_fd, which, buff) < 0) {
			err_msg("mem allocation failed!\n");
			buff->phy_addr = 0;
			buff->cpu_addr = 0;
			return -1;
		}
		return 0;
	}

	if (which != VPU_IOC_PHYMEM_ALLOC) {
		err_msg("Error!_IOGetPhyMem unsupported memtype: %d", which);
		return -1;
	}

	buff->size = (buff->size + pagesize-1) & ~(pagesize - 1);

#ifdef USE_ION
	fd = ion_open();
	if (fd <= 0) {
		err_msg("ion open failed!\n");
		return -1;
	}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
	err = ion_alloc(fd, buff->size, pagesize, 1, 0, &handle);
#else
	err = ion_alloc(fd, buff->size, pagesize, 1, &handle);
#endif
	if (err) {
		err_msg("ion allocation failed!\n");
		goto error;
	}

	err = ion_map(fd, handle, buff->size,
			    PROT_READ|PROT_WRITE, MAP_SHARED,
			    0, &ptr, &share_fd);
	if (err) {
		err_msg("ion map failed!\n");
		goto error;
	}

	/* this ion_phys() wrapper returns the physical address itself,
	 * or 0 on failure, rather than an error code */
	err = ion_phys(fd, handle);
	if (err == 0) {
		err_msg("ion get physical address failed!\n");
		goto error;
	}

	buff->virt_uaddr = (unsigned long)ptr;
	buff->phy_addr = (unsigned long)err;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
	ion_free(fd, handle);
	buff->cpu_addr = (unsigned long)share_fd;
#else
	buff->cpu_addr = (unsigned long)handle;
#endif
	memset((void*)buff->virt_uaddr, 0, buff->size);
	ret = 0;
	info_msg("<ion> alloc handle: 0x%x, paddr: 0x%x, vaddr: 0x%x",
			(unsigned int)handle, (unsigned int)buff->phy_addr,
			(unsigned int)buff->virt_uaddr);
error:
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
	close(share_fd);
#endif
	ion_close(fd);
	return ret;
#elif USE_GPU
	bytes = buff->size + PAGE_SIZE;
	gbuf = g2d_alloc(bytes, 0);
	if (!gbuf) {
		err_msg("%s: gpu allocator failed to alloc buffer with size %d", __FUNCTION__, buff->size);
		return -1;
	}

	buff->virt_uaddr = (unsigned long)gbuf->buf_vaddr;
	buff->phy_addr = (unsigned long)gbuf->buf_paddr;
	buff->cpu_addr = (unsigned long)gbuf;

	/* vpu requires page alignment for the address implicitly, round it to page edge */
	buff->virt_uaddr = (buff->virt_uaddr + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
	buff->phy_addr = (buff->phy_addr + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
	memset((void *)buff->virt_uaddr, 0, buff->size);

	info_msg("<gpu> alloc handle: 0x%x, paddr: 0x%x, vaddr: 0x%x",
			(unsigned int)gbuf, (unsigned int)buff->phy_addr,
			(unsigned int)buff->virt_uaddr);
	return 0;
#else
	fd = (unsigned long)open("/dev/pmem_adsp", O_RDWR | O_SYNC);
	if (fd < 0) {
		err_msg("Error!_IOGetPhyMem Error,cannot open pmem");
		return -1;
	}

	err = ioctl(fd, PMEM_GET_TOTAL_SIZE, &region);

	buff->virt_uaddr = (unsigned long)mmap(0, buff->size,
			    PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);

	if (buff->virt_uaddr == (unsigned long)MAP_FAILED) {
		err_msg("Error!mmap(fd=%d, size=%u) failed (%s)",
			fd, buff->size, strerror(errno));
		close(fd);
		return -1;
	}

	memset(&region, 0, sizeof(region));

	if (ioctl(fd, PMEM_GET_PHYS, &region) == -1) {
		err_msg("Error!Failed to get physical address of source!");
		munmap((void *)buff->virt_uaddr, buff->size);
		close(fd);
		return -1;
	}

	buff->phy_addr = (unsigned long)region.offset;
	buff->cpu_addr = (unsigned long)fd;
	memset((void*)buff->virt_uaddr, 0, buff->size);
#endif
#else
	if (ioctl(vpu_fd, which, buff) < 0) {
		err_msg("mem allocation failed!\n");
		buff->phy_addr = 0;
		buff->cpu_addr = 0;
		return -1;
	}
	sz_alloc += buff->size;
	dprintf(3, "%s: phy addr = %08lx\n", __func__, buff->phy_addr);
	dprintf(3, "%s: alloc=%d, total=%d\n", __func__, buff->size, sz_alloc);
#endif

	return 0;
}
/*!
******************************************************************************

 @Function              ImportPages

******************************************************************************/
static IMG_RESULT ImportPages(
    SYSMEM_Heap		*heap,
    SYSDEVU_sInfo	*sysdev,
    IMG_UINT32		ui32Size,
    SYSMEMU_sPages *psPages,
    SYS_eMemAttrib	eMemAttrib,
    IMG_INT32		buff_fd,
    IMG_UINT64		*pPhyAddrs,
    IMG_VOID		*priv,
    IMG_BOOL		kernelMapped
)
{
	size_t numPages = (ui32Size + HOST_MMU_PAGE_SIZE - 1)/HOST_MMU_PAGE_SIZE;
	struct ion_handle *ionHandle;
	IMG_RESULT result = IMG_ERROR_FATAL;
	unsigned pg_i = 0;
	struct ion_client *pIONcl;

	DEBUG_REPORT(REPORT_MODULE_SYSMEM, "Importing buff_fd %d of size %u", buff_fd, ui32Size);

	pIONcl = get_ion_client();
	if (!pIONcl)
		goto exitFailGetClient;

	ionHandle = ion_import_dma_buf(pIONcl, buff_fd);
	if (IS_ERR(ionHandle)) {
		REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error obtaining handle from fd %d", buff_fd);
		result = IMG_ERROR_FATAL;
		goto exitFailImportFD;
	}

	psPages->pvImplData = ionHandle;

#if defined(ION_SYSTEM_HEAP)
	{
		struct scatterlist *psScattLs, *psScattLsAux;
		struct sg_table *psSgTable;

		psSgTable = ion_sg_table(pIONcl, ionHandle);

		if (psSgTable == NULL)
		{
			REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error obtaining sg table");
			result = IMG_ERROR_FATAL;
			goto exitFailMap;
		}
		psScattLs = psSgTable->sgl;

		if (psScattLs == NULL)
		{
			REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error obtaining scatter list");
			result = IMG_ERROR_FATAL;
			goto exitFailMap;
		}

		// Get physical addresses from scatter list
		for (psScattLsAux = psScattLs; psScattLsAux; psScattLsAux = sg_next(psScattLsAux))
		{
			int offset;
			dma_addr_t chunkBase = sg_phys(psScattLsAux);

			for (offset = 0; offset < psScattLsAux->length; offset += PAGE_SIZE, ++pg_i)
			{
				if (pg_i >= numPages)
					break;

				pPhyAddrs[pg_i] = chunkBase + offset;
			}

			if (pg_i >= numPages)
				break;
		}

		if (kernelMapped)
			psPages->pvCpuKmAddr = ion_map_kernel(pIONcl, ionHandle);
	}
#else
	{
		int offset;
		ion_phys_addr_t physaddr;
		size_t len = 0;

		result = ion_phys(pIONcl, ionHandle, &physaddr, &len);

		if(result)
		{
			IMG_ASSERT(!"ion_phys failed");
			result = IMG_ERROR_FATAL;
			goto exitFailMap;
		}

		for (offset = 0; pg_i < numPages; offset += PAGE_SIZE, ++pg_i)
		{
			if (pg_i >= numPages)
				break;
			pPhyAddrs[pg_i] = physaddr + offset;
		}

		if (kernelMapped)
			psPages->pvCpuKmAddr = SYSMEMU_CpuPAddrToCpuKmAddr(heap->memId, physaddr);
	}
#endif

	{
		size_t  physAddrArrSize = numPages * sizeof(psPages->ppaPhysAddr[0]);
		size_t  phy_i;

		psPages->ppaPhysAddr = IMG_BIGORSMALL_ALLOC(physAddrArrSize);
		IMG_ASSERT(psPages->ppaPhysAddr != IMG_NULL);
		if (psPages->ppaPhysAddr == IMG_NULL)
		{
			return IMG_ERROR_OUT_OF_MEMORY;
		}

		for (phy_i = 0; phy_i < numPages; ++phy_i)
			psPages->ppaPhysAddr[phy_i] = pPhyAddrs[phy_i];
	}

	if (kernelMapped && psPages->pvCpuKmAddr == NULL) {
		REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error mapping to kernel address");
		result = IMG_ERROR_FATAL;
		goto exitFailMapKernel;
	}

	result = IMG_SUCCESS;

exitFailMapKernel:
exitFailMap:
exitFailImportFD:
exitFailGetClient:
	return result;
}