Example #1
0
/*
 * msm_mem_free - release the buffer backing a videobuf2 contig-pmem node.
 * @mem: descriptor holding either an ION client/handle pair or a
 *       physical address, depending on the build configuration.
 *
 * With CONFIG_MSM_MULTIMEDIA_USE_ION the per-buffer ION handle is freed
 * and its client destroyed; otherwise the contiguous region is returned
 * by physical address.  Always returns 0.
 */
static int32_t msm_mem_free(struct videobuf2_contig_pmem *mem)
{
    int32_t rc = 0;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
    ion_free(mem->client, mem->ion_handle);
    ion_client_destroy(mem->client);
#else
    free_contiguous_memory_by_paddr(mem->phyaddr);
#endif
    return rc;
}
/*
 * deregister_memory - tear down the ACDB calibration memory mapping.
 *
 * If calibration memory is mapped (mem_len != 0), unmap the kernel
 * mapping, free the ION handle and destroy the client.  mem_len is
 * cleared *before* dropping acdb_mutex so no other thread can observe
 * a non-zero length after the mapping has been destroyed (this matches
 * the ordering used by the other deregister_memory variants in the
 * driver; the previous code cleared it after unlock).
 *
 * Returns 0 always.
 */
static int deregister_memory(void)
{
	if (atomic64_read(&acdb_data.mem_len)) {
		mutex_lock(&acdb_data.acdb_mutex);
		ion_unmap_kernel(acdb_data.ion_client, acdb_data.ion_handle);
		ion_free(acdb_data.ion_client, acdb_data.ion_handle);
		ion_client_destroy(acdb_data.ion_client);
		/* Clear the length while still holding the lock. */
		atomic64_set(&acdb_data.mem_len, 0);
		mutex_unlock(&acdb_data.acdb_mutex);
	}
	return 0;
}
Example #3
0
/*
 * htc_msm_smem_new_client - create a shared-memory client backed by a
 * pair of ION clients (one for allocation, one for import).
 * @mtype: requested memory type; only SMEM_ION is supported.
 * @res:   platform resources stored on the new client.
 *
 * Returns a newly allocated smem_client on success, or NULL on failure.
 * On any failure path both underlying ION clients are released; in
 * particular, a kzalloc() failure no longer leaks the two clients
 * created just above (previous code returned NULL without destroying
 * them).
 */
void *htc_msm_smem_new_client(enum smem_type mtype,
                struct msm_vidc_platform_resources *res)
{
        struct smem_client *client = NULL;
        void *clnt_alloc = NULL;
        void *clnt_import = NULL;

        switch (mtype) {
        case SMEM_ION:
                clnt_alloc = htc_ion_new_client_alloc();
                clnt_import = htc_ion_new_client_import();
                break;
        default:
                dprintk(VIDC_ERR, "Mem type not supported\n");
                break;
        }

        if (clnt_alloc && clnt_import) {
                client = kzalloc(sizeof(*client), GFP_KERNEL);
                if (client) {
                        client->mem_type = mtype;
                        client->clnt = client->clnt_import = clnt_import;
                        client->clnt_alloc = clnt_alloc;
                        client->res = res;
                        client->inst = NULL;
                } else {
                        /* kzalloc failed: release both ION clients,
                         * otherwise they would leak. */
                        ion_client_destroy(clnt_alloc);
                        ion_client_destroy(clnt_import);
                }
        } else {
                if (clnt_alloc == NULL) {
                        dprintk(VIDC_ERR, "Failed to create new alloc ion_client\n");
                } else {
                        ion_client_destroy(clnt_alloc);
                }

                if (clnt_import == NULL) {
                        dprintk(VIDC_ERR, "Failed to create new import ion_client\n");
                } else {
                        ion_client_destroy(clnt_import);
                }
        }
        return client;
}
/*
 * msm_gemini_platform_release - undo msm_gemini platform setup.
 * @mem:     MMIO resource to release.
 * @base:    ioremapped register base to unmap.
 * @irq:     IRQ line to free.
 * @context: cookie originally passed to request_irq().
 *
 * Frees the IRQ first (so no handler can touch the registers), then
 * disables the JPEG clocks, unmaps and releases the MMIO region, and
 * finally destroys the global gemini ION client when ION is in use.
 *
 * Returns the result of the clock-disable call.
 */
int msm_gemini_platform_release(struct resource *mem, void *base, int irq,
	void *context)
{
	int result;
	free_irq(irq, context);
	result = msm_camio_jpeg_clk_disable();
	iounmap(base);
	release_mem_region(mem->start, resource_size(mem));
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	ion_client_destroy(gemini_client);
#endif
	GMN_DBG("%s:%d] success\n", __func__, __LINE__);
	return result;
}
/*
 * vcap_remove - platform-device remove hook for the VCAP driver.
 * @pdev: the platform device being removed (unused; state comes from
 *        the global vcap_ctrl).
 *
 * Tears down everything vcap_probe set up: destroys the ION client,
 * drains and destroys the workqueue, releases the video device,
 * de-initialises the capture block, powers the device down, and
 * unmaps/releases the MMIO region before freeing the state struct.
 *
 * NOTE(review): the ION client is destroyed before the workqueue is
 * flushed — assumes no queued work still uses dev->ion_client; confirm
 * against the work handlers.
 */
static int __devexit vcap_remove(struct platform_device *pdev)
{
	struct vcap_dev *dev = vcap_ctrl;
	ion_client_destroy(dev->ion_client);
	flush_workqueue(dev->vcap_wq);
	destroy_workqueue(dev->vcap_wq);
	video_device_release(dev->vfd);
	deinit_vc();
	vcap_disable(dev);
	v4l2_device_unregister(&dev->v4l2_dev);
	iounmap(dev->vcapbase);
	release_mem_region(dev->vcapmem->start, resource_size(dev->vcapmem));
	vcap_ctrl = NULL;
	kfree(dev);

	return 0;
}
Example #6
0
/*
 * msm_jpeg_platform_release - release all platform resources of the
 * JPEG device.
 * @mem:     MMIO resource to release.
 * @base:    ioremapped register base to unmap.
 * @irq:     IRQ line to free.
 * @context: the struct msm_jpeg_device (also the IRQ cookie).
 *
 * Order matters: the IRQ is freed first, then IOMMU contexts are
 * detached, the bus-scale vote is dropped and the client unregistered,
 * clocks are disabled, the regulator released, MMIO unmapped, and
 * finally the device's ION client is destroyed before the state is
 * marked idle.
 *
 * Returns 0, or the error from regulator_disable() if it failed.
 */
int msm_jpeg_platform_release(struct resource *mem, void *base, int irq,
	void *context)
{
	int result = 0;
	int i = 0;
	struct msm_jpeg_device *pgmn_dev =
		(struct msm_jpeg_device *) context;

	free_irq(irq, context);

#ifdef CONFIG_MSM_IOMMU
	/* Detach every IOMMU context attached during setup. */
	for (i = 0; i < pgmn_dev->iommu_cnt; i++) {
		iommu_detach_device(pgmn_dev->domain,
				pgmn_dev->iommu_ctx_arr[i]);
		JPEG_DBG("%s:%d]", __func__, __LINE__);
	}
#endif
	if (pgmn_dev->jpeg_bus_client) {
		/* Drop the bandwidth vote before unregistering. */
		msm_bus_scale_client_update_request(
			pgmn_dev->jpeg_bus_client, 0);
		msm_bus_scale_unregister_client(pgmn_dev->jpeg_bus_client);
	}

	/* Final argument 0 = disable the clocks. */
	msm_cam_clk_enable(&pgmn_dev->pdev->dev, jpeg_8x_clk_info,
	pgmn_dev->jpeg_clk, ARRAY_SIZE(jpeg_8x_clk_info), 0);
	JPEG_DBG("%s:%d] clock disbale done", __func__, __LINE__);

	if (pgmn_dev->jpeg_fs) {
		result = regulator_disable(pgmn_dev->jpeg_fs);
		if (!result)
			regulator_put(pgmn_dev->jpeg_fs);
		else
			JPEG_PR_ERR("%s:%d] regulator disable failed %d",
				__func__, __LINE__, result);
		pgmn_dev->jpeg_fs = NULL;
	}
	iounmap(pgmn_dev->jpeg_vbif);
	iounmap(base);
	release_mem_region(mem->start, resource_size(mem));
	ion_client_destroy(pgmn_dev->jpeg_client);
	pgmn_dev->state = MSM_JPEG_IDLE;
	JPEG_DBG("%s:%d] success\n", __func__, __LINE__);
	return result;
}
Example #7
0
/*
 * vb2_ion_cleanup_multi - tear down a multi-plane vb2 ION context.
 * @alloc_ctxes: array of allocation contexts; entry 0 holds the shared
 *               vb2_ion_conf used by every plane.
 *
 * When the MMU is in use, deactivates IOVMM if it is still enabled
 * (warning, since it should already be off) and cleans it up.  The ION
 * client is then destroyed and the context array itself freed.
 */
void vb2_ion_cleanup_multi(void **alloc_ctxes)
{
	struct vb2_ion_conf *cfg = alloc_ctxes[0];

	BUG_ON(!cfg);

	if (cfg->use_mmu) {
		/* Should already be deactivated at this point. */
		if (atomic_read(&cfg->mmu_enable)) {
			pr_warning("mmu_enable(%d)\n",
					atomic_read(&cfg->mmu_enable));
			iovmm_deactivate(cfg->dev);
		}
		iovmm_cleanup(cfg->dev);
	}

	ion_client_destroy(cfg->client);
	kfree(alloc_ctxes);
}
/*
 * msm_mercury_platform_release - undo mercury platform setup.
 * @mem:     MMIO resource to release.
 * @base:    ioremapped register base to unmap.
 * @irq:     IRQ line to free.
 * @context: the struct msm_mercury_device (also the IRQ cookie).
 *
 * Frees the IRQ, disables the JPEGD clocks, unmaps and releases the
 * MMIO window, and destroys the global mercury ION client when ION is
 * enabled.  Returns 0.
 */
int msm_mercury_platform_release(struct resource *mem, void *base,
	int irq, void *context)
{
	struct msm_mercury_device *mercury_dev =
		(struct msm_mercury_device *) context;
	int rc = 0;

	free_irq(irq, context);
	/* Final argument 0 = disable the clocks. */
	msm_cam_clk_enable(&mercury_dev->pdev->dev, mercury_jpegd_clk_info,
		mercury_dev->mercury_clk, ARRAY_SIZE(mercury_jpegd_clk_info),
		0);
	iounmap(base);
	release_mem_region(mem->start, resource_size(mem));
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	ion_client_destroy(mercury_client);
#endif
	MCR_DBG("%s:%d] success\n", __func__, __LINE__);
	return rc;
}
/*
 * balong_compose_remove - remove hook for the ADE compose device.
 * @pdev: platform device carrying the ade_compose_data_type drvdata.
 *
 * Walks every command queue and unmaps/frees each command-file ION
 * buffer against the framebuffer's ION client, then clears the queue
 * bookkeeping and unregisters the character device and its class.
 *
 * Returns 0 always.
 */
STATIC int balong_compose_remove(struct platform_device *pdev)
{
    struct ade_compose_data_type   *ade_pri_data;
    struct balong_fb_data_type     *fb_data;
    struct cmdqueue_buff    *cmdq_list = NULL;
    struct cmdfile_buffer   *cf_buff = NULL;
    int i = 0;
    int j = 0;

    BUG_ON(compose_class == NULL);

    ade_pri_data = (struct ade_compose_data_type *)platform_get_drvdata(pdev);
    fb_data = (struct balong_fb_data_type *)platform_get_drvdata(ade_pri_data->parent);

    /* Release every command-file buffer in every queue. */
    for (i = 0; i < ADE_CMDQ_TYPE_MAX; i++) {
        cmdq_list = &ade_pri_data->cmdq_list[i];

        for (j = 0; j < ADE_CMD_FILE_BUFF_MAX; j++) {
            cf_buff = &cmdq_list->cf_list[j];
            if (cf_buff->cmd_ion_handle != NULL) {
                ion_unmap_kernel(fb_data->fb_ion_client, cf_buff->cmd_ion_handle);
                ion_free(fb_data->fb_ion_client, cf_buff->cmd_ion_handle);
            }
        }
    }
#if 0
    /* Disabled: WDMA2 buffer teardown (ION-IOMMU vs. contiguous). */
    if(g_smmu_flag == ION_IOMMU_MODE){
        ion_free(fb_data->fb_ion_client, ade_pri_data->wdma2_ion_handle);
        ion_client_destroy(fb_data->fb_ion_client);
    }
    else{
        kfree(phys_to_virt(ade_pri_data->wdma2_phy));
    }
#endif
    memset(ade_pri_data->cmdq_list, 0, sizeof(ade_pri_data->cmdq_list));

    device_destroy(compose_class, MKDEV(major, 0));
    class_destroy(compose_class);
    unregister_chrdev(major, DEV_ADE_COMPOSE_NAME);

    return 0;
}
/*
 * audpcm_in_release - file release hook for the PCM-in device.
 * @inode: unused.
 * @file:  holds the audio_in instance in private_data.
 *
 * Under the instance lock: stops and flushes recording, frees the
 * encoder, drops the ADSP module, then unmaps/frees the ION output
 * buffer (if mapped) and destroys the instance's ION client.
 *
 * Returns 0 always.
 */
static int audpcm_in_release(struct inode *inode, struct file *file)
{
	struct audio_in *audio = file->private_data;

	mutex_lock(&audio->lock);
	audpcm_in_disable(audio);
	audpcm_in_flush(audio);
	audpreproc_aenc_free(audio->enc_id);
	msm_adsp_put(audio->audrec);
	audio->audrec = NULL;
	audio->opened = 0;
	if (audio->data) {
		ion_unmap_kernel(audio->client, audio->output_buff_handle);
		ion_free(audio->client, audio->output_buff_handle);
		audio->data = NULL;
	}
	ion_client_destroy(audio->client);
	mutex_unlock(&audio->lock);
	return 0;
}
static int deregister_memory(void)
{
	mutex_lock(&acdb_data.acdb_mutex);
	kfree(acdb_data.hw_delay_tx.delay_info);
	kfree(acdb_data.hw_delay_rx.delay_info);
	mutex_unlock(&acdb_data.acdb_mutex);

	if (atomic64_read(&acdb_data.mem_len)) {
		mutex_lock(&acdb_data.acdb_mutex);
		atomic_set(&acdb_data.vocstrm_total_cal_size, 0);
		atomic_set(&acdb_data.vocproc_total_cal_size, 0);
		atomic_set(&acdb_data.vocvol_total_cal_size, 0);
		atomic64_set(&acdb_data.mem_len, 0);
		ion_unmap_kernel(acdb_data.ion_client, acdb_data.ion_handle);
		ion_free(acdb_data.ion_client, acdb_data.ion_handle);
		ion_client_destroy(acdb_data.ion_client);
		mutex_unlock(&acdb_data.acdb_mutex);
	}
	return 0;
}
/*
 * OSPerProcessPrivateDataDeInit - free per-process environment data.
 * @hOsPrivateData: handle to the PVRSRV_ENV_PER_PROCESS_DATA block;
 *                  may be IMG_NULL, in which case nothing is done.
 *
 * Destroys the per-process ION client (when SUPPORT_ION), disconnects
 * mmap state, removes /proc entries, then frees the block itself.
 * An OSFreeMem failure is logged but not propagated.
 *
 * Always returns PVRSRV_OK.
 */
PVRSRV_ERROR OSPerProcessPrivateDataDeInit(IMG_HANDLE hOsPrivateData)
{
	PVRSRV_ERROR eError;
	PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;

	if (hOsPrivateData == IMG_NULL)
	{
		return PVRSRV_OK;
	}

	psEnvPerProc = (PVRSRV_ENV_PER_PROCESS_DATA *)hOsPrivateData;

#if defined(SUPPORT_ION)
	if (psEnvPerProc->psIONClient)
	{
		ion_client_destroy(psEnvPerProc->psIONClient);
		psEnvPerProc->psIONClient = IMG_NULL;
	}
#endif /* defined(SUPPORT_ION) */

	/* Linux specific mmap processing */
	LinuxMMapPerProcessDisconnect(psEnvPerProc);

	/* Remove per process /proc entries */
#ifdef CONFIG_PVR_PROC
	RemovePerProcessProcDir(psEnvPerProc);
#endif

	eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
				sizeof(PVRSRV_ENV_PER_PROCESS_DATA),
				hOsPrivateData,
				psEnvPerProc->hBlockAlloc);
	/*not nulling pointer, copy on stack*/

	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: OSFreeMem failed (%d)", __FUNCTION__, eError));
	}

	return PVRSRV_OK;
}
Example #13
0
/*
 * msm_fd_release - Fd device release method.
 * @file: Pointer to file struct.
 *
 * Releases the vb2 queue and stats buffer, unmaps the work buffer if
 * one was mapped, detaches the IOMMU device, destroys the per-context
 * ION memory-pool client, removes the v4l2 file handle, and frees the
 * context.  Returns 0 always.
 */
static int msm_fd_release(struct file *file)
{
	struct fd_ctx *ctx = msm_fd_ctx_from_fh(file->private_data);

	vb2_queue_release(&ctx->vb2_q);

	vfree(ctx->stats);

	if (ctx->work_buf.handle)
		msm_fd_hw_unmap_buffer(&ctx->work_buf);

	iommu_detach_device(ctx->fd_device->iommu_domain,
		ctx->fd_device->iommu_dev);
	ion_client_destroy(ctx->mem_pool.client);

	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);

	kfree(ctx);

	return 0;
}
/*
 * msm_jpeg_platform_release - release all platform resources of the
 * JPEG device (IOMMU-detach variant).
 * @mem:     MMIO resource to release.
 * @base:    ioremapped register base to unmap.
 * @irq:     IRQ line to free.
 * @context: the struct msm_jpeg_device (also the IRQ cookie).
 *
 * Frees the IRQ, detaches the IOMMU, drops the bus-scale vote, disables
 * the clocks and regulator, unmaps MMIO, destroys the device's ION
 * client, and marks the device idle.
 *
 * Returns 0, or the error from regulator_disable() if it failed.
 */
int msm_jpeg_platform_release(struct resource *mem, void *base, int irq,
	void *context)
{
	int result = 0;

	struct msm_jpeg_device *pgmn_dev =
		(struct msm_jpeg_device *) context;

	free_irq(irq, context);

	msm_jpeg_detach_iommu(pgmn_dev);

	if (pgmn_dev->jpeg_bus_client) {
		/* Drop the bandwidth vote before unregistering. */
		msm_bus_scale_client_update_request(
			pgmn_dev->jpeg_bus_client, 0);
		msm_bus_scale_unregister_client(pgmn_dev->jpeg_bus_client);
	}

	/* Final argument 0 = disable the clocks. */
	msm_cam_clk_enable(&pgmn_dev->pdev->dev, pgmn_dev->jpeg_clk_info,
	pgmn_dev->jpeg_clk, pgmn_dev->num_clk, 0);
	JPEG_DBG("%s:%d] clock disbale done", __func__, __LINE__);

	if (pgmn_dev->jpeg_fs) {
		result = regulator_disable(pgmn_dev->jpeg_fs);
		if (!result)
			regulator_put(pgmn_dev->jpeg_fs);
		else
			JPEG_PR_ERR("%s:%d] regulator disable failed %d",
				__func__, __LINE__, result);
		pgmn_dev->jpeg_fs = NULL;
	}
	iounmap(pgmn_dev->jpeg_vbif);
	iounmap(base);
	release_mem_region(mem->start, resource_size(mem));
	ion_client_destroy(pgmn_dev->jpeg_client);
	pgmn_dev->state = MSM_JPEG_IDLE;
	JPEG_DBG("%s:%d] success\n", __func__, __LINE__);
	return result;
}
Example #15
0
/*
 * deregister_memory - unmap calibration tables and release the ACDB
 * ION mapping.
 *
 * If calibration memory is mapped, unmaps the per-table mappings
 * (logging but not aborting on failure), clears all calibration sizes
 * and the mapped length, then unmaps/frees/destroys the ION mapping —
 * all under acdb_mutex.
 *
 * Returns 0, or the (negative) unmap_cal_tables() result on failure.
 */
static int deregister_memory(void)
{
	int result = 0;

	pr_debug("%s\n", __func__);

	if (atomic64_read(&acdb_data.mem_len)) {
		mutex_lock(&acdb_data.acdb_mutex);
		result = unmap_cal_tables();
		if (result < 0)
			pr_err("%s: unmap_cal_tables failed, err = %d\n",
				__func__, result);
		atomic_set(&acdb_data.vocstrm_total_cal_size, 0);
		atomic_set(&acdb_data.vocproc_total_cal_size, 0);
		atomic_set(&acdb_data.vocvol_total_cal_size, 0);
		atomic64_set(&acdb_data.mem_len, 0);
		ion_unmap_kernel(acdb_data.ion_client, acdb_data.ion_handle);
		ion_free(acdb_data.ion_client, acdb_data.ion_handle);
		ion_client_destroy(acdb_data.ion_client);
		mutex_unlock(&acdb_data.acdb_mutex);
	}
	return result;
}
/*
 * msm_mem_allocate - allocate the backing store for a contig-pmem node.
 * @mem: descriptor; mem->size gives the requested size, and on the ION
 *       path mem->client/mem->ion_handle are filled in for msm_mem_free.
 *
 * With ION: creates a client, allocates from the MM/IOMMU heaps, and
 * maps the buffer through the IOMMU to obtain a device address.
 * Without ION: allocates contiguous EBI memory.
 *
 * Returns the physical/IOMMU address, or 0 on failure (the error
 * ladder after the return statement is reached only via goto and
 * unwinds the ION allocations in reverse order).
 */
static unsigned long msm_mem_allocate(struct videobuf2_contig_pmem *mem)
{
	unsigned long phyaddr;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	int rc, len;
	mem->client = msm_ion_client_create(-1, "camera");
	if (IS_ERR((void *)mem->client)) {
		pr_err("%s Could not create client\n", __func__);
		goto client_failed;
	}
	mem->ion_handle = ion_alloc(mem->client, mem->size, SZ_4K,
		(0x1 << ION_CP_MM_HEAP_ID | 0x1 << ION_IOMMU_HEAP_ID));
	if (IS_ERR((void *)mem->ion_handle)) {
		pr_err("%s Could not allocate\n", __func__);
		goto alloc_failed;
	}
	rc = ion_map_iommu(mem->client, mem->ion_handle,
			-1, 0, SZ_4K, 0,
			(unsigned long *)&phyaddr,
			(unsigned long *)&len, UNCACHED, 0);
	if (rc < 0) {
		pr_err("%s Could not get physical address\n", __func__);
		goto phys_failed;
	}
#else
	phyaddr = allocate_contiguous_ebi_nomap(mem->size, SZ_4K);
#endif
	return phyaddr;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
phys_failed:
	ion_free(mem->client, mem->ion_handle);
alloc_failed:
	ion_client_destroy(mem->client);
client_failed:
	return 0;
#endif
}
Example #17
0
/*
 * ion_delete_client - destroy the ION client owned by an smem client.
 * @client: shared-memory client whose clnt field holds the ION client.
 */
static void ion_delete_client(struct smem_client *client)
{
	ion_client_destroy(client->clnt);
}
/*
 * register_memory - import and map the ACDB calibration buffer.
 *
 * Under acdb_mutex: creates an ION client, imports the dma-buf fd
 * stored in acdb_data.map_handle, resolves its physical address and
 * length, and maps it into the kernel.  On success the physical/kernel
 * addresses and length are published via the acdb_data atomics.
 *
 * Returns 0 on success or a negative errno; on failure all ION
 * resources acquired so far are unwound (goto ladder) and mem_len is
 * reset to 0.
 */
static int register_memory(void)
{
	int			result;
	unsigned long		paddr;
	void                    *kvptr;
	unsigned long		kvaddr;
	unsigned long		mem_len;

	mutex_lock(&acdb_data.acdb_mutex);
	acdb_data.ion_client =
		msm_ion_client_create(UINT_MAX, "audio_acdb_client");
	if (IS_ERR_OR_NULL(acdb_data.ion_client)) {
		pr_err("%s: Could not register ION client!!!\n", __func__);
		result = PTR_ERR(acdb_data.ion_client);
		goto err;
	}

	acdb_data.ion_handle = ion_import_dma_buf(acdb_data.ion_client,
		atomic_read(&acdb_data.map_handle));
	if (IS_ERR_OR_NULL(acdb_data.ion_handle)) {
		pr_err("%s: Could not import map handle!!!\n", __func__);
		result = PTR_ERR(acdb_data.ion_handle);
		goto err_ion_client;
	}

	result = ion_phys(acdb_data.ion_client, acdb_data.ion_handle,
				&paddr, (size_t *)&mem_len);
	if (result != 0) {
		pr_err("%s: Could not get phys addr!!!\n", __func__);
		goto err_ion_handle;
	}

	kvptr = ion_map_kernel(acdb_data.ion_client,
		acdb_data.ion_handle, 0);
	if (IS_ERR_OR_NULL(kvptr)) {
		pr_err("%s: Could not get kernel virt addr!!!\n", __func__);
		result = PTR_ERR(kvptr);
		goto err_ion_handle;
	}
	kvaddr = (unsigned long)kvptr;
	/* Publish the mapping; mem_len != 0 signals "mapped". */
	atomic64_set(&acdb_data.paddr, paddr);
	atomic64_set(&acdb_data.kvaddr, kvaddr);
	atomic64_set(&acdb_data.mem_len, mem_len);
	mutex_unlock(&acdb_data.acdb_mutex);

	pr_debug("%s done! paddr = 0x%lx, "
		"kvaddr = 0x%lx, len = x%lx\n",
		 __func__,
		(long)atomic64_read(&acdb_data.paddr),
		(long)atomic64_read(&acdb_data.kvaddr),
		(long)atomic64_read(&acdb_data.mem_len));

	return result;
err_ion_handle:
	ion_free(acdb_data.ion_client, acdb_data.ion_handle);
err_ion_client:
	ion_client_destroy(acdb_data.ion_client);
err:
	atomic64_set(&acdb_data.mem_len, 0);
	mutex_unlock(&acdb_data.acdb_mutex);
	return result;
}
Example #19
0
/*
 * audpcm_in_open - file open hook for the PCM-in device.
 * @inode: unused.
 * @file:  receives the audio_in instance in private_data on success.
 *
 * Single-open device (-EBUSY if already open).  Sets default encoder
 * configuration, acquires the audio manager, encoder slot and the two
 * ADSP modules, then allocates the DMA output buffer from the ION
 * audio heap, resolves its physical address, and maps it into the
 * kernel.  On any failure the goto ladder unwinds in reverse order.
 *
 * Returns 0 on success or a negative errno.
 */
static int audpcm_in_open(struct inode *inode, struct file *file)
{
	struct audio_in *audio = &the_audio_in;
	int rc;
	int len = 0;
	unsigned long ionflag = 0;
	ion_phys_addr_t addr = 0;
	struct ion_handle *handle = NULL;
	struct ion_client *client = NULL;

	int encid;
	mutex_lock(&audio->lock);
	if (audio->opened) {
		rc = -EBUSY;
		goto done;
	}

	/* Settings will be re-config at AUDIO_SET_CONFIG,
	 * but at least we need to have initial config
	 */
	audio->mode = MSM_AUD_ENC_MODE_TUNNEL;
	audio->samp_rate = RPC_AUD_DEF_SAMPLE_RATE_11025;
	audio->samp_rate_index = AUDREC_CMD_SAMP_RATE_INDX_11025;
	audio->channel_mode = AUDREC_CMD_STEREO_MODE_MONO;
	audio->buffer_size = MONO_DATA_SIZE;
	audio->enc_type = AUDREC_CMD_TYPE_0_INDEX_WAV | audio->mode;

	rc = audmgr_open(&audio->audmgr);
	if (rc)
		goto done;
	encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name,
			&audio->queue_ids);
	if (encid < 0) {
		MM_ERR("No free encoder available\n");
		rc = -ENODEV;
		goto done;
	}
	audio->enc_id = encid;

	rc = msm_adsp_get(audio->module_name, &audio->audrec,
			   &audrec_adsp_ops, audio);
	if (rc) {
		audpreproc_aenc_free(audio->enc_id);
		goto done;
	}

	rc = msm_adsp_get("AUDPREPROCTASK", &audio->audpre,
				&audpre_adsp_ops, audio);
	if (rc) {
		msm_adsp_put(audio->audrec);
		audpreproc_aenc_free(audio->enc_id);
		goto done;
	}

	audio->dsp_cnt = 0;
	audio->stopped = 0;

	audpcm_in_flush(audio);

	client = msm_ion_client_create(UINT_MAX, "Audio_PCM_in_client");
	if (IS_ERR_OR_NULL(client)) {
		MM_ERR("Unable to create ION client\n");
		rc = -ENOMEM;
		goto client_create_error;
	}
	audio->client = client;

	MM_DBG("allocating mem sz = %d\n", DMASZ);
	handle = ion_alloc(client, DMASZ, SZ_4K,
		ION_HEAP(ION_AUDIO_HEAP_ID), 0);
	if (IS_ERR_OR_NULL(handle)) {
		MM_ERR("Unable to create allocate O/P buffers\n");
		rc = -ENOMEM;
		goto output_buff_alloc_error;
	}

	audio->output_buff_handle = handle;

	rc = ion_phys(client , handle, &addr, &len);
	if (rc) {
		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
		rc = -ENOMEM;
		goto output_buff_get_phys_error;
	} else {
		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
	}
	audio->phys = (int32_t)addr;

	rc = ion_handle_get_flags(client, handle, &ionflag);
	if (rc) {
		MM_ERR("could not get flags for the handle\n");
		rc = -ENOMEM;
		goto output_buff_get_flags_error;
	}

	audio->data = ion_map_kernel(client, handle);
	if (IS_ERR(audio->data)) {
		MM_ERR("could not map read buffers,freeing instance 0x%08x\n",
				(int)audio);
		rc = -ENOMEM;
		goto output_buff_map_error;
	}
	MM_DBG("read buf: phy addr 0x%08x kernel addr 0x%08x\n",
		audio->phys, (int)audio->data);

	file->private_data = audio;
	audio->opened = 1;
	rc = 0;
done:
	mutex_unlock(&audio->lock);
	return rc;
/* Error unwind: each label releases what was acquired before it. */
output_buff_map_error:
output_buff_get_phys_error:
output_buff_get_flags_error:
	ion_free(client, audio->output_buff_handle);
output_buff_alloc_error:
	ion_client_destroy(client);
client_create_error:
	msm_adsp_put(audio->audrec);
	msm_adsp_put(audio->audpre);
	audpreproc_aenc_free(audio->enc_id);
	mutex_unlock(&audio->lock);
	return rc;
}
/*
 * smcmod_send_buf_cmd - pass user-supplied ION buffers to the secure
 * world via scm_call().
 * @reqp: request describing the command (and optional response) ION
 *        fds, the lengths to use, and the service/command ids.
 *
 * Imports the dma-buf fds into a temporary ION client, validates the
 * requested lengths against the actual buffer sizes, maps the buffers
 * into the kernel, performs the SCM call, and unconditionally tears
 * everything down at buf_cleanup.  The secure-world return value is
 * stored in reqp->return_val.
 *
 * Returns 0 on success or -EINVAL on any validation/mapping failure.
 */
static int smcmod_send_buf_cmd(struct smcmod_buf_req *reqp)
{
	int ret = 0;
	struct ion_client *ion_clientp = NULL;
	struct ion_handle *ion_cmd_handlep = NULL;
	struct ion_handle *ion_resp_handlep = NULL;
	void *cmd_vaddrp = NULL;
	void *resp_vaddrp = NULL;
	unsigned long cmd_buf_size = 0;
	unsigned long resp_buf_size = 0;

	/* sanity check the argument */
	if (IS_ERR_OR_NULL(reqp))
		return -EINVAL;

	/* sanity check the fds */
	if (reqp->ion_cmd_fd < 0)
		return -EINVAL;

	/* create an ion client */
	ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");

	/* check for errors */
	if (IS_ERR_OR_NULL(ion_clientp))
		return -EINVAL;

	/* import the command buffer fd */
	ion_cmd_handlep = ion_import_dma_buf(ion_clientp, reqp->ion_cmd_fd);

	/* sanity check the handle */
	if (IS_ERR_OR_NULL(ion_cmd_handlep)) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* retrieve the size of the buffer */
	if (ion_handle_get_size(ion_clientp, ion_cmd_handlep,
		&cmd_buf_size) < 0) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* ensure that the command buffer size is not
	 * greater than the size of the buffer.
	 */
	if (reqp->cmd_len > cmd_buf_size) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* map the area to get a virtual address */
	cmd_vaddrp = ion_map_kernel(ion_clientp, ion_cmd_handlep);

	/* sanity check the address */
	if (IS_ERR_OR_NULL(cmd_vaddrp)) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* check if there is a response buffer */
	if (reqp->ion_resp_fd >= 0) {
		/* import the handle */
		ion_resp_handlep =
			ion_import_dma_buf(ion_clientp, reqp->ion_resp_fd);

		/* sanity check the handle */
		if (IS_ERR_OR_NULL(ion_resp_handlep)) {
			ret = -EINVAL;
			goto buf_cleanup;
		}

		/* retrieve the size of the buffer */
		if (ion_handle_get_size(ion_clientp, ion_resp_handlep,
			&resp_buf_size) < 0) {
			ret = -EINVAL;
			goto buf_cleanup;
		}

		/* ensure that the command buffer size is not
		 * greater than the size of the buffer.
		 */
		if (reqp->resp_len > resp_buf_size) {
			ret = -EINVAL;
			goto buf_cleanup;
		}

		/* map the area to get a virtual address */
		resp_vaddrp = ion_map_kernel(ion_clientp, ion_resp_handlep);

		/* sanity check the address */
		if (IS_ERR_OR_NULL(resp_vaddrp)) {
			ret = -EINVAL;
			goto buf_cleanup;
		}
	}

	/* No need to flush the cache lines for the command buffer here,
	 * because the buffer will be flushed by scm_call.
	 */

	/* call scm function to switch to secure world */
	reqp->return_val = scm_call(reqp->service_id, reqp->command_id,
		cmd_vaddrp, reqp->cmd_len, resp_vaddrp, reqp->resp_len);

	/* The cache lines for the response buffer have already been
	 * invalidated by scm_call before returning.
	 */

buf_cleanup:
	/* if the client and handle(s) are valid, free them */
	if (!IS_ERR_OR_NULL(ion_clientp)) {
		if (!IS_ERR_OR_NULL(ion_cmd_handlep)) {
			if (!IS_ERR_OR_NULL(cmd_vaddrp))
				ion_unmap_kernel(ion_clientp, ion_cmd_handlep);
			ion_free(ion_clientp, ion_cmd_handlep);
		}

		if (!IS_ERR_OR_NULL(ion_resp_handlep)) {
			if (!IS_ERR_OR_NULL(resp_vaddrp))
				ion_unmap_kernel(ion_clientp, ion_resp_handlep);
			ion_free(ion_clientp, ion_resp_handlep);
		}

		ion_client_destroy(ion_clientp);
	}

	return ret;
}
Example #21
0
/*
 * secmem_ioctl - ioctl dispatcher for the secure-memory device.
 * @filp: open file; private_data holds the secmem_info.
 * @cmd:  SECMEM_IOC_* command.
 * @arg:  user pointer for the per-command payload.
 *
 * Handles chunk enumeration (CMA-backed), fd-to-physical translation
 * through a throwaway ION client, DRM on/off toggling under drm_lock,
 * crypto-lock stubs, and the optional MIF QoS lock.
 *
 * Fix: in the SECMEM_IOC_GET_FD_PHYS_ADDR ion_phys() failure path the
 * client was destroyed *before* ion_free(client, handle) — a use of
 * the client after destruction.  The handle is now freed first, then
 * the client destroyed (matching the success path).
 *
 * Returns 0 on success or a negative errno.
 */
static long secmem_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct secmem_info *info = filp->private_data;

	/* Remembered across calls: number of secure regions. */
	static int nbufs = 0;

	switch (cmd) {
	case SECMEM_IOC_GET_CHUNK_NUM:
	{
		char **mname;

		nbufs = 0;
		for (mname = secmem_regions; *mname != NULL; mname++)
			nbufs++;

		if (nbufs == 0)
			return -ENOMEM;

		if (copy_to_user((void __user *)arg, &nbufs, sizeof(int)))
			return -EFAULT;
		break;
	}
	case SECMEM_IOC_CHUNKINFO:
	{
		struct cma_info cinfo;
		struct secchunk_info minfo;

		if (copy_from_user(&minfo, (void __user *)arg, sizeof(minfo)))
			return -EFAULT;

		memset(&minfo.name, 0, MAX_NAME_LEN);

		if (minfo.index < 0)
			return -EINVAL;

		if (minfo.index >= nbufs) {
			minfo.index = -1; /* No more memory region */
		} else {

			if (cma_info(&cinfo, info->dev,
					secmem_regions[minfo.index]))
				return -EINVAL;

			minfo.base = cinfo.lower_bound;
			minfo.size = cinfo.total_size;
			memcpy(minfo.name, secmem_regions[minfo.index], MAX_NAME_LEN);
		}

		if (copy_to_user((void __user *)arg, &minfo, sizeof(minfo)))
			return -EFAULT;
		break;
	}
#if defined(CONFIG_ION)
	case SECMEM_IOC_GET_FD_PHYS_ADDR:
	{
		struct ion_client *client;
		struct secfd_info fd_info;
		struct ion_fd_data data;
		size_t len;

		if (copy_from_user(&fd_info, (int __user *)arg,
					sizeof(fd_info)))
			return -EFAULT;

		client = ion_client_create(ion_exynos, "DRM");
		if (IS_ERR(client)) {
			pr_err("%s: Failed to get ion_client of DRM\n",
				__func__);
			return -ENOMEM;
		}

		data.fd = fd_info.fd;
		data.handle = ion_import_dma_buf(client, data.fd);
		pr_debug("%s: fd from user space = %d\n",
				__func__, fd_info.fd);
		if (IS_ERR(data.handle)) {
			pr_err("%s: Failed to get ion_handle of DRM\n",
				__func__);
			ion_client_destroy(client);
			return -ENOMEM;
		}

		if (ion_phys(client, data.handle, &fd_info.phys, &len)) {
			pr_err("%s: Failed to get phys. addr of DRM\n",
				__func__);
			/* Free the handle before destroying its client. */
			ion_free(client, data.handle);
			ion_client_destroy(client);
			return -ENOMEM;
		}

		pr_debug("%s: physical addr from kernel space = 0x%08x\n",
				__func__, (unsigned int)fd_info.phys);

		ion_free(client, data.handle);
		ion_client_destroy(client);

		if (copy_to_user((void __user *)arg, &fd_info, sizeof(fd_info)))
			return -EFAULT;
		break;
	}
#endif
	case SECMEM_IOC_GET_DRM_ONOFF:
		smp_rmb();
		if (copy_to_user((void __user *)arg, &drm_onoff, sizeof(int)))
			return -EFAULT;
		break;
	case SECMEM_IOC_SET_DRM_ONOFF:
	{
		int val = 0;

		if (copy_from_user(&val, (int __user *)arg, sizeof(int)))
			return -EFAULT;

		mutex_lock(&drm_lock);
		if ((info->drm_enabled && !val) ||
		    (!info->drm_enabled && val)) {
			/*
			 * 1. if we enabled drm, then disable it
			 * 2. if we don't already hdrm enabled,
			 *    try to enable it.
			 */
			drm_enable_locked(info, val);
		}
		mutex_unlock(&drm_lock);
		break;
	}
	case SECMEM_IOC_GET_CRYPTO_LOCK:
	{
		break;
	}
	case SECMEM_IOC_RELEASE_CRYPTO_LOCK:
	{
		break;
	}
#if defined(CONFIG_ARM_EXYNOS5410_BUS_DEVFREQ)
	case SECMEM_IOC_REQ_MIF_LOCK:
	{
		int req_mif_lock;

		if (copy_from_user(&req_mif_lock, (void __user *)arg, sizeof(int)))
			return -EFAULT;

		if (req_mif_lock) {
			pm_qos_update_request(&exynos5_secmem_mif_qos, 800000);
			pr_debug("%s: Get MIF lock successfully\n", __func__);
		} else {
			pm_qos_update_request(&exynos5_secmem_mif_qos, 0);
			pr_debug("%s: Release MIF lock successfully\n", __func__);
		}
		break;
	}
#endif
	default:
		return -ENOTTY;
	}

	return 0;
}
Example #22
0
/*
 * audpcm_in_open - file open hook for the PCM-in device (ATS-logging
 * variant).
 * @inode: unused.
 * @file:  receives the audio_in instance in private_data on success.
 *
 * Single-open device (-EBUSY if already open).  Sets default encoder
 * configuration, acquires the audio manager, encoder slot and ADSP
 * module, then allocates the DMA output buffer from the ION audio
 * heap, resolves its physical address, and maps it into the kernel.
 * On success a timestamped start-recording line is logged.  On any
 * failure the goto ladder unwinds in reverse order.
 *
 * Returns 0 on success or a negative errno.
 */
static int audpcm_in_open(struct inode *inode, struct file *file)
{
	struct audio_in *audio = &the_audio_in;
	int rc;
	int len = 0;
	unsigned long ionflag = 0;
	ion_phys_addr_t addr = 0;
	struct ion_handle *handle = NULL;
	struct ion_client *client = NULL;

	int encid;
	struct timespec ts;
	struct rtc_time tm;

	mutex_lock(&audio->lock);
	if (audio->opened) {
		rc = -EBUSY;
		goto done;
	}

	/* Initial config; re-configured later via AUDIO_SET_CONFIG. */
	audio->mode = MSM_AUD_ENC_MODE_TUNNEL;
	audio->samp_rate = RPC_AUD_DEF_SAMPLE_RATE_11025;
	audio->samp_rate_index = AUDREC_CMD_SAMP_RATE_INDX_11025;
	audio->channel_mode = AUDREC_CMD_STEREO_MODE_MONO;
	audio->buffer_size = MONO_DATA_SIZE;
	audio->enc_type = AUDREC_CMD_TYPE_0_INDEX_WAV | audio->mode;

	rc = audmgr_open(&audio->audmgr);
	if (rc)
		goto done;
	encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name,
			&audio->queue_ids);
	if (encid < 0) {
		MM_AUD_ERR("No free encoder available\n");
		rc = -ENODEV;
		goto done;
	}
	audio->enc_id = encid;

	rc = msm_adsp_get(audio->module_name, &audio->audrec,
			   &audrec_adsp_ops, audio);
	if (rc) {
		audpreproc_aenc_free(audio->enc_id);
		goto done;
	}

	audio->dsp_cnt = 0;
	audio->stopped = 0;

	audpcm_in_flush(audio);

	client = msm_ion_client_create(UINT_MAX, "Audio_PCM_in_client");
	if (IS_ERR_OR_NULL(client)) {
		MM_ERR("Unable to create ION client\n");
		rc = -ENOMEM;
		goto client_create_error;
	}
	audio->client = client;

	MM_DBG("allocating mem sz = %d\n", DMASZ);
	handle = ion_alloc(client, DMASZ, SZ_4K,
		ION_HEAP(ION_AUDIO_HEAP_ID));
	if (IS_ERR_OR_NULL(handle)) {
		MM_ERR("Unable to create allocate O/P buffers\n");
		rc = -ENOMEM;
		goto output_buff_alloc_error;
	}

	audio->output_buff_handle = handle;

	rc = ion_phys(client , handle, &addr, &len);
	if (rc) {
		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
		rc = -ENOMEM;
		goto output_buff_get_phys_error;
	} else {
		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
	}
	audio->phys = (int32_t)addr;

	rc = ion_handle_get_flags(client, handle, &ionflag);
	if (rc) {
		MM_ERR("could not get flags for the handle\n");
		rc = -ENOMEM;
		goto output_buff_get_flags_error;
	}

	audio->data = ion_map_kernel(client, handle, ionflag);
	if (IS_ERR(audio->data)) {
		MM_ERR("could not map read buffers,freeing instance 0x%08x\n",
				(int)audio);
		rc = -ENOMEM;
		goto output_buff_map_error;
	}
	MM_DBG("read buf: phy addr 0x%08x kernel addr 0x%08x\n",
		audio->phys, (int)audio->data);

	file->private_data = audio;
	audio->opened = 1;
	rc = 0;
done:
	mutex_unlock(&audio->lock);
	getnstimeofday(&ts);
	rtc_time_to_tm(ts.tv_sec, &tm);
	pr_aud_info1("[ATS][start_recording][successful] at %lld \
		(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n",
		ktime_to_ns(ktime_get()),
		tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
	return rc;
/* Error unwind: each label releases what was acquired before it. */
output_buff_map_error:
output_buff_get_phys_error:
output_buff_get_flags_error:
	ion_free(client, audio->output_buff_handle);
output_buff_alloc_error:
	ion_client_destroy(client);
client_create_error:
	msm_adsp_put(audio->audrec);
	audpreproc_aenc_free(audio->enc_id);
	mutex_unlock(&audio->lock);
	return rc;
}
/*
 * secmem_ioctl - ioctl dispatcher for the secure-memory device
 * (contig-heap variant).
 * @filp: open file; private_data holds the secmem_info.
 * @cmd:  SECMEM_IOC_* command.
 * @arg:  user pointer for the per-command payload.
 *
 * Handles chunk enumeration via the Exynos contiguous-heap info call,
 * fd-to-physical translation through a throwaway ION client, DRM
 * on/off toggling under drm_lock, crypto-lock stubs, and TZPC setup.
 *
 * Fix: in the SECMEM_IOC_GET_FD_PHYS_ADDR ion_phys() failure path the
 * client was destroyed *before* ion_free(client, handle) — a use of
 * the client after destruction.  The handle is now freed first, then
 * the client destroyed (matching the success path).
 *
 * Returns 0 on success or a negative errno.
 */
static long secmem_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct secmem_info *info = filp->private_data;

	/* Remembered across calls: number of secure regions. */
	static int nbufs = 0;

	switch (cmd) {
	case SECMEM_IOC_GET_CHUNK_NUM:
	{
		nbufs = sizeof(secmem_regions) / sizeof(uint32_t);

		if (nbufs == 0)
			return -ENOMEM;

		if (copy_to_user((void __user *)arg, &nbufs, sizeof(int)))
			return -EFAULT;
		break;
	}
	case SECMEM_IOC_CHUNKINFO:
	{
		struct secchunk_info minfo;

		if (copy_from_user(&minfo, (void __user *)arg, sizeof(minfo)))
			return -EFAULT;

		memset(&minfo.name, 0, MAX_NAME_LEN);

		if (minfo.index < 0)
			return -EINVAL;

		if (minfo.index >= nbufs) {
			minfo.index = -1; /* No more memory region */
		} else {

			if (ion_exynos_contig_heap_info(secmem_regions[minfo.index],
					&minfo.base, &minfo.size))
				return -EINVAL;

			memcpy(minfo.name, secmem_regions_name[minfo.index], MAX_NAME_LEN);
		}

		if (copy_to_user((void __user *)arg, &minfo, sizeof(minfo)))
			return -EFAULT;
		break;
	}
#if defined(CONFIG_ION)
	case SECMEM_IOC_GET_FD_PHYS_ADDR:
	{
		struct ion_client *client;
		struct secfd_info fd_info;
		struct ion_fd_data data;
		size_t len;

		if (copy_from_user(&fd_info, (int __user *)arg,
					sizeof(fd_info)))
			return -EFAULT;

		client = ion_client_create(ion_exynos, "DRM");
		if (IS_ERR(client)) {
			pr_err("%s: Failed to get ion_client of DRM\n",
				__func__);
			return -ENOMEM;
		}

		data.fd = fd_info.fd;
		data.handle = ion_import_dma_buf(client, data.fd);
		pr_debug("%s: fd from user space = %d\n",
				__func__, fd_info.fd);
		if (IS_ERR(data.handle)) {
			pr_err("%s: Failed to get ion_handle of DRM\n",
				__func__);
			ion_client_destroy(client);
			return -ENOMEM;
		}

		if (ion_phys(client, data.handle, &fd_info.phys, &len)) {
			pr_err("%s: Failed to get phys. addr of DRM\n",
				__func__);
			/* Free the handle before destroying its client. */
			ion_free(client, data.handle);
			ion_client_destroy(client);
			return -ENOMEM;
		}

		pr_debug("%s: physical addr from kernel space = 0x%08x\n",
				__func__, (unsigned int)fd_info.phys);

		ion_free(client, data.handle);
		ion_client_destroy(client);

		if (copy_to_user((void __user *)arg, &fd_info, sizeof(fd_info)))
			return -EFAULT;
		break;
	}
#endif
	case SECMEM_IOC_GET_DRM_ONOFF:
		smp_rmb();
		if (copy_to_user((void __user *)arg, &drm_onoff, sizeof(int)))
			return -EFAULT;
		break;
	case SECMEM_IOC_SET_DRM_ONOFF:
	{
		int ret, val = 0;

		if (copy_from_user(&val, (int __user *)arg, sizeof(int)))
			return -EFAULT;

		mutex_lock(&drm_lock);
		if ((info->drm_enabled && !val) ||
		    (!info->drm_enabled && val)) {
			/*
			 * 1. if we enabled drm, then disable it
			 * 2. if we don't already hdrm enabled,
			 *    try to enable it.
			 */
			ret = drm_enable_locked(info, val);
			if (ret < 0)
				pr_err("fail to lock/unlock drm status. lock = %d\n", val);
		}
		mutex_unlock(&drm_lock);
		break;
	}
	case SECMEM_IOC_GET_CRYPTO_LOCK:
	{
		break;
	}
	case SECMEM_IOC_RELEASE_CRYPTO_LOCK:
	{
		break;
	}
	case SECMEM_IOC_SET_TZPC:
	{
#if !defined(CONFIG_SOC_EXYNOS5422) && !defined(CONFIG_SOC_EXYNOS5430)
		struct protect_info prot;

		if (copy_from_user(&prot, (void __user *)arg, sizeof(struct protect_info)))
			return -EFAULT;

		mutex_lock(&smc_lock);
		exynos_smc((uint32_t)(0x81000000), 0, prot.dev, prot.enable);
		mutex_unlock(&smc_lock);
#endif
		break;
	}
	default:
		return -ENOTTY;
	}

	return 0;
}
Example #24
0
static int audamrnb_in_open(struct inode *inode, struct file *file)
{
	struct audio_in *audio = &the_audio_amrnb_in;
	int rc;
	int encid;
	int len = 0;
	unsigned long ionflag = 0;
	ion_phys_addr_t addr = 0;
	struct ion_handle *handle = NULL;
	struct ion_client *client = NULL;

	mutex_lock(&audio->lock);
	if (audio->opened) {
		rc = -EBUSY;
		goto done;
	}

	client = msm_ion_client_create(UINT_MAX, "Audio_AMR_In_Client");
	if (IS_ERR_OR_NULL(client)) {
		MM_ERR("Unable to create ION client\n");
		rc = -ENOMEM;
		goto client_create_error;
	}
	audio->client = client;

	handle = ion_alloc(client, DMASZ, SZ_4K,
		ION_HEAP(ION_AUDIO_HEAP_ID),0);
	if (IS_ERR_OR_NULL(handle)) {
		MM_ERR("Unable to create allocate O/P buffers\n");
		rc = -ENOMEM;
		goto buff_alloc_error;
	}
	audio->buff_handle = handle;

	rc = ion_phys(client, handle, &addr, &len);
	if (rc) {
		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
		goto buff_get_phys_error;
	} else {
		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
	}
	audio->phys = (int32_t)addr;

	rc = ion_handle_get_flags(client, handle, &ionflag);
	if (rc) {
		MM_ERR("could not get flags for the handle\n");
		goto buff_get_flags_error;
	}

	audio->map_v_read = ion_map_kernel(client, handle);
	if (IS_ERR(audio->map_v_read)) {
		MM_ERR("could not map write buffers\n");
		rc = -ENOMEM;
		goto buff_map_error;
	}
	audio->data = audio->map_v_read;
	MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
		audio->phys, (int)audio->data);

	MM_DBG("Memory addr = 0x%8x  phy addr = 0x%8x\n",\
		(int) audio->data, (int) audio->phys);
	if ((file->f_mode & FMODE_WRITE) &&
			(file->f_mode & FMODE_READ)) {
		rc = -EACCES;
		MM_ERR("Non tunnel encoding is not supported\n");
		goto buff_map_error;
	} else if (!(file->f_mode & FMODE_WRITE) &&
					(file->f_mode & FMODE_READ)) {
		audio->mode = MSM_AUD_ENC_MODE_TUNNEL;
		MM_DBG("Opened for tunnel mode encoding\n");
	} else {
		rc = -EACCES;
		goto buff_map_error;
	}


	/* Settings will be re-config at AUDIO_SET_CONFIG,
	 * but at least we need to have initial config
	 */
	audio->buffer_size = (FRAME_SIZE - 8);
	audio->enc_type = ENC_TYPE_AMRNB | audio->mode;
	audio->dtx_mode = -1;
	audio->frame_format = 0;
	audio->used_mode = 7; /* Bit Rate 12.2 kbps MR122 */

	encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name,
			&audio->queue_ids);
	if (encid < 0) {
		MM_ERR("No free encoder available\n");
		rc = -ENODEV;
		goto aenc_alloc_error;
	}
	audio->enc_id = encid;

	rc = msm_adsp_get(audio->module_name, &audio->audrec,
			   &audrec_amrnb_adsp_ops, audio);

	if (rc) {
		audpreproc_aenc_free(audio->enc_id);
		goto aenc_alloc_error;
	}

	audio->stopped = 0;
	audio->source = 0;

	audamrnb_in_flush(audio);

	audio->device_events = AUDDEV_EVT_DEV_RDY | AUDDEV_EVT_DEV_RLS |
				AUDDEV_EVT_VOICE_STATE_CHG;

	audio->voice_state = msm_get_voice_state();
	rc = auddev_register_evt_listner(audio->device_events,
					AUDDEV_CLNT_ENC, audio->enc_id,
					amrnb_in_listener, (void *) audio);
	if (rc) {
		MM_ERR("failed to register device event listener\n");
		goto evt_error;
	}
	audio->build_id = socinfo_get_build_id();
	MM_DBG("Modem build id = %s\n", audio->build_id);

	file->private_data = audio;
	audio->opened = 1;
	mutex_unlock(&audio->lock);
	return rc;
evt_error:
	msm_adsp_put(audio->audrec);
	audpreproc_aenc_free(audio->enc_id);
	ion_unmap_kernel(client, audio->buff_handle);
aenc_alloc_error:
buff_map_error:
buff_get_phys_error:
buff_get_flags_error:
	ion_free(client, audio->buff_handle);
buff_alloc_error:
	ion_client_destroy(client);
client_create_error:
done:
	mutex_unlock(&audio->lock);
	return rc;
}
/*
 * Forward a decrypt request to the secure world via scm_call().
 *
 * Resolves the caller-supplied ion fd to a physical address, packs the
 * request arguments for either a metadata or an image-fragment
 * operation, issues the SCM call, and copies the response fields back
 * into *reqp.
 *
 * Returns 0 on success or a negative errno.  The temporary ion client
 * and handle are always released before returning.
 */
static int smcmod_send_dec_cmd(struct smcmod_decrypt_req *reqp)
{
	struct ion_client *ion_clientp;
	struct ion_handle *ion_handlep = NULL;
	int ion_fd;
	int ret;
	u32 pa;
	size_t size;
	struct {
		u32 args[4];
	} req;
	struct {
		u32 args[3];
	} rsp;

	ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");
	if (IS_ERR_OR_NULL(ion_clientp))
		/* PTR_ERR(NULL) is 0 (false success); map NULL to -EINVAL */
		return ion_clientp ? PTR_ERR(ion_clientp) : -EINVAL;

	/*
	 * Zero req/rsp so unused argument slots do not leak uninitialized
	 * kernel stack to the secure world (the metadata op fills only
	 * args[0..1]).
	 */
	memset(&req, 0, sizeof(req));
	memset(&rsp, 0, sizeof(rsp));

	switch (reqp->operation) {
	case SMCMOD_DECRYPT_REQ_OP_METADATA: {
		ion_fd = reqp->request.metadata.ion_fd;
		ret = smcmod_ion_fd_to_phys(ion_fd, ion_clientp,
					    &ion_handlep, &pa, &size);
		if (ret)
			goto error;

		req.args[0] = reqp->request.metadata.len;
		req.args[1] = pa;
		break;
	}
	case SMCMOD_DECRYPT_REQ_OP_IMG_FRAG: {
		ion_fd = reqp->request.img_frag.ion_fd;
		ret = smcmod_ion_fd_to_phys(ion_fd, ion_clientp,
					    &ion_handlep, &pa, &size);
		if (ret)
			goto error;

		req.args[0] = reqp->request.img_frag.ctx_id;
		req.args[1] = reqp->request.img_frag.last_frag;
		req.args[2] = reqp->request.img_frag.frag_len;
		req.args[3] = pa + reqp->request.img_frag.offset;
		break;
	}
	default:
		ret = -EINVAL;
		goto error;
	}

	/*
	 * scm_call does cache maintenance over request and response buffers.
	 * The userspace must flush/invalidate ion input/output buffers itself.
	 */

	ret = scm_call(reqp->service_id, reqp->command_id,
		       &req, sizeof(req), &rsp, sizeof(rsp));
	if (ret)
		goto error;

	switch (reqp->operation) {
	case SMCMOD_DECRYPT_REQ_OP_METADATA:
		reqp->response.metadata.status = rsp.args[0];
		reqp->response.metadata.ctx_id = rsp.args[1];
		/* secure side returns an absolute address; convert back
		 * to an offset within the caller's buffer */
		reqp->response.metadata.end_offset = rsp.args[2] - pa;
		break;
	case SMCMOD_DECRYPT_REQ_OP_IMG_FRAG: {
		reqp->response.img_frag.status = rsp.args[0];
		break;
	}
	default:
		break;
	}

error:
	if (!IS_ERR_OR_NULL(ion_clientp)) {
		if (!IS_ERR_OR_NULL(ion_handlep))
			ion_free(ion_clientp, ion_handlep);
		ion_client_destroy(ion_clientp);
	}
	return ret;
}
// Lazily map the remote MemoryHeap into this process.  On first use,
// fetch (fd, size, flags, offset) from the server via a HEAP_ID binder
// transaction, dup() the received fd, and mmap()/ion_map() it locally,
// publishing the fd through mHeapId once the mapping succeeds.
void BpMemoryHeap::assertReallyMapped() const
{
    if (mHeapId == -1) {

        // remote call without mLock held, worse case scenario, we end up
        // calling transact() from multiple threads, but that's not a problem,
        // only mmap below must be in the critical section.

        Parcel data, reply;
        data.writeInterfaceToken(IMemoryHeap::getInterfaceDescriptor());
        status_t err = remote()->transact(HEAP_ID, data, &reply);
        // NOTE(review): the reply is parsed even when err != 0; the
        // values below would then be garbage, guarded only by the log
        // line — confirm downstream checks tolerate this.
        int parcel_fd = reply.readFileDescriptor();
        ssize_t size = reply.readInt32();
        uint32_t flags = reply.readInt32();
        uint32_t offset = reply.readInt32();

        ALOGE_IF(err, "binder=%p transaction failed fd=%d, size=%ld, err=%d (%s)",
                asBinder().get(), parcel_fd, size, err, strerror(-err));

#ifdef USE_V4L2_ION
        // An ION client is needed only when the heap travels as an ION
        // fd; it is used by ion_map() below.
        int ion_client = -1;
        if (flags & USE_ION_FD) {
            ion_client = ion_client_create();
            ALOGE_IF(ion_client < 0, "BpMemoryHeap : ion client creation error");
        }
#endif

        // Duplicate the parcel-owned fd so the mapping outlives the Parcel.
        int fd = dup( parcel_fd );
        ALOGE_IF(fd==-1, "cannot dup fd=%d, size=%ld, err=%d (%s)",
                parcel_fd, size, err, strerror(errno));

        int access = PROT_READ;
        if (!(flags & READ_ONLY)) {
            access |= PROT_WRITE;
        }

        // Double-checked: re-test mHeapId under mLock so only one thread
        // performs the actual mapping.
        Mutex::Autolock _l(mLock);
        if (mHeapId == -1) {
            mRealHeap = true;

#ifdef USE_V4L2_ION
        // NOTE(review): brace placement below is misleading, but the
        // resulting flow is: ION path maps via ion_map() (or fails with
        // MAP_FAILED); otherwise fall through to mmap().
        if (flags & USE_ION_FD) {
            if (ion_client < 0)
                mBase = MAP_FAILED;
            else
                mBase = ion_map(fd, size, offset);
            } else
#endif
                mBase = mmap(0, size, access, MAP_SHARED, fd, offset);
            if (mBase == MAP_FAILED) {
                ALOGE("cannot map BpMemoryHeap (binder=%p), size=%ld, fd=%d (%s)",
                        asBinder().get(), size, fd, strerror(errno));
                close(fd);
            } else {
                mSize = size;
                mFlags = flags;
                mOffset = offset;
                // Publish the fd last, so readers of mHeapId observe a
                // fully-initialized mapping.
                android_atomic_write(fd, &mHeapId);
            }
        }
#ifdef USE_V4L2_ION
        // NOTE(review): "ion_client = -1" is a no-op; presumably the
        // intent is merely to destroy only valid clients — confirm.
        if (ion_client < 0)
            ion_client = -1;
        else
            ion_client_destroy(ion_client);
#endif
    }
}
/*
 * Execute a cipher (encrypt/decrypt) operation in the secure world.
 *
 * Imports the caller's key, plain-text, cipher-text, and init-vector
 * ion buffers, validates the requested sizes against the actual buffer
 * sizes, performs the cache maintenance scm_call() does not do for ion
 * buffers, and issues the SCM cipher command.  reqp->operation selects
 * direction (non-zero means decrypt: cipher text is the input).
 *
 * Returns 0 on success or a negative errno; the SCM status itself is
 * reported via reqp->return_val.  All ion resources are released before
 * returning.
 */
static int smcmod_send_cipher_cmd(struct smcmod_cipher_req *reqp)
{
	int ret = 0;
	struct smcmod_cipher_scm_req scm_req;
	struct ion_client *ion_clientp = NULL;
	struct ion_handle *ion_key_handlep = NULL;
	struct ion_handle *ion_plain_handlep = NULL;
	struct ion_handle *ion_cipher_handlep = NULL;
	struct ion_handle *ion_iv_handlep = NULL;
	size_t size = 0;

	if (IS_ERR_OR_NULL(reqp))
		return -EINVAL;

	/* sanity check the fds */
	if ((reqp->ion_plain_text_fd < 0) ||
		(reqp->ion_cipher_text_fd < 0) ||
		(reqp->ion_init_vector_fd < 0))
		return -EINVAL;

	/* create an ion client */
	ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");

	/* check for errors */
	if (IS_ERR_OR_NULL(ion_clientp))
		return -EINVAL;

	/* fill in the scm request structure */
	scm_req.algorithm = reqp->algorithm;
	scm_req.operation = reqp->operation;
	scm_req.mode = reqp->mode;
	scm_req.key_phys_addr = 0;
	scm_req.key_size = reqp->key_size;
	scm_req.plain_text_size = reqp->plain_text_size;
	scm_req.cipher_text_size = reqp->cipher_text_size;
	scm_req.init_vector_size = reqp->init_vector_size;

	if (!reqp->key_is_null) {
		/* import the key buffer and get the physical address */
		ret = smcmod_ion_fd_to_phys(reqp->ion_key_fd, ion_clientp,
			&ion_key_handlep, &scm_req.key_phys_addr, &size);
		if (ret < 0)
			goto buf_cleanup;

		/* ensure that the key size is not
		 * greater than the size of the buffer.
		 */
		if (reqp->key_size > size) {
			ret = -EINVAL;
			goto buf_cleanup;
		}
	}

	/* import the plain text buffer and get the physical address */
	ret = smcmod_ion_fd_to_phys(reqp->ion_plain_text_fd, ion_clientp,
		&ion_plain_handlep, &scm_req.plain_text_phys_addr, &size);

	if (ret < 0)
		goto buf_cleanup;

	/* ensure that the plain text size is not
	 * greater than the size of the buffer.
	 */
	if (reqp->plain_text_size > size) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* import the cipher text buffer and get the physical address */
	ret = smcmod_ion_fd_to_phys(reqp->ion_cipher_text_fd, ion_clientp,
		&ion_cipher_handlep, &scm_req.cipher_text_phys_addr, &size);
	if (ret < 0)
		goto buf_cleanup;

	/* ensure that the cipher text size is not
	 * greater than the size of the buffer.
	 */
	if (reqp->cipher_text_size > size) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* import the init vector buffer and get the physical address */
	ret = smcmod_ion_fd_to_phys(reqp->ion_init_vector_fd, ion_clientp,
		&ion_iv_handlep, &scm_req.init_vector_phys_addr, &size);
	if (ret < 0)
		goto buf_cleanup;

	/* ensure that the init vector size is not
	 * greater than the size of the buffer.
	 */
	if (reqp->init_vector_size > size) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* Only the scm_req structure will be flushed by scm_call,
	 * so we must flush the cache for the input ion buffers here.
	 * Skip the key when none was supplied (key_is_null): the cache
	 * op would otherwise be handed a NULL handle.
	 */
	if (!IS_ERR_OR_NULL(ion_key_handlep))
		msm_ion_do_cache_op(ion_clientp, ion_key_handlep, NULL,
			scm_req.key_size, ION_IOC_CLEAN_CACHES);
	msm_ion_do_cache_op(ion_clientp, ion_iv_handlep, NULL,
		scm_req.init_vector_size, ION_IOC_CLEAN_CACHES);

	/* For decrypt, cipher text is input, otherwise it's plain text. */
	if (reqp->operation)
		msm_ion_do_cache_op(ion_clientp, ion_cipher_handlep, NULL,
			scm_req.cipher_text_size, ION_IOC_CLEAN_CACHES);
	else
		msm_ion_do_cache_op(ion_clientp, ion_plain_handlep, NULL,
			scm_req.plain_text_size, ION_IOC_CLEAN_CACHES);

	/* call scm function to switch to secure world */
	reqp->return_val = scm_call(SMCMOD_SVC_CRYPTO,
		SMCMOD_CRYPTO_CMD_CIPHER, &scm_req,
		sizeof(scm_req), NULL, 0);

	/* Invalidate the output buffer, since it's not done by scm_call */

	/* for decrypt, plain text is the output, otherwise it's cipher text */
	if (reqp->operation)
		msm_ion_do_cache_op(ion_clientp, ion_plain_handlep, NULL,
			scm_req.plain_text_size, ION_IOC_INV_CACHES);
	else
		msm_ion_do_cache_op(ion_clientp, ion_cipher_handlep, NULL,
			scm_req.cipher_text_size, ION_IOC_INV_CACHES);

buf_cleanup:
	/* if the client and handles are valid, free them */
	if (!IS_ERR_OR_NULL(ion_clientp)) {
		if (!IS_ERR_OR_NULL(ion_key_handlep))
			ion_free(ion_clientp, ion_key_handlep);

		if (!IS_ERR_OR_NULL(ion_plain_handlep))
			ion_free(ion_clientp, ion_plain_handlep);

		if (!IS_ERR_OR_NULL(ion_cipher_handlep))
			ion_free(ion_clientp, ion_cipher_handlep);

		if (!IS_ERR_OR_NULL(ion_iv_handlep))
			ion_free(ion_clientp, ion_iv_handlep);

		ion_client_destroy(ion_clientp);
	}

	return ret;
}
/*
 * Enable secure-world (TZASC) protection for a GPU buffer region.
 *
 * With CONFIG_ION + CONFIG_EXYNOS_CONTENT_PATH_PROTECTION and
 * MALI_SEC_ASP_SECURE_BUF_CTRL, resolves @ion_fd to a physical range
 * and asks the secure world (exynos_smc) to protect it for G3D;
 * otherwise only marks the region flags or rejects the request.
 *
 * Returns -EINVAL on any argument/configuration error.
 * NOTE(review): on SMC success the raw exynos_smc() return value
 * (DRMDRV_OK) is returned — presumably 0, confirm.
 */
static int exynos_secure_mem_enable(struct kbase_device *kbdev, int ion_fd, u64 flags, struct kbase_va_region *reg)
{
	/* enable secure world mode : TZASC */
	int ret = 0;

	if (!kbdev)
		goto secure_out;

	if (!kbdev->secure_mode_support) {
		GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: wrong operation! DDK cannot support Secure Rendering\n", __func__);
		ret = -EINVAL;
		goto secure_out;
	}

	if (!reg) {
		GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: wrong input argument, reg %p\n",
			__func__, reg);
		goto secure_out;
	}
#if defined(CONFIG_ION) && defined(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION)
#if MALI_SEC_ASP_SECURE_BUF_CTRL
	{
		struct ion_client *client;
		struct ion_handle *ion_handle;
		size_t len = 0;
		ion_phys_addr_t phys = 0;

		/* make CPU caches clean before the buffer is handed over
		 * to the secure world */
		flush_all_cpu_caches();

		if ((flags & kbdev->sec_sr_info.secure_flags_crc_asp) == kbdev->sec_sr_info.secure_flags_crc_asp) {
			/* CRC/ASP buffers skip SMC protection; phys/len
			 * stay 0 and are still stored in reg below */
			reg->flags |= KBASE_REG_SECURE_CRC | KBASE_REG_SECURE;
		} else {

			/* short-lived ion client, used only to translate
			 * ion_fd into a physical address range */
			client = ion_client_create(ion_exynos, "G3D");
			if (IS_ERR(client)) {
				GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: Failed to get ion_client of G3D\n",
						__func__);
				goto secure_out;
			}

			ion_handle = ion_import_dma_buf(client, ion_fd);

			if (IS_ERR(ion_handle)) {
				GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: Failed to get ion_handle of G3D\n",
						__func__);
				ion_client_destroy(client);
				goto secure_out;
			}

			if (ion_phys(client, ion_handle, &phys, &len)) {
				GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: Failed to get phys. addr of G3D\n",
						__func__);
				ion_free(client, ion_handle);
				ion_client_destroy(client);
				goto secure_out;
			}

			/* the physical range is known; the handle and
			 * client are no longer needed */
			ion_free(client, ion_handle);
			ion_client_destroy(client);

			ret = exynos_smc(SMC_DRM_SECBUF_CFW_PROT, phys, len, PROT_G3D);
			if (ret != DRMDRV_OK) {
				/* a failed protection request leaves the
				 * system in an unknown security state */
				GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: failed to set secure buffer region of G3D buffer, phy 0x%08x, error 0x%x\n",
					__func__, (unsigned int)phys, ret);
				BUG();
			}

			reg->flags |= KBASE_REG_SECURE;
		}

		reg->phys_by_ion = phys;
		reg->len_by_ion = len;
	}
#else
	reg->flags |= KBASE_REG_SECURE;

	reg->phys_by_ion = 0;
	reg->len_by_ion = 0;
#endif
#else
	GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: wrong operation! DDK cannot support Secure Rendering\n", __func__);
	ret = -EINVAL;
#endif // defined(CONFIG_ION) && defined(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION)

	return ret;
secure_out:
	ret = -EINVAL;
	return ret;
}
/*
 * Compute a message digest (optionally keyed/HMAC) in the secure world.
 *
 * Imports the caller's key, input, and output ion buffers, validates
 * the requested sizes against the actual buffer sizes, performs the
 * cache maintenance scm_call() does not do for ion buffers, and issues
 * the SCM digest command (fixed-block variant when reqp->fixed_block).
 *
 * Returns 0 on success or a negative errno; the SCM status itself is
 * reported via reqp->return_val.  All ion resources are released before
 * returning.
 */
static int smcmod_send_msg_digest_cmd(struct smcmod_msg_digest_req *reqp)
{
	int ret = 0;
	struct smcmod_msg_digest_scm_req scm_req;
	struct ion_client *ion_clientp = NULL;
	struct ion_handle *ion_key_handlep = NULL;
	struct ion_handle *ion_input_handlep = NULL;
	struct ion_handle *ion_output_handlep = NULL;
	size_t size = 0;

	if (IS_ERR_OR_NULL(reqp))
		return -EINVAL;

	/* sanity check the fds */
	if ((reqp->ion_input_fd < 0) || (reqp->ion_output_fd < 0))
		return -EINVAL;

	/* create an ion client */
	ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");

	/* check for errors */
	if (IS_ERR_OR_NULL(ion_clientp))
		return -EINVAL;

	/* fill in the scm request structure */
	scm_req.algorithm = reqp->algorithm;
	scm_req.key_phys_addr = 0;
	scm_req.key_size = reqp->key_size;
	scm_req.input_size = reqp->input_size;
	scm_req.output_size = reqp->output_size;
	scm_req.verify = 0;

	if (!reqp->key_is_null) {
		/* import the key buffer and get the physical address */
		ret = smcmod_ion_fd_to_phys(reqp->ion_key_fd, ion_clientp,
			&ion_key_handlep, &scm_req.key_phys_addr, &size);
		if (ret < 0)
			goto buf_cleanup;

		/* ensure that the key size is not
		 * greater than the size of the buffer.
		 */
		if (reqp->key_size > size) {
			ret = -EINVAL;
			goto buf_cleanup;
		}
	}

	/* import the input buffer and get the physical address */
	ret = smcmod_ion_fd_to_phys(reqp->ion_input_fd, ion_clientp,
		&ion_input_handlep, &scm_req.input_phys_addr, &size);
	if (ret < 0)
		goto buf_cleanup;

	/* ensure that the input size is not
	 * greater than the size of the buffer.
	 */
	if (reqp->input_size > size) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* import the output buffer and get the physical address */
	ret = smcmod_ion_fd_to_phys(reqp->ion_output_fd, ion_clientp,
		&ion_output_handlep, &scm_req.output_phys_addr, &size);
	if (ret < 0)
		goto buf_cleanup;

	/* ensure that the output size is not
	 * greater than the size of the buffer.
	 */
	if (reqp->output_size > size) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* Only the scm_req structure will be flushed by scm_call,
	 * so we must flush the cache for the input ion buffers here.
	 * Skip the key when none was supplied (key_is_null): the cache
	 * op would otherwise be handed a NULL handle.
	 */
	if (!IS_ERR_OR_NULL(ion_key_handlep))
		msm_ion_do_cache_op(ion_clientp, ion_key_handlep, NULL,
			scm_req.key_size, ION_IOC_CLEAN_CACHES);
	msm_ion_do_cache_op(ion_clientp, ion_input_handlep, NULL,
		scm_req.input_size, ION_IOC_CLEAN_CACHES);

	/* call scm function to switch to secure world */
	if (reqp->fixed_block)
		reqp->return_val = scm_call(SMCMOD_SVC_CRYPTO,
			SMCMOD_CRYPTO_CMD_MSG_DIGEST_FIXED,
			&scm_req,
			sizeof(scm_req),
			NULL, 0);
	else
		reqp->return_val = scm_call(SMCMOD_SVC_CRYPTO,
			SMCMOD_CRYPTO_CMD_MSG_DIGEST,
			&scm_req,
			sizeof(scm_req),
			NULL, 0);

	/* Invalidate the output buffer, since it's not done by scm_call */
	msm_ion_do_cache_op(ion_clientp, ion_output_handlep, NULL,
		scm_req.output_size, ION_IOC_INV_CACHES);

buf_cleanup:
	/* if the client and handles are valid, free them */
	if (!IS_ERR_OR_NULL(ion_clientp)) {
		if (!IS_ERR_OR_NULL(ion_key_handlep))
			ion_free(ion_clientp, ion_key_handlep);

		if (!IS_ERR_OR_NULL(ion_input_handlep))
			ion_free(ion_clientp, ion_input_handlep);

		if (!IS_ERR_OR_NULL(ion_output_handlep))
			ion_free(ion_clientp, ion_output_handlep);

		ion_client_destroy(ion_clientp);
	}

	return ret;
}
/* Release the ION client owned by an msm_ion_test instance. */
static inline void free_ion_client(struct msm_ion_test *ion_test)
{
	struct ion_client *client = ion_test->ion_client;

	ion_client_destroy(client);
}