/*
 * msm_mem_allocate - allocate a physically contiguous camera buffer.
 *
 * With CONFIG_MSM_MULTIMEDIA_USE_ION the buffer comes from an ION
 * multimedia (CP-MM or IOMMU) heap; otherwise it is carved out of
 * contiguous EBI memory.  Returns the physical address on success,
 * 0 on failure.
 *
 * NOTE(review): phyaddr is int32_t but ion_phys() writes through an
 * ion_phys_addr_t pointer (and len through a size_t pointer); if those
 * types are wider than 32 bits this cast corrupts adjacent stack --
 * confirm against this tree's ion_phys() prototype and target ABI.
 */
static int32_t msm_mem_allocate(struct videobuf2_contig_pmem *mem)
{
	int32_t phyaddr;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	int rc, len;
	mem->client = msm_ion_client_create(-1, "camera");
	if (IS_ERR((void *)mem->client)) {
		pr_err("%s Could not create client\n", __func__);
		goto client_failed;
	}
	/* Allow placement in either the MM content-protect or IOMMU heap. */
	mem->ion_handle = ion_alloc(mem->client, mem->size, SZ_4K,
		(0x1 << ION_CP_MM_HEAP_ID | 0x1 << ION_IOMMU_HEAP_ID));
	if (IS_ERR((void *)mem->ion_handle)) {
		pr_err("%s Could not allocate\n", __func__);
		goto alloc_failed;
	}
	rc = ion_phys(mem->client, mem->ion_handle,
		(ion_phys_addr_t *)&phyaddr, (size_t *)&len);
	if (rc < 0) {
		pr_err("%s Could not get physical address\n", __func__);
		goto phys_failed;
	}
#else
	phyaddr = allocate_contiguous_ebi_nomap(mem->size, SZ_4K);
#endif
	return phyaddr;

	/* Unwind in reverse order of acquisition; 0 signals failure. */
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
phys_failed:
	ion_free(mem->client, mem->ion_handle);
alloc_failed:
	ion_client_destroy(mem->client);
client_failed:
	return 0;
#endif
}
static u32 mdss_mdp_res_init(struct mdss_data_type *mdata) { u32 rc = 0; if (mdata->res_init) { pr_err("mdss resources already initialized\n"); return -EPERM; } mdata->res_init = true; mdata->clk_ena = false; mdata->irq_mask = MDSS_MDP_DEFAULT_INTR_MASK; mdata->irq_ena = false; rc = mdss_mdp_irq_clk_setup(mdata); if (rc) return rc; mdata->iclient = msm_ion_client_create(-1, mdata->pdev->name); if (IS_ERR_OR_NULL(mdata->iclient)) { pr_err("msm_ion_client_create() return error (%p)\n", mdata->iclient); mdata->iclient = NULL; } rc = mdss_iommu_init(mdata); init_completion(&mdata->iommu_attach_done); return rc; }
/*
 * ion_new_client - create the ION client used for video smem allocations.
 *
 * Returns the client on success or NULL on failure.
 *
 * Fix: msm_ion_client_create() reports failure with an ERR_PTR-encoded
 * pointer (it may also return NULL), so the previous bare NULL test let
 * an error pointer escape to callers; normalize every failure to NULL,
 * matching the checked res_trk_create_ion_client() variant in this tree.
 */
static void *ion_new_client(void)
{
	struct ion_client *client = NULL;

	client = msm_ion_client_create(-1, "video_client");
	if (IS_ERR_OR_NULL(client)) {
		pr_err("Failed to create smem client\n");
		client = NULL;
	}
	return client;
}
/*
 * htc_ion_new_client_alloc - create the ION client for vcodec allocations.
 *
 * Returns the client on success or NULL on failure.
 *
 * Fix: the single-argument msm_ion_client_create() signals failure via
 * ERR_PTR, never plain NULL, so the previous `!client` check was dead and
 * an error pointer could escape to callers; use IS_ERR_OR_NULL and
 * normalize failures to NULL.
 */
static void *htc_ion_new_client_alloc(void)
{
	struct ion_client *client = NULL;

	client = msm_ion_client_create("vcodec_alloc");
	if (IS_ERR_OR_NULL(client)) {
		dprintk(VIDC_ERR, "Failed to create smem client\n");
		client = NULL;
	}
	return client;
}
/*
 * res_trk_create_ion_client - obtain the resource tracker's ION client.
 *
 * Returns a usable client pointer, or NULL if creation failed (failures
 * are logged and never propagated as ERR_PTR values).
 */
static struct ion_client *res_trk_create_ion_client(void)
{
	struct ion_client *client = msm_ion_client_create(-1, "video_client");

	if (!IS_ERR_OR_NULL(client))
		return client;

	VCDRES_MSG_ERROR("%s: Unable to create ION client\n", __func__);
	return NULL;
}
/*
 * mdss_mdp_res_init - probe-time resource setup for the MDP block.
 *
 * Brings up IRQ/clock handles, creates the clock-control workqueue,
 * latches the hardware revision registers (under a temporary power-on),
 * seeds the SMP/pipe/mixer tables and state flags, creates an optional
 * ION client, initializes the IOMMU and finally runs the HW init.
 * Returns 0 on success or the first failing step's error code.
 *
 * NOTE(review): unlike the guarded variant of this function elsewhere,
 * there is no mdata->res_init re-entry check here -- confirm callers
 * invoke this exactly once.
 */
static u32 mdss_mdp_res_init(struct mdss_data_type *mdata)
{
	u32 rc = 0;
	rc = mdss_mdp_irq_clk_setup(mdata);
	if (rc)
		return rc;

	mdata->clk_ctrl_wq = create_singlethread_workqueue("mdp_clk_wq");
	INIT_DELAYED_WORK(&mdata->clk_ctrl_worker,
		mdss_mdp_clk_ctrl_workqueue_handler);

	/* Power the block briefly just to read the revision registers. */
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
	mdata->rev = MDSS_MDP_REG_READ(MDSS_REG_HW_VERSION);
	mdata->mdp_rev = MDSS_MDP_REG_READ(MDSS_MDP_REG_HW_VERSION);
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);

	mdata->smp_mb_cnt = MDSS_MDP_SMP_MMB_BLOCKS;
	mdata->smp_mb_size = MDSS_MDP_SMP_MMB_SIZE;
	mdata->pipe_type_map = mdss_mdp_pipe_type_map;
	mdata->mixer_type_map = mdss_mdp_mixer_type_map;

	pr_info("mdss_revision=%x\n", mdata->rev);
	pr_info("mdp_hw_revision=%x\n", mdata->mdp_rev);

	mdata->res_init = true;
	mdata->timeout = HZ/20;
	mdata->clk_ena = false;
	mdata->irq_mask = MDSS_MDP_DEFAULT_INTR_MASK;
	mdata->suspend = false;
	mdata->prim_ptype = NO_PANEL;
	mdata->irq_ena = false;

	/* A failed ION client is tolerated; record NULL and carry on. */
	mdata->iclient = msm_ion_client_create(-1, mdata->pdev->name);
	if (IS_ERR_OR_NULL(mdata->iclient)) {
		pr_err("msm_ion_client_create() return error (%p)\n",
			mdata->iclient);
		mdata->iclient = NULL;
	}

	rc = mdss_iommu_init();
	if (!IS_ERR_VALUE(rc))
		mdss_iommu_attach();

	rc = mdss_hw_init(mdata);

	return rc;
}
/*
 * msm_mem_allocate - allocate and IOMMU-map a camera buffer.
 *
 * With CONFIG_MSM_MULTIMEDIA_USE_ION the buffer is taken from an ION
 * multimedia heap and mapped into the camera IOMMU domain; the returned
 * value is the device (IOMMU) address.  Without ION it is a physical
 * address from contiguous EBI memory.  Returns 0 on failure.
 *
 * NOTE(review): len is int but ion_map_iommu() writes through an
 * unsigned long pointer -- the cast assumes matching widths; confirm on
 * the target ABI.
 */
static unsigned long msm_mem_allocate(struct videobuf2_contig_pmem *mem)
{
	unsigned long phyaddr;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	int rc, len;
	mem->client = msm_ion_client_create(-1, "camera");
	if (IS_ERR((void *)mem->client)) {
		pr_err("%s Could not create client\n", __func__);
		goto client_failed;
	}
	/* Allow placement in either the MM content-protect or IOMMU heap. */
	mem->ion_handle = ion_alloc(mem->client, mem->size, SZ_4K,
		(0x1 << ION_CP_MM_HEAP_ID | 0x1 << ION_IOMMU_HEAP_ID));
	if (IS_ERR((void *)mem->ion_handle)) {
		pr_err("%s Could not allocate\n", __func__);
		goto alloc_failed;
	}
	rc = ion_map_iommu(mem->client, mem->ion_handle,
		CAMERA_DOMAIN, GEN_POOL, SZ_4K, 0,
		(unsigned long *)&phyaddr,
		(unsigned long *)&len, UNCACHED, 0);
	if (rc < 0) {
		pr_err("%s Could not get physical address\n", __func__);
		goto phys_failed;
	}
#else
	phyaddr = allocate_contiguous_ebi_nomap(mem->size, SZ_4K);
#endif
	return phyaddr;

	/* Unwind in reverse order of acquisition; 0 signals failure. */
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
phys_failed:
	ion_free(mem->client, mem->ion_handle);
alloc_failed:
	ion_client_destroy(mem->client);
client_failed:
	return 0;
#endif
}
/*
 * audamrnb_in_open - open handler for the AMR-NB audio encoder device.
 *
 * Serialized by audio->lock.  Creates an ION client, allocates and
 * kernel-maps the DMA buffer, reserves a DSP encoder session and
 * registers for device/voice-state events.  Only tunnel-mode (read-only)
 * encoding is supported.  Returns 0 on success or a negative errno;
 * error labels unwind acquired resources in reverse order.
 */
static int audamrnb_in_open(struct inode *inode, struct file *file)
{
	struct audio_in *audio = &the_audio_amrnb_in;
	int rc;
	int encid;
	int len = 0;
	unsigned long ionflag = 0;
	ion_phys_addr_t addr = 0;
	struct ion_handle *handle = NULL;
	struct ion_client *client = NULL;

	mutex_lock(&audio->lock);
	if (audio->opened) {
		rc = -EBUSY;
		goto done;
	}

	client = msm_ion_client_create(UINT_MAX, "Audio_AMR_In_Client");
	if (IS_ERR_OR_NULL(client)) {
		MM_ERR("Unable to create ION client\n");
		rc = -ENOMEM;
		goto client_create_error;
	}
	audio->client = client;

	handle = ion_alloc(client, DMASZ, SZ_4K,
		ION_HEAP(ION_AUDIO_HEAP_ID),0);
	if (IS_ERR_OR_NULL(handle)) {
		MM_ERR("Unable to create allocate O/P buffers\n");
		rc = -ENOMEM;
		goto buff_alloc_error;
	}
	audio->buff_handle = handle;

	/* NOTE(review): len is declared int but ion_phys() conventionally
	 * takes a size_t * -- confirm the prototype in this tree. */
	rc = ion_phys(client, handle, &addr, &len);
	if (rc) {
		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
		goto buff_get_phys_error;
	} else {
		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
	}
	audio->phys = (int32_t)addr;

	rc = ion_handle_get_flags(client, handle, &ionflag);
	if (rc) {
		MM_ERR("could not get flags for the handle\n");
		goto buff_get_flags_error;
	}

	audio->map_v_read = ion_map_kernel(client, handle);
	if (IS_ERR(audio->map_v_read)) {
		MM_ERR("could not map write buffers\n");
		rc = -ENOMEM;
		goto buff_map_error;
	}
	audio->data = audio->map_v_read;
	MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
		audio->phys, (int)audio->data);
	MM_DBG("Memory addr = 0x%8x phy addr = 0x%8x\n",
		(int) audio->data, (int) audio->phys);

	/* Only tunnel (read-only) mode is accepted.
	 * NOTE(review): these rejection paths jump to buff_map_error even
	 * though the buffer was just mapped, so ion_unmap_kernel() is
	 * skipped -- looks like a mapping leak; confirm. */
	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
		rc = -EACCES;
		MM_ERR("Non tunnel encoding is not supported\n");
		goto buff_map_error;
	} else if (!(file->f_mode & FMODE_WRITE) &&
			(file->f_mode & FMODE_READ)) {
		audio->mode = MSM_AUD_ENC_MODE_TUNNEL;
		MM_DBG("Opened for tunnel mode encoding\n");
	} else {
		rc = -EACCES;
		goto buff_map_error;
	}

	/* Settings will be re-configured at AUDIO_SET_CONFIG, but at
	 * least we need to have an initial config. */
	audio->buffer_size = (FRAME_SIZE - 8);
	audio->enc_type = ENC_TYPE_AMRNB | audio->mode;
	audio->dtx_mode = -1;
	audio->frame_format = 0;
	audio->used_mode = 7; /* Bit Rate 12.2 kbps MR122 */

	encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name,
			&audio->queue_ids);
	if (encid < 0) {
		MM_ERR("No free encoder available\n");
		rc = -ENODEV;
		goto aenc_alloc_error;
	}
	audio->enc_id = encid;

	rc = msm_adsp_get(audio->module_name, &audio->audrec,
			&audrec_amrnb_adsp_ops, audio);
	if (rc) {
		audpreproc_aenc_free(audio->enc_id);
		goto aenc_alloc_error;
	}

	audio->stopped = 0;
	audio->source = 0;

	audamrnb_in_flush(audio);

	audio->device_events = AUDDEV_EVT_DEV_RDY | AUDDEV_EVT_DEV_RLS |
				AUDDEV_EVT_VOICE_STATE_CHG;
	audio->voice_state = msm_get_voice_state();
	rc = auddev_register_evt_listner(audio->device_events,
					AUDDEV_CLNT_ENC, audio->enc_id,
					amrnb_in_listener, (void *) audio);
	if (rc) {
		MM_ERR("failed to register device event listener\n");
		goto evt_error;
	}
	audio->build_id = socinfo_get_build_id();
	MM_DBG("Modem build id = %s\n", audio->build_id);

	file->private_data = audio;
	audio->opened = 1;
	mutex_unlock(&audio->lock);
	return rc;

evt_error:
	msm_adsp_put(audio->audrec);
	audpreproc_aenc_free(audio->enc_id);
	ion_unmap_kernel(client, audio->buff_handle);
aenc_alloc_error:
buff_map_error:
buff_get_phys_error:
buff_get_flags_error:
	ion_free(client, audio->buff_handle);
buff_alloc_error:
	ion_client_destroy(client);
client_create_error:
done:
	mutex_unlock(&audio->lock);
	return rc;
}
/*
 * msm_gemini_ion_client_create - thin wrapper around msm_ion_client_create().
 *
 * The heap_mask argument is accepted only for interface compatibility
 * and is intentionally unused: the underlying API takes just a name.
 */
static struct ion_client *msm_gemini_ion_client_create(unsigned int heap_mask,
	const char *name)
{
	struct ion_client *client;

	client = msm_ion_client_create(name);
	return client;
}
/*
 * audpcm_in_open - open handler for the PCM audio capture device.
 *
 * Serialized by audio->lock.  Applies a default tunnel-mode mono/11.025 kHz
 * configuration, opens the audio manager, reserves the DSP encoder and
 * preprocessor tasks, then creates an ION client and allocates/maps the
 * DMA output buffer.  Returns 0 on success or a negative errno; error
 * labels unwind acquired resources in reverse order.
 */
static int audpcm_in_open(struct inode *inode, struct file *file)
{
	struct audio_in *audio = &the_audio_in;
	int rc;
	int len = 0;
	unsigned long ionflag = 0;
	ion_phys_addr_t addr = 0;
	struct ion_handle *handle = NULL;
	struct ion_client *client = NULL;
	int encid;

	mutex_lock(&audio->lock);
	if (audio->opened) {
		rc = -EBUSY;
		goto done;
	}

	/* Settings will be re-config at AUDIO_SET_CONFIG,
	 * but at least we need to have initial config */
	audio->mode = MSM_AUD_ENC_MODE_TUNNEL;
	audio->samp_rate = RPC_AUD_DEF_SAMPLE_RATE_11025;
	audio->samp_rate_index = AUDREC_CMD_SAMP_RATE_INDX_11025;
	audio->channel_mode = AUDREC_CMD_STEREO_MODE_MONO;
	audio->buffer_size = MONO_DATA_SIZE;
	audio->enc_type = AUDREC_CMD_TYPE_0_INDEX_WAV | audio->mode;

	rc = audmgr_open(&audio->audmgr);
	if (rc)
		goto done;

	encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name,
			&audio->queue_ids);
	if (encid < 0) {
		MM_ERR("No free encoder available\n");
		rc = -ENODEV;
		goto done;
	}
	audio->enc_id = encid;

	rc = msm_adsp_get(audio->module_name, &audio->audrec,
			&audrec_adsp_ops, audio);
	if (rc) {
		audpreproc_aenc_free(audio->enc_id);
		goto done;
	}

	rc = msm_adsp_get("AUDPREPROCTASK", &audio->audpre,
			&audpre_adsp_ops, audio);
	if (rc) {
		msm_adsp_put(audio->audrec);
		audpreproc_aenc_free(audio->enc_id);
		goto done;
	}

	audio->dsp_cnt = 0;
	audio->stopped = 0;

	audpcm_in_flush(audio);

	client = msm_ion_client_create(UINT_MAX, "Audio_PCM_in_client");
	if (IS_ERR_OR_NULL(client)) {
		MM_ERR("Unable to create ION client\n");
		rc = -ENOMEM;
		goto client_create_error;
	}
	audio->client = client;

	MM_DBG("allocating mem sz = %d\n", DMASZ);
	handle = ion_alloc(client, DMASZ, SZ_4K,
		ION_HEAP(ION_AUDIO_HEAP_ID), 0);
	if (IS_ERR_OR_NULL(handle)) {
		MM_ERR("Unable to create allocate O/P buffers\n");
		rc = -ENOMEM;
		goto output_buff_alloc_error;
	}
	audio->output_buff_handle = handle;

	/* NOTE(review): len is declared int but ion_phys() conventionally
	 * takes a size_t * -- confirm the prototype in this tree. */
	rc = ion_phys(client , handle, &addr, &len);
	if (rc) {
		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
		rc = -ENOMEM;
		goto output_buff_get_phys_error;
	} else {
		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
	}
	audio->phys = (int32_t)addr;

	rc = ion_handle_get_flags(client, handle, &ionflag);
	if (rc) {
		MM_ERR("could not get flags for the handle\n");
		rc = -ENOMEM;
		goto output_buff_get_flags_error;
	}

	audio->data = ion_map_kernel(client, handle);
	if (IS_ERR(audio->data)) {
		MM_ERR("could not map read buffers,freeing instance 0x%08x\n",
			(int)audio);
		rc = -ENOMEM;
		goto output_buff_map_error;
	}
	MM_DBG("read buf: phy addr 0x%08x kernel addr 0x%08x\n",
		audio->phys, (int)audio->data);

	file->private_data = audio;
	audio->opened = 1;
	rc = 0;
done:
	mutex_unlock(&audio->lock);
	return rc;
output_buff_map_error:
output_buff_get_phys_error:
output_buff_get_flags_error:
	ion_free(client, audio->output_buff_handle);
output_buff_alloc_error:
	ion_client_destroy(client);
client_create_error:
	msm_adsp_put(audio->audrec);
	msm_adsp_put(audio->audpre);
	audpreproc_aenc_free(audio->enc_id);
	mutex_unlock(&audio->lock);
	return rc;
}
/*
 * msm_gemini_platform_init - map, clock and wire up the Gemini JPEG core.
 *
 * Claims the MEM/IRQ platform resources, ioremaps the register block,
 * enables the JPEG clocks, initializes the hardware and installs the IRQ
 * handler.  On success the mapped resources are returned through @mem,
 * @base and @irq.  Returns 0 on success or a negative errno, with the
 * fail labels unwinding in reverse order of acquisition.
 *
 * NOTE(review): the ION client created at the end is not error-checked
 * here -- presumably consumers validate gemini_client before use; confirm.
 */
int msm_gemini_platform_init(struct platform_device *pdev,
	struct resource **mem,
	void **base,
	int *irq,
	irqreturn_t (*handler) (int, void *),
	void *context)
{
	int rc = -1;
	int gemini_irq;
	struct resource *gemini_mem, *gemini_io, *gemini_irq_res;
	void *gemini_base;

	gemini_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!gemini_mem) {
		GMN_PR_ERR("%s: no mem resource?\n", __func__);
		return -ENODEV;
	}

	gemini_irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!gemini_irq_res) {
		GMN_PR_ERR("no irq resource?\n");
		return -ENODEV;
	}
	gemini_irq = gemini_irq_res->start;

	gemini_io = request_mem_region(gemini_mem->start,
		resource_size(gemini_mem), pdev->name);
	if (!gemini_io) {
		GMN_PR_ERR("%s: region already claimed\n", __func__);
		return -EBUSY;
	}

	gemini_base = ioremap(gemini_mem->start, resource_size(gemini_mem));
	if (!gemini_base) {
		rc = -ENOMEM;
		GMN_PR_ERR("%s: ioremap failed\n", __func__);
		goto fail1;
	}

	rc = msm_camio_jpeg_clk_enable();
	if (rc) {
		GMN_PR_ERR("%s: clk failed rc = %d\n", __func__, rc);
		goto fail2;
	}

	msm_gemini_hw_init(gemini_base, resource_size(gemini_mem));
	rc = request_irq(gemini_irq, handler, IRQF_TRIGGER_RISING, "gemini",
		context);
	if (rc) {
		GMN_PR_ERR("%s: request_irq failed, %d\n", __func__,
			gemini_irq);
		goto fail3;
	}

	*mem = gemini_mem;
	*base = gemini_base;
	*irq = gemini_irq;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	gemini_client = msm_ion_client_create(-1, "camera/gemini");
#endif
	GMN_DBG("%s:%d] success\n", __func__, __LINE__);
	return rc;

fail3:
	msm_camio_jpeg_clk_disable();
fail2:
	iounmap(gemini_base);
fail1:
	release_mem_region(gemini_mem->start, resource_size(gemini_mem));
	GMN_DBG("%s:%d] fail\n", __func__, __LINE__);
	return rc;
}
/*
 * msm_gemini_platform_init - map, clock, power and wire up the Gemini core.
 *
 * Claims the MEM/IRQ platform resources, ioremaps the register block,
 * probes the hardware generation by trying the 8x60 / 7x / 8960 clock
 * sets in turn, enables the footswitch regulator on non-7x parts,
 * initializes the hardware and installs the IRQ handler.  On success the
 * mapped resources are returned through @mem, @base and @irq.
 * Returns 0 on success or a negative errno; fail labels unwind in
 * reverse order of acquisition.
 *
 * Fix: the regulator get/enable failure branches jumped to
 * gemini_fs_failed while rc still held 0 from the successful clock
 * enable, so those failures were reported to the caller as success.
 * Both branches now set an explicit error code first.
 */
int msm_gemini_platform_init(struct platform_device *pdev,
	struct resource **mem,
	void **base,
	int *irq,
	irqreturn_t (*handler) (int, void *),
	void *context)
{
	int rc = -1;
	int gemini_irq;
	struct resource *gemini_mem, *gemini_io, *gemini_irq_res;
	void *gemini_base;
	struct msm_gemini_device *pgmn_dev =
		(struct msm_gemini_device *) context;

	gemini_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!gemini_mem) {
		GMN_PR_ERR("%s: no mem resource?\n", __func__);
		return -ENODEV;
	}

	gemini_irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!gemini_irq_res) {
		GMN_PR_ERR("no irq resource?\n");
		return -ENODEV;
	}
	gemini_irq = gemini_irq_res->start;

	gemini_io = request_mem_region(gemini_mem->start,
		resource_size(gemini_mem), pdev->name);
	if (!gemini_io) {
		GMN_PR_ERR("%s: region already claimed\n", __func__);
		return -EBUSY;
	}

	gemini_base = ioremap(gemini_mem->start, resource_size(gemini_mem));
	if (!gemini_base) {
		rc = -ENOMEM;
		GMN_PR_ERR("%s: ioremap failed\n", __func__);
		goto fail1;
	}

	/* Probe the HW generation by which clock set accepts enabling:
	 * 8x60 first, fall back to 7x, upgrade to 8960 if the extra
	 * imem clock also enables. */
	pgmn_dev->hw_version = GEMINI_8X60;
	rc = msm_cam_clk_enable(&pgmn_dev->pdev->dev, gemini_8x_clk_info,
	 pgmn_dev->gemini_clk, ARRAY_SIZE(gemini_8x_clk_info), 1);
	if (rc < 0) {
		pgmn_dev->hw_version = GEMINI_7X;
		rc = msm_cam_clk_enable(&pgmn_dev->pdev->dev,
			gemini_7x_clk_info, pgmn_dev->gemini_clk,
			ARRAY_SIZE(gemini_7x_clk_info), 1);
		if (rc < 0) {
			GMN_PR_ERR("%s: clk failed rc = %d\n", __func__, rc);
			goto fail2;
		}
	} else {
		rc = msm_cam_clk_enable(&pgmn_dev->pdev->dev,
			gemini_imem_clk_info, &pgmn_dev->gemini_clk[2],
			ARRAY_SIZE(gemini_imem_clk_info), 1);
		if (!rc)
			pgmn_dev->hw_version = GEMINI_8960;
	}

	if (pgmn_dev->hw_version != GEMINI_7X) {
		if (pgmn_dev->gemini_fs == NULL) {
			pgmn_dev->gemini_fs =
				regulator_get(&pgmn_dev->pdev->dev, "vdd");
			if (IS_ERR(pgmn_dev->gemini_fs)) {
				pr_err("%s: Regulator FS_ijpeg get failed %ld\n",
					__func__,
					PTR_ERR(pgmn_dev->gemini_fs));
				pgmn_dev->gemini_fs = NULL;
				rc = -ENODEV;
				goto gemini_fs_failed;
			} else if (regulator_enable(pgmn_dev->gemini_fs)) {
				pr_err("%s: Regulator FS_ijpeg enable failed\n",
					__func__);
				regulator_put(pgmn_dev->gemini_fs);
				pgmn_dev->gemini_fs = NULL;
				rc = -ENODEV;
				goto gemini_fs_failed;
			}
		}
	}

	msm_gemini_hw_init(gemini_base, resource_size(gemini_mem));
	rc = request_irq(gemini_irq, handler, IRQF_TRIGGER_RISING, "gemini",
		context);
	if (rc) {
		GMN_PR_ERR("%s: request_irq failed, %d\n", __func__,
			gemini_irq);
		goto fail3;
	}

	*mem = gemini_mem;
	*base = gemini_base;
	*irq = gemini_irq;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	gemini_client = msm_ion_client_create(-1, "camera/gemini");
#endif
	GMN_DBG("%s:%d] success\n", __func__, __LINE__);
	return rc;

fail3:
	if (pgmn_dev->hw_version != GEMINI_7X) {
		regulator_disable(pgmn_dev->gemini_fs);
		regulator_put(pgmn_dev->gemini_fs);
		pgmn_dev->gemini_fs = NULL;
	}
gemini_fs_failed:
	if (pgmn_dev->hw_version == GEMINI_8960)
		msm_cam_clk_enable(&pgmn_dev->pdev->dev,
			gemini_imem_clk_info, &pgmn_dev->gemini_clk[2],
			ARRAY_SIZE(gemini_imem_clk_info), 0);
	if (pgmn_dev->hw_version != GEMINI_7X)
		msm_cam_clk_enable(&pgmn_dev->pdev->dev, gemini_8x_clk_info,
			pgmn_dev->gemini_clk,
			ARRAY_SIZE(gemini_8x_clk_info), 0);
	else
		msm_cam_clk_enable(&pgmn_dev->pdev->dev, gemini_7x_clk_info,
			pgmn_dev->gemini_clk,
			ARRAY_SIZE(gemini_7x_clk_info), 0);
fail2:
	iounmap(gemini_base);
fail1:
	release_mem_region(gemini_mem->start, resource_size(gemini_mem));
	GMN_DBG("%s:%d] fail\n", __func__, __LINE__);
	return rc;
}
/* * msm_fd_open - Fd device open method. * @file: Pointer to file struct. */ static int msm_fd_open(struct file *file) { struct msm_fd_device *device = video_drvdata(file); struct video_device *video = video_devdata(file); struct fd_ctx *ctx; int ret; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->fd_device = device; /* Initialize work buffer handler */ ctx->work_buf.pool = NULL; ctx->work_buf.fd = -1; /* Set ctx defaults */ ctx->settings.speed = ctx->fd_device->clk_rates_num; ctx->settings.angle_index = MSM_FD_DEF_ANGLE_IDX; ctx->settings.direction_index = MSM_FD_DEF_DIR_IDX; ctx->settings.min_size_index = MSM_FD_DEF_MIN_SIZE_IDX; ctx->settings.threshold = MSM_FD_DEF_THRESHOLD; atomic_set(&ctx->subscribed_for_event, 0); v4l2_fh_init(&ctx->fh, video); file->private_data = &ctx->fh; v4l2_fh_add(&ctx->fh); ctx->vb2_q.drv_priv = ctx; ctx->vb2_q.mem_ops = &msm_fd_vb2_mem_ops; ctx->vb2_q.ops = &msm_fd_vb2_q_ops; ctx->vb2_q.buf_struct_size = sizeof(struct msm_fd_buffer); ctx->vb2_q.type = V4L2_BUF_TYPE_VIDEO_OUTPUT; ctx->vb2_q.io_modes = VB2_USERPTR; ctx->vb2_q.timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY; ret = vb2_queue_init(&ctx->vb2_q); if (ret < 0) { dev_err(device->dev, "Error queue init\n"); goto error_vb2_queue_init; } ctx->mem_pool.client = msm_ion_client_create(MSM_FD_DRV_NAME); if (IS_ERR_OR_NULL(ctx->mem_pool.client)) { dev_err(device->dev, "Error ion client create\n"); goto error_ion_client_create; } ctx->mem_pool.domain_num = ctx->fd_device->iommu_domain_num; ret = iommu_attach_device(ctx->fd_device->iommu_domain, ctx->fd_device->iommu_dev); if (ret) { dev_err(device->dev, "Can not attach iommu domain\n"); goto error_iommu_attach; } ctx->stats = vmalloc(sizeof(*ctx->stats) * MSM_FD_MAX_RESULT_BUFS); if (!ctx->stats) { dev_err(device->dev, "No memory for face statistics\n"); ret = -ENOMEM; goto error_stats_vmalloc; } return 0; error_stats_vmalloc: iommu_detach_device(ctx->fd_device->iommu_domain, ctx->fd_device->iommu_dev); 
error_iommu_attach: ion_client_destroy(ctx->mem_pool.client); error_ion_client_create: vb2_queue_release(&ctx->vb2_q); error_vb2_queue_init: v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); kfree(ctx); return ret; }
static struct ion_client *res_trk_create_ion_client(void){ struct ion_client *video_client; VCDRES_MSG_LOW("%s", __func__); video_client = msm_ion_client_create(-1, "video_client"); return video_client; }
/*
 * audpcm_in_open - open handler for the PCM audio capture device.
 *
 * Serialized by audio->lock.  Applies a default tunnel-mode mono/11.025 kHz
 * configuration, opens the audio manager, reserves the DSP encoder task,
 * then creates an ION client and allocates/maps the DMA output buffer.
 * Logs an ATS timestamp on the way out.  Returns 0 on success or a
 * negative errno; error labels unwind in reverse order of acquisition.
 */
static int audpcm_in_open(struct inode *inode, struct file *file)
{
	struct audio_in *audio = &the_audio_in;
	int rc;
	int len = 0;
	unsigned long ionflag = 0;
	ion_phys_addr_t addr = 0;
	struct ion_handle *handle = NULL;
	struct ion_client *client = NULL;
	int encid;
	struct timespec ts;
	struct rtc_time tm;

	mutex_lock(&audio->lock);
	if (audio->opened) {
		rc = -EBUSY;
		goto done;
	}

	/* Settings will be re-configured at AUDIO_SET_CONFIG, but at
	 * least we need to have an initial config. */
	audio->mode = MSM_AUD_ENC_MODE_TUNNEL;
	audio->samp_rate = RPC_AUD_DEF_SAMPLE_RATE_11025;
	audio->samp_rate_index = AUDREC_CMD_SAMP_RATE_INDX_11025;
	audio->channel_mode = AUDREC_CMD_STEREO_MODE_MONO;
	audio->buffer_size = MONO_DATA_SIZE;
	audio->enc_type = AUDREC_CMD_TYPE_0_INDEX_WAV | audio->mode;

	rc = audmgr_open(&audio->audmgr);
	if (rc)
		goto done;

	encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name,
			&audio->queue_ids);
	if (encid < 0) {
		MM_AUD_ERR("No free encoder available\n");
		rc = -ENODEV;
		goto done;
	}
	audio->enc_id = encid;

	rc = msm_adsp_get(audio->module_name, &audio->audrec,
			&audrec_adsp_ops, audio);
	if (rc) {
		audpreproc_aenc_free(audio->enc_id);
		goto done;
	}

	audio->dsp_cnt = 0;
	audio->stopped = 0;

	audpcm_in_flush(audio);

	client = msm_ion_client_create(UINT_MAX, "Audio_PCM_in_client");
	if (IS_ERR_OR_NULL(client)) {
		MM_ERR("Unable to create ION client\n");
		rc = -ENOMEM;
		goto client_create_error;
	}
	audio->client = client;

	MM_DBG("allocating mem sz = %d\n", DMASZ);
	handle = ion_alloc(client, DMASZ, SZ_4K,
		ION_HEAP(ION_AUDIO_HEAP_ID));
	if (IS_ERR_OR_NULL(handle)) {
		MM_ERR("Unable to create allocate O/P buffers\n");
		rc = -ENOMEM;
		goto output_buff_alloc_error;
	}
	audio->output_buff_handle = handle;

	/* NOTE(review): len is declared int but ion_phys() conventionally
	 * takes a size_t * -- confirm the prototype in this tree. */
	rc = ion_phys(client , handle, &addr, &len);
	if (rc) {
		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
		rc = -ENOMEM;
		goto output_buff_get_phys_error;
	} else {
		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
	}
	audio->phys = (int32_t)addr;

	rc = ion_handle_get_flags(client, handle, &ionflag);
	if (rc) {
		MM_ERR("could not get flags for the handle\n");
		rc = -ENOMEM;
		goto output_buff_get_flags_error;
	}

	audio->data = ion_map_kernel(client, handle, ionflag);
	if (IS_ERR(audio->data)) {
		MM_ERR("could not map read buffers,freeing instance 0x%08x\n",
			(int)audio);
		rc = -ENOMEM;
		goto output_buff_map_error;
	}
	MM_DBG("read buf: phy addr 0x%08x kernel addr 0x%08x\n",
		audio->phys, (int)audio->data);

	file->private_data = audio;
	audio->opened = 1;
	rc = 0;
done:
	mutex_unlock(&audio->lock);
	/* ATS marker: record wall-clock time of a successful open. */
	getnstimeofday(&ts);
	rtc_time_to_tm(ts.tv_sec, &tm);
	pr_aud_info1("[ATS][start_recording][successful] at %lld \
(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n",
		ktime_to_ns(ktime_get()),
		tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
	return rc;
output_buff_map_error:
output_buff_get_phys_error:
output_buff_get_flags_error:
	ion_free(client, audio->output_buff_handle);
output_buff_alloc_error:
	ion_client_destroy(client);
client_create_error:
	msm_adsp_put(audio->audrec);
	audpreproc_aenc_free(audio->enc_id);
	mutex_unlock(&audio->lock);
	return rc;
}
/*
 * msm_mercury_platform_init - map, clock and wire up the Mercury JPEG core.
 *
 * Claims the MEM/IRQ platform resources, ioremaps the register block,
 * enables the jpegd clocks, initializes the hardware and installs the
 * IRQ handler.  On success the mapped resources are returned through
 * @mem, @base and @irq.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): a clock-enable failure is only logged and execution
 * continues to touch the hardware -- confirm this is intentional.
 * NOTE(review): the ION client created at the end is not error-checked
 * here, and the success message is printed via MCR_PR_ERR -- both look
 * like historical quirks; confirm before changing.
 */
int msm_mercury_platform_init(struct platform_device *pdev,
	struct resource **mem,
	void **base,
	int *irq,
	irqreturn_t (*handler) (int, void *),
	void *context)
{
	int rc = 0;
	int mercury_irq;
	struct resource *mercury_mem, *mercury_io, *mercury_irq_res;
	void *mercury_base;
	struct msm_mercury_device *pmercury_dev =
		(struct msm_mercury_device *) context;

	MCR_DBG("%s:%d]\n", __func__, __LINE__);

	mercury_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mercury_mem) {
		MCR_PR_ERR("%s: no mem resource?\n", __func__);
		return -ENODEV;
	}

	mercury_irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!mercury_irq_res) {
		MCR_PR_ERR("no irq resource?\n");
		return -ENODEV;
	}
	mercury_irq = mercury_irq_res->start;

	mercury_io = request_mem_region(mercury_mem->start,
		resource_size(mercury_mem), pdev->name);
	if (!mercury_io) {
		MCR_PR_ERR("%s: region already claimed\n", __func__);
		return -EBUSY;
	}

	MCR_DBG("%s:%d]\n", __func__, __LINE__);
	mercury_base = ioremap(mercury_mem->start,
		resource_size(mercury_mem));
	if (!mercury_base) {
		rc = -ENOMEM;
		MCR_PR_ERR("%s: ioremap failed\n", __func__);
		goto fail1;
	}

	MCR_DBG("%s:%d]\n", __func__, __LINE__);
	rc = msm_cam_clk_enable(&pmercury_dev->pdev->dev,
		mercury_jpegd_clk_info, pmercury_dev->mercury_clk,
		ARRAY_SIZE(mercury_jpegd_clk_info), 1);
	if (rc < 0)
		MCR_PR_ERR("%s:%d] rc = %d\n", __func__, __LINE__, rc);

	MCR_DBG("%s:%d]\n", __func__, __LINE__);
	msm_mercury_hw_init(mercury_base, resource_size(mercury_mem));
	rc = request_irq(mercury_irq, handler, IRQF_TRIGGER_RISING,
		"mercury", context);
	if (rc) {
		MCR_PR_ERR("%s: request_irq failed, %d\n", __func__,
			mercury_irq);
		goto fail3;
	}

	MCR_DBG("%s:%d]\n", __func__, __LINE__);
	*mem = mercury_mem;
	*base = mercury_base;
	*irq = mercury_irq;
	MCR_DBG("%s:%d]\n", __func__, __LINE__);
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	mercury_client = msm_ion_client_create(-1, "camera/mercury");
#endif
	MCR_PR_ERR("%s:%d] success\n", __func__, __LINE__);
	return rc;

fail3:
	MCR_DBG("%s:%d]\n", __func__, __LINE__);
	msm_cam_clk_enable(&pmercury_dev->pdev->dev,
		mercury_jpegd_clk_info, pmercury_dev->mercury_clk,
		ARRAY_SIZE(mercury_jpegd_clk_info), 0);
	MCR_DBG("%s:%d]\n", __func__, __LINE__);
	iounmap(mercury_base);
fail1:
	MCR_DBG("%s:%d]\n", __func__, __LINE__);
	release_mem_region(mercury_mem->start, resource_size(mercury_mem));
	MCR_DBG("%s:%d]\n", __func__, __LINE__);
	return rc;
}
/*
 * msm_jpeg_platform_init - map, power, clock and wire up the JPEG core.
 *
 * Claims the JPEG and VBIF MEM resources plus the IRQ, registers a bus
 * bandwidth client, ioremaps both register blocks, enables the regulator
 * and clocks, attaches the IOMMU, applies VBIF/QoS settings and installs
 * the IRQ handler.  On success the mapped resources are returned through
 * @mem, @base and @irq and the device state becomes MSM_JPEG_INIT.
 * Returns 0 on success or a negative errno; fail labels unwind in
 * reverse order of acquisition.
 *
 * Fixes:
 *  - msm_jpeg_get_clk_info() failure previously jumped to fail_fs with
 *    rc still 0 (from the successful regulator_enable), so the function
 *    returned success; rc is now set first.
 *  - The fail_clk path assigned regulator_disable()'s result to rc,
 *    clobbering the original error and returning 0 when the disable
 *    succeeded; the disable status now goes to a separate variable.
 */
int msm_jpeg_platform_init(struct platform_device *pdev,
	struct resource **mem,
	void **base,
	int *irq,
	irqreturn_t (*handler) (int, void *),
	void *context)
{
	int rc = -1;
	int reg_rc;
	int jpeg_irq;
	struct resource *jpeg_mem, *vbif_mem, *jpeg_io, *jpeg_irq_res;
	void *jpeg_base;
	struct msm_jpeg_device *pgmn_dev =
		(struct msm_jpeg_device *) context;

	pgmn_dev->state = MSM_JPEG_IDLE;

	jpeg_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!jpeg_mem) {
		JPEG_PR_ERR("%s: jpeg no mem resource?\n", __func__);
		return -ENODEV;
	}

	vbif_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!vbif_mem) {
		JPEG_PR_ERR("%s: vbif no mem resource?\n", __func__);
		return -ENODEV;
	}

	jpeg_irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!jpeg_irq_res) {
		JPEG_PR_ERR("no irq resource?\n");
		return -ENODEV;
	}
	jpeg_irq = jpeg_irq_res->start;
	JPEG_DBG("%s base address: 0x%x, jpeg irq number: %d\n", __func__,
		jpeg_mem->start, jpeg_irq);

	pgmn_dev->jpeg_bus_client =
		msm_bus_scale_register_client(&msm_jpeg_bus_client_pdata);
	if (!pgmn_dev->jpeg_bus_client) {
		JPEG_PR_ERR("%s: Registration Failed!\n", __func__);
		pgmn_dev->jpeg_bus_client = 0;
		return -EINVAL;
	}
	msm_bus_scale_client_update_request(
		pgmn_dev->jpeg_bus_client, 1);

	jpeg_io = request_mem_region(jpeg_mem->start,
		resource_size(jpeg_mem), pdev->name);
	if (!jpeg_io) {
		JPEG_PR_ERR("%s: region already claimed\n", __func__);
		return -EBUSY;
	}

	jpeg_base = ioremap(jpeg_mem->start, resource_size(jpeg_mem));
	if (!jpeg_base) {
		rc = -ENOMEM;
		JPEG_PR_ERR("%s: ioremap failed\n", __func__);
		goto fail_remap;
	}

	pgmn_dev->jpeg_fs = regulator_get(&pgmn_dev->pdev->dev, "vdd");
	rc = regulator_enable(pgmn_dev->jpeg_fs);
	if (rc) {
		JPEG_PR_ERR("%s:%d]jpeg regulator get failed\n",
			__func__, __LINE__);
		goto fail_fs;
	}

	if (msm_jpeg_get_clk_info(pgmn_dev, pgmn_dev->pdev) < 0) {
		JPEG_PR_ERR("%s:%d]jpeg clock get failed\n",
			__func__, __LINE__);
		/* NOTE(review): the enabled regulator is not released on
		 * this path (pre-existing behavior) -- confirm. */
		rc = -EINVAL;
		goto fail_fs;
	}

	rc = msm_cam_clk_enable(&pgmn_dev->pdev->dev, jpeg_8x_clk_info,
		pgmn_dev->jpeg_clk, pgmn_dev->num_clk, 1);
	if (rc < 0) {
		JPEG_PR_ERR("%s: clk failed rc = %d\n", __func__, rc);
		goto fail_clk;
	}

	pgmn_dev->hw_version = readl_relaxed(jpeg_base + JPEG_HW_VERSION);
	JPEG_DBG_HIGH("%s:%d] jpeg HW version 0x%x", __func__, __LINE__,
		pgmn_dev->hw_version);

	pgmn_dev->jpeg_vbif = ioremap(vbif_mem->start,
		resource_size(vbif_mem));
	if (!pgmn_dev->jpeg_vbif) {
		rc = -ENOMEM;
		JPEG_PR_ERR("%s: ioremap failed\n", __func__);
		goto fail_vbif;
	}
	JPEG_DBG("%s:%d] jpeg_vbif 0x%x", __func__, __LINE__,
		(uint32_t)pgmn_dev->jpeg_vbif);

	rc = msm_jpeg_attach_iommu(pgmn_dev);
	if (rc < 0)
		goto fail_iommu;

	set_vbif_params(pgmn_dev, pgmn_dev->jpeg_vbif);

	/* QoS tuning applies only to the 8939 revision of the core. */
	if (pgmn_dev->hw_version == JPEG_8939) {
		writel_relaxed(0x0000550e, jpeg_base + JPEG_FE_QOS_CFG);
		writel_relaxed(0x00005555, jpeg_base + JPEG_WE_QOS_CFG);
	}

	rc = request_irq(jpeg_irq, handler, IRQF_TRIGGER_RISING, "jpeg",
		context);
	if (rc) {
		JPEG_PR_ERR("%s: request_irq failed, %d\n", __func__,
			jpeg_irq);
		goto fail_request_irq;
	}

	*mem = jpeg_mem;
	*base = jpeg_base;
	*irq = jpeg_irq;

	pgmn_dev->jpeg_client = msm_ion_client_create(-1, "camera/jpeg");
	JPEG_DBG("%s:%d] success\n", __func__, __LINE__);

	pgmn_dev->state = MSM_JPEG_INIT;
	return rc;

fail_request_irq:
	msm_jpeg_detach_iommu(pgmn_dev);
fail_iommu:
	iounmap(pgmn_dev->jpeg_vbif);
fail_vbif:
	msm_cam_clk_enable(&pgmn_dev->pdev->dev, jpeg_8x_clk_info,
		pgmn_dev->jpeg_clk, pgmn_dev->num_clk, 0);
fail_clk:
	reg_rc = regulator_disable(pgmn_dev->jpeg_fs);
	if (!reg_rc)
		regulator_put(pgmn_dev->jpeg_fs);
	else
		JPEG_PR_ERR("%s:%d] regulator disable failed %d",
			__func__, __LINE__, reg_rc);
	pgmn_dev->jpeg_fs = NULL;
fail_fs:
	iounmap(jpeg_base);
fail_remap:
	release_mem_region(jpeg_mem->start, resource_size(jpeg_mem));
	JPEG_DBG("%s:%d] fail\n", __func__, __LINE__);
	return rc;
}
/*
 * smcmod_send_buf_cmd - pass user-supplied ION buffers to the secure world.
 *
 * Imports the command (and optional response) buffer by fd, validates the
 * declared lengths against the real buffer sizes, maps both into the
 * kernel and invokes scm_call().  All resources are released on every
 * exit path via the shared buf_cleanup label.  Returns 0 on success or
 * -EINVAL on any validation/mapping failure; the secure-world status is
 * reported separately through reqp->return_val.
 */
static int smcmod_send_buf_cmd(struct smcmod_buf_req *reqp)
{
	int ret = 0;
	struct ion_client *ion_clientp = NULL;
	struct ion_handle *ion_cmd_handlep = NULL;
	struct ion_handle *ion_resp_handlep = NULL;
	void *cmd_vaddrp = NULL;
	void *resp_vaddrp = NULL;
	unsigned long cmd_buf_size = 0;
	unsigned long resp_buf_size = 0;

	/* sanity check the argument */
	if (IS_ERR_OR_NULL(reqp))
		return -EINVAL;

	/* sanity check the fds */
	if (reqp->ion_cmd_fd < 0)
		return -EINVAL;

	/* create an ion client */
	ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");

	/* check for errors */
	if (IS_ERR_OR_NULL(ion_clientp))
		return -EINVAL;

	/* import the command buffer fd */
	ion_cmd_handlep = ion_import_dma_buf(ion_clientp, reqp->ion_cmd_fd);

	/* sanity check the handle */
	if (IS_ERR_OR_NULL(ion_cmd_handlep)) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* retrieve the size of the buffer */
	if (ion_handle_get_size(ion_clientp, ion_cmd_handlep,
		&cmd_buf_size) < 0) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* ensure that the command buffer size is not
	 * greater than the size of the buffer.
	 */
	if (reqp->cmd_len > cmd_buf_size) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* map the area to get a virtual address */
	cmd_vaddrp = ion_map_kernel(ion_clientp, ion_cmd_handlep);

	/* sanity check the address */
	if (IS_ERR_OR_NULL(cmd_vaddrp)) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* check if there is a response buffer */
	if (reqp->ion_resp_fd >= 0) {
		/* import the handle */
		ion_resp_handlep =
			ion_import_dma_buf(ion_clientp, reqp->ion_resp_fd);

		/* sanity check the handle */
		if (IS_ERR_OR_NULL(ion_resp_handlep)) {
			ret = -EINVAL;
			goto buf_cleanup;
		}

		/* retrieve the size of the buffer */
		if (ion_handle_get_size(ion_clientp, ion_resp_handlep,
			&resp_buf_size) < 0) {
			ret = -EINVAL;
			goto buf_cleanup;
		}

		/* ensure that the command buffer size is not
		 * greater than the size of the buffer.
		 */
		if (reqp->resp_len > resp_buf_size) {
			ret = -EINVAL;
			goto buf_cleanup;
		}

		/* map the area to get a virtual address */
		resp_vaddrp = ion_map_kernel(ion_clientp, ion_resp_handlep);

		/* sanity check the address */
		if (IS_ERR_OR_NULL(resp_vaddrp)) {
			ret = -EINVAL;
			goto buf_cleanup;
		}
	}

	/* No need to flush the cache lines for the command buffer here,
	 * because the buffer will be flushed by scm_call.
	 */

	/* call scm function to switch to secure world */
	reqp->return_val = scm_call(reqp->service_id, reqp->command_id,
		cmd_vaddrp, reqp->cmd_len, resp_vaddrp, reqp->resp_len);

	/* The cache lines for the response buffer have already been
	 * invalidated by scm_call before returning.
	 */

buf_cleanup:
	/* if the client and handle(s) are valid, free them */
	if (!IS_ERR_OR_NULL(ion_clientp)) {
		if (!IS_ERR_OR_NULL(ion_cmd_handlep)) {
			if (!IS_ERR_OR_NULL(cmd_vaddrp))
				ion_unmap_kernel(ion_clientp,
					ion_cmd_handlep);
			ion_free(ion_clientp, ion_cmd_handlep);
		}

		if (!IS_ERR_OR_NULL(ion_resp_handlep)) {
			if (!IS_ERR_OR_NULL(resp_vaddrp))
				ion_unmap_kernel(ion_clientp,
					ion_resp_handlep);
			ion_free(ion_clientp, ion_resp_handlep);
		}

		ion_client_destroy(ion_clientp);
	}

	return ret;
}
/*
 * msm_jpeg_platform_init() - bring up the JPEG core.
 *
 * Acquires, in order: bus-scale client, register region, iomapping,
 * "vdd" regulator, core clocks, VBIF iomapping, IOMMU attachments and
 * the JPEG IRQ.  On success returns 0 and hands the mem resource,
 * mapped base and IRQ number back through @mem, @base and @irq.
 * On failure all resources acquired so far are released and a negative
 * errno is returned.
 *
 * Fixed here: the error path used to overwrite @rc with the return of
 * regulator_disable(), which made a failed init return 0 (success);
 * regulator_get() is now validated before use and its reference is
 * dropped when regulator_enable() fails.
 */
int msm_jpeg_platform_init(struct platform_device *pdev,
	struct resource **mem,
	void **base,
	int *irq,
	irqreturn_t (*handler) (int, void *),
	void *context)
{
	int rc = -1;
	int i = 0;
	int jpeg_irq;
	struct resource *jpeg_mem, *jpeg_io, *jpeg_irq_res;
	void *jpeg_base;
	struct msm_jpeg_device *pgmn_dev =
		(struct msm_jpeg_device *) context;

	pgmn_dev->state = MSM_JPEG_IDLE;

	jpeg_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!jpeg_mem) {
		JPEG_PR_ERR("%s: no mem resource?\n", __func__);
		return -ENODEV;
	}

	jpeg_irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!jpeg_irq_res) {
		JPEG_PR_ERR("no irq resource?\n");
		return -ENODEV;
	}
	jpeg_irq = jpeg_irq_res->start;
	JPEG_DBG("%s base address: 0x%x, jpeg irq number: %d\n", __func__,
		jpeg_mem->start, jpeg_irq);

	/* register with the bus-scale driver and vote for bandwidth */
	pgmn_dev->jpeg_bus_client =
		msm_bus_scale_register_client(&msm_jpeg_bus_client_pdata);
	if (!pgmn_dev->jpeg_bus_client) {
		JPEG_PR_ERR("%s: Registration Failed!\n", __func__);
		pgmn_dev->jpeg_bus_client = 0;
		return -EINVAL;
	}
	msm_bus_scale_client_update_request(
		pgmn_dev->jpeg_bus_client, 1);

	jpeg_io = request_mem_region(jpeg_mem->start,
		resource_size(jpeg_mem), pdev->name);
	if (!jpeg_io) {
		JPEG_PR_ERR("%s: region already claimed\n", __func__);
		return -EBUSY;
	}

	jpeg_base = ioremap(jpeg_mem->start, resource_size(jpeg_mem));
	if (!jpeg_base) {
		rc = -ENOMEM;
		JPEG_PR_ERR("%s: ioremap failed\n", __func__);
		goto fail_remap;
	}

	/* validate the regulator handle before enabling it */
	pgmn_dev->jpeg_fs = regulator_get(&pgmn_dev->pdev->dev, "vdd");
	if (IS_ERR_OR_NULL(pgmn_dev->jpeg_fs)) {
		rc = pgmn_dev->jpeg_fs ?
			PTR_ERR(pgmn_dev->jpeg_fs) : -ENODEV;
		pgmn_dev->jpeg_fs = NULL;
		JPEG_PR_ERR("%s:%d]jpeg regulator get failed\n",
			__func__, __LINE__);
		goto fail_fs;
	}
	rc = regulator_enable(pgmn_dev->jpeg_fs);
	if (rc) {
		JPEG_PR_ERR("%s:%d]jpeg regulator get failed\n",
			__func__, __LINE__);
		/* drop the reference taken by regulator_get() */
		regulator_put(pgmn_dev->jpeg_fs);
		pgmn_dev->jpeg_fs = NULL;
		goto fail_fs;
	}

	rc = msm_cam_clk_enable(&pgmn_dev->pdev->dev, jpeg_8x_clk_info,
		pgmn_dev->jpeg_clk, ARRAY_SIZE(jpeg_8x_clk_info), 1);
	if (rc < 0) {
		JPEG_PR_ERR("%s: clk failed rc = %d\n", __func__, rc);
		goto fail_clk;
	}

	pgmn_dev->hw_version = readl_relaxed(jpeg_base + JPEG_HW_VERSION);
	JPEG_DBG_HIGH("%s:%d] jpeg HW version 0x%x", __func__, __LINE__,
		pgmn_dev->hw_version);

	pgmn_dev->jpeg_vbif = ioremap(VBIF_BASE_ADDRESS, VBIF_REGION_SIZE);
	if (!pgmn_dev->jpeg_vbif) {
		rc = -ENOMEM;
		JPEG_PR_ERR("%s:%d] ioremap failed\n", __func__, __LINE__);
		goto fail_vbif;
	}
	JPEG_DBG("%s:%d] jpeg_vbif 0x%x", __func__, __LINE__,
		(uint32_t)pgmn_dev->jpeg_vbif);

#ifdef CONFIG_MSM_IOMMU
	for (i = 0; i < pgmn_dev->iommu_cnt; i++) {
		rc = iommu_attach_device(pgmn_dev->domain,
				pgmn_dev->iommu_ctx_arr[i]);
		if (rc < 0) {
			rc = -ENODEV;
			JPEG_PR_ERR("%s: Device attach failed\n", __func__);
			goto fail_iommu;
		}
		JPEG_DBG("%s:%d] dom 0x%x ctx 0x%x", __func__, __LINE__,
			(uint32_t)pgmn_dev->domain,
			(uint32_t)pgmn_dev->iommu_ctx_arr[i]);
	}
#endif
	set_vbif_params(pgmn_dev, pgmn_dev->jpeg_vbif);

#ifdef CONFIG_MACH_LGE
	*mem = jpeg_mem;
	*base = jpeg_base;
#endif

	rc = request_irq(jpeg_irq, handler, IRQF_TRIGGER_RISING,
		"jpeg", context);
	if (rc) {
		JPEG_PR_ERR("%s: request_irq failed, %d\n", __func__,
			jpeg_irq);
		goto fail_request_irq;
	}

#ifndef CONFIG_MACH_LGE /* QCT origin */
	*mem = jpeg_mem;
	*base = jpeg_base;
#endif
	*irq = jpeg_irq;

	/* NOTE(review): the ion client result is not validated here;
	 * users of pgmn_dev->jpeg_client are presumed to handle an
	 * ERR_PTR/NULL client - confirm before relying on it. */
	pgmn_dev->jpeg_client = msm_ion_client_create(-1, "camera/jpeg");
	JPEG_DBG("%s:%d] success\n", __func__, __LINE__);

	pgmn_dev->state = MSM_JPEG_INIT;
	return rc;

fail_request_irq:
#ifdef CONFIG_MACH_LGE
	*mem = NULL;
	*base = NULL;
#endif
#ifdef CONFIG_MSM_IOMMU
	for (i = 0; i < pgmn_dev->iommu_cnt; i++) {
		JPEG_PR_ERR("%s:%d] dom 0x%x ctx 0x%x", __func__, __LINE__,
			(uint32_t)pgmn_dev->domain,
			(uint32_t)pgmn_dev->iommu_ctx_arr[i]);
		iommu_detach_device(pgmn_dev->domain,
			pgmn_dev->iommu_ctx_arr[i]);
	}
#endif

fail_iommu:
	/* NOTE(review): contexts attached before a partial iommu attach
	 * failure are not detached here (pre-existing behavior). */
	iounmap(pgmn_dev->jpeg_vbif);

fail_vbif:
	msm_cam_clk_enable(&pgmn_dev->pdev->dev, jpeg_8x_clk_info,
		pgmn_dev->jpeg_clk, ARRAY_SIZE(jpeg_8x_clk_info), 0);

fail_clk:
	/* use a scoped temporary so the original error in rc survives */
	{
		int disable_rc = regulator_disable(pgmn_dev->jpeg_fs);
		if (!disable_rc)
			regulator_put(pgmn_dev->jpeg_fs);
		else
			JPEG_PR_ERR("%s:%d] regulator disable failed %d",
				__func__, __LINE__, disable_rc);
		pgmn_dev->jpeg_fs = NULL;
	}

fail_fs:
	iounmap(jpeg_base);

fail_remap:
	release_mem_region(jpeg_mem->start, resource_size(jpeg_mem));
	JPEG_DBG("%s:%d] fail\n", __func__, __LINE__);
	return rc;
}
static int smcmod_send_cipher_cmd(struct smcmod_cipher_req *reqp) { int ret = 0; struct smcmod_cipher_scm_req scm_req; struct ion_client *ion_clientp = NULL; struct ion_handle *ion_key_handlep = NULL; struct ion_handle *ion_plain_handlep = NULL; struct ion_handle *ion_cipher_handlep = NULL; struct ion_handle *ion_iv_handlep = NULL; size_t size = 0; if (IS_ERR_OR_NULL(reqp)) return -EINVAL; /* sanity check the fds */ if ((reqp->ion_plain_text_fd < 0) || (reqp->ion_cipher_text_fd < 0) || (reqp->ion_init_vector_fd < 0)) return -EINVAL; /* create an ion client */ ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod"); /* check for errors */ if (IS_ERR_OR_NULL(ion_clientp)) return -EINVAL; /* fill in the scm request structure */ scm_req.algorithm = reqp->algorithm; scm_req.operation = reqp->operation; scm_req.mode = reqp->mode; scm_req.key_phys_addr = 0; scm_req.key_size = reqp->key_size; scm_req.plain_text_size = reqp->plain_text_size; scm_req.cipher_text_size = reqp->cipher_text_size; scm_req.init_vector_size = reqp->init_vector_size; if (!reqp->key_is_null) { /* import the key buffer and get the physical address */ ret = smcmod_ion_fd_to_phys(reqp->ion_key_fd, ion_clientp, &ion_key_handlep, &scm_req.key_phys_addr, &size); if (ret < 0) goto buf_cleanup; /* ensure that the key size is not * greater than the size of the buffer. */ if (reqp->key_size > size) { ret = -EINVAL; goto buf_cleanup; } } /* import the plain text buffer and get the physical address */ ret = smcmod_ion_fd_to_phys(reqp->ion_plain_text_fd, ion_clientp, &ion_plain_handlep, &scm_req.plain_text_phys_addr, &size); if (ret < 0) goto buf_cleanup; /* ensure that the plain text size is not * greater than the size of the buffer. 
*/ if (reqp->plain_text_size > size) { ret = -EINVAL; goto buf_cleanup; } /* import the cipher text buffer and get the physical address */ ret = smcmod_ion_fd_to_phys(reqp->ion_cipher_text_fd, ion_clientp, &ion_cipher_handlep, &scm_req.cipher_text_phys_addr, &size); if (ret < 0) goto buf_cleanup; /* ensure that the cipher text size is not * greater than the size of the buffer. */ if (reqp->cipher_text_size > size) { ret = -EINVAL; goto buf_cleanup; } /* import the init vector buffer and get the physical address */ ret = smcmod_ion_fd_to_phys(reqp->ion_init_vector_fd, ion_clientp, &ion_iv_handlep, &scm_req.init_vector_phys_addr, &size); if (ret < 0) goto buf_cleanup; /* ensure that the init vector size is not * greater than the size of the buffer. */ if (reqp->init_vector_size > size) { ret = -EINVAL; goto buf_cleanup; } /* Only the scm_req structure will be flushed by scm_call, * so we must flush the cache for the input ion buffers here. */ msm_ion_do_cache_op(ion_clientp, ion_key_handlep, NULL, scm_req.key_size, ION_IOC_CLEAN_CACHES); msm_ion_do_cache_op(ion_clientp, ion_iv_handlep, NULL, scm_req.init_vector_size, ION_IOC_CLEAN_CACHES); /* For decrypt, cipher text is input, otherwise it's plain text. 
*/ if (reqp->operation) msm_ion_do_cache_op(ion_clientp, ion_cipher_handlep, NULL, scm_req.cipher_text_size, ION_IOC_CLEAN_CACHES); else msm_ion_do_cache_op(ion_clientp, ion_plain_handlep, NULL, scm_req.plain_text_size, ION_IOC_CLEAN_CACHES); /* call scm function to switch to secure world */ reqp->return_val = scm_call(SMCMOD_SVC_CRYPTO, SMCMOD_CRYPTO_CMD_CIPHER, &scm_req, sizeof(scm_req), NULL, 0); /* Invalidate the output buffer, since it's not done by scm_call */ /* for decrypt, plain text is the output, otherwise it's cipher text */ if (reqp->operation) msm_ion_do_cache_op(ion_clientp, ion_plain_handlep, NULL, scm_req.plain_text_size, ION_IOC_INV_CACHES); else msm_ion_do_cache_op(ion_clientp, ion_cipher_handlep, NULL, scm_req.cipher_text_size, ION_IOC_INV_CACHES); buf_cleanup: /* if the client and handles are valid, free them */ if (!IS_ERR_OR_NULL(ion_clientp)) { if (!IS_ERR_OR_NULL(ion_key_handlep)) ion_free(ion_clientp, ion_key_handlep); if (!IS_ERR_OR_NULL(ion_plain_handlep)) ion_free(ion_clientp, ion_plain_handlep); if (!IS_ERR_OR_NULL(ion_cipher_handlep)) ion_free(ion_clientp, ion_cipher_handlep); if (!IS_ERR_OR_NULL(ion_iv_handlep)) ion_free(ion_clientp, ion_iv_handlep); ion_client_destroy(ion_clientp); } return ret; }
static struct ion_client *res_trk_create_ion_client(void){ struct ion_client *video_client; video_client = msm_ion_client_create(-1, "vcodec_krnl"); return video_client; }
static int smcmod_send_msg_digest_cmd(struct smcmod_msg_digest_req *reqp) { int ret = 0; struct smcmod_msg_digest_scm_req scm_req; struct ion_client *ion_clientp = NULL; struct ion_handle *ion_key_handlep = NULL; struct ion_handle *ion_input_handlep = NULL; struct ion_handle *ion_output_handlep = NULL; size_t size = 0; if (IS_ERR_OR_NULL(reqp)) return -EINVAL; /* sanity check the fds */ if ((reqp->ion_input_fd < 0) || (reqp->ion_output_fd < 0)) return -EINVAL; /* create an ion client */ ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod"); /* check for errors */ if (IS_ERR_OR_NULL(ion_clientp)) return -EINVAL; /* fill in the scm request structure */ scm_req.algorithm = reqp->algorithm; scm_req.key_phys_addr = 0; scm_req.key_size = reqp->key_size; scm_req.input_size = reqp->input_size; scm_req.output_size = reqp->output_size; scm_req.verify = 0; if (!reqp->key_is_null) { /* import the key buffer and get the physical address */ ret = smcmod_ion_fd_to_phys(reqp->ion_key_fd, ion_clientp, &ion_key_handlep, &scm_req.key_phys_addr, &size); if (ret < 0) goto buf_cleanup; /* ensure that the key size is not * greater than the size of the buffer. */ if (reqp->key_size > size) { ret = -EINVAL; goto buf_cleanup; } } /* import the input buffer and get the physical address */ ret = smcmod_ion_fd_to_phys(reqp->ion_input_fd, ion_clientp, &ion_input_handlep, &scm_req.input_phys_addr, &size); if (ret < 0) goto buf_cleanup; /* ensure that the input size is not * greater than the size of the buffer. */ if (reqp->input_size > size) { ret = -EINVAL; goto buf_cleanup; } /* import the output buffer and get the physical address */ ret = smcmod_ion_fd_to_phys(reqp->ion_output_fd, ion_clientp, &ion_output_handlep, &scm_req.output_phys_addr, &size); if (ret < 0) goto buf_cleanup; /* ensure that the output size is not * greater than the size of the buffer. 
*/ if (reqp->output_size > size) { ret = -EINVAL; goto buf_cleanup; } /* Only the scm_req structure will be flushed by scm_call, * so we must flush the cache for the input ion buffers here. */ msm_ion_do_cache_op(ion_clientp, ion_key_handlep, NULL, scm_req.key_size, ION_IOC_CLEAN_CACHES); msm_ion_do_cache_op(ion_clientp, ion_input_handlep, NULL, scm_req.input_size, ION_IOC_CLEAN_CACHES); /* call scm function to switch to secure world */ if (reqp->fixed_block) reqp->return_val = scm_call(SMCMOD_SVC_CRYPTO, SMCMOD_CRYPTO_CMD_MSG_DIGEST_FIXED, &scm_req, sizeof(scm_req), NULL, 0); else reqp->return_val = scm_call(SMCMOD_SVC_CRYPTO, SMCMOD_CRYPTO_CMD_MSG_DIGEST, &scm_req, sizeof(scm_req), NULL, 0); /* Invalidate the output buffer, since it's not done by scm_call */ msm_ion_do_cache_op(ion_clientp, ion_output_handlep, NULL, scm_req.output_size, ION_IOC_INV_CACHES); buf_cleanup: /* if the client and handles are valid, free them */ if (!IS_ERR_OR_NULL(ion_clientp)) { if (!IS_ERR_OR_NULL(ion_key_handlep)) ion_free(ion_clientp, ion_key_handlep); if (!IS_ERR_OR_NULL(ion_input_handlep)) ion_free(ion_clientp, ion_input_handlep); if (!IS_ERR_OR_NULL(ion_output_handlep)) ion_free(ion_clientp, ion_output_handlep); ion_client_destroy(ion_clientp); } return ret; }
static int register_memory(void) { int result; unsigned long paddr; void *kvptr; unsigned long kvaddr; unsigned long mem_len; mutex_lock(&acdb_data.acdb_mutex); acdb_data.ion_client = msm_ion_client_create(UINT_MAX, "audio_acdb_client"); if (IS_ERR_OR_NULL(acdb_data.ion_client)) { pr_err("%s: Could not register ION client!!!\n", __func__); result = PTR_ERR(acdb_data.ion_client); goto err; } acdb_data.ion_handle = ion_import_dma_buf(acdb_data.ion_client, atomic_read(&acdb_data.map_handle)); if (IS_ERR_OR_NULL(acdb_data.ion_handle)) { pr_err("%s: Could not import map handle!!!\n", __func__); result = PTR_ERR(acdb_data.ion_handle); goto err_ion_client; } result = ion_phys(acdb_data.ion_client, acdb_data.ion_handle, &paddr, (size_t *)&mem_len); if (result != 0) { pr_err("%s: Could not get phys addr!!!\n", __func__); goto err_ion_handle; } kvptr = ion_map_kernel(acdb_data.ion_client, acdb_data.ion_handle, 0); if (IS_ERR_OR_NULL(kvptr)) { pr_err("%s: Could not get kernel virt addr!!!\n", __func__); result = PTR_ERR(kvptr); goto err_ion_handle; } kvaddr = (unsigned long)kvptr; atomic64_set(&acdb_data.paddr, paddr); atomic64_set(&acdb_data.kvaddr, kvaddr); atomic64_set(&acdb_data.mem_len, mem_len); mutex_unlock(&acdb_data.acdb_mutex); pr_debug("%s done! paddr = 0x%lx, " "kvaddr = 0x%lx, len = x%lx\n", __func__, (long)atomic64_read(&acdb_data.paddr), (long)atomic64_read(&acdb_data.kvaddr), (long)atomic64_read(&acdb_data.mem_len)); return result; err_ion_handle: ion_free(acdb_data.ion_client, acdb_data.ion_handle); err_ion_client: ion_client_destroy(acdb_data.ion_client); err: atomic64_set(&acdb_data.mem_len, 0); mutex_unlock(&acdb_data.acdb_mutex); return result; }
static int smcmod_send_dec_cmd(struct smcmod_decrypt_req *reqp) { struct ion_client *ion_clientp; struct ion_handle *ion_handlep = NULL; int ion_fd; int ret; u32 pa; size_t size; struct { u32 args[4]; } req; struct { u32 args[3]; } rsp; ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod"); if (IS_ERR_OR_NULL(ion_clientp)) return PTR_ERR(ion_clientp); switch (reqp->operation) { case SMCMOD_DECRYPT_REQ_OP_METADATA: { ion_fd = reqp->request.metadata.ion_fd; ret = smcmod_ion_fd_to_phys(ion_fd, ion_clientp, &ion_handlep, &pa, &size); if (ret) goto error; req.args[0] = reqp->request.metadata.len; req.args[1] = pa; break; } case SMCMOD_DECRYPT_REQ_OP_IMG_FRAG: { ion_fd = reqp->request.img_frag.ion_fd; ret = smcmod_ion_fd_to_phys(ion_fd, ion_clientp, &ion_handlep, &pa, &size); if (ret) goto error; req.args[0] = reqp->request.img_frag.ctx_id; req.args[1] = reqp->request.img_frag.last_frag; req.args[2] = reqp->request.img_frag.frag_len; req.args[3] = pa + reqp->request.img_frag.offset; break; } default: ret = -EINVAL; goto error; } /* * scm_call does cache maintenance over request and response buffers. * The userspace must flush/invalidate ion input/output buffers itself. */ ret = scm_call(reqp->service_id, reqp->command_id, &req, sizeof(req), &rsp, sizeof(rsp)); if (ret) goto error; switch (reqp->operation) { case SMCMOD_DECRYPT_REQ_OP_METADATA: reqp->response.metadata.status = rsp.args[0]; reqp->response.metadata.ctx_id = rsp.args[1]; reqp->response.metadata.end_offset = rsp.args[2] - pa; break; case SMCMOD_DECRYPT_REQ_OP_IMG_FRAG: { reqp->response.img_frag.status = rsp.args[0]; break; } default: break; } error: if (!IS_ERR_OR_NULL(ion_clientp)) { if (!IS_ERR_OR_NULL(ion_handlep)) ion_free(ion_clientp, ion_handlep); ion_client_destroy(ion_clientp); } return ret; }
/*
 * vcap_probe() - probe the MSM VCAP capture device.
 *
 * Maps registers, requests the VC/VP interrupts, registers the V4L2
 * and video devices, creates the work queue and ION client, and powers
 * the core up far enough to run detect_vc().
 *
 * Returns 0 on success or a negative errno with all acquired resources
 * released.
 *
 * Fixed here: the video_device_alloc(), create_workqueue() and
 * msm_ion_client_create() failure paths used to jump to cleanup with
 * ret still holding 0 from the preceding successful call, so a failed
 * probe reported success.  Each path now sets an error code first.
 */
static int __devinit vcap_probe(struct platform_device *pdev)
{
	struct vcap_dev *dev;
	struct video_device *vfd;
	int ret;

	dprintk(1, "Probe started\n");
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	vcap_ctrl = dev;
	dev->vcap_pdata = pdev->dev.platform_data;

	dev->vcapmem = platform_get_resource_byname(pdev,
			IORESOURCE_MEM, "vcap");
	if (!dev->vcapmem) {
		pr_err("VCAP: %s: no mem resource?\n", __func__);
		ret = -ENODEV;
		goto free_dev;
	}

	dev->vcapio = request_mem_region(dev->vcapmem->start,
		resource_size(dev->vcapmem), pdev->name);
	if (!dev->vcapio) {
		pr_err("VCAP: %s: no valid mem region\n", __func__);
		ret = -EBUSY;
		goto free_dev;
	}

	dev->vcapbase = ioremap(dev->vcapmem->start,
		resource_size(dev->vcapmem));
	if (!dev->vcapbase) {
		ret = -ENOMEM;
		pr_err("VCAP: %s: vcap ioremap failed\n", __func__);
		goto free_resource;
	}

	dev->vcirq = platform_get_resource_byname(pdev,
			IORESOURCE_IRQ, "vc_irq");
	if (!dev->vcirq) {
		pr_err("%s: no vc irq resource?\n", __func__);
		ret = -ENODEV;
		goto free_resource;
	}
	dev->vpirq = platform_get_resource_byname(pdev,
			IORESOURCE_IRQ, "vp_irq");
	if (!dev->vpirq) {
		pr_err("%s: no vp irq resource?\n", __func__);
		ret = -ENODEV;
		goto free_resource;
	}

	/* NOTE(review): the irqs requested below are not released on
	 * the later failure paths (pre-existing behavior) - confirm
	 * whether free_irq() should be added to the unwind. */
	ret = request_irq(dev->vcirq->start, vcap_vc_handler,
		IRQF_TRIGGER_RISING, "vc_irq", 0);
	if (ret < 0) {
		pr_err("%s: vc irq request fail\n", __func__);
		ret = -EBUSY;
		goto free_resource;
	}
	disable_irq(dev->vcirq->start);

	ret = request_irq(dev->vpirq->start, vcap_vp_handler,
		IRQF_TRIGGER_RISING, "vp_irq", 0);
	if (ret < 0) {
		pr_err("%s: vp irq request fail\n", __func__);
		ret = -EBUSY;
		goto free_resource;
	}
	disable_irq(dev->vpirq->start);

	snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name),
			"%s", MSM_VCAP_DRV_NAME);
	ret = v4l2_device_register(NULL, &dev->v4l2_dev);
	if (ret)
		goto free_resource;

	ret = vcap_enable(dev, &pdev->dev);
	if (ret)
		goto unreg_dev;
	msm_bus_scale_client_update_request(dev->bus_client_handle, 3);

	ret = detect_vc(dev);
	if (ret)
		goto power_down;

	/* init video device */
	vfd = video_device_alloc();
	if (!vfd) {
		ret = -ENOMEM;
		goto deinit_vc;
	}
	*vfd = vcap_template;
	vfd->v4l2_dev = &dev->v4l2_dev;

	ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
	if (ret < 0)
		goto rel_vdev;

	dev->vfd = vfd;
	video_set_drvdata(vfd, dev);

	dev->vcap_wq = create_workqueue("vcap");
	if (!dev->vcap_wq) {
		pr_err("Could not create workqueue");
		ret = -ENOMEM;
		goto rel_vdev;
	}

	dev->ion_client = msm_ion_client_create(-1, "vcap");
	if (IS_ERR((void *)dev->ion_client)) {
		pr_err("could not get ion client");
		ret = PTR_ERR(dev->ion_client);
		goto rel_vcap_wq;
	}

	atomic_set(&dev->vc_enabled, 0);
	atomic_set(&dev->vp_enabled, 0);

	dprintk(1, "Exit probe succesfully");
	return 0;

rel_vcap_wq:
	destroy_workqueue(dev->vcap_wq);
rel_vdev:
	video_device_release(vfd);
deinit_vc:
	deinit_vc();
power_down:
	vcap_disable(dev);
unreg_dev:
	v4l2_device_unregister(&dev->v4l2_dev);
free_resource:
	iounmap(dev->vcapbase);
	release_mem_region(dev->vcapmem->start,
		resource_size(dev->vcapmem));
free_dev:
	vcap_ctrl = NULL;
	kfree(dev);
	return ret;
}
static struct ion_client *res_trk_create_ion_client(void){ struct ion_client *video_client; video_client = msm_ion_client_create((1<<ION_HEAP_TYPE_CARVEOUT), "video_client"); return video_client; }