Example #1
static PVRSRV_ERROR
PMRAcquireKernelMappingDataIon(PMR_IMPL_PRIVDATA pvPriv,
							   IMG_SIZE_T uiOffset,
							   IMG_SIZE_T uiSize,
							   IMG_VOID **ppvKernelAddressOut,
							   IMG_HANDLE *phHandleOut,
							   PMR_FLAGS_T ulFlags)
{
	PMR_ION_DATA *psPrivData = pvPriv;
	IMG_PVOID pvKernAddr;
	PVRSRV_ERROR eError;

	pvKernAddr = ion_map_kernel(psPrivData->psIonClient, psPrivData->psIonHandle);

	if (IS_ERR(pvKernAddr))
	{
		eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
		goto fail;
	}

	*ppvKernelAddressOut = pvKernAddr;

	return PVRSRV_OK;

	/*
	  error exit paths follow
	*/

fail:
	PVR_ASSERT(eError != PVRSRV_OK);
	return eError;
}
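Every example in this corpus revolves around the same contract: ion_map_kernel() hands back a CPU virtual address for an ION buffer (or an ERR_PTR-encoded error; some older MSM trees can also return NULL, and some variants take an extra flags argument), and every successful map must eventually be balanced by ion_unmap_kernel() on the same client/handle pair. A minimal sketch of that pairing, assuming the two-argument API used above:

#include <linux/err.h>
#include <linux/string.h>
#include <linux/ion.h>

/* Hypothetical helper, not from any of the examples: map an ION buffer,
 * touch it, and unmap it. Assumes the two-argument ion_map_kernel(). */
static int example_touch_ion_buffer(struct ion_client *client,
				    struct ion_handle *handle, size_t size)
{
	void *vaddr;

	vaddr = ion_map_kernel(client, handle);
	if (IS_ERR_OR_NULL(vaddr))	/* covers ERR_PTR and NULL returns */
		return vaddr ? PTR_ERR(vaddr) : -ENOMEM;

	memset(vaddr, 0, size);		/* use the kernel mapping */

	ion_unmap_kernel(client, handle);	/* balance the map */
	return 0;
}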
static int register_memory(void)
{
	int			result;
	unsigned long		paddr;
	unsigned long		kvaddr;
	unsigned long		mem_len;

	mutex_lock(&acdb_data.acdb_mutex);
	acdb_data.ion_client =
		msm_ion_client_create(UINT_MAX, "audio_acdb_client");
	if (IS_ERR_OR_NULL(acdb_data.ion_client)) {
		pr_err("%s: Could not register ION client!!!\n", __func__);
		result = PTR_ERR(acdb_data.ion_client);
		goto err;
	}

	acdb_data.ion_handle = ion_import_fd(acdb_data.ion_client,
		atomic_read(&acdb_data.map_handle));
	if (IS_ERR_OR_NULL(acdb_data.ion_handle)) {
		pr_err("%s: Could not import map handle!!!\n", __func__);
		result = PTR_ERR(acdb_data.ion_handle);
		goto err_ion_client;
	}

	result = ion_phys(acdb_data.ion_client, acdb_data.ion_handle,
				&paddr, (size_t *)&mem_len);
	if (result != 0) {
		pr_err("%s: Could not get phys addr!!!\n", __func__);
		goto err_ion_handle;
	}

	kvaddr = (unsigned long)ion_map_kernel(acdb_data.ion_client,
		acdb_data.ion_handle, 0);
	if (IS_ERR_OR_NULL((void *)kvaddr)) {
		pr_err("%s: Could not get kernel virt addr!!!\n", __func__);
		result = -EINVAL;
		goto err_ion_handle;
	}
	mutex_unlock(&acdb_data.acdb_mutex);

	atomic64_set(&acdb_data.paddr, paddr);
	atomic64_set(&acdb_data.kvaddr, kvaddr);
	atomic64_set(&acdb_data.mem_len, mem_len);
	pr_debug("%s done! paddr = 0x%lx, "
		"kvaddr = 0x%lx, len = x%lx\n",
		 __func__,
		(long)atomic64_read(&acdb_data.paddr),
		(long)atomic64_read(&acdb_data.kvaddr),
		(long)atomic64_read(&acdb_data.mem_len));

	return result;
err_ion_handle:
	ion_free(acdb_data.ion_client, acdb_data.ion_handle);
err_ion_client:
	ion_client_destroy(acdb_data.ion_client);
err:
	atomic64_set(&acdb_data.mem_len, 0);
	mutex_unlock(&acdb_data.acdb_mutex);
	return result;
}
static int alloc_ion_mem(struct smem_client *client, size_t size,
		u32 align, u32 flags, struct msm_smem *mem)
{
	struct ion_handle *hndl;
	size_t len;
	int rc = 0;
	flags = flags | ION_HEAP(ION_CP_MM_HEAP_ID);
	hndl = ion_alloc(client->clnt, size, align, flags);
	if (IS_ERR_OR_NULL(hndl)) {
		pr_err("Failed to allocate shared memory = %p, %d, %d, 0x%x\n",
				client, size, align, flags);
		rc = -ENOMEM;
		goto fail_shared_mem_alloc;
	}
	mem->mem_type = client->mem_type;
	mem->smem_priv = hndl;
	if (ion_phys(client->clnt, hndl, &mem->paddr, &len)) {
		pr_err("Failed to get physical address\n");
		rc = -EIO;
		goto fail_map;
	}
	mem->device_addr = mem->paddr;
	mem->size = size;
	mem->kvaddr = ion_map_kernel(client->clnt, hndl, 0);
	if (IS_ERR_OR_NULL(mem->kvaddr)) {
		pr_err("Failed to map shared mem in kernel\n");
		rc = -EIO;
		goto fail_map;
	}
	return rc;
fail_map:
	ion_free(client->clnt, hndl);
fail_shared_mem_alloc:
	return rc;
}
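The corpus shows the allocation side far more often than the teardown. A matching free path for an alloc_ion_mem() like the one above would unwind in reverse order; this is a hypothetical sketch (the real free routine is not part of this excerpt), reusing the msm_smem fields exactly as the allocator fills them:

/* Hypothetical counterpart to alloc_ion_mem() above; the field usage is
 * inferred from how the allocator populates struct msm_smem. */
static void free_ion_mem(struct smem_client *client, struct msm_smem *mem)
{
	if (mem->kvaddr)
		ion_unmap_kernel(client->clnt, mem->smem_priv);
	ion_free(client->clnt, mem->smem_priv);
	mem->kvaddr = NULL;
	mem->smem_priv = NULL;
}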
static int alloc_ion_mem(struct smem_client *client, size_t size,
		u32 align, u32 flags, int domain, int partition,
		struct msm_smem *mem)
{
	struct ion_handle *hndl;
	unsigned long iova = 0;
	unsigned long buffer_size = 0;
	unsigned long ionflags = 0;
	unsigned long heap_mask = 0;
	int rc = 0;
	if (flags == SMEM_CACHED)
		ionflags = ION_SET_CACHED(ionflags);
	else
		ionflags = ION_SET_UNCACHED(ionflags);

	heap_mask = ION_HEAP(ION_CP_MM_HEAP_ID);
	if (align < 4096)
		align = 4096;
	size = (size + 4095) & (~4095);
	pr_debug("\n in %s domain: %d, Partition: %d\n",
		__func__, domain, partition);
	hndl = ion_alloc(client->clnt, size, align, heap_mask, ionflags);
	if (IS_ERR_OR_NULL(hndl)) {
		pr_err("Failed to allocate shared memory = %p, %d, %d, 0x%x\n",
				client, size, align, ionflags);
		rc = -ENOMEM;
		goto fail_shared_mem_alloc;
	}
	mem->mem_type = client->mem_type;
	mem->smem_priv = hndl;
	mem->domain = domain;
	mem->partition_num = partition;
	mem->kvaddr = ion_map_kernel(client->clnt, hndl);
	if (IS_ERR_OR_NULL(mem->kvaddr)) {
		pr_err("Failed to map shared mem in kernel\n");
		rc = -EIO;
		goto fail_map;
	}
	rc = get_device_address(client->clnt, hndl, mem->domain,
		mem->partition_num, align, &iova, &buffer_size);
	if (rc) {
		pr_err("Failed to get device address: %d\n", rc);
		goto fail_device_address;
	}
	mem->device_addr = iova;
	pr_debug("device_address = 0x%lx, kvaddr = 0x%p\n",
		mem->device_addr, mem->kvaddr);
	mem->size = size;
	return rc;
fail_device_address:
	ion_unmap_kernel(client->clnt, hndl);
fail_map:
	ion_free(client->clnt, hndl);
fail_shared_mem_alloc:
	return rc;
}
/**
 * videobuf2_pmem_contig_user_get() - setup user space memory pointer
 * @mem: per-buffer private videobuf2-contig-pmem data
 * @offset: offsets of the planes within the buffer
 * @buffer_type: type of the video buffer
 * @addr_offset: offset added to the mapped physical address
 * @path: data path this buffer belongs to
 * @client: ION client used to import the user buffer
 *
 * This function validates and sets up a pointer to user space memory.
 * Only physically contiguous pfn-mapped memory is accepted.
 *
 * Returns 0 if successful.
 */
int videobuf2_pmem_contig_user_get(struct videobuf2_contig_pmem *mem,
					struct videobuf2_msm_offset *offset,
					enum videobuf2_buffer_type buffer_type,
					uint32_t addr_offset, int path,
					struct ion_client *client)
{
	unsigned long len;
	int rc = 0;
#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
	unsigned long kvstart;
#endif
	unsigned long paddr = 0;
	if (mem->phyaddr != 0)
		return 0;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	mem->ion_handle = ion_import_fd(client, (int)mem->vaddr);
	if (IS_ERR_OR_NULL(mem->ion_handle)) {
		pr_err("%s ION import failed\n", __func__);
		return PTR_ERR(mem->ion_handle);
	}
	rc = ion_map_iommu(client, mem->ion_handle, CAMERA_DOMAIN, GEN_POOL,
		SZ_4K, 0, (unsigned long *)&mem->phyaddr, &len, UNCACHED, 0);
	if (rc < 0) {
		ion_free(client, mem->ion_handle);
		return rc;
	}
	rc = ion_handle_get_flags(client, mem->ion_handle, &mem->ion_flags);
	mem->kernel_vaddr = ion_map_kernel(client,
		mem->ion_handle, mem->ion_flags);
#elif defined(CONFIG_ANDROID_PMEM)
	rc = get_pmem_file((int)mem->vaddr, (unsigned long *)&mem->phyaddr,
					&kvstart, &len, &mem->file);
	if (rc < 0) {
		pr_err("%s: get_pmem_file fd %d error %d\n",
					__func__, (int)mem->vaddr, rc);
		return rc;
	}
#else
	paddr = 0;
	kvstart = 0;
#endif
	if (offset)
		mem->offset = *offset;
	else
		memset(&mem->offset, 0, sizeof(struct videobuf2_msm_offset));
	mem->path = path;
	mem->buffer_type = buffer_type;
	paddr = mem->phyaddr;
	mem->mapped_phyaddr = paddr + addr_offset;
	mem->addr_offset = addr_offset;
	return rc;
}
static int ion_user_to_kernel(struct smem_client *client,
			int fd, u32 offset, int domain, int partition,
			struct msm_smem *mem)
{
	struct ion_handle *hndl;
	unsigned long iova = 0;
	unsigned long buffer_size = 0;
	int rc = 0;
	hndl = ion_import_dma_buf(client->clnt, fd);
	if (IS_ERR_OR_NULL(hndl)) {
		pr_err("Failed to get handle: %p, %d, %d, %p\n",
				client, fd, offset, hndl);
		rc = -ENOMEM;
		goto fail_import_fd;
	}
	mem->kvaddr = ion_map_kernel(client->clnt, hndl);
	if (IS_ERR_OR_NULL(mem->kvaddr)) {
		pr_err("Failed to map shared mem in kernel\n");
		rc = -EIO;
		goto fail_map;
	}
	mem->domain = domain;
	mem->partition_num = partition;
	rc = get_device_address(client->clnt, hndl, mem->domain,
		mem->partition_num, 4096, &iova, &buffer_size);
	if (rc) {
		pr_err("Failed to get device address: %d\n", rc);
		goto fail_device_address;
	}

	mem->kvaddr += offset;
	mem->mem_type = client->mem_type;
	mem->smem_priv = hndl;
	mem->device_addr = iova + offset;
	mem->size = buffer_size;
	pr_debug("Buffer device address: 0x%lx, size: %d\n",
		mem->device_addr, mem->size);
	return rc;
fail_device_address:
	ion_unmap_kernel(client->clnt, hndl);
fail_map:
	ion_free(client->clnt, hndl);
fail_import_fd:
	return rc;
}
/**
 * mpq_dmx_channel_mem_alloc() - Allocate memory for channel output of a
 * specific TSIF.
 *
 * @tsif: The TSIF id for which memory should be allocated.
 *
 * Return: 0 on success, error status otherwise.
 */
static int mpq_dmx_channel_mem_alloc(int tsif)
{
	int result;
	size_t len;

	MPQ_DVB_DBG_PRINT("%s(%d)\n", __func__, tsif);

	mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle =
		ion_alloc(mpq_dmx_tspp_info.ion_client,
		 (mpq_dmx_tspp_info.tsif[tsif].buffer_count *
		  TSPP_DESCRIPTOR_SIZE),
		 SZ_4K,
		 ION_HEAP(tspp_out_ion_heap),
		 0); /* non-cached */

	if (IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle)) {
		MPQ_DVB_ERR_PRINT("%s: ion_alloc() failed\n", __func__);
		mpq_dmx_channel_mem_free(tsif);
		return -ENOMEM;
	}

	/* save virtual base address of heap */
	mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_virt_base =
		ion_map_kernel(mpq_dmx_tspp_info.ion_client,
			mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle);
	if (IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].
				ch_mem_heap_virt_base)) {
		MPQ_DVB_ERR_PRINT("%s: ion_map_kernel() failed\n", __func__);
		mpq_dmx_channel_mem_free(tsif);
		return -ENOMEM;
	}

	/* save physical base address of heap */
	result = ion_phys(mpq_dmx_tspp_info.ion_client,
		mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle,
		&(mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base), &len);
	if (result < 0) {
		MPQ_DVB_ERR_PRINT("%s: ion_phys() failed\n", __func__);
		mpq_dmx_channel_mem_free(tsif);
		return -ENOMEM;
	}

	return 0;
}
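mpq_dmx_channel_mem_free() is invoked on every failure path above but is not included in this excerpt, so it must tolerate a partially initialized state. A sketch of what it plausibly looks like, with the field and client names taken from the allocator and everything else assumed:

/* Hypothetical teardown matching mpq_dmx_channel_mem_alloc(); it must be
 * safe to call after any partial step of the allocation. */
static void mpq_dmx_channel_mem_free(int tsif)
{
	mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base = 0;

	if (!IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle)) {
		if (!IS_ERR_OR_NULL(
			mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_virt_base))
			ion_unmap_kernel(mpq_dmx_tspp_info.ion_client,
				mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle);

		ion_free(mpq_dmx_tspp_info.ion_client,
			mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle);
	}

	mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_virt_base = NULL;
	mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle = NULL;
}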
static int ion_user_to_kernel(struct smem_client *client,
			int fd, u32 offset, struct msm_smem *mem)
{
	struct ion_handle *hndl;
	unsigned long ionflag;
	size_t len;
	int rc = 0;
	hndl = ion_import_dma_buf(client->clnt, fd);
	if (IS_ERR_OR_NULL(hndl)) {
		pr_err("Failed to get handle: %p, %d, %d, %p\n",
				client, fd, offset, hndl);
		rc = -ENOMEM;
		goto fail_import_fd;
	}
	rc = ion_handle_get_flags(client->clnt, hndl, &ionflag);
	if (rc) {
		pr_err("Failed to get ion flags: %d", rc);
		goto fail_map;
	}
	rc = ion_phys(client->clnt, hndl, &mem->paddr, &len);
	if (rc) {
		pr_err("Failed to get physical address\n");
		goto fail_map;
	}
	mem->kvaddr = ion_map_kernel(client->clnt, hndl);
	if (IS_ERR_OR_NULL(mem->kvaddr)) {
		pr_err("Failed to map shared mem in kernel\n");
		rc = -EIO;
		goto fail_map;
	}

	mem->kvaddr += offset;
	mem->paddr += offset;
	mem->mem_type = client->mem_type;
	mem->smem_priv = hndl;
	mem->device_addr = mem->paddr;
	mem->size = len;
	return rc;
fail_map:
	ion_free(client->clnt, hndl);
fail_import_fd:
	return rc;
}
void *vb2_ion_private_alloc(void *alloc_ctx, size_t size)
{
    struct vb2_ion_context *ctx = alloc_ctx;
    struct vb2_ion_buf *buf;
    int flags = ion_heapflag(ctx->flags);
    int ret = 0;

    buf = kzalloc(sizeof(*buf), GFP_KERNEL);
    if (!buf) {
        pr_err("%s error: fail to kzalloc size(%d)\n", __func__, sizeof(*buf));
        return ERR_PTR(-ENOMEM);
    }

    size = PAGE_ALIGN(size);

    buf->handle = ion_alloc(ctx->client, size, ctx->alignment, flags, flags);
    if (IS_ERR(buf->handle)) {
        ret = -ENOMEM;
        goto err_alloc;
    }

    buf->cookie.sgt = ion_sg_table(ctx->client, buf->handle);

    buf->ctx  = ctx;
    buf->size = size;

    buf->kva  = ion_map_kernel(ctx->client, buf->handle);
    if (IS_ERR(buf->kva)) {
        ret = PTR_ERR(buf->kva);
        buf->kva = NULL;
        goto err_map_kernel;
    }

    return &buf->cookie;

err_map_kernel:
    ion_free(ctx->client, buf->handle);
err_alloc:
    kfree(buf);

    pr_err("%s: Error occured while allocating\n", __func__);
    return ERR_PTR(ret);
}
Example #10
static void *vb2_ion_vaddr(void *buf_priv)
{
	struct vb2_ion_buf *buf = buf_priv;
	struct vb2_ion_conf *conf;

	if (!buf) {
		pr_err("failed to get buffer\n");
		return NULL;
	}
	conf = buf->conf;

	if (!buf->kva) {
		buf->kva = (dma_addr_t)ion_map_kernel(conf->client, buf->handle);
		if (IS_ERR((void *)buf->kva)) {
			pr_err("ion_map_kernel handle(%x)\n",
				(u32)buf->handle);
			return NULL;
		}
	}

	return (void *)buf->kva;
}
Example #11
static PVRSRV_ERROR
PMRFinalizeIon(PMR_IMPL_PRIVDATA pvPriv)
{
	PMR_ION_DATA *psPrivData = IMG_NULL;

	psPrivData = pvPriv;

	if (psPrivData->bPDumpMalloced)
	{
		PDumpPMRFree(psPrivData->hPDumpAllocInfo);
	}

	if (psPrivData->bPoisonOnFree)
	{
		IMG_PVOID pvKernAddr;

		pvKernAddr = ion_map_kernel(psPrivData->psIonClient, psPrivData->psIonHandle);

		if (IS_ERR(pvKernAddr))
		{
			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to poison allocation before free", __FUNCTION__));
			PVR_ASSERT(IMG_FALSE);
		}
		else
		{
			_Poison(pvKernAddr,
					psPrivData->uiSize,
					_FreePoison,
					_FreePoisonSize);

			ion_unmap_kernel(psPrivData->psIonClient, psPrivData->psIonHandle);
		}
	}

	IonPhysAddrRelease(psPrivData);
	ion_free(psPrivData->psIonClient, psPrivData->psIonHandle);
	PhysHeapRelease(psPrivData->psPhysHeap);
	kfree(psPrivData);

	return PVRSRV_OK;
}
/*!
******************************************************************************
 @Function                GetCpuKmAddr
******************************************************************************/
static IMG_RESULT GetCpuKmAddr(
	SYSMEM_Heap *			heap,
	IMG_VOID **				ppvCpuKmAddr,
	IMG_HANDLE 				hPagesHandle
)
{
	struct ion_client *ion_client = heap->priv;
    SYSMEMU_sPages *  psPages = hPagesHandle;

    if(psPages->pvCpuKmAddr == IMG_NULL)
    {
        psPages->pvCpuKmAddr = ion_map_kernel(ion_client, psPages->pvImplData);

        if (IS_ERR_OR_NULL(psPages->pvCpuKmAddr)) {
            psPages->pvCpuKmAddr = IMG_NULL;
            REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR,
                   "Error mapping buffer to kernel address space");
            return IMG_ERROR_FATAL;
        }
    }
    *ppvCpuKmAddr = psPages->pvCpuKmAddr;
    /* Return success...*/
    return IMG_SUCCESS;
}
static int audpcm_in_open(struct inode *inode, struct file *file)
{
	struct audio_in *audio = &the_audio_in;
	int rc;
	size_t len = 0;
	unsigned long ionflag = 0;
	ion_phys_addr_t addr = 0;
	struct ion_handle *handle = NULL;
	struct ion_client *client = NULL;

	int encid;
	struct timespec ts;
	struct rtc_time tm;

	mutex_lock(&audio->lock);
	if (audio->opened) {
		rc = -EBUSY;
		goto done;
	}

	audio->mode = MSM_AUD_ENC_MODE_TUNNEL;
	audio->samp_rate = RPC_AUD_DEF_SAMPLE_RATE_11025;
	audio->samp_rate_index = AUDREC_CMD_SAMP_RATE_INDX_11025;
	audio->channel_mode = AUDREC_CMD_STEREO_MODE_MONO;
	audio->buffer_size = MONO_DATA_SIZE;
	audio->enc_type = AUDREC_CMD_TYPE_0_INDEX_WAV | audio->mode;

	rc = audmgr_open(&audio->audmgr);
	if (rc)
		goto done;
	encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name,
			&audio->queue_ids);
	if (encid < 0) {
		MM_AUD_ERR("No free encoder available\n");
		rc = -ENODEV;
		goto done;
	}
	audio->enc_id = encid;

	rc = msm_adsp_get(audio->module_name, &audio->audrec,
			   &audrec_adsp_ops, audio);
	if (rc) {
		audpreproc_aenc_free(audio->enc_id);
		goto done;
	}

	audio->dsp_cnt = 0;
	audio->stopped = 0;

	audpcm_in_flush(audio);

	client = msm_ion_client_create(UINT_MAX, "Audio_PCM_in_client");
	if (IS_ERR_OR_NULL(client)) {
		MM_ERR("Unable to create ION client\n");
		rc = -ENOMEM;
		goto client_create_error;
	}
	audio->client = client;

	MM_DBG("allocating mem sz = %d\n", DMASZ);
	handle = ion_alloc(client, DMASZ, SZ_4K,
		ION_HEAP(ION_AUDIO_HEAP_ID));
	if (IS_ERR_OR_NULL(handle)) {
		MM_ERR("Unable to create allocate O/P buffers\n");
		rc = -ENOMEM;
		goto output_buff_alloc_error;
	}

	audio->output_buff_handle = handle;

	rc = ion_phys(client, handle, &addr, &len);
	if (rc) {
		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
		rc = -ENOMEM;
		goto output_buff_get_phys_error;
	} else {
		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
	}
	audio->phys = (int32_t)addr;

	rc = ion_handle_get_flags(client, handle, &ionflag);
	if (rc) {
		MM_ERR("could not get flags for the handle\n");
		rc = -ENOMEM;
		goto output_buff_get_flags_error;
	}

	audio->data = ion_map_kernel(client, handle, ionflag);
	if (IS_ERR(audio->data)) {
		MM_ERR("could not map read buffers,freeing instance 0x%08x\n",
				(int)audio);
		rc = -ENOMEM;
		goto output_buff_map_error;
	}
	MM_DBG("read buf: phy addr 0x%08x kernel addr 0x%08x\n",
		audio->phys, (int)audio->data);

	file->private_data = audio;
	audio->opened = 1;
	rc = 0;
done:
	mutex_unlock(&audio->lock);
	getnstimeofday(&ts);
	rtc_time_to_tm(ts.tv_sec, &tm);
	pr_aud_info1("[ATS][start_recording][successful] at %lld \
		(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n",
		ktime_to_ns(ktime_get()),
		tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
	return rc;
output_buff_map_error:
output_buff_get_phys_error:
output_buff_get_flags_error:
	ion_free(client, audio->output_buff_handle);
output_buff_alloc_error:
	ion_client_destroy(client);
client_create_error:
	msm_adsp_put(audio->audrec);
	audpreproc_aenc_free(audio->enc_id);
	mutex_unlock(&audio->lock);
	return rc;
}
Example #14
void ddl_pmem_alloc(struct ddl_buf_addr *buff_addr, size_t sz, u32 align)
{
	u32 guard_bytes, align_mask;
	u32 physical_addr;
	u32 align_offset;
	u32 alloc_size, flags = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	unsigned long *kernel_vaddr = NULL;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	int ret = -EINVAL;

	if (!buff_addr) {
		ERR("\n%s() Invalid Parameters\n", __func__);
		return;
	}
	if (align == DDL_LINEAR_BUFFER_ALIGN_BYTES) {
		guard_bytes = 31;
		align_mask = 0xFFFFFFE0U;
	} else {
		guard_bytes = DDL_TILE_BUF_ALIGN_GUARD_BYTES;
		align_mask = DDL_TILE_BUF_ALIGN_MASK;
	}
	ddl_context = ddl_get_context();
	alloc_size = sz + guard_bytes;
	if (res_trk_get_enable_ion()) {
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			ERR("\n%s(): DDL ION Client Invalid handle\n",
				__func__);
			goto bailout;
		}
		buff_addr->mem_type = res_trk_get_mem_type();
		buff_addr->alloc_handle = ion_alloc(
					ddl_context->video_ion_client,
					alloc_size,
					SZ_4K,
					buff_addr->mem_type);
		if (!buff_addr->alloc_handle) {
			ERR("\n%s(): DDL ION alloc failed\n",
					__func__);
			goto bailout;
		}
		ret = ion_phys(ddl_context->video_ion_client,
					buff_addr->alloc_handle,
					&phyaddr,
					&len);
		if (ret || !phyaddr) {
			ERR("\n%s(): DDL ION client physical failed\n",
					__func__);
			goto free_ion_buffer;
		}
		buff_addr->physical_base_addr = (u32 *)phyaddr;
		kernel_vaddr = (unsigned long *) ion_map_kernel(
					ddl_context->video_ion_client,
					buff_addr->alloc_handle,
					UNCACHED);
		if (IS_ERR_OR_NULL(kernel_vaddr)) {
			ERR("\n%s(): DDL ION map failed\n", __func__);
			goto free_ion_buffer;
		}
		buff_addr->virtual_base_addr = (u32 *)kernel_vaddr;
		DBG("ddl_ion_alloc: handle(0x%x), mem_type(0x%x), "\
			"phys(0x%x), virt(0x%x), size(%u), align(%u), "\
			"alloced_len(%u)", (u32)buff_addr->alloc_handle,
			(u32)buff_addr->mem_type,
			(u32)buff_addr->physical_base_addr,
			(u32)buff_addr->virtual_base_addr,
			alloc_size, align, len);
	} else {
		physical_addr = (u32)
			allocate_contiguous_memory_nomap(alloc_size,
						ddl_context->memtype, SZ_4K);
		if (!physical_addr) {
			ERR("\n%s(): DDL pmem allocate failed\n",
			       __func__);
			goto bailout;
		}
		buff_addr->physical_base_addr = (u32 *) physical_addr;
		flags = MSM_SUBSYSTEM_MAP_KADDR;
		buff_addr->mapped_buffer =
		msm_subsystem_map_buffer((unsigned long)physical_addr,
		alloc_size, flags, NULL, 0);
		if (IS_ERR(buff_addr->mapped_buffer)) {
			ERR("\n%s() buffer map failed\n", __func__);
			goto free_pmem_buffer;
		}
		mapped_buffer = buff_addr->mapped_buffer;
		if (!mapped_buffer->vaddr) {
			ERR("\n%s() mapped virtual address is NULL\n",
				__func__);
			goto unmap_pmem_buffer;
		}
		buff_addr->virtual_base_addr = mapped_buffer->vaddr;
		DBG("ddl_pmem_alloc: mem_type(0x%x), phys(0x%x),"\
			" virt(0x%x), sz(%u), align(%u)",
			(u32)buff_addr->mem_type,
			(u32)buff_addr->physical_base_addr,
			(u32)buff_addr->virtual_base_addr,
			alloc_size, SZ_4K);
	}

	memset(buff_addr->virtual_base_addr, 0, sz + guard_bytes);
	buff_addr->buffer_size = sz;
	buff_addr->align_physical_addr = (u32 *)
		(((u32)buff_addr->physical_base_addr + guard_bytes) &
		align_mask);
	align_offset = (u32) (buff_addr->align_physical_addr) -
		(u32)buff_addr->physical_base_addr;
	buff_addr->align_virtual_addr =
	    (u32 *) ((u32) (buff_addr->virtual_base_addr)
		     + align_offset);
	DBG("%s(): phys(0x%x) align_phys(0x%x), virt(0x%x),"\
		" align_virt(0x%x)", __func__,
		(u32)buff_addr->physical_base_addr,
		(u32)buff_addr->align_physical_addr,
		(u32)buff_addr->virtual_base_addr,
		(u32)buff_addr->align_virtual_addr);
	return;

unmap_pmem_buffer:
	if (buff_addr->mapped_buffer)
		msm_subsystem_unmap_buffer(buff_addr->mapped_buffer);
free_pmem_buffer:
	if (buff_addr->physical_base_addr)
		free_contiguous_memory_by_paddr((unsigned long)
			buff_addr->physical_base_addr);
	memset(buff_addr, 0, sizeof(struct ddl_buf_addr));
	return;

free_ion_buffer:
	if (ddl_context->video_ion_client) {
		if (buff_addr->alloc_handle)
			ion_free(ddl_context->video_ion_client,
				buff_addr->alloc_handle);
	}
bailout:
	memset(buff_addr, 0, sizeof(struct ddl_buf_addr));
}
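The alignment arithmetic at the end of ddl_pmem_alloc() recurs in several later examples: round the physical base up to the alignment boundary, then shift the kernel virtual base by the same byte offset so both views point at the same aligned byte. A self-contained sketch of that calculation, using the 32-byte mask from above:

/* Sketch of the guard-byte alignment used in ddl_pmem_alloc() above.
 * E.g. phys_base 0x10000011 with guard_bytes 31 and mask 0xFFFFFFE0U
 * yields aligned_phys 0x10000020 and an offset of 15 bytes. */
static void *align_mapping(u32 phys_base, void *virt_base, u32 guard_bytes,
			   u32 align_mask, u32 *aligned_phys)
{
	u32 offset;

	*aligned_phys = (phys_base + guard_bytes) & align_mask;
	offset = *aligned_phys - phys_base;
	return (u8 *)virt_base + offset;	/* same byte in the CPU view */
}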
Example #15
static int audpcm_in_open(struct inode *inode, struct file *file)
{
	struct audio_in *audio = &the_audio_in;
	int rc;
	size_t len = 0;
	unsigned long ionflag = 0;
	ion_phys_addr_t addr = 0;
	struct ion_handle *handle = NULL;
	struct ion_client *client = NULL;

	int encid;
	mutex_lock(&audio->lock);
	if (audio->opened) {
		rc = -EBUSY;
		goto done;
	}

	/* Settings will be re-config at AUDIO_SET_CONFIG,
	 * but at least we need to have initial config
	 */
	audio->mode = MSM_AUD_ENC_MODE_TUNNEL;
	audio->samp_rate = RPC_AUD_DEF_SAMPLE_RATE_11025;
	audio->samp_rate_index = AUDREC_CMD_SAMP_RATE_INDX_11025;
	audio->channel_mode = AUDREC_CMD_STEREO_MODE_MONO;
	audio->buffer_size = MONO_DATA_SIZE;
	audio->enc_type = AUDREC_CMD_TYPE_0_INDEX_WAV | audio->mode;

	rc = audmgr_open(&audio->audmgr);
	if (rc)
		goto done;
	encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name,
			&audio->queue_ids);
	if (encid < 0) {
		MM_ERR("No free encoder available\n");
		rc = -ENODEV;
		goto done;
	}
	audio->enc_id = encid;

	rc = msm_adsp_get(audio->module_name, &audio->audrec,
			   &audrec_adsp_ops, audio);
	if (rc) {
		audpreproc_aenc_free(audio->enc_id);
		goto done;
	}

	rc = msm_adsp_get("AUDPREPROCTASK", &audio->audpre,
				&audpre_adsp_ops, audio);
	if (rc) {
		msm_adsp_put(audio->audrec);
		audpreproc_aenc_free(audio->enc_id);
		goto done;
	}

	audio->dsp_cnt = 0;
	audio->stopped = 0;

	audpcm_in_flush(audio);

	client = msm_ion_client_create(UINT_MAX, "Audio_PCM_in_client");
	if (IS_ERR_OR_NULL(client)) {
		MM_ERR("Unable to create ION client\n");
		rc = -ENOMEM;
		goto client_create_error;
	}
	audio->client = client;

	MM_DBG("allocating mem sz = %d\n", DMASZ);
	handle = ion_alloc(client, DMASZ, SZ_4K,
		ION_HEAP(ION_AUDIO_HEAP_ID), 0);
	if (IS_ERR_OR_NULL(handle)) {
		MM_ERR("Unable to create allocate O/P buffers\n");
		rc = -ENOMEM;
		goto output_buff_alloc_error;
	}

	audio->output_buff_handle = handle;

	rc = ion_phys(client, handle, &addr, &len);
	if (rc) {
		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
		rc = -ENOMEM;
		goto output_buff_get_phys_error;
	} else {
		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
	}
	audio->phys = (int32_t)addr;

	rc = ion_handle_get_flags(client, handle, &ionflag);
	if (rc) {
		MM_ERR("could not get flags for the handle\n");
		rc = -ENOMEM;
		goto output_buff_get_flags_error;
	}

	audio->data = ion_map_kernel(client, handle);
	if (IS_ERR(audio->data)) {
		MM_ERR("could not map read buffers,freeing instance 0x%08x\n",
				(int)audio);
		rc = -ENOMEM;
		goto output_buff_map_error;
	}
	MM_DBG("read buf: phy addr 0x%08x kernel addr 0x%08x\n",
		audio->phys, (int)audio->data);

	file->private_data = audio;
	audio->opened = 1;
	rc = 0;
done:
	mutex_unlock(&audio->lock);
	return rc;
output_buff_map_error:
output_buff_get_phys_error:
output_buff_get_flags_error:
	ion_free(client, audio->output_buff_handle);
output_buff_alloc_error:
	ion_client_destroy(client);
client_create_error:
	msm_adsp_put(audio->audrec);
	msm_adsp_put(audio->audpre);
	audpreproc_aenc_free(audio->enc_id);
	mutex_unlock(&audio->lock);
	return rc;
}
void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size, offset = 0;
	struct ddl_context *ddl_context;
	unsigned long iova = 0;
	unsigned long buffer_size = 0;
	unsigned long *kernel_vaddr = NULL;
	int ret = 0;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	int rc = 0;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	res_trk_set_mem_type(addr->mem_type);
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			DDL_MSG_ERROR("%s() :DDL ION Client Invalid handle\n",
						 __func__);
			goto bail_out;
		}
		alloc_size = (alloc_size+4095) & ~4095;
		addr->alloc_handle = ion_alloc(
		ddl_context->video_ion_client, alloc_size, SZ_4K,
			res_trk_get_mem_type(), res_trk_get_ion_flags());
		if (IS_ERR_OR_NULL(addr->alloc_handle)) {
			DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
						 __func__);
			goto bail_out;
		}
		kernel_vaddr = (unsigned long *) ion_map_kernel(
					ddl_context->video_ion_client,
					addr->alloc_handle);
		if (IS_ERR_OR_NULL(kernel_vaddr)) {
			DDL_MSG_ERROR("%s() :DDL ION map failed\n",
					__func__);
			goto free_ion_alloc;
		}
		addr->virtual_base_addr = (u8 *) kernel_vaddr;
		if (res_trk_check_for_sec_session()) {
			rc = ion_phys(ddl_context->video_ion_client,
				addr->alloc_handle, &phyaddr,
					&len);
			if (rc || !phyaddr) {
				DDL_MSG_ERROR(
				"%s():DDL ION client physical failed\n",
				__func__);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = phyaddr;
		} else {
			ret = ion_map_iommu(ddl_context->video_ion_client,
					addr->alloc_handle,
					VIDEO_DOMAIN,
					VIDEO_MAIN_POOL,
					SZ_4K,
					0,
					&iova,
					&buffer_size,
					0, 0);
			if (ret || !iova) {
				DDL_MSG_ERROR(
				"%s():DDL ION ion map iommu failed, ret = %d iova = 0x%lx\n",
					__func__, ret, iova);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = (phys_addr_t) iova;
		}
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s():DDL ION client physical failed\n",
						__func__);
			goto unmap_ion_alloc;
		}
		addr->mapped_buffer = NULL;
		addr->physical_base_addr = (u8 *) addr->alloced_phys_addr;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = alloc_size;
	} else {
		pr_err("ION must be enabled.");
		goto bail_out;
	}
	return addr->virtual_base_addr;
unmap_ion_alloc:
	ion_unmap_kernel(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->virtual_base_addr = NULL;
	addr->alloced_phys_addr = (phys_addr_t)NULL;
free_ion_alloc:
	ion_free(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->alloc_handle = NULL;
bail_out:
	return NULL;
}
static long ion_test_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	int ret;
	ion_phys_addr_t phys_addr;
	void *addr;
	size_t len;
	unsigned long flags, size;
	struct msm_ion_test *ion_test = file->private_data;
	struct ion_test_data *test_data = &ion_test->test_data;

	switch (cmd) {
	case IOC_ION_KCLIENT_CREATE:
	{
		ret = create_ion_client(ion_test);
		break;
	}
	case IOC_ION_KCLIENT_DESTROY:
	{
		free_ion_client(ion_test);
		ret = 0;
		break;
	}
	case IOC_ION_KALLOC:
	{
		if (copy_from_user(test_data, (void __user *)arg,
						sizeof(struct ion_test_data)))
			return -EFAULT;
		ret = alloc_ion_buf(ion_test, test_data);
		if (ret)
			pr_info("allocating ion buffer failed\n");
		break;
	}
	case IOC_ION_KFREE:
	{
		free_ion_buf(ion_test);
		ret = 0;
		break;
	}
	case IOC_ION_KPHYS:
	{
		ret = ion_phys(ion_test->ion_client, ion_test->ion_handle,
							&phys_addr, &len);
		if (!ret)
			pr_info("size is 0x%zx, phys addr 0x%x\n", len,
						(unsigned int)phys_addr);
		break;
	}
	case IOC_ION_KMAP:
	{
		addr = ion_map_kernel(ion_test->ion_client,
					ion_test->ion_handle);
		if (IS_ERR_OR_NULL(addr)) {
			ret = -EIO;
			pr_info("mapping kernel buffer failed\n");
		} else {
			ret = 0;
			test_data->vaddr = (unsigned long)addr;
		}
		break;
	}
	case IOC_ION_KUMAP:
	{
		ion_unmap_kernel(ion_test->ion_client, ion_test->ion_handle);
		ret = 0;
		break;
	}
	case IOC_ION_UIMPORT:
	{
		if (copy_from_user(test_data, (void __user *)arg,
						sizeof(struct ion_test_data)))
			return -EFAULT;
		ion_test->ion_handle = ion_import_dma_buf(ion_test->ion_client,
							test_data->shared_fd);
		if (IS_ERR_OR_NULL(ion_test->ion_handle)) {
			ret = -EIO;
			pr_info("import of user buf failed\n");
		} else
			ret = 0;
		break;
	}
	case IOC_ION_UBUF_FLAGS:
	{
		ret = ion_handle_get_flags(ion_test->ion_client,
						ion_test->ion_handle, &flags);
		if (ret)
			pr_info("user flags cannot be retrieved\n");
		else
			if (copy_to_user((void __user *)arg, &flags,
						sizeof(unsigned long)))
				ret = -EFAULT;
		break;
	}
	case IOC_ION_UBUF_SIZE:
	{
		ret = ion_handle_get_size(ion_test->ion_client,
						ion_test->ion_handle, &size);
		if (ret)
			pr_info("buffer size cannot be retrieved\n");
		else
			if (copy_to_user((void __user *)arg, &size,
							sizeof(unsigned long)))
				ret = -EFAULT;
		break;
	}
	case IOC_ION_WRITE_VERIFY:
	{
		write_pattern(test_data->vaddr, test_data->size);
		if (verify_pattern(test_data->vaddr, test_data->size)) {
			pr_info("verify of mapped buf failed\n");
			ret = -EIO;
		} else
			ret = 0;
		break;
	}
	case IOC_ION_VERIFY:
	{
		if (verify_pattern(test_data->vaddr, test_data->size)) {
			pr_info("fail in verifying imported buffer\n");
			ret = -EIO;
		} else
			ret = 0;
		break;
	}
	case IOC_ION_SEC:
	{
		ret = msm_ion_secure_heap(ION_CP_MM_HEAP_ID);
		if (ret)
			pr_info("unable to secure heap\n");
		else
			pr_info("able to secure heap\n");
		break;
	}
	case IOC_ION_UNSEC:
	{
		ret = msm_ion_unsecure_heap(ION_CP_MM_HEAP_ID);
		if (ret)
			pr_info("unable to unsecure heap\n");
		else
			pr_info("able to unsecure heap\n");
		break;
	}
	default:
	{
		pr_info("command not supproted\n");
		ret = -EINVAL;
	}
	}
	return ret;
}
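The ioctl handler above is easiest to read next to the user-space sequence it expects. A hypothetical round trip might look like this; the device path and header name are assumptions, while the IOC_ION_* command names come straight from the switch above:

/* Hypothetical user-space exercise of the ion test driver above. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "msm_ion_test.h"	/* assumed header with IOC_ION_* and struct ion_test_data */

int main(void)
{
	struct ion_test_data data = { .size = 4096 };	/* field use assumed */
	int fd = open("/dev/msm_ion_test", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;

	ioctl(fd, IOC_ION_KCLIENT_CREATE);	/* create_ion_client() path */
	ioctl(fd, IOC_ION_KALLOC, &data);	/* alloc_ion_buf() path */
	ioctl(fd, IOC_ION_KMAP);		/* ion_map_kernel() path */
	ioctl(fd, IOC_ION_KUMAP);		/* balance the kernel map */
	ioctl(fd, IOC_ION_KFREE);
	ioctl(fd, IOC_ION_KCLIENT_DESTROY);

	close(fd);
	return 0;
}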
Example #18
PVRSRV_ERROR
PhysmemImportIon(CONNECTION_DATA *psConnection,
				 IMG_INT fd,
				 PVRSRV_MEMALLOCFLAGS_T uiFlags,
				 PMR **ppsPMRPtr,
				 IMG_DEVMEM_SIZE_T *puiSize,
				 IMG_DEVMEM_ALIGN_T *puiAlign)
{
	PMR_ION_DATA *psPrivData = IMG_NULL;
	IMG_HANDLE hPDumpAllocInfo = IMG_NULL;
	ENV_CONNECTION_DATA *psEnvConnectionData;
	PMR *psPMR = IMG_NULL;
	PMR_FLAGS_T uiPMRFlags;
	IMG_BOOL bZero;
	IMG_BOOL bPoisonOnAlloc;
	IMG_BOOL bPoisonOnFree;
	IMG_BOOL bMappingTable = IMG_TRUE;
	PVRSRV_ERROR eError;

	if (uiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
	{
		bZero = IMG_TRUE;
	}
	else
	{
		bZero = IMG_FALSE;
	}

	if (uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC)
	{
		bPoisonOnAlloc = IMG_TRUE;
	}
	else
	{
		bPoisonOnAlloc = IMG_FALSE;
	}

	if (uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_FREE)
	{
		bPoisonOnFree = IMG_TRUE;
	}
	else
	{
		bPoisonOnFree = IMG_FALSE;
	}

	if ((uiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) &&
	    (uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC))
	{
		/* Zero on Alloc and Poison on Alloc are mutually exclusive */
		eError = PVRSRV_ERROR_INVALID_PARAMS;
		goto fail_params;
	}

	psPrivData = kmalloc(sizeof(*psPrivData), GFP_KERNEL);
	if (psPrivData == NULL)
	{
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto fail_privalloc;
	}

	/*
		Get the physical heap for this PMR
		
		Note:
		While we have no way to determine the type of the buffer
		we just assume that all Ion buffers are from the same
		physical heap.
	*/
	eError = PhysHeapAcquire(IonPhysHeapID(), &psPrivData->psPhysHeap);
	if (eError != PVRSRV_OK)
	{
		goto fail_physheap;
	}

	/* Get the ion client from this connection */
	psEnvConnectionData = PVRSRVConnectionPrivateData(psConnection);
	psPrivData->psIonClient = EnvDataIonClientGet(psEnvConnectionData);

	/* Get the buffer handle */
	psPrivData->psIonHandle = ion_import_fd(psPrivData->psIonClient, fd);
	if (IS_ERR_OR_NULL(psPrivData->psIonHandle))
	{
		/* FIXME: add ion specific error? */
		eError = PVRSRV_ERROR_BAD_MAPPING;
		goto fail_ionimport;
	}

	/*
		Note:

		We could defer the import until lock address time but we
		do it here as then we can detect any errors at import time.
		Also we need to know the ion buffer size here and there seems
		to be no other way to find that other then map the buffer for dma.
	*/
	eError = IonPhysAddrAcquire(psPrivData,
							   fd);
	if (eError != PVRSRV_OK)
	{
		goto fail_acquire;
	}

	if (bZero || bPoisonOnAlloc)
	{
		IMG_PVOID pvKernAddr;

		pvKernAddr = ion_map_kernel(psPrivData->psIonClient, psPrivData->psIonHandle);

		if (IS_ERR(pvKernAddr))
		{
			eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
			goto fail_kernelmap;
		}

		if (bZero)
		{
			memset(pvKernAddr, 0, psPrivData->uiSize);
		}
		else
		{
			_Poison(pvKernAddr,
					psPrivData->uiSize,
					_AllocPoison,
					_AllocPoisonSize);
		}

		ion_unmap_kernel(psPrivData->psIonClient, psPrivData->psIonHandle);
	}

	psPrivData->bPoisonOnFree = bPoisonOnFree;

	uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
	/* check no significant bits were lost in cast due to different
	   bit widths for flags */
	PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));

	eError = PMRCreatePMR(psPrivData->psPhysHeap,
						  psPrivData->uiSize,
                          psPrivData->uiSize,
                          1,
                          1,
                          &bMappingTable,
						  PAGE_SHIFT,
						  uiPMRFlags,
						  "PMRION",
						  &_sPMRIonFuncTab,
						  psPrivData,
						  &psPMR);
	if (eError != PVRSRV_OK)
	{
		goto fail_pmrcreate;
	}

	_PDumpPMRMalloc(psPMR,
					psPrivData->uiSize,
					PAGE_SIZE,
					&hPDumpAllocInfo);
	psPrivData->hPDumpAllocInfo = hPDumpAllocInfo;
	psPrivData->bPDumpMalloced = IMG_TRUE;

	*ppsPMRPtr = psPMR;
	*puiSize = psPrivData->uiSize;
	*puiAlign = PAGE_SIZE;
	return PVRSRV_OK;

fail_pmrcreate:
fail_kernelmap:
	IonPhysAddrRelease(psPrivData);
fail_acquire:
	ion_free(psPrivData->psIonClient, psPrivData->psIonHandle);
fail_ionimport:
	PhysHeapRelease(psPrivData->psPhysHeap);
fail_physheap:
	kfree(psPrivData);

fail_privalloc:
fail_params:
	PVR_ASSERT(eError != PVRSRV_OK);
	return eError;
}
/*!
******************************************************************************

 @Function              ImportPages

******************************************************************************/
static IMG_RESULT ImportPages(
    SYSMEM_Heap		*heap,
    SYSDEVU_sInfo	*sysdev,
    IMG_UINT32		ui32Size,
    SYSMEMU_sPages *psPages,
    SYS_eMemAttrib	eMemAttrib,
    IMG_INT32		buff_fd,
    IMG_UINT64		*pPhyAddrs,
    IMG_VOID		*priv,
    IMG_BOOL		kernelMapped
)
{
	size_t numPages = (ui32Size + HOST_MMU_PAGE_SIZE - 1)/HOST_MMU_PAGE_SIZE;
	struct ion_handle *ionHandle;
	IMG_RESULT result = IMG_ERROR_FATAL;
	unsigned pg_i = 0;
	struct ion_client *pIONcl;

	DEBUG_REPORT(REPORT_MODULE_SYSMEM, "Importing buff_fd %d of size %u", buff_fd, ui32Size);

	pIONcl = get_ion_client();
	if (!pIONcl)
		goto exitFailGetClient;

	ionHandle = ion_import_dma_buf(pIONcl, buff_fd);
	if (IS_ERR(ionHandle)) {
		REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error obtaining handle from fd %d", buff_fd);
		result = IMG_ERROR_FATAL;
		goto exitFailImportFD;
	}

	psPages->pvImplData = ionHandle;

#if defined(ION_SYSTEM_HEAP)
	{
		struct scatterlist *psScattLs, *psScattLsAux;
		struct sg_table *psSgTable;

		psSgTable = ion_sg_table(pIONcl, ionHandle);

		if (psSgTable == NULL)
		{
			REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error obtaining sg table");
			result = IMG_ERROR_FATAL;
			goto exitFailMap;
		}
		psScattLs = psSgTable->sgl;

		if (psScattLs == NULL)
		{
			REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error obtaining scatter list");
			result = IMG_ERROR_FATAL;
			goto exitFailMap;
		}

		// Get physical addresses from scatter list
		for (psScattLsAux = psScattLs; psScattLsAux; psScattLsAux = sg_next(psScattLsAux))
		{
			int offset;
			dma_addr_t chunkBase = sg_phys(psScattLsAux);

			for (offset = 0; offset < psScattLsAux->length; offset += PAGE_SIZE, ++pg_i)
			{
				if (pg_i >= numPages)
					break;

				pPhyAddrs[pg_i] = chunkBase + offset;
			}

			if (pg_i >= numPages)
				break;
		}

		if (kernelMapped)
			psPages->pvCpuKmAddr = ion_map_kernel(pIONcl, ionHandle);
	}
#else
	{
		int offset;
		ion_phys_addr_t physaddr;
		size_t len = 0;

		result = ion_phys(pIONcl, ionHandle, &physaddr, &len);

		if(result)
		{
			IMG_ASSERT(!"ion_phys failed");
			result = IMG_ERROR_FATAL;
			goto exitFailMap;
		}

		for (offset = 0; pg_i < numPages; offset += PAGE_SIZE, ++pg_i)
		{
			pPhyAddrs[pg_i] = physaddr + offset;
		}

		if (kernelMapped)
			psPages->pvCpuKmAddr = SYSMEMU_CpuPAddrToCpuKmAddr(heap->memId, physaddr);
	}
#endif

	{
		size_t  physAddrArrSize = numPages * sizeof(psPages->ppaPhysAddr[0]);
		size_t  phy_i;

		psPages->ppaPhysAddr = IMG_BIGORSMALL_ALLOC(physAddrArrSize);
		IMG_ASSERT(psPages->ppaPhysAddr != IMG_NULL);
		if (psPages->ppaPhysAddr == IMG_NULL)
		{
			return IMG_ERROR_OUT_OF_MEMORY;
		}

		for (phy_i = 0; phy_i < numPages; ++phy_i)
			psPages->ppaPhysAddr[phy_i] = pPhyAddrs[phy_i];
	}

	if (kernelMapped && IS_ERR_OR_NULL(psPages->pvCpuKmAddr)) {
		REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error mapping to kernel address");
		result = IMG_ERROR_FATAL;
		goto exitFailMapKernel;
	}

	result = IMG_SUCCESS;

exitFailMapKernel:
exitFailMap:
exitFailImportFD:
exitFailGetClient:
	return result;
}
Example #20
static int msm_pmem_table_add(struct hlist_head *ptype,
	struct msm_pmem_info *info, struct ion_client *client)
{
	unsigned long paddr;
#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
	unsigned long kvstart;
	struct file *file;
#endif
	int rc = -ENOMEM;

	unsigned long len;
	struct msm_pmem_region *region;
	unsigned long ionflag;
	void *vaddr;

	region = kmalloc(sizeof(struct msm_pmem_region), GFP_KERNEL);
	if (!region)
		goto out;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	region->handle = ion_import_dma_buf(client, info->fd);
	if (IS_ERR_OR_NULL(region->handle))
		goto out1;
	if (ion_map_iommu(client, region->handle, CAMERA_DOMAIN, GEN_POOL,
				  SZ_4K, 0, &paddr, &len, 0 /* UNCACHED */, 0) < 0)
		goto out2;

	rc = ion_handle_get_flags(client, region->handle, &ionflag);
	if (rc) {
		pr_err("%s: could not get flags for the handle\n", __func__);
		goto out3;
	}
	D("ionflag=%ld\n", ionflag);
	vaddr = ion_map_kernel(client, region->handle);
	if (IS_ERR_OR_NULL(vaddr)) {
		pr_err("%s: could not get virtual address\n", __func__);
		rc = -ENOMEM;
		goto out3;
	}
	region->vaddr = (unsigned long) vaddr;

#elif defined(CONFIG_ANDROID_PMEM)
	rc = get_pmem_file(info->fd, &paddr, &kvstart, &len, &file);
	if (rc < 0) {
		pr_err("%s: get_pmem_file fd %d error %d\n",
				__func__, info->fd, rc);
		goto out1;
	}
	region->file = file;
#else
	paddr = 0;
	file = NULL;
	kvstart = 0;
#endif
	if (!info->len)
		info->len = len;
	rc = check_pmem_info(info, len);
	if (rc < 0)
		goto out3;
	paddr += info->offset;
	len = info->len;

	if (check_overlap(ptype, paddr, len) < 0) {
		rc = -EINVAL;
		goto out3;
	}

	CDBG("%s: type %d, active flag %d, paddr 0x%lx, vaddr 0x%lx\n",
		__func__, info->type, info->active, paddr,
		(unsigned long)info->vaddr);

	INIT_HLIST_NODE(&region->list);
	region->paddr = paddr;
	region->len = len;
	memcpy(&region->info, info, sizeof(region->info));
	D("%s Adding region to list with type %d\n", __func__,
						region->info.type);
	D("%s pmem_stats address is 0x%p\n", __func__, ptype);
	hlist_add_head(&(region->list), ptype);

	return 0;
out3:
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	ion_unmap_iommu(client, region->handle, CAMERA_DOMAIN, GEN_POOL);
out2:
	ion_free(client, region->handle);
#elif defined(CONFIG_ANDROID_PMEM)
	put_pmem_file(region->file);
#endif
out1:
	kfree(region);
out:
	return rc;
}
Example #21
PVRSRV_ERROR IonImportBufferAndAquirePhysAddr(IMG_HANDLE hIonDev,
											  IMG_HANDLE hIonFD,
											  IMG_UINT32 *pui32PageCount,
											  IMG_SYS_PHYADDR **ppasSysPhysAddr,
											  IMG_PVOID *ppvKernAddr,
											  IMG_HANDLE *phPriv)
{
	struct ion_client *psIonClient = hIonDev;
	struct ion_handle *psIonHandle;
	struct scatterlist *psScatterList;
	struct scatterlist *psTemp;
	IMG_SYS_PHYADDR *pasSysPhysAddr = NULL;
	ION_IMPORT_DATA *psImportData;
	PVRSRV_ERROR eError;
	IMG_UINT32 ui32PageCount = 0;
	IMG_UINT32 i;
	IMG_PVOID pvKernAddr;
	int fd = (int) hIonFD;

	psImportData = kmalloc(sizeof(ION_IMPORT_DATA), GFP_KERNEL);
	if (psImportData == NULL)
	{
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	/* Get the buffer handle */
	psIonHandle = ion_import_fd(psIonClient, fd);
	if (IS_ERR_OR_NULL(psIonHandle))
	{
		eError = PVRSRV_ERROR_BAD_MAPPING;
		goto exitFailImport;
	}

	/* Create data for free callback */
	psImportData->psIonClient = psIonClient;
	psImportData->psIonHandle = psIonHandle;	

	psScatterList = ion_map_dma(psIonClient, psIonHandle);
	if (psScatterList == NULL)
	{
		eError = PVRSRV_ERROR_INVALID_PARAMS;
		goto exitFailMap;
	}

	/*
		We do a two pass process, 1st workout how many pages there
		are, 2nd fill in the data.
	*/
	for (i=0;i<2;i++)
	{
		psTemp = psScatterList;
		if (i == 1)
		{
			pasSysPhysAddr = kmalloc(sizeof(IMG_SYS_PHYADDR) * ui32PageCount, GFP_KERNEL);
			if (pasSysPhysAddr == NULL)
			{
				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
				goto exitFailAlloc;
			}
			ui32PageCount = 0;	/* Reset the page count as we use it for the index */
		}

		while(psTemp)
		{
			IMG_UINT32 j;

			for (j=0;j<psTemp->length;j+=PAGE_SIZE)
			{
				if (i == 1)
				{
					/* Pass 2: Get the page data */
					pasSysPhysAddr[ui32PageCount].uiAddr = sg_phys(psTemp) + j;
				}
				ui32PageCount++;
			}
			psTemp = sg_next(psTemp);
		}
	}

	pvKernAddr = ion_map_kernel(psIonClient, psIonHandle);
	if (IS_ERR(pvKernAddr))
	{
		pvKernAddr = IMG_NULL;
	}

	psImportData->pvKernAddr = pvKernAddr;

	*ppvKernAddr = pvKernAddr;
	*pui32PageCount = ui32PageCount;
	*ppasSysPhysAddr = pasSysPhysAddr;
	*phPriv = psImportData;
	return PVRSRV_OK;

exitFailAlloc:
	ion_unmap_dma(psIonClient, psIonHandle);
exitFailMap:
	ion_free(psIonClient, psIonHandle);
exitFailImport:
	kfree(psImportData);
	return eError;
}
static int alloc_ion_mem(struct smem_client *client, size_t size, u32 align,
	u32 flags, enum hal_buffer buffer_type, struct msm_smem *mem,
	int map_kernel)
{
	struct ion_handle *hndl;
	unsigned long iova = 0;
	unsigned long buffer_size = 0;
	unsigned long heap_mask = 0;
	int rc = 0;

	align = ALIGN(align, SZ_4K);
	size = ALIGN(size, SZ_4K);

	if (flags & SMEM_SECURE) {
		size = ALIGN(size, SZ_1M);
		align = ALIGN(align, SZ_1M);
	}

	if (is_iommu_present(client->res)) {
		heap_mask = ION_HEAP(ION_IOMMU_HEAP_ID);
	} else {
		dprintk(VIDC_DBG,
			"allocate shared memory from adsp heap size %zu align %d\n",
			size, align);
		heap_mask = ION_HEAP(ION_ADSP_HEAP_ID);
	}

	if (flags & SMEM_SECURE)
		heap_mask = ION_HEAP(ION_CP_MM_HEAP_ID);

	hndl = ion_alloc(client->clnt, size, align, heap_mask, flags);
	if (IS_ERR_OR_NULL(hndl)) {
		dprintk(VIDC_ERR,
		"Failed to allocate shared memory = %p, %zu, %d, 0x%x\n",
		client, size, align, flags);
		rc = -ENOMEM;
		goto fail_shared_mem_alloc;
	}
	mem->mem_type = client->mem_type;
	mem->smem_priv = hndl;
	mem->flags = flags;
	mem->buffer_type = buffer_type;
	if (map_kernel) {
		mem->kvaddr = ion_map_kernel(client->clnt, hndl);
		if (IS_ERR_OR_NULL(mem->kvaddr)) {
			dprintk(VIDC_ERR,
				"Failed to map shared mem in kernel\n");
			rc = -EIO;
			goto fail_map;
		}
	} else
		mem->kvaddr = NULL;

	rc = get_device_address(client, hndl, align, &iova, &buffer_size,
				flags, buffer_type);
	if (rc) {
		dprintk(VIDC_ERR, "Failed to get device address: %d\n",
			rc);
		goto fail_device_address;
	}
	mem->device_addr = iova;
	dprintk(VIDC_DBG,
		"device_address = 0x%lx, kvaddr = 0x%p, size = %d\n",
		mem->device_addr, mem->kvaddr, size);
	mem->size = size;
	return rc;
fail_device_address:
	ion_unmap_kernel(client->clnt, hndl);
fail_map:
	ion_free(client->clnt, hndl);
fail_shared_mem_alloc:
	return rc;
}
Example #23
void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size, offset = 0;
	u32 index = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	unsigned long iova = 0;
	unsigned long buffer_size = 0;
	unsigned long *kernel_vaddr = NULL;
	unsigned long ionflag = 0;
	unsigned long flags = 0;
	int ret = 0;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	int rc = 0;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	res_trk_set_mem_type(addr->mem_type);
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			DDL_MSG_ERROR("%s() :DDL ION Client Invalid handle\n",
						 __func__);
			goto bail_out;
		}
		alloc_size = (alloc_size+4095) & ~4095;
		addr->alloc_handle = ion_alloc(
		ddl_context->video_ion_client, alloc_size, SZ_4K,
			res_trk_get_mem_type());
		if (IS_ERR_OR_NULL(addr->alloc_handle)) {
			DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
						 __func__);
			goto bail_out;
		}
		if (res_trk_check_for_sec_session() ||
			addr->mem_type == DDL_FW_MEM)
			ionflag = UNCACHED;
		else
			ionflag = CACHED;
		kernel_vaddr = (unsigned long *) ion_map_kernel(
					ddl_context->video_ion_client,
					addr->alloc_handle, ionflag);
		if (IS_ERR_OR_NULL(kernel_vaddr)) {
			DDL_MSG_ERROR("%s() :DDL ION map failed\n",
					__func__);
			goto free_ion_alloc;
		}
		addr->virtual_base_addr = (u8 *) kernel_vaddr;
		if (res_trk_check_for_sec_session()) {
			rc = ion_phys(ddl_context->video_ion_client,
				addr->alloc_handle, &phyaddr,
					&len);
			if (rc || !phyaddr) {
				DDL_MSG_ERROR(
				"%s():DDL ION client physical failed\n",
				__func__);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = phyaddr;
		} else {
			ret = ion_map_iommu(ddl_context->video_ion_client,
					addr->alloc_handle,
					VIDEO_DOMAIN,
					VIDEO_MAIN_POOL,
					SZ_4K,
					0,
					&iova,
					&buffer_size,
					UNCACHED, 0);
			if (ret || !iova) {
				DDL_MSG_ERROR(
				"%s():DDL ION ion map iommu failed, ret = %d iova = 0x%lx\n",
					__func__, ret, iova);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = (phys_addr_t) iova;
		}
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s():DDL ION client physical failed\n",
						__func__);
			goto unmap_ion_alloc;
		}
		addr->mapped_buffer = NULL;
		addr->physical_base_addr = (u8 *) addr->alloced_phys_addr;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = alloc_size;
	} else {
		addr->alloced_phys_addr = (phys_addr_t)
		allocate_contiguous_memory_nomap(alloc_size,
			res_trk_get_mem_type(), SZ_4K);
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n",
					 __func__, alloc_size);
			goto bail_out;
		}
		flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
		if (alignment == DDL_KILO_BYTE(128))
				index = 1;
		else if (alignment > SZ_4K)
			flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;

		addr->mapped_buffer =
		msm_subsystem_map_buffer((unsigned long)addr->alloced_phys_addr,
			alloc_size, flags, &vidc_mmu_subsystem[index],
			sizeof(vidc_mmu_subsystem[index])/sizeof(unsigned int));
		if (IS_ERR(addr->mapped_buffer)) {
			pr_err(" %s() buffer map failed", __func__);
			goto free_acm_alloc;
		}
		mapped_buffer = addr->mapped_buffer;
		if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
			pr_err("%s() map buffers failed\n", __func__);
			goto free_map_buffers;
		}
		addr->physical_base_addr = (u8 *)mapped_buffer->iova[0];
		addr->virtual_base_addr = mapped_buffer->vaddr;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = sz;
	}
	return addr->virtual_base_addr;
free_map_buffers:
	msm_subsystem_unmap_buffer(addr->mapped_buffer);
	addr->mapped_buffer = NULL;
free_acm_alloc:
	free_contiguous_memory_by_paddr(
		(unsigned long)addr->alloced_phys_addr);
	addr->alloced_phys_addr = (phys_addr_t)NULL;
	return NULL;
unmap_ion_alloc:
	ion_unmap_kernel(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->virtual_base_addr = NULL;
	addr->alloced_phys_addr = (phys_addr_t)NULL;
free_ion_alloc:
	ion_free(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->alloc_handle = NULL;
bail_out:
	return NULL;
}
static int smcmod_send_buf_cmd(struct smcmod_buf_req *reqp)
{
	int ret = 0;
	struct ion_client *ion_clientp = NULL;
	struct ion_handle *ion_cmd_handlep = NULL;
	struct ion_handle *ion_resp_handlep = NULL;
	void *cmd_vaddrp = NULL;
	void *resp_vaddrp = NULL;
	unsigned long cmd_buf_size = 0;
	unsigned long resp_buf_size = 0;

	/* sanity check the argument */
	if (IS_ERR_OR_NULL(reqp))
		return -EINVAL;

	/* sanity check the fds */
	if (reqp->ion_cmd_fd < 0)
		return -EINVAL;

	/* create an ion client */
	ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");

	/* check for errors */
	if (IS_ERR_OR_NULL(ion_clientp))
		return -EINVAL;

	/* import the command buffer fd */
	ion_cmd_handlep = ion_import_dma_buf(ion_clientp, reqp->ion_cmd_fd);

	/* sanity check the handle */
	if (IS_ERR_OR_NULL(ion_cmd_handlep)) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* retrieve the size of the buffer */
	if (ion_handle_get_size(ion_clientp, ion_cmd_handlep,
		&cmd_buf_size) < 0) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* ensure that the command buffer size is not
	 * greater than the size of the buffer.
	 */
	if (reqp->cmd_len > cmd_buf_size) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* map the area to get a virtual address */
	cmd_vaddrp = ion_map_kernel(ion_clientp, ion_cmd_handlep);

	/* sanity check the address */
	if (IS_ERR_OR_NULL(cmd_vaddrp)) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* check if there is a response buffer */
	if (reqp->ion_resp_fd >= 0) {
		/* import the handle */
		ion_resp_handlep =
			ion_import_dma_buf(ion_clientp, reqp->ion_resp_fd);

		/* sanity check the handle */
		if (IS_ERR_OR_NULL(ion_resp_handlep)) {
			ret = -EINVAL;
			goto buf_cleanup;
		}

		/* retrieve the size of the buffer */
		if (ion_handle_get_size(ion_clientp, ion_resp_handlep,
			&resp_buf_size) < 0) {
			ret = -EINVAL;
			goto buf_cleanup;
		}

		/* ensure that the command buffer size is not
		 * greater than the size of the buffer.
		 */
		if (reqp->resp_len > resp_buf_size) {
			ret = -EINVAL;
			goto buf_cleanup;
		}

		/* map the area to get a virtual address */
		resp_vaddrp = ion_map_kernel(ion_clientp, ion_resp_handlep);

		/* sanity check the address */
		if (IS_ERR_OR_NULL(resp_vaddrp)) {
			ret = -EINVAL;
			goto buf_cleanup;
		}
	}

	/* No need to flush the cache lines for the command buffer here,
	 * because the buffer will be flushed by scm_call.
	 */

	/* call scm function to switch to secure world */
	reqp->return_val = scm_call(reqp->service_id, reqp->command_id,
		cmd_vaddrp, reqp->cmd_len, resp_vaddrp, reqp->resp_len);

	/* The cache lines for the response buffer have already been
	 * invalidated by scm_call before returning.
	 */

buf_cleanup:
	/* if the client and handle(s) are valid, free them */
	if (!IS_ERR_OR_NULL(ion_clientp)) {
		if (!IS_ERR_OR_NULL(ion_cmd_handlep)) {
			if (!IS_ERR_OR_NULL(cmd_vaddrp))
				ion_unmap_kernel(ion_clientp, ion_cmd_handlep);
			ion_free(ion_clientp, ion_cmd_handlep);
		}

		if (!IS_ERR_OR_NULL(ion_resp_handlep)) {
			if (!IS_ERR_OR_NULL(resp_vaddrp))
				ion_unmap_kernel(ion_clientp, ion_resp_handlep);
			ion_free(ion_clientp, ion_resp_handlep);
		}

		ion_client_destroy(ion_clientp);
	}

	return ret;
}
static void *res_trk_pmem_map
	(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 offset = 0, flags = 0;
	u32 index = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	int ret = 0;
	unsigned long iova = 0;
	unsigned long buffer_size  = 0;
	unsigned long *kernel_vaddr = NULL;

	ddl_context = ddl_get_context();
	if (res_trk_get_enable_ion() && addr->alloc_handle) {
		kernel_vaddr = (unsigned long *) ion_map_kernel(
					ddl_context->video_ion_client,
					addr->alloc_handle, UNCACHED);
		if (IS_ERR_OR_NULL(kernel_vaddr)) {
			DDL_MSG_ERROR("%s():DDL ION client map failed\n",
						 __func__);
			goto ion_bail_out;
		}
		addr->virtual_base_addr = (u8 *) kernel_vaddr;
		ret = ion_map_iommu(ddl_context->video_ion_client,
				addr->alloc_handle,
				VIDEO_DOMAIN,
				VIDEO_FIRMWARE_POOL,
				SZ_4K,
				0,
				&iova,
				&buffer_size,
				UNCACHED, 0);
		if (ret || !iova) {
			DDL_MSG_ERROR(
			"%s():DDL ION client iommu map failed, ret = %d iova = 0x%lx\n",
			__func__, ret, iova);
			goto ion_unmap_bail_out;
		}
		addr->mapped_buffer = NULL;
		addr->physical_base_addr = (u8 *)iova;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
		addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = buffer_size;
	} else {
		if (!res_trk_check_for_sec_session()) {
			if (!addr->alloced_phys_addr) {
				pr_err(" %s() alloced addres NULL", __func__);
				goto bail_out;
			}
			flags = MSM_SUBSYSTEM_MAP_IOVA |
				MSM_SUBSYSTEM_MAP_KADDR;
			if (alignment == DDL_KILO_BYTE(128))
					index = 1;
			else if (alignment > SZ_4K)
				flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;
			addr->mapped_buffer =
			msm_subsystem_map_buffer(
			(unsigned long)addr->alloced_phys_addr,
			sz, flags, &restrk_mmu_subsystem[index],
			sizeof(restrk_mmu_subsystem[index])/
				sizeof(unsigned int));
			if (IS_ERR(addr->mapped_buffer)) {
				pr_err(" %s() buffer map failed", __func__);
				goto bail_out;
			}
			mapped_buffer = addr->mapped_buffer;
			if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
				pr_err("%s() map buffers failed\n", __func__);
				goto bail_out;
			}
			addr->physical_base_addr =
				 (u8 *)mapped_buffer->iova[0];
			addr->virtual_base_addr =
					mapped_buffer->vaddr;
		} else {
			addr->physical_base_addr =
				(u8 *) addr->alloced_phys_addr;
			addr->virtual_base_addr =
				(u8 *)addr->alloced_phys_addr;
		}
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
		addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = sz;
	}
	return addr->virtual_base_addr;
bail_out:
	if (IS_ERR(addr->mapped_buffer))
		msm_subsystem_unmap_buffer(addr->mapped_buffer);
	return NULL;
ion_unmap_bail_out:
	if (!IS_ERR_OR_NULL(addr->alloc_handle)) {
		ion_unmap_kernel(resource_context.res_ion_client,
			addr->alloc_handle);
	}
ion_bail_out:
	return NULL;
}
Example #26
PVRSRV_ERROR IonImportBufferAndAcquirePhysAddr(IMG_HANDLE hIonDev,
											   IMG_UINT32 ui32NumFDs,
											   IMG_INT32  *pai32BufferFDs,
											   IMG_UINT32 *pui32PageCount,
											   IMG_SYS_PHYADDR **ppsSysPhysAddr,
											   IMG_PVOID  *ppvKernAddr0,
											   IMG_HANDLE *phPriv,
											   IMG_HANDLE *phUnique)
{
	struct scatterlist *psTemp, *psScatterList[MAX_IMPORT_ION_FDS] = {};
	PVRSRV_ERROR eError = PVRSRV_ERROR_OUT_OF_MEMORY;
	struct ion_client *psIonClient = hIonDev;
	IMG_UINT32 i, k, ui32PageCount = 0;
	ION_IMPORT_DATA *psImportData;

	if(ui32NumFDs > MAX_IMPORT_ION_FDS)
	{
		printk(KERN_ERR "%s: More ion export fds passed in than supported "
						"(%d provided, %d max)", __func__, ui32NumFDs,
						MAX_IMPORT_ION_FDS);
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	psImportData = kzalloc(sizeof(ION_IMPORT_DATA), GFP_KERNEL);
	if (psImportData == NULL)
	{
		goto exitFailKMallocImportData;
	}

	/* Set up import data for free call */
	psImportData->psIonClient = psIonClient;
	psImportData->ui32NumIonHandles = ui32NumFDs;

	for(i = 0; i < ui32NumFDs; i++)
	{
		int fd = (int)pai32BufferFDs[i];
		struct sg_table *psSgTable;

		psImportData->apsIonHandle[i] = ion_import_dma_buf(psIonClient, fd);
		if (IS_ERR_OR_NULL(psImportData->apsIonHandle[i]))
		{
			/* ion_import_dma_buf() returns an ERR_PTR on failure;
			 * reset to IMG_NULL so the cleanup loop below stops here. */
			psImportData->apsIonHandle[i] = IMG_NULL;
			eError = PVRSRV_ERROR_BAD_MAPPING;
			goto exitFailImport;
		}

		psSgTable = ion_sg_table(psIonClient, psImportData->apsIonHandle[i]);
		psScatterList[i] = psSgTable->sgl;
		if (psScatterList[i] == NULL)
		{
			eError = PVRSRV_ERROR_INVALID_PARAMS;
			goto exitFailImport;
		}

		/* Although all heaps will provide an sg_table, the tables cannot
		 * always be trusted because sg_lists are just pointers to "struct
		 * page" values, and some memory e.g. carveout may not have valid
		 * "struct page" values. In particular, on ARM, carveout is
		 * generally reserved with memblock_remove(), which leaves the
		 * "struct page" entries uninitialized when SPARSEMEM is enabled.
		 * The effect of this is that page_to_pfn(pfn_to_page(pfn)) != pfn.
		 *
		 * There's more discussion on this mailing list thread:
		 * http://lists.linaro.org/pipermail/linaro-mm-sig/2012-August/002440.html
		 *
		 * If the heap this buffer comes from implements ->phys(), it's
		 * probably a contiguous allocator. If the phys() function is
		 * implemented, we'll use it to check sg_table->sgl[0]. If we find
		 * they don't agree, we'll assume phys() is more reliable and use
		 * that.
		 *
		 * Some heaps out there will implement phys() even though they are
		 * not for physically contiguous allocations (so the sg_table must
		 * be used). Therefore use the sg_table if the phys() and first
		 * sg_table entry match. This should be reliable because for most
		 * contiguous allocators, the sg_table should be a single span
		 * from 'start' to 'start+size'.
		 *
		 * Also, ion prints out an error message if the heap doesn't implement
		 * ->phys(), which we want to avoid, so only use ->phys() if the
		 * sg_table contains a single span and therefore could plausibly
		 * be a contiguous allocator.
		 */
		if(!sg_next(psScatterList[i]))
		{
			ion_phys_addr_t sPhyAddr;
			size_t sLength;

			if(!ion_phys(psIonClient, psImportData->apsIonHandle[i],
						 &sPhyAddr, &sLength))
			{
				BUG_ON(sLength & ~PAGE_MASK);

				if(sg_phys(psScatterList[i]) != sPhyAddr)
				{
					psScatterList[i] = IMG_NULL;
					ui32PageCount += sLength / PAGE_SIZE;
				}
			}
		}

		for(psTemp = psScatterList[i]; psTemp; psTemp = sg_next(psTemp))
		{
			IMG_UINT32 j;
			for (j = 0; j < psTemp->length; j += PAGE_SIZE)
			{
				ui32PageCount++;
			}
		}
	}

	BUG_ON(ui32PageCount == 0);

	psImportData->psSysPhysAddr = kmalloc(sizeof(IMG_SYS_PHYADDR) * ui32PageCount, GFP_KERNEL);
	if (psImportData->psSysPhysAddr == NULL)
	{
		goto exitFailImport;
	}

	for(i = 0, k = 0; i < ui32NumFDs; i++)
	{
		if(psScatterList[i])
		{
			for(psTemp = psScatterList[i]; psTemp; psTemp = sg_next(psTemp))
			{
				IMG_UINT32 j;
				for (j = 0; j < psTemp->length; j += PAGE_SIZE)
				{
					psImportData->psSysPhysAddr[k].uiAddr = sg_phys(psTemp) + j;
					k++;
				}
			}
		}
		else
		{
			ion_phys_addr_t sPhyAddr;
			size_t sLength, j;

			ion_phys(psIonClient, psImportData->apsIonHandle[i],
					 &sPhyAddr, &sLength);

			for(j = 0; j < sLength; j += PAGE_SIZE)
			{
				psImportData->psSysPhysAddr[k].uiAddr = sPhyAddr + j;
				k++;
			}
		}
	}

	*pui32PageCount = ui32PageCount;
	*ppsSysPhysAddr = psImportData->psSysPhysAddr;

#if defined(PDUMP)
	if(ui32NumFDs == 1)
	{
		IMG_PVOID pvKernAddr0;

		pvKernAddr0 = ion_map_kernel(psIonClient, psImportData->apsIonHandle[0]);
		if (IS_ERR(pvKernAddr0))
		{
			pvKernAddr0 = IMG_NULL;
		}

		psImportData->pvKernAddr0 = pvKernAddr0;
		*ppvKernAddr0 = pvKernAddr0;
	}
	else
#endif /* defined(PDUMP) */
	{
		*ppvKernAddr0 = NULL;
	}

	*phPriv = psImportData;
	*phUnique = (IMG_HANDLE)psImportData->psSysPhysAddr[0].uiAddr;

	return PVRSRV_OK;

exitFailImport:
	for(i = 0; i < ui32NumFDs && psImportData->apsIonHandle[i] != NULL; i++)
	{
		ion_free(psIonClient, psImportData->apsIonHandle[i]);
	}
	kfree(psImportData);
exitFailKMallocImportData:
	return eError;
}
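The long comment in the middle of this example describes a heuristic for deciding whether the sg_table's page pointers can be trusted. Pulled out on its own, it reads as the sketch below; this is a restatement under the same assumptions as the comment, and the helper name is ours, not PVR's.

/* Sketch: trust the sg_table unless the buffer is a single span whose
 * ion_phys() result disagrees with sg_phys(). */
static bool sgl_pages_are_suspect(struct ion_client *client,
				  struct ion_handle *handle,
				  struct scatterlist *sgl)
{
	ion_phys_addr_t sPhyAddr;
	size_t sLength;

	if (sg_next(sgl))
		return false;	/* multiple entries: must use the sg_table */
	if (ion_phys(client, handle, &sPhyAddr, &sLength))
		return false;	/* heap has no ->phys(): use the sg_table */
	/* Single span and ->phys() disagrees: the sgl pages are suspect,
	 * so fall back to the ion_phys() result. */
	return sg_phys(sgl) != sPhyAddr;
}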
/*
 * Make sure the MIPI_DSI_WRITEBACK_SIZE defined in the board file
 * has enough space: h * w * 3 * 2 bytes (two RGB888 frames).
 */
static void mdp4_dsi_video_do_blt(struct msm_fb_data_type *mfd, int enable)
{
    unsigned long flag;
    int cndx = 0;
    struct vsycn_ctrl *vctrl;
    struct mdp4_overlay_pipe *pipe;
    long long vtime;

    vctrl = &vsync_ctrl_db[cndx];
    pipe = vctrl->base_pipe;

    mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);

    if (mfd->ov0_wb_buf->write_addr == 0) {
        pr_info("%s: no blt_base assigned\n", __func__);
        return;
    }

    spin_lock_irqsave(&vctrl->spin_lock, flag);
    if (enable && pipe->ov_blt_addr == 0) {
        pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
        pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
        pipe->ov_cnt = 0;
        pipe->dmap_cnt = 0;
        vctrl->ov_koff = 0;
        vctrl->ov_done = 0;
        vctrl->blt_free = 0;
        mdp4_stat.blt_dsi_video++;
        vctrl->blt_change++;
    } else if (enable == 0 && pipe->ov_blt_addr) {
        pipe->ov_blt_addr = 0;
        pipe->dma_blt_addr =  0;
        vctrl->blt_free = 4;	/* 4 commits to free wb buf */
        vctrl->blt_change++;
    }

    pr_info("%s: changed=%d enable=%d ov_blt_addr=%x\n", __func__,
            vctrl->blt_change, enable, (int)pipe->ov_blt_addr);

    if (!vctrl->blt_change) {
        spin_unlock_irqrestore(&vctrl->spin_lock, flag);
        return;
    }

    if (vctrl->blt_ctrl == BLT_SWITCH_TG_OFF) // QC_1204
        vctrl->blt_change = 0; // QC_1204

    spin_unlock_irqrestore(&vctrl->spin_lock, flag);

    // QC_1206 - start
    if (enable && pipe->ov_blt_addr) {
        size_t blt_vm_size;
        char *blt_vm_addr;
        unsigned long ionflag = 0;
        int rc = -1;

        blt_vm_size = roundup(mfd->panel_info.xres *
                              mfd->panel_info.yres * 3 * 2, SZ_4K);

        if (!IS_ERR_OR_NULL(mfd->iclient)) {
            rc = ion_handle_get_flags(mfd->iclient,
                                      mfd->ov0_wb_buf->ihdl,
                                      &ionflag);
            pr_err("*** %s: rc=%x\n", __func__, rc); // QC_1207

            if (!rc) {
                blt_vm_addr = (char *) ion_map_kernel(
                                  mfd->iclient,
                                  mfd->ov0_wb_buf->ihdl,
                                  ionflag);
                pr_err("*** %s: blt_vm_addr=%x\n", __func__, (unsigned int)blt_vm_addr); // QC_1207
                if (blt_vm_addr) {
                    pr_err("*** %s: Calling memset() with blt_vm_size=%d\n", __func__, blt_vm_size); // QC_1207
                    memset(blt_vm_addr, 0, blt_vm_size);
                    ion_unmap_kernel(mfd->iclient,
                                     mfd->ov0_wb_buf->ihdl);
                }
            }
        }
    }
    // QC_1206 - end

    if (vctrl->blt_ctrl == BLT_SWITCH_TG_OFF) {
        int tg_enabled;

        vctrl->blt_change = 0;
        tg_enabled = inpdw(MDP_BASE + DSI_VIDEO_BASE) & 0x01;
        if (tg_enabled) {
            vsync_irq_enable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
            mdp4_dsi_video_wait4vsync(0, &vtime);
            MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0);
            mdp4_dsi_video_wait4dmap_done(0);
        }
        mdp4_overlayproc_cfg(pipe);
        mdp4_overlay_dmap_xy(pipe);
        if (tg_enabled) {
            /*
             * need to wait for more than 1 ms to
             * make sure the DSI lanes' FIFO is empty and
             * the lanes are in the stop state before
             * resetting the controller
             */
            usleep(2000);
            mipi_dsi_sw_reset();
            MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 1);
        }
    }
}
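The comment above this example ties the board-file writeback size to h * w * 3 * 2 bytes. As a quick sanity check with illustrative numbers (not from any original board file): a 1080x1920 panel needs 1080 * 1920 * 3 * 2 = 12441600 bytes, roughly 11.9 MiB, before the 4 KiB round-up. A one-line helper capturing the same arithmetic:

/* Illustrative only: the size mdp4_dsi_video_do_blt() expects the
 * board-file writeback region to cover (two RGB888 frames, page aligned). */
static size_t dsi_wb_size_needed(u32 xres, u32 yres)
{
	return roundup((size_t)xres * yres * 3 * 2, SZ_4K);
}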
static long ion_sys_cache_sync(struct ion_client *client, ion_sys_cache_sync_param_t* pParam, int from_kernel)
{
    ION_FUNC_ENTER;
    if (pParam->sync_type < ION_CACHE_CLEAN_ALL)
    {
        // By range operation
        unsigned int start;
        size_t size;
        unsigned int end, page_num, page_start;
        struct ion_handle *kernel_handle;

        kernel_handle = ion_drv_get_kernel_handle(client,
                        pParam->handle, from_kernel);
        if(IS_ERR(kernel_handle))
        {
            IONMSG("ion cache sync fail! \n");
            return -EINVAL;
        }

#ifdef __ION_CACHE_SYNC_USER_VA_EN__
        if(pParam->sync_type < ION_CACHE_CLEAN_BY_RANGE_USE_VA)
#else
        if(1)
#endif
        {
            start = (unsigned int) ion_map_kernel(client, kernel_handle);
            if(IS_ERR_OR_NULL((void*)start))
            {
                IONMSG("cannot do cachesync, unable to map_kernel: ret=%d\n", start);
                return -EFAULT;
            }
            size = ion_handle_buffer(kernel_handle)->size;
        }
        else
        {
            start = pParam->va;
            size = pParam->size;
        }

        // Cache line align
        end = start + size;
        start = (start / L1_CACHE_BYTES * L1_CACHE_BYTES);
        size = (end - start + L1_CACHE_BYTES - 1) / L1_CACHE_BYTES * L1_CACHE_BYTES;
        page_num = ((start&(~PAGE_MASK))+size+(~PAGE_MASK))>>PAGE_ORDER;
        page_start = start & PAGE_MASK;
        // L1 cache sync
        if((pParam->sync_type==ION_CACHE_CLEAN_BY_RANGE) || (pParam->sync_type==ION_CACHE_CLEAN_BY_RANGE_USE_VA))
        {
            MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_CLEAN_RANGE], MMProfileFlagStart, size, 0);
            //printk("[ion_sys_cache_sync]: ION cache clean by range. start=0x%08X size=0x%08X\n", start, size);
            dmac_map_area((void*)start, size, DMA_TO_DEVICE);
        }
        else if ((pParam->sync_type == ION_CACHE_INVALID_BY_RANGE)||(pParam->sync_type == ION_CACHE_INVALID_BY_RANGE_USE_VA))
        {
            MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_INVALID_RANGE], MMProfileFlagStart, size, 0);
            //printk("[ion_sys_cache_sync]: ION cache invalid by range. start=0x%08X size=0x%08X\n", start, size);
            dmac_unmap_area((void*)start, size, DMA_FROM_DEVICE);
        }
        else if ((pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE)||(pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE_USE_VA))
        {
            MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_FLUSH_RANGE], MMProfileFlagStart, size, 0);
            //printk("[ion_sys_cache_sync]: ION cache flush by range. start=0x%08X size=0x%08X\n", start, size);
            dmac_flush_range((void*)start, (void*)(start+size-1));
        }

#if 0
        // L2 cache sync
        //printk("[ion_sys_cache_sync]: page_start=0x%08X, page_num=%d\n", page_start, page_num);
        for (i=0; i<page_num; i++, page_start+=DEFAULT_PAGE_SIZE)
        {
            phys_addr_t phys_addr;

            if (page_start>=VMALLOC_START && page_start<=VMALLOC_END)
            {
                ppage = vmalloc_to_page((void*)page_start);
                if (!ppage)
                {
                    printk("[ion_sys_cache_sync]: Cannot get vmalloc page. addr=0x%08X\n", page_start);
                    ion_unmap_kernel(client, pParam->handle);
                    return -EFAULT;
                }
                phys_addr = page_to_phys(ppage);
            }
            else
                phys_addr = virt_to_phys((void*)page_start);
            if (pParam->sync_type == ION_CACHE_CLEAN_BY_RANGE)
                outer_clean_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE);
            else if (pParam->sync_type == ION_CACHE_INVALID_BY_RANGE)
                outer_inv_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE);
            else if (pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE)
                outer_flush_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE);
        }
#endif

#ifdef __ION_CACHE_SYNC_USER_VA_EN__
        if(pParam->sync_type < ION_CACHE_CLEAN_BY_RANGE_USE_VA)
#endif
        {
            ion_unmap_kernel(client, kernel_handle);
        }

        if (pParam->sync_type == ION_CACHE_CLEAN_BY_RANGE)
            MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_CLEAN_RANGE], MMProfileFlagEnd, size, 0);
        else if (pParam->sync_type == ION_CACHE_INVALID_BY_RANGE)
            MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_INVALID_RANGE], MMProfileFlagEnd, size, 0);
        else if (pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE)
            MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_FLUSH_RANGE], MMProfileFlagEnd, size, 0);

    }

    /* The remainder of this function was truncated in the source example;
     * minimal tail assumed here so the snippet closes cleanly. */
    ION_FUNC_LEAVE;
    return 0;
}
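The range branch above rounds the start down and the size up to whole cache lines before the dmac_* calls, so partial lines at either end are still covered. A minimal sketch of just that arithmetic, with hypothetical numbers: for start=0x1003, size=0x10 and 32-byte lines, the synced window becomes [0x1000, 0x1020).

/* Sketch of the cache-line alignment used by ion_sys_cache_sync(). */
static void align_to_cache_lines(unsigned long *start, size_t *size)
{
	unsigned long end = *start + *size;

	*start = rounddown(*start, L1_CACHE_BYTES);	/* round start down */
	*size = roundup(end - *start, L1_CACHE_BYTES);	/* round size up */
}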
static long ion_sys_cache_sync(struct ion_client *client, ion_sys_cache_sync_param_t* pParam)
{
    ION_FUNC_ENTER;
    if (pParam->sync_type < ION_CACHE_CLEAN_ALL)
    {
        // By range operation
        unsigned int start = (unsigned int) ion_map_kernel(client, pParam->handle);
        size_t size = ion_handle_buffer(pParam->handle)->size;
        unsigned int end;
        unsigned int page_num;
        unsigned int i;
        unsigned int page_start;
        struct page* ppage;
        phys_addr_t phys_addr;
        // Cache line align
        end = start + size;
        start = (start / L1_CACHE_BYTES * L1_CACHE_BYTES);
        size = (end - start + L1_CACHE_BYTES - 1) / L1_CACHE_BYTES * L1_CACHE_BYTES;
        page_num = ((start&(~PAGE_MASK))+size+(~PAGE_MASK))>>PAGE_ORDER;
        page_start = start & PAGE_MASK;
        // L1 cache sync
        if (pParam->sync_type == ION_CACHE_CLEAN_BY_RANGE)
        {
            printk("[ion_sys_cache_sync]: ION cache clean by range. start=0x%08X size=0x%08X\n", start, size);
            dmac_map_area((void*)start, size, DMA_TO_DEVICE);
        }
        else if (pParam->sync_type == ION_CACHE_INVALID_BY_RANGE)
        {
            printk("[ion_sys_cache_sync]: ION cache invalid by range. start=0x%08X size=0x%08X\n", start, size);
            dmac_unmap_area((void*)start, size, DMA_FROM_DEVICE);
        }
        else if (pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE)
        {
            printk("[ion_sys_cache_sync]: ION cache flush by range. start=0x%08X size=0x%08X\n", start, size);
            dmac_flush_range((void*)start, (void*)(start+size-1));
        }
        // L2 cache sync
        printk("[ion_sys_cache_sync]: page_start=0x%08X, page_num=%d\n", page_start, page_num);
        for (i=0; i<page_num; i++, page_start+=DEFAULT_PAGE_SIZE)
        {
            if (page_start>=VMALLOC_START && page_start<=VMALLOC_END)
            {
                ppage = vmalloc_to_page((void*)page_start);
                if (!ppage)
                {
                	printk("[ion_sys_cache_sync]: Cannot get vmalloc page. addr=0x%08X\n", page_start);
                    ion_unmap_kernel(client, pParam->handle);
                    return -EFAULT;
                }
                phys_addr = page_to_phys(ppage);
            }
            else
                phys_addr = virt_to_phys((void*)page_start);
            if (pParam->sync_type == ION_CACHE_CLEAN_BY_RANGE)
                outer_clean_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE);
            else if (pParam->sync_type == ION_CACHE_INVALID_BY_RANGE)
                outer_inv_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE);
            else if (pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE)
                outer_flush_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE);
        }
        ion_unmap_kernel(client, pParam->handle);
    }

    /* The remainder of this function is likewise truncated in the source;
     * minimal tail assumed so the snippet closes cleanly. */
    ION_FUNC_LEAVE;
    return 0;
}
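This older variant also walks the range page by page for the outer (L2) cache, because the outer-cache operations take physical, not virtual, addresses. A sketch of the VA-to-PA step for a single page, mirroring the branch inside that loop (the helper name is ours):

/* Sketch: resolve one page's physical address the way the L2 loop does. */
static phys_addr_t page_phys_sketch(unsigned long page_start)
{
	if (page_start >= VMALLOC_START && page_start <= VMALLOC_END) {
		struct page *ppage = vmalloc_to_page((void *)page_start);
		return ppage ? page_to_phys(ppage) : 0;	/* 0: lookup failed */
	}
	return virt_to_phys((void *)page_start);	/* lowmem linear map */
}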
Exemple #30
0
static int audamrnb_in_open(struct inode *inode, struct file *file)
{
	struct audio_in *audio = &the_audio_amrnb_in;
	int rc;
	int encid;
	size_t len = 0;	/* ion_phys() takes a size_t * */
	unsigned long ionflag = 0;
	ion_phys_addr_t addr = 0;
	struct ion_handle *handle = NULL;
	struct ion_client *client = NULL;

	mutex_lock(&audio->lock);
	if (audio->opened) {
		rc = -EBUSY;
		goto done;
	}

	client = msm_ion_client_create(UINT_MAX, "Audio_AMR_In_Client");
	if (IS_ERR_OR_NULL(client)) {
		MM_ERR("Unable to create ION client\n");
		rc = -ENOMEM;
		goto client_create_error;
	}
	audio->client = client;

	handle = ion_alloc(client, DMASZ, SZ_4K,
		ION_HEAP(ION_AUDIO_HEAP_ID), 0);
	if (IS_ERR_OR_NULL(handle)) {
		MM_ERR("Unable to create allocate O/P buffers\n");
		rc = -ENOMEM;
		goto buff_alloc_error;
	}
	audio->buff_handle = handle;

	rc = ion_phys(client, handle, &addr, &len);
	if (rc) {
		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
		goto buff_get_phys_error;
	} else {
		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
	}
	audio->phys = (int32_t)addr;

	rc = ion_handle_get_flags(client, handle, &ionflag);
	if (rc) {
		MM_ERR("could not get flags for the handle\n");
		goto buff_get_flags_error;
	}

	audio->map_v_read = ion_map_kernel(client, handle);
	if (IS_ERR_OR_NULL(audio->map_v_read)) {
		MM_ERR("could not map write buffers\n");
		rc = -ENOMEM;
		goto buff_map_error;
	}
	audio->data = audio->map_v_read;
	MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
		audio->phys, (int)audio->data);

	MM_DBG("Memory addr = 0x%8x  phy addr = 0x%8x\n",\
		(int) audio->data, (int) audio->phys);
	if ((file->f_mode & FMODE_WRITE) &&
			(file->f_mode & FMODE_READ)) {
		rc = -EACCES;
		MM_ERR("Non tunnel encoding is not supported\n");
		goto buff_map_error;
	} else if (!(file->f_mode & FMODE_WRITE) &&
					(file->f_mode & FMODE_READ)) {
		audio->mode = MSM_AUD_ENC_MODE_TUNNEL;
		MM_DBG("Opened for tunnel mode encoding\n");
	} else {
		rc = -EACCES;
		goto buff_map_error;
	}


	/* Settings will be re-config at AUDIO_SET_CONFIG,
	 * but at least we need to have initial config
	 */
	audio->buffer_size = (FRAME_SIZE - 8);
	audio->enc_type = ENC_TYPE_AMRNB | audio->mode;
	audio->dtx_mode = -1;
	audio->frame_format = 0;
	audio->used_mode = 7; /* Bit Rate 12.2 kbps MR122 */

	encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name,
			&audio->queue_ids);
	if (encid < 0) {
		MM_ERR("No free encoder available\n");
		rc = -ENODEV;
		goto aenc_alloc_error;
	}
	audio->enc_id = encid;

	rc = msm_adsp_get(audio->module_name, &audio->audrec,
			   &audrec_amrnb_adsp_ops, audio);

	if (rc) {
		audpreproc_aenc_free(audio->enc_id);
		goto aenc_alloc_error;
	}

	audio->stopped = 0;
	audio->source = 0;

	audamrnb_in_flush(audio);

	audio->device_events = AUDDEV_EVT_DEV_RDY | AUDDEV_EVT_DEV_RLS |
				AUDDEV_EVT_VOICE_STATE_CHG;

	audio->voice_state = msm_get_voice_state();
	rc = auddev_register_evt_listner(audio->device_events,
					AUDDEV_CLNT_ENC, audio->enc_id,
					amrnb_in_listener, (void *) audio);
	if (rc) {
		MM_ERR("failed to register device event listener\n");
		goto evt_error;
	}
	audio->build_id = socinfo_get_build_id();
	MM_DBG("Modem build id = %s\n", audio->build_id);

	file->private_data = audio;
	audio->opened = 1;
	mutex_unlock(&audio->lock);
	return rc;
evt_error:
	msm_adsp_put(audio->audrec);
	audpreproc_aenc_free(audio->enc_id);
	ion_unmap_kernel(client, audio->buff_handle);
aenc_alloc_error:
buff_map_error:
buff_get_phys_error:
buff_get_flags_error:
	ion_free(client, audio->buff_handle);
buff_alloc_error:
	ion_client_destroy(client);
client_create_error:
done:
	mutex_unlock(&audio->lock);
	return rc;
}
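The error ladder at the end of this open() shows the unwind order; a matching release path would run the same steps unconditionally. A minimal sketch, assuming the audio->client and audio->buff_handle fields set above (the helper itself is not from the original driver):

/* Hypothetical release path pairing the ION setup in audamrnb_in_open(). */
static void audamrnb_in_release_ion(struct audio_in *audio)
{
	ion_unmap_kernel(audio->client, audio->buff_handle);
	ion_free(audio->client, audio->buff_handle);
	ion_client_destroy(audio->client);
	audio->client = NULL;
	audio->data = NULL;
}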