Example #1
0
/**
 * videobuf2_pmem_contig_user_get() - setup user space memory pointer
 * @mem: per-buffer private videobuf-contig-pmem data
 * @offset: optional offsets copied into @mem (zeroed when NULL)
 * @buffer_type: videobuf2 buffer type recorded in @mem
 * @addr_offset: byte offset added to the mapped address
 * @path: data-path identifier recorded in @mem
 * @client: ION client used to resolve the user-supplied fd (ION builds)
 *
 * This function validates and sets up a pointer to user space memory.
 * Only physically contiguous pfn-mapped memory is accepted.
 *
 * Returns 0 if successful.
 */
int videobuf2_pmem_contig_user_get(struct videobuf2_contig_pmem *mem,
                                   struct videobuf2_msm_offset *offset,
                                   enum videobuf2_buffer_type buffer_type,
                                   uint32_t addr_offset, int path,
                                   struct ion_client *client)
{
    unsigned long len;
    int rc = 0;
#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
    unsigned long kvstart;
#endif
    unsigned int flags = 0;
    unsigned long paddr = 0;
    /* Already resolved on a previous call: nothing to do. */
    if (mem->phyaddr != 0)
        return 0;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
    /* In ION builds, mem->vaddr carries the ION fd from user space. */
    mem->ion_handle = ion_import_fd(client, (int)mem->vaddr);
    if (IS_ERR_OR_NULL(mem->ion_handle)) {
        pr_err("%s ION import failed\n", __func__);
        return PTR_ERR(mem->ion_handle);
    }
    rc = ion_phys(client, mem->ion_handle, (ion_phys_addr_t *)&mem->phyaddr,
                  (size_t *)&len);
    if (rc < 0) {
        /* Fix: an ion_phys() failure was previously ignored, letting a
         * stale phyaddr/len pair reach msm_subsystem_map_buffer() and
         * leaking the imported ION handle. */
        pr_err("%s ION get physical failed\n", __func__);
        ion_free(client, mem->ion_handle);
        return rc;
    }
#elif CONFIG_ANDROID_PMEM
    rc = get_pmem_file((int)mem->vaddr, (unsigned long *)&mem->phyaddr,
                       &kvstart, &len, &mem->file);
    if (rc < 0) {
        pr_err("%s: get_pmem_file fd %d error %d\n",
               __func__, (int)mem->vaddr, rc);
        return rc;
    }
#else
    paddr = 0;
    kvstart = 0;
    len = 0; /* Fix: len was left uninitialized in this configuration. */
#endif
    if (offset)
        mem->offset = *offset;
    else
        memset(&mem->offset, 0, sizeof(struct videobuf2_msm_offset));
    mem->path = path;
    mem->buffer_type = buffer_type;
    paddr = mem->phyaddr;
    flags = MSM_SUBSYSTEM_MAP_IOVA;
    mem->subsys_id = MSM_SUBSYSTEM_CAMERA;
    mem->msm_buffer = msm_subsystem_map_buffer(mem->phyaddr, len,
                      flags, &(mem->subsys_id), 1);
    if (IS_ERR((void *)mem->msm_buffer)) {
        pr_err("%s: msm_subsystem_map_buffer failed\n", __func__);
        /* Undo whichever reference the config-specific path took. */
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
        ion_free(client, mem->ion_handle);
#elif CONFIG_ANDROID_PMEM
        put_pmem_file(mem->file);
#endif
        return PTR_ERR((void *)mem->msm_buffer);
    }
    paddr = mem->msm_buffer->iova[0];
    mem->mapped_phyaddr = paddr + addr_offset;
    mem->addr_offset = addr_offset;
    return rc;
}
Example #2
0
/*
 * msm_vb2_mem_ops_alloc() - allocate a kernel-owned contiguous buffer.
 * @alloc_ctx: opaque allocation context stored in the buffer record
 * @size: requested size in bytes (rounded up to a page multiple)
 *
 * Allocates the bookkeeping struct, a page-aligned contiguous physical
 * buffer, and an IOVA mapping for the camera subsystem.
 *
 * Returns the new videobuf2_contig_pmem record, or an ERR_PTR() on
 * failure.  Caller owns the returned record.
 */
static void *msm_vb2_mem_ops_alloc(void *alloc_ctx, unsigned long size)
{
    struct videobuf2_contig_pmem *mem;
    unsigned int flags = 0;
    long rc;
    mem = kzalloc(sizeof(*mem), GFP_KERNEL);
    if (!mem)
        return ERR_PTR(-ENOMEM);

    mem->magic = MAGIC_PMEM;
    mem->size =  PAGE_ALIGN(size);
    mem->alloc_ctx = alloc_ctx;
    mem->is_userptr = 0;
    mem->phyaddr = msm_mem_allocate(mem);
    if (!mem->phyaddr) {
        pr_err("%s : pmem memory allocation failed\n", __func__);
        kfree(mem);
        return ERR_PTR(-ENOMEM);
    }
    flags = MSM_SUBSYSTEM_MAP_IOVA;
    mem->subsys_id = MSM_SUBSYSTEM_CAMERA;
    mem->msm_buffer = msm_subsystem_map_buffer(mem->phyaddr, mem->size,
                      flags, &(mem->subsys_id), 1);
    if (IS_ERR((void *)mem->msm_buffer)) {
        pr_err("%s: msm_subsystem_map_buffer failed\n", __func__);
        rc = PTR_ERR((void *)mem->msm_buffer);
        msm_mem_free(mem);
        kfree(mem);
        /* Fix: propagate the actual mapping error; rc was previously
         * computed and then discarded in favor of a blanket -ENOMEM. */
        return ERR_PTR(rc);
    }
    mem->mapped_phyaddr = mem->msm_buffer->iova[0];
    return mem;
}
Example #3
0
/*
 * ddl_pmem_alloc() - allocate and map a contiguous DDL buffer.
 * @addr: buffer descriptor filled in on success
 * @sz: requested usable size in bytes
 * @alignment: required physical alignment; the buffer is over-allocated
 *             by this amount so an aligned window always fits inside it
 *
 * Allocates sz + alignment bytes of contiguous memory (unmapped), maps
 * it through the vidc MMU subsystem (IOVA + kernel address), and derives
 * the aligned physical/virtual addresses inside the mapping.
 *
 * Returns the kernel virtual base address on success, NULL on failure
 * (with any partial allocation/mapping undone).
 */
void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size, offset = 0, flags = 0;
	u32 index = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		goto bail_out;
	}
	/* NOTE(review): ddl_context is fetched but never used below —
	 * presumably kept for parity with sibling allocators; confirm. */
	ddl_context = ddl_get_context();
	/* Over-allocate so an alignment-adjusted window always fits. */
	alloc_size = (sz + alignment);
	addr->alloced_phys_addr = (phys_addr_t)
	allocate_contiguous_memory_nomap(alloc_size,
		res_trk_get_mem_type(), SZ_4K);
	if (!addr->alloced_phys_addr) {
		DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n", __func__,
			alloc_size);
		goto bail_out;
	}
	flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
	/* 128KB alignment selects the alternate MMU subsystem entry;
	 * larger-than-page alignment requests an 8K-aligned IOVA. */
	if (alignment == DDL_KILO_BYTE(128))
			index = 1;
	else if (alignment > SZ_4K)
		flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;

	addr->mapped_buffer =
	msm_subsystem_map_buffer((unsigned long)addr->alloced_phys_addr,
	alloc_size, flags, &vidc_mmu_subsystem[index],
	sizeof(vidc_mmu_subsystem[index])/sizeof(unsigned int));
	if (IS_ERR(addr->mapped_buffer)) {
		pr_err(" %s() buffer map failed", __func__);
		goto free_acm_alloc;
	}
	mapped_buffer = addr->mapped_buffer;
	/* Both the kernel address and the IOVA must be valid to proceed. */
	if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
		pr_err("%s() map buffers failed\n", __func__);
		goto free_map_buffers;
	}
	addr->physical_base_addr = (u8 *)mapped_buffer->iova[0];
	addr->virtual_base_addr = mapped_buffer->vaddr;
	/* Align within the over-allocated region; same offset applies to
	 * the virtual side because both views map the same pages. */
	addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
		addr->physical_base_addr, alignment);
	offset = (u32)(addr->align_physical_addr -
			addr->physical_base_addr);
	addr->align_virtual_addr = addr->virtual_base_addr + offset;
	addr->buffer_size = sz;
	return addr->virtual_base_addr;

free_map_buffers:
	msm_subsystem_unmap_buffer(addr->mapped_buffer);
	addr->mapped_buffer = NULL;
free_acm_alloc:
	free_contiguous_memory_by_paddr(
		(unsigned long)addr->alloced_phys_addr);
	addr->alloced_phys_addr = (phys_addr_t)NULL;
bail_out:
	return NULL;
}
Example #4
0
/*
 * res_trk_pmem_map() - map an already-allocated contiguous buffer.
 * @addr: descriptor whose alloced_phys_addr was filled by the allocator
 * @sz: size of the region to map, in bytes
 * @alignment: required alignment for the derived aligned addresses
 *
 * Maps @addr->alloced_phys_addr through the resource-tracker MMU
 * subsystem (IOVA + kernel address) and derives the aligned
 * physical/virtual addresses.
 *
 * Returns the kernel virtual base address on success, NULL on failure.
 */
static void *res_trk_pmem_map
	(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 offset = 0, flags = 0;
	u32 index = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	ddl_context = ddl_get_context();
	if (!addr->alloced_phys_addr) {
		pr_err(" %s() alloced addres NULL", __func__);
		goto bail_out;
	}
	flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
	/* 128KB alignment selects the alternate MMU subsystem entry;
	 * larger-than-page alignment requests an 8K-aligned IOVA. */
	if (alignment == DDL_KILO_BYTE(128))
			index = 1;
	else if (alignment > SZ_4K)
		flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;

	addr->mapped_buffer =
	msm_subsystem_map_buffer((unsigned long)addr->alloced_phys_addr,
	sz, flags, &restrk_mmu_subsystem[index],
	sizeof(restrk_mmu_subsystem[index])/sizeof(unsigned int));
	if (IS_ERR(addr->mapped_buffer)) {
		pr_err(" %s() buffer map failed", __func__);
		goto bail_out;
	}
	mapped_buffer = addr->mapped_buffer;
	if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
		pr_err("%s() map buffers failed\n", __func__);
		/* Fix: the mapping itself succeeded here, so it must be
		 * torn down; bailing out directly leaked it. */
		goto unmap_buffer;
	}
	addr->physical_base_addr = (u8 *)mapped_buffer->iova[0];
	addr->virtual_base_addr = mapped_buffer->vaddr;
	addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
		addr->physical_base_addr, alignment);
	offset = (u32)(addr->align_physical_addr -
			addr->physical_base_addr);
	addr->align_virtual_addr = addr->virtual_base_addr + offset;
	addr->buffer_size = sz;
	return addr->virtual_base_addr;
unmap_buffer:
	msm_subsystem_unmap_buffer(addr->mapped_buffer);
	addr->mapped_buffer = NULL;
bail_out:
	return NULL;
}
/*
 * msm_gemini_platform_v2p() - translate a pmem fd to a device address.
 * @fd: user-supplied pmem file descriptor
 * @len: number of bytes the caller intends to access
 * @file_p: out: pmem file reference (caller releases on success)
 * @msm_buffer: out: subsystem mapping handle (caller unmaps on success)
 * @subsys_id: out: set to MSM_SUBSYSTEM_CAMERA for the mapping
 *
 * Resolves the fd to contiguous physical memory, validates @len against
 * the region size, and maps it into the camera subsystem IOMMU.
 *
 * Returns the device (IOVA) address, or 0 on any failure.
 */
uint32_t msm_gemini_platform_v2p(int fd, uint32_t len, struct file **file_p,
					struct msm_mapped_buffer **msm_buffer,
					int *subsys_id)
{
	unsigned long paddr;
	unsigned long size;
	int rc;
	int flags;
#ifdef CONFIG_ANDROID_PMEM
	unsigned long kvstart;
	rc = get_pmem_file(fd, &paddr, &kvstart, &size, file_p);
#else
	rc = 0;
	paddr = 0;
	size = 0;
#endif
	if (rc < 0) {
		GMN_PR_ERR("%s: get_pmem_file fd %d error %d\n", __func__, fd,
			rc);
		return 0;
	}

	/* validate user input */
	if (len > size) {
		GMN_PR_ERR("%s: invalid offset + len\n", __func__);
		/* Fix: the pmem file reference was leaked here. */
		goto put_pmem;
	}

	flags = MSM_SUBSYSTEM_MAP_IOVA;
	*subsys_id = MSM_SUBSYSTEM_CAMERA;
	*msm_buffer = msm_subsystem_map_buffer(paddr, size,
					flags, subsys_id, 1);
	if (IS_ERR((void *)*msm_buffer)) {
		pr_err("%s: msm_subsystem_map_buffer failed\n", __func__);
		/* Fix: the pmem file reference was leaked here too. */
		goto put_pmem;
	}
	paddr = ((struct msm_mapped_buffer *)*msm_buffer)->iova[0];
	return paddr;

put_pmem:
#ifdef CONFIG_ANDROID_PMEM
	put_pmem_file(*file_p);
#endif
	return 0;
}
Example #6
0
/*
 * ddl_pmem_alloc() - allocate a guarded, aligned DDL buffer.
 * @buff_addr: descriptor filled in on success, zeroed on failure
 * @sz: requested usable size in bytes
 * @align: required alignment (linear or tiled)
 *
 * Allocates sz + guard bytes of contiguous memory — via ION when
 * enabled, otherwise via the contiguous-memory allocator plus a kernel
 * mapping — then derives aligned physical/virtual addresses inside the
 * guarded region.  On any failure *buff_addr is zeroed.
 */
void ddl_pmem_alloc(struct ddl_buf_addr *buff_addr, size_t sz, u32 align)
{
	u32 guard_bytes, align_mask;
	u32 physical_addr;
	u32 align_offset;
	u32 alloc_size, flags = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	unsigned long *kernel_vaddr = NULL;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	int ret = -EINVAL;

	if (!buff_addr) {
		ERR("\n%s() Invalid Parameters\n", __func__);
		return;
	}
	/* Guard/mask pairs leave enough slack to align inside the buffer. */
	if (align == DDL_LINEAR_BUFFER_ALIGN_BYTES) {
		guard_bytes = 31;
		align_mask = 0xFFFFFFE0U;
	} else {
		guard_bytes = DDL_TILE_BUF_ALIGN_GUARD_BYTES;
		align_mask = DDL_TILE_BUF_ALIGN_MASK;
	}
	ddl_context = ddl_get_context();
	alloc_size = sz + guard_bytes;
	if (res_trk_get_enable_ion()) {
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			ERR("\n%s(): DDL ION Client Invalid handle\n",
				__func__);
			goto bailout;
		}
		buff_addr->mem_type = res_trk_get_mem_type();
		buff_addr->alloc_handle = ion_alloc(
					ddl_context->video_ion_client,
					alloc_size,
					SZ_4K,
					buff_addr->mem_type);
		if (!buff_addr->alloc_handle) {
			ERR("\n%s(): DDL ION alloc failed\n",
					__func__);
			goto bailout;
		}
		ret = ion_phys(ddl_context->video_ion_client,
					buff_addr->alloc_handle,
					&phyaddr,
					&len);
		if (ret || !phyaddr) {
			ERR("\n%s(): DDL ION client physical failed\n",
					__func__);
			goto free_ion_buffer;
		}
		buff_addr->physical_base_addr = (u32 *)phyaddr;
		kernel_vaddr = (unsigned long *) ion_map_kernel(
					ddl_context->video_ion_client,
					buff_addr->alloc_handle,
					UNCACHED);
		if (IS_ERR_OR_NULL(kernel_vaddr)) {
			ERR("\n%s(): DDL ION map failed\n", __func__);
			/* Fix: the kernel mapping failed, so there is
			 * nothing to ion_unmap_kernel(); the old jump to
			 * unmap_ion_buffer unmapped a never-mapped buffer.
			 * Only the handle needs to be freed. */
			goto free_ion_buffer;
		}
		buff_addr->virtual_base_addr = (u32 *)kernel_vaddr;
		DBG("ddl_ion_alloc: handle(0x%x), mem_type(0x%x), "\
			"phys(0x%x), virt(0x%x), size(%u), align(%u), "\
			"alloced_len(%u)", (u32)buff_addr->alloc_handle,
			(u32)buff_addr->mem_type,
			(u32)buff_addr->physical_base_addr,
			(u32)buff_addr->virtual_base_addr,
			alloc_size, align, len);
	} else {
		physical_addr = (u32)
			allocate_contiguous_memory_nomap(alloc_size,
						ddl_context->memtype, SZ_4K);
		if (!physical_addr) {
			ERR("\n%s(): DDL pmem allocate failed\n",
			       __func__);
			goto bailout;
		}
		buff_addr->physical_base_addr = (u32 *) physical_addr;
		flags = MSM_SUBSYSTEM_MAP_KADDR;
		buff_addr->mapped_buffer =
		msm_subsystem_map_buffer((unsigned long)physical_addr,
		alloc_size, flags, NULL, 0);
		if (IS_ERR(buff_addr->mapped_buffer)) {
			ERR("\n%s() buffer map failed\n", __func__);
			goto free_pmem_buffer;
		}
		mapped_buffer = buff_addr->mapped_buffer;
		if (!mapped_buffer->vaddr) {
			ERR("\n%s() mapped virtual address is NULL\n",
				__func__);
			goto unmap_pmem_buffer;
		}
		buff_addr->virtual_base_addr = mapped_buffer->vaddr;
		DBG("ddl_pmem_alloc: mem_type(0x%x), phys(0x%x),"\
			" virt(0x%x), sz(%u), align(%u)",
			(u32)buff_addr->mem_type,
			(u32)buff_addr->physical_base_addr,
			(u32)buff_addr->virtual_base_addr,
			alloc_size, SZ_4K);
	}

	/* Common tail: zero the region, then align inside the guard. */
	memset(buff_addr->virtual_base_addr, 0 , sz + guard_bytes);
	buff_addr->buffer_size = sz;
	buff_addr->align_physical_addr = (u32 *)
		(((u32)buff_addr->physical_base_addr + guard_bytes) &
		align_mask);
	align_offset = (u32) (buff_addr->align_physical_addr) -
		(u32)buff_addr->physical_base_addr;
	buff_addr->align_virtual_addr =
	    (u32 *) ((u32) (buff_addr->virtual_base_addr)
		     + align_offset);
	DBG("%s(): phys(0x%x) align_phys(0x%x), virt(0x%x),"\
		" align_virt(0x%x)", __func__,
		(u32)buff_addr->physical_base_addr,
		(u32)buff_addr->align_physical_addr,
		(u32)buff_addr->virtual_base_addr,
		(u32)buff_addr->align_virtual_addr);
	return;

unmap_pmem_buffer:
	if (buff_addr->mapped_buffer)
		msm_subsystem_unmap_buffer(buff_addr->mapped_buffer);
free_pmem_buffer:
	if (buff_addr->physical_base_addr)
		free_contiguous_memory_by_paddr((unsigned long)
			buff_addr->physical_base_addr);
	memset(buff_addr, 0, sizeof(struct ddl_buf_addr));
	return;

free_ion_buffer:
	if (ddl_context->video_ion_client) {
		if (buff_addr->alloc_handle)
			ion_free(ddl_context->video_ion_client,
				buff_addr->alloc_handle);
	}
bailout:
	memset(buff_addr, 0, sizeof(struct ddl_buf_addr));
}
/*
 * ddl_pmem_alloc() - allocate and map a contiguous DDL buffer (ION/ACM).
 * @addr: buffer descriptor filled in on success
 * @sz: requested usable size in bytes
 * @alignment: required alignment; the buffer is over-allocated by this
 *             amount so an aligned window always fits inside it
 *
 * Obtains sz + alignment bytes of contiguous physical memory either via
 * ION (when enabled) or via the contiguous-memory allocator, then maps
 * it through the vidc MMU subsystem and derives the aligned addresses.
 *
 * Returns the kernel virtual base address on success, NULL on failure
 * (with the allocation released).
 */
void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size, offset = 0, flags = 0;
	u32 index = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	int rc = -EINVAL;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	/* Over-allocate so an alignment-adjusted window always fits. */
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		/* Lazily attach the shared ION client on first use. */
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			DDL_MSG_ERROR("%s() :DDL ION Client Invalid handle\n",
						 __func__);
			goto bail_out;
		}
		/* NOTE(review): mem type is passed as a heap bitmask here
		 * (1 << type), unlike sibling allocators — confirm this
		 * matches the ion_alloc() flavor in this tree. */
		addr->alloc_handle = ion_alloc(
		ddl_context->video_ion_client, alloc_size, SZ_4K,
			(1<<res_trk_get_mem_type()));
		if (IS_ERR_OR_NULL(addr->alloc_handle)) {
			DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
						 __func__);
			goto bail_out;
		}
		rc = ion_phys(ddl_context->video_ion_client,
				addr->alloc_handle, &phyaddr,
				 &len);
		if (rc || !phyaddr) {
			DDL_MSG_ERROR("%s():DDL ION client physical failed\n",
						 __func__);
			goto free_acm_ion_alloc;
		}
		addr->alloced_phys_addr = phyaddr;
	} else {
		addr->alloced_phys_addr = (phys_addr_t)
		allocate_contiguous_memory_nomap(alloc_size,
			res_trk_get_mem_type(), SZ_4K);
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n",
					 __func__, alloc_size);
			goto bail_out;
		}
	}
	flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
	/* 128KB alignment selects the alternate MMU subsystem entry;
	 * larger-than-page alignment requests an 8K-aligned IOVA. */
	if (alignment == DDL_KILO_BYTE(128))
			index = 1;
	else if (alignment > SZ_4K)
		flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;

	addr->mapped_buffer =
	msm_subsystem_map_buffer((unsigned long)addr->alloced_phys_addr,
	alloc_size, flags, &vidc_mmu_subsystem[index],
	sizeof(vidc_mmu_subsystem[index])/sizeof(unsigned int));
	if (IS_ERR(addr->mapped_buffer)) {
		pr_err(" %s() buffer map failed", __func__);
		goto free_acm_ion_alloc;
	}
	mapped_buffer = addr->mapped_buffer;
	if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
		pr_err("%s() map buffers failed\n", __func__);
		goto free_map_buffers;
	}
	addr->physical_base_addr = (u8 *)mapped_buffer->iova[0];
	addr->virtual_base_addr = mapped_buffer->vaddr;
	/* Align within the over-allocated region; the same offset applies
	 * to the virtual side since both views map the same pages. */
	addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
		addr->physical_base_addr, alignment);
	offset = (u32)(addr->align_physical_addr -
			addr->physical_base_addr);
	addr->align_virtual_addr = addr->virtual_base_addr + offset;
	addr->buffer_size = sz;
	return addr->virtual_base_addr;

free_map_buffers:
	msm_subsystem_unmap_buffer(addr->mapped_buffer);
	addr->mapped_buffer = NULL;
free_acm_ion_alloc:
	/* Release through whichever allocator produced the memory. */
	if (ddl_context->video_ion_client) {
		if (addr->alloc_handle) {
			ion_free(ddl_context->video_ion_client,
				addr->alloc_handle);
			addr->alloc_handle = NULL;
		}
	} else {
		free_contiguous_memory_by_paddr(
			(unsigned long)addr->alloced_phys_addr);
		addr->alloced_phys_addr = (phys_addr_t)NULL;
	}
bail_out:
	return NULL;
}
/*
 * res_trk_pmem_map() - map a pre-allocated buffer for the firmware pool.
 * @addr: descriptor whose allocation (ION handle or phys addr) is set
 * @sz: size of the region to map, in bytes
 * @alignment: required alignment for the derived aligned addresses
 *
 * ION path: kernel-maps the handle and maps it into the video IOMMU
 * firmware pool.  Non-ION, non-secure path: maps the contiguous physical
 * allocation through the resource-tracker MMU subsystem.  Secure
 * sessions use the physical address directly with no mapping.
 *
 * Returns the kernel virtual base address on success, NULL on failure.
 */
static void *res_trk_pmem_map
	(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 offset = 0, flags = 0;
	u32 index = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	int ret = 0;
	unsigned long iova = 0;
	unsigned long buffer_size  = 0;
	unsigned long *kernel_vaddr = NULL;

	ddl_context = ddl_get_context();
	if (res_trk_get_enable_ion() && addr->alloc_handle) {
		kernel_vaddr = (unsigned long *) ion_map_kernel(
					ddl_context->video_ion_client,
					addr->alloc_handle, UNCACHED);
		if (IS_ERR_OR_NULL(kernel_vaddr)) {
			DDL_MSG_ERROR("%s():DDL ION client map failed\n",
						 __func__);
			goto ion_bail_out;
		}
		addr->virtual_base_addr = (u8 *) kernel_vaddr;
		ret = ion_map_iommu(ddl_context->video_ion_client,
				addr->alloc_handle,
				VIDEO_DOMAIN,
				VIDEO_FIRMWARE_POOL,
				SZ_4K,
				0,
				&iova,
				&buffer_size,
				UNCACHED, 0);
		if (ret || !iova) {
			DDL_MSG_ERROR(
			"%s():DDL ION client iommu map failed, ret = %d iova = 0x%lx\n",
			__func__, ret, iova);
			goto ion_unmap_bail_out;
		}
		addr->mapped_buffer = NULL;
		addr->physical_base_addr = (u8 *)iova;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
		addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = buffer_size;
	} else {
		if (!res_trk_check_for_sec_session()) {
			if (!addr->alloced_phys_addr) {
				pr_err(" %s() alloced addres NULL", __func__);
				goto bail_out;
			}
			flags = MSM_SUBSYSTEM_MAP_IOVA |
				MSM_SUBSYSTEM_MAP_KADDR;
			if (alignment == DDL_KILO_BYTE(128))
					index = 1;
			else if (alignment > SZ_4K)
				flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;
			addr->mapped_buffer =
			msm_subsystem_map_buffer(
			(unsigned long)addr->alloced_phys_addr,
			sz, flags, &restrk_mmu_subsystem[index],
			sizeof(restrk_mmu_subsystem[index])/
				sizeof(unsigned int));
			if (IS_ERR(addr->mapped_buffer)) {
				pr_err(" %s() buffer map failed", __func__);
				/* Fix: clear the ERR pointer so nothing
				 * downstream tries to unmap it. */
				addr->mapped_buffer = NULL;
				goto bail_out;
			}
			mapped_buffer = addr->mapped_buffer;
			if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
				pr_err("%s() map buffers failed\n", __func__);
				goto unmap_bail_out;
			}
			addr->physical_base_addr =
				 (u8 *)mapped_buffer->iova[0];
			addr->virtual_base_addr =
					mapped_buffer->vaddr;
		} else {
			/* Secure session: no kernel mapping is possible;
			 * both views alias the physical address. */
			addr->physical_base_addr =
				(u8 *) addr->alloced_phys_addr;
			addr->virtual_base_addr =
				(u8 *)addr->alloced_phys_addr;
		}
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
		addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = sz;
	}
	return addr->virtual_base_addr;
unmap_bail_out:
	/* Fix: the original cleanup condition was inverted — it unmapped
	 * only when IS_ERR(mapped_buffer) held, passing ERR pointers to
	 * msm_subsystem_unmap_buffer() and leaking valid mappings whose
	 * vaddr/iova checks failed. */
	msm_subsystem_unmap_buffer(addr->mapped_buffer);
	addr->mapped_buffer = NULL;
bail_out:
	return NULL;
ion_unmap_bail_out:
	if (!IS_ERR_OR_NULL(addr->alloc_handle)) {
		ion_unmap_kernel(resource_context.
			res_ion_client,	addr->alloc_handle);
	}
ion_bail_out:
	return NULL;
}
Example #9
0
static int auda2dp_in_open(struct inode *inode, struct file *file)
{
	struct audio_a2dp_in *audio = &the_audio_a2dp_in;
	int rc;
	int encid;

	mutex_lock(&audio->lock);
	if (audio->opened) {
		rc = -EBUSY;
		goto done;
	}

	audio->phys = allocate_contiguous_ebi_nomap(DMASZ, SZ_4K);
	if (audio->phys) {
		audio->msm_map = msm_subsystem_map_buffer(
					audio->phys, DMASZ,
					MSM_SUBSYSTEM_MAP_KADDR, NULL, 0);
		if (IS_ERR(audio->msm_map)) {
			MM_ERR("could not map the phys address to kernel"
							"space\n");
			rc = -ENOMEM;
			free_contiguous_memory_by_paddr(audio->phys);
			goto done;
		}
		audio->data = (u8 *)audio->msm_map->vaddr;
	} else {
		MM_ERR("could not allocate DMA buffers\n");
		rc = -ENOMEM;
		goto done;
	}
	MM_DBG("Memory addr = 0x%8x  phy addr = 0x%8x\n",\
		(int) audio->data, (int) audio->phys);

	if ((file->f_mode & FMODE_WRITE) &&
				(file->f_mode & FMODE_READ)) {
		rc = -EACCES;
		MM_ERR("Non tunnel encoding is not supported\n");
		goto done;
	} else if (!(file->f_mode & FMODE_WRITE) &&
					(file->f_mode & FMODE_READ)) {
		audio->mode = MSM_AUD_ENC_MODE_TUNNEL;
		MM_DBG("Opened for Tunnel mode encoding\n");
	} else {
		rc = -EACCES;
		goto done;
	}
	/* Settings will be re-config at AUDIO_SET_CONFIG/SBC_ENC_CONFIG,
	 * but at least we need to have initial config
	 */
	audio->channel_mode = AUDREC_CMD_MODE_MONO;
	audio->buffer_size = FRAME_SIZE_SBC;
	audio->samp_rate = 48000;
	audio->enc_type = ENC_TYPE_SBC | audio->mode;
	audio->cfg.bit_allocation = AUDIO_SBC_BA_SNR;
	audio->cfg.mode = AUDIO_SBC_MODE_JSTEREO;
	audio->cfg.number_of_subbands = AUDIO_SBC_BANDS_8;
	audio->cfg.number_of_blocks = AUDIO_SBC_BLOCKS_16;
	audio->cfg.bit_rate = 320000; /* max 512kbps(mono), 320kbs(others) */

	encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name,
			&audio->queue_ids);
	if (encid < 0) {
		MM_ERR("No free encoder available\n");
		rc = -ENODEV;
		goto done;
	}
	audio->enc_id = encid;

	rc = msm_adsp_get(audio->module_name, &audio->audrec,
			   &audrec_adsp_ops, audio);

	if (rc) {
		audpreproc_aenc_free(audio->enc_id);
		goto done;
	}

	audio->stopped = 0;
	audio->source = 0;
	audio->abort = 0;
	auda2dp_in_flush(audio);
	audio->device_events = AUDDEV_EVT_DEV_RDY | AUDDEV_EVT_DEV_RLS |
				AUDDEV_EVT_FREQ_CHG;

	rc = auddev_register_evt_listner(audio->device_events,
					AUDDEV_CLNT_ENC, audio->enc_id,
					a2dp_in_listener, (void *) audio);
	if (rc) {
		MM_ERR("failed to register device event listener\n");
		goto evt_error;
	}
	audio->build_id = socinfo_get_build_id();
	MM_DBG("Modem build id = %s\n", audio->build_id);
	file->private_data = audio;
	audio->opened = 1;
	rc = 0;
done:
	mutex_unlock(&audio->lock);
	return rc;
evt_error:
	msm_adsp_put(audio->audrec);
	audpreproc_aenc_free(audio->enc_id);
	mutex_unlock(&audio->lock);
	return rc;
}
Example #10
0
/*
 * ddl_pmem_alloc() - allocate and map a contiguous DDL buffer
 *                    (ION with iommu/secure support, or ACM fallback).
 * @addr: buffer descriptor; addr->mem_type selects the memory pool
 * @sz: requested usable size in bytes
 * @alignment: required alignment; the buffer is over-allocated by this
 *             amount so an aligned window always fits inside it
 *
 * ION path: allocates a handle, kernel-maps it (uncached for secure or
 * firmware memory, cached otherwise), then resolves the device address
 * via ion_phys() for secure sessions or ion_map_iommu() otherwise.
 * Fallback path: contiguous-memory allocator plus vidc MMU mapping.
 *
 * Returns the kernel virtual base address on success, NULL on failure.
 */
void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size, offset = 0 ;
	u32 index = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	unsigned long iova = 0;
	unsigned long buffer_size = 0;
	unsigned long *kernel_vaddr = NULL;
	unsigned long ionflag = 0;
	unsigned long flags = 0;
	int ret = 0;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	int rc = 0;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	res_trk_set_mem_type(addr->mem_type);
	/* Over-allocate so an alignment-adjusted window always fits. */
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		/* Lazily attach the shared ION client on first use. */
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			DDL_MSG_ERROR("%s() :DDL ION Client Invalid handle\n",
						 __func__);
			goto bail_out;
		}
		/* Round up to a whole page for ION. */
		alloc_size = (alloc_size+4095) & ~4095;
		addr->alloc_handle = ion_alloc(
		ddl_context->video_ion_client, alloc_size, SZ_4K,
			res_trk_get_mem_type());
		if (IS_ERR_OR_NULL(addr->alloc_handle)) {
			DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
						 __func__);
			goto bail_out;
		}
		/* Secure and firmware buffers must not be cached. */
		if (res_trk_check_for_sec_session() ||
			addr->mem_type == DDL_FW_MEM)
			ionflag = UNCACHED;
		else
			ionflag = CACHED;
		kernel_vaddr = (unsigned long *) ion_map_kernel(
					ddl_context->video_ion_client,
					addr->alloc_handle, ionflag);
		if (IS_ERR_OR_NULL(kernel_vaddr)) {
				DDL_MSG_ERROR("%s() :DDL ION map failed\n",
							 __func__);
				goto free_ion_alloc;
		}
		addr->virtual_base_addr = (u8 *) kernel_vaddr;
		if (res_trk_check_for_sec_session()) {
			/* Secure sessions cannot use the iommu: use the
			 * raw physical address as the device address. */
			rc = ion_phys(ddl_context->video_ion_client,
				addr->alloc_handle, &phyaddr,
					&len);
			if (rc || !phyaddr) {
				DDL_MSG_ERROR(
				"%s():DDL ION client physical failed\n",
				__func__);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = phyaddr;
		} else {
			ret = ion_map_iommu(ddl_context->video_ion_client,
					addr->alloc_handle,
					VIDEO_DOMAIN,
					VIDEO_MAIN_POOL,
					SZ_4K,
					0,
					&iova,
					&buffer_size,
					UNCACHED, 0);
			if (ret || !iova) {
				DDL_MSG_ERROR(
				"%s():DDL ION ion map iommu failed, ret = %d iova = 0x%lx\n",
					__func__, ret, iova);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = (phys_addr_t) iova;
		}
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s():DDL ION client physical failed\n",
						__func__);
			goto unmap_ion_alloc;
		}
		addr->mapped_buffer = NULL;
		addr->physical_base_addr = (u8 *) addr->alloced_phys_addr;
		/* Align within the over-allocated region; same offset
		 * applies to the virtual view of the same pages. */
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = alloc_size;
	} else {
		addr->alloced_phys_addr = (phys_addr_t)
		allocate_contiguous_memory_nomap(alloc_size,
			res_trk_get_mem_type(), SZ_4K);
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n",
					 __func__, alloc_size);
			goto bail_out;
		}
		flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
		/* 128KB alignment selects the alternate MMU subsystem
		 * entry; larger-than-page alignment wants 8K IOVAs. */
		if (alignment == DDL_KILO_BYTE(128))
				index = 1;
		else if (alignment > SZ_4K)
			flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;

		addr->mapped_buffer =
		msm_subsystem_map_buffer((unsigned long)addr->alloced_phys_addr,
			alloc_size, flags, &vidc_mmu_subsystem[index],
			sizeof(vidc_mmu_subsystem[index])/sizeof(unsigned int));
		if (IS_ERR(addr->mapped_buffer)) {
			pr_err(" %s() buffer map failed", __func__);
			goto free_acm_alloc;
		}
		mapped_buffer = addr->mapped_buffer;
		if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
			pr_err("%s() map buffers failed\n", __func__);
			goto free_map_buffers;
		}
		addr->physical_base_addr = (u8 *)mapped_buffer->iova[0];
		addr->virtual_base_addr = mapped_buffer->vaddr;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = sz;
	}
	return addr->virtual_base_addr;
free_map_buffers:
	msm_subsystem_unmap_buffer(addr->mapped_buffer);
	addr->mapped_buffer = NULL;
free_acm_alloc:
		free_contiguous_memory_by_paddr(
			(unsigned long)addr->alloced_phys_addr);
		addr->alloced_phys_addr = (phys_addr_t)NULL;
		return NULL;
unmap_ion_alloc:
	ion_unmap_kernel(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->virtual_base_addr = NULL;
	addr->alloced_phys_addr = (phys_addr_t)NULL;
free_ion_alloc:
	ion_free(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->alloc_handle = NULL;
bail_out:
	return NULL;
}
Example #11
0
/*
 * ddl_pmem_alloc() - allocate a guarded, aligned, zeroed DDL buffer.
 * @buff_addr: descriptor filled in on success, zeroed on failure
 * @sz: requested usable size in bytes
 * @align: required alignment (linear or tiled)
 *
 * Allocates sz plus guard bytes of contiguous memory, maps it into the
 * kernel, zeroes it, and records the aligned physical/virtual addresses
 * inside the guarded region.
 */
void ddl_pmem_alloc(struct ddl_buf_addr *buff_addr, size_t sz, u32 align)
{
	struct ddl_context *ctx;
	struct msm_mapped_buffer *map;
	u32 pad, mask;
	u32 phys;
	u32 delta;
	u32 total, map_flags;

	if (!buff_addr) {
		ERR("\n%s() Invalid Parameters", __func__);
		return;
	}

	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);

	/* Guard/mask pair leaves enough slack to align inside the buffer. */
	if (align == DDL_LINEAR_BUFFER_ALIGN_BYTES) {
		pad = 31;
		mask = 0xFFFFFFE0U;
	} else {
		pad = DDL_TILE_BUF_ALIGN_GUARD_BYTES;
		mask = DDL_TILE_BUF_ALIGN_MASK;
	}
	ctx = ddl_get_context();
	total = sz + pad;

	phys = (u32)allocate_contiguous_memory_nomap(total,
					ctx->memtype, SZ_4K);
	if (!phys) {
		pr_err("%s(): could not allocate kernel pmem buffers\n",
		       __func__);
		goto bailout;
	}
	buff_addr->physical_base_addr = (u32 *)phys;

	map_flags = MSM_SUBSYSTEM_MAP_KADDR;
	buff_addr->mapped_buffer = msm_subsystem_map_buffer(
			(unsigned long)phys, total, map_flags, NULL, 0);
	if (IS_ERR(buff_addr->mapped_buffer)) {
		pr_err(" %s() buffer map failed", __func__);
		goto free_acm_alloc;
	}
	map = buff_addr->mapped_buffer;
	if (!map->vaddr) {
		pr_err("%s() mapped virtual address is NULL", __func__);
		goto free_map_buffers;
	}

	buff_addr->virtual_base_addr = map->vaddr;
	memset(buff_addr->virtual_base_addr, 0, sz + pad);
	buff_addr->buffer_size = sz;

	/* Align within the guard; the same delta applies virtually. */
	buff_addr->align_physical_addr = (u32 *)((phys + pad) & mask);
	delta = (u32)(buff_addr->align_physical_addr) - phys;
	buff_addr->align_virtual_addr =
		(u32 *)((u32)(buff_addr->virtual_base_addr) + delta);

	DBG_PMEM("\n%s() OUT: phy_addr(%p) ker_addr(%p) size(%u)", __func__,
		buff_addr->physical_base_addr, buff_addr->virtual_base_addr,
		buff_addr->buffer_size);

	return;
free_map_buffers:
	msm_subsystem_unmap_buffer(buff_addr->mapped_buffer);
free_acm_alloc:
	free_contiguous_memory_by_paddr(
		(unsigned long) phys);
bailout:
	buff_addr->physical_base_addr = NULL;
	buff_addr->virtual_base_addr = NULL;
	buff_addr->buffer_size = 0;
	buff_addr->mapped_buffer = NULL;
}