void FormerlyValidHandle::SetUp()
{
    IonTest::SetUp();
    ASSERT_EQ(0, ion_alloc(m_ionFd, 4096, 0, 1/* ion_env->m_firstHeap */, 0, &m_handle));
    ASSERT_TRUE(m_handle != 0);
    ASSERT_EQ(0, ion_free(m_ionFd, m_handle));
}
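A test built on this fixture can then assert that operations on the freed handle are rejected. A minimal sketch (the -EINVAL expectation is an assumption about the legacy ION ioctl ABI, not taken from the original suite):

/* Sketch only: the fixture's m_handle was freed in SetUp(), so a second
 * free should be rejected by the kernel. */
TEST_F(FormerlyValidHandle, free)
{
    ASSERT_EQ(-EINVAL, ion_free(m_ionFd, m_handle));
}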
static int res_trk_pmem_alloc
	(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size;
	struct ddl_context *ddl_context;
	int rc = 0;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		rc = -EINVAL;
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	res_trk_set_mem_type(addr->mem_type);
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		if (!res_trk_is_cp_enabled() ||
			 !res_trk_check_for_sec_session()) {
			if (!ddl_context->video_ion_client)
				ddl_context->video_ion_client =
					res_trk_get_ion_client();
			if (!ddl_context->video_ion_client) {
				DDL_MSG_ERROR(
				"%s() :DDL ION Client Invalid handle\n",
						__func__);
				rc = -ENOMEM;
				goto bail_out;
			}
			alloc_size = (alloc_size+4095) & ~4095;
			addr->alloc_handle = ion_alloc(
					ddl_context->video_ion_client,
					 alloc_size, SZ_4K,
					res_trk_get_mem_type());
			if (IS_ERR_OR_NULL(addr->alloc_handle)) {
				DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
						__func__);
				rc = -ENOMEM;
				goto bail_out;
			}
		} else {
			addr->alloc_handle = NULL;
			addr->alloced_phys_addr = PIL_FW_BASE_ADDR;
			addr->buffer_size = sz;
		}
	} else {
		addr->alloced_phys_addr = (phys_addr_t)
			allocate_contiguous_memory_nomap(alloc_size,
					res_trk_get_mem_type(), SZ_4K);
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n",
					__func__, alloc_size);
			rc = -ENOMEM;
			goto bail_out;
		}
		addr->buffer_size = sz;
		return rc;
	}
bail_out:
	return rc;
}
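For completeness, the matching release path might look like the sketch below; it assumes the same ddl_buf_addr fields used above and is not taken from the original driver.

/* Sketch only: release a buffer obtained from res_trk_pmem_alloc() above.
 * The PIL_FW_BASE_ADDR case covers the secure-session path, which did not
 * allocate anything. */
static void res_trk_pmem_free(struct ddl_buf_addr *addr)
{
	struct ddl_context *ddl_context = ddl_get_context();

	if (!addr)
		return;
	if (ddl_context->video_ion_client &&
	    !IS_ERR_OR_NULL(addr->alloc_handle)) {
		ion_free(ddl_context->video_ion_client, addr->alloc_handle);
		addr->alloc_handle = NULL;
	} else if (addr->alloced_phys_addr &&
		   addr->alloced_phys_addr != PIL_FW_BASE_ADDR) {
		free_contiguous_memory_by_paddr(
			(unsigned long)addr->alloced_phys_addr);
	}
	addr->alloced_phys_addr = (phys_addr_t)NULL;
	addr->buffer_size = 0;
}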
Example #3
static int32_t msm_mem_allocate(struct videobuf2_contig_pmem *mem)
{
    int32_t phyaddr;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
    int rc;
    size_t len;
    mem->client = msm_ion_client_create(-1, "camera");
    if (IS_ERR((void *)mem->client)) {
        pr_err("%s Could not create client\n", __func__);
        goto client_failed;
    }
    mem->ion_handle = ion_alloc(mem->client, mem->size, SZ_4K,
                                (0x1 << ION_CP_MM_HEAP_ID | 0x1 << ION_IOMMU_HEAP_ID));
    if (IS_ERR((void *)mem->ion_handle)) {
        pr_err("%s Could not allocate\n", __func__);
        goto alloc_failed;
    }
    rc = ion_phys(mem->client, mem->ion_handle, (ion_phys_addr_t *)&phyaddr,
                  &len);
    if (rc < 0) {
        pr_err("%s Could not get physical address\n", __func__);
        goto phys_failed;
    }
#else
    phyaddr = allocate_contiguous_ebi_nomap(mem->size, SZ_4K);
#endif
    return phyaddr;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
phys_failed:
    ion_free(mem->client, mem->ion_handle);
alloc_failed:
    ion_client_destroy(mem->client);
client_failed:
    return 0;
#endif
}
Example #4
static int alloc_ion_mem(struct smem_client *client, size_t size,
		u32 align, u32 flags, struct msm_smem *mem)
{
	struct ion_handle *hndl;
	size_t len;
	int rc = 0;
	flags = flags | ION_HEAP(ION_CP_MM_HEAP_ID);
	hndl = ion_alloc(client->clnt, size, align, flags);
	if (IS_ERR_OR_NULL(hndl)) {
		pr_err("Failed to allocate shared memory = %p, %d, %d, 0x%x\n",
				client, size, align, flags);
		rc = -ENOMEM;
		goto fail_shared_mem_alloc;
	}
	mem->mem_type = client->mem_type;
	mem->smem_priv = hndl;
	if (ion_phys(client->clnt, hndl, &mem->paddr, &len)) {
		pr_err("Failed to get physical address\n");
		rc = -EIO;
		goto fail_map;
	}
	mem->device_addr = mem->paddr;
	mem->size = size;
	mem->kvaddr = ion_map_kernel(client->clnt, hndl, 0);
	if (!mem->kvaddr) {
		pr_err("Failed to map shared mem in kernel\n");
		rc = -EIO;
		goto fail_map;
	}
	return rc;
fail_map:
	ion_free(client->clnt, hndl);
fail_shared_mem_alloc:
	return rc;
}
MemoryHeapIon::MemoryHeapIon(size_t size, uint32_t flags, char const *name):MemoryHeapBase()
{
    void* base = NULL;
    int fd = -1;
    uint32_t isReadOnly, heapMask, flagMask;

    mIonClient = ion_client_create();

    if (mIonClient < 0) {
        ALOGE("MemoryHeapIon : ION client creation failed : %s", strerror(errno));
        mIonClient = -1;
    } else {
        isReadOnly = flags & (IMemoryHeap::READ_ONLY);
        heapMask = ion_HeapMask_valid_check(flags);
        flagMask = ion_FlagMask_valid_check(flags);

        if (heapMask) {
            ALOGD("MemoryHeapIon : Allocated with size:%d, heap:0x%X , flag:0x%X", size, heapMask, flagMask);
            fd = ion_alloc(mIonClient, size, 0, heapMask, flagMask);
            if (fd < 0) {
                ALOGE("MemoryHeapIon : ION Reserve memory allocation failed(size[%u]) : %s", size, strerror(errno));
                if (errno == ENOMEM) { // Out of reserve memory. So re-try allocating in system heap
                    ALOGD("MemoryHeapIon : Re-try Allocating in default heap - SYSTEM heap");
                    fd = ion_alloc(mIonClient, size, 0, ION_HEAP_SYSTEM_MASK, ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC | ION_FLAG_PRESERVE_KMAP);
                }
            }
        } else {
            fd = ion_alloc(mIonClient, size, 0, ION_HEAP_SYSTEM_MASK, ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC | ION_FLAG_PRESERVE_KMAP);
            ALOGD("MemoryHeapIon : Allocated with default heap - SYSTEM heap");
        }

        flags = isReadOnly | heapMask | flagMask;

        if (fd < 0) {
            ALOGE("MemoryHeapIon : ION memory allocation failed(size[%u]) : %s", size, strerror(errno));
        } else {
            flags |= USE_ION_FD;
            base = ion_map(fd, size, 0);
            if (base != MAP_FAILED) {
                init(fd, base, size, flags, NULL);
            } else {
                ALOGE("MemoryHeapIon : ION mmap failed(size[%u], fd[%d]) : %s", size, fd, strerror(errno));
                ion_free(fd);
            }
        }
    }
}
TEST_F(Allocate, Large)
{
    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        ion_user_handle_t handle = 0;
        ASSERT_EQ(-ENOMEM, ion_alloc(m_ionFd, 3UL*1024*1024*1024, 0, heapMask, 0, &handle));
    }
}
static void *res_trk_pmem_alloc
	(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size;
	struct ddl_context *ddl_context;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	res_trk_set_mem_type(addr->mem_type);
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			DDL_MSG_ERROR("%s() :DDL ION Client Invalid handle\n",
						 __func__);
			goto bail_out;
		}
		alloc_size = (alloc_size+4095) & ~4095;
		addr->alloc_handle = ion_alloc(
		ddl_context->video_ion_client, alloc_size, SZ_4K,
			res_trk_get_mem_type());
		if (IS_ERR_OR_NULL(addr->alloc_handle)) {
			DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
						 __func__);
			goto free_acm_ion_alloc;
		}
		return (void *) addr->alloc_handle;
	} else {
		addr->alloced_phys_addr = (phys_addr_t)
		allocate_contiguous_memory_nomap(alloc_size,
			res_trk_get_mem_type(), SZ_4K);
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n",
					 __func__, alloc_size);
			goto bail_out;
		}
		addr->buffer_size = sz;
		return (void *)addr->alloced_phys_addr;
	}


free_acm_ion_alloc:
	if (ddl_context->video_ion_client) {
		if (!IS_ERR_OR_NULL(addr->alloc_handle))
			ion_free(ddl_context->video_ion_client,
				addr->alloc_handle);
		addr->alloc_handle = NULL;
	}
bail_out:
	return NULL;
}
uint32_t hal_tui_alloc(tuiAllocBuffer_t allocbuffer[MAX_DCI_BUFFER_NUMBER],
		size_t allocsize, uint32_t count)
{
	int ret = TUI_DCI_ERR_INTERNAL_ERROR;
	dma_addr_t buf_addr;
	ion_phys_addr_t phys_addr;
	unsigned long offset = 0;
	unsigned int size;

	size = allocsize * (count + 1);

	client = ion_client_create(ion_exynos, "TUI module");
	handle = ion_alloc(client, size, 0, EXYNOS_ION_HEAP_EXYNOS_CONTIG_MASK,
							ION_EXYNOS_VIDEO_MASK);

	dbuf = ion_share_dma_buf(client, handle);
	buf_addr = decon_map_sec_dma_buf(dbuf, 0);

	ion_phys(client, handle, (unsigned long *)&phys_addr, &dbuf->size);

	/* The TUI frame buffer must be 16M-aligned */
	if (phys_addr % 0x1000000)
		offset = 0x1000000 - (phys_addr % 0x1000000);

	phys_addr = phys_addr + offset;
	va = buf_addr + offset;
	printk("buf_addr : %x\n", va);
	printk("phys_addr : %lx\n", phys_addr);
#if 0 /* test-only code; must be removed */
	void *kernel_addr;
	//kernel_addr = (void*)ion_map_kernel(client, handle);
	kernel_addr = phys_to_virt(phys_addr+0x2000000);
	*((u32*)kernel_addr) = va;
	printk("DATA ON phys_addr : addr[%lx] val[%x]\n"
			,phys_addr+0x2000000
			,*((u32*)kernel_addr));
#endif

	g_tuiMemPool.pa = phys_addr;
	g_tuiMemPool.size = allocsize * count;

	if ((size_t)(allocsize * count) <= g_tuiMemPool.size) {
		allocbuffer[0].pa = (uint64_t) g_tuiMemPool.pa;
		allocbuffer[1].pa = (uint64_t) (g_tuiMemPool.pa + g_tuiMemPool.size/2);
	} else {
		/* The requested buffer is bigger than the memory pool:
		 * return an error. */
		pr_debug("%s(%d): %s\n", __func__, __LINE__, "Memory pool too small");
		ret = TUI_DCI_ERR_INTERNAL_ERROR;
		return ret;
	}
	ret = TUI_DCI_OK;

	return ret;
}
Example #9
static int alloc_ion_mem(struct smem_client *client, size_t size,
		u32 align, u32 flags, int domain, int partition,
		struct msm_smem *mem)
{
	struct ion_handle *hndl;
	unsigned long iova = 0;
	unsigned long buffer_size = 0;
	unsigned long ionflags = 0;
	unsigned long heap_mask = 0;
	int rc = 0;
	if (flags == SMEM_CACHED)
		ionflags = ION_SET_CACHED(ionflags);
	else
		ionflags = ION_SET_UNCACHED(ionflags);

	heap_mask = ION_HEAP(ION_CP_MM_HEAP_ID);
	if (align < 4096)
		align = 4096;
	size = (size + 4095) & (~4095);
	pr_debug("\n in %s domain: %d, Partition: %d\n",
		__func__, domain, partition);
	hndl = ion_alloc(client->clnt, size, align, heap_mask, ionflags);
	if (IS_ERR_OR_NULL(hndl)) {
		pr_err("Failed to allocate shared memory = %p, %d, %d, 0x%x\n",
				client, size, align, ionflags);
		rc = -ENOMEM;
		goto fail_shared_mem_alloc;
	}
	mem->mem_type = client->mem_type;
	mem->smem_priv = hndl;
	mem->domain = domain;
	mem->partition_num = partition;
	mem->kvaddr = ion_map_kernel(client->clnt, hndl);
	if (!mem->kvaddr) {
		pr_err("Failed to map shared mem in kernel\n");
		rc = -EIO;
		goto fail_map;
	}
	rc = get_device_address(client->clnt, hndl, mem->domain,
		mem->partition_num, align, &iova, &buffer_size);
	if (rc) {
		pr_err("Failed to get device address: %d\n", rc);
		goto fail_device_address;
	}
	mem->device_addr = iova;
	pr_debug("device_address = 0x%lx, kvaddr = 0x%p\n",
		mem->device_addr, mem->kvaddr);
	mem->size = size;
	return rc;
fail_device_address:
	ion_unmap_kernel(client->clnt, hndl);
fail_map:
	ion_free(client->clnt, hndl);
fail_shared_mem_alloc:
	return rc;
}
Example #10
long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	unsigned int dir;
	union ion_ioctl_arg data;

	dir = ion_ioctl_dir(cmd);

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	/*
	 * The copy_from_user is unconditional here for both read and write
	 * to do the validate. If there is no write for the ioctl, the
	 * buffer is cleared
	 */
	if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
		return -EFAULT;

	ret = validate_ioctl_arg(cmd, &data);
	if (ret) {
		pr_warn_once("%s: ioctl validate failed\n", __func__);
		return ret;
	}

	if (!(dir & _IOC_WRITE))
		memset(&data, 0, sizeof(data));

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		int fd;

		fd = ion_alloc(data.allocation.len,
			       data.allocation.heap_id_mask,
			       data.allocation.flags);
		if (fd < 0)
			return fd;

		data.allocation.fd = fd;

		break;
	}
	case ION_IOC_HEAP_QUERY:
		ret = ion_query_heaps(&data.query);
		break;
	default:
		return -ENOTTY;
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd)))
			return -EFAULT;
	}
	return ret;
}
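The kernel handler above pairs with a few lines of userspace glue. A minimal sketch, assuming the post-4.12 struct ion_allocation_data layout from linux/ion.h (the helper name is ours):

#include <sys/ioctl.h>
#include <linux/ion.h>	/* ION_IOC_ALLOC, struct ion_allocation_data */

/* Hypothetical helper: allocate len bytes from the heaps in heap_id_mask
 * and return the dma-buf fd, or -1 with errno set on failure. */
static int ion_alloc_dmabuf(int ion_fd, __u64 len, __u32 heap_id_mask)
{
	struct ion_allocation_data data = {
		.len = len,
		.heap_id_mask = heap_id_mask,
		.flags = 0,
	};

	if (ioctl(ion_fd, ION_IOC_ALLOC, &data) < 0)
		return -1;
	return data.fd;	/* close() drops the buffer reference */
}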
static int alloc_ion_buf(struct msm_ion_test *ion_test,
					struct ion_test_data *test_data)
{
	ion_test->ion_handle = ion_alloc(ion_test->ion_client, test_data->size,
					test_data->align, test_data->heap_mask,
					test_data->flags);
	if (IS_ERR_OR_NULL(ion_test->ion_handle))
		return -EIO;
	return 0;
}
Example #12
int ion_phys(int fd, ion_user_handle_t handle, unsigned long *phys)
{
        int ret;
        struct owl_ion_phys_data phys_data = {
                .handle = handle,
        };
        struct ion_custom_data data = {
                .cmd = OWL_ION_GET_PHY,
                .arg = (unsigned long)&phys_data,
        };

        ret = ion_ioctl(fd, ION_IOC_CUSTOM, &data);
        if (ret < 0)
                return ret;
        *phys = phys_data.phys_addr;
        return ret;
}
#endif
int ion_count = 0;
/* Allocate memory via ION; returns 0 on success. */
int sys_mem_allocate(unsigned int size, void **vir_addr, ion_user_handle_t *p_ion_handle)
{
    int ret;

    if (!ion_count) {
        ion_fd = ion_open();
        if (ion_fd < 0) {
            printf("ion_open failed\n");
            return -1;
        }
        printf("ion_open ok, ion_fd = %d\n", ion_fd);
    }
    ret = ion_alloc(ion_fd, size, 0, 1, 0, &ion_handle_t);
    if (ret) {
        printf("%s failed: %s\n", __func__, strerror(-ret));
        return -1;
    }
    *p_ion_handle = ion_handle_t;
    ret = ion_map(ion_fd, ion_handle_t, size, PROT_READ | PROT_WRITE,
                  MAP_SHARED, 0, (unsigned char **)vir_addr, &ion_map_fd);
    if (ret) {
        printf("ion_map error\n");
        return -1;
    }
    printf("ion_map ok\n");
    ion_count++;
    return 0;
}
Example #13
int ion_alloc_fd(int fd, size_t len, size_t align, unsigned int heap_mask,
                 unsigned int flags, int *handle_fd) {
    ion_user_handle_t handle;
    int ret;

    ret = ion_alloc(fd, len, align, heap_mask, flags, &handle);
    if (ret < 0)
        return ret;
    ret = ion_share(fd, handle, handle_fd);
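    /* The shared fd now keeps the buffer alive, so the local handle can be
     * released immediately. */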
    ion_free(fd, handle);
    return ret;
}
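Typical usage, as a sketch (the heap mask value and helper name are ours): allocate, mmap the returned dma-buf fd, and let the fd own the buffer.

#include <sys/mman.h>
#include <unistd.h>

/* Hypothetical helper built on ion_alloc_fd() above. */
static void *map_ion_buffer(int ion_fd, size_t len, int *out_buf_fd)
{
    int buf_fd;
    void *ptr;

    if (ion_alloc_fd(ion_fd, len, 0, /*heap_mask=*/1, 0, &buf_fd) < 0)
        return NULL;
    ptr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, buf_fd, 0);
    if (ptr == MAP_FAILED) {
        close(buf_fd);
        return NULL;
    }
    *out_buf_fd = buf_fd;	/* munmap() + close() to release */
    return ptr;
}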
TEST_F(Allocate, AllocateCachedNeedsSync)
{
    static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
    for (unsigned int heapMask : m_allHeaps) {
        for (size_t size : allocationSizes) {
            SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
            SCOPED_TRACE(::testing::Message() << "size " << size);
            ion_user_handle_t handle = 0;
            ASSERT_EQ(0, ion_alloc(m_ionFd, size, 0, heapMask, ION_FLAG_CACHED_NEEDS_SYNC, &handle));
            ASSERT_TRUE(handle != 0);
            ASSERT_EQ(0, ion_free(m_ionFd, handle));
        }
    }
}
int _ion_alloc_test(int fd, struct ion_handle **handle)
{
	int ret;

	if (tiler_test)
		ret = ion_alloc_tiler(fd, width, height, fmt, alloc_flags,
					  handle, &stride);
	else
		ret = ion_alloc(fd, len, align, alloc_flags, handle);

	if (ret)
		printf("%s() failed: %s\n", __func__, strerror(ret));
	return ret;
}
Example #16
int _ion_alloc_test(int *fd, ion_user_handle_t *handle)
{
	int ret;

	*fd = ion_open();
	if (*fd < 0)
		return *fd;

	ret = ion_alloc(*fd, len, align, heap_id, alloc_flags, handle);

	if (ret)
		printf("%s failed: %s\n", __func__, strerror(ret));
	return ret;
}
Example #17
int MemoryManager::allocateBufferList(int size, int numBufs)
{
    int mmap_fd = -1;
    LOG_FUNCTION_NAME;


    //2D Allocations are not supported currently
    if(size != 0) {
        struct ion_handle *handle;

        size_t stride;

        ///1D buffers
        for (int i = 0; i < numBufs; i++) {
            unsigned char *data;
            int ret = ion_alloc(mIonFd, size, 0, 1 << ION_HEAP_TYPE_CARVEOUT,
                    &handle);

            if((ret < 0) || ((int)handle == -ENOMEM)) {
                printe("FAILED to allocate ion buffer of size=%d. ret=%d(0x%x)", size, ret, ret);
                goto error;
            }

            if ((ret = ion_map(mIonFd, handle, size, PROT_READ | PROT_WRITE, MAP_SHARED, 0,
                          &data, &mmap_fd)) < 0) {
                printe("Userspace mapping of ION buffers returned error %d", ret);
                ion_free(mIonFd, handle);
                goto error;
            }
        }
    }

    LOG_FUNCTION_NAME_EXIT;

    return mmap_fd;

error:

    printe("Freeing buffers already allocated after error occurred");

#if 0
    if ( NULL != mErrorNotifier.get() )
        mErrorNotifier->errorNotify(-ENOMEM);
    LOG_FUNCTION_NAME_EXIT;
#endif
    return -1;
}
TEST_F(Allocate, RepeatedAllocate)
{
    static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
    for (unsigned int heapMask : m_allHeaps) {
        for (size_t size : allocationSizes) {
            SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
            SCOPED_TRACE(::testing::Message() << "size " << size);
            ion_user_handle_t handle = 0;

            for (unsigned int i = 0; i < 1024; i++) {
                SCOPED_TRACE(::testing::Message() << "iteration " << i);
                ASSERT_EQ(0, ion_alloc(m_ionFd, size, 0, heapMask, 0, &handle));
                ASSERT_TRUE(handle != 0);
                ASSERT_EQ(0, ion_free(m_ionFd, handle));
            }
        }
    }
}
/**
 * Allocate memory for channel output of specific TSIF.
 *
 * @tsif: The TSIF id to which memory should be allocated.
 *
 * Return: error status
 */
static int mpq_dmx_channel_mem_alloc(int tsif)
{
	int result;
	size_t len;

	MPQ_DVB_DBG_PRINT("%s(%d)\n", __func__, tsif);

	mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle =
		ion_alloc(mpq_dmx_tspp_info.ion_client,
		 (mpq_dmx_tspp_info.tsif[tsif].buffer_count *
		  TSPP_DESCRIPTOR_SIZE),
		 SZ_4K,
		 ION_HEAP(tspp_out_ion_heap),
		 0); /* non-cached */

	if (IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle)) {
		MPQ_DVB_ERR_PRINT("%s: ion_alloc() failed\n", __func__);
		mpq_dmx_channel_mem_free(tsif);
		return -ENOMEM;
	}

	/* save virtual base address of heap */
	mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_virt_base =
		ion_map_kernel(mpq_dmx_tspp_info.ion_client,
			mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle);
	if (IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].
				ch_mem_heap_virt_base)) {
		MPQ_DVB_ERR_PRINT("%s: ion_map_kernel() failed\n", __func__);
		mpq_dmx_channel_mem_free(tsif);
		return -ENOMEM;
	}

	/* save physical base address of heap */
	result = ion_phys(mpq_dmx_tspp_info.ion_client,
		mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle,
		&(mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base), &len);
	if (result < 0) {
		MPQ_DVB_ERR_PRINT("%s: ion_phys() failed\n", __func__);
		mpq_dmx_channel_mem_free(tsif);
		return -ENOMEM;
	}

	return 0;
}
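The mpq_dmx_channel_mem_free() called on every error path above is not shown in this snippet; a plausible shape, offered only as a hedged sketch, is:

/* Sketch only: undo whatever mpq_dmx_channel_mem_alloc() managed to do. */
static void mpq_dmx_channel_mem_free(int tsif)
{
	struct ion_client *client = mpq_dmx_tspp_info.ion_client;
	struct ion_handle *handle =
		mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle;

	if (!IS_ERR_OR_NULL(handle)) {
		if (!IS_ERR_OR_NULL(
			mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_virt_base))
			ion_unmap_kernel(client, handle);
		ion_free(client, handle);
	}
	mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle = NULL;
	mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_virt_base = NULL;
	mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base = 0;
}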
Example #20
void *vb2_ion_private_alloc(void *alloc_ctx, size_t size)
{
    struct vb2_ion_context *ctx = alloc_ctx;
    struct vb2_ion_buf *buf;
    int flags = ion_heapflag(ctx->flags);
    int ret = 0;

    buf = kzalloc(sizeof(*buf), GFP_KERNEL);
    if (!buf) {
        pr_err("%s error: fail to kzalloc size(%d)\n", __func__, sizeof(*buf));
        return ERR_PTR(-ENOMEM);
    }

    size = PAGE_ALIGN(size);

    buf->handle = ion_alloc(ctx->client, size, ctx->alignment, flags, flags);
    if (IS_ERR(buf->handle)) {
        ret = -ENOMEM;
        goto err_alloc;
    }

    buf->cookie.sgt = ion_sg_table(ctx->client, buf->handle);

    buf->ctx  = ctx;
    buf->size = size;

    buf->kva  = ion_map_kernel(ctx->client, buf->handle);
    if (IS_ERR(buf->kva)) {
        ret = PTR_ERR(buf->kva);
        buf->kva = NULL;
        goto err_map_kernel;
    }

    return &buf->cookie;

err_map_kernel:
    ion_free(ctx->client, buf->handle);
err_alloc:
    kfree(buf);

    pr_err("%s: Error occured while allocating\n", __func__);
    return ERR_PTR(ret);
}
Example #21
int omap_ion_mem_alloc(struct ion_client *client,
		struct omap_ion_tiler_alloc_data *sAllocData)
{
	int ret = 0;
	struct ion_allocation_data data;

	data.len = (sAllocData->w * sAllocData->h * sAllocData->fmt) /
					BITS_PER_PIXEL;
	data.align = 0;
	data.flags = 1 << ION_HEAP_TYPE_CARVEOUT;

	sAllocData->handle = ion_alloc(client,
			PAGE_ALIGN(data.len), data.align, data.flags);
	if (IS_ERR_OR_NULL(sAllocData->handle)) {
		pr_err("%s: Failed to allocate via ion_alloc\n", __func__);
		ret = -ENOMEM;
	}

	return ret;
}
Example #22
static unsigned long msm_mem_allocate(struct videobuf2_contig_pmem *mem)
{
	unsigned long phyaddr;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	int rc;
	unsigned long len;
	mem->client = msm_ion_client_create(-1, "camera");
	if (IS_ERR((void *)mem->client)) {
		pr_err("%s Could not create client\n", __func__);
		goto client_failed;
	}
	mem->ion_handle = ion_alloc(mem->client, mem->size, SZ_4K,
		(0x1 << ION_CP_MM_HEAP_ID | 0x1 << ION_IOMMU_HEAP_ID));
	if (IS_ERR((void *)mem->ion_handle)) {
		pr_err("%s Could not allocate\n", __func__);
		goto alloc_failed;
	}
	rc = ion_map_iommu(mem->client, mem->ion_handle,
			CAMERA_DOMAIN, GEN_POOL, SZ_4K, 0,
			&phyaddr, &len, UNCACHED, 0);
	if (rc < 0) {
		pr_err("%s Could not get physical address\n", __func__);
		goto phys_failed;
	}
#else
	phyaddr = allocate_contiguous_ebi_nomap(mem->size, SZ_4K);
#endif
	return phyaddr;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
phys_failed:
	ion_free(mem->client, mem->ion_handle);
alloc_failed:
	ion_client_destroy(mem->client);
client_failed:
	return 0;
#endif
}
void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size, offset = 0, flags = 0;
	u32 index = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	int rc = -EINVAL;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			DDL_MSG_ERROR("%s() :DDL ION Client Invalid handle\n",
						 __func__);
			goto bail_out;
		}
		addr->alloc_handle = ion_alloc(
		ddl_context->video_ion_client, alloc_size, SZ_4K,
			(1<<res_trk_get_mem_type()));
		if (IS_ERR_OR_NULL(addr->alloc_handle)) {
			DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
						 __func__);
			goto bail_out;
		}
		rc = ion_phys(ddl_context->video_ion_client,
				addr->alloc_handle, &phyaddr,
				 &len);
		if (rc || !phyaddr) {
			DDL_MSG_ERROR("%s():DDL ION client physical failed\n",
						 __func__);
			goto free_acm_ion_alloc;
		}
		addr->alloced_phys_addr = phyaddr;
	} else {
		addr->alloced_phys_addr = (phys_addr_t)
		allocate_contiguous_memory_nomap(alloc_size,
			res_trk_get_mem_type(), SZ_4K);
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n",
					 __func__, alloc_size);
			goto bail_out;
		}
	}
	flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
	if (alignment == DDL_KILO_BYTE(128))
			index = 1;
	else if (alignment > SZ_4K)
		flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;

	addr->mapped_buffer =
	msm_subsystem_map_buffer((unsigned long)addr->alloced_phys_addr,
	alloc_size, flags, &vidc_mmu_subsystem[index],
	sizeof(vidc_mmu_subsystem[index])/sizeof(unsigned int));
	if (IS_ERR(addr->mapped_buffer)) {
		pr_err(" %s() buffer map failed", __func__);
		goto free_acm_ion_alloc;
	}
	mapped_buffer = addr->mapped_buffer;
	if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
		pr_err("%s() map buffers failed\n", __func__);
		goto free_map_buffers;
	}
	addr->physical_base_addr = (u8 *)mapped_buffer->iova[0];
	addr->virtual_base_addr = mapped_buffer->vaddr;
	addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
		addr->physical_base_addr, alignment);
	offset = (u32)(addr->align_physical_addr -
			addr->physical_base_addr);
	addr->align_virtual_addr = addr->virtual_base_addr + offset;
	addr->buffer_size = sz;
	return addr->virtual_base_addr;

free_map_buffers:
	msm_subsystem_unmap_buffer(addr->mapped_buffer);
	addr->mapped_buffer = NULL;
free_acm_ion_alloc:
	if (ddl_context->video_ion_client) {
		if (addr->alloc_handle) {
			ion_free(ddl_context->video_ion_client,
				addr->alloc_handle);
			addr->alloc_handle = NULL;
		}
	} else {
		free_contiguous_memory_by_paddr(
			(unsigned long)addr->alloced_phys_addr);
		addr->alloced_phys_addr = (phys_addr_t)NULL;
	}
bail_out:
	return NULL;
}
Example #24
static int audpcm_in_open(struct inode *inode, struct file *file)
{
	struct audio_in *audio = &the_audio_in;
	int rc;
	size_t len = 0;
	unsigned long ionflag = 0;
	ion_phys_addr_t addr = 0;
	struct ion_handle *handle = NULL;
	struct ion_client *client = NULL;

	int encid;
	mutex_lock(&audio->lock);
	if (audio->opened) {
		rc = -EBUSY;
		goto done;
	}

	/* Settings will be re-config at AUDIO_SET_CONFIG,
	 * but at least we need to have initial config
	 */
	audio->mode = MSM_AUD_ENC_MODE_TUNNEL;
	audio->samp_rate = RPC_AUD_DEF_SAMPLE_RATE_11025;
	audio->samp_rate_index = AUDREC_CMD_SAMP_RATE_INDX_11025;
	audio->channel_mode = AUDREC_CMD_STEREO_MODE_MONO;
	audio->buffer_size = MONO_DATA_SIZE;
	audio->enc_type = AUDREC_CMD_TYPE_0_INDEX_WAV | audio->mode;

	rc = audmgr_open(&audio->audmgr);
	if (rc)
		goto done;
	encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name,
			&audio->queue_ids);
	if (encid < 0) {
		MM_ERR("No free encoder available\n");
		rc = -ENODEV;
		goto done;
	}
	audio->enc_id = encid;

	rc = msm_adsp_get(audio->module_name, &audio->audrec,
			   &audrec_adsp_ops, audio);
	if (rc) {
		audpreproc_aenc_free(audio->enc_id);
		goto done;
	}

	rc = msm_adsp_get("AUDPREPROCTASK", &audio->audpre,
				&audpre_adsp_ops, audio);
	if (rc) {
		msm_adsp_put(audio->audrec);
		audpreproc_aenc_free(audio->enc_id);
		goto done;
	}

	audio->dsp_cnt = 0;
	audio->stopped = 0;

	audpcm_in_flush(audio);

	client = msm_ion_client_create(UINT_MAX, "Audio_PCM_in_client");
	if (IS_ERR_OR_NULL(client)) {
		MM_ERR("Unable to create ION client\n");
		rc = -ENOMEM;
		goto client_create_error;
	}
	audio->client = client;

	MM_DBG("allocating mem sz = %d\n", DMASZ);
	handle = ion_alloc(client, DMASZ, SZ_4K,
		ION_HEAP(ION_AUDIO_HEAP_ID), 0);
	if (IS_ERR_OR_NULL(handle)) {
		MM_ERR("Unable to create allocate O/P buffers\n");
		rc = -ENOMEM;
		goto output_buff_alloc_error;
	}

	audio->output_buff_handle = handle;

	rc = ion_phys(client, handle, &addr, &len);
	if (rc) {
		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
		rc = -ENOMEM;
		goto output_buff_get_phys_error;
	} else {
		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
	}
	audio->phys = (int32_t)addr;

	rc = ion_handle_get_flags(client, handle, &ionflag);
	if (rc) {
		MM_ERR("could not get flags for the handle\n");
		rc = -ENOMEM;
		goto output_buff_get_flags_error;
	}

	audio->data = ion_map_kernel(client, handle);
	if (IS_ERR(audio->data)) {
		MM_ERR("could not map read buffers,freeing instance 0x%08x\n",
				(int)audio);
		rc = -ENOMEM;
		goto output_buff_map_error;
	}
	MM_DBG("read buf: phy addr 0x%08x kernel addr 0x%08x\n",
		audio->phys, (int)audio->data);

	file->private_data = audio;
	audio->opened = 1;
	rc = 0;
done:
	mutex_unlock(&audio->lock);
	return rc;
output_buff_map_error:
output_buff_get_phys_error:
output_buff_get_flags_error:
	ion_free(client, audio->output_buff_handle);
output_buff_alloc_error:
	ion_client_destroy(client);
client_create_error:
	msm_adsp_put(audio->audrec);
	msm_adsp_put(audio->audpre);
	audpreproc_aenc_free(audio->enc_id);
	mutex_unlock(&audio->lock);
	return rc;
}
Example #25
void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size, offset = 0;
	u32 index = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	unsigned long iova = 0;
	unsigned long buffer_size = 0;
	unsigned long *kernel_vaddr = NULL;
	unsigned long ionflag = 0;
	unsigned long flags = 0;
	int ret = 0;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	int rc = 0;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	res_trk_set_mem_type(addr->mem_type);
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			DDL_MSG_ERROR("%s() :DDL ION Client Invalid handle\n",
						 __func__);
			goto bail_out;
		}
		alloc_size = (alloc_size+4095) & ~4095;
		addr->alloc_handle = ion_alloc(
		ddl_context->video_ion_client, alloc_size, SZ_4K,
			res_trk_get_mem_type());
		if (IS_ERR_OR_NULL(addr->alloc_handle)) {
			DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
						 __func__);
			goto bail_out;
		}
		if (res_trk_check_for_sec_session() ||
			addr->mem_type == DDL_FW_MEM)
			ionflag = UNCACHED;
		else
			ionflag = CACHED;
		kernel_vaddr = (unsigned long *) ion_map_kernel(
					ddl_context->video_ion_client,
					addr->alloc_handle, ionflag);
		if (IS_ERR_OR_NULL(kernel_vaddr)) {
				DDL_MSG_ERROR("%s() :DDL ION map failed\n",
							 __func__);
				goto free_ion_alloc;
		}
		addr->virtual_base_addr = (u8 *) kernel_vaddr;
		if (res_trk_check_for_sec_session()) {
			rc = ion_phys(ddl_context->video_ion_client,
				addr->alloc_handle, &phyaddr,
					&len);
			if (rc || !phyaddr) {
				DDL_MSG_ERROR(
				"%s():DDL ION client physical failed\n",
				__func__);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = phyaddr;
		} else {
			ret = ion_map_iommu(ddl_context->video_ion_client,
					addr->alloc_handle,
					VIDEO_DOMAIN,
					VIDEO_MAIN_POOL,
					SZ_4K,
					0,
					&iova,
					&buffer_size,
					UNCACHED, 0);
			if (ret || !iova) {
				DDL_MSG_ERROR(
				"%s():DDL ION ion map iommu failed, ret = %d iova = 0x%lx\n",
					__func__, ret, iova);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = (phys_addr_t) iova;
		}
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s():DDL ION client physical failed\n",
						__func__);
			goto unmap_ion_alloc;
		}
		addr->mapped_buffer = NULL;
		addr->physical_base_addr = (u8 *) addr->alloced_phys_addr;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = alloc_size;
	} else {
		addr->alloced_phys_addr = (phys_addr_t)
		allocate_contiguous_memory_nomap(alloc_size,
			res_trk_get_mem_type(), SZ_4K);
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n",
					 __func__, alloc_size);
			goto bail_out;
		}
		flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
		if (alignment == DDL_KILO_BYTE(128))
				index = 1;
		else if (alignment > SZ_4K)
			flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;

		addr->mapped_buffer =
		msm_subsystem_map_buffer((unsigned long)addr->alloced_phys_addr,
			alloc_size, flags, &vidc_mmu_subsystem[index],
			sizeof(vidc_mmu_subsystem[index])/sizeof(unsigned int));
		if (IS_ERR(addr->mapped_buffer)) {
			pr_err(" %s() buffer map failed", __func__);
			goto free_acm_alloc;
		}
		mapped_buffer = addr->mapped_buffer;
		if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
			pr_err("%s() map buffers failed\n", __func__);
			goto free_map_buffers;
		}
		addr->physical_base_addr = (u8 *)mapped_buffer->iova[0];
		addr->virtual_base_addr = mapped_buffer->vaddr;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = sz;
	}
	return addr->virtual_base_addr;
free_map_buffers:
	msm_subsystem_unmap_buffer(addr->mapped_buffer);
	addr->mapped_buffer = NULL;
free_acm_alloc:
	free_contiguous_memory_by_paddr(
		(unsigned long)addr->alloced_phys_addr);
	addr->alloced_phys_addr = (phys_addr_t)NULL;
	return NULL;
unmap_ion_alloc:
	ion_unmap_kernel(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->virtual_base_addr = NULL;
	addr->alloced_phys_addr = (phys_addr_t)NULL;
free_ion_alloc:
	ion_free(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->alloc_handle = NULL;
bail_out:
	return NULL;
}
Example #26
/*--------------------MemoryManager Class STARTS here-----------------------------*/
void* MemoryManager::allocateBuffer(int width, int height, const char* format,
		int &bytes, int numBufs) {
	LOG_FUNCTION_NAME;

	if (mIonFd == 0) {
		mIonFd = ion_open();
		if (mIonFd == 0) {
			LOGE("ion_open failed!!!");
			return NULL;
		}
	}

	///We allocate numBufs+1 because the last entry will be marked NULL to indicate end of array, which is used when freeing
	///the buffers
	const uint numArrayEntriesC = (uint)(numBufs + 1);

	///Allocate a buffer array
	uint32_t *bufsArr = new uint32_t[numArrayEntriesC];
	if (!bufsArr) {
		LOGE(
				"Allocation failed when creating buffers array of %d uint32_t elements",
				numArrayEntriesC);
		LOG_FUNCTION_NAME_EXIT;
		return NULL;
	}

	///Initialize the array with zeros - this will help us while freeing the array in case of error
	///If a value of an array element is NULL, it means we didn't allocate it
	memset(bufsArr, 0, sizeof(*bufsArr) * numArrayEntriesC);

	//2D Allocations are not supported currently
	if (bytes != 0) {
		struct ion_handle *handle;
		int mmap_fd;

		///1D buffers
		for (int i = 0; i < numBufs; i++) {
			int ret = ion_alloc(mIonFd, bytes, 0, 1 << ION_HEAP_TYPE_CARVEOUT,
					&handle);
			if (ret < 0) {
				LOGE("ion_alloc resulted in error %d", ret);
				goto error;
			}

			LOGE("Before mapping, handle = %x, nSize = %d", handle, bytes);
			if ((ret = ion_map(mIonFd, handle, bytes, PROT_READ | PROT_WRITE,
					MAP_SHARED, 0, (unsigned char**) &bufsArr[i], &mmap_fd))
					< 0) {
				LOGE("Userspace mapping of ION buffers returned error %d", ret);
				ion_free(mIonFd, handle);
				goto error;
			}

			mIonHandleMap.add(bufsArr[i], (unsigned int) handle);
			mIonFdMap.add(bufsArr[i], (unsigned int) mmap_fd);
			mIonBufLength.add(bufsArr[i], (unsigned int) bytes);
		}

	} else // If bytes is zero, then it is a 2-D tiler buffer request
	{
	}

	LOG_FUNCTION_NAME_EXIT;

	return (void*) bufsArr;

error:
	LOGE("Freeing buffers already allocated after error occurred");
	freeBuffer(bufsArr);

	if (NULL != mErrorNotifier.get()) {
		mErrorNotifier->errorNotify(-ENOMEM);
	}

	LOG_FUNCTION_NAME_EXIT;
	return NULL;
}
Example #27
static void *vb2_ion_alloc(void *alloc_ctx, unsigned long size)
{
	struct vb2_ion_conf	*conf = alloc_ctx;
	struct vb2_ion_buf	*buf;
	struct scatterlist	*sg;
	size_t	len;
	u32 heap = 0;
	int ret = 0;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf) {
		pr_err("no memory for vb2_ion_conf\n");
		return ERR_PTR(-ENOMEM);
	}

	/* Set vb2_ion_buf */
	buf->conf = conf;
	buf->size = size;
	buf->cacheable = conf->cacheable;

	/* Allocate: physical memory */
	if (conf->contig)
		heap = ION_HEAP_EXYNOS_CONTIG_MASK;
	else
		heap = ION_HEAP_EXYNOS_MASK;

	buf->handle = ion_alloc(conf->client, size, conf->align, heap);
	if (IS_ERR(buf->handle)) {
		pr_err("ion_alloc of size %ld\n", size);
		ret = -ENOMEM;
		goto err_alloc;
	}

	/* Getting scatterlist */
	buf->sg = ion_map_dma(conf->client, buf->handle);
	if (IS_ERR(buf->sg)) {
		pr_err("ion_map_dma conf->name(%s)\n", conf->name);
		ret = -ENOMEM;
		goto err_map_dma;
	}
	dbg(6, "PA(0x%x), SIZE(%x)\n", buf->sg->dma_address, buf->sg->length);

	sg = buf->sg;
	do {
		buf->nents++;
	} while ((sg = sg_next(sg)));
	dbg(6, "buf->nents(0x%x)\n", buf->nents);

	/* Map DVA */
	if (conf->use_mmu) {
		buf->dva = iovmm_map(conf->dev, buf->sg);
		if (!buf->dva) {
			pr_err("iovmm_map: conf->name(%s)\n", conf->name);
			goto err_ion_map_dva;
		}
		dbg(6, "DVA(0x%x)\n", buf->dva);
	} else {
		ret = ion_phys(conf->client, buf->handle,
			       (unsigned long *)&buf->dva, &len);
		if (ret) {
			pr_err("ion_phys: conf->name(%s)\n", conf->name);
			goto err_ion_map_dva;
		}
	}

	/* Set struct vb2_vmarea_handler */
	buf->handler.refcount = &buf->ref;
	buf->handler.put = vb2_ion_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->ref);

	return buf;

err_ion_map_dva:
	ion_unmap_dma(conf->client, buf->handle);

err_map_dma:
	ion_free(conf->client, buf->handle);

err_alloc:
	kfree(buf);

	return ERR_PTR(ret);
}
Example #28
void ddl_pmem_alloc(struct ddl_buf_addr *buff_addr, size_t sz, u32 align)
{
	u32 guard_bytes, align_mask;
	u32 physical_addr;
	u32 align_offset;
	u32 alloc_size, flags = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	unsigned long *kernel_vaddr = NULL;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	int ret = -EINVAL;

	if (!buff_addr) {
		ERR("\n%s() Invalid Parameters\n", __func__);
		return;
	}
	if (align == DDL_LINEAR_BUFFER_ALIGN_BYTES) {
		guard_bytes = 31;
		align_mask = 0xFFFFFFE0U;
	} else {
		guard_bytes = DDL_TILE_BUF_ALIGN_GUARD_BYTES;
		align_mask = DDL_TILE_BUF_ALIGN_MASK;
	}
	ddl_context = ddl_get_context();
	alloc_size = sz + guard_bytes;
	if (res_trk_get_enable_ion()) {
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			ERR("\n%s(): DDL ION Client Invalid handle\n",
				__func__);
			goto bailout;
		}
		buff_addr->mem_type = res_trk_get_mem_type();
		buff_addr->alloc_handle = ion_alloc(
					ddl_context->video_ion_client,
					alloc_size,
					SZ_4K,
					buff_addr->mem_type);
		if (IS_ERR_OR_NULL(buff_addr->alloc_handle)) {
			ERR("\n%s(): DDL ION alloc failed\n",
					__func__);
			goto bailout;
		}
		ret = ion_phys(ddl_context->video_ion_client,
					buff_addr->alloc_handle,
					&phyaddr,
					&len);
		if (ret || !phyaddr) {
			ERR("\n%s(): DDL ION client physical failed\n",
					__func__);
			goto free_ion_buffer;
		}
		buff_addr->physical_base_addr = (u32 *)phyaddr;
		kernel_vaddr = (unsigned long *) ion_map_kernel(
					ddl_context->video_ion_client,
					buff_addr->alloc_handle,
					UNCACHED);
		if (IS_ERR_OR_NULL(kernel_vaddr)) {
			ERR("\n%s(): DDL ION map failed\n", __func__);
			goto unmap_ion_buffer;
		}
		buff_addr->virtual_base_addr = (u32 *)kernel_vaddr;
		DBG("ddl_ion_alloc: handle(0x%x), mem_type(0x%x), "\
			"phys(0x%x), virt(0x%x), size(%u), align(%u), "\
			"alloced_len(%u)", (u32)buff_addr->alloc_handle,
			(u32)buff_addr->mem_type,
			(u32)buff_addr->physical_base_addr,
			(u32)buff_addr->virtual_base_addr,
			alloc_size, align, len);
	} else {
		physical_addr = (u32)
			allocate_contiguous_memory_nomap(alloc_size,
						ddl_context->memtype, SZ_4K);
		if (!physical_addr) {
			ERR("\n%s(): DDL pmem allocate failed\n",
			       __func__);
			goto bailout;
		}
		buff_addr->physical_base_addr = (u32 *) physical_addr;
		flags = MSM_SUBSYSTEM_MAP_KADDR;
		buff_addr->mapped_buffer =
		msm_subsystem_map_buffer((unsigned long)physical_addr,
		alloc_size, flags, NULL, 0);
		if (IS_ERR(buff_addr->mapped_buffer)) {
			ERR("\n%s() buffer map failed\n", __func__);
			goto free_pmem_buffer;
		}
		mapped_buffer = buff_addr->mapped_buffer;
		if (!mapped_buffer->vaddr) {
			ERR("\n%s() mapped virtual address is NULL\n",
				__func__);
			goto unmap_pmem_buffer;
		}
		buff_addr->virtual_base_addr = mapped_buffer->vaddr;
		DBG("ddl_pmem_alloc: mem_type(0x%x), phys(0x%x),"\
			" virt(0x%x), sz(%u), align(%u)",
			(u32)buff_addr->mem_type,
			(u32)buff_addr->physical_base_addr,
			(u32)buff_addr->virtual_base_addr,
			alloc_size, SZ_4K);
	}

	memset(buff_addr->virtual_base_addr, 0, sz + guard_bytes);
	buff_addr->buffer_size = sz;
	buff_addr->align_physical_addr = (u32 *)
		(((u32)buff_addr->physical_base_addr + guard_bytes) &
		align_mask);
	align_offset = (u32) (buff_addr->align_physical_addr) -
		(u32)buff_addr->physical_base_addr;
	buff_addr->align_virtual_addr =
	    (u32 *) ((u32) (buff_addr->virtual_base_addr)
		     + align_offset);
	DBG("%s(): phys(0x%x) align_phys(0x%x), virt(0x%x),"\
		" align_virt(0x%x)", __func__,
		(u32)buff_addr->physical_base_addr,
		(u32)buff_addr->align_physical_addr,
		(u32)buff_addr->virtual_base_addr,
		(u32)buff_addr->align_virtual_addr);
	return;

unmap_pmem_buffer:
	if (buff_addr->mapped_buffer)
		msm_subsystem_unmap_buffer(buff_addr->mapped_buffer);
free_pmem_buffer:
	if (buff_addr->physical_base_addr)
		free_contiguous_memory_by_paddr((unsigned long)
			buff_addr->physical_base_addr);
	memset(buff_addr, 0, sizeof(struct ddl_buf_addr));
	return;

unmap_ion_buffer:
	if (ddl_context->video_ion_client) {
		if (buff_addr->alloc_handle)
			ion_unmap_kernel(ddl_context->video_ion_client,
				buff_addr->alloc_handle);
	}
free_ion_buffer:
	if (ddl_context->video_ion_client) {
		if (buff_addr->alloc_handle)
			ion_free(ddl_context->video_ion_client,
				buff_addr->alloc_handle);
	}
bailout:
	memset(buff_addr, 0, sizeof(struct ddl_buf_addr));
}
int _IOGetPhyMem(int which, vpu_mem_desc *buff)
{
#ifdef BUILD_FOR_ANDROID
	const size_t pagesize = getpagesize();
	int err, fd;
#ifdef USE_ION
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
	ion_user_handle_t handle;
#else
	struct ion_handle *handle;
#endif
	int share_fd, ret = -1;
	unsigned char *ptr;
#elif USE_GPU
        struct g2d_buf *gbuf;
        int bytes;
#else
	/* Get memory from pmem space for android */
	struct pmem_region region;
#endif

	if ((!buff) || (!buff->size)) {
		err_msg("Error!_IOGetPhyMem:Invalid parameters");
		return -1;
	}

	buff->cpu_addr = 0;
	buff->phy_addr = 0;
	buff->virt_uaddr = 0;

	if (which == VPU_IOC_GET_WORK_ADDR) {
		if (ioctl(vpu_fd, which, buff) < 0) {
			err_msg("mem allocation failed!\n");
			buff->phy_addr = 0;
			buff->cpu_addr = 0;
			return -1;
		}
		return 0;
	}

	if (which != VPU_IOC_PHYMEM_ALLOC) {
		err_msg("Error!_IOGetPhyMem unsupported memtype: %d", which);
		return -1;
	}

	buff->size = (buff->size + pagesize-1) & ~(pagesize - 1);

#ifdef USE_ION
	fd = ion_open();
	if (fd <= 0) {
		err_msg("ion open failed!\n");
		return -1;
	}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
	err = ion_alloc(fd, buff->size, pagesize, 1, 0, &handle);
#else
	err = ion_alloc(fd, buff->size, pagesize, 1, &handle);
#endif
	if (err) {
		err_msg("ion allocation failed!\n");
		goto error;
	}

	err = ion_map(fd, handle, buff->size,
			    PROT_READ|PROT_WRITE, MAP_SHARED,
			    0, &ptr, &share_fd);
	if (err) {
		err_msg("ion map failed!\n");
		goto error;
	}

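	/* Note: this vendor ion_phys() variant returns the physical address
	 * itself, so 0 signals failure (unlike the kernel API used above). */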
	err = ion_phys(fd, handle);
	if (err == 0) {
		err_msg("ion get physical address failed!\n");
		goto error;
	}

	buff->virt_uaddr = (unsigned long)ptr;
	buff->phy_addr = (unsigned long)err;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
	ion_free(fd, handle);
	buff->cpu_addr = (unsigned long)share_fd;
#else
	buff->cpu_addr = (unsigned long)handle;
#endif
	memset((void*)buff->virt_uaddr, 0, buff->size);
	ret = 0;
	info_msg("<ion> alloc handle: 0x%x, paddr: 0x%x, vaddr: 0x%x",
			(unsigned int)handle, (unsigned int)buff->phy_addr,
			(unsigned int)buff->virt_uaddr);
error:
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
	close(share_fd);
#endif
	ion_close(fd);
	return ret;
#elif USE_GPU
        bytes = buff->size + PAGE_SIZE;
        gbuf = g2d_alloc(bytes, 0);
        if(!gbuf) {
            err_msg("%s: gpu allocator failed to alloc buffer with size %d", __FUNCTION__, buff->size);
            return -1;
        }

        buff->virt_uaddr = (unsigned long)gbuf->buf_vaddr;
        buff->phy_addr = (unsigned long)gbuf->buf_paddr;
        buff->cpu_addr = (unsigned long)gbuf;

        //vpu requires page alignment for the address implicitly, round it to page edge
        buff->virt_uaddr = (buff->virt_uaddr + PAGE_SIZE -1) & ~(PAGE_SIZE -1);
        buff->phy_addr = (buff->phy_addr + PAGE_SIZE -1) & ~(PAGE_SIZE -1);
        memset((void*)buff->virt_uaddr, 0, buff->size);

        info_msg("<gpu> alloc handle: 0x%x, paddr: 0x%x, vaddr: 0x%x",
			(unsigned int)gbuf, (unsigned int)buff->phy_addr,
			(unsigned int)buff->virt_uaddr);
        return 0;
#else
	fd = (unsigned long)open("/dev/pmem_adsp", O_RDWR | O_SYNC);
	if (fd < 0) {
		err_msg("Error!_IOGetPhyMem Error,cannot open pmem");
		return -1;
	}

	err = ioctl(fd, PMEM_GET_TOTAL_SIZE, &region);

	buff->virt_uaddr = (unsigned long)mmap(0, buff->size,
			    PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);

	if (buff->virt_uaddr == (unsigned long)MAP_FAILED) {
		err_msg("Error!mmap(fd=%d, size=%u) failed (%s)",
			fd, buff->size, strerror(errno));
		close(fd);
		return -1;
	}

	memset(&region, 0, sizeof(region));

	if (ioctl(fd, PMEM_GET_PHYS, &region) == -1) {
		err_msg("Error!Failed to get physical address of source!");
		munmap((void *)buff->virt_uaddr, buff->size);
		close(fd);
		return -1;
	}

	buff->phy_addr = (unsigned long)region.offset;
	buff->cpu_addr = (unsigned long)fd;
	memset((void*)buff->virt_uaddr, 0, buff->size);
#endif
#else
	if (ioctl(vpu_fd, which, buff) < 0) {
		err_msg("mem allocation failed!\n");
		buff->phy_addr = 0;
		buff->cpu_addr = 0;
		return -1;
	}
	sz_alloc += buff->size;
	dprintf(3, "%s: phy addr = %08lx\n", __func__, buff->phy_addr);
	dprintf(3, "%s: alloc=%d, total=%d\n", __func__, buff->size, sz_alloc);
#endif

	return 0;
}
/*****************************************************************************
 @Function                AllocPages
******************************************************************************/
static IMG_RESULT AllocPages(
	SYSMEM_Heap *		heap,
	IMG_UINT32			ui32Size,
	SYSMEMU_sPages *	psPages,
	SYS_eMemAttrib		eMemAttrib
)
{
    IMG_UINT32           Res;
    struct ion_handle *  ion_handle;
    unsigned             allocFlags;
    struct ion_client *  ion_client;
    IMG_UINT64 *         pCpuPhysAddrs;
    size_t               numPages;
    size_t               physAddrArrSize;

    ion_client = (struct ion_client *)heap->priv;

    if (   (eMemAttrib & SYS_MEMATTRIB_WRITECOMBINE)
        || (eMemAttrib & SYS_MEMATTRIB_UNCACHED))
    {
        allocFlags = 0;
    } else {
        allocFlags = ION_FLAG_CACHED;
    }

    if (eMemAttrib == SYS_MEMATTRIB_UNCACHED)
        REPORT(REPORT_MODULE_SYSMEM, REPORT_WARNING,
               "Purely uncached memory is not supported by ION");

    // PAGE_SIZE alignment, heap depends on platform
    ion_handle = ion_alloc(ion_client, ui32Size, PAGE_SIZE,
    					ION_HEAP_SYSTEM_MASK,
                          allocFlags);
    if (IS_ERR_OR_NULL(ion_handle)) {
        REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR,
               "Error allocating %u bytes from ion", ui32Size);
        Res = IMG_ERROR_OUT_OF_MEMORY;
        goto errAlloc;
    }

    /* Find out physical addresses in the mappable region */
    numPages = (ui32Size + HOST_MMU_PAGE_SIZE - 1)/HOST_MMU_PAGE_SIZE;

    physAddrArrSize = sizeof *pCpuPhysAddrs * numPages;
    pCpuPhysAddrs = IMG_BIGORSMALL_ALLOC(physAddrArrSize);
    if (!pCpuPhysAddrs) {
        Res = IMG_ERROR_OUT_OF_MEMORY;
        goto errPhysArrAlloc;
    }

    {
        struct scatterlist *psScattLs, *psScattLsAux;
        struct sg_table *psSgTable;
        size_t pg_i = 0;

        psSgTable = ion_sg_table(ion_client, ion_handle);
        if (psSgTable == NULL)
        {
            REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error obtaining sg table");
            Res = IMG_ERROR_FATAL;
            goto errGetPhys;
        }
        psScattLs = psSgTable->sgl;

        if (psScattLs == NULL)
        {
            REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error obtaining scatter list");
            Res = IMG_ERROR_FATAL;
            goto errGetPhys;
        }

        // Get physical addresses from scatter list
        for (psScattLsAux = psScattLs; psScattLsAux; psScattLsAux = sg_next(psScattLsAux))
        {
            int offset;
            dma_addr_t chunkBase = sg_phys(psScattLsAux);

            for (offset = 0; offset < psScattLsAux->length; offset += PAGE_SIZE, ++pg_i)
            {
                if (pg_i >= numPages)
                    break;

                //pCpuPhysAddrs[pg_i] = dma_map_page(NULL, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
                pCpuPhysAddrs[pg_i] = chunkBase + offset;
            }
            if (pg_i >= numPages)
                break;
        }
    }

    // Set pointer to physical address in structure
    psPages->ppaPhysAddr = pCpuPhysAddrs;

    DEBUG_REPORT(REPORT_MODULE_SYSMEM, "%s region of size %u phys 0x%llx",
                 __FUNCTION__, ui32Size, psPages->ppaPhysAddr[0]);

    Res = SYSBRGU_CreateMappableRegion(psPages->ppaPhysAddr[0], ui32Size, eMemAttrib,
    						psPages, &psPages->hRegHandle);
    if (Res != IMG_SUCCESS) {
        REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR,
               "Error %u in SYSBRGU_CreateMappableRegion", Res);
        goto errCreateMapRegion;
    }

    psPages->pvImplData = ion_handle;

    return IMG_SUCCESS;

errCreateMapRegion:
errGetPhys:
    IMG_BIGORSMALL_FREE(numPages*sizeof(*pCpuPhysAddrs), pCpuPhysAddrs);
errPhysArrAlloc:
    ion_unmap_kernel(ion_client, ion_handle);
    ion_free(ion_client, ion_handle);
errAlloc:
    return Res;
}