/*
 * IOCTL operation: import an ion fd to UMP memory
 */
int ump_ion_import_wrapper(u32 __user *argument, struct ump_session_data *session_data)
{
	_ump_uk_ion_import_s user_interaction;
	ump_dd_handle *ump_handle;
	ump_dd_physical_block * blocks;
	unsigned long num_blocks;
	struct ion_handle *ion_hnd;
	struct scatterlist *sg;
	struct scatterlist *sg_ion;
	unsigned long i = 0;

	ump_session_memory_list_element * session_memory_element = NULL;
	if (ion_client_ump == NULL)
		ion_client_ump = ion_client_create(ion_exynos, -1, "ump");

	/* Sanity check input parameters */
	if (NULL == argument || NULL == session_data)
	{
		MSG_ERR(("NULL parameter in ump_ioctl_allocate()\n"));
		return -ENOTTY;
	}

	/* Copy the user space memory to kernel space (so we safely can read it) */
	if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
	{
		MSG_ERR(("copy_from_user() in ump_ioctl_allocate()\n"));
		return -EFAULT;
	}

	user_interaction.ctx = (void *) session_data;

	/* Translate the ion fd to a UMP secure ID */
	ion_hnd = ion_import_fd(ion_client_ump, user_interaction.ion_fd);
	if (IS_ERR_OR_NULL(ion_hnd))
	{
		MSG_ERR(("ion_import_fd() failed in ump_ion_import_wrapper()\n"));
		return -EFAULT;
	}

	sg_ion = ion_map_dma(ion_client_ump, ion_hnd);
	if (IS_ERR_OR_NULL(sg_ion))
	{
		ion_free(ion_client_ump, ion_hnd);
		MSG_ERR(("ion_map_dma() failed in ump_ion_import_wrapper()\n"));
		return -EFAULT;
	}

	blocks = (ump_dd_physical_block*)_mali_osk_malloc(sizeof(ump_dd_physical_block)*1024);

	if (NULL == blocks) {
		ion_unmap_dma(ion_client_ump, ion_hnd);
		ion_free(ion_client_ump, ion_hnd);
		MSG_ERR(("Failed to allocate blocks in ump_ion_import_wrapper()\n"));
		return -ENOMEM;
	}

	sg = sg_ion;
	do {
		blocks[i].addr = sg_phys(sg);
		blocks[i].size = sg_dma_len(sg);
		i++;
		if (i >= 1024) {
			_mali_osk_free(blocks);
			ion_unmap_dma(ion_client_ump, ion_hnd);
			ion_free(ion_client_ump, ion_hnd);
			MSG_ERR(("Too many scatterlist entries in ump_ion_import_wrapper()\n"));
			return -EFAULT;
		}
		sg = sg_next(sg);
	} while(sg);

	num_blocks = i;

	/* Initialize the session_memory_element, and add it to the session object */
	session_memory_element = _mali_osk_calloc(1, sizeof(ump_session_memory_list_element));

	if (NULL == session_memory_element)
	{
		_mali_osk_free(blocks);
		ion_unmap_dma(ion_client_ump, ion_hnd);
		ion_free(ion_client_ump, ion_hnd);
		DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ion_import_wrapper()\n"));
		return -ENOMEM;
	}

	ump_handle = ump_dd_handle_create_from_phys_blocks(blocks, num_blocks);
	if (UMP_DD_HANDLE_INVALID == ump_handle)
	{
		_mali_osk_free(session_memory_element);
		_mali_osk_free(blocks);
		ion_unmap_dma(ion_client_ump, ion_hnd);
		ion_free(ion_client_ump, ion_hnd);
		DBG_MSG(1, ("ump_dd_handle_create_from_phys_blocks() failed in ump_ion_import_wrapper()\n"));
		return -EFAULT;
	}

	session_memory_element->mem = (ump_dd_mem*)ump_handle;
	_mali_osk_mutex_wait(session_data->lock);
	_mali_osk_list_add(&(session_memory_element->list), &(session_data->list_head_session_memory_list));
	_mali_osk_mutex_signal(session_data->lock);
	ion_unmap_dma(ion_client_ump, ion_hnd);
	ion_free(ion_client_ump, ion_hnd);

	_mali_osk_free(blocks);

	user_interaction.secure_id = ump_dd_secure_id_get(ump_handle);
	user_interaction.size = ump_dd_size_get(ump_handle);
	user_interaction.ctx = NULL;

	if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
	{
		/* If the copy fails the caller never sees the secure ID; the memory
		 * is already on the session list and will still be released through
		 * the normal IOCTL release path / session teardown. */

		MSG_ERR(("copy_to_user() failed in ump_ion_import_wrapper()\n"));

		return -EFAULT;
	}
	return 0; /* success */
}
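The wrapper above is normally reached from the driver's ioctl dispatcher, which pulls the session data out of the file's private_data and hands the user argument pointer through. A minimal sketch of that call site, assuming a command value named UMP_IOC_ION_IMPORT (the real name and number live in the UMP ioctl headers):

static long ump_file_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	u32 __user *argument = (u32 __user *)arg;
	struct ump_session_data *session_data = (struct ump_session_data *)filp->private_data;

	switch (cmd) {
	case UMP_IOC_ION_IMPORT: /* assumed command name */
		return ump_ion_import_wrapper(argument, session_data);
	default:
		return -ENOTTY;
	}
}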
Example #2
static void *vb2_ion_get_userptr(void *alloc_ctx, unsigned long vaddr,
				 unsigned long size, int write)
{
	struct vb2_ion_conf *conf = alloc_ctx;
	struct vb2_ion_buf *buf = NULL;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	size_t len;
	int ret = 0;
	bool malloced = false;
	struct scatterlist *sg;

	/* Create vb2_ion_buf */
	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf) {
		pr_err("kzalloc failed\n");
		return ERR_PTR(-ENOMEM);
	}

	/* Get an ion handle from the user virtual address */
	buf->handle = ion_import_uva(conf->client, vaddr);
	if (IS_ERR(buf->handle)) {
		if ((PTR_ERR(buf->handle) == -ENXIO) && conf->use_mmu) {
			int flags = ION_HEAP_EXYNOS_USER_MASK;

			if (write)
				flags |= ION_EXYNOS_WRITE_MASK;

			buf->handle = ion_exynos_get_user_pages(conf->client,
							vaddr, size, flags);
			if (IS_ERR(buf->handle))
				ret = PTR_ERR(buf->handle);
		} else {
			ret = -EINVAL;
		}

		if (ret) {
			pr_err("%s: Failed to retrieving non-ion user buffer @ "
				"0x%lx (size:0x%lx, dev:%s, errno %ld)\n",
				__func__, vaddr, size, dev_name(conf->dev),
					PTR_ERR(buf->handle));
			goto err_import_uva;
		}

		malloced = true;
	}

	/* TODO: Need to check whether already DVA is created or not */

	buf->sg = ion_map_dma(conf->client, buf->handle);
	if (IS_ERR(buf->sg)) {
		ret = -ENOMEM;
		goto err_map_dma;
	}
	dbg(6, "PA(0x%x) size(%x)\n", buf->sg->dma_address, buf->sg->length);

	sg = buf->sg;
	do {
		buf->nents++;
	} while ((sg = sg_next(sg)));

	/* Map DVA */
	if (conf->use_mmu) {
		buf->dva = iovmm_map(conf->dev, buf->sg);
		if (!buf->dva) {
			pr_err("iovmm_map: conf->name(%s)\n", conf->name);
			ret = -ENOMEM;
			goto err_ion_map_dva;
		}
		dbg(6, "DVA(0x%x)\n", buf->dva);
	} else {
		ret = ion_phys(conf->client, buf->handle,
			       (unsigned long *)&buf->dva, &len);
		if (ret) {
			pr_err("ion_phys: conf->name(%s)\n", conf->name);
			goto err_ion_map_dva;
		}
	}

	if (!malloced) {
		/* Get offset from the start */
		down_read(&mm->mmap_sem);
		vma = find_vma(mm, vaddr);
		if (vma == NULL) {
			pr_err("Failed acquiring VMA to get offset 0x%08lx\n",
					vaddr);
			up_read(&mm->mmap_sem);

			if (conf->use_mmu)
				iovmm_unmap(conf->dev, buf->dva);

			ret = -EFAULT;
			goto err_get_vma;
		}
		buf->offset = vaddr - vma->vm_start;
		up_read(&mm->mmap_sem);
	}
	dbg(6, "dva(0x%x), size(0x%x), offset(0x%x)\n",
			(u32)buf->dva, (u32)size, (u32)buf->offset);

	/* Set vb2_ion_buf */
	ret = _vb2_ion_get_vma(vaddr, size, &vma);
	if (ret) {
		pr_err("Failed acquiring VMA 0x%08lx\n", vaddr);

		if (conf->use_mmu)
			iovmm_unmap(conf->dev, buf->dva);

		goto err_get_vma;
	}

	buf->vma = vma;
	buf->conf = conf;
	buf->size = size;
	buf->cacheable = conf->cacheable;

	return buf;

err_get_vma:	/* fall through */
err_ion_map_dva:
	ion_unmap_dma(conf->client, buf->handle);

err_map_dma:
	ion_free(conf->client, buf->handle);

err_import_uva:
	kfree(buf);

	return ERR_PTR(ret);
}
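The userptr path above acquires resources in a fixed order: ion handle, DMA mapping, optional IOVMM mapping, VMA reference. A matching put has to release them in reverse; the sketch below illustrates that ordering, with the helper name and the _vb2_ion_put_vma() call assumed rather than taken from the driver:

static void vb2_ion_put_userptr_sketch(void *buf_priv)
{
	struct vb2_ion_buf *buf = buf_priv;
	struct vb2_ion_conf *conf = buf->conf;

	/* Assumed counterpart of _vb2_ion_get_vma() */
	_vb2_ion_put_vma(buf->vma);

	if (conf->use_mmu)
		iovmm_unmap(conf->dev, buf->dva);	/* undo iovmm_map() */

	ion_unmap_dma(conf->client, buf->handle);	/* undo ion_map_dma() */
	ion_free(conf->client, buf->handle);		/* drop the ion handle */
	kfree(buf);
}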
Example #3
static void *vb2_ion_alloc(void *alloc_ctx, unsigned long size)
{
	struct vb2_ion_conf	*conf = alloc_ctx;
	struct vb2_ion_buf	*buf;
	struct scatterlist	*sg;
	size_t	len;
	u32 heap = 0;
	int ret = 0;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf) {
		pr_err("no memory for vb2_ion_conf\n");
		return ERR_PTR(-ENOMEM);
	}

	/* Set vb2_ion_buf */
	buf->conf = conf;
	buf->size = size;
	buf->cacheable = conf->cacheable;

	/* Allocate: physical memory */
	if (conf->contig)
		heap = ION_HEAP_EXYNOS_CONTIG_MASK;
	else
		heap = ION_HEAP_EXYNOS_MASK;

	buf->handle = ion_alloc(conf->client, size, conf->align, heap);
	if (IS_ERR(buf->handle)) {
		pr_err("ion_alloc of size %ld\n", size);
		ret = -ENOMEM;
		goto err_alloc;
	}

	/* Getting scatterlist */
	buf->sg = ion_map_dma(conf->client, buf->handle);
	if (IS_ERR(buf->sg)) {
		pr_err("ion_map_dma conf->name(%s)\n", conf->name);
		ret = -ENOMEM;
		goto err_map_dma;
	}
	dbg(6, "PA(0x%x), SIZE(%x)\n", buf->sg->dma_address, buf->sg->length);

	sg = buf->sg;
	do {
		buf->nents++;
	} while ((sg = sg_next(sg)));
	dbg(6, "buf->nents(0x%x)\n", buf->nents);

	/* Map DVA */
	if (conf->use_mmu) {
		buf->dva = iovmm_map(conf->dev, buf->sg);
		if (!buf->dva) {
			pr_err("iovmm_map: conf->name(%s)\n", conf->name);
			ret = -ENOMEM;
			goto err_ion_map_dva;
		}
		dbg(6, "DVA(0x%x)\n", buf->dva);
	} else {
		ret = ion_phys(conf->client, buf->handle,
			       (unsigned long *)&buf->dva, &len);
		if (ret) {
			pr_err("ion_phys: conf->name(%s)\n", conf->name);
			goto err_ion_map_dva;
		}
	}

	/* Set struct vb2_vmarea_handler */
	buf->handler.refcount = &buf->ref;
	buf->handler.put = vb2_ion_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->ref);

	return buf;

err_ion_map_dva:
	ion_unmap_dma(conf->client, buf->handle);

err_map_dma:
	ion_free(conf->client, buf->handle);

err_alloc:
	kfree(buf);

	return ERR_PTR(ret);
}
Example #4
PVRSRV_ERROR IonImportBufferAndAquirePhysAddr(IMG_HANDLE hIonDev,
											  IMG_HANDLE hIonFD,
											  IMG_UINT32 *pui32PageCount,
											  IMG_SYS_PHYADDR **ppasSysPhysAddr,
											  IMG_PVOID *ppvKernAddr,
											  IMG_HANDLE *phPriv)
{
	struct ion_client *psIonClient = hIonDev;
	struct ion_handle *psIonHandle;
	struct scatterlist *psScatterList;
	struct scatterlist *psTemp;
	IMG_SYS_PHYADDR *pasSysPhysAddr = NULL;
	ION_IMPORT_DATA *psImportData;
	PVRSRV_ERROR eError;
	IMG_UINT32 ui32PageCount = 0;
	IMG_UINT32 i;
	IMG_PVOID pvKernAddr;
	int fd = (int) hIonFD;

	psImportData = kmalloc(sizeof(ION_IMPORT_DATA), GFP_KERNEL);
	if (psImportData == NULL)
	{
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	/* Get the buffer handle */
	psIonHandle = ion_import_fd(psIonClient, fd);
	if (psIonHandle == IMG_NULL)
	{
		eError = PVRSRV_ERROR_BAD_MAPPING;
		goto exitFailImport;
	}

	/* Create data for free callback */
	psImportData->psIonClient = psIonClient;
	psImportData->psIonHandle = psIonHandle;	

	psScatterList = ion_map_dma(psIonClient, psIonHandle);
	if (psScatterList == NULL)
	{
		eError = PVRSRV_ERROR_INVALID_PARAMS;
		goto exitFailMap;
	}

	/*
		We do a two-pass process: first work out how many pages there
		are, then fill in the data.
	*/
	for (i=0;i<2;i++)
	{
		psTemp = psScatterList;
		if (i == 1)
		{
			pasSysPhysAddr = kmalloc(sizeof(IMG_SYS_PHYADDR) * ui32PageCount, GFP_KERNEL);
			if (pasSysPhysAddr == NULL)
			{
				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
				goto exitFailAlloc;
			}
			ui32PageCount = 0;	/* Reset the page count as we use it for the index */
		}

		while(psTemp)
		{
			IMG_UINT32 j;

			for (j=0;j<psTemp->length;j+=PAGE_SIZE)
			{
				if (i == 1)
				{
					/* Pass 2: Get the page data */
					pasSysPhysAddr[ui32PageCount].uiAddr = sg_phys(psTemp) + j;
				}
				ui32PageCount++;
			}
			psTemp = sg_next(psTemp);
		}
	}

	pvKernAddr = ion_map_kernel(psIonClient, psIonHandle);
	if (IS_ERR(pvKernAddr))
	{
		pvKernAddr = IMG_NULL;
	}

	psImportData->pvKernAddr = pvKernAddr;

	*ppvKernAddr = pvKernAddr;
	*pui32PageCount = ui32PageCount;
	*ppasSysPhysAddr = pasSysPhysAddr;
	*phPriv = psImportData;
	return PVRSRV_OK;

exitFailAlloc:
	ion_unmap_dma(psIonClient, psIonHandle);
exitFailMap:
	ion_free(psIonClient, psIonHandle);
exitFailImport:
	kfree(psImportData);
	return eError;
}
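The two-pass loop above first counts PAGE_SIZE pages and then fills in the per-page addresses. The counting pass can also be written as a small stand-alone helper; the name below is illustrative only:

static IMG_UINT32 IonScatterListPageCount(struct scatterlist *psScatterList)
{
	struct scatterlist *psTemp;
	IMG_UINT32 ui32PageCount = 0;

	for (psTemp = psScatterList; psTemp; psTemp = sg_next(psTemp))
	{
		/* Each scatterlist entry may cover several pages */
		ui32PageCount += PAGE_ALIGN(psTemp->length) >> PAGE_SHIFT;
	}

	return ui32PageCount;
}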
Example #5
/*
	Obtain a list of physical pages from the ion
	handle.
*/
static
PVRSRV_ERROR IonPhysAddrAcquire(PMR_ION_DATA *psPrivData,
							   int fd)
{
	struct scatterlist *psScatterList;
	struct scatterlist *psTemp;
	IMG_CPU_PHYADDR psCpuPhysAddr;
	IMG_DEV_PHYADDR *pasDevPhysAddr = NULL;
	PVRSRV_ERROR eError;
	IMG_UINT32 ui32PageCount = 0;
	IMG_UINT32 i;

	psScatterList = ion_map_dma(psPrivData->psIonClient, psPrivData->psIonHandle);
	if (psScatterList == NULL)
	{
		eError = PVRSRV_ERROR_INVALID_PARAMS;
		goto exitFailMap;
	}

	/*
		We do a two-pass process: first work out how many pages there
		are, then fill in the data.
	*/
	for (i=0;i<2;i++)
	{
		psTemp = psScatterList;
		if (i == 1)
		{
			pasDevPhysAddr = kmalloc(sizeof(IMG_DEV_PHYADDR) * ui32PageCount, GFP_KERNEL);
			if (pasDevPhysAddr == NULL)
			{
				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
				goto exitFailAlloc;
			}
			ui32PageCount = 0;	/* Reset the page count as we use it for the index */
		}

		while(psTemp)
		{
			IMG_UINT32 j;

			for (j=0;j<psTemp->length;j+=PAGE_SIZE)
			{
				if (i == 1)
				{
					/* Pass 2: Get the page data */
					psCpuPhysAddr.uiAddr = sg_phys(psTemp) + j;

					/*
						Note:

						We have made an assumption that the physical address
						returned by the Ion buffer is the right address for
						the device to use.
						
						For UMA this is true.
						
						For LMA this can also be true if the ion buffer returned
						device physical address. However, this would stop us being
						able to map LMA buffers into Ion devices that aren't using
						the device address. For now there is no way to know if
						this buffer is LMA or not so we don't know if a translation
						needs to be done.
					*/
						
					pasDevPhysAddr[ui32PageCount].uiAddr = psCpuPhysAddr.uiAddr;
				}
				ui32PageCount++;
			}
			psTemp = sg_next(psTemp);
		}
	}

	psPrivData->pasDevPhysAddr = pasDevPhysAddr;
	psPrivData->ui32PageCount = ui32PageCount;
	psPrivData->uiSize = ui32PageCount * PAGE_SIZE;

	return PVRSRV_OK;

exitFailAlloc:
	ion_unmap_dma(psPrivData->psIonClient, psPrivData->psIonHandle);
exitFailMap:

	PVR_ASSERT(eError != PVRSRV_OK);
	return eError;
}
Example #6
PVRSRV_ERROR IonImportBufferAndAcquirePhysAddr(IMG_HANDLE hIonDev,
											   IMG_UINT32 ui32NumFDs,
											   IMG_INT32  *pai32BufferFDs,
											   IMG_UINT32 *pui32PageCount,
											   IMG_SYS_PHYADDR **ppsSysPhysAddr,
											   IMG_PVOID  *ppvKernAddr0,
											   IMG_HANDLE *phPriv,
											   IMG_HANDLE *phUnique)
{
	struct scatterlist *psTemp, *psScatterList[MAX_IMPORT_ION_FDS] = {};
	PVRSRV_ERROR eError = PVRSRV_ERROR_OUT_OF_MEMORY;
	struct ion_client *psIonClient = hIonDev;
	IMG_UINT32 i, k, ui32PageCount = 0;
	ION_IMPORT_DATA *psImportData;

	if(ui32NumFDs > MAX_IMPORT_ION_FDS)
	{
		printk(KERN_ERR "%s: More ion export fds passed in than supported "
						"(%d provided, %d max)\n", __func__, ui32NumFDs,
						MAX_IMPORT_ION_FDS);
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	psImportData = kzalloc(sizeof(ION_IMPORT_DATA), GFP_KERNEL);
	if (psImportData == NULL)
	{
		goto exitFailKMallocImportData;
	}

	/* Set up import data for free call */
	psImportData->psIonClient = psIonClient;
	psImportData->ui32NumIonHandles = ui32NumFDs;

	for(i = 0; i < ui32NumFDs; i++)
	{
		int fd = (int)pai32BufferFDs[i];

		psImportData->apsIonHandle[i] = ion_import_fd(psIonClient, fd);
		if (psImportData->apsIonHandle[i] == IMG_NULL)
		{
			eError = PVRSRV_ERROR_BAD_MAPPING;
			goto exitFailImport;
		}

		psScatterList[i] = ion_map_dma(psIonClient, psImportData->apsIonHandle[i]);
		if (psScatterList[i] == NULL)
		{
			eError = PVRSRV_ERROR_INVALID_PARAMS;
			goto exitFailImport;
		}

		for(psTemp = psScatterList[i]; psTemp; psTemp = sg_next(psTemp))
		{
			IMG_UINT32 j;
			for (j = 0; j < psTemp->length; j += PAGE_SIZE)
			{
				ui32PageCount++;
			}
		}
	}

	BUG_ON(ui32PageCount == 0);

	psImportData->psSysPhysAddr = kmalloc(sizeof(IMG_SYS_PHYADDR) * ui32PageCount, GFP_KERNEL);
	if (psImportData->psSysPhysAddr == NULL)
	{
		goto exitFailImport;
	}

	for(i = 0, k = 0; i < ui32NumFDs; i++)
	{
		for(psTemp = psScatterList[i]; psTemp; psTemp = sg_next(psTemp))
		{
			IMG_UINT32 j;
			for (j = 0; j < psTemp->length; j += PAGE_SIZE)
			{
				psImportData->psSysPhysAddr[k].uiAddr = sg_phys(psTemp) + j;
				k++;
			}
		}
	}

	*pui32PageCount = ui32PageCount;
	*ppsSysPhysAddr = psImportData->psSysPhysAddr;

	if(ui32NumFDs == 1)
	{
		IMG_PVOID pvKernAddr0;

		pvKernAddr0 = ion_map_kernel(psIonClient, psImportData->apsIonHandle[0]);
		if (IS_ERR(pvKernAddr0))
		{
			pvKernAddr0 = IMG_NULL;
		}

		psImportData->pvKernAddr0 = pvKernAddr0;
		*ppvKernAddr0 = pvKernAddr0;
	}
	else
	{
		*ppvKernAddr0 = NULL;
	}

	*phPriv = psImportData;
	*phUnique = (IMG_HANDLE)psImportData->psSysPhysAddr[0].uiAddr;

	return PVRSRV_OK;

exitFailImport:
	for(i = 0; psImportData->apsIonHandle[i] != NULL; i++)
	{
		if(psScatterList[i])
			ion_unmap_dma(psIonClient, psImportData->apsIonHandle[i]);
		ion_free(psIonClient, psImportData->apsIonHandle[i]);
	}
	kfree(psImportData);
exitFailKMallocImportData:
	return eError;
}
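A sketch of the matching release path for the import data built above (hypothetical helper; the real driver wires this up through its own free callback). It undoes ion_map_kernel(), ion_map_dma() and ion_import_fd() for every handle before dropping the bookkeeping:

static void IonReleaseImportDataSketch(ION_IMPORT_DATA *psImportData)
{
	IMG_UINT32 i;

	/* pvKernAddr0 is only set when a single fd was imported */
	if (psImportData->pvKernAddr0)
		ion_unmap_kernel(psImportData->psIonClient, psImportData->apsIonHandle[0]);

	for (i = 0; i < psImportData->ui32NumIonHandles; i++)
	{
		ion_unmap_dma(psImportData->psIonClient, psImportData->apsIonHandle[i]);
		ion_free(psImportData->psIonClient, psImportData->apsIonHandle[i]);
	}

	kfree(psImportData->psSysPhysAddr);
	kfree(psImportData);
}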