/* Bind an existing UMP allocation, identified by its secure ID, to a UMP memory
 * backend and map it into the owning session's Mali address space. */
int mali_memory_bind_ump_buf(mali_mem_allocation *alloc, mali_mem_backend *mem_backend, u32 secure_id, u32 flags)
{
	ump_dd_handle ump_mem;
	int ret;
	MALI_DEBUG_ASSERT_POINTER(alloc);
	MALI_DEBUG_ASSERT_POINTER(mem_backend);
	MALI_DEBUG_ASSERT(MALI_MEM_UMP == mem_backend->type);

	MALI_DEBUG_PRINT(3,
			 ("Requested to map ump memory with secure id %d into virtual memory 0x%08X, size 0x%08X\n",
			  secure_id, alloc->mali_vma_node.vm_node.start, alloc->mali_vma_node.vm_node.size));

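	/* Look up the UMP allocation by its secure ID; this takes a UMP reference
	 * that must be released again if the bind fails. */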
	ump_mem = ump_dd_handle_create_from_secure_id(secure_id);
	if (UMP_DD_HANDLE_INVALID == ump_mem) MALI_ERROR(_MALI_OSK_ERR_FAULT);
	alloc->flags |= MALI_MEM_FLAG_DONT_CPU_MAP;
	if (flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
		alloc->flags |= MALI_MEM_FLAG_MALI_GUARD_PAGE;
	}

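	/* Hand the UMP handle to the backend and create the Mali (GPU) MMU mapping. */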
	mem_backend->ump_mem.handle = ump_mem;

	ret = mali_mem_ump_map(mem_backend);
	if (0 != ret) {
		ump_dd_reference_release(ump_mem);
		return _MALI_OSK_ERR_FAULT;
	}
	MALI_DEBUG_PRINT(3, ("Returning from UMP bind\n"));
	return _MALI_OSK_ERR_OK;
}
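
/* Release a UMP-backed memory backend: unmap it from the Mali MMU and drop the
 * UMP reference taken when the buffer was bound. */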
void mali_mem_ump_release(mali_mem_backend *mem_backend)
{
	ump_dd_handle ump_mem;
	mali_mem_allocation *alloc;
	MALI_DEBUG_ASSERT_POINTER(mem_backend);
	MALI_DEBUG_ASSERT(MALI_MEM_UMP == mem_backend->type);
	ump_mem = mem_backend->ump_mem.handle;
	MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID != ump_mem);

	alloc = mem_backend->mali_allocation;
	MALI_DEBUG_ASSERT_POINTER(alloc);
	mali_mem_ump_unmap(alloc);
	ump_dd_reference_release(ump_mem);
}
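
/* Descriptor-based unmap path: remove the Mali MMU mapping of a UMP-backed
 * allocation for the given session and release the UMP reference. */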
void mali_ump_unmap(struct mali_session_data *session, mali_mem_allocation *descriptor)
{
	ump_dd_handle ump_mem;

	MALI_DEBUG_ASSERT_POINTER(session);
	MALI_DEBUG_ASSERT_POINTER(descriptor);

	ump_mem = descriptor->ump_mem.handle;
	MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID != ump_mem);

	/* Remove the Mali MMU mapping, then drop the UMP reference taken at attach time. */
	mali_mem_mali_map_free(descriptor);

	ump_dd_reference_release(ump_mem);
}
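
/* UK (user/kernel) interface: attach an existing UMP allocation, identified by
 * its secure ID, to the calling session at the requested Mali virtual address. */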
_mali_osk_errcode_t _mali_ukk_attach_ump_mem(_mali_uk_attach_ump_mem_s *args)
{
	ump_dd_handle ump_mem;
	struct mali_session_data *session;
	mali_mem_allocation *descriptor;
	int md, ret;

	MALI_DEBUG_ASSERT_POINTER(args);
	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);

	session = (struct mali_session_data *)args->ctx;
	MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);

	/* Check arguments; NULL might be a valid Mali address. */
	if (!args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);

	/* The size must be a multiple of the system page size. */
	if (args->size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);

	MALI_DEBUG_PRINT(3,
			 ("Requested to map ump memory with secure id %d into virtual memory 0x%08X, size 0x%08X\n",
			  args->secure_id, args->mali_address, args->size));

	/* Look up the UMP allocation by secure ID; this takes a reference that must
	 * be released on every error path below. */
	ump_mem = ump_dd_handle_create_from_secure_id((int)args->secure_id);

	if (UMP_DD_HANDLE_INVALID == ump_mem) MALI_ERROR(_MALI_OSK_ERR_FAULT);

	descriptor = mali_mem_descriptor_create(session, MALI_MEM_UMP);
	if (NULL == descriptor) {
		ump_dd_reference_release(ump_mem);
		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
	}

	descriptor->ump_mem.handle = ump_mem;
	descriptor->mali_mapping.addr = args->mali_address;
	descriptor->size = args->size;
	descriptor->mali_mapping.properties = MALI_MMU_FLAGS_DEFAULT;
	descriptor->flags |= MALI_MEM_FLAG_DONT_CPU_MAP;

	if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
		descriptor->flags |= MALI_MEM_FLAG_MALI_GUARD_PAGE;
	}

	_mali_osk_mutex_wait(session->memory_lock);

	/* Set up the Mali MMU mapping for this session while holding the memory lock. */
	ret = mali_ump_map(session, descriptor);
	if (0 != ret) {
		_mali_osk_mutex_signal(session->memory_lock);
		ump_dd_reference_release(ump_mem);
		mali_mem_descriptor_destroy(descriptor);
		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
	}

	_mali_osk_mutex_signal(session->memory_lock);


	/* Register the descriptor so user space can refer to the attachment by cookie. */
	if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &md)) {
		/* Undo the Mali mapping (which also drops the UMP reference) before
		 * destroying the descriptor. */
		_mali_osk_mutex_wait(session->memory_lock);
		mali_ump_unmap(session, descriptor);
		_mali_osk_mutex_signal(session->memory_lock);
		mali_mem_descriptor_destroy(descriptor);
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}

	args->cookie = md;

	MALI_DEBUG_PRINT(5, ("Returning from UMP attach\n"));

	MALI_SUCCESS;
}
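
/* ioctl wrapper: import a dma-buf file descriptor and wrap its memory in a UMP
 * handle so that UMP-based users can access the buffer. */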
int ump_dmabuf_import_wrapper(u32 __user *argument,
				struct ump_session_data  *session_data)
{
	ump_session_memory_list_element *session = NULL;
	struct ump_uk_dmabuf ump_dmabuf;
	ump_dd_handle ump_handle;
	ump_dd_physical_block *blocks;
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	unsigned long block_size;
	/* FIXME: no real struct device is available here; an uninitialized
	 * placeholder is attached to the dma-buf below. */
	struct device dev;
	unsigned int i = 0, npages;
	int ret;

	/* Sanity check input parameters */
	if (!argument || !session_data) {
		MSG_ERR(("NULL parameter.\n"));
		return -EINVAL;
	}

	if (copy_from_user(&ump_dmabuf, argument,
				sizeof(struct ump_uk_dmabuf))) {
		MSG_ERR(("copy_from_user() failed.\n"));
		return -EFAULT;
	}

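	/* Take a reference on the dma-buf behind the user-supplied file descriptor. */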
	dma_buf = dma_buf_get(ump_dmabuf.fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	/*
	 * check whether dma_buf imported already exists or not.
	 *
	 * TODO
	 * if already imported then dma_buf_put() should be called
	 * and then just return dma_buf imported.
	 */

	attach = dma_buf_attach(dma_buf, &dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto err_dma_buf_put;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_dma_buf_detach;
	}

	npages = sgt->nents;

	/* really need? */
	ump_dmabuf.ctx = (void *)session_data;

	block_size = sizeof(ump_dd_physical_block) * npages;

	blocks = (ump_dd_physical_block *)_mali_osk_malloc(block_size);

	if (NULL == blocks) {
		MSG_ERR(("Failed to allocate blocks\n"));
		ret = -ENOMEM;
		goto err_dma_buf_unmap;
	}

	sgl = sgt->sgl;

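	/* Walk the scatterlist and describe each segment as a UMP physical block
	 * (physical address and length). */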
	while (i < npages) {
		blocks[i].addr = sg_phys(sgl);
		blocks[i].size = sg_dma_len(sgl);
		sgl = sg_next(sgl);
		i++;
	}

	/*
	 * Initialize the session memory list element, and add it
	 * to the session object
	 */
	session = _mali_osk_calloc(1, sizeof(*session));
	if (!session) {
		DBG_MSG(1, ("Failed to allocate session.\n"));
		ret = -ENOMEM;
		goto err_free_block;
	}

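	/* Wrap the collected physical blocks in a UMP handle; this handle represents
	 * the imported dma-buf from here on. */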
	ump_handle = ump_dd_handle_create_from_phys_blocks(blocks, i);
	if (UMP_DD_HANDLE_INVALID == ump_handle) {
		DBG_MSG(1, ("Failed to create ump handle.\n"));
		ret = -EFAULT;
		goto err_free_session;
	}

	ump_dmabuf.ump_handle = (uint32_t)ump_handle;
	ump_dmabuf.size = ump_dd_size_get(ump_handle);

	/* Report the result to user space before publishing the handle on the
	 * session list, so a copy failure can still unwind cleanly through the
	 * labels below without double-freeing blocks or leaving a freed element
	 * on the list. */
	if (copy_to_user(argument, &ump_dmabuf,
				sizeof(struct ump_uk_dmabuf))) {
		MSG_ERR(("copy_to_user() failed.\n"));
		ret = -EFAULT;
		goto err_release_ump_handle;
	}

	session->mem = (ump_dd_mem *)ump_handle;

	_mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
	_mali_osk_list_add(&(session->list),
			&(session_data->list_head_session_memory_list));
	_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);

	_mali_osk_free(blocks);

	return 0;

err_release_ump_handle:
	ump_dd_reference_release(ump_handle);
err_free_session:
	_mali_osk_free(session);
err_free_block:
	_mali_osk_free(blocks);
err_dma_buf_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_dma_buf_detach:
	dma_buf_detach(dma_buf, attach);
err_dma_buf_put:
	dma_buf_put(dma_buf);
	return ret;
}