int show_framebuffer_on_crtc(struct drm_crtc *crtc,
				struct drm_framebuffer *fb, bool page_flip,
				struct drm_pending_vblank_event *event)
{
	struct pl111_gem_bo *bo;
	struct pl111_drm_flip_resource *flip_res;
	int flips_in_flight;
	int old_flips_in_flight;

	crtc->fb = fb;

	bo = PL111_BO_FROM_FRAMEBUFFER(fb);
	if (bo == NULL) {
		DRM_DEBUG_KMS("Failed to get pl111_gem_bo object\n");
		return -EINVAL;
	}

	/*
	 * If this is a full modeset, wait for all outstanding flips to
	 * complete before continuing. This avoids unnecessary complication
	 * from being able to queue up multiple modesets and queues of mixed
	 * modesets and page flips.
	 *
	 * Modesets should be uncommon and will not be performant anyway, so
	 * making them synchronous should have negligible performance impact.
	 */
	if (!page_flip) {
		int ret = wait_event_killable(priv.wait_for_flips,
				atomic_read(&priv.nr_flips_in_flight) == 0);
		if (ret)
			return ret;
	}

	/*
	 * There can be more 'early display' flips in flight than there are
	 * buffers, and there is (currently) no explicit bound on the number
	 * of flips, so each flip needs its own allocation.
	 *
	 * Note: this could be optimised if a bound on the number of flips
	 * were known, since an application can only keep so many buffers in
	 * flight before it stops being useful and starts hogging memory.
	 */
	flip_res = kmem_cache_alloc(priv.page_flip_slab, GFP_KERNEL);
	if (flip_res == NULL) {
		pr_err("kmem_cache_alloc failed to alloc - flip ignored\n");
		return -ENOMEM;
	}

	/*
	 * Increment the number of flips in flight, blocking while the count
	 * is at or above NR_FLIPS_IN_FLIGHT_THRESHOLD.
	 */
	do {
		/*
		 * Note: the wait condition deliberately assigns to
		 * flips_in_flight before comparing, so the sampled value can
		 * be reused in the cmpxchg below.
		 */
		int ret = wait_event_killable(priv.wait_for_flips,
				(flips_in_flight =
					atomic_read(&priv.nr_flips_in_flight))
				< NR_FLIPS_IN_FLIGHT_THRESHOLD);
		if (ret != 0) {
			kmem_cache_free(priv.page_flip_slab, flip_res);
			return ret;
		}

		old_flips_in_flight = atomic_cmpxchg(&priv.nr_flips_in_flight,
					flips_in_flight, flips_in_flight + 1);
	} while (old_flips_in_flight != flips_in_flight);

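	/* Fill in everything the flip completion path will need for this flip */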
	flip_res->fb = fb;
	flip_res->crtc = crtc;
	flip_res->page_flip = page_flip;
	flip_res->event = event;
	INIT_LIST_HEAD(&flip_res->link);
	DRM_DEBUG_KMS("DRM alloc flip_res=%p\n", flip_res);
#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
	if (bo->gem_object.export_dma_buf != NULL) {
		struct dma_buf *buf = bo->gem_object.export_dma_buf;
		unsigned long shared[1] = { 0 };
		struct kds_resource *resource_list[1] = {
				get_dma_buf_kds_resource(buf) };
		int err;

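		/* Keep a reference on the dma_buf while the flip is outstanding */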
		get_dma_buf(buf);
		DRM_DEBUG_KMS("Got dma_buf %p\n", buf);

		/* Wait for the KDS resource associated with this buffer */
		err = kds_async_waitall(&flip_res->kds_res_set,
					&priv.kds_cb, flip_res, fb, 1, shared,
					resource_list);
		BUG_ON(err);
	} else {
		struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc);

		DRM_DEBUG_KMS("No dma_buf for this flip\n");

		/* No dma-buf attached so just call the callback directly */
		flip_res->kds_res_set = NULL;
		pl111_crtc->show_framebuffer_cb(flip_res, fb);
	}
#else
	if (bo->gem_object.export_dma_buf != NULL) {
		struct dma_buf *buf = bo->gem_object.export_dma_buf;

		get_dma_buf(buf);
		DRM_DEBUG_KMS("Got dma_buf %p\n", buf);
	} else {
		DRM_DEBUG_KMS("No dma_buf for this flip\n");
	}

	/* Without KDS there is nothing to wait on, so call the callback directly */
	{
		struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc);
		pl111_crtc->show_framebuffer_cb(flip_res, fb);
	}
#endif

	/*
	 * For the same reasons as the wait at the start of this function,
	 * wait for the modeset to complete before continuing.
	 */
	if (!page_flip) {
		int ret = wait_event_killable(priv.wait_for_flips,
				atomic_read(&priv.nr_flips_in_flight) == 0);
		if (ret)
			return ret;
	}

	return 0;
}
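
/*
 * Illustrative sketch (an assumption, not part of the original driver): one
 * way a legacy DRM .page_flip hook could be wired up to the helper above.
 * The function name is purely illustrative.
 *
 *	static int example_crtc_page_flip(struct drm_crtc *crtc,
 *					  struct drm_framebuffer *fb,
 *					  struct drm_pending_vblank_event *event)
 *	{
 *		return show_framebuffer_on_crtc(crtc, fb, true, event);
 *	}
 *
 * A synchronous modeset path would instead pass page_flip == false (and no
 * event), which makes the call block until the flip has completed.
 */
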
/*
 * Start the requested lock.
 *
 * Allocates the required memory, copies the dma_buf fd list from userspace,
 * acquires the related KDS resources, and starts the lock. Returns a new
 * file descriptor representing the lock on success, or a negative error code
 * on failure. (An illustrative usage sketch follows the function body.)
 */
static int dma_buf_lock_dolock(dma_buf_lock_k_request *request)
{
	dma_buf_lock_resource *resource;
	int size;
	int fd;
	int i;
	int ret;

	if (NULL == request->list_of_dma_buf_fds)
	{
		return -EINVAL;
	}
	if (request->count <= 0)
	{
		return -EINVAL;
	}
	if (request->count > DMA_BUF_LOCK_BUF_MAX)
	{
		return -EINVAL;
	}
	if (request->exclusive != DMA_BUF_LOCK_NONEXCLUSIVE &&
	    request->exclusive != DMA_BUF_LOCK_EXCLUSIVE)
	{
		return -EINVAL;
	}

	resource = kzalloc(sizeof(dma_buf_lock_resource), GFP_KERNEL);
	if (NULL == resource)
	{
		return -ENOMEM;
	}

	atomic_set(&resource->locked, 0);
	kref_init(&resource->refcount);
	INIT_LIST_HEAD(&resource->link);
	resource->count = request->count;

	/* Allocate space to store dma_buf_fds received from user space */
	size = request->count * sizeof(int);
	resource->list_of_dma_buf_fds = kmalloc(size, GFP_KERNEL);

	if (NULL == resource->list_of_dma_buf_fds)
	{
		kfree(resource);
		return -ENOMEM;
	}

	/* Allocate space to store dma_buf pointers associated with dma_buf_fds */
	size = sizeof(struct dma_buf *) * request->count;
	resource->dma_bufs = kmalloc(size, GFP_KERNEL);

	if (NULL == resource->dma_bufs)
	{
		kfree(resource->list_of_dma_buf_fds);
		kfree(resource);
		return -ENOMEM;
	}
	/* Allocate space to store kds_resources associated with dma_buf_fds */
	size = sizeof(struct kds_resource *) * request->count;
	resource->kds_resources = kmalloc(size, GFP_KERNEL);

	if (NULL == resource->kds_resources)
	{
		kfree(resource->dma_bufs);
		kfree(resource->list_of_dma_buf_fds);
		kfree(resource);
		return -ENOMEM;
	}

	/* Copy requested list of dma_buf_fds from user space */
	size = request->count * sizeof(int);
	if (0 != copy_from_user(resource->list_of_dma_buf_fds, (void __user *)request->list_of_dma_buf_fds, size))
	{
		kfree(resource->list_of_dma_buf_fds);
		kfree(resource->dma_bufs);
		kfree(resource->kds_resources);
		kfree(resource);
		return -EFAULT;
	}
#if DMA_BUF_LOCK_DEBUG
	for (i = 0; i < request->count; i++)
	{
		printk("dma_buf %i = %X\n", i, resource->list_of_dma_buf_fds[i]);
	}
#endif

	/* Add resource to global list */
	mutex_lock(&dma_buf_lock_mutex);

	list_add(&resource->link, &dma_buf_lock_resource_list);

	mutex_unlock(&dma_buf_lock_mutex);

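	/*
	 * From this point on, error paths release the resource via
	 * kref_put() rather than kfree().
	 */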
	for (i = 0; i < request->count; i++)
	{
		/* Convert fd into dma_buf structure */
		resource->dma_bufs[i] = dma_buf_get(resource->list_of_dma_buf_fds[i]);

		if (IS_ERR_OR_NULL(resource->dma_bufs[i]))
		{
			mutex_lock(&dma_buf_lock_mutex);
			kref_put(&resource->refcount, dma_buf_lock_dounlock);
			mutex_unlock(&dma_buf_lock_mutex);
			return -EINVAL;
		}

		/* Get the kds_resource associated with this dma_buf */
		resource->kds_resources[i] = get_dma_buf_kds_resource(resource->dma_bufs[i]);

		if (NULL == resource->kds_resources[i])
		{
			mutex_lock(&dma_buf_lock_mutex);
			kref_put(&resource->refcount, dma_buf_lock_dounlock);
			mutex_unlock(&dma_buf_lock_mutex);
			return -EINVAL;
		}
#if DMA_BUF_LOCK_DEBUG
		printk(KERN_DEBUG "dma_buf_lock_dolock : dma_buf_fd %i dma_buf %p kds_resource %p\n",
		       resource->list_of_dma_buf_fds[i],
		       resource->dma_bufs[i], resource->kds_resources[i]);
#endif
	}

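	/* Prepare the KDS callback and the wait queue used while the lock is pending */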
	kds_callback_init(&resource->cb, 1, dma_buf_lock_kds_callback);
	init_waitqueue_head(&resource->wait);

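	/* Take an extra reference on behalf of the file descriptor created below */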
	kref_get(&resource->refcount);

	/* Create file descriptor associated with lock request */
	fd = anon_inode_getfd("dma_buf_lock", &dma_buf_lock_handle_fops,
	                      (void *)resource, 0);
	if (fd < 0)
	{
		mutex_lock(&dma_buf_lock_mutex);
		kref_put(&resource->refcount, dma_buf_lock_dounlock);
		kref_put(&resource->refcount, dma_buf_lock_dounlock);
		mutex_unlock(&dma_buf_lock_mutex);
		return fd;
	}

	resource->exclusive = request->exclusive;

	/* Start locking process */
	ret = kds_async_waitall(&resource->resource_set, KDS_FLAG_LOCKED_ACTION,
	                        &resource->cb, resource, NULL,
	                        request->count, &resource->exclusive,
	                        resource->kds_resources);

	if (IS_ERR_VALUE(ret))
	{
		/*
		 * The fd has already been installed in the fd table by
		 * anon_inode_getfd(), so close it properly instead of just
		 * returning the number to the pool; the handle's release is
		 * expected to drop the file's reference to the resource.
		 */
		sys_close(fd);

		mutex_lock(&dma_buf_lock_mutex);
		kref_put(&resource->refcount, dma_buf_lock_dounlock);
		mutex_unlock(&dma_buf_lock_mutex);

		return ret;
	}

#if DMA_BUF_LOCK_DEBUG
	printk("dma_buf_lock_dolock : complete\n");
#endif
	mutex_lock(&dma_buf_lock_mutex);
	kref_put(&resource->refcount, dma_buf_lock_dounlock);
	mutex_unlock(&dma_buf_lock_mutex);

	return fd;
}
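
/*
 * Illustrative usage sketch (an assumption, not part of this file): a client
 * would typically fill in a dma_buf_lock_k_request and hand it to the
 * module's lock ioctl (the command name and device fd below are assumed), e.g.
 *
 *	int fds[1] = { dma_buf_fd };
 *	dma_buf_lock_k_request req = {
 *		.count = 1,
 *		.list_of_dma_buf_fds = fds,
 *		.exclusive = DMA_BUF_LOCK_EXCLUSIVE,
 *	};
 *	int lock_fd = ioctl(device_fd, DMA_BUF_LOCK_FUNC_LOCK_ASYNC, &req);
 *
 * On success the returned lock_fd represents the lock; closing it is expected
 * to release the associated KDS resources again.
 */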