/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_gem_object *obj;
	struct drm_vma_offset_node *node;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff,
					   vma_pages(vma));
	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	} else if (!drm_vma_node_is_allowed(node, filp)) {
		mutex_unlock(&dev->struct_mutex);
		return -EACCES;
	}

	obj = container_of(node, struct drm_gem_object, vma_node);
	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
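
For orientation, a minimal userspace sketch of the other side of this path, assuming a dumb buffer and the standard libdrm/UAPI names (this helper is illustrative, not part of the kernel source above): DRM_IOCTL_MODE_MAP_DUMB returns the fake offset, and the subsequent mmap() on the DRM fd is what ends up in drm_gem_mmap().

#include <stdint.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include <drm/drm_mode.h>

static void *map_gem_bo(int fd, uint32_t handle, size_t size)
{
	struct drm_mode_map_dumb arg = { .handle = handle };

	/* ask the kernel for the fake offset of this object */
	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &arg))
		return MAP_FAILED;

	/* vma->vm_pgoff in drm_gem_mmap() will be arg.offset >> PAGE_SHIFT;
	 * the caller must check the result against MAP_FAILED */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, arg.offset);
}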
int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct radeon_device *rdev;
	int r;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
		return drm_mmap(filp, vma);
	}

	file_priv = filp->private_data;
	rdev = file_priv->minor->dev->dev_private;
	if (rdev == NULL) {
		return -EINVAL;
	}
	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0)) {
		return r;
	}
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
	}
	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}
Example #3
static int
exynos_bo_map(struct kms_bo *_bo, void **out)
{
	struct exynos_bo *bo = (struct exynos_bo *)_bo;
	struct drm_mode_map_dumb arg;
	void *map = NULL;
	int ret;

	if (bo->base.ptr) {
		bo->map_count++;
		*out = bo->base.ptr;
		return 0;
	}

	memset(&arg, 0, sizeof(arg));
	arg.handle = bo->base.handle;

	ret = drmIoctl(bo->base.kms->fd, DRM_IOCTL_MODE_MAP_DUMB, &arg);
	if (ret)
		return ret;

	map = drm_mmap(0, bo->base.size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->base.kms->fd, arg.offset);
	if (map == MAP_FAILED)
		return -errno;

	bo->base.ptr = map;
	bo->map_count++;
	*out = bo->base.ptr;

	return 0;
}
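
The map_count bookkeeping above implies a matching unmap path. A hypothetical sketch of that counterpart (exynos_bo_unmap is illustrative, not taken from the listing): only the last user actually unmaps.

static int
exynos_bo_unmap(struct kms_bo *_bo)
{
	struct exynos_bo *bo = (struct exynos_bo *)_bo;

	/* drop one mapping reference; tear down only on the last one */
	if (--bo->map_count == 0) {
		drm_munmap(bo->base.ptr, bo->base.size);
		bo->base.ptr = NULL;
	}

	return 0;
}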
Example #4
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * If we find the object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object), we set up the driver fault handler so that any accesses
 * to the object can be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret =  -EPERM;
		goto out_unlock;
	}

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;
	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	vma->vm_page_prot =  pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret =  -EPERM;
		goto out_unlock;
	}

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;
	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	vma->vm_page_prot =  pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
Example #6
int pscnv_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct pscnv_bo *bo;
	int ret;

	if (vma->vm_pgoff * PAGE_SIZE < (1ull << 31))
		return drm_mmap(filp, vma);

	if (vma->vm_pgoff * PAGE_SIZE < (1ull << 32))
		return pscnv_chan_mmap(filp, vma);

	obj = drm_gem_object_lookup(dev, priv, (vma->vm_pgoff * PAGE_SIZE) >> 32);
	if (!obj)
		return -ENOENT;
	bo = obj->driver_private;
	
	if (vma->vm_end - vma->vm_start > bo->size) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}
	switch (bo->flags & PSCNV_GEM_MEMTYPE_MASK) {
	case PSCNV_GEM_VRAM_SMALL:
	case PSCNV_GEM_VRAM_LARGE:
		if ((ret = dev_priv->vm->map_user(bo))) {
			drm_gem_object_unreference_unlocked(obj);
			return ret;
		}

		vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
		vma->vm_ops = &pscnv_vram_ops;
		vma->vm_private_data = obj;
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

		vma->vm_file = filp;

		return remap_pfn_range(vma, vma->vm_start, 
				(dev_priv->fb_phys + bo->map1->start) >> PAGE_SHIFT,
				vma->vm_end - vma->vm_start, PAGE_SHARED);
	case PSCNV_GEM_SYSRAM_SNOOP:
	case PSCNV_GEM_SYSRAM_NOSNOOP:
		/* XXX */
		vma->vm_flags |= VM_RESERVED;
		vma->vm_ops = &pscnv_sysram_ops;
		vma->vm_private_data = obj;

		vma->vm_file = filp;

		return 0;
	default:
		drm_gem_object_unreference_unlocked(obj);
		return -ENOSYS;
	}
}
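
pscnv partitions the mmap offset space: offsets below 1 << 31 fall through to legacy drm_mmap(), offsets in [1 << 31, 1 << 32) map channel control areas, and anything above encodes the GEM handle in the upper 32 bits. A hypothetical helper a client could use to build such an offset:

#include <stdint.h>

/* hypothetical helper, mirroring the decoding in pscnv_mmap() above:
 * (vma->vm_pgoff * PAGE_SIZE) >> 32 recovers the handle */
static inline uint64_t pscnv_bo_mmap_offset(uint32_t handle)
{
	return (uint64_t)handle << 32;
}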
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_nouveau_private *dev_priv =
		file_priv->minor->dev->dev_private;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_mmap(filp, vma);

	return ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
}
Example #8
int bochs_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct bochs_device *bochs;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_mmap(filp, vma);

	file_priv = filp->private_data;
	bochs = file_priv->minor->dev->dev_private;
	return ttm_bo_mmap(filp, vma, &bochs->ttm.bdev);
}
int
SYSPVRMMap(struct file* pFile, struct vm_area_struct* ps_vma)
{
	int ret;

	ret = PVRMMap(pFile, ps_vma);
	if (ret == -ENOENT)
	{
		ret = drm_mmap(pFile, ps_vma);
	}

	return ret;
}
Example #10
int
pvdrm_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* FIXME: We should implement ttm shadow memory on the guest side. */
	struct drm_file* file_priv = filp->private_data;
	struct pvdrm_device* pvdrm = drm_device_to_pvdrm(file_priv->minor->dev);

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
		return drm_mmap(filp, vma);
	}

	return ttm_bo_mmap(filp, vma, &pvdrm->ttm->bdev);
}
Example #11
int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct vmw_private *dev_priv;

	if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) {
		if (vmw_fifo_mmap(filp, vma) == 0)
			return 0;
		return drm_mmap(filp, vma);
	}

	file_priv = filp->private_data;
	dev_priv = vmw_priv(file_priv->minor->dev);
	return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
}
Example #12
int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
{
	union drm_amdgpu_gem_mmap args;
	void *ptr;
	int r;

	pthread_mutex_lock(&bo->cpu_access_mutex);

	if (bo->cpu_ptr) {
		/* already mapped */
		assert(bo->cpu_map_count > 0);
		bo->cpu_map_count++;
		*cpu = bo->cpu_ptr;
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return 0;
	}

	assert(bo->cpu_map_count == 0);

	memset(&args, 0, sizeof(args));

	/* Query the buffer address (args.addr_ptr).
	 * The kernel driver ignores the offset and size parameters. */
	args.in.handle = bo->handle;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
				sizeof(args));
	if (r) {
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return r;
	}

	/* Map the buffer. */
	ptr = drm_mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		       bo->dev->fd, args.out.addr_ptr);
	if (ptr == MAP_FAILED) {
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return -errno;
	}

	bo->cpu_ptr = ptr;
	bo->cpu_map_count = 1;
	pthread_mutex_unlock(&bo->cpu_access_mutex);

	*cpu = ptr;
	return 0;
}
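
For completeness, a simplified sketch of the matching unmap path under the same refcounting scheme (libdrm's amdgpu_bo_cpu_unmap() works along these lines; details trimmed): only the last unmapper calls drm_munmap().

int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
{
	int r = 0;

	pthread_mutex_lock(&bo->cpu_access_mutex);
	assert(bo->cpu_map_count > 0);

	if (--bo->cpu_map_count == 0) {
		/* last reference gone: tear down the CPU mapping */
		r = drm_munmap(bo->cpu_ptr, bo->alloc_size) ? -errno : 0;
		bo->cpu_ptr = NULL;
	}

	pthread_mutex_unlock(&bo->cpu_access_mutex);
	return r;
}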
static int bo_map(struct radeon_bo_int *boi, int write)
{
    struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)boi;
    struct drm_radeon_gem_mmap args;
    int r;
    void *ptr;

    if (bo_gem->map_count++ != 0) {
        return 0;
    }
    if (bo_gem->priv_ptr) {
        goto wait;
    }

    boi->ptr = NULL;

    /* Zero out args to make valgrind happy */
    memset(&args, 0, sizeof(args));
    args.handle = boi->handle;
    args.offset = 0;
    args.size = (uint64_t)boi->size;
    r = drmCommandWriteRead(boi->bom->fd,
                            DRM_RADEON_GEM_MMAP,
                            &args,
                            sizeof(args));
    if (r) {
        bo_gem->map_count--;
        fprintf(stderr, "error mapping %p 0x%08X (error = %d)\n",
                boi, boi->handle, r);
        return r;
    }
    ptr = drm_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED, boi->bom->fd, args.addr_ptr);
    if (ptr == MAP_FAILED) {
        bo_gem->map_count--;
        return -errno;
    }
    bo_gem->priv_ptr = ptr;
wait:
    boi->ptr = bo_gem->priv_ptr;
    r = bo_wait(boi);
    if (r)
        return r;
    return 0;
}
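
In libdrm-radeon this static helper sits behind the public radeon_bo_map()/radeon_bo_unmap() pair. A hedged usage sketch (fill_bo is illustrative only):

static int fill_bo(struct radeon_bo *bo, uint32_t value)
{
    uint32_t *p;
    unsigned i;
    int r;

    r = radeon_bo_map(bo, 1);   /* request write access; waits for the GPU */
    if (r)
        return r;

    p = bo->ptr;
    for (i = 0; i < bo->size / sizeof(*p); i++)
        p[i] = value;

    radeon_bo_unmap(bo);        /* drops the map_count reference */
    return 0;
}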
Example #14
static void
read_file(const char *filename, void **ptr, size_t *size)
{
	int fd, ret;
	struct stat st;

	fd = open(filename, O_RDONLY);
	if (fd == -1)
		errx(1, "couldn't open `%s'", filename);

	ret = fstat(fd, &st);
	if (ret)
		errx(1, "couldn't stat `%s'", filename);

	*size = st.st_size;
	*ptr = drm_mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
	if (*ptr == MAP_FAILED)
		errx(1, "couldn't map `%s'", filename);

	close(fd);
}
Example #15
static int bo_map(struct bo *bo, void **out)
{
	struct drm_mode_map_dumb arg;
	void *map;
	int ret;

	memset(&arg, 0, sizeof(arg));
	arg.handle = bo->handle;

	ret = drmIoctl(bo->fd, DRM_IOCTL_MODE_MAP_DUMB, &arg);
	if (ret)
		return ret;

	map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
		       bo->fd, arg.offset);
	if (map == MAP_FAILED)
		return -EINVAL;

	bo->ptr = map;
	*out = map;

	return 0;
}
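
For context, a minimal sketch of the allocation step that usually precedes bo_map(): create a dumb buffer with the standard DRM UAPI and record the kernel-chosen handle and size. The bo fields are assumptions based on how bo_map() uses them above.

static int bo_create_dumb(struct bo *bo, int fd,
			  uint32_t width, uint32_t height, uint32_t bpp)
{
	struct drm_mode_create_dumb arg;
	int ret;

	memset(&arg, 0, sizeof(arg));
	arg.width = width;
	arg.height = height;
	arg.bpp = bpp;

	ret = drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &arg);
	if (ret)
		return ret;

	/* the kernel picks the pitch and total size */
	bo->fd = fd;
	bo->handle = arg.handle;
	bo->size = arg.size;
	return 0;
}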
Example #16
/* Called under table_lock */
drm_private void bo_del(struct fd_bo *bo)
{
	VG_BO_FREE(bo);

	if (bo->map)
		drm_munmap(bo->map, bo->size);

	/* TODO probably bo's in bucket list get removed from
	 * handle table??
	 */

	if (bo->handle) {
		struct drm_gem_close req = {
				.handle = bo->handle,
		};
		drmHashDelete(bo->dev->handle_table, bo->handle);
		if (bo->name)
			drmHashDelete(bo->dev->name_table, bo->name);
		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}

	bo->funcs->destroy(bo);
}

int fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
				.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			return ret;
		}

		pthread_mutex_lock(&table_lock);
		set_name(bo, req.name);
		pthread_mutex_unlock(&table_lock);
		bo->bo_reuse = FALSE;
	}

	*name = bo->name;

	return 0;
}

uint32_t fd_bo_handle(struct fd_bo *bo)
{
	return bo->handle;
}

int fd_bo_dmabuf(struct fd_bo *bo)
{
	int ret, prime_fd;

	ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
			&prime_fd);
	if (ret) {
		ERROR_MSG("failed to get dmabuf fd: %d", ret);
		return ret;
	}

	bo->bo_reuse = FALSE;

	return prime_fd;
}

uint32_t fd_bo_size(struct fd_bo *bo)
{
	return bo->size;
}

void * fd_bo_map(struct fd_bo *bo)
{
	if (!bo->map) {
		uint64_t offset;
		int ret;

		ret = bo->funcs->offset(bo, &offset);
		if (ret) {
			return NULL;
		}

		bo->map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
				bo->dev->fd, offset);
		if (bo->map == MAP_FAILED) {
			ERROR_MSG("mmap failed: %s", strerror(errno));
			bo->map = NULL;
		}
	}
	return bo->map;
}

/* a bit odd to take the pipe as an arg, but it's a, umm, quirk of kgsl.. */
int fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
{
	return bo->funcs->cpu_prep(bo, pipe, op);
}

void fd_bo_cpu_fini(struct fd_bo *bo)
{
	bo->funcs->cpu_fini(bo);
}
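
A hedged sketch of the usual CPU-access pattern built from the helpers above, assuming the standard freedreno_drmif.h declarations (write_bo is illustrative): synchronize with fd_bo_cpu_prep(), touch the mapping, then fd_bo_cpu_fini().

static int write_bo(struct fd_bo *bo, struct fd_pipe *pipe,
		const void *data, uint32_t len)
{
	void *ptr;
	int ret;

	ret = fd_bo_cpu_prep(bo, pipe, DRM_FREEDRENO_PREP_WRITE);
	if (ret)
		return ret;

	ptr = fd_bo_map(bo);          /* lazily mmaps on first use */
	if (!ptr) {
		fd_bo_cpu_fini(bo);
		return -ENOMEM;
	}

	memcpy(ptr, data, len);
	fd_bo_cpu_fini(bo);
	return 0;
}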
Example #17
struct fd_bo *
fd_bo_from_fbdev(struct fd_pipe *pipe, int fbfd, uint32_t size)
{
	struct fd_bo *bo;

	if (!is_kgsl_pipe(pipe))
		return NULL;

	bo = fd_bo_new(pipe->dev, 1, 0);

	/* this is fugly, but works around a bug in the kernel..
	 * priv->memdesc.size never gets set, so the getbufinfo ioctl
	 * thinks the buffer hasn't been allocated and fails
	 */
	if (bo) {
		void *fbmem = drm_mmap(NULL, size, PROT_READ | PROT_WRITE,
				MAP_SHARED, fbfd, 0);
		struct kgsl_map_user_mem req = {
				.memtype = KGSL_USER_MEM_TYPE_ADDR,
				.len     = size,
				.offset  = 0,
				.hostptr = (unsigned long)fbmem,
		};
		struct kgsl_bo *kgsl_bo = to_kgsl_bo(bo);
		int ret;

		if (fbmem == MAP_FAILED) {
			ERROR_MSG("mmap of fbdev memory failed: %s",
					strerror(errno));
			goto fail;
		}

		ret = ioctl(to_kgsl_pipe(pipe)->fd, IOCTL_KGSL_MAP_USER_MEM, &req);
		if (ret) {
			ERROR_MSG("mapping user mem failed: %s",
					strerror(errno));
			goto fail;
		}
		kgsl_bo->gpuaddr = req.gpuaddr;
		bo->map = fbmem;
	}

	return bo;
fail:
	if (bo)
		fd_bo_del(bo);
	return NULL;
}

drm_private uint32_t kgsl_bo_gpuaddr(struct kgsl_bo *kgsl_bo, uint32_t offset)
{
	struct fd_bo *bo = &kgsl_bo->base;
	if (!kgsl_bo->gpuaddr) {
		struct drm_kgsl_gem_bufinfo req = {
				.handle = bo->handle,
		};
		int ret;

		ret = bo_alloc(kgsl_bo);
		if (ret) {
			return ret;
		}

		ret = drmCommandWriteRead(bo->dev->fd, DRM_KGSL_GEM_GET_BUFINFO,
				&req, sizeof(req));
		if (ret) {
			ERROR_MSG("get bufinfo failed: %s", strerror(errno));
			return 0;
		}

		kgsl_bo->gpuaddr = req.gpuaddr[0];
	}
	return kgsl_bo->gpuaddr + offset;
}

/*
 * Super-cheezy way to synchronize between mesa and ddx..  the
 * SET_ACTIVE ioctl gives us a way to stash a 32b # w/ a GEM bo, and
 * GET_BUFINFO gives us a way to retrieve it.  We use this to stash
 * the timestamp of the last ISSUEIBCMDS on the buffer.
 *
 * To avoid an obscene amount of syscalls, we:
 *  1) Only set the timestamp for buffers w/ a flink name, ie.
 *     only buffers shared across processes.  This is enough to
 *     catch the DRI2 buffers.
 *  2) Only set the timestamp for buffers submitted to the 3d ring
 *     and only check the timestamps on buffers submitted to the
 *     2d ring.  This should be enough to handle synchronizing the
 *     presentation blit.  We could do synchronization in the other
 *     direction too, but that would be problematic if we are using
 *     the 3d ring from DDX, since client side wouldn't know this.
 *
 * The waiting on timestamp happens before flush, and setting of
 * timestamp happens after flush.  It is transparent to the user
 * of libdrm_freedreno as all the tracking of buffers happens via
 * _emit_reloc()..
 */

drm_private void kgsl_bo_set_timestamp(struct kgsl_bo *kgsl_bo,
		uint32_t timestamp)
{
	struct fd_bo *bo = &kgsl_bo->base;
	if (bo->name) {
		struct drm_kgsl_gem_active req = {
				.handle = bo->handle,
				.active = timestamp,
		};
		int ret;

		ret = drmCommandWrite(bo->dev->fd, DRM_KGSL_GEM_SET_ACTIVE,
				&req, sizeof(req));
		if (ret) {
			ERROR_MSG("set active failed: %s", strerror(errno));
		}
	}
}

drm_private uint32_t kgsl_bo_get_timestamp(struct kgsl_bo *kgsl_bo)
{
	struct fd_bo *bo = &kgsl_bo->base;
	uint32_t timestamp = 0;
	if (bo->name) {
		struct drm_kgsl_gem_bufinfo req = {
				.handle = bo->handle,
		};
		int ret;

		ret = drmCommandWriteRead(bo->dev->fd, DRM_KGSL_GEM_GET_BUFINFO,
				&req, sizeof(req));
		if (ret) {
			ERROR_MSG("get bufinfo failed: %s", strerror(errno));
			return 0;
		}

		timestamp = req.active;
	}
	return timestamp;
}
Example #18
/*
 * Destroy an exynos buffer object.
 *
 * @bo: an exynos buffer object to be destroyed.
 */
drm_public void exynos_bo_destroy(struct exynos_bo *bo)
{
	if (!bo)
		return;

	if (bo->vaddr)
		munmap(bo->vaddr, bo->size);

	if (bo->handle) {
		struct drm_gem_close req = {
			.handle = bo->handle,
		};

		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}

	free(bo);
}


/*
 * Get an exynos buffer object from a gem global object name.
 *
 * @dev: an exynos device object.
 * @name: a gem global object name exported by another process.
 *
 * this interface is used to get an exynos buffer object from a gem
 * global object name sent by another process for buffer sharing.
 *
 * on success, returns an exynos buffer object, else NULL.
 *
 */
drm_public struct exynos_bo *
exynos_bo_from_name(struct exynos_device *dev, uint32_t name)
{
	struct exynos_bo *bo;
	struct drm_gem_open req = {
		.name = name,
	};

	bo = calloc(sizeof(*bo), 1);
	if (!bo) {
		fprintf(stderr, "failed to allocate bo[%s].\n",
				strerror(errno));
		return NULL;
	}

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		fprintf(stderr, "failed to open gem object[%s].\n",
				strerror(errno));
		goto err_free_bo;
	}

	bo->dev = dev;
	bo->name = name;
	bo->handle = req.handle;

	return bo;

err_free_bo:
	free(bo);
	return NULL;
}

/*
 * Get a gem global object name from a gem object handle.
 *
 * @bo: an exynos buffer object including a gem handle.
 * @name: a gem global object name to be retrieved from the kernel driver.
 *
 * this interface is used to get a gem global object name for a gem object
 * handle, so that the buffer can be shared with another process.
 *
 * returns 0 on success, else a negative value.
 */
drm_public int exynos_bo_get_name(struct exynos_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
			.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			fprintf(stderr, "failed to get gem global name[%s].\n",
					strerror(errno));
			return ret;
		}

		bo->name = req.name;
	}

	*name = bo->name;

	return 0;
}
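
A sketch of cross-process sharing built from the two helpers above; the IPC transport of the flink name is application-defined, and share_bo()/import_bo() are illustrative only.

#include <errno.h>
#include <unistd.h>

static int share_bo(struct exynos_bo *bo, int peer_fd)
{
	uint32_t name;
	int ret;

	ret = exynos_bo_get_name(bo, &name);
	if (ret)
		return ret;

	/* hand the global name to the peer over any byte channel */
	return write(peer_fd, &name, sizeof(name)) == sizeof(name) ? 0 : -errno;
}

static struct exynos_bo *import_bo(struct exynos_device *dev, int peer_fd)
{
	uint32_t name;

	if (read(peer_fd, &name, sizeof(name)) != sizeof(name))
		return NULL;

	return exynos_bo_from_name(dev, name);
}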

drm_public uint32_t exynos_bo_handle(struct exynos_bo *bo)
{
	return bo->handle;
}

/*
 * Mmap a buffer to user space.
 *
 * @bo: an exynos buffer object including a gem object handle to be mmapped
 *	to user space.
 *
 * on success, returns the mmapped user pointer, else NULL.
 */
drm_public void *exynos_bo_map(struct exynos_bo *bo)
{
	if (!bo->vaddr) {
		struct exynos_device *dev = bo->dev;
		struct drm_mode_map_dumb arg;
		void *map = NULL;
		int ret;

		memset(&arg, 0, sizeof(arg));
		arg.handle = bo->handle;

		ret = drmIoctl(dev->fd, DRM_IOCTL_MODE_MAP_DUMB, &arg);
		if (ret) {
			fprintf(stderr, "failed to map dumb buffer[%s].\n",
				strerror(errno));
			return NULL;
		}

		map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
				dev->fd, arg.offset);

		if (map != MAP_FAILED)
			bo->vaddr = map;
	}

	return bo->vaddr;
}

/*
 * Export gem object to dmabuf as file descriptor.
 *
 * @dev: exynos device object
 * @handle: gem handle to export as file descriptor of dmabuf
 * @fd: file descriptor returned from kernel
 *
 * @return: 0 on success, -1 on error, and errno will be set
 */
drm_public int
exynos_prime_handle_to_fd(struct exynos_device *dev, uint32_t handle, int *fd)
{
	return drmPrimeHandleToFD(dev->fd, handle, 0, fd);
}

/*
 * Import file descriptor into gem handle.
 *
 * @dev: exynos device object
 * @fd: file descriptor of dmabuf to import
 * @handle: gem handle returned from kernel
 *
 * @return: 0 on success, -1 on error, and errno will be set
 */
drm_public int
exynos_prime_fd_to_handle(struct exynos_device *dev, int fd, uint32_t *handle)
{
	return drmPrimeFDToHandle(dev->fd, fd, handle);
}
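
A hedged sketch of a PRIME round trip through the two wrappers above: export a gem handle as a dma-buf fd and import it into another exynos_device, e.g. a second open of the DRM node (for the cross-process case the fd would travel over SCM_RIGHTS, elided here). prime_roundtrip() is illustrative.

#include <unistd.h>

static int prime_roundtrip(struct exynos_device *src, struct exynos_device *dst,
			   uint32_t handle, uint32_t *out_handle)
{
	int fd, ret;

	ret = exynos_prime_handle_to_fd(src, handle, &fd);
	if (ret)
		return ret;

	ret = exynos_prime_fd_to_handle(dst, fd, out_handle);
	close(fd);	/* the fd is only needed for the import */
	return ret;
}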



/*
 * Request Wireless Display connection or disconnection.
 *
 * @dev: an exynos device object.
 * @connect: indicates whether this is a connection or disconnection request.
 * @ext: indicates whether the edid data includes extension data or not.
 * @edid: a pointer to edid data from the Wireless Display device.
 *
 * this interface is used to request Virtual Display driver connection or
 * disconnection. for this, the user should get edid data from the Wireless
 * Display device and then send that data to the kernel driver with the
 * connection request.
 *
 * returns 0 on success, else a negative value.
 */
drm_public int
exynos_vidi_connection(struct exynos_device *dev, uint32_t connect,
		       uint32_t ext, void *edid)
{
	struct drm_exynos_vidi_connection req = {
		.connection	= connect,
		.extensions	= ext,
		.edid		= (uint64_t)(uintptr_t)edid,
	};
	int ret;

	ret = drmIoctl(dev->fd, DRM_IOCTL_EXYNOS_VIDI_CONNECTION, &req);
	if (ret) {
		fprintf(stderr, "failed to request vidi connection[%s].\n",
				strerror(errno));
		return ret;
	}

	return 0;
}

static void
exynos_handle_vendor(int fd, struct drm_event *e, void *ctx)
{
	struct drm_exynos_g2d_event *g2d;
	struct exynos_event_context *ectx = ctx;

	switch (e->type) {
		case DRM_EXYNOS_G2D_EVENT:
			if (ectx->version < 1 || ectx->g2d_event_handler == NULL)
				break;
			g2d = (struct drm_exynos_g2d_event *)e;
			ectx->g2d_event_handler(fd, g2d->cmdlist_no, g2d->tv_sec,
						g2d->tv_usec, U642VOID(g2d->user_data));
			break;

		default:
			break;
	}
}

drm_public int
exynos_handle_event(struct exynos_device *dev, struct exynos_event_context *ctx)
{
	char buffer[1024];
	int len, i;
	struct drm_event *e;
	struct drm_event_vblank *vblank;
	drmEventContextPtr evctx = &ctx->base;

	/* The DRM read semantics guarantee that we always get only
	 * complete events. */
	len = read(dev->fd, buffer, sizeof buffer);
	if (len == 0)
		return 0;
	if (len < (int)sizeof *e)
		return -1;

	i = 0;
	while (i < len) {
		e = (struct drm_event *)(buffer + i);
		switch (e->type) {
		case DRM_EVENT_VBLANK:
			if (evctx->version < 1 ||
			    evctx->vblank_handler == NULL)
				break;
			vblank = (struct drm_event_vblank *) e;
			evctx->vblank_handler(dev->fd,
					      vblank->sequence,
					      vblank->tv_sec,
					      vblank->tv_usec,
					      U642VOID (vblank->user_data));
			break;
		case DRM_EVENT_FLIP_COMPLETE:
			if (evctx->version < 2 ||
			    evctx->page_flip_handler == NULL)
				break;
			vblank = (struct drm_event_vblank *) e;
			evctx->page_flip_handler(dev->fd,
						 vblank->sequence,
						 vblank->tv_sec,
						 vblank->tv_usec,
						 U642VOID (vblank->user_data));
			break;
		default:
			exynos_handle_vendor(dev->fd, e, evctx);
			break;
		}
		i += e->length;
	}

	return 0;
}
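
A sketch of a dispatch loop driving exynos_handle_event(): block on the DRM fd with poll() and call the handler whenever it becomes readable. event_loop() is illustrative; ctx is assumed to have been initialized by the caller with the version fields and handlers.

#include <errno.h>
#include <poll.h>

static int event_loop(struct exynos_device *dev,
		      struct exynos_event_context *ctx)
{
	struct pollfd fds = { .fd = dev->fd, .events = POLLIN };

	while (poll(&fds, 1, -1) >= 0) {
		if (fds.revents & POLLIN) {
			if (exynos_handle_event(dev, ctx) < 0)
				return -1;
		}
	}

	return -errno;
}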