Example #1
struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name)
{
	struct drm_gem_open req = {
			.name = name,
	};
	struct fd_bo *bo;

	pthread_mutex_lock(&table_lock);

	/* check name table first, to see if bo is already open: */
	bo = lookup_bo(dev->name_table, name);
	if (bo)
		goto out_unlock;

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		ERROR_MSG("gem-open failed: %s", strerror(errno));
		goto out_unlock;
	}

	bo = lookup_bo(dev->handle_table, req.handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, req.size, req.handle);
	if (bo)
		set_name(bo, name);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}
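
Because fd_bo_from_name() consults the device's tables under table_lock before creating anything, importing the same flink name twice yields the same struct fd_bo rather than a second wrapper around the same GEM object. A minimal usage sketch, assuming libdrm's freedreno API and an already-open DRM fd (drm_fd is a placeholder):

	struct fd_device *dev = fd_device_new(drm_fd);
	struct fd_bo *bo = fd_bo_new(dev, 4096, 0);	/* exporting side */
	uint32_t name;
	fd_bo_get_name(bo, &name);	/* flink name, shareable system-wide */

	/* importing side, typically another process that received 'name': */
	struct fd_bo *imported = fd_bo_from_name(dev, name);
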
Example #2
/* allocate a new buffer object */
static struct omap_bo * omap_bo_new_impl(struct omap_device *dev,
		union omap_gem_size size, uint32_t flags)
{
	struct omap_bo *bo = NULL;
	struct drm_omap_gem_new req = {
			.size = size,
			.flags = flags,
	};

	if (size.bytes == 0) {
		goto fail;
	}

	if (drmCommandWriteRead(dev->fd, DRM_OMAP_GEM_NEW, &req, sizeof(req))) {
		goto fail;
	}

	pthread_mutex_lock(&table_lock);
	bo = bo_from_handle(dev, req.handle);
	pthread_mutex_unlock(&table_lock);

	/* bo_from_handle() can fail on allocation; don't dereference NULL */
	if (!bo)
		goto fail;

	if (flags & OMAP_BO_TILED) {
		bo->size = round_up(size.tiled.width, PAGE_SIZE) * size.tiled.height;
	} else {
		bo->size = size.bytes;
	}

	return bo;

fail:
	free(bo);
	return NULL;
}
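
The union omap_gem_size argument carries either a plain byte count or tiled dimensions, selected by the flags. A hedged sketch of the thin public wrappers expected to sit on top of omap_bo_new_impl() (the names match the exported libdrm omap API; the bodies here are illustrative):

struct omap_bo *omap_bo_new(struct omap_device *dev,
		uint32_t size, uint32_t flags)
{
	union omap_gem_size gsize = {
			.bytes = size,
	};
	return omap_bo_new_impl(dev, gsize, flags);
}

/* the caller passes one of the OMAP_BO_TILED_* flags to pick the format */
struct omap_bo *omap_bo_new_tiled(struct omap_device *dev,
		uint32_t width, uint32_t height, uint32_t flags)
{
	union omap_gem_size gsize = {
			.tiled = {
				.width = width,
				.height = height,
			},
	};
	return omap_bo_new_impl(dev, gsize, flags);
}
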
Example #3
struct fd_bo * fd_bo_new(struct fd_device *dev,
		uint32_t size, uint32_t flags)
{
	struct fd_bo *bo = NULL;
	struct fd_bo_bucket *bucket;
	uint32_t handle;
	int ret;

	size = ALIGN(size, 4096);
	bucket = get_bucket(dev, size);

	/* see if we can be green and recycle: */
	if (bucket) {
		size = bucket->size;
		bo = find_in_bucket(dev, bucket, flags);
		if (bo) {
			atomic_set(&bo->refcnt, 1);
			fd_device_ref(bo->dev);
			return bo;
		}
	}

	ret = dev->funcs->bo_new_handle(dev, size, flags, &handle);
	if (ret)
		return NULL;

	pthread_mutex_lock(&table_lock);
	bo = bo_from_handle(dev, size, handle);
	if (bo)
		bo->bo_reuse = 1;
	pthread_mutex_unlock(&table_lock);

	return bo;
}
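
ALIGN() above is assumed to be the usual power-of-two round-up, so every request is padded to a whole page before a size bucket is chosen:

	/* assumed definition; 'a' must be a power of two */
	#define ALIGN(v, a)  (((v) + (a) - 1) & ~((a) - 1))
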
Example #4
struct fd_bo *
fd_bo_from_dmabuf(struct fd_device *dev, int fd)
{
	int ret, size;
	uint32_t handle;
	struct fd_bo *bo;

	pthread_mutex_lock(&table_lock);
	ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
	if (ret) {
		bo = NULL;
		goto out_unlock;
	}

	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;

	/* lseek() to get bo size, then put the offset back */
	size = lseek(fd, 0, SEEK_END);
	lseek(fd, 0, SEEK_SET);

	bo = bo_from_handle(dev, size, handle);

	VG_BO_ALLOC(bo);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}
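
The lseek(2) probe works because a dmabuf fd reports the buffer size as its end offset. The same idiom as a stand-alone helper (hypothetical name, shown only to make it explicit):

	static uint32_t dmabuf_size(int fd)
	{
		off_t size = lseek(fd, 0, SEEK_END);
		lseek(fd, 0, SEEK_SET);	/* put the offset back */
		return (size < 0) ? 0 : (uint32_t)size;
	}
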
Example #5
struct fd_bo *fd_bo_from_handle(struct fd_device *dev,
		uint32_t handle, uint32_t size)
{
	struct fd_bo *bo = NULL;

	pthread_mutex_lock(&table_lock);
	bo = bo_from_handle(dev, size, handle);
	pthread_mutex_unlock(&table_lock);

	return bo;
}
Example #6
drm_public struct fd_bo *
fd_bo_from_handle(struct fd_device *dev, uint32_t handle, uint32_t size)
{
	struct fd_bo *bo = NULL;

	pthread_mutex_lock(&table_lock);

	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, size, handle);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}
Example #7
/* allocate a new (un-tiled) buffer object */
struct etna_bo *etna_bo_new(struct etna_device *dev,
		uint32_t size, uint32_t flags)
{
	struct etna_bo *bo = NULL;

	struct drm_vivante_gem_new req = {
			.size = size,
			.flags = MSM_BO_WC,	/* TODO: MSM_BO_WC is borrowed from msm; figure out proper flags */
	};

	int ret;
	ret = drmCommandWriteRead(dev->fd, DRM_VIVANTE_GEM_NEW,
			&req, sizeof(req));
	if (ret)
		return NULL;

	pthread_mutex_lock(&table_lock);
	bo = bo_from_handle(dev, size, req.handle);
	pthread_mutex_unlock(&table_lock);

	return bo;
}
Example #8
struct fd_bo *
fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
{
	struct fd_bo *bo = NULL;
	uint32_t handle;
	int ret;

	bo = fd_bo_cache_alloc(&dev->bo_cache, &size, flags);
	if (bo)
		return bo;

	ret = dev->funcs->bo_new_handle(dev, size, flags, &handle);
	if (ret)
		return NULL;

	pthread_mutex_lock(&table_lock);
	bo = bo_from_handle(dev, size, handle);
	if (bo)
		bo->bo_reuse = TRUE;
	pthread_mutex_unlock(&table_lock);

	VG_BO_ALLOC(bo);

	return bo;
}
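
With the cache in front of bo_new_handle(), a del/new pair in the same size class is expected to recycle the buffer instead of round-tripping through the kernel. A hedged usage sketch (assumes an existing struct fd_device *dev, and that fd_bo_del() parks reusable buffers in dev->bo_cache, as the bo_reuse flag suggests):

	struct fd_bo *a = fd_bo_new(dev, 8192, 0);
	fd_bo_del(a);	/* returned to the cache, not the kernel */
	struct fd_bo *b = fd_bo_new(dev, 8192, 0);	/* likely recycles 'a' */
	fd_bo_del(b);
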
Example #9
/* get buffer info */
static int get_buffer_info(struct etna_bo *bo)
{
	struct drm_vivante_gem_info req = {
			.handle = bo->handle,
	};
	int ret = drmCommandWriteRead(bo->dev->fd, DRM_VIVANTE_GEM_INFO,
			&req, sizeof(req));
	if (ret) {
		return ret;
	}

	/* really all we need for now is mmap offset */
	bo->offset = req.offset;

	return 0;
}

/* import a buffer object from DRI2 name */
struct etna_bo * etna_bo_from_name(struct etna_device *dev, uint32_t name)
{
	struct drm_gem_open req = {
			.name = name,
	};
	struct etna_bo *bo = NULL;

	pthread_mutex_lock(&table_lock);

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		ERROR_MSG("gem-open failed: %s", strerror(errno));
		goto out_unlock;
	}

	/* GEM_OPEN returns the existing handle if the object is already
	 * open, so a handle-table lookup after the ioctl is enough to
	 * dedup (req.handle is only valid from here on): */
	bo = lookup_bo(dev, req.handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, req.size, req.handle);
	if (bo)
		set_name(bo, name);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}

/* import a buffer from dmabuf fd, does not take ownership of the
 * fd so caller should close() the fd when it is otherwise done
 * with it (even if it is still using the 'struct etna_bo *')
 */
struct etna_bo * etna_bo_from_dmabuf(struct etna_device *dev, int fd)
{
#if 0	/* disabled: incomplete (this bo_from_handle() call lacks the size argument) */
	struct etna_bo *bo = NULL;
	struct drm_prime_handle req = {
			.fd = fd,
	};
	int ret;

	pthread_mutex_lock(&table_lock);

	ret = drmIoctl(dev->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &req);
	if (ret) {
		goto fail;
	}

	bo = lookup_bo(dev, req.handle);
	if (!bo) {
		bo = bo_from_handle(dev, req.handle);
	}

	pthread_mutex_unlock(&table_lock);

	return bo;

fail:
	pthread_mutex_unlock(&table_lock);
	free(bo);
#endif
	return NULL;
}

/* destroy a buffer object */
void etna_bo_del(struct etna_bo *bo)
{
	if (!bo)
		return;

	if (!atomic_dec_and_test(&bo->refcnt))
		return;

	if (bo->map)
		munmap(bo->map, bo->size);

	if (bo->fd)
		close(bo->fd);

	if (bo->handle) {
		struct drm_gem_close req = {
				.handle = bo->handle,
		};
		pthread_mutex_lock(&table_lock);
		drmHashDelete(bo->dev->handle_table, bo->handle);
		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
		pthread_mutex_unlock(&table_lock);
	}

	etna_device_del(bo->dev);

	free(bo);
}

/* get the global flink/DRI2 buffer name */
int etna_bo_get_name(struct etna_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
				.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			return ret;
		}

		bo->name = req.name;
	}

	*name = bo->name;

	return 0;
}

uint32_t etna_bo_handle(struct etna_bo *bo)
{
	return bo->handle;
}

/* caller owns the dmabuf fd that is returned and is responsible
 * to close() it when done
 */
int etna_bo_dmabuf(struct etna_bo *bo)
{
	if (!bo->fd) {
		struct drm_prime_handle req = {
				.handle = bo->handle,
				.flags = DRM_CLOEXEC,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &req);
		if (ret) {
			return ret;
		}

		bo->fd = req.fd;
	}
	return dup(bo->fd);
}

uint32_t etna_bo_size(struct etna_bo *bo)
{
	if (!bo->size) {
		get_buffer_info(bo);
	}
	return bo->size;
}

void * etna_bo_map(struct etna_bo *bo)
{
	if (!bo->map) {
		if (!bo->offset) {
			get_buffer_info(bo);
		}

		bo->map = mmap(0, bo->size, PROT_READ | PROT_WRITE,
				MAP_SHARED, bo->dev->fd, bo->offset);
		if (bo->map == MAP_FAILED) {
			bo->map = NULL;
		}
	}
	return bo->map;
}

/* CPU access synchronization is not implemented yet; these two stubs
 * keep the public API complete (compare the omap versions below). */
int etna_bo_cpu_prep(struct etna_bo *bo, uint32_t op)
{
	return 0;
}

void etna_bo_cpu_fini(struct etna_bo *bo)
{
}
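
A hedged usage sketch of the etnaviv mapping path above (assumes an existing struct etna_device *dev; cpu_prep/cpu_fini are the intended bracket around CPU access even while they are stubs):

	struct etna_bo *bo = etna_bo_new(dev, 4096, 0);
	if (bo) {
		void *ptr = etna_bo_map(bo);	/* mmap()s on first use */
		if (ptr && !etna_bo_cpu_prep(bo, 0 /* op flags TBD */)) {
			memset(ptr, 0, etna_bo_size(bo));
			etna_bo_cpu_fini(bo);
		}
		etna_bo_del(bo);
	}
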
Example #10
/* get buffer info */
static int get_buffer_info(struct omap_bo *bo)
{
	struct drm_omap_gem_info req = {
			.handle = bo->handle,
	};
	int ret = drmCommandWriteRead(bo->dev->fd, DRM_OMAP_GEM_INFO,
			&req, sizeof(req));
	if (ret) {
		return ret;
	}

	/* really all we need for now is mmap offset */
	bo->offset = req.offset;
	bo->size = req.size;

	return 0;
}

/* import a buffer object from DRI2 name */
drm_public struct omap_bo *
omap_bo_from_name(struct omap_device *dev, uint32_t name)
{
	struct omap_bo *bo = NULL;
	struct drm_gem_open req = {
			.name = name,
	};

	pthread_mutex_lock(&table_lock);

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		goto fail;
	}

	bo = lookup_bo(dev, req.handle);
	if (!bo) {
		bo = bo_from_handle(dev, req.handle);
		if (!bo)
			goto fail;
		bo->name = name;
	}

	pthread_mutex_unlock(&table_lock);

	return bo;

fail:
	pthread_mutex_unlock(&table_lock);
	free(bo);
	return NULL;
}

/* import a buffer from dmabuf fd, does not take ownership of the
 * fd so caller should close() the fd when it is otherwise done
 * with it (even if it is still using the 'struct omap_bo *')
 */
drm_public struct omap_bo *
omap_bo_from_dmabuf(struct omap_device *dev, int fd)
{
	struct omap_bo *bo = NULL;
	struct drm_prime_handle req = {
			.fd = fd,
	};
	int ret;

	pthread_mutex_lock(&table_lock);

	ret = drmIoctl(dev->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &req);
	if (ret) {
		goto fail;
	}

	bo = lookup_bo(dev, req.handle);
	if (!bo) {
		bo = bo_from_handle(dev, req.handle);
	}

	pthread_mutex_unlock(&table_lock);

	return bo;

fail:
	pthread_mutex_unlock(&table_lock);
	free(bo);
	return NULL;
}

/* destroy a buffer object */
drm_public void omap_bo_del(struct omap_bo *bo)
{
	if (!bo) {
		return;
	}

	if (!atomic_dec_and_test(&bo->refcnt))
		return;

	if (bo->map) {
		munmap(bo->map, bo->size);
	}

	if (bo->fd >= 0) {
		close(bo->fd);
	}

	if (bo->handle) {
		struct drm_gem_close req = {
				.handle = bo->handle,
		};
		pthread_mutex_lock(&table_lock);
		drmHashDelete(bo->dev->handle_table, bo->handle);
		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
		pthread_mutex_unlock(&table_lock);
	}

	omap_device_del(bo->dev);

	free(bo);
}

/* get the global flink/DRI2 buffer name */
drm_public int omap_bo_get_name(struct omap_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
				.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			return ret;
		}

		bo->name = req.name;
	}

	*name = bo->name;

	return 0;
}

drm_public uint32_t omap_bo_handle(struct omap_bo *bo)
{
	return bo->handle;
}

/* caller owns the dmabuf fd that is returned and is responsible
 * to close() it when done
 */
drm_public int omap_bo_dmabuf(struct omap_bo *bo)
{
	if (bo->fd < 0) {
		struct drm_prime_handle req = {
				.handle = bo->handle,
				.flags = DRM_CLOEXEC,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &req);
		if (ret) {
			return ret;
		}

		bo->fd = req.fd;
	}
	return dup(bo->fd);
}
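
The two ownership comments above combine into one rule: the exported fd belongs to the caller, and importing never takes it over. A hedged sketch (dev2 is a hypothetical second device, e.g. in another process after fd passing):

	int fd = omap_bo_dmabuf(bo);	/* dup()ed; we own this fd */
	struct omap_bo *imported = omap_bo_from_dmabuf(dev2, fd);
	close(fd);	/* fine even though 'imported' is still in use */
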

drm_public uint32_t omap_bo_size(struct omap_bo *bo)
{
	if (!bo->size) {
		get_buffer_info(bo);
	}
	return bo->size;
}

drm_public void *omap_bo_map(struct omap_bo *bo)
{
	if (!bo->map) {
		if (!bo->offset) {
			get_buffer_info(bo);
		}

		bo->map = mmap(0, bo->size, PROT_READ | PROT_WRITE,
				MAP_SHARED, bo->dev->fd, bo->offset);
		if (bo->map == MAP_FAILED) {
			bo->map = NULL;
		}
	}
	return bo->map;
}

drm_public int omap_bo_cpu_prep(struct omap_bo *bo, enum omap_gem_op op)
{
	struct drm_omap_gem_cpu_prep req = {
			.handle = bo->handle,
			.op = op,
	};
	return drmCommandWrite(bo->dev->fd,
			DRM_OMAP_GEM_CPU_PREP, &req, sizeof(req));
}

drm_public int omap_bo_cpu_fini(struct omap_bo *bo, enum omap_gem_op op)
{
	struct drm_omap_gem_cpu_fini req = {
			.handle = bo->handle,
			.op = op,
			.nregions = 0,
	};
	return drmCommandWrite(bo->dev->fd,
			DRM_OMAP_GEM_CPU_FINI, &req, sizeof(req));
}
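
A hedged end-to-end sketch tying the omap pieces together: allocate, map, and bracket CPU writes with cpu_prep/cpu_fini (assumes an existing struct omap_device *dev and the OMAP_GEM_WRITE op from the omap GEM headers):

	struct omap_bo *bo = omap_bo_new(dev, 4096, OMAP_BO_WC);
	if (bo) {
		void *ptr = omap_bo_map(bo);
		if (ptr && !omap_bo_cpu_prep(bo, OMAP_GEM_WRITE)) {
			memset(ptr, 0, omap_bo_size(bo));
			omap_bo_cpu_fini(bo, OMAP_GEM_WRITE);
		}
		omap_bo_del(bo);
	}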