struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name)
{
	struct drm_gem_open req = {
			.name = name,
	};
	struct fd_bo *bo;

	pthread_mutex_lock(&table_lock);

	/* check name table first, to see if bo is already open: */
	bo = lookup_bo(dev->name_table, name);
	if (bo)
		goto out_unlock;

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		ERROR_MSG("gem-open failed: %s", strerror(errno));
		goto out_unlock;
	}

	bo = lookup_bo(dev->handle_table, req.handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, req.size, req.handle);
	if (bo)
		set_name(bo, name);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}
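/*
 * Usage sketch (illustrative, not part of the library): import a buffer
 * that another process published via flink. Assumes "dev" is a valid
 * device (e.g. from fd_device_new()) and that the flink name arrived
 * out of band, say over a socket; the helper name is hypothetical and
 * error handling is minimal. fd_bo_del() drops the reference again.
 */
static struct fd_bo * import_shared_buffer(struct fd_device *dev, uint32_t name)
{
	struct fd_bo *bo = fd_bo_from_name(dev, name);

	if (!bo) {
		ERROR_MSG("import of flink name %u failed", name);
		return NULL;
	}

	/* map it once to verify the import worked */
	if (!fd_bo_map(bo)) {
		fd_bo_del(bo);
		return NULL;
	}

	return bo;
}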
struct fd_bo * fd_bo_from_dmabuf(struct fd_device *dev, int fd)
{
	int ret, size;
	uint32_t handle;
	struct fd_bo *bo;

	pthread_mutex_lock(&table_lock);
	ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
	if (ret) {
		/* don't return with table_lock still held */
		bo = NULL;
		goto out_unlock;
	}

	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;

	/* lseek() to get bo size */
	size = lseek(fd, 0, SEEK_END);
	lseek(fd, 0, SEEK_CUR);

	bo = bo_from_handle(dev, size, handle);

	VG_BO_ALLOC(bo);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}
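/*
 * Usage sketch (illustrative, not part of the library): import a dmabuf
 * fd, e.g. one received over a unix socket via SCM_RIGHTS. The helper
 * name is hypothetical. fd_bo_from_dmabuf() does not take ownership of
 * the fd, so the caller still closes it once the import is done; the
 * bo keeps its own GEM handle alive independently.
 */
static struct fd_bo * import_dmabuf(struct fd_device *dev, int fd)
{
	struct fd_bo *bo = fd_bo_from_dmabuf(dev, fd);

	/* the bo (if any) no longer needs the fd */
	close(fd);

	return bo;
}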
drm_public struct fd_bo * fd_bo_from_handle(struct fd_device *dev,
		uint32_t handle, uint32_t size)
{
	struct fd_bo *bo = NULL;

	pthread_mutex_lock(&table_lock);

	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, size, handle);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}
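/*
 * Usage sketch (illustrative, hypothetical helper; needs <assert.h>):
 * because fd_bo_from_handle() consults the handle table before creating
 * anything, wrapping the same GEM handle twice yields the same
 * reference-counted bo rather than a duplicate.
 */
static void wrap_handle_twice(struct fd_device *dev, uint32_t handle,
		uint32_t size)
{
	struct fd_bo *a = fd_bo_from_handle(dev, handle, size);
	struct fd_bo *b = fd_bo_from_handle(dev, handle, size);

	assert(a == b);	/* second call hit the handle table */

	fd_bo_del(b);
	fd_bo_del(a);
}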
drm_public int drm_tegra_bo_wrap(struct drm_tegra_bo **bop, struct drm_tegra *drm,
				 uint32_t handle, uint32_t flags, uint32_t size)
{
	struct drm_tegra_bo *bo;
	int err = 0;

	if (!drm || !bop)
		return -EINVAL;

	pthread_mutex_lock(&table_lock);

	/* check handle table to see if BO is already open */
	bo = lookup_bo(drm->handle_table, handle);
	if (bo)
		goto unlock;

	bo = calloc(1, sizeof(*bo));
	if (!bo) {
		err = -ENOMEM;
		goto unlock;
	}

	DRMINITLISTHEAD(&bo->push_list);
	DRMINITLISTHEAD(&bo->bo_list);
	atomic_set(&bo->ref, 1);
	bo->handle = handle;
	bo->flags = flags;
	bo->size = size;
	bo->drm = drm;

	VG_BO_ALLOC(bo);

	/* add ourselves into the handle table */
	drmHashInsert(drm->handle_table, handle, bo);

	DBG_BO(bo, "success\n");

unlock:
	pthread_mutex_unlock(&table_lock);

	*bop = bo;

	return err;
}
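/*
 * Usage sketch (illustrative, hypothetical helper): wrap a GEM handle
 * created elsewhere on the same fd (for instance by a dumb-buffer
 * allocation) so the rest of the tegra API can use it. Assumes "drm"
 * came from drm_tegra_new(); drm_tegra_bo_unref() from the public API
 * releases the wrapped bo again.
 */
static int wrap_existing_handle(struct drm_tegra *drm, uint32_t handle,
				uint32_t size)
{
	struct drm_tegra_bo *bo;
	int err;

	err = drm_tegra_bo_wrap(&bo, drm, handle, 0, size);
	if (err < 0)
		return err;

	/* ... use bo ... */

	drm_tegra_bo_unref(bo);
	return 0;
}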
/* get buffer info */
static int get_buffer_info(struct etna_bo *bo)
{
	struct drm_vivante_gem_info req = {
			.handle = bo->handle,
	};
	int ret = drmCommandWriteRead(bo->dev->fd, DRM_VIVANTE_GEM_INFO,
			&req, sizeof(req));
	if (ret) {
		return ret;
	}

	/* really all we need for now is mmap offset */
	bo->offset = req.offset;

	return 0;
}

/* import a buffer object from DRI2 name */
struct etna_bo * etna_bo_from_name(struct etna_device *dev, uint32_t name)
{
	struct drm_gem_open req = {
			.name = name,
	};
	struct etna_bo *bo;

	pthread_mutex_lock(&table_lock);

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		ERROR_MSG("gem-open failed: %s", strerror(errno));
		bo = NULL;
		goto out_unlock;
	}

	/* check handle table, to see if bo is already open: */
	bo = lookup_bo(dev, req.handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, req.size, req.handle);
	if (bo)
		set_name(bo, name);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}

/* import a buffer from dmabuf fd, does not take ownership of the
 * fd so caller should close() the fd when it is otherwise done
 * with it (even if it is still using the 'struct etna_bo *')
 */
struct etna_bo * etna_bo_from_dmabuf(struct etna_device *dev, int fd)
{
#if 0
	struct etna_bo *bo = NULL;
	struct drm_prime_handle req = {
			.fd = fd,
	};
	int ret, size;

	pthread_mutex_lock(&table_lock);

	ret = drmIoctl(dev->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &req);
	if (ret) {
		goto fail;
	}

	bo = lookup_bo(dev, req.handle);
	if (!bo) {
		/* lseek() to get bo size */
		size = lseek(fd, 0, SEEK_END);
		lseek(fd, 0, SEEK_CUR);
		bo = bo_from_handle(dev, size, req.handle);
	}

	pthread_mutex_unlock(&table_lock);

	return bo;

fail:
	pthread_mutex_unlock(&table_lock);
#endif
	return NULL;
}

/* destroy a buffer object */
void etna_bo_del(struct etna_bo *bo)
{
	if (!bo)
		return;

	if (!atomic_dec_and_test(&bo->refcnt))
		return;

	if (bo->map)
		munmap(bo->map, bo->size);

	if (bo->fd)
		close(bo->fd);

	if (bo->handle) {
		struct drm_gem_close req = {
				.handle = bo->handle,
		};

		pthread_mutex_lock(&table_lock);
		drmHashDelete(bo->dev->handle_table, bo->handle);
		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
		pthread_mutex_unlock(&table_lock);
	}

	etna_device_del(bo->dev);

	free(bo);
}

/* get the global flink/DRI2 buffer name */
int etna_bo_get_name(struct etna_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
				.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			return ret;
		}

		bo->name = req.name;
	}

	*name = bo->name;

	return 0;
}

uint32_t etna_bo_handle(struct etna_bo *bo)
{
	return bo->handle;
}

/* caller owns the dmabuf fd that is returned and is responsible
 * to close() it when done
 */
int etna_bo_dmabuf(struct etna_bo *bo)
{
	if (!bo->fd) {
		struct drm_prime_handle req = {
				.handle = bo->handle,
				.flags = DRM_CLOEXEC,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &req);
		if (ret) {
			return ret;
		}

		bo->fd = req.fd;
	}
	return dup(bo->fd);
}

uint32_t etna_bo_size(struct etna_bo *bo)
{
	if (!bo->size) {
		get_buffer_info(bo);
	}
	return bo->size;
}

void * etna_bo_map(struct etna_bo *bo)
{
	if (!bo->map) {
		if (!bo->offset) {
			get_buffer_info(bo);
		}

		bo->map = mmap(0, bo->size, PROT_READ | PROT_WRITE,
				MAP_SHARED, bo->dev->fd, bo->offset);
		if (bo->map == MAP_FAILED) {
			bo->map = NULL;
		}
	}
	return bo->map;
}

int etna_bo_cpu_prep(struct etna_bo *bo, uint32_t op)
{
	return 0;
}

void etna_bo_cpu_fini(struct etna_bo *bo)
{
}
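/*
 * Usage sketch (illustrative, hypothetical helpers): export a bo as a
 * flink name in one process and re-import it in another, using the
 * etna_bo_get_name() and etna_bo_from_name() entry points above. The
 * name itself has to be passed between processes out of band; error
 * handling is minimal.
 */
static int share_buffer(struct etna_bo *bo, uint32_t *name)
{
	/* exporter side: flink the handle, then publish *name */
	return etna_bo_get_name(bo, name);
}

static void * use_shared_buffer(struct etna_device *dev, uint32_t name)
{
	/* importer side: open by name and map for CPU access */
	struct etna_bo *bo = etna_bo_from_name(dev, name);

	return bo ? etna_bo_map(bo) : NULL;
}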
drm_public int drm_tegra_bo_from_dmabuf(struct drm_tegra_bo **bop,
					struct drm_tegra *drm,
					int fd, uint32_t flags)
{
	struct drm_tegra_bo *dup;
	struct drm_tegra_bo *bo;
	uint32_t handle;
	uint32_t size;
	int err;

	if (!drm || !bop)
		return -EINVAL;

	pthread_mutex_lock(&table_lock);

	bo = calloc(1, sizeof(*bo));
	if (!bo) {
		err = -ENOMEM;
		goto unlock;
	}

	err = drmPrimeFDToHandle(drm->fd, fd, &handle);
	if (err) {
		free(bo);
		bo = NULL;
		goto unlock;
	}

	/* check handle table to see if BO is already open */
	dup = lookup_bo(drm->handle_table, handle);
	if (dup) {
		DBG_BO(dup, "success reused\n");
		free(bo);
		bo = dup;
		goto unlock;
	}

	errno = 0;

	/* lseek() to get bo size */
	size = lseek(fd, 0, SEEK_END);
	lseek(fd, 0, SEEK_CUR);

	/* store lseek() error number */
	err = -errno;

	atomic_set(&bo->ref, 1);
	bo->handle = handle;
	bo->flags = flags;
	bo->size = size;
	bo->drm = drm;

	VG_BO_ALLOC(bo);

	/* add ourselves into the handle table */
	drmHashInsert(drm->handle_table, handle, bo);

	/* handle lseek() error */
	if (err) {
		VDBG_BO(bo, "lseek failed %d (%s)\n", err, strerror(-err));
		drm_tegra_bo_free(bo);
		bo = NULL;
	} else {
		DBG_BO(bo, "success\n");
	}

unlock:
	pthread_mutex_unlock(&table_lock);

	*bop = bo;

	return err;
}
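/*
 * Usage sketch (illustrative, hypothetical helper): import a dmabuf
 * exported by another driver, e.g. a capture buffer. The fd is not
 * consumed, so the caller keeps responsibility for close()ing it.
 */
static struct drm_tegra_bo * import_capture_buffer(struct drm_tegra *drm,
						   int dmabuf_fd)
{
	struct drm_tegra_bo *bo = NULL;

	if (drm_tegra_bo_from_dmabuf(&bo, drm, dmabuf_fd, 0) < 0)
		return NULL;

	return bo;
}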
drm_public int drm_tegra_bo_from_name(struct drm_tegra_bo **bop,
				      struct drm_tegra *drm,
				      uint32_t name, uint32_t flags)
{
	struct drm_gem_open args;
	struct drm_tegra_bo *dup;
	struct drm_tegra_bo *bo;
	int err = 0;

	if (!drm || !name || !bop)
		return -EINVAL;

	pthread_mutex_lock(&table_lock);

	/* check name table first, to see if BO is already open */
	bo = lookup_bo(drm->name_table, name);
	if (bo)
		goto unlock;

	bo = calloc(1, sizeof(*bo));
	if (!bo) {
		err = -ENOMEM;
		goto unlock;
	}

	memset(&args, 0, sizeof(args));
	args.name = name;

	err = drmIoctl(drm->fd, DRM_IOCTL_GEM_OPEN, &args);
	if (err < 0) {
		/* latch errno before anything else can clobber it */
		err = -errno;
		VDBG_DRM(drm, "failed name 0x%08X err %d strerror(%s)\n",
			 name, err, strerror(-err));
		free(bo);
		bo = NULL;
		goto unlock;
	}

	/* check handle table second, to see if BO is already open */
	dup = lookup_bo(drm->handle_table, args.handle);
	if (dup) {
		VDBG_BO(dup, "success reused name 0x%08X\n", name);
		free(bo);
		bo = dup;
		goto unlock;
	}

	drmHashInsert(drm->name_table, name, bo);

	atomic_set(&bo->ref, 1);
	bo->name = name;
	bo->handle = args.handle;
	bo->flags = flags;
	bo->size = args.size;
	bo->drm = drm;

	DBG_BO(bo, "success\n");

	VG_BO_ALLOC(bo);

unlock:
	pthread_mutex_unlock(&table_lock);

	*bop = bo;

	return err;
}
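/*
 * Usage sketch (illustrative, hypothetical helper): open a buffer that
 * was published via flink. Passing name == 0 is rejected with -EINVAL
 * by the argument check at the top of drm_tegra_bo_from_name().
 */
static struct drm_tegra_bo * open_named_buffer(struct drm_tegra *drm,
					       uint32_t name)
{
	struct drm_tegra_bo *bo = NULL;

	if (drm_tegra_bo_from_name(&bo, drm, name, 0) < 0)
		return NULL;

	return bo;
}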
/* get buffer info */
static int get_buffer_info(struct omap_bo *bo)
{
	struct drm_omap_gem_info req = {
			.handle = bo->handle,
	};
	int ret = drmCommandWriteRead(bo->dev->fd, DRM_OMAP_GEM_INFO,
			&req, sizeof(req));
	if (ret) {
		return ret;
	}

	/* really all we need for now is mmap offset */
	bo->offset = req.offset;
	bo->size = req.size;

	return 0;
}

/* import a buffer object from DRI2 name */
drm_public struct omap_bo *omap_bo_from_name(struct omap_device *dev,
		uint32_t name)
{
	struct omap_bo *bo = NULL;
	struct drm_gem_open req = {
			.name = name,
	};

	pthread_mutex_lock(&table_lock);

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		goto fail;
	}

	bo = lookup_bo(dev, req.handle);
	if (!bo) {
		bo = bo_from_handle(dev, req.handle);
		if (!bo)
			goto fail;
		bo->name = name;
	}

	pthread_mutex_unlock(&table_lock);

	return bo;

fail:
	pthread_mutex_unlock(&table_lock);
	free(bo);
	return NULL;
}

/* import a buffer from dmabuf fd, does not take ownership of the
 * fd so caller should close() the fd when it is otherwise done
 * with it (even if it is still using the 'struct omap_bo *')
 */
drm_public struct omap_bo *omap_bo_from_dmabuf(struct omap_device *dev,
		int fd)
{
	struct omap_bo *bo = NULL;
	struct drm_prime_handle req = {
			.fd = fd,
	};
	int ret;

	pthread_mutex_lock(&table_lock);

	ret = drmIoctl(dev->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &req);
	if (ret) {
		goto fail;
	}

	bo = lookup_bo(dev, req.handle);
	if (!bo) {
		bo = bo_from_handle(dev, req.handle);
	}

	pthread_mutex_unlock(&table_lock);

	return bo;

fail:
	pthread_mutex_unlock(&table_lock);
	free(bo);
	return NULL;
}

/* destroy a buffer object */
drm_public void omap_bo_del(struct omap_bo *bo)
{
	if (!bo) {
		return;
	}

	if (!atomic_dec_and_test(&bo->refcnt))
		return;

	if (bo->map) {
		munmap(bo->map, bo->size);
	}

	if (bo->fd >= 0) {
		close(bo->fd);
	}

	if (bo->handle) {
		struct drm_gem_close req = {
				.handle = bo->handle,
		};

		pthread_mutex_lock(&table_lock);
		drmHashDelete(bo->dev->handle_table, bo->handle);
		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
		pthread_mutex_unlock(&table_lock);
	}

	omap_device_del(bo->dev);

	free(bo);
}

/* get the global flink/DRI2 buffer name */
drm_public int omap_bo_get_name(struct omap_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
				.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			return ret;
		}

		bo->name = req.name;
	}

	*name = bo->name;

	return 0;
}

drm_public uint32_t omap_bo_handle(struct omap_bo *bo)
{
	return bo->handle;
}

/* caller owns the dmabuf fd that is returned and is responsible
 * to close() it when done
 */
drm_public int omap_bo_dmabuf(struct omap_bo *bo)
{
	if (bo->fd < 0) {
		struct drm_prime_handle req = {
				.handle = bo->handle,
				.flags = DRM_CLOEXEC,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &req);
		if (ret) {
			return ret;
		}

		bo->fd = req.fd;
	}
	return dup(bo->fd);
}

drm_public uint32_t omap_bo_size(struct omap_bo *bo)
{
	if (!bo->size) {
		get_buffer_info(bo);
	}
	return bo->size;
}

drm_public void *omap_bo_map(struct omap_bo *bo)
{
	if (!bo->map) {
		if (!bo->offset) {
			get_buffer_info(bo);
		}

		bo->map = mmap(0, bo->size, PROT_READ | PROT_WRITE,
				MAP_SHARED, bo->dev->fd, bo->offset);
		if (bo->map == MAP_FAILED) {
			bo->map = NULL;
		}
	}
	return bo->map;
}

drm_public int omap_bo_cpu_prep(struct omap_bo *bo, enum omap_gem_op op)
{
	struct drm_omap_gem_cpu_prep req = {
			.handle = bo->handle,
			.op = op,
	};
	return drmCommandWrite(bo->dev->fd,
			DRM_OMAP_GEM_CPU_PREP, &req, sizeof(req));
}

drm_public int omap_bo_cpu_fini(struct omap_bo *bo, enum omap_gem_op op)
{
	struct drm_omap_gem_cpu_fini req = {
			.handle = bo->handle,
			.op = op,
			.nregions = 0,
	};
	return drmCommandWrite(bo->dev->fd,
			DRM_OMAP_GEM_CPU_FINI, &req, sizeof(req));
}
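/*
 * Usage sketch (illustrative, hypothetical helper; memset() needs
 * <string.h>): bracket CPU writes with omap_bo_cpu_prep() and
 * omap_bo_cpu_fini() so the kernel can synchronize against the GPU
 * and display. OMAP_GEM_WRITE comes from the omap_drm.h UAPI header.
 */
static int fill_buffer(struct omap_bo *bo, uint8_t value)
{
	void *ptr = omap_bo_map(bo);
	int ret;

	if (!ptr)
		return -ENOMEM;

	ret = omap_bo_cpu_prep(bo, OMAP_GEM_WRITE);
	if (ret)
		return ret;

	memset(ptr, value, omap_bo_size(bo));

	return omap_bo_cpu_fini(bo, OMAP_GEM_WRITE);
}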