int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_create *args = data;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        sx_slock(&rdev->exclusive_lock);
        /* create a gem object to contain this object in */
        args->size = roundup(args->size, PAGE_SIZE);
        r = radeon_gem_object_create(rdev, args->size, args->alignment,
                                     args->initial_domain, false,
                                     false, &gobj);
        if (r) {
                sx_sunlock(&rdev->exclusive_lock);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        handle = 0;
        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
                sx_sunlock(&rdev->exclusive_lock);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        args->handle = handle;
        sx_sunlock(&rdev->exclusive_lock);
        return 0;
}
int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
                      struct drm_mode_create_dumb *args)
{
        struct drm_gem_object *gobj;
        u32 handle;
        int ret;

        args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 16);
        args->size = args->pitch * args->height;

        ret = hibmc_gem_create(dev, args->size, false, &gobj);
        if (ret) {
                DRM_ERROR("failed to create GEM object: %d\n", ret);
                return ret;
        }

        ret = drm_gem_handle_create(file, gobj, &handle);
        drm_gem_object_unreference_unlocked(gobj);
        if (ret) {
                DRM_ERROR("failed to unreference GEM object: %d\n", ret);
                return ret;
        }

        args->handle = handle;
        return 0;
}
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct drm_nouveau_gem_new *req = data;
        struct nouveau_bo *nvbo = NULL;
        int ret = 0;

        ret = nouveau_gem_new(cli, req->info.size, req->align,
                              req->info.domain, req->info.tile_mode,
                              req->info.tile_flags, &nvbo);
        if (ret)
                return ret;

        ret = drm_gem_handle_create(file_priv, &nvbo->gem,
                                    &req->info.handle);
        if (ret == 0) {
                ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
                if (ret)
                        drm_gem_handle_delete(file_priv, req->info.handle);
        }

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(&nvbo->gem);
        return ret;
}
int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
        struct mtk_drm_gem_obj *mtk_gem;
        unsigned int min_pitch = args->width * ((args->bpp + 7) / 8);
        int ret;

        args->pitch = min_pitch;
        args->size = args->pitch * args->height;

        mtk_gem = mtk_drm_gem_create(dev, args->size, false);
        if (IS_ERR(mtk_gem))
                return PTR_ERR(mtk_gem);

        /*
         * Allocate an ID in the IDR table where the object is registered;
         * the handle is the ID that userspace sees.
         */
        ret = drm_gem_handle_create(file_priv, &mtk_gem->base, &args->handle);
        if (ret)
                goto err_handle_create;

        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_unreference_unlocked(&mtk_gem->base);

        return 0;

err_handle_create:
        mtk_drm_gem_free_object(&mtk_gem->base);
        return ret;
}
int mediatek_gem_create_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct mtk_drm_gem_obj *mtk_gem;
        struct drm_mtk_gem_create *args = data;
        int ret;

        mtk_gem = mtk_drm_gem_create(dev, args->size, false);
        if (IS_ERR(mtk_gem))
                return PTR_ERR(mtk_gem);

        /*
         * Allocate an ID in the IDR table where the object is registered;
         * the handle is the ID that userspace sees.
         */
        ret = drm_gem_handle_create(file_priv, &mtk_gem->base, &args->handle);
        if (ret)
                goto err_handle_create;

        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_unreference_unlocked(&mtk_gem->base);

        return 0;

err_handle_create:
        mtk_drm_gem_free_object(&mtk_gem->base);
        return ret;
}
int virtio_gpu_gem_create(struct drm_file *file,
                          struct drm_device *dev,
                          uint64_t size,
                          struct drm_gem_object **obj_p,
                          uint32_t *handle_p)
{
        struct virtio_gpu_object *obj;
        int ret;
        u32 handle;

        obj = virtio_gpu_alloc_object(dev, size, false, false);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        ret = drm_gem_handle_create(file, &obj->gem_base, &handle);
        if (ret) {
                drm_gem_object_release(&obj->gem_base);
                return ret;
        }

        *obj_p = &obj->gem_base;

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(&obj->gem_base);

        *handle_p = handle;
        return 0;
}
struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
                                       struct drm_file *file,
                                       u32 *handle,
                                       u64 size)
{
        struct vkms_gem_object *obj;
        int ret;

        if (!file || !dev || !handle)
                return ERR_PTR(-EINVAL);

        obj = __vkms_gem_create(dev, size);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        ret = drm_gem_handle_create(file, &obj->gem, handle);
        drm_gem_object_put_unlocked(&obj->gem);
        if (ret) {
                drm_gem_object_release(&obj->gem);
                kfree(obj);
                return ERR_PTR(ret);
        }

        return &obj->gem;
}
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
                                             struct drm_device *drm,
                                             unsigned int size,
                                             unsigned long flags,
                                             unsigned int *handle)
{
        struct tegra_bo *bo;
        int ret;

        bo = tegra_bo_create(drm, size, flags);
        if (IS_ERR(bo))
                return bo;

        ret = drm_gem_handle_create(file, &bo->gem, handle);
        if (ret)
                goto err;

        drm_gem_object_unreference_unlocked(&bo->gem);

        return bo;

err:
        tegra_bo_free_object(&bo->gem);
        return ERR_PTR(ret);
}
int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
                                      struct drm_file *file_priv,
                                      u32 domain,
                                      size_t size,
                                      struct qxl_surface *surf,
                                      struct qxl_bo **qobj,
                                      uint32_t *handle)
{
        struct drm_gem_object *gobj;
        int r;

        BUG_ON(!qobj);
        BUG_ON(!handle);

        r = qxl_gem_object_create(qdev, size, 0,
                                  domain,
                                  false, false, surf,
                                  &gobj);
        if (r)
                return -ENOMEM;
        r = drm_gem_handle_create(file_priv, gobj, handle);
        if (r)
                return r;
        /* drop reference from allocate - handle holds it now */
        *qobj = gem_to_qxl_bo(gobj);
        drm_gem_object_unreference_unlocked(gobj);
        return 0;
}
/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file_priv)
{
        struct drm_gem_open *args = data;
        struct drm_gem_object *obj;
        int ret;
        u32 handle;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        spin_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, (int) args->name);
        if (obj)
                drm_gem_object_reference(obj);
        spin_unlock(&dev->object_name_lock);
        if (!obj)
                return -ENOENT;

        ret = drm_gem_handle_create(file_priv, obj, &handle);
        drm_gem_object_unreference_unlocked(obj);
        if (ret)
                return ret;

        args->handle = handle;
        args->size = obj->size;

        return 0;
}
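/*
 * For context, a hedged userspace-side sketch of how the ioctl above is
 * typically reached: a global (flink) name is passed in through
 * DRM_IOCTL_GEM_OPEN and a per-file handle plus the object size come back.
 * DRM_IOCTL_GEM_OPEN and struct drm_gem_open are the standard DRM UAPI; the
 * drm_fd and flink_name parameters are placeholders for illustration only,
 * not part of the code in this listing.
 */
#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int open_by_flink_name(int drm_fd, uint32_t flink_name,
                              uint32_t *handle, uint64_t *size)
{
        struct drm_gem_open args = { .name = flink_name };

        if (ioctl(drm_fd, DRM_IOCTL_GEM_OPEN, &args) < 0)
                return -errno;

        *handle = args.handle;  /* per-file GEM handle */
        *size = args.size;      /* object size in bytes */
        return 0;
}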
int radeon_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0)
                      * ((args->bpp + 1) / 8);
        args->size = args->pitch * args->height;
        args->size = roundup2(args->size, PAGE_SIZE);

        r = radeon_gem_object_create(rdev, args->size, 0,
                                     RADEON_GEM_DOMAIN_VRAM,
                                     false, ttm_bo_type_device,
                                     &gobj);
        if (r)
                return -ENOMEM;

        handle = 0;
        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r)
                return r;

        args->handle = handle;
        return 0;
}
/**
 * drm_gem_cma_create_with_handle - allocate an object with the given size and
 *     return a GEM handle to it
 * @file_priv: DRM file-private structure to register the handle for
 * @drm: DRM device
 * @size: size of the object to allocate
 * @handle: return location for the GEM handle
 *
 * This function creates a CMA GEM object, allocating a physically contiguous
 * chunk of memory as backing store. The GEM object is then added to the list
 * of objects associated with the given file and a handle to it is returned.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
drm_gem_cma_create_with_handle(struct drm_file *file_priv,
                               struct drm_device *drm, size_t size,
                               uint32_t *handle)
{
        struct drm_gem_cma_object *cma_obj;
        struct drm_gem_object *gem_obj;
        int ret;

        cma_obj = drm_gem_cma_create(drm, size);
        if (IS_ERR(cma_obj))
                return cma_obj;

        gem_obj = &cma_obj->base;

        /*
         * Allocate an ID in the IDR table where the object is registered;
         * the handle is the ID that userspace sees.
         */
        ret = drm_gem_handle_create(file_priv, gem_obj, handle);
        if (ret)
                goto err_handle_create;

        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_unreference_unlocked(gem_obj);

        return cma_obj;

err_handle_create:
        drm->driver->gem_free_object(gem_obj);

        return ERR_PTR(ret);
}
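/*
 * A minimal sketch of how a dumb-buffer callback might drive the CMA helper
 * above: compute pitch and size from the request, then let
 * drm_gem_cma_create_with_handle() handle the allocation, handle creation and
 * reference drop.  This mirrors the in-tree CMA dumb_create helper in spirit,
 * but the function name and the exact pitch rounding here are assumptions for
 * illustration, not code from this listing.
 */
static int example_cma_dumb_create(struct drm_file *file_priv,
                                   struct drm_device *drm,
                                   struct drm_mode_create_dumb *args)
{
        struct drm_gem_cma_object *cma_obj;

        args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        args->size = args->pitch * args->height;

        cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
                                                 &args->handle);
        return PTR_ERR_OR_ZERO(cma_obj);
}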
static int tegra_fb_create_handle(struct drm_framebuffer *framebuffer,
                                  struct drm_file *file, unsigned int *handle)
{
        struct tegra_fb *fb = to_tegra_fb(framebuffer);

        return drm_gem_handle_create(file, &fb->planes[0]->gem, handle);
}
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file_priv)
{
        struct drm_gem_open *args;
        struct drm_gem_object *obj;
        int ret;
        uint32_t handle;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return (ENODEV);
        args = data;

        obj = drm_gem_name_ref(&dev->object_names, args->name,
            (void (*)(void *))drm_gem_object_reference);
        if (obj == NULL)
                return (ENOENT);
        handle = 0;
        ret = drm_gem_handle_create(file_priv, obj, &handle);
        drm_gem_object_unreference_unlocked(obj);
        if (ret != 0)
                return (ret);

        args->handle = handle;
        args->size = obj->size;

        return (0);
}
static int omap_framebuffer_create_handle(struct drm_framebuffer *fb,
                                           struct drm_file *file_priv,
                                           unsigned int *handle)
{
        struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);

        return drm_gem_handle_create(file_priv, omap_fb->bo, handle);
}
/*
 * rockchip_gem_create_with_handle - allocate an object with the given
 * size and create a gem handle on it
 *
 * returns a struct rockchip_gem_object* on success or ERR_PTR values
 * on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
                                struct drm_device *drm, unsigned int size,
                                unsigned int *handle)
{
        struct rockchip_gem_object *rk_obj;
        struct drm_gem_object *obj;
        int ret;

        rk_obj = rockchip_gem_create_object(drm, size, false);
        if (IS_ERR(rk_obj))
                return ERR_CAST(rk_obj);

        obj = &rk_obj->base;

        /*
         * Allocate an ID in the IDR table where the object is registered;
         * the handle is the ID that userspace sees.
         */
        ret = drm_gem_handle_create(file_priv, obj, handle);
        if (ret)
                goto err_handle_create;

        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_unreference_unlocked(obj);

        return rk_obj;

err_handle_create:
        rockchip_gem_free_object(obj);

        return ERR_PTR(ret);
}
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_nouveau_gem_new *req = data;
        struct nouveau_bo *nvbo = NULL;
        int ret = 0;

        if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
                dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;

        if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
                NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
                return -EINVAL;
        }

        ret = nouveau_gem_new(dev, req->info.size, req->align,
                              req->info.domain, req->info.tile_mode,
                              req->info.tile_flags, &nvbo);
        if (ret)
                return ret;

        ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
        if (ret == 0) {
                ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
                if (ret)
                        drm_gem_handle_delete(file_priv, req->info.handle);
        }

        drm_gem_object_unreference_unlocked(nvbo->gem);
        return ret;
}
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_create *args = data;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        /* create a gem object to contain this object in */
        args->size = roundup(args->size, PAGE_SIZE);
        r = radeon_gem_object_create(rdev, args->size, args->alignment,
                                     args->initial_domain, false,
                                     false, &gobj);
        if (r)
                return r;

        r = drm_gem_handle_create(filp, gobj, &handle);
        if (r) {
                drm_gem_object_unreference_unlocked(gobj);
                return r;
        }
        drm_gem_object_handle_unreference_unlocked(gobj);
        args->handle = handle;
        return 0;
}
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_fb *pfb = nouveau_fb(drm->device);
        struct drm_nouveau_gem_new *req = data;
        struct nouveau_bo *nvbo = NULL;
        int ret = 0;

        drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;

        if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
                NV_ERROR(drm, "bad page flags: 0x%08x\n", req->info.tile_flags);
                return -EINVAL;
        }

        ret = nouveau_gem_new(dev, req->info.size, req->align,
                              req->info.domain, req->info.tile_mode,
                              req->info.tile_flags, &nvbo);
        if (ret)
                return ret;

        ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
        if (ret == 0) {
                ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
                if (ret)
                        drm_gem_handle_delete(file_priv, req->info.handle);
        }

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(nvbo->gem);
        return ret;
}
static int udl_gem_create(struct drm_file *file,
                          struct drm_device *dev,
                          uint64_t size,
                          uint32_t *handle_p)
{
        struct udl_gem_object *obj;
        int ret;
        u32 handle;

        size = roundup(size, PAGE_SIZE);

        obj = udl_gem_alloc_object(dev, size);
        if (obj == NULL)
                return -ENOMEM;

        ret = drm_gem_handle_create(file, &obj->base, &handle);
        if (ret) {
                drm_gem_object_release(&obj->base);
                kfree(obj);
                return ret;
        }

        drm_gem_object_unreference_unlocked(&obj->base);
        *handle_p = handle;
        return 0;
}
static int mtk_drm_fb_create_handle(struct drm_framebuffer *fb,
                                    struct drm_file *file_priv,
                                    unsigned int *handle)
{
        struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);

        return drm_gem_handle_create(file_priv, mtk_fb->gem_obj, handle);
}
static int xylon_drm_fb_create_handle(struct drm_framebuffer *fb,
                                      struct drm_file *file_priv,
                                      unsigned int *handle)
{
        struct xylon_drm_fb *xfb = fb_to_xylon_drm_fb(fb);

        return drm_gem_handle_create(file_priv, xfb->obj, handle);
}
static int evdi_user_framebuffer_create_handle(struct drm_framebuffer *fb,
                                               struct drm_file *file_priv,
                                               unsigned int *handle)
{
        struct evdi_framebuffer *efb = to_evdi_fb(fb);

        return drm_gem_handle_create(file_priv, &efb->obj->base, handle);
}
static int msm_framebuffer_create_handle(struct drm_framebuffer *fb,
                                          struct drm_file *file_priv,
                                          unsigned int *handle)
{
        struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);

        return drm_gem_handle_create(file_priv, msm_fb->planes[0], handle);
}
static int rockchip_drm_fb_create_handle(struct drm_framebuffer *fb,
                                         struct drm_file *file_priv,
                                         unsigned int *handle)
{
        struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);

        return drm_gem_handle_create(file_priv, rockchip_fb->obj[0], handle);
}
static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
                                       struct drm_file *file_priv,
                                       unsigned int *handle)
{
        struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);

        return drm_gem_handle_create(file_priv,
                                     &exynos_fb->exynos_gem[0]->base, handle);
}
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_priv)
{
        struct drm_vc4_create_shader_bo *args = data;
        struct vc4_bo *bo = NULL;
        int ret;

        if (args->size == 0)
                return -EINVAL;

        if (args->size % sizeof(u64) != 0)
                return -EINVAL;

        if (args->flags != 0) {
                DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
                return -EINVAL;
        }

        if (args->pad != 0) {
                DRM_INFO("Pad set: 0x%08x\n", args->pad);
                return -EINVAL;
        }

        bo = vc4_bo_create(dev, args->size, true);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        if (copy_from_user(bo->base.vaddr,
                           (void __user *)(uintptr_t)args->data,
                           args->size)) {
                ret = -EFAULT;
                goto fail;
        }
        /* Clear the rest of the memory allocated from the BO cache. */
        memset(bo->base.vaddr + args->size, 0,
               bo->base.base.size - args->size);

        bo->validated_shader = vc4_validate_shader(&bo->base);
        if (!bo->validated_shader) {
                ret = -EINVAL;
                goto fail;
        }

        /*
         * We have to create the handle after validation, to avoid races
         * with users doing things like mmapping the shader BO.
         */
        ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);

fail:
        drm_gem_object_unreference_unlocked(&bo->base.base);

        return ret;
}
static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
                                       struct drm_file *file_priv,
                                       unsigned int *handle)
{
        struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);

        DRM_DEBUG_KMS("%s\n", __FILE__);

        return drm_gem_handle_create(file_priv,
                                     &exynos_fb->exynos_gem_obj[0]->base,
                                     handle);
}
static int
nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_nouveau_channel_alloc *init = data;
        struct nouveau_channel *chan;
        int ret;

        if (dev_priv->engine.graph.accel_blocked)
                return -ENODEV;

        if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
                return -EINVAL;

        ret = nouveau_channel_alloc(dev, &chan, file_priv,
                                    init->fb_ctxdma_handle,
                                    init->tt_ctxdma_handle);
        if (ret)
                return ret;
        init->channel = chan->id;

        if (chan->dma.ib_max)
                init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
                                        NOUVEAU_GEM_DOMAIN_GART;
        else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
                init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
        else
                init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

        if (dev_priv->card_type < NV_C0) {
                init->subchan[0].handle = NvM2MF;
                if (dev_priv->card_type < NV_50)
                        init->subchan[0].grclass = 0x0039;
                else
                        init->subchan[0].grclass = 0x5039;
                init->subchan[1].handle = NvSw;
                init->subchan[1].grclass = NV_SW;
                init->nr_subchan = 2;
        } else {
                init->subchan[0].handle = 0x9039;
                init->subchan[0].grclass = 0x9039;
                init->nr_subchan = 1;
        }

        /* Named memory object area */
        ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
                                    &init->notifier_handle);

        if (ret == 0)
                atomic_inc(&chan->users); /* userspace reference */
        nouveau_channel_put(&chan);
        return ret;
}
static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
                                       struct drm_file *file_priv,
                                       unsigned int *handle)
{
        struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);

        /* This fb should have only one gem object. */
        if (WARN_ON(exynos_fb->buf_cnt != 1))
                return -EINVAL;

        return drm_gem_handle_create(file_priv,
                                     &exynos_fb->exynos_gem_obj[0]->base,
                                     handle);
}
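/*
 * A minimal sketch of the pattern the creation-style functions above share,
 * written for a hypothetical "foo" driver: allocate a backing object (which
 * starts with one reference), publish it to userspace with
 * drm_gem_handle_create(), then drop the allocation reference so the handle
 * owns the remaining one.  The foo_* helpers are placeholders, not a real
 * API; error paths in real drivers vary, as the listing shows.
 */
static int foo_gem_create_with_handle(struct drm_file *file_priv,
                                      struct drm_device *dev,
                                      size_t size, u32 *handle)
{
        struct drm_gem_object *obj;
        int ret;

        /* hypothetical allocator; returns an object holding one reference */
        obj = foo_gem_object_create(dev, size);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        ret = drm_gem_handle_create(file_priv, obj, handle);

        /* drop reference from allocate - on success the handle holds one */
        drm_gem_object_unreference_unlocked(obj);

        return ret;
}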