static struct mtk_drm_fb *mtk_drm_framebuffer_init(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode, struct drm_gem_object **obj) { struct mtk_drm_fb *mtk_fb; unsigned int i; int ret; mtk_fb = kzalloc(sizeof(*mtk_fb), GFP_KERNEL); if (!mtk_fb) return ERR_PTR(-ENOMEM); drm_helper_mode_fill_fb_struct(&mtk_fb->base, mode); for (i = 0; i < drm_format_num_planes(mode->pixel_format); i++) mtk_fb->gem_obj[i] = obj[i]; ret = drm_framebuffer_init(dev, &mtk_fb->base, &mediatek_drm_fb_funcs); if (ret) { DRM_ERROR("failed to initialize framebuffer\n"); return ERR_PTR(ret); } return mtk_fb; }
struct hibmc_framebuffer * hibmc_framebuffer_init(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { struct hibmc_framebuffer *hibmc_fb; int ret; hibmc_fb = kzalloc(sizeof(*hibmc_fb), GFP_KERNEL); if (!hibmc_fb) { DRM_ERROR("failed to allocate hibmc_fb\n"); return ERR_PTR(-ENOMEM); } drm_helper_mode_fill_fb_struct(dev, &hibmc_fb->fb, mode_cmd); hibmc_fb->obj = obj; ret = drm_framebuffer_init(dev, &hibmc_fb->fb, &hibmc_fb_funcs); if (ret) { DRM_ERROR("drm_framebuffer_init failed: %d\n", ret); kfree(hibmc_fb); return ERR_PTR(ret); } return hibmc_fb; }
/*
 * xylon_drm_fb_init - allocate and register a Xylon framebuffer
 * @dev: DRM device
 * @mode_cmd: framebuffer creation parameters from userspace
 * @obj: GEM object backing the framebuffer
 *
 * Returns the embedded &struct drm_framebuffer on success or an
 * ERR_PTR() on failure.
 */
struct drm_framebuffer *xylon_drm_fb_init(struct drm_device *dev,
					  struct drm_mode_fb_cmd2 *mode_cmd,
					  struct drm_gem_object *obj)
{
	struct drm_framebuffer *fb;
	struct xylon_drm_fb *xfb;
	int ret;

	xfb = kzalloc(sizeof(*xfb), GFP_KERNEL);
	if (!xfb) {
		DRM_ERROR("failed allocate framebuffer\n");
		return ERR_PTR(-ENOMEM);
	}

	xfb->obj = obj;
	fb = &xfb->fb;

	drm_helper_mode_fill_fb_struct(fb, mode_cmd);

	ret = drm_framebuffer_init(dev, fb, &xylon_fb_funcs);
	if (ret) {
		DRM_ERROR("failed framebuffer init\n");
		goto err;
	}

	return fb;

err:
	/*
	 * NOTE(review): this destroys an fb whose drm_framebuffer_init()
	 * failed, i.e. one never fully registered with the DRM core —
	 * assumes xylon_drm_fb_destroy() tolerates that; confirm against
	 * its implementation.
	 */
	xylon_drm_fb_destroy(fb);
	return ERR_PTR(ret);
}
struct drm_framebuffer * exynos_drm_framebuffer_init(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { struct exynos_drm_fb *exynos_fb; int ret; exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); if (!exynos_fb) { DRM_ERROR("failed to allocate exynos drm framebuffer\n"); return ERR_PTR(-ENOMEM); } ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs); if (ret) { DRM_ERROR("failed to initialize framebuffer\n"); return ERR_PTR(ret); } drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd); exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj); return &exynos_fb->fb; }
/*
 * nv_drm_framebuffer_init - register an NVIDIA framebuffer and create its
 * backing NvKmsKapiSurface.
 * @dev: DRM device
 * @nv_fb: partially-constructed framebuffer (base struct and
 *         nv_nvkms_memory must already be filled in by the caller)
 * @pixel_format: DRM fourcc to translate into an NvKms surface format
 *
 * Returns 0 on success or a negative errno; cleans up the registered
 * base framebuffer if surface creation fails.
 */
static int nv_drm_framebuffer_init(struct drm_device *dev,
                                   struct nv_drm_framebuffer *nv_fb,
                                   NvU32 pixel_format)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    enum NvKmsSurfaceMemoryFormat format;
    int ret;

    /*
     * NOTE(review): on translation failure this only warns; `format` then
     * looks uninitialized when passed to createSurface() below — confirm
     * NV_DRM_WARN()/drm_format_to_nvkms_format() guarantee otherwise.
     */
    NV_DRM_WARN(!drm_format_to_nvkms_format(pixel_format, &format));

    /* Initialize the base framebuffer object and add it to drm subsystem */
    ret = drm_framebuffer_init(dev, &nv_fb->base, &nv_framebuffer_funcs);
    if (ret != 0) {
        NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to initialize framebuffer object");
        return ret;
    }

    /* Create NvKmsKapiSurface */
    nv_fb->pSurface = nvKms->createSurface(
        nv_dev->pDevice, nv_fb->nv_nvkms_memory->pMemory,
        format, nv_fb->base.width, nv_fb->base.height,
        nv_fb->base.pitches[0]);
    if (nv_fb->pSurface == NULL) {
        NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to create NvKmsKapiSurface");
        /* Undo the drm core registration done above. */
        drm_framebuffer_cleanup(&nv_fb->base);
        return -EINVAL;
    }

    return 0;
}
static struct mtk_drm_fb *mtk_drm_framebuffer_init(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode, struct drm_gem_object *obj) { struct mtk_drm_fb *mtk_fb; int ret; if (drm_format_num_planes(mode->pixel_format) != 1) return ERR_PTR(-EINVAL); mtk_fb = kzalloc(sizeof(*mtk_fb), GFP_KERNEL); if (!mtk_fb) return ERR_PTR(-ENOMEM); drm_helper_mode_fill_fb_struct(&mtk_fb->base, mode); mtk_fb->gem_obj = obj; ret = drm_framebuffer_init(dev, &mtk_fb->base, &mtk_drm_fb_funcs); if (ret) { DRM_ERROR("failed to initialize framebuffer\n"); kfree(mtk_fb); return ERR_PTR(ret); } return mtk_fb; }
static struct rockchip_drm_fb * rockchip_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **obj, unsigned int num_planes) { struct rockchip_drm_fb *rockchip_fb; int ret; int i; rockchip_fb = kzalloc(sizeof(*rockchip_fb), GFP_KERNEL); if (!rockchip_fb) return ERR_PTR(-ENOMEM); drm_helper_mode_fill_fb_struct(&rockchip_fb->fb, mode_cmd); for (i = 0; i < num_planes; i++) rockchip_fb->obj[i] = obj[i]; ret = drm_framebuffer_init(dev, &rockchip_fb->fb, &rockchip_drm_fb_funcs); if (ret) { dev_err(dev->dev, "Failed to initialize framebuffer: %d\n", ret); kfree(rockchip_fb); return ERR_PTR(ret); } return rockchip_fb; }
static struct drm_framebuffer *tegra_fb_alloc(struct drm_device *drm, const struct drm_mode_fb_cmd2 *mode_cmd, struct tegra_bo **planes, unsigned int num_planes) { struct drm_framebuffer *fb; unsigned int i; int err; fb = kzalloc(sizeof(*fb), GFP_KERNEL); if (!fb) return ERR_PTR(-ENOMEM); drm_helper_mode_fill_fb_struct(drm, fb, mode_cmd); for (i = 0; i < fb->format->num_planes; i++) fb->obj[i] = &planes[i]->gem; err = drm_framebuffer_init(drm, fb, &tegra_fb_funcs); if (err < 0) { dev_err(drm->dev, "failed to initialize framebuffer: %d\n", err); kfree(fb); return ERR_PTR(err); } return fb; }
struct drm_framebuffer * exynos_drm_framebuffer_init(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { struct exynos_drm_fb *exynos_fb; struct exynos_drm_gem_obj *exynos_gem_obj; int ret; exynos_gem_obj = to_exynos_gem_obj(obj); ret = check_fb_gem_memory_type(dev, exynos_gem_obj); if (ret < 0) { DRM_ERROR("cannot use this gem memory type for fb.\n"); return ERR_PTR(-EINVAL); } exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); if (!exynos_fb) return ERR_PTR(-ENOMEM); drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd); exynos_fb->exynos_gem_obj[0] = exynos_gem_obj; ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs); if (ret) { DRM_ERROR("failed to initialize framebuffer\n"); return ERR_PTR(ret); } return &exynos_fb->fb; }
static struct drm_framebuffer * drm_gem_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **obj, unsigned int num_planes, const struct drm_framebuffer_funcs *funcs) { struct drm_framebuffer *fb; int ret, i; fb = kzalloc(sizeof(*fb), GFP_KERNEL); if (!fb) return ERR_PTR(-ENOMEM); drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); for (i = 0; i < num_planes; i++) fb->obj[i] = obj[i]; ret = drm_framebuffer_init(dev, fb, funcs); if (ret) { DRM_DEV_ERROR(dev->dev, "Failed to init framebuffer: %d\n", ret); kfree(fb); return ERR_PTR(ret); } return fb; }
int virtio_gpu_framebuffer_init(struct drm_device *dev, struct virtio_gpu_framebuffer *vgfb, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { int ret; struct virtio_gpu_object *bo; vgfb->base.obj[0] = obj; bo = gem_to_virtio_gpu_obj(obj); drm_helper_mode_fill_fb_struct(dev, &vgfb->base, mode_cmd); ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs); if (ret) { vgfb->base.obj[0] = NULL; return ret; } spin_lock_init(&vgfb->dirty_lock); vgfb->x1 = vgfb->y1 = INT_MAX; vgfb->x2 = vgfb->y2 = 0; return 0; }
/*
 * udl_framebuffer_init - initialize a DisplayLink framebuffer
 * @dev: DRM device
 * @ufb: caller-allocated framebuffer wrapper to initialize
 * @mode_cmd: framebuffer creation parameters from userspace
 * @obj: UDL GEM object backing the framebuffer
 *
 * Returns 0 on success or a negative errno from drm_framebuffer_init().
 */
static int udl_framebuffer_init(struct drm_device *dev,
				struct udl_framebuffer *ufb,
				const struct drm_mode_fb_cmd2 *mode_cmd,
				struct udl_gem_object *obj)
{
	/* Record the backing object, copy mode data, then register. */
	ufb->obj = obj;
	drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
	return drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
}
/*
 * evdi_framebuffer_init - initialize an EVDI framebuffer
 * @dev: DRM device
 * @ufb: caller-allocated framebuffer wrapper to initialize
 * @mode_cmd: framebuffer creation parameters from userspace
 *            (const-qualified only on kernels newer than 4.4.0, matching
 *            the drm_helper_mode_fill_fb_struct() prototype change)
 * @obj: EVDI GEM object backing the framebuffer
 *
 * Returns 0 on success or a negative errno from drm_framebuffer_init().
 */
static int evdi_framebuffer_init(struct drm_device *dev,
				 struct evdi_framebuffer *ufb,
#if KERNEL_VERSION(4, 4, 0) >= LINUX_VERSION_CODE
				 struct drm_mode_fb_cmd2 *mode_cmd,
#else
				 const struct drm_mode_fb_cmd2 *mode_cmd,
#endif
				 struct evdi_gem_object *obj)
{
	ufb->obj = obj;
	drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
	return drm_framebuffer_init(dev, &ufb->base, &evdifb_funcs);
}
/*
 * mgag200_framebuffer_init - initialize a Matrox G200 framebuffer
 * @dev: DRM device
 * @gfb: caller-allocated framebuffer wrapper to initialize
 * @mode_cmd: framebuffer creation parameters from userspace
 * @obj: GEM object backing the framebuffer
 *
 * Registers with the DRM core first and fills in the mode data
 * afterwards (original call order preserved).
 *
 * Returns 0 on success or a negative errno.
 */
int mgag200_framebuffer_init(struct drm_device *dev,
			     struct mga_framebuffer *gfb,
			     struct drm_mode_fb_cmd2 *mode_cmd,
			     struct drm_gem_object *obj)
{
	int ret;

	ret = drm_framebuffer_init(dev, &gfb->base, &mga_fb_funcs);
	if (ret) {
		DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
		return ret;
	}

	drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
	gfb->obj = obj;

	return 0;
}
/*
 * ast_framebuffer_init - initialize an ASpeed framebuffer
 * @dev: DRM device
 * @ast_fb: caller-allocated framebuffer wrapper to initialize
 * @mode_cmd: framebuffer creation parameters from userspace
 * @obj: GEM object backing the framebuffer
 *
 * Returns 0 on success or a negative errno.
 */
int ast_framebuffer_init(struct drm_device *dev,
			 struct ast_framebuffer *ast_fb,
			 struct drm_mode_fb_cmd2 *mode_cmd,
			 struct drm_gem_object *obj)
{
	int ret;

	/* Record the backing object and copy mode data, then register. */
	ast_fb->obj = obj;
	drm_helper_mode_fill_fb_struct(&ast_fb->base, mode_cmd);

	ret = drm_framebuffer_init(dev, &ast_fb->base, &ast_fb_funcs);
	if (!ret)
		return 0;

	DRM_ERROR("framebuffer init failed %d\n", ret);
	return ret;
}
/*
 * cirrus_framebuffer_init - initialize a Cirrus framebuffer
 * @dev: DRM device
 * @gfb: caller-allocated framebuffer to initialize
 * @mode_cmd: framebuffer creation parameters from userspace
 * @obj: GEM object backing plane 0
 *
 * Returns 0 on success or a negative errno.
 */
int cirrus_framebuffer_init(struct drm_device *dev,
			    struct drm_framebuffer *gfb,
			    const struct drm_mode_fb_cmd2 *mode_cmd,
			    struct drm_gem_object *obj)
{
	int ret;

	/* Copy mode data and attach the backing object, then register. */
	drm_helper_mode_fill_fb_struct(dev, gfb, mode_cmd);
	gfb->obj[0] = obj;

	ret = drm_framebuffer_init(dev, gfb, &cirrus_fb_funcs);
	if (!ret)
		return 0;

	DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
	return ret;
}
/*
 * qxl_framebuffer_init - initialize a QXL framebuffer
 * @dev: DRM device
 * @qfb: caller-allocated framebuffer wrapper to initialize
 * @mode_cmd: framebuffer creation parameters from userspace
 * @obj: GEM object backing the framebuffer
 *
 * On failure the obj pointer is cleared again so the caller can tell the
 * fb was never registered.
 *
 * Returns 0 on success or a negative errno.
 */
int qxl_framebuffer_init(struct drm_device *dev,
			 struct qxl_framebuffer *qfb,
			 struct drm_mode_fb_cmd2 *mode_cmd,
			 struct drm_gem_object *obj)
{
	int ret;

	qfb->obj = obj;

	ret = drm_framebuffer_init(dev, &qfb->base, &qxl_fb_funcs);
	if (ret != 0) {
		/* Roll back the obj assignment made above. */
		qfb->obj = NULL;
		return ret;
	}

	drm_helper_mode_fill_fb_struct(&qfb->base, mode_cmd);
	return 0;
}
struct drm_framebuffer * exynos_drm_framebuffer_init(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd, struct exynos_drm_gem **exynos_gem, int count) { struct exynos_drm_fb *exynos_fb; int i; int ret; exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); if (!exynos_fb) return ERR_PTR(-ENOMEM); for (i = 0; i < count; i++) { ret = check_fb_gem_memory_type(dev, exynos_gem[i]); if (ret < 0) goto err; exynos_fb->exynos_gem[i] = exynos_gem[i]; exynos_fb->dma_addr[i] = exynos_gem[i]->dma_addr + mode_cmd->offsets[i]; } drm_helper_mode_fill_fb_struct(dev, &exynos_fb->fb, mode_cmd); ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs); if (ret < 0) { DRM_ERROR("failed to initialize framebuffer\n"); goto err; } return &exynos_fb->fb; err: kfree(exynos_fb); return ERR_PTR(ret); }
/*
 * omap_framebuffer_init - create an OMAP framebuffer over existing buffers
 * @dev: DRM device
 * @mode_cmd: framebuffer creation parameters from userspace
 * @bos: array of GEM buffer objects, one per plane of the pixel format
 *
 * Looks up the pixel format in the driver's table, validates each plane's
 * pitch and buffer size, records plane layout, then registers the fb.
 *
 * Returns the embedded &struct drm_framebuffer or an ERR_PTR() on failure.
 */
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
		struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
{
	struct omap_framebuffer *omap_fb;
	struct drm_framebuffer *fb = NULL;
	const struct format *format = NULL;
	int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);

	DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
			dev, mode_cmd, mode_cmd->width, mode_cmd->height,
			(char *)&mode_cmd->pixel_format);

	/* Linear scan of the driver's supported-format table. */
	for (i = 0; i < ARRAY_SIZE(formats); i++) {
		if (formats[i].pixel_format == mode_cmd->pixel_format) {
			format = &formats[i];
			break;
		}
	}

	if (!format) {
		dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
				(char *)&mode_cmd->pixel_format);
		ret = -EINVAL;
		goto fail;
	}

	omap_fb = kzalloc(sizeof(*omap_fb), GFP_KERNEL);
	if (!omap_fb) {
		ret = -ENOMEM;
		goto fail;
	}

	fb = &omap_fb->base;
	omap_fb->format = format;

	/* Validate and record layout for each plane. */
	for (i = 0; i < n; i++) {
		struct plane *plane = &omap_fb->planes[i];
		int size, pitch = mode_cmd->pitches[i];

		/* Pitch must cover at least one full row of pixels. */
		if (pitch < (mode_cmd->width * format->planes[i].stride_bpp)) {
			dev_err(dev->dev,
				"provided buffer pitch is too small! %d < %d\n",
				pitch,
				mode_cmd->width * format->planes[i].stride_bpp);
			ret = -EINVAL;
			goto fail;
		}

		/* Plane byte size, accounting for vertical subsampling. */
		size = pitch * mode_cmd->height / format->planes[i].sub_y;

		/* The buffer must hold the plane past its start offset. */
		if (size > (omap_gem_mmap_size(bos[i]) - mode_cmd->offsets[i])) {
			dev_err(dev->dev,
				"provided buffer object is too small! %d < %d\n",
				bos[i]->size - mode_cmd->offsets[i], size);
			ret = -EINVAL;
			goto fail;
		}

		plane->bo = bos[i];
		plane->offset = mode_cmd->offsets[i];
		plane->pitch = pitch;
		plane->paddr = 0; /* physical address resolved later, on pin */
	}

	drm_helper_mode_fill_fb_struct(fb, mode_cmd);

	ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
	if (ret) {
		dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
		goto fail;
	}

	DBG("create: FB ID: %d (%p)", fb->base.id, fb);

	return fb;

fail:
	/*
	 * NOTE(review): when drm_framebuffer_init() itself failed, this
	 * destroys an fb that was never registered — assumes
	 * omap_framebuffer_destroy() tolerates that; confirm.
	 */
	if (fb)
		omap_framebuffer_destroy(fb);
	return ERR_PTR(ret);
}
/*
 * exynos_user_fb_create - .fb_create hook: build an fb from userspace handles
 * @dev: DRM device
 * @file_priv: DRM file the GEM handles belong to
 * @mode_cmd: framebuffer creation parameters from userspace
 *
 * Looks up every GEM handle referenced by @mode_cmd (one per buffer of the
 * pixel format), validates their memory type, and registers the fb. On
 * failure, references taken by the lookups are dropped again.
 *
 * Returns the embedded &struct drm_framebuffer or an ERR_PTR() on failure.
 */
static struct drm_framebuffer *
exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
		      struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_fb *exynos_fb;
	int i, ret;

	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
	if (!exynos_fb)
		return ERR_PTR(-ENOMEM);

	/* Buffer 0 first; takes a reference on the GEM object. */
	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object\n");
		ret = -ENOENT;
		goto err_free;
	}

	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
	exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
	exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);

	DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);

	/* Remaining buffers; buf_cnt is trimmed on lookup failure so the
	 * unwind loop below only drops references actually taken. */
	for (i = 1; i < exynos_fb->buf_cnt; i++) {
		obj = drm_gem_object_lookup(dev, file_priv,
				mode_cmd->handles[i]);
		if (!obj) {
			DRM_ERROR("failed to lookup gem object\n");
			ret = -ENOENT;
			exynos_fb->buf_cnt = i;
			goto err_unreference;
		}

		exynos_gem_obj = to_exynos_gem_obj(obj);
		exynos_fb->exynos_gem_obj[i] = exynos_gem_obj;

		ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
		if (ret < 0) {
			DRM_ERROR("cannot use this gem memory type for fb.\n");
			/*
			 * NOTE(review): buf_cnt is NOT trimmed here, so the
			 * unwind loop will also visit slots i+1..buf_cnt-1
			 * whose exynos_gem_obj[] entries are still NULL from
			 * kzalloc — relies on &NULL->base evaluating to NULL
			 * for the if (obj) guard below; confirm.
			 */
			goto err_unreference;
		}
	}

	ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
	if (ret) {
		DRM_ERROR("failed to init framebuffer.\n");
		goto err_unreference;
	}

	return &exynos_fb->fb;

err_unreference:
	/* Drop the lookup references taken above. */
	for (i = 0; i < exynos_fb->buf_cnt; i++) {
		struct drm_gem_object *obj;

		obj = &exynos_fb->exynos_gem_obj[i]->base;
		if (obj)
			drm_gem_object_unreference_unlocked(obj);
	}
err_free:
	kfree(exynos_fb);
	return ERR_PTR(ret);
}
/*
 * msm_framebuffer_init - create an MSM framebuffer over existing buffers
 * @dev: DRM device
 * @mode_cmd: framebuffer creation parameters from userspace
 * @bos: array of GEM buffer objects, one per plane of the pixel format
 *
 * Resolves the format via the KMS backend, validates that each plane's
 * buffer is large enough for its subsampled dimensions, then registers
 * the framebuffer.
 *
 * Returns the embedded &struct drm_framebuffer or an ERR_PTR() on failure.
 */
struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
		const struct drm_mode_fb_cmd2 *mode_cmd,
		struct drm_gem_object **bos)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_framebuffer *msm_fb = NULL;
	struct drm_framebuffer *fb;
	const struct msm_format *format;
	int ret, i, n;
	unsigned int hsub, vsub;

	DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
			dev, mode_cmd, mode_cmd->width, mode_cmd->height,
			(char *)&mode_cmd->pixel_format);

	n = drm_format_num_planes(mode_cmd->pixel_format);
	hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
	vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);

	/* The KMS backend decides which formats it supports. */
	format = kms->funcs->get_format(kms, mode_cmd->pixel_format);
	if (!format) {
		dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
				(char *)&mode_cmd->pixel_format);
		ret = -EINVAL;
		goto fail;
	}

	msm_fb = kzalloc(sizeof(*msm_fb), GFP_KERNEL);
	if (!msm_fb) {
		ret = -ENOMEM;
		goto fail;
	}

	fb = &msm_fb->base;

	msm_fb->format = format;

	if (n > ARRAY_SIZE(msm_fb->planes)) {
		ret = -EINVAL;
		goto fail;
	}

	for (i = 0; i < n; i++) {
		/* Chroma planes (i > 0) are subsampled by hsub x vsub. */
		unsigned int width = mode_cmd->width / (i ? hsub : 1);
		unsigned int height = mode_cmd->height / (i ? vsub : 1);
		unsigned int min_size;

		/* Last row needs only width*cpp bytes, hence (height - 1)
		 * full pitches plus one packed row plus the start offset. */
		min_size = (height - 1) * mode_cmd->pitches[i]
			 + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
			 + mode_cmd->offsets[i];

		if (bos[i]->size < min_size) {
			ret = -EINVAL;
			goto fail;
		}

		msm_fb->planes[i] = bos[i];
	}

	drm_helper_mode_fill_fb_struct(fb, mode_cmd);

	ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
	if (ret) {
		dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
		goto fail;
	}

	DBG("create: FB ID: %d (%p)", fb->base.id, fb);

	return fb;

fail:
	/* msm_fb is NULL before allocation, so kfree() is safe here. */
	kfree(msm_fb);

	return ERR_PTR(ret);
}
/*
 * omap_framebuffer_init - create an OMAP framebuffer (legacy fb_cmd variant)
 * @dev: DRM device
 * @mode_cmd: legacy framebuffer creation parameters (single plane)
 * @bo: optional GEM buffer; if NULL, a scanout buffer is allocated here
 *
 * Returns the new &struct drm_framebuffer, or NULL on failure (note:
 * NULL, not an ERR_PTR — callers must check accordingly).
 */
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
		struct drm_mode_fb_cmd *mode_cmd, struct drm_gem_object *bo)
{
	struct omap_framebuffer *omap_fb;
	struct drm_framebuffer *fb = NULL;
	int size, ret;

	DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%d)",
			dev, mode_cmd, mode_cmd->width, mode_cmd->height,
			mode_cmd->bpp);

	/* in case someone tries to feed us a completely bogus stride: */
	mode_cmd->pitch = align_pitch(mode_cmd->pitch,
			mode_cmd->width, mode_cmd->bpp);

	omap_fb = kzalloc(sizeof(*omap_fb), GFP_KERNEL);
	if (!omap_fb) {
		dev_err(dev->dev, "could not allocate fb\n");
		goto fail;
	}

	fb = &omap_fb->base;
	ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
	if (ret) {
		dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
		goto fail;
	}

	DBG("create: FB ID: %d (%p)", fb->base.id, fb);

	size = PAGE_ALIGN(mode_cmd->pitch * mode_cmd->height);

	if (bo) {
		DBG("using existing %d byte buffer (needed %d)", bo->size, size);
		if (size > bo->size) {
			dev_err(dev->dev, "provided buffer object is too small!\n");
			/*
			 * NOTE(review): on this path the caller's bo ends up
			 * owned by the destroyed fb (omap_fb->bo is still
			 * unset here, though) — confirm ownership contract
			 * with omap_framebuffer_destroy().
			 */
			goto fail;
		}
	} else {
		/* for convenience of all the various callers who don't want
		 * to be bothered to allocate their own buffer..
		 */
		union omap_gem_size gsize = {
				.bytes = size,
		};
		DBG("allocating %d bytes for fb %d", size, dev->primary->index);
		bo = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
		if (!bo) {
			dev_err(dev->dev, "failed to allocate buffer object\n");
			goto fail;
		}
	}

	omap_fb->bo = bo;
	omap_fb->size = size;

	/* Pin the buffer so scanout has a stable physical address. */
	if (omap_gem_get_paddr(bo, &omap_fb->paddr, true)) {
		dev_err(dev->dev, "could not map (paddr)!\n");
		goto fail;
	}

	drm_helper_mode_fill_fb_struct(fb, mode_cmd);

	return fb;

fail:
	/*
	 * NOTE(review): destroy may run on an fb in several partially-built
	 * states (before/after init, with/without bo) — assumes
	 * omap_framebuffer_destroy() handles all of them; confirm.
	 */
	if (fb) {
		omap_framebuffer_destroy(fb);
	}
	return NULL;
}
/*
 * armada_framebuffer_create - create an Armada framebuffer for a GEM object
 * @dev: DRM device
 * @mode: framebuffer creation parameters from userspace
 * @obj: Armada GEM object backing the framebuffer
 *
 * Translates the DRM fourcc into the hardware's format/config register
 * values via the FMT table below, then allocates and registers the fb.
 * Takes its own reference on @obj on success (see comment at the end).
 *
 * Returns the new &struct armada_framebuffer or an ERR_PTR() on failure.
 */
struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
	struct drm_mode_fb_cmd2 *mode, struct armada_gem_object *obj)
{
	struct armada_framebuffer *dfb;
	uint8_t format, config;
	int ret;

	/* Map DRM fourcc -> hardware CFG_* format code + swap/convert bits. */
	switch (mode->pixel_format) {
#define FMT(drm, fmt, mod)		\
	case DRM_FORMAT_##drm:		\
		format = CFG_##fmt;	\
		config = mod;		\
		break
	FMT(RGB565,	565,		CFG_SWAPRB);
	FMT(BGR565,	565,		0);
	FMT(ARGB1555,	1555,		CFG_SWAPRB);
	FMT(ABGR1555,	1555,		0);
	FMT(RGB888,	888PACK,	CFG_SWAPRB);
	FMT(BGR888,	888PACK,	0);
	FMT(XRGB8888,	X888,		CFG_SWAPRB);
	FMT(XBGR8888,	X888,		0);
	FMT(ARGB8888,	8888,		CFG_SWAPRB);
	FMT(ABGR8888,	8888,		0);
	FMT(YUYV,	422PACK,	CFG_YUV2RGB | CFG_SWAPYU | CFG_SWAPUV);
	FMT(UYVY,	422PACK,	CFG_YUV2RGB);
	FMT(VYUY,	422PACK,	CFG_YUV2RGB | CFG_SWAPUV);
	FMT(YVYU,	422PACK,	CFG_YUV2RGB | CFG_SWAPYU);
	FMT(YUV422,	422,		CFG_YUV2RGB);
	FMT(YVU422,	422,		CFG_YUV2RGB | CFG_SWAPUV);
	FMT(YUV420,	420,		CFG_YUV2RGB);
	FMT(YVU420,	420,		CFG_YUV2RGB | CFG_SWAPUV);
	FMT(C8,		PSEUDO8,	0);
#undef FMT
	default:
		return ERR_PTR(-EINVAL);
	}

	dfb = kzalloc(sizeof(*dfb), GFP_KERNEL);
	if (!dfb) {
		DRM_ERROR("failed to allocate Armada fb object\n");
		return ERR_PTR(-ENOMEM);
	}

	dfb->fmt = format;
	dfb->mod = config;
	dfb->obj = obj;

	drm_helper_mode_fill_fb_struct(&dfb->fb, mode);

	ret = drm_framebuffer_init(dev, &dfb->fb, &armada_fb_funcs);
	if (ret) {
		kfree(dfb);
		return ERR_PTR(ret);
	}

	/*
	 * Take a reference on our object as we're successful - the
	 * caller already holds a reference, which keeps us safe for
	 * the above call, but the caller will drop their reference
	 * to it. Hence we need to take our own reference.
	 */
	drm_gem_object_reference(&obj->obj);

	return dfb;
}