/*
 * drm_gem_cma_create_with_handle - allocate an object with the given
 * size and create a gem handle on it
 *
 * returns a struct drm_gem_cma_object* on success or ERR_PTR values
 * on failure.
 */
static struct drm_gem_cma_object *drm_gem_cma_create_with_handle(
		struct drm_file *file_priv, struct drm_device *drm,
		unsigned int size, unsigned int *handle)
{
	struct drm_gem_cma_object *cma_obj;
	int ret;

	cma_obj = drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return cma_obj;

	/*
	 * Register the object in the idr table; *handle receives the id
	 * that userspace will use to refer to it.
	 */
	ret = drm_gem_handle_create(file_priv, &cma_obj->base, handle);
	if (ret) {
		drm_gem_cma_free_object(&cma_obj->base);
		return ERR_PTR(ret);
	}

	/* Drop the allocation reference - the handle owns the object now. */
	drm_gem_object_unreference_unlocked(&cma_obj->base);

	return cma_obj;
}
/* * drm_gem_cma_create - allocate an object with the given size * * returns a struct drm_gem_cma_object* on success or ERR_PTR values * on failure. */ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, unsigned int size) { struct drm_gem_cma_object *cma_obj; int ret; size = round_up(size, PAGE_SIZE); cma_obj = __drm_gem_cma_create(drm, size); if (IS_ERR(cma_obj)) return cma_obj; cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size, &cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN); if (!cma_obj->vaddr) { dev_err(drm->dev, "failed to allocate buffer with size %d\n", size); ret = -ENOMEM; goto error; } return cma_obj; error: drm_gem_cma_free_object(&cma_obj->base); return ERR_PTR(ret); }
/** * tinydrm_gem_cma_free_object - Free resources associated with a CMA GEM * object * @gem_obj: GEM object to free * * This function frees the backing memory of the CMA GEM object, cleans up the * GEM object state and frees the memory used to store the object itself using * drm_gem_cma_free_object(). It also handles PRIME buffers which has the kernel * virtual address set by tinydrm_gem_cma_prime_import_sg_table(). Drivers * can use this as their &drm_driver->gem_free_object callback. */ void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj) { if (gem_obj->import_attach) { struct drm_gem_cma_object *cma_obj; cma_obj = to_drm_gem_cma_obj(gem_obj); dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr); cma_obj->vaddr = NULL; } drm_gem_cma_free_object(gem_obj); }
/* Must be called with bo_lock held. */ static void vc4_bo_destroy(struct vc4_bo *bo) { struct drm_gem_object *obj = &bo->base.base; struct vc4_dev *vc4 = to_vc4_dev(obj->dev); if (bo->validated_shader) { kfree(bo->validated_shader->texture_samples); kfree(bo->validated_shader); bo->validated_shader = NULL; } vc4->bo_stats.num_allocated--; vc4->bo_stats.size_allocated -= obj->size; drm_gem_cma_free_object(obj); }
/* * drm_gem_cma_create - allocate an object with the given size * * returns a struct drm_gem_cma_object* on success or ERR_PTR values * on failure. */ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, unsigned int size) { struct drm_gem_cma_object *cma_obj; struct sg_table *sgt = NULL; int ret; size = round_up(size, PAGE_SIZE); cma_obj = __drm_gem_cma_create(drm, size); if (IS_ERR(cma_obj)) return cma_obj; cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size, &cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN); if (!cma_obj->vaddr) { dev_err(drm->dev, "failed to allocate buffer with size %d\n", size); ret = -ENOMEM; goto error; } sgt = kzalloc(sizeof(*cma_obj->sgt), GFP_KERNEL); if (sgt == NULL) { ret = -ENOMEM; goto error; } ret = dma_get_sgtable(drm->dev, sgt, cma_obj->vaddr, cma_obj->paddr, size); if (ret < 0) goto error; cma_obj->sgt = sgt; return cma_obj; error: kfree(sgt); drm_gem_cma_free_object(&cma_obj->base); return ERR_PTR(ret); }
static int xylon_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args) { struct drm_gem_cma_object *cma_obj; struct drm_gem_object *gem_obj; struct xylon_drm_device *xdev = dev->dev_private; unsigned int buff_width; int ret; ret = xylon_drm_crtc_get_param(xdev->crtc, &buff_width, XYLON_DRM_CRTC_BUFF_WIDTH); if (ret) return ret; args->pitch = buff_width * DIV_ROUND_UP(args->bpp, 8); args->size = (u64)(buff_width * DIV_ROUND_UP(args->bpp, 8) * args->height); cma_obj = drm_gem_cma_create(dev, (unsigned int)args->size); if (IS_ERR(cma_obj)) return PTR_ERR(cma_obj); gem_obj = &cma_obj->base; ret = drm_gem_handle_create(file_priv, gem_obj, &args->handle); if (ret) goto err_handle_create; drm_gem_object_unreference_unlocked(gem_obj); return PTR_ERR_OR_ZERO(cma_obj); err_handle_create: drm_gem_cma_free_object(gem_obj); return ret; }
static int hisi_drm_fbdev_probe(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { struct hisi_drm_fbdev *fbdev = to_hisi_drm_fbdev(helper); struct drm_mode_fb_cmd2 mode_cmd = { 0 }; struct drm_device *dev = helper->dev; struct drm_gem_cma_object *obj; struct drm_framebuffer *fb; unsigned int bytes_per_pixel; unsigned long offset; struct fb_info *fbi; size_t size; int ret; /* TODO: Need to use ion heaps to create frame buffer?? */ DRM_DEBUG_DRIVER("surface width(%d), height(%d) and bpp(%d)\n", sizes->surface_width, sizes->surface_height, sizes->surface_bpp); bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8); sizes->surface_depth = PREFERRED_BPP; mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height * HISI_NUM_FRAMEBUFFERS; mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel; mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); size = roundup(mode_cmd.pitches[0] * mode_cmd.height, PAGE_SIZE); obj = drm_gem_cma_create(dev, size); if (IS_ERR(obj)) return -ENOMEM; fbi = framebuffer_alloc(0, dev->dev); if (!fbi) { dev_err(dev->dev, "Failed to allocate framebuffer info.\n"); ret = -ENOMEM; goto err_drm_gem_cma_free_object; } fbdev->fb = hisi_drm_fb_alloc(dev, &mode_cmd, &obj, 1, true); if (IS_ERR(fbdev->fb)) { dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n"); ret = PTR_ERR(fbdev->fb); goto err_framebuffer_release; } fb = &fbdev->fb->fb; helper->fb = fb; helper->fbdev = fbi; fbi->par = helper; fbi->flags = FBINFO_FLAG_DEFAULT; fbi->fbops = &hisi_drm_fbdev_ops; ret = fb_alloc_cmap(&fbi->cmap, 256, 0); if (ret) { dev_err(dev->dev, "Failed to allocate color map.\n"); goto err_hisi_drm_fb_destroy; } drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height/HISI_NUM_FRAMEBUFFERS); offset = fbi->var.xoffset * bytes_per_pixel; offset += fbi->var.yoffset * fb->pitches[0]; dev->mode_config.fb_base = 
(resource_size_t)obj->paddr; fbi->screen_base = obj->vaddr + offset; fbi->fix.smem_start = (unsigned long)(obj->paddr + offset); fbi->screen_size = size; fbi->fix.smem_len = size; DRM_DEBUG_DRIVER("exit successfully.\n"); return 0; err_hisi_drm_fb_destroy: drm_framebuffer_unregister_private(fb); hisi_drm_fb_destroy(fb); err_framebuffer_release: framebuffer_release(fbi); err_drm_gem_cma_free_object: drm_gem_cma_free_object(&obj->base); return ret; }