/*
 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
 * @drm: The drm device
 * @size: The GEM object size
 *
 * This function creates and initializes a GEM CMA object of the given size,
 * but doesn't allocate any memory to back the object.
 *
 * Return a struct drm_gem_cma_object* on success or ERR_PTR values on failure.
 */
static struct drm_gem_cma_object *
__drm_gem_cma_create(struct drm_device *drm, unsigned int size)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
	if (!cma_obj)
		return ERR_PTR(-ENOMEM);

	gem_obj = &cma_obj->base;

	ret = drm_gem_object_init(drm, gem_obj, size);
	if (ret)
		goto error;

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret) {
		drm_gem_object_release(gem_obj);
		goto error;
	}

	return cma_obj;

error:
	kfree(cma_obj);
	return ERR_PTR(ret);
}
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
				   uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
	if (ret)
		goto fail;

	if (use_pages(obj)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}

	return obj;

fail:
	drm_gem_object_unreference(obj);
	return ERR_PTR(ret);
}
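/*
 * A minimal usage sketch, not part of the driver above: the WARN_ON in
 * msm_gem_new() documents that the caller must hold dev->struct_mutex, so
 * a typical call site looks like this. The helper name example_msm_alloc
 * is hypothetical; MSM_BO_WC is the write-combined flag from the msm UAPI.
 */
static struct drm_gem_object *example_msm_alloc(struct drm_device *dev,
						uint32_t size)
{
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);
	obj = msm_gem_new(dev, size, MSM_BO_WC);
	mutex_unlock(&dev->struct_mutex);

	return obj;	/* ERR_PTR() on failure, never NULL */
}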
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}
/**
 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates and initializes a GEM CMA object of the given size,
 * but doesn't allocate any memory to back the object.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
__drm_gem_cma_create(struct drm_device *drm, size_t size)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	if (drm->driver->gem_create_object)
		gem_obj = drm->driver->gem_create_object(drm, size);
	else
		gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
	if (!gem_obj)
		return ERR_PTR(-ENOMEM);
	cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base);

	ret = drm_gem_object_init(drm, gem_obj, size);
	if (ret)
		goto error;

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret) {
		drm_gem_object_release(gem_obj);
		goto error;
	}

	return cma_obj;

error:
	kfree(cma_obj);
	return ERR_PTR(ret);
}
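/*
 * Hedged sketch of the public counterpart: since __drm_gem_cma_create()
 * deliberately allocates no backing memory, a wrapper like drm_gem_cma_create()
 * pairs it with a write-combined DMA allocation. The exact error unwinding
 * differs across kernel versions; this sketch releases and frees the object
 * directly, which is sufficient before any pages have been exported.
 */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
					      size_t size)
{
	struct drm_gem_cma_object *cma_obj;

	size = round_up(size, PAGE_SIZE);

	cma_obj = __drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return cma_obj;

	cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
				      GFP_KERNEL | __GFP_NOWARN);
	if (!cma_obj->vaddr) {
		dev_err(drm->dev, "failed to allocate buffer with size %zu\n",
			size);
		/* undo drm_gem_object_init() and the mmap offset */
		drm_gem_object_release(&cma_obj->base);
		kfree(cma_obj);
		return ERR_PTR(-ENOMEM);
	}

	return cma_obj;
}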
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
					    size_t size)
{
	struct udl_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}

	return obj;
}
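/*
 * Sketch of the typical caller (assumed, in the style of udl's ioctl path):
 * round the size up, allocate with udl_gem_alloc_object(), publish the
 * object to userspace as a handle, and drop the creation reference once
 * the handle holds its own.
 */
static int udl_gem_create(struct drm_file *file, struct drm_device *dev,
			  uint64_t size, uint32_t *handle_p)
{
	struct udl_gem_object *obj;
	u32 handle;
	int ret;

	size = roundup(size, PAGE_SIZE);

	obj = udl_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		kfree(obj);
		return ret;
	}

	/* the handle now owns a reference; drop the allocation reference */
	drm_gem_object_unreference(&obj->base);
	*handle_p = handle;
	return 0;
}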
struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!bo->vaddr) {
		dev_err(drm->dev, "failed to allocate buffer with size %u\n",
			size);
		err = -ENOMEM;
		goto err_dma;
	}

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err)
		goto err_init;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err)
		goto err_mmap;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

err_mmap:
	drm_gem_object_release(&bo->gem);
err_init:
	tegra_bo_destroy(drm, bo);
err_dma:
	kfree(bo);

	return ERR_PTR(err);
}
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	if (domain & NOUVEAU_GEM_DOMAIN_MAPPABLE)
		flags |= TTM_PL_FLAG_UNCACHED;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, NULL, NULL, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time. not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		nvbo->valid_domains &= domain;

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
	if (ret) {
		nouveau_bo_ref(NULL, pnvbo);
		return ret;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
	return 0;
}
/*
 * drm_gem_cma_create - allocate an object with the given size
 *
 * Returns a struct drm_gem_cma_object* on success or ERR_PTR values
 * on failure.
 */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
					      unsigned int size)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
	if (!cma_obj)
		return ERR_PTR(-ENOMEM);

	cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
						&cma_obj->paddr,
						GFP_KERNEL | __GFP_NOWARN);
	if (!cma_obj->vaddr) {
		dev_err(drm->dev, "failed to allocate buffer with size %u\n",
			size);
		ret = -ENOMEM;
		goto err_dma_alloc;
	}

	gem_obj = &cma_obj->base;

	ret = drm_gem_object_init(drm, gem_obj, size);
	if (ret)
		goto err_obj_init;

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret)
		goto err_create_mmap_offset;

	return cma_obj;

err_create_mmap_offset:
	drm_gem_object_release(gem_obj);

err_obj_init:
	drm_gem_cma_buf_destroy(drm, cma_obj);

err_dma_alloc:
	kfree(cma_obj);

	return ERR_PTR(ret);
}
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     unsigned long size, bool kernel, bool pinned,
			     struct virtio_gpu_object **bo_ptr)
{
	struct virtio_gpu_object *bo;
	enum ttm_bo_type type;
	size_t acc_size;
	int ret;

	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, size,
				       sizeof(struct virtio_gpu_object));

	bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
	if (ret != 0)
		goto err_gem_init;
	bo->dumb = false;
	virtio_gpu_init_ttm_placement(bo, pinned);

	ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, size, type,
			  &bo->placement, 0, !kernel, NULL, acc_size,
			  NULL, NULL, &virtio_gpu_ttm_bo_destroy);
	if (ret != 0)
		goto err_ttm_init;

	*bo_ptr = bo;
	return 0;

err_ttm_init:
	drm_gem_object_release(&bo->gem_base);
err_gem_init:
	kfree(bo);
	return ret;
}
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = malloc(sizeof(*obj), DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
	if (drm_gem_object_init(dev, obj, size) != 0)
		goto free;

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0)
		goto dealloc;

	return (obj);

dealloc:
	vm_object_deallocate(obj->vm_obj);
free:
	free(obj, DRM_MEM_DRIVER);
	return (NULL);
}
static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
						     unsigned long size)
{
	struct drm_vgem_gem_object *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	mutex_init(&obj->pages_lock);

	return obj;
}
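/*
 * Hedged sketch of how __vgem_gem_create() is typically consumed: create
 * the object, give userspace a handle, and drop the local reference so the
 * handle becomes the only owner. drm_gem_object_put_unlocked() is assumed
 * here; older kernels spell it drm_gem_object_unreference_unlocked().
 */
static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
					      struct drm_file *file,
					      unsigned int *handle,
					      unsigned long size)
{
	struct drm_vgem_gem_object *obj;
	int ret;

	obj = __vgem_gem_create(dev, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	ret = drm_gem_handle_create(file, &obj->base, handle);
	/* the handle (if created) now holds its own reference */
	drm_gem_object_put_unlocked(&obj->base);
	if (ret)
		return ERR_PTR(ret);

	return &obj->base;
}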
int hibmc_bo_create(struct drm_device *dev, int size, int align,
		    u32 flags, struct hibmc_bo **phibmcbo)
{
	struct hibmc_drm_private *hibmc = dev->dev_private;
	struct hibmc_bo *hibmcbo;
	size_t acc_size;
	int ret;

	hibmcbo = kzalloc(sizeof(*hibmcbo), GFP_KERNEL);
	if (!hibmcbo) {
		DRM_ERROR("failed to allocate hibmcbo\n");
		return -ENOMEM;
	}
	ret = drm_gem_object_init(dev, &hibmcbo->gem, size);
	if (ret) {
		DRM_ERROR("failed to initialize drm gem object: %d\n", ret);
		kfree(hibmcbo);
		return ret;
	}

	hibmcbo->bo.bdev = &hibmc->bdev;

	hibmc_ttm_placement(hibmcbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

	acc_size = ttm_bo_dma_acc_size(&hibmc->bdev, size,
				       sizeof(struct hibmc_bo));

	ret = ttm_bo_init(&hibmc->bdev, &hibmcbo->bo, size,
			  ttm_bo_type_device, &hibmcbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size,
			  NULL, NULL, hibmc_bo_ttm_destroy);
	if (ret) {
		hibmc_bo_unref(&hibmcbo);
		DRM_ERROR("failed to initialize ttm_bo: %d\n", ret);
		return ret;
	}

	*phibmcbo = hibmcbo;
	return 0;
}
/**
 * psb_gem_create - create a mappable object
 * @file: the DRM file of the client
 * @dev: our device
 * @size: the size requested
 * @handlep: returned handle (opaque number)
 *
 * Create a GEM object, fill in the boilerplate and attach a handle to
 * it so that userspace can speak about it. This does the core work
 * for the various methods that do/will create GEM objects.
 */
static int psb_gem_create(struct drm_file *file, struct drm_device *dev,
			  uint64_t size, uint32_t *handlep)
{
	struct gtt_range *r;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);

	/* Allocate our object - for now a direct gtt range which is not
	   stolen memory backed */
	r = psb_gtt_alloc_range(dev, size, "gem", 0);
	if (r == NULL) {
		dev_err(dev->dev, "no memory for %lld byte GEM object\n",
			size);
		return -ENOSPC;
	}
	/* Initialize the extra goodies GEM needs to do all the hard work */
	if (drm_gem_object_init(dev, &r->gem, size) != 0) {
		psb_gtt_free_range(dev, r);
		/* GEM doesn't give an error code so use -ENOMEM */
		dev_err(dev->dev, "GEM init failed for %lld\n", size);
		return -ENOMEM;
	}
	/* Limit the object to 32bit mappings */
	mapping_set_gfp_mask(r->gem.filp->f_mapping,
			     GFP_KERNEL | __GFP_DMA32);
	/* Give the object a handle so we can carry it more easily */
	ret = drm_gem_handle_create(file, &r->gem, &handle);
	if (ret) {
		dev_err(dev->dev, "GEM handle failed for %p, %lld\n",
			&r->gem, size);
		drm_gem_object_release(&r->gem);
		psb_gtt_free_range(dev, r);
		return ret;
	}
	/* We have the initial and handle reference but need only one now */
	drm_gem_object_unreference(&r->gem);
	*handlep = handle;
	return 0;
}
/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	if (drm_gem_object_init(dev, obj, size) != 0)
		goto free;

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	return obj;

fput:
	drm_gem_object_release(obj);
free:
	kfree(obj);
	return NULL;
}
/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	if (drm_gem_object_init(dev, obj, size) != 0)
		goto free;

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	return obj;

fput:
	/* Object_init mangles the global counters - readjust them. */
	fput(obj->filp);
free:
	kfree(obj);
	return NULL;
}
static int bochs_bo_create(struct drm_device *dev, int size, int align,
			   uint32_t flags, struct bochs_bo **pbochsbo)
{
	struct bochs_device *bochs = dev->dev_private;
	struct bochs_bo *bochsbo;
	size_t acc_size;
	int ret;

	bochsbo = kzalloc(sizeof(struct bochs_bo), GFP_KERNEL);
	if (!bochsbo)
		return -ENOMEM;

	ret = drm_gem_object_init(dev, &bochsbo->gem, size);
	if (ret) {
		kfree(bochsbo);
		return ret;
	}

	bochsbo->bo.bdev = &bochs->ttm.bdev;
	bochsbo->bo.bdev->dev_mapping = dev->anon_inode->i_mapping;

	bochs_ttm_placement(bochsbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

	acc_size = ttm_bo_dma_acc_size(&bochs->ttm.bdev, size,
				       sizeof(struct bochs_bo));

	ret = ttm_bo_init(&bochs->ttm.bdev, &bochsbo->bo, size,
			  ttm_bo_type_device, &bochsbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size,
			  NULL, NULL, bochs_bo_ttm_destroy);
	if (ret)
		return ret;

	*pbochsbo = bochsbo;
	return 0;
}
struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
					 unsigned long size)
{
	struct mtk_drm_gem_obj *mtk_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL);
	if (!mtk_gem_obj)
		return ERR_PTR(-ENOMEM);

	obj = &mtk_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(mtk_gem_obj);
		return ERR_PTR(ret);
	}

	return mtk_gem_obj;
}
/*
 * Reconstructed sketch (the extracted snippet was truncated to a bogus
 * signature): a minimal i915 allocator following the pattern of the other
 * drivers here, assuming i915's slab helpers i915_gem_object_alloc() and
 * i915_gem_object_free() and the usual drm_gem_object_init(dev, obj, size)
 * signature.
 */
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		i915_gem_object_free(obj);
		return NULL;
	}

	return obj;
}
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	ssize_t size;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(buf->size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free_mmap;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (bo->sgt->nents > 1) {
		err = -EINVAL;
		goto detach;
	}

	bo->paddr = sg_dma_address(bo->sgt->sgl);
	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);

free_mmap:
	drm_gem_free_mmap_offset(&bo->gem);

release:
	drm_gem_object_release(&bo->gem);

free:
	kfree(bo);

	return ERR_PTR(err);
}