/** * tinydrm_gem_cma_prime_import_sg_table - Produce a CMA GEM object from * another driver's scatter/gather table of pinned pages * @drm: DRM device to import into * @attach: DMA-BUF attachment * @sgt: Scatter/gather table of pinned pages * * This function imports a scatter/gather table exported via DMA-BUF by * another driver using drm_gem_cma_prime_import_sg_table(). It sets the * kernel virtual address on the CMA object. Drivers should use this as their * &drm_driver->gem_prime_import_sg_table callback if they need the virtual * address. tinydrm_gem_cma_free_object() should be used in combination with * this function. * * Returns: * A pointer to a newly created GEM object or an ERR_PTR-encoded negative * error code on failure. */ struct drm_gem_object * tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm, struct dma_buf_attachment *attach, struct sg_table *sgt) { struct drm_gem_cma_object *cma_obj; struct drm_gem_object *obj; void *vaddr; vaddr = dma_buf_vmap(attach->dmabuf); if (!vaddr) { DRM_ERROR("Failed to vmap PRIME buffer\n"); return ERR_PTR(-ENOMEM); } obj = drm_gem_cma_prime_import_sg_table(drm, attach, sgt); if (IS_ERR(obj)) { dma_buf_vunmap(attach->dmabuf, vaddr); return obj; } cma_obj = to_drm_gem_cma_obj(obj); cma_obj->vaddr = vaddr; return obj; }
/** * tinydrm_gem_cma_free_object - Free resources associated with a CMA GEM * object * @gem_obj: GEM object to free * * This function frees the backing memory of the CMA GEM object, cleans up the * GEM object state and frees the memory used to store the object itself using * drm_gem_cma_free_object(). It also handles PRIME buffers which has the kernel * virtual address set by tinydrm_gem_cma_prime_import_sg_table(). Drivers * can use this as their &drm_driver->gem_free_object callback. */ void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj) { if (gem_obj->import_attach) { struct drm_gem_cma_object *cma_obj; cma_obj = to_drm_gem_cma_obj(gem_obj); dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr); cma_obj->vaddr = NULL; } drm_gem_cma_free_object(gem_obj); }
/**
 * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * Maps a buffer imported via DRM PRIME into a userspace process's address
 * space: the generic GEM VMA setup runs first, then the CMA-specific
 * mapping. Drivers using the CMA helpers should set this as their DRM
 * driver's ->gem_prime_mmap() callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma)
{
	int err;

	err = drm_gem_mmap_obj(obj, obj->size, vma);
	if (err < 0)
		return err;

	return drm_gem_cma_mmap_obj(to_drm_gem_cma_obj(obj), vma);
}
/**
 * drm_gem_cma_mmap - memory-map a CMA GEM object
 * @filp: file object
 * @vma: VMA for the area to be mapped
 *
 * Augmented GEM mmap for CMA objects: after the usual GEM VMA setup the
 * whole object is faulted in at once by the CMA mmap helper rather than
 * relying on on-demand faulting. Drivers which employ the CMA helpers
 * should use this as their ->mmap() handler in the DRM device file's
 * file_operations structure.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int err;

	err = drm_gem_mmap(filp, vma);
	if (err)
		return err;

	/* drm_gem_mmap() stashed the GEM object in the VMA. */
	obj = vma->vm_private_data;

	return drm_gem_cma_mmap_obj(to_drm_gem_cma_obj(obj), vma);
}
/*
 * drm_gem_cma_free_object - (struct drm_driver)->gem_free_object callback
 * function
 *
 * Drops the fake mmap offset if one was set up, releases the GEM object
 * state, destroys the CMA backing buffer and finally frees the object
 * itself.
 */
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *cma = to_drm_gem_cma_obj(gem_obj);

	if (gem_obj->map_list.map)
		drm_gem_free_mmap_offset(gem_obj);

	drm_gem_object_release(gem_obj);

	drm_gem_cma_buf_destroy(gem_obj->dev, cma);
	kfree(cma);
}
/**
 * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * Maps a buffer imported via DRM PRIME into a userspace process's address
 * space. The generic GEM VMA setup is serialized under the device's
 * struct_mutex; the CMA-specific mapping follows. Drivers that use the CMA
 * helpers should set this as their DRM driver's ->gem_prime_mmap()
 * callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma)
{
	struct drm_device *drm = obj->dev;
	int err;

	mutex_lock(&drm->struct_mutex);
	err = drm_gem_mmap_obj(obj, obj->size, vma);
	mutex_unlock(&drm->struct_mutex);

	if (err < 0)
		return err;

	return drm_gem_cma_mmap_obj(to_drm_gem_cma_obj(obj), vma);
}
/**
 * drm_gem_cma_free_object - free resources associated with a CMA GEM object
 * @gem_obj: GEM object to free
 *
 * Frees the backing memory of the CMA GEM object, cleans up the GEM object
 * state and frees the storage of the object itself. Locally allocated
 * buffers (vaddr set) are returned via dma_free_writecombine(); imported
 * PRIME buffers are torn down via drm_prime_gem_destroy(). Drivers using
 * the CMA helpers should set this as their DRM driver's
 * ->gem_free_object() callback.
 */
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *cma = to_drm_gem_cma_obj(gem_obj);

	if (cma->vaddr)
		dma_free_writecombine(gem_obj->dev->dev, cma->base.size,
				      cma->vaddr, cma->paddr);
	else if (gem_obj->import_attach)
		drm_prime_gem_destroy(gem_obj, cma->sgt);

	drm_gem_object_release(gem_obj);

	kfree(cma);
}
/* * drm_gem_cma_mmap - (struct file_operation)->mmap callback function */ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma) { struct drm_gem_object *gem_obj; struct drm_gem_cma_object *cma_obj; int ret; ret = drm_gem_mmap(filp, vma); if (ret) return ret; gem_obj = vma->vm_private_data; cma_obj = to_drm_gem_cma_obj(gem_obj); ret = remap_pfn_range(vma, vma->vm_start, cma_obj->paddr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot); if (ret) drm_gem_vm_close(vma); return ret; }
/** * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned * pages for a CMA GEM object * @obj: GEM object * * This function exports a scatter/gather table suitable for PRIME usage by * calling the standard DMA mapping API. Drivers using the CMA helpers should * set this as their DRM driver's ->gem_prime_get_sg_table() callback. * * Returns: * A pointer to the scatter/gather table of pinned pages or NULL on failure. */ struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj) { struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); struct sg_table *sgt; int ret; sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); if (!sgt) return NULL; ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr, cma_obj->paddr, obj->size); if (ret < 0) goto out; return sgt; out: kfree(sgt); return NULL; }
/* this code was heavily inspired by _ump_ukk_msync() in * drivers/amlogic/gpu/ump/common/ump_kernel_api.c */ int meson_ioctl_msync(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_gem_object *gem_obj; struct drm_gem_cma_object *cma_obj; struct drm_meson_msync *args = data; struct meson_drm_session_data *session_data = file->driver_priv; void *virtual = NULL; u32 size = 0; u32 offset = 0; if (!args || !session_data) return -EINVAL; gem_obj = drm_gem_object_lookup(dev, file, args->handle); if (NULL == gem_obj) { DBG_MSG(1, ("meson_ioctl_msync(): %02u Failed to look up mapping\n", args->handle)); return -EFAULT; } cma_obj = to_drm_gem_cma_obj(gem_obj); if (NULL == cma_obj) { DBG_MSG(1, ("meson_ioctl_msync(): %02u Failed to get gem_cma_obj containing gem_obj\n", args->handle)); return -EFAULT; } /* Returns the cache settings back to Userspace */ args->is_cached = dma_get_attr(DMA_ATTR_NON_CONSISTENT, &cma_obj->dma_attrs); DBG_MSG(3, ("meson_ioctl_msync(): %02u cache_enabled %d\n op %d address 0x%08x mapping 0x%08x\n", args->handle, args->is_cached, args->op, args->address, args->mapping)); /* Nothing to do in these cases */ if ((DRM_MESON_MSYNC_READOUT_CACHE_ENABLED == args->op) || (!args->is_cached)) return 0; if (args->address) { virtual = (void *)((u32)args->address); offset = (u32)((args->address) - (args->mapping)); } else {
/** * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual * address space * @obj: GEM object * * This function maps a buffer exported via DRM PRIME into the kernel's * virtual address space. Since the CMA buffers are already mapped into the * kernel virtual address space this simply returns the cached virtual * address. Drivers using the CMA helpers should set this as their DRM * driver's ->gem_prime_vmap() callback. * * Returns: * The kernel virtual address of the CMA GEM object's backing store. */ void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj) { struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); return cma_obj->vaddr; }