/*
 * The dumb interface doesn't work with the GEM straight MMAP
 * interface; it expects to do MMAP on the drm fd, like normal.
 */
int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
		 uint32_t handle, uint64_t *offset)
{
	struct udl_gem_object *gobj;
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	gobj = to_udl_bo(obj);

	ret = udl_gem_get_pages(gobj);
	if (ret)
		goto out;
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&gobj->base.vma_node);

out:
	drm_gem_object_unreference(&gobj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
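/*
 * Added illustration, not part of the driver: a minimal userspace sketch
 * of the dumb-buffer flow that udl_gem_mmap() serves. It assumes libdrm's
 * uapi headers (compile with `pkg-config --cflags libdrm`), a device node
 * at /dev/dri/card0, and 640x480@32bpp dimensions; all of those are
 * assumptions for the example, and error handling is omitted for brevity.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm.h>
#include <drm_mode.h>

static void *map_dumb_buffer(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	struct drm_mode_create_dumb create = {
		.width = 640, .height = 480, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };

	/* allocate a dumb buffer; the kernel fills .handle, .pitch, .size */
	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);

	/* ask for the fake mmap offset that udl_gem_mmap() computes */
	map.handle = create.handle;
	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);

	/* mmap on the drm fd itself, as the comment above describes */
	return mmap(NULL, create.size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, map.offset);
}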
int udl_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
	struct page *page;
	unsigned int page_offset;
	int ret = 0;

	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (!obj->pages)
		return VM_FAULT_SIGBUS;

	page = obj->pages[page_offset];
	ret = vm_insert_page(vma, vmf->address, page);
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
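/*
 * For context: udl_gem_fault() only runs if it is wired into the VMA
 * operations used for GEM mappings. A minimal sketch of that table,
 * reusing the generic drm_gem_vm_open/close helpers; the table name
 * udl_gem_vm_ops is an assumption here.
 */
static const struct vm_operations_struct udl_gem_vm_ops = {
	.fault = udl_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};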
struct drm_framebuffer *
udl_fb_user_fb_create(struct drm_device *dev,
		      struct drm_file *file,
		      const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct udl_framebuffer *ufb;
	int ret;
	uint32_t size;

	obj = drm_gem_object_lookup(file, mode_cmd->handles[0]);
	if (obj == NULL)
		return ERR_PTR(-ENOENT);

	size = mode_cmd->pitches[0] * mode_cmd->height;
	size = ALIGN(size, PAGE_SIZE);

	if (size > obj->size) {
		DRM_ERROR("object size not sufficient for fb %d %zu %d %d\n",
			  size, obj->size, mode_cmd->pitches[0],
			  mode_cmd->height);
		/* drop the lookup reference on the error paths */
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(-ENOMEM);
	}

	ufb = kzalloc(sizeof(*ufb), GFP_KERNEL);
	if (ufb == NULL) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = udl_framebuffer_init(dev, ufb, mode_cmd, to_udl_bo(obj));
	if (ret) {
		kfree(ufb);
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(-EINVAL);
	}
	return &ufb->base;
}
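/*
 * udl_fb_user_fb_create() is reached through the device's mode-config
 * hooks. A minimal sketch of that registration; the struct name
 * udl_mode_funcs and the omission of other callbacks are assumptions.
 */
static const struct drm_mode_config_funcs udl_mode_funcs = {
	.fb_create = udl_fb_user_fb_create,
};

/* typically installed during modeset init: */
/* dev->mode_config.funcs = &udl_mode_funcs; */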
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	update_vm_cache_attr(to_udl_bo(vma->vm_private_data), vma);

	return ret;
}
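/*
 * update_vm_cache_attr() is referenced above but not shown. A minimal
 * sketch of what it is expected to do, assuming per-object cache flags
 * such as UDL_BO_CACHEABLE and UDL_BO_WC on struct udl_gem_object; the
 * flag names and the default-to-uncached policy are assumptions here.
 */
static void update_vm_cache_attr(struct udl_gem_object *obj,
				 struct vm_area_struct *vma)
{
	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

	/* default to non-cacheable mappings */
	if (obj->flags & UDL_BO_CACHEABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (obj->flags & UDL_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}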
void udl_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct udl_gem_object *obj = to_udl_bo(gem_obj);

	if (obj->vmapping)
		udl_gem_vunmap(obj);

	if (gem_obj->import_attach)
		drm_prime_gem_destroy(gem_obj, obj->sg);

	if (obj->pages)
		udl_gem_put_pages(obj);

	drm_gem_free_mmap_offset(gem_obj);
}
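/*
 * udl_gem_put_pages() is referenced above but not shown. A minimal sketch
 * of the teardown side, assuming obj->pages came from drm_gem_get_pages()
 * for native objects and from a kvmalloc'd array for imported dma-bufs;
 * both allocation-path assumptions, not taken from the code above.
 */
void udl_gem_put_pages(struct udl_gem_object *obj)
{
	if (obj->base.import_attach) {
		/* imported buffers: the exporter owns the backing pages */
		kvfree(obj->pages);
		obj->pages = NULL;
		return;
	}

	drm_gem_put_pages(&obj->base, obj->pages, false, false);
	obj->pages = NULL;
}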
static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
					enum dma_data_direction dir)
{
	struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
	struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
	struct drm_device *dev = obj->base.dev;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt = NULL;
	unsigned int i;
	int page_count;
	int nents, ret;

	DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n",
			dev_name(attach->dev), attach->dmabuf->size, dir);

	/* just return current sgt if already requested. */
	if (udl_attach->dir == dir && udl_attach->is_mapped)
		return &udl_attach->sgt;

	if (!obj->pages) {
		ret = udl_gem_get_pages(obj);
		if (ret) {
			DRM_ERROR("failed to map pages.\n");
			return ERR_PTR(ret);
		}
	}

	page_count = obj->base.size / PAGE_SIZE;
	obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);
	if (IS_ERR(obj->sg)) {
		DRM_ERROR("failed to allocate sgt.\n");
		return ERR_CAST(obj->sg);
	}

	sgt = &udl_attach->sgt;

	ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL);
	if (ret) {
		DRM_ERROR("failed to alloc sgt.\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_lock(&dev->struct_mutex);

	rd = obj->sg->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	if (dir != DMA_NONE) {
		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
		if (!nents) {
			DRM_ERROR("failed to map sgl with iommu.\n");
			sg_free_table(sgt);
			sgt = ERR_PTR(-EIO);
			goto err_unlock;
		}
	}

	udl_attach->is_mapped = true;
	udl_attach->dir = dir;
	attach->priv = udl_attach;

err_unlock:
	mutex_unlock(&dev->struct_mutex);
	return sgt;
}
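/*
 * udl_map_dma_buf() caches its mapping in attach->priv. The layout that
 * the code above implies for that per-attachment struct, as a sketch; the
 * real definition may carry additional fields.
 */
struct udl_drm_dmabuf_attachment {
	struct sg_table sgt;			/* table handed back to importers */
	enum dma_data_direction dir;		/* direction of the cached mapping */
	bool is_mapped;				/* set once the sgt above is valid */
};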