int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_gem_mm *mm = dev->mm_private;
        struct drm_local_map *map = NULL;
        struct drm_gem_object *obj;
        struct drm_hash_item *hash;
        int ret = 0;

        if (drm_device_is_unplugged(dev))
                return -ENODEV;

        mutex_lock(&dev->struct_mutex);

        if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
                mutex_unlock(&dev->struct_mutex);
                return drm_mmap(filp, vma);
        }

        map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
        if (!map ||
            ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
                ret = -EPERM;
                goto out_unlock;
        }

        /* Check for valid size. */
        if (map->size < vma->vm_end - vma->vm_start) {
                ret = -EINVAL;
                goto out_unlock;
        }

        obj = map->handle;
        if (!obj->dev->driver->gem_vm_ops) {
                ret = -EINVAL;
                goto out_unlock;
        }

        vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
        vma->vm_ops = obj->dev->driver->gem_vm_ops;
        vma->vm_private_data = map->handle;
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

        /* Take a ref for this mapping of the object, so that the fault
         * handler can dereference the mmap offset's pointer to the object.
         * This reference is cleaned up by the corresponding vm_close
         * (which should happen whether the vma was created by this call, or
         * by a vm_open due to mremap or partial unmap or whatever).
         */
        drm_gem_object_reference(obj);

        drm_vm_open_locked(vma);

out_unlock:
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * If we find the object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object), we set up the driver fault handler so that any accesses
 * to the object can be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_gem_mm *mm = dev->mm_private;
        struct drm_local_map *map = NULL;
        struct drm_gem_object *obj;
        struct drm_hash_item *hash;
        int ret = 0;

        if (drm_device_is_unplugged(dev))
                return -ENODEV;

        mutex_lock(&dev->struct_mutex);

        if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
                mutex_unlock(&dev->struct_mutex);
                return drm_mmap(filp, vma);
        }

        map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
        if (!map ||
            ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
                ret = -EPERM;
                goto out_unlock;
        }

        /* Check for valid size. */
        if (map->size < vma->vm_end - vma->vm_start) {
                ret = -EINVAL;
                goto out_unlock;
        }

        obj = map->handle;
        if (!obj->dev->driver->gem_vm_ops) {
                ret = -EINVAL;
                goto out_unlock;
        }

        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = obj->dev->driver->gem_vm_ops;
        vma->vm_private_data = map->handle;
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

        /* Take a ref for this mapping of the object, so that the fault
         * handler can dereference the mmap offset's pointer to the object.
         * This reference is cleaned up by the corresponding vm_close
         * (which should happen whether the vma was created by this call, or
         * by a vm_open due to mremap or partial unmap or whatever).
         */
        drm_gem_object_reference(obj);

        drm_vm_open_locked(dev, vma);

out_unlock:
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
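The function above only does something useful if the driver routes its mmap file operation here and provides gem_vm_ops with a fault handler for drm_gem_mmap() to install on the VMA. The following is a minimal sketch of that wiring, not taken from any particular driver: the "foo" names and the stub fault handler are placeholders, while drm_gem_vm_open()/drm_gem_vm_close(), drm_open(), drm_release(), drm_ioctl() and the DRIVER_GEM flag are the stock DRM helpers of this kernel era.

#include <drm/drmP.h>

/* Sketch only: a real fault handler would bind the object (e.g. into the
 * GTT) and insert PFNs into the VMA instead of failing.
 */
static int foo_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct foo_gem_vm_ops = {
        .fault = foo_gem_fault,         /* trapped accesses land here */
        .open = drm_gem_vm_open,        /* takes an extra object reference */
        .close = drm_gem_vm_close,      /* drops the per-mapping reference */
};

static const struct file_operations foo_driver_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .mmap = drm_gem_mmap,           /* ends up in drm_gem_mmap() above */
};

static struct drm_driver foo_driver = {
        .driver_features = DRIVER_GEM,
        .gem_vm_ops = &foo_gem_vm_ops,  /* installed on the VMA by drm_gem_mmap() */
        .fops = &foo_driver_fops,
};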
/**
 * Find the file with the given magic number.
 *
 * \param dev DRM device.
 * \param magic magic number.
 *
 * Searches drm_device::magiclist for the entry with the matching magic
 * number among all files that share the same hash key, while holding the
 * drm_device::struct_mutex lock.
 */
static struct drm_file *drm_find_file(struct drm_device *dev, drm_magic_t magic)
{
        struct drm_file *retval = NULL;
        struct drm_magic_entry *pt;
        struct drm_hash_item *hash;

        mutex_lock(&dev->struct_mutex);
        if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
                pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
                retval = pt->priv;
        }
        mutex_unlock(&dev->struct_mutex);
        return retval;
}
/**
 * Find the file with the given magic number.
 *
 * \param master DRM master.
 * \param magic magic number.
 *
 * Searches drm_master::magiclist for the entry with the matching magic
 * number among all files that share the same hash key, while holding the
 * DRM lock.
 */
static struct drm_file *drm_find_file(struct drm_master *master, drm_magic_t magic)
{
        struct drm_file *retval = NULL;
        struct drm_magic_entry *pt;
        struct drm_hash_item *hash;
        struct drm_device *dev = master->minor->dev;

        DRM_LOCK(dev);
        if (!drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
                pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
                retval = pt->priv;
        }
        DRM_UNLOCK(dev);
        return retval;
}
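Both variants of drm_find_file() lean on the same drm_open_hash helpers from drm_hashtab.h: the key lives in an embedded struct drm_hash_item, and drm_hash_entry() (a container_of wrapper) recovers the surrounding structure after a lookup. The following is a hedged, generic sketch of that pattern with an invented foo_entry type and helper names; the table itself would be created once with drm_ht_create() and torn down with drm_ht_remove().

#include <drm/drmP.h>
#include <drm/drm_hashtab.h>

struct foo_entry {
        struct drm_hash_item hash_item; /* embedded item; the key is stored here */
        void *payload;
};

static int foo_table_add(struct drm_open_hash *ht, struct foo_entry *e,
                         unsigned long key)
{
        e->hash_item.key = key;
        /* Fails with -EINVAL if the key is already present. */
        return drm_ht_insert_item(ht, &e->hash_item);
}

static struct foo_entry *foo_table_find(struct drm_open_hash *ht,
                                        unsigned long key)
{
        struct drm_hash_item *hash;

        if (drm_ht_find_item(ht, key, &hash))
                return NULL;
        /* Recover the containing structure, as drm_find_file() does above. */
        return drm_hash_entry(hash, struct foo_entry, hash_item);
}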
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
                                               uint32_t key)
{
        struct ttm_object_device *tdev = tfile->tdev;
        struct ttm_base_object *base;
        struct drm_hash_item *hash;
        int ret;

        read_lock(&tdev->object_lock);
        ret = drm_ht_find_item(&tdev->object_hash, key, &hash);

        if (likely(ret == 0)) {
                base = drm_hash_entry(hash, struct ttm_base_object, hash);
                kref_get(&base->refcount);
        }
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
                                               uint32_t key)
{
        struct ttm_object_device *tdev = tfile->tdev;
        struct ttm_base_object *base;
        struct drm_hash_item *hash;
        int ret;

        mtx_enter(&tdev->object_lock);
        ret = drm_ht_find_item(&tdev->object_hash, key, &hash);

        if (likely(ret == 0)) {
                base = drm_hash_entry(hash, struct ttm_base_object, hash);
                ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
        }
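From the caller's side, ttm_base_object_lookup() returns a referenced base object (or NULL), and the caller is responsible for dropping that reference with ttm_base_object_unref() once it is done. A minimal sketch of the usual pattern follows; the foo_user_object type and helper are invented, while ttm_base_object_lookup(), ttm_base_object_unref() and the object_type field are part of the TTM object API.

#include <linux/kernel.h>
#include <drm/ttm/ttm_object.h>

/* Hypothetical driver object that embeds a TTM base object. */
struct foo_user_object {
        struct ttm_base_object base;
        /* driver-private state would follow */
};

static struct foo_user_object *
foo_user_object_lookup(struct ttm_object_file *tfile, uint32_t handle)
{
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (base == NULL)
                return NULL;

        /* A real caller would also verify base->object_type before trusting
         * the container_of(), and must later release the reference taken by
         * the lookup with ttm_base_object_unref(&base).
         */
        return container_of(base, struct foo_user_object, base);
}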
static struct drm_gem_object *
drm_gem_object_from_offset(struct drm_device *dev, vm_ooffset_t offset)
{
        struct drm_gem_object *obj;
        struct drm_gem_mm *mm;
        struct drm_hash_item *map_list;

        if ((offset & DRM_GEM_MAPPING_MASK) != DRM_GEM_MAPPING_KEY)
                return (NULL);
        offset &= ~DRM_GEM_MAPPING_KEY;
        mm = dev->mm_private;
        if (drm_ht_find_item(&mm->offset_hash, DRM_GEM_MAPPING_IDX(offset),
            &map_list) != 0) {
                DRM_DEBUG("drm_gem_object_from_offset: offset 0x%jx obj not found\n",
                    (uintmax_t)offset);
                return (NULL);
        }
        obj = __containerof(map_list, struct drm_gem_object, map_list);
        return (obj);
}
/**
 * Removes the given magic number from the hash table of used magic number
 * lists.
 */
static int drm_remove_magic(struct drm_device *dev, drm_magic_t magic)
{
        drm_magic_entry_t *pt;
        struct drm_hash_item *hash;

        DRM_DEBUG("%d\n", magic);

        mutex_lock(&dev->struct_mutex);
        if (drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }
        pt = drm_hash_entry(hash, drm_magic_entry_t, hash_item);
        drm_ht_remove_item(&dev->magiclist, hash);
        list_del(&pt->head);
        mutex_unlock(&dev->struct_mutex);

        kfree(pt);

        return 0;
}
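For completeness, the removal above undoes an earlier insertion into the same hash table and list. The sketch below shows that insertion side, loosely modeled on the shape of the historical drm_add_magic(); the foo_add_magic name is invented, and details such as the dev->magicfree list and the exact locking are assumptions that vary between kernel versions.

#include <linux/slab.h>
#include <linux/list.h>
#include <drm/drmP.h>

static int foo_add_magic(struct drm_device *dev, struct drm_file *priv,
                         drm_magic_t magic)
{
        struct drm_magic_entry *entry;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->priv = priv;
        /* The key stored here is what drm_ht_find_item() matches against in
         * drm_find_file() and drm_remove_magic().
         */
        entry->hash_item.key = (unsigned long)magic;

        mutex_lock(&dev->struct_mutex);
        drm_ht_insert_item(&dev->magiclist, &entry->hash_item);
        /* Paired with the list_del() in the remove path above; the list name
         * is taken from older kernels and may differ elsewhere.
         */
        list_add_tail(&entry->head, &dev->magicfree);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}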