void i915_vma_close(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_closed(vma));
	vma->flags |= I915_VMA_CLOSED;

	list_del(&vma->obj_link);
	rb_erase(&vma->obj_node, &vma->obj->vma_tree);

	if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
		WARN_ON(i915_vma_unbind(vma));
}
static void
i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq)
{
	const unsigned int idx = rq->engine->id;
	struct i915_vma *vma =
		container_of(active, struct i915_vma, last_read[idx]);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

	i915_vma_clear_active(vma, idx);
	if (i915_vma_is_active(vma))
		return;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
		WARN_ON(i915_vma_unbind(vma));

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Prune the shared fence arrays iff completely idle (inc. external) */
	if (reservation_object_trylock(obj->resv)) {
		if (reservation_object_test_signaled_rcu(obj->resv, true))
			reservation_object_add_excl_fence(obj->resv, NULL);
		reservation_object_unlock(obj->resv);
	}

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	spin_lock(&rq->i915->mm.obj_lock);
	if (obj->bind_count)
		list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list);
	spin_unlock(&rq->i915->mm.obj_lock);

	obj->mm.dirty = true; /* be paranoid */

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}
static void
i915_vma_retire(struct i915_gem_active *active,
		struct drm_i915_gem_request *rq)
{
	const unsigned int idx = rq->engine->id;
	struct i915_vma *vma =
		container_of(active, struct i915_vma, last_read[idx]);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

	i915_vma_clear_active(vma, idx);
	if (i915_vma_is_active(vma))
		return;

	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
		WARN_ON(i915_vma_unbind(vma));

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	if (obj->bind_count)
		list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);

	obj->mm.dirty = true; /* be paranoid */

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}
/**
 * i915_gem_set_tiling - IOCTL handler to set tiling mode
 * @dev: DRM device
 * @data: data pointer for the ioctl
 * @file: DRM file for the ioctl call
 *
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_set_tiling *args = data;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;
	int ret = 0;

	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
	if (&obj->base == NULL)
		return -ENOENT;

	if (!i915_tiling_ok(dev,
			    args->stride, obj->base.size, args->tiling_mode)) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EINVAL;
	}

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev->struct_mutex);
	if (obj->pin_display || obj->framebuffer_references) {
		ret = -EBUSY;
		goto err;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user. This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}

	if (args->tiling_mode != obj->tiling_mode ||
	    args->stride != obj->stride) {
		/* We need to rebind the object if its current allocation
		 * no longer meets the alignment restrictions for its new
		 * tiling mode. Otherwise we can just leave it alone, but
		 * need to ensure that any fence register is updated before
		 * the next fenced (either through the GTT or by the BLT unit
		 * on older GPUs) access.
		 *
		 * After updating the tiling parameters, we then flag whether
		 * we need to update an associated fence register. Note this
		 * has to also include the unfenced register the GPU uses
		 * whilst executing a fenced command for an untiled object.
		 */
		if (obj->map_and_fenceable &&
		    !i915_gem_object_fence_ok(obj, args->tiling_mode))
			ret = i915_vma_unbind(i915_gem_obj_to_ggtt(obj));

		if (ret == 0) {
			if (obj->pages &&
			    obj->madv == I915_MADV_WILLNEED &&
			    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
				if (args->tiling_mode == I915_TILING_NONE)
					i915_gem_object_unpin_pages(obj);
				if (obj->tiling_mode == I915_TILING_NONE)
					i915_gem_object_pin_pages(obj);
			}

			obj->fence_dirty =
				obj->last_fenced_req ||
				obj->fence_reg != I915_FENCE_REG_NONE;

			obj->tiling_mode = args->tiling_mode;
			obj->stride = args->stride;

			/* Force the fence to be reacquired for GTT access */
			i915_gem_release_mmap(obj);
		}
	}
	/* we have to maintain this existing ABI... */
	args->stride = obj->stride;
	args->tiling_mode = obj->tiling_mode;

	/* Try to preallocate memory required to save swizzling on put-pages */
	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		if (obj->bit_17 == NULL) {
			obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
					      sizeof(long), GFP_KERNEL);
		}
	} else {
		kfree(obj->bit_17);
		obj->bit_17 = NULL;
	}

err:
	/* Common exit: drop the object reference, struct_mutex and wakeref
	 * taken above before returning the ioctl result.
	 */
	drm_gem_object_unreference_unlocked(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	intel_runtime_pm_put(dev_priv);

	return ret;
}
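/*
 * For context, a minimal userspace sketch of how the set-tiling ioctl handled
 * above is typically driven.  The open DRM fd, the pre-existing GEM handle and
 * the set_x_tiling() helper name are assumptions for illustration, not part of
 * the kernel source; real callers usually go through libdrm's drmIoctl() so the
 * call is restarted on EINTR.
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Hypothetical helper: request X-tiling for a GEM object and return the
 * bit-6 swizzle the kernel reports (or -errno on failure).
 */
static int set_x_tiling(int fd, uint32_t handle, uint32_t stride)
{
	struct drm_i915_gem_set_tiling arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;		/* GEM handle created elsewhere */
	arg.tiling_mode = I915_TILING_X;
	arg.stride = stride;		/* bytes per row; must pass i915_tiling_ok() */

	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg))
		return -errno;

	/* The handler writes back the swizzle_mode, stride and tiling_mode it
	 * actually applied -- the "existing ABI" preserved above.
	 */
	return (int)arg.swizzle_mode;
}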