/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_set_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;
	obj_priv = obj->driver_private;

	if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);

	if (args->tiling_mode == I915_TILING_NONE) {
		obj_priv->tiling_mode = I915_TILING_NONE;
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		}
	}

	if (args->tiling_mode != obj_priv->tiling_mode) {
		int ret;

		/* Unbind the object, as switching tiling means we're
		 * switching the cache organization due to fencing, probably.
		 */
		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			WARN(ret != -ERESTARTSYS,
			     "failed to unbind object for tiling switch");
			args->tiling_mode = obj_priv->tiling_mode;
			mutex_unlock(&dev->struct_mutex);
			drm_gem_object_unreference(obj);

			return ret;
		}
		obj_priv->tiling_mode = args->tiling_mode;
	}
	obj_priv->stride = args->stride;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_set_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;
	obj_priv = obj->driver_private;

	if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
		mutex_lock(&dev->struct_mutex);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user.  This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}

	mutex_lock(&dev->struct_mutex);
	if (args->tiling_mode != obj_priv->tiling_mode ||
	    args->stride != obj_priv->stride) {
		/* We need to rebind the object if its current allocation
		 * no longer meets the alignment restrictions for its new
		 * tiling mode. Otherwise we can just leave it alone, but
		 * need to ensure that any fence register is cleared.
		 */
		if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
			ret = i915_gem_object_unbind(obj);
		else
			ret = i915_gem_object_put_fence_reg(obj);
		if (ret != 0) {
			WARN(ret != -ERESTARTSYS,
			     "failed to reset object for tiling switch");
			args->tiling_mode = obj_priv->tiling_mode;
			args->stride = obj_priv->stride;
			goto err;
		}

		/* If we've changed tiling, GTT-mappings of the object
		 * need to re-fault to ensure that the correct fence register
		 * setup is in place.
		 */
		i915_gem_release_mmap(obj);

		obj_priv->tiling_mode = args->tiling_mode;
		obj_priv->stride = args->stride;
	}
err:
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_set_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_gem_check_is_wedged(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL)
		return -ENOENT;

	if (!i915_tiling_ok(dev,
			    args->stride, obj->base.size, args->tiling_mode)) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EINVAL;
	}

	if (obj->pin_count) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EBUSY;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user.  This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}

	mutex_lock(&dev->struct_mutex);
	if (args->tiling_mode != obj->tiling_mode ||
	    args->stride != obj->stride) {
		/* We need to rebind the object if its current allocation
		 * no longer meets the alignment restrictions for its new
		 * tiling mode. Otherwise we can just leave it alone, but
		 * need to ensure that any fence register is cleared.
		 */
		i915_gem_release_mmap(obj);

		obj->map_and_fenceable =
			obj->gtt_space == NULL ||
			(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
			 i915_gem_object_fence_ok(obj, args->tiling_mode));

		obj->tiling_changed = true;
		obj->tiling_mode = args->tiling_mode;
		obj->stride = args->stride;
	}
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_set_tiling *args = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL)
		return -ENOENT;

	if (!i915_tiling_ok(dev,
			    args->stride, obj->base.size, args->tiling_mode)) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EINVAL;
	}

	if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EBUSY;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user.  This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}

	mutex_lock(&dev->struct_mutex);
	if (args->tiling_mode != obj->tiling_mode ||
	    args->stride != obj->stride) {
		/* We need to rebind the object if its current allocation
		 * no longer meets the alignment restrictions for its new
		 * tiling mode. Otherwise we can just leave it alone, but
		 * need to ensure that any fence register is updated before
		 * the next fenced (either through the GTT or by the BLT unit
		 * on older GPUs) access.
		 *
		 * After updating the tiling parameters, we then flag whether
		 * we need to update an associated fence register. Note this
		 * has to also include the unfenced register the GPU uses
		 * whilst executing a fenced command for an untiled object.
		 */

		obj->map_and_fenceable =
			!i915_gem_obj_ggtt_bound(obj) ||
			(i915_gem_obj_ggtt_offset(obj) +
			 obj->base.size <= dev_priv->gtt.mappable_end &&
			 i915_gem_object_fence_ok(obj, args->tiling_mode));

		/* Rebind if we need a change of alignment */
		if (!obj->map_and_fenceable) {
			u32 unfenced_align =
				i915_gem_get_gtt_alignment(dev, obj->base.size,
							   args->tiling_mode,
							   false);
			if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1))
				ret = i915_gem_object_ggtt_unbind(obj);
		}

		if (ret == 0) {
			obj->fence_dirty =
				obj->last_fenced_seqno ||
				obj->fence_reg != I915_FENCE_REG_NONE;

			obj->tiling_mode = args->tiling_mode;
			obj->stride = args->stride;

			/* Force the fence to be reacquired for GTT access */
			i915_gem_release_mmap(obj);
		}
	}
	/* we have to maintain this existing ABI... */
	args->stride = obj->stride;
	args->tiling_mode = obj->tiling_mode;

	/* Try to preallocate memory required to save swizzling on put-pages */
	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		if (obj->bit_17 == NULL) {
			obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
					      sizeof(long), GFP_KERNEL);
		}
	} else {
		kfree(obj->bit_17);
		obj->bit_17 = NULL;
	}

	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/*ARGSUSED*/
int
i915_gem_set_tiling(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_i915_gem_set_tiling args;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	if (dev->driver->use_gem != 1)
		return ENODEV;

	DRM_COPYFROM_WITH_RETURN(&args,
	    (struct drm_i915_gem_set_tiling __user *) data, sizeof(args));

	obj = drm_gem_object_lookup(fpriv, args.handle);
	if (obj == NULL)
		return EINVAL;
	obj_priv = obj->driver_private;

	if (!i915_tiling_ok(dev, args.stride, obj->size, args.tiling_mode)) {
		drm_gem_object_unreference(obj);
		DRM_DEBUG("i915 tiling is not OK");
		return EINVAL;
	}

	spin_lock(&dev->struct_mutex);

	if (args.tiling_mode == I915_TILING_NONE) {
		args.swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
	} else {
		if (args.tiling_mode == I915_TILING_X)
			args.swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args.swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
		/* If we can't handle the swizzling, make it untiled. */
		if (args.swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args.tiling_mode = I915_TILING_NONE;
			args.swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		}
	}

	if (args.tiling_mode != obj_priv->tiling_mode) {
		int ret;

		/* Unbind the object, as switching tiling means we're
		 * switching the cache organization due to fencing, probably.
		 */
		ret = i915_gem_object_unbind(obj, 1);
		if (ret != 0) {
			args.tiling_mode = obj_priv->tiling_mode;
			spin_unlock(&dev->struct_mutex);
			drm_gem_object_unreference(obj);
			DRM_ERROR("tiling switch!! unbind error %d", ret);
			return ret;
		}
		obj_priv->tiling_mode = args.tiling_mode;
	}
	obj_priv->stride = args.stride;

	ret = DRM_COPY_TO_USER((struct drm_i915_gem_set_tiling __user *) data,
	    &args, sizeof(args));
	if (ret != 0)
		DRM_ERROR(" gem set tiling error! %d", ret);

	drm_gem_object_unreference(obj);
	spin_unlock(&dev->struct_mutex);

	return 0;
}
/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_set_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL)
		return -ENOENT;

	if (!i915_tiling_ok(dev,
			    args->stride, obj->base.size, args->tiling_mode)) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EINVAL;
	}

	if (obj->pin_count) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EBUSY;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user.  This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}

	DRM_LOCK(dev);
	if (args->tiling_mode != obj->tiling_mode ||
	    args->stride != obj->stride) {
		/* We need to rebind the object if its current allocation
		 * no longer meets the alignment restrictions for its new
		 * tiling mode. Otherwise we can just leave it alone, but
		 * need to ensure that any fence register is cleared.
		 *
		 * After updating the tiling parameters, we then flag whether
		 * we need to update an associated fence register. Note this
		 * has to also include the unfenced register the GPU uses
		 * whilst executing a fenced command for an untiled object.
		 */
		i915_gem_release_mmap(obj);

		obj->map_and_fenceable =
			obj->gtt_space == NULL ||
			(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
			 i915_gem_object_fence_ok(obj, args->tiling_mode));

		/* Rebind if we need a change of alignment */
		if (!obj->map_and_fenceable) {
			u32 unfenced_alignment =
				i915_gem_get_unfenced_gtt_alignment(dev,
								    obj->base.size,
								    args->tiling_mode);
			if (obj->gtt_offset & (unfenced_alignment - 1))
				ret = i915_gem_object_unbind(obj);
		}

		if (ret == 0) {
			obj->fence_dirty =
				obj->fenced_gpu_access ||
				obj->fence_reg != I915_FENCE_REG_NONE;

			obj->tiling_mode = args->tiling_mode;
			obj->stride = args->stride;
		}
	}
	/* we have to maintain this existing ABI... */
	args->stride = obj->stride;
	args->tiling_mode = obj->tiling_mode;

	drm_gem_object_unreference(&obj->base);
	DRM_UNLOCK(dev);

	return ret;
}
/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_set_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;
	obj_priv = obj->driver_private;

	if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user.  This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		}
	}

	if (args->tiling_mode != obj_priv->tiling_mode) {
		int ret;

		/* Unbind the object, as switching tiling means we're
		 * switching the cache organization due to fencing, probably.
		 */
		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			WARN(ret != -ERESTARTSYS,
			     "failed to unbind object for tiling switch");
			args->tiling_mode = obj_priv->tiling_mode;
			mutex_unlock(&dev->struct_mutex);
			drm_gem_object_unreference(obj);

			return ret;
		}
		obj_priv->tiling_mode = args->tiling_mode;
	}
	obj_priv->stride = args->stride;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
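/*
 * Illustrative userspace sketch, not part of the driver sources above: a
 * minimal example of how this ioctl is typically reached through libdrm.
 * It assumes "fd" is an open i915 DRM device and "handle" is a GEM handle
 * obtained earlier (e.g. via DRM_IOCTL_I915_GEM_CREATE); the helper name
 * set_x_tiling() is hypothetical.
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static int set_x_tiling(int fd, uint32_t handle, uint32_t stride)
{
	struct drm_i915_gem_set_tiling arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.tiling_mode = I915_TILING_X;
	arg.stride = stride;

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg))
		return -errno;

	/* The kernel reports the tiling it actually applied: it may fall
	 * back to I915_TILING_NONE when it cannot handle the bit-6
	 * swizzling, and arg.swizzle_mode tells userspace how addresses
	 * are swizzled. */
	return arg.tiling_mode == I915_TILING_X ? 0 : -EINVAL;
}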