/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_set_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;
	obj_priv = obj->driver_private;

	if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);

	if (args->tiling_mode == I915_TILING_NONE) {
		obj_priv->tiling_mode = I915_TILING_NONE;
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		}
	}
	if (args->tiling_mode != obj_priv->tiling_mode) {
		int ret;

		/* Unbind the object, as switching tiling means we're
		 * switching the cache organization due to fencing, probably.
		 */
		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			WARN(ret != -ERESTARTSYS,
			     "failed to unbind object for tiling switch");
			args->tiling_mode = obj_priv->tiling_mode;
			mutex_unlock(&dev->struct_mutex);
			drm_gem_object_unreference(obj);

			return ret;
		}
		obj_priv->tiling_mode = args->tiling_mode;
	}
	obj_priv->stride = args->stride;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
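For reference, here is a minimal userspace sketch of driving this ioctl and reading back the reported swizzle. It is illustrative only: the open DRM file descriptor `fd` and the GEM handle are assumptions, while the ioctl number, struct fields, and tiling constants come from the i915 uapi headers and libdrm.

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>        /* drmIoctl(), from libdrm */
#include <drm/i915_drm.h>   /* DRM_IOCTL_I915_GEM_SET_TILING */

/* Illustrative sketch: request X tiling for `gem_handle` and return the
 * bit-6 swizzle mode the kernel reports back (negative errno on failure).
 * `fd` is assumed to be an open DRM device node.
 */
static int set_x_tiling(int fd, uint32_t gem_handle, uint32_t stride)
{
	struct drm_i915_gem_set_tiling st;

	memset(&st, 0, sizeof(st));
	st.handle = gem_handle;
	st.tiling_mode = I915_TILING_X;
	st.stride = stride;

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &st))
		return -errno;

	/* The kernel may have downgraded the request to I915_TILING_NONE
	 * if it cannot handle the platform's swizzling, so userspace must
	 * check the returned tiling_mode and swizzle_mode.
	 */
	return (int)st.swizzle_mode;
}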
Example #2
static void cancel_userptr(struct work_struct *work)
{
	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
	struct drm_i915_gem_object *obj = mo->obj;
	struct work_struct *active;

	/* Cancel any active worker and force us to re-evaluate gup */
	mutex_lock(&obj->mm.lock);
	active = fetch_and_zero(&obj->userptr.work);
	mutex_unlock(&obj->mm.lock);
	if (active)
		goto out;

	i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);

	mutex_lock(&obj->base.dev->struct_mutex);

	/* We are inside a kthread context and can't be interrupted */
	if (i915_gem_object_unbind(obj) == 0)
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
	WARN_ONCE(i915_gem_object_has_pages(obj),
		  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_global=%d\n",
		  obj->bind_count,
		  atomic_read(&obj->mm.pages_pin_count),
		  obj->pin_global);

	mutex_unlock(&obj->base.dev->struct_mutex);

out:
	i915_gem_object_put(obj);
}
/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_set_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;
	obj_priv = obj->driver_private;

	if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
		mutex_lock(&dev->struct_mutex);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user.  This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}

	mutex_lock(&dev->struct_mutex);
	if (args->tiling_mode != obj_priv->tiling_mode ||
	    args->stride != obj_priv->stride) {
		/* We need to rebind the object if its current allocation
		 * no longer meets the alignment restrictions for its new
		 * tiling mode. Otherwise we can just leave it alone, but
		 * need to ensure that any fence register is cleared.
		 */
		if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
			ret = i915_gem_object_unbind(obj);
		else
			ret = i915_gem_object_put_fence_reg(obj);
		if (ret != 0) {
			WARN(ret != -ERESTARTSYS,
			     "failed to reset object for tiling switch");
			args->tiling_mode = obj_priv->tiling_mode;
			args->stride = obj_priv->stride;
			goto err;
		}

		/* If we've changed tiling, GTT-mappings of the object
		 * need to re-fault to ensure that the correct fence register
		 * setup is in place.
		 */
		i915_gem_release_mmap(obj);

		obj_priv->tiling_mode = args->tiling_mode;
		obj_priv->stride = args->stride;
	}
err:
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_unbind(obj) == 0)
		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
	return !i915_gem_object_has_pages(obj);
}
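A hedged caller sketch for the helper above, showing only its contract (a true return means every page of the object was released). The array-of-candidates interface is an assumption for illustration; the real driver walks its own object lists from the shrinker.

/* Hypothetical usage sketch, not the driver's actual shrinker loop: try to
 * drop the pages of each candidate object and report how many pages were
 * actually released.  The array interface is an assumption for illustration.
 */
static unsigned long drop_candidate_pages(struct drm_i915_gem_object **objs,
					  unsigned int nr)
{
	unsigned long freed = 0;
	unsigned int i;

	for (i = 0; i < nr; i++) {
		if (unsafe_drop_pages(objs[i]))
			freed += objs[i]->base.size >> PAGE_SHIFT;
	}

	return freed;
}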
/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		   struct drm_file *file)
{
	struct drm_i915_gem_set_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL)
		return -ENOENT;

	if (!i915_tiling_ok(dev,
			    args->stride, obj->base.size, args->tiling_mode)) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EINVAL;
	}

	if (obj->pin_count) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EBUSY;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user.  This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}

	mutex_lock(&dev->struct_mutex);
	if (args->tiling_mode != obj->tiling_mode ||
	    args->stride != obj->stride) {
		/* We need to rebind the object if its current allocation
		 * no longer meets the alignment restrictions for its new
		 * tiling mode. Otherwise we can just leave it alone, but
		 * need to ensure that any fence register is cleared.
		 */
		i915_gem_release_mmap(obj);

		obj->map_and_fenceable =
			obj->gtt_space == NULL ||
			(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
			 i915_gem_object_fence_ok(obj, args->tiling_mode));

		/* Rebind if we need a change of alignment */
		if (!obj->map_and_fenceable) {
			u32 unfenced_alignment =
				i915_gem_get_unfenced_gtt_alignment(dev,
								    obj->base.size,
								    args->tiling_mode);
			if (obj->gtt_offset & (unfenced_alignment - 1))
				ret = i915_gem_object_unbind(obj);
		}

		if (ret == 0) {
			obj->tiling_changed = true;
			obj->tiling_mode = args->tiling_mode;
			obj->stride = args->stride;
		}
	}
	/* we have to maintain this existing ABI... */
	args->stride = obj->stride;
	args->tiling_mode = obj->tiling_mode;
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
Example #6
/*ARGSUSED*/
int
i915_gem_set_tiling(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_i915_gem_set_tiling args;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	if (dev->driver->use_gem != 1)
		return ENODEV;

	DRM_COPYFROM_WITH_RETURN(&args,
	    (struct drm_i915_gem_set_tiling __user *) data, sizeof(args));

	obj = drm_gem_object_lookup(fpriv, args.handle);
	if (obj == NULL)
		return EINVAL;
	obj_priv = obj->driver_private;

	if (!i915_tiling_ok(dev, args.stride, obj->size, args.tiling_mode)) {
		drm_gem_object_unreference(obj);
		DRM_DEBUG("i915 tiling is not OK");
		return EINVAL;
	}

	spin_lock(&dev->struct_mutex);

	if (args.tiling_mode == I915_TILING_NONE) {
		args.swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
	} else {
		if (args.tiling_mode == I915_TILING_X)
			args.swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args.swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
		/* If we can't handle the swizzling, make it untiled. */
		if (args.swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args.tiling_mode = I915_TILING_NONE;
			args.swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		}
	}

	if (args.tiling_mode != obj_priv->tiling_mode) {
		int ret;

		/* Unbind the object, as switching tiling means we're
		 * switching the cache organization due to fencing, probably.
		 */
		ret = i915_gem_object_unbind(obj, 1);
		if (ret != 0) {
			args.tiling_mode = obj_priv->tiling_mode;
			spin_unlock(&dev->struct_mutex);
			drm_gem_object_unreference(obj);
			DRM_ERROR("tiling switch!! unbind error %d", ret);
			return ret;
		}
		obj_priv->tiling_mode = args.tiling_mode;
	}
	obj_priv->stride = args.stride;

	ret = DRM_COPY_TO_USER((struct drm_i915_gem_set_tiling __user *) data,
	    &args, sizeof(args));
	if (ret != 0)
		DRM_ERROR("gem set tiling error %d", ret);

	drm_gem_object_unreference(obj);
	spin_unlock(&dev->struct_mutex);

	return 0;
}
/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		   struct drm_file *file)
{
	struct drm_i915_gem_set_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL)
		return -ENOENT;

	if (!i915_tiling_ok(dev,
			    args->stride, obj->base.size, args->tiling_mode)) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EINVAL;
	}

	if (obj->pin_count) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EBUSY;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user.  This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}

	mutex_lock(&dev->struct_mutex);
	if (args->tiling_mode != obj->tiling_mode ||
	    args->stride != obj->stride) {
		/* We need to rebind the object if its current allocation
		 * no longer meets the alignment restrictions for its new
		 * tiling mode. Otherwise we can just leave it alone, but
		 * need to ensure that any fence register is updated before
		 * the next fenced (either through the GTT or by the BLT unit
		 * on older GPUs) access.
		 *
		 * After updating the tiling parameters, we then flag whether
		 * we need to update an associated fence register. Note this
		 * has to also include the unfenced register the GPU uses
		 * whilst executing a fenced command for an untiled object.
		 */

		obj->map_and_fenceable =
			obj->gtt_space == NULL ||
			(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
			 i915_gem_object_fence_ok(obj, args->tiling_mode));

		/* Rebind if we need a change of alignment */
		if (!obj->map_and_fenceable) {
			u32 unfenced_alignment =
				i915_gem_get_unfenced_gtt_alignment(dev,
								    obj->base.size,
								    args->tiling_mode);
			if (obj->gtt_offset & (unfenced_alignment - 1))
				ret = i915_gem_object_unbind(obj);
		}

		if (ret == 0) {
			obj->fence_dirty =
				obj->fenced_gpu_access ||
				obj->fence_reg != I915_FENCE_REG_NONE;

			obj->tiling_mode = args->tiling_mode;
			obj->stride = args->stride;

			/* Force the fence to be reacquired for GTT access */
			i915_gem_release_mmap(obj);
		}
	}
	/* we have to maintain this existing ABI... */
	args->stride = obj->stride;
	args->tiling_mode = obj->tiling_mode;

	/* Try to preallocate memory required to save swizzling on put-pages */
	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		if (obj->bit_17 == NULL) {
			obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) *
					      sizeof(long), GFP_KERNEL);
		}
	} else {
		/* No bit-17 swizzling needed; drop any saved swizzle state. */
		kfree(obj->bit_17);
		obj->bit_17 = NULL;
	}

	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static int
userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
				  const struct mmu_notifier_range *range)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct interval_tree_node *it;
	struct mutex *unlock = NULL;
	unsigned long end;
	int ret = 0;

	if (RB_EMPTY_ROOT(&mn->objects.rb_root))
		return 0;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end = range->end - 1;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, range->start, end);
	while (it) {
		struct drm_i915_gem_object *obj;

		if (!mmu_notifier_range_blockable(range)) {
			ret = -EAGAIN;
			break;
		}

		/*
		 * The mmu_object is released late when destroying the
		 * GEM object so it is entirely possible to gain a
		 * reference on an object in the process of being freed
		 * since our serialisation is via the spinlock and not
		 * the struct_mutex - and consequently use it after it
		 * is freed and then double free it. To prevent that
		 * use-after-free we only acquire a reference on the
		 * object if it is not in the process of being destroyed.
		 */
		obj = container_of(it, struct i915_mmu_object, it)->obj;
		if (!kref_get_unless_zero(&obj->base.refcount)) {
			it = interval_tree_iter_next(it, range->start, end);
			continue;
		}
		spin_unlock(&mn->lock);

		if (!unlock) {
			unlock = &mn->mm->i915->drm.struct_mutex;

			switch (mutex_trylock_recursive(unlock)) {
			default:
			case MUTEX_TRYLOCK_FAILED:
				if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) {
					i915_gem_object_put(obj);
					return -EINTR;
				}
				/* fall through */
			case MUTEX_TRYLOCK_SUCCESS:
				break;

			case MUTEX_TRYLOCK_RECURSIVE:
				unlock = ERR_PTR(-EEXIST);
				break;
			}
		}

		ret = i915_gem_object_unbind(obj);
		if (ret == 0)
			ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
		i915_gem_object_put(obj);
		if (ret)
			goto unlock;

		spin_lock(&mn->lock);

		/*
		 * As we do not (yet) protect the mmu from concurrent insertion
		 * over this range, there is no guarantee that this search will
		 * terminate given a pathologic workload.
		 */
		it = interval_tree_iter_first(&mn->objects, range->start, end);
	}
	spin_unlock(&mn->lock);

unlock:
	if (!IS_ERR_OR_NULL(unlock))
		mutex_unlock(unlock);

	return ret;
}
Example #9
/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_set_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;
	obj_priv = obj->driver_private;

	if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user.  This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		}
	}
	if (args->tiling_mode != obj_priv->tiling_mode) {
		int ret;

		/* Unbind the object, as switching tiling means we're
		 * switching the cache organization due to fencing, probably.
		 */
		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			WARN(ret != -ERESTARTSYS,
			     "failed to unbind object for tiling switch");
			args->tiling_mode = obj_priv->tiling_mode;
			mutex_unlock(&dev->struct_mutex);
			drm_gem_object_unreference(obj);

			return ret;
		}
		obj_priv->tiling_mode = args->tiling_mode;
	}
	obj_priv->stride = args->stride;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}