Example #1
static int vgt_balloon_space(struct drm_mm *mm,
			     struct drm_mm_node *node,
			     unsigned long start, unsigned long end)
{
	unsigned long size = end - start;

	if (start == end)
		return -EINVAL;

	DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
		 start, end, size / 1024);

	/* Reserve the ballooned-out range so the i915 allocator never hands it out. */
	node->start = start;
	node->size = size;

	return drm_mm_reserve_node(mm, node);
}
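
Every example in this listing follows the same basic pattern around drm_mm_reserve_node(), which claims a caller-chosen fixed range inside an already initialised struct drm_mm. The sketch below is a minimal, hypothetical illustration of that lifecycle, not code from the examples above: example_claim_range() and example_release_range() are invented names, the drm_mm is assumed to have been set up elsewhere with drm_mm_init(), and the field types (u64) follow recent kernels. The caller fills in node->start and node->size, reserves the range, and later releases it with drm_mm_remove_node().

#include <drm/drm_mm.h>

/*
 * Minimal sketch: claim a fixed [start, start + size) range in an
 * initialised drm_mm. drm_mm_reserve_node() returns 0 on success or a
 * negative errno (typically -ENOSPC) when the range overlaps an
 * existing node.
 */
static int example_claim_range(struct drm_mm *mm, struct drm_mm_node *node,
			       u64 start, u64 size)
{
	node->start = start;	/* fixed offset to claim */
	node->size = size;	/* length of the range in bytes */

	return drm_mm_reserve_node(mm, node);
}

static void example_release_range(struct drm_mm_node *node)
{
	/* Only reserved/allocated nodes may be removed. */
	if (drm_mm_node_allocated(node))
		drm_mm_remove_node(node);
}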
Example #2
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *ggtt = &dev_priv->gtt.base;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
			stolen_offset, gtt_offset, size);

	/* KISS and expect everything to be page-aligned */
	BUG_ON(stolen_offset & 4095);
	BUG_ON(size & 4095);

	if (WARN_ON(size == 0))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj == NULL) {
		DRM_DEBUG_KMS("failed to allocate stolen object\n");
		drm_mm_remove_node(stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	vma = i915_gem_vma_create(obj, ggtt);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_out;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	vma->node.start = gtt_offset;
	vma->node.size = size;
	if (drm_mm_initialized(&ggtt->mm)) {
		ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
		if (ret) {
			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
			goto err_vma;
		}
	}

	obj->has_global_gtt_mapping = 1;

	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
	list_add_tail(&vma->mm_list, &ggtt->inactive_list);

	return obj;

err_vma:
	i915_gem_vma_destroy(vma);
err_out:
	drm_mm_remove_node(stolen);
	kfree(stolen);
	drm_gem_object_unreference(&obj->base);
	return NULL;
}
Example #3
struct drm_i915_gem_object *
kos_gem_fb_object_create(struct drm_device *dev,
                           u32 gtt_offset,
                           u32 size)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct i915_address_space *ggtt = &dev_priv->gtt.base;
    struct drm_i915_gem_object *obj;
    struct drm_mm_node *fb_node;
    struct i915_vma *vma;
    int ret;

    DRM_DEBUG_KMS("creating preallocated framebuffer object: gtt_offset=%x, size=%x\n",
                  gtt_offset, size);

    /* KISS and expect everything to be page-aligned */
    BUG_ON(size & 4095);

    if (WARN_ON(size == 0))
        return NULL;

    fb_node = kzalloc(sizeof(*fb_node), GFP_KERNEL);
    if (!fb_node)
        return NULL;

    fb_node->start = gtt_offset;
    fb_node->size = size;

    obj = _kos_fb_object_create(dev, fb_node);
    if (obj == NULL) {
        DRM_DEBUG_KMS("failed to preallocate framebuffer object\n");
        kfree(fb_node);
        return NULL;
    }

    vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
    if (IS_ERR(vma)) {
        ret = PTR_ERR(vma);
        goto err_out;
    }

    /* To simplify the initialisation sequence between KMS and GTT,
     * we allow construction of the stolen object prior to
     * setting up the GTT space. The actual reservation will occur
     * later.
     */
    vma->node.start = gtt_offset;
    vma->node.size = size;
    if (drm_mm_initialized(&ggtt->mm)) {
        ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
        if (ret) {
            DRM_DEBUG_KMS("failed to allocate framebuffer GTT space\n");
            goto err_vma;
        }
    }

//    obj->has_global_gtt_mapping = 1;

    list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
    list_add_tail(&vma->mm_list, &ggtt->inactive_list);

    mutex_lock(&dev->object_name_lock);
    idr_preload(GFP_KERNEL);

    if (!obj->base.name) {
        ret = idr_alloc(&dev->object_name_idr, &obj->base, 1, 0, GFP_NOWAIT);
        if (ret < 0)
            goto err_gem;

        obj->base.name = ret;

        /* Allocate a reference for the name table.  */
        drm_gem_object_reference(&obj->base);

        DRM_DEBUG_KMS("%s allocate fb name %d\n", __FUNCTION__, obj->base.name );
    }

    idr_preload_end();
    mutex_unlock(&dev->object_name_lock);
    drm_gem_object_unreference(&obj->base);
    return obj;

err_gem:
    idr_preload_end();
    mutex_unlock(&dev->object_name_lock);
err_vma:
    i915_gem_vma_destroy(vma);
err_out:
    kfree(fb_node);
    drm_gem_object_unreference(&obj->base);
    return NULL;
}
Example #4
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
					       resource_size_t stolen_offset,
					       resource_size_t gtt_offset,
					       resource_size_t size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
			 &stolen_offset, &gtt_offset, &size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) ||
	    WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	mutex_unlock(&dev_priv->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj == NULL) {
		DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_pages;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
				   size, gtt_offset, obj->cache_level,
				   0);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n");
		goto err_pages;
	}

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	vma->pages = obj->mm.pages;
	vma->flags |= I915_VMA_GLOBAL_BIND;
	__i915_vma_set_map_and_fenceable(vma);

	mutex_lock(&ggtt->vm.mutex);
	list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
	mutex_unlock(&ggtt->vm.mutex);

	spin_lock(&dev_priv->mm.obj_lock);
	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
	obj->bind_count++;
	spin_unlock(&dev_priv->mm.obj_lock);

	return obj;

err_pages:
	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return NULL;
}
Example #5
/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
	struct drm_i915_gem_object *obj = vma->obj;
	u64 start, end;
	int ret;

	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	if (flags & PIN_MAPPABLE)
		size = i915_gem_get_ggtt_size(dev_priv, size,
					      i915_gem_object_get_tiling(obj));

	alignment = max(max(alignment, vma->display_alignment),
			i915_gem_get_ggtt_alignment(dev_priv, size,
						    i915_gem_object_get_tiling(obj),
						    flags & PIN_MAPPABLE));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
			  size, obj->base.size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -E2BIG;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (offset & (alignment - 1) || offset > end - size) {
			ret = -EINVAL;
			goto err_unpin;
		}

		vma->node.start = offset;
		vma->node.size = size;
		vma->node.color = obj->cache_level;
		ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
		if (ret) {
			ret = i915_gem_evict_for_vma(vma);
			if (ret == 0)
				ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
			if (ret)
				goto err_unpin;
		}
	} else {
		u32 search_flag, alloc_flag;

		if (flags & PIN_HIGH) {
			search_flag = DRM_MM_SEARCH_BELOW;
			alloc_flag = DRM_MM_CREATE_TOP;
		} else {
			search_flag = DRM_MM_SEARCH_DEFAULT;
			alloc_flag = DRM_MM_CREATE_DEFAULT;
		}

		/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
		 * so we know that we always have a minimum alignment of 4096.
		 * The drm_mm range manager is optimised to return results
		 * with zero alignment, so where possible use the optimal
		 * path.
		 */
		if (alignment <= 4096)
			alignment = 0;

search_free:
		ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
							  &vma->node,
							  size, alignment,
							  obj->cache_level,
							  start, end,
							  search_flag,
							  alloc_flag);
		if (ret) {
			ret = i915_gem_evict_something(vma->vm, size, alignment,
						       obj->cache_level,
						       start, end,
						       flags);
			if (ret == 0)
				goto search_free;

			goto err_unpin;
		}

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));

	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	obj->bind_count++;
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}