Example #1
0
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
			      resource_size_t size)
{
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	if (size == 0)
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
	if (ret) {
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj)
		return obj;

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
	return NULL;
}
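Since every failure mode above collapses into a NULL return, callers that merely prefer stolen memory typically fall back to an ordinary shmem-backed object. A minimal caller sketch under that assumption (the helper name example_create_preferring_stolen is illustrative, not part of the driver, and i915_gem_object_create() is assumed to be this era's shmem allocator, which reports failure via ERR_PTR() rather than NULL):

static struct drm_i915_gem_object *
example_create_preferring_stolen(struct drm_i915_private *dev_priv,
				 resource_size_t size)
{
	struct drm_i915_gem_object *obj;

	/* NULL covers: no stolen region, size == 0, or no free space */
	obj = i915_gem_object_create_stolen(dev_priv, size);
	if (!obj)
		obj = i915_gem_object_create(dev_priv, size);

	return obj;
}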
Example #2
0
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
	if (size == 0)
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
				 4096, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj)
		return obj;

	drm_mm_remove_node(stolen);
	kfree(stolen);
	return NULL;
}
Example #3
0
void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
{
	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

	drm_mm_takedown(&dev_priv->mm.stolen);
}
Example #4
0
void i915_gem_cleanup_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

	i915_gem_stolen_cleanup_compression(dev);
	drm_mm_takedown(&dev_priv->mm.stolen);
}
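Both takedown variants assume a matching initialisation step created the allocator they tear down. In the upstream driver that is i915_gem_init_stolen(), which, after probing the stolen region, seeds the allocator with drm_mm_init(). A much-reduced sketch under that assumption, with usable_size standing in for the probed size (older trees, like Example #4's, predate the stolen_lock mutex):

static void example_init_stolen(struct drm_i915_private *dev_priv,
				resource_size_t usable_size)
{
	mutex_init(&dev_priv->mm.stolen_lock);

	/* Basic memrange allocator for stolen space */
	drm_mm_init(&dev_priv->mm.stolen, 0, usable_size);
}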
Example #5
0
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	/* The current block already covers this size; keep it */
	if (size < dev_priv->fbc.size)
		return 0;

	/* Release any current block */
	i915_gem_stolen_cleanup_compression(dev);

	return i915_setup_compression(dev, size);
}
Example #6
0
static void
nvc0_channel_del(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan;

	chan = *pchan;
	*pchan = NULL;
	if (!chan)
		return;

	nouveau_vm_ref(NULL, &chan->vm, NULL);
	if (drm_mm_initialized(&chan->ramin_heap))
		drm_mm_takedown(&chan->ramin_heap);
	nouveau_gpuobj_ref(NULL, &chan->ramin);
	kfree(chan);
}
Example #7
0
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}
Example #8
0
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (INTEL_GEN(dev_priv) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}
Example #9
0
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	/* See the comment at the drm_mm_init() call for more about this check.
	 * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */
	if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096)
		start = 4096;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
					  alignment, start, end,
					  DRM_MM_SEARCH_DEFAULT);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}
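Examples #1 and #12 call the unranged i915_gem_stolen_insert_node() helper rather than the ranged variant shown in Examples #7-#9. In the upstream driver that helper is essentially a thin wrapper that forwards an unbounded window; a sketch to that effect:

int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	/* No window restriction: search the whole stolen range */
	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
						    alignment, 0, U64_MAX);
}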
Example #10
0
void
nv50_instmem_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
	struct nouveau_channel *chan = dev_priv->channels.ptr[0];
	int i;

	NV_DEBUG(dev, "\n");

	if (!priv)
		return;

	dev_priv->ramin_available = false;

	nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);

	for (i = 0x1700; i <= 0x1710; i += 4)
		nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);

	nouveau_gpuobj_ref(NULL, &priv->bar3_dmaobj);
	nouveau_gpuobj_ref(NULL, &priv->bar1_dmaobj);

	nouveau_vm_ref(NULL, &dev_priv->bar1_vm, chan->vm_pd);
	dev_priv->channels.ptr[127] = 0;
	nv50_channel_del(&dev_priv->channels.ptr[0]);

	nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
	nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);

	if (drm_mm_initialized(&dev_priv->ramin_heap))
		drm_mm_takedown(&dev_priv->ramin_heap);

	dev_priv->engine.instmem.priv = NULL;
	kfree(priv);
}
Example #11
0
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *ggtt = &dev_priv->gtt.base;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
			stolen_offset, gtt_offset, size);

	/* KISS and expect everything to be page-aligned */
	BUG_ON(stolen_offset & 4095);
	BUG_ON(size & 4095);

	if (WARN_ON(size == 0))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj == NULL) {
		DRM_DEBUG_KMS("failed to allocate stolen object\n");
		drm_mm_remove_node(stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	vma = i915_gem_vma_create(obj, ggtt);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_out;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	vma->node.start = gtt_offset;
	vma->node.size = size;
	if (drm_mm_initialized(&ggtt->mm)) {
		ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
		if (ret) {
			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
			goto err_vma;
		}
	}

	obj->has_global_gtt_mapping = 1;

	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
	list_add_tail(&vma->mm_list, &ggtt->inactive_list);

	return obj;

err_vma:
	i915_gem_vma_destroy(vma);
err_out:
	drm_mm_remove_node(stolen);
	kfree(stolen);
	drm_gem_object_unreference(&obj->base);
	return NULL;
}
Example #12
0
static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct drm_mm_node *uninitialized_var(compressed_llb);
	int size, fb_cpp, ret;

	WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));

	size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
	fb_cpp = fbc->state_cache.fb.format->cpp[0];

	/*
	 * find_compression_threshold() returns 0 on failure; 1 means the
	 * full size fit, and a value above 1 means the allocation only
	 * succeeded at a fraction of the requested size.
	 */
	ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
					 size, fb_cpp);
	if (!ret)
		goto err_llb;
	else if (ret > 1) {
		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
	}

	fbc->threshold = ret;

	if (INTEL_GEN(dev_priv) >= 5)
		I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
	else if (IS_GM45(dev_priv)) {
		I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
	} else {
		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
		if (!compressed_llb)
			goto err_fb;

		ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
						  4096, 4096);
		if (ret)
			goto err_fb;

		fbc->compressed_llb = compressed_llb;

		GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
					     fbc->compressed_fb.start,
					     U32_MAX));
		GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
					     fbc->compressed_llb->start,
					     U32_MAX));
		I915_WRITE(FBC_CFB_BASE,
			   dev_priv->dsm.start + fbc->compressed_fb.start);
		I915_WRITE(FBC_LL_BASE,
			   dev_priv->dsm.start + compressed_llb->start);
	}

	DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
		      fbc->compressed_fb.size, fbc->threshold);

	return 0;

err_fb:
	kfree(compressed_llb);
	i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
err_llb:
	if (drm_mm_initialized(&dev_priv->mm.stolen))
		pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}
Example #13
0
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
					       resource_size_t stolen_offset,
					       resource_size_t gtt_offset,
					       resource_size_t size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
			 &stolen_offset, &gtt_offset, &size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) ||
	    WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	mutex_unlock(&dev_priv->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj == NULL) {
		DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_pages;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
				   size, gtt_offset, obj->cache_level,
				   0);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n");
		goto err_pages;
	}

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	vma->pages = obj->mm.pages;
	vma->flags |= I915_VMA_GLOBAL_BIND;
	__i915_vma_set_map_and_fenceable(vma);

	mutex_lock(&ggtt->vm.mutex);
	list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
	mutex_unlock(&ggtt->vm.mutex);

	spin_lock(&dev_priv->mm.obj_lock);
	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
	obj->bind_count++;
	spin_unlock(&dev_priv->mm.obj_lock);

	return obj;

err_pages:
	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return NULL;
}
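The preallocated variant exists so the driver can wrap a GEM object around stolen memory that firmware has already programmed, most notably the boot framebuffer. A hedged sketch of such a caller, assuming page-aligned base and size values read back from the hardware plane registers (the helper name example_wrap_bios_fb is illustrative, not part of the driver):

static struct drm_i915_gem_object *
example_wrap_bios_fb(struct drm_i915_private *dev_priv,
		     resource_size_t base, resource_size_t size)
{
	/*
	 * Firmware maps the boot framebuffer 1:1 in the GGTT, so the
	 * stolen offset doubles as the GTT offset.
	 */
	return i915_gem_object_create_stolen_for_preallocated(dev_priv,
							      base, base,
							      size);
}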
Example #14
0
struct drm_i915_gem_object *
kos_gem_fb_object_create(struct drm_device *dev,
                           u32 gtt_offset,
                           u32 size)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct i915_address_space *ggtt = &dev_priv->gtt.base;
    struct drm_i915_gem_object *obj;
    struct drm_mm_node *fb_node;
    struct i915_vma *vma;
    int ret;

    DRM_DEBUG_KMS("creating preallocated framebuffer object: gtt_offset=%x, size=%x\n",
                  gtt_offset, size);

    /* KISS and expect everything to be page-aligned */
    BUG_ON(size & 4095);

    if (WARN_ON(size == 0))
        return NULL;

    fb_node = kzalloc(sizeof(*fb_node), GFP_KERNEL);
    if (!fb_node)
        return NULL;

    fb_node->start = gtt_offset;
    fb_node->size = size;

    obj = _kos_fb_object_create(dev, fb_node);
    if (obj == NULL) {
        DRM_DEBUG_KMS("failed to preallocate framebuffer object\n");
        kfree(fb_node);
        return NULL;
    }

    vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
    if (IS_ERR(vma)) {
        ret = PTR_ERR(vma);
        goto err_out;
    }

    /* To simplify the initialisation sequence between KMS and GTT,
     * we allow construction of the stolen object prior to
     * setting up the GTT space. The actual reservation will occur
     * later.
     */
    vma->node.start = gtt_offset;
    vma->node.size = size;
    if (drm_mm_initialized(&ggtt->mm)) {
        ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
        if (ret) {
            DRM_DEBUG_KMS("failed to allocate framebuffer GTT space\n");
            goto err_vma;
        }
    }

//    obj->has_global_gtt_mapping = 1;

    list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
    list_add_tail(&vma->mm_list, &ggtt->inactive_list);

    mutex_lock(&dev->object_name_lock);
    idr_preload(GFP_KERNEL);

    if (!obj->base.name) {
        ret = idr_alloc(&dev->object_name_idr, &obj->base, 1, 0, GFP_NOWAIT);
        if (ret < 0)
            goto err_gem;

        obj->base.name = ret;

        /* Allocate a reference for the name table.  */
        drm_gem_object_reference(&obj->base);

        DRM_DEBUG_KMS("%s allocate fb name %d\n", __FUNCTION__, obj->base.name );
    }

    idr_preload_end();
    mutex_unlock(&dev->object_name_lock);
    drm_gem_object_unreference(&obj->base);
    return obj;

err_gem:
    idr_preload_end();
    mutex_unlock(&dev->object_name_lock);
err_vma:
    i915_gem_vma_destroy(vma);
err_out:
    kfree(fb_node);
    drm_gem_object_unreference(&obj->base);
    return NULL;
}