Example #1
/* Tear down a render state object in the reverse order of
 * render_state_alloc() below: drop the CPU mapping, unpin the object
 * from the global GTT, release the GEM reference and free the wrapper.
 */
static void render_state_free(struct i915_render_state *so)
{
	kunmap(so->batch);
	i915_gem_object_ggtt_unpin(so->obj);
	drm_gem_object_unreference(&so->obj->base);
	kfree(so);
}
Example #2
static int intelfb_alloc(struct drm_fb_helper *helper,
			 struct drm_fb_helper_surface_size *sizes)
{
	struct intel_fbdev *ifbdev =
		container_of(helper, struct intel_fbdev, helper);
	struct drm_framebuffer *fb;
	struct drm_device *dev = helper->dev;
	struct drm_mode_fb_cmd2 mode_cmd = {};
	struct drm_i915_gem_object *obj;
	int size, ret;

	/* we don't do packed 24bpp */
	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
				    DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = ALIGN(size, PAGE_SIZE);
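	/* Prefer stolen memory for the fbdev buffer and fall back to a
	 * regular shmemfs-backed object if no stolen space is available.
	 */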
	obj = i915_gem_object_create_stolen(dev, size);
	if (obj == NULL)
		obj = i915_gem_alloc_object(dev, size);
	if (!obj) {
		DRM_ERROR("failed to allocate framebuffer\n");
		ret = -ENOMEM;
		goto out;
	}

	/* Flush everything out, we'll be doing GTT only from now on */
	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
	if (ret) {
		DRM_ERROR("failed to pin obj: %d\n", ret);
		goto out_unref;
	}

	fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
	if (IS_ERR(fb)) {
		ret = PTR_ERR(fb);
		goto out_unpin;
	}

	ifbdev->fb = to_intel_framebuffer(fb);

	return 0;

out_unpin:
	i915_gem_object_ggtt_unpin(obj);
out_unref:
	drm_gem_object_unreference(&obj->base);
out:
	return ret;
}
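As a worked check of the pitch and size arithmetic above, here is a minimal
userspace sketch (not driver code; ALIGN and DIV_ROUND_UP are re-implemented
locally for power-of-two alignments, and a 4 KiB PAGE_SIZE is assumed):

#include <stdio.h>

#define ALIGN(x, a)        (((x) + (a) - 1) & ~((a) - 1))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define PAGE_SIZE          4096u

int main(void)
{
	unsigned int width = 1920, height = 1080, bpp = 32;
	/* 1920 * 4 = 7680 bytes, already 64-byte aligned */
	unsigned int pitch = ALIGN(width * DIV_ROUND_UP(bpp, 8), 64);
	/* 7680 * 1080 = 8294400 bytes, exactly 2025 pages */
	unsigned int size = ALIGN(pitch * height, PAGE_SIZE);

	printf("pitch=%u size=%u pages=%u\n", pitch, size, size / PAGE_SIZE);
	return 0;
}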
Example #3
void i915_gem_context_fini(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_context *dctx = dev_priv->ring[RCS].default_context;
    int i;

    if (dctx->legacy_hw_ctx.rcs_state) {
        /* The only known way to stop the gpu from accessing the hw context is
         * to reset it. Do this as the very last operation to avoid confusing
         * other code, leading to spurious errors. */
        intel_gpu_reset(dev);

        /* When default context is created and switched to, base object refcount
         * will be 2 (+1 from object creation and +1 from do_switch()).
         * i915_gem_context_fini() will be called after gpu_idle() has switched
         * to default context. So we need to unreference the base object once
         * to offset the do_switch part, so that i915_gem_context_unreference()
         * can then free the base object correctly. */
        WARN_ON(!dev_priv->ring[RCS].last_context);
        if (dev_priv->ring[RCS].last_context == dctx) {
            /* Fake switch to NULL context */
            WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
            i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
            i915_gem_context_unreference(dctx);
            dev_priv->ring[RCS].last_context = NULL;
        }

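        /* Balance the pin taken on the default context's rcs_state when
         * it was created (see i915_gem_create_context() below). */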
        i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
    }

    for (i = 0; i < I915_NUM_RINGS; i++) {
        struct intel_engine_cs *ring = &dev_priv->ring[i];

        if (ring->last_context)
            i915_gem_context_unreference(ring->last_context);

        ring->default_context = NULL;
        ring->last_context = NULL;
    }

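    /* Finally drop the reference taken when the default context was
     * created, allowing it to be freed for real. */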
    i915_gem_context_unreference(dctx);
}
Example #4
/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
                        struct drm_i915_file_private *file_priv)
{
    const bool is_global_default_ctx = file_priv == NULL;
    struct intel_context *ctx;
    int ret = 0;

    BUG_ON(!mutex_is_locked(&dev->struct_mutex));

    ctx = __create_hw_context(dev, file_priv);
    if (IS_ERR(ctx))
        return ctx;

    if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
        /* We may need to do things with the shrinker which
         * require us to immediately switch back to the default
         * context. This can cause a problem as pinning the
         * default context also requires GTT space which may not
         * be available. To avoid this we always pin the default
         * context.
         */
        ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
                                    get_context_alignment(dev), 0);
        if (ret) {
            DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
            goto err_destroy;
        }
    }

    if (USES_FULL_PPGTT(dev)) {
        struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);

        if (IS_ERR_OR_NULL(ppgtt)) {
            DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
                             PTR_ERR(ppgtt));
            /* PTR_ERR(NULL) is 0, so make sure a NULL ppgtt still
             * propagates an error to the caller. */
            ret = ppgtt ? PTR_ERR(ppgtt) : -ENOMEM;
            goto err_unpin;
        }

        ctx->ppgtt = ppgtt;
    }

    trace_i915_context_create(ctx);

    return ctx;

err_unpin:
    if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
        i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
    i915_gem_context_unreference(ctx);
    return ERR_PTR(ret);
}
Example #5
static struct i915_render_state *render_state_alloc(struct drm_device *dev)
{
	struct i915_render_state *so;
	struct page *page;
	int ret;

	so = kzalloc(sizeof(*so), GFP_KERNEL);
	if (!so)
		return ERR_PTR(-ENOMEM);

	so->obj = i915_gem_alloc_object(dev, 4096);
	if (so->obj == NULL) {
		ret = -ENOMEM;
		goto free;
	}
	so->size = 4096;

	ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
	if (ret)
		goto free_gem;

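	/* The backing object is a single 4096-byte page, so the sg table
	 * has exactly one entry; kmap that page for CPU writes of the batch.
	 */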
	BUG_ON(so->obj->pages->nents != 1);
	page = sg_page(so->obj->pages->sgl);

	so->batch = kmap(page);
	if (!so->batch) {
		ret = -ENOMEM;
		goto unpin;
	}

	so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);

	return so;
unpin:
	i915_gem_object_ggtt_unpin(so->obj);
free_gem:
	drm_gem_object_unreference(&so->obj->base);
free:
	kfree(so);
	return ERR_PTR(ret);
}
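A minimal sketch of how render_state_alloc() and render_state_free() pair up
in a caller; populate_batch() is a hypothetical stand-in for whatever emits
the golden render state into the CPU mapping:

static int render_state_example(struct drm_device *dev)
{
	struct i915_render_state *so;
	int ret;

	so = render_state_alloc(dev);
	if (IS_ERR(so))
		return PTR_ERR(so);

	ret = populate_batch(so); /* hypothetical helper */

	/* render_state_free() undoes everything the alloc did:
	 * kunmap, GGTT unpin, GEM unreference, kfree.
	 */
	render_state_free(so);
	return ret;
}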
Example #6
void i915_gem_context_reset(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    int i;

    /* In execlists mode we will unreference the context when the execlist
     * queue is cleared and the requests destroyed.
     */
    if (i915.enable_execlists)
        return;

    for (i = 0; i < I915_NUM_RINGS; i++) {
        struct intel_engine_cs *ring = &dev_priv->ring[i];
        struct intel_context *lctx = ring->last_context;

        if (lctx) {
            if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
                i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);

            i915_gem_context_unreference(lctx);
            ring->last_context = NULL;
        }
    }
}
Example #7
void i915_gem_render_state_fini(struct render_state *so)
{
	i915_gem_object_ggtt_unpin(so->obj);
	drm_gem_object_unreference(&so->obj->base);
}
Example #8
static int intelfb_create(struct drm_fb_helper *helper,
			  struct drm_fb_helper_surface_size *sizes)
{
	struct intel_fbdev *ifbdev =
		container_of(helper, struct intel_fbdev, helper);
	struct intel_framebuffer *intel_fb = ifbdev->fb;
	struct drm_device *dev = helper->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	int size, ret;

	mutex_lock(&dev->struct_mutex);

	if (!intel_fb || WARN_ON(!intel_fb->obj)) {
		DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
		ret = intelfb_alloc(helper, sizes);
		if (ret)
			goto out_unlock;
		intel_fb = ifbdev->fb;
	} else {
		DRM_DEBUG_KMS("re-using BIOS fb\n");
		sizes->fb_width = intel_fb->base.width;
		sizes->fb_height = intel_fb->base.height;
	}

	obj = intel_fb->obj;
	size = obj->base.size;

	info = framebuffer_alloc(0, &dev->pdev->dev);
	if (!info) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	info->par = helper;

	fb = &ifbdev->fb->base;

	ifbdev->helper.fb = fb;
	ifbdev->helper.fbdev = info;

	strcpy(info->fix.id, "inteldrmfb");

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &intelfb_ops;

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unpin;
	}
	/* setup aperture base/size for vesafb takeover */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto out_unpin;
	}
	info->apertures->ranges[0].base = dev->mode_config.fb_base;
	info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;

	info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
	info->fix.smem_len = size;

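	/* Map the buffer through the CPU-visible GTT aperture with
	 * write-combining; fbcon will draw into it with the CPU. */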
	info->screen_base =
		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
			   size);
	if (!info->screen_base) {
		ret = -ENOSPC;
		goto out_unpin;
	}
	info->screen_size = size;

	/* This driver doesn't need a VT switch to restore the mode on resume */
	info->skip_vt_switch = true;

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);

	/* If the object is shmemfs backed, it will have given us zeroed pages.
	 * If the object is stolen however, it will be full of whatever
	 * garbage was left in there.
	 */
	if (ifbdev->fb->obj->stolen)
		memset_io(info->screen_base, 0, info->screen_size);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
		      fb->width, fb->height,
		      i915_gem_obj_ggtt_offset(obj), obj);

	mutex_unlock(&dev->struct_mutex);
	vga_switcheroo_client_fb_set(dev->pdev, info);
	return 0;

out_unpin:
	i915_gem_object_ggtt_unpin(obj);
	drm_gem_object_unreference(&obj->base);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}