void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->hw_contexts_disabled)
		return;

	/* The only known way to stop the gpu from accessing the hw context is
	 * to reset it. Do this as the very last operation to avoid confusing
	 * other code, leading to spurious errors. */
	intel_gpu_reset(dev);

	i915_gem_object_unpin(dev_priv->ring[RCS].default_context->obj);

	do_destroy(dev_priv->ring[RCS].default_context);
}
/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static int create_default_context(struct drm_i915_private *dev_priv)
{
	struct i915_hw_context *ctx;
	int ret;

	BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	ctx = create_hw_context(dev_priv->dev, NULL);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* We may need to do things with the shrinker which require us to
	 * immediately switch back to the default context. This can cause a
	 * problem as pinning the default context also requires GTT space which
	 * may not be available. To avoid this we always pin the
	 * default context.
	 */
	ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
	if (ret) {
		DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
		goto err_destroy;
	}

	ret = do_switch(ctx);
	if (ret) {
		DRM_DEBUG_DRIVER("Switch failed %d\n", ret);
		goto err_unpin;
	}

	dev_priv->ring[RCS].default_context = ctx;

	DRM_DEBUG_DRIVER("Default HW context loaded\n");
	return 0;

err_unpin:
	i915_gem_object_unpin(ctx->obj);
err_destroy:
	i915_gem_context_unreference(ctx);
	return ret;
}
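
For context, a plausible sketch of the load-time path that pairs with create_default_context() above. The i915_gem_context_init() name, the HAS_HW_CONTEXTS() check, and the fallback policy are assumptions reconstructed from the hw_contexts_disabled flag used throughout these examples, not verified driver code:

/* Sketch: driving create_default_context() at load time. Called with
 * dev->struct_mutex held, as the BUG_ON in create_default_context()
 * requires. On any failure, fall back to running without HW contexts
 * rather than failing driver load. */
void i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_HW_CONTEXTS(dev)) {
		dev_priv->hw_contexts_disabled = true;
		return;
	}

	if (create_default_context(dev_priv)) {
		dev_priv->hw_contexts_disabled = true;
		return;
	}

	DRM_DEBUG_DRIVER("HW context support initialized\n");
}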
Example No. 3
/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static int create_default_context(struct drm_i915_private *dev_priv)
{
	struct i915_hw_context *ctx;
	struct drm_device *dev = (struct drm_device *)dev_priv->drmdev;
	int ret;

	rw_assert_wrlock(&dev->dev_lock);

	ctx = create_hw_context(dev, NULL);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* We may need to do things with the shrinker which require us to
	 * immediately switch back to the default context. This can cause a
	 * problem as pinning the default context also requires GTT space which
	 * may not be available. To avoid this we always pin the
	 * default context.
	 */
	dev_priv->ring[RCS].default_context = ctx;
	ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
	if (ret)
		goto err_destroy;

	ret = do_switch(ctx);
	if (ret)
		goto err_unpin;

	DRM_DEBUG_DRIVER("Default HW context loaded\n");
	return 0;

err_unpin:
	i915_gem_object_unpin(ctx->obj);
err_destroy:
	do_destroy(ctx);
	return ret;
}
Example No. 4
void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;

	if (dev_priv->hw_contexts_disabled)
		return;

	/* The only known way to stop the gpu from accessing the hw context is
	 * to reset it. Do this as the very last operation to avoid confusing
	 * other code, leading to spurious errors. */
	intel_gpu_reset(dev);

	i915_gem_object_unpin(dctx->obj);

	/* When default context is created and switched to, base object refcount
	 * will be 2 (+1 from object creation and +1 from do_switch()).
	 * i915_gem_context_fini() will be called after gpu_idle() has switched
	 * to default context. So we need to unreference the base object once
	 * to offset the do_switch part, so that i915_gem_context_unreference()
	 * can then free the base object correctly. */
	drm_gem_object_unreference(&dctx->obj->base);
	i915_gem_context_unreference(dctx);
}
Example No. 5
static int
intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
		   struct drm_framebuffer *fb, int crtc_x, int crtc_y,
		   unsigned int crtc_w, unsigned int crtc_h,
		   uint32_t src_x, uint32_t src_y,
		   uint32_t src_w, uint32_t src_h)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj, *old_obj;
	int pipe = intel_plane->pipe;
	int ret = 0;
	int x = src_x >> 16, y = src_y >> 16;
	int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
	bool disable_primary = false;

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	old_obj = intel_plane->obj;

	/* Pipe must be running... */
	if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
		return -EINVAL;

	if (crtc_x >= primary_w || crtc_y >= primary_h)
		return -EINVAL;

	/* Don't modify another pipe's plane */
	if (intel_plane->pipe != intel_crtc->pipe)
		return -EINVAL;

	/*
	 * Clamp the width & height into the visible area.  Note we don't
	 * try to scale the source if part of the visible region is offscreen.
	 * The caller must handle that by adjusting source offset and size.
	 */
	if ((crtc_x < 0) && ((crtc_x + crtc_w) > 0)) {
		crtc_w += crtc_x;
		crtc_x = 0;
	}
	if ((crtc_x + crtc_w) <= 0) /* Nothing to display */
		goto out;
	if ((crtc_x + crtc_w) > primary_w)
		crtc_w = primary_w - crtc_x;

	if ((crtc_y < 0) && ((crtc_y + crtc_h) > 0)) {
		crtc_h += crtc_y;
		crtc_y = 0;
	}
	if ((crtc_y + crtc_h) <= 0) /* Nothing to display */
		goto out;
	if (crtc_y + crtc_h > primary_h)
		crtc_h = primary_h - crtc_y;

	if (!crtc_w || !crtc_h) /* Again, nothing to display */
		goto out;

	/*
	 * We can take a larger source and scale it down, but
	 * only so much...  16x is the max on SNB.
	 */
	if (((src_w * src_h) / (crtc_w * crtc_h)) > intel_plane->max_downscale)
		return -EINVAL;

	/*
	 * If the sprite is completely covering the primary plane,
	 * we can disable the primary and save power.
	 */
	if ((crtc_x == 0) && (crtc_y == 0) &&
	    (crtc_w == primary_w) && (crtc_h == primary_h))
		disable_primary = true;

	mutex_lock(&dev->struct_mutex);

	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
	if (ret)
		goto out_unlock;

	intel_plane->obj = obj;

	/*
	 * Be sure to re-enable the primary before the sprite is no longer
	 * covering it fully.
	 */
	if (!disable_primary && intel_plane->primary_disabled) {
		intel_enable_primary(crtc);
		intel_plane->primary_disabled = false;
	}

	intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y,
				  crtc_w, crtc_h, x, y, src_w, src_h);

	if (disable_primary) {
		intel_disable_primary(crtc);
		intel_plane->primary_disabled = true;
	}

	/* Unpin old obj after new one is active to avoid ugliness */
	if (old_obj) {
		/*
		 * It's fairly common to simply update the position of
		 * an existing object.  In that case, we don't need to
		 * wait for vblank to avoid ugliness, we only need to
		 * do the pin & ref bookkeeping.
		 */
		if (old_obj != obj) {
			mutex_unlock(&dev->struct_mutex);
			intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
			mutex_lock(&dev->struct_mutex);
		}
		i915_gem_object_unpin(old_obj);
	}

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	return ret;
}
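
The clamping above reduces to one rule applied per axis: trim the span to [0, limit) and bail out when nothing remains. A standalone restatement (hypothetical helper, not part of the driver) that condenses the crtc_x/crtc_w and crtc_y/crtc_h handling:

/* Clamp a [*pos, *pos + len) span to [0, limit). Returns the clamped
 * length; 0 means the span is entirely offscreen. */
static unsigned int clamp_axis(int *pos, unsigned int len, int limit)
{
	if (*pos + (int)len <= 0 || *pos >= limit)
		return 0;			/* nothing to display */
	if (*pos < 0) {
		len += *pos;			/* trim the offscreen head */
		*pos = 0;
	}
	if (*pos + (int)len > limit)
		len = limit - *pos;		/* trim the offscreen tail */
	return len;
}

For example, a 256x256 sprite requested at (-64, -64) on a 1024x768 pipe clips to 192x192 at (0, 0).

Example No. 6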
static int do_switch(struct drm_i915_gem_object *from_obj,
		     struct i915_hw_context *to,
		     u32 seqno)
{
	struct intel_ring_buffer *ring = NULL;
	u32 hw_flags = 0;
	int ret;

	BUG_ON(to == NULL);
	BUG_ON(from_obj != NULL && from_obj->pin_count == 0);

	ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false);
	if (ret)
		return ret;

	/* Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 * XXX: We need a real interface to do this instead of trickery. */
	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
	if (ret) {
		i915_gem_object_unpin(to->obj);
		return ret;
	}

	if (!to->obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);

	if (!to->is_initialized || is_default_context(to))
		hw_flags |= MI_RESTORE_INHIBIT;
	else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
		hw_flags |= MI_FORCE_RESTORE;

	ring = to->ring;
	ret = mi_set_context(ring, to, hw_flags);
	if (ret) {
		i915_gem_object_unpin(to->obj);
		return ret;
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from_obj != NULL) {
		from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_gem_object_move_to_active(from_obj, ring, seqno);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from_obj->dirty = 1;
		BUG_ON(from_obj->ring != to->ring);
		i915_gem_object_unpin(from_obj);

		drm_gem_object_unreference(&from_obj->base);
	}

	drm_gem_object_reference(&to->obj->base);
	ring->last_context_obj = to->obj;
	to->is_initialized = true;

	return 0;
}
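
mi_set_context() is where the switch is actually emitted to the ring. A simplified sketch of its core, assuming the MI_SET_CONTEXT dword layout this era of the driver uses and omitting the gen-specific MI_SUSPEND_FLUSH and flush workarounds that surround it in the real function:

static int mi_set_context(struct intel_ring_buffer *ring,
			  struct i915_hw_context *new_context,
			  u32 hw_flags)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, new_context->obj->gtt_offset |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
			hw_flags);
	/* MI_SET_CONTEXT is documented to need a following MI_NOOP */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

The hw_flags computed by do_switch() (MI_RESTORE_INHIBIT for an uninitialized or default context, MI_FORCE_RESTORE when re-loading the current one) are OR-ed straight into that third dword.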
Example No. 7
static int intelfb_create(struct intel_fbdev *ifbdev,
			  struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = ifbdev->helper.dev;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd mode_cmd;
	struct drm_i915_gem_object *obj;
	struct device *device = &dev->pdev->dev;
	int size, ret;

	/* we don't do packed 24bpp */
	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.bpp = sizes->surface_bpp;
	mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
	mode_cmd.depth = sizes->surface_depth;

	size = mode_cmd.pitch * mode_cmd.height;
	size = ALIGN(size, PAGE_SIZE);
	obj = i915_gem_alloc_object(dev, size);
	if (!obj) {
		DRM_ERROR("failed to allocate framebuffer\n");
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&dev->struct_mutex);

	/* Flush everything out, we'll be doing GTT only from now on */
	ret = intel_pin_and_fence_fb_obj(dev, obj, false);
	if (ret) {
		DRM_ERROR("failed to pin fb: %d\n", ret);
		goto out_unref;
	}

	info = framebuffer_alloc(0, device);
	if (!info) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	info->par = ifbdev;

	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
	if (ret)
		goto out_unpin;

	fb = &ifbdev->ifb.base;

	ifbdev->helper.fb = fb;
	ifbdev->helper.fbdev = info;

	strcpy(info->fix.id, "inteldrmfb");

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &intelfb_ops;

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unpin;
	}
	/* setup aperture base/size for vesafb takeover */
	info->aperture_base = dev->mode_config.fb_base;
	if (!IS_GEN2(dev))
		info->aperture_size = pci_resource_len(dev->pdev, 2);
	else
		info->aperture_size = pci_resource_len(dev->pdev, 0);

	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
	info->fix.smem_len = size;

	info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
	if (!info->screen_base) {
		ret = -ENOSPC;
		goto out_unpin;
	}
	info->screen_size = size;

//	memset(info->screen_base, 0, size);

	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);

	info->pixmap.size = 64*1024;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;

	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
		      fb->width, fb->height,
		      obj->gtt_offset, obj);


	mutex_unlock(&dev->struct_mutex);
	vga_switcheroo_client_fb_set(dev->pdev, info);
	return 0;

out_unpin:
	i915_gem_object_unpin(obj);
out_unref:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
out:
	return ret;
}
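
To make the pitch and size arithmetic above concrete, a worked example with hypothetical mode values (a PAGE_SIZE of 4096 is assumed):

/* 1024x768 @ 32 bpp:
 *   pitch = ALIGN(1024 * ((32 + 7) / 8), 64) = ALIGN(4096, 64) = 4096
 *   size  = ALIGN(4096 * 768, PAGE_SIZE)     = 3145728 (768 pages)
 * The 64-byte pitch alignment is what the display engine requires;
 * the PAGE_SIZE rounding is what the GEM allocator works in. */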
Example No. 8
static int intelfb_create(struct drm_fb_helper *helper,
			  struct drm_fb_helper_surface_size *sizes)
{
	struct intel_fbdev *ifbdev =
		container_of(helper, struct intel_fbdev, helper);
	struct intel_framebuffer *intel_fb = &ifbdev->ifb;
	struct drm_device *dev = helper->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	int size, ret;

	mutex_lock(&dev->struct_mutex);

	if (!intel_fb->obj) {
		DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
		ret = intelfb_alloc(helper, sizes);
		if (ret)
			goto out_unlock;
	} else {
		DRM_DEBUG_KMS("re-using BIOS fb\n");
		sizes->fb_width = intel_fb->base.width;
		sizes->fb_height = intel_fb->base.height;
	}

	obj = intel_fb->obj;
	size = obj->base.size;

	info = framebuffer_alloc(0, &dev->pdev->dev);
	if (!info) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	info->par = helper;

	fb = &ifbdev->ifb.base;

	ifbdev->helper.fb = fb;
	ifbdev->helper.fbdev = info;

	strcpy(info->fix.id, "inteldrmfb");

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &intelfb_ops;

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
	info->fix.smem_len = size;

	info->screen_base =
		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
			   size);
	if (!info->screen_base) {
		ret = -ENOSPC;
		goto out_unpin;
	}
	info->screen_size = size;

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);

	/* If the object is shmemfs backed, it will have given us zeroed pages.
	 * If the object is stolen however, it will be full of whatever
	 * garbage was left in there.
	 */
	if (ifbdev->ifb.obj->stolen)
		memset_io(info->screen_base, 0, info->screen_size);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
		      fb->width, fb->height,
		      i915_gem_obj_ggtt_offset(obj), obj);

	mutex_unlock(&dev->struct_mutex);
	vga_switcheroo_client_fb_set(dev->pdev, info);
	return 0;

out_unpin:
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
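
This variant only allocates when no BIOS-provided framebuffer exists, deferring to intelfb_alloc(). A plausible outline of that helper, reconstructed from the allocation sequence the other examples in this collection perform inline; the stolen-memory-first policy is borrowed from Example No. 11 below, and the exact body is an assumption:

static int intelfb_alloc(struct drm_fb_helper *helper,
			 struct drm_fb_helper_surface_size *sizes)
{
	struct intel_fbdev *ifbdev =
		container_of(helper, struct intel_fbdev, helper);
	struct drm_device *dev = helper->dev;
	struct drm_mode_fb_cmd2 mode_cmd = {};
	struct drm_i915_gem_object *obj;
	int size, ret;

	/* we don't do packed 24bpp */
	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
				    ((sizes->surface_bpp + 7) / 8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = ALIGN(size, PAGE_SIZE);

	/* Prefer stolen memory, fall back to shmemfs. */
	obj = i915_gem_object_create_stolen(dev, size);
	if (obj == NULL)
		obj = i915_gem_alloc_object(dev, size);
	if (!obj)
		return -ENOMEM;

	/* Flush everything out, we'll be doing GTT only from now on */
	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
	if (ret)
		goto out_unref;

	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
	if (ret)
		goto out_unpin;

	return 0;

out_unpin:
	i915_gem_object_unpin(obj);
out_unref:
	drm_gem_object_unreference(&obj->base);
	return ret;
}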
Example No. 9
static int intelfb_create(struct intel_fbdev *ifbdev,
			  struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = ifbdev->helper.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct drm_i915_gem_object *obj;
	struct device *device = &dev->pdev->dev;
	int size, ret;

	/* we don't do packed 24bpp */
	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /
						      8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = ALIGN(size, PAGE_SIZE);
	obj = i915_gem_alloc_object(dev, size);
	if (!obj) {
		DRM_ERROR("failed to allocate framebuffer\n");
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&dev->struct_mutex);

	/* Flush everything out, we'll be doing GTT only from now on */
	ret = intel_pin_and_fence_fb_obj(dev, obj, false);
	if (ret) {
		DRM_ERROR("failed to pin fb: %d\n", ret);
		goto out_unref;
	}

	info = framebuffer_alloc(0, device);
	if (!info) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	info->par = ifbdev;

	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
	if (ret)
		goto out_unpin;

	fb = &ifbdev->ifb.base;

	ifbdev->helper.fb = fb;
	ifbdev->helper.fbdev = info;

	strcpy(info->fix.id, "inteldrmfb");

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &intelfb_ops;

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	/* setup aperture base/size for vesafb takeover */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto out_unpin;
	}
	info->apertures->ranges[0].base = dev->mode_config.fb_base;
	info->apertures->ranges[0].size =
		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
	info->fix.smem_len = size;

	info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
	if (!info->screen_base) {
		ret = -ENOSPC;
		goto out_unpin;
	}
	info->screen_size = size;

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);

	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
		      fb->width, fb->height,
		      obj->gtt_offset, obj);

	mutex_unlock(&dev->struct_mutex);
	vga_switcheroo_client_fb_set(dev->pdev, info);
	return 0;

out_unpin:
	i915_gem_object_unpin(obj);
out_unref:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
out:
	return ret;
}
Example No. 10
static int intelfb_create(struct intel_fbdev *ifbdev,
			  struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = ifbdev->helper.dev;
#if 0
	struct drm_i915_private *dev_priv = dev->dev_private;
#endif
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct drm_i915_gem_object *obj;
	int size, ret;

	/* we don't do packed 24bpp */
	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.pitches[0] = roundup2(mode_cmd.width * ((sizes->surface_bpp + 7) /
							 8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = roundup2(size, PAGE_SIZE);
	obj = i915_gem_alloc_object(dev, size);
	if (!obj) {
		DRM_ERROR("failed to allocate framebuffer\n");
		ret = -ENOMEM;
		goto out;
	}

	DRM_LOCK(dev);

	/* Flush everything out, we'll be doing GTT only from now on */
	ret = intel_pin_and_fence_fb_obj(dev, obj, false);
	if (ret) {
		DRM_ERROR("failed to pin fb: %d\n", ret);
		goto out_unref;
	}

#if 0
	info = framebuffer_alloc(0, device);
	if (!info) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	info->par = ifbdev;
#else
	info = malloc(sizeof(struct fb_info), DRM_MEM_KMS, M_WAITOK | M_ZERO);
	info->fb_size = size;
	info->fb_bpp = sizes->surface_bpp;
	info->fb_width = sizes->fb_width;
	info->fb_height = sizes->fb_height;
	info->fb_pbase = dev->agp->base + obj->gtt_offset;
	info->fb_vbase = (vm_offset_t)pmap_mapdev_attr(info->fb_pbase, size,
	    PAT_WRITE_COMBINING);

#endif

	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
	if (ret)
		goto out_unpin;

	fb = &ifbdev->ifb.base;

	ifbdev->helper.fb = fb;
	ifbdev->helper.fbdev = info;
#if 0

	strcpy(info->fix.id, "inteldrmfb");

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &intelfb_ops;

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unpin;
	}
	/* setup aperture base/size for vesafb takeover */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto out_unpin;
	}
	info->apertures->ranges[0].base = dev->mode_config.fb_base;
	info->apertures->ranges[0].size =
		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
	info->fix.smem_len = size;

	info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
	if (!info->screen_base) {
		ret = -ENOSPC;
		goto out_unpin;
	}
	info->screen_size = size;

//	memset(info->screen_base, 0, size);

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
#endif
	DRM_DEBUG_KMS("allocated %dx%d (s %dbits) fb: 0x%08x, bo %p\n",
		      fb->width, fb->height, fb->depth,
		      obj->gtt_offset, obj);

	DRM_UNLOCK(dev);
#if 1
	KIB_NOTYET();
#else
	vga_switcheroo_client_fb_set(dev->pdev, info);
#endif
	return 0;

out_unpin:
	i915_gem_object_unpin(obj);
out_unref:
	drm_gem_object_unreference(&obj->base);
	DRM_UNLOCK(dev);
out:
	return ret;
}
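Example No. 11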
static int intelfb_create(struct drm_fb_helper *helper,
			  struct drm_fb_helper_surface_size *sizes)
{
	struct intel_fbdev *ifbdev =
		container_of(helper, struct intel_fbdev, helper);
	struct drm_device *dev = helper->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd = {};
	struct drm_i915_gem_object *obj;
	struct device *device = &dev->pdev->dev;
	int size, ret;

	/* we don't do packed 24bpp */
	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /
						      8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = ALIGN(size, PAGE_SIZE);
	obj = i915_gem_object_create_stolen(dev, size);
	if (obj == NULL)
		obj = i915_gem_alloc_object(dev, size);
	if (!obj) {
		DRM_ERROR("failed to allocate framebuffer\n");
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&dev->struct_mutex);

	/* Flush everything out, we'll be doing GTT only from now on */
	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
	if (ret) {
		DRM_ERROR("failed to pin fb: %d\n", ret);
		goto out_unref;
	}

	info = framebuffer_alloc(0, device);
	if (!info) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	info->par = helper;

	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
	if (ret)
		goto out_fbrelease;

	fb = &ifbdev->ifb.base;

	ifbdev->helper.fb = fb;
	ifbdev->helper.fbdev = info;

	strncpy(info->fix.id, "inteldrmfb", sizeof(info->fix.id) - 1);

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &intelfb_ops;

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_fbdestroy;
	}
	/* setup aperture base/size for vesafb takeover */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto out_decmap;
	}
	info->apertures->ranges[0].base = dev->mode_config.fb_base;
	info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;

	info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
	info->fix.smem_len = size;

	info->screen_base =
		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
			   size);
	if (!info->screen_base) {
		ret = -ENOSPC;
		goto out_freeap;
	}
	info->screen_size = size;

	/* This driver doesn't need a VT switch to restore the mode on resume */
	info->skip_vt_switch = true;

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);

	/* If the object is shmemfs backed, it will have given us zeroed pages.
	 * If the object is stolen however, it will be full of whatever
	 * garbage was left in there.
	 */
	if (ifbdev->ifb.obj->stolen)
		memset_io(info->screen_base, 0, info->screen_size);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
		      fb->width, fb->height,
		      i915_gem_obj_ggtt_offset(obj), obj);


	mutex_unlock(&dev->struct_mutex);
	vga_switcheroo_client_fb_set(dev->pdev, info);
	return 0;

out_freeap:
	kfree(info->apertures);
out_decmap:
	fb_dealloc_cmap(&info->cmap);
out_fbdestroy:
	fb->funcs->destroy(fb);
out_fbrelease:
	kfree(info);
out_unpin:
	i915_gem_object_unpin(obj);
out_unref:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
out:
	return ret;
}
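
The error labels in this version unwind each resource in exact reverse order of acquisition, one label per resource, which is the standard kernel goto-cleanup idiom. A minimal generic sketch of the pattern (hypothetical acquire/release functions standing in for the pin, cmap, and aperture steps above):

static int acquire_a(void) { return 0; }	/* e.g. pin the object */
static int acquire_b(void) { return 0; }	/* e.g. alloc the cmap */
static int acquire_c(void) { return 0; }	/* e.g. map the aperture */
static void release_b(void) { }
static void release_a(void) { }

static int setup_three(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		goto out;
	ret = acquire_b();
	if (ret)
		goto out_release_a;
	ret = acquire_c();
	if (ret)
		goto out_release_b;
	return 0;		/* success: everything stays acquired */

out_release_b:
	release_b();
out_release_a:
	release_a();
out:
	return ret;
}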
Example No. 12
static int do_switch(struct i915_hw_context *to)
{
	struct intel_ring_buffer *ring = to->ring;
	struct i915_hw_context *from = ring->last_context;
	u32 hw_flags = 0;
	int ret, i;

	BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);

	if (from == to && !to->remap_slice)
		return 0;

	ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
	if (ret)
		return ret;

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 */
	from = ring->last_context;

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
	if (ret) {
		i915_gem_object_unpin(to->obj);
		return ret;
	}

	if (!to->obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);

	if (!to->is_initialized || is_default_context(to))
		hw_flags |= MI_RESTORE_INHIBIT;

	ret = mi_set_context(ring, to, hw_flags);
	if (ret) {
		i915_gem_object_unpin(to->obj);
		return ret;
	}

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = i915_gem_l3_remap(ring, i);
		/* If it failed, try again next round */
		if (ret)
			DRM_DEBUG_DRIVER("L3 remapping failed\n");
		else
			to->remap_slice &= ~(1<<i);
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->obj->dirty = 1;
		BUG_ON(from->obj->ring != ring);

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_unpin(from->obj);
		i915_gem_context_unreference(from);
	}

	i915_gem_context_reference(to);
	ring->last_context = to;
	to->is_initialized = true;

	return 0;
}
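
Both do_switch() variants consult is_default_context() to force MI_RESTORE_INHIBIT when loading the default context. A one-line sketch of that predicate, assuming each ring caches the context installed by create_default_context(), as the earlier examples show:

static inline bool is_default_context(struct i915_hw_context *ctx)
{
	return ctx == ctx->ring->default_context;
}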
Example No. 13
static int intelfb_create(struct intel_fbdev *ifbdev,
    struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = ifbdev->helper.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
#if 0
	struct fb_info *info;
#endif
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd = {};
	struct drm_i915_gem_object *obj;
	int size, ret;

	/* we don't do packed 24bpp */
	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.pitches[0] = roundup2(mode_cmd.width * ((sizes->surface_bpp + 7) /
							 8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = roundup2(size, PAGE_SIZE);
	obj = i915_gem_alloc_object(dev, size);
	if (!obj) {
		DRM_ERROR("failed to allocate framebuffer\n");
		ret = -ENOMEM;
		goto out;
	}

	DRM_LOCK();

	/* Flush everything out, we'll be doing GTT only from now on */
	ret = intel_pin_and_fence_fb_obj(dev, obj, false);
	if (ret) {
		DRM_ERROR("failed to pin fb: %d\n", ret);
		goto out_unref;
	}

#if 0
	info = framebuffer_alloc(0, device);
	if (!info) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	info->par = ifbdev;
#endif

	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
	if (ret)
		goto out_unpin;

	fb = &ifbdev->ifb.base;

	ifbdev->helper.fb = fb;
#if 0
	ifbdev->helper.fbdev = info;

	strlcpy(info->fix.id, "inteldrmfb", sizeof(info->fix.id));

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &intelfb_ops;

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unpin;
	}
	/* setup aperture base/size for vesafb takeover */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto out_unpin;
	}
	info->apertures->ranges[0].base = dev->mode_config.fb_base;
	info->apertures->ranges[0].size =
		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
	info->fix.smem_len = size;

	info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
	if (!info->screen_base) {
		ret = -ENOSPC;
		goto out_unpin;
	}
	info->screen_size = size;

//	memset(info->screen_base, 0, size);

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
#else
{
	struct rasops_info *ri = &dev_priv->ro;
	bus_space_handle_t bsh;
	int err;

	err = agp_map_subregion(dev_priv->agph, obj->gtt_offset, size, &bsh);
	if (err) {
		ret = -err;
		goto out_unpin;
	}

	ri->ri_bits = bus_space_vaddr(dev->bst, bsh);
	ri->ri_depth = fb->bits_per_pixel;
	ri->ri_stride = fb->pitches[0];
	ri->ri_width = sizes->fb_width;
	ri->ri_height = sizes->fb_height;

	switch (fb->pixel_format) {
	case DRM_FORMAT_XRGB8888:
		ri->ri_rnum = 8;
		ri->ri_rpos = 16;
		ri->ri_gnum = 8;
		ri->ri_gpos = 8;
		ri->ri_bnum = 8;
		ri->ri_bpos = 0;
		break;
	case DRM_FORMAT_RGB565:
		ri->ri_rnum = 5;
		ri->ri_rpos = 11;
		ri->ri_gnum = 6;
		ri->ri_gpos = 5;
		ri->ri_bnum = 5;
		ri->ri_bpos = 0;
		break;
	}
}
#endif

	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
		      fb->width, fb->height,
		      obj->gtt_offset, obj);

	DRM_UNLOCK();
#if 1
	DRM_DEBUG_KMS("skipping call to vga_switcheroo_client_fb_set\n");
#else
	vga_switcheroo_client_fb_set(dev->pdev, info);
#endif
	return 0;

out_unpin:
	i915_gem_object_unpin(obj);
out_unref:
	drm_gem_object_unreference(&obj->base);
	DRM_UNLOCK();
out:
	return ret;
}
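
The rasops bitfield setup above records where each channel sits inside a pixel. A worked example for the XRGB8888 case (illustrative values):

/* XRGB8888: ri_rpos/ri_gpos/ri_bpos = 16/8/0, 8 bits per channel.
 * For the pixel value 0x00AABBCC:
 *   red   = (0x00AABBCC >> 16) & 0xff = 0xAA
 *   green = (0x00AABBCC >>  8) & 0xff = 0xBB
 *   blue  = (0x00AABBCC >>  0) & 0xff = 0xCC
 * RGB565 packs the same channels into 5/6/5 bits at positions 11/5/0. */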