Example #1
static void
nv84_fence_destroy(struct nouveau_drm *drm)
{
	struct nv84_fence_priv *priv = drm->fence;
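	/* nouveau_bo_unmap() checks for NULL itself, so only the unpin is guarded */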
	nouveau_bo_unmap(priv->bo_gart);
	if (priv->bo_gart)
		nouveau_bo_unpin(priv->bo_gart);
	nouveau_bo_ref(NULL, &priv->bo_gart);
	nouveau_bo_unmap(priv->bo);
	if (priv->bo)
		nouveau_bo_unpin(priv->bo);
	nouveau_bo_ref(NULL, &priv->bo);
	drm->fence = NULL;
	kfree(priv);
}
Example #2
static struct nouveau_bo *
nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
{
	struct nouveau_bo *pushbuf = NULL;
	int location, ret;

	if (nouveau_vram_pushbuf)
		location = TTM_PL_FLAG_VRAM;
	else
		location = TTM_PL_FLAG_TT;

	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false,
			     true, &pushbuf);
	if (ret) {
		NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
		return NULL;
	}

	ret = nouveau_bo_pin(pushbuf, location);
	if (ret) {
		NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
		nouveau_bo_ref(NULL, &pushbuf);
		return NULL;
	}

	ret = nouveau_bo_map(pushbuf);
	if (ret) {
		nouveau_bo_unpin(pushbuf);
		nouveau_bo_ref(NULL, &pushbuf);
		return NULL;
	}

	return pushbuf;
}
Example #3
static void nouveau_card_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	int e;

	if (dev->mode_config.num_crtc) {
		nouveau_fbcon_fini(dev);
		nouveau_display_fini(dev);
	}

	if (dev_priv->channel) {
		nouveau_channel_put_unlocked(&dev_priv->channel);
		nouveau_fence_fini(dev);
	}

	nouveau_backlight_exit(dev);
	nouveau_display_destroy(dev);

	if (!dev_priv->noaccel) {
		engine->fifo.takedown(dev);
		for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
			if (dev_priv->eng[e]) {
				dev_priv->eng[e]->fini(dev, e, false);
				dev_priv->eng[e]->destroy(dev, e);
			}
		}
	}
	engine->fb.takedown(dev);
	engine->timer.takedown(dev);
	nouveau_gpio_destroy(dev);
	engine->mc.takedown(dev);
	engine->display.late_takedown(dev);

	if (dev_priv->vga_ram) {
		nouveau_bo_unpin(dev_priv->vga_ram);
		nouveau_bo_ref(NULL, &dev_priv->vga_ram);
	}

	mutex_lock(&dev->struct_mutex);
	ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
	mutex_unlock(&dev->struct_mutex);
	nouveau_mem_gart_fini(dev);
	nouveau_mem_vram_fini(dev);

	engine->instmem.takedown(dev);
	nouveau_gpuobj_takedown(dev);
	engine->vram.takedown(dev);

	nouveau_irq_fini(dev);

	nouveau_pm_fini(dev);
	nouveau_bios_takedown(dev);

	vga_client_register(dev->pdev, NULL, NULL, NULL);
}
Example #4
static void
nv84_fence_destroy(struct nouveau_drm *drm)
{
	struct nv84_fence_priv *priv = drm->fence;

#ifdef __NetBSD__
	spin_lock_destroy(&priv->base.waitlock);
	DRM_DESTROY_WAITQUEUE(&priv->base.waitqueue);
#endif

	nouveau_bo_unmap(priv->bo_gart);
	if (priv->bo_gart)
		nouveau_bo_unpin(priv->bo_gart);
	nouveau_bo_ref(NULL, &priv->bo_gart);
	nouveau_bo_unmap(priv->bo);
	if (priv->bo)
		nouveau_bo_unpin(priv->bo);
	nouveau_bo_ref(NULL, &priv->bo);
	drm->fence = NULL;
	kfree(priv);
}
Example #5
void
nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;

	if (!chan->notifier_bo)
		return;

	nouveau_bo_unmap(chan->notifier_bo);
	mutex_lock(&dev->struct_mutex);
	nouveau_bo_unpin(chan->notifier_bo);
	drm_gem_object_unreference(chan->notifier_bo->gem);
	mutex_unlock(&dev->struct_mutex);
	nouveau_mem_takedown(&chan->notifier_heap);
}
Example #6
int
nv84_fence_create(struct nouveau_drm *drm)
{
	struct nv84_fence_priv *priv;
	u32 domain;
	int ret;

	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.dtor = nv84_fence_destroy;
	priv->base.suspend = nv84_fence_suspend;
	priv->base.resume = nv84_fence_resume;
	priv->base.context_new = nv84_fence_context_new;
	priv->base.context_del = nv84_fence_context_del;

	priv->base.uevent = true;

	mutex_init(&priv->mutex);

	/* Use VRAM if there is any; otherwise fall back to system memory */
	domain = drm->client.device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
			 /*
			  * fences created in sysmem must be non-cached or we
			  * will lose CPU/GPU coherency!
			  */
			 TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
	ret = nouveau_bo_new(&drm->client, 16 * drm->chan.nr, 0,
			     domain, 0, 0, NULL, NULL, &priv->bo);
	if (ret == 0) {
		ret = nouveau_bo_pin(priv->bo, domain, false);
		if (ret == 0) {
			ret = nouveau_bo_map(priv->bo);
			if (ret)
				nouveau_bo_unpin(priv->bo);
		}
		if (ret)
			nouveau_bo_ref(NULL, &priv->bo);
	}

	if (ret)
		nv84_fence_destroy(drm);
	return ret;
}
Example #7
static void nouveau_card_takedown(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct nouveau_engine *engine = &dev_priv->engine;

    if (!engine->graph.accel_blocked) {
        nouveau_fence_fini(dev);
        nouveau_channel_put_unlocked(&dev_priv->channel);
    }

    if (!nouveau_noaccel) {
        engine->fifo.takedown(dev);
        engine->crypt.takedown(dev);
        engine->graph.takedown(dev);
    }
    engine->fb.takedown(dev);
    engine->timer.takedown(dev);
    engine->gpio.takedown(dev);
    engine->mc.takedown(dev);
    engine->display.late_takedown(dev);

    if (dev_priv->vga_ram) {
        nouveau_bo_unpin(dev_priv->vga_ram);
        nouveau_bo_ref(NULL, &dev_priv->vga_ram);
    }

    mutex_lock(&dev->struct_mutex);
    ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
    ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
    mutex_unlock(&dev->struct_mutex);
    nouveau_mem_gart_fini(dev);

    engine->instmem.takedown(dev);
    nouveau_gpuobj_takedown(dev);
    nouveau_mem_vram_fini(dev);

    nouveau_irq_fini(dev);
    drm_vblank_cleanup(dev);

    nouveau_pm_fini(dev);
    nouveau_bios_takedown(dev);

    vga_client_register(dev->pdev, NULL, NULL, NULL);
}
Example #8
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo;

	/* don't take &nvbo->bo before the NULL check */
	if (!nvbo)
		return;
	bo = &nvbo->bo;
	nvbo->gem = NULL;

	if (unlikely(nvbo->pin_refcnt)) {
		nvbo->pin_refcnt = 1;
		nouveau_bo_unpin(nvbo);
	}

	ttm_bo_unref(&bo);

	drm_gem_object_release(gem);
	kfree(gem);
}
Example #9
void
nouveau_channel_del(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;
	if (chan) {
		if (chan->fence)
			nouveau_fence(chan->drm)->context_del(chan);
		nvif_object_fini(&chan->nvsw);
		nvif_object_fini(&chan->gart);
		nvif_object_fini(&chan->vram);
		nvif_object_fini(&chan->user);
		nvif_object_fini(&chan->push.ctxdma);
		nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
		nouveau_bo_unmap(chan->push.buffer);
		if (chan->push.buffer && chan->push.buffer->pin_refcnt)
			nouveau_bo_unpin(chan->push.buffer);
		nouveau_bo_ref(NULL, &chan->push.buffer);
		kfree(chan);
	}
	*pchan = NULL;
}
Example #10
static int
nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
		     struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = nfbdev->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct nouveau_framebuffer *nouveau_fb;
	struct nouveau_channel *chan;
	struct nouveau_bo *nvbo;
	struct drm_mode_fb_cmd mode_cmd;
	struct pci_dev *pdev = dev->pdev;
	struct device *device = &pdev->dev;
	int size, ret;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.bpp = sizes->surface_bpp;
	mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
	mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
	mode_cmd.depth = sizes->surface_depth;

	size = mode_cmd.pitch * mode_cmd.height;
	size = roundup(size, PAGE_SIZE);

	ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
			      0, 0x0000, &nvbo);
	if (ret) {
		NV_ERROR(dev, "failed to allocate framebuffer\n");
		goto out;
	}

	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_ERROR(dev, "failed to pin fb: %d\n", ret);
		nouveau_bo_ref(NULL, &nvbo);
		goto out;
	}

	ret = nouveau_bo_map(nvbo);
	if (ret) {
		NV_ERROR(dev, "failed to map fb: %d\n", ret);
		nouveau_bo_unpin(nvbo);
		nouveau_bo_ref(NULL, &nvbo);
		goto out;
	}

	chan = nouveau_nofbaccel ? NULL : dev_priv->channel;
	if (chan && dev_priv->card_type >= NV_50) {
		ret = nouveau_bo_vma_add(nvbo, chan->vm, &nfbdev->nouveau_fb.vma);
		if (ret) {
			NV_ERROR(dev, "failed to map fb into chan: %d\n", ret);
			chan = NULL;
		}
	}

	mutex_lock(&dev->struct_mutex);

	info = framebuffer_alloc(0, device);
	if (!info) {
		ret = -ENOMEM;
		goto out_unref;
	}

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unref;
	}

	info->par = nfbdev;

	nouveau_framebuffer_init(dev, &nfbdev->nouveau_fb, &mode_cmd, nvbo);

	nouveau_fb = &nfbdev->nouveau_fb;
	fb = &nouveau_fb->base;

	/* setup helper */
	nfbdev->helper.fb = fb;
	nfbdev->helper.fbdev = info;

	strcpy(info->fix.id, "nouveaufb");
	if (nouveau_nofbaccel)
		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
	else
		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
			      FBINFO_HWACCEL_FILLRECT |
			      FBINFO_HWACCEL_IMAGEBLIT;
	info->flags |= FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &nouveau_fbcon_sw_ops;
	info->fix.smem_start = nvbo->bo.mem.bus.base +
			       nvbo->bo.mem.bus.offset;
	info->fix.smem_len = size;

	info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
	info->screen_size = size;

	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
	drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);

	/* Set aperture base/size for vesafb takeover */
	info->apertures = dev_priv->apertures;
	if (!info->apertures) {
		ret = -ENOMEM;
		goto out_unref;
	}

	info->pixmap.size = 64*1024;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;

	mutex_unlock(&dev->struct_mutex);

	if (dev_priv->channel && !nouveau_nofbaccel) {
		ret = -ENODEV;
		if (dev_priv->card_type < NV_50)
			ret = nv04_fbcon_accel_init(info);
		else
		if (dev_priv->card_type < NV_C0)
			ret = nv50_fbcon_accel_init(info);
		else
			ret = nvc0_fbcon_accel_init(info);

		if (ret == 0)
			info->fbops = &nouveau_fbcon_ops;
	}

	nouveau_fbcon_zfill(dev, nfbdev);

	/* To allow resizing without swapping buffers */
	NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n",
						nouveau_fb->base.width,
						nouveau_fb->base.height,
						nvbo->bo.offset, nvbo);

	vga_switcheroo_client_fb_set(dev->pdev, info);
	return 0;

out_unref:
	mutex_unlock(&dev->struct_mutex);
out:
	return ret;
}
Example #11
/* stops a fifo */
void
nouveau_channel_free(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	unsigned long flags;
	int ret;

	NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id);

	nouveau_debugfs_channel_fini(chan);

	/* Give outstanding push buffers a chance to complete */
	spin_lock_irqsave(&chan->fence.lock, flags);
	nouveau_fence_update(chan);
	spin_unlock_irqrestore(&chan->fence.lock, flags);
	if (chan->fence.sequence != chan->fence.sequence_ack) {
		struct nouveau_fence *fence = NULL;

		ret = nouveau_fence_new(chan, &fence, true);
		if (ret == 0) {
			ret = nouveau_fence_wait(fence, NULL, false, false);
			nouveau_fence_unref((void *)&fence);
		}

		if (ret)
			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
	}

	/* Ensure all outstanding fences are signaled.  They should be if the
	 * above attempts at idling were OK, but if we failed this'll tell TTM
	 * we're done with the buffers.
	 */
	nouveau_fence_fini(chan);

	/* Ensure the channel is no longer active on the GPU */
	pfifo->reassign(dev, false);

	pgraph->fifo_access(dev, false);
	if (pgraph->channel(dev) == chan)
		pgraph->unload_context(dev);
	pgraph->destroy_context(chan);
	pgraph->fifo_access(dev, true);

	if (pfifo->channel_id(dev) == chan->id) {
		pfifo->disable(dev);
		pfifo->unload_context(dev);
		pfifo->enable(dev);
	}
	pfifo->destroy_context(chan);

	pfifo->reassign(dev, true);

	/* Release the channel's resources */
	nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
	if (chan->pushbuf_bo) {
		nouveau_bo_unpin(chan->pushbuf_bo);
		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
	}
	nouveau_gpuobj_channel_takedown(chan);
	nouveau_notifier_takedown_channel(chan);
	if (chan->user)
		iounmap(chan->user);

	dev_priv->fifos[chan->id] = NULL;
	dev_priv->fifo_alloc_count--;
	kfree(chan);
}
Example #12
void
nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
	unsigned long flags;

	/* decrement the refcount, and we're done if there's still refs */
	if (likely(!atomic_dec_and_test(&chan->users))) {
		nouveau_channel_ref(NULL, pchan);
		return;
	}

	/* no one wants the channel anymore */
	NV_DEBUG(dev, "freeing channel %d\n", chan->id);
	nouveau_debugfs_channel_fini(chan);

	/* give it chance to idle */
	nouveau_channel_idle(chan);

	/* ensure all outstanding fences are signaled.  they should be if the
	 * above attempts at idling were OK, but if we failed this'll tell TTM
	 * we're done with the buffers.
	 */
	nouveau_fence_channel_fini(chan);

	/* boot it off the hardware */
	pfifo->reassign(dev, false);

	/* We want to give pgraph a chance to idle and get rid of all
	 * potential errors. We need to do this without the context
	 * switch lock held, otherwise the irq handler is unable to
	 * process them.
	 */
	if (pgraph->channel(dev) == chan)
		nouveau_wait_for_idle(dev);

	/* destroy the engine specific contexts */
	pfifo->destroy_context(chan);
	pgraph->destroy_context(chan);
	if (pcrypt->destroy_context)
		pcrypt->destroy_context(chan);

	pfifo->reassign(dev, true);

	/* aside from its resources, the channel should now be dead,
	 * remove it from the channel list
	 */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	/* destroy any resources the channel owned */
	nouveau_gpuobj_ref(NULL, &chan->pushbuf);
	if (chan->pushbuf_bo) {
		nouveau_bo_unmap(chan->pushbuf_bo);
		nouveau_bo_unpin(chan->pushbuf_bo);
		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
	}
	nouveau_gpuobj_channel_takedown(chan);
	nouveau_notifier_takedown_channel(chan);

	nouveau_channel_ref(NULL, pchan);
}
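Read together, the examples above follow a single buffer-object lifecycle: nouveau_bo_new() allocates, nouveau_bo_pin() locks the backing storage in place, and nouveau_bo_map() exposes it to the CPU; teardown undoes those steps in reverse order (unmap, unpin, then nouveau_bo_ref(NULL, ...) to drop the last reference), and the error paths in Examples #2 and #10 unwind the same way. The sketch below distills that pattern into a pair of hypothetical helpers, example_bo_create() and example_bo_destroy(); it assumes the nouveau_bo_new() signature used in Example #2 (the signature changed across kernel versions) and is an illustration of the pattern, not code from the driver.

/*
 * Hypothetical helpers distilling the alloc/pin/map lifecycle shared
 * by the examples above.  Assumes the same-era API as Example #2.
 */
static struct nouveau_bo *
example_bo_create(struct drm_device *dev, int location)
{
	struct nouveau_bo *nvbo = NULL;
	int ret;

	/* step 1: allocate a 64KiB buffer object */
	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000,
			     false, true, &nvbo);
	if (ret)
		return NULL;

	/* step 2: pin it so its GPU placement stays valid */
	ret = nouveau_bo_pin(nvbo, location);
	if (ret)
		goto err_ref;

	/* step 3: map it for CPU access */
	ret = nouveau_bo_map(nvbo);
	if (ret)
		goto err_unpin;

	return nvbo;

err_unpin:
	nouveau_bo_unpin(nvbo);		/* undo step 2 */
err_ref:
	nouveau_bo_ref(NULL, &nvbo);	/* undo step 1; frees on last unref */
	return NULL;
}

static void
example_bo_destroy(struct nouveau_bo **pnvbo)
{
	/* teardown mirrors setup in reverse: unmap, unpin, unref */
	nouveau_bo_unmap(*pnvbo);
	nouveau_bo_unpin(*pnvbo);
	nouveau_bo_ref(NULL, pnvbo);	/* also sets *pnvbo to NULL */
}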