Example 1
static int
nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_channel_alloc *init = data;
	struct nouveau_channel *chan;
	int ret;

	if (dev_priv->engine.graph.accel_blocked)
		return -ENODEV;

	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
		return -EINVAL;

	ret = nouveau_channel_alloc(dev, &chan, file_priv,
				    init->fb_ctxdma_handle,
				    init->tt_ctxdma_handle);
	if (ret)
		return ret;
	init->channel  = chan->id;

	if (chan->dma.ib_max)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
					NOUVEAU_GEM_DOMAIN_GART;
	else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
	else
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

	if (dev_priv->card_type < NV_C0) {
		init->subchan[0].handle = NvM2MF;
		if (dev_priv->card_type < NV_50)
			init->subchan[0].grclass = 0x0039;
		else
			init->subchan[0].grclass = 0x5039;
		init->subchan[1].handle = NvSw;
		init->subchan[1].grclass = NV_SW;
		init->nr_subchan = 2;
	} else {
		init->subchan[0].handle  = 0x9039;
		init->subchan[0].grclass = 0x9039;
		init->nr_subchan = 1;
	}

	/* Named memory object area */
	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
				    &init->notifier_handle);

	if (ret == 0)
		atomic_inc(&chan->users); /* userspace reference */
	nouveau_channel_put(&chan);
	return ret;
}
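From user space this ioctl is normally reached through libdrm. The sketch below shows the caller side, assuming a build that exposes drmCommandWriteRead(), DRM_NOUVEAU_CHANNEL_ALLOC and struct drm_nouveau_channel_alloc; the helper name alloc_channel is made up for illustration.

#include <string.h>
#include <xf86drm.h>		/* drmCommandWriteRead() */
#include "nouveau_drm.h"	/* struct drm_nouveau_channel_alloc */

/* Hypothetical helper: request a new FIFO channel from the kernel. */
static int
alloc_channel(int fd, uint32_t fb_ctxdma, uint32_t tt_ctxdma,
	      struct drm_nouveau_channel_alloc *init)
{
	memset(init, 0, sizeof(*init));
	init->fb_ctxdma_handle = fb_ctxdma;
	init->tt_ctxdma_handle = tt_ctxdma;

	/* Lands in nouveau_ioctl_fifo_alloc() above; on success the kernel
	 * fills in channel, pushbuf_domains, notifier_handle and the
	 * subchan[] table. */
	return drmCommandWriteRead(fd, DRM_NOUVEAU_CHANNEL_ALLOC,
				   init, sizeof(*init));
}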
Example 2
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	struct nouveau_channel *chan = NULL;
	uint32_t flags = 0;
	int ret = 0;

	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;

	if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
		NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	if (req->channel_hint) {
		chan = nouveau_channel_get(dev, file_priv, req->channel_hint);
		if (IS_ERR(chan))
			return PTR_ERR(chan);
	}

	ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
			      req->info.tile_mode, req->info.tile_flags, false,
			      (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
			      &nvbo);
	if (chan)
		nouveau_channel_put(&chan);
	if (ret)
		return ret;

	ret = nouveau_gem_info(nvbo->gem, &req->info);
	if (ret)
		goto out;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(nvbo->gem);
out:
	return ret;
}
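A matching user-space sketch for the GEM allocation path, again assuming libdrm's drmCommandWriteRead() and the DRM_NOUVEAU_GEM_NEW request; gem_new_vram is a hypothetical helper name.

#include <string.h>
#include <xf86drm.h>		/* drmCommandWriteRead() */
#include "nouveau_drm.h"	/* struct drm_nouveau_gem_new */

/* Hypothetical helper: allocate a VRAM buffer object and return the GEM
 * handle created by nouveau_gem_ioctl_new() above. */
static int
gem_new_vram(int fd, uint64_t size, uint32_t align, uint32_t *handle)
{
	struct drm_nouveau_gem_new req;
	int ret;

	memset(&req, 0, sizeof(req));
	req.info.size    = size;
	req.info.domain  = NOUVEAU_GEM_DOMAIN_VRAM;
	req.align        = align;
	req.channel_hint = 0;	/* no channel binding */

	ret = drmCommandWriteRead(fd, DRM_NOUVEAU_GEM_NEW, &req, sizeof(req));
	if (ret == 0)
		*handle = req.info.handle;	/* filled in by the kernel */
	return ret;
}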
Example 3
static int
nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_nouveau_channel_free *req = data;
	struct nouveau_channel *chan;

	chan = nouveau_channel_get(dev, file_priv, req->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	atomic_dec(&chan->users);
	nouveau_channel_put(&chan);
	return 0;
}
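The release side is the mirror image; a minimal sketch assuming DRM_NOUVEAU_CHANNEL_FREE and libdrm's drmCommandWrite(), with free_channel as a hypothetical name.

#include <xf86drm.h>		/* drmCommandWrite() */
#include "nouveau_drm.h"	/* struct drm_nouveau_channel_free */

/* Hypothetical helper: drop the user-space reference taken at alloc time. */
static int
free_channel(int fd, int channel)
{
	struct drm_nouveau_channel_free req = { .channel = channel };

	/* Handled by nouveau_ioctl_fifo_free() above, which decrements
	 * chan->users and puts the channel. */
	return drmCommandWrite(fd, DRM_NOUVEAU_CHANNEL_FREE, &req, sizeof(req));
}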
Example 4
/* cleans up all the fifos from file_priv */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_channel *chan;
	int i;

	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
	for (i = 0; i < engine->fifo.channels; i++) {
		chan = nouveau_channel_get(dev, file_priv, i);
		if (IS_ERR(chan))
			continue;

		atomic_dec(&chan->users);
		nouveau_channel_put(&chan);
	}
}
Example 5
int
nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_notifierobj_alloc *na = data;
	struct nouveau_channel *chan;
	int ret;

	/* completely unnecessary for these chipsets... */
	if (unlikely(dev_priv->card_type >= NV_C0))
		return -EINVAL;

	chan = nouveau_channel_get(dev, file_priv, na->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000,
				     &na->offset);
	nouveau_channel_put(&chan);
	return ret;
}
Example 6
/* allocates and initializes a fifo for user space consumption */
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
		      struct drm_file *file_priv,
		      uint32_t vram_handle, uint32_t gart_handle)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_channel *chan;
	unsigned long flags;
	int ret;

	/* allocate and lock channel structure */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	chan->dev = dev;
	chan->file_priv = file_priv;
	chan->vram_handle = vram_handle;
	chan->gart_handle = gart_handle;

	kref_init(&chan->ref);
	atomic_set(&chan->users, 1);
	mutex_init(&chan->mutex);
	mutex_lock(&chan->mutex);

	/* allocate hw channel id */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
		if (!dev_priv->channels.ptr[chan->id]) {
			nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
			break;
		}
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	if (chan->id == pfifo->channels) {
		mutex_unlock(&chan->mutex);
		kfree(chan);
		return -ENODEV;
	}

	NV_DEBUG(dev, "initialising channel %d\n", chan->id);
	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
	INIT_LIST_HEAD(&chan->nvsw.flip);
	INIT_LIST_HEAD(&chan->fence.pending);
	spin_lock_init(&chan->fence.lock);

	/* Allocate DMA push buffer */
	chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
	if (!chan->pushbuf_bo) {
		ret = -ENOMEM;
		NV_ERROR(dev, "pushbuf %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	nouveau_dma_pre_init(chan);
	chan->user_put = 0x40;
	chan->user_get = 0x44;

	/* Allocate space for per-channel fixed notifier memory */
	ret = nouveau_notifier_init_channel(chan);
	if (ret) {
		NV_ERROR(dev, "ntfy %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Setup channel's default objects */
	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
	if (ret) {
		NV_ERROR(dev, "gpuobj %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Create a dma object for the push buffer */
	ret = nouveau_channel_pushbuf_ctxdma_init(chan);
	if (ret) {
		NV_ERROR(dev, "pbctxdma %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* disable the fifo caches */
	pfifo->reassign(dev, false);

	/* Construct initial RAMFC for new channel */
	ret = pfifo->create_context(chan);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	pfifo->reassign(dev, true);

	ret = nouveau_dma_init(chan);
	if (!ret)
		ret = nouveau_fence_channel_init(chan);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	nouveau_debugfs_channel_init(chan);

	NV_DEBUG(dev, "channel %d initialised\n", chan->id);
	*chan_ret = chan;
	return 0;
}