Example #1
int
nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
		    u32 handle, u32 arg0, u32 arg1,
		    struct nouveau_channel **pchan)
{
	struct nouveau_cli *cli = (void *)nvif_client(&device->base);
	int ret;

	ret = nouveau_channel_ind(drm, device, handle, arg0, pchan);
	if (ret) {
		NV_PRINTK(debug, cli, "ib channel create, %d\n", ret);
		ret = nouveau_channel_dma(drm, device, handle, pchan);
		if (ret) {
			NV_PRINTK(debug, cli, "dma channel create, %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_channel_init(*pchan, arg0, arg1);
	if (ret) {
		NV_PRINTK(error, cli, "channel failed to initialise, %d\n", ret);
		nouveau_channel_del(pchan);
		return ret;
	}

	return 0;
}
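
For context, a sketch of a plausible call site, modelled on the driver's own accel-init path; NVDRM_CHAN, NvDmaFB and NvDmaTT are handles assumed from elsewhere in nouveau, so treat the exact names as assumptions rather than part of this example:

/* Hypothetical caller: arg0/arg1 name the VRAM and GART DMA objects
 * that nouveau_channel_init() will create (see Example #6). */
ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN,
			  NvDmaFB, NvDmaTT, &drm->channel);
if (ret)
	NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);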
Example #2
int
nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
		    u32 handle, u32 arg0, u32 arg1,
		    struct nouveau_channel **pchan)
{
	struct nouveau_cli *cli = (void *)nvif_client(&device->base);
	bool super;
	int ret;

	/* hack until fencenv50 is fixed, and agp access relaxed */
	super = cli->base.super;
	cli->base.super = true;

	ret = nouveau_channel_ind(drm, device, handle, arg0, pchan);
	if (ret) {
		NV_PRINTK(debug, cli, "ib channel create, %d\n", ret);
		ret = nouveau_channel_dma(drm, device, handle, pchan);
		if (ret) {
			NV_PRINTK(debug, cli, "dma channel create, %d\n", ret);
			goto done;
		}
	}

	ret = nouveau_channel_init(*pchan, arg0, arg1);
	if (ret) {
		NV_PRINTK(error, cli, "channel failed to initialise, %d\n", ret);
		nouveau_channel_del(pchan);
	}

done:
	cli->base.super = super;
	return ret;
}
Example #3
int
nouveau_channel_idle(struct nouveau_channel *chan)
{
	struct nouveau_cli *cli = (void *)nvif_client(chan->object);
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, false, &fence);
	if (!ret) {
		ret = nouveau_fence_wait(fence, false, false);
		nouveau_fence_unref(&fence);
	}

	if (ret)
		NV_PRINTK(error, cli, "failed to idle channel 0x%08x [%s]\n",
			  chan->object->handle, nvkm_client(&cli->base)->name);
	return ret;
}
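
A hedged teardown sketch (the call site is illustrative, not a specific spot in the tree): idle the channel so in-flight fences retire before the channel object is destroyed.

/* Illustrative: drain outstanding work, then release the channel via
 * the pointer-to-pointer destructor used in Examples #1 and #2. */
if (chan) {
	nouveau_channel_idle(chan);
	nouveau_channel_del(&chan);
}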
Example #4
static inline int
nvif_notify_put_(struct nvif_notify *notify)
{
	struct nvif_object *object = notify->object;
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_ntfy_put_v0 ntfy;
	} args = {
		.ioctl.type = NVIF_IOCTL_V0_NTFY_PUT,
		.ntfy.index = notify->index,
	};

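	/* only the 0 -> 1 transition of putcnt issues the NTFY_PUT ioctl;
	 * racing or nested puts just leave the counter raised */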
	if (atomic_inc_return(&notify->putcnt) != 1)
		return 0;

	return nvif_object_ioctl(object, &args, sizeof(args), NULL);
}

int
nvif_notify_put(struct nvif_notify *notify)
{
	if (likely(notify->object) &&
	    test_and_clear_bit(NVIF_NOTIFY_USER, &notify->flags)) {
		int ret = nvif_notify_put_(notify);
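		/* notifier is disarmed; flush any handler already queued so
		 * it cannot run after this put returns */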
		if (test_bit(NVIF_NOTIFY_WORK, &notify->flags))
			flush_work(&notify->work);
		return ret;
	}
	return 0;
}

static inline int
nvif_notify_get_(struct nvif_notify *notify)
{
	struct nvif_object *object = notify->object;
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_ntfy_get_v0 ntfy;
	} args = {
		.ioctl.type = NVIF_IOCTL_V0_NTFY_GET,
		.ntfy.index = notify->index,
	};

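	/* mirror of nvif_notify_put_(): only the 1 -> 0 transition re-arms
	 * the notifier with the NTFY_GET ioctl */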
	if (atomic_dec_return(&notify->putcnt) != 0)
		return 0;

	return nvif_object_ioctl(object, &args, sizeof(args), NULL);
}

int
nvif_notify_get(struct nvif_notify *notify)
{
	if (likely(notify->object) &&
	    !test_and_set_bit(NVIF_NOTIFY_USER, &notify->flags))
		return nvif_notify_get_(notify);
	return 0;
}

static inline int
nvif_notify_func(struct nvif_notify *notify, bool keep)
{
	int ret = notify->func(notify);
	if (ret == NVIF_NOTIFY_KEEP ||
	    !test_and_clear_bit(NVIF_NOTIFY_USER, &notify->flags)) {
		if (!keep)
			atomic_dec(&notify->putcnt);
		else
			nvif_notify_get_(notify);
	}
	return ret;
}

static void
nvif_notify_work(struct work_struct *work)
{
	struct nvif_notify *notify = container_of(work, typeof(*notify), work);
	nvif_notify_func(notify, true);
}

int
nvif_notify(const void *header, u32 length, const void *data, u32 size)
{
	struct nvif_notify *notify = NULL;
	const union {
		struct nvif_notify_rep_v0 v0;
	} *args = header;
	int ret = NVIF_NOTIFY_DROP;

	if (length == sizeof(args->v0) && args->v0.version == 0) {
		if (WARN_ON(args->v0.route))
			return NVIF_NOTIFY_DROP;
		notify = (void *)(unsigned long)args->v0.token;
	}

	if (!WARN_ON(notify == NULL)) {
		struct nvif_client *client = nvif_client(notify->object);
		if (!WARN_ON(notify->size != size)) {
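			/* take a putcnt reference for this delivery; it is
			 * dropped, or converted into a re-arm, by
			 * nvif_notify_func() */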
			atomic_inc(&notify->putcnt);
			if (test_bit(NVIF_NOTIFY_WORK, &notify->flags)) {
				memcpy((void *)notify->data, data, size);
				schedule_work(&notify->work);
				return NVIF_NOTIFY_DROP;
			}
			notify->data = data;
			ret = nvif_notify_func(notify, client->driver->keep);
			notify->data = NULL;
		}
	}

	return ret;
}

int
nvif_notify_fini(struct nvif_notify *notify)
{
	struct nvif_object *object = notify->object;
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_ntfy_del_v0 ntfy;
	} args = {
		.ioctl.type = NVIF_IOCTL_V0_NTFY_DEL,
		.ntfy.index = notify->index,
	};
	int ret = nvif_notify_put(notify);
	if (ret >= 0 && object) {
		ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
		if (ret == 0) {
			nvif_object_ref(NULL, &notify->object);
			kfree((void *)notify->data);
		}
	}
	return ret;
}

int
nvif_notify_init(struct nvif_object *object, void (*dtor)(struct nvif_notify *),
		 int (*func)(struct nvif_notify *), bool work, u8 event,
		 void *data, u32 size, u32 reply, struct nvif_notify *notify)
{
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_ntfy_new_v0 ntfy;
		struct nvif_notify_req_v0 req;
	} *args;
	int ret = -ENOMEM;

	notify->object = NULL;
	nvif_object_ref(object, &notify->object);
	notify->flags = 0;
	atomic_set(&notify->putcnt, 1);
	notify->dtor = dtor;
	notify->func = func;
	notify->data = NULL;
	notify->size = reply;
	if (work) {
		INIT_WORK(&notify->work, nvif_notify_work);
		set_bit(NVIF_NOTIFY_WORK, &notify->flags);
		notify->data = kmalloc(notify->size, GFP_KERNEL);
		if (!notify->data)
			goto done;
	}

	if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL)))
		goto done;
	args->ioctl.version = 0;
	args->ioctl.type = NVIF_IOCTL_V0_NTFY_NEW;
	args->ntfy.version = 0;
	args->ntfy.event = event;
	args->req.version = 0;
	args->req.reply = notify->size;
	args->req.route = 0;
	args->req.token = (unsigned long)(void *)notify;

	memcpy(args->req.data, data, size);
	ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
	notify->index = args->ntfy.index;
	kfree(args);
done:
	if (ret)
		nvif_notify_fini(notify);
	return ret;
}

static void
nvif_notify_del(struct nvif_notify *notify)
{
	nvif_notify_fini(notify);
	kfree(notify);
}

void
nvif_notify_ref(struct nvif_notify *notify, struct nvif_notify **pnotify)
{
	BUG_ON(notify != NULL);
	if (*pnotify)
		(*pnotify)->dtor(*pnotify);
	*pnotify = notify;
}
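
To tie Example #4 together, a hedged lifecycle sketch. EXAMPLE_EVENT, example_req, example_rep and the handler are illustrative stand-ins, not driver names; real users (the display vblank notifier, for instance) pass class-defined request/reply structs and event indices:

/* Illustrative request/reply layouts; real ones come from the class
 * headers of the object being listened to. */
#define EXAMPLE_EVENT 0x00		/* assumed event index */
struct example_req { u8 version; };
struct example_rep { u8 version; };

/* NVIF_NOTIFY_KEEP re-arms the notifier in nvif_notify_func();
 * NVIF_NOTIFY_DROP leaves it disarmed until the next nvif_notify_get(). */
static int
example_handler(struct nvif_notify *notify)
{
	return NVIF_NOTIFY_KEEP;
}

static int
example_setup(struct nvif_object *object, struct nvif_notify *notify)
{
	struct example_req req = { .version = 0 };
	int ret = nvif_notify_init(object, NULL, example_handler,
				   false /* no workqueue */,
				   EXAMPLE_EVENT, &req, sizeof(req),
				   sizeof(struct example_rep), notify);
	if (ret == 0)
		ret = nvif_notify_get(notify);	/* arm */
	return ret;	/* later: nvif_notify_fini() to disarm and destroy */
}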
Example #5
static int
nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
		     u32 handle, u32 size, struct nouveau_channel **pchan)
{
	struct nouveau_cli *cli = (void *)nvif_client(&device->base);
	struct nouveau_vmmgr *vmm = nvkm_vmmgr(device);
	struct nv_dma_v0 args = {};
	struct nouveau_channel *chan;
	u32 target;
	int ret;

	chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	nvif_device_ref(device, &chan->device);
	chan->drm = drm;

	/* allocate memory for dma push buffer */
	target = TTM_PL_FLAG_TT;
	if (nouveau_vram_pushbuf)
		target = TTM_PL_FLAG_VRAM;

	ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL,
			    &chan->push.buffer);
	if (ret == 0) {
		ret = nouveau_bo_pin(chan->push.buffer, target);
		if (ret == 0)
			ret = nouveau_bo_map(chan->push.buffer);
	}

	if (ret) {
		nouveau_channel_del(pchan);
		return ret;
	}

	/* create dma object covering the *entire* memory space that the
	 * pushbuf lives in, this is because the GEM code requires that
	 * we be able to call out to other (indirect) push buffers
	 */
	chan->push.vma.offset = chan->push.buffer->bo.offset;

	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vma_add(chan->push.buffer, cli->vm,
					&chan->push.vma);
		if (ret) {
			nouveau_channel_del(pchan);
			return ret;
		}

		args.target = NV_DMA_V0_TARGET_VM;
		args.access = NV_DMA_V0_ACCESS_VM;
		args.start = 0;
		args.limit = cli->vm->vmm->limit - 1;
	} else
	if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
		if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
			/* nv04 vram pushbuf hack, retarget to its location in
			 * the framebuffer bar rather than direct vram access..
			 * nfi why this exists, it came from the -nv ddx.
			 */
			args.target = NV_DMA_V0_TARGET_PCI;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = nv_device_resource_start(nvkm_device(device), 1);
			args.limit = args.start + device->info.ram_user - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VRAM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = device->info.ram_user - 1;
		}
	} else {
		if (chan->drm->agp.stat == ENABLED) {
			args.target = NV_DMA_V0_TARGET_AGP;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = chan->drm->agp.base;
			args.limit = chan->drm->agp.base +
				     chan->drm->agp.size - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = vmm->limit - 1;
		}
	}

	ret = nvif_object_init(nvif_object(device), NULL, NVDRM_PUSH |
			       (handle & 0xffff), NV_DMA_FROM_MEMORY,
			       &args, sizeof(args), &chan->push.ctxdma);
	if (ret) {
		nouveau_channel_del(pchan);
		return ret;
	}

	return 0;
}
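
How this helper is reached is elided above; a sketch of the assumed split between the two constructors from Example #1 (the sizes follow the ib/dma dma-tracking setup in Example #6 and should be treated as assumptions):

/* Assumed: ib channels allocate 0x10000 bytes of push data plus a
 * 0x2000-byte indirect buffer; dma channels only the push area. */
ret = nouveau_channel_prep(drm, device, handle, 0x12000, &chan); /* ib  */
ret = nouveau_channel_prep(drm, device, handle, 0x10000, &chan); /* dma */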
Example #6
static int
nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
{
	struct nvif_device *device = chan->device;
	struct nouveau_cli *cli = (void *)nvif_client(&device->base);
	struct nouveau_vmmgr *vmm = nvkm_vmmgr(device);
	struct nouveau_software_chan *swch;
	struct nv_dma_v0 args = {};
	int ret, i;

	nvif_object_map(chan->object);

	/* allocate dma objects to cover all allowed vram, and gart */
	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
		if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_VM;
			args.start = 0;
			args.limit = cli->vm->vmm->limit - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VRAM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = device->info.ram_user - 1;
		}

		ret = nvif_object_init(chan->object, NULL, vram,
				       NV_DMA_IN_MEMORY, &args,
				       sizeof(args), &chan->vram);
		if (ret)
			return ret;

		if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_VM;
			args.start = 0;
			args.limit = cli->vm->vmm->limit - 1;
		} else
		if (chan->drm->agp.stat == ENABLED) {
			args.target = NV_DMA_V0_TARGET_AGP;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = chan->drm->agp.base;
			args.limit = chan->drm->agp.base +
				     chan->drm->agp.size - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = vmm->limit - 1;
		}

		ret = nvif_object_init(chan->object, NULL, gart,
				       NV_DMA_IN_MEMORY, &args,
				       sizeof(args), &chan->gart);
		if (ret)
			return ret;
	}

	/* initialise dma tracking parameters */
	switch (chan->object->oclass & 0x00ff) {
	case 0x006b:
	case 0x006e:
		chan->user_put = 0x40;
		chan->user_get = 0x44;
		chan->dma.max = (0x10000 / 4) - 2;
		break;
	default:
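		/* ib mode: the 0x10000-byte push area is followed by a
		 * 0x2000-byte indirect buffer of 8-byte entries */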
		chan->user_put = 0x40;
		chan->user_get = 0x44;
		chan->user_get_hi = 0x60;
		chan->dma.ib_base =  0x10000 / 4;
		chan->dma.ib_max  = (0x02000 / 8) - 1;
		chan->dma.ib_put  = 0;
		chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
		chan->dma.max = chan->dma.ib_base;
		break;
	}

	chan->dma.put = 0;
	chan->dma.cur = chan->dma.put;
	chan->dma.free = chan->dma.max - chan->dma.cur;

	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(chan, 0x00000000);

	/* allocate software object class (used for fences on <= nv05) */
	if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
		ret = nvif_object_init(chan->object, NULL, 0x006e, 0x006e,
				       NULL, 0, &chan->nvsw);
		if (ret)
			return ret;

		swch = (void *)nvkm_object(&chan->nvsw)->parent;
		swch->flip = nouveau_flip_complete;
		swch->flip_data = chan;

		ret = RING_SPACE(chan, 2);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubSw, 0x0000, 1);
		OUT_RING  (chan, chan->nvsw.handle);
		FIRE_RING (chan);
	}

	/* initialise synchronisation */
	return nouveau_fence(chan->drm)->context_new(chan);
}
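
A submission sketch using the dma tracking initialised above; the method offset is an illustrative placeholder, but the macro sequence mirrors the software-object setup at the end of this function:

/* reserve ring space, emit one method header plus a data word, kick */
ret = RING_SPACE(chan, 2);
if (ret == 0) {
	BEGIN_NV04(chan, NvSubSw, 0x0150 /* illustrative method */, 1);
	OUT_RING  (chan, 0x00000000);
	FIRE_RING (chan);
}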