Example #1
0
static boolean
nouveau_screen_fence_finish(struct pipe_screen *screen,
                            struct pipe_fence_handle *pfence,
                            uint64_t timeout)
{
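   /* a zero timeout means just poll the fence; otherwise block until it signals */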
   if (!timeout)
      return nouveau_fence_signalled(nouveau_fence(pfence));

   return nouveau_fence_wait(nouveau_fence(pfence), NULL);
}
static boolean
nouveau_screen_fence_finish(struct pipe_screen *screen,
                            struct pipe_fence_handle *pfence,
                            uint64_t timeout)
{
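	/* this variant ignores the timeout value and always waits for the fence */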
	return nouveau_fence_wait(nouveau_fence(pfence));
}
static void
nouveau_screen_fence_ref(struct pipe_screen *pscreen,
			 struct pipe_fence_handle **ptr,
			 struct pipe_fence_handle *pfence)
{
	nouveau_fence_ref(nouveau_fence(pfence), (struct nouveau_fence **)ptr);
}
Example #4
0
static void
nouveau_accel_fini(struct nouveau_drm *drm)
{
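	/* drop the notifier object, both channels, and the fence implementation */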
	nouveau_gpuobj_ref(NULL, &drm->notify);
	nouveau_channel_del(&drm->channel);
	nouveau_channel_del(&drm->cechan);
	if (drm->fence)
		nouveau_fence(drm)->dtor(drm);
}
Example #5
0
static void
nouveau_accel_fini(struct nouveau_drm *drm)
{
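    /* tear down channels, nvif objects, the notifier, and the fence implementation */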
    nouveau_channel_del(&drm->channel);
    nvif_object_fini(&drm->ntfy);
    nvkm_gpuobj_ref(NULL, &drm->notify);
    nvif_object_fini(&drm->nvsw);
    nouveau_channel_del(&drm->cechan);
    nvif_object_fini(&drm->ttm.copy);
    if (drm->fence)
        nouveau_fence(drm)->dtor(drm);
}
Example #6
0
void
nouveau_channel_del(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;
	if (chan) {
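		/* tear down the fence context, nvif objects and pushbuffer before freeing the channel */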
		if (chan->fence)
			nouveau_fence(chan->drm)->context_del(chan);
		nvif_object_fini(&chan->nvsw);
		nvif_object_fini(&chan->gart);
		nvif_object_fini(&chan->vram);
		nvif_object_fini(&chan->user);
		nvif_object_fini(&chan->push.ctxdma);
		nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
		nouveau_bo_unmap(chan->push.buffer);
		if (chan->push.buffer && chan->push.buffer->pin_refcnt)
			nouveau_bo_unpin(chan->push.buffer);
		nouveau_bo_ref(NULL, &chan->push.buffer);
		kfree(chan);
	}
	*pchan = NULL;
}
static boolean
nouveau_screen_fence_signalled(struct pipe_screen *screen,
                               struct pipe_fence_handle *pfence)
{
        return nouveau_fence_signalled(nouveau_fence(pfence));
}
Example #8
0
static int
nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
{
	struct nvif_device *device = chan->device;
	struct nouveau_cli *cli = (void *)chan->user.client;
	struct nvkm_mmu *mmu = nvxx_mmu(device);
	struct nv_dma_v0 args = {};
	int ret, i;

	/* map the channel's USER area so the fifo put/get registers can be accessed */
	nvif_object_map(&chan->user);

	/* allocate dma objects to cover all allowed vram, and gart */
	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
		if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_VM;
			args.start = 0;
			args.limit = cli->vm->mmu->limit - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VRAM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = device->info.ram_user - 1;
		}

		ret = nvif_object_init(&chan->user, vram, NV_DMA_IN_MEMORY,
				       &args, sizeof(args), &chan->vram);
		if (ret)
			return ret;

		if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_VM;
			args.start = 0;
			args.limit = cli->vm->mmu->limit - 1;
		} else
		if (chan->drm->agp.bridge) {
			args.target = NV_DMA_V0_TARGET_AGP;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = chan->drm->agp.base;
			args.limit = chan->drm->agp.base +
				     chan->drm->agp.size - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = mmu->limit - 1;
		}

		ret = nvif_object_init(&chan->user, gart, NV_DMA_IN_MEMORY,
				       &args, sizeof(args), &chan->gart);
		if (ret)
			return ret;
	}

	/* initialise dma tracking parameters */
	switch (chan->user.oclass & 0x00ff) {
	case 0x006b:
	case 0x006e:
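		/* older DMA channel classes: no indirect buffer, the pushbuffer is used directly */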
		chan->user_put = 0x40;
		chan->user_get = 0x44;
		chan->dma.max = (0x10000 / 4) - 2;
		break;
	default:
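		/* newer channels submit through an indirect buffer (IB) ring */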
		chan->user_put = 0x40;
		chan->user_get = 0x44;
		chan->user_get_hi = 0x60;
		chan->dma.ib_base =  0x10000 / 4;
		chan->dma.ib_max  = (0x02000 / 8) - 1;
		chan->dma.ib_put  = 0;
		chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
		chan->dma.max = chan->dma.ib_base;
		break;
	}

	chan->dma.put = 0;
	chan->dma.cur = chan->dma.put;
	chan->dma.free = chan->dma.max - chan->dma.cur;

	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(chan, 0x00000000);

	/* allocate software object class (used for fences on <= nv05) */
	if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
		ret = nvif_object_init(&chan->user, 0x006e,
				       NVIF_CLASS_SW_NV04,
				       NULL, 0, &chan->nvsw);
		if (ret)
			return ret;

		ret = RING_SPACE(chan, 2);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubSw, 0x0000, 1);
		OUT_RING  (chan, chan->nvsw.handle);
		FIRE_RING (chan);
	}

	/* initialise synchronisation */
	return nouveau_fence(chan->drm)->context_new(chan);
}