Example #1
0
int
nv84_fence_create(struct nouveau_drm *drm)
{
	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
	struct nv84_fence_priv *priv;
	int ret;

	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.dtor = nv84_fence_destroy;
	priv->base.suspend = nv84_fence_suspend;
	priv->base.resume = nv84_fence_resume;
	priv->base.context_new = nv84_fence_context_new;
	priv->base.context_del = nv84_fence_context_del;

#ifdef __NetBSD__
	spin_lock_init(&priv->base.waitlock);
	DRM_INIT_WAITQUEUE(&priv->base.waitqueue, "nvfenceq");
#else
	init_waitqueue_head(&priv->base.waiting);
#endif
	priv->base.uevent = true;

	ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
			     TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
	if (ret == 0) {
		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
		if (ret == 0) {
			ret = nouveau_bo_map(priv->bo);
			if (ret)
				nouveau_bo_unpin(priv->bo);
		}
		if (ret)
			nouveau_bo_ref(NULL, &priv->bo);
	}

	if (ret == 0)
		ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
				     TTM_PL_FLAG_TT, 0, 0, NULL,
				     &priv->bo_gart);
	if (ret == 0) {
		ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT);
		if (ret == 0) {
			ret = nouveau_bo_map(priv->bo_gart);
			if (ret)
				nouveau_bo_unpin(priv->bo_gart);
		}
		if (ret)
			nouveau_bo_ref(NULL, &priv->bo_gart);
	}

	if (ret)
		nv84_fence_destroy(drm);
	return ret;
}
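The alloc/pin/map ladder above appears twice, with the unwind steps repeated each time. A minimal sketch of a helper that factors it out, using only the calls already present in this example; the name nvxx_bo_new_pin_map is hypothetical and the nouveau_bo_* signatures assumed are the pre-4.x ones used here:

static int
nvxx_bo_new_pin_map(struct nouveau_drm *drm, u32 size, u32 flags,
		    struct nouveau_bo **pnvbo)
{
	int ret;

	/* allocate, then pin, then map, unwinding each step on failure */
	ret = nouveau_bo_new(drm->dev, size, 0, flags, 0, 0, NULL, pnvbo);
	if (ret)
		return ret;

	ret = nouveau_bo_pin(*pnvbo, flags);
	if (ret == 0) {
		ret = nouveau_bo_map(*pnvbo);
		if (ret)
			nouveau_bo_unpin(*pnvbo);
	}
	if (ret)
		nouveau_bo_ref(NULL, pnvbo);
	return ret;
}

Both BO setups in nv84_fence_create() would then reduce to one call each, e.g. nvxx_bo_new_pin_map(drm, 16 * (pfifo->max + 1), TTM_PL_FLAG_VRAM, &priv->bo).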
Example #2
static struct nouveau_bo *
nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
{
	struct nouveau_bo *pushbuf = NULL;
	int location, ret;

	if (nouveau_vram_pushbuf)
		location = TTM_PL_FLAG_VRAM;
	else
		location = TTM_PL_FLAG_TT;

	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false,
			     true, &pushbuf);
	if (ret) {
		NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
		return NULL;
	}

	ret = nouveau_bo_pin(pushbuf, location);
	if (ret) {
		NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
		nouveau_bo_ref(NULL, &pushbuf);
		return NULL;
	}

	return pushbuf;
}
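The allocator above hands back a pinned, referenced BO (or NULL on failure); a matching teardown has to undo both. A hedged sketch, with a hypothetical helper name but the same nouveau_bo_* calls used throughout these examples:

static void
nouveau_channel_user_pushbuf_free(struct nouveau_bo **pushbuf)
{
	if (*pushbuf == NULL)
		return;

	/* drop the pin, then the reference taken by nouveau_bo_new() */
	nouveau_bo_unpin(*pushbuf);
	nouveau_bo_ref(NULL, pushbuf);
}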
Example #3
0
static int
sfbhack_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo = NULL;
	uint32_t tile_flags = dev_priv->card_type == NV_50 ? 0x7000 : 0x0000;
	int ret, size;

	if (dev_priv->sfb_gem)
		return 0;

	size = nouveau_mem_fb_amount(dev);
	if (size > drm_get_resource_len(dev, 1))
		size = drm_get_resource_len(dev, 1);
	size >>= 1;

	ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
			      0, tile_flags, false, true, &nvbo);
	if (ret)
		return ret;

	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
	if (ret) {
		nouveau_bo_ref(NULL, &nvbo);
		return ret;
	}

	dev_priv->sfb_gem = nvbo->gem;
	return 0;
}
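The sizing logic clamps the VRAM amount to the BAR1 aperture and then halves it. The same computation, sketched as a helper with min_t(); the truncation to int mirrors the original, and the helper name is hypothetical:

static int
sfbhack_size(struct drm_device *dev)
{
	/* CPU-visible VRAM is bounded by the BAR1 aperture; use half */
	int size = min_t(int, nouveau_mem_fb_amount(dev),
			 drm_get_resource_len(dev, 1));

	return size >> 1;
}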
Example #4
0
struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
				struct drm_gem_object *obj, int flags)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
	int ret = 0;

	/* pin buffer into GTT */
	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
	if (ret)
		return ERR_PTR(-EINVAL);

	return dma_buf_export(nvbo, &nouveau_dmabuf_ops, obj->size, flags);
}
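A pin failure here is flattened to -EINVAL, while dma_buf_export()'s own result (a valid pointer or an ERR_PTR value) is returned unchanged, so callers must test with IS_ERR(). A sketch of the expected usage, with a hypothetical wrapper name; dma_buf_fd() is the standard way to turn the export into a file descriptor:

static int
nvxx_export_to_fd(struct drm_device *dev, struct drm_gem_object *obj,
		  int flags)
{
	struct dma_buf *buf = nouveau_gem_prime_export(dev, obj, flags);

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -EINVAL from a failed pin, or
					 * dma_buf_export()'s own error */

	return dma_buf_fd(buf, flags);
}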
Example #5
0
int
nv84_fence_create(struct nouveau_drm *drm)
{
	struct nv84_fence_priv *priv;
	u32 domain;
	int ret;

	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.dtor = nv84_fence_destroy;
	priv->base.suspend = nv84_fence_suspend;
	priv->base.resume = nv84_fence_resume;
	priv->base.context_new = nv84_fence_context_new;
	priv->base.context_del = nv84_fence_context_del;

	priv->base.uevent = true;

	mutex_init(&priv->mutex);

	/* Use VRAM if there is any; otherwise fall back to system memory */
	domain = drm->client.device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
			 /*
			  * fences created in sysmem must be non-cached or we
			  * will lose CPU/GPU coherency!
			  */
			 TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
	ret = nouveau_bo_new(&drm->client, 16 * drm->chan.nr, 0,
			     domain, 0, 0, NULL, NULL, &priv->bo);
	if (ret == 0) {
		ret = nouveau_bo_pin(priv->bo, domain, false);
		if (ret == 0) {
			ret = nouveau_bo_map(priv->bo);
			if (ret)
				nouveau_bo_unpin(priv->bo);
		}
		if (ret)
			nouveau_bo_ref(NULL, &priv->bo);
	}

	if (ret)
		nv84_fence_destroy(drm);
	return ret;
}
Example #6
int
nv10_fence_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv10_fence_priv *priv;
	int ret = 0;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.engine.destroy = nv10_fence_destroy;
	priv->base.engine.init = nv10_fence_init;
	priv->base.engine.fini = nv10_fence_fini;
	priv->base.engine.context_new = nv10_fence_context_new;
	priv->base.engine.context_del = nv10_fence_context_del;
	priv->base.emit = nv10_fence_emit;
	priv->base.read = nv10_fence_read;
	priv->base.sync = nv10_fence_sync;
	dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
	spin_lock_init(&priv->lock);

	if (dev_priv->chipset >= 0x17) {
		ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
				     0, 0x0000, NULL, &priv->bo);
		if (!ret) {
			ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
			if (!ret)
				ret = nouveau_bo_map(priv->bo);
			if (ret)
				nouveau_bo_ref(NULL, &priv->bo);
		}

		if (ret == 0) {
			nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
			priv->base.sync = nv17_fence_sync;
		}
	}

	if (ret)
		nv10_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
	return ret;
}
Example #7
int
nouveau_notifier_init_channel(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_bo *ntfy = NULL;
	uint32_t flags;
	int ret;

	if (nouveau_vram_notify)
		flags = TTM_PL_FLAG_VRAM;
	else
		flags = TTM_PL_FLAG_TT;

	ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
			      0, 0x0000, false, true, &ntfy);
	if (ret)
		return ret;

	ret = nouveau_bo_pin(ntfy, flags);
	if (ret)
		goto out_err;

	ret = nouveau_bo_map(ntfy);
	if (ret)
		goto out_err;

	ret = nouveau_mem_init_heap(&chan->notifier_heap, 0, ntfy->bo.mem.size);
	if (ret)
		goto out_err;

	chan->notifier_bo = ntfy;
out_err:
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		drm_gem_object_unreference(ntfy->gem);
		mutex_unlock(&dev->struct_mutex);
	}

	return ret;
}
Example #8
int
nouveau_notifier_init_channel(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_bo *ntfy = NULL;
	uint32_t flags, ttmpl;
	int ret;

	if (nouveau_vram_notify) {
		flags = NOUVEAU_GEM_DOMAIN_VRAM;
		ttmpl = TTM_PL_FLAG_VRAM;
	} else {
		flags = NOUVEAU_GEM_DOMAIN_GART;
		ttmpl = TTM_PL_FLAG_TT;
	}

	ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
	if (ret)
		return ret;

	ret = nouveau_bo_pin(ntfy, ttmpl);
	if (ret)
		goto out_err;

	ret = nouveau_bo_map(ntfy);
	if (ret)
		goto out_err;

	ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size);
	if (ret)
		goto out_err;

	chan->notifier_bo = ntfy;
out_err:
	if (ret)
		drm_gem_object_unreference_unlocked(ntfy->gem);

	return ret;
}
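The two notifier versions differ in the nouveau_gem_new() interface (TTM placement flags plus no_vm/mappable booleans versus NOUVEAU_GEM_DOMAIN_* flags) and in the error path. The error paths are equivalent: drm_gem_object_unreference_unlocked() is roughly the following, sketched from the DRM helper of that era rather than nouveau code:

static inline void
gem_unreference_unlocked_sketch(struct drm_gem_object *obj)
{
	if (obj) {
		struct drm_device *dev = obj->dev;

		/* take struct_mutex around the locked unreference */
		mutex_lock(&dev->struct_mutex);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
	}
}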
Example #9
0
static int
nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
		     struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = nfbdev->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct nouveau_framebuffer *nouveau_fb;
	struct nouveau_channel *chan;
	struct nouveau_bo *nvbo;
	struct drm_mode_fb_cmd mode_cmd;
	struct pci_dev *pdev = dev->pdev;
	struct device *device = &pdev->dev;
	int size, ret;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.bpp = sizes->surface_bpp;
	mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
	mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
	mode_cmd.depth = sizes->surface_depth;

	size = mode_cmd.pitch * mode_cmd.height;
	size = roundup(size, PAGE_SIZE);

	ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
			      0, 0x0000, &nvbo);
	if (ret) {
		NV_ERROR(dev, "failed to allocate framebuffer\n");
		goto out;
	}

	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_ERROR(dev, "failed to pin fb: %d\n", ret);
		nouveau_bo_ref(NULL, &nvbo);
		goto out;
	}

	ret = nouveau_bo_map(nvbo);
	if (ret) {
		NV_ERROR(dev, "failed to map fb: %d\n", ret);
		nouveau_bo_unpin(nvbo);
		nouveau_bo_ref(NULL, &nvbo);
		goto out;
	}

	chan = nouveau_nofbaccel ? NULL : dev_priv->channel;
	if (chan && dev_priv->card_type >= NV_50) {
		ret = nouveau_bo_vma_add(nvbo, chan->vm, &nfbdev->nouveau_fb.vma);
		if (ret) {
			NV_ERROR(dev, "failed to map fb into chan: %d\n", ret);
			chan = NULL;
		}
	}

	mutex_lock(&dev->struct_mutex);

	info = framebuffer_alloc(0, device);
	if (!info) {
		ret = -ENOMEM;
		goto out_unref;
	}

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unref;
	}

	info->par = nfbdev;

	nouveau_framebuffer_init(dev, &nfbdev->nouveau_fb, &mode_cmd, nvbo);

	nouveau_fb = &nfbdev->nouveau_fb;
	fb = &nouveau_fb->base;

	/* setup helper */
	nfbdev->helper.fb = fb;
	nfbdev->helper.fbdev = info;

	strcpy(info->fix.id, "nouveaufb");
	if (nouveau_nofbaccel)
		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
	else
		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
			      FBINFO_HWACCEL_FILLRECT |
			      FBINFO_HWACCEL_IMAGEBLIT;
	info->flags |= FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &nouveau_fbcon_sw_ops;
	info->fix.smem_start = nvbo->bo.mem.bus.base +
			       nvbo->bo.mem.bus.offset;
	info->fix.smem_len = size;

	info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
	info->screen_size = size;

	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
	drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);

	/* Set aperture base/size for vesafb takeover */
	info->apertures = dev_priv->apertures;
	if (!info->apertures) {
		ret = -ENOMEM;
		goto out_unref;
	}

	info->pixmap.size = 64*1024;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;

	mutex_unlock(&dev->struct_mutex);

	if (dev_priv->channel && !nouveau_nofbaccel) {
		ret = -ENODEV;
		if (dev_priv->card_type < NV_50)
			ret = nv04_fbcon_accel_init(info);
		else
		if (dev_priv->card_type < NV_C0)
			ret = nv50_fbcon_accel_init(info);
		else
			ret = nvc0_fbcon_accel_init(info);

		if (ret == 0)
			info->fbops = &nouveau_fbcon_ops;
	}

	nouveau_fbcon_zfill(dev, nfbdev);

	/* To allow resizing without swapping buffers */
	NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n",
						nouveau_fb->base.width,
						nouveau_fb->base.height,
						nvbo->bo.offset, nvbo);

	vga_switcheroo_client_fb_set(dev->pdev, info);
	return 0;

out_unref:
	mutex_unlock(&dev->struct_mutex);
out:
	return ret;
}
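The surface sizing at the top of the function is a pitch round-up followed by a page round-up. A standalone sketch of just that math, assuming the 256-byte pitch alignment used above; the helper name is hypothetical:

static u32
nvxx_fb_alloc_size(u32 width, u32 height, u32 bpp)
{
	/* bytes per scanline, padded to a 256-byte pitch */
	u32 pitch = roundup(width * (bpp >> 3), 256);

	/* whole pages, since the BO is CPU-mapped for fbcon */
	return roundup(pitch * height, PAGE_SIZE);
}

For a 1920x1080, 32 bpp console this yields a 7680-byte pitch and an 8294400-byte buffer, which happens to be page-aligned already.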
Example #10
0
static int
nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
		     u32 size, struct nouveau_channel **pchan)
{
	struct nouveau_cli *cli = (void *)device->object.client;
	struct nvkm_mmu *mmu = nvxx_mmu(device);
	struct nv_dma_v0 args = {};
	struct nouveau_channel *chan;
	u32 target;
	int ret;

	chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->device = device;
	chan->drm = drm;

	/* allocate memory for dma push buffer */
	target = TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
	if (nouveau_vram_pushbuf)
		target = TTM_PL_FLAG_VRAM;

	ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL, NULL,
			    &chan->push.buffer);
	if (ret == 0) {
		ret = nouveau_bo_pin(chan->push.buffer, target, false);
		if (ret == 0)
			ret = nouveau_bo_map(chan->push.buffer);
	}

	if (ret) {
		nouveau_channel_del(pchan);
		return ret;
	}

	/* create dma object covering the *entire* memory space that the
	 * pushbuf lives in; this is because the GEM code requires that
	 * we be able to call out to other (indirect) push buffers
	 */
	chan->push.vma.offset = chan->push.buffer->bo.offset;

	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vma_add(chan->push.buffer, cli->vm,
					&chan->push.vma);
		if (ret) {
			nouveau_channel_del(pchan);
			return ret;
		}

		args.target = NV_DMA_V0_TARGET_VM;
		args.access = NV_DMA_V0_ACCESS_VM;
		args.start = 0;
		args.limit = cli->vm->mmu->limit - 1;
	} else
	if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
		if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
			/* nv04 vram pushbuf hack, retarget to its location in
			 * the framebuffer bar rather than direct vram access..
			 * nfi why this exists, it came from the -nv ddx.
			 */
			args.target = NV_DMA_V0_TARGET_PCI;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = nvxx_device(device)->func->
				resource_addr(nvxx_device(device), 1);
			args.limit = args.start + device->info.ram_user - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VRAM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = device->info.ram_user - 1;
		}
	} else {
		if (chan->drm->agp.bridge) {
			args.target = NV_DMA_V0_TARGET_AGP;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = chan->drm->agp.base;
			args.limit = chan->drm->agp.base +
				     chan->drm->agp.size - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = mmu->limit - 1;
		}
	}

	ret = nvif_object_init(&device->object, 0, NV_DMA_FROM_MEMORY,
			       &args, sizeof(args), &chan->push.ctxdma);
	if (ret) {
		nouveau_channel_del(pchan);
		return ret;
	}

	return 0;
}
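The ctxdma setup reads more easily as a decision table. A comment-only summary of the branches above, using the names from the example:

/*
 * Push-buffer ctxdma target, as selected above:
 *
 *   family >= TESLA        -> TARGET_VM,   ACCESS_VM,   [0, vm->mmu->limit)
 *   in VRAM, family == TNT -> TARGET_PCI,  ACCESS_RDWR, BAR1 window
 *   in VRAM, otherwise     -> TARGET_VRAM, ACCESS_RDWR, [0, ram_user)
 *   in GART, AGP bridge    -> TARGET_AGP,  ACCESS_RDWR, AGP aperture
 *   in GART, no AGP        -> TARGET_VM,   ACCESS_RDWR, [0, mmu->limit)
 */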