Example #1
void
nvc0_graph_chan_free(struct pscnv_engine *eng, struct pscnv_chan *ch)
{
	struct drm_device *dev = eng->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	struct nvc0_graph_engine *graph = NVC0_GRAPH(eng);
	struct nvc0_graph_chan *grch = ch->engdata[PSCNV_ENGINE_GRAPH];
	int i;
	
	ch->engdata[PSCNV_ENGINE_GRAPH] = NULL;

	for (i = 0; i < ARRAY_SIZE(graph->mmio_data); i++) {
		if (grch->data[i].mem) {
			pscnv_vspace_unmap(ch->vspace, grch->data[i].vm_base);
			pscnv_mem_free(grch->data[i].mem);
		}
	}
	pscnv_vspace_unmap(ch->vspace, grch->mmio_vm_base);
	pscnv_mem_free(grch->mmio);
	
	pscnv_vspace_unmap(ch->vspace, grch->grctx_vm_base);
	pscnv_mem_free(grch->grctx);
	
	kfree(grch);
	
	nv_wv32(ch->bo, 0x210, 0);
	nv_wv32(ch->bo, 0x214, 0);
	dev_priv->vm->bar_flush(dev);
}
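Example #1 is the teardown counterpart of the allocation path shown in Example #9 below: it detaches the per-channel graph data from the channel, unmaps and frees the mmio-data buffers, the mmio list and the graphics context in reverse order of their allocation, then clears the context pointers at 0x210/0x214 in the channel BO and flushes the BAR.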
Example #2
static void
nv50_evo_channel_del(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;

	if (!chan)
		return;
	*pchan = NULL;

	if (chan->pushbuf)
		pscnv_mem_free(chan->pushbuf);
	if (chan->evo_obj)
		pscnv_mem_free(chan->evo_obj);

	kfree(chan);
}
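Example #2 shows a defensive-delete idiom: fetch the caller's pointer, clear it before freeing anything, and tolerate NULL so a repeated call is a harmless no-op. A minimal standalone sketch of the same idiom, using a hypothetical resource type that is not part of pscnv:

#include <stdlib.h>

/* Hypothetical resource type, used only to illustrate the idiom. */
struct resource {
	void *data;
};

static void resource_del(struct resource **pres)
{
	struct resource *res = *pres;

	if (!res)		/* deleting twice (or never allocating) is a no-op */
		return;
	*pres = NULL;		/* clear the caller's copy before freeing, so no
				 * dangling pointer survives this call */

	free(res->data);	/* free(NULL) is safe, mirroring the check above */
	free(res);
}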
Example #3
struct drm_gem_object *pscnv_gem_new(struct drm_device *dev, uint64_t size, uint32_t flags,
		uint32_t tile_flags, uint32_t cookie, uint32_t *user)
{
	int i;
	struct drm_gem_object *obj;
	struct pscnv_bo *vo;

	vo = pscnv_mem_alloc(dev, size, flags, tile_flags, cookie);
	if (!vo)
		return NULL;

	obj = drm_gem_object_alloc(dev, vo->size);
	if (!obj) {
		pscnv_mem_free(vo);
		return NULL;
	}
	obj->driver_private = vo;
	vo->gem = obj;

	if (user)
		for (i = 0; i < ARRAY_SIZE(vo->user); i++)
			vo->user[i] = user[i];
	else
		for (i = 0; i < ARRAY_SIZE(vo->user); i++)
			vo->user[i] = 0;

	return obj;
}
Example #4
struct drm_gem_object *pscnv_gem_new(struct drm_device *dev, uint64_t size, uint32_t flags,
		uint32_t tile_flags, uint32_t cookie, uint32_t *user)
{
	int i;
	struct drm_gem_object *obj;
	struct pscnv_bo *vo;

	vo = pscnv_mem_alloc(dev, size, flags, tile_flags, cookie);
	if (!vo)
		return NULL;

	obj = drm_gem_object_alloc(dev, vo->size);
	if (!obj) {
		pscnv_mem_free(vo);
		return NULL;
	}
#ifndef PSCNV_KAPI_DRM_GEM_OBJECT_HANDLE_COUNT
	atomic_inc(&obj->handle_count);
#endif
	obj->driver_private = vo;
	vo->gem = obj;

	if (user)
		for (i = 0; i < ARRAY_SIZE(vo->user); i++)
			vo->user[i] = user[i];
	else
		for (i = 0; i < ARRAY_SIZE(vo->user); i++)
			vo->user[i] = 0;

	return obj;
}
Example #5
void pscnv_gem_free_object(struct drm_gem_object *obj)
{
	struct pscnv_bo *vo = obj->driver_private;
#ifndef PSCNV_KAPI_DRM_GEM_OBJECT_HANDLE_COUNT
	atomic_dec(&obj->handle_count);
#endif
	pscnv_mem_free(vo);
	drm_gem_object_release(obj);
	kfree(obj);
}
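Examples #4 and #5 differ from #3 and #7 only by the PSCNV_KAPI_DRM_GEM_OBJECT_HANDLE_COUNT guards: on kernels where that macro is not defined, the driver apparently has to maintain obj->handle_count itself, incrementing it once at creation and decrementing it again in pscnv_gem_free_object, so the two variants stay balanced.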
Example #6
static int
nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
		     struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = nfbdev->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct nouveau_framebuffer *nouveau_fb;
	struct pscnv_bo *nvbo;
	struct drm_gem_object *obj;
	struct drm_mode_fb_cmd mode_cmd;
	struct pci_dev *pdev = dev->pdev;
	struct device *device = &pdev->dev;
	int size, ret;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.bpp = sizes->surface_bpp;
	mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
	mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
	mode_cmd.depth = sizes->surface_depth;

	size = mode_cmd.pitch * mode_cmd.height;
	size = roundup(size, PAGE_SIZE);

	obj = pscnv_gem_new(dev, size, PSCNV_GEM_CONTIG, 0, 0xd15fb, NULL);
	if (!obj) {
		ret = -ENOMEM;
		NV_ERROR(dev, "failed to allocate framebuffer\n");
		goto out;
	}
	nvbo = obj->driver_private;

	ret = dev_priv->vm->map_user(nvbo);
	if (ret) {
		NV_ERROR(dev, "failed to map fb: %d\n", ret);
		pscnv_mem_free(nvbo);
		goto out;
	}

	mutex_lock(&dev->struct_mutex);

	info = framebuffer_alloc(0, device);
	if (!info) {
		ret = -ENOMEM;
		goto out_unref;
	}

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unref;
	}

	info->par = nfbdev;

	nouveau_framebuffer_init(dev, &nfbdev->nouveau_fb, &mode_cmd, nvbo);

	nouveau_fb = &nfbdev->nouveau_fb;
	fb = &nouveau_fb->base;

	/* setup helper */
	nfbdev->helper.fb = fb;
	nfbdev->helper.fbdev = info;

	strcpy(info->fix.id, "nouveaufb");
	if (nouveau_nofbaccel)
		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
	else
		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
			      FBINFO_HWACCEL_FILLRECT |
			      FBINFO_HWACCEL_IMAGEBLIT;
#ifdef FBINFO_CAN_FORCE_OUTPUT
	info->flags |= FBINFO_CAN_FORCE_OUTPUT;
#endif
	info->fbops = &nouveau_fbcon_ops;
	info->fix.smem_start = dev->mode_config.fb_base + nvbo->map1->start;
	info->fix.smem_len = size;

	info->screen_base = ioremap_wc(dev_priv->fb_phys + nvbo->map1->start, size);
	info->screen_size = size;

	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
	drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);

	/* FIXME: we really shouldn't expose mmio space at all */
	info->fix.mmio_start = pci_resource_start(pdev, 1);
	info->fix.mmio_len = pci_resource_len(pdev, 1);

	/* Set aperture base/size for vesafb takeover */
	info->apertures = dev_priv->apertures;
	if (!info->apertures) {
		ret = -ENOMEM;
		goto out_unref;
	}

	info->pixmap.size = 64*1024;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;
#if 0
	if (dev_priv->channel && !nouveau_nofbaccel) {
		switch (dev_priv->card_type) {
		case NV_50:
			nv50_fbcon_accel_init(info);
			info->fbops = &nv50_fbcon_ops;
			break;
		default:
			nv04_fbcon_accel_init(info);
			info->fbops = &nv04_fbcon_ops;
			break;
		}
	}
#endif
	nouveau_fbcon_zfill(dev, nfbdev);

	/* To allow resizing without swapping buffers */
	NV_INFO(dev, "allocated %dx%d fb: 0x%llx 0x%llx, bo %p\n",
						nouveau_fb->base.width,
						nouveau_fb->base.height,
						nvbo->start, nvbo->map1->start, nvbo);

	mutex_unlock(&dev->struct_mutex);
	vga_switcheroo_client_fb_set(dev->pdev, info);
	return 0;

out_unref:
	mutex_unlock(&dev->struct_mutex);
out:
	return ret;
}
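In Example #6 the console framebuffer is a single contiguous BO (PSCNV_GEM_CONTIG) obtained through pscnv_gem_new, mapped for user access via dev_priv->vm->map_user and then ioremapped write-combined for CPU rendering. Note that when map_user fails, the code calls pscnv_mem_free on the BO directly, which is safe because no fb_info references it yet.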
Example #7
void pscnv_gem_free_object(struct drm_gem_object *obj)
{
	struct pscnv_bo *vo = obj->driver_private;
	pscnv_mem_free(vo);
	drm_gem_object_release(obj);
	kfree(obj);
}
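Taken together, Examples #3-#7 cover the whole GEM object lifecycle. A hedged usage sketch, assuming only the pscnv declarations visible above; the helper names and the 0xd15c0 cookie are made up for illustration, in the spirit of the arbitrary 0xd15fb tag in Example #6:

/* Hypothetical helper: allocate a contiguous BO wrapped in a GEM object. */
static struct pscnv_bo *example_alloc_bo(struct drm_device *dev, uint64_t size)
{
	struct drm_gem_object *obj;

	obj = pscnv_gem_new(dev, size, PSCNV_GEM_CONTIG, 0, 0xd15c0, NULL);
	if (!obj)
		return NULL;	/* on failure the BO was already released (Example #3) */

	return obj->driver_private;	/* the backing pscnv_bo, as set up in Example #3 */
}

/* Hypothetical teardown: go through the GEM side, which frees the BO (Example #7). */
static void example_free_bo(struct pscnv_bo *bo)
{
	pscnv_gem_free_object(bo->gem);
}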
Example #8
static void pscnv_vram_takedown_free(struct pscnv_mm_node *node) {
	struct pscnv_bo *bo = node->tag;
	NV_ERROR(bo->dev, "BO %d of type %08x still exists at takedown!\n",
			bo->serial, bo->cookie);
	pscnv_mem_free(bo);
}
Example #9
int
nvc0_graph_chan_alloc(struct pscnv_engine *eng, struct pscnv_chan *chan)
{
	struct drm_device *dev = eng->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	struct nvc0_graph_engine *graph = NVC0_GRAPH(eng);
	struct nvc0_graph_chan *grch; /* per channel graph data */
	
	uint32_t cookie = 0xcc000000 + (chan->cid << 8);
	int ret = 0;
	int i;
	
	NV_INFO(dev, "PGRAPH: adding to channel %d in vspace %d\n",
		chan->cid, chan->vspace->vid);
	
	grch = kzalloc(sizeof *grch, GFP_KERNEL);
	if (!grch) {
		ret = -ENOMEM;
		goto fail_kzalloc;
	}
	
	/* allocate the per-channel context page (grctx) */
	grch->grctx = pscnv_mem_alloc_and_map(chan->vspace, graph->grctx_size,
		PSCNV_GEM_CONTIG | PSCNV_GEM_NOUSER | PSCNV_ZEROFILL | PSCNV_MAP_KERNEL,
		cookie, &grch->grctx_vm_base);
	
	if (!grch->grctx) {
		ret = -ENOMEM;
		goto fail_grctx;
	}

	/* allocate memory for a "mmio list" buffer that's used by the HUB
	 * fuc to modify some per-context register settings on first load
	 * of the context.
	 */
	grch->mmio = pscnv_mem_alloc_and_map(chan->vspace, 0x1000 /* size */,
		PSCNV_GEM_CONTIG | PSCNV_MAP_KERNEL,
		cookie + 1, &grch->mmio_vm_base);
	
	if (!grch->mmio) {
		ret = -ENOMEM;
		goto fail_mmio_list;
	}

	/* allocate buffers referenced by mmio list
	 * these buffers are the counterpart to obj08004, obj0800c, obj19848
	 * of the original pscnv */
	/* check the index bound before dereferencing mmio_data[i] */
	for (i = 0; i < ARRAY_SIZE(graph->mmio_data) && graph->mmio_data[i].size; i++) {
		grch->data[i].mem = pscnv_mem_alloc_and_map(chan->vspace,
			graph->mmio_data[i].size,
			PSCNV_GEM_CONTIG | PSCNV_MAP_KERNEL,
			cookie + 0x10 + i, &grch->data[i].vm_base);
	
		if (!grch->data[i].mem) {
			ret = -ENOMEM;
			goto fail_mmio_data;
		}
	}

	/* finally, fill in the mmio list and point the context at it */
	/* again, bound the index before reading mmio_list[i] */
	for (i = 0; i < ARRAY_SIZE(graph->mmio_list) && graph->mmio_list[i].addr; i++) {
		u32 addr = graph->mmio_list[i].addr;
		u32 data = graph->mmio_list[i].data;
		u32 shift = graph->mmio_list[i].shift;
		u32 buffer = graph->mmio_list[i].buffer;

		if (shift) {
			u64 info = grch->data[buffer].vm_base;
			data |= info >> shift;
		}

		nv_wv32(grch->mmio, grch->mmio_nr++ * 4, addr);
		nv_wv32(grch->mmio, grch->mmio_nr++ * 4, data);
	}

	/* fill grctx with the initial values from the template channel */
	for (i = 0; i < graph->grctx_size; i += 4)
		nv_wv32(grch->grctx, i, graph->data[i / 4]);

	/* set pointer to mmio list */
	nv_wv32(grch->grctx, 0x00, grch->mmio_nr / 2);
	nv_wv32(grch->grctx, 0x04, grch->mmio_vm_base >> 8);
	
	chan->engdata[PSCNV_ENGINE_GRAPH] = grch;
	
	/* register this engines context with the channel */
	nv_wv32(chan->bo, 0x210, lower_32_bits(grch->grctx_vm_base) | 4);
	nv_wv32(chan->bo, 0x214, upper_32_bits(grch->grctx_vm_base));
	dev_priv->vm->bar_flush(dev);

	return 0;
	
fail_mmio_data:
	for (i = 0; i < ARRAY_SIZE(graph->mmio_data); i++) {
		if (grch->data[i].mem) {
			pscnv_vspace_unmap(chan->vspace, grch->data[i].vm_base);
			pscnv_mem_free(grch->data[i].mem);
		}
	}
	pscnv_vspace_unmap(chan->vspace, grch->mmio_vm_base);
	pscnv_mem_free(grch->mmio);
	
fail_mmio_list:
	pscnv_vspace_unmap(chan->vspace, grch->grctx_vm_base);
	pscnv_mem_free(grch->grctx);
	
fail_grctx:
	kfree(grch);

fail_kzalloc:
	NV_ERROR(dev, "PGRAPH: Couldn't allocate channel %d!\n", chan->cid);
	
	return ret;
}
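The fail_* ladder at the end of Example #9 is the usual kernel unwind pattern: each label releases exactly what was successfully allocated before the jump, in reverse order of allocation, and the same unmap-then-free sequence reappears verbatim in the teardown path of Example #1.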