static int nouveau_init_card_mappings(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	/* resource 0 is mmio regs */
	/* resource 1 is linear FB */
	/* resource 2 is RAMIN (mmio regs + 0x1000000) */
	/* resource 6 is bios */

	/* map the mmio regs */
	ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
			      drm_get_resource_len(dev, 0),
			      _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);
	if (ret) {
		DRM_ERROR("Unable to initialize the mmio mapping (%d). "
			  "Please report your setup to " DRIVER_EMAIL "\n",
			  ret);
		return -EINVAL;
	}
	DRM_DEBUG("regs mapped ok at 0x%lx\n", dev_priv->mmio->offset);

	/* map larger RAMIN aperture on NV40 cards */
	dev_priv->ramin = NULL;
	if (dev_priv->card_type >= NV_40) {
		int ramin_resource = 2;
		if (drm_get_resource_len(dev, ramin_resource) == 0)
			ramin_resource = 3;

		ret = drm_addmap(dev,
				 drm_get_resource_start(dev, ramin_resource),
				 drm_get_resource_len(dev, ramin_resource),
				 _DRM_REGISTERS, _DRM_READ_ONLY,
				 &dev_priv->ramin);
		if (ret) {
			DRM_ERROR("Failed to init RAMIN mapping, "
				  "limited instance memory available\n");
			dev_priv->ramin = NULL;
		}
	}

	/* On older cards (or if the above failed), create a map covering
	 * the BAR0 PRAMIN aperture */
	if (!dev_priv->ramin) {
		ret = drm_addmap(dev,
				 drm_get_resource_start(dev, 0) + NV_RAMIN,
				 (1*1024*1024),
				 _DRM_REGISTERS, _DRM_READ_ONLY,
				 &dev_priv->ramin);
		if (ret) {
			DRM_ERROR("Failed to map BAR0 PRAMIN: %d\n", ret);
			return ret;
		}
	}

	return 0;
}
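A minimal usage sketch (not part of the original driver; the helper name is hypothetical): once nouveau_init_card_mappings() has set up dev_priv->mmio, registers can be read through the legacy DRM map helpers, for example the chipset ID register that nouveau_load() inspects further below.

static inline uint32_t nouveau_peek_boot0(struct drm_nouveau_private *dev_priv)
{
	/* DRM_READ32 reads a 32-bit register through a _DRM_REGISTERS map */
	return DRM_READ32(dev_priv->mmio, NV03_PMC_BOOT_0);
}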
/* If you boot an IGP board with a discrete card as the primary,
 * the IGP rom is not accessible via the rom bar as the IGP rom is
 * part of the system bios.  On boot, the system bios puts a
 * copy of the igp rom at the start of vram if a discrete card is
 * present.
 */
static bool igp_read_bios_from_vram(struct radeon_device *rdev)
{
	uint8_t __iomem *bios;
	resource_size_t vram_base;
	resource_size_t size = 256 * 1024; /* ??? */

	rdev->bios = NULL;
	vram_base = drm_get_resource_start(rdev->ddev, 0);
	bios = ioremap(vram_base, size);
	if (!bios) {
		return false;
	}

	if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
		iounmap(bios);
		return false;
	}
	rdev->bios = kmalloc(size, GFP_KERNEL);
	if (rdev->bios == NULL) {
		iounmap(bios);
		return false;
	}
	memcpy_fromio(rdev->bios, bios, size);
	iounmap(bios);
	return true;
}
static int mali_driver_load(struct drm_device *dev, unsigned long chipset)
{
	int ret;
	unsigned long base, size;
	drm_mali_private_t *dev_priv;
	printk(KERN_ERR "DRM: mali_driver_load start\n");

	dev_priv = drm_calloc(1, sizeof(drm_mali_private_t), DRM_MEM_DRIVER);
	if ( dev_priv == NULL ) return -ENOMEM;

	dev->dev_private = (void *)dev_priv;

	if ( NULL == dev->platformdev )
	{
		dev->platformdev = platform_device_register_simple(mali_drm_device_name, 0, NULL, 0);
		pdev = dev->platformdev;
	}

	#if 0
	base = drm_get_resource_start(dev, 1 );
	size = drm_get_resource_len(dev, 1 );
	#endif
	ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
	if (ret) {
		/* free the whole private struct, not just a pointer's worth */
		drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
		dev->dev_private = NULL;
	}

	printk(KERN_ERR "DRM: mali_driver_load done\n");

	return ret;
}
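A hedged sketch of the matching unload path (an assumption, not taken from the source): it releases the SMAN pool and the private structure allocated in mali_driver_load().

static int mali_driver_unload(struct drm_device *dev)
{
	drm_mali_private_t *dev_priv = dev->dev_private;

	if (dev_priv == NULL)
		return 0;

	/* tear down the simple memory manager initialized at load time */
	drm_sman_takedown(&dev_priv->sman);
	drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
	dev->dev_private = NULL;

	return 0;
}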
Example #4
void rs690_vram_info(struct radeon_device *rdev)
{
	uint32_t tmp;
	fixed20_12 a;

	rs400_gart_adjust_size(rdev);
	/* DDR for all cards after R300 & IGP */
	rdev->mc.vram_is_ddr = true;
	/* FIXME: is this correct for RS690/RS740 ? */
	tmp = RREG32(RADEON_MEM_CNTL);
	if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
		rdev->mc.vram_width = 128;
	} else {
		rdev->mc.vram_width = 64;
	}
	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;

	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	rs690_pm_info(rdev);
	/* FIXME: we should enforce default clock in case GPU is not in
	 * default setup
	 */
	a.full = rfixed_const(100);
	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
	a.full = rfixed_const(16);
	/* core_bandwidth = sclk (MHz) / 16 */
	rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
}
Example #5
void rs780_vram_info(struct radeon_device *rdev)
{
	rs780_vram_get_type(rdev);

	/* FIXME: implement */
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
}
Example #6
/* If you boot an IGP board with a discrete card as the primary,
 * the IGP rom is not accessible via the rom bar as the IGP rom is
 * part of the system bios.  On boot, the system bios puts a
 * copy of the igp rom at the start of vram if a discrete card is
 * present.
 */
static bool igp_read_bios_from_vram(struct radeon_device *rdev)
{
	struct drm_local_map bios_map;
	uint8_t __iomem *bios;
	resource_size_t vram_base;
	resource_size_t size = 256 * 1024; /* ??? */

	DRM_INFO("%s: ===> Try IGP's VRAM...\n", __func__);

	if (!(rdev->flags & RADEON_IS_IGP))
		if (!radeon_card_posted(rdev)) {
			DRM_INFO("%s: not POSTed discrete card detected, skipping this method...\n",
			    __func__);
			return false;
		}

	rdev->bios = NULL;
	vram_base = drm_get_resource_start(rdev->ddev, 0);
	DRM_INFO("%s: VRAM base address: 0x%jx\n", __func__, (uintmax_t)vram_base);

	bios_map.offset = vram_base;
	bios_map.size   = size;
	bios_map.type   = 0;
	bios_map.flags  = 0;
	bios_map.mtrr   = 0;
	drm_core_ioremap(&bios_map, rdev->ddev);
	if (bios_map.handle == NULL) {
		DRM_INFO("%s: failed to ioremap\n", __func__);
		return false;
	}
	bios = bios_map.handle;
	size = bios_map.size;
	DRM_INFO("%s: Map address: %p (%ju bytes)\n", __func__, bios, (uintmax_t)size);

	if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
		if (size == 0) {
			DRM_INFO("%s: Incorrect BIOS size\n", __func__);
		} else {
			DRM_INFO("%s: Incorrect BIOS signature: 0x%02X%02X\n",
			    __func__, bios[0], bios[1]);
		}
		drm_core_ioremapfree(&bios_map, rdev->ddev);
		return false;
	}
	rdev->bios = malloc(size, DRM_MEM_DRIVER, M_NOWAIT);
	if (rdev->bios == NULL) {
		drm_core_ioremapfree(&bios_map, rdev->ddev);
		return false;
	}
	memcpy_fromio(rdev->bios, bios, size);
	drm_core_ioremapfree(&bios_map, rdev->ddev);
	return true;
}
Example #7
void
pscnv_mem_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	dev_priv->vram->takedown(dev);

	if (dev_priv->fb_mtrr >= 0) {
		drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),
			     drm_get_resource_len(dev, 1), DRM_MTRR_WC);
		dev_priv->fb_mtrr = 0;
	}
}
Example #8
int
pscnv_mem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	int dma_bits = 32;
#ifdef __linux__
	if (dev_priv->card_type >= NV_50 &&
	    pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
		dma_bits = 40;

	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
	if (ret) {
		NV_ERROR(dev, "Error setting DMA mask: %d\n", ret);
		return ret;
	}
#else
	if (dev_priv->card_type >= NV_50)
		dma_bits = 40;
#endif
	dev_priv->dma_mask = DMA_BIT_MASK(dma_bits);

	spin_lock_init(&dev_priv->pramin_lock);
	mutex_init(&dev_priv->vram_mutex);
	
	switch (dev_priv->card_type) {
		case NV_50:
			ret = nv50_vram_init(dev);
			break;
		case NV_D0:
		case NV_C0:
			ret = nvc0_vram_init(dev);
			break;
		default:
			NV_ERROR(dev, "No VRAM allocator for NV%02x!\n", dev_priv->chipset);
			ret = -ENOSYS;
	}
	if (ret)
		return ret;

	dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
					 drm_get_resource_len(dev, 1),
					 DRM_MTRR_WC);

	return 0;
}
static int
nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *pb = chan->pushbuf_bo;
	struct nouveau_gpuobj *pushbuf = NULL;
	uint32_t start = pb->bo.mem.mm_node->start << PAGE_SHIFT;
	int ret;

	if (pb->bo.mem.mem_type == TTM_PL_TT) {
		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
						  dev_priv->gart_info.aper_size,
						  NV_DMA_ACCESS_RO, &pushbuf,
						  NULL);
		chan->pushbuf_base = start;
	} else
	if (dev_priv->card_type != NV_04) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
					     dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RO,
					     NV_DMA_TARGET_VIDMEM, &pushbuf);
		chan->pushbuf_base = start;
	} else {
		/* NV04 cmdbuf hack, from original ddx.. not sure of its
		 * exact reason for existing :)  PCI access to cmdbuf in
		 * VRAM.
		 */
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     drm_get_resource_start(dev, 1),
					     dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RO,
					     NV_DMA_TARGET_PCI, &pushbuf);
		chan->pushbuf_base = start;
	}

	ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
	if (ret) {
		NV_ERROR(dev, "Error referencing pushbuf ctxdma: %d\n", ret);
		if (pushbuf != dev_priv->gart_info.sg_ctxdma)
			nouveau_gpuobj_del(dev, &pushbuf);
		return ret;
	}

	return 0;
}
Example #10
/*
 * Initialize mappings. On Savage4 and SavageIX the alignment
 * and size of the aperture are not suitable for automatic MTRR setup
 * in drm_addmap. Therefore we add them manually before the maps are
 * initialized, and tear them down on last close.
 */
int savage_driver_firstopen(drm_device_t *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	unsigned long mmio_base, fb_base, fb_size, aperture_base;
	/* fb_rsrc and aper_rsrc aren't really used currently, but still exist
	 * in case we decide we need information on the BAR for BSD in the
	 * future.
	 */
	unsigned int fb_rsrc, aper_rsrc;
	int ret = 0;

	dev_priv->mtrr[0].handle = -1;
	dev_priv->mtrr[1].handle = -1;
	dev_priv->mtrr[2].handle = -1;
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		fb_rsrc = 0;
		fb_base = drm_get_resource_start(dev, 0);
		fb_size = SAVAGE_FB_SIZE_S3;
		mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
		aper_rsrc = 0;
		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
		/* this should always be true */
		if (drm_get_resource_len(dev, 0) == 0x08000000) {
			/* Don't make MMIO write-combining! We need 3
			 * MTRRs. */
			dev_priv->mtrr[0].base = fb_base;
			dev_priv->mtrr[0].size = 0x01000000;
			dev_priv->mtrr[0].handle = mtrr_add(
				dev_priv->mtrr[0].base, dev_priv->mtrr[0].size,
				MTRR_TYPE_WRCOMB, 1);
			dev_priv->mtrr[1].base = fb_base+0x02000000;
			dev_priv->mtrr[1].size = 0x02000000;
			dev_priv->mtrr[1].handle = mtrr_add(
				dev_priv->mtrr[1].base, dev_priv->mtrr[1].size,
				MTRR_TYPE_WRCOMB, 1);
			dev_priv->mtrr[2].base = fb_base+0x04000000;
			dev_priv->mtrr[2].size = 0x04000000;
			dev_priv->mtrr[2].handle = mtrr_add(
				dev_priv->mtrr[2].base, dev_priv->mtrr[2].size,
				MTRR_TYPE_WRCOMB, 1);
		} else {
			DRM_ERROR("strange pci_resource_len %08lx\n",
				  drm_get_resource_len(dev, 0));
		}
	} else if (dev_priv->chipset != S3_SUPERSAVAGE &&
		   dev_priv->chipset != S3_SAVAGE2000) {
		mmio_base = drm_get_resource_start(dev, 0);
		fb_rsrc = 1;
		fb_base = drm_get_resource_start(dev, 1);
		fb_size = SAVAGE_FB_SIZE_S4;
		aper_rsrc = 1;
		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
		/* this should always be true */
		if (drm_get_resource_len(dev, 1) == 0x08000000) {
			/* Can use one MTRR to cover both fb and
			 * aperture. */
			dev_priv->mtrr[0].base = fb_base;
			dev_priv->mtrr[0].size = 0x08000000;
			dev_priv->mtrr[0].handle = mtrr_add(
				dev_priv->mtrr[0].base, dev_priv->mtrr[0].size,
				MTRR_TYPE_WRCOMB, 1);
		} else {
			DRM_ERROR("strange pci_resource_len %08lx\n",
				  drm_get_resource_len(dev, 1));
		}
	} else {
		mmio_base = drm_get_resource_start(dev, 0);
		fb_rsrc = 1;
		fb_base = drm_get_resource_start(dev, 1);
		fb_size = drm_get_resource_len(dev, 1);
		aper_rsrc = 2;
		aperture_base = drm_get_resource_start(dev, 2);
		/* Automatic MTRR setup will do the right thing. */
	}

	ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS,
			 _DRM_READ_ONLY, &dev_priv->mmio);
	if (ret)
		return ret;

	ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
			 _DRM_WRITE_COMBINING, &dev_priv->fb);
	if (ret)
		return ret;

	ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
			 _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
			 &dev_priv->aperture);
	if (ret)
		return ret;

	return ret;
}
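The comment above mentions tearing the manual MTRRs down on last close; a hedged sketch of that teardown (the driver's actual body may differ) looks like this:

void savage_driver_lastclose(drm_device_t *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	int i;

	/* remove only the MTRRs that savage_driver_firstopen() added */
	for (i = 0; i < 3; ++i)
		if (dev_priv->mtrr[i].handle >= 0)
			mtrr_del(dev_priv->mtrr[i].handle,
				 dev_priv->mtrr[i].base,
				 dev_priv->mtrr[i].size);
}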
Example #11
int nouveau_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_pscnv_virt_private *dev_priv;
	resource_size_t mmio_start_offs, call_start_offset;
	int ret;

	/* allocate the private device data */
	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (!dev_priv)
		return -ENOMEM;
	dev->dev_private = dev_priv;
	dev_priv->dev = dev;

	dev_priv->flags = flags;

	/* resource 0 is mmio regs */
	/* resource 1 is hypercall buffer */
	/* resource 2 is mapped vram */

	/* map the mmio regs */
	mmio_start_offs = drm_get_resource_start(dev, 0);
	ret = drm_addmap(dev, mmio_start_offs, PSCNV_VIRT_MMIO_SIZE,
		_DRM_REGISTERS, _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio);

	if (ret) {
		NV_ERROR(dev, "Unable to initialize the mmio mapping.\n");
		return ret;
	}

	/* map the ring buffer */
	call_start_offset = drm_get_resource_start(dev, 1);
	ret = drm_addmap(dev, call_start_offset, PSCNV_CALL_AREA_SIZE,
		_DRM_REGISTERS, _DRM_KERNEL | _DRM_DRIVER, &dev_priv->call_data);

	if (ret) {
		NV_ERROR(dev, "Unable to initialize the call data mapping.\n");
		return ret;
	}

	dev_priv->vram_base = drm_get_resource_start(dev, 2);
	dev_priv->vram_size = drm_get_resource_len(dev, 2);

	/* initialize the hypercall interface */
	pscnv_virt_call_init(dev_priv);
	ret = drm_irq_install(dev);
	if (ret) {
		NV_ERROR(dev, "Unable to register the call interrupt.\n");
		return ret;
	}

	memset(dev_priv->vspaces, 0, sizeof(dev_priv->vspaces));
	memset(dev_priv->chans, 0, sizeof(dev_priv->chans));

	/* the channels are directly mapped to the fourth BAR */
	dev_priv->chan_base = drm_get_resource_start(dev, 3);
	dev_priv->chan_size = drm_get_resource_len(dev, 3);
	dev_priv->is_nv50 = dev_priv->chan_size == 128 * 0x2000 ? 1 : 0;

#if 0
	struct drm_nouveau_private *dev_priv;
	uint32_t reg0, strap;
	resource_size_t mmio_start_offs;
	int ret;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (!dev_priv)
		return -ENOMEM;
	dev->dev_private = dev_priv;
	dev_priv->dev = dev;

	dev_priv->flags = flags/* & NOUVEAU_FLAGS*/;
	dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;

	NV_DEBUG(dev, "vendor: 0x%X device: 0x%X\n",
		 dev->pci_vendor, dev->pci_device);

	dev_priv->wq = create_workqueue("nouveau");
	if (!dev_priv->wq)
		return -EINVAL;

	/* resource 0 is mmio regs */
	/* resource 1 is linear FB */
	/* resource 2 is RAMIN (mmio regs + 0x1000000) */
	/* resource 6 is bios */

	/* map the mmio regs */
	mmio_start_offs = drm_get_resource_start(dev, 0);
	ret = drm_addmap(dev, mmio_start_offs, 0x00800000, _DRM_REGISTERS,
	    _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio);

	if (ret) {
		NV_ERROR(dev, "Unable to initialize the mmio mapping. "
			 "Please report your setup to " DRIVER_EMAIL "\n");
		return ret;
	}
	NV_DEBUG(dev, "regs mapped ok at 0x%llx\n",
					(unsigned long long)mmio_start_offs);

#ifdef __BIG_ENDIAN
	/* Put the card in BE mode if it's not */
	if (nv_rd32(dev, NV03_PMC_BOOT_1))
		nv_wr32(dev, NV03_PMC_BOOT_1, 0x00000001);

	DRM_MEMORYBARRIER();
#endif

	/* Time to determine the card architecture */
	reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);

	/* We're dealing with >=NV10 */
	if ((reg0 & 0x0f000000) > 0) {
		/* Bits 27-20 contain the architecture in hex */
		dev_priv->chipset = (reg0 & 0xff00000) >> 20;
	/* NV04 or NV05 */
	} else if ((reg0 & 0xff00fff0) == 0x20004000) {