Example no. 1
0
void drm_core_ioremap(struct drm_map *map, struct drm_device *dev)
{
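	/*
	 * For AGP maps on chipsets whose CPU cannot address the AGP
	 * aperture directly (cant_use_aperture), build the mapping from
	 * the backing pages with agp_remap(); everything else gets a
	 * plain ioremap() of the bus address.
	 */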
	if (drm_core_has_AGP(dev) &&
	    dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
		map->handle = agp_remap(map->offset, map->size, dev);
	else
		map->handle = ioremap(map->offset, map->size);
}
Example no. 2
0
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	ENTER();

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.gtt_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
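		/*
		 * On AGP boards the GTT is backed by the AGP aperture:
		 * drop TTM_MEMTYPE_FLAG_CMA when the aperture can be used
		 * directly and allow only uncached or write-combined CPU
		 * caching.
		 */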
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			if (!rdev->ddev->agp->cant_use_aperture)
				man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		}
#endif
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}

	LEAVE();

	return 0;
}
Example no. 3
0
void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
{
	if (!map->handle || !map->size)
		return;

	if (drm_core_has_AGP(dev) &&
	    dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
		vunmap(map->handle);
	else
		iounmap(map->handle);
}
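Examples no. 1 and no. 3 form a matched pair: every mapping created by drm_core_ioremap() is expected to be torn down by drm_core_ioremapfree(), which chooses between vunmap() and iounmap() with the same test. Below is a minimal sketch of that pairing in a hypothetical driver helper; the function names and the -ENOMEM policy are illustrative assumptions, not taken from any real driver.

#include "drmP.h"

/* Hypothetical helper: map the region described by 'map' and fail if the
 * core could not create a CPU mapping. */
static int example_map_region(struct drm_device *dev, struct drm_map *map)
{
	drm_core_ioremap(map, dev);
	if (!map->handle)
		return -ENOMEM;
	return 0;
}

/* Matching teardown: drm_core_ioremapfree() already tolerates a missing
 * handle or zero size, so it is safe to call unconditionally. */
static void example_unmap_region(struct drm_device *dev, struct drm_map *map)
{
	drm_core_ioremapfree(map, dev);
}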
Example no. 4
0
/**
 * Called via cleanup_module() at module unload time.
 *
 * Cleans up the DRM device, calling drm_lastclose().
 *
 * \sa drm_init
 */
static void __exit drm_cleanup(drm_device_t * dev)
{

	DRM_DEBUG("\n");
	if (!dev) {
		DRM_ERROR("cleanup called no dev\n");
		return;
	}

	drm_lastclose(dev);

	if (dev->maplist) {
		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
		dev->maplist = NULL;
	}

	if (!drm_fb_loaded)
		pci_disable_device(dev->pdev);

	drm_ctxbitmap_cleanup(dev);

	if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp
	    && dev->agp->agp_mtrr >= 0) {
		int retval;
		retval = mtrr_del(dev->agp->agp_mtrr,
				  dev->agp->agp_info.aper_base,
				  dev->agp->agp_info.aper_size * 1024 * 1024);
		DRM_DEBUG("mtrr_del=%d\n", retval);
	}

	if (drm_core_has_AGP(dev) && dev->agp) {
		drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
		dev->agp = NULL;
	}
	if (dev->driver->unload)
		dev->driver->unload(dev);

	drm_put_head(&dev->primary);
	if (drm_put_dev(dev))
		DRM_ERROR("Cannot unload module\n");
}
Example no. 5
0
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->gpu_offset = 0;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			man->io_offset = rdev->mc.agp_base;
			man->io_size = rdev->mc.gtt_size;
			man->io_addr = NULL;
			if (!rdev->ddev->agp->cant_use_aperture)
				man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
					     TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else
#endif
		{
			man->io_offset = 0;
			man->io_size = 0;
			man->io_addr = NULL;
		}
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		man->io_addr = NULL;
		man->io_offset = rdev->mc.aper_base;
		man->io_size = rdev->mc.aper_size;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
Example no. 6
0
/**
 * Take down the DRM device.
 *
 * \param dev DRM device structure.
 *
 * Frees every resource in \p dev.
 *
 * \sa drm_device
 */
int drm_lastclose(drm_device_t * dev)
{
	drm_magic_entry_t *pt, *next;
	drm_map_list_t *r_list;
	drm_vma_entry_t *vma, *vma_next;
	int i;

	DRM_DEBUG("\n");

	if (dev->driver->lastclose)
		dev->driver->lastclose(dev);
	DRM_DEBUG("driver lastclose completed\n");

	if (dev->unique) {
		drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
		dev->unique = NULL;
		dev->unique_len = 0;
	}

	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	/* Everything below touches shared device state; hold struct_sem. */
	down(&dev->struct_sem);
	del_timer(&dev->timer);

	if (dev->unique) {
		drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
		dev->unique = NULL;
		dev->unique_len = 0;
	}

	/* Clear the authentication magic list */
	for (i = 0; i < DRM_HASH_SIZE; i++) {
		for (pt = dev->magiclist[i].head; pt; pt = next) {
			next = pt->next;
			drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
		}
		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
	}

	/* Clear AGP information */
	if (drm_core_has_AGP(dev) && dev->agp) {
		drm_agp_mem_t *entry;
		drm_agp_mem_t *nexte;

		/* Remove AGP resources, but leave dev->agp
		   intact until drv_cleanup is called. */
		for (entry = dev->agp->memory; entry; entry = nexte) {
			nexte = entry->next;
			if (entry->bound)
				drm_unbind_agp(entry->memory);
			drm_free_agp(entry->memory, entry->pages);
			drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
		}
		dev->agp->memory = NULL;

		if (dev->agp->acquired)
			drm_agp_release(dev);

		dev->agp->acquired = 0;
		dev->agp->enabled = 0;
	}
	if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
		drm_sg_cleanup(dev->sg);
		dev->sg = NULL;
	}

	/* Clear vma list (only built for debugging) */
	if (dev->vmalist) {
		for (vma = dev->vmalist; vma; vma = vma_next) {
			vma_next = vma->next;
			drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
		}
		dev->vmalist = NULL;
	}

	if (dev->maplist) {
		while (!list_empty(&dev->maplist->head)) {
			struct list_head *list = dev->maplist->head.next;
			r_list = list_entry(list, drm_map_list_t, head);
			drm_rmmap_locked(dev, r_list->map);
		}
	}

	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
		for (i = 0; i < dev->queue_count; i++) {

			if (dev->queuelist[i]) {
				drm_free(dev->queuelist[i],
					 sizeof(*dev->queuelist[0]),
					 DRM_MEM_QUEUES);
				dev->queuelist[i] = NULL;
			}
		}
		drm_free(dev->queuelist,
			 dev->queue_slots * sizeof(*dev->queuelist),
			 DRM_MEM_QUEUES);
		dev->queuelist = NULL;
	}
	dev->queue_count = 0;

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		drm_dma_takedown(dev);

	if (dev->lock.hw_lock) {
		dev->sigdata.lock = dev->lock.hw_lock = NULL;	/* SHM removed */
		dev->lock.filp = NULL;
		wake_up_interruptible(&dev->lock.lock_queue);
	}
	up(&dev->struct_sem);

	DRM_DEBUG("lastclose completed\n");
	return 0;
}
Example no. 7
0
static int fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
                       const struct pci_device_id *ent,
                       struct drm_driver *driver)
{
    int retcode;

    spin_lock_init(&dev->count_lock);
    init_timer(&dev->timer);
    sema_init(&dev->struct_sem, 1);
    sema_init(&dev->ctxlist_sem, 1);

    dev->pdev = pdev;

#ifdef __alpha__
    dev->hose = pdev->sysdata;
    dev->pci_domain = dev->hose->bus->number;
#else
    dev->pci_domain = 0;
#endif
    dev->pci_bus = pdev->bus->number;
    dev->pci_slot = PCI_SLOT(pdev->devfn);
    dev->pci_func = PCI_FUNC(pdev->devfn);
    dev->irq = pdev->irq;

    dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
    if (dev->maplist == NULL)
        return -ENOMEM;
    INIT_LIST_HEAD(&dev->maplist->head);

    /* the DRM has 6 counters */
    dev->counters = 6;
    dev->types[0] = _DRM_STAT_LOCK;
    dev->types[1] = _DRM_STAT_OPENS;
    dev->types[2] = _DRM_STAT_CLOSES;
    dev->types[3] = _DRM_STAT_IOCTLS;
    dev->types[4] = _DRM_STAT_LOCKS;
    dev->types[5] = _DRM_STAT_UNLOCKS;

    dev->driver = driver;

    if (dev->driver->load)
        if ((retcode = dev->driver->load(dev, ent->driver_data)))
            goto error_out_unreg;

    /* Set up AGP, and an MTRR covering its aperture, when both the DRM
     * core and the device support it. */
    if (drm_core_has_AGP(dev)) {
        if (drm_device_is_agp(dev))
            dev->agp = drm_agp_init(dev);
        if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
                && (dev->agp == NULL)) {
            DRM_ERROR("Cannot initialize the agpgart module.\n");
            retcode = -EINVAL;
            goto error_out_unreg;
        }

        if (drm_core_has_MTRR(dev)) {
            if (dev->agp)
                dev->agp->agp_mtrr =
                    mtrr_add(dev->agp->agp_info.aper_base,
                             dev->agp->agp_info.aper_size *
                             1024 * 1024, MTRR_TYPE_WRCOMB, 1);
        }
    }

    retcode = drm_ctxbitmap_init(dev);
    if (retcode) {
        DRM_ERROR("Cannot allocate memory for context bitmap.\n");
        goto error_out_unreg;
    }

    return 0;

error_out_unreg:
    drm_lastclose(dev);
    return retcode;
}

static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
			   const struct pci_device_id *ent,
			   struct drm_driver *driver)
{
	int retcode;

	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->ctxlist);
	INIT_LIST_HEAD(&dev->vmalist);
	INIT_LIST_HEAD(&dev->maplist);

	spin_lock_init(&dev->count_lock);
	spin_lock_init(&dev->drw_lock);
	spin_lock_init(&dev->tasklet_lock);
	spin_lock_init(&dev->lock.spinlock);
	init_timer(&dev->timer);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->ctxlist_mutex);

	idr_init(&dev->drw_idr);

	dev->pdev = pdev;
	dev->pci_device = pdev->device;
	dev->pci_vendor = pdev->vendor;

#ifdef __alpha__
	dev->hose = pdev->sysdata;
#endif
	dev->irq = pdev->irq;

	/* Hash table mapping user-space map handles to their map entries. */
	if (drm_ht_create(&dev->map_hash, 12)) {
		return -ENOMEM;
	}

	/* the DRM has 6 basic counters */
	dev->counters = 6;
	dev->types[0] = _DRM_STAT_LOCK;
	dev->types[1] = _DRM_STAT_OPENS;
	dev->types[2] = _DRM_STAT_CLOSES;
	dev->types[3] = _DRM_STAT_IOCTLS;
	dev->types[4] = _DRM_STAT_LOCKS;
	dev->types[5] = _DRM_STAT_UNLOCKS;

	dev->driver = driver;

	if (drm_core_has_AGP(dev)) {
		if (drm_device_is_agp(dev))
			dev->agp = drm_agp_init(dev);
		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
		    && (dev->agp == NULL)) {
			DRM_ERROR("Cannot initialize the agpgart module.\n");
			retcode = -EINVAL;
			goto error_out_unreg;
		}
		if (drm_core_has_MTRR(dev)) {
			if (dev->agp)
				dev->agp->agp_mtrr =
				    mtrr_add(dev->agp->agp_info.aper_base,
					     dev->agp->agp_info.aper_size *
					     1024 * 1024, MTRR_TYPE_WRCOMB, 1);
		}
	}

	if (dev->driver->load)
		if ((retcode = dev->driver->load(dev, ent->driver_data)))
			goto error_out_unreg;

	retcode = drm_ctxbitmap_init(dev);
	if (retcode) {
		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
		goto error_out_unreg;
	}

	return 0;

error_out_unreg:
	drm_lastclose(dev);
	return retcode;
}