Example 1
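This appears to be the DragonFly BSD flavour of the scatter/gather allocation ioctl handler: it allocates a page-aligned, write-combining buffer below the 32-bit bus-address limit, records the physical address of every page in entry->busaddr, and publishes the result in dev->sg, re-checking under the device lock that no concurrent allocation raced ahead.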
int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_scatter_gather *request = data;
	struct drm_sg_mem *entry;
	vm_size_t size;
	vm_pindex_t pindex;

	if (dev->sg)
		return -EINVAL;

	DRM_DEBUG("request size=%ld\n", request->size);

	entry = kmalloc(sizeof(*entry), M_DRM, M_WAITOK | M_ZERO);

	size = round_page(request->size);
	entry->pages = OFF_TO_IDX(size);
	entry->busaddr = kmalloc(entry->pages * sizeof(*entry->busaddr),
	    M_DRM, M_WAITOK | M_ZERO);

	entry->vaddr = kmem_alloc_attr(&kernel_map, size,
				       VM_SUBSYS_DRM_SCAT, M_WAITOK | M_ZERO,
				       0, BUS_SPACE_MAXADDR_32BIT,
				       VM_MEMATTR_WRITE_COMBINING);
	if (entry->vaddr == 0) {
		drm_sg_cleanup(entry);
		return (-ENOMEM);
	}

	/* Record the physical (bus) address of every allocated page. */
	for (pindex = 0; pindex < entry->pages; pindex++) {
		entry->busaddr[pindex] =
		    vtophys(entry->vaddr + IDX_TO_OFF(pindex));
	}

	/* Re-check under the device lock: another caller may have installed
	 * an SG area while we were allocating. */
	DRM_LOCK(dev);
	if (dev->sg) {
		DRM_UNLOCK(dev);
		drm_sg_cleanup(entry);
		return (-EINVAL);
	}
	dev->sg = entry;
	DRM_UNLOCK(dev);

	request->handle = entry->vaddr;

	DRM_DEBUG("allocated %ju pages @ 0x%08jx, contents=%08lx\n",
	    entry->pages, (uintmax_t)entry->vaddr,
	    *(unsigned long *)entry->vaddr);

	return (0);
}
Example 2
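From the Linux DRM core: on teardown of a legacy (non-KMS) driver that advertises DRIVER_SG, this frees any scatter/gather memory still attached to the device.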
void drm_legacy_sg_cleanup(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
	    !drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_sg_cleanup(dev->sg);
		dev->sg = NULL;
	}
}
Example 3
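The corresponding free handler in the older BSD style (positive errno return values): it detaches dev->sg under the device lock, validates the handle supplied by userspace, and only then releases the memory.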
int
drm_sg_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_scatter_gather *request = data;
	struct drm_sg_mem *entry;

	DRM_LOCK(dev);
	entry = dev->sg;
	dev->sg = NULL;
	DRM_UNLOCK(dev);

	if (!entry || entry->vaddr != request->handle)
		return (EINVAL);

	DRM_DEBUG("free 0x%zx\n", entry->vaddr);

	drm_sg_cleanup(entry);

	return (0);
}
Example 4
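A later revision of the same handler written in the Linux style (negative errno values, no explicit device lock); it additionally rejects devices that do not advertise the DRIVER_SG feature.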
int drm_sg_free(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_scatter_gather *request = data;
	struct drm_sg_mem *entry;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	entry = dev->sg;
	dev->sg = NULL;

	if (!entry || entry->vaddr != request->handle)
		return -EINVAL;

	DRM_DEBUG("free 0x%zx\n", entry->vaddr);

	drm_sg_cleanup(entry);

	return 0;
}
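For context, here is a minimal, hypothetical userspace sketch of how the alloc/free handlers above are typically reached. It assumes a legacy DRM node at /dev/dri/card0 (an assumption, not taken from the examples) and uses the DRM_IOCTL_SG_ALLOC and DRM_IOCTL_SG_FREE ioctls from the drm.h uapi header; a real caller would also need DRM master rights and a driver that still supports the legacy scatter/gather path.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/drm.h>

int main(void)
{
	struct drm_scatter_gather sg = { .size = 4 * 4096 };	/* four pages */
	int fd = open("/dev/dri/card0", O_RDWR);	/* hypothetical device node */

	if (fd < 0)
		return 1;

	/* Dispatches to the kernel's SG allocation handler (Example 1). */
	if (ioctl(fd, DRM_IOCTL_SG_ALLOC, &sg) == 0) {
		printf("sg handle = 0x%lx\n", sg.handle);

		/* Dispatches to the SG free handler (Examples 3 and 4). */
		ioctl(fd, DRM_IOCTL_SG_FREE, &sg);
	}

	close(fd);
	return 0;
}

On a modesetting (KMS) driver the alloc call is expected to simply fail, consistent with the feature checks shown in Examples 2 and 4.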
Example 5
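An older Linux drm_lastclose() implementation: the teardown path run when the last file handle on the device is closed, releasing the unique name, interrupt handler, authentication magic lists, AGP memory, scatter/gather memory, VMA bookkeeping, map list, DMA queues and the hardware lock.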
/**
 * Take down the DRM device.
 *
 * \param dev DRM device structure.
 *
 * Frees every resource in \p dev.
 *
 * \sa drm_device
 */
int drm_lastclose(drm_device_t * dev)
{
	drm_magic_entry_t *pt, *next;
	drm_map_list_t *r_list;
	drm_vma_entry_t *vma, *vma_next;
	int i;

	DRM_DEBUG("\n");

	if (dev->driver->lastclose)
		dev->driver->lastclose(dev);
	DRM_DEBUG("driver lastclose completed\n");

	if (dev->unique) {
		drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
		dev->unique = NULL;
		dev->unique_len = 0;
	}

	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	down(&dev->struct_sem);
	del_timer(&dev->timer);

	/* Clear pid list */
	for (i = 0; i < DRM_HASH_SIZE; i++) {
		for (pt = dev->magiclist[i].head; pt; pt = next) {
			next = pt->next;
			drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
		}
		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
	}

	/* Clear AGP information */
	if (drm_core_has_AGP(dev) && dev->agp) {
		drm_agp_mem_t *entry;
		drm_agp_mem_t *nexte;

		/* Remove AGP resources, but leave dev->agp
		   intact until drv_cleanup is called. */
		for (entry = dev->agp->memory; entry; entry = nexte) {
			nexte = entry->next;
			if (entry->bound)
				drm_unbind_agp(entry->memory);
			drm_free_agp(entry->memory, entry->pages);
			drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
		}
		dev->agp->memory = NULL;

		if (dev->agp->acquired)
			drm_agp_release(dev);

		dev->agp->acquired = 0;
		dev->agp->enabled = 0;
	}
	if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
		drm_sg_cleanup(dev->sg);
		dev->sg = NULL;
	}

	/* Clear vma list (only built for debugging) */
	if (dev->vmalist) {
		for (vma = dev->vmalist; vma; vma = vma_next) {
			vma_next = vma->next;
			drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
		}
		dev->vmalist = NULL;
	}

	if (dev->maplist) {
		while (!list_empty(&dev->maplist->head)) {
			struct list_head *list = dev->maplist->head.next;
			r_list = list_entry(list, drm_map_list_t, head);
			drm_rmmap_locked(dev, r_list->map);
		}
	}

	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
		for (i = 0; i < dev->queue_count; i++) {

			if (dev->queuelist[i]) {
				drm_free(dev->queuelist[i],
					 sizeof(*dev->queuelist[0]),
					 DRM_MEM_QUEUES);
				dev->queuelist[i] = NULL;
			}
		}
		drm_free(dev->queuelist,
			 dev->queue_slots * sizeof(*dev->queuelist),
			 DRM_MEM_QUEUES);
		dev->queuelist = NULL;
	}
	dev->queue_count = 0;

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		drm_dma_takedown(dev);

	if (dev->lock.hw_lock) {
		dev->sigdata.lock = dev->lock.hw_lock = NULL;	/* SHM removed */
		dev->lock.filp = NULL;
		wake_up_interruptible(&dev->lock.lock_queue);
	}
	up(&dev->struct_sem);

	DRM_DEBUG("lastclose completed\n");
	return 0;
}