/*
 * Release all BCI/DMA resources owned by the Savage private state.
 *
 * Handles three independent resources, in order:
 *  1. the command DMA buffer — either the locally allocated fake-DMA
 *     fallback or an ioremapped AGP command buffer,
 *  2. the AGP vertex buffer aperture mapping (only in AGP mode),
 *  3. the DMA page bookkeeping array.
 *
 * Returns 0 always (kept for the driver cleanup callback signature).
 */
int savage_do_cleanup_bci(drm_device_t *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;

	if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
		/* Fake DMA: plain kernel allocation, not a mapping. */
		if (dev_priv->fake_dma.handle)
			drm_free(dev_priv->fake_dma.handle,
				 SAVAGE_FAKE_DMA_SIZE, DRM_MEM_DRIVER);
	} else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
		   dev_priv->cmd_dma->type == _DRM_AGP &&
		   dev_priv->dma_type == SAVAGE_DMA_AGP)
		/* Real AGP command DMA buffer: undo the ioremap. */
		drm_core_ioremapfree(dev_priv->cmd_dma, dev);

	if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
	    dev->agp_buffer_map && dev->agp_buffer_map->handle) {
		drm_core_ioremapfree(dev->agp_buffer_map, dev);
		/* make sure the next instance (which may be running
		 * in PCI mode) doesn't try to use an old
		 * agp_buffer_map. */
		dev->agp_buffer_map = NULL;
	}

	if (dev_priv->dma_pages)
		drm_free(dev_priv->dma_pages,
			 sizeof(drm_savage_dma_page_t)*dev_priv->nr_dma_pages,
			 DRM_MEM_DRIVER);

	return 0;
}
/**
 * Initialize the AGP resources.
 *
 * Allocates and populates a drm_agp_head for the device.  If no bridge
 * was registered for the PCI device, one is acquired just long enough to
 * read the AGP info and then released again.
 *
 * \return pointer to a drm_agp_head structure, or NULL on failure or
 *         when the chipset is unsupported.
 */
drm_agp_head_t *drm_agp_init(drm_device_t *dev)
{
	drm_agp_head_t *agp;

	agp = drm_alloc(sizeof(*agp), DRM_MEM_AGPLISTS);
	if (agp == NULL)
		return NULL;
	memset((void *)agp, 0, sizeof(*agp));

	agp->bridge = agp_find_bridge(dev->pdev);
	if (agp->bridge != NULL) {
		agp_copy_info(agp->bridge, &agp->agp_info);
	} else {
		/* No registered bridge; acquire the backend transiently
		 * just to snapshot the AGP info. */
		agp->bridge = agp_backend_acquire(dev->pdev);
		if (agp->bridge == NULL)
			goto fail;
		agp_copy_info(agp->bridge, &agp->agp_info);
		agp_backend_release(agp->bridge);
	}

	if (agp->agp_info.chipset == NOT_SUPPORTED)
		goto fail;

	agp->memory = NULL;
	agp->cant_use_aperture = agp->agp_info.cant_use_aperture;
	agp->page_mask = agp->agp_info.page_mask;
	return agp;

fail:
	drm_free(agp, sizeof(*agp), DRM_MEM_AGPLISTS);
	return NULL;
}
/**
 * \brief Allocate a physically contiguous DMA-accessible consistent
 * memory block.
 *
 * Creates a busdma tag constrained by \p align and \p maxaddr, allocates
 * zeroed uncached memory from it, and loads the map so the bus address is
 * recorded via drm_pci_busdma_callback.
 *
 * \return handle describing the allocation, or NULL on any failure
 *         (all partially acquired busdma resources are released).
 */
drm_dma_handle_t *
drm_pci_alloc(struct drm_device *dev, size_t size, size_t align,
    dma_addr_t maxaddr)
{
	drm_dma_handle_t *dmah;
	int ret;

	/* busdma requires a power-of-two alignment. */
	if ((align & (align - 1)) != 0) {
		DRM_ERROR("drm_pci_alloc with non-power-of-two alignment %d\n",
		    (int)align);
		return NULL;
	}

	dmah = kmalloc(sizeof(drm_dma_handle_t), DRM_MEM_DMA,
	    M_ZERO | M_NOWAIT);
	if (dmah == NULL)
		return NULL;

#if 0 /* HT XXX XXX XXX */
	/* Make sure we aren't holding locks here */
	mtx_assert(&dev->dev_lock, MA_NOTOWNED);
	if (mtx_owned(&dev->dev_lock))
		DRM_ERROR("called while holding dev_lock\n");
	mtx_assert(&dev->dma_lock, MA_NOTOWNED);
	if (mtx_owned(&dev->dma_lock))
		DRM_ERROR("called while holding dma_lock\n");
#endif

	ret = bus_dma_tag_create(NULL, align, 0, /* tag, align, boundary */
	    maxaddr, BUS_SPACE_MAXADDR,		 /* lowaddr, highaddr */
	    NULL, NULL,				 /* filtfunc, filtfuncargs */
	    size, 1, size,			 /* maxsize, nsegs, maxsegsize */
	    0,					 /* flags */
	    &dmah->tag);
	if (ret != 0)
		goto free_handle;

	ret = bus_dmamem_alloc(dmah->tag, &dmah->vaddr,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_NOCACHE, &dmah->map);
	if (ret != 0)
		goto destroy_tag;

	ret = bus_dmamap_load(dmah->tag, dmah->map, dmah->vaddr, size,
	    drm_pci_busdma_callback, dmah, BUS_DMA_NOWAIT);
	if (ret != 0)
		goto free_mem;

	return dmah;

	/* Unwind in reverse order of acquisition. */
free_mem:
	bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
destroy_tag:
	bus_dma_tag_destroy(dmah->tag);
free_handle:
	drm_free(dmah, DRM_MEM_DMA);
	return NULL;
}
static int drm_drawable_free(int idr, void *p, void *data) { struct drm_drawable_info *info = p; if (info) { drm_free(info->rects, info->num_rects * sizeof(struct drm_clip_rect), DRM_MEM_BUFS); drm_free(info, sizeof(*info), DRM_MEM_BUFS); } return 0; }
/** * Allocate AGP memory. * * \param inode device inode. * \param filp file pointer. * \param cmd command. * \param arg pointer to a drm_agp_buffer structure. * \return zero on success or a negative number on failure. * * Verifies the AGP device is present and has been acquired, allocates the * memory via alloc_agp() and creates a drm_agp_mem entry for it. */ int drm_agp_alloc(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; drm_agp_buffer_t request; drm_agp_mem_t *entry; DRM_AGP_MEM *memory; unsigned long pages; u32 type; drm_agp_buffer_t __user *argp = (void __user *)arg; if (!dev->agp || !dev->agp->acquired) return -EINVAL; if (copy_from_user(&request, argp, sizeof(request))) return -EFAULT; if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS))) return -ENOMEM; memset(entry, 0, sizeof(*entry)); pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE; type = (u32) request.type; if (!(memory = drm_alloc_agp(dev, pages, type))) { drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); return -ENOMEM; } entry->handle = (unsigned long)memory->key + 1; entry->memory = memory; entry->bound = 0; entry->pages = pages; entry->prev = NULL; entry->next = dev->agp->memory; if (dev->agp->memory) dev->agp->memory->prev = entry; dev->agp->memory = entry; request.handle = entry->handle; request.physical = memory->physical; if (copy_to_user(argp, &request, sizeof(request))) { dev->agp->memory = entry->next; dev->agp->memory->prev = NULL; drm_free_agp(memory, pages); drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); return -EFAULT; } return 0; }
int drm_sman_init(struct drm_sman * sman, unsigned int num_managers, unsigned int user_order, unsigned int owner_order) { int ret = 0; sman->mm = (struct drm_sman_mm *) drm_calloc(num_managers, sizeof(*sman->mm), DRM_MEM_MM); if (!sman->mm) { ret = -ENOMEM; goto out; } sman->num_managers = num_managers; INIT_LIST_HEAD(&sman->owner_items); ret = drm_ht_create(&sman->owner_hash_tab, owner_order); if (ret) goto out1; ret = drm_ht_create(&sman->user_hash_tab, user_order); if (!ret) goto out; drm_ht_remove(&sman->owner_hash_tab); out1: drm_free(sman->mm, num_managers * sizeof(*sman->mm), DRM_MEM_MM); out: return ret; }
int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request) { drm_agp_mem_t *entry; if (!dev->agp || !dev->agp->acquired) return EINVAL; entry = drm_agp_lookup_entry(dev, (void*)request->handle); if (entry == NULL) return EINVAL; if (entry->prev) entry->prev->next = entry->next; else dev->agp->memory = entry->next; if (entry->next) entry->next->prev = entry->prev; if (entry->bound) drm_agp_unbind_memory(entry->handle); drm_agp_free_memory(entry->handle); drm_free(entry, DRM_MEM_AGPLISTS); return 0; }
/* Free all blocks associated with the releasing file. */
void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv,
		      struct mem_block *heap)
{
	struct mem_block *p;

	/* heap->next == NULL would mean the list was never initialised. */
	if (!heap || !heap->next)
		return;

	/* Pass 1: orphan every block still owned by this file. */
	for (p = heap->next; p != heap; p = p->next) {
		if (p->file_priv == file_priv) {
			p->file_priv = NULL;
			mark_block(dev, p, 0);
		}
	}

	/* Assumes a single contiguous range. Needs a special file_priv in
	 * 'heap' to stop it being subsumed. */
	/* Pass 2: coalesce adjacent free (file_priv == NULL) blocks.  The
	 * inner while merges every free successor of p into p before the
	 * outer loop advances, so runs of free blocks collapse into one. */
	for (p = heap->next; p != heap; p = p->next) {
		while (p->file_priv == NULL && p->next->file_priv == NULL) {
			struct mem_block *q = p->next;
			p->size += q->size;
			p->next = q->next;
			p->next->prev = p;
			drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
		}
	}
}
static int mali_driver_load(struct drm_device *dev, unsigned long chipset) { int ret; unsigned long base, size; drm_mali_private_t *dev_priv; printk(KERN_ERR "DRM: mali_driver_load start\n"); dev_priv = drm_calloc(1, sizeof(drm_mali_private_t), DRM_MEM_DRIVER); if ( dev_priv == NULL ) return -ENOMEM; dev->dev_private = (void *)dev_priv; if ( NULL == dev->platformdev ) { dev->platformdev = platform_device_register_simple(mali_drm_device_name, 0, NULL, 0); pdev = dev->platformdev; } #if 0 base = drm_get_resource_start(dev, 1 ); size = drm_get_resource_len(dev, 1 ); #endif ret = drm_sman_init(&dev_priv->sman, 2, 12, 8); if ( ret ) drm_free(dev_priv, sizeof(dev_priv), DRM_MEM_DRIVER); //if ( ret ) kfree( dev_priv ); printk(KERN_ERR "DRM: mali_driver_load done\n"); return ret; }
/* Shutdown: free every block on the heap list, then the sentinel itself,
 * and clear the caller's heap pointer. */
void i915_mem_takedown(struct mem_block **heap)
{
	struct mem_block *block, *next;

	if (*heap == NULL)
		return;

	/* Grab the successor before freeing the current node. */
	for (block = (*heap)->next; block != *heap; block = next) {
		next = block->next;
		drm_free(block, sizeof(*block), DRM_MEM_BUFLISTS);
	}

	drm_free(*heap, sizeof(**heap), DRM_MEM_BUFLISTS);
	*heap = NULL;
}
/*
 * Create a TTM backend object for a radeon buffer.  AGP devices would use
 * the generic AGP TTM backend (currently compiled out); everything else
 * gets a DMA-backed radeon_ttm_tt.
 *
 * Returns the embedded ttm_tt, or NULL on allocation/init failure.
 */
static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
    unsigned long size, uint32_t page_flags, vm_page_t dummy_read_page)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt;

	rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
#ifdef DUMBBELL_WIP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_create(bdev, rdev->ddev->agp->agpdev,
					 size, page_flags, dummy_read_page);
	}
#endif /* DUMBBELL_WIP */
#endif

	gtt = kmalloc(sizeof(struct radeon_ttm_tt), DRM_MEM_DRIVER,
		      M_WAITOK | M_ZERO);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &radeon_backend_func;
	gtt->rdev = rdev;
	/* BUG FIX: the source had "&gtt" garbled to ">t" (lost '&g'),
	 * which does not compile; restored the address-of expressions. */
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags,
			    dummy_read_page)) {
		drm_free(gtt, DRM_MEM_DRIVER);
		return NULL;
	}
	return &gtt->ttm.ttm;
}
static void radeon_ttm_backend_destroy(struct ttm_tt *ttm) { struct radeon_ttm_tt *gtt = (void *)ttm; ttm_dma_tt_fini(>t->ttm); drm_free(gtt, DRM_MEM_DRIVER); }
int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; drm_agp_buffer_t request; drm_agp_buffer_t __user *argp = (void __user *)arg; int err; if (copy_from_user(&request, argp, sizeof(request))) return -EFAULT; err = drm_agp_alloc(dev, &request); if (err) return err; if (copy_to_user(argp, &request, sizeof(request))) { drm_agp_mem_t *entry = dev->agp->memory; dev->agp->memory = entry->next; dev->agp->memory->prev = NULL; drm_free_agp(entry->memory, entry->pages); drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); return -EFAULT; } return 0; }
int drm_platform_init(struct drm_driver *driver) { struct drm_device *dev; int ret; DRM_DEBUG("\n"); dev = drm_calloc(1, sizeof(*dev), DRM_MEM_STUB); if (!dev) return -ENOMEM; ret = drm_fill_in_platform_dev(dev, driver->platform_device, driver); if (ret) { printk(KERN_ERR "DRM: Fill_in_platform_dev failed.\n"); goto err_g1; } ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY); if (ret) goto err_g1; list_add_tail(&dev->driver_item, &driver->device_list); DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name, driver->major, driver->minor, driver->patchlevel, driver->date, dev->primary->index); return 0; err_g1: drm_free(dev, sizeof(*dev), DRM_MEM_STUB); return ret; }
/*
 * Tear down per-device vblank state: stop the disable timer, disable
 * vblank interrupts, destroy the NetBSD synchronisation primitives, and
 * free the vblank structure.  Safe to call when vblanks were never
 * initialised.  Ordering matters: the timer must be stopped before its
 * callout/condvars are destroyed.
 */
void drm_vblank_cleanup(struct drm_device *dev)
{
#if defined(__NetBSD__)
	int i;
#endif /* defined(__NetBSD__) */

	if (dev->vblank == NULL)
		return; /* not initialised */

	timeout_del(&dev->vblank->vb_disable_timer);
#if defined(__NetBSD__)
	/* NetBSD requires an explicit destroy after the timeout is gone. */
	callout_destroy(&dev->vblank->vb_disable_timer);
#endif /* defined(__NetBSD__) */
	vblank_disable(dev);
#if defined(__NetBSD__)
	/* No waiters can remain once vblanks are disabled. */
	for (i = 0; i < dev->vblank->vb_num; i++)
		cv_destroy(&dev->vblank->vb_crtcs[i].condvar);
	mutex_destroy(&dev->vblank->vb_lock);
#endif /* defined(__NetBSD__) */
	drm_free(dev->vblank);
	dev->vblank = NULL;
}
/** * Free AGP memory (ioctl). * * \param inode device inode. * \param filp file pointer. * \param cmd command. * \param arg pointer to a drm_agp_buffer structure. * \return zero on success or a negative number on failure. * * Verifies the AGP device is present and has been acquired and looks up the * AGP memory entry. If the memory it's currently bound, unbind it via * unbind_agp(). Frees it via free_agp() as well as the entry itself * and unlinks from the doubly linked list it's inserted in. */ int drm_agp_free(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; drm_agp_buffer_t request; drm_agp_mem_t *entry; if (!dev->agp || !dev->agp->acquired) return -EINVAL; if (copy_from_user(&request, (drm_agp_buffer_t __user *)arg, sizeof(request))) return -EFAULT; if (!(entry = drm_agp_lookup_entry(dev, request.handle))) return -EINVAL; if (entry->bound) drm_unbind_agp(entry->memory); if (entry->prev) entry->prev->next = entry->next; else dev->agp->memory = entry->next; if (entry->next) entry->next->prev = entry->prev; drm_free_agp(entry->memory, entry->pages); drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); return 0; }
/*
 * Remove a magic number from the per-device hash table.  The matching
 * entry is unlinked (head/tail updated as needed) under the device lock
 * and freed after the lock is dropped.
 *
 * Returns 0 on success, EINVAL when the magic is not present.
 */
int drm_remove_magic(drm_device_t *dev, drm_magic_t magic)
{
	drm_magic_entry_t *cur;
	drm_magic_entry_t *before = NULL;
	int bucket;

	DRM_DEBUG("drm_remove_magic : %d", magic);
	bucket = drm_hash_magic(magic);

	DRM_LOCK();
	for (cur = dev->magiclist[bucket].head; cur != NULL;
	    before = cur, cur = cur->next) {
		if (cur->magic != magic)
			continue;
		if (dev->magiclist[bucket].head == cur) {
			dev->magiclist[bucket].head = cur->next;
		}
		if (dev->magiclist[bucket].tail == cur) {
			dev->magiclist[bucket].tail = before;
		}
		if (before != NULL) {
			before->next = cur->next;
		}
		DRM_UNLOCK();
		drm_free(cur, sizeof (*cur), DRM_MEM_MAGIC);
		return (0);
	}
	DRM_UNLOCK();
	return (EINVAL);
}
/*
 * Plane destroy callback: disable the hardware plane, detach the DRM core
 * state, and free the driver-private wrapper.
 */
static void intel_destroy_plane(struct drm_plane *plane)
{
	struct intel_plane *ip = to_intel_plane(plane);

	intel_disable_plane(plane);
	drm_plane_cleanup(plane);
	drm_free(ip, DRM_MEM_KMS);
}
/** * Register. * * \param pdev - PCI device structure * \param ent entry from the PCI ID table with device type flags * \return zero on success or a negative number on failure. * * Attempt to gets inter module "drm" information. If we are first * then register the character device and inter module information. * Try and register, if we fail to register, backout previous work. */ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver) { drm_device_t *dev; int ret; DRM_DEBUG("\n"); dev = drm_calloc(1, sizeof(*dev), DRM_MEM_STUB); if (!dev) return -ENOMEM; pci_enable_device(pdev); if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) { printk(KERN_ERR "DRM: Fill_in_dev failed.\n"); goto err_g1; } if ((ret = drm_get_head(dev, &dev->primary))) goto err_g1; DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name, driver->major, driver->minor, driver->patchlevel, driver->date, dev->primary.minor); return 0; err_g1: drm_free(dev, sizeof(*dev), DRM_MEM_STUB); return ret; }
/** * radeon_driver_postclose_kms - drm callback for post close * * @dev: drm dev pointer * @file_priv: drm file * * On device post close, tear down vm on cayman+ (all asics). */ void radeon_driver_postclose_kms(struct drm_device *dev, struct drm_file *file_priv) { struct radeon_device *rdev = dev->dev_private; /* new gpu have virtual address space support */ if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) { struct radeon_fpriv *fpriv = file_priv->driver_priv; struct radeon_bo_va *bo_va; int r; r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); if (!r) { bo_va = radeon_vm_bo_find(&fpriv->vm, rdev->ring_tmp_bo.bo); if (bo_va) radeon_vm_bo_rmv(rdev, bo_va); radeon_bo_unreserve(rdev->ring_tmp_bo.bo); } radeon_vm_fini(rdev, &fpriv->vm); drm_free(fpriv, DRM_MEM_DRIVER); file_priv->driver_priv = NULL; } }
/**
 * Remove a magic number.
 *
 * \param dev DRM device.
 * \param magic magic number.
 * \return zero on success, -EINVAL when the magic is not found.
 *
 * Searches and unlinks the entry in drm_device::magiclist with the magic
 * number hash key, while holding the drm_device::struct_sem lock, then
 * frees the unlinked entry outside the lock.
 */
static int drm_remove_magic(drm_device_t *dev, drm_magic_t magic)
{
	drm_magic_entry_t *prev = NULL;
	drm_magic_entry_t *pt;
	int hash;

	DRM_DEBUG("%d\n", magic);
	hash = drm_hash_magic(magic);

	down(&dev->struct_sem);
	for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) {
		if (pt->magic == magic) {
			if (dev->magiclist[hash].head == pt) {
				dev->magiclist[hash].head = pt->next;
			}
			if (dev->magiclist[hash].tail == pt) {
				dev->magiclist[hash].tail = prev;
			}
			if (prev) {
				prev->next = pt->next;
			}
			up(&dev->struct_sem);
			/* BUG FIX: the free was after the search loop, so a
			 * successful removal leaked the entry and a failed
			 * search "freed" pt == NULL with a bogus size.
			 * Free the unlinked entry here instead. */
			drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
			return 0;
		}
	}
	up(&dev->struct_sem);
	return -EINVAL;
}
/**
 * Removes the given magic number from the hash table of used magic number
 * lists.  The caller must hold the device lock; the matching entry is
 * unlinked (head/tail adjusted) and freed.
 *
 * Returns 0 on success, EINVAL when the magic is not present.
 */
static int drm_remove_magic(struct drm_device *dev, drm_magic_t magic)
{
	drm_magic_entry_t *cur;
	drm_magic_entry_t *before = NULL;
	int bucket;

	DRM_LOCK_ASSERT(dev);

	DRM_DEBUG("%d\n", magic);
	bucket = drm_hash_magic(magic);

	for (cur = dev->magiclist[bucket].head; cur != NULL;
	    before = cur, cur = cur->next) {
		if (cur->magic != magic)
			continue;
		if (dev->magiclist[bucket].head == cur) {
			dev->magiclist[bucket].head = cur->next;
		}
		if (dev->magiclist[bucket].tail == cur) {
			dev->magiclist[bucket].tail = before;
		}
		if (before != NULL) {
			before->next = cur->next;
		}
		drm_free(cur, DRM_MEM_MAGIC);
		return 0;
	}

	return EINVAL;
}
/*
 * kref release callback: free the ttm_object_file once its last reference
 * is dropped.
 */
static void ttm_object_file_destroy(struct kref *kref)
{
	struct ttm_object_file *tfile;

	tfile = container_of(kref, struct ttm_object_file, refcount);
	drm_free(tfile, M_TTM_OBJ_FILE);
}
/*
 * Queue a vblank event for delivery to file_priv when the requested
 * sequence is reached.  If the sequence has already passed (modulo-2^24
 * wraparound comparison via the (1 << 23) window), the event is delivered
 * immediately; otherwise it is parked on the crtc's pending-event list.
 * The event's memory is owned by the queue once inserted (destroy frees
 * it via drm_free).
 *
 * Returns 0 on success or ENOMEM.
 */
int
drm_queue_vblank_event(struct drm_device *dev, int crtc,
    union drm_wait_vblank *vblwait, struct drm_file *file_priv)
{
	struct drm_pending_vblank_event *vev;
	struct timeval now;
	u_int seq;

	vev = drm_calloc(1, sizeof(*vev));
	if (vev == NULL)
		return (ENOMEM);

	vev->event.base.type = DRM_EVENT_VBLANK;
	vev->event.base.length = sizeof(vev->event);
	vev->event.user_data = vblwait->request.signal;
	vev->base.event = &vev->event.base;
	vev->base.file_priv = file_priv;
	/* Event memory is released through the generic destroy hook. */
	vev->base.destroy = (void (*) (struct drm_pending_event *))drm_free;

	microtime(&now);

	mtx_enter(&dev->event_lock);
	/* Account against the per-file event budget before queueing. */
	if (file_priv->event_space < sizeof(vev->event)) {
		mtx_leave(&dev->event_lock);
		drm_free(vev);
		return (ENOMEM);
	}

	seq = drm_vblank_count(dev, crtc);
	file_priv->event_space -= sizeof(vev->event);

	DPRINTF("%s: queueing event %d on crtc %d\n", __func__, seq, crtc);

	/* NEXTONMISS: if the requested sequence already passed, retarget
	 * to the next vblank instead of firing a stale event. */
	if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
	    (seq - vblwait->request.sequence) <= (1 << 23)) {
		vblwait->request.sequence = seq + 1;
		vblwait->reply.sequence = vblwait->request.sequence;
	}

	vev->event.sequence = vblwait->request.sequence;
	if ((seq - vblwait->request.sequence) <= (1 << 23)) {
		/* Already reached: deliver right now. */
		vev->event.tv_sec = now.tv_sec;
		vev->event.tv_usec = now.tv_usec;
		DPRINTF("%s: already passed, dequeuing: crtc %d, value %d\n",
		    __func__, crtc, seq);
		drm_vblank_put(dev, crtc);
		TAILQ_INSERT_TAIL(&file_priv->evlist, &vev->base, link);
		wakeup(&file_priv->evlist);
		selwakeup(&file_priv->rsel);
	} else {
		/* Not yet reached: park on the crtc's pending list; the
		 * vblank interrupt path will deliver it later. */
		TAILQ_INSERT_TAIL(&dev->vblank->vb_crtcs[crtc].vbl_events,
		    &vev->base, link);
	}
	mtx_leave(&dev->event_lock);

	return (0);
}
/*
 * Driver unload callback: release the VIA per-device private state.
 * Always returns 0.
 */
int via_driver_unload(drm_device_t *dev)
{
	drm_free(dev->dev_private, sizeof(drm_via_private_t),
	    DRM_MEM_DRIVER);
	return 0;
}
/**
 * Put a device minor number.
 *
 * \param dev device data structure
 * \return always zero
 *
 * Frees the device's unique identifier and device name strings (sized by
 * their NUL-terminated length) and then the device structure itself.
 */
int drm_put_dev(drm_device_t * dev)
{
	DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name);

	if (dev->unique != NULL) {
		drm_free(dev->unique, strlen(dev->unique) + 1,
			 DRM_MEM_DRIVER);
		dev->unique = NULL;
		dev->unique_len = 0;
	}

	if (dev->devname != NULL) {
		drm_free(dev->devname, strlen(dev->devname) + 1,
			 DRM_MEM_DRIVER);
		dev->devname = NULL;
	}

	drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
	return 0;
}
/*
 * Driver unload callback: release the Savage per-device private state.
 * Always returns 0.
 */
int savage_driver_unload(struct drm_device *dev)
{
	drm_free(dev->dev_private, sizeof(drm_savage_private_t),
	    DRM_MEM_DRIVER);
	return 0;
}
void drm_sman_takedown(struct drm_sman * sman) { drm_ht_remove(&sman->user_hash_tab); drm_ht_remove(&sman->owner_hash_tab); if (sman->mm) drm_free(sman->mm, sman->num_managers * sizeof(*sman->mm), DRM_MEM_MM); }
/*
 * Driver unload callback: tear down the SiS memory manager, then free
 * the per-device private state.  Always returns 0.
 */
static int sis_driver_unload(struct drm_device *dev)
{
	drm_sis_private_t *priv = dev->dev_private;

	drm_sman_takedown(&priv->sman);
	drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER);

	return 0;
}
/*
 * Module exit: undo drm_core_init in reverse order — remove the proc
 * entry, destroy the sysfs class, unregister the character major, and
 * free the per-card head array.
 */
static void __exit drm_core_exit(void)
{
	remove_proc_entry("dri", NULL);
	drm_sysfs_destroy(drm_class);

	unregister_chrdev(DRM_MAJOR, "drm");

	drm_free(drm_heads, sizeof(*drm_heads) * cards_limit,
	    DRM_MEM_STUB);
}