void drm_dma_takedown(struct drm_device *dev)
{
	drm_device_dma_t *dma = dev->dma;
	int i, j;

	if (dma == NULL)
		return;

	/* Clear dma buffers */
	for (i = 0; i <= DRM_MAX_ORDER; i++) {
		if (dma->bufs[i].seg_count) {
			DRM_DEBUG("order %d: buf_count = %d,"
			    " seg_count = %d\n", i, dma->bufs[i].buf_count,
			    dma->bufs[i].seg_count);
			for (j = 0; j < dma->bufs[i].seg_count; j++) {
				drm_pci_free(dev, dma->bufs[i].seglist[j]);
			}
			drm_free(dma->bufs[i].seglist, M_DRM);
		}

		if (dma->bufs[i].buf_count) {
			for (j = 0; j < dma->bufs[i].buf_count; j++) {
				drm_free(dma->bufs[i].buflist[j].dev_private,
				    M_DRM);
			}
			drm_free(dma->bufs[i].buflist, M_DRM);
		}
	}

	drm_free(dma->buflist, M_DRM);
	drm_free(dma->pagelist, M_DRM);
	drm_free(dev->dma, M_DRM);
	dev->dma = NULL;
	spin_uninit(&dev->dma_lock);
}
void drm_dma_takedown(struct drm_device *dev)
{
	drm_device_dma_t *dma = dev->dma;
	int i, j;

	if (dma == NULL)
		return;

	/* Clear dma buffers */
	for (i = 0; i <= DRM_MAX_ORDER; i++) {
		if (dma->bufs[i].seg_count) {
			DRM_DEBUG("order %d: buf_count = %d,"
			    " seg_count = %d\n", i, dma->bufs[i].buf_count,
			    dma->bufs[i].seg_count);
			for (j = 0; j < dma->bufs[i].seg_count; j++) {
				drm_pci_free(dev, dma->bufs[i].seglist[j]);
			}
			free(dma->bufs[i].seglist, DRM_MEM_SEGS);
		}

		if (dma->bufs[i].buf_count) {
			for (j = 0; j < dma->bufs[i].buf_count; j++) {
				free(dma->bufs[i].buflist[j].dev_private,
				    DRM_MEM_BUFS);
			}
			free(dma->bufs[i].buflist, DRM_MEM_BUFS);
		}
	}

	free(dma->buflist, DRM_MEM_BUFS);
	free(dma->pagelist, DRM_MEM_PAGES);
	free(dev->dma, DRM_MEM_DRIVER);
	dev->dma = NULL;
	DRM_SPINUNINIT(&dev->dma_lock);
}
int drm_ati_pcigart_cleanup(struct drm_device *dev,
    struct drm_ati_pcigart_info *gart_info)
{
	if (dev->sg == NULL) {
		DRM_ERROR("no scatter/gather memory!\n");
		return 0;
	}

	drm_pci_free(dev, dev->sg->dmah);

	return 1;
}
/**
 * Cleanup the DMA resources.
 *
 * \param dev DRM device.
 *
 * Free all pages associated with DMA buffers, the buffers and pages lists, and
 * finally the drm_device::dma structure itself.
 */
void drm_legacy_dma_takedown(struct drm_device *dev)
{
	struct drm_device_dma *dma = dev->dma;
	int i, j;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
	    drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	if (!dma)
		return;

	/* Clear dma buffers */
	for (i = 0; i <= DRM_MAX_ORDER; i++) {
		if (dma->bufs[i].seg_count) {
			DRM_DEBUG("order %d: buf_count = %d,"
				  " seg_count = %d\n",
				  i,
				  dma->bufs[i].buf_count,
				  dma->bufs[i].seg_count);
			for (j = 0; j < dma->bufs[i].seg_count; j++) {
				if (dma->bufs[i].seglist[j]) {
					drm_pci_free(dev, dma->bufs[i].seglist[j]);
				}
			}
			kfree(dma->bufs[i].seglist);
		}
		if (dma->bufs[i].buf_count) {
			for (j = 0; j < dma->bufs[i].buf_count; j++) {
				kfree(dma->bufs[i].buflist[j].dev_private);
			}
			kfree(dma->bufs[i].buflist);
		}
	}

	kfree(dma->buflist);
	kfree(dma->pagelist);
	kfree(dev->dma);
	dev->dma = NULL;
}
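/*
 * Aside (not part of the driver sources): the doc comment above describes a
 * fixed unwind order -- per-segment DMA frees first, then the per-order
 * seglist/buflist arrays, then the flat buflist/pagelist, and finally the
 * drm_device::dma structure itself, with the pointer cleared so a repeated
 * takedown is a no-op.  The following is a minimal, self-contained sketch of
 * that pattern; the struct and helper names (my_dma_pool, pool_seg_free,
 * pool_takedown) are hypothetical and only illustrate the ordering, they are
 * not part of the DRM API.
 */
#include <stdlib.h>

struct my_dma_pool {
	void **seglist;		/* per-segment DMA allocations */
	int seg_count;
	void **buflist;		/* per-buffer driver-private data */
	int buf_count;
};

/* Stand-in for drm_pci_free(): release one DMA-consistent segment. */
static void pool_seg_free(void *seg)
{
	free(seg);
}

/* Tear down in reverse order of allocation, then clear the caller's pointer
 * so calling this twice cannot double-free (mirrors dev->dma = NULL above). */
static void pool_takedown(struct my_dma_pool **poolp)
{
	struct my_dma_pool *pool = *poolp;
	int i;

	if (pool == NULL)
		return;

	for (i = 0; i < pool->seg_count; i++)
		pool_seg_free(pool->seglist[i]);
	free(pool->seglist);

	for (i = 0; i < pool->buf_count; i++)
		free(pool->buflist[i]);
	free(pool->buflist);

	free(pool);
	*poolp = NULL;
}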
void drm_dma_takedown(struct drm_device *dev)
{
	struct drm_device_dma *dma = dev->dma;
	int i, j;

	if (!dma)
		return;

	/* Free every DMA segment and buffer, one allocation order at a time. */
	for (i = 0; i <= DRM_MAX_ORDER; i++) {
		if (dma->bufs[i].seg_count) {
			DRM_DEBUG("order %d: buf_count = %d,"
				  " seg_count = %d\n",
				  i,
				  dma->bufs[i].buf_count,
				  dma->bufs[i].seg_count);
			for (j = 0; j < dma->bufs[i].seg_count; j++) {
				if (dma->bufs[i].seglist[j]) {
					drm_pci_free(dev, dma->bufs[i].seglist[j]);
				}
			}
			kfree(dma->bufs[i].seglist);
		}
		if (dma->bufs[i].buf_count) {
			for (j = 0; j < dma->bufs[i].buf_count; j++) {
				kfree(dma->bufs[i].buflist[j].dev_private);
			}
			kfree(dma->bufs[i].buflist);
		}
	}

	/* Then the flat buffer/page lists and the dma bookkeeping itself. */
	kfree(dma->buflist);
	kfree(dma->pagelist);
	kfree(dev->dma);
	dev->dma = NULL;
}
static void drm_ati_free_pcigart_table(struct drm_device *dev,
				       struct drm_ati_pcigart_info *gart_info)
{
	drm_pci_free(dev, dev->sg->dmah);
	dev->sg->dmah = NULL;
}
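/*
 * Aside (not part of the driver sources): drm_ati_free_pcigart_table() above
 * undoes a consistent-DMA allocation made earlier for the GART page table.
 * A rough sketch of that allocation side is shown below.  It assumes a
 * drm_pci_alloc()-style helper paired with drm_pci_free(); the exact
 * signature of drm_pci_alloc() varied across legacy DRM versions (some take
 * an extra maxaddr argument), and ATI_PCIGART_TABLE_SIZE stands in for
 * whatever table size the driver actually uses, so treat both as assumptions.
 */
static int example_alloc_pcigart_table(struct drm_device *dev)
{
	drm_dma_handle_t *dmah;

	/* Coherent DMA memory for the GART page table (illustrative call). */
	dmah = drm_pci_alloc(dev, ATI_PCIGART_TABLE_SIZE,
			     ATI_PCIGART_TABLE_SIZE);
	if (dmah == NULL)
		return -ENOMEM;

	/* Freed later by drm_pci_free() in the cleanup paths above. */
	dev->sg->dmah = dmah;
	return 0;
}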