static int armada_drm_unload(struct drm_device *dev) { struct armada_private *priv = dev->dev_private; drm_kms_helper_poll_fini(dev); armada_fbdev_fini(dev); if (is_componentized(dev->dev)) component_unbind_all(dev->dev, dev); drm_mode_config_cleanup(dev); drm_mm_takedown(&priv->linear); flush_work(&priv->fb_unref_work); dev->dev_private = NULL; return 0; }
static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man) { struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; struct drm_mm *mm = &rman->mm; mtx_lock(&rman->lock); if (drm_mm_clean(mm)) { drm_mm_takedown(mm); mtx_unlock(&rman->lock); mtx_destroy(&rman->lock); free(rman, M_TTM_RMAN); man->priv = NULL; return 0; } mtx_unlock(&rman->lock); return -EBUSY; }
/*
 * Release a TTM buffer-object device: disable and clean every initialized
 * memory manager, remove the device from the global device list, drain
 * pending delayed-destroy work, and tear down the address-space manager.
 *
 * Returns 0 on success, or -EBUSY if any non-system memory type still
 * holds allocations (teardown of the remaining state proceeds anyway).
 */
int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	/* Walk memory types in reverse, disabling each initialized one.
	 * TTM_PL_SYSTEM has no backing manager to clean. */
	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				printk(KERN_ERR TTM_PFX
				       "DRM memory manager type %d "
				       "is not clean.\n", i);
			}
			man->has_type = false;
		}
	}

	/* Unlink this device from the global list of TTM devices. */
	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	/* Ensure the delayed-destroy work item is neither queued nor
	 * currently running before we drain the list ourselves. */
	if (!cancel_delayed_work(&bdev->wq))
		flush_scheduled_work();

	/* Drain the delayed-destroy list until nothing is left. */
	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&glob->lru_lock);

	/* The VM address space must be empty before it is torn down. */
	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
	write_lock(&bdev->vm_lock);
	drm_mm_takedown(&bdev->addr_space_mm);
	write_unlock(&bdev->vm_lock);

	return ret;
}
/*
 * Tear down the MSM DRM device: stop output polling and vblank handling,
 * remove the IRQ handler, flush and destroy the driver workqueue, destroy
 * the KMS and GPU instances, release any carved-out VRAM, and unbind all
 * components before freeing the private data.
 */
static int msm_unload(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_gpu *gpu = priv->gpu;

	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);
	drm_vblank_cleanup(dev);

	/* Keep the device powered while the IRQ handler is removed. */
	pm_runtime_get_sync(dev->dev);
	drm_irq_uninstall(dev);
	pm_runtime_put_sync(dev->dev);

	/* Drain outstanding work before the queue (and kms/gpu it may
	 * reference) goes away. */
	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	if (kms) {
		pm_runtime_disable(dev->dev);
		kms->funcs->destroy(kms);
	}

	if (gpu) {
		mutex_lock(&dev->struct_mutex);
		gpu->funcs->pm_suspend(gpu);
		gpu->funcs->destroy(gpu);
		mutex_unlock(&dev->struct_mutex);
	}

	if (priv->vram.paddr) {
		DEFINE_DMA_ATTRS(attrs);
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
		drm_mm_takedown(&priv->vram.mm);
		/* NO_KERNEL_MAPPING was used at alloc time, so there is no
		 * CPU virtual address to pass back here. */
		dma_free_attrs(dev->dev, priv->vram.size, NULL,
			       priv->vram.paddr, &attrs);
	}

	component_unbind_all(dev->dev, dev);

	dev->dev_private = NULL;

	kfree(priv);

	return 0;
}
/*
 * Destroy an nv50 channel and clear the caller's reference.
 * Safe to call when *pchan is already NULL.
 */
static void nv50_channel_del(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;

	*pchan = NULL;
	if (chan == NULL)
		return;

	/* Drop GPU-object and VM references in reverse creation order. */
	nouveau_gpuobj_ref(NULL, &chan->ramfc);
	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
	nouveau_gpuobj_ref(NULL, &chan->vm_pd);

	if (drm_mm_initialized(&chan->ramin_heap))
		drm_mm_takedown(&chan->ramin_heap);

	nouveau_gpuobj_ref(NULL, &chan->ramin);
	kfree(chan);
}
int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) { struct ttm_bo_global *glob = bdev->glob; struct ttm_mem_type_manager *man; int ret = -EINVAL; if (mem_type >= TTM_NUM_MEM_TYPES) { printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type); return ret; } man = &bdev->man[mem_type]; if (!man->has_type) { printk(KERN_ERR TTM_PFX "Trying to take down uninitialized " "memory manager type %u\n", mem_type); return ret; } man->use_type = false; man->has_type = false; ret = 0; if (mem_type > 0) { ttm_bo_force_list_clean(bdev, mem_type, false); spin_lock(&glob->lru_lock); if (drm_mm_clean(&man->manager)) drm_mm_takedown(&man->manager); else ret = -EBUSY; spin_unlock(&glob->lru_lock); } return ret; }
/*
 * Tear down the nv50 instance-memory subsystem: restore the PRAMIN
 * window registers saved at init, drop the BAR dma objects and VMs,
 * destroy channel 0, and release the RAMIN heap.
 */
void nv50_instmem_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
	struct nouveau_channel *chan = dev_priv->channels.ptr[0];
	int i;

	NV_DEBUG(dev, "\n");

	if (!priv)
		return;

	dev_priv->ramin_available = false;

	nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);

	/* Restore the 0x1700-0x1710 register values saved during init. */
	for (i = 0x1700; i <= 0x1710; i += 4)
		nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);

	nouveau_gpuobj_ref(NULL, &priv->bar3_dmaobj);
	nouveau_gpuobj_ref(NULL, &priv->bar1_dmaobj);

	nouveau_vm_ref(NULL, &dev_priv->bar1_vm, chan->vm_pd);
	/* NOTE(review): presumably slot 127 aliases channel 0, so clear it
	 * before the channel is deleted — confirm against nv50 init code. */
	dev_priv->channels.ptr[127] = 0;
	nv50_channel_del(&dev_priv->channels.ptr[0]);

	nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
	nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);

	if (drm_mm_initialized(&dev_priv->ramin_heap))
		drm_mm_takedown(&dev_priv->ramin_heap);

	dev_priv->engine.instmem.priv = NULL;
	kfree(priv);
}
static int armada_drm_load(struct drm_device *dev, unsigned long flags) { const struct platform_device_id *id; const struct armada_variant *variant; struct armada_private *priv; struct resource *res[ARRAY_SIZE(priv->dcrtc)]; struct resource *mem = NULL; int ret, n, i; memset(res, 0, sizeof(res)); for (n = i = 0; ; n++) { struct resource *r = platform_get_resource(dev->platformdev, IORESOURCE_MEM, n); if (!r) break; /* Resources above 64K are graphics memory */ if (resource_size(r) > SZ_64K) mem = r; else if (i < ARRAY_SIZE(priv->dcrtc)) res[i++] = r; else return -EINVAL; } if (!mem) return -ENXIO; if (!devm_request_mem_region(dev->dev, mem->start, resource_size(mem), "armada-drm")) return -EBUSY; priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) { DRM_ERROR("failed to allocate private\n"); return -ENOMEM; } platform_set_drvdata(dev->platformdev, dev); dev->dev_private = priv; /* Get the implementation specific driver data. */ id = platform_get_device_id(dev->platformdev); if (!id) return -ENXIO; variant = (const struct armada_variant *)id->driver_data; INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work); INIT_KFIFO(priv->fb_unref); /* Mode setting support */ drm_mode_config_init(dev); dev->mode_config.min_width = 320; dev->mode_config.min_height = 200; /* * With vscale enabled, the maximum width is 1920 due to the * 1920 by 3 lines RAM */ dev->mode_config.max_width = 1920; dev->mode_config.max_height = 2048; dev->mode_config.preferred_depth = 24; dev->mode_config.funcs = &armada_drm_mode_config_funcs; drm_mm_init(&priv->linear, mem->start, resource_size(mem)); /* Create all LCD controllers */ for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) { int irq; if (!res[n]) break; irq = platform_get_irq(dev->platformdev, n); if (irq < 0) goto err_kms; ret = armada_drm_crtc_create(dev, dev->dev, res[n], irq, variant, NULL); if (ret) goto err_kms; } if (is_componentized(dev->dev)) { ret = component_bind_all(dev->dev, dev); if (ret) goto err_kms; } else { 
#ifdef CONFIG_DRM_ARMADA_TDA1998X ret = armada_drm_connector_slave_create(dev, &tda19988_config); if (ret) goto err_kms; #endif } ret = drm_vblank_init(dev, dev->mode_config.num_crtc); if (ret) goto err_comp; dev->irq_enabled = true; dev->vblank_disable_allowed = 1; ret = armada_fbdev_init(dev); if (ret) goto err_comp; drm_kms_helper_poll_init(dev); return 0; err_comp: if (is_componentized(dev->dev)) component_unbind_all(dev->dev, dev); err_kms: drm_mode_config_cleanup(dev); drm_mm_takedown(&priv->linear); flush_work(&priv->fb_unref_work); return ret; }
/*
 * Bind the armada DRM driver (componentized-only variant): claim the
 * graphics memory region, initialize mode setting and the linear memory
 * allocator, bind all components, and bring up vblank and fbdev support.
 *
 * Returns 0 on success or a negative errno on failure; on failure all
 * partially-initialized state is unwound via the err_* labels.
 */
static int armada_drm_load(struct drm_device *dev, unsigned long flags)
{
	struct armada_private *priv;
	struct resource *mem = NULL;
	int ret, n;

	for (n = 0; ; n++) {
		struct resource *r = platform_get_resource(dev->platformdev,
							   IORESOURCE_MEM, n);
		if (!r)
			break;

		/* Resources above 64K are graphics memory */
		if (resource_size(r) > SZ_64K)
			mem = r;
		else
			return -EINVAL;
	}

	if (!mem)
		return -ENXIO;

	if (!devm_request_mem_region(dev->dev, mem->start,
				     resource_size(mem), "armada-drm"))
		return -EBUSY;

	priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		DRM_ERROR("failed to allocate private\n");
		return -ENOMEM;
	}

	platform_set_drvdata(dev->platformdev, dev);
	dev->dev_private = priv;

	INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
	INIT_KFIFO(priv->fb_unref);

	/* Mode setting support */
	drm_mode_config_init(dev);
	dev->mode_config.min_width = 320;
	dev->mode_config.min_height = 200;

	/*
	 * With vscale enabled, the maximum width is 1920 due to the
	 * 1920 by 3 lines RAM
	 */
	dev->mode_config.max_width = 1920;
	dev->mode_config.max_height = 2048;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.funcs = &armada_drm_mode_config_funcs;
	drm_mm_init(&priv->linear, mem->start, resource_size(mem));
	mutex_init(&priv->linear_lock);

	ret = component_bind_all(dev->dev, dev);
	if (ret)
		goto err_kms;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret)
		goto err_comp;

	dev->irq_enabled = true;
	dev->vblank_disable_allowed = 1;

	ret = armada_fbdev_init(dev);
	if (ret)
		goto err_comp;

	drm_kms_helper_poll_init(dev);

	return 0;

 err_comp:
	component_unbind_all(dev->dev, dev);
 err_kms:
	drm_mode_config_cleanup(dev);
	drm_mm_takedown(&priv->linear);
	flush_work(&priv->fb_unref_work);

	return ret;
}