/*
 * evdi_driver_unload - driver unload hook.
 *
 * Tears down vblank support, KMS connector polling, connectors (API varies
 * by kernel version), the fbdev emulation, cursor, painter and stats state,
 * and the modeset configuration, then frees the per-device structure.
 * Teardown runs in reverse order of setup; do not reorder these calls.
 * Always returns 0.
 */
int evdi_driver_unload(struct drm_device *dev)
{
	struct evdi_device *evdi = dev->dev_private;

	EVDI_CHECKPT();

	drm_vblank_cleanup(dev);
	drm_kms_helper_poll_fini(dev);
#if KERNEL_VERSION(4, 8, 0) <= LINUX_VERSION_CODE
	/* >= 4.8: DRM core unregisters connectors itself; nothing to do */
#elif KERNEL_VERSION(4, 7, 0) <= LINUX_VERSION_CODE
	drm_connector_unregister_all(dev);
#else
	drm_connector_unplug_all(dev);
#endif
	evdi_fbdev_unplug(dev);
	/* cursor is optional; free only if one was allocated */
	if (evdi->cursor)
		evdi_cursor_free(evdi->cursor);
	evdi_painter_cleanup(evdi);
	evdi_stats_cleanup(evdi);

	evdi_fbdev_cleanup(dev);
	evdi_modeset_cleanup(dev);
	kfree(evdi);
	return 0;
}
/**
 * amdgpu_irq_fini - tear down driver interrupt info
 *
 * @adev: amdgpu device pointer
 *
 * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
 */
void amdgpu_irq_fini(struct amdgpu_device *adev)
{
	unsigned i;

	drm_vblank_cleanup(adev->ddev);
	if (adev->irq.installed) {
		drm_irq_uninstall(adev->ddev);
		adev->irq.installed = false;
		if (adev->irq.msi_enabled)
			pci_disable_msi(adev->pdev);
		/* make sure deferred hotplug handling has finished */
		flush_work(&adev->hotplug_work);
	}
	/* release per-source state for every registered IRQ source */
	for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
		struct amdgpu_irq_src *src = adev->irq.sources[i];

		if (!src)
			continue;

		kfree(src->enabled_types);
		src->enabled_types = NULL;
		/*
		 * NOTE(review): @src itself is freed only when ->data is
		 * set — presumably a non-NULL ->data marks dynamically
		 * allocated sources, while the rest are embedded in other
		 * structures and must not be kfree'd; confirm before
		 * "fixing" this asymmetry.
		 */
		if (src->data) {
			kfree(src->data);
			kfree(src);
			adev->irq.sources[i] = NULL;
		}
	}
}
static int dev_unload(struct drm_device *dev) { struct omap_drm_private *priv = dev->dev_private; int i; DBG("unload: dev=%p", dev); drm_kms_helper_poll_fini(dev); if (priv->fbdev) omap_fbdev_free(dev); /* flush crtcs so the fbs get released */ for (i = 0; i < priv->num_crtcs; i++) omap_crtc_flush(priv->crtcs[i]); omap_modeset_free(dev); omap_gem_deinit(dev); destroy_workqueue(priv->wq); drm_vblank_cleanup(dev); omap_drm_irq_uninstall(dev); kfree(dev->dev_private); dev->dev_private = NULL; dev_set_drvdata(dev->dev, NULL); return 0; }
static void malidp_unbind(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); struct malidp_drm *malidp = drm->dev_private; struct malidp_hw_device *hwdev = malidp->dev; if (malidp->fbdev) { drm_fbdev_cma_fini(malidp->fbdev); malidp->fbdev = NULL; } drm_kms_helper_poll_fini(drm); malidp_se_irq_fini(drm); malidp_de_irq_fini(drm); drm_vblank_cleanup(drm); component_unbind_all(dev, drm); of_node_put(malidp->crtc.port); malidp->crtc.port = NULL; drm_dev_unregister(drm); malidp_de_planes_destroy(drm); drm_mode_config_cleanup(drm); drm->dev_private = NULL; dev_set_drvdata(dev, NULL); clk_disable_unprepare(hwdev->mclk); clk_disable_unprepare(hwdev->aclk); clk_disable_unprepare(hwdev->pclk); drm_dev_unref(drm); of_reserved_mem_device_release(dev); }
/*
 * vmw_kms_sou_close_display - tear down Screen Object display support.
 *
 * Only vblank support needs cleanup here. Always returns 0.
 */
int vmw_kms_sou_close_display(struct vmw_private *dev_priv)
{
	drm_vblank_cleanup(dev_priv->dev);

	return 0;
}
/**
 * radeon_irq_kms_fini - tear down driver interrupt info
 *
 * @rdev: radeon device pointer
 *
 * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
 */
void radeon_irq_kms_fini(struct radeon_device *rdev)
{
	drm_vblank_cleanup(rdev->ddev);
	if (rdev->irq.installed) {
		drm_irq_uninstall(rdev->ddev);
		rdev->irq.installed = false;
	}
	/* wait for any queued hotplug work on the taskqueue to finish */
	taskqueue_drain(rdev->tq, &rdev->hotplug_work);
}
/*
 * sun4i_drv_unbind - component master unbind hook.
 *
 * Unregisters the device, stops KMS polling, frees the framebuffer
 * layer and vblank state, then drops the final drm_device reference.
 */
static void sun4i_drv_unbind(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_dev_unregister(ddev);
	drm_kms_helper_poll_fini(ddev);
	sun4i_framebuffer_free(ddev);
	drm_vblank_cleanup(ddev);
	drm_dev_unref(ddev);
}
/*
 * fsl_dcu_unload - driver unload hook for the Freescale DCU.
 *
 * Releases mode config and vblank state, detaches the IRQ handler and
 * clears the private-data pointer. Always returns 0.
 */
static int fsl_dcu_unload(struct drm_device *dev)
{
	drm_mode_config_cleanup(dev);
	drm_vblank_cleanup(dev);
	drm_irq_uninstall(dev);

	dev->dev_private = NULL;

	return 0;
}
/*
 * radeon_irq_kms_fini - tear down KMS interrupt support.
 *
 * Cleans up vblank state; if the IRQ handler was installed, removes it
 * and disables MSI when it had been enabled.
 */
void radeon_irq_kms_fini(struct radeon_device *rdev)
{
	drm_vblank_cleanup(rdev->ddev);
	if (rdev->irq.installed) {
		drm_irq_uninstall(rdev->ddev);
		rdev->irq.installed = false;
		/* MSI must only be disabled if it was enabled at init */
		if (rdev->msi_enabled)
			pci_disable_msi(rdev->pdev);
	}
}
/*
 * shmob_drm_unload - driver unload hook.
 *
 * Stops connector polling, releases mode config and vblank state, and
 * detaches the IRQ handler before clearing dev_private. Always returns 0.
 */
static int shmob_drm_unload(struct drm_device *dev)
{
	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);
	drm_vblank_cleanup(dev);
	drm_irq_uninstall(dev);

	dev->dev_private = NULL;

	return 0;
}
/*
 * sun4i_drv_unbind - component master unbind hook.
 *
 * Unregisters the device, stops KMS polling, frees framebuffer, mode
 * config and vblank state, releases the reserved-memory region and
 * finally drops the drm_device reference.
 */
static void sun4i_drv_unbind(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_dev_unregister(ddev);
	drm_kms_helper_poll_fini(ddev);
	sun4i_framebuffer_free(ddev);
	drm_mode_config_cleanup(ddev);
	drm_vblank_cleanup(ddev);
	of_reserved_mem_device_release(dev);
	drm_dev_unref(ddev);
}
int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv) { struct drm_device *dev = dev_priv->dev; if (!dev_priv->sou_priv) return -ENOSYS; drm_vblank_cleanup(dev); kfree(dev_priv->sou_priv); return 0; }
/*
 * udl_driver_unload - driver unload hook.
 *
 * Releases vblank state, frees any outstanding URBs, and tears down the
 * fbdev emulation and modeset state before freeing the device struct.
 * Always returns 0.
 */
int udl_driver_unload(struct drm_device *dev)
{
	struct udl_device *udl_dev = dev->dev_private;

	drm_vblank_cleanup(dev);

	/* URB list exists only while the device is streaming */
	if (udl_dev->urbs.count)
		udl_free_urb_list(dev);

	udl_fbdev_cleanup(dev);
	udl_modeset_cleanup(dev);
	kfree(udl_dev);
	return 0;
}
/*
 * arcpgu_unload - driver unload hook.
 *
 * Drops the fbdev emulation if present, then tears down KMS polling,
 * vblank support and the mode configuration. Always returns 0.
 */
static int arcpgu_unload(struct drm_device *drm)
{
	struct arcpgu_drm_private *priv = drm->dev_private;

	if (priv->fbdev) {
		drm_fbdev_cma_fini(priv->fbdev);
		priv->fbdev = NULL;
	}
	drm_kms_helper_poll_fini(drm);
	drm_vblank_cleanup(drm);
	drm_mode_config_cleanup(drm);

	return 0;
}
/*
 * vmw_kms_sou_init_display - initialize the Screen Object display units.
 *
 * Returns 0 on success; -EINVAL if the SOU system is already on,
 * -ENOSYS if the device lacks the SCREEN_OBJECT_2 capability, -ENOMEM
 * on allocation failure, or the error from vblank/property setup.
 * On failure, previously acquired resources are unwound via the goto
 * labels in reverse order of acquisition.
 */
int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int i, ret;

	/* refuse double initialization */
	if (dev_priv->sou_priv) {
		DRM_INFO("sou system already on\n");
		return -EINVAL;
	}

	if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
		DRM_INFO("Not using screen objects,"
			 " missing cap SCREEN_OBJECT_2\n");
		return -ENOSYS;
	}

	ret = -ENOMEM;
	dev_priv->sou_priv = kmalloc(sizeof(*dev_priv->sou_priv), GFP_KERNEL);
	if (unlikely(!dev_priv->sou_priv))
		goto err_no_mem;

	/* no implicit framebuffer placement yet */
	dev_priv->sou_priv->num_implicit = 0;
	dev_priv->sou_priv->implicit_fb = NULL;

	ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
	if (unlikely(ret != 0))
		goto err_free;

	ret = drm_mode_create_dirty_info_property(dev);
	if (unlikely(ret != 0))
		goto err_vblank_cleanup;

	/* create one display unit per supported head */
	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
		vmw_sou_init(dev_priv, i);

	dev_priv->active_display_unit = vmw_du_screen_object;

	DRM_INFO("Screen Objects Display Unit initialized\n");

	return 0;

err_vblank_cleanup:
	drm_vblank_cleanup(dev);
err_free:
	kfree(dev_priv->sou_priv);
	dev_priv->sou_priv = NULL;
err_no_mem:
	return ret;
}
/*
 * fsl_dcu_load - driver load hook for the Freescale DCU.
 *
 * Sets up mode setting, vblank support, the IRQ handler and the fbdev
 * emulation (validating the module's legacyfb_depth parameter first).
 * Returns 0 on success or a negative error code; on any failure after
 * modeset init, the shared "done" path tears everything down again.
 */
static int fsl_dcu_load(struct drm_device *dev, unsigned long flags)
{
	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
	int ret;

	ret = fsl_dcu_drm_modeset_init(fsl_dev);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize mode setting\n");
		return ret;
	}

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		goto done;
	}

	ret = fsl_dcu_drm_irq_init(dev);
	if (ret < 0)
		goto done;
	dev->irq_enabled = true;

	/* only 16/24/32 bpp are supported by the fbdev emulation */
	if (legacyfb_depth != 16 && legacyfb_depth != 24 &&
	    legacyfb_depth != 32) {
		dev_warn(dev->dev,
			 "Invalid legacyfb_depth. Defaulting to 24bpp\n");
		legacyfb_depth = 24;
	}

	fsl_dev->fbdev = drm_fbdev_cma_init(dev, legacyfb_depth, 1);
	if (IS_ERR(fsl_dev->fbdev)) {
		ret = PTR_ERR(fsl_dev->fbdev);
		/* clear so the error path below skips fbdev teardown */
		fsl_dev->fbdev = NULL;
		goto done;
	}

	return 0;
done:
	/* common unwind: mirror of fsl_dcu_unload */
	drm_kms_helper_poll_fini(dev);

	if (fsl_dev->fbdev)
		drm_fbdev_cma_fini(fsl_dev->fbdev);

	drm_mode_config_cleanup(dev);
	drm_vblank_cleanup(dev);
	drm_irq_uninstall(dev);
	dev->dev_private = NULL;

	return ret;
}
/**
 * radeon_irq_kms_fini - tear down driver interrupt info
 *
 * @rdev: radeon device pointer
 *
 * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
 * MSI disable and hotplug-work flushing are compiled out ("notyet") on
 * this port.
 */
void radeon_irq_kms_fini(struct radeon_device *rdev)
{
	drm_vblank_cleanup(rdev->ddev);
	if (rdev->irq.installed) {
		drm_irq_uninstall(rdev->ddev);
		rdev->irq.installed = false;
#ifdef notyet
		if (rdev->msi_enabled)
			pci_disable_msi(rdev->pdev);
#endif
	}
#ifdef notyet
	flush_work(&rdev->hotplug_work);
#endif
}
/*
 * rcar_du_unload - driver unload hook.
 *
 * Drops the fbdev emulation if present, then tears down KMS polling,
 * mode config and vblank state, marks interrupts disabled and clears
 * the private-data pointer. Always returns 0.
 */
static int rcar_du_unload(struct drm_device *dev)
{
	struct rcar_du_device *du = dev->dev_private;

	if (du->fbdev)
		drm_fbdev_cma_fini(du->fbdev);

	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);
	drm_vblank_cleanup(dev);

	dev->irq_enabled = 0;
	dev->dev_private = NULL;

	return 0;
}
/*
 * fsl_dcu_unload - driver unload hook for the Freescale DCU.
 *
 * Forces all CRTCs off, stops KMS polling, drops the fbdev emulation
 * if present, then releases mode config and vblank state and detaches
 * the IRQ handler.
 */
static void fsl_dcu_unload(struct drm_device *dev)
{
	struct fsl_dcu_drm_device *dcu = dev->dev_private;

	drm_crtc_force_disable_all(dev);
	drm_kms_helper_poll_fini(dev);

	if (dcu->fbdev)
		drm_fbdev_cma_fini(dcu->fbdev);

	drm_mode_config_cleanup(dev);
	drm_vblank_cleanup(dev);
	drm_irq_uninstall(dev);

	dev->dev_private = NULL;
}
/*
 * xylon_drm_unload - driver unload hook.
 *
 * Detaches the IRQ handler, tears down vblank support and KMS polling,
 * finalizes the fbdev emulation and releases the mode configuration.
 * Always returns 0.
 */
static int xylon_drm_unload(struct drm_device *dev)
{
	struct xylon_drm_device *xylon = dev->dev_private;

	xylon_drm_irq_uninstall(dev);
	drm_vblank_cleanup(dev);
	drm_kms_helper_poll_fini(dev);
	xylon_drm_fbdev_fini(xylon->fbdev);
	drm_mode_config_cleanup(dev);

	return 0;
}
/*
 * dev_unload - OMAP DRM unload hook.
 *
 * Tears down vblank support, KMS polling, the fbdev emulation and the
 * modeset state, then frees the private data. Always returns 0.
 */
static int dev_unload(struct drm_device *dev)
{
	DBG("unload: dev=%p", dev);

	drm_vblank_cleanup(dev);
	drm_kms_helper_poll_fini(dev);

	omap_fbdev_free(dev);
	omap_modeset_free(dev);

	kfree(dev->dev_private);
	dev->dev_private = NULL;

	return 0;
}
/*
 * qxl_driver_unload - driver unload hook.
 *
 * No-op when the device was never initialized; otherwise releases
 * vblank state, the modeset configuration and the qxl device before
 * freeing the private structure. Always returns 0.
 */
int qxl_driver_unload(struct drm_device *dev)
{
	struct qxl_device *qxl = dev->dev_private;

	if (!qxl)
		return 0;

	drm_vblank_cleanup(dev);

	qxl_modeset_fini(qxl);
	qxl_device_fini(qxl);

	kfree(qxl);
	dev->dev_private = NULL;
	return 0;
}
/*
 * fsl_dcu_unload - driver unload hook for the Freescale DCU.
 *
 * Stops KMS polling, drops the fbdev emulation if present, releases
 * mode config and vblank state, and detaches the IRQ handler.
 * Always returns 0.
 */
static int fsl_dcu_unload(struct drm_device *dev)
{
	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;

	drm_kms_helper_poll_fini(dev);

	if (fsl_dev->fbdev)
		drm_fbdev_cma_fini(fsl_dev->fbdev);

	drm_mode_config_cleanup(dev);
	drm_vblank_cleanup(dev);
	drm_irq_uninstall(dev);

	dev->dev_private = NULL;

	return 0;
}
/*
 * msm_unload - driver unload hook.
 *
 * Tears down KMS state, the IRQ handler (with the device runtime-
 * resumed around the uninstall), the private workqueue, the KMS and
 * GPU objects, any carved-out VRAM, and the component children, then
 * frees the private data. Always returns 0.
 */
static int msm_unload(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_gpu *gpu = priv->gpu;

	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);
	drm_vblank_cleanup(dev);

	/* device must be powered while the IRQ is removed */
	pm_runtime_get_sync(dev->dev);
	drm_irq_uninstall(dev);
	pm_runtime_put_sync(dev->dev);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	if (kms) {
		pm_runtime_disable(dev->dev);
		kms->funcs->destroy(kms);
	}

	/* GPU teardown is serialized on the device struct_mutex */
	if (gpu) {
		mutex_lock(&dev->struct_mutex);
		gpu->funcs->pm_suspend(gpu);
		gpu->funcs->destroy(gpu);
		mutex_unlock(&dev->struct_mutex);
	}

	if (priv->vram.paddr) {
		DEFINE_DMA_ATTRS(attrs);
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
		drm_mm_takedown(&priv->vram.mm);
		/*
		 * NOTE(review): cpu_addr is NULL — presumably fine with
		 * DMA_ATTR_NO_KERNEL_MAPPING since no kernel mapping was
		 * ever created; confirm against the allocation site.
		 */
		dma_free_attrs(dev->dev, priv->vram.size, NULL,
			       priv->vram.paddr, &attrs);
	}

	component_unbind_all(dev->dev, dev);

	dev->dev_private = NULL;

	kfree(priv);

	return 0;
}
/*
 * rcar_du_remove - platform device removal hook.
 *
 * Unregisters the DRM device first so userspace can no longer reach
 * it, then drops the fbdev emulation, KMS polling, mode config and
 * vblank state, and releases the final drm_device reference.
 * Always returns 0.
 */
static int rcar_du_remove(struct platform_device *pdev)
{
	struct rcar_du_device *du = platform_get_drvdata(pdev);
	struct drm_device *drm = du->ddev;

	drm_dev_unregister(drm);

	if (du->fbdev)
		drm_fbdev_cma_fini(du->fbdev);

	drm_kms_helper_poll_fini(drm);
	drm_mode_config_cleanup(drm);
	drm_vblank_cleanup(drm);

	drm_dev_unref(drm);

	return 0;
}
/*
 * nouveau_card_takedown - full card teardown, reverse of card init.
 *
 * Shuts down the channel and acceleration engines, the fixed-function
 * engines, VGA RAM pinning, TTM memory domains, GART/instmem/VRAM,
 * interrupts, vblank, power management and BIOS state, and finally
 * deregisters the VGA client. Ordering mirrors initialization and
 * must not be changed.
 */
static void nouveau_card_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;

	/* fences/channel only exist when acceleration came up */
	if (!engine->graph.accel_blocked) {
		nouveau_fence_fini(dev);
		nouveau_channel_put_unlocked(&dev_priv->channel);
	}

	if (!nouveau_noaccel) {
		engine->fifo.takedown(dev);
		engine->crypt.takedown(dev);
		engine->graph.takedown(dev);
	}
	engine->fb.takedown(dev);
	engine->timer.takedown(dev);
	engine->gpio.takedown(dev);
	engine->mc.takedown(dev);
	engine->display.late_takedown(dev);

	if (dev_priv->vga_ram) {
		nouveau_bo_unpin(dev_priv->vga_ram);
		nouveau_bo_ref(NULL, &dev_priv->vga_ram);
	}

	/* TTM memory-manager teardown is serialized on struct_mutex */
	mutex_lock(&dev->struct_mutex);
	ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
	mutex_unlock(&dev->struct_mutex);
	nouveau_mem_gart_fini(dev);

	engine->instmem.takedown(dev);
	nouveau_gpuobj_takedown(dev);
	nouveau_mem_vram_fini(dev);

	nouveau_irq_fini(dev);
	drm_vblank_cleanup(dev);

	nouveau_pm_fini(dev);
	nouveau_bios_takedown(dev);

	vga_client_register(dev->pdev, NULL, NULL, NULL);
}
static void mxsfb_unload(struct drm_device *drm) { struct mxsfb_drm_private *mxsfb = drm->dev_private; if (mxsfb->fbdev) drm_fbdev_cma_fini(mxsfb->fbdev); drm_kms_helper_poll_fini(drm); drm_mode_config_cleanup(drm); drm_vblank_cleanup(drm); pm_runtime_get_sync(drm->dev); drm_irq_uninstall(drm); pm_runtime_put_sync(drm->dev); drm->dev_private = NULL; pm_runtime_disable(drm->dev); }
/*
 * kirin_drm_kms_cleanup - tear down the KMS pipeline.
 *
 * Drops the fbdev emulation (only built with FBDEV emulation enabled),
 * stops KMS polling and vblank support, lets the display-controller
 * ops clean up, releases the mode config and frees the private data.
 * Always returns 0.
 */
static int kirin_drm_kms_cleanup(struct drm_device *dev)
{
	struct kirin_drm_private *priv = dev->dev_private;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (priv->fbdev) {
		drm_fbdev_cma_fini(priv->fbdev);
		priv->fbdev = NULL;
	}
#endif
	drm_kms_helper_poll_fini(dev);
	drm_vblank_cleanup(dev);
	dc_ops->cleanup(to_platform_device(dev->dev));
	drm_mode_config_cleanup(dev);
	devm_kfree(dev->dev, priv);
	dev->dev_private = NULL;

	return 0;
}
static int shmob_drm_unload(struct drm_device *dev) { struct shmob_drm_device *sdev = dev->dev_private; drm_kms_helper_poll_fini(dev); drm_mode_config_cleanup(dev); drm_vblank_cleanup(dev); drm_irq_uninstall(dev); if (sdev->clock) clk_put(sdev->clock); if (sdev->mmio) iounmap(sdev->mmio); dev->dev_private = NULL; kfree(sdev); return 0; }
static int dev_unload(struct drm_device *dev) { struct omap_drm_private *priv = dev->dev_private; DBG("unload: dev=%p", dev); drm_vblank_cleanup(dev); drm_kms_helper_poll_fini(dev); omap_fbdev_free(dev); omap_modeset_free(dev); omap_gem_deinit(dev); flush_workqueue(priv->wq); destroy_workqueue(priv->wq); kfree(dev->dev_private); dev->dev_private = NULL; return 0; }