static void malidp_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct malidp_drm *malidp = drm->dev_private;
	struct malidp_hw_device *hwdev = malidp->dev;

	if (malidp->fbdev) {
		drm_fbdev_cma_fini(malidp->fbdev);
		malidp->fbdev = NULL;
	}
	drm_kms_helper_poll_fini(drm);
	malidp_se_irq_fini(drm);
	malidp_de_irq_fini(drm);
	drm_vblank_cleanup(drm);
	component_unbind_all(dev, drm);
	of_node_put(malidp->crtc.port);
	malidp->crtc.port = NULL;
	drm_dev_unregister(drm);
	malidp_de_planes_destroy(drm);
	drm_mode_config_cleanup(drm);
	drm->dev_private = NULL;
	dev_set_drvdata(dev, NULL);
	clk_disable_unprepare(hwdev->mclk);
	clk_disable_unprepare(hwdev->aclk);
	clk_disable_unprepare(hwdev->pclk);
	drm_dev_unref(drm);
	of_reserved_mem_device_release(dev);
}
void sun4i_framebuffer_free(struct drm_device *drm)
{
	struct sun4i_drv *drv = drm->dev_private;

	drm_fbdev_cma_fini(drv->fbdev);
	drm_mode_config_cleanup(drm);
}
int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev)
{
	int ret;

	drm_mode_config_init(fsl_dev->drm);

	fsl_dev->drm->mode_config.min_width = 0;
	fsl_dev->drm->mode_config.min_height = 0;
	fsl_dev->drm->mode_config.max_width = 2031;
	fsl_dev->drm->mode_config.max_height = 2047;
	fsl_dev->drm->mode_config.funcs = &fsl_dcu_drm_mode_config_funcs;

	ret = fsl_dcu_drm_crtc_create(fsl_dev);
	if (ret)
		goto err;

	ret = fsl_dcu_drm_encoder_create(fsl_dev, &fsl_dev->crtc);
	if (ret)
		goto err;

	ret = fsl_dcu_create_outputs(fsl_dev);
	if (ret)
		goto err;

	drm_mode_config_reset(fsl_dev->drm);
	drm_kms_helper_poll_init(fsl_dev->drm);

	return 0;

err:
	drm_mode_config_cleanup(fsl_dev->drm);
	return ret;
}
static int pdev_remove(struct platform_device *pdev)
{
	struct drm_device *ddev = platform_get_drvdata(pdev);
	struct omap_drm_private *priv = ddev->dev_private;

	DBG("");

	drm_dev_unregister(ddev);

	omap_modeset_disable_external_hpd();
	drm_kms_helper_poll_fini(ddev);

	if (priv->fbdev)
		omap_fbdev_free(ddev);

	drm_atomic_helper_shutdown(ddev);

	drm_mode_config_cleanup(ddev);

	omap_drm_irq_uninstall(ddev);
	omap_gem_deinit(ddev);

	drm_dev_unref(ddev);

	destroy_workqueue(priv->wq);
	kfree(priv);

	omap_disconnect_dssdevs();
	omap_crtc_pre_uninit();

	return 0;
}
static void tinydrm_fini(struct tinydrm_device *tdev)
{
	drm_mode_config_cleanup(tdev->drm);
	mutex_destroy(&tdev->dirty_lock);
	tdev->drm->dev_private = NULL;
	drm_dev_unref(tdev->drm);
}
void bochs_kms_fini(struct bochs_device *bochs)
{
	if (bochs->mode_config_initialized) {
		drm_mode_config_cleanup(bochs->dev);
		bochs->mode_config_initialized = false;
	}
}
static int kirin_drm_kms_init(struct drm_device *dev)
{
	struct kirin_drm_private *priv;
	int ret;

	priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev->dev_private = priv;
	dev_set_drvdata(dev->dev, dev);

	/* dev->mode_config initialization */
	drm_mode_config_init(dev);
	kirin_drm_mode_config_init(dev);

	/* display controller init */
	ret = dc_ops->init(to_platform_device(dev->dev));
	if (ret)
		goto err_mode_config_cleanup;

	/* bind and init sub drivers */
	ret = component_bind_all(dev->dev, dev);
	if (ret) {
		DRM_ERROR("failed to bind all component.\n");
		goto err_dc_cleanup;
	}

	/* vblank init */
	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret) {
		DRM_ERROR("failed to initialize vblank.\n");
		goto err_unbind_all;
	}
	/* with irq_enabled = true, we can use the vblank feature. */
	dev->irq_enabled = true;

	/* reset all the states of crtc/plane/encoder/connector */
	drm_mode_config_reset(dev);

	/* init kms poll for handling hpd */
	drm_kms_helper_poll_init(dev);

	/* force detection after connectors init */
	(void)drm_helper_hpd_irq_event(dev);

	return 0;

err_unbind_all:
	component_unbind_all(dev->dev, dev);
err_dc_cleanup:
	dc_ops->cleanup(to_platform_device(dev->dev));
err_mode_config_cleanup:
	drm_mode_config_cleanup(dev);
	devm_kfree(dev->dev, priv);
	dev->dev_private = NULL;

	return ret;
}
int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct mga_device *mdev;
	int r;

	mdev = devm_kzalloc(dev->dev, sizeof(struct mga_device), GFP_KERNEL);
	if (mdev == NULL)
		return -ENOMEM;
	dev->dev_private = (void *)mdev;
	mdev->dev = dev;

	r = mgag200_device_init(dev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
		return r;
	}
	r = mgag200_mm_init(mdev);
	if (r)
		goto err_mm;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = (void *)&mga_mode_funcs;
	if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024))
		dev->mode_config.preferred_depth = 16;
	else
		dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	r = mgag200_modeset_init(mdev);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
		goto err_modeset;
	}

	/* Make small buffers to store a hardware cursor (double buffered icon updates) */
	mgag200_bo_create(dev, roundup(48*64, PAGE_SIZE), 0, 0,
			  &mdev->cursor.pixels_1);
	mgag200_bo_create(dev, roundup(48*64, PAGE_SIZE), 0, 0,
			  &mdev->cursor.pixels_2);
	if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1) {
		mdev->cursor.pixels_1 = NULL;
		mdev->cursor.pixels_2 = NULL;
		dev_warn(&dev->pdev->dev,
			"Could not allocate space for cursors. Not doing hardware cursors.\n");
	} else {
		mdev->cursor.pixels_current = mdev->cursor.pixels_1;
		mdev->cursor.pixels_prev = mdev->cursor.pixels_2;
	}

	return 0;

err_modeset:
	drm_mode_config_cleanup(dev);
	mgag200_mm_fini(mdev);
err_mm:
	dev->dev_private = NULL;

	return r;
}
static void drv_unload(struct drm_device *ddev)
{
	DRM_DEBUG("%s\n", __func__);

	drm_kms_helper_poll_fini(ddev);
	ltdc_unload(ddev);
	drm_mode_config_cleanup(ddev);
}
static int pl111_modeset_init(struct drm_device *dev)
{
	struct drm_mode_config *mode_config;
	struct pl111_drm_dev_private *priv = dev->dev_private;
	int ret = 0;

	drm_mode_config_init(dev);
	mode_config = &dev->mode_config;
	mode_config->funcs = &mode_config_funcs;
	mode_config->min_width = 1;
	mode_config->max_width = 1024;
	mode_config->min_height = 1;
	mode_config->max_height = 768;

	ret = pl111_connector_init(dev);
	if (ret) {
		dev_err(dev->dev, "Failed to create pl111_drm_connector\n");
		goto out_config;
	}

	/* Don't actually attach if we didn't find a drm_panel
	 * attached to us.  This will allow a kernel to include both
	 * the fbdev pl111 driver and this one, and choose between
	 * them based on which subsystem has support for the panel.
	 */
	if (!priv->connector.panel) {
		dev_info(dev->dev,
			 "Disabling due to lack of DRM panel device.\n");
		ret = -ENODEV;
		goto out_config;
	}

	ret = pl111_display_init(dev);
	if (ret != 0) {
		dev_err(dev->dev, "Failed to init display\n");
		goto out_config;
	}

	ret = drm_vblank_init(dev, 1);
	if (ret != 0) {
		dev_err(dev->dev, "Failed to init vblank\n");
		goto out_config;
	}

	drm_mode_config_reset(dev);

	priv->fbdev = drm_fbdev_cma_init(dev, 32,
					 dev->mode_config.num_connector);

	drm_kms_helper_poll_init(dev);

	goto finish;

out_config:
	drm_mode_config_cleanup(dev);
finish:
	return ret;
}
void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
{
	int i;

	for (i = 0; i < vgdev->num_scanouts; ++i)
		kfree(vgdev->outputs[i].edid);
	drm_atomic_helper_shutdown(vgdev->ddev);
	drm_mode_config_cleanup(vgdev->ddev);
}
void cirrus_modeset_fini(struct cirrus_device *cdev)
{
	cirrus_fbdev_fini(cdev);

	if (cdev->mode_info.mode_config_initialized) {
		drm_mode_config_cleanup(cdev->dev);
		cdev->mode_info.mode_config_initialized = false;
	}
}
static int fsl_dcu_unload(struct drm_device *dev)
{
	drm_mode_config_cleanup(dev);
	drm_vblank_cleanup(dev);
	drm_irq_uninstall(dev);

	dev->dev_private = NULL;

	return 0;
}
void qxl_modeset_fini(struct qxl_device *qdev)
{
	qxl_fbdev_fini(qdev);

	qxl_destroy_monitors_object(qdev);
	if (qdev->mode_info.mode_config_initialized) {
		drm_mode_config_cleanup(qdev->ddev);
		qdev->mode_info.mode_config_initialized = false;
	}
}
static int shmob_drm_unload(struct drm_device *dev)
{
	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);
	drm_vblank_cleanup(dev);
	drm_irq_uninstall(dev);

	dev->dev_private = NULL;

	return 0;
}
static void sun4i_drv_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	drm_dev_unregister(drm);
	drm_kms_helper_poll_fini(drm);
	sun4i_framebuffer_free(drm);
	drm_mode_config_cleanup(drm);
	of_reserved_mem_device_release(dev);
	drm_dev_put(drm);
}
static int dce_virtual_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->mode_info.bios_hardcoded_edid);

	drm_kms_helper_poll_fini(adev->ddev);

	drm_mode_config_cleanup(adev->ddev);
	adev->mode_info.mode_config_initialized = false;
	return 0;
}
void komeda_kms_detach(struct komeda_kms_dev *kms)
{
	struct drm_device *drm = &kms->base;
	struct komeda_dev *mdev = drm->dev_private;

	drm_dev_unregister(drm);
	component_unbind_all(mdev->dev, drm);
	komeda_kms_cleanup_private_objs(mdev);
	drm_mode_config_cleanup(drm);
	drm->dev_private = NULL;
	drm_dev_put(drm);
}
struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
{
	struct komeda_kms_dev *kms = kzalloc(sizeof(*kms), GFP_KERNEL);
	struct drm_device *drm;
	int err;

	if (!kms)
		return ERR_PTR(-ENOMEM);

	drm = &kms->base;
	err = drm_dev_init(drm, &komeda_kms_driver, mdev->dev);
	if (err)
		goto free_kms;

	drm->dev_private = mdev;

	komeda_kms_mode_config_init(kms, mdev);

	err = komeda_kms_add_private_objs(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_planes(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = drm_vblank_init(drm, kms->n_crtcs);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_crtcs(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = component_bind_all(mdev->dev, kms);
	if (err)
		goto cleanup_mode_config;

	drm_mode_config_reset(drm);

	err = drm_dev_register(drm, 0);
	if (err)
		goto cleanup_mode_config;

	return kms;

cleanup_mode_config:
	drm_mode_config_cleanup(drm);
free_kms:
	kfree(kms);
	return ERR_PTR(err);
}
static int pl111_amba_remove(struct amba_device *amba_dev)
{
	struct drm_device *drm = amba_get_drvdata(amba_dev);
	struct pl111_drm_dev_private *priv = drm->dev_private;

	drm_dev_unregister(drm);
	if (priv->fbdev)
		drm_fbdev_cma_fini(priv->fbdev);
	drm_mode_config_cleanup(drm);
	drm_dev_unref(drm);

	return 0;
}
int mgag200_driver_unload(struct drm_device *dev)
{
	struct mga_device *mdev = dev->dev_private;

	if (mdev == NULL)
		return 0;
	mgag200_modeset_fini(mdev);
	mgag200_fbdev_fini(mdev);
	drm_mode_config_cleanup(dev);
	mgag200_mm_fini(mdev);
	dev->dev_private = NULL;
	return 0;
}
static void vc4_drm_unbind(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *drm = platform_get_drvdata(pdev);

	drm_dev_unregister(drm);

	drm_fb_cma_fbdev_fini(drm);

	drm_mode_config_cleanup(drm);

	drm_dev_unref(drm);
}
static int arcpgu_unload(struct drm_device *drm)
{
	struct arcpgu_drm_private *arcpgu = drm->dev_private;

	if (arcpgu->fbdev) {
		drm_fbdev_cma_fini(arcpgu->fbdev);
		arcpgu->fbdev = NULL;
	}
	drm_kms_helper_poll_fini(drm);
	drm_vblank_cleanup(drm);
	drm_mode_config_cleanup(drm);

	return 0;
}
static int rcar_du_remove(struct platform_device *pdev)
{
	struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
	struct drm_device *ddev = rcdu->ddev;

	drm_dev_unregister(ddev);

	drm_kms_helper_poll_fini(ddev);
	drm_mode_config_cleanup(ddev);

	drm_dev_put(ddev);

	return 0;
}
static void sun4i_drv_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	drm_dev_unregister(drm);
	drm_kms_helper_poll_fini(drm);
	drm_atomic_helper_shutdown(drm);
	drm_mode_config_cleanup(drm);

	component_unbind_all(dev, NULL);
	of_reserved_mem_device_release(dev);

	drm_dev_put(drm);
}
static int armada_drm_unload(struct drm_device *dev)
{
	struct armada_private *priv = dev->dev_private;

	drm_kms_helper_poll_fini(dev);
	armada_fbdev_fini(dev);

	drm_irq_uninstall(dev);
	drm_mode_config_cleanup(dev);
	drm_mm_takedown(&priv->linear);
	flush_work(&priv->fb_unref_work);
	dev->dev_private = NULL;

	return 0;
}
int ast_driver_unload(struct drm_device *dev)
{
	struct ast_private *ast = dev->dev_private;

	ast_mode_fini(dev);
	ast_fbdev_fini(dev);
	drm_mode_config_cleanup(dev);

	ast_mm_fini(ast);
	pci_iounmap(dev->pdev, ast->ioregs);
	pci_iounmap(dev->pdev, ast->regs);
	kfree(ast);
	return 0;
}
static int fsl_dcu_load(struct drm_device *dev, unsigned long flags)
{
	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
	int ret;

	ret = fsl_dcu_drm_modeset_init(fsl_dev);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize mode setting\n");
		return ret;
	}

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		goto done;
	}

	ret = fsl_dcu_drm_irq_init(dev);
	if (ret < 0)
		goto done;
	dev->irq_enabled = true;

	if (legacyfb_depth != 16 && legacyfb_depth != 24 &&
	    legacyfb_depth != 32) {
		dev_warn(dev->dev,
			 "Invalid legacyfb_depth. Defaulting to 24bpp\n");
		legacyfb_depth = 24;
	}

	fsl_dev->fbdev = drm_fbdev_cma_init(dev, legacyfb_depth, 1);
	if (IS_ERR(fsl_dev->fbdev)) {
		ret = PTR_ERR(fsl_dev->fbdev);
		fsl_dev->fbdev = NULL;
		goto done;
	}

	return 0;

done:
	drm_kms_helper_poll_fini(dev);

	if (fsl_dev->fbdev)
		drm_fbdev_cma_fini(fsl_dev->fbdev);

	drm_mode_config_cleanup(dev);
	drm_vblank_cleanup(dev);
	drm_irq_uninstall(dev);
	dev->dev_private = NULL;

	return ret;
}
static void imx_drm_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	drm_dev_unregister(drm);

	drm_kms_helper_poll_fini(drm);

	drm_mode_config_cleanup(drm);

	component_unbind_all(drm->dev, drm);
	dev_set_drvdata(dev, NULL);

	drm_dev_put(drm);
}
static void fsl_dcu_unload(struct drm_device *dev)
{
	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;

	drm_crtc_force_disable_all(dev);
	drm_kms_helper_poll_fini(dev);

	if (fsl_dev->fbdev)
		drm_fbdev_cma_fini(fsl_dev->fbdev);

	drm_mode_config_cleanup(dev);
	drm_vblank_cleanup(dev);
	drm_irq_uninstall(dev);
	dev->dev_private = NULL;
}