/* Threaded IRQ handler for the TDA998x HDMI encoder.
 * Only 2 interrupts may occur: screen plug/unplug and EDID read. */
static irqreturn_t tda998x_irq_thread(int irq, void *data)
{
	struct tda998x_priv *priv = data;
	u8 sta, cec, lvl, flag0, flag1, flag2;

	/* Guard against a spurious IRQ delivered with no driver data. */
	if (!priv)
		return IRQ_HANDLED;

	/* Read all interrupt status/flag registers up front. */
	sta = cec_read(priv, REG_CEC_INTSTATUS);
	cec = cec_read(priv, REG_CEC_RXSHPDINT);
	lvl = cec_read(priv, REG_CEC_RXSHPDLEV);
	flag0 = reg_read(priv, REG_INT_FLAGS_0);
	flag1 = reg_read(priv, REG_INT_FLAGS_1);
	flag2 = reg_read(priv, REG_INT_FLAGS_2);
	DRM_DEBUG_DRIVER(
		"tda irq sta %02x cec %02x lvl %02x f0 %02x f1 %02x f2 %02x\n",
		sta, cec, lvl, flag0, flag1, flag2);

	if ((flag2 & INT_FLAGS_2_EDID_BLK_RD) && priv->wq_edid_wait) {
		/* EDID block read finished: wake the waiter on wq_edid. */
		priv->wq_edid_wait = 0;
		wake_up(&priv->wq_edid);
	} else if (cec != 0) {	/* HPD change */
		if (priv->encoder && priv->encoder->dev)
			drm_helper_hpd_irq_event(priv->encoder->dev);
	}
	return IRQ_HANDLED;
}
/* IRQ handler for the i.MX DesignWare HDMI bridge: tracks hot-plug
 * detect (HPD) state by flipping the PHY polarity between plug-in and
 * plug-out events and powering the link accordingly. */
static irqreturn_t imx_hdmi_irq(int irq, void *dev_id)
{
	struct imx_hdmi *hdmi = dev_id;
	u8 intr_stat;
	u8 phy_int_pol;

	intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0);
	phy_int_pol = hdmi_readb(hdmi, HDMI_PHY_POL0);

	if (intr_stat & HDMI_IH_PHY_STAT0_HPD) {
		/* The current polarity tells us which edge this was:
		 * HPD bit set in POL0 means we were waiting for plug-in. */
		if (phy_int_pol & HDMI_PHY_HPD) {
			dev_dbg(hdmi->dev, "EVENT=plugin\n");
			/* Flip polarity so the next event (unplug) fires. */
			hdmi_modb(hdmi, 0, HDMI_PHY_HPD, HDMI_PHY_POL0);
			imx_hdmi_poweron(hdmi);
		} else {
			dev_dbg(hdmi->dev, "EVENT=plugout\n");
			hdmi_modb(hdmi, HDMI_PHY_HPD, HDMI_PHY_HPD,
				  HDMI_PHY_POL0);
			imx_hdmi_poweroff(hdmi);
		}
		drm_helper_hpd_irq_event(hdmi->connector.dev);
	}

	/* Ack the handled status bits, then write the mute register so
	 * only the HPD interrupt stays enabled. */
	hdmi_writeb(hdmi, intr_stat, HDMI_IH_PHY_STAT0);
	hdmi_writeb(hdmi, ~HDMI_IH_PHY_STAT0_HPD, HDMI_IH_MUTE_PHY_STAT0);

	return IRQ_HANDLED;
}
/* sysfs store handler for the vboxvideo mode-hint attribute: parses a
 * "<width>x<height>" string, records it as this connector's preferred
 * mode hint, and fakes a hotplug so DRM and fbdev re-probe modes.
 *
 * Returns the number of bytes consumed (cch) on success, or -EINVAL on
 * a malformed or out-of-range mode string. */
ssize_t vbox_connector_write_sysfs(struct device *pDev,
				   struct device_attribute *pAttr,
				   const char *psz, size_t cch)
{
	struct vbox_connector *pVBoxConnector;
	struct drm_device *pDrmDev;
	struct vbox_private *pVBox;
	int cX, cY;
	char ch;

	LogFunc(("vboxvideo: %d: pDev=%p, pAttr=%p, psz=%s, cch=%llu\n",
		 __LINE__, pDev, pAttr, psz, (unsigned long long)cch));
	pVBoxConnector = container_of(pAttr, struct vbox_connector,
				      deviceAttribute);
	pDrmDev = pVBoxConnector->base.dev;
	pVBox = pDrmDev->dev_private;
	/* The trailing %c traps garbage: any extra character after the
	 * newline makes sscanf return 3, which is rejected here. */
	if (sscanf(psz, "%dx%d\n%c", &cX, &cY, &ch) != 2)
		return -EINVAL;
	if (   cX < 64 || cX > VBE_DISPI_MAX_XRES
	    || cY < 64 || cY > VBE_DISPI_MAX_YRES)
		return -EINVAL;
	pVBoxConnector->modeHint.cX = (uint16_t)cX;
	pVBoxConnector->modeHint.cY = (uint16_t)cY;
	/* Fake a hotplug so the new hint is picked up by userspace. */
	drm_helper_hpd_irq_event(pVBoxConnector->base.dev);
	if (pVBox->fbdev)
		drm_fb_helper_hotplug_event(&pVBox->fbdev->helper);
	LogFunc(("vboxvideo: %d\n", __LINE__));
	return cch;
}
void hdmi_connector_irq(struct drm_connector *connector) { struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); struct hdmi *hdmi = hdmi_connector->hdmi; uint32_t hpd_int_status, hpd_int_ctrl; /* Process HPD: */ hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS); hpd_int_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_INT_CTRL); if ((hpd_int_ctrl & HDMI_HPD_INT_CTRL_INT_EN) && (hpd_int_status & HDMI_HPD_INT_STATUS_INT)) { bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED); DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl); /* ack the irq: */ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK); drm_helper_hpd_irq_event(connector->dev); /* detect disconnect if we are connected or visa versa: */ hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN; if (!detected) hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT; hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl); } }
/* Initialise the KMS side of the kirin DRM device: mode config,
 * display controller, component sub-drivers, vblank, and HPD polling.
 * Returns 0 on success or a negative errno, unwinding everything set
 * up so far (in reverse order) on failure. */
static int kirin_drm_kms_init(struct drm_device *dev)
{
	struct kirin_drm_private *priv;
	int ret;

	priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev->dev_private = priv;
	dev_set_drvdata(dev->dev, dev);

	/* dev->mode_config initialization */
	drm_mode_config_init(dev);
	kirin_drm_mode_config_init(dev);

	/* display controller init */
	ret = dc_ops->init(to_platform_device(dev->dev));
	if (ret)
		goto err_mode_config_cleanup;

	/* bind and init sub drivers */
	ret = component_bind_all(dev->dev, dev);
	if (ret) {
		DRM_ERROR("failed to bind all component.\n");
		goto err_dc_cleanup;
	}

	/* vblank init */
	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret) {
		DRM_ERROR("failed to initialize vblank.\n");
		goto err_unbind_all;
	}
	/* with irq_enabled = true, we can use the vblank feature. */
	dev->irq_enabled = true;

	/* reset all the states of crtc/plane/encoder/connector */
	drm_mode_config_reset(dev);

	/* init kms poll for handling hpd */
	drm_kms_helper_poll_init(dev);

	/* force detection after connectors init */
	(void)drm_helper_hpd_irq_event(dev);

	return 0;

err_unbind_all:
	component_unbind_all(dev->dev, dev);
err_dc_cleanup:
	dc_ops->cleanup(to_platform_device(dev->dev));
err_mode_config_cleanup:
	drm_mode_config_cleanup(dev);
	/* devm memory would only be released on device detach; free it
	 * now so a retried init starts clean. */
	devm_kfree(dev->dev, priv);
	dev->dev_private = NULL;

	return ret;
}
/* Deferred HPD work: forward the hotplug event to the DRM core. */
static void hotplug_work(struct work_struct *work)
{
	struct hdmi_connector *hc =
		container_of(work, struct hdmi_connector, hpd_work);

	drm_helper_hpd_irq_event(hc->base.dev);
}
/* Deferred hotplug handler: just fire off a uevent and let userspace
 * tell us what to do. */
static void cdv_hotplug_work_func(struct work_struct *work)
{
	struct drm_psb_private *dev_priv =
		container_of(work, struct drm_psb_private, hotplug_work);

	drm_helper_hpd_irq_event(dev_priv->dev);
}
/* HPD GPIO interrupt: report the hotplug to DRM once the output's
 * connector has been registered with a device. */
static irqreturn_t hpd_irq(int irq, void *data)
{
	struct tegra_output *output = data;
	struct drm_device *drm = output->connector.dev;

	if (drm)
		drm_helper_hpd_irq_event(drm);

	return IRQ_HANDLED;
}
/* Re-read the client monitors config from the device ROM and signal a
 * hotplug so DRM re-probes the outputs.
 *
 * NOTE(review): the retry loop spins (logging each attempt) until the
 * CRC check passes; if the host never produces a consistent config
 * this never terminates -- confirm a retry bound is enforced
 * elsewhere. */
void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
{
	while (qxl_display_copy_rom_client_monitors_config(qdev)) {
		qxl_io_log(qdev, "failed crc check for client_monitors_config,"
				 " retrying\n");
	}
	drm_helper_hpd_irq_event(qdev->ddev);
}
/* Deferred hotplug work for the Exynos DP controller: notify DRM once
 * the device pointer has been set up. */
static void exynos_dp_hotplug(struct work_struct *work)
{
	struct exynos_dp_device *dp =
		container_of(work, struct exynos_dp_device, hotplug_work);

	if (dp->drm_dev)
		drm_helper_hpd_irq_event(dp->drm_dev);
}
/**
 * radeon_hotplug_work_func - display hotplug work handler
 *
 * @arg: pointer to the struct radeon_device
 * @pending: deferred-run count (unused here)
 *
 * This is the hot plug event work handler (all asics).
 * The work gets scheduled from the irq handler if there
 * was a hot plug interrupt. It walks the connector table
 * and calls the hotplug handler for each one, then sends
 * a drm hotplug event to alert userspace.
 */
static void radeon_hotplug_work_func(void *arg, int pending)
{
	struct radeon_device *rdev = arg;
	struct drm_device *dev = rdev->ddev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;

	if (mode_config->num_connector) {
		list_for_each_entry(connector, &mode_config->connector_list,
				    head)
			radeon_connector_hotplug(connector);
	}
	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}
/* Re-read the client monitors config (retrying until the CRC matches)
 * and notify DRM. When drm_helper_hpd_irq_event() reports that no
 * connector status changed, still send an explicit hotplug event so
 * userspace can adjust to an arbitrary-resolution change.
 *
 * NOTE(review): the CRC retry loop has no upper bound -- confirm the
 * host guarantees an eventually-consistent config. */
void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
{
	while (qxl_display_copy_rom_client_monitors_config(qdev)) {
		qxl_io_log(qdev, "failed crc check for client_monitors_config,"
				 " retrying\n");
	}
	if (!drm_helper_hpd_irq_event(qdev->ddev)) {
		/* notify that the monitor configuration changed, to
		 * adjust at the arbitrary resolution */
		drm_kms_helper_hotplug_event(qdev->ddev);
	}
}
/* Display hotplug work handler: walks the connector table, calls the
 * per-connector hotplug handler for each one, then sends a DRM hotplug
 * uevent to alert userspace. */
static void radeon_hotplug_work_func(struct work_struct *work)
{
	struct radeon_device *rdev = container_of(work, struct radeon_device,
						  hotplug_work);
	struct drm_device *dev = rdev->ddev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;

	/* Walk the connector list under mode_config.mutex so it cannot
	 * change underneath us; this matches the locking done by the
	 * other hotplug work handlers in this file. */
	mutex_lock(&mode_config->mutex);
	if (mode_config->num_connector) {
		list_for_each_entry(connector, &mode_config->connector_list,
				    head)
			radeon_connector_hotplug(connector);
	}
	mutex_unlock(&mode_config->mutex);
	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}
/**
 * amdgpu_hotplug_work_func - display hotplug work handler
 *
 * @work: work struct
 *
 * This is the hot plug event work handler (all asics).
 * The work gets scheduled from the irq handler if there
 * was a hot plug interrupt. It walks the connector table
 * and calls the hotplug handler for each one, then sends
 * a drm hotplug event to alert userspace.
 */
static void amdgpu_hotplug_work_func(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  hotplug_work);
	struct drm_device *dev = adev->ddev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;

	/* NOTE(review): the connector list is walked without holding
	 * mode_config.mutex, unlike the locked radeon variants in this
	 * file -- confirm connectors cannot be added or removed
	 * concurrently with this handler. */
	if (mode_config->num_connector) {
		list_for_each_entry(connector, &mode_config->connector_list,
				    head)
			amdgpu_connector_hotplug(connector);
	}
	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}
/**
 * radeon_hotplug_work_func - display hotplug work handler
 *
 * @work: work struct
 *
 * This is the hot plug event work handler (all asics).
 * The work gets scheduled from the irq handler if there
 * was a hot plug interrupt. It walks the connector table
 * and calls the hotplug handler for each one, then sends
 * a drm hotplug event to alert userspace.
 */
static void radeon_hotplug_work_func(struct work_struct *work)
{
	struct radeon_device *rdev = container_of(work, struct radeon_device,
						  hotplug_work);
	struct drm_device *dev = rdev->ddev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;

	/* Hold mode_config.mutex so the connector list cannot change
	 * while we walk it. */
	mutex_lock(&mode_config->mutex);
	if (mode_config->num_connector) {
		list_for_each_entry(connector, &mode_config->connector_list,
				    head)
			radeon_connector_hotplug(connector);
	}
	mutex_unlock(&mode_config->mutex);
	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}
static void intel_resume_hotplug(struct drm_device *dev) { struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *encoder; mutex_lock(&mode_config->mutex); DRM_DEBUG_KMS("running encoder hotplug functions\n"); list_for_each_entry(encoder, &mode_config->encoder_list, base.head) if (encoder->hot_plug) encoder->hot_plug(encoder); mutex_unlock(&mode_config->mutex); /* Just fire off a uevent and let userspace tell us what to do */ drm_helper_hpd_irq_event(dev); }
/* Worker for virtio-gpu config-change notifications: reads the event
 * bits from the device config space, refreshes display info (ending
 * in a hotplug event) on a DISPLAY event, then acknowledges the
 * handled events by writing them back to events_clear. */
static void virtio_gpu_config_changed_work_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     config_changed_work);
	u32 events_read, events_clear = 0;

	/* read the config space */
	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
		     events_read, &events_read);
	if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
		virtio_gpu_cmd_get_display_info(vgdev);
		drm_helper_hpd_irq_event(vgdev->ddev);
		events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
	}
	/* Ack only the events we actually handled. */
	virtio_cwrite(vgdev->vdev, struct virtio_gpu_config,
		      events_clear, &events_clear);
}
/* Tear down the painter's link to a closing DRM client: drop scanout
 * framebuffer references, clear the connection state, and report the
 * now-disconnected state via a hotplug event. Disconnect requests
 * from any file handle other than the registered client are ignored. */
void evdi_painter_disconnect(struct evdi_device *evdi, struct drm_file *file)
{
	struct evdi_painter *painter = evdi->painter;

	EVDI_CHECKPT();
	painter_lock(painter);

	/* Only the client that connected may disconnect us. */
	if (file != painter->drm_filp) {
		EVDI_WARN
		    ("(dev=%d) An unknown connection to %p tries to close us",
		     evdi->dev_index, file);
		EVDI_WARN(" - ignoring\n");
		painter_unlock(painter);
		return;
	}

	/* Release framebuffer references held for scanout. */
	if (painter->new_scanout_fb) {
		drm_framebuffer_unreference(&painter->new_scanout_fb->base);
		painter->new_scanout_fb = NULL;
	}
	if (painter->scanout_fb) {
		drm_framebuffer_unreference(&painter->scanout_fb->base);
		painter->scanout_fb = NULL;
	}

	painter->is_connected = false;

	EVDI_DEBUG("(dev=%d) Disconnected from %p\n", evdi->dev_index,
		   painter->drm_filp);
	painter->drm_filp = NULL;
	evdi->dev_index = -1;

	memset(&painter->current_mode, '\0', sizeof(struct drm_display_mode));
	painter->was_update_requested = false;

	painter_unlock(painter);

	/* Notify DRM of the connector state change (outside the lock). */
	drm_helper_hpd_irq_event(evdi->ddev);
}
/* Threaded interrupt handler to manage HPD events */ static irqreturn_t dw_hdmi_top_thread_irq(int irq, void *dev_id) { struct meson_dw_hdmi *dw_hdmi = dev_id; u32 stat = dw_hdmi->irq_stat; /* HPD Events */ if (stat & (HDMITX_TOP_INTR_HPD_RISE | HDMITX_TOP_INTR_HPD_FALL)) { bool hpd_connected = false; if (stat & HDMITX_TOP_INTR_HPD_RISE) hpd_connected = true; dw_hdmi_setup_rx_sense(dw_hdmi->hdmi, hpd_connected, hpd_connected); drm_helper_hpd_irq_event(dw_hdmi->encoder.dev); } return IRQ_HANDLED; }
static irqreturn_t analogix_dp_irq_thread(int irq, void *arg) { struct analogix_dp_device *dp = arg; enum dp_irq_type irq_type; irq_type = analogix_dp_get_irq_type(dp); if (irq_type & DP_IRQ_TYPE_HP_CABLE_IN || irq_type & DP_IRQ_TYPE_HP_CABLE_OUT) { dev_dbg(dp->dev, "Detected cable status changed!\n"); if (dp->drm_dev) drm_helper_hpd_irq_event(dp->drm_dev); } if (irq_type != DP_IRQ_TYPE_UNKNOWN) { analogix_dp_clear_hotplug_interrupts(dp); analogix_dp_unmute_hpd_interrupt(dp); } return IRQ_HANDLED; }
/**
 * radeon_hotplug_work_func - display hotplug work handler
 *
 * @work: work struct
 *
 * This is the hot plug event work handler (all asics).
 * The work gets scheduled from the irq handler if there
 * was a hot plug interrupt. It walks the connector table
 * and calls the hotplug handler for each one, then sends
 * a drm hotplug event to alert userspace.
 */
static void radeon_hotplug_work_func(struct work_struct *work)
{
	struct radeon_device *rdev = container_of(work, struct radeon_device,
						  hotplug_work);
	struct drm_device *dev = rdev->ddev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;

	/* we can race here at startup, some boards seem to trigger
	 * hotplug irqs when they shouldn't. */
	if (!rdev->mode_info.mode_config_initialized)
		return;

	/* Hold mode_config.mutex so the connector list cannot change
	 * while we walk it. */
	mutex_lock(&mode_config->mutex);
	if (mode_config->num_connector) {
		list_for_each_entry(connector, &mode_config->connector_list,
				    head)
			radeon_connector_hotplug(connector);
	}
	mutex_unlock(&mode_config->mutex);
	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}
static int mxsfb_load(struct drm_device *drm, unsigned long flags) { struct platform_device *pdev = to_platform_device(drm->dev); struct mxsfb_drm_private *mxsfb; struct resource *res; int ret; mxsfb = devm_kzalloc(&pdev->dev, sizeof(*mxsfb), GFP_KERNEL); if (!mxsfb) return -ENOMEM; drm->dev_private = mxsfb; mxsfb->devdata = &mxsfb_devdata[pdev->id_entry->driver_data]; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); mxsfb->base = devm_ioremap_resource(drm->dev, res); if (IS_ERR(mxsfb->base)) return PTR_ERR(mxsfb->base); mxsfb->clk = devm_clk_get(drm->dev, NULL); if (IS_ERR(mxsfb->clk)) return PTR_ERR(mxsfb->clk); mxsfb->clk_axi = devm_clk_get(drm->dev, "axi"); if (IS_ERR(mxsfb->clk_axi)) mxsfb->clk_axi = NULL; mxsfb->clk_disp_axi = devm_clk_get(drm->dev, "disp_axi"); if (IS_ERR(mxsfb->clk_disp_axi)) mxsfb->clk_disp_axi = NULL; ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32)); if (ret) return ret; pm_runtime_enable(drm->dev); ret = drm_vblank_init(drm, drm->mode_config.num_crtc); if (ret < 0) { dev_err(drm->dev, "Failed to initialise vblank\n"); goto err_vblank; } /* Modeset init */ drm_mode_config_init(drm); ret = mxsfb_create_output(drm); if (ret < 0) { dev_err(drm->dev, "Failed to create outputs\n"); goto err_vblank; } ret = drm_simple_display_pipe_init(drm, &mxsfb->pipe, &mxsfb_funcs, mxsfb_formats, ARRAY_SIZE(mxsfb_formats), &mxsfb->connector); if (ret < 0) { dev_err(drm->dev, "Cannot setup simple display pipe\n"); goto err_vblank; } ret = drm_panel_attach(mxsfb->panel, &mxsfb->connector); if (ret) { dev_err(drm->dev, "Cannot connect panel\n"); goto err_vblank; } drm->mode_config.min_width = MXSFB_MIN_XRES; drm->mode_config.min_height = MXSFB_MIN_YRES; drm->mode_config.max_width = MXSFB_MAX_XRES; drm->mode_config.max_height = MXSFB_MAX_YRES; drm->mode_config.funcs = &mxsfb_mode_config_funcs; drm_mode_config_reset(drm); pm_runtime_get_sync(drm->dev); ret = drm_irq_install(drm, platform_get_irq(pdev, 0)); pm_runtime_put_sync(drm->dev); if (ret < 
0) { dev_err(drm->dev, "Failed to install IRQ handler\n"); goto err_irq; } drm_kms_helper_poll_init(drm); mxsfb->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc, drm->mode_config.num_connector); if (IS_ERR(mxsfb->fbdev)) { mxsfb->fbdev = NULL; dev_err(drm->dev, "Failed to init FB CMA area\n"); goto err_cma; } platform_set_drvdata(pdev, drm); drm_helper_hpd_irq_event(drm); return 0; err_cma: drm_irq_uninstall(drm); err_irq: drm_panel_detach(mxsfb->panel); err_vblank: pm_runtime_disable(drm->dev); return ret; }
/* System resume handler for the i915 DRM device: restores GTT
 * mappings and saved register state, re-enables interrupts, brings
 * the GPU and modeset hardware back up, and re-arms hotplug so any
 * display changes that happened while suspended are noticed.
 * Returns 0. */
static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	disable_rpm_wakeref_asserts(dev_priv);

	mutex_lock(&dev->struct_mutex);
	i915_gem_restore_gtt_mappings(dev);
	mutex_unlock(&dev->struct_mutex);

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	intel_init_pch_refclk(dev);
	drm_mode_config_reset(dev);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	mutex_lock(&dev->struct_mutex);
	if (i915_gem_init_hw(dev)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
	}
	mutex_unlock(&dev->struct_mutex);

	intel_guc_resume(dev);

	intel_modeset_init_hw(dev);

	/* HPD IRQ setup must run under the IRQ spinlock. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_dp_mst_resume(dev);

	intel_display_resume(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might loose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);
	/* Config may have changed between suspend and resume */
	drm_helper_hpd_irq_event(dev);

	intel_opregion_init(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_opregion_notify_adapter(dev, PCI_D0);

	drm_kms_helper_poll_enable(dev);

	enable_rpm_wakeref_asserts(dev_priv);

	return 0;
}
/* DRM load callback for the Atmel HLCDC display controller: resolves
 * the controller description from the parent's DT match data, then
 * brings up the commit workqueue, peripheral clock, runtime PM,
 * vblank, mode setting, IRQ and connector polling, and finally forces
 * a first connector detection.
 * Returns 0 on success or a negative errno, unwinding on failure. */
static int atmel_hlcdc_dc_load(struct drm_device *dev)
{
	struct platform_device *pdev = to_platform_device(dev->dev);
	const struct of_device_id *match;
	struct atmel_hlcdc_dc *dc;
	int ret;

	match = of_match_node(atmel_hlcdc_of_match, dev->dev->parent->of_node);
	if (!match) {
		dev_err(&pdev->dev, "invalid compatible string\n");
		return -ENODEV;
	}

	if (!match->data) {
		dev_err(&pdev->dev, "invalid hlcdc description\n");
		return -EINVAL;
	}

	dc = devm_kzalloc(dev->dev, sizeof(*dc), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	/* Ordered workqueue: queued work items execute one at a time. */
	dc->wq = alloc_ordered_workqueue("atmel-hlcdc-dc", 0);
	if (!dc->wq)
		return -ENOMEM;

	init_waitqueue_head(&dc->commit.wait);
	dc->desc = match->data;
	dc->hlcdc = dev_get_drvdata(dev->dev->parent);
	dev->dev_private = dc;

	ret = clk_prepare_enable(dc->hlcdc->periph_clk);
	if (ret) {
		dev_err(dev->dev, "failed to enable periph_clk\n");
		goto err_destroy_wq;
	}

	pm_runtime_enable(dev->dev);

	ret = drm_vblank_init(dev, 1);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		goto err_periph_clk_disable;
	}

	ret = atmel_hlcdc_dc_modeset_init(dev);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize mode setting\n");
		goto err_periph_clk_disable;
	}

	drm_mode_config_reset(dev);

	/* IRQ install needs the device powered; bracket with runtime PM. */
	pm_runtime_get_sync(dev->dev);
	ret = drm_irq_install(dev, dc->hlcdc->irq);
	pm_runtime_put_sync(dev->dev);
	if (ret < 0) {
		dev_err(dev->dev, "failed to install IRQ handler\n");
		goto err_periph_clk_disable;
	}

	platform_set_drvdata(pdev, dev);

	drm_kms_helper_poll_init(dev);

	/* force connectors detection */
	drm_helper_hpd_irq_event(dev);

	return 0;

err_periph_clk_disable:
	pm_runtime_disable(dev->dev);
	clk_disable_unprepare(dc->hlcdc->periph_clk);

err_destroy_wq:
	destroy_workqueue(dc->wq);

	return ret;
}
/* Register a userspace client as the painter for this evdi device:
 * validates and copies its EDID, installs the client's file pointer,
 * and signals a hotplug so DRM re-probes the now-connected output.
 *
 * Returns 0 on success, -EINVAL for a bad EDID length/contents,
 * -ENOMEM on allocation failure, or -EFAULT if the EDID could not be
 * copied from userspace. */
int evdi_painter_connect(struct evdi_device *evdi,
			 void const __user *edid_data, unsigned int edid_length,
			 struct drm_file *file, int dev_index)
{
	struct evdi_painter *painter = evdi->painter;
	struct edid *new_edid = NULL;
	int expected_edid_size = 0;

	EVDI_CHECKPT();

	if (edid_length < sizeof(struct edid)) {
		EVDI_ERROR("Edid length too small\n");
		return -EINVAL;
	}

	if (edid_length > MAX_EDID_SIZE) {
		EVDI_ERROR("Edid length too large\n");
		return -EINVAL;
	}

	new_edid = kzalloc(edid_length, GFP_KERNEL);
	if (!new_edid)
		return -ENOMEM;

	if (copy_from_user(new_edid, edid_data, edid_length)) {
		EVDI_ERROR("(dev=%d) LSP Failed to read edid\n", dev_index);
		kfree(new_edid);
		return -EFAULT;
	}

	/* The extension count declared inside the EDID must match the
	 * length the caller claimed to send. */
	expected_edid_size = sizeof(struct edid) +
			     new_edid->extensions * EDID_EXT_BLOCK_SIZE;
	if (expected_edid_size != edid_length) {
		EVDI_ERROR("Wrong edid size. Expected %d but is %d\n",
			   expected_edid_size, edid_length);
		kfree(new_edid);
		return -EINVAL;
	}

	if (painter->drm_filp)
		EVDI_WARN("(dev=%d) Double connect - replacing %p with %p\n",
			  dev_index, painter->drm_filp, file);

	painter_lock(painter);

	evdi->dev_index = dev_index;
	painter->drm_filp = file;
	kfree(painter->edid);
	painter->edid_length = edid_length;
	painter->edid = new_edid;
	painter->is_connected = true;

	painter_unlock(painter);

	/* Log after the state is updated: previously this ran before the
	 * assignment and printed the stale (usually NULL) file pointer
	 * and the old device index. */
	EVDI_DEBUG("(dev=%d) Connected with %p\n", evdi->dev_index,
		   painter->drm_filp);

	drm_helper_hpd_irq_event(evdi->ddev);
	drm_helper_resume_force_mode(evdi->ddev);

	return 0;
}
/* Thaw half of the i915 resume path: optionally restores GTT
 * mappings, restores saved register state, and (for KMS) re-inits the
 * GPU, interrupts, modeset hardware and hotplug before notifying the
 * ACPI opregion and releasing the modeset-restore lockout.
 * Returns 0. */
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    restore_gtt_mappings) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);
		drm_mode_config_reset(dev);

		mutex_lock(&dev->struct_mutex);
		if (i915_gem_init_hw(dev)) {
			DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
			atomic_set_mask(I915_WEDGED,
					&dev_priv->gpu_error.reset_counter);
		}
		mutex_unlock(&dev->struct_mutex);

		intel_runtime_pm_restore_interrupts(dev);

		intel_modeset_init_hw(dev);

		/* HPD IRQ setup must run under the IRQ spinlock. */
		{
			unsigned long irqflags;

			spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
			if (dev_priv->display.hpd_irq_setup)
				dev_priv->display.hpd_irq_setup(dev);
			spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
		}

		intel_dp_mst_resume(dev);
		drm_modeset_lock_all(dev);
		intel_modeset_setup_hw_state(dev, true);
		drm_modeset_unlock_all(dev);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might loose hotplug
		 * notifications.
		 */
		intel_hpd_init(dev);
		/* Config may have changed between suspend and resume */
		drm_helper_hpd_irq_event(dev);
	}

	intel_opregion_init(dev);

	/*
	 * The console lock can be pretty contented on resume due
	 * to all the printk activity. Try to keep it out of the hot
	 * path of resume if possible.
	 */
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
		console_unlock();
	} else {
		schedule_work(&dev_priv->console_resume_work);
	}

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_opregion_notify_adapter(dev, PCI_D0);
	return 0;
}