Example No. 1
static int i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_ringbuffer(dev);
		mutex_unlock(&dev->struct_mutex);

		drm_irq_install(dev);

		/* Resume the modeset for every activated CRTC */
		drm_helper_resume_force_mode(dev);
	}

	intel_opregion_init(dev);

	dev_priv->modeset_on_lid = 0;

	return error;
}
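
For context: a function like i915_drm_thaw() is not called directly by the PM core; it is reached through the driver's dev_pm_ops table via thin PCI wrappers. A minimal sketch of that wiring, assuming wrapper names (i915_pm_thaw() and friends) that follow the i915 convention of this era; illustrative, not the exact upstream code:

/* Sketch only: the wrapper names here are assumptions, not verified source. */
static int i915_pm_thaw(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_thaw(drm_dev);
}

static const struct dev_pm_ops i915_pm_ops = {
	.freeze  = i915_pm_freeze,   /* assumed counterpart on the suspend side */
	.thaw    = i915_pm_thaw,
	.restore = i915_pm_resume,   /* assumed to share the thaw/resume path */
};
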
Example No. 2
static int __i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		/* We need working interrupts for modeset enabling ... */
		drm_irq_install(dev);

		intel_modeset_init_hw(dev);
		intel_modeset_setup_hw_state(dev, false);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might lose hotplug
		 * notifications.
		 */
		intel_hpd_init(dev);
		dev_priv->enable_hotplug_processing = true;
	}

	intel_opregion_init(dev);

	/*
	 * The console lock can be pretty contended on resume due
	 * to all the printk activity.  Try to keep it out of the hot
	 * path of resume if possible.
	 */
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, 0);
		console_unlock();
	} else {
		schedule_work(&dev_priv->console_resume_work);
	}

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);
	return error;
}
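
The console_trylock()/schedule_work() split above keeps a contended console lock out of the resume hot path by deferring the fbdev resume to a worker. A plausible shape for that worker, with the intel_console_resume() name assumed rather than taken from this source:

/* Sketch of the worker behind console_resume_work; the name is assumed. */
static void intel_console_resume(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     console_resume_work);
	struct drm_device *dev = dev_priv->dev;

	console_lock();
	intel_fbdev_set_suspend(dev, 0);
	console_unlock();
}

It would be registered once at init time, e.g. INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);, so the thaw path only has to schedule it.
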
Example No. 3
static int i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		if (HAS_PCH_SPLIT(dev))
			ironlake_init_pch_refclk(dev);

		drm_mode_config_reset(dev);
		drm_irq_install(dev);

		/* Resume the modeset for every activated CRTC */
		mutex_lock(&dev->mode_config.mutex);
		drm_helper_resume_force_mode(dev);
		mutex_unlock(&dev->mode_config.mutex);

		if (IS_IRONLAKE_M(dev))
			ironlake_enable_rc6(dev);
	}

	intel_opregion_init(dev);

	dev_priv->modeset_on_lid = 0;

	console_lock();
	intel_fbdev_set_suspend(dev, 0);
	console_unlock();
	return error;
}
Example No. 4
static int i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	DRM_LOCK(dev);
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		i915_gem_restore_gtt_mappings(dev);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);

		if (HAS_PCH_SPLIT(dev))
			ironlake_init_pch_refclk(dev);

		DRM_UNLOCK(dev);
		lockmgr(&dev->mode_config.mutex, LK_EXCLUSIVE);
		drm_mode_config_reset(dev);
		lockmgr(&dev->mode_config.mutex, LK_RELEASE);
		drm_irq_install(dev);

		lockmgr(&dev->mode_config.mutex, LK_EXCLUSIVE);
		/* Resume the modeset for every activated CRTC */
		drm_helper_resume_force_mode(dev);
		lockmgr(&dev->mode_config.mutex, LK_RELEASE);

		if (IS_IRONLAKE_M(dev))
			ironlake_enable_rc6(dev);
		DRM_LOCK(dev);
	}

	intel_opregion_init(dev);

	dev_priv->modeset_on_lid = 0;

	DRM_UNLOCK(dev);

	return error;
}
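
Example No. 4 is a DragonFly BSD port, which explains the locking primitives that differ from the Linux examples. A rough mapping, for orientation only:

/*
 * DragonFly BSD                               Linux counterpart
 * ----------------------------------------   ------------------------------
 * DRM_LOCK(dev) / DRM_UNLOCK(dev)            mutex_lock()/mutex_unlock()
 *                                            on &dev->struct_mutex
 * lockmgr(&l, LK_EXCLUSIVE)                  mutex_lock(&l)
 * lockmgr(&l, LK_RELEASE)                    mutex_unlock(&l)
 */
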
Example No. 5
static int __i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		intel_modeset_init_hw(dev);
		intel_modeset_setup_hw_state(dev, false);
		drm_irq_install(dev);
	}

	intel_opregion_init(dev);

	dev_priv->modeset_on_lid = 0;

#ifndef __NetBSD__		/* XXX fb */
	/*
	 * The console lock can be pretty contended on resume due
	 * to all the printk activity.  Try to keep it out of the hot
	 * path of resume if possible.
	 */
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, 0);
		console_unlock();
	} else {
		schedule_work(&dev_priv->console_resume_work);
	}
#endif

	return error;
}
Example No. 6
static int i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
			ironlake_init_pch_refclk(dev);

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		intel_modeset_init_hw(dev);
		intel_modeset_setup_hw_state(dev, false);
		drm_mode_config_reset(dev);
		drm_irq_install(dev);
	}

	intel_opregion_init(dev);

	dev_priv->modeset_on_lid = 0;

	console_lock();
	intel_fbdev_set_suspend(dev, 0);
	console_unlock();
	return error;
}
Example No. 7
static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	disable_rpm_wakeref_asserts(dev_priv);

	mutex_lock(&dev->struct_mutex);
	i915_gem_restore_gtt_mappings(dev);
	mutex_unlock(&dev->struct_mutex);

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	intel_init_pch_refclk(dev);
	drm_mode_config_reset(dev);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	mutex_lock(&dev->struct_mutex);
	if (i915_gem_init_hw(dev)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
	}
	mutex_unlock(&dev->struct_mutex);

	intel_guc_resume(dev);

	intel_modeset_init_hw(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_dp_mst_resume(dev);

	intel_display_resume(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);
	/* Config may have changed between suspend and resume */
	drm_helper_hpd_irq_event(dev);

	intel_opregion_init(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_opregion_notify_adapter(dev, PCI_D0);

	drm_kms_helper_poll_enable(dev);

	enable_rpm_wakeref_asserts(dev_priv);

	return 0;
}
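
The disable_rpm_wakeref_asserts()/enable_rpm_wakeref_asserts() calls that bracket this function temporarily silence the runtime-PM wakeref sanity checks, since resume legitimately touches the hardware while the runtime-PM bookkeeping is still settling. The pattern is always a balanced bracket around the whole section, as in this sketch (the function name is hypothetical):

/* Sketch of the bracketing pattern; example_resume_step() is hypothetical. */
static void example_resume_step(struct drm_i915_private *dev_priv)
{
	disable_rpm_wakeref_asserts(dev_priv);

	/* ... hardware access that is known to be safe in this phase ... */

	enable_rpm_wakeref_asserts(dev_priv);
}
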
Example No. 8
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	intel_uncore_early_sanitize(dev);

	intel_uncore_sanitize(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    restore_gtt_mappings) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	intel_power_domains_init_hw(dev);

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);

		mutex_lock(&dev->struct_mutex);

		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		/* We need working interrupts for modeset enabling ... */
		drm_irq_install(dev);

		intel_modeset_init_hw(dev);

		drm_modeset_lock_all(dev);
		drm_mode_config_reset(dev);
		intel_modeset_setup_hw_state(dev, true);
		drm_modeset_unlock_all(dev);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might lose hotplug
		 * notifications.
		 */
		intel_hpd_init(dev);
		dev_priv->enable_hotplug_processing = true;
		/* Config may have changed between suspend and resume */
		intel_resume_hotplug(dev);
	}

	intel_opregion_init(dev);

	/*
	 * The console lock can be pretty contended on resume due
	 * to all the printk activity.  Try to keep it out of the hot
	 * path of resume if possible.
	 */
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
		console_unlock();
	} else {
		schedule_work(&dev_priv->console_resume_work);
	}

	/* Undo what we did in i915_drm_freeze so the refcount goes back to the
	 * expected level. */
	hsw_enable_package_c8(dev_priv);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_runtime_pm_put(dev_priv);
	return error;
}
Example No. 9
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	struct intel_device_info *info, *device_info;
	int ret = 0, mmio_bar, mmio_size;
	uint32_t aperture_size;

	info = (struct intel_device_info *) flags;

	/* Refuse to load on gen6+ without kms enabled. */
	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
		DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
		DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
		return -ENODEV;
	}

	/* UMS needs agp support. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp)
		return -EINVAL;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	gpu_perf_dev_priv = (void *)dev_priv;
	dev_priv->dev = dev;

	/* Setup the write-once "constant" device info */
	device_info = (struct intel_device_info *)&dev_priv->info;
	memcpy(device_info, info, sizeof(dev_priv->info));
	device_info->device_id = dev->pdev->device;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->dpio_lock);
	mutex_init(&dev_priv->modeset_restore_lock);

	intel_pm_setup(dev);

	intel_display_crc_init(dev);

	i915_dump_device_info(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap so that it
	 * does not clobber the GTT, which we want to map with ioremap_wc
	 * instead. Fortunately, the register BAR remains the same size for
	 * all the earlier generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_uncore_init(dev);

	if (i915_start_vgt(dev->pdev))
		i915_host_mediate = true;
	printk("i915_start_vgt: %s\n", i915_host_mediate ? "success" : "fail");

	i915_check_vgt(dev_priv);
	if (USES_VGT(dev))
		i915.enable_fbc = 0;

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto out_regs;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* WARNING: Apparently we must kick fbdev drivers before vgacon,
		 * otherwise the vga fbdev driver falls over. */
		ret = i915_kick_out_firmware_fb(dev_priv);
		if (ret) {
			DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
			goto out_gtt;
		}

		ret = i915_kick_out_vgacon(dev_priv);
		if (ret) {
			DRM_ERROR("failed to remove conflicting VGA console\n");
			goto out_gtt;
		}
	}

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

	aperture_size = dev_priv->gtt.mappable_end;

	dev_priv->gtt.mappable =
		io_mapping_create_wc(dev_priv->gtt.mappable_base,
				     aperture_size);
	if (dev_priv->gtt.mappable == NULL) {
		ret = -EIO;
		goto out_gtt;
	}

	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
					      aperture_size);

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}

	dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->dp_wq == NULL) {
		DRM_ERROR("Failed to create our dp workqueue.\n");
		ret = -ENOMEM;
		goto out_freewq;
	}

	intel_irq_init(dev_priv);
	intel_uncore_sanitize(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyway to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	intel_device_info_runtime_init(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
			goto out_gem_unload;
	}

	intel_power_domains_init(dev_priv);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_power_well;
		}
#ifdef DRM_I915_VGT_SUPPORT
		if (USES_VGT(dev)) {
			/*
			 * Tell VGT that we have a valid surface to show
			 * after modesetting. We don't distinguish DOM0 and
			 * Linux guests here; the PVINFO write handler will
			 * handle this.
			 */
			I915_WRITE(vgt_info_off(display_ready), 1);
		}
#endif
	}

	i915_setup_sysfs(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
		acpi_video_register();
	}

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	return 0;

out_power_well:
	intel_power_domains_fini(dev_priv);
	drm_vblank_cleanup(dev);
out_gem_unload:
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);
	pm_qos_remove_request(&dev_priv->pm_qos);
	destroy_workqueue(dev_priv->dp_wq);
out_freewq:
	destroy_workqueue(dev_priv->wq);
out_mtrrfree:
	arch_phys_wc_del(dev_priv->gtt.mtrr);
	io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
	i915_global_gtt_cleanup(dev);
out_regs:
	intel_uncore_fini(dev);
	pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);
	kfree(dev_priv);
	return ret;
}
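
The error path of i915_driver_load() is the classic kernel goto-unwind ladder: each failure jumps to a label that tears down only what has already been initialized, in reverse order. Reduced to a skeleton (all names hypothetical):

/* Skeleton of the goto-unwind idiom; every name here is hypothetical. */
static int example_load(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = setup_b();
	if (ret)
		goto out_a;

	ret = setup_c();
	if (ret)
		goto out_b;

	return 0;

out_b:					/* unwind in reverse order */
	teardown_b();
out_a:
	teardown_a();
	return ret;
}
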
Example No. 10
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    restore_gtt_mappings) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);
		drm_mode_config_reset(dev);

		mutex_lock(&dev->struct_mutex);
		if (i915_gem_init_hw(dev)) {
			DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
			atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
		}
		mutex_unlock(&dev->struct_mutex);

		intel_runtime_pm_restore_interrupts(dev);

		intel_modeset_init_hw(dev);

		{
			unsigned long irqflags;
			spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
			if (dev_priv->display.hpd_irq_setup)
				dev_priv->display.hpd_irq_setup(dev);
			spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
		}

		intel_dp_mst_resume(dev);
		drm_modeset_lock_all(dev);
		intel_modeset_setup_hw_state(dev, true);
		drm_modeset_unlock_all(dev);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might lose hotplug
		 * notifications.
		 */
		intel_hpd_init(dev);
		/* Config may have changed between suspend and resume */
		drm_helper_hpd_irq_event(dev);
	}

	intel_opregion_init(dev);

	/*
	 * The console lock can be pretty contended on resume due
	 * to all the printk activity.  Try to keep it out of the hot
	 * path of resume if possible.
	 */
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
		console_unlock();
	} else {
		schedule_work(&dev_priv->console_resume_work);
	}

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_opregion_notify_adapter(dev, PCI_D0);

	return 0;
}
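
A final note on the wedged-flag idiom: Example No. 10 uses atomic_set_mask() where Example No. 7 uses atomic_or(). Both set the I915_WEDGED bit; atomic_set_mask() was the older kernel API and was later replaced by atomic_or() in mainline, so the conversion between the two examples is mechanical:

/* Older API ... */
atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
/* ... and its later mainline equivalent. */
atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);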