Code example #1
File: i915_sysfs.c Project: 020gzh/linux
static u32 calc_residency(struct drm_device *dev,
			  i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 raw_time; /* 32b value may overflow during fixed point math */
	u64 units = 128ULL, div = 100000ULL;
	u32 ret;

	if (!intel_enable_rc6(dev))
		return 0;

	intel_runtime_pm_get(dev_priv);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		units = 1;
		div = dev_priv->czclk_freq;

		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
			units <<= 8;
	} else if (IS_BROXTON(dev)) {
		units = 1;
		div = 1200;		/* 833.33ns */
	}

	raw_time = I915_READ(reg) * units;
	ret = DIV_ROUND_UP_ULL(raw_time, div);

	intel_runtime_pm_put(dev_priv);
	return ret;
}
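For reference, the default branch above scales a counter that ticks every 1.28 us into milliseconds: units = 128 and div = 100000 correspond to count * 1.28 us / 1000, rounded up. A minimal standalone sketch of that arithmetic (the helper name is hypothetical, not part of the driver):

#include <stdint.h>

/* Hypothetical helper mirroring the default scaling in calc_residency(). */
static uint32_t rc6_ticks_to_ms(uint32_t count)
{
	/* 1 tick = 1.28 us, so ms = count * 128 / 100000, rounded up */
	uint64_t raw_time = (uint64_t)count * 128ULL;

	return (uint32_t)((raw_time + 100000ULL - 1ULL) / 100000ULL);
}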
Code example #2
/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	intel_runtime_pm_get(dev_priv);

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++) {
			DRM_DEBUG_KMS("enabling %s\n", power_well->name);
			power_well->ops->enable(dev_priv, power_well);
			power_well->hw_enabled = true;
		}
	}

	power_domains->domain_use_count[domain]++;

	mutex_unlock(&power_domains->lock);
}
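As the kernel-doc above requires, every reference taken here has to be balanced by intel_display_power_put(). A hedged usage sketch; the caller name and the chosen power domain are illustrative, not taken from the original source:

static void example_touch_aux_registers(struct drm_i915_private *dev_priv)
{
	/* grab only the innermost domain actually needed */
	intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_A);

	/* ... access registers that live inside that power well ... */

	/* symmetric release, as demanded by the kernel-doc above */
	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_A);
}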
Code example #3
/**
 * intel_csr_ucode_init() - initialize the firmware loading.
 * @dev: drm device.
 *
 * This function is called at the time of loading the display driver to read
 * firmware from a .bin file and copy it into internal memory.
 */
void intel_csr_ucode_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_csr *csr = &dev_priv->csr;
	int ret;

	if (!HAS_CSR(dev))
		return;

	if (IS_SKYLAKE(dev))
		csr->fw_path = I915_CSR_SKL;
	else {
		DRM_ERROR("Unexpected: no known CSR firmware for platform\n");
		intel_csr_load_status_set(dev_priv, FW_FAILED);
		return;
	}

	/*
	 * Obtain a runtime PM reference until the CSR firmware is loaded,
	 * to avoid entering runtime suspend in the meantime.
	 */
	intel_runtime_pm_get(dev_priv);

	/* CSR supported for platform, load firmware */
	ret = request_firmware_nowait(THIS_MODULE, true, csr->fw_path,
				&dev_priv->dev->pdev->dev,
				GFP_KERNEL, dev_priv,
				finish_csr_load);
	if (ret) {
		i915_firmware_load_error_print(csr->fw_path, ret);
		intel_csr_load_status_set(dev_priv, FW_FAILED);
	}
}
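The runtime PM reference taken above is deliberately left held here; it is expected to be released by the finish_csr_load completion callback once the firmware has been processed. A rough, hedged sketch of that release side (the body and the _sketch name are illustrative, not the driver's actual callback):

static void finish_csr_load_sketch(const struct firmware *fw, void *context)
{
	struct drm_i915_private *dev_priv = context;

	/* ... parse and copy the DMC payload, record the load status ... */

	/* drop the reference taken in intel_csr_ucode_init() */
	intel_runtime_pm_put(dev_priv);

	release_firmware(fw);
}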
Code example #4
File: i915_drv.c Project: Truefans/KVMGT-kernel
static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	intel_runtime_pm_get(dev_priv);

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	/* We do a lot of poking in a lot of registers, so make sure they
	 * work properly. */
	hsw_disable_package_c8(dev_priv);
	intel_display_set_init_power(dev, true);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int error;

		error = i915_gem_suspend(dev);
		if (error) {
			dev_err(&dev->pdev->dev,
				"GEM idle failed, resume might fail\n");
			return error;
		}

		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);

		drm_irq_uninstall(dev);
		dev_priv->enable_hotplug_processing = false;
		/*
		 * Disable CRTCs directly since we want to preserve sw state
		 * for _thaw.
		 */
		mutex_lock(&dev->mode_config.mutex);
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
			dev_priv->display.crtc_disable(crtc);
		mutex_unlock(&dev->mode_config.mutex);

		intel_modeset_suspend_hw(dev);
	}

	i915_gem_suspend_gtt_mappings(dev);

	i915_save_state(dev);

	intel_opregion_fini(dev);

	console_lock();
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
	console_unlock();

	return 0;
}
Code example #5
File: intel_uncore.c Project: volk3/CS736
int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	unsigned size;
	u64 offset;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == (reg->offset & -entry->size) &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	/* We use the low bits to encode extra flags as the register should
	 * be naturally aligned (and those that are not so aligned merely
	 * limit the available flags for that register).
	 */
	offset = entry->offset;
	size = entry->size;
	size |= reg->offset ^ offset;

	intel_runtime_pm_get(dev_priv);

	switch (size) {
	case 8 | 1:
		reg->val = I915_READ64_2x32(offset, offset+4);
		break;
	case 8:
		reg->val = I915_READ64(offset);
		break;
	case 4:
		reg->val = I915_READ(offset);
		break;
	case 2:
		reg->val = I915_READ16(offset);
		break;
	case 1:
		reg->val = I915_READ8(offset);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
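The comment above relies on the whitelisted registers being naturally aligned, so any low bits of the requested offset act as extra flags folded into size. A standalone worked example of that encoding (the offset is illustrative, not necessarily a whitelist entry):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t entry_offset = 0x2358;          /* aligned 64-bit register (illustrative) */
	unsigned int entry_size = 8;
	uint64_t req_offset = entry_offset | 1;  /* userspace sets flag bit 0 */

	unsigned int size = entry_size;
	size |= (unsigned int)(req_offset ^ entry_offset);

	/* 8 | 1 selects the I915_READ64_2x32() branch of the switch above */
	assert(size == (8 | 1));

	/* the whitelist match still succeeds because the low bits are masked off */
	assert(entry_offset == (req_offset & -(uint64_t)entry_size));
	return 0;
}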
Code example #6
File: i915_sysfs.c Project: mkahola/drm-intel-mika
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	int ret;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 freq;
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
	} else {
Code example #7
File: i915_drv.c Project: spacex/kernel-centos7
static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	intel_runtime_pm_get(dev_priv);

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	/* We do a lot of poking in a lot of registers, so make sure they
	 * work properly. */
	intel_display_set_init_power(dev_priv, true);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int error;

		error = i915_gem_suspend(dev);
		if (error) {
			dev_err(&dev->pdev->dev,
				"GEM idle failed, resume might fail\n");
			return error;
		}

		intel_disable_gt_powersave(dev);

		/*
		 * Disable CRTCs directly since we want to preserve sw state
		 * for _thaw.
		 */
		drm_modeset_lock_all(dev);
		for_each_crtc(dev, crtc) {
			dev_priv->display.crtc_disable(crtc);
		}
		drm_modeset_unlock_all(dev);

		intel_dp_mst_suspend(dev);
		drm_irq_uninstall(dev);

		intel_modeset_suspend_hw(dev);
	}
Code example #8
/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	intel_runtime_pm_get(dev_priv);

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev))
		return vlv_force_wake_get(dev_priv, fw_engine);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
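As the comment above explains, a sequence that must not let the GT power down brackets its register accesses with the get/put pair. A hedged usage sketch; the caller is hypothetical, and gen6_gt_force_wake_put() is the counterpart named in the comment:

static void example_forcewake_sequence(struct drm_i915_private *dev_priv)
{
	/* keep the GT awake for the whole dependent sequence */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	/* ... several register reads/writes that must see an awake GT ... */

	/* release, allowing the GT to power down again */
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}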
Code example #9
File: intel_uncore.c Project: 383530895/linux
int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == reg->offset &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);

	switch (entry->size) {
	case 8:
		reg->val = I915_READ64(reg->offset);
		break;
	case 4:
		reg->val = I915_READ(reg->offset);
		break;
	case 2:
		reg->val = I915_READ16(reg->offset);
		break;
	case 1:
		reg->val = I915_READ8(reg->offset);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
Code example #10
File: selftest_reset.c Project: grate-driver/linux
static int igt_wedged_reset(void *arg)
{
	struct drm_i915_private *i915 = arg;
	intel_wakeref_t wakeref;

	/* Check that we can recover a wedged device with a GPU reset */

	igt_global_reset_lock(i915);
	wakeref = intel_runtime_pm_get(i915);

	i915_gem_set_wedged(i915);

	GEM_BUG_ON(!i915_reset_failed(i915));
	i915_reset(i915, ALL_ENGINES, NULL);

	intel_runtime_pm_put(i915, wakeref);
	igt_global_reset_unlock(i915);

	return i915_reset_failed(i915) ? -EIO : 0;
}
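This selftest uses the cookie-based runtime PM API: intel_runtime_pm_get() returns an intel_wakeref_t that must be handed back to intel_runtime_pm_put(). In kernels of roughly the same vintage the pairing can also be written with the with_intel_runtime_pm() helper; a hedged sketch, assuming that macro is available in this tree:

static void example_wakeref_scope(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	/* takes the wakeref on entry, releases it when the block exits */
	with_intel_runtime_pm(i915, wakeref) {
		/* ... hardware access, device guaranteed awake in here ... */
	}
}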
Code example #11
File: intel_guc.c Project: AlexShiLucky/linux
/*
 * Create as many clients as there are doorbells. Note that clients/doorbells
 * are already created during driver load, but this test creates its own and
 * does not interact with the existing ones.
 */
static int igt_guc_doorbells(void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_guc *guc;
	int i, err = 0;
	u16 db_id;

	GEM_BUG_ON(!HAS_GUC(dev_priv));
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_runtime_pm_get(dev_priv);

	guc = &dev_priv->guc;
	if (!guc) {
		pr_err("No guc object!\n");
		err = -EINVAL;
		goto unlock;
	}

	err = check_all_doorbells(guc);
	if (err)
		goto unlock;

	for (i = 0; i < ATTEMPTS; i++) {
		clients[i] = guc_client_alloc(dev_priv,
					      INTEL_INFO(dev_priv)->ring_mask,
					      i % GUC_CLIENT_PRIORITY_NUM,
					      dev_priv->kernel_context);

		if (!clients[i]) {
			pr_err("[%d] No guc client\n", i);
			err = -EINVAL;
			goto out;
		}

		if (IS_ERR(clients[i])) {
			if (PTR_ERR(clients[i]) != -ENOSPC) {
				pr_err("[%d] unexpected error\n", i);
				err = PTR_ERR(clients[i]);
				goto out;
			}

			if (available_dbs(guc, i % GUC_CLIENT_PRIORITY_NUM)) {
				pr_err("[%d] non-db related alloc fail\n", i);
				err = -EINVAL;
				goto out;
			}

			/* expected, ran out of dbs for this client type */
			continue;
		}

		/*
		 * The check below is only valid because we keep a doorbell
		 * assigned during the whole life of the client.
		 */
		if (clients[i]->stage_id >= GUC_NUM_DOORBELLS) {
			pr_err("[%d] more clients than doorbells (%d >= %d)\n",
			       i, clients[i]->stage_id, GUC_NUM_DOORBELLS);
			err = -EINVAL;
			goto out;
		}

		err = validate_client(clients[i],
				      i % GUC_CLIENT_PRIORITY_NUM, false);
		if (err) {
			pr_err("[%d] client_alloc sanity check failed!\n", i);
			err = -EINVAL;
			goto out;
		}

		db_id = clients[i]->doorbell_id;

		err = __guc_client_enable(clients[i]);
		if (err) {
			pr_err("[%d] Failed to create a doorbell\n", i);
			goto out;
		}

		/* doorbell id shouldn't change, we are holding the mutex */
		if (db_id != clients[i]->doorbell_id) {
			pr_err("[%d] doorbell id changed (%d != %d)\n",
			       i, db_id, clients[i]->doorbell_id);
			err = -EINVAL;
			goto out;
		}

		err = check_all_doorbells(guc);
		if (err)
			goto out;

		err = ring_doorbell_nop(clients[i]);
		if (err)
			goto out;
	}

out:
	for (i = 0; i < ATTEMPTS; i++)
		if (!IS_ERR_OR_NULL(clients[i])) {
			__guc_client_disable(clients[i]);
			guc_client_free(clients[i]);
		}
unlock:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);
	return err;
}
Code example #12
File: intel_guc.c Project: AlexShiLucky/linux
/*
 * Check that we're able to synchronize guc_clients with their doorbells
 *
 * We're creating clients and reserving doorbells once, at module load. During
 * module lifetime, GuC, doorbell HW, and i915 state may go out of sync due to
 * GuC being reset. In other words - GuC clients are still around, but the
 * status of their doorbells may be incorrect. This is the reason for
 * validating that the doorbell status expected by the driver matches what
 * the GuC/HW actually has.
 */
static int igt_guc_clients(void *args)
{
	struct drm_i915_private *dev_priv = args;
	struct intel_guc *guc;
	int err = 0;

	GEM_BUG_ON(!HAS_GUC(dev_priv));
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_runtime_pm_get(dev_priv);

	guc = &dev_priv->guc;
	if (!guc) {
		pr_err("No guc object!\n");
		err = -EINVAL;
		goto unlock;
	}

	err = check_all_doorbells(guc);
	if (err)
		goto unlock;

	/*
	 * Get rid of clients created during driver load because the test will
	 * recreate them.
	 */
	guc_clients_disable(guc);
	guc_clients_destroy(guc);
	if (guc->execbuf_client || guc->preempt_client) {
		pr_err("guc_clients_destroy lied!\n");
		err = -EINVAL;
		goto unlock;
	}

	err = guc_clients_create(guc);
	if (err) {
		pr_err("Failed to create clients\n");
		goto unlock;
	}
	GEM_BUG_ON(!guc->execbuf_client);

	err = validate_client(guc->execbuf_client,
			      GUC_CLIENT_PRIORITY_KMD_NORMAL, false);
	if (err) {
		pr_err("execbug client validation failed\n");
		goto out;
	}

	if (guc->preempt_client) {
		err = validate_client(guc->preempt_client,
				      GUC_CLIENT_PRIORITY_KMD_HIGH, true);
		if (err) {
			pr_err("preempt client validation failed\n");
			goto out;
		}
	}

	/* each client should now have reserved a doorbell */
	if (!has_doorbell(guc->execbuf_client) ||
	    (guc->preempt_client && !has_doorbell(guc->preempt_client))) {
		pr_err("guc_clients_create didn't reserve doorbells\n");
		err = -EINVAL;
		goto out;
	}

	/* Now enable the clients */
	guc_clients_enable(guc);

	/* each client should now have received a doorbell */
	if (!client_doorbell_in_sync(guc->execbuf_client) ||
	    !client_doorbell_in_sync(guc->preempt_client)) {
		pr_err("failed to initialize the doorbells\n");
		err = -EINVAL;
		goto out;
	}

	/*
	 * Basic test - an attempt to reallocate a valid doorbell to the
	 * client it is currently assigned should not cause a failure.
	 */
	err = create_doorbell(guc->execbuf_client);

out:
	/*
	 * Leave a clean state for other tests; besides, the driver always
	 * destroys the clients during unload.
	 */
	guc_clients_disable(guc);
	guc_clients_destroy(guc);
	guc_clients_create(guc);
	guc_clients_enable(guc);
unlock:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);
	return err;
}
Code example #13
File: i915_gem_tiling.c Project: AK101111/linux
/**
 * i915_gem_set_tiling - IOCTL handler to set tiling mode
 * @dev: DRM device
 * @data: data pointer for the ioctl
 * @file: DRM file for the ioctl call
 *
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		   struct drm_file *file)
{
	struct drm_i915_gem_set_tiling *args = data;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;
	int ret = 0;

	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
	if (&obj->base == NULL)
		return -ENOENT;

	if (!i915_tiling_ok(dev,
			    args->stride, obj->base.size, args->tiling_mode)) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EINVAL;
	}

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev->struct_mutex);
	if (obj->pin_display || obj->framebuffer_references) {
		ret = -EBUSY;
		goto err;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user.  This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}

	if (args->tiling_mode != obj->tiling_mode ||
	    args->stride != obj->stride) {
		/* We need to rebind the object if its current allocation
		 * no longer meets the alignment restrictions for its new
		 * tiling mode. Otherwise we can just leave it alone, but
		 * need to ensure that any fence register is updated before
		 * the next fenced (either through the GTT or by the BLT unit
		 * on older GPUs) access.
		 *
		 * After updating the tiling parameters, we then flag whether
		 * we need to update an associated fence register. Note this
		 * has to also include the unfenced register the GPU uses
		 * whilst executing a fenced command for an untiled object.
		 */
		if (obj->map_and_fenceable &&
		    !i915_gem_object_fence_ok(obj, args->tiling_mode))
			ret = i915_vma_unbind(i915_gem_obj_to_ggtt(obj));

		if (ret == 0) {
			if (obj->pages &&
			    obj->madv == I915_MADV_WILLNEED &&
			    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
				if (args->tiling_mode == I915_TILING_NONE)
					i915_gem_object_unpin_pages(obj);
				if (obj->tiling_mode == I915_TILING_NONE)
					i915_gem_object_pin_pages(obj);
			}

			obj->fence_dirty =
				obj->last_fenced_req ||
				obj->fence_reg != I915_FENCE_REG_NONE;

			obj->tiling_mode = args->tiling_mode;
			obj->stride = args->stride;

			/* Force the fence to be reacquired for GTT access */
			i915_gem_release_mmap(obj);
		}
	}
	/* we have to maintain this existing ABI... */
	args->stride = obj->stride;
	args->tiling_mode = obj->tiling_mode;

	/* Try to preallocate memory required to save swizzling on put-pages */
	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		if (obj->bit_17 == NULL) {
			obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
					      sizeof(long), GFP_KERNEL);
		}
	} else {
Code example #14
File: intel_fbdev.c Project: Lyude/linux
static int intelfb_create(struct drm_fb_helper *helper,
			  struct drm_fb_helper_surface_size *sizes)
{
	struct intel_fbdev *ifbdev =
		container_of(helper, struct intel_fbdev, helper);
	struct intel_framebuffer *intel_fb = ifbdev->fb;
	struct drm_device *dev = helper->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct i915_vma *vma;
	unsigned long flags = 0;
	bool prealloc = false;
	void __iomem *vaddr;
	int ret;

	if (intel_fb &&
	    (sizes->fb_width > intel_fb->base.width ||
	     sizes->fb_height > intel_fb->base.height)) {
		DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
			      " releasing it\n",
			      intel_fb->base.width, intel_fb->base.height,
			      sizes->fb_width, sizes->fb_height);
		drm_framebuffer_put(&intel_fb->base);
		intel_fb = ifbdev->fb = NULL;
	}
	if (!intel_fb || WARN_ON(!intel_fb_obj(&intel_fb->base))) {
		DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
		ret = intelfb_alloc(helper, sizes);
		if (ret)
			return ret;
		intel_fb = ifbdev->fb;
	} else {
		DRM_DEBUG_KMS("re-using BIOS fb\n");
		prealloc = true;
		sizes->fb_width = intel_fb->base.width;
		sizes->fb_height = intel_fb->base.height;
	}

	mutex_lock(&dev->struct_mutex);
	intel_runtime_pm_get(dev_priv);

	/* Pin the GGTT vma for our access via info->screen_base.
	 * This also validates that any existing fb inherited from the
	 * BIOS is suitable for our own access.
	 */
	vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base,
					 DRM_MODE_ROTATE_0,
					 false, &flags);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out_unlock;
	}

	fb = &ifbdev->fb->base;
	intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_DIRTYFB);

	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info)) {
		DRM_ERROR("Failed to allocate fb_info\n");
		ret = PTR_ERR(info);
		goto out_unpin;
	}

	info->par = helper;

	ifbdev->helper.fb = fb;

	strcpy(info->fix.id, "inteldrmfb");

	info->fbops = &intelfb_ops;

	/* setup aperture base/size for vesafb takeover */
	info->apertures->ranges[0].base = dev->mode_config.fb_base;
	info->apertures->ranges[0].size = ggtt->mappable_end;

	info->fix.smem_start = dev->mode_config.fb_base + i915_ggtt_offset(vma);
	info->fix.smem_len = vma->node.size;

	vaddr = i915_vma_pin_iomap(vma);
	if (IS_ERR(vaddr)) {
		DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
		ret = PTR_ERR(vaddr);
		goto out_unpin;
	}
	info->screen_base = vaddr;
	info->screen_size = vma->node.size;

	/* This driver doesn't need a VT switch to restore the mode on resume */
	info->skip_vt_switch = true;

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);

	/* If the object is shmemfs backed, it will have given us zeroed pages.
	 * If the object is stolen however, it will be full of whatever
	 * garbage was left in there.
	 */
	if (intel_fb_obj(fb)->stolen && !prealloc)
		memset_io(info->screen_base, 0, info->screen_size);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n",
		      fb->width, fb->height, i915_ggtt_offset(vma));
	ifbdev->vma = vma;
	ifbdev->vma_flags = flags;

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
	vga_switcheroo_client_fb_set(pdev, info);
	return 0;

out_unpin:
	intel_unpin_fb_vma(vma, flags);
out_unlock:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
Code example #15
File: intel_uncore.c Project: avagin/linux
static int live_forcewake_ops(void *arg)
{
	static const struct reg {
		const char *name;
		unsigned long platforms;
		unsigned int offset;
	} registers[] = {
		{
			"RING_START",
			INTEL_GEN_MASK(6, 7),
			0x38,
		},
		{
			"RING_MI_MODE",
			INTEL_GEN_MASK(8, BITS_PER_LONG),
			0x9c,
		}
	};
	const struct reg *r;
	struct drm_i915_private *i915 = arg;
	struct intel_uncore_forcewake_domain *domain;
	struct intel_uncore *uncore = &i915->uncore;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	unsigned int tmp;
	int err = 0;

	GEM_BUG_ON(i915->gt.awake);

	/* vlv/chv with their pcu behave differently wrt reads */
	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		pr_debug("PCU fakes forcewake badly; skipping\n");
		return 0;
	}

	/*
	 * Not quite as reliable across the gen as one would hope.
	 *
	 * Either our theory of operation is incorrect, or there remain
	 * external parties interfering with the powerwells.
	 *
	 * https://bugs.freedesktop.org/show_bug.cgi?id=110210
	 */
	if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
		return 0;

	/* We have to pick carefully to get the exact behaviour we need */
	for (r = registers; r->name; r++)
		if (r->platforms & INTEL_INFO(i915)->gen_mask)
			break;
	if (!r->name) {
		pr_debug("Forcewaked register not known for %s; skipping\n",
			 intel_platform_name(INTEL_INFO(i915)->platform));
		return 0;
	}

	wakeref = intel_runtime_pm_get(i915);

	for_each_fw_domain(domain, uncore, tmp) {
		smp_store_mb(domain->active, false);
		if (!hrtimer_cancel(&domain->timer))
			continue;

		intel_uncore_fw_release_timer(&domain->timer);
	}