static u32 calc_residency(struct drm_device *dev, i915_reg_t reg)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u64 raw_time; /* 32b value may overflow during fixed point math */
        u64 units = 128ULL, div = 100000ULL;
        u32 ret;

        if (!intel_enable_rc6(dev))
                return 0;

        intel_runtime_pm_get(dev_priv);

        /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
                units = 1;
                div = dev_priv->czclk_freq;

                if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
                        units <<= 8;
        } else if (IS_BROXTON(dev)) {
                units = 1;
                div = 1200;             /* 833.33ns */
        }

        raw_time = I915_READ(reg) * units;
        ret = DIV_ROUND_UP_ULL(raw_time, div);

        intel_runtime_pm_put(dev_priv);
        return ret;
}
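/*
 * Worked example (illustrative sketch, not driver code): with the default
 * units = 128 and div = 100000 above, one raw counter tick is 1.28us and
 * the result comes out in milliseconds. The helper name below is
 * hypothetical.
 */
static u64 calc_residency_example(u64 raw_count)
{
        /* e.g. 78125000 ticks * 128 / 100000 = 100000 -> 100 s in RC6 */
        return DIV_ROUND_UP_ULL(raw_count * 128ULL, 100000ULL);
}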
/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
        unsigned long irqflags;
        bool delayed = false;

        if (!dev_priv->uncore.funcs.force_wake_put)
                return;

        /* Redirect to VLV specific routine */
        if (IS_VALLEYVIEW(dev_priv->dev)) {
                vlv_force_wake_put(dev_priv, fw_engine);
                goto out;
        }

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        WARN_ON(!dev_priv->uncore.forcewake_count);

        if (--dev_priv->uncore.forcewake_count == 0) {
                dev_priv->uncore.forcewake_count++;
                delayed = true;
                mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
                                 jiffies + 1);
        }
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

out:
        if (!delayed)
                intel_runtime_pm_put(dev_priv);
}
int i915_reg_read_ioctl(struct drm_device *dev,
                        void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_reg_read *reg = data;
        struct register_whitelist const *entry = whitelist;
        unsigned size;
        u64 offset;
        int i, ret = 0;

        for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
                if (entry->offset == (reg->offset & -entry->size) &&
                    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
                        break;
        }

        if (i == ARRAY_SIZE(whitelist))
                return -EINVAL;

        /* We use the low bits to encode extra flags as the register should
         * be naturally aligned (and those that are not so aligned merely
         * limit the available flags for that register).
         */
        offset = entry->offset;
        size = entry->size;
        size |= reg->offset ^ offset;

        intel_runtime_pm_get(dev_priv);

        switch (size) {
        case 8 | 1:
                reg->val = I915_READ64_2x32(offset, offset+4);
                break;
        case 8:
                reg->val = I915_READ64(offset);
                break;
        case 4:
                reg->val = I915_READ(offset);
                break;
        case 2:
                reg->val = I915_READ16(offset);
                break;
        case 1:
                reg->val = I915_READ8(offset);
                break;
        default:
                ret = -EINVAL;
                goto out;
        }

out:
        intel_runtime_pm_put(dev_priv);
        return ret;
}
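/*
 * Hypothetical userspace sketch (assumes the drm uapi headers, and that the
 * render ring timestamp at 0x2358 is a whitelisted 64-bit entry, as in
 * historical i915 whitelists): the caller passes a register offset in
 * struct drm_i915_reg_read and gets the value back in .val. Offsets not on
 * the whitelist fail with -EINVAL.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int read_render_timestamp(int drm_fd, uint64_t *val)
{
        struct drm_i915_reg_read reg = {
                .offset = 0x2358,       /* assumed whitelisted 64-bit register */
        };
        int ret = ioctl(drm_fd, DRM_IOCTL_I915_REG_READ, &reg);

        if (ret == 0)
                *val = reg.val;
        return ret;
}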
static void gen6_force_wake_timer(unsigned long arg)
{
        struct drm_i915_private *dev_priv = (void *)arg;
        unsigned long irqflags;

        assert_device_not_suspended(dev_priv);

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        WARN_ON(!dev_priv->uncore.forcewake_count);

        if (--dev_priv->uncore.forcewake_count == 0)
                dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        intel_runtime_pm_put(dev_priv);
}
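/*
 * Minimal sketch (illustrative, not driver code) of the deferred-release
 * idiom shared by gen6_gt_force_wake_put() and gen6_force_wake_timer()
 * above: when the refcount would hit zero, the put path re-takes a
 * reference and arms a timer, so the expensive hardware release happens
 * once, slightly later, in the timer callback rather than in the caller's
 * context. The names (struct deferred_ref, deferred_put) are hypothetical.
 */
struct deferred_ref {
        spinlock_t lock;
        unsigned int count;
        struct timer_list timer;        /* callback drops the held reference */
};

static void deferred_put(struct deferred_ref *ref)
{
        unsigned long flags;

        spin_lock_irqsave(&ref->lock, flags);
        if (--ref->count == 0) {
                ref->count++;                        /* keep it alive ... */
                mod_timer(&ref->timer, jiffies + 1); /* ... until the timer fires */
        }
        spin_unlock_irqrestore(&ref->lock, flags);
}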
int i915_reg_read_ioctl(struct drm_device *dev,
                        void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_reg_read *reg = data;
        struct register_whitelist const *entry = whitelist;
        int i, ret = 0;

        for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
                if (entry->offset == reg->offset &&
                    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
                        break;
        }

        if (i == ARRAY_SIZE(whitelist))
                return -EINVAL;

        intel_runtime_pm_get(dev_priv);

        switch (entry->size) {
        case 8:
                reg->val = I915_READ64(reg->offset);
                break;
        case 4:
                reg->val = I915_READ(reg->offset);
                break;
        case 2:
                reg->val = I915_READ16(reg->offset);
                break;
        case 1:
                reg->val = I915_READ8(reg->offset);
                break;
        default:
                WARN_ON(1);
                ret = -EINVAL;
                goto out;
        }

out:
        intel_runtime_pm_put(dev_priv);
        return ret;
}
static int igt_wedged_reset(void *arg)
{
        struct drm_i915_private *i915 = arg;
        intel_wakeref_t wakeref;

        /* Check that we can recover a wedged device with a GPU reset */

        igt_global_reset_lock(i915);
        wakeref = intel_runtime_pm_get(i915);

        i915_gem_set_wedged(i915);

        GEM_BUG_ON(!i915_reset_failed(i915));
        i915_reset(i915, ALL_ENGINES, NULL);

        intel_runtime_pm_put(i915, wakeref);
        igt_global_reset_unlock(i915);

        return i915_reset_failed(i915) ? -EIO : 0;
}
/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
        unsigned long irqflags;

        if (!dev_priv->uncore.funcs.force_wake_put)
                return;

        /* Redirect to VLV specific routine */
        if (IS_VALLEYVIEW(dev_priv->dev))
                return vlv_force_wake_put(dev_priv, fw_engine);

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        if (--dev_priv->uncore.forcewake_count == 0) {
                dev_priv->uncore.forcewake_count++;
                mod_delayed_work(dev_priv->wq,
                                 &dev_priv->uncore.force_wake_work,
                                 1);
        }
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        intel_runtime_pm_put(dev_priv);
}
/*
 * Create as many clients as the number of doorbells. Note that there are
 * already client(s)/doorbell(s) created during driver load, but this test
 * creates its own and does not interact with the existing ones.
 */
static int igt_guc_doorbells(void *arg)
{
        struct drm_i915_private *dev_priv = arg;
        struct intel_guc *guc;
        int i, err = 0;
        u16 db_id;

        GEM_BUG_ON(!HAS_GUC(dev_priv));
        mutex_lock(&dev_priv->drm.struct_mutex);
        intel_runtime_pm_get(dev_priv);

        guc = &dev_priv->guc;
        if (!guc) {
                pr_err("No guc object!\n");
                err = -EINVAL;
                goto unlock;
        }

        err = check_all_doorbells(guc);
        if (err)
                goto unlock;

        for (i = 0; i < ATTEMPTS; i++) {
                clients[i] = guc_client_alloc(dev_priv,
                                              INTEL_INFO(dev_priv)->ring_mask,
                                              i % GUC_CLIENT_PRIORITY_NUM,
                                              dev_priv->kernel_context);

                if (!clients[i]) {
                        pr_err("[%d] No guc client\n", i);
                        err = -EINVAL;
                        goto out;
                }

                if (IS_ERR(clients[i])) {
                        if (PTR_ERR(clients[i]) != -ENOSPC) {
                                pr_err("[%d] unexpected error\n", i);
                                err = PTR_ERR(clients[i]);
                                goto out;
                        }

                        if (available_dbs(guc, i % GUC_CLIENT_PRIORITY_NUM)) {
                                pr_err("[%d] non-db related alloc fail\n", i);
                                err = -EINVAL;
                                goto out;
                        }

                        /* expected, ran out of dbs for this client type */
                        continue;
                }

                /*
                 * The check below is only valid because we keep a doorbell
                 * assigned during the whole life of the client.
                 */
                if (clients[i]->stage_id >= GUC_NUM_DOORBELLS) {
                        pr_err("[%d] more clients than doorbells (%d >= %d)\n",
                               i, clients[i]->stage_id, GUC_NUM_DOORBELLS);
                        err = -EINVAL;
                        goto out;
                }

                err = validate_client(clients[i],
                                      i % GUC_CLIENT_PRIORITY_NUM, false);
                if (err) {
                        pr_err("[%d] client_alloc sanity check failed!\n", i);
                        err = -EINVAL;
                        goto out;
                }

                db_id = clients[i]->doorbell_id;

                err = __guc_client_enable(clients[i]);
                if (err) {
                        pr_err("[%d] Failed to create a doorbell\n", i);
                        goto out;
                }

                /* doorbell id shouldn't change, we are holding the mutex */
                if (db_id != clients[i]->doorbell_id) {
                        pr_err("[%d] doorbell id changed (%d != %d)\n",
                               i, db_id, clients[i]->doorbell_id);
                        err = -EINVAL;
                        goto out;
                }

                err = check_all_doorbells(guc);
                if (err)
                        goto out;

                err = ring_doorbell_nop(clients[i]);
                if (err)
                        goto out;
        }

out:
        for (i = 0; i < ATTEMPTS; i++)
                if (!IS_ERR_OR_NULL(clients[i])) {
                        __guc_client_disable(clients[i]);
                        guc_client_free(clients[i]);
                }
unlock:
        intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev_priv->drm.struct_mutex);
        return err;
}
/*
 * Check that we're able to synchronize guc_clients with their doorbells
 *
 * We're creating clients and reserving doorbells once, at module load.
 * During module lifetime, the GuC, doorbell HW, and i915 state may go out
 * of sync due to the GuC being reset. In other words - GuC clients are
 * still around, but the status of their doorbells may be incorrect. This
 * is the reason behind validating that the doorbell status expected by
 * the driver matches what the GuC/HW have.
 */
static int igt_guc_clients(void *args)
{
        struct drm_i915_private *dev_priv = args;
        struct intel_guc *guc;
        int err = 0;

        GEM_BUG_ON(!HAS_GUC(dev_priv));
        mutex_lock(&dev_priv->drm.struct_mutex);
        intel_runtime_pm_get(dev_priv);

        guc = &dev_priv->guc;
        if (!guc) {
                pr_err("No guc object!\n");
                err = -EINVAL;
                goto unlock;
        }

        err = check_all_doorbells(guc);
        if (err)
                goto unlock;

        /*
         * Get rid of clients created during driver load because the test
         * will recreate them.
         */
        guc_clients_disable(guc);
        guc_clients_destroy(guc);
        if (guc->execbuf_client || guc->preempt_client) {
                pr_err("guc_clients_destroy lied!\n");
                err = -EINVAL;
                goto unlock;
        }

        err = guc_clients_create(guc);
        if (err) {
                pr_err("Failed to create clients\n");
                goto unlock;
        }
        GEM_BUG_ON(!guc->execbuf_client);

        err = validate_client(guc->execbuf_client,
                              GUC_CLIENT_PRIORITY_KMD_NORMAL, false);
        if (err) {
                pr_err("execbuf client validation failed\n");
                goto out;
        }

        if (guc->preempt_client) {
                err = validate_client(guc->preempt_client,
                                      GUC_CLIENT_PRIORITY_KMD_HIGH, true);
                if (err) {
                        pr_err("preempt client validation failed\n");
                        goto out;
                }
        }

        /* each client should now have reserved a doorbell */
        if (!has_doorbell(guc->execbuf_client) ||
            (guc->preempt_client && !has_doorbell(guc->preempt_client))) {
                pr_err("guc_clients_create didn't reserve doorbells\n");
                err = -EINVAL;
                goto out;
        }

        /* Now enable the clients */
        guc_clients_enable(guc);

        /* each client should now have received a doorbell */
        if (!client_doorbell_in_sync(guc->execbuf_client) ||
            !client_doorbell_in_sync(guc->preempt_client)) {
                pr_err("failed to initialize the doorbells\n");
                err = -EINVAL;
                goto out;
        }

        /*
         * Basic test - an attempt to reallocate a valid doorbell to the
         * client it is currently assigned to should not cause a failure.
         */
        err = create_doorbell(guc->execbuf_client);

out:
        /*
         * Leave clean state for the other tests; the driver always destroys
         * the clients during unload anyway.
         */
        guc_clients_disable(guc);
        guc_clients_destroy(guc);
        guc_clients_create(guc);
        guc_clients_enable(guc);
unlock:
        intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev_priv->drm.struct_mutex);
        return err;
}
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int error = 0;

        intel_uncore_early_sanitize(dev);

        intel_uncore_sanitize(dev);

        if (drm_core_check_feature(dev, DRIVER_MODESET) &&
            restore_gtt_mappings) {
                mutex_lock(&dev->struct_mutex);
                i915_gem_restore_gtt_mappings(dev);
                mutex_unlock(&dev->struct_mutex);
        }

        intel_power_domains_init_hw(dev);

        i915_restore_state(dev);
        intel_opregion_setup(dev);

        /* KMS EnterVT equivalent */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                intel_init_pch_refclk(dev);

                mutex_lock(&dev->struct_mutex);
                error = i915_gem_init_hw(dev);
                mutex_unlock(&dev->struct_mutex);

                /* We need working interrupts for modeset enabling ... */
                drm_irq_install(dev);

                intel_modeset_init_hw(dev);

                drm_modeset_lock_all(dev);
                drm_mode_config_reset(dev);
                intel_modeset_setup_hw_state(dev, true);
                drm_modeset_unlock_all(dev);

                /*
                 * ... but also need to make sure that hotplug processing
                 * doesn't cause havoc. Like in the driver load code we don't
                 * bother with the tiny race here where we might lose hotplug
                 * notifications.
                 */
                intel_hpd_init(dev);
                dev_priv->enable_hotplug_processing = true;
                /* Config may have changed between suspend and resume */
                intel_resume_hotplug(dev);
        }

        intel_opregion_init(dev);

        /*
         * The console lock can be pretty contended on resume due
         * to all the printk activity. Try to keep it out of the hot
         * path of resume if possible.
         */
        if (console_trylock()) {
                intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
                console_unlock();
        } else {
                schedule_work(&dev_priv->console_resume_work);
        }

        /* Undo what we did at i915_drm_freeze so the refcount goes back to
         * the expected level.
         */
        hsw_enable_package_c8(dev_priv);

        mutex_lock(&dev_priv->modeset_restore_lock);
        dev_priv->modeset_restore = MODESET_DONE;
        mutex_unlock(&dev_priv->modeset_restore_lock);

        intel_runtime_pm_put(dev_priv);
        return error;
}
static int intelfb_create(struct drm_fb_helper *helper,
                          struct drm_fb_helper_surface_size *sizes)
{
        struct intel_fbdev *ifbdev =
                container_of(helper, struct intel_fbdev, helper);
        struct intel_framebuffer *intel_fb = ifbdev->fb;
        struct drm_device *dev = helper->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct pci_dev *pdev = dev_priv->drm.pdev;
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct fb_info *info;
        struct drm_framebuffer *fb;
        struct i915_vma *vma;
        unsigned long flags = 0;
        bool prealloc = false;
        void __iomem *vaddr;
        int ret;

        if (intel_fb &&
            (sizes->fb_width > intel_fb->base.width ||
             sizes->fb_height > intel_fb->base.height)) {
                DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
                              " releasing it\n",
                              intel_fb->base.width, intel_fb->base.height,
                              sizes->fb_width, sizes->fb_height);
                drm_framebuffer_put(&intel_fb->base);
                intel_fb = ifbdev->fb = NULL;
        }
        if (!intel_fb || WARN_ON(!intel_fb_obj(&intel_fb->base))) {
                DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
                ret = intelfb_alloc(helper, sizes);
                if (ret)
                        return ret;
                intel_fb = ifbdev->fb;
        } else {
                DRM_DEBUG_KMS("re-using BIOS fb\n");
                prealloc = true;
                sizes->fb_width = intel_fb->base.width;
                sizes->fb_height = intel_fb->base.height;
        }

        mutex_lock(&dev->struct_mutex);
        intel_runtime_pm_get(dev_priv);

        /* Pin the GGTT vma for our access via info->screen_base.
         * This also validates that any existing fb inherited from the
         * BIOS is suitable for our access.
         */
        vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base,
                                         DRM_MODE_ROTATE_0,
                                         false, &flags);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out_unlock;
        }

        fb = &ifbdev->fb->base;
        intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_DIRTYFB);

        info = drm_fb_helper_alloc_fbi(helper);
        if (IS_ERR(info)) {
                DRM_ERROR("Failed to allocate fb_info\n");
                ret = PTR_ERR(info);
                goto out_unpin;
        }

        info->par = helper;

        ifbdev->helper.fb = fb;

        strcpy(info->fix.id, "inteldrmfb");

        info->fbops = &intelfb_ops;

        /* setup aperture base/size for vesafb takeover */
        info->apertures->ranges[0].base = dev->mode_config.fb_base;
        info->apertures->ranges[0].size = ggtt->mappable_end;

        info->fix.smem_start = dev->mode_config.fb_base + i915_ggtt_offset(vma);
        info->fix.smem_len = vma->node.size;

        vaddr = i915_vma_pin_iomap(vma);
        if (IS_ERR(vaddr)) {
                DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
                ret = PTR_ERR(vaddr);
                goto out_unpin;
        }
        info->screen_base = vaddr;
        info->screen_size = vma->node.size;

        /* This driver doesn't need a VT switch to restore the mode on resume */
        info->skip_vt_switch = true;

        drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
        drm_fb_helper_fill_var(info, &ifbdev->helper,
                               sizes->fb_width, sizes->fb_height);

        /* If the object is shmemfs backed, it will have given us zeroed pages.
         * If the object is stolen however, it will be full of whatever
         * garbage was left in there.
         */
        if (intel_fb_obj(fb)->stolen && !prealloc)
                memset_io(info->screen_base, 0, info->screen_size);

        /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

        DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n",
                      fb->width, fb->height, i915_ggtt_offset(vma));
        ifbdev->vma = vma;
        ifbdev->vma_flags = flags;

        intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev->struct_mutex);
        vga_switcheroo_client_fb_set(pdev, info);
        return 0;

out_unpin:
        intel_unpin_fb_vma(vma, flags);
out_unlock:
        intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
static void finish_csr_load(const struct firmware *fw, void *context)
{
        struct drm_i915_private *dev_priv = context;
        struct drm_device *dev = dev_priv->dev;
        struct intel_css_header *css_header;
        struct intel_package_header *package_header;
        struct intel_dmc_header *dmc_header;
        struct intel_csr *csr = &dev_priv->csr;
        char stepping = intel_get_stepping(dev);
        char substepping = intel_get_substepping(dev);
        uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
        uint32_t i;
        __be32 *dmc_payload;
        bool fw_loaded = false;

        if (!fw) {
                i915_firmware_load_error_print(csr->fw_path, 0);
                goto out;
        }

        if ((stepping == -ENODATA) || (substepping == -ENODATA)) {
                DRM_ERROR("Unknown stepping info, firmware loading failed\n");
                goto out;
        }

        /* Extract CSS Header information */
        css_header = (struct intel_css_header *)fw->data;
        if (sizeof(struct intel_css_header) !=
            (css_header->header_len * 4)) {
                DRM_ERROR("Firmware has wrong CSS header length %u bytes\n",
                          (css_header->header_len * 4));
                goto out;
        }
        readcount += sizeof(struct intel_css_header);

        /* Extract Package Header information */
        package_header = (struct intel_package_header *)
                &fw->data[readcount];
        if (sizeof(struct intel_package_header) !=
            (package_header->header_len * 4)) {
                DRM_ERROR("Firmware has wrong package header length %u bytes\n",
                          (package_header->header_len * 4));
                goto out;
        }
        readcount += sizeof(struct intel_package_header);

        /* Search for dmc_offset to find the firmware binary. */
        for (i = 0; i < package_header->num_entries; i++) {
                if (package_header->fw_info[i].substepping == '*' &&
                    stepping == package_header->fw_info[i].stepping) {
                        dmc_offset = package_header->fw_info[i].offset;
                        break;
                } else if (stepping == package_header->fw_info[i].stepping &&
                           substepping == package_header->fw_info[i].substepping) {
                        dmc_offset = package_header->fw_info[i].offset;
                        break;
                } else if (package_header->fw_info[i].stepping == '*' &&
                           package_header->fw_info[i].substepping == '*')
                        dmc_offset = package_header->fw_info[i].offset;
        }
        if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
                DRM_ERROR("Firmware not supported for %c stepping\n", stepping);
                goto out;
        }
        readcount += dmc_offset;

        /* Extract dmc_header information. */
        dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
        if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) {
                DRM_ERROR("Firmware has wrong dmc header length %u bytes\n",
                          (dmc_header->header_len));
                goto out;
        }
        readcount += sizeof(struct intel_dmc_header);

        /* Cache the dmc header info. */
        if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) {
                DRM_ERROR("Firmware has wrong mmio count %u\n",
                          dmc_header->mmio_count);
                goto out;
        }
        csr->mmio_count = dmc_header->mmio_count;
        for (i = 0; i < dmc_header->mmio_count; i++) {
                if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE ||
                    dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
                        DRM_ERROR("Firmware has wrong mmio address 0x%x\n",
                                  dmc_header->mmioaddr[i]);
                        goto out;
                }
                csr->mmioaddr[i] = dmc_header->mmioaddr[i];
                csr->mmiodata[i] = dmc_header->mmiodata[i];
        }

        /* fw_size is in dwords, so multiply by 4 to convert into bytes. */
        nbytes = dmc_header->fw_size * 4;
        if (nbytes > CSR_MAX_FW_SIZE) {
                DRM_ERROR("CSR firmware too big (%u) bytes\n", nbytes);
                goto out;
        }
        csr->dmc_fw_size = dmc_header->fw_size;

        csr->dmc_payload = kmalloc(nbytes, GFP_KERNEL);
        if (!csr->dmc_payload) {
                DRM_ERROR("Memory allocation failed for dmc payload\n");
                goto out;
        }

        dmc_payload = csr->dmc_payload;
        for (i = 0; i < dmc_header->fw_size; i++) {
                uint32_t *tmp = (u32 *)&fw->data[readcount + i * 4];
                /*
                 * The firmware payload is an array of 32 bit words stored in
                 * little-endian format in the firmware image and programmed
                 * as 32 bit big-endian format to memory.
                 */
                dmc_payload[i] = cpu_to_be32(*tmp);
        }

        /* load csr program during system boot, as needed for DC states */
        intel_csr_load_program(dev);
        fw_loaded = true;

out:
        if (fw_loaded)
                intel_runtime_pm_put(dev_priv);
        else
                intel_csr_load_status_set(dev_priv, FW_FAILED);

        release_firmware(fw);
}
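/*
 * Illustrative sketch (not driver code): on a little-endian host such as
 * x86, cpu_to_be32() byte-swaps each firmware dword, so the value ends up
 * in memory MSB-first, which is the layout programmed into CSR memory.
 * The helper name below is hypothetical.
 */
static __be32 dmc_payload_word_example(u32 le_word)
{
        /* cpu_to_be32(0x12345678) is stored as the bytes 12 34 56 78 */
        return cpu_to_be32(le_word);
}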