int i915_save_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);

	/* Hardware status page */
	dev_priv->saveHWS = I915_READ(HWS_PGA);

	i915_save_display(dev);

	/* Interrupt state */
	if (IS_IRONLAKE(dev)) {
		dev_priv->saveDEIER = I915_READ(DEIER);
		dev_priv->saveDEIMR = I915_READ(DEIMR);
		dev_priv->saveGTIER = I915_READ(GTIER);
		dev_priv->saveGTIMR = I915_READ(GTIMR);
		dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
		dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
		dev_priv->saveMCHBAR_RENDER_STANDBY =
			I915_READ(MCHBAR_RENDER_STANDBY);
	} else {
		dev_priv->saveIER = I915_READ(IER);
		dev_priv->saveIMR = I915_READ(IMR);
	}

	if (IS_IRONLAKE_M(dev))
		ironlake_disable_drps(dev);

	/* Cache mode state */
	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);

	/* Memory Arbitration state */
	dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);

	/* Scratch space */
	for (i = 0; i < 16; i++) {
		dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
		dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
	}
	for (i = 0; i < 3; i++)
		dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));

	/* Fences */
	if (IS_I965G(dev)) {
		for (i = 0; i < 16; i++)
			dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
	} else {
		for (i = 0; i < 8; i++)
			dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));

		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	}

	return 0;
}
int i915_save_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);

	mutex_lock(&dev->struct_mutex);

	/* Hardware status page */
	dev_priv->saveHWS = I915_READ(HWS_PGA);

	i915_save_display(dev);

	/* Interrupt state */
	if (HAS_PCH_SPLIT(dev)) {
		dev_priv->saveDEIER = I915_READ(DEIER);
		dev_priv->saveDEIMR = I915_READ(DEIMR);
		dev_priv->saveGTIER = I915_READ(GTIER);
		dev_priv->saveGTIMR = I915_READ(GTIMR);
		dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
		dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
		dev_priv->saveMCHBAR_RENDER_STANDBY =
			I915_READ(RSTDBYCTL);
		dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
	} else {
		dev_priv->saveIER = I915_READ(IER);
		dev_priv->saveIMR = I915_READ(IMR);
	}

	if (IS_IRONLAKE_M(dev))
		ironlake_disable_drps(dev);
	if (INTEL_INFO(dev)->gen >= 6)
		gen6_disable_rps(dev);

	/* Cache mode state */
	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);

	/* Memory Arbitration state */
	dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);

	/* Scratch space */
	for (i = 0; i < 16; i++) {
		dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
		dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
	}
	for (i = 0; i < 3; i++)
		dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
int i915_save_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	mutex_lock(&dev->struct_mutex);

	i915_save_display(dev);

	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Interrupt state */
		if (HAS_PCH_SPLIT(dev)) {
			dev_priv->regfile.saveDEIER = I915_READ(DEIER);
			dev_priv->regfile.saveDEIMR = I915_READ(DEIMR);
			dev_priv->regfile.saveGTIER = I915_READ(GTIER);
			dev_priv->regfile.saveGTIMR = I915_READ(GTIMR);
			dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
			dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
			dev_priv->regfile.saveMCHBAR_RENDER_STANDBY =
				I915_READ(RSTDBYCTL);
			dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
		} else {
			dev_priv->regfile.saveIER = I915_READ(IER);
			dev_priv->regfile.saveIMR = I915_READ(IMR);
		}
	}

	if (IS_GEN4(dev))
		pci_read_config_word(dev->pdev, GCDGMBUS,
				     &dev_priv->regfile.saveGCDGMBUS);

	/* Cache mode state */
	if (INTEL_INFO(dev)->gen < 7)
		dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);

	/* Memory Arbitration state */
	dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);

	/* Scratch space */
	for (i = 0; i < 16; i++) {
		dev_priv->regfile.saveSWF0[i] = I915_READ(SWF00 + (i << 2));
		dev_priv->regfile.saveSWF1[i] = I915_READ(SWF10 + (i << 2));
	}
	for (i = 0; i < 3; i++)
		dev_priv->regfile.saveSWF2[i] = I915_READ(SWF30 + (i << 2));

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
int i915_save_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	dev_priv->saveLBB = pci_read_config(dev->dev, LBB, 1);

	/* Hardware status page */
	dev_priv->saveHWS = I915_READ(HWS_PGA);

	i915_save_display(dev);

	/* Interrupt state */
	if (HAS_PCH_SPLIT(dev)) {
		dev_priv->saveDEIER = I915_READ(DEIER);
		dev_priv->saveDEIMR = I915_READ(DEIMR);
		dev_priv->saveGTIER = I915_READ(GTIER);
		dev_priv->saveGTIMR = I915_READ(GTIMR);
		dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
		dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
		dev_priv->saveMCHBAR_RENDER_STANDBY =
			I915_READ(RSTDBYCTL);
		dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
	} else {
		dev_priv->saveIER = I915_READ(IER);
		dev_priv->saveIMR = I915_READ(IMR);
	}

	if (IS_IRONLAKE_M(dev))
		ironlake_disable_drps(dev);
	if (INTEL_INFO(dev)->gen >= 6)
		gen6_disable_rps(dev);

	/* Cache mode state */
	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);

	/* Memory Arbitration state */
	dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);

	/* Scratch space */
	for (i = 0; i < 16; i++) {
		dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
		dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
	}
	for (i = 0; i < 3; i++)
		dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));

	return 0;
}
int i915_save_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	mutex_lock(&dev->struct_mutex);

	i915_save_display(dev);

	if (IS_GEN4(dev))
		pci_read_config_word(dev->pdev, GCDGMBUS,
				     &dev_priv->regfile.saveGCDGMBUS);

	/* Cache mode state */
	if (INTEL_INFO(dev)->gen < 7)
		dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);

	/* Memory Arbitration state */
	dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);

	/* Scratch space */
	if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) {
		for (i = 0; i < 7; i++) {
			dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
			dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
		}
		for (i = 0; i < 3; i++)
			dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
	} else if (IS_GEN2(dev_priv)) {
		for (i = 0; i < 7; i++)
			dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
	} else if (HAS_GMCH_DISPLAY(dev_priv)) {
		for (i = 0; i < 16; i++) {
			dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
			dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
		}
		for (i = 0; i < 3; i++)
			dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
/**
 * i965_reset - reset chip after a hang
 * @dev: drm device to reset
 * @flags: reset domains
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *	- reset the chip using the reset reg
 *	- re-init context state
 *	- re-init hardware status page
 *	- re-init ring buffer
 *	- re-init interrupt state
 *	- re-init display
 */
int i965_reset(struct drm_device *dev, u8 flags)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long timeout;
	u8 gdrst;
	/*
	 * We really should only reset the display subsystem if we actually
	 * need to
	 */
	bool need_display = true;

	mutex_lock(&dev->struct_mutex);

	/*
	 * Clear request list
	 */
	i915_gem_retire_requests(dev);

	if (need_display)
		i915_save_display(dev);

	if (IS_I965G(dev) || IS_G4X(dev)) {
		/*
		 * Set the domains we want to reset, then the reset bit (bit 0).
		 * Clear the reset bit after a while and wait for hardware status
		 * bit (bit 1) to be set
		 */
		pci_read_config_byte(dev->pdev, GDRST, &gdrst);
		pci_write_config_byte(dev->pdev, GDRST,
				      gdrst | flags |
				      ((flags == GDRST_FULL) ? 0x1 : 0x0));
		udelay(50);
		pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe);

		/* ...we don't want to loop forever though, 500ms should be plenty */
		timeout = jiffies + msecs_to_jiffies(500);
		do {
			udelay(100);
			pci_read_config_byte(dev->pdev, GDRST, &gdrst);
		} while ((gdrst & 0x1) && time_after(timeout, jiffies));

		if (gdrst & 0x1) {
			WARN(true, "i915: Failed to reset chip\n");
			mutex_unlock(&dev->struct_mutex);
			return -EIO;
		}
	} else {
		DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
		mutex_unlock(&dev->struct_mutex);
		return -ENODEV;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
	    !dev_priv->mm.suspended) {
		drm_i915_ring_buffer_t *ring = &dev_priv->ring;
		struct drm_gem_object *obj = ring->ring_obj;
		struct drm_i915_gem_object *obj_priv = obj->driver_private;
		dev_priv->mm.suspended = 0;

		/* Stop the ring if it's running. */
		I915_WRITE(PRB0_CTL, 0);
		I915_WRITE(PRB0_TAIL, 0);
		I915_WRITE(PRB0_HEAD, 0);

		/* Initialize the ring. */
		I915_WRITE(PRB0_START, obj_priv->gtt_offset);
		I915_WRITE(PRB0_CTL,
			   ((obj->size - 4096) & RING_NR_PAGES) |
			   RING_NO_REPORT |
			   RING_VALID);
		if (!drm_core_check_feature(dev, DRIVER_MODESET))
			i915_kernel_lost_context(dev);
		else {
			ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
			ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
			ring->space = ring->head - (ring->tail + 8);
			if (ring->space < 0)
				ring->space += ring->Size;
		}

		mutex_unlock(&dev->struct_mutex);
		drm_irq_uninstall(dev);
		drm_irq_install(dev);
		mutex_lock(&dev->struct_mutex);
	}

	/*
	 * Display needs restore too...
	 */
	if (need_display)
		i915_restore_display(dev);

	mutex_unlock(&dev->struct_mutex);
	return 0;
}
int i965_reset(struct drm_device *dev, u8 flags)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long timeout;
	u8 gdrst;
	/*
	 * We really should only reset the display subsystem if we actually
	 * need to
	 */
	bool need_display = true;

	mutex_lock(&dev->struct_mutex);

	/*
	 * Clear request list
	 */
	i915_gem_retire_requests(dev, &dev_priv->render_ring);

	if (need_display)
		i915_save_display(dev);

	if (IS_I965G(dev) || IS_G4X(dev)) {
		/*
		 * Set the domains we want to reset, then the reset bit (bit 0).
		 * Clear the reset bit after a while and wait for hardware status
		 * bit (bit 1) to be set
		 */
		pci_read_config_byte(dev->pdev, GDRST, &gdrst);
		pci_write_config_byte(dev->pdev, GDRST,
				      gdrst | flags |
				      ((flags == GDRST_FULL) ? 0x1 : 0x0));
		udelay(50);
		pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe);

		/* ...we don't want to loop forever though, 500ms should be plenty */
		timeout = jiffies + msecs_to_jiffies(500);
		do {
			udelay(100);
			pci_read_config_byte(dev->pdev, GDRST, &gdrst);
		} while ((gdrst & 0x1) && time_after(timeout, jiffies));

		if (gdrst & 0x1) {
			WARN(true, "i915: Failed to reset chip\n");
			mutex_unlock(&dev->struct_mutex);
			return -EIO;
		}
	} else {
		DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
		mutex_unlock(&dev->struct_mutex);
		return -ENODEV;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
	    !dev_priv->mm.suspended) {
		struct intel_ring_buffer *ring = &dev_priv->render_ring;

		dev_priv->mm.suspended = 0;
		ring->init(dev, ring);

		mutex_unlock(&dev->struct_mutex);
		drm_irq_uninstall(dev);
		drm_irq_install(dev);
		mutex_lock(&dev->struct_mutex);
	}

	/*
	 * Display needs restore too...
	 */
	if (need_display)
		i915_restore_display(dev);

	mutex_unlock(&dev->struct_mutex);
	return 0;
}