/*
 * i915_restore_state - write back the register state captured by
 * i915_save_state() (the UMS-era counterpart below at the same name).
 *
 * Holds dev->struct_mutex for the duration of the register writes.
 * Always returns 0.
 */
int i915_restore_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	mutex_lock(&dev->struct_mutex);

	i915_gem_restore_fences(dev);

	/* GCDGMBUS lives in PCI config space on gen4, not in MMIO. */
	if (IS_GEN4(dev))
		pci_write_config_word(dev->pdev, GCDGMBUS,
				      dev_priv->regfile.saveGCDGMBUS);
	i915_restore_display(dev);

	/*
	 * Interrupt state is only restored for UMS; under KMS the
	 * interrupt registers are reprogrammed from scratch elsewhere.
	 */
	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Interrupt state */
		if (HAS_PCH_SPLIT(dev)) {
			I915_WRITE(DEIER, dev_priv->regfile.saveDEIER);
			I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR);
			I915_WRITE(GTIER, dev_priv->regfile.saveGTIER);
			I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR);
			I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
			I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
			I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
			I915_WRITE(RSTDBYCTL,
				   dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
		} else {
			I915_WRITE(IER, dev_priv->regfile.saveIER);
			I915_WRITE(IMR, dev_priv->regfile.saveIMR);
		}
	}

	/* Cache mode state */
	if (INTEL_INFO(dev)->gen < 7)
		/*
		 * NOTE(review): the 0xffff0000 OR presumably sets the
		 * write-enable mask bits of this masked register so every
		 * low-word field is actually written — confirm against the
		 * register definition.
		 */
		I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
			   0xffff0000);

	/* Memory arbitration state */
	I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE |
		   0xffff0000);

	/* Scratch space (SWF) registers, 4 bytes apart. */
	for (i = 0; i < 16; i++) {
		I915_WRITE(SWF00 + (i << 2), dev_priv->regfile.saveSWF0[i]);
		I915_WRITE(SWF10 + (i << 2), dev_priv->regfile.saveSWF1[i]);
	}
	for (i = 0; i < 3; i++)
		I915_WRITE(SWF30 + (i << 2), dev_priv->regfile.saveSWF2[i]);

	mutex_unlock(&dev->struct_mutex);

	/* GMBUS/i2c controllers may hold stale state after resume. */
	intel_i2c_reset(dev);

	return 0;
}
/*
 * Report whether the panel backlight is driven in "combination" mode
 * (gen4: BLM_COMBINATION_MODE in BLC_PWM_CTL2; gen2: BLM_LEGACY_MODE in
 * BLC_PWM_CTL).  Returns 0 on all other generations.
 */
static int is_backlight_combination_mode(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int combination = 0;

	if (IS_GEN4(dev))
		combination = I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
	else if (IS_GEN2(dev))
		combination = I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;

	return combination;
}
/*
 * i915_save_state - snapshot registers into dev_priv->regfile so they
 * can be written back later by i915_restore_state().
 *
 * Holds dev->struct_mutex for the duration of the register reads.
 * Always returns 0.
 */
int i915_save_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	mutex_lock(&dev->struct_mutex);

	i915_save_display(dev);

	/*
	 * Interrupt state is only saved for UMS; under KMS it is
	 * reprogrammed from scratch on resume.
	 */
	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Interrupt state */
		if (HAS_PCH_SPLIT(dev)) {
			dev_priv->regfile.saveDEIER = I915_READ(DEIER);
			dev_priv->regfile.saveDEIMR = I915_READ(DEIMR);
			dev_priv->regfile.saveGTIER = I915_READ(GTIER);
			dev_priv->regfile.saveGTIMR = I915_READ(GTIMR);
			dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
			dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
			dev_priv->regfile.saveMCHBAR_RENDER_STANDBY =
				I915_READ(RSTDBYCTL);
			dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
		} else {
			dev_priv->regfile.saveIER = I915_READ(IER);
			dev_priv->regfile.saveIMR = I915_READ(IMR);
		}
	}

	/* GCDGMBUS lives in PCI config space on gen4, not in MMIO. */
	if (IS_GEN4(dev))
		pci_read_config_word(dev->pdev, GCDGMBUS,
				     &dev_priv->regfile.saveGCDGMBUS);

	/* Cache mode state */
	if (INTEL_INFO(dev)->gen < 7)
		dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);

	/* Memory Arbitration state */
	dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);

	/* Scratch space (SWF) registers, 4 bytes apart. */
	for (i = 0; i < 16; i++) {
		dev_priv->regfile.saveSWF0[i] = I915_READ(SWF00 + (i << 2));
		dev_priv->regfile.saveSWF1[i] = I915_READ(SWF10 + (i << 2));
	}
	for (i = 0; i < 3; i++)
		dev_priv->regfile.saveSWF2[i] = I915_READ(SWF30 + (i << 2));

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static void i8xx_fbc_enable(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; struct drm_framebuffer *fb = crtc->base.primary->fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); int cfb_pitch; int i; u32 fbc_ctl; dev_priv->fbc.enabled = true; /* Note: fbc.threshold == 1 for i8xx */ cfb_pitch = dev_priv->fbc.uncompressed_size / FBC_LL_SIZE; if (fb->pitches[0] < cfb_pitch) cfb_pitch = fb->pitches[0]; /* FBC_CTL wants 32B or 64B units */ if (IS_GEN2(dev_priv)) cfb_pitch = (cfb_pitch / 32) - 1; else cfb_pitch = (cfb_pitch / 64) - 1; /* Clear old tags */ for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) I915_WRITE(FBC_TAG(i), 0); if (IS_GEN4(dev_priv)) { u32 fbc_ctl2; /* Set it up... */ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane); I915_WRITE(FBC_CONTROL2, fbc_ctl2); I915_WRITE(FBC_FENCE_OFF, get_crtc_fence_y_offset(crtc)); } /* enable it... */ fbc_ctl = I915_READ(FBC_CONTROL); fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT; fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC; if (IS_I945GM(dev_priv)) fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; fbc_ctl |= obj->fence_reg; I915_WRITE(FBC_CONTROL, fbc_ctl); DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n", cfb_pitch, crtc->base.y, plane_name(crtc->plane)); }
/*
 * i915_restore_state - write back the register state captured by the
 * matching i915_save_state() variant (SWF0/SWF1/SWF3 scratch layout).
 *
 * Holds dev->struct_mutex for the duration of the register writes.
 * Always returns 0.
 */
int i915_restore_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	mutex_lock(&dev->struct_mutex);

	i915_gem_restore_fences(dev);

	/* GCDGMBUS lives in PCI config space on gen4, not in MMIO. */
	if (IS_GEN4(dev))
		pci_write_config_word(dev->pdev, GCDGMBUS,
				      dev_priv->regfile.saveGCDGMBUS);
	i915_restore_display(dev);

	/* Cache mode state */
	if (INTEL_INFO(dev)->gen < 7)
		/*
		 * NOTE(review): the 0xffff0000 OR presumably sets the
		 * write-enable mask bits of this masked register so every
		 * low-word field is actually written — confirm against the
		 * register definition.
		 */
		I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
			   0xffff0000);

	/* Memory arbitration state */
	I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE |
		   0xffff0000);

	/*
	 * Scratch space: the set of SWF registers present differs per
	 * platform — gen2 mobile has SWF0/SWF1/SWF3, gen2 desktop only
	 * SWF1, and GMCH display platforms the full 16-entry SWF0/SWF1
	 * banks plus SWF3.
	 */
	if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) {
		for (i = 0; i < 7; i++) {
			I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
			I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
		}
		for (i = 0; i < 3; i++)
			I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
	} else if (IS_GEN2(dev_priv)) {
		for (i = 0; i < 7; i++)
			I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
	} else if (HAS_GMCH_DISPLAY(dev_priv)) {
		for (i = 0; i < 16; i++) {
			I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
			I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
		}
		for (i = 0; i < 3; i++)
			I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
	}

	mutex_unlock(&dev->struct_mutex);

	/* GMBUS/i2c controllers may hold stale state after resume. */
	intel_i2c_reset(dev);

	return 0;
}
/*
 * i915_save_state - snapshot registers into dev_priv->regfile so they
 * can be written back later by the matching i915_restore_state()
 * variant (SWF0/SWF1/SWF3 scratch layout).
 *
 * Holds dev->struct_mutex for the duration of the register reads.
 * Always returns 0.
 */
int i915_save_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	mutex_lock(&dev->struct_mutex);

	i915_save_display(dev);

	/* GCDGMBUS lives in PCI config space on gen4, not in MMIO. */
	if (IS_GEN4(dev))
		pci_read_config_word(dev->pdev, GCDGMBUS,
				     &dev_priv->regfile.saveGCDGMBUS);

	/* Cache mode state */
	if (INTEL_INFO(dev)->gen < 7)
		dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);

	/* Memory Arbitration state */
	dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);

	/*
	 * Scratch space: the set of SWF registers present differs per
	 * platform — gen2 mobile has SWF0/SWF1/SWF3, gen2 desktop only
	 * SWF1, and GMCH display platforms the full 16-entry SWF0/SWF1
	 * banks plus SWF3.
	 */
	if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) {
		for (i = 0; i < 7; i++) {
			dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
			dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
		}
		for (i = 0; i < 3; i++)
			dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
	} else if (IS_GEN2(dev_priv)) {
		for (i = 0; i < 7; i++)
			dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
	} else if (HAS_GMCH_DISPLAY(dev_priv)) {
		for (i = 0; i < 16; i++) {
			dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
			dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
		}
		for (i = 0; i < 3; i++)
			dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static void i8xx_fbc_activate(struct drm_i915_private *dev_priv) { struct intel_fbc_reg_params *params = &dev_priv->fbc.params; int cfb_pitch; int i; u32 fbc_ctl; dev_priv->fbc.active = true; /* Note: fbc.threshold == 1 for i8xx */ cfb_pitch = params->cfb_size / FBC_LL_SIZE; if (params->fb.stride < cfb_pitch) cfb_pitch = params->fb.stride; /* FBC_CTL wants 32B or 64B units */ if (IS_GEN2(dev_priv)) cfb_pitch = (cfb_pitch / 32) - 1; else cfb_pitch = (cfb_pitch / 64) - 1; /* Clear old tags */ for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) I915_WRITE(FBC_TAG(i), 0); if (IS_GEN4(dev_priv)) { u32 fbc_ctl2; /* Set it up... */ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.plane); I915_WRITE(FBC_CONTROL2, fbc_ctl2); I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset); } /* enable it... */ fbc_ctl = I915_READ(FBC_CONTROL); fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT; fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC; if (IS_I945GM(dev_priv)) fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; fbc_ctl |= params->fb.fence_reg; I915_WRITE(FBC_CONTROL, fbc_ctl); }
/*
 * Check whether a framebuffer stride is acceptable for FBC on this
 * hardware: gen2/3 accept only exactly 4096 or 8192 bytes; gen4
 * (except G4X) rejects strides below 2048; everything caps at 16384.
 */
static bool stride_is_valid(struct drm_i915_private *dev_priv,
			    unsigned int stride)
{
	/* These should have been caught earlier. */
	WARN_ON(stride < 512);
	WARN_ON((stride & (64 - 1)) != 0);

	/* Below are the additional FBC restrictions. */
	if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
		return stride == 4096 || stride == 8192;

	if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048)
		return false;

	return stride <= 16384;
}
/*
 * Validate a display stride against the per-generation limit.
 *
 * 8xx spec has always 8K limit, but tests show a larger limit in
 * non-tiling mode, which makes large monitors work.  Tiled: gen2/3
 * limit 8K, gen4 16K, newer 32K; untiled: always 32K.
 */
Bool intel_check_display_stride(ScrnInfoPtr scrn, int stride, Bool tiling)
{
	intel_screen_private *intel = intel_get_screen_private(scrn);
	int limit = KB(32);

	if (tiling) {
		if (IS_GEN2(intel) || IS_GEN3(intel))
			limit = KB(8);
		else if (IS_GEN4(intel))
			limit = KB(16);
	}

	return stride <= limit;
}
/**
 * Detects bit 6 swizzling of address lookup between IGD access and CPU
 * access through main memory.
 *
 * The result is stored in dev_priv->mm.bit_6_swizzle_x/_y; both start
 * as UNKNOWN and are only upgraded when a branch below can determine
 * the actual mode.  No value is returned.
 */
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;

	if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
		/*
		 * On BDW+, swizzling is not used. We leave the CPU memory
		 * controller in charge of optimizing memory accesses without
		 * the extra address manipulation GPU side.
		 *
		 * VLV and CHV don't have GPU swizzling.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (INTEL_INFO(dev)->gen >= 6) {
		if (dev_priv->preserve_bios_swizzle) {
			/* Trust whatever the BIOS programmed into DISP_ARB_CTL. */
			if (I915_READ(DISP_ARB_CTL) &
			    DISP_TILE_SURFACE_SWIZZLING) {
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else {
				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			}
		} else {
			uint32_t dimm_c0, dimm_c1;
			dimm_c0 = I915_READ(MAD_DIMM_C0);
			dimm_c1 = I915_READ(MAD_DIMM_C1);
			dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
			dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
			/* Enable swizzling when the channels are populated
			 * with identically sized dimms. We don't need to check
			 * the 3rd channel because no cpu with gpu attached
			 * ships in that configuration. Also, swizzling only
			 * makes sense for 2 channels anyway. */
			if (dimm_c0 == dimm_c1) {
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else {
				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			}
		}
	} else if (IS_GEN5(dev)) {
		/* On Ironlake whatever DRAM config, GPU always do
		 * same swizzling setup.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
		swizzle_y = I915_BIT_6_SWIZZLE_9;
	} else if (IS_GEN2(dev)) {
		/* As far as we know, the 865 doesn't have these bit 6
		 * swizzling issues.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
		uint32_t dcc;

		/* On 9xx chipsets, channel interleave by the CPU is
		 * determined by DCC. For single-channel, neither the CPU
		 * nor the GPU do swizzling. For dual channel interleaved,
		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
		 * 9 for Y tiled. The CPU's interleave is independent, and
		 * can be based on either bit 11 (haven't seen this yet) or
		 * bit 17 (common).
		 */
		dcc = I915_READ(DCC);
		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			break;
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
				/* This is the base swizzling by the GPU for
				 * tiled buffers.
				 */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
				/* Bit 11 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
			} else {
				/* Bit 17 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
			}
			break;
		/* No default: other DCC modes leave swizzle_x/y UNKNOWN. */
		}

		/* check for L-shaped memory aka modified enhanced addressing */
		if (IS_GEN4(dev)) {
			uint32_t ddc2 = I915_READ(DCC2);

			if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
				dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
		}

		/* All-ones reads back when MCHBAR is inaccessible. */
		if (dcc == 0xffffffff) {
			DRM_ERROR("Couldn't read from MCHBAR.  "
				  "Disabling tiling.\n");
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}
	} else {
		/* The 965, G33, and newer, have a very flexible memory
		 * configuration. It will enable dual-channel mode
		 * (interleaving) on as much memory as it can, and the GPU
		 * will additionally sometimes enable different bit 6
		 * swizzling for tiled objects from the CPU.
		 *
		 * Here's what I found on the G965:
		 *    slot fill         memory size  swizzling
		 * 0A   0B   1A   1B   1-ch   2-ch
		 * 512  0    0    0    512    0     O
		 * 512  0    512  0    16     1008  X
		 * 512  0    0    512  16     1008  X
		 * 0    512  0    512  16     1008  X
		 * 1024 1024 1024 0    2048   1024  O
		 *
		 * We could probably detect this based on either the DRB
		 * matching, which was the case for the swizzling required in
		 * the table above, or from the 1-ch value being less than
		 * the minimum size of a rank.
		 */
		if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
		} else {
			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
			swizzle_y = I915_BIT_6_SWIZZLE_9;
		}
	}

	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}
void init_instdone_definitions(uint32_t devid) { if (IS_GEN7(devid)) { init_gen7_instdone(); } else if (IS_GEN6(devid)) { /* Now called INSTDONE_1 in the docs. */ gen6_instdone1_bit(GEN6_MA_3_DONE, "Message Arbiter 3"); gen6_instdone1_bit(GEN6_EU_32_DONE, "EU 32"); gen6_instdone1_bit(GEN6_EU_31_DONE, "EU 31"); gen6_instdone1_bit(GEN6_EU_30_DONE, "EU 30"); gen6_instdone1_bit(GEN6_MA_3_DONE, "Message Arbiter 2"); gen6_instdone1_bit(GEN6_EU_22_DONE, "EU 22"); gen6_instdone1_bit(GEN6_EU_21_DONE, "EU 21"); gen6_instdone1_bit(GEN6_EU_20_DONE, "EU 20"); gen6_instdone1_bit(GEN6_MA_3_DONE, "Message Arbiter 1"); gen6_instdone1_bit(GEN6_EU_12_DONE, "EU 12"); gen6_instdone1_bit(GEN6_EU_11_DONE, "EU 11"); gen6_instdone1_bit(GEN6_EU_10_DONE, "EU 10"); gen6_instdone1_bit(GEN6_MA_3_DONE, "Message Arbiter 0"); gen6_instdone1_bit(GEN6_EU_02_DONE, "EU 02"); gen6_instdone1_bit(GEN6_EU_01_DONE, "EU 01"); gen6_instdone1_bit(GEN6_EU_00_DONE, "EU 00"); gen6_instdone1_bit(GEN6_IC_3_DONE, "IC 3"); gen6_instdone1_bit(GEN6_IC_2_DONE, "IC 2"); gen6_instdone1_bit(GEN6_IC_1_DONE, "IC 1"); gen6_instdone1_bit(GEN6_IC_0_DONE, "IC 0"); gen6_instdone1_bit(GEN6_ISC_10_DONE, "ISC 1/0"); gen6_instdone1_bit(GEN6_ISC_32_DONE, "ISC 3/2"); gen6_instdone1_bit(GEN6_VSC_DONE, "VSC"); gen6_instdone1_bit(GEN6_IEF_DONE, "IEF"); gen6_instdone1_bit(GEN6_VFE_DONE, "VFE"); gen6_instdone1_bit(GEN6_TD_DONE, "TD"); gen6_instdone1_bit(GEN6_TS_DONE, "TS"); gen6_instdone1_bit(GEN6_GW_DONE, "GW"); gen6_instdone1_bit(GEN6_HIZ_DONE, "HIZ"); gen6_instdone1_bit(GEN6_AVS_DONE, "AVS"); /* Now called INSTDONE_2 in the docs. 
*/ gen6_instdone2_bit(GEN6_GAM_DONE, "GAM"); gen6_instdone2_bit(GEN6_CS_DONE, "CS"); gen6_instdone2_bit(GEN6_WMBE_DONE, "WMBE"); gen6_instdone2_bit(GEN6_SVRW_DONE, "SVRW"); gen6_instdone2_bit(GEN6_RCC_DONE, "RCC"); gen6_instdone2_bit(GEN6_SVG_DONE, "SVG"); gen6_instdone2_bit(GEN6_ISC_DONE, "ISC"); gen6_instdone2_bit(GEN6_MT_DONE, "MT"); gen6_instdone2_bit(GEN6_RCPFE_DONE, "RCPFE"); gen6_instdone2_bit(GEN6_RCPBE_DONE, "RCPBE"); gen6_instdone2_bit(GEN6_VDI_DONE, "VDI"); gen6_instdone2_bit(GEN6_RCZ_DONE, "RCZ"); gen6_instdone2_bit(GEN6_DAP_DONE, "DAP"); gen6_instdone2_bit(GEN6_PSD_DONE, "PSD"); gen6_instdone2_bit(GEN6_IZ_DONE, "IZ"); gen6_instdone2_bit(GEN6_WMFE_DONE, "WMFE"); gen6_instdone2_bit(GEN6_SVSM_DONE, "SVSM"); gen6_instdone2_bit(GEN6_QC_DONE, "QC"); gen6_instdone2_bit(GEN6_FL_DONE, "FL"); gen6_instdone2_bit(GEN6_SC_DONE, "SC"); gen6_instdone2_bit(GEN6_DM_DONE, "DM"); gen6_instdone2_bit(GEN6_FT_DONE, "FT"); gen6_instdone2_bit(GEN6_DG_DONE, "DG"); gen6_instdone2_bit(GEN6_SI_DONE, "SI"); gen6_instdone2_bit(GEN6_SO_DONE, "SO"); gen6_instdone2_bit(GEN6_PL_DONE, "PL"); gen6_instdone2_bit(GEN6_VME_DONE, "VME"); gen6_instdone2_bit(GEN6_SF_DONE, "SF"); gen6_instdone2_bit(GEN6_CL_DONE, "CL"); gen6_instdone2_bit(GEN6_GS_DONE, "GS"); gen6_instdone2_bit(GEN6_VS0_DONE, "VS0"); gen6_instdone2_bit(GEN6_VF_DONE, "VF"); } else if (IS_GEN5(devid)) { gen4_instdone_bit(ILK_ROW_0_EU_0_DONE, "Row 0, EU 0"); gen4_instdone_bit(ILK_ROW_0_EU_1_DONE, "Row 0, EU 1"); gen4_instdone_bit(ILK_ROW_0_EU_2_DONE, "Row 0, EU 2"); gen4_instdone_bit(ILK_ROW_0_EU_3_DONE, "Row 0, EU 3"); gen4_instdone_bit(ILK_ROW_1_EU_0_DONE, "Row 1, EU 0"); gen4_instdone_bit(ILK_ROW_1_EU_1_DONE, "Row 1, EU 1"); gen4_instdone_bit(ILK_ROW_1_EU_2_DONE, "Row 1, EU 2"); gen4_instdone_bit(ILK_ROW_1_EU_3_DONE, "Row 1, EU 3"); gen4_instdone_bit(ILK_ROW_2_EU_0_DONE, "Row 2, EU 0"); gen4_instdone_bit(ILK_ROW_2_EU_1_DONE, "Row 2, EU 1"); gen4_instdone_bit(ILK_ROW_2_EU_2_DONE, "Row 2, EU 2"); 
gen4_instdone_bit(ILK_ROW_2_EU_3_DONE, "Row 2, EU 3"); gen4_instdone_bit(ILK_VCP_DONE, "VCP"); gen4_instdone_bit(ILK_ROW_0_MATH_DONE, "Row 0 math"); gen4_instdone_bit(ILK_ROW_1_MATH_DONE, "Row 1 math"); gen4_instdone_bit(ILK_ROW_2_MATH_DONE, "Row 2 math"); gen4_instdone_bit(ILK_VC1_DONE, "VC1"); gen4_instdone_bit(ILK_ROW_0_MA_DONE, "Row 0 MA"); gen4_instdone_bit(ILK_ROW_1_MA_DONE, "Row 1 MA"); gen4_instdone_bit(ILK_ROW_2_MA_DONE, "Row 2 MA"); gen4_instdone_bit(ILK_ROW_0_ISC_DONE, "Row 0 ISC"); gen4_instdone_bit(ILK_ROW_1_ISC_DONE, "Row 1 ISC"); gen4_instdone_bit(ILK_ROW_2_ISC_DONE, "Row 2 ISC"); gen4_instdone_bit(ILK_VFE_DONE, "VFE"); gen4_instdone_bit(ILK_TD_DONE, "TD"); gen4_instdone_bit(ILK_SVTS_DONE, "SVTS"); gen4_instdone_bit(ILK_TS_DONE, "TS"); gen4_instdone_bit(ILK_GW_DONE, "GW"); gen4_instdone_bit(ILK_AI_DONE, "AI"); gen4_instdone_bit(ILK_AC_DONE, "AC"); gen4_instdone_bit(ILK_AM_DONE, "AM"); init_g4x_instdone1(); } else if (IS_GEN4(devid)) { gen4_instdone_bit(I965_ROW_0_EU_0_DONE, "Row 0, EU 0"); gen4_instdone_bit(I965_ROW_0_EU_1_DONE, "Row 0, EU 1"); gen4_instdone_bit(I965_ROW_0_EU_2_DONE, "Row 0, EU 2"); gen4_instdone_bit(I965_ROW_0_EU_3_DONE, "Row 0, EU 3"); gen4_instdone_bit(I965_ROW_1_EU_0_DONE, "Row 1, EU 0"); gen4_instdone_bit(I965_ROW_1_EU_1_DONE, "Row 1, EU 1"); gen4_instdone_bit(I965_ROW_1_EU_2_DONE, "Row 1, EU 2"); gen4_instdone_bit(I965_ROW_1_EU_3_DONE, "Row 1, EU 3"); gen4_instdone_bit(I965_SF_DONE, "Strips and Fans"); gen4_instdone_bit(I965_SE_DONE, "Setup Engine"); gen4_instdone_bit(I965_WM_DONE, "Windowizer"); gen4_instdone_bit(I965_DISPATCHER_DONE, "Dispatcher"); gen4_instdone_bit(I965_PROJECTION_DONE, "Projection and LOD"); gen4_instdone_bit(I965_DG_DONE, "Dependent address generator"); gen4_instdone_bit(I965_QUAD_CACHE_DONE, "Texture fetch"); gen4_instdone_bit(I965_TEXTURE_FETCH_DONE, "Texture fetch"); gen4_instdone_bit(I965_TEXTURE_DECOMPRESS_DONE, "Texture decompress"); gen4_instdone_bit(I965_SAMPLER_CACHE_DONE, "Sampler cache"); 
gen4_instdone_bit(I965_FILTER_DONE, "Filtering"); gen4_instdone_bit(I965_BYPASS_DONE, "Bypass FIFO"); gen4_instdone_bit(I965_PS_DONE, "Pixel shader"); gen4_instdone_bit(I965_CC_DONE, "Color calculator"); gen4_instdone_bit(I965_MAP_FILTER_DONE, "Map filter"); gen4_instdone_bit(I965_MAP_L2_IDLE, "Map L2"); gen4_instdone_bit(I965_MA_ROW_0_DONE, "Message Arbiter row 0"); gen4_instdone_bit(I965_MA_ROW_1_DONE, "Message Arbiter row 1"); gen4_instdone_bit(I965_IC_ROW_0_DONE, "Instruction cache row 0"); gen4_instdone_bit(I965_IC_ROW_1_DONE, "Instruction cache row 1"); gen4_instdone_bit(I965_CP_DONE, "Command Processor"); if (IS_G4X(devid)) { init_g4x_instdone1(); } else { init_g965_instdone1(); } } else if (IS_GEN3(devid)) { gen3_instdone_bit(IDCT_DONE, "IDCT"); gen3_instdone_bit(IQ_DONE, "IQ"); gen3_instdone_bit(PR_DONE, "PR"); gen3_instdone_bit(VLD_DONE, "VLD"); gen3_instdone_bit(IP_DONE, "Instruction parser"); gen3_instdone_bit(FBC_DONE, "Framebuffer Compression"); gen3_instdone_bit(BINNER_DONE, "Binner"); gen3_instdone_bit(SF_DONE, "Strips and fans"); gen3_instdone_bit(SE_DONE, "Setup engine"); gen3_instdone_bit(WM_DONE, "Windowizer"); gen3_instdone_bit(IZ_DONE, "Intermediate Z"); gen3_instdone_bit(PERSPECTIVE_INTERP_DONE, "Perspective interpolation"); gen3_instdone_bit(DISPATCHER_DONE, "Dispatcher"); gen3_instdone_bit(PROJECTION_DONE, "Projection and LOD"); gen3_instdone_bit(DEPENDENT_ADDRESS_DONE, "Dependent address calculation"); gen3_instdone_bit(TEXTURE_FETCH_DONE, "Texture fetch"); gen3_instdone_bit(TEXTURE_DECOMPRESS_DONE, "Texture decompression"); gen3_instdone_bit(SAMPLER_CACHE_DONE, "Sampler Cache"); gen3_instdone_bit(FILTER_DONE, "Filtering"); gen3_instdone_bit(BYPASS_FIFO_DONE, "Bypass FIFO"); gen3_instdone_bit(PS_DONE, "Pixel shader"); gen3_instdone_bit(CC_DONE, "Color calculator"); gen3_instdone_bit(MAP_FILTER_DONE, "Map filter"); gen3_instdone_bit(MAP_L2_IDLE, "Map L2"); } else { assert(IS_GEN2(devid)); gen3_instdone_bit(I830_GMBUS_DONE, "GMBUS"); 
gen3_instdone_bit(I830_FBC_DONE, "FBC"); gen3_instdone_bit(I830_BINNER_DONE, "BINNER"); gen3_instdone_bit(I830_MPEG_DONE, "MPEG"); gen3_instdone_bit(I830_MECO_DONE, "MECO"); gen3_instdone_bit(I830_MCD_DONE, "MCD"); gen3_instdone_bit(I830_MCSTP_DONE, "MCSTP"); gen3_instdone_bit(I830_CC_DONE, "CC"); gen3_instdone_bit(I830_DG_DONE, "DG"); gen3_instdone_bit(I830_DCMP_DONE, "DCMP"); gen3_instdone_bit(I830_FTCH_DONE, "FTCH"); gen3_instdone_bit(I830_IT_DONE, "IT"); gen3_instdone_bit(I830_MG_DONE, "MG"); gen3_instdone_bit(I830_MEC_DONE, "MEC"); gen3_instdone_bit(I830_PC_DONE, "PC"); gen3_instdone_bit(I830_QCC_DONE, "QCC"); gen3_instdone_bit(I830_TB_DONE, "TB"); gen3_instdone_bit(I830_WM_DONE, "WM"); gen3_instdone_bit(I830_EF_DONE, "EF"); gen3_instdone_bit(I830_BLITTER_DONE, "Blitter"); gen3_instdone_bit(I830_MAP_L2_DONE, "Map L2 cache"); gen3_instdone_bit(I830_SECONDARY_RING_3_DONE, "Secondary ring 3"); gen3_instdone_bit(I830_SECONDARY_RING_2_DONE, "Secondary ring 2"); gen3_instdone_bit(I830_SECONDARY_RING_1_DONE, "Secondary ring 1"); gen3_instdone_bit(I830_SECONDARY_RING_0_DONE, "Secondary ring 0"); gen3_instdone_bit(I830_PRIMARY_RING_1_DONE, "Primary ring 1"); gen3_instdone_bit(I830_PRIMARY_RING_0_DONE, "Primary ring 0"); } }