Example 1
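/* Undo the MCHBAR enable done at setup time (if requested) and release any reserved MCHBAR resource. */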
static void
intel_teardown_mchbar(struct drm_device *dev, bool disable)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct pci_dev *bridge_dev;
    int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
    u32 temp;

    bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
    if (!bridge_dev) {
        DRM_DEBUG("no bridge dev?!\n");
        return;
    }

    if (disable) {
        if (IS_I915G(dev) || IS_I915GM(dev)) {
            pci_read_config_dword(bridge_dev, DEVEN_REG, &temp);
            temp &= ~DEVEN_MCHBAR_EN;
            pci_write_config_dword(bridge_dev, DEVEN_REG, temp);
        } else {
            pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
            temp &= ~1;
            pci_write_config_dword(bridge_dev, mchbar_reg, temp);
        }
    }

    /* Drop the bridge device reference taken by pci_get_bus_and_slot(). */
    pci_dev_put(bridge_dev);

    if (dev_priv->mch_res.start)
        release_resource(&dev_priv->mch_res);
}
Example 2
/* Setup MCHBAR if possible, return true if we should disable it again */
static bool
intel_setup_mchbar(struct drm_device *dev)
{
    struct pci_dev *bridge_dev;
    int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
    u32 temp;
    bool need_disable = false, enabled;

    bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
    if (!bridge_dev) {
        DRM_DEBUG("no bridge dev?!\n");
        goto out;
    }

    if (IS_I915G(dev) || IS_I915GM(dev)) {
        pci_read_config_dword(bridge_dev, DEVEN_REG, &temp);
        enabled = !!(temp & DEVEN_MCHBAR_EN);
    } else {
        pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
        enabled = temp & 1;
    }

    /* If it's already enabled, don't have to do anything */
    if (enabled)
        goto out_put;

    if (intel_alloc_mchbar_resource(dev))
        goto out_put;

    need_disable = true;

    /* Space is allocated or reserved, so enable it. */
    if (IS_I915G(dev) || IS_I915GM(dev)) {
        pci_write_config_dword(bridge_dev, DEVEN_REG,
                               temp | DEVEN_MCHBAR_EN);
    } else {
        pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
        pci_write_config_dword(bridge_dev, mchbar_reg, temp | 1);
    }
out_put:
    pci_dev_put(bridge_dev);
out:
    return need_disable;
}
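The bool returned above pairs with the intel_teardown_mchbar() variant from Example 1: the caller stashes the flag at load time and passes it back at unload. Below is a minimal usage sketch of that pairing; the load/unload helpers and the saved flag are illustrative names, not taken from the driver.

/*
 * Illustrative sketch only: how the bool contract of intel_setup_mchbar()
 * might be consumed.  'example_load'/'example_unload' and the saved flag
 * are hypothetical names.
 */
static bool mchbar_need_disable;

static int example_load(struct drm_device *dev)
{
    /* Remember whether we were the ones to enable MCHBAR. */
    mchbar_need_disable = intel_setup_mchbar(dev);
    return 0;
}

static void example_unload(struct drm_device *dev)
{
    /* Only disable MCHBAR again if we enabled it ourselves. */
    intel_teardown_mchbar(dev, mchbar_need_disable);
}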
Example 3
/* Setup MCHBAR if possible; note in dev_priv->mchbar_need_disable whether it has to be disabled again at teardown */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}
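This variant no longer returns anything; the state lives in dev_priv->mchbar_need_disable. The following is a hedged sketch of the matching teardown under that assumption, mirroring the register writes of the earlier examples.

/* Sketch of the teardown counterpart for the void variant above; it is
 * assumed to consult dev_priv->mchbar_need_disable rather than take a
 * 'disable' argument.
 */
static void
intel_teardown_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}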
Example 4
/* Setup MCHBAR if possible, return true if we should disable it again */
static bool
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool need_disable = false, enabled;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		goto out;

	if (intel_alloc_mchbar_resource(dev))
		goto out;

	need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
out:
	return need_disable;
}
Example 5
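/* Disable MCHBAR again if we enabled it at setup time, and release the reserved resource. */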
static void
intel_teardown_mchbar(struct drm_device *dev, bool disable)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}
Example 6
/*
 * We take the trinary state returned from intel_setup_mchbar() and clean
 * up after it.
 */
void
intel_teardown_mchbar(struct inteldrm_softc *dev_priv,
    struct pci_attach_args *bpa, int disable)
{
	struct drm_device	*dev = (struct drm_device *)dev_priv->drmdev;
	u_int64_t		 mchbar_addr;
	pcireg_t		 tmp, low, high = 0;
	int			 reg;

	reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	switch (disable) {
	case 2:
		if (INTEL_INFO(dev)->gen >= 4)
			high = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg + 4);
		low = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg);
		mchbar_addr = ((u_int64_t)high << 32) | low;
		if (bpa->pa_memex)
			extent_free(bpa->pa_memex, mchbar_addr, MCHBAR_SIZE, 0);
		/* FALLTHROUGH */
	case 1:
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			tmp = pci_conf_read(bpa->pa_pc, bpa->pa_tag, DEVEN_REG);
			tmp &= ~DEVEN_MCHBAR_EN;
			pci_conf_write(bpa->pa_pc, bpa->pa_tag, DEVEN_REG, tmp);
		} else {
			tmp = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg);
			tmp &= ~1;
			pci_conf_write(bpa->pa_pc, bpa->pa_tag, reg, tmp);
		}
		break;
	case 0:
	default:
		break;
	}
}
Example 7
/**
 * Detects bit 6 swizzling of address lookup between IGD access and CPU
 * access through main memory.
 */
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;

	if (!IS_I9XX(dev)) {
		/* As far as we know, the 865 doesn't have these bit 6
		 * swizzling issues.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev) ||
		   IS_GM45(dev)) {
		uint32_t dcc;

		/* On 915-945 and GM965, channel interleave by the CPU is
		 * determined by DCC.  The CPU will alternate based on bit 6
		 * in interleaved mode, and the GPU will then also alternate
		 * on bit 6, 9, and 10 for X, but the CPU may also optionally
		 * alternate based on bit 17 (XOR not disabled and XOR
		 * bit == 17).
		 */
		dcc = I915_READ(DCC);
		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			break;
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
			if (IS_I915G(dev) || IS_I915GM(dev) ||
			    dcc & DCC_CHANNEL_XOR_DISABLE) {
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else if ((IS_I965GM(dev) || IS_GM45(dev)) &&
				   (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
				/* GM965/GM45 does either bit 11 or bit 17
				 * swizzling.
				 */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
			} else {
				/* Bit 17 or perhaps other swizzling */
				swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
				swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
			}
			break;
		}
		if (dcc == 0xffffffff) {
			DRM_ERROR("Couldn't read from MCHBAR.  "
				  "Disabling tiling.\n");
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}
	} else {
		/* The 965, G33, and newer, have a very flexible memory
		 * configuration.  It will enable dual-channel mode
		 * (interleaving) on as much memory as it can, and the GPU
		 * will additionally sometimes enable different bit 6
		 * swizzling for tiled objects from the CPU.
		 *
		 * Here's what I found on the G965:
		 *    slot fill         memory size  swizzling
		 * 0A   0B   1A   1B    1-ch   2-ch
		 * 512  0    0    0     512    0     O
		 * 512  0    512  0     16     1008  X
		 * 512  0    0    512   16     1008  X
		 * 0    512  0    512   16     1008  X
		 * 1024 1024 1024 0     2048   1024  O
		 *
		 * We could probably detect this based on either the DRB
		 * matching, which was the case for the swizzling required in
		 * the table above, or from the 1-ch value being less than
		 * the minimum size of a rank.
		 */
		if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
		} else {
			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
			swizzle_y = I915_BIT_6_SWIZZLE_9;
		}
	}

	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}
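The swizzle constants recorded above name which higher address bits get XORed into bit 6 of a CPU address when tiled memory is accessed (e.g. SWIZZLE_9_10 means bits 9 and 10 are folded into bit 6). Below is a small illustrative helper, not taken from the driver, showing how such a mode would be applied to a linear byte offset.

/* Illustrative helper (hypothetical name): fold the bits named by the
 * detected swizzle mode into bit 6 of a linear byte offset.  Only the
 * modes that appear in the example above are handled.
 */
static uint32_t example_apply_swizzle(uint32_t offset, uint32_t mode)
{
	uint32_t bit6 = (offset >> 6) & 1;

	switch (mode) {
	case I915_BIT_6_SWIZZLE_9:
		bit6 ^= (offset >> 9) & 1;
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		bit6 ^= ((offset >> 9) & 1) ^ ((offset >> 10) & 1);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		bit6 ^= ((offset >> 9) & 1) ^ ((offset >> 11) & 1);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		bit6 ^= ((offset >> 9) & 1) ^ ((offset >> 10) & 1) ^
		    ((offset >> 11) & 1);
		break;
	case I915_BIT_6_SWIZZLE_NONE:
	default:
		return offset;
	}

	return (offset & ~(1u << 6)) | (bit6 << 6);
}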
Example 8
/*
 * Check that the MCHBAR on the host bridge is enabled, and if not allocate it.
 * We do not need to actually map it because we access the BAR through its
 * mirror on the IGD; however, if it is disabled or not allocated then
 * the mirror does not work. *sigh*.
 *
 * We return a trinary state:
 * 0 = already enabled, or cannot enable
 * 1 = enabled, needs disable
 * 2 = enabled, needs disable and free.
 */
int
intel_setup_mchbar(struct inteldrm_softc *dev_priv,
    struct pci_attach_args *bpa)
{
	struct drm_device	*dev = (struct drm_device *)dev_priv->drmdev;
	u_int64_t		 mchbar_addr;
	pcireg_t		 tmp, low, high = 0;
	u_long			 addr;
	int			 reg, ret = 1, enabled = 0;

	reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		tmp = pci_conf_read(bpa->pa_pc, bpa->pa_tag, DEVEN_REG);
		enabled = !!(tmp & DEVEN_MCHBAR_EN);
	} else {
		tmp = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg);
		enabled = tmp & 1;
	}

	if (enabled) {
		return (0);
	}

	if (INTEL_INFO(dev)->gen >= 4)
		high = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg + 4);
	low = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg);
	mchbar_addr = ((u_int64_t)high << 32) | low;

	/*
	 * XXX need to check to see if it's allocated in the pci resources,
	 * right now we just check to see if there's any address there
	 *
	 * if there's no address, then we allocate one.
	 * note that we can't just use pci_mapreg_map here since some intel
	 * BARs are special in that they set bit 0 to show they're enabled,
	 * this is not handled by generic pci code.
	 */
	if (mchbar_addr == 0) {
		addr = (u_long)mchbar_addr;
		if (bpa->pa_memex == NULL || extent_alloc(bpa->pa_memex,
		    MCHBAR_SIZE, MCHBAR_SIZE, 0, 0, 0, &addr)) {
			return (0); /* just say we don't need to disable */
		} else {
			mchbar_addr = addr;
			ret = 2;
			/* We've allocated it, now fill in the BAR again */
			if (INTEL_INFO(dev)->gen >= 4)
				pci_conf_write(bpa->pa_pc, bpa->pa_tag,
				    reg + 4, upper_32_bits(mchbar_addr));
			pci_conf_write(bpa->pa_pc, bpa->pa_tag,
			    reg, mchbar_addr & 0xffffffff);
		}
	}
	/* set the enable bit */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_conf_write(bpa->pa_pc, bpa->pa_tag, DEVEN_REG,
		    tmp | DEVEN_MCHBAR_EN);
	} else {
		tmp = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg);
		pci_conf_write(bpa->pa_pc, bpa->pa_tag, reg, tmp | 1);
	}

	return (ret);
}
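Taken together with Example 6, the trinary contract reads: the attach path stores the value returned here and the detach path hands it back, so teardown knows whether to just disable MCHBAR or also free the extent it allocated. The sketch below shows that pairing; the attach/detach wrappers and the saved 'mchbar_state' field are hypothetical names, not from the driver.

/* Hypothetical attach/detach wrappers showing how the trinary state is
 * carried from intel_setup_mchbar() to intel_teardown_mchbar(); the
 * 'mchbar_state' member is illustrative only.
 */
void
example_attach(struct inteldrm_softc *dev_priv, struct pci_attach_args *bpa)
{
	/* 0 = leave alone, 1 = disable on detach, 2 = disable and free. */
	dev_priv->mchbar_state = intel_setup_mchbar(dev_priv, bpa);
}

void
example_detach(struct inteldrm_softc *dev_priv, struct pci_attach_args *bpa)
{
	intel_teardown_mchbar(dev_priv, bpa, dev_priv->mchbar_state);
}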