Code example #1
int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
	u16 reg16 = 0;
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return -EIO;

	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!pos)
		return -EIO;

	pci_read_config_word(dev, pos+PCI_EXP_DEVCTL, &reg16);
	reg16 = reg16 |
		PCI_EXP_DEVCTL_CERE |
		PCI_EXP_DEVCTL_NFERE |
		PCI_EXP_DEVCTL_FERE |
		PCI_EXP_DEVCTL_URRE;
	pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, reg16);

	return 0;
}
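For context, here is a caller-side sketch of how a driver's probe path might use the helper above; the function name my_driver_probe and its error handling are illustrative assumptions, not taken from any of the projects listed on this page.

static int my_driver_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	/* Best effort: the helper returns -EIO when the device lacks the
	 * PCIe or AER capability, which most drivers treat as non-fatal. */
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc)
		dev_info(&pdev->dev, "PCIe error reporting not enabled: %d\n", rc);

	return 0;
}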
Code example #2
File: i460-agp.c  Project: andi34/Dhollmen_Kernel
static int __devinit agp_intel_i460_probe(struct pci_dev *pdev,
					  const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	u8 cap_ptr;

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
	if (!cap_ptr)
		return -ENODEV;

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->driver = &intel_i460_driver;
	bridge->dev = pdev;
	bridge->capndx = cap_ptr;

	printk(KERN_INFO PFX "Detected Intel 460GX chipset\n");

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}
Code example #3
File: sis-agp.c  Project: ManiacTwister/linux-hnd
static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode)
{
	struct pci_dev *device = NULL;
	u32 command;
	int rate;

	printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n",
		agp_bridge->major_version,
		agp_bridge->minor_version,
		pci_name(agp_bridge->dev));

	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + PCI_AGP_STATUS, &command);
	command = agp_collect_device_status(bridge, mode, command);
	command |= AGPSTAT_AGP_ENABLE;
	rate = (command & 0x7) << 2;

	for_each_pci_dev(device) {
		u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (!agp)
			continue;

		printk(KERN_INFO PFX "Putting AGP V3 device at %s into %dx mode\n",
			pci_name(device), rate);

		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, command);

		/*
		 * Weird: on some sis chipsets any rate change in the target
		 * command register triggers a 5ms screwup during which the master
		 * cannot be configured
		 */
		if (device->device == bridge->dev->device) {
			printk(KERN_INFO PFX "SiS delay workaround: giving bridge time to recover.\n");
			msleep(10);
		}
	}
}
Code example #4
/* called after powerup, by probe or system-pm "wakeup" */
static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
{
	u32			temp;
	int			retval;

	/* optional debug port, normally in the first BAR */
	temp = pci_find_capability(pdev, 0x0a);
	if (temp) {
		pci_read_config_dword(pdev, temp, &temp);
		temp >>= 16;
		if ((temp & (3 << 13)) == (1 << 13)) {
			temp &= 0x1fff;
			ehci->debug = ehci_to_hcd(ehci)->regs + temp;
			temp = ehci_readl(ehci, &ehci->debug->control);
			ehci_info(ehci, "debug port %d%s\n",
				HCS_DEBUG_PORT(ehci->hcs_params),
				(temp & DBGP_ENABLED)
					? " IN USE"
					: "");
			if (!(temp & DBGP_ENABLED))
				ehci->debug = NULL;
		}
	}

	/* we expect static quirk code to handle the "extended capabilities"
	 * (currently just BIOS handoff) allowed starting with EHCI 0.96
	 */

	/* PCI Memory-Write-Invalidate cycle support is optional (uncommon) */
	retval = pci_set_mwi(pdev);
	if (!retval)
		ehci_dbg(ehci, "MWI active\n");

	ehci_port_power(ehci, 0);

	return 0;
}
Code example #5
/**
 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
		pcix_set_mmrbc(ha->pdev, 2048);

	/* PCIe -- adjust Maximum Read Request Size (2048). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 2048);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
Code example #6
File: msi.c  Project: xf739645524/kernel-rhel5
int pci_save_msi_state(struct pci_dev *dev)
{
	int pos, i = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos <= 0 || dev->no_msi)
		return 0;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return 0;

	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
		GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
		return -ENOMEM;
	}
	cap = &save_state->data[0];

	pci_read_config_dword(dev, pos, &cap[i++]);
	control = cap[0] >> 16;
	pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
	} else
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
	save_state->cap_nr = PCI_CAP_ID_MSI;
	pci_add_saved_cap(dev, save_state);
	return 0;
}
Code example #7
File: intel-agp.c  Project: mpcdata/tnetv107x-usb
static int __devinit agp_intel_probe(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	u8 cap_ptr = 0;
	struct resource *r;
	int i, err;

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->capndx = cap_ptr;

	if (intel_gmch_probe(pdev, bridge))
		goto found_gmch;

	for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
		/* In case that multiple models of gfx chip may
		   stand on same host bridge type, this can be
		   sure we detect the right IGD. */
		if (pdev->device == intel_agp_chipsets[i].chip_id) {
			bridge->driver = intel_agp_chipsets[i].driver;
			break;
		}
	}

	if (intel_agp_chipsets[i].name == NULL) {
		if (cap_ptr)
			dev_warn(&pdev->dev, "unsupported Intel chipset [%04x/%04x]\n",
				 pdev->vendor, pdev->device);
		agp_put_bridge(bridge);
		return -ENODEV;
	}

	if (!bridge->driver) {
		if (cap_ptr)
			dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n",
			    	 intel_agp_chipsets[i].gmch_chip_id);
		agp_put_bridge(bridge);
		return -ENODEV;
	}

	bridge->dev = pdev;
	bridge->dev_private_data = NULL;

	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);

	/*
	* If the device has not been properly setup, the following will catch
	* the problem and should stop the system from crashing.
	* 20030610 - [email protected]
	*/
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "can't enable PCI device\n");
		agp_put_bridge(bridge);
		return -ENODEV;
	}

	/*
	* The following fixes the case where the BIOS has "forgotten" to
	* provide an address range for the GART.
	* 20030610 - [email protected]
	*/
	r = &pdev->resource[0];
	if (!r->start && r->end) {
		if (pci_assign_resource(pdev, 0)) {
			dev_err(&pdev->dev, "can't assign resource 0\n");
			agp_put_bridge(bridge);
			return -ENODEV;
		}
	}

	/* Fill in the mode register */
	if (cap_ptr) {
		pci_read_config_dword(pdev,
				bridge->capndx+PCI_AGP_STATUS,
				&bridge->mode);
	}

found_gmch:
	pci_set_drvdata(pdev, bridge);
	err = agp_add_bridge(bridge);
	if (!err)
		intel_agp_enabled = 1;
	return err;
}
Code example #8
File: ali-agp.c  Project: 1703011/asuswrt-merlin
static int __devinit agp_ali_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct agp_device_ids *devs = ali_agp_device_ids;
	struct agp_bridge_data *bridge;
	u8 hidden_1621_id, cap_ptr;
	int j;

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
	if (!cap_ptr)
		return -ENODEV;

	/* probe for known chipsets */
	for (j = 0; devs[j].chipset_name; j++) {
		if (pdev->device == devs[j].device_id)
			goto found;
	}

	dev_err(&pdev->dev, "unsupported ALi chipset [%04x/%04x]\n",
		pdev->vendor, pdev->device);
	return -ENODEV;


found:
	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->dev = pdev;
	bridge->capndx = cap_ptr;

	switch (pdev->device) {
	case PCI_DEVICE_ID_AL_M1541:
		bridge->driver = &ali_m1541_bridge;
		break;
	case PCI_DEVICE_ID_AL_M1621:
		pci_read_config_byte(pdev, 0xFB, &hidden_1621_id);
		switch (hidden_1621_id) {
		case 0x31:
			devs[j].chipset_name = "M1631";
			break;
		case 0x32:
			devs[j].chipset_name = "M1632";
			break;
		case 0x41:
			devs[j].chipset_name = "M1641";
			break;
		case 0x43:
			devs[j].chipset_name = "M1621";
			break;
		case 0x47:
			devs[j].chipset_name = "M1647";
			break;
		case 0x51:
			devs[j].chipset_name = "M1651";
			break;
		default:
			break;
		}
		/*FALLTHROUGH*/
	default:
		bridge->driver = &ali_generic_bridge;
	}

	dev_info(&pdev->dev, "ALi %s chipset\n", devs[j].chipset_name);

	/* Fill in the mode register */
	pci_read_config_dword(pdev,
			bridge->capndx+PCI_AGP_STATUS,
			&bridge->mode);

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}
Code example #9
/*
 * Called to perform platform specific PCI setup
 */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	uint16_t config;
	uint32_t dconfig;
	int pos;
	/*
	 * Force the Cache line setting to 64 bytes. The standard
	 * Linux bus scan doesn't seem to set it. Octeon really has
	 * 128 byte lines, but Intel bridges get really upset if you
	 * try and set values above 64 bytes. Value is specified in
	 * 32bit words.
	 */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 64 / 4);
	/* Set latency timers for all devices */
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 48);

	/* Enable reporting System errors and parity errors on all devices */
	/* Enable parity checking and error reporting */
	pci_read_config_word(dev, PCI_COMMAND, &config);
	config |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
	pci_write_config_word(dev, PCI_COMMAND, config);

	if (dev->subordinate) {
		/* Set latency timers on sub bridges */
		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 48);
		/* More bridge error detection */
		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &config);
		config |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, config);
	}

	/* Enable the PCIe normal error reporting */
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (pos) {
		/* Update Device Control */
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &config);
		/* Correctable Error Reporting */
		config |= PCI_EXP_DEVCTL_CERE;
		/* Non-Fatal Error Reporting */
		config |= PCI_EXP_DEVCTL_NFERE;
		/* Fatal Error Reporting */
		config |= PCI_EXP_DEVCTL_FERE;
		/* Unsupported Request */
		config |= PCI_EXP_DEVCTL_URRE;
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, config);
	}

	/* Find the Advanced Error Reporting capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		/* Clear Uncorrectable Error Status */
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
				      &dconfig);
		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
				       dconfig);
		/* Enable reporting of all uncorrectable errors */
		/* Uncorrectable Error Mask - turned on bits disable errors */
		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, 0);
		/*
		 * Leave severity at HW default. This only controls if
		 * errors are reported as uncorrectable or
		 * correctable, not if the error is reported.
		 */
		/* PCI_ERR_UNCOR_SEVER - Uncorrectable Error Severity */
		/* Clear Correctable Error Status */
		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &dconfig);
		pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, dconfig);
		/* Enable reporting of all correctable errors */
		/* Correctable Error Mask - turned on bits disable errors */
		pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, 0);
		/* Advanced Error Capabilities */
		pci_read_config_dword(dev, pos + PCI_ERR_CAP, &dconfig);
		/* ECRC Generation Enable */
		if (dconfig & PCI_ERR_CAP_ECRC_GENC)
			dconfig |= PCI_ERR_CAP_ECRC_GENE;
		/* ECRC Check Enable */
		if (dconfig & PCI_ERR_CAP_ECRC_CHKC)
			dconfig |= PCI_ERR_CAP_ECRC_CHKE;
		pci_write_config_dword(dev, pos + PCI_ERR_CAP, dconfig);
		/* PCI_ERR_HEADER_LOG - Header Log Register (16 bytes) */
		/* Report all errors to the root complex */
		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND,
				       PCI_ERR_ROOT_CMD_COR_EN |
				       PCI_ERR_ROOT_CMD_NONFATAL_EN |
				       PCI_ERR_ROOT_CMD_FATAL_EN);
		/* Clear the Root status register */
		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &dconfig);
		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
	}

	dev->dev.archdata.dma_ops = octeon_pci_dma_map_ops;

	return 0;
}
Code example #10
int mthca_reset(struct mthca_dev *mdev)
{
	int i;
	int err = 0;
	u32 *hca_header    = NULL;
	u32 *bridge_header = NULL;
	struct pci_dev *bridge = NULL;
	int bridge_pcix_cap = 0;
	int hca_pcie_cap = 0;
	int hca_pcix_cap = 0;

	u16 devctl;
	u16 linkctl;

#define MTHCA_RESET_OFFSET 0xf0010
#define MTHCA_RESET_VALUE  swab32(1)

	/*
	 * Reset the chip.  This is somewhat ugly because we have to
	 * save off the PCI header before reset and then restore it
	 * after the chip reboots.  We skip config space offsets 22
	 * and 23 since those have a special meaning.
	 *
	 * To make matters worse, for Tavor (PCI-X HCA) we have to
	 * find the associated bridge device and save off its PCI
	 * header as well.
	 */

	if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE)) {
		/* Look for the bridge -- its device ID will be 2 more
		   than HCA's device ID. */
		while ((bridge = pci_get_device(mdev->pdev->vendor,
						mdev->pdev->device + 2,
						bridge)) != NULL) {
			if (bridge->hdr_type    == PCI_HEADER_TYPE_BRIDGE &&
			    bridge->subordinate == mdev->pdev->bus) {
				mthca_dbg(mdev, "Found bridge: %s\n",
					  pci_name(bridge));
				break;
			}
		}

		if (!bridge) {
			/*
			 * Didn't find a bridge for a Tavor device --
			 * assume we're in no-bridge mode and hope for
			 * the best.
			 */
			mthca_warn(mdev, "No bridge found for %s\n",
				  pci_name(mdev->pdev));
		}

	}

	/* For Arbel do we need to save off the full 4K PCI Express header?? */
	hca_header = kmalloc(256, GFP_KERNEL);
	if (!hca_header) {
		err = -ENOMEM;
		mthca_err(mdev, "Couldn't allocate memory to save HCA "
			  "PCI header, aborting.\n");
		goto out;
	}

	for (i = 0; i < 64; ++i) {
		if (i == 22 || i == 23)
			continue;
		if (pci_read_config_dword(mdev->pdev, i * 4, hca_header + i)) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't save HCA "
				  "PCI header, aborting.\n");
			goto out;
		}
	}

	hca_pcix_cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX);
	hca_pcie_cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_EXP);

	if (bridge) {
		bridge_header = kmalloc(256, GFP_KERNEL);
		if (!bridge_header) {
			err = -ENOMEM;
			mthca_err(mdev, "Couldn't allocate memory to save HCA "
				  "bridge PCI header, aborting.\n");
			goto out;
		}

		for (i = 0; i < 64; ++i) {
			if (i == 22 || i == 23)
				continue;
			if (pci_read_config_dword(bridge, i * 4, bridge_header + i)) {
				err = -ENODEV;
				mthca_err(mdev, "Couldn't save HCA bridge "
					  "PCI header, aborting.\n");
				goto out;
			}
		}
		bridge_pcix_cap = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
		if (!bridge_pcix_cap) {
				err = -ENODEV;
				mthca_err(mdev, "Couldn't locate HCA bridge "
					  "PCI-X capability, aborting.\n");
				goto out;
		}
	}

	/* actually hit reset */
	{
		void __iomem *reset = ioremap(pci_resource_start(mdev->pdev, 0) +
					      MTHCA_RESET_OFFSET, 4);

		if (!reset) {
			err = -ENOMEM;
			mthca_err(mdev, "Couldn't map HCA reset register, "
				  "aborting.\n");
			goto out;
		}

		writel(MTHCA_RESET_VALUE, reset);
		iounmap(reset);
	}

	/* Docs say to wait one second before accessing device */
	msleep(1000);

	/* Now wait for PCI device to start responding again */
	{
		u32 v;
		int c = 0;

		for (c = 0; c < 100; ++c) {
			if (pci_read_config_dword(bridge ? bridge : mdev->pdev, 0, &v)) {
				err = -ENODEV;
				mthca_err(mdev, "Couldn't access HCA after reset, "
					  "aborting.\n");
				goto out;
			}

			if (v != 0xffffffff)
				goto good;

			msleep(100);
		}

		err = -ENODEV;
		mthca_err(mdev, "PCI device did not come back after reset, "
			  "aborting.\n");
		goto out;
	}

good:
	/* Now restore the PCI headers */
	if (bridge) {
		if (pci_write_config_dword(bridge, bridge_pcix_cap + 0x8,
				 bridge_header[(bridge_pcix_cap + 0x8) / 4])) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA bridge Upstream "
				  "split transaction control, aborting.\n");
			goto out;
		}
		if (pci_write_config_dword(bridge, bridge_pcix_cap + 0xc,
				 bridge_header[(bridge_pcix_cap + 0xc) / 4])) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA bridge Downstream "
				  "split transaction control, aborting.\n");
			goto out;
		}
		/*
		 * Bridge control register is at 0x3e, so we'll
		 * naturally restore it last in this loop.
		 */
		for (i = 0; i < 16; ++i) {
			if (i * 4 == PCI_COMMAND)
				continue;

			if (pci_write_config_dword(bridge, i * 4, bridge_header[i])) {
				err = -ENODEV;
				mthca_err(mdev, "Couldn't restore HCA bridge reg %x, "
					  "aborting.\n", i);
				goto out;
			}
		}

		if (pci_write_config_dword(bridge, PCI_COMMAND,
					   bridge_header[PCI_COMMAND / 4])) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA bridge COMMAND, "
				  "aborting.\n");
			goto out;
		}
	}

	if (hca_pcix_cap) {
		if (pci_write_config_dword(mdev->pdev, hca_pcix_cap,
				 hca_header[hca_pcix_cap / 4])) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA PCI-X "
				  "command register, aborting.\n");
			goto out;
		}
	}

	if (hca_pcie_cap) {
		devctl = hca_header[(hca_pcie_cap + PCI_EXP_DEVCTL) / 4];
		if (pci_write_config_word(mdev->pdev, hca_pcie_cap + PCI_EXP_DEVCTL,
					   devctl)) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA PCI Express "
				  "Device Control register, aborting.\n");
			goto out;
		}
		linkctl = hca_header[(hca_pcie_cap + PCI_EXP_LNKCTL) / 4];
		if (pci_write_config_word(mdev->pdev, hca_pcie_cap + PCI_EXP_LNKCTL,
					   linkctl)) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA PCI Express "
				  "Link control register, aborting.\n");
			goto out;
		}
	}

	for (i = 0; i < 16; ++i) {
		if (i * 4 == PCI_COMMAND)
			continue;

		if (pci_write_config_dword(mdev->pdev, i * 4, hca_header[i])) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA reg %x, "
				  "aborting.\n", i);
			goto out;
		}
	}

	if (pci_write_config_dword(mdev->pdev, PCI_COMMAND,
				   hca_header[PCI_COMMAND / 4])) {
		err = -ENODEV;
		mthca_err(mdev, "Couldn't restore HCA COMMAND, "
			  "aborting.\n");
		goto out;
	}

out:
	if (bridge)
		pci_dev_put(bridge);
	kfree(bridge_header);
	kfree(hca_header);

	return err;
}
Code example #11
File: rt_rbus_pci_util.c  Project: 23171580/ralink
/*
========================================================================
Routine Description:
	Query for devices' capabilities.

Arguments:
	pDev			- PCI device
	Cap				- Capability code

Return Value:
	Offset of the capability within PCI configuration space, or 0 if the capability is not present

Note:
========================================================================
*/
int RtmpOsPciFindCapability(VOID *pDev, INT Cap)
{
	return pci_find_capability(pDev, Cap);
}
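A minimal usage sketch of the wrapper above (the helper rt_has_pm_cap and the capability chosen are illustrative, not part of the Ralink driver): like pci_find_capability() itself, it returns the capability's offset in configuration space, or 0 when the capability is absent.

static int rt_has_pm_cap(struct pci_dev *pdev)
{
	/* non-zero means the Power Management capability was found */
	return RtmpOsPciFindCapability(pdev, PCI_CAP_ID_PM) != 0;
}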
Code example #12
File: hcd-pci.c  Project: PennPanda/linux-repo
/**
 * usb_hcd_pci_suspend - power management suspend of a PCI-based HCD
 * @dev: USB Host Controller being suspended
 * @message: semantics in flux
 *
 * Store this function in the HCD's struct pci_driver as suspend().
 */
int usb_hcd_pci_suspend (struct pci_dev *dev, pm_message_t message)
{
	struct usb_hcd		*hcd;
	int			retval = 0;
	int			has_pci_pm;

	hcd = pci_get_drvdata(dev);

	/* Root hub suspend should have stopped all downstream traffic,
	 * and all bus master traffic.  And done so for both the interface
	 * and the stub usb_device (which we check here).  But maybe it
	 * didn't; writing sysfs power/state files ignores such rules...
	 *
	 * We must ignore the FREEZE vs SUSPEND distinction here, because
	 * otherwise the swsusp will save (and restore) garbage state.
	 */
	if (!(hcd->state == HC_STATE_SUSPENDED ||
			hcd->state == HC_STATE_HALT))
		return -EBUSY;

	if (hcd->driver->suspend) {
		retval = hcd->driver->suspend(hcd, message);
		suspend_report_result(hcd->driver->suspend, retval);
		if (retval)
			goto done;
	}
	synchronize_irq(dev->irq);

	/* FIXME until the generic PM interfaces change a lot more, this
	 * can't use PCI D1 and D2 states.  For example, the confusion
	 * between messages and states will need to vanish, and messages
	 * will need to provide a target system state again.
	 *
	 * It'll be important to learn characteristics of the target state,
	 * especially on embedded hardware where the HCD will often be in
	 * charge of an external VBUS power supply and one or more clocks.
	 * Some target system states will leave them active; others won't.
	 * (With PCI, that's often handled by platform BIOS code.)
	 */

	/* even when the PCI layer rejects some of the PCI calls
	 * below, HCs can try global suspend and reduce DMA traffic.
	 * PM-sensitive HCDs may already have done this.
	 */
	has_pci_pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	/* Downstream ports from this root hub should already be quiesced, so
	 * there will be no DMA activity.  Now we can shut down the upstream
	 * link (except maybe for PME# resume signaling) and enter some PCI
	 * low power state, if the hardware allows.
	 */
	if (hcd->state == HC_STATE_SUSPENDED) {

		/* no DMA or IRQs except when HC is active */
		if (dev->current_state == PCI_D0) {
			pci_save_state (dev);
			pci_disable_device (dev);
		}

		if (!has_pci_pm) {
			dev_dbg (hcd->self.controller, "--> PCI D0/legacy\n");
			goto done;
		}

		/* NOTE:  dev->current_state becomes nonzero only here, and
		 * only for devices that support PCI PM.  Also, exiting
		 * PCI_D3 (but not PCI_D1 or PCI_D2) is allowed to reset
		 * some device state (e.g. as part of clock reinit).
		 */
		retval = pci_set_power_state (dev, PCI_D3hot);
		suspend_report_result(pci_set_power_state, retval);
		if (retval == 0) {
			int wake = device_can_wakeup(&hcd->self.root_hub->dev);

			wake = wake && device_may_wakeup(hcd->self.controller);

			dev_dbg (hcd->self.controller, "--> PCI D3%s\n",
					wake ? "/wakeup" : "");

			/* Ignore these return values.  We rely on pci code to
			 * reject requests the hardware can't implement, rather
			 * than coding the same thing.
			 */
			(void) pci_enable_wake (dev, PCI_D3hot, wake);
			(void) pci_enable_wake (dev, PCI_D3cold, wake);
		} else {
			dev_dbg (&dev->dev, "PCI D3 suspend fail, %d\n",
					retval);
			(void) usb_hcd_pci_resume (dev);
		}

	} else if (hcd->state != HC_STATE_HALT) {
		dev_dbg (hcd->self.controller, "hcd state %d; not suspended\n",
			hcd->state);
		WARN_ON(1);
		retval = -EINVAL;
	}

done:
	if (retval == 0) {
		dev->dev.power.power_state = PMSG_SUSPEND;

#ifdef CONFIG_PPC_PMAC
		/* Disable ASIC clocks for USB */
		if (machine_is(powermac)) {
			struct device_node	*of_node;

			of_node = pci_device_to_OF_node (dev);
			if (of_node)
				pmac_call_feature(PMAC_FTR_USB_ENABLE,
							of_node, 0, 0);
		}
#endif
	}

	return retval;
}
Code example #13
File: hcd-pci.c  Project: PennPanda/linux-repo
/**
 * usb_hcd_pci_resume - power management resume of a PCI-based HCD
 * @dev: USB Host Controller being resumed
 *
 * Store this function in the HCD's struct pci_driver as resume().
 */
int usb_hcd_pci_resume (struct pci_dev *dev)
{
	struct usb_hcd		*hcd;
	int			retval;

	hcd = pci_get_drvdata(dev);
	if (hcd->state != HC_STATE_SUSPENDED) {
		dev_dbg (hcd->self.controller, 
				"can't resume, not suspended!\n");
		return 0;
	}

#ifdef CONFIG_PPC_PMAC
	/* Reenable ASIC clocks for USB */
	if (machine_is(powermac)) {
		struct device_node *of_node;

		of_node = pci_device_to_OF_node (dev);
		if (of_node)
			pmac_call_feature (PMAC_FTR_USB_ENABLE,
						of_node, 0, 1);
	}
#endif

	/* NOTE:  chip docs cover clean "real suspend" cases (what Linux
	 * calls "standby", "suspend to RAM", and so on).  There are also
	 * dirty cases when swsusp fakes a suspend in "shutdown" mode.
	 */
	if (dev->current_state != PCI_D0) {
#ifdef	DEBUG
		int	pci_pm;
		u16	pmcr;

		pci_pm = pci_find_capability(dev, PCI_CAP_ID_PM);
		pci_read_config_word(dev, pci_pm + PCI_PM_CTRL, &pmcr);
		pmcr &= PCI_PM_CTRL_STATE_MASK;
		if (pmcr) {
			/* Clean case:  power to USB and to HC registers was
			 * maintained; remote wakeup is easy.
			 */
			dev_dbg(hcd->self.controller, "resume from PCI D%d\n",
					pmcr);
		} else {
			/* Clean:  HC lost Vcc power, D0 uninitialized
			 *   + Vaux may have preserved port and transceiver
			 *     state ... for remote wakeup from D3cold
			 *   + or not; HCD must reinit + re-enumerate
			 *
			 * Dirty: D0 semi-initialized cases with swsusp
			 *   + after BIOS init
			 *   + after Linux init (HCD statically linked)
			 */
			dev_dbg(hcd->self.controller,
				"PCI D0, from previous PCI D%d\n",
				dev->current_state);
		}
#endif
		/* yes, ignore these results too... */
		(void) pci_enable_wake (dev, dev->current_state, 0);
		(void) pci_enable_wake (dev, PCI_D3cold, 0);
	} else {
		/* Same basic cases: clean (powered/not), dirty */
		dev_dbg(hcd->self.controller, "PCI legacy resume\n");
	}

	/* NOTE:  the PCI API itself is asymmetric here.  We don't need to
	 * pci_set_power_state(PCI_D0) since that's part of re-enabling;
	 * but that won't re-enable bus mastering.  Yet pci_disable_device()
	 * explicitly disables bus mastering...
	 */
	retval = pci_enable_device (dev);
	if (retval < 0) {
		dev_err (hcd->self.controller,
			"can't re-enable after resume, %d!\n", retval);
		return retval;
	}
	pci_set_master (dev);
	pci_restore_state (dev);

	dev->dev.power.power_state = PMSG_ON;

	clear_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);

	if (hcd->driver->resume) {
		retval = hcd->driver->resume(hcd);
		if (retval) {
			dev_err (hcd->self.controller,
				"PCI post-resume error %d!\n", retval);
			usb_hc_died (hcd);
		}
	}

	return retval;
}
Code example #14
static int ehci_pci_setup(struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
	struct pci_dev		*p_smbus;
	u8			rev;
	u32			temp;
	int			retval;

	switch (pdev->vendor) {
	case PCI_VENDOR_ID_TOSHIBA_2:
		
		if (pdev->device == 0x01b5) {
#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
			ehci->big_endian_mmio = 1;
#else
			ehci_warn(ehci,
				  "unsupported big endian Toshiba quirk\n");
#endif
		}
		break;
	}

	ehci->caps = hcd->regs;
	ehci->regs = hcd->regs +
		HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));

	dbg_hcs_params(ehci, "reset");
	dbg_hcc_params(ehci, "reset");

	switch (pdev->vendor) {
	case PCI_VENDOR_ID_NVIDIA:
		switch (pdev->device) {
		case 0x003c:	
		case 0x005b:	
		case 0x00d8:	
		case 0x00e8:	
			if (pci_set_consistent_dma_mask(pdev,
						DMA_BIT_MASK(31)) < 0)
				ehci_warn(ehci, "can't enable NVidia "
					"workaround for >2GB RAM\n");
			break;
		}
		break;
	}

	/* cache this readonly data; minimize chip reads */
	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);

	retval = ehci_halt(ehci);
	if (retval)
		return retval;

	if ((pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x7808) ||
	    (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x4396)) {
		ehci->use_dummy_qh = 1;
		ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI "
				"dummy qh workaround\n");
	}

	
	retval = ehci_init(hcd);
	if (retval)
		return retval;

	switch (pdev->vendor) {
	case PCI_VENDOR_ID_NEC:
		ehci->need_io_watchdog = 0;
		break;
	case PCI_VENDOR_ID_INTEL:
		ehci->need_io_watchdog = 0;
		ehci->fs_i_thresh = 1;
		if (pdev->device == 0x27cc) {
			ehci->broken_periodic = 1;
			ehci_info(ehci, "using broken periodic workaround\n");
		}
		if (pdev->device == 0x0806 || pdev->device == 0x0811
				|| pdev->device == 0x0829) {
			ehci_info(ehci, "disable lpm for langwell/penwell\n");
			ehci->has_lpm = 0;
		}
		if (pdev->device == PCI_DEVICE_ID_INTEL_CE4100_USB) {
			hcd->has_tt = 1;
			tdi_reset(ehci);
		}
		if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) {
			
			if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) {
				ehci_info(ehci, "broken D3 during system sleep on ASUS\n");
				hcd->broken_pci_sleep = 1;
				device_set_wakeup_capable(&pdev->dev, false);
			}
		}
		break;
	case PCI_VENDOR_ID_TDI:
		if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
			hcd->has_tt = 1;
			tdi_reset(ehci);
		}
		break;
	case PCI_VENDOR_ID_AMD:
		
		if (usb_amd_find_chipset_info())
			ehci->amd_pll_fix = 1;
		
		if (pdev->device == 0x7463) {
			ehci_info(ehci, "ignoring AMD8111 (errata)\n");
			retval = -EIO;
			goto done;
		}
		break;
	case PCI_VENDOR_ID_NVIDIA:
		switch (pdev->device) {
		case 0x0068:
			if (pdev->revision < 0xa4)
				ehci->no_selective_suspend = 1;
			break;

		case 0x0d9d:
			ehci_info(ehci, "disable lpm/ppcd for nvidia mcp89");
			ehci->has_lpm = 0;
			ehci->has_ppcd = 0;
			ehci->command &= ~CMD_PPCEE;
			break;
		}
		break;
	case PCI_VENDOR_ID_VIA:
		if (pdev->device == 0x3104 && (pdev->revision & 0xf0) == 0x60) {
			u8 tmp;

			pci_read_config_byte(pdev, 0x4b, &tmp);
			if (tmp & 0x20)
				break;
			pci_write_config_byte(pdev, 0x4b, tmp | 0x20);
		}
		break;
	case PCI_VENDOR_ID_ATI:
		
		if (usb_amd_find_chipset_info())
			ehci->amd_pll_fix = 1;
		if ((pdev->device == 0x4386) || (pdev->device == 0x4396)) {
			p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
						 PCI_DEVICE_ID_ATI_SBX00_SMBUS,
						 NULL);
			if (!p_smbus)
				break;
			rev = p_smbus->revision;
			if ((pdev->device == 0x4386) || (rev == 0x3a)
			    || (rev == 0x3b)) {
				u8 tmp;
				ehci_info(ehci, "applying AMD SB600/SB700 USB "
					"freeze workaround\n");
				pci_read_config_byte(pdev, 0x53, &tmp);
				pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
			}
			pci_dev_put(p_smbus);
		}
		break;
	case PCI_VENDOR_ID_NETMOS:
		
		ehci_info(ehci, "applying MosChip frame-index workaround\n");
		ehci->frame_index_bug = 1;
		break;
	}

	/* optional debug port, normally in the first BAR */
	temp = pci_find_capability(pdev, 0x0a);
	if (temp) {
		pci_read_config_dword(pdev, temp, &temp);
		temp >>= 16;
		if ((temp & (3 << 13)) == (1 << 13)) {
			temp &= 0x1fff;
			ehci->debug = ehci_to_hcd(ehci)->regs + temp;
			temp = ehci_readl(ehci, &ehci->debug->control);
			ehci_info(ehci, "debug port %d%s\n",
				HCS_DEBUG_PORT(ehci->hcs_params),
				(temp & DBGP_ENABLED)
					? " IN USE"
					: "");
			if (!(temp & DBGP_ENABLED))
				ehci->debug = NULL;
		}
	}

	ehci_reset(ehci);

	
	temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params);
	temp &= 0x0f;
	if (temp && HCS_N_PORTS(ehci->hcs_params) > temp) {
		ehci_dbg(ehci, "bogus port configuration: "
			"cc=%d x pcc=%d < ports=%d\n",
			HCS_N_CC(ehci->hcs_params),
			HCS_N_PCC(ehci->hcs_params),
			HCS_N_PORTS(ehci->hcs_params));

		switch (pdev->vendor) {
		case 0x17a0:		
			
			temp |= (ehci->hcs_params & ~0xf);
			ehci->hcs_params = temp;
			break;
		case PCI_VENDOR_ID_NVIDIA:
			
			break;
		}
	}

	/* Serial Bus Release Number is at PCI 0x60 offset */
	pci_read_config_byte(pdev, 0x60, &ehci->sbrn);
	if (pdev->vendor == PCI_VENDOR_ID_STMICRO
	    && pdev->device == PCI_DEVICE_ID_STMICRO_USB_HOST)
		ehci->sbrn = 0x20; 

	if (!device_can_wakeup(&pdev->dev)) {
		u16	port_wake;

		pci_read_config_word(pdev, 0x62, &port_wake);
		if (port_wake & 0x0001) {
			dev_warn(&pdev->dev, "Enabling legacy PCI PM\n");
			device_set_wakeup_capable(&pdev->dev, 1);
		}
	}

#ifdef	CONFIG_USB_SUSPEND
	if (ehci->no_selective_suspend && device_can_wakeup(&pdev->dev))
		ehci_warn(ehci, "selective suspend/wakeup unavailable\n");
#endif

	ehci_port_power(ehci, 1);
	retval = ehci_pci_reinit(ehci, pdev);
done:
	return retval;
}
Code example #15
File: msi.c  Project: xf739645524/kernel-rhel5
static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
{
	struct msi_desc *entry;
	u32 address_hi, address_lo;
	unsigned int irq = vector;
	unsigned int dest_cpu = first_cpu(cpu_mask);
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);

	entry = (struct msi_desc *)msi_desc[vector];
	if (!entry || !entry->dev)
		goto out_unlock;

	if (entry->msi_attrib.state == 0)
		goto out_unlock;

	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		int pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI);

		if (!pos)
			goto out_unlock;

		pci_read_config_dword(entry->dev, msi_upper_address_reg(pos),
			&address_hi);
		pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
			&address_lo);

		msi_ops->target(vector, dest_cpu, &address_hi, &address_lo);

		pci_write_config_dword(entry->dev, msi_upper_address_reg(pos),
			address_hi);
		pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
			address_lo);
		set_native_irq_info(irq, cpu_mask);
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		int offset_hi =
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET;
		int offset_lo =
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET;

		address_hi = readl(entry->mask_base + offset_hi);
		address_lo = readl(entry->mask_base + offset_lo);

		msi_ops->target(vector, dest_cpu, &address_hi, &address_lo);

		writel(address_hi, entry->mask_base + offset_hi);
		writel(address_lo, entry->mask_base + offset_lo);
		set_native_irq_info(irq, cpu_mask);
		break;
	}
	default:
		break;
	}

out_unlock:
	spin_unlock_irqrestore(&msi_lock, flags);
}
Code example #16
File: msi.c  Project: xf739645524/kernel-rhel5
/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Setup the MSI-X capability structure of device function with a
 * single MSI-X vector. A return of zero indicates the successful setup of
 * requested MSI-X entries with allocated vectors or non-zero for otherwise.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
	u32 address_hi;
	u32 address_lo;
	u32 data;
	int status;
	int vector, pos, i, j, nr_entries, temp = 0;
	unsigned long phys_addr;
	u32 table_offset;
 	u16 control;
	u8 bir;
	void __iomem *base;

   	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
 	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);

 	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
	phys_addr = pci_resource_start (dev, bir) + table_offset;
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry();
		if (!entry)
			break;
		vector = get_msi_vector(dev);
		if (vector < 0) {
			kmem_cache_free(msi_cachep, entry);
			break;
		}

 		j = entries[i].entry;
 		entries[i].vector = vector;
		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
 		entry->msi_attrib.state = 0;		/* Mark it not active */
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.default_vector = dev->irq;
		entry->dev = dev;
		entry->mask_base = base;
		if (!head) {
			entry->link.head = vector;
			entry->link.tail = vector;
			head = entry;
		} else {
			entry->link.head = temp;
			entry->link.tail = tail->link.tail;
			tail->link.tail = vector;
			head->link.head = vector;
		}
		temp = vector;
		tail = entry;
		/* Replace with MSI-X handler */
		irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
		/* Configure MSI-X capability structure */
		status = msi_ops->setup(dev, vector,
					&address_hi,
					&address_lo,
					&data);
		if (status < 0)
			break;

		writel(address_lo,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(address_hi,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(data,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_DATA_OFFSET);
		attach_msi_entry(entry, vector);
	}
	if (i != nvec) {
		i--;
		for (; i >= 0; i--) {
			vector = (entries + i)->vector;
			msi_free_vector(dev, vector, 0);
			(entries + i)->vector = 0;
		}
		return -EBUSY;
	}
	/* Set MSI-X enabled bits */
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);

	return 0;
}
Code example #17
static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
					   const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	struct pci_dev *bridge_dev;
	u32 temp, temp2;
	u8 cap_ptr = 0;

	/* Everything is on func 1 here so we are hardcoding function one */
	bridge_dev = pci_find_slot((unsigned int)pdev->bus->number,
			PCI_DEVFN(0, 1));
	if (!bridge_dev) {
		printk(KERN_INFO PFX "Detected a Serverworks chipset "
		       "but could not find the secondary device.\n");
		return -ENODEV;
	}

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);

	switch (pdev->device) {
	case 0x0006:
		/* ServerWorks CNB20HE
		Fail silently.*/
		printk (KERN_ERR PFX "Detected ServerWorks CNB20HE chipset: No AGP present.\n");
		return -ENODEV;

	case PCI_DEVICE_ID_SERVERWORKS_HE:
	case PCI_DEVICE_ID_SERVERWORKS_LE:
	case 0x0007:
		break;

	default:
		if (cap_ptr)
			printk(KERN_ERR PFX "Unsupported Serverworks chipset "
					"(device id: %04x)\n", pdev->device);
		return -ENODEV;
	}

	serverworks_private.svrwrks_dev = bridge_dev;
	serverworks_private.gart_addr_ofs = 0x10;

	pci_read_config_dword(pdev, SVWRKS_APSIZE, &temp);
	if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2);
		if (temp2 != 0) {
			printk(KERN_INFO PFX "Detected 64 bit aperture address, "
			       "but top bits are not zero.  Disabling agp\n");
			return -ENODEV;
		}
		serverworks_private.mm_addr_ofs = 0x18;
	} else
		serverworks_private.mm_addr_ofs = 0x14;

	pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs, &temp);
	if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		pci_read_config_dword(pdev,
				serverworks_private.mm_addr_ofs + 4, &temp2);
		if (temp2 != 0) {
			printk(KERN_INFO PFX "Detected 64 bit MMIO address, "
			       "but top bits are not zero.  Disabling agp\n");
			return -ENODEV;
		}
	}

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->driver = &sworks_driver;
	bridge->dev_private_data = &serverworks_private,
	bridge->dev = pdev;

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}
Code example #18
File: amd-k7-agp.c  Project: maraz/linux-2.6
static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	u8 cap_ptr;
	int j;

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
	if (!cap_ptr)
		return -ENODEV;

	j = ent - agp_amdk7_pci_table;
	printk(KERN_INFO PFX "Detected AMD %s chipset\n",
	       amd_agp_device_ids[j].chipset_name);

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->driver = &amd_irongate_driver;
	bridge->dev_private_data = &amd_irongate_private,
	bridge->dev = pdev;
	bridge->capndx = cap_ptr;

	/* 751 Errata (22564_B-1.PDF)
	   erratum 20: strobe glitch with Nvidia NV10 GeForce cards.
	   system controller may experience noise due to strong drive strengths
	 */
	if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_7006) {
		struct pci_dev *gfxcard=NULL;

		cap_ptr = 0;
		while (!cap_ptr) {
			gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard);
			if (!gfxcard) {
				printk (KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
				return -ENODEV;
			}
			cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP);
		}

		/* With so many variants of NVidia cards, it's simpler just
		   to blacklist them all, and then whitelist them as needed
		   (if necessary at all). */
		if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) {
			agp_bridge->flags |= AGP_ERRATA_1X;
			printk (KERN_INFO PFX "AMD 751 chipset with NVidia GeForce detected. Forcing to 1X due to errata.\n");
		}
		pci_dev_put(gfxcard);
	}

	/* 761 Errata (23613_F.pdf)
	 * Revisions B0/B1 were a disaster.
	 * erratum 44: SYSCLK/AGPCLK skew causes 2X failures -- Force mode to 1X
	 * erratum 45: Timing problem prevents fast writes -- Disable fast write.
	 * erratum 46: Setup violation on AGP SBA pins - Disable side band addressing.
	 * With this lot disabled, we should prevent lockups. */
	if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_700E) {
		if (pdev->revision == 0x10 || pdev->revision == 0x11) {
			agp_bridge->flags = AGP_ERRATA_FASTWRITES;
			agp_bridge->flags |= AGP_ERRATA_SBA;
			agp_bridge->flags |= AGP_ERRATA_1X;
			printk (KERN_INFO PFX "AMD 761 chipset with errata detected - disabling AGP fast writes & SBA and forcing to 1X.\n");
		}
	}

	/* Fill in the mode register */
	pci_read_config_dword(pdev,
			bridge->capndx+PCI_AGP_STATUS,
			&bridge->mode);

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}
Code example #19
File: hcd-pci.c  Project: kzlin129/tt-gpl
/**
 * usb_hcd_pci_suspend - power management suspend of a PCI-based HCD
 * @dev: USB Host Controller being suspended
 * @message: semantics in flux
 *
 * Store this function in the HCD's struct pci_driver as suspend().
 */
int usb_hcd_pci_suspend (struct pci_dev *dev, pm_message_t message)
{
	struct usb_hcd		*hcd;
	int			retval = 0;
	int			has_pci_pm;

	hcd = pci_get_drvdata(dev);

	/* FIXME until the generic PM interfaces change a lot more, this
	 * can't use PCI D1 and D2 states.  For example, the confusion
	 * between messages and states will need to vanish, and messages
	 * will need to provide a target system state again.
	 *
	 * It'll be important to learn characteristics of the target state,
	 * especially on embedded hardware where the HCD will often be in
	 * charge of an external VBUS power supply and one or more clocks.
	 * Some target system states will leave them active; others won't.
	 * (With PCI, that's often handled by platform BIOS code.)
	 */

	/* even when the PCI layer rejects some of the PCI calls
	 * below, HCs can try global suspend and reduce DMA traffic.
	 * PM-sensitive HCDs may already have done this.
	 */
	has_pci_pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	switch (hcd->state) {

	/* entry if root hub wasn't yet suspended ... from sysfs,
	 * without autosuspend, or if USB_SUSPEND isn't configured.
	 */
	case HC_STATE_RUNNING:
		hcd->state = HC_STATE_QUIESCING;
		retval = hcd->driver->suspend (hcd, message);
		if (retval) {
			dev_dbg (hcd->self.controller, 
					"suspend fail, retval %d\n",
					retval);
			break;
		}
		hcd->state = HC_STATE_SUSPENDED;
		/* FALLTHROUGH */

	/* entry with CONFIG_USB_SUSPEND, or hcds that autosuspend: the
	 * controller and/or root hub will already have been suspended,
	 * but it won't be ready for a PCI resume call.
	 *
	 * FIXME only CONFIG_USB_SUSPEND guarantees hub_suspend() will
	 * have been called, otherwise root hub timers still run ...
	 */
	case HC_STATE_SUSPENDED:
		/* no DMA or IRQs except when HC is active */
		if (dev->current_state == PCI_D0) {
			free_irq (hcd->irq, hcd);
			pci_save_state (dev);
			pci_disable_device (dev);
		}

		if (!has_pci_pm) {
			dev_dbg (hcd->self.controller, "--> PCI D0/legacy\n");
			break;
		}

		/* NOTE:  dev->current_state becomes nonzero only here, and
		 * only for devices that support PCI PM.  Also, exiting
		 * PCI_D3 (but not PCI_D1 or PCI_D2) is allowed to reset
		 * some device state (e.g. as part of clock reinit).
		 */
		retval = pci_set_power_state (dev, PCI_D3hot);
		if (retval == 0) {
			dev_dbg (hcd->self.controller, "--> PCI D3\n");
			pci_enable_wake (dev, PCI_D3hot, hcd->remote_wakeup);
			pci_enable_wake (dev, PCI_D3cold, hcd->remote_wakeup);
		} else if (retval < 0) {
			dev_dbg (&dev->dev, "PCI D3 suspend fail, %d\n",
					retval);
			(void) usb_hcd_pci_resume (dev);
			break;
		}
		break;
	default:
		dev_dbg (hcd->self.controller, "hcd state %d; not suspended\n",
			hcd->state);
		WARN_ON(1);
		retval = -EINVAL;
		break;
	}

	/* update power_state **ONLY** to make sysfs happier */
	if (retval == 0)
		dev->dev.power.power_state = message;
	return retval;
}
Code example #20
File: msi.c  Project: xf739645524/kernel-rhel5
/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X vectors requested for allocation by device driver
 *
 * Setup the MSI-X capability structure of device function with the number
 * of requested vectors upon its software driver call to request for
 * MSI-X mode enabled on its hardware device function. A return of zero
 * indicates the successful configuration of MSI-X capability structure
 * with new allocated MSI-X vectors. A return of < 0 indicates a failure.
 * Or a return of > 0 indicates that driver request is exceeding the number
 * of vectors available. Driver should use the returned value to re-send
 * its request.
 **/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
	int status, pos, nr_entries, free_vectors;
	int i, j, temp;
	u16 control;
	unsigned long flags;

	if (!entries || pci_msi_supported(dev) < 0)
 		return -EINVAL;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
 		return -EINVAL;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	temp = dev->irq;
	WARN_ON(!msi_lookup_vector(dev, PCI_CAP_ID_MSIX));

	/* Check whether driver already requested for MSI vector */
   	if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
		!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI-X.  "
		       "Device already has an MSI vector assigned\n",
		       pci_name(dev));
		dev->irq = temp;
		return -EINVAL;
	}

	spin_lock_irqsave(&msi_lock, flags);
	/*
	 * msi_lock is provided to ensure that enough vectors resources are
	 * available before granting.
	 */
	free_vectors = pci_vector_resources(last_alloc_vector,
				nr_released_vectors);
	/* Ensure that each MSI/MSI-X device has one vector reserved by
	   default to avoid any MSI-X driver to take all available
 	   resources */
	free_vectors -= nr_reserved_vectors;
	spin_unlock_irqrestore(&msi_lock, flags);

	if (nvec > free_vectors) {
		if (free_vectors > 0)
			return free_vectors;
		else
			return -EBUSY;
	}

	status = msix_capability_init(dev, entries, nvec);

	return status;
}
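The kernel-doc above spells out the retry contract: a positive return value is the number of vectors that can still be allocated. A caller-side sketch of that pattern (my_enable_msix is a hypothetical helper, not kernel API):

static int my_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
			  int nvec)
{
	int rc;

	while (nvec > 0) {
		rc = pci_enable_msix(pdev, entries, nvec);
		if (rc == 0)
			return nvec;	/* all requested vectors allocated */
		if (rc < 0)
			return rc;	/* hard failure; fall back to MSI or INTx */
		nvec = rc;		/* only this many available; retry */
	}
	return -ENOSPC;
}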
Code example #21
File: sworks-agp.c  Project: 1703011/asuswrt-merlin
static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
					   const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	struct pci_dev *bridge_dev;
	u32 temp, temp2;
	u8 cap_ptr = 0;

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);

	switch (pdev->device) {
	case 0x0006:
		dev_err(&pdev->dev, "ServerWorks CNB20HE is unsupported due to lack of documentation\n");
		return -ENODEV;

	case PCI_DEVICE_ID_SERVERWORKS_HE:
	case PCI_DEVICE_ID_SERVERWORKS_LE:
	case 0x0007:
		break;

	default:
		if (cap_ptr)
			dev_err(&pdev->dev, "unsupported Serverworks chipset "
				"[%04x/%04x]\n", pdev->vendor, pdev->device);
		return -ENODEV;
	}

	/* Everything is on func 1 here so we are hardcoding function one */
	bridge_dev = pci_get_bus_and_slot((unsigned int)pdev->bus->number,
			PCI_DEVFN(0, 1));
	if (!bridge_dev) {
		dev_info(&pdev->dev, "can't find secondary device\n");
		return -ENODEV;
	}

	serverworks_private.svrwrks_dev = bridge_dev;
	serverworks_private.gart_addr_ofs = 0x10;

	pci_read_config_dword(pdev, SVWRKS_APSIZE, &temp);
	if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2);
		if (temp2 != 0) {
			dev_info(&pdev->dev, "64 bit aperture address, "
				 "but top bits are not zero; disabling AGP\n");
			return -ENODEV;
		}
		serverworks_private.mm_addr_ofs = 0x18;
	} else
		serverworks_private.mm_addr_ofs = 0x14;

	pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs, &temp);
	if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		pci_read_config_dword(pdev,
				serverworks_private.mm_addr_ofs + 4, &temp2);
		if (temp2 != 0) {
			dev_info(&pdev->dev, "64 bit MMIO address, but top "
				 "bits are not zero; disabling AGP\n");
			return -ENODEV;
		}
	}

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->driver = &sworks_driver;
	bridge->dev_private_data = &serverworks_private,
	bridge->dev = pci_dev_get(pdev);

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}
Code example #22
File: hcd-pci.c  Project: kzlin129/tt-gpl
/**
 * usb_hcd_pci_resume - power management resume of a PCI-based HCD
 * @dev: USB Host Controller being resumed
 *
 * Store this function in the HCD's struct pci_driver as resume().
 */
int usb_hcd_pci_resume (struct pci_dev *dev)
{
	struct usb_hcd		*hcd;
	int			retval;

	hcd = pci_get_drvdata(dev);
	if (hcd->state != HC_STATE_SUSPENDED) {
		dev_dbg (hcd->self.controller, 
				"can't resume, not suspended!\n");
		return 0;
	}

	/* NOTE:  chip docs cover clean "real suspend" cases (what Linux
	 * calls "standby", "suspend to RAM", and so on).  There are also
	 * dirty cases when swsusp fakes a suspend in "shutdown" mode.
	 */
	if (dev->current_state != PCI_D0) {
#ifdef	DEBUG
		int	pci_pm;
		u16	pmcr;

		pci_pm = pci_find_capability(dev, PCI_CAP_ID_PM);
		pci_read_config_word(dev, pci_pm + PCI_PM_CTRL, &pmcr);
		pmcr &= PCI_PM_CTRL_STATE_MASK;
		if (pmcr) {
			/* Clean case:  power to USB and to HC registers was
			 * maintained; remote wakeup is easy.
			 */
			dev_dbg(hcd->self.controller, "resume from PCI D%d\n",
					pmcr);
		} else {
			/* Clean:  HC lost Vcc power, D0 uninitialized
			 *   + Vaux may have preserved port and transceiver
			 *     state ... for remote wakeup from D3cold
			 *   + or not; HCD must reinit + re-enumerate
			 *
			 * Dirty: D0 semi-initialized cases with swsusp
			 *   + after BIOS init
			 *   + after Linux init (HCD statically linked)
			 */
			dev_dbg(hcd->self.controller,
				"PCI D0, from previous PCI D%d\n",
				dev->current_state);
		}
#endif
		pci_enable_wake (dev, dev->current_state, 0);
		pci_enable_wake (dev, PCI_D3cold, 0);
	} else {
		/* Same basic cases: clean (powered/not), dirty */
		dev_dbg(hcd->self.controller, "PCI legacy resume\n");
	}

	/* NOTE:  the PCI API itself is asymmetric here.  We don't need to
	 * pci_set_power_state(PCI_D0) since that's part of re-enabling;
	 * but that won't re-enable bus mastering.  Yet pci_disable_device()
	 * explicitly disables bus mastering...
	 */
	retval = pci_enable_device (dev);
	if (retval < 0) {
		dev_err (hcd->self.controller,
			"can't re-enable after resume, %d!\n", retval);
		return retval;
	}
	pci_set_master (dev);
	pci_restore_state (dev);

	dev->dev.power.power_state = PMSG_ON;

	hcd->state = HC_STATE_RESUMING;
	hcd->saw_irq = 0;
	retval = request_irq (dev->irq, usb_hcd_irq, SA_SHIRQ,
				hcd->irq_descr, hcd);
	if (retval < 0) {
		dev_err (hcd->self.controller,
			"can't restore IRQ after resume!\n");
		usb_hc_died (hcd);
		return retval;
	}

	retval = hcd->driver->resume (hcd);
	if (!HC_IS_RUNNING (hcd->state)) {
		dev_dbg (hcd->self.controller, 
				"resume fail, retval %d\n", retval);
		usb_hc_died (hcd);
	}

	pci_enable_device(dev);
	return retval;
}
Code example #23
File: hcd-pci.c  Project: QiuLihua83/linux-2.6.10
/**
 * usb_hcd_pci_suspend - power management suspend of a PCI-based HCD
 * @dev: USB Host Controller being suspended
 * @state: state that the controller is going into
 *
 * Store this function in the HCD's struct pci_driver as suspend().
 */
int usb_hcd_pci_suspend (struct pci_dev *dev, u32 state)
{
    struct usb_hcd		*hcd;
    int			retval = 0;
    int			has_pci_pm;

    hcd = pci_get_drvdata(dev);

    /* even when the PCI layer rejects some of the PCI calls
     * below, HCs can try global suspend and reduce DMA traffic.
     * PM-sensitive HCDs may already have done this.
     */
    has_pci_pm = pci_find_capability(dev, PCI_CAP_ID_PM);
    if (state > 4)
        state = 4;

    switch (hcd->state) {

    /* entry if root hub wasn't yet suspended ... from sysfs,
     * without autosuspend, or if USB_SUSPEND isn't configured.
     */
    case USB_STATE_RUNNING:
        hcd->state = USB_STATE_QUIESCING;
        retval = hcd->driver->suspend (hcd, state);
        if (retval) {
            dev_dbg (hcd->self.controller,
                     "suspend fail, retval %d\n",
                     retval);
            break;
        }
        hcd->state = HCD_STATE_SUSPENDED;
    /* FALLTHROUGH */

    /* entry with CONFIG_USB_SUSPEND, or hcds that autosuspend: the
     * controller and/or root hub will already have been suspended,
     * but it won't be ready for a PCI resume call.
     *
     * FIXME only CONFIG_USB_SUSPEND guarantees hub_suspend() will
     * have been called, otherwise root hub timers still run ...
     */
    case HCD_STATE_SUSPENDED:
        if (state <= dev->current_state)
            break;

        /* no DMA or IRQs except in D0 */
        if (!dev->current_state) {
            pci_save_state (dev);
            pci_disable_device (dev);
            free_irq (hcd->irq, hcd);
        }

        if (!has_pci_pm) {
            dev_dbg (hcd->self.controller, "--> PCI D0/legacy\n");
            break;
        }

        /* POLICY: ignore D1/D2/D3hot differences;
         * we know D3hot will always work.
         */
        retval = pci_set_power_state (dev, state);
        if (retval < 0 && state < 3) {
            retval = pci_set_power_state (dev, 3);
            if (retval == 0)
                state = 3;
        }
        if (retval == 0) {
            dev_dbg (hcd->self.controller, "--> PCI %s\n",
                     pci_state(dev->current_state));
#ifdef	CONFIG_USB_SUSPEND
            pci_enable_wake (dev, state, hcd->remote_wakeup);
            pci_enable_wake (dev, 4, hcd->remote_wakeup);
#endif
        } else if (retval < 0) {
            dev_dbg (&dev->dev, "PCI %s suspend fail, %d\n",
                     pci_state(state), retval);
            (void) usb_hcd_pci_resume (dev);
            break;
        }
        break;
    default:
        dev_dbg (hcd->self.controller, "hcd state %d; not suspended\n",
                 hcd->state);
        retval = -EINVAL;
        break;
    }

    /* update power_state **ONLY** to make sysfs happier */
    if (retval == 0)
        dev->dev.power.power_state = state;
    return retval;
}
Code example #24
File: qib_pcie.c  Project: 33d/linux-2.6.21-hh20
int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
		    struct msix_entry *entry)
{
	u16 linkstat, speed;
	int pos = 0, pose, ret = 1;

	pose = pci_pcie_cap(dd->pcidev);
	if (!pose) {
		qib_dev_err(dd, "Can't find PCI Express capability!\n");
		/* set up something... */
		dd->lbus_width = 1;
		dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
		goto bail;
	}

	pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSIX);
	if (nent && *nent && pos) {
		qib_msix_setup(dd, pos, nent, entry);
		ret = 0; /* did it, either MSIx or INTx */
	} else {
		pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
		if (pos)
			ret = qib_msi_setup(dd, pos);
		else
			qib_dev_err(dd, "No PCI MSI or MSIx capability!\n");
	}
	if (!pos)
		qib_enable_intx(dd->pcidev);

	pci_read_config_word(dd->pcidev, pose + PCI_EXP_LNKSTA, &linkstat);
	/*
	 * speed is bits 0-3, linkwidth is bits 4-8
	 * no defines for them in headers
	 */
	speed = linkstat & 0xf;
	linkstat >>= 4;
	linkstat &= 0x1f;
	dd->lbus_width = linkstat;

	switch (speed) {
	case 1:
		dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
		break;
	case 2:
		dd->lbus_speed = 5000; /* Gen2, 5GHz */
		break;
	default: /* not defined, assume gen1 */
		dd->lbus_speed = 2500;
		break;
	}

	/*
	 * Check against expected pcie width and complain if "wrong"
	 * on first initialization, not afterwards (i.e., reset).
	 */
	if (minw && linkstat < minw)
		qib_dev_err(dd,
			    "PCIe width %u (x%u HCA), performance reduced\n",
			    linkstat, minw);

	qib_tune_pcie_caps(dd);

	qib_tune_pcie_coalesce(dd);

bail:
	/* fill in string, even on errors */
	snprintf(dd->lbus_info, sizeof(dd->lbus_info),
		 "PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width);
	return ret;
}
コード例 #25
0
static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	u8 cap_ptr;
	int j;

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
	if (!cap_ptr)
		return -ENODEV;

	j = ent - agp_amdk7_pci_table;
	dev_info(&pdev->dev, "AMD %s chipset\n",
		 amd_agp_device_ids[j].chipset_name);

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->driver = &amd_irongate_driver;
	bridge->dev_private_data = &amd_irongate_private;
	bridge->dev = pdev;
	bridge->capndx = cap_ptr;

	if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_7006) {
		struct pci_dev *gfxcard=NULL;

		cap_ptr = 0;
		while (!cap_ptr) {
			gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard);
			if (!gfxcard) {
				dev_info(&pdev->dev, "no AGP VGA controller\n");
				return -ENODEV;
			}
			cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP);
		}

		if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) {
			agp_bridge->flags |= AGP_ERRATA_1X;
			dev_info(&pdev->dev, "AMD 751 chipset with NVidia GeForce; forcing 1X due to errata\n");
		}
		pci_dev_put(gfxcard);
	}

	if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_700E) {
		if (pdev->revision == 0x10 || pdev->revision == 0x11) {
			agp_bridge->flags = AGP_ERRATA_FASTWRITES;
			agp_bridge->flags |= AGP_ERRATA_SBA;
			agp_bridge->flags |= AGP_ERRATA_1X;
			dev_info(&pdev->dev, "AMD 761 chipset with errata; disabling AGP fast writes & SBA and forcing to 1X\n");
		}
	}

	/* Fill in the mode register */
	pci_read_config_dword(pdev,
			bridge->capndx+PCI_AGP_STATUS,
			&bridge->mode);

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}
コード例 #26
0
ファイル: intelxlvf.c プロジェクト: eworm-de/ipxe
/**
 * Probe PCI device
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int intelxlvf_probe ( struct pci_device *pci ) {
	struct net_device *netdev;
	struct intelxl_nic *intelxl;
	int rc;

	/* Allocate and initialise net device */
	netdev = alloc_etherdev ( sizeof ( *intelxl ) );
	if ( ! netdev ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	netdev_init ( netdev, &intelxlvf_operations );
	intelxl = netdev->priv;
	pci_set_drvdata ( pci, netdev );
	netdev->dev = &pci->dev;
	memset ( intelxl, 0, sizeof ( *intelxl ) );
	intelxl->intr = INTELXLVF_VFINT_DYN_CTL0;
	intelxl_init_admin ( &intelxl->command, INTELXLVF_ADMIN,
			     &intelxlvf_admin_command_offsets );
	intelxl_init_admin ( &intelxl->event, INTELXLVF_ADMIN,
			     &intelxlvf_admin_event_offsets );
	intelxlvf_init_ring ( &intelxl->tx, INTELXL_TX_NUM_DESC,
			      sizeof ( intelxl->tx.desc.tx[0] ),
			      INTELXLVF_QTX_TAIL );
	intelxlvf_init_ring ( &intelxl->rx, INTELXL_RX_NUM_DESC,
			      sizeof ( intelxl->rx.desc.rx[0] ),
			      INTELXLVF_QRX_TAIL );

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Map registers */
	intelxl->regs = ioremap ( pci->membase, INTELXLVF_BAR_SIZE );
	if ( ! intelxl->regs ) {
		rc = -ENODEV;
		goto err_ioremap;
	}

	/* Locate PCI Express capability */
	intelxl->exp = pci_find_capability ( pci, PCI_CAP_ID_EXP );
	if ( ! intelxl->exp ) {
		DBGC ( intelxl, "INTELXL %p missing PCIe capability\n",
		       intelxl );
		rc = -ENXIO;
		goto err_exp;
	}

	/* Reset the function via PCIe FLR */
	intelxlvf_reset_flr ( intelxl, pci );

	/* Enable MSI-X dummy interrupt */
	if ( ( rc = intelxl_msix_enable ( intelxl, pci ) ) != 0 )
		goto err_msix;

	/* Open admin queues */
	if ( ( rc = intelxl_open_admin ( intelxl ) ) != 0 )
		goto err_open_admin;

	/* Reset the function via admin queue */
	if ( ( rc = intelxlvf_reset_admin ( intelxl ) ) != 0 )
		goto err_reset_admin;

	/* Get MAC address */
	if ( ( rc = intelxlvf_admin_get_resources ( netdev ) ) != 0 )
		goto err_get_resources;

	/* Register network device */
	if ( ( rc = register_netdev ( netdev ) ) != 0 )
		goto err_register_netdev;

	return 0;

	unregister_netdev ( netdev );
 err_register_netdev:
 err_get_resources:
 err_reset_admin:
	intelxl_close_admin ( intelxl );
 err_open_admin:
	intelxl_msix_disable ( intelxl, pci );
 err_msix:
	intelxlvf_reset_flr ( intelxl, pci );
 err_exp:
	iounmap ( intelxl->regs );
 err_ioremap:
	netdev_nullify ( netdev );
	netdev_put ( netdev );
 err_alloc:
	return rc;
}
コード例 #27
0
ファイル: ehci-pci.c プロジェクト: 404992361/mi1_kernel
/* called during probe() after chip reset completes */
static int ehci_pci_setup(struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
	struct pci_dev		*p_smbus;
	u8			rev;
	u32			temp;
	int			retval;

	switch (pdev->vendor) {
	case PCI_VENDOR_ID_TOSHIBA_2:
		/* celleb's companion chip */
		if (pdev->device == 0x01b5) {
#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
			ehci->big_endian_mmio = 1;
#else
			ehci_warn(ehci,
				  "unsupported big endian Toshiba quirk\n");
#endif
		}
		break;
	}

	ehci->caps = hcd->regs;
	ehci->regs = hcd->regs +
		HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));

	dbg_hcs_params(ehci, "reset");
	dbg_hcc_params(ehci, "reset");

        /* ehci_init() causes memory for DMA transfers to be
         * allocated.  Thus, any vendor-specific workarounds based on
         * limiting the type of memory used for DMA transfers must
         * happen before ehci_init() is called. */
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_NVIDIA:
		/* NVidia reports that certain chips don't handle
		 * QH, ITD, or SITD addresses above 2GB.  (But TD,
		 * data buffer, and periodic schedule are normal.)
		 */
		switch (pdev->device) {
		case 0x003c:	/* MCP04 */
		case 0x005b:	/* CK804 */
		case 0x00d8:	/* CK8 */
		case 0x00e8:	/* CK8S */
			if (pci_set_consistent_dma_mask(pdev,
						DMA_BIT_MASK(31)) < 0)
				ehci_warn(ehci, "can't enable NVidia "
					"workaround for >2GB RAM\n");
			break;
		}
		break;
	}

	/* cache this readonly data; minimize chip reads */
	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);

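	/* halt the controller before any data structures are set up */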
	retval = ehci_halt(ehci);
	if (retval)
		return retval;

	if ((pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x7808) ||
	    (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x4396)) {
		/* EHCI controller on AMD SB700/SB800/Hudson-2/3 platforms may
		 * read/write memory space which does not belong to it when
		 * there is NULL pointer with T-bit set to 1 in the frame list
		 * table. To avoid the issue, the frame list link pointer
		 * should always contain a valid pointer to a inactive qh.
		 */
		ehci->use_dummy_qh = 1;
		ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI "
				"dummy qh workaround\n");
	}

	/* data structure init */
	retval = ehci_init(hcd);
	if (retval)
		return retval;

	switch (pdev->vendor) {
	case PCI_VENDOR_ID_NEC:
		ehci->need_io_watchdog = 0;
		break;
	case PCI_VENDOR_ID_INTEL:
		ehci->need_io_watchdog = 0;
		ehci->fs_i_thresh = 1;
		if (pdev->device == 0x27cc) {
			ehci->broken_periodic = 1;
			ehci_info(ehci, "using broken periodic workaround\n");
		}
		if (pdev->device == 0x0806 || pdev->device == 0x0811
				|| pdev->device == 0x0829) {
			ehci_info(ehci, "disable lpm for langwell/penwell\n");
			ehci->has_lpm = 0;
		}
		if (pdev->device == PCI_DEVICE_ID_INTEL_CE4100_USB) {
			hcd->has_tt = 1;
			tdi_reset(ehci);
		}
		if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) {
			/* EHCI #1 or #2 on 6 Series/C200 Series chipset */
			if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) {
				ehci_info(ehci, "broken D3 during system sleep on ASUS\n");
				hcd->broken_pci_sleep = 1;
				device_set_wakeup_capable(&pdev->dev, false);
			}
		}
		break;
	case PCI_VENDOR_ID_TDI:
		if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
			hcd->has_tt = 1;
			tdi_reset(ehci);
		}
		break;
	case PCI_VENDOR_ID_AMD:
		/* AMD PLL quirk */
		if (usb_amd_find_chipset_info())
			ehci->amd_pll_fix = 1;
		/* AMD8111 EHCI doesn't work, according to AMD errata */
		if (pdev->device == 0x7463) {
			ehci_info(ehci, "ignoring AMD8111 (errata)\n");
			retval = -EIO;
			goto done;
		}
		break;
	case PCI_VENDOR_ID_NVIDIA:
		switch (pdev->device) {
		/* Some NForce2 chips have problems with selective suspend;
		 * fixed in newer silicon.
		 */
		case 0x0068:
			if (pdev->revision < 0xa4)
				ehci->no_selective_suspend = 1;
			break;

		/* MCP89 chips on the MacBookAir3,1 give EPROTO when
		 * fetching device descriptors unless LPM is disabled.
		 * There are also intermittent problems enumerating
		 * devices with PPCD enabled.
		 */
		case 0x0d9d:
			ehci_info(ehci, "disable lpm/ppcd for nvidia mcp89");
			ehci->has_lpm = 0;
			ehci->has_ppcd = 0;
			ehci->command &= ~CMD_PPCEE;
			break;
		}
		break;
	case PCI_VENDOR_ID_VIA:
		if (pdev->device == 0x3104 && (pdev->revision & 0xf0) == 0x60) {
			u8 tmp;

			/* The VT6212 defaults to a 1 usec EHCI sleep time which
			 * hogs the PCI bus *badly*. Setting bit 5 of 0x4B makes
			 * that sleep time use the conventional 10 usec.
			 */
			pci_read_config_byte(pdev, 0x4b, &tmp);
			if (tmp & 0x20)
				break;
			pci_write_config_byte(pdev, 0x4b, tmp | 0x20);
		}
		break;
	case PCI_VENDOR_ID_ATI:
		/* AMD PLL quirk */
		if (usb_amd_find_chipset_info())
			ehci->amd_pll_fix = 1;
		/* SB600 and old version of SB700 have a bug in EHCI controller,
		 * which causes usb devices lose response in some cases.
		 */
		if ((pdev->device == 0x4386) || (pdev->device == 0x4396)) {
			p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
						 PCI_DEVICE_ID_ATI_SBX00_SMBUS,
						 NULL);
			if (!p_smbus)
				break;
			rev = p_smbus->revision;
			if ((pdev->device == 0x4386) || (rev == 0x3a)
			    || (rev == 0x3b)) {
				u8 tmp;
				ehci_info(ehci, "applying AMD SB600/SB700 USB "
					"freeze workaround\n");
				pci_read_config_byte(pdev, 0x53, &tmp);
				pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
			}
			pci_dev_put(p_smbus);
		}
		break;
	case PCI_VENDOR_ID_NETMOS:
		/* MosChip frame-index-register bug */
		ehci_info(ehci, "applying MosChip frame-index workaround\n");
		ehci->frame_index_bug = 1;
		break;
	}

	/* optional debug port, normally in the first BAR */
	temp = pci_find_capability(pdev, 0x0a);
	if (temp) {
		pci_read_config_dword(pdev, temp, &temp);
		temp >>= 16;
		if ((temp & (3 << 13)) == (1 << 13)) {
			temp &= 0x1fff;
			ehci->debug = ehci_to_hcd(ehci)->regs + temp;
			temp = ehci_readl(ehci, &ehci->debug->control);
			ehci_info(ehci, "debug port %d%s\n",
				HCS_DEBUG_PORT(ehci->hcs_params),
				(temp & DBGP_ENABLED)
					? " IN USE"
					: "");
			if (!(temp & DBGP_ENABLED))
				ehci->debug = NULL;
		}
	}

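	/* reset the controller into a known state before the port fixups below */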
	ehci_reset(ehci);

	/* at least the Genesys GL880S needs fixup here */
	temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params);
	temp &= 0x0f;
	if (temp && HCS_N_PORTS(ehci->hcs_params) > temp) {
		ehci_dbg(ehci, "bogus port configuration: "
			"cc=%d x pcc=%d < ports=%d\n",
			HCS_N_CC(ehci->hcs_params),
			HCS_N_PCC(ehci->hcs_params),
			HCS_N_PORTS(ehci->hcs_params));

		switch (pdev->vendor) {
		case 0x17a0:		/* GENESYS */
			/* GL880S: should be PORTS=2 */
			temp |= (ehci->hcs_params & ~0xf);
			ehci->hcs_params = temp;
			break;
		case PCI_VENDOR_ID_NVIDIA:
			/* NF4: should be PCC=10 */
			break;
		}
	}

	/* Serial Bus Release Number is at PCI 0x60 offset */
	pci_read_config_byte(pdev, 0x60, &ehci->sbrn);
	if (pdev->vendor == PCI_VENDOR_ID_STMICRO
	    && pdev->device == PCI_DEVICE_ID_STMICRO_USB_HOST)
		ehci->sbrn = 0x20; /* ConneXT has no sbrn register */

	/* Keep this around for a while just in case some EHCI
	 * implementation uses legacy PCI PM support.  This test
	 * can be removed on 17 Dec 2009 if the dev_warn() hasn't
	 * been triggered by then.
	 */
	if (!device_can_wakeup(&pdev->dev)) {
		u16	port_wake;

		pci_read_config_word(pdev, 0x62, &port_wake);
		if (port_wake & 0x0001) {
			dev_warn(&pdev->dev, "Enabling legacy PCI PM\n");
			device_set_wakeup_capable(&pdev->dev, 1);
		}
	}

#ifdef	CONFIG_USB_SUSPEND
	/* REVISIT: the controller works fine for wakeup iff the root hub
	 * itself is "globally" suspended, but usbcore currently doesn't
	 * understand such things.
	 *
	 * System suspend currently expects to be able to suspend the entire
	 * device tree, device-at-a-time.  If we failed selective suspend
	 * reports, system suspend would fail; so the root hub code must claim
	 * success.  That's lying to usbcore, and it matters for runtime
	 * PM scenarios with selective suspend and remote wakeup...
	 */
	if (ehci->no_selective_suspend && device_can_wakeup(&pdev->dev))
		ehci_warn(ehci, "selective suspend/wakeup unavailable\n");
#endif

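	/* power up the root hub ports, then finish the PCI-specific reinit */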
	ehci_port_power(ehci, 1);
	retval = ehci_pci_reinit(ehci, pdev);
done:
	return retval;
}
コード例 #28
0
static int __devinit et131x_pci_setup(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	int result = -EBUSY;
	int pm_cap;
	bool pci_using_dac;
	struct net_device *netdev;
	struct et131x_adapter *adapter;

	/* Enable the device via the PCI subsystem */
	if (pci_enable_device(pdev) != 0) {
		dev_err(&pdev->dev,
			"pci_enable_device() failed\n");
		return -EIO;
	}

	/* Perform some basic PCI checks */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			  "Can't find PCI device's base address\n");
		goto err_disable;
	}

	if (pci_request_regions(pdev, DRIVER_NAME)) {
		dev_err(&pdev->dev,
			"Can't get PCI resources\n");
		goto err_disable;
	}

	/* Enable PCI bus mastering */
	pci_set_master(pdev);

	/* Query PCI for Power Mgmt Capabilities
	 *
	 * NOTE: Now reading PowerMgmt in another location; is this still
	 * needed?
	 */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			  "Cannot find Power Management capabilities\n");
		result = -EIO;
		goto err_release_res;
	}

	/* Check the DMA addressing support of this device */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = true;

		result = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (result != 0) {
			dev_err(&pdev->dev,
				  "Unable to obtain 64 bit DMA for consistent allocations\n");
			goto err_release_res;
		}
	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		pci_using_dac = false;
	} else {
		dev_err(&pdev->dev,
			"No usable DMA addressing method\n");
		result = -EIO;
		goto err_release_res;
	}

	/* Allocate netdev and private adapter structs */
	netdev = et131x_device_alloc();
	if (netdev == NULL) {
		dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
		result = -ENOMEM;
		goto err_release_res;
	}
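	/* Initialise the private adapter structure for this device */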
	adapter = et131x_adapter_init(netdev, pdev);
	/* Initialise the PCI setup for the device */
	et131x_pci_init(adapter, pdev);

	/* Map the bus-relative registers to system virtual memory */
	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (adapter->regs == NULL) {
		dev_err(&pdev->dev, "Cannot map device registers\n");
		result = -ENOMEM;
		goto err_free_dev;
	}

	/* If Phy COMA mode was enabled when we went down, disable it here. */
	writel(ET_PMCSR_INIT,  &adapter->regs->global.pm_csr);

	/* Issue a global reset to the et1310 */
	et131x_soft_reset(adapter);

	/* Disable all interrupts (paranoid) */
	et131x_disable_interrupts(adapter);

	/* Allocate DMA memory */
	result = et131x_adapter_memory_alloc(adapter);
	if (result != 0) {
		dev_err(&pdev->dev, "Could not alloc adapater memory (DMA)\n");
		goto err_iounmap;
	}

	/* Init send data structures */
	et131x_init_send(adapter);

	/*
	 * Set up the task structure for the ISR's deferred handler
	 */
	INIT_WORK(&adapter->task, et131x_isr_handler);

	/* Copy address into the net_device struct */
	memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);

	/* Setup et1310 as per the documentation */
	et131x_adapter_setup(adapter);

	/* Create a timer to count errors received by the NIC */
	init_timer(&adapter->ErrorTimer);

	adapter->ErrorTimer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000;
	adapter->ErrorTimer.function = et131x_error_timer_handler;
	adapter->ErrorTimer.data = (unsigned long)adapter;

	/* Initialize link state */
	et131x_link_detection_handler((unsigned long)adapter);

	/* Initialize variable for counting how long we do not have
							link status */
	adapter->boot_coma = 0;

	/* We can enable interrupts now
	 *
	 *  NOTE - Because registration of interrupt handler is done in the
	 *         device's open(), defer enabling device interrupts to that
	 *         point
	 */

	/* Register the net_device struct with the Linux network layer */
	result = register_netdev(netdev);
	if (result != 0) {
		dev_err(&pdev->dev, "register_netdev() failed\n");
		goto err_mem_free;
	}

	/* Register the net_device struct with the PCI subsystem. Save a copy
	 * of the PCI config space for this device now that the device has
	 * been initialized, just in case it needs to be quickly restored.
	 */
	pci_set_drvdata(pdev, netdev);
	pci_save_state(adapter->pdev);
	return result;

err_mem_free:
	et131x_adapter_memory_free(adapter);
err_iounmap:
	iounmap(adapter->regs);
err_free_dev:
	pci_dev_put(pdev);
	free_netdev(netdev);
err_release_res:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	return result;
}
コード例 #29
0
int mthca_reset(struct mthca_dev *mdev)
{
	int i;
	int err = 0;
	u32 *hca_header    = NULL;
	u32 *bridge_header = NULL;
	struct pci_dev *bridge = NULL;
	int bridge_pcix_cap = 0;
	int hca_pcie_cap = 0;
	int hca_pcix_cap = 0;

	u16 devctl;
	u16 linkctl;

#define MTHCA_RESET_OFFSET 0xf0010
#define MTHCA_RESET_VALUE  swab32(1)

	/*
	 * Reset the chip.  This is somewhat ugly because we have to
	 * save off the PCI header before reset and then restore it
	 * after the chip reboots.  We skip config space offsets 22
	 * and 23 since those have a special meaning.
	 *
	 * To make matters worse, for Tavor (PCI-X HCA) we have to
	 * find the associated bridge device and save off its PCI
	 * header as well.
	 */

	if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE)) {
		/* Look for the bridge -- its device ID will be 2 more
		   than HCA's device ID. */
		while ((bridge = pci_get_device(mdev->pdev->vendor,
						mdev->pdev->device + 2,
						bridge)) != NULL) {
			if (bridge->hdr_type    == PCI_HEADER_TYPE_BRIDGE &&
			    bridge->subordinate == mdev->pdev->bus) {
				mthca_dbg(mdev, "Found bridge: %s\n",
					  pci_name(bridge));
				break;
			}
		}

		if (!bridge) {
			/*
			 * Didn't find a bridge for a Tavor device --
			 * assume we're in no-bridge mode and hope for
			 * the best.
			 */
			mthca_warn(mdev, "No bridge found for %s\n",
				  pci_name(mdev->pdev));
		}

	}

	/* For Arbel do we need to save off the full 4K PCI Express header?? */
	hca_header = kmalloc(256, GFP_KERNEL);
	if (!hca_header) {
		err = -ENOMEM;
		mthca_err(mdev, "Couldn't allocate memory to save HCA "
			  "PCI header, aborting.\n");
		goto out;
	}

	for (i = 0; i < 64; ++i) {
		if (i == 22 || i == 23)
			continue;
		if (pci_read_config_dword(mdev->pdev, i * 4, hca_header + i)) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't save HCA "
				  "PCI header, aborting.\n");
			goto out;
		}
	}

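	/* locate the HCA's PCI-X capability, if present */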
	hca_pcix_cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX);
コード例 #30
0
int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
		    struct qib_msix_entry *entry)
{
	u16 linkstat, speed;
	int pos = 0, pose, ret = 1;

	pose = pci_pcie_cap(dd->pcidev);
	if (!pose) {
		qib_dev_err(dd, "Can't find PCI Express capability!\n");
		/* set up something... */
		dd->lbus_width = 1;
		dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
		goto bail;
	}

	pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSIX);
	if (nent && *nent && pos) {
		qib_msix_setup(dd, pos, nent, entry);
		ret = 0; /* did it, either MSIx or INTx */
	} else {
		pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
		if (pos)
			ret = qib_msi_setup(dd, pos);
		else
			qib_dev_err(dd, "No PCI MSI or MSIx capability!\n");
	}
	if (!pos)
		qib_enable_intx(dd->pcidev);

	pci_read_config_word(dd->pcidev, pose + PCI_EXP_LNKSTA, &linkstat);
	/*
	 * speed is bits 0-3, linkwidth is bits 4-8
	 * no defines for them in headers
	 */
	speed = linkstat & 0xf;
	linkstat >>= 4;
	linkstat &= 0x1f;
	dd->lbus_width = linkstat;

	switch (speed) {
	case 1:
		dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
		break;
	case 2:
		dd->lbus_speed = 5000; /* Gen2, 5GHz */
		break;
	default: /* not defined, assume Gen1 */
		dd->lbus_speed = 2500;
		break;
	}

	/*
	 * Check against expected pcie width and complain if "wrong"
	 * on first initialization, not afterwards (i.e., reset).
	 */
	if (minw && linkstat < minw)
		qib_dev_err(dd,
			    "PCIe width %u (x%u HCA), performance reduced\n",
			    linkstat, minw);

	qib_tune_pcie_caps(dd);

	qib_tune_pcie_coalesce(dd);

bail:
	/* fill in string, even on errors */
	snprintf(dd->lbus_info, sizeof(dd->lbus_info),
		 "PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width);
	return ret;
}