int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct pci_dev *bridge;

	if (!irte || !dev)
		return -1;

	/* PCIe device or Root Complex integrated PCI device */
	if (pci_is_pcie(dev) || !dev->bus->parent) {
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     (dev->bus->number << 8) | dev->devfn);
		return 0;
	}

	bridge = pci_find_upstream_pcie_bridge(dev);
	if (bridge) {
		if (pci_is_pcie(bridge)) /* this is a PCIe-to-PCI/PCI-X bridge */
			set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
				(bridge->bus->number << 8) | dev->bus->number);
		else /* this is a legacy PCI bridge */
			set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
				(bridge->bus->number << 8) | bridge->devfn);
	}

	return 0;
}
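
The 16-bit source-id programmed above is simply the device's requester ID: bus number in bits 15:8, device/function in bits 7:0. A minimal sketch of that layout (the helper name is illustrative; current kernels expose the same computation as the PCI_DEVID() macro):

#include <linux/pci.h>

/* Illustrative only: the requester ID that set_msi_sid() programs
 * into the IRTE for a PCIe or root-complex integrated device. */
static u16 example_requester_id(const struct pci_dev *dev)
{
	return (u16)((dev->bus->number << 8) | dev->devfn);
}
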
Example #3
static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
	if (!pci_is_pcie(dev))
		return false;

	switch (pos) {
	case PCI_EXP_FLAGS:
		return true;
	case PCI_EXP_DEVCAP:
	case PCI_EXP_DEVCTL:
	case PCI_EXP_DEVSTA:
		return pcie_cap_has_devctl(dev);
	case PCI_EXP_LNKCAP:
	case PCI_EXP_LNKCTL:
	case PCI_EXP_LNKSTA:
		return pcie_cap_has_lnkctl(dev);
	case PCI_EXP_SLTCAP:
	case PCI_EXP_SLTCTL:
	case PCI_EXP_SLTSTA:
		return pcie_cap_has_sltctl(dev);
	case PCI_EXP_RTCTL:
	case PCI_EXP_RTCAP:
	case PCI_EXP_RTSTA:
		return pcie_cap_has_rtctl(dev);
	case PCI_EXP_DEVCAP2:
	case PCI_EXP_DEVCTL2:
	case PCI_EXP_LNKCAP2:
	case PCI_EXP_LNKCTL2:
	case PCI_EXP_LNKSTA2:
		return pcie_cap_version(dev) > 1;
	default:
		return false;
	}
}
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
#if defined(PCIEH)
    if (pci_is_pcie((struct pci_dev *)dev)) {
#if defined(CONFIG_BCM96816)
        return INTERRUPT_ID_PCIE_A +
               (irq_tab_pcie_bcm63xx_pcie_bus[dev->bus->number] + slot) % 4;
#endif

#if defined(PCIEH_1)
        if ((dev->bus->number >= BCM_BUS_PCIE1_ROOT) &&
            (dev->bus->number <= BCM_BUS_PCIE1_DEVICE))
            return INTERRUPT_ID_PCIE1_RC;

        if ((dev->bus->number >= BCM_BUS_PCIE_ROOT) &&
            (dev->bus->number <= BCM_BUS_PCIE_DEVICE))
            return INTERRUPT_ID_PCIE_RC;
#endif
        /* single RC */
        return INTERRUPT_ID_PCIE_RC;
    }
#endif /* PCIEH */

#if defined(CONFIG_BCM96816) || defined(PCI_CFG) || defined(WLAN_CHIPC_BASE) || defined(CONFIG_USB)
    return irq_tab_bcm63xx[slot];
#else
    return 0;
#endif
}
Example #5
static void qtnf_tune_pcie_mps(struct pci_dev *pdev)
{
	struct pci_dev *parent;
	int mps_p, mps_o, mps_m, mps;
	int ret;

	/* current mps */
	mps_o = pcie_get_mps(pdev);

	/* maximum supported mps */
	mps_m = 128 << pdev->pcie_mpss;

	/* suggested new mps value */
	mps = mps_m;

	if (pdev->bus && pdev->bus->self) {
		/* parent (bus) mps */
		parent = pdev->bus->self;

		if (pci_is_pcie(parent)) {
			mps_p = pcie_get_mps(parent);
			mps = min(mps_m, mps_p);
		}
	}

	ret = pcie_set_mps(pdev, mps);
	if (ret) {
		pr_err("failed to set mps to %d, keep using current %d\n",
		       mps, mps_o);
		return;
	}

	pr_debug("set mps to %d (was %d, max %d)\n", mps, mps_o, mps_m);
}
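
In qtnf_tune_pcie_mps() above, pcie_mpss holds the Max_Payload_Size Supported encoding, so 128 << pcie_mpss converts it to bytes, and pcie_get_mps()/pcie_set_mps() operate in bytes as well. A condensed sketch of the same clamping logic (example_clamp_mps() is a hypothetical helper; pci_err() is assumed available):

#include <linux/pci.h>

/* Sketch: cap a device's MPS at whatever its upstream bridge runs,
 * mirroring the logic of qtnf_tune_pcie_mps(). */
static void example_clamp_mps(struct pci_dev *pdev)
{
	struct pci_dev *parent = pci_upstream_bridge(pdev);
	int mps = 128 << pdev->pcie_mpss;	/* device maximum, in bytes */

	if (parent && pci_is_pcie(parent))
		mps = min(mps, pcie_get_mps(parent));

	if (pcie_set_mps(pdev, mps))
		pci_err(pdev, "failed to set MPS to %d\n", mps);
}
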
Example #6
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
	int ret;

	*val = 0;
	if (pos & 1)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_word() fails, it may
		 * have been written as 0xFFFF if hardware error happens
		 * during pci_read_config_word().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	/*
	 * For Functions that do not implement the Slot Capabilities,
	 * Slot Status, and Slot Control registers, these spaces must
	 * be hardwired to 0b, with the exception of the Presence Detect
	 * State bit in the Slot Status register of Downstream Ports,
	 * which must be hardwired to 1b.  (PCIe Base Spec 3.0, sec 7.8)
	 */
	if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
		 pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
		*val = PCI_EXP_SLTSTA_PDS;
	}

	return 0;
}
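
Callers never add the pci_pcie_cap() offset themselves; they pass a register offset such as PCI_EXP_LNKSTA and check the return value. A minimal usage sketch (the function name is illustrative):

#include <linux/pci.h>

/* Sketch: on pre-PCIe devices or unimplemented registers, *val is
 * left at 0, so the caller sees a harmless value either way. */
static void example_read_link_status(struct pci_dev *pdev)
{
	u16 lnksta;

	if (pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta))
		return;		/* config space read failed */

	pci_info(pdev, "LNKSTA = %#06x\n", lnksta);
}
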
Example #7
int nv04_instmem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramht = NULL;
	u32 offset, length;
	int ret;

	/* RAMIN always available */
	dev_priv->ramin_available = true;

	/* Reserve space at end of VRAM for PRAMIN */
	if (dev_priv->card_type >= NV_40) {
		u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
		u32 rsvd;

		/* estimate grctx size, the magics come from nv40_grctx.c */
		if      (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
		else if (dev_priv->chipset  < 0x43) rsvd = 0x4f00 * vs;
		else if (nv44_graph_class(dev))	    rsvd = 0x4980 * vs;
		else				    rsvd = 0x4a40 * vs;
		rsvd += 16 * 1024;
		rsvd *= dev_priv->engine.fifo.channels;

		/* pciegart table */
		if (pci_is_pcie(dev->pdev))
			rsvd += 512 * 1024;

		/* object storage */
		rsvd += 512 * 1024;

		dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
	} else {
Example #8
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
{
	int ret;

	*val = 0;
	if (pos & 3)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_dword() fails, it may
		 * have been written as 0xFFFFFFFF if hardware error happens
		 * during pci_read_config_dword().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	/*
	 * Fabricate PDS as in pcie_capability_read_word(): a dword read
	 * at the Slot Control offset returns Slot Status in bits 31:16,
	 * so the fabricated bit must be shifted up accordingly.
	 */
	if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL &&
	    pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)
		*val = PCI_EXP_SLTSTA_PDS << 16;

	return 0;
}
Example #10
static int
nvkm_pci_init(struct nvkm_subdev *subdev)
{
	struct nvkm_pci *pci = nvkm_pci(subdev);
	struct pci_dev *pdev = pci->pdev;
	int ret;

	if (pci->agp.bridge) {
		ret = nvkm_agp_init(pci);
		if (ret)
			return ret;
	} else if (pci_is_pcie(pci->pdev)) {
		nvkm_pcie_init(pci);
	}

	if (pci->func->init)
		pci->func->init(pci);

	ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
	if (ret)
		return ret;

	pci->irq = pdev->irq;
	return ret;
}
Example #11
int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
{
	int pos;
	u32 status;
	int port_type;

	if (!pci_is_pcie(dev))
		return -ENODEV;

	pos = dev->aer_cap;
	if (!pos)
		return -EIO;

	port_type = pci_pcie_type(dev);
	if (port_type == PCI_EXP_TYPE_ROOT_PORT) {
		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, status);
	}

	pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
	pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, status);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);

	return 0;
}
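
Each read/write pair above works because the AER status registers are RW1C (write-1-to-clear): writing a 1 clears that bit, writing 0 leaves it untouched. The pattern in isolation (a hypothetical helper, not kernel API):

#include <linux/pci.h>

/* Sketch: clear every latched bit of an RW1C status register by
 * writing back exactly the bits that were read as set. */
static void example_clear_rw1c(struct pci_dev *dev, int reg)
{
	u32 status;

	pci_read_config_dword(dev, reg, &status);
	if (status)
		pci_write_config_dword(dev, reg, status);
}
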
Example #12
static int
nvkm_pci_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_pci *pci = nvkm_pci(subdev);
	if (pci_is_pcie(pci->pdev))
		return nvkm_pcie_oneinit(pci);
	return 0;
}
Example #13
int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (pci_is_pcie(vdev->pdev))
				func = vfio_pci_set_err_trigger;
			break;
		}
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_req_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}
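
The nested switches above only select a handler; argument validation happens once inside the chosen function, and a single indirect call keeps the ioctl body flat. The shape of that dispatch, reduced to its essentials (all names here are hypothetical):

#include <linux/errno.h>
#include <linux/types.h>

/* Sketch of the dispatch pattern: pick a handler per index and fall
 * back to -ENOTTY when nothing matches, as vfio does above. */
typedef int (*example_irq_fn)(void *priv, unsigned start,
			      unsigned count, void *data);

static int example_dispatch(example_irq_fn *table, size_t n, void *priv,
			    unsigned index, unsigned start,
			    unsigned count, void *data)
{
	if (index >= n || !table[index])
		return -ENOTTY;
	return table[index](priv, start, count, data);
}
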
Example #14
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
    struct radeon_device *rdev;
    int r, acpi_status;

    rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
    if (rdev == NULL) {
        return -ENOMEM;
    }
    dev->dev_private = (void *)rdev;

    /* update BUS flag */
    if (drm_pci_device_is_agp(dev)) {
        flags |= RADEON_IS_AGP;
    } else if (pci_is_pcie(dev->pdev)) {
        flags |= RADEON_IS_PCIE;
    } else {
        flags |= RADEON_IS_PCI;
    }

    /* radeon_device_init should report only fatal error
     * like memory allocation failure or iomapping failure,
     * or memory manager initialization failure, it must
     * properly initialize the GPU MC controller and permit
     * VRAM allocation
     */
    r = radeon_device_init(rdev, dev, dev->pdev, flags);
    if (r) {
        dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
        goto out;
    }

    /* Again modeset_init should fail only on fatal error
     * otherwise it should provide enough functionalities
     * for shadowfb to run
     */
    r = radeon_modeset_init(rdev);
    if (r)
        dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");

    /* Call ACPI methods: require modeset init
     * but failure is not fatal
     */
    if (!r) {
        acpi_status = radeon_acpi_init(rdev);
        if (acpi_status)
            dev_dbg(&dev->pdev->dev,
                    "Error during ACPI methods call\n");
    }

out:
    if (r)
        radeon_driver_unload_kms(dev);
    return r;
}
Example #15
void pci_ptm_init(struct pci_dev *dev)
{
	int pos;
	u32 cap, ctrl;
	u8 local_clock;
	struct pci_dev *ups;

	if (!pci_is_pcie(dev))
		return;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
	if (!pos)
		return;

	/*
	 * Enable PTM only on interior devices (root ports, switch ports,
	 * etc.) on the assumption that it causes no link traffic until an
	 * endpoint enables it.
	 */
	if ((pci_pcie_type(dev) == PCI_EXP_TYPE_ENDPOINT ||
	     pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END))
		return;

	pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap);
	local_clock = (cap & PCI_PTM_GRANULARITY_MASK) >> 8;

	/*
	 * There's no point in enabling PTM unless it's enabled in the
	 * upstream device or this device can be a PTM Root itself.  Per
	 * the spec recommendation (PCIe r3.1, sec 7.32.3), select the
	 * furthest upstream Time Source as the PTM Root.
	 */
	ups = pci_upstream_bridge(dev);
	if (ups && ups->ptm_enabled) {
		ctrl = PCI_PTM_CTRL_ENABLE;
		if (ups->ptm_granularity == 0)
			dev->ptm_granularity = 0;
		else if (ups->ptm_granularity > local_clock)
			dev->ptm_granularity = ups->ptm_granularity;
	} else {
		if (cap & PCI_PTM_CAP_ROOT) {
			ctrl = PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT;
			dev->ptm_root = 1;
			dev->ptm_granularity = local_clock;
		} else
			return;
	}

	ctrl |= dev->ptm_granularity << 8;
	pci_write_config_dword(dev, pos + PCI_PTM_CTRL, ctrl);
	dev->ptm_enabled = 1;

	pci_ptm_info(dev);
}
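
The granularity rule above reads: a 0 from upstream means "unknown" and propagates downward; otherwise the device advertises the coarser (numerically larger) of the upstream value and its own local clock. A compact restatement, assuming the device otherwise keeps its local clock value (hypothetical helper):

#include <linux/types.h>

/* Sketch: effective PTM granularity under a PTM-enabled upstream
 * port; 0 propagates as "unknown", otherwise the larger value wins. */
static u8 example_ptm_granularity(u8 upstream, u8 local_clock)
{
	if (upstream == 0)
		return 0;
	return upstream > local_clock ? upstream : local_clock;
}
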
Example #16
int
nvkm_pcie_set_link(struct nvkm_pci *pci, enum nvkm_pcie_speed speed, u8 width)
{
	struct nvkm_subdev *subdev = &pci->subdev;
	enum nvkm_pcie_speed cur_speed, max_speed;
	struct pci_bus *pbus;
	int ret;

	if (!pci || !pci_is_pcie(pci->pdev))
		return 0;
	pbus = pci->pdev->bus;

	if (!pci->func->pcie.set_link)
		return -ENOSYS;

	nvkm_trace(subdev, "requested %s\n", nvkm_pcie_speeds[speed]);

	if (pci->func->pcie.version(pci) < 2) {
		nvkm_error(subdev, "setting link failed due to low version\n");
		return -ENODEV;
	}

	cur_speed = pci->func->pcie.cur_speed(pci);
	max_speed = min(nvkm_pcie_speed(pbus->max_bus_speed),
			pci->func->pcie.max_speed(pci));

	nvkm_trace(subdev, "current speed: %s\n", nvkm_pcie_speeds[cur_speed]);

	if (speed > max_speed) {
		nvkm_debug(subdev, "%s not supported by bus or card, dropping "
			   "requested speed to %s\n", nvkm_pcie_speeds[speed],
			   nvkm_pcie_speeds[max_speed]);
		speed = max_speed;
	}

	pci->pcie.speed = speed;
	pci->pcie.width = width;

	if (speed == cur_speed) {
		nvkm_debug(subdev, "requested matches current speed\n");
		return speed;
	}

	nvkm_debug(subdev, "set link to %s x%i\n",
		   nvkm_pcie_speeds[speed], width);

	ret = pci->func->pcie.set_link(pci, speed, width);
	if (ret < 0)
		nvkm_error(subdev, "setting link failed: %i\n", ret);

	return ret;
}
Example #17
/*
 * Bluetooth coexistence requires disabling ASPM.
 */
static void ath_pci_bt_coex_prep(struct ath_common *common)
{
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	struct pci_dev *pdev = to_pci_dev(sc->dev);
	u8 aspm;

	if (!pci_is_pcie(pdev))
		return;

	pci_read_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, &aspm);
	aspm &= ~(ATH_PCIE_CAP_LINK_L0S | ATH_PCIE_CAP_LINK_L1);
	pci_write_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, aspm);
}
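
ATH_PCIE_CAP_LINK_CTRL is a driver-private offset into the PCIe capability. The same read-modify-write can be written portably with the generic accessors, which postdate this driver code; a sketch under that assumption:

#include <linux/pci.h>

/* Sketch: clear the L0s and L1 ASPM enable bits in Link Control
 * without hard-coding a capability offset. */
static void example_disable_aspm(struct pci_dev *pdev)
{
	if (!pci_is_pcie(pdev))
		return;

	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
				   PCI_EXP_LNKCTL_ASPMC);
}
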
Example #18
/* Some hardware with 64-bit DMA support seems to be buggy and looks for the
 * translation bit in the low address word instead of the high one.
 */
static bool b43_dma_translation_in_low_word(struct b43_wldev *dev,
					    enum b43_dmatype type)
{
	if (type != B43_DMA_64BIT)
		return true;

#ifdef CPTCFG_B43_SSB
	if (dev->dev->bus_type == B43_BUS_SSB &&
	    dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
	    !(pci_is_pcie(dev->dev->sdev->bus->host_pci) &&
	      ssb_read32(dev->dev->sdev, SSB_TMSHIGH) & SSB_TMSHIGH_DMA64))
			return true;
#endif
	return false;
}
Example #19
int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
{
	int pos;
	u32 cap, ctrl;
	struct pci_dev *ups;

	if (!pci_is_pcie(dev))
		return -EINVAL;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
	if (!pos)
		return -EINVAL;

	pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap);
	if (!(cap & PCI_PTM_CAP_REQ))
		return -EINVAL;

	/*
	 * For a PCIe Endpoint, PTM is only useful if the endpoint can
	 * issue PTM requests to upstream devices that have PTM enabled.
	 *
	 * For Root Complex Integrated Endpoints, there is no upstream
	 * device, so there must be some implementation-specific way to
	 * associate the endpoint with a time source.
	 */
	if (pci_pcie_type(dev) == PCI_EXP_TYPE_ENDPOINT) {
		ups = pci_upstream_bridge(dev);
		if (!ups || !ups->ptm_enabled)
			return -EINVAL;

		dev->ptm_granularity = ups->ptm_granularity;
	} else if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {
		dev->ptm_granularity = 0;
	} else
		return -EINVAL;

	ctrl = PCI_PTM_CTRL_ENABLE;
	ctrl |= dev->ptm_granularity << 8;
	pci_write_config_dword(dev, pos + PCI_PTM_CTRL, ctrl);
	dev->ptm_enabled = 1;

	pci_ptm_info(dev);

	if (granularity)
		*granularity = dev->ptm_granularity;
	return 0;
}
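
From a driver, the pci_enable_ptm() call above is a single line once the device is up; a granularity of 0 means "unknown" rather than invalid. A usage sketch (the caller is hypothetical):

#include <linux/pci.h>

/* Sketch: request PTM for an endpoint and report the outcome. */
static void example_request_ptm(struct pci_dev *pdev)
{
	u8 granularity;

	if (pci_enable_ptm(pdev, &granularity))
		pci_info(pdev, "PTM not usable\n");
	else
		pci_info(pdev, "PTM enabled, granularity %u ns (0 = unknown)\n",
			 granularity);
}
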
static int disable_ecrc_checking(struct pci_dev *dev)
{
	int pos;
	u32 reg32;

	if (!pci_is_pcie(dev))
		return -ENODEV;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return -ENODEV;

	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 &= ~(PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	return 0;
}
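
The enable path is the mirror image of disable_ecrc_checking(), with one extra rule: an "enable" bit (GENE/CHKE) may only be set when the matching "capable" bit (GENC/CHKC) reports support. A reconstruction sketched from the register layout, not copied from a particular tree:

#include <linux/pci.h>

/* Sketch: enable ECRC generation and checking only where the
 * Advanced Error Capabilities register advertises support. */
static int example_enable_ecrc(struct pci_dev *dev)
{
	int pos;
	u32 reg32;

	if (!pci_is_pcie(dev))
		return -ENODEV;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return -ENODEV;

	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	if (reg32 & PCI_ERR_CAP_ECRC_GENC)
		reg32 |= PCI_ERR_CAP_ECRC_GENE;
	if (reg32 & PCI_ERR_CAP_ECRC_CHKC)
		reg32 |= PCI_ERR_CAP_ECRC_CHKE;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	return 0;
}
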
Example #21
void pci_restore_dpc_state(struct pci_dev *dev)
{
	struct dpc_dev *dpc;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return;

	dpc = to_dpc_dev(dev);
	if (!dpc)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pci_write_config_word(dev, dpc->cap_pos + PCI_EXP_DPC_CTL, *cap);
}
/*
 * pcie_portdrv_probe - Probe PCI Express port devices
 * @dev: PCI Express port device being probed
 *
 * If a port device is detected, invokes pcie_port_device_register()
 * for it.
 */
static int __devinit pcie_portdrv_probe(struct pci_dev *dev,
					const struct pci_device_id *id)
{
	int status;

	if (!pci_is_pcie(dev) ||
	    ((dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
	     (dev->pcie_type != PCI_EXP_TYPE_UPSTREAM) &&
	     (dev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)))
		return -ENODEV;

	if (!dev->irq && dev->pin) {
		dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; "
			 "check vendor BIOS\n", dev->vendor, dev->device);
	}
	status = pcie_port_device_register(dev);
	if (status)
		return status;

	pci_save_state(dev);
	return 0;
}
Example #23
/*
 * pcie_portdrv_probe - Probe PCI Express port devices
 * @dev: PCI Express port device being probed
 *
 * If a port device is detected, invokes pcie_port_device_register()
 * for it.
 */
static int pcie_portdrv_probe(struct pci_dev *dev,
					const struct pci_device_id *id)
{
	int status;

	if (!pci_is_pcie(dev) ||
	    ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) &&
	     (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM) &&
	     (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
		return -ENODEV;

	status = pcie_port_device_register(dev);
	if (status)
		return status;

	pci_save_state(dev);
	/*
	 * D3cold may not work properly on some PCIe port, so disable
	 * it by default.
	 */
	dev->d3cold_allowed = false;
	return 0;
}
Example #24
static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
					  u32 reg, u16 *value)
{
	struct pci_dev *parent_dev;
	struct pci_bus *parent_bus;

	parent_bus = adapter->pdev->bus->parent;
	if (!parent_bus)
		return -1;

	parent_dev = parent_bus->self;
	if (!parent_dev)
		return -1;

	if (!pci_is_pcie(parent_dev))
		return -1;

	pcie_capability_read_word(parent_dev, reg, value);
	if (*value == IXGBE_FAILED_READ_CFG_WORD &&
	    ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
		return -1;
	return 0;
}
Example #25
/**
 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
		pcix_set_mmrbc(ha->pdev, 2048);

	/* PCIe -- adjust Maximum Read Request Size (2048). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 2048);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
Example #26
static void ath_pci_aspm_init(struct ath_common *common)
{
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct pci_dev *pdev = to_pci_dev(sc->dev);
	struct pci_dev *parent;
	int pos;
	u8 aspm;

	if (!pci_is_pcie(pdev))
		return;

	parent = pdev->bus->self;
	if (WARN_ON(!parent))
		return;

	pos = pci_pcie_cap(parent);
	pci_read_config_byte(parent, pos + PCI_EXP_LNKCTL, &aspm);
	if (aspm & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) {
		ah->aspm_enabled = true;
		/* Initialize PCIe PM and SERDES registers. */
		ath9k_hw_configpcipowersave(ah, 0, 0);
	}
}
Example #27
/**
 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla25xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/* PCIe -- adjust Maximum Read Request Size (2048). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 2048);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	return QLA_SUCCESS;
}
Example #28
int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent)
{
	u16 linkstat, speed;
	int nvec;
	int maxvec;
	int ret = 0;

	if (!pci_is_pcie(dd->pcidev)) {
		qib_dev_err(dd, "Can't find PCI Express capability!\n");
		/* set up something... */
		dd->lbus_width = 1;
		dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
		ret = -1;
		goto bail;
	}

	maxvec = (nent && *nent) ? *nent : 1;
	nvec = qib_allocate_irqs(dd, maxvec);
	if (nvec < 0) {
		ret = nvec;
		goto bail;
	}

	/*
	 * If nent exists, make sure to record how many vectors were allocated
	 */
	if (nent) {
		*nent = nvec;

		/*
		 * If we requested (nent) MSIX, but msix_enabled is not set,
		 * pci_alloc_irq_vectors() enabled INTx.
		 */
		if (!dd->pcidev->msix_enabled)
			qib_dev_err(dd,
				    "no msix vectors allocated, using INTx\n");
	}

	pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat);
	/*
	 * speed is bits 0-3, linkwidth is bits 4-8
	 * no defines for them in headers
	 */
	speed = linkstat & 0xf;
	linkstat >>= 4;
	linkstat &= 0x1f;
	dd->lbus_width = linkstat;

	switch (speed) {
	case 1:
		dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
		break;
	case 2:
		dd->lbus_speed = 5000; /* Gen2, 5GHz */
		break;
	default: /* not defined, assume gen1 */
		dd->lbus_speed = 2500;
		break;
	}

	/*
	 * Check against expected pcie width and complain if "wrong"
	 * on first initialization, not afterwards (i.e., reset).
	 */
	if (minw && linkstat < minw)
		qib_dev_err(dd,
			    "PCIe width %u (x%u HCA), performance reduced\n",
			    linkstat, minw);

	qib_tune_pcie_caps(dd);

	qib_tune_pcie_coalesce(dd);

bail:
	/* fill in string, even on errors */
	snprintf(dd->lbus_info, sizeof(dd->lbus_info),
		 "PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width);
	return ret;
}
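
The "no defines for them in headers" comment above is dated: current pci_regs.h names both Link Status fields. A sketch of the same decode with the named masks, so the open-coded shifts become self-documenting:

#include <linux/pci.h>

/* Sketch: pull the current link speed code and width out of
 * PCI_EXP_LNKSTA using the named field masks. */
static void example_decode_lnksta(struct pci_dev *pdev)
{
	u16 lnksta;

	if (pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta))
		return;

	pci_info(pdev, "link speed code %u, width x%u\n",
		 lnksta & PCI_EXP_LNKSTA_CLS,
		 (lnksta & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
}
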
Example #29
/**
 * radeon_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * It calls radeon_device_init() to set up the non-display
 * parts of the chip (asic init, CP, writeback, etc.), and
 * radeon_modeset_init() to set up the display parts
 * (crtcs, encoders, hotplug detect, etc.).
 * Returns 0 on success, error on failure.
 */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct radeon_device *rdev;
	int r, acpi_status;

	rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
	if (rdev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)rdev;

	/* update BUS flag */
	if (drm_pci_device_is_agp(dev)) {
		DRM_INFO("RADEON_IS_AGP\n");
		flags |= RADEON_IS_AGP;
	} else if (pci_is_pcie(dev->dev->bsddev)) {
		DRM_INFO("RADEON_IS_PCIE\n");
		flags |= RADEON_IS_PCIE;
	} else {
		DRM_INFO("RADEON_IS_PCI\n");
		flags |= RADEON_IS_PCI;
	}

#ifdef PM_TODO
	if ((radeon_runtime_pm != 0) &&
	    radeon_has_atpx() &&
	    ((flags & RADEON_IS_IGP) == 0))
		flags |= RADEON_IS_PX;	/* as in the Linux driver */
#endif

	/* radeon_device_init should report only fatal error
	 * like memory allocation failure or iomapping failure,
	 * or memory manager initialization failure, it must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation
	 */
	r = radeon_device_init(rdev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Again modeset_init should fail only on fatal error
	 * otherwise it should provide enough functionalities
	 * for shadowfb to run
	 */
	r = radeon_modeset_init(rdev);
	if (r)
		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */
	if (!r) {
		acpi_status = radeon_acpi_init(rdev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

#ifdef PM_TODO
	if (radeon_is_px(dev)) {
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}
#endif

out:
	if (r)
		radeon_driver_unload_kms(dev);

	return r;
}
Example #30
int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
{
    struct ieee80211_hw *hw;
    struct rt2x00_dev *rt2x00dev;
    int retval;
    u16 chip;

    retval = pci_enable_device(pci_dev);
    if (retval) {
        rt2x00_probe_err("Enable device failed\n");
        return retval;
    }

    retval = pci_request_regions(pci_dev, pci_name(pci_dev));
    if (retval) {
        rt2x00_probe_err("PCI request regions failed\n");
        goto exit_disable_device;
    }

    pci_set_master(pci_dev);

    if (pci_set_mwi(pci_dev))
        rt2x00_probe_err("MWI not available\n");

    if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) {
        rt2x00_probe_err("PCI DMA not supported\n");
        retval = -EIO;
        goto exit_release_regions;
    }

    hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
    if (!hw) {
        rt2x00_probe_err("Failed to allocate hardware\n");
        retval = -ENOMEM;
        goto exit_release_regions;
    }

    pci_set_drvdata(pci_dev, hw);

    rt2x00dev = hw->priv;
    rt2x00dev->dev = &pci_dev->dev;
    rt2x00dev->ops = ops;
    rt2x00dev->hw = hw;
    rt2x00dev->irq = pci_dev->irq;
    rt2x00dev->name = ops->name;

    if (pci_is_pcie(pci_dev))
        rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE);
    else
        rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);

    retval = rt2x00pci_alloc_reg(rt2x00dev);
    if (retval)
        goto exit_free_device;

    /*
     * The rt3290 uses a different efuse offset, so before reading the
     * efuse we need to know whether this chip is an rt3290.
     */
    pci_read_config_word(pci_dev, PCI_DEVICE_ID, &chip);
    rt2x00dev->chip.rt = chip;

    retval = rt2x00lib_probe_dev(rt2x00dev);
    if (retval)
        goto exit_free_reg;

    return 0;

exit_free_reg:
    rt2x00pci_free_reg(rt2x00dev);

exit_free_device:
    ieee80211_free_hw(hw);

exit_release_regions:
    pci_clear_mwi(pci_dev);
    pci_release_regions(pci_dev);

exit_disable_device:
    pci_disable_device(pci_dev);

    return retval;
}