Example #1
int aq_pci_func_init(struct pci_dev *pdev)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
	}
	if (err != 0) {
		err = -ENOSR;
		goto err_exit;
	}

	err = pci_request_regions(pdev, AQ_CFG_DRV_NAME "_mmio");
	if (err < 0)
		goto err_exit;

	pci_set_master(pdev);

	return 0;

err_exit:
	return err;
}
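Example #1 and most of those that follow share the same pattern: request a 64-bit streaming and coherent DMA mask, then fall back to 32 bits if the platform refuses. Newer kernels have dropped the pci_set_dma_mask()/pci_set_consistent_dma_mask() wrappers in favour of the generic DMA API, where one call covers both masks. A minimal sketch of the equivalent fallback (the function name example_set_dma_masks is hypothetical, not taken from any of the drivers shown here):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_set_dma_masks(struct pci_dev *pdev)
{
	int err;

	/* dma_set_mask_and_coherent() sets both masks in one call. */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		dev_err(&pdev->dev, "no suitable DMA mask available\n");
	return err;
}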
Example #2
static int set_dma_caps(struct pci_dev *pdev)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
			return err;
		}
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev,
			 "Warning: couldn't set 64-bit consistent PCI DMA mask.\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"Can't set consistent PCI DMA mask, aborting.\n");
			return err;
		}
	}

	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
	return err;
}
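Besides the masks themselves, Example #2 also caps the scatter-gather segment size. dma_set_max_seg_size() records the limit in the device's dma_parms so that upper layers do not merge requests into segments larger than the hardware can address in one descriptor. A hypothetical one-liner with a smaller cap (SZ_1G comes from <linux/sizes.h>):

	dma_set_max_seg_size(&pdev->dev, SZ_1G);	/* 1 GB per segment */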
Example #3
static int __devinit ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	struct device *dev = &pdev->dev;
	struct ioatdma_device *device;
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
	if (err)
		return err;
	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		return err;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		return err;

	device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	pci_set_master(pdev);

	/* note: this overwrites the pointer from the devm_kzalloc() above,
	 * leaving that first allocation unused until devres frees it */
	device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
	if (!device)
		return -ENOMEM;
	pci_set_drvdata(pdev, device);

	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
	if (device->version == IOAT_VER_1_2)
		err = ioat1_dma_probe(device, ioat_dca_enabled);
	else if (device->version == IOAT_VER_2_0)
		err = ioat2_dma_probe(device, ioat_dca_enabled);
	else if (device->version >= IOAT_VER_3_0)
		err = ioat3_dma_probe(device, ioat_dca_enabled);
	else
		return -ENODEV;

	if (err) {
		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
		return -ENODEV;
	}

	return 0;
}
Example #4
/**
 * ufshcd_set_dma_mask - Set dma mask based on the controller
 *			 addressing capability
 * @pdev: PCI device structure
 *
 * Returns 0 for success, non-zero for failure
 */
static int ufshcd_set_dma_mask(struct pci_dev *pdev)
{
	int err;

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
		&& !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		return 0;
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	return err;
}
Example #5
static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
			    struct pci_dev *pdev)
{
	int rc;

	pci_set_drvdata(pdev, ndev);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_pci_enable;

	rc = pci_request_regions(pdev, NTB_NAME);
	if (rc)
		goto err_pci_regions;

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(&pdev->dev, "Cannot DMA highmem\n");
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
	}

	ndev->self_mmio = pci_iomap(pdev, 0, 0);
	if (!ndev->self_mmio) {
		rc = -EIO;
		goto err_dma_mask;
	}
	ndev->peer_mmio = ndev->self_mmio + AMD_PEER_OFFSET;

	return 0;

err_dma_mask:
	pci_clear_master(pdev);
err_pci_regions:
	pci_disable_device(pdev);
err_pci_enable:
	pci_set_drvdata(pdev, NULL);
	return rc;
}
Example #6
static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] =
		{ &vt6421_sport_info, &vt6421_sport_info, &vt6421_pport_info };
	struct ata_host *host;
	int i, rc;

	*r_host = host = ata_host_alloc_pinfo(&pdev->dev, ppi, ARRAY_SIZE(ppi));
	if (!host) {
		dev_printk(KERN_ERR, &pdev->dev, "failed to allocate host\n");
		return -ENOMEM;
	}

	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev, "failed to request/iomap "
			   "PCI BARs (errno=%d)\n", rc);
		return rc;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < host->n_ports; i++)
		vt6421_init_addrs(host->ports[i]);

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	return 0;
}
Example #7
File: init.c Project: 020gzh/linux
static int isci_pci_init(struct pci_dev *pdev)
{
	int err, bar_num, bar_mask = 0;
	void __iomem * const *iomap;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"failed enable PCI device %s!\n",
			pci_name(pdev));
		return err;
	}

	for (bar_num = 0; bar_num < SCI_PCI_BAR_COUNT; bar_num++)
		bar_mask |= 1 << (bar_num * 2);

	err = pcim_iomap_regions(pdev, bar_mask, DRV_NAME);
	if (err)
		return err;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err)
			return err;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err)
			return err;
	}

	return 0;
}
Example #8
File: pci.c Project: 020gzh/linux
static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hsu_dma_chip *chip;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
	if (ret) {
		dev_err(&pdev->dev, "I/O memory remapping failed\n");
		return ret;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = &pdev->dev;
	chip->regs = pcim_iomap_table(pdev)[0];
	chip->length = pci_resource_len(pdev, 0);
	chip->offset = HSU_PCI_CHAN_OFFSET;
	chip->irq = pdev->irq;

	pci_enable_msi(pdev);

	ret = hsu_dma_probe(chip);
	if (ret)
		return ret;

	ret = request_irq(chip->irq, hsu_pci_irq, 0, "hsu_dma_pci", chip);
	if (ret)
		goto err_register_irq;

	pci_set_drvdata(pdev, chip);

	return 0;

err_register_irq:
	hsu_dma_remove(chip);
	return ret;
}
Example #9
static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct ata_host *host;
	struct ata_port *ap;
	void __iomem *base;
	int rc;

	host = ata_host_alloc(&dev->dev, 1);
	if (!host)
		return -ENOMEM;
	ap = host->ports[0];

	/* Set up the PCI device */
	rc = pcim_enable_device(dev);
	if (rc)
		return rc;
	rc = pcim_iomap_regions(dev, 1 << 0, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(dev);
	if (rc)
		return rc;

	host->iomap = pcim_iomap_table(dev);
	rc = pci_set_dma_mask(dev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(dev, ATA_DMA_MASK);
	if (rc)
		return rc;
	pci_set_master(dev);

	/* Set up the register mappings. We use the I/O mapping as only the
	   older chips also have MMIO on BAR 1 */
	base = host->iomap[0];
	if (!base)
		return -ENOMEM;
	ap->ops = &ninja32_port_ops;
	ap->pio_mask = ATA_PIO4;
	ap->flags |= ATA_FLAG_SLAVE_POSS;

	ap->ioaddr.cmd_addr = base + 0x10;
	ap->ioaddr.ctl_addr = base + 0x1E;
	ap->ioaddr.altstatus_addr = base + 0x1E;
	ap->ioaddr.bmdma_addr = base;
	ata_sff_std_ports(&ap->ioaddr);
	ap->pflags = ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;

	ninja32_program(base);
	/* FIXME: Should we disable them at remove ? */
	return ata_host_activate(host, dev->irq, ata_bmdma_interrupt,
				 IRQF_SHARED, &ninja32_sht);
}
Example #10
static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct ata_host *host;
	struct ata_port *ap;
	void __iomem *base;
	int rc;

	host = ata_host_alloc(&dev->dev, 1);
	if (!host)
		return -ENOMEM;
	ap = host->ports[0];

	
	rc = pcim_enable_device(dev);
	if (rc)
		return rc;
	rc = pcim_iomap_regions(dev, 1 << 0, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(dev);
	if (rc)
		return rc;

	host->iomap = pcim_iomap_table(dev);
	rc = pci_set_dma_mask(dev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(dev, ATA_DMA_MASK);
	if (rc)
		return rc;
	pci_set_master(dev);

	
	base = host->iomap[0];
	if (!base)
		return -ENOMEM;
	ap->ops = &ninja32_port_ops;
	ap->pio_mask = ATA_PIO4;
	ap->flags |= ATA_FLAG_SLAVE_POSS;

	ap->ioaddr.cmd_addr = base + 0x10;
	ap->ioaddr.ctl_addr = base + 0x1E;
	ap->ioaddr.altstatus_addr = base + 0x1E;
	ap->ioaddr.bmdma_addr = base;
	ata_sff_std_ports(&ap->ioaddr);
	ap->pflags = ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;

	ninja32_program(base);
	
	return ata_host_activate(host, dev->irq, ata_sff_interrupt,
				 IRQF_SHARED, &ninja32_sht);
}
Example #11
static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	const struct dw_dma_platform_data *pdata = (void *)pid->driver_data;
	struct dw_dma_chip *chip;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
	if (ret) {
		dev_err(&pdev->dev, "I/O memory remapping failed\n");
		return ret;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = &pdev->dev;
	chip->regs = pcim_iomap_table(pdev)[0];
	chip->irq = pdev->irq;
	chip->pdata = pdata;

	ret = dw_dma_probe(chip);
	if (ret)
		return ret;

	pci_set_drvdata(pdev, chip);

	return 0;
}
Example #12
static int __devinit intel_gmch_probe(struct pci_dev *pdev,
				      struct agp_bridge_data *bridge)
{
	int i, mask;

	bridge->driver = NULL;

	for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
		if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
			find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
			bridge->driver =
				intel_agp_chipsets[i].gmch_driver;
			break;
		}
	}

	if (!bridge->driver)
		return 0;

	bridge->dev_private_data = &intel_private;
	bridge->dev = pdev;

	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);

	if (bridge->driver->mask_memory == intel_gen6_mask_memory)
		mask = 40;
	else if (bridge->driver->mask_memory == intel_i965_mask_memory)
		mask = 36;
	else
		mask = 32;

	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
		dev_err(&intel_private.pcidev->dev,
			"set gfx device dma mask %d-bit failed!\n", mask);
	else
		pci_set_consistent_dma_mask(intel_private.pcidev,
					    DMA_BIT_MASK(mask));

	return 1;
}
Example #13
static int scc_host_init(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	int rc;

	rc = scc_reset_controller(probe_ent);
	if (rc)
		return rc;

	probe_ent->n_ports = 1;

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	scc_setup_ports(&probe_ent->port[0], probe_ent->iomap[SCC_BMID_BAR]);

	pci_set_master(pdev);

	return 0;
}
Example #14
static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA2,
		.udma_mask = ATA_UDMA6,
		.port_ops = &sil680_port_ops
	};
	static const struct ata_port_info info_slow = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA2,
		.udma_mask = ATA_UDMA5,
		.port_ops = &sil680_port_ops
	};
	const struct ata_port_info *ppi[] = { &info, NULL };
	struct ata_host *host;
	void __iomem *mmio_base;
	int rc, try_mmio;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	switch (sil680_init_chip(pdev, &try_mmio)) {
		case 0:
			ppi[0] = &info_slow;
			break;
		case 0x30:
			return -ENODEV;
	}

	if (!try_mmio)
		goto use_ioports;

	/* Try to acquire MMIO resources and fallback to PIO if
	 * that fails
	 */
	rc = pcim_iomap_regions(pdev, 1 << SIL680_MMIO_BAR, DRV_NAME);
	if (rc)
		goto use_ioports;

	/* Allocate host and set it up */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;
	host->iomap = pcim_iomap_table(pdev);

	/* Setup DMA masks */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	pci_set_master(pdev);

	/* Get MMIO base and initialize port addresses */
	mmio_base = host->iomap[SIL680_MMIO_BAR];
	host->ports[0]->ioaddr.bmdma_addr = mmio_base + 0x00;
	host->ports[0]->ioaddr.cmd_addr = mmio_base + 0x80;
	host->ports[0]->ioaddr.ctl_addr = mmio_base + 0x8a;
	host->ports[0]->ioaddr.altstatus_addr = mmio_base + 0x8a;
	ata_sff_std_ports(&host->ports[0]->ioaddr);
	host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x08;
	host->ports[1]->ioaddr.cmd_addr = mmio_base + 0xc0;
	host->ports[1]->ioaddr.ctl_addr = mmio_base + 0xca;
	host->ports[1]->ioaddr.altstatus_addr = mmio_base + 0xca;
	ata_sff_std_ports(&host->ports[1]->ioaddr);

	/* Register & activate */
	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
				 IRQF_SHARED, &sil680_sht);

use_ioports:
	return ata_pci_bmdma_init_one(pdev, ppi, &sil680_sht, NULL, 0);
}
Example #15
static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
    struct ata_probe_ent *probe_ent;
    struct ata_port_info *ppi;
    int rc;
    unsigned int board_idx = (unsigned int) ent->driver_data;
    int pci_dev_busy = 0;

    rc = pci_enable_device(pdev);
    if (rc)
        return rc;

    rc = pci_request_regions(pdev, DRV_NAME);
    if (rc) {
        pci_dev_busy = 1;
        goto err_out;
    }

    rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
    if (rc)
        goto err_out_regions;
    rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
    if (rc)
        goto err_out_regions;

    ppi = &uli_port_info;
    probe_ent = ata_pci_init_native_mode(pdev, &ppi);
    if (!probe_ent) {
        rc = -ENOMEM;
        goto err_out_regions;
    }

    switch (board_idx) {
    case uli_5287:
        probe_ent->port[0].scr_addr = ULI5287_BASE;
        probe_ent->port[1].scr_addr = ULI5287_BASE + ULI5287_OFFS;
        probe_ent->n_ports = 4;

        probe_ent->port[2].cmd_addr = pci_resource_start(pdev, 0) + 8;
        probe_ent->port[2].altstatus_addr =
            probe_ent->port[2].ctl_addr =
                (pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS) + 4;
        probe_ent->port[2].bmdma_addr = pci_resource_start(pdev, 4) + 16;
        probe_ent->port[2].scr_addr = ULI5287_BASE + ULI5287_OFFS*4;

        probe_ent->port[3].cmd_addr = pci_resource_start(pdev, 2) + 8;
        probe_ent->port[3].altstatus_addr =
            probe_ent->port[3].ctl_addr =
                (pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS) + 4;
        probe_ent->port[3].bmdma_addr = pci_resource_start(pdev, 4) + 24;
        probe_ent->port[3].scr_addr = ULI5287_BASE + ULI5287_OFFS*5;

        ata_std_ports(&probe_ent->port[2]);
        ata_std_ports(&probe_ent->port[3]);
        break;

    case uli_5289:
        probe_ent->port[0].scr_addr = ULI5287_BASE;
        probe_ent->port[1].scr_addr = ULI5287_BASE + ULI5287_OFFS;
        break;

    case uli_5281:
        probe_ent->port[0].scr_addr = ULI5281_BASE;
        probe_ent->port[1].scr_addr = ULI5281_BASE + ULI5281_OFFS;
        break;

    default:
        BUG();
        break;
    }

    pci_set_master(pdev);
    pci_intx(pdev, 1);

    /* FIXME: check ata_device_add return value */
    ata_device_add(probe_ent);
    kfree(probe_ent);

    return 0;

err_out_regions:
    pci_release_regions(pdev);

err_out:
    if (!pci_dev_busy)
        pci_disable_device(pdev);
    return rc;

}
Example #16
/*
 * Do all the common PCIe setup and initialization.
 * devdata is not yet allocated, and is not allocated until after this
 * routine returns success.  Therefore qib_dev_err() can't be used for error
 * printing.
 */
int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		/*
		 * This can happen (in theory) iff:
		 * We did a chip reset, and then failed to reprogram the
		 * BAR, or the chip reset due to an internal error.  We then
		 * unloaded the driver and reloaded it.
		 *
		 * Both reset cases set the BAR back to initial state.  For
		 * the latter case, the AER sticky error bit at offset 0x718
		 * should be set, but the Linux kernel doesn't yet know
		 * about that, it appears.  If the original BAR was retained
		 * in the kernel data structures, this may be OK.
		 */
		qib_early_err(&pdev->dev, "pci enable failed: error %d\n",
			      -ret);
		goto done;
	}

	ret = pci_request_regions(pdev, QIB_DRV_NAME);
	if (ret) {
		qib_devinfo(pdev, "pci_request_regions fails: err %d\n", -ret);
		goto bail;
	}

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret) {
		/*
		 * If the 64 bit setup fails, try 32 bit.  Some systems
		 * do not setup 64 bit maps on systems with 2GB or less
		 * memory installed.
		 */
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret) {
			qib_devinfo(pdev, "Unable to set DMA mask: %d\n", ret);
			goto bail;
		}
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	} else
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret) {
		qib_early_err(&pdev->dev,
			      "Unable to set DMA consistent mask: %d\n", ret);
		goto bail;
	}

	pci_set_master(pdev);
	ret = pci_enable_pcie_error_reporting(pdev);
	if (ret) {
		qib_early_err(&pdev->dev,
			      "Unable to enable pcie error reporting: %d\n",
			      ret);
		ret = 0;
	}
	goto done;

bail:
	pci_disable_device(pdev);
	pci_release_regions(pdev);
done:
	return ret;
}
Example #17
static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *mem;
	struct ath_softc *sc;
	struct ieee80211_hw *hw;
	u8 csz;
	u32 val;
	int ret = 0;
	char hw_name[64];

	if (pci_enable_device(pdev))
		return -EIO;

	ret =  pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		printk(KERN_ERR "ath9k: 32-bit DMA not available\n");
		goto err_dma;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		printk(KERN_ERR "ath9k: 32-bit DMA consistent "
			"DMA enable failed\n");
		goto err_dma;
	}

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
	if (csz == 0) {
		/*
		 * Linux 2.4.18 (at least) writes the cache line size
		 * register as a 16-bit wide register which is wrong.
		 * We must have this setup properly for rx buffer
		 * DMA to work so force a reasonable value here if it
		 * comes up zero.
		 */
		csz = L1_CACHE_BYTES / sizeof(u32);
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
	}
	/*
	 * The default setting of latency timer yields poor results,
	 * set it to the value used by other systems. It may be worth
	 * tweaking this setting more.
	 */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);

	pci_set_master(pdev);

	/*
	 * Disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	ret = pci_request_region(pdev, 0, "ath9k");
	if (ret) {
		dev_err(&pdev->dev, "PCI memory region reserve error\n");
		ret = -ENODEV;
		goto err_region;
	}

	mem = pci_iomap(pdev, 0, 0);
	if (!mem) {
		printk(KERN_ERR "PCI memory map error\n") ;
		ret = -EIO;
		goto err_iomap;
	}

	hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
	if (!hw) {
		dev_err(&pdev->dev, "No memory for ieee80211_hw\n");
		ret = -ENOMEM;
		goto err_alloc_hw;
	}

	SET_IEEE80211_DEV(hw, &pdev->dev);
	pci_set_drvdata(pdev, hw);

	sc = hw->priv;
	sc->hw = hw;
	sc->dev = &pdev->dev;
	sc->mem = mem;

	/* Will be cleared in ath9k_start() */
	sc->sc_flags |= SC_OP_INVALID;

	ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
	if (ret) {
		dev_err(&pdev->dev, "request_irq failed\n");
		goto err_irq;
	}

	sc->irq = pdev->irq;

	ret = ath9k_init_device(id->device, sc, &ath_pci_bus_ops);
	if (ret) {
		dev_err(&pdev->dev, "Failed to initialize device\n");
		goto err_init;
	}

	ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name));
	wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
		   hw_name, (unsigned long)mem, pdev->irq);

	return 0;

err_init:
	free_irq(sc->irq, sc);
err_irq:
	ieee80211_free_hw(hw);
err_alloc_hw:
	pci_iounmap(pdev, mem);
err_iomap:
	pci_release_region(pdev, 0);
err_region:
	/* Nothing */
err_dma:
	pci_disable_device(pdev);
	return ret;
}
Example #18
/* called during probe() after chip reset completes */
static int ehci_pci_setup(struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
	struct pci_dev		*p_smbus;
	u8			rev;
	u32			temp;
	int			retval;

	switch (pdev->vendor) {
	case PCI_VENDOR_ID_TOSHIBA_2:
		/* celleb's companion chip */
		if (pdev->device == 0x01b5) {
#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
			ehci->big_endian_mmio = 1;
#else
			ehci_warn(ehci,
				  "unsupported big endian Toshiba quirk\n");
#endif
		}
		break;
	}

	ehci->caps = hcd->regs;
	ehci->regs = hcd->regs +
		HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));

	dbg_hcs_params(ehci, "reset");
	dbg_hcc_params(ehci, "reset");

        /* ehci_init() causes memory for DMA transfers to be
         * allocated.  Thus, any vendor-specific workarounds based on
         * limiting the type of memory used for DMA transfers must
         * happen before ehci_init() is called. */
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_NVIDIA:
		/* NVidia reports that certain chips don't handle
		 * QH, ITD, or SITD addresses above 2GB.  (But TD,
		 * data buffer, and periodic schedule are normal.)
		 */
		switch (pdev->device) {
		case 0x003c:	/* MCP04 */
		case 0x005b:	/* CK804 */
		case 0x00d8:	/* CK8 */
		case 0x00e8:	/* CK8S */
			if (pci_set_consistent_dma_mask(pdev,
						DMA_BIT_MASK(31)) < 0)
				ehci_warn(ehci, "can't enable NVidia "
					"workaround for >2GB RAM\n");
			break;
		}
		break;
	}

	/* cache this readonly data; minimize chip reads */
	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);

	retval = ehci_halt(ehci);
	if (retval)
		return retval;

	if ((pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x7808) ||
	    (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x4396)) {
		/* EHCI controller on AMD SB700/SB800/Hudson-2/3 platforms may
		 * read/write memory space which does not belong to it when
		 * there is NULL pointer with T-bit set to 1 in the frame list
		 * table. To avoid the issue, the frame list link pointer
		 * should always contain a valid pointer to an inactive qh.
		 */
		ehci->use_dummy_qh = 1;
		ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI "
				"dummy qh workaround\n");
	}

	/* data structure init */
	retval = ehci_init(hcd);
	if (retval)
		return retval;

	switch (pdev->vendor) {
	case PCI_VENDOR_ID_NEC:
		ehci->need_io_watchdog = 0;
		break;
	case PCI_VENDOR_ID_INTEL:
		ehci->need_io_watchdog = 0;
		ehci->fs_i_thresh = 1;
		if (pdev->device == 0x27cc) {
			ehci->broken_periodic = 1;
			ehci_info(ehci, "using broken periodic workaround\n");
		}
		if (pdev->device == 0x0806 || pdev->device == 0x0811
				|| pdev->device == 0x0829) {
			ehci_info(ehci, "disable lpm for langwell/penwell\n");
			ehci->has_lpm = 0;
		}
		if (pdev->device == PCI_DEVICE_ID_INTEL_CE4100_USB) {
			hcd->has_tt = 1;
			tdi_reset(ehci);
		}
		if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) {
			/* EHCI #1 or #2 on 6 Series/C200 Series chipset */
			if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) {
				ehci_info(ehci, "broken D3 during system sleep on ASUS\n");
				hcd->broken_pci_sleep = 1;
				device_set_wakeup_capable(&pdev->dev, false);
			}
		}
		break;
	case PCI_VENDOR_ID_TDI:
		if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
			hcd->has_tt = 1;
			tdi_reset(ehci);
		}
		break;
	case PCI_VENDOR_ID_AMD:
		/* AMD PLL quirk */
		if (usb_amd_find_chipset_info())
			ehci->amd_pll_fix = 1;
		/* AMD8111 EHCI doesn't work, according to AMD errata */
		if (pdev->device == 0x7463) {
			ehci_info(ehci, "ignoring AMD8111 (errata)\n");
			retval = -EIO;
			goto done;
		}
		break;
	case PCI_VENDOR_ID_NVIDIA:
		switch (pdev->device) {
		/* Some NForce2 chips have problems with selective suspend;
		 * fixed in newer silicon.
		 */
		case 0x0068:
			if (pdev->revision < 0xa4)
				ehci->no_selective_suspend = 1;
			break;

		/* MCP89 chips on the MacBookAir3,1 give EPROTO when
		 * fetching device descriptors unless LPM is disabled.
		 * There are also intermittent problems enumerating
		 * devices with PPCD enabled.
		 */
		case 0x0d9d:
			ehci_info(ehci, "disable lpm/ppcd for nvidia mcp89");
			ehci->has_lpm = 0;
			ehci->has_ppcd = 0;
			ehci->command &= ~CMD_PPCEE;
			break;
		}
		break;
	case PCI_VENDOR_ID_VIA:
		if (pdev->device == 0x3104 && (pdev->revision & 0xf0) == 0x60) {
			u8 tmp;

			/* The VT6212 defaults to a 1 usec EHCI sleep time which
			 * hogs the PCI bus *badly*. Setting bit 5 of 0x4B makes
			 * that sleep time use the conventional 10 usec.
			 */
			pci_read_config_byte(pdev, 0x4b, &tmp);
			if (tmp & 0x20)
				break;
			pci_write_config_byte(pdev, 0x4b, tmp | 0x20);
		}
		break;
	case PCI_VENDOR_ID_ATI:
		/* AMD PLL quirk */
		if (usb_amd_find_chipset_info())
			ehci->amd_pll_fix = 1;
		/* SB600 and old version of SB700 have a bug in EHCI controller,
		 * which causes usb devices lose response in some cases.
		 */
		if ((pdev->device == 0x4386) || (pdev->device == 0x4396)) {
			p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
						 PCI_DEVICE_ID_ATI_SBX00_SMBUS,
						 NULL);
			if (!p_smbus)
				break;
			rev = p_smbus->revision;
			if ((pdev->device == 0x4386) || (rev == 0x3a)
			    || (rev == 0x3b)) {
				u8 tmp;
				ehci_info(ehci, "applying AMD SB600/SB700 USB "
					"freeze workaround\n");
				pci_read_config_byte(pdev, 0x53, &tmp);
				pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
			}
			pci_dev_put(p_smbus);
		}
		break;
	case PCI_VENDOR_ID_NETMOS:
		/* MosChip frame-index-register bug */
		ehci_info(ehci, "applying MosChip frame-index workaround\n");
		ehci->frame_index_bug = 1;
		break;
	}

	/* optional debug port, normally in the first BAR */
	temp = pci_find_capability(pdev, 0x0a);
	if (temp) {
		pci_read_config_dword(pdev, temp, &temp);
		temp >>= 16;
		if ((temp & (3 << 13)) == (1 << 13)) {
			temp &= 0x1fff;
			ehci->debug = ehci_to_hcd(ehci)->regs + temp;
			temp = ehci_readl(ehci, &ehci->debug->control);
			ehci_info(ehci, "debug port %d%s\n",
				HCS_DEBUG_PORT(ehci->hcs_params),
				(temp & DBGP_ENABLED)
					? " IN USE"
					: "");
			if (!(temp & DBGP_ENABLED))
				ehci->debug = NULL;
		}
	}

	ehci_reset(ehci);

	/* at least the Genesys GL880S needs fixup here */
	temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params);
	temp &= 0x0f;
	if (temp && HCS_N_PORTS(ehci->hcs_params) > temp) {
		ehci_dbg(ehci, "bogus port configuration: "
			"cc=%d x pcc=%d < ports=%d\n",
			HCS_N_CC(ehci->hcs_params),
			HCS_N_PCC(ehci->hcs_params),
			HCS_N_PORTS(ehci->hcs_params));

		switch (pdev->vendor) {
		case 0x17a0:		/* GENESYS */
			/* GL880S: should be PORTS=2 */
			temp |= (ehci->hcs_params & ~0xf);
			ehci->hcs_params = temp;
			break;
		case PCI_VENDOR_ID_NVIDIA:
			/* NF4: should be PCC=10 */
			break;
		}
	}

	/* Serial Bus Release Number is at PCI 0x60 offset */
	pci_read_config_byte(pdev, 0x60, &ehci->sbrn);
	if (pdev->vendor == PCI_VENDOR_ID_STMICRO
	    && pdev->device == PCI_DEVICE_ID_STMICRO_USB_HOST)
		ehci->sbrn = 0x20; /* ConneXT has no sbrn register */

	/* Keep this around for a while just in case some EHCI
	 * implementation uses legacy PCI PM support.  This test
	 * can be removed on 17 Dec 2009 if the dev_warn() hasn't
	 * been triggered by then.
	 */
	if (!device_can_wakeup(&pdev->dev)) {
		u16	port_wake;

		pci_read_config_word(pdev, 0x62, &port_wake);
		if (port_wake & 0x0001) {
			dev_warn(&pdev->dev, "Enabling legacy PCI PM\n");
			device_set_wakeup_capable(&pdev->dev, 1);
		}
	}

#ifdef	CONFIG_USB_SUSPEND
	/* REVISIT: the controller works fine for wakeup iff the root hub
	 * itself is "globally" suspended, but usbcore currently doesn't
	 * understand such things.
	 *
	 * System suspend currently expects to be able to suspend the entire
	 * device tree, device-at-a-time.  If we failed selective suspend
	 * reports, system suspend would fail; so the root hub code must claim
	 * success.  That's lying to usbcore, and it matters for runtime
	 * PM scenarios with selective suspend and remote wakeup...
	 */
	if (ehci->no_selective_suspend && device_can_wakeup(&pdev->dev))
		ehci_warn(ehci, "selective suspend/wakeup unavailable\n");
#endif

	ehci_port_power(ehci, 1);
	retval = ehci_pci_reinit(ehci, pdev);
done:
	return retval;
}
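The NVidia workaround in this example is unusual in that it lowers only the coherent mask (to 31 bits, keeping QH/ITD/SITD allocations below 2 GB) while streaming DMA keeps its full width. Expressed with the generic DMA API, the same restriction would look roughly like this sketch (not the driver's literal code):

	/* coherent allocations below 2 GB; streaming DMA left untouched */
	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(31)))
		ehci_warn(ehci, "can't enable NVidia workaround for >2GB RAM\n");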
Example #19
static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct net_device *net_dev;
	struct ec_bhf_priv *priv;
	void __iomem *dma_io;
	void __iomem *io;
	int err = 0;

	err = pci_enable_device(dev);
	if (err)
		return err;

	pci_set_master(dev);

	err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&dev->dev,
			"Required dma mask not supported, failed to initialize device\n");
		err = -EIO;
		goto err_disable_dev;
	}

	err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&dev->dev,
			"Required dma mask not supported, failed to initialize device\n");
		goto err_disable_dev;
	}

	err = pci_request_regions(dev, "ec_bhf");
	if (err) {
		dev_err(&dev->dev, "Failed to request pci memory regions\n");
		goto err_disable_dev;
	}

	io = pci_iomap(dev, 0, 0);
	if (!io) {
		dev_err(&dev->dev, "Failed to map pci card memory bar 0");
		err = -EIO;
		goto err_release_regions;
	}

	dma_io = pci_iomap(dev, 2, 0);
	if (!dma_io) {
		dev_err(&dev->dev, "Failed to map pci card memory bar 2");
		err = -EIO;
		goto err_unmap;
	}

	net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
	if (net_dev == NULL) {
		err = -ENOMEM;
		goto err_unmap_dma_io;
	}

	pci_set_drvdata(dev, net_dev);
	SET_NETDEV_DEV(net_dev, &dev->dev);

	net_dev->features = 0;
	net_dev->flags |= IFF_NOARP;

	net_dev->netdev_ops = &ec_bhf_netdev_ops;

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;
	priv->io = io;
	priv->dma_io = dma_io;
	priv->dev = dev;

	err = ec_bhf_setup_offsets(priv);
	if (err < 0)
		goto err_free_net_dev;

	memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);

	err = register_netdev(net_dev);
	if (err < 0)
		goto err_free_net_dev;

	return 0;

err_free_net_dev:
	free_netdev(net_dev);
err_unmap_dma_io:
	pci_iounmap(dev, dma_io);
err_unmap:
	pci_iounmap(dev, io);
err_release_regions:
	pci_release_regions(dev);
err_disable_dev:
	pci_clear_master(dev);
	pci_disable_device(dev);

	return err;
}
Example #20
static int bcwc_pci_probe(struct pci_dev *pdev,
			  const struct pci_device_id *entry)
{
	struct bcwc_private *dev_priv;
	int ret;

	dev_info(&pdev->dev, "Found Broadcom PCIe webcam with device id: %x\n",
		 pdev->device);

	dev_priv = kzalloc(sizeof(struct bcwc_private), GFP_KERNEL);
	if (!dev_priv) {
		dev_err(&pdev->dev, "Failed to allocate memory\n");
		return -ENOMEM;
	}

	dev_priv->pdev = pdev;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to enable device\n");
		goto fail_free;
	}

	ret = bcwc_pci_reserve_mem(dev_priv);
	if (ret)
		goto fail_enable;

	ret = pci_enable_msi(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to enable MSI\n");
		goto fail_enable;
	}

	INIT_WORK(&dev_priv->irq_work, bcwc_irq_work);

	ret = bcwc_irq_enable(dev_priv);
	if (ret)
		goto fail_msi;

	ret = bcwc_pci_set_dma_mask(dev_priv, 64);
	if (ret)
		ret = bcwc_pci_set_dma_mask(dev_priv, 32);

	if (ret)
		goto fail_msi;

	dev_info(&pdev->dev, "Setting %ubit DMA mask\n", dev_priv->dma_mask);
	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dev_priv->dma_mask));

	pci_set_master(pdev);
	pci_set_drvdata(pdev, dev_priv);

	dev_priv->ddr_model = 4;
	dev_priv->ddr_speed = 450;

	bcwc_hw_init(dev_priv);

	return 0;
fail_msi:
	pci_disable_msi(pdev);
fail_enable:
	pci_disable_device(pdev);
fail_free:
	kfree(dev_priv);
	return ret;
}
Example #21
static int p54p_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	struct p54p_priv *priv;
	struct ieee80211_hw *dev;
	unsigned long mem_addr, mem_len;
	int err;

	pci_dev_get(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable new PCI device\n");
		return err;
	}

	mem_addr = pci_resource_start(pdev, 0);
	mem_len = pci_resource_len(pdev, 0);
	if (mem_len < sizeof(struct p54p_csr)) {
		dev_err(&pdev->dev, "Too short PCI resources\n");
		err = -ENODEV;
		goto err_disable_dev;
	}

	err = pci_request_regions(pdev, "p54pci");
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_dev;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "No suitable DMA available\n");
		goto err_free_reg;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	pci_write_config_byte(pdev, 0x40, 0);
	pci_write_config_byte(pdev, 0x41, 0);

	dev = p54_init_common(sizeof(*priv));
	if (!dev) {
		dev_err(&pdev->dev, "ieee80211 alloc failed\n");
		err = -ENOMEM;
		goto err_free_reg;
	}

	priv = dev->priv;
	priv->pdev = pdev;

	init_completion(&priv->fw_loaded);
	SET_IEEE80211_DEV(dev, &pdev->dev);
	pci_set_drvdata(pdev, dev);

	priv->map = ioremap(mem_addr, mem_len);
	if (!priv->map) {
		dev_err(&pdev->dev, "Cannot map device memory\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	priv->ring_control = pci_alloc_consistent(pdev, sizeof(*priv->ring_control),
						  &priv->ring_control_dma);
	if (!priv->ring_control) {
		dev_err(&pdev->dev, "Cannot allocate rings\n");
		err = -ENOMEM;
		goto err_iounmap;
	}
	priv->common.open = p54p_open;
	priv->common.stop = p54p_stop;
	priv->common.tx = p54p_tx;

	spin_lock_init(&priv->lock);
	tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev);

	err = request_firmware_nowait(THIS_MODULE, 1, "isl3886pci",
				      &priv->pdev->dev, GFP_KERNEL,
				      priv, p54p_firmware_step2);
	if (!err)
		return 0;

	pci_free_consistent(pdev, sizeof(*priv->ring_control),
			    priv->ring_control, priv->ring_control_dma);

 err_iounmap:
	iounmap(priv->map);

 err_free_dev:
	p54_free_common(dev);

 err_free_reg:
	pci_release_regions(pdev);
 err_disable_dev:
	pci_disable_device(pdev);
	pci_dev_put(pdev);
	return err;
}
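p54p_probe() still allocates its ring with the legacy pci_alloc_consistent()/pci_free_consistent() pair. On the generic DMA API the same coherent allocation would read roughly as follows (a sketch reusing the example's ring_control fields):

	/* pci_alloc_consistent() implied GFP_ATOMIC; GFP_KERNEL is fine
	 * in probe context */
	priv->ring_control = dma_alloc_coherent(&pdev->dev,
						sizeof(*priv->ring_control),
						&priv->ring_control_dma,
						GFP_KERNEL);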
Example #22
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
	struct iwl_bus *bus;
	struct iwl_pci_bus *pci_bus;
	u16 pci_cmd;
	int err;

	bus = kzalloc(sizeof(*bus) + sizeof(*pci_bus), GFP_KERNEL);
	if (!bus) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "Couldn't allocate iwl_pci_bus");
		err = -ENOMEM;
		goto out_no_pci;
	}

	pci_bus = IWL_BUS_GET_PCI_BUS(bus);
	pci_bus->pci_dev = pdev;

	/* W/A - seems to solve weird behavior. We need to remove this if we
	 * don't want to stay in L1 all the time. This wastes a lot of power */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
				PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_no_pci;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			/* bus->dev is not filled in until later; use &pdev->dev */
			dev_printk(KERN_ERR, &pdev->dev,
				   "No suitable DMA available.\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_printk(KERN_ERR, bus->dev, "pci_request_regions failed");
		goto out_pci_disable_device;
	}

	pci_bus->hw_base = pci_iomap(pdev, 0, 0);
	if (!pci_bus->hw_base) {
		dev_printk(KERN_ERR, bus->dev, "pci_iomap failed");
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	dev_printk(KERN_INFO, &pdev->dev,
		"pci_resource_len = 0x%08llx\n",
		(unsigned long long) pci_resource_len(pdev, 0));
	dev_printk(KERN_INFO, &pdev->dev,
		"pci_resource_base = %p\n", pci_bus->hw_base);

	dev_printk(KERN_INFO, &pdev->dev,
		"HW Revision ID = 0x%X\n", pdev->revision);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	err = pci_enable_msi(pdev);
	if (err) {
		dev_printk(KERN_ERR, &pdev->dev, "pci_enable_msi failed");
		goto out_iounmap;
	}

	/* TODO: Move this away, not needed if not MSI */
	/* enable rfkill interrupt: hw bug w/a */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
	}

	bus->dev = &pdev->dev;
	bus->irq = pdev->irq;
	bus->ops = &pci_ops;

	err = iwl_probe(bus, cfg);
	if (err)
		goto out_disable_msi;
	return 0;

out_disable_msi:
	pci_disable_msi(pdev);
out_iounmap:
	pci_iounmap(pdev, pci_bus->hw_base);
out_pci_release_regions:
	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	kfree(bus);
	return err;
}
Example #23
static int atp867x_ata_pci_sff_init_host(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	unsigned int mask = 0;
	int i, rc;

	/*
	 * do not map rombase
	 */
	rc = pcim_iomap_regions(pdev, 1 << ATP867X_BAR_IOBASE, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

#ifdef	ATP867X_DEBUG
	atp867x_check_res(pdev);

	for (i = 0; i < PCI_ROM_RESOURCE; i++)
		printk(KERN_DEBUG "ATP867X: iomap[%d]=0x%llx\n", i,
			(unsigned long long)(host->iomap[i]));
#endif

	/*
	 * request, iomap BARs and init port addresses accordingly
	 */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_ioports *ioaddr = &ap->ioaddr;

		ioaddr->cmd_addr = ATP867X_IO_PORTBASE(ap, i);
		ioaddr->ctl_addr = ioaddr->altstatus_addr
				 = ATP867X_IO_ALTSTATUS(ap, i);
		ioaddr->bmdma_addr = ATP867X_IO_DMABASE(ap, i);

		ata_sff_std_ports(ioaddr);
		rc = atp867x_set_priv(ap);
		if (rc)
			return rc;

#ifdef	ATP867X_DEBUG
		atp867x_check_ports(ap, i);
#endif
		ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
			(unsigned long)ioaddr->cmd_addr,
			(unsigned long)ioaddr->ctl_addr);
		ata_port_desc(ap, "bmdma 0x%lx",
			(unsigned long)ioaddr->bmdma_addr);

		mask |= 1 << i;
	}

	if (!mask) {
		dev_printk(KERN_ERR, gdev, "no available native port\n");
		return -ENODEV;
	}

	atp867x_fixup(host);

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	return rc;
}
Example #24
static int cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const unsigned int cmd_port[] = { 0x1F0, 0x170 };
	static const unsigned int ctl_port[] = { 0x3F6, 0x376 };
	struct ata_port_info pi = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.port_ops	= &cs5520_port_ops,
	};
	const struct ata_port_info *ppi[2];
	u8 pcicfg;
	void __iomem *iomap[5];
	struct ata_host *host;
	struct ata_ioports *ioaddr;
	int i, rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* IDE port enable bits */
	pci_read_config_byte(pdev, 0x60, &pcicfg);

	/* Check if the ATA ports are enabled */
	if ((pcicfg & 3) == 0)
		return -ENODEV;

	ppi[0] = ppi[1] = &ata_dummy_port_info;
	if (pcicfg & 1)
		ppi[0] = &pi;
	if (pcicfg & 2)
		ppi[1] = &pi;

	if ((pcicfg & 0x40) == 0) {
		dev_warn(&pdev->dev, "DMA mode disabled. Enabling.\n");
		pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
	}

	pi.mwdma_mask = id->driver_data;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;

	/* Perform set up for DMA */
	if (pci_enable_device_io(pdev)) {
		printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n");
		return -ENODEV;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
		return -ENODEV;
	}
	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
		printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n");
		return -ENODEV;
	}

	/* Map IO ports and initialize host accordingly */
	iomap[0] = devm_ioport_map(&pdev->dev, cmd_port[0], 8);
	iomap[1] = devm_ioport_map(&pdev->dev, ctl_port[0], 1);
	iomap[2] = devm_ioport_map(&pdev->dev, cmd_port[1], 8);
	iomap[3] = devm_ioport_map(&pdev->dev, ctl_port[1], 1);
	iomap[4] = pcim_iomap(pdev, 2, 0);

	if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4])
		return -ENOMEM;

	ioaddr = &host->ports[0]->ioaddr;
	ioaddr->cmd_addr = iomap[0];
	ioaddr->ctl_addr = iomap[1];
	ioaddr->altstatus_addr = iomap[1];
	ioaddr->bmdma_addr = iomap[4];
	ata_sff_std_ports(ioaddr);

	ata_port_desc(host->ports[0],
		      "cmd 0x%x ctl 0x%x", cmd_port[0], ctl_port[0]);
	ata_port_pbar_desc(host->ports[0], 4, 0, "bmdma");

	ioaddr = &host->ports[1]->ioaddr;
	ioaddr->cmd_addr = iomap[2];
	ioaddr->ctl_addr = iomap[3];
	ioaddr->altstatus_addr = iomap[3];
	ioaddr->bmdma_addr = iomap[4] + 8;
	ata_sff_std_ports(ioaddr);

	ata_port_desc(host->ports[1],
		      "cmd 0x%x ctl 0x%x", cmd_port[1], ctl_port[1]);
	ata_port_pbar_desc(host->ports[1], 4, 8, "bmdma");

	/* activate the host */
	pci_set_master(pdev);
	rc = ata_host_start(host);
	if (rc)
		return rc;

	for (i = 0; i < 2; i++) {
		static const int irq[] = { 14, 15 };
		struct ata_port *ap = host->ports[i];

		if (ata_port_is_dummy(ap))
			continue;

		rc = devm_request_irq(&pdev->dev, irq[ap->port_no],
				      ata_bmdma_interrupt, 0, DRV_NAME, host);
		if (rc)
			return rc;

		ata_port_desc(ap, "irq %d", irq[i]);
	}

	return ata_host_register(host, &cs5520_sht);
}
Example #25
static int hififo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
    int i;
    int rc;
    dev_t dev = 0;
    char tmpstr[16];
    struct hififo_dev *drvdata;
    struct hififo_fifo * fifo;

    drvdata = devm_kzalloc(&pdev->dev,
                           sizeof(struct hififo_dev),
                           GFP_KERNEL);
    if (!drvdata) {
        printk(KERN_ERR DEVICE_NAME "failed to alloc drvdata\n");
        return -ENOMEM;
    }

    rc = pcim_enable_device(pdev);
    if(rc) {
        printk(KERN_ERR DEVICE_NAME ": pcim_enable_device() failed\n");
        return rc;
    }

    rc = pci_request_regions(pdev, DEVICE_NAME);
    if(rc < 0) {
        printk(KERN_ERR DEVICE_NAME ": pci_request_regions() failed\n");
        return rc;
    }

    printk(KERN_INFO DEVICE_NAME\
           ": Found Harmon Instruments PCI Express interface board\n");
    pci_set_drvdata(pdev, drvdata);

    pci_set_master(pdev); /* returns void */

    rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
    if (rc) {
        printk(KERN_ERR DEVICE_NAME ": pci_set_dma_mask() failed\n");
        return rc;
    }

    rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
    if (rc) {
        printk(KERN_ERR DEVICE_NAME ": pci_set_consistent_dma_mask() failed\n");
        return rc;
    }

    rc = pci_enable_msi(pdev);
    if(rc < 0) {
        printk(KERN_ERR DEVICE_NAME ": pci_enable_msi() failed\n");
        return rc;
    }

    rc = devm_request_irq(&pdev->dev,
                          pdev->irq,
                          (irq_handler_t) hififo_interrupt,
                          0, /* flags */
                          DEVICE_NAME,
                          drvdata);
    if(rc) {
        printk(KERN_ERR DEVICE_NAME ": request_irq() failed\n");
        return rc;
    }

    drvdata->pio_reg_base = (u64 *) pcim_iomap(pdev, 0, 0);
    printk(KERN_INFO DEVICE_NAME					\
           ": pci_resource_start(dev, 0) = 0x%.8llx, virt = 0x%.16llx\n",
           (u64) pci_resource_start(pdev, 0),
           (u64) drvdata->pio_reg_base);

    for(i=0; i<8; i++)
        printk("bar0[%d] = %.8x\n", i, (u32) readreg(drvdata, i));
    drvdata->idreg = readreg(drvdata, REG_ID);
    drvdata->build = readreg(drvdata, REG_BUILD);
    printk(KERN_INFO DEVICE_NAME " FPGA build = 0x%.8X\n", drvdata->build);
    drvdata->nfifos = 0;
    for(i=0; i<MAX_FIFOS; i++) {
        if(drvdata->idreg & (1 << i))
            drvdata->nfifos ++;
    }

    if(drvdata->nfifos == 0) {
        printk(KERN_INFO DEVICE_NAME "no fifos reported on card\n");
        return -1;
    }

    /* reset it */
    writereg(drvdata, 0xFFFF, REG_RESET_SET);
    udelay(10); /* wait for completion of anything that was running */
    writereg(drvdata, 0xFFFF, REG_RESET_CLEAR);

    rc = alloc_chrdev_region(&dev, 0, drvdata->nfifos, DEVICE_NAME);
    if (rc) {
        printk(KERN_ERR DEVICE_NAME ": alloc_chrdev_region() failed\n");
        return rc;
    }

    drvdata->major = MAJOR(dev);

    for(i=0; i<MAX_FIFOS; i++) {
        if((drvdata->idreg & (1 << i)) == 0)
            continue; /* fifo not present */
        fifo = devm_kzalloc(&pdev->dev,
                            sizeof(struct hififo_fifo),
                            GFP_KERNEL);
        if (!fifo) {
            printk(KERN_ERR DEVICE_NAME
                   ": failed to alloc hififo_fifo\n");
            return -ENOMEM;
        }
        drvdata->fifo[i] = fifo;
        if(i<MAX_FIFOS/2) {
            cdev_init(&fifo->cdev, &fops_fpc); /* returns void */
            fifo->cdev.ops = &fops_fpc;
        }
        else {
            cdev_init(&fifo->cdev, &fops_tpc); /* returns void */
            fifo->cdev.ops = &fops_tpc;
        }

        fifo->cdev.owner = THIS_MODULE;

        rc = cdev_add (&fifo->cdev, MKDEV(MAJOR(dev), i), 1);
        if (rc) {
            printk(KERN_NOTICE DEVICE_NAME\
                   ": Error %d adding cdev\n", rc);
            return rc;
        }
        sprintf(tmpstr, "hififo_%d_%d", hififo_count, i);
        device_create(hififo_class,
                      NULL,
                      MKDEV(MAJOR(dev), i),
                      NULL,
                      tmpstr);
        fifo->n = i;
        spin_lock_init(&fifo->lock_open);
        fifo->local_base = drvdata->pio_reg_base+8+i;
        init_waitqueue_head(&fifo->queue);
        fifo->build = drvdata->build;
        mutex_init(&fifo->sem);
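        /* note: the pci_alloc_consistent() below is not checked for
         * failure; a NULL ring would only be caught when first used */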
        fifo->ring = pci_alloc_consistent(pdev,
                                          BUFFER_SIZE,
                                          &fifo->ring_dma_addr);
    }
    hififo_count++;
    /* enable interrupts */
    writereg(drvdata, 0xFFFF, REG_INTERRUPT);
    return 0;
}
Example #26
static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int err;
	int port;

	printk(KERN_INFO PFX "Initializing %s\n",
	       pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}

	
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    pci_resource_len(pdev, 0) != 1 << 20) {
		dev_err(&pdev->dev, "Missing DCS, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
			goto err_release_regions;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
			 "consistent PCI DMA mask.\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
				"aborting.\n");
			goto err_release_regions;
		}
	}

	priv = kzalloc(sizeof *priv, GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "Device struct alloc failed, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	dev       = &priv->dev;
	dev->pdev = pdev;
	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);

	mutex_init(&priv->port_mutex);

	INIT_LIST_HEAD(&priv->pgdir_list);
	mutex_init(&priv->pgdir_mutex);

	
	err = mlx4_reset(dev);
	if (err) {
		mlx4_err(dev, "Failed to reset HCA, aborting.\n");
		goto err_free_dev;
	}

	err = mlx4_cmd_init(dev);
	if (err) {
		mlx4_err(dev, "Failed to init command interface, aborting.\n");
		goto err_free_dev;
	}

	err = mlx4_init_hca(dev);
	if (err)
		goto err_cmd;

	err = mlx4_alloc_eq_table(dev);
	if (err)
		goto err_close;

	mlx4_enable_msi_x(dev);

	err = mlx4_setup_hca(dev);
	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
		dev->flags &= ~MLX4_FLAG_MSI_X;
		pci_disable_msix(pdev);
		err = mlx4_setup_hca(dev);
	}

	if (err)
		goto err_free_eq;

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_init_port_info(dev, port);
		if (err)
			goto err_port;
	}

	err = mlx4_register_device(dev);
	if (err)
		goto err_port;

	mlx4_sense_init(dev);
	mlx4_start_sense(dev);

	pci_set_drvdata(pdev, dev);

	return 0;

err_port:
	for (port = 1; port <= dev->caps.num_ports; port++)
		mlx4_cleanup_port_info(&priv->port[port]);

	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_free_eq:
	mlx4_free_eq_table(dev);

err_close:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

	mlx4_close_hca(dev);

err_cmd:
	mlx4_cmd_cleanup(dev);

err_free_dev:
	kfree(priv);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
Example #27
/* called during probe() after chip reset completes */
static int ehci_pci_setup(struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
	struct pci_dev		*p_smbus;
	u8			rev;
	u32			temp;
	int			retval;

	switch (pdev->vendor) {
	case PCI_VENDOR_ID_TOSHIBA_2:
		/* celleb's companion chip */
		if (pdev->device == 0x01b5) {
#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
			ehci->big_endian_mmio = 1;
#else
			ehci_warn(ehci,
				  "unsupported big endian Toshiba quirk\n");
#endif
		}
		break;
	}

	ehci->caps = hcd->regs;
	ehci->regs = hcd->regs +
		HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));

	dbg_hcs_params(ehci, "reset");
	dbg_hcc_params(ehci, "reset");

        /* ehci_init() causes memory for DMA transfers to be
         * allocated.  Thus, any vendor-specific workarounds based on
         * limiting the type of memory used for DMA transfers must
         * happen before ehci_init() is called. */
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_NVIDIA:
		/* NVidia reports that certain chips don't handle
		 * QH, ITD, or SITD addresses above 2GB.  (But TD,
		 * data buffer, and periodic schedule are normal.)
		 */
		switch (pdev->device) {
		case 0x003c:	/* MCP04 */
		case 0x005b:	/* CK804 */
		case 0x00d8:	/* CK8 */
		case 0x00e8:	/* CK8S */
			if (pci_set_consistent_dma_mask(pdev,
						DMA_31BIT_MASK) < 0)
				ehci_warn(ehci, "can't enable NVidia "
					"workaround for >2GB RAM\n");
			break;
		}
		break;
	}

	/* cache this readonly data; minimize chip reads */
	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);

	retval = ehci_halt(ehci);
	if (retval)
		return retval;

	/* data structure init */
	retval = ehci_init(hcd);
	if (retval)
		return retval;

	switch (pdev->vendor) {
	case PCI_VENDOR_ID_TDI:
		if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
			hcd->has_tt = 1;
			tdi_reset(ehci);
		}
		break;
	case PCI_VENDOR_ID_AMD:
		/* AMD8111 EHCI doesn't work, according to AMD errata */
		if (pdev->device == 0x7463) {
			ehci_info(ehci, "ignoring AMD8111 (errata)\n");
			retval = -EIO;
			goto done;
		}
		break;
	case PCI_VENDOR_ID_NVIDIA:
		switch (pdev->device) {
		/* Some NForce2 chips have problems with selective suspend;
		 * fixed in newer silicon.
		 */
		case 0x0068:
			if (pdev->revision < 0xa4)
				ehci->no_selective_suspend = 1;
			break;
		}
		break;
	case PCI_VENDOR_ID_VIA:
		if (pdev->device == 0x3104 && (pdev->revision & 0xf0) == 0x60) {
			u8 tmp;

			/* The VT6212 defaults to a 1 usec EHCI sleep time which
			 * hogs the PCI bus *badly*. Setting bit 5 of 0x4B makes
			 * that sleep time use the conventional 10 usec.
			 */
			pci_read_config_byte(pdev, 0x4b, &tmp);
			if (tmp & 0x20)
				break;
			pci_write_config_byte(pdev, 0x4b, tmp | 0x20);
		}
		break;
	case PCI_VENDOR_ID_ATI:
		/* SB600 and old version of SB700 have a bug in EHCI controller,
		 * which causes usb devices lose response in some cases.
		 */
		if ((pdev->device == 0x4386) || (pdev->device == 0x4396)) {
			p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
						 PCI_DEVICE_ID_ATI_SBX00_SMBUS,
						 NULL);
			if (!p_smbus)
				break;
			rev = p_smbus->revision;
			if ((pdev->device == 0x4386) || (rev == 0x3a)
			    || (rev == 0x3b)) {
				u8 tmp;
				ehci_info(ehci, "applying AMD SB600/SB700 USB "
					"freeze workaround\n");
				pci_read_config_byte(pdev, 0x53, &tmp);
				pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
			}
			pci_dev_put(p_smbus);
		}
		break;
	}

	ehci_reset(ehci);

	/* at least the Genesys GL880S needs fixup here */
	temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params);
	temp &= 0x0f;
	if (temp && HCS_N_PORTS(ehci->hcs_params) > temp) {
		ehci_dbg(ehci, "bogus port configuration: "
			"cc=%d x pcc=%d < ports=%d\n",
			HCS_N_CC(ehci->hcs_params),
			HCS_N_PCC(ehci->hcs_params),
			HCS_N_PORTS(ehci->hcs_params));

		switch (pdev->vendor) {
		case 0x17a0:		/* GENESYS */
			/* GL880S: should be PORTS=2 */
			temp |= (ehci->hcs_params & ~0xf);
			ehci->hcs_params = temp;
			break;
		case PCI_VENDOR_ID_NVIDIA:
			/* NF4: should be PCC=10 */
			break;
		}
	}

	/* Serial Bus Release Number is at PCI 0x60 offset */
	pci_read_config_byte(pdev, 0x60, &ehci->sbrn);

	/* Keep this around for a while just in case some EHCI
	 * implementation uses legacy PCI PM support.  This test
	 * can be removed on 17 Dec 2009 if the dev_warn() hasn't
	 * been triggered by then.
	 */
	if (!device_can_wakeup(&pdev->dev)) {
		u16	port_wake;

		pci_read_config_word(pdev, 0x62, &port_wake);
		if (port_wake & 0x0001) {
			dev_warn(&pdev->dev, "Enabling legacy PCI PM\n");
			device_set_wakeup_capable(&pdev->dev, 1);
		}
	}

#ifdef	CONFIG_USB_SUSPEND
	/* REVISIT: the controller works fine for wakeup iff the root hub
	 * itself is "globally" suspended, but usbcore currently doesn't
	 * understand such things.
	 *
	 * System suspend currently expects to be able to suspend the entire
	 * device tree, device-at-a-time.  If we failed selective suspend
	 * reports, system suspend would fail; so the root hub code must claim
	 * success.  That's lying to usbcore, and it matters for runtime
	 * PM scenarios with selective suspend and remote wakeup...
	 */
	if (ehci->no_selective_suspend && device_can_wakeup(&pdev->dev))
		ehci_warn(ehci, "selective suspend/wakeup unavailable\n");
#endif

	ehci_port_power(ehci, 1);
	retval = ehci_pci_reinit(ehci, pdev);
done:
	return retval;
}
Example #28
static int __devinit sil680_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.sht = &sil680_sht,
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = 0x1f,
		.mwdma_mask = 0x07,
		.udma_mask = ATA_UDMA6,
		.port_ops = &sil680_port_ops
	};
	static const struct ata_port_info info_slow = {
		.sht = &sil680_sht,
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = 0x1f,
		.mwdma_mask = 0x07,
		.udma_mask = ATA_UDMA5,
		.port_ops = &sil680_port_ops
	};
	const struct ata_port_info *ppi[] = { &info, NULL };
	static int printed_version;
	struct ata_host *host;
	void __iomem *mmio_base;
	int rc, try_mmio;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	switch (sil680_init_chip(pdev, &try_mmio)) {
	case 0:
		ppi[0] = &info_slow;
		break;
	case 0x30:
		return -ENODEV;
	}

	if (!try_mmio)
		goto use_ioports;

	/* Try to acquire MMIO resources and fall back to PIO if
	 * that fails
	 */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;
	rc = pcim_iomap_regions(pdev, 1 << SIL680_MMIO_BAR, DRV_NAME);
	if (rc)
		goto use_ioports;

	/* Allocate host and set it up */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;
	host->iomap = pcim_iomap_table(pdev);

	/* Setup DMA masks */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	pci_set_master(pdev);

	/* Get MMIO base and initialize port addresses */
	mmio_base = host->iomap[SIL680_MMIO_BAR];
	host->ports[0]->ioaddr.bmdma_addr = mmio_base + 0x00;
	host->ports[0]->ioaddr.cmd_addr = mmio_base + 0x80;
	host->ports[0]->ioaddr.ctl_addr = mmio_base + 0x8a;
	host->ports[0]->ioaddr.altstatus_addr = mmio_base + 0x8a;
	ata_std_ports(&host->ports[0]->ioaddr);
	host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x08;
	host->ports[1]->ioaddr.cmd_addr = mmio_base + 0xc0;
	host->ports[1]->ioaddr.ctl_addr = mmio_base + 0xca;
	host->ports[1]->ioaddr.altstatus_addr = mmio_base + 0xca;
	ata_std_ports(&host->ports[1]->ioaddr);

	/* Register & activate */
	return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED,
				 &sil680_sht);

use_ioports:
	return ata_pci_init_one(pdev, ppi);
}

#ifdef CONFIG_PM
static int sil680_reinit_one(struct pci_dev *pdev)
{
	int try_mmio;

	sil680_init_chip(pdev, &try_mmio);
	return ata_pci_device_resume(pdev);
}
#endif

static const struct pci_device_id sil680[] = {
	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), },

	{ },
};

static struct pci_driver sil680_pci_driver = {
	.name 		= DRV_NAME,
	.id_table	= sil680,
	.probe 		= sil680_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= sil680_reinit_one,
#endif
};

static int __init sil680_init(void)
{
	return pci_register_driver(&sil680_pci_driver);
}

static void __exit sil680_exit(void)
{
	pci_unregister_driver(&sil680_pci_driver);
}
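The paired pci_set_dma_mask()/pci_set_consistent_dma_mask() calls in sil680_init_one() predate dma_set_mask_and_coherent(); on kernels that provide it, both masks can be set in one call. A hedged sketch (the wrapper name sil680_set_dma() is illustrative, not from this driver):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Sketch: dma_set_mask_and_coherent() sets the streaming and coherent
 * masks together, replacing the two separate calls above.
 */
static int sil680_set_dma(struct pci_dev *pdev)
{
	/* ATA_DMA_MASK is DMA_BIT_MASK(32); no 64-bit fallback needed */
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}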
Example No. 29
int __devinit rtl_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	struct ieee80211_hw *hw = NULL;
	struct rtl_priv *rtlpriv = NULL;
	struct rtl_pci_priv *pcipriv = NULL;
	struct rtl_pci *rtlpci;
	unsigned long pmem_start, pmem_len, pmem_flags;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		RT_ASSERT(false,
			  ("%s : Cannot enable new PCI device\n",
			   pci_name(pdev)));
		return err;
	}

	/* 32-bit DMA is required for both streaming and consistent mappings */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		RT_ASSERT(false, ("Unable to obtain 32bit DMA "
				  "for consistent allocations\n"));
		pci_disable_device(pdev);
		return -ENOMEM;
	}

	pci_set_master(pdev);

	hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) +
				sizeof(struct rtl_priv), &rtl_ops);
	if (!hw) {
		RT_ASSERT(false,
			  ("%s : ieee80211 alloc failed\n", pci_name(pdev)));
		err = -ENOMEM;
		goto fail1;
	}

	SET_IEEE80211_DEV(hw, &pdev->dev);
	pci_set_drvdata(pdev, hw);

	rtlpriv = hw->priv;
	pcipriv = (void *)rtlpriv->priv;
	pcipriv->dev.pdev = pdev;

	/*
	 * Init dbgp flags before all other functions, because we
	 * will use them in other functions like RT_TRACE/RT_PRINT/
	 * RTL_PRINT_DATA; you cannot use these macros before this.
	 */
	rtl_dbgp_flag_init(hw);

	/* MEM map */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		RT_ASSERT(false, ("Can't obtain PCI resources\n"));
		goto fail1;	/* the device is already enabled, so unwind */
	}

	pmem_start = pci_resource_start(pdev, 2);
	pmem_len = pci_resource_len(pdev, 2);
	pmem_flags = pci_resource_flags(pdev, 2);

	/*shared mem start */
	rtlpriv->io.pci_mem_start =
			(unsigned long)pci_iomap(pdev, 2, pmem_len);
	if (rtlpriv->io.pci_mem_start == 0) {
		RT_ASSERT(false, ("Can't map PCI mem\n"));
		goto fail2;
	}

	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
		 ("mem mapped space: start: 0x%08lx len:%08lx "
		  "flags:%08lx, after map:0x%08lx\n",
		  pmem_start, pmem_len, pmem_flags,
		  rtlpriv->io.pci_mem_start));

	/* Disable Clk Request */
	pci_write_config_byte(pdev, 0x81, 0);
	/* leave D3 mode */
	pci_write_config_byte(pdev, 0x44, 0);
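	/* offset 0x04 is PCI_COMMAND: 0x06 enables memory space and bus
	 * mastering, 0x07 adds I/O space */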
	pci_write_config_byte(pdev, 0x04, 0x06);
	pci_write_config_byte(pdev, 0x04, 0x07);

	/* init cfg & intf_ops */
	rtlpriv->rtlhal.interface = INTF_PCI;
	rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
	rtlpriv->intf_ops = &rtl_pci_ops;

	/* find adapter */
	_rtl_pci_find_adapter(pdev, hw);

	/* Init IO handler */
	_rtl_pci_io_handler_init(&pdev->dev, hw);

	/*like read eeprom and so on */
	rtlpriv->cfg->ops->read_eeprom_info(hw);

	if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("Can't init_sw_vars.\n"));
		goto fail3;
	}

	rtlpriv->cfg->ops->init_sw_leds(hw);

	/*aspm */
	rtl_pci_init_aspm(hw);

	/* Init mac80211 sw */
	err = rtl_init_core(hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("Can't allocate sw for mac80211.\n"));
		goto fail3;
	}

	/* Init PCI sw */
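	/* rtl_pci_init() returns nonzero on success, hence the negation */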
	err = !rtl_pci_init(hw, pdev);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("Failed to init PCI.\n"));
		goto fail3;
	}

	err = ieee80211_register_hw(hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("Can't register mac80211 hw.\n"));
		goto fail3;
	}
	rtlpriv->mac80211.mac80211_registered = 1;

	err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("failed to create sysfs device attributes\n"));
		goto fail3;
	}

	/*init rfkill */
	rtl_init_rfkill(hw);

	rtlpci = rtl_pcidev(pcipriv);
	err = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
			  IRQF_SHARED, KBUILD_MODNAME, hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 ("%s: failed to register IRQ handler\n",
			  wiphy_name(hw->wiphy)));
		goto fail3;
	}
	rtlpci->irq_alloc = 1;

	set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
	return 0;

fail3:
	pci_set_drvdata(pdev, NULL);
	rtl_deinit_core(hw);
	_rtl_pci_io_handler_release(hw);

	/* unmap before freeing hw: rtlpriv lives inside hw's private area */
	if (rtlpriv->io.pci_mem_start != 0)
		pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);

	ieee80211_free_hw(hw);

fail2:
	pci_release_regions(pdev);

fail1:
	pci_disable_device(pdev);

	return -ENODEV;
}
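With the pci_request_regions() failure above rerouted to fail1 (the enabled device would otherwise leak), the unwind ladder follows the usual convention: one label per acquired resource, each failure jumping to the label that releases everything acquired so far. A stripped-down sketch of the convention (names illustrative):

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;		/* nothing acquired yet */

	err = pci_request_regions(pdev, "example");
	if (err)
		goto err_disable;	/* undo pci_enable_device() only */

	/* ... further setup; later failures jump to labels that also
	 * release the regions, and so on ... */
	return 0;

err_disable:
	pci_disable_device(pdev);
	return err;
}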
Example No. 30
/* called during probe() after chip reset completes */
static int ehci_pci_setup(struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
	u32			temp;
	int			retval;

	switch (pdev->vendor) {
	case PCI_VENDOR_ID_TOSHIBA_2:
		/* celleb's companion chip */
		if (pdev->device == 0x01b5) {
#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
			ehci->big_endian_mmio = 1;
#else
			ehci_warn(ehci,
				  "unsupported big endian Toshiba quirk\n");
#endif
		}
		break;
	}

	ehci->caps = hcd->regs;
	ehci->regs = hcd->regs +
		HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));

	dbg_hcs_params(ehci, "reset");
	dbg_hcc_params(ehci, "reset");

	/* ehci_init() causes memory for DMA transfers to be
	 * allocated.  Thus, any vendor-specific workarounds based on
	 * limiting the type of memory used for DMA transfers must
	 * happen before ehci_init() is called.
	 */
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_NVIDIA:
		/* NVidia reports that certain chips don't handle
		 * QH, ITD, or SITD addresses above 2GB.  (But TD,
		 * data buffer, and periodic schedule are normal.)
		 */
		switch (pdev->device) {
		case 0x003c:	/* MCP04 */
		case 0x005b:	/* CK804 */
		case 0x00d8:	/* CK8 */
		case 0x00e8:	/* CK8S */
			if (pci_set_consistent_dma_mask(pdev,
						DMA_BIT_MASK(31)) < 0)
				ehci_warn(ehci, "can't enable NVidia "
					"workaround for >2GB RAM\n");
			break;
		}
		break;
	}

	/* cache this readonly data; minimize chip reads */
	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);

	retval = ehci_halt(ehci);
	if (retval)
		return retval;

	/* data structure init */
	retval = ehci_init(hcd);
	if (retval)
		return retval;

#ifdef CONFIG_BUFFALO_PLATFORM
	if (hcd->self.controller->bus == &pci_bus_type) {
#endif /* CONFIG_BUFFALO_PLATFORM */
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_TDI:
		if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
			ehci->is_tdi_rh_tt = 1;
			tdi_reset(ehci);
		}
		break;
	case PCI_VENDOR_ID_AMD:
		/* AMD8111 EHCI doesn't work, according to AMD errata */
		if (pdev->device == 0x7463) {
			ehci_info(ehci, "ignoring AMD8111 (errata)\n");
			retval = -EIO;
			goto done;
		}
		break;
	case PCI_VENDOR_ID_NVIDIA:
		switch (pdev->device) {
		/* Some NForce2 chips have problems with selective suspend;
		 * fixed in newer silicon.
		 */
		case 0x0068:
			pci_read_config_dword(pdev, PCI_REVISION_ID, &temp);
			if ((temp & 0xff) < 0xa4)
				ehci->no_selective_suspend = 1;
			break;
		}
		break;
	}

	if (ehci_is_TDI(ehci))
		ehci_reset(ehci);
#ifdef CONFIG_BUFFALO_PLATFORM
	}
#endif /* CONFIG_BUFFALO_PLATFORM */

	/* at least the Genesys GL880S needs fixup here */
	temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params);
	temp &= 0x0f;
	if (temp && HCS_N_PORTS(ehci->hcs_params) > temp) {
		ehci_dbg(ehci, "bogus port configuration: "
			"cc=%d x pcc=%d < ports=%d\n",
			HCS_N_CC(ehci->hcs_params),
			HCS_N_PCC(ehci->hcs_params),
			HCS_N_PORTS(ehci->hcs_params));

#ifdef CONFIG_BUFFALO_PLATFORM
		if (hcd->self.controller->bus == &pci_bus_type) {
#endif /* CONFIG_BUFFALO_PLATFORM */
		switch (pdev->vendor) {
		case 0x17a0:		/* GENESYS */
			/* GL880S: should be PORTS=2 */
			temp |= (ehci->hcs_params & ~0xf);
			ehci->hcs_params = temp;
			break;
		case PCI_VENDOR_ID_NVIDIA:
			/* NF4: should be PCC=10 */
			break;
		}
#ifdef CONFIG_BUFFALO_PLATFORM
		}
#endif /* CONFIG_BUFFALO_PLATFORM */
	}

#ifdef CONFIG_BUFFALO_PLATFORM
	if (hcd->self.controller->bus == &pci_bus_type) {
#endif /* CONFIG_BUFFALO_PLATFORM */
	/* Serial Bus Release Number is at PCI 0x60 offset */
	pci_read_config_byte(pdev, 0x60, &ehci->sbrn);

	/* Workaround current PCI init glitch:  wakeup bits aren't
	 * being set from PCI PM capability.
	 */
	if (!device_can_wakeup(&pdev->dev)) {
		u16	port_wake;

		pci_read_config_word(pdev, 0x62, &port_wake);
		if (port_wake & 0x0001)
			device_init_wakeup(&pdev->dev, 1);
	}

#ifdef	CONFIG_USB_SUSPEND
	/* REVISIT: the controller works fine for wakeup iff the root hub
	 * itself is "globally" suspended, but usbcore currently doesn't
	 * understand such things.
	 *
	 * System suspend currently expects to be able to suspend the entire
	 * device tree, device-at-a-time.  If we failed selective suspend
	 * reports, system suspend would fail; so the root hub code must claim
	 * success.  That's lying to usbcore, and it matters for runtime
	 * PM scenarios with selective suspend and remote wakeup...
	 */
	if (ehci->no_selective_suspend && device_can_wakeup(&pdev->dev))
		ehci_warn(ehci, "selective suspend/wakeup unavailable\n");
#endif

	retval = ehci_pci_reinit(ehci, pdev);
#ifdef CONFIG_BUFFALO_PLATFORM
	}
#endif /* CONFIG_BUFFALO_PLATFORM */
done:
	return retval;
}
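The NVidia workaround in this last example is worth noting because it lowers only the coherent mask (used for QH/ITD/SITD allocations) while the streaming mask for data buffers stays at its default. With the generic DMA API the same restriction would read roughly as below (a sketch; the wrapper name is illustrative):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Sketch: keep descriptor (coherent) allocations below 2GB, as the
 * MCP04/CK804/CK8/CK8S quirk does, without touching streaming DMA.
 */
static void nvidia_coherent_2gb_quirk(struct pci_dev *pdev)
{
	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(31)))
		dev_warn(&pdev->dev,
			 "can't enable NVidia >2GB RAM workaround\n");
}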