Code example #1
File: main.c Project: Astralix/mainline-dss11
static int set_dma_caps(struct pci_dev *pdev)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
			return err;
		}
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev,
			 "Warning: couldn't set 64-bit consistent PCI DMA mask.\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"Can't set consistent PCI DMA mask, aborting.\n");
			return err;
		}
	}

	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
	return err;
}
Code example #2
File: memory.c Project: rrowicki/Chrono_Kernel-1
/**
 *	i2o_dma_alloc - Allocate DMA memory
 *	@dev: struct device pointer to the PCI device of the I2O controller
 *	@addr: i2o_dma struct which should get the DMA buffer
 *	@len: length of the new DMA memory
 *
 *	Allocate a coherent DMA memory and write the pointers into addr.
 *
 *	Returns 0 on success or -ENOMEM on failure.
 */
int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, size_t len)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int dma_64 = 0;

	mutex_lock(&mem_lock);
	if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_BIT_MASK(64))) {
		dma_64 = 1;
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
			mutex_unlock(&mem_lock);
			return -ENOMEM;
		}
	}

	addr->virt = dma_alloc_coherent(dev, len, &addr->phys, GFP_KERNEL);

	/* Restore the 64-bit mask; the return value is deliberately ignored. */
	if ((sizeof(dma_addr_t) > 4) && dma_64)
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
			;
	mutex_unlock(&mem_lock);

	if (!addr->virt)
		return -ENOMEM;

	memset(addr->virt, 0, len);
	addr->len = len;

	return 0;
}
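
The kernel-doc above spells out the i2o_dma_alloc() contract: a coherent buffer is allocated, the pointers are written into the i2o_dma struct, and the return value is 0 or -ENOMEM. The following is a minimal usage sketch, not taken from any of the quoted projects; it assumes the virt/phys fields shown above and the companion helper i2o_dma_free() from the same file.

/* Illustrative only: allocate a page-sized coherent buffer, use it, free it. */
static int example_use_i2o_dma(struct device *dev)
{
	struct i2o_dma buf;
	int err;

	err = i2o_dma_alloc(dev, &buf, PAGE_SIZE);
	if (err)
		return err;		/* -ENOMEM */

	/* buf.virt is the CPU pointer, buf.phys the DMA handle for the device. */

	i2o_dma_free(dev, &buf);	/* assumed companion helper */
	return 0;
}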
Code example #3
File: aq_pci_func.c Project: Anjali05/linux
int aq_pci_func_init(struct pci_dev *pdev)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	}
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
	}
	if (err != 0) {
		err = -ENOSR;
		goto err_exit;
	}

	err = pci_request_regions(pdev, AQ_CFG_DRV_NAME "_mmio");
	if (err < 0)
		goto err_exit;

	pci_set_master(pdev);

	return 0;

err_exit:
	return err;
}
Code example #4
File: aic7xxx_osm_pci.c Project: kzlin129/tt-gpl
static int
ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	char		 buf[80];
	const uint64_t	 mask_39bit = 0x7FFFFFFFFFULL;
	struct		 ahc_softc *ahc;
	ahc_dev_softc_t	 pci;
	struct		 ahc_pci_identity *entry;
	char		*name;
	int		 error;

	pci = pdev;
	entry = ahc_find_pci_device(pci);
	if (entry == NULL)
		return (-ENODEV);

	/*
	 * Allocate a softc for this card and
	 * set it up for attachment by our
	 * common detect routine.
	 */
	sprintf(buf, "ahc_pci:%d:%d:%d",
		ahc_get_pci_bus(pci),
		ahc_get_pci_slot(pci),
		ahc_get_pci_function(pci));
	name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT);
	if (name == NULL)
		return (-ENOMEM);
	strcpy(name, buf);
	ahc = ahc_alloc(NULL, name);
	if (ahc == NULL)
		return (-ENOMEM);
	if (pci_enable_device(pdev)) {
		ahc_free(ahc);
		return (-ENODEV);
	}
	pci_set_master(pdev);

	if (sizeof(dma_addr_t) > 4
	 && ahc_linux_get_memsize() > 0x80000000
	 && pci_set_dma_mask(pdev, mask_39bit) == 0) {
		ahc->flags |= AHC_39BIT_ADDRESSING;
	} else {
		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
			printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
			return (-ENODEV);
		}
	}
	ahc->dev_softc = pci;
	error = ahc_pci_config(ahc, entry);
	if (error != 0) {
		ahc_free(ahc);
		return (-error);
	}
	pci_set_drvdata(pdev, ahc);
	ahc_linux_register_host(ahc, &aic7xxx_driver_template);
	return (0);
}
Code example #5
static int __devinit ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	struct device *dev = &pdev->dev;
	struct ioatdma_device *device;
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
	if (err)
		return err;
	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		return err;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		return err;

	device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	pci_set_master(pdev);

	device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
	if (!device)
		return -ENOMEM;
	pci_set_drvdata(pdev, device);

	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
	if (device->version == IOAT_VER_1_2)
		err = ioat1_dma_probe(device, ioat_dca_enabled);
	else if (device->version == IOAT_VER_2_0)
		err = ioat2_dma_probe(device, ioat_dca_enabled);
	else if (device->version >= IOAT_VER_3_0)
		err = ioat3_dma_probe(device, ioat_dca_enabled);
	else
		return -ENODEV;

	if (err) {
		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
		return -ENODEV;
	}

	return 0;
}
Code example #6
File: bfad.c Project: AdrianHuang/uclinux-robutest
int
bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
{
	unsigned long   bar0_len;
	int             rc = -ENODEV;

	if (pci_enable_device(pdev)) {
		BFA_PRINTF(BFA_ERR, "pci_enable_device fail %p\n", pdev);
		goto out;
	}

	if (pci_request_regions(pdev, BFAD_DRIVER_NAME))
		goto out_disable_device;

	pci_set_master(pdev);


	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			BFA_PRINTF(BFA_ERR, "pci_set_dma_mask fail %p\n", pdev);
			goto out_release_region;
		}

	bfad->pci_bar0_map = pci_resource_start(pdev, 0);
	bar0_len = pci_resource_len(pdev, 0);
	bfad->pci_bar0_kva = ioremap(bfad->pci_bar0_map, bar0_len);

	if (bfad->pci_bar0_kva == NULL) {
		BFA_PRINTF(BFA_ERR, "Fail to map bar0\n");
		goto out_release_region;
	}

	bfad->hal_pcidev.pci_slot = PCI_SLOT(pdev->devfn);
	bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn);
	bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva;
	bfad->hal_pcidev.device_id = pdev->device;
	bfad->pci_name = pci_name(pdev);

	bfad->pci_attr.vendor_id = pdev->vendor;
	bfad->pci_attr.device_id = pdev->device;
	bfad->pci_attr.ssid = pdev->subsystem_device;
	bfad->pci_attr.ssvid = pdev->subsystem_vendor;
	bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn);

	bfad->pcidev = pdev;
	return 0;

out_release_region:
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	return rc;
}
Code example #7
/**
 * ufshcd_set_dma_mask - Set dma mask based on the controller
 *			 addressing capability
 * @pdev: PCI device structure
 *
 * Returns 0 for success, non-zero for failure
 */
static int ufshcd_set_dma_mask(struct pci_dev *pdev)
{
	int err;

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
		&& !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		return 0;
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	return err;
}
Code example #8
File: ntb_hw_amd.c Project: grate-driver/linux
static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
			    struct pci_dev *pdev)
{
	int rc;

	pci_set_drvdata(pdev, ndev);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_pci_enable;

	rc = pci_request_regions(pdev, NTB_NAME);
	if (rc)
		goto err_pci_regions;

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(&pdev->dev, "Cannot DMA highmem\n");
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
	}

	ndev->self_mmio = pci_iomap(pdev, 0, 0);
	if (!ndev->self_mmio) {
		rc = -EIO;
		goto err_dma_mask;
	}
	ndev->peer_mmio = ndev->self_mmio + AMD_PEER_OFFSET;

	return 0;

err_dma_mask:
	pci_clear_master(pdev);
err_pci_regions:
	pci_disable_device(pdev);
err_pci_enable:
	pci_set_drvdata(pdev, NULL);
	return rc;
}
Code example #9
File: cs5520.c Project: ivucica/linux
static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	ata_index_t index;
	ide_pci_device_t *d = &cyrix_chipsets[id->driver_data];

	ide_setup_pci_noise(dev, d);

	/* We must not grab the entire device, it has 'ISA' space in its
	   BARS too and we will freak out other bits of the kernel */
	if (pci_enable_device_bars(dev, 1<<2)) {
		printk(KERN_WARNING "%s: Unable to enable 55x0.\n", d->name);
		return -ENODEV;
	}
	pci_set_master(dev);
	if (pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
		printk(KERN_WARNING "cs5520: No suitable DMA available.\n");
		return -ENODEV;
	}

	index.all = 0xf0f0;

	/*
	 *	Now the chipset is configured we can let the core
	 *	do all the device setup for us
	 */

	ide_pci_setup_ports(dev, d, 14, &index);

	if((index.b.low & 0xf0) != 0xf0)
		probe_hwif_init(&ide_hwifs[index.b.low]);
	if((index.b.high & 0xf0) != 0xf0)
		probe_hwif_init(&ide_hwifs[index.b.high]);
	return 0;
}
Code example #10
File: sata_via.c Project: 3null/fastsocket
static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] =
		{ &vt6421_sport_info, &vt6421_sport_info, &vt6421_pport_info };
	struct ata_host *host;
	int i, rc;

	*r_host = host = ata_host_alloc_pinfo(&pdev->dev, ppi, ARRAY_SIZE(ppi));
	if (!host) {
		dev_printk(KERN_ERR, &pdev->dev, "failed to allocate host\n");
		return -ENOMEM;
	}

	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev, "failed to request/iomap "
			   "PCI BARs (errno=%d)\n", rc);
		return rc;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < host->n_ports; i++)
		vt6421_init_addrs(host->ports[i]);

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	return 0;
}
Code example #11
static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	const struct ide_port_info *d = &cyrix_chipset;
	hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };

	ide_setup_pci_noise(dev, d);

	/* We must not grab the entire device, it has 'ISA' space in its
	 * BARS too and we will freak out other bits of the kernel
	 */
	if (pci_enable_device_io(dev)) {
		printk(KERN_WARNING "%s: Unable to enable 55x0.\n", d->name);
		return -ENODEV;
	}
	pci_set_master(dev);
	if (pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
		printk(KERN_WARNING "%s: No suitable DMA available.\n",
			d->name);
		return -ENODEV;
	}

	/*
	 *	Now the chipset is configured we can let the core
	 *	do all the device setup for us
	 */

	ide_pci_setup_ports(dev, d, 14, &hw[0], &hws[0]);

	return ide_host_add(d, hws, NULL);
}
Code example #12
File: pci.c Project: ANFS/ANFS-kernel
static int __devinit
ath5k_pci_probe(struct pci_dev *pdev,
		const struct pci_device_id *id)
{
	void __iomem *mem;
	struct ath5k_softc *sc;
	struct ieee80211_hw *hw;
	int ret;
	u8 csz;

	/*
	 * L0s needs to be disabled on all ath5k cards.
	 *
	 * For distributions shipping with CONFIG_PCIEASPM (this will be enabled
	 * by default in the future in 2.6.36) this will also mean both L1 and
	 * L0s will be disabled when a pre 1.1 PCIe device is detected. We do
	 * know L1 works correctly even for all ath5k pre 1.1 PCIe devices
	 * though but cannot currently undo the effect of a blacklist, for
	 * details you can read pcie_aspm_sanity_check() and see how it adjusts
	 * the device link capability.
	 *
	 * It may be possible in the future to implement some PCI API to allow
	 * drivers to override blacklists for pre 1.1 PCIe but for now it is
	 * best to accept that both L0s and L1 will be disabled completely for
	 * distributions shipping with CONFIG_PCIEASPM rather than having this
	 * issue present. Motivation for adding this new API will be to help
	 * with power consumption for some of these devices.
	 */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "can't enable device\n");
		goto err;
	}

	/* XXX 32-bit addressing only */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "32-bit DMA not available\n");
		goto err_dis;
	}

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
	if (csz == 0) {
		/*
		 * Linux 2.4.18 (at least) writes the cache line size
		 * register as a 16-bit wide register which is wrong.
		 * We must have this setup properly for rx buffer
		 * DMA to work so force a reasonable value here if it
		 * comes up zero.
		 */
		csz = L1_CACHE_BYTES >> 2;
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
	}
Code example #13
File: dma.c Project: 12019/hg556a_source
int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (dev->bus == &pci_bus_type)
		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
	if (dev->bus == &vio_bus_type)
		return vio_set_dma_mask(to_vio_dev(dev), dma_mask);
	BUG();
	return 0;
}
Code example #14
File: cpqarray.c Project: pthomas/linux-2.6
/*
 * Find the IO address of the controller, its IRQ and so forth.  Fill
 * in some basic stuff into the ctlr_info_t structure.
 */
static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
{
	ushort vendor_id, device_id, command;
	unchar cache_line_size, latency_timer;
	unchar irq, revision;
	unsigned long addr[6];
	__u32 board_id;

	int i;

	c->pci_dev = pdev;
	pci_set_master(pdev);
	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "cpqarray: Unable to Enable PCI device\n");
		return -1;
	}
	vendor_id = pdev->vendor;
	device_id = pdev->device;
	irq = pdev->irq;

	for(i=0; i<6; i++)
		addr[i] = pci_resource_start(pdev, i);

	if (pci_set_dma_mask(pdev, CPQARRAY_DMA_MASK) != 0)
	{
		printk(KERN_ERR "cpqarray: Unable to set DMA mask\n");
		return -1;
	}

	pci_read_config_word(pdev, PCI_COMMAND, &command);
	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);

	pci_read_config_dword(pdev, 0x2c, &board_id);

	/* check to see if controller has been disabled */
	if(!(command & 0x02)) {
		printk(KERN_WARNING
			"cpqarray: controller appears to be disabled\n");
		return(-1);
	}

DBGINFO(
	printk("vendor_id = %x\n", vendor_id);
	printk("device_id = %x\n", device_id);
	printk("command = %x\n", command);
	for(i=0; i<6; i++)
		printk("addr[%d] = %lx\n", i, addr[i]);
	printk("revision = %x\n", revision);
	printk("irq = %x\n", irq);
	printk("cache_line_size = %x\n", cache_line_size);
	printk("latency_timer = %x\n", latency_timer);
	printk("board_id = %x\n", board_id);
);
Code example #15
File: mt76x2_pci.c Project: Lyude/linux
static int
mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mt76x2_dev *dev;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
	if (ret)
		return ret;

	pci_set_master(pdev);

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	dev = mt76x2_alloc_device(&pdev->dev);
	if (!dev)
		return -ENOMEM;

	mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);

	dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
	dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);

	ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x2_irq_handler,
			       IRQF_SHARED, KBUILD_MODNAME, dev);
	if (ret)
		goto error;

	ret = mt76x2_register_device(dev);
	if (ret)
		goto error;

	/* Fix up ASPM configuration */

	/* RG_SSUSB_G1_CDR_BIR_LTR = 0x9 */
	mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);

	/* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
	mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);

	/* RG_SSUSB_CDR_BR_PE1D = 0x3 */
	mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);

	return 0;

error:
	ieee80211_free_hw(mt76_hw(dev));
	return ret;
}
Code example #16
File: init.c Project: 020gzh/linux
static int isci_pci_init(struct pci_dev *pdev)
{
	int err, bar_num, bar_mask = 0;
	void __iomem * const *iomap;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"failed enable PCI device %s!\n",
			pci_name(pdev));
		return err;
	}

	for (bar_num = 0; bar_num < SCI_PCI_BAR_COUNT; bar_num++)
		bar_mask |= 1 << (bar_num * 2);

	err = pcim_iomap_regions(pdev, bar_mask, DRV_NAME);
	if (err)
		return err;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err)
			return err;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err)
			return err;
	}

	return 0;
}
Code example #17
File: pci.c Project: 020gzh/linux
static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hsu_dma_chip *chip;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
	if (ret) {
		dev_err(&pdev->dev, "I/O memory remapping failed\n");
		return ret;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = &pdev->dev;
	chip->regs = pcim_iomap_table(pdev)[0];
	chip->length = pci_resource_len(pdev, 0);
	chip->offset = HSU_PCI_CHAN_OFFSET;
	chip->irq = pdev->irq;

	pci_enable_msi(pdev);

	ret = hsu_dma_probe(chip);
	if (ret)
		return ret;

	ret = request_irq(chip->irq, hsu_pci_irq, 0, "hsu_dma_pci", chip);
	if (ret)
		goto err_register_irq;

	pci_set_drvdata(pdev, chip);

	return 0;

err_register_irq:
	hsu_dma_remove(chip);
	return ret;
}
Code example #18
File: pata_ninja32.c Project: 08opt/linux
static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct ata_host *host;
	struct ata_port *ap;
	void __iomem *base;
	int rc;

	host = ata_host_alloc(&dev->dev, 1);
	if (!host)
		return -ENOMEM;
	ap = host->ports[0];

	/* Set up the PCI device */
	rc = pcim_enable_device(dev);
	if (rc)
		return rc;
	rc = pcim_iomap_regions(dev, 1 << 0, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(dev);
	if (rc)
		return rc;

	host->iomap = pcim_iomap_table(dev);
	rc = pci_set_dma_mask(dev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(dev, ATA_DMA_MASK);
	if (rc)
		return rc;
	pci_set_master(dev);

	/* Set up the register mappings. We use the I/O mapping as only the
	   older chips also have MMIO on BAR 1 */
	base = host->iomap[0];
	if (!base)
		return -ENOMEM;
	ap->ops = &ninja32_port_ops;
	ap->pio_mask = ATA_PIO4;
	ap->flags |= ATA_FLAG_SLAVE_POSS;

	ap->ioaddr.cmd_addr = base + 0x10;
	ap->ioaddr.ctl_addr = base + 0x1E;
	ap->ioaddr.altstatus_addr = base + 0x1E;
	ap->ioaddr.bmdma_addr = base;
	ata_sff_std_ports(&ap->ioaddr);
	ap->pflags = ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;

	ninja32_program(base);
	/* FIXME: Should we disable them at remove ? */
	return ata_host_activate(host, dev->irq, ata_bmdma_interrupt,
				 IRQF_SHARED, &ninja32_sht);
}
Code example #19
File: bm_pci.c Project: iperry/blackmagic-io
bool bm_pci_start(bm_pci_device_t* pci)
{
	if (pci_enable_device(pci->pdev) < 0)
		return false;

	pci_set_master(pci->pdev);

	if (pci_set_dma_mask(pci->pdev, BM_DMA_64BIT_MASK) < 0)
	{
		if (pci_set_dma_mask(pci->pdev, BM_DMA_32BIT_MASK) < 0)
			goto bail;
	}

	if (strcmp(default_irq_type, "msi") == 0)
		pci_enable_msi(pci->pdev);

	return true;

bail:
	pci_disable_device(pci->pdev);
	return false;
}
Code example #20
static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct ata_host *host;
	struct ata_port *ap;
	void __iomem *base;
	int rc;

	host = ata_host_alloc(&dev->dev, 1);
	if (!host)
		return -ENOMEM;
	ap = host->ports[0];

	
	rc = pcim_enable_device(dev);
	if (rc)
		return rc;
	rc = pcim_iomap_regions(dev, 1 << 0, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(dev);
	if (rc)
		return rc;

	host->iomap = pcim_iomap_table(dev);
	rc = pci_set_dma_mask(dev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(dev, ATA_DMA_MASK);
	if (rc)
		return rc;
	pci_set_master(dev);

	
	base = host->iomap[0];
	if (!base)
		return -ENOMEM;
	ap->ops = &ninja32_port_ops;
	ap->pio_mask = ATA_PIO4;
	ap->flags |= ATA_FLAG_SLAVE_POSS;

	ap->ioaddr.cmd_addr = base + 0x10;
	ap->ioaddr.ctl_addr = base + 0x1E;
	ap->ioaddr.altstatus_addr = base + 0x1E;
	ap->ioaddr.bmdma_addr = base;
	ata_sff_std_ports(&ap->ioaddr);
	ap->pflags = ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;

	ninja32_program(base);
	
	return ata_host_activate(host, dev->irq, ata_sff_interrupt,
				 IRQF_SHARED, &ninja32_sht);
}
Code example #21
static int cx18_setup_pci(struct cx18 *cx, struct pci_dev *dev,
			  const struct pci_device_id *pci_id)
{
	u16 cmd;
	unsigned char pci_latency;

	CX18_DEBUG_INFO("Enabling pci device\n");

	if (pci_enable_device(dev)) {
		CX18_ERR("Can't enable device %d!\n", cx->num);
		return -EIO;
	}
	if (pci_set_dma_mask(dev, 0xffffffff)) {
		CX18_ERR("No suitable DMA available on card %d.\n", cx->num);
		return -EIO;
	}
	if (!request_mem_region(cx->base_addr, CX18_MEM_SIZE, "cx18 encoder")) {
		CX18_ERR("Cannot request encoder memory region on card %d.\n", cx->num);
		return -EIO;
	}

	/* Check for bus mastering */
	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	cmd |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
	pci_write_config_word(dev, PCI_COMMAND, cmd);

	pci_read_config_byte(dev, PCI_CLASS_REVISION, &cx->card_rev);
	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &pci_latency);

	if (pci_latency < 64 && cx18_pci_latency) {
		CX18_INFO("Unreasonably low latency timer, "
			       "setting to 64 (was %d)\n", pci_latency);
		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
		pci_read_config_byte(dev, PCI_LATENCY_TIMER, &pci_latency);
	}
	/* This config space value relates to DMA latencies. The
	   default value 0x8080 is too low however and will lead
	   to DMA errors. 0xffff is the max value which solves
	   these problems. */
	pci_write_config_dword(dev, 0x40, 0xffff);

	CX18_DEBUG_INFO("cx%d (rev %d) at %02x:%02x.%x, "
		   "irq: %d, latency: %d, memory: 0x%lx\n",
		   cx->dev->device, cx->card_rev, dev->bus->number,
		   PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
		   cx->dev->irq, pci_latency, (unsigned long)cx->base_addr);

	return 0;
}
Code example #22
File: bcwc_drv.c Project: ddcc/bcwc_pcie
static int bcwc_pci_set_dma_mask(struct bcwc_private *dev_priv,
				 unsigned int mask)
{
	int ret;

	ret = pci_set_dma_mask(dev_priv->pdev, DMA_BIT_MASK(mask));
	if (ret) {
		dev_err(&dev_priv->pdev->dev, "Failed to set %u pci dma mask\n",
			mask);
		return ret;
	}

	dev_priv->dma_mask = mask;

	return 0;
}
Code example #23
File: pscnv_mem.c Project: Advael/pscnv
int
pscnv_mem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	int dma_bits = 32;
#ifdef __linux__
	if (dev_priv->card_type >= NV_50 &&
	    pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
		dma_bits = 40;

	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
	if (ret) {
		NV_ERROR(dev, "Error setting DMA mask: %d\n", ret);
		return ret;
	}
#else
	if (dev_priv->card_type >= NV_50)
		dma_bits = 40;
#endif
	dev_priv->dma_mask = DMA_BIT_MASK(dma_bits);

	spin_lock_init(&dev_priv->pramin_lock);
	mutex_init(&dev_priv->vram_mutex);
	
	switch (dev_priv->card_type) {
		case NV_50:
			ret = nv50_vram_init(dev);
			break;
		case NV_D0:
		case NV_C0:
			ret = nvc0_vram_init(dev);
			break;
		default:
			NV_ERROR(dev, "No VRAM allocator for NV%02x!\n", dev_priv->chipset);
			ret = -ENOSYS;
	}
	if (ret)
		return ret;

	dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
					 drm_get_resource_len(dev, 1),
					 DRM_MTRR_WC);

	return 0;
}
Code example #24
static inline __must_check int allocQueues(struct xordev *dev) {
  dev->dmaSource1 = NULL;
  dev->dmaSource2 = NULL;
  dev->dmaDestination = NULL;
  dev->source1 = NULL;
  dev->source2 = NULL;
  dev->destination = NULL;
  dev->dmaSize = NULL;
  dev->deviceState = NULL;
  dev->deviceStateSpinlock = NULL;
  dev->waitSource1 = NULL;
  dev->waitSource2 = NULL;
  dev->waitDestination = NULL;
  pci_set_master(dev->pciDev);
  TRY_NORES(OR_GOTO(fail), pci_set_dma_mask(dev->pciDev, DMA_BIT_MASK(32)), "set dma mast");
  TRY_PTR(OR_GOTO(fail), dev->dmaSource1PciAddr, kmalloc(sizeof(dma_addr_t), GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->dmaSource2PciAddr, kmalloc(sizeof(dma_addr_t), GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->dmaDestinationPciAddr, kmalloc(sizeof(dma_addr_t), GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->dmaSource1, dma_alloc_coherent(&dev->pciDev->dev, DMA_BUFFER_BYTES,
      dev->dmaSource1PciAddr, GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->dmaSource2, dma_alloc_coherent(&dev->pciDev->dev, DMA_BUFFER_BYTES,
      dev->dmaSource2PciAddr, GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->dmaDestination, dma_alloc_coherent(&dev->pciDev->dev, DMA_BUFFER_BYTES,
      dev->dmaDestinationPciAddr, GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->dmaSize, kmalloc(sizeof(size_t), GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->deviceState, kmalloc(sizeof(int), GFP_KERNEL));
  *dev->deviceState = DEVICE_UNOCCUPIED;
  TRY_PTR(OR_GOTO(fail), dev->deviceStateSpinlock, kmalloc(sizeof(spinlock_t), GFP_KERNEL));
  spin_lock_init(dev->deviceStateSpinlock);
  TRY_PTR(OR_GOTO(fail), dev->waitSource1, kmalloc(sizeof(wait_queue_t), GFP_KERNEL));
  init_waitqueue_head(dev->waitSource1);
  TRY_PTR(OR_GOTO(fail), dev->waitSource2, kmalloc(sizeof(wait_queue_t), GFP_KERNEL));
  init_waitqueue_head(dev->waitSource2);
  TRY_PTR(OR_GOTO(fail), dev->waitDestination, kmalloc(sizeof(wait_queue_t), GFP_KERNEL));
  init_waitqueue_head(dev->waitDestination);
  TRY_NORES(OR_GOTO(fail), memfifoNew(&dev->source1), "create source1 memory queue");
  TRY_NORES(OR_GOTO(fail), memfifoNew(&dev->source2), "create source2 memory queue");
  TRY_NORES(OR_GOTO(fail), memfifoNew(&dev->destination), "create destination memory queue");
  TRY_NORES(OR_GOTO(fail), pci_request_region(dev->pciDev, 0, "xordev"), "request BAR0");
  TRY_PTR(OR_GOTO(fail), dev->bar0, pci_iomap(dev->pciDev, 0, BAR0_SIZE), "map pci iomem");
  return 0;
  fail:
  memfifoDelete(&dev->destination);
  memfifoDelete(&dev->source2);
  memfifoDelete(&dev->source1);
  return -ENOMEM;
}
Code example #25
File: mt7603_pci.c Project: LastRitter/mt76
static int
mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mt7603_dev *dev;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
	if (ret)
		return ret;

	pci_set_master(pdev);

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	dev = mt7603_alloc_device(&pdev->dev);
	if (!dev)
		return -ENOMEM;

	mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);

	pci_set_drvdata(pdev, dev);

	dev->mt76.rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
			(mt76_rr(dev, MT_HW_REV) & 0xff);
	dev_printk(KERN_INFO, dev->mt76.dev, "ASIC revision: %04x\n", dev->mt76.rev);

	ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt7603_irq_handler,
			       IRQF_SHARED, KBUILD_MODNAME, dev);
	if (ret)
		goto error;

	ret = mt7603_register_device(dev);
	if (ret)
		goto error;

	return 0;
error:
	ieee80211_free_hw(mt76_hw(dev));
	return ret;
}
Code example #26
File: cx18-driver.c Project: andi34/Dhollmen_Kernel
static int cx18_setup_pci(struct cx18 *cx, struct pci_dev *pci_dev,
			  const struct pci_device_id *pci_id)
{
	u16 cmd;
	unsigned char pci_latency;

	CX18_DEBUG_INFO("Enabling pci device\n");

	if (pci_enable_device(pci_dev)) {
		CX18_ERR("Can't enable device %d!\n", cx->instance);
		return -EIO;
	}
	if (pci_set_dma_mask(pci_dev, 0xffffffff)) {
		CX18_ERR("No suitable DMA available, card %d\n", cx->instance);
		return -EIO;
	}
	if (!request_mem_region(cx->base_addr, CX18_MEM_SIZE, "cx18 encoder")) {
		CX18_ERR("Cannot request encoder memory region, card %d\n",
			 cx->instance);
		return -EIO;
	}

	/* Enable bus mastering and memory mapped IO for the CX23418 */
	pci_read_config_word(pci_dev, PCI_COMMAND, &cmd);
	cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
	pci_write_config_word(pci_dev, PCI_COMMAND, cmd);

	cx->card_rev = pci_dev->revision;
	pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &pci_latency);

	if (pci_latency < 64 && cx18_pci_latency) {
		CX18_INFO("Unreasonably low latency timer, "
			       "setting to 64 (was %d)\n", pci_latency);
		pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, 64);
		pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &pci_latency);
	}

	CX18_DEBUG_INFO("cx%d (rev %d) at %02x:%02x.%x, "
		   "irq: %d, latency: %d, memory: 0x%lx\n",
		   cx->pci_dev->device, cx->card_rev, pci_dev->bus->number,
		   PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn),
		   cx->pci_dev->irq, pci_latency, (unsigned long)cx->base_addr);

	return 0;
}
Code example #27
File: pci.c Project: 513855417/linux
static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	const struct dw_dma_platform_data *pdata = (void *)pid->driver_data;
	struct dw_dma_chip *chip;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
	if (ret) {
		dev_err(&pdev->dev, "I/O memory remapping failed\n");
		return ret;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = &pdev->dev;
	chip->regs = pcim_iomap_table(pdev)[0];
	chip->irq = pdev->irq;
	chip->pdata = pdata;

	ret = dw_dma_probe(chip);
	if (ret)
		return ret;

	pci_set_drvdata(pdev, chip);

	return 0;
}
Code example #28
File: intel-agp.c Project: mpcdata/tnetv107x-usb
static int __devinit intel_gmch_probe(struct pci_dev *pdev,
				      struct agp_bridge_data *bridge)
{
	int i, mask;

	bridge->driver = NULL;

	for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
		if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
			find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
			bridge->driver =
				intel_agp_chipsets[i].gmch_driver;
			break;
		}
	}

	if (!bridge->driver)
		return 0;

	bridge->dev_private_data = &intel_private;
	bridge->dev = pdev;

	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);

	if (bridge->driver->mask_memory == intel_gen6_mask_memory)
		mask = 40;
	else if (bridge->driver->mask_memory == intel_i965_mask_memory)
		mask = 36;
	else
		mask = 32;

	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
		dev_err(&intel_private.pcidev->dev,
			"set gfx device dma mask %d-bit failed!\n", mask);
	else
		pci_set_consistent_dma_mask(intel_private.pcidev,
					    DMA_BIT_MASK(mask));

	return 1;
}
Code example #29
File: pata_scc.c Project: 420GrayFox/dsl-n55u-bender
static int scc_host_init(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	int rc;

	rc = scc_reset_controller(probe_ent);
	if (rc)
		return rc;

	probe_ent->n_ports = 1;

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	scc_setup_ports(&probe_ent->port[0], probe_ent->iomap[SCC_BMID_BAR]);

	pci_set_master(pdev);

	return 0;
}
Code example #30
int __devinit rtl_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	struct ieee80211_hw *hw = NULL;

	struct rtl_priv *rtlpriv = NULL;
	struct rtl_pci_priv *pcipriv = NULL;
	struct rtl_pci *rtlpci;
	unsigned long pmem_start, pmem_len, pmem_flags;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		RT_ASSERT(false,
			  ("%s : Cannot enable new PCI device\n",
			   pci_name(pdev)));
		return err;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
			RT_ASSERT(false, ("Unable to obtain 32bit DMA "
					  "for consistent allocations\n"));
			pci_disable_device(pdev);
			return -ENOMEM;
		}
	}

	pci_set_master(pdev);

	hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) +
				sizeof(struct rtl_priv), &rtl_ops);
	if (!hw) {
		RT_ASSERT(false,
			  ("%s : ieee80211 alloc failed\n", pci_name(pdev)));
		err = -ENOMEM;
		goto fail1;
	}

	SET_IEEE80211_DEV(hw, &pdev->dev);
	pci_set_drvdata(pdev, hw);

	rtlpriv = hw->priv;
	pcipriv = (void *)rtlpriv->priv;
	pcipriv->dev.pdev = pdev;

	/*
	 *init dbgp flags before all
	 *other functions, because we will
	 *use it in other functions like
	 *RT_TRACE/RT_PRINT/RTL_PRINT_DATA,
	 *you cannot use these macros
	 *before this
	 */
	rtl_dbgp_flag_init(hw);

	/* MEM map */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		RT_ASSERT(false, ("Can't obtain PCI resources\n"));
		return err;
	}

	pmem_start = pci_resource_start(pdev, 2);
	pmem_len = pci_resource_len(pdev, 2);
	pmem_flags = pci_resource_flags(pdev, 2);

	/*shared mem start */
	rtlpriv->io.pci_mem_start =
			(unsigned long)pci_iomap(pdev, 2, pmem_len);
	if (rtlpriv->io.pci_mem_start == 0) {
		RT_ASSERT(false, ("Can't map PCI mem\n"));
		goto fail2;
	}

	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
		 ("mem mapped space: start: 0x%08lx len:%08lx "
		  "flags:%08lx, after map:0x%08lx\n",
		  pmem_start, pmem_len, pmem_flags,
		  rtlpriv->io.pci_mem_start));

	/* Disable Clk Request */
	pci_write_config_byte(pdev, 0x81, 0);
	/* leave D3 mode */
	pci_write_config_byte(pdev, 0x44, 0);
	pci_write_config_byte(pdev, 0x04, 0x06);
	pci_write_config_byte(pdev, 0x04, 0x07);

	/* init cfg & intf_ops */
	rtlpriv->rtlhal.interface = INTF_PCI;
	rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
	rtlpriv->intf_ops = &rtl_pci_ops;

	/* find adapter */
	_rtl_pci_find_adapter(pdev, hw);

	/* Init IO handler */
	_rtl_pci_io_handler_init(&pdev->dev, hw);

	/*like read eeprom and so on */
	rtlpriv->cfg->ops->read_eeprom_info(hw);

	if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("Can't init_sw_vars.\n"));
		goto fail3;
	}

	rtlpriv->cfg->ops->init_sw_leds(hw);

	/*aspm */
	rtl_pci_init_aspm(hw);

	/* Init mac80211 sw */
	err = rtl_init_core(hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("Can't allocate sw for mac80211.\n"));
		goto fail3;
	}

	/* Init PCI sw */
	err = !rtl_pci_init(hw, pdev);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("Failed to init PCI.\n"));
		goto fail3;
	}

	err = ieee80211_register_hw(hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("Can't register mac80211 hw.\n"));
		goto fail3;
	} else {
		rtlpriv->mac80211.mac80211_registered = 1;
	}

	err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("failed to create sysfs device attributes\n"));
		goto fail3;
	}

	/*init rfkill */
	rtl_init_rfkill(hw);

	rtlpci = rtl_pcidev(pcipriv);
	err = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
			  IRQF_SHARED, KBUILD_MODNAME, hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 ("%s: failed to register IRQ handler\n",
			  wiphy_name(hw->wiphy)));
		goto fail3;
	} else {
		rtlpci->irq_alloc = 1;
	}

	set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
	return 0;

fail3:
	pci_set_drvdata(pdev, NULL);
	rtl_deinit_core(hw);
	_rtl_pci_io_handler_release(hw);
	ieee80211_free_hw(hw);

	if (rtlpriv->io.pci_mem_start != 0)
		pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);

fail2:
	pci_release_regions(pdev);

fail1:

	pci_disable_device(pdev);

	return -ENODEV;

}