static int __devinit ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
{
	int error = -ENOMEM;

	/* map the memory mapped i/o registers */
	hw->mmio_vaddr = pci_iomap(pdev, 1, 0);
	if (hw->mmio_vaddr == NULL) {
		dev_err(&pdev->dev, "Error mapping mmio\n");
		goto out;
	}

	/* map the adapter shared memory region */
	hw->ram_vaddr = pci_iomap(pdev, 2, MAX_CCB * ILOHW_CCB_SZ);
	if (hw->ram_vaddr == NULL) {
		dev_err(&pdev->dev, "Error mapping shared mem\n");
		goto mmio_free;
	}

	/* map the doorbell aperture */
	hw->db_vaddr = pci_iomap(pdev, 3, MAX_CCB * ONE_DB_SIZE);
	if (hw->db_vaddr == NULL) {
		dev_err(&pdev->dev, "Error mapping doorbell\n");
		goto ram_free;
	}

	return 0;
ram_free:
	pci_iounmap(pdev, hw->ram_vaddr);
mmio_free:
	pci_iounmap(pdev, hw->mmio_vaddr);
out:
	return error;
}
Example #2
/* the PCI probing function */
int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
{
	struct pci_dev *pci_dev = vp_dev->pci_dev;
	int rc;

	/* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
	if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
		return -ENODEV;

	if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) {
		printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n",
		       VIRTIO_PCI_ABI_VERSION, pci_dev->revision);
		return -ENODEV;
	}

	rc = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64));
	if (rc) {
		rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
	} else {
		/*
		 * The virtio ring base address is expressed as a 32-bit PFN,
		 * with a page size of 1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT.
		 */
		dma_set_coherent_mask(&pci_dev->dev,
				DMA_BIT_MASK(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT));
	}

	if (rc)
		dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA.  Trying to continue, but this might not work.\n");

	rc = pci_request_region(pci_dev, 0, "virtio-pci-legacy");
	if (rc)
		return rc;

	rc = -ENOMEM;
	vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
	if (!vp_dev->ioaddr)
		goto err_iomap;

	vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR;

	/* we use the subsystem vendor/device id as the virtio vendor/device
	 * id.  this allows us to use the same PCI vendor/device id for all
	 * virtio devices and to identify the particular virtio driver by
	 * the subsystem ids */
	vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
	vp_dev->vdev.id.device = pci_dev->subsystem_device;

	vp_dev->vdev.config = &virtio_pci_config_ops;

	vp_dev->config_vector = vp_config_vector;
	vp_dev->setup_vq = setup_vq;
	vp_dev->del_vq = del_vq;

	return 0;

err_iomap:
	pci_release_region(pci_dev, 0);
	return rc;
}
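The coherent-mask arithmetic above exists because the legacy interface hands the device the ring base as a 32-bit page frame number. A minimal sketch of that handoff (illustrative, not part of this probe; VIRTIO_PCI_QUEUE_SEL and VIRTIO_PCI_QUEUE_PFN are the legacy registers):

/* Illustrative sketch, not from virtio_pci_legacy_probe():
 * select a queue, then store its base address as a 32-bit PFN. */
static void legacy_set_queue_pfn(struct virtio_pci_device *vp_dev,
				 u16 index, dma_addr_t q_addr)
{
	iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
	iowrite32(q_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
		  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
}

Every ring byte must therefore sit below 1ULL << (32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT), which is exactly the coherent mask requested above.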
Example #4
/**
 * map_bars - Resource allocation for device I/O Memory and I/O Port.
 *            Maps physical address of PCI buffer to virtual kernel space.
 *
 * @param l_head: List that will hold mapped BARs
 * @param pdev:   Pci device description
 * @param bars:   Bitmask of BARs to be requested
 * @param name:   Desired memory region name suffix(or NULL if none)
 *
 * @note Linked list should be freed afterwards by unmap_bars!
 *
 * @return how many BARs were mapped - in case of success.
 * @return -EBUSY                    - in case of failure.
 */
int map_bars(struct list_head *l_head, struct pci_dev *pdev, int bars, char *name)
{
  char res_name[32] = "BAR";
  bar_map_t *mem = NULL;
  bar_map_t *memP, *tmpP;
  int i, bcntr = 0;
  void __iomem *ioaddr;

  INIT_LIST_HEAD(l_head);

  for (i = 0; i < 6; i++)
    if ( (bars & (1 << i)) && (pci_resource_len(pdev, i)) ) {
      memset(&res_name[3], 0, sizeof(res_name)-3);
      snprintf(&res_name[3], sizeof(res_name)-3, "%d_%s", i, (name) ?: "");
      if (pci_request_region(pdev, i, res_name))
	goto err_out;
      /* we will treat I/O ports as if they were I/O memory */
      if ( !(ioaddr = pci_iomap(pdev, i, 0)) )
	goto err_out_iomap;
      if ( !(mem = kzalloc((sizeof *mem), GFP_KERNEL)) )
	goto err_out_alloc;
      mem->mem_bar   = i;
      mem->mem_pdev  = pdev;
      mem->mem_remap = ioaddr;
      mem->mem_len   = pci_resource_len(pdev, i);
      list_add_tail(&mem->mem_list/*new*/, l_head/*head*/);
      ++bcntr;
    }

  return bcntr;

 err_out_alloc:
  pci_iounmap(pdev, ioaddr);
 err_out_iomap:
  pci_release_region(pdev, i);
 err_out:
  /* unwind every BAR mapped so far */
  list_for_each_entry_safe(memP, tmpP, l_head, mem_list) {
    list_del(&memP->mem_list);
    pci_iounmap(pdev, memP->mem_remap);
    pci_release_region(pdev, memP->mem_bar);
    kfree(memP);
  }
  return -EBUSY;
}
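A minimal caller sketch for the interface documented above, assuming a probe routine of our own invention and the unmap_bars() counterpart mentioned in the kdoc:

/* Hypothetical caller, not from the original source. */
static LIST_HEAD(demo_bar_list);

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int mapped;

	if (pci_enable_device(pdev))
		return -ENODEV;

	/* request and map BARs 0 and 2, tagging the regions "demo" */
	mapped = map_bars(&demo_bar_list, pdev, (1 << 0) | (1 << 2), "demo");
	if (mapped < 0) {
		pci_disable_device(pdev);
		return mapped;
	}
	dev_info(&pdev->dev, "mapped %d BARs\n", mapped);
	return 0;
}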
Example #5
/* Map the framebuffer from the card and configure the core */
static int mga_vram_init(struct mga_device *mdev)
{
	void __iomem *mem;
	struct apertures_struct *aper = alloc_apertures(1);
	if (!aper)
		return -ENOMEM;

	/* BAR 0 is VRAM */
	mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
	mdev->mc.vram_window = pci_resource_len(mdev->dev->pdev, 0);

	aper->ranges[0].base = mdev->mc.vram_base;
	aper->ranges[0].size = mdev->mc.vram_window;

	remove_conflicting_framebuffers(aper, "mgafb", true);
	kfree(aper);

	if (!devm_request_mem_region(mdev->dev->dev, mdev->mc.vram_base, mdev->mc.vram_window,
				"mgadrmfb_vram")) {
		DRM_ERROR("can't reserve VRAM\n");
		return -ENXIO;
	}

	mem = pci_iomap(mdev->dev->pdev, 0, 0);
	if (!mem)
		return -ENOMEM;

	mdev->mc.vram_size = mga_probe_vram(mdev, mem);

	pci_iounmap(mdev->dev->pdev, mem);

	return 0;
}
Example #6
static int usnic_vnic_discover_resources(struct pci_dev *pdev,
						struct usnic_vnic *vnic)
{
	enum usnic_vnic_res_type res_type;
	int i;
	int err = 0;

	for (i = 0; i < ARRAY_SIZE(vnic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		vnic->bar[i].len = pci_resource_len(pdev, i);
		vnic->bar[i].vaddr = pci_iomap(pdev, i, vnic->bar[i].len);
		if (!vnic->bar[i].vaddr) {
			usnic_err("Cannot memory-map BAR %d, aborting\n",
					i);
			err = -ENODEV;
			goto out_clean_bar;
		}
		vnic->bar[i].bus_addr = pci_resource_start(pdev, i);
	}

	vnic->vdev = vnic_dev_register(NULL, pdev, pdev, vnic->bar,
			ARRAY_SIZE(vnic->bar));
	if (!vnic->vdev) {
		usnic_err("Failed to register device %s\n",
				pci_name(pdev));
		err = -EINVAL;
		goto out_clean_bar;
	}

	for (res_type = USNIC_VNIC_RES_TYPE_EOL + 1;
			res_type < USNIC_VNIC_RES_TYPE_MAX; res_type++) {
		err = usnic_vnic_alloc_res_chunk(vnic, res_type,
						&vnic->chunks[res_type]);
		if (err) {
			usnic_err("Failed to alloc res %s with err %d\n",
					usnic_vnic_res_type_to_str(res_type),
					err);
			goto out_clean_chunks;
		}
	}

	return 0;

out_clean_chunks:
	for (res_type--; res_type > USNIC_VNIC_RES_TYPE_EOL; res_type--)
		usnic_vnic_free_res_chunk(&vnic->chunks[res_type]);
	vnic_dev_unregister(vnic->vdev);
out_clean_bar:
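	/* unmap in order; the first NULL vaddr marks where setup stopped */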
	for (i = 0; i < ARRAY_SIZE(vnic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		if (!vnic->bar[i].vaddr)
			break;

		pci_iounmap(pdev, vnic->bar[i].vaddr);
	}

	return err;
}
Example #7
//return 0 on success, negative errno on failure
static int probe(struct pci_dev *dev, const struct pci_device_id *id)
{
  /* Do probing type stuff here. Like calling request_region(); */
  unsigned long len;
  void __iomem *addressio;
  int bar;

  if (skel_get_revision(dev) != MEM_PCI_REVISION_ID)
    return -ENODEV;

  if (pci_enable_device(dev) < 0)
    return -ENODEV;

  bar = 1;
  //lin: resource_size_t start = pci_resource_start(dev, bar);
  len = pci_resource_len(dev, bar);
  //lin: unsigned long flags = pci_resource_flags(dev, bar);
  addressio = pci_iomap(dev, bar, len);
  if (addressio == NULL) {
    pci_disable_device(dev);
    return -ENOMEM;
  }
  /* smoke-test the mapping with one byte and one word access */
  iowrite8(0x89, addressio + 0);
  printk(KERN_INFO "%x\n", ioread8(addressio + 0));
  iowrite16(0x50, addressio + 8);
  printk(KERN_INFO "%x\n", ioread16(addressio + 8));
  printk(KERN_INFO "len=%ld\n", len);
  return 0;
}
Example #8
static int marvell_pata_active(struct pci_dev *pdev)
{
	int i;
	u32 devices;
	void __iomem *barp;

	/* We don't yet know how to do this for other devices */
	if (pdev->device != 0x6145)
		return 1;

	barp = pci_iomap(pdev, 5, 0x10);
	if (barp == NULL)
		return -ENOMEM;

	printk("BAR5:");
	for(i = 0; i <= 0x0F; i++)
		printk("%02X:%02X ", i, ioread8(barp + i));
	printk("\n");

	devices = ioread32(barp + 0x0C);
	pci_iounmap(pdev, barp);

	/* bit 4 flags an enabled PATA function (cf. the "PATA enable ?" check in marvell_pre_reset) */
	if (devices & 0x10)
		return 1;
	return 0;
}
Example #9
static void __iomem *ssb_ioremap(struct ssb_bus *bus,
				 unsigned long baseaddr)
{
	void __iomem *mmio = NULL;

	switch (bus->bustype) {
	case SSB_BUSTYPE_SSB:
		/* Only map the first core for now. */
		/* fallthrough... */
	case SSB_BUSTYPE_PCMCIA:
		mmio = ioremap(baseaddr, SSB_CORE_SIZE);
		break;
	case SSB_BUSTYPE_PCI:
#ifdef CONFIG_SSB_PCIHOST
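		/* a maxlen of ~0UL maps the whole BAR, equivalent to passing 0 */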
		mmio = pci_iomap(bus->host_pci, 0, ~0UL);
#else
		SSB_BUG_ON(1); /* Can't reach this code. */
#endif
		break;
	case SSB_BUSTYPE_SDIO:
		/* Nothing to ioremap in the SDIO case, just fake it */
		mmio = (void __iomem *)baseaddr;
		break;
	}

	return mmio;
}
Example #10
static int piix_disable_ahci(struct pci_dev *pdev)
{
	void __iomem *mmio;
	u32 tmp;
	int rc = 0;

	/* BUG: pci_enable_device has not yet been called.  This
	 * works because this device is usually set up by BIOS.
	 */

	if (!pci_resource_start(pdev, AHCI_PCI_BAR) ||
	    !pci_resource_len(pdev, AHCI_PCI_BAR))
		return 0;

	mmio = pci_iomap(pdev, AHCI_PCI_BAR, 64);
	if (!mmio)
		return -ENOMEM;

	tmp = readl(mmio + AHCI_GLOBAL_CTL);
	if (tmp & AHCI_ENABLE) {
		tmp &= ~AHCI_ENABLE;
		writel(tmp, mmio + AHCI_GLOBAL_CTL);

		tmp = readl(mmio + AHCI_GLOBAL_CTL);
		if (tmp & AHCI_ENABLE)
			rc = -EIO;
	}

	pci_iounmap(pdev, mmio);
	return rc;
}
Example #11
/* Special reset function for Marathon CAN-bus-PCIe card */
static void plx_pci_reset_marathon_pcie(struct pci_dev *pdev)
{
	void __iomem *addr;
	void __iomem *reset_addr;
	int i;

	plx9056_pci_reset_common(pdev);

	for (i = 0; i < 2; i++) {
		struct plx_pci_channel_map *chan_map =
			&plx_pci_card_info_marathon_pcie.chan_map_tbl[i];
		addr = pci_iomap(pdev, chan_map->bar, chan_map->size);
		if (!addr) {
			dev_err(&pdev->dev, "Failed to remap reset "
				"space %d (BAR%d)\n", i, chan_map->bar);
		} else {
			/* reset the SJA1000 chip */
			#define MARATHON_PCIE_RESET_OFFSET 32
			reset_addr = addr + chan_map->offset +
			             MARATHON_PCIE_RESET_OFFSET;
			iowrite8(0x1, reset_addr);
			udelay(100);
			pci_iounmap(pdev, addr);
		}
	}
}
Example #12
static int marvell_pre_reset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u32 devices;
	void __iomem *barp;
	int i;

	/* Check if our port is enabled */

	barp = pci_iomap(pdev, 5, 0x10);
	if (barp == NULL)
		return -ENOMEM;
	printk("BAR5:");
	for(i = 0; i <= 0x0F; i++)
		printk("%02X:%02X ", i, ioread8(barp + i));
	printk("\n");

	devices = ioread32(barp + 0x0C);
	pci_iounmap(pdev, barp);

	if ((pdev->device == 0x6145) && (ap->port_no == 0) &&
	    (!(devices & 0x10)))	/* PATA enable ? */
		return -ENOENT;

	return ata_std_prereset(link, deadline);
}
Example #13
/**
 * ci13xxx_pci_probe: PCI probe
 * @pdev: USB device controller being probed
 * @id:   PCI hotplug ID connecting controller to UDC framework
 *
 * Allocates basic PCI resources for this USB device controller, and then
 * invokes the udc_probe() method to start the UDC associated with it.
 * Returns 0 on success or a negative error code.
 */
static int __devinit ci13xxx_pci_probe(struct pci_dev *pdev,
				       const struct pci_device_id *id)
{
	void __iomem *regs = NULL;
	int retval = 0;

	if (id == NULL)
		return -EINVAL;

	retval = pci_enable_device(pdev);
	if (retval)
		goto done;

	if (!pdev->irq) {
		dev_err(&pdev->dev, "No IRQ, check BIOS/PCI setup!");
		retval = -ENODEV;
		goto disable_device;
	}

	retval = pci_request_regions(pdev, UDC_DRIVER_NAME);
	if (retval)
		goto disable_device;

	/* BAR 0 holds all the registers */
	regs = pci_iomap(pdev, 0, 0);
	if (!regs) {
		dev_err(&pdev->dev, "Error mapping memory!");
		retval = -EFAULT;
		goto release_regions;
	}
	pci_set_drvdata(pdev, (__force void *)regs);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	retval = udc_probe(&ci13xxx_pci_udc_driver, &pdev->dev, regs);
	if (retval)
		goto iounmap;

	/* our device does not have MSI capability */

	retval = request_irq(pdev->irq, ci13xxx_pci_irq, IRQF_SHARED,
			     UDC_DRIVER_NAME, pdev);
	if (retval)
		goto gadget_remove;

	return 0;

 gadget_remove:
	udc_remove();
 iounmap:
	pci_iounmap(pdev, regs);
 release_regions:
	pci_release_regions(pdev);
 disable_device:
	pci_disable_device(pdev);
 done:
	return retval;
}
Example #14
/**
 * imx_pcie_ep_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @id: entry in id_tbl
 *
 * Returns 0 on success, negative on failure
 **/
static int imx_pcie_ep_probe(struct pci_dev *pdev,
                             const struct pci_device_id *id)
{
    int ret = 0;
    u32 val;
    struct device *dev = &pdev->dev;
    struct imx_pcie_ep_priv *priv;

    priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
    if (!priv) {
        dev_err(dev, "can't alloc imx pcie priv\n");
        return -ENOMEM;
    }

    priv->pci_dev = pdev;

    if (pci_enable_device(pdev)) {
        ret = -ENODEV;
        goto out;
    }
    pci_set_master(pdev);

    pci_set_drvdata(pdev, priv);

    priv->hw_base = pci_iomap(pdev, 0, 0);
    if (!priv->hw_base) {
        ret = -ENODEV;
        goto out_disable;
    }

    pr_info("pci_resource_len = 0x%08llx\n",
            (unsigned long long) pci_resource_len(pdev, 0));
    pr_info("pci_resource_base = %p\n", priv->hw_base);

    ret = pci_enable_msi(priv->pci_dev);
    if (ret < 0) {
        dev_err(dev, "can't enable msi\n");
        goto out_unmap;
    }

    /*
     * Force to use 0x01FF8000 as the MSI address,
     * to do the MSI demo
     */
    pci_bus_write_config_dword(pdev->bus, 0, 0x54, 0x01FF8000);
    pci_bus_write_config_dword(pdev->bus->parent, 0, 0x820, 0x01FF8000);

    /* configure rc's msi cap */
    pci_bus_read_config_dword(pdev->bus->parent, 0, 0x50, &val);
    val |= (PCI_MSI_FLAGS_ENABLE << 16);
    pci_bus_write_config_dword(pdev->bus->parent, 0, 0x50, val);
    pci_bus_write_config_dword(pdev->bus->parent, 0, 0x828, 0x1);
    pci_bus_write_config_dword(pdev->bus->parent, 0, 0x82C, 0xFFFFFFFE);

    return 0;

out_unmap:
    pci_iounmap(pdev, priv->hw_base);
out_disable:
    pci_disable_device(pdev);
out:
    return ret;
}
Example #15
static int __devinit
geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int ret;
	ret = pci_enable_device(dev);
	if (ret)
		return ret;

	ret = pci_request_regions(dev, "geode-aes");
	if (ret)
		goto eenable;

	_iobase = pci_iomap(dev, 0, 0);

	if (_iobase == NULL) {
		ret = -ENOMEM;
		goto erequest;
	}

	spin_lock_init(&lock);

	/* Clear any pending activity */
	iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);

	ret = crypto_register_alg(&geode_alg);
	if (ret)
		goto eiomap;

	ret = crypto_register_alg(&geode_ecb_alg);
	if (ret)
		goto ealg;

	ret = crypto_register_alg(&geode_cbc_alg);
	if (ret)
		goto eecb;

	printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
	return 0;

 eecb:
	crypto_unregister_alg(&geode_ecb_alg);

 ealg:
	crypto_unregister_alg(&geode_alg);

 eiomap:
	pci_iounmap(dev, _iobase);

 erequest:
	pci_release_regions(dev);

 eenable:
	pci_disable_device(dev);

	printk(KERN_ERR "geode-aes:  GEODE AES initialization failed.\n");
	return ret;
}
Example #16
static int __devinit probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct uio_info *info;
	int ret;

	info = kzalloc(sizeof(struct uio_info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	if (pci_enable_device(pdev))
		goto out_free;

	if (pci_request_regions(pdev, "aectc"))
		goto out_disable;

	info->name = "aectc";
	info->port[0].start = pci_resource_start(pdev, 0);
	if (!info->port[0].start)
		goto out_release;
	info->priv = pci_iomap(pdev, 0, 0);
	if (!info->priv)
		goto out_release;
	info->port[0].size = pci_resource_len(pdev, 0);
	info->port[0].porttype = UIO_PORT_GPIO;

	info->version = "0.0.1";
	info->irq = pdev->irq;
	info->irq_flags = IRQF_SHARED;
	info->handler = aectc_irq;

	print_board_data(pdev, info);
	ret = uio_register_device(&pdev->dev, info);
	if (ret)
		goto out_unmap;

	iowrite32(INT_ENABLE, info->priv + INT_ENABLE_ADDR);
	iowrite8(INT_MASK_ALL, info->priv + INT_MASK_ADDR);
	if (!(ioread8(info->priv + INTA_DRVR_ADDR)
			& INTA_ENABLED_FLAG))
		dev_err(&pdev->dev, "aectc: interrupts not enabled\n");

	pci_set_drvdata(pdev, info);

	return 0;

out_unmap:
	pci_iounmap(pdev, info->priv);
out_release:
	pci_release_regions(pdev);
out_disable:
	pci_disable_device(pdev);
out_free:
	kfree(info);
	return -ENODEV;
}
Example #17
static int ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
{
	int bar;
	unsigned long off;

	/* map the memory mapped i/o registers */
	hw->mmio_vaddr = pci_iomap(pdev, 1, 0);
	if (hw->mmio_vaddr == NULL) {
		dev_err(&pdev->dev, "Error mapping mmio\n");
		goto out;
	}

	/* map the adapter shared memory region */
	if (pdev->subsystem_device == 0x00E4) {
		bar = 5;
		/* Last 8k is reserved for CCBs */
		off = pci_resource_len(pdev, bar) - 0x2000;
	} else {
		bar = 2;
		off = 0;
	}
	hw->ram_vaddr = pci_iomap_range(pdev, bar, off, max_ccb * ILOHW_CCB_SZ);
	if (hw->ram_vaddr == NULL) {
		dev_err(&pdev->dev, "Error mapping shared mem\n");
		goto mmio_free;
	}

	/* map the doorbell aperture */
	hw->db_vaddr = pci_iomap(pdev, 3, max_ccb * ONE_DB_SIZE);
	if (hw->db_vaddr == NULL) {
		dev_err(&pdev->dev, "Error mapping doorbell\n");
		goto ram_free;
	}

	return 0;
ram_free:
	pci_iounmap(pdev, hw->ram_vaddr);
mmio_free:
	pci_iounmap(pdev, hw->mmio_vaddr);
out:
	return -ENOMEM;
}
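Example #17 is the later revision of the opening ilo_map_device() example; the offset case added here is what pci_iomap_range() exists for:

/* from include/asm-generic/pci_iomap.h */
void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
			      unsigned long offset, unsigned long maxlen);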
Example #18
static int xhci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct xhci_data data = {};
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;
	pci_set_master(pdev);

	data.regs = pci_iomap(pdev, 0, 0);
	if (!data.regs)
		return -ENOMEM;

	return xhci_register(&pdev->dev, &data);
}
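For reference, the prototype being exercised throughout these examples takes a third maxlen argument, with 0 meaning "map the whole BAR":

/* from include/asm-generic/pci_iomap.h */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);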
Example #19
/**
 * genwqe_bus_reset() - Card recovery
 *
 * pci_reset_function() will recover the device and ensure that the
 * registers are accessible again when it completes with success. If
 * not, the card will stay dead and its registers will still be
 * inaccessible.
 */
static int genwqe_bus_reset(struct genwqe_dev *cd)
{
	int bars, rc = 0;
	struct pci_dev *pci_dev = cd->pci_dev;
	void __iomem *mmio;

	if (cd->err_inject & GENWQE_INJECT_BUS_RESET_FAILURE)
		return -EIO;

	mmio = cd->mmio;
	cd->mmio = NULL;
	pci_iounmap(pci_dev, mmio);

	bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
	pci_release_selected_regions(pci_dev, bars);

	/*
	 * Firmware/BIOS might change the memory mapping during bus reset.
	 * Settings like bus-mastering are backed up and restored by
	 * pci_reset_function().
	 */
	dev_dbg(&pci_dev->dev, "[%s] pci_reset function ...\n", __func__);
	rc = pci_reset_function(pci_dev);
	if (rc) {
		dev_err(&pci_dev->dev,
			"[%s] err: failed reset func (rc %d)\n", __func__, rc);
		return rc;
	}
	dev_dbg(&pci_dev->dev, "[%s] done with rc=%d\n", __func__, rc);

	/*
	 * Here is the right spot to clear the register read
	 * failure. pci_bus_reset() does this job in real systems.
	 */
	cd->err_inject &= ~(GENWQE_INJECT_HARDWARE_FAILURE |
			    GENWQE_INJECT_GFIR_FATAL |
			    GENWQE_INJECT_GFIR_INFO);

	rc = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name);
	if (rc) {
		dev_err(&pci_dev->dev,
			"[%s] err: request bars failed (%d)\n", __func__, rc);
		return -EIO;
	}

	cd->mmio = pci_iomap(pci_dev, 0, 0);
	if (cd->mmio == NULL) {
		dev_err(&pci_dev->dev,
			"[%s] err: mapping BAR0 failed\n", __func__);
		return -ENOMEM;
	}
	return 0;
}
Example #20
static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
			    struct pci_dev *pdev)
{
	int rc;

	pci_set_drvdata(pdev, ndev);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_pci_enable;

	rc = pci_request_regions(pdev, NTB_NAME);
	if (rc)
		goto err_pci_regions;

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(&pdev->dev, "Cannot DMA highmem\n");
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
	}

	ndev->self_mmio = pci_iomap(pdev, 0, 0);
	if (!ndev->self_mmio) {
		rc = -EIO;
		goto err_dma_mask;
	}
	ndev->peer_mmio = ndev->self_mmio + AMD_PEER_OFFSET;

	return 0;

err_dma_mask:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_pci_regions:
	pci_disable_device(pdev);
err_pci_enable:
	pci_set_drvdata(pdev, NULL);
	return rc;
}
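pci_set_dma_mask()/pci_set_consistent_dma_mask() are the older PCI wrappers; the same 64-then-32-bit fallback written against the current DMA API would look roughly like this sketch:

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		/* fall back to 32-bit streaming and coherent masks together */
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(&pdev->dev, "Cannot DMA highmem\n");
	}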
Example #21
static int flexcop_pci_init(struct flexcop_pci *fc_pci)
{
	int ret;
	u8 card_rev;

	pci_read_config_byte(fc_pci->pdev, PCI_CLASS_REVISION, &card_rev);
	info("card revision %x", card_rev);

	if ((ret = pci_enable_device(fc_pci->pdev)) != 0)
		return ret;

	pci_set_master(fc_pci->pdev);

	/* enable interrupts */
	// pci_write_config_dword(pdev, 0x6c, 0x8000);

	if ((ret = pci_request_regions(fc_pci->pdev, DRIVER_NAME)) != 0)
		goto err_pci_disable_device;

	fc_pci->io_mem = pci_iomap(fc_pci->pdev, 0, 0x800);

	if (!fc_pci->io_mem) {
		err("cannot map io memory\n");
		ret = -EIO;
		goto err_pci_release_regions;
	}

	pci_set_drvdata(fc_pci->pdev, fc_pci);

	if ((ret = request_irq(fc_pci->pdev->irq, flexcop_pci_isr,
					IRQF_SHARED, DRIVER_NAME, fc_pci)) != 0)
		goto err_pci_iounmap;

	spin_lock_init(&fc_pci->irq_lock);

	fc_pci->init_state |= FC_PCI_INIT;
	goto success;

err_pci_iounmap:
	pci_iounmap(fc_pci->pdev, fc_pci->io_mem);
	pci_set_drvdata(fc_pci->pdev, NULL);
err_pci_release_regions:
	pci_release_regions(fc_pci->pdev);
err_pci_disable_device:
	pci_disable_device(fc_pci->pdev);

success:
	return ret;
}
Example #22
static int i2c_vr_mapregs(struct pci_dev *dev, int idx)
{
	void __iomem *base;

	if (!dev || idx >= I2C_VR_ADAP_NR)
		return -EINVAL;

	base = pci_iomap(dev, idx, 0);
	if (!base) {
		dev_dbg(&dev->dev, "error mapping memory\n");
		return -EFAULT;
	}
	i2c_vr[idx].regs = base;
	return 0;
}
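A hypothetical inverse for symmetry (i2c_vr_unmapregs() does not appear in the source; sketch only):

/* Hypothetical counterpart to i2c_vr_mapregs(), not in the original driver. */
static void i2c_vr_unmapregs(struct pci_dev *dev, int idx)
{
	if (!dev || idx >= I2C_VR_ADAP_NR)
		return;
	if (i2c_vr[idx].regs) {
		pci_iounmap(dev, i2c_vr[idx].regs);
		i2c_vr[idx].regs = NULL;
	}
}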
Example #23
static inline __must_check int allocQueues(struct xordev *dev) {
  dev->dmaSource1 = NULL;
  dev->dmaSource2 = NULL;
  dev->dmaDestination = NULL;
  dev->source1 = NULL;
  dev->source2 = NULL;
  dev->destination = NULL;
  dev->dmaSize = NULL;
  dev->deviceState = NULL;
  dev->deviceStateSpinlock = NULL;
  dev->waitSource1 = NULL;
  dev->waitSource2 = NULL;
  dev->waitDestination = NULL;
  pci_set_master(dev->pciDev);
  TRY_NORES(OR_GOTO(fail), pci_set_dma_mask(dev->pciDev, DMA_BIT_MASK(32)), "set dma mask");
  TRY_PTR(OR_GOTO(fail), dev->dmaSource1PciAddr, kmalloc(sizeof(dma_addr_t), GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->dmaSource2PciAddr, kmalloc(sizeof(dma_addr_t), GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->dmaDestinationPciAddr, kmalloc(sizeof(dma_addr_t), GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->dmaSource1, dma_alloc_coherent(&dev->pciDev->dev, DMA_BUFFER_BYTES,
      dev->dmaSource1PciAddr, GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->dmaSource2, dma_alloc_coherent(&dev->pciDev->dev, DMA_BUFFER_BYTES,
      dev->dmaSource2PciAddr, GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->dmaDestination, dma_alloc_coherent(&dev->pciDev->dev, DMA_BUFFER_BYTES,
      dev->dmaDestinationPciAddr, GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->dmaSize, kmalloc(sizeof(size_t), GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->deviceState, kmalloc(sizeof(int), GFP_KERNEL));
  *dev->deviceState = DEVICE_UNOCCUPIED;
  TRY_PTR(OR_GOTO(fail), dev->deviceStateSpinlock, kmalloc(sizeof(spinlock_t), GFP_KERNEL));
  spin_lock_init(dev->deviceStateSpinlock);
  TRY_PTR(OR_GOTO(fail), dev->waitSource1, kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL));
  init_waitqueue_head(dev->waitSource1);
  TRY_PTR(OR_GOTO(fail), dev->waitSource2, kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL));
  init_waitqueue_head(dev->waitSource2);
  TRY_PTR(OR_GOTO(fail), dev->waitDestination, kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL));
  init_waitqueue_head(dev->waitDestination);
  TRY_NORES(OR_GOTO(fail), memfifoNew(&dev->source1), "create source1 memory queue");
  TRY_NORES(OR_GOTO(fail), memfifoNew(&dev->source2), "create source2 memory queue");
  TRY_NORES(OR_GOTO(fail), memfifoNew(&dev->destination), "create destination memory queue");
  TRY_NORES(OR_GOTO(fail), pci_request_region(dev->pciDev, 0, "xordev"), "request BAR0");
  TRY_PTR(OR_GOTO(fail), dev->bar0, pci_iomap(dev->pciDev, 0, BAR0_SIZE), "map pci iomem");
  return 0;
  fail:
  memfifoDelete(&dev->destination);
  memfifoDelete(&dev->source2);
  memfifoDelete(&dev->source1);
  return -ENOMEM;
}
Example #24
static int __devinit dw_mci_pci_probe(struct pci_dev *pdev,
				  const struct pci_device_id *entries)
{
	struct dw_mci *host;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;
	if (pci_request_regions(pdev, "dw_mmc_pci")) {
		ret = -ENODEV;
		goto err_disable_dev;
	}

	host = kzalloc(sizeof(struct dw_mci), GFP_KERNEL);
	if (!host) {
		ret = -ENOMEM;
		goto err_release;
	}

	host->irq = pdev->irq;
	host->irq_flags = IRQF_SHARED;
	host->dev = pdev->dev;
	host->pdata = &pci_board_data;

	host->regs = pci_iomap(pdev, PCI_BAR_NO, COMPLETE_BAR);
	if (!host->regs) {
		ret = -EIO;
		goto err_unmap;
	}

	pci_set_drvdata(pdev, host);
	ret = dw_mci_probe(host);
	if (ret)
		goto err_probe_failed;
	return ret;

err_probe_failed:
	pci_iounmap(pdev, host->regs);
err_unmap:
	kfree(host);
err_release:
	pci_release_regions(pdev);
err_disable_dev:
	pci_disable_device(pdev);
	return ret;
}
Example #25
static int mgag200_device_init(struct drm_device *dev,
			       uint32_t flags)
{
	struct mga_device *mdev = dev->dev_private;
	int ret, option;

	mdev->type = flags;

	/* Hardcode the number of CRTCs to 1 */
	mdev->num_crtc = 1;

	pci_read_config_dword(dev->pdev, PCI_MGA_OPTION, &option);
	mdev->has_sdram = !(option & (1 << 14));

	/* BAR 0 is the framebuffer, BAR 1 contains registers */
	mdev->rmmio_base = pci_resource_start(mdev->dev->pdev, 1);
	mdev->rmmio_size = pci_resource_len(mdev->dev->pdev, 1);

	if (!request_mem_region(mdev->rmmio_base, mdev->rmmio_size,
				"mgadrmfb_mmio")) {
		DRM_ERROR("can't reserve mmio registers\n");
		return -ENOMEM;
	}

	mdev->rmmio = pci_iomap(dev->pdev, 1, 0);
	if (mdev->rmmio == NULL) {
		release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
		return -ENOMEM;
	}

	/* stash G200 SE model number for later use */
	if (IS_G200_SE(mdev))
		mdev->reg_1e24 = RREG32(0x1e24);

	ret = mga_vram_init(mdev);
	if (ret) {
		release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
		return ret;
	}

	mdev->bpp_shifts[0] = 0;
	mdev->bpp_shifts[1] = 1;
	mdev->bpp_shifts[2] = 0;
	mdev->bpp_shifts[3] = 2;
	return 0;
}
Example #26
/* Special reset function for Marathon card */
static void plx_pci_reset_marathon(struct pci_dev *pdev)
{
	void __iomem *reset_addr;
	int i;
	int reset_bar[2] = {3, 5};

	plx_pci_reset_common(pdev);

	for (i = 0; i < 2; i++) {
		reset_addr = pci_iomap(pdev, reset_bar[i], 0);
		if (!reset_addr) {
			dev_err(&pdev->dev, "Failed to remap reset "
				"space %d (BAR%d)\n", i, reset_bar[i]);
		} else {
			/* reset the SJA1000 chip */
			iowrite8(0x1, reset_addr);
			udelay(100);
			pci_iounmap(pdev, reset_addr);
		}
	}
}
Example #27
/* Special reset function for ASEM Dual CAN raw card */
static void plx_pci_reset_asem_dual_can_raw(struct pci_dev *pdev)
{
	void __iomem *bar0_addr;
	u8 tmpval;

	plx_pci_reset_common(pdev);

	bar0_addr = pci_iomap(pdev, 0, 0);
	if (!bar0_addr) {
		dev_err(&pdev->dev, "Failed to remap reset space 0 (BAR0)\n");
		return;
	}

	/* reset the two SJA1000 chips */
	tmpval = ioread8(bar0_addr + ASEM_RAW_CAN_RST_REGISTER);
	tmpval &= ~(ASEM_RAW_CAN_RST_MASK_CAN1 | ASEM_RAW_CAN_RST_MASK_CAN2);
	iowrite8(tmpval, bar0_addr + ASEM_RAW_CAN_RST_REGISTER);
	usleep_range(300, 400);
	tmpval |= ASEM_RAW_CAN_RST_MASK_CAN1 | ASEM_RAW_CAN_RST_MASK_CAN2;
	iowrite8(tmpval, bar0_addr + ASEM_RAW_CAN_RST_REGISTER);
	usleep_range(300, 400);
	pci_iounmap(pdev, bar0_addr);
}
Example #28
int __devinit rtl_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	struct ieee80211_hw *hw = NULL;

	struct rtl_priv *rtlpriv = NULL;
	struct rtl_pci_priv *pcipriv = NULL;
	struct rtl_pci *rtlpci;
	unsigned long pmem_start, pmem_len, pmem_flags;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		RT_ASSERT(false,
			  ("%s : Cannot enable new PCI device\n",
			   pci_name(pdev)));
		return err;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
			RT_ASSERT(false, ("Unable to obtain 32bit DMA "
					  "for consistent allocations\n"));
			pci_disable_device(pdev);
			return -ENOMEM;
		}
	}

	pci_set_master(pdev);

	hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) +
				sizeof(struct rtl_priv), &rtl_ops);
	if (!hw) {
		RT_ASSERT(false,
			  ("%s : ieee80211 alloc failed\n", pci_name(pdev)));
		err = -ENOMEM;
		goto fail1;
	}

	SET_IEEE80211_DEV(hw, &pdev->dev);
	pci_set_drvdata(pdev, hw);

	rtlpriv = hw->priv;
	pcipriv = (void *)rtlpriv->priv;
	pcipriv->dev.pdev = pdev;

	/*
	 * Init dbgp flags before all other functions, because we will
	 * use them in functions like RT_TRACE/RT_PRINT/RTL_PRINT_DATA;
	 * you cannot use these macros before this point.
	 */
	rtl_dbgp_flag_init(hw);

	/* MEM map */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		RT_ASSERT(false, ("Can't obtain PCI resources\n"));
		pci_set_drvdata(pdev, NULL);
		ieee80211_free_hw(hw);
		goto fail1;
	}

	pmem_start = pci_resource_start(pdev, 2);
	pmem_len = pci_resource_len(pdev, 2);
	pmem_flags = pci_resource_flags(pdev, 2);

	/*shared mem start */
	rtlpriv->io.pci_mem_start =
			(unsigned long)pci_iomap(pdev, 2, pmem_len);
	if (rtlpriv->io.pci_mem_start == 0) {
		RT_ASSERT(false, ("Can't map PCI mem\n"));
		goto fail2;
	}

	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
		 ("mem mapped space: start: 0x%08lx len:%08lx "
		  "flags:%08lx, after map:0x%08lx\n",
		  pmem_start, pmem_len, pmem_flags,
		  rtlpriv->io.pci_mem_start));

	/* Disable Clk Request */
	pci_write_config_byte(pdev, 0x81, 0);
	/* leave D3 mode */
	pci_write_config_byte(pdev, 0x44, 0);
	pci_write_config_byte(pdev, 0x04, 0x06);
	pci_write_config_byte(pdev, 0x04, 0x07);

	/* init cfg & intf_ops */
	rtlpriv->rtlhal.interface = INTF_PCI;
	rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
	rtlpriv->intf_ops = &rtl_pci_ops;

	/* find adapter */
	_rtl_pci_find_adapter(pdev, hw);

	/* Init IO handler */
	_rtl_pci_io_handler_init(&pdev->dev, hw);

	/*like read eeprom and so on */
	rtlpriv->cfg->ops->read_eeprom_info(hw);

	if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("Can't init_sw_vars.\n"));
		goto fail3;
	}

	rtlpriv->cfg->ops->init_sw_leds(hw);

	/*aspm */
	rtl_pci_init_aspm(hw);

	/* Init mac80211 sw */
	err = rtl_init_core(hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("Can't allocate sw for mac80211.\n"));
		goto fail3;
	}

	/* Init PCI sw */
	err = !rtl_pci_init(hw, pdev);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("Failed to init PCI.\n"));
		goto fail3;
	}

	err = ieee80211_register_hw(hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("Can't register mac80211 hw.\n"));
		goto fail3;
	} else {
		rtlpriv->mac80211.mac80211_registered = 1;
	}

	err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("failed to create sysfs device attributes\n"));
		goto fail3;
	}

	/*init rfkill */
	rtl_init_rfkill(hw);

	rtlpci = rtl_pcidev(pcipriv);
	err = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
			  IRQF_SHARED, KBUILD_MODNAME, hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 ("%s: failed to register IRQ handler\n",
			  wiphy_name(hw->wiphy)));
		goto fail3;
	} else {
		rtlpci->irq_alloc = 1;
	}

	set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
	return 0;

fail3:
	pci_set_drvdata(pdev, NULL);
	rtl_deinit_core(hw);
	_rtl_pci_io_handler_release(hw);
	ieee80211_free_hw(hw);

	if (rtlpriv->io.pci_mem_start != 0)
		pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);

fail2:
	pci_release_regions(pdev);

fail1:

	pci_disable_device(pdev);

	return -ENODEV;

}
Example #29
static int __devinit ipc_map_device(struct pci_dev *pdev, struct ipc_hwinfo *hw)
{
	int error = -ENOMEM;
	unsigned int verify = 0;
	// int i = 0;

	/* Map the memory-mapped base address to a virtual address.
	   A9 buffers are mapped to the first 4K from the start of PCI BAR0,
	   and the CFG/STATUS registers to the subsequent 4K region. */
	hw->mmio_a9_wr_buff_x1 = pci_iomap(pdev, INDUS_IPC_BAR0, 0);
	if (hw->mmio_a9_wr_buff_x1 == NULL) {
		dev_err(&pdev->dev, "Error mapping mmio for BAR0\n");
		return error;
	}

	/* BAR0 has mapping for all the three sections:
	   CONFIG/STATUS registers, Host Input Buffers and the Host
	   Read Circular Buffer (Input Buffer and Circular Buffers
	   have same address)
	*/

	hw->mmio_a9_wr_buff_pvsmc = hw->mmio_a9_wr_buff_x1 + IPC_PVSMC_BUFF_OFFSET;
	hw->mmio_a9_rd_buff = hw->mmio_a9_wr_buff_x1 + IPC_A9_BUFF_OFFSET;
	hw->mmio_a9_cfg_status = hw->mmio_a9_wr_buff_x1 + IPC_A9_CFG_STS_OFFSET;

#if 0
	printk("IPC Driver: BAR0 Addr CFG 0x%x \nIPC Driver: BAR0 Addr X1 Buf: 0x%x\n", hw->mmio_a9_cfg_status, hw->mmio_a9_wr_buff_x1);

	verify = ioread32(hw->mmio_a9_cfg_status);
	printk("IPC Driver: CFG/STS 1st Reg Addr: 0x%x\n", (unsigned int*)hw->mmio_a9_cfg_status);
	printk("IPC Driver: CFG/STS Contents: 0x%x\n", verify);

	verify = ioread32(hw->mmio_a9_cfg_status + 4);
	printk("IPC Driver: CFG/STS 2nd Reg Addr: 0x%x\n", (unsigned int*)(hw->mmio_a9_cfg_status + 4));
	printk("IPC Driver: CFG/STS Contents: 0x%x\n", verify);

	verify = ioread32(hw->mmio_a9_cfg_status + 8);
	printk("IPC Driver: CFG/STS 3rd Reg Addr: 0x%x\n", (unsigned int*)(hw->mmio_a9_cfg_status + 8));
	printk("IPC Driver: CFG/STS Contents: 0x%x\n", verify);

	verify = ioread32(hw->mmio_a9_cfg_status + 12);
	printk("IPC Driver: CFG/STS 4th Reg Addr: 0x%x\n", (unsigned int*)(hw->mmio_a9_cfg_status + 12));
	printk("IPC Driver: CFG/STS Contents: 0x%x\n", verify);


	verify = ioread32(hw->mmio_a9_cfg_status + 16);
	printk("IPC Driver: CFG/STS 5th Reg Addr: 0x%x\n", (unsigned int*)(hw->mmio_a9_cfg_status + 16));
	printk("IPC Driver: CFG/STS Contents: 0x%x\n", verify);

	verify = ioread32(hw->mmio_a9_cfg_status + 20);
	printk("IPC Driver: CFG/STS 6th Reg Addr: 0x%x\n", (unsigned int*)(hw->mmio_a9_cfg_status + 20));
	printk("IPC Driver: CFG/STS Contents: 0x%x\n", verify);
#endif



#if 0
	printk("IPC Driver: BAR0 CFG/STS Register Contents:\n");
	for(;i < 16; i++) {
		printk("0x%x\t", *(hw->mmio_a9_cfg_status + i*4));
	}
 
	printk("IPC Driver: BAR0 Buffer Contents:\n");
	for(;i < 16; i++) {
		printk("0x%x\t", *(hw->mmio_a9_wr_buff_x1 + i*4));
	}
#endif

	return 0;
}
Example #30
static int
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int err, irq;
	void __iomem *ioaddr;
	static int version_printed;
	void *ring_space;
	dma_addr_t ring_dma;

	if (!version_printed++)
		printk ("%s", version);

	err = pci_enable_device (pdev);
	if (err)
		return err;

	irq = pdev->irq;
	err = pci_request_regions (pdev, "dl2k");
	if (err)
		goto err_out_disable;

	pci_set_master (pdev);

	err = -ENOMEM;

	dev = alloc_etherdev (sizeof (*np));
	if (!dev)
		goto err_out_res;
	SET_NETDEV_DEV(dev, &pdev->dev);

	np = netdev_priv(dev);

	/* IO registers range. */
	ioaddr = pci_iomap(pdev, 0, 0);
	if (!ioaddr)
		goto err_out_dev;
	np->eeprom_addr = ioaddr;

#ifdef MEM_MAPPING
	/* MM registers range. */
	ioaddr = pci_iomap(pdev, 1, 0);
	if (!ioaddr)
		goto err_out_iounmap;
#endif
	np->ioaddr = ioaddr;
	np->chip_id = chip_idx;
	np->pdev = pdev;
	spin_lock_init (&np->tx_lock);
	spin_lock_init (&np->rx_lock);

	/* Parse manual configuration */
	np->an_enable = 1;
	np->tx_coalesce = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "auto") == 0 ||
			    strcmp (media[card_idx], "autosense") == 0 ||
			    strcmp (media[card_idx], "0") == 0 ) {
				np->an_enable = 2;
			} else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
				   strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "1000mbps_fd") == 0 ||
				 strcmp (media[card_idx], "6") == 0) {
				np->speed=1000;
				np->full_duplex=1;
			} else if (strcmp (media[card_idx], "1000mbps_hd") == 0 ||
				 strcmp (media[card_idx], "5") == 0) {
				np->speed = 1000;
				np->full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (jumbo[card_idx] != 0) {
			np->jumbo = 1;
			dev->mtu = MAX_JUMBO;
		} else {
			np->jumbo = 0;
			if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE)
				dev->mtu = mtu[card_idx];
		}
		np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
		    vlan[card_idx] : 0;
		if (rx_coalesce > 0 && rx_timeout > 0) {
			np->rx_coalesce = rx_coalesce;
			np->rx_timeout = rx_timeout;
			np->coalesce = 1;
		}
		np->tx_flow = (tx_flow == 0) ? 0 : 1;
		np->rx_flow = (rx_flow == 0) ? 0 : 1;

		if (tx_coalesce < 1)
			tx_coalesce = 1;
		else if (tx_coalesce > TX_RING_SIZE-1)
			tx_coalesce = TX_RING_SIZE - 1;
	}
	dev->netdev_ops = &netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &ethtool_ops;
#if 0
	dev->features = NETIF_F_IP_CSUM;
#endif
	pci_set_drvdata (pdev, dev);

	ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_iounmap;
	np->tx_ring = ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = ring_space;
	np->rx_ring_dma = ring_dma;

	/* Parse eeprom data */
	parse_eeprom (dev);

	/* Find PHY address */
	err = find_miiphy (dev);
	if (err)
		goto err_out_unmap_rx;

	/* Fiber device? */
	np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
	np->link_status = 0;
	/* Set media and reset PHY */
	if (np->phy_media) {
		/* default Auto-Negotiation for fiber devices */
	 	if (np->an_enable == 2) {
			np->an_enable = 1;
		}
		mii_set_media_pcs (dev);
	} else {
		/* Auto-Negotiation is mandatory for 1000BASE-T,
		   IEEE 802.3ab Annex 28D page 14 */
		if (np->speed == 1000)
			np->an_enable = 1;
		mii_set_media (dev);
	}

	err = register_netdev (dev);
	if (err)
		goto err_out_unmap_rx;

	card_idx++;

	printk (KERN_INFO "%s: %s, %pM, IRQ %d\n",
		dev->name, np->name, dev->dev_addr, irq);
	if (tx_coalesce > 1)
		printk(KERN_INFO "tx_coalesce:\t%d packets\n",
				tx_coalesce);
	if (np->coalesce)
		printk(KERN_INFO
		       "rx_coalesce:\t%d packets\n"
		       "rx_timeout: \t%d ns\n",
				np->rx_coalesce, np->rx_timeout*640);
	if (np->vlan)
		printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
	return 0;

err_out_unmap_rx:
	pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_iounmap:
#ifdef MEM_MAPPING
	pci_iounmap(pdev, np->ioaddr);
#endif
	pci_iounmap(pdev, np->eeprom_addr);
err_out_dev:
	free_netdev (dev);
err_out_res:
	pci_release_regions (pdev);
err_out_disable:
	pci_disable_device (pdev);
	return err;
}