/* called after powerup, by probe or system-pm "wakeup" */
static int xhci_pci_reinit(struct xhci_hcd *xhci, struct pci_dev *pdev)
{
	/*
	 * TODO: Implement finding debug ports later.
	 * TODO: see if there are any quirks that need to be added to handle
	 * new extended capabilities.
	 */

	/* PCI Memory-Write-Invalidate cycle support is optional (uncommon) */
	if (pci_set_mwi(pdev) == 0)
		xhci_dbg(xhci, "MWI active\n");

	xhci_dbg(xhci, "Finished xhci_pci_reinit\n");
	return 0;
}
/* called after powerup, by probe or system-pm "wakeup" */
static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
{
	/* we expect static quirk code to handle the "extended capabilities"
	 * (currently just BIOS handoff) allowed starting with EHCI 0.96
	 */

	/* PCI Memory-Write-Invalidate cycle support is optional (uncommon) */
	if (pci_set_mwi(pdev) == 0)
		ehci_dbg(ehci, "MWI active\n");

	return 0;
}
/*
 * Emulate a guest write to the PCI_COMMAND register of a passed-through
 * device: mirror the enable, bus-master and MWI bits the guest is setting
 * onto the real device, then forward the (possibly adjusted) value into
 * real config space.
 *
 * Returns 0 on success or a negative errno from pci_enable_device() /
 * pci_write_config_word().
 */
static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
{
	struct xen_pcibk_dev_data *dev_data;
	int err;

	dev_data = pci_get_drvdata(dev);
	/* Guest turns an enable bit on while the device is off: power it up. */
	if (!pci_is_enabled(dev) && is_enable_cmd(value)) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG DRV_NAME ": %s: enable\n",
			       pci_name(dev));
		err = pci_enable_device(dev);
		if (err)
			return err;
		if (dev_data)
			dev_data->enable_intx = 1;
	} else if (pci_is_enabled(dev) && !is_enable_cmd(value)) {
		/* Guest clears the enable bits: power the device down. */
		if (unlikely(verbose_request))
			printk(KERN_DEBUG DRV_NAME ": %s: disable\n",
			       pci_name(dev));
		pci_disable_device(dev);
		if (dev_data)
			dev_data->enable_intx = 0;
	}

	/* Propagate a newly requested bus-master bit to the real device. */
	if (!dev->is_busmaster && is_master_cmd(value)) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG DRV_NAME ": %s: set bus master\n",
			       pci_name(dev));
		pci_set_master(dev);
	}

	/*
	 * MWI may be unsupported by the device; if enabling fails, strip the
	 * bit so the value written below matches what the hardware does.
	 */
	if (value & PCI_COMMAND_INVALIDATE) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG DRV_NAME ": %s: enable memory-write-invalidate\n",
			       pci_name(dev));
		err = pci_set_mwi(dev);
		if (err) {
			printk(KERN_WARNING DRV_NAME ": %s: cannot enable "
			       "memory-write-invalidate (%d)\n",
			       pci_name(dev), err);
			value &= ~PCI_COMMAND_INVALIDATE;
		}
	}

	return pci_write_config_word(dev, offset, value);
}
/*
 * Apply a guest's PCI_COMMAND write to the real device: mirror the
 * enable/disable, bus-master and MWI requests onto the hardware before
 * forwarding the value into config space.
 */
static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
{
	int rc;
	int enabled = atomic_read(&dev->enable_cnt) != 0;

	if (!enabled && is_enable_cmd(value)) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG "pciback: %s: enable\n",
			       pci_name(dev));
		rc = pci_enable_device(dev);
		if (rc)
			return rc;
	} else if (enabled && !is_enable_cmd(value)) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG "pciback: %s: disable\n",
			       pci_name(dev));
		pci_disable_device(dev);
	}

	if (is_master_cmd(value) && !dev->is_busmaster) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG "pciback: %s: set bus master\n",
			       pci_name(dev));
		pci_set_master(dev);
	}

	if (value & PCI_COMMAND_INVALIDATE) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG
			       "pciback: %s: enable memory-write-invalidate\n",
			       pci_name(dev));
		rc = pci_set_mwi(dev);
		if (rc) {
			/* drop the bit so the written value matches reality */
			printk(KERN_WARNING
			       "pciback: %s: cannot enable memory-write-invalidate (%d)\n",
			       pci_name(dev), rc);
			value &= ~PCI_COMMAND_INVALIDATE;
		}
	}

	return pci_write_config_word(dev, offset, value);
}
/* called after powerup, by probe or system-pm "wakeup" */
static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
{
	u32 temp;
	int retval;

	/* optional debug port, normally in the first BAR */
	temp = pci_find_capability(pdev, 0x0a);	/* 0x0a = debug-port capability */
	if (temp) {
		pci_read_config_dword(pdev, temp, &temp);
		/* high word of the capability dword carries the BAR number
		 * (bits 15:13) and register offset (bits 12:0) */
		temp >>= 16;
		if ((temp & (3 << 13)) == (1 << 13)) {
			/* BAR field == 1: debug registers are in the first BAR */
			temp &= 0x1fff;
			ehci->debug = ehci_to_hcd(ehci)->regs + temp;
			temp = ehci_readl(ehci, &ehci->debug->control);
			ehci_info(ehci, "debug port %d%s\n",
				HCS_DEBUG_PORT(ehci->hcs_params),
				(temp & DBGP_ENABLED) ? " IN USE" : "");
			/* only keep a reference when the debug port is
			 * already enabled */
			if (!(temp & DBGP_ENABLED))
				ehci->debug = NULL;
		}
	}

	/* we expect static quirk code to handle the "extended capabilities"
	 * (currently just BIOS handoff) allowed starting with EHCI 0.96
	 */

	/* PCI Memory-Write-Invalidate cycle support is optional (uncommon) */
	retval = pci_set_mwi(pdev);
	if (!retval)
		ehci_dbg(ehci, "MWI active\n");

	/* put ports in a known unpowered state; presumably the hub driver
	 * powers them up later -- TODO confirm */
	ehci_port_power(ehci, 0);

	return 0;
}
static int cs5530_init_chip(void) { struct pci_dev *master_0 = NULL, *cs5530_0 = NULL, *dev = NULL; while ((dev = pci_get_device(PCI_VENDOR_ID_CYRIX, PCI_ANY_ID, dev)) != NULL) { switch (dev->device) { case PCI_DEVICE_ID_CYRIX_PCI_MASTER: master_0 = pci_dev_get(dev); break; case PCI_DEVICE_ID_CYRIX_5530_LEGACY: cs5530_0 = pci_dev_get(dev); break; } } if (!master_0) { printk(KERN_ERR DRV_NAME ": unable to locate PCI MASTER function\n"); goto fail_put; } if (!cs5530_0) { printk(KERN_ERR DRV_NAME ": unable to locate CS5530 LEGACY function\n"); goto fail_put; } pci_set_master(cs5530_0); pci_set_mwi(cs5530_0); /* * Set PCI CacheLineSize to 16-bytes: * --> Write 0x04 into 8-bit PCI CACHELINESIZE reg of function 0 of the cs5530 * * Note: This value is constant because the 5530 is only a Geode companion */ pci_write_config_byte(cs5530_0, PCI_CACHE_LINE_SIZE, 0x04); /* * Disable trapping of UDMA register accesses (Win98 hack): * --> Write 0x5006 into 16-bit reg at offset 0xd0 of function 0 of the cs5530 */ pci_write_config_word(cs5530_0, 0xd0, 0x5006); /* * Bit-1 at 0x40 enables MemoryWriteAndInvalidate on internal X-bus: * The other settings are what is necessary to get the register * into a sane state for IDE DMA operation. */ pci_write_config_byte(master_0, 0x40, 0x1e); /* * Set max PCI burst size (16-bytes seems to work best): * 16bytes: set bit-1 at 0x41 (reg value of 0x16) * all others: clear bit-1 at 0x41, and do: * 128bytes: OR 0x00 at 0x41 * 256bytes: OR 0x04 at 0x41 * 512bytes: OR 0x08 at 0x41 * 1024bytes: OR 0x0c at 0x41 */ pci_write_config_byte(master_0, 0x41, 0x14); /* * These settings are necessary to get the chip * into a sane state for IDE DMA operation. */ pci_write_config_byte(master_0, 0x42, 0x00); pci_write_config_byte(master_0, 0x43, 0xc1); pci_dev_put(master_0); pci_dev_put(cs5530_0); return 0; fail_put: if (master_0) pci_dev_put(master_0); if (cs5530_0) pci_dev_put(cs5530_0); return -ENODEV; }
/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_try_set_mwi(struct pci_dev *dev)
{
	/* thin "ignorable result" wrapper around pci_set_mwi() */
	return pci_set_mwi(dev);
}
/*
 * rt2x00pci_probe - generic PCI probe for rt2x00 based devices
 * @pci_dev: PCI device being attached
 * @ops: chipset-specific callbacks/parameters supplied by the driver
 *
 * Enables the device, claims its resources, allocates the ieee80211
 * hardware and hands over to rt2x00lib for the chipset probe.
 * Returns 0 on success or a negative errno, with all earlier steps
 * undone via the goto chain at the bottom.
 */
int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
{
	struct ieee80211_hw *hw;
	struct rt2x00_dev *rt2x00dev;
	int retval;
	u16 chip;

	retval = pci_enable_device(pci_dev);
	if (retval) {
		rt2x00_probe_err("Enable device failed\n");
		return retval;
	}

	retval = pci_request_regions(pci_dev, pci_name(pci_dev));
	if (retval) {
		rt2x00_probe_err("PCI request regions failed\n");
		goto exit_disable_device;
	}

	pci_set_master(pci_dev);

	/* MWI is a best-effort optimization; failure is only logged */
	if (pci_set_mwi(pci_dev))
		rt2x00_probe_err("MWI not available\n");

	/* driver only supports 32-bit DMA addressing */
	if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) {
		rt2x00_probe_err("PCI DMA not supported\n");
		retval = -EIO;
		goto exit_release_regions;
	}

	hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
	if (!hw) {
		rt2x00_probe_err("Failed to allocate hardware\n");
		retval = -ENOMEM;
		goto exit_release_regions;
	}

	pci_set_drvdata(pci_dev, hw);

	rt2x00dev = hw->priv;
	rt2x00dev->dev = &pci_dev->dev;
	rt2x00dev->ops = ops;
	rt2x00dev->hw = hw;
	rt2x00dev->irq = pci_dev->irq;
	rt2x00dev->name = ops->name;

	if (pci_is_pcie(pci_dev))
		rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE);
	else
		rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);

	retval = rt2x00pci_alloc_reg(rt2x00dev);
	if (retval)
		goto exit_free_device;

	/*
	 * Because rt3290 chip use different efuse offset to read efuse data.
	 * So before read efuse it need to indicate it is the
	 * rt3290 or not.
	 */
	pci_read_config_word(pci_dev, PCI_DEVICE_ID, &chip);
	rt2x00dev->chip.rt = chip;

	retval = rt2x00lib_probe_dev(rt2x00dev);
	if (retval)
		goto exit_free_reg;

	return 0;

exit_free_reg:
	rt2x00pci_free_reg(rt2x00dev);

exit_free_device:
	ieee80211_free_hw(hw);

exit_release_regions:
	/* undo the earlier pci_set_mwi() */
	pci_clear_mwi(pci_dev);
	pci_release_regions(pci_dev);

exit_disable_device:
	pci_disable_device(pci_dev);

	return retval;
}
/**
 * pcidriver_probe - called when installing the driver for a matching device
 * @pdev: Pointer to the PCI device
 * @id: matched entry of this driver's pci_device_id table
 *
 * Identifies the board type (log message only; mpRACE-1 additionally gets
 * bus mastering), enables the device, allocates a device id and private
 * data, creates the sysfs class device and registers the char device.
 *
 * NOTE(review): on failure after the class device / sysfs attributes were
 * created they are not removed (see the TODO below) -- existing behavior
 * is kept as-is here.
 */
static int __devinit pcidriver_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;
	int devno;
	pcidriver_privdata_t *privdata;
	int devid;

	/* At the moment there is no difference between these boards here, other than
	 * printing a different message in the log.
	 *
	 * However, there is some difference in the interrupt handling functions.
	 */
	if ( (id->vendor == MPRACE1_VENDOR_ID) &&
		(id->device == MPRACE1_DEVICE_ID)) {
		/* It is a mpRACE-1 */
		mod_info( "Found mpRACE-1 at %s\n", dev_name(&pdev->dev));
		/* Set bus master */
		pci_set_master(pdev);
	} else if ((id->vendor == PCIXTEST_VENDOR_ID) &&
		(id->device == PCIXTEST_DEVICE_ID)) {
		/* It is a PCI-X Test board */
		mod_info( "Found PCI-X test board at %s\n", dev_name(&pdev->dev));
	} else if ((id->vendor == PCIEPLDA_VENDOR_ID) &&
		(id->device == PCIEPLDA_DEVICE_ID)) {
		/* It is a PCI-X Test board */
		mod_info( "Found PCIe PLDA test board at %s\n", dev_name(&pdev->dev));
	} else if ((id->vendor == PCIEABB_VENDOR_ID) &&
		(id->device == PCIEABB_DEVICE_ID)) {
		/* It is a PCI-X Test board */
		mod_info( "Found PCIe ABB test board at %s\n", dev_name(&pdev->dev));
	} else if ((id->vendor == PCIXPG4_VENDOR_ID) &&
		(id->device == PCIXPG4_DEVICE_ID)) {
		/* It is a PCI-X PROGRAPE4 board */
		mod_info( "Found PCI-X PROGRAPE-4 board at %s\n", dev_name(&pdev->dev));
	} else if ((id->vendor == PCI64PG4_VENDOR_ID) &&
		(id->device == PCI64PG4_DEVICE_ID)) {
		/* It is a PCI-64 PROGRAPE4 board */
		mod_info( "Found PCI-64b/66 PROGRAPE-4 board at %s\n", dev_name(&pdev->dev));
	} else if ((id->vendor == PCIE_XILINX_VENDOR_ID) &&
		(id->device == PCIE_ML605_DEVICE_ID)) {
		/* It is a PCI-E Xilinx ML605 evaluation board */
		mod_info("Found ML605 board at %s\n", dev_name(&pdev->dev));
	} else {
		/* It is something else */
		mod_info( "Found unknown board (%x:%x) at %s\n", id->vendor, id->device, dev_name(&pdev->dev));
	}

	/* Enable the device */
	if ((err = pci_enable_device(pdev)) != 0) {
		mod_info("Couldn't enable device\n");
		goto probe_pcien_fail;
	}

	/* Set Memory-Write-Invalidate support; MWI is optional, so a
	 * failure here is logged but not fatal */
	if ((err = pci_set_mwi(pdev)) != 0)
		mod_info("MWI not supported. Continue without enabling MWI.\n");

	/* Get / Increment the device id */
	devid = atomic_inc_return(&pcidriver_deviceCount) - 1;
	if (devid >= MAXDEVICES) {
		mod_info("Maximum number of devices reached! Increase MAXDEVICES.\n");
		err = -ENOMSG;
		goto probe_maxdevices_fail;
	}

	/* Allocate and initialize the private data for this device */
	if ((privdata = kcalloc(1, sizeof(*privdata), GFP_KERNEL)) == NULL) {
		err = -ENOMEM;
		goto probe_nomem;
	}

	/* bookkeeping for kernel-memory and user-memory mappings */
	INIT_LIST_HEAD(&(privdata->kmem_list));
	spin_lock_init(&(privdata->kmemlist_lock));
	atomic_set(&privdata->kmem_count, 0);

	INIT_LIST_HEAD(&(privdata->umem_list));
	spin_lock_init(&(privdata->umemlist_lock));
	atomic_set(&privdata->umem_count, 0);

	pci_set_drvdata( pdev, privdata );
	privdata->pdev = pdev;

	/* Device add to sysfs */
	devno = MKDEV(MAJOR(pcidriver_devt), MINOR(pcidriver_devt) + devid);
	privdata->devno = devno;
	if (pcidriver_class != NULL) {
		/* FIXME: some error checking missing here */
		privdata->class_dev = class_device_create(pcidriver_class, NULL, devno, &(pdev->dev), NODENAMEFMT, MINOR(pcidriver_devt) + devid, privdata);
		class_set_devdata( privdata->class_dev, privdata );
		mod_info("Device /dev/%s%d added\n",NODENAME,MINOR(pcidriver_devt) + devid);
	}

	/* Setup mmaped BARs into kernel space */
	if ((err = pcidriver_probe_irq(privdata)) != 0)
		goto probe_irq_probe_fail;

	/* Populate sysfs attributes for the class device */
	/* TODO: correct errorhandling. ewww. must remove the files in reversed order :-( */
#define sysfs_attr(name) do { \
		if (class_device_create_file(sysfs_attr_def_pointer, &sysfs_attr_def_name(name)) != 0) \
			goto probe_device_create_fail; \
	} while (0)
#ifdef ENABLE_IRQ
	sysfs_attr(irq_count);
	sysfs_attr(irq_queues);
#endif
	sysfs_attr(mmap_mode);
	sysfs_attr(mmap_area);
	sysfs_attr(kmem_count);
	sysfs_attr(kmem_alloc);
	sysfs_attr(kmem_free);
	sysfs_attr(kbuffers);
	sysfs_attr(umappings);
	sysfs_attr(umem_unmap);
#undef sysfs_attr

	/* Register character device */
	cdev_init( &(privdata->cdev), &pcidriver_fops );
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,35)
	privdata->cdev.owner = THIS_MODULE;
#endif
	privdata->cdev.ops = &pcidriver_fops;
	err = cdev_add( &privdata->cdev, devno, 1 );
	if (err) {
		mod_info( "Couldn't add character device.\n" );
		goto probe_cdevadd_fail;
	}

	return 0;

probe_device_create_fail:
probe_cdevadd_fail:
probe_irq_probe_fail:
	pcidriver_irq_unmap_bars(privdata);
	kfree(privdata);
probe_nomem:
	atomic_dec(&pcidriver_deviceCount);
probe_maxdevices_fail:
	pci_disable_device(pdev);
probe_pcien_fail:
	return err;
}
int prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct net_device *ndev; u8 latency_tmr; u32 mem_addr; islpci_private *priv; int rvalue; /* Enable the pci device */ if (pci_enable_device(pdev)) { printk(KERN_ERR "%s: pci_enable_device() failed.\n", DRV_NAME); return -ENODEV; } /* check whether the latency timer is set correctly */ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_tmr); #if VERBOSE > SHOW_ERROR_MESSAGES DEBUG(SHOW_TRACING, "latency timer: %x\n", latency_tmr); #endif if (latency_tmr < PCIDEVICE_LATENCY_TIMER_MIN) { /* set the latency timer */ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, PCIDEVICE_LATENCY_TIMER_VAL); } /* enable PCI DMA */ if (pci_set_dma_mask(pdev, 0xffffffff)) { printk(KERN_ERR "%s: 32-bit PCI DMA not supported", DRV_NAME); goto do_pci_disable_device; } /* 0x40 is the programmable timer to configure the response timeout (TRDY_TIMEOUT) * 0x41 is the programmable timer to configure the retry timeout (RETRY_TIMEOUT) * The RETRY_TIMEOUT is used to set the number of retries that the core, as a * Master, will perform before abandoning a cycle. The default value for * RETRY_TIMEOUT is 0x80, which far exceeds the PCI 2.1 requirement for new * devices. A write of zero to the RETRY_TIMEOUT register disables this * function to allow use with any non-compliant legacy devices that may * execute more retries. * * Writing zero to both these two registers will disable both timeouts and * *can* solve problems caused by devices that are slow to respond. 
* Make this configurable - MSW */ if ( init_pcitm >= 0 ) { pci_write_config_byte(pdev, 0x40, (u8)init_pcitm); pci_write_config_byte(pdev, 0x41, (u8)init_pcitm); } else { printk(KERN_INFO "PCI TRDY/RETRY unchanged\n"); } /* request the pci device I/O regions */ rvalue = pci_request_regions(pdev, DRV_NAME); if (rvalue) { printk(KERN_ERR "%s: pci_request_regions failure (rc=%d)\n", DRV_NAME, rvalue); goto do_pci_disable_device; } /* check if the memory window is indeed set */ rvalue = pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &mem_addr); if (rvalue || !mem_addr) { printk(KERN_ERR "%s: PCI device memory region not configured; fix your BIOS or CardBus bridge/drivers\n", DRV_NAME); goto do_pci_disable_device; } /* enable PCI bus-mastering */ DEBUG(SHOW_TRACING, "%s: pci_set_master(pdev)\n", DRV_NAME); pci_set_master(pdev); /* enable MWI */ pci_set_mwi(pdev); /* setup the network device interface and its structure */ if (!(ndev = islpci_setup(pdev))) { /* error configuring the driver as a network device */ printk(KERN_ERR "%s: could not configure network device\n", DRV_NAME); goto do_pci_release_regions; } priv = netdev_priv(ndev); islpci_set_state(priv, PRV_STATE_PREBOOT); /* we are attempting to boot */ /* card is in unknown state yet, might have some interrupts pending */ isl38xx_disable_interrupts(priv->device_base); /* request for the interrupt before uploading the firmware */ rvalue = request_irq(pdev->irq, &islpci_interrupt, SA_SHIRQ, ndev->name, priv); if (rvalue) { /* error, could not hook the handler to the irq */ printk(KERN_ERR "%s: could not install IRQ handler\n", ndev->name); goto do_unregister_netdev; } /* firmware upload is triggered in islpci_open */ return 0; do_unregister_netdev: unregister_netdev(ndev); islpci_free_memory(priv); pci_set_drvdata(pdev, NULL); free_netdev(ndev); priv = NULL; do_pci_release_regions: pci_release_regions(pdev); do_pci_disable_device: pci_disable_device(pdev); return -EIO; }
static void tulip_mwi_config (struct pci_dev *pdev, struct net_device *dev) { struct tulip_private *tp = rtdev->priv; u8 cache; u16 pci_command; u32 csr0; if (tulip_debug > 3) printk(KERN_DEBUG "%s: tulip_mwi_config()\n", pci_name(pdev)); tp->csr0 = csr0 = 0; /* if we have any cache line size at all, we can do MRM */ csr0 |= MRM; /* ...and barring hardware bugs, MWI */ if (!(tp->chip_id == DC21143 && tp->revision == 65)) csr0 |= MWI; /* set or disable MWI in the standard PCI command bit. * Check for the case where mwi is desired but not available */ if (csr0 & MWI) pci_set_mwi(pdev); else pci_clear_mwi(pdev); /* read result from hardware (in case bit refused to enable) */ pci_read_config_word(pdev, PCI_COMMAND, &pci_command); if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE))) csr0 &= ~MWI; /* if cache line size hardwired to zero, no MWI */ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache); if ((csr0 & MWI) && (cache == 0)) { csr0 &= ~MWI; pci_clear_mwi(pdev); } /* assign per-cacheline-size cache alignment and * burst length values */ switch (cache) { case 8: csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift); break; case 16: csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift); break; case 32: csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift); break; default: cache = 0; break; } /* if we have a good cache line size, we by now have a good * csr0, so save it and exit */ if (cache) goto out; /* we don't have a good csr0 or cache line size, disable MWI */ if (csr0 & MWI) { pci_clear_mwi(pdev); csr0 &= ~MWI; } /* sane defaults for burst length and cache alignment * originally from de4x5 driver */ csr0 |= (8 << BurstLenShift) | (1 << CALShift); out: tp->csr0 = csr0; if (tulip_debug > 2) printk(KERN_DEBUG "%s: MWI config cacheline=%d, csr0=%08x\n", pci_name(pdev), cache, csr0); }
int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops) { struct ieee80211_hw *hw; struct rt2x00_dev *rt2x00dev; int retval; u16 chip; retval = pci_enable_device(pci_dev); if (retval) { rt2x00_probe_err("Enable device failed\n"); return retval; } retval = pci_request_regions(pci_dev, pci_name(pci_dev)); if (retval) { rt2x00_probe_err("PCI request regions failed\n"); goto exit_disable_device; } pci_set_master(pci_dev); if (pci_set_mwi(pci_dev)) rt2x00_probe_err("MWI not available\n"); if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) { rt2x00_probe_err("PCI DMA not supported\n"); retval = -EIO; goto exit_release_regions; } hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); if (!hw) { rt2x00_probe_err("Failed to allocate hardware\n"); retval = -ENOMEM; goto exit_release_regions; } pci_set_drvdata(pci_dev, hw); rt2x00dev = hw->priv; rt2x00dev->dev = &pci_dev->dev; rt2x00dev->ops = ops; rt2x00dev->hw = hw; rt2x00dev->irq = pci_dev->irq; rt2x00dev->name = pci_name(pci_dev); if (pci_is_pcie(pci_dev)) rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE); else rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI); retval = rt2x00pci_alloc_reg(rt2x00dev); if (retval) goto exit_free_device; /* * Because rt3290 chip use different efuse offset to read efuse data. * So before read efuse it need to indicate it is the * rt3290 or not. 
*/ pci_read_config_word(pci_dev, PCI_DEVICE_ID, &chip); rt2x00dev->chip.rt = chip; if (rt2x00_rt(rt2x00dev, MT7630)) MT76x0_WLAN_ChipOnOff(rt2x00dev, 1, 1); retval = rt2x00lib_probe_dev(rt2x00dev); if (retval) goto exit_free_reg; if (rt2x00_rt(rt2x00dev, MT7630)) { rt2x00dev->TXWISize=20; rt2x00dev->bscan=0; NdisAllocateSpinLock(rt2x00dev, &rt2x00dev->CtrlRingLock); NdisAllocateSpinLock(rt2x00dev, &rt2x00dev->CalLock); retval = RTMPAllocTxRxRingMemory(rt2x00dev); if (retval != NDIS_STATUS_SUCCESS) goto exit_free_reg; } return 0; exit_free_reg: rt2x00pci_free_reg(rt2x00dev); exit_free_device: ieee80211_free_hw(hw); exit_release_regions: pci_release_regions(pci_dev); exit_disable_device: pci_disable_device(pci_dev); pci_set_drvdata(pci_dev, NULL); return retval; }
/*
 * Handle a guest write to PCI_COMMAND for a passed-through device.
 * Synchronizes device enable, bus-master and MWI state with the bits the
 * guest toggles, caches the guest-visible value in cmd->val, and -- for
 * permissive devices only -- writes a merged value to real config space,
 * letting the guest control just the PCI_COMMAND_GUEST bits.
 */
static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
{
	struct xen_pcibk_dev_data *dev_data;
	int err;
	u16 val;
	struct pci_cmd_info *cmd = data;	/* cached guest view of PCI_COMMAND */

	dev_data = pci_get_drvdata(dev);
	/* guest turns an enable bit on while the device is off: power it up */
	if (!pci_is_enabled(dev) && is_enable_cmd(value)) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG DRV_NAME ": %s: enable\n",
			       pci_name(dev));
		err = pci_enable_device(dev);
		if (err)
			return err;
		if (dev_data)
			dev_data->enable_intx = 1;
	} else if (pci_is_enabled(dev) && !is_enable_cmd(value)) {
		/* guest clears the enable bits: power the device down */
		if (unlikely(verbose_request))
			printk(KERN_DEBUG DRV_NAME ": %s: disable\n",
			       pci_name(dev));
		pci_disable_device(dev);
		if (dev_data)
			dev_data->enable_intx = 0;
	}

	/* track bus-master transitions in both directions */
	if (!dev->is_busmaster && is_master_cmd(value)) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG DRV_NAME ": %s: set bus master\n",
			       pci_name(dev));
		pci_set_master(dev);
	} else if (dev->is_busmaster && !is_master_cmd(value)) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG DRV_NAME ": %s: clear bus master\n",
			       pci_name(dev));
		pci_clear_master(dev);
	}

	/* act only on 0->1 / 1->0 transitions of PCI_COMMAND_INVALIDATE
	 * relative to the last value the guest wrote (cmd->val) */
	if (!(cmd->val & PCI_COMMAND_INVALIDATE) &&
	    (value & PCI_COMMAND_INVALIDATE)) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG
			       DRV_NAME ": %s: enable memory-write-invalidate\n",
			       pci_name(dev));
		err = pci_set_mwi(dev);
		if (err) {
			/* drop the bit so the cached value matches reality */
			pr_warn("%s: cannot enable memory-write-invalidate (%d)\n",
				pci_name(dev), err);
			value &= ~PCI_COMMAND_INVALIDATE;
		}
	} else if ((cmd->val & PCI_COMMAND_INVALIDATE) &&
		   !(value & PCI_COMMAND_INVALIDATE)) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG
			       DRV_NAME ": %s: disable memory-write-invalidate\n",
			       pci_name(dev));
		pci_clear_mwi(dev);
	}

	cmd->val = value;

	/* non-permissive guests never touch the real command register */
	if (!permissive && (!dev_data || !dev_data->permissive))
		return 0;

	/* Only allow the guest to control certain bits. */
	err = pci_read_config_word(dev, offset, &val);
	if (err || val == value)
		return err;

	value &= PCI_COMMAND_GUEST;
	value |= val & ~PCI_COMMAND_GUEST;

	return pci_write_config_word(dev, offset, value);
}
static int rtbt_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { const char *print_name; struct rtbt_os_ctrl *os_ctrl; struct rtbt_dev_ops *dev_ops; void __iomem *csr_addr = NULL; int rv; printk("===> %s():probe for device(Vendor=0x%x, Device=0x%p)\n", __FUNCTION__, pdev->vendor, &pdev->device); if (!id->driver_data) { printk("pci_device_id->driver_data is NULL!\n"); return -1; } dev_ops = (struct rtbt_dev_ops *)(id->driver_data); if (!(dev_ops->dev_ctrl_init && dev_ops->dev_ctrl_deinit && dev_ops->dev_resource_init && dev_ops->dev_resource_deinit && dev_ops->dev_hw_init && dev_ops->dev_hw_deinit)) { printk("dev_ops have null function pointer!\n"); return -1; } rv = pci_enable_device(pdev); if (rv) { printk("call pci_enable_dev failed(%d)\n", rv); return rv; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) print_name = pci_name(pdev); #else print_name = pdev->slot_name; #endif if ((rv = pci_request_region(pdev, 0, print_name)) != 0) { printk("Request PCI resource failed(%d)\n", rv); goto err_out_disable_dev; } /* map physical address to virtual address for accessing register */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) csr_addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); #else csr_addr = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); #endif if (!csr_addr) { printk("ioremap failed, region 0x%lx @ 0x%lX\n", (unsigned long)pci_resource_start(pdev, 0), (unsigned long)pci_resource_len(pdev, 0)); goto err_out_free_res; } else { printk("%s():PCI Dev(%s) get resource at 0x%lx,VA 0x%lx,IRQ %d.\n", __FUNCTION__, print_name, (ULONG)pci_resource_start(pdev, 0), (ULONG)csr_addr, pdev->irq); } /* Config PCI bus features */ // TODO: Shiang, does our chip support mwi?? 
rv = pci_set_mwi(pdev); if (rv != 0) { printk("set MWI failed(%d)\n", rv); goto err_out_free_res; } pci_set_master(pdev); /* device control block initialization */ printk("call dev_ops->dev_ctrl_init!\n"); if (dev_ops->dev_ctrl_init(&os_ctrl, csr_addr)) goto err_out_free_res; os_ctrl->dev_ops = dev_ops; // rtbth_us_init(os_ctrl->dev_ctrl); printk("call dev_ops->dev_resource_init!\n"); if (dev_ops->dev_resource_init(os_ctrl)) goto err_dev_ctrl; os_ctrl->if_dev = (void *) pdev; rtbth_us_init(os_ctrl->dev_ctrl); /* Init the host protocol stack hooking interface */ if (rtbt_hps_iface_init(RAL_INF_PCI, pdev, os_ctrl)) goto err_dev_resource; #if 0 /* Link the host protocol stack interface to the protocl stack */ if (rtbt_hps_iface_attach(os_ctrl)) goto err_hps_iface; #endif printk("<---%s():Sucess\n", __FUNCTION__); return 0; rtbt_hps_iface_deinit(RAL_INF_PCI, pdev, os_ctrl); err_dev_resource: printk("err: call rtbt_dev_resource_deinit()\n"); dev_ops->dev_resource_deinit(os_ctrl); err_dev_ctrl: printk("err: call rtbt_dev_ctrl_deinit()\n"); dev_ops->dev_ctrl_deinit(os_ctrl); err_out_free_res: if (csr_addr) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) pci_iounmap(pdev, csr_addr); #else iounmap((csr_addr)); #endif } pci_set_drvdata(pdev, NULL); pci_release_region(pdev, 0); err_out_disable_dev: pci_disable_device(pdev); printk("<---%s():fail\n", __FUNCTION__); return -1; }
/*
 * cp_init_one - probe one RTL-8139C+ device
 *
 * Verifies the chip revision (plain 8139s are handed to 8139too),
 * allocates the net_device, enables the PCI device, MWI and MMIO,
 * configures the DMA masks, reads the MAC address from EEPROM and
 * registers the interface.  Returns 0 or a negative errno, unwinding
 * all partial setup via the goto chain.
 */
static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct cp_private *cp;
	int rc;
	void __iomem *regs;
	long pciaddr;
	unsigned int addr_len, i, pci_using_dac;
	u8 pci_rev;

#ifndef MODULE
	static int version_printed;
	if (version_printed++ == 0)
		printk("%s", version);
#endif

	pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);

	/* only revision >= 0x20 of the 8139 is the C+ chip this driver handles */
	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev < 0x20) {
		printk(KERN_ERR PFX "pci dev %s (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
		       pci_name(pdev), pdev->vendor, pdev->device, pci_rev);
		printk(KERN_ERR PFX "Try the \"8139too\" driver instead.\n");
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(struct cp_private));
	if (!dev)
		return -ENOMEM;
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	cp = netdev_priv(dev);
	cp->pdev = pdev;
	cp->dev = dev;
	cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
	spin_lock_init (&cp->lock);
	cp->mii_if.dev = dev;
	cp->mii_if.mdio_read = mdio_read;
	cp->mii_if.mdio_write = mdio_write;
	cp->mii_if.phy_id = CP_INTERNAL_PHY;
	cp->mii_if.phy_id_mask = 0x1f;
	cp->mii_if.reg_num_mask = 0x1f;
	cp_set_rxbufsize(cp);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out_free;

	/* NOTE(review): MWI failure is treated as fatal here, even though
	 * MWI is an optional PCI feature -- existing behavior kept as-is */
	rc = pci_set_mwi(pdev);
	if (rc)
		goto err_out_disable;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_mwi;

	/* BAR 1 holds the MMIO register window */
	pciaddr = pci_resource_start(pdev, 1);
	if (!pciaddr) {
		rc = -EIO;
		printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n",
		       pci_name(pdev));
		goto err_out_res;
	}
	if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
		rc = -EIO;
		printk(KERN_ERR PFX "MMIO resource (%lx) too small on pci dev %s\n",
		       pci_resource_len(pdev, 1), pci_name(pdev));
		goto err_out_res;
	}

	/* Configure DMA attributes: try full 64-bit masks first, then
	 * fall back to 32-bit for both streaming and consistent mappings. */
	if ((sizeof(dma_addr_t) > 4) &&
	    !pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL) &&
	    !pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
		pci_using_dac = 1;
	} else {
		pci_using_dac = 0;
		rc = pci_set_dma_mask(pdev, 0xffffffffULL);
		if (rc) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_res;
		}
		rc = pci_set_consistent_dma_mask(pdev, 0xffffffffULL);
		if (rc) {
			printk(KERN_ERR PFX "No usable consistent DMA configuration, "
			       "aborting.\n");
			goto err_out_res;
		}
	}

	cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
		    PCIMulRW | RxChkSum | CpRxOn | CpTxOn;

	regs = ioremap(pciaddr, CP_REGS_SIZE);
	if (!regs) {
		rc = -EIO;
		printk(KERN_ERR PFX "Cannot map PCI MMIO (%lx@%lx) on pci dev %s\n",
		       pci_resource_len(pdev, 1), pciaddr, pci_name(pdev));
		goto err_out_res;
	}
	dev->base_addr = (unsigned long) regs;
	cp->regs = regs;

	cp_stop_hw(cp);

	/* read MAC address from EEPROM */
	addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
	for (i = 0; i < 3; i++)
		((u16 *) (dev->dev_addr))[i] =
		    le16_to_cpu (read_eeprom (regs, i + 7, addr_len));

	dev->open = cp_open;
	dev->stop = cp_close;
	dev->set_multicast_list = cp_set_rx_mode;
	dev->hard_start_xmit = cp_start_xmit;
	dev->get_stats = cp_get_stats;
	dev->do_ioctl = cp_ioctl;
	dev->poll = cp_rx_poll;
	dev->weight = 16;	/* arbitrary? from NAPI_HOWTO.txt. */
#ifdef BROKEN
	dev->change_mtu = cp_change_mtu;
#endif
	dev->ethtool_ops = &cp_ethtool_ops;
#if 0
	dev->tx_timeout = cp_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#endif
#if CP_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = cp_vlan_rx_register;
	dev->vlan_rx_kill_vid = cp_vlan_rx_kill_vid;
#endif

	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

	dev->irq = pdev->irq;

	rc = register_netdev(dev);
	if (rc)
		goto err_out_iomap;

	printk (KERN_INFO "%s: RTL-8139C+ at 0x%lx, "
		"%02x:%02x:%02x:%02x:%02x:%02x, "
		"IRQ %d\n",
		dev->name,
		dev->base_addr,
		dev->dev_addr[0], dev->dev_addr[1],
		dev->dev_addr[2], dev->dev_addr[3],
		dev->dev_addr[4], dev->dev_addr[5],
		dev->irq);

	pci_set_drvdata(pdev, dev);

	/* enable busmastering and memory-write-invalidate */
	pci_set_master(pdev);

	if (cp->wol_enabled)
		cp_set_d3_state (cp);

	return 0;

err_out_iomap:
	iounmap(regs);
err_out_res:
	pci_release_regions(pdev);
err_out_mwi:
	pci_clear_mwi(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_free:
	free_netdev(dev);
	return rc;
}
static int lpp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) { DEV *dev; int rc; uint minor; unsigned long plx_base; dbg("%s(): pci_dev->vendor=%04X device=%04X\n pci_device_id->vendor=%04X device=%04X", __FUNCTION__, pdev->vendor, pdev->device, ent->vendor, ent->device); //return -ENODEV; dev = kmalloc (sizeof(DEV), GFP_KERNEL); if (!dev) return -ENOMEM; memset (dev, 0x00, sizeof (*dev)); /* Enable tasklet for the device */ tasklet_init(&dev->Dpc, DpcForIsr, (unsigned long) dev); /* enable device (incl. PCI PM wakeup), and bus-mastering */ rc = pci_enable_device(pdev); dbg1("pci_enable_device()=%d dev=%p", rc, dev); if (rc) goto err_out_free; rc = pci_set_mwi(pdev); dbg1("pci_set_mwi()=%d", rc); if (rc) goto err_out_disable; rc = pci_request_regions(pdev, DRIVER_MOD_NAME); dbg1("pci_request_regions()=%d", rc); if (rc) goto err_out_mwi; if (pdev->irq < 2) { rc = -EIO; printk(KERN_ERR PFX "invalid irq (%d) for pci dev %s\n", pdev->irq, pci_name(pdev)); // pdev->slot_name); goto err_out_res; } plx_base = pci_resource_start (pdev, 1); dev->PlxIntCsrPort = plx_base + P9050_INTCSR; dev->cport = pci_resource_start (pdev, 2); dev->rport = pci_resource_start (pdev, 3); dev->wport = dev->rport; dev->wtcport = pci_resource_start (pdev, 5); dev->irq = pdev->irq; /* pio_end = pci_resource_end (pdev, 0); pio_flags = pci_resource_flags (pdev, 0); pio_len = pci_resource_len (pdev, 0); */ // rc = register_netdev(dev); // if (rc) goto err_out_iomap; /* select a "subminor" number (part of a minor number) */ down (&minor_table_mutex); for (minor = MAX_LPCS; minor < MAX_DEVICES; ++minor) { if (minor_table[minor] == NULL) break; } if (minor >= MAX_DEVICES) { info ("Too many devices plugged in, can not handle this device."); rc = -EINVAL; goto err_minor_table; } dev->minor = minor; minor_table[minor] = dev; pci_set_drvdata(pdev, dev); #ifdef CONFIG_DEVFS_FS devfs_mk_cdev(MKDEV(major, minor), S_IFCHR | S_IRUGO | S_IWUGO, "lpc/%d", minor); #endif up 
(&minor_table_mutex); dbg1("%s(): minor=%d return 0", __FUNCTION__, minor); return 0; err_minor_table: up (&minor_table_mutex); //err_out_iomap: // iounmap(regs); err_out_res: pci_release_regions(pdev); err_out_mwi: pci_clear_mwi(pdev); err_out_disable: pci_disable_device(pdev); err_out_free: tasklet_kill(&dev->Dpc); kfree(dev); dbg1("%s(): return %d", __FUNCTION__, rc); return rc; }