static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct ata_probe_ent *probe_ent;
	struct ata_port_info *ppi;
	int rc;
	unsigned int board_idx = (unsigned int) ent->driver_data;
	int pci_dev_busy = 0;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pci_dev_busy = 1;
		goto err_out;
	}

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;

	ppi = &uli_port_info;
	probe_ent = ata_pci_init_native_mode(pdev, &ppi);
	if (!probe_ent) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	switch (board_idx) {
	case uli_5287:
		probe_ent->port[0].scr_addr = ULI5287_BASE;
		probe_ent->port[1].scr_addr = ULI5287_BASE + ULI5287_OFFS;
		probe_ent->n_ports = 4;

		probe_ent->port[2].cmd_addr = pci_resource_start(pdev, 0) + 8;
		probe_ent->port[2].altstatus_addr =
		probe_ent->port[2].ctl_addr =
			(pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS) + 4;
		probe_ent->port[2].bmdma_addr = pci_resource_start(pdev, 4) + 16;
		probe_ent->port[2].scr_addr = ULI5287_BASE + ULI5287_OFFS*4;

		probe_ent->port[3].cmd_addr = pci_resource_start(pdev, 2) + 8;
		probe_ent->port[3].altstatus_addr =
		probe_ent->port[3].ctl_addr =
			(pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS) + 4;
		probe_ent->port[3].bmdma_addr = pci_resource_start(pdev, 4) + 24;
		probe_ent->port[3].scr_addr = ULI5287_BASE + ULI5287_OFFS*5;

		ata_std_ports(&probe_ent->port[2]);
		ata_std_ports(&probe_ent->port[3]);
		break;

	case uli_5289:
		probe_ent->port[0].scr_addr = ULI5287_BASE;
		probe_ent->port[1].scr_addr = ULI5287_BASE + ULI5287_OFFS;
		break;

	case uli_5281:
		probe_ent->port[0].scr_addr = ULI5281_BASE;
		probe_ent->port[1].scr_addr = ULI5281_BASE + ULI5281_OFFS;
		break;

	default:
		BUG();
		break;
	}

	pci_set_master(pdev);
	pci_intx(pdev, 1);

	/* FIXME: check ata_device_add return value */
	ata_device_add(probe_ent);
	kfree(probe_ent);

	return 0;

err_out_regions:
	pci_release_regions(pdev);

err_out:
	if (!pci_dev_busy)
		pci_disable_device(pdev);
	return rc;
}
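/*
 * A minimal sketch of the goto-unwind idiom shared by the probe functions
 * in this file: one label per acquired resource, with failure paths falling
 * through the labels in reverse acquisition order. mydev_probe and the
 * "mydev" region name are placeholders, not taken from any driver here.
 */
#include <linux/pci.h>

static int mydev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	void __iomem *regs;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, "mydev");
	if (rc)
		goto err_disable;

	regs = pci_iomap(pdev, 0, 0);	/* length 0 == map all of BAR 0 */
	if (!regs) {
		rc = -ENOMEM;
		goto err_release;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, regs);
	return 0;

err_release:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	return rc;
}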
/* Main entry */
static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int error = -ENOMEM;
	struct memstick_host *host;
	struct r592_device *dev;

	/* Allocate memory */
	host = memstick_alloc_host(sizeof(struct r592_device), &pdev->dev);
	if (!host)
		goto error1;

	dev = memstick_priv(host);
	dev->host = host;
	dev->pci_dev = pdev;
	pci_set_drvdata(pdev, dev);

	/* pci initialization */
	error = pci_enable_device(pdev);
	if (error)
		goto error2;

	pci_set_master(pdev);
	error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (error)
		goto error3;

	error = pci_request_regions(pdev, DRV_NAME);
	if (error)
		goto error3;

	dev->mmio = pci_ioremap_bar(pdev, 0);
	if (!dev->mmio)
		goto error4;

	dev->irq = pdev->irq;
	spin_lock_init(&dev->irq_lock);
	spin_lock_init(&dev->io_thread_lock);
	init_completion(&dev->dma_done);
	INIT_KFIFO(dev->pio_fifo);
	setup_timer(&dev->detect_timer,
		    r592_detect_timer, (unsigned long)dev);

	/* Host initialization */
	host->caps = MEMSTICK_CAP_PAR4;
	host->request = r592_submit_req;
	host->set_param = r592_set_param;

	r592_check_dma(dev);

	dev->io_thread = kthread_run(r592_process_thread, dev, "r592_io");
	if (IS_ERR(dev->io_thread)) {
		error = PTR_ERR(dev->io_thread);
		goto error5;
	}

	/* This is just a precaution, so don't fail */
	dev->dummy_dma_page = pci_alloc_consistent(pdev, PAGE_SIZE,
		&dev->dummy_dma_page_physical_address);

	r592_stop_dma(dev, 0);

	if (request_irq(dev->irq, &r592_irq, IRQF_SHARED,
			DRV_NAME, dev))
		goto error6;

	r592_update_card_detect(dev);
	if (memstick_add_host(host))
		goto error7;

	message("driver successfully loaded");
	return 0;
error7:
	free_irq(dev->irq, dev);
error6:
	if (dev->dummy_dma_page)
		pci_free_consistent(pdev, PAGE_SIZE, dev->dummy_dma_page,
			dev->dummy_dma_page_physical_address);

	kthread_stop(dev->io_thread);
error5:
	iounmap(dev->mmio);
error4:
	pci_release_regions(pdev);
error3:
	pci_disable_device(pdev);
error2:
	memstick_free_host(host);
error1:
	return error;
}
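/*
 * A minimal sketch of the same setup using the managed (devres) helpers,
 * which drop most of the error ladder above because the PCI core releases
 * the resources automatically on probe failure or unbind. Hypothetical
 * driver; this is not how r592 itself is written.
 */
#include <linux/pci.h>

static int mydev_probe_managed(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	void __iomem *mmio;
	int rc;

	rc = pcim_enable_device(pdev);	/* paired disable is automatic */
	if (rc)
		return rc;

	/* request + ioremap BAR 0 in one managed call */
	rc = pcim_iomap_regions(pdev, 1 << 0, "mydev");
	if (rc)
		return rc;

	mmio = pcim_iomap_table(pdev)[0];
	pci_set_master(pdev);
	pci_set_drvdata(pdev, mmio);
	return 0;
}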
static int sercos3_pci_probe(struct pci_dev *dev,
			     const struct pci_device_id *id)
{
	struct uio_info *info;
	struct sercos3_priv *priv;
	int i;

	info = kzalloc(sizeof(struct uio_info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	priv = kzalloc(sizeof(struct sercos3_priv), GFP_KERNEL);
	if (!priv)
		goto out_free;

	if (pci_enable_device(dev))
		goto out_free_priv;

	if (pci_request_regions(dev, "sercos3"))
		goto out_disable;

	/* we only need PCI BAR's 0, 2, 3, 4, 5 */
	if (sercos3_setup_iomem(dev, info, 0, 0))
		goto out_unmap;
	if (sercos3_setup_iomem(dev, info, 1, 2))
		goto out_unmap;
	if (sercos3_setup_iomem(dev, info, 2, 3))
		goto out_unmap;
	if (sercos3_setup_iomem(dev, info, 3, 4))
		goto out_unmap;
	if (sercos3_setup_iomem(dev, info, 4, 5))
		goto out_unmap;

	spin_lock_init(&priv->ier0_cache_lock);
	info->priv = priv;
	info->name = "Sercos_III_PCI";
	info->version = "0.0.1";
	info->irq = dev->irq;
	info->irq_flags = IRQF_SHARED;
	info->handler = sercos3_handler;
	info->irqcontrol = sercos3_irqcontrol;

	pci_set_drvdata(dev, info);

	if (uio_register_device(&dev->dev, info))
		goto out_unmap;
	return 0;

out_unmap:
	for (i = 0; i < 5; i++) {
		if (info->mem[i].internal_addr)
			iounmap(info->mem[i].internal_addr);
	}
	pci_release_regions(dev);
out_disable:
	pci_disable_device(dev);
out_free_priv:
	kfree(priv);
out_free:
	kfree(info);
	return -ENODEV;
}
static int orinoco_plx_init_one(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	int err;
	struct orinoco_private *priv;
	struct orinoco_pci_card *card;
	void __iomem *hermes_io, *attr_io, *bridge_io;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRIVER_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources\n");
		goto fail_resources;
	}

	bridge_io = pci_iomap(pdev, 1, 0);
	if (!bridge_io) {
		printk(KERN_ERR PFX "Cannot map bridge registers\n");
		err = -EIO;
		goto fail_map_bridge;
	}

	attr_io = pci_iomap(pdev, 2, 0);
	if (!attr_io) {
		printk(KERN_ERR PFX "Cannot map PCMCIA attributes\n");
		err = -EIO;
		goto fail_map_attr;
	}

	hermes_io = pci_iomap(pdev, 3, 0);
	if (!hermes_io) {
		printk(KERN_ERR PFX "Cannot map chipset registers\n");
		err = -EIO;
		goto fail_map_hermes;
	}

	/* Allocate network device */
	priv = alloc_orinocodev(sizeof(*card), &pdev->dev,
				orinoco_plx_cor_reset, NULL);
	if (!priv) {
		printk(KERN_ERR PFX "Cannot allocate network device\n");
		err = -ENOMEM;
		goto fail_alloc;
	}

	card = priv->card;
	card->bridge_io = bridge_io;
	card->attr_io = attr_io;

	hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING);

	err = request_irq(pdev->irq, orinoco_interrupt, IRQF_SHARED,
			  DRIVER_NAME, priv);
	if (err) {
		printk(KERN_ERR PFX "Cannot allocate IRQ %d\n", pdev->irq);
		err = -EBUSY;
		goto fail_irq;
	}

	err = orinoco_plx_hw_init(card);
	if (err) {
		printk(KERN_ERR PFX "Hardware initialization failed\n");
		goto fail;
	}

	err = orinoco_plx_cor_reset(priv);
	if (err) {
		printk(KERN_ERR PFX "Initial reset failed\n");
		goto fail;
	}

	err = orinoco_init(priv);
	if (err) {
		printk(KERN_ERR PFX "orinoco_init() failed\n");
		goto fail;
	}

	err = orinoco_if_add(priv, 0, 0, NULL);
	if (err) {
		printk(KERN_ERR PFX "orinoco_if_add() failed\n");
		goto fail;
	}

	pci_set_drvdata(pdev, priv);
	return 0;

fail:
	free_irq(pdev->irq, priv);
fail_irq:
	pci_set_drvdata(pdev, NULL);
	free_orinocodev(priv);
fail_alloc:
	pci_iounmap(pdev, hermes_io);
fail_map_hermes:
	pci_iounmap(pdev, attr_io);
fail_map_attr:
	pci_iounmap(pdev, bridge_io);
fail_map_bridge:
	pci_release_regions(pdev);
fail_resources:
	pci_disable_device(pdev);
	return err;
}
static void fnic_remove(struct pci_dev *pdev)
{
	struct fnic *fnic = pci_get_drvdata(pdev);
	struct fc_lport *lp = fnic->lport;
	unsigned long flags;

	/*
	 * Mark state so that the workqueue thread stops forwarding
	 * received frames and link events to the local port. ISR and
	 * other threads that can queue work items will also stop
	 * creating work items on the fnic workqueue
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->stop_rx_link_events = 1;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
		del_timer_sync(&fnic->notify_timer);

	/*
	 * Flush the fnic event queue. After this call, there should
	 * be no event queued for this fnic device in the workqueue
	 */
	flush_workqueue(fnic_event_queue);
	skb_queue_purge(&fnic->frame_queue);
	skb_queue_purge(&fnic->tx_queue);

	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
		del_timer_sync(&fnic->fip_timer);
		skb_queue_purge(&fnic->fip_frame_queue);
		fnic_fcoe_reset_vlans(fnic);
		fnic_fcoe_evlist_free(fnic);
	}

	/*
	 * Log off the fabric. This stops all remote ports, dns port,
	 * logs off the fabric. This flushes all rport, disc, lport work
	 * before returning
	 */
	fc_fabric_logoff(fnic->lport);

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->in_remove = 1;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	fcoe_ctlr_destroy(&fnic->ctlr);
	fc_lport_destroy(lp);
	fnic_stats_debugfs_remove(fnic);

	/*
	 * This stops the fnic device, masks all interrupts. Completed
	 * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are
	 * cleaned up
	 */
	fnic_cleanup(fnic);

	BUG_ON(!skb_queue_empty(&fnic->frame_queue));
	BUG_ON(!skb_queue_empty(&fnic->tx_queue));

	spin_lock_irqsave(&fnic_list_lock, flags);
	list_del(&fnic->list);
	spin_unlock_irqrestore(&fnic_list_lock, flags);

	fc_remove_host(fnic->lport->host);
	scsi_remove_host(fnic->lport->host);
	fc_exch_mgr_free(fnic->lport);
	vnic_dev_notify_unset(fnic->vdev);
	fnic_free_intr(fnic);
	fnic_free_vnic_resources(fnic);
	fnic_clear_intr_mode(fnic);
	vnic_dev_close(fnic->vdev);
	vnic_dev_unregister(fnic->vdev);
	fnic_iounmap(fnic);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	scsi_host_put(lp->host);
}
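/*
 * fnic_remove() above follows the usual teardown order for asynchronous
 * producers: stop new work first, then wait out or flush what is already
 * in flight, and only then free. A condensed sketch of that ordering with
 * placeholder names (myctx, mydev_shutdown):
 */
#include <linux/timer.h>
#include <linux/workqueue.h>

struct myctx {
	struct timer_list timer;
	bool stopping;
};

static void mydev_shutdown(struct myctx *c, struct workqueue_struct *wq)
{
	c->stopping = true;		/* 1. tell producers to stop */
	del_timer_sync(&c->timer);	/* 2. wait for timer handlers to finish */
	flush_workqueue(wq);		/* 3. drain work already queued */
	/* 4. only now free the resources the work items used */
}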
static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	struct i5100_priv *priv;
	struct pci_dev *ch0mm, *ch1mm, *einj;
	int ret = 0;
	u32 dw;
	int ranksperch;

	if (PCI_FUNC(pdev->devfn) != 1)
		return -ENODEV;

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		ret = rc;
		goto bail;
	}

	/* ECC enabled? */
	pci_read_config_dword(pdev, I5100_MC, &dw);
	if (!i5100_mc_errdeten(dw)) {
		printk(KERN_INFO "i5100_edac: ECC not enabled.\n");
		ret = -ENODEV;
		goto bail_pdev;
	}

	/* figure out how many ranks, from strapped state of 48GB_Mode input */
	pci_read_config_dword(pdev, I5100_MS, &dw);
	ranksperch = !!(dw & (1 << 8)) * 2 + 4;

	/* enable error reporting... */
	pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw);
	dw &= ~I5100_FERR_NF_MEM_ANY_MASK;
	pci_write_config_dword(pdev, I5100_EMASK_MEM, dw);

	/* device 21, func 0, Channel 0 Memory Map, Error Flag/Mask, etc... */
	ch0mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
				    PCI_DEVICE_ID_INTEL_5100_21, 0);
	if (!ch0mm) {
		ret = -ENODEV;
		goto bail_pdev;
	}

	rc = pci_enable_device(ch0mm);
	if (rc < 0) {
		ret = rc;
		goto bail_ch0;
	}

	/* device 22, func 0, Channel 1 Memory Map, Error Flag/Mask, etc... */
	ch1mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
				    PCI_DEVICE_ID_INTEL_5100_22, 0);
	if (!ch1mm) {
		ret = -ENODEV;
		goto bail_disable_ch0;
	}

	rc = pci_enable_device(ch1mm);
	if (rc < 0) {
		ret = rc;
		goto bail_ch1;
	}

	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = 2;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = ranksperch;
	layers[1].is_virt_csrow = true;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*priv));
	if (!mci) {
		ret = -ENOMEM;
		goto bail_disable_ch1;
	}

	/* device 19, func 0, Error injection */
	einj = pci_get_device_func(PCI_VENDOR_ID_INTEL,
				   PCI_DEVICE_ID_INTEL_5100_19, 0);
	if (!einj) {
		ret = -ENODEV;
		goto bail_mc_free;
	}

	rc = pci_enable_device(einj);
	if (rc < 0) {
		ret = rc;
		goto bail_einj;
	}

	mci->pdev = &pdev->dev;

	priv = mci->pvt_info;
	priv->ranksperchan = ranksperch;
	priv->mc = pdev;
	priv->ch0mm = ch0mm;
	priv->ch1mm = ch1mm;
	priv->einj = einj;

	INIT_DELAYED_WORK(&(priv->i5100_scrubbing), i5100_refresh_scrubbing);

	/* If scrubbing was already enabled by the bios, start maintaining it */
	pci_read_config_dword(pdev, I5100_MC, &dw);
	if (i5100_mc_scrben(dw)) {
		priv->scrub_enable = 1;
		schedule_delayed_work(&(priv->i5100_scrubbing),
				      I5100_SCRUB_REFRESH_RATE);
	}

	i5100_init_dimm_layout(pdev, mci);
	i5100_init_interleaving(pdev, mci);

	mci->mtype_cap = MEM_FLAG_FB_DDR2;
	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = "i5100_edac.c";
	mci->mod_ver = "not versioned";
	mci->ctl_name = "i5100";
	mci->dev_name = pci_name(pdev);
	mci->ctl_page_to_phys = NULL;

	mci->edac_check = i5100_check_error;
	mci->set_sdram_scrub_rate = i5100_set_scrub_rate;
	mci->get_sdram_scrub_rate = i5100_get_scrub_rate;

	priv->inject_channel = 0;
	priv->inject_hlinesel = 0;
	priv->inject_deviceptr1 = 0;
	priv->inject_deviceptr2 = 0;
	priv->inject_eccmask1 = 0;
	priv->inject_eccmask2 = 0;

	i5100_init_csrows(mci);

	/* this strange construction seems to be in every driver, dunno why */
	switch (edac_op_state) {
	case EDAC_OPSTATE_POLL:
	case EDAC_OPSTATE_NMI:
		break;
	default:
		edac_op_state = EDAC_OPSTATE_POLL;
		break;
	}

	if (edac_mc_add_mc(mci)) {
		ret = -ENODEV;
		goto bail_scrub;
	}

	i5100_setup_debugfs(mci);

	return ret;

bail_scrub:
	priv->scrub_enable = 0;
	cancel_delayed_work_sync(&(priv->i5100_scrubbing));
	pci_disable_device(einj);
bail_einj:
	pci_dev_put(einj);
bail_mc_free:
	edac_mc_free(mci);
bail_disable_ch1:
	pci_disable_device(ch1mm);
bail_ch1:
	pci_dev_put(ch1mm);
bail_disable_ch0:
	pci_disable_device(ch0mm);
bail_ch0:
	pci_dev_put(ch0mm);
bail_pdev:
	pci_disable_device(pdev);
bail:
	return ret;
}
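/*
 * i5100_init_one() looks up its companion devices with get-style calls,
 * each of which takes a reference that must be dropped with pci_dev_put()
 * on every exit path (pci_dev_put() accepts NULL). A short sketch of that
 * pairing; MY_COMPANION_ID is a placeholder device ID:
 */
#include <linux/pci.h>

#define MY_COMPANION_ID 0x1234	/* placeholder */

static int use_companion(void)
{
	struct pci_dev *comp;

	comp = pci_get_device(PCI_VENDOR_ID_INTEL, MY_COMPANION_ID, NULL);
	if (!comp)
		return -ENODEV;	/* nothing to put: lookup failed */

	/* ... program the companion device ... */

	pci_dev_put(comp);	/* drop the reference the lookup took */
	return 0;
}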
static int __devinit et131x_pci_setup(struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	int result = -EBUSY;
	int pm_cap;
	bool pci_using_dac;
	struct net_device *netdev;
	struct et131x_adapter *adapter;

	/* Enable the device via the PCI subsystem */
	if (pci_enable_device(pdev) != 0) {
		dev_err(&pdev->dev, "pci_enable_device() failed\n");
		return -EIO;
	}

	/* Perform some basic PCI checks */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Can't find PCI device's base address\n");
		goto err_disable;
	}

	if (pci_request_regions(pdev, DRIVER_NAME)) {
		dev_err(&pdev->dev, "Can't get PCI resources\n");
		goto err_disable;
	}

	/* Enable PCI bus mastering */
	pci_set_master(pdev);

	/* Query PCI for Power Mgmt Capabilities
	 *
	 * NOTE: Now reading PowerMgmt in another location; is this still
	 * needed?
	 */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capabilities\n");
		result = -EIO;
		goto err_release_res;
	}

	/* Check the DMA addressing support of this device */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = true;

		result = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (result != 0) {
			dev_err(&pdev->dev,
				"Unable to obtain 64 bit DMA for consistent allocations\n");
			goto err_release_res;
		}
	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		pci_using_dac = false;
	} else {
		dev_err(&pdev->dev, "No usable DMA addressing method\n");
		result = -EIO;
		goto err_release_res;
	}

	/* Allocate netdev and private adapter structs */
	netdev = et131x_device_alloc();
	if (netdev == NULL) {
		dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
		result = -ENOMEM;
		goto err_release_res;
	}
	adapter = et131x_adapter_init(netdev, pdev);

	/* Initialise the PCI setup for the device */
	et131x_pci_init(adapter, pdev);

	/* Map the bus-relative registers to system virtual memory */
	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (adapter->regs == NULL) {
		dev_err(&pdev->dev, "Cannot map device registers\n");
		result = -ENOMEM;
		goto err_free_dev;
	}

	/* If Phy COMA mode was enabled when we went down, disable it here. */
	writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);

	/* Issue a global reset to the et1310 */
	et131x_soft_reset(adapter);

	/* Disable all interrupts (paranoid) */
	et131x_disable_interrupts(adapter);

	/* Allocate DMA memory */
	result = et131x_adapter_memory_alloc(adapter);
	if (result != 0) {
		dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
		goto err_iounmap;
	}

	/* Init send data structures */
	et131x_init_send(adapter);

	/*
	 * Set up the task structure for the ISR's deferred handler
	 */
	INIT_WORK(&adapter->task, et131x_isr_handler);

	/* Copy address into the net_device struct */
	memcpy(netdev->dev_addr, adapter->CurrentAddress, ETH_ALEN);

	/* Setup et1310 as per the documentation */
	et131x_adapter_setup(adapter);

	/* Create a timer to count errors received by the NIC */
	init_timer(&adapter->ErrorTimer);

	adapter->ErrorTimer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000;
	adapter->ErrorTimer.function = et131x_error_timer_handler;
	adapter->ErrorTimer.data = (unsigned long)adapter;

	/* Initialize link state */
	et131x_link_detection_handler((unsigned long)adapter);

	/* Initialize variable for counting how long we do not have
	   link status */
	adapter->PoMgmt.TransPhyComaModeOnBoot = 0;

	/* We can enable interrupts now
	 *
	 *  NOTE - Because registration of interrupt handler is done in the
	 *         device's open(), defer enabling device interrupts to that
	 *         point
	 */

	/* Register the net_device struct with the Linux network layer */
	result = register_netdev(netdev);
	if (result != 0) {
		dev_err(&pdev->dev, "register_netdev() failed\n");
		goto err_mem_free;
	}

	/* Register the net_device struct with the PCI subsystem. Save a copy
	 * of the PCI config space for this device now that the device has
	 * been initialized, just in case it needs to be quickly restored.
	 */
	pci_set_drvdata(pdev, netdev);
	pci_save_state(adapter->pdev);

	return result;

err_mem_free:
	et131x_adapter_memory_free(adapter);
err_iounmap:
	iounmap(adapter->regs);
err_free_dev:
	pci_dev_put(pdev);
	free_netdev(netdev);
err_release_res:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	return result;
}
static int vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct vr_nor_mtd *p = NULL;
	unsigned int exp_timing_cs0;
	int err;

	err = pci_enable_device(dev);
	if (err)
		goto out;

	err = pci_request_regions(dev, DRV_NAME);
	if (err)
		goto disable_dev;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	err = -ENOMEM;
	if (!p)
		goto release;

	p->dev = dev;

	err = vr_nor_init_maps(p);
	if (err)
		goto release;

	err = vr_nor_mtd_setup(p);
	if (err)
		goto destroy_maps;

	err = vr_nor_init_partitions(p);
	if (err)
		goto destroy_mtd_setup;

	pci_set_drvdata(dev, p);

	return 0;

destroy_mtd_setup:
	map_destroy(p->info);

destroy_maps:
	/* write-protect the flash bank */
	exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
	exp_timing_cs0 &= ~TIMING_WR_EN;
	writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);

	/* unmap the flash window */
	iounmap(p->map.virt);

	/* unmap the csr window */
	iounmap(p->csr_base);

release:
	kfree(p);
	pci_release_regions(dev);

disable_dev:
	pci_disable_device(dev);

out:
	return err;
}
static s32 __devinit pch_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	s32 ret;
	unsigned long flags;
	struct pch_dev *chip;

	chip = kzalloc(sizeof(struct pch_dev), GFP_KERNEL);
	if (chip == NULL)
		return -ENOMEM;

	/* enable the 1588 pci device */
	ret = pci_enable_device(pdev);
	if (ret != 0) {
		dev_err(&pdev->dev, "could not enable the pci device\n");
		goto err_pci_en;
	}

	chip->mem_base = pci_resource_start(pdev, IO_MEM_BAR);
	if (!chip->mem_base) {
		dev_err(&pdev->dev, "could not locate IO memory address\n");
		ret = -ENODEV;
		goto err_pci_start;
	}

	/* retrieve the available length of the IO memory space */
	chip->mem_size = pci_resource_len(pdev, IO_MEM_BAR);

	/* allocate the memory for the device registers */
	if (!request_mem_region(chip->mem_base, chip->mem_size, "1588_regs")) {
		dev_err(&pdev->dev,
			"could not allocate register memory space\n");
		ret = -EBUSY;
		goto err_req_mem_region;
	}

	/* get the virtual address to the 1588 registers */
	chip->regs = ioremap(chip->mem_base, chip->mem_size);
	if (!chip->regs) {
		dev_err(&pdev->dev, "Could not get virtual address\n");
		ret = -ENOMEM;
		goto err_ioremap;
	}

	chip->caps = ptp_pch_caps;
	chip->ptp_clock = ptp_clock_register(&chip->caps);
	if (IS_ERR(chip->ptp_clock)) {
		ret = PTR_ERR(chip->ptp_clock);
		goto err_ptp_clock_reg;
	}

	spin_lock_init(&chip->register_lock);

	ret = request_irq(pdev->irq, &isr, IRQF_SHARED, KBUILD_MODNAME, chip);
	if (ret != 0) {
		dev_err(&pdev->dev, "failed to get irq %d\n", pdev->irq);
		goto err_req_irq;
	}

	/* indicate success */
	chip->irq = pdev->irq;
	chip->pdev = pdev;
	pci_set_drvdata(pdev, chip);

	spin_lock_irqsave(&chip->register_lock, flags);
	/* reset the ieee1588 h/w */
	pch_reset(chip);

	iowrite32(DEFAULT_ADDEND, &chip->regs->addend);
	iowrite32(1, &chip->regs->trgt_lo);
	iowrite32(0, &chip->regs->trgt_hi);
	iowrite32(PCH_TSE_TTIPEND, &chip->regs->event);
	/* Version: IEEE1588 v1 and IEEE1588-2008, Mode: All Event, Locked */
	iowrite32(0x80020000, &chip->regs->ch_control);

	pch_eth_enable_set(chip);

	if (strcmp(pch_param.station, "00:00:00:00:00:00") != 0) {
		if (pch_set_station_address(pch_param.station, pdev) != 0) {
			dev_err(&pdev->dev,
				"Invalid station address parameter\n"
				"Module loaded but station address not set correctly\n");
		}
	}
	spin_unlock_irqrestore(&chip->register_lock, flags);
	return 0;

err_req_irq:
	ptp_clock_unregister(chip->ptp_clock);
err_ptp_clock_reg:
	iounmap(chip->regs);
	chip->regs = NULL;

err_ioremap:
	release_mem_region(chip->mem_base, chip->mem_size);

err_req_mem_region:
	chip->mem_base = 0;

err_pci_start:
	pci_disable_device(pdev);

err_pci_en:
	kfree(chip);
	dev_err(&pdev->dev, "probe failed(ret=0x%x)\n", ret);

	return ret;
}
static void __devexit vx855_remove(struct pci_dev *pdev)
{
	mfd_remove_devices(&pdev->dev);
	pci_disable_device(pdev);
}
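/*
 * A remove() callback is the probe unwind run unconditionally: resources go
 * away in reverse acquisition order. A sketch matching the probe sketch near
 * the top of this file (hypothetical driver; drvdata holds the mapped BAR):
 */
static void mydev_remove(struct pci_dev *pdev)
{
	void __iomem *regs = pci_get_drvdata(pdev);

	pci_iounmap(pdev, regs);	/* undo pci_iomap() */
	pci_release_regions(pdev);	/* undo pci_request_regions() */
	pci_disable_device(pdev);	/* undo pci_enable_device() */
}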
static int __devinit enic_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct enic *enic;
	int using_dac = 0;
	unsigned int i;
	int err;

	netdev = alloc_etherdev(sizeof(struct enic));
	if (!netdev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	enic = netdev_priv(netdev);
	enic->netdev = netdev;
	enic->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting.\n");
		goto err_out_free_netdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot request PCI regions, aborting.\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			printk(KERN_ERR PFX
				"No usable DMA configuration, aborting.\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			printk(KERN_ERR PFX
				"Unable to obtain 32-bit DMA "
				"for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
		if (err) {
			printk(KERN_ERR PFX
				"Unable to obtain 40-bit DMA "
				"for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
		using_dac = 1;
	}

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		enic->bar[i].len = pci_resource_len(pdev, i);
		enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
		if (!enic->bar[i].vaddr) {
			printk(KERN_ERR PFX
				"Cannot memory-map BAR %d, aborting.\n", i);
			err = -ENODEV;
			goto err_out_iounmap;
		}
		enic->bar[i].bus_addr = pci_resource_start(pdev, i);
	}

	enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
		ARRAY_SIZE(enic->bar));
	if (!enic->vdev) {
		printk(KERN_ERR PFX "vNIC registration failed, aborting.\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	err = enic_dev_open(enic);
	if (err) {
		printk(KERN_ERR PFX "vNIC dev open failed, aborting.\n");
		goto err_out_vnic_unregister;
	}

	netif_carrier_off(netdev);

	err = vnic_dev_init(enic->vdev, 0);
	if (err) {
		printk(KERN_ERR PFX "vNIC dev init failed, aborting.\n");
		goto err_out_dev_close;
	}

	err = enic_dev_init(enic);
	if (err) {
		printk(KERN_ERR PFX "Device initialization failed, aborting.\n");
		goto err_out_dev_close;
	}

	init_timer(&enic->notify_timer);
	enic->notify_timer.function = enic_notify_timer;
	enic->notify_timer.data = (unsigned long)enic;

	INIT_WORK(&enic->reset, enic_reset);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);

	spin_lock_init(&enic->devcmd_lock);

	enic->port_mtu = enic->config.mtu;
	(void)enic_change_mtu(netdev, enic->port_mtu);

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		printk(KERN_ERR PFX "Invalid MAC address, aborting.\n");
		goto err_out_dev_deinit;
	}

	netdev->netdev_ops = &enic_netdev_ops;
	netdev->watchdog_timeo = 2 * HZ;
	netdev->ethtool_ops = &enic_ethtool_ops;

	netdev->features |= NETIF_F_HW_VLAN_TX |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, LRO))
		netdev->features |= NETIF_F_LRO;
	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);

	enic->lro_mgr.max_aggr = ENIC_LRO_MAX_AGGR;
	enic->lro_mgr.max_desc = ENIC_LRO_MAX_DESC;
	enic->lro_mgr.lro_arr = enic->lro_desc;
	enic->lro_mgr.get_skb_header = enic_get_skb_header;
	enic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
	enic->lro_mgr.dev = netdev;
	enic->lro_mgr.ip_summed = CHECKSUM_COMPLETE;
	enic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

	err = register_netdev(netdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, aborting.\n");
		goto err_out_dev_deinit;
	}

	return 0;

err_out_dev_deinit:
	enic_dev_deinit(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_vnic_unregister:
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	return err;
}
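/*
 * enic_probe() sets the streaming and coherent DMA masks separately, with a
 * 40-bit to 32-bit fallback. Later kernels provide
 * dma_set_mask_and_coherent(), which sets both in one call; a sketch of the
 * same fallback with that helper (not available in the tree this driver
 * targets):
 */
#include <linux/dma-mapping.h>

static int mydev_set_dma_masks(struct pci_dev *pdev, bool *using_dac)
{
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40))) {
		*using_dac = true;	/* 40-bit addressing works */
		return 0;
	}

	*using_dac = false;
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}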
static int __devinit snd_ad1889_create(struct snd_card *card,
				       struct pci_dev *pci,
				       struct snd_ad1889 **rchip)
{
	int err;
	struct snd_ad1889 *chip;
	static struct snd_device_ops ops = {
		.dev_free = snd_ad1889_dev_free,
	};

	*rchip = NULL;

	if ((err = pci_enable_device(pci)) < 0)
		return err;

	if (pci_set_dma_mask(pci, DMA_BIT_MASK(32)) < 0 ||
	    pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32)) < 0) {
		printk(KERN_ERR PFX "error setting 32-bit DMA mask.\n");
		pci_disable_device(pci);
		return -ENXIO;
	}

	if ((chip = kzalloc(sizeof(*chip), GFP_KERNEL)) == NULL) {
		pci_disable_device(pci);
		return -ENOMEM;
	}

	chip->card = card;
	card->private_data = chip;
	chip->pci = pci;
	chip->irq = -1;

	if ((err = pci_request_regions(pci, card->driver)) < 0)
		goto free_and_ret;

	chip->bar = pci_resource_start(pci, 0);
	chip->iobase = pci_ioremap_bar(pci, 0);
	if (chip->iobase == NULL) {
		printk(KERN_ERR PFX "unable to reserve region.\n");
		err = -EBUSY;
		goto free_and_ret;
	}

	pci_set_master(pci);

	spin_lock_init(&chip->lock);

	if (request_irq(pci->irq, snd_ad1889_interrupt,
			IRQF_SHARED, KBUILD_MODNAME, chip)) {
		printk(KERN_ERR PFX "cannot obtain IRQ %d\n", pci->irq);
		snd_ad1889_free(chip);
		return -EBUSY;
	}

	chip->irq = pci->irq;
	synchronize_irq(chip->irq);

	if ((err = snd_ad1889_init(chip)) < 0) {
		snd_ad1889_free(chip);
		return err;
	}

	if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
		snd_ad1889_free(chip);
		return err;
	}

	snd_card_set_dev(card, &pci->dev);

	*rchip = chip;

	return 0;

free_and_ret:
	kfree(chip);
	pci_disable_device(pci);

	return err;
}
/* driver entry point */
static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int ret = -ENODEV;
	resource_size_t csr_base, mem_base;
	unsigned long csr_len, mem_len;
	struct denali_nand_info *denali;

	denali = kzalloc(sizeof(*denali), GFP_KERNEL);
	if (!denali)
		return -ENOMEM;

	ret = pci_enable_device(dev);
	if (ret) {
		printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
		goto failed_alloc_memory;
	}

	if (id->driver_data == INTEL_CE4100) {
		/* Due to a silicon limitation, we can only support
		 * ONFI timing mode 1 and below.
		 */
		if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
			printk(KERN_ERR "Intel CE4100 only supports"
					" ONFI timing mode 1 or below\n");
			ret = -EINVAL;
			goto failed_enable_dev;
		}
		denali->platform = INTEL_CE4100;
		mem_base = pci_resource_start(dev, 0);
		mem_len = pci_resource_len(dev, 1);
		csr_base = pci_resource_start(dev, 1);
		csr_len = pci_resource_len(dev, 1);
	} else {
		denali->platform = INTEL_MRST;
		csr_base = pci_resource_start(dev, 0);
		csr_len = pci_resource_len(dev, 0);
		mem_base = pci_resource_start(dev, 1);
		mem_len = pci_resource_len(dev, 1);
		if (!mem_len) {
			mem_base = csr_base + csr_len;
			mem_len = csr_len;
		}
	}

	/* Is 32-bit DMA supported? */
	ret = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
	if (ret) {
		printk(KERN_ERR "Spectra: no usable DMA configuration\n");
		goto failed_enable_dev;
	}
	denali->buf.dma_buf = dma_map_single(&dev->dev, denali->buf.buf,
					     DENALI_BUF_SIZE,
					     DMA_BIDIRECTIONAL);

	if (dma_mapping_error(&dev->dev, denali->buf.dma_buf)) {
		dev_err(&dev->dev, "Spectra: failed to map DMA buffer\n");
		goto failed_enable_dev;
	}

	pci_set_master(dev);
	denali->dev = &dev->dev;
	denali->mtd.dev.parent = &dev->dev;

	ret = pci_request_regions(dev, DENALI_NAND_NAME);
	if (ret) {
		printk(KERN_ERR "Spectra: Unable to request memory regions\n");
		goto failed_dma_map;
	}

	denali->flash_reg = ioremap_nocache(csr_base, csr_len);
	if (!denali->flash_reg) {
		printk(KERN_ERR "Spectra: Unable to remap memory region\n");
		ret = -ENOMEM;
		goto failed_req_regions;
	}

	denali->flash_mem = ioremap_nocache(mem_base, mem_len);
	if (!denali->flash_mem) {
		printk(KERN_ERR "Spectra: ioremap_nocache failed!");
		ret = -ENOMEM;
		goto failed_remap_reg;
	}

	denali_hw_init(denali);
	denali_drv_init(denali);

	/* denali_isr register is done after all the hardware
	 * initialization is finished */
	if (request_irq(dev->irq, denali_isr, IRQF_SHARED,
			DENALI_NAND_NAME, denali)) {
		printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
		ret = -ENODEV;
		goto failed_remap_mem;
	}

	/* now that our ISR is registered, we can enable interrupts */
	denali_set_intr_modes(denali, true);

	pci_set_drvdata(dev, denali);

	denali->mtd.name = "denali-nand";
	denali->mtd.owner = THIS_MODULE;
	denali->mtd.priv = &denali->nand;

	/* register the driver with the NAND core subsystem */
	denali->nand.select_chip = denali_select_chip;
	denali->nand.cmdfunc = denali_cmdfunc;
	denali->nand.read_byte = denali_read_byte;
	denali->nand.waitfunc = denali_waitfunc;

	/* scan for NAND devices attached to the controller
	 * this is the first stage in a two step process to register
	 * with the nand subsystem */
	if (nand_scan_ident(&denali->mtd, denali->max_banks, NULL)) {
		ret = -ENXIO;
		goto failed_req_irq;
	}

	/* MTD supported page sizes vary by kernel. We validate our
	 * kernel supports the device here.
	 */
	if (denali->mtd.writesize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) {
		ret = -ENODEV;
		printk(KERN_ERR "Spectra: device size not supported by this "
				"version of MTD.");
		goto failed_req_irq;
	}

	/* support for multi nand
	 * MTD knows nothing about multi nand,
	 * so we should tell it the real pagesize
	 * and anything necessary
	 */
	denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);
	denali->nand.chipsize <<= (denali->devnum - 1);
	denali->nand.page_shift += (denali->devnum - 1);
	denali->nand.pagemask = (denali->nand.chipsize >>
						denali->nand.page_shift) - 1;
	denali->nand.bbt_erase_shift += (denali->devnum - 1);
	denali->nand.phys_erase_shift = denali->nand.bbt_erase_shift;
	denali->nand.chip_shift += (denali->devnum - 1);
	denali->mtd.writesize <<= (denali->devnum - 1);
	denali->mtd.oobsize <<= (denali->devnum - 1);
	denali->mtd.erasesize <<= (denali->devnum - 1);
	denali->mtd.size = denali->nand.numchips * denali->nand.chipsize;
	denali->bbtskipbytes *= denali->devnum;

	/* second stage of the NAND scan
	 * this stage requires information regarding ECC and
	 * bad block management. */

	/* Bad block management */
	denali->nand.bbt_td = &bbt_main_descr;
	denali->nand.bbt_md = &bbt_mirror_descr;

	/* skip the scan for now until we have OOB read and write support */
	denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
	denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;

	/* Denali Controller only supports 15bit and 8bit ECC in MRST,
	 * so just let controller do 15bit ECC for MLC and 8bit ECC for
	 * SLC if possible.
	 * */
	if (denali->nand.cellinfo & 0xc &&
			(denali->mtd.oobsize > (denali->bbtskipbytes +
			ECC_15BITS * (denali->mtd.writesize /
			ECC_SECTOR_SIZE)))) {
		/* if MLC OOB size is large enough, use 15bit ECC */
		denali->nand.ecc.layout = &nand_15bit_oob;
		denali->nand.ecc.bytes = ECC_15BITS;
		iowrite32(15, denali->flash_reg + ECC_CORRECTION);
	} else if (denali->mtd.oobsize < (denali->bbtskipbytes +
			ECC_8BITS * (denali->mtd.writesize /
			ECC_SECTOR_SIZE))) {
		printk(KERN_ERR "Your NAND chip OOB is not large enough to"
				" contain 8bit ECC correction codes");
		goto failed_req_irq;
	} else {
		denali->nand.ecc.layout = &nand_8bit_oob;
		denali->nand.ecc.bytes = ECC_8BITS;
		iowrite32(8, denali->flash_reg + ECC_CORRECTION);
	}

	denali->nand.ecc.bytes *= denali->devnum;
	denali->nand.ecc.layout->eccbytes *=
		denali->mtd.writesize / ECC_SECTOR_SIZE;
	denali->nand.ecc.layout->oobfree[0].offset =
		denali->bbtskipbytes + denali->nand.ecc.layout->eccbytes;
	denali->nand.ecc.layout->oobfree[0].length =
		denali->mtd.oobsize - denali->nand.ecc.layout->eccbytes -
		denali->bbtskipbytes;

	/* Let driver know the total blocks number and
	 * how many blocks contained by each nand chip.
	 * blksperchip will help driver to know how many
	 * blocks is taken by FW.
	 * */
	denali->totalblks = denali->mtd.size >> denali->nand.phys_erase_shift;
	denali->blksperchip = denali->totalblks / denali->nand.numchips;

	/* These functions are required by the NAND core framework, otherwise,
	 * the NAND core will assert. However, we don't need them, so we'll stub
	 * them out. */
	denali->nand.ecc.calculate = denali_ecc_calculate;
	denali->nand.ecc.correct = denali_ecc_correct;
	denali->nand.ecc.hwctl = denali_ecc_hwctl;

	/* override the default read operations */
	denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum;
	denali->nand.ecc.read_page = denali_read_page;
	denali->nand.ecc.read_page_raw = denali_read_page_raw;
	denali->nand.ecc.write_page = denali_write_page;
	denali->nand.ecc.write_page_raw = denali_write_page_raw;
	denali->nand.ecc.read_oob = denali_read_oob;
	denali->nand.ecc.write_oob = denali_write_oob;
	denali->nand.erase_cmd = denali_erase;

	if (nand_scan_tail(&denali->mtd)) {
		ret = -ENXIO;
		goto failed_req_irq;
	}

	ret = mtd_device_register(&denali->mtd, NULL, 0);
	if (ret) {
		dev_err(&dev->dev, "Spectra: Failed to register MTD: %d\n",
				ret);
		goto failed_req_irq;
	}
	return 0;

failed_req_irq:
	denali_irq_cleanup(dev->irq, denali);
failed_remap_mem:
	iounmap(denali->flash_mem);
failed_remap_reg:
	iounmap(denali->flash_reg);
failed_req_regions:
	pci_release_regions(dev);
failed_dma_map:
	dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
			 DMA_BIDIRECTIONAL);
failed_enable_dev:
	pci_disable_device(dev);
failed_alloc_memory:
	kfree(denali);
	return ret;
}
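/*
 * The ECC selection in denali_pci_probe() is a space check: each 512-byte
 * sector consumes a fixed number of OOB bytes for correction data, on top
 * of the bbtskipbytes reserve. A worked version of that check; the numbers
 * in the comment below are illustrative, not taken from the driver:
 */
static bool ecc_fits(unsigned int oobsize, unsigned int writesize,
		     unsigned int skip, unsigned int ecc_bytes_per_sector)
{
	unsigned int sectors = writesize / 512;	/* ECC sector size */

	return oobsize >= skip + ecc_bytes_per_sector * sectors;
}

/*
 * e.g. a 2048-byte page has 4 sectors; with a 64-byte OOB and skip = 2,
 * ecc_fits(64, 2048, 2, 14) -> 2 + 14*4 = 58 <= 64, so 14 bytes/sector fits;
 * ecc_fits(64, 2048, 2, 26) -> 2 + 26*4 = 106 > 64, so 26 bytes/sector does not.
 */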
/*
 * Initializes and registers a new TurboPAM card.
 *
 *	dev: the PCI device
 *	num: the board number
 *
 * Return: 0 if OK, <0 if error
 */
static int __devinit tpam_probe(struct pci_dev *dev,
				const struct pci_device_id *pci_id)
{
	tpam_card *card, *c;
	int i, err;

	if (pci_enable_device(dev)) {
		printk(KERN_ERR "TurboPAM: can't enable PCI device at %s\n",
		       pci_name(dev));
		return -ENODEV;
	}

	/* allocate memory for the board structure */
	if (!(card = (tpam_card *)kmalloc(sizeof(tpam_card), GFP_KERNEL))) {
		printk(KERN_ERR "TurboPAM: tpam_register_card: "
		       "kmalloc failed!\n");
		err = -ENOMEM;
		goto err_out_disable_dev;
	}

	memset((char *)card, 0, sizeof(tpam_card));

	card->irq = dev->irq;
	card->lock = SPIN_LOCK_UNLOCKED;
	sprintf(card->interface.id, "%s%d", id, cards_num);

	/* request interrupt */
	if (request_irq(card->irq, &tpam_irq, SA_INTERRUPT | SA_SHIRQ,
			card->interface.id, card)) {
		printk(KERN_ERR "TurboPAM: tpam_register_card: "
		       "could not request irq %d\n", card->irq);
		err = -EIO;
		goto err_out_free_card;
	}

	/* remap board memory */
	if (!(card->bar0 = (unsigned long) ioremap(pci_resource_start(dev, 0),
						   0x800000))) {
		printk(KERN_ERR "TurboPAM: tpam_register_card: "
		       "unable to remap bar0\n");
		err = -EIO;
		goto err_out_free_irq;
	}

	/* reset the board */
	readl(card->bar0 + TPAM_RESETPAM_REGISTER);

	/* initialisation magic :-( */
	copy_to_pam_dword(card, (void *)0x01800008, 0x00000030);
	copy_to_pam_dword(card, (void *)0x01800010, 0x00000030);
	copy_to_pam_dword(card, (void *)0x01800014, 0x42240822);
	copy_to_pam_dword(card, (void *)0x01800018, 0x07114000);
	copy_to_pam_dword(card, (void *)0x0180001c, 0x00000400);
	copy_to_pam_dword(card, (void *)0x01840070, 0x00000010);

	/* fill the ISDN link layer structure */
	card->interface.owner = THIS_MODULE;
	card->interface.channels = TPAM_NBCHANNEL;
	card->interface.maxbufsize = TPAM_MAXBUFSIZE;
	card->interface.features =
		ISDN_FEATURE_P_EURO |
		ISDN_FEATURE_L2_HDLC |
		ISDN_FEATURE_L2_MODEM |
		ISDN_FEATURE_L3_TRANS;
	card->interface.hl_hdrlen = 0;
	card->interface.command = tpam_command;
	card->interface.writebuf_skb = tpam_writebuf_skb;
	card->interface.writecmd = NULL;
	card->interface.readstat = NULL;

	/* register wrt the ISDN link layer */
	if (!register_isdn(&card->interface)) {
		printk(KERN_ERR "TurboPAM: tpam_register_card: "
		       "unable to register %s\n", card->interface.id);
		err = -EIO;
		goto err_out_iounmap;
	}
	card->id = card->interface.channels;

	/* initialize all channels */
	for (i = 0; i < TPAM_NBCHANNEL; ++i) {
		card->channels[i].num = i;
		card->channels[i].card = card;
		card->channels[i].ncoid = TPAM_NCOID_INVALID;
		card->channels[i].hdlc = 0;
		card->channels[i].realhdlc = 0;
		card->channels[i].hdlcshift = 0;
		skb_queue_head_init(&card->channels[i].sendq);
	}

	/* initialize the rest of board structure */
	card->channels_used = 0;
	card->channels_tested = 0;
	card->running = 0;
	card->busy = 0;
	card->roundrobin = 0;
	card->loopmode = 0;
	skb_queue_head_init(&card->sendq);
	skb_queue_head_init(&card->recvq);
	INIT_WORK(&card->recv_tq, (void *) (void *) tpam_recv_tq, card);
	INIT_WORK(&card->send_tq, (void *) (void *) tpam_send_tq, card);

	/* add the board at the end of the list of boards */
	card->next = NULL;
	if (cards) {
		c = cards;
		while (c->next)
			c = c->next;
		c->next = card;
	} else
		cards = card;
	++cards_num;

	pci_set_drvdata(dev, card);
	return 0;

err_out_iounmap:
	iounmap((void *)card->bar0);
err_out_free_irq:
	free_irq(card->irq, card);
err_out_free_card:
	kfree(card);
err_out_disable_dev:
	pci_disable_device(dev);
	return err;
}
static int __devinit test_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct net_device *netdev = NULL;
	int err;
	unsigned short val = 0;

	netdev = alloc_etherdev(4);
	if (netdev == NULL) {
		printk(KERN_ERR "alloc_etherdev failed\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	netdev->netdev_ops = &test_netdev_ops;
	netdev->irq = pdev->irq;
	pci_set_drvdata(pdev, netdev);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "pci_enable_device failed\n");
		goto err_free_netdev;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR "BAR 0 is not a memory resource\n");
		err = -ENODEV;
		goto err_disable;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR "pci_request_regions failed\n");
		goto err_disable;
	}

	Pbase = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);
	Vbase = (unsigned long)ioremap(Pbase, len);

	/* print the board memory */
	printk(KERN_INFO "Pbase addr: 0x%lx\n", Pbase);
	printk(KERN_INFO "memory size: %ld\n", len);
	printk(KERN_INFO "Vbase addr: 0x%lx\n", Vbase);

	pci_set_master(pdev);

	/* read pcie config */
	printk(KERN_INFO "read the VID and DID:\n");
	pci_read_config_word(pdev, 0x0, &val);
	printk(KERN_INFO "VID: %d\n", val);
	pci_read_config_word(pdev, 0x2, &val);
	printk(KERN_INFO "DID: %d\n", val);

	/* alloc dma buffer */
	dmaVaddr = (unsigned long)pci_alloc_consistent(pdev, DMA_BUFF_SIZE,
						       &dmaBusAddr);

	/* register the netdev once, and only after the device is fully
	 * set up */
	err = register_netdev(netdev);
	if (err) {
		printk(KERN_ERR "register_netdev failed\n");
		goto err_free_dma;
	}

	return 0;

err_free_dma:
	if (dmaVaddr)
		pci_free_consistent(pdev, DMA_BUFF_SIZE, (void *)dmaVaddr,
				    dmaBusAddr);
	if (Vbase) {
		iounmap((void *)Vbase);
		Vbase = 0;
	}
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}
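/*
 * The ordering fixed above matters because register_netdev() publishes the
 * interface: the stack may call ndo_open() before probe even returns, so
 * everything open() touches must already exist. Minimal ordering sketch
 * with placeholder names (mynet_probe, mynet_ops):
 */
#include <linux/etherdevice.h>
#include <linux/pci.h>

static const struct net_device_ops mynet_ops;	/* assume filled in elsewhere */

static int mynet_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	int err;

	err = pci_enable_device(pdev);	/* 1. hardware resources first */
	if (err)
		return err;

	netdev = alloc_etherdev(0);	/* 2. then the netdev */
	if (!netdev) {
		err = -ENOMEM;
		goto err_disable;
	}
	SET_NETDEV_DEV(netdev, &pdev->dev);
	netdev->netdev_ops = &mynet_ops;

	err = register_netdev(netdev);	/* 3. publish last */
	if (err)
		goto err_free;
	return 0;

err_free:
	free_netdev(netdev);
err_disable:
	pci_disable_device(pdev);
	return err;
}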
/*
 * Probe PCI device for EMS CAN signature and register each available
 * CAN channel to SJA1000 Socket-CAN subsystem.
 */
static int ems_pci_add_card(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct sja1000_priv *priv;
	struct net_device *dev;
	struct ems_pci_card *card;
	int max_chan, conf_size, base_bar;
	int err, i;

	/* Enabling PCI device */
	if (pci_enable_device(pdev) < 0) {
		dev_err(&pdev->dev, "Enabling PCI device failed\n");
		return -ENODEV;
	}

	/* Allocating card structures to hold addresses, ... */
	card = kzalloc(sizeof(struct ems_pci_card), GFP_KERNEL);
	if (card == NULL) {
		pci_disable_device(pdev);
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, card);

	card->pci_dev = pdev;

	card->channels = 0;

	if (pdev->vendor == PCI_VENDOR_ID_PLX) {
		card->version = 2; /* CPC-PCI v2 */
		max_chan = EMS_PCI_V2_MAX_CHAN;
		base_bar = EMS_PCI_V2_BASE_BAR;
		conf_size = EMS_PCI_V2_CONF_SIZE;
	} else {
		card->version = 1; /* CPC-PCI v1 */
		max_chan = EMS_PCI_V1_MAX_CHAN;
		base_bar = EMS_PCI_V1_BASE_BAR;
		conf_size = EMS_PCI_V1_CONF_SIZE;
	}

	/* Remap configuration space and controller memory area */
	card->conf_addr = pci_iomap(pdev, 0, conf_size);
	if (card->conf_addr == NULL) {
		err = -ENOMEM;
		goto failure_cleanup;
	}

	card->base_addr = pci_iomap(pdev, base_bar, EMS_PCI_BASE_SIZE);
	if (card->base_addr == NULL) {
		err = -ENOMEM;
		goto failure_cleanup;
	}

	if (card->version == 1) {
		/* Configure PITA-2 parallel interface (enable MUX) */
		writel(PITA2_MISC_CONFIG, card->conf_addr + PITA2_MISC);

		/* Check for unique EMS CAN signature */
		if (ems_pci_v1_readb(card, 0) != 0x55 ||
		    ems_pci_v1_readb(card, 1) != 0xAA ||
		    ems_pci_v1_readb(card, 2) != 0x01 ||
		    ems_pci_v1_readb(card, 3) != 0xCB ||
		    ems_pci_v1_readb(card, 4) != 0x11) {
			dev_err(&pdev->dev,
				"Not EMS Dr. Thomas Wuensche interface\n");
			err = -ENODEV;
			goto failure_cleanup;
		}
	}

	ems_pci_card_reset(card);

	/* Detect available channels */
	for (i = 0; i < max_chan; i++) {
		dev = alloc_sja1000dev(0);
		if (dev == NULL) {
			err = -ENOMEM;
			goto failure_cleanup;
		}

		card->net_dev[i] = dev;
		priv = netdev_priv(dev);
		priv->priv = card;
		priv->irq_flags = IRQF_SHARED;

		dev->irq = pdev->irq;
		priv->reg_base = card->base_addr + EMS_PCI_CAN_BASE_OFFSET +
					(i * EMS_PCI_CAN_CTRL_SIZE);
		if (card->version == 1) {
			priv->read_reg = ems_pci_v1_read_reg;
			priv->write_reg = ems_pci_v1_write_reg;
			priv->post_irq = ems_pci_v1_post_irq;
		} else {
			priv->read_reg = ems_pci_v2_read_reg;
			priv->write_reg = ems_pci_v2_write_reg;
			priv->post_irq = ems_pci_v2_post_irq;
		}

		/* Check if channel is present */
		if (ems_pci_check_chan(priv)) {
			priv->can.clock.freq = EMS_PCI_CAN_CLOCK;
			priv->ocr = EMS_PCI_OCR;
			priv->cdr = EMS_PCI_CDR;

			SET_NETDEV_DEV(dev, &pdev->dev);
			dev->dev_id = i;

			if (card->version == 1)
				/* reset int flag of pita */
				writel(PITA2_ICR_INT0_EN | PITA2_ICR_INT0,
				       card->conf_addr + PITA2_ICR);
			else
				/* enable IRQ in PLX 9030 */
				writel(PLX_ICSR_ENA_CLR,
				       card->conf_addr + PLX_ICSR);

			/* Register SJA1000 device */
			err = register_sja1000dev(dev);
			if (err) {
				dev_err(&pdev->dev,
					"Registering device failed (err=%d)\n",
					err);
				free_sja1000dev(dev);
				goto failure_cleanup;
			}

			card->channels++;

			dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d\n",
				 i + 1, priv->reg_base, dev->irq);
		} else {
			free_sja1000dev(dev);
		}
	}

	return 0;

failure_cleanup:
	dev_err(&pdev->dev, "Error: %d. Cleaning Up.\n", err);

	ems_pci_del_card(pdev);

	return err;
}
static int __devinit snd_nm256_create(struct snd_card *card,
				      struct pci_dev *pci,
				      struct nm256 **chip_ret)
{
	struct nm256 *chip;
	int err, pval;
	static struct snd_device_ops ops = {
		.dev_free = snd_nm256_dev_free,
	};
	u32 addr;

	*chip_ret = NULL;

	if ((err = pci_enable_device(pci)) < 0)
		return err;

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (chip == NULL) {
		pci_disable_device(pci);
		return -ENOMEM;
	}

	chip->card = card;
	chip->pci = pci;
	chip->use_cache = use_cache;
	spin_lock_init(&chip->reg_lock);
	chip->irq = -1;
	mutex_init(&chip->irq_mutex);

	/* store buffer sizes in bytes */
	chip->streams[SNDRV_PCM_STREAM_PLAYBACK].bufsize = playback_bufsize * 1024;
	chip->streams[SNDRV_PCM_STREAM_CAPTURE].bufsize = capture_bufsize * 1024;

	/*
	 * The NM256 has two memory ports. The first port is nothing
	 * more than a chunk of video RAM, which is used as the I/O ring
	 * buffer. The second port has the actual juicy stuff (like the
	 * mixer and the playback engine control registers).
	 */

	chip->buffer_addr = pci_resource_start(pci, 0);
	chip->cport_addr = pci_resource_start(pci, 1);

	/* Init the memory port info. */
	/* remap control port (#2) */
	chip->res_cport = request_mem_region(chip->cport_addr, NM_PORT2_SIZE,
					     card->driver);
	if (chip->res_cport == NULL) {
		snd_printk(KERN_ERR "memory region 0x%lx (size 0x%x) busy\n",
			   chip->cport_addr, NM_PORT2_SIZE);
		err = -EBUSY;
		goto __error;
	}
	chip->cport = ioremap_nocache(chip->cport_addr, NM_PORT2_SIZE);
	if (chip->cport == NULL) {
		snd_printk(KERN_ERR "unable to map control port %lx\n",
			   chip->cport_addr);
		err = -ENOMEM;
		goto __error;
	}

	if (!strcmp(card->driver, "NM256AV")) {
		/* Ok, try to see if this is a non-AC97 version of the hardware. */
		pval = snd_nm256_readw(chip, NM_MIXER_PRESENCE);
		if ((pval & NM_PRESENCE_MASK) != NM_PRESENCE_VALUE) {
			if (! force_ac97) {
				printk(KERN_ERR "nm256: no ac97 is found!\n");
				printk(KERN_ERR "  force the driver to load by "
				       "passing in the module parameter\n");
				printk(KERN_ERR "    force_ac97=1\n");
				printk(KERN_ERR "  or try sb16 or cs423x drivers instead.\n");
				err = -ENXIO;
				goto __error;
			}
		}
		chip->buffer_end = 2560 * 1024;
		chip->interrupt = snd_nm256_interrupt;
		chip->mixer_status_offset = NM_MIXER_STATUS_OFFSET;
		chip->mixer_status_mask = NM_MIXER_READY_MASK;
	} else {
		/* Not sure if there is any relevant detect for the ZX or not. */
		if (snd_nm256_readb(chip, 0xa0b) != 0)
			chip->buffer_end = 6144 * 1024;
		else
			chip->buffer_end = 4096 * 1024;

		chip->interrupt = snd_nm256_interrupt_zx;
		chip->mixer_status_offset = NM2_MIXER_STATUS_OFFSET;
		chip->mixer_status_mask = NM2_MIXER_READY_MASK;
	}

	chip->buffer_size = chip->streams[SNDRV_PCM_STREAM_PLAYBACK].bufsize +
		chip->streams[SNDRV_PCM_STREAM_CAPTURE].bufsize;
	if (chip->use_cache)
		chip->buffer_size += NM_TOTAL_COEFF_COUNT * 4;
	else
		chip->buffer_size += NM_MAX_PLAYBACK_COEF_SIZE +
			NM_MAX_RECORD_COEF_SIZE;

	if (buffer_top >= chip->buffer_size && buffer_top < chip->buffer_end)
		chip->buffer_end = buffer_top;
	else {
		/* get buffer end pointer from signature */
		if ((err = snd_nm256_peek_for_sig(chip)) < 0)
			goto __error;
	}

	chip->buffer_start = chip->buffer_end - chip->buffer_size;
	chip->buffer_addr += chip->buffer_start;

	printk(KERN_INFO "nm256: Mapping port 1 from 0x%x - 0x%x\n",
	       chip->buffer_start, chip->buffer_end);

	chip->res_buffer = request_mem_region(chip->buffer_addr,
					      chip->buffer_size,
					      card->driver);
	if (chip->res_buffer == NULL) {
		snd_printk(KERN_ERR "nm256: buffer 0x%lx (size 0x%x) busy\n",
			   chip->buffer_addr, chip->buffer_size);
		err = -EBUSY;
		goto __error;
	}
	chip->buffer = ioremap_nocache(chip->buffer_addr, chip->buffer_size);
	if (chip->buffer == NULL) {
		err = -ENOMEM;
		snd_printk(KERN_ERR "unable to map ring buffer at %lx\n",
			   chip->buffer_addr);
		goto __error;
	}

	/* set offsets */
	addr = chip->buffer_start;
	chip->streams[SNDRV_PCM_STREAM_PLAYBACK].buf = addr;
	addr += chip->streams[SNDRV_PCM_STREAM_PLAYBACK].bufsize;
	chip->streams[SNDRV_PCM_STREAM_CAPTURE].buf = addr;
	addr += chip->streams[SNDRV_PCM_STREAM_CAPTURE].bufsize;
	if (chip->use_cache) {
		chip->all_coeff_buf = addr;
	} else {
		chip->coeff_buf[SNDRV_PCM_STREAM_PLAYBACK] = addr;
		addr += NM_MAX_PLAYBACK_COEF_SIZE;
		chip->coeff_buf[SNDRV_PCM_STREAM_CAPTURE] = addr;
	}

	/* Fixed setting. */
	chip->mixer_base = NM_MIXER_OFFSET;

	chip->coeffs_current = 0;

	snd_nm256_init_chip(chip);

	// pci_set_master(pci); /* needed? */

	if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0)
		goto __error;

	snd_card_set_dev(card, &pci->dev);

	*chip_ret = chip;
	return 0;

__error:
	snd_nm256_free(chip);
	return err;
}
static int __devinit wdt_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	unsigned char conf;
	int ret = -ENODEV;

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		return -ENODEV;
	}

	/*
	 * Allocate a MMIO region which contains watchdog control register
	 * and counter, then configure the watchdog to use this region.
	 * This is possible only if PnP is properly enabled in BIOS.
	 * If not, the watchdog must be configured in BIOS manually.
	 */
	if (allocate_resource(&iomem_resource, &wdt_res, VIA_WDT_MMIO_LEN,
			      0xf0000000, 0xffffff00, 0xff, NULL, NULL)) {
		dev_err(&pdev->dev, "MMIO allocation failed\n");
		goto err_out_disable_device;
	}

	pci_write_config_dword(pdev, VIA_WDT_MMIO_BASE, wdt_res.start);
	pci_read_config_byte(pdev, VIA_WDT_CONF, &conf);
	conf |= VIA_WDT_CONF_ENABLE | VIA_WDT_CONF_MMIO;
	pci_write_config_byte(pdev, VIA_WDT_CONF, conf);

	pci_read_config_dword(pdev, VIA_WDT_MMIO_BASE, &mmio);
	if (mmio) {
		dev_info(&pdev->dev, "VIA Chipset watchdog MMIO: %x\n", mmio);
	} else {
		dev_err(&pdev->dev, "MMIO setting failed. Check BIOS.\n");
		goto err_out_resource;
	}

	if (!request_mem_region(mmio, VIA_WDT_MMIO_LEN, "via_wdt")) {
		dev_err(&pdev->dev, "MMIO region busy\n");
		goto err_out_resource;
	}

	wdt_mem = ioremap(mmio, VIA_WDT_MMIO_LEN);
	if (wdt_mem == NULL) {
		dev_err(&pdev->dev, "cannot remap VIA wdt MMIO registers\n");
		goto err_out_release;
	}

	wdt_dev.timeout = timeout;
	watchdog_set_nowayout(&wdt_dev, nowayout);
	if (readl(wdt_mem) & VIA_WDT_FIRED)
		wdt_dev.bootstatus |= WDIOF_CARDRESET;

	ret = watchdog_register_device(&wdt_dev);
	if (ret)
		goto err_out_iounmap;

	/* start triggering, in case of watchdog already enabled by BIOS */
	mod_timer(&timer, jiffies + WDT_HEARTBEAT);
	return 0;

err_out_iounmap:
	iounmap(wdt_mem);
err_out_release:
	release_mem_region(mmio, VIA_WDT_MMIO_LEN);
err_out_resource:
	release_resource(&wdt_res);
err_out_disable_device:
	pci_disable_device(pdev);
	return ret;
}
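/*
 * wdt_probe() arms a software timer that keeps feeding the hardware
 * watchdog between userspace pings. A sketch of that pattern with the
 * current timer API (timer_setup()/from_timer()); the via_wdt driver above
 * predates it and uses a file-scope timer instead. Names are placeholders:
 */
#include <linux/timer.h>
#include <linux/jiffies.h>

struct mywdt {
	struct timer_list timer;
	/* ... hardware state ... */
};

static void mywdt_timer_fn(struct timer_list *t)
{
	struct mywdt *w = from_timer(w, t, timer);

	/* feed the hardware here, then re-arm one second out */
	mod_timer(&w->timer, jiffies + HZ);
}

static void mywdt_start(struct mywdt *w)
{
	timer_setup(&w->timer, mywdt_timer_fn, 0);
	mod_timer(&w->timer, jiffies + HZ);
}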
static int __devinit pch_phub_probe(struct pci_dev *pdev,
				    const struct pci_device_id *id)
{
	int retval;
	int ret;
	ssize_t rom_size;
	struct pch_phub_reg *chip;

	chip = kzalloc(sizeof(struct pch_phub_reg), GFP_KERNEL);
	if (chip == NULL)
		return -ENOMEM;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev,
			"%s : pci_enable_device FAILED(ret=%d)", __func__, ret);
		goto err_pci_enable_dev;
	}
	dev_dbg(&pdev->dev, "%s : pci_enable_device returns %d\n", __func__,
		ret);

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret) {
		dev_err(&pdev->dev,
			"%s : pci_request_regions FAILED(ret=%d)", __func__, ret);
		goto err_req_regions;
	}
	dev_dbg(&pdev->dev, "%s : pci_request_regions returns %d\n", __func__,
		ret);

	chip->pch_phub_base_address = pci_iomap(pdev, 1, 0);

	if (chip->pch_phub_base_address == 0) {
		dev_err(&pdev->dev, "%s : pci_iomap FAILED", __func__);
		ret = -ENOMEM;
		goto err_pci_iomap;
	}
	dev_dbg(&pdev->dev, "%s : pci_iomap SUCCESS and value "
		"in pch_phub_base_address variable is %p\n", __func__,
		chip->pch_phub_base_address);

	if (id->driver_data != 3) {
		chip->pch_phub_extrom_base_address =
			pci_map_rom(pdev, &rom_size);
		if (chip->pch_phub_extrom_base_address == 0) {
			dev_err(&pdev->dev, "%s: pci_map_rom FAILED", __func__);
			ret = -ENOMEM;
			goto err_pci_map;
		}
		dev_dbg(&pdev->dev, "%s : "
			"pci_map_rom SUCCESS and value in "
			"pch_phub_extrom_base_address variable is %p\n",
			__func__, chip->pch_phub_extrom_base_address);
	}

	if (id->driver_data == 1) { /* EG20T PCH */
		retval = sysfs_create_file(&pdev->dev.kobj,
					   &dev_attr_pch_mac.attr);
		if (retval)
			goto err_sysfs_create;

		retval = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
		if (retval)
			goto exit_bin_attr;

		pch_phub_read_modify_write_reg(chip,
					       (unsigned int)CLKCFG_REG_OFFSET,
					       CLKCFG_CAN_50MHZ,
					       CLKCFG_CANCLK_MASK);

		/* quirk for CM-iTC board */
		if (strstr(dmi_get_system_info(DMI_BOARD_NAME), "CM-iTC"))
			pch_phub_read_modify_write_reg(chip,
					(unsigned int)CLKCFG_REG_OFFSET,
					CLKCFG_UART_48MHZ | CLKCFG_BAUDDIV |
					CLKCFG_PLL2VCO | CLKCFG_UARTCLKSEL,
					CLKCFG_UART_MASK);

		/* set the prefetch value */
		iowrite32(0x000affaa, chip->pch_phub_base_address + 0x14);
		/* set the interrupt delay value */
		iowrite32(0x25, chip->pch_phub_base_address + 0x44);
		chip->pch_opt_rom_start_address = PCH_PHUB_ROM_START_ADDR_EG20T;
		chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_EG20T;
	} else if (id->driver_data == 2) { /* ML7213 IOH */
		retval = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
		if (retval)
			goto err_sysfs_create;
		/* set the prefetch value
		 * Device2(USB OHCI #1/ USB EHCI #1/ USB Device):a
		 * Device4(SDIO #0,1,2):f
		 * Device6(SATA 2):f
		 * Device8(USB OHCI #0/ USB EHCI #0):a
		 */
		iowrite32(0x000affa0, chip->pch_phub_base_address + 0x14);
		chip->pch_opt_rom_start_address =
			PCH_PHUB_ROM_START_ADDR_ML7213;
	} else if (id->driver_data == 3) { /* ML7223 IOH Bus-m */
		/* set the prefetch value
		 * Device8(GbE)
		 */
		iowrite32(0x000a0000, chip->pch_phub_base_address + 0x14);
		chip->pch_opt_rom_start_address =
			PCH_PHUB_ROM_START_ADDR_ML7223;
		chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223;
	} else if (id->driver_data == 4) { /* ML7223 IOH Bus-n */
		retval = sysfs_create_file(&pdev->dev.kobj,
					   &dev_attr_pch_mac.attr);
		if (retval)
			goto err_sysfs_create;
		retval = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
		if (retval)
			goto exit_bin_attr;
		/* set the prefetch value
		 * Device2(USB OHCI #0,1,2,3/ USB EHCI #0):a
		 * Device4(SDIO #0,1):f
		 * Device6(SATA 2):f
		 */
		iowrite32(0x0000ffa0, chip->pch_phub_base_address + 0x14);
		/* set the interrupt delay value */
		iowrite32(0x25, chip->pch_phub_base_address + 0x140);
		chip->pch_opt_rom_start_address =
			PCH_PHUB_ROM_START_ADDR_ML7223;
		chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223;
	} else if (id->driver_data == 5) { /* ML7831 */
		retval = sysfs_create_file(&pdev->dev.kobj,
					   &dev_attr_pch_mac.attr);
		if (retval)
			goto err_sysfs_create;

		retval = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
		if (retval)
			goto exit_bin_attr;

		/* set the prefetch value */
		iowrite32(0x000affaa, chip->pch_phub_base_address + 0x14);
		/* set the interrupt delay value */
		iowrite32(0x25, chip->pch_phub_base_address + 0x44);
		chip->pch_opt_rom_start_address = PCH_PHUB_ROM_START_ADDR_EG20T;
		chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_EG20T;
	}

	chip->ioh_type = id->driver_data;
	pci_set_drvdata(pdev, chip);
	return 0;

exit_bin_attr:
	sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);

err_sysfs_create:
	pci_unmap_rom(pdev, chip->pch_phub_extrom_base_address);
err_pci_map:
	pci_iounmap(pdev, chip->pch_phub_base_address);
err_pci_iomap:
	pci_release_regions(pdev);
err_req_regions:
	pci_disable_device(pdev);
err_pci_enable_dev:
	kfree(chip);
	dev_err(&pdev->dev, "%s returns %d\n", __func__, ret);
	return ret;
}
static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
{
	struct xen_pcibk_dev_data *dev_data;
	int err;
	u16 val;
	struct pci_cmd_info *cmd = data;

	dev_data = pci_get_drvdata(dev);
	if (!pci_is_enabled(dev) && is_enable_cmd(value)) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG DRV_NAME ": %s: enable\n",
			       pci_name(dev));
		err = pci_enable_device(dev);
		if (err)
			return err;
		if (dev_data)
			dev_data->enable_intx = 1;
	} else if (pci_is_enabled(dev) && !is_enable_cmd(value)) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG DRV_NAME ": %s: disable\n",
			       pci_name(dev));
		pci_disable_device(dev);
		if (dev_data)
			dev_data->enable_intx = 0;
	}

	if (!dev->is_busmaster && is_master_cmd(value)) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG DRV_NAME ": %s: set bus master\n",
			       pci_name(dev));
		pci_set_master(dev);
	}

	if (value & PCI_COMMAND_INVALIDATE) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG
			       DRV_NAME ": %s: enable memory-write-invalidate\n",
			       pci_name(dev));
		err = pci_set_mwi(dev);
		if (err) {
			pr_warn("%s: cannot enable memory-write-invalidate (%d)\n",
				pci_name(dev), err);
			value &= ~PCI_COMMAND_INVALIDATE;
		}
	}

	cmd->val = value;

	if (!permissive && (!dev_data || !dev_data->permissive))
		return 0;

	/* Only allow the guest to control certain bits. */
	err = pci_read_config_word(dev, offset, &val);
	if (err || val == value)
		return err;

	value &= PCI_COMMAND_GUEST;
	value |= val & ~PCI_COMMAND_GUEST;

	return pci_write_config_word(dev, offset, value);
}
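/*
 * For reference, the plain read-modify-write of PCI_COMMAND that
 * command_write() above is mediating; this is essentially what
 * pci_set_master() does for the bus-master bit:
 */
#include <linux/pci.h>

static void enable_bus_master_by_hand(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_MASTER)) {
		cmd |= PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
}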
static int __devinit #else static int #endif igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct rte_uio_pci_dev *udev; struct msix_entry msix_entry; int err; /* essential vars for configuring the device with net_device */ struct net_device *netdev; struct net_adapter *adapter = NULL; struct ixgbe_hw *hw_i = NULL; struct e1000_hw *hw_e = NULL; udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL); if (!udev) return -ENOMEM; /* * enable device: ask low-level code to enable I/O and * memory */ err = pci_enable_device(dev); if (err != 0) { dev_err(&dev->dev, "Cannot enable PCI device\n"); goto fail_free; } /* * reserve device's PCI memory regions for use by this * module */ err = pci_request_regions(dev, "igb_uio"); if (err != 0) { dev_err(&dev->dev, "Cannot request regions\n"); goto fail_disable; } /* enable bus mastering on the device */ pci_set_master(dev); /* remap IO memory */ err = igbuio_setup_bars(dev, &udev->info); if (err != 0) goto fail_release_iomem; /* set 64-bit DMA mask */ err = pci_set_dma_mask(dev, DMA_BIT_MASK(64)); if (err != 0) { dev_err(&dev->dev, "Cannot set DMA mask\n"); goto fail_release_iomem; } err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64)); if (err != 0) { dev_err(&dev->dev, "Cannot set consistent DMA mask\n"); goto fail_release_iomem; } /* fill uio infos */ udev->info.name = "igb_uio"; udev->info.version = "0.1"; udev->info.handler = igbuio_pci_irqhandler; udev->info.irqcontrol = igbuio_pci_irqcontrol; #ifdef CONFIG_XEN_DOM0 /* check if the driver run on Xen Dom0 */ if (xen_initial_domain()) udev->info.mmap = igbuio_dom0_pci_mmap; #endif udev->info.priv = udev; udev->pdev = dev; switch (igbuio_intr_mode_preferred) { case RTE_INTR_MODE_MSIX: /* Only 1 msi-x vector needed */ msix_entry.entry = 0; if (pci_enable_msix(dev, &msix_entry, 1) == 0) { dev_dbg(&dev->dev, "using MSI-X"); udev->info.irq = msix_entry.vector; udev->mode = RTE_INTR_MODE_MSIX; break; } /* fall back to INTX */ case RTE_INTR_MODE_LEGACY: if (pci_intx_mask_supported(dev)) { dev_dbg(&dev->dev, "using INTX"); udev->info.irq_flags = IRQF_SHARED; udev->info.irq = dev->irq; udev->mode = RTE_INTR_MODE_LEGACY; break; } dev_notice(&dev->dev, "PCI INTX mask not supported\n"); /* fall back to no IRQ */ case RTE_INTR_MODE_NONE: udev->mode = RTE_INTR_MODE_NONE; udev->info.irq = 0; break; default: dev_err(&dev->dev, "invalid IRQ mode %u", igbuio_intr_mode_preferred); err = -EINVAL; goto fail_release_iomem; } err = sysfs_create_group(&dev->dev.kobj, &dev_attr_grp); if (err != 0) goto fail_release_iomem; /* initialize the corresponding netdev */ netdev = alloc_etherdev(sizeof(struct net_adapter)); if (!netdev) { err = -ENOMEM; goto fail_alloc_etherdev; } SET_NETDEV_DEV(netdev, pci_dev_to_dev(dev)); adapter = netdev_priv(netdev); adapter->netdev = netdev; adapter->pdev = dev; udev->adapter = adapter; adapter->type = retrieve_dev_specs(id); /* recover device-specific mac address */ switch (adapter->type) { case IXGBE: hw_i = &adapter->hw._ixgbe_hw; hw_i->back = adapter; hw_i->hw_addr = ioremap(pci_resource_start(dev, 0), pci_resource_len(dev, 0)); if (!hw_i->hw_addr) { err = -EIO; goto fail_ioremap; } break; case IGB: hw_e = &adapter->hw._e1000_hw; hw_e->back = adapter; hw_e->hw_addr = ioremap(pci_resource_start(dev, 0), pci_resource_len(dev, 0)); if (!hw_e->hw_addr) { err = -EIO; goto fail_ioremap; } break; } netdev_assign_netdev_ops(netdev); strncpy(netdev->name, pci_name(dev), sizeof(netdev->name) - 1); retrieve_dev_addr(netdev, adapter); strcpy(netdev->name, "dpdk%d"); 
err = register_netdev(netdev); if (err) goto fail_ioremap; adapter->netdev_registered = true; if (sscanf(netdev->name, "dpdk%hu", &adapter->bd_number) <= 0) { err = -ENODEV; goto fail_bdnumber; } dev_info(&dev->dev, "ifindex picked: %hu\n", adapter->bd_number); /* register uio driver */ err = uio_register_device(&dev->dev, &udev->info); if (err != 0) goto fail_bdnumber; pci_set_drvdata(dev, udev); dev_info(&dev->dev, "uio device registered with irq %lx\n", udev->info.irq); /* reset nstats */ memset(&adapter->nstats, 0, sizeof(struct net_device_stats)); return 0; fail_bdnumber: unregister_netdev(netdev); fail_ioremap: free_netdev(netdev); fail_alloc_etherdev: sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp); fail_release_iomem: igbuio_pci_release_iomem(&udev->info); if (udev->mode == RTE_INTR_MODE_MSIX) pci_disable_msix(udev->pdev); pci_release_regions(dev); fail_disable: pci_disable_device(dev); fail_free: kfree(udev); return err; }
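For symmetry, the unwind order a matching remove path would follow is the probe sequence in reverse. This is a hypothetical sketch, not the driver's actual remove function; it assumes the rte_uio_pci_dev layout and helpers used above:

/* Hypothetical remove counterpart, unwinding the probe above in
 * reverse order. A sketch only; the real driver's remove may differ. */
static void igbuio_pci_remove_sketch(struct pci_dev *dev)
{
	struct rte_uio_pci_dev *udev = pci_get_drvdata(dev);

	if (udev->adapter && udev->adapter->netdev_registered)
		unregister_netdev(udev->adapter->netdev);
	if (udev->adapter)
		free_netdev(udev->adapter->netdev);
	uio_unregister_device(&udev->info);
	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
	igbuio_pci_release_iomem(&udev->info);
	if (udev->mode == RTE_INTR_MODE_MSIX)
		pci_disable_msix(dev);
	pci_release_regions(dev);
	pci_disable_device(dev);
	pci_set_drvdata(dev, NULL);
	kfree(udev);
}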
static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct sja1000_priv *priv; struct peak_pci_chan *chan; struct net_device *dev, *prev_dev; void __iomem *cfg_base, *reg_base; u16 sub_sys_id, icr; int i, err, channels; err = pci_enable_device(pdev); if (err) return err; err = pci_request_regions(pdev, DRV_NAME); if (err) goto failure_disable_pci; err = pci_read_config_word(pdev, 0x2e, &sub_sys_id); if (err) goto failure_release_regions; dev_dbg(&pdev->dev, "probing device %04x:%04x:%04x\n", pdev->vendor, pdev->device, sub_sys_id); err = pci_write_config_word(pdev, 0x44, 0); if (err) goto failure_release_regions; if (sub_sys_id >= 12) channels = 4; else if (sub_sys_id >= 10) channels = 3; else if (sub_sys_id >= 4) channels = 2; else channels = 1; cfg_base = pci_iomap(pdev, 0, PEAK_PCI_CFG_SIZE); if (!cfg_base) { dev_err(&pdev->dev, "failed to map PCI resource #0\n"); err = -ENOMEM; goto failure_release_regions; } reg_base = pci_iomap(pdev, 1, PEAK_PCI_CHAN_SIZE * channels); if (!reg_base) { dev_err(&pdev->dev, "failed to map PCI resource #1\n"); err = -ENOMEM; goto failure_unmap_cfg_base; } /* Set GPIO control register */ writew(0x0005, cfg_base + PITA_GPIOICR + 2); /* Enable all channels of this card */ writeb(0x00, cfg_base + PITA_GPIOICR); /* Toggle reset */ writeb(0x05, cfg_base + PITA_MISC + 3); mdelay(5); /* Leave parport mux mode */ writeb(0x04, cfg_base + PITA_MISC + 3); icr = readw(cfg_base + PITA_ICR + 2); for (i = 0; i < channels; i++) { dev = alloc_sja1000dev(sizeof(struct peak_pci_chan)); if (!dev) { err = -ENOMEM; goto failure_remove_channels; } priv = netdev_priv(dev); chan = priv->priv; chan->cfg_base = cfg_base; priv->reg_base = reg_base + i * PEAK_PCI_CHAN_SIZE; priv->read_reg = peak_pci_read_reg; priv->write_reg = peak_pci_write_reg; priv->post_irq = peak_pci_post_irq; priv->can.clock.freq = PEAK_PCI_CAN_CLOCK; priv->ocr = PEAK_PCI_OCR; priv->cdr = PEAK_PCI_CDR; /* Neither a slave nor a single device distributes the clock */ if (channels == 1 || i > 0) priv->cdr |= CDR_CLK_OFF; /* Setup interrupt handling */ priv->irq_flags = IRQF_SHARED; dev->irq = pdev->irq; chan->icr_mask = peak_pci_icr_masks[i]; icr |= chan->icr_mask; SET_NETDEV_DEV(dev, &pdev->dev); /* Create chain of SJA1000 devices */ chan->prev_dev = pci_get_drvdata(pdev); pci_set_drvdata(pdev, dev); /* * PCAN-ExpressCard needs some additional i2c init. 
* This must be done *before* register_sja1000dev() but * *after* devices linkage */ if (pdev->device == PEAK_PCIEC_DEVICE_ID) { err = peak_pciec_probe(pdev, dev); if (err) { dev_err(&pdev->dev, "failed to probe device (err %d)\n", err); goto failure_free_dev; } } err = register_sja1000dev(dev); if (err) { dev_err(&pdev->dev, "failed to register device\n"); goto failure_free_dev; } dev_info(&pdev->dev, "%s at reg_base=0x%p cfg_base=0x%p irq=%d\n", dev->name, priv->reg_base, chan->cfg_base, dev->irq); } /* Enable interrupts */ writew(icr, cfg_base + PITA_ICR + 2); return 0; failure_free_dev: pci_set_drvdata(pdev, chan->prev_dev); free_sja1000dev(dev); failure_remove_channels: /* Disable interrupts */ writew(0x0, cfg_base + PITA_ICR + 2); chan = NULL; for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) { priv = netdev_priv(dev); chan = priv->priv; prev_dev = chan->prev_dev; unregister_sja1000dev(dev); free_sja1000dev(dev); } /* free any PCIeC resources too */ if (chan && chan->pciec_card) peak_pciec_remove(chan->pciec_card); pci_iounmap(pdev, reg_base); failure_unmap_cfg_base: pci_iounmap(pdev, cfg_base); failure_release_regions: pci_release_regions(pdev); failure_disable_pci: pci_disable_device(pdev); return err; }
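peak_pci_probe links the per-channel devices through chan->prev_dev and keeps only the newest one in drvdata, so teardown walks the chain from newest to oldest. The same pattern in isolation (standalone C with a hypothetical node type standing in for the channel device):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for one SJA1000 channel device. */
struct node {
    int channel;
    struct node *prev;          /* plays the role of chan->prev_dev */
};

int main(void)
{
    struct node *newest = NULL; /* plays the role of pci_get_drvdata() */
    int i;

    /* Probe: prepend each channel, as pci_set_drvdata() does above. */
    for (i = 0; i < 4; i++) {
        struct node *n = malloc(sizeof(*n));
        if (!n)
            return 1;
        n->channel = i;
        n->prev = newest;
        newest = n;
    }

    /* Remove: walk newest-to-oldest, freeing as we go. */
    while (newest) {
        struct node *prev = newest->prev;
        printf("unregistering channel %d\n", newest->channel);
        free(newest);
        newest = prev;
    }
    return 0;
}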
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct Scsi_Host *host; struct fc_lport *lp; struct fnic *fnic; mempool_t *pool; int err; int i; unsigned long flags; /* * Allocate SCSI Host and set up association between host, * local port, and fnic */ lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic)); if (!lp) { printk(KERN_ERR PFX "Unable to alloc libfc local port\n"); err = -ENOMEM; goto err_out; } host = lp->host; fnic = lport_priv(lp); fnic->lport = lp; fnic->ctlr.lp = lp; snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME, host->host_no); host->transportt = fnic_fc_transport; fnic_stats_debugfs_init(fnic); /* Setup PCI resources */ pci_set_drvdata(pdev, fnic); fnic->pdev = pdev; err = pci_enable_device(pdev); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "Cannot enable PCI device, aborting.\n"); goto err_out_free_hba; } err = pci_request_regions(pdev, DRV_NAME); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "Cannot enable PCI resources, aborting\n"); goto err_out_disable_device; } pci_set_master(pdev); /* Query PCI controller on system for DMA addressing * limitation for the device. Try 64-bit first, and * fall back to 32-bit. */ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "No usable DMA configuration, " "aborting\n"); goto err_out_release_regions; } } /* Map vNIC resources from BAR0 */ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { shost_printk(KERN_ERR, fnic->lport->host, "BAR0 not memory-map'able, aborting.\n"); err = -ENODEV; goto err_out_release_regions; } fnic->bar0.vaddr = pci_iomap(pdev, 0, 0); fnic->bar0.bus_addr = pci_resource_start(pdev, 0); fnic->bar0.len = pci_resource_len(pdev, 0); if (!fnic->bar0.vaddr) { shost_printk(KERN_ERR, fnic->lport->host, "Cannot memory-map BAR0 res hdr, " "aborting.\n"); err = -ENODEV; goto err_out_release_regions; } fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0); if (!fnic->vdev) { shost_printk(KERN_ERR, fnic->lport->host, "vNIC registration failed, " "aborting.\n"); err = -ENODEV; goto err_out_iounmap; } err = vnic_dev_cmd_init(fnic->vdev); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "vnic_dev_cmd_init() returns %d, aborting\n", err); goto err_out_vnic_unregister; } err = fnic_dev_wait(fnic->vdev, vnic_dev_open, vnic_dev_open_done, CMD_OPENF_RQ_ENABLE_THEN_POST); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "vNIC dev open failed, aborting.\n"); goto err_out_dev_cmd_deinit; } err = vnic_dev_init(fnic->vdev, 0); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "vNIC dev init failed, aborting.\n"); goto err_out_dev_close; } err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "vNIC get MAC addr failed\n"); goto err_out_dev_close; } /* set data_src for point-to-point mode and to keep it non-zero */ memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN); /* Get vNIC configuration */ err = fnic_get_vnic_config(fnic); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "Get vNIC configuration failed, " "aborting.\n"); goto err_out_dev_close; } /* Configure Maximum Outstanding IO reqs */ if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) { host->can_queue = min_t(u32, FNIC_MAX_IO_REQ, max_t(u32, FNIC_MIN_IO_REQ, fnic->config.io_throttle_count)); } fnic->fnic_max_tag_id = host->can_queue; host->max_lun =
fnic->config.luns_per_tgt; host->max_id = FNIC_MAX_FCP_TARGET; host->max_cmd_len = FCOE_MAX_CMD_LEN; fnic_get_res_counts(fnic); err = fnic_set_intr_mode(fnic); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "Failed to set intr mode, " "aborting.\n"); goto err_out_dev_close; } err = fnic_alloc_vnic_resources(fnic); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "Failed to alloc vNIC resources, " "aborting.\n"); goto err_out_clear_intr; } /* initialize all fnic locks */ spin_lock_init(&fnic->fnic_lock); for (i = 0; i < FNIC_WQ_MAX; i++) spin_lock_init(&fnic->wq_lock[i]); for (i = 0; i < FNIC_WQ_COPY_MAX; i++) { spin_lock_init(&fnic->wq_copy_lock[i]); fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK; fnic->fw_ack_recd[i] = 0; fnic->fw_ack_index[i] = -1; } for (i = 0; i < FNIC_IO_LOCKS; i++) spin_lock_init(&fnic->io_req_lock[i]); fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache); if (!fnic->io_req_pool) goto err_out_free_resources; pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); if (!pool) goto err_out_free_ioreq_pool; fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool; pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); if (!pool) goto err_out_free_dflt_pool; fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool; /* setup vlan config, hw inserts vlan header */ fnic->vlan_hw_insert = 1; fnic->vlan_id = 0; /* Initialize the FIP fcoe_ctrl struct */ fnic->ctlr.send = fnic_eth_send; fnic->ctlr.update_mac = fnic_update_mac; fnic->ctlr.get_src_addr = fnic_get_mac; if (fnic->config.flags & VFCF_FIP_CAPABLE) { shost_printk(KERN_INFO, fnic->lport->host, "firmware supports FIP\n"); /* enable directed and multicast */ vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0); vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS); vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); fnic->set_vlan = fnic_set_vlan; fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO); timer_setup(&fnic->fip_timer, fnic_fip_notify_timer, 0); spin_lock_init(&fnic->vlans_lock); INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame); INIT_WORK(&fnic->event_work, fnic_handle_event); skb_queue_head_init(&fnic->fip_frame_queue); INIT_LIST_HEAD(&fnic->evlist); INIT_LIST_HEAD(&fnic->vlans); } else { shost_printk(KERN_INFO, fnic->lport->host, "firmware uses non-FIP mode\n"); fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP); fnic->ctlr.state = FIP_ST_NON_FIP; } fnic->state = FNIC_IN_FC_MODE; atomic_set(&fnic->in_flight, 0); fnic->state_flags = FNIC_FLAGS_NONE; /* Enable hardware stripping of vlan header on ingress */ fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1); /* Setup notification buffer area */ err = fnic_notify_set(fnic); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "Failed to alloc notify buffer, aborting.\n"); goto err_out_free_max_pool; } /* Setup notify timer when using MSI interrupts */ if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI) timer_setup(&fnic->notify_timer, fnic_notify_timer, 0); /* allocate RQ buffers and post them to RQ*/ for (i = 0; i < fnic->rq_count; i++) { vnic_rq_enable(&fnic->rq[i]); err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "fnic_alloc_rq_frame can't alloc " "frame\n"); goto err_out_free_rq_buf; } } /* * Initialization done with PCI system, hardware, firmware. 
* Add host to SCSI */ err = scsi_add_host(lp->host, &pdev->dev); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "fnic: scsi_add_host failed...exiting\n"); goto err_out_free_rq_buf; } /* Start local port initialization */ lp->link_up = 0; lp->max_retry_count = fnic->config.flogi_retries; lp->max_rport_retry_count = fnic->config.plogi_retries; lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | FCP_SPPF_CONF_COMPL); if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) lp->service_params |= FCP_SPPF_RETRY; lp->boot_time = jiffies; lp->e_d_tov = fnic->config.ed_tov; lp->r_a_tov = fnic->config.ra_tov; lp->link_supported_speeds = FC_PORTSPEED_10GBIT; fc_set_wwnn(lp, fnic->config.node_wwn); fc_set_wwpn(lp, fnic->config.port_wwn); fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0); if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START, FCPIO_HOST_EXCH_RANGE_END, NULL)) { err = -ENOMEM; goto err_out_remove_scsi_host; } fc_lport_init_stats(lp); fnic->stats_reset_time = jiffies; fc_lport_config(lp); if (fc_set_mfs(lp, fnic->config.maxdatafieldsize + sizeof(struct fc_frame_header))) { err = -EINVAL; goto err_out_free_exch_mgr; } fc_host_maxframe_size(lp->host) = lp->mfs; fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000; sprintf(fc_host_symbolic_name(lp->host), DRV_NAME " v" DRV_VERSION " over %s", fnic->name); spin_lock_irqsave(&fnic_list_lock, flags); list_add_tail(&fnic->list, &fnic_list); spin_unlock_irqrestore(&fnic_list_lock, flags); INIT_WORK(&fnic->link_work, fnic_handle_link); INIT_WORK(&fnic->frame_work, fnic_handle_frame); skb_queue_head_init(&fnic->frame_queue); skb_queue_head_init(&fnic->tx_queue); /* Enable all queues */ for (i = 0; i < fnic->raw_wq_count; i++) vnic_wq_enable(&fnic->wq[i]); for (i = 0; i < fnic->wq_copy_count; i++) vnic_wq_copy_enable(&fnic->wq_copy[i]); fc_fabric_login(lp); err = fnic_request_intr(fnic); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "Unable to request irq.\n"); goto err_out_free_exch_mgr; } vnic_dev_enable(fnic->vdev); for (i = 0; i < fnic->intr_count; i++) vnic_intr_unmask(&fnic->intr[i]); fnic_notify_timer_start(fnic); return 0; err_out_free_exch_mgr: fc_exch_mgr_free(lp); err_out_remove_scsi_host: fc_remove_host(lp->host); scsi_remove_host(lp->host); err_out_free_rq_buf: for (i = 0; i < fnic->rq_count; i++) vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf); vnic_dev_notify_unset(fnic->vdev); err_out_free_max_pool: mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]); err_out_free_dflt_pool: mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]); err_out_free_ioreq_pool: mempool_destroy(fnic->io_req_pool); err_out_free_resources: fnic_free_vnic_resources(fnic); err_out_clear_intr: fnic_clear_intr_mode(fnic); err_out_dev_close: vnic_dev_close(fnic->vdev); err_out_dev_cmd_deinit: err_out_vnic_unregister: vnic_dev_unregister(fnic->vdev); err_out_iounmap: fnic_iounmap(fnic); err_out_release_regions: pci_release_regions(pdev); err_out_disable_device: pci_disable_device(pdev); err_out_free_hba: fnic_stats_debugfs_remove(fnic); scsi_host_put(lp->host); err_out: return err; }
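The DMA setup near the top of fnic_probe is the generic try-64-then-32 pattern; condensed into a helper for reference (a sketch, assuming only a struct pci_dev *pdev):

/* Try 64-bit DMA first; fall back to 32-bit if the platform or
 * IOMMU setup rejects the wider mask. Returns 0 on success. */
static int fnic_set_dma_masks_sketch(struct pci_dev *pdev)
{
	int err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			dev_err(&pdev->dev, "no usable DMA configuration\n");
	}
	return err;
}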
int dwc3_intel_suspend(struct dwc_otg2 *otg) { int ret; struct usb_phy *phy; struct pci_dev *pci_dev; struct usb_hcd *hcd = NULL; pci_power_t state = PCI_D3cold; if (!otg) return 0; hcd = container_of(otg->otg.host, struct usb_hcd, self); pci_dev = to_pci_dev(otg->dev); if (otg->state == DWC_STATE_A_HOST && otg->suspend_host) { /* Check whether the USB2 ULPI PHY has hung by accessing its * internal registers. If it has hung, do a hard reset before * entering hibernation mode. Otherwise the USB2 PHY cannot * enter the suspended state, U2PMU never becomes ready, and * the device can never enter D0i3hot in SCU FW. */ if (!is_utmi_phy(otg)) { phy = usb_get_phy(USB_PHY_TYPE_USB2); if (!phy) return -ENODEV; if (usb_phy_io_read(phy, ULPI_VENDOR_ID_LOW) < 0) { enable_usb_phy(otg, 0); enable_usb_phy(otg, 1); } usb_put_phy(phy); } ret = otg->suspend_host(hcd); if (ret) { otg_err(otg, "dwc3-host enter suspend failed: %d\n", ret); return ret; } } if (otg->state == DWC_STATE_B_PERIPHERAL || otg->state == DWC_STATE_A_HOST) state = PCI_D3hot; set_sus_phy(otg, 1); if (pci_save_state(pci_dev)) { otg_err(otg, "pci_save_state failed!\n"); return -EIO; } pci_disable_device(pci_dev); if ((state == PCI_D3cold) && is_utmi_phy(otg)) { /* Important!! Whenever the VUSBPHY rail is disabled, SW * must assert USBRST# to isolate the SOC's DP/DM pins from the * outside world. There is a risk of damage to the SOC if a * peripheral were to bias DP/DM to 3.3V when the SOC is * unpowered. */ ret = intel_scu_ipc_update_register(PMIC_USBPHYCTRL, 0x0, USBPHYRSTB); if (ret) otg_err(otg, "%s: ipc update failed\n", __func__); } pci_set_power_state(pci_dev, state); return 0; }
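Stripped of the PHY handling, the PCI side of the suspend above follows the standard ordering: save config space, disable the device, then drop to the target power state. A minimal sketch:

/* Generic PCI-side suspend ordering used above: save config space,
 * disable the device, then enter the target power state. */
static int pci_suspend_sketch(struct pci_dev *pci_dev, pci_power_t state)
{
	int ret;

	ret = pci_save_state(pci_dev);
	if (ret)
		return ret;
	pci_disable_device(pci_dev);
	return pci_set_power_state(pci_dev, state);
}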
static void __devexit sis5513_remove(struct pci_dev *dev) { ide_pci_remove(dev); pci_disable_device(dev); }
static int __devinit p54p_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct p54p_priv *priv; struct ieee80211_hw *dev; unsigned long mem_addr, mem_len; int err; err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "Cannot enable new PCI device\n"); return err; } mem_addr = pci_resource_start(pdev, 0); mem_len = pci_resource_len(pdev, 0); if (mem_len < sizeof(struct p54p_csr)) { dev_err(&pdev->dev, "Too short PCI resources\n"); err = -ENODEV; goto err_disable_dev; } err = pci_request_regions(pdev, "p54pci"); if (err) { dev_err(&pdev->dev, "Cannot obtain PCI resources\n"); goto err_disable_dev; } if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { dev_err(&pdev->dev, "No suitable DMA available\n"); err = -ENODEV; goto err_free_reg; } pci_set_master(pdev); pci_try_set_mwi(pdev); pci_write_config_byte(pdev, 0x40, 0); pci_write_config_byte(pdev, 0x41, 0); dev = p54_init_common(sizeof(*priv)); if (!dev) { dev_err(&pdev->dev, "ieee80211 alloc failed\n"); err = -ENOMEM; goto err_free_reg; } priv = dev->priv; priv->pdev = pdev; SET_IEEE80211_DEV(dev, &pdev->dev); pci_set_drvdata(pdev, dev); priv->map = ioremap(mem_addr, mem_len); if (!priv->map) { dev_err(&pdev->dev, "Cannot map device memory\n"); err = -ENOMEM; goto err_free_dev; } priv->ring_control = pci_alloc_consistent(pdev, sizeof(*priv->ring_control), &priv->ring_control_dma); if (!priv->ring_control) { dev_err(&pdev->dev, "Cannot allocate rings\n"); err = -ENOMEM; goto err_iounmap; } priv->common.open = p54p_open; priv->common.stop = p54p_stop; priv->common.tx = p54p_tx; spin_lock_init(&priv->lock); tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev); err = request_firmware(&priv->firmware, "isl3886pci", &priv->pdev->dev); if (err) { dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n"); err = request_firmware(&priv->firmware, "isl3886", &priv->pdev->dev); if (err) goto err_free_common; } err = p54p_open(dev); if (err) goto err_free_common; err = p54_read_eeprom(dev); p54p_stop(dev); if (err) goto err_free_common; err = p54_register_common(dev, &pdev->dev); if (err) goto err_free_common; return 0; err_free_common: release_firmware(priv->firmware); pci_free_consistent(pdev, sizeof(*priv->ring_control), priv->ring_control, priv->ring_control_dma); err_iounmap: iounmap(priv->map); err_free_dev: pci_set_drvdata(pdev, NULL); p54_free_common(dev); err_free_reg: pci_release_regions(pdev); err_disable_dev: pci_disable_device(pdev); return err; }
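p54p_probe requests a device-specific firmware image and falls back to a generic one. The same fallback, isolated into a helper (a sketch using the names from the function above):

/* Request the PCI-specific image first, then the older generic one. */
static int p54p_request_fw_sketch(struct p54p_priv *priv)
{
	int err;

	err = request_firmware(&priv->firmware, "isl3886pci",
			       &priv->pdev->dev);
	if (err)
		err = request_firmware(&priv->firmware, "isl3886",
				       &priv->pdev->dev);
	return err;
}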
static int __devinit pci200_pci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { card_t *card; u32 __iomem *p; int i; u32 ramsize; u32 ramphys; /* buffer memory base */ u32 scaphys; /* SCA memory base */ u32 plxphys; /* PLX registers memory base */ i = pci_enable_device(pdev); if (i) return i; i = pci_request_regions(pdev, "PCI200SYN"); if (i) { pci_disable_device(pdev); return i; } card = kzalloc(sizeof(card_t), GFP_KERNEL); if (card == NULL) { printk(KERN_ERR "pci200syn: unable to allocate memory\n"); pci_release_regions(pdev); pci_disable_device(pdev); return -ENOBUFS; } pci_set_drvdata(pdev, card); card->ports[0].netdev = alloc_hdlcdev(&card->ports[0]); card->ports[1].netdev = alloc_hdlcdev(&card->ports[1]); if (!card->ports[0].netdev || !card->ports[1].netdev) { printk(KERN_ERR "pci200syn: unable to allocate memory\n"); pci200_pci_remove_one(pdev); return -ENOMEM; } if (pci_resource_len(pdev, 0) != PCI200SYN_PLX_SIZE || pci_resource_len(pdev, 2) != PCI200SYN_SCA_SIZE || pci_resource_len(pdev, 3) < 16384) { printk(KERN_ERR "pci200syn: invalid card EEPROM parameters\n"); pci200_pci_remove_one(pdev); return -EFAULT; } plxphys = pci_resource_start(pdev,0) & PCI_BASE_ADDRESS_MEM_MASK; card->plxbase = ioremap(plxphys, PCI200SYN_PLX_SIZE); scaphys = pci_resource_start(pdev,2) & PCI_BASE_ADDRESS_MEM_MASK; card->scabase = ioremap(scaphys, PCI200SYN_SCA_SIZE); ramphys = pci_resource_start(pdev,3) & PCI_BASE_ADDRESS_MEM_MASK; card->rambase = pci_ioremap_bar(pdev, 3); if (card->plxbase == NULL || card->scabase == NULL || card->rambase == NULL) { printk(KERN_ERR "pci200syn: ioremap() failed\n"); pci200_pci_remove_one(pdev); return -EFAULT; } /* Reset PLX */ p = &card->plxbase->init_ctrl; writel(readl(p) | 0x40000000, p); readl(p); /* Flush the write - do not use sca_flush */ udelay(1); writel(readl(p) & ~0x40000000, p); readl(p); /* Flush the write - do not use sca_flush */ udelay(1); ramsize = sca_detect_ram(card, card->rambase, pci_resource_len(pdev, 3)); /* number of TX + RX buffers for one port - this is dual port card */ i = ramsize / (2 * (sizeof(pkt_desc) + HDLC_MAX_MRU)); card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS); card->rx_ring_buffers = i - card->tx_ring_buffers; card->buff_offset = 2 * sizeof(pkt_desc) * (card->tx_ring_buffers + card->rx_ring_buffers); printk(KERN_INFO "pci200syn: %u KB RAM at 0x%x, IRQ%u, using %u TX +" " %u RX packets rings\n", ramsize / 1024, ramphys, pdev->irq, card->tx_ring_buffers, card->rx_ring_buffers); if (card->tx_ring_buffers < 1) { printk(KERN_ERR "pci200syn: RAM test failed\n"); pci200_pci_remove_one(pdev); return -EFAULT; } /* Enable interrupts on the PCI bridge */ p = &card->plxbase->intr_ctrl_stat; writew(readw(p) | 0x0040, p); /* Allocate IRQ */ if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, "pci200syn", card)) { printk(KERN_WARNING "pci200syn: could not allocate IRQ%d.\n", pdev->irq); pci200_pci_remove_one(pdev); return -EBUSY; } card->irq = pdev->irq; sca_init(card, 0); for (i = 0; i < 2; i++) { port_t *port = &card->ports[i]; struct net_device *dev = port->netdev; hdlc_device *hdlc = dev_to_hdlc(dev); port->chan = i; spin_lock_init(&port->lock); dev->irq = card->irq; dev->mem_start = ramphys; dev->mem_end = ramphys + ramsize - 1; dev->tx_queue_len = 50; dev->netdev_ops = &pci200_ops; hdlc->attach = sca_attach; hdlc->xmit = sca_xmit; port->settings.clock_type = CLOCK_EXT; port->card = card; sca_init_port(port); if (register_hdlc_device(dev)) { printk(KERN_ERR "pci200syn: unable to register hdlc " "device\n"); port->card = 
NULL; pci200_pci_remove_one(pdev); return -ENOBUFS; } printk(KERN_INFO "%s: PCI200SYN channel %d\n", dev->name, port->chan); } sca_flush(card); return 0; }
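The ring sizing above splits the detected card RAM across the two ports and then between TX and RX, capping TX. The arithmetic as a standalone calculation (all constants illustrative; the real values come from the driver's headers):

#include <stdio.h>

/* All constants illustrative; the real ones live in the driver headers. */
#define DESC_SIZE      16    /* stand-in for sizeof(pkt_desc) */
#define HDLC_MAX_MRU   1600  /* stand-in for the driver's max MRU */
#define MAX_TX_BUFFERS 64    /* stand-in for the TX ring cap */

int main(void)
{
    unsigned int ramsize = 131072;  /* e.g. 128 KB detected on the card */

    /* Per-port buffer budget: RAM is split across the 2 ports, and
     * each buffer needs a descriptor plus a full frame. */
    unsigned int i  = ramsize / (2 * (DESC_SIZE + HDLC_MAX_MRU));
    unsigned int tx = (i / 2 < MAX_TX_BUFFERS) ? i / 2 : MAX_TX_BUFFERS;
    unsigned int rx = i - tx;

    printf("per port: %u TX + %u RX ring buffers\n", tx, rx);
    return 0;
}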
/*---------------------------------------------------------------- * prism2sta_probe_pci * * Probe routine called when a PCI device w/ matching ID is found. * The ISL3874 implementation uses the following map: * BAR0: Prism2.x registers memory mapped, size=4k * Here's the sequence: * - Allocate the PCI resources. * - Read the PCMCIA attribute memory to make sure we have a WLAN card * - Reset the MAC * - Initialize the netdev and wlan data * - Initialize the MAC * * Arguments: * pdev ptr to pci device structure containing info about * pci configuration. * id ptr to the device id entry that matched this device. * * Returns: * zero - success * negative - failed * * Side effects: * * * Call context: * process thread * ----------------------------------------------------------------*/ static int __devinit prism2sta_probe_pci( struct pci_dev *pdev, const struct pci_device_id *id) { int result; phys_t phymem = 0; void *mem = NULL; wlandevice_t *wlandev = NULL; hfa384x_t *hw = NULL; DBFENTER; /* Enable the pci device */ if (pci_enable_device(pdev)) { WLAN_LOG_ERROR("%s: pci_enable_device() failed.\n", dev_info); result = -EIO; goto fail; } /* Figure out our resources */ phymem = pci_resource_start(pdev, 0); if (!request_mem_region(phymem, pci_resource_len(pdev, 0), "Prism2")) { printk(KERN_ERR "prism2: Cannot reserve PCI memory region\n"); result = -EIO; goto fail; } mem = ioremap(phymem, PCI_SIZE); if ( mem == 0 ) { WLAN_LOG_ERROR("%s: ioremap() failed.\n", dev_info); result = -EIO; goto fail; } /* Log the device */ WLAN_LOG_INFO("A Prism2.5 PCI device found, " "phymem:0x%llx, irq:%d, mem:0x%p\n", (unsigned long long)phymem, pdev->irq, mem); if ((wlandev = create_wlan()) == NULL) { WLAN_LOG_ERROR("%s: Memory allocation failure.\n", dev_info); result = -EIO; goto fail; } hw = wlandev->priv; if ( wlan_setup(wlandev) != 0 ) { WLAN_LOG_ERROR("%s: wlan_setup() failed.\n", dev_info); result = -EIO; goto fail; } /* Setup netdevice's ability to report resources * Note: the netdevice was allocated by wlan_setup() */ wlandev->netdev->irq = pdev->irq; wlandev->netdev->mem_start = (unsigned long) mem; wlandev->netdev->mem_end = wlandev->netdev->mem_start + pci_resource_len(pdev, 0); /* Register the wlandev, this gets us a name and registers the * linux netdevice. */ SET_MODULE_OWNER(wlandev->netdev); if ( register_wlandev(wlandev) != 0 ) { WLAN_LOG_ERROR("%s: register_wlandev() failed.\n", dev_info); result = -EIO; goto fail; } #if 0 /* TODO: Move this and an irq test into an hfa384x_testif() routine. */ outw(PRISM2STA_MAGIC, HFA384x_SWSUPPORT(wlandev->netdev->base_addr)); reg=inw( HFA384x_SWSUPPORT(wlandev->netdev->base_addr)); if ( reg != PRISM2STA_MAGIC ) { WLAN_LOG_ERROR("MAC register access test failed!\n"); result = -EIO; goto fail; } #endif /* Initialize the hw data */ hfa384x_create(hw, wlandev->netdev->irq, 0, mem); hw->wlandev = wlandev; /* Do a chip-level reset on the MAC */ if (prism2_doreset) { result = hfa384x_corereset(hw, prism2_reset_holdtime, prism2_reset_settletime, 0); if (result != 0) { WLAN_LOG_ERROR( "%s: hfa384x_corereset() failed.\n", dev_info); unregister_wlandev(wlandev); hfa384x_destroy(hw); result = -EIO; goto fail; } } pci_set_drvdata(pdev, wlandev); /* Shouldn't actually hook up the IRQ until we * _know_ things are alright. A test routine would help. 
*/ result = request_irq(wlandev->netdev->irq, hfa384x_interrupt, SA_SHIRQ, wlandev->name, wlandev); if (result) { WLAN_LOG_ERROR("%s: request_irq() failed.\n", dev_info); unregister_wlandev(wlandev); hfa384x_destroy(hw); result = -EIO; goto fail; } wlandev->msdstate = WLAN_MSD_HWPRESENT; result = 0; goto done; fail: pci_set_drvdata(pdev, NULL); if (wlandev) kfree(wlandev); if (hw) kfree(hw); if (mem) iounmap((void *) mem); pci_release_regions(pdev); pci_disable_device(pdev); done: DBFEXIT; return result; }
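prism2sta_probe_pci reserves and maps BAR0 by hand rather than via pci_request_regions(). The manual pattern, isolated into a sketch (hypothetical helper name; it unwinds the reservation if the mapping fails, which the probe above does not fully do):

/* Reserve and map BAR0 by hand, as the probe above does; release the
 * reservation again if the mapping fails. */
static void __iomem *prism2_map_bar0_sketch(struct pci_dev *pdev,
					    unsigned long size)
{
	unsigned long phymem = pci_resource_start(pdev, 0);
	void __iomem *mem;

	if (!request_mem_region(phymem, pci_resource_len(pdev, 0), "Prism2"))
		return NULL;
	mem = ioremap(phymem, size);
	if (!mem)
		release_mem_region(phymem, pci_resource_len(pdev, 0));
	return mem;
}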
static long esb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int new_options, retval = -EINVAL; int new_heartbeat; void __user *argp = (void __user *)arg; int __user *p = argp; static const struct watchdog_info ident = { .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, .firmware_version = 0, .identity = ESB_MODULE_NAME, }; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0; case WDIOC_GETSTATUS: return put_user(0, p); case WDIOC_GETBOOTSTATUS: return put_user(triggered, p); case WDIOC_SETOPTIONS: { if (get_user(new_options, p)) return -EFAULT; if (new_options & WDIOS_DISABLECARD) { esb_timer_stop(); retval = 0; } if (new_options & WDIOS_ENABLECARD) { esb_timer_start(); retval = 0; } return retval; } case WDIOC_KEEPALIVE: esb_timer_keepalive(); return 0; case WDIOC_SETTIMEOUT: { if (get_user(new_heartbeat, p)) return -EFAULT; if (esb_timer_set_heartbeat(new_heartbeat)) return -EINVAL; esb_timer_keepalive(); /* Fall */ } case WDIOC_GETTIMEOUT: return put_user(heartbeat, p); default: return -ENOTTY; } } /* * Kernel Interfaces */ static const struct file_operations esb_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = esb_write, .unlocked_ioctl = esb_ioctl, .open = esb_open, .release = esb_release, }; static struct miscdevice esb_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &esb_fops, }; /* * Data for PCI driver interface */ static DEFINE_PCI_DEVICE_TABLE(esb_pci_tbl) = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_9), }, { 0, }, /* End of list */ }; MODULE_DEVICE_TABLE(pci, esb_pci_tbl); /* * Init & exit routines */ static unsigned char __devinit esb_getdevice(struct pci_dev *pdev) { if (pci_enable_device(pdev)) { printk(KERN_ERR PFX "failed to enable device\n"); goto err_devput; } if (pci_request_region(pdev, 0, ESB_MODULE_NAME)) { printk(KERN_ERR PFX "failed to request region\n"); goto err_disable; } BASEADDR = pci_ioremap_bar(pdev, 0); if (BASEADDR == NULL) { /* Something's wrong here, BASEADDR has to be set */ printk(KERN_ERR PFX "failed to get BASEADDR\n"); goto err_release; } /* Done */ esb_pci = pdev; return 1; err_release: pci_release_region(pdev, 0); err_disable: pci_disable_device(pdev); err_devput: return 0; } static void __devinit esb_initdevice(void) { u8 val1; u16 val2; /* * Config register: * Bit 5 : 0 = Enable WDT_OUTPUT * Bit 2 : 0 = set the timer frequency to the PCI clock * divided by 2^15 (approx 1KHz). * Bits 1:0 : 11 = WDT_INT_TYPE Disabled. * The watchdog has two timers, it can be setup so that the * expiry of timer1 results in an interrupt and the expiry of * timer2 results in a reboot. We set it to not generate * any interrupts as there is not much we can do with it * right now. 
*/ pci_write_config_word(esb_pci, ESB_CONFIG_REG, 0x0003); /* Check that the WDT isn't already locked */ pci_read_config_byte(esb_pci, ESB_LOCK_REG, &val1); if (val1 & ESB_WDT_LOCK) printk(KERN_WARNING PFX "nowayout already set\n"); /* Set the timer to watchdog mode and disable it for now */ pci_write_config_byte(esb_pci, ESB_LOCK_REG, 0x00); /* Check if the watchdog was previously triggered */ esb_unlock_registers(); val2 = readw(ESB_RELOAD_REG); if (val2 & ESB_WDT_TIMEOUT) triggered = WDIOF_CARDRESET; /* Reset WDT_TIMEOUT flag and timers */ esb_unlock_registers(); writew((ESB_WDT_TIMEOUT | ESB_WDT_RELOAD), ESB_RELOAD_REG); /* And set the correct timeout value */ esb_timer_set_heartbeat(heartbeat); } static int __devinit esb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { int ret; cards_found++; if (cards_found == 1) printk(KERN_INFO PFX "Intel 6300ESB WatchDog Timer Driver v%s\n", ESB_VERSION); if (cards_found > 1) { printk(KERN_ERR PFX "This driver only supports 1 device\n"); return -ENODEV; } /* Check whether or not the hardware watchdog is there */ if (!esb_getdevice(pdev) || esb_pci == NULL) return -ENODEV; /* Check that the heartbeat value is within its range; if not, reset to the default */ if (heartbeat < 0x1 || heartbeat > 2 * 0x03ff) { heartbeat = WATCHDOG_HEARTBEAT; printk(KERN_INFO PFX "heartbeat value must be 1<=heartbeat<=2046, using %d\n", heartbeat); } /* Initialize the watchdog and make sure it does not run */ esb_initdevice(); /* Register the watchdog so that userspace has access to it */ ret = misc_register(&esb_miscdev); if (ret != 0) { printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", WATCHDOG_MINOR, ret); goto err_unmap; } printk(KERN_INFO PFX "initialized (0x%p). heartbeat=%d sec (nowayout=%d)\n", BASEADDR, heartbeat, nowayout); return 0; err_unmap: iounmap(BASEADDR); pci_release_region(esb_pci, 0); pci_disable_device(esb_pci); esb_pci = NULL; return ret; }
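From userspace, the ioctl table above is driven through the standard /dev/watchdog interface. A minimal sketch of a client (needs the module loaded and root; error checking on the ioctls omitted for brevity):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
    int timeout = 30;
    int fd = open("/dev/watchdog", O_WRONLY);

    if (fd < 0) {
        perror("open /dev/watchdog");
        return 1;
    }
    ioctl(fd, WDIOC_SETTIMEOUT, &timeout);  /* -> esb_timer_set_heartbeat() */
    ioctl(fd, WDIOC_KEEPALIVE, 0);          /* -> esb_timer_keepalive() */
    write(fd, "V", 1);  /* magic close, honoured because of WDIOF_MAGICCLOSE */
    close(fd);
    return 0;
}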
static int __devinit pci200_pci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { card_t *card; u32 __iomem *p; int i; u32 ramsize; u32 ramphys; /* buffer memory base */ u32 scaphys; /* SCA memory base */ u32 plxphys; /* PLX registers memory base */ #ifndef MODULE static int printed_version; if (!printed_version++) printk(KERN_INFO "%s\n", version); #endif i = pci_enable_device(pdev); if (i) return i; i = pci_request_regions(pdev, "PCI200SYN"); if (i) { pci_disable_device(pdev); return i; } card = kzalloc(sizeof(card_t), GFP_KERNEL); if (card == NULL) { printk(KERN_ERR "pci200syn: unable to allocate memory\n"); pci_release_regions(pdev); pci_disable_device(pdev); return -ENOBUFS; } pci_set_drvdata(pdev, card); card->ports[0].dev = alloc_hdlcdev(&card->ports[0]); card->ports[1].dev = alloc_hdlcdev(&card->ports[1]); if (!card->ports[0].dev || !card->ports[1].dev) { printk(KERN_ERR "pci200syn: unable to allocate memory\n"); pci200_pci_remove_one(pdev); return -ENOMEM; } if (pci_resource_len(pdev, 0) != PCI200SYN_PLX_SIZE || pci_resource_len(pdev, 2) != PCI200SYN_SCA_SIZE || pci_resource_len(pdev, 3) < 16384) { printk(KERN_ERR "pci200syn: invalid card EEPROM parameters\n"); pci200_pci_remove_one(pdev); return -EFAULT; } plxphys = pci_resource_start(pdev,0) & PCI_BASE_ADDRESS_MEM_MASK; card->plxbase = ioremap(plxphys, PCI200SYN_PLX_SIZE); scaphys = pci_resource_start(pdev,2) & PCI_BASE_ADDRESS_MEM_MASK; card->scabase = ioremap(scaphys, PCI200SYN_SCA_SIZE); ramphys = pci_resource_start(pdev,3) & PCI_BASE_ADDRESS_MEM_MASK; card->rambase = ioremap(ramphys, pci_resource_len(pdev,3)); if (card->plxbase == NULL || card->scabase == NULL || card->rambase == NULL) { printk(KERN_ERR "pci200syn: ioremap() failed\n"); pci200_pci_remove_one(pdev); return -EFAULT; } /* Reset PLX */ p = &card->plxbase->init_ctrl; writel(readl(p) | 0x40000000, p); readl(p); /* Flush the write - do not use sca_flush */ udelay(1); writel(readl(p) & ~0x40000000, p); readl(p); /* Flush the write - do not use sca_flush */ udelay(1); ramsize = sca_detect_ram(card, card->rambase, pci_resource_len(pdev, 3)); /* number of TX + RX buffers for one port - this is dual port card */ i = ramsize / (2 * (sizeof(pkt_desc) + HDLC_MAX_MRU)); card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS); card->rx_ring_buffers = i - card->tx_ring_buffers; card->buff_offset = 2 * sizeof(pkt_desc) * (card->tx_ring_buffers + card->rx_ring_buffers); printk(KERN_INFO "pci200syn: %u KB RAM at 0x%x, IRQ%u, using %u TX +" " %u RX packets rings\n", ramsize / 1024, ramphys, pdev->irq, card->tx_ring_buffers, card->rx_ring_buffers); if (pdev->subsystem_device == PCI_DEVICE_ID_PLX_9050) { printk(KERN_ERR "Detected PCI200SYN card with old " "configuration data.\n"); printk(KERN_ERR "See <http://www.kernel.org/pub/" "linux/utils/net/hdlc/pci200syn/> for update.\n"); printk(KERN_ERR "The card will stop working with" " future versions of Linux if not updated.\n"); } if (card->tx_ring_buffers < 1) { printk(KERN_ERR "pci200syn: RAM test failed\n"); pci200_pci_remove_one(pdev); return -EFAULT; } /* Enable interrupts on the PCI bridge */ p = &card->plxbase->intr_ctrl_stat; writew(readw(p) | 0x0040, p); /* Allocate IRQ */ if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, devname, card)) { printk(KERN_WARNING "pci200syn: could not allocate IRQ%d.\n", pdev->irq); pci200_pci_remove_one(pdev); return -EBUSY; } card->irq = pdev->irq; sca_init(card, 0); for (i = 0; i < 2; i++) { port_t *port = &card->ports[i]; struct net_device *dev = port_to_dev(port); hdlc_device 
*hdlc = dev_to_hdlc(dev); port->phy_node = i; spin_lock_init(&port->lock); dev->irq = card->irq; dev->mem_start = ramphys; dev->mem_end = ramphys + ramsize - 1; dev->tx_queue_len = 50; dev->do_ioctl = pci200_ioctl; dev->open = pci200_open; dev->stop = pci200_close; hdlc->attach = sca_attach; hdlc->xmit = sca_xmit; port->settings.clock_type = CLOCK_EXT; port->card = card; if (register_hdlc_device(dev)) { printk(KERN_ERR "pci200syn: unable to register hdlc " "device\n"); port->card = NULL; pci200_pci_remove_one(pdev); return -ENOBUFS; } sca_init_sync_port(port); /* Set up SCA memory */ printk(KERN_INFO "%s: PCI200SYN node %d\n", dev->name, port->phy_node); } sca_flush(card); return 0; }
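Both pci200 variants reset the PLX bridge with the same set/flush/delay, clear/flush/delay sequence; the readback is what forces the posted write out before the delay starts. Isolated as a sketch:

/* Pulse bit 30 of the PLX init_ctrl register. Each readl() flushes
 * the posted write so the following udelay() runs with the bit
 * actually in its new state. */
static void plx_reset_pulse_sketch(u32 __iomem *init_ctrl)
{
	writel(readl(init_ctrl) | 0x40000000, init_ctrl);
	readl(init_ctrl);		/* flush posted write */
	udelay(1);
	writel(readl(init_ctrl) & ~0x40000000, init_ctrl);
	readl(init_ctrl);		/* flush posted write */
	udelay(1);
}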