/*
 * Probe the PCI bus for a Gazel ISDN adapter (PLX R685, R753 or
 * Djinn/Itoo), enable it, and record its IRQ and I/O base addresses in
 * the card state.
 *
 * Returns 0 on success, 1 on any failure (HiSax convention).
 */
static int __init setup_gazelpci(struct IsdnCardState *cs)
{
	u_int pci_ioaddr0 = 0, pci_ioaddr1 = 0;
	u_char pci_irq = 0, found;
	u_int nbseek, seekcard;

	printk(KERN_WARNING "Gazel: PCI card automatic recognition\n");

	found = 0;
	if (!pci_present()) {
		printk(KERN_WARNING "Gazel: No PCI bus present\n");
		return 1;
	}
	/* Try each supported device ID in turn until one is found. */
	seekcard = PCI_DEVICE_ID_PLX_R685;
	for (nbseek = 0; nbseek < 3; nbseek++) {
		if ((dev_tel = pci_find_device(PCI_VENDOR_ID_PLX,
					       seekcard, dev_tel))) {
			if (pci_enable_device(dev_tel))
				return 1;
			pci_irq = dev_tel->irq;
			pci_ioaddr0 = pci_resource_start(dev_tel, 1);
			pci_ioaddr1 = pci_resource_start(dev_tel, 2);
			found = 1;
		}
		if (found)
			break;
		/* advance to the next candidate device ID */
		switch (seekcard) {
		case PCI_DEVICE_ID_PLX_R685:
			seekcard = PCI_DEVICE_ID_PLX_R753;
			break;
		case PCI_DEVICE_ID_PLX_R753:
			seekcard = PCI_DEVICE_ID_PLX_DJINN_ITOO;
			break;
		}
	}
	if (!found) {
		printk(KERN_WARNING "Gazel: No PCI card found\n");
		return (1);
	}
	if (!pci_irq) {
		printk(KERN_WARNING "Gazel: No IRQ for PCI card found\n");
		return 1;
	}
	/*
	 * BUG FIX: previously a card whose BARs read back as 0 was accepted
	 * and the driver went on to poke I/O port 0.  Reject it instead.
	 */
	if (!pci_ioaddr0 || !pci_ioaddr1) {
		printk(KERN_WARNING "Gazel: No IO-Adr for PCI card found\n");
		return 1;
	}
	cs->hw.gazel.pciaddr[0] = pci_ioaddr0;
	cs->hw.gazel.pciaddr[1] = pci_ioaddr1;
	/* mask off the PCI I/O-space indicator bit before use */
	pci_ioaddr1 &= 0xfffe;
	cs->hw.gazel.cfg_reg = pci_ioaddr0 & 0xfffe;
	cs->hw.gazel.ipac = pci_ioaddr1;
	cs->hw.gazel.isac = pci_ioaddr1 + 0x80;
	cs->hw.gazel.hscx[0] = pci_ioaddr1;
	cs->hw.gazel.hscx[1] = pci_ioaddr1 + 0x40;
	cs->hw.gazel.isacfifo = cs->hw.gazel.isac;
	cs->hw.gazel.hscxfifo[0] = cs->hw.gazel.hscx[0];
	cs->hw.gazel.hscxfifo[1] = cs->hw.gazel.hscx[1];
	cs->irq = pci_irq;
	cs->irq_flags |= SA_SHIRQ;

	switch (seekcard) {
	case PCI_DEVICE_ID_PLX_R685:
		printk(KERN_INFO "Gazel: Card PCI R685 found\n");
		cs->subtyp = R685;
		cs->dc.isac.adf2 = 0x87;
		printk(KERN_INFO "Gazel: config irq:%d isac:0x%X cfg:0x%X\n",
		       cs->irq, cs->hw.gazel.isac, cs->hw.gazel.cfg_reg);
		printk(KERN_INFO "Gazel: hscx A:0x%X hscx B:0x%X\n",
		       cs->hw.gazel.hscx[0], cs->hw.gazel.hscx[1]);
		break;
	case PCI_DEVICE_ID_PLX_R753:
	case PCI_DEVICE_ID_PLX_DJINN_ITOO:
		printk(KERN_INFO "Gazel: Card PCI R753 found\n");
		cs->subtyp = R753;
		test_and_set_bit(HW_IPAC, &cs->HW_Flags);
		printk(KERN_INFO "Gazel: config irq:%d ipac:0x%X cfg:0x%X\n",
		       cs->irq, cs->hw.gazel.ipac, cs->hw.gazel.cfg_reg);
		break;
	}
	return (0);
}
static void print_bar_info(struct pci_dev* device) { unsigned int flags = 0; unsigned int i = 0; for(i = 0; i < 6; i++) { flags = pci_resource_flags(device, i); if(!flags) printk(KERN_INFO ANB_DEVICE_PREFIX "Device BAR %d: not in use\n", i); else { printk(KERN_INFO ANB_DEVICE_PREFIX "Device BAR %d: %10d bytes (0x%08x ~ 0x%08x) Type: %3s P %c RO %c C %c RL%c SH%c\n", i, (unsigned int)pci_resource_len(device, i), (unsigned int)pci_resource_start(device, i), (unsigned int)pci_resource_end(device, i), ((flags & IORESOURCE_IO) == IORESOURCE_IO) ? "IO" : ((flags & IORESOURCE_MEM) == IORESOURCE_MEM) ? "MEM" : "---", ((flags & IORESOURCE_PREFETCH) == IORESOURCE_PREFETCH) ? '+' : '-', ((flags & IORESOURCE_READONLY) == IORESOURCE_READONLY) ? '+' : '-', ((flags & IORESOURCE_CACHEABLE) == IORESOURCE_CACHEABLE) ? '+' : '-', ((flags & IORESOURCE_RANGELENGTH) == IORESOURCE_RANGELENGTH) ? '+' : '-', ((flags & IORESOURCE_SHADOWABLE) == IORESOURCE_SHADOWABLE) ? '+' : '-'); printk(KERN_INFO ANB_DEVICE_PREFIX " ASz%c ASt%c M64%c W %c M %c\n", ((flags & IORESOURCE_SIZEALIGN) == IORESOURCE_SIZEALIGN) ? '+' : '-', ((flags & IORESOURCE_STARTALIGN) == IORESOURCE_STARTALIGN) ? '+' : '-', ((flags & IORESOURCE_MEM_64) == IORESOURCE_MEM_64) ? '+' : '-', ((flags & IORESOURCE_WINDOW) == IORESOURCE_WINDOW) ? '+' : '-', ((flags & IORESOURCE_MUXED) == IORESOURCE_MUXED) ? '+' : '-'); printk(KERN_INFO ANB_DEVICE_PREFIX " Ex %c Dis%c U %c A %c B %c\n", ((flags & IORESOURCE_EXCLUSIVE) == IORESOURCE_EXCLUSIVE) ? '+' : '-', ((flags & IORESOURCE_DISABLED) == IORESOURCE_DISABLED) ? '+' : '-', ((flags & IORESOURCE_UNSET) == IORESOURCE_UNSET) ? '+' : '-', ((flags & IORESOURCE_AUTO) == IORESOURCE_AUTO) ? '+' : '-', ((flags & IORESOURCE_BUSY) == IORESOURCE_BUSY) ? '+' : '-'); } } }
static int XPCIe_init(void) { //previous: pci_find_device gDev = pci_get_device (PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_XILINX_PCIE, gDev); if (NULL == gDev) { printk(/*KERN_WARNING*/"%s: Init: Hardware not found.\n", gDrvrName); //return (CRIT_ERR); return (-1); } if (0 > pci_enable_device(gDev)) { printk(/*KERN_WARNING*/"%s: Init: Device not enabled.\n", gDrvrName); //return (CRIT_ERR); return (-1); } // Get Base Address of registers from pci structure. Should come from pci_dev // structure, but that element seems to be missing on the development system. gBaseHdwr = pci_resource_start (gDev, 0); if (0 > gBaseHdwr) { printk(/*KERN_WARNING*/"%s: Init: Base Address not set.\n", gDrvrName); //return (CRIT_ERR); return (-1); } printk(/*KERN_WARNING*/"Base hw val %llx\n", gBaseHdwr); gBaseLen = pci_resource_len (gDev, 0); printk(/*KERN_WARNING*/"Base hw len %d\n", (unsigned int)gBaseLen); // Remap the I/O register block so that it can be safely accessed. // I/O register block starts at gBaseHdwr and is 32 bytes long. // It is cast to char because that is the way Linus does it. // Reference "/usr/src/Linux-2.4/Documentation/IO-mapping.txt". gBaseVirt = ioremap(gBaseHdwr, gBaseLen); if (!gBaseVirt) { printk(/*KERN_WARNING*/"%s: Init: Could not remap memory.\n", gDrvrName); //return (CRIT_ERR); return (-1); } printk(/*KERN_WARNING*/"Virt hw val %p\n", gBaseVirt); // Get IRQ from pci_dev structure. It may have been remapped by the kernel, // and this value will be the correct one. gIrq = gDev->irq; printk("irq: %d\n",gIrq); //--- START: Initialize Hardware // Try to gain exclusive control of memory for demo hardware. 
if (0 > check_mem_region(gBaseHdwr, KINBURN_REGISTER_SIZE)) { printk(/*KERN_WARNING*/"%s: Init: Memory in use.\n", gDrvrName); //return (CRIT_ERR); return (-1); } request_mem_region(gBaseHdwr, KINBURN_REGISTER_SIZE, "3GIO_Demo_Drv"); gStatFlags = gStatFlags | HAVE_REGION; printk(/*KERN_WARNING*/"%s: Init: Initialize Hardware Done..\n",gDrvrName); // Request IRQ from OS. #if 0 if (0 > request_irq(gIrq, &XPCIe_IRQHandler,/* SA_INTERRUPT |*/ SA_SHIRQ, gDrvrName, gDev)) { printk(/*KERN_WARNING*/"%s: Init: Unable to allocate IRQ",gDrvrName); return (-1); } gStatFlags = gStatFlags | HAVE_IRQ; #endif initcode(); //allocate ioctl arg structure ioctl_arg = kmalloc(sizeof(xpcie_arg_t), GFP_KERNEL); //--- END: Initialize Hardware //--- START: Allocate Buffers gBufferUnaligned = kmalloc(BUF_SIZE, GFP_KERNEL); gReadBuffer = gBufferUnaligned; if (NULL == gBufferUnaligned) { printk(KERN_CRIT"%s: Init: Unable to allocate gBuffer.\n",gDrvrName); return (-1); } gWriteBuffer = kmalloc(BUF_SIZE, GFP_KERNEL); if (NULL == gWriteBuffer) { printk(KERN_CRIT"%s: Init: Unable to allocate gBuffer.\n",gDrvrName); return (-1); } //--- END: Allocate Buffers //--- START: Register Driver // Register with the kernel as a character device. // Abort if it fails. if (0 > register_chrdev(gDrvrMajor, gDrvrName, &XPCIe_Intf)) { printk(KERN_WARNING"%s: Init: will not register\n", gDrvrName); return (CRIT_ERR); } printk(KERN_INFO"%s: Init: module registered\n", gDrvrName); gStatFlags = gStatFlags | HAVE_KREG; printk("%s driver is loaded\n", gDrvrName); return 0; }
/*
 * Record PIIX4 chipset errata workarounds in the global 'errata' struct.
 * 'dev' is the PIIX4 ACPI controller; its revision selects which quirks
 * apply (manual throttle for A/B step, BM-IDE and Type-F DMA for all).
 * Returns 0, or -EINVAL if no device was supplied.
 */
static int acpi_processor_errata_piix4(struct pci_dev *dev)
{
	u8 value1 = 0;
	u8 value2 = 0;

	if (!dev)
		return -EINVAL;

	/*
	 * Note that 'dev' references the PIIX4 ACPI Controller.
	 */

	switch (dev->revision) {
	case 0:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
		break;
	case 1:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
		break;
	case 2:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
		break;
	case 3:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
		break;
	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
		break;
	}

	switch (dev->revision) {

	case 0:		/* PIIX4 A-step */
	case 1:		/* PIIX4 B-step */
		/*
		 * See specification changes #13 ("Manual Throttle Duty Cycle")
		 * and #14 ("Enabling and Disabling Manual Throttle"), plus
		 * erratum #5 ("STPCLK# Deassertion Time") from the January
		 * 2002 PIIX4 specification update.  Applies to only older
		 * PIIX4 models.
		 */
		errata.piix4.throttle = 1;
		/* fallthrough -- the errata below apply to all PIIX4 models */

	case 2:		/* PIIX4E */
	case 3:		/* PIIX4M */
		/*
		 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
		 * Livelock") from the January 2002 PIIX4 specification update.
		 * Applies to all PIIX4 models.
		 */

		/*
		 * BM-IDE
		 * ------
		 * Find the PIIX4 IDE Controller and get the Bus Master IDE
		 * Status register address.  We'll use this later to read
		 * each IDE controller's DMA status to make sure we catch all
		 * DMA activity.
		 */
		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82371AB,
				     PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (dev) {
			/* BAR 4 is the Bus Master IDE I/O register block. */
			errata.piix4.bmisx = pci_resource_start(dev, 4);
			pci_dev_put(dev);
		}

		/*
		 * Type-F DMA
		 * ----------
		 * Find the PIIX4 ISA Controller and read the Motherboard
		 * DMA controller's status to see if Type-F (Fast) DMA mode
		 * is enabled (bit 7) on either channel.  Note that we'll
		 * disable C3 support if this is enabled, as some legacy
		 * devices won't operate well if fast DMA is disabled.
		 */
		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82371AB_0,
				     PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (dev) {
			pci_read_config_byte(dev, 0x76, &value1);
			pci_read_config_byte(dev, 0x77, &value2);
			if ((value1 & 0x80) || (value2 & 0x80))
				errata.piix4.fdma = 1;
			pci_dev_put(dev);
		}

		break;
	}

	if (errata.piix4.bmisx)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Bus master activity detection (BM-IDE) erratum enabled\n"));
	if (errata.piix4.fdma)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Type-F DMA livelock erratum (C3 disabled)\n"));

	return 0;
}
/*
 * Allocate and initialize an nm256 chip instance for the given PCI device:
 * enable the device, map both memory ports, detect the AV/ZX variant,
 * size and map the ring buffer, and register the low-level ALSA device.
 * On success *chip_ret receives the new instance and 0 is returned; on
 * failure a negative errno is returned and everything is torn down via
 * snd_nm256_free().
 */
static int __devinit snd_nm256_create(struct snd_card *card, struct pci_dev *pci,
				      struct nm256 **chip_ret)
{
	struct nm256 *chip;
	int err, pval;
	static struct snd_device_ops ops = {
		.dev_free = snd_nm256_dev_free,
	};
	u32 addr;

	*chip_ret = NULL;

	if ((err = pci_enable_device(pci)) < 0)
		return err;

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (chip == NULL) {
		pci_disable_device(pci);
		return -ENOMEM;
	}

	chip->card = card;
	chip->pci = pci;
	chip->use_cache = use_cache;	/* module parameter */
	spin_lock_init(&chip->reg_lock);
	chip->irq = -1;			/* no IRQ requested yet */
	init_MUTEX(&chip->irq_mutex);

	/* store buffer sizes in bytes */
	chip->streams[SNDRV_PCM_STREAM_PLAYBACK].bufsize = playback_bufsize * 1024;
	chip->streams[SNDRV_PCM_STREAM_CAPTURE].bufsize = capture_bufsize * 1024;

	/*
	 * The NM256 has two memory ports.  The first port is nothing
	 * more than a chunk of video RAM, which is used as the I/O ring
	 * buffer.  The second port has the actual juicy stuff (like the
	 * mixer and the playback engine control registers).
	 */
	chip->buffer_addr = pci_resource_start(pci, 0);
	chip->cport_addr = pci_resource_start(pci, 1);

	/* Init the memory port info. */
	/* remap control port (#2) */
	chip->res_cport = request_mem_region(chip->cport_addr, NM_PORT2_SIZE,
					     card->driver);
	if (chip->res_cport == NULL) {
		snd_printk(KERN_ERR "memory region 0x%lx (size 0x%x) busy\n",
			   chip->cport_addr, NM_PORT2_SIZE);
		err = -EBUSY;
		goto __error;
	}
	chip->cport = ioremap_nocache(chip->cport_addr, NM_PORT2_SIZE);
	if (chip->cport == NULL) {
		snd_printk(KERN_ERR "unable to map control port %lx\n",
			   chip->cport_addr);
		err = -ENOMEM;
		goto __error;
	}

	if (!strcmp(card->driver, "NM256AV")) {
		/* Ok, try to see if this is a non-AC97 version of the hardware. */
		pval = snd_nm256_readw(chip, NM_MIXER_PRESENCE);
		if ((pval & NM_PRESENCE_MASK) != NM_PRESENCE_VALUE) {
			if (!force_ac97) {
				printk(KERN_ERR "nm256: no ac97 is found!\n");
				printk(KERN_ERR " force the driver to load by "
				       "passing in the module parameter\n");
				printk(KERN_ERR " force_ac97=1\n");
				printk(KERN_ERR " or try sb16 or cs423x drivers instead.\n");
				err = -ENXIO;
				goto __error;
			}
		}
		chip->buffer_end = 2560 * 1024;
		chip->interrupt = snd_nm256_interrupt;
		chip->mixer_status_offset = NM_MIXER_STATUS_OFFSET;
		chip->mixer_status_mask = NM_MIXER_READY_MASK;
	} else {
		/* Not sure if there is any relevant detect for the ZX or not. */
		if (snd_nm256_readb(chip, 0xa0b) != 0)
			chip->buffer_end = 6144 * 1024;
		else
			chip->buffer_end = 4096 * 1024;

		chip->interrupt = snd_nm256_interrupt_zx;
		chip->mixer_status_offset = NM2_MIXER_STATUS_OFFSET;
		chip->mixer_status_mask = NM2_MIXER_READY_MASK;
	}

	/* total ring-buffer footprint: both streams plus coefficient space */
	chip->buffer_size = chip->streams[SNDRV_PCM_STREAM_PLAYBACK].bufsize +
		chip->streams[SNDRV_PCM_STREAM_CAPTURE].bufsize;
	if (chip->use_cache)
		chip->buffer_size += NM_TOTAL_COEFF_COUNT * 4;
	else
		chip->buffer_size += NM_MAX_PLAYBACK_COEF_SIZE +
			NM_MAX_RECORD_COEF_SIZE;

	if (buffer_top >= chip->buffer_size && buffer_top < chip->buffer_end)
		chip->buffer_end = buffer_top;
	else {
		/* get buffer end pointer from signature */
		if ((err = snd_nm256_peek_for_sig(chip)) < 0)
			goto __error;
	}

	/* the buffer occupies the top of the video RAM window */
	chip->buffer_start = chip->buffer_end - chip->buffer_size;
	chip->buffer_addr += chip->buffer_start;

	printk(KERN_INFO "nm256: Mapping port 1 from 0x%x - 0x%x\n",
	       chip->buffer_start, chip->buffer_end);

	chip->res_buffer = request_mem_region(chip->buffer_addr,
					      chip->buffer_size,
					      card->driver);
	if (chip->res_buffer == NULL) {
		snd_printk(KERN_ERR "nm256: buffer 0x%lx (size 0x%x) busy\n",
			   chip->buffer_addr, chip->buffer_size);
		err = -EBUSY;
		goto __error;
	}
	chip->buffer = ioremap_nocache(chip->buffer_addr, chip->buffer_size);
	if (chip->buffer == NULL) {
		err = -ENOMEM;
		snd_printk(KERN_ERR "unable to map ring buffer at %lx\n",
			   chip->buffer_addr);
		goto __error;
	}

	/* set offsets: playback, capture, then coefficient area(s) */
	addr = chip->buffer_start;
	chip->streams[SNDRV_PCM_STREAM_PLAYBACK].buf = addr;
	addr += chip->streams[SNDRV_PCM_STREAM_PLAYBACK].bufsize;
	chip->streams[SNDRV_PCM_STREAM_CAPTURE].buf = addr;
	addr += chip->streams[SNDRV_PCM_STREAM_CAPTURE].bufsize;
	if (chip->use_cache) {
		chip->all_coeff_buf = addr;
	} else {
		chip->coeff_buf[SNDRV_PCM_STREAM_PLAYBACK] = addr;
		addr += NM_MAX_PLAYBACK_COEF_SIZE;
		chip->coeff_buf[SNDRV_PCM_STREAM_CAPTURE] = addr;
	}

	/* Fixed setting. */
	chip->mixer_base = NM_MIXER_OFFSET;

	chip->coeffs_current = 0;

	snd_nm256_init_chip(chip);

	// pci_set_master(pci); /* needed? */

	if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0)
		goto __error;

	snd_card_set_dev(card, &pci->dev);

	*chip_ret = chip;
	return 0;

__error:
	snd_nm256_free(chip);
	return err;
}
/*
 * Allocate and register the prism54 network device for 'pdev': set up the
 * net_device ops, the private state, locks, work items and DMA memory,
 * then register with the network stack.  Returns the new net_device on
 * success, NULL on failure (all partial allocations are rolled back).
 */
struct net_device *
islpci_setup(struct pci_dev *pdev)
{
	islpci_private *priv;
	struct net_device *ndev = alloc_etherdev(sizeof (islpci_private));

	if (!ndev)
		return ndev;

	pci_set_drvdata(pdev, ndev);
#if defined(SET_NETDEV_DEV)
	SET_NETDEV_DEV(ndev, &pdev->dev);
#endif

	/* setup the structure members */
	ndev->base_addr = pci_resource_start(pdev, 0);
	ndev->irq = pdev->irq;

	/* initialize the function pointers */
	ndev->netdev_ops = &islpci_netdev_ops;
	ndev->wireless_handlers = &prism54_handler_def;
	ndev->ethtool_ops = &islpci_ethtool_ops;

	/* ndev->set_multicast_list = &islpci_set_multicast_list; */
	ndev->addr_len = ETH_ALEN;
	/* Get a non-zero dummy MAC address for nameif. Jean II */
	memcpy(ndev->dev_addr, dummy_mac, 6);

	ndev->watchdog_timeo = ISLPCI_TX_TIMEOUT;

	/* allocate a private device structure to the network device  */
	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->pdev = pdev;
	/* device reports as ethernet unless monitor mode is active */
	priv->monitor_type = ARPHRD_IEEE80211;
	priv->ndev->type = (priv->iw_mode == IW_MODE_MONITOR) ?
		priv->monitor_type : ARPHRD_ETHER;

	/* Add pointers to enable iwspy support. */
	priv->wireless_data.spy_data = &priv->spy_data;
	ndev->wireless_data = &priv->wireless_data;

	/* save the start and end address of the PCI memory area */
	/* NOTE(review): priv->device_base looks like it is only assigned by
	 * islpci_alloc_memory() below — confirm mem_start is meaningful at
	 * this point. */
	ndev->mem_start = (unsigned long) priv->device_base;
	ndev->mem_end = ndev->mem_start + ISL38XX_PCI_MEM_SIZE;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_TRACING, "PCI Memory remapped to 0x%p\n", priv->device_base);
#endif

	init_waitqueue_head(&priv->reset_done);

	/* init the queue read locks, process wait counter */
	mutex_init(&priv->mgmt_lock);
	priv->mgmt_received = NULL;
	init_waitqueue_head(&priv->mgmt_wqueue);
	mutex_init(&priv->stats_lock);
	spin_lock_init(&priv->slock);

	/* init state machine with off#1 state */
	priv->state = PRV_STATE_OFF;
	priv->state_off = 1;

	/* initialize workqueue's */
	INIT_WORK(&priv->stats_work, prism54_update_stats);
	priv->stats_timestamp = 0;

	INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake);
	priv->reset_task_pending = 0;

	/* allocate various memory areas */
	if (islpci_alloc_memory(priv))
		goto do_free_netdev;

	/* select the firmware file depending on the device id */
	switch (pdev->device) {
	case 0x3877:
		strcpy(priv->firmware, ISL3877_IMAGE_FILE);
		break;

	case 0x3886:
		strcpy(priv->firmware, ISL3886_IMAGE_FILE);
		break;

	default:
		strcpy(priv->firmware, ISL3890_IMAGE_FILE);
		break;
	}

	if (register_netdev(ndev)) {
		DEBUG(SHOW_ERROR_MESSAGES,
		      "ERROR: register_netdev() failed \n");
		goto do_islpci_free_memory;
	}

	return ndev;

do_islpci_free_memory:
	islpci_free_memory(priv);
do_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(ndev);
	priv = NULL;
	return NULL;
}
/*
 * /proc read handler: dump the PIIX IDE controller configuration for each
 * registered device into 'buffer'.  Returns the number of bytes written
 * (must stay under one page).
 *
 * BUG FIX: all eight pci_read_config_*() calls had their "&regXX"
 * arguments corrupted into the mojibake "(R)XX" (U+00AE, an HTML
 * "&reg"-entity artifact), which does not compile.  Restored the
 * address-of expressions.
 */
static int piix_get_info (char *buffer, char **addr, off_t offset, int count)
{
	char *p = buffer;
	int i;

#ifndef CONFIG_SMALL
	for (i = 0; i < n_piix_devs; i++) {
		struct pci_dev *dev	= piix_devs[i];
		unsigned long bibma	= pci_resource_start(dev, 4);
		u16 reg40 = 0, psitre = 0, reg42 = 0, ssitre = 0;
		u8  c0 = 0, c1 = 0, reg54 = 0, reg55 = 0;
		u8  reg44 = 0, reg48 = 0, reg4a = 0, reg4b = 0;

		p += sprintf(p, "\nController: %d\n", i);
		p += sprintf(p, "\n Intel ");
		switch(dev->device) {
		case PCI_DEVICE_ID_INTEL_82801EB_1:
			p += sprintf(p, "PIIX4 SATA 150 ");
			break;
		case PCI_DEVICE_ID_INTEL_82801BA_8:
		case PCI_DEVICE_ID_INTEL_82801BA_9:
		case PCI_DEVICE_ID_INTEL_82801CA_10:
		case PCI_DEVICE_ID_INTEL_82801CA_11:
		case PCI_DEVICE_ID_INTEL_82801DB_1:
		case PCI_DEVICE_ID_INTEL_82801DB_10:
		case PCI_DEVICE_ID_INTEL_82801DB_11:
		case PCI_DEVICE_ID_INTEL_82801EB_11:
		case PCI_DEVICE_ID_INTEL_82801E_11:
		case PCI_DEVICE_ID_INTEL_ESB_2:
		case PCI_DEVICE_ID_INTEL_ICH6_19:
		case PCI_DEVICE_ID_INTEL_ICH7_21:
		case PCI_DEVICE_ID_INTEL_ESB2_18:
			p += sprintf(p, "PIIX4 Ultra 100 ");
			break;
		case PCI_DEVICE_ID_INTEL_82372FB_1:
		case PCI_DEVICE_ID_INTEL_82801AA_1:
			p += sprintf(p, "PIIX4 Ultra 66 ");
			break;
		case PCI_DEVICE_ID_INTEL_82451NX:
		case PCI_DEVICE_ID_INTEL_82801AB_1:
		case PCI_DEVICE_ID_INTEL_82443MX_1:
		case PCI_DEVICE_ID_INTEL_82371AB:
			p += sprintf(p, "PIIX4 Ultra 33 ");
			break;
		case PCI_DEVICE_ID_INTEL_82371SB_1:
			p += sprintf(p, "PIIX3 ");
			break;
		case PCI_DEVICE_ID_INTEL_82371MX:
			p += sprintf(p, "MPIIX ");
			break;
		case PCI_DEVICE_ID_INTEL_82371FB_1:
		case PCI_DEVICE_ID_INTEL_82371FB_0:
		default:
			p += sprintf(p, "PIIX ");
			break;
		}
		p += sprintf(p, "Chipset.\n");

		/* the MPIIX has no per-drive configuration registers */
		if (dev->device == PCI_DEVICE_ID_INTEL_82371MX)
			continue;

		pci_read_config_word(dev, 0x40, &reg40);
		pci_read_config_word(dev, 0x42, &reg42);
		pci_read_config_byte(dev, 0x44, &reg44);
		pci_read_config_byte(dev, 0x48, &reg48);
		pci_read_config_byte(dev, 0x4a, &reg4a);
		pci_read_config_byte(dev, 0x4b, &reg4b);
		pci_read_config_byte(dev, 0x54, &reg54);
		pci_read_config_byte(dev, 0x55, &reg55);
		psitre = (reg40 & 0x4000) ? 1 : 0;
		ssitre = (reg42 & 0x4000) ? 1 : 0;

		/*
		 * at that point bibma+0x2 et bibma+0xa are byte registers
		 * to investigate:
		 */
		c0 = inb(bibma + 0x02);
		c1 = inb(bibma + 0x0a);

		p += sprintf(p, "--------------- Primary Channel "
			     "---------------- Secondary Channel "
			     "-------------\n");
		p += sprintf(p, " %sabled "
			     " %sabled\n",
			     (c0&0x80) ? "dis" : " en",
			     (c1&0x80) ? "dis" : " en");
		p += sprintf(p, "--------------- drive0 --------- drive1 "
			     "-------- drive0 ---------- drive1 ------\n");
		p += sprintf(p, "DMA enabled: %s %s "
			     " %s %s\n",
			     (c0&0x20) ? "yes" : "no ",
			     (c0&0x40) ? "yes" : "no ",
			     (c1&0x20) ? "yes" : "no ",
			     (c1&0x40) ? "yes" : "no " );
		p += sprintf(p, "UDMA enabled: %s %s "
			     " %s %s\n",
			     (reg48&0x01) ? "yes" : "no ",
			     (reg48&0x02) ? "yes" : "no ",
			     (reg48&0x04) ? "yes" : "no ",
			     (reg48&0x08) ? "yes" : "no " );
		p += sprintf(p, "UDMA enabled: %s %s "
			     " %s %s\n",
			     ((reg54&0x11) && (reg55&0x10) && (reg4a&0x01)) ? "5" :
			     ((reg54&0x11) && (reg4a&0x02)) ? "4" :
			     ((reg54&0x11) && (reg4a&0x01)) ? "3" :
			     (reg4a&0x02) ? "2" :
			     (reg4a&0x01) ? "1" :
			     (reg4a&0x00) ? "0" : "X",
			     ((reg54&0x22) && (reg55&0x20) && (reg4a&0x10)) ? "5" :
			     ((reg54&0x22) && (reg4a&0x20)) ? "4" :
			     ((reg54&0x22) && (reg4a&0x10)) ? "3" :
			     (reg4a&0x20) ? "2" :
			     (reg4a&0x10) ? "1" :
			     (reg4a&0x00) ? "0" : "X",
			     ((reg54&0x44) && (reg55&0x40) && (reg4b&0x03)) ? "5" :
			     ((reg54&0x44) && (reg4b&0x02)) ? "4" :
			     ((reg54&0x44) && (reg4b&0x01)) ? "3" :
			     (reg4b&0x02) ? "2" :
			     (reg4b&0x01) ? "1" :
			     (reg4b&0x00) ? "0" : "X",
			     ((reg54&0x88) && (reg55&0x80) && (reg4b&0x30)) ? "5" :
			     ((reg54&0x88) && (reg4b&0x20)) ? "4" :
			     ((reg54&0x88) && (reg4b&0x10)) ? "3" :
			     (reg4b&0x20) ? "2" :
			     (reg4b&0x10) ? "1" :
			     (reg4b&0x00) ? "0" : "X");
		p += sprintf(p, "UDMA\n");
		p += sprintf(p, "DMA\n");
		p += sprintf(p, "PIO\n");

		/*
		 * FIXME.... Add configuration junk data....blah blah......
		 */
	}
#endif
	return p-buffer;	 /* => must be less than 4k! */
}
/*
 * Probe a p54 PCI device: enable it, validate and map its register BAR,
 * allocate DMA ring control memory, load firmware, read the EEPROM and
 * register the mac80211 hardware.  Returns 0 on success or a negative
 * errno; all acquired resources are released on the error paths.
 */
static int __devinit p54p_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	struct p54p_priv *priv;
	struct ieee80211_hw *dev;
	unsigned long mem_addr, mem_len;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "%s (p54pci): Cannot enable new PCI device\n",
		       pci_name(pdev));
		return err;
	}

	mem_addr = pci_resource_start(pdev, 0);
	mem_len = pci_resource_len(pdev, 0);
	if (mem_len < sizeof(struct p54p_csr)) {
		printk(KERN_ERR "%s (p54pci): Too short PCI resources\n",
		       pci_name(pdev));
		/* BUG FIX: err was still 0 here, so the function returned
		 * success while disabling the device. */
		err = -ENODEV;
		goto err_disable_dev;
	}

	err = pci_request_regions(pdev, "p54pci");
	if (err) {
		printk(KERN_ERR "%s (p54pci): Cannot obtain PCI resources\n",
		       pci_name(pdev));
		goto err_disable_dev;
	}

	/* BUG FIX: propagate the DMA-mask failure instead of leaving err 0. */
	err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	if (err) {
		printk(KERN_ERR "%s (p54pci): No suitable DMA available\n",
		       pci_name(pdev));
		goto err_free_reg;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	/* clear TRDY/retry timeout counters (vendor-recommended) */
	pci_write_config_byte(pdev, 0x40, 0);
	pci_write_config_byte(pdev, 0x41, 0);

	dev = p54_init_common(sizeof(*priv));
	if (!dev) {
		printk(KERN_ERR "%s (p54pci): ieee80211 alloc failed\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_free_reg;
	}

	priv = dev->priv;
	priv->pdev = pdev;

	SET_IEEE80211_DEV(dev, &pdev->dev);
	pci_set_drvdata(pdev, dev);

	priv->map = ioremap(mem_addr, mem_len);
	if (!priv->map) {
		printk(KERN_ERR "%s (p54pci): Cannot map device memory\n",
		       pci_name(pdev));
		err = -EINVAL;	// TODO: use a better error code?
		goto err_free_dev;
	}

	priv->ring_control = pci_alloc_consistent(pdev,
						  sizeof(*priv->ring_control),
						  &priv->ring_control_dma);
	if (!priv->ring_control) {
		printk(KERN_ERR "%s (p54pci): Cannot allocate rings\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_iounmap;
	}
	priv->common.open = p54p_open;
	priv->common.stop = p54p_stop;
	priv->common.tx = p54p_tx;

	spin_lock_init(&priv->lock);
	tasklet_init(&priv->rx_tasklet, p54p_rx_tasklet, (unsigned long)dev);

	/* prefer the PCI-specific firmware image, fall back to the generic */
	err = request_firmware(&priv->firmware, "isl3886pci", &priv->pdev->dev);
	if (err) {
		printk(KERN_ERR "%s (p54pci): cannot find firmware "
		       "(isl3886pci)\n", pci_name(priv->pdev));
		err = request_firmware(&priv->firmware, "isl3886",
				       &priv->pdev->dev);
		if (err)
			goto err_free_common;
	}

	/* briefly bring the device up to read the EEPROM, then stop it */
	err = p54p_open(dev);
	if (err)
		goto err_free_common;
	err = p54_read_eeprom(dev);
	p54p_stop(dev);
	if (err)
		goto err_free_common;

	err = ieee80211_register_hw(dev);
	if (err) {
		printk(KERN_ERR "%s (p54pci): Cannot register netdevice\n",
		       pci_name(pdev));
		goto err_free_common;
	}

	return 0;

err_free_common:
	release_firmware(priv->firmware);
	p54_free_common(dev);
	pci_free_consistent(pdev, sizeof(*priv->ring_control),
			    priv->ring_control, priv->ring_control_dma);

err_iounmap:
	iounmap(priv->map);

err_free_dev:
	pci_set_drvdata(pdev, NULL);
	ieee80211_free_hw(dev);

err_free_reg:
	pci_release_regions(pdev);
err_disable_dev:
	pci_disable_device(pdev);
	return err;
}
/*
 * Probe one D-Link DL2000-family gigabit NIC: enable the device, map its
 * registers, parse the per-card module parameters, allocate the TX/RX
 * descriptor rings, read the EEPROM, find the PHY, and register the
 * net_device.  Returns 0 on success or a negative errno.
 *
 * BUG FIXES: "&ethtool_ops" had been mojibake-corrupted to a U+00F0
 * character (an "&eth" HTML-entity artifact); both descriptor-ring
 * allocation failures previously returned 0 (success) because 'err' was
 * never set before the goto.
 */
static int __devinit
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int err, irq;
	long ioaddr;
	static int version_printed;
	void *ring_space;
	dma_addr_t ring_dma;

	if (!version_printed++)
		printk ("%s", version);

	err = pci_enable_device (pdev);
	if (err)
		return err;

	irq = pdev->irq;
	err = pci_request_regions (pdev, "dl2k");
	if (err)
		goto err_out_disable;

	pci_set_master (pdev);
	dev = alloc_etherdev (sizeof (*np));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_res;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

#ifdef MEM_MAPPING
	ioaddr = pci_resource_start (pdev, 1);
	ioaddr = (long) ioremap (ioaddr, RIO_IO_SIZE);
	if (!ioaddr) {
		err = -ENOMEM;
		goto err_out_dev;
	}
#else
	ioaddr = pci_resource_start (pdev, 0);
#endif
	dev->base_addr = ioaddr;
	dev->irq = irq;
	np = netdev_priv(dev);
	np->chip_id = chip_idx;
	np->pdev = pdev;
	spin_lock_init (&np->tx_lock);
	spin_lock_init (&np->rx_lock);

	/* Parse manual configuration */
	np->an_enable = 1;
	np->tx_coalesce = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "auto") == 0 ||
			    strcmp (media[card_idx], "autosense") == 0 ||
			    strcmp (media[card_idx], "0") == 0 ) {
				np->an_enable = 2;
			} else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
				   strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "1000mbps_fd") == 0 ||
				   strcmp (media[card_idx], "6") == 0) {
				np->speed=1000;
				np->full_duplex=1;
			} else if (strcmp (media[card_idx], "1000mbps_hd") == 0 ||
				   strcmp (media[card_idx], "5") == 0) {
				np->speed = 1000;
				np->full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (jumbo[card_idx] != 0) {
			np->jumbo = 1;
			dev->mtu = MAX_JUMBO;
		} else {
			np->jumbo = 0;
			if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE)
				dev->mtu = mtu[card_idx];
		}
		np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
		    vlan[card_idx] : 0;
		if (rx_coalesce > 0 && rx_timeout > 0) {
			np->rx_coalesce = rx_coalesce;
			np->rx_timeout = rx_timeout;
			np->coalesce = 1;
		}
		np->tx_flow = (tx_flow == 0) ? 0 : 1;
		np->rx_flow = (rx_flow == 0) ? 0 : 1;

		if (tx_coalesce < 1)
			tx_coalesce = 1;
		else if (tx_coalesce > TX_RING_SIZE-1)
			tx_coalesce = TX_RING_SIZE - 1;
	}
	dev->netdev_ops = &netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	SET_ETHTOOL_OPS(dev, &ethtool_ops);
#if 0
	dev->features = NETIF_F_IP_CSUM;
#endif
	pci_set_drvdata (pdev, dev);

	ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space) {
		err = -ENOMEM;
		goto err_out_iounmap;
	}
	np->tx_ring = (struct netdev_desc *) ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space) {
		err = -ENOMEM;
		goto err_out_unmap_tx;
	}
	np->rx_ring = (struct netdev_desc *) ring_space;
	np->rx_ring_dma = ring_dma;

	/* Parse eeprom data */
	parse_eeprom (dev);

	/* Find PHY address */
	err = find_miiphy (dev);
	if (err)
		goto err_out_unmap_rx;

	/* Fiber device? */
	np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0;
	np->link_status = 0;
	/* Set media and reset PHY */
	if (np->phy_media) {
		/* default Auto-Negotiation for fiber deivices */
		if (np->an_enable == 2) {
			np->an_enable = 1;
		}
		mii_set_media_pcs (dev);
	} else {
		/* Auto-Negotiation is mandatory for 1000BASE-T,
		   IEEE 802.3ab Annex 28D page 14 */
		if (np->speed == 1000)
			np->an_enable = 1;
		mii_set_media (dev);
	}

	err = register_netdev (dev);
	if (err)
		goto err_out_unmap_rx;

	card_idx++;

	printk (KERN_INFO "%s: %s, %pM, IRQ %d\n",
		dev->name, np->name, dev->dev_addr, irq);
	if (tx_coalesce > 1)
		printk(KERN_INFO "tx_coalesce:\t%d packets\n",
				tx_coalesce);
	if (np->coalesce)
		printk(KERN_INFO
		       "rx_coalesce:\t%d packets\n"
		       "rx_timeout: \t%d ns\n",
				np->rx_coalesce, np->rx_timeout*640);
	if (np->vlan)
		printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
	return 0;

err_out_unmap_rx:
	pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_iounmap:
#ifdef MEM_MAPPING
	iounmap ((void *) ioaddr);

err_out_dev:
#endif
	free_netdev (dev);

err_out_res:
	pci_release_regions (pdev);

err_out_disable:
	pci_disable_device (pdev);
	return err;
}
/*
 * Hard-reset the Mellanox HCA.  The chip reboot wipes PCI config space,
 * so the first 64 dwords of the HCA's (and, for PCI-X Tavor, its
 * bridge's) config header are saved beforehand and restored afterwards.
 * Offsets 22/23 are skipped and PCI_COMMAND is written last so the
 * device only starts decoding once everything else is back in place.
 * Returns 0 on success or a negative errno.
 */
int mthca_reset(struct mthca_dev *mdev)
{
	int i;
	int err = 0;
	u32 *hca_header    = NULL;
	u32 *bridge_header = NULL;
	struct pci_dev *bridge = NULL;
	int bridge_pcix_cap = 0;
	int hca_pcie_cap = 0;
	int hca_pcix_cap = 0;

	u16 devctl;
	u16 linkctl;

#define MTHCA_RESET_OFFSET 0xf0010
#define MTHCA_RESET_VALUE  swab32(1)

	/*
	 * Reset the chip.  This is somewhat ugly because we have to
	 * save off the PCI header before reset and then restore it
	 * after the chip reboots.  We skip config space offsets 22
	 * and 23 since those have a special meaning.
	 *
	 * To make matters worse, for Tavor (PCI-X HCA) we have to
	 * find the associated bridge device and save off its PCI
	 * header as well.
	 */

	if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE)) {
		/* Look for the bridge -- its device ID will be 2 more
		   than HCA's device ID. */
		while ((bridge = pci_get_device(mdev->pdev->vendor,
						mdev->pdev->device + 2,
						bridge)) != NULL) {
			if (bridge->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
			    bridge->subordinate == mdev->pdev->bus) {
				mthca_dbg(mdev, "Found bridge: %s\n",
					  pci_name(bridge));
				break;
			}
		}

		if (!bridge) {
			/*
			 * Didn't find a bridge for a Tavor device --
			 * assume we're in no-bridge mode and hope for
			 * the best.
			 */
			mthca_warn(mdev, "No bridge found for %s\n",
				  pci_name(mdev->pdev));
		}

	}

	/* For Arbel do we need to save off the full 4K PCI Express header?? */
	hca_header = kmalloc(256, GFP_KERNEL);
	if (!hca_header) {
		err = -ENOMEM;
		goto put_dev;
	}

	/* Save the first 64 config dwords, skipping the special 22/23. */
	for (i = 0; i < 64; ++i) {
		if (i == 22 || i == 23)
			continue;
		if (pci_read_config_dword(mdev->pdev, i * 4, hca_header + i)) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't save HCA "
				  "PCI header, aborting.\n");
			goto free_hca;
		}
	}

	hca_pcix_cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX);
	hca_pcie_cap = pci_pcie_cap(mdev->pdev);

	if (bridge) {
		bridge_header = kmalloc(256, GFP_KERNEL);
		if (!bridge_header) {
			err = -ENOMEM;
			goto free_hca;
		}

		for (i = 0; i < 64; ++i) {
			if (i == 22 || i == 23)
				continue;
			if (pci_read_config_dword(bridge, i * 4,
						  bridge_header + i)) {
				err = -ENODEV;
				mthca_err(mdev, "Couldn't save HCA bridge "
					  "PCI header, aborting.\n");
				goto free_bh;
			}
		}
		bridge_pcix_cap = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
		if (!bridge_pcix_cap) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't locate HCA bridge "
				  "PCI-X capability, aborting.\n");
			goto free_bh;
		}
	}

	/* actually hit reset */
	{
		void __iomem *reset = ioremap(pci_resource_start(mdev->pdev, 0) +
					      MTHCA_RESET_OFFSET, 4);

		if (!reset) {
			err = -ENOMEM;
			mthca_err(mdev, "Couldn't map HCA reset register, "
				  "aborting.\n");
			goto free_bh;
		}

		writel(MTHCA_RESET_VALUE, reset);
		iounmap(reset);
	}

	/* Docs say to wait one second before accessing device */
	msleep(1000);

	/* Now wait for PCI device to start responding again */
	{
		u32 v;
		int c = 0;

		/* poll the (bridge's, if present) vendor ID for up to 10 s */
		for (c = 0; c < 100; ++c) {
			if (pci_read_config_dword(bridge ? bridge : mdev->pdev, 0, &v)) {
				err = -ENODEV;
				mthca_err(mdev, "Couldn't access HCA after reset, "
					  "aborting.\n");
				goto free_bh;
			}

			if (v != 0xffffffff)
				goto good;

			msleep(100);
		}

		err = -ENODEV;
		mthca_err(mdev, "PCI device did not come back after reset, "
			  "aborting.\n");
		goto free_bh;
	}

good:
	/* Now restore the PCI headers */
	if (bridge) {
		/* PCI-X split transaction control must be restored first. */
		if (pci_write_config_dword(bridge, bridge_pcix_cap + 0x8,
				 bridge_header[(bridge_pcix_cap + 0x8) / 4])) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA bridge Upstream "
				  "split transaction control, aborting.\n");
			goto free_bh;
		}
		if (pci_write_config_dword(bridge, bridge_pcix_cap + 0xc,
				 bridge_header[(bridge_pcix_cap + 0xc) / 4])) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA bridge Downstream "
				  "split transaction control, aborting.\n");
			goto free_bh;
		}
		/*
		 * Bridge control register is at 0x3e, so we'll
		 * naturally restore it last in this loop.
		 */
		for (i = 0; i < 16; ++i) {
			if (i * 4 == PCI_COMMAND)
				continue;
			if (pci_write_config_dword(bridge, i * 4,
						   bridge_header[i])) {
				err = -ENODEV;
				mthca_err(mdev, "Couldn't restore HCA bridge reg %x, "
					  "aborting.\n", i);
				goto free_bh;
			}
		}

		if (pci_write_config_dword(bridge, PCI_COMMAND,
					   bridge_header[PCI_COMMAND / 4])) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA bridge COMMAND, "
				  "aborting.\n");
			goto free_bh;
		}
	}

	if (hca_pcix_cap) {
		if (pci_write_config_dword(mdev->pdev, hca_pcix_cap,
				 hca_header[hca_pcix_cap / 4])) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA PCI-X "
				  "command register, aborting.\n");
			goto free_bh;
		}
	}

	if (hca_pcie_cap) {
		devctl = hca_header[(hca_pcie_cap + PCI_EXP_DEVCTL) / 4];
		if (pcie_capability_write_word(mdev->pdev, PCI_EXP_DEVCTL,
					       devctl)) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA PCI Express "
				  "Device Control register, aborting.\n");
			goto free_bh;
		}
		linkctl = hca_header[(hca_pcie_cap + PCI_EXP_LNKCTL) / 4];
		if (pcie_capability_write_word(mdev->pdev, PCI_EXP_LNKCTL,
					       linkctl)) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA PCI Express "
				  "Link control register, aborting.\n");
			goto free_bh;
		}
	}

	for (i = 0; i < 16; ++i) {
		if (i * 4 == PCI_COMMAND)
			continue;

		if (pci_write_config_dword(mdev->pdev, i * 4, hca_header[i])) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA reg %x, "
				  "aborting.\n", i);
			goto free_bh;
		}
	}

	/* COMMAND last, so the device starts decoding fully configured. */
	if (pci_write_config_dword(mdev->pdev, PCI_COMMAND,
				   hca_header[PCI_COMMAND / 4])) {
		err = -ENODEV;
		mthca_err(mdev, "Couldn't restore HCA COMMAND, "
			  "aborting.\n");
	}

free_bh:
	kfree(bridge_header);
free_hca:
	kfree(hca_header);
put_dev:
	pci_dev_put(bridge);
	return err;
}
/*
 * setup_telespci - probe and initialise a Teles/PCI ISDN card.
 * @card: HiSax card descriptor; card->cs is the state being filled in.
 *
 * Finds the Zoran 36120 PCI bridge used by the card, maps one page of its
 * BAR0, programs the bridge, and installs the ISAC/HSCX access callbacks.
 * Returns 1 on success, 0 on any failure (HiSax convention: 0 = no card).
 */
int __init setup_telespci(struct IsdnCard *card)
{
	struct IsdnCardState *cs = card->cs;
	char tmp[64];

#ifdef __BIG_ENDIAN
#error "not running on big endian machines now"
#endif
	strcpy(tmp, telespci_revision);
	printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp));
	/* Only handle the card type this driver is responsible for. */
	if (cs->typ != ISDN_CTYPE_TELESPCI)
		return (0);
#if CONFIG_PCI
	if (!pci_present()) {
		printk(KERN_ERR "TelesPCI: no PCI bus present\n");
		return(0);
	}
	/* dev_tel is a file-scope cursor so repeated probes continue the scan. */
	if ((dev_tel = pci_find_device (PCI_VENDOR_ID_ZORAN, PCI_DEVICE_ID_ZORAN_36120, dev_tel))) {
		if (pci_enable_device(dev_tel))
			return(0);
		cs->irq = dev_tel->irq;
		if (!cs->irq) {
			printk(KERN_WARNING "Teles: No IRQ for PCI card found\n");
			return(0);
		}
		/* Map one page of BAR0; all register access goes through membase. */
		cs->hw.teles0.membase = (u_long) ioremap(pci_resource_start(dev_tel, 0), PAGE_SIZE);
		printk(KERN_INFO "Found: Zoran, base-address: 0x%lx, irq: 0x%x\n",
		       pci_resource_start(dev_tel, 0), dev_tel->irq);
	} else {
		printk(KERN_WARNING "TelesPCI: No PCI card found\n");
		return(0);
	}
#else
	printk(KERN_WARNING "HiSax: Teles/PCI and NO_PCI_BIOS\n");
	printk(KERN_WARNING "HiSax: Teles/PCI unable to config\n");
	return (0);
#endif /* CONFIG_PCI */

	/*
	 * Initialize Zoran PCI controller.  Magic register values taken as-is;
	 * NOTE(review): offsets 0x28/0x2C/0x3C/0x40 are Zoran bridge registers —
	 * semantics not derivable from this file, confirm against chip docs.
	 */
	writel(0x00000000, cs->hw.teles0.membase + 0x28);
	writel(0x01000000, cs->hw.teles0.membase + 0x28);
	writel(0x01000000, cs->hw.teles0.membase + 0x28);
	writel(0x7BFFFFFF, cs->hw.teles0.membase + 0x2C);
	writel(0x70000000, cs->hw.teles0.membase + 0x3C);
	writel(0x61000000, cs->hw.teles0.membase + 0x40);
	/* writel(0x00800000, cs->hw.teles0.membase + 0x200); */

	printk(KERN_INFO "HiSax: %s config irq:%d mem:%lx\n",
	       CardType[cs->typ], cs->irq, cs->hw.teles0.membase);

	/* Install register/FIFO accessors and the interrupt handler. */
	cs->readisac = &ReadISAC;
	cs->writeisac = &WriteISAC;
	cs->readisacfifo = &ReadISACfifo;
	cs->writeisacfifo = &WriteISACfifo;
	cs->BC_Read_Reg = &ReadHSCX;
	cs->BC_Write_Reg = &WriteHSCX;
	cs->BC_Send_Data = &hscx_fill_fifo;
	cs->cardmsg = &TelesPCI_card_msg;
	cs->irq_func = &telespci_interrupt;
	cs->irq_flags |= SA_SHIRQ;	/* IRQ line may be shared on PCI */

	ISACVersion(cs, "TelesPCI:");
	/* A bad HSCX version usually means the mapping is wrong; undo it. */
	if (HscxVersion(cs, "TelesPCI:")) {
		printk(KERN_WARNING "TelesPCI: wrong HSCX versions check IO/MEM addresses\n");
		release_io_telespci(cs);
		return (0);
	}
	return (1);
}
/*
 * oxygen_pci_probe - common probe routine for CMI8788 (Oxygen) sound cards.
 * @pci:       the PCI device being probed
 * @index:     ALSA card index
 * @id:        ALSA card id string
 * @owner:     module owning the card
 * @ids:       model id table searched by oxygen_search_pci_id()
 * @get_model: model-specific setup callback filling chip->model
 *
 * Creates the ALSA card, maps the I/O region, initialises the chip and all
 * subdevices (PCM, mixer, optional MPU-401), then registers the card.
 * Returns 0 on success or a negative errno.
 *
 * Cleanup note: once card->private_free is set to oxygen_card_free, later
 * failures jump straight to err_card; snd_card_free() then performs the
 * full hardware/resource cleanup through that callback.
 */
int oxygen_pci_probe(struct pci_dev *pci, int index, char *id,
		     struct module *owner,
		     const struct pci_device_id *ids,
		     int (*get_model)(struct oxygen *chip,
				      const struct pci_device_id *id
				     )
		    )
{
	struct snd_card *card;
	struct oxygen *chip;
	const struct pci_device_id *pci_id;
	int err;

	/* chip state lives in card->private_data (sizeof(*chip) extra bytes) */
	err = snd_card_create(index, id, owner, sizeof(*chip), &card);
	if (err < 0)
		return err;

	chip = card->private_data;
	chip->card = card;
	chip->pci = pci;
	chip->irq = -1;		/* "no IRQ requested yet" sentinel */
	spin_lock_init(&chip->reg_lock);
	mutex_init(&chip->mutex);
	INIT_WORK(&chip->spdif_input_bits_work,
		  oxygen_spdif_input_bits_changed);
	INIT_WORK(&chip->gpio_work, oxygen_gpio_changed);
	init_waitqueue_head(&chip->ac97_waitqueue);

	err = pci_enable_device(pci);
	if (err < 0)
		goto err_card;

	err = pci_request_regions(pci, DRIVER);
	if (err < 0) {
		snd_printk(KERN_ERR "cannot reserve PCI resources\n");
		goto err_pci_enable;
	}

	/* BAR0 must be an I/O port range of at least OXYGEN_IO_SIZE bytes */
	if (!(pci_resource_flags(pci, 0) & IORESOURCE_IO) ||
	    pci_resource_len(pci, 0) < OXYGEN_IO_SIZE) {
		snd_printk(KERN_ERR "invalid PCI I/O range\n");
		err = -ENXIO;
		goto err_pci_regions;
	}
	chip->addr = pci_resource_start(pci, 0);

	pci_id = oxygen_search_pci_id(chip, ids);
	if (!pci_id) {
		err = -ENODEV;
		goto err_pci_regions;
	}
	oxygen_restore_eeprom(chip, pci_id);
	err = get_model(chip, pci_id);
	if (err < 0)
		goto err_pci_regions;

	/* optional per-model private data */
	if (chip->model.model_data_size) {
		chip->model_data = kzalloc(chip->model.model_data_size,
					   GFP_KERNEL);
		if (!chip->model_data) {
			err = -ENOMEM;
			goto err_pci_regions;
		}
	}

	pci_set_master(pci);
	snd_card_set_dev(card, &pci->dev);
	/* from here on snd_card_free() cleans up hardware state too */
	card->private_free = oxygen_card_free;

	oxygen_init(chip);
	chip->model.init(chip);

	err = request_irq(pci->irq, oxygen_interrupt, IRQF_SHARED,
			  DRIVER, chip);
	if (err < 0) {
		snd_printk(KERN_ERR "cannot grab interrupt %d\n", pci->irq);
		goto err_card;
	}
	chip->irq = pci->irq;

	strcpy(card->driver, chip->model.chip);
	strcpy(card->shortname, chip->model.shortname);
	sprintf(card->longname, "%s (rev %u) at %#lx, irq %i",
		chip->model.longname, chip->revision, chip->addr, chip->irq);
	strcpy(card->mixername, chip->model.chip);
	snd_component_add(card, chip->model.chip);

	err = oxygen_pcm_init(chip);
	if (err < 0)
		goto err_card;

	err = oxygen_mixer_init(chip);
	if (err < 0)
		goto err_card;

	/* optional MPU-401 MIDI interface, direction flags per model config */
	if (chip->model.device_config & (MIDI_OUTPUT | MIDI_INPUT)) {
		unsigned int info_flags = MPU401_INFO_INTEGRATED;
		if (chip->model.device_config & MIDI_OUTPUT)
			info_flags |= MPU401_INFO_OUTPUT;
		if (chip->model.device_config & MIDI_INPUT)
			info_flags |= MPU401_INFO_INPUT;
		err = snd_mpu401_uart_new(card, 0, MPU401_HW_CMIPCI,
					  chip->addr + OXYGEN_MPU401,
					  info_flags, 0, 0, &chip->midi);
		if (err < 0)
			goto err_card;
	}

	oxygen_proc_init(chip);

	/* enable the interrupt sources this configuration actually uses */
	spin_lock_irq(&chip->reg_lock);
	if (chip->model.device_config & CAPTURE_1_FROM_SPDIF)
		chip->interrupt_mask |= OXYGEN_INT_SPDIF_IN_DETECT;
	if (chip->has_ac97_0 | chip->has_ac97_1)
		chip->interrupt_mask |= OXYGEN_INT_AC97;
	oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, chip->interrupt_mask);
	spin_unlock_irq(&chip->reg_lock);

	err = snd_card_register(card);
	if (err < 0)
		goto err_card;

	pci_set_drvdata(pci, card);
	return 0;

err_pci_regions:
	pci_release_regions(pci);
err_pci_enable:
	pci_disable_device(pci);
err_card:
	snd_card_free(card);
	return err;
}
/*
 * rtsx_probe - PCI probe routine for the Realtek PCI-E card reader.
 * @pci:    the PCI device being probed
 * @pci_id: matched entry from the driver's id table
 *
 * Enables the device, allocates a SCSI host with an embedded rtsx_dev,
 * maps BAR0, sets up the shared DMA buffer, acquires the IRQ and spawns
 * the control/scan/polling kernel threads.  Returns 0 on success or a
 * negative errno; all partially-acquired resources are released via
 * release_everything() on the errout path.
 *
 * Fix: the original code left err == 0 when kzalloc() of dev->chip
 * failed, so the probe reported success after tearing everything down.
 */
static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
{
	struct Scsi_Host *host;
	struct rtsx_dev *dev;
	int err = 0;
	struct task_struct *th;

	RTSX_DEBUGP("Realtek PCI-E card reader detected\n");

	err = pci_enable_device(pci);
	if (err < 0) {
		printk(KERN_ERR "PCI enable device failed!\n");
		return err;
	}

	err = pci_request_regions(pci, CR_DRIVER_NAME);
	if (err < 0) {
		printk(KERN_ERR "PCI request regions for %s failed!\n", CR_DRIVER_NAME);
		pci_disable_device(pci);
		return err;
	}

	/*
	 * Ask the SCSI layer to allocate a host structure, with extra
	 * space at the end for our private rtsx_dev structure.
	 */
	host = scsi_host_alloc(&rtsx_host_template, sizeof(*dev));
	if (!host) {
		printk(KERN_ERR "Unable to allocate the scsi host\n");
		pci_release_regions(pci);
		pci_disable_device(pci);
		return -ENOMEM;
	}

	dev = host_to_rtsx(host);
	memset(dev, 0, sizeof(struct rtsx_dev));

	dev->chip = kzalloc(sizeof(struct rtsx_chip), GFP_KERNEL);
	if (dev->chip == NULL) {
		/* BUGFIX: previously fell through with err == 0, returning
		 * "success" to the PCI core after releasing everything. */
		err = -ENOMEM;
		goto errout;
	}

	spin_lock_init(&dev->reg_lock);
	mutex_init(&(dev->dev_mutex));
	sema_init(&(dev->sema), 0);
	init_completion(&(dev->notify));
	init_waitqueue_head(&dev->delay_wait);

	dev->pci = pci;
	dev->irq = -1;	/* no IRQ acquired yet */

	printk(KERN_INFO "Resource length: 0x%x\n",
	       (unsigned int)pci_resource_len(pci, 0));
	dev->addr = pci_resource_start(pci, 0);
	dev->remap_addr = ioremap_nocache(dev->addr, pci_resource_len(pci, 0));
	if (dev->remap_addr == NULL) {
		printk(KERN_ERR "ioremap error\n");
		err = -ENXIO;
		goto errout;
	}

	/* Using "unsigned long" cast here to eliminate gcc warning in 64-bit system */
	printk(KERN_INFO "Original address: 0x%lx, remapped address: 0x%lx\n",
	       (unsigned long)(dev->addr), (unsigned long)(dev->remap_addr));

	/* one coherent buffer shared between the host command ring and the
	 * scatter/gather table, split at HOST_CMDS_BUF_LEN */
	dev->rtsx_resv_buf = dma_alloc_coherent(&(pci->dev), RTSX_RESV_BUF_LEN,
			&(dev->rtsx_resv_buf_addr), GFP_KERNEL);
	if (dev->rtsx_resv_buf == NULL) {
		printk(KERN_ERR "alloc dma buffer fail\n");
		err = -ENXIO;
		goto errout;
	}
	dev->chip->host_cmds_ptr = dev->rtsx_resv_buf;
	dev->chip->host_cmds_addr = dev->rtsx_resv_buf_addr;
	dev->chip->host_sg_tbl_ptr = dev->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
	dev->chip->host_sg_tbl_addr = dev->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;

	dev->chip->rtsx = dev;
	rtsx_init_options(dev->chip);

	printk(KERN_INFO "pci->irq = %d\n", pci->irq);

	/* fall back to legacy interrupts if MSI cannot be enabled */
	if (dev->chip->msi_en) {
		if (pci_enable_msi(pci) < 0)
			dev->chip->msi_en = 0;
	}

	if (rtsx_acquire_irq(dev) < 0) {
		err = -EBUSY;
		goto errout;
	}

	pci_set_master(pci);
	synchronize_irq(dev->irq);

	err = scsi_add_host(host, &pci->dev);
	if (err) {
		printk(KERN_ERR "Unable to add the scsi host\n");
		goto errout;
	}

	rtsx_init_chip(dev->chip);

	/* Start up our control thread */
	th = kthread_create(rtsx_control_thread, dev, CR_DRIVER_NAME);
	if (IS_ERR(th)) {
		printk(KERN_ERR "Unable to start control thread\n");
		err = PTR_ERR(th);
		goto errout;
	}

	/* Take a reference to the host for the control thread and
	 * count it among all the threads we have launched.  Then
	 * start it up. */
	scsi_host_get(rtsx_to_host(dev));
	atomic_inc(&total_threads);
	wake_up_process(th);

	/* Start up the thread for delayed SCSI-device scanning */
	th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan");
	if (IS_ERR(th)) {
		printk(KERN_ERR "Unable to start the device-scanning thread\n");
		quiesce_and_remove_host(dev);
		err = PTR_ERR(th);
		goto errout;
	}

	/* Take a reference to the host for the scanning thread and
	 * count it among all the threads we have launched.  Then
	 * start it up. */
	scsi_host_get(rtsx_to_host(dev));
	atomic_inc(&total_threads);
	wake_up_process(th);

	/* Start up the thread for polling thread */
	th = kthread_create(rtsx_polling_thread, dev, "rtsx-polling");
	if (IS_ERR(th)) {
		printk(KERN_ERR "Unable to start the device-polling thread\n");
		quiesce_and_remove_host(dev);
		err = PTR_ERR(th);
		goto errout;
	}

	/* Take a reference to the host for the polling thread and
	 * count it among all the threads we have launched.  Then
	 * start it up. */
	scsi_host_get(rtsx_to_host(dev));
	atomic_inc(&total_threads);
	wake_up_process(th);

	pci_set_drvdata(pci, dev);

	return 0;

	/* We come here if there are any problems */
errout:
	printk(KERN_ERR "rtsx_probe() failed\n");
	release_everything(dev);

	return err;
}
/*
 * enic_probe - PCI probe routine for the Cisco VIC (vNIC) Ethernet adapter.
 * @pdev: the PCI device being probed
 * @ent:  matched entry from the driver's id table
 *
 * Allocates the net_device, sets up PCI/DMA, maps BAR0, registers and
 * opens the vNIC device, configures resources/interrupts, and finally
 * registers the net device.  Returns 0 on success or a negative errno;
 * the err_out_* label chain unwinds exactly what was acquired.
 */
static int __devinit enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct enic *enic;
	int using_dac = 0;
	unsigned int i;
	int err;
	/* RSS/TSO knobs all disabled; only VLAN stripping is enabled */
	const u8 rss_default_cpu = 0;
	const u8 rss_hash_type = 0;
	const u8 rss_hash_bits = 0;
	const u8 rss_base_cpu = 0;
	const u8 rss_enable = 0;
	const u8 tso_ipid_split_en = 0;
	const u8 ig_vlan_strip_en = 1;

	/* Allocate net device structure and initialize.  Private
	 * instance data is initialized to zero.
	 */
	netdev = alloc_etherdev(sizeof(struct enic));
	if (!netdev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	enic = netdev_priv(netdev);
	enic->netdev = netdev;
	enic->pdev = pdev;

	/* Setup PCI resources */
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting.\n");
		goto err_out_free_netdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot request PCI regions, aborting.\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device.  Try 40-bit first, and
	 * fail to 32-bit.
	 */
	err = pci_set_dma_mask(pdev, DMA_40BIT_MASK);
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, aborting.\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "Unable to obtain 32-bit DMA "
				"for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "Unable to obtain 40-bit DMA "
				"for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
		using_dac = 1;	/* remember for NETIF_F_HIGHDMA below */
	}

	/* Map vNIC resources from BAR0 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "BAR0 not memory-map'able, aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	/* NOTE(review): bar0.len is read here before it is assigned two lines
	 * below; alloc_etherdev() zeroes the private area, and pci_iomap()
	 * treats maxlen 0 as "map the whole BAR", so this presumably still
	 * maps the full region — confirm, and consider reordering. */
	enic->bar0.vaddr = pci_iomap(pdev, 0, enic->bar0.len);
	enic->bar0.bus_addr = pci_resource_start(pdev, 0);
	enic->bar0.len = pci_resource_len(pdev, 0);

	if (!enic->bar0.vaddr) {
		printk(KERN_ERR PFX "Cannot memory-map BAR0 res hdr, aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	/* Register vNIC device */
	enic->vdev = vnic_dev_register(NULL, enic, pdev, &enic->bar0);
	if (!enic->vdev) {
		printk(KERN_ERR PFX "vNIC registration failed, aborting.\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	/* Issue device open to get device in known state */
	err = enic_dev_open(enic);
	if (err) {
		printk(KERN_ERR PFX "vNIC dev open failed, aborting.\n");
		goto err_out_vnic_unregister;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier.  We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */
	netif_carrier_off(netdev);

	err = vnic_dev_init(enic->vdev, 0);
	if (err) {
		printk(KERN_ERR PFX "vNIC dev init failed, aborting.\n");
		goto err_out_dev_close;
	}

	/* Get vNIC configuration */
	err = enic_get_vnic_config(enic);
	if (err) {
		printk(KERN_ERR PFX "Get vNIC configuration failed, aborting.\n");
		goto err_out_dev_close;
	}

	/* Get available resource counts */
	enic_get_res_counts(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */
	err = enic_set_intr_mode(enic);
	if (err) {
		printk(KERN_ERR PFX "Failed to set intr mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Allocate and configure vNIC resources */
	err = enic_alloc_vnic_resources(enic);
	if (err) {
		printk(KERN_ERR PFX "Failed to alloc vNIC resources, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	/* Enable VLAN tag stripping.  RSS not enabled (yet). */
	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
	if (err) {
		printk(KERN_ERR PFX "Failed to config nic, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	/* Setup notification timer, HW reset task, and locks */
	init_timer(&enic->notify_timer);
	enic->notify_timer.function = enic_notify_timer;
	enic->notify_timer.data = (unsigned long)enic;

	INIT_WORK(&enic->reset, enic_reset);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);

	spin_lock_init(&enic->devcmd_lock);

	/* Register net device */
	enic->port_mtu = enic->config.mtu;
	(void)enic_change_mtu(netdev, enic->port_mtu);

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		printk(KERN_ERR PFX "Invalid MAC address, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	netdev->netdev_ops = &enic_netdev_ops;
	netdev->watchdog_timeo = 2 * HZ;
	netdev->ethtool_ops = &enic_ethtool_ops;

	/* NAPI poll routine depends on the interrupt mode chosen above */
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi, enic_poll, 64);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		netif_napi_add(netdev, &enic->napi, enic_poll_msix, 64);
		break;
	}

	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, LRO))
		netdev->features |= NETIF_F_LRO;
	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);

	/* software LRO manager setup */
	enic->lro_mgr.max_aggr = ENIC_LRO_MAX_AGGR;
	enic->lro_mgr.max_desc = ENIC_LRO_MAX_DESC;
	enic->lro_mgr.lro_arr = enic->lro_desc;
	enic->lro_mgr.get_skb_header = enic_get_skb_header;
	enic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
	enic->lro_mgr.dev = netdev;
	enic->lro_mgr.ip_summed = CHECKSUM_COMPLETE;
	enic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

	err = register_netdev(netdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	return 0;

err_out_free_vnic_resources:
	enic_free_vnic_resources(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_vnic_unregister:
	enic_clear_intr_mode(enic);
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	return err;
}
/**
 * pci_map_rom - map a PCI ROM to kernel space
 * @pdev: pointer to pci device struct
 * @size: pointer to receive size of pci window over ROM
 * @return: kernel virtual pointer to image of ROM
 *
 * Map a PCI ROM into kernel space. If ROM is boot video ROM,
 * the shadow BIOS copy will be returned instead of the
 * actual ROM.
 */
void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
{
	struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
	loff_t start;
	void __iomem *rom;
	void __iomem *image;
	int last_image;

	/*
	 * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy
	 * memory map if the VGA enable bit of the Bridge Control register is
	 * set for embedded VGA.
	 */
	if (res->flags & IORESOURCE_ROM_SHADOW) {
		/* primary video rom always starts here */
		start = (loff_t)0xC0000;
		*size = 0x20000; /* cover C000:0 through E000:0 */
	} else {
		if (res->flags &
			(IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) {
			/* a RAM copy already exists; return it directly,
			 * no ioremap/enable needed */
			*size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
			return (void __iomem *)(unsigned long)
				pci_resource_start(pdev, PCI_ROM_RESOURCE);
		} else {
			/* assign the ROM an address if it doesn't have one */
			if (res->parent == NULL &&
			    pci_assign_resource(pdev,PCI_ROM_RESOURCE))
				return NULL;
			start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
			*size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
			if (*size == 0)
				return NULL;

			/* Enable ROM space decodes */
			if (pci_enable_rom(pdev))
				return NULL;
		}
	}

	rom = ioremap(start, *size);
	if (!rom) {
		/* restore enable if ioremap fails */
		if (!(res->flags & (IORESOURCE_ROM_ENABLE |
				    IORESOURCE_ROM_SHADOW |
				    IORESOURCE_ROM_COPY)))
			pci_disable_rom(pdev);
		return NULL;
	}

	/*
	 * Try to find the true size of the ROM since sometimes the PCI window
	 * size is much larger than the actual size of the ROM.
	 * True size is important if the ROM is going to be copied.
	 */
	image = rom;
	do {
		void __iomem *pds;
		/* Standard PCI ROMs start out with these bytes 55 AA */
		if (readb(image) != 0x55)
			break;
		if (readb(image + 1) != 0xAA)
			break;
		/* get the PCI data structure and check its signature;
		 * offset 24 (0x18) holds the pointer to the "PCIR" data
		 * structure per the PCI firmware spec */
		pds = image + readw(image + 24);
		if (readb(pds) != 'P')
			break;
		if (readb(pds + 1) != 'C')
			break;
		if (readb(pds + 2) != 'I')
			break;
		if (readb(pds + 3) != 'R')
			break;
		/* bit 7 of the indicator byte marks the last image */
		last_image = readb(pds + 21) & 0x80;
		/* this length is reliable */
		image += readw(pds + 16) * 512;	/* image length in 512-byte units */
	} while (!last_image);

	/* never return a size larger than the PCI resource window */
	/* there are known ROMs that get the size wrong */
	*size = min((size_t)(image - rom), *size);

	return rom;
}
/*
 * parse_eeprom - read and decode the adapter's EEPROM contents.
 * @dev: the network device whose EEPROM is read (via dev->base_addr).
 *
 * Reads 256 bytes of EEPROM, verifies the CRC (D-Link boards only),
 * extracts the MAC address, and walks the Software Information Block's
 * (cid, next) cell chain to apply per-cell settings.
 * Returns 0 on success, -1 on CRC/cell errors or an unknown cell id.
 */
static int parse_eeprom (struct net_device *dev)
{
	int i, j;
	long ioaddr = dev->base_addr;
	u8 sromdata[256];
	u8 *psib;
	u32 crc;
	PSROM_t psrom = (PSROM_t) sromdata;
	struct netdev_private *np = netdev_priv(dev);

	int cid, next;

#ifdef	MEM_MAPPING
	/* EEPROM access must go through the I/O BAR even when the driver
	 * otherwise uses memory-mapped registers */
	ioaddr = pci_resource_start (np->pdev, 0);
#endif
	/* Read eeprom: 128 16-bit words, stored little-endian */
	for (i = 0; i < 128; i++) {
		((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom (ioaddr, i));
	}
#ifdef	MEM_MAPPING
	ioaddr = dev->base_addr;
#endif
	if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) {	/* D-Link Only */
		/* Check CRC */
		crc = ~ether_crc_le (256 - 4, sromdata);
		if (psrom->crc != cpu_to_le32(crc)) {
			printk (KERN_ERR "%s: EEPROM data CRC error.\n",
				dev->name);
			return -1;
		}
	}

	/* Set MAC address */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = psrom->mac_addr[i];

	if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) {
		return 0;
	}

	/* Parse Software Information Block: a chain of (cell id, next
	 * offset) pairs starting at offset 0x30 */
	i = 0x30;
	psib = (u8 *) sromdata;
	do {
		cid = psib[i++];
		next = psib[i++];
		/* all-zero or all-ones pairs indicate corrupt cell data */
		if ((cid == 0 && next == 0) || (cid == 0xff && next == 0xff)) {
			printk (KERN_ERR "Cell data error\n");
			return -1;
		}
		switch (cid) {
		case 0:	/* Format version */
			break;
		case 1:	/* End of cell */
			return 0;
		case 2:	/* Duplex Polarity */
			np->duplex_polarity = psib[i];
			writeb (readb (ioaddr + PhyCtrl) | psib[i],
				ioaddr + PhyCtrl);
			break;
		case 3:	/* Wake Polarity */
			np->wake_polarity = psib[i];
			break;
		case 9:	/* Adapter description (capped at 255 bytes) */
			j = (next - i > 255) ? 255 : next - i;
			memcpy (np->name, &(psib[i]), j);
			break;
		case 4:
		case 5:
		case 6:
		case 7:
		case 8:	/* Reversed */
			break;
		default:	/* Unknown cell */
			return -1;
		}
		i = next;
	} while (1);

	return 0;
}
/******************************************************************************
    Network device configuration functions
******************************************************************************/
/*
 * islpci_alloc_memory - map device registers and set up all DMA memory.
 * @priv: per-device private state.
 *
 * Maps the PCI register window, allocates the consistent DMA region
 * holding the control block and power-save buffer, zeroes the fragment
 * pointers, fills the management RX queue, and allocates/maps the data
 * RX skbs.  Returns 0 on success, -1 on failure (partially-acquired
 * resources are released via islpci_free_memory()).
 */
static int
islpci_alloc_memory(islpci_private *priv)
{
	int counter;

#if VERBOSE > SHOW_ERROR_MESSAGES
	printk(KERN_DEBUG "islpci_alloc_memory\n");
#endif

	/* remap the PCI device base address to accessable */
	if (!(priv->device_base =
	      ioremap(pci_resource_start(priv->pdev, 0),
		      ISL38XX_PCI_MEM_SIZE))) {
		/* error in remapping the PCI device memory address range */
		printk(KERN_ERR "PCI memory remapping failed \n");
		return -1;
	}

	/* memory layout for consistent DMA region:
	 *
	 * Area 1: Control Block for the device interface
	 * Area 2: Power Save Mode Buffer for temporary frame storage. Be aware that
	 *         the number of supported stations in the AP determines the minimal
	 *         size of the buffer !
	 */

	/* perform the allocation */
	priv->driver_mem_address = pci_alloc_consistent(priv->pdev,
							HOST_MEM_BLOCK,
							&priv->
							device_host_address);

	if (!priv->driver_mem_address) {
		/* error allocating the block of PCI memory */
		printk(KERN_ERR "%s: could not allocate DMA memory, aborting!",
		       "prism54");
		return -1;
	}

	/* assign the Control Block to the first address of the allocated area */
	priv->control_block =
	    (isl38xx_control_block *) priv->driver_mem_address;

	/* set the Power Save Buffer pointer directly behind the CB */
	priv->device_psm_buffer =
		priv->device_host_address + CONTROL_BLOCK_SIZE;

	/* make sure all buffer pointers are initialized */
	for (counter = 0; counter < ISL38XX_CB_QCOUNT; counter++) {
		priv->control_block->driver_curr_frag[counter] = cpu_to_le32(0);
		priv->control_block->device_curr_frag[counter] = cpu_to_le32(0);
	}

	priv->index_mgmt_rx = 0;
	memset(priv->mgmt_rx, 0, sizeof(priv->mgmt_rx));
	memset(priv->mgmt_tx, 0, sizeof(priv->mgmt_tx));

	/* allocate rx queue for management frames */
	if (islpci_mgmt_rx_fill(priv->ndev) < 0)
		goto out_free;

	/* now get the data rx skb's */
	memset(priv->data_low_rx, 0, sizeof (priv->data_low_rx));
	memset(priv->pci_map_rx_address, 0, sizeof (priv->pci_map_rx_address));

	for (counter = 0; counter < ISL38XX_CB_RX_QSIZE; counter++) {
		struct sk_buff *skb;

		/* allocate an sk_buff for received data frames storage
		 * each frame on receive size consists of 1 fragment
		 * include any required allignment operations */
		if (!(skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2))) {
			/* error allocating an sk_buff structure elements */
			printk(KERN_ERR "Error allocating skb.\n");
			skb = NULL;
			goto out_free;
		}
		/* align the data start on a 4-byte boundary */
		skb_reserve(skb, (4 - (long) skb->data) & 0x03);
		/* add the new allocated sk_buff to the buffer array */
		priv->data_low_rx[counter] = skb;

		/* map the allocated skb data area to pci */
		priv->pci_map_rx_address[counter] =
		    pci_map_single(priv->pdev, (void *) skb->data,
				   MAX_FRAGMENT_SIZE_RX + 2,
				   PCI_DMA_FROMDEVICE);
		if (!priv->pci_map_rx_address[counter]) {
			/* error mapping the buffer to device
			   accessable memory address */
			printk(KERN_ERR "failed to map skb DMA'able\n");
			goto out_free;
		}
	}

	prism54_acl_init(&priv->acl);
	prism54_wpa_bss_ie_init(priv);
	if (mgt_init(priv))
		goto out_free;

	return 0;
 out_free:
	islpci_free_memory(priv);
	return -1;
}
/*
 * dwc3_pci_probe - PCI glue probe for the DesignWare USB3 (dwc3) core.
 * @pci: the PCI device being probed
 * @id:  matched entry from the driver's id table
 *
 * Enables the PCI device, then creates a "dwc3" platform device carrying
 * the PCI BAR0 memory range and IRQ as resources, inheriting the PCI
 * device's DMA masks.  Returns 0 on success or a negative errno; the
 * err0..err4 labels unwind in reverse order of acquisition.
 */
static int __devinit dwc3_pci_probe(struct pci_dev *pci,
		const struct pci_device_id *id)
{
	struct resource		res[2];
	struct platform_device	*dwc3;
	struct dwc3_pci		*glue;
	int			ret = -ENOMEM;
	int			devid;

	glue = kzalloc(sizeof(*glue), GFP_KERNEL);
	if (!glue) {
		dev_err(&pci->dev, "not enough memory\n");
		goto err0;
	}

	glue->dev	= &pci->dev;

	ret = pci_enable_device(pci);
	if (ret) {
		dev_err(&pci->dev, "failed to enable pci device\n");
		goto err1;
	}

	pci_set_power_state(pci, PCI_D0);
	pci_set_master(pci);

	/* reserve a unique id for the child platform device */
	devid = dwc3_get_device_id();
	if (devid < 0)
		goto err2;

	dwc3 = platform_device_alloc("dwc3", devid);
	if (!dwc3) {
		dev_err(&pci->dev, "couldn't allocate dwc3 device\n");
		goto err3;
	}

	memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));

	/* res[0]: register window from BAR0, res[1]: the PCI interrupt */
	res[0].start	= pci_resource_start(pci, 0);
	res[0].end	= pci_resource_end(pci, 0);
	res[0].name	= "dwc_usb3";
	res[0].flags	= IORESOURCE_MEM;

	res[1].start	= pci->irq;
	res[1].name	= "dwc_usb3";
	res[1].flags	= IORESOURCE_IRQ;

	ret = platform_device_add_resources(dwc3, res, ARRAY_SIZE(res));
	if (ret) {
		dev_err(&pci->dev, "couldn't add resources to dwc3 device\n");
		goto err4;
	}

	pci_set_drvdata(pci, glue);

	/* child device DMA settings mirror the PCI parent's */
	dma_set_coherent_mask(&dwc3->dev, pci->dev.coherent_dma_mask);

	dwc3->dev.dma_mask = pci->dev.dma_mask;
	dwc3->dev.dma_parms = pci->dev.dma_parms;
	dwc3->dev.parent = &pci->dev;
	glue->dwc3	= dwc3;

	ret = platform_device_add(dwc3);
	if (ret) {
		dev_err(&pci->dev, "failed to register dwc3 device\n");
		goto err4;
	}

	return 0;

err4:
	pci_set_drvdata(pci, NULL);
	platform_device_put(dwc3);

err3:
	dwc3_put_device_id(devid);

err2:
	pci_disable_device(pci);

err1:
	kfree(glue);

err0:
	return ret;
}
/*
 * snd_nm256_create - create and initialise a NeoMagic NM256 chip instance.
 * @card:     the ALSA card this chip belongs to
 * @pci:      the underlying PCI device
 * @chip_ret: output; receives the new chip on success
 *
 * Maps the control port (BAR1), detects the chip variant from
 * card->driver ("NM256AV" vs. ZX), sizes and maps the ring buffer at the
 * top of the video memory aperture (BAR0), and registers the low-level
 * ALSA device.  Returns 0 on success or a negative errno; __error path
 * frees everything via snd_nm256_free().
 */
static int __devinit snd_nm256_create(struct snd_card *card,
				      struct pci_dev *pci,
				      struct nm256 **chip_ret)
{
	struct nm256 *chip;
	int err, pval;
	static struct snd_device_ops ops = {
		.dev_free =	snd_nm256_dev_free,
	};
	u32 addr;

	*chip_ret = NULL;

	if ((err = pci_enable_device(pci)) < 0)
		return err;

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (chip == NULL) {
		pci_disable_device(pci);
		return -ENOMEM;
	}

	chip->card = card;
	chip->pci = pci;
	chip->use_cache = use_cache;	/* module parameter: coefficient cache */
	spin_lock_init(&chip->reg_lock);
	chip->irq = -1;			/* no IRQ acquired yet */
	mutex_init(&chip->irq_mutex);

	/* buffer sizes come from module parameters, in KB */
	chip->streams[SNDRV_PCM_STREAM_PLAYBACK].bufsize = playback_bufsize * 1024;
	chip->streams[SNDRV_PCM_STREAM_CAPTURE].bufsize = capture_bufsize * 1024;

	/* BAR0 = audio ring buffer aperture, BAR1 = control port */
	chip->buffer_addr = pci_resource_start(pci, 0);
	chip->cport_addr = pci_resource_start(pci, 1);

	/* Init the memory port info.  */
	chip->res_cport = request_mem_region(chip->cport_addr, NM_PORT2_SIZE,
					     card->driver);
	if (chip->res_cport == NULL) {
		snd_printk(KERN_ERR "memory region 0x%lx (size 0x%x) busy\n",
			   chip->cport_addr, NM_PORT2_SIZE);
		err = -EBUSY;
		goto __error;
	}
	chip->cport = ioremap_nocache(chip->cport_addr, NM_PORT2_SIZE);
	if (chip->cport == NULL) {
		snd_printk(KERN_ERR "unable to map control port %lx\n",
			   chip->cport_addr);
		err = -ENOMEM;
		goto __error;
	}

	if (!strcmp(card->driver, "NM256AV")) {
		/* AV variant: verify an AC97 codec is actually present
		 * unless the user forces the load via force_ac97=1 */
		pval = snd_nm256_readw(chip, NM_MIXER_PRESENCE);
		if ((pval & NM_PRESENCE_MASK) != NM_PRESENCE_VALUE) {
			if (! force_ac97) {
				printk(KERN_ERR "nm256: no ac97 is found!\n");
				printk(KERN_ERR "  force the driver to load by "
				       "passing in the module parameter\n");
				printk(KERN_ERR "    force_ac97=1\n");
				printk(KERN_ERR "  or try sb16, opl3sa2, or "
				       "cs423x drivers instead.\n");
				err = -ENXIO;
				goto __error;
			}
		}
		chip->buffer_end = 2560 * 1024;
		chip->interrupt = snd_nm256_interrupt;
		chip->mixer_status_offset = NM_MIXER_STATUS_OFFSET;
		chip->mixer_status_mask = NM_MIXER_READY_MASK;
	} else {
		/* ZX variant: probe register 0xa0b to pick 6 MB vs. 4 MB
		 * aperture end — NOTE(review): register meaning inferred
		 * from usage only, confirm against NM256ZX docs */
		if (snd_nm256_readb(chip, 0xa0b) != 0)
			chip->buffer_end = 6144 * 1024;
		else
			chip->buffer_end = 4096 * 1024;

		chip->interrupt = snd_nm256_interrupt_zx;
		chip->mixer_status_offset = NM2_MIXER_STATUS_OFFSET;
		chip->mixer_status_mask = NM2_MIXER_READY_MASK;
	}

	/* total = playback + capture buffers (+ coefficient area) */
	chip->buffer_size = chip->streams[SNDRV_PCM_STREAM_PLAYBACK].bufsize +
		chip->streams[SNDRV_PCM_STREAM_CAPTURE].bufsize;
	if (chip->use_cache)
		chip->buffer_size += NM_TOTAL_COEFF_COUNT * 4;
	else
		chip->buffer_size += NM_MAX_PLAYBACK_COEF_SIZE +
			NM_MAX_RECORD_COEF_SIZE;

	/* honour an explicit buffer_top parameter, otherwise detect the
	 * usable top of memory by signature */
	if (buffer_top >= chip->buffer_size && buffer_top < chip->buffer_end)
		chip->buffer_end = buffer_top;
	else {
		/* get buffer end pointer from signature */
		if ((err = snd_nm256_peek_for_sig(chip)) < 0)
			goto __error;
	}

	chip->buffer_start = chip->buffer_end - chip->buffer_size;
	chip->buffer_addr += chip->buffer_start;

	printk(KERN_INFO "nm256: Mapping port 1 from 0x%x - 0x%x\n",
	       chip->buffer_start, chip->buffer_end);

	chip->res_buffer = request_mem_region(chip->buffer_addr,
					      chip->buffer_size,
					      card->driver);
	if (chip->res_buffer == NULL) {
		snd_printk(KERN_ERR "nm256: buffer 0x%lx (size 0x%x) busy\n",
			   chip->buffer_addr, chip->buffer_size);
		err = -EBUSY;
		goto __error;
	}
	chip->buffer = ioremap_nocache(chip->buffer_addr, chip->buffer_size);
	if (chip->buffer == NULL) {
		err = -ENOMEM;
		snd_printk(KERN_ERR "unable to map ring buffer at %lx\n",
			   chip->buffer_addr);
		goto __error;
	}

	/* set offsets of the individual sub-buffers */
	addr = chip->buffer_start;
	chip->streams[SNDRV_PCM_STREAM_PLAYBACK].buf = addr;
	addr += chip->streams[SNDRV_PCM_STREAM_PLAYBACK].bufsize;
	chip->streams[SNDRV_PCM_STREAM_CAPTURE].buf = addr;
	addr += chip->streams[SNDRV_PCM_STREAM_CAPTURE].bufsize;
	if (chip->use_cache) {
		chip->all_coeff_buf = addr;
	} else {
		chip->coeff_buf[SNDRV_PCM_STREAM_PLAYBACK] = addr;
		addr += NM_MAX_PLAYBACK_COEF_SIZE;
		chip->coeff_buf[SNDRV_PCM_STREAM_CAPTURE] = addr;
	}

	/* Fixed setting. */
	chip->mixer_base = NM_MIXER_OFFSET;

	chip->coeffs_current = 0;

	snd_nm256_init_chip(chip);

	/* register the chip as an ALSA low-level device */
	if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0)
		goto __error;

	snd_card_set_dev(card, &pci->dev);

	*chip_ret = chip;
	return 0;

__error:
	snd_nm256_free(chip);
	return err;
}
/*
 * mite_setup2 - initialise the NI MITE PCI interface chip.
 * @mite:          MITE state; mite->pcidev must be set by the caller.
 * @use_iodwbsr_1: non-zero to program the I/O window via the
 *                 IODWBSR_1/IODWCR_1 register pair instead of IODWBSR.
 *
 * Enables the PCI device, maps the MITE register window (BAR0) and the
 * DAQ register window (BAR1), programs the I/O window registers, enables
 * DMA bursts, resets all DMA channels and masks their interrupts.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): failure paths after comedi_pci_enable()/ioremap() return
 * without unmapping what was already mapped — presumably the caller's
 * teardown (mite_unsetup or similar) handles that; confirm.
 */
int mite_setup2(struct mite_struct *mite, unsigned use_iodwbsr_1)
{
	unsigned long length;
	resource_size_t addr;
	int i;
	u32 csigr_bits;
	unsigned unknown_dma_burst_bits;

	if (comedi_pci_enable(mite->pcidev, "mite")) {
		printk(KERN_ERR "error enabling mite and requesting io regions\n");
		return -EIO;
	}
	pci_set_master(mite->pcidev);

	/* BAR0: the MITE chip's own register window */
	addr = pci_resource_start(mite->pcidev, 0);
	mite->mite_phys_addr = addr;
	mite->mite_io_addr = ioremap(addr, PCI_MITE_SIZE);
	if (!mite->mite_io_addr) {
		printk(KERN_ERR "Failed to remap mite io memory address\n");
		return -ENOMEM;
	}
	printk(KERN_INFO "MITE:0x%08llx mapped to %p ",
	       (unsigned long long)mite->mite_phys_addr, mite->mite_io_addr);

	/* BAR1: the DAQ device window behind the MITE */
	addr = pci_resource_start(mite->pcidev, 1);
	mite->daq_phys_addr = addr;
	length = pci_resource_len(mite->pcidev, 1);

	/*
	 * In case of a 660x board, DAQ size is 8k instead of 4k
	 * (see as shown by lspci output)
	 */
	mite->daq_io_addr = ioremap(mite->daq_phys_addr, length);
	if (!mite->daq_io_addr) {
		printk(KERN_ERR "Failed to remap daq io memory address\n");
		return -ENOMEM;
	}
	printk(KERN_INFO "DAQ:0x%08llx mapped to %p\n",
	       (unsigned long long)mite->daq_phys_addr, mite->daq_io_addr);

	/* point the MITE's I/O window at the DAQ registers */
	if (use_iodwbsr_1) {
		writel(0, mite->mite_io_addr + MITE_IODWBSR);
		printk(KERN_INFO "mite: using I/O Window Base Size register 1\n");
		writel(mite->daq_phys_addr | WENAB |
		       MITE_IODWBSR_1_WSIZE_bits(length),
		       mite->mite_io_addr + MITE_IODWBSR_1);
		writel(0, mite->mite_io_addr + MITE_IODWCR_1);
	} else {
		writel(mite->daq_phys_addr | WENAB,
		       mite->mite_io_addr + MITE_IODWBSR);
	}

	/*
	 * make sure dma bursts work. I got this from running a bus analyzer
	 * on a pxi-6281 and a pxi-6713. 6713 powered up with register value
	 * of 0x61f and bursts worked. 6281 powered up with register value of
	 * 0x1f and bursts didn't work. The NI windows driver reads the
	 * register, then does a bitwise-or of 0x600 with it and writes it back.
	 */
	unknown_dma_burst_bits =
	    readl(mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);
	unknown_dma_burst_bits |= UNKNOWN_DMA_BURST_ENABLE_BITS;
	writel(unknown_dma_burst_bits,
	       mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);

	/* chip signature reports the DMA channel count; clamp defensively */
	csigr_bits = readl(mite->mite_io_addr + MITE_CSIGR);
	mite->num_channels = mite_csigr_dmac(csigr_bits);
	if (mite->num_channels > MAX_MITE_DMA_CHANNELS) {
		printk(KERN_WARNING "mite: bug? chip claims to have %i dma "
		       "channels. Setting to %i.\n",
		       mite->num_channels, MAX_MITE_DMA_CHANNELS);
		mite->num_channels = MAX_MITE_DMA_CHANNELS;
	}
	dump_chip_signature(csigr_bits);
	/* reset every channel and mask all of its interrupt sources */
	for (i = 0; i < mite->num_channels; i++) {
		writel(CHOR_DMARESET, mite->mite_io_addr + MITE_CHOR(i));
		/* disable interrupts */
		writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
		       CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
		       CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
		       mite->mite_io_addr + MITE_CHCR(i));
	}
	mite->fifo_size = mite_fifo_size(mite, 0);
	printk(KERN_INFO "mite: fifo size is %i.\n", mite->fifo_size);
	mite->used = 1;
	return 0;
}
/*
 * setup_sedlbauer() - probe and configure a Sedlbauer ISDN card (HiSax).
 * @card: HiSax card descriptor; card->cs is the state being filled in.
 *
 * Supports ISA, PCMCIA, ISAPnP and PCI variants.  Resources come from
 * card->para[] (manual config / PCMCIA), from ISAPnP enumeration, or
 * from PCI probing, in that order.  After resources are known the chip
 * type (IPAC, ISAC+HSCX or ISAC+ISAR) is detected and the read/write/
 * interrupt function pointers are installed.
 *
 * Returns 1 on success, 0 on any failure (HiSax convention).
 */
int __devinit setup_sedlbauer(struct IsdnCard *card)
{
	int bytecnt, ver, val;
	struct IsdnCardState *cs = card->cs;
	char tmp[64];
	u16 sub_vendor_id, sub_id;

	strcpy(tmp, Sedlbauer_revision);
	printk(KERN_INFO "HiSax: Sedlbauer driver Rev. %s\n", HiSax_getrev(tmp));

	/* Select bus/chip defaults from the configured card type. */
	if (cs->typ == ISDN_CTYPE_SEDLBAUER) {
		cs->subtyp = SEDL_SPEED_CARD_WIN;
		cs->hw.sedl.bus = SEDL_BUS_ISA;
		cs->hw.sedl.chip = SEDL_CHIP_TEST;
	} else if (cs->typ == ISDN_CTYPE_SEDLBAUER_PCMCIA) {
		cs->subtyp = SEDL_SPEED_STAR;
		cs->hw.sedl.bus = SEDL_BUS_PCMCIA;
		cs->hw.sedl.chip = SEDL_CHIP_TEST;
	} else if (cs->typ == ISDN_CTYPE_SEDLBAUER_FAX) {
		cs->subtyp = SEDL_SPEED_FAX;
		cs->hw.sedl.bus = SEDL_BUS_ISA;
		cs->hw.sedl.chip = SEDL_CHIP_ISAC_ISAR;
	} else
		return (0);

	bytecnt = 8;	/* default I/O region size; widened for ISAR/PCI */
	if (card->para[1]) {
		/* Explicit resources supplied (module params or PCMCIA). */
		cs->hw.sedl.cfg_reg = card->para[1];
		cs->irq = card->para[0];
		if (cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) {
			bytecnt = 16;
		}
	} else {
#ifdef __ISAPNP__
		/* Try ISAPnP enumeration first; `ipid`/`pnp_c` are
		 * file-scope cursors over the supported-ID table. */
		if (isapnp_present()) {
			struct pnp_dev *pnp_d;
			while(ipid->card_vendor) {
				if ((pnp_c = pnp_find_card(ipid->card_vendor,
					ipid->card_device, pnp_c))) {
					pnp_d = NULL;
					if ((pnp_d = pnp_find_dev(pnp_c,
						ipid->vendor, ipid->function, pnp_d))) {
						int err;

						printk(KERN_INFO "HiSax: %s detected\n",
							(char *)ipid->driver_data);
						/* Re-activate to get fresh resources. */
						pnp_disable_dev(pnp_d);
						err = pnp_activate_dev(pnp_d);
						if (err<0) {
							printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
								__FUNCTION__, err);
							return(0);
						}
						card->para[1] = pnp_port_start(pnp_d, 0);
						card->para[0] = pnp_irq(pnp_d, 0);

						if (!card->para[0] || !card->para[1]) {
							printk(KERN_ERR "Sedlbauer PnP:some resources are missing %ld/%lx\n",
								card->para[0], card->para[1]);
							pnp_disable_dev(pnp_d);
							return(0);
						}
						cs->hw.sedl.cfg_reg = card->para[1];
						cs->irq = card->para[0];
						/* Function 0x2 is the fax (ISAR) variant. */
						if (ipid->function == ISAPNP_FUNCTION(0x2)) {
							cs->subtyp = SEDL_SPEED_FAX;
							cs->hw.sedl.chip = SEDL_CHIP_ISAC_ISAR;
							bytecnt = 16;
						} else {
							cs->subtyp = SEDL_SPEED_CARD_WIN;
							cs->hw.sedl.chip = SEDL_CHIP_TEST;
						}
						goto ready;
					} else {
						printk(KERN_ERR "Sedlbauer PnP: PnP error card found, no device\n");
						return(0);
					}
				}
				ipid++;
				pnp_c = NULL;
			}
			if (!ipid->card_vendor) {
				printk(KERN_INFO "Sedlbauer PnP: no ISAPnP card found\n");
			}
		}
#endif
/* Probe for Sedlbauer speed pci */
#ifdef CONFIG_PCI
		if ((dev_sedl = pci_find_device(PCI_VENDOR_ID_TIGERJET,
				PCI_DEVICE_ID_TIGERJET_100, dev_sedl))) {
			if (pci_enable_device(dev_sedl))
				return(0);
			cs->irq = dev_sedl->irq;
			if (!cs->irq) {
				printk(KERN_WARNING "Sedlbauer: No IRQ for PCI card found\n");
				return(0);
			}
			cs->hw.sedl.cfg_reg = pci_resource_start(dev_sedl, 0);
		} else {
			printk(KERN_WARNING "Sedlbauer: No PCI card found\n");
			return(0);
		}
		cs->irq_flags |= SA_SHIRQ;
		cs->hw.sedl.bus = SEDL_BUS_PCI;
		/* Subsystem IDs distinguish the concrete PCI variant. */
		sub_vendor_id = dev_sedl->subsystem_vendor;
		sub_id = dev_sedl->subsystem_device;
		printk(KERN_INFO "Sedlbauer: PCI subvendor:%x subid %x\n",
			sub_vendor_id, sub_id);
		printk(KERN_INFO "Sedlbauer: PCI base adr %#x\n",
			cs->hw.sedl.cfg_reg);
		if (sub_id != PCI_SUB_ID_SEDLBAUER) {
			printk(KERN_ERR "Sedlbauer: unknown sub id %#x\n", sub_id);
			return(0);
		}
		if (sub_vendor_id == PCI_SUBVENDOR_SPEEDFAX_PYRAMID) {
			cs->hw.sedl.chip = SEDL_CHIP_ISAC_ISAR;
			cs->subtyp = SEDL_SPEEDFAX_PYRAMID;
		} else if (sub_vendor_id == PCI_SUBVENDOR_SPEEDFAX_PCI) {
			cs->hw.sedl.chip = SEDL_CHIP_ISAC_ISAR;
			cs->subtyp = SEDL_SPEEDFAX_PCI;
		} else if (sub_vendor_id == PCI_SUBVENDOR_HST_SAPHIR3) {
			cs->hw.sedl.chip = SEDL_CHIP_IPAC;
			cs->subtyp = HST_SAPHIR3;
		} else if (sub_vendor_id == PCI_SUBVENDOR_SEDLBAUER_PCI) {
			cs->hw.sedl.chip = SEDL_CHIP_IPAC;
			cs->subtyp = SEDL_SPEED_PCI;
		} else {
			printk(KERN_ERR "Sedlbauer: unknown sub vendor id %#x\n",
				sub_vendor_id);
			return(0);
		}
		bytecnt = 256;
		cs->hw.sedl.reset_on = SEDL_ISAR_PCI_ISAR_RESET_ON;
		cs->hw.sedl.reset_off = SEDL_ISAR_PCI_ISAR_RESET_OFF;
		/* Magic init/reset sequence for the PCI bridge; values are
		 * taken from the vendor driver — do not reorder. */
		byteout(cs->hw.sedl.cfg_reg, 0xff);
		byteout(cs->hw.sedl.cfg_reg, 0x00);
		byteout(cs->hw.sedl.cfg_reg+ 2, 0xdd);
		byteout(cs->hw.sedl.cfg_reg+ 5, 0x02);
		byteout(cs->hw.sedl.cfg_reg +3, cs->hw.sedl.reset_on);
		mdelay(2);
		byteout(cs->hw.sedl.cfg_reg +3, cs->hw.sedl.reset_off);
		mdelay(10);
#else
		printk(KERN_WARNING "Sedlbauer: NO_PCI_BIOS\n");
		return (0);
#endif /* CONFIG_PCI */
	}
ready:
	/* In case of the sedlbauer pcmcia card, this region is in use,
	 * reserved for us by the card manager. So we do not check it
	 * here, it would fail.
	 */
	if (cs->hw.sedl.bus != SEDL_BUS_PCMCIA &&
		!request_region(cs->hw.sedl.cfg_reg, bytecnt, "sedlbauer isdn")) {
		printk(KERN_WARNING
			"HiSax: %s config port %x-%x already in use\n",
			CardType[card->typ],
			cs->hw.sedl.cfg_reg,
			cs->hw.sedl.cfg_reg + bytecnt);
		return (0);
	}

	printk(KERN_INFO
	       "Sedlbauer: defined at 0x%x-0x%x IRQ %d\n",
	       cs->hw.sedl.cfg_reg,
	       cs->hw.sedl.cfg_reg + bytecnt,
	       cs->irq);

	cs->BC_Read_Reg = &ReadHSCX;
	cs->BC_Write_Reg = &WriteHSCX;
	cs->BC_Send_Data = &hscx_fill_fifo;
	cs->cardmsg = &Sedl_card_msg;

/*
 * testing ISA and PCMCIA Cards for IPAC, default is ISAC
 * do not test for PCI card, because ports are different
 * and PCI card uses only IPAC (for the moment)
 */
	if (cs->hw.sedl.bus != SEDL_BUS_PCI) {
		val = readreg(cs->hw.sedl.cfg_reg + SEDL_IPAC_ANY_ADR,
			cs->hw.sedl.cfg_reg + SEDL_IPAC_ANY_IPAC, IPAC_ID);
		printk(KERN_DEBUG "Sedlbauer: testing IPAC version %x\n", val);
		if ((val == 1) || (val == 2)) {
			/* IPAC */
			cs->subtyp = SEDL_SPEED_WIN2_PC104;
			if (cs->hw.sedl.bus == SEDL_BUS_PCMCIA) {
				cs->subtyp = SEDL_SPEED_STAR2;
			}
			cs->hw.sedl.chip = SEDL_CHIP_IPAC;
		} else {
			/* ISAC_HSCX oder ISAC_ISAR */
			if (cs->hw.sedl.chip == SEDL_CHIP_TEST) {
				cs->hw.sedl.chip = SEDL_CHIP_ISAC_HSCX;
			}
		}
	}

/*
 * hw.sedl.chip is now properly set
 */
	printk(KERN_INFO "Sedlbauer: %s detected\n",
		Sedlbauer_Types[cs->subtyp]);

	setup_isac(cs);
	/* Wire up register accessors according to the detected chipset. */
	if (cs->hw.sedl.chip == SEDL_CHIP_IPAC) {
		if (cs->hw.sedl.bus == SEDL_BUS_PCI) {
			cs->hw.sedl.adr = cs->hw.sedl.cfg_reg + SEDL_IPAC_PCI_ADR;
			cs->hw.sedl.isac = cs->hw.sedl.cfg_reg + SEDL_IPAC_PCI_IPAC;
			cs->hw.sedl.hscx = cs->hw.sedl.cfg_reg + SEDL_IPAC_PCI_IPAC;
		} else {
			cs->hw.sedl.adr = cs->hw.sedl.cfg_reg + SEDL_IPAC_ANY_ADR;
			cs->hw.sedl.isac = cs->hw.sedl.cfg_reg + SEDL_IPAC_ANY_IPAC;
			cs->hw.sedl.hscx = cs->hw.sedl.cfg_reg + SEDL_IPAC_ANY_IPAC;
		}
		test_and_set_bit(HW_IPAC, &cs->HW_Flags);
		cs->readisac = &ReadISAC_IPAC;
		cs->writeisac = &WriteISAC_IPAC;
		cs->readisacfifo = &ReadISACfifo_IPAC;
		cs->writeisacfifo = &WriteISACfifo_IPAC;
		cs->irq_func = &sedlbauer_interrupt_ipac;
		val = readreg(cs->hw.sedl.adr, cs->hw.sedl.isac, IPAC_ID);
		printk(KERN_INFO "Sedlbauer: IPAC version %x\n", val);
	} else {
		/* ISAC_HSCX oder ISAC_ISAR */
		cs->readisac = &ReadISAC;
		cs->writeisac = &WriteISAC;
		cs->readisacfifo = &ReadISACfifo;
		cs->writeisacfifo = &WriteISACfifo;
		if (cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) {
			if (cs->hw.sedl.bus == SEDL_BUS_PCI) {
				cs->hw.sedl.adr = cs->hw.sedl.cfg_reg +
							SEDL_ISAR_PCI_ADR;
				cs->hw.sedl.isac = cs->hw.sedl.cfg_reg +
							SEDL_ISAR_PCI_ISAC;
				cs->hw.sedl.hscx = cs->hw.sedl.cfg_reg +
							SEDL_ISAR_PCI_ISAR;
			} else {
				cs->hw.sedl.adr = cs->hw.sedl.cfg_reg +
							SEDL_ISAR_ISA_ADR;
				cs->hw.sedl.isac = cs->hw.sedl.cfg_reg +
							SEDL_ISAR_ISA_ISAC;
				cs->hw.sedl.hscx = cs->hw.sedl.cfg_reg +
							SEDL_ISAR_ISA_ISAR;
				cs->hw.sedl.reset_on = cs->hw.sedl.cfg_reg +
							SEDL_ISAR_ISA_ISAR_RESET_ON;
				cs->hw.sedl.reset_off = cs->hw.sedl.cfg_reg +
							SEDL_ISAR_ISA_ISAR_RESET_OFF;
			}
			cs->bcs[0].hw.isar.reg = &cs->hw.sedl.isar;
			cs->bcs[1].hw.isar.reg = &cs->hw.sedl.isar;
			test_and_set_bit(HW_ISAR, &cs->HW_Flags);
			cs->irq_func = &sedlbauer_interrupt_isar;
			cs->auxcmd = &isar_auxcmd;
			ISACVersion(cs, "Sedlbauer:");
			cs->BC_Read_Reg = &ReadISAR;
			cs->BC_Write_Reg = &WriteISAR;
			cs->BC_Send_Data = &isar_fill_fifo;
			/* Retry ISAR version probe up to 3 times, with a
			 * card reset between attempts (bytecnt reused as
			 * the retry counter here). */
			bytecnt = 3;
			while (bytecnt) {
				ver = ISARVersion(cs, "Sedlbauer:");
				if (ver < 0)
					printk(KERN_WARNING
						"Sedlbauer: wrong ISAR version (ret = %d)\n", ver);
				else
					break;
				reset_sedlbauer(cs);
				bytecnt--;
			}
			if (!bytecnt) {
				release_io_sedlbauer(cs);
				return (0);
			}
		} else {
			if (cs->hw.sedl.bus == SEDL_BUS_PCMCIA) {
				cs->hw.sedl.adr = cs->hw.sedl.cfg_reg + SEDL_HSCX_PCMCIA_ADR;
				cs->hw.sedl.isac = cs->hw.sedl.cfg_reg + SEDL_HSCX_PCMCIA_ISAC;
				cs->hw.sedl.hscx = cs->hw.sedl.cfg_reg + SEDL_HSCX_PCMCIA_HSCX;
				cs->hw.sedl.reset_on = cs->hw.sedl.cfg_reg + SEDL_HSCX_PCMCIA_RESET;
				cs->hw.sedl.reset_off = cs->hw.sedl.cfg_reg + SEDL_HSCX_PCMCIA_RESET;
				cs->irq_flags |= SA_SHIRQ;
			} else {
				cs->hw.sedl.adr = cs->hw.sedl.cfg_reg + SEDL_HSCX_ISA_ADR;
				cs->hw.sedl.isac = cs->hw.sedl.cfg_reg + SEDL_HSCX_ISA_ISAC;
				cs->hw.sedl.hscx = cs->hw.sedl.cfg_reg + SEDL_HSCX_ISA_HSCX;
				cs->hw.sedl.reset_on = cs->hw.sedl.cfg_reg + SEDL_HSCX_ISA_RESET_ON;
				cs->hw.sedl.reset_off = cs->hw.sedl.cfg_reg + SEDL_HSCX_ISA_RESET_OFF;
			}
			cs->irq_func = &sedlbauer_interrupt;
			ISACVersion(cs, "Sedlbauer:");

			if (HscxVersion(cs, "Sedlbauer:")) {
				printk(KERN_WARNING
					"Sedlbauer: wrong HSCX versions check IO address\n");
				release_io_sedlbauer(cs);
				return (0);
			}
		}
	}
	return (1);
}
/**
 * usb_hcd_pci_probe - initialize PCI-based HCDs
 * @dev: USB Host Controller being probed
 * @id: pci hotplug id connecting controller to HCD framework
 * Context: !in_interrupt()
 *
 * Allocates basic PCI resources for this USB host controller, and
 * then invokes the start() method for the HCD associated with it
 * through the hotplug entry's driver_data.
 *
 * Store this function in the HCD's struct pci_driver as probe().
 *
 * Returns 0 on success, a negative errno on failure.  On failure the
 * PCI device is disabled again at the "done" label.  Cleanup uses
 * labels placed INSIDE earlier error branches (clean_1..clean_3) that
 * later failures jump back into — unusual, but intentional: each label
 * releases the resources acquired before it.
 */
int usb_hcd_pci_probe (struct pci_dev *dev, const struct pci_device_id *id)
{
	struct hc_driver	*driver;
	unsigned long		resource, len;
	void __iomem		*base;
	struct usb_hcd		*hcd;
	int			retval, region;
	char			buf [8], *bufp = buf;

	if (usb_disabled())
		return -ENODEV;

	if (!id || !(driver = (struct hc_driver *) id->driver_data))
		return -EINVAL;

	if (pci_enable_device (dev) < 0)
		return -ENODEV;
	dev->current_state = 0;
	dev->dev.power.power_state = 0;

	if (!dev->irq) {
		dev_err (&dev->dev,
			"Found HC with no IRQ.  Check BIOS/PCI %s setup!\n",
			pci_name(dev));
		retval = -ENODEV;
		goto done;
	}

	if (driver->flags & HCD_MEMORY) {	// EHCI, OHCI
		/* Memory-mapped controllers use BAR0. */
		region = 0;
		resource = pci_resource_start (dev, 0);
		len = pci_resource_len (dev, 0);
		if (!request_mem_region (resource, len,
				driver->description)) {
			dev_dbg (&dev->dev, "controller already in use\n");
			retval = -EBUSY;
			goto done;
		}
		base = ioremap_nocache (resource, len);
		if (base == NULL) {
			dev_dbg (&dev->dev, "error mapping memory\n");
			retval = -EFAULT;
clean_1:
			release_mem_region (resource, len);
			dev_err (&dev->dev, "init %s fail, %d\n",
				pci_name(dev), retval);
			goto done;
		}

	} else { 				// UHCI
		/* Port-I/O controllers: scan BARs for the first I/O region. */
		resource = len = 0;
		for (region = 0; region < PCI_ROM_RESOURCE; region++) {
			if (!(pci_resource_flags (dev, region) & IORESOURCE_IO))
				continue;
			resource = pci_resource_start (dev, region);
			len = pci_resource_len (dev, region);
			if (request_region (resource, len,
					driver->description))
				break;
		}
		if (region == PCI_ROM_RESOURCE) {
			dev_dbg (&dev->dev, "no i/o regions available\n");
			retval = -EBUSY;
			goto done;
		}
		base = (void __iomem *) resource;
	}

	// driver->reset(), later on, will transfer device from
	// control by SMM/BIOS to control by Linux (if needed)

	hcd = usb_create_hcd (driver);
	if (hcd == NULL){
		dev_dbg (&dev->dev, "hcd alloc fail\n");
		retval = -ENOMEM;
clean_2:
		if (driver->flags & HCD_MEMORY) {
			iounmap (base);
			goto clean_1;
		} else {
			release_region (resource, len);
			dev_err (&dev->dev, "init %s fail, %d\n",
				pci_name(dev), retval);
			goto done;
		}
	}
	// hcd zeroed everything
	hcd->regs = base;
	hcd->region = region;

	pci_set_drvdata (dev, hcd);
	hcd->self.bus_name = pci_name(dev);
#ifdef CONFIG_PCI_NAMES
	hcd->product_desc = dev->pretty_name;
#endif
	hcd->self.controller = &dev->dev;

	if ((retval = hcd_buffer_create (hcd)) != 0) {
clean_3:
		pci_set_drvdata (dev, NULL);
		usb_put_hcd (hcd);
		goto clean_2;
	}
	dev_info (hcd->self.controller, "%s\n", hcd->product_desc);

	/* till now HC has been in an indeterminate state ... */
	if (driver->reset && (retval = driver->reset (hcd)) < 0) {
		dev_err (hcd->self.controller, "can't reset\n");
		goto clean_3;
	}

	pci_set_master (dev);
#ifndef __sparc__
	sprintf (buf, "%d", dev->irq);
#else
	bufp = __irq_itoa(dev->irq);
#endif
	retval = request_irq (dev->irq, usb_hcd_irq, SA_SHIRQ,
				hcd->driver->description, hcd);
	if (retval != 0) {
		dev_err (hcd->self.controller,
				"request interrupt %s failed\n", bufp);
		goto clean_3;
	}
	hcd->irq = dev->irq;

	dev_info (hcd->self.controller, "irq %s, %s 0x%lx\n", bufp,
		(driver->flags & HCD_MEMORY) ? "pci mem" : "io base",
		resource);

	INIT_LIST_HEAD (&hcd->dev_list);

	usb_register_bus (&hcd->self);

	if ((retval = driver->start (hcd)) < 0) {
		dev_err (hcd->self.controller, "init error %d\n", retval);
		usb_hcd_pci_remove (dev);
	}

done:
	if (retval != 0)
		pci_disable_device (dev);
	return retval;
}
/*
 * mlx4_reset() - software-reset a Mellanox ConnectX HCA.
 * @dev: mlx4 device; dev->pdev is the underlying PCI device.
 *
 * The chip reboots on reset and loses its PCI config space, so we save
 * the first 256 bytes of config space beforehand and restore them after
 * the device reappears.  Sequence: save header, grab the HW flash
 * semaphore, poke the reset register, wait for the vendor ID to read
 * back sanely, then restore PCIe Device/Link Control, the BARs, and
 * PCI_COMMAND last (restoring COMMAND last re-enables decoding only
 * after the BARs are back).
 *
 * Returns 0 on success or a negative errno.
 */
int mlx4_reset(struct mlx4_dev *dev)
{
	void __iomem	*reset;
	u32		*hca_header = NULL;
	int		pcie_cap;
	u16		devctl;
	u16		linkctl;
	u16		vendor;
	unsigned long	end;
	u32		sem;
	int		i;
	int		err = 0;

#define MLX4_RESET_BASE		0xf0000
#define MLX4_RESET_SIZE		  0x400
#define MLX4_SEM_OFFSET		  0x3fc
#define MLX4_RESET_OFFSET	   0x10
#define MLX4_RESET_VALUE	swab32(1)

#define MLX4_SEM_TIMEOUT_JIFFIES	(10 * HZ)
#define MLX4_RESET_TIMEOUT_JIFFIES	(2 * HZ)

	/*
	 * Reset the chip.  This is somewhat ugly because we have to
	 * save off the PCI header before reset and then restore it
	 * after the chip reboots.  We skip config space offsets 22
	 * and 23 since those have a special meaning.
	 */

	/* Do we need to save off the full 4K PCI Express header?? */
	hca_header = kmalloc(256, GFP_KERNEL);
	if (!hca_header) {
		err = -ENOMEM;
		mlx4_err(dev, "Couldn't allocate memory to save HCA "
			  "PCI header, aborting.\n");
		goto out;
	}

	pcie_cap = pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);

	/* Save dwords 0..63 except 22/23 (see comment above). */
	for (i = 0; i < 64; ++i) {
		if (i == 22 || i == 23)
			continue;
		if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) {
			err = -ENODEV;
			mlx4_err(dev, "Couldn't save HCA "
				  "PCI header, aborting.\n");
			goto out;
		}
	}

	reset = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_RESET_BASE,
			MLX4_RESET_SIZE);
	if (!reset) {
		err = -ENOMEM;
		mlx4_err(dev, "Couldn't map HCA reset register, aborting.\n");
		goto out;
	}

	/* grab HW semaphore to lock out flash updates */
	end = jiffies + MLX4_SEM_TIMEOUT_JIFFIES;
	do {
		sem = readl(reset + MLX4_SEM_OFFSET);
		if (!sem)
			break;

		msleep(1);
	} while (time_before(jiffies, end));

	if (sem) {
		mlx4_err(dev, "Failed to obtain HW semaphore, aborting\n");
		err = -EAGAIN;
		iounmap(reset);
		goto out;
	}

	/* actually hit reset */
	writel(MLX4_RESET_VALUE, reset + MLX4_RESET_OFFSET);
	iounmap(reset);

	/* Docs say to wait one second before accessing device */
	msleep(1000);

	/* Poll the vendor ID until config reads succeed again. */
	end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES;
	do {
		if (!pci_read_config_word(dev->pdev, PCI_VENDOR_ID, &vendor) &&
		    vendor != 0xffff)
			break;

		msleep(1);
	} while (time_before(jiffies, end));

	if (vendor == 0xffff) {
		err = -ENODEV;
		mlx4_err(dev, "PCI device did not come back after reset, "
			  "aborting.\n");
		goto out;
	}

	/* Now restore the PCI headers */
	if (pcie_cap) {
		/* NOTE: reading u16 register images out of the saved u32
		 * array; the values were stored by dword reads above. */
		devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4];
		if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_DEVCTL,
					   devctl)) {
			err = -ENODEV;
			mlx4_err(dev, "Couldn't restore HCA PCI Express "
				 "Device Control register, aborting.\n");
			goto out;
		}
		linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
		if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_LNKCTL,
					   linkctl)) {
			err = -ENODEV;
			mlx4_err(dev, "Couldn't restore HCA PCI Express "
				 "Link control register, aborting.\n");
			goto out;
		}
	}

	/* Restore the first 16 dwords, deferring PCI_COMMAND. */
	for (i = 0; i < 16; ++i) {
		if (i * 4 == PCI_COMMAND)
			continue;

		if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) {
			err = -ENODEV;
			mlx4_err(dev, "Couldn't restore HCA reg %x, "
				  "aborting.\n", i);
			goto out;
		}
	}

	if (pci_write_config_dword(dev->pdev, PCI_COMMAND,
				   hca_header[PCI_COMMAND / 4])) {
		err = -ENODEV;
		mlx4_err(dev, "Couldn't restore HCA COMMAND, "
			  "aborting.\n");
		goto out;
	}

out:
	kfree(hca_header);

	return err;
}
/*
 * setup_sedlbauer_pci() - locate and configure the PCI Sedlbauer card.
 * @card: HiSax card descriptor; card->cs is filled with IRQ, base I/O
 *        address, chip type and subtype.
 *
 * Finds the TigerJet 100 based board, reads its subsystem IDs to pick
 * the concrete variant (Speedfax Pyramid/PCI, HST Saphir3, Speed PCI),
 * then runs the vendor reset sequence via port I/O.
 *
 * Returns 1 on success, 0 on failure (HiSax convention).
 */
static int __devinit setup_sedlbauer_pci(struct IsdnCard *card)
{
	struct IsdnCardState *cs = card->cs;
	u16 sub_vendor_id, sub_id;

	if ((dev_sedl = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET,
			PCI_DEVICE_ID_TIGERJET_100, dev_sedl))) {
		if (pci_enable_device(dev_sedl))
			return(0);
		cs->irq = dev_sedl->irq;
		if (!cs->irq) {
			printk(KERN_WARNING "Sedlbauer: No IRQ for PCI card found\n");
			return(0);
		}
		cs->hw.sedl.cfg_reg = pci_resource_start(dev_sedl, 0);
	} else {
		printk(KERN_WARNING "Sedlbauer: No PCI card found\n");
		return(0);
	}
	cs->irq_flags |= IRQF_SHARED;
	cs->hw.sedl.bus = SEDL_BUS_PCI;
	/* The subsystem IDs identify which Sedlbauer variant this is. */
	sub_vendor_id = dev_sedl->subsystem_vendor;
	sub_id = dev_sedl->subsystem_device;
	printk(KERN_INFO "Sedlbauer: PCI subvendor:%x subid %x\n",
		sub_vendor_id, sub_id);
	printk(KERN_INFO "Sedlbauer: PCI base adr %#x\n",
		cs->hw.sedl.cfg_reg);
	if (sub_id != PCI_SUB_ID_SEDLBAUER) {
		printk(KERN_ERR "Sedlbauer: unknown sub id %#x\n", sub_id);
		return(0);
	}
	if (sub_vendor_id == PCI_SUBVENDOR_SPEEDFAX_PYRAMID) {
		cs->hw.sedl.chip = SEDL_CHIP_ISAC_ISAR;
		cs->subtyp = SEDL_SPEEDFAX_PYRAMID;
	} else if (sub_vendor_id == PCI_SUBVENDOR_SPEEDFAX_PCI) {
		cs->hw.sedl.chip = SEDL_CHIP_ISAC_ISAR;
		cs->subtyp = SEDL_SPEEDFAX_PCI;
	} else if (sub_vendor_id == PCI_SUBVENDOR_HST_SAPHIR3) {
		cs->hw.sedl.chip = SEDL_CHIP_IPAC;
		cs->subtyp = HST_SAPHIR3;
	} else if (sub_vendor_id == PCI_SUBVENDOR_SEDLBAUER_PCI) {
		cs->hw.sedl.chip = SEDL_CHIP_IPAC;
		cs->subtyp = SEDL_SPEED_PCI;
	} else {
		printk(KERN_ERR "Sedlbauer: unknown sub vendor id %#x\n",
			sub_vendor_id);
		return(0);
	}

	cs->hw.sedl.reset_on = SEDL_ISAR_PCI_ISAR_RESET_ON;
	cs->hw.sedl.reset_off = SEDL_ISAR_PCI_ISAR_RESET_OFF;
	/* Vendor init/reset sequence; register meanings are undocumented
	 * magic from the reference driver — keep the order and delays. */
	byteout(cs->hw.sedl.cfg_reg, 0xff);
	byteout(cs->hw.sedl.cfg_reg, 0x00);
	byteout(cs->hw.sedl.cfg_reg+ 2, 0xdd);
	byteout(cs->hw.sedl.cfg_reg+ 5, 0); /* disable all IRQ */
	byteout(cs->hw.sedl.cfg_reg +3, cs->hw.sedl.reset_on);
	mdelay(2);
	byteout(cs->hw.sedl.cfg_reg +3, cs->hw.sedl.reset_off);
	mdelay(10);

	return (1);
}
/*
 * sil_init_one() - libata probe for Silicon Image SATA controllers.
 * @pdev: the PCI device being probed.
 * @ent:  matched ID table entry; ent->driver_data indexes sil_port_info.
 *
 * Enables the device, claims its regions, sets the DMA mask, builds an
 * ata_probe_ent describing the two ports (register offsets relative to
 * BAR5's MMIO mapping), and hands it to ata_device_add().  The
 * probe_ent is a transient descriptor and is freed after registration.
 *
 * Returns 0 on success or a negative errno; error paths unwind in
 * reverse order of acquisition.
 */
static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	struct ata_probe_ent *probe_ent = NULL;
	unsigned long base;
	void *mmio_base;
	int rc;

	if (!printed_version++)
		printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");

	/*
	 * If this driver happens to only be useful on Apple's K2, then
	 * we should check that here as it has a normal Serverworks ID
	 */
	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;

	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (probe_ent == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	memset(probe_ent, 0, sizeof(*probe_ent));
	INIT_LIST_HEAD(&probe_ent->node);
	probe_ent->pdev = pdev;
	/* Per-chip parameters come from the matched ID table entry. */
	probe_ent->port_ops = sil_port_info[ent->driver_data].port_ops;
	probe_ent->sht = sil_port_info[ent->driver_data].sht;
	probe_ent->n_ports = 2;
	probe_ent->pio_mask = sil_port_info[ent->driver_data].pio_mask;
	probe_ent->udma_mask = sil_port_info[ent->driver_data].udma_mask;
	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = SA_SHIRQ;
	probe_ent->host_flags = sil_port_info[ent->driver_data].host_flags;

	/* BAR5 holds the memory-mapped register file for both ports. */
	mmio_base = ioremap(pci_resource_start(pdev, 5),
		            pci_resource_len(pdev, 5));
	if (mmio_base == NULL) {
		rc = -ENOMEM;
		goto err_out_free_ent;
	}

	probe_ent->mmio_base = mmio_base;

	base = (unsigned long) mmio_base;
	probe_ent->port[0].cmd_addr = base + SIL_IDE0_TF;
	probe_ent->port[0].ctl_addr = base + SIL_IDE0_CTL;
	probe_ent->port[0].bmdma_addr = base + SIL_IDE0_BMDMA;
	probe_ent->port[0].scr_addr = base + SIL_IDE0_SCR;
	ata_std_ports(&probe_ent->port[0]);

	probe_ent->port[1].cmd_addr = base + SIL_IDE1_TF;
	probe_ent->port[1].ctl_addr = base + SIL_IDE1_CTL;
	probe_ent->port[1].bmdma_addr = base + SIL_IDE1_BMDMA;
	probe_ent->port[1].scr_addr = base + SIL_IDE1_SCR;
	ata_std_ports(&probe_ent->port[1]);

	pci_set_master(pdev);

	/* FIXME: check ata_device_add return value */
	ata_device_add(probe_ent);
	kfree(probe_ent);	/* libata copied what it needs */

	return 0;

err_out_free_ent:
	kfree(probe_ent);
err_out_regions:
	pci_release_regions(pdev);
err_out:
	pci_disable_device(pdev);
	return rc;
}
/*
 * create_qp() - allocate and firmware-register SQ/RQ rings for a QP.
 * @rdev: cxgb4 RDMA device state.
 * @wq:   work-queue pair being created; sizes (sq.size, rq.size,
 *        memsize) must be filled in by the caller.
 * @rcq:  CQ that will receive RQ completions.
 * @scq:  CQ that will receive SQ completions.
 * @uctx: user context, or &rdev->uctx for a kernel QP.
 *
 * Allocates QIDs, kernel shadow rings (kernel QPs only), the RQT, and
 * DMA-coherent queue memory; computes user doorbell addresses from
 * BAR2 for user QPs; then builds a two-resource FW_RI_RES_WR and sends
 * it to firmware, blocking until the reply arrives.
 *
 * Returns 0 on success, -ENOMEM on any failure (the err1..err7 labels
 * unwind exactly what was acquired before the failing step).
 */
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	/* user == true means the queues are mapped into userspace */
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid)
		goto err1;

	if (!user) {
		/* Kernel QPs track in-flight WRs in software shadow rings. */
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				 GFP_KERNEL);
		if (!wq->sq.sw_sq)
			goto err2;

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				 GFP_KERNEL);
		if (!wq->rq.sw_rq)
			goto err3;
	}

	/*
	 * RQT must be a power of 2.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr)
		goto err4;

	/* User SQs prefer on-chip memory, falling back to host memory. */
	if (user) {
		if (alloc_oc_sq(rdev, &wq->sq) &&
		    alloc_host_sq(rdev, &wq->sq))
			goto err5;
	} else if (alloc_host_sq(rdev, &wq->sq))
		goto err5;
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
					  GFP_KERNEL);
	if (!wq->rq.queue)
		goto err6;
	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
		__func__, wq->sq.queue,
		(unsigned long long)virt_to_phys(wq->sq.queue),
		wq->rq.queue,
		(unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;
	wq->gts = rdev->lldi.gts_reg;
	if (user) {
		/* Page-aligned BAR2 doorbell addresses for mmap into
		 * userspace; qpshift spaces QIDs one page apart. */
		wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(wq->sq.qid << rdev->qpshift);
		wq->sq.udb &= PAGE_MASK;
		wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(wq->rq.qid << rdev->qpshift);
		wq->rq.udb &= PAGE_MASK;
	}
	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err7;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	/* cookie lets the reply handler find our wait object */
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	/* Second resource in the same WR: the receive queue. */
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err7;
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto err7;

	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
	     (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	return 0;
err7:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
err6:
	dealloc_sq(rdev, &wq->sq);
err5:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
err4:
	kfree(wq->rq.sw_rq);
err3:
	kfree(wq->sq.sw_sq);
err2:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
err1:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return -ENOMEM;
}
int device_probe (struct pci_dev* device, const struct pci_device_id* device_id) { struct AnBDevice* private_data = NULL; unsigned int flags = 0; int retval = 0; down(&sem); printk(KERN_INFO ANB_DEVICE_PREFIX "----------------------------------------probing---------------------------------------\n"); if(devices_count >= ANB_DEVICE_LIMIT) { printk(KERN_INFO ANB_DEVICE_PREFIX "The limit of devices has been reached\n"); return -EINVAL; } printk(KERN_INFO ANB_DEVICE_PREFIX "Device number: %d\n", devices_count); printk(KERN_INFO ANB_DEVICE_PREFIX "Allocating a memory...\n"); private_data = kzalloc(sizeof(*private_data), GFP_KERNEL); if (!private_data) { retval = -ENOMEM; goto probing_done; } printk(KERN_INFO ANB_DEVICE_PREFIX "Allocated kernel logical memory: 0x%016lx\n", (unsigned long)private_data); printk(KERN_INFO ANB_DEVICE_PREFIX " 0x%016lx\n", (unsigned long)private_data + sizeof(*private_data)); printk(KERN_INFO ANB_DEVICE_PREFIX " memory size: %ld bytes\n", sizeof(*private_data)); printk(KERN_INFO ANB_DEVICE_PREFIX "Enabling a device...\n"); retval = pci_enable_device(device); if (retval) goto probing_fb_free; printk(KERN_INFO ANB_DEVICE_PREFIX "Requesting memory regions...\n"); retval = pci_request_regions(device, ANB_DEVICE_NAME); if (retval) goto probing_fb_disable; printk(KERN_INFO ANB_DEVICE_PREFIX "Checking BARs configuration...\n"); print_bar_info(device); flags = pci_resource_flags(device, ANB_DEVICE_REGISTERS_BAR); private_data->bars[ANB_DEVICE_REGISTERS_BAR].offset = pci_resource_start(device, ANB_DEVICE_REGISTERS_BAR); private_data->bars[ANB_DEVICE_REGISTERS_BAR].length = pci_resource_len(device, ANB_DEVICE_REGISTERS_BAR); if((flags & IORESOURCE_IO) != IORESOURCE_IO) { printk(KERN_WARNING ANB_DEVICE_PREFIX "Unexpected BAR type (device registers)\n"); retval = -EINVAL; goto probing_fb_regions; } flags = pci_resource_flags(device, ANB_DEVICE_MEMORY_BAR); private_data->bars[ANB_DEVICE_MEMORY_BAR].offset = pci_resource_start(device, 
ANB_DEVICE_MEMORY_BAR); private_data->bars[ANB_DEVICE_MEMORY_BAR].length = pci_resource_len(device, ANB_DEVICE_MEMORY_BAR); if((flags & IORESOURCE_MEM) != IORESOURCE_MEM) { printk(KERN_WARNING ANB_DEVICE_PREFIX "Unexpected BAR type (device memory)\n"); retval = -EINVAL; goto probing_fb_regions; } printk(KERN_INFO ANB_DEVICE_PREFIX "Setting the device up...\n"); pci_write_config_dword(device, PCI_BASE_ADDRESS_0 + 4 * ANB_DEVICE_REGISTERS_BAR, private_data->bars[ANB_DEVICE_REGISTERS_BAR].offset); pci_write_config_dword(device, PCI_BASE_ADDRESS_0 + 4 * ANB_DEVICE_MEMORY_BAR, private_data->bars[ANB_DEVICE_MEMORY_BAR].offset); pci_write_config_word(device, PCI_COMMAND, ANB_DEVICE_COMMAND); pci_write_config_byte(device, PCI_LATENCY_TIMER, ANB_DEVICE_LATENCY); printk(KERN_INFO ANB_DEVICE_PREFIX "Mapping device registers into the memory...\n"); private_data->bars[ANB_DEVICE_REGISTERS_BAR].virt_address = pci_iomap(device, ANB_DEVICE_REGISTERS_BAR, private_data->bars[ANB_DEVICE_REGISTERS_BAR].length); if(!private_data->bars[ANB_DEVICE_REGISTERS_BAR].virt_address) { printk(KERN_WARNING ANB_DEVICE_PREFIX "Can't map BAR into the memory (device registers)\n"); retval = -ENOMEM; goto probing_fb_regions; } private_data->bars[ANB_DEVICE_MEMORY_BAR].virt_address = pci_iomap(device, ANB_DEVICE_MEMORY_BAR, private_data->bars[ANB_DEVICE_MEMORY_BAR].length); if(!private_data->bars[ANB_DEVICE_MEMORY_BAR].virt_address) { printk(KERN_WARNING ANB_DEVICE_PREFIX "Can't map BAR into the memory (device memory)\n"); retval = -ENOMEM; goto probing_fb_io_map; } printk(KERN_INFO ANB_DEVICE_PREFIX "Allocating entry in /dev...\n"); pci_set_drvdata(device, private_data); private_data->device_ref = device; private_data->device_id = devices_count; retval = AllocateEntry(private_data); if(retval) goto probing_fb_mem_map; devices_count += 1; goto probing_done; probing_fb_mem_map: pci_iounmap(device, private_data->bars[ANB_DEVICE_MEMORY_BAR].virt_address); 
private_data->bars[ANB_DEVICE_MEMORY_BAR].virt_address = NULL; probing_fb_io_map: pci_iounmap(device, private_data->bars[ANB_DEVICE_REGISTERS_BAR].virt_address); private_data->bars[ANB_DEVICE_REGISTERS_BAR].virt_address = NULL; probing_fb_regions: pci_release_regions(device); probing_fb_disable: pci_disable_device(device); probing_fb_free: kfree(private_data); private_data = NULL; probing_done: printk(KERN_INFO ANB_DEVICE_PREFIX "Probing retval: %d\n", retval); up(&sem); return retval; }
/*
 * nm256_install() - probe and initialize a NeoMagic NM256 audio device.
 * @pcidev: the PCI device found by the driver core.
 * @rev:    chip revision (REV_NM256AV or the ZX variant).
 * @verstr: human-readable chip name for the banner printk.
 *
 * Maps the card's two memory ports (port 0 = video-RAM ring buffer,
 * port 1 = mixer/engine registers), detects usable buffer RAM, grabs
 * the IRQ once to verify it works, carves playback/record/coefficient
 * buffers out of the top of port-0 RAM, registers two PCM devices and
 * the mixer, and links the card into the global nmcard_list.
 *
 * Returns 1 on success, 0 on any failure (OSS-era convention).
 */
static int __devinit
nm256_install(struct pci_dev *pcidev, enum nm256rev rev, char *verstr)
{
	struct nm256_info *card;
	int x;

	if (pci_enable_device(pcidev))
		return 0;

	card = kmalloc (sizeof (struct nm256_info), GFP_KERNEL);
	if (card == NULL) {
		printk (KERN_ERR "NM256: out of memory!\n");
		return 0;
	}

	card->magsig = NM_MAGIC_SIG;
	card->playing  = 0;
	card->recording = 0;
	card->rev = rev;
	spin_lock_init(&card->lock);

	/* Init the memory port info.  */
	for (x = 0; x < 2; x++) {
		card->port[x].physaddr = pci_resource_start (pcidev, x);
		card->port[x].ptr = NULL;
		card->port[x].start_offset = 0;
		card->port[x].end_offset = 0;
	}

	/* Port 2 is easy.  */
	card->port[1].start_offset = 0;
	card->port[1].end_offset = NM_PORT2_SIZE;

	/* Yuck.  But we have to map in port 2 so we can check how much RAM the
	   card has.  */
	if (nm256_remap_ports (card)) {
		kfree (card);
		return 0;
	}

	/*
	 * The NM256 has two memory ports.  The first port is nothing
	 * more than a chunk of video RAM, which is used as the I/O ring
	 * buffer.  The second port has the actual juicy stuff (like the
	 * mixer and the playback engine control registers).
	 */

	if (card->rev == REV_NM256AV) {
		/* Ok, try to see if this is a non-AC97 version of the hardware. */
		int pval = nm256_readPort16 (card, 2, NM_MIXER_PRESENCE);
		if ((pval & NM_PRESENCE_MASK) != NM_PRESENCE_VALUE) {
			if (! force_load) {
				printk (KERN_ERR "NM256: This doesn't look to me like the AC97-compatible version.\n");
				printk (KERN_ERR "       You can force the driver to load by passing in the module\n");
				printk (KERN_ERR "       parameter:\n");
				printk (KERN_ERR "              force_load = 1\n");
				printk (KERN_ERR "\n");
				printk (KERN_ERR "       More likely, you should be using the appropriate SB-16 or\n");
				printk (KERN_ERR "       CS4232 driver instead.  (If your BIOS has settings for\n");
				printk (KERN_ERR "       IRQ and/or DMA for the sound card, this is *not* the correct\n");
				printk (KERN_ERR "       driver to use.)\n");
				nm256_release_ports (card);
				kfree (card);
				return 0;
			}
			else {
				printk (KERN_INFO "NM256: Forcing driver load as per user request.\n");
			}
		}
		else {
			/*	printk (KERN_INFO "NM256: Congratulations. You're not running Eunice.\n")*/;
		}
		card->port[0].end_offset = 2560 * 1024;
		card->introutine = nm256_interrupt;
		card->mixer_status_offset = NM_MIXER_STATUS_OFFSET;
		card->mixer_status_mask = NM_MIXER_READY_MASK;
	} else {
		/* Not sure if there is any relevant detect for the ZX or not.  */
		if (nm256_readPort8 (card, 2, 0xa0b) != 0)
			card->port[0].end_offset = 6144 * 1024;
		else
			card->port[0].end_offset = 4096 * 1024;

		card->introutine = nm256_interrupt_zx;
		card->mixer_status_offset = NM2_MIXER_STATUS_OFFSET;
		card->mixer_status_mask = NM2_MIXER_READY_MASK;
	}

	/* An explicit `buffertop` module parameter overrides signature
	 * scanning for the end of usable RAM. */
	if (buffertop >= 98304 && buffertop < card->port[0].end_offset)
		card->port[0].end_offset = buffertop;
	else
		nm256_peek_for_sig (card);

	/* We use the top 96 KB of port-0 RAM for our buffers. */
	card->port[0].start_offset = card->port[0].end_offset - 98304;

	printk (KERN_INFO "NM256: Mapping port 1 from 0x%x - 0x%x\n",
		card->port[0].start_offset, card->port[0].end_offset);

	/* Remap with the final window now that the size is known. */
	if (nm256_remap_ports (card)) {
		/* NOTE(review): unlike the earlier failure paths this one
		 * does not call nm256_release_ports() first — presumably
		 * remap failure leaves nothing mapped; confirm. */
		kfree (card);
		return 0;
	}

	/* See if we can get the interrupt. */

	card->irq = pcidev->irq;
	card->has_irq = 0;

	if (nm256_grabInterrupt (card) != 0) {
		nm256_release_ports (card);
		kfree (card);
		return 0;
	}

	nm256_releaseInterrupt (card);

	/*
	 *	Init the board.
	 */

	/* Carve the fixed buffer layout out of the top of port-0 RAM. */
	card->playbackBufferSize = 16384;
	card->recordBufferSize = 16384;

	card->coeffBuf = card->port[0].end_offset - NM_MAX_COEFFICIENT;
	card->abuf2 = card->coeffBuf - card->recordBufferSize;
	card->abuf1 = card->abuf2 - card->playbackBufferSize;
	card->allCoeffBuf = card->abuf2 - (NM_TOTAL_COEFF_COUNT * 4);

	/* Fixed setting. */
	card->mixer = NM_MIXER_OFFSET;
	card->mixer_values_init = 0;

	card->is_open_play = 0;
	card->is_open_record = 0;

	card->coeffsCurrent = 0;

	card->opencnt[0] = 0; card->opencnt[1] = 0;

	/* Reasonable default settings, but largely unnecessary. */
	for (x = 0; x < 2; x++) {
		card->sinfo[x].bits = 8;
		card->sinfo[x].stereo = 0;
		card->sinfo[x].samplerate = 8000;
	}

	nm256_initHw (card);

	/* Register one PCM device per direction slot. */
	for (x = 0; x < 2; x++) {
		if ((card->dev[x] =
		     sound_install_audiodrv(AUDIO_DRIVER_VERSION,
					    "NM256", &nm256_audio_driver,
					    sizeof(struct audio_driver),
					    DMA_NODMA, AFMT_U8 | AFMT_S16_LE,
					    NULL, -1, -1)) >= 0) {
			/* 1K minimum buffer size. */
			audio_devs[card->dev[x]]->min_fragment = 10;
			/* Maximum of 8K buffer size. */
			audio_devs[card->dev[x]]->max_fragment = 13;
		}
		else {
			printk(KERN_ERR "NM256: Too many PCM devices available\n");
			nm256_release_ports (card);
			kfree (card);
			return 0;
		}
	}

	pci_set_drvdata(pcidev,card);

	/* Insert the card in the list.  */
	card->next_card = nmcard_list;
	nmcard_list = card;

	printk(KERN_INFO "Initialized NeoMagic %s audio in PCI native mode\n",
	       verstr);

	/*
	 * And our mixer.  (We should allow support for other mixers, maybe.)
	 */

	nm256_install_mixer (card);

	return 1;
}
/*
 * ad1889_probe - PCI probe for the Analog Devices AD1889 audio device.
 *
 * Enables the device, maps its MMIO register window, hooks the shared
 * interrupt, resets the AC-link, registers /dev/dsp, brings up the AC'97
 * codec and exposes proc entries.  Anything acquired before a failure is
 * unwound in reverse order via the goto ladder below.
 *
 * Returns 0 on success or a negative errno on failure.
 *
 * Fixes over the previous version:
 *  - pci_disable_device() is now called on every failure path to balance
 *    the successful pci_enable_device() (previously the device was left
 *    enabled on error).
 *  - the specific error code is propagated where one is known, instead of
 *    unconditionally returning -ENODEV.
 */
static int __devinit ad1889_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
{
	int err;
	ad1889_dev_t *dev;
	unsigned long bar;
	struct proc_dir_entry *proc_root = NULL;

	if ((err = pci_enable_device(pcidev)) != 0) {
		printk(KERN_ERR DEVNAME ": pci_enable_device failed\n");
		return err;
	}

	pci_set_master(pcidev);

	err = -ENOMEM;
	if ((dev = ad1889_alloc_dev(pcidev)) == NULL) {
		printk(KERN_ERR DEVNAME ": cannot allocate memory for device\n");
		goto out0;
	}
	pci_set_drvdata(pcidev, dev);
	bar = pci_resource_start(pcidev, 0);

	/* The register window must be memory-mapped, not I/O-port space. */
	err = -ENODEV;
	if (!(pci_resource_flags(pcidev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR DEVNAME ": memory region not assigned\n");
		goto out1;
	}

	if ((err = pci_request_region(pcidev, 0, DEVNAME)) != 0) {
		printk(KERN_ERR DEVNAME ": unable to request memory region\n");
		goto out1;
	}

	err = -ENOMEM;
	dev->regbase = ioremap_nocache(bar, AD_DSIOMEMSIZE);
	if (!dev->regbase) {
		printk(KERN_ERR DEVNAME ": unable to remap iomem\n");
		goto out2;
	}

	if ((err = request_irq(pcidev->irq, ad1889_interrupt, SA_SHIRQ, DEVNAME, dev)) != 0) {
		printk(KERN_ERR DEVNAME ": unable to request interrupt\n");
		goto out3;
	}

	printk(KERN_INFO DEVNAME ": %s at %p IRQ %d\n",
		(char *)ent->driver_data, dev->regbase, pcidev->irq);

	err = -ENODEV;
	if (ad1889_aclink_reset(pcidev) != 0)
		goto out4;

	/* register /dev/dsp */
	if ((dev->dev_audio = register_sound_dsp(&ad1889_fops, -1)) < 0) {
		printk(KERN_ERR DEVNAME ": cannot register /dev/dsp\n");
		goto out4;
	}

	if ((err = ad1889_ac97_init(dev, 0)) != 0)
		goto out5;

	/* XXX: cleanups — a failed proc entry leaves earlier siblings behind. */
	err = -ENODEV;
	if (((proc_root = proc_mkdir("driver/ad1889", NULL)) == NULL) ||
	    create_proc_read_entry("ac97", S_IFREG|S_IRUGO, proc_root,
			ac97_read_proc, dev->ac97_codec) == NULL ||
	    create_proc_read_entry("info", S_IFREG|S_IRUGO, proc_root,
			ad1889_read_proc, dev) == NULL)
		goto out5;

	ad1889_initcfg(dev);

	ad1889_dev = dev;

	return 0;

out5:
	unregister_sound_dsp(dev->dev_audio);
out4:
	free_irq(pcidev->irq, dev);
out3:
	iounmap(dev->regbase);
out2:
	pci_release_region(pcidev, 0);
out1:
	ad1889_free_dev(dev);
	pci_set_drvdata(pcidev, NULL);
out0:
	/* Balance the successful pci_enable_device() above. */
	pci_disable_device(pcidev);
	return err;
}
/*
 * probe_one - attach the OSS SoundBlaster driver to a Cyrix Kahlua
 * (XpressAudio) PCI function.
 *
 * The chip hides an SB-compatible core whose resources are described by
 * BIOS-programmed registers rather than normal PCI BARs: a 16-bit enable
 * map is read from MMIO offset 0x18 in BAR 0, and the IRQ/DMA routing is
 * read back from SB mixer registers 0x80/0x81.  On success an
 * address_info is allocated, stashed as driver data, and the OSS SB core
 * is attached; returns 0 on success, 1 on any failure.
 *
 * Fix: the MPU log messages were swapped relative to the documented bit
 * map below (bit 5 enables the MPU at 0x330, bit 6 at 0x300); they now
 * report the correct address.
 */
static int __devinit probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct address_info *hw_config;
	unsigned long base;
	void __iomem *mem;
	unsigned long io;
	u16 map;
	u8 irq, dma8, dma16;
	int oldquiet;
	extern int sb_be_quiet;

	base = pci_resource_start(pdev, 0);
	if(base == 0UL)
		return 1;

	mem = ioremap(base, 128);
	if (!mem)
		return 1;

	map = readw(mem + 0x18);	/* Read the SMI enables */
	iounmap(mem);

	/* Map bits
		0:1	* 0x20 + 0x200 = sb base
		2	sb enable
		3	adlib enable
		5	MPU enable 0x330
		6	MPU enable 0x300

	   The other bits may be used internally so must be masked */

	io = 0x220 + 0x20 * (map & 3);

	if(map & (1<<2))
		printk(KERN_INFO "kahlua: XpressAudio at 0x%lx\n", io);
	else
		return 1;

	/* Bit 5 selects the MPU at 0x330, bit 6 at 0x300 (see map above). */
	if(map & (1<<5))
		printk(KERN_INFO "kahlua: MPU at 0x330\n");
	else if(map & (1<<6))
		printk(KERN_INFO "kahlua: MPU at 0x300\n");

	irq = mixer_read(io, 0x80) & 0x0F;
	dma8 = mixer_read(io, 0x81);

	/* Decode the 16-bit DMA channel from the enable bits. */
	if(dma8 & 0x20)
		dma16 = 5;
	else if(dma8 & 0x40)
		dma16 = 6;
	else if(dma8 & 0x80)
		dma16 = 7;
	else
	{
		printk(KERN_ERR "kahlua: No 16bit DMA enabled.\n");
		return 1;
	}

	/* Decode the 8-bit DMA channel (dma8 is reused for the result). */
	if(dma8 & 0x01)
		dma8 = 0;
	else if(dma8 & 0x02)
		dma8 = 1;
	else if(dma8 & 0x08)
		dma8 = 3;
	else
	{
		printk(KERN_ERR "kahlua: No 8bit DMA enabled.\n");
		return 1;
	}

	/* Decode the routed ISA IRQ line. */
	if(irq & 1)
		irq = 9;
	else if(irq & 2)
		irq = 5;
	else if(irq & 4)
		irq = 7;
	else if(irq & 8)
		irq = 10;
	else
	{
		printk(KERN_ERR "kahlua: SB IRQ not set.\n");
		return 1;
	}

	printk(KERN_INFO "kahlua: XpressAudio on IRQ %d, DMA %d, %d\n",
		irq, dma8, dma16);

	hw_config = kzalloc(sizeof(struct address_info), GFP_KERNEL);
	if(hw_config == NULL)
	{
		printk(KERN_ERR "kahlua: out of memory.\n");
		return 1;
	}

	pci_set_drvdata(pdev, hw_config);

	hw_config->io_base = io;
	hw_config->irq = irq;
	hw_config->dma = dma8;
	hw_config->dma2 = dma16;
	hw_config->name = "Cyrix XpressAudio";
	hw_config->driver_use_1 = SB_NO_MIDI | SB_PCI_IRQ;

	if (!request_region(io, 16, "soundblaster"))
		goto err_out_free;

	if(sb_dsp_detect(hw_config, 0, 0, NULL)==0)
	{
		printk(KERN_ERR "kahlua: audio not responding.\n");
		release_region(io, 16);
		goto err_out_free;
	}

	/* Silence the SB core's chatter while it initializes. */
	oldquiet = sb_be_quiet;
	sb_be_quiet = 1;
	if(sb_dsp_init(hw_config, THIS_MODULE))
	{
		/* NOTE(review): the io region requested above is not released
		   on this path — confirm whether sb_dsp_init() drops it on
		   its own failure path before treating this as a leak. */
		sb_be_quiet = oldquiet;
		goto err_out_free;
	}
	sb_be_quiet = oldquiet;

	return 0;

err_out_free:
	pci_set_drvdata(pdev, NULL);
	kfree(hw_config);
	return 1;
}