static int __devinit pci200_pci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { card_t *card; u32 __iomem *p; int i; u32 ramsize; u32 ramphys; /* buffer memory base */ u32 scaphys; /* SCA memory base */ u32 plxphys; /* PLX registers memory base */ i = pci_enable_device(pdev); if (i) return i; i = pci_request_regions(pdev, "PCI200SYN"); if (i) { pci_disable_device(pdev); return i; } card = kzalloc(sizeof(card_t), GFP_KERNEL); if (card == NULL) { printk(KERN_ERR "pci200syn: unable to allocate memory\n"); pci_release_regions(pdev); pci_disable_device(pdev); return -ENOBUFS; } pci_set_drvdata(pdev, card); card->ports[0].netdev = alloc_hdlcdev(&card->ports[0]); card->ports[1].netdev = alloc_hdlcdev(&card->ports[1]); if (!card->ports[0].netdev || !card->ports[1].netdev) { printk(KERN_ERR "pci200syn: unable to allocate memory\n"); pci200_pci_remove_one(pdev); return -ENOMEM; } if (pci_resource_len(pdev, 0) != PCI200SYN_PLX_SIZE || pci_resource_len(pdev, 2) != PCI200SYN_SCA_SIZE || pci_resource_len(pdev, 3) < 16384) { printk(KERN_ERR "pci200syn: invalid card EEPROM parameters\n"); pci200_pci_remove_one(pdev); return -EFAULT; } plxphys = pci_resource_start(pdev,0) & PCI_BASE_ADDRESS_MEM_MASK; card->plxbase = ioremap(plxphys, PCI200SYN_PLX_SIZE); scaphys = pci_resource_start(pdev,2) & PCI_BASE_ADDRESS_MEM_MASK; card->scabase = ioremap(scaphys, PCI200SYN_SCA_SIZE); ramphys = pci_resource_start(pdev,3) & PCI_BASE_ADDRESS_MEM_MASK; card->rambase = pci_ioremap_bar(pdev, 3); if (card->plxbase == NULL || card->scabase == NULL || card->rambase == NULL) { printk(KERN_ERR "pci200syn: ioremap() failed\n"); pci200_pci_remove_one(pdev); return -EFAULT; } /* Reset PLX */ p = &card->plxbase->init_ctrl; writel(readl(p) | 0x40000000, p); readl(p); /* Flush the write - do not use sca_flush */ udelay(1); writel(readl(p) & ~0x40000000, p); readl(p); /* Flush the write - do not use sca_flush */ udelay(1); ramsize = sca_detect_ram(card, card->rambase, pci_resource_len(pdev, 3)); /* number of TX + RX buffers for one port - this is dual port card */ i = ramsize / (2 * (sizeof(pkt_desc) + HDLC_MAX_MRU)); card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS); card->rx_ring_buffers = i - card->tx_ring_buffers; card->buff_offset = 2 * sizeof(pkt_desc) * (card->tx_ring_buffers + card->rx_ring_buffers); printk(KERN_INFO "pci200syn: %u KB RAM at 0x%x, IRQ%u, using %u TX +" " %u RX packets rings\n", ramsize / 1024, ramphys, pdev->irq, card->tx_ring_buffers, card->rx_ring_buffers); if (pdev->subsystem_device == PCI_DEVICE_ID_PLX_9050) { printk(KERN_ERR "Detected PCI200SYN card with old " "configuration data.\n"); printk(KERN_ERR "See <http://www.kernel.org/pub/" "linux/utils/net/hdlc/pci200syn/> for update.\n"); printk(KERN_ERR "The card will stop working with" " future versions of Linux if not updated.\n"); } if (card->tx_ring_buffers < 1) { printk(KERN_ERR "pci200syn: RAM test failed\n"); pci200_pci_remove_one(pdev); return -EFAULT; } /* Enable interrupts on the PCI bridge */ p = &card->plxbase->intr_ctrl_stat; writew(readw(p) | 0x0040, p); /* Allocate IRQ */ if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, "pci200syn", card)) { printk(KERN_WARNING "pci200syn: could not allocate IRQ%d.\n", pdev->irq); pci200_pci_remove_one(pdev); return -EBUSY; } card->irq = pdev->irq; sca_init(card, 0); for (i = 0; i < 2; i++) { port_t *port = &card->ports[i]; struct net_device *dev = port->netdev; hdlc_device *hdlc = dev_to_hdlc(dev); port->chan = i; spin_lock_init(&port->lock); dev->irq = card->irq; dev->mem_start = ramphys; 
dev->mem_end = ramphys + ramsize - 1; dev->tx_queue_len = 50; dev->do_ioctl = pci200_ioctl; dev->open = pci200_open; dev->stop = pci200_close; hdlc->attach = sca_attach; hdlc->xmit = sca_xmit; port->settings.clock_type = CLOCK_EXT; port->card = card; sca_init_port(port); if (register_hdlc_device(dev)) { printk(KERN_ERR "pci200syn: unable to register hdlc " "device\n"); port->card = NULL; pci200_pci_remove_one(pdev); return -ENOBUFS; } printk(KERN_INFO "%s: PCI200SYN channel %d\n", dev->name, port->chan); } sca_flush(card); return 0; }
/* * This function is called by the PCI sub-system when it finds a PCI * device with dev/vendor IDs that match with one of our devices. * All of the driver initialization is done in this function. */ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) { int status = 0; struct be_adapter *adapter; struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD get_fwv; struct be_net_object *pnob; struct net_device *netdev; status = pci_enable_device(pdev); if (status) goto error; status = pci_request_regions(pdev, be_driver_name); if (status) goto error_pci_req; pci_set_master(pdev); adapter = kzalloc(sizeof(struct be_adapter), GFP_KERNEL); if (adapter == NULL) { status = -ENOMEM; goto error_adapter; } adapter->dev_state = BE_DEV_STATE_NONE; adapter->pdev = pdev; pci_set_drvdata(pdev, adapter); adapter->enable_aic = 1; adapter->max_eqd = MAX_EQD; adapter->min_eqd = 0; adapter->cur_eqd = 0; status = pci_set_dma_mask(pdev, DMA_64BIT_MASK); if (!status) { adapter->dma_64bit_cap = true; } else { adapter->dma_64bit_cap = false; status = pci_set_dma_mask(pdev, DMA_32BIT_MASK); if (status != 0) { printk(KERN_ERR "Could not set PCI DMA Mask\n"); goto cleanup; } } status = init_pci_be_function(adapter, pdev); if (status != 0) { printk(KERN_ERR "Failed to map PCI BARS\n"); status = -ENOMEM; goto cleanup; } be_trace_set_level(DL_ALWAYS | DL_ERR); adapter->be_link_sts = kmalloc(sizeof(struct BE_LINK_STATUS), GFP_KERNEL); if (adapter->be_link_sts == NULL) { printk(KERN_ERR "Memory allocation for link status " "buffer failed\n"); goto cleanup; } spin_lock_init(&adapter->txq_lock); netdev = alloc_etherdev(sizeof(struct be_net_object)); if (netdev == NULL) { status = -ENOMEM; goto cleanup; } pnob = netdev_priv(netdev); adapter->net_obj = pnob; adapter->netdevp = netdev; pnob->adapter = adapter; pnob->netdev = netdev; status = be_nob_ring_alloc(adapter, pnob); if (status != 0) goto cleanup; status = be_nob_ring_init(adapter, pnob); if (status != 0) goto cleanup; be_rxf_mac_address_read_write(&pnob->fn_obj, false, false, false, false, false, netdev->dev_addr, NULL, NULL); netdev->init = &benet_init; netif_carrier_off(netdev); netif_stop_queue(netdev); SET_NETDEV_DEV(netdev, &(adapter->pdev->dev)); netif_napi_add(netdev, &pnob->napi, be_poll, 64); /* if the rx_frag size if 2K, one page is shared as two RX frags */ pnob->rx_pg_shared = (pnob->rx_buf_size <= PAGE_SIZE / 2) ? true : false; if (pnob->rx_buf_size != rxbuf_size) { printk(KERN_WARNING "Could not set Rx buffer size to %d. Using %d\n", rxbuf_size, pnob->rx_buf_size); rxbuf_size = pnob->rx_buf_size; } tasklet_init(&(adapter->sts_handler), be_process_intr, (unsigned long)adapter); adapter->tasklet_started = 1; spin_lock_init(&(adapter->int_lock)); status = be_register_isr(adapter, pnob); if (status != 0) goto cleanup; adapter->rx_csum = 1; adapter->max_rx_coal = BE_LRO_MAX_PKTS; memset(&get_fwv, 0, sizeof(struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD)); printk(KERN_INFO "BladeEngine Driver version:%s. 
" "Copyright ServerEngines, Corporation 2005 - 2008\n", be_drvr_ver); status = be_function_get_fw_version(&pnob->fn_obj, &get_fwv, NULL, NULL); if (status == BE_SUCCESS) { strncpy(be_fw_ver, get_fwv.firmware_version_string, 32); printk(KERN_INFO "BladeEngine Firmware Version:%s\n", get_fwv.firmware_version_string); } else { printk(KERN_WARNING "Unable to get BE Firmware Version\n"); } sema_init(&adapter->get_eth_stat_sem, 0); init_timer(&adapter->timer_ctxt.get_stats_timer); atomic_set(&adapter->timer_ctxt.get_stat_flag, 0); adapter->timer_ctxt.get_stats_timer.function = &be_get_stats_timer_handler; status = be_mcc_create(adapter); if (status < 0) goto cleanup; status = be_mcc_init(adapter); if (status < 0) goto cleanup; status = be_mcc_add_async_event_callback(&adapter->net_obj->mcc_q_obj, be_link_status_async_callback, (void *)adapter); if (status != BE_SUCCESS) { printk(KERN_WARNING "add_async_event_callback failed"); printk(KERN_WARNING "Link status changes may not be reflected\n"); } status = register_netdev(netdev); if (status != 0) goto cleanup; be_update_link_status(adapter); adapter->dev_state = BE_DEV_STATE_INIT; return 0; cleanup: be_remove(pdev); return status; error_adapter: pci_release_regions(pdev); error_pci_req: pci_disable_device(pdev); error: printk(KERN_ERR "BladeEngine initalization failed\n"); return status; }
static int __devinit ibmasm_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { int result; struct service_processor *sp; if ((result = pci_enable_device(pdev))) { dev_err(&pdev->dev, "Failed to enable PCI device\n"); return result; } if ((result = pci_request_regions(pdev, DRIVER_NAME))) { dev_err(&pdev->dev, "Failed to allocate PCI resources\n"); goto error_resources; } /* vnc client won't work without bus-mastering */ pci_set_master(pdev); sp = kzalloc(sizeof(struct service_processor), GFP_KERNEL); if (sp == NULL) { dev_err(&pdev->dev, "Failed to allocate memory\n"); result = -ENOMEM; goto error_kmalloc; } spin_lock_init(&sp->lock); INIT_LIST_HEAD(&sp->command_queue); pci_set_drvdata(pdev, (void *)sp); sp->dev = &pdev->dev; sp->number = pdev->bus->number; snprintf(sp->dirname, IBMASM_NAME_SIZE, "%d", sp->number); snprintf(sp->devname, IBMASM_NAME_SIZE, "%s%d", DRIVER_NAME, sp->number); if (ibmasm_event_buffer_init(sp)) { dev_err(sp->dev, "Failed to allocate event buffer\n"); result = -ENOMEM; goto error_eventbuffer; } if (ibmasm_heartbeat_init(sp)) { dev_err(sp->dev, "Failed to allocate heartbeat command\n"); result = -ENOMEM; goto error_heartbeat; } sp->irq = pdev->irq; sp->base_address = pci_ioremap_bar(pdev, 0); if (!sp->base_address) { dev_err(sp->dev, "Failed to ioremap pci memory\n"); result = -ENODEV; goto error_ioremap; } result = request_irq(sp->irq, ibmasm_interrupt_handler, IRQF_SHARED, sp->devname, (void*)sp); if (result) { dev_err(sp->dev, "Failed to register interrupt handler\n"); goto error_request_irq; } enable_sp_interrupts(sp->base_address); result = ibmasm_init_remote_input_dev(sp); if (result) { dev_err(sp->dev, "Failed to initialize remote queue\n"); goto error_send_message; } result = ibmasm_send_driver_vpd(sp); if (result) { dev_err(sp->dev, "Failed to send driver VPD to service processor\n"); goto error_send_message; } result = ibmasm_send_os_state(sp, SYSTEM_STATE_OS_UP); if (result) { dev_err(sp->dev, "Failed to send OS state to service processor\n"); goto error_send_message; } ibmasmfs_add_sp(sp); ibmasm_register_uart(sp); return 0; error_send_message: disable_sp_interrupts(sp->base_address); ibmasm_free_remote_input_dev(sp); free_irq(sp->irq, (void *)sp); error_request_irq: iounmap(sp->base_address); error_ioremap: ibmasm_heartbeat_exit(sp); error_heartbeat: ibmasm_event_buffer_exit(sp); error_eventbuffer: pci_set_drvdata(pdev, NULL); kfree(sp); error_kmalloc: pci_release_regions(pdev); error_resources: pci_disable_device(pdev); return result; }
/* called when kernel matches PCI hardware to this module */ static int timing_dev_probe(struct pci_dev *dev, const struct pci_device_id *id) { int rc, i, j; #if DEBUG != 0 printk(KERN_DEBUG "timing_dev_probe() entry\n"); #endif /* enable the card */ rc = pci_enable_device(dev); if ( rc ) { printk(KERN_ALERT "Failed to enable timing card (%d)\n", rc); return rc; } /* first, check if this card is ours... although */ /* the vendor ID should have taken care of */ /* this, that ID was actually the bridge */ /* chip. We tell this from other cards using */ /* the sub_vendorID and sub_deviceID */ if ( (ADLINK_VENDOR_ID != dev->subsystem_vendor) || (ADLINK_7300A_ID != dev->subsystem_device) ) { printk(KERN_ALERT "Timing Driver rejected device: "); printk(KERN_ALERT "--sub_vendor ID = %x ", dev->subsystem_vendor); printk(KERN_ALERT "--sub_device ID = %x\n", dev->subsystem_device); pci_disable_device(dev); return -ENODEV; /* no such device error */ } /* NOW WE ARE DEALING WITH THE TIMING CARD */ /* simple initialization */ dev_ptr = dev; dma_waiting = 0; dma_configured = 0; output_enabled = 0; /* enable DMA */ pci_set_master(dev); if ( pci_set_dma_mask(dev, DMA_BIT_MASK(32)) ) { printk(KERN_ALERT "DMA NOT SUPPORTED: Aborting."); return -ENODEV; /* not the device we expected */ } else printk(KERN_WARNING "Doing DMA with 32 bits\n"); /* retrieve assigned interrupt line number */ /* -> see linux/pci.h lines 255 & 256 */ irq_line = dev->irq; /* request interrupt line number */ /* -> common practice says to put this */ /* in device open but this could be */ /* needed if the user closes the */ /* device before DMA transfer done */ rc = request_irq(irq_line, timing_interrupt_handler, IRQF_SHARED, "timing", timing_card); if ( rc ) { printk(KERN_ALERT "Failed to register irq %d\n", irq_line); return rc; } /* must claim proprietary access to memory region */ /* mapped to the device */ rc = pci_request_regions(dev, timing_driver.name); if ( rc ) { printk(KERN_ALERT "Memory region collision for TIMING card\n"); goto request_fail; } /* retrieve base address of mmapped regions */ /* -> common practice avoids reading */ /* the PCI config space directly */ timing_card[0].len = pci_resource_len(dev, TIMING_BAR); timing_card[0].base = pci_iomap(dev, TIMING_BAR, /* +1 for maxlen */ timing_card[0].len + 1); if (!timing_card[0].base) { printk(KERN_ALERT "Failed to find Timing base address\n"); rc = -ENODEV; /* no device error */ goto no_base; } /* already did this for timing_card[0] */ i = 1; /* init other IO port vals */ for ( j = 1; j < TIMING_IOPORT_COUNT; j++, i++ ) { timing_card[i].len = timing_card[0].len; timing_card[i].base = timing_card[0].base + (i*TIMING_IOPORT_SIZE); } /* and onboard timer vals */ for ( j = 0; j < TIMING_8254_COUNT; j++, i++ ) { timing_card[i].len = timing_card[0].len; timing_card[i].base = timing_card[0].base + (i*TIMING_IOPORT_SIZE); } /* finally, set up for Bus Master (LCR) (PLX9080) */ timing_card[i].len = pci_resource_len(dev, PLX9080_BAR); timing_card[i].base = pci_iomap(dev, PLX9080_BAR, /* +1 for maxlen */ timing_card[i].len + 1); master_chip = &timing_card[i]; #if DEBUG != 0 printk(KERN_DEBUG "timing_dev_probe() exit success\n"); #endif /* END NORMAL CONTROL FLOW */ return 0; /* ERROR HANDLING */ no_base: pci_release_regions(dev); request_fail: pci_disable_device(dev); pci_clear_master(dev); return rc; } /* end timing_dev_probe */
/** * stmmac_pci_probe * * @pdev: pci device pointer * @id: pointer to table of device id/id's. * * Description: This probing function gets called for all PCI devices which * match the ID table and are not "owned" by another driver yet. This function * gets passed a "struct pci_dev *" for each device whose entry in the ID table * matches the device. The probe function returns zero when the driver chooses * to take "ownership" of the device, or a negative error code otherwise. */ static int __devinit stmmac_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int ret = 0; void __iomem *addr = NULL; struct stmmac_priv *priv = NULL; int i; /* Enable pci device */ ret = pci_enable_device(pdev); if (ret) { pr_err("%s : ERROR: failed to enable %s device\n", __func__, pci_name(pdev)); return ret; } if (pci_request_regions(pdev, STMMAC_RESOURCE_NAME)) { pr_err("%s: ERROR: failed to get PCI region\n", __func__); ret = -ENODEV; goto err_out_req_reg_failed; } /* Get the base address of device */ for (i = 0; i <= 5; i++) { if (pci_resource_len(pdev, i) == 0) continue; addr = pci_iomap(pdev, i, 0); if (addr == NULL) { pr_err("%s: ERROR: cannot map register memory, aborting", __func__); ret = -EIO; goto err_out_map_failed; } break; } pci_set_master(pdev); stmmac_default_data(); priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat, addr); if (!priv) { pr_err("%s: main driver probe failed", __func__); ret = -ENODEV; goto err_out; } priv->dev->irq = pdev->irq; priv->wol_irq = pdev->irq; pci_set_drvdata(pdev, priv->dev); pr_debug("STMMAC platform driver registration completed"); return 0; err_out: pci_clear_master(pdev); err_out_map_failed: pci_release_regions(pdev); err_out_req_reg_failed: pci_disable_device(pdev); return ret; }
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) { struct vmw_private *dev_priv; int ret; uint32_t svga_id; enum vmw_res_type i; bool refuse_dma = false; dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); if (unlikely(dev_priv == NULL)) { DRM_ERROR("Failed allocating a device private struct.\n"); return -ENOMEM; } pci_set_master(dev->pdev); dev_priv->dev = dev; dev_priv->vmw_chipset = chipset; dev_priv->last_read_seqno = (uint32_t) -100; mutex_init(&dev_priv->cmdbuf_mutex); mutex_init(&dev_priv->release_mutex); mutex_init(&dev_priv->binding_mutex); rwlock_init(&dev_priv->resource_lock); ttm_lock_init(&dev_priv->reservation_sem); spin_lock_init(&dev_priv->hw_lock); spin_lock_init(&dev_priv->waiter_lock); spin_lock_init(&dev_priv->cap_lock); spin_lock_init(&dev_priv->svga_lock); for (i = vmw_res_context; i < vmw_res_max; ++i) { idr_init(&dev_priv->res_idr[i]); INIT_LIST_HEAD(&dev_priv->res_lru[i]); } mutex_init(&dev_priv->init_mutex); init_waitqueue_head(&dev_priv->fence_queue); init_waitqueue_head(&dev_priv->fifo_queue); dev_priv->fence_queue_waiters = 0; dev_priv->fifo_queue_waiters = 0; dev_priv->used_memory_size = 0; dev_priv->io_start = pci_resource_start(dev->pdev, 0); dev_priv->vram_start = pci_resource_start(dev->pdev, 1); dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); dev_priv->assume_16bpp = !!vmw_assume_16bpp; dev_priv->enable_fb = enable_fbdev; vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); svga_id = vmw_read(dev_priv, SVGA_REG_ID); if (svga_id != SVGA_ID_2) { ret = -ENOSYS; DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id); goto out_err0; } dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); ret = vmw_dma_select_mode(dev_priv); if (unlikely(ret != 0)) { DRM_INFO("Restricting capabilities due to IOMMU setup.\n"); refuse_dma = true; } dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE); dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE); dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH); dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT); vmw_get_initial_size(dev_priv); if (dev_priv->capabilities & SVGA_CAP_GMR2) { dev_priv->max_gmr_ids = vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS); dev_priv->max_gmr_pages = vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES); dev_priv->memory_size = vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE); dev_priv->memory_size -= dev_priv->vram_size; } else { /* * An arbitrary limit of 512MiB on surface * memory. But all HWV8 hardware supports GMR2. 
*/ dev_priv->memory_size = 512*1024*1024; } dev_priv->max_mob_pages = 0; dev_priv->max_mob_size = 0; if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { uint64_t mem_size = vmw_read(dev_priv, SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB); /* * Workaround for low memory 2D VMs to compensate for the * allocation taken by fbdev */ if (!(dev_priv->capabilities & SVGA_CAP_3D)) mem_size *= 2; dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; dev_priv->prim_bb_mem = vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); dev_priv->max_mob_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); dev_priv->stdu_max_width = vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH); dev_priv->stdu_max_height = vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT); vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH); dev_priv->texture_max_width = vmw_read(dev_priv, SVGA_REG_DEV_CAP); vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT); dev_priv->texture_max_height = vmw_read(dev_priv, SVGA_REG_DEV_CAP); } else { dev_priv->texture_max_width = 8192; dev_priv->texture_max_height = 8192; dev_priv->prim_bb_mem = dev_priv->vram_size; } vmw_print_capabilities(dev_priv->capabilities); ret = vmw_dma_masks(dev_priv); if (unlikely(ret != 0)) goto out_err0; if (dev_priv->capabilities & SVGA_CAP_GMR2) { DRM_INFO("Max GMR ids is %u\n", (unsigned)dev_priv->max_gmr_ids); DRM_INFO("Max number of GMR pages is %u\n", (unsigned)dev_priv->max_gmr_pages); DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n", (unsigned)dev_priv->memory_size / 1024); } DRM_INFO("Maximum display memory size is %u kiB\n", dev_priv->prim_bb_mem / 1024); DRM_INFO("VRAM at 0x%08x size is %u kiB\n", dev_priv->vram_start, dev_priv->vram_size / 1024); DRM_INFO("MMIO at 0x%08x size is %u kiB\n", dev_priv->mmio_start, dev_priv->mmio_size / 1024); ret = vmw_ttm_global_init(dev_priv); if (unlikely(ret != 0)) goto out_err0; vmw_master_init(&dev_priv->fbdev_master); ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); dev_priv->active_master = &dev_priv->fbdev_master; dev_priv->mmio_virt = memremap(dev_priv->mmio_start, dev_priv->mmio_size, MEMREMAP_WB); if (unlikely(dev_priv->mmio_virt == NULL)) { ret = -ENOMEM; DRM_ERROR("Failed mapping MMIO.\n"); goto out_err3; } /* Need mmio memory to check for fifo pitchlock cap. */ if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) && !vmw_fifo_have_pitchlock(dev_priv)) { ret = -ENOSYS; DRM_ERROR("Hardware has no pitchlock\n"); goto out_err4; } dev_priv->tdev = ttm_object_device_init (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops); if (unlikely(dev_priv->tdev == NULL)) { DRM_ERROR("Unable to initialize TTM object management.\n"); ret = -ENOMEM; goto out_err4; } dev->dev_private = dev_priv; ret = pci_request_regions(dev->pdev, "vmwgfx probe"); dev_priv->stealth = (ret != 0); if (dev_priv->stealth) { /** * Request at least the mmio PCI resource. */ DRM_INFO("It appears like vesafb is loaded. 
" "Ignore above error if any.\n"); ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe"); if (unlikely(ret != 0)) { DRM_ERROR("Failed reserving the SVGA MMIO resource.\n"); goto out_no_device; } } if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { ret = drm_irq_install(dev, dev->pdev->irq); if (ret != 0) { DRM_ERROR("Failed installing irq: %d\n", ret); goto out_no_irq; } } dev_priv->fman = vmw_fence_manager_init(dev_priv); if (unlikely(dev_priv->fman == NULL)) { ret = -ENOMEM; goto out_no_fman; } ret = ttm_bo_device_init(&dev_priv->bdev, dev_priv->bo_global_ref.ref.object, &vmw_bo_driver, dev->anon_inode->i_mapping, VMWGFX_FILE_PAGE_OFFSET, false); if (unlikely(ret != 0)) { DRM_ERROR("Failed initializing TTM buffer object driver.\n"); goto out_no_bdev; } /* * Enable VRAM, but initially don't use it until SVGA is enabled and * unhidden. */ ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, (dev_priv->vram_size >> PAGE_SHIFT)); if (unlikely(ret != 0)) { DRM_ERROR("Failed initializing memory manager for VRAM.\n"); goto out_no_vram; } dev_priv->bdev.man[TTM_PL_VRAM].use_type = false; dev_priv->has_gmr = true; if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, VMW_PL_GMR) != 0) { DRM_INFO("No GMR memory available. " "Graphics memory resources are very limited.\n"); dev_priv->has_gmr = false; } if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { dev_priv->has_mob = true; if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, VMW_PL_MOB) != 0) { DRM_INFO("No MOB memory available. " "3D will be disabled.\n"); dev_priv->has_mob = false; } } if (dev_priv->has_mob) { spin_lock(&dev_priv->cap_lock); vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX); dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP); spin_unlock(&dev_priv->cap_lock); } ret = vmw_kms_init(dev_priv); if (unlikely(ret != 0)) goto out_no_kms; vmw_overlay_init(dev_priv); ret = vmw_request_device(dev_priv); if (ret) goto out_no_fifo; DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no."); if (dev_priv->enable_fb) { vmw_fifo_resource_inc(dev_priv); vmw_svga_enable(dev_priv); vmw_fb_init(dev_priv); } dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; register_pm_notifier(&dev_priv->pm_nb); return 0; out_no_fifo: vmw_overlay_close(dev_priv); vmw_kms_close(dev_priv); out_no_kms: if (dev_priv->has_mob) (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); if (dev_priv->has_gmr) (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); out_no_vram: (void)ttm_bo_device_release(&dev_priv->bdev); out_no_bdev: vmw_fence_manager_takedown(dev_priv->fman); out_no_fman: if (dev_priv->capabilities & SVGA_CAP_IRQMASK) drm_irq_uninstall(dev_priv->dev); out_no_irq: if (dev_priv->stealth) pci_release_region(dev->pdev, 2); else pci_release_regions(dev->pdev); out_no_device: ttm_object_device_release(&dev_priv->tdev); out_err4: memunmap(dev_priv->mmio_virt); out_err3: vmw_ttm_global_release(dev_priv); out_err0: for (i = vmw_res_context; i < vmw_res_max; ++i) idr_destroy(&dev_priv->res_idr[i]); if (dev_priv->ctx.staged_bindings) vmw_binding_state_free(dev_priv->ctx.staged_bindings); kfree(dev_priv); return ret; }
static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct sja1000_priv *priv; struct peak_pci_chan *chan; struct net_device *dev, *prev_dev; void __iomem *cfg_base, *reg_base; u16 sub_sys_id, icr; int i, err, channels; err = pci_enable_device(pdev); if (err) return err; err = pci_request_regions(pdev, DRV_NAME); if (err) goto failure_disable_pci; err = pci_read_config_word(pdev, 0x2e, &sub_sys_id); if (err) goto failure_release_regions; dev_dbg(&pdev->dev, "probing device %04x:%04x:%04x\n", pdev->vendor, pdev->device, sub_sys_id); err = pci_write_config_word(pdev, 0x44, 0); if (err) goto failure_release_regions; if (sub_sys_id >= 12) channels = 4; else if (sub_sys_id >= 10) channels = 3; else if (sub_sys_id >= 4) channels = 2; else channels = 1; cfg_base = pci_iomap(pdev, 0, PEAK_PCI_CFG_SIZE); if (!cfg_base) { dev_err(&pdev->dev, "failed to map PCI resource #0\n"); err = -ENOMEM; goto failure_release_regions; } reg_base = pci_iomap(pdev, 1, PEAK_PCI_CHAN_SIZE * channels); if (!reg_base) { dev_err(&pdev->dev, "failed to map PCI resource #1\n"); err = -ENOMEM; goto failure_unmap_cfg_base; } /* Set GPIO control register */ writew(0x0005, cfg_base + PITA_GPIOICR + 2); /* Enable all channels of this card */ writeb(0x00, cfg_base + PITA_GPIOICR); /* Toggle reset */ writeb(0x05, cfg_base + PITA_MISC + 3); mdelay(5); /* Leave parport mux mode */ writeb(0x04, cfg_base + PITA_MISC + 3); icr = readw(cfg_base + PITA_ICR + 2); for (i = 0; i < channels; i++) { dev = alloc_sja1000dev(sizeof(struct peak_pci_chan)); if (!dev) { err = -ENOMEM; goto failure_remove_channels; } priv = netdev_priv(dev); chan = priv->priv; chan->cfg_base = cfg_base; priv->reg_base = reg_base + i * PEAK_PCI_CHAN_SIZE; priv->read_reg = peak_pci_read_reg; priv->write_reg = peak_pci_write_reg; priv->post_irq = peak_pci_post_irq; priv->can.clock.freq = PEAK_PCI_CAN_CLOCK; priv->ocr = PEAK_PCI_OCR; priv->cdr = PEAK_PCI_CDR; /* Neither a slave nor a single device distributes the clock */ if (channels == 1 || i > 0) priv->cdr |= CDR_CLK_OFF; /* Setup interrupt handling */ priv->irq_flags = IRQF_SHARED; dev->irq = pdev->irq; chan->icr_mask = peak_pci_icr_masks[i]; icr |= chan->icr_mask; SET_NETDEV_DEV(dev, &pdev->dev); dev->dev_id = i; /* Create chain of SJA1000 devices */ chan->prev_dev = pci_get_drvdata(pdev); pci_set_drvdata(pdev, dev); /* * PCAN-ExpressCard needs some additional i2c init. 
* This must be done *before* register_sja1000dev() but * *after* devices linkage */ if (pdev->device == PEAK_PCIEC_DEVICE_ID) { err = peak_pciec_probe(pdev, dev); if (err) { dev_err(&pdev->dev, "failed to probe device (err %d)\n", err); goto failure_free_dev; } } err = register_sja1000dev(dev); if (err) { dev_err(&pdev->dev, "failed to register device\n"); goto failure_free_dev; } dev_info(&pdev->dev, "%s at reg_base=0x%p cfg_base=0x%p irq=%d\n", dev->name, priv->reg_base, chan->cfg_base, dev->irq); } /* Enable interrupts */ writew(icr, cfg_base + PITA_ICR + 2); return 0; failure_free_dev: pci_set_drvdata(pdev, chan->prev_dev); free_sja1000dev(dev); failure_remove_channels: /* Disable interrupts */ writew(0x0, cfg_base + PITA_ICR + 2); chan = NULL; for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) { priv = netdev_priv(dev); chan = priv->priv; prev_dev = chan->prev_dev; unregister_sja1000dev(dev); free_sja1000dev(dev); } /* free any PCIeC resources too */ if (chan && chan->pciec_card) peak_pciec_remove(chan->pciec_card); pci_iounmap(pdev, reg_base); failure_unmap_cfg_base: pci_iounmap(pdev, cfg_base); failure_release_regions: pci_release_regions(pdev); failure_disable_pci: pci_disable_device(pdev); return err; }
static int mf624_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct uio_info *info; info = kzalloc(sizeof(struct uio_info), GFP_KERNEL); if (!info) return -ENOMEM; if (pci_enable_device(dev)) goto out_free; if (pci_request_regions(dev, "mf624")) goto out_disable; info->name = "mf624"; info->version = "0.0.1"; /* Note: Datasheet says device uses BAR0, BAR1, BAR2 -- do not trust it */ /* BAR0 */ info->mem[0].name = "PCI chipset, interrupts, status " "bits, special functions"; info->mem[0].addr = pci_resource_start(dev, 0); if (!info->mem[0].addr) goto out_release; info->mem[0].size = pci_resource_len(dev, 0); info->mem[0].memtype = UIO_MEM_PHYS; info->mem[0].internal_addr = pci_ioremap_bar(dev, 0); if (!info->mem[0].internal_addr) goto out_release; /* BAR2 */ info->mem[1].name = "ADC, DAC, DIO"; info->mem[1].addr = pci_resource_start(dev, 2); if (!info->mem[1].addr) goto out_unmap0; info->mem[1].size = pci_resource_len(dev, 2); info->mem[1].memtype = UIO_MEM_PHYS; info->mem[1].internal_addr = pci_ioremap_bar(dev, 2); if (!info->mem[1].internal_addr) goto out_unmap0; /* BAR4 */ info->mem[2].name = "Counter/timer chip"; info->mem[2].addr = pci_resource_start(dev, 4); if (!info->mem[2].addr) goto out_unmap1; info->mem[2].size = pci_resource_len(dev, 4); info->mem[2].memtype = UIO_MEM_PHYS; info->mem[2].internal_addr = pci_ioremap_bar(dev, 4); if (!info->mem[2].internal_addr) goto out_unmap1; info->irq = dev->irq; info->irq_flags = IRQF_SHARED; info->handler = mf624_irq_handler; info->irqcontrol = mf624_irqcontrol; if (uio_register_device(&dev->dev, info)) goto out_unmap2; pci_set_drvdata(dev, info); return 0; out_unmap2: iounmap(info->mem[2].internal_addr); out_unmap1: iounmap(info->mem[1].internal_addr); out_unmap0: iounmap(info->mem[0].internal_addr); out_release: pci_release_regions(dev); out_disable: pci_disable_device(dev); out_free: kfree(info); return -ENODEV; }
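/*
 * For context: a device-removal path for the UIO probe above would mirror its
 * error-unwind labels. This is only a minimal sketch under that assumption;
 * the function name and its use of drvdata are illustrative, not taken from
 * the original driver.
 */
static void mf624_pci_remove_sketch(struct pci_dev *dev)
{
	struct uio_info *info = pci_get_drvdata(dev);

	uio_unregister_device(info);		/* undo uio_register_device() */
	iounmap(info->mem[2].internal_addr);	/* BAR4 mapping */
	iounmap(info->mem[1].internal_addr);	/* BAR2 mapping */
	iounmap(info->mem[0].internal_addr);	/* BAR0 mapping */
	pci_release_regions(dev);
	pci_disable_device(dev);
	kfree(info);
}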
static int rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *dev; struct netdev_private *np; static int card_idx; int chip_idx = ent->driver_data; int err, irq; void __iomem *ioaddr; static int version_printed; void *ring_space; dma_addr_t ring_dma; if (!version_printed++) printk ("%s", version); err = pci_enable_device (pdev); if (err) return err; irq = pdev->irq; err = pci_request_regions (pdev, "dl2k"); if (err) goto err_out_disable; pci_set_master (pdev); err = -ENOMEM; dev = alloc_etherdev (sizeof (*np)); if (!dev) goto err_out_res; SET_NETDEV_DEV(dev, &pdev->dev); np = netdev_priv(dev); /* IO registers range. */ ioaddr = pci_iomap(pdev, 0, 0); if (!ioaddr) goto err_out_dev; np->eeprom_addr = ioaddr; #ifdef MEM_MAPPING /* MM registers range. */ ioaddr = pci_iomap(pdev, 1, 0); if (!ioaddr) goto err_out_iounmap; #endif np->ioaddr = ioaddr; np->chip_id = chip_idx; np->pdev = pdev; spin_lock_init (&np->tx_lock); spin_lock_init (&np->rx_lock); /* Parse manual configuration */ np->an_enable = 1; np->tx_coalesce = 1; if (card_idx < MAX_UNITS) { if (media[card_idx] != NULL) { np->an_enable = 0; if (strcmp (media[card_idx], "auto") == 0 || strcmp (media[card_idx], "autosense") == 0 || strcmp (media[card_idx], "0") == 0 ) { np->an_enable = 2; } else if (strcmp (media[card_idx], "100mbps_fd") == 0 || strcmp (media[card_idx], "4") == 0) { np->speed = 100; np->full_duplex = 1; } else if (strcmp (media[card_idx], "100mbps_hd") == 0 || strcmp (media[card_idx], "3") == 0) { np->speed = 100; np->full_duplex = 0; } else if (strcmp (media[card_idx], "10mbps_fd") == 0 || strcmp (media[card_idx], "2") == 0) { np->speed = 10; np->full_duplex = 1; } else if (strcmp (media[card_idx], "10mbps_hd") == 0 || strcmp (media[card_idx], "1") == 0) { np->speed = 10; np->full_duplex = 0; } else if (strcmp (media[card_idx], "1000mbps_fd") == 0 || strcmp (media[card_idx], "6") == 0) { np->speed=1000; np->full_duplex=1; } else if (strcmp (media[card_idx], "1000mbps_hd") == 0 || strcmp (media[card_idx], "5") == 0) { np->speed = 1000; np->full_duplex = 0; } else { np->an_enable = 1; } } if (jumbo[card_idx] != 0) { np->jumbo = 1; dev->mtu = MAX_JUMBO; } else { np->jumbo = 0; if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE) dev->mtu = mtu[card_idx]; } np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ? vlan[card_idx] : 0; if (rx_coalesce > 0 && rx_timeout > 0) { np->rx_coalesce = rx_coalesce; np->rx_timeout = rx_timeout; np->coalesce = 1; } np->tx_flow = (tx_flow == 0) ? 0 : 1; np->rx_flow = (rx_flow == 0) ? 0 : 1; if (tx_coalesce < 1) tx_coalesce = 1; else if (tx_coalesce > TX_RING_SIZE-1) tx_coalesce = TX_RING_SIZE - 1; } dev->netdev_ops = &netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; dev->ethtool_ops = &ethtool_ops; #if 0 dev->features = NETIF_F_IP_CSUM; #endif /* MTU range: 68 - 1536 or 8000 */ dev->min_mtu = ETH_MIN_MTU; dev->max_mtu = np->jumbo ? MAX_JUMBO : PACKET_SIZE; pci_set_drvdata (pdev, dev); ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma); if (!ring_space) goto err_out_iounmap; np->tx_ring = ring_space; np->tx_ring_dma = ring_dma; ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma); if (!ring_space) goto err_out_unmap_tx; np->rx_ring = ring_space; np->rx_ring_dma = ring_dma; /* Parse eeprom data */ parse_eeprom (dev); /* Find PHY address */ err = find_miiphy (dev); if (err) goto err_out_unmap_rx; /* Fiber device? */ np->phy_media = (dr16(ASICCtrl) & PhyMedia) ?
1 : 0; np->link_status = 0; /* Set media and reset PHY */ if (np->phy_media) { /* default Auto-Negotiation for fiber devices */ if (np->an_enable == 2) { np->an_enable = 1; } } else { /* Auto-Negotiation is mandatory for 1000BASE-T, IEEE 802.3ab Annex 28D page 14 */ if (np->speed == 1000) np->an_enable = 1; } err = register_netdev (dev); if (err) goto err_out_unmap_rx; card_idx++; printk (KERN_INFO "%s: %s, %pM, IRQ %d\n", dev->name, np->name, dev->dev_addr, irq); if (tx_coalesce > 1) printk(KERN_INFO "tx_coalesce:\t%d packets\n", tx_coalesce); if (np->coalesce) printk(KERN_INFO "rx_coalesce:\t%d packets\n" "rx_timeout: \t%d ns\n", np->rx_coalesce, np->rx_timeout*640); if (np->vlan) printk(KERN_INFO "vlan(id):\t%d\n", np->vlan); return 0; err_out_unmap_rx: pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); err_out_unmap_tx: pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); err_out_iounmap: #ifdef MEM_MAPPING pci_iounmap(pdev, np->ioaddr); #endif pci_iounmap(pdev, np->eeprom_addr); err_out_dev: free_netdev (dev); err_out_res: pci_release_regions (pdev); err_out_disable: pci_disable_device (pdev); return err; }
/* * Attach to a PCI probed interface */ static int __devinit pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id) { struct device_node *np; pmac_ide_hwif_t *pmif; void __iomem *base; unsigned long rbase, rlen; int rc; struct ide_hw hw; np = pci_device_to_OF_node(pdev); if (np == NULL) { printk(KERN_ERR "ide-pmac: cannot find MacIO node for Kauai ATA interface\n"); return -ENODEV; } pmif = kzalloc(sizeof(*pmif), GFP_KERNEL); if (pmif == NULL) return -ENOMEM; if (pci_enable_device(pdev)) { printk(KERN_WARNING "ide-pmac: Can't enable PCI device for " "%s\n", np->full_name); rc = -ENXIO; goto out_free_pmif; } pci_set_master(pdev); if (pci_request_regions(pdev, "Kauai ATA")) { printk(KERN_ERR "ide-pmac: Cannot obtain PCI resources for " "%s\n", np->full_name); rc = -ENXIO; goto out_free_pmif; } pmif->mdev = NULL; pmif->node = np; rbase = pci_resource_start(pdev, 0); rlen = pci_resource_len(pdev, 0); base = ioremap(rbase, rlen); pmif->regbase = (unsigned long) base + 0x2000; pmif->dma_regs = base + 0x1000; pmif->kauai_fcr = base; pmif->irq = pdev->irq; pci_set_drvdata(pdev, pmif); memset(&hw, 0, sizeof(hw)); pmac_ide_init_ports(&hw, pmif->regbase); hw.irq = pdev->irq; hw.dev = &pdev->dev; rc = pmac_ide_setup_device(pmif, &hw); if (rc != 0) { /* The interface is released to the common IDE layer */ pci_set_drvdata(pdev, NULL); iounmap(base); pci_release_regions(pdev); kfree(pmif); } return rc; out_free_pmif: kfree(pmif); return rc; }
static int snd_atiixp_pcm_open(struct snd_pcm_substream *substream, struct atiixp_dma *dma, int pcm_type) { struct atiixp_modem *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; int err; static unsigned int rates[] = { 8000, 9600, 12000, 16000 }; static struct snd_pcm_hw_constraint_list hw_constraints_rates = { .count = ARRAY_SIZE(rates), .list = rates, .mask = 0, }; if (snd_BUG_ON(!dma->ops || !dma->ops->enable_dma)) return -EINVAL; if (dma->opened) return -EBUSY; dma->substream = substream; runtime->hw = snd_atiixp_pcm_hw; dma->ac97_pcm_type = pcm_type; if ((err = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_rates)) < 0) return err; if ((err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS)) < 0) return err; runtime->private_data = dma; /* enable DMA bits */ spin_lock_irq(&chip->reg_lock); dma->ops->enable_dma(chip, 1); spin_unlock_irq(&chip->reg_lock); dma->opened = 1; return 0; } static int snd_atiixp_pcm_close(struct snd_pcm_substream *substream, struct atiixp_dma *dma) { struct atiixp_modem *chip = snd_pcm_substream_chip(substream); /* disable DMA bits */ if (snd_BUG_ON(!dma->ops || !dma->ops->enable_dma)) return -EINVAL; spin_lock_irq(&chip->reg_lock); dma->ops->enable_dma(chip, 0); spin_unlock_irq(&chip->reg_lock); dma->substream = NULL; dma->opened = 0; return 0; } /* */ static int snd_atiixp_playback_open(struct snd_pcm_substream *substream) { struct atiixp_modem *chip = snd_pcm_substream_chip(substream); int err; mutex_lock(&chip->open_mutex); err = snd_atiixp_pcm_open(substream, &chip->dmas[ATI_DMA_PLAYBACK], 0); mutex_unlock(&chip->open_mutex); if (err < 0) return err; return 0; } static int snd_atiixp_playback_close(struct snd_pcm_substream *substream) { struct atiixp_modem *chip = snd_pcm_substream_chip(substream); int err; mutex_lock(&chip->open_mutex); err = snd_atiixp_pcm_close(substream, &chip->dmas[ATI_DMA_PLAYBACK]); mutex_unlock(&chip->open_mutex); return err; } static int snd_atiixp_capture_open(struct snd_pcm_substream *substream) { struct atiixp_modem *chip = snd_pcm_substream_chip(substream); return snd_atiixp_pcm_open(substream, &chip->dmas[ATI_DMA_CAPTURE], 1); } static int snd_atiixp_capture_close(struct snd_pcm_substream *substream) { struct atiixp_modem *chip = snd_pcm_substream_chip(substream); return snd_atiixp_pcm_close(substream, &chip->dmas[ATI_DMA_CAPTURE]); } /* AC97 playback */ static struct snd_pcm_ops snd_atiixp_playback_ops = { .open = snd_atiixp_playback_open, .close = snd_atiixp_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_atiixp_pcm_hw_params, .hw_free = snd_atiixp_pcm_hw_free, .prepare = snd_atiixp_playback_prepare, .trigger = snd_atiixp_pcm_trigger, .pointer = snd_atiixp_pcm_pointer, }; /* AC97 capture */ static struct snd_pcm_ops snd_atiixp_capture_ops = { .open = snd_atiixp_capture_open, .close = snd_atiixp_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_atiixp_pcm_hw_params, .hw_free = snd_atiixp_pcm_hw_free, .prepare = snd_atiixp_capture_prepare, .trigger = snd_atiixp_pcm_trigger, .pointer = snd_atiixp_pcm_pointer, }; static struct atiixp_dma_ops snd_atiixp_playback_dma_ops = { .type = ATI_DMA_PLAYBACK, .llp_offset = ATI_REG_MODEM_OUT_DMA1_LINKPTR, .dt_cur = ATI_REG_MODEM_OUT_DMA1_DT_CUR, .enable_dma = atiixp_out_enable_dma, .enable_transfer = atiixp_out_enable_transfer, .flush_dma = atiixp_out_flush_dma, }; static struct atiixp_dma_ops snd_atiixp_capture_dma_ops = { .type = ATI_DMA_CAPTURE, .llp_offset = 
ATI_REG_MODEM_IN_DMA_LINKPTR, .dt_cur = ATI_REG_MODEM_IN_DMA_DT_CUR, .enable_dma = atiixp_in_enable_dma, .enable_transfer = atiixp_in_enable_transfer, .flush_dma = atiixp_in_flush_dma, }; static int __devinit snd_atiixp_pcm_new(struct atiixp_modem *chip) { struct snd_pcm *pcm; int err; /* initialize constants */ chip->dmas[ATI_DMA_PLAYBACK].ops = &snd_atiixp_playback_dma_ops; chip->dmas[ATI_DMA_CAPTURE].ops = &snd_atiixp_capture_dma_ops; /* PCM #0: analog I/O */ err = snd_pcm_new(chip->card, "ATI IXP MC97", ATI_PCMDEV_ANALOG, 1, 1, &pcm); if (err < 0) return err; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_atiixp_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_atiixp_capture_ops); pcm->dev_class = SNDRV_PCM_CLASS_MODEM; pcm->private_data = chip; strcpy(pcm->name, "ATI IXP MC97"); chip->pcmdevs[ATI_PCMDEV_ANALOG] = pcm; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), 64*1024, 128*1024); return 0; } /* * interrupt handler */ static irqreturn_t snd_atiixp_interrupt(int irq, void *dev_id) { struct atiixp_modem *chip = dev_id; unsigned int status; status = atiixp_read(chip, ISR); if (! status) return IRQ_NONE; /* process audio DMA */ if (status & ATI_REG_ISR_MODEM_OUT1_XRUN) snd_atiixp_xrun_dma(chip, &chip->dmas[ATI_DMA_PLAYBACK]); else if (status & ATI_REG_ISR_MODEM_OUT1_STATUS) snd_atiixp_update_dma(chip, &chip->dmas[ATI_DMA_PLAYBACK]); if (status & ATI_REG_ISR_MODEM_IN_XRUN) snd_atiixp_xrun_dma(chip, &chip->dmas[ATI_DMA_CAPTURE]); else if (status & ATI_REG_ISR_MODEM_IN_STATUS) snd_atiixp_update_dma(chip, &chip->dmas[ATI_DMA_CAPTURE]); /* for codec detection */ if (status & CODEC_CHECK_BITS) { unsigned int detected; detected = status & CODEC_CHECK_BITS; spin_lock(&chip->reg_lock); chip->codec_not_ready_bits |= detected; atiixp_update(chip, IER, detected, 0); /* disable the detected irqs */ spin_unlock(&chip->reg_lock); } /* ack */ atiixp_write(chip, ISR, status); return IRQ_HANDLED; } /* * ac97 mixer section */ static int __devinit snd_atiixp_mixer_new(struct atiixp_modem *chip, int clock) { struct snd_ac97_bus *pbus; struct snd_ac97_template ac97; int i, err; int codec_count; static struct snd_ac97_bus_ops ops = { .write = snd_atiixp_ac97_write, .read = snd_atiixp_ac97_read, }; static unsigned int codec_skip[NUM_ATI_CODECS] = { ATI_REG_ISR_CODEC0_NOT_READY, ATI_REG_ISR_CODEC1_NOT_READY, ATI_REG_ISR_CODEC2_NOT_READY, }; if (snd_atiixp_codec_detect(chip) < 0) return -ENXIO; if ((err = snd_ac97_bus(chip->card, 0, &ops, chip, &pbus)) < 0) return err; pbus->clock = clock; chip->ac97_bus = pbus; codec_count = 0; for (i = 0; i < NUM_ATI_CODECS; i++) { if (chip->codec_not_ready_bits & codec_skip[i]) continue; memset(&ac97, 0, sizeof(ac97)); ac97.private_data = chip; ac97.pci = chip->pci; ac97.num = i; ac97.scaps = AC97_SCAP_SKIP_AUDIO | AC97_SCAP_POWER_SAVE; if ((err = snd_ac97_mixer(pbus, &ac97, &chip->ac97[i])) < 0) { chip->ac97[i] = NULL; /* to be sure */ snd_printdd("atiixp-modem: codec %d not available for modem\n", i); continue; } codec_count++; } if (! 
codec_count) { snd_printk(KERN_ERR "atiixp-modem: no codec available\n"); return -ENODEV; } /* snd_ac97_tune_hardware(chip->ac97, ac97_quirks); */ return 0; } #ifdef CONFIG_PM /* * power management */ static int snd_atiixp_suspend(struct pci_dev *pci, pm_message_t state) { struct snd_card *card = pci_get_drvdata(pci); struct atiixp_modem *chip = card->private_data; int i; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); for (i = 0; i < NUM_ATI_PCMDEVS; i++) snd_pcm_suspend_all(chip->pcmdevs[i]); for (i = 0; i < NUM_ATI_CODECS; i++) snd_ac97_suspend(chip->ac97[i]); snd_atiixp_aclink_down(chip); snd_atiixp_chip_stop(chip); pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, pci_choose_state(pci, state)); return 0; } static int snd_atiixp_resume(struct pci_dev *pci) { struct snd_card *card = pci_get_drvdata(pci); struct atiixp_modem *chip = card->private_data; int i; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR "atiixp-modem: pci_enable_device failed, " "disabling device\n"); snd_card_disconnect(card); return -EIO; } pci_set_master(pci); snd_atiixp_aclink_reset(chip); snd_atiixp_chip_start(chip); for (i = 0; i < NUM_ATI_CODECS; i++) snd_ac97_resume(chip->ac97[i]); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif /* CONFIG_PM */ #ifdef CONFIG_PROC_FS /* * proc interface for register dump */ static void snd_atiixp_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct atiixp_modem *chip = entry->private_data; int i; for (i = 0; i < 256; i += 4) snd_iprintf(buffer, "%02x: %08x\n", i, readl(chip->remap_addr + i)); } static void __devinit snd_atiixp_proc_init(struct atiixp_modem *chip) { struct snd_info_entry *entry; if (! snd_card_proc_new(chip->card, "atiixp-modem", &entry)) snd_info_set_text_ops(entry, chip, snd_atiixp_proc_read); } #else #define snd_atiixp_proc_init(chip) #endif /* * destructor */ static int snd_atiixp_free(struct atiixp_modem *chip) { if (chip->irq < 0) goto __hw_end; snd_atiixp_chip_stop(chip); __hw_end: if (chip->irq >= 0) free_irq(chip->irq, chip); if (chip->remap_addr) iounmap(chip->remap_addr); pci_release_regions(chip->pci); pci_disable_device(chip->pci); kfree(chip); return 0; } static int snd_atiixp_dev_free(struct snd_device *device) { struct atiixp_modem *chip = device->device_data; return snd_atiixp_free(chip); } /* * constructor for chip instance */ static int __devinit snd_atiixp_create(struct snd_card *card, struct pci_dev *pci, struct atiixp_modem **r_chip) { static struct snd_device_ops ops = { .dev_free = snd_atiixp_dev_free, }; struct atiixp_modem *chip; int err; if ((err = pci_enable_device(pci)) < 0) return err; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) { pci_disable_device(pci); return -ENOMEM; } spin_lock_init(&chip->reg_lock); mutex_init(&chip->open_mutex); chip->card = card; chip->pci = pci; chip->irq = -1; if ((err = pci_request_regions(pci, "ATI IXP MC97")) < 0) { kfree(chip); pci_disable_device(pci); return err; } chip->addr = pci_resource_start(pci, 0); chip->remap_addr = ioremap_nocache(chip->addr, pci_resource_len(pci, 0)); if (chip->remap_addr == NULL) { snd_printk(KERN_ERR "AC'97 space ioremap problem\n"); snd_atiixp_free(chip); return -EIO; } if (request_irq(pci->irq, snd_atiixp_interrupt, IRQF_SHARED, card->shortname, chip)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); snd_atiixp_free(chip); return -EBUSY; } chip->irq = pci->irq; pci_set_master(pci); synchronize_irq(chip->irq); 
if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_atiixp_free(chip); return err; } snd_card_set_dev(card, &pci->dev); *r_chip = chip; return 0; } static int __devinit snd_atiixp_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { struct snd_card *card; struct atiixp_modem *chip; int err; card = snd_card_new(index, id, THIS_MODULE, 0); if (card == NULL) return -ENOMEM; strcpy(card->driver, "ATIIXP-MODEM"); strcpy(card->shortname, "ATI IXP Modem"); if ((err = snd_atiixp_create(card, pci, &chip)) < 0) goto __error; card->private_data = chip; if ((err = snd_atiixp_aclink_reset(chip)) < 0) goto __error; if ((err = snd_atiixp_mixer_new(chip, ac97_clock)) < 0) goto __error; if ((err = snd_atiixp_pcm_new(chip)) < 0) goto __error; snd_atiixp_proc_init(chip); snd_atiixp_chip_start(chip); sprintf(card->longname, "%s rev %x at 0x%lx, irq %i", card->shortname, pci->revision, chip->addr, chip->irq); if ((err = snd_card_register(card)) < 0) goto __error; pci_set_drvdata(pci, card); return 0; __error: snd_card_free(card); return err; } static void __devexit snd_atiixp_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); }
static int __devinit #else static int #endif igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct rte_uio_pci_dev *udev; struct msix_entry msix_entry; int err; udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL); if (!udev) return -ENOMEM; /* * enable device: ask low-level code to enable I/O and * memory */ err = pci_enable_device(dev); if (err != 0) { dev_err(&dev->dev, "Cannot enable PCI device\n"); goto fail_free; } /* * reserve device's PCI memory regions for use by this * module */ err = pci_request_regions(dev, "igb_uio"); if (err != 0) { dev_err(&dev->dev, "Cannot request regions\n"); goto fail_disable; } /* enable bus mastering on the device */ pci_set_master(dev); /* remap IO memory */ err = igbuio_setup_bars(dev, &udev->info); if (err != 0) goto fail_release_iomem; /* set 64-bit DMA mask */ err = pci_set_dma_mask(dev, DMA_BIT_MASK(64)); if (err != 0) { dev_err(&dev->dev, "Cannot set DMA mask\n"); goto fail_release_iomem; } err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64)); if (err != 0) { dev_err(&dev->dev, "Cannot set consistent DMA mask\n"); goto fail_release_iomem; } /* fill uio infos */ udev->info.name = "igb_uio"; udev->info.version = "0.1"; udev->info.handler = igbuio_pci_irqhandler; udev->info.irqcontrol = igbuio_pci_irqcontrol; #ifdef CONFIG_XEN_DOM0 /* check if the driver run on Xen Dom0 */ if (xen_initial_domain()) udev->info.mmap = igbuio_dom0_pci_mmap; #endif udev->info.priv = udev; udev->pdev = dev; switch (igbuio_intr_mode_preferred) { case RTE_INTR_MODE_MSIX: /* Only 1 msi-x vector needed */ msix_entry.entry = 0; if (pci_enable_msix(dev, &msix_entry, 1) == 0) { dev_dbg(&dev->dev, "using MSI-X"); udev->info.irq = msix_entry.vector; udev->mode = RTE_INTR_MODE_MSIX; break; } /* fall back to INTX */ case RTE_INTR_MODE_LEGACY: if (pci_intx_mask_supported(dev)) { dev_dbg(&dev->dev, "using INTX"); udev->info.irq_flags = IRQF_SHARED; udev->info.irq = dev->irq; udev->mode = RTE_INTR_MODE_LEGACY; break; } dev_notice(&dev->dev, "PCI INTX mask not supported\n"); /* fall back to no IRQ */ case RTE_INTR_MODE_NONE: udev->mode = RTE_INTR_MODE_NONE; udev->info.irq = 0; break; default: dev_err(&dev->dev, "invalid IRQ mode %u", igbuio_intr_mode_preferred); err = -EINVAL; goto fail_release_iomem; } err = sysfs_create_group(&dev->dev.kobj, &dev_attr_grp); if (err != 0) goto fail_release_iomem; /* register uio driver */ err = uio_register_device(&dev->dev, &udev->info); if (err != 0) goto fail_remove_group; pci_set_drvdata(dev, udev); dev_info(&dev->dev, "uio device registered with irq %lx\n", udev->info.irq); return 0; fail_remove_group: sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp); fail_release_iomem: igbuio_pci_release_iomem(&udev->info); if (udev->mode == RTE_INTR_MODE_MSIX) pci_disable_msix(udev->pdev); pci_release_regions(dev); fail_disable: pci_disable_device(dev); fail_free: kfree(udev); return err; }
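/*
 * Note on the MSI-X setup above: pci_enable_msix() was removed from later
 * kernels. A minimal sketch of the equivalent single-vector request with the
 * pci_alloc_irq_vectors() API; the helper name is illustrative only, not part
 * of igb_uio.
 */
static int igbuio_request_msix_vector_sketch(struct pci_dev *dev)
{
	int nvec;

	nvec = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;		/* MSI-X not available */
	return pci_irq_vector(dev, 0);	/* Linux IRQ number for vector 0 */
}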
static int __devinit t3e3_init_channel(struct channel *channel, struct pci_dev *pdev, struct card *card) { struct net_device *dev; unsigned int val; int err; err = pci_enable_device(pdev); if (err) return err; err = pci_request_regions(pdev, "SBE 2T3E3"); if (err) goto disable; dev = alloc_hdlcdev(channel); if (!dev) { printk(KERN_ERR "SBE 2T3E3" ": Out of memory\n"); err = -ENOMEM; goto free_regions; } t3e3_sc_init(channel); dev_to_priv(dev) = channel; channel->pdev = pdev; channel->dev = dev; channel->card = card; channel->addr = pci_resource_start(pdev, 0); if (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1) channel->h.slot = 1; else channel->h.slot = 0; err = setup_device(dev, channel); if (err) goto free_dev; pci_read_config_dword(channel->pdev, 0x40, &val); /* mask sleep mode */ pci_write_config_dword(channel->pdev, 0x40, val & 0x3FFFFFFF); pci_read_config_byte(channel->pdev, PCI_CACHE_LINE_SIZE, &channel->h.cache_size); pci_read_config_dword(channel->pdev, PCI_COMMAND, &channel->h.command); t3e3_init(channel); err = request_irq(dev->irq, &t3e3_intr, IRQF_SHARED, dev->name, dev); if (err) { printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq); goto unregister_dev; } pci_set_drvdata(pdev, channel); return 0; unregister_dev: unregister_hdlc_device(dev); free_dev: free_netdev(dev); free_regions: pci_release_regions(pdev); disable: pci_disable_device(pdev); return err; }
static int __devinit ioat_probe(struct pci_dev *pdev, const struct pci_device_id *id) { void __iomem *iobase; struct ioat_device *device; unsigned long mmio_start, mmio_len; int err; err = pci_enable_device(pdev); if (err) goto err_enable_device; err = pci_request_regions(pdev, ioat_pci_driver.name); if (err) goto err_request_regions; err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); if (err) err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); if (err) goto err_set_dma_mask; err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); if (err) err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); if (err) goto err_set_dma_mask; mmio_start = pci_resource_start(pdev, 0); mmio_len = pci_resource_len(pdev, 0); iobase = ioremap(mmio_start, mmio_len); if (!iobase) { err = -ENOMEM; goto err_ioremap; } device = kzalloc(sizeof(*device), GFP_KERNEL); if (!device) { err = -ENOMEM; goto err_kzalloc; } device->pdev = pdev; pci_set_drvdata(pdev, device); device->iobase = iobase; pci_set_master(pdev); err = ioat_setup_functionality(pdev, iobase); if (err) goto err_version; return 0; err_version: kfree(device); err_kzalloc: iounmap(iobase); err_ioremap: err_set_dma_mask: pci_release_regions(pdev); pci_disable_device(pdev); err_request_regions: err_enable_device: return err; }
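/*
 * The 64-bit-then-32-bit DMA mask fallback in ioat_probe() above can also be
 * expressed with the generic DMA API on current kernels; a minimal sketch,
 * with a helper name that is an assumption and not part of the original code.
 */
static int ioat_set_dma_masks_sketch(struct pci_dev *pdev)
{
	int err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return err;
}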
/** * mei_probe - Device Initialization Routine * * @pdev: PCI device structure * @ent: entry in kcs_pci_tbl * * returns 0 on success, <0 on failure. */ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct mei_device *dev; struct mei_me_hw *hw; int err; if (!mei_me_quirk_probe(pdev, ent)) { err = -ENODEV; goto end; } /* enable pci dev */ err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "failed to enable pci device.\n"); goto end; } /* set PCI host mastering */ pci_set_master(pdev); /* pci request regions for mei driver */ err = pci_request_regions(pdev, KBUILD_MODNAME); if (err) { dev_err(&pdev->dev, "failed to get pci regions.\n"); goto disable_device; } if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) || dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); } if (err) { dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); goto release_regions; } /* allocates and initializes the mei dev structure */ dev = mei_me_dev_init(pdev); if (!dev) { err = -ENOMEM; goto release_regions; } hw = to_me_hw(dev); /* mapping IO device memory */ hw->mem_addr = pci_iomap(pdev, 0, 0); if (!hw->mem_addr) { dev_err(&pdev->dev, "mapping I/O device memory failure.\n"); err = -ENOMEM; goto free_device; } pci_enable_msi(pdev); /* request and enable interrupt */ if (pci_dev_msi_enabled(pdev)) err = request_threaded_irq(pdev->irq, NULL, mei_me_irq_thread_handler, IRQF_ONESHOT, KBUILD_MODNAME, dev); else err = request_threaded_irq(pdev->irq, mei_me_irq_quick_handler, mei_me_irq_thread_handler, IRQF_SHARED, KBUILD_MODNAME, dev); if (err) { dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n", pdev->irq); goto disable_msi; } if (mei_start(dev)) { dev_err(&pdev->dev, "init hw failure.\n"); err = -ENODEV; goto release_irq; } err = mei_register(dev); if (err) goto release_irq; pci_set_drvdata(pdev, dev); schedule_delayed_work(&dev->timer_work, HZ); dev_dbg(&pdev->dev, "initialization successful.\n"); return 0; release_irq: mei_cancel_work(dev); mei_disable_interrupts(dev); free_irq(pdev->irq, dev); disable_msi: pci_disable_msi(pdev); pci_iounmap(pdev, hw->mem_addr); free_device: kfree(dev); release_regions: pci_release_regions(pdev); disable_device: pci_disable_device(pdev); end: dev_err(&pdev->dev, "initialization failed.\n"); return err; }
static int __devinit p54p_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct p54p_priv *priv; struct ieee80211_hw *dev; unsigned long mem_addr, mem_len; int err; err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "Cannot enable new PCI device\n"); return err; } mem_addr = pci_resource_start(pdev, 0); mem_len = pci_resource_len(pdev, 0); if (mem_len < sizeof(struct p54p_csr)) { dev_err(&pdev->dev, "Too short PCI resources\n"); goto err_disable_dev; } err = pci_request_regions(pdev, "p54pci"); if (err) { dev_err(&pdev->dev, "Cannot obtain PCI resources\n"); goto err_disable_dev; } if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { dev_err(&pdev->dev, "No suitable DMA available\n"); goto err_free_reg; } pci_set_master(pdev); pci_try_set_mwi(pdev); pci_write_config_byte(pdev, 0x40, 0); pci_write_config_byte(pdev, 0x41, 0); dev = p54_init_common(sizeof(*priv)); if (!dev) { dev_err(&pdev->dev, "ieee80211 alloc failed\n"); err = -ENOMEM; goto err_free_reg; } priv = dev->priv; priv->pdev = pdev; SET_IEEE80211_DEV(dev, &pdev->dev); pci_set_drvdata(pdev, dev); priv->map = ioremap(mem_addr, mem_len); if (!priv->map) { dev_err(&pdev->dev, "Cannot map device memory\n"); err = -ENOMEM; goto err_free_dev; } priv->ring_control = pci_alloc_consistent(pdev, sizeof(*priv->ring_control), &priv->ring_control_dma); if (!priv->ring_control) { dev_err(&pdev->dev, "Cannot allocate rings\n"); err = -ENOMEM; goto err_iounmap; } priv->common.open = p54p_open; priv->common.stop = p54p_stop; priv->common.tx = p54p_tx; spin_lock_init(&priv->lock); tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev); err = request_firmware(&priv->firmware, "isl3886pci", &priv->pdev->dev); if (err) { dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n"); err = request_firmware(&priv->firmware, "isl3886", &priv->pdev->dev); if (err) goto err_free_common; } err = p54p_open(dev); if (err) goto err_free_common; err = p54_read_eeprom(dev); p54p_stop(dev); if (err) goto err_free_common; err = p54_register_common(dev, &pdev->dev); if (err) goto err_free_common; return 0; err_free_common: release_firmware(priv->firmware); pci_free_consistent(pdev, sizeof(*priv->ring_control), priv->ring_control, priv->ring_control_dma); err_iounmap: iounmap(priv->map); err_free_dev: pci_set_drvdata(pdev, NULL); p54_free_common(dev); err_free_reg: pci_release_regions(pdev); err_disable_dev: pci_disable_device(pdev); return err; }
/*---------------------------------------------------------------- * prism2sta_probe_plx * * Probe routine called when a PCI device w/ matching ID is found. * This PLX implementation uses the following map: * BAR0: Unused * BAR1: ???? * BAR2: PCMCIA attribute memory * BAR3: PCMCIA i/o space * Here's the sequence: * - Allocate the PCI resources. * - Read the PCMCIA attribute memory to make sure we have a WLAN card * - Reset the MAC using the PCMCIA COR * - Initialize the netdev and wlan data * - Initialize the MAC * * Arguments: * pdev ptr to pci device structure containing info about * pci configuration. * id ptr to the device id entry that matched this device. * * Returns: * zero - success * negative - failed * * Side effects: * * * Call context: * process thread * ----------------------------------------------------------------*/ static int __devinit prism2sta_probe_plx( struct pci_dev *pdev, const struct pci_device_id *id) { int result; phys_t pccard_ioaddr; phys_t pccard_attr_mem; unsigned int pccard_attr_len; void __iomem *attr_mem = NULL; UINT32 plx_addr; wlandevice_t *wlandev = NULL; hfa384x_t *hw = NULL; int reg; u32 regic; if (pci_enable_device(pdev)) return -EIO; /* TMC7160 boards are special */ if ((pdev->vendor == PCIVENDOR_NDC) && (pdev->device == PCIDEVICE_NCP130_ASIC)) { unsigned long delay; pccard_attr_mem = 0; pccard_ioaddr = pci_resource_start(pdev, 1); outb(0x45, pccard_ioaddr); delay = jiffies + 1*HZ; while (time_before(jiffies, delay)); if (inb(pccard_ioaddr) != 0x45) { WLAN_LOG_ERROR("Initialize the TMC7160 failed. (0x%x)\n", inb(pccard_ioaddr)); return -EIO; } pccard_ioaddr = pci_resource_start(pdev, 2); prism2_doreset = 0; WLAN_LOG_INFO("NDC NCP130 with TMC716(ASIC) PCI interface device found at io:0x%x, irq:%d\n", pccard_ioaddr, pdev->irq); goto init; } /* Collect the resource requirements */ pccard_attr_mem = pci_resource_start(pdev, 2); pccard_attr_len = pci_resource_len(pdev, 2); if (pccard_attr_len < PLX_MIN_ATTR_LEN) return -EIO; pccard_ioaddr = pci_resource_start(pdev, 3); /* bjoern: We need to tell the card to enable interrupts, in * case the serial eprom didn't do this already. See the * PLX9052 data book, p8-1 and 8-24 for reference. * [MSM]: This bit of code came from the orinoco_cs driver. */ plx_addr = pci_resource_start(pdev, 1); regic = 0; regic = inl(plx_addr+PLX_INTCSR); if(regic & PLX_INTCSR_INTEN) { WLAN_LOG_DEBUG(1, "%s: Local Interrupt already enabled\n", dev_info); } else { regic |= PLX_INTCSR_INTEN; outl(regic, plx_addr+PLX_INTCSR); regic = inl(plx_addr+PLX_INTCSR); if(!(regic & PLX_INTCSR_INTEN)) { WLAN_LOG_ERROR( "%s: Couldn't enable Local Interrupts\n", dev_info); return -EIO; } } /* These assignments are here in case of future mappings for * io space and irq that might be similar to ioremap */ if (!request_mem_region(pccard_attr_mem, pci_resource_len(pdev, 2), "Prism2")) { WLAN_LOG_ERROR("%s: Couldn't reserve PCI memory region\n", dev_info); return -EIO; } attr_mem = ioremap(pccard_attr_mem, pccard_attr_len); WLAN_LOG_INFO("A PLX PCI/PCMCIA interface device found, " "phymem:0x%llx, phyio=0x%x, irq:%d, " "mem: 0x%lx\n", (unsigned long long)pccard_attr_mem, pccard_ioaddr, pdev->irq, (unsigned long)attr_mem); /* Verify whether PC card is present. * [MSM] This needs improvement, the right thing to do is * probably to walk the CIS looking for the vendor and product * IDs. It would be nice if this could be tied in with the * etc/pcmcia/wlan-ng.conf file. Any volunteers? 
;-) */ if ( readb(attr_mem + 0) != 0x01 || readb(attr_mem + 2) != 0x03 || readb(attr_mem + 4) != 0x00 || readb(attr_mem + 6) != 0x00 || readb(attr_mem + 8) != 0xFF || readb(attr_mem + 10) != 0x17 || readb(attr_mem + 12) != 0x04 || readb(attr_mem + 14) != 0x67) { WLAN_LOG_ERROR("Prism2 PC card CIS is invalid.\n"); return -EIO; } WLAN_LOG_INFO("A PCMCIA WLAN adapter was found.\n"); /* Write COR to enable PC card */ writeb(COR_VALUE, attr_mem + COR_OFFSET); reg = readb(attr_mem + COR_OFFSET); init: /* * Now do everything the same as a PCI device * [MSM] TODO: We could probably factor this out of pcmcia/pci/plx * and perhaps usb. Perhaps a task for another day....... */ if ((wlandev = create_wlan()) == NULL) { WLAN_LOG_ERROR("%s: Memory allocation failure.\n", dev_info); result = -EIO; goto failed; } hw = wlandev->priv; if ( wlan_setup(wlandev) != 0 ) { WLAN_LOG_ERROR("%s: wlan_setup() failed.\n", dev_info); result = -EIO; goto failed; } /* Setup netdevice's ability to report resources * Note: the netdevice was allocated by wlan_setup() */ wlandev->netdev->irq = pdev->irq; wlandev->netdev->base_addr = pccard_ioaddr; wlandev->netdev->mem_start = (unsigned long)attr_mem; wlandev->netdev->mem_end = (unsigned long)attr_mem + pci_resource_len(pdev, 0); /* Initialize the hw data */ hfa384x_create(hw, wlandev->netdev->irq, pccard_ioaddr, attr_mem); hw->wlandev = wlandev; /* Register the wlandev, this gets us a name and registers the * linux netdevice. */ SET_MODULE_OWNER(wlandev->netdev); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) SET_NETDEV_DEV(wlandev->netdev, &(pdev->dev)); #endif if ( register_wlandev(wlandev) != 0 ) { WLAN_LOG_ERROR("%s: register_wlandev() failed.\n", dev_info); result = -EIO; goto failed; } #if 0 /* TODO: Move this and an irq test into an hfa384x_testif() routine. */ outw(PRISM2STA_MAGIC, HFA384x_SWSUPPORT(wlandev->netdev->base_addr)); reg=inw( HFA384x_SWSUPPORT(wlandev->netdev->base_addr)); if ( reg != PRISM2STA_MAGIC ) { WLAN_LOG_ERROR("MAC register access test failed!\n"); result = -EIO; goto failed; } #endif /* Do a chip-level reset on the MAC */ if (prism2_doreset) { result = hfa384x_corereset(hw, prism2_reset_holdtime, prism2_reset_settletime, 0); if (result != 0) { unregister_wlandev(wlandev); hfa384x_destroy(hw); WLAN_LOG_ERROR( "%s: hfa384x_corereset() failed.\n", dev_info); result = -EIO; goto failed; } } pci_set_drvdata(pdev, wlandev); /* Shouldn't actually hook up the IRQ until we * _know_ things are alright. A test routine would help. */ request_irq(wlandev->netdev->irq, hfa384x_interrupt, SA_SHIRQ, wlandev->name, wlandev); wlandev->msdstate = WLAN_MSD_HWPRESENT; result = 0; goto done; failed: pci_set_drvdata(pdev, NULL); if (wlandev) kfree(wlandev); if (hw) kfree(hw); if (attr_mem) iounmap(attr_mem); pci_release_regions(pdev); pci_disable_device(pdev); done: DBFEXIT; return result; }
// ############ Initialization and cleanup ############ static int __init flink_pci_init(void) { int error = 0; struct flink_device* flink_pci_dev; struct pci_dev* pci_device; void __iomem* mmio_base_ptr; unsigned long mmio_length; #if defined(DBG) printk(KERN_DEBUG "[%s] Initializing module with parameters 'vid=%x, pid=%x'", MODULE_NAME, vid, pid); #endif // Get PCI device struct pci_device = pci_get_device(vid, pid, NULL); if(pci_device == NULL) { printk(KERN_ALERT "[%s] ERROR: PCIe device not found!", MODULE_NAME); error = -ENODEV; goto err_pci_device_not_found; } // Initialize and enable the PCI device error = pci_enable_device(pci_device); if(error) { printk(KERN_ALERT "[%s] ERROR: Unable to enable PCI device!", MODULE_NAME); goto err_pci_enable_device; } // Reserve PCI memory resources error = pci_request_regions(pci_device, KBUILD_MODNAME); if(error) { printk(KERN_ALERT "[%s] ERROR: Memory region request failed!", MODULE_NAME); goto err_pci_region_request; } // I/O Memory mapping mmio_length = pci_resource_len(pci_device, BAR_0); mmio_base_ptr = pci_iomap(pci_device, BAR_0, mmio_length); #if defined(DBG) printk(KERN_DEBUG "[%s] PCI resource I/O memory mapping:", MODULE_NAME); printk(KERN_DEBUG " -> Base address: 0x%p", mmio_base_ptr); printk(KERN_DEBUG " -> Memory length: 0x%lx (%lu bytes)", mmio_length, mmio_length); #endif if(mmio_base_ptr == NULL){ printk(KERN_ALERT "[%s] ERROR: I/O Memory mapping failed!", MODULE_NAME); error = -EIO; goto err_pci_iomap; } flink_pci_dev = create_flink_pci_device(&pci_bus_ops, pci_device, mmio_base_ptr, mmio_length); flink_device_add(flink_pci_dev); // All done printk(KERN_INFO "[%s] Module successfully loaded", MODULE_NAME); return 0; // ---- ERROR HANDLING ---- err_pci_iomap: pci_release_regions(pci_device); err_pci_region_request: pci_disable_device(pci_device); err_pci_enable_device: pci_device = NULL; err_pci_device_not_found: // nothing to do return error; }
/* * Do all the common PCIe setup and initialization. * devdata is not yet allocated, and is not allocated until after this * routine returns success. Therefore qib_dev_err() can't be used for error * printing. */ int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent) { int ret; ret = pci_enable_device(pdev); if (ret) { /* * This can happen (in theory) iff: * We did a chip reset, and then failed to reprogram the * BAR, or the chip reset due to an internal error. We then * unloaded the driver and reloaded it. * * Both reset cases set the BAR back to initial state. For * the latter case, the AER sticky error bit at offset 0x718 * should be set, but the Linux kernel doesn't yet know * about that, it appears. If the original BAR was retained * in the kernel data structures, this may be OK. */ qib_early_err(&pdev->dev, "pci enable failed: error %d\n", -ret); goto done; } ret = pci_request_regions(pdev, QIB_DRV_NAME); if (ret) { qib_devinfo(pdev, "pci_request_regions fails: err %d\n", -ret); goto bail; } ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (ret) { /* * If the 64 bit setup fails, try 32 bit. Some systems * do not setup 64 bit maps on systems with 2GB or less * memory installed. */ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { qib_devinfo(pdev, "Unable to set DMA mask: %d\n", ret); goto bail; } ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); } else ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (ret) { qib_early_err(&pdev->dev, "Unable to set DMA consistent mask: %d\n", ret); goto bail; } pci_set_master(pdev); ret = pci_enable_pcie_error_reporting(pdev); if (ret) { qib_early_err(&pdev->dev, "Unable to enable pcie error reporting: %d\n", ret); ret = 0; } goto done; bail: pci_disable_device(pdev); pci_release_regions(pdev); done: return ret; }
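/*
 * Editorial note: qib_pcie_init() above sets the streaming and coherent DMA
 * masks separately and falls back from 64-bit to 32-bit by hand.  On kernels
 * that provide dma_set_mask_and_coherent() both masks can be set in one call;
 * the sketch below shows the same fallback in that form (the wrapper name is
 * illustrative).
 */
#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int set_dma_masks_with_fallback(struct pci_dev *pdev)
{
	int ret;

	/* Prefer 64-bit DMA; some systems only set up 32-bit maps. */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return ret;
}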
int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops) { struct ieee80211_hw *hw; struct rt2x00_dev *rt2x00dev; int retval; u16 chip; retval = pci_enable_device(pci_dev); if (retval) { rt2x00_probe_err("Enable device failed\n"); return retval; } retval = pci_request_regions(pci_dev, pci_name(pci_dev)); if (retval) { rt2x00_probe_err("PCI request regions failed\n"); goto exit_disable_device; } pci_set_master(pci_dev); if (pci_set_mwi(pci_dev)) rt2x00_probe_err("MWI not available\n"); if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) { rt2x00_probe_err("PCI DMA not supported\n"); retval = -EIO; goto exit_release_regions; } hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); if (!hw) { rt2x00_probe_err("Failed to allocate hardware\n"); retval = -ENOMEM; goto exit_release_regions; } pci_set_drvdata(pci_dev, hw); rt2x00dev = hw->priv; rt2x00dev->dev = &pci_dev->dev; rt2x00dev->ops = ops; rt2x00dev->hw = hw; rt2x00dev->irq = pci_dev->irq; rt2x00dev->name = pci_name(pci_dev); if (pci_is_pcie(pci_dev)) rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE); else rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI); retval = rt2x00pci_alloc_reg(rt2x00dev); if (retval) goto exit_free_device; /* * Because rt3290 chip use different efuse offset to read efuse data. * So before read efuse it need to indicate it is the * rt3290 or not. */ pci_read_config_word(pci_dev, PCI_DEVICE_ID, &chip); rt2x00dev->chip.rt = chip; if (rt2x00_rt(rt2x00dev, MT7630)) MT76x0_WLAN_ChipOnOff(rt2x00dev, 1, 1); retval = rt2x00lib_probe_dev(rt2x00dev); if (retval) goto exit_free_reg; if (rt2x00_rt(rt2x00dev, MT7630)) { rt2x00dev->TXWISize=20; rt2x00dev->bscan=0; NdisAllocateSpinLock(rt2x00dev, &rt2x00dev->CtrlRingLock); NdisAllocateSpinLock(rt2x00dev, &rt2x00dev->CalLock); retval = RTMPAllocTxRxRingMemory(rt2x00dev); if (retval != NDIS_STATUS_SUCCESS) goto exit_free_reg; } return 0; exit_free_reg: rt2x00pci_free_reg(rt2x00dev); exit_free_device: ieee80211_free_hw(hw); exit_release_regions: pci_release_regions(pci_dev); exit_disable_device: pci_disable_device(pci_dev); pci_set_drvdata(pci_dev, NULL); return retval; }
// // PCI device probe & initialization function // static INT __devinit rt2860_probe( IN struct pci_dev *pci_dev, IN const struct pci_device_id *pci_id) { PRTMP_ADAPTER pAd = (PRTMP_ADAPTER)NULL; struct net_device *net_dev; PVOID handle; PSTRING print_name; ULONG csr_addr; INT rv = 0; RTMP_OS_NETDEV_OP_HOOK netDevHook; DBGPRINT(RT_DEBUG_TRACE, ("===> rt2860_probe\n")); //PCIDevInit============================================== // wake up and enable device if ((rv = pci_enable_device(pci_dev))!= 0) { DBGPRINT(RT_DEBUG_ERROR, ("Enable PCI device failed, errno=%d!\n", rv)); return rv; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) print_name = pci_dev ? pci_name(pci_dev) : "rt3090"; #else print_name = pci_dev ? pci_dev->slot_name : "rt3090"; #endif // LINUX_VERSION_CODE // if ((rv = pci_request_regions(pci_dev, print_name)) != 0) { DBGPRINT(RT_DEBUG_ERROR, ("Request PCI resource failed, errno=%d!\n", rv)); goto err_out; } // map physical address to virtual address for accessing register csr_addr = (unsigned long) ioremap(pci_resource_start(pci_dev, 0), pci_resource_len(pci_dev, 0)); if (!csr_addr) { DBGPRINT(RT_DEBUG_ERROR, ("ioremap failed for device %s, region 0x%lX @ 0x%lX\n", print_name, (ULONG)pci_resource_len(pci_dev, 0), (ULONG)pci_resource_start(pci_dev, 0))); goto err_out_free_res; } else { DBGPRINT(RT_DEBUG_TRACE, ("%s: at 0x%lx, VA 0x%lx, IRQ %d. \n", print_name, (ULONG)pci_resource_start(pci_dev, 0), (ULONG)csr_addr, pci_dev->irq)); } // Set DMA master pci_set_master(pci_dev); //RtmpDevInit============================================== // Allocate RTMP_ADAPTER adapter structure handle = kmalloc(sizeof(struct os_cookie), GFP_KERNEL); if (handle == NULL) { DBGPRINT(RT_DEBUG_ERROR, ("%s(): Allocate memory for os handle failed!\n", __FUNCTION__)); goto err_out_iounmap; } ((POS_COOKIE)handle)->pci_dev = pci_dev; rv = RTMPAllocAdapterBlock(handle, &pAd); //shiang: we may need the pci_dev for allocate structure of "RTMP_ADAPTER" if (rv != NDIS_STATUS_SUCCESS) goto err_out_iounmap; // Here are the RTMP_ADAPTER structure with pci-bus specific parameters. pAd->CSRBaseAddress = (PUCHAR)csr_addr; DBGPRINT(RT_DEBUG_ERROR, ("pAd->CSRBaseAddress =0x%lx, csr_addr=0x%lx!\n", (ULONG)pAd->CSRBaseAddress, csr_addr)); RtmpRaDevCtrlInit(pAd, RTMP_DEV_INF_PCI); //NetDevInit============================================== net_dev = RtmpPhyNetDevInit(pAd, &netDevHook); if (net_dev == NULL) goto err_out_free_radev; // Here are the net_device structure with pci-bus specific parameters. net_dev->irq = pci_dev->irq; // Interrupt IRQ number net_dev->base_addr = csr_addr; // Save CSR virtual address and irq to device structure pci_set_drvdata(pci_dev, net_dev); // Set driver data #ifdef NATIVE_WPA_SUPPLICANT_SUPPORT /* for supporting Network Manager */ /* Set the sysfs physical device reference for the network logical device * if set prior to registration will cause a symlink during initialization. */ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) SET_NETDEV_DEV(net_dev, &(pci_dev->dev)); #endif #endif // NATIVE_WPA_SUPPLICANT_SUPPORT // //All done, it's time to register the net device to linux kernel. 
// Register this device rv = RtmpOSNetDevAttach(net_dev, &netDevHook); if (rv) goto err_out_free_netdev; #ifdef CONFIG_STA_SUPPORT pAd->StaCfg.OriDevType = net_dev->type; #endif // CONFIG_STA_SUPPORT // RTMPInitPCIeDevice(pci_dev, pAd); DBGPRINT(RT_DEBUG_TRACE, ("<=== rt2860_probe\n")); return 0; // probe ok /* --------------------------- ERROR HANDLE --------------------------- */ err_out_free_netdev: RtmpOSNetDevFree(net_dev); err_out_free_radev: /* free RTMP_ADAPTER strcuture and os_cookie*/ RTMPFreeAdapter(pAd); err_out_iounmap: iounmap((void *)(csr_addr)); release_mem_region(pci_resource_start(pci_dev, 0), pci_resource_len(pci_dev, 0)); err_out_free_res: pci_release_regions(pci_dev); err_out: pci_disable_device(pci_dev); DBGPRINT(RT_DEBUG_ERROR, ("<=== rt2860_probe failed with rv = %d!\n", rv)); return -ENODEV; /* probe fail */ }
static int rtsx_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { struct Scsi_Host *host; struct rtsx_dev *dev; int err = 0; struct task_struct *th; printk(KERN_INFO "--- %s ---\n", DRIVER_MAKE_TIME); err = pci_enable_device(pci); if (err < 0) { printk(KERN_ERR "PCI enable device failed!\n"); return err; } err = pci_request_regions(pci, CR_DRIVER_NAME); if (err < 0) { printk(KERN_ERR "PCI request regions for %s failed!\n", CR_DRIVER_NAME); pci_disable_device(pci); return err; } /* * Ask the SCSI layer to allocate a host structure, with extra * space at the end for our private rtsx_dev structure. */ host = scsi_host_alloc(&rtsx_host_template, sizeof(*dev)); if (!host) { printk(KERN_ERR "Unable to allocate the scsi host\n"); pci_release_regions(pci); pci_disable_device(pci); return -ENOMEM; } dev = host_to_rtsx(host); memset(dev, 0, sizeof(struct rtsx_dev)); dev->chip = (struct rtsx_chip *)kmalloc(sizeof(struct rtsx_chip), GFP_KERNEL); if (dev->chip == NULL) { err = -ENOMEM; goto errout; } memset(dev->chip, 0, sizeof(struct rtsx_chip)); spin_lock_init(&dev->reg_lock); mutex_init(&(dev->dev_mutex)); sema_init(&(dev->sema), 0); init_completion(&(dev->notify)); init_waitqueue_head(&dev->delay_wait); dev->pci = pci; dev->irq = -1; printk(KERN_INFO "Resource length: 0x%x\n", (unsigned int)pci_resource_len(pci,0)); dev->addr = pci_resource_start(pci, 0); dev->remap_addr = ioremap_nocache(dev->addr, pci_resource_len(pci,0)); if (dev->remap_addr == NULL) { printk(KERN_ERR "ioremap error\n"); err = -ENXIO; goto errout; } printk(KERN_INFO "Original address: 0x%lx, remapped address: 0x%lx\n", (unsigned long)(dev->addr), (unsigned long)(dev->remap_addr)); dev->rtsx_resv_buf = dma_alloc_coherent(&(pci->dev), RTSX_RESV_BUF_LEN, &(dev->rtsx_resv_buf_addr), GFP_KERNEL); if (dev->rtsx_resv_buf == NULL) { printk(KERN_ERR "alloc dma buffer fail\n"); err = -ENXIO; goto errout; } dev->chip->host_cmds_ptr = dev->rtsx_resv_buf; dev->chip->host_cmds_addr = dev->rtsx_resv_buf_addr; dev->chip->host_sg_tbl_ptr = dev->rtsx_resv_buf + HOST_CMDS_BUF_LEN; dev->chip->host_sg_tbl_addr = dev->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN; dev->chip->rtsx = dev; rtsx_init_options(dev->chip); printk(KERN_INFO "pci->irq = %d\n", pci->irq); if (dev->chip->msi_en) { if (pci_enable_msi(pci) < 0) { dev->chip->msi_en = 0; } } if (rtsx_acquire_irq(dev) < 0) { err = -EBUSY; goto errout; } pci_set_master(pci); synchronize_irq(dev->irq); err = scsi_add_host(host, &pci->dev); if (err) { printk(KERN_ERR "Unable to add the scsi host\n"); goto errout; } rtsx_init_chip(dev->chip); th = kthread_create(rtsx_control_thread, dev, CR_DRIVER_NAME); if (IS_ERR(th)) { printk(KERN_ERR "Unable to start control thread\n"); err = PTR_ERR(th); goto errout; } /* Take a reference to the host for the control thread and * count it among all the threads we have launched. Then * start it up. */ scsi_host_get(rtsx_to_host(dev)); atomic_inc(&total_threads); wake_up_process(th); th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan"); if (IS_ERR(th)) { printk(KERN_ERR "Unable to start the device-scanning thread\n"); quiesce_and_remove_host(dev); err = PTR_ERR(th); goto errout; } /* Take a reference to the host for the scanning thread and * count it among all the threads we have launched. Then * start it up. 
*/ scsi_host_get(rtsx_to_host(dev)); atomic_inc(&total_threads); wake_up_process(th); th = kthread_create(rtsx_polling_thread, dev, "rtsx-polling"); if (IS_ERR(th)) { printk(KERN_ERR "Unable to start the device-polling thread\n"); quiesce_and_remove_host(dev); err = PTR_ERR(th); goto errout; } /* Take a reference to the host for the polling thread and * count it among all the threads we have launched. Then * start it up. */ scsi_host_get(rtsx_to_host(dev)); atomic_inc(&total_threads); wake_up_process(th); pci_set_drvdata(pci, dev); return 0; errout: printk(KERN_ERR "rtsx_probe() failed\n"); release_everything(dev); return err; }
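/*
 * Editorial note: rtsx_probe() above repeats the same four-step sequence for
 * its control, scan and polling threads: create the kthread, take a reference
 * on the SCSI host, count the thread, wake it.  A small helper along the lines
 * below would keep the three call sites identical; the helper name is
 * illustrative, the symbols it calls are the ones already used above.
 */
static struct task_struct *rtsx_start_thread(struct rtsx_dev *dev,
					     int (*threadfn)(void *),
					     const char *name)
{
	struct task_struct *th;

	th = kthread_create(threadfn, dev, "%s", name);
	if (IS_ERR(th))
		return th;	/* caller decides whether to tear down the host */

	/* pin the SCSI host for the thread and account for it before waking */
	scsi_host_get(rtsx_to_host(dev));
	atomic_inc(&total_threads);
	wake_up_process(th);
	return th;
}
/* usage sketch: th = rtsx_start_thread(dev, rtsx_scan_thread, "rtsx-scan"); */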
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); struct iwl_bus *bus; struct iwl_pci_bus *pci_bus; u16 pci_cmd; int err; bus = kzalloc(sizeof(*bus) + sizeof(*pci_bus), GFP_KERNEL); if (!bus) { dev_printk(KERN_ERR, &pdev->dev, "Couldn't allocate iwl_pci_bus"); err = -ENOMEM; goto out_no_pci; } pci_bus = IWL_BUS_GET_PCI_BUS(bus); pci_bus->pci_dev = pdev; pci_set_drvdata(pdev, bus); /* W/A - seems to solve weird behavior. We need to remove this if we * don't want to stay in L1 all the time. This wastes a lot of power */ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); if (pci_enable_device(pdev)) { err = -ENODEV; goto out_no_pci; } pci_set_master(pdev); err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); if (!err) err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); if (err) { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (!err) err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); /* both attempts failed: */ if (err) { dev_printk(KERN_ERR, bus->dev, "No suitable DMA available.\n"); goto out_pci_disable_device; } } err = pci_request_regions(pdev, DRV_NAME); if (err) { dev_printk(KERN_ERR, bus->dev, "pci_request_regions failed"); goto out_pci_disable_device; } pci_bus->hw_base = pci_iomap(pdev, 0, 0); if (!pci_bus->hw_base) { dev_printk(KERN_ERR, bus->dev, "pci_iomap failed"); err = -ENODEV; goto out_pci_release_regions; } dev_printk(KERN_INFO, &pdev->dev, "pci_resource_len = 0x%08llx\n", (unsigned long long) pci_resource_len(pdev, 0)); dev_printk(KERN_INFO, &pdev->dev, "pci_resource_base = %p\n", pci_bus->hw_base); dev_printk(KERN_INFO, &pdev->dev, "HW Revision ID = 0x%X\n", pdev->revision); /* We disable the RETRY_TIMEOUT register (0x41) to keep * PCI Tx retries from interfering with C3 CPU state */ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); err = pci_enable_msi(pdev); if (err) dev_printk(KERN_ERR, &pdev->dev, "pci_enable_msi failed(0X%x)", err); /* TODO: Move this away, not needed if not MSI */ /* enable rfkill interrupt: hw bug w/a */ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); } bus->dev = &pdev->dev; bus->irq = pdev->irq; bus->ops = &bus_ops_pci; err = iwl_probe(bus, &trans_ops_pcie, cfg); if (err) goto out_disable_msi; return 0; out_disable_msi: pci_disable_msi(pdev); pci_iounmap(pdev, pci_bus->hw_base); out_pci_release_regions: pci_set_drvdata(pdev, NULL); pci_release_regions(pdev); out_pci_disable_device: pci_disable_device(pdev); out_no_pci: kfree(bus); return err; }
static INT __devinit rt2860_probe( IN struct pci_dev *pci_dev, IN const struct pci_device_id *pci_id) { VOID *pAd = NULL; struct net_device *net_dev; PVOID handle; PSTRING print_name; ULONG csr_addr; INT rv = 0; RTMP_OS_NETDEV_OP_HOOK netDevHook; ULONG OpMode; DBGPRINT(RT_DEBUG_TRACE, ("===> rt2860_probe\n")); /*PCIDevInit============================================== */ /* wake up and enable device */ if ((rv = pci_enable_device(pci_dev))!= 0) { DBGPRINT(RT_DEBUG_ERROR, ("Enable PCI device failed, errno=%d!\n", rv)); return rv; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) print_name = pci_name(pci_dev); #else print_name = pci_dev->slot_name; #endif /* LINUX_VERSION_CODE */ if ((rv = pci_request_regions(pci_dev, print_name)) != 0) { DBGPRINT(RT_DEBUG_ERROR, ("Request PCI resource failed, errno=%d!\n", rv)); goto err_out; } /* map physical address to virtual address for accessing register */ csr_addr = (unsigned long) ioremap(pci_resource_start(pci_dev, 0), pci_resource_len(pci_dev, 0)); if (!csr_addr) { DBGPRINT(RT_DEBUG_ERROR, ("ioremap failed for device %s, region 0x%lX @ 0x%lX\n", print_name, (ULONG)pci_resource_len(pci_dev, 0), (ULONG)pci_resource_start(pci_dev, 0))); goto err_out_free_res; } else { DBGPRINT(RT_DEBUG_TRACE, ("%s: at 0x%lx, VA 0x%lx, IRQ %d. \n", print_name, (ULONG)pci_resource_start(pci_dev, 0), (ULONG)csr_addr, pci_dev->irq)); } /* Set DMA master */ pci_set_master(pci_dev); /*RtmpDevInit============================================== */ /* Allocate RTMP_ADAPTER adapter structure */ /* handle = kmalloc(sizeof(struct os_cookie), GFP_KERNEL); */ os_alloc_mem(NULL, (UCHAR **)&handle, sizeof(struct os_cookie)); if (handle == NULL) { DBGPRINT(RT_DEBUG_ERROR, ("%s(): Allocate memory for os handle failed!\n", __FUNCTION__)); goto err_out_iounmap; } memset(handle, 0, sizeof(struct os_cookie)); ((POS_COOKIE)handle)->pci_dev = pci_dev; #ifdef OS_ABL_FUNC_SUPPORT { RTMP_PCI_CONFIG PciConfig; PciConfig.ConfigVendorID = PCI_VENDOR_ID; /* get DRIVER operations */ RTMP_DRV_OPS_FUNCTION(pRtmpDrvOps, NULL, &PciConfig, NULL); } #endif /* OS_ABL_FUNC_SUPPORT */ rv = RTMPAllocAdapterBlock(handle, &pAd); /* we may need the pci_dev for allocate structure of "RTMP_ADAPTER" */ if (rv != NDIS_STATUS_SUCCESS) goto err_out_iounmap; /* Here are the RTMP_ADAPTER structure with pci-bus specific parameters. */ /* pAd->CSRBaseAddress = (PUCHAR)csr_addr; */ RTMP_DRIVER_PCI_CSR_SET(pAd, csr_addr); /* RTMPInitPCIeDevice(pci_dev, pAd); */ RTMP_DRIVER_PCIE_INIT(pAd, pci_dev); /*NetDevInit============================================== */ net_dev = RtmpPhyNetDevInit(pAd, &netDevHook); if (net_dev == NULL) goto err_out_free_radev; /* Here are the net_device structure with pci-bus specific parameters. */ net_dev->irq = pci_dev->irq; /* Interrupt IRQ number */ net_dev->base_addr = csr_addr; /* Save CSR virtual address and irq to device structure */ pci_set_drvdata(pci_dev, net_dev); /* Set driver data */ #ifdef NATIVE_WPA_SUPPLICANT_SUPPORT /* for supporting Network Manager */ /* Set the sysfs physical device reference for the network logical device * if set prior to registration will cause a symlink during initialization. */ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) SET_NETDEV_DEV(net_dev, &(pci_dev->dev)); #endif #endif /* NATIVE_WPA_SUPPLICANT_SUPPORT */ /*All done, it's time to register the net device to linux kernel. 
*/ /* Register this device */ #ifdef RT_CFG80211_SUPPORT { /* pAd->pCfgDev = &(pci_dev->dev); */ /* pAd->CFG80211_Register = CFG80211_Register; */ /* RTMP_DRIVER_CFG80211_INIT(pAd, pci_dev); */ /* In 2.6.32, cfg80211 register must be before register_netdevice(); We can not put the register in rt28xx_open(); Or you will suffer NULL pointer in list_add of cfg80211_netdev_notifier_call(). */ CFG80211_Register(pAd, &(pci_dev->dev), net_dev); } #endif /* RT_CFG80211_SUPPORT */ RTMP_DRIVER_OP_MODE_GET(pAd, &OpMode); rv = RtmpOSNetDevAttach(OpMode, net_dev, &netDevHook); if (rv) goto err_out_free_netdev; #ifdef CONFIG_STA_SUPPORT /* pAd->StaCfg.OriDevType = net_dev->type; */ RTMP_DRIVER_STA_DEV_TYPE_SET(pAd, net_dev->type); #endif /* CONFIG_STA_SUPPORT */ /*#ifdef KTHREAD_SUPPORT */ #ifdef PRE_ASSIGN_MAC_ADDR UCHAR PermanentAddress[MAC_ADDR_LEN]; RTMP_DRIVER_MAC_ADDR_GET(pAd, &PermanentAddress[0]); DBGPRINT(RT_DEBUG_TRACE, ("@%s MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n", __FUNCTION__, PermanentAddress[0], PermanentAddress[1],PermanentAddress[2],PermanentAddress[3],PermanentAddress[4],PermanentAddress[5])); /* Set up the Mac address */ RtmpOSNetDevAddrSet(OpMode, net_dev, &PermanentAddress[0], NULL); #endif /* PRE_ASSIGN_MAC_ADDR */ DBGPRINT(RT_DEBUG_TRACE, ("<=== rt2860_probe\n")); return 0; /* probe ok */ /* --------------------------- ERROR HANDLE --------------------------- */ err_out_free_netdev: RtmpOSNetDevFree(net_dev); err_out_free_radev: /* free RTMP_ADAPTER strcuture and os_cookie*/ RTMPFreeAdapter(pAd); err_out_iounmap: iounmap((void *)(csr_addr)); release_mem_region(pci_resource_start(pci_dev, 0), pci_resource_len(pci_dev, 0)); err_out_free_res: pci_release_regions(pci_dev); err_out: pci_disable_device(pci_dev); DBGPRINT(RT_DEBUG_ERROR, ("<=== rt2860_probe failed with rv = %d!\n", rv)); return -ENODEV; /* probe fail */ }
/* driver entry point */ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { int ret = -ENODEV; resource_size_t csr_base, mem_base; unsigned long csr_len, mem_len; struct denali_nand_info *denali; denali = kzalloc(sizeof(*denali), GFP_KERNEL); if (!denali) return -ENOMEM; ret = pci_enable_device(dev); if (ret) { dev_err(&dev->dev, "Spectra: failed to enable PCI device\n"); goto failed_alloc_memery; } if (id->driver_data == INTEL_CE4100) { /* Due to a silicon limitation, we can only support * ONFI timing mode 1 and below. */ if (onfi_timing_mode < -1 || onfi_timing_mode > 1) { dev_err(&dev->dev, "Intel CE4100 only supports ONFI timing mode 1 or below\n"); ret = -EINVAL; goto failed_enable_dev; } denali->platform = INTEL_CE4100; mem_base = pci_resource_start(dev, 0); mem_len = pci_resource_len(dev, 1); csr_base = pci_resource_start(dev, 1); csr_len = pci_resource_len(dev, 1); } else { denali->platform = INTEL_MRST; csr_base = pci_resource_start(dev, 0); csr_len = pci_resource_len(dev, 0); mem_base = pci_resource_start(dev, 1); mem_len = pci_resource_len(dev, 1); if (!mem_len) { mem_base = csr_base + csr_len; mem_len = csr_len; } } /* Is 32-bit DMA supported? */ ret = dma_set_mask(&dev->dev, DMA_BIT_MASK(32)); if (ret) { dev_err(&dev->dev, "Spectra: no usable 32-bit DMA configuration\n"); goto failed_enable_dev; } denali->buf.dma_buf = dma_map_single(&dev->dev, denali->buf.buf, DENALI_BUF_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(&dev->dev, denali->buf.dma_buf)) { dev_err(&dev->dev, "Spectra: failed to map DMA buffer\n"); ret = -EIO; goto failed_enable_dev; } pci_set_master(dev); denali->dev = &dev->dev; denali->mtd.dev.parent = &dev->dev; ret = pci_request_regions(dev, DENALI_NAND_NAME); if (ret) { dev_err(&dev->dev, "Spectra: failed to request PCI regions\n"); goto failed_dma_map; } denali->flash_reg = ioremap_nocache(csr_base, csr_len); if (!denali->flash_reg) { dev_err(&dev->dev, "Spectra: ioremap of controller registers failed\n"); ret = -ENOMEM; goto failed_req_regions; } denali->flash_mem = ioremap_nocache(mem_base, mem_len); if (!denali->flash_mem) { dev_err(&dev->dev, "Spectra: ioremap of flash memory window failed\n"); ret = -ENOMEM; goto failed_remap_reg; } denali_hw_init(denali); denali_drv_init(denali); /* denali_isr register is done after all the hardware * initialization is finished */ if (request_irq(dev->irq, denali_isr, IRQF_SHARED, DENALI_NAND_NAME, denali)) { dev_err(&dev->dev, "Spectra: failed to request IRQ %d\n", dev->irq); ret = -ENODEV; goto failed_remap_mem; } /* now that our ISR is registered, we can enable interrupts */ denali_set_intr_modes(denali, true); pci_set_drvdata(dev, denali); denali->mtd.name = "denali-nand"; denali->mtd.owner = THIS_MODULE; denali->mtd.priv = &denali->nand; /* register the driver with the NAND core subsystem */ denali->nand.select_chip = denali_select_chip; denali->nand.cmdfunc = denali_cmdfunc; denali->nand.read_byte = denali_read_byte; denali->nand.waitfunc = denali_waitfunc; /* scan for NAND devices attached to the controller * this is the first stage in a two step process to register * with the nand subsystem */ if (nand_scan_ident(&denali->mtd, denali->max_banks, NULL)) { ret = -ENXIO; goto failed_req_irq; } /* MTD supported page sizes vary by kernel. We validate our * kernel supports the device here. 
*/ if (denali->mtd.writesize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) { ret = -ENODEV; dev_err(&dev->dev, "Spectra: device size not supported by this version of MTD\n"); goto failed_req_irq; } /* support for multi nand * MTD knows nothing about multi nand, * so we should tell it the real pagesize * and anything necessary */ denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED); denali->nand.chipsize <<= (denali->devnum - 1); denali->nand.page_shift += (denali->devnum - 1); denali->nand.pagemask = (denali->nand.chipsize >> denali->nand.page_shift) - 1; denali->nand.bbt_erase_shift += (denali->devnum - 1); denali->nand.phys_erase_shift = denali->nand.bbt_erase_shift; denali->nand.chip_shift += (denali->devnum - 1); denali->mtd.writesize <<= (denali->devnum - 1); denali->mtd.oobsize <<= (denali->devnum - 1); denali->mtd.erasesize <<= (denali->devnum - 1); denali->mtd.size = denali->nand.numchips * denali->nand.chipsize; denali->bbtskipbytes *= denali->devnum; /* second stage of the NAND scan * this stage requires information regarding ECC and * bad block management. */ /* Bad block management */ denali->nand.bbt_td = &bbt_main_descr; denali->nand.bbt_md = &bbt_mirror_descr; /* skip the scan for now until we have OOB read and write support */ denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN; denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME; /* Denali Controller only supports 15bit and 8bit ECC in MRST, * so just let controller do 15bit ECC for MLC and 8bit ECC for * SLC if possible. * */ if (denali->nand.cellinfo & 0xc && (denali->mtd.oobsize > (denali->bbtskipbytes + ECC_15BITS * (denali->mtd.writesize / ECC_SECTOR_SIZE)))) { /* if MLC OOB size is large enough, use 15bit ECC*/ denali->nand.ecc.layout = &nand_15bit_oob; denali->nand.ecc.bytes = ECC_15BITS; iowrite32(15, denali->flash_reg + ECC_CORRECTION); } else if (denali->mtd.oobsize < (denali->bbtskipbytes + ECC_8BITS * (denali->mtd.writesize / ECC_SECTOR_SIZE))) { dev_err(&dev->dev, "Your NAND chip OOB is not large enough to contain 8bit ECC correction codes\n"); ret = -EINVAL; goto failed_req_irq; } else { denali->nand.ecc.layout = &nand_8bit_oob; denali->nand.ecc.bytes = ECC_8BITS; iowrite32(8, denali->flash_reg + ECC_CORRECTION); } denali->nand.ecc.bytes *= denali->devnum; denali->nand.ecc.layout->eccbytes *= denali->mtd.writesize / ECC_SECTOR_SIZE; denali->nand.ecc.layout->oobfree[0].offset = denali->bbtskipbytes + denali->nand.ecc.layout->eccbytes; denali->nand.ecc.layout->oobfree[0].length = denali->mtd.oobsize - denali->nand.ecc.layout->eccbytes - denali->bbtskipbytes; /* Let driver know the total blocks number and * how many blocks contained by each nand chip. * blksperchip will help driver to know how many * blocks are taken by FW. * */ denali->totalblks = denali->mtd.size >> denali->nand.phys_erase_shift; denali->blksperchip = denali->totalblks / denali->nand.numchips; /* These functions are required by the NAND core framework, otherwise, * the NAND core will assert. However, we don't need them, so we'll stub * them out. 
*/ denali->nand.ecc.calculate = denali_ecc_calculate; denali->nand.ecc.correct = denali_ecc_correct; denali->nand.ecc.hwctl = denali_ecc_hwctl; /* override the default read operations */ denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum; denali->nand.ecc.read_page = denali_read_page; denali->nand.ecc.read_page_raw = denali_read_page_raw; denali->nand.ecc.write_page = denali_write_page; denali->nand.ecc.write_page_raw = denali_write_page_raw; denali->nand.ecc.read_oob = denali_read_oob; denali->nand.ecc.write_oob = denali_write_oob; denali->nand.erase_cmd = denali_erase; if (nand_scan_tail(&denali->mtd)) { ret = -ENXIO; goto failed_req_irq; } ret = mtd_device_register(&denali->mtd, NULL, 0); if (ret) { dev_err(&dev->dev, "Spectra: Failed to register MTD: %d\n", ret); goto failed_req_irq; } return 0; failed_req_irq: denali_irq_cleanup(dev->irq, denali); failed_remap_mem: iounmap(denali->flash_mem); failed_remap_reg: iounmap(denali->flash_reg); failed_req_regions: pci_release_regions(dev); failed_dma_map: dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE, DMA_BIDIRECTIONAL); failed_enable_dev: pci_disable_device(dev); failed_alloc_memery: kfree(denali); return ret; }
static int prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct net_device *ndev; u8 latency_tmr; u32 mem_addr; islpci_private *priv; int rvalue; /* Enable the pci device */ if (pci_enable_device(pdev)) { printk(KERN_ERR "%s: pci_enable_device() failed.\n", DRV_NAME); return -ENODEV; } /* check whether the latency timer is set correctly */ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_tmr); #if VERBOSE > SHOW_ERROR_MESSAGES DEBUG(SHOW_TRACING, "latency timer: %x\n", latency_tmr); #endif if (latency_tmr < PCIDEVICE_LATENCY_TIMER_MIN) { /* set the latency timer */ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, PCIDEVICE_LATENCY_TIMER_VAL); } /* enable PCI DMA */ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { printk(KERN_ERR "%s: 32-bit PCI DMA not supported", DRV_NAME); goto do_pci_disable_device; } /* 0x40 is the programmable timer to configure the response timeout (TRDY_TIMEOUT) * 0x41 is the programmable timer to configure the retry timeout (RETRY_TIMEOUT) * The RETRY_TIMEOUT is used to set the number of retries that the core, as a * Master, will perform before abandoning a cycle. The default value for * RETRY_TIMEOUT is 0x80, which far exceeds the PCI 2.1 requirement for new * devices. A write of zero to the RETRY_TIMEOUT register disables this * function to allow use with any non-compliant legacy devices that may * execute more retries. * * Writing zero to both these two registers will disable both timeouts and * *can* solve problems caused by devices that are slow to respond. * Make this configurable - MSW */ if ( init_pcitm >= 0 ) { pci_write_config_byte(pdev, 0x40, (u8)init_pcitm); pci_write_config_byte(pdev, 0x41, (u8)init_pcitm); } else { printk(KERN_INFO "PCI TRDY/RETRY unchanged\n"); } /* request the pci device I/O regions */ rvalue = pci_request_regions(pdev, DRV_NAME); if (rvalue) { printk(KERN_ERR "%s: pci_request_regions failure (rc=%d)\n", DRV_NAME, rvalue); goto do_pci_disable_device; } /* check if the memory window is indeed set */ rvalue = pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &mem_addr); if (rvalue || !mem_addr) { printk(KERN_ERR "%s: PCI device memory region not configured; fix your BIOS or CardBus bridge/drivers\n", DRV_NAME); goto do_pci_release_regions; } /* enable PCI bus-mastering */ DEBUG(SHOW_TRACING, "%s: pci_set_master(pdev)\n", DRV_NAME); pci_set_master(pdev); /* enable MWI */ pci_try_set_mwi(pdev); /* setup the network device interface and its structure */ if (!(ndev = islpci_setup(pdev))) { /* error configuring the driver as a network device */ printk(KERN_ERR "%s: could not configure network device\n", DRV_NAME); goto do_pci_clear_mwi; } priv = netdev_priv(ndev); islpci_set_state(priv, PRV_STATE_PREBOOT); /* we are attempting to boot */ /* card is in unknown state yet, might have some interrupts pending */ isl38xx_disable_interrupts(priv->device_base); /* request for the interrupt before uploading the firmware */ rvalue = request_irq(pdev->irq, islpci_interrupt, IRQF_SHARED, ndev->name, priv); if (rvalue) { /* error, could not hook the handler to the irq */ printk(KERN_ERR "%s: could not install IRQ handler\n", ndev->name); goto do_unregister_netdev; } /* firmware upload is triggered in islpci_open */ return 0; do_unregister_netdev: unregister_netdev(ndev); islpci_free_memory(priv); pci_set_drvdata(pdev, NULL); free_netdev(ndev); priv = NULL; do_pci_clear_mwi: pci_clear_mwi(pdev); do_pci_release_regions: pci_release_regions(pdev); do_pci_disable_device: pci_disable_device(pdev); return -EIO; }
static int orinoco_plx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int err; struct orinoco_private *priv; struct orinoco_pci_card *card; void __iomem *hermes_io, *attr_io, *bridge_io; err = pci_enable_device(pdev); if (err) { printk(KERN_ERR PFX "Cannot enable PCI device\n"); return err; } err = pci_request_regions(pdev, DRIVER_NAME); if (err) { printk(KERN_ERR PFX "Cannot obtain PCI resources\n"); goto fail_resources; } bridge_io = pci_iomap(pdev, 1, 0); if (!bridge_io) { printk(KERN_ERR PFX "Cannot map bridge registers\n"); err = -EIO; goto fail_map_bridge; } attr_io = pci_iomap(pdev, 2, 0); if (!attr_io) { printk(KERN_ERR PFX "Cannot map PCMCIA attributes\n"); err = -EIO; goto fail_map_attr; } hermes_io = pci_iomap(pdev, 3, 0); if (!hermes_io) { printk(KERN_ERR PFX "Cannot map chipset registers\n"); err = -EIO; goto fail_map_hermes; } /* Allocate network device */ priv = alloc_orinocodev(sizeof(*card), &pdev->dev, orinoco_plx_cor_reset, NULL); if (!priv) { printk(KERN_ERR PFX "Cannot allocate network device\n"); err = -ENOMEM; goto fail_alloc; } card = priv->card; card->bridge_io = bridge_io; card->attr_io = attr_io; hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING); err = request_irq(pdev->irq, orinoco_interrupt, IRQF_SHARED, DRIVER_NAME, priv); if (err) { printk(KERN_ERR PFX "Cannot allocate IRQ %d\n", pdev->irq); err = -EBUSY; goto fail_irq; } err = orinoco_plx_hw_init(card); if (err) { printk(KERN_ERR PFX "Hardware initialization failed\n"); goto fail; } err = orinoco_plx_cor_reset(priv); if (err) { printk(KERN_ERR PFX "Initial reset failed\n"); goto fail; } err = orinoco_init(priv); if (err) { printk(KERN_ERR PFX "orinoco_init() failed\n"); goto fail; } err = orinoco_if_add(priv, 0, 0, NULL); if (err) { printk(KERN_ERR PFX "orinoco_if_add() failed\n"); goto fail; } pci_set_drvdata(pdev, priv); return 0; fail: free_irq(pdev->irq, priv); fail_irq: free_orinocodev(priv); fail_alloc: pci_iounmap(pdev, hermes_io); fail_map_hermes: pci_iounmap(pdev, attr_io); fail_map_attr: pci_iounmap(pdev, bridge_io); fail_map_bridge: pci_release_regions(pdev); fail_resources: pci_disable_device(pdev); return err; }
/** * mei_me_probe - Device Initialization Routine * * @pdev: PCI device structure * @ent: entry in kcs_pci_tbl * * Return: 0 on success, <0 on failure. */ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data); struct mei_device *dev; struct mei_me_hw *hw; unsigned int irqflags; int err; if (!mei_me_quirk_probe(pdev, cfg)) return -ENODEV; /* enable pci dev */ err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "failed to enable pci device.\n"); goto end; } /* set PCI host mastering */ pci_set_master(pdev); /* pci request regions for mei driver */ err = pci_request_regions(pdev, KBUILD_MODNAME); if (err) { dev_err(&pdev->dev, "failed to get pci regions.\n"); goto disable_device; } if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) || dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); } if (err) { dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); goto release_regions; } /* allocates and initializes the mei dev structure */ dev = mei_me_dev_init(pdev, cfg); if (!dev) { err = -ENOMEM; goto release_regions; } hw = to_me_hw(dev); /* mapping IO device memory */ hw->mem_addr = pci_iomap(pdev, 0, 0); if (!hw->mem_addr) { dev_err(&pdev->dev, "mapping I/O device memory failure.\n"); err = -ENOMEM; goto free_device; } pci_enable_msi(pdev); /* request and enable interrupt */ irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED; err = request_threaded_irq(pdev->irq, mei_me_irq_quick_handler, mei_me_irq_thread_handler, irqflags, KBUILD_MODNAME, dev); if (err) { dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n", pdev->irq); goto disable_msi; } if (mei_start(dev)) { dev_err(&pdev->dev, "init hw failure.\n"); err = -ENODEV; goto release_irq; } pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT); pm_runtime_use_autosuspend(&pdev->dev); err = mei_register(dev, &pdev->dev); if (err) goto release_irq; pci_set_drvdata(pdev, dev); schedule_delayed_work(&dev->timer_work, HZ); /* * For not wake-able HW runtime pm framework * can't be used on pci device level. * Use domain runtime pm callbacks instead. */ if (!pci_dev_run_wake(pdev)) mei_me_set_pm_domain(dev); if (mei_pg_is_enabled(dev)) pm_runtime_put_noidle(&pdev->dev); dev_dbg(&pdev->dev, "initialization successful.\n"); return 0; release_irq: mei_cancel_work(dev); mei_disable_interrupts(dev); free_irq(pdev->irq, dev); disable_msi: pci_disable_msi(pdev); pci_iounmap(pdev, hw->mem_addr); free_device: kfree(dev); release_regions: pci_release_regions(pdev); disable_device: pci_disable_device(pdev); end: dev_err(&pdev->dev, "initialization failed.\n"); return err; }
// chip-specific constructor // (see "Management of Cards and Components") static int __devinit snd_vortex_create(struct snd_card *card, struct pci_dev *pci, vortex_t ** rchip) { vortex_t *chip; int err; static struct snd_device_ops ops = { .dev_free = snd_vortex_dev_free, }; *rchip = NULL; // check PCI availability (DMA). if ((err = pci_enable_device(pci)) < 0) return err; if (pci_set_dma_mask(pci, DMA_BIT_MASK(32)) < 0 || pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32)) < 0) { printk(KERN_ERR "error to set DMA mask\n"); pci_disable_device(pci); return -ENXIO; } chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) { pci_disable_device(pci); return -ENOMEM; } chip->card = card; // initialize the stuff chip->pci_dev = pci; chip->io = pci_resource_start(pci, 0); chip->vendor = pci->vendor; chip->device = pci->device; chip->card = card; chip->irq = -1; // (1) PCI resource allocation // Get MMIO area // if ((err = pci_request_regions(pci, CARD_NAME_SHORT)) != 0) goto regions_out; chip->mmio = pci_ioremap_bar(pci, 0); if (!chip->mmio) { printk(KERN_ERR "MMIO area remap failed.\n"); err = -ENOMEM; goto ioremap_out; } /* Init audio core. * This must be done before we do request_irq otherwise we can get spurious * interrupts that we do not handle properly and make a mess of things */ if ((err = vortex_core_init(chip)) != 0) { printk(KERN_ERR "hw core init failed\n"); goto core_out; } if ((err = request_irq(pci->irq, vortex_interrupt, IRQF_SHARED, CARD_NAME_SHORT, chip)) != 0) { printk(KERN_ERR "cannot grab irq\n"); goto irq_out; } chip->irq = pci->irq; pci_set_master(pci); // End of PCI setup. // Register alsa root device. if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { goto alloc_out; } snd_card_set_dev(card, &pci->dev); *rchip = chip; return 0; alloc_out: free_irq(chip->irq, chip); irq_out: vortex_core_shutdown(chip); core_out: iounmap(chip->mmio); ioremap_out: pci_release_regions(chip->pci_dev); regions_out: pci_disable_device(chip->pci_dev); //FIXME: this not the right place to unregister the gameport vortex_gameport_unregister(chip); kfree(chip); return err; }
static int __devinit vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct vr_nor_mtd *p = NULL; unsigned int exp_timing_cs0; int err; err = pci_enable_device(dev); if (err) goto out; err = pci_request_regions(dev, DRV_NAME); if (err) goto disable_dev; p = kzalloc(sizeof(*p), GFP_KERNEL); err = -ENOMEM; if (!p) goto release; p->dev = dev; err = vr_nor_init_maps(p); if (err) goto release; err = vr_nor_mtd_setup(p); if (err) goto destroy_maps; err = vr_nor_init_partitions(p); if (err) goto destroy_mtd_setup; pci_set_drvdata(dev, p); return 0; destroy_mtd_setup: map_destroy(p->info); destroy_maps: /* write-protect the flash bank */ exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0); exp_timing_cs0 &= ~TIMING_WR_EN; writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0); /* unmap the flash window */ iounmap(p->map.virt); /* unmap the csr window */ iounmap(p->csr_base); release: kfree(p); pci_release_regions(dev); disable_dev: pci_disable_device(dev); out: return err; }