/** * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer * @adapter: The zfcp adapter to register with the SCSI midlayer */ int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter) { struct ccw_dev_id dev_id; if (adapter->scsi_host) return 0; ccw_device_get_id(adapter->ccw_device, &dev_id); /* register adapter as SCSI host with mid layer of SCSI stack */ adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template, sizeof (struct zfcp_adapter *)); if (!adapter->scsi_host) { dev_err(&adapter->ccw_device->dev, "Registering the FCP device with the " "SCSI stack failed\n"); return -EIO; } /* tell the SCSI stack some characteristics of this adapter */ adapter->scsi_host->max_id = 511; adapter->scsi_host->max_lun = 0xFFFFFFFF; adapter->scsi_host->max_channel = 0; adapter->scsi_host->unique_id = dev_id.devno; adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */ adapter->scsi_host->transportt = zfcp_scsi_transport_template; adapter->scsi_host->hostdata[0] = (unsigned long) adapter; if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) { scsi_host_put(adapter->scsi_host); return -EIO; } return 0; }
/*
 * a2091_probe - probe/attach routine for the A2091 Zorro SCSI controller
 * @z:   the Zorro device being probed
 * @ent: matching entry from the Zorro device-id table
 *
 * Claims the board's register window, allocates a Scsi_Host with
 * a2091_hostdata private data, initializes the WD33C93 core, installs
 * the shared Amiga-ports interrupt handler, enables DMA/interrupts on
 * the board and registers and scans the host.
 *
 * Fix: the register-pointer assignments read "(R)s->SASR"/"(R)s->SCMD"
 * (an HTML-entity mangling of "&regs->...") in the previous revision,
 * which does not compile; the address-of expressions are restored.
 *
 * Returns 0 on success or a negative errno, unwinding in reverse order.
 */
static int __devinit a2091_probe(struct zorro_dev *z,
				 const struct zorro_device_id *ent)
{
	struct Scsi_Host *instance;
	int error;
	struct a2091_scsiregs *regs;
	wd33c93_regs wdregs;
	struct a2091_hostdata *hdata;

	if (!request_mem_region(z->resource.start, 256, "wd33c93"))
		return -EBUSY;

	instance = scsi_host_alloc(&a2091_scsi_template,
				   sizeof(struct a2091_hostdata));
	if (!instance) {
		error = -ENOMEM;
		goto fail_alloc;
	}

	instance->irq = IRQ_AMIGA_PORTS;
	instance->unique_id = z->slotaddr;

	/* board registers live in the Zorro II address space */
	regs = (struct a2091_scsiregs *)ZTWO_VADDR(z->resource.start);
	regs->DAWR = DAWR_A2091;

	wdregs.SASR = &regs->SASR;
	wdregs.SCMD = &regs->SCMD;

	hdata = shost_priv(instance);
	hdata->wh.no_sync = 0xff;	/* no targets negotiated sync yet */
	hdata->wh.fast = 0;
	hdata->wh.dma_mode = CTRL_DMA;
	hdata->regs = regs;

	wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_8_10);
	error = request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED,
			    "A2091 SCSI", instance);
	if (error)
		goto fail_irq;

	/* enable the board's DMA engine and interrupt generation */
	regs->CNTR = CNTR_PDMD | CNTR_INTEN;

	error = scsi_add_host(instance, NULL);
	if (error)
		goto fail_host;

	zorro_set_drvdata(z, instance);
	scsi_scan_host(instance);
	return 0;

fail_host:
	free_irq(IRQ_AMIGA_PORTS, instance);
fail_irq:
	scsi_host_put(instance);
fail_alloc:
	release_mem_region(z->resource.start, 256);
	return error;
}
/*
 * mvme147_init - detect and register the MVME147 on-board WD33C93 SCSI port
 *
 * Returns 0 when the machine is not an MVME147 (nothing to do) or on
 * successful registration; a negative errno otherwise.
 *
 * Fix: when scsi_add_host() failed, the error path only released the
 * SCSI-port interrupt and leaked the DMA interrupt that had already
 * been requested; a dedicated unwind label now frees both IRQs.
 */
static int __init mvme147_init(void)
{
	wd33c93_regs regs;
	struct WD33C93_hostdata *hdata;
	int error = -ENOMEM;

	if (!MACH_IS_MVME147)
		return 0;

	mvme147_shost = scsi_host_alloc(&mvme147_host_template,
			sizeof(struct WD33C93_hostdata));
	if (!mvme147_shost)
		goto err_out;
	mvme147_shost->base = 0xfffe4000;
	mvme147_shost->irq = MVME147_IRQ_SCSI_PORT;

	/* WD33C93 register pair is memory-mapped at a fixed address */
	regs.SASR = (volatile unsigned char *)0xfffe4000;
	regs.SCMD = (volatile unsigned char *)0xfffe4001;

	hdata = shost_priv(mvme147_shost);
	hdata->no_sync = 0xff;		/* no targets negotiated sync yet */
	hdata->fast = 0;
	hdata->dma_mode = CTRL_DMA;

	wd33c93_init(mvme147_shost, regs, dma_setup, dma_stop,
		     WD33C93_FS_8_10);

	error = request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0,
			    "MVME147 SCSI PORT", mvme147_shost);
	if (error)
		goto err_unregister;
	error = request_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr, 0,
			    "MVME147 SCSI DMA", mvme147_shost);
	if (error)
		goto err_free_irq;
#if 0	/* Disabled; causes problems booting */
	m147_pcc->scsi_interrupt = 0x10;	/* Assert SCSI bus reset */
	udelay(100);
	m147_pcc->scsi_interrupt = 0x00;	/* Negate SCSI bus reset */
	udelay(2000);
	m147_pcc->scsi_interrupt = 0x40;	/* Clear bus reset interrupt */
#endif
	m147_pcc->scsi_interrupt = 0x09;	/* Enable interrupt */
	m147_pcc->dma_cntrl = 0x00;		/* ensure DMA is stopped */
	m147_pcc->dma_intr = 0x89;		/* Ack and enable ints */

	error = scsi_add_host(mvme147_shost, NULL);
	if (error)
		goto err_free_dma_irq;
	scsi_scan_host(mvme147_shost);
	return 0;

err_free_dma_irq:
	free_irq(MVME147_IRQ_SCSI_DMA, mvme147_shost);
err_free_irq:
	free_irq(MVME147_IRQ_SCSI_PORT, mvme147_shost);
err_unregister:
	scsi_host_put(mvme147_shost);
err_out:
	return error;
}
static int dmx3191d_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { struct Scsi_Host *shost; struct NCR5380_hostdata *hostdata; unsigned long io; int error = -ENODEV; if (pci_enable_device(pdev)) goto out; io = pci_resource_start(pdev, 0); if (!request_region(io, DMX3191D_REGION_LEN, DMX3191D_DRIVER_NAME)) { printk(KERN_ERR "dmx3191: region 0x%lx-0x%lx already reserved\n", io, io + DMX3191D_REGION_LEN); goto out_disable_device; } shost = scsi_host_alloc(&dmx3191d_driver_template, sizeof(struct NCR5380_hostdata)); if (!shost) goto out_release_region; hostdata = shost_priv(shost); hostdata->base = io; /* This card does not seem to raise an interrupt on pdev->irq. * Steam-powered SCSI controllers run without an IRQ anyway. */ shost->irq = NO_IRQ; error = NCR5380_init(shost, 0); if (error) goto out_host_put; NCR5380_maybe_reset_bus(shost); pci_set_drvdata(pdev, shost); error = scsi_add_host(shost, &pdev->dev); if (error) goto out_exit; scsi_scan_host(shost); return 0; out_exit: NCR5380_exit(shost); out_host_put: scsi_host_put(shost); out_release_region: release_region(io, DMX3191D_REGION_LEN); out_disable_device: pci_disable_device(pdev); out: return error; }
/*
 * cumanascsi1_probe - probe/attach a Cumana SCSI-1 expansion card
 * @ec: the expansion card being probed
 * @id: matching entry from the ecard id table
 *
 * Allocates a host with NCR5380 private data, claims the card's I/O
 * window, hooks the card interrupt and registers the host with the
 * midlayer.  Returns 0 on success or a negative errno.
 */
static int __devinit cumanascsi1_probe(struct expansion_card *ec,
				       const struct ecard_id *id)
{
	struct Scsi_Host *host;
	int ret = -ENOMEM;

	host = scsi_host_alloc(&cumanascsi_template,
			       sizeof(struct NCR5380_hostdata));
	if (!host)
		goto out;

	/* card registers sit 0x800 above the slow IOC base of the slot */
	host->io_port = ecard_address(ec, ECARD_IOC, ECARD_SLOW) + 0x800;
	host->irq = ec->irq;

	NCR5380_init(host, 0);

	host->n_io_port = 255;
	if (!(request_region(host->io_port, host->n_io_port, "CumanaSCSI-1"))) {
		ret = -EBUSY;
		goto out_free;
	}

	((struct NCR5380_hostdata *)host->hostdata)->ctrl = 0;
	/* NOTE(review): writes a card register *below* the claimed I/O
	 * window (io_port - 577); magic offset inherited from the old
	 * driver - confirm against the Cumana hardware documentation. */
	outb(0x00, host->io_port - 577);

	ret = request_irq(host->irq, cumanascsi_intr, SA_INTERRUPT,
			  "CumanaSCSI-1", host);
	if (ret) {
		printk("scsi%d: IRQ%d not free: %d\n",
		       host->host_no, host->irq, ret);
		goto out_release;
	}

	printk("scsi%d: at port 0x%08lx irq %d",
	       host->host_no, host->io_port, host->irq);
	printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
	       host->can_queue, host->cmd_per_lun, CUMANASCSI_PUBLIC_RELEASE);
	printk("\nscsi%d:", host->host_no);
	NCR5380_print_options(host);
	printk("\n");

	ret = scsi_add_host(host, &ec->dev);
	if (ret)
		goto out_free_irq;

	scsi_scan_host(host);
	goto out;

out_free_irq:
	free_irq(host->irq, host);
out_release:
	release_region(host->io_port, host->n_io_port);
out_free:
	scsi_host_put(host);
out:
	return ret;
}
/*
 * qlogic_detect - set up and register a QLogic FAS408 PCMCIA card
 * @host:  scsi_host_template describing the driver
 * @link:  the pcmcia device being attached
 * @qbase: I/O base address of the chip
 * @qlirq: interrupt line assigned to the card
 *
 * Programs the chip, allocates a Scsi_Host carrying qlogicfas408_priv
 * data, installs the interrupt handler and registers/scans the host.
 * Returns the registered host on success or NULL on failure.
 */
static struct Scsi_Host *qlogic_detect(struct scsi_host_template *host,
				       struct pcmcia_device *link,
				       int qbase, int qlirq)
{
	int qltyp;			/* type of chip */
	int qinitid;
	struct Scsi_Host *shost;	/* registered host structure */
	struct qlogicfas408_priv *priv;

	qltyp = qlogicfas408_get_chip_type(qbase, INT_TYPE);
	qinitid = host->this_id;
	if (qinitid < 0)
		qinitid = 7;	/* if no ID, use 7 */

	/* program the chip before the Scsi_Host exists */
	qlogicfas408_setup(qbase, qinitid, INT_TYPE);
	host->name = qlogic_name;
	shost = scsi_host_alloc(host, sizeof(struct qlogicfas408_priv));
	if (!shost)
		goto err;
	shost->io_port = qbase;
	shost->n_io_port = 16;
	shost->dma_channel = -1;	/* no DMA on PCMCIA */
	if (qlirq != -1)
		shost->irq = qlirq;

	priv = get_priv_by_host(shost);
	priv->qlirq = qlirq;
	priv->qbase = qbase;
	priv->qinitid = qinitid;
	priv->shost = shost;
	priv->int_type = INT_TYPE;

	if (request_irq(qlirq, qlogicfas408_ihandl, 0, qlogic_name, shost))
		goto free_scsi_host;
	sprintf(priv->qinfo,
		"Qlogicfas Driver version 0.46, chip %02X at %03X, IRQ %d, TPdma:%d",
		qltyp, qbase, qlirq, QL_TURBO_PDMA);

	if (scsi_add_host(shost, NULL))
		goto free_interrupt;

	scsi_scan_host(shost);

	return shost;

free_interrupt:
	free_irq(qlirq, shost);

free_scsi_host:
	scsi_host_put(shost);

err:
	return NULL;
}
/*
 * dmx3191d_probe_one - PCI probe for the Domex DMX3191D (DTC436 core)
 *
 * Sets up the NCR5380 core behind the card's I/O window.  If the PCI
 * interrupt cannot be obtained the driver falls back to polled mode.
 *
 * Fixes in this revision:
 *  - the Scsi_Host reference was leaked when scsi_add_host() failed;
 *    the error path now drops it with scsi_host_put().
 *  - free_irq() was called unconditionally on that path, even when the
 *    driver was running in polled mode (irq == SCSI_IRQ_NONE); the
 *    call is now guarded.
 */
static int __devinit dmx3191d_probe_one(struct pci_dev *pdev,
					const struct pci_device_id *id)
{
	struct Scsi_Host *shost;
	unsigned long io;
	int error = -ENODEV;

	if (pci_enable_device(pdev))
		goto out;

	io = pci_resource_start(pdev, 0);
	if (!request_region(io, DMX3191D_REGION_LEN, DMX3191D_DRIVER_NAME)) {
		printk(KERN_ERR "dmx3191: region 0x%lx-0x%lx already reserved\n",
		       io, io + DMX3191D_REGION_LEN);
		goto out_disable_device;
	}

	shost = scsi_host_alloc(&dmx3191d_driver_template,
			sizeof(struct NCR5380_hostdata));
	if (!shost)
		goto out_release_region;
	shost->io_port = io;
	shost->irq = pdev->irq;

	NCR5380_init(shost, FLAG_NO_PSEUDO_DMA | FLAG_DTC3181E);

	if (request_irq(pdev->irq, NCR5380_intr, IRQF_SHARED,
				DMX3191D_DRIVER_NAME, shost)) {
		/*
		 * Steam powered scsi controllers run without an IRQ anyway
		 */
		printk(KERN_WARNING "dmx3191: IRQ %d not available - "
				    "switching to polled mode.\n", pdev->irq);
		shost->irq = SCSI_IRQ_NONE;
	}

	pci_set_drvdata(pdev, shost);

	error = scsi_add_host(shost, &pdev->dev);
	if (error)
		goto out_free_host;

	scsi_scan_host(shost);
	return 0;

out_free_host:
	if (shost->irq != SCSI_IRQ_NONE)
		free_irq(shost->irq, shost);
	scsi_host_put(shost);
out_release_region:
	release_region(io, DMX3191D_REGION_LEN);
out_disable_device:
	pci_disable_device(pdev);
out:
	return error;
}
static int cciss_scsi_detect(ctlr_info_t *h) { struct Scsi_Host *sh; int error; sh = scsi_host_alloc(&cciss_driver_template, sizeof(struct ctlr_info *)); if (sh == NULL) goto fail; sh->io_port = 0; // good enough? FIXME, sh->n_io_port = 0; // I do
/*
 * oakscsi_probe - probe/attach an Oak SCSI expansion card (ioremap variant)
 * @ec: the expansion card being probed
 * @id: matching entry from the ecard id table
 *
 * Claims the podule resources, maps the card's MEMC register window and
 * registers an interrupt-less (polled) NCR5380 host with the midlayer.
 * Returns 0 on success or a negative errno, unwinding in reverse order.
 */
static int __devinit oakscsi_probe(struct expansion_card *ec,
				   const struct ecard_id *id)
{
	struct Scsi_Host *host;
	int ret = -ENOMEM;

	ret = ecard_request_resources(ec);
	if (ret)
		goto out;

	host = scsi_host_alloc(&oakscsi_template,
			       sizeof(struct NCR5380_hostdata));
	if (!host) {
		ret = -ENOMEM;
		goto release;
	}

	priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
				   ecard_resource_len(ec, ECARD_RES_MEMC));
	if (!priv(host)->base) {
		ret = -ENOMEM;
		goto unreg;
	}

	/* this hardware cannot interrupt; the core runs fully polled */
	host->irq = IRQ_NONE;
	host->n_io_port = 255;

	NCR5380_init(host, 0);

	printk("scsi%d: at port 0x%08lx irqs disabled",
	       host->host_no, host->io_port);
	printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
	       host->can_queue, host->cmd_per_lun, OAKSCSI_PUBLIC_RELEASE);
	printk("\nscsi%d:", host->host_no);
	NCR5380_print_options(host);
	printk("\n");

	ret = scsi_add_host(host, &ec->dev);
	if (ret)
		goto out_unmap;

	scsi_scan_host(host);
	goto out;

out_unmap:
	iounmap(priv(host)->base);
unreg:
	scsi_host_put(host);
release:
	ecard_release_resources(ec);
out:
	return ret;
}
static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id) { struct Scsi_Host *host; int ret = -ENOMEM; ret = ecard_request_resources(ec); if (ret) goto out; host = scsi_host_alloc(&oakscsi_template, sizeof(struct NCR5380_hostdata)); if (!host) { ret = -ENOMEM; goto release; } priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC), ecard_resource_len(ec, ECARD_RES_MEMC)); if (!priv(host)->base) { ret = -ENOMEM; goto unreg; } host->irq = NO_IRQ; host->n_io_port = 255; ret = NCR5380_init(host, 0); if (ret) goto out_unmap; NCR5380_maybe_reset_bus(host); ret = scsi_add_host(host, &ec->dev); if (ret) goto out_exit; scsi_scan_host(host); goto out; out_exit: NCR5380_exit(host); out_unmap: iounmap(priv(host)->base); unreg: scsi_host_put(host); release: ecard_release_resources(ec); out: return ret; }
/**
 * megaraid_io_attach - attach a device with the IO subsystem
 * @adapter : controller's soft state
 *
 * Allocate a Scsi_Host, copy the limits discovered during controller
 * initialization into it, and make it visible to the SCSI midlayer.
 */
static int
megaraid_io_attach(adapter_t *adapter)
{
	struct Scsi_Host *host;

	// Initialize SCSI Host structure
	host = scsi_host_alloc(&megaraid_template_g, 8);
	if (!host) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid mbox: scsi_register failed\n"));
		return -1;
	}

	SCSIHOST2ADAP(host) = (caddr_t)adapter;
	adapter->host = host;

	// propagate the controller limits to the midlayer
	host->irq		= adapter->irq;
	host->unique_id		= adapter->unique_id;
	host->can_queue		= adapter->max_cmds;
	host->this_id		= adapter->init_id;
	host->sg_tablesize	= adapter->sglen;
	host->max_sectors	= adapter->max_sectors;
	host->cmd_per_lun	= adapter->cmd_per_lun;
	host->max_channel	= adapter->max_channel;
	host->max_id		= adapter->max_target;
	host->max_lun		= adapter->max_lun;

	// notify mid-layer about the new controller
	if (scsi_add_host(host, &adapter->pdev->dev)) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid mbox: scsi_add_host failed\n"));
		scsi_host_put(host);
		return -1;
	}

	scsi_scan_host(host);

	return 0;
}
/*
 * tcm_loop_driver_probe - bind a virtual tcm_loop HBA to the SCSI midlayer
 * @dev: the tcm_loop bus device embedded in struct tcm_loop_hba
 *
 * Allocates a Scsi_Host, stores the hba pointer in its hostdata,
 * advertises DIF/DIX protection capabilities and registers the host.
 * Returns 0 on success or -ENODEV on any failure.
 */
static int tcm_loop_driver_probe(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	int error, host_prot;

	tl_hba = to_tcm_loop_hba(dev);

	sh = scsi_host_alloc(&tcm_loop_driver_template,
			sizeof(struct tcm_loop_hba));
	if (!sh) {
		pr_err("Unable to allocate struct scsi_host\n");
		return -ENODEV;
	}
	tl_hba->sh = sh;

	/*
	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
	 */
	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
	/*
	 * Setup single ID, Channel and LUN for now..
	 */
	sh->max_id = 2;
	sh->max_lun = 0;
	sh->max_channel = 0;
	sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN;

	/* advertise every DIF/DIX protection type plus CRC guard */
	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

	scsi_host_set_prot(sh, host_prot);
	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);

	error = scsi_add_host(sh, &tl_hba->dev);
	if (error) {
		pr_err("%s: scsi_add_host failed\n", __func__);
		scsi_host_put(sh);
		return -ENODEV;
	}
	return 0;
}
/*
 * oakscsi_probe - probe/attach an Oak SCSI expansion card (io_port variant)
 * @ec: the expansion card being probed
 * @id: matching entry from the ecard id table
 *
 * Claims the card's MEMC-space I/O window and registers an
 * interrupt-less (polled) NCR5380 host with the midlayer.
 * Returns 0 on success or a negative errno.
 */
static int __devinit oakscsi_probe(struct expansion_card *ec,
				   const struct ecard_id *id)
{
	struct Scsi_Host *host;
	int ret = -ENOMEM;

	host = scsi_host_alloc(&oakscsi_template,
			       sizeof(struct NCR5380_hostdata));
	if (!host)
		goto out;

	host->io_port = ecard_address(ec, ECARD_MEMC, 0);
	/* this hardware cannot interrupt; the core runs fully polled */
	host->irq = IRQ_NONE;
	host->n_io_port = 255;

	ret = -EBUSY;
	if (!request_region(host->io_port, host->n_io_port, "Oak SCSI"))
		goto unreg;

	NCR5380_init(host, 0);

	printk("scsi%d: at port 0x%08lx irqs disabled",
	       host->host_no, host->io_port);
	printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
	       host->can_queue, host->cmd_per_lun, OAKSCSI_PUBLIC_RELEASE);
	printk("\nscsi%d:", host->host_no);
	NCR5380_print_options(host);
	printk("\n");

	ret = scsi_add_host(host, &ec->dev);
	if (ret)
		goto out_release;

	scsi_scan_host(host);
	goto out;

out_release:
	release_region(host->io_port, host->n_io_port);
unreg:
	scsi_host_put(host);
out:
	return ret;
}
static int tcm_loop_driver_probe(struct device *dev) { struct tcm_loop_hba *tl_hba; struct Scsi_Host *sh; int error; tl_hba = to_tcm_loop_hba(dev); sh = scsi_host_alloc(&tcm_loop_driver_template, sizeof(struct tcm_loop_hba)); if (!sh) { printk(KERN_ERR "Unable to allocate struct scsi_host\n"); return -ENODEV; } tl_hba->sh = sh; /* * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata */ *((struct tcm_loop_hba **)sh->hostdata) = tl_hba; /* * Setup single ID, Channel and LUN for now.. */ sh->max_id = 2; sh->max_lun = 0; sh->max_channel = 0; sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN; error = scsi_add_host(sh, &tl_hba->dev); if (error) { printk(KERN_ERR "%s: scsi_add_host failed\n", __func__); scsi_host_put(sh); return -ENODEV; } return 0; }
/*
 * esp_sun3x_probe - platform probe for the Sun3x on-board ESP SCSI core
 *
 * Maps the ESP and DMA register windows, allocates the shared command
 * block, hooks up the interrupt and hands the core to
 * scsi_esp_register().
 *
 * Fixes in this revision:
 *  - the memory-resource checks read "if (!res && !res->start)", which
 *    dereferences a NULL resource and accepts a missing one; the
 *    condition is now "!res || !res->start" (both occurrences).
 *  - a failed first ioremap jumped to a label that unmapped the (NULL)
 *    mapping; it now unwinds past the unmap.
 *  - the second ioremap result was never checked; it is now.
 */
static int __devinit esp_sun3x_probe(struct platform_device *dev)
{
	struct scsi_host_template *tpnt = &scsi_esp_template;
	struct Scsi_Host *host;
	struct esp *esp;
	struct resource *res;
	int err = -ENOMEM;

	host = scsi_host_alloc(tpnt, sizeof(struct esp));
	if (!host)
		goto fail;

	host->max_id = 8;
	esp = shost_priv(host);
	esp->host = host;
	esp->dev = dev;
	esp->ops = &sun3x_esp_ops;

	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!res || !res->start)
		goto fail_unlink;

	esp->regs = ioremap_nocache(res->start, 0x20);
	if (!esp->regs)
		goto fail_unlink;

	res = platform_get_resource(dev, IORESOURCE_MEM, 1);
	if (!res || !res->start)
		goto fail_unmap_regs;

	esp->dma_regs = ioremap_nocache(res->start, 0x10);
	if (!esp->dma_regs)
		goto fail_unmap_regs;

	esp->command_block = dma_alloc_coherent(esp->dev, 16,
						&esp->command_block_dma,
						GFP_KERNEL);
	if (!esp->command_block)
		goto fail_unmap_regs_dma;

	host->irq = platform_get_irq(dev, 0);
	err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
			  "SUN3X ESP", esp);
	if (err < 0)
		goto fail_unmap_command_block;

	esp->scsi_id = 7;
	esp->host->this_id = esp->scsi_id;
	esp->scsi_id_mask = (1 << esp->scsi_id);
	esp->cfreq = 20000000;	/* 20 MHz clock */

	dev_set_drvdata(&dev->dev, esp);

	err = scsi_esp_register(esp, &dev->dev);
	if (err)
		goto fail_free_irq;

	return 0;

fail_free_irq:
	free_irq(host->irq, esp);
fail_unmap_command_block:
	dma_free_coherent(esp->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);
fail_unmap_regs_dma:
	iounmap(esp->dma_regs);
fail_unmap_regs:
	iounmap(esp->regs);
fail_unlink:
	scsi_host_put(host);
fail:
	return err;
}
/*
 * rtsx_probe - PCI probe for the Realtek PCI-E card reader
 * @pci:    PCI device being probed
 * @pci_id: matching PCI id table entry
 *
 * Enables the device (managed), maps its BAR, allocates the shared
 * DMA buffer for command/SG tables, acquires the IRQ (optionally MSI),
 * initializes the chip and starts the control, scan and polling
 * kernel threads before registering the SCSI host.
 *
 * All failures funnel through the errout label where
 * release_everything() tears down whatever was set up.
 */
static int rtsx_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
{
	struct Scsi_Host *host;
	struct rtsx_dev *dev;
	int err = 0;
	struct task_struct *th;

	dev_dbg(&pci->dev, "Realtek PCI-E card reader detected\n");

	err = pcim_enable_device(pci);
	if (err < 0) {
		dev_err(&pci->dev, "PCI enable device failed!\n");
		return err;
	}

	err = pci_request_regions(pci, CR_DRIVER_NAME);
	if (err < 0) {
		dev_err(&pci->dev, "PCI request regions for %s failed!\n",
			CR_DRIVER_NAME);
		return err;
	}

	/*
	 * Ask the SCSI layer to allocate a host structure, with extra
	 * space at the end for our private rtsx_dev structure.
	 */
	host = scsi_host_alloc(&rtsx_host_template, sizeof(*dev));
	if (!host) {
		dev_err(&pci->dev, "Unable to allocate the scsi host\n");
		return -ENOMEM;
	}

	dev = host_to_rtsx(host);
	memset(dev, 0, sizeof(struct rtsx_dev));

	dev->chip = kzalloc(sizeof(struct rtsx_chip), GFP_KERNEL);
	if (!dev->chip) {
		err = -ENOMEM;
		goto errout;
	}

	/* synchronization primitives used by the worker threads below */
	spin_lock_init(&dev->reg_lock);
	mutex_init(&(dev->dev_mutex));
	init_completion(&dev->cmnd_ready);
	init_completion(&dev->control_exit);
	init_completion(&dev->polling_exit);
	init_completion(&(dev->notify));
	init_completion(&dev->scanning_done);
	init_waitqueue_head(&dev->delay_wait);

	dev->pci = pci;
	dev->irq = -1;	/* no IRQ acquired yet */

	dev_info(&pci->dev, "Resource length: 0x%x\n",
		 (unsigned int)pci_resource_len(pci, 0));
	dev->addr = pci_resource_start(pci, 0);
	dev->remap_addr = ioremap_nocache(dev->addr, pci_resource_len(pci, 0));
	if (!dev->remap_addr) {
		dev_err(&pci->dev, "ioremap error\n");
		err = -ENXIO;
		goto errout;
	}

	/*
	 * Using "unsigned long" cast here to eliminate gcc warning in
	 * 64-bit system
	 */
	dev_info(&pci->dev, "Original address: 0x%lx, remapped address: 0x%lx\n",
		 (unsigned long)(dev->addr), (unsigned long)(dev->remap_addr));

	/* one coherent buffer holds both the command and SG tables */
	dev->rtsx_resv_buf = dmam_alloc_coherent(&pci->dev, RTSX_RESV_BUF_LEN,
						 &dev->rtsx_resv_buf_addr,
						 GFP_KERNEL);
	if (!dev->rtsx_resv_buf) {
		dev_err(&pci->dev, "alloc dma buffer fail\n");
		err = -ENXIO;
		goto errout;
	}
	dev->chip->host_cmds_ptr = dev->rtsx_resv_buf;
	dev->chip->host_cmds_addr = dev->rtsx_resv_buf_addr;
	dev->chip->host_sg_tbl_ptr = dev->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
	dev->chip->host_sg_tbl_addr = dev->rtsx_resv_buf_addr +
				      HOST_CMDS_BUF_LEN;

	dev->chip->rtsx = dev;
	rtsx_init_options(dev->chip);

	dev_info(&pci->dev, "pci->irq = %d\n", pci->irq);

	/* fall back to legacy interrupts if MSI cannot be enabled */
	if (dev->chip->msi_en) {
		if (pci_enable_msi(pci) < 0)
			dev->chip->msi_en = 0;
	}

	if (rtsx_acquire_irq(dev) < 0) {
		err = -EBUSY;
		goto errout;
	}

	pci_set_master(pci);
	synchronize_irq(dev->irq);

	rtsx_init_chip(dev->chip);

	/*
	 * set the supported max_lun and max_id for the scsi host
	 * NOTE: the minimal value of max_id is 1
	 */
	host->max_id = 1;
	host->max_lun = dev->chip->max_lun;

	/* Start up our control thread */
	th = kthread_run(rtsx_control_thread, dev, CR_DRIVER_NAME);
	if (IS_ERR(th)) {
		dev_err(&pci->dev, "Unable to start control thread\n");
		err = PTR_ERR(th);
		goto errout;
	}
	dev->ctl_thread = th;

	err = scsi_add_host(host, &pci->dev);
	if (err) {
		dev_err(&pci->dev, "Unable to add the scsi host\n");
		goto errout;
	}

	/* Start up the thread for delayed SCSI-device scanning */
	th = kthread_run(rtsx_scan_thread, dev, "rtsx-scan");
	if (IS_ERR(th)) {
		dev_err(&pci->dev, "Unable to start the device-scanning thread\n");
		/* the scan thread would have signalled this on exit */
		complete(&dev->scanning_done);
		quiesce_and_remove_host(dev);
		err = PTR_ERR(th);
		goto errout;
	}

	/* Start up the thread for polling thread */
	th = kthread_run(rtsx_polling_thread, dev, "rtsx-polling");
	if (IS_ERR(th)) {
		dev_err(&pci->dev, "Unable to start the device-polling thread\n");
		quiesce_and_remove_host(dev);
		err = PTR_ERR(th);
		goto errout;
	}
	dev->polling_thread = th;

	pci_set_drvdata(pci, dev);
	return 0;

	/* We come here if there are any problems */
errout:
	dev_err(&pci->dev, "rtsx_probe() failed\n");
	release_everything(dev);

	return err;
}
/*
 * cumanascsi1_probe - probe/attach a Cumana SCSI-1 card (ioremap variant)
 * @ec: the expansion card being probed
 * @id: matching entry from the ecard id table
 *
 * Maps the card's slow-IOC register window and MEMC pseudo-DMA window,
 * brings up the NCR5380 core, hooks the card interrupt and registers
 * the host.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): host->io_port is never assigned before the
 * request_region() call below (it is whatever scsi_host_alloc() left
 * there), and the claimed region is not released on the later
 * request_irq/scsi_add_host failure paths - confirm against the
 * driver's history whether the io_port bookkeeping is still needed.
 */
static int __devinit cumanascsi1_probe(struct expansion_card *ec,
				       const struct ecard_id *id)
{
	struct Scsi_Host *host;
	int ret;

	ret = ecard_request_resources(ec);
	if (ret)
		goto out;

	host = scsi_host_alloc(&cumanascsi_template,
			       sizeof(struct NCR5380_hostdata));
	if (!host) {
		ret = -ENOMEM;
		goto out_release;
	}

	priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_IOCSLOW),
				   ecard_resource_len(ec, ECARD_RES_IOCSLOW));
	priv(host)->dma = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
				  ecard_resource_len(ec, ECARD_RES_MEMC));
	if (!priv(host)->base || !priv(host)->dma) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	host->irq = ec->irq;

	NCR5380_init(host, 0);

	priv(host)->ctrl = 0;
	writeb(0, priv(host)->base + CTRL);

	host->n_io_port = 255;
	if (!(request_region(host->io_port, host->n_io_port, "CumanaSCSI-1"))) {
		ret = -EBUSY;
		goto out_unmap;
	}

	ret = request_irq(host->irq, cumanascsi_intr, IRQF_DISABLED,
			  "CumanaSCSI-1", host);
	if (ret) {
		printk("scsi%d: IRQ%d not free: %d\n",
		       host->host_no, host->irq, ret);
		goto out_unmap;
	}

	printk("scsi%d: at port 0x%08lx irq %d",
	       host->host_no, host->io_port, host->irq);
	printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
	       host->can_queue, host->cmd_per_lun, CUMANASCSI_PUBLIC_RELEASE);
	printk("\nscsi%d:", host->host_no);
	NCR5380_print_options(host);
	printk("\n");

	ret = scsi_add_host(host, &ec->dev);
	if (ret)
		goto out_free_irq;

	scsi_scan_host(host);
	goto out;

out_free_irq:
	free_irq(host->irq, host);
out_unmap:
	iounmap(priv(host)->base);
	iounmap(priv(host)->dma);
	scsi_host_put(host);
out_release:
	ecard_release_resources(ec);
out:
	return ret;
}
/*
 * powertecscsi_probe - probe/attach a PowerTec FAS216-based SCSI card
 * @ec: the expansion card being probed
 * @id: matching entry from the ecard id table
 *
 * Maps the card's fast-IOC register window, fills in the FAS216
 * configuration (40 MHz clock, fast-SCSI capable), wires up the card's
 * interrupt/DMA plumbing and registers the host via fas216_add().
 * DMA is optional: if the channel cannot be claimed the driver falls
 * back to PIO.  Returns 0 on success or a negative errno.
 */
static int __devinit powertecscsi_probe(struct expansion_card *ec,
					const struct ecard_id *id)
{
	struct Scsi_Host *host;
	struct powertec_info *info;
	unsigned long resbase, reslen;
	void __iomem *base;
	int ret;

	ret = ecard_request_resources(ec);
	if (ret)
		goto out;

	resbase = ecard_resource_start(ec, ECARD_RES_IOCFAST);
	reslen = ecard_resource_len(ec, ECARD_RES_IOCFAST);
	base = ioremap(resbase, reslen);
	if (!base) {
		ret = -ENOMEM;
		goto out_region;
	}

	host = scsi_host_alloc(&powertecscsi_template,
			       sizeof (struct powertec_info));
	if (!host) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	ecard_set_drvdata(ec, host);

	info = (struct powertec_info *)host->hostdata;
	info->base = base;
	/* apply the module-parameter termination setting for this slot */
	powertecscsi_terminator_ctl(host, term[ec->slot_no]);

	/* FAS216 core configuration for this board */
	info->info.scsi.io_base		= base + POWERTEC_FAS216_OFFSET;
	info->info.scsi.io_shift	= POWERTEC_FAS216_SHIFT;
	info->info.scsi.irq		= ec->irq;
	info->info.scsi.dma		= ec->dma;
	info->info.ifcfg.clockrate	= 40; /* MHz */
	info->info.ifcfg.select_timeout	= 255;
	info->info.ifcfg.asyncperiod	= 200; /* ns */
	info->info.ifcfg.sync_max_depth	= 7;
	info->info.ifcfg.cntl3		= CNTL3_BS8 | CNTL3_FASTSCSI | CNTL3_FASTCLK;
	info->info.ifcfg.disconnect_ok	= 1;
	info->info.ifcfg.wide_max_size	= 0;
	info->info.ifcfg.capabilities	= 0;
	info->info.dma.setup		= powertecscsi_dma_setup;
	info->info.dma.pseudo		= NULL;
	info->info.dma.stop		= powertecscsi_dma_stop;

	/* let the ecard layer poll/ack the card's interrupt status */
	ec->irqaddr	= base + POWERTEC_INTR_STATUS;
	ec->irqmask	= POWERTEC_INTR_BIT;
	ec->irq_data	= info;
	ec->ops		= &powertecscsi_ops;

	device_create_file(&ec->dev, &dev_attr_bus_term);

	ret = fas216_init(host);
	if (ret)
		goto out_free;

	ret = request_irq(ec->irq, powertecscsi_intr,
			  SA_INTERRUPT, "powertec", info);
	if (ret) {
		printk("scsi%d: IRQ%d not free: %d\n",
		       host->host_no, ec->irq, ret);
		goto out_release;
	}

	if (info->info.scsi.dma != NO_DMA) {
		if (request_dma(info->info.scsi.dma, "powertec")) {
			/* DMA busy: fall back to programmed I/O */
			printk("scsi%d: DMA%d not free, using PIO\n",
			       host->host_no, info->info.scsi.dma);
			info->info.scsi.dma = NO_DMA;
		} else {
			set_dma_speed(info->info.scsi.dma, 180);
			info->info.ifcfg.capabilities |= FASCAP_DMA;
		}
	}

	ret = fas216_add(host, &ec->dev);
	if (ret == 0)
		goto out;

	/* fas216_add failed: release DMA and IRQ acquired above */
	if (info->info.scsi.dma != NO_DMA)
		free_dma(info->info.scsi.dma);
	free_irq(ec->irq, host);

out_release:
	fas216_release(host);

out_free:
	device_remove_file(&ec->dev, &dev_attr_bus_term);
	scsi_host_put(host);

out_unmap:
	iounmap(base);

out_region:
	ecard_release_resources(ec);

out:
	return ret;
}
static int mts_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { int i; int ep_out = -1; int ep_in_set[3]; /* this will break if we have more than three endpoints which is why we check */ int *ep_in_current = ep_in_set; int err_retval = -ENOMEM; struct mts_desc * new_desc; struct vendor_product const* p; struct usb_device *dev = interface_to_usbdev (intf); /* the current altsetting on the interface we're probing */ struct usb_host_interface *altsetting; MTS_DEBUG_GOT_HERE(); MTS_DEBUG( "usb-device descriptor at %x\n", (int)dev ); MTS_DEBUG( "product id = 0x%x, vendor id = 0x%x\n", le16_to_cpu(dev->descriptor.idProduct), le16_to_cpu(dev->descriptor.idVendor) ); MTS_DEBUG_GOT_HERE(); p = &mts_supported_products[id - mts_usb_ids]; MTS_DEBUG_GOT_HERE(); MTS_DEBUG( "found model %s\n", p->name ); if ( p->support_status != mts_sup_full ) MTS_MESSAGE( "model %s is not known to be fully supported, reports welcome!\n", p->name ); /* the current altsetting on the interface we're probing */ altsetting = intf->cur_altsetting; /* Check if the config is sane */ if ( altsetting->desc.bNumEndpoints != MTS_EP_TOTAL ) { MTS_WARNING( "expecting %d got %d endpoints! Bailing out.\n", (int)MTS_EP_TOTAL, (int)altsetting->desc.bNumEndpoints ); return -ENODEV; } for( i = 0; i < altsetting->desc.bNumEndpoints; i++ ) { if ((altsetting->endpoint[i].desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_BULK) { MTS_WARNING( "can only deal with bulk endpoints; endpoint %d is not bulk.\n", (int)altsetting->endpoint[i].desc.bEndpointAddress ); } else { if (altsetting->endpoint[i].desc.bEndpointAddress & USB_DIR_IN) *ep_in_current++ = altsetting->endpoint[i].desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; else { if ( ep_out != -1 ) { MTS_WARNING( "can only deal with one output endpoints. Bailing out." 
); return -ENODEV; } ep_out = altsetting->endpoint[i].desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; } } } if ( ep_out == -1 ) { MTS_WARNING( "couldn't find an output bulk endpoint. Bailing out.\n" ); return -ENODEV; } new_desc = kzalloc(sizeof(struct mts_desc), GFP_KERNEL); if (!new_desc) goto out; new_desc->urb = usb_alloc_urb(0, GFP_KERNEL); if (!new_desc->urb) goto out_kfree; new_desc->context.scsi_status = kmalloc(1, GFP_KERNEL); if (!new_desc->context.scsi_status) goto out_kfree2; new_desc->usb_dev = dev; new_desc->usb_intf = intf; init_MUTEX(&new_desc->lock); /* endpoints */ new_desc->ep_out = ep_out; new_desc->ep_response = ep_in_set[0]; new_desc->ep_image = ep_in_set[1]; if ( new_desc->ep_out != MTS_EP_OUT ) MTS_WARNING( "will this work? Command EP is not usually %d\n", (int)new_desc->ep_out ); if ( new_desc->ep_response != MTS_EP_RESPONSE ) MTS_WARNING( "will this work? Response EP is not usually %d\n", (int)new_desc->ep_response ); if ( new_desc->ep_image != MTS_EP_IMAGE ) MTS_WARNING( "will this work? Image data EP is not usually %d\n", (int)new_desc->ep_image ); new_desc->host = scsi_host_alloc(&mts_scsi_host_template, sizeof(new_desc)); if (!new_desc->host) goto out_free_urb; new_desc->host->hostdata[0] = (unsigned long)new_desc; if (scsi_add_host(new_desc->host, NULL)) { err_retval = -EIO; goto out_free_urb; } scsi_scan_host(new_desc->host); usb_set_intfdata(intf, new_desc); return 0; out_kfree2: kfree(new_desc->context.scsi_status); out_free_urb: usb_free_urb(new_desc->urb); out_kfree: kfree(new_desc); out: return err_retval; }
/*
 * esas2r_probe - PCI probe entry for ATTO ExpressSAS adapters
 * @pcid: PCI device being probed
 * @id:   matching PCI id table entry
 *
 * Allocates a Scsi_Host whose hostdata embeds the adapter soft state
 * plus the request pool, initializes the adapter, registers the host
 * and exposes the firmware/NVRAM sysfs binary attributes.
 *
 * NOTE(review): when esas2r_init_adapter() fails the function puts the
 * host but still returns 0, so the PCI core treats the probe as having
 * succeeded even though no drvdata was set - confirm this is
 * intentional.  Also, the scsi_add_host() failure path logs via
 * &host->shost_gendev *after* scsi_host_put() - verify against the
 * host's refcount lifetime.
 */
static int esas2r_probe(struct pci_dev *pcid,
			const struct pci_device_id *id)
{
	struct Scsi_Host *host = NULL;
	struct esas2r_adapter *a;
	int err;

	/* hostdata carries the adapter state plus num_requests+1 requests */
	size_t host_alloc_size = sizeof(struct esas2r_adapter)
				 + ((num_requests) + 1)
				 * sizeof(struct esas2r_request);

	esas2r_log_dev(ESAS2R_LOG_DEBG, &(pcid->dev),
		       "esas2r_probe() 0x%02x 0x%02x 0x%02x 0x%02x",
		       pcid->vendor,
		       pcid->device,
		       pcid->subsystem_vendor,
		       pcid->subsystem_device);

	esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
		       "before pci_enable_device() "
		       "enable_cnt: %d",
		       pcid->enable_cnt.counter);

	err = pci_enable_device(pcid);
	if (err != 0) {
		esas2r_log_dev(ESAS2R_LOG_CRIT, &(pcid->dev),
			       "pci_enable_device() FAIL (%d)",
			       err);
		return -ENODEV;
	}

	esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
		       "pci_enable_device() OK");
	esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
		       "after pci_enable_device() enable_cnt: %d",
		       pcid->enable_cnt.counter);

	host = scsi_host_alloc(&driver_template, host_alloc_size);
	if (host == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT, "scsi_host_alloc() FAIL");
		return -ENODEV;
	}

	memset(host->hostdata, 0, host_alloc_size);

	a = (struct esas2r_adapter *)host->hostdata;

	esas2r_log(ESAS2R_LOG_INFO, "scsi_host_alloc() OK host: %p", host);

	/* override max LUN and max target id */

	host->max_id = ESAS2R_MAX_ID + 1;
	host->max_lun = 255;

	/* we can handle 16-byte CDbs */

	host->max_cmd_len = 16;

	/* queue limits and identification from module parameters */
	host->can_queue = can_queue;
	host->cmd_per_lun = cmd_per_lun;
	host->this_id = host->max_id + 1;
	host->max_channel = 0;
	host->unique_id = found_adapters;
	host->sg_tablesize = sg_tablesize;
	host->max_sectors = esas2r_max_sectors;

	/* set to bus master for BIOses that don't do it for us */
	esas2r_log(ESAS2R_LOG_INFO, "pci_set_master() called");
	pci_set_master(pcid);

	if (!esas2r_init_adapter(host, pcid, found_adapters)) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "unable to initialize device at PCI bus %x:%x",
			   pcid->bus->number,
			   pcid->devfn);

		esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
			       "scsi_host_put() called");

		scsi_host_put(host);

		return 0;
	}

	esas2r_log(ESAS2R_LOG_INFO, "pci_set_drvdata(%p, %p) called", pcid,
		   host->hostdata);

	pci_set_drvdata(pcid, host);

	esas2r_log(ESAS2R_LOG_INFO, "scsi_add_host() called");

	err = scsi_add_host(host, &pcid->dev);

	if (err) {
		esas2r_log(ESAS2R_LOG_CRIT, "scsi_add_host returned %d", err);
		esas2r_log_dev(ESAS2R_LOG_CRIT, &(host->shost_gendev),
			       "scsi_add_host() FAIL");

		esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
			       "scsi_host_put() called");

		scsi_host_put(host);

		esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
			       "pci_set_drvdata(%p, NULL) called",
			       pcid);

		pci_set_drvdata(pcid, NULL);

		return -ENODEV;
	}

	esas2r_fw_event_on(a);

	esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
		       "scsi_scan_host() called");

	scsi_scan_host(host);

	/* Add sysfs binary files */
	if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fw))
		esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
			       "Failed to create sysfs binary file: fw");
	else
		a->sysfs_fw_created = 1;

	if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fs))
		esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
			       "Failed to create sysfs binary file: fs");
	else
		a->sysfs_fs_created = 1;

	if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_vda))
		esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
			       "Failed to create sysfs binary file: vda");
	else
		a->sysfs_vda_created = 1;

	if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_hw))
		esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
			       "Failed to create sysfs binary file: hw");
	else
		a->sysfs_hw_created = 1;

	if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_live_nvram))
		esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
			       "Failed to create sysfs binary file: live_nvram");
	else
		a->sysfs_live_nvram_created = 1;

	if (sysfs_create_bin_file(&host->shost_dev.kobj,
				  &bin_attr_default_nvram))
		esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
			       "Failed to create sysfs binary file: default_nvram");
	else
		a->sysfs_default_nvram_created = 1;

	found_adapters++;

	return 0;
}
static struct Scsi_Host *__qlogicfas_detect(struct scsi_host_template *host, int qbase, int qlirq) { int qltyp; int qinitid; struct Scsi_Host *hreg; struct qlogicfas408_priv *priv; if (!qbase || qlirq == -1) goto err; if (!request_region(qbase, 0x10, qlogicfas_name)) { printk(KERN_INFO "%s: address %#x is busy\n", qlogicfas_name, qbase); goto err; } if (!qlogicfas408_detect(qbase, INT_TYPE)) { printk(KERN_WARNING "%s: probe failed for %#x\n", qlogicfas_name, qbase); goto err_release_mem; } printk(KERN_INFO "%s: Using preset base address of %03x," " IRQ %d\n", qlogicfas_name, qbase, qlirq); qltyp = qlogicfas408_get_chip_type(qbase, INT_TYPE); qinitid = host->this_id; if (qinitid < 0) qinitid = 7; qlogicfas408_setup(qbase, qinitid, INT_TYPE); hreg = scsi_host_alloc(host, sizeof(struct qlogicfas408_priv)); if (!hreg) goto err_release_mem; priv = get_priv_by_host(hreg); hreg->io_port = qbase; hreg->n_io_port = 16; hreg->dma_channel = -1; if (qlirq != -1) hreg->irq = qlirq; priv->qbase = qbase; priv->qlirq = qlirq; priv->qinitid = qinitid; priv->shost = hreg; priv->int_type = INT_TYPE; sprintf(priv->qinfo, "Qlogicfas Driver version 0.46, chip %02X at %03X, IRQ %d, TPdma:%d", qltyp, qbase, qlirq, QL_TURBO_PDMA); host->name = qlogicfas_name; if (request_irq(qlirq, qlogicfas408_ihandl, 0, qlogicfas_name, hreg)) goto free_scsi_host; if (scsi_add_host(hreg, NULL)) goto free_interrupt; scsi_scan_host(hreg); return hreg; free_interrupt: free_irq(qlirq, hreg); free_scsi_host: scsi_host_put(hreg); err_release_mem: release_region(qbase, 0x10); err: return NULL; }
/*
 * __qlogicfas_detect - probe and register one QLogic FAS408 ISA adapter
 * @host:  SCSI host template to allocate the Scsi_Host from
 * @qbase: I/O port base of the card (0 means "not configured")
 * @qlirq: IRQ line for the card (-1 means "not configured")
 *
 * Returns the registered Scsi_Host on success, NULL on failure.  On
 * failure everything acquired so far is released via the unwind chain
 * at the bottom.
 */
static struct Scsi_Host *__qlogicfas_detect(struct scsi_host_template *host,
								int qbase,
								int qlirq)
{
	int qltyp;		/* type of chip */
	int qinitid;
	struct Scsi_Host *hreg;	/* registered host structure */
	struct qlogicfas408_priv *priv;

	/*	Qlogic Cards only exist at 0x230 or 0x330 (the chip itself
	 *	decodes the address - I check 230 first since MIDI cards are
	 *	typically at 0x330
	 *
	 *	Theoretically, two Qlogic cards can coexist in the same system.
	 *	This should work by simply using this as a loadable module for
	 *	the second card, but I haven't tested this.
	 */
	if (!qbase || qlirq == -1)
		goto err;

	if (!request_region(qbase, 0x10, qlogicfas_name)) {
		printk(KERN_INFO "%s: address %#x is busy\n",
		       qlogicfas_name, qbase);
		goto err;
	}

	if (!qlogicfas408_detect(qbase, INT_TYPE)) {
		printk(KERN_WARNING "%s: probe failed for %#x\n",
		       qlogicfas_name, qbase);
		goto err_release_mem;
	}

	printk(KERN_INFO "%s: Using preset base address of %03x,"
			 " IRQ %d\n", qlogicfas_name, qbase, qlirq);

	qltyp = qlogicfas408_get_chip_type(qbase, INT_TYPE);
	qinitid = host->this_id;
	if (qinitid < 0)
		qinitid = 7;	/* if no ID, use 7 */

	/* program the chip before exposing it to the midlayer */
	qlogicfas408_setup(qbase, qinitid, INT_TYPE);

	hreg = scsi_host_alloc(host, sizeof(struct qlogicfas408_priv));
	if (!hreg)
		goto err_release_mem;
	priv = get_priv_by_host(hreg);
	hreg->io_port = qbase;
	hreg->n_io_port = 16;
	hreg->dma_channel = -1;	/* no ISA DMA channel - PIO/pseudo-DMA */
	if (qlirq != -1)
		hreg->irq = qlirq;
	priv->qbase = qbase;
	priv->qlirq = qlirq;
	priv->qinitid = qinitid;
	priv->shost = hreg;
	priv->int_type = INT_TYPE;

	sprintf(priv->qinfo,
		"Qlogicfas Driver version 0.46, chip %02X at %03X, IRQ %d, TPdma:%d",
		qltyp, qbase, qlirq, QL_TURBO_PDMA);
	host->name = qlogicfas_name;

	/* dev_id is the Scsi_Host; the shared 408 ISR expects this */
	if (request_irq(qlirq, qlogicfas408_ihandl, 0, qlogicfas_name, hreg))
		goto free_scsi_host;

	if (scsi_add_host(hreg, NULL))
		goto free_interrupt;

	scsi_scan_host(hreg);

	return hreg;

	/* unwind in reverse order of acquisition */
free_interrupt:
	free_irq(qlirq, hreg);

free_scsi_host:
	scsi_host_put(hreg);

err_release_mem:
	release_region(qbase, 0x10);
err:
	return NULL;
}
/*
 * zfcp_scsi_forget_cmnds - dissociate outstanding FSF requests after a TMF
 * @zsdev:    the SCSI device the task management function was sent to
 * @tm_flags: FCP_TMF_TGT_RESET or FCP_TMF_LUN_RESET, selects the filter scope
 *
 * Walks the adapter request list under abort_lock and forgets every
 * command matched by the filter (whole target, or one LUN for a LUN
 * reset), since the successful TMF has already disposed of them.
 */
static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags)
{
	struct zfcp_adapter *adapter = zsdev->port->adapter;
	struct zfcp_scsi_req_filter filter = {
		.tmf_scope = FCP_TMF_TGT_RESET,
		.port_handle = zsdev->port->handle,
	};
	unsigned long flags;

	/* narrow the scope from target to a single LUN for a LUN reset */
	if (tm_flags == FCP_TMF_LUN_RESET) {
		filter.tmf_scope = FCP_TMF_LUN_RESET;
		filter.lun_handle = zsdev->lun_handle;
	}

	/*
	 * abort_lock secures against other processings - in the abort-function
	 * and normal cmnd-handler - of (struct zfcp_fsf_req *)->data
	 */
	write_lock_irqsave(&adapter->abort_lock, flags);
	zfcp_reqlist_apply_for_all(adapter->req_list, zfcp_scsi_forget_cmnd,
				   &filter);
	write_unlock_irqrestore(&adapter->abort_lock, flags);
}

/*
 * zfcp_task_mgmt_function - issue an FCP task management function (sync)
 * @scpnt:    SCSI command identifying the device to reset
 * @tm_flags: FCP_TMF_TGT_RESET or FCP_TMF_LUN_RESET
 *
 * Retries the TMF up to 3 times, waiting for error recovery between
 * attempts.  Returns SUCCESS/FAILED for the SCSI EH, or the value of
 * fc_block_scsi_eh() (e.g. fast-I/O-fail) when the rport blocks.
 */
static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	struct zfcp_fsf_req *fsf_req = NULL;
	int retval = SUCCESS, ret;
	int retry = 3;

	while (retry--) {
		fsf_req = zfcp_fsf_fcp_task_mgmt(scpnt, tm_flags);
		if (fsf_req)
			break;

		/* could not send the TMF - wait for recovery and re-check */
		zfcp_erp_wait(adapter);
		ret = fc_block_scsi_eh(scpnt);
		if (ret)
			return ret;

		if (!(atomic_read(&adapter->status) &
		      ZFCP_STATUS_COMMON_RUNNING)) {
			/* adapter gone: nothing outstanding, report success */
			zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
			return SUCCESS;
		}
	}
	if (!fsf_req)
		return FAILED;

	wait_for_completion(&fsf_req->completion);

	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
		zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
		retval = FAILED;
	} else {
		zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
		/* TMF succeeded: drop our bookkeeping for affected cmnds */
		zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
	}

	zfcp_fsf_req_free(fsf_req);
	return retval;
}

/* SCSI EH callback: reset a single LUN */
static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
{
	return zfcp_task_mgmt_function(scpnt, FCP_TMF_LUN_RESET);
}

/* SCSI EH callback: reset the whole target */
static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
{
	return zfcp_task_mgmt_function(scpnt, FCP_TMF_TGT_RESET);
}

/*
 * SCSI EH callback: reopen the whole adapter and wait for recovery.
 * Returns SUCCESS, or the fc_block_scsi_eh() verdict if the rport is
 * blocked afterwards.
 */
static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	int ret;

	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
	zfcp_erp_wait(adapter);
	ret = fc_block_scsi_eh(scpnt);
	if (ret)
		return ret;

	return SUCCESS;
}

struct scsi_transport_template *zfcp_scsi_transport_template;

/* Host template describing the zfcp adapter to the SCSI midlayer. */
static struct scsi_host_template zfcp_scsi_host_template = {
	.module			 = THIS_MODULE,
	.name			 = "zfcp",
	.queuecommand		 = zfcp_scsi_queuecommand,
	.eh_abort_handler	 = zfcp_scsi_eh_abort_handler,
	.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
	.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
	.eh_host_reset_handler	 = zfcp_scsi_eh_host_reset_handler,
	.slave_alloc		 = zfcp_scsi_slave_alloc,
	.slave_configure	 = zfcp_scsi_slave_configure,
	.slave_destroy		 = zfcp_scsi_slave_destroy,
	.change_queue_depth	 = scsi_change_queue_depth,
	.proc_name		 = "zfcp",
	.can_queue		 = 4096,
	.this_id		 = -1,
	.sg_tablesize		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
				     * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2),
				   /* GCD, adjusted later */
	.max_sectors		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
				     * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
				   /* GCD, adjusted later */
	.dma_boundary		 = ZFCP_QDIO_SBALE_LEN - 1,
	.use_clustering		 = 1,
	.shost_attrs		 = zfcp_sysfs_shost_attrs,
	.sdev_attrs		 = zfcp_sysfs_sdev_attrs,
	.track_queue_depth	 = 1,
};

/**
 * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
 * @adapter: The zfcp adapter to register with the SCSI midlayer
 */
int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
{
	struct ccw_dev_id dev_id;

	/* idempotent: already registered */
	if (adapter->scsi_host)
		return 0;

	ccw_device_get_id(adapter->ccw_device, &dev_id);
	/* register adapter as SCSI host with mid layer of SCSI stack */
	adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
					     sizeof (struct zfcp_adapter *));
	if (!adapter->scsi_host) {
		dev_err(&adapter->ccw_device->dev,
			"Registering the FCP device with the "
			"SCSI stack failed\n");
		return -EIO;
	}

	/* tell the SCSI stack some characteristics of this adapter */
	adapter->scsi_host->max_id = 511;
	adapter->scsi_host->max_lun = 0xFFFFFFFF;
	adapter->scsi_host->max_channel = 0;
	adapter->scsi_host->unique_id = dev_id.devno;
	adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
	adapter->scsi_host->transportt = zfcp_scsi_transport_template;

	/* hostdata[0] carries the adapter backpointer for the midlayer */
	adapter->scsi_host->hostdata[0] = (unsigned long) adapter;

	if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
		scsi_host_put(adapter->scsi_host);
		return -EIO;
	}

	return 0;
}

/**
 * zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer
 * @adapter: The zfcp adapter to unregister.
 */
void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
{
	struct Scsi_Host *shost;
	struct zfcp_port *port;

	shost = adapter->scsi_host;
	if (!shost)
		return;

	/* detach all remote ports before tearing the host down */
	read_lock_irq(&adapter->port_list_lock);
	list_for_each_entry(port, &adapter->port_list, list)
		port->rport = NULL;
	read_unlock_irq(&adapter->port_list_lock);

	fc_remove_host(shost);
	scsi_remove_host(shost);
	scsi_host_put(shost);
	adapter->scsi_host = NULL;
}

/*
 * Lazily allocate (once per adapter) and zero the fc_host_statistics
 * buffer.  Returns NULL on allocation failure.
 */
static struct fc_host_statistics*
zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
{
	struct fc_host_statistics *fc_stats;

	if (!adapter->fc_stats) {
		fc_stats = kmalloc(sizeof(*fc_stats), GFP_KERNEL);
		if (!fc_stats)
			return NULL;
		adapter->fc_stats = fc_stats; /* freed in adapter_release */
	}
	memset(adapter->fc_stats, 0, sizeof(*adapter->fc_stats));
	return adapter->fc_stats;
}

/*
 * Fill @fc_stats with the delta between the current counters @data and
 * the snapshot @old taken at the last statistics reset.
 */
static void zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
				      struct fsf_qtcb_bottom_port *data,
				      struct fsf_qtcb_bottom_port *old)
{
	fc_stats->seconds_since_last_reset =
		data->seconds_since_last_reset - old->seconds_since_last_reset;
	fc_stats->tx_frames = data->tx_frames - old->tx_frames;
	fc_stats->tx_words = data->tx_words - old->tx_words;
	fc_stats->rx_frames = data->rx_frames - old->rx_frames;
	fc_stats->rx_words = data->rx_words - old->rx_words;
	fc_stats->lip_count = data->lip - old->lip;
	fc_stats->nos_count = data->nos - old->nos;
	fc_stats->error_frames = data->error_frames - old->error_frames;
	fc_stats->dumped_frames = data->dumped_frames - old->dumped_frames;
	fc_stats->link_failure_count = data->link_failure - old->link_failure;
	fc_stats->loss_of_sync_count = data->loss_of_sync - old->loss_of_sync;
	fc_stats->loss_of_signal_count =
		data->loss_of_signal - old->loss_of_signal;
	fc_stats->prim_seq_protocol_err_count =
		data->psp_error_counts - old->psp_error_counts;
	fc_stats->invalid_tx_word_count =
		data->invalid_tx_words - old->invalid_tx_words;
	fc_stats->invalid_crc_count = data->invalid_crcs - old->invalid_crcs;
	fc_stats->fcp_input_requests =
		data->input_requests - old->input_requests;
	fc_stats->fcp_output_requests =
		data->output_requests - old->output_requests;
	fc_stats->fcp_control_requests =
		data->control_requests - old->control_requests;
	fc_stats->fcp_input_megabytes = data->input_mb - old->input_mb;
	fc_stats->fcp_output_megabytes = data->output_mb - old->output_mb;
}

/* Fill @fc_stats with the absolute counters from @data. */
static void zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats,
				   struct fsf_qtcb_bottom_port *data)
{
	fc_stats->seconds_since_last_reset = data->seconds_since_last_reset;
	fc_stats->tx_frames = data->tx_frames;
	fc_stats->tx_words = data->tx_words;
	fc_stats->rx_frames = data->rx_frames;
	fc_stats->rx_words = data->rx_words;
	fc_stats->lip_count = data->lip;
	fc_stats->nos_count = data->nos;
	fc_stats->error_frames = data->error_frames;
	fc_stats->dumped_frames = data->dumped_frames;
	fc_stats->link_failure_count = data->link_failure;
	fc_stats->loss_of_sync_count = data->loss_of_sync;
	fc_stats->loss_of_signal_count = data->loss_of_signal;
	fc_stats->prim_seq_protocol_err_count = data->psp_error_counts;
	fc_stats->invalid_tx_word_count = data->invalid_tx_words;
	fc_stats->invalid_crc_count = data->invalid_crcs;
	fc_stats->fcp_input_requests = data->input_requests;
	fc_stats->fcp_output_requests = data->output_requests;
	fc_stats->fcp_control_requests = data->control_requests;
	fc_stats->fcp_input_megabytes = data->input_mb;
	fc_stats->fcp_output_megabytes = data->output_mb;
}
static void scsifront_free(struct vscsifrnt_info *info) { struct Scsi_Host *host = info->host; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14) if (host->shost_state != SHOST_DEL) { #else if (!test_bit(SHOST_DEL, &host->shost_state)) { #endif scsi_remove_host(info->host); } if (info->ring_ref != GRANT_INVALID_REF) { gnttab_end_foreign_access(info->ring_ref, (unsigned long)info->ring.sring); info->ring_ref = GRANT_INVALID_REF; info->ring.sring = NULL; } if (info->irq) unbind_from_irqhandler(info->irq, info); info->irq = 0; scsi_host_put(info->host); } static int scsifront_alloc_ring(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct vscsiif_sring *sring; int err = -ENOMEM; info->ring_ref = GRANT_INVALID_REF; /***** Frontend to Backend ring start *****/ sring = (struct vscsiif_sring *) __get_free_page(GFP_KERNEL); if (!sring) { xenbus_dev_fatal(dev, err, "fail to allocate shared ring (Front to Back)"); return err; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(sring)); if (err < 0) { free_page((unsigned long) sring); info->ring.sring = NULL; xenbus_dev_fatal(dev, err, "fail to grant shared ring (Front to Back)"); goto free_sring; } info->ring_ref = err; err = bind_listening_port_to_irqhandler( dev->otherend_id, scsifront_intr, SA_SAMPLE_RANDOM, "scsifront", info); if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); goto free_sring; } info->irq = err; return 0; /* free resource */ free_sring: scsifront_free(info); return err; } static int scsifront_init_ring(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct xenbus_transaction xbt; int err; DPRINTK("%s\n",__FUNCTION__); err = scsifront_alloc_ring(info); if (err) return err; DPRINTK("%u %u\n", info->ring_ref, info->evtchn); again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); } err = xenbus_printf(xbt, dev->nodename, 
"ring-ref", "%u", info->ring_ref); if (err) { xenbus_dev_fatal(dev, err, "%s", "writing ring-ref"); goto fail; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { xenbus_dev_fatal(dev, err, "%s", "writing event-channel"); goto fail; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto free_sring; } return 0; fail: xenbus_transaction_end(xbt, 1); free_sring: /* free resource */ scsifront_free(info); return err; } static int scsifront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { struct vscsifrnt_info *info; struct Scsi_Host *host; int i, err = -ENOMEM; char name[DEFAULT_TASK_COMM_LEN]; host = scsi_host_alloc(&scsifront_sht, sizeof(*info)); if (!host) { xenbus_dev_fatal(dev, err, "fail to allocate scsi host"); return err; } info = (struct vscsifrnt_info *) host->hostdata; info->host = host; dev->dev.driver_data = info; info->dev = dev; for (i = 0; i < VSCSIIF_MAX_REQS; i++) { info->shadow[i].next_free = i + 1; init_waitqueue_head(&(info->shadow[i].wq_reset)); info->shadow[i].wait_reset = 0; } info->shadow[VSCSIIF_MAX_REQS - 1].next_free = 0x0fff; err = scsifront_init_ring(info); if (err) { scsi_host_put(host); return err; } init_waitqueue_head(&info->wq); spin_lock_init(&info->io_lock); spin_lock_init(&info->shadow_lock); snprintf(name, DEFAULT_TASK_COMM_LEN, "vscsiif.%d", info->host->host_no); info->kthread = kthread_run(scsifront_schedule, info, name); if (IS_ERR(info->kthread)) { err = PTR_ERR(info->kthread); info->kthread = NULL; printk(KERN_ERR "scsifront: kthread start err %d\n", err); goto free_sring; } host->max_id = VSCSIIF_MAX_TARGET; host->max_channel = 0; host->max_lun = VSCSIIF_MAX_LUN; host->max_sectors = (VSCSIIF_SG_TABLESIZE - 1) * PAGE_SIZE / 512; err = scsi_add_host(host, &dev->dev); if (err) { printk(KERN_ERR "scsifront: fail to add scsi host %d\n", err); goto free_sring; } 
xenbus_switch_state(dev, XenbusStateInitialised); return 0; free_sring: /* free resource */ scsifront_free(info); return err; } static int scsifront_remove(struct xenbus_device *dev) { struct vscsifrnt_info *info = dev->dev.driver_data; DPRINTK("%s: %s removed\n",__FUNCTION__ ,dev->nodename); if (info->kthread) { kthread_stop(info->kthread); info->kthread = NULL; } scsifront_free(info); return 0; } static int scsifront_disconnect(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct Scsi_Host *host = info->host; DPRINTK("%s: %s disconnect\n",__FUNCTION__ ,dev->nodename); /* When this function is executed, all devices of Frontend have been deleted. Therefore, it need not block I/O before remove_host. */ scsi_remove_host(host); xenbus_frontend_closed(dev); return 0; } #define VSCSIFRONT_OP_ADD_LUN 1 #define VSCSIFRONT_OP_DEL_LUN 2 static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op) { struct xenbus_device *dev = info->dev; int i, err = 0; char str[64], state_str[64]; char **dir; unsigned int dir_n = 0; unsigned int device_state; unsigned int hst, chn, tgt, lun; struct scsi_device *sdev; dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n); if (IS_ERR(dir)) return; for (i = 0; i < dir_n; i++) { /* read status */ snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]); err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u", &device_state); if (XENBUS_EXIST_ERR(err)) continue; /* virtual SCSI device */ snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]); err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u:%u:%u:%u", &hst, &chn, &tgt, &lun); if (XENBUS_EXIST_ERR(err)) continue; /* front device state path */ snprintf(state_str, sizeof(state_str), "vscsi-devs/%s/state", dir[i]); switch (op) { case VSCSIFRONT_OP_ADD_LUN: if (device_state == XenbusStateInitialised) { sdev = scsi_device_lookup(info->host, chn, tgt, lun); if (sdev) { printk(KERN_ERR "scsifront: Device already in use.\n"); scsi_device_put(sdev); 
xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateClosed); } else { scsi_add_device(info->host, chn, tgt, lun); xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateConnected); } } break; case VSCSIFRONT_OP_DEL_LUN: if (device_state == XenbusStateClosing) { sdev = scsi_device_lookup(info->host, chn, tgt, lun); if (sdev) { scsi_remove_device(sdev); scsi_device_put(sdev); xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateClosed); } } break; default: break; } } kfree(dir); return; } static void scsifront_backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct vscsifrnt_info *info = dev->dev.driver_data; DPRINTK("%p %u %u\n", dev, dev->state, backend_state); switch (backend_state) { case XenbusStateUnknown: case XenbusStateInitialising: case XenbusStateInitWait: case XenbusStateClosed: break; case XenbusStateInitialised: break; case XenbusStateConnected: if (xenbus_read_driver_state(dev->nodename) == XenbusStateInitialised) { scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN); } if (dev->state == XenbusStateConnected) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateClosing: scsifront_disconnect(info); break; case XenbusStateReconfiguring: scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN); xenbus_switch_state(dev, XenbusStateReconfiguring); break; case XenbusStateReconfigured: scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN); xenbus_switch_state(dev, XenbusStateConnected); break; } } static struct xenbus_device_id scsifront_ids[] = { { "vscsi" }, { "" } }; MODULE_ALIAS("xen:vscsi"); static struct xenbus_driver scsifront_driver = { .name = "vscsi", .owner = THIS_MODULE, .ids = scsifront_ids, .probe = scsifront_probe, .remove = scsifront_remove, /* .resume = scsifront_resume, */ .otherend_changed = scsifront_backend_changed, }; int scsifront_xenbus_init(void) { return xenbus_register_frontend(&scsifront_driver); } void scsifront_xenbus_unregister(void) { 
xenbus_unregister_driver(&scsifront_driver); }
static int eesoxscsi_probe(struct expansion_card *ec, const struct ecard_id *id) { struct Scsi_Host *host; struct eesoxscsi_info *info; void __iomem *base; int ret; ret = ecard_request_resources(ec); if (ret) goto out; base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); if (!base) { ret = -ENOMEM; goto out_region; } host = scsi_host_alloc(&eesox_template, sizeof(struct eesoxscsi_info)); if (!host) { ret = -ENOMEM; goto out_region; } ecard_set_drvdata(ec, host); info = (struct eesoxscsi_info *)host->hostdata; info->ec = ec; info->base = base; info->ctl_port = base + EESOX_CONTROL; info->control = term[ec->slot_no] ? EESOX_TERM_ENABLE : 0; writeb(info->control, info->ctl_port); info->info.scsi.io_base = base + EESOX_FAS216_OFFSET; info->info.scsi.io_shift = EESOX_FAS216_SHIFT; info->info.scsi.irq = ec->irq; info->info.scsi.dma = ec->dma; info->info.ifcfg.clockrate = 40; /* MHz */ info->info.ifcfg.select_timeout = 255; info->info.ifcfg.asyncperiod = 200; /* ns */ info->info.ifcfg.sync_max_depth = 7; info->info.ifcfg.cntl3 = CNTL3_FASTSCSI | CNTL3_FASTCLK; info->info.ifcfg.disconnect_ok = 1; info->info.ifcfg.wide_max_size = 0; info->info.ifcfg.capabilities = FASCAP_PSEUDODMA; info->info.dma.setup = eesoxscsi_dma_setup; info->info.dma.pseudo = eesoxscsi_dma_pseudo; info->info.dma.stop = eesoxscsi_dma_stop; ec->irqaddr = base + EESOX_DMASTAT; ec->irqmask = EESOX_STAT_INTR; ecard_setirq(ec, &eesoxscsi_ops, info); device_create_file(&ec->dev, &dev_attr_bus_term); ret = fas216_init(host); if (ret) goto out_free; ret = request_irq(ec->irq, eesoxscsi_intr, 0, "eesoxscsi", info); if (ret) { printk("scsi%d: IRQ%d not free: %d\n", host->host_no, ec->irq, ret); goto out_remove; } if (info->info.scsi.dma != NO_DMA) { if (request_dma(info->info.scsi.dma, "eesox")) { printk("scsi%d: DMA%d not free, DMA disabled\n", host->host_no, info->info.scsi.dma); info->info.scsi.dma = NO_DMA; } else { set_dma_speed(info->info.scsi.dma, 180); info->info.ifcfg.capabilities |= FASCAP_DMA; 
info->info.ifcfg.cntl3 |= CNTL3_BS8; } } ret = fas216_add(host, &ec->dev); if (ret == 0) goto out; if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); free_irq(ec->irq, host); out_remove: fas216_remove(host); out_free: device_remove_file(&ec->dev, &dev_attr_bus_term); scsi_host_put(host); out_region: ecard_release_resources(ec); out: return ret; }
//probe书中有教 static int our_probe(struct usb_interface *intf, const struct usb_device_id *id){ struct us_data *us; int result; //device struct device *dev; //scsihost struct Scsi_Host *host; //检测id是否符合 和intf struct us_unusual_dev *unusual_dev; unusual_dev=(id - usb_storage_usb_ids) + us_unusual_dev_list; if(usb_usual_check_type(id,USB_US_TYPE_STOR) || usb_usual_ignore_device(intf)) return -ENXIO; printk(KERN_ALERT "probe usb usb detected!\n"); //分配个host host = scsi_host_alloc(&usb_stor_host_template,sizeof(*us)); if(!host){ dev_warn(&intf->dev,"fail to allocate the scsi host\n"); return -ENOMEM; } //host中的一些初始化 host->max_cmd_len = 16; host-> sg_tablesize = usb_stor_sg_tablesize(intf); us= host_to_us(host);//us 作为host中 的us //分内存? memset(us,0,sizeof(struct us_data)); mutex_init(&(us->dev_mutex)); init_completion(&us->cmnd_ready); init_completion(&(us->notify)); init_waitqueue_head(&us->delay_wait); INIT_DELAYED_WORK(&us->scan_dwork,usb_stor_scan_dwork); result = associate_dev(us,intf); if(result) goto Bad; result = get_device_info(us,id,unusual_dev); if(result) goto Bad; //transport protocol get_transport(us); get_protocol(us); if(!us->transport ||!us->proto_handler){ result=-ENXIO; goto Bad; } printk(KERN_ALERT"Transport: %s\n",us->transport_name); printk(KERN_ALERT"Protocol: %s\n",us->transport_name); dev = &us->pusb_intf->dev; //设置max lun if(us->fflags & US_FL_SINGLE_LUN) us->max_lun =0; //endpoint get pipe result = get_pipes(us); if(result) goto Bad; //如果u盘前十个指令错误,重置 if (us->fflags & US_FL_INITIAL_READ10) set_bit(US_FLIDX_REDO_READ10, &us->dflags); //申请子资源,添加进host result=usb_stor_acquire_sesources(us); if(result) goto Bad; snprintf(us->scsi_name,sizeof(us->scsi_name),"our-usb-storage%s",dev_name(&us->pusb_intf->dev)); result= scsi_add_host(us_to_host(us),dev); if(result){ printk(KERN_ALERT"UNable to add the host\n"); goto Bad; } //scsi设备延时探测 usb_autopm_get_interface_no_resume(us->pusb_intf); set_bit(US_FLIDX_SCAN_PENDING,&us->dflags); if(delay_use>0) 
dev_dbg(dev,"waiting for device before scanning\n"); queue_delayed_work(system_freezable_wq,&us->scan_dwork,delay_use * HZ); return 0; Bad: printk(KERN_ALERT "probe false!\n"); release_everything(us); return result; }
/*
 * zfcp_scsi_forget_cmnds - dissociate outstanding FSF requests after a TMF
 * @zsdev:    the SCSI device the task management function was sent to
 * @tm_flags: FCP_TMF_TGT_RESET or FCP_TMF_LUN_RESET, selects the filter scope
 *
 * Walks the adapter request list under abort_lock and forgets every
 * command matched by the filter (whole target, or one LUN for a LUN
 * reset), since the successful TMF has already disposed of them.
 */
static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags)
{
	struct zfcp_adapter *adapter = zsdev->port->adapter;
	struct zfcp_scsi_req_filter filter = {
		.tmf_scope = FCP_TMF_TGT_RESET,
		.port_handle = zsdev->port->handle,
	};
	unsigned long flags;

	/* narrow the scope from target to a single LUN for a LUN reset */
	if (tm_flags == FCP_TMF_LUN_RESET) {
		filter.tmf_scope = FCP_TMF_LUN_RESET;
		filter.lun_handle = zsdev->lun_handle;
	}

	/*
	 * abort_lock secures against other processings - in the abort-function
	 * and normal cmnd-handler - of (struct zfcp_fsf_req *)->data
	 */
	write_lock_irqsave(&adapter->abort_lock, flags);
	zfcp_reqlist_apply_for_all(adapter->req_list, zfcp_scsi_forget_cmnd,
				   &filter);
	write_unlock_irqrestore(&adapter->abort_lock, flags);
}

/**
 * zfcp_scsi_task_mgmt_function() - Send a task management function (sync).
 * @sdev: Pointer to SCSI device to send the task management command to.
 * @tm_flags: Task management flags,
 *	      here we only handle %FCP_TMF_TGT_RESET or %FCP_TMF_LUN_RESET.
 */
static int zfcp_scsi_task_mgmt_function(struct scsi_device *sdev, u8 tm_flags)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct zfcp_fsf_req *fsf_req = NULL;
	int retval = SUCCESS, ret;
	int retry = 3;

	while (retry--) {
		fsf_req = zfcp_fsf_fcp_task_mgmt(sdev, tm_flags);
		if (fsf_req)
			break;

		/* could not send the TMF - wait for recovery and re-check */
		zfcp_dbf_scsi_devreset("wait", sdev, tm_flags, NULL);
		zfcp_erp_wait(adapter);
		ret = fc_block_rport(rport);
		if (ret) {
			zfcp_dbf_scsi_devreset("fiof", sdev, tm_flags, NULL);
			return ret;
		}

		if (!(atomic_read(&adapter->status) &
		      ZFCP_STATUS_COMMON_RUNNING)) {
			/* adapter gone: nothing outstanding, report success */
			zfcp_dbf_scsi_devreset("nres", sdev, tm_flags, NULL);
			return SUCCESS;
		}
	}
	if (!fsf_req) {
		zfcp_dbf_scsi_devreset("reqf", sdev, tm_flags, NULL);
		return FAILED;
	}

	wait_for_completion(&fsf_req->completion);

	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
		zfcp_dbf_scsi_devreset("fail", sdev, tm_flags, fsf_req);
		retval = FAILED;
	} else {
		zfcp_dbf_scsi_devreset("okay", sdev, tm_flags, fsf_req);
		/* TMF succeeded: drop our bookkeeping for affected cmnds */
		zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
	}

	zfcp_fsf_req_free(fsf_req);
	return retval;
}

/* SCSI EH callback: reset the LUN the failed command was sent to */
static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
{
	struct scsi_device *sdev = scpnt->device;

	return zfcp_scsi_task_mgmt_function(sdev, FCP_TMF_LUN_RESET);
}

/*
 * SCSI EH callback: reset the whole target.  Needs some scsi_device on
 * that target to send the TMF through, so pick any matching one.
 */
static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
{
	struct scsi_target *starget = scsi_target(scpnt->device);
	struct fc_rport *rport = starget_to_rport(starget);
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct scsi_device *sdev = NULL, *tmp_sdev;
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *)shost->hostdata[0];
	int ret;

	/* find any sdev on the target; the loop's break keeps its reference */
	shost_for_each_device(tmp_sdev, shost) {
		if (tmp_sdev->id == starget->id) {
			sdev = tmp_sdev;
			break;
		}
	}
	if (!sdev) {
		ret = FAILED;
		zfcp_dbf_scsi_eh("tr_nosd", adapter, starget->id, ret);
		return ret;
	}

	ret = zfcp_scsi_task_mgmt_function(sdev, FCP_TMF_TGT_RESET);

	/* release reference from above shost_for_each_device */
	if (sdev)
		scsi_device_put(tmp_sdev);

	return ret;
}

/*
 * SCSI EH callback: reopen the whole adapter, wait for recovery, then
 * fold a blocked rport into the result.
 */
static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	int ret = SUCCESS, fc_ret;

	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
	zfcp_erp_wait(adapter);
	fc_ret = fc_block_scsi_eh(scpnt);
	if (fc_ret)
		ret = fc_ret;

	zfcp_dbf_scsi_eh("schrh_r", adapter, ~0, ret);
	return ret;
}

/**
 * zfcp_scsi_sysfs_host_reset() - Support scsi_host sysfs attribute host_reset.
 * @shost: Pointer to Scsi_Host to perform action on.
 * @reset_type: We support %SCSI_ADAPTER_RESET but not %SCSI_FIRMWARE_RESET.
 *
 * Return: 0 on %SCSI_ADAPTER_RESET, -%EOPNOTSUPP otherwise.
 *
 * This is similar to zfcp_sysfs_adapter_failed_store().
 */
static int zfcp_scsi_sysfs_host_reset(struct Scsi_Host *shost, int reset_type)
{
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *)shost->hostdata[0];
	int ret = 0;

	if (reset_type != SCSI_ADAPTER_RESET) {
		ret = -EOPNOTSUPP;
		zfcp_dbf_scsi_eh("scshr_n", adapter, ~0, ret);
		return ret;
	}

	zfcp_erp_adapter_reset_sync(adapter, "scshr_y");
	return ret;
}

struct scsi_transport_template *zfcp_scsi_transport_template;

/* Host template describing the zfcp adapter to the SCSI midlayer. */
static struct scsi_host_template zfcp_scsi_host_template = {
	.module			 = THIS_MODULE,
	.name			 = "zfcp",
	.queuecommand		 = zfcp_scsi_queuecommand,
	.eh_timed_out		 = fc_eh_timed_out,
	.eh_abort_handler	 = zfcp_scsi_eh_abort_handler,
	.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
	.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
	.eh_host_reset_handler	 = zfcp_scsi_eh_host_reset_handler,
	.slave_alloc		 = zfcp_scsi_slave_alloc,
	.slave_configure	 = zfcp_scsi_slave_configure,
	.slave_destroy		 = zfcp_scsi_slave_destroy,
	.change_queue_depth	 = scsi_change_queue_depth,
	.host_reset		 = zfcp_scsi_sysfs_host_reset,
	.proc_name		 = "zfcp",
	.can_queue		 = 4096,
	.this_id		 = -1,
	.sg_tablesize		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
				     * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2),
				   /* GCD, adjusted later */
	.max_sectors		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
				     * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
				   /* GCD, adjusted later */
	/* report size limit per scatter-gather segment */
	.max_segment_size	 = ZFCP_QDIO_SBALE_LEN,
	.dma_boundary		 = ZFCP_QDIO_SBALE_LEN - 1,
	.shost_attrs		 = zfcp_sysfs_shost_attrs,
	.sdev_attrs		 = zfcp_sysfs_sdev_attrs,
	.track_queue_depth	 = 1,
	.supported_mode		 = MODE_INITIATOR,
};

/**
 * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
 * @adapter: The zfcp adapter to register with the SCSI midlayer
 */
int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
{
	struct ccw_dev_id dev_id;

	/* idempotent: already registered */
	if (adapter->scsi_host)
		return 0;

	ccw_device_get_id(adapter->ccw_device, &dev_id);
	/* register adapter as SCSI host with mid layer of SCSI stack */
	adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
					     sizeof (struct zfcp_adapter *));
	if (!adapter->scsi_host) {
		dev_err(&adapter->ccw_device->dev,
			"Registering the FCP device with the "
			"SCSI stack failed\n");
		return -EIO;
	}

	/* tell the SCSI stack some characteristics of this adapter */
	adapter->scsi_host->max_id = 511;
	adapter->scsi_host->max_lun = 0xFFFFFFFF;
	adapter->scsi_host->max_channel = 0;
	adapter->scsi_host->unique_id = dev_id.devno;
	adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
	adapter->scsi_host->transportt = zfcp_scsi_transport_template;

	/* hostdata[0] carries the adapter backpointer for the midlayer */
	adapter->scsi_host->hostdata[0] = (unsigned long) adapter;

	if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
		scsi_host_put(adapter->scsi_host);
		return -EIO;
	}

	return 0;
}

/**
 * zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer
 * @adapter: The zfcp adapter to unregister.
 */
void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
{
	struct Scsi_Host *shost;
	struct zfcp_port *port;

	shost = adapter->scsi_host;
	if (!shost)
		return;

	/* detach all remote ports before tearing the host down */
	read_lock_irq(&adapter->port_list_lock);
	list_for_each_entry(port, &adapter->port_list, list)
		port->rport = NULL;
	read_unlock_irq(&adapter->port_list_lock);

	fc_remove_host(shost);
	scsi_remove_host(shost);
	scsi_host_put(shost);
	adapter->scsi_host = NULL;
}

/*
 * Lazily allocate (once per adapter) and zero the fc_host_statistics
 * buffer.  Returns NULL on allocation failure.
 */
static struct fc_host_statistics*
zfcp_scsi_init_fc_host_stats(struct zfcp_adapter *adapter)
{
	struct fc_host_statistics *fc_stats;

	if (!adapter->fc_stats) {
		fc_stats = kmalloc(sizeof(*fc_stats), GFP_KERNEL);
		if (!fc_stats)
			return NULL;
		adapter->fc_stats = fc_stats; /* freed in adapter_release */
	}
	memset(adapter->fc_stats, 0, sizeof(*adapter->fc_stats));
	return adapter->fc_stats;
}

/*
 * Fill @fc_stats with the delta between the current counters @data and
 * the snapshot @old taken at the last statistics reset.
 */
static void zfcp_scsi_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
					   struct fsf_qtcb_bottom_port *data,
					   struct fsf_qtcb_bottom_port *old)
{
	fc_stats->seconds_since_last_reset =
		data->seconds_since_last_reset - old->seconds_since_last_reset;
	fc_stats->tx_frames = data->tx_frames - old->tx_frames;
	fc_stats->tx_words = data->tx_words - old->tx_words;
	fc_stats->rx_frames = data->rx_frames - old->rx_frames;
	fc_stats->rx_words = data->rx_words - old->rx_words;
	fc_stats->lip_count = data->lip - old->lip;
	fc_stats->nos_count = data->nos - old->nos;
	fc_stats->error_frames = data->error_frames - old->error_frames;
	fc_stats->dumped_frames = data->dumped_frames - old->dumped_frames;
	fc_stats->link_failure_count = data->link_failure - old->link_failure;
	fc_stats->loss_of_sync_count = data->loss_of_sync - old->loss_of_sync;
	fc_stats->loss_of_signal_count =
		data->loss_of_signal - old->loss_of_signal;
	fc_stats->prim_seq_protocol_err_count =
		data->psp_error_counts - old->psp_error_counts;
	fc_stats->invalid_tx_word_count =
		data->invalid_tx_words - old->invalid_tx_words;
	fc_stats->invalid_crc_count = data->invalid_crcs - old->invalid_crcs;
	fc_stats->fcp_input_requests =
		data->input_requests - old->input_requests;
	fc_stats->fcp_output_requests =
		data->output_requests - old->output_requests;
	fc_stats->fcp_control_requests =
		data->control_requests - old->control_requests;
	fc_stats->fcp_input_megabytes = data->input_mb - old->input_mb;
	fc_stats->fcp_output_megabytes = data->output_mb - old->output_mb;
}

/* Fill @fc_stats with the absolute counters from @data. */
static void zfcp_scsi_set_fc_host_stats(struct fc_host_statistics *fc_stats,
					struct fsf_qtcb_bottom_port *data)
{
	fc_stats->seconds_since_last_reset = data->seconds_since_last_reset;
	fc_stats->tx_frames = data->tx_frames;
	fc_stats->tx_words = data->tx_words;
	fc_stats->rx_frames = data->rx_frames;
	fc_stats->rx_words = data->rx_words;
	fc_stats->lip_count = data->lip;
	fc_stats->nos_count = data->nos;
	fc_stats->error_frames = data->error_frames;
	fc_stats->dumped_frames = data->dumped_frames;
	fc_stats->link_failure_count = data->link_failure;
	fc_stats->loss_of_sync_count = data->loss_of_sync;
	fc_stats->loss_of_signal_count = data->loss_of_signal;
	fc_stats->prim_seq_protocol_err_count = data->psp_error_counts;
	fc_stats->invalid_tx_word_count = data->invalid_tx_words;
	fc_stats->invalid_crc_count = data->invalid_crcs;
	fc_stats->fcp_input_requests = data->input_requests;
	fc_stats->fcp_output_requests = data->output_requests;
	fc_stats->fcp_control_requests = data->control_requests;
	fc_stats->fcp_input_megabytes = data->input_mb;
	fc_stats->fcp_output_megabytes = data->output_mb;
}
/**
 * pvscsi_probe - PCI probe for a VMware PVSCSI adapter
 * @pdev: the PCI device being probed
 * @id: matched entry from the driver's PCI id table (unused here)
 *
 * Enables the PCI device, configures DMA masks (64-bit preferred, 32-bit
 * fallback), allocates the Scsi_Host with the adapter private data, maps
 * the MMIO BAR, allocates rings and per-command contexts, sets up the
 * interrupt (MSI-X, then MSI, then shared INTx) and finally registers and
 * scans the SCSI host.  Returns 0 on success or a negative errno; error
 * paths unwind through the goto labels in reverse order of setup.
 */
static int __devinit pvscsi_probe(struct pci_dev *pdev,
				  const struct pci_device_id *id)
{
	struct pvscsi_adapter *adapter;
	struct Scsi_Host *host;
	unsigned int i;
	unsigned long flags = 0;
	int error;

	error = -ENODEV;

	if (pci_enable_device(pdev))
		return error;

	/* Prefer 64-bit DMA; fall back to 32-bit, else bail out. */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
		   pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
	} else {
		printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
		goto out_disable_device;
	}

	/*
	 * Queue depth derives from the (module-parameter) ring page count,
	 * capped at the hardware maximum.
	 */
	pvscsi_template.can_queue =
		min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
		PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
	pvscsi_template.cmd_per_lun =
		min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
	host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
	if (!host) {
		printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
		goto out_disable_device;
	}

	adapter = shost_priv(host);
	memset(adapter, 0, sizeof(*adapter));
	adapter->dev  = pdev;
	adapter->host = host;

	spin_lock_init(&adapter->hw_lock);

	host->max_channel = 0;
	host->max_id      = 16;
	host->max_lun     = 1;
	host->max_cmd_len = 16;

	adapter->rev = pdev->revision;

	if (pci_request_regions(pdev, "vmw_pvscsi")) {
		printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
		goto out_free_host;
	}

	/*
	 * Find the first memory (non-I/O) BAR large enough for the device's
	 * register space.
	 */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
			continue;

		if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE)
			continue;

		break;
	}

	if (i == DEVICE_COUNT_RESOURCE) {
		printk(KERN_ERR
		       "vmw_pvscsi: adapter has no suitable MMIO region\n");
		goto out_release_resources;
	}

	adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);
	if (!adapter->mmioBase) {
		printk(KERN_ERR
		       "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
		       i, PVSCSI_MEM_SPACE_SIZE);
		goto out_release_resources;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, host);

	ll_adapter_reset(adapter);

	adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);

	error = pvscsi_allocate_rings(adapter);
	if (error) {
		printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n");
		goto out_release_resources;
	}

	/*
	 * From this point on we should reset the adapter if anything goes
	 * wrong.
	 */
	pvscsi_setup_all_rings(adapter);

	/* One context per outstanding request; all start on the free pool. */
	adapter->cmd_map = kcalloc(adapter->req_depth,
				   sizeof(struct pvscsi_ctx), GFP_KERNEL);
	if (!adapter->cmd_map) {
		printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n");
		error = -ENOMEM;
		goto out_reset_adapter;
	}

	INIT_LIST_HEAD(&adapter->cmd_pool);
	for (i = 0; i < adapter->req_depth; i++) {
		struct pvscsi_ctx *ctx = adapter->cmd_map + i;
		list_add(&ctx->list, &adapter->cmd_pool);
	}

	error = pvscsi_allocate_sg(adapter);
	if (error) {
		printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n");
		goto out_reset_adapter;
	}

	/* Interrupt setup: MSI-X, then MSI, then shared legacy INTx. */
	if (!pvscsi_disable_msix &&
	    pvscsi_setup_msix(adapter, &adapter->irq) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using MSI-X\n");
		adapter->use_msix = 1;
	} else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using MSI\n");
		adapter->use_msi = 1;
		adapter->irq = pdev->irq;
	} else {
		printk(KERN_INFO "vmw_pvscsi: using INTx\n");
		adapter->irq = pdev->irq;
		flags = IRQF_SHARED;
	}

	error = request_irq(adapter->irq, pvscsi_isr, flags,
			    "vmw_pvscsi", adapter);
	if (error) {
		printk(KERN_ERR
		       "vmw_pvscsi: unable to request IRQ: %d\n", error);
		adapter->irq = 0;	/* tell the teardown path no IRQ is held */
		goto out_reset_adapter;
	}

	error = scsi_add_host(host, &pdev->dev);
	if (error) {
		printk(KERN_ERR
		       "vmw_pvscsi: scsi_add_host failed: %d\n", error);
		goto out_reset_adapter;
	}

	dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n",
		 adapter->rev, host->host_no);

	pvscsi_unmask_intr(adapter);

	scsi_scan_host(host);

	return 0;

out_reset_adapter:
	ll_adapter_reset(adapter);
out_release_resources:
	pvscsi_release_resources(adapter);
out_free_host:
	scsi_host_put(host);
out_disable_device:
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);

	return error;
}
static int __init amiga_a3000_scsi_probe(struct platform_device *pdev) { struct resource *res; struct Scsi_Host *instance; int error; struct a3000_scsiregs *regs; wd33c93_regs wdregs; struct a3000_hostdata *hdata; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; if (!request_mem_region(res->start, resource_size(res), "wd33c93")) return -EBUSY; instance = scsi_host_alloc(&amiga_a3000_scsi_template, sizeof(struct a3000_hostdata)); if (!instance) { error = -ENOMEM; goto fail_alloc; } instance->irq = IRQ_AMIGA_PORTS; regs = (struct a3000_scsiregs *)ZTWO_VADDR(res->start); regs->DAWR = DAWR_A3000; wdregs.SASR = ®s->SASR; wdregs.SCMD = ®s->SCMD; hdata = shost_priv(instance); hdata->wh.no_sync = 0xff; hdata->wh.fast = 0; hdata->wh.dma_mode = CTRL_DMA; hdata->regs = regs; wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_12_15); error = request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED, "A3000 SCSI", instance); if (error) goto fail_irq; regs->CNTR = CNTR_PDMD | CNTR_INTEN; error = scsi_add_host(instance, NULL); if (error) goto fail_host; platform_set_drvdata(pdev, instance); scsi_scan_host(instance); return 0; fail_host: free_irq(IRQ_AMIGA_PORTS, instance); fail_irq: scsi_host_put(instance); fail_alloc: release_mem_region(res->start, resource_size(res)); return error; }
/**
 * esp_mac_probe - probe an on-board Macintosh (Quadra) ESP SCSI controller
 * @dev: platform device; dev->id selects controller 0 or 1 (max two)
 *
 * Allocates the Scsi_Host and esp state, sets up the register base, clock
 * frequency and PDMA pointers according to the Quadra variant, registers a
 * single shared IRQ handler for both possible controllers, and hands the
 * chip to the ESP core via scsi_esp_register().  Returns 0 on success or a
 * negative errno, unwinding via the fail_* labels.
 */
static int __devinit esp_mac_probe(struct platform_device *dev)
{
	struct scsi_host_template *tpnt = &scsi_esp_template;
	struct Scsi_Host *host;
	struct esp *esp;
	int err;
	struct mac_esp_priv *mep;

	if (!MACH_IS_MAC)
		return -ENODEV;

	/* At most two controllers are supported (esp_chips[] has two slots). */
	if (dev->id > 1)
		return -ENODEV;

	host = scsi_host_alloc(tpnt, sizeof(struct esp));

	err = -ENOMEM;
	if (!host)
		goto fail;

	host->max_id = 8;
	host->use_clustering = DISABLE_CLUSTERING;
	esp = shost_priv(host);

	esp->host = host;
	esp->dev = dev;

	esp->command_block = kzalloc(16, GFP_KERNEL);
	if (!esp->command_block)
		goto fail_unlink;
	/* NOTE(review): casts the virtual address straight to dma_addr_t --
	 * presumably virt == phys on these machines; confirm. */
	esp->command_block_dma = (dma_addr_t)esp->command_block;

	esp->scsi_id = 7;
	host->this_id = esp->scsi_id;
	esp->scsi_id_mask = 1 << esp->scsi_id;

	mep = kzalloc(sizeof(struct mac_esp_priv), GFP_KERNEL);
	if (!mep)
		goto fail_free_command_block;
	mep->esp = esp;
	platform_set_drvdata(dev, mep);

	/* Per-variant register base, clock and PDMA setup. */
	switch (macintosh_config->scsi_type) {
	case MAC_SCSI_QUADRA:
		esp->cfreq = 16500000;
		esp->regs = (void __iomem *)MAC_ESP_REGS_QUADRA;
		mep->pdma_io = esp->regs + MAC_ESP_PDMA_IO_OFFSET;
		mep->pdma_regs = NULL;
		break;
	case MAC_SCSI_QUADRA2:
		esp->cfreq = 25000000;
		esp->regs = (void __iomem *)(MAC_ESP_REGS_QUADRA2 +
			     dev->id * MAC_ESP_REGS_SPACING);
		mep->pdma_io = esp->regs + MAC_ESP_PDMA_IO_OFFSET;
		mep->pdma_regs = (void __iomem *)(MAC_ESP_PDMA_REG +
				  dev->id * MAC_ESP_PDMA_REG_SPACING);
		nubus_writel(0x1d1, mep->pdma_regs);
		break;
	case MAC_SCSI_QUADRA3:
		/* These quadras have a real DMA controller (the PSC) but we
		 * don't know how to drive it so we must use PIO instead.
		 */
		esp->cfreq = 25000000;
		esp->regs = (void __iomem *)MAC_ESP_REGS_QUADRA3;
		mep->pdma_io = NULL;
		mep->pdma_regs = NULL;
		break;
	}

	esp->ops = &mac_esp_ops;
	if (mep->pdma_io == NULL) {
		/* No PDMA available: fall back to PIO and disable sync. */
		printk(KERN_INFO PFX "using PIO for controller %d\n", dev->id);
		esp_write8(0, ESP_TCLOW);
		esp_write8(0, ESP_TCMED);
		esp->flags = ESP_FLAG_DISABLE_SYNC;
		mac_esp_ops.send_dma_cmd = mac_esp_send_pio_cmd;
	} else {
		printk(KERN_INFO PFX "using PDMA for controller %d\n", dev->id);
	}

	host->irq = IRQ_MAC_SCSI;

	/* Publish this chip before checking the sibling slot; the barrier
	 * orders the store against the read of esp_chips[!dev->id]. */
	esp_chips[dev->id] = esp;
	mb();
	/* Only the first probed controller installs the shared handler. */
	if (esp_chips[!dev->id] == NULL) {
		err = request_irq(host->irq, mac_scsi_esp_intr, 0, "ESP", NULL);
		if (err < 0) {
			esp_chips[dev->id] = NULL;
			goto fail_free_priv;
		}
	}

	err = scsi_esp_register(esp, &dev->dev);
	if (err)
		goto fail_free_irq;

	return 0;

fail_free_irq:
	/* Free the IRQ only if this probe was the one that requested it. */
	if (esp_chips[!dev->id] == NULL)
		free_irq(host->irq, esp);
fail_free_priv:
	kfree(mep);
fail_free_command_block:
	kfree(esp->command_block);
fail_unlink:
	scsi_host_put(host);
fail:
	return err;
}