/*
 * Called from SCSI EH process context to issue a LUN_RESET TMR
 * to struct scsi_device
 */
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_tpg *tl_tpg;
	int ret = FAILED;
	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	/*
	 * Locate the tl_nexus and se_sess pointers
	 */
	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus) {
		pr_err("Unable to perform device reset without"
				" active I_T Nexus\n");
		return FAILED;
	}
	/*
	 * Locate the tl_tpg pointer from TargetID in sc->device->id
	 */
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
				 0, TMR_LUN_RESET);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
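/*
 * Context for the double dereference above: a minimal sketch, assuming the
 * tcm_loop host was allocated with room for (at least) one pointer in its
 * private area. At probe time the driver stores the tcm_loop_hba pointer
 * there, so every later lookup must dereference through it. The helper
 * name below is hypothetical, for illustration only.
 */
static void tcm_loop_sketch_attach_hba(struct Scsi_Host *sh,
				       struct tcm_loop_hba *tl_hba)
{
	/* store the pointer in the Scsi_Host private area... */
	*((struct tcm_loop_hba **)shost_priv(sh)) = tl_hba;
}

/* ...so callers recover it with the matching double-pointer cast:
 *	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 */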
/*
 * fnic_reset_host_stats : clears host stats
 * note : called when reset_statistics set under sysfs dir
 */
static void fnic_reset_host_stats(struct Scsi_Host *host)
{
	int ret;
	struct fc_lport *lp = shost_priv(host);
	struct fnic *fnic = lport_priv(lp);
	struct fc_host_statistics *stats;
	unsigned long flags;

	/* dump current stats, before clearing them */
	stats = fnic_get_stats(host);
	fnic_dump_fchost_stats(host, stats);

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	ret = vnic_dev_stats_clear(fnic->vdev);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (ret) {
		FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host,
			      "fnic: Reset vnic stats failed"
			      " 0x%x", ret);
		return;
	}
	fnic->stats_reset_time = jiffies;
	memset(stats, 0, sizeof(*stats));
}
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
	struct Scsi_Host *instance = cmd->device->host;
	struct WD33C93_hostdata *hdata = shost_priv(instance);
	unsigned char flags = 0x01;
	unsigned long addr = virt_to_bus(cmd->SCp.ptr);

	/* setup dma direction */
	if (!dir_in)
		flags |= 0x04;

	/* remember direction */
	hdata->dma_dir = dir_in;

	if (dir_in) {
		/* invalidate any cache */
		cache_clear(addr, cmd->SCp.this_residual);
	} else {
		/* push any dirty cache */
		cache_push(addr, cmd->SCp.this_residual);
	}

	/* start DMA */
	m147_pcc->dma_bcr = cmd->SCp.this_residual | (1 << 24);
	m147_pcc->dma_dadr = addr;
	m147_pcc->dma_cntrl = flags;

	/* return success */
	return 0;
}
static void pvscsi_shutdown(struct pci_dev *dev)
{
	struct Scsi_Host *host = pci_get_drvdata(dev);
	struct pvscsi_adapter *adapter = shost_priv(host);

	__pvscsi_shutdown(adapter);
}
static int pvscsi_abort(struct scsi_cmnd *cmd)
{
	struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
	struct pvscsi_ctx *ctx;
	unsigned long flags;

	scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
		    adapter->host->host_no, cmd);

	spin_lock_irqsave(&adapter->hw_lock, flags);

	/*
	 * Poll the completion ring first - we might be trying to abort
	 * a command that is waiting to be dispatched in the completion ring.
	 */
	pvscsi_process_completion_ring(adapter);

	/*
	 * If there is no context for the command, it either already succeeded
	 * or else was never properly issued. Not our problem.
	 */
	ctx = pvscsi_find_context(adapter, cmd);
	if (!ctx) {
		scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd);
		goto out;
	}

	pvscsi_abort_cmd(adapter, ctx);

	pvscsi_process_completion_ring(adapter);

out:
	spin_unlock_irqrestore(&adapter->hw_lock, flags);
	return SUCCESS;
}
static void g_NCR5380_trigger_irq(struct Scsi_Host *instance)
{
	struct NCR5380_hostdata *hostdata = shost_priv(instance);

	/*
	 * An interrupt is triggered whenever BSY = false, SEL = true
	 * and a bit set in the SELECT_ENABLE_REG is asserted on the
	 * SCSI bus.
	 *
	 * Note that the bus is only driven when the phase control signals
	 * (I/O, C/D, and MSG) match those in the TCR.
	 */
	NCR5380_write(TARGET_COMMAND_REG,
		      PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK));
	NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
	NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
	NCR5380_write(INITIATOR_COMMAND_REG,
		      ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL);

	msleep(1);

	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
	NCR5380_write(SELECT_ENABLE_REG, 0);
	NCR5380_write(TARGET_COMMAND_REG, 0);
}
static int pvscsi_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct pvscsi_adapter *adapter = shost_priv(host);
	struct pvscsi_ctx *ctx;
	unsigned long flags;

	spin_lock_irqsave(&adapter->hw_lock, flags);

	ctx = pvscsi_acquire_context(adapter, cmd);
	if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) {
		if (ctx)
			pvscsi_release_context(adapter, ctx);
		spin_unlock_irqrestore(&adapter->hw_lock, flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	cmd->scsi_done = done;

	dev_dbg(&cmd->device->sdev_gendev,
		"queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]);

	spin_unlock_irqrestore(&adapter->hw_lock, flags);

	pvscsi_kick_io(adapter, cmd->cmnd[0]);

	return 0;
}
/*
 * csio_get_host_speed - Return link speed to FC transport.
 * @shost: scsi host.
 *
 */
static void
csio_get_host_speed(struct Scsi_Host *shost)
{
	struct csio_lnode *ln = shost_priv(shost);
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	spin_lock_irq(&hw->lock);
	switch (hw->pport[ln->portid].link_speed) {
	case FW_PORT_CAP32_SPEED_1G:
		fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
		break;
	case FW_PORT_CAP32_SPEED_10G:
		fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
		break;
	case FW_PORT_CAP32_SPEED_25G:
		fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
		break;
	case FW_PORT_CAP32_SPEED_40G:
		fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
		break;
	case FW_PORT_CAP32_SPEED_50G:
		fc_host_speed(shost) = FC_PORTSPEED_50GBIT;
		break;
	case FW_PORT_CAP32_SPEED_100G:
		fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
		break;
	default:
		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
		break;
	}
	spin_unlock_irq(&hw->lock);
}
static int ufs_test_run_multi_query_test(struct test_data *td)
{
	int i;
	struct scsi_device *sdev;
	struct ufs_hba *hba;

	BUG_ON(!td || !td->req_q || !td->req_q->queuedata);
	sdev = (struct scsi_device *)td->req_q->queuedata;
	BUG_ON(!sdev->host);
	hba = shost_priv(sdev->host);
	BUG_ON(!hba);

	atomic_set(&utd->outstanding_threads, 0);
	utd->fail_threads = 0;
	init_completion(&utd->outstanding_complete);
	for (i = 0; i < MAX_PARALLEL_QUERIES; ++i) {
		atomic_inc(&utd->outstanding_threads);
		async_schedule(ufs_test_random_async_query, hba);
	}

	if (!wait_for_completion_timeout(&utd->outstanding_complete,
					 THREADS_COMPLETION_TIMOUT)) {
		pr_err("%s: Multi-query test timed out, %d threads left\n",
		       __func__,
		       atomic_read(&utd->outstanding_threads));
	}
	test_iosched_mark_test_completion();
	return 0;
}
static inline int ufs_test_pm_runtime_cfg_sync(struct test_iosched *tios,
					       bool enable)
{
	struct scsi_device *sdev;
	struct ufs_hba *hba;
	int ret;

	BUG_ON(!tios || !tios->req_q || !tios->req_q->queuedata);
	sdev = (struct scsi_device *)tios->req_q->queuedata;
	BUG_ON(!sdev->host);
	hba = shost_priv(sdev->host);
	BUG_ON(!hba);

	if (enable) {
		ret = pm_runtime_get_sync(hba->dev);
		/* Positive non-zero return values are not errors */
		if (ret < 0) {
			pr_err("%s: pm_runtime_get_sync failed, ret=%d\n",
			       __func__, ret);
			return ret;
		}
		return 0;
	}
	pm_runtime_put_sync(hba->dev);
	return 0;
}
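/*
 * A hypothetical caller, sketching the intended pairing: hold a runtime-PM
 * reference across a test run, then drop it. run_one_test() is a
 * placeholder, not a real helper in this file.
 */
static int ufs_test_with_pm_held(struct test_iosched *tios)
{
	int ret = ufs_test_pm_runtime_cfg_sync(tios, true);

	if (ret)
		return ret;
	ret = run_one_test(tios);	/* hypothetical test body */
	ufs_test_pm_runtime_cfg_sync(tios, false);
	return ret;
}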
static int macscsi_pwrite(struct Scsi_Host *instance,
			  unsigned char *src, int len)
{
	struct NCR5380_hostdata *hostdata = shost_priv(instance);
	unsigned char *s;
	unsigned char *d;

	NCR5380_local_declare();
	NCR5380_setup(instance);

	s = src;
	d = hostdata->pdma_base + (OUTPUT_DATA_REG << 4);

	/* These conditions are derived from MacOS */
	while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) &&
	       (!(NCR5380_read(STATUS_REG) & SR_REQ) ||
		(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)))
		;
	if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ)) {
		pr_err("Error in macscsi_pwrite\n");
		return -1;
	}

	/* CP_MEM_TO_IO copies the buffer and leaves any unwritten residual
	 * in len; a nonzero residual means the pseudo-DMA transfer faulted.
	 */
	CP_MEM_TO_IO(s, d, len);
	if (len != 0) {
		pr_notice("Bus error in macscsi_pwrite\n");
		return -1;
	}

	return 0;
}
/*
 * Main entry point from struct scsi_host_template for incoming SCSI CDB+Data
 * from Linux/SCSI subsystem for SCSI low level device drivers (LLDs)
 */
static int tcm_loop_queuecommand(
	struct Scsi_Host *sh,
	struct scsi_cmnd *sc)
{
	struct se_cmd *se_cmd;
	struct se_portal_group *se_tpg;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
		" scsi_buf_len: %u\n", sc->device->host->host_no,
		sc->device->id, sc->device->channel, sc->device->lun,
		sc->cmnd[0], scsi_bufflen(sc));
	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	se_tpg = &tl_tpg->tl_se_tpg;
	/*
	 * Determine the SAM Task Attribute and allocate tl_cmd and
	 * tl_cmd->tl_se_cmd from TCM infrastructure
	 */
	se_cmd = tcm_loop_allocate_core_cmd(tl_hba, se_tpg, sc);
	if (!se_cmd) {
		sc->scsi_done(sc);
		return 0;
	}
	/*
	 * Queue up the newly allocated se_cmd to be processed in TCM
	 * thread context.
	 */
	transport_generic_handle_cdb_map(se_cmd);
	return 0;
}
/**************************************************************************
 * qla2x00_print_scsi_cmd
 *	Dumps out info about the scsi cmd and srb.
 * Input
 *	cmd : struct scsi_cmnd
 **************************************************************************/
void qla2x00_print_scsi_cmd(struct scsi_cmnd *cmd)
{
	int i;
	struct scsi_qla_host *ha;
	srb_t *sp;

	ha = shost_priv(cmd->device->host);
	sp = (srb_t *)cmd->SCp.ptr;

	printk("SCSI Command @=0x%p, Handle=0x%p\n", cmd, cmd->host_scribble);
	printk(" chan=0x%02x, target=0x%02x, lun=0x%02x, cmd_len=0x%02x\n",
	       cmd->device->channel, cmd->device->id, cmd->device->lun,
	       cmd->cmd_len);
	printk(" CDB: ");
	for (i = 0; i < cmd->cmd_len; i++)
		printk("0x%02x ", cmd->cmnd[i]);
	printk("\n seg_cnt=%d, allowed=%d, retries=%d\n",
	       scsi_sg_count(cmd), cmd->allowed, cmd->retries);
	printk(" request buffer=0x%p, request buffer len=0x%x\n",
	       scsi_sglist(cmd), scsi_bufflen(cmd));
	printk(" tag=%d, transfersize=0x%x\n",
	       cmd->tag, cmd->transfersize);
	printk(" serial_number=%lx, SP=%p\n", cmd->serial_number, sp);
	printk(" data direction=%d\n", cmd->sc_data_direction);

	if (!sp)
		return;

	printk(" sp flags=0x%x\n", sp->flags);
}
static void fnic_get_host_speed(struct Scsi_Host *shost)
{
	struct fc_lport *lp = shost_priv(shost);
	struct fnic *fnic = lport_priv(lp);
	u32 port_speed = vnic_dev_port_speed(fnic->vdev);

	/* Add in other values as they get defined in fw */
	switch (port_speed) {
	case DCEM_PORTSPEED_10G:
		fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
		break;
	case DCEM_PORTSPEED_20G:
		fc_host_speed(shost) = FC_PORTSPEED_20GBIT;
		break;
	case DCEM_PORTSPEED_25G:
		fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
		break;
	case DCEM_PORTSPEED_40G:
	case DCEM_PORTSPEED_4x10G:
		fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
		break;
	case DCEM_PORTSPEED_100G:
		fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
		break;
	default:
		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
		break;
	}
}
static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0;
	int rc;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}
	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;
	}

	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
			scsi_bufflen(sc), tcm_loop_sam_attr(sc),
			sc->sc_data_direction, 0,
			scsi_sglist(sc), scsi_sg_count(sc),
			sgl_bidi, sgl_bidi_count);
	if (rc < 0) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	return;

out_done:
	sc->scsi_done(sc);
}
/**
 * fc_lport_bsg_request() - The common entry point for sending
 *			    FC Passthrough requests
 * @job: The BSG passthrough job
 */
int fc_lport_bsg_request(struct fc_bsg_job *job)
{
	struct request *rsp = job->req->next_rq;
	struct Scsi_Host *shost = job->shost;
	struct fc_lport *lport = shost_priv(shost);
	struct fc_rport *rport;
	struct fc_rport_priv *rdata;
	int rc = -EINVAL;
	u32 did;

	job->reply->reply_payload_rcv_len = 0;
	if (rsp)
		rsp->resid_len = job->reply_payload.payload_len;

	mutex_lock(&lport->lp_mutex);

	switch (job->request->msgcode) {
	case FC_BSG_RPT_ELS:
		rport = job->rport;
		if (!rport)
			break;

		rdata = rport->dd_data;
		rc = fc_lport_els_request(job, lport, rport->port_id,
					  rdata->e_d_tov);
		break;

	case FC_BSG_RPT_CT:
		rport = job->rport;
		if (!rport)
			break;

		rdata = rport->dd_data;
		rc = fc_lport_ct_request(job, lport, rport->port_id,
					 rdata->e_d_tov);
		break;

	case FC_BSG_HST_CT:
		did = ntoh24(job->request->rqst_data.h_ct.port_id);
		if (did == FC_FID_DIR_SERV)
			rdata = lport->dns_rdata;
		else
			rdata = lport->tt.rport_lookup(lport, did);

		if (!rdata)
			break;

		rc = fc_lport_ct_request(job, lport, did, rdata->e_d_tov);
		break;

	case FC_BSG_HST_ELS_NOLOGIN:
		did = ntoh24(job->request->rqst_data.h_els.port_id);
		rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
		break;
	}

	mutex_unlock(&lport->lp_mutex);
	return rc;
}
static int __devinit a2091_probe(struct zorro_dev *z,
				 const struct zorro_device_id *ent)
{
	struct Scsi_Host *instance;
	int error;
	struct a2091_scsiregs *regs;
	wd33c93_regs wdregs;
	struct a2091_hostdata *hdata;

	if (!request_mem_region(z->resource.start, 256, "wd33c93"))
		return -EBUSY;

	instance = scsi_host_alloc(&a2091_scsi_template,
				   sizeof(struct a2091_hostdata));
	if (!instance) {
		error = -ENOMEM;
		goto fail_alloc;
	}

	instance->irq = IRQ_AMIGA_PORTS;
	instance->unique_id = z->slotaddr;

	regs = (struct a2091_scsiregs *)ZTWO_VADDR(z->resource.start);
	regs->DAWR = DAWR_A2091;

	wdregs.SASR = &regs->SASR;
	wdregs.SCMD = &regs->SCMD;

	hdata = shost_priv(instance);
	hdata->wh.no_sync = 0xff;
	hdata->wh.fast = 0;
	hdata->wh.dma_mode = CTRL_DMA;
	hdata->regs = regs;

	wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_8_10);
	error = request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED,
			    "A2091 SCSI", instance);
	if (error)
		goto fail_irq;

	regs->CNTR = CNTR_PDMD | CNTR_INTEN;

	error = scsi_add_host(instance, NULL);
	if (error)
		goto fail_host;

	zorro_set_drvdata(z, instance);

	scsi_scan_host(instance);
	return 0;

fail_host:
	free_irq(IRQ_AMIGA_PORTS, instance);
fail_irq:
	scsi_host_put(instance);
fail_alloc:
	release_mem_region(z->resource.start, 256);
	return error;
}
static int __init mvme147_init(void)
{
	wd33c93_regs regs;
	struct WD33C93_hostdata *hdata;
	int error = -ENOMEM;

	if (!MACH_IS_MVME147)
		return 0;

	mvme147_shost = scsi_host_alloc(&mvme147_host_template,
					sizeof(struct WD33C93_hostdata));
	if (!mvme147_shost)
		goto err_out;
	mvme147_shost->base = 0xfffe4000;
	mvme147_shost->irq = MVME147_IRQ_SCSI_PORT;

	regs.SASR = (volatile unsigned char *)0xfffe4000;
	regs.SCMD = (volatile unsigned char *)0xfffe4001;

	hdata = shost_priv(mvme147_shost);
	hdata->no_sync = 0xff;
	hdata->fast = 0;
	hdata->dma_mode = CTRL_DMA;

	wd33c93_init(mvme147_shost, regs, dma_setup, dma_stop, WD33C93_FS_8_10);

	error = request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0,
			    "MVME147 SCSI PORT", mvme147_shost);
	if (error)
		goto err_unregister;
	error = request_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr, 0,
			    "MVME147 SCSI DMA", mvme147_shost);
	if (error)
		goto err_free_irq;
#if 0	/* Disabled; causes problems booting */
	m147_pcc->scsi_interrupt = 0x10;	/* Assert SCSI bus reset */
	udelay(100);
	m147_pcc->scsi_interrupt = 0x00;	/* Negate SCSI bus reset */
	udelay(2000);
	m147_pcc->scsi_interrupt = 0x40;	/* Clear bus reset interrupt */
#endif
	m147_pcc->scsi_interrupt = 0x09;	/* Enable interrupt */

	m147_pcc->dma_cntrl = 0x00;	/* ensure DMA is stopped */
	m147_pcc->dma_intr = 0x89;	/* Ack and enable ints */

	error = scsi_add_host(mvme147_shost, NULL);
	if (error)
		goto err_free_irq;
	scsi_scan_host(mvme147_shost);
	return 0;

err_free_irq:
	free_irq(MVME147_IRQ_SCSI_PORT, mvme147_shost);
err_unregister:
	scsi_host_put(mvme147_shost);
err_out:
	return error;
}
static int dmx3191d_probe_one(struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	struct Scsi_Host *shost;
	struct NCR5380_hostdata *hostdata;
	unsigned long io;
	int error = -ENODEV;

	if (pci_enable_device(pdev))
		goto out;

	io = pci_resource_start(pdev, 0);
	if (!request_region(io, DMX3191D_REGION_LEN, DMX3191D_DRIVER_NAME)) {
		printk(KERN_ERR "dmx3191: region 0x%lx-0x%lx already reserved\n",
		       io, io + DMX3191D_REGION_LEN);
		goto out_disable_device;
	}

	shost = scsi_host_alloc(&dmx3191d_driver_template,
				sizeof(struct NCR5380_hostdata));
	if (!shost)
		goto out_release_region;

	hostdata = shost_priv(shost);
	hostdata->base = io;

	/* This card does not seem to raise an interrupt on pdev->irq.
	 * Steam-powered SCSI controllers run without an IRQ anyway.
	 */
	shost->irq = NO_IRQ;

	error = NCR5380_init(shost, 0);
	if (error)
		goto out_host_put;

	NCR5380_maybe_reset_bus(shost);

	pci_set_drvdata(pdev, shost);

	error = scsi_add_host(shost, &pdev->dev);
	if (error)
		goto out_exit;

	scsi_scan_host(shost);
	return 0;

out_exit:
	NCR5380_exit(shost);
out_host_put:
	scsi_host_put(shost);
out_release_region:
	release_region(io, DMX3191D_REGION_LEN);
out_disable_device:
	pci_disable_device(pdev);
out:
	return error;
}
void fc_get_host_port_state(struct Scsi_Host *shost)
{
	struct fc_lport *lp = shost_priv(shost);

	if (lp->link_up)
		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
	else
		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
}
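/*
 * A minimal sketch of how an FC LLD typically wires this helper into the
 * FC transport class. The template name is illustrative; the fields are
 * the standard ones from scsi_transport_fc.h.
 */
static struct fc_function_template example_fc_transport_functions = {
	.get_host_port_state = fc_get_host_port_state,
	.show_host_port_state = 1,
};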
/*
 * csio_get_host_port_id - The sysfs entry for nport_id is
 * populated/cached from this function
 */
static void
csio_get_host_port_id(struct Scsi_Host *shost)
{
	struct csio_lnode *ln = shost_priv(shost);
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	spin_lock_irq(&hw->lock);
	fc_host_port_id(shost) = ln->nport_id;
	spin_unlock_irq(&hw->lock);
}
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
		     int status)
{
	struct a3000_hostdata *hdata = shost_priv(instance);
	struct WD33C93_hostdata *wh = &hdata->wh;
	struct a3000_scsiregs *regs = hdata->regs;

	/* disable SCSI interrupts */
	unsigned short cntr = CNTR_PDMD;

	if (!wh->dma_dir)
		cntr |= CNTR_DDIR;

	regs->CNTR = cntr;
	mb();			/* make sure CNTR is updated before next IO */

	/* flush if we were reading */
	if (wh->dma_dir) {
		regs->FLUSH = 1;
		mb();		/* don't allow prefetch */
		while (!(regs->ISTR & ISTR_FE_FLG))
			barrier();
		mb();		/* no IO until FLUSH is done */
	}

	/* clear a possible interrupt */
	/* I think that this CINT is only necessary if you are
	 * using the terminal count features.   HM 7 Mar 1994
	 */
	regs->CINT = 1;

	/* stop DMA */
	regs->SP_DMA = 1;
	mb();			/* make sure DMA is stopped before next IO */

	/* restore the CONTROL bits (minus the direction flag) */
	regs->CNTR = CNTR_PDMD | CNTR_INTEN;
	mb();			/* make sure CNTR is updated before next IO */

	/* copy from a bounce buffer, if necessary */
	if (status && wh->dma_bounce_buffer) {
		if (SCpnt && wh->dma_dir)
			memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
			       SCpnt->SCp.this_residual);
		kfree(wh->dma_bounce_buffer);
		wh->dma_bounce_buffer = NULL;
		wh->dma_bounce_len = 0;
	}
}
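/*
 * A sketch of the setup-side counterpart to the bounce-buffer copy-back
 * above, assuming the usual WD33C93 pattern: data the DMA engine cannot
 * reach is staged through a kmalloc'd buffer, pre-filled for writes and
 * copied back after reads. addr_is_dma_safe() is a hypothetical predicate
 * standing in for the driver's real address check, and the allocation
 * flags depend on the calling context.
 */
static int sketch_dma_setup_bounce(struct scsi_cmnd *cmd,
				   struct WD33C93_hostdata *wh, int dir_in)
{
	if (!addr_is_dma_safe(virt_to_bus(cmd->SCp.ptr))) {
		wh->dma_bounce_len = cmd->SCp.this_residual;
		wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
						GFP_KERNEL);
		if (!wh->dma_bounce_buffer)
			return 1;	/* caller falls back to PIO */
		if (!dir_in)	/* pre-fill for a write to the device */
			memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
			       cmd->SCp.this_residual);
	}
	return 0;
}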
int mvme147_detect(struct scsi_host_template *tpnt)
{
	static unsigned char called = 0;
	struct Scsi_Host *instance;
	wd33c93_regs regs;
	struct WD33C93_hostdata *hdata;

	if (!MACH_IS_MVME147 || called)
		return 0;
	called++;

	tpnt->proc_name = "MVME147";
	tpnt->proc_info = &wd33c93_proc_info;

	instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
	if (!instance)
		goto err_out;

	instance->base = 0xfffe4000;
	instance->irq = MVME147_IRQ_SCSI_PORT;

	regs.SASR = (volatile unsigned char *)0xfffe4000;
	regs.SCMD = (volatile unsigned char *)0xfffe4001;

	hdata = shost_priv(instance);
	hdata->no_sync = 0xff;
	hdata->fast = 0;
	hdata->dma_mode = CTRL_DMA;

	wd33c93_init(instance, regs, dma_setup, dma_stop, WD33C93_FS_8_10);

	if (request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0,
			"MVME147 SCSI PORT", instance))
		goto err_unregister;
	if (request_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr, 0,
			"MVME147 SCSI DMA", instance))
		goto err_free_irq;
#if 0	/* Disabled; causes problems booting */
	m147_pcc->scsi_interrupt = 0x10;	/* Assert SCSI bus reset */
	udelay(100);
	m147_pcc->scsi_interrupt = 0x00;	/* Negate SCSI bus reset */
	udelay(2000);
	m147_pcc->scsi_interrupt = 0x40;	/* Clear bus reset interrupt */
#endif
	m147_pcc->scsi_interrupt = 0x09;	/* Enable interrupt */

	m147_pcc->dma_cntrl = 0x00;	/* ensure DMA is stopped */
	m147_pcc->dma_intr = 0x89;	/* Ack and enable ints */

	return 1;

err_free_irq:
	/* dev_id must match the cookie passed to request_irq() above */
	free_irq(MVME147_IRQ_SCSI_PORT, instance);
err_unregister:
	scsi_unregister(instance);
err_out:
	return 0;
}
static void __devexit a2091_remove(struct zorro_dev *z)
{
	struct Scsi_Host *instance = zorro_get_drvdata(z);
	struct a2091_hostdata *hdata = shost_priv(instance);

	hdata->regs->CNTR = 0;
	scsi_remove_host(instance);
	free_irq(IRQ_AMIGA_PORTS, instance);
	scsi_host_put(instance);
	release_mem_region(z->resource.start, 256);
}
static int ufs_fmp_run(struct device *dev, uint32_t mode, uint8_t *data,
		       uint32_t len, uint32_t write)
{
	int ret = 0;
	struct ufs_hba *hba;
	struct ufs_fmp_work *work;
	struct Scsi_Host *host;
	static struct buffer_head *bh;

	work = dev_get_drvdata(dev);
	if (!work) {
		dev_err(dev, "Fail to get work from platform device\n");
		return -ENODEV;
	}
	host = work->host;
	hba = shost_priv(host);
	hba->self_test_mode = mode;

	bh = __getblk(work->bdev, work->sector, FMP_BLK_SIZE);
	if (!bh) {
		dev_err(dev, "Fail to get block from bdev\n");
		return -ENODEV;
	}
	hba->self_test_bh = bh;

	get_bh(bh);
	if (write == WRITE_MODE) {
		memcpy(bh->b_data, data, len);
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			dev_err(dev, "IO error syncing for FMP fips write\n");
			ret = -EIO;
			goto out;
		}
		memset(bh->b_data, 0, FMP_BLK_SIZE);
	} else {
		lock_buffer(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ_SYNC, bh);
		wait_on_buffer(bh);
		if (unlikely(!buffer_uptodate(bh))) {
			ret = -EIO;
			goto out;
		}
		memcpy(data, bh->b_data, len);
	}
out:
	hba->self_test_mode = 0;
	hba->self_test_bh = NULL;
	put_bh(bh);
	return ret;
}
static __devexit void zorro7xx_remove_one(struct zorro_dev *z)
{
	struct Scsi_Host *host = zorro_get_drvdata(z);
	struct NCR_700_Host_Parameters *hostdata = shost_priv(host);

	scsi_remove_host(host);
	NCR_700_release(host);
	kfree(hostdata);
	free_irq(host->irq, host);
	zorro_release_device(z);
}
static int pvscsi_host_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct pvscsi_adapter *adapter = shost_priv(host);
	unsigned long flags;
	bool use_msg;

	scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n");

	spin_lock_irqsave(&adapter->hw_lock, flags);

	use_msg = adapter->use_msg;

	if (use_msg) {
		adapter->use_msg = 0;
		spin_unlock_irqrestore(&adapter->hw_lock, flags);

		/*
		 * Now that we know that the ISR won't add more work on the
		 * workqueue we can safely flush any outstanding work.
		 */
		flush_workqueue(adapter->workqueue);
		spin_lock_irqsave(&adapter->hw_lock, flags);
	}

	/*
	 * We're going to tear down the entire ring structure and set it back
	 * up, so stall new requests until all completions are flushed and
	 * the rings are back in place.
	 */
	pvscsi_process_request_ring(adapter);

	ll_adapter_reset(adapter);

	/*
	 * Now process any completions. Note we do this AFTER adapter reset,
	 * which is strange, but stops races where completions get posted
	 * between processing the ring and issuing the reset. The backend will
	 * not touch the ring memory after reset, so the immediately pre-reset
	 * completion ring state is still valid.
	 */
	pvscsi_process_completion_ring(adapter);

	pvscsi_reset_all(adapter);
	adapter->use_msg = use_msg;
	pvscsi_setup_all_rings(adapter);
	pvscsi_unmask_intr(adapter);

	spin_unlock_irqrestore(&adapter->hw_lock, flags);

	return SUCCESS;
}
static const char *pvscsi_info(struct Scsi_Host *host)
{
	struct pvscsi_adapter *adapter = shost_priv(host);
	static char buf[256];

	sprintf(buf, "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: "
		"%u/%u/%u pages, cmd_per_lun=%u", adapter->rev,
		adapter->req_pages, adapter->cmp_pages, adapter->msg_pages,
		pvscsi_template.cmd_per_lun);

	return buf;
}
static int __exit amiga_a3000_scsi_remove(struct platform_device *pdev)
{
	struct Scsi_Host *instance = platform_get_drvdata(pdev);
	struct a3000_hostdata *hdata = shost_priv(instance);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	hdata->regs->CNTR = 0;
	scsi_remove_host(instance);
	free_irq(IRQ_AMIGA_PORTS, instance);
	scsi_host_put(instance);
	release_mem_region(res->start, resource_size(res));
	return 0;
}
/*
 * csio_get_host_port_type - Return FC local port type.
 * @shost: scsi host.
 *
 */
static void
csio_get_host_port_type(struct Scsi_Host *shost)
{
	struct csio_lnode *ln = shost_priv(shost);
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	spin_lock_irq(&hw->lock);
	if (csio_is_npiv_ln(ln))
		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
	else
		fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
	spin_unlock_irq(&hw->lock);
}