static int
bt_pci_attach(device_t dev)
{
        struct bt_softc *bt = device_get_softc(dev);
        int opri;
        int error;

        /* Initialize softc */
        error = bt_pci_alloc_resources(dev);
        if (error) {
                device_printf(dev,
                    "can't allocate resources in bt_pci_attach\n");
                return error;
        }

        /* Allocate a dmatag for our CCB DMA maps */
        /* XXX Should be a child of the PCI bus dma tag */
        if (bus_dma_tag_create( /* parent    */ NULL,
                                /* alignment */ 1,
                                /* boundary  */ 0,
                                /* lowaddr   */ BUS_SPACE_MAXADDR_32BIT,
                                /* highaddr  */ BUS_SPACE_MAXADDR,
                                /* filter    */ NULL,
                                /* filterarg */ NULL,
                                /* maxsize   */ BUS_SPACE_MAXSIZE_32BIT,
                                /* nsegments */ ~0,
                                /* maxsegsz  */ BUS_SPACE_MAXSIZE_32BIT,
                                /* flags     */ 0,
                                /* lockfunc  */ busdma_lock_mutex,
                                /* lockarg   */ &Giant,
                                &bt->parent_dmat) != 0) {
                bt_pci_release_resources(dev);
                return (ENOMEM);
        }

        /*
         * Protect ourselves from spurious interrupts during
         * initialization and attach.  We should really rely
         * on interrupts during attach, but we don't have
         * access to our interrupts during ISA probes, so until
         * that changes, we mask our interrupts during attach
         * too.
         */
        opri = splcam();

        if (bt_probe(dev) || bt_fetch_adapter_info(dev) || bt_init(dev)) {
                bt_pci_release_resources(dev);
                splx(opri);
                return (ENXIO);
        }

        error = bt_attach(dev);
        splx(opri);

        if (error) {
                bt_pci_release_resources(dev);
                return (error);
        }

        return (0);
}

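/*
 * A minimal sketch of how the XXX above could be resolved, assuming a
 * later FreeBSD version that provides bus_get_dma_tag(9).  Deriving the
 * tag from the PCI parent (instead of passing a NULL parent) lets
 * bus-level address restrictions propagate to every CCB map created
 * from it.  This is an assumption about a newer busdma API, not part
 * of the original driver.
 */
#if 0	/* illustrative only */
        if (bus_dma_tag_create(bus_get_dma_tag(dev), /* inherit PCI bus tag */
            /* alignment */ 1, /* boundary */ 0,
            /* lowaddr */ BUS_SPACE_MAXADDR_32BIT,
            /* highaddr */ BUS_SPACE_MAXADDR,
            /* filter, filterarg */ NULL, NULL,
            /* maxsize */ BUS_SPACE_MAXSIZE_32BIT, /* nsegments */ ~0,
            /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ 0,
            busdma_lock_mutex, &Giant, &bt->parent_dmat) != 0) {
                bt_pci_release_resources(dev);
                return (ENOMEM);
        }
#endif
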
static int
ncvattach(DEVPORT_PDEVICE devi)
{
        struct ncv_softc *sc;
        struct scsi_low_softc *slp;
        u_int32_t flags = DEVPORT_PDEVFLAGS(devi);
        intrmask_t s;
        char dvname[16]; /* SCSI_LOW_DVNAME_LEN */

        strcpy(dvname, "ncv");

        sc = DEVPORT_PDEVALLOC_SOFTC(devi);
        if (sc == NULL) {
                return (0);
        }

        slp = &sc->sc_sclow;
        slp->sl_dev = devi;
        sc->sc_iot = rman_get_bustag(sc->port_res);
        sc->sc_ioh = rman_get_bushandle(sc->port_res);

        slp->sl_hostid = NCV_HOSTID;
        slp->sl_cfgflags = flags;

        s = splcam();
        ncvattachsubr(sc);
        splx(s);

        return (NCVIOSZ);
}

static struct wds_req *
wdsr_alloc(struct wds *wp)
{
        struct wds_req *r;
        int x;
        int i;

        r = NULL;
        x = splcam();

        /*
         * Most of the time only one or two commands will be active,
         * because SCSI disconnect is not supported by the hardware,
         * so the search should be fast enough.
         */
        i = ffs(wp->wdsr_free) - 1;
        if (i < 0) {
                splx(x);
                return (NULL);
        }
        wp->wdsr_free &= ~(1 << i);
        r = &wp->dx->req[i];
        r->flags = 0;   /* reset all flags */
        r->ombn = i;    /* luckily we have one omb per wdsr */
        wp->dx->ombs[i].stat = 1;

        r->mask = 0;
        splx(x);

        smallog3('r', i + '0', r->ombn + '0');
        return (r);
}

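/*
 * Self-contained userland model of the wdsr_free bitmask allocator above
 * (hypothetical slot_alloc()/slot_release() names; the driver's matching
 * release path is not shown in this file).  ffs(3) returns the 1-based
 * index of the lowest set bit, so a free slot is found in O(1) and the
 * lowest-numbered slot is always reused first.  The splcam()/splx()
 * bracketing that makes the driver version interrupt-safe is omitted.
 */
#include <assert.h>
#include <stdint.h>
#include <strings.h>    /* ffs() */

#define NSLOTS 8

static uint32_t slot_free = (1u << NSLOTS) - 1; /* bit set => slot free */

/* Returns a slot index, or -1 if all slots are busy. */
static int
slot_alloc(void)
{
        int i = ffs(slot_free) - 1;     /* ffs() is 1-based, 0 when empty */

        if (i < 0)
                return (-1);
        slot_free &= ~(1u << i);
        return (i);
}

static void
slot_release(int i)
{
        assert((slot_free & (1u << i)) == 0);   /* must be allocated */
        slot_free |= 1u << i;
}

int
main(void)
{
        int a = slot_alloc();   /* 0 */
        int b = slot_alloc();   /* 1 */

        slot_release(a);
        assert(slot_alloc() == a);      /* lowest free bit is reused first */
        (void)b;
        return (0);
}
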
/*
 * Function name:      twa_detach
 * Description:        Called when the controller is being detached from
 *                     the pci bus.
 *
 * Input:              dev     -- bus device corresponding to the ctlr
 * Output:             None
 * Return value:       0       -- success
 *                     non-zero-- failure
 */
static int
twa_detach(device_t dev)
{
        struct twa_softc *sc = device_get_softc(dev);
        int s;
        int error;

        twa_dbg_dprint_enter(3, sc);

        error = EBUSY;
        s = splcam();
        if (sc->twa_state & TWA_STATE_OPEN)
                goto out;

        /* Shut the controller down. */
        if ((error = twa_shutdown(dev)))
                goto out;

        /* Free all resources associated with this controller. */
        twa_free(sc);
        error = 0;

out:
        splx(s);
        return (error);
}

int
stg_attach(device_t dev)
{
        struct stg_softc *sc;
        struct scsi_low_softc *slp;
        u_int32_t flags = device_get_flags(dev);
        intrmask_t s;
        char dvname[16];

        sc = device_get_softc(dev);

        strcpy(dvname, "stg");

        slp = &sc->sc_sclow;
        slp->sl_dev = dev;
        sc->sc_iot = rman_get_bustag(sc->port_res);
        sc->sc_ioh = rman_get_bushandle(sc->port_res);

        slp->sl_hostid = STG_HOSTID;
        slp->sl_cfgflags = flags;

        s = splcam();
        stgattachsubr(sc);
        splx(s);

        return (STGIOSZ);
}

static __inline void
mly_requeue_ccb(struct mly_softc *sc, union ccb *ccb)
{
        int s;

        s = splcam();
        TAILQ_INSERT_HEAD(&sc->mly_cam_ccbq, &ccb->ccb_h, sim_links.tqe);
        splx(s);
}

static __inline void
adv_free_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
{
        int opri;

        opri = splcam();
        cinfo->state = ACCB_FREE;
        SLIST_INSERT_HEAD(&adv->free_ccb_infos, cinfo, links);
        splx(opri);
}

static __inline void
ahbecbfree(struct ahb_softc *ahb, struct ecb *ecb)
{
        int s;

        s = splcam();
        ecb->state = ECB_FREE;
        SLIST_INSERT_HEAD(&ahb->free_ecbs, ecb, links);
        splx(s);
}

static void
nsp_card_unload(DEVPORT_PDEVICE devi)
{
        struct nsp_softc *sc = DEVPORT_PDEVGET_SOFTC(devi);
        intrmask_t s;

        s = splcam();
        scsi_low_deactivate((struct scsi_low_softc *)sc);
        scsi_low_dettach(&sc->sc_sclow);
        splx(s);
}

static void
nsp_card_unload(device_t devi)
{
        struct nsp_softc *sc = device_get_softc(devi);
        intrmask_t s;

        s = splcam();
        scsi_low_deactivate((struct scsi_low_softc *)sc);
        scsi_low_dettach(&sc->sc_sclow);
        splx(s);
}

/********************************************************************************
 * Bring the controller to a quiescent state, ready for system suspend.
 *
 * We can't assume that the controller is not active at this point, so we need
 * to mask interrupts.
 */
static int
mly_pci_suspend(device_t dev)
{
        struct mly_softc *sc = device_get_softc(dev);
        int s;

        debug_called(1);

        s = splcam();
        mly_detach(sc);
        splx(s);
        return (0);
}

static void
ncv_card_unload(DEVPORT_PDEVICE devi)
{
        struct ncv_softc *sc = DEVPORT_PDEVGET_SOFTC(devi);
        intrmask_t s;

        printf("%s: unload\n", sc->sc_sclow.sl_xname);

        s = splcam();
        scsi_low_deactivate((struct scsi_low_softc *)sc);
        scsi_low_dettach(&sc->sc_sclow);
        splx(s);
}

static __inline union ccb *
mly_dequeue_ccb(struct mly_softc *sc)
{
        union ccb *ccb;
        int s;

        s = splcam();
        if ((ccb = (union ccb *)TAILQ_FIRST(&sc->mly_cam_ccbq)) != NULL)
                TAILQ_REMOVE(&sc->mly_cam_ccbq, &ccb->ccb_h, sim_links.tqe);
        splx(s);
        return (ccb);
}

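/*
 * Userland model of the mly CCB queue discipline above, using the same
 * <sys/queue.h> TAILQ macros (stand-in types; the splcam() locking is
 * omitted).  The enqueue side is not shown in this file; a tail insert
 * is assumed here.  The point of mly_requeue_ccb() inserting at the
 * HEAD is that a deferred CCB is retried before anything queued after it.
 */
#include <sys/queue.h>
#include <assert.h>

/* Stand-in for the CCB header and softc queue in the driver above. */
struct ccb_hdr {
        int                     id;
        TAILQ_ENTRY(ccb_hdr)    tqe;
};
TAILQ_HEAD(ccbq, ccb_hdr);

int
main(void)
{
        struct ccbq q = TAILQ_HEAD_INITIALIZER(q);
        struct ccb_hdr a = { .id = 1 }, b = { .id = 2 };
        struct ccb_hdr *h;

        /* Assumed submission order: a then b, at the tail. */
        TAILQ_INSERT_TAIL(&q, &a, tqe);
        TAILQ_INSERT_TAIL(&q, &b, tqe);

        /* Dequeue a for processing (mly_dequeue_ccb)... */
        h = TAILQ_FIRST(&q);
        TAILQ_REMOVE(&q, h, tqe);
        assert(h->id == 1);

        /* ...and requeue it at the head (mly_requeue_ccb). */
        TAILQ_INSERT_HEAD(&q, h, tqe);
        assert(TAILQ_FIRST(&q)->id == 1);       /* retried before b */
        return (0);
}
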
static void
stg_isa_unload(device_t devi)
{
        struct stg_softc *sc = device_get_softc(devi);
        intrmask_t s;

        printf("%s: unload\n", sc->sc_sclow.sl_xname);

        s = splcam();
        scsi_low_deactivate((struct scsi_low_softc *)sc);
        scsi_low_dettach(&sc->sc_sclow);
        splx(s);
}

static __inline struct ecb *
ahbecbget(struct ahb_softc *ahb)
{
        struct ecb *ecb;
        int s;

        s = splcam();
        if ((ecb = SLIST_FIRST(&ahb->free_ecbs)) != NULL)
                SLIST_REMOVE_HEAD(&ahb->free_ecbs, links);
        splx(s);

        return (ecb);
}

int
stg_detach(device_t dev)
{
        struct stg_softc *sc = device_get_softc(dev);
        intrmask_t s;

        s = splcam();
        scsi_low_deactivate((struct scsi_low_softc *)sc);
        scsi_low_dettach(&sc->sc_sclow);
        splx(s);

        stg_release_resource(dev);
        return (0);
}

static int
nspattach(device_t devi)
{
        struct nsp_softc *sc;
        struct scsi_low_softc *slp;
        u_int32_t flags = device_get_flags(devi);
        u_int iobase = bus_get_resource_start(devi, SYS_RES_IOPORT, 0);
        intrmask_t s;
        char dvname[16];

        strcpy(dvname, "nsp");

        if (iobase == 0) {
                printf("%s: no ioaddr is given\n", dvname);
                return (0);
        }

        sc = device_get_softc(devi);
        if (sc == NULL)
                return (0);

        slp = &sc->sc_sclow;
        slp->sl_dev = devi;
        sc->sc_iot = rman_get_bustag(sc->port_res);
        sc->sc_ioh = rman_get_bushandle(sc->port_res);

        if (sc->mem_res == NULL) {
                printf("WARNING: cannot get memory resource; going PIO mode\n");
                flags |= PIO_MODE;
        }
        if ((flags & PIO_MODE) == 0) {
                sc->sc_memt = rman_get_bustag(sc->mem_res);
                sc->sc_memh = rman_get_bushandle(sc->mem_res);
        } else {
                sc->sc_memh = 0;
        }
        /* slp->sl_irq = devi->pd_irq; */
        sc->sc_iclkdiv = CLKDIVR_20M;
        sc->sc_clkdiv = CLKDIVR_40M;

        slp->sl_hostid = NSP_HOSTID;
        slp->sl_cfgflags = flags;

        s = splcam();
        nspattachsubr(sc);
        splx(s);

        return (NSP_IOSIZE);
}

static __inline struct adv_ccb_info *
adv_get_ccb_info(struct adv_softc *adv)
{
        struct adv_ccb_info *cinfo;
        int opri;

        opri = splcam();
        if ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
                SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
        } else {
                cinfo = adv_alloc_ccb_info(adv);
        }
        splx(opri);

        return (cinfo);
}

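/*
 * The same spl-protected SLIST free-list pattern appears twice above:
 * ahbecbget()/ahbecbfree() and adv_get_ccb_info()/adv_free_ccb_info().
 * This self-contained userland model (hypothetical ecb_get()/ecb_free()
 * names) shows the LIFO recycling behavior; the splcam()/splx()
 * bracketing that makes the kernel versions interrupt-safe is omitted.
 */
#include <sys/queue.h>
#include <assert.h>
#include <stddef.h>

enum { ECB_FREE, ECB_ACTIVE };

/* Stand-in for struct ecb / struct adv_ccb_info in the drivers above. */
struct ecb {
        int                     state;
        SLIST_ENTRY(ecb)        links;
};
SLIST_HEAD(ecb_list, ecb);

static struct ecb_list free_ecbs = SLIST_HEAD_INITIALIZER(free_ecbs);

static struct ecb *
ecb_get(void)
{
        struct ecb *ecb;

        if ((ecb = SLIST_FIRST(&free_ecbs)) != NULL)
                SLIST_REMOVE_HEAD(&free_ecbs, links);
        return (ecb);   /* NULL when the pool is exhausted */
}

static void
ecb_free(struct ecb *ecb)
{
        ecb->state = ECB_FREE;
        SLIST_INSERT_HEAD(&free_ecbs, ecb, links);
}

int
main(void)
{
        struct ecb pool[2];

        ecb_free(&pool[0]);
        ecb_free(&pool[1]);
        assert(ecb_get() == &pool[1]);  /* LIFO: last freed, first reused */
        assert(ecb_get() == &pool[0]);
        assert(ecb_get() == NULL);
        return (0);
}
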
/*
 * Function name:      twa_suspend
 * Description:        Called to suspend I/O before hot-swapping PCI ctlrs.
 *                     Doesn't do much as of now.
 *
 * Input:              dev     -- bus device corresponding to the ctlr
 * Output:             None
 * Return value:       0       -- success
 *                     non-zero-- failure
 */
static int
twa_suspend(device_t dev)
{
        struct twa_softc *sc = device_get_softc(dev);
        int s;

        twa_dbg_dprint_enter(3, sc);

        s = splcam();
        sc->twa_state |= TWA_STATE_SUSPEND;

        twa_disable_interrupts(sc);
        splx(s);

        return (1);
}

/*
 * Function name:      twa_reset_stats
 * Description:        Intended to be called from ddb.
 *                     Resets some controller stats.
 *
 * Input:              None
 * Output:             None
 * Return value:       None
 */
void
twa_reset_stats(void)
{
        struct twa_softc *sc;
        int s;
        int i;

        s = splcam();
        for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
                sc->twa_qstats[TWAQ_FREE].q_max = 0;
                sc->twa_qstats[TWAQ_BUSY].q_max = 0;
                sc->twa_qstats[TWAQ_PENDING].q_max = 0;
                sc->twa_qstats[TWAQ_COMPLETE].q_max = 0;
        }
        splx(s);
}

/*
 * Function name:      twa_shutdown
 * Description:        Called at unload/shutdown time.  Lets the controller
 *                     know that we are going down.
 *
 * Input:              dev     -- bus device corresponding to the ctlr
 * Output:             None
 * Return value:       0       -- success
 *                     non-zero-- failure
 */
static int
twa_shutdown(device_t dev)
{
        struct twa_softc *sc = device_get_softc(dev);
        int s;
        int error = 0;

        twa_dbg_dprint_enter(3, sc);

        s = splcam();

        /* Disconnect from the controller. */
        error = twa_deinit_ctlr(sc);
        splx(s);

        return (error);
}

/*
 * Function name:      twa_report
 * Description:        Intended to be called from ddb.  Prints controller
 *                     stats, and requests, if any, that are in the wrong
 *                     queue.
 *
 * Input:              None
 * Output:             None
 * Return value:       None
 */
void
twa_report(void)
{
        struct twa_softc *sc;
        struct twa_request *tr;
        int s;
        int i;

        s = splcam();
        for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
                twa_print_controller(sc);
                TAILQ_FOREACH(tr, &sc->twa_busy, tr_link)
                        twa_print_request(tr, TWA_CMD_BUSY);
                TAILQ_FOREACH(tr, &sc->twa_complete, tr_link)
                        twa_print_request(tr, TWA_CMD_COMPLETE);
        }
        splx(s);
}

static int
wds_cmd(int base, u_int8_t *p, int l)
{
        int s = splcam();

        while (l--) {
                do {
                        outb(base + WDS_CMD, *p);
                        wds_wait(base + WDS_STAT, WDS_RDY, WDS_RDY);
                } while (inb(base + WDS_STAT) & WDS_REJ);
                p++;
        }

        wds_wait(base + WDS_STAT, WDS_RDY, WDS_RDY);
        splx(s);

        return (0);
}

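/*
 * Hypothetical sketch of the polling helper used by wds_cmd() above; the
 * real wds_wait() is defined elsewhere in the driver and may differ (it
 * likely bounds the loop and returns a timeout status).  The shape of
 * the handshake is: spin reading the status port until the selected bits
 * reach the wanted value, resending the command byte while the board
 * flags it rejected (WDS_REJ).
 */
static void
wds_wait_sketch(int port, u_int8_t mask, u_int8_t wanted)
{
        while ((inb(port) & mask) != wanted)
                ;       /* busy-wait; a real helper would bound this loop */
}
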
static int
stgattach(device_t devi)
{
        struct stg_softc *sc;
        struct scsi_low_softc *slp;
        u_int32_t flags = device_get_flags(devi);
        u_int iobase = bus_get_resource_start(devi, SYS_RES_IOPORT, 0);
        intrmask_t s;
        char dvname[16];

        strcpy(dvname, "stg");

        if (iobase == 0) {
                printf("%s: no ioaddr is given\n", dvname);
                return (0);
        }

        sc = device_get_softc(devi);
        if (sc == NULL) {
                return (0);
        }

        slp = &sc->sc_sclow;
        slp->sl_dev = devi;
        sc->sc_iot = rman_get_bustag(sc->port_res);
        sc->sc_ioh = rman_get_bushandle(sc->port_res);

        slp->sl_hostid = STG_HOSTID;
        slp->sl_cfgflags = flags;

        s = splcam();
        stgattachsubr(sc);
        splx(s);

        return (STGIOSZ);
}

static void
isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
{
        struct cam_sim *sim;
        struct ispsoftc *isp;

        sim = (struct cam_sim *)cbarg;
        isp = (struct ispsoftc *)cam_sim_softc(sim);
        switch (code) {
        case AC_LOST_DEVICE:
                if (isp->isp_type & ISP_HA_SCSI) {
                        u_int16_t oflags, nflags;
                        sdparam *sdp = isp->isp_param;
                        int s, tgt = xpt_path_target_id(path);

                        s = splcam();
                        sdp += cam_sim_bus(sim);
                        isp->isp_update |= (1 << cam_sim_bus(sim));
                        nflags = DPARM_SAFE_DFLT;
                        if (ISP_FW_REVX(isp->isp_fwrev) >=
                            ISP_FW_REV(7, 55, 0)) {
                                nflags |= DPARM_NARROW | DPARM_ASYNC;
                        }
                        oflags = sdp->isp_devparam[tgt].dev_flags;
                        sdp->isp_devparam[tgt].dev_flags = nflags;
                        sdp->isp_devparam[tgt].dev_update = 1;
                        (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, NULL);
                        sdp->isp_devparam[tgt].dev_flags = oflags;
                        (void) splx(s);
                }
                break;
        default:
                break;
        }
}

static void
adv_action(struct cam_sim *sim, union ccb *ccb)
{
        struct adv_softc *adv;

        CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adv_action\n"));

        adv = (struct adv_softc *)cam_sim_softc(sim);

        switch (ccb->ccb_h.func_code) {
        /* Common cases first */
        case XPT_SCSI_IO:       /* Execute the requested I/O operation */
        {
                struct ccb_hdr *ccb_h;
                struct ccb_scsiio *csio;
                struct adv_ccb_info *cinfo;

                ccb_h = &ccb->ccb_h;
                csio = &ccb->csio;
                cinfo = adv_get_ccb_info(adv);
                if (cinfo == NULL)
                        panic("XXX Handle CCB info error!!!");

                ccb_h->ccb_cinfo_ptr = cinfo;
                cinfo->ccb = ccb;

                /* Only use S/G if there is a transfer */
                if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
                        if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
                                /*
                                 * We've been given a pointer
                                 * to a single buffer
                                 */
                                if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
                                        int s;
                                        int error;

                                        s = splsoftvm();
                                        error = bus_dmamap_load(
                                            adv->buffer_dmat,
                                            cinfo->dmamap,
                                            csio->data_ptr,
                                            csio->dxfer_len,
                                            adv_execute_ccb,
                                            csio, /*flags*/0);
                                        if (error == EINPROGRESS) {
                                                /*
                                                 * So as to maintain ordering,
                                                 * freeze the controller queue
                                                 * until our mapping is
                                                 * returned.
                                                 */
                                                adv_set_state(adv,
                                                    ADV_BUSDMA_BLOCK);
                                        }
                                        splx(s);
                                } else {
                                        struct bus_dma_segment seg;

                                        /* Pointer to physical buffer */
                                        seg.ds_addr =
                                            (bus_addr_t)csio->data_ptr;
                                        seg.ds_len = csio->dxfer_len;
                                        adv_execute_ccb(csio, &seg, 1, 0);
                                }
                        } else {
                                struct bus_dma_segment *segs;

                                if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
                                        panic("adv_setup_data - Physical "
                                            "segment pointers unsupported");

                                if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
                                        panic("adv_setup_data - Virtual "
                                            "segment addresses unsupported");

                                /* Just use the segments provided */
                                segs =
                                    (struct bus_dma_segment *)csio->data_ptr;
                                adv_execute_ccb(ccb, segs,
                                    csio->sglist_cnt, 0);
                        }
                } else {
                        adv_execute_ccb(ccb, NULL, 0, 0);
                }
                break;
        }
        case XPT_RESET_DEV:     /* Bus Device Reset the specified SCSI device */
        case XPT_TARGET_IO:     /* Execute target I/O request */
        case XPT_ACCEPT_TARGET_IO:      /* Accept Host Target Mode CDB */
        case XPT_CONT_TARGET_IO:        /* Continue Host Target I/O Connection*/
        case XPT_EN_LUN:                /* Enable LUN as a target */
        case XPT_ABORT:                 /* Abort the specified CCB */
                /* XXX Implement */
                ccb->ccb_h.status = CAM_REQ_INVALID;
                xpt_done(ccb);
                break;
#define IS_CURRENT_SETTINGS(c)  (c->type == CTS_TYPE_CURRENT_SETTINGS)
#define IS_USER_SETTINGS(c)     (c->type == CTS_TYPE_USER_SETTINGS)
        case XPT_SET_TRAN_SETTINGS:
        {
                struct ccb_trans_settings_scsi *scsi;
                struct ccb_trans_settings_spi *spi;
                struct ccb_trans_settings *cts;
                target_bit_vector targ_mask;
                struct adv_transinfo *tconf;
                u_int update_type;
                int s;

                cts = &ccb->cts;
                targ_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);
                update_type = 0;

                /*
                 * The user must specify which type of settings he wishes
                 * to change.
                 */
                if (IS_CURRENT_SETTINGS(cts) && !IS_USER_SETTINGS(cts)) {
                        tconf = &adv->tinfo[cts->ccb_h.target_id].current;
                        update_type |= ADV_TRANS_GOAL;
                } else if (IS_USER_SETTINGS(cts) && !IS_CURRENT_SETTINGS(cts)) {
                        tconf = &adv->tinfo[cts->ccb_h.target_id].user;
                        update_type |= ADV_TRANS_USER;
                } else {
                        ccb->ccb_h.status = CAM_REQ_INVALID;
                        break;
                }

                s = splcam();
                scsi = &cts->proto_specific.scsi;
                spi = &cts->xport_specific.spi;
                if ((update_type & ADV_TRANS_GOAL) != 0) {
                        if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
                                if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
                                        adv->disc_enable |= targ_mask;
                                else
                                        adv->disc_enable &= ~targ_mask;
                                adv_write_lram_8(adv, ADVV_DISC_ENABLE_B,
                                    adv->disc_enable);
                        }
                        if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
                                if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
                                        adv->cmd_qng_enabled |= targ_mask;
                                else
                                        adv->cmd_qng_enabled &= ~targ_mask;
                        }
                }
                if ((update_type & ADV_TRANS_USER) != 0) {
                        if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
                                /* Test the enable bit in flags, not valid */
                                if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
                                        adv->user_disc_enable |= targ_mask;
                                else
                                        adv->user_disc_enable &= ~targ_mask;
                        }
                        if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
                                if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
                                        adv->user_cmd_qng_enabled |= targ_mask;
                                else
                                        adv->user_cmd_qng_enabled &= ~targ_mask;
                        }
                }

                /*
                 * If the user specifies either the sync rate, or offset,
                 * but not both, the unspecified parameter defaults to its
                 * current value in transfer negotiations.
                 */
                if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
                 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
                        /*
                         * If the user provided a sync rate but no offset,
                         * use the current offset.
                         */
                        if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
                                spi->sync_offset = tconf->offset;

                        /*
                         * If the user provided an offset but no sync rate,
                         * use the current sync rate.
                         */
                        if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
                                spi->sync_period = tconf->period;

                        adv_period_offset_to_sdtr(adv, &spi->sync_period,
                            &spi->sync_offset, cts->ccb_h.target_id);

                        adv_set_syncrate(adv, /*struct cam_path */NULL,
                            cts->ccb_h.target_id, spi->sync_period,
                            spi->sync_offset, update_type);
                }

                splx(s);
                ccb->ccb_h.status = CAM_REQ_CMP;
                xpt_done(ccb);
                break;
        }
        case XPT_GET_TRAN_SETTINGS:
        /* Get default/user set transfer settings for the target */
        {
                struct ccb_trans_settings_scsi *scsi;
                struct ccb_trans_settings_spi *spi;
                struct ccb_trans_settings *cts;
                struct adv_transinfo *tconf;
                target_bit_vector target_mask;
                int s;

                cts = &ccb->cts;
                target_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);

                scsi = &cts->proto_specific.scsi;
                spi = &cts->xport_specific.spi;

                cts->protocol = PROTO_SCSI;
                cts->protocol_version = SCSI_REV_2;
                cts->transport = XPORT_SPI;
                cts->transport_version = 2;

                scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
                spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;

                s = splcam();
                if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
                        tconf = &adv->tinfo[cts->ccb_h.target_id].current;
                        if ((adv->disc_enable & target_mask) != 0)
                                spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
                        if ((adv->cmd_qng_enabled & target_mask) != 0)
                                scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
                } else {
                        tconf = &adv->tinfo[cts->ccb_h.target_id].user;
                        if ((adv->user_disc_enable & target_mask) != 0)
                                spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
                        if ((adv->user_cmd_qng_enabled & target_mask) != 0)
                                scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
                }
                spi->sync_period = tconf->period;
                spi->sync_offset = tconf->offset;
                splx(s);

                spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
                spi->valid = CTS_SPI_VALID_SYNC_RATE
                           | CTS_SPI_VALID_SYNC_OFFSET
                           | CTS_SPI_VALID_BUS_WIDTH
                           | CTS_SPI_VALID_DISC;
                scsi->valid = CTS_SCSI_VALID_TQ;

                ccb->ccb_h.status = CAM_REQ_CMP;
                xpt_done(ccb);
                break;
        }
        case XPT_CALC_GEOMETRY:
        {
                int extended;

                extended = (adv->control & ADV_CNTL_BIOS_GT_1GB) != 0;
                cam_calc_geometry(&ccb->ccg, extended);
                xpt_done(ccb);
                break;
        }
        case XPT_RESET_BUS:             /* Reset the specified SCSI bus */
        {
                int s;

                s = splcam();
                adv_stop_execution(adv);
                adv_reset_bus(adv, /*initiate_reset*/TRUE);
                adv_start_execution(adv);
                splx(s);

                ccb->ccb_h.status = CAM_REQ_CMP;
                xpt_done(ccb);
                break;
        }
        case XPT_TERM_IO:               /* Terminate the I/O process */
                /* XXX Implement */
                ccb->ccb_h.status = CAM_REQ_INVALID;
                xpt_done(ccb);
                break;
        case XPT_PATH_INQ:              /* Path routing inquiry */
        {
                struct ccb_pathinq *cpi = &ccb->cpi;

                cpi->version_num = 1;   /* XXX??? */
                cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
                cpi->target_sprt = 0;
                cpi->hba_misc = 0;
                cpi->hba_eng_cnt = 0;
                cpi->max_target = 7;
                cpi->max_lun = 7;
                cpi->initiator_id = adv->scsi_id;
                cpi->bus_id = cam_sim_bus(sim);
                cpi->base_transfer_speed = 3300;
                strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
                strncpy(cpi->hba_vid, "Advansys", HBA_IDLEN);
                strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
                cpi->unit_number = cam_sim_unit(sim);
                cpi->ccb_h.status = CAM_REQ_CMP;
                cpi->transport = XPORT_SPI;
                cpi->transport_version = 2;
                cpi->protocol = PROTO_SCSI;
                cpi->protocol_version = SCSI_REV_2;
                xpt_done(ccb);
                break;
        }
        default:
                ccb->ccb_h.status = CAM_REQ_INVALID;
                xpt_done(ccb);
                break;
        }
}

static void
isp_action(struct cam_sim *sim, union ccb *ccb)
{
        int s, tgt, error;
        struct ispsoftc *isp;
        struct ccb_trans_settings *cts;

        CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));

        isp = (struct ispsoftc *)cam_sim_softc(sim);
        ccb->ccb_h.sim_priv.entries[0].field = 0;
        ccb->ccb_h.sim_priv.entries[1].ptr = isp;

        /*
         * This should only happen for Fibre Channel adapters.
         * We want to pass through all but XPT_SCSI_IO (e.g.,
         * path inquiry) but fail if we can't get good Fibre
         * Channel link status.
         */
        if (ccb->ccb_h.func_code == XPT_SCSI_IO &&
            isp->isp_state != ISP_RUNSTATE) {
                s = splcam();
                DISABLE_INTS(isp);
                isp_init(isp);
                if (isp->isp_state != ISP_INITSTATE) {
                        (void) splx(s);
                        /*
                         * Lie.  Say it was a selection timeout.
                         */
                        ccb->ccb_h.status = CAM_SEL_TIMEOUT;
                        xpt_done(ccb);
                        return;
                }
                isp->isp_state = ISP_RUNSTATE;
                ENABLE_INTS(isp);
                (void) splx(s);
        }

        IDPRINTF(4, ("%s: isp_action code %x\n", isp->isp_name,
            ccb->ccb_h.func_code));

        switch (ccb->ccb_h.func_code) {
        case XPT_SCSI_IO:       /* Execute the requested I/O operation */
                /*
                 * Do a couple of preliminary checks...
                 */
                if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
                        if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
                                ccb->ccb_h.status = CAM_REQ_INVALID;
                                xpt_done(ccb);
                                break;
                        }
                }
                if (isp->isp_type & ISP_HA_SCSI) {
                        if (ccb->ccb_h.target_id > (MAX_TARGETS-1)) {
                                ccb->ccb_h.status = CAM_PATH_INVALID;
                        } else if (ISP_FW_REVX(isp->isp_fwrev) >=
                            ISP_FW_REV(7, 55, 0)) {
                                /*
                                 * Too much breakage.
                                 */
#if 0
                                if (ccb->ccb_h.target_lun > 31) {
                                        ccb->ccb_h.status = CAM_PATH_INVALID;
                                }
#else
                                if (ccb->ccb_h.target_lun > 7) {
                                        ccb->ccb_h.status = CAM_PATH_INVALID;
                                }
#endif
                        } else if (ccb->ccb_h.target_lun > 7) {
                                ccb->ccb_h.status = CAM_PATH_INVALID;
                        }
                } else {
                        if (ccb->ccb_h.target_id > (MAX_FC_TARG-1)) {
                                ccb->ccb_h.status = CAM_PATH_INVALID;
#ifdef  SCCLUN
                        } else if (ccb->ccb_h.target_lun > 65535) {
                                ccb->ccb_h.status = CAM_PATH_INVALID;
#else
                        } else if (ccb->ccb_h.target_lun > 15) {
                                ccb->ccb_h.status = CAM_PATH_INVALID;
#endif
                        }
                }
                if (ccb->ccb_h.status == CAM_PATH_INVALID) {
                        printf("%s: invalid tgt/lun (%d.%d) in XPT_SCSI_IO\n",
                            isp->isp_name, ccb->ccb_h.target_id,
                            ccb->ccb_h.target_lun);
                        xpt_done(ccb);
                        break;
                }
                s = splcam();
                DISABLE_INTS(isp);
                switch (ispscsicmd((ISP_SCSI_XFER_T *) ccb)) {
                case CMD_QUEUED:
                        ccb->ccb_h.status |= CAM_SIM_QUEUED;
                        break;
                case CMD_EAGAIN:
                        if (!(isp->isp_osinfo.simqfrozen &
                            SIMQFRZ_RESOURCE)) {
                                xpt_freeze_simq(sim, 1);
                                isp->isp_osinfo.simqfrozen |=
                                    SIMQFRZ_RESOURCE;
                        }
                        ccb->ccb_h.status &= ~CAM_STATUS_MASK;
                        ccb->ccb_h.status |= CAM_REQUEUE_REQ;
                        xpt_done(ccb);
                        break;
                case CMD_COMPLETE:
                        /*
                         * Just make sure that we didn't get it returned
                         * as completed, but with the request still in
                         * progress.  In theory, 'cannot happen'.
                         */
                        if ((ccb->ccb_h.status & CAM_STATUS_MASK) ==
                            CAM_REQ_INPROG) {
                                ccb->ccb_h.status &= ~CAM_STATUS_MASK;
                                ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
                        }
                        xpt_done(ccb);
                        break;
                }
                ENABLE_INTS(isp);
                splx(s);
                break;

        case XPT_EN_LUN:                /* Enable LUN as a target */
        case XPT_TARGET_IO:             /* Execute target I/O request */
        case XPT_ACCEPT_TARGET_IO:      /* Accept Host Target Mode CDB */
        case XPT_CONT_TARGET_IO:        /* Continue Host Target I/O Connection*/
                ccb->ccb_h.status = CAM_REQ_INVALID;
                xpt_done(ccb);
                break;

        case XPT_RESET_DEV:             /* BDR the specified SCSI device */
                tgt = ccb->ccb_h.target_id;     /* XXX: Which Bus? */
                s = splcam();
                error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
                (void) splx(s);
                if (error) {
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                } else {
                        ccb->ccb_h.status = CAM_REQ_CMP;
                }
                xpt_done(ccb);
                break;

        case XPT_ABORT:                 /* Abort the specified CCB */
                s = splcam();
                error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
                (void) splx(s);
                if (error) {
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                } else {
                        ccb->ccb_h.status = CAM_REQ_CMP;
                }
                xpt_done(ccb);
                break;

        case XPT_SET_TRAN_SETTINGS:     /* Nexus Settings */
                cts = &ccb->cts;
                tgt = cts->ccb_h.target_id;
                s = splcam();
                if (isp->isp_type & ISP_HA_FC) {
                        ;       /* nothing to change */
                } else {
                        sdparam *sdp = isp->isp_param;
                        u_int16_t *dptr;
                        int bus =
                            cam_sim_bus(xpt_path_sim(cts->ccb_h.path));

                        sdp += bus;
#if 0
                        if (cts->flags & CCB_TRANS_CURRENT_SETTINGS)
                                dptr = &sdp->isp_devparam[tgt].cur_dflags;
                        else
                                dptr = &sdp->isp_devparam[tgt].dev_flags;
#else
                        /*
                         * We always update (internally) from dev_flags
                         * so any request to change settings just gets
                         * vectored to that location.
                         */
                        dptr = &sdp->isp_devparam[tgt].dev_flags;
#endif
                        /*
                         * Note that these operations affect the goal
                         * flags (dev_flags)- not the current state
                         * flags.  Then we mark things so that the next
                         * operation to this HBA will cause the update
                         * to occur.
                         */
                        if (cts->valid & CCB_TRANS_DISC_VALID) {
                                if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
                                        *dptr |= DPARM_DISC;
                                } else {
                                        *dptr &= ~DPARM_DISC;
                                }
                        }
                        if (cts->valid & CCB_TRANS_TQ_VALID) {
                                if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
                                        *dptr |= DPARM_TQING;
                                } else {
                                        *dptr &= ~DPARM_TQING;
                                }
                        }
                        if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
                                switch (cts->bus_width) {
                                case MSG_EXT_WDTR_BUS_16_BIT:
                                        *dptr |= DPARM_WIDE;
                                        break;
                                default:
                                        *dptr &= ~DPARM_WIDE;
                                }
                        }
                        /*
                         * Any SYNC RATE of nonzero and SYNC_OFFSET
                         * of nonzero will cause us to go to the
                         * selected (from NVRAM) maximum value for
                         * this device.  At a later point, we'll
                         * allow finer control.
                         */
                        if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
                            (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
                            (cts->sync_offset > 0)) {
                                *dptr |= DPARM_SYNC;
                        } else {
                                *dptr &= ~DPARM_SYNC;
                        }
                        if (bootverbose || isp->isp_dblev >= 3)
                                printf("%s: %d.%d set %s period 0x%x offset "
                                    "0x%x flags 0x%x\n", isp->isp_name,
                                    bus, tgt,
                                    (cts->flags & CCB_TRANS_CURRENT_SETTINGS)?
                                    "current" : "user",
                                    sdp->isp_devparam[tgt].sync_period,
                                    sdp->isp_devparam[tgt].sync_offset,
                                    sdp->isp_devparam[tgt].dev_flags);
                        /* Already at splcam(); no need to raise again */
                        sdp->isp_devparam[tgt].dev_update = 1;
                        isp->isp_update |= (1 << bus);
                        (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, NULL);
                }
                (void) splx(s);
                ccb->ccb_h.status = CAM_REQ_CMP;
                xpt_done(ccb);
                break;

        case XPT_GET_TRAN_SETTINGS:
                cts = &ccb->cts;
                tgt = cts->ccb_h.target_id;
                if (isp->isp_type & ISP_HA_FC) {
                        /*
                         * a lot of normal SCSI things don't make sense.
                         */
                        cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
                        cts->valid = CCB_TRANS_DISC_VALID |
                            CCB_TRANS_TQ_VALID;
                        /*
                         * How do you measure the width of a high
                         * speed serial bus?  Well, in bytes.
                         *
                         * Offset and period make no sense, though, so we set
                         * (above) a 'base' transfer speed to be gigabit.
                         */
                        cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
                } else {
                        sdparam *sdp = isp->isp_param;
                        u_int16_t dval, pval, oval;
                        int bus =
                            cam_sim_bus(xpt_path_sim(cts->ccb_h.path));

                        sdp += bus;
                        if (cts->flags & CCB_TRANS_CURRENT_SETTINGS) {
                                s = splcam();
                                /*
                                 * First do a refresh to see if things
                                 * have changed recently!
                                 */
                                sdp->isp_devparam[tgt].dev_refresh = 1;
                                isp->isp_update |= (1 << bus);
                                (void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
                                    NULL);
                                (void) splx(s);
                                dval = sdp->isp_devparam[tgt].cur_dflags;
                                oval = sdp->isp_devparam[tgt].cur_offset;
                                pval = sdp->isp_devparam[tgt].cur_period;
                        } else {
                                dval = sdp->isp_devparam[tgt].dev_flags;
                                oval = sdp->isp_devparam[tgt].sync_offset;
                                pval = sdp->isp_devparam[tgt].sync_period;
                        }

                        s = splcam();
                        cts->flags &=
                            ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);

                        if (dval & DPARM_DISC) {
                                cts->flags |= CCB_TRANS_DISC_ENB;
                        }
                        if (dval & DPARM_TQING) {
                                cts->flags |= CCB_TRANS_TAG_ENB;
                        }
                        if (dval & DPARM_WIDE) {
                                cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
                        } else {
                                cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
                        }
                        cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
                            CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;

                        if ((dval & DPARM_SYNC) && oval != 0) {
                                cts->sync_period = pval;
                                cts->sync_offset = oval;
                                cts->valid |=
                                    CCB_TRANS_SYNC_RATE_VALID |
                                    CCB_TRANS_SYNC_OFFSET_VALID;
                        }
                        splx(s);
                        if (bootverbose || isp->isp_dblev >= 3)
                                printf("%s: %d.%d get %s period 0x%x offset "
                                    "0x%x flags 0x%x\n", isp->isp_name,
                                    bus, tgt,
                                    (cts->flags & CCB_TRANS_CURRENT_SETTINGS)?
                                    "current" : "user", pval, oval, dval);
                }
                ccb->ccb_h.status = CAM_REQ_CMP;
                xpt_done(ccb);
                break;

        case XPT_CALC_GEOMETRY:
        {
                struct ccb_calc_geometry *ccg;
                u_int32_t secs_per_cylinder;
                u_int32_t size_mb;

                ccg = &ccb->ccg;
                if (ccg->block_size == 0) {
                        printf("%s: %d.%d XPT_CALC_GEOMETRY block size 0?\n",
                            isp->isp_name, ccg->ccb_h.target_id,
                            ccg->ccb_h.target_lun);
                        ccb->ccb_h.status = CAM_REQ_INVALID;
                        xpt_done(ccb);
                        break;
                }
                size_mb = ccg->volume_size /
                    ((1024L * 1024L) / ccg->block_size);
                if (size_mb > 1024) {
                        ccg->heads = 255;
                        ccg->secs_per_track = 63;
                } else {
                        ccg->heads = 64;
                        ccg->secs_per_track = 32;
                }
                secs_per_cylinder = ccg->heads * ccg->secs_per_track;
                ccg->cylinders = ccg->volume_size / secs_per_cylinder;
                ccb->ccb_h.status = CAM_REQ_CMP;
                xpt_done(ccb);
                break;
        }
        case XPT_RESET_BUS:             /* Reset the specified bus */
                tgt = cam_sim_bus(sim);
                s = splcam();
                error = isp_control(isp, ISPCTL_RESET_BUS, &tgt);
                (void) splx(s);
                if (error)
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                else {
                        if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
                                xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
                        else if (isp->isp_path != NULL)
                                xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
                        ccb->ccb_h.status = CAM_REQ_CMP;
                }
                xpt_done(ccb);
                break;

        case XPT_TERM_IO:               /* Terminate the I/O process */
                /* Does this need to be implemented? */
                ccb->ccb_h.status = CAM_REQ_INVALID;
                xpt_done(ccb);
                break;

        case XPT_PATH_INQ:              /* Path routing inquiry */
        {
                struct ccb_pathinq *cpi = &ccb->cpi;

                cpi->version_num = 1;
                cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
                cpi->target_sprt = 0;
                cpi->hba_eng_cnt = 0;
                if (IS_FC(isp)) {
                        cpi->hba_misc = PIM_NOBUSRESET;
                        cpi->max_target = MAX_FC_TARG-1;
                        cpi->initiator_id =
                            ((fcparam *)isp->isp_param)->isp_loopid;
#ifdef  SCCLUN
                        cpi->max_lun = (1 << 16) - 1;
#else
                        cpi->max_lun = (1 << 4) - 1;
#endif
                        /*
                         * Set base transfer capabilities for Fibre Channel.
                         * Technically not correct because we don't know
                         * what media we're running on top of- but we'll
                         * look good if we always say 100MB/s.
                         */
                        cpi->base_transfer_speed = 100000;
                } else {
                        sdparam *sdp = isp->isp_param;

                        sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
                        cpi->hba_misc = 0;
                        cpi->initiator_id = sdp->isp_initiator_id;
                        cpi->max_target = MAX_TARGETS-1;
                        if (ISP_FW_REVX(isp->isp_fwrev) >=
                            ISP_FW_REV(7, 55, 0)) {
#if 0
                                /*
                                 * Too much breakage.
                                 */
                                cpi->max_lun = (1 << 5) - 1;
#else
                                cpi->max_lun = (1 << 3) - 1;
#endif
                        } else {
                                cpi->max_lun = (1 << 3) - 1;
                        }
                        cpi->base_transfer_speed = 3300;
                }
                cpi->bus_id = cam_sim_bus(sim);
                strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
                strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
                strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
                cpi->unit_number = cam_sim_unit(sim);
                cpi->ccb_h.status = CAM_REQ_CMP;
                xpt_done(ccb);
                break;
        }
        default:
                ccb->ccb_h.status = CAM_REQ_INVALID;
                xpt_done(ccb);
                break;
        }
}

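/*
 * Worked, self-contained restatement of the XPT_CALC_GEOMETRY arithmetic
 * used by isp_action() above (wds_action() below uses fixed 64 heads and
 * 16 sectors/track instead).  calc_geometry() is a hypothetical helper
 * name, not part of either driver.  block_size is assumed to divide 1 MB
 * evenly, as it does for the usual 512-byte sectors.
 */
#include <assert.h>
#include <stdint.h>

static void
calc_geometry(uint32_t volume_size, uint32_t block_size,
    uint32_t *heads, uint32_t *spt, uint32_t *cyls)
{
        /* Blocks per megabyte, so size_mb is the volume size in MB. */
        uint32_t size_mb = volume_size / ((1024L * 1024L) / block_size);

        if (size_mb > 1024) {           /* > 1 GB: use extended geometry */
                *heads = 255;
                *spt = 63;
        } else {
                *heads = 64;
                *spt = 32;
        }
        *cyls = volume_size / (*heads * *spt);
}

int
main(void)
{
        uint32_t h, s, c;

        /* A 4 GB disk with 512-byte blocks is 8388608 blocks. */
        calc_geometry(8388608, 512, &h, &s, &c);
        assert(h == 255 && s == 63);
        assert(c == 522);       /* 8388608 / (255 * 63) = 522 */
        return (0);
}
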
static void
wds_action(struct cam_sim *sim, union ccb *ccb)
{
        int unit = cam_sim_unit(sim);
        int s;

        DBG(DBX "wds%d: action 0x%x\n", unit, ccb->ccb_h.func_code);
        switch (ccb->ccb_h.func_code) {
        case XPT_SCSI_IO:
                s = splcam();
                DBG(DBX "wds%d: SCSI IO entered\n", unit);
                wds_scsi_io(sim, &ccb->csio);
                DBG(DBX "wds%d: SCSI IO returned\n", unit);
                splx(s);
                break;
        case XPT_RESET_BUS:
                /* how to do it right ? */
                printf("wds%d: reset\n", unit);
                ccb->ccb_h.status = CAM_REQ_CMP;
                xpt_done(ccb);
                break;
        case XPT_ABORT:
                ccb->ccb_h.status = CAM_UA_ABORT;
                xpt_done(ccb);
                break;
        case XPT_CALC_GEOMETRY:
        {
                struct ccb_calc_geometry *ccg;
                u_int32_t size_mb;
                u_int32_t secs_per_cylinder;

                ccg = &ccb->ccg;
                size_mb = ccg->volume_size /
                    ((1024L * 1024L) / ccg->block_size);

                ccg->heads = 64;
                ccg->secs_per_track = 16;
                secs_per_cylinder = ccg->heads * ccg->secs_per_track;
                ccg->cylinders = ccg->volume_size / secs_per_cylinder;
                ccb->ccb_h.status = CAM_REQ_CMP;
                xpt_done(ccb);
                break;
        }
        case XPT_PATH_INQ:      /* Path routing inquiry */
        {
                struct ccb_pathinq *cpi = &ccb->cpi;

                cpi->version_num = 1;   /* XXX??? */
                cpi->hba_inquiry = 0;   /* nothing fancy */
                cpi->target_sprt = 0;
                cpi->hba_misc = 0;
                cpi->hba_eng_cnt = 0;
                cpi->max_target = 7;
                cpi->max_lun = 7;
                cpi->initiator_id = WDS_HBA_ID;
                cpi->bus_id = cam_sim_bus(sim);
                cpi->base_transfer_speed = 3300;
                strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
                strncpy(cpi->hba_vid, "WD/FDC", HBA_IDLEN);
                strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
                cpi->unit_number = cam_sim_unit(sim);
                cpi->ccb_h.status = CAM_REQ_CMP;
                xpt_done(ccb);
                break;
        }
        default:
                ccb->ccb_h.status = CAM_REQ_INVALID;
                xpt_done(ccb);
                break;
        }
}

void
adv_timeout(void *arg)
{
        int s;
        union ccb *ccb;
        struct adv_softc *adv;
        struct adv_ccb_info *cinfo;

        ccb = (union ccb *)arg;
        adv = (struct adv_softc *)xpt_path_sim(ccb->ccb_h.path)->softc;
        cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;

        xpt_print_path(ccb->ccb_h.path);
        printf("Timed out\n");

        s = splcam();

        /* Have we been taken care of already?? */
        if (cinfo == NULL || cinfo->state == ACCB_FREE) {
                splx(s);
                return;
        }

        adv_stop_execution(adv);

        if ((cinfo->state & ACCB_ABORT_QUEUED) == 0) {
                struct ccb_hdr *ccb_h;

                /*
                 * In order to simplify the recovery process, we ask the XPT
                 * layer to halt the queue of new transactions and we traverse
                 * the list of pending CCBs and remove their timeouts.  This
                 * means that the driver attempts to clear only one error
                 * condition at a time.  In general, timeouts that occur
                 * close together are related anyway, so there is no benefit
                 * in attempting to handle errors in parallel.  Timeouts will
                 * be reinstated when the recovery process ends.
                 */
                adv_set_state(adv, ADV_IN_TIMEOUT);

                /* This CCB is the CCB representing our recovery actions */
                cinfo->state |= ACCB_RECOVERY_CCB|ACCB_ABORT_QUEUED;

                ccb_h = LIST_FIRST(&adv->pending_ccbs);
                while (ccb_h != NULL) {
                        untimeout(adv_timeout, ccb_h, ccb_h->timeout_ch);
                        ccb_h = LIST_NEXT(ccb_h, sim_links.le);
                }

                /* XXX Should send a BDR */
                /* Attempt an abort as our first tack */
                xpt_print_path(ccb->ccb_h.path);
                printf("Attempting abort\n");
                adv_abort_ccb(adv, ccb->ccb_h.target_id,
                    ccb->ccb_h.target_lun, ccb,
                    CAM_CMD_TIMEOUT, /*queued_only*/FALSE);
                ccb->ccb_h.timeout_ch = timeout(adv_timeout, ccb, 2 * hz);
        } else {
                /* Our attempt to perform an abort failed, go for a reset */
                xpt_print_path(ccb->ccb_h.path);
                printf("Resetting bus\n");
                ccb->ccb_h.status &= ~CAM_STATUS_MASK;
                ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
                adv_reset_bus(adv, /*initiate_reset*/TRUE);
        }

        adv_start_execution(adv);
        splx(s);
}

static void
adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
                int nsegments, int error)
{
        struct ccb_scsiio *csio;
        struct ccb_hdr *ccb_h;
        struct cam_sim *sim;
        struct adv_softc *adv;
        struct adv_ccb_info *cinfo;
        struct adv_scsi_q scsiq;
        struct adv_sg_head sghead;
        int s;

        csio = (struct ccb_scsiio *)arg;
        ccb_h = &csio->ccb_h;
        sim = xpt_path_sim(ccb_h->path);
        adv = (struct adv_softc *)cam_sim_softc(sim);
        cinfo = (struct adv_ccb_info *)csio->ccb_h.ccb_cinfo_ptr;

        /*
         * Setup our done routine to release the simq on
         * the next ccb that completes.
         */
        if ((adv->state & ADV_BUSDMA_BLOCK) != 0)
                adv->state |= ADV_BUSDMA_BLOCK_CLEARED;

        if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
                if ((ccb_h->flags & CAM_CDB_PHYS) == 0) {
                        /* XXX Need phystovirt!!!! */
                        /* How about pmap_kenter??? */
                        scsiq.cdbptr = csio->cdb_io.cdb_ptr;
                } else {
                        scsiq.cdbptr = csio->cdb_io.cdb_ptr;
                }
        } else {
                scsiq.cdbptr = csio->cdb_io.cdb_bytes;
        }

        /*
         * Build up the request
         */
        scsiq.q1.status = 0;
        scsiq.q1.q_no = 0;
        scsiq.q1.cntl = 0;
        scsiq.q1.sg_queue_cnt = 0;
        scsiq.q1.target_id = ADV_TID_TO_TARGET_MASK(ccb_h->target_id);
        scsiq.q1.target_lun = ccb_h->target_lun;
        scsiq.q1.sense_len = csio->sense_len;
        scsiq.q1.extra_bytes = 0;
        scsiq.q2.ccb_index = cinfo - adv->ccb_infos;
        scsiq.q2.target_ix = ADV_TIDLUN_TO_IX(ccb_h->target_id,
            ccb_h->target_lun);
        scsiq.q2.flag = 0;
        scsiq.q2.cdb_len = csio->cdb_len;
        if ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0)
                scsiq.q2.tag_code = csio->tag_action;
        else
                scsiq.q2.tag_code = 0;
        scsiq.q2.vm_id = 0;

        if (nsegments != 0) {
                bus_dmasync_op_t op;

                scsiq.q1.data_addr = dm_segs->ds_addr;
                scsiq.q1.data_cnt = dm_segs->ds_len;
                if (nsegments > 1) {
                        scsiq.q1.cntl |= QC_SG_HEAD;
                        sghead.entry_cnt = sghead.entry_to_copy = nsegments;
                        sghead.res = 0;
                        sghead.sg_list = adv_fixup_dmasegs(adv, dm_segs);
                        scsiq.sg_head = &sghead;
                } else {
                        scsiq.sg_head = NULL;
                }
                if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_IN)
                        op = BUS_DMASYNC_PREREAD;
                else
                        op = BUS_DMASYNC_PREWRITE;
                bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
        } else {
                scsiq.q1.data_addr = 0;
                scsiq.q1.data_cnt = 0;
                scsiq.sg_head = NULL;
        }

        s = splcam();

        /*
         * Last time we need to check if this SCB needs to
         * be aborted.
         */
        if (ccb_h->status != CAM_REQ_INPROG) {
                if (nsegments != 0)
                        bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
                adv_clear_state(adv, (union ccb *)csio);
                adv_free_ccb_info(adv, cinfo);
                xpt_done((union ccb *)csio);
                splx(s);
                return;
        }

        if (adv_execute_scsi_queue(adv, &scsiq, csio->dxfer_len) != 0) {
                /* Temporary resource shortage */
                adv_set_state(adv, ADV_RESOURCE_SHORTAGE);
                if (nsegments != 0)
                        bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
                csio->ccb_h.status = CAM_REQUEUE_REQ;
                adv_clear_state(adv, (union ccb *)csio);
                adv_free_ccb_info(adv, cinfo);
                xpt_done((union ccb *)csio);
                splx(s);
                return;
        }
        cinfo->state |= ACCB_ACTIVE;
        ccb_h->status |= CAM_SIM_QUEUED;
        LIST_INSERT_HEAD(&adv->pending_ccbs, ccb_h, sim_links.le);

        /* Schedule our timeout */
        ccb_h->timeout_ch =
            timeout(adv_timeout, csio, (ccb_h->timeout * hz) / 1000);
        splx(s);
}

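/*
 * Minimal sketch of the bus_dma(9) callback contract that
 * adv_execute_ccb() implements.  bus_dmamap_load() either invokes the
 * callback immediately or, when bounce pages are scarce, returns
 * EINPROGRESS and invokes it later; adv_action() handles the deferred
 * case by freezing its queue (ADV_BUSDMA_BLOCK) so command ordering is
 * preserved.  Names other than bus_dmamap_load()/bus_dma_segment_t are
 * hypothetical stand-ins, not part of the driver.
 */
#if 0	/* illustrative only */
static void
xfer_done(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        /* Runs once the buffer is mapped; may be immediate or deferred. */
}

static int
xfer_start(bus_dma_tag_t tag, bus_dmamap_t map, void *buf, bus_size_t len)
{
        int error;

        error = bus_dmamap_load(tag, map, buf, len, xfer_done,
            /*callback_arg*/NULL, /*flags*/0);
        if (error == EINPROGRESS) {
                /*
                 * Mapping deferred; xfer_done() fires later.  A driver
                 * must block further submissions here if it needs to
                 * keep transactions ordered.
                 */
                return (0);
        }
        return (error);
}
#endif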