int
tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
              u_int32_t param_size, void *data)
{
    struct tws_request *req;
    struct tws_command_packet *cmd_pkt;
    union tws_command_giga *cmd;
    struct tws_getset_param *param;
    u_int16_t reqid;
    u_int64_t mfa;
    int error = SUCCESS;

    req = tws_get_request(sc, TWS_REQ_TYPE_GETSET_PARAM);
    if ( req == NULL ) {
        TWS_TRACE_DEBUG(sc, "null req", 0, 0);
        return(FAILURE);
    }

    req->length = TWS_SECTOR_SIZE;
    req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT);
    if ( req->data == NULL )
        return(FAILURE);
    bzero(req->data, TWS_SECTOR_SIZE);
    param = (struct tws_getset_param *)req->data;

    req->cb = NULL;
    req->flags = TWS_DIR_IN;
    cmd_pkt = req->cmd_pkt;
    cmd = &cmd_pkt->cmd.pkt_g;
    cmd->param.sgl_off__opcode = BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_GET_PARAM);
    cmd->param.request_id = (u_int8_t)req->request_id;
    cmd->param.host_id__unit = 0;
    cmd->param.param_count = 1;
    cmd->param.size = 2; /* map routine will add sgls */

    /* Specify which parameter we want to fetch. */
    param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR);
    param->parameter_id = (u_int8_t)(param_id);
    param->parameter_size_bytes = (u_int16_t)param_size;

    error = tws_map_request(sc, req);
    if (!error) {
        reqid = tws_poll4_response(sc, &mfa);
        tws_unmap_request(sc, req);

        if ( reqid == TWS_REQ_TYPE_GETSET_PARAM ) {
            memcpy(data, param->data, param_size);
        } else {
            error = FAILURE;
        }
    }

    free(req->data, M_TWS);
    req->state = TWS_REQ_STATE_FREE;
    return(error);
}
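
/*
 * Illustrative only: a minimal sketch (kept under "#if 0" so it is not
 * compiled) of how a caller might read a single two-byte parameter with
 * tws_get_param() above.  EXAMPLE_TABLE_ID and EXAMPLE_PARAM_ID are
 * hypothetical placeholders, not constants defined by this driver.
 */
#if 0
static int
tws_example_read_param(struct tws_softc *sc)
{
    u_int16_t value = 0;

    /* On success the parameter data is copied into 'value'. */
    if ( tws_get_param(sc, EXAMPLE_TABLE_ID, EXAMPLE_PARAM_ID,
                       sizeof(value), &value) != SUCCESS ) {
        TWS_TRACE_DEBUG(sc, "get_param failed", 0, 0);
        return(FAILURE);
    }
    TWS_TRACE_DEBUG(sc, "param value", value, 0);
    return(SUCCESS);
}
#endif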
int
tws_send_generic_cmd(struct tws_softc *sc, u_int8_t opcode)
{
    struct tws_request *req;
    struct tws_cmd_generic *cmd;

    TWS_TRACE_DEBUG(sc, "entry", sc, opcode);
    req = tws_get_request(sc, TWS_REQ_TYPE_INTERNAL_CMD);
    if ( req == NULL ) {
        TWS_TRACE_DEBUG(sc, "no requests", 0, 0);
        return(FAILURE);
    }

    cmd = &(req->cmd_pkt->cmd.pkt_g.generic);
    bzero(cmd, sizeof(struct tws_cmd_generic));
    /* req->cmd_pkt->hdr.header_desc.size_header = 128; */
    req->cb = tws_cmd_complete;

    cmd->sgl_off__opcode = BUILD_RES__OPCODE(0, opcode);
    cmd->size = 2;
    cmd->request_id = req->request_id;
    cmd->host_id__unit = 0;
    cmd->status = 0;
    cmd->flags = 0;
    cmd->count = 0;

    req->error_code = tws_submit_command(sc, req);

    return(SUCCESS);
}
int
tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
              u_int32_t param_size, void *data)
{
    struct tws_request *req;
    struct tws_command_packet *cmd_pkt;
    union tws_command_giga *cmd;
    struct tws_getset_param *param;
    int error;

    req = tws_get_request(sc, TWS_REQ_TYPE_GETSET_PARAM);
    if ( req == NULL ) {
        TWS_TRACE_DEBUG(sc, "null req", 0, 0);
        return(ENOMEM);
    }

    req->length = TWS_SECTOR_SIZE;
    req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT);
    if ( req->data == NULL )
        return(ENOMEM);
    bzero(req->data, TWS_SECTOR_SIZE);
    param = (struct tws_getset_param *)req->data;

    req->cb = tws_getset_param_complete;
    req->flags = TWS_DIR_OUT;
    cmd_pkt = req->cmd_pkt;
    cmd = &cmd_pkt->cmd.pkt_g;
    cmd->param.sgl_off__opcode = BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_SET_PARAM);
    cmd->param.request_id = (u_int8_t)req->request_id;
    cmd->param.host_id__unit = 0;
    cmd->param.param_count = 1;
    cmd->param.size = 2; /* map routine will add sgls */

    /* Specify which parameter we want to set. */
    param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR);
    param->parameter_id = (u_int8_t)(param_id);
    param->parameter_size_bytes = (u_int16_t)param_size;
    memcpy(param->data, data, param_size);

    req->thandle = timeout(tws_timeout, req, (TWS_IOCTL_TIMEOUT * hz));
    error = tws_map_request(sc, req);

    return(error);
}
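
/*
 * Illustrative only: a minimal sketch (not compiled) of writing a
 * parameter with tws_set_param() above.  Unlike tws_get_param(), which
 * polls for the reply, this request completes asynchronously through
 * tws_getset_param_complete(), so the caller only learns whether the
 * submission itself succeeded.  EXAMPLE_TABLE_ID and EXAMPLE_PARAM_ID
 * are hypothetical placeholders, not constants defined by this driver.
 */
#if 0
static int
tws_example_write_param(struct tws_softc *sc)
{
    u_int16_t value = 1;

    /* Returns 0 if the request was queued; completion is reported later. */
    return(tws_set_param(sc, EXAMPLE_TABLE_ID, EXAMPLE_PARAM_ID,
                         sizeof(value), &value));
}
#endif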
int
tws_send_scsi_cmd(struct tws_softc *sc, int cmd)
{
    struct tws_request *req;
    struct tws_command_packet *cmd_pkt;
    int error;

    TWS_TRACE_DEBUG(sc, "entry", sc, cmd);
    req = tws_get_request(sc, TWS_AEN_FETCH_REQ);

    if ( req == NULL )
        return(ENOMEM);

    req->type = TWS_AEN_FETCH_REQ;
    req->cb = tws_aen_complete;
    cmd_pkt = req->cmd_pkt;
    cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI;
    cmd_pkt->cmd.pkt_a.status = 0;
    cmd_pkt->cmd.pkt_a.unit = 0;
    cmd_pkt->cmd.pkt_a.sgl_offset = 16;
    cmd_pkt->cmd.pkt_a.lun_l4__req_id = req->request_id;
    cmd_pkt->cmd.pkt_a.cdb[0] = (u_int8_t)cmd;
    cmd_pkt->cmd.pkt_a.cdb[4] = 128;

    req->length = TWS_SECTOR_SIZE;
    req->data = kmalloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT | M_ZERO);
    if ( req->data == NULL )
        return(ENOMEM);
    req->flags = TWS_DIR_IN;

    callout_reset(&req->thandle, (TWS_IO_TIMEOUT * hz), tws_timeout, req);
    error = tws_map_request(sc, req);
    return(error);
}
int
tws_init_connect(struct tws_softc *sc, u_int16_t mcreadits)
{
    struct tws_request *req;
    struct tws_cmd_init_connect *initc;
    u_int16_t reqid;
    u_int64_t mfa;

    TWS_TRACE_DEBUG(sc, "entry", 0, mcreadits);
#if 0
    req = tws_get_request(sc, TWS_REQ_TYPE_INTERNAL_CMD);
#else // 0
    req = &sc->reqs[TWS_REQ_TYPE_INTERNAL_CMD];
    bzero(&req->cmd_pkt->cmd, sizeof(struct tws_command_apache));
    req->data = NULL;
    req->length = 0;
    req->type = TWS_REQ_TYPE_INTERNAL_CMD;
    req->flags = TWS_DIR_UNKNOWN;
    req->error_code = TWS_REQ_RET_INVALID;
    req->cb = NULL;
    req->ccb_ptr = NULL;
    req->thandle.callout = NULL;
    req->next = req->prev = NULL;
    req->state = TWS_REQ_STATE_BUSY;
#endif // 0

    if ( req == NULL ) {
        TWS_TRACE_DEBUG(sc, "no requests", 0, 0);
        // device_printf(sc->tws_dev, "No requests for initConnect\n");
        return(FAILURE);
    }

    tws_swap16(0xbeef);     /* just for test */
    tws_swap32(0xdeadbeef); /* just for test */
    tws_swap64(0xdeadbeef); /* just for test */

    initc = &(req->cmd_pkt->cmd.pkt_g.init_connect);
    /* req->cmd_pkt->hdr.header_desc.size_header = 128; */

    initc->res1__opcode = BUILD_RES__OPCODE(0, TWS_FW_CMD_INIT_CONNECTION);
    initc->size = 6;
    initc->request_id = req->request_id;
    initc->message_credits = mcreadits;
    initc->features |= TWS_BIT_EXTEND;
    if ( sc->is64bit && !tws_use_32bit_sgls )
        initc->features |= TWS_64BIT_SG_ADDRESSES;
    /* assuming set features is always on */

    initc->size = 6;
    initc->fw_srl = sc->cinfo.working_srl = TWS_CURRENT_FW_SRL;
    initc->fw_arch_id = 0;
    initc->fw_branch = sc->cinfo.working_branch = 0;
    initc->fw_build = sc->cinfo.working_build = 0;

    req->error_code = tws_submit_command(sc, req);
    reqid = tws_poll4_response(sc, &mfa);
    if ( reqid != TWS_INVALID_REQID && reqid == req->request_id ) {
        sc->cinfo.fw_on_ctlr_srl = initc->fw_srl;
        sc->cinfo.fw_on_ctlr_branch = initc->fw_branch;
        sc->cinfo.fw_on_ctlr_build = initc->fw_build;
        sc->stats.reqs_out++;
        req->state = TWS_REQ_STATE_FREE;
    } else {
        /*
         * REVISIT::If init connect fails we need to reset the ctlr
         * and try again?
         */
        TWS_TRACE(sc, "unexpected req_id ", reqid, 0);
        TWS_TRACE(sc, "INITCONNECT FAILED", reqid, 0);
        return(FAILURE);
    }

    return(SUCCESS);
}
static int
tws_passthru(struct tws_softc *sc, void *buf)
{
    struct tws_request *req;
    struct tws_ioctl_no_data_buf *ubuf = (struct tws_ioctl_no_data_buf *)buf;
    int error;
    u_int16_t lun4;

    if ( tws_get_state(sc) != TWS_ONLINE) {
        return(EBUSY);
    }

    //==========================================================================
    // Get a command
    //
    do {
        req = tws_get_request(sc, TWS_REQ_TYPE_PASSTHRU);
        if ( !req ) {
            error = tsleep(sc, 0, "tws_sleep", TWS_IOCTL_TIMEOUT*hz);
            if ( error == EWOULDBLOCK ) {
                return(ETIMEDOUT);
            }
        } else {
            // Make sure we are still ready for new commands...
            if ( tws_get_state(sc) != TWS_ONLINE) {
                return(EBUSY);
            }
            break;
        }
    } while (1);

    req->length = (ubuf->driver_pkt.buffer_length + 511) & ~511;
    TWS_TRACE_DEBUG(sc, "datal,rid", req->length, req->request_id);
    if ( req->length ) {
        req->data = sc->ioctl_data_mem;
        req->dma_map = sc->ioctl_data_map;

        //======================================================================
        // Copy data in from user space
        //
        error = copyin(ubuf->pdata, req->data, req->length);
    }

    //==========================================================================
    // Set command fields
    //
    req->flags = TWS_DIR_IN | TWS_DIR_OUT;
    req->cb = tws_passthru_complete;

    memcpy(&req->cmd_pkt->cmd, &ubuf->cmd_pkt.cmd,
           sizeof(struct tws_command_apache));

    if ( GET_OPCODE(req->cmd_pkt->cmd.pkt_a.res__opcode) ==
         TWS_FW_CMD_EXECUTE_SCSI ) {
        lun4 = req->cmd_pkt->cmd.pkt_a.lun_l4__req_id & 0xF000;
        req->cmd_pkt->cmd.pkt_a.lun_l4__req_id = lun4 | req->request_id;
    } else {
        req->cmd_pkt->cmd.pkt_g.generic.request_id = (u_int8_t) req->request_id;
    }

    //==========================================================================
    // Send command to controller
    //
    error = tws_map_request(sc, req);
    if (error) {
        ubuf->driver_pkt.os_status = error;
        goto out_data;
    }

    if ( req->state == TWS_REQ_STATE_COMPLETE ) {
        ubuf->driver_pkt.os_status = req->error_code;
        goto out_unmap;
    }

    mtx_lock(&sc->gen_lock);
    error = mtx_sleep(req, &sc->gen_lock, 0, "tws_passthru", TWS_IOCTL_TIMEOUT*hz);
    mtx_unlock(&sc->gen_lock);

    if (( req->state != TWS_REQ_STATE_COMPLETE ) && ( error == EWOULDBLOCK )) {
        TWS_TRACE_DEBUG(sc, "msleep timeout", error, req->request_id);
        tws_timeout((void*) req);
    }

out_unmap:
    if ( req->error_code == TWS_REQ_RET_RESET ) {
        error = EBUSY;
        req->error_code = EBUSY;
        TWS_TRACE_DEBUG(sc, "ioctl reset", error, req->request_id);
    }

    tws_unmap_request(sc, req);

    //==========================================================================
    // Return command status to user space
    //
    memcpy(&ubuf->cmd_pkt.hdr, &req->cmd_pkt->hdr, sizeof(struct tws_command_apache));
    memcpy(&ubuf->cmd_pkt.cmd, &req->cmd_pkt->cmd, sizeof(struct tws_command_apache));

out_data:
    if ( req->length ) {
        //======================================================================
        // Copy data out to user space
        //
        if ( !error )
            error = copyout(req->data, ubuf->pdata, ubuf->driver_pkt.buffer_length);
    }

    if ( error )
        TWS_TRACE_DEBUG(sc, "errored", error, 0);

    if ( req->error_code != TWS_REQ_RET_SUBMIT_SUCCESS )
        ubuf->driver_pkt.os_status = error;

    //==========================================================================
    // Free command
    //
    req->state = TWS_REQ_STATE_FREE;
    wakeup_one(sc);

    return(error);
}
static int32_t
tws_execute_scsi(struct tws_softc *sc, union ccb *ccb)
{
    struct tws_command_packet *cmd_pkt;
    struct tws_request *req;
    struct ccb_hdr *ccb_h = &(ccb->ccb_h);
    struct ccb_scsiio *csio = &(ccb->csio);
    int error;
    u_int16_t lun;

    mtx_assert(&sc->sim_lock, MA_OWNED);
    if (ccb_h->target_id >= TWS_MAX_NUM_UNITS) {
        TWS_TRACE_DEBUG(sc, "target id too big", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status |= CAM_TID_INVALID;
        xpt_done(ccb);
        return(0);
    }
    if (ccb_h->target_lun >= TWS_MAX_NUM_LUNS) {
        TWS_TRACE_DEBUG(sc, "target lun too big", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status |= CAM_LUN_INVALID;
        xpt_done(ccb);
        return(0);
    }

    if(ccb_h->flags & CAM_CDB_PHYS) {
        TWS_TRACE_DEBUG(sc, "cdb phy", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status = CAM_REQ_INVALID;
        xpt_done(ccb);
        return(0);
    }

    /*
     * We are going to work on this request.  Mark it as enqueued (though
     * we don't actually queue it...)
     */
    ccb_h->status |= CAM_SIM_QUEUED;

    req = tws_get_request(sc, TWS_REQ_TYPE_SCSI_IO);
    if ( !req ) {
        TWS_TRACE_DEBUG(sc, "no reqs", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status |= CAM_REQUEUE_REQ;
        xpt_done(ccb);
        return(0);
    }

    if((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
        if(ccb_h->flags & CAM_DIR_IN)
            req->flags |= TWS_DIR_IN;
        if(ccb_h->flags & CAM_DIR_OUT)
            req->flags |= TWS_DIR_OUT;
    } else {
        req->flags = TWS_DIR_NONE; /* no data */
    }

    req->type = TWS_REQ_TYPE_SCSI_IO;
    req->cb = tws_scsi_complete;

    cmd_pkt = req->cmd_pkt;
    /* cmd_pkt->hdr.header_desc.size_header = 128; */
    cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI;
    cmd_pkt->cmd.pkt_a.unit = ccb_h->target_id;
    cmd_pkt->cmd.pkt_a.status = 0;
    cmd_pkt->cmd.pkt_a.sgl_offset = 16;

    /* lower nibble */
    lun = ccb_h->target_lun & 0XF;
    lun = lun << 12;
    cmd_pkt->cmd.pkt_a.lun_l4__req_id = lun | req->request_id;
    /* upper nibble */
    lun = ccb_h->target_lun & 0XF0;
    lun = lun << 8;
    cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries = lun;

#ifdef TWS_DEBUG
    if ( csio->cdb_len > 16 )
        TWS_TRACE(sc, "cdb len too big", ccb_h->target_id, csio->cdb_len);
#endif

    if(ccb_h->flags & CAM_CDB_POINTER)
        bcopy(csio->cdb_io.cdb_ptr, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);
    else
        bcopy(csio->cdb_io.cdb_bytes, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);

    req->data = ccb;
    req->flags |= TWS_DATA_CCB;
    /* save ccb ptr */
    req->ccb_ptr = ccb;
    /*
     * tws_map_load_data_callback will fill in the SGL,
     * and submit the I/O.
     */
    sc->stats.scsi_ios++;
    ccb_h->timeout_ch = timeout(tws_timeout, req, (ccb_h->timeout * hz)/1000);
    error = tws_map_request(sc, req);
    return(error);
}
static int
tws_passthru(struct tws_softc *sc, void *buf)
{
    struct tws_request *req;
    struct tws_ioctl_no_data_buf *ubuf = (struct tws_ioctl_no_data_buf *)buf;
    int error;
    u_int16_t lun4;

    if ( tws_get_state(sc) == TWS_RESET ) {
        return(EBUSY);
    }

    do {
        req = tws_get_request(sc, TWS_PASSTHRU_REQ);
        if ( !req ) {
            sc->chan = 1;
            error = tsleep((void *)&sc->chan, 0, "tws_sleep", TWS_IO_TIMEOUT*hz);
            if ( error == EWOULDBLOCK ) {
                return(ETIMEDOUT);
            }
        } else {
            break;
        }
    } while (1);

    req->length = ubuf->driver_pkt.buffer_length;
    TWS_TRACE_DEBUG(sc, "datal,rid", req->length, req->request_id);
    if ( req->length ) {
        req->data = kmalloc(req->length, M_TWS, M_WAITOK | M_ZERO);
        error = copyin(ubuf->pdata, req->data, req->length);
    }
    req->flags = TWS_DIR_IN | TWS_DIR_OUT;
    req->cb = tws_passthru_complete;

    memcpy(&req->cmd_pkt->cmd, &ubuf->cmd_pkt.cmd,
           sizeof(struct tws_command_apache));

    if ( GET_OPCODE(req->cmd_pkt->cmd.pkt_a.res__opcode) ==
         TWS_FW_CMD_EXECUTE_SCSI ) {
        lun4 = req->cmd_pkt->cmd.pkt_a.lun_l4__req_id & 0xF000;
        req->cmd_pkt->cmd.pkt_a.lun_l4__req_id = lun4 | req->request_id;
    } else {
        req->cmd_pkt->cmd.pkt_g.generic.request_id = (u_int8_t) req->request_id;
    }

    lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
    req->error_code = tws_map_request(sc, req);
    error = lksleep(req, &sc->gen_lock, 0, "tws_passthru", TWS_IO_TIMEOUT*hz);
    if ( error == EWOULDBLOCK ) {
        error = ETIMEDOUT;
        TWS_TRACE_DEBUG(sc, "lksleep timeout", error, req->request_id);
        tws_reset((void *)sc);
    }
    if ( req->error_code == TWS_REQ_REQUEUE ) {
        error = EBUSY;
    }
    tws_unmap_request(sc, req);
    memcpy(&ubuf->cmd_pkt.hdr, &req->cmd_pkt->hdr, sizeof(struct tws_command_apache));
    memcpy(&ubuf->cmd_pkt.cmd, &req->cmd_pkt->cmd, sizeof(struct tws_command_apache));
    if ( !error && req->length ) {
        error = copyout(req->data, ubuf->pdata, req->length);
    }
    kfree(req->data, M_TWS);
    req->state = TWS_REQ_STATE_FREE;
    lockmgr(&sc->gen_lock, LK_RELEASE);

    if ( error )
        TWS_TRACE_DEBUG(sc, "errored", error, 0);
    if ( req->error_code != TWS_REQ_SUBMIT_SUCCESS )
        ubuf->driver_pkt.os_status = error;

    if ( sc->chan && tws_get_state(sc) != TWS_RESET ) {
        sc->chan = 0;
        wakeup((void *)&sc->chan);
    }
    return(error);
}
static int32_t
tws_execute_scsi(struct tws_softc *sc, union ccb *ccb)
{
    struct tws_command_packet *cmd_pkt;
    struct tws_request *req;
    struct ccb_hdr *ccb_h = &(ccb->ccb_h);
    struct ccb_scsiio *csio = &(ccb->csio);
    int error;
    u_int16_t lun;

    KKASSERT(lockstatus(&sc->sim_lock, curthread) != 0);
    if (ccb_h->target_id >= TWS_MAX_NUM_UNITS) {
        TWS_TRACE_DEBUG(sc, "target id too big", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status |= CAM_TID_INVALID;
        xpt_done(ccb);
        return(0);
    }
    if (ccb_h->target_lun >= TWS_MAX_NUM_LUNS) {
        TWS_TRACE_DEBUG(sc, "target lun too big", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status |= CAM_LUN_INVALID;
        xpt_done(ccb);
        return(0);
    }

    if(ccb_h->flags & CAM_CDB_PHYS) {
        TWS_TRACE_DEBUG(sc, "cdb phy", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status = CAM_REQ_CMP_ERR;
        xpt_done(ccb);
        return(0);
    }

    /*
     * We are going to work on this request.  Mark it as enqueued (though
     * we don't actually queue it...)
     */
    ccb_h->status |= CAM_SIM_QUEUED;

    req = tws_get_request(sc, TWS_SCSI_IO_REQ);
    if ( !req ) {
        TWS_TRACE_DEBUG(sc, "no reqs", ccb_h->target_id, ccb_h->target_lun);
        /* tws_freeze_simq(sc); */
        ccb_h->status |= CAM_REQUEUE_REQ;
        xpt_done(ccb);
        return(0);
    }

    if((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
        if(ccb_h->flags & CAM_DIR_IN)
            req->flags = TWS_DIR_IN;
        else
            req->flags = TWS_DIR_OUT;
    } else {
        req->flags = TWS_DIR_NONE; /* no data */
    }

    req->type = TWS_SCSI_IO_REQ;
    req->cb = tws_scsi_complete;

    cmd_pkt = req->cmd_pkt;
    /* cmd_pkt->hdr.header_desc.size_header = 128; */
    cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI;
    cmd_pkt->cmd.pkt_a.unit = ccb_h->target_id;
    cmd_pkt->cmd.pkt_a.status = 0;
    cmd_pkt->cmd.pkt_a.sgl_offset = 16;

    /* lower nibble */
    lun = ccb_h->target_lun & 0XF;
    lun = lun << 12;
    cmd_pkt->cmd.pkt_a.lun_l4__req_id = lun | req->request_id;
    /* upper nibble */
    lun = ccb_h->target_lun & 0XF0;
    lun = lun << 8;
    cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries = lun;

#ifdef TWS_DEBUG
    if ( csio->cdb_len > 16 )
        TWS_TRACE(sc, "cdb len too big", ccb_h->target_id, csio->cdb_len);
#endif

    if(ccb_h->flags & CAM_CDB_POINTER)
        bcopy(csio->cdb_io.cdb_ptr, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);
    else
        bcopy(csio->cdb_io.cdb_bytes, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);

    if (!(ccb_h->flags & CAM_DATA_PHYS)) {
        /* Virtual data addresses.  Need to convert them... */
        if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
            if (csio->dxfer_len > TWS_MAX_IO_SIZE) {
                TWS_TRACE(sc, "I/O is big", csio->dxfer_len, 0);
                tws_release_request(req);
                ccb_h->status = CAM_REQ_TOO_BIG;
                xpt_done(ccb);
                return(0);
            }

            req->length = csio->dxfer_len;
            if (req->length) {
                req->data = csio->data_ptr;
                /* there is 1 sgl entry */
                /* cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries |= 1; */
            }
        } else {
            TWS_TRACE_DEBUG(sc, "got sglist", ccb_h->target_id, ccb_h->target_lun);
            tws_release_request(req);
            ccb_h->status = CAM_REQ_CMP_ERR;
            xpt_done(ccb);
            return(0);
        }
    } else {
        /* Data addresses are physical. */
        TWS_TRACE_DEBUG(sc, "Phy data addr", ccb_h->target_id, ccb_h->target_lun);
        tws_release_request(req);
        ccb_h->status = CAM_REQ_CMP_ERR;
        ccb_h->status |= CAM_RELEASE_SIMQ;
        ccb_h->status &= ~CAM_SIM_QUEUED;
        xpt_done(ccb);
        return(0);
    }

    /* save ccb ptr */
    req->ccb_ptr = ccb;
    /*
     * tws_map_load_data_callback will fill in the SGL,
     * and submit the I/O.
     */
    sc->stats.scsi_ios++;
    callout_reset(ccb_h->timeout_ch, (ccb_h->timeout * hz)/1000, tws_timeout, req);
    error = tws_map_request(sc, req);
    return(error);
}
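
/*
 * Worked example (illustrative only) of the LUN packing performed in
 * tws_execute_scsi() above.  Assuming target_lun = 0x35 and
 * request_id = 0x012:
 *
 *   lower nibble: (0x35 & 0x0F) << 12 = 0x5000
 *                 lun_l4__req_id      = 0x5000 | 0x012 = 0x5012
 *   upper nibble: (0x35 & 0xF0) << 8  = 0x3000
 *                 lun_h4__sgl_entries = 0x3000
 *
 * The low bits of lun_h4__sgl_entries are presumably reserved for the
 * SGL entry count (see the commented-out "|= 1" above), which the
 * mapping callback fills in when it builds the SGL.
 */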