/*
 * Function name:	twa_watchdog
 * Description:	Periodic sanity check.  Scans the request buffer for a
 *		busy request whose deadline has expired and, if one is
 *		found, flags the controller for a reset.  Then either
 *		performs the pending reset (re-arming itself 70 seconds
 *		out so the reset can complete), or, if the driver is
 *		still active, simply re-arms itself 5 seconds out.
 *		Successive runs alternate between the two callout slots.
 *
 * Input:	arg	-- the CL controller handle (as an opaque ptr)
 * Output:	None
 * Return value:	None
 */
static TW_VOID
twa_watchdog(TW_VOID *arg)
{
	struct tw_cl_ctlr_handle	*ctlr_handle =
		(struct tw_cl_ctlr_handle *)arg;
	struct twa_softc		*sc = ctlr_handle->osl_ctlr_ctxt;
	struct tw_osli_req_context	*req;
	TW_UINT64			now;
	int				reset_needed = 0;
	int				active = 0;
	int				slot;
	int				idx;

	/* Look for a busy request that has outlived its deadline. */
	now = (TW_UINT64)(tw_osl_get_local_time());
	for (idx = 0; idx < TW_OSLI_MAX_NUM_REQUESTS; idx++) {
		req = &(sc->req_ctx_buf[idx]);
		if ((req->state != TW_OSLI_REQ_STATE_BUSY) ||
		    (!(req->deadline)) || (req->deadline >= now))
			continue;
		/* Timed-out request: ask for a controller reset. */
		tw_cl_set_reset_needed(ctlr_handle);
#ifdef TW_OSL_DEBUG
		device_printf((sc)->bus_dev,
			"Request %d timed out! d = %llu, c = %llu\n",
			idx, req->deadline, now);
#else  /* TW_OSL_DEBUG */
		device_printf((sc)->bus_dev, "Request %d timed out!\n", idx);
#endif /* TW_OSL_DEBUG */
		break;
	}

	reset_needed = tw_cl_is_reset_needed(ctlr_handle);
	slot = (int)((sc->watchdog_index++) & 1);
	active = tw_cl_is_active(ctlr_handle);

	if (reset_needed) {
#ifdef TW_OSL_DEBUG
		device_printf((sc)->bus_dev,
			"Watchdog rescheduled in 70 seconds\n");
#endif /* TW_OSL_DEBUG */
		/*
		 * Re-arm the callout BEFORE resetting, far enough out
		 * that the reset has time to finish.
		 */
		callout_reset(&(sc->watchdog_callout[slot]), 70*hz,
			twa_watchdog, &sc->ctlr_handle);
		tw_cl_reset_ctlr(ctlr_handle);
#ifdef TW_OSL_DEBUG
		device_printf((sc)->bus_dev, "Watchdog reset completed!\n");
#endif /* TW_OSL_DEBUG */
	} else if (active) {
		/* Normal case: check again in 5 seconds. */
		callout_reset(&(sc->watchdog_callout[slot]), 5*hz,
			twa_watchdog, &sc->ctlr_handle);
	}
#ifdef TW_OSL_DEBUG
	if (reset_needed)
		device_printf((sc)->bus_dev, "i_need_a_reset = %d, "
			"driver_is_active = %d\n", reset_needed, active);
#endif /* TW_OSL_DEBUG */
}
/*
 * Function name:	tw_osl_timeout
 * Description:	Records the absolute completion deadline (in seconds)
 *		for a request, derived from the CAM ccb's timeout,
 *		which is given in milliseconds.
 *
 * Input:	req_handle -- ptr to request handle sent by OSL.
 * Output:	None
 * Return value:	None
 */
TW_VOID
tw_osl_timeout(struct tw_cl_req_handle *req_handle)
{
	struct tw_osli_req_context	*req = req_handle->osl_req_ctxt;
	struct ccb_hdr			*hdr =
		&(((union ccb *)(req->orig_req))->ccb_h);

	/* ccb timeout is in ms; the deadline is kept in seconds. */
	req->deadline = tw_osl_get_local_time() + (hdr->timeout / 1000);
}
/*
 * Function name:	tw_cli_poll_status
 * Description:	Poll for a given status to show up in the firmware
 *		status register.
 *
 * Input:	ctlr	-- ptr to CL internal ctlr context
 *		status	-- status bit(s) to look for
 *		timeout	-- max # of seconds to wait before giving up
 * Output:	None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_poll_status(struct tw_cli_ctlr_context *ctlr, TW_UINT32 status,
	TW_UINT32 timeout)
{
	TW_TIME		give_up_at;
	TW_UINT32	reg;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	give_up_at = tw_osl_get_local_time() + timeout;
	do {
		reg = TW_CLI_READ_STATUS_REGISTER(ctlr->ctlr_handle);
		if ((reg & status) == status) {
			/* Every requested bit is set. */
			return(TW_OSL_ESUCCESS);
		}
		/* Not there yet; wait a millisecond before re-reading. */
		tw_osl_delay(1000);
	} while (tw_osl_get_local_time() <= give_up_at);

	return(TW_OSL_ETIMEDOUT);
}
/*
 * Function name:	tw_cli_manage_aen
 * Description:	Handles AEN's (Asynchronous Event Notifications) fetched
 *		from the firmware.  A "sync time" AEN is answered by
 *		writing the host time back to the controller via
 *		tw_cli_set_param; any other AEN (except "queue empty")
 *		is queued as a controller event.
 *
 * Input:	ctlr	-- ptr to CL internal ctlr context
 *		req	-- ptr to CL internal request context
 * Output:	None
 * Return value:	AEN code of the event that was handled.  NOTE: for
 *			TWA_AEN_SYNC_TIME_WITH_HOST, req is freed to
 *			TW_CLI_FREE_Q before returning.
 */
TW_UINT16
tw_cli_manage_aen(struct tw_cli_ctlr_context *ctlr,
	struct tw_cli_req_context *req)
{
	struct tw_cl_command_header	*cmd_hdr;
	TW_UINT16			aen_code;
	TW_TIME				local_time;
	TW_TIME				sync_time;
	TW_UINT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* The AEN code is delivered in the status block's error field. */
	cmd_hdr = (struct tw_cl_command_header *)(req->data);
	aen_code = cmd_hdr->status_block.error;

	switch (aen_code) {
	case TWA_AEN_SYNC_TIME_WITH_HOST:
		tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Received AEN_SYNC_TIME");
		/*
		 * Free the internal req pkt right here, since
		 * tw_cli_set_param will need it.
		 */
		ctlr->internal_req_busy = TW_CL_FALSE;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);

		/*
		 * We will use a callback in tw_cli_set_param only when
		 * interrupts are enabled and we can expect our callback
		 * to get called.  Setting the get_more_aens
		 * flag will make the callback continue to try to retrieve
		 * more AEN's.
		 */
		if (ctlr->interrupts_enabled)
			ctlr->get_more_aens = TW_CL_TRUE;
		/* Calculate time (in seconds) since last Sunday 12.00 AM. */
		local_time = tw_osl_get_local_time();
		sync_time = (local_time - (3 * 86400)) % 604800;
		if ((error = tw_cli_set_param(ctlr, TWA_PARAM_TIME_TABLE,
				TWA_PARAM_TIME_SCHED_TIME, 4,
				&sync_time,
				(ctlr->interrupts_enabled)
				? tw_cli_param_callback : TW_CL_NULL)))
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1208, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Unable to sync time with ctlr",
				"error = %d", error);
		break;

	case TWA_AEN_QUEUE_EMPTY:
		/* No more AEN's to fetch; nothing to do. */
		tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
			"AEN queue empty");
		break;

	default:
		/* Queue the event. */
		tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Queueing AEN");
		tw_cli_create_ctlr_event(ctlr,
			TW_CL_MESSAGE_SOURCE_CONTROLLER_EVENT,
			cmd_hdr);
		break;
	} /* switch */
	return(aen_code);
}
/*
 * Function name:	tw_osli_fw_passthru
 * Description:	Builds a fw passthru cmd pkt, and submits it to CL.
 *		Copies the user payload in (if any), waits for the
 *		request to complete (resetting the controller on a
 *		genuine timeout), and copies the payload back out.
 *
 * Input:	sc	-- ptr to OSL internal ctlr context
 *		buf	-- ptr to ioctl pkt understood by CL
 * Output:	None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_osli_fw_passthru(struct twa_softc *sc, TW_INT8 *buf)
{
	struct tw_osli_req_context		*req;
	struct tw_osli_ioctl_no_data_buf	*user_buf =
		(struct tw_osli_ioctl_no_data_buf *)buf;
	TW_TIME				end_time;
	TW_UINT32			timeout = 60;	/* seconds */
	TW_UINT32			data_buf_size_adjusted;
	struct tw_cl_req_packet		*req_pkt;
	struct tw_cl_passthru_req_packet	*pt_req;
	TW_INT32			error;

	tw_osli_dbg_dprintf(5, sc, "ioctl: passthru");

	/* Grab a free request context; fail with EBUSY if none is left. */
	if ((req = tw_osli_get_request(sc)) == NULL)
		return(EBUSY);

	req->req_handle.osl_req_ctxt = req;
	req->orig_req = buf;
	req->flags |= TW_OSLI_REQ_FLAGS_PASSTHRU;
	req_pkt = &(req->req_pkt);
	req_pkt->status = 0;
	req_pkt->tw_osl_callback = tw_osl_complete_passthru;
	/* Let the Common Layer retry the request on cmd queue full. */
	req_pkt->flags |= TW_CL_REQ_RETRY_ON_BUSY;

	pt_req = &(req_pkt->gen_req_pkt.pt_req);

	/*
	 * Make sure that the data buffer sent to firmware is a
	 * 512 byte multiple in size (rounded up via sg_size_factor).
	 */
	data_buf_size_adjusted =
		(user_buf->driver_pkt.buffer_length +
		(sc->sg_size_factor - 1)) & ~(sc->sg_size_factor - 1);
	if ((req->length = data_buf_size_adjusted)) {
		if ((req->data = malloc(data_buf_size_adjusted,
			TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
			error = ENOMEM;
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2016,
				"Could not alloc mem for "
				"fw_passthru data_buf",
				error);
			goto fw_passthru_err;
		}
		/* Copy the payload. */
		if ((error = copyin((TW_VOID *)(user_buf->pdata),
			req->data,
			user_buf->driver_pkt.buffer_length)) != 0) {
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2017,
				"Could not copyin fw_passthru data_buf",
				error);
			goto fw_passthru_err;
		}
		pt_req->sgl_entries = 1; /* will be updated during mapping */
		req->flags |= (TW_OSLI_REQ_FLAGS_DATA_IN |
			TW_OSLI_REQ_FLAGS_DATA_OUT);
	} else
		pt_req->sgl_entries = 0; /* no payload */

	pt_req->cmd_pkt = (TW_VOID *)(&(user_buf->cmd_pkt));
	pt_req->cmd_pkt_length = sizeof(struct tw_cl_command_packet);

	/* Map the request; mapping also submits it. */
	if ((error = tw_osli_map_request(req)))
		goto fw_passthru_err;

	/*
	 * Wait for completion.  We sleep in slices of at most `timeout'
	 * seconds and re-derive the remaining time after each wakeup.
	 */
	end_time = tw_osl_get_local_time() + timeout;
	while (req->state != TW_OSLI_REQ_STATE_COMPLETE) {
		mtx_lock(req->ioctl_wake_timeout_lock);
		req->flags |= TW_OSLI_REQ_FLAGS_SLEEPING;

		error = mtx_sleep(req, req->ioctl_wake_timeout_lock, 0,
			"twa_passthru", timeout*hz);
		mtx_unlock(req->ioctl_wake_timeout_lock);
		/*
		 * If SLEEPING was cleared while we slept (presumably by
		 * the completion path -- it is not visible here), the
		 * wakeup was deliberate: ignore any mtx_sleep error.
		 */
		if (!(req->flags & TW_OSLI_REQ_FLAGS_SLEEPING))
			error = 0;
		req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;

		if (! error) {
			/* Clean wakeup: report any recorded failure. */
			if (((error = req->error_code)) ||
			    ((error = (req->state !=
			    TW_OSLI_REQ_STATE_COMPLETE))) ||
			    ((error = req_pkt->status)))
				goto fw_passthru_err;
			break;
		}

		if (req_pkt->status) {
			error = req_pkt->status;
			goto fw_passthru_err;
		}

		if (error == EWOULDBLOCK) {
			/* Time out! */
			if ((!(req->error_code)) &&
			    (req->state == TW_OSLI_REQ_STATE_COMPLETE) &&
			    (!(req_pkt->status))) {
				/*
				 * The request actually completed; the
				 * timeout raced the wakeup.
				 */
#ifdef TW_OSL_DEBUG
				tw_osli_printf(sc, "request = %p",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x7777,
					"FALSE Passthru timeout!",
					req);
#endif /* TW_OSL_DEBUG */
				error = 0; /* False error */
				break;
			}
			/* Genuine timeout: reset the ctlr unless one is
			 * already pending. */
			if (!(tw_cl_is_reset_needed(&(req->ctlr->ctlr_handle)))) {
#ifdef TW_OSL_DEBUG
				tw_osli_printf(sc, "request = %p",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x2018,
					"Passthru request timed out!",
					req);
#else  /* TW_OSL_DEBUG */
				device_printf((sc)->bus_dev,
					"Passthru request timed out!\n");
#endif /* TW_OSL_DEBUG */
				tw_cl_reset_ctlr(&(req->ctlr->ctlr_handle));
			}

			error = 0;
			end_time = tw_osl_get_local_time() + timeout;
			continue;
			/*
			 * Don't touch req after a reset.  It (and any
			 * associated data) will be
			 * unmapped by the callback.
			 */
		}

		/*
		 * Either the request got completed, or we were woken up by a
		 * signal. Calculate the new timeout, in case it was the latter.
		 */
		timeout = (end_time - tw_osl_get_local_time());
	} /* End of while loop */

	/* If there was a payload, copy it back. */
	if ((!error) && (req->length))
		if ((error = copyout(req->data, user_buf->pdata,
			user_buf->driver_pkt.buffer_length)))
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2019,
				"Could not copyout fw_passthru data_buf",
				error);

fw_passthru_err:
	/* A bus reset while the cmd was outstanding maps to EBUSY. */
	if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET)
		error = EBUSY;

	user_buf->driver_pkt.os_status = error;
	/* Free resources. */
	if (req->data)
		free(req->data, TW_OSLI_MALLOC_CLASS);
	tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	return(error);
}
/*
 * Function name:	tw_osli_execute_scsi
 * Description:	Build a fw cmd, based on a CAM style ccb, and
 *		send it down.  On validation failure the ccb is
 *		completed immediately with the appropriate CAM status.
 *
 * Input:	req	-- ptr to OSL internal request context
 *		ccb	-- ptr to CAM style ccb
 * Output:	None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_osli_execute_scsi(struct tw_osli_req_context *req, union ccb *ccb)
{
	struct twa_softc		*sc = req->ctlr;
	struct tw_cl_req_packet		*req_pkt;
	struct tw_cl_scsi_req_packet	*scsi_req;
	struct ccb_hdr			*ccb_h = &(ccb->ccb_h);
	struct ccb_scsiio		*csio = &(ccb->csio);
	TW_INT32			error;

	tw_osli_dbg_dprintf(10, sc, "SCSI I/O request 0x%x",
		csio->cdb_io.cdb_bytes[0]);

	/* Reject out-of-range target ids and luns up front. */
	if (ccb_h->target_id >= TW_CL_MAX_NUM_UNITS) {
		tw_osli_dbg_dprintf(3, sc, "Invalid target. PTL = %x %x %jx",
			ccb_h->path_id, ccb_h->target_id,
			(uintmax_t)ccb_h->target_lun);
		ccb_h->status |= CAM_TID_INVALID;
		xpt_done(ccb);
		return(1);
	}
	if (ccb_h->target_lun >= TW_CL_MAX_NUM_LUNS) {
		tw_osli_dbg_dprintf(3, sc, "Invalid lun. PTL = %x %x %jx",
			ccb_h->path_id, ccb_h->target_id,
			(uintmax_t)ccb_h->target_lun);
		ccb_h->status |= CAM_LUN_INVALID;
		xpt_done(ccb);
		return(1);
	}
	if(ccb_h->flags & CAM_CDB_PHYS) {
		/* Physical CDB addresses are not supported. */
		tw_osli_printf(sc, "",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2105,
			"Physical CDB address!");
		ccb_h->status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return(1);
	}

	/*
	 * We are going to work on this request.  Mark it as enqueued (though
	 * we don't actually queue it...)
	 */
	ccb_h->status |= CAM_SIM_QUEUED;

	/* Record the data direction from the ccb flags. */
	if((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if(ccb_h->flags & CAM_DIR_IN)
			req->flags |= TW_OSLI_REQ_FLAGS_DATA_IN;
		else
			req->flags |= TW_OSLI_REQ_FLAGS_DATA_OUT;
	}

	/* Build the CL understood request packet for SCSI cmds. */
	req_pkt = &req->req_pkt;
	req_pkt->status = 0;
	req_pkt->tw_osl_callback = tw_osl_complete_io;
	scsi_req = &(req_pkt->gen_req_pkt.scsi_req);
	scsi_req->unit = ccb_h->target_id;
	scsi_req->lun = ccb_h->target_lun;
	scsi_req->sense_len = 0;
	scsi_req->sense_data = (TW_UINT8 *)(&csio->sense_data);
	scsi_req->scsi_status = 0;
	/* The CDB may be embedded in the ccb or pointed to by it. */
	if(ccb_h->flags & CAM_CDB_POINTER)
		scsi_req->cdb = csio->cdb_io.cdb_ptr;
	else
		scsi_req->cdb = csio->cdb_io.cdb_bytes;
	scsi_req->cdb_len = csio->cdb_len;

	if (csio->dxfer_len > TW_CL_MAX_IO_SIZE) {
		tw_osli_printf(sc, "size = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2106,
			"I/O size too big",
			csio->dxfer_len);
		ccb_h->status = CAM_REQ_TOO_BIG;
		ccb_h->status &= ~CAM_SIM_QUEUED;
		xpt_done(ccb);
		return(1);
	}
	if ((ccb_h->flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
		/* Virtual-address payload: single SG entry for now. */
		if ((req->length = csio->dxfer_len) != 0) {
			req->data = csio->data_ptr;
			scsi_req->sgl_entries = 1;
		}
	} else
		req->flags |= TW_OSLI_REQ_FLAGS_CCB;
	/* Deadline is in seconds; ccb timeout is in milliseconds. */
	req->deadline = tw_osl_get_local_time() + (ccb_h->timeout / 1000);

	/*
	 * twa_map_load_data_callback will fill in the SGL,
	 * and submit the I/O.
	 */
	error = tw_osli_map_request(req);
	if ((error) && (req->flags & TW_OSLI_REQ_FLAGS_FAILED)) {
		/* Mapping failed for good; complete the ccb in error. */
		req->deadline = 0;
		ccb_h->status = CAM_REQ_CMP_ERR;
		ccb_h->status &= ~CAM_SIM_QUEUED;
		xpt_done(ccb);
	}
	return(error);
}
/*
 * Function name:	tw_cl_create_event
 * Description:	Creates and queues ctlr/CL/OSL AEN's to be
 *		supplied to user-space tools on request.
 *		Also notifies OS Layer.
 * Input:	ctlr	-- ptr to CL internal ctlr context
 *		queue_event-- TW_CL_TRUE  --> queue event;
 *			TW_CL_FALSE--> don't queue event
 *					(simply notify OSL)
 *		event_src  -- source of event
 *		event_code -- AEN/error code
 *		severity -- severity of event
 *		severity_str--Text description of severity
 *		event_desc -- standard string related to the event/error
 *		event_specific_desc -- format string for additional
 *				info about the event
 *		... -- additional arguments conforming to the format
 *			specified by event_specific_desc
 * Output:	None
 * Return value:	None
 */
TW_VOID
tw_cl_create_event(struct tw_cl_ctlr_handle *ctlr_handle,
	TW_UINT8 queue_event, TW_UINT8 event_src, TW_UINT16 event_code,
	TW_UINT8 severity, TW_UINT8 *severity_str, TW_UINT8 *event_desc,
	TW_UINT8 *event_specific_desc, ...)
{
	struct tw_cli_ctlr_context	*ctlr = ctlr_handle->cl_ctlr_ctxt;
	struct tw_cl_event_packet	event_pkt;
	struct tw_cl_event_packet	*event;
	TW_UINT32			aen_head;
	va_list				ap;

	tw_cli_dbg_printf(8, ctlr_handle, tw_osl_cur_func(), "entered");

	if ((ctlr) && (queue_event)) {
		/* Protect access to ctlr->aen_head. */
		tw_osl_get_lock(ctlr_handle, ctlr->gen_lock);
		aen_head = ctlr->aen_head;
		/* Advance the head, wrapping around the circular queue. */
		ctlr->aen_head = (aen_head + 1) % ctlr->max_aens_supported;

		/* Queue the event. */
		event = &(ctlr->aen_queue[aen_head]);
		tw_osl_memzero(event->parameter_data,
			sizeof(event->parameter_data));

		/* Overwriting an un-retrieved entry means we overflowed. */
		if (event->retrieved == TW_CL_AEN_NOT_RETRIEVED)
			ctlr->aen_q_overflow = TW_CL_TRUE;
		event->sequence_id = ++(ctlr->aen_cur_seq_id);
		if ((aen_head + 1) == ctlr->max_aens_supported) {
			tw_cli_dbg_printf(4, ctlr->ctlr_handle,
				tw_osl_cur_func(), "AEN queue wrapped");
			ctlr->aen_q_wrapped = TW_CL_TRUE;
		}

		/* Free access to ctlr->aen_head. */
		tw_osl_free_lock(ctlr_handle, ctlr->gen_lock);
	} else {
		/* Not queueing: build the event on the stack, only to
		 * notify the OSL below. */
		event = &event_pkt;
		tw_osl_memzero(event, sizeof(struct tw_cl_event_packet));
	}

	event->event_src = event_src;
	event->time_stamp_sec = (TW_UINT32)tw_osl_get_local_time();
	event->aen_code = event_code;
	event->severity = severity;
	tw_osl_strcpy(event->severity_str, severity_str);
	event->retrieved = TW_CL_AEN_NOT_RETRIEVED;

	/*
	 * parameter_data holds the formatted event-specific text,
	 * followed (after its NUL terminator) by event_desc.
	 */
	va_start(ap, event_specific_desc);
	tw_osl_vsprintf(event->parameter_data, event_specific_desc, ap);
	va_end(ap);
	event->parameter_len =
		(TW_UINT8)(tw_osl_strlen(event->parameter_data));
	tw_osl_strcpy(event->parameter_data + event->parameter_len + 1,
		event_desc);
	event->parameter_len += (1 + tw_osl_strlen(event_desc));

	tw_cli_dbg_printf(4, ctlr_handle, tw_osl_cur_func(),
		"event = %x %x %x %x %x %x %x\n %s",
		event->sequence_id,
		event->time_stamp_sec,
		event->aen_code,
		event->severity,
		event->retrieved,
		event->repeat_count,
		event->parameter_len,
		event->parameter_data);

	tw_osl_notify_event(ctlr_handle, event);
}
/*
 * Function name:	tw_cli_drain_aen_queue
 * Description:	Fetches all un-retrieved AEN's posted by fw.
 *		Repeatedly issues a REQUEST_SENSE and polls for its
 *		completion, handing each AEN to tw_cli_manage_aen,
 *		until the fw reports an empty queue (or an error).
 *
 * Input:	ctlr	-- ptr to CL internal ctlr context
 * Output:	None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_drain_aen_queue(struct tw_cli_ctlr_context *ctlr)
{
	struct tw_cli_req_context	*req;
	struct tw_cl_command_header	*cmd_hdr;
	TW_TIME				end_time;
	TW_UINT16			aen_code;
	TW_INT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	for (;;) {
		if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL) {
			error = TW_OSL_EBUSY;
			break;
		}
		req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;
		req->tw_cli_callback = TW_CL_NULL;

		/* Issue a REQUEST_SENSE to fetch the next AEN. */
		if ((error = tw_cli_send_scsi_cmd(req,
				0x03 /* REQUEST_SENSE */))) {
			tw_cli_dbg_printf(1, ctlr->ctlr_handle,
				tw_osl_cur_func(),
				"Cannot send command to fetch aen");
			break;
		}

		/* Poll for completion until the request times out. */
		end_time = tw_osl_get_local_time() +
			TW_CLI_REQUEST_TIMEOUT_PERIOD;
		do {
			if ((error = req->error_code))
				/*
				 * This will take care of completion due to
				 * a reset, or a failure in
				 * tw_cli_submit_pending_queue.
				 */
				goto out;

			tw_cli_process_resp_intr(req->ctlr);

			if ((req->state != TW_CLI_REQ_STATE_BUSY) &&
				(req->state != TW_CLI_REQ_STATE_PENDING))
				break;
		} while (tw_osl_get_local_time() <= end_time);

		if (req->state != TW_CLI_REQ_STATE_COMPLETE) {
			error = TW_OSL_ETIMEDOUT;
			break;
		}

		if ((error = req->cmd_pkt->command.cmd_pkt_9k.status)) {
			cmd_hdr = &req->cmd_pkt->cmd_hdr;
#if 0
			tw_cli_create_ctlr_event(ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				cmd_hdr);
#endif // 0
			break;
		}

		aen_code = tw_cli_manage_aen(ctlr, req);
		if (aen_code == TWA_AEN_QUEUE_EMPTY)
			break;
		if (aen_code == TWA_AEN_SYNC_TIME_WITH_HOST)
			/*
			 * tw_cli_manage_aen already freed req for this
			 * AEN code; just fetch the next one.
			 */
			continue;

		/* Recycle the request for the next iteration. */
		ctlr->internal_req_busy = TW_CL_FALSE;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}

out:
	if (req) {
		if (req->data)
			ctlr->internal_req_busy = TW_CL_FALSE;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}
	return(error);
}