/*
 * Function name:	tw_cli_aen_callback
 * Description:		Callback for requests to fetch AEN's.
 *
 * Input:		req	-- ptr to completed request pkt
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cli_aen_callback(struct tw_cli_req_context *req)
{
	struct tw_cli_ctlr_context	*ctlr = req->ctlr;
	struct tw_cl_command_header	*cmd_hdr;
	struct tw_cl_command_9k		*cmd =
		&(req->cmd_pkt->command.cmd_pkt_9k);
	/* Default to "queue empty" so we don't re-arm the fetch below. */
	TW_UINT16			aen_code = TWA_AEN_QUEUE_EMPTY;
	TW_INT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
		"req_id = 0x%x, req error = %d, status = 0x%x",
		GET_REQ_ID(cmd->lun_l4__req_id), req->error_code, cmd->status);

	/*
	 * If the request was never submitted to the controller, the function
	 * that sets error is responsible for calling tw_cl_create_event.
	 */
	if (!(error = req->error_code))
		if ((error = cmd->status)) {
			/* Controller reported an error on the Request Sense. */
			cmd_hdr = (struct tw_cl_command_header *)
				(&(req->cmd_pkt->cmd_hdr));
			tw_cli_create_ctlr_event(ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				cmd_hdr);
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1206, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Request Sense failed",
				"opcode = 0x%x, status = %d",
				GET_OPCODE(cmd->res__opcode), cmd->status);
		}

	if (error) {
		/* Give the internal request back; nothing more to do. */
		ctlr->internal_req_busy = TW_CL_FALSE;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
		return;
	}

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
		"Request Sense command succeeded");

	aen_code = tw_cli_manage_aen(ctlr, req);

	/*
	 * In the TWA_AEN_SYNC_TIME_WITH_HOST case, tw_cli_manage_aen has
	 * already freed the request (it needed it for tw_cli_set_param),
	 * so only free it here for the other AEN codes.
	 */
	if (aen_code != TWA_AEN_SYNC_TIME_WITH_HOST) {
		ctlr->internal_req_busy = TW_CL_FALSE;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
		/* Keep fetching until the firmware's AEN queue is empty. */
		if (aen_code != TWA_AEN_QUEUE_EMPTY)
			if ((error = tw_cli_get_aen(ctlr)))
				tw_cl_create_event(ctlr->ctlr_handle,
					TW_CL_FALSE,
					TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
					0x1207, 0x1,
					TW_CL_SEVERITY_ERROR_STRING,
					"Failed to fetch all AEN's",
					"error = %d", error);
	}
}
/*
 * Function name:	tw_cli_submit_pending_queue
 * Description:		Kick starts any requests in the pending queue.
 *
 * Input:		ctlr	-- ptr to CL internal ctlr context
 * Output:		None
 * Return value:	0	-- all pending requests submitted successfully
 *			non-zero-- otherwise
 */
TW_INT32
tw_cli_submit_pending_queue(struct tw_cli_ctlr_context *ctlr)
{
	struct tw_cli_req_context	*req;
	TW_INT32			error = TW_OSL_ESUCCESS;

	tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/*
	 * Pull requests off the pending queue, and submit them.
	 */
	while ((req = tw_cli_req_q_remove_head(ctlr, TW_CLI_PENDING_Q)) !=
		TW_CL_NULL) {
		if ((error = tw_cli_submit_cmd(req))) {
			if (error == TW_OSL_EBUSY) {
				/* Firmware cmd queue is full; retry later. */
				tw_cli_dbg_printf(2, ctlr->ctlr_handle,
					tw_osl_cur_func(),
					"Requeueing pending request");
				req->state = TW_CLI_REQ_STATE_PENDING;
				/*
				 * Queue the request at the head of the pending
				 * queue, and break away, so we don't try to
				 * submit any more requests.
				 */
				tw_cli_req_q_insert_head(req,
					TW_CLI_PENDING_Q);
				break;
			} else {
				tw_cl_create_event(ctlr->ctlr_handle,
					TW_CL_FALSE,
					TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
					0x1202, 0x1,
					TW_CL_SEVERITY_ERROR_STRING,
					"Could not start request "
					"in pending queue",
					"request = %p, opcode = 0x%x, "
					"error = %d", req,
					GET_OPCODE(req->cmd_pkt->
						command.cmd_pkt_9k.res__opcode),
					error);
				/*
				 * Set the appropriate error and call the CL
				 * internal callback if there's one.  If the
				 * request originator is polling for completion,
				 * he should be checking req->error to
				 * determine that the request did not go
				 * through.  The request originators are
				 * responsible for the clean-up.
				 */
				req->error_code = error;
				req->state = TW_CLI_REQ_STATE_COMPLETE;
				if (req->tw_cli_callback)
					req->tw_cli_callback(req);
				/* Don't let one bad request stop the drain. */
				error = TW_OSL_ESUCCESS;
			}
		}
	}
	return(error);
}
/*
 * Function name:	tw_cl_interrupt
 * Description:		Interrupt handler.  Determines the kind of interrupt,
 *			and returns TW_CL_TRUE if it recognizes the interrupt.
 *
 * Input:		ctlr_handle	-- controller handle
 * Output:		None
 * Return value:	TW_CL_TRUE -- interrupt recognized
 *			TW_CL_FALSE-- interrupt not recognized
 */
TW_INT32
tw_cl_interrupt(struct tw_cl_ctlr_handle *ctlr_handle)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
	TW_UINT32			status_reg;
	TW_INT32			rc = TW_CL_FALSE;

	tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");

	/* If we don't have controller context, bail */
	if (ctlr == NULL)
		goto out;
	/*
	 * Bail If we get an interrupt while resetting, or shutting down.
	 */
	if (ctlr->reset_in_progress || !(ctlr->active))
		goto out;

	/* Read the status register to determine the type of interrupt. */
	status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle);
	if (tw_cli_check_ctlr_state(ctlr, status_reg))
		goto out;

	/* Clear the interrupt. */
	if (status_reg & TWA_STATUS_HOST_INTERRUPT) {
		/* Host interrupt: only acknowledged, no processing here. */
		tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
			"Host interrupt");
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
			TWA_CONTROL_CLEAR_HOST_INTERRUPT);
	}
	if (status_reg & TWA_STATUS_ATTENTION_INTERRUPT) {
		/* Firmware posted an AEN: fetch it, then ack the intr. */
		tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
			"Attention interrupt");
		rc |= TW_CL_TRUE; /* request for a deferred isr call */
		tw_cli_process_attn_intr(ctlr);
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
			TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT);
	}
	if (status_reg & TWA_STATUS_COMMAND_INTERRUPT) {
		/* Firmware can accept more cmds: drain the pending queue. */
		tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
			"Command interrupt");
		rc |= TW_CL_TRUE; /* request for a deferred isr call */
		tw_cli_process_cmd_intr(ctlr);
		/* Re-mask the cmd intr once the pending queue has emptied. */
		if ((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q])))
			== TW_CL_NULL)
			TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
				TWA_CONTROL_MASK_COMMAND_INTERRUPT);
	}
	if (status_reg & TWA_STATUS_RESPONSE_INTERRUPT) {
		/* Completed cmds are waiting in the response queue. */
		tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(),
			"Response interrupt");
		rc |= TW_CL_TRUE; /* request for a deferred isr call */
		tw_cli_process_resp_intr(ctlr);
	}
out:
	return(rc);
}
/*
 * Function name:	tw_cli_process_resp_intr
 * Description:		Looks for cmd completions from fw; queues cmds completed
 *			by fw into complete queue.
 *
 * Input:		ctlr	-- ptr to CL internal ctlr context
 * Output:		None
 * Return value:	0	-- no ctlr error
 *			non-zero-- ctlr error
 */
TW_INT32
tw_cli_process_resp_intr(struct tw_cli_ctlr_context *ctlr)
{
	TW_UINT32			resp;
	struct tw_cli_req_context	*req;
	TW_INT32			error;
	TW_UINT32			status_reg;

	tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Keep popping responses until the queue reads empty. */
	for (;;) {
		status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr->ctlr_handle);
		if ((error = tw_cli_check_ctlr_state(ctlr, status_reg)))
			break;
		if (status_reg & TWA_STATUS_RESPONSE_QUEUE_EMPTY) {
			tw_cli_dbg_printf(7, ctlr->ctlr_handle,
				tw_osl_cur_func(), "Response queue empty");
			break;
		}

		/* Response queue is not empty. */
		resp = TW_CLI_READ_RESPONSE_QUEUE(ctlr->ctlr_handle);
		{
			/* The response word carries the request's id. */
			req = &(ctlr->req_ctxt_buf[GET_RESP_ID(resp)]);
		}
		if (req->state != TW_CLI_REQ_STATE_BUSY) {
			/*
			 * Completion for a request we never posted: driver
			 * and firmware are out of sync -- reset the ctlr.
			 */
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1201, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Unposted command completed!!",
				"request = %p, status = %d",
				req, req->state);
#ifdef TW_OSL_DEBUG
			tw_cl_print_ctlr_stats(ctlr->ctlr_handle);
#endif /* TW_OSL_DEBUG */
			tw_cl_reset_ctlr(ctlr->ctlr_handle);
			return(TW_OSL_EIO);
		}

		/*
		 * Remove the request from the busy queue, mark it as complete,
		 * and enqueue it in the complete queue.
		 */
		tw_cli_req_q_remove_item(req, TW_CLI_BUSY_Q);
		req->state = TW_CLI_REQ_STATE_COMPLETE;
		tw_cli_req_q_insert_tail(req, TW_CLI_COMPLETE_Q);
	}

	/* Complete this, and other requests in the complete queue. */
	tw_cli_process_complete_queue(ctlr);

	return(error);
}
/* * Function name: tw_cl_print_ctlr_stats * Description: Prints the current status of the controller. * * Input: ctlr_handle-- controller handle * Output: None * Return value: None */ TW_VOID tw_cl_print_ctlr_stats(struct tw_cl_ctlr_handle *ctlr_handle) { struct tw_cli_ctlr_context *ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt); TW_UINT32 status_reg; TW_INT8 desc[200]; tw_cli_dbg_printf(7, ctlr->ctlr_handle, "", "entered"); /* Print current controller details. */ tw_cli_dbg_printf(0, ctlr_handle, "", "cl_ctlr_ctxt = %p", ctlr); tw_osl_memzero(desc, 200); status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle); tw_cli_dbg_printf(0, ctlr_handle, "", "status reg = 0x%x %s", status_reg, tw_cli_describe_bits(status_reg, desc)); tw_cli_dbg_printf(0, ctlr_handle, "", "CLq type current max"); tw_cli_dbg_printf(0, ctlr_handle, "", "free %04d %04d", ctlr->q_stats[TW_CLI_FREE_Q].cur_len, ctlr->q_stats[TW_CLI_FREE_Q].max_len); tw_cli_dbg_printf(0, ctlr_handle, "", "busy %04d %04d", ctlr->q_stats[TW_CLI_BUSY_Q].cur_len, ctlr->q_stats[TW_CLI_BUSY_Q].max_len); tw_cli_dbg_printf(0, ctlr_handle, "", "pending %04d %04d", ctlr->q_stats[TW_CLI_PENDING_Q].cur_len, ctlr->q_stats[TW_CLI_PENDING_Q].max_len); tw_cli_dbg_printf(0, ctlr_handle, "", "complete %04d %04d", ctlr->q_stats[TW_CLI_COMPLETE_Q].cur_len, ctlr->q_stats[TW_CLI_COMPLETE_Q].max_len); tw_cli_dbg_printf(0, ctlr_handle, "", "AEN queue head %d tail %d", ctlr->aen_head, ctlr->aen_tail); }
/* * Function name: tw_cli_notify_ctlr_info * Description: Notify OSL of controller info (fw/BIOS versions, etc.). * * Input: ctlr -- ptr to CL internal ctlr context * Output: None * Return value: None */ TW_VOID tw_cli_notify_ctlr_info(struct tw_cli_ctlr_context *ctlr) { TW_INT8 fw_ver[16]; TW_INT8 bios_ver[16]; TW_INT8 ctlr_model[16]; TW_INT32 error[3]; TW_UINT8 num_ports = 0; tw_cli_dbg_printf(5, ctlr->ctlr_handle, tw_osl_cur_func(), "entered"); /* Get the port count. */ error[0] = tw_cli_get_param(ctlr, TWA_PARAM_CONTROLLER_TABLE, TWA_PARAM_CONTROLLER_PORT_COUNT, &num_ports, 1, TW_CL_NULL); /* Get the firmware and BIOS versions. */ error[0] = tw_cli_get_param(ctlr, TWA_PARAM_VERSION_TABLE, TWA_PARAM_VERSION_FW, fw_ver, 16, TW_CL_NULL); error[1] = tw_cli_get_param(ctlr, TWA_PARAM_VERSION_TABLE, TWA_PARAM_VERSION_BIOS, bios_ver, 16, TW_CL_NULL); error[2] = tw_cli_get_param(ctlr, TWA_PARAM_VERSION_TABLE, TWA_PARAM_CTLR_MODEL, ctlr_model, 16, TW_CL_NULL); tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE, TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR, 0x1300, 0x3, TW_CL_SEVERITY_INFO_STRING, "Controller details:", "Model %.16s, %d ports, Firmware %.16s, BIOS %.16s", error[2]?(TW_INT8 *)TW_CL_NULL:ctlr_model, num_ports, error[0]?(TW_INT8 *)TW_CL_NULL:fw_ver, error[1]?(TW_INT8 *)TW_CL_NULL:bios_ver); }
/*
 * Function name:	tw_cli_find_response
 * Description:		Find a particular response in the ctlr response queue.
 *
 * Input:		ctlr	-- ptr to per ctlr structure
 *			req_id	-- request id of the response to look for
 * Output:		None
 * Return value:	0	-- success (response found)
 *			non-zero-- failure (queue drained without finding it)
 */
TW_INT32
tw_cli_find_response(struct tw_cli_ctlr_context *ctlr, TW_INT32 req_id)
{
	TW_UINT32	entry;
	TW_INT32	found_id;
	TW_UINT32	sreg;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Pop response queue entries until we hit req_id or run out. */
	while ((((sreg = TW_CLI_READ_STATUS_REGISTER(ctlr->ctlr_handle))) &
		TWA_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
		/* 9K controllers use the small response queue register. */
		if (ctlr->device_id == TW_CL_DEVICE_ID_9K) {
			entry = TW_CLI_READ_RESPONSE_QUEUE(ctlr->ctlr_handle);
			found_id = GET_RESP_ID(entry);
		} else {
			entry = TW_CLI_READ_LARGE_RESPONSE_QUEUE(
				ctlr->ctlr_handle);
			found_id = GET_LARGE_RESP_ID(entry);
		}
		if (found_id == req_id)
			return(TW_OSL_ESUCCESS); /* found the req_id */
	}

	/* No more response queue entries. */
	return(TW_OSL_ENOTTY);
}
/*
 * Function name:	tw_cli_disable_interrupts
 * Description:		Disables interrupts on the controller and records
 *			that fact in the CL controller context.
 *
 * Input:		ctlr	-- ptr to CL internal ctlr context
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cli_disable_interrupts(struct tw_cli_ctlr_context *ctlr)
{
	tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Tell the hardware first, then note the new state. */
	TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle,
		TWA_CONTROL_DISABLE_INTERRUPTS);
	ctlr->interrupts_enabled = TW_CL_FALSE;
}
/*
 * Function name:	tw_cli_param_callback
 * Description:		Callback for get/set_param requests.
 *
 * Input:		req	-- ptr to completed request pkt
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cli_param_callback(struct tw_cli_req_context *req)
{
	struct tw_cli_ctlr_context	*ctlr = req->ctlr;
	union tw_cl_command_7k		*cmd =
		&(req->cmd_pkt->command.cmd_pkt_7k);
	TW_INT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/*
	 * If the request was never submitted to the controller, the function
	 * that sets req->error is responsible for calling tw_cl_create_event.
	 */
	if (! req->error_code)
		if (cmd->param.status) {
#if 0
			tw_cli_create_ctlr_event(ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				&(req->cmd_pkt->cmd_hdr));
#endif // 0
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1204, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"get/set_param failed",
				"status = %d", cmd->param.status);
		}

	/* Give the internal request packet back to the free pool. */
	ctlr->internal_req_busy = TW_CL_FALSE;
	tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);

	/*
	 * get_more_aens is set by tw_cli_manage_aen when a time-sync param
	 * request interrupted the AEN drain; resume fetching AEN's now,
	 * unless a reset is under way.
	 */
	if ((ctlr->get_more_aens) && (!(ctlr->reset_in_progress))) {
		ctlr->get_more_aens = TW_CL_FALSE;
		tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Fetching more AEN's");
		if ((error = tw_cli_get_aen(ctlr)))
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1205, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Failed to fetch all AEN's from param_callback",
				"error = %d", error);
	}
}
/* * Function name: tw_cli_enable_interrupts * Description: Enables interrupts on the controller * * Input: ctlr -- ptr to CL internal ctlr context * Output: None * Return value: None */ TW_VOID tw_cli_enable_interrupts(struct tw_cli_ctlr_context *ctlr) { tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered"); ctlr->interrupts_enabled = TW_CL_TRUE; TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle, TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT | TWA_CONTROL_UNMASK_RESPONSE_INTERRUPT | TWA_CONTROL_ENABLE_INTERRUPTS); }
/* * Function name: tw_cl_reset_stats * Description: Resets CL maintained statistics for the controller. * * Input: ctlr_handle-- controller handle * Output: None * Return value: None */ TW_VOID tw_cl_reset_stats(struct tw_cl_ctlr_handle *ctlr_handle) { struct tw_cli_ctlr_context *ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt); tw_cli_dbg_printf(7, ctlr_handle, tw_osl_cur_func(), "entered"); ctlr->q_stats[TW_CLI_FREE_Q].max_len = 0; ctlr->q_stats[TW_CLI_BUSY_Q].max_len = 0; ctlr->q_stats[TW_CLI_PENDING_Q].max_len = 0; ctlr->q_stats[TW_CLI_COMPLETE_Q].max_len = 0; }
/* * Function name: tw_cli_process_cmd_intr * Description: This function gets called if we hit a queue full * condition earlier, and the fw is now ready for * new cmds. Submits any pending requests. * * Input: ctlr -- ptr to CL internal ctlr context * Output: None * Return value: None */ TW_VOID tw_cli_process_cmd_intr(struct tw_cli_ctlr_context *ctlr) { tw_cli_dbg_printf(6, ctlr->ctlr_handle, tw_osl_cur_func(), "entered"); /* Start any requests that might be in the pending queue. */ tw_cli_submit_pending_queue(ctlr); /* * If tw_cli_submit_pending_queue was unsuccessful due to a "cmd queue * full" condition, cmd_intr will already have been unmasked by * tw_cli_submit_cmd. We don't need to do it again... simply return. */ }
/* * Function name: tw_cli_process_complete_queue * Description: Calls the CL internal callback routine, if any, for * each request in the complete queue. * * Input: ctlr -- ptr to CL internal ctlr context * Output: None * Return value: None */ TW_VOID tw_cli_process_complete_queue(struct tw_cli_ctlr_context *ctlr) { struct tw_cli_req_context *req; tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(), "entered"); /* * Pull commands off the completed list, dispatch them appropriately. */ while ((req = tw_cli_req_q_remove_head(ctlr, TW_CLI_COMPLETE_Q)) != TW_CL_NULL) { /* Call the CL internal callback, if there's one. */ if (req->tw_cli_callback) req->tw_cli_callback(req); } }
/* * Function name: tw_cli_drain_response_queue * Description: Drain the controller response queue. * * Input: ctlr -- ptr to per ctlr structure * Output: None * Return value: 0 -- success * non-zero-- failure */ TW_INT32 tw_cli_drain_response_queue(struct tw_cli_ctlr_context *ctlr) { TW_UINT32 resp; TW_UINT32 status_reg; tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered"); for (;;) { status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr->ctlr_handle); if (status_reg & TWA_STATUS_RESPONSE_QUEUE_EMPTY) return(TW_OSL_ESUCCESS); /* no more response queue entries */ resp = TW_CLI_READ_RESPONSE_QUEUE(ctlr->ctlr_handle); } }
/* * Function name: tw_cli_get_request * Description: Gets a request pkt from the free queue. * * Input: ctlr -- ptr to CL internal ctlr context * req_pkt -- ptr to OSL built req_pkt, if there's one * Output: None * Return value: ptr to request pkt -- success * TW_CL_NULL -- failure */ struct tw_cli_req_context * tw_cli_get_request(struct tw_cli_ctlr_context *ctlr ) { struct tw_cli_req_context *req; tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered"); { /* Get a free request packet. */ req = tw_cli_req_q_remove_head(ctlr, TW_CLI_FREE_Q); } /* Initialize some fields to their defaults. */ if (req) { req->req_handle = TW_CL_NULL; req->data = TW_CL_NULL; req->length = 0; req->data_phys = 0; req->state = TW_CLI_REQ_STATE_INIT; /* req being initialized */ req->flags = 0; req->error_code = 0; req->orig_req = TW_CL_NULL; req->tw_cli_callback = TW_CL_NULL; /* * Look at the status field in the command packet to see how * it completed the last time it was used, and zero out only * the portions that might have changed. Note that we don't * care to zero out the sglist. */ if (req->cmd_pkt->command.cmd_pkt_9k.status) tw_osl_memzero(req->cmd_pkt, sizeof(struct tw_cl_command_header) + 28 /* max bytes before sglist */); else tw_osl_memzero(&(req->cmd_pkt->command), 28 /* max bytes before sglist */); } return(req); }
/*
 * Function name:	tw_cli_drain_pending_queue
 * Description:		Empties the pending queue, completing each request
 *			with TW_CL_ERR_REQ_BUS_RESET.  Internal requests get
 *			their CL callback; passthru requests get the OSL
 *			callback; external (SCSI) requests are moved to the
 *			reset queue.
 *
 * Input:		ctlr	-- ptr to CL internal ctlr context
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cli_drain_pending_queue(struct tw_cli_ctlr_context *ctlr)
{
	struct tw_cli_req_context	*req;
	struct tw_cl_req_packet		*req_pkt;

	tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/*
	 * Pull requests off the pending queue, and complete them.
	 */
	while ((req = tw_cli_req_q_remove_head(ctlr, TW_CLI_PENDING_Q)) !=
		TW_CL_NULL) {
		if (req->flags & TW_CLI_REQ_FLAGS_INTERNAL) {
			/*
			 * It's an internal request.  Set the appropriate
			 * error and call the CL internal callback if there's
			 * one.  If the request originator is polling for
			 * completion, he should be checking req->error to
			 * determine that the request did not go through.
			 * The request originators are responsible for the
			 * clean-up.
			 */
			req->error_code = TW_CL_ERR_REQ_BUS_RESET;
			if (req->tw_cli_callback)
				req->tw_cli_callback(req);
		} else if (req->flags & TW_CLI_REQ_FLAGS_PASSTHRU) {
			/* It's a passthru request.  Complete it. */
			if ((req_pkt = req->orig_req) != TW_CL_NULL) {
				req_pkt->status = TW_CL_ERR_REQ_BUS_RESET;
				if (req_pkt->tw_osl_callback)
					req_pkt->tw_osl_callback(
						req->req_handle);
			}
			tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
		} else {
			/*
			 * It's an external (SCSI) request.  Cancel its OSL
			 * timeout and add it to the reset queue.
			 */
			tw_osl_untimeout(req->req_handle);
			tw_cli_req_q_insert_tail(req, TW_CLI_RESET_Q);
		}
	} /* End of while loop */
}
/*
 * Function name:	tw_cli_complete_io
 * Description:		CL internal callback for SCSI/fw passthru requests.
 *
 * Input:		req	-- ptr to CL internal request context
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cli_complete_io(struct tw_cli_req_context *req)
{
	struct tw_cli_ctlr_context	*ctlr = req->ctlr;
	struct tw_cl_req_packet		*req_pkt =
		(struct tw_cl_req_packet *)(req->orig_req);

	tw_cli_dbg_printf(8, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	req_pkt->status = TW_CL_ERR_REQ_SUCCESS;
	if (req->error_code) {
		/* The request never made it to the controller. */
		req_pkt->status = TW_CL_ERR_REQ_UNABLE_TO_SUBMIT_COMMAND;
		goto out;
	}

	if (req->state != TW_CLI_REQ_STATE_COMPLETE) {
		/*
		 * Being called back on a request that isn't marked complete
		 * means driver and firmware are out of sync -- reset.
		 */
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1203, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"I/O completion on incomplete command!!",
			"request = %p, status = %d",
			req, req->state);
#ifdef TW_OSL_DEBUG
		tw_cl_print_ctlr_stats(ctlr->ctlr_handle);
#endif /* TW_OSL_DEBUG */
		tw_cl_reset_ctlr(ctlr->ctlr_handle);
		req_pkt->status = TW_CL_ERR_REQ_BUS_RESET;
		goto out;
	}

	if (req->flags & TW_CLI_REQ_FLAGS_PASSTHRU) {
		/* Copy the command packet back into OSL's space. */
		tw_osl_memcpy(req_pkt->gen_req_pkt.pt_req.cmd_pkt,
			req->cmd_pkt, sizeof(struct tw_cl_command_packet));
	} else
		/* SCSI request: fill in status/sense data for the OSL. */
		tw_cli_scsi_complete(req);

out:
	/* Notify the OSL, then return the request to the free pool. */
	req_pkt->tw_osl_callback(req->req_handle);
	tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
}
/*
 * Function name:	tw_cli_poll_status
 * Description:		Poll for a given status to show up in the firmware
 *			status register.
 *
 * Input:		ctlr	-- ptr to CL internal ctlr context
 *			status	-- status to look for
 *			timeout	-- max # of seconds to wait before giving up
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_poll_status(struct tw_cli_ctlr_context *ctlr, TW_UINT32 status,
	TW_UINT32 timeout)
{
	TW_TIME		deadline;
	TW_UINT32	reg_val;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	deadline = tw_osl_get_local_time() + timeout;

	/* Check at least once, then keep polling until the deadline. */
	for (;;) {
		reg_val = TW_CLI_READ_STATUS_REGISTER(ctlr->ctlr_handle);
		if ((reg_val & status) == status)
			/* Got the required bit(s). */
			return(TW_OSL_ESUCCESS);
		/* Brief delay between polls. */
		tw_osl_delay(1000);
		if (tw_osl_get_local_time() > deadline)
			return(TW_OSL_ETIMEDOUT);
	}
}
/*
 * Function name:	tw_cl_shutdown_ctlr
 * Description:		Closes logical connection with the controller.
 *
 * Input:		ctlr	-- ptr to per ctlr structure
 *			flags	-- more info passed by the OS Layer
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_shutdown_ctlr(struct tw_cl_ctlr_handle *ctlr_handle, TW_UINT32 flags)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
	TW_INT32			error;

	tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(), "entered");

	/*
	 * Mark the controller as inactive, disable any further interrupts,
	 * and notify the controller that we are going down.
	 */
	ctlr->state &= ~TW_CLI_CTLR_STATE_ACTIVE;

	tw_cli_disable_interrupts(ctlr);

	/* Let the controller know that we are going down. */
	if ((error = tw_cli_init_connection(ctlr, TWA_SHUTDOWN_MESSAGE_CREDITS,
			0, 0, 0, 0, 0, TW_CL_NULL, TW_CL_NULL, TW_CL_NULL,
			TW_CL_NULL, TW_CL_NULL)))
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1015, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Can't close connection with controller",
			"error = %d", error);

	/* On a stop-only shutdown, keep the locks around for a restart. */
	if (flags & TW_CL_STOP_CTLR_ONLY)
		goto ret;

	/* Destroy all locks used by CL. */
	tw_osl_destroy_lock(ctlr_handle, ctlr->gen_lock);
	tw_osl_destroy_lock(ctlr_handle, ctlr->io_lock);
	/*
	 * With 64-bit addressing the intr lock aliases the io lock (see
	 * tw_cl_init_ctlr), so it only gets destroyed separately otherwise.
	 */
	if (!(ctlr->flags & TW_CL_64BIT_ADDRESSES))
		tw_osl_destroy_lock(ctlr_handle, ctlr->intr_lock);

ret:
	return(error);
}
/*
 * Function name:	tw_cli_find_aen
 * Description:		Reports whether a given AEN ever occurred.
 *
 * Input:		ctlr	-- ptr to CL internal ctlr context
 *			aen_code-- AEN to look for
 * Output:		None
 * Return value:	0	-- success (AEN found in the history ring)
 *			non-zero-- failure (not found)
 */
TW_INT32
tw_cli_find_aen(struct tw_cli_ctlr_context *ctlr, TW_UINT16 aen_code)
{
	TW_UINT32	stop_index;
	TW_INT32	idx;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/*
	 * Walk the AEN ring backwards from just before the head (the most
	 * recently queued entry).  If the ring has wrapped, every slot is
	 * valid and the walk stops on coming back around to the head;
	 * otherwise only slots below the head hold data, so stop at 0.
	 */
	stop_index = ctlr->aen_q_wrapped ? ctlr->aen_head : 0;

	idx = ctlr->aen_head;
	do {
		idx = (idx + ctlr->max_aens_supported - 1) %
			ctlr->max_aens_supported;
		if (ctlr->aen_queue[idx].aen_code == aen_code)
			return(TW_OSL_ESUCCESS);
	} while (idx != stop_index);

	return(TW_OSL_EGENFAILURE);
}
/* * Function name: tw_cli_process_attn_intr * Description: This function gets called if the fw posted an AEN * (Asynchronous Event Notification). It fetches * all the AEN's that the fw might have posted. * * Input: ctlr -- ptr to CL internal ctlr context * Output: None * Return value: None */ TW_VOID tw_cli_process_attn_intr(struct tw_cli_ctlr_context *ctlr) { TW_INT32 error; tw_cli_dbg_printf(6, ctlr->ctlr_handle, tw_osl_cur_func(), "entered"); if ((error = tw_cli_get_aen(ctlr))) { /* * If the driver is already in the process of retrieveing AEN's, * we will be returned TW_OSL_EBUSY. In this case, * tw_cli_param_callback or tw_cli_aen_callback will eventually * retrieve the AEN this attention interrupt is for. So, we * don't need to print the failure. */ if (error != TW_OSL_EBUSY) tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE, TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR, 0x1200, 0x1, TW_CL_SEVERITY_ERROR_STRING, "Failed to fetch AEN", "error = %d", error); } }
/*
 * Function name:	tw_cl_start_io
 * Description:		Interface to OS Layer for accepting SCSI requests.
 *
 * Input:		ctlr_handle	-- controller handle
 *			req_pkt		-- OSL built request packet
 *			req_handle	-- request handle
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_start_io(struct tw_cl_ctlr_handle *ctlr_handle,
	struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle)
{
	struct tw_cli_ctlr_context	*ctlr;
	struct tw_cli_req_context	*req;
	struct tw_cl_command_9k		*cmd;
	struct tw_cl_scsi_req_packet	*scsi_req;
	TW_INT32			error = TW_CL_ERR_REQ_SUCCESS;

	tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");

	ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

	/*
	 * If working with a firmware version that does not support multiple
	 * luns, and this request is directed at a non-zero lun, error it
	 * back right away.
	 */
	if ((req_pkt->gen_req_pkt.scsi_req.lun) &&
		(ctlr->working_srl < TWA_MULTI_LUN_FW_SRL)) {
		req_pkt->status |= (TW_CL_ERR_REQ_INVALID_LUN |
			TW_CL_ERR_REQ_SCSI_ERROR);
		req_pkt->tw_osl_callback(req_handle);
		return(TW_CL_ERR_REQ_SUCCESS);
	}

	/* No free request contexts: ask the OSL to try again later. */
	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"Out of request context packets: returning busy");
		return(TW_OSL_EBUSY);
	}

	/* Link the OSL and CL request contexts to each other. */
	req_handle->cl_req_ctxt = req;
	req->req_handle = req_handle;
	req->orig_req = req_pkt;
	req->tw_cli_callback = tw_cli_complete_io;

	req->flags |= TW_CLI_REQ_FLAGS_EXTERNAL;
	req->flags |= TW_CLI_REQ_FLAGS_9K;

	scsi_req = &(req_pkt->gen_req_pkt.scsi_req);

	/* Build the cmd pkt. */
	cmd = &(req->cmd_pkt->command.cmd_pkt_9k);

	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	cmd->res__opcode = BUILD_RES__OPCODE(0, TWA_FW_CMD_EXECUTE_SCSI);
	cmd->unit = (TW_UINT8)(scsi_req->unit);
	cmd->lun_l4__req_id = TW_CL_SWAP16(
		BUILD_LUN_L4__REQ_ID(scsi_req->lun, req->request_id));
	cmd->status = 0;
	cmd->sgl_offset = 16; /* offset from end of hdr = max cdb len */
	tw_osl_memcpy(cmd->cdb, scsi_req->cdb, scsi_req->cdb_len);

	if (req_pkt->flags & TW_CL_REQ_CALLBACK_FOR_SGLIST) {
		/* The OSL fills the scatter/gather list in-place. */
		TW_UINT32	num_sgl_entries;

		req_pkt->tw_osl_sgl_callback(req_handle, cmd->sg_list,
			&num_sgl_entries);
		cmd->lun_h4__sgl_entries =
			TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
				num_sgl_entries));
	} else {
		/* Copy the OSL-built SG list into the command packet. */
		cmd->lun_h4__sgl_entries =
			TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
				scsi_req->sgl_entries));
		tw_cli_fill_sg_list(ctlr, scsi_req->sg_list,
			cmd->sg_list, scsi_req->sgl_entries);
	}

	/*
	 * If older requests are already pending, or a reset is in progress,
	 * queue behind them (and unmask the cmd intr so the pending queue
	 * gets drained when the firmware is ready); otherwise submit now.
	 */
	if (((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) !=
		TW_CL_NULL) || (ctlr->reset_in_progress)) {
		tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
			TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
	} else if ((error = tw_cli_submit_cmd(req))) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"Could not start request. request = %p, error = %d",
			req, error);
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}
	return(error);
}
/*
 * Function name:	tw_cli_manage_aen
 * Description:		Handles AEN's.
 *
 * Input:		ctlr	-- ptr to CL internal ctlr context
 *			req	-- ptr to CL internal request context
 * Output:		None
 * Return value:	The code of the AEN just handled
 */
TW_UINT16
tw_cli_manage_aen(struct tw_cli_ctlr_context *ctlr,
	struct tw_cli_req_context *req)
{
	struct tw_cl_command_header	*cmd_hdr;
	TW_UINT16			aen_code;
	TW_TIME				local_time;
	TW_TIME				sync_time;
	TW_UINT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* The AEN code is read out of the returned header's status block. */
	cmd_hdr = (struct tw_cl_command_header *)(req->data);
	aen_code = cmd_hdr->status_block.error;

	switch (aen_code) {
	case TWA_AEN_SYNC_TIME_WITH_HOST:
		tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Received AEN_SYNC_TIME");
		/*
		 * Free the internal req pkt right here, since
		 * tw_cli_set_param will need it.
		 */
		ctlr->internal_req_busy = TW_CL_FALSE;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);

		/*
		 * We will use a callback in tw_cli_set_param only when
		 * interrupts are enabled and we can expect our callback
		 * to get called.  Setting the get_more_aens
		 * flag will make the callback continue to try to retrieve
		 * more AEN's.
		 */
		if (ctlr->interrupts_enabled)
			ctlr->get_more_aens = TW_CL_TRUE;

		/*
		 * Calculate time (in seconds) since last Sunday 12.00 AM:
		 * back up 3 days from the (Thursday-based) epoch time, then
		 * take the remainder of a week (604800 seconds).
		 */
		local_time = tw_osl_get_local_time();
		sync_time = (local_time - (3 * 86400)) % 604800;

		if ((error = tw_cli_set_param(ctlr, TWA_PARAM_TIME_TABLE,
				TWA_PARAM_TIME_SCHED_TIME, 4,
				&sync_time,
				(ctlr->interrupts_enabled)
				? tw_cli_param_callback : TW_CL_NULL)))
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1208, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Unable to sync time with ctlr",
				"error = %d", error);

		break;

	case TWA_AEN_QUEUE_EMPTY:
		tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
			"AEN queue empty");
		break;

	default:
		/* Queue the event. */
		tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Queueing AEN");
		tw_cli_create_ctlr_event(ctlr,
			TW_CL_MESSAGE_SOURCE_CONTROLLER_EVENT,
			cmd_hdr);
		break;
	} /* switch */
	return(aen_code);
}
/*
 * Function name:	tw_cl_init_ctlr
 * Description:		Initializes driver data structures for the controller.
 *
 * Input:		ctlr_handle -- controller handle
 *			flags -- more info passed by the OS Layer
 *			device_id -- device id of the controller
 *			max_simult_reqs -- maximum # of simultaneous requests
 *					that the OS Layer expects the Common
 *					Layer to support
 *			max_aens -- maximun # of AEN's needed to be supported
 *			non_dma_mem -- ptr to allocated non-DMA memory
 *			dma_mem -- ptr to allocated DMA'able memory
 *			dma_mem_phys -- physical address of dma_mem
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_init_ctlr(struct tw_cl_ctlr_handle *ctlr_handle, TW_UINT32 flags,
	TW_INT32 device_id, TW_INT32 max_simult_reqs, TW_INT32 max_aens,
	TW_VOID *non_dma_mem, TW_VOID *dma_mem, TW_UINT64 dma_mem_phys)
{
	struct tw_cli_ctlr_context	*ctlr;
	struct tw_cli_req_context	*req;
	TW_UINT8			*free_non_dma_mem;
	TW_INT32			error = TW_OSL_ESUCCESS;
	TW_INT32			i;

	tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(), "entered");

	/* On a restart, the context already exists; skip straight ahead. */
	if (flags & TW_CL_START_CTLR_ONLY) {
		ctlr = (struct tw_cli_ctlr_context *)
			(ctlr_handle->cl_ctlr_ctxt);
		goto start_ctlr;
	}

	if (max_simult_reqs > TW_CL_MAX_SIMULTANEOUS_REQUESTS) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1000, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Too many simultaneous requests to support!",
			"requested = %d, supported = %d, error = %d\n",
			max_simult_reqs, TW_CL_MAX_SIMULTANEOUS_REQUESTS,
			TW_OSL_EBIG);
		return(TW_OSL_EBIG);
	}

	if ((non_dma_mem == TW_CL_NULL) || (dma_mem == TW_CL_NULL)) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1001, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Insufficient memory for Common Layer's internal usage",
			"error = %d\n", TW_OSL_ENOMEM);
		return(TW_OSL_ENOMEM);
	}

	/*
	 * Zero the OSL-provided chunks.  non_dma_mem is carved up below into
	 * the ctlr context, the request contexts (one extra beyond
	 * max_simult_reqs, presumably for CL-internal use -- see
	 * internal_req_data), and the AEN history queue; dma_mem holds the
	 * command packets plus a sector-sized internal data buffer.
	 */
	tw_osl_memzero(non_dma_mem, sizeof(struct tw_cli_ctlr_context) +
		(sizeof(struct tw_cli_req_context) * (max_simult_reqs + 1)) +
		(sizeof(struct tw_cl_event_packet) * max_aens));

	tw_osl_memzero(dma_mem,
		(sizeof(struct tw_cl_command_packet) *
		(max_simult_reqs + 1)) +
		TW_CLI_SECTOR_SIZE);

	free_non_dma_mem = (TW_UINT8 *)non_dma_mem;

	ctlr = (struct tw_cli_ctlr_context *)free_non_dma_mem;
	free_non_dma_mem += sizeof(struct tw_cli_ctlr_context);

	ctlr_handle->cl_ctlr_ctxt = ctlr;
	ctlr->ctlr_handle = ctlr_handle;

	ctlr->device_id = (TW_UINT32)device_id;
	ctlr->arch_id = TWA_ARCH_ID(device_id);
	ctlr->flags = flags;
	ctlr->sg_size_factor = TWA_SG_ELEMENT_SIZE_FACTOR(device_id);
	ctlr->max_simult_reqs = max_simult_reqs + 1;
	ctlr->max_aens_supported = max_aens;

	/* Initialize queues of CL internal request context packets. */
	tw_cli_req_q_init(ctlr, TW_CLI_FREE_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_BUSY_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_PENDING_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_COMPLETE_Q);

	/* Initialize all locks used by CL. */
	ctlr->gen_lock = &(ctlr->gen_lock_handle);
	tw_osl_init_lock(ctlr_handle, "tw_cl_gen_lock", ctlr->gen_lock);
	ctlr->io_lock = &(ctlr->io_lock_handle);
	tw_osl_init_lock(ctlr_handle, "tw_cl_io_lock", ctlr->io_lock);

	/*
	 * If 64 bit cmd pkt addresses are used, we will need to serialize
	 * writes to the hardware (across registers), since existing (G66)
	 * hardware will get confused if, for example, we wrote the low 32 bits
	 * of the cmd pkt address, followed by a response interrupt mask to the
	 * control register, followed by the high 32 bits of the cmd pkt
	 * address.  It will then interpret the value written to the control
	 * register as the low cmd pkt address.  So, for this case, we will
	 * make a note that we will need to synchronize control register writes
	 * with command register writes.
	 */
	if ((ctlr->flags & TW_CL_64BIT_ADDRESSES) &&
		((ctlr->device_id == TW_CL_DEVICE_ID_9K) ||
		(ctlr->device_id == TW_CL_DEVICE_ID_9K_X) ||
		(ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
		(ctlr->device_id == TW_CL_DEVICE_ID_9K_SA))) {
		ctlr->state |= TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED;
		/* Share one lock so ctrl and cmd reg writes serialize. */
		ctlr->intr_lock = ctlr->io_lock;
	} else {
		ctlr->intr_lock = &(ctlr->intr_lock_handle);
		tw_osl_init_lock(ctlr_handle, "tw_cl_intr_lock",
			ctlr->intr_lock);
	}

	/* Initialize CL internal request context packets. */
	ctlr->req_ctxt_buf = (struct tw_cli_req_context *)free_non_dma_mem;
	free_non_dma_mem += (sizeof(struct tw_cli_req_context) *
		(max_simult_reqs + 1));

	ctlr->cmd_pkt_buf = (struct tw_cl_command_packet *)dma_mem;
	ctlr->cmd_pkt_phys = dma_mem_phys;

	/* The internal data buffer sits right after the command packets. */
	ctlr->internal_req_data = (TW_UINT8 *)
		(ctlr->cmd_pkt_buf +
		(max_simult_reqs + 1));
	ctlr->internal_req_data_phys = ctlr->cmd_pkt_phys +
		(sizeof(struct tw_cl_command_packet) *
		(max_simult_reqs + 1));

	for (i = 0; i < (max_simult_reqs + 1); i++) {
		req = &(ctlr->req_ctxt_buf[i]);

		/* Pair each context with its DMA'able command packet. */
		req->cmd_pkt = &(ctlr->cmd_pkt_buf[i]);
		req->cmd_pkt_phys = ctlr->cmd_pkt_phys +
			(i * sizeof(struct tw_cl_command_packet));

		req->request_id = i;
		req->ctlr = ctlr;

		/* Insert request into the free queue. */
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}

	/* Initialize the AEN queue. */
	ctlr->aen_queue = (struct tw_cl_event_packet *)free_non_dma_mem;

start_ctlr:
	/*
	 * Disable interrupts.  Interrupts will be enabled in tw_cli_start_ctlr
	 * (only) if initialization succeeded.
	 */
	tw_cli_disable_interrupts(ctlr);

	/* Initialize the controller. */
	if ((error = tw_cli_start_ctlr(ctlr))) {
		/* Soft reset the controller, and try one more time. */
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1002, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Controller initialization failed. Retrying...",
			"error = %d\n", error);
		if ((error = tw_cli_soft_reset(ctlr))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1003, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Controller soft reset failed",
				"error = %d\n", error);
			return(error);
		} else if ((error = tw_cli_start_ctlr(ctlr))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1004, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Controller initialization retry failed",
				"error = %d\n", error);
			return(error);
		}
	}
	/* Notify some info about the controller to the OSL. */
	tw_cli_notify_ctlr_info(ctlr);

	/* Mark the controller as active. */
	ctlr->state |= TW_CLI_CTLR_STATE_ACTIVE;
	return(error);
}
/* * Function name: tw_cli_scsi_complete * Description: Completion routine for SCSI requests. * * Input: req -- ptr to CL internal request context * Output: None * Return value: None */ TW_VOID tw_cli_scsi_complete(struct tw_cli_req_context *req) { struct tw_cl_req_packet *req_pkt = (struct tw_cl_req_packet *)(req->orig_req); struct tw_cl_scsi_req_packet *scsi_req = &(req_pkt->gen_req_pkt.scsi_req); struct tw_cl_command_9k *cmd = &(req->cmd_pkt->command.cmd_pkt_9k); struct tw_cl_command_header *cmd_hdr; TW_UINT16 error; TW_UINT8 *cdb; tw_cli_dbg_printf(8, req->ctlr->ctlr_handle, tw_osl_cur_func(), "entered"); scsi_req->scsi_status = cmd->status; if (! cmd->status) return; tw_cli_dbg_printf(1, req->ctlr->ctlr_handle, tw_osl_cur_func(), "req_id = 0x%x, status = 0x%x", GET_REQ_ID(cmd->lun_l4__req_id), cmd->status); cmd_hdr = &(req->cmd_pkt->cmd_hdr); error = cmd_hdr->status_block.error; if ((error == TWA_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) || (error == TWA_ERROR_UNIT_OFFLINE)) { if (GET_LUN_L4(cmd->lun_l4__req_id)) req_pkt->status |= TW_CL_ERR_REQ_INVALID_LUN; else req_pkt->status |= TW_CL_ERR_REQ_INVALID_TARGET; } else { tw_cli_dbg_printf(2, req->ctlr->ctlr_handle, tw_osl_cur_func(), "cmd = %x %x %x %x %x %x %x", GET_OPCODE(cmd->res__opcode), GET_SGL_OFF(cmd->res__opcode), cmd->unit, cmd->lun_l4__req_id, cmd->status, cmd->sgl_offset, cmd->lun_h4__sgl_entries); cdb = (TW_UINT8 *)(cmd->cdb); tw_cli_dbg_printf(2, req->ctlr->ctlr_handle, tw_osl_cur_func(), "cdb = %x %x %x %x %x %x %x %x " "%x %x %x %x %x %x %x %x", cdb[0], cdb[1], cdb[2], cdb[3], cdb[4], cdb[5], cdb[6], cdb[7], cdb[8], cdb[9], cdb[10], cdb[11], cdb[12], cdb[13], cdb[14], cdb[15]); #if 0 /* * Print the error. Firmware doesn't yet support * the 'Mode Sense' cmd. Don't print if the cmd * is 'Mode Sense', and the error is 'Invalid field * in CDB'. */ if (! 
((cdb[0] == 0x1A) && (error == 0x10D))) tw_cli_create_ctlr_event(req->ctlr, TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR, cmd_hdr); #endif // 0 } if (scsi_req->sense_data) { tw_osl_memcpy(scsi_req->sense_data, cmd_hdr->sense_data, TWA_SENSE_DATA_LENGTH); scsi_req->sense_len = TWA_SENSE_DATA_LENGTH; req_pkt->status |= TW_CL_ERR_REQ_AUTO_SENSE_VALID; } req_pkt->status |= TW_CL_ERR_REQ_SCSI_ERROR; }
/*
 * Function name:	tw_cl_init_ctlr
 * Description:		Initializes driver data structures for the controller.
 *			Carves the OSL-supplied non-DMA memory into the CL
 *			controller context, the request context array and the
 *			AEN queue, and lays the DMA'able memory out as the
 *			command packet array followed by a sector-sized
 *			internal data buffer.  Finally (re)starts the
 *			controller.
 *
 * Input:		ctlr_handle -- controller handle
 *			flags -- more info passed by the OS Layer
 *			device_id -- device id of the controller
 *			max_simult_reqs -- maximum # of simultaneous requests
 *					that the OS Layer expects the Common
 *					Layer to support
 *			max_aens -- maximum # of AEN's needed to be supported
 *			non_dma_mem -- ptr to allocated non-DMA memory
 *			dma_mem -- ptr to allocated DMA'able memory
 *			dma_mem_phys -- physical address of dma_mem
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_init_ctlr(struct tw_cl_ctlr_handle *ctlr_handle, TW_UINT32 flags,
	TW_INT32 device_id, TW_INT32 max_simult_reqs, TW_INT32 max_aens,
	TW_VOID *non_dma_mem, TW_VOID *dma_mem, TW_UINT64 dma_mem_phys
	)
{
	struct tw_cli_ctlr_context	*ctlr;
	struct tw_cli_req_context	*req;
	TW_UINT8			*free_non_dma_mem;
	TW_INT32			error = TW_OSL_ESUCCESS;
	TW_INT32			i;

	tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(), "entered");

	/*
	 * If the OSL only wants an already initialized controller
	 * (re)started, skip all data structure setup.
	 */
	if (flags & TW_CL_START_CTLR_ONLY) {
		ctlr = (struct tw_cli_ctlr_context *)
			(ctlr_handle->cl_ctlr_ctxt);
		goto start_ctlr;
	}

	/* Refuse request counts beyond what the CL can track. */
	if (max_simult_reqs > TW_CL_MAX_SIMULTANEOUS_REQUESTS) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1000, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Too many simultaneous requests to support!",
			"requested = %d, supported = %d, error = %d\n",
			max_simult_reqs, TW_CL_MAX_SIMULTANEOUS_REQUESTS,
			TW_OSL_EBIG);
		return(TW_OSL_EBIG);
	}

	if ((non_dma_mem == TW_CL_NULL) || (dma_mem == TW_CL_NULL)
		) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1001, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Insufficient memory for Common Layer's internal usage",
			"error = %d\n", TW_OSL_ENOMEM);
		return(TW_OSL_ENOMEM);
	}

	/* Zero both memory chunks before carving them up. */
	tw_osl_memzero(non_dma_mem, sizeof(struct tw_cli_ctlr_context) +
		(sizeof(struct tw_cli_req_context) * max_simult_reqs) +
		(sizeof(struct tw_cl_event_packet) * max_aens));

	tw_osl_memzero(dma_mem,
		(sizeof(struct tw_cl_command_packet) *
		max_simult_reqs) +
		TW_CLI_SECTOR_SIZE);

	free_non_dma_mem = (TW_UINT8 *)non_dma_mem;

	/* The controller context sits at the head of the non-DMA chunk. */
	ctlr = (struct tw_cli_ctlr_context *)free_non_dma_mem;
	free_non_dma_mem += sizeof(struct tw_cli_ctlr_context);

	ctlr_handle->cl_ctlr_ctxt = ctlr;
	ctlr->ctlr_handle = ctlr_handle;

	ctlr->device_id = (TW_UINT32)device_id;
	ctlr->arch_id = TWA_ARCH_ID(device_id);
	ctlr->flags = flags;
	ctlr->sg_size_factor = TWA_SG_ELEMENT_SIZE_FACTOR(device_id);
	ctlr->max_simult_reqs = max_simult_reqs;
	ctlr->max_aens_supported = max_aens;

	/* Initialize queues of CL internal request context packets. */
	tw_cli_req_q_init(ctlr, TW_CLI_FREE_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_BUSY_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_PENDING_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_COMPLETE_Q);

	/* Initialize all locks used by CL. */
	ctlr->gen_lock = &(ctlr->gen_lock_handle);
	tw_osl_init_lock(ctlr_handle, "tw_cl_gen_lock", ctlr->gen_lock);
	ctlr->io_lock = &(ctlr->io_lock_handle);
	tw_osl_init_lock(ctlr_handle, "tw_cl_io_lock", ctlr->io_lock);

	/* Initialize CL internal request context packets. */
	ctlr->req_ctxt_buf = (struct tw_cli_req_context *)free_non_dma_mem;
	free_non_dma_mem += (sizeof(struct tw_cli_req_context) *
		max_simult_reqs);

	ctlr->cmd_pkt_buf = (struct tw_cl_command_packet *)dma_mem;
	ctlr->cmd_pkt_phys = dma_mem_phys;

	/* The internal request data buffer follows the cmd pkt array. */
	ctlr->internal_req_data = (TW_UINT8 *)
		(ctlr->cmd_pkt_buf +
		max_simult_reqs);
	ctlr->internal_req_data_phys = ctlr->cmd_pkt_phys +
		(sizeof(struct tw_cl_command_packet) *
		max_simult_reqs);

	/*
	 * Pair each request context with its command packet (virtual and
	 * physical address), and make all of them initially free.
	 */
	for (i = 0; i < max_simult_reqs; i++) {
		req = &(ctlr->req_ctxt_buf[i]);
		req->cmd_pkt = &(ctlr->cmd_pkt_buf[i]);
		req->cmd_pkt_phys = ctlr->cmd_pkt_phys +
			(i * sizeof(struct tw_cl_command_packet));
		req->request_id = i;
		req->ctlr = ctlr;

		/* Insert request into the free queue. */
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}

	/* Initialize the AEN queue. */
	ctlr->aen_queue = (struct tw_cl_event_packet *)free_non_dma_mem;

start_ctlr:
	/*
	 * Disable interrupts.  Interrupts will be enabled in
	 * tw_cli_start_ctlr (only) if initialization succeeded.
	 */
	tw_cli_disable_interrupts(ctlr);

	/* Initialize the controller. */
	if ((error = tw_cli_start_ctlr(ctlr))) {
		/* Soft reset the controller, and try one more time. */
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1002, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Controller initialization failed. Retrying...",
			"error = %d\n", error);
		if ((error = tw_cli_soft_reset(ctlr))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1003, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Controller soft reset failed",
				"error = %d\n", error);
			return(error);
		} else if ((error = tw_cli_start_ctlr(ctlr))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1004, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Controller initialization retry failed",
				"error = %d\n", error);
			return(error);
		}
	}

	/* Notify some info about the controller to the OSL. */
	tw_cli_notify_ctlr_info(ctlr);

	/* Mark the controller active. */
	ctlr->active = TW_CL_TRUE;
	return(error);
}
/*
 * Function name:	tw_cli_start_ctlr
 * Description:		Establishes a logical connection with the controller.
 *			Determines whether or not the driver is compatible
 *			with the firmware on the controller, before proceeding
 *			to work with it.  If the current-mode connection is
 *			incompatible, falls back to negotiating at the base
 *			(lowest common) firmware level.
 *
 * Input:	ctlr	-- ptr to per ctlr structure
 * Output:	None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_start_ctlr(struct tw_cli_ctlr_context *ctlr)
{
	TW_UINT16	fw_on_ctlr_srl = 0;
	TW_UINT16	fw_on_ctlr_arch_id = 0;
	TW_UINT16	fw_on_ctlr_branch = 0;
	TW_UINT16	fw_on_ctlr_build = 0;
	TW_UINT32	init_connect_result = 0;
	TW_INT32	error = TW_OSL_ESUCCESS;

	tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Wait for the controller to become ready. */
	if ((error = tw_cli_poll_status(ctlr,
			TWA_STATUS_MICROCONTROLLER_READY,
			TW_CLI_REQUEST_TIMEOUT_PERIOD))) {
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1009, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Microcontroller not ready",
			"error = %d", error);
		return(error);
	}

	/* Drain the response queue. */
	if ((error = tw_cli_drain_response_queue(ctlr))) {
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x100A, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Can't drain response queue",
			"error = %d", error);
		return(error);
	}

	/*
	 * Establish a logical connection with the controller, advertising
	 * the SRL/branch/build the driver was built against, and reading
	 * back what the firmware on the controller actually runs.
	 */
	if ((error = tw_cli_init_connection(ctlr,
			(TW_UINT16)(ctlr->max_simult_reqs),
			TWA_EXTENDED_INIT_CONNECT, TWA_CURRENT_FW_SRL,
			(TW_UINT16)(ctlr->arch_id),
			TWA_CURRENT_FW_BRANCH(ctlr->arch_id),
			TWA_CURRENT_FW_BUILD(ctlr->arch_id),
			&fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
			&fw_on_ctlr_branch, &fw_on_ctlr_build,
			&init_connect_result))) {
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x100B, 0x2, TW_CL_SEVERITY_WARNING_STRING,
			"Can't initialize connection in current mode",
			"error = %d", error);
		return(error);
	}

	{
		/* See if we can at least work with the firmware on the
		 * controller in the current mode. */
		if (init_connect_result & TWA_CTLR_FW_COMPATIBLE) {
			/* Yes, we can.  Make note of the operating mode. */
			if (init_connect_result & TWA_CTLR_FW_SAME_OR_NEWER) {
				ctlr->working_srl = TWA_CURRENT_FW_SRL;
				ctlr->working_branch =
					TWA_CURRENT_FW_BRANCH(ctlr->arch_id);
				ctlr->working_build =
					TWA_CURRENT_FW_BUILD(ctlr->arch_id);
			} else {
				ctlr->working_srl = fw_on_ctlr_srl;
				ctlr->working_branch = fw_on_ctlr_branch;
				ctlr->working_build = fw_on_ctlr_build;
			}
		} else {
			/*
			 * No, we can't.  See if we can at least work with
			 * it in the base mode.
			 */
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1010, 0x2, TW_CL_SEVERITY_WARNING_STRING,
				"Driver/Firmware mismatch. "
				"Negotiating for base level...",
				" ");
			/* Reconnect, at the base (lowest common) level. */
			if ((error = tw_cli_init_connection(ctlr,
					(TW_UINT16)(ctlr->max_simult_reqs),
					TWA_EXTENDED_INIT_CONNECT,
					TWA_BASE_FW_SRL,
					(TW_UINT16)(ctlr->arch_id),
					TWA_BASE_FW_BRANCH, TWA_BASE_FW_BUILD,
					&fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
					&fw_on_ctlr_branch, &fw_on_ctlr_build,
					&init_connect_result))) {
				tw_cl_create_event(ctlr->ctlr_handle,
					TW_CL_FALSE,
					TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
					0x1011, 0x1,
					TW_CL_SEVERITY_ERROR_STRING,
					"Can't initialize connection in "
					"base mode",
					" ");
				return(error);
			}
			if (!(init_connect_result & TWA_CTLR_FW_COMPATIBLE)) {
				/*
				 * The firmware on the controller is not even
				 * compatible with our base mode.  We cannot
				 * work with it.  Bail...
				 */
				return(1);
			}
			/*
			 * We can work with this firmware, but only in
			 * base mode.
			 */
			ctlr->working_srl = TWA_BASE_FW_SRL;
			ctlr->working_branch = TWA_BASE_FW_BRANCH;
			ctlr->working_build = TWA_BASE_FW_BUILD;
			ctlr->operating_mode = TWA_BASE_MODE;
		}
		/* Remember what firmware the controller is really running. */
		ctlr->fw_on_ctlr_srl = fw_on_ctlr_srl;
		ctlr->fw_on_ctlr_branch = fw_on_ctlr_branch;
		ctlr->fw_on_ctlr_build = fw_on_ctlr_build;
	}

	/* Drain the AEN queue */
	if ((error = tw_cli_drain_aen_queue(ctlr)))
		/*
		 * We will just print that we couldn't drain the AEN queue.
		 * There's no need to bail out.
		 */
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1014, 0x2, TW_CL_SEVERITY_WARNING_STRING,
			"Can't drain AEN queue",
			"error = %d", error);

	/* Enable interrupts. */
	tw_cli_enable_interrupts(ctlr);

	return(TW_OSL_ESUCCESS);
}
/*
 * Function name:	tw_cli_process_host_intr
 * Description:		Handler for host (driver-triggered) interrupts.
 *			Currently unused; only logs that it was invoked.
 *
 * Input:	ctlr	-- ptr to CL internal ctlr context
 * Output:	None
 * Return value:	None
 */
TW_VOID
tw_cli_process_host_intr(struct tw_cli_ctlr_context *ctlr)
{
	tw_cli_dbg_printf(6, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
}
/*
 * Function name:	tw_cli_init_connection
 * Description:		Sends init_connection cmd to firmware
 *
 * Input:	ctlr		-- ptr to per ctlr structure
 *		message_credits	-- max # of requests that we might send
 *				 down simultaneously.  This will be
 *				 typically set to 256 at init-time or
 *				 after a reset, and to 1 at shutdown-time
 *		set_features	-- indicates if we intend to use 64-bit
 *				 sg, also indicates if we want to do a
 *				 basic or an extended init_connection;
 *
 * Note: The following input/output parameters are valid, only in case of an
 *		extended init_connection:
 *
 *		current_fw_srl		-- srl of fw we are bundled
 *					 with, if any; 0 otherwise
 *		current_fw_arch_id	-- arch_id of fw we are bundled
 *					 with, if any; 0 otherwise
 *		current_fw_branch	-- branch # of fw we are bundled
 *					 with, if any; 0 otherwise
 *		current_fw_build	-- build # of fw we are bundled
 *					 with, if any; 0 otherwise
 * Output:	fw_on_ctlr_srl		-- srl of fw on ctlr
 *		fw_on_ctlr_arch_id	-- arch_id of fw on ctlr
 *		fw_on_ctlr_branch	-- branch # of fw on ctlr
 *		fw_on_ctlr_build	-- build # of fw on ctlr
 *		init_connect_result	-- result bitmap of fw response
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_init_connection(struct tw_cli_ctlr_context *ctlr,
	TW_UINT16 message_credits, TW_UINT32 set_features,
	TW_UINT16 current_fw_srl, TW_UINT16 current_fw_arch_id,
	TW_UINT16 current_fw_branch, TW_UINT16 current_fw_build,
	TW_UINT16 *fw_on_ctlr_srl, TW_UINT16 *fw_on_ctlr_arch_id,
	TW_UINT16 *fw_on_ctlr_branch, TW_UINT16 *fw_on_ctlr_build,
	TW_UINT32 *init_connect_result)
{
	struct tw_cli_req_context		*req;
	struct tw_cl_command_init_connect	*init_connect;
	TW_INT32				error = TW_OSL_EBUSY;

	tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Get a request packet. */
	if ((req = tw_cli_get_request(ctlr
		)) == TW_CL_NULL)
		goto out;

	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

	/* Build the cmd pkt. */
	init_connect = &(req->cmd_pkt->command.cmd_pkt_7k.init_connect);
	/*
	 * 128 appears to be the command header size in bytes -- presumably
	 * sizeof(struct tw_cl_command_header); confirm against the header
	 * definitions.
	 */
	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;
	init_connect->res1__opcode =
		BUILD_RES__OPCODE(0, TWA_FW_CMD_INIT_CONNECTION);
	init_connect->request_id =
		(TW_UINT8)(TW_CL_SWAP16(req->request_id));
	init_connect->message_credits = TW_CL_SWAP16(message_credits);
	init_connect->features = TW_CL_SWAP32(set_features);
	if (ctlr->flags & TW_CL_64BIT_ADDRESSES)
		init_connect->features |= TW_CL_SWAP32(TWA_64BIT_SG_ADDRESSES);
	if (set_features & TWA_EXTENDED_INIT_CONNECT) {
		/*
		 * Fill in the extra fields needed for an extended
		 * init_connect.
		 */
		init_connect->size = 6;
		init_connect->fw_srl = TW_CL_SWAP16(current_fw_srl);
		init_connect->fw_arch_id = TW_CL_SWAP16(current_fw_arch_id);
		init_connect->fw_branch = TW_CL_SWAP16(current_fw_branch);
		init_connect->fw_build = TW_CL_SWAP16(current_fw_build);
	} else
		init_connect->size = 3;

	/* Submit the command, and wait for it to complete. */
	error = tw_cli_submit_and_poll_request(req,
		TW_CLI_REQUEST_TIMEOUT_PERIOD);
	if (error == TW_OSL_ETIMEDOUT)
		/* Clean-up done by tw_cli_submit_and_poll_request. */
		return(error);
	if (error)
		goto out;
	/* A non-zero firmware status means the command itself failed. */
	if ((error = init_connect->status)) {
		tw_cli_create_ctlr_event(ctlr,
			TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
			&(req->cmd_pkt->cmd_hdr));
		goto out;
	}
	/* For an extended init_connect, report back what the fw returned. */
	if (set_features & TWA_EXTENDED_INIT_CONNECT) {
		*fw_on_ctlr_srl = TW_CL_SWAP16(init_connect->fw_srl);
		*fw_on_ctlr_arch_id = TW_CL_SWAP16(init_connect->fw_arch_id);
		*fw_on_ctlr_branch = TW_CL_SWAP16(init_connect->fw_branch);
		*fw_on_ctlr_build = TW_CL_SWAP16(init_connect->fw_build);
		*init_connect_result = TW_CL_SWAP32(init_connect->result);
	}
	tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	return(error);

out:
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1016, 0x1, TW_CL_SEVERITY_ERROR_STRING,
		"init_connection failed",
		"error = %d", error);
	/* req is NULL only when tw_cli_get_request failed. */
	if (req)
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	return(error);
}
/* * Function name: tw_cli_submit_cmd * Description: Submits a cmd to firmware. * * Input: req -- ptr to CL internal request context * Output: None * Return value: 0 -- success * non-zero-- failure */ TW_INT32 tw_cli_submit_cmd(struct tw_cli_req_context *req) { struct tw_cli_ctlr_context *ctlr = req->ctlr; struct tw_cl_ctlr_handle *ctlr_handle = ctlr->ctlr_handle; TW_UINT32 status_reg; TW_INT32 error = 0; tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered"); /* Serialize access to the controller cmd queue. */ tw_osl_get_lock(ctlr_handle, ctlr->io_lock); /* For 9650SE first write low 4 bytes */ if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_E) || (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)) tw_osl_write_reg(ctlr_handle, TWA_COMMAND_QUEUE_OFFSET_LOW, (TW_UINT32)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)), 4); status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle); if (status_reg & TWA_STATUS_COMMAND_QUEUE_FULL) { struct tw_cl_req_packet *req_pkt = (struct tw_cl_req_packet *)(req->orig_req); tw_cli_dbg_printf(7, ctlr_handle, tw_osl_cur_func(), "Cmd queue full"); if ((req->flags & TW_CLI_REQ_FLAGS_INTERNAL) || ((req_pkt) && (req_pkt->flags & TW_CL_REQ_RETRY_ON_BUSY)) ) { if (req->state != TW_CLI_REQ_STATE_PENDING) { tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(), "pending internal/ioctl request"); req->state = TW_CLI_REQ_STATE_PENDING; tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q); /* Unmask command interrupt. 
*/ TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle, TWA_CONTROL_UNMASK_COMMAND_INTERRUPT); } else error = TW_OSL_EBUSY; } else { error = TW_OSL_EBUSY; } } else { tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "Submitting command"); /* Insert command into busy queue */ req->state = TW_CLI_REQ_STATE_BUSY; tw_cli_req_q_insert_tail(req, TW_CLI_BUSY_Q); if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_E) || (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)) { /* Now write the high 4 bytes */ tw_osl_write_reg(ctlr_handle, TWA_COMMAND_QUEUE_OFFSET_HIGH, (TW_UINT32)(((TW_UINT64)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)))>>32), 4); } else { if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {