/*
 * Function name:  tw_cl_print_ctlr_stats
 * Description:    Prints the current status of the controller.
 *
 * Input:          ctlr_handle -- controller handle
 * Output:         None
 * Return value:   None
 */
TW_VOID
tw_cl_print_ctlr_stats(struct tw_cl_ctlr_handle *ctlr_handle)
{
        struct tw_cli_ctlr_context      *ctlr =
                (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
        TW_UINT32                       status_reg;
        TW_INT8                         desc[200];

        tw_cli_dbg_printf(7, ctlr->ctlr_handle, "", "entered");

        /* Print current controller details. */
        tw_cli_dbg_printf(0, ctlr_handle, "", "cl_ctlr_ctxt = %p", ctlr);

        tw_osl_memzero(desc, 200);
        status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle);
        tw_cli_dbg_printf(0, ctlr_handle, "", "status reg = 0x%x %s",
                status_reg, tw_cli_describe_bits(status_reg, desc));

        tw_cli_dbg_printf(0, ctlr_handle, "", "CLq type  current  max");
        tw_cli_dbg_printf(0, ctlr_handle, "", "free      %04d     %04d",
                ctlr->q_stats[TW_CLI_FREE_Q].cur_len,
                ctlr->q_stats[TW_CLI_FREE_Q].max_len);
        tw_cli_dbg_printf(0, ctlr_handle, "", "busy      %04d     %04d",
                ctlr->q_stats[TW_CLI_BUSY_Q].cur_len,
                ctlr->q_stats[TW_CLI_BUSY_Q].max_len);
        tw_cli_dbg_printf(0, ctlr_handle, "", "pending   %04d     %04d",
                ctlr->q_stats[TW_CLI_PENDING_Q].cur_len,
                ctlr->q_stats[TW_CLI_PENDING_Q].max_len);
        tw_cli_dbg_printf(0, ctlr_handle, "", "complete  %04d     %04d",
                ctlr->q_stats[TW_CLI_COMPLETE_Q].cur_len,
                ctlr->q_stats[TW_CLI_COMPLETE_Q].max_len);
        tw_cli_dbg_printf(0, ctlr_handle, "", "AEN queue  head %d  tail %d",
                ctlr->aen_head, ctlr->aen_tail);
}
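/*
 * Illustrative sketch (not part of the driver): tw_cl_print_ctlr_stats() is a
 * diagnostic entry point the OS Layer can call when something looks wrong,
 * for example from a request-timeout path, to dump queue depths and the
 * status register.  The surrounding handler and the "sc" softc with its
 * ctlr_handle member are assumptions for illustration.
 */
#if 0
        tw_cl_print_ctlr_stats(&sc->ctlr_handle);
#endif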
/*
 * Function name:  tw_cli_get_request
 * Description:    Gets a request pkt from the free queue.
 *
 * Input:          ctlr -- ptr to CL internal ctlr context
 * Output:         None
 * Return value:   ptr to request pkt -- success
 *                 TW_CL_NULL         -- failure
 */
struct tw_cli_req_context *
tw_cli_get_request(struct tw_cli_ctlr_context *ctlr)
{
        struct tw_cli_req_context       *req;

        tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

        /* Get a free request packet. */
        req = tw_cli_req_q_remove_head(ctlr, TW_CLI_FREE_Q);

        /* Initialize some fields to their defaults. */
        if (req) {
                req->req_handle = TW_CL_NULL;
                req->data = TW_CL_NULL;
                req->length = 0;
                req->data_phys = 0;
                req->state = TW_CLI_REQ_STATE_INIT; /* req being initialized */
                req->flags = 0;
                req->error_code = 0;
                req->orig_req = TW_CL_NULL;
                req->tw_cli_callback = TW_CL_NULL;

                /*
                 * Look at the status field in the command packet to see how
                 * it completed the last time it was used, and zero out only
                 * the portions that might have changed.  Note that we don't
                 * care to zero out the sglist.
                 */
                if (req->cmd_pkt->command.cmd_pkt_9k.status)
                        tw_osl_memzero(req->cmd_pkt,
                                sizeof(struct tw_cl_command_header) +
                                28 /* max bytes before sglist */);
                else
                        tw_osl_memzero(&(req->cmd_pkt->command),
                                28 /* max bytes before sglist */);
        }
        return(req);
}
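/*
 * Illustrative sketch (not part of the driver): a typical caller pattern for
 * tw_cli_get_request().  A TW_CL_NULL return means the free queue is empty,
 * which callers commonly treat as a "busy, retry later" condition.  The
 * example function name, the use of tw_cli_submit_cmd() as the issue step,
 * and the choice of error codes are assumptions for illustration; real
 * callers fill in req->cmd_pkt and a completion callback before submitting.
 */
#if 0
static TW_INT32
tw_cli_example_submit(struct tw_cli_ctlr_context *ctlr)
{
        struct tw_cli_req_context       *req;

        if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
                return(TW_OSL_EGENFAILURE);     /* free queue exhausted */

        /* ... populate req->cmd_pkt, req->tw_cli_callback, etc. here ... */

        if (tw_cli_submit_cmd(req)) {
                /* Submission failed; return the packet to the free queue. */
                tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
                return(TW_OSL_EGENFAILURE);
        }
        return(TW_OSL_ESUCCESS);
}
#endif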
/*
 * Function name:  tw_cli_dbg_printf
 * Description:    Calls the OSL print function if dbg_level is appropriate.
 *
 * Input:          dbg_level   -- determines whether or not to print
 *                 ctlr_handle -- controller handle
 *                 cur_func    -- text name of calling function
 *                 fmt         -- format string for the arguments to follow
 *                 ...         -- variable number of arguments, to be printed
 *                                based on the fmt string
 * Output:         None
 * Return value:   None
 */
TW_VOID
tw_cli_dbg_printf(TW_UINT8 dbg_level, struct tw_cl_ctlr_handle *ctlr_handle,
        const TW_INT8 *cur_func, TW_INT8 *fmt, ...)
{
#ifdef TW_OSL_DEBUG
        TW_INT8 print_str[256];
        va_list ap;

        tw_osl_memzero(print_str, 256);
        if (dbg_level <= TW_OSL_DEBUG_LEVEL_FOR_CL) {
                tw_osl_sprintf(print_str, "%s: ", cur_func);
                va_start(ap, fmt);
                tw_osl_vsprintf(print_str + tw_osl_strlen(print_str), fmt, ap);
                va_end(ap);
                tw_osl_strcpy(print_str + tw_osl_strlen(print_str), "\n");
                tw_osl_dbg_printf(ctlr_handle, "%s", print_str);
        }
#endif /* TW_OSL_DEBUG */
}
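/*
 * Illustrative sketch (not part of the driver): a call site for
 * tw_cli_dbg_printf().  With dbg_level 3, the message is emitted only when
 * the compile-time TW_OSL_DEBUG_LEVEL_FOR_CL is 3 or higher, and only in
 * TW_OSL_DEBUG builds; otherwise the call resolves to an empty function
 * body.  The request fields referenced are just example arguments.
 */
#if 0
        tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(),
                "request_id = %d, state = %d", req->request_id, req->state);
#endif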
/*
 * Function name:  tw_cl_init_ctlr
 * Description:    Initializes driver data structures for the controller.
 *
 * Input:          ctlr_handle -- controller handle
 *                 flags -- more info passed by the OS Layer
 *                 device_id -- device id of the controller
 *                 max_simult_reqs -- maximum # of simultaneous requests
 *                                    that the OS Layer expects the Common
 *                                    Layer to support
 *                 max_aens -- maximum # of AEN's needed to be supported
 *                 non_dma_mem -- ptr to allocated non-DMA memory
 *                 dma_mem -- ptr to allocated DMA'able memory
 *                 dma_mem_phys -- physical address of dma_mem
 * Output:         None
 * Return value:   0 -- success
 *                 non-zero -- failure
 */
TW_INT32
tw_cl_init_ctlr(struct tw_cl_ctlr_handle *ctlr_handle, TW_UINT32 flags,
        TW_INT32 device_id, TW_INT32 max_simult_reqs, TW_INT32 max_aens,
        TW_VOID *non_dma_mem, TW_VOID *dma_mem, TW_UINT64 dma_mem_phys)
{
        struct tw_cli_ctlr_context      *ctlr;
        struct tw_cli_req_context       *req;
        TW_UINT8                        *free_non_dma_mem;
        TW_INT32                        error = TW_OSL_ESUCCESS;
        TW_INT32                        i;

        tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(), "entered");

        if (flags & TW_CL_START_CTLR_ONLY) {
                ctlr = (struct tw_cli_ctlr_context *)
                        (ctlr_handle->cl_ctlr_ctxt);
                goto start_ctlr;
        }

        if (max_simult_reqs > TW_CL_MAX_SIMULTANEOUS_REQUESTS) {
                tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
                        TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                        0x1000, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                        "Too many simultaneous requests to support!",
                        "requested = %d, supported = %d, error = %d\n",
                        max_simult_reqs, TW_CL_MAX_SIMULTANEOUS_REQUESTS,
                        TW_OSL_EBIG);
                return(TW_OSL_EBIG);
        }

        if ((non_dma_mem == TW_CL_NULL) || (dma_mem == TW_CL_NULL)) {
                tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
                        TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                        0x1001, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                        "Insufficient memory for Common Layer's internal usage",
                        "error = %d\n", TW_OSL_ENOMEM);
                return(TW_OSL_ENOMEM);
        }

        tw_osl_memzero(non_dma_mem, sizeof(struct tw_cli_ctlr_context) +
                (sizeof(struct tw_cli_req_context) * max_simult_reqs) +
                (sizeof(struct tw_cl_event_packet) * max_aens));

        tw_osl_memzero(dma_mem,
                (sizeof(struct tw_cl_command_packet) * max_simult_reqs) +
                TW_CLI_SECTOR_SIZE);

        free_non_dma_mem = (TW_UINT8 *)non_dma_mem;

        ctlr = (struct tw_cli_ctlr_context *)free_non_dma_mem;
        free_non_dma_mem += sizeof(struct tw_cli_ctlr_context);

        ctlr_handle->cl_ctlr_ctxt = ctlr;
        ctlr->ctlr_handle = ctlr_handle;

        ctlr->device_id = (TW_UINT32)device_id;
        ctlr->arch_id = TWA_ARCH_ID(device_id);
        ctlr->flags = flags;
        ctlr->sg_size_factor = TWA_SG_ELEMENT_SIZE_FACTOR(device_id);
        ctlr->max_simult_reqs = max_simult_reqs;
        ctlr->max_aens_supported = max_aens;

        /* Initialize queues of CL internal request context packets. */
        tw_cli_req_q_init(ctlr, TW_CLI_FREE_Q);
        tw_cli_req_q_init(ctlr, TW_CLI_BUSY_Q);
        tw_cli_req_q_init(ctlr, TW_CLI_PENDING_Q);
        tw_cli_req_q_init(ctlr, TW_CLI_COMPLETE_Q);

        /* Initialize all locks used by CL. */
        ctlr->gen_lock = &(ctlr->gen_lock_handle);
        tw_osl_init_lock(ctlr_handle, "tw_cl_gen_lock", ctlr->gen_lock);
        ctlr->io_lock = &(ctlr->io_lock_handle);
        tw_osl_init_lock(ctlr_handle, "tw_cl_io_lock", ctlr->io_lock);

        /* Initialize CL internal request context packets. */
        ctlr->req_ctxt_buf = (struct tw_cli_req_context *)free_non_dma_mem;
        free_non_dma_mem += (sizeof(struct tw_cli_req_context) *
                max_simult_reqs);

        ctlr->cmd_pkt_buf = (struct tw_cl_command_packet *)dma_mem;
        ctlr->cmd_pkt_phys = dma_mem_phys;

        ctlr->internal_req_data = (TW_UINT8 *)
                (ctlr->cmd_pkt_buf + max_simult_reqs);
        ctlr->internal_req_data_phys = ctlr->cmd_pkt_phys +
                (sizeof(struct tw_cl_command_packet) * max_simult_reqs);

        for (i = 0; i < max_simult_reqs; i++) {
                req = &(ctlr->req_ctxt_buf[i]);

                req->cmd_pkt = &(ctlr->cmd_pkt_buf[i]);
                req->cmd_pkt_phys = ctlr->cmd_pkt_phys +
                        (i * sizeof(struct tw_cl_command_packet));

                req->request_id = i;
                req->ctlr = ctlr;

                /* Insert request into the free queue. */
                tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
        }

        /* Initialize the AEN queue. */
        ctlr->aen_queue = (struct tw_cl_event_packet *)free_non_dma_mem;

start_ctlr:
        /*
         * Disable interrupts.  Interrupts will be enabled in
         * tw_cli_start_ctlr (only) if initialization succeeded.
         */
        tw_cli_disable_interrupts(ctlr);

        /* Initialize the controller. */
        if ((error = tw_cli_start_ctlr(ctlr))) {
                /* Soft reset the controller, and try one more time. */
                tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
                        TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                        0x1002, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                        "Controller initialization failed. Retrying...",
                        "error = %d\n", error);
                if ((error = tw_cli_soft_reset(ctlr))) {
                        tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
                                TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                                0x1003, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                                "Controller soft reset failed",
                                "error = %d\n", error);
                        return(error);
                } else if ((error = tw_cli_start_ctlr(ctlr))) {
                        tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
                                TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                                0x1004, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                                "Controller initialization retry failed",
                                "error = %d\n", error);
                        return(error);
                }
        }

        /* Notify the OSL of some information about the controller. */
        tw_cli_notify_ctlr_info(ctlr);

        /* Mark the controller active. */
        ctlr->active = TW_CL_TRUE;
        return(error);
}
/*
 * Function name:  tw_cl_init_ctlr
 * Description:    Initializes driver data structures for the controller.
 *
 * Input:          ctlr_handle -- controller handle
 *                 flags -- more info passed by the OS Layer
 *                 device_id -- device id of the controller
 *                 max_simult_reqs -- maximum # of simultaneous requests
 *                                    that the OS Layer expects the Common
 *                                    Layer to support
 *                 max_aens -- maximum # of AEN's needed to be supported
 *                 non_dma_mem -- ptr to allocated non-DMA memory
 *                 dma_mem -- ptr to allocated DMA'able memory
 *                 dma_mem_phys -- physical address of dma_mem
 * Output:         None
 * Return value:   0 -- success
 *                 non-zero -- failure
 */
TW_INT32
tw_cl_init_ctlr(struct tw_cl_ctlr_handle *ctlr_handle, TW_UINT32 flags,
        TW_INT32 device_id, TW_INT32 max_simult_reqs, TW_INT32 max_aens,
        TW_VOID *non_dma_mem, TW_VOID *dma_mem, TW_UINT64 dma_mem_phys)
{
        struct tw_cli_ctlr_context      *ctlr;
        struct tw_cli_req_context       *req;
        TW_UINT8                        *free_non_dma_mem;
        TW_INT32                        error = TW_OSL_ESUCCESS;
        TW_INT32                        i;

        tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(), "entered");

        if (flags & TW_CL_START_CTLR_ONLY) {
                ctlr = (struct tw_cli_ctlr_context *)
                        (ctlr_handle->cl_ctlr_ctxt);
                goto start_ctlr;
        }

        if (max_simult_reqs > TW_CL_MAX_SIMULTANEOUS_REQUESTS) {
                tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
                        TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                        0x1000, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                        "Too many simultaneous requests to support!",
                        "requested = %d, supported = %d, error = %d\n",
                        max_simult_reqs, TW_CL_MAX_SIMULTANEOUS_REQUESTS,
                        TW_OSL_EBIG);
                return(TW_OSL_EBIG);
        }

        if ((non_dma_mem == TW_CL_NULL) || (dma_mem == TW_CL_NULL)) {
                tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
                        TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                        0x1001, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                        "Insufficient memory for Common Layer's internal usage",
                        "error = %d\n", TW_OSL_ENOMEM);
                return(TW_OSL_ENOMEM);
        }

        tw_osl_memzero(non_dma_mem, sizeof(struct tw_cli_ctlr_context) +
                (sizeof(struct tw_cli_req_context) * (max_simult_reqs + 1)) +
                (sizeof(struct tw_cl_event_packet) * max_aens));

        tw_osl_memzero(dma_mem,
                (sizeof(struct tw_cl_command_packet) * (max_simult_reqs + 1)) +
                TW_CLI_SECTOR_SIZE);

        free_non_dma_mem = (TW_UINT8 *)non_dma_mem;

        ctlr = (struct tw_cli_ctlr_context *)free_non_dma_mem;
        free_non_dma_mem += sizeof(struct tw_cli_ctlr_context);

        ctlr_handle->cl_ctlr_ctxt = ctlr;
        ctlr->ctlr_handle = ctlr_handle;

        ctlr->device_id = (TW_UINT32)device_id;
        ctlr->arch_id = TWA_ARCH_ID(device_id);
        ctlr->flags = flags;
        ctlr->sg_size_factor = TWA_SG_ELEMENT_SIZE_FACTOR(device_id);
        ctlr->max_simult_reqs = max_simult_reqs + 1;
        ctlr->max_aens_supported = max_aens;

        /* Initialize queues of CL internal request context packets. */
        tw_cli_req_q_init(ctlr, TW_CLI_FREE_Q);
        tw_cli_req_q_init(ctlr, TW_CLI_BUSY_Q);
        tw_cli_req_q_init(ctlr, TW_CLI_PENDING_Q);
        tw_cli_req_q_init(ctlr, TW_CLI_COMPLETE_Q);

        /* Initialize all locks used by CL. */
        ctlr->gen_lock = &(ctlr->gen_lock_handle);
        tw_osl_init_lock(ctlr_handle, "tw_cl_gen_lock", ctlr->gen_lock);
        ctlr->io_lock = &(ctlr->io_lock_handle);
        tw_osl_init_lock(ctlr_handle, "tw_cl_io_lock", ctlr->io_lock);

        /*
         * If 64 bit cmd pkt addresses are used, we will need to serialize
         * writes to the hardware (across registers), since existing (G66)
         * hardware will get confused if, for example, we wrote the low 32
         * bits of the cmd pkt address, followed by a response interrupt mask
         * to the control register, followed by the high 32 bits of the cmd
         * pkt address.  It will then interpret the value written to the
         * control register as the low cmd pkt address.  So, for this case, we
         * will make a note that we will need to synchronize control register
         * writes with command register writes.
         */
        if ((ctlr->flags & TW_CL_64BIT_ADDRESSES) &&
                ((ctlr->device_id == TW_CL_DEVICE_ID_9K) ||
                (ctlr->device_id == TW_CL_DEVICE_ID_9K_X) ||
                (ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
                (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA))) {
                ctlr->state |= TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED;
                ctlr->intr_lock = ctlr->io_lock;
        } else {
                ctlr->intr_lock = &(ctlr->intr_lock_handle);
                tw_osl_init_lock(ctlr_handle, "tw_cl_intr_lock",
                        ctlr->intr_lock);
        }

        /* Initialize CL internal request context packets. */
        ctlr->req_ctxt_buf = (struct tw_cli_req_context *)free_non_dma_mem;
        free_non_dma_mem += (sizeof(struct tw_cli_req_context) *
                (max_simult_reqs + 1));

        ctlr->cmd_pkt_buf = (struct tw_cl_command_packet *)dma_mem;
        ctlr->cmd_pkt_phys = dma_mem_phys;

        ctlr->internal_req_data = (TW_UINT8 *)
                (ctlr->cmd_pkt_buf + (max_simult_reqs + 1));
        ctlr->internal_req_data_phys = ctlr->cmd_pkt_phys +
                (sizeof(struct tw_cl_command_packet) * (max_simult_reqs + 1));

        for (i = 0; i < (max_simult_reqs + 1); i++) {
                req = &(ctlr->req_ctxt_buf[i]);

                req->cmd_pkt = &(ctlr->cmd_pkt_buf[i]);
                req->cmd_pkt_phys = ctlr->cmd_pkt_phys +
                        (i * sizeof(struct tw_cl_command_packet));

                req->request_id = i;
                req->ctlr = ctlr;

                /* Insert request into the free queue. */
                tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
        }

        /* Initialize the AEN queue. */
        ctlr->aen_queue = (struct tw_cl_event_packet *)free_non_dma_mem;

start_ctlr:
        /*
         * Disable interrupts.  Interrupts will be enabled in
         * tw_cli_start_ctlr (only) if initialization succeeded.
         */
        tw_cli_disable_interrupts(ctlr);

        /* Initialize the controller. */
        if ((error = tw_cli_start_ctlr(ctlr))) {
                /* Soft reset the controller, and try one more time. */
                tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
                        TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                        0x1002, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                        "Controller initialization failed. Retrying...",
                        "error = %d\n", error);
                if ((error = tw_cli_soft_reset(ctlr))) {
                        tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
                                TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                                0x1003, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                                "Controller soft reset failed",
                                "error = %d\n", error);
                        return(error);
                } else if ((error = tw_cli_start_ctlr(ctlr))) {
                        tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
                                TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                                0x1004, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                                "Controller initialization retry failed",
                                "error = %d\n", error);
                        return(error);
                }
        }

        /* Notify the OSL of some information about the controller. */
        tw_cli_notify_ctlr_info(ctlr);

        /* Mark the controller as active. */
        ctlr->state |= TW_CLI_CTLR_STATE_ACTIVE;
        return(error);
}
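/*
 * Illustrative sketch (not part of the driver): the memory layout that this
 * version of tw_cl_init_ctlr() expects the OS Layer to have allocated before
 * calling it.  The sizes simply mirror the tw_osl_memzero() calls above; the
 * variable names and the assumption that the extra request slot is reserved
 * for CL-internal commands are for illustration only.
 */
#if 0
        non_dma_size = sizeof(struct tw_cli_ctlr_context) +
                (sizeof(struct tw_cli_req_context) * (max_simult_reqs + 1)) +
                (sizeof(struct tw_cl_event_packet) * max_aens);

        dma_size = (sizeof(struct tw_cl_command_packet) *
                (max_simult_reqs + 1)) + TW_CLI_SECTOR_SIZE;
#endif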
/*
 * Function name:  tw_cli_check_ctlr_state
 * Description:    Makes sure that the fw status register reports a
 *                 proper status.
 *
 * Input:          ctlr       -- ptr to CL internal ctlr context
 *                 status_reg -- value in the status register
 * Output:         None
 * Return value:   0 -- no errors
 *                 non-zero -- errors
 */
TW_INT32
tw_cli_check_ctlr_state(struct tw_cli_ctlr_context *ctlr, TW_UINT32 status_reg)
{
        struct tw_cl_ctlr_handle        *ctlr_handle = ctlr->ctlr_handle;
        TW_INT32                        error = TW_OSL_ESUCCESS;

        tw_cli_dbg_printf(8, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

        /* Check if the 'micro-controller ready' bit is not set. */
        if (!(status_reg & TWA_STATUS_MICROCONTROLLER_READY)) {
                TW_INT8 desc[200];

                tw_osl_memzero(desc, 200);
                if (!(ctlr->reset_phase1_in_progress)) {
                        tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
                                TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
                                0x1301, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                                "Missing expected status bit(s)",
                                "status reg = 0x%x; Missing bits: %s",
                                status_reg,
                                tw_cli_describe_bits(
                                        TWA_STATUS_MICROCONTROLLER_READY,
                                        desc));
                        error = TW_OSL_EGENFAILURE;
                }
        }

        /* Check if any error bits are set. */
        if ((status_reg & TWA_STATUS_UNEXPECTED_BITS) != 0) {
                TW_INT8 desc[200];

                tw_osl_memzero(desc, 200);

                /* Skip queue error msgs during 9650SE/9690SA reset */
                if (((ctlr->device_id != TW_CL_DEVICE_ID_9K_E) &&
                        (ctlr->device_id != TW_CL_DEVICE_ID_9K_SA)) ||
                        (!(ctlr->reset_in_progress)) ||
                        ((status_reg & TWA_STATUS_QUEUE_ERROR_INTERRUPT) == 0))
                        tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
                                TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
                                0x1302, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                                "Unexpected status bit(s)",
                                "status reg = 0x%x Unexpected bits: %s",
                                status_reg & TWA_STATUS_UNEXPECTED_BITS,
                                tw_cli_describe_bits(status_reg &
                                        TWA_STATUS_UNEXPECTED_BITS, desc));

                if (status_reg & TWA_STATUS_PCI_PARITY_ERROR_INTERRUPT) {
                        tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
                                TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
                                0x1303, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                                "PCI parity error: clearing... "
                                "Re-seat/move/replace card",
                                "status reg = 0x%x %s",
                                status_reg,
                                tw_cli_describe_bits(status_reg, desc));
                        TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle,
                                TWA_CONTROL_CLEAR_PARITY_ERROR);

#ifdef TW_OSL_PCI_CONFIG_ACCESSIBLE
                        tw_osl_write_pci_config(ctlr->ctlr_handle,
                                TW_CLI_PCI_CONFIG_STATUS_OFFSET,
                                TWA_PCI_CONFIG_CLEAR_PARITY_ERROR, 2);
#endif /* TW_OSL_PCI_CONFIG_ACCESSIBLE */
                }

                if (status_reg & TWA_STATUS_PCI_ABORT_INTERRUPT) {
                        tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
                                TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
                                0x1304, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                                "PCI abort: clearing... ",
                                "status reg = 0x%x %s",
                                status_reg,
                                tw_cli_describe_bits(status_reg, desc));
                        TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle,
                                TWA_CONTROL_CLEAR_PCI_ABORT);

#ifdef TW_OSL_PCI_CONFIG_ACCESSIBLE
                        tw_osl_write_pci_config(ctlr->ctlr_handle,
                                TW_CLI_PCI_CONFIG_STATUS_OFFSET,
                                TWA_PCI_CONFIG_CLEAR_PCI_ABORT, 2);
#endif /* TW_OSL_PCI_CONFIG_ACCESSIBLE */
                }

                if (status_reg & TWA_STATUS_QUEUE_ERROR_INTERRUPT) {
                        /* Skip queue error msgs during 9650SE/9690SA reset */
                        if (((ctlr->device_id != TW_CL_DEVICE_ID_9K_E) &&
                                (ctlr->device_id != TW_CL_DEVICE_ID_9K_SA)) ||
                                (!(ctlr->reset_in_progress)))
                                tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
                                        TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
                                        0x1305, 0x1,
                                        TW_CL_SEVERITY_ERROR_STRING,
                                        "Controller queue error: clearing... ",
                                        "status reg = 0x%x %s",
                                        status_reg,
                                        tw_cli_describe_bits(status_reg,
                                                desc));
                        TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle,
                                TWA_CONTROL_CLEAR_QUEUE_ERROR);
                }
        }
        return(error);
}
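/*
 * Illustrative sketch (not part of the driver): the usual calling pattern for
 * tw_cli_check_ctlr_state() -- read the firmware status register and validate
 * it before trusting the controller.  What a caller does on failure (here, a
 * soft reset) varies by context and is shown only as an example.
 */
#if 0
        status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr->ctlr_handle);
        if (tw_cli_check_ctlr_state(ctlr, status_reg))
                error = tw_cli_soft_reset(ctlr);
#endif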
/*
 * Function name:  tw_cl_create_event
 * Description:    Creates and queues ctlr/CL/OSL AEN's to be
 *                 supplied to user-space tools on request.
 *                 Also notifies OS Layer.
 *
 * Input:          ctlr_handle -- controller handle
 *                 queue_event -- TW_CL_TRUE  --> queue event;
 *                                TW_CL_FALSE --> don't queue event
 *                                                (simply notify OSL)
 *                 event_src   -- source of event
 *                 event_code  -- AEN/error code
 *                 severity    -- severity of event
 *                 severity_str-- text description of severity
 *                 event_desc  -- standard string related to the event/error
 *                 event_specific_desc -- format string for additional
 *                                        info about the event
 *                 ...         -- additional arguments conforming to the
 *                                format specified by event_specific_desc
 * Output:         None
 * Return value:   None
 */
TW_VOID
tw_cl_create_event(struct tw_cl_ctlr_handle *ctlr_handle, TW_UINT8 queue_event,
        TW_UINT8 event_src, TW_UINT16 event_code, TW_UINT8 severity,
        TW_UINT8 *severity_str, TW_UINT8 *event_desc,
        TW_UINT8 *event_specific_desc, ...)
{
        struct tw_cli_ctlr_context      *ctlr = ctlr_handle->cl_ctlr_ctxt;
        struct tw_cl_event_packet       event_pkt;
        struct tw_cl_event_packet       *event;
        TW_UINT32                       aen_head;
        va_list                         ap;

        tw_cli_dbg_printf(8, ctlr_handle, tw_osl_cur_func(), "entered");

        if ((ctlr) && (queue_event)) {
                /* Protect access to ctlr->aen_head. */
                tw_osl_get_lock(ctlr_handle, ctlr->gen_lock);

                aen_head = ctlr->aen_head;
                ctlr->aen_head = (aen_head + 1) % ctlr->max_aens_supported;

                /* Queue the event. */
                event = &(ctlr->aen_queue[aen_head]);
                tw_osl_memzero(event->parameter_data,
                        sizeof(event->parameter_data));

                if (event->retrieved == TW_CL_AEN_NOT_RETRIEVED)
                        ctlr->aen_q_overflow = TW_CL_TRUE;
                event->sequence_id = ++(ctlr->aen_cur_seq_id);
                if ((aen_head + 1) == ctlr->max_aens_supported) {
                        tw_cli_dbg_printf(4, ctlr->ctlr_handle,
                                tw_osl_cur_func(), "AEN queue wrapped");
                        ctlr->aen_q_wrapped = TW_CL_TRUE;
                }

                /* Free access to ctlr->aen_head. */
                tw_osl_free_lock(ctlr_handle, ctlr->gen_lock);
        } else {
                event = &event_pkt;
                tw_osl_memzero(event, sizeof(struct tw_cl_event_packet));
        }

        event->event_src = event_src;
        event->time_stamp_sec = (TW_UINT32)tw_osl_get_local_time();
        event->aen_code = event_code;
        event->severity = severity;
        tw_osl_strcpy(event->severity_str, severity_str);
        event->retrieved = TW_CL_AEN_NOT_RETRIEVED;

        va_start(ap, event_specific_desc);
        tw_osl_vsprintf(event->parameter_data, event_specific_desc, ap);
        va_end(ap);

        event->parameter_len =
                (TW_UINT8)(tw_osl_strlen(event->parameter_data));
        tw_osl_strcpy(event->parameter_data + event->parameter_len + 1,
                event_desc);
        event->parameter_len += (1 + tw_osl_strlen(event_desc));

        tw_cli_dbg_printf(4, ctlr_handle, tw_osl_cur_func(),
                "event = %x %x %x %x %x %x %x\n %s",
                event->sequence_id, event->time_stamp_sec, event->aen_code,
                event->severity, event->retrieved, event->repeat_count,
                event->parameter_len, event->parameter_data);

        tw_osl_notify_event(ctlr_handle, event);
}
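/*
 * Illustrative note (not part of the driver): after tw_cl_create_event()
 * returns, event->parameter_data holds two NUL-separated strings -- the
 * formatted event-specific text followed by the standard event description --
 * and parameter_len counts both strings plus the separating NUL, e.g.:
 *
 *   "requested = 300, supported = 256, error = 7" '\0'
 *   "Too many simultaneous requests to support!"  '\0'
 *
 * The numeric values above are made up for illustration; user-space tools
 * that retrieve queued AEN's are expected to split the buffer on the NUL.
 */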