Example #1
/*
 * Function name:	tw_cli_aen_callback
 * Description:		Callback for requests to fetch AEN's.
 *
 * Input:		req	-- ptr to completed request pkt
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cli_aen_callback(struct tw_cli_req_context *req)
{
	struct tw_cli_ctlr_context	*ctlr = req->ctlr;
	struct tw_cl_command_header	*cmd_hdr;
	struct tw_cl_command_9k		*cmd =
		&(req->cmd_pkt->command.cmd_pkt_9k);
	TW_UINT16			aen_code = TWA_AEN_QUEUE_EMPTY;
	TW_INT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
		"req_id = 0x%x, req error = %d, status = 0x%x",
		GET_REQ_ID(cmd->lun_l4__req_id), req->error_code, cmd->status);

	/*
	 * If the request was never submitted to the controller, the function
	 * that sets req->error_code is responsible for calling
	 * tw_cl_create_event.
	 */
	if (!(error = req->error_code))
		if ((error = cmd->status)) {
			cmd_hdr = (struct tw_cl_command_header *)
				(&(req->cmd_pkt->cmd_hdr));
			tw_cli_create_ctlr_event(ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				cmd_hdr);
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1206, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Request Sense failed",
				"opcode = 0x%x, status = %d",
				GET_OPCODE(cmd->res__opcode), cmd->status);
		}

	if (error) {
		ctlr->internal_req_busy = TW_CL_FALSE;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
		return;
	}

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
		"Request Sense command succeeded");

	aen_code = tw_cli_manage_aen(ctlr, req);

	if (aen_code != TWA_AEN_SYNC_TIME_WITH_HOST) {
		ctlr->internal_req_busy = TW_CL_FALSE;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
		if (aen_code != TWA_AEN_QUEUE_EMPTY)
			if ((error = tw_cli_get_aen(ctlr)))
				tw_cl_create_event(ctlr->ctlr_handle,
					TW_CL_FALSE,
					TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
					0x1207, 0x1,
					TW_CL_SEVERITY_ERROR_STRING,
					"Failed to fetch all AEN's",
					"error = %d", error);
	}
}
Example #2
/*
 * Function name:	tw_cli_process_resp_intr
 * Description:		Looks for cmd completions from fw; queues cmds completed
 *			by fw into complete queue.
 *
 * Input:		ctlr	-- ptr to CL internal ctlr context
 * Output:		None
 * Return value:	0	-- no ctlr error
 *			non-zero-- ctlr error
 */
TW_INT32
tw_cli_process_resp_intr(struct tw_cli_ctlr_context *ctlr)
{
	TW_UINT32			resp;
	struct tw_cli_req_context	*req;
	TW_INT32			error;
	TW_UINT32			status_reg;
    
	tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	for (;;) {
		status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr->ctlr_handle);
		if ((error = tw_cli_check_ctlr_state(ctlr, status_reg)))
			break;
		if (status_reg & TWA_STATUS_RESPONSE_QUEUE_EMPTY) {
			tw_cli_dbg_printf(7, ctlr->ctlr_handle,
				tw_osl_cur_func(), "Response queue empty");
			break;
		}

		/* Response queue is not empty. */
		resp = TW_CLI_READ_RESPONSE_QUEUE(ctlr->ctlr_handle);
		/*
		 * The response register encodes the request id of the
		 * completed command; use it to index the request context
		 * array.
		 */
		req = &(ctlr->req_ctxt_buf[GET_RESP_ID(resp)]);

		if (req->state != TW_CLI_REQ_STATE_BUSY) {
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1201, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Unposted command completed!!",
				"request = %p, status = %d",
				req, req->state);
#ifdef TW_OSL_DEBUG
			tw_cl_print_ctlr_stats(ctlr->ctlr_handle);
#endif /* TW_OSL_DEBUG */
			tw_cl_reset_ctlr(ctlr->ctlr_handle);
			return(TW_OSL_EIO);
		}

		/*
		 * Remove the request from the busy queue, mark it as complete,
		 * and enqueue it in the complete queue.
		 */
		tw_cli_req_q_remove_item(req, TW_CLI_BUSY_Q);
		req->state = TW_CLI_REQ_STATE_COMPLETE;
		tw_cli_req_q_insert_tail(req, TW_CLI_COMPLETE_Q);
	}

	/* Complete this, and other requests in the complete queue. */
	tw_cli_process_complete_queue(ctlr);
	
	return(error);
}
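The complete-queue drain routine, tw_cli_process_complete_queue(), is called above but is not among these examples. Below is a minimal sketch of what it plausibly does, reusing the queue helpers shown in Example #3; the function name here is invented and the body is an assumption, not verbatim driver code. It pops each completed request and fires its CL-internal callback, such as tw_cli_aen_callback (Example #1) or tw_cli_complete_io (Example #5).

/* Sketch only: drain the complete queue, firing per-request callbacks. */
static TW_VOID
sketch_process_complete_queue(struct tw_cli_ctlr_context *ctlr)
{
	struct tw_cli_req_context	*req;

	while ((req = tw_cli_req_q_remove_head(ctlr, TW_CLI_COMPLETE_Q)) !=
		TW_CL_NULL) {
		if (req->tw_cli_callback)
			req->tw_cli_callback(req);
	}
}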
Example #3
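/*
 * Function name:	tw_cli_drain_pending_queue
 * Description:		Removes all requests from the pending queue; errors
 *			back internal and passthru requests, and moves
 *			external (SCSI) requests to the reset queue.
 *
 * Input:		ctlr	-- ptr to CL internal ctlr context
 * Output:		None
 * Return value:	None
 */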
TW_VOID
tw_cli_drain_pending_queue(struct tw_cli_ctlr_context *ctlr)
{
	struct tw_cli_req_context	*req;
	struct tw_cl_req_packet		*req_pkt;
    
	tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
	
	/*
	 * Pull requests off the pending queue, and complete them.
	 */
	while ((req = tw_cli_req_q_remove_head(ctlr, TW_CLI_PENDING_Q)) !=
		TW_CL_NULL) {
		if (req->flags & TW_CLI_REQ_FLAGS_INTERNAL) {
			/*
			 * It's an internal request.  Set the appropriate
			 * error and call the CL internal callback if there
			 * is one.  If the request originator is polling for
			 * completion, it should check req->error_code to
			 * determine that the request did not go through.
			 * The request originators are responsible for the
			 * clean-up.
			 */
			req->error_code = TW_CL_ERR_REQ_BUS_RESET;
			if (req->tw_cli_callback)
				req->tw_cli_callback(req);
		} else if (req->flags & TW_CLI_REQ_FLAGS_PASSTHRU) {
			/* It's a passthru request.  Complete it. */
			if ((req_pkt = req->orig_req) != TW_CL_NULL) {
				req_pkt->status = TW_CL_ERR_REQ_BUS_RESET;

				if (req_pkt->tw_osl_callback)
					req_pkt->tw_osl_callback(req->req_handle);
			}
			tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
		} else {
			/* It's an external (SCSI) request.  Add it to the reset queue. */
			tw_osl_untimeout(req->req_handle);
			tw_cli_req_q_insert_tail(req, TW_CLI_RESET_Q);
		}
	} /* End of while loop */
}
Example #4
/*
 * Function name:	tw_cli_param_callback
 * Description:		Callback for get/set_param requests.
 *
 * Input:		req	-- ptr to completed request pkt
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cli_param_callback(struct tw_cli_req_context *req)
{
	struct tw_cli_ctlr_context	*ctlr = req->ctlr;
	union tw_cl_command_7k		*cmd =
		&(req->cmd_pkt->command.cmd_pkt_7k);
	TW_INT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/*
	 * If the request was never submitted to the controller, the function
	 * that sets req->error_code is responsible for calling
	 * tw_cl_create_event.
	 */
	if (!req->error_code)
		if (cmd->param.status) {
#if 0
			tw_cli_create_ctlr_event(ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				&(req->cmd_pkt->cmd_hdr));
#endif /* 0 */
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1204, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"get/set_param failed",
				"status = %d", cmd->param.status);
		}

	ctlr->internal_req_busy = TW_CL_FALSE;
	tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);

	if ((ctlr->get_more_aens) && (!(ctlr->reset_in_progress))) {
		ctlr->get_more_aens = TW_CL_FALSE;
		tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Fetching more AEN's");
		if ((error = tw_cli_get_aen(ctlr)))
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1205, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Failed to fetch all AEN's from param_callback",
				"error = %d", error);
	}
}
Example #5
/*
 * Function name:	tw_cli_complete_io
 * Description:		CL internal callback for SCSI/fw passthru requests.
 *
 * Input:		req	-- ptr to CL internal request context
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cli_complete_io(struct tw_cli_req_context *req)
{
	struct tw_cli_ctlr_context	*ctlr = req->ctlr;
	struct tw_cl_req_packet		*req_pkt =
		(struct tw_cl_req_packet *)(req->orig_req);

	tw_cli_dbg_printf(8, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	req_pkt->status = TW_CL_ERR_REQ_SUCCESS;
	if (req->error_code) {
		req_pkt->status = TW_CL_ERR_REQ_UNABLE_TO_SUBMIT_COMMAND;
		goto out;
	}

	if (req->state != TW_CLI_REQ_STATE_COMPLETE) {
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1203, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"I/O completion on incomplete command!!",
			"request = %p, status = %d",
			req, req->state);
#ifdef TW_OSL_DEBUG
		tw_cl_print_ctlr_stats(ctlr->ctlr_handle);
#endif /* TW_OSL_DEBUG */
		tw_cl_reset_ctlr(ctlr->ctlr_handle);
		req_pkt->status = TW_CL_ERR_REQ_BUS_RESET;
		goto out;
	}

	if (req->flags & TW_CLI_REQ_FLAGS_PASSTHRU) {
		/* Copy the command packet back into OSL's space. */
		tw_osl_memcpy(req_pkt->gen_req_pkt.pt_req.cmd_pkt, req->cmd_pkt,
			sizeof(struct tw_cl_command_packet));
	} else
		tw_cli_scsi_complete(req);

out:
	req_pkt->tw_osl_callback(req->req_handle);
	tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
}
Example #6
/*
 * Function name:	tw_cli_manage_aen
 * Description:		Handles AEN's.
 *
 * Input:		ctlr	-- ptr to CL internal ctlr context
 *			req	-- ptr to CL internal request context
 * Output:		None
 * Return value:	aen_code of the AEN handled (TWA_AEN_QUEUE_EMPTY, if
 *			the firmware's AEN queue was empty)
 */
TW_UINT16
tw_cli_manage_aen(struct tw_cli_ctlr_context *ctlr,
	struct tw_cli_req_context *req)
{
	struct tw_cl_command_header	*cmd_hdr;
	TW_UINT16			aen_code;
	TW_TIME				local_time;
	TW_TIME				sync_time;
	TW_UINT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	cmd_hdr = (struct tw_cl_command_header *)(req->data);
	aen_code = cmd_hdr->status_block.error;

	switch (aen_code) {
	case TWA_AEN_SYNC_TIME_WITH_HOST:
		tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Received AEN_SYNC_TIME");
		/*
		 * Free the internal req pkt right here, since
		 * tw_cli_set_param will need it.
		 */
		ctlr->internal_req_busy = TW_CL_FALSE;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);

		/*
		 * We will use a callback in tw_cli_set_param only when
		 * interrupts are enabled and we can expect our callback
		 * to get called.  Setting the get_more_aens
		 * flag will make the callback continue to try to retrieve
		 * more AEN's.
		 */
		if (ctlr->interrupts_enabled)
			ctlr->get_more_aens = TW_CL_TRUE;
		/*
		 * Calculate time (in seconds) since last Sunday 12:00 AM.
		 * The Unix epoch (Jan 1, 1970) fell on a Thursday, so
		 * subtracting 3 days' worth of seconds re-bases local_time
		 * to a Sunday midnight; the remainder modulo one week
		 * (604800 seconds) is then the time since the last Sunday.
		 */
		local_time = tw_osl_get_local_time();
		sync_time = (local_time - (3 * 86400)) % 604800;
		if ((error = tw_cli_set_param(ctlr, TWA_PARAM_TIME_TABLE,
				TWA_PARAM_TIME_SCHED_TIME, 4,
				&sync_time,
				(ctlr->interrupts_enabled)
				? tw_cli_param_callback : TW_CL_NULL)))
			tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1208, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Unable to sync time with ctlr",
				"error = %d", error);

		break;

	case TWA_AEN_QUEUE_EMPTY:
		tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
			"AEN queue empty");
		break;

	default:
		/* Queue the event. */
		tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Queueing AEN");
		tw_cli_create_ctlr_event(ctlr,
			TW_CL_MESSAGE_SOURCE_CONTROLLER_EVENT,
			cmd_hdr);
		break;
	} /* switch */
	return(aen_code);
}
Example #7
/*
 * Function name:	tw_cl_start_io
 * Description:		Interface to OS Layer for accepting SCSI requests.
 *
 * Input:		ctlr_handle	-- controller handle
 *			req_pkt		-- OSL built request packet
 *			req_handle	-- request handle
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_start_io(struct tw_cl_ctlr_handle *ctlr_handle,
	struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle)
{
	struct tw_cli_ctlr_context		*ctlr;
	struct tw_cli_req_context		*req;
	struct tw_cl_command_9k			*cmd;
	struct tw_cl_scsi_req_packet		*scsi_req;
	TW_INT32				error = TW_CL_ERR_REQ_SUCCESS;

	tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");

	ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

	/*
	 * If working with a firmware version that does not support multiple
	 * luns, and this request is directed at a non-zero lun, error it
	 * back right away.
	 */
	if ((req_pkt->gen_req_pkt.scsi_req.lun) &&
		(ctlr->working_srl < TWA_MULTI_LUN_FW_SRL)) {
		req_pkt->status |= (TW_CL_ERR_REQ_INVALID_LUN |
			TW_CL_ERR_REQ_SCSI_ERROR);
		req_pkt->tw_osl_callback(req_handle);
		return(TW_CL_ERR_REQ_SUCCESS);
	}

	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"Out of request context packets: returning busy");
		return(TW_OSL_EBUSY);
	}

	req_handle->cl_req_ctxt = req;
	req->req_handle = req_handle;
	req->orig_req = req_pkt;
	req->tw_cli_callback = tw_cli_complete_io;

	req->flags |= TW_CLI_REQ_FLAGS_EXTERNAL;
	req->flags |= TW_CLI_REQ_FLAGS_9K;

	scsi_req = &(req_pkt->gen_req_pkt.scsi_req);

	/* Build the cmd pkt. */
	cmd = &(req->cmd_pkt->command.cmd_pkt_9k);

	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	cmd->res__opcode = BUILD_RES__OPCODE(0, TWA_FW_CMD_EXECUTE_SCSI);
	cmd->unit = (TW_UINT8)(scsi_req->unit);
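	/*
	 * As the macro names suggest, the 16-bit lun_l4__req_id field packs
	 * the low 4 bits of the LUN together with the request id, and
	 * lun_h4__sgl_entries (set below) packs the high 4 bits of the LUN
	 * with the SG-list entry count.
	 */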
	cmd->lun_l4__req_id = TW_CL_SWAP16(
		BUILD_LUN_L4__REQ_ID(scsi_req->lun, req->request_id));
	cmd->status = 0;
	cmd->sgl_offset = 16; /* offset from end of hdr = max cdb len */
	tw_osl_memcpy(cmd->cdb, scsi_req->cdb, scsi_req->cdb_len);

	if (req_pkt->flags & TW_CL_REQ_CALLBACK_FOR_SGLIST) {
		TW_UINT32	num_sgl_entries;

		req_pkt->tw_osl_sgl_callback(req_handle, cmd->sg_list,
			&num_sgl_entries);
		cmd->lun_h4__sgl_entries =
			TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
				num_sgl_entries));
	} else {
		cmd->lun_h4__sgl_entries =
			TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
				scsi_req->sgl_entries));
		tw_cli_fill_sg_list(ctlr, scsi_req->sg_list,
			cmd->sg_list, scsi_req->sgl_entries);
	}

	if (((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) != TW_CL_NULL) ||
		(ctlr->reset_in_progress)) {
		tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
			TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
	} else if ((error = tw_cli_submit_cmd(req))) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"Could not start request. request = %p, error = %d",
			req, error);
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}
	return(error);
}
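A minimal sketch of how an OS layer might drive this interface follows. It is not verbatim driver code: the helper and callback names are invented, the INQUIRY CDB is just an example, and scsi_req->cdb is assumed to be a pointer field. Only the fields that tw_cl_start_io() is shown reading above are filled in.

/* Hypothetical OSL completion callback. */
static TW_VOID	my_osl_io_done(struct tw_cl_req_handle *req_handle);

static TW_INT32
my_osl_send_inquiry(struct tw_cl_ctlr_handle *ctlr_handle,
	struct tw_cl_req_packet *req_pkt,
	struct tw_cl_req_handle *req_handle,
	TW_VOID *sg_list, TW_UINT32 sgl_entries)
{
	struct tw_cl_scsi_req_packet	*scsi_req =
		&(req_pkt->gen_req_pkt.scsi_req);
	static TW_UINT8			inquiry_cdb[6] =
		{ 0x12, 0, 0, 0, 36, 0 };	/* INQUIRY, 36-byte reply */

	req_pkt->status = 0;
	req_pkt->flags = 0;	/* SG list supplied directly, no callback */
	req_pkt->tw_osl_callback = my_osl_io_done;

	scsi_req->unit = 0;		/* unit 0 */
	scsi_req->lun = 0;		/* lun 0 works at any fw SRL */
	scsi_req->cdb = inquiry_cdb;	/* assumed: cdb is a TW_UINT8 * */
	scsi_req->cdb_len = 6;
	scsi_req->sg_list = sg_list;	/* OSL-built scatter/gather list */
	scsi_req->sgl_entries = sgl_entries;

	/* TW_OSL_EBUSY means no free request context; retry later. */
	return(tw_cl_start_io(ctlr_handle, req_pkt, req_handle));
}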
Example #8
/*
 * Function name:	tw_cli_submit_cmd
 * Description:		Submits a cmd to firmware.
 *
 * Input:		req	-- ptr to CL internal request context
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_submit_cmd(struct tw_cli_req_context *req)
{
	struct tw_cli_ctlr_context	*ctlr = req->ctlr;
	struct tw_cl_ctlr_handle	*ctlr_handle = ctlr->ctlr_handle;
	TW_UINT32			status_reg;
	TW_INT32			error = 0;

	tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");

	/* Serialize access to the controller cmd queue. */
	tw_osl_get_lock(ctlr_handle, ctlr->io_lock);

	/* For 9650SE first write low 4 bytes */
	if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
	    (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA))
		tw_osl_write_reg(ctlr_handle,
				 TWA_COMMAND_QUEUE_OFFSET_LOW,
				 (TW_UINT32)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)), 4);

	status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle);
	if (status_reg & TWA_STATUS_COMMAND_QUEUE_FULL) {
		struct tw_cl_req_packet	*req_pkt =
			(struct tw_cl_req_packet *)(req->orig_req);

		tw_cli_dbg_printf(7, ctlr_handle, tw_osl_cur_func(),
			"Cmd queue full");

		if ((req->flags & TW_CLI_REQ_FLAGS_INTERNAL)
			|| ((req_pkt) &&
			(req_pkt->flags & TW_CL_REQ_RETRY_ON_BUSY))
			) {
			if (req->state != TW_CLI_REQ_STATE_PENDING) {
				tw_cli_dbg_printf(2, ctlr_handle,
					tw_osl_cur_func(),
					"pending internal/ioctl request");
				req->state = TW_CLI_REQ_STATE_PENDING;
				tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
				/* Unmask command interrupt. */
				TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
					TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
			} else
				error = TW_OSL_EBUSY;
		} else {
			error = TW_OSL_EBUSY;
		}
	} else {
		tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(),
			"Submitting command");

		/* Insert command into busy queue */
		req->state = TW_CLI_REQ_STATE_BUSY;
		tw_cli_req_q_insert_tail(req, TW_CLI_BUSY_Q);

		if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
		    (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)) {
			/* Now write the high 4 bytes */
			tw_osl_write_reg(ctlr_handle, 
					 TWA_COMMAND_QUEUE_OFFSET_HIGH,
					 (TW_UINT32)(((TW_UINT64)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)))>>32), 4);
		} else {
			if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
				/*
				 * 64-bit addressing on non-9650SE hardware:
				 * write the low 4 bytes, then the high 4.
				 */
				tw_osl_write_reg(ctlr_handle,
					TWA_COMMAND_QUEUE_OFFSET_LOW,
					(TW_UINT32)(req->cmd_pkt_phys +
					sizeof(struct tw_cl_command_header)),
					4);
				tw_osl_write_reg(ctlr_handle,
					TWA_COMMAND_QUEUE_OFFSET_HIGH,
					(TW_UINT32)(((TW_UINT64)
					(req->cmd_pkt_phys +
					sizeof(struct tw_cl_command_header)))
					>> 32), 4);
			} else
				/* 32-bit addressing: a single write. */
				tw_osl_write_reg(ctlr_handle,
					TWA_COMMAND_QUEUE_OFFSET,
					(TW_UINT32)(req->cmd_pkt_phys +
					sizeof(struct tw_cl_command_header)),
					4);
		}
	}

	tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
	return(error);
}
Example #9
/*
 * Function name:	tw_cli_init_connection
 * Description:		Sends init_connection cmd to firmware
 *
 * Input:		ctlr		-- ptr to per ctlr structure
 *			message_credits	-- max # of requests that we might send
 *					down simultaneously.  This is
 *					typically set to 256 at init-time or
 *					after a reset, and to 1 at shutdown-time
 *			set_features	-- indicates whether we intend to use
 *					64-bit sg, and whether we want to do a
 *					basic or an extended init_connection
 *
 * Note: The following input/output parameters are valid, only in case of an
 *		extended init_connection:
 *
 *			current_fw_srl		-- srl of fw we are bundled
 *						with, if any; 0 otherwise
 *			current_fw_arch_id	-- arch_id of fw we are bundled
 *						with, if any; 0 otherwise
 *			current_fw_branch	-- branch # of fw we are bundled
 *						with, if any; 0 otherwise
 *			current_fw_build	-- build # of fw we are bundled
 *						with, if any; 0 otherwise
 * Output:		fw_on_ctlr_srl		-- srl of fw on ctlr
 *			fw_on_ctlr_arch_id	-- arch_id of fw on ctlr
 *			fw_on_ctlr_branch	-- branch # of fw on ctlr
 *			fw_on_ctlr_build	-- build # of fw on ctlr
 *			init_connect_result	-- result bitmap of fw response
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_init_connection(struct tw_cli_ctlr_context *ctlr,
	TW_UINT16 message_credits, TW_UINT32 set_features,
	TW_UINT16 current_fw_srl, TW_UINT16 current_fw_arch_id,
	TW_UINT16 current_fw_branch, TW_UINT16 current_fw_build,
	TW_UINT16 *fw_on_ctlr_srl, TW_UINT16 *fw_on_ctlr_arch_id,
	TW_UINT16 *fw_on_ctlr_branch, TW_UINT16 *fw_on_ctlr_build,
	TW_UINT32 *init_connect_result)
{
	struct tw_cli_req_context		*req;
	struct tw_cl_command_init_connect	*init_connect;
	TW_INT32				error = TW_OSL_EBUSY;
    
	tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Get a request packet. */
	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
		goto out;

	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

	/* Build the cmd pkt. */
	init_connect = &(req->cmd_pkt->command.cmd_pkt_7k.init_connect);

	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	init_connect->res1__opcode =
		BUILD_RES__OPCODE(0, TWA_FW_CMD_INIT_CONNECTION);
	init_connect->request_id =
		(TW_UINT8)(TW_CL_SWAP16(req->request_id));
	init_connect->message_credits = TW_CL_SWAP16(message_credits);
	init_connect->features = TW_CL_SWAP32(set_features);
	if (ctlr->flags & TW_CL_64BIT_ADDRESSES)
		init_connect->features |= TW_CL_SWAP32(TWA_64BIT_SG_ADDRESSES);
	if (set_features & TWA_EXTENDED_INIT_CONNECT) {
		/*
		 * Fill in the extra fields needed for an extended
		 * init_connect.
		 */
		init_connect->size = 6;
		init_connect->fw_srl = TW_CL_SWAP16(current_fw_srl);
		init_connect->fw_arch_id = TW_CL_SWAP16(current_fw_arch_id);
		init_connect->fw_branch = TW_CL_SWAP16(current_fw_branch);
		init_connect->fw_build = TW_CL_SWAP16(current_fw_build);
	} else
		init_connect->size = 3;

	/* Submit the command, and wait for it to complete. */
	error = tw_cli_submit_and_poll_request(req,
		TW_CLI_REQUEST_TIMEOUT_PERIOD);
	if (error == TW_OSL_ETIMEDOUT)
		/* Clean-up done by tw_cli_submit_and_poll_request. */
		return(error);
	if (error)
		goto out;
	if ((error = init_connect->status)) {
		tw_cli_create_ctlr_event(ctlr,
			TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
			&(req->cmd_pkt->cmd_hdr));
		goto out;
	}
	if (set_features & TWA_EXTENDED_INIT_CONNECT) {
		*fw_on_ctlr_srl = TW_CL_SWAP16(init_connect->fw_srl);
		*fw_on_ctlr_arch_id = TW_CL_SWAP16(init_connect->fw_arch_id);
		*fw_on_ctlr_branch = TW_CL_SWAP16(init_connect->fw_branch);
		*fw_on_ctlr_build = TW_CL_SWAP16(init_connect->fw_build);
		*init_connect_result = TW_CL_SWAP32(init_connect->result);
	}
	tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	return(error);

out:
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1016, 0x1, TW_CL_SEVERITY_ERROR_STRING,
		"init_connection failed",
		"error = %d", error);
	if (req)
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	return(error);
}
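A minimal sketch of a typical init-time call follows; the current_* variables and the surrounding context are assumptions, while the 256 message credits and the extended init_connection follow the header comment above.

	TW_UINT16	fw_srl, fw_arch_id, fw_branch, fw_build;
	TW_UINT32	result;
	TW_INT32	error;

	/* Request 256 credits and an extended init_connection. */
	error = tw_cli_init_connection(ctlr, 256, TWA_EXTENDED_INIT_CONNECT,
		current_srl, current_arch_id, current_branch, current_build,
		&fw_srl, &fw_arch_id, &fw_branch, &fw_build, &result);
	if (error)
		/* Ctlr is unusable; caller typically resets or fails attach. */
		return(error);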
Example #10
/*
 * Function name:	tw_cl_init_ctlr
 * Description:		Initializes driver data structures for the controller.
 *
 * Input:		ctlr_handle -- controller handle
 *			flags -- more info passed by the OS Layer
 *			device_id -- device id of the controller
 *			max_simult_reqs -- maximum # of simultaneous requests
 *					that the OS Layer expects the Common
 *					Layer to support
 *			max_aens -- maximum # of AEN's to be supported
 *			non_dma_mem -- ptr to allocated non-DMA memory
 *			dma_mem -- ptr to allocated DMA'able memory
 *			dma_mem_phys -- physical address of dma_mem
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_init_ctlr(struct tw_cl_ctlr_handle *ctlr_handle, TW_UINT32 flags,
	TW_INT32 device_id, TW_INT32 max_simult_reqs, TW_INT32 max_aens,
	TW_VOID *non_dma_mem, TW_VOID *dma_mem, TW_UINT64 dma_mem_phys)
{
	struct tw_cli_ctlr_context	*ctlr;
	struct tw_cli_req_context	*req;
	TW_UINT8			*free_non_dma_mem;
	TW_INT32			error = TW_OSL_ESUCCESS;
	TW_INT32			i;

	tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(), "entered");

	if (flags & TW_CL_START_CTLR_ONLY) {
		ctlr = (struct tw_cli_ctlr_context *)
			(ctlr_handle->cl_ctlr_ctxt);
		goto start_ctlr;
	}

	if (max_simult_reqs > TW_CL_MAX_SIMULTANEOUS_REQUESTS) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1000, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Too many simultaneous requests to support!",
			"requested = %d, supported = %d, error = %d\n",
			max_simult_reqs, TW_CL_MAX_SIMULTANEOUS_REQUESTS,
			TW_OSL_EBIG);
		return(TW_OSL_EBIG);
	}

	if ((non_dma_mem == TW_CL_NULL) || (dma_mem == TW_CL_NULL)) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1001, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Insufficient memory for Common Layer's internal usage",
			"error = %d\n", TW_OSL_ENOMEM);
		return(TW_OSL_ENOMEM);
	}

	tw_osl_memzero(non_dma_mem, sizeof(struct tw_cli_ctlr_context) +
		(sizeof(struct tw_cli_req_context) * max_simult_reqs) +
		(sizeof(struct tw_cl_event_packet) * max_aens));

	tw_osl_memzero(dma_mem,
		(sizeof(struct tw_cl_command_packet) *
		max_simult_reqs) +
		TW_CLI_SECTOR_SIZE);
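
	/*
	 * Carve up the caller-supplied memory: the non-DMA region holds the
	 * controller context, then the request context array, then the AEN
	 * queue; the DMA'able region holds the command packet array, followed
	 * by a sector-sized buffer for internal request data.
	 */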

	free_non_dma_mem = (TW_UINT8 *)non_dma_mem;

	ctlr = (struct tw_cli_ctlr_context *)free_non_dma_mem;
	free_non_dma_mem += sizeof(struct tw_cli_ctlr_context);

	ctlr_handle->cl_ctlr_ctxt = ctlr;
	ctlr->ctlr_handle = ctlr_handle;

	ctlr->device_id = (TW_UINT32)device_id;
	ctlr->arch_id = TWA_ARCH_ID(device_id);
	ctlr->flags = flags;
	ctlr->sg_size_factor = TWA_SG_ELEMENT_SIZE_FACTOR(device_id);
	ctlr->max_simult_reqs = max_simult_reqs;
	ctlr->max_aens_supported = max_aens;

	/* Initialize queues of CL internal request context packets. */
	tw_cli_req_q_init(ctlr, TW_CLI_FREE_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_BUSY_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_PENDING_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_COMPLETE_Q);

	/* Initialize all locks used by CL. */
	ctlr->gen_lock = &(ctlr->gen_lock_handle);
	tw_osl_init_lock(ctlr_handle, "tw_cl_gen_lock", ctlr->gen_lock);
	ctlr->io_lock = &(ctlr->io_lock_handle);
	tw_osl_init_lock(ctlr_handle, "tw_cl_io_lock", ctlr->io_lock);

	/* Initialize CL internal request context packets. */
	ctlr->req_ctxt_buf = (struct tw_cli_req_context *)free_non_dma_mem;
	free_non_dma_mem += (sizeof(struct tw_cli_req_context) *
		max_simult_reqs);

	ctlr->cmd_pkt_buf = (struct tw_cl_command_packet *)dma_mem;
	ctlr->cmd_pkt_phys = dma_mem_phys;

	ctlr->internal_req_data = (TW_UINT8 *)
		(ctlr->cmd_pkt_buf + max_simult_reqs);
	ctlr->internal_req_data_phys = ctlr->cmd_pkt_phys +
		(sizeof(struct tw_cl_command_packet) * max_simult_reqs);

	for (i = 0; i < max_simult_reqs; i++) {
		req = &(ctlr->req_ctxt_buf[i]);

		req->cmd_pkt = &(ctlr->cmd_pkt_buf[i]);
		req->cmd_pkt_phys = ctlr->cmd_pkt_phys +
			(i * sizeof(struct tw_cl_command_packet));

		req->request_id = i;
		req->ctlr = ctlr;

		/* Insert request into the free queue. */
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}

	/* Initialize the AEN queue. */
	ctlr->aen_queue = (struct tw_cl_event_packet *)free_non_dma_mem;

start_ctlr:
	/*
	 * Disable interrupts.  Interrupts will be enabled in tw_cli_start_ctlr
	 * (only) if initialization succeeded.
	 */
	tw_cli_disable_interrupts(ctlr);

	/* Initialize the controller. */
	if ((error = tw_cli_start_ctlr(ctlr))) {
		/* Soft reset the controller, and try one more time. */
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1002, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Controller initialization failed. Retrying...",
			"error = %d\n", error);
		if ((error = tw_cli_soft_reset(ctlr))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1003, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Controller soft reset failed",
				"error = %d\n", error);
			return(error);
		} else if ((error = tw_cli_start_ctlr(ctlr))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1004, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Controller initialization retry failed",
				"error = %d\n", error);
			return(error);
		}
	}
	/* Notify some info about the controller to the OSL. */
	tw_cli_notify_ctlr_info(ctlr);

	/* Mark the controller active. */
	ctlr->active = TW_CL_TRUE;
	return(error);
}
Example #11
/*
 * Function name:	tw_cl_init_ctlr
 * Description:		Initializes driver data structures for the controller.
 *
 * Input:		ctlr_handle -- controller handle
 *			flags -- more info passed by the OS Layer
 *			device_id -- device id of the controller
 *			max_simult_reqs -- maximum # of simultaneous requests
 *					that the OS Layer expects the Common
 *					Layer to support
 *			max_aens -- maximum # of AEN's to be supported
 *			non_dma_mem -- ptr to allocated non-DMA memory
 *			dma_mem -- ptr to allocated DMA'able memory
 *			dma_mem_phys -- physical address of dma_mem
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_init_ctlr(struct tw_cl_ctlr_handle *ctlr_handle, TW_UINT32 flags,
	TW_INT32 device_id, TW_INT32 max_simult_reqs, TW_INT32 max_aens,
	TW_VOID *non_dma_mem, TW_VOID *dma_mem, TW_UINT64 dma_mem_phys)
{
	struct tw_cli_ctlr_context	*ctlr;
	struct tw_cli_req_context	*req;
	TW_UINT8			*free_non_dma_mem;
	TW_INT32			error = TW_OSL_ESUCCESS;
	TW_INT32			i;

	tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(), "entered");

	if (flags & TW_CL_START_CTLR_ONLY) {
		ctlr = (struct tw_cli_ctlr_context *)
			(ctlr_handle->cl_ctlr_ctxt);
		goto start_ctlr;
	}

	if (max_simult_reqs > TW_CL_MAX_SIMULTANEOUS_REQUESTS) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1000, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Too many simultaneous requests to support!",
			"requested = %d, supported = %d, error = %d\n",
			max_simult_reqs, TW_CL_MAX_SIMULTANEOUS_REQUESTS,
			TW_OSL_EBIG);
		return(TW_OSL_EBIG);
	}

	if ((non_dma_mem == TW_CL_NULL) || (dma_mem == TW_CL_NULL)) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1001, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Insufficient memory for Common Layer's internal usage",
			"error = %d\n", TW_OSL_ENOMEM);
		return(TW_OSL_ENOMEM);
	}

	tw_osl_memzero(non_dma_mem, sizeof(struct tw_cli_ctlr_context) +
		(sizeof(struct tw_cli_req_context) * (max_simult_reqs + 1)) +
		(sizeof(struct tw_cl_event_packet) * max_aens));

	tw_osl_memzero(dma_mem,
		(sizeof(struct tw_cl_command_packet) *
		(max_simult_reqs + 1)) +
		TW_CLI_SECTOR_SIZE);

	free_non_dma_mem = (TW_UINT8 *)non_dma_mem;

	ctlr = (struct tw_cli_ctlr_context *)free_non_dma_mem;
	free_non_dma_mem += sizeof(struct tw_cli_ctlr_context);

	ctlr_handle->cl_ctlr_ctxt = ctlr;
	ctlr->ctlr_handle = ctlr_handle;

	ctlr->device_id = (TW_UINT32)device_id;
	ctlr->arch_id = TWA_ARCH_ID(device_id);
	ctlr->flags = flags;
	ctlr->sg_size_factor = TWA_SG_ELEMENT_SIZE_FACTOR(device_id);
	ctlr->max_simult_reqs = max_simult_reqs + 1;
	ctlr->max_aens_supported = max_aens;

	/* Initialize queues of CL internal request context packets. */
	tw_cli_req_q_init(ctlr, TW_CLI_FREE_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_BUSY_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_PENDING_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_COMPLETE_Q);

	/* Initialize all locks used by CL. */
	ctlr->gen_lock = &(ctlr->gen_lock_handle);
	tw_osl_init_lock(ctlr_handle, "tw_cl_gen_lock", ctlr->gen_lock);
	ctlr->io_lock = &(ctlr->io_lock_handle);
	tw_osl_init_lock(ctlr_handle, "tw_cl_io_lock", ctlr->io_lock);
	/*
	 * If 64 bit cmd pkt addresses are used, we will need to serialize
	 * writes to the hardware (across registers), since existing (G66)
	 * hardware will get confused if, for example, we wrote the low 32 bits
	 * of the cmd pkt address, followed by a response interrupt mask to the
	 * control register, followed by the high 32 bits of the cmd pkt
	 * address.  It will then interpret the value written to the control
	 * register as the low cmd pkt address.  So, for this case, we will
	 * make a note that we will need to synchronize control register writes
	 * with command register writes.
	 */
	if ((ctlr->flags & TW_CL_64BIT_ADDRESSES) &&
	    ((ctlr->device_id == TW_CL_DEVICE_ID_9K) ||
	     (ctlr->device_id == TW_CL_DEVICE_ID_9K_X) ||
	     (ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
	     (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA))) {
		ctlr->state |= TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED;
		ctlr->intr_lock = ctlr->io_lock;
	} else {
		ctlr->intr_lock = &(ctlr->intr_lock_handle);
		tw_osl_init_lock(ctlr_handle, "tw_cl_intr_lock",
			ctlr->intr_lock);
	}

	/* Initialize CL internal request context packets. */
	ctlr->req_ctxt_buf = (struct tw_cli_req_context *)free_non_dma_mem;
	free_non_dma_mem += (sizeof(struct tw_cli_req_context) *
		(max_simult_reqs + 1));

	ctlr->cmd_pkt_buf = (struct tw_cl_command_packet *)dma_mem;
	ctlr->cmd_pkt_phys = dma_mem_phys;

	ctlr->internal_req_data = (TW_UINT8 *)
		(ctlr->cmd_pkt_buf + (max_simult_reqs + 1));
	ctlr->internal_req_data_phys = ctlr->cmd_pkt_phys +
		(sizeof(struct tw_cl_command_packet) *
		(max_simult_reqs + 1));

	for (i = 0; i < (max_simult_reqs + 1); i++) {
		req = &(ctlr->req_ctxt_buf[i]);

		req->cmd_pkt = &(ctlr->cmd_pkt_buf[i]);
		req->cmd_pkt_phys = ctlr->cmd_pkt_phys +
			(i * sizeof(struct tw_cl_command_packet));

		req->request_id = i;
		req->ctlr = ctlr;

		/* Insert request into the free queue. */
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}

	/* Initialize the AEN queue. */
	ctlr->aen_queue = (struct tw_cl_event_packet *)free_non_dma_mem;

start_ctlr:
	/*
	 * Disable interrupts.  Interrupts will be enabled in tw_cli_start_ctlr
	 * (only) if initialization succeeded.
	 */
	tw_cli_disable_interrupts(ctlr);

	/* Initialize the controller. */
	if ((error = tw_cli_start_ctlr(ctlr))) {
		/* Soft reset the controller, and try one more time. */
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1002, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Controller initialization failed. Retrying...",
			"error = %d\n", error);
		if ((error = tw_cli_soft_reset(ctlr))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1003, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Controller soft reset failed",
				"error = %d\n", error);
			return(error);
		} else if ((error = tw_cli_start_ctlr(ctlr))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1004, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Controller initialization retry failed",
				"error = %d\n", error);
			return(error);
		}
	}
	/* Notify some info about the controller to the OSL. */
	tw_cli_notify_ctlr_info(ctlr);

	/* Mark the controller as active. */
	ctlr->state |= TW_CLI_CTLR_STATE_ACTIVE;
	return(error);
}
Example #12
/*
 * Function name:	tw_cli_drain_aen_queue
 * Description:		Fetches all un-retrieved AEN's posted by fw.
 *
 * Input:		ctlr	-- ptr to CL internal ctlr context
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_drain_aen_queue(struct tw_cli_ctlr_context *ctlr)
{
	struct tw_cli_req_context	*req;
	struct tw_cl_command_header	*cmd_hdr;
	TW_TIME				end_time;
	TW_UINT16			aen_code;
	TW_INT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	for (;;) {
		if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL) {
			error = TW_OSL_EBUSY;
			break;
		}

		req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;
		req->tw_cli_callback = TW_CL_NULL;
		if ((error = tw_cli_send_scsi_cmd(req,
				0x03 /* REQUEST_SENSE */))) {
			tw_cli_dbg_printf(1, ctlr->ctlr_handle,
				tw_osl_cur_func(),
				"Cannot send command to fetch aen");
			break;
		}

		end_time = tw_osl_get_local_time() +
			TW_CLI_REQUEST_TIMEOUT_PERIOD;
		do {
			if ((error = req->error_code))
				/*
				 * This will take care of completion due to
				 * a reset, or a failure in
				 * tw_cli_submit_pending_queue.
				 */
				goto out;

			tw_cli_process_resp_intr(req->ctlr);

			if ((req->state != TW_CLI_REQ_STATE_BUSY) &&
				(req->state != TW_CLI_REQ_STATE_PENDING))
				break;
		} while (tw_osl_get_local_time() <= end_time);

		if (req->state != TW_CLI_REQ_STATE_COMPLETE) {
			error = TW_OSL_ETIMEDOUT;
			break;
		}

		if ((error = req->cmd_pkt->command.cmd_pkt_9k.status)) {
			cmd_hdr = &req->cmd_pkt->cmd_hdr;
#if 0
			tw_cli_create_ctlr_event(ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				cmd_hdr);
#endif /* 0 */
			break;
		}

		aen_code = tw_cli_manage_aen(ctlr, req);
		if (aen_code == TWA_AEN_QUEUE_EMPTY)
			break;
		if (aen_code == TWA_AEN_SYNC_TIME_WITH_HOST)
			continue;

		ctlr->internal_req_busy = TW_CL_FALSE;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}

out:
	if (req) {
		if (req->data)
			ctlr->internal_req_busy = TW_CL_FALSE;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}
	return(error);
}
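tw_cli_get_request(), used throughout these examples, is not itself shown. Below is a plausible minimal sketch reusing the queue helpers from the examples above; the function name here is invented, and TW_CLI_REQ_STATE_INIT plus the exact set of fields reset are assumptions.

/* Sketch only: pop a free request context and reset its per-I/O fields. */
static struct tw_cli_req_context *
sketch_get_request(struct tw_cli_ctlr_context *ctlr)
{
	struct tw_cli_req_context	*req;

	if ((req = tw_cli_req_q_remove_head(ctlr, TW_CLI_FREE_Q)) !=
		TW_CL_NULL) {
		req->error_code = 0;
		req->state = TW_CLI_REQ_STATE_INIT;	/* assumed name */
		req->flags = 0;
		req->orig_req = TW_CL_NULL;
		req->tw_cli_callback = TW_CL_NULL;
	}
	return(req);
}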