Code example #1
/*
 * Function name:	tw_cl_init_ctlr
 * Description:		Initializes driver data structures for the controller.
 *
 * Input:		ctlr_handle -- controller handle
 *			flags -- more info passed by the OS Layer
 *			device_id -- device id of the controller
 *			max_simult_reqs -- maximum # of simultaneous requests
 *					that the OS Layer expects the Common
 *					Layer to support
 *			max_aens -- maximum # of AENs that need to be supported
 *			non_dma_mem -- ptr to allocated non-DMA memory
 *			dma_mem -- ptr to allocated DMA'able memory
 *			dma_mem_phys -- physical address of dma_mem
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_init_ctlr(struct tw_cl_ctlr_handle *ctlr_handle, TW_UINT32 flags,
	TW_INT32 device_id, TW_INT32 max_simult_reqs, TW_INT32 max_aens,
	TW_VOID *non_dma_mem, TW_VOID *dma_mem, TW_UINT64 dma_mem_phys)
{
	struct tw_cli_ctlr_context	*ctlr;
	struct tw_cli_req_context	*req;
	TW_UINT8			*free_non_dma_mem;
	TW_INT32			error = TW_OSL_ESUCCESS;
	TW_INT32			i;

	tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(), "entered");

	if (flags & TW_CL_START_CTLR_ONLY) {
		ctlr = (struct tw_cli_ctlr_context *)
			(ctlr_handle->cl_ctlr_ctxt);
		goto start_ctlr;
	}

	if (max_simult_reqs > TW_CL_MAX_SIMULTANEOUS_REQUESTS) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1000, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Too many simultaneous requests to support!",
			"requested = %d, supported = %d, error = %d\n",
			max_simult_reqs, TW_CL_MAX_SIMULTANEOUS_REQUESTS,
			TW_OSL_EBIG);
		return(TW_OSL_EBIG);
	}

	if ((non_dma_mem == TW_CL_NULL) || (dma_mem == TW_CL_NULL)) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1001, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Insufficient memory for Common Layer's internal usage",
			"error = %d\n", TW_OSL_ENOMEM);
		return(TW_OSL_ENOMEM);
	}

	tw_osl_memzero(non_dma_mem, sizeof(struct tw_cli_ctlr_context) +
		(sizeof(struct tw_cli_req_context) * max_simult_reqs) +
		(sizeof(struct tw_cl_event_packet) * max_aens));

	tw_osl_memzero(dma_mem,
		(sizeof(struct tw_cl_command_packet) *
		max_simult_reqs) +
		TW_CLI_SECTOR_SIZE);

	free_non_dma_mem = (TW_UINT8 *)non_dma_mem;

	ctlr = (struct tw_cli_ctlr_context *)free_non_dma_mem;
	free_non_dma_mem += sizeof(struct tw_cli_ctlr_context);

	ctlr_handle->cl_ctlr_ctxt = ctlr;
	ctlr->ctlr_handle = ctlr_handle;

	ctlr->device_id = (TW_UINT32)device_id;
	ctlr->arch_id = TWA_ARCH_ID(device_id);
	ctlr->flags = flags;
	ctlr->sg_size_factor = TWA_SG_ELEMENT_SIZE_FACTOR(device_id);
	ctlr->max_simult_reqs = max_simult_reqs;
	ctlr->max_aens_supported = max_aens;

	/* Initialize queues of CL internal request context packets. */
	tw_cli_req_q_init(ctlr, TW_CLI_FREE_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_BUSY_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_PENDING_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_COMPLETE_Q);

	/* Initialize all locks used by CL. */
	ctlr->gen_lock = &(ctlr->gen_lock_handle);
	tw_osl_init_lock(ctlr_handle, "tw_cl_gen_lock", ctlr->gen_lock);
	ctlr->io_lock = &(ctlr->io_lock_handle);
	tw_osl_init_lock(ctlr_handle, "tw_cl_io_lock", ctlr->io_lock);

	/* Initialize CL internal request context packets. */
	ctlr->req_ctxt_buf = (struct tw_cli_req_context *)free_non_dma_mem;
	free_non_dma_mem += (sizeof(struct tw_cli_req_context) *
		max_simult_reqs);

	ctlr->cmd_pkt_buf = (struct tw_cl_command_packet *)dma_mem;
	ctlr->cmd_pkt_phys = dma_mem_phys;

	ctlr->internal_req_data = (TW_UINT8 *)
		(ctlr->cmd_pkt_buf +
		max_simult_reqs);
	ctlr->internal_req_data_phys = ctlr->cmd_pkt_phys +
		(sizeof(struct tw_cl_command_packet) *
		max_simult_reqs);

	for (i = 0; i < max_simult_reqs; i++) {
		req = &(ctlr->req_ctxt_buf[i]);

		req->cmd_pkt = &(ctlr->cmd_pkt_buf[i]);
		req->cmd_pkt_phys = ctlr->cmd_pkt_phys +
			(i * sizeof(struct tw_cl_command_packet));

		req->request_id = i;
		req->ctlr = ctlr;

		/* Insert request into the free queue. */
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}

	/* Initialize the AEN queue. */
	ctlr->aen_queue = (struct tw_cl_event_packet *)free_non_dma_mem;


start_ctlr:
	/*
	 * Disable interrupts.  Interrupts will be enabled in tw_cli_start_ctlr
	 * (only) if initialization succeeded.
	 */
	tw_cli_disable_interrupts(ctlr);

	/* Initialize the controller. */
	if ((error = tw_cli_start_ctlr(ctlr))) {
		/* Soft reset the controller, and try one more time. */
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1002, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Controller initialization failed. Retrying...",
			"error = %d\n", error);
		if ((error = tw_cli_soft_reset(ctlr))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1003, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Controller soft reset failed",
				"error = %d\n", error);
			return(error);
		} else if ((error = tw_cli_start_ctlr(ctlr))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1004, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Controller initialization retry failed",
				"error = %d\n", error);
			return(error);
		}
	}
	/* Pass some information about the controller on to the OSL. */
	tw_cli_notify_ctlr_info(ctlr);

	/* Mark the controller active. */
	ctlr->active = TW_CL_TRUE;
	return(error);
}
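
The header comment above describes the interface but leaves the memory contract implicit: the OS Layer must pass in buffers at least as large as the regions the two tw_osl_memzero() calls clear. The sketch below reconstructs that sizing for code example #1 from those calls; osl_alloc(), osl_alloc_dma(), and the max_aens value are hypothetical stand-ins for whatever the OS Layer actually uses, not part of the driver's API.

/*
 * Sketch of an OSL-side caller for code example #1.  Buffer sizes mirror
 * the tw_osl_memzero() calls inside tw_cl_init_ctlr(); the allocators and
 * the max_aens value are illustrative assumptions, not driver APIs.
 */
static TW_INT32
osl_attach_sketch(struct tw_cl_ctlr_handle *ctlr_handle, TW_INT32 device_id)
{
	TW_INT32	max_reqs = TW_CL_MAX_SIMULTANEOUS_REQUESTS;
	TW_INT32	max_aens = 64;		/* assumed OSL policy */
	TW_UINT32	non_dma_size;
	TW_UINT32	dma_size;
	TW_VOID		*non_dma_mem;
	TW_VOID		*dma_mem;
	TW_UINT64	dma_mem_phys;

	/* Non-DMA memory: controller context + request contexts + AEN queue. */
	non_dma_size = sizeof(struct tw_cli_ctlr_context) +
		(sizeof(struct tw_cli_req_context) * max_reqs) +
		(sizeof(struct tw_cl_event_packet) * max_aens);

	/* DMA'able memory: command packets + one sector of internal req data. */
	dma_size = (sizeof(struct tw_cl_command_packet) * max_reqs) +
		TW_CLI_SECTOR_SIZE;

	non_dma_mem = osl_alloc(non_dma_size);			/* hypothetical */
	dma_mem = osl_alloc_dma(dma_size, &dma_mem_phys);	/* hypothetical */
	if ((non_dma_mem == TW_CL_NULL) || (dma_mem == TW_CL_NULL))
		return(TW_OSL_ENOMEM);

	return(tw_cl_init_ctlr(ctlr_handle, 0 /* flags */, device_id,
		max_reqs, max_aens, non_dma_mem, dma_mem, dma_mem_phys));
}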
Code example #2
/*
 * Function name:	tw_cl_init_ctlr
 * Description:		Initializes driver data structures for the controller.
 *
 * Input:		ctlr_handle -- controller handle
 *			flags -- more info passed by the OS Layer
 *			device_id -- device id of the controller
 *			max_simult_reqs -- maximum # of simultaneous requests
 *					that the OS Layer expects the Common
 *					Layer to support
 *			max_aens -- maximum # of AENs that need to be supported
 *			non_dma_mem -- ptr to allocated non-DMA memory
 *			dma_mem -- ptr to allocated DMA'able memory
 *			dma_mem_phys -- physical address of dma_mem
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_init_ctlr(struct tw_cl_ctlr_handle *ctlr_handle, TW_UINT32 flags,
	TW_INT32 device_id, TW_INT32 max_simult_reqs, TW_INT32 max_aens,
	TW_VOID *non_dma_mem, TW_VOID *dma_mem, TW_UINT64 dma_mem_phys)
{
	struct tw_cli_ctlr_context	*ctlr;
	struct tw_cli_req_context	*req;
	TW_UINT8			*free_non_dma_mem;
	TW_INT32			error = TW_OSL_ESUCCESS;
	TW_INT32			i;

	tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(), "entered");

	if (flags & TW_CL_START_CTLR_ONLY) {
		ctlr = (struct tw_cli_ctlr_context *)
			(ctlr_handle->cl_ctlr_ctxt);
		goto start_ctlr;
	}

	if (max_simult_reqs > TW_CL_MAX_SIMULTANEOUS_REQUESTS) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1000, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Too many simultaneous requests to support!",
			"requested = %d, supported = %d, error = %d\n",
			max_simult_reqs, TW_CL_MAX_SIMULTANEOUS_REQUESTS,
			TW_OSL_EBIG);
		return(TW_OSL_EBIG);
	}

	if ((non_dma_mem == TW_CL_NULL) || (dma_mem == TW_CL_NULL)) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1001, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Insufficient memory for Common Layer's internal usage",
			"error = %d\n", TW_OSL_ENOMEM);
		return(TW_OSL_ENOMEM);
	}

	tw_osl_memzero(non_dma_mem, sizeof(struct tw_cli_ctlr_context) +
		(sizeof(struct tw_cli_req_context) * (max_simult_reqs + 1)) +
		(sizeof(struct tw_cl_event_packet) * max_aens));

	tw_osl_memzero(dma_mem,
		(sizeof(struct tw_cl_command_packet) *
		(max_simult_reqs + 1)) +
		TW_CLI_SECTOR_SIZE);

	free_non_dma_mem = (TW_UINT8 *)non_dma_mem;

	ctlr = (struct tw_cli_ctlr_context *)free_non_dma_mem;
	free_non_dma_mem += sizeof(struct tw_cli_ctlr_context);

	ctlr_handle->cl_ctlr_ctxt = ctlr;
	ctlr->ctlr_handle = ctlr_handle;

	ctlr->device_id = (TW_UINT32)device_id;
	ctlr->arch_id = TWA_ARCH_ID(device_id);
	ctlr->flags = flags;
	ctlr->sg_size_factor = TWA_SG_ELEMENT_SIZE_FACTOR(device_id);
	ctlr->max_simult_reqs = max_simult_reqs + 1;
	ctlr->max_aens_supported = max_aens;

	/* Initialize queues of CL internal request context packets. */
	tw_cli_req_q_init(ctlr, TW_CLI_FREE_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_BUSY_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_PENDING_Q);
	tw_cli_req_q_init(ctlr, TW_CLI_COMPLETE_Q);

	/* Initialize all locks used by CL. */
	ctlr->gen_lock = &(ctlr->gen_lock_handle);
	tw_osl_init_lock(ctlr_handle, "tw_cl_gen_lock", ctlr->gen_lock);
	ctlr->io_lock = &(ctlr->io_lock_handle);
	tw_osl_init_lock(ctlr_handle, "tw_cl_io_lock", ctlr->io_lock);
	/*
	 * If 64 bit cmd pkt addresses are used, we will need to serialize
	 * writes to the hardware (across registers), since existing (G66)
	 * hardware will get confused if, for example, we wrote the low 32 bits
	 * of the cmd pkt address, followed by a response interrupt mask to the
	 * control register, followed by the high 32 bits of the cmd pkt
	 * address.  It will then interpret the value written to the control
	 * register as the low cmd pkt address.  So, for this case, we will
	 * make a note that we will need to synchronize control register writes
	 * with command register writes.
	 */
	if ((ctlr->flags & TW_CL_64BIT_ADDRESSES) &&
	    ((ctlr->device_id == TW_CL_DEVICE_ID_9K) ||
	     (ctlr->device_id == TW_CL_DEVICE_ID_9K_X) ||
	     (ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
	     (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA))) {
		ctlr->state |= TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED;
		ctlr->intr_lock = ctlr->io_lock;
	} else {
		ctlr->intr_lock = &(ctlr->intr_lock_handle);
		tw_osl_init_lock(ctlr_handle, "tw_cl_intr_lock",
			ctlr->intr_lock);
	}

	/* Initialize CL internal request context packets. */
	ctlr->req_ctxt_buf = (struct tw_cli_req_context *)free_non_dma_mem;
	free_non_dma_mem += (sizeof(struct tw_cli_req_context) *
		(max_simult_reqs + 1));

	ctlr->cmd_pkt_buf = (struct tw_cl_command_packet *)dma_mem;
	ctlr->cmd_pkt_phys = dma_mem_phys;

	ctlr->internal_req_data = (TW_UINT8 *)
		(ctlr->cmd_pkt_buf + (max_simult_reqs + 1));
	ctlr->internal_req_data_phys = ctlr->cmd_pkt_phys +
		(sizeof(struct tw_cl_command_packet) * (max_simult_reqs + 1));

	for (i = 0; i < (max_simult_reqs + 1); i++) {
		req = &(ctlr->req_ctxt_buf[i]);

		req->cmd_pkt = &(ctlr->cmd_pkt_buf[i]);
		req->cmd_pkt_phys = ctlr->cmd_pkt_phys +
			(i * sizeof(struct tw_cl_command_packet));

		req->request_id = i;
		req->ctlr = ctlr;

		/* Insert request into the free queue. */
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}

	/* Initialize the AEN queue. */
	ctlr->aen_queue = (struct tw_cl_event_packet *)free_non_dma_mem;


start_ctlr:
	/*
	 * Disable interrupts.  Interrupts will be enabled in tw_cli_start_ctlr
	 * (only) if initialization succeeded.
	 */
	tw_cli_disable_interrupts(ctlr);

	/* Initialize the controller. */
	if ((error = tw_cli_start_ctlr(ctlr))) {
		/* Soft reset the controller, and try one more time. */
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1002, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Controller initialization failed. Retrying...",
			"error = %d\n", error);
		if ((error = tw_cli_soft_reset(ctlr))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1003, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Controller soft reset failed",
				"error = %d\n", error);
			return(error);
		} else if ((error = tw_cli_start_ctlr(ctlr))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
				0x1004, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Controller initialization retry failed",
				"error = %d\n", error);
			return(error);
		}
	}
	/* Pass some information about the controller on to the OSL. */
	tw_cli_notify_ctlr_info(ctlr);

	/* Mark the controller as active. */
	ctlr->state |= TW_CLI_CTLR_STATE_ACTIVE;
	return(error);
}
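
The functional difference from code example #1 is the extra request slot: every size and loop bound that used max_simult_reqs now uses (max_simult_reqs + 1), the "+ 1" apparently reserving one request context and command packet for the Common Layer's own internal use. This version also records the G66 register-write workaround and, where that workaround is not needed, a separate interrupt lock. An OS Layer that sizes its buffers against the first version would under-allocate by one tw_cli_req_context and one tw_cl_command_packet. A minimal sizing sketch matching this version's tw_osl_memzero() calls (macro names are illustrative, not part of the driver):

/*
 * Buffer sizing that matches code example #2's tw_osl_memzero() calls.
 * The "+ 1" accounts for the additional, CL-internal request slot.
 * These macro names are hypothetical helpers, not driver APIs.
 */
#define OSL_NON_DMA_MEM_SIZE(max_reqs, max_aens)			\
	(sizeof(struct tw_cli_ctlr_context) +				\
	(sizeof(struct tw_cli_req_context) * ((max_reqs) + 1)) +	\
	(sizeof(struct tw_cl_event_packet) * (max_aens)))

#define OSL_DMA_MEM_SIZE(max_reqs)					\
	((sizeof(struct tw_cl_command_packet) * ((max_reqs) + 1)) +	\
	TW_CLI_SECTOR_SIZE)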