Exemplo n.º 1
0
/*
 * pppt_sess_close_locked()
 *
 * Mark a session closed, ask STMF to abort all of its still-active
 * tasks, wait for the session reference count to drain to zero, then
 * unlink the session from the target and global session lists and
 * queue its final destruction to a taskq.
 *
 * Caller must hold pppt_global.global_lock, the target's target_mutex
 * and the session's ps_mutex, and must guarantee the session is not
 * already closed (all asserted below).
 */
void
pppt_sess_close_locked(pppt_sess_t *ps)
{
	pppt_tgt_t	*tgt = ps->ps_target;
	pppt_task_t	*ptask;

	stmf_trace("pppt", "Session close %p", (void *)ps);

	ASSERT(mutex_owned(&pppt_global.global_lock));
	ASSERT(mutex_owned(&tgt->target_mutex));
	ASSERT(mutex_owned(&ps->ps_mutex));
	ASSERT(!ps->ps_closed); /* Caller should ensure session is not closed */

	ps->ps_closed = B_TRUE;
	/* Queue an abort for every task on the session that is still active. */
	for (ptask = avl_first(&ps->ps_task_list); ptask != NULL;
	    ptask = AVL_NEXT(&ps->ps_task_list, ptask)) {
		mutex_enter(&ptask->pt_mutex);
		if (ptask->pt_state == PTS_ACTIVE) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, ptask->pt_stmf_task,
			    STMF_ABORTED, NULL);
		}
		mutex_exit(&ptask->pt_mutex);
	}

	/*
	 * Now that all the tasks are aborting the session refcnt should
	 * go to 0.  cv_wait atomically drops ps_mutex while sleeping, so
	 * task teardown can acquire it and signal ps_cv.
	 */
	while (ps->ps_refcnt != 0) {
		cv_wait(&ps->ps_cv, &ps->ps_mutex);
	}

	avl_remove(&tgt->target_sess_list, ps);
	avl_remove(&pppt_global.global_sess_list, ps);
	/*
	 * Final destruction runs from the session taskq, outside the
	 * locks the caller currently holds.
	 */
	(void) taskq_dispatch(pppt_global.global_sess_taskq,
	    &pppt_sess_destroy_task, ps, KM_SLEEP);

	stmf_trace("pppt", "Session close complete %p", (void *)ps);
}
Exemplo n.º 2
0
/*
 * sbd_do_ats_xfer()
 *
 * Start (or continue) the data-in transfer for an ATS (compare and
 * write) command.  Ensures a usable dbuf is available: a caller-supplied
 * dbuf is reused when both the port and the caller permit it; otherwise
 * it is freed and a new one is allocated, retrying with progressively
 * smaller minimum sizes.
 */
void
sbd_do_ats_xfer(struct scsi_task *task, sbd_cmd_t *scmd,
    struct stmf_data_buf *dbuf, uint8_t dbuf_reusable)
{
	uint32_t len;

	/* Nothing left to transfer; release any dbuf we were handed. */
	if (ATOMIC32_GET(scmd->len) == 0) {
		if (dbuf != NULL) {
			stmf_free_dbuf(task, dbuf);
		}
		return;
	}

	/*
	 * The dbuf can only be reused if the port did not flag it
	 * DB_DONT_REUSE and the caller marked it reusable.
	 */
	if ((dbuf != NULL) &&
	    ((dbuf->db_flags & DB_DONT_REUSE) || (dbuf_reusable == 0))) {
		/* free current dbuf and allocate a new one */
		stmf_free_dbuf(task, dbuf);
		dbuf = NULL;
	}
	if (dbuf == NULL) {
		uint32_t maxsize, minsize, old_minsize;

		/* Cap a single transfer buffer at 128k. */
		maxsize = (ATOMIC32_GET(scmd->len) > (128*1024)) ? 128*1024 :
		    ATOMIC32_GET(scmd->len);
		minsize = maxsize >> 2;
		/*
		 * stmf_alloc_dbuf may lower minsize on failure; keep
		 * retrying while it keeps shrinking and remains at
		 * least 512 bytes.
		 */
		do {
			old_minsize = minsize;
			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
		} while ((dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (dbuf == NULL) {
			/*
			 * No buffer could be obtained.  If no other buffers
			 * are outstanding for this command nothing will ever
			 * complete it, so abort the task.
			 */
			if (ATOMIC8_GET(scmd->nbufs) == 0) {
				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
				    STMF_ALLOC_FAILURE, NULL);
			}
			return;
		}
	}
Exemplo n.º 3
0
/*
 * sbd_handle_ats_xfer_completion()
 *
 * Completion handler for one data buffer of an ATS (compare and write)
 * transfer.  On success the received data is copied into
 * scmd->trans_data and, if more data remains, the next transfer is
 * started in parallel.  Once the final buffer has arrived, the
 * compare-and-write is performed and SCSI status is returned.
 *
 * Fixes: the cmn_err message's adjacent string literals previously
 * concatenated to "handledunexpected completion" (missing space), and
 * one read of scmd->nbufs bypassed ATOMIC8_GET unlike every other
 * access to that field.
 */
void
sbd_handle_ats_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
    struct stmf_data_buf *dbuf, uint8_t dbuf_reusable)
{
	uint64_t laddr;
	uint32_t buflen, iolen, miscompare_off;
	int ndx;
	sbd_status_t ret;

	/* This buffer is no longer outstanding. */
	if (ATOMIC8_GET(scmd->nbufs) > 0) {
		atomic_dec_8(&scmd->nbufs);
	}

	/* Transport-level failure: release ATS resources and abort. */
	if (dbuf->db_xfer_status != STMF_SUCCESS) {
		scmd->flags |= SBD_SCSI_CMD_ABORT_REQUESTED;
		sbd_ats_release_resources(task, scmd);
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    dbuf->db_xfer_status, NULL);
		return;
	}

	/* if the command is no longer active return */
	if (((scmd->flags & SBD_SCSI_CMD_ACTIVE) == 0) ||
	    (scmd->trans_data == NULL) ||
	    ((scmd->flags & SBD_SCSI_CMD_TRANS_DATA) == 0) ||
	    (ATOMIC8_GET(scmd->nbufs) == 0xff))  {
		cmn_err(CE_NOTE, "sbd_handle_ats_xfer_completion: handled "
		    "unexpected completion");
		return;
	}

	if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
		goto ATS_XFER_DONE;
	}

	if (ATOMIC32_GET(scmd->len) != 0) {
		/*
		 * Initiate the next port xfer to occur in parallel
		 * with writing this buf.  A side effect of sbd_do_ats_xfer is
		 * it may set scmd_len to 0.  This means all the data
		 * transfers have been started, not that they are done.
		 */
		sbd_do_ats_xfer(task, scmd, NULL, 0);
	}

	/* Copy this buffer's scatter/gather segments into trans_data. */
	laddr = dbuf->db_relative_offset;
	for (buflen = 0, ndx = 0; (buflen < dbuf->db_data_size) &&
	    (ndx < dbuf->db_sglist_length); ndx++) {
		iolen = min(dbuf->db_data_size - buflen,
		    dbuf->db_sglist[ndx].seg_length);
		if (iolen == 0)
			break;
		bcopy(dbuf->db_sglist[ndx].seg_addr, &scmd->trans_data[laddr],
		    iolen);
		buflen += iolen;
		laddr += (uint64_t)iolen;
	}
	task->task_nbytes_transferred += buflen;

ATS_XFER_DONE:
	if (ATOMIC32_GET(scmd->len) == 0 ||
	    scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
		stmf_free_dbuf(task, dbuf);
		/*
		 * if this is not the last buffer to be transfered then exit
		 * and wait for the next buffer.  Once nbufs is 0 then all the
		 * data has arrived and the compare can be done.
		 */
		if (ATOMIC8_GET(scmd->nbufs) > 0) {
			return;
		}
		if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
			sbd_ats_release_resources(task, scmd);
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_WRITE_ERROR);
		} else {
			ret = sbd_compare_and_write(task, scmd,
			    &miscompare_off);

			/*
			 * since stmf_scsilib_send_status may result in
			 * the task being released clean up resources before
			 * calling it.
			 */
			sbd_ats_release_resources(task, scmd);
			if (ret != SBD_SUCCESS) {
				if (ret != SBD_COMPARE_FAILED) {
					stmf_scsilib_send_status(task,
					    STATUS_CHECK, STMF_SAA_WRITE_ERROR);
				} else {
					sbd_send_miscompare_status(task,
					    miscompare_off);
				}
			} else {
				stmf_scsilib_send_status(task, STATUS_GOOD, 0);
			}
		}
		return;
	}
	/* More data expected: continue transfers, reusing dbuf if allowed. */
	sbd_do_ats_xfer(task, scmd, dbuf, dbuf_reusable);
}
Exemplo n.º 4
0
/*
 * srpt_ch_data_comp()
 *
 * Process an IB completion for a RDMA operation.  This completion
 * should be associated with the last RDMA operation for any
 * data buffer transfer.
 */
static void
srpt_ch_data_comp(srpt_channel_t *ch, stmf_data_buf_t *stmf_dbuf,
	ibt_wc_status_t wc_status)
{
	srpt_ds_dbuf_t		*dbuf;
	srpt_iu_t		*iu;
	stmf_status_t		status;

	ASSERT(stmf_dbuf != NULL);

	/* The port-private area of the STMF dbuf carries our dbuf state. */
	dbuf = (srpt_ds_dbuf_t *)stmf_dbuf->db_port_private;

	ASSERT(dbuf != NULL);

	iu = dbuf->db_iu;

	ASSERT(iu != NULL);
	ASSERT(iu->iu_ch == ch);

	/*
	 * If work completion indicates non-flush failure, then
	 * start a channel disconnect (asynchronous) and release
	 * the reference to the IU.  The task will be cleaned
	 * up with STMF during channel shutdown processing.
	 */
	if (wc_status != IBT_SUCCESS) {
		SRPT_DPRINTF_L2("ch_data_comp, WC status err(%d)",
		    wc_status);
		if (wc_status != IBT_WC_WR_FLUSHED_ERR) {
			srpt_ch_disconnect(ch);
		}
		atomic_dec_32(&iu->iu_sq_posted_cnt);
		return;
	}

	/*
	 * If STMF has requested this task be aborted, then if this is the
	 * last I/O operation outstanding, notify STMF the task has been
	 *  aborted and ignore the completion.
	 */
	mutex_enter(&iu->iu_lock);
	atomic_dec_32(&iu->iu_sq_posted_cnt);

	if ((iu->iu_flags & SRPT_IU_STMF_ABORTING) != 0) {
		scsi_task_t	*abort_task = iu->iu_stmf_task;

		/* Drop the IU lock before calling back into STMF. */
		mutex_exit(&iu->iu_lock);
		stmf_abort(STMF_REQUEUE_TASK_ABORT_LPORT, abort_task,
		    STMF_ABORTED, NULL);
		return;
	}

	/*
	 * We should not get an RDMA completion where the task has already
	 * completed aborting and STMF has been informed.
	 */
	ASSERT((iu->iu_flags & SRPT_IU_ABORTED) == 0);

	/*
	 * Good completion for last RDMA op associated with a data buffer
	 * I/O, if specified initiate status otherwise let STMF know we are
	 * done.
	 */
	stmf_dbuf->db_xfer_status = STMF_SUCCESS;
	mutex_exit(&iu->iu_lock);

	DTRACE_SRP_8(xfer__done, srpt_channel_t, ch,
	    ibt_wr_ds_t, &(dbuf->db_sge), srpt_iu_t, iu,
	    ibt_send_wr_t, 0, uint32_t, stmf_dbuf->db_data_size,
	    uint32_t, 0, uint32_t, 0,
	    uint32_t, (stmf_dbuf->db_flags & DB_DIRECTION_TO_RPORT) ? 1 : 0);

	/*
	 * DB_SEND_STATUS_GOOD asks the port to send SCSI status directly;
	 * only fall through to the data-xfer-done notification if that
	 * send fails.
	 */
	if ((stmf_dbuf->db_flags & DB_SEND_STATUS_GOOD) != 0) {
		status = srpt_stp_send_status(dbuf->db_iu->iu_stmf_task, 0);
		if (status == STMF_SUCCESS) {
			return;
		}
		stmf_dbuf->db_xfer_status = STMF_FAILURE;
	}
	stmf_data_xfer_done(dbuf->db_iu->iu_stmf_task, stmf_dbuf, 0);
}
Exemplo n.º 5
0
/*
 * srpt_ch_cleanup()
 *
 * Tear down a channel: remove it from the target port's list of known
 * channels, mark it disconnecting, drain and repost any outstanding
 * receive-queue IUs, and ask STMF to abort every task still
 * outstanding on the channel's SCSI session.
 *
 * Fix: the "no longer known" debug message's adjacent string literals
 * previously concatenated without a space ("nolonger").
 */
void
srpt_ch_cleanup(srpt_channel_t *ch)
{
	srpt_iu_t		*iu;
	srpt_iu_t		*next;
	ibt_wc_t		wc;
	srpt_target_port_t	*tgt;
	srpt_channel_t		*tgt_ch;
	scsi_task_t		*iutask;

	SRPT_DPRINTF_L3("ch_cleanup, invoked for ch(%p), state(%d)",
	    (void *)ch, ch->ch_state);

	/* add a ref for the channel until we're done */
	srpt_ch_add_ref(ch);

	tgt = ch->ch_tgt;
	ASSERT(tgt != NULL);

	/*
	 * Make certain the channel is in the target ports list of
	 * known channels and remove it (releasing the target
	 * ports reference to the channel).
	 */
	mutex_enter(&tgt->tp_ch_list_lock);
	tgt_ch = list_head(&tgt->tp_ch_list);
	while (tgt_ch != NULL) {
		if (tgt_ch == ch) {
			list_remove(&tgt->tp_ch_list, tgt_ch);
			srpt_ch_release_ref(tgt_ch, 0);
			break;
		}
		tgt_ch = list_next(&tgt->tp_ch_list, tgt_ch);
	}
	mutex_exit(&tgt->tp_ch_list_lock);

	if (tgt_ch == NULL) {
		SRPT_DPRINTF_L2("ch_cleanup, target channel no "
		    "longer known to target");
		srpt_ch_release_ref(ch, 0);
		return;
	}

	rw_enter(&ch->ch_rwlock, RW_WRITER);
	ch->ch_state = SRPT_CHANNEL_DISCONNECTING;
	rw_exit(&ch->ch_rwlock);

	/*
	 * Don't accept any further incoming requests, and clean
	 * up the receive queue.  The send queue is left alone
	 * so tasks can finish and clean up (whether normally
	 * or via abort).
	 */
	if (ch->ch_rcq_hdl) {
		ibt_set_cq_handler(ch->ch_rcq_hdl, NULL, NULL);

		while (ibt_poll_cq(ch->ch_rcq_hdl, &wc, 1, NULL) ==
		    IBT_SUCCESS) {
			iu = (srpt_iu_t *)(uintptr_t)wc.wc_id;
			SRPT_DPRINTF_L4("ch_cleanup, recovering"
			    " outstanding RX iu(%p)", (void *)iu);
			mutex_enter(&iu->iu_lock);
			srpt_ioc_repost_recv_iu(iu->iu_ioc, iu);
			/*
			 * Channel reference has not yet been added for this
			 * IU, so do not decrement.
			 */
			mutex_exit(&iu->iu_lock);
		}
	}

	/*
	 * Go through the list of outstanding IU for the channel's SCSI
	 * session and for each either abort or complete an abort.
	 */
	rw_enter(&ch->ch_rwlock, RW_READER);
	if (ch->ch_session != NULL) {
		rw_enter(&ch->ch_session->ss_rwlock, RW_READER);
		iu = list_head(&ch->ch_session->ss_task_list);
		while (iu != NULL) {
			next = list_next(&ch->ch_session->ss_task_list, iu);

			/* Only abort tasks that belong to this channel. */
			mutex_enter(&iu->iu_lock);
			if (ch == iu->iu_ch) {
				if (iu->iu_stmf_task == NULL) {
					cmn_err(CE_NOTE,
					    "ch_cleanup, NULL stmf task");
					ASSERT(0);
				}
				iutask = iu->iu_stmf_task;
			} else {
				iutask = NULL;
			}
			mutex_exit(&iu->iu_lock);

			if (iutask != NULL) {
				SRPT_DPRINTF_L4("ch_cleanup, aborting "
				    "task(%p)", (void *)iutask);
				stmf_abort(STMF_QUEUE_TASK_ABORT, iutask,
				    STMF_ABORTED, NULL);
			}
			iu = next;
		}
		rw_exit(&ch->ch_session->ss_rwlock);
	}
	rw_exit(&ch->ch_rwlock);

	/* Drop the reference taken at entry. */
	srpt_ch_release_ref(ch, 0);
}
Exemplo n.º 6
0
/*
 * srpt_ch_task_mgmt_abort()
 *
 * Handle an SRP task-management abort for the task identified by
 * tag_to_abort, then post the management response on the request IU.
 *
 * Returns IBT_SUCCESS (0) when the management response was sent;
 * any other status indicates the response post failed and the IU
 * should be reposted.
 */
static ibt_status_t
srpt_ch_task_mgmt_abort(srpt_channel_t *ch, srpt_iu_t *iu,
	uint64_t tag_to_abort)
{
	srpt_session_t	*session = ch->ch_session;
	srpt_iu_t	*task_iu;
	ibt_status_t	status;
	int		match;

	/*
	 * Walk the session's active task list looking for the IU whose
	 * tag matches the one the initiator asked us to abort.
	 */
	rw_enter(&session->ss_rwlock, RW_READER);
	for (task_iu = list_head(&session->ss_task_list); task_iu != NULL;
	    task_iu = list_next(&session->ss_task_list, task_iu)) {
		mutex_enter(&task_iu->iu_lock);
		match = (tag_to_abort == task_iu->iu_tag);
		mutex_exit(&task_iu->iu_lock);
		if (match) {
			break;
		}
	}
	rw_exit(&session->ss_rwlock);

	/*
	 * Take appropriate action based on state of task
	 * to be aborted:
	 * 1) No longer exists - do nothing.
	 * 2) Previously aborted or status queued - do nothing.
	 * 3) Otherwise - initiate abort.
	 */
	if (task_iu != NULL) {
		mutex_enter(&task_iu->iu_lock);
		if ((task_iu->iu_flags & (SRPT_IU_STMF_ABORTING |
		    SRPT_IU_ABORTED | SRPT_IU_RESP_SENT)) != 0) {
			/* Already aborting/aborted or response queued. */
			mutex_exit(&task_iu->iu_lock);
		} else {
			/*
			 * Set aborting flag and notify STMF of the abort
			 * request.  No additional I/O will be queued for
			 * this IU.
			 */
			SRPT_DPRINTF_L3("ch_task_mgmt_abort, task found");
			task_iu->iu_flags |= SRPT_IU_SRP_ABORTING;
			mutex_exit(&task_iu->iu_lock);
			stmf_abort(STMF_QUEUE_TASK_ABORT,
			    task_iu->iu_stmf_task, STMF_ABORTED, NULL);
		}
	}

	/* Post the SRP task-management response on the request IU. */
	mutex_enter(&iu->iu_lock);
	status = srpt_stp_send_mgmt_response(iu, SRP_TM_SUCCESS,
	    SRPT_FENCE_SEND);
	mutex_exit(&iu->iu_lock);

	if (status != IBT_SUCCESS) {
		SRPT_DPRINTF_L2("ch_task_mgmt_abort, err(%d)"
		    " posting abort response", status);
	}

	return (status);
}
Exemplo n.º 7
0
/*
 * srpt_ch_rsp_comp()
 *
 * Process a completion for an IB SEND message.  A SEND completion
 * is for a SRP response packet sent back to the initiator.  It
 * will not have a STMF SCSI task associated with it if it was
 * sent for a rejected IU, or was a task management abort response.
 */
static void
srpt_ch_rsp_comp(srpt_channel_t *ch, srpt_iu_t *iu,
	ibt_wc_status_t wc_status)
{
	ASSERT(iu->iu_ch == ch);

	/*
	 * If work completion indicates failure, decrement the
	 * send posted count.  If it is a flush error, we are
	 * done; for all other errors start a channel disconnect.
	 */
	if (wc_status != IBT_SUCCESS) {
		SRPT_DPRINTF_L2("ch_rsp_comp, WC status err(%d)",
		    wc_status);
		atomic_dec_32(&iu->iu_sq_posted_cnt);

		if (wc_status != IBT_WC_WR_FLUSHED_ERR) {
			srpt_ch_disconnect(ch);
		}

		/*
		 * With no STMF task attached, the IU can be reposted and
		 * the channel reference it holds released here; otherwise
		 * teardown happens in task_free.
		 */
		mutex_enter(&iu->iu_lock);
		if (iu->iu_stmf_task == NULL) {
			srpt_ioc_repost_recv_iu(iu->iu_ioc, iu);
			mutex_exit(&iu->iu_lock);
			srpt_ch_release_ref(ch, 0);
		} else {
			/* cleanup handled in task_free */
			mutex_exit(&iu->iu_lock);
		}
		return;
	}

	/*
	 * If the IU response completion is not associated with
	 * with a SCSI task, release the IU to return the resource
	 * and the reference to the channel it holds.
	 */
	mutex_enter(&iu->iu_lock);
	atomic_dec_32(&iu->iu_sq_posted_cnt);

	if (iu->iu_stmf_task == NULL) {
		srpt_ioc_repost_recv_iu(iu->iu_ioc, iu);
		mutex_exit(&iu->iu_lock);
		srpt_ch_release_ref(ch, 0);
		return;
	}

	/*
	 * If STMF has requested the IU task be aborted, then notify STMF
	 * the command is now aborted.
	 */
	if ((iu->iu_flags & SRPT_IU_STMF_ABORTING) != 0) {
		scsi_task_t	*abort_task = iu->iu_stmf_task;

		/* Drop the IU lock before calling back into STMF. */
		mutex_exit(&iu->iu_lock);
		stmf_abort(STMF_REQUEUE_TASK_ABORT_LPORT, abort_task,
		    STMF_ABORTED, NULL);
		return;
	}

	/*
	 * We should not get a SEND completion where the task has already
	 * completed aborting and STMF has been informed.
	 */
	ASSERT((iu->iu_flags & SRPT_IU_ABORTED) == 0);

	/*
	 * Successful status response completion for SCSI task.
	 * Let STMF know we are done.
	 */
	mutex_exit(&iu->iu_lock);

	stmf_send_status_done(iu->iu_stmf_task, STMF_SUCCESS,
	    STMF_IOF_LPORT_DONE);
}