Example 1
void
ghd_waitq_shuffle_up(ccc_t *cccp, gdev_t *gdevp)
{
	gcmd_t	*gcmdp;

	ASSERT(mutex_owned(&cccp->ccc_waitq_mutex));

	GDBG_WAITQ(("ghd_waitq_shuffle_up: cccp 0x%p gdevp 0x%p N %ld "
	    "max %ld\n", (void *)cccp, (void *)gdevp, GDEV_NACTIVE(gdevp),
	    GDEV_MAXACTIVE(gdevp)));
	for (;;) {
		/*
		 * Check the device wait queue throttle to see if we can
		 * shuffle up a request to the HBA wait queue.
		 */
		if (GDEV_NACTIVE(gdevp) >= GDEV_MAXACTIVE(gdevp)) {
			GDBG_WAITQ(("ghd_waitq_shuffle_up: N>MAX gdevp 0x%p\n",
			    (void *)gdevp));
			return;
		}

		/*
		 * Single-thread requests when multiple target instances
		 * share this device, because the different target
		 * drivers might have conflicting maxactive throttles.
		 */
		if (gdevp->gd_ninstances > 1 && GDEV_NACTIVE(gdevp) > 0) {
			GDBG_WAITQ(("ghd_waitq_shuffle_up: multi gdevp 0x%p\n",
			    (void *)gdevp));
			return;
		}

		/*
		 * promote the topmost request from the device queue to
		 * the HBA queue.
		 */
		if ((gcmdp = L2_remove_head(&GDEV_QHEAD(gdevp))) == NULL) {
			/* the device wait queue is empty, so we're done */
			GDBG_WAITQ(("ghd_waitq_shuffle_up: MT gdevp 0x%p\n",
			    (void *)gdevp));
			return;
		}
		L2_add(&GHBA_QHEAD(cccp), &gcmdp->cmd_q, gcmdp);
		GDEV_NACTIVE(gdevp)++;
		gcmdp->cmd_waitq_level++;
		GDBG_WAITQ(("ghd_waitq_shuffle_up: gdevp 0x%p gcmdp 0x%p\n",
		    (void *)gdevp, (void *)gcmdp));
	}
}
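
The loop above is a throttle check followed by a head-to-tail move between two queues. Below is a minimal, self-contained sketch of that algorithm using hypothetical simplified types (sim_dev_t, sim_hba_t, singly linked lists) in place of the real gdev_t/ccc_t structures and the L2 list macros, and with no locking; it illustrates the shuffle-up logic, not the GHD implementation.

#include <stddef.h>

typedef struct sim_cmd {
	struct sim_cmd	*next;		/* wait queue linkage */
	int		 id;
} sim_cmd_t;

typedef struct sim_dev {
	sim_cmd_t	*waitq;		/* per-device wait queue head */
	int		 nactive;	/* requests promoted to the HBA queue */
	int		 maxactive;	/* per-device throttle */
	int		 ninstances;	/* target instances sharing this device */
} sim_dev_t;

typedef struct sim_hba {
	sim_cmd_t	*qhead;		/* HBA wait queue */
	sim_cmd_t	*qtail;
} sim_hba_t;

/*
 * Promote requests from the device queue to the HBA queue until
 * a throttle stops us or the device queue is empty.
 */
static void
sim_shuffle_up(sim_hba_t *hba, sim_dev_t *dev)
{
	sim_cmd_t *cmdp;

	for (;;) {
		/* device throttle: at most maxactive requests in flight */
		if (dev->nactive >= dev->maxactive)
			return;

		/* single-thread when multiple instances share the device */
		if (dev->ninstances > 1 && dev->nactive > 0)
			return;

		/* pop the head of the device queue; empty means done */
		if ((cmdp = dev->waitq) == NULL)
			return;
		dev->waitq = cmdp->next;
		cmdp->next = NULL;

		/* append to the tail of the HBA queue and count it */
		if (hba->qtail != NULL)
			hba->qtail->next = cmdp;
		else
			hba->qhead = cmdp;
		hba->qtail = cmdp;
		dev->nactive++;
	}
}

Note that the real code appends with L2_add(), which maintains a doubly linked list, and also increments cmd_waitq_level to record how far the command has been promoted.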
Example 2
int
ghd_transport(ccc_t	*cccp,
		gcmd_t	*gcmdp,
		gtgt_t	*gtgtp,
		ulong_t	 timeout,
		int	 polled,
		void	*intr_status)
{
	gdev_t	*gdevp = gtgtp->gt_gdevp;

	ASSERT(!mutex_owned(&cccp->ccc_hba_mutex));
	ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));

	if (polled) {
		/*
		 * Grab the HBA mutex so no other requests are started
		 * until after this one completes.
		 */
		mutex_enter(&cccp->ccc_hba_mutex);

		GDBG_START(("ghd_transport: polled"
		    " cccp 0x%p gdevp 0x%p gtgtp 0x%p gcmdp 0x%p\n",
		    (void *)cccp, (void *)gdevp, (void *)gtgtp, (void *)gcmdp));

		/*
		 * Lock the doneq so no other thread flushes the Q.
		 */
		ghd_doneq_pollmode_enter(cccp);
	}
#if defined(GHD_DEBUG) || defined(__lint)
	else {
		GDBG_START(("ghd_transport: non-polled"
		    " cccp 0x%p gdevp 0x%p gtgtp 0x%p gcmdp 0x%p\n",
		    (void *)cccp, (void *)gdevp, (void *)gtgtp, (void *)gcmdp));
	}
#endif
	/*
	 * Add this request to the tail of the device wait queue.
	 */
	gcmdp->cmd_waitq_level = 1;
	mutex_enter(&cccp->ccc_waitq_mutex);
	L2_add(&GDEV_QHEAD(gdevp), &gcmdp->cmd_q, gcmdp);

	/*
	 * Add this request to the packet timer active list and start its
	 * abort timer.
	 */
	gcmdp->cmd_state = GCMD_STATE_WAITQ;
	ghd_timer_start(cccp, gcmdp, timeout);

	/*
	 * Check the device wait queue throttle and perhaps move
	 * some requests to the end of the HBA wait queue.
	 */
	ghd_waitq_shuffle_up(cccp, gdevp);

	if (!polled) {
		/*
		 * See if the HBA mutex is available, but use tryenter
		 * so we don't deadlock.
		 */
		if (!mutex_tryenter(&cccp->ccc_hba_mutex)) {
			/* The HBA mutex isn't available */
			GDBG_START(("ghd_transport: !mutex cccp 0x%p\n",
			    (void *)cccp));
			mutex_exit(&cccp->ccc_waitq_mutex);
			return (TRAN_ACCEPT);
		}
		GDBG_START(("ghd_transport: got mutex cccp 0x%p\n",
		    (void *)cccp));

		/*
		 * Start as many requests as possible from the head
		 * of the HBA wait queue.
		 */
		ghd_waitq_process_and_mutex_exit(cccp);

		ASSERT(!mutex_owned(&cccp->ccc_hba_mutex));
		ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));

		return (TRAN_ACCEPT);
	}

	/*
	 * If polled mode (FLAG_NOINTR specified in scsi_pkt flags),
	 * then ghd_poll() waits until the request completes or times out
	 * before returning.
	 */
	mutex_exit(&cccp->ccc_waitq_mutex);
	(void) ghd_poll(cccp, GHD_POLL_REQUEST, 0, gcmdp, gtgtp, intr_status);
	ghd_doneq_pollmode_exit(cccp);

	mutex_enter(&cccp->ccc_waitq_mutex);
	ghd_waitq_process_and_mutex_exit(cccp);

	/* call HBA's completion function but don't do callback to target */
	(*cccp->ccc_hba_complete)(cccp->ccc_hba_handle, gcmdp, FALSE);

	GDBG_START(("ghd_transport: polled done cccp 0x%p\n", (void *)cccp));
	return (TRAN_ACCEPT);
}
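
The mutex_tryenter() in the non-polled path above is worth isolating: the thread already holds ccc_waitq_mutex and wants ccc_hba_mutex, and blocking there could deadlock against a thread acquiring the locks in the opposite order. Here is a minimal POSIX-threads sketch of that pattern; queue_request() and the queue operations hinted at in the comments are hypothetical stand-ins, not GHD code.

#include <pthread.h>

static pthread_mutex_t waitq_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t hba_mutex = PTHREAD_MUTEX_INITIALIZER;

void
queue_request(void)
{
	pthread_mutex_lock(&waitq_mutex);

	/* ... append the new request to the wait queue here ... */

	/*
	 * Opportunistically try for the HBA mutex.  trylock never
	 * blocks, so taking waitq_mutex before hba_mutex here cannot
	 * deadlock against a thread that holds hba_mutex and is
	 * waiting for waitq_mutex.
	 */
	if (pthread_mutex_trylock(&hba_mutex) != 0) {
		/* busy: whoever holds hba_mutex will drain the queue */
		pthread_mutex_unlock(&waitq_mutex);
		return;
	}

	/* we won the race: start queued requests under both locks */
	/* ... drain the wait queue here ... */

	pthread_mutex_unlock(&hba_mutex);
	pthread_mutex_unlock(&waitq_mutex);
}

Either way the caller gets TRAN_ACCEPT back immediately; if the trylock loses the race, the current lock holder is responsible for draining the queue this thread just appended to, which is why the request is enqueued before the trylock is attempted.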