Example #1
int
ghd_waitq_process_and_mutex_hold(ccc_t *cccp)
{
	gcmd_t	*gcmdp;
	int	 rc = FALSE;

	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	ASSERT(mutex_owned(&cccp->ccc_waitq_mutex));

	for (;;) {
		if (L2_EMPTY(&GHBA_QHEAD(cccp))) {
			/* return if the list is empty */
			GDBG_WAITQ(("ghd_waitq_proc: MT cccp 0x%p qp 0x%p\n",
				cccp, &cccp->ccc_waitq));
			break;
		}
		if (GHBA_NACTIVE(cccp) >= GHBA_MAXACTIVE(cccp)) {
			/* return if the HBA is too active */
			GDBG_WAITQ(("ghd_waitq_proc: N>M cccp 0x%p qp 0x%p"
				" N %ld max %ld\n", cccp, &cccp->ccc_waitq,
					GHBA_NACTIVE(cccp),
					GHBA_MAXACTIVE(cccp)));
			break;
		}

		/*
		 * bail out if the wait queue has been
		 * "held" by the HBA driver
		 */
		if (cccp->ccc_waitq_held) {
			GDBG_WAITQ(("ghd_waitq_proc: held"));
			return (rc);
		}

		if (cccp->ccc_waitq_frozen) {

			clock_t lbolt, delay_in_hz, time_to_wait;

			delay_in_hz =
			    drv_usectohz(cccp->ccc_waitq_freezedelay * 1000);

			lbolt = ddi_get_lbolt();
			time_to_wait = delay_in_hz -
			    (lbolt - cccp->ccc_waitq_freezetime);

			if (time_to_wait > 0) {
				/*
				 * stay frozen; we'll be called again
				 * by ghd_timeout_softintr()
				 */
				GDBG_WAITQ(("ghd_waitq_proc: frozen"));
				return (rc);
			} else {
				/* unfreeze and continue */
				GDBG_WAITQ(("ghd_waitq_proc: unfreezing"));
				cccp->ccc_waitq_freezetime = 0;
				cccp->ccc_waitq_freezedelay = 0;
				cccp->ccc_waitq_frozen = 0;
			}
		}

		gcmdp = (gcmd_t *)L2_remove_head(&GHBA_QHEAD(cccp));
		ASSERT(gcmdp != NULL);
		GHBA_NACTIVE(cccp)++;
		gcmdp->cmd_waitq_level++;
		mutex_exit(&cccp->ccc_waitq_mutex);

		/*
		 * Start up the next I/O request
		 */
		gcmdp->cmd_state = GCMD_STATE_ACTIVE;
		if (!(*cccp->ccc_hba_start)(cccp->ccc_hba_handle, gcmdp)) {
			/* if the HBA rejected the request, requeue it */
			gcmdp->cmd_state = GCMD_STATE_WAITQ;
			mutex_enter(&cccp->ccc_waitq_mutex);
			GHBA_NACTIVE(cccp)--;
			gcmdp->cmd_waitq_level--;
			L2_add_head(&GHBA_QHEAD(cccp), &gcmdp->cmd_q, gcmdp);
			GDBG_WAITQ(("ghd_waitq_proc: busy cccp 0x%p gcmdp 0x%p"
				" handle 0x%p\n", cccp, gcmdp,
					cccp->ccc_hba_handle));
			break;
		}
		rc = TRUE;
		mutex_enter(&cccp->ccc_waitq_mutex);
		GDBG_WAITQ(("ghd_waitq_proc: ++ cccp 0x%p gcmdp 0x%p N %ld\n",
			cccp, gcmdp, GHBA_NACTIVE(cccp)));
	}
	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	ASSERT(mutex_owned(&cccp->ccc_waitq_mutex));
	return (rc);
}
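
ghd_waitq_process_and_mutex_hold() is written to be entered and exited with both the HBA and wait-queue mutexes held (see the ASSERTs at the top and bottom), and it returns TRUE if it managed to start at least one request. A minimal sketch of how a caller might wrap it is shown below; the wrapper name is an assumption for illustration only, and Example #2 shows the same pattern inline at its call site.

/*
 * Sketch of a caller-side wrapper (the name is an assumption for
 * illustration, not necessarily the driver's exact helper): take the
 * wait-queue mutex, let the routine above start as many waiting
 * requests as it can, then drop the wait-queue mutex again.  The HBA
 * mutex is expected to be held by the caller throughout.
 */
static void
example_waitq_process_and_mutex_exit(ccc_t *cccp)
{
	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));

	mutex_enter(&cccp->ccc_waitq_mutex);
	(void) ghd_waitq_process_and_mutex_hold(cccp);
	mutex_exit(&cccp->ccc_waitq_mutex);
}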
Example #2
static int
ghd_poll(ccc_t	*cccp,
	gpoll_t	 polltype,
	ulong_t	 polltime,
	gcmd_t	*poll_gcmdp,
	gtgt_t	*gtgtp,
	void	*intr_status)
{
	gcmd_t	*gcmdp;
	L2el_t	 gcmd_hold_queue;
	int	 got_it = FALSE;
	clock_t  poll_lbolt;
	clock_t	 start_lbolt;
	clock_t	 current_lbolt;

	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	L2_INIT(&gcmd_hold_queue);

	/* What time is it? */
	poll_lbolt = drv_usectohz((clock_t)polltime);
	start_lbolt = ddi_get_lbolt();

	/* unqueue and save all CMD/CCBs until I find the right one */
	while (!got_it) {

		/* Give up yet? */
		current_lbolt = ddi_get_lbolt();
		if (poll_lbolt && (current_lbolt - start_lbolt >= poll_lbolt))
			break;

		/*
		 * Delay 1 msec each time around the loop.  The exact
		 * value is arbitrary and any nonzero delay should work;
		 * some devices don't like being polled too fast, and
		 * polling with no delay saturates the bus on an MP
		 * system.
		 */
		drv_usecwait(1000);

		/*
		 * check for any new device status
		 */
		if ((*cccp->ccc_get_status)(cccp->ccc_hba_handle, intr_status))
			(*cccp->ccc_process_intr)(cccp->ccc_hba_handle,
			    intr_status);

		/*
		 * If something completed, try to start the next
		 * request from the wait queue.  Don't release the
		 * HBA mutex because I don't yet know whether my
		 * request(s) are on the done queue.
		 */
		mutex_enter(&cccp->ccc_waitq_mutex);
		(void) ghd_waitq_process_and_mutex_hold(cccp);
		mutex_exit(&cccp->ccc_waitq_mutex);

		/*
		 * Process the first of any timed-out requests.
		 */
		ghd_timer_poll(cccp, GHD_TIMER_POLL_ONE);

		/*
		 * Unqueue all the completed requests, look for mine
		 */
		while ((gcmdp = ghd_doneq_get(cccp)) != NULL) {
			/*
			 * If we got one and it's my request, then
			 * we're done.
			 */
			if (gcmdp == poll_gcmdp) {
				poll_gcmdp->cmd_state = GCMD_STATE_IDLE;
				got_it = TRUE;
				continue;
			}
			/* fifo queue the other cmds on my local list */
			L2_add(&gcmd_hold_queue, &gcmdp->cmd_q, gcmdp);
		}

		/*
		 * Check whether we're done yet.
		 */
		switch (polltype) {
		case GHD_POLL_DEVICE:
			/*
			 * wait for everything queued on a specific device
			 */
			if (GDEV_NACTIVE(gtgtp->gt_gdevp) == 0)
				got_it = TRUE;
			break;

		case GHD_POLL_ALL:
			/*
			 * if we're waiting for all outstanding requests
			 * and the active list is now empty, then exit
			 */
			if (GHBA_NACTIVE(cccp) == 0)
				got_it = TRUE;
			break;

		case GHD_POLL_REQUEST:
			break;

		}
	}

	if (L2_EMPTY(&gcmd_hold_queue)) {
		ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));
		ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
		return (got_it);
	}

	/*
	 * copy the local gcmd_hold_queue back to the doneq so
	 * that the order of completion callbacks is preserved
	 */
	while ((gcmdp = L2_next(&gcmd_hold_queue)) != NULL) {
		L2_delete(&gcmdp->cmd_q);
		GHD_DONEQ_PUT_TAIL(cccp, gcmdp);
	}

	ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));
	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	return (got_it);
}
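
ghd_poll() spins with the HBA mutex held, re-driving the wait queue and draining the done queue once per millisecond until the chosen condition is met: a specific request has completed (GHD_POLL_REQUEST), a specific device has gone quiet (GHD_POLL_DEVICE), or the whole HBA has drained (GHD_POLL_ALL). The polltime argument is passed straight to drv_usectohz(), so it is a timeout in microseconds, with 0 meaning poll until done. A minimal caller sketch follows; the example_ name and the 5-second timeout are assumptions for illustration, and since ghd_poll() is static the sketch is assumed to live in the same file.

/*
 * Sketch of a synchronous caller (illustration only; the example_
 * prefix and the 5-second timeout are assumptions, not part of the
 * driver).  The HBA mutex must be held across the call, matching the
 * ASSERT at the top of ghd_poll().
 */
static int
example_wait_for_request(ccc_t *cccp, gcmd_t *gcmdp, gtgt_t *gtgtp,
	void *intr_status)
{
	int	 got_it;

	mutex_enter(&cccp->ccc_hba_mutex);
	/* wait up to 5,000,000 usec (5 seconds) for this one request */
	got_it = ghd_poll(cccp, GHD_POLL_REQUEST, 5000000, gcmdp, gtgtp,
	    intr_status);
	mutex_exit(&cccp->ccc_hba_mutex);

	return (got_it);
}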
Example #3
void
ghd_waitq_delete(ccc_t *cccp, gcmd_t *gcmdp)
{
	gtgt_t	*gtgtp = GCMDP2GTGTP(gcmdp);
	gdev_t	*gdevp = gtgtp->gt_gdevp;
#if defined(GHD_DEBUG) || defined(__lint)
	Q_t	*qp = &gdevp->gd_waitq;
#endif

	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	mutex_enter(&cccp->ccc_waitq_mutex);

	/*
	 * Adjust all queue counters. If this request is being aborted
	 * it might only have made it to the target queue. Otherwise,
	 * both the target and hba queue have to be adjusted when a
	 * request is completed normally. The cmd_waitq_level value
	 * indicates which queue counters need to be adjusted. It's
	 * incremented as the request progresses up the queues.
	 */
	switch (gcmdp->cmd_waitq_level) {
	case 0:
		break;
	case 1:
		/*
		 * If this is an early timeout or early abort, the request
		 * is still linked onto a waitq; remove it now.  If it's
		 * an active request that is no longer on the waitq, then
		 * calling L2_delete a second time does no harm.
		 */
		L2_delete(&gcmdp->cmd_q);
		break;

	case 2:
		L2_delete(&gcmdp->cmd_q);
#if defined(GHD_DEBUG) || defined(__lint)
		if (GDEV_NACTIVE(gdevp) == 0)
			debug_enter("\n\nGHD WAITQ DELETE\n\n");
#endif
		GDEV_NACTIVE(gdevp)--;
		break;

	case 3:
		/* it's an active or completed command */
#if defined(GHD_DEBUG) || defined(__lint)
		if (GDEV_NACTIVE(gdevp) == 0 || GHBA_NACTIVE(cccp) == 0)
			debug_enter("\n\nGHD WAITQ DELETE\n\n");
#endif
		GDEV_NACTIVE(gdevp)--;
		GHBA_NACTIVE(cccp)--;
		break;

	default:
		/* this shouldn't happen */
#if defined(GHD_DEBUG) || defined(__lint)
		debug_enter("\n\nGHD WAITQ LEVEL > 3\n\n");
#endif
		break;
	}

	GDBG_WAITQ(("ghd_waitq_delete: gcmdp 0x%p qp 0x%p level %ld\n",
		gcmdp, qp, gcmdp->cmd_waitq_level));

	/*
	 * There's probably now more room in the HBA queue. Move
	 * up as many requests as possible.
	 */
	ghd_waitq_shuffle_up(cccp, gdevp);

	mutex_exit(&cccp->ccc_waitq_mutex);
}
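
ghd_waitq_delete() uses cmd_waitq_level to undo exactly as much counter bookkeeping as the request accumulated on its way up the queues; Example #1 shows the final increment (GHBA_NACTIVE(cccp)++ together with cmd_waitq_level++ when a request is handed to the HBA). For orientation, here is a rough sketch of the opposite direction. ghd_waitq_shuffle_up() itself is not shown in these examples, so the body below, and the GDEV_QHEAD() accessor, are assumptions modeled on the HBA-side code in Example #1.

/*
 * Rough sketch of one "shuffle up" step (an assumption about what
 * ghd_waitq_shuffle_up() does; GDEV_QHEAD() is a hypothetical accessor
 * for the per-device wait queue head, by analogy with GHBA_QHEAD()).
 * The point is the pairing: every counter bumped on the way up is
 * matched by a cmd_waitq_level++, so ghd_waitq_delete() above knows
 * exactly which counters to undo.  Per-device and per-HBA active
 * limits are ignored here for brevity.
 */
static void
example_shuffle_up_one(ccc_t *cccp, gdev_t *gdevp)
{
	gcmd_t	*gcmdp;

	ASSERT(mutex_owned(&cccp->ccc_waitq_mutex));

	gcmdp = (gcmd_t *)L2_remove_head(&GDEV_QHEAD(gdevp));
	if (gcmdp == NULL)
		return;			/* nothing waiting on this device */

	GDEV_NACTIVE(gdevp)++;		/* undone by case 2 or 3 above */
	gcmdp->cmd_waitq_level++;	/* level 2: on the HBA wait queue */
	L2_add(&GHBA_QHEAD(cccp), &gcmdp->cmd_q, gcmdp);
}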