Example #1
/**
 * Worker for rtSemMutexSolRequest that handles the case where we go to sleep.
 *
 * @returns VINF_SUCCESS, VERR_TIMEOUT, VERR_INTERRUPTED, or VERR_SEM_DESTROYED.
 *          Returns without owning the mutex.
 * @param   pThis           The mutex instance.
 * @param   cMillies        The timeout, must be > 0 or RT_INDEFINITE_WAIT.
 * @param   fInterruptible  The wait type.
 *
 * @remarks This needs to be called with the mutex object held!
 */
static int rtSemMutexSolRequestSleep(PRTSEMMUTEXINTERNAL pThis, RTMSINTERVAL cMillies,
                                     bool fInterruptible)
{
    int rc = VERR_GENERAL_FAILURE;
    Assert(cMillies > 0);

    /*
     * Now we wait (sleep; although might spin and then sleep) & reference the mutex.
     */
    ASMAtomicIncU32(&pThis->cWaiters);
    ASMAtomicIncU32(&pThis->cRefs);

    if (cMillies != RT_INDEFINITE_WAIT)
    {
        clock_t cTicks   = drv_usectohz((clock_t)(cMillies * 1000L));
        clock_t cTimeout = ddi_get_lbolt();
        cTimeout        += cTicks;
        if (fInterruptible)
            rc = cv_timedwait_sig(&pThis->Cnd, &pThis->Mtx, cTimeout);
        else
            rc = cv_timedwait(&pThis->Cnd, &pThis->Mtx, cTimeout);
    }
    else
    {
        if (fInterruptible)
            rc = cv_wait_sig(&pThis->Cnd, &pThis->Mtx);
        else
        {
            cv_wait(&pThis->Cnd, &pThis->Mtx);
            rc = 1;
        }
    }

    ASMAtomicDecU32(&pThis->cWaiters);
    if (rc > 0)
    {
        if (pThis->u32Magic == RTSEMMUTEX_MAGIC)
        {
            if (pThis->hOwnerThread == NIL_RTNATIVETHREAD)
            {
                /*
                 * Woken up by a release from another thread.
                 */
                Assert(pThis->cRecursions == 0);
                pThis->cRecursions = 1;
                pThis->hOwnerThread = RTThreadNativeSelf();
                rc = VINF_SUCCESS;
            }
            else
            {
                /*
                 * Interrupted by some signal.
                 */
                rc = VERR_INTERRUPTED;
            }
        }
        else
        {
            /*
             * Awakened due to the destruction-in-progress broadcast.
			 * We will clean up if we're the last waiter.
             */
            rc = VERR_SEM_DESTROYED;
        }
    }
    else if (rc == -1)
    {
        /*
         * Timed out.
         */
        rc = VERR_TIMEOUT;
    }
    else
    {
        /*
         * Condition may not have been met, returned due to pending signal.
         */
        rc = VERR_INTERRUPTED;
    }

    if (!ASMAtomicDecU32(&pThis->cRefs))
    {
        Assert(RT_FAILURE_NP(rc));
        mutex_exit(&pThis->Mtx);
        cv_destroy(&pThis->Cnd);
        mutex_destroy(&pThis->Mtx);
        RTMemFree(pThis);
        return rc;
    }

    return rc;
}
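
The core move in the function above is the relative-to-absolute timeout conversion: cv_timedwait() takes an absolute lbolt deadline, so the millisecond interval is scaled to microseconds for drv_usectohz() and the resulting tick count is added to ddi_get_lbolt(). Below is a minimal sketch of just that conversion; my_sync_t and my_wait_ms() are hypothetical names, and the caller is assumed to hold the mutex, as cv_timedwait() requires.

#include <sys/types.h>
#include <sys/ksynch.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

typedef struct my_sync {
	kmutex_t	mtx;	/* held by the caller of my_wait_ms() */
	kcondvar_t	cnd;
} my_sync_t;

/* Returns -1 on timeout, >0 when signalled (cv_timedwait() semantics). */
static clock_t
my_wait_ms(my_sync_t *p, clock_t cMillies)
{
	/* drv_usectohz() converts microseconds into clock ticks... */
	clock_t cTicks = drv_usectohz(cMillies * 1000L);

	/* ...and cv_timedwait() wants an absolute deadline in ticks. */
	return (cv_timedwait(&p->cnd, &p->mtx, ddi_get_lbolt() + cTicks));
}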
Example #2
int
beep_mktone(int frequency, int duration)
{
	int next;
	int status = 0;

	BEEP_DEBUG1((CE_CONT, "beep_mktone(%d, %d) : start.", frequency,
	    duration));

	/*
	 * The frequency value is limited to the range of [0 - 32767]
	 */
	if (frequency < 0 || frequency > INT16_MAX)
		return (EINVAL);

	mutex_enter(&beep_state.mutex);

	if (beep_state.mode == BEEP_UNINIT) {
		status = ENXIO;

	} else if (beep_state.mode == BEEP_TIMED) {

		/* If already processing a beep, queue this one */

		if (frequency != 0) {
			next = beep_state.queue_tail + 1;
			if (next == beep_state.queue_size)
				next = 0;

			if (next != beep_state.queue_head) {
				/*
				 * If there is room in the queue,
				 * add this entry
				 */

				beep_state.queue[beep_state.queue_tail].
				    frequency = (unsigned short)frequency;

				beep_state.queue[beep_state.queue_tail].
				    duration = (unsigned short)duration;

				beep_state.queue_tail = next;
			} else {
				status = EAGAIN;
			}
		}

	} else if (beep_state.mode == BEEP_OFF) {

		/* Start another beep only if the previous one is over */

		if (frequency != 0) {
			beep_state.mode = BEEP_TIMED;

			if (beep_state.beep_freq != NULL)
				(*beep_state.beep_freq)(beep_state.arg,
				    frequency);

			if (beep_state.beep_on != NULL)
				(*beep_state.beep_on)(beep_state.arg);

			/*
			 * Set timeout for ending the beep after the
			 * specified time
			 */

			beep_state.timeout_id = timeout(beep_timeout, NULL,
			    drv_usectohz(duration * 1000));
		}
	} else {
		status = EBUSY;
	}

	mutex_exit(&beep_state.mutex);

	BEEP_DEBUG1((CE_CONT, "beep_mktone : done, status %d.", status));

	return (status);
}
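
beep_mktone() arms its expiry with timeout(beep_timeout, NULL, drv_usectohz(duration * 1000)), scaling milliseconds to microseconds. The arm/disarm lifecycle of such a one-shot callout, reduced to a sketch: the my_* names are illustrative (my_tone_off() is not the driver's beep_timeout()), and a real driver would guard the id with a mutex the way beep_state.mutex does above.

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

static timeout_id_t my_tid;

/* Hypothetical expiry routine: a real one would silence the tone here. */
static void
my_tone_off(void *arg)
{
	my_tid = 0;
}

static void
my_start_tone(int duration_ms)
{
	/* fire once, duration_ms from now */
	my_tid = timeout(my_tone_off, NULL,
	    drv_usectohz(duration_ms * 1000));
}

static void
my_cancel_tone(void)
{
	/* disarm a still-pending callout; the remaining time is unused */
	if (my_tid != 0)
		(void) untimeout(my_tid);
}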
Example #3
static void
idm_update_state(idm_conn_t *ic, idm_conn_state_t new_state,
    idm_conn_event_ctx_t *event_ctx)
{
	int rc;
	idm_status_t idm_status;

	/*
	 * Validate new state
	 */
	ASSERT(new_state != CS_S0_UNDEFINED);
	ASSERT3U(new_state, <, CS_MAX_STATE);

	/*
	 * Update state in context.  We protect this with a mutex
	 * even though the state machine code is single threaded so that
	 * other threads can check the state value atomically.
	 */
	new_state = (new_state < CS_MAX_STATE) ?
	    new_state : CS_S0_UNDEFINED;

	IDM_SM_LOG(CE_NOTE, "idm_update_state: conn %p, evt %s(%d), "
	    "%s(%d) --> %s(%d)", (void *)ic,
	    idm_ce_name[event_ctx->iec_event], event_ctx->iec_event,
	    idm_cs_name[ic->ic_state], ic->ic_state,
	    idm_cs_name[new_state], new_state);

	DTRACE_PROBE2(conn__state__change,
	    idm_conn_t *, ic, idm_conn_state_t, new_state);

	mutex_enter(&ic->ic_state_mutex);
	idm_sm_audit_state_change(&ic->ic_state_audit, SAS_IDM_CONN,
	    (int)ic->ic_state, (int)new_state);
	ic->ic_last_state = ic->ic_state;
	ic->ic_state = new_state;
	cv_signal(&ic->ic_state_cv);
	mutex_exit(&ic->ic_state_mutex);

	switch (ic->ic_state) {
	case CS_S1_FREE:
		ASSERT(0); /* Initial state, can't return */
		break;
	case CS_S2_XPT_WAIT:
		if ((rc = idm_ini_conn_finish(ic)) != 0) {
			idm_conn_event(ic, CE_CONNECT_FAIL, NULL);
		} else {
			idm_conn_event(ic, CE_CONNECT_SUCCESS, NULL);
		}
		break;
	case CS_S3_XPT_UP:
		/*
		 * Finish any connection related setup, including
		 * waking up the idm_tgt_conn_accept thread and
		 * starting the login timer.  If the function fails
		 * then we return to "free" state.
		 */
		if ((rc = idm_tgt_conn_finish(ic)) != IDM_STATUS_SUCCESS) {
			switch (rc) {
			case IDM_STATUS_REJECT:
				idm_conn_event(ic, CE_CONNECT_REJECT, NULL);
				break;
			default:
				idm_conn_event(ic, CE_CONNECT_FAIL, NULL);
				break;
			}
		}

		/*
		 * First login received will cause a transition to
		 * CS_S4_IN_LOGIN.  Start login timer.
		 */
		ic->ic_state_timeout = timeout(idm_login_timeout, ic,
		    drv_usectohz(IDM_LOGIN_SECONDS*1000000));
		break;
	case CS_S4_IN_LOGIN:
		if (ic->ic_conn_type == CONN_TYPE_INI) {
			(void) idm_notify_client(ic, CN_READY_FOR_LOGIN, NULL);
			mutex_enter(&ic->ic_state_mutex);
			ic->ic_state_flags |= CF_LOGIN_READY;
			cv_signal(&ic->ic_state_cv);
			mutex_exit(&ic->ic_state_mutex);
		}
		break;
	case CS_S5_LOGGED_IN:
		ASSERT(!ic->ic_ffp);
		/*
		 * IDM can go to FFP before the initiator but it
		 * needs to go to FFP after the target (IDM target should
		 * go to FFP after notify_ack).
		 */
		idm_status = idm_ffp_enable(ic);
		if (idm_status != IDM_STATUS_SUCCESS) {
			idm_conn_event(ic, CE_TRANSPORT_FAIL, NULL);
		}

		if (ic->ic_reinstate_conn) {
			/* Connection reinstatement is complete */
			idm_conn_event(ic->ic_reinstate_conn,
			    CE_CONN_REINSTATE_SUCCESS, NULL);
		}
		break;
	case CS_S6_IN_LOGOUT:
		break;
	case CS_S7_LOGOUT_REQ:
		/* Start logout timer for target connections */
		if (IDM_CONN_ISTGT(ic)) {
			ic->ic_state_timeout = timeout(idm_logout_req_timeout,
			    ic, drv_usectohz(IDM_LOGOUT_SECONDS*1000000));
		}
		break;
	case CS_S8_CLEANUP:
		/* Close connection (if it's not already closed) */
		if (IDM_CONN_ISTGT(ic)) {
			ic->ic_transport_ops->it_tgt_conn_disconnect(ic);
		} else {
			ic->ic_transport_ops->it_ini_conn_disconnect(ic);
		}

		/* Stop executing active tasks */
		idm_task_abort(ic, NULL, AT_INTERNAL_SUSPEND);

		/* Start logout timer */
		ic->ic_state_timeout = timeout(idm_cleanup_timeout, ic,
		    drv_usectohz(IDM_CLEANUP_SECONDS*1000000));
		break;
	case CS_S10_IN_CLEANUP:
		break;
	case CS_S9A_REJECTED:
		/*
		 * We never finished establishing the connection so no
		 * disconnect.  No client notifications because the client
		 * rejected the connection.
		 */
		idm_refcnt_async_wait_ref(&ic->ic_refcnt,
		    &idm_conn_reject_unref);
		break;
	case CS_S9B_WAIT_SND_DONE:
		break;
	case CS_S9_INIT_ERROR:
		if (IDM_CONN_ISTGT(ic)) {
			ic->ic_transport_ops->it_tgt_conn_disconnect(ic);
		} else {
			mutex_enter(&ic->ic_state_mutex);
			ic->ic_state_flags |= CF_ERROR;
			ic->ic_conn_sm_status = IDM_STATUS_FAIL;
			cv_signal(&ic->ic_state_cv);
			mutex_exit(&ic->ic_state_mutex);
			if (ic->ic_last_state != CS_S1_FREE &&
			    ic->ic_last_state != CS_S2_XPT_WAIT) {
				ic->ic_transport_ops->it_ini_conn_disconnect(
				    ic);
			} else {
				(void) idm_notify_client(ic, CN_CONNECT_FAIL,
				    NULL);
			}
		}
		/*FALLTHROUGH*/
	case CS_S11_COMPLETE:
		/*
		 * No more traffic on this connection.  If this is an
		 * initiator connection and we weren't connected yet
		 * then don't send the "connect lost" event.
		 * It's useful to the initiator to know whether we were
		 * logging in at the time so send that information in the
		 * data field.
		 */
		if (IDM_CONN_ISTGT(ic) ||
		    ((ic->ic_last_state != CS_S1_FREE) &&
		    (ic->ic_last_state != CS_S2_XPT_WAIT))) {
			(void) idm_notify_client(ic, CN_CONNECT_LOST,
			    (uintptr_t)(ic->ic_last_state == CS_S4_IN_LOGIN));
		}

		/* Abort all tasks */
		idm_task_abort(ic, NULL, AT_INTERNAL_ABORT);

		/*
		 * Handle terminal state actions on the global taskq so
		 * we can clean up all the connection resources from
		 * a separate thread context.
		 */
		idm_refcnt_async_wait_ref(&ic->ic_refcnt, &idm_conn_unref);
		break;
	case CS_S12_ENABLE_DM:

		/*
	 * The Enable DM state directs the initiator to initiate
		 * the hello sequence and the target to get ready to accept
		 * the iSER Hello Message.
		 */
		idm_status = (IDM_CONN_ISINI(ic)) ?
		    ic->ic_transport_ops->it_ini_enable_datamover(ic) :
		    ic->ic_transport_ops->it_tgt_enable_datamover(ic);

		if (idm_status == IDM_STATUS_SUCCESS) {
			idm_conn_event(ic, CE_ENABLE_DM_SUCCESS, NULL);
		} else {
			idm_conn_event(ic, CE_ENABLE_DM_FAIL, NULL);
		}

		break;

	default:
		ASSERT(0);
		break;

	}
}
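
Every timer in the state machine above follows one shape: whole seconds are scaled to microseconds before the drv_usectohz() conversion, and the returned id is stashed in ic_state_timeout so the timer can be cancelled on a state change. A hedged sketch of that arm/cancel pair, assuming the idm headers are in scope; my_login_timeout(), my_arm_login_timer(), and my_cancel_state_timer() are hypothetical names, and a real handler would post the state machine's own timeout event.

/* Hypothetical expiry handler for a connection state timer. */
static void
my_login_timeout(void *arg)
{
	/* a real handler posts a timeout event back to the state machine */
}

/* Arm the login timer the same way the CS_S3_XPT_UP case does above. */
static void
my_arm_login_timer(idm_conn_t *ic)
{
	/* whole seconds scaled to microseconds for drv_usectohz() */
	ic->ic_state_timeout = timeout(my_login_timeout, ic,
	    drv_usectohz(IDM_LOGIN_SECONDS * 1000000));
}

/* Cancel when leaving the state, before the callout can fire. */
static void
my_cancel_state_timer(idm_conn_t *ic)
{
	(void) untimeout(ic->ic_state_timeout);
}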
Example #4
/*
 * bufcall request
 *
 * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
 *  -. uinst_t->lock   : M [RW_READER]
 *  -. uinst_t->u_lock : A
 *  -. uinst_t->l_lock : P
 *  -. uinst_t->c_lock : P
 */
void
oplmsu_cmn_bufcall(queue_t *q, mblk_t *mp, size_t size, int rw_flag)
{

	ASSERT(RW_READ_HELD(&oplmsu_uinst->lock));

	if (rw_flag == MSU_WRITE_SIDE) {
		ctrl_t	*ctrl;

		putbq(q, mp);

		mutex_enter(&oplmsu_uinst->c_lock);
		ctrl = (ctrl_t *)q->q_ptr;
		if (ctrl->wbuf_id != 0) {
			mutex_exit(&oplmsu_uinst->c_lock);
			return;
		}

		ctrl->wbuftbl->q = q;
		ctrl->wbuftbl->rw_flag = rw_flag;
		ctrl->wbuf_id = bufcall(size, BPRI_LO, oplmsu_cmn_bufcb,
		    (void *)ctrl->wbuftbl);

		if (ctrl->wbuf_id == 0) {
			if (ctrl->wtout_id != 0) {
				mutex_exit(&oplmsu_uinst->c_lock);
				return;
			}

			ctrl->wtout_id = timeout(oplmsu_cmn_bufcb,
			    (void *)ctrl->wbuftbl, drv_usectohz(MSU_TM_500MS));
		}
		mutex_exit(&oplmsu_uinst->c_lock);
	} else if (rw_flag == MSU_READ_SIDE) {
		lpath_t	*lpath;
		mblk_t	*wrk_msg;

		mutex_enter(&oplmsu_uinst->l_lock);
		lpath = (lpath_t *)q->q_ptr;
		if (mp->b_datap->db_type >= QPCTL) {
			if (lpath->first_lpri_hi == NULL) {
				lpath->last_lpri_hi = mp;
				mp->b_next = NULL;
			} else {
				wrk_msg = lpath->first_lpri_hi;
				wrk_msg->b_prev = mp;
				mp->b_next = wrk_msg;
			}
			mp->b_prev = NULL;
			lpath->first_lpri_hi = mp;
		} else {
			putbq(q, mp);
		}

		if (lpath->rbuf_id != 0) {
			mutex_exit(&oplmsu_uinst->l_lock);
			return;
		}

		lpath->rbuftbl->q = q;
		lpath->rbuftbl->rw_flag = rw_flag;
		lpath->rbuf_id = bufcall(size, BPRI_LO, oplmsu_cmn_bufcb,
		    (void *)lpath->rbuftbl);

		if (lpath->rbuf_id == 0) {
			if (lpath->rtout_id != 0) {
				mutex_exit(&oplmsu_uinst->l_lock);
				return;
			}

			lpath->rtout_id = timeout(oplmsu_cmn_bufcb,
			    (void *)lpath->rbuftbl, drv_usectohz(MSU_TM_500MS));
		}
		mutex_exit(&oplmsu_uinst->l_lock);
	}
}
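
Both the write-side and read-side paths above end in the same recovery idiom: queue the message back, request a bufcall(), and, because bufcall() itself can fail (returning 0 when no callback can be scheduled), fall back to a plain 500ms timeout() so the retry is never lost. The idiom in isolation, under hypothetical names (my_softc_t, my_retry_cb, my_schedule_retry):

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

typedef struct my_softc {
	bufcall_id_t	buf_id;		/* nonzero while a bufcall is armed */
	timeout_id_t	tout_id;	/* nonzero while the fallback is armed */
} my_softc_t;

/* Hypothetical callback: would re-drive the failed allocation. */
static void my_retry_cb(void *arg);

static void
my_schedule_retry(my_softc_t *sc, size_t size)
{
	/* ask STREAMS to call back when 'size' bytes may be allocatable */
	sc->buf_id = bufcall(size, BPRI_LO, my_retry_cb, sc);

	/* bufcall() itself can fail; fall back to a 500ms timer */
	if (sc->buf_id == 0)
		sc->tout_id = timeout(my_retry_cb, sc,
		    drv_usectohz(500000));
}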
Example #5
/*
 * Returns 1 if the caller should put the message (bp) back on the queue
 */
static int
mouse8042_initiate_reset(queue_t *q, mblk_t *mp, struct mouse_state *state)
{
	mutex_enter(&state->reset_mutex);
	/*
	 * If we're in the middle of a reset, put the message back on the queue
	 * for processing later.
	 */
	if (state->reset_state != MSE_RESET_IDLE) {
		/*
		 * We noenable the queue again here in case it was backenabled
		 * by an upper-level module.
		 */
		noenable(q);

		mutex_exit(&state->reset_mutex);
		return (1);
	}

	/*
	 * Drop the reset state lock before allocating the response message and
	 * grabbing the 8042 exclusive-access lock (since those operations
	 * may take an extended period of time to complete).
	 */
	mutex_exit(&state->reset_mutex);

	if (state->reply_mp == NULL)
		state->reply_mp = allocb(2, BPRI_MED);
	if (state->reset_ack_mp == NULL)
		state->reset_ack_mp = allocb(1, BPRI_MED);

	if (state->reply_mp == NULL || state->reset_ack_mp == NULL) {
		/*
		 * Allocation failed -- set up a bufcall to enable the queue
		 * whenever there is enough memory to allocate the response
		 * message.
		 */
		state->bc_id = qbufcall(q, (state->reply_mp == NULL) ? 2 : 1,
		    BPRI_MED, (void (*)(void *))qenable, q);

		if (state->bc_id == 0) {
			/*
			 * If the qbufcall failed, we cannot proceed, so use the
			 * message we were sent to respond with an error.
			 */
			*mp->b_rptr = MSEERROR;
			mp->b_wptr = mp->b_rptr + 1;
			qreply(q, mp);
			return (0);
		}

		return (1);
	} else {
		/* Bufcall completed successfully (or wasn't needed) */
		state->bc_id = 0;
	}

	/*
	 * Gain exclusive access to the 8042 for the duration of the reset.
	 * The unlock will occur when the reset has either completed or timed
	 * out.
	 */
	(void) ddi_get8(state->ms_handle,
	    state->ms_addr + I8042_LOCK);

	mutex_enter(&state->reset_mutex);

	state->reset_state = MSE_RESET_PRE;
	noenable(q);

	state->reset_tid = qtimeout(q,
	    mouse8042_reset_timeout,
	    state,
	    drv_usectohz(
	    MOUSE8042_RESET_TIMEOUT_USECS));

	ddi_put8(state->ms_handle,
	    state->ms_addr +
	    I8042_INT_OUTPUT_DATA, MSERESET);

	mp->b_rptr++;

	mutex_exit(&state->reset_mutex);
	return (1);
}
Example #6
static clock_t t1394_tlist_nsectohz(hrtime_t  nS)
{
	return (drv_usectohz(HCI1394_TLIST_nS_TO_uS(nS)));
}
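
drv_usectohz() takes microseconds, so the helper above must scale its nanosecond argument down first; that is what HCI1394_TLIST_nS_TO_uS does. A sketch of the equivalent conversion (the stand-in macro below is hypothetical, and the real one may round rather than truncate):

#include <sys/types.h>
#include <sys/time.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

/* hypothetical stand-in for HCI1394_TLIST_nS_TO_uS: ns -> us, truncating */
#define	MY_NS_TO_US(nS)		((clock_t)((nS) / 1000))

static clock_t
my_nsectohz(hrtime_t nS)
{
	return (drv_usectohz(MY_NS_TO_US(nS)));
}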
Example #7
/*
 * srpt_ch_cleanup()
 */
void
srpt_ch_cleanup(srpt_channel_t *ch)
{
	srpt_iu_t		*iu;
	srpt_iu_t		*next;
	ibt_wc_t		wc;
	srpt_target_port_t	*tgt;
	srpt_channel_t		*tgt_ch;
	scsi_task_t		*iutask;

	SRPT_DPRINTF_L3("ch_cleanup, invoked for ch(%p), state(%d)",
	    (void *)ch, ch->ch_state);

	/* add a ref for the channel until we're done */
	srpt_ch_add_ref(ch);

	tgt = ch->ch_tgt;
	ASSERT(tgt != NULL);

	/*
	 * Make certain the channel is in the target port's list of
	 * known channels and remove it (releasing the target
	 * port's reference to the channel).
	 */
	mutex_enter(&tgt->tp_ch_list_lock);
	tgt_ch = list_head(&tgt->tp_ch_list);
	while (tgt_ch != NULL) {
		if (tgt_ch == ch) {
			list_remove(&tgt->tp_ch_list, tgt_ch);
			srpt_ch_release_ref(tgt_ch, 0);
			break;
		}
		tgt_ch = list_next(&tgt->tp_ch_list, tgt_ch);
	}
	mutex_exit(&tgt->tp_ch_list_lock);

	if (tgt_ch == NULL) {
		SRPT_DPRINTF_L2("ch_cleanup, target channel no"
		    "longer known to target");
		srpt_ch_release_ref(ch, 0);
		return;
	}

	rw_enter(&ch->ch_rwlock, RW_WRITER);
	ch->ch_state = SRPT_CHANNEL_DISCONNECTING;
	rw_exit(&ch->ch_rwlock);


	/*
	 * Generally the IB CQs will have been drained prior to
	 * getting to this call, but we check here to make certain.
	 */
	if (ch->ch_scq_hdl) {
		SRPT_DPRINTF_L4("ch_cleanup, start drain (%d)",
		    ch->ch_swqe_posted);
		while ((int)ch->ch_swqe_posted > 0) {
			delay(drv_usectohz(1000));
		}
		ibt_set_cq_handler(ch->ch_scq_hdl, NULL, NULL);
	}

	if (ch->ch_rcq_hdl) {
		ibt_set_cq_handler(ch->ch_rcq_hdl, NULL, NULL);

		while (ibt_poll_cq(ch->ch_rcq_hdl, &wc, 1, NULL) ==
		    IBT_SUCCESS) {
			iu = (srpt_iu_t *)(uintptr_t)wc.wc_id;
			SRPT_DPRINTF_L4("ch_cleanup, recovering"
			    " outstanding RX iu(%p)", (void *)iu);
			mutex_enter(&iu->iu_lock);
			srpt_ioc_repost_recv_iu(iu->iu_ioc, iu);
			/*
			 * Channel reference has not yet been added for this
			 * IU, so do not decrement.
			 */
			mutex_exit(&iu->iu_lock);
		}
	}

	/*
	 * Go through the list of outstanding IUs for the channel's SCSI
	 * session and, for each one, either abort it or complete an
	 * abort already in progress.
	 */
	rw_enter(&ch->ch_rwlock, RW_READER);
	if (ch->ch_session != NULL) {
		rw_enter(&ch->ch_session->ss_rwlock, RW_READER);
		iu = list_head(&ch->ch_session->ss_task_list);
		while (iu != NULL) {
			next = list_next(&ch->ch_session->ss_task_list, iu);

			mutex_enter(&iu->iu_lock);
			if (ch == iu->iu_ch) {
				if (iu->iu_stmf_task == NULL) {
					cmn_err(CE_NOTE,
					    "ch_cleanup, NULL stmf task");
					ASSERT(0);
				}
				iutask = iu->iu_stmf_task;
			} else {
				iutask = NULL;
			}
			mutex_exit(&iu->iu_lock);

			if (iutask != NULL) {
				SRPT_DPRINTF_L4("ch_cleanup, aborting "
				    "task(%p)", (void *)iutask);
				stmf_abort(STMF_QUEUE_TASK_ABORT, iutask,
				    STMF_ABORTED, NULL);
			}
			iu = next;
		}
		rw_exit(&ch->ch_session->ss_rwlock);
	}
	rw_exit(&ch->ch_rwlock);

	srpt_ch_release_ref(ch, 0);
}
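
The send-queue drain near the top of srpt_ch_cleanup() is a standard polling idiom: sleep one millisecond per iteration until the posted-WQE counter reaches zero. Its core, extracted under a hypothetical name:

#include <sys/types.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

/* Poll politely: yield one millisecond per check until 'posted' drains. */
static void
my_drain_posted(volatile uint32_t *posted)
{
	while (*posted > 0)
		delay(drv_usectohz(1000));
}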
Example #8
/*
 * sckm_process_msg
 *
 * Process a message received from the SC. Invoked by sckm_event_task().
 */
static void
sckm_process_msg(uint32_t cmd, uint64_t transid,
                 uint32_t len, sckm_mbox_req_hdr_t *req_data,
                 sckm_mbox_rep_hdr_t *rep_data)
{
    int rv;

    mutex_enter(&sckm_umutex);

    switch (cmd) {
    case SCKM_MSG_SADB: {
        int sadb_msglen;

        sadb_msglen = len - sizeof (sckm_mbox_req_hdr_t);
        SCKM_DEBUG1(D_TASK, "received SCKM_MSG_SADB len=%d",
                    sadb_msglen);

        /*
         * Sanity check the request.  Note that len is unsigned, so we
         * compare against the header size rather than testing the
         * difference: a short message would wrap around instead of
         * producing a value <= 0.
         */
        if (len <= sizeof (sckm_mbox_req_hdr_t)) {
            SCKM_DEBUG0(D_TASK, "bad SADB message, "
                        "zero length");
            /*
             * SADB message is too short, send corresponding
             * error message to SC.
             */
            rep_data->sckm_version = SCKM_PROTOCOL_VERSION;
            rep_data->status = SCKM_ERR_SADB_MSG;

            if ((rv = mboxsc_putmsg(KEY_KDSC, MBOXSC_MSG_REPLY,
                                    cmd, &transid, sizeof (sckm_mbox_rep_hdr_t),
                                    rep_data, MBOXSC_PUTMSG_DEF_TIMEOUT)) != 0) {
                SCKM_DEBUG1(D_TASK, "sckm_mbox_task: "
                            "mboxsc_putmsg() failed (%d)\n", rv);
            }
            mutex_exit(&sckm_umutex);
            return;
        }

        /* initialize request for daemon */
        sckm_udata.transid = transid;
        sckm_udata.type = SCKM_IOCTL_REQ_SADB;
        sckm_udata.buf_len = len - sizeof (sckm_mbox_req_hdr_t);
        bcopy(req_data+1, sckm_udata.buf, sckm_udata.buf_len);

        break;
    }
    default:
        cmn_err(CE_WARN, "unknown cmd %x received from SC", cmd);
        /*
         * Received unknown command from SC. Send corresponding
         * error message to SC.
         */
        rep_data->sckm_version = SCKM_PROTOCOL_VERSION;
        rep_data->status = SCKM_ERR_BAD_CMD;

        if ((rv = mboxsc_putmsg(KEY_KDSC, MBOXSC_MSG_REPLY,
                                cmd, &transid, sizeof (sckm_mbox_rep_hdr_t),
                                rep_data, MBOXSC_PUTMSG_DEF_TIMEOUT)) != 0) {
            SCKM_DEBUG1(D_TASK, "sckm_mbox_task: "
                        "mboxsc_putmsg() failed (%d)\n", rv);
        }
        mutex_exit(&sckm_umutex);
        return;
    }

    /*
     * At this point, we know that the request is valid, so pass
     * the request to the daemon.
     */
    SCKM_DEBUG0(D_TASK, "waking up daemon");
    sckm_udata_req = B_TRUE;
    cv_signal(&sckm_udata_cv);

    /* wait for daemon to process request */
    if (cv_reltimedwait(&sckm_cons_cv, &sckm_umutex,
                        drv_usectohz(SCKM_DAEMON_TIMEOUT), TR_CLOCK_TICK) == -1) {
        /*
         * Daemon did not process the data, report this
         * error to the SC.
         */
        SCKM_DEBUG0(D_TASK, "daemon timeout!!");
        rep_data->sckm_version = SCKM_PROTOCOL_VERSION;
        rep_data->status = SCKM_ERR_DAEMON;
    } else {
        /* Daemon processed data, return status to SC */
        SCKM_DEBUG0(D_TASK, "daemon processed data");
        rep_data->sckm_version = SCKM_PROTOCOL_VERSION;
        switch (sckm_udata_status.status) {
        case SCKM_IOCTL_STAT_SUCCESS:
            SCKM_DEBUG0(D_TASK, "daemon returned success");
            rep_data->status = SCKM_SUCCESS;
            break;
        case SCKM_IOCTL_STAT_ERR_PFKEY:
            SCKM_DEBUG1(D_TASK, "daemon returned PF_KEY "
                        "error, errno=%d",
                        sckm_udata_status.sadb_msg_errno);
            rep_data->status = SCKM_ERR_SADB_PFKEY;
            rep_data->sadb_msg_errno =
                sckm_udata_status.sadb_msg_errno;
            break;
        case SCKM_IOCTL_STAT_ERR_REQ:
            SCKM_DEBUG0(D_TASK, "daemon returned "
                        "bad request");
            rep_data->status = SCKM_ERR_DAEMON;
            break;
        case SCKM_IOCTL_STAT_ERR_VERSION:
            SCKM_DEBUG0(D_TASK, "PF_KEY version not "
                        "supported");
            rep_data->status = SCKM_ERR_SADB_VERSION;
            rep_data->sadb_msg_version =
                sckm_udata_status.sadb_msg_version;
            break;
        case SCKM_IOCTL_STAT_ERR_TIMEOUT:
            SCKM_DEBUG0(D_TASK, "no response received "
                        "from key engine");
            rep_data->status = SCKM_ERR_SADB_TIMEOUT;
            break;
        case SCKM_IOCTL_STAT_ERR_OTHER:
            SCKM_DEBUG0(D_TASK, "daemon encountered "
                        "an error");
            rep_data->status = SCKM_ERR_DAEMON;
            break;
        case SCKM_IOCTL_STAT_ERR_SADB_TYPE:
            SCKM_DEBUG0(D_TASK, "daemon returned bad "
                        "SADB message type");
            rep_data->status = SCKM_ERR_SADB_BAD_TYPE;
            break;
        default:
            cmn_err(CE_WARN, "SCKM daemon returned "
                    "invalid status %d", sckm_udata_status.status);
            rep_data->status = SCKM_ERR_DAEMON;
        }
    }

    /* send reply back to SC */
    if ((rv = mboxsc_putmsg(KEY_KDSC, MBOXSC_MSG_REPLY,
                            cmd, &transid, sizeof (sckm_mbox_rep_hdr_t),
                            rep_data, MBOXSC_PUTMSG_DEF_TIMEOUT)) != 0) {
        SCKM_DEBUG1(D_TASK, "failed sending reply to SC (%d)", rv);
    } else {
        SCKM_DEBUG0(D_TASK, "reply sent to SC");
    }

    sckm_udata_req = B_FALSE;
    mutex_exit(&sckm_umutex);
}
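
Unlike cv_timedwait() in example #1, cv_reltimedwait() takes a relative tick count, which is why sckm_process_msg() can pass drv_usectohz(SCKM_DAEMON_TIMEOUT) directly without reading ddi_get_lbolt(). A minimal sketch of that call shape under a hypothetical name; the caller holds the mutex, and -1 still means the wait timed out.

#include <sys/types.h>
#include <sys/ksynch.h>
#include <sys/time.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

/* Wait up to 'usecs' for a signal; caller holds 'mtx'. */
static boolean_t
my_wait_usecs(kcondvar_t *cv, kmutex_t *mtx, clock_t usecs)
{
	/* TR_CLOCK_TICK requests ordinary clock-tick resolution */
	return (cv_reltimedwait(cv, mtx, drv_usectohz(usecs),
	    TR_CLOCK_TICK) == -1 ? B_FALSE : B_TRUE);
}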
Example #9
/*
 * cvc_wsrv()
 *	cvc_wsrv handles mblks that have been queued by cvc_wput either because
 *	the IOSRAM path was selected or the queue contained preceding mblks.  To
 *	optimize processing (particularly if the IOSRAM path is selected), all
 *	mblks are pulled off of the queue and chained together.  Then, if there
 *	are any mblks on the chain, they are either forwarded to cvcredir or
 *	sent for IOSRAM processing as appropriate given current circumstances.
 *	IOSRAM processing may not be able to handle all of the data in the
 *	chain, in which case the remaining data is placed back on the queue and
 *	a timeout routine is registered to reschedule cvc_wsrv in the future.
 *	Automatic scheduling of the queue is disabled (noenable(q)) while
 *	cvc_wsrv is running to avoid superfluous calls.
 */
static int
cvc_wsrv(queue_t *q)
{
	mblk_t *total_mp = NULL;
	mblk_t *mp;

	if (cvc_stopped == 1 || cvc_suspended == 1) {
		return (0);
	}

	rw_enter(&cvclock, RW_READER);
	noenable(q);

	/*
	 * If there's already a timeout registered for scheduling this routine
	 * in the future, it's a safe bet that we don't want to run right now.
	 */
	if (cvc_timeout_id != (timeout_id_t)-1) {
		enableok(q);
		rw_exit(&cvclock);
		return (0);
	}

	/*
	 * Start by linking all of the queued M_DATA mblks into a single chain
	 * so we can flush as much as possible to IOSRAM (if we choose that
	 * route).
	 */
	while ((mp = getq(q)) != NULL) {
		/*
		 * Technically, certain IOCTLs are supposed to be processed only
		 * after all preceding data has completely "drained".  In an
		 * attempt to support that, we delay processing of those IOCTLs
		 * until this point.  It is still possible that an IOCTL will be
		 * processed before all preceding data is drained, for instance
		 * in the case where not all of the preceding data would fit
		 * into IOSRAM and we have to place it back on the queue.
		 * However, since none of these IOCTLs really appear to have any
		 * relevance for cvc, and we weren't supporting delayed
		 * processing at _all_ previously, this partial implementation
		 * should suffice.  (Fully implementing the delayed IOCTL
		 * processing would be unjustifiably difficult given the nature
		 * of the underlying IOSRAM console protocol.)
		 */
		if (mp->b_datap->db_type == M_IOCTL) {
			cvc_ioctl(q, mp);
			continue;
		}

		/*
		 * We know that only M_IOCTL and M_DATA blocks are placed on our
		 * queue.  Since this block isn't an M_IOCTL, it must be M_DATA.
		 */
		if (total_mp != NULL) {
			linkb(total_mp, mp);
		} else {
			total_mp = mp;
		}
	}

	/*
	 * Do we actually have anything to do?
	 */
	if (total_mp == NULL) {
		enableok(q);
		rw_exit(&cvclock);
		return (0);
	}

	/*
	 * Yes, we do, so send the data to either cvcredir or IOSRAM as
	 * appropriate.  In the latter case, we might not be able to transmit
	 * everything right now, so re-queue the remainder.
	 */
	if (cvcoutput_q != NULL && !via_iosram) {
		CVC_DBG0(CVC_DBG_NETWORK_WR, "Sending to cvcredir.");
		/*
		 * XXX - should canputnext be called here?  Starfire's cvc
		 * doesn't do that, and it appears to work anyway.
		 */
		(void) putnext(cvcoutput_q, total_mp);
	} else {
		CVC_DBG0(CVC_DBG_IOSRAM_WR, "Send to IOSRAM.");
		cvc_send_to_iosram(&total_mp);
		if (total_mp != NULL) {
			(void) putbq(q, total_mp);
		}
	}

	/*
	 * If there is still data queued at this point, make sure the queue
	 * gets scheduled again after an appropriate delay (which has been
	 * somewhat arbitrarily selected as half of the SC's input polling
	 * frequency).
	 */
	enableok(q);
	if (q->q_first != NULL) {
		if (cvc_timeout_id == (timeout_id_t)-1) {
			cvc_timeout_id = timeout(cvc_flush_queue,
			    NULL, drv_usectohz(CVC_IOSRAM_POLL_USECS / 2));
		}
	}
	rw_exit(&cvclock);
	return (0);
}
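
cvc_wsrv() and its timeout handler coordinate through the (timeout_id_t)-1 sentinel: -1 means no flush is pending, so the queue gets rescheduled at most once. That guard in isolation, with illustrative names; a real driver serializes access to the id, as cvclock does above.

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

static timeout_id_t my_tid = (timeout_id_t)-1;	/* -1 == not armed */

/* Hypothetical flush handler: disarms itself, then drains the queue. */
static void
my_flush(void *arg)
{
	my_tid = (timeout_id_t)-1;
	/* ... drain the queue here ... */
}

static void
my_schedule_flush(clock_t usecs)
{
	/* arm only if no flush is already pending */
	if (my_tid == (timeout_id_t)-1)
		my_tid = timeout(my_flush, NULL, drv_usectohz(usecs));
}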
Example #10
/*
 * cvc_send_to_iosram()
 *	Flush as much data as possible to the CONO chunk.  If successful, free
 *	any mblks that were completely transmitted, update the b_rptr field in
 *	the first remaining mblk if it was partially transmitted, and update the
 *	caller's pointer to the new head of the mblk chain.  Since the software
 *	that will be pulling this data out of IOSRAM (dxs on the SC) is just
 *	polling at some frequency, we avoid attempts to flush data to IOSRAM any
 *	faster than a large divisor of that polling frequency.
 *
 *	Note that "cvc_buf_t out" is only declared "static" to keep it from
 *	being allocated on the stack.  Allocating 1K+ structures on the stack
 *	seems rather antisocial.
 */
static void
cvc_send_to_iosram(mblk_t **chainpp)
{
	int			rval;
	uint8_t			dvalid;
	uchar_t			*cp;
	mblk_t			*mp;
	mblk_t			*last_empty_mp;
	static clock_t		last_flush = (clock_t)-1;
	static cvc_buf_t	out;   /* see note above about static */

	ASSERT(chainpp != NULL);

	/*
	 * We _do_ have something to do, right?
	 */
	if (*chainpp == NULL) {
		return;
	}

	/*
	 * We can actually increase throughput by throttling back on attempts to
	 * flush data to IOSRAM, since trying to write every little bit of data
	 * as it shows up will actually generate more delays waiting for the SC
	 * to pick up each of those bits.  Instead, we'll avoid attempting to
	 * write data to IOSRAM any faster than half of the polling frequency we
	 * expect the SC to be using.
	 */
	if (ddi_get_lbolt() - last_flush <
	    drv_usectohz(CVC_IOSRAM_POLL_USECS / 2)) {
		return;
	}

	/*
	 * If IOSRAM is inaccessible or the CONO chunk still holds data that
	 * hasn't been picked up by the SC, there's nothing we can do right now.
	 */
	rval = iosram_get_flag(IOSRAM_KEY_CONO, &dvalid, NULL);
	if ((rval != 0) || (dvalid == IOSRAM_DATA_VALID)) {
		if ((rval != 0) && (rval != EAGAIN)) {
			cmn_err(CE_WARN, "cvc_send_to_iosram: get_flag ret %d",
			    rval);
		}
		return;
	}

	/*
	 * Copy up to MAX_XFER_COUTPUT chars from the mblk chain into a buffer.
	 * Don't change any of the mblks just yet, since we can't be certain
	 * that we'll be successful in writing data to the CONO chunk.
	 */
	out.count = 0;
	mp = *chainpp;
	cp = mp->b_rptr;
	last_empty_mp = NULL;
	while ((mp != NULL) && (out.count < MAX_XFER_COUTPUT)) {
		/*
		 * Process as many of the characters in the current mblk as
		 * possible.
		 */
		while ((cp != mp->b_wptr) && (out.count < MAX_XFER_COUTPUT)) {
			out.buffer[out.count++] = *cp++;
		}

		/*
		 * Did we process that entire mblk?  If so, move on to the next
		 * one.  If not, we're done filling the buffer even if there's
		 * space left, because apparently there wasn't room to process
		 * the next character.
		 */
		if (cp != mp->b_wptr) {
			break;
		}

		/*
		 * When this loop terminates, last_empty_mp will point to the
		 * last mblk that was completely processed, mp will point to the
		 * following mblk (or NULL if no more mblks exist), and cp will
		 * point to the first untransmitted character in the mblk
		 * pointed to by mp.  We'll need this data to update the mblk
		 * chain if all of the data is successfully transmitted.
		 */
		last_empty_mp = mp;
		mp = mp->b_cont;
		cp = (mp != NULL) ? mp->b_rptr : NULL;
	}

	/*
	 * If we succeeded in preparing some data, try to transmit it through
	 * IOSRAM.  First write the count and the data, which can be done in a
	 * single operation thanks to the buffer structure we use, then set the
	 * data_valid flag if the first step succeeded.
	 */
	if (out.count != 0) {
		rval = iosram_wr(IOSRAM_KEY_CONO, COUNT_OFFSET,
		    CONSBUF_COUNT_SIZE + out.count, (caddr_t)&out);
		if ((rval != 0) && (rval != EAGAIN)) {
			cmn_err(CE_WARN, "cvc_putc: write ret %d", rval);
		}

		/* if the data write succeeded, set the data_valid flag */
		if (rval == 0) {
			rval = iosram_set_flag(IOSRAM_KEY_CONO,
			    IOSRAM_DATA_VALID, IOSRAM_INT_NONE);
			if ((rval != 0) && (rval != EAGAIN)) {
				cmn_err(CE_WARN,
				    "cvc_send_to_iosram: set flags for "
				    "outbuf ret %d", rval);
			}
		}

		/*
		 * If we successfully transmitted any data, modify the caller's
		 * mblk chain to remove the data that was transmitted, freeing
		 * all mblks that were completely processed.
		 */
		if (rval == 0) {
			last_flush = ddi_get_lbolt();

			/*
			 * If any data is left over, update the b_rptr field of
			 * the first remaining mblk in case some of its data was
			 * processed.
			 */
			if (mp != NULL) {
				mp->b_rptr = cp;
			}

			/*
			 * If any mblks have been emptied, unlink them from the
			 * residual chain, free them, and update the caller's
			 * mblk pointer.
			 */
			if (last_empty_mp != NULL) {
				last_empty_mp->b_cont = NULL;
				freemsg(*chainpp);
				*chainpp = mp;
			}
		}
	}
}
Example #11
/*
 * cvc_getstr()
 *	Poll IOSRAM for console input while available.
 */
static void
cvc_getstr(char *cp)
{
	short		count;
	uint8_t		command = 0;
	int		rval = ESUCCESS;
	uint8_t		dvalid = IOSRAM_DATA_INVALID;
	uint8_t		intrpending = 0;

	mutex_enter(&cvc_iosram_input_mutex);
	while (dvalid == IOSRAM_DATA_INVALID) {
		/*
		 * Check the CONC data_valid flag to see if a control message is
		 * available.
		 */
		rval = iosram_get_flag(IOSRAM_KEY_CONC, &dvalid, &intrpending);
		if ((rval != 0) && (rval != EAGAIN)) {
			cmn_err(CE_WARN,
			    "cvc_getstr: get flag for cntl ret %d", rval);
		}

		/*
		 * If a control message is available, try to read and process
		 * it.
		 */
		if ((dvalid == IOSRAM_DATA_VALID) && (rval == 0)) {
			/* read the control reg offset */
			rval = iosram_rd(IOSRAM_KEY_CONC,
			    CVC_CTL_OFFSET(command), CVC_CTL_SIZE(command),
			    (caddr_t)&command);
			if ((rval != 0) && (rval != EAGAIN)) {
				cmn_err(CE_WARN,
				    "cvc_getstr: read for command ret %d",
				    rval);
			}

			/* process the cntl msg and clear the data_valid flag */
			if (rval == 0) {
				cvc_iosram_ops(command);
			}
		}

		/*
		 * Check the CONI data_valid flag to see if console input data
		 * is available.
		 */
		rval = iosram_get_flag(IOSRAM_KEY_CONI, &dvalid, &intrpending);
		if ((rval != 0) && (rval != EAGAIN)) {
			cmn_err(CE_WARN,
			    "cvc_getstr: get flag for inbuf ret %d",
			    rval);
		}
		if ((rval != 0) || (dvalid != IOSRAM_DATA_VALID)) {
			goto retry;
		}

		/*
		 * Try to read the count.
		 */
		rval = iosram_rd(IOSRAM_KEY_CONI, COUNT_OFFSET,
		    CONSBUF_COUNT_SIZE, (caddr_t)&count);
		if (rval != 0) {
			if (rval != EAGAIN) {
				cmn_err(CE_WARN,
				    "cvc_getstr: read for count ret %d", rval);
			}
			goto retry;
		}

		/*
		 * If there is data to be read, try to read it.
		 */
		if (count != 0) {
			rval = iosram_rd(IOSRAM_KEY_CONI, DATA_OFFSET, count,
			    (caddr_t)cp);
			if (rval != 0) {
				if (rval != EAGAIN) {
					cmn_err(CE_WARN,
					    "cvc_getstr: read for data ret %d",
					    rval);
				}
				goto retry;
			}
			cp[count] = '\0';
		}

		/*
		 * Try to clear the data_valid flag to indicate that whatever
		 * was in CONI was read successfully.  If successful, and some
		 * data was read, break out of the loop to return to the caller.
		 */
		rval = iosram_set_flag(IOSRAM_KEY_CONI, IOSRAM_DATA_INVALID,
		    IOSRAM_INT_NONE);
		if (rval != 0) {
			if (rval != EAGAIN) {
				cmn_err(CE_WARN,
				    "cvc_getstr: set flag for inbuf ret %d",
				    rval);
			}
		} else if (count != 0) {
			CVC_DBG1(CVC_DBG_IOSRAM_RD, "Read 0x%x", count);
			break;
		}

		/*
		 * Use a smaller delay between checks of IOSRAM for input
		 * when cvcd/cvcredir are not running or "via_iosram" has
		 * been set.
		 * We don't go away completely when i/o is going through the
		 * network via cvcd since a command may be sent via IOSRAM
		 * to switch input back to IOSRAM if the network is
		 * down or hung.
		 */
retry:
		if ((cvcoutput_q == NULL) || (via_iosram))
			delay(drv_usectohz(CVC_IOSRAM_POLL_USECS));
		else
			delay(drv_usectohz(CVC_IOSRAM_POLL_USECS * 10));

	}

	mutex_exit(&cvc_iosram_input_mutex);
}
Example #12
static void
sess_sm_new_state(iscsit_sess_t *ist, sess_event_ctx_t *ctx,
    iscsit_session_state_t new_state)
{
	int t2r_secs;

	/*
	 * Validate new state
	 */
	ASSERT(new_state != SS_UNDEFINED);
	ASSERT3U(new_state, <, SS_MAX_STATE);

	new_state = (new_state < SS_MAX_STATE) ?
	    new_state : SS_UNDEFINED;

	IDM_SM_LOG(CE_NOTE, "sess_sm_new_state: sess %p, evt %s(%d), "
	    "%s(%d) --> %s(%d)\n", (void *) ist,
	    iscsit_se_name[ctx->se_ctx_event], ctx->se_ctx_event,
	    iscsit_ss_name[ist->ist_state], ist->ist_state,
	    iscsit_ss_name[new_state], new_state);

	DTRACE_PROBE3(sess__state__change,
	    iscsit_sess_t *, ist, sess_event_ctx_t *, ctx,
	    iscsit_session_state_t, new_state);

	mutex_enter(&ist->ist_mutex);
	idm_sm_audit_state_change(&ist->ist_state_audit, SAS_ISCSIT_SESS,
	    (int)ist->ist_state, (int)new_state);
	ist->ist_last_state = ist->ist_state;
	ist->ist_state = new_state;
	mutex_exit(&ist->ist_mutex);

	switch (ist->ist_state) {
	case SS_Q1_FREE:
		break;
	case SS_Q2_ACTIVE:
		iscsit_tgt_bind_sess(ist->ist_tgt, ist);
		break;
	case SS_Q3_LOGGED_IN:
		break;
	case SS_Q4_FAILED:
		t2r_secs =
		    ist->ist_failed_conn->ict_op.op_default_time_2_retain;
		ist->ist_state_timeout = timeout(sess_sm_timeout, ist,
		    drv_usectohz(t2r_secs*1000000));
		break;
	case SS_Q5_CONTINUE:
		break;
	case SS_Q6_DONE:
	case SS_Q7_ERROR:
		/*
		 * We won't need our TSIH anymore and it represents an
		 * implicit reference to the global TSIH pool.  Get rid
		 * of it.
		 */
		if (ist->ist_tsih != ISCSI_UNSPEC_TSIH) {
			iscsit_tsih_free(ist->ist_tsih);
		}

		/*
		 * We don't want this session to show up anymore so unbind
		 * it now.  After this call this session cannot have any
		 * references outside itself (implicit or explicit).
		 */
		iscsit_tgt_unbind_sess(ist->ist_tgt, ist);

		/*
		 * If we have more connections bound, then more events
		 * are coming, so don't wait for idle yet.
		 */
		if (ist->ist_conn_count == 0) {
			idm_refcnt_async_wait_ref(&ist->ist_refcnt,
			    &iscsit_sess_unref);
		}
		break;
	default:
		ASSERT(0);
		/*NOTREACHED*/
	}
}
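
One detail worth flagging in the SS_Q4_FAILED arm: t2r_secs * 1000000 is evaluated as int arithmetic, so a time-to-retain above roughly 2147 seconds would overflow before drv_usectohz() ever saw it. A hedged variant that widens first; MICROSEC is the kernel's 1000000 constant, and everything else mirrors the case above.

	case SS_Q4_FAILED:
		t2r_secs =
		    ist->ist_failed_conn->ict_op.op_default_time_2_retain;
		/* widen to clock_t before scaling to avoid 32-bit overflow */
		ist->ist_state_timeout = timeout(sess_sm_timeout, ist,
		    drv_usectohz((clock_t)t2r_secs * MICROSEC));
		break;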
Example #13
/*
 * audio1575_chip_init()
 *
 * Description:
 *	This routine initializes the M1575 AC97 audio controller and the AC97
 *	codec.  The AC97 codec registers are programmed from codec_shadow[].
 *	If we are not doing a restore, we initialize codec_shadow[]; otherwise
 *	we restore from the current values of codec_shadow[].  This routine
 *	expects that the PCI IO and Memory spaces have been mapped and enabled
 *	already.
 * Arguments:
 *	audio1575_state_t	*statep		The device's state structure
 * Returns:
 *	DDI_SUCCESS	The hardware was initialized properly
 *	DDI_FAILURE	The hardware couldn't be initialized properly
 */
static int
audio1575_chip_init(audio1575_state_t *statep)
{
	uint32_t		ssr;
	uint32_t		rtsr;
	uint32_t		intrsr;
	int 			i;
	int			j;
#ifdef	__sparc
	uint8_t			clk_detect;
	ddi_acc_handle_t	pcih;
#endif
	clock_t			ticks;

	/*
	 * clear the interrupt control and status register
	 * READ/WRITE/READ workaround required
	 * for buggy hardware
	 */

	PUT32(M1575_INTRCR_REG, 0);
	(void) GET32(M1575_INTRCR_REG);

	intrsr = GET32(M1575_INTRSR_REG);
	PUT32(M1575_INTRSR_REG, (intrsr & M1575_INTR_MASK));
	(void) GET32(M1575_INTRSR_REG);

	ticks = drv_usectohz(M1575_LOOP_CTR);

	/*
	 * SADA only supports stereo, so we set the channel bits
	 * to "00" to select 2 channels.
	 * We will also set the following:
	 *
	 * Disable double rate enable
	 * no SPDIF output selected
	 * 16 bit audio record mode
	 * 16 bit pcm out mode
	 * PCM Out 6 chan mode FL FR CEN BL BR LFE
	 * PCM Out 2 channel mode (00)
	 */
	for (i = 0; i < M1575_LOOP_CTR; i++) {
		/* Reset the AC97 Codec	and default to 2 channel 16 bit mode */
		PUT32(M1575_SCR_REG, M1575_SCR_COLDRST);
		delay(ticks<<1);

		/* Read the System Status Reg */
		ssr = GET32(M1575_SSR_REG);

		/* make sure to release the blocked reset bit */
		if (ssr & M1575_SSR_RSTBLK) {
			SET32(M1575_INTFCR_REG, M1575_INTFCR_RSTREL);
			delay(ticks);

			/* Read the System Status Reg */
			ssr = GET32(M1575_SSR_REG);

			/* make sure to release the blocked reset bit */
			if (ssr & M1575_SSR_RSTBLK) {
				return (DDI_FAILURE);
			}

			/* Reset the controller */
			PUT32(M1575_SCR_REG, M1575_SCR_COLDRST);
			delay(ticks);
		}

		/* according to the AC'97 spec, wait for codec reset */
		for (j = 0; j < M1575_LOOP_CTR; j++) {
			if ((GET32(M1575_SCR_REG) & M1575_SCR_COLDRST) == 0) {
				break;
			}
			delay(ticks);
		}

		/* codec reset failed */
		if (j >= M1575_LOOP_CTR) {
			audio_dev_warn(statep->adev,
			    "failure to reset codec");
			return (DDI_FAILURE);
		}

		/*
		 * Wait for FACRDY (first codec ready).  The hardware
		 * reports the state of the codec-ready bit seen on
		 * SDATA_IN[0] in the Recv Tag Slot Reg.
		 */
		rtsr = GET32(M1575_RTSR_REG);
		if (rtsr & M1575_RTSR_FACRDY) {
			break;
		} else { /* reset the status and wait for new status to set */
			rtsr |= M1575_RTSR_FACRDY;
			PUT32(M1575_RTSR_REG, rtsr);
			drv_usecwait(10);
		}
	}

	/* if we could not reset the AC97 codec then report failure */
	if (i >= M1575_LOOP_CTR) {
		audio_dev_warn(statep->adev,
		    "no codec ready signal received");
		return (DDI_FAILURE);
	}

#ifdef	__sparc
	/* Magic code from ULi to Turn on the AC_LINK clock */
	pcih = statep->pcih;
	pci_config_put8(pcih, M1575_PCIACD_REG, 0);
	pci_config_put8(pcih, M1575_PCIACD_REG, 4);
	pci_config_put8(pcih, M1575_PCIACD_REG, 0);
	(void) pci_config_get8(pcih, M1575_PCIACD_REG);
	pci_config_put8(pcih, M1575_PCIACD_REG, 2);
	pci_config_put8(pcih, M1575_PCIACD_REG, 0);
	clk_detect = pci_config_get8(pcih, M1575_PCIACD_REG);

	if (clk_detect != 1) {
		audio_dev_warn(statep->adev, "No AC97 Clock Detected");
		return (DDI_FAILURE);
	}
#endif

	/* Magic code from Uli to Init FIFO1 and FIFO2 */
	PUT32(M1575_FIFOCR1_REG, 0x81818181);
	PUT32(M1575_FIFOCR2_REG, 0x81818181);
	PUT32(M1575_FIFOCR3_REG, 0x81818181);

	/* Make sure that PCM in and PCM out are enabled */
	SET32(M1575_INTFCR_REG, (M1575_INTFCR_PCMIENB | M1575_INTFCR_PCMOENB));

	audio1575_dma_stop(statep, B_FALSE);

	return (DDI_SUCCESS);
}
Example #14
void
emlxs_taskq_destroy(emlxs_taskq_t *taskq)
{
	emlxs_taskq_thread_t *tthread;
	uint32_t i;

	/* If taskq already closed, then quit */
	if (!taskq->open) {
		return;
	}

	mutex_enter(&taskq->get_lock);

	/* If taskq already closed, then quit */
	if (!taskq->open) {
		mutex_exit(&taskq->get_lock);
		return;
	}

	taskq->open = 0;
	mutex_exit(&taskq->get_lock);


	/* No more threads can be dispatched now */

	/* Kill the threads */
	for (i = 0; i < EMLXS_MAX_TASKQ_THREADS; i++) {
		tthread = &taskq->thread_list[i];

		/*
		 * If the thread lock can be acquired,
		 * it is in one of these states:
		 * 1. Thread not started.
		 * 2. Thread asleep.
		 * 3. Thread busy.
		 * 4. Thread ended.
		 */
		mutex_enter(&tthread->lock);
		tthread->flags |= EMLXS_THREAD_KILLED;
		cv_signal(&tthread->cv_flag);

		/* Wait for thread to die */
		while (!(tthread->flags & EMLXS_THREAD_ENDED)) {
			mutex_exit(&tthread->lock);
			delay(drv_usectohz(10000));
			mutex_enter(&tthread->lock);
		}
		mutex_exit(&tthread->lock);

		/* Clean up thread */
		mutex_destroy(&tthread->lock);
		cv_destroy(&tthread->cv_flag);
	}

	/* Clean up taskq */
	mutex_destroy(&taskq->put_lock);
	mutex_destroy(&taskq->get_lock);

	return;

} /* emlxs_taskq_destroy() */
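
The reap loop in emlxs_taskq_destroy() polls every 10ms for EMLXS_THREAD_ENDED. Since the dying thread already owns cv_flag, an event-driven variant is possible; a sketch reusing the example's own field names, assuming the thread signals cv_flag when it sets EMLXS_THREAD_ENDED.

		/* Wait for thread to die (event-driven, no 10ms polling) */
		mutex_enter(&tthread->lock);
		while (!(tthread->flags & EMLXS_THREAD_ENDED))
			cv_wait(&tthread->cv_flag, &tthread->lock);
		mutex_exit(&tthread->lock);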
Example #15
int
ghd_waitq_process_and_mutex_hold(ccc_t *cccp)
{
	gcmd_t	*gcmdp;
	int	 rc = FALSE;

	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	ASSERT(mutex_owned(&cccp->ccc_waitq_mutex));

	for (;;) {
		if (L2_EMPTY(&GHBA_QHEAD(cccp))) {
			/* return if the list is empty */
			GDBG_WAITQ(("ghd_waitq_proc: MT cccp 0x%p qp 0x%p\n",
			    (void *)cccp, (void *)&cccp->ccc_waitq));
			break;
		}
		if (GHBA_NACTIVE(cccp) >= GHBA_MAXACTIVE(cccp)) {
			/* return if the HBA is too active */
			GDBG_WAITQ(("ghd_waitq_proc: N>M cccp 0x%p qp 0x%p"
			    " N %ld max %ld\n", (void *)cccp,
			    (void *)&cccp->ccc_waitq,
			    GHBA_NACTIVE(cccp),
			    GHBA_MAXACTIVE(cccp)));
			break;
		}

		/*
		 * bail out if the wait queue has been
		 * "held" by the HBA driver
		 */
		if (cccp->ccc_waitq_held) {
			GDBG_WAITQ(("ghd_waitq_proc: held"));
			return (rc);
		}

		if (cccp->ccc_waitq_frozen) {

			clock_t lbolt, delay_in_hz, time_to_wait;

			delay_in_hz =
			    drv_usectohz(cccp->ccc_waitq_freezedelay * 1000);

			lbolt = ddi_get_lbolt();
			time_to_wait = delay_in_hz -
			    (lbolt - cccp->ccc_waitq_freezetime);

			if (time_to_wait > 0) {
				/*
				 * stay frozen; we'll be called again
				 * by ghd_timeout_softintr()
				 */
				GDBG_WAITQ(("ghd_waitq_proc: frozen"));
				return (rc);
			} else {
				/* unfreeze and continue */
				GDBG_WAITQ(("ghd_waitq_proc: unfreezing"));
				cccp->ccc_waitq_freezetime = 0;
				cccp->ccc_waitq_freezedelay = 0;
				cccp->ccc_waitq_frozen = 0;
			}
		}

		gcmdp = (gcmd_t *)L2_remove_head(&GHBA_QHEAD(cccp));
		GHBA_NACTIVE(cccp)++;
		gcmdp->cmd_waitq_level++;
		mutex_exit(&cccp->ccc_waitq_mutex);

		/*
		 * Start up the next I/O request
		 */
		ASSERT(gcmdp != NULL);
		gcmdp->cmd_state = GCMD_STATE_ACTIVE;
		if (!(*cccp->ccc_hba_start)(cccp->ccc_hba_handle, gcmdp)) {
			/* if the HBA rejected the request, requeue it */
			gcmdp->cmd_state = GCMD_STATE_WAITQ;
			mutex_enter(&cccp->ccc_waitq_mutex);
			GHBA_NACTIVE(cccp)--;
			gcmdp->cmd_waitq_level--;
			L2_add_head(&GHBA_QHEAD(cccp), &gcmdp->cmd_q, gcmdp);
			GDBG_WAITQ(("ghd_waitq_proc: busy cccp 0x%p gcmdp 0x%p"
			    " handle 0x%p\n", (void *)cccp, (void *)gcmdp,
			    cccp->ccc_hba_handle));
			break;
		}
		rc = TRUE;
		mutex_enter(&cccp->ccc_waitq_mutex);
		GDBG_WAITQ(("ghd_waitq_proc: ++ cccp 0x%p gcmdp 0x%p N %ld\n",
		    (void *)cccp, (void *)gcmdp, GHBA_NACTIVE(cccp)));
	}
	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	ASSERT(mutex_owned(&cccp->ccc_waitq_mutex));
	return (rc);
}