Example #1
0
/*
 * Clean up scheduler activations state associated with an exiting
 * (or execing) lwp.  t is always the current thread.
 */
void
schedctl_lwp_cleanup(kthread_t *t)
{
	sc_shared_t	*ssp = t->t_schedctl;
	proc_t		*p = ttoproc(t);
	sc_page_ctl_t	*pagep;
	index_t		index;

	ASSERT(MUTEX_NOT_HELD(&p->p_lock));

	thread_lock(t);		/* protect against ts_tick and ts_update */
	t->t_schedctl = NULL;
	t->t_sc_uaddr = 0;
	thread_unlock(t);

	/*
	 * Remove the context op to avoid the final call to
	 * schedctl_save when switching away from this lwp.
	 */
	(void) removectx(t, ssp, schedctl_save, schedctl_restore,
	    schedctl_fork, NULL, NULL, NULL);

	/*
	 * Do not unmap the shared page until the process exits.
	 * User-level library code relies on this for adaptive mutex locking.
	 */
	mutex_enter(&p->p_sc_lock);
	ssp->sc_state = SC_FREE;
	pagep = schedctl_page_lookup(ssp);
	index = (index_t)(ssp - pagep->spc_base);
	BT_CLEAR(pagep->spc_map, index);
	pagep->spc_space += sizeof (sc_shared_t);
	mutex_exit(&p->p_sc_lock);
}
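
The BT_CLEAR()/spc_space accounting above is the release half of a small bitmap slot allocator; the matching BT_SET() half is in schedctl_shared_alloc() (Example #10). A minimal user-space sketch of the same idiom, using a hypothetical slot_pool_t in place of the real sc_page_ctl_t:

#include <stdint.h>
#include <stddef.h>

#define	NSLOTS	64			/* slots per pool (one 64-bit map) */

typedef struct slot_pool {
	uint64_t	map;		/* one bit per slot, set = in use */
	size_t		space;		/* bytes still available in the pool */
	size_t		slot_size;	/* bytes charged per slot */
} slot_pool_t;

/* Allocate a slot: find the first clear bit, set it, charge the space. */
static int
slot_alloc(slot_pool_t *pool)
{
	int i;

	for (i = 0; i < NSLOTS; i++) {
		if (!(pool->map & ((uint64_t)1 << i))) {
			pool->map |= (uint64_t)1 << i;
			pool->space -= pool->slot_size;
			return (i);
		}
	}
	return (-1);			/* no free slot */
}

/* Free a slot: clear its bit and credit the space back, as above. */
static void
slot_free(slot_pool_t *pool, int index)
{
	pool->map &= ~((uint64_t)1 << index);
	pool->space += pool->slot_size;
}
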
Example #2
0
void
kstat_delete(kstat_t *ksp)
{
	ekstat_t *e = (ekstat_t *)ksp;
	kmutex_t *lock = ksp->ks_lock;
	int lock_needs_release = 0;

	// destroy the sysctl
	if (ksp->ks_type == KSTAT_TYPE_NAMED) {
		if (lock && MUTEX_NOT_HELD(lock)) {
			mutex_enter(lock);
			lock_needs_release = 1;
		}

		remove_child_sysctls(e);

		if (lock_needs_release) {
			mutex_exit(lock);
		}
	}

	sysctl_unregister_oid(&e->e_oid);

	if (e->e_vals) {
		kfree(e->e_vals, sizeof(sysctl_leaf_t) * e->e_num_vals);
	}
	cv_destroy(&e->e_cv);
	kfree(e, e->e_size);
}
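
kstat_delete() takes ks_lock only when the caller does not already hold it, and remembers whether it did so before releasing. A minimal pthreads sketch of that conditional-acquire pattern; the tracked_mutex_t ownership bookkeeping below is a hypothetical stand-in for what the kernel's MUTEX_NOT_HELD() provides:

#include <pthread.h>

typedef struct tracked_mutex {
	pthread_mutex_t	m;
	pthread_t	owner;		/* valid only while held != 0 */
	int		held;
} tracked_mutex_t;

/*
 * "Do I already hold this lock?"  Like the kernel's ownership check, the
 * answer is only meaningful when asked by the thread that may be the owner.
 */
static int
tm_held_by_me(tracked_mutex_t *t)
{
	return (t->held && pthread_equal(t->owner, pthread_self()));
}

static void
tm_enter(tracked_mutex_t *t)
{
	pthread_mutex_lock(&t->m);
	t->owner = pthread_self();
	t->held = 1;
}

static void
tm_exit(tracked_mutex_t *t)
{
	t->held = 0;
	pthread_mutex_unlock(&t->m);
}

/* Take the lock only if the caller does not already hold it, as above. */
static void
do_locked_work(tracked_mutex_t *t, void (*work)(void))
{
	int needs_release = 0;

	if (!tm_held_by_me(t)) {
		tm_enter(t);
		needs_release = 1;
	}
	work();
	if (needs_release)
		tm_exit(t);
}
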
Example #3
0
/*
 * Destroy an attribute hash table, called by mmd_rempdesc or during free.
 */
static void
mmd_destroy_pattbl(patbkt_t **tbl)
{
	patbkt_t *bkt;
	pattr_t *pa, *pa_next;
	uint_t i, tbl_sz;

	ASSERT(tbl != NULL);
	bkt = *tbl;
	tbl_sz = bkt->pbkt_tbl_sz;

	/* make sure caller passes in the first bucket */
	ASSERT(tbl_sz > 0);

	/* destroy the contents of each bucket */
	for (i = 0; i < tbl_sz; i++, bkt++) {
		/* we ought to be exclusive at this point */
		ASSERT(MUTEX_NOT_HELD(&(bkt->pbkt_lock)));

		pa = Q2PATTR(bkt->pbkt_pattr_q.ql_next);
		while (pa != Q2PATTR(&(bkt->pbkt_pattr_q))) {
			ASSERT(pa->pat_magic == PATTR_MAGIC);
			pa_next = Q2PATTR(pa->pat_next);
			remque(&(pa->pat_next));
			kmem_free(pa, pa->pat_buflen);
			pa = pa_next;
		}
	}

	kmem_cache_free(pattbl_cache, *tbl);
	*tbl = NULL;

	/* commit all previous stores */
	membar_producer();
}
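
Each bucket above is drained with the usual save-next-before-free walk over a circular, sentinel-headed queue (Q2PATTR()/remque()). A minimal sketch of that idiom with a hypothetical node_t, not the real pattr_t:

#include <stdlib.h>

typedef struct node {
	struct node	*next;
	struct node	*prev;
} node_t;

/* Free every element of a circular doubly-linked list with sentinel 'head'. */
static void
list_destroy(node_t *head)
{
	node_t *n = head->next;

	while (n != head) {
		node_t *next = n->next;		/* save before freeing */

		n->prev->next = n->next;	/* unlink (remque() equivalent) */
		n->next->prev = n->prev;
		free(n);
		n = next;
	}
}
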
Example #4
0
/*
 * Function called by an lwp after it resumes from stop().
 */
void
setallwatch(void)
{
	proc_t *p = curproc;
	struct as *as = curproc->p_as;
	struct watched_page *pwp, *next;
	struct seg *seg;
	caddr_t vaddr;
	uint_t prot;
	int err, retrycnt;

	if (p->p_wprot == NULL)
		return;

	ASSERT(MUTEX_NOT_HELD(&curproc->p_lock));

	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);

	pwp = p->p_wprot;
	while (pwp != NULL) {

		vaddr = pwp->wp_vaddr;
		retrycnt = 0;
	retry:
		ASSERT(pwp->wp_flags & WP_SETPROT);
		if ((seg = as_segat(as, vaddr)) != NULL &&
		    !(pwp->wp_flags & WP_NOWATCH)) {
			prot = pwp->wp_prot;
			err = SEGOP_SETPROT(seg, vaddr, PAGESIZE, prot);
			if (err == IE_RETRY) {
				ASSERT(retrycnt == 0);
				retrycnt++;
				goto retry;
			}
		}

		next = pwp->wp_list;

		if (pwp->wp_read + pwp->wp_write + pwp->wp_exec == 0) {
			/*
			 * No watched areas remain in this page.
			 * Free the watched_page structure.
			 */
			avl_remove(&as->a_wpage, pwp);
			kmem_free(pwp, sizeof (struct watched_page));
		} else {
			pwp->wp_flags &= ~WP_SETPROT;
		}

		pwp = next;
	}
	p->p_wprot = NULL;

	AS_LOCK_EXIT(as, &as->a_lock);
}
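
SEGOP_SETPROT() may ask to be called again by returning IE_RETRY, and the retrycnt/goto pair above asserts that a single retry is always enough. The same bounded-retry shape as a small sketch, with a hypothetical do_op() and OP_RETRY standing in for the segment op and IE_RETRY:

#include <assert.h>

#define	OP_RETRY	(-2)		/* hypothetical "please retry" code */

static int
do_op_with_one_retry(int (*do_op)(void))
{
	int retrycnt = 0;
	int err;

retry:
	err = do_op();
	if (err == OP_RETRY) {
		assert(retrycnt == 0);	/* a second retry would be a bug */
		retrycnt++;
		goto retry;
	}
	return (err);
}
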
Example #5
0
/*
 * This interface replaces hasprocperm; it works like hasprocperm but
 * additionally returns success if the proc_t's match.
 * It is the preferred interface for most uses.
 * It acquires p_crlock itself, so it asserts that the lock must not
 * be held on entry.
 */
int
prochasprocperm(proc_t *tp, proc_t *sp, const cred_t *scrp)
{
	int rets;
	cred_t *tcrp;

	ASSERT(MUTEX_NOT_HELD(&tp->p_crlock));

	if (tp == sp)
		return (1);

	if (tp->p_sessp != sp->p_sessp && secpolicy_basic_proc(scrp) != 0)
		return (0);

	mutex_enter(&tp->p_crlock);
	crhold(tcrp = tp->p_cred);
	mutex_exit(&tp->p_crlock);
	rets = hasprocperm(tcrp, scrp);
	crfree(tcrp);

	return (rets);
}
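
prochasprocperm() holds p_crlock just long enough to take a hold (crhold()) on the credential, then drops the lock before the permission check and releases the hold with crfree(). A minimal pthreads sketch of that hold-then-drop idiom with a hypothetical refcounted obj_t:

#include <pthread.h>
#include <stdlib.h>

typedef struct obj {
	int	refcnt;			/* protected by 'lock' below */
	/* ... payload ... */
} obj_t;

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static obj_t *current;			/* may be replaced by other threads */

static void
obj_rele(obj_t *o)
{
	int last;

	pthread_mutex_lock(&lock);
	last = (--o->refcnt == 0);
	pthread_mutex_unlock(&lock);
	if (last)
		free(o);
}

/* Take a hold under the lock, then use the object with the lock dropped. */
static void
use_current(void (*work)(obj_t *))
{
	obj_t *o;

	pthread_mutex_lock(&lock);
	o = current;
	o->refcnt++;			/* crhold() equivalent */
	pthread_mutex_unlock(&lock);

	work(o);			/* may block; lock is not held */

	obj_rele(o);			/* crfree() equivalent */
}
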
Example #6
0
/*
 * ibmf_i_issue_pkt():
 *	Post an IB packet on the specified QP's send queue
 */
int
ibmf_i_issue_pkt(ibmf_client_t *clientp, ibmf_msg_impl_t *msgimplp,
    ibmf_qp_handle_t ibmf_qp_handle, ibmf_send_wqe_t *send_wqep)
{
	int			ret;
	ibt_status_t		status;
	ibt_wr_ds_t		sgl[1];
	ibt_qp_hdl_t		ibt_qp_handle;

	_NOTE(ASSUMING_PROTECTED(*send_wqep))
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*send_wqep))

	IBMF_TRACE_4(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_i_issue_pkt_start, IBMF_TNF_TRACE, "",
	    "ibmf_i_issue_pkt() enter, clientp = %p, msg = %p, "
	    "qp_hdl = %p,  swqep = %p\n", tnf_opaque, clientp, clientp,
	    tnf_opaque, msg, msgimplp, tnf_opaque, ibmf_qp_handle,
	    ibmf_qp_handle, tnf_opaque, send_wqep, send_wqep);

	ASSERT(MUTEX_HELD(&msgimplp->im_mutex));
	ASSERT(MUTEX_NOT_HELD(&clientp->ic_mutex));

	/*
	 * If the qp handle provided in ibmf_send_pkt() is not the
	 * default qp handle for this client, the wqe must be sent on
	 * that qp; otherwise use the default qp handle set up during
	 * ibmf_register().
	 */
	if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT) {
		ibt_qp_handle = clientp->ic_qp->iq_qp_handle;
	} else {
		ibt_qp_handle =
		    ((ibmf_alt_qp_t *)ibmf_qp_handle)->isq_qp_handle;
	}

	/* initialize the send WQE */
	ibmf_i_init_send_wqe(clientp, msgimplp, sgl, send_wqep,
	    msgimplp->im_ud_dest, ibt_qp_handle, ibmf_qp_handle);

	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*send_wqep))

	/*
	 * Issue the wqe to the transport.
	 * NOTE: ibt_post_send() will not block, so it is ok
	 * to hold the msgimpl mutex across this call.
	 */
	status = ibt_post_send(send_wqep->send_qp_handle, &send_wqep->send_wr,
	    1, NULL);
	if (status != IBT_SUCCESS) {
		mutex_enter(&clientp->ic_kstat_mutex);
		IBMF_ADD32_KSTATS(clientp, send_pkt_failed, 1);
		mutex_exit(&clientp->ic_kstat_mutex);
		IBMF_TRACE_2(IBMF_TNF_NODEBUG, DPRINT_L1,
		    ibmf_i_issue_pkt_err, IBMF_TNF_TRACE, "",
		    "ibmf_i_issue_pkt(): %s, status = %d\n",
		    tnf_string, msg, "post send failure",
		    tnf_uint, ibt_status, status);
		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_issue_pkt_end,
		    IBMF_TNF_TRACE, "", "ibmf_i_issue_pkt() exit\n");
		return (IBMF_TRANSPORT_FAILURE);
	}

	ret = IBMF_SUCCESS;

	/* bump the number of active sends */
	if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT) {
		mutex_enter(&clientp->ic_mutex);
		clientp->ic_sends_active++;
		mutex_exit(&clientp->ic_mutex);
		mutex_enter(&clientp->ic_kstat_mutex);
		IBMF_ADD32_KSTATS(clientp, sends_active, 1);
		mutex_exit(&clientp->ic_kstat_mutex);
	} else {
		ibmf_alt_qp_t *qpp = (ibmf_alt_qp_t *)ibmf_qp_handle;
		mutex_enter(&qpp->isq_mutex);
		qpp->isq_sends_active++;
		mutex_exit(&qpp->isq_mutex);
		mutex_enter(&clientp->ic_kstat_mutex);
		IBMF_ADD32_KSTATS(clientp, sends_active, 1);
		mutex_exit(&clientp->ic_kstat_mutex);
	}

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_issue_pkt_end,
	    IBMF_TNF_TRACE, "", "ibmf_i_issue_pkt() exit\n");
	return (ret);
}
Example #7
0
/*
 * ibmf_i_client_add_msg():
 *	Add the message to the client message list
 */
void
ibmf_i_client_add_msg(ibmf_client_t *clientp, ibmf_msg_impl_t *msgimplp)
{
    IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
                 ibmf_i_client_add_msg_start, IBMF_TNF_TRACE, "",
                 "ibmf_i_client_add_msg(): clientp = 0x%p, msgp = 0x%p\n",
                 tnf_opaque, clientp, clientp, tnf_opaque, msg, msgimplp);

    ASSERT(MUTEX_NOT_HELD(&msgimplp->im_mutex));

    mutex_enter(&clientp->ic_msg_mutex);

    /*
     * If this is a termination message, add it to the termination
     * message list; otherwise add it to the regular message list.
     */
    mutex_enter(&msgimplp->im_mutex);
    if (msgimplp->im_flags & IBMF_MSG_FLAGS_TERMINATION) {

        mutex_exit(&msgimplp->im_mutex);
        /* Put the message on the list */
        if (clientp->ic_term_msg_list == NULL) {
            clientp->ic_term_msg_list = clientp->ic_term_msg_last =
                                            msgimplp;
        } else {
            msgimplp->im_msg_prev = clientp->ic_term_msg_last;
            clientp->ic_term_msg_last->im_msg_next = msgimplp;
            clientp->ic_term_msg_last = msgimplp;
        }
    } else {

        mutex_exit(&msgimplp->im_mutex);
        /*
         * Increment the counter and kstats for active messages
         */
        clientp->ic_msgs_active++;
        mutex_enter(&clientp->ic_kstat_mutex);
        IBMF_ADD32_KSTATS(clientp, msgs_active, 1);
        mutex_exit(&clientp->ic_kstat_mutex);

        /* Put the message on the list */
        if (clientp->ic_msg_list == NULL) {
            clientp->ic_msg_list = clientp->ic_msg_last = msgimplp;
        } else {
            msgimplp->im_msg_prev = clientp->ic_msg_last;
            clientp->ic_msg_last->im_msg_next = msgimplp;
            clientp->ic_msg_last = msgimplp;
        }
    }

    msgimplp->im_msg_next = NULL;

    /* Set the message flags to indicate the message is on the list */
    mutex_enter(&msgimplp->im_mutex);
    msgimplp->im_flags |= IBMF_MSG_FLAGS_ON_LIST;
    mutex_exit(&msgimplp->im_mutex);

    mutex_exit(&clientp->ic_msg_mutex);

    IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
                 ibmf_i_client_add_msg_end, IBMF_TNF_TRACE, "",
                 "ibmf_i_client_add_msg() exit\n");
}
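
Both branches above perform the same tail append on a list that keeps head and last pointers; ic_msg_mutex plays the role of the list lock. A minimal sketch of that append with a hypothetical msg_t/msg_list_t:

typedef struct msg {
	struct msg	*next;
	struct msg	*prev;
} msg_t;

typedef struct msg_list {
	msg_t	*head;
	msg_t	*last;
} msg_list_t;

/* Append m at the tail; the caller holds the list lock. */
static void
msg_append(msg_list_t *l, msg_t *m)
{
	if (l->head == NULL) {
		l->head = l->last = m;
		m->prev = NULL;
	} else {
		m->prev = l->last;
		l->last->next = m;
		l->last = m;
	}
	m->next = NULL;
}
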
Example #8
0
/*
 * ibmf_i_client_rem_msg():
 *	Remove the message from the client's message list.
 *	The refcnt will hold the message reference count at the time
 *	the message was removed from the message list. Any packets
 *	arriving after this point for the message will be dropped.
 *	The message reference count is used by the threads processing
 *	the message to decide which one should notify the client
 *	(the one that decrements the reference count to zero).
 */
void
ibmf_i_client_rem_msg(ibmf_client_t *clientp, ibmf_msg_impl_t *msgimplp,
                      uint_t *refcnt)
{
    ibmf_msg_impl_t *tmpmsg, *prevmsg = NULL;

    ASSERT(MUTEX_NOT_HELD(&msgimplp->im_mutex));

    IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
                 ibmf_i_client_rem_msg_start, IBMF_TNF_TRACE, "",
                 "ibmf_i_client_rem_msg(): clientp = 0x%p, msgp = 0x%p\n",
                 tnf_opaque, clientp, clientp, tnf_opaque, msg, msgimplp);

    mutex_enter(&clientp->ic_msg_mutex);

    /*
     * If this is a termination message, remove it from the termination
     * message list; otherwise remove it from the regular message list.
     */
    mutex_enter(&msgimplp->im_mutex);
    if (msgimplp->im_flags & IBMF_MSG_FLAGS_TERMINATION) {

        mutex_exit(&msgimplp->im_mutex);
        tmpmsg = clientp->ic_term_msg_list;

        while (tmpmsg != NULL) {
            if (tmpmsg == msgimplp)
                break;
            prevmsg = tmpmsg;
            tmpmsg = tmpmsg->im_msg_next;
        }

        ASSERT(tmpmsg != NULL);

        if (tmpmsg->im_msg_next == NULL)
            clientp->ic_term_msg_last = prevmsg;
        else
            tmpmsg->im_msg_next->im_msg_prev = prevmsg;

        if (prevmsg != NULL)
            prevmsg->im_msg_next = tmpmsg->im_msg_next;
        else
            clientp->ic_term_msg_list = tmpmsg->im_msg_next;
    } else {

        mutex_exit(&msgimplp->im_mutex);
        /*
         * Decrement the counter and kstats for active messages
         */
        ASSERT(clientp->ic_msgs_active != 0);
        clientp->ic_msgs_active--;
        mutex_enter(&clientp->ic_kstat_mutex);
        IBMF_SUB32_KSTATS(clientp, msgs_active, 1);
        mutex_exit(&clientp->ic_kstat_mutex);

        tmpmsg = clientp->ic_msg_list;

        while (tmpmsg != NULL) {
            if (tmpmsg == msgimplp)
                break;
            prevmsg = tmpmsg;
            tmpmsg = tmpmsg->im_msg_next;
        }

        ASSERT(tmpmsg != NULL);

        if (tmpmsg->im_msg_next == NULL)
            clientp->ic_msg_last = prevmsg;
        else
            tmpmsg->im_msg_next->im_msg_prev = prevmsg;

        if (prevmsg != NULL)
            prevmsg->im_msg_next = tmpmsg->im_msg_next;
        else
            clientp->ic_msg_list = tmpmsg->im_msg_next;
    }

    /* Save away the message reference count and clear the list flag */
    mutex_enter(&msgimplp->im_mutex);
    *refcnt = msgimplp->im_ref_count;
    msgimplp->im_flags &= ~IBMF_MSG_FLAGS_ON_LIST;
    mutex_exit(&msgimplp->im_mutex);

    mutex_exit(&clientp->ic_msg_mutex);

    IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
                 ibmf_i_client_rem_msg_end, IBMF_TNF_TRACE, "",
                 "ibmf_i_client_rem_msg() exit\n");
}
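
The removal walk above tracks the predecessor so it can patch either the head pointer or the previous element's next pointer, and the tail pointer when the victim was last. The same shape as a sketch, reusing the hypothetical msg_list_t from the previous sketch:

/* Remove m from l; the caller holds the list lock and m must be on l. */
static void
msg_remove(msg_list_t *l, msg_t *m)
{
	msg_t *cur = l->head, *prev = NULL;

	while (cur != NULL && cur != m) {	/* find m's predecessor */
		prev = cur;
		cur = cur->next;
	}

	if (m->next == NULL)
		l->last = prev;			/* removing the tail */
	else
		m->next->prev = prev;

	if (prev != NULL)
		prev->next = m->next;
	else
		l->head = m->next;		/* removing the head */
}
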
Example #9
0
/*
 * Wrapper for the qi_putp entry in the module ops vec;
 * implements an asynchronous putnext().
 * Note that unlike putnext(), this routine is NOT optimized for the
 * fastpath.  Calling this routine will grab whatever locks are necessary
 * to protect the stream head, q_next, and syncq's.
 * Since it is in the normal locks path, we do not use putlocks if
 * they exist (though this can be changed by swapping the value of
 * UseFastlocks).
 */
void
put(queue_t *qp, mblk_t *mp)
{
	queue_t		*fqp = qp; /* For strft tracing */
	syncq_t		*sq;
	uint16_t	flags;
	uint16_t	drain_mask;
	struct qinit	*qi;
	int		(*putproc)();
	int		ix;
	boolean_t	queued = B_FALSE;
	kmutex_t	*sqciplock = NULL;
	ushort_t	*sqcipcount = NULL;

	TRACE_2(TR_FAC_STREAMS_FR, TR_PUT_START,
		"put:(%X, %X)", qp, mp);
	ASSERT(mp->b_datap->db_ref != 0);
	ASSERT(mp->b_next == NULL && mp->b_prev == NULL);

	sq = qp->q_syncq;
	ASSERT(sq != NULL);
	qi = qp->q_qinfo;

	if (UseFastlocks && sq->sq_ciputctrl != NULL) {
		/* fastlock: */
		ASSERT(sq->sq_flags & SQ_CIPUT);
		ix = CPU->cpu_seqid & sq->sq_nciputctrl;
		sqciplock = &sq->sq_ciputctrl[ix].ciputctrl_lock;
		sqcipcount = &sq->sq_ciputctrl[ix].ciputctrl_count;
		mutex_enter(sqciplock);
		if (!((*sqcipcount) & SQ_FASTPUT) ||
		    (sq->sq_flags & (SQ_STAYAWAY|SQ_EXCL|SQ_EVENTS))) {
			mutex_exit(sqciplock);
			sqciplock = NULL;
			goto slowlock;
		}
		(*sqcipcount)++;
		ASSERT(*sqcipcount != 0);
		queued = qp->q_sqflags & Q_SQQUEUED;
		mutex_exit(sqciplock);
	} else {
	    slowlock:
		ASSERT(sqciplock == NULL);
		mutex_enter(SQLOCK(sq));
		flags = sq->sq_flags;
		/*
		 * We are going to drop SQLOCK, so make a claim to prevent syncq
		 * from closing.
		 */
		sq->sq_count++;
		ASSERT(sq->sq_count != 0);		/* Wraparound */
		/*
		 * If there are writers or exclusive waiters, there is not much
		 * we can do.  Place the message on the syncq and schedule a
		 * background thread to drain it.
		 *
		 * Also if we are approaching end of stack, fill the syncq and
		 * switch processing to a background thread - see comments on
		 * top.
		 */
		if ((flags & (SQ_STAYAWAY|SQ_EXCL|SQ_EVENTS)) ||
		    (sq->sq_needexcl != 0) || PUT_STACK_NOTENOUGH()) {

			TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
			    "putnext_end:(%p, %p, %p) SQ_EXCL fill",
			    qp, mp, sq);

			/*
			 * NOTE: qfill_syncq will need QLOCK. It is safe to drop
			 * SQLOCK because positive sq_count keeps the syncq from
			 * closing.
			 */
			mutex_exit(SQLOCK(sq));

			qfill_syncq(sq, qp, mp);
			/*
			 * NOTE: after the call to qfill_syncq() qp may be
			 * closed, both qp and sq should not be referenced at
			 * this point.
			 *
			 * This ASSERT is located here to prevent stack frame
			 * consumption in the DEBUG code.
			 */
			ASSERT(sqciplock == NULL);
			return;
		}

		queued = qp->q_sqflags & Q_SQQUEUED;
		/*
		 * If not a concurrent perimeter, we need to acquire
		 * it exclusively.  It could not have been previously
		 * set since we held the SQLOCK before testing
		 * SQ_GOAWAY above (which includes SQ_EXCL).
		 * We do this here because we hold the SQLOCK, and need
		 * to make this state change BEFORE dropping it.
		 */
		if (!(flags & SQ_CIPUT)) {
			ASSERT((sq->sq_flags & SQ_EXCL) == 0);
			ASSERT(!(sq->sq_type & SQ_CIPUT));
			sq->sq_flags |= SQ_EXCL;
		}
		mutex_exit(SQLOCK(sq));
	}

	ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)));
	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));

	/*
	 * We now have a claim on the syncq, we are either going to
	 * put the message on the syncq and then drain it, or we are
	 * going to call the putproc().
	 */
	putproc = qi->qi_putp;
	if (!queued) {
		STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT, mp->b_rptr -
		    mp->b_datap->db_base);
		(*putproc)(qp, mp);
		ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
		ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
	} else {
		mutex_enter(QLOCK(qp));
		/*
		 * If there are no messages in front of us, just call putproc(),
		 * otherwise enqueue the message and drain the queue.
		 */
		if (qp->q_syncqmsgs == 0) {
			mutex_exit(QLOCK(qp));
			STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT, mp->b_rptr -
			    mp->b_datap->db_base);
			(*putproc)(qp, mp);
			ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
		} else {
			/*
			 * We are doing a fill with the intent to
			 * drain (meaning we are filling because
			 * there are messages in front of us and we
			 * need to preserve message ordering).
			 * Therefore, put the message on the queue
			 * and call qdrain_syncq (must be done with
			 * the QLOCK held).
			 */
			STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT,
			    mp->b_rptr - mp->b_datap->db_base);

#ifdef DEBUG
			/*
			 * These two values were in the original code for
			 * all syncq messages.  This is unnecessary in
			 * the current implementation, but was retained
			 * in debug mode as it is useful to know where
			 * problems occur.
			 */
			mp->b_queue = qp;
			mp->b_prev = (mblk_t *)putproc;
#endif
			SQPUT_MP(qp, mp);
			qdrain_syncq(sq, qp);
			ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
		}
	}
	/*
	 * Before we release our claim, we need to see if any
	 * events were posted. If the syncq is SQ_EXCL && SQ_QUEUED,
	 * we were responsible for going exclusive and, therefore,
	 * are responsible for draining.
	 */
	if (sq->sq_flags & (SQ_EXCL)) {
		drain_mask = 0;
	} else {
		drain_mask = SQ_QUEUED;
	}

	if (sqciplock != NULL) {
		mutex_enter(sqciplock);
		flags = sq->sq_flags;
		ASSERT(flags & SQ_CIPUT);
		/* SQ_EXCL could have been set by qwriter_inner */
		if ((flags & (SQ_EXCL|SQ_TAIL)) || sq->sq_needexcl) {
			/*
			 * we need SQLOCK to handle
			 * wakeups/drains/flags change.  sqciplock
			 * is needed to decrement sqcipcount.
			 * SQLOCK has to be grabbed before sqciplock
			 * for lock ordering purposes.
			 * after sqcipcount is decremented some lock
			 * still needs to be held to make sure
			 * syncq won't get freed on us.
			 *
			 * To prevent deadlocks we try to grab SQLOCK and if it
			 * is held already we drop sqciplock, acquire SQLOCK and
			 * reacquire sqciplock again.
			 */
			if (mutex_tryenter(SQLOCK(sq)) == 0) {
				mutex_exit(sqciplock);
				mutex_enter(SQLOCK(sq));
				mutex_enter(sqciplock);
			}
			flags = sq->sq_flags;
			ASSERT(*sqcipcount != 0);
			(*sqcipcount)--;
			mutex_exit(sqciplock);
		} else {
			ASSERT(*sqcipcount != 0);
			(*sqcipcount)--;
			mutex_exit(sqciplock);
			TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
			"putnext_end:(%p, %p, %p) done", qp, mp, sq);
			return;
		}
	} else {
		mutex_enter(SQLOCK(sq));
		flags = sq->sq_flags;
		ASSERT(sq->sq_count != 0);
		sq->sq_count--;
	}
	if ((flags & (SQ_TAIL)) || sq->sq_needexcl) {
		putnext_tail(sq, qp, (flags & ~drain_mask));
		/*
		 * The only purpose of this ASSERT is to preserve calling stack
		 * in DEBUG kernel.
		 */
		ASSERT(sq != NULL);
		return;
	}
	ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)) || queued);
	ASSERT((flags & (SQ_EXCL|SQ_CIPUT)) || queued);
	/*
	 * Safe to always drop SQ_EXCL:
	 *	Not SQ_CIPUT means we set SQ_EXCL above
	 *	For SQ_CIPUT SQ_EXCL will only be set if the put
	 *	procedure did a qwriter(INNER) in which case
	 *	nobody else is in the inner perimeter and we
	 *	are exiting.
	 *
	 * I would like to make the following assertion:
	 *
	 * ASSERT((flags & (SQ_EXCL|SQ_CIPUT)) != (SQ_EXCL|SQ_CIPUT) ||
	 * 	sq->sq_count == 0);
	 *
	 * which indicates that if we are both putshared and exclusive,
	 * we became exclusive while executing the putproc, and the only
	 * claim on the syncq was the one we dropped a few lines above.
	 * But other threads that enter putnext while the syncq is exclusive
	 * need to make a claim as they may need to drop SQLOCK in the
	 * has_writers case to avoid deadlocks.  If these threads are
	 * delayed or preempted, it is possible that the writer thread can
	 * find out that there are other claims making the (sq_count == 0)
	 * test invalid.
	 */

	sq->sq_flags = flags & ~SQ_EXCL;
	mutex_exit(SQLOCK(sq));
	TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
	    "putnext_end:(%p, %p, %p) done", qp, mp, sq);
}
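
The sq_count++ under SQLOCK near the top of put() is a claim that keeps the syncq from being closed while SQLOCK is dropped; the claim is given back near the bottom (sq->sq_count--). A minimal pthreads sketch of that claim/wait-for-idle idiom with a hypothetical res_t:

#include <pthread.h>

typedef struct res {
	pthread_mutex_t	lock;
	pthread_cond_t	cv;
	unsigned	claims;		/* active users; protected by 'lock' */
} res_t;

/* Take a claim so the resource cannot be torn down while we work unlocked. */
static void
res_claim(res_t *r)
{
	pthread_mutex_lock(&r->lock);
	r->claims++;
	pthread_mutex_unlock(&r->lock);
}

static void
res_release(res_t *r)
{
	pthread_mutex_lock(&r->lock);
	if (--r->claims == 0)
		pthread_cond_broadcast(&r->cv);	/* wake a waiting destroyer */
	pthread_mutex_unlock(&r->lock);
}

/* Teardown waits until every claim has been released. */
static void
res_wait_idle(res_t *r)
{
	pthread_mutex_lock(&r->lock);
	while (r->claims != 0)
		pthread_cond_wait(&r->cv, &r->lock);
	pthread_mutex_unlock(&r->lock);
}
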
Example #10
0
static int
schedctl_shared_alloc(sc_shared_t **kaddrp, uintptr_t *uaddrp)
{
	proc_t		*p = curproc;
	sc_page_ctl_t	*pagep;
	sc_shared_t	*ssp;
	caddr_t		base;
	index_t		index;
	int		error;

	ASSERT(MUTEX_NOT_HELD(&p->p_lock));
	mutex_enter(&p->p_sc_lock);

	/*
	 * Try to find space for the new data in existing pages
	 * within the process's list of shared pages.
	 */
	for (pagep = p->p_pagep; pagep != NULL; pagep = pagep->spc_next)
		if (pagep->spc_space != 0)
			break;

	if (pagep != NULL)
		base = pagep->spc_uaddr;
	else {
		struct anon_map *amp;
		caddr_t kaddr;

		/*
		 * No room, need to allocate a new page.  Also set up
		 * a mapping to the kernel address space for the new
		 * page and lock it in memory.
		 */
		if ((error = schedctl_getpage(&amp, &kaddr)) != 0) {
			mutex_exit(&p->p_sc_lock);
			return (error);
		}
		if ((error = schedctl_map(amp, &base, kaddr)) != 0) {
			schedctl_freepage(amp, kaddr);
			mutex_exit(&p->p_sc_lock);
			return (error);
		}

		/*
		 * Allocate and initialize the page control structure.
		 */
		pagep = kmem_alloc(sizeof (sc_page_ctl_t), KM_SLEEP);
		pagep->spc_amp = amp;
		pagep->spc_base = (sc_shared_t *)kaddr;
		pagep->spc_end = (sc_shared_t *)(kaddr + sc_pagesize);
		pagep->spc_uaddr = base;

		pagep->spc_map = kmem_zalloc(sizeof (ulong_t) * sc_bitmap_words,
		    KM_SLEEP);
		pagep->spc_space = sc_pagesize;

		pagep->spc_next = p->p_pagep;
		p->p_pagep = pagep;
	}

	/*
	 * Got a page, now allocate space for the data.  There should
	 * be space unless something's wrong.
	 */
	ASSERT(pagep != NULL && pagep->spc_space >= sizeof (sc_shared_t));
	index = bt_availbit(pagep->spc_map, sc_bitmap_len);
	ASSERT(index != -1);

	/*
	 * Get location with pointer arithmetic.  spc_base is of type
	 * sc_shared_t *.  Mark as allocated.
	 */
	ssp = pagep->spc_base + index;
	BT_SET(pagep->spc_map, index);
	pagep->spc_space -= sizeof (sc_shared_t);

	mutex_exit(&p->p_sc_lock);

	/*
	 * Return kernel and user addresses.
	 */
	*kaddrp = ssp;
	*uaddrp = (uintptr_t)base + ((uintptr_t)ssp & PAGEOFFSET);
	return (0);
}
Example #11
/*
 * hci1394_ixl_dma_sync()
 *    the heart of interrupt processing, this routine correlates where the
 *    hardware is for the specified context with the IXL program.  Invokes
 *    callbacks as needed.  Also called by "update" to make sure ixl is
 *    sync'ed up with where the hardware is.
 *    Returns one of the ixl_intr defined return codes - HCI1394_IXL_INTR...
 *    {..._DMALOST, ..._DMASTOP, ..._NOADV,... _NOERROR}
 */
int
hci1394_ixl_dma_sync(hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp)
{
	ixl1394_command_t *ixlp = NULL;	/* current ixl command */
	ixl1394_command_t *ixlnextp;	/* next ixl command */
	uint16_t	ixlopcode;
	uint16_t	timestamp;
	int		donecode;
	boolean_t	isdone;

	void (*callback)(opaque_t, struct ixl1394_callback *);

	TNF_PROBE_0_DEBUG(hci1394_ixl_dma_sync_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");

	ASSERT(MUTEX_NOT_HELD(&ctxtp->intrprocmutex));

	/* xfer start ixl cmd where last left off */
	ixlnextp = ctxtp->ixl_execp;

	/* last completed descriptor block's timestamp  */
	timestamp = ctxtp->dma_last_time;

	/*
	 * follow execution path in IXL, until find dma descriptor in IXL
	 * xfer command whose status isn't set or until run out of IXL cmds
	 */
	while (ixlnextp != NULL) {
		ixlp = ixlnextp;
		ixlnextp = ixlp->next_ixlp;
		ixlopcode = ixlp->ixl_opcode & ~IXL1394_OPF_UPDATE;

		/*
		 * process IXL commands: xfer start, callback, store timestamp
		 * and jump and ignore the others
		 */

		/* determine if this is an xfer start IXL command */
		if (((ixlopcode & IXL1394_OPF_ISXFER) != 0) &&
		    ((ixlopcode & IXL1394_OPTY_MASK) != 0)) {

			/* process xfer cmd to see if HW has been here */
			isdone = hci1394_ixl_intr_check_xfer(soft_statep, ctxtp,
			    ixlp, &ixlnextp, &timestamp, &donecode);

			if (isdone == B_TRUE) {
				TNF_PROBE_0_DEBUG(hci1394_ixl_dma_sync_exit,
					HCI1394_TNF_HAL_STACK_ISOCH, "");
				return (donecode);
			}

			/* continue to process next IXL command */
			continue;
		}

		/* else check if IXL cmd - jump, callback or store timestamp */
		switch (ixlopcode) {
		case IXL1394_OP_JUMP:
			/*
			 * set next IXL cmd to label ptr in current IXL jump cmd
			 */
			ixlnextp = ((ixl1394_jump_t *)ixlp)->label;
			break;

		case IXL1394_OP_STORE_TIMESTAMP:
			/*
			 * set last timestamp value recorded into current IXL
			 * cmd
			 */
			((ixl1394_store_timestamp_t *)ixlp)->timestamp =
			    timestamp;
			break;

		case IXL1394_OP_CALLBACK:
			/*
			 * if callback function is specified, call it with IXL
			 * cmd addr.  Make sure to grab the lock before setting
			 * the "in callback" flag in intr_flags.
			 */
			mutex_enter(&ctxtp->intrprocmutex);
			ctxtp->intr_flags |= HCI1394_ISO_CTXT_INCALL;
			mutex_exit(&ctxtp->intrprocmutex);

			callback = ((ixl1394_callback_t *)ixlp)->callback;
			if (callback != NULL) {
				callback(ctxtp->global_callback_arg,
				    (ixl1394_callback_t *)ixlp);
			}

			/*
			 * And grab the lock again before clearing
			 * the "in callback" flag.
			 */
			mutex_enter(&ctxtp->intrprocmutex);
			ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INCALL;
			mutex_exit(&ctxtp->intrprocmutex);
			break;
		}
	}

	/*
	 * If we jumped to NULL because of an updateable JUMP, set ixl_execp
	 * back to ixlp.  The destination label might get updated to a
	 * non-NULL value.
	 */
	if ((ixlp != NULL) && (ixlp->ixl_opcode == IXL1394_OP_JUMP_U)) {
		ctxtp->ixl_execp = ixlp;
		TNF_PROBE_1_DEBUG(hci1394_ixl_dma_sync_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
		    "INTR_NOERROR");
		return (HCI1394_IXL_INTR_NOERROR);
	}

	/* save null IXL cmd and depth and last timestamp */
	ctxtp->ixl_execp = NULL;
	ctxtp->ixl_exec_depth = 0;
	ctxtp->dma_last_time = timestamp;

	ctxtp->rem_noadv_intrs = 0;


	/* return stopped status if at end of IXL cmds & context stopped */
	if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) == 0) {
		TNF_PROBE_1_DEBUG(hci1394_ixl_dma_sync_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
		    "INTR_DMASTOP");
		return (HCI1394_IXL_INTR_DMASTOP);
	}

	/* else interrupt processing is lost */
	TNF_PROBE_1_DEBUG(hci1394_ixl_dma_sync_exit,
	    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg, "INTR_DMALOST");
	return (HCI1394_IXL_INTR_DMALOST);
}
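
The IXL1394_OP_CALLBACK case drops intrprocmutex around the client callback but brackets the call by setting and clearing HCI1394_ISO_CTXT_INCALL under the mutex, so other paths can tell a callback is in progress. A minimal pthreads sketch of that bracket with a hypothetical ctx_t:

#include <pthread.h>
#include <stddef.h>

typedef struct ctx {
	pthread_mutex_t	lock;
	int		in_callback;	/* protected by 'lock' */
	void		(*callback)(void *);
	void		*arg;
} ctx_t;

/* Invoke the user callback without holding the lock, but flag that it runs. */
static void
ctx_run_callback(ctx_t *c)
{
	pthread_mutex_lock(&c->lock);
	c->in_callback = 1;
	pthread_mutex_unlock(&c->lock);

	if (c->callback != NULL)
		c->callback(c->arg);	/* user code; runs without 'lock' held */

	pthread_mutex_lock(&c->lock);
	c->in_callback = 0;
	pthread_mutex_unlock(&c->lock);
}
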
Example #12
0
/*
 * Multidata message block free callback routine.
 */
static void
mmd_esballoc_free(caddr_t buf)
{
	multidata_t *mmd;
	pdesc_t *pd;
	pdesc_slab_t *slab;
	int i;

	ASSERT(buf != NULL);
	ASSERT(((struct mmd_buf_info *)buf)->buf_len == MMD_CACHE_SIZE);

	mmd = (multidata_t *)(buf + sizeof (struct mmd_buf_info));
	ASSERT(mmd->mmd_magic == MULTIDATA_MAGIC);

	ASSERT(mmd->mmd_dp != NULL);
	ASSERT(mmd->mmd_dp->db_ref == 1);

	/* remove all packet descriptors and private attributes */
	pd = Q2PD(mmd->mmd_pd_q.ql_next);
	while (pd != Q2PD(&(mmd->mmd_pd_q)))
		pd = mmd_destroy_pdesc(mmd, pd);

	ASSERT(mmd->mmd_pd_q.ql_next == &(mmd->mmd_pd_q));
	ASSERT(mmd->mmd_pd_cnt == 0);
	ASSERT(mmd->mmd_hbuf_ref == 0);
	ASSERT(mmd->mmd_pbuf_ref == 0);

	/* remove all global attributes */
	if (mmd->mmd_pattbl != NULL)
		mmd_destroy_pattbl(&(mmd->mmd_pattbl));

	/* remove all descriptor slabs */
	slab = Q2PDSLAB(mmd->mmd_pd_slab_q.ql_next);
	while (slab != Q2PDSLAB(&(mmd->mmd_pd_slab_q))) {
		pdesc_slab_t *slab_next = Q2PDSLAB(slab->pds_next);

		remque(&(slab->pds_next));
		slab->pds_next = NULL;
		slab->pds_prev = NULL;
		slab->pds_mmd = NULL;
		slab->pds_used = 0;
		kmem_cache_free(pd_slab_cache, slab);

		ASSERT(mmd->mmd_slab_cnt > 0);
		mmd->mmd_slab_cnt--;
		slab = slab_next;
	}
	ASSERT(mmd->mmd_pd_slab_q.ql_next == &(mmd->mmd_pd_slab_q));
	ASSERT(mmd->mmd_slab_cnt == 0);

	mmd->mmd_dp = NULL;

	/* finally, free all associated message blocks */
	if (mmd->mmd_hbuf != NULL) {
		freeb(mmd->mmd_hbuf);
		mmd->mmd_hbuf = NULL;
	}

	for (i = 0; i < MULTIDATA_MAX_PBUFS; i++) {
		if (mmd->mmd_pbuf[i] != NULL) {
			freeb(mmd->mmd_pbuf[i]);
			mmd->mmd_pbuf[i] = NULL;
			ASSERT(mmd->mmd_pbuf_cnt > 0);
			mmd->mmd_pbuf_cnt--;
		}
	}

	ASSERT(mmd->mmd_pbuf_cnt == 0);
	ASSERT(MUTEX_NOT_HELD(&(mmd->mmd_pd_slab_lock)));
	kmem_cache_free(mmd_cache, buf);
}