void
tnf_thread_exit(void)
{
	tnf_ops_t *ops;
	tnf_block_header_t *block;

	TNF_PROBE_0(thread_exit, "thread", /* CSTYLED */);
	/* LINTED pointer cast may result in improper alignment */
	ops = (tnf_ops_t *)curthread->t_tnf_tpdp;
	/*
	 * Mark ops as busy from now on, so it will never be used
	 * again.  If we fail on the busy lock, the buffer
	 * deallocation code is cleaning our ops, so we don't need to
	 * do anything.  If we get the lock and the buffer exists,
	 * release all blocks we hold.  Once we're off allthreads,
	 * the deallocator will not examine our ops.
	 */
	if (!lock_try(&ops->busy))
		return;
	if (tnf_buf != NULL) {
		/* Release any A-locks held */
		block = ops->wcb.tnfw_w_pos.tnfw_w_block;
		ops->wcb.tnfw_w_pos.tnfw_w_block = NULL;
		if (block != NULL)
			lock_clear(&block->A_lock);
		block = ops->wcb.tnfw_w_tag_pos.tnfw_w_block;
		ops->wcb.tnfw_w_tag_pos.tnfw_w_block = NULL;
		if (block != NULL)
			lock_clear(&block->A_lock);
	}
}
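The busy flag above must be claimed with a single atomic test-and-set (lock_try()), exactly as the block comment describes: a separate load followed by LOCK_INIT_HELD() would let both the exiting thread and the deallocator see the flag clear and both proceed. Below is a minimal user-level sketch of that try-lock handoff, using C11 atomic_flag to stand in for the kernel's lock primitives; the model_* names are hypothetical, not part of the source above.

/*
 * Minimal sketch (not kernel code) of the lock_try() handoff:
 * whichever side wins the test-and-set on `busy` owns the cleanup;
 * the loser returns and does nothing.
 */
#include <stdatomic.h>
#include <stdio.h>

typedef struct {
	atomic_flag busy;	/* models ops->busy */
} model_ops_t;

/* Models lock_try(): returns 1 if we acquired the lock, 0 if already held. */
static int
model_lock_try(atomic_flag *lk)
{
	return (!atomic_flag_test_and_set(lk));
}

static void
model_thread_exit(model_ops_t *ops)
{
	if (!model_lock_try(&ops->busy))
		return;		/* the other side is cleaning up */
	printf("this caller owns cleanup\n");
}

int
main(void)
{
	model_ops_t ops = { ATOMIC_FLAG_INIT };
	model_thread_exit(&ops);	/* first caller wins the flag */
	model_thread_exit(&ops);	/* second caller sees busy, returns */
	return (0);
}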
Example #2
/* ARGSUSED */
void
mutex_init(kmutex_t *mp, char *name, kmutex_type_t type, void *ibc)
{
	mutex_impl_t *lp = (mutex_impl_t *)mp;

	ASSERT(ibc < (void *)KERNELBASE);	/* see 1215173 */

	if ((intptr_t)ibc > ipltospl(LOCK_LEVEL) && ibc < (void *)KERNELBASE) {
		/*
		 * The interrupt block cookie encodes an spl above
		 * LOCK_LEVEL: this lock can be taken from a high-level
		 * interrupt handler, so it must be a spin mutex.
		 */
		ASSERT(type != MUTEX_ADAPTIVE && type != MUTEX_DEFAULT);
		MUTEX_SET_TYPE(lp, MUTEX_SPIN);
		LOCK_INIT_CLEAR(&lp->m_spin.m_spinlock);
		/* m_dummylock stays held so the adaptive fast path fails. */
		LOCK_INIT_HELD(&lp->m_spin.m_dummylock);
		lp->m_spin.m_minspl = (int)(intptr_t)ibc;
	} else {
		/* Common case: an adaptive mutex that may block. */
		ASSERT(type != MUTEX_SPIN);
		MUTEX_SET_TYPE(lp, MUTEX_ADAPTIVE);
		MUTEX_CLEAR_LOCK_AND_WAITERS(lp);
	}
}
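For context, here is a hedged sketch of how a driver might reach each branch above through the DDI: mutex_init(9F) with MUTEX_DRIVER is the documented driver interface, a NULL interrupt argument takes the adaptive path, and an interrupt block cookie encoding an spl above LOCK_LEVEL selects the spin path. The xsc_* names are hypothetical.

#include <sys/types.h>
#include <sys/ksynch.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

typedef struct xsc_softc {
	kmutex_t xsc_lock;	/* ordinary lock: adaptive branch */
	kmutex_t xsc_hi_lock;	/* shared with an interrupt handler */
} xsc_softc_t;

static void
xsc_init_locks(xsc_softc_t *xsc, ddi_iblock_cookie_t ibc)
{
	/* NULL cookie: falls into the adaptive branch. */
	mutex_init(&xsc->xsc_lock, NULL, MUTEX_DRIVER, NULL);

	/*
	 * A cookie from a high-level interrupt registration encodes an
	 * spl above LOCK_LEVEL, selecting the spin branch (and setting
	 * m_minspl from the cookie).
	 */
	mutex_init(&xsc->xsc_hi_lock, NULL, MUTEX_DRIVER, (void *)ibc);
}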