Example #1
static inline void
pmap_pai_reset(struct pmap_tlb_info *ti, struct pmap_asid_info *pai,
	struct pmap *pm)
{
	/*
	 * We must have an ASID, but the pmap must not be onproc
	 * (running on a processor).
	 */
	KASSERT(pai->pai_asid);
#ifdef MULTIPROCESSOR
	KASSERT((pm->pm_onproc & ti->ti_cpu_mask) == 0);
#endif
	LIST_REMOVE(pai, pai_link);
#ifdef DIAGNOSTIC
	pai->pai_link.le_prev = NULL;	/* tagged as unlinked */
#endif
	/*
	 * Note that we don't mark the ASID as not in use in the TLB's ASID
	 * bitmap (thus it can't be allocated until the ASID space is exhausted
	 * and therefore reinitialized).  We don't want to flush the TLB for
	 * entries belonging to this ASID so we will let natural TLB entry
	 * replacement flush them out of the TLB.  Any new entries for this
	 * pmap will need a new ASID allocated.
	 */
	pai->pai_asid = 0;

#ifdef MULTIPROCESSOR
	/*
	 * The bits in pm_active belonging to this TLB can only be changed
	 * while this TLB's lock is held.
	 */
	atomic_and_32(&pm->pm_active, ~ti->ti_cpu_mask);
#endif /* MULTIPROCESSOR */
}
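Every example in this collection clears bits with atomic_and_32(). For reference, here is a minimal sketch of that contract written with the GCC/Clang __atomic builtins; it is an illustration only, not the NetBSD or illumos implementation (which is typically a single locked instruction or an LL/SC loop), and the relaxed ordering reflects that callers such as pmap_pai_reset() rely on the locks they already hold for ordering.

#include <stdint.h>

/*
 * Illustrative equivalent of atomic_and_32(): atomically replace *target
 * with (*target & mask), retrying if another CPU modified the word in
 * the meantime.
 */
static inline void
sketch_atomic_and_32(volatile uint32_t *target, uint32_t mask)
{
	uint32_t old = __atomic_load_n(target, __ATOMIC_RELAXED);
	uint32_t nv;

	do {
		nv = old & mask;
		/* On failure, 'old' is refreshed with the current value. */
	} while (!__atomic_compare_exchange_n(target, &old, nv,
	    /* weak */ 1, __ATOMIC_RELAXED, __ATOMIC_RELAXED));
}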
Example #2
/*
 * Remove the inactive table from a device.  Routines that work with inactive
 * tables do not need to synchronise with dmstrategy; they can synchronise
 * among themselves with a mutex.
 */
int
dm_table_clear_ioctl(prop_dictionary_t dm_dict)
{
	dm_dev_t *dmv;
	const char *name, *uuid;
	uint32_t flags, minor;

	dmv = NULL;
	name = NULL;
	uuid = NULL;
	flags = 0;
	minor = 0;

	prop_dictionary_get_cstring_nocopy(dm_dict, DM_IOCTL_NAME, &name);
	prop_dictionary_get_cstring_nocopy(dm_dict, DM_IOCTL_UUID, &uuid);
	prop_dictionary_get_uint32(dm_dict, DM_IOCTL_FLAGS, &flags);
	prop_dictionary_get_uint32(dm_dict, DM_IOCTL_MINOR, &minor);

	aprint_debug("Clearing inactive table from device: %s--%s\n",
	    name, uuid);

	if ((dmv = dm_dev_lookup(name, uuid, minor)) == NULL) {
		DM_REMOVE_FLAG(flags, DM_EXISTS_FLAG);
		return ENOENT;
	}
	/* Destroy the unused (inactive) table. */
	dm_table_destroy(&dmv->table_head, DM_TABLE_INACTIVE);

	atomic_and_32(&dmv->flags, ~DM_INACTIVE_PRESENT_FLAG);

	dm_dev_unbusy(dmv);

	return 0;
}
Example #3
void
i386_ipi_halt(struct cpu_info *ci)
{
	x86_disable_intr();
	atomic_and_32(&ci->ci_flags, ~CPUF_RUNNING);

	for (;;) {
		x86_hlt();
	}
}
Example #4
void
pic_list_deliver_irqs(register_t psw, int ipl, void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct pic_softc *pic;

	while ((pic = pic_list_find_pic_by_pending_ipl(ipl_mask)) != NULL) {
		pic_deliver_irqs(pic, ipl, frame);
		KASSERT((pic->pic_pending_ipls & ipl_mask) == 0);
	}
	atomic_and_32(&pic_pending_ipls, ~ipl_mask);
}
Example #5
/*
 * This request must have come from the link.
 * req_payload was allocated in create_unsol_exchange().
 */
static int
fcoet_process_unsol_els_req(fcoe_frame_t *frm)
{
	int			ret = FCOE_SUCCESS;
	fcoet_exchange_t	*xch;

	xch = fcoet_create_unsol_exchange(frm);
	ASSERT(xch);
	ASSERT(FRM_IS_LAST_FRAME(frm));

	/*
	 * To keep things symmetric, the copy is done here, as in
	 * process_sol_els, rather than in create_unsol_exchange.
	 * req_payload depends on how the buffer is allocated in
	 * create_unsol_exchange.
	 */
	XCH2ELS(xch)->els_req_alloc_size = 0;
	XCH2ELS(xch)->els_req_size = frm->frm_payload_size;
	XCH2ELS(xch)->els_req_payload =
	    GET_BYTE_OFFSET(xch, GET_STRUCT_SIZE(fcoet_exchange_t));
	bcopy(frm->frm_payload, XCH2ELS(xch)->els_req_payload,
	    XCH2ELS(xch)->els_req_size);
	if (XCH2ELS(xch)->els_req_payload[0] != ELS_OP_FLOGI) {
		/*
		 * Make sure the LINK_UP event has been handled (before a
		 * PLOGI is posted) and that the PLOGI has been processed by
		 * FCT (before a PRLI is posted), or else the command will be
		 * discarded.  This needs more consideration later.
		 */
		if ((XCH2ELS(xch)->els_req_payload[0] == ELS_OP_PLOGI) &&
		    (xch->xch_ss->ss_flags & SS_FLAG_DELAY_PLOGI)) {
			delay(STMF_SEC2TICK(1)/2);
		}

		if ((XCH2ELS(xch)->els_req_payload[0] == ELS_OP_PRLI) &&
		    (xch->xch_ss->ss_flags & SS_FLAG_DELAY_PLOGI)) {
			atomic_and_32(&xch->xch_ss->ss_flags,
			    ~SS_FLAG_DELAY_PLOGI);
			delay(STMF_SEC2TICK(1)/3);
		}
		fct_post_rcvd_cmd(xch->xch_cmd, NULL);
	} else {
		/*
		 * We always handle FLOGI internally.  Save the destination
		 * MAC address from the FLOGI request so it can be restored
		 * later.
		 */
		bcopy((char *)frm->frm_hdr-22,
		    frm->frm_eport->eport_efh_dst, ETHERADDRL);
		ret = fcoet_process_unsol_flogi_req(xch);
	}
	return (ret);
}
Example #6
static void
i40e_m_stop(void *arg)
{
	i40e_t *i40e = arg;

	mutex_enter(&i40e->i40e_general_lock);

	if (i40e->i40e_state & I40E_SUSPENDED)
		goto done;

	atomic_and_32(&i40e->i40e_state, ~I40E_STARTED);
	i40e_stop(i40e, B_TRUE);
done:
	mutex_exit(&i40e->i40e_general_lock);
}
Example #7
/*
 * Enabling/disabling the port is simple compared to physical FC HBAs.
 */
fct_status_t
fcoet_enable_port(fcoet_soft_state_t *ss)
{
	FCOET_EXT_LOG(ss->ss_alias, "port is being enabled-%p", ss);
	/* Call fcoe function to online the port */
	if (ss->ss_eport->eport_ctl(ss->ss_eport, FCOE_CMD_PORT_ONLINE, 0) ==
	    FCOE_FAILURE) {
		return (FCT_FAILURE);
	}

	if ((ss->ss_flags & SS_FLAG_PORT_DISABLED) == SS_FLAG_PORT_DISABLED) {
		atomic_and_32(&ss->ss_flags, ~SS_FLAG_PORT_DISABLED);
	}

	return (FCT_SUCCESS);
}
Example #8
/*
 * Simulate Linux behaviour better and switch tables here and not in
 * dm_table_load_ioctl.
 */
int
dm_dev_resume_ioctl(prop_dictionary_t dm_dict)
{
	dm_dev_t *dmv;
	const char *name, *uuid;
	uint32_t flags, minor;

	name = NULL;
	uuid = NULL;
	flags = 0;
	minor = 0;

	/*
	 * char *xml; xml = prop_dictionary_externalize(dm_dict);
	 * printf("%s\n",xml);
	 */

	prop_dictionary_get_cstring_nocopy(dm_dict, DM_IOCTL_NAME, &name);
	prop_dictionary_get_cstring_nocopy(dm_dict, DM_IOCTL_UUID, &uuid);
	prop_dictionary_get_uint32(dm_dict, DM_IOCTL_FLAGS, &flags);
	prop_dictionary_get_uint32(dm_dict, DM_IOCTL_MINOR, &minor);

	/* Look up the device in the global device list. */
	if ((dmv = dm_dev_lookup(name, uuid, minor)) == NULL) {
		DM_REMOVE_FLAG(flags, DM_EXISTS_FLAG);
		return ENOENT;
	}
	atomic_and_32(&dmv->flags, ~(DM_SUSPEND_FLAG | DM_INACTIVE_PRESENT_FLAG));
	atomic_or_32(&dmv->flags, DM_ACTIVE_PRESENT_FLAG);

	dm_table_switch_tables(&dmv->table_head);

	DM_ADD_FLAG(flags, DM_EXISTS_FLAG);

	dmgetproperties(dmv->diskp, &dmv->table_head);

	prop_dictionary_set_uint32(dm_dict, DM_IOCTL_OPEN, dmv->table_head.io_cnt);
	prop_dictionary_set_uint32(dm_dict, DM_IOCTL_FLAGS, flags);
	prop_dictionary_set_uint32(dm_dict, DM_IOCTL_MINOR, dmv->minor);

	dm_dev_unbusy(dmv);

	/* Destroy inactive table after resume. */
	dm_table_destroy(&dmv->table_head, DM_TABLE_INACTIVE);

	return 0;
}
Example #9
static void
pic_list_unblock_irqs(void)
{
	uint32_t blocked_pics = pic_blocked_pics;

	pic_blocked_pics = 0;
	for (;;) {
		struct pic_softc *pic;
#if PIC_MAXSOURCES > 32
		volatile uint32_t *iblocked;
		uint32_t blocked;
		size_t irq_base;
#endif

		int pic_id = ffs(blocked_pics);
		if (pic_id-- == 0)
			return;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
#if PIC_MAXSOURCES > 32
		for (irq_base = 0, iblocked = pic->pic_blocked_irqs;
		     irq_base < pic->pic_maxsources;
		     irq_base += 32, iblocked++) {
			if ((blocked = *iblocked) != 0) {
				(*pic->pic_ops->pic_unblock_irqs)(pic,
				    irq_base, blocked);
				atomic_and_32(iblocked, ~blocked);
			}
		}
#else
		KASSERT(pic->pic_blocked_irqs[0] != 0);
		(*pic->pic_ops->pic_unblock_irqs)(pic,
		    0, pic->pic_blocked_irqs[0]);
		pic->pic_blocked_irqs[0] = 0;
#endif
		blocked_pics &= ~__BIT(pic_id);
	}
}
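The loop above walks a bitmask of pending PICs with ffs(). A stand-alone sketch of that scan pattern follows; visit_set_bits() is a hypothetical stand-in for the per-PIC work, not kernel code.

#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

/*
 * Take a snapshot of a pending mask, then repeatedly find its lowest
 * set bit with ffs(), handle that bit, and clear it from the local
 * copy until nothing is left.  ffs() numbers bits from 1 and returns 0
 * for an empty mask, hence the post-decrement test.
 */
static void
visit_set_bits(uint32_t mask)
{
	for (;;) {
		int bit = ffs((int)mask);
		if (bit-- == 0)
			return;
		printf("bit %d is set\n", bit);
		mask &= ~((uint32_t)1 << bit);
	}
}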
Example #10
/*
 * Stop the device and put it in a reset/quiesced state such
 * that the interface can be unregistered.
 */
void
ixgbe_m_stop(void *arg)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;

	mutex_enter(&ixgbe->gen_lock);

	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
		mutex_exit(&ixgbe->gen_lock);
		return;
	}

	atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);

	ixgbe_stop(ixgbe, B_TRUE);

	mutex_exit(&ixgbe->gen_lock);

	/*
	 * Disable and stop the watchdog timer
	 */
	ixgbe_disable_watchdog_timer(ixgbe);
}
Example #11
/*
 * Stop the device and put it in a reset/quiesced state such
 * that the interface can be unregistered.
 */
void
igb_m_stop(void *arg)
{
	igb_t *igb = (igb_t *)arg;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return;
	}

	atomic_and_32(&igb->igb_state, ~IGB_STARTED);

	igb_stop(igb, B_TRUE);

	mutex_exit(&igb->gen_lock);

	/*
	 * Disable and stop the watchdog timer
	 */
	igb_disable_watchdog_timer(igb);
}
Example #12
static caddr_t
apix_do_softint_prolog(struct cpu *cpu, uint_t pil, uint_t oldpil,
    caddr_t stackptr)
{
	kthread_t *t, *volatile it;
	struct machcpu *mcpu = &cpu->cpu_m;
	hrtime_t now;

	UNREFERENCED_1PARAMETER(oldpil);
	ASSERT(pil > mcpu->mcpu_pri && pil > cpu->cpu_base_spl);

	atomic_and_32((uint32_t *)&mcpu->mcpu_softinfo.st_pending, ~(1 << pil));

	mcpu->mcpu_pri = pil;

	now = tsc_read();

	/*
	 * Get set to run interrupt thread.
	 * There should always be an interrupt thread since we
	 * allocate one for each level on the CPU.
	 */
	it = cpu->cpu_intr_thread;
	ASSERT(it != NULL);
	cpu->cpu_intr_thread = it->t_link;

	/* t_intr_start could be zero due to cpu_intr_swtch_enter. */
	t = cpu->cpu_thread;
	if ((t->t_flag & T_INTR_THREAD) && t->t_intr_start != 0) {
		hrtime_t intrtime = now - t->t_intr_start;
		mcpu->intrstat[pil][0] += intrtime;
		cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
		t->t_intr_start = 0;
	}

	/*
	 * Note that the code in kcpc_overflow_intr -relies- on the
	 * ordering of events here - in particular that t->t_lwp of
	 * the interrupt thread is set to the pinned thread *before*
	 * curthread is changed.
	 */
	it->t_lwp = t->t_lwp;
	it->t_state = TS_ONPROC;

	/*
	 * Push the interrupted thread onto the new thread's list and make
	 * the new thread the current one.  Save the interrupted thread's
	 * stack pointer in t_sp because, if it is the idle thread, resume()
	 * may use that stack between threads.
	 */

	ASSERT(SA((uintptr_t)stackptr) == (uintptr_t)stackptr);
	t->t_sp = (uintptr_t)stackptr;

	it->t_intr = t;
	cpu->cpu_thread = it;

	/*
	 * Set bit for this pil in CPU's interrupt active bitmask.
	 */
	ASSERT((cpu->cpu_intr_actv & (1 << pil)) == 0);
	cpu->cpu_intr_actv |= (1 << pil);

	/*
	 * Initialize thread priority level from intr_pri
	 */
	it->t_pil = (uchar_t)pil;
	it->t_pri = (pri_t)pil + intr_pri;
	it->t_intr_start = now;

	return (it->t_stk);
}
Example #13
void
pic_deliver_irqs(struct pic_softc *pic, int ipl, void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct intrsource *is;
	volatile uint32_t *ipending = pic->pic_pending_irqs;
	volatile uint32_t *iblocked = pic->pic_blocked_irqs;
	size_t irq_base;
#if PIC_MAXSOURCES > 32
	size_t irq_count;
	int poi = 0;		/* Possibility of interrupting */
#endif
	uint32_t pending_irqs;
	uint32_t blocked_irqs;
	int irq;
	bool progress = false;
	
	KASSERT(pic->pic_pending_ipls & ipl_mask);

	irq_base = 0;
#if PIC_MAXSOURCES > 32
	irq_count = 0;
#endif

	for (;;) {
		pending_irqs = pic_find_pending_irqs_by_ipl(pic, irq_base,
		    *ipending, ipl);
		KASSERT((pending_irqs & *ipending) == pending_irqs);
		KASSERT((pending_irqs & ~(*ipending)) == 0);
		if (pending_irqs == 0) {
#if PIC_MAXSOURCES > 32
			irq_count += 32;
			if (__predict_true(irq_count >= pic->pic_maxsources)) {
				if (!poi)
					/*Interrupt at this level was handled.*/
					break;
				irq_base = 0;
				irq_count = 0;
				poi = 0;
				ipending = pic->pic_pending_irqs;
				iblocked = pic->pic_blocked_irqs;
			} else {
				irq_base += 32;
				ipending++;
				iblocked++;
				KASSERT(irq_base <= pic->pic_maxsources);
			}
			continue;
#else
			break;
#endif
		}
		progress = true;
		blocked_irqs = 0;
		do {
			irq = ffs(pending_irqs) - 1;
			KASSERT(irq >= 0);

			atomic_and_32(ipending, ~__BIT(irq));
			is = pic->pic_sources[irq_base + irq];
			if (is != NULL) {
				cpsie(I32_bit);
				pic_dispatch(is, frame);
				cpsid(I32_bit);
#if PIC_MAXSOURCES > 32
				/*
				 * An interrupt may have been taken between
				 * the cpsie() and cpsid() above.
				 */
				poi = 1;
#endif
				blocked_irqs |= __BIT(irq);
			} else {
				KASSERT(0);
			}
			pending_irqs = pic_find_pending_irqs_by_ipl(pic,
			    irq_base, *ipending, ipl);
		} while (pending_irqs);
		if (blocked_irqs) {
			atomic_or_32(iblocked, blocked_irqs);
			atomic_or_32(&pic_blocked_pics, __BIT(pic->pic_id));
		}
	}

	KASSERT(progress);
	/*
	 * Since interrupts are disabled, we don't have to be too careful
	 * about these.
	 */
	if (atomic_and_32_nv(&pic->pic_pending_ipls, ~ipl_mask) == 0)
		atomic_and_32(&pic_pending_pics, ~__BIT(pic->pic_id));
}
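The tail of pic_deliver_irqs() shows a two-level pending scheme: atomic_and_32_nv() returns the new value, so the PIC's bit in the global pending word is cleared only when no IPLs remain pending for that PIC. Here is a stand-alone sketch of the same idea using the __atomic builtins; clear_pending() and the names around it are illustrative, not the kernel's.

#include <stdint.h>

static volatile uint32_t global_pending;	/* one bit per object */

/*
 * Clear 'level_mask' in the per-object pending word; if that word is
 * now zero, also clear the object's bit in the global summary word.
 * __atomic_and_fetch() returns the new value, like atomic_and_32_nv().
 */
static void
clear_pending(volatile uint32_t *obj_pending, uint32_t level_mask,
    uint32_t obj_bit)
{
	if (__atomic_and_fetch(obj_pending, ~level_mask, __ATOMIC_RELAXED) == 0)
		(void) __atomic_fetch_and(&global_pending, ~obj_bit,
		    __ATOMIC_RELAXED);
}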
Example #14
/*
 * Common code for rdlock, timedrdlock, wrlock, timedwrlock, tryrdlock,
 * and trywrlock for process-private (USYNC_THREAD) rwlocks.
 */
int
rwlock_lock(rwlock_t *rwlp, timespec_t *tsp, int rd_wr)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;
	queue_head_t *qp;
	ulwp_t *ulwp;
	int try_flag;
	int ignore_waiters_flag;
	int error = 0;

	try_flag = (rd_wr & TRY_FLAG);
	rd_wr &= ~TRY_FLAG;
	ASSERT(rd_wr == READ_LOCK || rd_wr == WRITE_LOCK);

	if (!try_flag) {
		DTRACE_PROBE2(plockstat, rw__block, rwlp, rd_wr);
	}

	qp = queue_lock(rwlp, MX);
	/* initial attempt to acquire the lock fails if there are waiters */
	ignore_waiters_flag = 0;
	while (error == 0) {
		if (rd_wr == READ_LOCK) {
			if (read_lock_try(rwlp, ignore_waiters_flag))
				break;
		} else {
			if (write_lock_try(rwlp, ignore_waiters_flag))
				break;
		}
		/* subsequent attempts do not fail due to waiters */
		ignore_waiters_flag = 1;
		atomic_or_32(rwstate, URW_HAS_WAITERS);
		readers = *rwstate;
		ASSERT_CONSISTENT_STATE(readers);
		if ((readers & URW_WRITE_LOCKED) ||
		    (rd_wr == WRITE_LOCK &&
		    (readers & URW_READERS_MASK) != 0))
			/* EMPTY */;	/* somebody holds the lock */
		else if ((ulwp = queue_waiter(qp)) == NULL) {
			atomic_and_32(rwstate, ~URW_HAS_WAITERS);
			ignore_waiters_flag = 0;
			continue;	/* no queued waiters, start over */
		} else {
			/*
			 * Do a priority check on the queued waiter (the
			 * highest priority thread on the queue) to see
			 * if we should defer to him or just grab the lock.
			 */
			int our_pri = real_priority(self);
			int his_pri = real_priority(ulwp);

			if (rd_wr == WRITE_LOCK) {
				/*
				 * We defer to a queued thread that has
				 * a higher priority than ours.
				 */
				if (his_pri <= our_pri) {
					/*
					 * Don't defer, just grab the lock.
					 */
					continue;
				}
			} else {
				/*
				 * We defer to a queued thread that has
				 * a higher priority than ours or that
				 * is a writer whose priority equals ours.
				 */
				if (his_pri < our_pri ||
				    (his_pri == our_pri && !ulwp->ul_writer)) {
					/*
					 * Don't defer, just grab the lock.
					 */
					continue;
				}
			}
		}
		/*
		 * We are about to block.
		 * If we're doing a trylock, return EBUSY instead.
		 */
		if (try_flag) {
			error = EBUSY;
			break;
		}
		/*
		 * Enqueue writers ahead of readers.
		 */
		self->ul_writer = rd_wr;	/* *must* be 0 or 1 */
		enqueue(qp, self, 0);
		set_parking_flag(self, 1);
		queue_unlock(qp);
		if ((error = __lwp_park(tsp, 0)) == EINTR)
			error = 0;
		set_parking_flag(self, 0);
		qp = queue_lock(rwlp, MX);
		if (self->ul_sleepq && dequeue_self(qp) == 0) {
			atomic_and_32(rwstate, ~URW_HAS_WAITERS);
			ignore_waiters_flag = 0;
		}
		self->ul_writer = 0;
		if (rd_wr == WRITE_LOCK &&
		    (*rwstate & URW_WRITE_LOCKED) &&
		    rwlp->rwlock_owner == (uintptr_t)self) {
			/*
			 * We acquired the lock by hand-off
			 * from the previous owner.
			 */
			error = 0;	/* timedlock did not fail */
			break;
		}
	}

	/*
	 * Make one final check to see if there are any threads left
	 * on the rwlock queue.  Clear the URW_HAS_WAITERS flag if not.
	 */
	if (qp->qh_root == NULL || qp->qh_root->qr_head == NULL)
		atomic_and_32(rwstate, ~URW_HAS_WAITERS);

	queue_unlock(qp);

	if (!try_flag) {
		DTRACE_PROBE3(plockstat, rw__blocked, rwlp, rd_wr, error == 0);
	}

	return (error);
}
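The nested priority checks in the middle of rwlock_lock() boil down to a small predicate. The helper below is a hypothetical restatement for readability, not part of libc.

#include <stdbool.h>

/*
 * Decide whether the caller should defer to the highest-priority queued
 * waiter rather than grabbing the lock: a would-be writer defers only
 * to a strictly higher-priority waiter; a would-be reader additionally
 * defers to an equal-priority waiter that is a writer.
 */
static bool
should_defer(bool want_write, int our_pri, int his_pri, bool waiter_is_writer)
{
	if (want_write)
		return (his_pri > our_pri);
	return (his_pri > our_pri ||
	    (his_pri == our_pri && waiter_is_writer));
}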
Example #15
/*
 * Release a process-private rwlock and wake up any thread(s) sleeping on it.
 * This is called when a thread releases a lock that appears to have waiters.
 */
static void
rw_queue_release(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	queue_head_t *qp;
	uint32_t readers;
	uint32_t writer;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev;
	int nlwpid = 0;
	int more;
	int maxlwps = MAXLWPS;
	lwpid_t buffer[MAXLWPS];
	lwpid_t *lwpid = buffer;

	qp = queue_lock(rwlp, MX);

	/*
	 * Here is where we actually drop the lock,
	 * but we retain the URW_HAS_WAITERS flag, if it is already set.
	 */
	readers = *rwstate;
	ASSERT_CONSISTENT_STATE(readers);
	if (readers & URW_WRITE_LOCKED)	/* drop the writer lock */
		atomic_and_32(rwstate, ~URW_WRITE_LOCKED);
	else				/* drop the readers lock */
		atomic_dec_32(rwstate);
	if (!(readers & URW_HAS_WAITERS)) {	/* no waiters */
		queue_unlock(qp);
		return;
	}

	/*
	 * The presence of the URW_HAS_WAITERS flag causes all rwlock
	 * code to go through the slow path, acquiring queue_lock(qp).
	 * Therefore, the rest of this code is safe because we are
	 * holding the queue lock and the URW_HAS_WAITERS flag is set.
	 */

	readers = *rwstate;		/* must fetch the value again */
	ASSERT_CONSISTENT_STATE(readers);
	ASSERT(readers & URW_HAS_WAITERS);
	readers &= URW_READERS_MASK;	/* count of current readers */
	writer = 0;			/* no current writer */

	/*
	 * Examine the queue of waiters in priority order and prepare
	 * to wake up as many readers as we encounter before encountering
	 * a writer.  If the highest priority thread on the queue is a
	 * writer, stop there and wake it up.
	 *
	 * We keep track of lwpids that are to be unparked in lwpid[].
	 * __lwp_unpark_all() is called to unpark all of them after
	 * they have been removed from the sleep queue and the sleep
	 * queue lock has been dropped.  If we run out of space in our
	 * on-stack buffer, we need to allocate more but we can't call
	 * lmalloc() because we are holding a queue lock when the overflow
	 * occurs and lmalloc() acquires a lock.  We can't use alloca()
	 * either because the application may have allocated a small
	 * stack and we don't want to overrun the stack.  So we call
	 * alloc_lwpids() to allocate a bigger buffer using the mmap()
	 * system call directly since that path acquires no locks.
	 */
	while ((ulwpp = queue_slot(qp, &prev, &more)) != NULL) {
		ulwp = *ulwpp;
		ASSERT(ulwp->ul_wchan == rwlp);
		if (ulwp->ul_writer) {
			if (writer != 0 || readers != 0)
				break;
			/* one writer to wake */
			writer++;
		} else {
			if (writer != 0)
				break;
			/* at least one reader to wake */
			readers++;
			if (nlwpid == maxlwps)
				lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps);
		}
		queue_unlink(qp, ulwpp, prev);
		ulwp->ul_sleepq = NULL;
		ulwp->ul_wchan = NULL;
		if (writer) {
			/*
			 * Hand off the lock to the writer we will be waking.
			 */
			ASSERT((*rwstate & ~URW_HAS_WAITERS) == 0);
			atomic_or_32(rwstate, URW_WRITE_LOCKED);
			rwlp->rwlock_owner = (uintptr_t)ulwp;
		}
		lwpid[nlwpid++] = ulwp->ul_lwpid;
	}

	/*
	 * This modification of rwstate must be done last.
	 * The presence of the URW_HAS_WAITERS flag causes all rwlock
	 * code to go through the slow path, acquiring queue_lock(qp).
	 * Otherwise the read_lock_try() and write_lock_try() fast paths
	 * are effective.
	 */
	if (ulwpp == NULL)
		atomic_and_32(rwstate, ~URW_HAS_WAITERS);

	if (nlwpid == 0) {
		queue_unlock(qp);
	} else {
		ulwp_t *self = curthread;
		no_preempt(self);
		queue_unlock(qp);
		if (nlwpid == 1)
			(void) __lwp_unpark(lwpid[0]);
		else
			(void) __lwp_unpark_all(lwpid, nlwpid);
		preempt(self);
	}
	if (lwpid != buffer)
		(void) munmap((caddr_t)lwpid, maxlwps * sizeof (lwpid_t));
}