Example #1
static
void
cvtestthread(void *junk, unsigned long num)
{
	int i;
	volatile int j;
	time_t secs1, secs2;
	uint32_t nsecs1, nsecs2;

	(void)junk;

	for (i=0; i<NCVLOOPS; i++) {
		lock_acquire(testlock);
		while (testval1 != num) {
			gettime(&secs1, &nsecs1);
			cv_wait(testcv, testlock);
			gettime(&secs2, &nsecs2);

			if (nsecs2 < nsecs1) {
				secs2--;
				nsecs2 += 1000000000;
			}
			
			nsecs2 -= nsecs1;
			secs2 -= secs1;

			/* Require at least 2000 cpu cycles (we're 25mhz) */
			if (secs2==0 && nsecs2 < 40*2000) {
				kprintf("cv_wait took only %u ns\n", nsecs2);
				kprintf("That's too fast... you must be "
					"busy-looping\n");
				V(donesem);
				thread_exit();
			}

		}
		kprintf("Thread %lu\n", num);
		testval1 = (testval1 + NTHREADS - 1)%NTHREADS;

		/*
		 * loop a little while to make sure we can measure the
		 * time waiting on the cv.
		 */
		for (j=0; j<3000; j++);

		cv_broadcast(testcv, testlock);
		lock_release(testlock);
	}
	V(donesem);
}
Example #2
void
down(struct semaphore *s)
{

	mtx_lock(&s->mtx);
	while (s->value == 0) {
		s->waiters++;
		cv_wait(&s->cv, &s->mtx);
		s->waiters--;
	}

	s->value--;
	mtx_unlock(&s->mtx);
}
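The release side of this semaphore is symmetric: bump the count and signal the CV only when a waiter is recorded. A minimal sketch of such an "up" routine, assuming the same struct semaphore fields (mtx, cv, value, waiters) used above; it is illustrative, not code from the same source.

void
up(struct semaphore *s)
{

	mtx_lock(&s->mtx);
	s->value++;
	/* Wake one sleeper in down() only if the waiter count says there is one. */
	if (s->waiters > 0)
		cv_signal(&s->cv);
	mtx_unlock(&s->mtx);
}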
Example #3
void
afs_osi_Sleep(void *event)
{
    struct afs_event *evp;
    int seq;

    evp = afs_getevent(event);
    seq = evp->seq;
    while (seq == evp->seq) {
	AFS_ASSERT_GLOCK();
	cv_wait(&evp->cond, &afs_global_lock);
    }
    relevent(evp);
}
Example #4
static
void
cat(void * unusedpointer, 
    unsigned long catnumber)
{
        int whichBowl = 0; // 0 = neither bowl, 1 = bowlOne, 2 = bowlTwo

	(void) unusedpointer;
        //(void) catnumber;	

	lock_acquire(l);
	//wait until there are no mice eating and there is at most 1 cat eating
	while((catsEating > 1) || (miceEating > 0))
	{
		cv_wait(c, l);
	}
	
	//choose a bowl
	if(!bowlOneUsed){
		bowlOneUsed = 1;
		whichBowl = 1;
	}
	else if(!bowlTwoUsed){
		bowlTwoUsed = 1;
		whichBowl = 2;
	}
	else
		panic("No bowl open for cat, even though we waited for an open bowl");

	catsEating++;

	//cat eats
	lock_release(l);
	kprintf("cat %lu starts eating at bowl %d \n", catnumber, whichBowl);
	clocksleep(1);
	kprintf("cat %lu done eating at bowl %d \n", catnumber, whichBowl);
	lock_acquire(l);

	
	if(whichBowl == 1)
		bowlOneUsed = 0;
	else if (whichBowl == 2)
		bowlTwoUsed = 0;
	else
		panic("Neither bowl was used (cat)!");
	catsEating--;
	
	cv_broadcast(c, l);
	lock_release(l);
}
Example #5
void
flush_workqueue(struct workqueue_struct *wq)
{
	static const struct wq_flush zero_wqf;
	struct wq_flush wqf = zero_wqf;

	mutex_init(&wqf.wqf_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&wqf.wqf_cv, "lnxwflsh");

	if (1) {
		struct wq_flush_work *const wqfw = kmem_zalloc(sizeof(*wqfw),
		    KM_SLEEP);

		wqf.wqf_n = 1;
		wqfw->wqfw_flush = &wqf;
		INIT_WORK(&wqfw->wqfw_work, &linux_wq_barrier);
		wqfw->wqfw_work.w_wq = wq;
		wqfw->wqfw_work.w_state = WORK_PENDING;
		workqueue_enqueue(wq->wq_workqueue, &wqfw->wqfw_work.w_wk,
		    NULL);
	} else {
		struct cpu_info *ci;
		CPU_INFO_ITERATOR cii;
		struct wq_flush_work *wqfw;

		panic("per-CPU Linux workqueues don't work yet!");

		wqf.wqf_n = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			wqfw = kmem_zalloc(sizeof(*wqfw), KM_SLEEP);
			mutex_enter(&wqf.wqf_lock);
			wqf.wqf_n++;
			mutex_exit(&wqf.wqf_lock);
			wqfw->wqfw_flush = &wqf;
			INIT_WORK(&wqfw->wqfw_work, &linux_wq_barrier);
			wqfw->wqfw_work.w_state = WORK_PENDING;
			wqfw->wqfw_work.w_wq = wq;
			workqueue_enqueue(wq->wq_workqueue,
			    &wqfw->wqfw_work.w_wk, ci);
		}
	}

	mutex_enter(&wqf.wqf_lock);
	while (0 < wqf.wqf_n)
		cv_wait(&wqf.wqf_cv, &wqf.wqf_lock);
	mutex_exit(&wqf.wqf_lock);

	cv_destroy(&wqf.wqf_cv);
	mutex_destroy(&wqf.wqf_lock);
}
Example #6
static void
nandfs_wakeup_wait_cleaner(struct nandfs_device *fsdev, int reason)
{

	mtx_lock(&fsdev->nd_clean_mtx);
	if (reason == NANDFS_CLEANER_KILL)
		fsdev->nd_cleaner_exit = 1;
	if (fsdev->nd_cleaning == 0) {
		fsdev->nd_cleaning = 1;
		wakeup(&fsdev->nd_cleaning);
	}
	cv_wait(&fsdev->nd_clean_cv, &fsdev->nd_clean_mtx);
	mtx_unlock(&fsdev->nd_clean_mtx);
}
Example #7
/*------------------------------------------------------------------------*
 *	usb_proc_drain
 *
 * This function will tear down a USB process, waiting for the
 * currently executing command to return.
 *
 * NOTE: If the structure pointed to by "up" is all zero,
 * this function does nothing.
 *------------------------------------------------------------------------*/
void
usb_proc_drain(struct usb_process *up)
{
	/* check if not initialised */
	if (up->up_mtx == NULL)
		return;
	/* handle special case with Giant */
	if (up->up_mtx != &Giant)
		mtx_assert(up->up_mtx, MA_NOTOWNED);

	mtx_lock(up->up_mtx);

	/* Set the gone flag */

	up->up_gone = 1;

	while (up->up_ptr) {

		/* Check if we need to wakeup the USB process */

		if (up->up_msleep || up->up_csleep) {
			up->up_msleep = 0;
			up->up_csleep = 0;
			cv_signal(&up->up_cv);
		}
		/* Check if we are still cold booted */

		if (cold) {
#ifndef __rtems__
			USB_THREAD_SUSPEND(up->up_ptr);
			printf("WARNING: A USB process has "
			    "been left suspended\n");
			break;
#else /* __rtems__ */
                        BSD_ASSERT(0);
#endif /* __rtems__ */
		}
		cv_wait(&up->up_cv, up->up_mtx);
	}
	/* Check if someone is waiting - should not happen */

	if (up->up_dsleep) {
		up->up_dsleep = 0;
		cv_broadcast(&up->up_drain);
		DPRINTF("WARNING: Someone is waiting "
		    "for USB process drain!\n");
	}
	mtx_unlock(up->up_mtx);
}
Example #8
/* remove a vldc port */
static int
i_vldc_remove_port(vldc_t *vldcp, uint_t portno)
{
	vldc_port_t *vport;
	vldc_minor_t *vminor;

	ASSERT(vldcp != NULL);
	ASSERT(MUTEX_HELD(&vldcp->lock));

	vport = &(vldcp->port[portno]);
	vminor = vport->minorp;
	if (vminor == NULL) {
		cmn_err(CE_NOTE, "?i_vldc_remove_port: trying to remove a "
		    "port (%u) which is not bound", portno);
		return (MDEG_FAILURE);
	}

	/*
	 * Make sure that all new attempts to open or use the minor node
	 * associated with the port will fail.
	 */
	mutex_enter(&vminor->lock);
	vminor->portno = VLDC_INVALID_PORTNO;
	mutex_exit(&vminor->lock);

	/* send hangup to anyone polling */
	pollwakeup(&vport->poll, POLLHUP);

	/* Now wait for all current users of the minor node to finish. */
	mutex_enter(&vminor->lock);
	while (vminor->in_use > 0) {
		cv_wait(&vminor->cv, &vminor->lock);
	}

	if (vport->status != VLDC_PORT_CLOSED) {
		/* close the port before it is torn down */
		(void) i_vldc_close_port(vldcp, portno);
	}

	/* remove minor node */
	ddi_remove_minor_node(vldcp->dip, vport->minorp->sname);
	vport->minorp = NULL;

	mutex_exit(&vminor->lock);

	D1("i_vldc_remove_port: removed vldc port %u\n", portno);

	return (MDEG_SUCCESS);
}
Example #9
void *thread_cli_fun(void *arg) {
    double          busy;
    int             inx;
    Msg            *msg;
    struct rusage   r_start;
    struct rusage   r_stop;
    struct timeval  t_elapsed_data;
    struct timeval  t_elapsed_total;
    struct timeval  t_start_data;
    struct timeval  t_start_total;
    struct timeval  t_stop;

    arg = arg; // touch
    util_time_timer_start(&t_start_total);
    util_time_timer_start(&t_start_data);
    util_cpu_timer_start(&r_start);
    for (inx = 0; inx < loop; inx++) {
        if (verbose)
            printf("count=%d\n", inx);
        msg = msg_queue_remove(&mutex_pool, &queue_pool);
        assert(msg != NULL);
        if (csize) {
            msg->cbuf = (char *) malloc(csize);
            memcpy(msg->cbuf, send_buffer2, csize);
        }
        if (dsize) {
            msg->dbuf = (char *) malloc(dsize);
            memcpy(msg->dbuf, send_buffer, dsize);
        }
        msg_queue_add(&mutex_srv, &queue_srv, msg);
        cv_signal(&cv_srv);
        cv_wait(&cv_cli);
        msg = msg_queue_remove(&mutex_cli, &queue_cli);
        assert(msg != NULL);
        msg_queue_add(&mutex_pool, &queue_pool, msg);
    }
    util_cpu_timer_stop(&r_stop);
    util_time_timer_stop(&t_stop);
    util_time_elapsed(&t_start_total, &t_stop, &t_elapsed_total);
    util_time_elapsed(&t_start_data, &t_stop, &t_elapsed_data);
    util_cpu_timer_busy(&r_start, &r_stop, &t_elapsed_data, &busy);

    if (!bm) {
        print_elapsed("", &t_elapsed_total);
        print_elapsed(" (data)", &t_elapsed_data);
    }
    print_rate(bm, "", bidir ? 2 * loop : loop, dsize, &t_elapsed_data, busy);
    return NULL;
}
Example #10
/* Like sleep, but it will timeout in 'usec' microseconds. */
void rendez_sleep_timeout(struct rendez *rv, int (*cond)(void*), void *arg,
                          uint64_t usec)
{
	int8_t irq_state = 0;
	struct alarm_waiter awaiter;
	struct cv_lookup_elm cle;
	struct timer_chain *pcpui_tchain = &per_cpu_info[core_id()].tchain;

	if (!usec)
		return;
	/* Doing this cond check early, but then unlocking again.  Mostly just to
	 * avoid weird issues with the CV lock and the alarm tchain lock. */
	cv_lock_irqsave(&rv->cv, &irq_state);
	if (cond(arg)) {
		cv_unlock_irqsave(&rv->cv, &irq_state);
		return;
	}
	cv_unlock_irqsave(&rv->cv, &irq_state);
	/* The handler will call rendez_wake, but won't mess with the condition
	 * state.  It's enough to break us out of cv_wait() to see .on_tchain. */
	init_awaiter(&awaiter, rendez_alarm_handler);
	awaiter.data = rv;
	set_awaiter_rel(&awaiter, usec);
	/* Set our alarm on this cpu's tchain.  Note that when we sleep in cv_wait,
	 * we could be migrated, and later on we could be unsetting the alarm
	 * remotely. */
	set_alarm(pcpui_tchain, &awaiter);
	cv_lock_irqsave(&rv->cv, &irq_state);
	__reg_abortable_cv(&cle, &rv->cv);
	/* We could wake early for a few reasons.  Legit wakeups after a changed
	 * condition (and we should exit), other alarms with different timeouts (and
	 * we should go back to sleep), etc.  Note it is possible for our alarm to
	 * fire immediately upon setting it: before we even cv_lock. */
	while (!cond(arg) && awaiter.on_tchain) {
		if (should_abort(&cle)) {
			cv_unlock_irqsave(&rv->cv, &irq_state);
			unset_alarm(pcpui_tchain, &awaiter);
			dereg_abortable_cv(&cle);
			error(EINTR, "syscall aborted");
		}
		cv_wait(&rv->cv);
		cpu_relax();
	}
	cv_unlock_irqsave(&rv->cv, &irq_state);
	dereg_abortable_cv(&cle);
	/* Turn off our alarm.  If it already fired, this is a no-op.  Note this
	 * could be cross-core. */
	unset_alarm(pcpui_tchain, &awaiter);
}
Example #11
int
cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int rval = 1;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return (rval);
	}

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t))
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	mutex_enter(mp);
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0 && signalled)	/* avoid consuming the cv_signal() */
		cv_signal(cvp);
	return (rval);
}
Example #12
void
zfs_delete_thread(void *arg)
{
	zfsvfs_t	*zfsvfs = arg;
	zfs_delete_t 	*zd = &zfsvfs->z_delete_head;
	znode_t		*zp;
	callb_cpr_t	cprinfo;
	int		drained;

	CALLB_CPR_INIT(&cprinfo, &zd->z_mutex, callb_generic_cpr, "zfs_delete");

	mutex_enter(&zd->z_mutex);

	if (!zd->z_drained && !zd->z_draining) {
		zd->z_draining = B_TRUE;
		mutex_exit(&zd->z_mutex);
		drained = zfs_drain_dq(zfsvfs);
		mutex_enter(&zd->z_mutex);
		zd->z_draining = B_FALSE;
		zd->z_drained = drained;
		cv_broadcast(&zd->z_quiesce_cv);
	}

	while (zd->z_thread_count <= zd->z_thread_target) {
		zp = list_head(&zd->z_znodes);
		if (zp == NULL) {
			ASSERT(zd->z_znode_count == 0);
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&zd->z_cv, &zd->z_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &zd->z_mutex);
			continue;
		}
		ASSERT(zd->z_znode_count != 0);
		list_remove(&zd->z_znodes, zp);
		if (--zd->z_znode_count == 0)
			cv_broadcast(&zd->z_quiesce_cv);
		mutex_exit(&zd->z_mutex);
		zfs_rmnode(zp);
		(void) zfs_delete_thread_target(zfsvfs, -1);
		mutex_enter(&zd->z_mutex);
	}

	ASSERT(zd->z_thread_count != 0);
	if (--zd->z_thread_count == 0)
		cv_broadcast(&zd->z_cv);

	CALLB_CPR_EXIT(&cprinfo);	/* NB: drops z_mutex */
	thread_exit();
}
Example #13
/*
 * xpvtap_user_thread_stop()
 */
static void
xpvtap_user_thread_stop(xpvtap_state_t *state)
{
	/* wake thread so it can exit */
	mutex_enter(&state->bt_thread.ut_mutex);
	state->bt_thread.ut_wake = B_TRUE;
	state->bt_thread.ut_exit = B_TRUE;
	cv_signal(&state->bt_thread.ut_wake_cv);
	if (!state->bt_thread.ut_exit_done) {
		cv_wait(&state->bt_thread.ut_exit_done_cv,
		    &state->bt_thread.ut_mutex);
	}
	mutex_exit(&state->bt_thread.ut_mutex);
	ASSERT(state->bt_thread.ut_exit_done);
}
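For the cv_wait above to return, the user thread has to acknowledge the stop request before it goes away. A sketch of that acknowledging side, inferred only from the fields used above (ut_mutex, ut_exit, ut_exit_done, ut_exit_done_cv) rather than taken from the driver itself:

	/* Sketch: inside the user thread, once it notices the exit request. */
	mutex_enter(&state->bt_thread.ut_mutex);
	if (state->bt_thread.ut_exit) {
		state->bt_thread.ut_exit_done = B_TRUE;
		cv_signal(&state->bt_thread.ut_exit_done_cv);
		mutex_exit(&state->bt_thread.ut_mutex);
		thread_exit();
	}
	mutex_exit(&state->bt_thread.ut_mutex);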
Example #14
static void
filemon_lock_read(void)
{
	mtx_lock(&access_mtx);

	while (access_owner != NULL || access_requester != NULL)
		cv_wait(&access_cv, &access_mtx);

	n_readers++;

	/* Wake up threads waiting. */
	cv_broadcast(&access_cv);

	mtx_unlock(&access_mtx);
}
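The corresponding read-unlock has to drop the reader count and broadcast so a blocked writer can make progress. A sketch under the assumption that the same access_mtx, access_cv, and n_readers globals are shared; the module's real unlock path may differ.

static void
filemon_unlock_read(void)
{
	mtx_lock(&access_mtx);

	if (n_readers > 0)
		n_readers--;

	/* Last reader out: wake anything blocked waiting for the lock. */
	if (n_readers == 0)
		cv_broadcast(&access_cv);

	mtx_unlock(&access_mtx);
}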
Example #15
int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
#ifdef DIAGNOSTIC
	extern int hz;
#endif

	if (ticks == 0) {
		cv_wait(cv, mtx);
		return 0;
	} else {
		KASSERT(hz == 100);
		return rumpuser_cv_timedwait(RUMPCV(cv), mtx->kmtx_mtx, ticks);
	}
}
Example #16
static int do_test(struct taskq *tq, char *desc)
{
	mutex_lock(&tq_mutex);
	tq_done = false;
	mutex_unlock(&tq_mutex);

	if (taskq_dispatch(tq, tq_test_func, desc, 0) == 0)
		return 1;

	mutex_lock(&tq_mutex);
	while (!tq_done)
		cv_wait(&tq_wait, &tq_mutex);
	mutex_unlock(&tq_mutex);
	return 0;
}
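The wait loop above only terminates if the dispatched function sets tq_done under tq_mutex and signals tq_wait. A minimal sketch of such a task function; the real tq_test_func presumably also performs the work being measured.

static void tq_test_func(void *arg)
{
	char *desc = arg;

	/* ... the actual work being tested would go here ... */
	(void)desc;

	mutex_lock(&tq_mutex);
	tq_done = true;
	cv_signal(&tq_wait);
	mutex_unlock(&tq_mutex);
}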
Example #17
void
e_ddi_enter_instance()
{
	mutex_enter(&e_ddi_inst_state.ins_serial);
	if (e_ddi_inst_state.ins_thread == curthread)
		e_ddi_inst_state.ins_busy++;
	else {
		while (e_ddi_inst_state.ins_busy)
			cv_wait(&e_ddi_inst_state.ins_serial_cv,
			    &e_ddi_inst_state.ins_serial);
		e_ddi_inst_state.ins_thread = curthread;
		e_ddi_inst_state.ins_busy = 1;
	}
	mutex_exit(&e_ddi_inst_state.ins_serial);
}
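The ins_busy count is what makes re-entry by the owning thread cheap; the exit side must decrement it and signal the CV when the last nested entry is released. A plausible sketch of the companion exit routine, inferred from the fields above rather than copied from the source:

void
e_ddi_exit_instance()
{
	mutex_enter(&e_ddi_inst_state.ins_serial);
	e_ddi_inst_state.ins_busy--;
	if (e_ddi_inst_state.ins_busy == 0) {
		/* Outermost entry released: give up ownership and wake a waiter. */
		e_ddi_inst_state.ins_thread = NULL;
		cv_signal(&e_ddi_inst_state.ins_serial_cv);
	}
	mutex_exit(&e_ddi_inst_state.ins_serial);
}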
Example #18
/*------------------------------------------------------------------------*
 *	usb_proc_drain
 *
 * This function will tear down a USB process, waiting for the
 * currently executing command to return.
 *
 * NOTE: If the structure pointed to by "up" is all zero,
 * this function does nothing.
 *------------------------------------------------------------------------*/
void
usb_proc_drain(struct usb_process *up)
{
	/* check if not initialised */
	if (up->up_lock == NULL)
		return;
#if 0 /* XXX */
	/* handle special case with Giant */
	if (up->up_mtx != &Giant)
		mtx_assert(up->up_mtx, MA_NOTOWNED);
#else
	KKASSERT(!lockowned(up->up_lock));
	lockmgr(up->up_lock, LK_EXCLUSIVE);
#endif

	/* Set the gone flag */

	up->up_gone = 1;

	while (up->up_ptr) {

		/* Check if we need to wakeup the USB process */

		if (up->up_msleep || up->up_csleep) {
			up->up_msleep = 0;
			up->up_csleep = 0;
			cv_signal(&up->up_cv);
		}
		/* Check if we are still cold booted */

		if (cold) {
			USB_THREAD_SUSPEND(up->up_ptr);
			kprintf("WARNING: A USB process has "
			    "been left suspended\n");
			break;
		}
		cv_wait(&up->up_cv, up->up_lock);
	}
	/* Check if someone is waiting - should not happen */

	if (up->up_dsleep) {
		up->up_dsleep = 0;
		cv_broadcast(&up->up_drain);
		DPRINTF("WARNING: Someone is waiting "
		    "for USB process drain!\n");
	}
	lockmgr(up->up_lock, LK_RELEASE);
}
Example #19
ACPI_STATUS
acpi_sema_p(acpi_sema_t *sp, unsigned count, uint16_t wait_time)
{
	ACPI_STATUS rv = AE_OK;
	clock_t deadline;

	mutex_enter(&sp->mutex);

	if (sp->available >= count) {
		/*
		 * Enough units available, no blocking
		 */
		sp->available -= count;
		mutex_exit(&sp->mutex);
		return (rv);
	} else if (wait_time == 0) {
		/*
		 * Not enough units available and timeout
		 * specifies no blocking
		 */
		rv = AE_TIME;
		mutex_exit(&sp->mutex);
		return (rv);
	}

	/*
	 * Not enough units available and timeout specifies waiting
	 */
	if (wait_time != ACPI_WAIT_FOREVER)
		deadline = ddi_get_lbolt() +
		    (clock_t)drv_usectohz(wait_time * 1000);

	do {
		if (wait_time == ACPI_WAIT_FOREVER)
			cv_wait(&sp->cv, &sp->mutex);
		else if (cv_timedwait(&sp->cv, &sp->mutex, deadline) < 0) {
			rv = AE_TIME;
			break;
		}
	} while (sp->available < count);

	/* if we dropped out of the wait with AE_OK, we got the units */
	if (rv == AE_OK)
		sp->available -= count;

	mutex_exit(&sp->mutex);
	return (rv);
}
Example #20
/*
 * These allow for recursive entry.  This is necessary to facilitate
 * simpler locking with things like the fault handler, where a caller
 * might already be "holding" the slot.
 *
 * This is modeled in part after ndi_devi_enter and ndi_devi_exit.
 */
void
sda_slot_enter(sda_slot_t *slot)
{
	kt_did_t	self = ddi_get_kt_did();
	mutex_enter(&slot->s_lock);
	if (slot->s_owner == self) {
		slot->s_circular++;
	} else {
		while ((slot->s_owner != 0) && (slot->s_owner != self)) {
			cv_wait(&slot->s_cv, &slot->s_lock);
		}
		slot->s_owner = self;
		slot->s_circular++;
	}
	mutex_exit(&slot->s_lock);
}
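The exit path mirrors this: undo one level of recursion and, only when the outermost hold is released, clear the owner and wake waiters. A sketch assuming the same s_lock, s_cv, s_owner, and s_circular fields; the project's actual sda_slot_exit() may differ in detail.

void
sda_slot_exit(sda_slot_t *slot)
{
	mutex_enter(&slot->s_lock);
	slot->s_circular--;
	if (slot->s_circular == 0) {
		/* Outermost hold released: hand the slot to the next waiter. */
		slot->s_owner = 0;
		cv_broadcast(&slot->s_cv);
	}
	mutex_exit(&slot->s_lock);
}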
Example #21
static void
isadma_dmawait(isadma_devstate_t *isadmap)
{

	ASSERT(mutex_owned(&isadmap->isadma_access_lock));

	/* Wait loop, if the locking dip is set, we wait. */
	while (isadmap->isadma_ldip != NULL) {

		isadmap->isadma_want++;
		cv_wait(&isadmap->isadma_access_cv,
		    &isadmap->isadma_access_lock);
		isadmap->isadma_want--;
		isadma_sleep_cnt++;
	}
}
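The isadma_want count is only useful if the release path checks it before signalling. A hypothetical wakeup helper (the name isadma_dmawakeup is invented here for illustration) showing the intended counterpart:

static void
isadma_dmawakeup(isadma_devstate_t *isadmap)	/* hypothetical name */
{
	ASSERT(mutex_owned(&isadmap->isadma_access_lock));

	/* Drop the locking dip and wake the threads counted in isadma_want. */
	isadmap->isadma_ldip = NULL;
	if (isadmap->isadma_want > 0)
		cv_broadcast(&isadmap->isadma_access_cv);
}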
Example #22
File: rrwlock.c Project: AB17/zfs
void
rrw_enter_write(rrwlock_t *rrl)
{
	mutex_enter(&rrl->rr_lock);
	ASSERT(rrl->rr_writer != curthread);

	while (refcount_count(&rrl->rr_anon_rcount) > 0 ||
	    refcount_count(&rrl->rr_linked_rcount) > 0 ||
	    rrl->rr_writer != NULL) {
		rrl->rr_writer_wanted = B_TRUE;
		cv_wait(&rrl->rr_cv, &rrl->rr_lock);
	}
	rrl->rr_writer_wanted = B_FALSE;
	rrl->rr_writer = curthread;
	mutex_exit(&rrl->rr_lock);
}
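Releasing the write side is the simpler half: clear rr_writer and broadcast so blocked readers and writers can re-check their conditions. A sketch of just that step, with an invented name to make clear it is illustrative; the project's real rrw_exit() also handles the reader counts.

static void
rrw_exit_write_sketch(rrwlock_t *rrl)	/* illustrative only */
{
	mutex_enter(&rrl->rr_lock);
	ASSERT(rrl->rr_writer == curthread);
	rrl->rr_writer = NULL;
	/* Wake both waiting readers and waiting writers. */
	cv_broadcast(&rrl->rr_cv);
	mutex_exit(&rrl->rr_lock);
}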
Example #23
/*
 * _sd_recovery_wait()
 *   while _sd_node_recovery is set, accesses to mirrored devices will block
 *   (_sd_node_recovery-1) is count of blocked threads.
 */
int
_sd_recovery_wait(void)
{
	int blk;

	mutex_enter(&_sd_ft_data.fi_lock);
	blk = _sd_node_recovery ? _sd_node_recovery++ : 0;

	if (blk)
		cv_wait(&_sd_ft_data.fi_rem_sv, &_sd_ft_data.fi_lock);
	mutex_exit(&_sd_ft_data.fi_lock);

	if (!_sd_cache_initialized)
		return (EINVAL);
	return (0);
}
Example #24
pid_t sys_waitpid(pid_t pid, int *status, int options) {
	
	if(options != 0) { // only support option 0.
		errno = EINVAL;
		return -1;
	}
	if(pid == curproc->p_pid) {
		return pid;
	}

	struct exitc *c; 

	// status pointer validation
	vaddr_t sp = (vaddr_t)status;
	struct addrspace *as = curproc->p_addrspace;
	if (as != NULL && !valid_address_check(as, sp)) { // out of vaddr boundary for this proc
		errno = EFAULT;
		return -1;
	}

	struct proc* p = find_proc(pid); // search for process

	if(p != NULL) {
		lock_acquire(p->p_lk);
		while(find_proc(pid) != NULL) {
			cv_wait(p->p_cv, p->p_lk);
		}
		lock_release(p->p_lk);
	}

	c = find_exitc(pid);
	if(c) {
		*status = c->exitcode;
	} else {
		errno = ESRCH;
		return -1;
	}

	/*unsigned num = exitcarray_num(codes);
	for (unsigned i = 0 ; i < num ; i++) {
		if (exitcarray_get(codes, i) == c) {
			exitcarray_remove(codes,i);
		}
	}*/

	return pid;
}
Example #25
/*
 * Given a client struct, inspect the callback info to see if the
 * callback path is up and available.
 *
 * If new callback path is available and no one has set it up then
 * try to set it up. If setup is not successful after 5 tries (5 secs)
 * then gives up and returns NULL.
 *
 * If callback path is being initialized, then wait for the CB_NULL RPC
 * call to occur.
 */
static rfs4_cbinfo_t *
rfs4_cbinfo_hold(rfs4_client_t *cp)
{
	rfs4_cbinfo_t *cbp = &cp->rc_cbinfo;
	int retries = 0;

	mutex_enter(cbp->cb_lock);

	while (cbp->cb_newer.cb_new == TRUE && cbp->cb_nullcaller == FALSE) {
		/*
		 * Looks like a new callback path may be available and
		 * no one has set it up.
		 */
		mutex_exit(cbp->cb_lock);
		rfs4_dbe_hold(cp->rc_dbe);
		rfs4_do_cb_null(cp); /* caller will release client hold */

		mutex_enter(cbp->cb_lock);
		/*
		 * If callback path is no longer new, or it's being setup
		 * then stop and wait for it to be done.
		 */
		if (cbp->cb_newer.cb_new == FALSE || cbp->cb_nullcaller == TRUE)
			break;
		mutex_exit(cbp->cb_lock);

		if (++retries >= rfs4_max_setup_cb_tries)
			return (NULL);
		delay(hz);
		mutex_enter(cbp->cb_lock);
	}

	/* Is there a thread working on doing the CB_NULL RPC? */
	if (cbp->cb_nullcaller == TRUE)
		cv_wait(cbp->cb_cv, cbp->cb_lock);  /* if so, wait on it */

	/* If the callback path is not okay (up and running), just quit */
	if (cbp->cb_state != CB_OK) {
		mutex_exit(cbp->cb_lock);
		return (NULL);
	}

	/* Let someone know we are using the current callback info */
	cbp->cb_refcnt++;
	mutex_exit(cbp->cb_lock);
	return (cbp);
}
Example #26
/*
 * Stop the squeue from polling. This needs to be done
 * from inside the perimeter.
 */
void
ip_squeue_quiesce_ring(ill_t *ill, ill_rx_ring_t *rx_ring)
{
	squeue_t *sqp;

	ASSERT(ILL_MAC_PERIM_HELD(ill));
	ASSERT(rx_ring != NULL);

	sqp = rx_ring->rr_sqp;
	mutex_enter(&sqp->sq_lock);
	sqp->sq_state |= SQS_POLL_QUIESCE;
	cv_signal(&sqp->sq_worker_cv);
	while (!(sqp->sq_state & SQS_POLL_QUIESCE_DONE))
		cv_wait(&sqp->sq_ctrlop_done_cv, &sqp->sq_lock);

	mutex_exit(&sqp->sq_lock);
}
Example #27
void
_sema_wait(struct sema *sema, const char *file, int line)
{

	mtx_lock(&sema->sema_mtx);
	while (sema->sema_value == 0) {
		sema->sema_waiters++;
		cv_wait(&sema->sema_cv, &sema->sema_mtx);
		sema->sema_waiters--;
	}
	sema->sema_value--;

	CTR6(KTR_LOCK, "%s(%p) \"%s\" v = %d at %s:%d", __func__, sema,
	    cv_wmesg(&sema->sema_cv), sema->sema_value, file, line);

	mtx_unlock(&sema->sema_mtx);
}
Example #28
/*
Waitpid waits for a process to exit. One of several things could happen here.
First, we need to find out what the process is actually doing. We get the process by its PID.

If the process doesn't exist at all, then waitpid fails. Can this even happen? In our case, probably not.
If the process exists, then we check whether the current process is the parent of the process we retrieved. If not, waitpid fails.
If the process exists and is the child of the current process
	but has already exited, then we just return the exit status, and we're all good.
	but is still running,
		then we need to wait for it to finish running and be notified when it has done so
			We need to mark that we're interested in the process (process interest count ++?)
			We need to wait on the process's CV while the process is still running
*/
int sys_waitpid(pid_t pid, u_int32_t *retstatus, int options, pid_t *retpid)
{
	if (retstatus == NULL) return EFAULT;

	// The options argument should be 0. You are not required to implement any options.
	// Check to make sure that options you do not support are not requested.
	if (options != 0) return EINVAL;
	
	lock_acquire(proctable_lock);
	struct process *matching_process = proctable_get_process(pid);

	// Current process doesn't exist. Either it exited a long time ago, or never existed
	// Either way, waitpid fails
	if (matching_process == NULL) {
		*retstatus = -1;
		lock_release(proctable_lock);
		return EAGAIN;
	}

	// Current process is not the parent of the process we're calling waitpid on.
	// This is not allowed, so waitpid fails
	if (matching_process->parent != curthread->t_process) {
		lock_release(proctable_lock);
		return EAGAIN;
	}

	// Here, we assume the process exists and is the child of the current process
	const int exited = STATUS_EXITED;
	const int running = STATUS_RUNNING;

	if (matching_process->status == exited) {
		*retpid = pid;
		*retstatus = matching_process->exit_code;
		lock_release(proctable_lock);
		return 0;
	} else {
		// The process is still running
		while (matching_process->status == running) {
			cv_wait(matching_process->waitpid_cv, proctable_lock);
		}
		*retpid = pid;
		*retstatus = matching_process->exit_code;
		lock_release(proctable_lock);
		return 0;
	}
}
Example #29
File: mmp.c Project: LLNL/zfs
void
mmp_thread_stop(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_enter(&mmp->mmp_thread_lock);
	mmp->mmp_thread_exiting = 1;
	cv_broadcast(&mmp->mmp_thread_cv);

	while (mmp->mmp_thread) {
		cv_wait(&mmp->mmp_thread_cv, &mmp->mmp_thread_lock);
	}
	mutex_exit(&mmp->mmp_thread_lock);

	ASSERT(mmp->mmp_thread == NULL);
	mmp->mmp_thread_exiting = 0;
}
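For the wait loop above to finish, the MMP thread itself must clear mmp_thread and broadcast on the same CV as it shuts down. A sketch of how that shutdown tail plausibly looks, based only on the fields used here, not copied from the project:

	/* Sketch: inside the MMP thread, once mmp_thread_exiting is observed. */
	mutex_enter(&mmp->mmp_thread_lock);
	mmp->mmp_thread = NULL;
	cv_broadcast(&mmp->mmp_thread_cv);
	mutex_exit(&mmp->mmp_thread_lock);
	thread_exit();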
Example #30
/*
 * Enqueue an internal driver request and wait until it is completed.
 */
static int
ipmi_submit_driver_request(struct ipmi_softc *sc, struct ipmi_request **preq,
    int timo)
{
	int error;
	struct ipmi_request *req = *preq;

	ASSERT(req->ir_owner == NULL);

	IPMI_LOCK(sc);
	error = sc->ipmi_enqueue_request(sc, req);

	if (error != 0) {
		IPMI_UNLOCK(sc);
		return (error);
	}

	while (req->ir_status != IRS_COMPLETED && error >= 0)
		if (timo == 0)
			cv_wait(&req->ir_cv, &sc->ipmi_lock);
		else
			error = cv_timedwait(&req->ir_cv, &sc->ipmi_lock,
			    ddi_get_lbolt() + timo);

	switch (req->ir_status) {
		case IRS_QUEUED:
			TAILQ_REMOVE(&sc->ipmi_pending_requests, req, ir_link);
			req->ir_status = IRS_CANCELED;
			error = EWOULDBLOCK;
			break;
		case IRS_PROCESSED:
			req->ir_status = IRS_CANCELED;
			error = EWOULDBLOCK;
			*preq = NULL;
			break;
		case IRS_COMPLETED:
			error = req->ir_error;
			break;
		default:
			panic("IPMI: Invalid request status");
			break;
	}
	IPMI_UNLOCK(sc);

	return (error);
}