Example No. 1
/*
 * The page_retire_thread loops forever, looking to see if there are
 * pages still waiting to be retired.
 */
static void
page_retire_thread(void)
{
	callb_cpr_t c;

	CALLB_CPR_INIT(&c, &pr_thread_mutex, callb_generic_cpr, "page_retire");

	mutex_enter(&pr_thread_mutex);
	for (;;) {
		if (pr_enable && PR_KSTAT_PENDING) {
			/*
			 * Sigh. It's SO broken how we have to try to shake
			 * loose the holder of the page. Since we have no
			 * idea who or what has it locked, we go bang on
			 * every door in the city to try to locate it.
			 */
			kmem_reap();
			seg_preap();
			page_retire_hunt(page_retire_thread_cb);
			CALLB_CPR_SAFE_BEGIN(&c);
			(void) cv_timedwait(&pr_cv, &pr_thread_mutex,
			    lbolt + pr_thread_shortwait);
			CALLB_CPR_SAFE_END(&c, &pr_thread_mutex);
		} else {
			CALLB_CPR_SAFE_BEGIN(&c);
			(void) cv_timedwait(&pr_cv, &pr_thread_mutex,
			    lbolt + pr_thread_longwait);
			CALLB_CPR_SAFE_END(&c, &pr_thread_mutex);
		}
	}
	/*NOTREACHED*/
}
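The pattern above recurs throughout these examples: a kernel daemon parks on cv_timedwait() so it can be woken early, bracketed by CALLB_CPR_SAFE_BEGIN()/CALLB_CPR_SAFE_END() so the suspend/resume (CPR) framework may freeze the thread while it sleeps. A minimal sketch of the idiom, assuming illumos-style kernel APIs; my_lock, my_cv and my_work() are hypothetical placeholders:

#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/callb.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

static kmutex_t my_lock;
static kcondvar_t my_cv;

static void my_work(void);		/* hypothetical payload */

static void
my_daemon(void)
{
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &my_lock, callb_generic_cpr, "my_daemon");

	mutex_enter(&my_lock);
	for (;;) {
		my_work();

		/* Mark the thread safe to suspend while it sleeps. */
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait(&my_cv, &my_lock,
		    ddi_get_lbolt() + drv_usectohz(1000000));	/* ~1 s */
		CALLB_CPR_SAFE_END(&cpr, &my_lock);
	}
	/*NOTREACHED*/
}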
Example No. 2
/*
 * iser_ini_enable_datamover() is used by the iSCSI initiator to request that a
 * specified iSCSI connection be transitioned to iSER-assisted mode.
 * In the case of iSER, the RDMA resources for a reliable connection have
 * already been allocated and 'RDMAExtensions' is set to 'Yes', so no further
 * negotiation is required at this time.
 * The initiator now sends the first iSER Message - 'Hello' - to the target
 * and waits for the 'HelloReply' Message from the target before directing
 * the initiator to go into the Full Feature Phase.
 *
 * No transport op is required on the target side.
 */
static idm_status_t
iser_ini_enable_datamover(idm_conn_t *ic)
{

    iser_conn_t	*iser_conn;
    clock_t		delay;
    int		status;

    iser_conn = (iser_conn_t *)ic->ic_transport_private;

    mutex_enter(&iser_conn->ic_lock);
    iser_conn->ic_stage = ISER_CONN_STAGE_HELLO_SENT;
    mutex_exit(&iser_conn->ic_lock);

    /* Send the iSER Hello Message to the target */
    status = iser_xfer_hello_msg(iser_conn->ic_chan);
    if (status != ISER_STATUS_SUCCESS) {

        mutex_enter(&iser_conn->ic_lock);
        iser_conn->ic_stage = ISER_CONN_STAGE_HELLO_SENT_FAIL;
        mutex_exit(&iser_conn->ic_lock);

        return (IDM_STATUS_FAIL);
    }

    /*
     * Acquire the iser_conn->ic_lock and wait for the iSER HelloReply
     * Message from the target, i.e. for iser_conn_stage_t to be set to
     * ISER_CONN_STAGE_HELLOREPLY_RCV. If the handshake does not
     * complete within a specified time period (.5s), return failure.
     */
    delay = ddi_get_lbolt() + drv_usectohz(500000);

    mutex_enter(&iser_conn->ic_lock);
    while ((iser_conn->ic_stage != ISER_CONN_STAGE_HELLOREPLY_RCV) &&
            (ddi_get_lbolt() < delay)) {

        (void) cv_timedwait(&iser_conn->ic_stage_cv,
                            &iser_conn->ic_lock, delay);
    }

    switch (iser_conn->ic_stage) {
    case ISER_CONN_STAGE_HELLOREPLY_RCV:
        iser_conn->ic_stage = ISER_CONN_STAGE_LOGGED_IN;
        mutex_exit(&iser_conn->ic_lock);
        /*
         * Return success to indicate that the initiator connection can
         * go to the next phase - FFP
         */
        return (IDM_STATUS_SUCCESS);
    default:
        iser_conn->ic_stage = ISER_CONN_STAGE_HELLOREPLY_RCV_FAIL;
        mutex_exit(&iser_conn->ic_lock);
        return (IDM_STATUS_FAIL);

    }

    /* STATEMENT_NEVER_REACHED */
}
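The handshake wait above is an instance of a more general idiom: compute one absolute deadline, then loop on cv_timedwait() until either the predicate holds or the clock passes the deadline, so spurious wakeups cannot stretch the total wait. A sketch assuming illumos-style APIs; conn_t and WANTED_STAGE are hypothetical:

#include <sys/types.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

typedef struct conn {
	kmutex_t	lock;
	kcondvar_t	cv;
	int		stage;		/* set by another thread */
} conn_t;

#define	WANTED_STAGE	1

static boolean_t
wait_for_stage(conn_t *conn)
{
	/* Absolute deadline: 0.5 s from now, in clock ticks. */
	clock_t deadline = ddi_get_lbolt() + drv_usectohz(500000);
	boolean_t ok;

	mutex_enter(&conn->lock);
	while (conn->stage != WANTED_STAGE && ddi_get_lbolt() < deadline) {
		/*
		 * cv_timedwait() takes an absolute tick value, so a
		 * spurious wakeup loops back without extending the wait.
		 */
		(void) cv_timedwait(&conn->cv, &conn->lock, deadline);
	}
	ok = (conn->stage == WANTED_STAGE);
	mutex_exit(&conn->lock);
	return (ok);
}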
Example No. 3
/*
 * Delay this thread by 'ticks' if we are still in the open transaction
 * group and there is already a waiting txg quiescing or quiesced.  Abort
 * the delay if this txg stalls or enters the quiescing state.
 */
void
txg_delay(dsl_pool_t *dp, uint64_t txg, int ticks)
{
	tx_state_t *tx = &dp->dp_tx;
	clock_t timeout = ddi_get_lbolt() + ticks;

	/* don't delay if this txg could transition to quiescing immediately */
	if (tx->tx_open_txg > txg ||
	    tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
		return;

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) {
		mutex_exit(&tx->tx_sync_lock);
		return;
	}

	while (ddi_get_lbolt() < timeout &&
	    tx->tx_syncing_txg < txg-1 && !txg_stalled(dp))
		(void) cv_timedwait(&tx->tx_quiesce_more_cv, &tx->tx_sync_lock,
		    timeout);

	DMU_TX_STAT_BUMP(dmu_tx_delay);

	mutex_exit(&tx->tx_sync_lock);
}
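Note how txg_delay() tests its exit conditions twice: once without tx_sync_lock on the fast path, then again under the lock before committing to sleep. A stripped-down sketch of that double-check, with hypothetical state_lock/state_done names:

#include <sys/mutex.h>

static kmutex_t	state_lock;
static int	state_done;

static void
maybe_wait(void)
{
	/* Fast path: racy read; worst case we take the lock needlessly. */
	if (state_done)
		return;

	mutex_enter(&state_lock);
	/* Re-check under the lock before committing to sleep. */
	if (state_done) {
		mutex_exit(&state_lock);
		return;
	}
	/* ... bounded cv_timedwait() loop, as in txg_delay() above ... */
	mutex_exit(&state_lock);
}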
Example No. 4
int
rumpuser_cv_timedwait(struct rumpuser_cv *cv, struct rumpuser_mtx *mtx,
	int64_t sec, int64_t nsec)
{

	return cv_timedwait(cv, mtx, sec, nsec);
}
Example No. 5
int
testcall(struct lwp *l, void *uap, register_t *retval)
{
	int i;

	mutex_init(&test_mutex, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&test_cv, "testcv");

	printf("test: creating threads\n");

	test_count = NTHREADS;
	test_exit = 0;

	for (i = 0; i < test_count; i++)
		kthread_create(0, KTHREAD_MPSAFE, NULL, thread1, &primes[i],
		    &test_threads[i], "thread%d", i);

	printf("test: sleeping\n");

	mutex_enter(&test_mutex);
	while (test_count != 0) {
		(void)cv_timedwait(&test_cv, &test_mutex, hz * SECONDS);
		test_exit = 1;
	}
	mutex_exit(&test_mutex);

	printf("test: finished\n");

	cv_destroy(&test_cv);
	mutex_destroy(&test_mutex);

	return 0;
}
Example No. 6
int
_sema_timedwait(struct sema *sema, int timo, const char *file, int line)
{
	int ret, timed_out;

	mtx_lock(&sema->sema_mtx);

	/*
	 * A spurious wakeup will cause the timeout interval to start over.
	 * This isn't a big deal as long as spurious wakeups don't occur
	 * continuously, since the timeout period is merely a lower bound on how
	 * long to wait.
	 */
	for (timed_out = 0; sema->sema_value == 0 && timed_out == 0;) {
		sema->sema_waiters++;
		timed_out = cv_timedwait(&sema->sema_cv, &sema->sema_mtx, timo);
		sema->sema_waiters--;
	}
	if (sema->sema_value > 0) {
		/* Success. */
		sema->sema_value--;
		ret = 1;

		CTR6(KTR_LOCK, "%s(%p) \"%s\" v = %d at %s:%d", __func__, sema,
		    cv_wmesg(&sema->sema_cv), sema->sema_value, file, line);
	} else {
		ret = 0;
		
		CTR5(KTR_LOCK, "%s(%p) \"%s\" fail at %s:%d", __func__, sema,
		    cv_wmesg(&sema->sema_cv), file, line);
	}

	mtx_unlock(&sema->sema_mtx);
	return (ret);
}
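The comment above concedes that each spurious wakeup restarts the interval, because timo is relative. If a hard upper bound mattered, the remaining ticks could be recomputed from an absolute deadline on every pass; a sketch under FreeBSD conventions (relative timeouts, the global ticks counter), not the actual _sema_timedwait():

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/sema.h>

static int
sema_timedwait_bounded(struct sema *sema, int timo)
{
	int end = ticks + timo;		/* absolute deadline in ticks */
	int left, rv = 0;

	mtx_lock(&sema->sema_mtx);
	while (sema->sema_value == 0 && (left = end - ticks) > 0) {
		sema->sema_waiters++;
		/* Sleep only for the time remaining, not a fresh timo. */
		(void) cv_timedwait(&sema->sema_cv, &sema->sema_mtx, left);
		sema->sema_waiters--;
	}
	if (sema->sema_value > 0) {
		sema->sema_value--;
		rv = 1;
	}
	mtx_unlock(&sema->sema_mtx);
	return (rv);
}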
Example No. 7
/*
 * Thread to setup switching mode. This thread is created during vsw_attach()
 * initially. It invokes vsw_setup_switching() and keeps retrying while the
 * returned value is EAGAIN. The thread exits when the switching mode setup is
 * done successfully or when the error returned is not EAGAIN. This thread may
 * also get created from vsw_update_md_prop() if the switching mode needs to be
 * updated.
 */
void
vsw_setup_switching_thread(void *arg)
{
	callb_cpr_t	cprinfo;
	vsw_t		*vswp = (vsw_t *)arg;
	clock_t		wait_time;
	clock_t		xwait;
	clock_t		wait_rv;
	int		rv;

	/* wait time used on successive retries */
	xwait = drv_usectohz(vsw_setup_switching_delay * MICROSEC);

	CALLB_CPR_INIT(&cprinfo, &vswp->sw_thr_lock, callb_generic_cpr,
	    "vsw_setup_sw_thread");

	mutex_enter(&vswp->sw_thr_lock);

	while ((vswp->sw_thr_flags & VSW_SWTHR_STOP) == 0) {

		CALLB_CPR_SAFE_BEGIN(&cprinfo);

		/* Wait for some time before (re)trying setup_switching() */
		wait_time = ddi_get_lbolt() + xwait;
		while ((vswp->sw_thr_flags & VSW_SWTHR_STOP) == 0) {
			wait_rv = cv_timedwait(&vswp->sw_thr_cv,
			    &vswp->sw_thr_lock, wait_time);
			if (wait_rv == -1) {	/* timed out */
				break;
			}
		}

		CALLB_CPR_SAFE_END(&cprinfo, &vswp->sw_thr_lock)

		if ((vswp->sw_thr_flags & VSW_SWTHR_STOP) != 0) {
			/*
			 * If there is a stop request, process that first and
			 * exit the loop. Continue to hold the mutex which gets
			 * released in CALLB_CPR_EXIT().
			 */
			break;
		}

		mutex_exit(&vswp->sw_thr_lock);
		rv = vsw_setup_switching(vswp);
		if (rv == 0) {
			vsw_setup_switching_post_process(vswp);
		}
		mutex_enter(&vswp->sw_thr_lock);
		if (rv != EAGAIN) {
			break;
		}

	}

	vswp->sw_thr_flags &= ~VSW_SWTHR_STOP;
	vswp->sw_thread = NULL;
	CALLB_CPR_EXIT(&cprinfo);
	thread_exit();
}
Example No. 8
/* afs_osi_TimedSleep
 *
 * Arguments:
 * event - event to sleep on
 * ams - max sleep time in milliseconds
 * aintok - 1 if should sleep interruptibly
 *
 * Returns 0 on timeout and EINTR if signalled.
 */
int
afs_osi_TimedSleep(void *event, afs_int32 ams, int aintok)
{
    int code = 0;
    struct afs_event *evp;
    clock_t ticks;

    ticks = (ams * afs_hz) / 1000;
#if defined(AFS_SUN510_ENV)
    ticks = ticks + ddi_get_lbolt();
#else
    ticks = ticks + lbolt;
#endif

    evp = afs_getevent(event);

    AFS_ASSERT_GLOCK();
    if (aintok) {
	if (cv_timedwait_sig(&evp->cond, &afs_global_lock, ticks) == 0)
	    code = EINTR;
    } else {
	cv_timedwait(&evp->cond, &afs_global_lock, ticks);
    }

    relevent(evp);
    return code;
}
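The #ifdef above exists because newer Solaris releases dropped the readable lbolt variable in favor of ddi_get_lbolt(). The conversion itself is just milliseconds to ticks plus 'now'; a one-function sketch assuming the illumos DDI:

#include <sys/types.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

/*
 * Convert a relative timeout in milliseconds to the absolute tick
 * deadline that cv_timedwait() expects.
 */
static clock_t
ms_to_deadline(int ms)
{
	return (ddi_get_lbolt() + drv_usectohz(ms * 1000));
}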
Example No. 9
static void
udf_discstrat_thread(void *arg)
{
	struct udf_mount *ump = (struct udf_mount *) arg;
	struct strat_private *priv = PRIV(ump);
	int empty;

	empty = 1;
	mutex_enter(&priv->discstrat_mutex);
	while (priv->run_thread || !empty) {
		/* process the current selected queue */
		udf_doshedule(ump);
		empty  = (bufq_peek(priv->queues[UDF_SHED_READING]) == NULL);
		empty &= (bufq_peek(priv->queues[UDF_SHED_WRITING]) == NULL);
		empty &= (bufq_peek(priv->queues[UDF_SHED_SEQWRITING]) == NULL);

		/* wait for more if needed */
		if (empty)
			cv_timedwait(&priv->discstrat_cv,
				&priv->discstrat_mutex, hz/8);
	}
	mutex_exit(&priv->discstrat_mutex);

	wakeup(&priv->run_thread);
	kthread_exit(0);
	/* not reached */
}
Example No. 10
/*
 * ehci_wait_for_isoc_completion:
 *
 * Wait for all completed transfers to be processed and their results
 * sent upstream.
 */
static void
ehci_wait_for_isoc_completion(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp)
{
	clock_t			xfer_cmpl_time_wait;

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	if (pp->pp_itw_head == NULL) {

		return;
	}

	/* Get the number of clock ticks to wait */
	xfer_cmpl_time_wait = drv_usectohz(EHCI_XFER_CMPL_TIMEWAIT * 1000000);

	(void) cv_timedwait(&pp->pp_xfer_cmpl_cv,
	    &ehcip->ehci_int_mutex,
	    ddi_get_lbolt() + xfer_cmpl_time_wait);

	if (pp->pp_itw_head) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_wait_for_isoc_completion: "
		    "No transfers completion confirmation received");
	}
}
Example No. 11
int
splat_condvar_test34_thread(void *arg)
{
	condvar_thr_t *ct = (condvar_thr_t *)arg;
	condvar_priv_t *cv = ct->ct_cvp;
	clock_t rc;

	ASSERT(cv->cv_magic == SPLAT_CONDVAR_TEST_MAGIC);

	mutex_enter(&cv->cv_mtx);
	splat_vprint(cv->cv_file, ct->ct_name,
	    "%s thread sleeping with %d waiters\n",
	    ct->ct_thread->comm, atomic_read(&cv->cv_condvar.cv_waiters));

	/* Sleep no longer than 3 seconds, for this test we should
	 * actually never sleep that long without being woken up. */
	rc = cv_timedwait(&cv->cv_condvar, &cv->cv_mtx, lbolt + HZ * 3);
	if (rc == -1) {
		ct->ct_rc = -ETIMEDOUT;
		splat_vprint(cv->cv_file, ct->ct_name, "%s thread timed out, "
		    "should have been woken\n", ct->ct_thread->comm);
	} else {
		splat_vprint(cv->cv_file, ct->ct_name,
		    "%s thread woken %d waiters remain\n",
		    ct->ct_thread->comm,
		    atomic_read(&cv->cv_condvar.cv_waiters));
	}

	mutex_exit(&cv->cv_mtx);

	/* wait for the main thread to reap us */
	while (!kthread_should_stop())
		schedule();
	return 0;
}
Example No. 12
static void
trim_thread(void *arg)
{
	spa_t *spa = arg;
	zio_t *zio;

#ifdef _KERNEL
	(void) snprintf(curthread->td_name, sizeof(curthread->td_name),
	    "trim %s", spa_name(spa));
#endif

	for (;;) {
		mutex_enter(&spa->spa_trim_lock);
		if (spa->spa_trim_thread == NULL) {
			spa->spa_trim_thread = curthread;
			cv_signal(&spa->spa_trim_cv);
			mutex_exit(&spa->spa_trim_lock);
			thread_exit();
		}

		(void) cv_timedwait(&spa->spa_trim_cv, &spa->spa_trim_lock,
		    hz * trim_max_interval);
		mutex_exit(&spa->spa_trim_lock);

		zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
		trim_map_commit(spa, zio, spa->spa_root_vdev);
		(void) zio_wait(zio);
		trim_map_commit_done(spa, spa->spa_root_vdev);
		spa_config_exit(spa, SCL_STATE, FTAG);
	}
}
Example No. 13
static void
npf_worker(void *arg)
{
	for (;;) {
		const bool finish = (worker_lwp == NULL);
		u_int i = NPF_MAX_WORKS;
		npf_workfunc_t work;

		/* Run the jobs. */
		while (i--) {
			if ((work = work_funcs[i]) != NULL) {
				work();
			}
		}

		/* Exit if requested and all jobs are done. */
		if (finish) {
			break;
		}

		/* Sleep and periodically wake up, unless we get notified. */
		mutex_enter(&worker_lock);
		worker_loop++;
		cv_broadcast(&worker_event_cv);
		cv_timedwait(&worker_cv, &worker_lock, WORKER_INTERVAL);
		mutex_exit(&worker_lock);
	}
	kthread_exit(0);
}
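The worker above publishes progress through worker_loop and worker_event_cv while sleeping on worker_cv. A hypothetical counterpart, written as if it lived in the same file, showing how another thread could kick the worker and wait for one full pass; a sketch of the handshake, not actual npf code:

#include <sys/types.h>
#include <sys/mutex.h>
#include <sys/condvar.h>

static void
npf_worker_kick_and_wait(void)
{
	u_int pass;

	mutex_enter(&worker_lock);
	pass = worker_loop;
	cv_signal(&worker_cv);		/* cut the sleep short */
	while (worker_loop == pass)	/* worker advanced one pass? */
		cv_wait(&worker_event_cv, &worker_lock);
	mutex_exit(&worker_lock);
}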
Example No. 14
/* Performance monitor thread */
static void
spa_perfmon_thread(spa_t *spa)
{
	spa_perfmon_data_t *data = &spa->spa_perfmon;
	boolean_t done = B_FALSE;

	ASSERT(data);

	DTRACE_PROBE1(spa_pm_start, spa_t *, spa);

	/* take a reference against spa */
	mutex_enter(&spa_namespace_lock);
	spa_open_ref(spa, FTAG);
	mutex_exit(&spa_namespace_lock);

	/* CONSTCOND */
	while (1) {
		clock_t deadline = ddi_get_lbolt() +
		    spa_special_stat_update_ticks;

		/* wait for the next tick, check exit condition */
		mutex_enter(&data->perfmon_lock);
		(void) cv_timedwait(&data->perfmon_cv, &data->perfmon_lock,
		    deadline);
		if (spa->spa_state == POOL_STATE_UNINITIALIZED ||
		    data->perfmon_thr_exit)
			done = B_TRUE;
		mutex_exit(&data->perfmon_lock);

		if (done)
			goto out;

		/*
		 * do the monitoring work here: gather average
		 * latency and utilization statistics
		 */
		DTRACE_PROBE1(spa_pm_work, spa_t *, spa);
		spa_load_stats_update(spa);

		/* we can adjust load and dedup at the same time */
		if (spa_enable_data_placement_selection)
			spa_special_load_adjust(spa);
		if (spa->spa_dedup_best_effort)
			spa_special_dedup_adjust(spa);

		/* go to sleep until next tick */
		DTRACE_PROBE1(spa_pm_sleep, spa_t *, spa);
	}

out:
	/* release the reference against spa */
	mutex_enter(&spa_namespace_lock);
	spa_close(spa, FTAG);
	mutex_exit(&spa_namespace_lock);

	DTRACE_PROBE1(spa_pm_stop, spa_t *, spa);
	thread_exit();
}
Example No. 15
void
usb_detach_wait(device_t dv, kcondvar_t *cv, kmutex_t *lock)
{
	DPRINTF(("usb_detach_wait: waiting for %s\n", device_xname(dv)));
	if (cv_timedwait(cv, lock, hz * 60))	// dv, PZERO, "usbdet", hz * 60
		printf("usb_detach_wait: %s didn't detach\n",
		        device_xname(dv));
	DPRINTF(("usb_detach_wait: %s done\n", device_xname(dv)));
}
Example No. 16
static void cond(void *data) {
	int res;

	printf("%d: entered\n", (int) data);
	bsd_dde_prepare_thread("cond");

	mtx_lock(&mtx1);
	res = cv_timedwait(&cv1, &mtx1, hz/10);
	mtx_unlock(&mtx1);
	printf("%d: %s\n", (int) data, (res?"timeout":"success"));
}
Example No. 17
void
usb_detach_wait(device_t dv, kcondvar_t *cv, kmutex_t *lock)
{
	USBHIST_FUNC(); USBHIST_CALLED(usbdebug);

	DPRINTFN(1, "waiting for dv %p", dv, 0, 0, 0);
	if (cv_timedwait(cv, lock, hz * 60))	// dv, PZERO, "usbdet", hz * 60
		printf("usb_detach_wait: %s didn't detach\n",
			device_xname(dv));
	DPRINTFN(1, "done", 0, 0, 0, 0);
}
Example No. 18
static void
txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, uint64_t time)
{
	CALLB_CPR_SAFE_BEGIN(cpr);

	if (time)
		(void) cv_timedwait(cv, &tx->tx_sync_lock, time);
	else
		cv_wait(cv, &tx->tx_sync_lock);

	CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}
Example No. 19
static int
splat_condvar_test5(struct file *file, void *arg)
{
        kcondvar_t condvar;
        kmutex_t mtx;
	clock_t time_left, time_before, time_after, time_delta;
	uint64_t whole_delta;
	uint32_t remain_delta;
	int rc = 0;

	mutex_init(&mtx, SPLAT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
	cv_init(&condvar, NULL, CV_DEFAULT, NULL);

        splat_vprint(file, SPLAT_CONDVAR_TEST5_NAME, "Thread going to sleep for "
	           "%d second and expecting to be woken by timeout\n", 1);

	/* Allow a 1 second timeout, plenty long to validate correctness. */
	time_before = lbolt;
	mutex_enter(&mtx);
	time_left = cv_timedwait(&condvar, &mtx, lbolt + HZ);
	mutex_exit(&mtx);
	time_after = lbolt;
	time_delta = time_after - time_before; /* XXX - Handle jiffie wrap */
	whole_delta  = time_delta;
	remain_delta = do_div(whole_delta, HZ);

	if (time_left == -1) {
		if (time_delta >= HZ) {
			splat_vprint(file, SPLAT_CONDVAR_TEST5_NAME,
			           "Thread correctly timed out and was asleep "
			           "for %d.%d seconds (%d second min)\n",
			           (int)whole_delta, (int)remain_delta, 1);
		} else {
			splat_vprint(file, SPLAT_CONDVAR_TEST5_NAME,
			           "Thread correctly timed out but was only "
			           "asleep for %d.%d seconds (%d second "
			           "min)\n", (int)whole_delta,
				   (int)remain_delta, 1);
			rc = -ETIMEDOUT;
		}
	} else {
		splat_vprint(file, SPLAT_CONDVAR_TEST5_NAME,
		           "Thread exited after only %d.%d seconds, it "
		           "did not hit the %d second timeout\n",
		           (int)whole_delta, (int)remain_delta, 1);
		rc = -ETIMEDOUT;
	}

	cv_destroy(&condvar);
	mutex_destroy(&mtx);

	return rc;
}
Example No. 20
static int
awin_p2wi_wait(struct awin_p2wi_softc *sc, int flags)
{
	int error = 0, retry;

	/* Wait up to 5 seconds for a transfer to complete */
	sc->sc_stat = 0;
	for (retry = (flags & I2C_F_POLL) ? 100 : 5; retry > 0; retry--) {
		if (flags & I2C_F_POLL) {
			sc->sc_stat |= P2WI_READ(sc, AWIN_A31_P2WI_STAT_REG);
		} else {
			error = cv_timedwait(&sc->sc_cv, &sc->sc_lock, hz);
			if (error && error != EWOULDBLOCK) {
				break;
			}
		}
		if (sc->sc_stat & AWIN_A31_P2WI_STAT_MASK) {
			break;
		}
		if (flags & I2C_F_POLL) {
			delay(10000);
		}
	}
	if (retry == 0)
		error = EAGAIN;

	if (flags & I2C_F_POLL) {
		P2WI_WRITE(sc, AWIN_A31_P2WI_STAT_REG,
		    sc->sc_stat & AWIN_A31_P2WI_STAT_MASK);
	}

	if (error) {
		/* Abort transaction */
		device_printf(sc->sc_dev, "transfer timeout, error = %d\n",
		    error);
		P2WI_WRITE(sc, AWIN_A31_P2WI_CTRL_REG,
		    AWIN_A31_P2WI_CTRL_ABORT_TRANS);
		return error;
	}

	if (sc->sc_stat & AWIN_A31_P2WI_STAT_LOAD_BSY) {
		device_printf(sc->sc_dev, "transfer busy\n");
		return EBUSY;
	}
	if (sc->sc_stat & AWIN_A31_P2WI_STAT_TRANS_ERR) {
		device_printf(sc->sc_dev, "transfer error, id 0x%02llx\n",
		    __SHIFTOUT(sc->sc_stat, AWIN_A31_P2WI_STAT_TRANS_ERR_ID));
		return EIO;
	}

	return 0;
}
Example No. 21
// Unlike cv_timedwait(), this timeout is relative to now.
clock_t cv_timedwait_hires(
        kcondvar_t * cv, kmutex_t * m, hrtime_t timeout, hrtime_t resolution, int flag)
{
    VERIFY0(flag); // We don't support flags.
    clock_t expiration = ddi_get_lbolt() + timeout;

    if (resolution > 1) {
        expiration = (expiration / resolution) * resolution;
    }

    // clock_t is ticks (usec) and hrtime_t is nsec.
    return cv_timedwait(cv, m, USEC_TO_NSEC(expiration));
}
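The shim above mixes units (lbolt ticks, microseconds, nanoseconds) rather freely. For an illumos-style kernel, where cv_timedwait() takes an absolute lbolt deadline and the hires timeout is a relative nanosecond count, the conventional conversion would look more like this sketch (NSEC2USEC() from <sys/time.h> is assumed):

#include <sys/time.h>
#include <sys/debug.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/condvar.h>

static clock_t
cv_timedwait_hires_sketch(kcondvar_t *cv, kmutex_t *m, hrtime_t timeout,
    hrtime_t resolution, int flag)
{
	hrtime_t ns = timeout;

	VERIFY0(flag);			/* flags unsupported, as above */
	if (resolution > 1)
		ns = (ns / resolution) * resolution;	/* round down */

	/* Relative nanoseconds -> relative ticks -> absolute deadline. */
	return (cv_timedwait(cv, m,
	    ddi_get_lbolt() + drv_usectohz(NSEC2USEC(ns))));
}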
Example No. 22
/*
 * Wait for minor nodes to be created before returning from attach,
 * with a 5 sec. timeout to avoid hangs should an error occur.
 */
void
pcata_minor_wait(ata_soft_t *softp)
{
	clock_t	timeout;

	timeout = ddi_get_lbolt() + drv_usectohz(5000000);
	mutex_enter(&softp->event_hilock);
	while ((softp->flags & PCATA_MAKEDEVICENODE) == 0) {
		if (cv_timedwait(&softp->readywait_cv, &softp->event_hilock,
		    timeout) == (clock_t)-1)
			break;
	}
	mutex_exit(&softp->event_hilock);
}
Example No. 23
ACPI_STATUS
acpi_sema_p(acpi_sema_t *sp, unsigned count, uint16_t wait_time)
{
	ACPI_STATUS rv = AE_OK;
	clock_t deadline;

	mutex_enter(&sp->mutex);

	if (sp->available >= count) {
		/*
		 * Enough units available, no blocking
		 */
		sp->available -= count;
		mutex_exit(&sp->mutex);
		return (rv);
	} else if (wait_time == 0) {
		/*
		 * Not enough units available and timeout
		 * specifies no blocking
		 */
		rv = AE_TIME;
		mutex_exit(&sp->mutex);
		return (rv);
	}

	/*
	 * Not enough units available and timeout specifies waiting
	 */
	if (wait_time != ACPI_WAIT_FOREVER)
		deadline = ddi_get_lbolt() +
		    (clock_t)drv_usectohz(wait_time * 1000);

	do {
		if (wait_time == ACPI_WAIT_FOREVER)
			cv_wait(&sp->cv, &sp->mutex);
		else if (cv_timedwait(&sp->cv, &sp->mutex, deadline) < 0) {
			rv = AE_TIME;
			break;
		}
	} while (sp->available < count);

	/* if we dropped out of the wait with AE_OK, we got the units */
	if (rv == AE_OK)
		sp->available -= count;

	mutex_exit(&sp->mutex);
	return (rv);
}
Example No. 24
/*
 * Enqueue an internal driver request and wait until it is completed.
 */
static int
ipmi_submit_driver_request(struct ipmi_softc *sc, struct ipmi_request **preq,
    int timo)
{
	int error;
	struct ipmi_request *req = *preq;

	ASSERT(req->ir_owner == NULL);

	IPMI_LOCK(sc);
	error = sc->ipmi_enqueue_request(sc, req);

	if (error != 0) {
		IPMI_UNLOCK(sc);
		return (error);
	}

	while (req->ir_status != IRS_COMPLETED && error >= 0)
		if (timo == 0)
			cv_wait(&req->ir_cv, &sc->ipmi_lock);
		else
			error = cv_timedwait(&req->ir_cv, &sc->ipmi_lock,
			    ddi_get_lbolt() + timo);

	switch (req->ir_status) {
		case IRS_QUEUED:
			TAILQ_REMOVE(&sc->ipmi_pending_requests, req, ir_link);
			req->ir_status = IRS_CANCELED;
			error = EWOULDBLOCK;
			break;
		case IRS_PROCESSED:
			req->ir_status = IRS_CANCELED;
			error = EWOULDBLOCK;
			*preq = NULL;
			break;
		case IRS_COMPLETED:
			error = req->ir_error;
			break;
		default:
			panic("IPMI: Invalid request status");
			break;
	}
	IPMI_UNLOCK(sc);

	return (error);
}
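The timo == 0 branch above degenerates to an untimed cv_wait(). Generalized, the idiom looks like this sketch (illumos-style APIs; done, cv and lock are hypothetical):

#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/errno.h>

/*
 * Wait for *done; timo == 0 means wait forever, otherwise timo is a
 * relative tick count.  Returns 0 on success, EWOULDBLOCK on timeout
 * (cv_timedwait() returns -1 once the deadline passes).
 */
static int
wait_optional_timeout(kcondvar_t *cv, kmutex_t *lock, int *done, clock_t timo)
{
	int rv = 0;

	while (!*done && rv >= 0) {
		if (timo == 0)
			cv_wait(cv, lock);
		else
			rv = cv_timedwait(cv, lock, ddi_get_lbolt() + timo);
	}
	return (*done ? 0 : EWOULDBLOCK);
}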
Example No. 25
File: taskq.c Project: AB17/zfs
static taskq_ent_t *
task_alloc(taskq_t *tq, int tqflags)
{
	taskq_ent_t *t;
	int rv;

again:	if ((t = tq->tq_freelist) != NULL && tq->tq_nalloc >= tq->tq_minalloc) {
		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
		tq->tq_freelist = t->tqent_next;
	} else {
		if (tq->tq_nalloc >= tq->tq_maxalloc) {
			if (!(tqflags & KM_SLEEP))
				return (NULL);

			/*
			 * We don't want to exceed tq_maxalloc, but we can't
			 * wait for other tasks to complete (and thus free up
			 * task structures) without risking deadlock with
			 * the caller.  So, we just delay for one second
			 * to throttle the allocation rate. If we have tasks
			 * complete before one second timeout expires then
			 * taskq_ent_free will signal us and we will
			 * immediately retry the allocation.
			 */
			tq->tq_maxalloc_wait++;
			rv = cv_timedwait(&tq->tq_maxalloc_cv,
			    &tq->tq_lock, ddi_get_lbolt() + hz);
			tq->tq_maxalloc_wait--;
			if (rv > 0)
				goto again;		/* signaled */
		}
		mutex_exit(&tq->tq_lock);

		t = kmem_alloc(sizeof (taskq_ent_t), tqflags);

		mutex_enter(&tq->tq_lock);
		if (t != NULL) {
			/* Make sure we start without any flags */
			t->tqent_flags = 0;
			tq->tq_nalloc++;
		}
	}
	return (t);
}
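The throttle in task_alloc() only works if the corresponding free path signals tq_maxalloc_cv whenever a throttled allocator is waiting. A simplified sketch of that counterpart, modeled on the taskq_ent_free() the comment refers to and written as if it lived in the same file:

static void
task_free_sketch(taskq_t *tq, taskq_ent_t *t)
{
	ASSERT(MUTEX_HELD(&tq->tq_lock));

	/* Return the entry to the free list... */
	t->tqent_next = tq->tq_freelist;
	tq->tq_freelist = t;

	/* ...and wake one throttled allocator, if any is waiting. */
	if (tq->tq_maxalloc_wait)
		cv_signal(&tq->tq_maxalloc_cv);
}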
Example No. 26
/*
 * Wait for data to arrive at/drain from a socket buffer.
 */
int
sbwait(struct sockbuf *sb)
{
	struct socket *so;
	kmutex_t *lock;
	int error;

	so = sb->sb_so;

	KASSERT(solocked(so));

	sb->sb_flags |= SB_NOTIFY;
	lock = so->so_lock;
	if ((sb->sb_flags & SB_NOINTR) != 0)
		error = cv_timedwait(&sb->sb_cv, lock, sb->sb_timeo);
	else
		error = cv_timedwait_sig(&sb->sb_cv, lock, sb->sb_timeo);
	if (__predict_false(lock != so->so_lock))
		solockretry(so, lock);
	return error;
}
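These examples mix two cv_timedwait() conventions worth keeping apart: the Solaris/illumos version takes an absolute lbolt deadline and returns -1 on timeout, while the NetBSD version seen here (FreeBSD is analogous, with struct cv and struct mtx) takes a relative tick count and returns EWOULDBLOCK on timeout. A sketch of the difference; header names vary per kernel:

#include <sys/condvar.h>
#include <sys/mutex.h>

static int
waited_out(kcondvar_t *cv, kmutex_t *lk)
{
#if defined(__sun)
	/* illumos/Solaris: absolute lbolt deadline, -1 on timeout. */
	return (cv_timedwait(cv, lk, ddi_get_lbolt() + hz) == -1);
#else
	/* NetBSD: relative tick count, EWOULDBLOCK on timeout. */
	return (cv_timedwait(cv, lk, hz) == EWOULDBLOCK);
#endif
}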
Example No. 27
/*
 * Close the RX side of a VCC.
 */
void
hatm_rx_vcc_close(struct hatm_softc *sc, u_int cid)
{
	struct hevcc *vcc = sc->vccs[cid];
	uint32_t v;

	vcc->vflags |= HE_VCC_RX_CLOSING;
	WRITE_RSR(sc, cid, 0, 0xf, 0);

	v = READ4(sc, HE_REGO_RCCSTAT);
	while ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) &&
	       (READ4(sc, HE_REGO_RCCSTAT) & HE_REGM_RCCSTAT_PROG))
		cv_timedwait(&sc->cv_rcclose, &sc->mtx, 1);

	if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	WRITE_MBOX4(sc, HE_REGO_RCON_CLOSE, cid);

	vcc->vflags |= HE_VCC_RX_CLOSING;
	vcc->vflags &= ~HE_VCC_RX_OPEN;
}
Example No. 28
static void
if_netmap_send(void *arg)
{
	struct mbuf *m;
	struct if_netmap_softc *sc = (struct if_netmap_softc *)arg;
	struct ifnet *ifp = sc->ifp;
	struct uhi_pollfd pfd;
	uint32_t avail;
	uint32_t cur;
	u_int pktlen;
	int rv;
	int done;
	int pkts_sent;

	if (sc->cfg->cpu >= 0)
		sched_bind(sc->tx_thread.thr, sc->cfg->cpu);

	rv = if_netmap_txsync(sc->nm_host_ctx, NULL, NULL);
	if (rv == -1) {
		printf("could not sync tx descriptors before transmit\n");
	}

	avail = if_netmap_txavail(sc->nm_host_ctx);

	sc->tx_thread.last_stop_check = ticks;
	done = 0;
	pkts_sent = 0;
	do {
		mtx_lock(&sc->tx_lock);
		sc->tx_pkts_to_send -= pkts_sent;
		while ((sc->tx_pkts_to_send == 0) && !done)
			if (EWOULDBLOCK == cv_timedwait(&sc->tx_cv, &sc->tx_lock, sc->stop_check_ticks))
				done = if_netmap_stoppable_thread_check(&sc->tx_thread);
		mtx_unlock(&sc->tx_lock);
	
		if (done)
			break;

		pkts_sent = 0;

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		while (m) {
			while (0 == avail && !done) {
				memset(&pfd, 0, sizeof(pfd));

				pfd.fd = sc->fd;
				pfd.events = UHI_POLLOUT;
				
				rv = uhi_poll(&pfd, 1, IF_NETMAP_THREAD_STOP_CHECK_MS);
				if (rv == 0)
					done = if_netmap_stoppable_thread_check(&sc->tx_thread);	
				else if (rv == -1)
					printf("error from poll for transmit\n");
					
				avail = if_netmap_txavail(sc->nm_host_ctx);
			}

			if (ticks - sc->tx_thread.last_stop_check >= sc->stop_check_ticks)
				done = if_netmap_stoppable_thread_check(&sc->tx_thread);

			if (done)
				break;

			cur = if_netmap_txcur(sc->nm_host_ctx);

			while (m && avail) {
				ifp->if_ocopies++;
				ifp->if_opackets++;

				avail--;
				pkts_sent++;

				pktlen = m_length(m, NULL);

				m_copydata(m, 0, pktlen,
					   if_netmap_txslot(sc->nm_host_ctx, &cur, pktlen)); 
				m_freem(m);

				IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
			}

			rv = if_netmap_txsync(sc->nm_host_ctx, &avail, &cur);
			if (rv == -1) {
				printf("could not sync tx descriptors after transmit\n");
			}
			avail = if_netmap_txavail(sc->nm_host_ctx);
		}

	} while (!done);

	if_netmap_stoppable_thread_done(&sc->tx_thread);
}
Example No. 29
static void
nvpflush_daemon(void)
{
	callb_cpr_t cprinfo;
	clock_t clk;
	int rval;
	int i;

	ASSERT(modrootloaded);

	nvpflush_thread = curthread;
	NVPDAEMON_DEBUG((CE_CONT, "nvpdaemon: init\n"));

	CALLB_CPR_INIT(&cprinfo, &nvpflush_lock, callb_generic_cpr, "nvp");
	mutex_enter(&nvpflush_lock);
	for (;;) {

		CALLB_CPR_SAFE_BEGIN(&cprinfo);
		while (do_nvpflush == 0) {
			clk = cv_timedwait(&nvpflush_cv, &nvpflush_lock,
			    ddi_get_lbolt() +
				(nvpdaemon_idle_time * TICKS_PER_SECOND));
			if (clk == -1 &&
			    do_nvpflush == 0 && nvpflush_timer_busy == 0) {
				/*
				 * Note that CALLB_CPR_EXIT calls mutex_exit()
				 * on the lock passed in to CALLB_CPR_INIT,
				 * so the lock must be held when invoking it.
				 */
				CALLB_CPR_SAFE_END(&cprinfo, &nvpflush_lock);
				NVPDAEMON_DEBUG((CE_CONT, "nvpdaemon: exit\n"));
				ASSERT(mutex_owned(&nvpflush_lock));
				nvpflush_thr_id = NULL;
				nvpflush_daemon_active = 0;
				CALLB_CPR_EXIT(&cprinfo);
				thread_exit();
			}
		}
		CALLB_CPR_SAFE_END(&cprinfo, &nvpflush_lock);

		nvpbusy = 1;
		do_nvpflush = 0;
		mutex_exit(&nvpflush_lock);

		/*
		 * Try flushing what's dirty, reschedule if there's
		 * a failure or data gets marked as dirty again.
		 */
		for (i = 0; i < NCACHEFDS; i++) {
			rw_enter(&cachefds[i]->nvf_lock, RW_READER);
			if (NVF_IS_DIRTY(cachefds[i])) {
				NVPDAEMON_DEBUG((CE_CONT,
				    "nvpdaemon: flush %s\n",
				    cachefds[i]->nvf_name));
				rw_exit(&cachefds[i]->nvf_lock);
				rval = nvpflush_one(cachefds[i]);
				rw_enter(&cachefds[i]->nvf_lock, RW_READER);
				if (rval != DDI_SUCCESS ||
				    NVF_IS_DIRTY(cachefds[i])) {
					rw_exit(&cachefds[i]->nvf_lock);
					NVPDAEMON_DEBUG((CE_CONT,
					    "nvpdaemon: %s dirty again\n",
					    cachefds[i]->nvf_name));
					wake_nvpflush_daemon();
				} else {
					rw_exit(&cachefds[i]->nvf_lock);
					nvf_write_complete(cachefds[i]);
				}
			} else {
				NVPDAEMON_DEBUG((CE_CONT,
				    "nvpdaemon: not dirty %s\n",
				    cachefds[i]->nvf_name));
				rw_exit(&cachefds[i]->nvf_lock);
			}
		}

		mutex_enter(&nvpflush_lock);
		nvpbusy = 0;
	}
}
Example No. 30
static void
scan_task(void *arg, int pending)
{
#define	ISCAN_REP	(ISCAN_MINDWELL | ISCAN_DISCARD)
	struct ieee80211_scan_state *ss = (struct ieee80211_scan_state *) arg;
	struct ieee80211vap *vap = ss->ss_vap;
	struct ieee80211com *ic = ss->ss_ic;
	struct ieee80211_channel *chan;
	unsigned long maxdwell, scanend;
	int scandone = 0;

	IEEE80211_LOCK(ic);
	if (vap == NULL || (ic->ic_flags & IEEE80211_F_SCAN) == 0 ||
	    (SCAN_PRIVATE(ss)->ss_iflags & ISCAN_ABORT)) {
		/* Cancelled before we started */
		goto done;
	}

	if (ss->ss_next == ss->ss_last) {
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
			"%s: no channels to scan\n", __func__);
		goto done;
	}

	if (vap->iv_opmode == IEEE80211_M_STA &&
	    vap->iv_state == IEEE80211_S_RUN) {
		if ((vap->iv_bss->ni_flags & IEEE80211_NODE_PWR_MGT) == 0) {
			/* Enable station power save mode */
			ieee80211_sta_pwrsave(vap, 1);
			/*
			 * Use a 1ms delay so the null data frame has a chance
			 * to go out.
			 * XXX Should use M_TXCB mechanism to eliminate this.
			 */
			cv_timedwait(&SCAN_PRIVATE(ss)->ss_scan_cv,
			    IEEE80211_LOCK_OBJ(ic), hz / 1000);
			if (SCAN_PRIVATE(ss)->ss_iflags & ISCAN_ABORT)
				goto done;
		}
	}

	scanend = ticks + SCAN_PRIVATE(ss)->ss_duration;
	IEEE80211_UNLOCK(ic);
	ic->ic_scan_start(ic);		/* notify driver */
	IEEE80211_LOCK(ic);

	for (;;) {
		scandone = (ss->ss_next >= ss->ss_last) ||
		    (SCAN_PRIVATE(ss)->ss_iflags & ISCAN_CANCEL) != 0;
		if (scandone || (ss->ss_flags & IEEE80211_SCAN_GOTPICK) ||
		    (SCAN_PRIVATE(ss)->ss_iflags & ISCAN_ABORT) ||
		     time_after(ticks + ss->ss_mindwell, scanend))
			break;

		chan = ss->ss_chans[ss->ss_next++];

		/*
		 * Watch for truncation due to the scan end time.
		 */
		if (time_after(ticks + ss->ss_maxdwell, scanend))
			maxdwell = scanend - ticks;
		else
			maxdwell = ss->ss_maxdwell;

		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
		    "%s: chan %3d%c -> %3d%c [%s, dwell min %lums max %lums]\n",
		    __func__,
		    ieee80211_chan2ieee(ic, ic->ic_curchan),
		        channel_type(ic->ic_curchan),
		    ieee80211_chan2ieee(ic, chan), channel_type(chan),
		    (ss->ss_flags & IEEE80211_SCAN_ACTIVE) &&
			(chan->ic_flags & IEEE80211_CHAN_PASSIVE) == 0 ?
			"active" : "passive",
		    ticks_to_msecs(ss->ss_mindwell), ticks_to_msecs(maxdwell));

		/*
		 * Potentially change channel and phy mode.
		 */
		ic->ic_curchan = chan;
		ic->ic_rt = ieee80211_get_ratetable(chan);
		IEEE80211_UNLOCK(ic);
		/*
		 * Perform the channel change and scan unlocked so the driver
		 * may sleep. Once set_channel returns the hardware has
		 * completed the channel change.
		 */
		ic->ic_set_channel(ic);
		ieee80211_radiotap_chan_change(ic);

		/*
		 * Scan curchan.  Drivers for "intelligent hardware"
		 * override ic_scan_curchan to tell the device to do
		 * the work.  Otherwise we manage the work ourselves;
		 * sending a probe request (as needed), and arming the
		 * timeout to switch channels after maxdwell ticks.
		 *
		 * scan_curchan should only pause for the time required to
		 * prepare/initiate the hardware for the scan (if at all), the
		 * below condvar is used to sleep for the channel's dwell time
		 * and allows it to be signalled for abort.
		 */
		ic->ic_scan_curchan(ss, maxdwell);
		IEEE80211_LOCK(ic);

		SCAN_PRIVATE(ss)->ss_chanmindwell = ticks + ss->ss_mindwell;
		/* clear mindwell lock and initial channel change flush */
		SCAN_PRIVATE(ss)->ss_iflags &= ~ISCAN_REP;

		if ((SCAN_PRIVATE(ss)->ss_iflags & (ISCAN_CANCEL|ISCAN_ABORT)))
			continue;

		/* Wait to be signalled to scan the next channel */
		cv_wait(&SCAN_PRIVATE(ss)->ss_scan_cv, IEEE80211_LOCK_OBJ(ic));
	}
	if (SCAN_PRIVATE(ss)->ss_iflags & ISCAN_ABORT)
		goto done;

	IEEE80211_UNLOCK(ic);
	ic->ic_scan_end(ic);		/* notify driver */
	IEEE80211_LOCK(ic);

	/*
	 * Record scan complete time.  Note that we also do
	 * this when canceled so any background scan will
	 * not be restarted for a while.
	 */
	if (scandone)
		ic->ic_lastscan = ticks;
	/* return to the bss channel */
	if (ic->ic_bsschan != IEEE80211_CHAN_ANYC &&
	    ic->ic_curchan != ic->ic_bsschan) {
		ieee80211_setupcurchan(ic, ic->ic_bsschan);
		IEEE80211_UNLOCK(ic);
		ic->ic_set_channel(ic);
		ieee80211_radiotap_chan_change(ic);
		IEEE80211_LOCK(ic);
	}
	/* clear internal flags and any indication of a pick */
	SCAN_PRIVATE(ss)->ss_iflags &= ~ISCAN_REP;
	ss->ss_flags &= ~IEEE80211_SCAN_GOTPICK;

	/*
	 * If not canceled and scan completed, do post-processing.
	 * If the callback function returns 0, then it wants to
	 * continue/restart scanning.  Unfortunately we needed to
	 * notify the driver to end the scan above to avoid having
	 * rx frames alter the scan candidate list.
	 */
	if ((SCAN_PRIVATE(ss)->ss_iflags & ISCAN_CANCEL) == 0 &&
	    !ss->ss_ops->scan_end(ss, vap) &&
	    (ss->ss_flags & IEEE80211_SCAN_ONCE) == 0 &&
	    time_before(ticks + ss->ss_mindwell, scanend)) {
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
		    "%s: done, restart "
		    "[ticks %u, dwell min %lu scanend %lu]\n",
		    __func__,
		    ticks, ss->ss_mindwell, scanend);
		ss->ss_next = 0;	/* reset to beginning */
		if (ss->ss_flags & IEEE80211_SCAN_ACTIVE)
			vap->iv_stats.is_scan_active++;
		else
			vap->iv_stats.is_scan_passive++;

		ss->ss_ops->scan_restart(ss, vap);	/* XXX? */
		ieee80211_runtask(ic, &SCAN_PRIVATE(ss)->ss_scan_task);
		IEEE80211_UNLOCK(ic);
		return;
	}

	/* past here, scandone is ``true'' if not in bg mode */
	if ((ss->ss_flags & IEEE80211_SCAN_BGSCAN) == 0)
		scandone = 1;

	IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
	    "%s: %s, [ticks %u, dwell min %lu scanend %lu]\n",
	    __func__, scandone ? "done" : "stopped",
	    ticks, ss->ss_mindwell, scanend);

	/*
	 * Clear the SCAN bit first in case frames are
	 * pending on the station power save queue.  If
	 * we defer this then the dispatch of the frames
	 * may generate a request to cancel scanning.
	 */
done:
	ic->ic_flags &= ~IEEE80211_F_SCAN;
	/*
	 * Drop out of power save mode when a scan has
	 * completed.  If this scan was prematurely terminated
	 * because it is a background scan then don't notify
	 * the ap; we'll either return to scanning after we
	 * receive the beacon frame or we'll drop out of power
	 * save mode because the beacon indicates we have frames
	 * waiting for us.
	 */
	if (scandone) {
		ieee80211_sta_pwrsave(vap, 0);
		if (ss->ss_next >= ss->ss_last) {
			ieee80211_notify_scan_done(vap);
			ic->ic_flags_ext &= ~IEEE80211_FEXT_BGSCAN;
		}
	}
	SCAN_PRIVATE(ss)->ss_iflags &= ~(ISCAN_CANCEL|ISCAN_ABORT);
	ss->ss_flags &= ~(IEEE80211_SCAN_ONCE | IEEE80211_SCAN_PICK1ST);
	IEEE80211_UNLOCK(ic);
#undef ISCAN_REP
}