Example #1
File: condvar.c  Project: BjoKaSH/mac-zfs
/*
 * Like cv_timedwait_sig(), but takes an absolute hires future time
 * rather than a future time in clock ticks.  Will not return showing
 * that a timeout occurred until the future time is passed.
 * If 'when' is a NULL pointer, no timeout will occur.
 * Returns:
 * 	Function result in order of precedence:
 *		 0 if a signal was received
 *		-1 if timeout occurred
 *	        >0 if awakened via cv_signal() or cv_broadcast()
 *		   or by a spurious wakeup.
 *		   (might return time remaining)
 * As a special test, if someone abruptly resets the system time
 * (but not through adjtime(2); drifting of the clock is allowed and
 * expected [see timespectohz_adj()]), then we force a return of -1
 * so the caller can return a premature timeout to the calling process
 * so it can reevaluate the situation in light of the new system time.
 * (The system clock has been reset if timecheck != timechanged.)
 */
int
cv_waituntil_sig(kcondvar_t *cvp, kmutex_t *mp,
	timestruc_t *when, int timecheck)
{
	timestruc_t now;
	timestruc_t delta;
	int rval;

	if (when == NULL)
		return (cv_wait_sig_swap(cvp, mp));

	gethrestime(&now);
	delta = *when;
	timespecsub(&delta, &now);
	if (delta.tv_sec < 0 || (delta.tv_sec == 0 && delta.tv_nsec == 0)) {
		/*
		 * We have already reached the absolute future time.
		 * Call cv_timedwait_sig() just to check for signals.
		 * We will return immediately with either 0 or -1.
		 */
		rval = cv_timedwait_sig(cvp, mp, lbolt);
	} else {
		if (timecheck == timechanged) {
			rval = cv_timedwait_sig(cvp, mp,
				lbolt + timespectohz_adj(when, now));
		} else {
			/*
			 * Someone reset the system time;
			 * just force an immediate timeout.
			 */
			rval = -1;
		}
		if (rval == -1 && timecheck == timechanged) {
			/*
			 * Even though cv_timedwait_sig() returned showing a
			 * timeout, the future time may not have passed yet.
			 * If not, change rval to indicate a normal wakeup.
			 */
			gethrestime(&now);
			delta = *when;
			timespecsub(&delta, &now);
			if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
			    delta.tv_nsec > 0))
				rval = 1;
		}
	}
	return (rval);
}
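The block comment above documents the illumos/Solaris condition-variable convention: a positive result means a (possibly spurious) wakeup, 0 means a signal was received, and -1 means the deadline passed. As a rough, hypothetical caller-side sketch (not part of condvar.c), those results could be folded into errno-style codes, assuming the same kernel environment:

static int
wait_until_or_errno(kcondvar_t *cvp, kmutex_t *mp, timestruc_t *deadline,
    int timecheck)
{
	int rv = cv_waituntil_sig(cvp, mp, deadline, timecheck);

	if (rv > 0)
		return (0);		/* awakened via cv_signal()/cv_broadcast() */
	if (rv == 0)
		return (EINTR);		/* interrupted by a signal */
	return (ETIMEDOUT);		/* rv == -1: the absolute deadline passed */
}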
Example #2
/* afs_osi_TimedSleep
 * 
 * Arguments:
 * event - event to sleep on
 * ams --- max sleep time in milliseconds
 * aintok - 1 if should sleep interruptibly
 *
 * Returns 0 if timeout and EINTR if signalled.
 */
int
afs_osi_TimedSleep(void *event, afs_int32 ams, int aintok)
{
    int code = 0;
    struct afs_event *evp;
    clock_t ticks;

    ticks = (ams * afs_hz) / 1000;
#if defined(AFS_SUN510_ENV)
    ticks = ticks + ddi_get_lbolt();
#else
    ticks = ticks + lbolt;
#endif

    evp = afs_getevent(event);

    AFS_ASSERT_GLOCK();
    if (aintok) {
	if (cv_timedwait_sig(&evp->cond, &afs_global_lock, ticks) == 0)
	    code = EINTR;
    } else {
	cv_timedwait(&evp->cond, &afs_global_lock, ticks);
    }

    relevent(evp);
    return code;
}
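Per the comment block, afs_osi_TimedSleep() returns 0 on timeout (or a normal wakeup) and EINTR when an interruptible sleep is cut short by a signal. A hypothetical caller (not from OpenAFS), holding GLOCK as the assertion requires, might use it along these lines:

static int
afs_wait_for_event(void *event, afs_int32 ms)
{
    int code;

    /* Hypothetical sketch: interruptible sleep, GLOCK assumed held. */
    code = afs_osi_TimedSleep(event, ms, 1);
    if (code == EINTR)
	return EINTR;	/* a signal arrived before the timeout */
    return 0;		/* timed out, or woken via afs_osi_Wakeup() */
}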
Example #3
/* ARGSUSED3 */
int
_rdc_link_down(void *arg, int mode, spcs_s_info_t kstatus, int *rvp)
{
	char host[MAX_RDC_HOST_SIZE];
	rdc_link_down_t *syncdp;
	clock_t timeout = RDC_SYNC_EVENT_TIMEOUT * 2; /* 2 min */
	int rc = 0;

	if (ddi_copyin(arg, host, MAX_RDC_HOST_SIZE, mode))
		return (EFAULT);


	syncdp = rdc_lookup_host(host);

	mutex_enter(&syncdp->syncd_mutex);
	if (!syncdp->link_down) {
		syncdp->waiting = 1;
		if (cv_timedwait_sig(&syncdp->syncd_cv, &syncdp->syncd_mutex,
		    nsc_lbolt() + timeout) == 0) {
			/* Woken by a signal, not a link down event */
			syncdp->waiting = 0;
			rc = EAGAIN;
			spcs_s_add(kstatus, rc);
		}

	}
	mutex_exit(&syncdp->syncd_mutex);

	return (rc);
}
Example #4
/*
 * Wait for data to arrive at/drain from a socket buffer.
 */
int
sbwait(struct sockbuf *sb)
{
	struct socket *so;
	kmutex_t *lock;
	int error;

	so = sb->sb_so;

	KASSERT(solocked(so));

	sb->sb_flags |= SB_NOTIFY;
	lock = so->so_lock;
	if ((sb->sb_flags & SB_NOINTR) != 0)
		error = cv_timedwait(&sb->sb_cv, lock, sb->sb_timeo);
	else
		error = cv_timedwait_sig(&sb->sb_cv, lock, sb->sb_timeo);
	if (__predict_false(lock != so->so_lock))
		solockretry(so, lock);
	return error;
}
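Unlike the Solaris-convention examples above, NetBSD's cv_timedwait_sig() returns errno-style values, and sbwait() hands them straight back: 0 for a wakeup, EWOULDBLOCK when sb_timeo expires, EINTR or ERESTART when a signal arrives. A hypothetical caller (not from the source, and assuming NetBSD's solock()/sounlock() helpers) might look like:

static int
sowait_readable(struct socket *so)
{
	int error = 0;

	/* Hypothetical sketch: sleep until receive data is queued. */
	solock(so);
	while (so->so_rcv.sb_cc == 0 &&
	    (error = sbwait(&so->so_rcv)) == 0)
		continue;
	sounlock(so);

	if (error == EWOULDBLOCK)
		error = EAGAIN;		/* receive timeout expired */
	return error;			/* 0, EAGAIN, EINTR or ERESTART */
}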
Example #5
/*
 * wait until local tx buffer drains.
 * 'timeout' is in seconds, zero means wait forever
 */
static int
uftdi_wait_tx_drain(uftdi_state_t *uf, int timeout)
{
	clock_t	until;
	int over = 0;

	until = ddi_get_lbolt() + drv_usectohz(1000 * 1000 * timeout);

	while (uf->uf_tx_mp && !over) {
		if (timeout > 0) {
			/* whether timedout or signal pending */
			over = cv_timedwait_sig(&uf->uf_tx_cv,
			    &uf->uf_lock, until) <= 0;
		} else {
			/* whether a signal is pending */
			over = cv_wait_sig(&uf->uf_tx_cv,
			    &uf->uf_lock) == 0;
		}
	}

	return (uf->uf_tx_mp == NULL ? USB_SUCCESS : USB_FAILURE);
}
Example #6
int
wait_for_completion_interruptible_timeout(struct completion *c, unsigned long timeout)
{
	int res = 0;
	unsigned long start, now;
	start = jiffies;

	mtx_lock(&c->lock);
	while (c->done == 0) {
		res = cv_timedwait_sig(&c->cv, &c->lock, timeout);
		if (res)
			goto out;
		now = jiffies;
		if (timeout < (now - start)) {
			res = EWOULDBLOCK;
			goto out;
		}

		timeout -= (now - start);
		start = now;
	}

	_completion_claim(c);
	res = 0;

out:
	mtx_unlock(&c->lock);

	if (res == EWOULDBLOCK) {
		return 0;
	} else if ((res == EINTR) || (res == ERESTART)) {
		return -ERESTART;
	} else {
		KASSERT((res == 0), ("res = %d", res));
		return timeout;
	}
}
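This FreeBSD shim mirrors the Linux contract for wait_for_completion_interruptible_timeout(): 0 means the timeout expired, -ERESTART means a signal interrupted the wait, and a positive value is (roughly) the unused part of the timeout. A hypothetical caller, under those assumptions:

static int
wait_for_dma_done(struct completion *dma_done, unsigned long timeout_ticks)
{
	int left;

	/* Hypothetical sketch: block until the completion is signalled. */
	left = wait_for_completion_interruptible_timeout(dma_done, timeout_ticks);
	if (left == 0)
		return ETIMEDOUT;	/* the full timeout elapsed */
	if (left < 0)
		return ERESTART;	/* interrupted by a signal */
	return 0;			/* completed with 'left' ticks to spare */
}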
Example #7
ACPI_STATUS
AcpiOsWaitSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units, UINT16 Timeout)
{
	struct acpi_sema	*as = (struct acpi_sema *)Handle;
	int			error, prevtick, slptick, tmo;
	ACPI_STATUS		status = AE_OK;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (as == NULL || Units == 0)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	mtx_lock(&as->as_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	    "get %u unit(s) from %s, units %u, waiters %d, timeout %u\n",
	    Units, as->as_name, as->as_units, as->as_waiters, Timeout));

	if (as->as_maxunits != ACPI_NO_UNIT_LIMIT && as->as_maxunits < Units) {
		mtx_unlock(&as->as_lock);
		return_ACPI_STATUS (AE_LIMIT);
	}

	switch (Timeout) {
	case ACPI_DO_NOT_WAIT:
		if (!ACPISEM_AVAIL(as, Units))
			status = AE_TIME;
		break;
	case ACPI_WAIT_FOREVER:
		while (!ACPISEM_AVAIL(as, Units)) {
			as->as_waiters++;
			error = cv_wait_sig(&as->as_cv, &as->as_lock);
			as->as_waiters--;
			if (error == EINTR || as->as_reset) {
				status = AE_ERROR;
				break;
			}
		}
		break;
	default:
		tmo = timeout2hz(Timeout);
		while (!ACPISEM_AVAIL(as, Units)) {
			prevtick = ticks;
			as->as_waiters++;
			error = cv_timedwait_sig(&as->as_cv, &as->as_lock, tmo);
			as->as_waiters--;
			if (error == EINTR || as->as_reset) {
				status = AE_ERROR;
				break;
			}
			if (ACPISEM_AVAIL(as, Units))
				break;
			slptick = ticks - prevtick;
			if (slptick >= tmo || slptick < 0) {
				status = AE_TIME;
				break;
			}
			tmo -= slptick;
		}
	}
	if (status == AE_OK)
		as->as_units -= Units;

	mtx_unlock(&as->as_lock);

	return_ACPI_STATUS (status);
}
Example #8
static int
kern_sem_wait(struct thread *td, semid_t id, int tryflag,
    struct timespec *abstime)
{
	struct timespec ts1, ts2;
	struct timeval tv;
	struct file *fp;
	struct ksem *ks;
	int error;

	DP((">>> kern_sem_wait entered! pid=%d\n", (int)td->td_proc->p_pid));
	error = ksem_get(td, id, CAP_SEM_WAIT, &fp);
	if (error)
		return (error);
	ks = fp->f_data;
	mtx_lock(&sem_lock);
	DP((">>> kern_sem_wait critical section entered! pid=%d\n",
	    (int)td->td_proc->p_pid));
#ifdef MAC
	error = mac_posixsem_check_wait(td->td_ucred, fp->f_cred, ks);
	if (error) {
		DP(("kern_sem_wait mac failed\n"));
		goto err;
	}
#endif
	DP(("kern_sem_wait value = %d, tryflag %d\n", ks->ks_value, tryflag));
	vfs_timestamp(&ks->ks_atime);
	while (ks->ks_value == 0) {
		ks->ks_waiters++;
		if (tryflag != 0)
			error = EAGAIN;
		else if (abstime == NULL)
			error = cv_wait_sig(&ks->ks_cv, &sem_lock);
		else {
			for (;;) {
				ts1 = *abstime;
				getnanotime(&ts2);
				timespecsub(&ts1, &ts2);
				TIMESPEC_TO_TIMEVAL(&tv, &ts1);
				if (tv.tv_sec < 0) {
					error = ETIMEDOUT;
					break;
				}
				error = cv_timedwait_sig(&ks->ks_cv,
				    &sem_lock, tvtohz(&tv));
				if (error != EWOULDBLOCK)
					break;
			}
		}
		ks->ks_waiters--;
		if (error)
			goto err;
	}
	ks->ks_value--;
	DP(("kern_sem_wait value post-decrement = %d\n", ks->ks_value));
	error = 0;
err:
	mtx_unlock(&sem_lock);
	fdrop(fp, td);
	DP(("<<< kern_sem_wait leaving, pid=%d, error = %d\n",
	    (int)td->td_proc->p_pid, error));
	return (error);
}
Example #9
File: mmp.c  Project: LLNL/zfs
static void
mmp_thread(void *arg)
{
	spa_t *spa = (spa_t *)arg;
	mmp_thread_t *mmp = &spa->spa_mmp;
	boolean_t last_spa_suspended = spa_suspended(spa);
	boolean_t last_spa_multihost = spa_multihost(spa);
	callb_cpr_t cpr;
	hrtime_t max_fail_ns = zfs_multihost_fail_intervals *
	    MSEC2NSEC(MAX(zfs_multihost_interval, MMP_MIN_INTERVAL));

	mmp_thread_enter(mmp, &cpr);

	/*
	 * The mmp_write_done() function calculates mmp_delay based on the
	 * prior value of mmp_delay and the elapsed time since the last write.
	 * For the first mmp write, there is no "last write", so we start
	 * with fake, but reasonable, default non-zero values.
	 */
	mmp->mmp_delay = MSEC2NSEC(MAX(zfs_multihost_interval,
	    MMP_MIN_INTERVAL)) / MAX(vdev_count_leaves(spa), 1);
	mmp->mmp_last_write = gethrtime() - mmp->mmp_delay;

	while (!mmp->mmp_thread_exiting) {
		uint64_t mmp_fail_intervals = zfs_multihost_fail_intervals;
		uint64_t mmp_interval = MSEC2NSEC(
		    MAX(zfs_multihost_interval, MMP_MIN_INTERVAL));
		boolean_t suspended = spa_suspended(spa);
		boolean_t multihost = spa_multihost(spa);
		hrtime_t start, next_time;

		start = gethrtime();
		if (multihost) {
			next_time = start + mmp_interval /
			    MAX(vdev_count_leaves(spa), 1);
		} else {
			next_time = start + MSEC2NSEC(MMP_DEFAULT_INTERVAL);
		}

		/*
		 * When MMP goes off => on, or spa goes suspended =>
		 * !suspended, we know no writes occurred recently.  We
		 * update mmp_last_write to give us some time to try.
		 */
		if ((!last_spa_multihost && multihost) ||
		    (last_spa_suspended && !suspended)) {
			mutex_enter(&mmp->mmp_io_lock);
			mmp->mmp_last_write = gethrtime();
			mutex_exit(&mmp->mmp_io_lock);
		} else if (last_spa_multihost && !multihost) {
			mutex_enter(&mmp->mmp_io_lock);
			mmp->mmp_delay = 0;
			mutex_exit(&mmp->mmp_io_lock);
		}
		last_spa_multihost = multihost;
		last_spa_suspended = suspended;

		/*
		 * Smooth max_fail_ns when its factors are decreased, because
		 * making (max_fail_ns < mmp_interval) results in the pool being
		 * immediately suspended before writes can occur at the new
		 * higher frequency.
		 */
		if ((mmp_interval * mmp_fail_intervals) < max_fail_ns) {
			max_fail_ns = ((31 * max_fail_ns) + (mmp_interval *
			    mmp_fail_intervals)) / 32;
		} else {
			max_fail_ns = mmp_interval * mmp_fail_intervals;
		}

		/*
		 * Suspend the pool if no MMP write has succeeded in over
		 * mmp_interval * mmp_fail_intervals nanoseconds.
		 */
		if (!suspended && mmp_fail_intervals && multihost &&
		    (start - mmp->mmp_last_write) > max_fail_ns) {
			zio_suspend(spa, NULL);
		}

		if (multihost)
			mmp_write_uberblock(spa);

		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait_sig(&mmp->mmp_thread_cv,
		    &mmp->mmp_thread_lock, ddi_get_lbolt() +
		    ((next_time - gethrtime()) / (NANOSEC / hz)));
		CALLB_CPR_SAFE_END(&cpr, &mmp->mmp_thread_lock);
	}

	/* Outstanding writes are allowed to complete. */
	if (mmp->mmp_zio_root)
		zio_wait(mmp->mmp_zio_root);

	mmp->mmp_zio_root = NULL;
	mmp_thread_exit(mmp, &mmp->mmp_thread, &cpr);
}
Example #10
/**
 * Worker for rtSemMutexSolRequest that handles the case where we go to sleep.
 *
 * @returns VINF_SUCCESS, VERR_INTERRUPTED, or VERR_SEM_DESTROYED.
 *          Returns without owning the mutex.
 * @param   pThis           The mutex instance.
 * @param   cMillies        The timeout, must be > 0 or RT_INDEFINITE_WAIT.
 * @param   fInterruptible  The wait type.
 *
 * @remarks This needs to be called with the mutex object held!
 */
static int rtSemMutexSolRequestSleep(PRTSEMMUTEXINTERNAL pThis, RTMSINTERVAL cMillies,
                                       bool fInterruptible)
{
    int rc = VERR_GENERAL_FAILURE;
    Assert(cMillies > 0);

    /*
     * Now we wait (sleep; although might spin and then sleep) & reference the mutex.
     */
    ASMAtomicIncU32(&pThis->cWaiters);
    ASMAtomicIncU32(&pThis->cRefs);

    if (cMillies != RT_INDEFINITE_WAIT)
    {
        clock_t cTicks   = drv_usectohz((clock_t)(cMillies * 1000L));
        clock_t cTimeout = ddi_get_lbolt();
        cTimeout        += cTicks;
        if (fInterruptible)
            rc = cv_timedwait_sig(&pThis->Cnd, &pThis->Mtx, cTimeout);
        else
            rc = cv_timedwait(&pThis->Cnd, &pThis->Mtx, cTimeout);
    }
    else
    {
        if (fInterruptible)
            rc = cv_wait_sig(&pThis->Cnd, &pThis->Mtx);
        else
        {
            cv_wait(&pThis->Cnd, &pThis->Mtx);
            rc = 1;
        }
    }

    ASMAtomicDecU32(&pThis->cWaiters);
    if (rc > 0)
    {
        if (pThis->u32Magic == RTSEMMUTEX_MAGIC)
        {
            if (pThis->hOwnerThread == NIL_RTNATIVETHREAD)
            {
                /*
                 * Woken up by a release from another thread.
                 */
                Assert(pThis->cRecursions == 0);
                pThis->cRecursions = 1;
                pThis->hOwnerThread = RTThreadNativeSelf();
                rc = VINF_SUCCESS;
            }
            else
            {
                /*
                 * Interrupted by some signal.
                 */
                rc = VERR_INTERRUPTED;
            }
        }
        else
        {
            /*
             * Awakened due to the destruction-in-progress broadcast.
             * We will cleanup if we're the last waiter.
             */
            rc = VERR_SEM_DESTROYED;
        }
    }
    else if (rc == -1)
    {
        /*
         * Timed out.
         */
        rc = VERR_TIMEOUT;
    }
    else
    {
        /*
         * Condition may not have been met, returned due to pending signal.
         */
        rc = VERR_INTERRUPTED;
    }

    if (!ASMAtomicDecU32(&pThis->cRefs))
    {
        Assert(RT_FAILURE_NP(rc));
        mutex_exit(&pThis->Mtx);
        cv_destroy(&pThis->Cnd);
        mutex_destroy(&pThis->Mtx);
        RTMemFree(pThis);
        return rc;
    }

    return rc;
}
Example #11
File: dm2s.c  Project: andreiw/polaris
/*
 * dm2s_mbox_init - Mailbox specific initialization.
 */
static int
dm2s_mbox_init(dm2s_t *dm2sp)
{
	int ret;
	clock_t tout;

	ASSERT(MUTEX_HELD(&dm2sp->ms_lock));
	dm2sp->ms_target = DM2S_TARGET_ID;
	dm2sp->ms_key = DSCP_KEY;
	dm2sp->ms_state &= ~DM2S_MB_INITED;

	/* Iterate until mailbox gets connected */
	while (!(dm2sp->ms_state & DM2S_MB_CONN)) {
		DPRINTF(DBG_MBOX, ("dm2s_mbox_init: calling mb_init\n"));
		ret = scf_mb_init(dm2sp->ms_target, dm2sp->ms_key,
		    dm2s_event_handler, (void *)dm2sp);
		DPRINTF(DBG_MBOX, ("dm2s_mbox_init: mb_init ret=%d\n", ret));

		if (ret != 0) {
			DPRINTF(DBG_MBOX,
			    ("dm2s_mbox_init: failed ret =%d\n", ret));
			DTRACE_PROBE1(dm2s_mbox_fail, int, ret);
		} else {
			dm2sp->ms_state |= DM2S_MB_INITED;

			/* Block until the mailbox is ready to communicate. */
			while (!(dm2sp->ms_state &
			    (DM2S_MB_CONN | DM2S_MB_DISC))) {

				if (cv_wait_sig(&dm2sp->ms_wait,
				    &dm2sp->ms_lock) <= 0) {
					/* interrupted */
					ret = EINTR;
					break;
				}
			}
		}

		if ((ret != 0) || (dm2sp->ms_state & DM2S_MB_DISC)) {

			if (dm2sp->ms_state & DM2S_MB_INITED) {
				(void) scf_mb_fini(dm2sp->ms_target,
				    dm2sp->ms_key);
			}
			if (dm2sp->ms_state & DM2S_MB_DISC) {
				DPRINTF(DBG_WARN,
				    ("dm2s_mbox_init: mbox DISC_ERROR\n"));
				DTRACE_PROBE1(dm2s_mbox_fail,
				    int, DM2S_MB_DISC);
			}

			dm2sp->ms_state &= ~(DM2S_MB_INITED | DM2S_MB_DISC |
			    DM2S_MB_CONN);

			if (ret == EINTR) {
				return (ret);
			}

			/*
			 * If there was failure, then wait for
			 * DM2S_MB_TOUT secs and retry again.
			 */

			DPRINTF(DBG_MBOX, ("dm2s_mbox_init: waiting...\n"));
			tout = ddi_get_lbolt() + drv_usectohz(DM2S_MB_TOUT);
			ret = cv_timedwait_sig(&dm2sp->ms_wait,
			    &dm2sp->ms_lock, tout);
			if (ret == 0) {
				/* if interrupted, return immediately. */
				DPRINTF(DBG_MBOX,
				    ("dm2s_mbox_init: interrupted\n"));
				return (EINTR);
			}
		}
	}

	/*
	 * Obtain the max size of a single message.
	 * NOTE: There is no mechanism to update the
	 * upper layers dynamically, so we expect this
	 * size to be at least the default MTU size.
	 */
	ret = scf_mb_ctrl(dm2sp->ms_target, dm2sp->ms_key,
	    SCF_MBOP_MAXMSGSIZE, &dm2sp->ms_mtu);

	if ((ret == 0) && (dm2sp->ms_mtu < DM2S_DEF_MTU)) {
		cmn_err(CE_WARN, "Max message size expected >= %d "
		    "but found %d\n", DM2S_DEF_MTU, dm2sp->ms_mtu);
		ret = EIO;
	}

	if (ret != 0) {
		dm2sp->ms_state &= ~DM2S_MB_INITED;
		(void) scf_mb_fini(dm2sp->ms_target, dm2sp->ms_key);
	}
	DPRINTF(DBG_MBOX, ("dm2s_mbox_init: mb_init ret=%d\n", ret));
	return (ret);
}
Example #12
int
sigtimedwait1(struct lwp *l, const struct sys_____sigtimedwait50_args *uap,
    register_t *retval, copyin_t fetchss, copyout_t storeinf, copyin_t fetchts,
    copyout_t storets)
{
	/* {
		syscallarg(const sigset_t *) set;
		syscallarg(siginfo_t *) info;
		syscallarg(struct timespec *) timeout;
	} */
	struct proc *p = l->l_proc;
	int error, signum, timo;
	struct timespec ts, tsstart, tsnow;
	ksiginfo_t ksi;

	/*
	 * Calculate timeout, if it was specified.
	 *
	 * NULL pointer means an infinite timeout.
	 * {.tv_sec = 0, .tv_nsec = 0} means do not block.
	 */
	if (SCARG(uap, timeout)) {
		error = (*fetchts)(SCARG(uap, timeout), &ts, sizeof(ts));
		if (error)
			return error;

		if ((error = itimespecfix(&ts)) != 0)
			return error;

		timo = tstohz(&ts);
		if (timo == 0) {
			if (ts.tv_sec == 0 && ts.tv_nsec == 0)
				timo = -1; /* do not block */
			else
				timo = 1; /* the shortest possible timeout */
		}

		/*
		 * Remember current uptime, it would be used in
		 * ECANCELED/ERESTART case.
		 */
		getnanouptime(&tsstart);
	} else {
		memset(&tsstart, 0, sizeof(tsstart)); /* XXXgcc */
		timo = 0; /* infinite timeout */
	}

	error = (*fetchss)(SCARG(uap, set), &l->l_sigwaitset,
	    sizeof(l->l_sigwaitset));
	if (error)
		return error;

	/*
	 * Silently ignore SA_CANTMASK signals. psignal1() would ignore
	 * SA_CANTMASK signals in waitset, we do this only for the below
	 * siglist check.
	 */
	sigminusset(&sigcantmask, &l->l_sigwaitset);

	mutex_enter(p->p_lock);

	/* Check for pending signals in the process, if no - then in LWP. */
	if ((signum = sigget(&p->p_sigpend, &ksi, 0, &l->l_sigwaitset)) == 0)
		signum = sigget(&l->l_sigpend, &ksi, 0, &l->l_sigwaitset);

	if (signum != 0) {
		/* If found a pending signal, just copy it out to the user. */
		mutex_exit(p->p_lock);
		goto out;
	}

	if (timo < 0) {
		/* If not allowed to block, return an error */
		mutex_exit(p->p_lock);
		return EAGAIN;
	}

	/*
	 * Set up the sigwait list and wait for signal to arrive.
	 * We can either be woken up or time out.
	 */
	l->l_sigwaited = &ksi;
	LIST_INSERT_HEAD(&p->p_sigwaiters, l, l_sigwaiter);
	error = cv_timedwait_sig(&l->l_sigcv, p->p_lock, timo);

	/*
	 * Need to find out if we woke as a result of _lwp_wakeup() or a
	 * signal outside our wait set.
	 */
	if (l->l_sigwaited != NULL) {
		if (error == EINTR) {
			/* Wakeup via _lwp_wakeup(). */
			error = ECANCELED;
		} else if (!error) {
			/* Spurious wakeup - arrange for syscall restart. */
			error = ERESTART;
		}
		l->l_sigwaited = NULL;
		LIST_REMOVE(l, l_sigwaiter);
	}
	mutex_exit(p->p_lock);

	/*
	 * If the sleep was interrupted (either by signal or wakeup), update
	 * the timeout and copyout new value back.  It would be used when
	 * the syscall would be restarted or called again.
	 */
	if (timo && (error == ERESTART || error == ECANCELED)) {
		getnanouptime(&tsnow);

		/* Compute how much time has passed since start. */
		timespecsub(&tsnow, &tsstart, &tsnow);

		/* Subtract passed time from timeout. */
		timespecsub(&ts, &tsnow, &ts);

		if (ts.tv_sec < 0)
			error = EAGAIN;
		else {
			/* Copy updated timeout to userland. */
			error = (*storets)(&ts, SCARG(uap, timeout),
			    sizeof(ts));
		}
	}
out:
	/*
	 * If a signal from the wait set arrived, copy it to userland.
	 * Copy only the used part of siginfo, the padding part is
	 * left unchanged (userland is not supposed to touch it anyway).
	 */
	if (error == 0 && SCARG(uap, info)) {
		error = (*storeinf)(&ksi.ksi_info, SCARG(uap, info),
		    sizeof(ksi.ksi_info));
	}
	if (error == 0)
		*retval = ksi.ksi_info._signo;
	return error;
}
Example #13
/**
 * radeon_fence_wait_any_seq - wait for a sequence number on any ring
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics).  Sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number.  Helper function
 * for radeon_fence_wait_any(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 */
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
                                     u64 *target_seq, bool intr)
{
    unsigned long timeout, last_activity, tmp;
    unsigned i, ring = RADEON_NUM_RINGS;
    bool signaled, fence_queue_locked;
    int r;

    for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
        if (!target_seq[i]) {
            continue;
        }

        /* use the most recent one as indicator */
        if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
            last_activity = rdev->fence_drv[i].last_activity;
        }

        /* For lockup detection just pick the lowest ring we are
         * actively waiting for
         */
        if (i < ring) {
            ring = i;
        }
    }

    /* nothing to wait for ? */
    if (ring == RADEON_NUM_RINGS) {
        return -ENOENT;
    }

    while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
        timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
        if (time_after(last_activity, timeout)) {
            /* the normal case, timeout is somewhere before last_activity */
            timeout = last_activity - timeout;
        } else {
            /* either jiffies wrapped around, or no fence was signaled in the last 500ms
             * anyway we will just wait for the minimum amount and then check for a lockup
             */
            timeout = 1;
        }

        CTR2(KTR_DRM, "radeon fence: wait begin (ring=%d, target_seq=%d)",
             ring, target_seq[ring]);
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
            if (target_seq[i]) {
                radeon_irq_kms_sw_irq_get(rdev, i);
            }
        }
        fence_queue_locked = false;
        r = 0;
        while (!(signaled = radeon_fence_any_seq_signaled(rdev,
                            target_seq))) {
            if (!fence_queue_locked) {
                mtx_lock(&rdev->fence_queue_mtx);
                fence_queue_locked = true;
            }
            if (intr) {
                r = cv_timedwait_sig(&rdev->fence_queue,
                                     &rdev->fence_queue_mtx,
                                     timeout);
            } else {
                r = cv_timedwait(&rdev->fence_queue,
                                 &rdev->fence_queue_mtx,
                                 timeout);
            }
            if (r == EINTR)
                r = ERESTARTSYS;
            if (r != 0) {
                if (r == EWOULDBLOCK) {
                    signaled =
                        radeon_fence_any_seq_signaled(
                            rdev, target_seq);
                }
                break;
            }
        }
        if (fence_queue_locked) {
            mtx_unlock(&rdev->fence_queue_mtx);
        }
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
            if (target_seq[i]) {
                radeon_irq_kms_sw_irq_put(rdev, i);
            }
        }
        if (unlikely(r == ERESTARTSYS)) {
            return -r;
        }
        CTR2(KTR_DRM, "radeon fence: wait end (ring=%d, target_seq=%d)",
             ring, target_seq[ring]);

        if (unlikely(!signaled)) {
#ifndef __FreeBSD__
            /* we were interrupted for some reason and fence
             * isn't signaled yet, resume waiting */
            if (r) {
                continue;
            }
#endif

            sx_xlock(&rdev->ring_lock);
            for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
                if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
                    tmp = rdev->fence_drv[i].last_activity;
                }
            }
            /* test if somebody else has already decided that this is a lockup */
            if (last_activity != tmp) {
                last_activity = tmp;
                sx_xunlock(&rdev->ring_lock);
                continue;
            }

            if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
                /* good news we believe it's a lockup */
                dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016jx)\n",
                         (uintmax_t)target_seq[ring]);

                /* change last activity so nobody else thinks there is a lockup */
                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                    rdev->fence_drv[i].last_activity = jiffies;
                }

                /* mark the ring as not ready any more */
                rdev->ring[ring].ready = false;
                sx_xunlock(&rdev->ring_lock);
                return -EDEADLK;
            }
            sx_xunlock(&rdev->ring_lock);
        }
    }
    return 0;
}
Example #14
/**
 * radeon_fence_wait_seq - wait for a specific sequence number
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number we want to wait for
 * @ring: ring index the fence is associated with
 * @intr: use interruptible sleep
 * @lock_ring: whether the ring should be locked or not
 *
 * Wait for the requested sequence number to be written (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number.  Helper function
 * for radeon_fence_wait(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected and the ring is
 * marked as not ready so no further jobs get scheduled until a successful
 * reset.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
                                 unsigned ring, bool intr, bool lock_ring)
{
    unsigned long timeout, last_activity;
    uint64_t seq;
    unsigned i;
    bool signaled, fence_queue_locked;
    int r;

    while (target_seq > atomic_load_acq_64(&rdev->fence_drv[ring].last_seq)) {
        if (!rdev->ring[ring].ready) {
            return -EBUSY;
        }

        timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
        if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
            /* the normal case, timeout is somewhere before last_activity */
            timeout = rdev->fence_drv[ring].last_activity - timeout;
        } else {
            /* either jiffies wrapped around, or no fence was signaled in the last 500ms
             * anyway we will just wait for the minimum amount and then check for a lockup
             */
            timeout = 1;
        }
        seq = atomic_load_acq_64(&rdev->fence_drv[ring].last_seq);
        /* Save current last activity value, used to check for GPU lockups */
        last_activity = rdev->fence_drv[ring].last_activity;

        CTR2(KTR_DRM, "radeon fence: wait begin (ring=%d, seq=%d)",
             ring, seq);

        radeon_irq_kms_sw_irq_get(rdev, ring);
        fence_queue_locked = false;
        r = 0;
        while (!(signaled = radeon_fence_seq_signaled(rdev,
                            target_seq, ring))) {
            if (!fence_queue_locked) {
                mtx_lock(&rdev->fence_queue_mtx);
                fence_queue_locked = true;
            }
            if (intr) {
                r = cv_timedwait_sig(&rdev->fence_queue,
                                     &rdev->fence_queue_mtx,
                                     timeout);
            } else {
                r = cv_timedwait(&rdev->fence_queue,
                                 &rdev->fence_queue_mtx,
                                 timeout);
            }
            if (r == EINTR)
                r = ERESTARTSYS;
            if (r != 0) {
                if (r == EWOULDBLOCK) {
                    signaled =
                        radeon_fence_seq_signaled(
                            rdev, target_seq, ring);
                }
                break;
            }
        }
        if (fence_queue_locked) {
            mtx_unlock(&rdev->fence_queue_mtx);
        }
        radeon_irq_kms_sw_irq_put(rdev, ring);
        if (unlikely(r == ERESTARTSYS)) {
            return -r;
        }
        CTR2(KTR_DRM, "radeon fence: wait end (ring=%d, seq=%d)",
             ring, seq);

        if (unlikely(!signaled)) {
#ifndef __FreeBSD__
            /* we were interrupted for some reason and fence
             * isn't signaled yet, resume waiting */
            if (r) {
                continue;
            }
#endif

            /* check if sequence value has changed since last_activity */
            if (seq != atomic_load_acq_64(&rdev->fence_drv[ring].last_seq)) {
                continue;
            }

            if (lock_ring) {
                sx_xlock(&rdev->ring_lock);
            }

            /* test if somebody else has already decided that this is a lockup */
            if (last_activity != rdev->fence_drv[ring].last_activity) {
                if (lock_ring) {
                    sx_xunlock(&rdev->ring_lock);
                }
                continue;
            }

            if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
                /* good news we believe it's a lockup */
                dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016jx last fence id 0x%016jx)\n",
                         (uintmax_t)target_seq, (uintmax_t)seq);

                /* change last activity so nobody else thinks there is a lockup */
                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                    rdev->fence_drv[i].last_activity = jiffies;
                }

                /* mark the ring as not ready any more */
                rdev->ring[ring].ready = false;
                if (lock_ring) {
                    sx_xunlock(&rdev->ring_lock);
                }
                return -EDEADLK;
            }

            if (lock_ring) {
                sx_xunlock(&rdev->ring_lock);
            }
        }
    }
    return 0;
}
Example #15
static int
btsco_open(void *hdl, int flags)
{
	struct sockaddr_bt sa;
	struct btsco_softc *sc = hdl;
	struct sockopt sopt;
	int err, timo;

	DPRINTF("%s flags 0x%x\n", sc->sc_name, flags);
	/* flags FREAD & FWRITE? */

	if (sc->sc_sco != NULL || sc->sc_sco_l != NULL)
		return EIO;

	KASSERT(mutex_owned(bt_lock));

	memset(&sa, 0, sizeof(sa));
	sa.bt_len = sizeof(sa);
	sa.bt_family = AF_BLUETOOTH;
	bdaddr_copy(&sa.bt_bdaddr, &sc->sc_laddr);

	if (sc->sc_flags & BTSCO_LISTEN) {
		err = sco_attach_pcb(&sc->sc_sco_l, &btsco_sco_proto, sc);
		if (err)
			goto done;

		err = sco_bind_pcb(sc->sc_sco_l, &sa);
		if (err) {
			sco_detach_pcb(&sc->sc_sco_l);
			goto done;
		}

		err = sco_listen_pcb(sc->sc_sco_l);
		if (err) {
			sco_detach_pcb(&sc->sc_sco_l);
			goto done;
		}

		timo = 0;	/* no timeout */
	} else {
		err = sco_attach_pcb(&sc->sc_sco, &btsco_sco_proto, sc);
		if (err)
			goto done;

		err = sco_bind_pcb(sc->sc_sco, &sa);
		if (err) {
			sco_detach_pcb(&sc->sc_sco);
			goto done;
		}

		bdaddr_copy(&sa.bt_bdaddr, &sc->sc_raddr);
		err = sco_connect_pcb(sc->sc_sco, &sa);
		if (err) {
			sco_detach_pcb(&sc->sc_sco);
			goto done;
		}

		timo = BTSCO_TIMEOUT;
	}

	sc->sc_state = BTSCO_WAIT_CONNECT;
	while (err == 0 && sc->sc_state == BTSCO_WAIT_CONNECT)
		err = cv_timedwait_sig(&sc->sc_connect, bt_lock, timo);

	switch (sc->sc_state) {
	case BTSCO_CLOSED:		/* disconnected */
		err = sc->sc_err;

		/* fall through to */
	case BTSCO_WAIT_CONNECT:	/* error */
		if (sc->sc_sco != NULL)
			sco_detach_pcb(&sc->sc_sco);

		if (sc->sc_sco_l != NULL)
			sco_detach_pcb(&sc->sc_sco_l);

		break;

	case BTSCO_OPEN:		/* hurrah */
		sockopt_init(&sopt, BTPROTO_SCO, SO_SCO_MTU, 0);
		(void)sco_getopt(sc->sc_sco, &sopt);
		(void)sockopt_get(&sopt, &sc->sc_mtu, sizeof(sc->sc_mtu));
		sockopt_destroy(&sopt);
		break;

	default:
		UNKNOWN(sc->sc_state);
		break;
	}

done:
	DPRINTF("done err=%d, sc_state=%d, sc_mtu=%d\n",
			err, sc->sc_state, sc->sc_mtu);
	return err;
}
Example #16
void
sleepq_enqueue(sleepq_t *sq, wchan_t wchan, const char *wmesg, syncobj_t *sobj)
{
	struct lwp *l = curlwp;

#ifndef T2EX
	if (__predict_false(sobj != &sleep_syncobj || strcmp(wmesg, "callout"))) {
#else
	if (__predict_false(sobj != &sleep_syncobj || (strcmp(wmesg, "callout") != 0 && strcmp(wmesg, "select") != 0 && strcmp(wmesg, "pollsock") != 0))) {
#endif
		panic("sleepq: unsupported enqueue");
	}

	/*
	 * Remove an LWP from a sleep queue if the LWP was deleted while in
	 * the waiting state.
	 */
	if ( l->l_sleepq != NULL && (l->l_stat & LSSLEEP) != 0 ) {
		sleepq_remove(l->l_sleepq, l);
	}

#ifndef T2EX
	l->l_syncobj = sobj;
#endif
	l->l_wchan = wchan;
	l->l_sleepq = sq;
#ifndef T2EX
	l->l_wmesg = wmesg;
	l->l_slptime = 0;
#endif
	l->l_stat = LSSLEEP;
#ifndef T2EX
	l->l_sleeperr = 0;
#endif

	TAILQ_INSERT_TAIL(sq, l, l_sleepchain);
}

int
sleepq_block(int timo, bool hatch)
{
	struct lwp *l = curlwp;
	int error = 0;

	//KASSERT(timo == 0 && !hatch);

	if (timo != 0) {
		callout_schedule(&l->l_timeout_ch, timo);
	}

#ifdef T2EX
	if ( l->l_mutex != NULL ) {
		mutex_exit(l->l_mutex);
	}
#endif

	mutex_enter(&sq_mtx);
	while (l->l_wchan) {
		if ( hatch ) {
			error = cv_timedwait_sig( &sq_cv, &sq_mtx, timo );
		}
		else {
			error = cv_timedwait( &sq_cv, &sq_mtx, timo );
		}

		if (error == EINTR) {
			if (l->l_wchan) {
				TAILQ_REMOVE(l->l_sleepq, l, l_sleepchain);
				l->l_wchan = NULL;
				l->l_sleepq = NULL;
			}
		}
	}
	mutex_exit(&sq_mtx);

#ifdef T2EX
	l->l_mutex = &spc_lock;
#endif

	if (timo != 0) {
		/*
		 * Even if the callout appears to have fired, we need to
		 * stop it in order to synchronise with other CPUs.
		 */
		if (callout_halt(&l->l_timeout_ch, NULL)) {
			error = EWOULDBLOCK;
		}
	}

	return error;
}

#ifdef T2EX
lwp_t *
sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp)
{
	struct lwp *l;
	bool found = false;

	TAILQ_FOREACH(l, sq, l_sleepchain) {
		if (l->l_wchan == wchan) {
			found = true;
			l->l_wchan = NULL;
		}
	}
	if (found)
		cv_broadcast(&sq_cv);

	mutex_spin_exit(mp);
	return NULL;
}
#else
/*
 * sleepq_wake:
 *
 *	Wake zero or more LWPs blocked on a single wait channel.
 */
lwp_t *
sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp)
{
	lwp_t *l, *next;
	int swapin = 0;

	KASSERT(mutex_owned(mp));

	for (l = TAILQ_FIRST(sq); l != NULL; l = next) {
		KASSERT(l->l_sleepq == sq);
		KASSERT(l->l_mutex == mp);
		next = TAILQ_NEXT(l, l_sleepchain);
		if (l->l_wchan != wchan)
			continue;
		swapin |= sleepq_remove(sq, l);
		if (--expected == 0)
			break;
	}

	mutex_spin_exit(mp);

#if 0
	/*
	 * If there are newly awakened threads that need to be swapped in,
	 * then kick the swapper into action.
	 */
	if (swapin)
		uvm_kick_scheduler();
#endif

	return l;
}