Example #1
/**
 * schedule_hrtimeout_range_clock - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout, in nanoseconds
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 * @clock:	timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME
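 *
 * Returns 0 when the timeout expired, or -EINTR if the sleep was woken
 * up (by a signal or an explicit wakeup) before the timer fired.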
 */
int __sched
schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
			       const enum hrtimer_mode mode, int clock)
{
	struct hrtimer_sleeper t;

	/*
	 * Optimize when a zero timeout value is given. It does not
	 * matter whether this is an absolute or a relative time.
	 */
	if (expires && !expires->tv64) {
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	/*
	 * A NULL parameter means "infinite"
	 */
	if (!expires) {
		schedule();
		__set_current_state(TASK_RUNNING);
		return -EINTR;
	}

	hrtimer_init_on_stack(&t.timer, clock, mode);
	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);

	hrtimer_init_sleeper(&t, current);

	hrtimer_start_expires(&t.timer, mode);
	if (!hrtimer_active(&t.timer))
		t.task = NULL;

	if (likely(t.task))
		schedule();

	hrtimer_cancel(&t.timer);
	destroy_hrtimer_on_stack(&t.timer);

	__set_current_state(TASK_RUNNING);

	return !t.task ? 0 : -EINTR;
}
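A minimal usage sketch (not part of the original source): callers are expected to set the task state themselves, since the function only restores TASK_RUNNING on exit. The helper name below is hypothetical; the kernel's own schedule_hrtimeout_range() is essentially this call with CLOCK_MONOTONIC hardwired.

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/* Hypothetical helper: sleep for roughly 10 ms with 1 ms of slack. */
static int example_sleep_10ms(void)
{
	ktime_t expires = ktime_set(0, 10 * NSEC_PER_MSEC);

	set_current_state(TASK_INTERRUPTIBLE);
	return schedule_hrtimeout_range_clock(&expires, NSEC_PER_MSEC,
					      HRTIMER_MODE_REL,
					      CLOCK_MONOTONIC);
}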
Example #2
static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	hrtimer_init_sleeper(t, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start_expires(&t->timer, mode);

		if (likely(t->task))
			freezable_schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);

	return t->task == NULL;
}
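A hedged caller sketch, loosely modeled on the era's hrtimer_nanosleep(): build the on-stack sleeper, arm its expiry, and let do_nanosleep() run the wait loop. The wrapper name is hypothetical, and the real syscall path additionally copies the remaining time back to userspace and records restart state, both omitted here.

/* Hypothetical wrapper; assumes a relative timeout on CLOCK_MONOTONIC. */
static long example_nanosleep(const struct timespec *rqtp)
{
	struct hrtimer_sleeper t;
	int slept;

	hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_set_expires(&t.timer, timespec_to_ktime(*rqtp));

	slept = do_nanosleep(&t, HRTIMER_MODE_REL);

	destroy_hrtimer_on_stack(&t.timer);
	/* do_nanosleep() returns t->task == NULL: nonzero iff it slept out. */
	return slept ? 0 : -ERESTART_RESTARTBLOCK;
}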
Example #3
int __sched
schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
			       const enum hrtimer_mode mode, int clock)
{
	struct hrtimer_sleeper t;

	if (expires && !expires->tv64) {
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	if (!expires) {
		schedule();
		__set_current_state(TASK_RUNNING);
		return -EINTR;
	}

	hrtimer_init_on_stack(&t.timer, clock, mode);
	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);

	hrtimer_init_sleeper(&t, current);

	hrtimer_start_expires(&t.timer, mode);
	if (!hrtimer_active(&t.timer))
		t.task = NULL;

	if (likely(t.task))
		schedule();

	hrtimer_cancel(&t.timer);
	destroy_hrtimer_on_stack(&t.timer);

	__set_current_state(TASK_RUNNING);

	return !t.task ? 0 : -EINTR;
}
Example #4
static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	struct restart_block *restart;

	hrtimer_init_sleeper(t, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start_expires(&t->timer, mode);

		if (likely(t->task))
			freezable_schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);

	if (!t->task)
		return 0;

	restart = &current->restart_block;
	if (restart->nanosleep.type != TT_NONE) {
		ktime_t rem = hrtimer_expires_remaining(&t->timer);
		struct timespec64 rmt;

		if (rem <= 0)
			return 0;
		rmt = ktime_to_timespec64(rem);

		return nanosleep_copyout(restart, &rmt);
	}
	return -ERESTART_RESTARTBLOCK;
}
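This variant returns 0 once the full sleep has elapsed, and otherwise copies the remaining time back or asks for a syscall restart. A hedged sketch of the surrounding caller, closely modeled on mainline hrtimer_nanosleep() of the same era (the function name is invented, and the restart fields and handler name are assumptions that may differ between versions):

static long example_hrtimer_nanosleep(const struct timespec64 *rqtp,
				      enum hrtimer_mode mode, clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer_sleeper t;
	int ret;

	hrtimer_init_on_stack(&t.timer, clockid, mode);
	hrtimer_set_expires_range_ns(&t.timer, timespec64_to_ktime(*rqtp),
				     current->timer_slack_ns);
	ret = do_nanosleep(&t, mode);
	if (ret != -ERESTART_RESTARTBLOCK)
		goto out;

	/* Absolute timers do not update the rmtp value and restart: */
	if (mode == HRTIMER_MODE_ABS) {
		ret = -ERESTARTNOHAND;
		goto out;
	}

	restart = &current->restart_block;
	restart->fn = hrtimer_nanosleep_restart;	/* assumed handler name */
	restart->nanosleep.clockid = clockid;
	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}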
Example #5
static int CVE_2014_0205_linux2_6_30_2_futex_wait(u32 __user *uaddr, int fshared,
		      u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
{
	struct task_struct *curr = current;
	struct restart_block *restart;
	DECLARE_WAITQUEUE(wait, curr);
	struct futex_hash_bucket *hb;
	struct futex_q q;
	u32 uval;
	int ret;
	struct hrtimer_sleeper t;
	int rem = 0;

	if (!bitset)
		return -EINVAL;

	q.pi_state = NULL;
	q.bitset = bitset;
retry:
	q.key = FUTEX_KEY_INIT;
	ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;

retry_private:
	hb = queue_lock(&q);

	/*
	 * Access the page AFTER the hash-bucket is locked.
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) CVE_2014_0205_linux2_6_30_2_futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond.  If we queued after testing *uaddr, that would open
	 * a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * A consequence is that CVE_2014_0205_linux2_6_30_2_futex_wait() can return zero and absorb
	 * a wakeup when *uaddr != val on entry to the syscall.  This is
	 * rare, but normal.
	 *
	 * For shared futexes, we hold the mmap semaphore, so the mapping
	 * cannot have changed since we looked it up in get_futex_key.
	 */
	ret = get_futex_value_locked(&uval, uaddr);

	if (unlikely(ret)) {
		queue_unlock(&q, hb);

		ret = get_user(uval, uaddr);
		if (ret)
			goto out_put_key;

		if (!fshared)
			goto retry_private;

		put_futex_key(fshared, &q.key);
		goto retry;
	}
	ret = -EWOULDBLOCK;
	if (unlikely(uval != val)) {
		queue_unlock(&q, hb);
		goto out_put_key;
	}

	/* Only actually queue if *uaddr contained val.  */
	queue_me(&q, hb);

	/*
	 * There might have been scheduling since the queue_me(), as we
	 * cannot hold a spinlock across the get_user() in case it
	 * faults, and we cannot just set TASK_INTERRUPTIBLE state when
	 * queueing ourselves into the futex hash.  This code thus has to
	 * rely on the futex_wake() code removing us from hash when it
	 * wakes us up.
	 */

	/* add_wait_queue is the barrier after __set_current_state. */
	__set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&q.waiter, &wait);
	/*
	 * !plist_node_empty() is safe here without any lock.
	 * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
	 */
	if (likely(!plist_node_empty(&q.list))) {
		if (!abs_time)
			schedule();
		else {
			hrtimer_init_on_stack(&t.timer,
					      clockrt ? CLOCK_REALTIME :
					      CLOCK_MONOTONIC,
					      HRTIMER_MODE_ABS);
			hrtimer_init_sleeper(&t, current);
			hrtimer_set_expires_range_ns(&t.timer, *abs_time,
						     current->timer_slack_ns);

			hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
			if (!hrtimer_active(&t.timer))
				t.task = NULL;

			/*
			 * the timer could have already expired, in which
			 * case current would be flagged for rescheduling.
			 * Don't bother calling schedule.
			 */
			if (likely(t.task))
				schedule();

			hrtimer_cancel(&t.timer);

			/* Flag if a timeout occurred */
			rem = (t.task == NULL);

			destroy_hrtimer_on_stack(&t.timer);
		}
	}
	__set_current_state(TASK_RUNNING);

	/*
	 * NOTE: we don't remove ourselves from the waitqueue because
	 * we are the only user of it.
	 */

	/* If we were woken (and unqueued), we succeeded, whatever. */
	ret = 0;
	if (!unqueue_me(&q))
		goto out_put_key;
	ret = -ETIMEDOUT;
	if (rem)
		goto out_put_key;

	/*
	 * We expect signal_pending(current), but another thread may
	 * have handled it for us already.
	 */
	ret = -ERESTARTSYS;
	if (!abs_time)
		goto out_put_key;

	restart = &current_thread_info()->restart_block;
	restart->fn = CVE_2014_0205_linux2_6_30_2_futex_wait_restart;
	restart->futex.uaddr = (u32 *)uaddr;
	restart->futex.val = val;
	restart->futex.time = abs_time->tv64;
	restart->futex.bitset = bitset;
	restart->futex.flags = 0;

	if (fshared)
		restart->futex.flags |= FLAGS_SHARED;
	if (clockrt)
		restart->futex.flags |= FLAGS_CLOCKRT;

	ret = -ERESTART_RESTARTBLOCK;

out_put_key:
	put_futex_key(fshared, &q.key);
out:
	return ret;
}
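The restart handler installed above (CVE_2014_0205_linux2_6_30_2_futex_wait_restart) is not included in this example. A hedged reconstruction, modeled on the 2.6.30 futex_wait_restart(): it unpacks the saved state and re-enters the wait with the same absolute timeout.

static long CVE_2014_0205_linux2_6_30_2_futex_wait_restart(struct restart_block *restart)
{
	u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
	int fshared = 0;
	ktime_t t;

	t.tv64 = restart->futex.time;
	restart->fn = do_no_restart_syscall;
	if (restart->futex.flags & FLAGS_SHARED)
		fshared = 1;
	return (long)CVE_2014_0205_linux2_6_30_2_futex_wait(uaddr, fshared,
				restart->futex.val, &t, restart->futex.bitset,
				restart->futex.flags & FLAGS_CLOCKRT);
}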
Example #6
/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock)
{
	int ret = 0, saved_lock_depth = -1;
	struct rt_mutex_waiter waiter;
	unsigned long flags;

	debug_rt_mutex_init_waiter(&waiter);

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	init_lists(lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock, current, NULL)) {
		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout)) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock, flags, 0);

	/*
	 * We drop the BKL here before we go into the wait loop to avoid a
	 * possible deadlock in the scheduler.
	 *
	 * Note: This must be done after we call task_blocks_on_rt_mutex
	 *  because rt_release_bkl() releases the wait_lock and will
	 *  cause a race between setting the mark waiters flag in
	 *  the owner field and adding this task to the wait list. Those
	 *  two must be done within the protection of the wait_lock.
	 */
	if (unlikely(current->lock_depth >= 0))
		saved_lock_depth = rt_release_bkl(lock, flags);

	if (likely(!ret))
		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter, flags);

	set_current_state(TASK_RUNNING);

	if (unlikely(ret))
		remove_waiter(lock, &waiter, flags);
	BUG_ON(!plist_node_empty(&waiter.list_entry));

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	/* Must we reacquire the BKL? */
	if (unlikely(saved_lock_depth >= 0))
		rt_reacquire_bkl(saved_lock_depth);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}
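A hedged sketch of how a timed caller would feed this slow path: the sleeper must be initialized and given an absolute expiry before the call, because rt_mutex_slowlock() only arms and later cancels the timer. The helper name is hypothetical.

/* Hypothetical timed-lock helper around the slow path above. */
static int example_rt_mutex_timed_lock(struct rt_mutex *lock, ktime_t abs_time)
{
	struct hrtimer_sleeper timeout;
	int ret;

	hrtimer_init_on_stack(&timeout.timer, CLOCK_MONOTONIC,
			      HRTIMER_MODE_ABS);
	hrtimer_init_sleeper(&timeout, current);
	hrtimer_set_expires(&timeout.timer, abs_time);

	ret = rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, &timeout, 0);

	destroy_hrtimer_on_stack(&timeout.timer);
	return ret;	/* 0 on success, -EINTR on signal, -ETIMEDOUT on expiry */
}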
Example #7
static void rcu_timer_restart(void)
{
	pr_info("JRCU: starting timer. rate is %d Hz\n", RCU_HZ);
	hrtimer_forward_now(&rcu_timer, ns_to_ktime(RCU_PERIOD_NS));
	hrtimer_start_expires(&rcu_timer, HRTIMER_MODE_ABS);
}
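This helper presumes an rcu_timer that is already initialized and an RCU_PERIOD_NS period defined elsewhere in the JRCU code. A hedged sketch of that setup (the callback and init names are invented for illustration): hrtimer_start_expires() in ABS mode needs a valid expiry, which hrtimer_forward_now() establishes relative to the current time.

static struct hrtimer rcu_timer;	/* name matches the code above */

/* Hypothetical periodic callback: rearm one period ahead and refire. */
static enum hrtimer_restart example_rcu_timer_cb(struct hrtimer *t)
{
	/* ... per-period JRCU work would go here ... */
	hrtimer_forward_now(t, ns_to_ktime(RCU_PERIOD_NS));
	return HRTIMER_RESTART;
}

static void example_rcu_timer_init(void)
{
	hrtimer_init(&rcu_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	rcu_timer.function = example_rcu_timer_cb;
}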