Example #1
/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
	set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
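The comment above is easier to follow next to the canonical caller pattern. A minimal sketch is shown below; it is illustrative only, and the wait-queue head "my_wq" and the "my_condition" flag are placeholders rather than anything taken from the examples on this page.

/* Sketch: typical prepare_to_wait()/finish_wait() loop.
 * my_wq and my_condition are placeholders; the waker sets the
 * condition and then calls wake_up(&my_wq). */
static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static int my_condition;

static void wait_for_my_condition(void)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
		if (my_condition)	/* re-check after the task state change */
			break;
		schedule();		/* sleep until wake_up(&my_wq) */
	}
	finish_wait(&my_wq, &wait);
}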
Example #2
static noinline void pci_wait_cfg(struct pci_dev *dev)
{
	DECLARE_WAITQUEUE(wait, current);

	__add_wait_queue(&pci_cfg_wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		raw_spin_unlock_irq(&pci_lock);
		schedule();
		raw_spin_lock_irq(&pci_lock);
	} while (dev->block_cfg_access);
	__remove_wait_queue(&pci_cfg_wait, &wait);
}
Example #3
// Deep-sleeper
void __sched sleep_on(wait_queue_head_t *q)
{
	unsigned long flags;
	wait_queue_t wait;

	init_waitqueue_entry(&wait, current);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, &wait);
	spin_unlock(&q->lock);
	schedule();
	spin_lock_irq(&q->lock);
	__remove_wait_queue(q, &wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
Example #4
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	/* Clear the WQ_FLAG_EXCLUSIVE flag from the wait entry's flags; the wake-up function checks this flag */
	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		/* Add the wait entry to the wait queue q */
		__add_wait_queue(q, wait);
	/* Set the current task's state (typically TASK_INTERRUPTIBLE) */
	set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
Example #5
static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;
	if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
		return -EFAULT;
	if (ucnt == ULLONG_MAX)
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ULLONG_MAX - ctx->count > ucnt)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (res = 0;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ULLONG_MAX - ctx->count > ucnt) {
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res > 0)) {
		ctx->count += ucnt;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, POLLIN);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}
Example #6
static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	ucnt = ctx->count;
	if (ucnt > 0)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (res = 0;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ctx->count > 0) {
				ucnt = ctx->count;
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (res > 0) {
		ctx->count = 0;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked(&ctx->wqh);
	}
	spin_unlock_irq(&ctx->wqh.lock);
	if (res > 0 && put_user(ucnt, (__u64 __user *) buf))
		return -EFAULT;

	return res;
}
Example #7
/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void fastcall
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
	/*
	 * don't alter the task state if this is just going to
	 * queue an async wait queue callback
	 */
	if (is_sync_wait(wait))
		set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
Example #8
void pin_kill(struct fs_pin *p)
{
	wait_queue_entry_t wait;

	if (!p) {
		rcu_read_unlock();
		return;
	}
	init_wait(&wait);
	spin_lock_irq(&p->wait.lock);
	if (likely(!p->done)) {
		p->done = -1;
		spin_unlock_irq(&p->wait.lock);
		rcu_read_unlock();
		p->kill(p);
		return;
	}
	if (p->done > 0) {
		spin_unlock_irq(&p->wait.lock);
		rcu_read_unlock();
		return;
	}
	__add_wait_queue(&p->wait, &wait);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&p->wait.lock);
		rcu_read_unlock();
		schedule();
		rcu_read_lock();
		if (likely(list_empty(&wait.entry)))
			break;
		/* OK, we know p couldn't have been freed yet */
		spin_lock_irq(&p->wait.lock);
		if (p->done > 0) {
			spin_unlock_irq(&p->wait.lock);
			break;
		}
	}
	rcu_read_unlock();
}
Example #9
/**
 * radeon_fence_enable_signaling - enable signalling on fence
 * @fence: fence
 *
 * This function is called with fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
static bool radeon_fence_enable_signaling(struct fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;

	if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq)
		return false;

//   if (down_read_trylock(&rdev->exclusive_lock))
         {
		radeon_irq_kms_sw_irq_get(rdev, fence->ring);

//       if (radeon_fence_activity(rdev, fence->ring))
//           wake_up_all_locked(&rdev->fence_queue);

		/* did fence get signaled after we enabled the sw irq? */
		if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) {
			radeon_irq_kms_sw_irq_put(rdev, fence->ring);
//           up_read(&rdev->exclusive_lock);
			return false;
		}

//       up_read(&rdev->exclusive_lock);
//   } else {
		/* we're probably in a lockup, lets not fiddle too much */
//       if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring))
//           rdev->fence_drv[fence->ring].delayed_irq = true;
//       radeon_fence_schedule_check(rdev, fence->ring);
	}

//	fence->fence_wake.flags = 0;
//	fence->fence_wake.private = NULL;
	fence->fence_wake.func = radeon_fence_check_signaled;
	__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
	fence_get(f);

	FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
	return true;
}
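The kernel-doc above describes a wait-queue entry whose callback checks whether the fence has signaled and, if so, removes itself. A rough sketch of that shape follows; struct my_fence, its signaled flag, and the arming helper are assumptions for illustration (this is not the actual radeon_fence_check_signaled), and the field names follow the older wait_queue_t layout used in this example.

/* Sketch: a custom wake function hung off a wait queue, in the style
 * the kernel-doc above describes.  All names here are illustrative. */
struct my_fence {
	wait_queue_t	fence_wake;	/* queued on the driver's wait queue */
	atomic_t	signaled;	/* set by the IRQ/waker path */
};

static int my_fence_check_signaled(wait_queue_t *wait, unsigned mode,
				   int flags, void *key)
{
	struct my_fence *fence = container_of(wait, struct my_fence, fence_wake);

	if (!atomic_read(&fence->signaled))
		return 0;		/* not signaled yet; stay on the queue */

	/* signaled: drop ourselves from the queue (queue lock is held here) */
	list_del_init(&wait->task_list);
	return 0;
}

static void my_fence_arm(struct my_fence *fence, wait_queue_head_t *queue)
{
	/* caller holds the queue lock, as in the example above */
	init_waitqueue_func_entry(&fence->fence_wake, my_fence_check_signaled);
	__add_wait_queue(queue, &fence->fence_wake);
}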
Example #10
/**
 * eventfd_ctx_read - Reads the eventfd counter or wait if it is zero.
 * @ctx: [in] Pointer to eventfd context.
 * @no_wait: [in] Different from zero if the operation should not block.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error codes:
 *
 * -EAGAIN      : The operation would have blocked but @no_wait was non-zero.
 * -ERESTARTSYS : A signal interrupted the wait operation.
 *
 * If @no_wait is zero, the function might sleep until the eventfd internal
 * counter becomes greater than zero.
 */
ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt)
{
	ssize_t res;
	DECLARE_WAITQUEUE(wait, current);

	spin_lock_irq(&ctx->wqh.lock);
	*cnt = 0;
	res = -EAGAIN;
	if (ctx->count > 0)
		res = 0;
	else if (!no_wait) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ctx->count > 0) {
				res = 0;
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res == 0)) {
		eventfd_ctx_do_read(ctx, cnt);
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, POLLOUT);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}
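As a usage note, here is a hedged sketch of an in-kernel caller that honours the documented return codes; drain_eventfd() and its error mapping are illustrative, and the context is assumed to have been obtained with eventfd_ctx_fdget().

/* Sketch: consuming the eventfd counter via eventfd_ctx_read().
 * The helper name and error mapping are illustrative only. */
static int drain_eventfd(struct eventfd_ctx *ctx, bool nonblock)
{
	__u64 cnt;
	ssize_t err = eventfd_ctx_read(ctx, nonblock ? 1 : 0, &cnt);

	if (err == -EAGAIN)		/* would block, but no_wait was non-zero */
		return 0;
	if (err == -ERESTARTSYS)	/* a signal interrupted the wait */
		return -EINTR;
	if (err)
		return err;

	pr_debug("eventfd counter was %llu\n", (unsigned long long)cnt);
	return 0;
}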
Example #11
ssize_t
gfsk_evfd_ctx_write(struct gfsk_evfd_ctx *ctx, int no_wait, int cnt)
{
	ssize_t res;
	DECLARE_WAITQUEUE(wait, current);

	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ctx->count < INT_MAX - 1)
		res = 1;
	else if (!no_wait) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ctx->count < INT_MAX - 1) {
				res = 1;
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res == 1)) {
		ctx->count += 1;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, POLLIN);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return (res);
}
Example #12
void
fusion_sleep_on(wait_queue_head_t *q, struct semaphore *lock, signed long *timeout)
{
     wait_queue_t wait;

     init_waitqueue_entry (&wait, current);

     current->state = TASK_INTERRUPTIBLE;

     write_lock (&q->lock);
     __add_wait_queue (q, &wait);
     write_unlock (&q->lock);

     up (lock);

     if (timeout)
          *timeout = schedule_timeout(*timeout);
     else
          schedule();

     write_lock (&q->lock);
     __remove_wait_queue (q, &wait);
     write_unlock (&q->lock);
}
Example #13
static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
                                              wait_queue_t *wait)
{
        wait->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue(q, wait);
}
Example #14
void
one_core_wq_wait( OneCore      *core,
                  OneWaitQueue *queue,
                  int          *timeout_ms )
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
     DEFINE_WAIT(wait);

     int timeout = 0;

     if (timeout_ms)
          timeout = *timeout_ms * HZ / 1000;


     D_MAGIC_ASSERT( core, OneCore );
     D_MAGIC_ASSERT( queue, OneWaitQueue );

     prepare_to_wait( &queue->queue, &wait, TASK_INTERRUPTIBLE );

     one_core_unlock( core );

     if (timeout_ms) {
          timeout = schedule_timeout(timeout);
          if (timeout) {
               timeout = timeout * 1000 / HZ;

               *timeout_ms = timeout ? timeout : 1;
          }
          else
               *timeout_ms = 0;
     }
     else
          schedule();

     one_core_lock( core );

     finish_wait( &queue->queue, &wait );
#else
     wait_queue_t wait;
     int          timeout = 0;

     if (timeout_ms)
          timeout = *timeout_ms * HZ / 1000;


     D_MAGIC_ASSERT( core, OneCore );
     D_MAGIC_ASSERT( queue, OneWaitQueue );

     init_waitqueue_entry(&wait, current);

     current->state = TASK_INTERRUPTIBLE;

     write_lock( &queue->queue.lock);
     __add_wait_queue( &queue->queue, &wait);
     write_unlock( &queue->queue.lock );

     one_core_unlock( core );

     if (timeout_ms) {
          timeout = schedule_timeout(timeout);
          if (timeout) {
               timeout = timeout * 1000 / HZ;

               *timeout_ms = timeout ? timeout : 1;
          }
          else
               *timeout_ms = 0;
     }
     else
          schedule();

     one_core_lock( core );

     write_lock( &queue->queue.lock );
     __remove_wait_queue( &queue->queue, &wait );
     write_unlock( &queue->queue.lock );
#endif
}
Example #15
/*
 * The associated lock must be locked on entry.  It is unlocked on return.
 *
 * Return values:
 *
 * n < 0 : interrupted,  -n jiffies remaining on timeout, or -1 if timeout == 0
 * n = 0 : timeout expired
 * n > 0 : sv_signal()'d, n jiffies remaining on timeout, or 1 if timeout == 0
 */
signed long sv_wait(sv_t *sv, int sv_wait_flags, unsigned long timeout) 
{
	DECLARE_WAITQUEUE( wait, current );
	unsigned long flags;
	signed long ret = 0;

#ifdef SV_DEBUG_INTERRUPT_STATE
	{
	unsigned long flags;
	__save_flags(flags);

	if(sv->sv_flags & SV_INTS) {
		if(SV_TEST_INTERRUPTS_ENABLED(flags)) {
			printk(KERN_ERR "sv_wait: SV_INTS and interrupts "
			       "enabled (flags: 0x%lx)\n", flags);
			BUG();
		}
	} else {
		if (SV_TEST_INTERRUPTS_DISABLED(flags)) {
			printk(KERN_WARNING "sv_wait: !SV_INTS and interrupts "
			       "disabled! (flags: 0x%lx)\n", flags);
		}
	}
	}
#endif  /* SV_DEBUG_INTERRUPT_STATE */

	sv_lock(sv);

	sv->sv_mon_unlock_func(sv->sv_mon_lock);

	/* Add ourselves to the wait queue and set the state before
	 * releasing the sv_lock so as to avoid racing with the
	 * wake_up() in sv_signal() and sv_broadcast(). 
	 */

	/* don't need the _irqsave part, but there is no wq_write_lock() */
	wq_write_lock_irqsave(&sv->sv_waiters.lock, flags);

#ifdef EXCLUSIVE_IN_QUEUE
	wait.flags |= WQ_FLAG_EXCLUSIVE;
#endif

	switch(sv->sv_flags & SV_ORDER_MASK) {
	case SV_ORDER_FIFO:
		__add_wait_queue_tail(&sv->sv_waiters, &wait);
		break;
	case SV_ORDER_FILO:
		__add_wait_queue(&sv->sv_waiters, &wait);
		break;
	default:
		printk(KERN_ERR "sv_wait: unknown order!  (sv: 0x%p, flags: 0x%x)\n",
					(void *)sv, sv->sv_flags);
		BUG();
	}
	wq_write_unlock_irqrestore(&sv->sv_waiters.lock, flags);

	if(sv_wait_flags & SV_WAIT_SIG)
		set_current_state(TASK_EXCLUSIVE | TASK_INTERRUPTIBLE  );
	else
		set_current_state(TASK_EXCLUSIVE | TASK_UNINTERRUPTIBLE);

	spin_unlock(&sv->sv_lock);

	if(sv->sv_flags & SV_INTS)
		local_irq_enable();
	else if(sv->sv_flags & SV_BHS)
		local_bh_enable();

	if (timeout)
		ret = schedule_timeout(timeout);
	else
		schedule();

	if(current->state != TASK_RUNNING) /* XXX Is this possible? */ {
		printk(KERN_ERR "sv_wait: state not TASK_RUNNING after "
		       "schedule().\n");
		set_current_state(TASK_RUNNING);
	}

	remove_wait_queue(&sv->sv_waiters, &wait);

	/* Return cases:
	   - woken by a sv_signal/sv_broadcast
	   - woken by a signal
	   - woken by timeout expiring
	*/

	/* XXX This isn't really accurate; we may have been woken
           before the signal anyway.... */
	if(signal_pending(current))
		return timeout ? -ret : -1;
	return timeout ? ret : 1;
}
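Finally, a hedged sketch of a caller that follows the locking and return-value contract described in the comment above sv_wait(). The names, the spinlock monitor lock, and the error mapping are assumptions; the monitor lock is really whatever was registered when the sv_t was initialised.

/* Sketch: waiting on an sv_t with a timeout.  Placeholder names only. */
static int wait_for_my_event(sv_t *my_sv, spinlock_t *my_mon_lock,
			     unsigned long timeout)
{
	signed long ret;

	spin_lock(my_mon_lock);		/* monitor lock must be held on entry */
	ret = sv_wait(my_sv, SV_WAIT_SIG, timeout);
	/* sv_wait() has dropped the monitor lock for us by the time it returns */

	if (ret < 0)			/* interrupted by a signal */
		return -EINTR;
	if (ret == 0 && timeout)	/* timeout expired */
		return -ETIMEDOUT;
	return 0;			/* woken by sv_signal()/sv_broadcast() */
}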