Example 1
unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout)
{
        might_sleep();

        spin_lock_irq(&x->wait.lock);
        if (!x->done) {
                DECLARE_WAITQUEUE(wait, current);

                wait.flags |= WQ_FLAG_EXCLUSIVE;
                __add_wait_queue_tail(&x->wait, &wait);
                do {
                        __set_current_state(TASK_UNINTERRUPTIBLE);
                        spin_unlock_irq(&x->wait.lock);
                        timeout = schedule_timeout(timeout);
                        spin_lock_irq(&x->wait.lock);
                        if (!timeout) {
                                __remove_wait_queue(&x->wait, &wait);
                                goto out;
                        }
                } while (!x->done);
                __remove_wait_queue(&x->wait, &wait);
        }
        x->done--;
out:
        spin_unlock_irq(&x->wait.lock);
        return timeout;
}
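
The return convention of wait_for_completion_timeout() is easy to misread: 0 means the full timeout elapsed, while a positive value is the number of jiffies that remained when the completion fired. Below is a minimal caller sketch, assuming a hypothetical completion dev_done and a 100 ms budget (neither appears in the examples here):

/* Hypothetical caller; `dev_done` and the 100 ms budget are illustrative only. */
static DECLARE_COMPLETION(dev_done);

static int wait_for_device(void)
{
	unsigned long left;

	left = wait_for_completion_timeout(&dev_done, msecs_to_jiffies(100));
	if (!left)
		return -ETIMEDOUT;	/* 0: the timeout elapsed */

	return 0;			/* > 0: completed with `left` jiffies to spare */
}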
Example 2
void __poll_freewait(poll_table* pt, wait_queue_t *wait)
{
	struct poll_table_page * p = pt->table;
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		if (entry == p->entries) /* may happen with async poll */
			break;
		do {
			entry--;
			if (wait != &entry->wait)
				remove_wait_queue(entry->wait_address,&entry->wait);
			else
				__remove_wait_queue(entry->wait_address,&entry->wait);
			fput(entry->filp);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		if (old->size == PAGE_SIZE)
			free_page((unsigned long) old);
	}
	if (pt->iocb)
		kmem_cache_free(async_poll_table_cache, pt);
}
Example 3
/* ARGSUSED */
void
lock_wait(wait_queue_head_t *q, spinlock_t *lock, int rw)
{
	DECLARE_WAITQUEUE( wait, current );

	__set_current_state(TASK_UNINTERRUPTIBLE);

	wq_write_lock(&q->lock);
	if (rw) {
		__add_wait_queue_tail(q, &wait);
	} else {
		__add_wait_queue(q, &wait);
	}

	wq_write_unlock(&q->lock);
	spin_unlock(lock);

	schedule();

	wq_write_lock(&q->lock);
	__remove_wait_queue(q, &wait);
	wq_write_unlock(&q->lock);

	spin_lock(lock);

	/* return with lock held */
}
Example 4
void
fusion_sleep_on(wait_queue_head_t * q, struct semaphore *lock,
		signed long *timeout)
{
	wait_queue_t wait;

	init_waitqueue_entry(&wait, current);

	current->state = TASK_INTERRUPTIBLE;

	write_lock(&q->lock);
	__add_wait_queue(q, &wait);
	write_unlock(&q->lock);

	up(lock);

	if (timeout)
		*timeout = schedule_timeout(*timeout);
	else
		schedule();

	write_lock(&q->lock);
	__remove_wait_queue(q, &wait);
	write_unlock(&q->lock);
}
Example 5
static inline long __sched
do_wait_for_common(struct completion *x,
		   long (*action)(long), long timeout, int state)
{
	if (!x->done) {
		DECLARE_WAITQUEUE(wait, current);

		__add_wait_queue_tail_exclusive(&x->wait, &wait);
		do {
			if (signal_pending_state(state, current)) {
				timeout = -ERESTARTSYS;
				break;
			}
			__set_current_state(state);
			spin_unlock_irq(&x->wait.lock);
			timeout = action(timeout);
			spin_lock_irq(&x->wait.lock);
		} while (!x->done && timeout);
		__remove_wait_queue(&x->wait, &wait);
		if (!x->done)
			return timeout;
	}
	x->done--;
	return timeout ?: 1;
}
Example 6
void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__remove_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
Example 7
void __wtd_down_waiter(wait_queue_t *wait)
{
	struct worktodo *wtd = (struct worktodo *)wait;
	struct semaphore *sem = wtd->data;

	__remove_wait_queue(&sem->wait, &wtd->wait);
	wtd_push(wtd, __wtd_down_action, wtd);
	wtd_queue(wtd);
}
Example 8
static noinline void pci_wait_cfg(struct pci_dev *dev)
{
	DECLARE_WAITQUEUE(wait, current);

	__add_wait_queue(&pci_cfg_wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		raw_spin_unlock_irq(&pci_lock);
		schedule();
		raw_spin_lock_irq(&pci_lock);
	} while (dev->block_cfg_access);
	__remove_wait_queue(&pci_cfg_wait, &wait);
}
Example 9
/**
 * eventfd_ctx_remove_wait_queue - Read the current counter and removes wait queue.
 * @ctx: [in] Pointer to eventfd context.
 * @wait: [in] Wait queue to be removed.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error codes:
 *
 * -EAGAIN      : The operation would have blocked.
 *
 * This is used to atomically remove a wait queue entry from the eventfd wait
 * queue head, and read/reset the counter value.
 */
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait,
				  __u64 *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	eventfd_ctx_do_read(ctx, cnt);
	__remove_wait_queue(&ctx->wqh, wait);
	if (*cnt != 0 && waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, POLLOUT);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return *cnt != 0 ? 0 : -EAGAIN;
}
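
As a hedged illustration of the intended call pattern: a consumer typically hooks a wait entry into the eventfd with init_waitqueue_func_entry()/add_wait_queue() (the KVM irqfd code is one such user), and on teardown calls eventfd_ctx_remove_wait_queue() to detach the entry and drain the counter in one atomic step. The struct and function names below are hypothetical:

/* Illustrative consumer; `my_consumer` and `my_consumer_shutdown` are made up. */
struct my_consumer {
	struct eventfd_ctx *eventfd;	/* obtained earlier, e.g. via eventfd_ctx_fdget() */
	wait_queue_t wait;		/* entry previously added to the eventfd's wqh */
};

static void my_consumer_shutdown(struct my_consumer *c)
{
	__u64 cnt;

	/*
	 * Remove our wait entry and read/reset the counter atomically;
	 * -EAGAIN merely means the counter was already zero.
	 */
	eventfd_ctx_remove_wait_queue(c->eventfd, &c->wait, &cnt);
}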
Example 10
/* Deep sleeper: park the caller in an uninterruptible sleep on q */
void __sched sleep_on(wait_queue_head_t *q)
{
	unsigned long flags;
	wait_queue_t wait;

	init_waitqueue_entry(&wait, current);
	__set_current_state(TASK_UNINTERRUPTIBLE);

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, &wait);
	spin_unlock(&q->lock);

	schedule();

	spin_lock_irq(&q->lock);
	__remove_wait_queue(q, &wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
Example 11
static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;
	if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
		return -EFAULT;
	if (ucnt == ULLONG_MAX)
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ULLONG_MAX - ctx->count > ucnt)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (res = 0;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ULLONG_MAX - ctx->count > ucnt) {
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res > 0)) {
		ctx->count += ucnt;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, POLLIN);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}
Example 12
static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	ucnt = ctx->count;
	if (ucnt > 0)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (res = 0;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ctx->count > 0) {
				ucnt = ctx->count;
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (res > 0) {
		ctx->count = 0;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked(&ctx->wqh);
	}
	spin_unlock_irq(&ctx->wqh.lock);
	if (res > 0 && put_user(ucnt, (__u64 __user *) buf))
		return -EFAULT;

	return res;
}
Example 13
/**
 * eventfd_ctx_read - Reads the eventfd counter or wait if it is zero.
 * @ctx: [in] Pointer to eventfd context.
 * @no_wait: [in] Different from zero if the operation should not block.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error codes:
 *
 * -EAGAIN      : The operation would have blocked but @no_wait was non-zero.
 * -ERESTARTSYS : A signal interrupted the wait operation.
 *
 * If @no_wait is zero, the function might sleep until the eventfd internal
 * counter becomes greater than zero.
 */
ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt)
{
	ssize_t res;
	DECLARE_WAITQUEUE(wait, current);

	spin_lock_irq(&ctx->wqh.lock);
	*cnt = 0;
	res = -EAGAIN;
	if (ctx->count > 0)
		res = 0;
	else if (!no_wait) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ctx->count > 0) {
				res = 0;
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res == 0)) {
		eventfd_ctx_do_read(ctx, cnt);
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, POLLOUT);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}
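
For completeness, a sketch of how an in-kernel user might consume this API: pass no_wait = 0 to allow sleeping, and treat -EAGAIN or -ERESTARTSYS as "nothing read". The helper name below is hypothetical:

/* Hypothetical in-kernel reader built on eventfd_ctx_read(). */
static int drain_eventfd(struct eventfd_ctx *ctx)
{
	__u64 cnt;
	ssize_t ret;

	ret = eventfd_ctx_read(ctx, 0 /* may block */, &cnt);
	if (ret < 0)
		return ret;		/* -EAGAIN or -ERESTARTSYS */

	pr_info("eventfd counter was %llu\n", (unsigned long long)cnt);
	return 0;
}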
Example 14
ssize_t
gfsk_evfd_ctx_write(struct gfsk_evfd_ctx *ctx, int no_wait, int cnt)
{
	ssize_t res;
	DECLARE_WAITQUEUE(wait, current);

	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ctx->count < INT_MAX - 1)
		res = 1;
	else if (!no_wait) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ctx->count < INT_MAX - 1) {
				res = 1;
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res == 1)) {
		ctx->count += 1;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, POLLIN);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return (res);
}
Example 15
void
one_core_wq_wait( OneCore      *core,
                  OneWaitQueue *queue,
                  int          *timeout_ms )
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
     DEFINE_WAIT(wait);

     int timeout = 0;

     if (timeout_ms)
          timeout = *timeout_ms * HZ / 1000;


     D_MAGIC_ASSERT( core, OneCore );
     D_MAGIC_ASSERT( queue, OneWaitQueue );

     prepare_to_wait( &queue->queue, &wait, TASK_INTERRUPTIBLE );

     one_core_unlock( core );

     if (timeout_ms) {
          timeout = schedule_timeout(timeout);
          if (timeout) {
               timeout = timeout * 1000 / HZ;

               *timeout_ms = timeout ? timeout : 1;
          }
          else
               *timeout_ms = 0;
     }
     else
          schedule();

     one_core_lock( core );

     finish_wait( &queue->queue, &wait );
#else
     wait_queue_t wait;
     int          timeout = 0;

     if (timeout_ms)
          timeout = *timeout_ms * HZ / 1000;


     D_MAGIC_ASSERT( core, OneCore );
     D_MAGIC_ASSERT( queue, OneWaitQueue );

     init_waitqueue_entry(&wait, current);

     current->state = TASK_INTERRUPTIBLE;

     write_lock( &queue->queue.lock);
     __add_wait_queue( &queue->queue, &wait);
     write_unlock( &queue->queue.lock );

     one_core_unlock( core );

     if (timeout_ms) {
          timeout = schedule_timeout(timeout);
          if (timeout) {
               timeout = timeout * 1000 / HZ;

               *timeout_ms = timeout ? timeout : 1;
          }
          else
               *timeout_ms = 0;
     }
     else
          schedule();

     one_core_lock( core );

     write_lock( &queue->queue.lock );
     __remove_wait_queue( &queue->queue, &wait );
     write_unlock( &queue->queue.lock );
#endif
}
Example 16
static void skb_async_read_waiter(wait_queue_t *wait)
{
	struct skb_async_info *info = (void *)wait;
	__remove_wait_queue(info->sk->sleep, &info->wtd.wait);
	wtd_queue(&info->wtd);
}
Example 17
// ARM10C 20170830
// x: &kthreadd_done, action: schedule_timeout, timeout: 0x7FFFFFFF, state: 2
static inline long __sched
do_wait_for_common(struct completion *x,
		   long (*action)(long), long timeout, int state)
{
	// x->done: (&kthreadd_done)->done: 0
	if (!x->done) {
		// current: kmem_cache#15-oX (struct task_struct) (pid: 1)
		DECLARE_WAITQUEUE(wait, current);
		// DECLARE_WAITQUEUE(wait, kmem_cache#15-oX (struct task_struct) (pid: 1)):
		// wait_queue_t wait =
		// {
		//     .private	= kmem_cache#15-oX (struct task_struct) (pid: 1),
		//     .func = default_wake_function,
		//     .task_list = { NULL, NULL }
		// }

		// &x->wait: &(&kthreadd_done)->wait
		__add_wait_queue_tail_exclusive(&x->wait, &wait);

		// What __add_wait_queue_tail_exclusive did:
		// (&wait)->flags: ? | 0x01
		// inserted &(&wait)->task_list between &(&(&kthreadd_done)->wait)->task_list and (&(&(&kthreadd_done)->wait)->task_list)->prev
		// in short, &(&wait)->task_list was appended to the tail of the list head &(&(&kthreadd_done)->wait)->task_list
		//
		// (&(&(&kthreadd_done)->wait)->task_list)->prev = &(&wait)->task_list;
		// (&(&wait)->task_list)->next = &(&(&kthreadd_done)->wait)->task_list;
		// (&(&wait)->task_list)->prev = &(&(&kthreadd_done)->wait)->task_list;
		// (&(&(&kthreadd_done)->wait)->task_list)->next = &(&wait)->task_list;

// 2017/08/30: stopped here
// 2017/09/06: resumed here

		do {
			// state: 2, current: kmem_cache#15-oX (struct task_struct) (pid: 1)
			// signal_pending_state(2, kmem_cache#15-oX (struct task_struct) (pid: 1)): 0
			if (signal_pending_state(state, current)) {
				timeout = -ERESTARTSYS;
				break;
			}

			// state: 2
			__set_current_state(state);

			// What __set_current_state did:
			// (kmem_cache#15-oX (struct task_struct) (pid: 1))->state: 2

			// &x->wait.lock: &(&kthreadd_done)->wait.lock
			spin_unlock_irq(&x->wait.lock);

			// What spin_unlock_irq did:
			// performed a spin unlock using &(&kthreadd_done)->wait.lock

			// action: schedule_timeout, timeout: 0x7FFFFFFF
			timeout = action(timeout);
			spin_lock_irq(&x->wait.lock);
		} while (!x->done && timeout);
		__remove_wait_queue(&x->wait, &wait);
		if (!x->done)
			return timeout;
	}
	x->done--;
	return timeout ?: 1;
}
Example 18
/* This is the function that actually puts the process calling epoll_wait() to sleep... */
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
		   int maxevents, long timeout)
{
	int res, eavail;
	unsigned long flags;
	long jtimeout;
	wait_queue_t wait;

	/*
	 * Calculate the timeout by checking for the "infinite" value (-1)
	 * and the overflow condition. The passed timeout is in milliseconds,
	 * that is why (t * HZ) / 1000.
	 */
	/* Compute how long to sleep; the milliseconds must be converted to HZ (jiffies) */
	jtimeout = (timeout < 0 || timeout >= EP_MAX_MSTIMEO) ?
		MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000;

retry:
	spin_lock_irqsave(&ep->lock, flags);

	res = 0;
	/* If the ready list is not empty, skip the sleep and get straight to work... */
	if (list_empty(&ep->rdllist)) {
		/*
		 * We don't have any available event to return to the caller.
		 * We need to sleep here, and we will be wake up by
		 * ep_poll_callback() when events will become available.
		 */
		/* OK, set up a wait queue entry, ready to suspend ourselves;
		 * note that current is a macro referring to the current process */
		init_waitqueue_entry(&wait, current);
		__add_wait_queue_exclusive(&ep->wq, &wait);

		for (;;) {
			/*
			 * We don't want to sleep if the ep_poll_callback() sends us
			 * a wakeup in between. That's why we set the task state
			 * to TASK_INTERRUPTIBLE before doing the checks.
			 */
			/* Mark the current process as sleeping, but in a state that signals
			 * can still wake; note this is "future tense", we are not asleep yet! */
			set_current_state(TASK_INTERRUPTIBLE);
			/* If by now the ready list has entries, or the sleep time has
			 * already run out, simply don't sleep... */
			if (!list_empty(&ep->rdllist) || !jtimeout)
				break;
			/* If a signal is pending, get up as well... */
			if (signal_pending(current)) {
				res = -EINTR;
				break;
			}
			/* Nothing to do: unlock and go to sleep... */
			spin_unlock_irqrestore(&ep->lock, flags);
			/* We will be woken once jtimeout expires; if ep_poll_callback()
			 * is invoked before that, we are woken immediately and do not
			 * have to wait out the timeout...
			 * To stress it once more: when ep_poll_callback() gets called is
			 * decided by the concrete implementation of the monitored fd,
			 * e.g. a socket or some device driver, since they are the ones
			 * holding the wait queue head; epoll and the current process
			 * simply wait...
			 */
			jtimeout = schedule_timeout(jtimeout);
			spin_lock_irqsave(&ep->lock, flags);
		}
		__remove_wait_queue(&ep->wq, &wait);

		/* OK, we are awake now... */
		set_current_state(TASK_RUNNING);
	}
	/* Is it worth to try to dig for events ? */
	eavail = !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;

	spin_unlock_irqrestore(&ep->lock, flags);

	/*
	 * Try to transfer events to user space. In case we get 0 events and
	 * there's still timeout left over, we go trying again in search of
	 * more luck.
	 */
	/* If everything went well and events occurred, start preparing the data to copy to user space... */
	if (!res && eavail &&
	    !(res = ep_send_events(ep, events, maxevents)) && jtimeout)
		goto retry;

	return res;
}