Example #1
/*
 *  Same as __wtd_down, but sem->sleepers is not incremented when
 *  coming from a wakeup.
 */
void wtd_down_common(struct semaphore *sem, struct worktodo *wtd,
			int do_incr)
{
	int gotit;
	int sleepers;

	init_waitqueue_func_entry(&wtd->wait, __wtd_down_waiter);
	wtd->data = sem;

	spin_lock_irq(&semaphore_lock);
	sem->sleepers += do_incr;
	sleepers = sem->sleepers;
	gotit = add_wait_queue_exclusive_cond(&sem->wait, &wtd->wait,
			atomic_add_negative(sleepers - 1, &sem->count));
	if (gotit)
		sem->sleepers = 0;
	else
		sem->sleepers = 1;
	spin_unlock_irq(&semaphore_lock);

	if (gotit) {
		wake_up(&sem->wait);
		wtd_queue(wtd);
	}
}
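
For reference, the wake function registered above does not wake a task: when wake_up() fires, it runs the function stored in the wait entry, in the waker's context. A minimal, hypothetical sketch of what __wtd_down_waiter could look like under the same worktodo helpers (a reconstruction, not the patch's exact code; the wake-function signature also varies across kernel versions):

/* Hypothetical sketch: called from wake_up() instead of waking a task. */
static int __wtd_down_waiter(wait_queue_t *wait)
{
	struct worktodo *wtd = container_of(wait, struct worktodo, wait);
	struct semaphore *sem = wtd->data;

	__remove_wait_queue(&sem->wait, &wtd->wait);	/* detach ourselves */
	wtd_queue(wtd);		/* resume the blocked operation elsewhere */
	return 1;
}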
Example #2
/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, work_func_t func,
		     unsigned long mask)
{
	INIT_WORK(&poll->work, func);
	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
	init_poll_funcptr(&poll->table, vhost_poll_func);
	poll->mask = mask;
}
Example #3
/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     unsigned long mask, struct vhost_dev *dev)
{
	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
	init_poll_funcptr(&poll->table, vhost_poll_func);
	poll->mask = mask;
	poll->dev = dev;

	vhost_work_init(&poll->work, fn);
}
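
In both vhost variants the interesting part is the wake function itself: vhost_poll_wakeup recovers the enclosing vhost_poll from the wait entry with container_of(), filters on the event mask, and queues the work. A sketch along the lines of the mainline vhost driver (signature shown for the older wait_queue_t API used in these examples):

static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
			     void *key)
{
	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);

	/* Ignore wakeups for events this poll entry was not watching. */
	if (!((unsigned long)key & poll->mask))
		return 0;

	/* Hand the actual processing off to the vhost worker. */
	vhost_poll_queue(poll);
	return 0;
}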
Example #4
void __pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
{
	struct poll_table_page *table;

#ifdef CONFIG_EPOLL
	/* If there is a qproc set (in this case, that implies it's an
	 * eventpoll poll_table), we may be casting the poll_table from
	 * something else so make sure we don't dereference any other
	 * poll_table fields in this case. */
	if (p->qproc) {
		p->qproc(filp, wait_address, p);
		return;
	}
#endif /* CONFIG_EPOLL */

	table = p->table;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			__set_current_state(TASK_RUNNING);
			return;
		}
		new_table->size = PAGE_SIZE;
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	/* Add a new entry */
	{
		struct poll_table_entry *entry = table->entry;
		table->entry = entry + 1;
		get_file(filp);
		entry->filp = filp;
		entry->wait_address = wait_address;
		entry->p = p;
		if (p->iocb) /* async poll */
			init_waitqueue_func_entry(&entry->wait, async_poll_waiter);
		else
			init_waitqueue_entry(&entry->wait, current);
		add_wait_queue(wait_address, &entry->wait);
	}
}
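
The two branches above differ only in how the wait entry is initialized: init_waitqueue_entry() stores the current task so a wakeup reschedules it, while init_waitqueue_func_entry() stores a callback so the wakeup path invokes a function instead. Roughly, the helpers look like this (a sketch after <linux/wait.h> of this era; the task field is named private in later kernels):

/* Sketch of the wait-entry initializers (field names vary by version). */
static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags = 0;
	q->task = p;			/* wake_up() wakes this task ...  */
	q->func = default_wake_function;
}

static inline void init_waitqueue_func_entry(wait_queue_t *q,
					     wait_queue_func_t func)
{
	q->flags = 0;
	q->task = NULL;			/* ... or, with no task, ...      */
	q->func = func;			/* wake_up() calls func instead.  */
}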
Example #5
static void skb_async_read_worker(void *_data)
{
	struct skb_async_info	*info = _data;
	struct sock *sk = info->sk;
	struct sk_buff *skb;
	int error;

	/* Caller is allowed not to check sk->err before skb_recv_datagram() */
	error = sock_error(sk);
	if (error)
		goto no_packet;

	init_waitqueue_func_entry(&info->wtd.wait, skb_async_read_waiter);

	/* Attempt to dequeue and process any skbs that have already arrived.
	 * Note that add_wait_queue_cond is used to check against a race
	 * where an skb is added to the queue after we checked but before
	 * the callback is added to the wait queue.
	 */
	do {
		skb = skb_dequeue(&sk->receive_queue);
		if (skb) {
			info->finish(sk, info->cb, info->len, skb);
			kfree(info);
			return;
		}
	} while (add_wait_queue_cond(sk->sleep, &info->wtd.wait,
				     (!(error = sock_error(sk)) &&
				      skb_queue_empty(&sk->receive_queue)))
		 && !error);

	if (!error)
		return;

no_packet:
	info->cb.fn(info->cb.data, info->cb.vec, error);
	kfree(info);
	return;
}
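
The waiter registered into info->wtd.wait closes the race the comment describes: once an skb arrives (or an error is raised) after the dequeue check, the socket's sleep queue is woken and the callback simply requeues the worker so the loop above runs again. A hypothetical sketch, assuming the same worktodo helpers as in example #1:

/* Hypothetical sketch: on wakeup, re-run skb_async_read_worker()
 * from process context via the worktodo queue. */
static int skb_async_read_waiter(wait_queue_t *wait)
{
	struct worktodo *wtd = container_of(wait, struct worktodo, wait);

	wtd_queue(wtd);
	return 1;
}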
Example #6
/*
 * This function is invoked when epoll calls f_op->poll() on a file,
 * i.e. when epoll actively polls an fd, to associate the epitem with
 * that fd. The association is made through a wait queue.
 */
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
				 poll_table *pt)
{
	struct epitem *epi = ep_item_from_epqueue(pt);
	struct eppoll_entry *pwq;

	if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
		/* Initialize the wait entry with ep_poll_callback as the
		 * wake-up callback: when the monitored fd changes state,
		 * i.e. its wait queue head is woken, the callback runs. */
		init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
		pwq->whead = whead;
		pwq->base = epi;
		/* Add the freshly allocated wait entry to the head, which is
		 * owned by the fd. */
		add_wait_queue(whead, &pwq->wait);
		list_add_tail(&pwq->llink, &epi->pwqlist);
		/* nwait records how many wait queues this epitem has been
		 * added to; I believe it never exceeds 1... */
		epi->nwait++;
	} else {
		/* We have to signal that an error occurred */
		epi->nwait = -1;
	}
}
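
On the wakeup side, ep_poll_callback is handed the embedded wait entry and must first recover the eppoll_entry (and from it the epitem) before queuing the event on epoll's ready list. The retrieval step is the standard container_of() idiom; fs/eventpoll.c has a helper of essentially this shape:

/* Map a wait entry back to the eppoll_entry it is embedded in,
 * as allocated by ep_ptable_queue_proc() above. */
static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_t *p)
{
	return container_of(p, struct eppoll_entry, wait);
}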
Example #7
static int virqfd_enable(struct vfio_pci_device *vdev,
			 int (*handler)(struct vfio_pci_device *, void *),
			 void (*thread)(struct vfio_pci_device *, void *),
			 void *data, struct virqfd **pvirqfd, int fd)
{
	struct fd irqfd;
	struct eventfd_ctx *ctx;
	struct virqfd *virqfd;
	int ret = 0;
	unsigned int events;

	virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
	if (!virqfd)
		return -ENOMEM;

	virqfd->pvirqfd = pvirqfd;
	virqfd->vdev = vdev;
	virqfd->handler = handler;
	virqfd->thread = thread;
	virqfd->data = data;

	INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
	INIT_WORK(&virqfd->inject, virqfd_inject);

	irqfd = fdget(fd);
	if (!irqfd.file) {
		ret = -EBADF;
		goto err_fd;
	}

	ctx = eventfd_ctx_fileget(irqfd.file);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto err_ctx;
	}

	virqfd->eventfd = ctx;

	/*
	 * virqfds can be released by closing the eventfd or directly
	 * through ioctl.  These are both done through a workqueue, so
	 * we update the pointer to the virqfd under lock to avoid
	 * pushing multiple jobs to release the same virqfd.
	 */
	spin_lock_irq(&vdev->irqlock);

	if (*pvirqfd) {
		spin_unlock_irq(&vdev->irqlock);
		ret = -EBUSY;
		goto err_busy;
	}
	*pvirqfd = virqfd;

	spin_unlock_irq(&vdev->irqlock);

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd.
	 */
	init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
	init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);

	events = irqfd.file->f_op->poll(irqfd.file, &virqfd->pt);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered and trigger it as if we didn't miss it.
	 */
	if (events & POLLIN) {
		if ((!handler || handler(vdev, data)) && thread)
			schedule_work(&virqfd->inject);
	}

	/*
	 * Do not drop the file until the irqfd is fully initialized,
	 * otherwise we might race against the POLLHUP.
	 */
	fdput(irqfd);

	return 0;
err_busy:
	eventfd_ctx_put(ctx);
err_ctx:
	fdput(irqfd);
err_fd:
	kfree(virqfd);

	return ret;
}
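
Because virqfd_wakeup runs from the eventfd's wake-up path with the waitqueue lock held, it must not sleep: it runs the quick handler inline and defers anything that may sleep to the inject work item set up above. A sketch of the POLLIN half (the mainline version additionally catches POLLHUP and schedules virqfd->shutdown):

static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync,
			 void *key)
{
	struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
	unsigned long flags = (unsigned long)key;

	if (flags & POLLIN) {
		/* Event signalled: run the atomic handler here; if it asks
		 * for more work (or there is no handler), defer to the
		 * thread fn via the inject work item. */
		if ((!virqfd->handler ||
		     virqfd->handler(virqfd->vdev, virqfd->data)) &&
		    virqfd->thread)
			schedule_work(&virqfd->inject);
	}

	return 0;
}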