Example #1
/*
 * abort_exclusive_wait - abort exclusive waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 * @mode: runstate of the waiter to be woken
 * @key: key to identify a wait bit queue or %NULL
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 *
 * Wakes up the next waiter if the caller is concurrently
 * woken up through the queue.
 *
 * This prevents waiter starvation where an exclusive waiter
 * aborts and is woken up concurrently, and no one wakes up
 * the next waiter.
 */
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
			unsigned int mode, void *key)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	spin_lock_irqsave(&q->lock, flags);
	if (!list_empty(&wait->task_list))
		list_del_init(&wait->task_list);
	else if (waitqueue_active(q))
		__wake_up_common(q, mode, 1, 0, key);
	spin_unlock_irqrestore(&q->lock, flags);
}
void _mali_osk_wait_queue_wake_up( _mali_osk_wait_queue_t *queue )
{
	MALI_DEBUG_ASSERT_POINTER( queue );

	/* if queue is empty, don't attempt to wake up its elements */
	if (!waitqueue_active(&queue->wait_queue)) return;

	MALI_DEBUG_PRINT(6, ("Waking up elements in wait queue %p ....\n", queue));

	wake_up_all(&queue->wait_queue);

	MALI_DEBUG_PRINT(6, ("... elements in wait queue %p woken up\n", queue));
}
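Both functions above use the common kernel idiom of checking waitqueue_active() before issuing a wake-up. A minimal, hedged sketch of that idiom follows; the demo_* names are hypothetical, and the explicit smp_mb() stands in for whatever ordering the real callers get from their locks.

/* Sketch only: publish the condition, order it before the lockless
 * waitqueue_active() check, and skip the wake-up when nobody waits.
 */
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_cond;

static void demo_signal(void)
{
	demo_cond = 1;
	smp_mb();	/* order the store above before the waitqueue_active() check */
	if (waitqueue_active(&demo_wq))
		wake_up(&demo_wq);
}

static int demo_wait(void)
{
	return wait_event_interruptible(demo_wq, demo_cond);
}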
Example #3
/**
 * sk_stream_write_space - stream socket write_space callback.
 * @sk: socket
 *
 * FIXME: write proper description
 */
void sk_stream_write_space(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;

	if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) {
		clear_bit(SOCK_NOSPACE, &sock->flags);

		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
			wake_up_interruptible(sk->sk_sleep);
		if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
			sock_wake_async(sock, 2, POLL_OUT);
	}
}
Example #4
/*
 * wake up sync - echo event was caught
 */
void
snd_seq_oss_writeq_wakeup(seq_oss_writeq_t *q, abstime_t time)
{
	unsigned long flags;

	spin_lock_irqsave(&q->sync_lock, flags);
	q->sync_time = time;
	q->sync_event_put = 0;
	if (waitqueue_active(&q->sync_sleep)) {
		wake_up(&q->sync_sleep);
	}
	spin_unlock_irqrestore(&q->sync_lock, flags);
}
Example #5
/**
 * Free a buffer.
 *
 * \param dev DRM device.
 * \param buf buffer to free.
 * 
 * Resets the fields of \p buf.
 */
void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf)
{
	if (!buf) return;

	buf->waiting  = 0;
	buf->pending  = 0;
	buf->filp     = NULL;
	buf->used     = 0;

	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && waitqueue_active(&buf->dma_wait)) {
		wake_up_interruptible(&buf->dma_wait);
	}
}
Example #6
/**
 * shutdown - shutdown socket connection
 * @sock: socket structure
 * @how: direction to close (must be SHUT_RDWR)
 *
 * Terminates connection (if necessary), then purges socket's receive queue.
 *
 * Returns 0 on success, errno otherwise
 */
static int shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sk_buff *buf;
	int res;

	if (how != SHUT_RDWR)
		return -EINVAL;

	lock_sock(sk);

	switch (sock->state) {
	case SS_CONNECTING:
	case SS_CONNECTED:

restart:
		/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
		buf = __skb_dequeue(&sk->sk_receive_queue);
		if (buf) {
			atomic_dec(&tipc_queue_size);
			if (TIPC_SKB_CB(buf)->handle != 0) {
				kfree_skb(buf);
				goto restart;
			}
			tipc_disconnect(tport->ref);
			tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
		} else {
			tipc_shutdown(tport->ref);
		}

		sock->state = SS_DISCONNECTING;

		/* fall through */

	case SS_DISCONNECTING:

		/* Discard any unreceived messages; wake up sleeping tasks */
		discard_rx_queue(sk);
		if (waitqueue_active(sk_sleep(sk)))
			wake_up_interruptible(sk_sleep(sk));
		res = 0;
		break;

	default:
		res = -ENOTCONN;
	}

	release_sock(sk);
	return res;
}
Example #7
/**
 * eventfd_signal - Adds @n to the eventfd counter.
 * @ctx: [in] Pointer to the eventfd context.
 * @n: [in] Value of the counter to be added to the eventfd internal counter.
 *          The value cannot be negative.
 *
 * This function is supposed to be called by the kernel in paths that do not
 * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
 * value, and we signal this as an overflow condition by returning a POLLERR
 * to poll(2).
 *
 * Returns the amount by which the counter was incremented.  This will be less
 * than @n if the counter has overflowed.
 */
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	if (ULLONG_MAX - ctx->count < n)
		n = ULLONG_MAX - ctx->count;
	ctx->count += n;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, POLLIN);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return n;
}
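For context, eventfd_signal() is normally called from driver or interrupt paths with a context looked up earlier via eventfd_ctx_fdget() and released with eventfd_ctx_put(). A hedged caller sketch (demo_notify is a hypothetical name):

/* Hypothetical sketch: signal an eventfd from a non-sleeping path. */
static void demo_notify(struct eventfd_ctx *ctx)
{
	if (ctx)
		eventfd_signal(ctx, 1);	/* POLLIN waiters are woken as shown above */
}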
Example #8
/*
 * This gets called when the timer event triggers. We set the "expired"
 * flag, but we do not re-arm the timer (when re-arming is necessary,
 * i.e. interval.tv64 != 0) until the timer is accessed.
 */
enum hrtimer_restart timerfd_callback(struct hrtimer *timer)
{
  struct timerfd_ctx *ctx = container_of(timer, struct timerfd_ctx, timer);
  unsigned long flags;

  spin_lock_irqsave(&ctx->wqh.lock, flags);
  ctx->expired = 1;
  ctx->ticks++;
  if (waitqueue_active(&ctx->wqh))
    __wake_up_locked_keyPtr(&ctx->wqh, TASK_NORMAL, (void *) (POLLIN));
  spin_unlock_irqrestore(&ctx->wqh.lock, flags);

  return HRTIMER_NORESTART;
}
Example #9
void RESET_EVENT( EVENT_HNDL* pEvent )
{
	DWORD LockFlag;

	// clear the event flag
	ACQUIRE_LOCK( &pEvent->FlagLock, LockFlag );
	pEvent->SetFlag = FALSE;

	// empty out the queue
	while ( waitqueue_active( &pEvent->WaitQue ) )
		interruptible_sleep_on( &pEvent->WaitQue );

	RELEASE_LOCK( &pEvent->FlagLock, LockFlag );
}
Example #10
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;

	me = get_cpu();
	if (waitqueue_active(vcpu->arch.wqp)) {
		wake_up_interruptible(vcpu->arch.wqp);
		vcpu->stat.halt_wakeup++;
	} else if (cpu != me && cpu != -1) {
		smp_send_reschedule(vcpu->cpu);
	}
	put_cpu();
}
Example #11
static irqreturn_t tmio_irq(int irq, void *__tmio)
{
	struct tmio_nand *tmio = __tmio;
	struct nand_chip *nand_chip = &tmio->chip;

	/* disable RDYREQ interrupt */
	tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);

	if (unlikely(!waitqueue_active(&nand_chip->controller->wq)))
		dev_warn(&tmio->dev->dev, "spurious interrupt\n");

	wake_up(&nand_chip->controller->wq);
	return IRQ_HANDLED;
}
Example #12
void signalfd_cleanup(struct sighand_struct *sighand)
{
	wait_queue_head_t *wqh = &sighand->signalfd_wqh;
	/*
	 * The lockless check can race with remove_wait_queue() in progress,
	 * but in this case its caller should run under rcu_read_lock() and
	 * sighand_cachep is SLAB_DESTROY_BY_RCU, we can safely return.
	 */
	if (likely(!waitqueue_active(wqh)))
		return;

	/* wait_queue_t->func(POLLFREE) should do remove_wait_queue() */
	wake_up_poll(wqh, POLLHUP | POLLFREE);
}
Example #13
/**
 * eventfd_ctx_remove_wait_queue - Read the current counter and remove the wait queue.
 * @ctx: [in] Pointer to eventfd context.
 * @wait: [in] Wait queue to be removed.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error codes:
 *
 * -EAGAIN      : The operation would have blocked.
 *
 * This is used to atomically remove a wait queue entry from the eventfd wait
 * queue head, and read/reset the counter value.
 */
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait,
				  __u64 *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	eventfd_ctx_do_read(ctx, cnt);
	__remove_wait_queue(&ctx->wqh, wait);
	if (*cnt != 0 && waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, POLLOUT);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return *cnt != 0 ? 0 : -EAGAIN;
}
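A hedged caller sketch for the function above: a wait-queue entry that was previously attached to the eventfd is detached, and whatever count was pending at that moment is consumed in the same step (demo_detach is a hypothetical name).

/* Hypothetical sketch: detach a previously added wait-queue entry and
 * return the count that was pending, or 0 if nothing was pending.
 */
static __u64 demo_detach(struct eventfd_ctx *ctx, wait_queue_t *wait)
{
	__u64 cnt = 0;

	if (eventfd_ctx_remove_wait_queue(ctx, wait, &cnt) == -EAGAIN)
		return 0;
	return cnt;
}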
Example #14
static void
kni_sk_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCK_ASYNC_NOSPACE,
				&sk->sk_socket->flags))
		return;
	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(
			wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}
Example #15
void signalfd_cleanup(struct sighand_struct *sighand)
{
	wait_queue_head_t *wqh = &sighand->signalfd_wqh;
	/*
	 * The lockless check can race with remove_wait_queue() in progress,
	 * but in this case its caller should run under rcu_read_lock() and
	 * sighand_cachep is SLAB_DESTROY_BY_RCU, we can safely return.
	 */
	if (likely(!waitqueue_active(wqh)))
		return;

	/* wait_queue_t->func(POLLFREE) should do remove_wait_queue() */
	wake_up_poll(wqh, POLLHUP | POLLFREE);
}
Example #16
static ssize_t vloopback_write(struct file *f, const char *buf,
		size_t count, loff_t *offset)
{
	struct video_device *loopdev=video_devdata(f);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
	priv_ptr ptr=(priv_ptr)video_get_drvdata(loopdev);
#else
	priv_ptr ptr=(priv_ptr)loopdev->priv;
#endif	
	int nr=ptr->pipenr;
	unsigned long realcount=count;
	
	if (!ptr->in)
		return -EINVAL;
	if (loops[nr]->zerocopy)
		return -EINVAL;
	
	if (loops[nr]->buffer==NULL) {
		return -EINVAL;
	}

	/* Anybody want some pictures??? */
	if (!waitqueue_active(&loops[nr]->wait)) {
		/* No, waiting this makes the write op blocking */
		wait_event_interruptible(loops[nr]->wait, loops[nr]->pendingread);
	}
	
	down(&loops[nr]->lock);
	if (!loops[nr]->buffer) {
		up(&loops[nr]->lock);
		return -EINVAL;
	}
	if (realcount > loops[nr]->buflength) {
		realcount = loops[nr]->buflength;
		info("Too much data! Only %ld bytes used.", realcount);
	}
	
	if (copy_from_user(
	    loops[nr]->buffer+loops[nr]->frame*loops[nr]->buflength,
	    buf, realcount
	)) return -EFAULT;

	loops[nr]->frame=0;
	up(&loops[nr]->lock);

	loops[nr]->frameswrite++;
	wake_up(&loops[nr]->wait);

	return realcount;
}
Example #17
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET)
		kvmppc_core_dequeue_external(vcpu, irq);
	else
		kvmppc_core_queue_external(vcpu, irq);

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}

	return 0;
}
Example #18
static int osprd_close_last(struct inode *inode, struct file *filp)
{
	int r;
	r = 0;
	if (filp) {
		osprd_info_t *d = file2osprd(filp);
		int filp_writable = filp->f_mode & FMODE_WRITE;

		// EXERCISE: If the user closes a ramdisk file that holds
		// a lock, release the lock.  Also wake up blocked processes
		// as appropriate.
		//r = osprd_ioctl (inode, filp, OSPRDIOCRELEASE, 0);
		// Your code here.

		// This line avoids compiler warnings; you may remove it.
		(void) filp_writable, (void) d;
		if (!(filp->f_flags & F_OSPRD_LOCKED))
			{r = -EINVAL; }
		
		// Otherwise, clear the lock from filp->f_flags, wake up
		// the wait queue, perform any additional accounting steps
		// you need, and return 0.
		else 
		{
			osp_spin_lock(&(d->mutex));
			// Clear lock flag.
			filp->f_flags &= ~F_OSPRD_LOCKED;
			//d->mutex.lock = 0;
			d->n_writel = 0;
			d->n_readl = 0;
			d->dead = 0;
				
			// Wake queue.
			if(waitqueue_active(&d->blockq) == 0){
				//eprintk("Tail: %d head: %d\n", d->ticket_tail, d->ticket_head);				
				//d->ticket_head = d->ticket_tail;
				d->ticket_tail += d->desync;
				d->desync = 0;
			}
			osp_spin_unlock(&(d->mutex));	
			wake_up_all(&d->blockq);		
				
			r = 0;
		}

	}

	return r;
}
Example #19
static bool dna_e1000e_clean_rx_irq(struct e1000_adapter *adapter) {
  bool ret;
  int i, debug = 0;
  struct e1000_ring *rx_ring = adapter->rx_ring;
  union e1000_rx_desc_extended *rx_desc;
  struct e1000_buffer *buffer_info;
  struct e1000_hw *hw = &adapter->hw;
  u32 staterr;

  /* The register contains the last packet that we have read */
  i = E1000_READ_REG(hw, E1000_RDT(0));
  if(++i == rx_ring->count)
    i = 0;

  rx_ring->next_to_clean = i; 
  rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
  staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
  buffer_info = &rx_ring->buffer_info[i];
  
  if(unlikely(debug))
    printk(KERN_INFO
	   "DNA: dna_e1000_clean_rx_irq(%s)[id=%d][status=%d][rx_reg=%u]\n",
	   adapter->netdev->name, i, staterr,
	   E1000_READ_REG(&adapter->hw, E1000_RDT(0)));

  if(staterr & E1000_RXD_STAT_DD) {
    if(!adapter->dna.interrupt_received) {
      if(waitqueue_active(&adapter->dna.packet_waitqueue)) {
	wake_up_interruptible(&adapter->dna.packet_waitqueue);
	adapter->dna.interrupt_received = 1;

	if(unlikely(debug))
	  printk(KERN_WARNING "DNA: dna_e1000_clean_rx_irq(%s): "
		 "woken up [slot=%d] XXXX\n", adapter->netdev->name, i);

      }
    }

    if(unlikely(debug))
      printk(KERN_WARNING "DNA: dna_e1000_clean_rx_irq(%s): "
	     "woken up [slot=%d][interrupt_received=%d]\n",
	     adapter->netdev->name, i, adapter->dna.interrupt_received);

    ret = TRUE;
  } else
    ret = FALSE;

  return(ret);		
}
Example #20
static ssize_t tier_attr_internals_show(struct tier_device *dev,
                                               char *buf)
{
        char *iotype;
        char *iopending;
        char *qlock;
        char *aiowq;
        char *discard;
#ifndef MAX_PERFORMANCE
        char *debug_state;
#endif
        int res = 0;
       
        if (atomic_read(&dev->migrate) == MIGRATION_IO) 
            iotype = as_sprintf("iotype (normal or migration) : migration_io\n");
        else if (atomic_read(&dev->wqlock))
            iotype = as_sprintf("iotype (normal or migration) : normal_io\n");
        else
            iotype = as_sprintf("iotype (normal or migration) : no activity\n");
        iopending =  as_sprintf("async random ios pending     : %i\n", atomic_read(&dev->aio_pending));
        if ( mutex_is_locked(&dev->qlock))
             qlock = as_sprintf("main mutex                   : locked\n");
        else qlock = as_sprintf("main mutex                   : unlocked\n");
        if (waitqueue_active(&dev->aio_event))
             aiowq = as_sprintf("waiting on asynchronous io   : True\n");
        else
             aiowq = as_sprintf("waiting on asynchronous io   : False\n");
#ifndef MAX_PERFORMANCE
        spin_lock(&dev->dbg_lock);
        if (dev->debug_state & DISCARD)
           discard = as_sprintf("discard request is pending   : True\n");
        else 
           discard = as_sprintf("discard request is pending   : False\n");
        debug_state = as_sprintf("debug state                  : %i\n", dev->debug_state);
        spin_unlock(&dev->dbg_lock);
        res = sprintf(buf, "%s%s%s%s%s%s", iotype, iopending, qlock, aiowq, discard, debug_state);
#else
        res = sprintf(buf, "%s%s%s%s", iotype, iopending, qlock, aiowq);
#endif
        kfree(iotype);
        kfree(iopending);
        kfree(qlock);
        kfree(aiowq);
#ifndef MAX_PERFORMANCE
        kfree(discard);
        kfree(debug_state);
#endif
        return res;
}
Example #21
/* Select who will handle tx completion:
 * - a write is pending - wake it up and let it do the poll + post
 * - post handler is taken - taker will do the poll + post
 * else return 1 and let the caller do it
 */
static int sdp_tx_handler_select(struct sdp_sock *ssk)
{
	struct sock *sk = sk_ssk(ssk);

	if (sk->sk_write_pending) {
		/* Do the TX posts from sender context */
		if (sdp_sk_sleep(sk) && waitqueue_active(sdp_sk_sleep(sk))) {
			sdp_prf1(sk, NULL, "Waking up pending sendmsg");
			wake_up_interruptible(sdp_sk_sleep(sk));
			return 0;
		} else
			sdp_prf1(sk, NULL, "Unexpected: sk_sleep=%p, "
				"waitqueue_active: %d\n",
				sdp_sk_sleep(sk), waitqueue_active(sdp_sk_sleep(sk)));
	}

	if (posts_handler(ssk)) {
		/* Somebody else available to check for completion */
		sdp_prf1(sk, NULL, "Somebody else will call do_posts");
		return 0;
	}

	return 1;
}
Example #22
static void __kaio_queue_fsync_req(struct ploop_request * preq, int prio)
{
    struct ploop_device * plo   = preq->plo;
    struct ploop_delta  * delta = ploop_top_delta(plo);
    struct ploop_io     * io    = &delta->io;

    if (prio)
        list_add(&preq->list, &io->fsync_queue);
    else
        list_add_tail(&preq->list, &io->fsync_queue);

    io->fsync_qlen++;
    if (waitqueue_active(&io->fsync_waitq))
        wake_up_interruptible(&io->fsync_waitq);
}
Example #23
static void freeque (int id)
{
	struct msqid_ds *msq = msgque[id];
	struct msg *msgp, *msgh;

	msq->msg_perm.seq++;
	msg_seq = (msg_seq+1) % ((unsigned)(1<<31)/MSGMNI); /* increment, but avoid overflow */
	msgbytes -= msq->msg_cbytes;
	if (id == max_msqid)
		while (max_msqid && (msgque[--max_msqid] == IPC_UNUSED));
	msgque[id] = (struct msqid_ds *) IPC_UNUSED;
	used_queues--;
	while (waitqueue_active(&msq->rwait) || waitqueue_active(&msq->wwait)) {
		wake_up (&msq->rwait); 
		wake_up (&msq->wwait);
		schedule(); 
	}
	for (msgp = msq->msg_first; msgp; msgp = msgh ) {
		msgh = msgp->msg_next;
		msghdrs--;
		kfree(msgp);
	}
	kfree(msq);
}
Example #24
int btrfs_tree_unlock(struct extent_buffer *eb)
{
	/*
	 * if we were a blocking owner, we don't have the spinlock held
	 * just clear the bit and look for waiters
	 */
	if (test_and_clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
		smp_mb__after_clear_bit();
	else
		spin_unlock(&eb->lock);

	if (waitqueue_active(&eb->lock_wq))
		wake_up(&eb->lock_wq);
	return 0;
}
_mali_osk_wait_queue_t *_mali_osk_wait_queue_init(void)
{
	_mali_osk_wait_queue_t *ret = NULL;

	ret = kmalloc(sizeof(_mali_osk_wait_queue_t), GFP_KERNEL);

	if (NULL == ret) {
		return ret;
	}

	init_waitqueue_head(&ret->wait_queue);
	MALI_DEBUG_ASSERT(!waitqueue_active(&ret->wait_queue));

	return ret;
}
Example #26
static irqreturn_t aml_demod_isr(int irq, void *dev_id)
{
    if (demod_sta.dvb_mode == 0) {
	//dvbc_isr(&demod_sta);
	if(dvbc_isr_islock()){
		printk("sync4\n");
		if(waitqueue_active(&lock_wq))
			wake_up_interruptible(&lock_wq);
	}
    }
    else {
	dvbt_isr(&demod_sta);
    }

    return IRQ_HANDLED;
}
Example #27
NDAS_SAL_API
void
sal_event_destroy(sal_event event)
{
    struct _sal_event    *sevent = (struct _sal_event*) event;
    sal_assert(sevent!=SAL_INVALID_EVENT);
#ifdef    MAGIC
    sal_assert(sevent->magic == SAL_EVENT_MAGIC);
#endif

    dbgl_salsync(5, "%s %p",sevent->desc, sevent);
    sal_assert( !waitqueue_active(&sevent->queue) );

    // check if locked and print debugging info
    kfree(sevent);
}
Example #28
static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;
	if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
		return -EFAULT;
	if (ucnt == ULLONG_MAX)
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ULLONG_MAX - ctx->count > ucnt)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (res = 0;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ULLONG_MAX - ctx->count > ucnt) {
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res > 0)) {
		ctx->count += ucnt;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, POLLIN);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}
Example #29
void ext4_free_io_end(ext4_io_end_t *io)
{
	int i;
	wait_queue_head_t *wq;

	BUG_ON(!io);
	if (io->page)
		put_page(io->page);
	for (i = 0; i < io->num_io_pages; i++)
		put_io_page(io->pages[i]);
	io->num_io_pages = 0;
	wq = ext4_ioend_wq(io->inode);
	if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count) &&
	    waitqueue_active(wq))
		wake_up_all(wq);
	kmem_cache_free(io_end_cachep, io);
}
Example #30
static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	ucnt = ctx->count;
	if (ucnt > 0)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (res = 0;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ctx->count > 0) {
				ucnt = ctx->count;
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (res > 0) {
		ctx->count = 0;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked(&ctx->wqh);
	}
	spin_unlock_irq(&ctx->wqh.lock);
	if (res > 0 && put_user(ucnt, (__u64 __user *) buf))
		return -EFAULT;

	return res;
}
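Examples #28 and #30 are the kernel side of write(2) and read(2) on an eventfd. For reference, a minimal userspace counterpart might look like the sketch below (hypothetical demo function, error handling elided): a write adds to the counter, a read returns the accumulated value and resets it, and both transfer exactly eight bytes, matching sizeof(ucnt) above.

#include <sys/eventfd.h>
#include <unistd.h>
#include <stdint.h>

static uint64_t demo_eventfd_roundtrip(void)
{
	uint64_t val = 3, out = 0;
	int fd = eventfd(0, 0);

	if (fd < 0)
		return 0;
	write(fd, &val, sizeof(val));	/* kernel: ctx->count += 3, wakes POLLIN */
	read(fd, &out, sizeof(out));	/* kernel: returns 3, count reset to 0 */
	close(fd);
	return out;
}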