Code example #1
void sk_stream_wait_close(struct sock *sk, long timeout)
{
	if (timeout) {
		DEFINE_WAIT(wait);

		do {
			prepare_to_wait(sk->sk_sleep, &wait,
					TASK_INTERRUPTIBLE);
			if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk)))
				break;
		} while (!signal_pending(current) && timeout);

		finish_wait(sk->sk_sleep, &wait);
	}
}
Code example #2
File: svc.c Project: GodFox/magx_kernel_xpixl
static int svc_dropparty(struct socket *sock, int ep_ref)
{
	DEFINE_WAIT(wait);
	struct atm_vcc *vcc = ATM_SD(sock);
	int error;

	lock_sock(vcc->sk);
	set_bit(ATM_VF_WAITING, &vcc->flags);
	prepare_to_wait(vcc->sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
	sigd_enq2(vcc, as_dropparty, NULL, NULL, NULL, NULL, ep_ref);
	while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
		schedule();
		prepare_to_wait(vcc->sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
	}
	finish_wait(vcc->sk->sk_sleep, &wait);
	if (!sigd) {
		error = -EUNATCH;
		goto out;
	}
	error = xchg(&vcc->sk->sk_err_soft, 0);
out:
	release_sock(vcc->sk);
	return error;
}
Code example #3
File: thread.c Project: 0xroot/Blackphone-BP1-Kernel
int speakup_thread(void *data)
{
	unsigned long flags;
	int should_break;
	struct bleep our_sound;

	our_sound.active = 0;
	our_sound.freq = 0;
	our_sound.jiffies = 0;

	mutex_lock(&spk_mutex);
	while (1) {
		DEFINE_WAIT(wait);
		while (1) {
			spk_lock(flags);
			our_sound = unprocessed_sound;
			unprocessed_sound.active = 0;
			prepare_to_wait(&speakup_event, &wait,
				TASK_INTERRUPTIBLE);
			should_break = kthread_should_stop() ||
				our_sound.active ||
				(synth && synth->catch_up && synth->alive &&
					(speakup_info.flushing ||
					!synth_buffer_empty()));
			spk_unlock(flags);
			if (should_break)
				break;
			mutex_unlock(&spk_mutex);
			schedule();
			mutex_lock(&spk_mutex);
		}
		finish_wait(&speakup_event, &wait);
		if (kthread_should_stop())
			break;

		if (our_sound.active)
			kd_mksound(our_sound.freq, our_sound.jiffies);
		if (synth && synth->catch_up && synth->alive) {
			/* It is up to the callee to take the lock, so that it
			 * can sleep whenever it likes */
			synth->catch_up(synth);
		}

		speakup_start_ttys();
	}
	mutex_unlock(&spk_mutex);
	return 0;
}
Code example #4
File: wait.c Project: jfasch/linux
/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting,
 * the action routine passed to __wait_on_atomic_t() may return an error code;
 * a nonzero return code halts waiting and is passed back to the caller.
 */
static __sched
int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q,
		       int (*action)(atomic_t *), unsigned mode)
{
	atomic_t *val;
	int ret = 0;

	do {
		prepare_to_wait(wq, &q->wait, mode);
		val = q->key.flags;
		if (atomic_read(val) == 0)
			ret = (*action)(val);
	} while (!ret && atomic_read(val) != 0);
	finish_wait(wq, &q->wait);
	return ret;
}
Code example #5
void
fusion_sleep_on(wait_queue_head_t *q, struct semaphore *lock, signed long *timeout)
{
     DEFINE_WAIT(wait);

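     /*
      * Enqueue this task before releasing the lock, so a wakeup issued
      * right after up() is not lost: wake_up() already finds this entry
      * on the queue and sets the task back to TASK_RUNNING.
      */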
     prepare_to_wait( q, &wait, TASK_INTERRUPTIBLE );

     up( lock );

     if (timeout)
          *timeout = schedule_timeout(*timeout);
     else
          schedule();

     finish_wait( q, &wait );
}
Code example #6
File: backing-dev.c Project: 020gzh/linux
/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
Code example #7
/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);
	schedule();
	finish_wait(&pipe->wait, &wait);
	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);
}
Code example #8
File: backing-dev.c Project: 020gzh/linux
/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
 * @zone: A zone to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * In the event of a congested backing_dev (any backing_dev) and the given
 * @zone has experienced recent congestion, this waits for up to @timeout
 * jiffies for either a BDI to exit congestion of the given @sync queue
 * or a write to complete.
 *
 * In the absence of zone congestion, a short sleep or a cond_resched is
 * performed to yield the processor and to allow other subsystems to make
 * forward progress.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct zone *zone, int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * If there is no congestion, or heavy congestion is not being
	 * encountered in the current zone, yield if necessary instead
	 * of sleeping on the congestion queue
	 */
	if (atomic_read(&nr_wb_congested[sync]) == 0 ||
	    !test_bit(ZONE_CONGESTED, &zone->flags)) {

		/*
		 * Memory allocation/reclaim might be called from a WQ
		 * context and the current implementation of the WQ
		 * concurrency control doesn't recognize that a particular
		 * WQ is congested if the worker thread is looping without
		 * ever sleeping. Therefore we have to do a short sleep
		 * here rather than calling cond_resched().
		 */
		if (current->flags & PF_WQ_WORKER)
			schedule_timeout_uninterruptible(1);
		else
			cond_resched();

		/* In case we scheduled, work out time remaining */
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;

		goto out;
	}

	/* Sleep until uncongested or a write happens */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

out:
	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
Code example #9
/**
 * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet
 * @sk: socket to wait for
 * @timeo: for how long
 */
static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,
			      long *timeo)
{
	struct dccp_sock *dp = dccp_sk(sk);
	DEFINE_WAIT(wait);
	long delay;
	int rc;

	while (1) {
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
			goto do_error;
		if (!*timeo)
			goto do_nonblock;
		if (signal_pending(current))
			goto do_interrupted;

		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
					    skb->len);
		if (rc <= 0)
			break;
		delay = msecs_to_jiffies(rc);
		if (delay > *timeo || delay < 0)
			goto do_nonblock;

		sk->sk_write_pending++;
		release_sock(sk);
		*timeo -= schedule_timeout(delay);
		lock_sock(sk);
		sk->sk_write_pending--;
	}
out:
	finish_wait(sk->sk_sleep, &wait);
	return rc;

do_error:
	rc = -EPIPE;
	goto out;
do_nonblock:
	rc = -EAGAIN;
	goto out;
do_interrupted:
	rc = sock_intr_errno(*timeo);
	goto out;
}
Code example #10
File: cpu.c Project: tsj123/androidx86_remix
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 *
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}
Code example #11
static int
__nfs_iocounter_wait(struct nfs_io_counter *c)
{
	wait_queue_head_t *wq = bit_waitqueue(&c->flags, NFS_IO_INPROGRESS);
	DEFINE_WAIT_BIT(q, &c->flags, NFS_IO_INPROGRESS);
	int ret = 0;

	do {
		prepare_to_wait(wq, &q.wait, TASK_KILLABLE);
		set_bit(NFS_IO_INPROGRESS, &c->flags);
		if (atomic_read(&c->io_count) == 0)
			break;
		ret = nfs_wait_bit_killable(&c->flags);
	} while (atomic_read(&c->io_count) != 0 && !ret);
	finish_wait(wq, &q.wait);
	return ret;
}
Code example #12
/*
 * when dropping snapshots, we generate a ton of delayed refs, and it makes
 * sense not to join the transaction while it is trying to flush the current
 * queue of delayed refs out.
 *
 * This is used by the drop snapshot code only
 */
static noinline int wait_transaction_pre_flush(struct btrfs_fs_info *info)
{
    DEFINE_WAIT(wait);

    mutex_lock(&info->trans_mutex);
    while (info->running_transaction &&
            info->running_transaction->delayed_refs.flushing) {
        prepare_to_wait(&info->transaction_wait, &wait,
                        TASK_UNINTERRUPTIBLE);
        mutex_unlock(&info->trans_mutex);
        schedule();
        mutex_lock(&info->trans_mutex);
        finish_wait(&info->transaction_wait, &wait);
    }
    mutex_unlock(&info->trans_mutex);
    return 0;
}
Code example #13
static int  mct_u232_ioctl(struct tty_struct *tty,
			unsigned int cmd, unsigned long arg)
{
	DEFINE_WAIT(wait);
	struct usb_serial_port *port = tty->driver_data;
	struct mct_u232_private *mct_u232_port = usb_get_serial_port_data(port);
	struct async_icount cnow, cprev;
	unsigned long flags;

	dbg("%s - port %d, cmd = 0x%x", __func__, port->number, cmd);

	switch (cmd) {

	case TIOCMIWAIT:

		dbg("%s (%d) TIOCMIWAIT", __func__,  port->number);

		spin_lock_irqsave(&mct_u232_port->lock, flags);
		cprev = mct_u232_port->icount;
		spin_unlock_irqrestore(&mct_u232_port->lock, flags);
		for ( ; ; ) {
			prepare_to_wait(&mct_u232_port->msr_wait,
					&wait, TASK_INTERRUPTIBLE);
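			/* sleep until the MSR interrupt handler wakes us */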
			schedule();
			finish_wait(&mct_u232_port->msr_wait, &wait);
			/* see if a signal did it */
			if (signal_pending(current))
				return -ERESTARTSYS;
			spin_lock_irqsave(&mct_u232_port->lock, flags);
			cnow = mct_u232_port->icount;
			spin_unlock_irqrestore(&mct_u232_port->lock, flags);
			if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
			    cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
				return -EIO; /* no change => error */
			if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
			    ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
			    ((arg & TIOCM_CD)  && (cnow.dcd != cprev.dcd)) ||
			    ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) {
				return 0;
			}
			cprev = cnow;
		}

	}
	return -ENOIOCTLCMD;
}
Code example #14
File: jiq.c Project: sciampoli/slyRepo
static int jiq_read_wq_delayed(char *buf, char **start, off_t offset,
                   int len, int *eof, void *data)
{
	DEFINE_WAIT(wait);
	
	jiq_data.len = 0;                /* nothing printed, yet */
	jiq_data.buf = buf;              /* print in this place */
	jiq_data.jiffies = jiffies;      /* initial time */
	jiq_data.delay = delay;
    
	prepare_to_wait(&jiq_wait, &wait, TASK_INTERRUPTIBLE);
	schedule_delayed_work(&jiq_work, delay);
	schedule();
	finish_wait(&jiq_wait, &wait);

	*eof = 1;
	return jiq_data.len;
}
Code example #15
/* wait for a transaction commit to be fully complete */
static noinline int wait_for_commit(struct btrfs_root *root,
				    struct btrfs_transaction *commit)
{
	DEFINE_WAIT(wait);
	mutex_lock(&root->fs_info->trans_mutex);
	while (!commit->commit_done) {
		prepare_to_wait(&commit->commit_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (commit->commit_done)
			break;
		mutex_unlock(&root->fs_info->trans_mutex);
		schedule();
		mutex_lock(&root->fs_info->trans_mutex);
	}
	mutex_unlock(&root->fs_info->trans_mutex);
	finish_wait(&commit->commit_wait, &wait);
	return 0;
}
Code example #16
static ssize_t atf_log_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
{
    ssize_t ret;
    unsigned int write_pos;
    unsigned int read_pos;
    DEFINE_WAIT(wait);

start:
    while (1) {
        atf_log_lock();
        write_pos = atf_buf_vir_ctl->info.atf_write_pos;
        read_pos = atf_buf_vir_ctl->info.atf_read_pos;
        //pr_notice("atf_log_read: wait in wq\n");
        prepare_to_wait(&atf_log_wq, &wait, TASK_INTERRUPTIBLE);
        ret = (write_pos == read_pos);
        atf_log_unlock();
        if (!ret)
            break;
        if (file->f_flags & O_NONBLOCK) {
            ret = -EAGAIN;
            break;
        }
        if (signal_pending(current)) {
            ret = -EINTR;
            break;
        }
        schedule();
    }
    finish_wait(&atf_log_wq, &wait);
    //pr_notice("atf_log_read: finish wait\n");
    if (ret)
        return ret;
    atf_log_lock();
    if (unlikely(write_pos == read_pos)) {
        atf_log_unlock();
        goto start;
    }
    ret = do_read_log_to_usr(buf, count);
    atf_buf_vir_ctl->info.atf_read_pos = index_to_pos(read_index);
    atf_buf_vir_ctl->info.atf_read_seq += ret;
    atf_log_unlock();
    //pr_notice("atf_log_read: return %d, idx: %lu, readpos: %p, writepos: %p\n", ret, read_index, atf_buf_vir_ctl->info.atf_read_pos, atf_buf_vir_ctl->info.atf_write_pos);
    return ret;
}
Code example #17
File: mempool.c Project: gnensis/linux-2.6.15
/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * this function only sleeps if the alloc_fn function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 */
void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_t wait;
	gfp_t gfp_temp;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		return element;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* We must not sleep in the GFP_ATOMIC case */
	if (!(gfp_mask & __GFP_WAIT))
		return NULL;

	/* Now start performing page reclaim */
	gfp_temp = gfp_mask;
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
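	/* Order the enqueue/task-state write above against the curr_nr read
	   below, so a concurrent mempool_free() wakeup cannot be missed. */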
	smp_mb();
	if (!pool->curr_nr)
		io_schedule();
	finish_wait(&pool->wait, &wait);

	goto repeat_alloc;
}
Code example #18
File: sscullp.c Project: elftech/vmware-code
static int scull_getwritespace(struct scull_pipe *dev, struct file *filp)
{
	while (spacefree(dev) == 0) { /* full */
		DEFINE_WAIT(wait);
		
		up(&dev->sem);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		prepare_to_wait(&dev->outq, &wait, TASK_INTERRUPTIBLE);
		if (spacefree(dev) == 0)
			schedule();
		finish_wait(&dev->outq, &wait);
		if (signal_pending(current))
			return -ERESTARTSYS; /* signal: tell the fs layer to handle it */
		if (down_interruptible(&dev->sem))
			return -ERESTARTSYS;
	}
	return 0;
}	
Code example #19
File: output.c Project: 303750856/linux-3.1
/**
 * dccp_wait_for_ccid  -  Await CCID send permission
 * @sk:    socket to wait for
 * @delay: timeout in jiffies
 * This is used by CCIDs which need to delay the send time in process context.
 */
static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
{
	DEFINE_WAIT(wait);
	long remaining;

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	sk->sk_write_pending++;
	release_sock(sk);

	remaining = schedule_timeout(delay);

	lock_sock(sk);
	sk->sk_write_pending--;
	finish_wait(sk_sleep(sk), &wait);

	if (signal_pending(current) || sk->sk_err)
		return -1;
	return remaining;
}
Code example #20
File: main.c Project: yishengjiang99/pr
static int read_hello (char *buf, char **start, off_t offset, 
		int len, int *eof, void *unused) 
{
	struct proc_hello_data *usrsp=hello_data;
	int n=0;

	*eof = 1;
	
	printk(KERN_ALERT "2470:10.1 process(%d \"%s\") "
			"prepare to wait ..!\n", current->pid, current->comm);

	prepare_to_wait(usrsp->proc_hello_wqh,usrsp->proc_hello_wq,
			TASK_INTERRUPTIBLE); 

	printk(KERN_ALERT "2470:10.1 process(%d \"%s\") "
			"waiting ..!\n", current->pid, current->comm);

	schedule_timeout(10*HZ);	/* task state was already set by prepare_to_wait() */

	printk(KERN_ALERT "2470:10.1 process(%d \"%s\") "
			"done waiting ..!\n", current->pid, current->comm);
/*
	while (usrsp->proc_hello_flag!=1) {
		printk(KERN_ALERT "2470:10.1 process(%d \"%s\") "
			"call schedule  ..!\n", current->pid, current->comm);
		schedule();
	}
*/

	// process awake message
   	printk(KERN_ALERT "2470:10.1 process(%d \"%s\") "
			"finished waiting ..!\n", current->pid, current->comm);

	finish_wait(usrsp->proc_hello_wqh,usrsp->proc_hello_wq);

	usrsp->proc_hello_flag=0;

	n=sprintf(buf, "Hello .. I got \"%s\"\n", 
				usrsp->proc_hello_value); 

	return n;
}
Code example #21
File: io_kaio.c Project: cloudlinuxadmin/cl7-kernel
static int kaio_fsync_thread(void * data)
{
    struct ploop_io * io = data;
    struct ploop_device * plo = io->plo;

    set_user_nice(current, -20);

    spin_lock_irq(&plo->lock);
    while (!kthread_should_stop() || !list_empty(&io->fsync_queue)) {
        int err;
        struct ploop_request * preq;

        DEFINE_WAIT(_wait);
        for (;;) {
            prepare_to_wait(&io->fsync_waitq, &_wait, TASK_INTERRUPTIBLE);
            if (!list_empty(&io->fsync_queue) ||
                    kthread_should_stop())
                break;

            spin_unlock_irq(&plo->lock);
            schedule();
            spin_lock_irq(&plo->lock);
        }
        finish_wait(&io->fsync_waitq, &_wait);

        if (list_empty(&io->fsync_queue) && kthread_should_stop())
            break;

        preq = list_entry(io->fsync_queue.next, struct ploop_request, list);
        list_del(&preq->list);
        io->fsync_qlen--;
        if (!preq->prealloc_size)
            plo->st.bio_fsync++;
        spin_unlock_irq(&plo->lock);

        /* trick: preq->prealloc_size is actually new pos of eof */
        if (preq->prealloc_size) {
            err = kaio_truncate(io, io->files.file,
                                preq->prealloc_size >> (plo->cluster_log + 9));
            if (err)
                PLOOP_REQ_SET_ERROR(preq, -EIO);
        } else {
Code example #22
int init_module(void)
{
	DEFINE_WAIT(wait);

	init_waitqueue_head(&my_wait);

	while (condition != 10) {
		prepare_to_wait(&my_wait, &wait, TASK_INTERRUPTIBLE);
		if (condition == 10) 
			break;
		condition++;	
		printk(KERN_ALERT "condition:%d\n",condition);
		schedule_timeout(100);
	}
	finish_wait(&my_wait, &wait);

	if (signal_pending(current))
		return -ERESTARTSYS;

	return 0;
}
Code example #23
File: svc.c Project: 020gzh/linux
int svc_change_qos(struct atm_vcc *vcc, struct atm_qos *qos)
{
	struct sock *sk = sk_atm(vcc);
	DEFINE_WAIT(wait);

	set_bit(ATM_VF_WAITING, &vcc->flags);
	sigd_enq2(vcc, as_modify, NULL, NULL, &vcc->local, qos, 0);
	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
		if (!test_bit(ATM_VF_WAITING, &vcc->flags) ||
		    test_bit(ATM_VF_RELEASED, &vcc->flags) || !sigd) {
			break;
		}
		schedule();
	}
	finish_wait(sk_sleep(sk), &wait);
	if (!sigd)
		return -EUNATCH;
	return -sk->sk_err;
}
Code example #24
/*
 * @count: the number of devices the user is interested in
 */
static ssize_t dsp_twch_read(struct file *file, char __user *buf, size_t count,
			     loff_t *ppos)
{
	long taskstat[TASKDEV_MAX];
	int devcount = count / sizeof(long);
	int i;
	DEFINE_WAIT(wait);

	if (dsp_cfgstat_get_stat() != CFGSTAT_READY) {
		printk(KERN_ERR "omapdsp: dsp has not been configured.\n");
		return -EINVAL;
	}

	prepare_to_wait(&read_wait_q, &wait, TASK_INTERRUPTIBLE);
	if (change_cnt == 0)	/* last check */
		schedule();
	finish_wait(&read_wait_q, &wait);

	/* unconfigured while waiting ;-( */
	if ((change_cnt == 0) && (dsp_cfgstat_get_stat() != CFGSTAT_READY))
		return -EINVAL;

	if (devcount > TASKDEV_MAX)
		devcount = TASKDEV_MAX;

	count = devcount * sizeof(long);
	change_cnt = 0;
	for (i = 0; i < devcount; i++) {
		/*
		 * once the device state is read, the 'STALE' bit will be set
		 * so that the Dynamic Loader can distinguish the new request
		 * from the old one.
		 */
		taskstat[i] = taskdev_state_stale(i);
	}

	if (copy_to_user(buf, taskstat, count))
		return -EFAULT;

	return count;
}
Code example #25
static void wait_current_trans(struct btrfs_root *root)
{
    struct btrfs_transaction *cur_trans;

    cur_trans = root->fs_info->running_transaction;
    if (cur_trans && cur_trans->blocked) {
        DEFINE_WAIT(wait);
        cur_trans->use_count++;
        while (1) {
            prepare_to_wait(&root->fs_info->transaction_wait, &wait,
                            TASK_UNINTERRUPTIBLE);
            if (!cur_trans->blocked)
                break;
            mutex_unlock(&root->fs_info->trans_mutex);
            schedule();
            mutex_lock(&root->fs_info->trans_mutex);
        }
        finish_wait(&root->fs_info->transaction_wait, &wait);
        put_transaction(cur_trans);
    }
}
Code example #26
File: null_blk.c Project: 0x000000FF/edison-linux
static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

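		/* no free command yet: sleep, accounted as I/O wait */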
		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}
Code example #27
File: svc.c Project: 020gzh/linux
static int svc_listen(struct socket *sock, int backlog)
{
	DEFINE_WAIT(wait);
	struct sock *sk = sock->sk;
	struct atm_vcc *vcc = ATM_SD(sock);
	int error;

	pr_debug("%p\n", vcc);
	lock_sock(sk);
	/* let server handle listen on unbound sockets */
	if (test_bit(ATM_VF_SESSION, &vcc->flags)) {
		error = -EINVAL;
		goto out;
	}
	if (test_bit(ATM_VF_LISTEN, &vcc->flags)) {
		error = -EADDRINUSE;
		goto out;
	}
	set_bit(ATM_VF_WAITING, &vcc->flags);
	sigd_enq(vcc, as_listen, NULL, NULL, &vcc->local);
	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
		if (!test_bit(ATM_VF_WAITING, &vcc->flags) || !sigd)
			break;
		schedule();
	}
	finish_wait(sk_sleep(sk), &wait);
	if (!sigd) {
		error = -EUNATCH;
		goto out;
	}
	set_bit(ATM_VF_LISTEN, &vcc->flags);
	vcc_insert_socket(sk);
	sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT;
	error = -sk->sk_err;
out:
	release_sock(sk);
	return error;
}
Code example #28
File: proc.c Project: WayWingsDev/Gplus_2159_0801
/*
 * Signal to userspace that an interrupt has occurred.
 */
static ssize_t irq_proc_read(struct file *filp, char __user *bufp, size_t len, loff_t *ppos)
{
	struct irq_proc *ip = (struct irq_proc *)filp->private_data;
	irq_desc_t *idp = irq_desc + ip->irq;
	int pending;

	DEFINE_WAIT(wait);

	if (len < sizeof(int))
		return -EINVAL;

	pending = atomic_read(&ip->count);
	if (pending == 0) {
		if (idp->status & IRQ_DISABLED)
			enable_irq(ip->irq);
		if (filp->f_flags & O_NONBLOCK)
			return -EWOULDBLOCK;
	}

	while (pending == 0) {
		prepare_to_wait(&ip->q, &wait, TASK_INTERRUPTIBLE);
		pending = atomic_read(&ip->count);
		if (pending == 0)
			schedule();
		finish_wait(&ip->q, &wait);
		if (signal_pending(current))
			return -ERESTARTSYS;
	}

	if (copy_to_user(bufp, &pending, sizeof pending))
		return -EFAULT;

	*ppos += sizeof pending;

	atomic_sub(pending, &ip->count);

	return sizeof pending;
}
Code example #29
File: sock.c Project: BackupTheBerlios/arp2-svn
/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock * sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk->sk_sleep, &wait);
	return timeo;
}
Code example #30
File: domain.c Project: alessandroste/testBSP
static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
	DEFINE_WAIT(wait);

	mutex_lock(&genpd->lock);
	/*
	 * Wait for the domain to transition into either the active,
	 * or the power off state.
	 */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status == GPD_STATE_ACTIVE
		    || genpd->status == GPD_STATE_POWER_OFF)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
}
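
Every example above follows the same idiom: declare a wait-queue entry with DEFINE_WAIT(), call prepare_to_wait() before testing the condition, schedule() while the condition is still false, and call finish_wait() when done, re-testing the condition on every iteration. The sketch below distills that pattern into a standalone helper; my_waitqueue, my_flag and my_wait_for_flag are hypothetical names introduced for illustration, not taken from any of the sources above.

/* Minimal sketch of the shared idiom. prepare_to_wait() enqueues the
 * task and sets its state before the condition is tested, so a wakeup
 * that arrives between the test and schedule() is not lost: it simply
 * makes schedule() return immediately. */
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_waitqueue);
static int my_flag;

static int my_wait_for_flag(void)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	for (;;) {
		prepare_to_wait(&my_waitqueue, &wait, TASK_INTERRUPTIBLE);
		if (my_flag)				/* condition to wait for */
			break;
		if (signal_pending(current)) {		/* interruptible sleep */
			ret = -ERESTARTSYS;
			break;
		}
		schedule();				/* actually sleep */
	}
	finish_wait(&my_waitqueue, &wait);		/* back to TASK_RUNNING */
	return ret;
}

The waker's side is then just: my_flag = 1; wake_up(&my_waitqueue);. For uninterruptible waits the examples use TASK_UNINTERRUPTIBLE and drop the signal_pending() check; for bounded waits they replace schedule() with schedule_timeout().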