Example #1
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
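A minimal sketch of the bare pattern that most of these examples share, using a hypothetical wait queue `q` and condition flag `ready` in place of the subsystem-specific state:

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(q);	/* hypothetical wait queue */
static int ready;			/* hypothetical wake-up condition */

static int wait_for_ready(void)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	/* Queue as an exclusive waiter: wake_up() runs at most one
	 * task added this way, which avoids a thundering herd. */
	add_wait_queue_exclusive(&q, &wait);
	for (;;) {
		/* Mark the task sleeping before testing the condition, so
		 * a wake_up() racing with the test is not lost: it flips
		 * the state back to TASK_RUNNING and schedule() returns
		 * promptly. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (ready)
			break;
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&q, &wait);
	return err;
}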
Example #2
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
			     int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, L2CAP_NESTING_PARENT);

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (1) {
		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		nsk = bt_accept_dequeue(sk, newsock);
		if (nsk)
			break;

		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);

		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);

		lock_sock_nested(sk, L2CAP_NESTING_PARENT);
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
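Example #2 is the same accept path as Example #1 after conversion to the wait_woken() idiom: DEFINE_WAIT_FUNC() plus wait_woken() take over the task-state juggling and close the missed-wakeup window, so the caller only re-tests its conditions between sleeps. When no lock has to be dropped around the sleep, the whole loop collapses into a single helper; a sketch reusing the hypothetical `q` and `ready` from above:

/* Queues the caller as an exclusive waiter, re-tests `ready` around
 * each sleep, and returns 0 on success or -ERESTARTSYS on a signal. */
err = wait_event_interruptible_exclusive(q, ready);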
Example #3
int
__down_interruptible (struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_INTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers ++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into
		 * the trylock failure case - we won't be
		 * sleeping, and we can't get the lock as
		 * it has contention. Just correct the count
		 * and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock. The
		 * "-1" is because we're still hoping to get
		 * the lock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_INTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
	wake_up(&sem->wait);
	return retval;
}
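The sleepers/count arithmetic above is the classic Linux semaphore slow path: every task that reached __down_interruptible() has already decremented count once, and atomic_add_negative(sleepers - 1, &sem->count) folds the other sleepers' decrements back in, so across iterations count carries at most one pending decrement while sleepers records whether a task is still parked. The wake_up() after the loop passes the baton to the next exclusive waiter so it can redo the same bookkeeping.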
Example #4
static int wait_for_packet(struct sock * sk, int *err, long *timeo_p)
{
	int error;

	DECLARE_WAITQUEUE(wait, current);

	__set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue_exclusive(sk->sleep, &wait);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	if (!skb_queue_empty(&sk->receive_queue))
		goto ready;

	/* Socket shut down? */
	if (sk->shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can come disconnected. If so we report the problem */
	error = -ENOTCONN;
	if(connection_based(sk) && !(sk->state==TCP_ESTABLISHED || sk->state==TCP_LISTEN))
		goto out_err;

	/* handle signals */
	if (signal_pending(current))
		goto interrupted;

	*timeo_p = schedule_timeout(*timeo_p);

ready:
	current->state = TASK_RUNNING;
	remove_wait_queue(sk->sleep, &wait);
	return 0;

interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
out:
	current->state = TASK_RUNNING;
	remove_wait_queue(sk->sleep, &wait);
	return error;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}
Example #5
static inline void __lock_metapage(struct metapage *mp)
{
	DECLARE_WAITQUEUE(wait, current);
	INCREMENT(mpStat.lockwait);
	add_wait_queue_exclusive(&mp->wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (metapage_locked(mp)) {
			unlock_page(mp->page);
			io_schedule();
			lock_page(mp->page);
		}
	} while (trylock_metapage(mp));
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&mp->wait, &wait);
}
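Two details are specific to this JFS metapage example: the sleep is TASK_UNINTERRUPTIBLE because a metadata lock cannot simply be abandoned on a signal, and it uses io_schedule() rather than schedule(), which additionally accounts the task as blocked on I/O (iowait). The unlock_page()/lock_page() pair drops the page lock across the sleep, evidently so the current holder of the metapage can make progress and issue the wake-up.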
Example #6
static void request_wait(struct fuse_conn *fc)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while(fc->sb != NULL && list_empty(&fc->pending)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if(signal_pending(current))
			break;

		spin_unlock(&fuse_lock);
		schedule();
		spin_lock(&fuse_lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}
Example #7
void __down(struct semaphore * sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	tsk->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	while (__sem_update_count(sem, -1) <= 0) {
		schedule();
		tsk->state = TASK_UNINTERRUPTIBLE;
	}
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;

	wake_up(&sem->wait);
}
Example #8
/* Wait for the lock to become unbiased. Since we're
 * a writer, we'll make ourselves exclusive.
 */
void down_write_failed(struct rw_semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	up_write(sem);	/* this takes care of granting the lock */

	add_wait_queue_exclusive(&sem->wait, &wait);

	while (atomic_read(&sem->count) < 0) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&sem->count) >= 0)
			break;	/* we must attempt to acquire or bias the lock */
		schedule();
	}

	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
}
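down_write_failed() shows the lost-wakeup guard at its most explicit: the count is tested once, set_task_state() then marks the task sleeping, and the count is tested again before schedule(). A release landing between the two tests is still seen, and one landing after the second test finds the task already marked sleeping and wakes it through the queue.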
Example #9
int __sched
__down_failed_interruptible(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	long ret = 0;

#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down failed(%p)\n",
	       tsk->comm, tsk->pid, sem);
#endif

	tsk->state = TASK_INTERRUPTIBLE;
	wmb();
	add_wait_queue_exclusive(&sem->wait, &wait);

	while (__sem_update_count(sem, -1) <= 0) {
		if (signal_pending(current)) {
			/*
			 * A signal is pending - give up trying.
			 * Set sem->count to 0 if it is negative,
			 * since we are no longer sleeping.
			 */
			__sem_update_count(sem, 0);
			ret = -EINTR;
			break;
		}
		schedule();
		set_task_state(tsk, TASK_INTERRUPTIBLE);
	}

	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
	wake_up(&sem->wait);

#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down %s(%p)\n",
	       current->comm, current->pid,
	       (ret < 0 ? "interrupted" : "acquired"), sem);
#endif
	return ret;
}
Example #10
static void netlink_table_grab(void)
{
	write_lock_bh(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for(;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_bh(&nl_table_lock);
			schedule();
			write_lock_bh(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}
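A sketch of what the matching release path looks like (in the kernel the counterpart is netlink_table_ungrab(); the reader side likewise wakes the queue when the last user drops nl_table_users to zero):

static void netlink_table_ungrab(void)
{
	write_unlock_bh(&nl_table_lock);
	/* Grabbers queue exclusively, so this releases just one of
	 * them to retry the users == 0 check under the write lock. */
	wake_up(&nl_table_wait);
}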
Example #11
void __sched
__down_failed(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down failed(%p)\n",
	       tsk->comm, tsk->pid, sem);
#endif

	tsk->state = TASK_UNINTERRUPTIBLE;
	wmb();
	add_wait_queue_exclusive(&sem->wait, &wait);

	/*
	 * Try to get the semaphore.  If the count is > 0, then we've
	 * got the semaphore; we decrement count and exit the loop.
	 * If the count is 0 or negative, we set it to -1, indicating
	 * that we are asleep, and then sleep.
	 */
	while (__sem_update_count(sem, -1) <= 0) {
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;

	/*
	 * If there are any more sleepers, wake one of them up so
	 * that it can either get the semaphore, or set count to -1
	 * indicating that there are still processes sleeping.
	 */
	wake_up(&sem->wait);

#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down acquired(%p)\n",
	       tsk->comm, tsk->pid, sem);
#endif
}
Example #12
int __down_interruptible(struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	tsk->state = TASK_INTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	while (__sem_update_count(sem, -1) <= 0) {
		if (signal_pending(current)) {
			__sem_update_count(sem, 0);
			retval = -EINTR;
			break;
		}
		schedule();
		tsk->state = TASK_INTERRUPTIBLE;
	}
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
	wake_up(&sem->wait);
	return retval;
}
Example #13
/*!
 *  @brief      Wait for an event to be signaled for some specified time.
 *              It is possible to wait infinitely; this call blocks and
 *              does not spin.
 *
 *  @param      handle   Allocated event object handle.
 *  @param      timeout  Timeout in ms; OSAL_WAIT_NONE polls, and
 *                       OSAL_WAIT_FOREVER waits indefinitely.
 *
 *  @retval     Int      Success if it succeeded, otherwise a failure code
 */
Int OsalEvent_wait (OsalEvent_Handle handle, UInt32 timeout)
{
    Int status = OSALEVENT_SUCCESS ;
    Int32 osStatus = 0u;
    UInt32 lockKey = 0u;

    OsalEvent_Object* event = (OsalEvent_Object*) handle;

    GT_2trace (curTrace, GT_ENTER,"OsalEvent_wait", event, timeout );

    GT_assert (curTrace, (NULL != event) );

    if (OSAL_WAIT_NONE == timeout)
    {
        lockKey = OsalSpinlock_enter (event->lock) ;
        if (event->value == 0u) {
            /*! @retval OSALEVENT_E_SYNCFAIL when the value is found to be zero. */
            status = OSALEVENT_E_SYNCFAIL ;
            GT_setFailureReason(curTrace, GT_4CLASS,
                "OsalEvent_wait",OSALEVENT_E_SYNCFAIL,"Wrong combination");
        }
        OsalSpinlock_leave (event->lock, lockKey) ;
    }
    else {
        lockKey = OsalSpinlock_enter (event->lock) ;
        if (!event->value) {
            DECLARE_WAITQUEUE (wait, current) ;
            /* Add the current process to wait queue */
            add_wait_queue_exclusive (&event->list, &wait) ;
            /* Set the current task state to interruptible */
            do {
                set_current_state (TASK_INTERRUPTIBLE) ;
                /* Release the lock */
                OsalSpinlock_leave (event->lock, lockKey) ;
                /* Schedule out for given timeout */
                osStatus = schedule_timeout( \
                    (OSAL_WAIT_FOREVER == timeout)?MAX_SCHEDULE_TIMEOUT:\
                     msecs_to_jiffies (timeout) ) ;
                /* Take the lock again */
                lockKey = OsalSpinlock_enter (event->lock) ;
                /* Check for status */
                if (osStatus == 0) {
                    /*! @retval OSALEVENT_E_TIMEOUT when the wait times out */
                    status = OSALEVENT_E_TIMEOUT ;
                    GT_setFailureReason(curTrace, GT_4CLASS,
                        "OsalEvent_wait",OSALEVENT_E_TIMEOUT,
                        "Timeout occured");
                    break ;
                }
                if (osStatus == -ERESTARTSYS) {
                    /*! @retval OSALEVENT_E_RESTARTSYS when the wait is
                        interrupted by a signal */
                    status = OSALEVENT_E_RESTARTSYS ;
                    GT_setFailureReason(curTrace, GT_4CLASS,
                        "OsalEvent_wait",OSALEVENT_E_RESTARTSYS,
                        "Interrupted ");
                    break ;
                }
            } while (0) ;
            /* Remove from wait list */
            remove_wait_queue (&event->list, &wait) ;
            /* Set the current task status as running */
            set_current_state (TASK_RUNNING) ;
        }
        /* End the lock */
        OsalSpinlock_leave (event->lock, lockKey) ;
    }

    GT_1trace (curTrace, GT_LEAVE,"OsalEvent_wait",status);

    return status;
}
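The timeout handling above leans on schedule_timeout()'s contract: it sleeps for at most the given number of jiffies (indefinitely when passed MAX_SCHEDULE_TIMEOUT) and returns the jiffies that were left, so 0 means the timer expired. Note that schedule_timeout() itself never returns -ERESTARTSYS; a pending signal merely ends an interruptible sleep early with a positive remainder and is normally detected with signal_pending(current). A condensed sketch with a hypothetical millisecond timeout:

/* In this convention 0 ms means wait forever. */
long t = (timeout_ms == 0) ? MAX_SCHEDULE_TIMEOUT
			   : msecs_to_jiffies(timeout_ms);

set_current_state(TASK_INTERRUPTIBLE);
t = schedule_timeout(t);	/* jiffies remaining; 0 on expiry */
if (t == 0) {
	/* timed out before anyone signalled the event */
}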
Example #14
static int cachemiss_thread (void *data)
{
	tux_req_t *req;
	struct k_sigaction *ka;
	DECLARE_WAITQUEUE(wait, current);
	iothread_t *iot = data;
	int nr = iot->ti->cpu, wake_up;

	Dprintk("iot %p/%p got started.\n", iot, current);
	drop_permissions();

	spin_lock(&iot->async_lock);
	iot->threads++;
	sprintf(current->comm, "async IO %d/%d", nr, iot->threads);


	spin_lock_irq(&current->sighand->siglock);
	ka = current->sighand->action + SIGCHLD-1;
	ka->sa.sa_handler = SIG_IGN;
	siginitsetinv(&current->blocked, sigmask(SIGCHLD));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	spin_unlock(&iot->async_lock);
#ifdef CONFIG_SMP
	{
		cpumask_t mask;

		if (cpu_isset(nr, cpu_online_map)) {
			cpus_clear(mask);
			cpu_set(nr, mask);
			set_cpus_allowed(current, mask);
		}

	}
#endif

	add_wait_queue_exclusive(&iot->async_sleep, &wait);

	for (;;) {
		while (!list_empty(&iot->async_queue) &&
				(req = get_cachemiss(iot))) {

			if (!req->atom_idx) {
				add_tux_atom(req, flush_request);
				add_req_to_workqueue(req);
				continue;
			}
			tux_schedule_atom(req, 1);
			if (signal_pending(current))
				flush_all_signals();
		}
		if (signal_pending(current))
			flush_all_signals();
		if (!list_empty(&iot->async_queue))
			continue;
		if (iot->shutdown) {
			Dprintk("iot %p/%p got shutdown!\n", iot, current);
			break;
		}
		__set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&iot->async_queue)) {
			Dprintk("iot %p/%p going to sleep.\n", iot, current);
			schedule();
			Dprintk("iot %p/%p got woken up.\n", iot, current);
		}
		__set_current_state(TASK_RUNNING);
	}

	remove_wait_queue(&iot->async_sleep, &wait);

	wake_up = 0;
	spin_lock(&iot->async_lock);
	if (!--iot->threads)
		wake_up = 1;
	spin_unlock(&iot->async_lock);
	Dprintk("iot %p/%p has finished shutdown!\n", iot, current);
	if (wake_up) {
		Dprintk("iot %p/%p waking up master.\n", iot, current);
		wake_up(&iot->wait_shutdown);
	}

	return 0;
}
Example #15
/*****************************************************************************
 Function   : VOS_SemDown
 Description: block the calling task on a semaphore
 Input      : pSemCB -- semaphore control block
              timeOutInMillSec -- timeout in milliseconds (0 = no timeout)
 Return     : VOS_OK on success and errno on failure
 *****************************************************************************/
VOS_UINT32 VOS_SemDown(SEM_CONTROL_BLOCK *pSemCB, VOS_UINT32 timeOutInMillSec)
{
    struct task_struct *tsk = current;
    VOS_UINT32         timeintick;
    VOS_ULONG          flags;
    VOS_UINT32         ulRetValue = VOS_OK;
    VOS_INT            sleepers;

    DECLARE_WAITQUEUE(wait, tsk);
    
    /* if timeOutInMillSec == 0, sleep with no timeout;
       if timeOutInMillSec > 0, sleep until woken or timed out */
    timeintick = (timeOutInMillSec==0)
                  ? MAX_SCHEDULE_TIMEOUT : ((timeOutInMillSec*HZ)/1000);

    tsk->state = TASK_UNINTERRUPTIBLE;

    add_wait_queue_exclusive(&pSemCB->wait, &wait);

    spin_lock_irqsave(&VOS_semaphore_lock, flags);

    pSemCB->sleepers++;

    for ( ; ; ) 
    {
        sleepers = pSemCB->sleepers;

        /* Add "everybody else" into it. They aren't
           playing, because we own the spinlock. */
        if (!atomic_add_negative(sleepers - 1, &pSemCB->count)) 
        {
            pSemCB->sleepers = 0;
            break;
        }

        pSemCB->sleepers = 1;   /* us - see -1 above */

        spin_unlock_irqrestore(&VOS_semaphore_lock, flags);

        /* if timeintick equals MAX_SCHEDULE_TIMEOUT, this ends up in a
           plain schedule() and will not return unless some other task
           wakes this thread */
           
        if ( 0 == schedule_timeout(timeintick) )
        {
            /* re-take the lock; matches the spin_unlock_irqrestore() after the loop */
            spin_lock_irqsave(&VOS_semaphore_lock, flags);
            ulRetValue = VOS_ERRNO_SEMA4_P_TIMEOUT;
            break;         
        }
        else
        {
            ulRetValue = VOS_OK;
        }

        tsk->state = TASK_UNINTERRUPTIBLE;

        spin_lock_irqsave(&VOS_semaphore_lock, flags);
    }

    spin_unlock_irqrestore(&VOS_semaphore_lock, flags);

    remove_wait_queue(&pSemCB->wait, &wait);

    tsk->state = TASK_RUNNING;

    /* wake up another one, now maybe count > 0 */
    wake_up(&pSemCB->wait);

    return ulRetValue;
}
Example #16
void
cfs_waitq_add_exclusive(cfs_waitq_t *waitq,
                        cfs_waitlink_t *link)
{
        add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
}
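cfs_waitq_add_exclusive() is a thin portability wrapper (it appears to come from Lustre's libcfs layer) that simply forwards to add_wait_queue_exclusive(). Whatever the wrapper, the producer side looks the same everywhere; a minimal sketch reusing the hypothetical `q` and `ready` from the first example:

static void make_ready(void)
{
	ready = 1;
	/* wake_up() wakes every non-exclusive waiter but at most one
	 * task added with add_wait_queue_exclusive(); wake_up_all()
	 * would release all exclusive waiters at once. */
	wake_up(&q);
}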