Example #1
/*
 * Note that when we come in to __down or __down_interruptible,
 * we have already decremented count, but that decrement was
 * ineffective since the result was < 0, and any negative value
 * of count is equivalent to 0.
 * Thus it is only when we decrement count from some value > 0
 * that we have actually got the semaphore.
 */
void __sched __down(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	add_wait_queue_exclusive(&sem->wait, &wait);

	/*
	 * Try to get the semaphore.  If the count is > 0, then we've
	 * got the semaphore; we decrement count and exit the loop.
	 * If the count is 0 or negative, we set it to -1, indicating
	 * that we are asleep, and then sleep.
	 */
	while (__sem_update_count(sem, -1) <= 0) {
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}
	remove_wait_queue(&sem->wait, &wait);
	__set_task_state(tsk, TASK_RUNNING);

	/*
	 * If there are any more sleepers, wake one of them up so
	 * that it can either get the semaphore, or set count to -1
	 * indicating that there are still processes sleeping.
	 */
	wake_up(&sem->wait);
}
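This example leans on __sem_update_count(), an arch-specific helper that is not shown. Going by the comments above, it atomically adds incr to max(count, 0) and returns the old count, so a result <= 0 means the caller has not obtained the semaphore. A minimal C sketch of those semantics, using a generic cmpxchg loop in place of the real hand-written assembly (illustrative only):

/*
 * Illustrative sketch only: the real __sem_update_count() is written in
 * arch-specific assembly.  Semantics implied by the comments above:
 * new count = max(old count, 0) + incr, applied atomically; the old
 * count is returned.
 */
static inline int __sem_update_count(struct semaphore *sem, int incr)
{
	int old_count, tmp;

	do {
		old_count = atomic_read(&sem->count);
		tmp = (old_count > 0 ? old_count : 0) + incr;
	} while (atomic_cmpxchg(&sem->count, old_count, tmp) != old_count);

	return old_count;
}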
Example #2
int __sched __down_interruptible(struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	__set_task_state(tsk, TASK_INTERRUPTIBLE);
	add_wait_queue_exclusive(&sem->wait, &wait);

	while (__sem_update_count(sem, -1) <= 0) {
		if (signal_pending(current)) {
			/*
			 * A signal is pending - give up trying.
			 * Set sem->count to 0 if it is negative,
			 * since we are no longer sleeping.
			 */
			__sem_update_count(sem, 0);
			retval = -EINTR;
			break;
		}
		schedule();
		set_task_state(tsk, TASK_INTERRUPTIBLE);
	}
	remove_wait_queue(&sem->wait, &wait);
	__set_task_state(tsk, TASK_RUNNING);

	wake_up(&sem->wait);
	return retval;
}
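Callers reach this code through the public down_interruptible() wrapper and must be prepared for the -EINTR result produced above. A hypothetical driver-style caller (my_dev and its sem field are illustrative names, not from the source):

/* Hypothetical caller: my_dev and dev->sem are illustrative names. */
static int my_dev_op(struct my_dev *dev)
{
	if (down_interruptible(&dev->sem))
		return -ERESTARTSYS;	/* a signal arrived while waiting */

	/* ... work that needs the semaphore ... */

	up(&dev->sem);
	return 0;
}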
Example #3
/*
 * Because this function is inlined, the 'state' parameter will be
 * constant, and thus optimised away by the compiler.  Likewise the
 * 'timeout' parameter for the cases without timeouts.
 */
static inline int __sched __down_common(struct semaphore *sem, long state,
								long timeout)
{
	struct task_struct *task = current;
	struct semaphore_waiter waiter;

	list_add_tail(&waiter.list, &sem->wait_list);
	waiter.task = task;
	waiter.up = 0;

	for (;;) {
		if (signal_pending_state(state, task))
			goto interrupted;
		if (timeout <= 0)
			goto timed_out;
		__set_task_state(task, state);
		spin_unlock_irq(&sem->lock);
		timeout = schedule_timeout(timeout);
		spin_lock_irq(&sem->lock);
		if (waiter.up)
			return 0;
	}

 timed_out:
	list_del(&waiter.list);
	return -ETIME;

 interrupted:
	list_del(&waiter.list);
	return -EINTR;
}
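In the generic kernel/semaphore.c implementation this common routine is shared by thin per-state wrappers, and the release path hands the semaphore over by setting waiter->up before waking the task, which is what the waiter.up test above is waiting for. Roughly (a sketch of that surrounding code; details vary across kernel versions):

/* Sketch of the surrounding kernel/semaphore.c code (version-dependent). */
struct semaphore_waiter {
	struct list_head list;
	struct task_struct *task;
	int up;
};

static noinline void __sched __down(struct semaphore *sem)
{
	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_interruptible(struct semaphore *sem)
{
	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

/* Called with sem->lock held: hand the semaphore to the first waiter. */
static noinline void __sched __up(struct semaphore *sem)
{
	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
						struct semaphore_waiter, list);
	list_del(&waiter->list);
	waiter->up = 1;
	wake_up_process(waiter->task);
}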
Example #4
/*
 * Turn a tracing stop into a normal stop now, since with no tracer there
 * would be no way to wake it up with SIGCONT or SIGKILL.  If there was a
 * signal sent that would resume the child, but didn't because it was in
 * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
 */
static void ptrace_untrace(struct task_struct *child)
{
	spin_lock(&child->sighand->siglock);
	if (task_is_traced(child)) {
		if (child->signal->flags & SIGNAL_STOP_STOPPED) {
			__set_task_state(child, TASK_STOPPED);
		} else {
			signal_wake_up(child, 1);
		}
	}
	spin_unlock(&child->sighand->siglock);
}
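ptrace_untrace() is invoked when a tracer detaches; in kernels of this vintage the caller is __ptrace_unlink(), which severs the tracing relationship first. A sketch of that caller (field names such as ptrace_entry vary slightly between versions):

/* Sketch of the caller in kernel/ptrace.c (version-dependent details). */
void __ptrace_unlink(struct task_struct *child)
{
	BUG_ON(!child->ptrace);

	child->ptrace = 0;
	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);	/* field name varies by version */

	if (task_is_traced(child))
		ptrace_untrace(child);
}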
Example #5
/*
 * Turn a tracing stop into a normal stop now, since with no tracer there
 * would be no way to wake it up with SIGCONT or SIGKILL.  If there was a
 * signal sent that would resume the child, but didn't because it was in
 * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
 */
static void ptrace_untrace(struct task_struct *child)
{
	spin_lock(&child->sighand->siglock);
	if (task_is_traced(child)) {
		/*
		 * If the group stop is completed or in progress,
		 * this thread was already counted as stopped.
		 */
		if (child->signal->flags & SIGNAL_STOP_STOPPED ||
		    child->signal->group_stop_count)
			__set_task_state(child, TASK_STOPPED);
		else
			signal_wake_up(child, 1);
	}
	spin_unlock(&child->sighand->siglock);
}
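Both versions test task_is_traced() while holding siglock. In kernels of this vintage these predicates are plain macros over task->state, roughly:

/* Task state predicates from include/linux/sched.h (kernels of this vintage). */
#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)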
Example #6
void wk_start_kick_cpu(int cpu)
{
	if(IS_ERR(wk_tsk[cpu]))
	{
		printk("[wdk]wk_task[%d] is NULL\n",cpu);
	}
	else
	{
		/* Need to be asleep *before* we do a kthread_bind */
		__set_task_state(wk_tsk[cpu], TASK_UNINTERRUPTIBLE);
		set_tsk_need_resched(wk_tsk[cpu]);

		kthread_bind(wk_tsk[cpu], cpu);
	//	printk("[wdk]bind thread[%d] to cpu[%d]\n",wk_tsk[cpu]->pid,cpu);
		wake_up_process(wk_tsk[cpu]);
	}
}
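kthread_bind() requires that the target thread not be running, which is why this vendor code forces the thread into TASK_UNINTERRUPTIBLE first. The conventional pattern avoids that by binding a freshly created kthread, which has never run, before its first wake-up. A hedged sketch of that pattern (kwdt_thread, the thread name and wk_create_and_bind are illustrative, not from the source):

/* Conventional pattern: create the thread sleeping, bind it, then wake it.
 * kwdt_thread and the thread name are illustrative placeholders. */
static int wk_create_and_bind(int cpu)
{
	struct task_struct *t;

	t = kthread_create(kwdt_thread, NULL, "wdtk-%d", cpu);
	if (IS_ERR(t))
		return PTR_ERR(t);

	kthread_bind(t, cpu);		/* safe: the new thread has never run */
	wk_tsk[cpu] = t;
	wake_up_process(t);
	return 0;
}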
Example #7
/*
 * get a read lock on the semaphore
 */
void __sched __down_read(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->count >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->count++;
		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.type = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	__set_task_state(tsk, TASK_RUNNING);
 out:
	;
}
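The loop above spins until waiter.task becomes NULL; the grant side clears it under wait_lock before waking the sleeper, which is why the sleeper needs no further locking and why __down_read() took a task_struct reference earlier. A simplified sketch of that handshake (the function name grant_one_reader is illustrative; the real code lives in __rwsem_do_wake() and differs in detail across versions):

/* Simplified sketch of the reader-grant handshake; caller holds
 * sem->wait_lock.  grant_one_reader is an illustrative name. */
static void grant_one_reader(struct rw_semaphore *sem,
			     struct rwsem_waiter *waiter)
{
	struct task_struct *tsk = waiter->task;

	sem->count++;
	list_del(&waiter->list);
	smp_mb();		/* list removal must be visible before the handoff */
	waiter->task = NULL;	/* the sleeper's loop exits on this */
	wake_up_process(tsk);
	put_task_struct(tsk);	/* drop the reference taken in __down_read() */
}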
Example #8
/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * Do some preliminary argument and permissions checking before we
	 * actually start allocating stuff
	 */
	if (clone_flags & CLONE_NEWUSER) {
		if (clone_flags & CLONE_THREAD)
			return -EINVAL;
		/* hopefully this check will go away when userns support is
		 * complete
		 */
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
				!capable(CAP_SETGID))
			return -EPERM;
	}

	/*
	 * We hope to recycle these flags after 2.6.26
	 */
	if (unlikely(clone_flags & CLONE_STOPPED)) {
		static int __read_mostly count = 100;

		if (count > 0 && printk_ratelimit()) {
			char comm[TASK_COMM_LEN];

			count--;
			printk(KERN_INFO "fork(): process `%s' used deprecated "
					"clone flags 0x%lx\n",
				get_task_comm(comm, current),
				clone_flags & CLONE_STOPPED);
		}
	}

	/*
	 * When called from kernel_thread, don't do user tracing stuff.
	 */
	if (likely(user_mode(regs)))
		trace = tracehook_prepare_clone(clone_flags);

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 child_tidptr, NULL, trace);
	/*
	 * Do this prior waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		trace_sched_process_fork(current, p);

		nr = task_pid_vnr(p);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		audit_finish_fork(p);
		tracehook_report_clone(regs, clone_flags, nr, p);

		/*
		 * We set PF_STARTING at creation in case tracing wants to
		 * use this to distinguish a fully live task from one that
		 * hasn't gotten to tracehook_report_clone() yet.  Now we
		 * clear it and set the child going.
		 */
		p->flags &= ~PF_STARTING;

		if (unlikely(clone_flags & CLONE_STOPPED)) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			set_tsk_thread_flag(p, TIF_SIGPENDING);
			__set_task_state(p, TASK_STOPPED);
		} else {
			wake_up_new_task(p, clone_flags);
		}

		tracehook_report_clone_complete(trace, regs,
						clone_flags, nr, p);

		if (clone_flags & CLONE_VFORK) {
			freezer_do_not_count();
			wait_for_completion(&vfork);
			freezer_count();
			tracehook_report_vfork_done(p, nr);
		}
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}
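do_fork() itself is reached from thin arch-specific syscall stubs. A hedged sketch modeled on the 32-bit x86 stubs of this era (register plumbing and exact signatures differ per architecture):

/* Sketch of arch-level entry points (modeled on x86 of this era; exact
 * signatures and register plumbing differ per architecture). */
int sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

int sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs,
		       0, NULL, NULL);
}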
Example #9
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 *
	 * The mutex spinners are queued up using MCS lock so that only one
	 * spinner can compete for the mutex. However, if mutex spinning isn't
	 * going to happen, there is no point in going through the lock/unlock
	 * overhead.
	 */
	if (!mutex_can_spin_on_owner(lock))
		goto slowpath;

	for (;;) {
		struct task_struct *owner;
		struct mspin_node  node;

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		mspin_lock(MLOCK(lock), &node);
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner)) {
			mspin_unlock(MLOCK(lock), &node);
			break;
		}

		if ((atomic_read(&lock->count) == 1) &&
		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
			lock_acquired(&lock->dep_map, ip);
			mutex_set_owner(lock);
			mspin_unlock(MLOCK(lock), &node);
			preempt_enable();
			return 0;
		}
		mspin_unlock(MLOCK(lock), &node);

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
slowpath:
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (MUTEX_SHOW_NO_WAITER(lock) &&
		   (atomic_xchg(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			preempt_enable();
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;
}
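The state argument is supplied by small per-variant wrappers, so the uninterruptible, interruptible and killable mutex APIs all share this one slowpath. A sketch of those wrappers from kernel/mutex.c of the same era (argument lists vary slightly between versions):

/* Sketch of the per-state wrappers around __mutex_lock_common()
 * (kernel/mutex.c; exact arguments vary by version). */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}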
Example #10
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
               unsigned long ip)
{
    struct task_struct *task = current;
    struct mutex_waiter waiter;
    unsigned int old_val;
    unsigned long flags;

    spin_lock_mutex(&lock->wait_lock, flags);

    debug_mutex_lock_common(lock, &waiter);
    mutex_acquire(&lock->dep_map, subclass, 0, ip);
    debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

    /* add waiting tasks to the end of the waitqueue (FIFO): */
    list_add_tail(&waiter.list, &lock->wait_list);
    waiter.task = task;

    old_val = atomic_xchg(&lock->count, -1);
    if (old_val == 1)
        goto done;

    lock_contended(&lock->dep_map, ip);

    for (;;) {
        /*
         * Let's try to take the lock again - this is needed even if
         * we get here for the first time (shortly after failing to
         * acquire the lock), to make sure that we get a wakeup once
         * it's unlocked. Later on, if we sleep, this is the
         * operation that gives us the lock. We xchg it to -1, so
         * that when we release the lock, we properly wake up the
         * other waiters:
         */
        old_val = atomic_xchg(&lock->count, -1);
        if (old_val == 1)
            break;

        /*
         * got a signal? (This code gets eliminated in the
         * TASK_UNINTERRUPTIBLE case.)
         */
        if (unlikely((state == TASK_INTERRUPTIBLE &&
                    signal_pending(task)) ||
                  (state == TASK_KILLABLE &&
                    fatal_signal_pending(task)))) {
            mutex_remove_waiter(lock, &waiter,
                        task_thread_info(task));
            mutex_release(&lock->dep_map, 1, ip);
            spin_unlock_mutex(&lock->wait_lock, flags);

            debug_mutex_free_waiter(&waiter);
            return -EINTR;
        }
        __set_task_state(task, state);

        /* didn't get the lock, go to sleep: */
        spin_unlock_mutex(&lock->wait_lock, flags);
        schedule();
        spin_lock_mutex(&lock->wait_lock, flags);
    }

done:
    lock_acquired(&lock->dep_map);
    /* got the lock - rejoice! */
    mutex_remove_waiter(lock, &waiter, task_thread_info(task));
    debug_mutex_set_owner(lock, task_thread_info(task));

    /* set it to 0 if there are no waiters left: */
    if (likely(list_empty(&lock->wait_list)))
        atomic_set(&lock->count, 0);

    spin_unlock_mutex(&lock->wait_lock, flags);

    debug_mutex_free_waiter(&waiter);

    return 0;
}
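This slowpath runs only when the architecture's atomic fastpath fails to move count from 1 to 0. The public entry point for this era looks roughly like the following (sketch, version-dependent):

/* Sketch of the fastpath entry (kernel/mutex.c of this era); the slowpath
 * above is entered only when the 1 -> 0 transition is contended. */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}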