Example No. 1
/*
 * Assumes that either tasklist_lock is read-locked with the appropriate
 * task_lock held, or that tasklist_lock is write-locked.
 */
static void task_update_object(struct task_kddm_object *obj)
{
	struct task_struct *tsk = obj->task;
	const struct cred *cred;

	if (tsk) {
		BUG_ON(tsk->task_obj != obj);

		obj->state = tsk->state;
		obj->flags = tsk->flags;
		obj->ptrace = tsk->ptrace;
		obj->exit_state = tsk->exit_state;
		obj->exit_code = tsk->exit_code;
		obj->exit_signal = tsk->exit_signal;

		obj->self_exec_id = tsk->self_exec_id;

		BUG_ON(obj->node != kerrighed_node_id &&
		       obj->node != KERRIGHED_NODE_ID_NONE);

		rcu_read_lock();
		cred = __task_cred(tsk);
		obj->uid = cred->uid;
		obj->euid = cred->euid;
		obj->egid = cred->egid;
		rcu_read_unlock();

		obj->utime = task_utime(tsk);
		obj->stime = task_stime(tsk);

		obj->dumpable = (tsk->mm && get_dumpable(tsk->mm) == 1);

		obj->thread_group_empty = thread_group_empty(tsk);
	}
}
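/*
 * Added sketch (not from the Kerrighed source): one way a caller could
 * satisfy the locking contract stated above, using the "tasklist_lock
 * write locked" variant.  The name krg_update_object_locked is
 * hypothetical.
 */
static void krg_update_object_locked(struct task_kddm_object *obj)
{
	write_lock_irq(&tasklist_lock);
	task_update_object(obj);
	write_unlock_irq(&tasklist_lock);
}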
Example No. 2
SYSCALL_DEFINE2(smunch, int, pid, unsigned long, bit_pattern)
{
	struct task_struct *target;
	unsigned long flags;

	rcu_read_lock();
	target = find_task_by_vpid(pid);
	if (!target) {
		rcu_read_unlock();
		printk(KERN_ALERT "smunch: target does not exist\n");
		return -ESRCH;
	}
	/*
	 * Keep the RCU read lock held until the sighand lock pins the
	 * task, so that target cannot be freed under us.
	 */
	if (!lock_task_sighand(target, &flags)) {
		rcu_read_unlock();
		printk(KERN_ALERT "smunch: could not acquire sighand lock\n");
		return -ESRCH;
	}
	rcu_read_unlock();

	if (!thread_group_empty(target)) {
		printk(KERN_ALERT "smunch: multi-threaded process\n");
		unlock_task_sighand(target, &flags);
		return -EINVAL;
	}

	if ((target->exit_state & EXIT_ZOMBIE) &&
	    (bit_pattern & (1UL << (SIGKILL - 1)))) {
		/* A zombie receiving SIGKILL can be reaped directly.
		 * (EXIT_DEAD tasks are already being reaped and must not
		 * be passed to release_task() a second time.) */
		printk(KERN_ALERT "smunch: killing process with PID %d\n", pid);
		unlock_task_sighand(target, &flags);
		release_task(target);
	} else {
		/* Queue the whole bit pattern as shared-pending signals,
		 * then force the task to notice them; a bare
		 * wake_up_process() would not set TIF_SIGPENDING. */
		sigaddsetmask(&target->signal->shared_pending.signal,
			      bit_pattern);
		signal_wake_up(target, 1);
		unlock_task_sighand(target, &flags);
	}
	return 0;
}
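/*
 * Added userspace sketch (not from the original): invoking the custom
 * syscall with syscall(2).  __NR_smunch depends entirely on the number
 * assigned in the patched kernel's syscall table; the value below is a
 * placeholder, not a real syscall number.
 */
#include <sys/syscall.h>
#include <unistd.h>
#include <signal.h>

#ifndef __NR_smunch
#define __NR_smunch 337	/* placeholder - use your kernel's table entry */
#endif

static long smunch(int pid, unsigned long bit_pattern)
{
	return syscall(__NR_smunch, pid, bit_pattern);
}

/* Example: deliver SIGKILL to (and thereby reap) a zombie child:
 *	smunch(zombie_pid, 1UL << (SIGKILL - 1));
 */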
Example No. 3
/*
 * Careful here! We test whether the file pointer is NULL before
 * releasing the fd. This ensures that one clone task can't release
 * an fd while another clone is opening it.
 */
SYSCALL_DEFINE1(close, unsigned int, fd)
{
	struct file * filp;
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	int retval;

#if 1 //wschen 2012-03-21
	struct inode *inode;
	struct pipe_inode_info *pipe;
	wait_queue_head_t *qhead;

try_again:
#endif

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	filp = fdt->fd[fd];
	if (!filp)
		goto out_unlock;

#if 1 //wschen 2012-03-21 CTS testInterruptWritablePipeChannel fail
	if (!thread_group_empty(current) && filp->f_path.dentry) {
		inode = filp->f_path.dentry->d_inode;
		if (inode) {
			pipe = inode->i_pipe;
			if (pipe && pipe->writers == 1 && pipe->readers == 1) {
				if (filp->f_mode == FMODE_WRITE &&
				    filp->f_flags == O_WRONLY) {
					if (pipe->waiting_writers == 1) {
						/* Wake the one writer blocked on this
						 * pipe if it belongs to our thread
						 * group, and flag a pending signal so
						 * its write aborts. */
						qhead = &pipe->wait;
						if (waitqueue_active(qhead)) {
							struct task_struct *p;
							struct list_head *tmp = &qhead->task_list;
							wait_queue_t *w;

							w = list_entry(tmp->next, wait_queue_t, task_list);
							if (w && w->func == autoremove_wake_function) {
								p = (struct task_struct *)w->private;
								if (p && p->pid > 0 && p->tgid > 0 &&
								    same_thread_group(p, current)) {
									set_tsk_thread_flag(p, TIF_SIGPENDING);
									wake_up_interruptible(qhead);
								}
							}
						}
					} else if (pipe->waiting_writers == 0) {
						/* A writer may be mid-write: back off
						 * and retry once i_mutex is free. */
						if (mutex_trylock(&inode->i_mutex) == 0) {
							spin_unlock(&files->file_lock);
							schedule();
							goto try_again;
						} else {
							mutex_unlock(&inode->i_mutex);
						}
					}
				} else if (filp->f_mode == FMODE_READ &&
					   filp->f_flags == O_RDONLY &&
					   pipe->waiting_writers == 0) {
					/* ... (remainder of this vendor workaround is
					 * truncated in the source listing) */
Example No. 4
/*
 * Check constraints on flags passed to the unshare system call.
 */
static int check_unshare_flags(unsigned long unshare_flags)
{
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
				CLONE_NEWUSER|CLONE_NEWPID))
		return -EINVAL;
	/*
	 * Not implemented, but pretend it works if there is nothing
	 * to unshare.  Note that unsharing the address space or the
	 * signal handlers also need to unshare the signal queues (aka
	 * CLONE_THREAD).
	 */
	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
		if (!thread_group_empty(current))
			return -EINVAL;
	}
	if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
		if (atomic_read(&current->sighand->count) > 1)
			return -EINVAL;
	}
	if (unshare_flags & CLONE_VM) {
		if (!current_is_single_threaded())
			return -EINVAL;
	}

	return 0;
}
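/*
 * Added userspace sketch (not from the kernel source): exercising the
 * thread_group_empty() check above from user space.  Build with
 * "cc -pthread demo.c".
 */
#define _GNU_SOURCE
#include <sched.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>

static void *idle_thread(void *arg)
{
	pause();	/* just keep a second thread alive */
	return NULL;
}

int main(void)
{
	pthread_t t;

	/* Single-threaded: passes the checks and is treated as a no-op. */
	printf("single-threaded unshare(CLONE_VM): %s\n",
	       unshare(CLONE_VM) ? strerror(errno) : "ok");

	/* With a second thread, thread_group_empty(current) is false,
	 * so the same call now fails with EINVAL. */
	if (pthread_create(&t, NULL, idle_thread, NULL) != 0)
		return 1;
	printf("multi-threaded unshare(CLONE_VM): %s\n",
	       unshare(CLONE_VM) ? strerror(errno) : "ok");
	return 0;
}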
Example No. 5
static inline bool __task_will_free_mem(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;

	/*
	 * A coredumping process may sleep for an extended period in exit_mm(),
	 * so the oom killer cannot assume that the process will promptly exit
	 * and release memory.
	 */
	if (sig->flags & SIGNAL_GROUP_COREDUMP)
		return false;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		return true;

	if (thread_group_empty(task) && (task->flags & PF_EXITING))
		return true;

	return false;
}
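/*
 * Added sketch (simplified, not the kernel's exact code): how the oom
 * killer builds on the helper above.  The real task_will_free_mem() in
 * mm/oom_kill.c additionally walks every other process sharing the mm,
 * since the memory is only freed once all users of the mm have exited.
 */
static bool task_will_free_mem_sketch(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;

	/* No mm: the task has already passed exit_mm(), nothing to free. */
	if (!mm)
		return false;

	if (!__task_will_free_mem(task))
		return false;

	/* Real code: require __task_will_free_mem(p) for every process p
	 * sharing mm before trusting the exit to free memory. */
	return true;
}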
Example No. 6
SYSCALL_DEFINE2(smunch, int, pid, unsigned long, bit_pattern)
{
	unsigned long flags;
	struct task_struct *task;
	long ret = 0;

	rcu_read_lock();
	task = pid_task(find_vpid(pid), PIDTYPE_PID);
	if (!task) {
		rcu_read_unlock();
		return -ESRCH;	/* process not present */
	}
	/* A task that refuses the lock is already dead or dying.  Note
	 * that a sighand we never locked must not be unlocked. */
	if (!lock_task_sighand(task, &flags)) {
		rcu_read_unlock();
		return -ESRCH;
	}
	rcu_read_unlock();

	if (!thread_group_empty(task)) {
		printk(KERN_ALERT "smunch: multi-threaded process, exiting without processing\n");
		ret = -EINVAL;
		goto return_path;
	}

	/* Info for the user. */
	printk(KERN_ALERT "smunch: exit_state=%#x, state=%#lx\n",
	       task->exit_state, task->state);
	if (task->state & TASK_UNINTERRUPTIBLE)
		printk(KERN_ALERT "smunch: process is in uninterruptible wait (deep sleep)\n");

	if ((bit_pattern & (1UL << (SIGKILL - 1))) &&
	    (task->exit_state & EXIT_ZOMBIE)) {
		printk(KERN_ALERT "smunch: SIGKILL present while process is a zombie, releasing task\n");
		unlock_task_sighand(task, &flags);
		release_task(task);	/* detach_pid() is called from release_task() */
		return 0;
	}

	/*
	 * No SIGKILL, an ordinary process, or a deep sleeper: deliver the
	 * whole bit pattern at once.  It is the user's responsibility to
	 * note that the signals will be handled in order, 1 through 64.
	 */
	task->signal->shared_pending.signal.sig[0] = bit_pattern;
	set_tsk_thread_flag(task, TIF_SIGPENDING);
	signal_wake_up(task, 1);

return_path:
	unlock_task_sighand(task, &flags);
	return ret;
}
Example No. 7
/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	__ptrace_unlink(p);

	if (p->exit_state == EXIT_ZOMBIE) {
		if (!task_detached(p) && thread_group_empty(p)) {
			if (!same_thread_group(p->real_parent, tracer))
				do_notify_parent(p, p->exit_signal);
			else if (ignoring_children(tracer->sighand)) {
				__wake_up_parent(p, tracer);
				p->exit_signal = -1;
			}
		}
		if (task_detached(p)) {
			/* Mark it as in the process of being reaped. */
			p->exit_state = EXIT_DEAD;
			return true;
		}
	}

	return false;
}
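/*
 * Added sketch (simplified from the real ptrace_detach()): the calling
 * pattern implied by the comment above.  release_task() may only run
 * after tasklist_lock has been dropped.
 */
static void ptrace_detach_sketch(struct task_struct *tracer,
				 struct task_struct *child)
{
	bool dead;

	write_lock_irq(&tasklist_lock);
	dead = __ptrace_detach(tracer, child);
	write_unlock_irq(&tasklist_lock);

	if (dead)
		release_task(child);
}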
Example No. 8
/*
 * wrapper function to avoid #include problems.
 */
int rcu_my_thread_group_empty(void)
{
	return thread_group_empty(current);
}
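/*
 * Added sketch (hypothetical caller): a file that cannot pull in the
 * scheduler headers only needs this one-line prototype to use the
 * wrapper, which is the point of keeping it out of line.
 */
extern int rcu_my_thread_group_empty(void);

static inline void assert_single_threaded(void)
{
	WARN_ON_ONCE(!rcu_my_thread_group_empty());
}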
Example No. 9
asmlinkage long sys_times(struct tms __user * tbuf)
{
    /*
     *	In the SMP world we might just be unlucky and have one of
     *	the times increment as we use it. Since the value is an
     *	atomically safe type this is just fine. Conceptually it's
     *	as if the syscall took an instant longer to occur.
     */
    if (tbuf) {
        struct tms tmp;
        cputime_t utime, stime, cutime, cstime;

#ifdef CONFIG_SMP
        if (thread_group_empty(current)) {
            /*
             * Single thread case without the use of any locks.
             *
             * We may race with release_task if two threads are
             * executing. However, release_task() first adds up the
             * counters (__exit_signal) before removing the task
             * from the process tasklist (__unhash_process).
             * __exit_signal also acquires and releases the
             * siglock which results in the proper memory ordering
             * so that the list modifications are always visible
             * after the counters have been updated.
             *
             * If the counters have been updated by the second thread
             * but the thread has not yet been removed from the list
             * then the other branch will be executing which will
             * block on tasklist_lock until the exit handling of the
             * other task is finished.
             *
             * This also implies that the sighand->siglock cannot
             * be held by another processor. So we can also
             * skip acquiring that lock.
             */
            utime = cputime_add(current->signal->utime, current->utime);
            stime = cputime_add(current->signal->stime, current->stime);
            cutime = current->signal->cutime;
            cstime = current->signal->cstime;
        } else
#endif
        {

            /* Process with multiple threads */
            struct task_struct *tsk = current;
            struct task_struct *t;

            read_lock(&tasklist_lock);
            utime = tsk->signal->utime;
            stime = tsk->signal->stime;
            t = tsk;
            do {
                utime = cputime_add(utime, t->utime);
                stime = cputime_add(stime, t->stime);
                t = next_thread(t);
            } while (t != tsk);

            /*
             * While we have tasklist_lock read-locked, no dying thread
             * can be updating current->signal->[us]time.  Instead,
             * we got their counts included in the live thread loop.
             * However, another thread can come in right now and
             * do a wait call that updates current->signal->c[us]time.
             * To make sure we always see that pair updated atomically,
             * we take the siglock around fetching them.
             */
            spin_lock_irq(&tsk->sighand->siglock);
            cutime = tsk->signal->cutime;
            cstime = tsk->signal->cstime;
            spin_unlock_irq(&tsk->sighand->siglock);
            read_unlock(&tasklist_lock);
        }
        tmp.tms_utime = cputime_to_clock_t(utime);
        tmp.tms_stime = cputime_to_clock_t(stime);
        tmp.tms_cutime = cputime_to_clock_t(cutime);
        tmp.tms_cstime = cputime_to_clock_t(cstime);
        if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
            return -EFAULT;
    }
    return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
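/*
 * Added userspace sketch (not from the kernel source): exercising
 * sys_times() through the times(2) wrapper.  All values are in clock
 * ticks; divide by sysconf(_SC_CLK_TCK) for seconds.
 */
#include <sys/times.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	struct tms t;
	clock_t elapsed = times(&t);	/* return value: elapsed wall ticks */
	long hz = sysconf(_SC_CLK_TCK);

	printf("elapsed=%ld ticks (%ld per second)\n", (long)elapsed, hz);
	printf("utime=%ld stime=%ld cutime=%ld cstime=%ld\n",
	       (long)t.tms_utime, (long)t.tms_stime,
	       (long)t.tms_cutime, (long)t.tms_cstime);
	return 0;
}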