Example #1
static int enable_signals(struct r3964_info *pInfo, struct pid *pid, int arg)
{
	struct r3964_client_info *pClient;
	struct r3964_client_info **ppClient;
	struct r3964_message *pMsg;

	if ((arg & R3964_SIG_ALL) == 0) {
		/* Remove client from client list */
		for (ppClient = &pInfo->firstClient; *ppClient;
		     ppClient = &(*ppClient)->next) {
			pClient = *ppClient;

			if (pClient->pid == pid) {
				TRACE_PS("removing client %d from client list",
					 pid_nr(pid));
				*ppClient = pClient->next;
				while (pClient->msg_count) {
					pMsg = remove_msg(pInfo, pClient);
					if (pMsg) {
						kfree(pMsg);
						TRACE_M("enable_signals - msg "
							"kfree %p", pMsg);
					}
				}
				put_pid(pClient->pid);
				kfree(pClient);
				TRACE_M("enable_signals - kfree %p", pClient);
				return 0;
			}
		}
		return -EINVAL;
	} else {
		pClient = findClient(pInfo, pid);
		if (pClient) {
			/* update signal options */
			pClient->sig_flags = arg;
		} else {
			/* add client to client list */
			pClient = kmalloc(sizeof(struct r3964_client_info),
					GFP_KERNEL);
			TRACE_M("enable_signals - kmalloc %p", pClient);
			if (pClient == NULL)
				return -ENOMEM;

			TRACE_PS("add client %d to client list", pid_nr(pid));
			spin_lock_init(&pClient->lock);
			pClient->sig_flags = arg;
			pClient->pid = get_pid(pid);
			pClient->next = pInfo->firstClient;
			pClient->first_msg = NULL;
			pClient->last_msg = NULL;
			pClient->next_block_to_read = NULL;
			pClient->msg_count = 0;
			pInfo->firstClient = pClient;
		}
	}

	return 0;
}
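
The removal loop above is the classic pointer-to-pointer unlink: ppClient walks the links rather than the nodes, so the head and interior cases need no separate prev bookkeeping. A minimal userspace sketch of the same idiom (node/unlink_key are illustrative names, not part of the driver):

#include <stddef.h>

struct node {
	int key;
	struct node *next;
};

/* Unlink and return the first node matching key, or NULL.
 * Walking a pointer to the link itself is what lets
 * "*pp = victim->next" cover the head case for free. */
static struct node *unlink_key(struct node **head, int key)
{
	struct node **pp;

	for (pp = head; *pp; pp = &(*pp)->next) {
		if ((*pp)->key == key) {
			struct node *victim = *pp;

			*pp = victim->next;
			return victim;
		}
	}
	return NULL;
}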
Example #2
/** Our implementation of Linux's kernel_thread() function. Sets up a new
 * thread running our __kthread_helper() function.
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	ddekit_thread_t *t;
	char name[20];
	struct __kthread_data *kt = vmalloc(sizeof(struct __kthread_data));
	ddekit_lock_t lock;

	if (!kt)
		return -ENOMEM;

	/* Initialize (and grab) handshake lock */
	ddekit_lock_init(&lock);
	ddekit_lock_lock(&lock);

	int threadnum = atomic_inc_return(&kthread_count);
	kt->fn        = fn;
	kt->arg       = arg;
	kt->lock      = lock; // Copy lock ptr, note that kt is freed by the
	                      // new thread, so we MUST NOT use kt->lock after
	                      // this point!

	snprintf(name, 20, ".kthread%x", threadnum);
	t = ddekit_thread_create(__kthread_helper,
	                         (void *)kt, name, 0);
	Assert(t);

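	/* Block until __kthread_helper() releases the handshake lock,
	 * i.e. until the new thread is running and has filled in
	 * kt->kthread. */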
	ddekit_lock_lock(&lock);
	ddekit_lock_deinit(&lock);

	return pid_nr(VPID_P(kt->kthread));
}
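
The lock is a one-shot startup handshake: the parent takes it, the helper releases it once initialized, so the parent's second ddekit_lock_lock() blocks until the child is alive. POSIX mutexes must be unlocked by the thread that locked them, so a portable userspace rendering of the same handshake uses a semaphore instead (a sketch under that assumption; all names are illustrative):

#include <pthread.h>
#include <semaphore.h>
#include <stddef.h>

static sem_t started;

static void *helper(void *arg)
{
	/* ... set up per-thread bookkeeping here ... */
	sem_post(&started);            /* plays the ddekit_lock_unlock() role */
	/* ... run the real thread body ... */
	return NULL;
}

static int spawn_and_wait(void)
{
	pthread_t t;

	sem_init(&started, 0, 0);      /* starts "taken", like the handshake lock */
	if (pthread_create(&t, NULL, helper, NULL) != 0)
		return -1;
	sem_wait(&started);            /* blocks until helper() has started */
	sem_destroy(&started);
	return 0;
}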
Example #3
static long ugidctl_setgid(struct ugidctl_context *ctx, void __user *arg)
{
	struct ugidctl_setid_rq req;
	enum pid_type ptype;
	struct cred *cred;
	gid_t gid;
	pid_t pid;
	long rc;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	gid = req.gid;

	/* callers that already hold CAP_SETGID need no mediation */
	if (capable(CAP_SETGID))
		return ugidctl_sys_setgid(gid);

	if (memcmp(ctx->key, req.key, sizeof(ctx->key)))
		return -EPERM;

	mutex_lock(&ctx->lock);

	if (ugidctl_find_gid(ctx, gid)) {
		mutex_unlock(&ctx->lock);
		return -EPERM;
	}

	ptype = ctx->ptype;
	pid = ctx->pid;

	mutex_unlock(&ctx->lock);

	if (pid != pid_nr(get_task_pid(current, ptype)))
		return -EPERM;

	cred = prepare_creds();
	if (!cred)
		return -ENOMEM;

	cap_raise(cred->cap_effective, CAP_SETGID);

	commit_creds(cred);

	rc = ugidctl_sys_setgid(gid);

	cred = prepare_creds();
	if (!cred) {
		/* unable to restore process capabilities - kill process */
		do_exit(SIGKILL);
		return -ENOMEM;
	}

	cap_lower(cred->cap_effective, CAP_SETGID);

	commit_creds(cred);

	return rc;
}
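
One caveat with the pid_nr(get_task_pid(...)) pattern used here (and again in Examples #7 and #9): get_task_pid() takes a reference on the struct pid, and nothing drops it, so each call leaks a reference. A leak-free sketch of the same lookup (assuming the usual <linux/pid.h> helpers; for PIDTYPE_PID, task_pid_nr(current) avoids the refcount dance entirely):

static pid_t current_pid_nr(enum pid_type ptype)
{
	struct pid *tpid = get_task_pid(current, ptype);
	pid_t nr = pid_nr(tpid);

	put_pid(tpid);		/* drop the reference get_task_pid() took */
	return nr;
}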
Example #4
/*
 *  ======== NameServerDrv_close ========
 *
 *  Linux specific function to close the driver.
 */
int NameServerDrv_close(struct inode *inode, struct file *filp)
{
    pid_t pid;

    /* release resources abandoned by the process */
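    /* note: f_owner.pid is the SIGIO owner; this appears to assume user
     * space armed it after open(), e.g. fcntl(fd, F_SETOWN, getpid());
     * otherwise pid_nr(NULL) quietly yields 0 */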
    pid = pid_nr(filp->f_owner.pid);
    NameServerDrv_releaseResources(pid);

    return(0);
}
Example #5
/**
 * ecryptfs_send_netlink
 * @data: The data to include as the payload
 * @data_len: The byte count of the data
 * @msg_ctx: The netlink context that will be used to handle the
 *          response message
 * @msg_type: The type of netlink message to send
 * @msg_flags: The flags to include in the netlink header
 * @daemon_pid: The process id of the daemon to send the message to
 *
 * Sends the data to the specified daemon pid and uses the netlink
 * context element to store the data needed for validation upon
 * receiving the response.  The data and the netlink context can be
 * null if just sending a netlink header is sufficient.  Returns zero
 * upon sending the message; non-zero upon error.
 */
int ecryptfs_send_netlink(char *data, int data_len,
			  struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type,
			  u16 msg_flags, struct pid *daemon_pid)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct ecryptfs_message *msg;
	size_t payload_len;
	int rc;

	payload_len = ((data && data_len) ? (sizeof(*msg) + data_len) : 0);
	skb = alloc_skb(NLMSG_SPACE(payload_len), GFP_KERNEL);
	if (!skb) {
		rc = -ENOMEM;
		ecryptfs_printk(KERN_ERR, "Failed to allocate socket buffer\n");
		goto out;
	}
	nlh = NLMSG_PUT(skb, pid_nr(daemon_pid), msg_ctx ? msg_ctx->counter : 0,
			msg_type, payload_len);
	nlh->nlmsg_flags = msg_flags;
	if (msg_ctx && payload_len) {
		msg = (struct ecryptfs_message *)NLMSG_DATA(nlh);
		msg->index = msg_ctx->index;
		msg->data_len = data_len;
		memcpy(msg->data, data, data_len);
	}
	rc = netlink_unicast(ecryptfs_nl_sock, skb, pid_nr(daemon_pid), 0);
	if (rc < 0) {
		ecryptfs_printk(KERN_ERR, "Failed to send eCryptfs netlink "
				"message; rc = [%d]\n", rc);
		goto out;
	}
	rc = 0;
	goto out;
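/* NLMSG_PUT() expands to a "goto nlmsg_failure" when the skb lacks
 * room for the header, which is how this label is reached despite
 * having no explicit goto above. */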
nlmsg_failure:
	rc = -EMSGSIZE;
	kfree_skb(skb);
out:
	return rc;
}
Example #6
static void remove_client_block(struct r3964_info *pInfo,
				struct r3964_client_info *pClient)
{
	struct r3964_block_header *block;

	TRACE_PS("remove_client_block PID %d", pid_nr(pClient->pid));

	block = pClient->next_block_to_read;
	if (block) {
		block->locks--;
		if (block->locks == 0) {
			remove_from_rx_queue(pInfo, block);
		}
	}
	pClient->next_block_to_read = NULL;
}
Example #7
static long ugidctl_setpidchktype(struct ugidctl_context *ctx,
				  unsigned long arg)
{
	enum pid_type ptype;
	pid_t pid;
	long rc;

	switch (arg) {
		case UGIDCTL_PIDTYPE_PID:
			ptype = PIDTYPE_PID;
			break;
		case UGIDCTL_PIDTYPE_PGID:
			ptype = PIDTYPE_PGID;
			break;
		case UGIDCTL_PIDTYPE_SID:
			ptype = PIDTYPE_SID;
			break;
		default:
			return -EINVAL;
	}

	pid = pid_nr(get_task_pid(current, ptype));

	mutex_lock(&ctx->lock);

	switch (ctx->ptype) {
		case PIDTYPE_PID:
			rc = UGIDCTL_PIDTYPE_PID;
			break;
		case PIDTYPE_PGID:
			rc = UGIDCTL_PIDTYPE_PGID;
			break;
		case PIDTYPE_SID:
			rc = UGIDCTL_PIDTYPE_SID;
			break;
		default:
			mutex_unlock(&ctx->lock);
			return -EINVAL;
	}

	ctx->ptype = ptype;
	ctx->pid = pid;

	mutex_unlock(&ctx->lock);

	return rc;
}
Example #8
struct task_struct *find_task_by_pid_type(int type, int nr)
{
	struct list_head *h, *p;
	h = &_pid_task_list;

	ddekit_lock_lock(&_pid_task_list_lock);
	list_for_each(p, h) {
		struct pid2task *pt = list_entry(p, struct pid2task, list);
		if (pid_nr(pt->pid) == nr) {
			ddekit_lock_unlock(&_pid_task_list_lock);
			return pt->ts;
		}
	}
	ddekit_lock_unlock(&_pid_task_list_lock);

	return NULL;
}
Example #9
static int ugidctl_open(struct inode *inode, struct file *filp)
{
	struct ugidctl_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_init(&ctx->lock);

	ctx->pid = pid_nr(get_task_pid(current, PIDTYPE_PID));
	ctx->ptype = PIDTYPE_PID;

	get_random_bytes(ctx->key, sizeof(ctx->key));

	ctx->uids = RB_ROOT;
	ctx->gids = RB_ROOT;
	ctx->uids_total = 0;
	ctx->gids_total = 0;

	filp->private_data = ctx;

	return 0;
}
Example #10
File: fork.c  Project: 19Dan01/linux
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace)
{
	int retval;
	struct task_struct *p;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
				current->signal->flags & SIGNAL_UNKILLABLE)
		return ERR_PTR(-EINVAL);

	/*
	 * If the new process will be in a different pid or user namespace
	 * do not allow it to share a thread group or signal handlers or
	 * parent with the forking task.
	 */
	if (clone_flags & CLONE_SIGHAND) {
		if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
		    (task_active_pid_ns(current) !=
				current->nsproxy->pid_ns_for_children))
			return ERR_PTR(-EINVAL);
	}

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			task_rlimit(p, RLIMIT_NPROC)) {
		if (p->real_cred->user != INIT_USER &&
		    !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
			goto bad_fork_free;
	}
	current->flags &= ~PF_NPROC_EXCEEDED;

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
	p->flags |= PF_FORKNOEXEC;
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = p->stime = p->gtime = 0;
	p->utimescaled = p->stimescaled = 0;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	p->prev_cputime.utime = p->prev_cputime.stime = 0;
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqlock_init(&p->vtime_seqlock);
	p->vtime_snap = 0;
	p->vtime_snap_whence = VTIME_SLEEPING;
#endif

#if defined(SPLIT_RSS_COUNTING)
	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	p->start_time = ktime_get_ns();
	p->real_start_time = ktime_get_boot_ns();
	p->io_context = NULL;
	p->audit_context = NULL;
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_begin(current);
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_threadgroup_lock;
	}
#endif
#ifdef CONFIG_CPUSETS
	p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
	p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
	seqcount_init(&p->mems_allowed_seq);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
	p->hardirqs_enabled = 0;
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_BCACHE
	p->sequential_io	= 0;
	p->sequential_io_avg	= 0;
#endif

	/* Perform scheduler related setup. Assign this task to a CPU. */
	retval = sched_fork(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_policy;

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;
	retval = audit_alloc(p);
	if (retval)
		goto bad_fork_cleanup_perf;
	/* copy all the process information */
	shm_init_task(p);
	retval = copy_semundo(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_audit;
	retval = copy_files(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_semundo;
	retval = copy_fs(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_files;
	retval = copy_sighand(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_fs;
	retval = copy_signal(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_sighand;
	retval = copy_mm(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_signal;
	retval = copy_namespaces(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_mm;
	retval = copy_io(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(clone_flags, stack_start, stack_size, p);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		pid = alloc_pid(p->nsproxy->pid_ns_for_children);
		if (IS_ERR(pid)) {
			retval = PTR_ERR(pid);
			goto bad_fork_cleanup_io;
		}
	}

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
#ifdef CONFIG_BLOCK
	p->plug = NULL;
#endif
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing and stepping should be turned off in the
	 * child regardless of CLONE_PTRACE.
	 */
	user_disable_single_step(p);
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* ok, now we should be set up.. */
	p->pid = pid_nr(pid);
	if (clone_flags & CLONE_THREAD) {
		p->exit_signal = -1;
		p->group_leader = current->group_leader;
		p->tgid = current->tgid;
	} else {
		if (clone_flags & CLONE_PARENT)
			p->exit_signal = current->group_leader->exit_signal;
		else
			p->exit_signal = (clone_flags & CSIGNAL);
		p->group_leader = p;
		p->tgid = p->pid;
	}

	p->nr_dirtied = 0;
	p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
	p->dirty_paused_when = 0;

	p->pdeath_signal = 0;
	INIT_LIST_HEAD(&p->thread_group);
	p->task_works = NULL;

	/*
	 * Make it visible to the rest of the system, but don't wake it up yet.
	 * Need tasklist lock for parent etc handling!
	 */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	spin_lock(&current->sighand->siglock);

	/*
	 * Copy seccomp details explicitly here, in case they were changed
	 * before holding sighand lock.
	 */
	copy_seccomp(p);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (likely(p->pid)) {
		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);

		init_task_pid(p, PIDTYPE_PID, pid);
		if (thread_group_leader(p)) {
			init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
			init_task_pid(p, PIDTYPE_SID, task_session(current));

			if (is_child_reaper(pid)) {
				ns_of_pid(pid)->child_reaper = p;
				p->signal->flags |= SIGNAL_UNKILLABLE;
			}

			p->signal->leader_pid = pid;
			p->signal->tty = tty_kref_get(current->signal->tty);
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			attach_pid(p, PIDTYPE_PGID);
			attach_pid(p, PIDTYPE_SID);
			__this_cpu_inc(process_counts);
		} else {
			current->signal->nr_threads++;
			atomic_inc(&current->signal->live);
			atomic_inc(&current->signal->sigcnt);
			list_add_tail_rcu(&p->thread_group,
					  &p->group_leader->thread_group);
			list_add_tail_rcu(&p->thread_node,
					  &p->signal->thread_head);
		}
		attach_pid(p, PIDTYPE_PID);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	syscall_tracepoint_update(p);
	write_unlock_irq(&tasklist_lock);

	proc_fork_connector(p);
	cgroup_post_fork(p);
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_end(current);
	perf_event_fork(p);

	trace_task_newtask(p, clone_flags);
	uprobe_copy_process(p, clone_flags);

	return p;

bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_perf:
	perf_event_free_task(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_threadgroup_lock:
#endif
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_end(current);
	delayacct_tsk_free(p);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}
Example #11
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
 * LBT 04.03.94
 */
asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	struct pid *pgrp;
	int err;

	if (!pid)
		pid = task_pid_vnr(group_leader);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_vpid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (same_thread_group(p->real_parent, group_leader)) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (p->did_exec)
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	pgrp = task_pid(p);
	if (pgid != pid) {
		struct task_struct *g;

		pgrp = find_vpid(pgid);
		g = pid_task(pgrp, PIDTYPE_PGID);
		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (task_pgrp(p) != pgrp) {
		change_pid(p, PIDTYPE_PGID, pgrp);
		set_task_pgrp(p, pid_nr(pgrp));
	}

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	return err;
}
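
From user space, those checks translate into the usual job-control dance: a process may move itself, or an un-exec'ed child, into a group that already exists within the same session (or create one named after the target's pid). A minimal illustration (error handling trimmed; both sides call setpgid() to close the fork/exec race, as shells do):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		setpgid(0, 0);	/* pid == pgid: creates the new group */
		_exit(0);
	}
	/* allowed only until the child execs (the did_exec check
	 * above turns this into -EACCES afterwards) */
	if (child > 0 && setpgid(child, child) < 0)
		perror("setpgid");
	return 0;
}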
Example #12
void probe_timer_itimer_expired(void *_data, struct signal_struct *sig)
{
	trace_mark_tp(kernel, timer_itimer_expired, timer_itimer_expired,
		probe_timer_itimer_expired, "pid %d",
		pid_nr(sig->leader_pid));
}
Example #13
void probe_sched_process_wait(void *_data, struct pid *pid)
{
	trace_mark_tp(kernel, process_wait, sched_process_wait,
		probe_sched_process_wait, "pid %d", pid_nr(pid));
}
Example #14
/*
 *  ======== NameServerDrv_ioctl ========
 *
 */
static long NameServerDrv_ioctl(struct file *filp, unsigned int cmd,
                                unsigned long args)
{
    int                         osStatus = 0;
    Int32                       status = NameServer_S_SUCCESS;
    Int32                       ret;
    NameServerDrv_CmdArgs       cargs;
    Osal_Pid                    pid;

    GT_3trace(curTrace, GT_ENTER, "NameServerDrv_ioctl", filp, cmd, args);

    /* save the process id for resource tracking */
    pid = pid_nr(filp->f_owner.pid);

    /* copy the full args from user space */
    ret = copy_from_user(&cargs, (Ptr)args, sizeof(NameServerDrv_CmdArgs));
    GT_assert(curTrace, (ret == 0));

    switch (cmd) {
    case CMD_NAMESERVER_ADD: {
        NameServerDrv_Res * res = NULL;
        Ptr                 buf = NULL;  /* read in the cleanup path even on early failure */

        /* allocate resource tracker object */
        res = Memory_alloc(NULL, sizeof(NameServerDrv_Res), 0, NULL);

        if (res == NULL) {
            status = NameServer_E_MEMORY;
            GT_setFailureReason(curTrace, GT_4CLASS,
                                "NameServerDrv_ioctl", status, "out of memory");
        }

        /* allocate memory for the name */
        if (status == NameServer_S_SUCCESS) {
            res->args.add.len = cargs.args.add.nameLen;
            res->args.add.name = Memory_alloc(NULL,
                                              cargs.args.add.nameLen, 0, NULL);

            if (res->args.add.name == NULL) {
                status = NameServer_E_MEMORY;
                GT_setFailureReason(curTrace, GT_4CLASS,
                                    "NameServerDrv_ioctl", status, "out of memory");
            }
        }

        /* copy the name from user memory */
        if (status == NameServer_S_SUCCESS) {
            status = copy_from_user(res->args.add.name,
                                    cargs.args.add.name, cargs.args.add.nameLen);

            if (status != 0) {
                GT_setFailureReason(curTrace, GT_4CLASS,
                                    "NameServerDrv_ioctl", status,
                                    "copy_from_user failed");
                status = NameServer_E_OSFAILURE;
                osStatus = -EFAULT;
            }
        }

        /* allocate memory for the buf */
        if (status == NameServer_S_SUCCESS) {
            buf = Memory_alloc(NULL, cargs.args.add.len, 0, NULL);

            if (buf == NULL) {
                status = NameServer_E_MEMORY;
                GT_setFailureReason(curTrace, GT_4CLASS,
                                    "NameServerDrv_ioctl", status, "out of memory");
            }
        }

        /* copy the value from user buf */
        if (status == NameServer_S_SUCCESS) {
            status = copy_from_user(buf, cargs.args.add.buf,
                                    cargs.args.add.len);

            if (status != 0) {
                GT_setFailureReason(curTrace, GT_4CLASS,
                                    "NameServerDrv_ioctl", status,
                                    "copy_from_user failed");
                status = NameServer_E_OSFAILURE;
                osStatus = -EFAULT;
            }
        }

        /* invoke the module api */
        if (status == NameServer_S_SUCCESS) {
            cargs.args.add.entry = NameServer_add(cargs.args.add.handle,
                                                  res->args.add.name, buf, cargs.args.add.len);

            if (cargs.args.add.entry == NULL) {
                status = NameServer_E_FAIL;
                GT_setFailureReason(curTrace, GT_4CLASS,
                                    "NameServerDrv_ioctl", status,
                                    "NameServer_add failed");
            }
        }

        /* track the resource by process id */
        if (status == NameServer_S_SUCCESS) {
            Int rstat;

            res->cmd = CMD_NAMESERVER_ADD; /* use this command for both */
            res->args.add.handle = cargs.args.add.handle;
            res->args.add.entry = cargs.args.add.entry;

            rstat = ResTrack_push(NameServerDrv_state.resTrack, pid,
                                  (List_Elem *)res);

            GT_assert(curTrace, (rstat >= 0));
        }

        /* don't need the buf memory anymore */
        if (buf != NULL) {
            Memory_free(NULL, buf, cargs.args.add.len);
        }

        /* failure cleanup */
        if (status < 0) {
            if ((res != NULL) && (res->args.add.name != NULL)) {
                Memory_free(NULL, res->args.add.name,
                            cargs.args.add.nameLen);
            }
            if (res != NULL) {
                Memory_free(NULL, res, sizeof(NameServerDrv_Res));
            }
        }
    }
    break;

    case CMD_NAMESERVER_ADDUINT32: {
        NameServerDrv_Res * res = NULL;

        /* allocate resource tracker object */
        res = Memory_alloc(NULL, sizeof(NameServerDrv_Res), 0, NULL);

        if (res == NULL) {
            status = NameServer_E_MEMORY;
            GT_setFailureReason(curTrace, GT_4CLASS,
                                "NameServerDrv_ioctl", status, "out of memory");
        }

        /* allocate memory for the name */
        if (status == NameServer_S_SUCCESS) {
            res->args.add.len = cargs.args.addUInt32.nameLen;
            res->args.add.name = Memory_alloc(NULL,
                                              cargs.args.addUInt32.nameLen, 0, NULL);

            if (res->args.add.name == NULL) {
                status = NameServer_E_MEMORY;
                GT_setFailureReason(curTrace, GT_4CLASS,
                                    "NameServerDrv_ioctl", status, "out of memory");
            }
        }

        /* copy the name from user memory */
        if (status == NameServer_S_SUCCESS) {
            status = copy_from_user(res->args.add.name,
                                    cargs.args.addUInt32.name,
                                    cargs.args.addUInt32.nameLen);

            if (status != 0) {
                GT_setFailureReason(curTrace, GT_4CLASS,
                                    "NameServerDrv_ioctl", status,
                                    "copy_from_user failed");
                status = NameServer_E_OSFAILURE;
                osStatus = -EFAULT;
            }
        }

        /* invoke the module api */
        if (status == NameServer_S_SUCCESS) {
            cargs.args.addUInt32.entry = NameServer_addUInt32(
                                             cargs.args.addUInt32.handle, res->args.add.name,
                                             cargs.args.addUInt32.value);

            if (cargs.args.addUInt32.entry == NULL) {
                status = NameServer_E_FAIL;
                GT_setFailureReason(curTrace, GT_4CLASS,
                                    "NameServerDrv_ioctl", status,
                                    "NameServer_addUInt32 failed");
            }
        }

        /* track the resource by process id */
        if (status == NameServer_S_SUCCESS) {
            Int rstat;

            res->cmd = CMD_NAMESERVER_ADD; /* use this command for both */
            res->args.add.handle = cargs.args.addUInt32.handle;
            res->args.add.entry = cargs.args.addUInt32.entry;

            rstat = ResTrack_push(NameServerDrv_state.resTrack, pid,
                                  (List_Elem *)res);

            GT_assert(curTrace, (rstat >= 0));
        }

        /* failure cleanup */
        if (status < 0) {
            if ((res != NULL) && (res->args.add.name != NULL)) {
                Memory_free(NULL, res->args.add.name,
                            cargs.args.addUInt32.nameLen);
            }
            if (res != NULL) {
                Memory_free(NULL, res, sizeof(NameServerDrv_Res));
            }
        }
    }
    break;

    case CMD_NAMESERVER_GET:
    {
        String   name;
        Ptr      value;
        UInt16 * procId = NULL;

        /* Allocate memory for the name */
        name = Memory_alloc (NULL, cargs.args.get.nameLen, 0, NULL);
        GT_assert (curTrace, (name != NULL));
        /* Copy the name */
        ret = copy_from_user (name,
                              cargs.args.get.name,
                              cargs.args.get.nameLen);
        GT_assert (curTrace, (ret == 0));

        /* Allocate memory for the buf */
        value = Memory_alloc (NULL, cargs.args.get.len, 0, NULL);
        GT_assert (curTrace, (value != NULL));

        /* Allocate memory for the procId and copy the procIds; both
         * steps apply only when the caller actually passed a list */
        if (cargs.args.get.procLen > 0) {
            procId = Memory_alloc (NULL,
                                   cargs.args.get.procLen * sizeof(UInt16),
                                   0,
                                   NULL);
            GT_assert (curTrace, (procId != NULL));

            ret = copy_from_user (procId,
                                  cargs.args.get.procId,
                                  cargs.args.get.procLen * sizeof(UInt16));
            GT_assert (curTrace, (ret == 0));
        }

        status = NameServer_get (cargs.args.get.handle,
                                 name,
                                 value,
                                 &cargs.args.get.len,
                                 procId);
        /* Do not assert. This can return NameServer_E_NOTFOUND
         * as a valid runtime failure.
         */

        /* Copy the value */
        ret = copy_to_user (cargs.args.get.value,
                            value,
                            cargs.args.get.len);
        GT_assert (curTrace, (ret == 0));

        /* free the allocated memory */
        Memory_free (NULL, name, cargs.args.get.nameLen);
        Memory_free (NULL, value, cargs.args.get.len);
        if (procId != NULL) {
            Memory_free (NULL,
                         procId,
                         cargs.args.get.procLen *sizeof(UInt16));
        }

    }
    break;

    case CMD_NAMESERVER_GETLOCAL:
    {
        String   name;
        Ptr      value;

        /* Allocate memory for the name */
        name = Memory_alloc (NULL, cargs.args.getLocal.nameLen, 0, NULL);
        GT_assert (curTrace, (name != NULL));
        /* Copy the name */
        ret = copy_from_user (name,
                              cargs.args.getLocal.name,
                              cargs.args.getLocal.nameLen);
        GT_assert (curTrace, (ret == 0));

        /* Allocate memory for the buf */
        value = Memory_alloc (NULL, cargs.args.getLocal.len, 0, NULL);
        GT_assert (curTrace, (value != NULL));

        status = NameServer_getLocal (cargs.args.getLocal.handle,
                                      name,
                                      value,
                                      &cargs.args.getLocal.len);
        GT_assert (curTrace, (status >= 0));

        /* Copy the value */
        ret = copy_to_user (cargs.args.getLocal.value,
                            value,
                            cargs.args.getLocal.len);
        GT_assert (curTrace, (ret == 0));

        /* free the allocated memory */
        Memory_free (NULL, name, cargs.args.getLocal.nameLen);
        Memory_free (NULL, value,  cargs.args.getLocal.len);
    }
    break;

    case CMD_NAMESERVER_MATCH:
    {
        String name;

        /* Allocate memory for the name */
        name = Memory_alloc (NULL, cargs.args.match.nameLen, 0, NULL);
        GT_assert (curTrace, (name != NULL));
        /* Copy the name */
        ret = copy_from_user (name,
                              cargs.args.match.name,
                              cargs.args.match.nameLen);
        GT_assert (curTrace, (ret == 0));

        cargs.args.match.count = NameServer_match (
                                     cargs.args.match.handle,
                                     name,
                                     &cargs.args.match.value);
        GT_assert (curTrace, (cargs.args.match.count >= 0));

        /* free the allocated memory */
        Memory_free (NULL, name, cargs.args.match.nameLen);
    }
    break;

    case CMD_NAMESERVER_REMOVE: {
        NameServerDrv_Res   res;
        List_Elem *         elem;

        /* save for resource untracking */
        res.cmd = CMD_NAMESERVER_ADD;
        res.args.add.entry = NULL;

        /* allocate memory for the name */
        res.args.add.len = cargs.args.remove.nameLen;
        res.args.add.name = Memory_alloc(NULL,
                                         cargs.args.remove.nameLen, 0, NULL);

        if (res.args.add.name == NULL) {
            status = NameServer_E_MEMORY;
            GT_setFailureReason(curTrace, GT_4CLASS,
                                "NameServerDrv_ioctl", status, "out of memory");
        }

        /* copy the name from user memory */
        if (status == NameServer_S_SUCCESS) {
            status = copy_from_user(res.args.add.name,
                                    cargs.args.remove.name, cargs.args.remove.nameLen);

            if (status != 0) {
                GT_setFailureReason(curTrace, GT_4CLASS,
                                    "NameServerDrv_ioctl", status,
                                    "copy_from_user failed");
                status = NameServer_E_OSFAILURE;
                osStatus = -EFAULT;
            }
        }

        /* invoke the module api */
        if (status == NameServer_S_SUCCESS) {
            status = NameServer_remove(cargs.args.remove.handle,
                                       res.args.add.name);

            if (status < 0) {
                GT_setFailureReason(curTrace, GT_4CLASS,
                                    "NameServerDrv_ioctl", status,
                                    "NameServer_remove failed");
            }
        }

        /* untrack the resource */
        if (status == NameServer_S_SUCCESS) {
            NameServerDrv_Res * resPtr;

            ResTrack_remove(NameServerDrv_state.resTrack, pid,
                            (List_Elem *)(&res), NameServerDrv_resCmpFxn, &elem);

            GT_assert(curTrace, (elem != NULL));

            resPtr = (NameServerDrv_Res *)elem;
            Memory_free(NULL, resPtr->args.add.name, resPtr->args.add.len);
            Memory_free(NULL, elem, sizeof(NameServerDrv_Res));
        }

        /* don't need the name memory anymore */
        if (res.args.add.name != NULL) {
            Memory_free(NULL, res.args.add.name,
                        cargs.args.remove.nameLen);
        }
    }
    break;

    case CMD_NAMESERVER_REMOVEENTRY: {
        NameServerDrv_Res   res;
        List_Elem *         elem;

        /* save for resource untracking */
        res.cmd = CMD_NAMESERVER_ADD;
        res.args.add.name = NULL;
        res.args.add.entry = cargs.args.removeEntry.entry;

        /* invoke the module api */
        status = NameServer_removeEntry(cargs.args.removeEntry.handle,
                                        cargs.args.removeEntry.entry);

        if (status < 0) {
            GT_setFailureReason(curTrace, GT_4CLASS, "NameServerDrv_ioctl",
                                status, "NameServer_remove failed");
        }

        /* untrack the resource */
        if (status == NameServer_S_SUCCESS) {
            NameServerDrv_Res * resPtr;

            ResTrack_remove(NameServerDrv_state.resTrack, pid,
                            (List_Elem *)(&res), NameServerDrv_resCmpFxn, &elem);

            GT_assert(curTrace, (elem != NULL));

            resPtr = (NameServerDrv_Res *)elem;
            Memory_free(NULL, resPtr->args.add.name, resPtr->args.add.len);
            Memory_free(NULL, elem, sizeof(NameServerDrv_Res));
        }
    }
    break;

    case CMD_NAMESERVER_PARAMS_INIT:
    {
        NameServer_Params params;

        NameServer_Params_init (&params);
        ret = copy_to_user (cargs.args.ParamsInit.params,
                            &params,
                            sizeof (NameServer_Params));
        GT_assert (curTrace, (ret == 0));
    }
    break;

    case CMD_NAMESERVER_CREATE: {
        NameServer_Params   params;
        String              name = NULL;
        NameServerDrv_Res * res = NULL;

        /* allocate memory for the name */
        name = Memory_alloc(NULL, cargs.args.create.nameLen, 0, NULL);

        if (name == NULL) {
            status = NameServer_E_MEMORY;
            GT_setFailureReason(curTrace, GT_4CLASS,
                                "NameServerDrv_ioctl", status, "out of memory");
        }

        /* copy the name from user memory */
        if (status == NameServer_S_SUCCESS) {
            status = copy_from_user(name, cargs.args.create.name,
                                    cargs.args.create.nameLen);

            if (status != 0) {
                GT_setFailureReason(curTrace, GT_4CLASS,
                                    "NameServerDrv_ioctl", status,
                                    "copy_from_user failed");
                status = NameServer_E_OSFAILURE;
                osStatus = -EFAULT;
            }
        }

        /* copy the params from user memory */
        if (status == NameServer_S_SUCCESS) {
            status = copy_from_user(&params, cargs.args.create.params,
                                    sizeof(NameServer_Params));

            if (status != 0) {
                GT_setFailureReason(curTrace, GT_4CLASS,
                                    "NameServerDrv_ioctl", status,
                                    "copy_from_user failed");
                status = NameServer_E_OSFAILURE;
                osStatus = -EFAULT;
            }
        }

        /* allocate resource tracker object */
        if (status == NameServer_S_SUCCESS) {
            res = Memory_alloc(NULL, sizeof(NameServerDrv_Res), 0, NULL);

            if (res == NULL) {
                status = NameServer_E_MEMORY;
                GT_setFailureReason(curTrace, GT_4CLASS,
                                    "NameServerDrv_ioctl", status, "out of memory");
            }
        }

        /* invoke the module api */
        if (status == NameServer_S_SUCCESS) {
            cargs.args.create.handle = NameServer_create(name, &params);

            if (cargs.args.create.handle == NULL) {
                status = NameServer_E_FAIL;
                GT_setFailureReason(curTrace, GT_4CLASS,
                                    "NameServerDrv_ioctl", status,
                                    "NameServer_create failed");
            }
        }

        /* track the resource by process id */
        if (status == NameServer_S_SUCCESS) {
            Int rstat;

            res->cmd = cmd;
            res->args.create.handle = cargs.args.create.handle;

            rstat = ResTrack_push(NameServerDrv_state.resTrack, pid,
                                  (List_Elem *)res);

            GT_assert(curTrace, (rstat >= 0));
        }

        /* don't need name memory anymore */
        if (name != NULL) {
            Memory_free(NULL, name, cargs.args.create.nameLen);
        }

        /* failure cleanup */
        if (status < 0) {
            if (res != NULL) {
                Memory_free(NULL, res, sizeof(NameServerDrv_Res));
            }
        }
    }
    break;

    case CMD_NAMESERVER_DELETE: {
        NameServerDrv_Res   res;
        List_Elem *         elem;

        /* save for resource untracking, handle set to null by delete */
        res.cmd = CMD_NAMESERVER_CREATE;
        res.args.create.handle = cargs.args.delete.handle;

        /* common code for delete command */
        status = NameServerDrv_cmd_delete(&cargs.args.delete.handle);

        /* untrack the resource */
        if (status == NameServer_S_SUCCESS) {
            ResTrack_remove(NameServerDrv_state.resTrack, pid,
                            (List_Elem *)(&res), NameServerDrv_resCmpFxn, &elem);

            GT_assert(curTrace, (elem != NULL));
            Memory_free(NULL, elem, sizeof(NameServerDrv_Res));
        }
    }
    break;

    case CMD_NAMESERVER_GETHANDLE:
    {
        String   name;

        /* Allocate memory for the name */
        name = Memory_alloc (NULL, cargs.args.getHandle.nameLen, 0, NULL);
        GT_assert (curTrace, (name != NULL));
        /* Copy the name */
        ret = copy_from_user (name,
                              cargs.args.getHandle.name,
                              cargs.args.getHandle.nameLen);
        GT_assert (curTrace, (ret == 0));

        cargs.args.getHandle.handle = NameServer_getHandle (name);

        /* free the allocated memory */
        Memory_free (NULL, name, cargs.args.getHandle.nameLen);
    }
    break;

    case CMD_NAMESERVER_SETUP: {
        /* register process with resource tracker */
        status = ResTrack_register(NameServerDrv_state.resTrack, pid);

        if (status < 0) {
            GT_setFailureReason(curTrace, GT_4CLASS, "NameServerDrv_ioctl",
                                status, "resource tracker register failed");
            status = NameServer_E_FAIL;
            pid = 0;
        }

        /* setup the module */
        if (status == NameServer_S_SUCCESS) {
            status = NameServer_setup();

            if (status < 0) {
                GT_setFailureReason(curTrace, GT_4CLASS,
                                    "NameServerDrv_ioctl", status,
                                    "kernel-side MessageQ_setup failed");
            }
        }

        /* failure case */
        if (status < 0) {
            if (pid != 0) {
                ResTrack_unregister(NameServerDrv_state.resTrack, pid);
            }
        }
    }
    break;

    case CMD_NAMESERVER_DESTROY: {
        /* unregister process from resource tracker */
        status = ResTrack_unregister(NameServerDrv_state.resTrack, pid);
        GT_assert(curTrace, (status >= 0));

        /* finalize the module */
        status = NameServer_destroy();
        GT_assert(curTrace, (status >= 0));
    }
    break;

    default:
    {
        /* This does not impact return status of this function, so retVal
         * comment is not used.
         */
        status = NameServer_E_INVALIDARG;
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "NameServerDrv_ioctl",
                             status,
                             "Unsupported ioctl command specified");
    }
    break;
    }

    cargs.apiStatus = status;

    /* copy the full args back to user space */
    ret = copy_to_user((Ptr)args, &cargs, sizeof(NameServerDrv_CmdArgs));
    GT_assert (curTrace, (ret == 0));

    GT_1trace (curTrace, GT_LEAVE, "NameServerDrv_ioctl", osStatus);

    /*! @retval 0 Operation successfully completed. */
    return osStatus;
}
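
The pattern throughout this handler: every successful create/add pushes a record keyed by the caller's pid onto NameServerDrv_state.resTrack, remove/delete pops the matching record, and NameServerDrv_close() (Example #4) sweeps whatever a dying process left behind. A minimal sketch of that per-pid bookkeeping (plain C with illustrative types; not the SysLink ResTrack API):

#include <stdlib.h>
#include <sys/types.h>

struct res_rec {
	struct res_rec *next;
	pid_t           owner;
	void           *handle;	/* whatever the resource is */
};

static struct res_rec *res_head;

static int res_push(pid_t owner, void *handle)
{
	struct res_rec *r = malloc(sizeof(*r));

	if (!r)
		return -1;
	r->owner  = owner;
	r->handle = handle;
	r->next   = res_head;
	res_head  = r;
	return 0;
}

/* called on close(): release everything the process abandoned,
 * using the same pointer-to-pointer unlink as Example #1 */
static void res_release(pid_t owner, void (*destroy)(void *))
{
	struct res_rec **pp = &res_head;

	while (*pp) {
		if ((*pp)->owner == owner) {
			struct res_rec *victim = *pp;

			*pp = victim->next;
			destroy(victim->handle);
			free(victim);
		} else {
			pp = &(*pp)->next;
		}
	}
}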
Example #15
static void
_sis1100_irq_thread(struct sis1100_softc* sc, enum handlercomm command)
{
    u_int32_t new_irqs=0;
    int i;

    mutex_lock(&sc->sem_irqinfo);

    if (command&handlercomm_doorbell) {
        DECLARE_SPINLOCKFLAGS(flags)
        u_int32_t doorbell;

        SPIN_LOCK_IRQSAVE(sc->lock_doorbell, flags);
        doorbell=sc->doorbell;
        sc->doorbell=0;
        SPIN_UNLOCK_IRQRESTORE(sc->lock_doorbell, flags);

        switch (sc->remote_hw) {
        case sis1100_hw_vme:
            new_irqs|=sis3100rem_irq_handler(sc, doorbell);
            break;
        case sis1100_hw_camac:
            new_irqs|=sis5100rem_irq_handler(sc, doorbell);
            break;
        case sis1100_hw_pci:
            /* do nothing */
            break;
        case sis1100_hw_lvd:
            new_irqs|=zellvd_rem_irq_handler(sc, doorbell);
            break;
        case sis1100_hw_pandapixel:
            new_irqs|=pandapixel_rem_irq_handler(sc, doorbell);
            break;
        case sis1100_hw_psf4ad:
            /* do nothing */
        case sis1100_hw_invalid:
            /* do nothing */
            break;
        }
    }

    if (command&handlercomm_lemo) {
        new_irqs|=sis1100_lemo_handler(sc);
    }

    if (command&handlercomm_mbx0) {
        new_irqs|=sis1100_mbox0_handler(sc);
    }

    /* this is called from sis1100_link_up_handler for both 'UP' and
       'DOWN' of the link, one second after the status change */
    if (command&handlercomm_up) {
        new_irqs|=sis1100_synch_handler(sc);
    }

    if (command&handlercomm_ddma) {
        new_irqs|=sis1100_ddma_handler(sc);
    }

    sc->pending_irqs|=new_irqs;

    mutex_unlock(&sc->sem_irqinfo);

    /* inform processes via signal if requested */
    mutex_lock(&sc->sem_fdata);
    for (i=0; i<sis1100_MINORUTMASK+1; i++) {
        if (sc->fdata[i]) {
            struct sis1100_fdata* fd=sc->fdata[i];
            if (fd->sig>0 && ((new_irqs & fd->owned_irqs)||
                              (fd->old_remote_hw!=sc->remote_hw))) {
                int res;
                /* XXXY must be removed */
                pERROR(sc, "irq_pending=%d pending_irqs=0x%x",
                        irq_pending(sc, fd, fd->owned_irqs),
                        sc->pending_irqs);
                pERROR(sc, "sig=%d new_irqs=0x%x owned_irqs=0x%x",
                        fd->sig, new_irqs, fd->owned_irqs);
                pERROR(sc, "old_remote_hw=%d remote_hw=%d",
                        fd->old_remote_hw, sc->remote_hw);
                /* XXXY must be removed */
                pERROR(sc, "send sig to %d", pid_nr(fd->pid));
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
                res=kill_proc(fd->pid, fd->sig, 1);
#else
                res=kill_pid(fd->pid, fd->sig, 1);
#endif
                if (res)
                    pINFO(sc, "send sig %d to %u: res=%d",
                            fd->sig, pid_nr(fd->pid), res);
            }
        }
    }
    mutex_unlock(&sc->sem_fdata);

    /* wake up processes waiting in sis1100_irq_wait or doing select */
#ifdef __NetBSD__
    wakeup(&sc->remoteirq_wait);
    selwakeup(&sc->sel);
#elif __linux__
    wake_up_interruptible(&sc->remoteirq_wait);
#endif
}
Example #16
int cpt_dump_tty(cpt_object_t *obj, struct cpt_context *ctx)
{
	struct tty_struct *tty = obj->o_obj;
	struct cpt_tty_image *v;

	if (tty->link) {
		if (lookup_cpt_object(CPT_OBJ_TTY, tty->link, ctx) == NULL) {
			eprintk_ctx("orphan pty %s %d\n", tty->name, tty->driver->subtype == PTY_TYPE_SLAVE);
			return -EINVAL;
		}
		if (tty->link->link != tty) {
			eprintk_ctx("bad pty pair\n");
			return -EINVAL;
		}
		if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
		    tty->driver->subtype == PTY_TYPE_SLAVE &&
		    tty->link->count)
			obj->o_count++;
	}
	if (obj->o_count != tty->count) {
		eprintk_ctx("tty %s is referenced outside %d %d\n", tty->name, obj->o_count, tty->count);
		return -EBUSY;
	}

	cpt_open_object(obj, ctx);

	v = cpt_get_buf(ctx);
	v->cpt_next = -1;
	v->cpt_object = CPT_OBJ_TTY;
	v->cpt_hdrlen = sizeof(*v);
	v->cpt_content = CPT_CONTENT_ARRAY;

	v->cpt_index = tty->index;
	v->cpt_link = -1;
	if (tty->link)
		v->cpt_link = tty->link->index;
	v->cpt_drv_type = tty->driver->type;
	v->cpt_drv_subtype = tty->driver->subtype;
	v->cpt_drv_flags = tty->driver->flags;
	v->cpt_packet = tty->packet;
	v->cpt_stopped = tty->stopped;
	v->cpt_hw_stopped = tty->hw_stopped;
	v->cpt_flow_stopped = tty->flow_stopped;
	v->cpt_flags = tty->flags;
	v->cpt_ctrl_status = tty->ctrl_status;
	v->cpt_canon_data = tty->canon_data;
	v->cpt_canon_head = tty->canon_head - tty->read_tail;
	v->cpt_canon_column = tty->canon_column;
	v->cpt_column = tty->column;
	v->cpt_erasing = tty->erasing;
	v->cpt_lnext = tty->lnext;
	v->cpt_icanon = tty->icanon;
	v->cpt_raw = tty->raw;
	v->cpt_real_raw = tty->real_raw;
	v->cpt_closing = tty->closing;
	v->cpt_minimum_to_wake = tty->minimum_to_wake;
	v->cpt_pgrp = 0;
	if (tty->pgrp) {
		v->cpt_pgrp = pid_vnr(tty->pgrp);
		if ((int)v->cpt_pgrp < 0) {
			dprintk_ctx("cannot map tty->pgrp %d -> %d\n", pid_vnr(tty->pgrp), (int)v->cpt_pgrp);
			v->cpt_pgrp = -1;
		}
	}
	v->cpt_session = 0;
	if (tty->session) {
		v->cpt_session = pid_vnr(tty->session);
		if ((int)v->cpt_session < 0) {
			eprintk_ctx("cannot map tty->session %d -> %d\n", pid_nr(tty->session), (int)v->cpt_session);
			cpt_release_buf(ctx);
			return -EINVAL;
		}
	}
	memcpy(v->cpt_name, tty->name, 64);
	v->cpt_ws_row = tty->winsize.ws_row;
	v->cpt_ws_col = tty->winsize.ws_col;
	v->cpt_ws_prow = tty->winsize.ws_ypixel;
	v->cpt_ws_pcol = tty->winsize.ws_xpixel;
	if (tty->termios == NULL) {
		eprintk_ctx("NULL termios");
		cpt_release_buf(ctx);
		return -EINVAL;
	}
	v->cpt_c_line = tty->termios->c_line;
	v->cpt_c_iflag = tty->termios->c_iflag;
	v->cpt_c_oflag = tty->termios->c_oflag;
	v->cpt_c_cflag = tty->termios->c_cflag;
	v->cpt_c_lflag = tty->termios->c_lflag;
	memcpy(v->cpt_c_cc, tty->termios->c_cc, NCCS);
	if (NCCS < 32)
		memset(v->cpt_c_cc + NCCS, 255, 32 - NCCS);
	memcpy(v->cpt_read_flags, tty->read_flags, sizeof(v->cpt_read_flags));

	ctx->write(v, sizeof(*v), ctx);
	cpt_release_buf(ctx);

	if (tty->read_buf && tty->read_cnt) {
		struct cpt_obj_bits *v = cpt_get_buf(ctx);
		loff_t saved_pos;

		cpt_push_object(&saved_pos, ctx);
		cpt_open_object(NULL, ctx);
		v->cpt_next = CPT_NULL;
		v->cpt_object = CPT_OBJ_BITS;
		v->cpt_hdrlen = sizeof(*v);
		v->cpt_content = CPT_CONTENT_DATA;
		v->cpt_size = tty->read_cnt;
		ctx->write(v, sizeof(*v), ctx);
		cpt_release_buf(ctx);

		if (tty->read_cnt) {
			int n = min(tty->read_cnt, N_TTY_BUF_SIZE - tty->read_tail);
			ctx->write(tty->read_buf + tty->read_tail, n, ctx);
			if (tty->read_cnt > n)
				ctx->write(tty->read_buf, tty->read_cnt-n, ctx);
			ctx->align(ctx);
		}

		cpt_close_object(ctx);
		cpt_pop_object(&saved_pos, ctx);
	}

	cpt_close_object(ctx);

	return 0;
}
Example #17
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace)
{
	int retval;
	struct task_struct *p;
	int cgroup_callbacks_done = 0;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	if ((clone_flags & CLONE_PARENT) &&
				current->signal->flags & SIGNAL_UNKILLABLE)
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			task_rlimit(p, RLIMIT_NPROC)) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
		    p->real_cred->user != INIT_USER)
			goto bad_fork_free;
	}
	current->flags &= ~PF_NPROC_EXCEEDED;

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	retval = -EAGAIN;
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	p->did_exec = 0;
	delayacct_tsk_init(p);	
	copy_flags(clone_flags, p);
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = p->stime = p->gtime = 0;
	p->utimescaled = p->stimescaled = 0;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	p->prev_utime = p->prev_stime = 0;
#endif
#if defined(SPLIT_RSS_COUNTING)
	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	do_posix_clock_monotonic_gettime(&p->start_time);
	p->real_start_time = p->start_time;
	monotonic_to_bootbased(&p->real_start_time);
	p->io_context = NULL;
	p->audit_context = NULL;
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_begin(current);
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_cgroup;
	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_CPUSETS
	p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
	p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
	seqcount_init(&p->mems_allowed_seq);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	p->hardirqs_enabled = 1;
#else
	p->hardirqs_enabled = 0;
#endif
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; 
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	
	p->blocked_on = NULL;
	p->blocked_by = NULL;
	p->blocked_since = 0;
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
	p->memcg_batch.do_batch = 0;
	p->memcg_batch.memcg = NULL;
#endif

	
	sched_fork(p);

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;
	retval = audit_alloc(p);
	if (retval)
		goto bad_fork_cleanup_policy;
	
	retval = copy_semundo(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_audit;
	retval = copy_files(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_semundo;
	retval = copy_fs(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_files;
	retval = copy_sighand(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_fs;
	retval = copy_signal(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_sighand;
	retval = copy_mm(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_signal;
	retval = copy_namespaces(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_mm;
	retval = copy_io(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		retval = -ENOMEM;
		pid = alloc_pid(p->nsproxy->pid_ns);
		if (!pid)
			goto bad_fork_cleanup_io;
	}

	p->pid = pid_nr(pid);
	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
#ifdef CONFIG_BLOCK
	p->plug = NULL;
#endif
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	user_disable_single_step(p);
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	
	if (clone_flags & CLONE_THREAD)
		p->exit_signal = -1;
	else if (clone_flags & CLONE_PARENT)
		p->exit_signal = current->group_leader->exit_signal;
	else
		p->exit_signal = (clone_flags & CSIGNAL);

	p->pdeath_signal = 0;
	p->exit_state = 0;

	p->nr_dirtied = 0;
	p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
	p->dirty_paused_when = 0;

	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);

	cgroup_fork_callbacks(p);
	cgroup_callbacks_done = 1;

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	spin_lock(&current->sighand->siglock);

	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (clone_flags & CLONE_THREAD) {
		current->signal->nr_threads++;
		atomic_inc(&current->signal->live);
		atomic_inc(&current->signal->sigcnt);
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
	}

	if (likely(p->pid)) {
		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);

		if (thread_group_leader(p)) {
			if (is_child_reaper(pid))
				p->nsproxy->pid_ns->child_reaper = p;

			p->signal->leader_pid = pid;
			p->signal->tty = tty_kref_get(current->signal->tty);
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__this_cpu_inc(process_counts);
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	cgroup_post_fork(p);
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_end(current);
	perf_event_fork(p);

	trace_task_newtask(p, clone_flags);

	return p;

bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	if (unlikely(clone_flags & CLONE_NEWPID))
		pid_ns_release_proc(p->nsproxy->pid_ns);
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_policy:
	perf_event_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_end(current);
	cgroup_exit(p, cgroup_callbacks_done);
	delayacct_tsk_free(p);
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}
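
A note on the pid handling above: the numeric value stored in p->pid is derived only after a struct pid has been allocated in the child's namespace, and the error path releases it with free_pid(). A condensed sketch of that allocate/number/attach sequence, using the same era's APIs (the helper name example_assign_pid is ours; locking and the remaining unwind are elided):

static int example_assign_pid(struct task_struct *p)
{
	struct pid *pid;

	pid = alloc_pid(p->nsproxy->pid_ns);	/* may sleep */
	if (!pid)
		return -ENOMEM;

	p->pid = pid_nr(pid);			/* global (init-ns) number */
	attach_pid(p, PIDTYPE_PID, pid);	/* hash the task under the pid */
	return 0;
}
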
Exemplo n.º 18
pid_t pid_vnr(struct pid *pid)
{
	return pid_nr(pid);
}
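
This pass-through definition effectively assumes a single pid namespace: the "virtual" number equals the global one. In mainline kernels with PID namespaces enabled, pid_vnr() is namespace-aware; its upstream form (kernel/pid.c) resolves the number relative to the caller's active namespace:

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
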
Exemplo n.º 19
int autofs_fill_super(struct super_block *s, void *data, int silent)
{
	struct inode *root_inode;
	struct dentry *root;
	struct file *pipe;
	struct autofs_sb_info *sbi;
	struct autofs_info *ino;
	int pgrp = 0;
	bool pgrp_set = false;
	int ret = -EINVAL;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	pr_debug("starting up, sbi = %p\n", sbi);

	s->s_fs_info = sbi;
	sbi->magic = AUTOFS_SBI_MAGIC;
	sbi->pipefd = -1;
	sbi->pipe = NULL;
	sbi->exp_timeout = 0;
	sbi->oz_pgrp = NULL;
	sbi->sb = s;
	sbi->version = 0;
	sbi->sub_version = 0;
	sbi->flags = AUTOFS_SBI_CATATONIC;
	set_autofs_type_indirect(&sbi->type);
	sbi->min_proto = 0;
	sbi->max_proto = 0;
	mutex_init(&sbi->wq_mutex);
	mutex_init(&sbi->pipe_mutex);
	spin_lock_init(&sbi->fs_lock);
	sbi->queues = NULL;
	spin_lock_init(&sbi->lookup_lock);
	INIT_LIST_HEAD(&sbi->active_list);
	INIT_LIST_HEAD(&sbi->expiring_list);
	s->s_blocksize = 1024;
	s->s_blocksize_bits = 10;
	s->s_magic = AUTOFS_SUPER_MAGIC;
	s->s_op = &autofs_sops;
	s->s_d_op = &autofs_dentry_operations;
	s->s_time_gran = 1;

	/*
	 * Get the root inode and dentry, but defer checking for errors.
	 */
	ino = autofs_new_ino(sbi);
	if (!ino) {
		ret = -ENOMEM;
		goto fail_free;
	}
	root_inode = autofs_get_inode(s, S_IFDIR | 0755);
	root = d_make_root(root_inode);
	if (!root)
		goto fail_ino;
	pipe = NULL;

	root->d_fsdata = ino;

	/* Can this call block? */
	if (parse_options(data, root_inode, &pgrp, &pgrp_set, sbi)) {
		pr_err("called with bogus options\n");
		goto fail_dput;
	}

	/* Test versions first */
	if (sbi->max_proto < AUTOFS_MIN_PROTO_VERSION ||
	    sbi->min_proto > AUTOFS_MAX_PROTO_VERSION) {
		pr_err("kernel does not match daemon version "
		       "daemon (%d, %d) kernel (%d, %d)\n",
		       sbi->min_proto, sbi->max_proto,
		       AUTOFS_MIN_PROTO_VERSION, AUTOFS_MAX_PROTO_VERSION);
		goto fail_dput;
	}

	/* Establish highest kernel protocol version */
	if (sbi->max_proto > AUTOFS_MAX_PROTO_VERSION)
		sbi->version = AUTOFS_MAX_PROTO_VERSION;
	else
		sbi->version = sbi->max_proto;
	sbi->sub_version = AUTOFS_PROTO_SUBVERSION;

	if (pgrp_set) {
		sbi->oz_pgrp = find_get_pid(pgrp);
		if (!sbi->oz_pgrp) {
			pr_err("could not find process group %d\n",
				pgrp);
			goto fail_dput;
		}
	} else {
		sbi->oz_pgrp = get_task_pid(current, PIDTYPE_PGID);
	}

	if (autofs_type_trigger(sbi->type))
		__managed_dentry_set_managed(root);

	root_inode->i_fop = &autofs_root_operations;
	root_inode->i_op = &autofs_dir_inode_operations;

	pr_debug("pipe fd = %d, pgrp = %u\n",
		 sbi->pipefd, pid_nr(sbi->oz_pgrp));
	pipe = fget(sbi->pipefd);

	if (!pipe) {
		pr_err("could not open pipe file descriptor\n");
		goto fail_put_pid;
	}
	ret = autofs_prepare_pipe(pipe);
	if (ret < 0)
		goto fail_fput;
	sbi->pipe = pipe;
	sbi->flags &= ~AUTOFS_SBI_CATATONIC;

	/*
	 * Success! Install the root dentry now to indicate completion.
	 */
	s->s_root = root;
	return 0;

	/*
	 * Failure ... clean up.
	 */
fail_fput:
	pr_err("pipe file descriptor does not contain proper ops\n");
	fput(pipe);
fail_put_pid:
	put_pid(sbi->oz_pgrp);
fail_dput:
	dput(root);
	goto fail_free;
fail_ino:
	autofs_free_ino(ino);
fail_free:
	kfree(sbi);
	s->s_fs_info = NULL;
	return ret;
}
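
For context, the fd= and pgrp= options that parse_options() consumes come from the automount daemon at mount time. A hedged userspace sketch of that handshake (the source name, mount point, and protocol range are illustrative):

#include <stdio.h>
#include <unistd.h>
#include <sys/mount.h>

/* Returns the read end of the request pipe on success, -1 on error.
 * The kernel writes autofs packets to the fd= descriptor we pass in. */
int mount_autofs(const char *mountpoint)
{
	char opts[64];
	int fds[2];

	if (pipe(fds) < 0)
		return -1;

	snprintf(opts, sizeof(opts), "fd=%d,pgrp=%d,minproto=3,maxproto=5",
		 fds[1], getpgrp());

	if (mount("automount", mountpoint, "autofs", 0, opts) < 0) {
		close(fds[0]);
		close(fds[1]);
		return -1;
	}
	return fds[0];
}
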
Exemplo n.º 20
static struct i915_gem_context *
__create_hw_context(struct drm_i915_private *dev_priv,
		    struct drm_i915_file_private *file_priv)
{
	struct i915_gem_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	ret = assign_hw_id(dev_priv, &ctx->hw_id);
	if (ret) {
		kfree(ctx);
		return ERR_PTR(ret);
	}

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);
	ctx->i915 = dev_priv;

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;

		obj = alloc_context_obj(dev_priv, dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}

		vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			i915_gem_object_put(obj);
			ret = PTR_ERR(vma);
			goto err_out;
		}

		ctx->engine[RCS].state = vma;
	}

	/* Default context will never have a file_priv */
	ret = DEFAULT_CONTEXT_HANDLE;
	if (file_priv) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	}
	ctx->user_handle = ret;

	ctx->file_priv = file_priv;
	if (file_priv) {
		ctx->pid = get_task_pid(current, PIDTYPE_PID);
		ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
				      current->comm,
				      pid_nr(ctx->pid),
				      ctx->user_handle);
		if (!ctx->name) {
			ret = -ENOMEM;
			goto err_pid;
		}
	}

	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = ALL_L3_SLICES(dev_priv);

	i915_gem_context_set_bannable(ctx);
	ctx->ring_size = 4 * PAGE_SIZE;
	ctx->desc_template =
		default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);

	/* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
	 * present or not in use we still need a small bias as ring wraparound
	 * at offset 0 sometimes hangs. No idea why.
	 */
	if (HAS_GUC(dev_priv) && i915.enable_guc_loading)
		ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
	else
		ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;

	return ctx;

err_pid:
	put_pid(ctx->pid);
	idr_remove(&file_priv->context_idr, ctx->user_handle);
err_out:
	context_close(ctx);
	return ERR_PTR(ret);
}
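
The ctx->pid handling above follows the standard struct pid reference pattern: get_task_pid() takes a counted reference, pid_nr() merely reads the number, and put_pid() (in the error path here, and again at context teardown) balances the get. A minimal fragment of the pattern, assuming process context:

	struct pid *ref = get_task_pid(current, PIDTYPE_PID);

	pr_info("owner pid %d\n", pid_nr(ref));
	put_pid(ref);	/* balances get_task_pid() */
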
Exemplo n.º 21
int autofs4_fill_super(struct super_block *s, void *data, int silent)
{
	struct inode * root_inode;
	struct dentry * root;
	struct file * pipe;
	int pipefd;
	struct autofs_sb_info *sbi;
	struct autofs_info *ino;
	int pgrp = 0;
	bool pgrp_set = false;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		goto fail_unlock;
	DPRINTK("starting up, sbi = %p",sbi);

	s->s_fs_info = sbi;
	sbi->magic = AUTOFS_SBI_MAGIC;
	sbi->pipefd = -1;
	sbi->pipe = NULL;
	sbi->catatonic = 1;
	sbi->exp_timeout = 0;
	sbi->oz_pgrp = NULL;
	sbi->sb = s;
	sbi->version = 0;
	sbi->sub_version = 0;
	set_autofs_type_indirect(&sbi->type);
	sbi->min_proto = 0;
	sbi->max_proto = 0;
	mutex_init(&sbi->wq_mutex);
	mutex_init(&sbi->pipe_mutex);
	spin_lock_init(&sbi->fs_lock);
	sbi->queues = NULL;
	spin_lock_init(&sbi->lookup_lock);
	INIT_LIST_HEAD(&sbi->active_list);
	INIT_LIST_HEAD(&sbi->expiring_list);
	s->s_blocksize = 1024;
	s->s_blocksize_bits = 10;
	s->s_magic = AUTOFS_SUPER_MAGIC;
	s->s_op = &autofs4_sops;
	s->s_time_gran = 1;

	/*
	 * Get the root inode and dentry, but defer checking for errors.
	 */
	ino = autofs4_mkroot(sbi);
	if (!ino)
		goto fail_free;
	root_inode = autofs4_get_inode(s, ino);
	if (!root_inode)
		goto fail_ino;

	root = d_alloc_root(root_inode);
	if (!root)
		goto fail_iput;
	pipe = NULL;

	root->d_op = &autofs4_dentry_operations;
	root->d_fsdata = ino;

	/* Can this call block? */
	if (parse_options(data, &pipefd, &root_inode->i_uid, &root_inode->i_gid,
			  &pgrp, &pgrp_set, &sbi->type, &sbi->min_proto,
			  &sbi->max_proto)) {
		printk("autofs: called with bogus options\n");
		goto fail_dput;
	}

	if (pgrp_set) {
		sbi->oz_pgrp = find_get_pid(pgrp);
		if (!sbi->oz_pgrp) {
			pr_warn("autofs: could not find process group %d\n",
				pgrp);
			goto fail_dput;
		}
	} else {
		sbi->oz_pgrp = get_task_pid(current, PIDTYPE_PGID);
	}

	if (autofs_type_trigger(sbi->type))
		__managed_dentry_set_managed(root);

	root_inode->i_fop = &autofs4_root_operations;
	root_inode->i_op = &autofs4_dir_inode_operations;

	/* Couldn't this be tested earlier? */
	if (sbi->max_proto < AUTOFS_MIN_PROTO_VERSION ||
	    sbi->min_proto > AUTOFS_MAX_PROTO_VERSION) {
		printk("autofs: kernel does not match daemon version "
		       "daemon (%d, %d) kernel (%d, %d)\n",
			sbi->min_proto, sbi->max_proto,
			AUTOFS_MIN_PROTO_VERSION, AUTOFS_MAX_PROTO_VERSION);
		goto fail_dput;
	}

	/* Establish highest kernel protocol version */
	if (sbi->max_proto > AUTOFS_MAX_PROTO_VERSION)
		sbi->version = AUTOFS_MAX_PROTO_VERSION;
	else
		sbi->version = sbi->max_proto;
	sbi->sub_version = AUTOFS_PROTO_SUBVERSION;

	DPRINTK("pipe fd = %d, pgrp = %u", pipefd, pid_nr(sbi->oz_pgrp));
	pipe = fget(pipefd);

	if (!pipe) {
		printk("autofs: could not open pipe file descriptor\n");
		goto fail_dput;
	}
	if (!pipe->f_op || !pipe->f_op->write)
		goto fail_fput;
	sbi->pipe = pipe;
	sbi->pipefd = pipefd;
	sbi->catatonic = 0;

	/*
	 * Success! Install the root dentry now to indicate completion.
	 */
	s->s_root = root;
	return 0;
	
	/*
	 * Failure ... clean up.
	 */
fail_fput:
	printk("autofs: pipe file descriptor does not contain proper ops\n");
	fput(pipe);
	/* fall through */
fail_dput:
	dput(root);
	goto fail_free;
fail_iput:
	printk("autofs: get root dentry failed\n");
	iput(root_inode);
fail_ino:
	kfree(ino);
fail_free:
	put_pid(sbi->oz_pgrp);
	kfree(sbi);
	s->s_fs_info = NULL;
fail_unlock:
	return -EINVAL;
}
Exemplo n.º 22
static long ugidctl_setgroups(struct ugidctl_context *ctx, void __user *arg)
{
	struct ugidctl_setgroups_rq req;
	enum pid_type ptype;
	gid_t __user *list;
	struct cred *cred;
	unsigned i, count;
	gid_t *bulk;
	struct pid *tpid;
	pid_t pid;
	long rc;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	arg += sizeof(req);
	list = arg;

	count = (unsigned) req.count;

	if (count > NGROUPS_MAX)
		return -EINVAL;

	if (!count)
		return ugidctl_sys_setgroups(0, arg);

	if (capable(CAP_SETGID))
		return ugidctl_sys_setgroups((int) count, list);

	if (memcmp(ctx->key, req.key, sizeof(ctx->key)))
		return -EPERM;

	mutex_lock(&ctx->lock);

	ptype = ctx->ptype;
	pid = ctx->pid;

	mutex_unlock(&ctx->lock);

	tpid = get_task_pid(current, ptype);
	if (pid != pid_nr(tpid)) {
		/* drop the reference get_task_pid() took */
		put_pid(tpid);
		return -EPERM;
	}
	put_pid(tpid);

	bulk = kmalloc(count > UGIDCTL_BULKSIZE ?
		       sizeof(gid_t) * UGIDCTL_BULKSIZE :
		       sizeof(gid_t) * count, GFP_KERNEL);
	if (!bulk)
		return -ENOMEM;

	while (count) {
		unsigned size = count > UGIDCTL_BULKSIZE ?
				UGIDCTL_BULKSIZE : count;

		if (copy_from_user(bulk, arg, sizeof(gid_t) * size)) {
			kfree(bulk);	/* don't leak the bulk buffer */
			return -EFAULT;
		}

		mutex_lock(&ctx->lock);

		for (i = 0; i < size; i++) {
			if (ugidctl_find_gid(ctx, bulk[i])) {
				mutex_unlock(&ctx->lock);
				kfree(bulk);
				return -EPERM;
			}
		}

		mutex_unlock(&ctx->lock);

		arg += sizeof(gid_t) * size;
		count -= size;
	}

	kfree(bulk);

	cred = prepare_creds();
	if (!cred)
		return -ENOMEM;

	cap_raise(cred->cap_effective, CAP_SETGID);

	commit_creds(cred);

	rc = ugidctl_sys_setgroups((int) req.count, list);

	cred = prepare_creds();
	if (!cred) {
		/* unable to restore process capabilities - kill process */
		do_exit(SIGKILL);
		return -ENOMEM;
	}

	cap_lower(cred->cap_effective, CAP_SETGID);

	commit_creds(cred);

	return rc;
}
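
Because get_task_pid() returns a counted reference, the current-pid comparison above has to drop it explicitly. Factoring that into a small helper keeps the put next to the get (the helper name is ours, not a kernel API):

/* Hypothetical helper: global numeric id of current for an arbitrary
 * pid type, without leaking the reference get_task_pid() takes. */
static pid_t current_pid_nr_type(enum pid_type ptype)
{
	struct pid *tpid = get_task_pid(current, ptype);
	pid_t nr = pid_nr(tpid);

	put_pid(tpid);
	return nr;
}
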
Exemplo n.º 23
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace)
{
	int retval;
	struct task_struct *p;
	int cgroup_callbacks_done = 0;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
				current->signal->flags & SIGNAL_UNKILLABLE)
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
		    p->real_cred->user != INIT_USER)
			goto bad_fork_free;
	}

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;
#ifdef CONFIG_SECURITY_ANOUBIS
	anoubis_task_create(p);
#endif

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->gtime = cputime_zero;
	p->utimescaled = cputime_zero;
	p->stimescaled = cputime_zero;
	p->prev_utime = cputime_zero;
	p->prev_stime = cputime_zero;

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
	p->real_start_time = p->start_time;
	monotonic_to_bootbased(&p->real_start_time);
	p->io_context = NULL;
	p->audit_context = NULL;
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_cgroup;
	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	p->hardirqs_enabled = 1;
#else
	p->hardirqs_enabled = 0;
#endif
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif

	p->bts = NULL;

	p->stack_start = stack_start;

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;

	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_policy;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_namespaces(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_io(clone_flags, p)))
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		retval = -ENOMEM;
		pid = alloc_pid(p->nsproxy->pid_ns);
		if (!pid)
			goto bad_fork_cleanup_io;

		if (clone_flags & CLONE_NEWPID) {
			retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
			if (retval < 0)
				goto bad_fork_free_pid;
		}
	}

	p->pid = pid_nr(pid);
	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	if (current->nsproxy != p->nsproxy) {
		retval = ns_cgroup_clone(p, pid);
		if (retval)
			goto bad_fork_free_pid;
	}

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We dont wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);

	/* Now that the task is set up, run cgroup callbacks if
	 * necessary. We need to run them before the task is visible
	 * on the tasklist. */
	cgroup_fork_callbacks(p);
	cgroup_callbacks_done = 1;

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/*
	 * The task hasn't been attached yet, so its cpus_allowed mask will
	 * not be changed, nor will its assigned CPU.
	 *
	 * The cpus_allowed mask of the parent may have changed after it was
	 * copied first time - so re-copy it here, then check the child's CPU
	 * to ensure it is on a valid CPU (and if not, just force it back to
	 * parent's CPU). This avoids alot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
			!cpu_online(task_cpu(p))))
		set_task_cpu(p, smp_processor_id());

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * it's process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
 	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (clone_flags & CLONE_THREAD) {
		atomic_inc(&current->signal->count);
		atomic_inc(&current->signal->live);
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
	}

	if (likely(p->pid)) {
		list_add_tail(&p->sibling, &p->real_parent->children);
		tracehook_finish_clone(p, clone_flags, trace);

		if (thread_group_leader(p)) {
			if (clone_flags & CLONE_NEWPID)
				p->nsproxy->pid_ns->child_reaper = p;

			p->signal->leader_pid = pid;
			tty_kref_put(p->signal->tty);
			p->signal->tty = tty_kref_get(current->signal->tty);
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__get_cpu_var(process_counts)++;
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	cgroup_post_fork(p);
	perf_event_fork(p);
	return p;

bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	put_io_context(p->io_context);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		__cleanup_signal(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_policy:
	perf_event_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
	cgroup_exit(p, cgroup_callbacks_done);
	delayacct_tsk_free(p);
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	anoubis_task_destroy(p);
	exit_creds(p);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}
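
copy_process() is the canonical instance of the kernel's goto-based unwind: each acquired resource gets a cleanup label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A toy sketch of the shape (the resources and helpers are illustrative, not kernel APIs):

static int setup_object(struct object *obj)
{
	int err;

	err = acquire_a(obj);
	if (err)
		goto out;
	err = acquire_b(obj);
	if (err)
		goto undo_a;
	err = acquire_c(obj);
	if (err)
		goto undo_b;
	return 0;

undo_b:
	release_b(obj);	/* release in reverse order of acquisition */
undo_a:
	release_a(obj);
out:
	return err;
}
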
Exemplo n.º 24
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *parent_tidptr,
					int __user *child_tidptr,
					struct pid *pid)
{
	int retval;
	struct task_struct *p;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->user->processes) >=
			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
				p->user != &root_user)
			goto bad_fork_free;
	}

	atomic_inc(&p->user->__count);
	atomic_inc(&p->user->processes);
	get_group_info(p->group_info);

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	if (p->binfmt && !try_module_get(p->binfmt->module))
		goto bad_fork_cleanup_put_domain;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	p->pid = pid_nr(pid);
	retval = -EFAULT;
	if (clone_flags & CLONE_PARENT_SETTID)
		if (put_user(p->pid, parent_tidptr))
			goto bad_fork_cleanup_delays_binfmt;

	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	clear_tsk_thread_flag(p, TIF_SIGPENDING);
	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->sched_time = 0;
#ifdef CONFIG_DETECT_SOFTLOCKUP
	p->last_switch_count = 0;
	p->last_switch_timestamp = 0;
#endif

#ifdef CONFIG_TASK_XACCT
	p->rchar = 0;		/* I/O counter: bytes read */
	p->wchar = 0;		/* I/O counter: bytes written */
	p->syscr = 0;		/* I/O counter: read syscalls */
	p->syscw = 0;		/* I/O counter: write syscalls */
#endif
	task_io_accounting_init(p);
	acct_clear_integrals(p);

	p->it_virt_expires = cputime_zero;
	p->it_prof_expires = cputime_zero;
	p->it_sched_expires = 0;
	INIT_LIST_HEAD(&p->cpu_timers[0]);
	INIT_LIST_HEAD(&p->cpu_timers[1]);
	INIT_LIST_HEAD(&p->cpu_timers[2]);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
#ifdef CONFIG_SECURITY
	p->security = NULL;
#endif
	p->io_context = NULL;
	p->audit_context = NULL;
	cpuset_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_copy(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_cpuset;
	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	p->hardirqs_enabled = 1;
#else
	p->hardirqs_enabled = 0;
#endif
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif

	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	if ((retval = security_task_alloc(p)))
		goto bad_fork_cleanup_policy;
	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_security;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_keys(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_namespaces(clone_flags, p)))
		goto bad_fork_cleanup_keys;
	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_namespaces;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;

	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif

	/* Our parent execution domain becomes current domain
	   These must match for thread signalling to apply */
	p->parent_exec_id = p->self_exec_id;

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 :
			 (clone_flags & CLONE_PARENT) ? current->group_leader->exit_signal :
			 (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We dont wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);
	INIT_LIST_HEAD(&p->ptrace_children);
	INIT_LIST_HEAD(&p->ptrace_list);

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/* for sys_ioprio_set(IOPRIO_WHO_PGRP) */
	p->ioprio = current->ioprio;

	/*
	 * The task hasn't been attached yet, so its cpus_allowed mask will
	 * not be changed, nor will its assigned CPU.
	 *
	 * The cpus_allowed mask of the parent may have changed after it was
	 * copied first time - so re-copy it here, then check the child's CPU
	 * to ensure it is on a valid CPU (and if not, just force it back to
	 * parent's CPU). This avoids alot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
			!cpu_online(task_cpu(p))))
		set_task_cpu(p, smp_processor_id());

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
		p->real_parent = current->real_parent;
	else
		p->real_parent = current;
	p->parent = p->real_parent;

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * it's process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
 	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_cleanup_namespaces;
	}

	if (clone_flags & CLONE_THREAD) {
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);

		if (!cputime_eq(current->signal->it_virt_expires,
				cputime_zero) ||
		    !cputime_eq(current->signal->it_prof_expires,
				cputime_zero) ||
		    current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
		    !list_empty(&current->signal->cpu_timers[0]) ||
		    !list_empty(&current->signal->cpu_timers[1]) ||
		    !list_empty(&current->signal->cpu_timers[2])) {
			/*
			 * Have child wake up on its first tick to check
			 * for process CPU timers.
			 */
			p->it_prof_expires = jiffies_to_cputime(1);
		}
	}

	if (likely(p->pid)) {
		add_parent(p);
		if (unlikely(p->ptrace & PT_PTRACED))
			__ptrace_link(p, current->parent);

		if (thread_group_leader(p)) {
			p->signal->tty = current->signal->tty;
			p->signal->pgrp = process_group(current);
			set_signal_session(p->signal, process_session(current));
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));

			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__get_cpu_var(process_counts)++;
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	return p;

bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_keys:
	exit_keys(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	cleanup_signal(p);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
	mpol_free(p->mempolicy);
bad_fork_cleanup_cpuset:
#endif
	cpuset_exit(p);
bad_fork_cleanup_delays_binfmt:
	delayacct_tsk_free(p);
	if (p->binfmt)
		module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	put_group_info(p->group_info);
	atomic_dec(&p->user->processes);
	free_uid(p->user);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}
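
The tgid logic shared by all three copy_process() versions above (p->tgid = p->pid unless CLONE_THREAD, in which case the creator's tgid is inherited) is exactly what userspace observes as getpid() versus gettid(). A small illustrative check (pthreads create threads with CLONE_THREAD, so the tgid is shared while each task keeps its own pid; build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static void *report(void *arg)
{
	(void)arg;
	/* same tgid as the leader, different tid */
	printf("thread: tgid=%d tid=%ld\n", getpid(),
	       (long)syscall(SYS_gettid));
	return NULL;
}

int main(void)
{
	pthread_t t;

	/* for the group leader, tid == tgid */
	printf("leader: tgid=%d tid=%ld\n", getpid(),
	       (long)syscall(SYS_gettid));
	pthread_create(&t, NULL, report, NULL);
	pthread_join(t, NULL);
	return 0;
}
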