Example #1
/*
 * Close the old accounting file (if currently open) and then replace
 * it with file (if non-NULL).
 *
 * NOTE: acct_globals.lock MUST be held on entry and exit.
 */
static void acct_file_reopen(struct file *file)
{
	struct file *old_acct = NULL;
	struct pid_namespace *old_ns = NULL;

	if (acct_globals.file) {
		old_acct = acct_globals.file;
		old_ns = acct_globals.ns;
		del_timer(&acct_globals.timer);
		acct_globals.active = 0;
		acct_globals.needcheck = 0;
		acct_globals.file = NULL;
	}
	if (file) {
		acct_globals.file = file;
		acct_globals.ns = get_pid_ns(task_active_pid_ns(current));
		acct_globals.needcheck = 0;
		acct_globals.active = 1;
		/* It's been deleted if it was used before so this is safe */
		init_timer(&acct_globals.timer);
		acct_globals.timer.function = acct_timeout;
		acct_globals.timer.expires = jiffies + ACCT_TIMEOUT*HZ;
		add_timer(&acct_globals.timer);
	}
	if (old_acct) {
		mnt_unpin(old_acct->f_path.mnt);
		spin_unlock(&acct_globals.lock);
		do_acct_process(old_ns, old_acct);
		filp_close(old_acct, NULL);
		put_pid_ns(old_ns);
		spin_lock(&acct_globals.lock);
	}
}
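The accounting example above pairs get_pid_ns() with a later put_pid_ns(): the reference is taken when a new accounting file is installed and dropped only after the old file has been flushed and closed. A minimal sketch of that ownership rule, using a hypothetical acct_state struct instead of the real acct_globals and leaving out the timer and locking details:

static struct acct_state {
	struct file *file;
	struct pid_namespace *ns;	/* one reference held while file != NULL */
} acct_state;

/* Install a new accounting file and pin the caller's pid namespace. */
static void acct_state_install(struct file *file)
{
	acct_state.file = file;
	acct_state.ns = get_pid_ns(task_active_pid_ns(current));	/* +1 ref */
}

/* Tear down: close the file first, then release the namespace reference. */
static void acct_state_clear(void)
{
	if (!acct_state.file)
		return;
	filp_close(acct_state.file, NULL);
	put_pid_ns(acct_state.ns);	/* -1 ref; may free the namespace */
	acct_state.file = NULL;
	acct_state.ns = NULL;
}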
Example #2
/*
 * Create new nsproxy and all of its associated namespaces.
 * Return the newly created nsproxy.  Do not attach this to the task;
 * leave it to the caller to do proper locking and attach it to the task.
 */
static struct nsproxy *create_new_namespaces(unsigned long flags,
	struct task_struct *tsk, struct user_namespace *user_ns,
	struct fs_struct *new_fs)
{
	struct nsproxy *new_nsp;
	int err;

	new_nsp = create_nsproxy();
	if (!new_nsp)
		return ERR_PTR(-ENOMEM);

	new_nsp->mnt_ns = copy_mnt_ns(flags, tsk->nsproxy->mnt_ns, user_ns, new_fs);
	if (IS_ERR(new_nsp->mnt_ns)) {
		err = PTR_ERR(new_nsp->mnt_ns);
		goto out_ns;
	}

	new_nsp->uts_ns = copy_utsname(flags, user_ns, tsk->nsproxy->uts_ns);
	if (IS_ERR(new_nsp->uts_ns)) {
		err = PTR_ERR(new_nsp->uts_ns);
		goto out_uts;
	}

	new_nsp->ipc_ns = copy_ipcs(flags, user_ns, tsk->nsproxy->ipc_ns);
	if (IS_ERR(new_nsp->ipc_ns)) {
		err = PTR_ERR(new_nsp->ipc_ns);
		goto out_ipc;
	}

	new_nsp->pid_ns_for_children =
		copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns_for_children);
	if (IS_ERR(new_nsp->pid_ns_for_children)) {
		err = PTR_ERR(new_nsp->pid_ns_for_children);
		goto out_pid;
	}

	new_nsp->net_ns = copy_net_ns(flags, user_ns, tsk->nsproxy->net_ns);
	if (IS_ERR(new_nsp->net_ns)) {
		err = PTR_ERR(new_nsp->net_ns);
		goto out_net;
	}

	return new_nsp;

out_net:
	if (new_nsp->pid_ns_for_children)
		put_pid_ns(new_nsp->pid_ns_for_children);
out_pid:
	if (new_nsp->ipc_ns)
		put_ipc_ns(new_nsp->ipc_ns);
out_ipc:
	if (new_nsp->uts_ns)
		put_uts_ns(new_nsp->uts_ns);
out_uts:
	if (new_nsp->mnt_ns)
		put_mnt_ns(new_nsp->mnt_ns);
out_ns:
	kmem_cache_free(nsproxy_cachep, new_nsp);
	return ERR_PTR(err);
}
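A hedged sketch of how a caller might consume create_new_namespaces(): the returned nsproxy owns one reference on each namespace (including pid_ns_for_children), and those references are ultimately dropped through free_nsproxy(), which calls put_pid_ns() and its siblings (see Examples #7 and #9). The locking and credential handling of the real copy_namespaces()/unshare path are simplified here; switch_task_namespaces() is the real attach helper, while attach_fresh_namespaces is a made-up wrapper name.

static int attach_fresh_namespaces(struct task_struct *tsk, unsigned long flags)
{
	struct nsproxy *new_ns;

	new_ns = create_new_namespaces(flags, tsk, current_user_ns(), tsk->fs);
	if (IS_ERR(new_ns))
		return PTR_ERR(new_ns);

	/* Hand the new nsproxy (and its namespace references) to the task. */
	switch_task_namespaces(tsk, new_ns);
	return 0;
}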
Example #3
struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	tmp = ns;
	pid->level = ns->level;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0)
			goto out_free;

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	if (unlikely(is_child_reaper(pid))) {
		if (pid_ns_prepare_proc(ns))
			goto out_free;
	}

	get_pid_ns(ns);
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	if (!(ns->nr_hashed & PIDNS_HASH_ADDING))
		goto out_unlock;
	for ( ; upid >= pid->numbers; --upid) {
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
		upid->ns->nr_hashed++;
	}
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);
	put_pid_ns(ns);

out_free:
	while (++i <= ns->level)
		free_pidmap(pid->numbers + i);

	kmem_cache_free(ns->pid_cachep, pid);
	pid = NULL;
	goto out;
}
Example #4
static void proc_kill_sb(struct super_block *sb)
{
	struct pid_namespace *ns;

	ns = (struct pid_namespace *)sb->s_fs_info;
	kill_anon_super(sb);
	put_pid_ns(ns);
}
Example #5
/*
 * Create new nsproxy and all of its associated namespaces.
 * Return the newly created nsproxy.  Do not attach this to the task;
 * leave it to the caller to do proper locking and attach it to the task.
 */
static struct nsproxy *create_new_namespaces(unsigned long flags,
			struct task_struct *tsk, struct fs_struct *new_fs)
{
	struct nsproxy *new_nsp;
	int err;
	// Clone the current nsproxy: copy the old contents into the newly allocated one and set its reference count to 1.
	new_nsp = clone_nsproxy(tsk->nsproxy);
	if (!new_nsp)
		return ERR_PTR(-ENOMEM);
	// Check the CLONE_NEWNS flag to decide whether to create a new mount namespace.
	new_nsp->mnt_ns = copy_mnt_ns(flags, tsk->nsproxy->mnt_ns, new_fs);
	if (IS_ERR(new_nsp->mnt_ns)) {
		err = PTR_ERR(new_nsp->mnt_ns);
		goto out_ns;
	}
	// Check the CLONE_NEWUTS flag to decide whether to create a new UTS namespace.
	new_nsp->uts_ns = copy_utsname(flags, tsk->nsproxy->uts_ns);
	if (IS_ERR(new_nsp->uts_ns)) {
		err = PTR_ERR(new_nsp->uts_ns);
		goto out_uts;
	}
	// Check the CLONE_NEWIPC flag to decide whether to create a new IPC namespace.
	new_nsp->ipc_ns = copy_ipcs(flags, tsk->nsproxy->ipc_ns);
	if (IS_ERR(new_nsp->ipc_ns)) {
		err = PTR_ERR(new_nsp->ipc_ns);
		goto out_ipc;
	}
	// Check the CLONE_NEWPID flag to decide whether to create a new PID namespace.
	new_nsp->pid_ns = copy_pid_ns(flags, task_active_pid_ns(tsk));
	if (IS_ERR(new_nsp->pid_ns)) {
		err = PTR_ERR(new_nsp->pid_ns);
		goto out_pid;
	}
	// Check the CLONE_NEWNET flag to decide whether to create a new network namespace.
	new_nsp->net_ns = copy_net_ns(flags, tsk->nsproxy->net_ns);
	if (IS_ERR(new_nsp->net_ns)) {
		err = PTR_ERR(new_nsp->net_ns);
		goto out_net;
	}

	return new_nsp;

out_net:
	if (new_nsp->pid_ns)
		put_pid_ns(new_nsp->pid_ns);
out_pid:
	if (new_nsp->ipc_ns)
		put_ipc_ns(new_nsp->ipc_ns);
out_ipc:
	if (new_nsp->uts_ns)
		put_uts_ns(new_nsp->uts_ns);
out_uts:
	if (new_nsp->mnt_ns)
		put_mnt_ns(new_nsp->mnt_ns);
out_ns:
	kmem_cache_free(nsproxy_cachep, new_nsp);
	return ERR_PTR(err);
}
Example #6
File: root.c Project: tomdbomb/YAKM
static void proc_kill_sb(struct super_block *sb)
{
    struct pid_namespace *ns;

    ns = (struct pid_namespace *)sb->s_fs_info;
    if (ns->proc_self)
        dput(ns->proc_self);
    if (ns->proc_thread_self)
        dput(ns->proc_thread_self);
    kill_anon_super(sb);
    put_pid_ns(ns);
}
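Both proc_kill_sb() variants drop a reference taken when the proc superblock was set up. The mount side differs across kernel versions, but the pairing looks roughly like the sketch below (sketch_proc_set_ns is a made-up name; the real code stores the namespace while filling the superblock):

static int sketch_proc_set_ns(struct super_block *sb, struct pid_namespace *ns)
{
	/* +1 reference, released later by proc_kill_sb() via put_pid_ns() */
	sb->s_fs_info = get_pid_ns(ns);
	return 0;
}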
Example #7
void free_nsproxy(struct nsproxy *ns)
{
	if (ns->mnt_ns)
		put_mnt_ns(ns->mnt_ns);
	if (ns->uts_ns)
		put_uts_ns(ns->uts_ns);
	if (ns->ipc_ns)
		put_ipc_ns(ns->ipc_ns);
	if (ns->pid_ns)
		put_pid_ns(ns->pid_ns);
	put_net(ns->net_ns);
	kmem_cache_free(nsproxy_cachep, ns);
}
Example #8
void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
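put_pid() above is where the last user of a pid indirectly releases the namespace: freeing the struct pid drops the reference that alloc_pid() took with get_pid_ns() (Example #3). A sketch, not taken from the tree, of the usual caller-side pairing with get_pid()/put_pid(), using a hypothetical watcher struct:

struct watcher {
	struct pid *target;		/* cached pid, owns one reference */
};

static void watcher_set_target(struct watcher *w, struct pid *pid)
{
	put_pid(w->target);		/* drop any previous reference (NULL-safe) */
	w->target = get_pid(pid);	/* +1 ref on the new target (NULL-safe) */
}

static void watcher_release(struct watcher *w)
{
	put_pid(w->target);		/* last put frees the pid and drops its ns reference */
	w->target = NULL;
}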
Example #9
void free_nsproxy(struct nsproxy *ns)
{
	if (ns->mnt_ns)
		put_mnt_ns(ns->mnt_ns);
	if (ns->uts_ns)
		put_uts_ns(ns->uts_ns);
	if (ns->ipc_ns)
		put_ipc_ns(ns->ipc_ns);
	if (ns->pid_ns_for_children)
		put_pid_ns(ns->pid_ns_for_children);
	put_cgroup_ns(ns->cgroup_ns);
	put_net(ns->net_ns);
	kmem_cache_free(nsproxy_cachep, ns);
}
Example #10
static int __reserve_pid(pid_t nr)
{
	kerrighed_node_t orig_node = ORIG_NODE(nr);
	struct pid_namespace *pid_ns = find_get_krg_pid_ns();
	struct pid_namespace *pidmap_ns;
	struct pid *pid;
	int r;

	if (orig_node == kerrighed_node_id)
		pidmap_ns = pid_ns;
	else
		pidmap_ns = node_pidmap(orig_node);
	BUG_ON(!pidmap_ns);

	r = reserve_pidmap(pidmap_ns, nr);
	if (r) {
		r = -E_CR_PIDBUSY;
		goto out;
	}

	pid = __alloc_pid(pid_ns, &nr);
	if (!pid) {
		struct upid upid = {
			.nr = nr,
			.ns = pidmap_ns,
		};

		__free_pidmap(&upid);

		r = -ENOMEM;
		goto out;
	}

	/*
	 * This is not always mandatory, but it is really hard to tell
	 * when it is and when it is not.
	 */
	r = create_pid_kddm_object(pid, 1);
	if (r)
		goto error;

out:
	put_pid_ns(pid_ns);
	return r;

error:
	free_pid(pid);
	goto out;
}
Example #11
static struct pid *no_pid(int nr)
{
	struct pid_namespace *ns;
	struct pid_kddm_object *obj;
	struct pid *pid;

	obj = _kddm_grab_object_no_ft(pid_kddm_set, nr);
	if (IS_ERR(obj))
		return NULL;
	BUG_ON(!obj);

	spin_lock(&pid_kddm_lock);
	rcu_read_lock();
	pid = find_kpid(nr); /* Double check once locked */
	rcu_read_unlock();
	/*
	 * No need to get a reference on pid since we know that it is used on
	 * another node: nobody will free it for the moment.
	 */

	if (!pid) {
		ns = find_get_krg_pid_ns();
		pid = __alloc_pid(ns, &nr);
		put_pid_ns(ns);
		if (!pid)
			goto out_unlock;
		obj->pid = pid;
		pid->kddm_obj = obj;
	}
	BUG_ON(pid->kddm_obj != obj);

	__get_pid(obj);

out_unlock:
	spin_unlock(&pid_kddm_lock);
	_kddm_put_object(pid_kddm_set, nr);

	return pid;
}
Example #12
static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		pidlist_free(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}
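The put_pid_ns() in the destroy work above releases a reference recorded in the pidlist key when the list was created. A sketch of that creation step, assuming the cgroup_pidlist layout of this era of cgroup.c (fields key, links, owner, destroy_dwork, plus a pidlists list on struct cgroup) and simplifying the real lookup and allocation logic; sketch_pidlist_create is a made-up name and the caller is assumed to hold the owner's pidlist_mutex:

static struct cgroup_pidlist *sketch_pidlist_create(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l;

	l = kzalloc(sizeof(*l), GFP_KERNEL);
	if (!l)
		return NULL;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	/* Pin the opener's pid namespace; dropped later in the destroy work. */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}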