struct pid *alloc_pid(struct pid_namespace *ns)
{
        struct pid *pid;
        enum pid_type type;
        int i, nr;
        struct pid_namespace *tmp;
        struct upid *upid;

        pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
        if (!pid)
                goto out;

        tmp = ns;
        pid->level = ns->level;
        for (i = ns->level; i >= 0; i--) {
                nr = alloc_pidmap(tmp);
                if (nr < 0)
                        goto out_free;

                pid->numbers[i].nr = nr;
                pid->numbers[i].ns = tmp;
                tmp = tmp->parent;
        }

        if (unlikely(is_child_reaper(pid))) {
                if (pid_ns_prepare_proc(ns))
                        goto out_free;
        }

        get_pid_ns(ns);
        atomic_set(&pid->count, 1);
        for (type = 0; type < PIDTYPE_MAX; ++type)
                INIT_HLIST_HEAD(&pid->tasks[type]);

        upid = pid->numbers + ns->level;
        spin_lock_irq(&pidmap_lock);
        if (!(ns->nr_hashed & PIDNS_HASH_ADDING))
                goto out_unlock;
        for ( ; upid >= pid->numbers; --upid) {
                hlist_add_head_rcu(&upid->pid_chain,
                                &pid_hash[pid_hashfn(upid->nr, upid->ns)]);
                upid->ns->nr_hashed++;
        }
        spin_unlock_irq(&pidmap_lock);

out:
        return pid;

out_unlock:
        spin_unlock_irq(&pidmap_lock);
        put_pid_ns(ns);

out_free:
        while (++i <= ns->level)
                free_pidmap(pid->numbers + i);

        kmem_cache_free(ns->pid_cachep, pid);
        pid = NULL;
        goto out;
}
struct pid *alloc_pid(struct pid_namespace *ns)
{
        struct pid *pid;
        enum pid_type type;
        int i, nr;
        struct pid_namespace *tmp;
        struct upid *upid;

        pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
        if (!pid)
                goto out;

        tmp = ns;
        for (i = ns->level; i >= 0; i--) {
                nr = alloc_pidmap(tmp);
                if (nr < 0)
                        goto out_free;

                pid->numbers[i].nr = nr;
                pid->numbers[i].ns = tmp;
                tmp = tmp->parent;
        }

        get_pid_ns(ns);
        pid->level = ns->level;
        atomic_set(&pid->count, 1);
        for (type = 0; type < PIDTYPE_MAX; ++type)
                INIT_HLIST_HEAD(&pid->tasks[type]);

        spin_lock_irq(&pidmap_lock);
        for (i = ns->level; i >= 0; i--) {
                upid = &pid->numbers[i];
                hlist_add_head_rcu(&upid->pid_chain,
                                &pid_hash[pid_hashfn(upid->nr, upid->ns)]);
        }
        spin_unlock_irq(&pidmap_lock);

out:
        return pid;

out_free:
        for (i++; i <= ns->level; i++)
                free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);
        kmem_cache_free(ns->pid_cachep, pid);
        pid = NULL;
        goto out;
}
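/*
 * Illustrative user-space sketch (not part of the kernel listings above):
 * the upid array that alloc_pid() fills holds one number per namespace
 * level, which is why a task can be PID 1 inside a freshly created PID
 * namespace while the parent namespace sees a different, outer number.
 * Assumes a Linux host and CAP_SYS_ADMIN for CLONE_NEWPID.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static char child_stack[1024 * 1024];

static int child_fn(void *arg)
{
        /* Inner view: pid->numbers[ns->level].nr, i.e. 1 in the new ns. */
        printf("child sees itself as pid %d\n", (int)getpid());
        return 0;
}

int main(void)
{
        pid_t pid = clone(child_fn, child_stack + sizeof(child_stack),
                          CLONE_NEWPID | SIGCHLD, NULL);
        if (pid < 0) {
                perror("clone(CLONE_NEWPID)");
                return 1;
        }
        /* Outer view: the same task's pid->numbers[0].nr. */
        printf("parent sees the child as pid %d\n", (int)pid);
        waitpid(pid, NULL, 0);
        return 0;
}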
struct task_struct *copy_process(unsigned long clone_flags,
                                 unsigned long stack_start,
                                 struct pt_regs *regs,
                                 unsigned long stack_size,
                                 int __user *parent_tidptr,
                                 int __user *child_tidptr)
{
        //...
        struct task_struct *p = NULL;

        p = dup_task_struct(current);
        //...
        if (nr_threads >= max_threads)
                goto bad_fork_cleanup_count;
        //...
#ifdef CONFIG_PREEMPT
        p->thread_info->preempt_count = 1;
#endif
        p->did_exec = 0;
        p->state = TASK_UNINTERRUPTIBLE;
        copy_flags(clone_flags, p);
        p->pid = alloc_pidmap();
        //...
        copy_semundo(clone_flags, p);
        copy_signal(clone_flags, p);
        //copy_xxx(clone_flags, p);
        copy_namespace(clone_flags, p);
        copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
        //...
        local_irq_disable();
        p->time_slice = (current->time_slice + 1) >> 1;
        p->first_time_slice = 1;
        current->time_slice >>= 1;
        local_irq_enable();

        return p;
}
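/*
 * Worked check of the timeslice split above (an illustration, not kernel
 * code): the child gets the rounded-up half and the parent keeps the
 * rounded-down half, so fork() never mints scheduling time out of thin
 * air. For 11 remaining ticks: child 6, parent 5, total still 11.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned int parent_slice;

        for (parent_slice = 1; parent_slice <= 1000; parent_slice++) {
                unsigned int child  = (parent_slice + 1) >> 1;
                unsigned int parent = parent_slice >> 1;
                assert(child + parent == parent_slice);
        }
        puts("the split always conserves the parent's remaining timeslice");
        return 0;
}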
/*
 * Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
             unsigned long stack_start,
             struct pt_regs *regs,
             unsigned long stack_size,
             int __user *parent_tidptr,
             int __user *child_tidptr)
{
        struct task_struct *p;
        int trace = 0;
        long pid = alloc_pidmap();

        if (pid < 0)
                return -EAGAIN;
        if (unlikely(current->ptrace)) {
                trace = fork_traceflag(clone_flags);
                if (trace)
                        clone_flags |= CLONE_PTRACE;
        }

        p = copy_process(clone_flags, stack_start, regs, stack_size,
                         parent_tidptr, child_tidptr, pid);
        /*
         * Do this prior waking up the new thread - the thread pointer
         * might get invalid after that point, if the thread exits quickly.
         */
        if (!IS_ERR(p)) {
                struct completion vfork;

                if (clone_flags & CLONE_VFORK) {
                        p->vfork_done = &vfork;
                        init_completion(&vfork);
                }

                if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
                        /*
                         * We'll start up with an immediate SIGSTOP.
                         */
                        sigaddset(&p->pending.signal, SIGSTOP);
                        set_tsk_thread_flag(p, TIF_SIGPENDING);
                }

                if (!(clone_flags & CLONE_STOPPED))
                        wake_up_new_task(p, clone_flags);
                else
                        p->state = TASK_STOPPED;

                if (unlikely(trace)) {
                        current->ptrace_message = pid;
                        ptrace_notify((trace << 8) | SIGTRAP);
                }

                if (clone_flags & CLONE_VFORK) {
                        wait_for_completion(&vfork);
                        if (unlikely(current->ptrace & PT_TRACE_VFORK_DONE))
                                ptrace_notify((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
                }
        } else {
                free_pidmap(pid);
                pid = PTR_ERR(p);
        }
        return pid;
}
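/*
 * Illustrative user-space sketch: the vfork_done completion handled in
 * do_fork() above is what makes vfork(2) (a fork with CLONE_VFORK set)
 * block the parent until the child execs or exits. Plain C on Linux.
 */
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t pid = vfork();    /* reaches do_fork() with CLONE_VFORK set */

        if (pid == 0) {
                /* Child: only async-signal-safe work, then _exit/exec. */
                write(STDOUT_FILENO, "child runs first\n", 17);
                _exit(0);       /* completes the parent's vfork_done */
        }
        if (pid < 0) {
                perror("vfork");
                return 1;
        }
        /* Parent resumes only after wait_for_completion(&vfork) fires. */
        printf("parent resumes after child %d is done with the VM\n",
               (int)pid);
        waitpid(pid, NULL, 0);
        return 0;
}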
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
struct task_struct *copy_process(unsigned long clone_flags,
                                 unsigned long stack_start,
                                 struct pt_regs *regs,
                                 unsigned long stack_size,
                                 int __user *parent_tidptr,
                                 int __user *child_tidptr)
{
        int retval;
        struct task_struct *p = NULL;

        if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
                return ERR_PTR(-EINVAL);

        /*
         * Thread groups must share signals as well, and detached threads
         * can only be started up within the thread group.
         */
        if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
                return ERR_PTR(-EINVAL);

        /*
         * Shared signal handlers imply shared VM. By way of the above,
         * thread groups also imply shared VM. Blocking this case allows
         * for various simplifications in other code.
         */
        if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
                return ERR_PTR(-EINVAL);

        retval = security_task_create(clone_flags);
        if (retval)
                goto fork_out;

        retval = -ENOMEM;
        p = dup_task_struct(current);
        if (!p)
                goto fork_out;

        retval = -EAGAIN;
        if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur) {
                if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
                    p->user != &root_user)
                        goto bad_fork_free;
        }

        atomic_inc(&p->user->__count);
        atomic_inc(&p->user->processes);
        get_group_info(p->group_info);

        /*
         * If multiple threads are within copy_process(), then this check
         * triggers too late. This doesn't hurt, the check is only there
         * to stop root fork bombs.
         */
        if (nr_threads >= max_threads)
                goto bad_fork_cleanup_count;

        if (!try_module_get(p->thread_info->exec_domain->module))
                goto bad_fork_cleanup_count;

        if (p->binfmt && !try_module_get(p->binfmt->module))
                goto bad_fork_cleanup_put_domain;

        p->did_exec = 0;
        copy_flags(clone_flags, p);
        if (clone_flags & CLONE_IDLETASK)
                p->pid = 0;
        else {
                p->pid = alloc_pidmap();
                if (p->pid == -1)
                        goto bad_fork_cleanup;
        }
        retval = -EFAULT;
        if (clone_flags & CLONE_PARENT_SETTID)
                if (put_user(p->pid, parent_tidptr))
                        goto bad_fork_cleanup;

        p->proc_dentry = NULL;

        INIT_LIST_HEAD(&p->children);
        INIT_LIST_HEAD(&p->sibling);
        init_waitqueue_head(&p->wait_chldexit);
        p->vfork_done = NULL;
        spin_lock_init(&p->alloc_lock);
        spin_lock_init(&p->proc_lock);

        clear_tsk_thread_flag(p, TIF_SIGPENDING);
        init_sigpending(&p->pending);

        p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
        p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
        init_timer(&p->real_timer);
        p->real_timer.data = (unsigned long) p;

        p->utime = p->stime = 0;
        p->cutime = p->cstime = 0;
        p->lock_depth = -1;             /* -1 = no lock */
        p->start_time = get_jiffies_64();
        p->security = NULL;
        p->io_context = NULL;
        p->audit_context = NULL;
#ifdef CONFIG_NUMA
        p->mempolicy = mpol_copy(p->mempolicy);
        if (IS_ERR(p->mempolicy)) {
                retval = PTR_ERR(p->mempolicy);
                p->mempolicy = NULL;
                goto bad_fork_cleanup;
        }
#endif

        retval = -ENOMEM;
        if ((retval = security_task_alloc(p)))
                goto bad_fork_cleanup_policy;
        if ((retval = audit_alloc(p)))
                goto bad_fork_cleanup_security;
        /* copy all the process information */
        if ((retval = copy_semundo(clone_flags, p)))
                goto bad_fork_cleanup_audit;
        if ((retval = copy_files(clone_flags, p)))
                goto bad_fork_cleanup_semundo;
        if ((retval = copy_fs(clone_flags, p)))
                goto bad_fork_cleanup_files;
        if ((retval = copy_sighand(clone_flags, p)))
                goto bad_fork_cleanup_fs;
        if ((retval = copy_signal(clone_flags, p)))
                goto bad_fork_cleanup_sighand;
        if ((retval = copy_mm(clone_flags, p)))
                goto bad_fork_cleanup_signal;
        if ((retval = copy_namespace(clone_flags, p)))
                goto bad_fork_cleanup_mm;
        retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
        if (retval)
                goto bad_fork_cleanup_namespace;

        p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
        /*
         * Clear TID on mm_release()?
         */
        p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;

        /*
         * Syscall tracing should be turned off in the child regardless
         * of CLONE_PTRACE.
         */
        clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);

        /* Our parent execution domain becomes current domain
           These must match for thread signalling to apply */
        p->parent_exec_id = p->self_exec_id;

        /* ok, now we should be set up.. */
        p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
        p->pdeath_signal = 0;

        /* Perform scheduler related setup */
        sched_fork(p);

        /*
         * Ok, make it visible to the rest of the system.
         * We dont wake it up yet.
         */
        p->tgid = p->pid;
        p->group_leader = p;
        INIT_LIST_HEAD(&p->ptrace_children);
        INIT_LIST_HEAD(&p->ptrace_list);

        /* Need tasklist lock for parent etc handling! */
        write_lock_irq(&tasklist_lock);
        /*
         * Check for pending SIGKILL! The new thread should not be allowed
         * to slip out of an OOM kill. (or normal SIGKILL.)
         */
        if (sigismember(&current->pending.signal, SIGKILL)) {
                write_unlock_irq(&tasklist_lock);
                retval = -EINTR;
                goto bad_fork_cleanup_namespace;
        }

        /* CLONE_PARENT re-uses the old parent */
        if (clone_flags & CLONE_PARENT)
                p->real_parent = current->real_parent;
        else
                p->real_parent = current;
        p->parent = p->real_parent;

        if (clone_flags & CLONE_THREAD) {
                spin_lock(&current->sighand->siglock);
                /*
                 * Important: if an exit-all has been started then
                 * do not create this new thread - the whole thread
                 * group is supposed to exit anyway.
                 */
                if (current->signal->group_exit) {
                        spin_unlock(&current->sighand->siglock);
                        write_unlock_irq(&tasklist_lock);
                        retval = -EAGAIN;
                        goto bad_fork_cleanup_namespace;
                }
                p->tgid = current->tgid;
                p->group_leader = current->group_leader;

                if (current->signal->group_stop_count > 0) {
                        /*
                         * There is an all-stop in progress for the group.
                         * We ourselves will stop as soon as we check signals.
                         * Make the new thread part of that group stop too.
                         */
                        current->signal->group_stop_count++;
                        set_tsk_thread_flag(p, TIF_SIGPENDING);
                }

                spin_unlock(&current->sighand->siglock);
        }

        SET_LINKS(p);
        if (p->ptrace & PT_PTRACED)
                __ptrace_link(p, current->parent);

        attach_pid(p, PIDTYPE_PID, p->pid);
        if (thread_group_leader(p)) {
                attach_pid(p, PIDTYPE_TGID, p->tgid);
                attach_pid(p, PIDTYPE_PGID, process_group(p));
                attach_pid(p, PIDTYPE_SID, p->signal->session);
                if (p->pid)
                        __get_cpu_var(process_counts)++;
        } else
                link_pid(p, p->pids + PIDTYPE_TGID,
                         &p->group_leader->pids[PIDTYPE_TGID].pid);

        nr_threads++;
        write_unlock_irq(&tasklist_lock);
        retval = 0;

fork_out:
        if (retval)
                return ERR_PTR(retval);
        return p;

bad_fork_cleanup_namespace:
        exit_namespace(p);
bad_fork_cleanup_mm:
        exit_mm(p);
        if (p->active_mm)
                mmdrop(p->active_mm);
bad_fork_cleanup_signal:
        exit_signal(p);
bad_fork_cleanup_sighand:
        exit_sighand(p);
bad_fork_cleanup_fs:
        exit_fs(p);             /* blocking */
bad_fork_cleanup_files:
        exit_files(p);          /* blocking */
bad_fork_cleanup_semundo:
        exit_sem(p);
bad_fork_cleanup_audit:
        audit_free(p);
bad_fork_cleanup_security:
        security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
        mpol_free(p->mempolicy);
#endif
bad_fork_cleanup:
        if (p->pid > 0)
                free_pidmap(p->pid);
        if (p->binfmt)
                module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
        module_put(p->thread_info->exec_domain->module);
bad_fork_cleanup_count:
        put_group_info(p->group_info);
        atomic_dec(&p->user->processes);
        free_uid(p->user);
bad_fork_free:
        free_task(p);
        goto fork_out;
}
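/*
 * Minimal re-implementation of the ERR_PTR/IS_ERR/PTR_ERR idiom that
 * copy_process() uses for its error paths (a sketch for illustration;
 * the kernel's version lives in include/linux/err.h). An errno value
 * is encoded in the pointer itself, so callers such as do_fork() get
 * either a valid task_struct pointer or a negative error code out of
 * a single return value.
 */
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)
{
        return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
        return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
        /* The top 4095 addresses are reserved for error codes. */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *p = ERR_PTR(-22);         /* -EINVAL */

        if (IS_ERR(p))
                printf("error path taken, errno %ld\n", -PTR_ERR(p));
        return 0;
}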
/* The arrival of a new process might change a pid mapping on this
 * node. Update any other processes which might be using that mapping
 * for something */
static void masq_update_mappings(struct bproc_masq_master_t *m,
                                 struct task_struct *proc)
{
        struct list_head *l;
        struct task_struct *task;
        int /*id,*/ masq_id, real_id;

        /* Step 1: Our real/masq pid pair defines one masq->real mapping.
         * Any other processes in the system that have our masq pid in
         * their pgrp or session id might have to be updated. */

        /* TGID: Since we're not allowing thread groups to span multiple
         * nodes, we should never end up with a situation where they're
         * out of sync. (with multithreaded migration this might be
         * different some day.) */
        masq_id = proc->bproc.pid;
        real_id = proc->pid;
        for (l = m->proc_list.next; l != &m->proc_list; l = l->next) {
                task = list_entry(l, struct task_struct, bproc.list);
                /* tgid shouldn't be necessary since we're not going to allow
                 * thread groups to span nodes. */
                if (task->signal->bproc.pgrp == masq_id &&
                    task->signal->pgrp != real_id) {
                        detach_pid(task, PIDTYPE_PGID);
                        task->signal->pgrp = real_id;
                        attach_pid(task, PIDTYPE_PGID, real_id);
                }
                if (task->signal->bproc.session == masq_id &&
                    task->signal->session != real_id) {
                        detach_pid(task, PIDTYPE_SID);
                        task->signal->session = real_id;
                        attach_pid(task, PIDTYPE_SID, real_id);
                }
        }

#if 0
        /* Step 2: Our pgrp and session ID might exist in the system on
         * some other process. If so, make sure that ours is uptodate as
         * well. */
        if (proc->signal->bproc.pgrp != proc->bproc.pid) {
                id = masq_find_id_mapping(m, proc->signal->bproc.pgrp, proc);
                if (!id)
                        id = alloc_pidmap();
                if (!id)
                        printk(KERN_EMERG "bproc: failed to allocate pid.\n");
                if (proc->signal->pgrp != id) {
                        detach_pid(proc, PIDTYPE_PGID);
                        proc->signal->pgrp = id;
                        attach_pid(proc, PIDTYPE_PGID, id);
                }
        }
        if (proc->signal->bproc.session != proc->bproc.pid) {
                id = masq_find_id_mapping(m, proc->signal->bproc.session, proc);
                if (!id)
                        id = alloc_pidmap();
                if (!id)
                        printk(KERN_EMERG "bproc: failed to allocate pid.\n");
                if (proc->signal->session != id) {
                        detach_pid(proc, PIDTYPE_SID);
                        proc->signal->session = id;
                        attach_pid(proc, PIDTYPE_SID, id);
                }
        }
#endif
}
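/*
 * User-space counterpart of the detach_pid()/attach_pid() pair above (an
 * illustration, not BProc code): setpgid(2) performs the same PGID
 * rehash when a process moves into a new process group.
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        printf("pgid before: %d\n", (int)getpgid(0));
        /* Become leader of a fresh group: kernel rehashes PIDTYPE_PGID. */
        if (setpgid(0, 0))
                perror("setpgid");
        printf("pgid after:  %d\n", (int)getpgid(0));
        return 0;
}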
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
struct task_struct *copy_process(unsigned long clone_flags,
                                 unsigned long stack_start,
                                 struct pt_regs *regs,
                                 unsigned long stack_size,
                                 int *parent_tidptr,
                                 int *child_tidptr)
{
        int retval;
        struct task_struct *p = NULL;

        if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
                return ERR_PTR(-EINVAL);

        /*
         * Thread groups must share signals as well, and detached threads
         * can only be started up within the thread group.
         */
        if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
                return ERR_PTR(-EINVAL);
        if ((clone_flags & CLONE_DETACHED) && !(clone_flags & CLONE_THREAD))
                return ERR_PTR(-EINVAL);
        if (!(clone_flags & CLONE_DETACHED) && (clone_flags & CLONE_THREAD))
                return ERR_PTR(-EINVAL);

        retval = -ENOMEM;
        p = dup_task_struct(current);
        if (!p)
                goto fork_out;
        p->tux_info = NULL;

        retval = -EAGAIN;
        /*
         * Increment user->__count before the rlimit test so that it would
         * be correct if we take the bad_fork_free failure path.
         */
        atomic_inc(&p->user->__count);
        if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur) {
                if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
                        goto bad_fork_free;
        }
        atomic_inc(&p->user->processes);

        /*
         * Counter increases are protected by
         * the kernel lock so nr_threads can't
         * increase under us (but it may decrease).
         */
        if (nr_threads >= max_threads)
                goto bad_fork_cleanup_count;

        get_exec_domain(p->exec_domain);

        if (p->binfmt && p->binfmt->module)
                __MOD_INC_USE_COUNT(p->binfmt->module);

        p->did_exec = 0;
        p->swappable = 0;
        p->state = TASK_UNINTERRUPTIBLE;

        copy_flags(clone_flags, p);
        if (clone_flags & CLONE_IDLETASK)
                p->pid = 0;
        else {
                p->pid = alloc_pidmap();
                if (p->pid == -1)
                        goto bad_fork_cleanup;
        }
        retval = -EFAULT;
        if (clone_flags & CLONE_PARENT_SETTID)
                if (put_user(p->pid, parent_tidptr))
                        goto bad_fork_cleanup;

        INIT_LIST_HEAD(&p->run_list);

        INIT_LIST_HEAD(&p->children);
        INIT_LIST_HEAD(&p->sibling);
        init_waitqueue_head(&p->wait_chldexit);
        p->vfork_done = NULL;
        spin_lock_init(&p->alloc_lock);
        spin_lock_init(&p->switch_lock);

        p->sigpending = 0;
        init_sigpending(&p->pending);

        p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
        p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
        init_timer(&p->real_timer);
        p->real_timer.data = (unsigned long) p;

        p->leader = 0;          /* session leadership doesn't inherit */
        p->tty_old_pgrp = 0;
        memset(&p->utime, 0, sizeof(p->utime));
        memset(&p->stime, 0, sizeof(p->stime));
        memset(&p->cutime, 0, sizeof(p->cutime));
        memset(&p->cstime, 0, sizeof(p->cstime));
        memset(&p->group_utime, 0, sizeof(p->group_utime));
        memset(&p->group_stime, 0, sizeof(p->group_stime));
        memset(&p->group_cutime, 0, sizeof(p->group_cutime));
        memset(&p->group_cstime, 0, sizeof(p->group_cstime));

#ifdef CONFIG_SMP
        memset(&p->per_cpu_utime, 0, sizeof(p->per_cpu_utime));
        memset(&p->per_cpu_stime, 0, sizeof(p->per_cpu_stime));
#endif

        memset(&p->timing_state, 0, sizeof(p->timing_state));
        p->timing_state.type = PROCESS_TIMING_USER;
        p->last_sigxcpu = 0;

        p->array = NULL;
        p->lock_depth = -1;     /* -1 = no lock */
        p->start_time = jiffies;

        retval = -ENOMEM;
        /* copy all the process information */
        if (copy_files(clone_flags, p))
                goto bad_fork_cleanup;
        if (copy_fs(clone_flags, p))
                goto bad_fork_cleanup_files;
        if (copy_sighand(clone_flags, p))
                goto bad_fork_cleanup_fs;
        if (copy_signal(clone_flags, p))
                goto bad_fork_cleanup_sighand;
        if (copy_mm(clone_flags, p))
                goto bad_fork_cleanup_signal;
        if (copy_namespace(clone_flags, p))
                goto bad_fork_cleanup_mm;
        retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
        if (retval)
                goto bad_fork_cleanup_namespace;
        p->semundo = NULL;

        p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
        /*
         * Clear TID on mm_release()?
         */
        p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;

        /* Our parent execution domain becomes current domain
           These must match for thread signalling to apply */
        p->parent_exec_id = p->self_exec_id;

        /* ok, now we should be set up.. */
        p->swappable = 1;
        if (clone_flags & CLONE_DETACHED)
                p->exit_signal = -1;
        else
                p->exit_signal = clone_flags & CSIGNAL;
        p->pdeath_signal = 0;

        /*
         * Share the timeslice between parent and child, thus the
         * total amount of pending timeslices in the system doesnt change,
         * resulting in more scheduling fairness.
         */
        local_irq_disable();
        p->time_slice = (current->time_slice + 1) >> 1;
        p->first_time_slice = 1;
        /*
         * The remainder of the first timeslice might be recovered by
         * the parent if the child exits early enough.
         */
        current->time_slice >>= 1;
        p->last_run = jiffies;
        if (!current->time_slice) {
                /*
                 * This case is rare, it happens when the parent has only
                 * a single jiffy left from its timeslice. Taking the
                 * runqueue lock is not a problem.
                 */
                current->time_slice = 1;
                scheduler_tick(0 /* don't update the time stats */);
        }
        local_irq_enable();

        if ((int)current->time_slice <= 0)
                BUG();
        if ((int)p->time_slice <= 0)
                BUG();

        /*
         * Ok, add it to the run-queues and make it
         * visible to the rest of the system.
         *
         * Let it rip!
         */
        p->tgid = p->pid;
        p->group_leader = p;
        INIT_LIST_HEAD(&p->ptrace_children);
        INIT_LIST_HEAD(&p->ptrace_list);

        /* Need tasklist lock for parent etc handling! */
        write_lock_irq(&tasklist_lock);
        /*
         * Check for pending SIGKILL! The new thread should not be allowed
         * to slip out of an OOM kill. (or normal SIGKILL.)
         */
        if (sigismember(&current->pending.signal, SIGKILL)) {
                write_unlock_irq(&tasklist_lock);
                retval = -EINTR;
                goto bad_fork_cleanup_namespace;
        }

        /* CLONE_PARENT re-uses the old parent */
        if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
                p->real_parent = current->real_parent;
        else
                p->real_parent = current;
        p->parent = p->real_parent;

        if (clone_flags & CLONE_THREAD) {
                spin_lock(&current->sighand->siglock);
                /*
                 * Important: if an exit-all has been started then
                 * do not create this new thread - the whole thread
                 * group is supposed to exit anyway.
                 */
                if (current->signal->group_exit) {
                        spin_unlock(&current->sighand->siglock);
                        write_unlock_irq(&tasklist_lock);
                        retval = -EINTR;
                        goto bad_fork_cleanup_namespace;
                }
                p->tgid = current->tgid;
                p->group_leader = current->group_leader;

                if (current->signal->group_stop_count > 0) {
                        /*
                         * There is an all-stop in progress for the group.
                         * We ourselves will stop as soon as we check signals.
                         * Make the new thread part of that group stop too.
                         */
                        current->signal->group_stop_count++;
                        p->sigpending = 1;
                }

                spin_unlock(&current->sighand->siglock);
        }

        SET_LINKS(p);
        if (p->ptrace & PT_PTRACED)
                __ptrace_link(p, current->parent);

        attach_pid(p, PIDTYPE_PID, p->pid);
        if (thread_group_leader(p)) {
                attach_pid(p, PIDTYPE_TGID, p->tgid);
                attach_pid(p, PIDTYPE_PGID, p->pgrp);
                attach_pid(p, PIDTYPE_SID, p->session);
        } else {
                link_pid(p, p->pids + PIDTYPE_TGID,
                         &p->group_leader->pids[PIDTYPE_TGID].pid);
        }

        /* clear controlling tty of new task if parent's was just cleared */
        if (!current->tty && p->tty)
                p->tty = NULL;

        nr_threads++;
        write_unlock_irq(&tasklist_lock);
        retval = 0;

fork_out:
        if (retval)
                return ERR_PTR(retval);
        return p;

bad_fork_cleanup_namespace:
        exit_namespace(p);
bad_fork_cleanup_mm:
        exit_mm(p);
        if (p->active_mm)
                mmdrop(p->active_mm);
bad_fork_cleanup_signal:
        exit_signal(p);
bad_fork_cleanup_sighand:
        exit_sighand(p);
bad_fork_cleanup_fs:
        exit_fs(p);             /* blocking */
bad_fork_cleanup_files:
        exit_files(p);          /* blocking */
bad_fork_cleanup:
        if (p->pid > 0)
                free_pidmap(p->pid);
        put_exec_domain(p->exec_domain);
        if (p->binfmt && p->binfmt->module)
                __MOD_DEC_USE_COUNT(p->binfmt->module);
bad_fork_cleanup_count:
        atomic_dec(&p->user->processes);
bad_fork_free:
        p->state = TASK_ZOMBIE; /* debug */
        atomic_dec(&p->usage);
        put_task_struct(p);
        goto fork_out;
}
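/*
 * Quick user-space view of the PID hash types that attach_pid() populates
 * above (illustrative; PIDTYPE_* names are from the listings, and on
 * modern kernels getpid() reports the tgid). gettid() exposes the
 * per-task PIDTYPE_PID entry; getpgid() and getsid() are backed by the
 * PGID and SID entries.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        printf("pid/tgid (PIDTYPE_TGID) = %d\n", (int)getpid());
        printf("tid      (PIDTYPE_PID)  = %ld\n", (long)syscall(SYS_gettid));
        printf("pgid     (PIDTYPE_PGID) = %d\n", (int)getpgid(0));
        printf("sid      (PIDTYPE_SID)  = %d\n", (int)getsid(0));
        return 0;
}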