Example #1
File: fork.c  Project: Zhang626/miniOS
int do_fork(int clone_flags,
            struct frame *frame,
            struct task_struct **store)
{
    int retval;
    struct task_struct *task;

    retval = -ENOMEM;
    task = alloc_task();
    if (!task) {
        goto out_fork;
    }

    INIT_LIST_HEAD(&task->list);
    INIT_LIST_HEAD(&task->wait_list);
    RBTREE_NODE_INIT(&task->sched_entry);

    retval = do_copy_mm(clone_flags, task);
    if (retval < 0) {
        goto bad_fork_cleanup_mm;
    }

    retval = do_copy_kern_stack(frame, task);
    if (retval < 0) {
        goto bad_fork_cleanup_mm;
    }

    task->trapframe = frame;
    task->ppid = curtask->pid;
    task->task_status = TASK_RUNNING;
    task->task_type = TASK_TYPE_USER;
    task->timeslice = 10;
    task->prio = 1;

    *store = task;
    task_pidmap[task->pid] = task;

    return 0;

    /* Error paths fall through: release the copied mm, then the task itself. */
bad_fork_cleanup_mm:
    exit_mm(task);
bad_fork_free:
    free_task(task);
out_fork:
    return retval;
}
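A hypothetical caller sketch (not from the miniOS sources) showing how do_fork()'s convention is typically consumed: it returns 0 and fills *store on success, or a negative errno such as -ENOMEM on failure. The names sys_fork and schedule_enqueue are assumptions used only for illustration.

/* Hypothetical caller; sys_fork and schedule_enqueue are assumed names. */
int sys_fork(struct frame *frame)
{
    int retval;
    struct task_struct *child;

    retval = do_fork(0, frame, &child);
    if (retval < 0) {
        return retval;              /* e.g. -ENOMEM from alloc_task() */
    }

    schedule_enqueue(child);        /* assumed helper: make the child runnable */
    return child->pid;              /* the parent gets the new pid */
}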
Example #2
void daemonize(void)
{
	struct fs_struct *fs;


	/*
	 * If we were started as result of loading a module, close all of the
	 * user space pages.  We don't need them, and if we didn't close them
	 * they would be locked into memory.
	 */
	exit_mm(current);

	current->session = 1;
	current->pgrp = 1;
	current->tty = NULL;

	/* Become as one with the init task */

	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);
 	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);
}
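In 2.4-era drivers, daemonize() is called at the top of a kernel thread's entry function so the thread sheds its user-space mm before entering its work loop. A minimal usage sketch, assuming a 2.4 kernel; my_kthread and the loop body are placeholders:

/* Minimal sketch: a kernel thread that detaches itself via daemonize(). */
static int my_kthread(void *unused)
{
	daemonize();                       /* drop the mm, share init's fs/files */
	strcpy(current->comm, "my_kthread");

	while (!signal_pending(current)) {
		current->state = TASK_INTERRUPTIBLE;
		schedule_timeout(HZ);      /* wake up roughly once per second */
		/* ... periodic work goes here ... */
	}
	return 0;
}

Such a thread would typically be spawned with kernel_thread(my_kthread, NULL, 0) from the module's init code.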
Example #3
/* main function of the polling thread
 */
static int lirc_thread(void *irctl)
{
	struct irctl *ir = irctl;
	
	lock_kernel();
	
	/* This thread doesn't need any user-level access,
	 * so get rid of all our resources
	 */
	exit_mm(current);
	exit_files(current);
	exit_fs(current);
	current->session = 1;
	current->pgrp = 1;
	current->euid = 0;
	current->tty = NULL;
	sigfillset(&current->blocked);
	
	strcpy(current->comm, "lirc_dev");
	
	unlock_kernel();
	
	if (ir->t_notify != NULL) {
		up(ir->t_notify);
	}
	
	dprintk(LOGHEAD "poll thread started\n", ir->p.name, ir->p.minor);
	
	do {
		if (ir->open) {
			if (ir->jiffies_to_wait) {
				current->state = TASK_INTERRUPTIBLE;
				schedule_timeout(ir->jiffies_to_wait);
			} else {
				interruptible_sleep_on(ir->p.get_queue(ir->p.data));
			}
			if (signal_pending(current)) {
				break;
			}
			if (!add_to_buf(ir)) {
				wake_up_interruptible(&ir->wait_poll);
			}
		} else {
			/* device is not opened, so we can sleep for half a second */
			current->state = TASK_INTERRUPTIBLE;
			schedule_timeout(HZ/2);
		}
	} while (!ir->shutdown && !signal_pending(current));
	
	ir->tpid = -1;
	if (ir->t_notify != NULL) {
		up(ir->t_notify);
	}
	
	dprintk(LOGHEAD "poll thread ended\n", ir->p.name, ir->p.minor);
	
	return 0;
}
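A sketch of the start-up side of the ir->t_notify handshake used above (illustrative, not necessarily the exact lirc_dev code): the creator points t_notify at a locked semaphore, spawns lirc_thread() with kernel_thread(), and blocks until the thread's first up().

/* Illustrative start-up side of the t_notify handshake (assumed helper). */
static int start_poll_thread(struct irctl *ir)
{
	DECLARE_MUTEX_LOCKED(tn);          /* semaphore starts locked */

	ir->shutdown = 0;
	ir->t_notify = &tn;

	ir->tpid = kernel_thread(lirc_thread, ir, 0);
	if (ir->tpid < 0)
		return -ECHILD;            /* could not spawn the thread */

	down(&tn);                         /* wait until lirc_thread() is running */
	ir->t_notify = NULL;
	return 0;
}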
Example #4
static void daemonize_keep_fs(void)
{
	int fd;
	
	exit_mm(current);
	set_special_pids(1, 1);
	current->tty = NULL;
	for (fd = 0; fd < 255; fd++)
		close(fd);
}
Example #5
File: exit.c  Project: bingone/fuckOS
int exit(struct task_struct *task)
{
	assert(task);

	if (task == curtask)
		lcr3(pgd2p(kpgd));
	
	schedule_delete_task(task);
	exit_notify(task);
	exit_mm(task->mm);
	exit_files(task);
	exit_task(task);

	if (task == curtask) {
		curtask = NULL;
		schedule();
	}
	return 0;
}
Example #6
/*
 * Start up an nfsiod process. This is an awful hack, because when running
 * as a module, we will keep insmod's memory. Besides, the current->comm
 * hack won't work in this case
 * The best would be to have a syscall for nfs client control that (among
 * other things) forks biod's.
 * Alternatively, we might want to have the idle task spawn biod's on demand.
 */
static int run_nfsiod(void *dummy)
{
	int	ret;

#ifdef __SMP__
	lock_kernel();
	syscall_count++;
#endif

	MOD_INC_USE_COUNT;
	exit_mm(current);
	current->session = 1;
	current->pgrp = 1;
	sprintf(current->comm, "nfsiod");
#ifndef MODULE
	current->blocked = ~0UL;
#endif
	ret = nfsiod();
	MOD_DEC_USE_COUNT;
	return ret;
}
Example #7
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
struct task_struct *copy_process(unsigned long clone_flags,
				 unsigned long stack_start,
				 struct pt_regs *regs,
				 unsigned long stack_size,
				 int __user *parent_tidptr,
				 int __user *child_tidptr)
{
	int retval;
	struct task_struct *p = NULL;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	retval = -EAGAIN;
	if (atomic_read(&p->user->processes) >=
			p->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
				p->user != &root_user)
			goto bad_fork_free;
	}

	atomic_inc(&p->user->__count);
	atomic_inc(&p->user->processes);
	get_group_info(p->group_info);

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(p->thread_info->exec_domain->module))
		goto bad_fork_cleanup_count;

	if (p->binfmt && !try_module_get(p->binfmt->module))
		goto bad_fork_cleanup_put_domain;

	p->did_exec = 0;
	copy_flags(clone_flags, p);
	if (clone_flags & CLONE_IDLETASK)
		p->pid = 0;
	else {
		p->pid = alloc_pidmap();
		if (p->pid == -1)
			goto bad_fork_cleanup;
	}
	retval = -EFAULT;
	if (clone_flags & CLONE_PARENT_SETTID)
		if (put_user(p->pid, parent_tidptr))
			goto bad_fork_cleanup;

	p->proc_dentry = NULL;

	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	init_waitqueue_head(&p->wait_chldexit);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);
	spin_lock_init(&p->proc_lock);

	clear_tsk_thread_flag(p, TIF_SIGPENDING);
	init_sigpending(&p->pending);

	p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
	p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
	init_timer(&p->real_timer);
	p->real_timer.data = (unsigned long) p;

	p->utime = p->stime = 0;
	p->cutime = p->cstime = 0;
	p->lock_depth = -1;		/* -1 = no lock */
	p->start_time = get_jiffies_64();
	p->security = NULL;
	p->io_context = NULL;
	p->audit_context = NULL;
#ifdef CONFIG_NUMA
 	p->mempolicy = mpol_copy(p->mempolicy);
 	if (IS_ERR(p->mempolicy)) {
 		retval = PTR_ERR(p->mempolicy);
 		p->mempolicy = NULL;
 		goto bad_fork_cleanup;
 	}
#endif

	retval = -ENOMEM;
	if ((retval = security_task_alloc(p)))
		goto bad_fork_cleanup_policy;
	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_security;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_namespace(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_namespace;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);

	/* Our parent execution domain becomes current domain
	   These must match for thread signalling to apply */
	   
	p->parent_exec_id = p->self_exec_id;

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;

	/* Perform scheduler related setup */
	sched_fork(p);

	/*
	 * Ok, make it visible to the rest of the system.
	 * We don't wake it up yet.
	 */
	p->tgid = p->pid;
	p->group_leader = p;
	INIT_LIST_HEAD(&p->ptrace_children);
	INIT_LIST_HEAD(&p->ptrace_list);

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);
	/*
	 * Check for pending SIGKILL! The new thread should not be allowed
	 * to slip out of an OOM kill. (or normal SIGKILL.)
	 */
	if (sigismember(&current->pending.signal, SIGKILL)) {
		write_unlock_irq(&tasklist_lock);
		retval = -EINTR;
		goto bad_fork_cleanup_namespace;
	}

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & CLONE_PARENT)
		p->real_parent = current->real_parent;
	else
		p->real_parent = current;
	p->parent = p->real_parent;

	if (clone_flags & CLONE_THREAD) {
		spin_lock(&current->sighand->siglock);
		/*
		 * Important: if an exit-all has been started then
		 * do not create this new thread - the whole thread
		 * group is supposed to exit anyway.
		 */
		if (current->signal->group_exit) {
			spin_unlock(&current->sighand->siglock);
			write_unlock_irq(&tasklist_lock);
			retval = -EAGAIN;
			goto bad_fork_cleanup_namespace;
		}
		p->tgid = current->tgid;
		p->group_leader = current->group_leader;

		if (current->signal->group_stop_count > 0) {
			/*
			 * There is an all-stop in progress for the group.
			 * We ourselves will stop as soon as we check signals.
			 * Make the new thread part of that group stop too.
			 */
			current->signal->group_stop_count++;
			set_tsk_thread_flag(p, TIF_SIGPENDING);
		}

		spin_unlock(&current->sighand->siglock);
	}

	SET_LINKS(p);
	if (p->ptrace & PT_PTRACED)
		__ptrace_link(p, current->parent);

	attach_pid(p, PIDTYPE_PID, p->pid);
	if (thread_group_leader(p)) {
		attach_pid(p, PIDTYPE_TGID, p->tgid);
		attach_pid(p, PIDTYPE_PGID, process_group(p));
		attach_pid(p, PIDTYPE_SID, p->signal->session);
		if (p->pid)
			__get_cpu_var(process_counts)++;
	} else
		link_pid(p, p->pids + PIDTYPE_TGID, &p->group_leader->pids[PIDTYPE_TGID].pid);

	nr_threads++;
	write_unlock_irq(&tasklist_lock);
	retval = 0;

fork_out:
	if (retval)
		return ERR_PTR(retval);
	return p;

bad_fork_cleanup_namespace:
	exit_namespace(p);
bad_fork_cleanup_mm:
	exit_mm(p);
	if (p->active_mm)
		mmdrop(p->active_mm);
bad_fork_cleanup_signal:
	exit_signal(p);
bad_fork_cleanup_sighand:
	exit_sighand(p);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
	mpol_free(p->mempolicy);
#endif
bad_fork_cleanup:
	if (p->pid > 0)
		free_pidmap(p->pid);
	if (p->binfmt)
		module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
	module_put(p->thread_info->exec_domain->module);
bad_fork_cleanup_count:
	put_group_info(p->group_info);
	atomic_dec(&p->user->processes);
	free_uid(p->user);
bad_fork_free:
	free_task(p);
	goto fork_out;
}
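Every failure path above returns an ERR_PTR()-encoded errno rather than NULL, so the caller (do_fork() in the mainline kernel) unpacks the result roughly as in this sketch; the wrapper name and its exact parameters are illustrative.

/* Illustrative caller: unpack copy_process()'s ERR_PTR convention. */
static long spawn_task(unsigned long clone_flags, unsigned long stack_start,
		       struct pt_regs *regs, unsigned long stack_size,
		       int __user *parent_tidptr, int __user *child_tidptr)
{
	struct task_struct *p;

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 parent_tidptr, child_tidptr);
	if (IS_ERR(p))
		return PTR_ERR(p);         /* e.g. -EINVAL, -EAGAIN or -ENOMEM */

	/* Success: p is the new task; the caller wakes it up when ready. */
	return p->pid;
}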
Example #8
int ticker_thread(void *data)
{
    struct timer_list ticker;

    // detach

    lock_kernel();
    exit_mm(current);
    exit_files(current);
    exit_fs(current);

    // setsid equivalent, used at start of kernel thread, no error checks needed, or at least none made :).
    current->leader = 1;
    current->session = current->pgrp = current->pid;
    current->tty = NULL;
    current->tty_old_pgrp = 0;

    // Name this thread
    sprintf(current->comm, "usbd-bi");

    // setup signal handler
    current->exit_signal = SIGCHLD;
    spin_lock(&current->sigmask_lock);
    flush_signals(current);
    spin_unlock(&current->sigmask_lock);

    // XXX Run at a high priority, ahead of sync and friends
//    current->nice = -20;
    current->policy = SCHED_OTHER;

    unlock_kernel();

    // setup timer
    init_timer(&ticker);
    ticker.data = 0;
    ticker.function = ticker_tick;

    // let startup continue
    up(&ticker_sem_start);

    // process loop
    for (ticker_timer_set = ticker_terminating = 0; !ticker_terminating;) {

        char buf[100];
        char *cp;

        if (!ticker_timer_set) {
            mod_timer(&ticker, jiffies + HZ*RETRYTIME);
        }

        // wait for someone to tell us to do something
        down(&ticker_sem_work);

        if (udc_interrupts != udc_interrupts_last) {
            dbg_tick(1,"--------------");
        }

        // do some work
        memset(buf, 0, sizeof(buf));
        cp = buf;

        if (dbgflg_usbdbi_tick) {
            unsigned long flags;
            local_irq_save(flags);
            dbg_tick(1,"[%d]", udc_interrupts);
            udc_regs();
            local_irq_restore(flags);
        }
    }

    // remove timer
    del_timer(&ticker);

    // let the process stopping us know we are done and return
    up(&ticker_sem_start);
    return 0;
}
Example #9
int ticker_thread (void *data)
{
	struct timer_list ticker;

	// detach

	lock_kernel ();
	exit_mm (current);
	exit_files (current);
	exit_fs (current);

	// setsid equivalent, used at start of kernel thread, no error checks needed, or at least none made :). 
	current->leader = 1;
	current->session = current->pgrp = current->pid;
	current->tty = NULL;
	current->tty_old_pgrp = 0;

	// Name this thread 
	sprintf (current->comm, "usbd-bi");

	// setup signal handler
	current->exit_signal = SIGCHLD;
	spin_lock (&current->sigmask_lock);
	flush_signals (current);
	spin_unlock (&current->sigmask_lock);

	// XXX Run at a high priority, ahead of sync and friends
	// current->nice = -20;
	current->policy = SCHED_OTHER;

	unlock_kernel ();

	// setup timer
	init_timer (&ticker);
	ticker.data = 0;
	ticker.function = ticker_tick;

	// let startup continue
	up (&ticker_sem_start);

	// process loop
	for (ticker_timer_set = ticker_terminating = 0; !ticker_terminating;) {

		char buf[100];
		char *cp;

		if (!ticker_timer_set) {
			mod_timer (&ticker, jiffies + HZ * RETRYTIME);
		}
		// wait for someone to tell us to do something
		down (&ticker_sem_work);

		if (udc_interrupts != udc_interrupts_last) {
			dbg_tick (3, "--------------");
		}
		// do some work
		memset (buf, 0, sizeof (buf));
		cp = buf;

		if (dbgflg_usbdbi_tick) {
			unsigned long flags;
			local_irq_save (flags);
			dbg_tick (2, "[%d]", udc_interrupts);
			udc_regs ();
			local_irq_restore (flags);
		}

#if 0
		// XXX
#if defined(CONFIG_SA1110_CALYPSO) && defined(CONFIG_PM) && defined(CONFIG_USBD_TRAFFIC_KEEPAWAKE)
		/* Check for rx/tx activity, and reset the sleep timer if present */
		if (device->usbd_rxtx_timestamp != device->usbd_last_rxtx_timestamp) {
			extern void resetSleepTimer (void);
			dbg_tick (7, "resetting sleep timer");
			resetSleepTimer ();
			device->usbd_last_rxtx_timestamp = device->usbd_rxtx_timestamp;
		}
#endif
#endif
#if 0
		/* Check for TX endpoint stall.  If the endpoint has
		   stalled, we have probably run into the DMA/TCP window
		   problem, and the only thing we can do is disconnect
		   from the bus, then reconnect (and re-enumerate...). */
		if (reconnect > 0 && 0 >= --reconnect) {
			dbg_init (0, "TX stall disconnect finished");
			udc_connect ();
		} else if (0 != USBD_STALL_TIMEOUT_SECONDS) {
			// Stall watchdog unleashed
			unsigned long now, tx_waiting;
			now = jiffies;
			tx_waiting = (now - tx_queue_head_timestamp (now)) / HZ;
			if (tx_waiting > USBD_STALL_TIMEOUT_SECONDS) {
				/* The URB at the head of the queue has waited too long */
				reconnect = USBD_STALL_DISCONNECT_DURATION;
				dbg_init (0, "TX stalled, disconnecting for %d seconds", reconnect);
				udc_disconnect ();
			}
		}
#endif
	}

	// remove timer
	del_timer (&ticker);

	// let the process stopping us know we are done and return
	up (&ticker_sem_start);
	return 0;
}
Example #10
/*
 * This is the rpciod kernel thread
 */
static int
rpciod(void *ptr)
{
	struct wait_queue **assassin = (struct wait_queue **) ptr;
	unsigned long	oldflags;
	int		rounds = 0;

	MOD_INC_USE_COUNT;
	lock_kernel();
	/*
	 * Let our maker know we're running ...
	 */
	rpciod_pid = current->pid;
	up(&rpciod_running);

	exit_files(current);
	exit_mm(current);

	spin_lock_irq(&current->sigmask_lock);
	siginitsetinv(&current->blocked, sigmask(SIGKILL));
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	current->session = 1;
	current->pgrp = 1;
	sprintf(current->comm, "rpciod");

	dprintk("RPC: rpciod starting (pid %d)\n", rpciod_pid);
	while (rpciod_users) {
		if (signalled()) {
			rpciod_killall();
			flush_signals(current);
		}
		__rpc_schedule();

		if (++rounds >= 64) {	/* safeguard */
			schedule();
			rounds = 0;
		}
		save_flags(oldflags); cli();
		dprintk("RPC: rpciod running checking dispatch\n");
		rpciod_tcp_dispatcher();

		if (!schedq.task) {
			dprintk("RPC: rpciod back to sleep\n");
			interruptible_sleep_on(&rpciod_idle);
			dprintk("RPC: switch to rpciod\n");
			rpciod_tcp_dispatcher();
			rounds = 0;
		}
		restore_flags(oldflags);
	}

	dprintk("RPC: rpciod shutdown commences\n");
	if (all_tasks) {
		printk(KERN_ERR "rpciod: active tasks at shutdown?!\n");
		rpciod_killall();
	}

	rpciod_pid = 0;
	wake_up(assassin);

	dprintk("RPC: rpciod exiting\n");
	MOD_DEC_USE_COUNT;
	return 0;
}
Example #11
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags).  The actual kick-off is left to the caller.
 */
struct task_struct *copy_process(unsigned long clone_flags,
			         unsigned long stack_start,
			         struct pt_regs *regs,
			         unsigned long stack_size,
			         int *parent_tidptr,
			         int *child_tidptr)
{
	int retval;
	struct task_struct *p = NULL;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);
	if ((clone_flags & CLONE_DETACHED) && !(clone_flags & CLONE_THREAD))
		return ERR_PTR(-EINVAL);
	if (!(clone_flags & CLONE_DETACHED) && (clone_flags & CLONE_THREAD))
		return ERR_PTR(-EINVAL);

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	p->tux_info = NULL;

	retval = -EAGAIN;

	/*
	 * Increment user->__count before the rlimit test so that it would
	 * be correct if we take the bad_fork_free failure path.
	 */
	atomic_inc(&p->user->__count);
	if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
			goto bad_fork_free;
	}

	atomic_inc(&p->user->processes);

	/*
	 * Counter increases are protected by
	 * the kernel lock so nr_threads can't
	 * increase under us (but it may decrease).
	 */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;
	
	get_exec_domain(p->exec_domain);

	if (p->binfmt && p->binfmt->module)
		__MOD_INC_USE_COUNT(p->binfmt->module);

	p->did_exec = 0;
	p->swappable = 0;
	p->state = TASK_UNINTERRUPTIBLE;

	copy_flags(clone_flags, p);
	if (clone_flags & CLONE_IDLETASK)
		p->pid = 0;
	else {
		p->pid = alloc_pidmap();
		if (p->pid == -1)
			goto bad_fork_cleanup;
	}
	
	retval = -EFAULT;
	if (clone_flags & CLONE_PARENT_SETTID)
		if (put_user(p->pid, parent_tidptr))
			goto bad_fork_cleanup;

	INIT_LIST_HEAD(&p->run_list);

	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	init_waitqueue_head(&p->wait_chldexit);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);
	spin_lock_init(&p->switch_lock);

	p->sigpending = 0;
	init_sigpending(&p->pending);

	p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
	p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
	init_timer(&p->real_timer);
	p->real_timer.data = (unsigned long) p;

	p->leader = 0;		/* session leadership doesn't inherit */
	p->tty_old_pgrp = 0;
	memset(&p->utime, 0, sizeof(p->utime));
	memset(&p->stime, 0, sizeof(p->stime));
	memset(&p->cutime, 0, sizeof(p->cutime));
	memset(&p->cstime, 0, sizeof(p->cstime));
	memset(&p->group_utime, 0, sizeof(p->group_utime));
	memset(&p->group_stime, 0, sizeof(p->group_stime));
	memset(&p->group_cutime, 0, sizeof(p->group_cutime));
	memset(&p->group_cstime, 0, sizeof(p->group_cstime));

#ifdef CONFIG_SMP
	memset(&p->per_cpu_utime, 0, sizeof(p->per_cpu_utime));
	memset(&p->per_cpu_stime, 0, sizeof(p->per_cpu_stime));
#endif

	memset(&p->timing_state, 0, sizeof(p->timing_state));
	p->timing_state.type = PROCESS_TIMING_USER;
	p->last_sigxcpu = 0;
	p->array = NULL;
	p->lock_depth = -1;		/* -1 = no lock */
	p->start_time = jiffies;

	retval = -ENOMEM;
	/* copy all the process information */
	if (copy_files(clone_flags, p))
		goto bad_fork_cleanup;
	if (copy_fs(clone_flags, p))
		goto bad_fork_cleanup_files;
	if (copy_sighand(clone_flags, p))
		goto bad_fork_cleanup_fs;
	if (copy_signal(clone_flags, p))
		goto bad_fork_cleanup_sighand;
	if (copy_mm(clone_flags, p))
		goto bad_fork_cleanup_signal;
	if (copy_namespace(clone_flags, p))
		goto bad_fork_cleanup_mm;
	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_namespace;
	p->semundo = NULL;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID)
		? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID)
		? child_tidptr : NULL;

	/* Our parent execution domain becomes current domain
	   These must match for thread signalling to apply */
	   
	p->parent_exec_id = p->self_exec_id;

	/* ok, now we should be set up.. */
	p->swappable = 1;
	if (clone_flags & CLONE_DETACHED)
		p->exit_signal = -1;
	else
		p->exit_signal = clone_flags & CSIGNAL;
	p->pdeath_signal = 0;

	/*
	 * Share the timeslice between parent and child, thus the
	 * total amount of pending timeslices in the system doesn't change,
	 * resulting in more scheduling fairness.
	 */
	local_irq_disable();
	p->time_slice = (current->time_slice + 1) >> 1;
	p->first_time_slice = 1;
	/*
	 * The remainder of the first timeslice might be recovered by
	 * the parent if the child exits early enough.
	 */
	current->time_slice >>= 1;
	p->last_run = jiffies;
	if (!current->time_slice) {
		/*
		 * This case is rare, it happens when the parent has only
		 * a single jiffy left from its timeslice. Taking the
		 * runqueue lock is not a problem.
		 */
		current->time_slice = 1;
		scheduler_tick(0 /* don't update the time stats */);
	}
	local_irq_enable();

	if ((int)current->time_slice <= 0)
		BUG();
	if ((int)p->time_slice <= 0)
		BUG();

	/*
	 * Ok, add it to the run-queues and make it
	 * visible to the rest of the system.
	 *
	 * Let it rip!
	 */
	p->tgid = p->pid;
	p->group_leader = p;
	INIT_LIST_HEAD(&p->ptrace_children);
	INIT_LIST_HEAD(&p->ptrace_list);

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);
	/*
	 * Check for pending SIGKILL! The new thread should not be allowed
	 * to slip out of an OOM kill. (or normal SIGKILL.)
	 */
	if (sigismember(&current->pending.signal, SIGKILL)) {
		write_unlock_irq(&tasklist_lock);
		retval = -EINTR;
		goto bad_fork_cleanup_namespace;
	}

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
		p->real_parent = current->real_parent;
	else
		p->real_parent = current;
	p->parent = p->real_parent;

	if (clone_flags & CLONE_THREAD) {
		spin_lock(&current->sighand->siglock);
		/*
		 * Important: if an exit-all has been started then
		 * do not create this new thread - the whole thread
		 * group is supposed to exit anyway.
		 */
		if (current->signal->group_exit) {
			spin_unlock(&current->sighand->siglock);
			write_unlock_irq(&tasklist_lock);
			retval = -EINTR;
			goto bad_fork_cleanup_namespace;
		}
		p->tgid = current->tgid;
		p->group_leader = current->group_leader;

		if (current->signal->group_stop_count > 0) {
			/*
			 * There is an all-stop in progress for the group.
			 * We ourselves will stop as soon as we check signals.
			 * Make the new thread part of that group stop too.
			 */
			current->signal->group_stop_count++;
			p->sigpending = 1;
		}

		spin_unlock(&current->sighand->siglock);
	}

	SET_LINKS(p);
	if (p->ptrace & PT_PTRACED)
		__ptrace_link(p, current->parent);

	attach_pid(p, PIDTYPE_PID, p->pid);
	if (thread_group_leader(p)) {
		attach_pid(p, PIDTYPE_TGID, p->tgid);
		attach_pid(p, PIDTYPE_PGID, p->pgrp);
		attach_pid(p, PIDTYPE_SID, p->session);
	} else {
		link_pid(p, p->pids + PIDTYPE_TGID,
			&p->group_leader->pids[PIDTYPE_TGID].pid);
	}

	/* clear controlling tty of new task if parent's was just cleared */
	if (!current->tty && p->tty)
		p->tty = NULL;

	nr_threads++;
	write_unlock_irq(&tasklist_lock);
	retval = 0;

fork_out:
	if (retval)
		return ERR_PTR(retval);
	return p;

bad_fork_cleanup_namespace:
	exit_namespace(p);
bad_fork_cleanup_mm:
	exit_mm(p);
	if (p->active_mm)
		mmdrop(p->active_mm);
bad_fork_cleanup_signal:
	exit_signal(p);
bad_fork_cleanup_sighand:
	exit_sighand(p);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup:
	if (p->pid > 0)
		free_pidmap(p->pid);
	put_exec_domain(p->exec_domain);
	if (p->binfmt && p->binfmt->module)
		__MOD_DEC_USE_COUNT(p->binfmt->module);
bad_fork_cleanup_count:
	atomic_dec(&p->user->processes);
bad_fork_free:
	p->state = TASK_ZOMBIE; /* debug */
	atomic_dec(&p->usage);
	put_task_struct(p);
	goto fork_out;
}