Example #1
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	unsigned long *stackend;
	int node = tsk_fork_get_node(orig);
	int err;

	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	ti = alloc_thread_info_node(tsk, node);
	if (!ti)
		goto free_tsk;

	err = arch_dup_task_struct(tsk, orig);
	if (err)
		goto free_ti;

	tsk->stack = ti;

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/*
	 * One for us, one for whoever does the "release_task()" (usually
	 * parent)
	 */
	atomic_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;

	account_kernel_stack(ti, 1);

	return tsk;

free_ti:
	free_thread_info(ti);
free_tsk:
	free_task_struct(tsk);
	return NULL;
}
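
Every variant below plants STACK_END_MAGIC at the lowest usable word of the new kernel stack. As a minimal sketch of where that word comes from, assuming the usual layout with thread_info at the base of a downward-growing stack (this matches the generic end_of_stack() of kernels from this era):

/* thread_info sits at the bottom of the stack allocation, so the word
 * immediately past it is the deepest stack slot; later overflow checks
 * verify the magic value is still intact. */
static inline unsigned long *end_of_stack(struct task_struct *p)
{
	return (unsigned long *)(task_thread_info(p) + 1);
}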
Example #2
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	unsigned long *stackend;

	int err;

	prepare_to_copy(orig);

	tsk = alloc_task_struct();
	if (!tsk)
		return NULL;

	ti = alloc_thread_info(tsk);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	err = arch_dup_task_struct(tsk, orig);
	if (err)
		goto out;

	tsk->stack = ti;

	err = prop_local_init_single(&tsk->dirties);
	if (err)
		goto out;

	setup_thread_stack(tsk, orig);
	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/* One for us, one for whoever does the "release_task()" (usually parent) */
	atomic_set(&tsk->usage, 2);
	atomic_set(&tsk->fs_excl, 0);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	return tsk;

out:
	free_thread_info(ti);
	free_task_struct(tsk);
	return NULL;
}
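
The usage count of 2 set above is consumed on the release side: one reference is dropped by release_task() (usually run by the parent), the other by the final put_task_struct(). A sketch of that helper as defined in include/linux/sched.h in this era (exact form varies by version):

static inline void put_task_struct(struct task_struct *t)
{
	/* Free the task once the last reference is gone. */
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}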
Example #3
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	unsigned long *stackend;
	int node = tsk_fork_get_node(orig);
	int err;

	prepare_to_copy(orig);

	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	ti = alloc_thread_info_node(tsk, node);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	err = arch_dup_task_struct(tsk, orig);
	if (err)
		goto out;

	tsk->stack = ti;

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/* One for us, one for whoever does the "release_task()" (usually parent) */
	atomic_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;

	account_kernel_stack(ti, 1);

	return tsk;

out:
	free_thread_info(ti);
	free_task_struct(tsk);
	return NULL;
}
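
Example #3 (like #1 and #6) allocates on the node chosen by tsk_fork_get_node(), making fork NUMA-aware. Roughly, that helper lets kthreadd honor the node requested by kthread_create_on_node() and otherwise stays on the current node; a sketch, hedged since the guards differ across versions:

int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)		/* forking a kernel thread? */
		return tsk->pref_node_fork;	/* node the caller asked for */
#endif
	return numa_node_id();			/* otherwise allocate locally */
}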
Example #4
// Must be pinned to the desired CPU upon entry! // FIXME verify?
int spawn_thread(torque_ctx *ctx){
	pthread_attr_t attr;
	tguard tidguard = {
		.ctx = ctx,
	};
	pthread_t tid;
	int ret = 0;

	if(setup_thread_stack(&tidguard.stack,&attr)){
		return -1;
	}
	if(pthread_mutex_init(&tidguard.lock,NULL)){
		dealloc(tidguard.stack.ss_sp,tidguard.stack.ss_size);
		pthread_attr_destroy(&attr);
		return -1;
	}
	if(pthread_cond_init(&tidguard.cond,NULL)){
		pthread_mutex_destroy(&tidguard.lock);
		dealloc(tidguard.stack.ss_sp,tidguard.stack.ss_size);
		pthread_attr_destroy(&attr);
		return -1;
	}
	if(pthread_create(&tid,&attr,thread,&tidguard)){
		pthread_mutex_destroy(&tidguard.lock);
		pthread_cond_destroy(&tidguard.cond);
		dealloc(tidguard.stack.ss_sp,tidguard.stack.ss_size);
		pthread_attr_destroy(&attr);
		return -1;
	}
	ret |= pthread_attr_destroy(&attr);
	ret |= pthread_mutex_lock(&tidguard.lock);
	while(tidguard.status == THREAD_UNLAUNCHED){
		ret |= pthread_cond_wait(&tidguard.cond,&tidguard.lock);
	}
	if(tidguard.status != THREAD_STARTED){
		ret = -1;
	}
	ret |= pthread_mutex_unlock(&tidguard.lock);
	ret |= pthread_cond_destroy(&tidguard.cond);
	ret |= pthread_mutex_destroy(&tidguard.lock);
	if(ret){
		pthread_join(tid,NULL);
	}
	return ret;
}
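
spawn_thread() blocks on the condition variable until the new thread publishes a launch status. The thread-side half of the handshake is not shown here; a hypothetical sketch of what thread() must do, reusing the field names from tidguard above (the real libtorque entry point may differ):

/* Hypothetical: publish a status other than THREAD_UNLAUNCHED under the
 * guard's lock, wake the spawner, and never touch the guard again after
 * unlocking -- it lives on the spawner's stack and is torn down once the
 * wait in spawn_thread() completes. */
static void *thread(void *void_tidguard){
	tguard *tg = void_tidguard;
	torque_ctx *ctx = tg->ctx;	/* copy out before the guard dies */

	pthread_mutex_lock(&tg->lock);
	tg->status = THREAD_STARTED;	/* or a failure status on setup error */
	pthread_cond_signal(&tg->cond);
	pthread_mutex_unlock(&tg->lock);
	/* ...run the event loop against ctx; tg is invalid past this point... */
	return NULL;
}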
Example #5
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	int err;

	prepare_to_copy(orig);

	tsk = alloc_task_struct();
	if (!tsk)
		return NULL;

	ti = alloc_thread_info(tsk);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	*tsk = *orig;
	tsk->stack = ti;

	err = prop_local_init_single(&tsk->dirties);
	if (err) {
		free_thread_info(ti);
		free_task_struct(tsk);
		return NULL;
	}

	setup_thread_stack(tsk, orig);

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/* One for us, one for whoever does the "release_task()" (usually parent) */
	atomic_set(&tsk->usage, 2);
	atomic_set(&tsk->fs_excl, 0);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	return tsk;
}
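
Example #5 is the oldest variant here: the bare *tsk = *orig; copy was later factored into arch_dup_task_struct() (seen in the other examples) so that an architecture can repair its own state, e.g. FPU context, after the field-for-field copy. The later generic default looks roughly like:

/* Weak default; architectures override it to fix up arch-specific
 * state after duplicating the task_struct. */
int __weak arch_dup_task_struct(struct task_struct *dst,
				struct task_struct *src)
{
	*dst = *src;
	return 0;
}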
Example #6
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	unsigned long *stackend;
	int node = tsk_fork_get_node(orig);
	int err;

	prepare_to_copy(orig);

	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	ti = alloc_thread_info_node(tsk, node);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	err = arch_dup_task_struct(tsk, orig);
	if (err)
		goto out;

	tsk->stack = ti;
#ifdef CONFIG_SECCOMP
	/*
	 * We must handle setting up seccomp filters once we're under
	 * the sighand lock in case orig has changed between now and
	 * then. Until then, filter must be NULL to avoid messing up
	 * the usage counts on the error path calling free_task.
	 */
	tsk->seccomp.filter = NULL;
#endif

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/*
	 * One for us, one for whoever does the "release_task()" (usually
	 * parent)
	 */
	atomic_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;

	account_kernel_stack(ti, 1);

	return tsk;

out:
	free_thread_info(ti);
	free_task_struct(tsk);
	return NULL;
}
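
The seccomp comment in example #6 defers the real filter copy until copy_process() holds the parent's sighand lock. A simplified sketch of that later step, hedged since the details shift across versions (the real copy_seccomp() also re-checks no_new_privs):

static void copy_seccomp(struct task_struct *p)
{
#ifdef CONFIG_SECCOMP
	/* Serialized against seccomp changes in the parent. */
	assert_spin_locked(&current->sighand->siglock);

	/* Take a reference on the parent's filter and share it. */
	get_seccomp_filter(current);
	p->seccomp = current->seccomp;

	/* If the parent enabled seccomp after dup_task_struct() ran,
	 * the child's thread flag must be set by hand. */
	if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
		set_tsk_thread_flag(p, TIF_SECCOMP);
#endif
}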
Example #7
/*
 * Called from do_fork() to create the new child's task_struct,
 * thread_info, kernel stack, and so on. When it returns, the child
 * is an exact copy of the parent's current state.
 */
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	unsigned long *stackend;

	int err;

	prepare_to_copy(orig);
	/* Allocate memory for the task_struct */
	tsk = alloc_task_struct();
	if (!tsk)
		return NULL;

	/* Allocate memory for the thread_info */
	ti = alloc_thread_info(tsk);
	if (!ti) {
		/* On failure, free the task_struct allocated above so it is not leaked */
		free_task_struct(tsk);
		return NULL;
	}
	/* Copy orig's task descriptor into tsk field for field; the two
	 * are identical once the copy completes */
	err = arch_dup_task_struct(tsk, orig);
	if (err)
		goto out;

	tsk->stack = ti;

	/* Initialize the child's dirty-page proportion tracking (tsk->dirties) */
	err = prop_local_init_single(&tsk->dirties);
	if (err)
		goto out;
	/* Set up the child's thread_info/kernel stack from the parent's */
	setup_thread_stack(tsk, orig);
	/* Clear per-task state the child must not inherit */
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for stack-overflow detection */

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/* One for us, one for whoever does the "release_task()" (usually parent) */
	atomic_set(&tsk->usage, 2);
	atomic_set(&tsk->fs_excl, 0);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	/* Charge the new kernel stack to the vmstat counters (see the sketch after this function) */
	account_kernel_stack(ti, 1);

	return tsk;

out:
	free_thread_info(ti);
	free_task_struct(tsk);
	return NULL;
}
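
To answer the question left in example #7's original annotation: account_kernel_stack() charges the freshly allocated kernel stack to the per-zone NR_KERNEL_STACK vmstat counter (visible via /proc/zoneinfo); free_task() later calls it with -1 to undo the charge. In kernels of this era it is roughly:

static void account_kernel_stack(struct thread_info *ti, int account)
{
	/* Credit (account = 1) or debit (account = -1) the memory zone
	 * that holds this stack's pages. */
	struct zone *zone = page_zone(virt_to_page(ti));

	mod_zone_page_state(zone, NR_KERNEL_STACK, account);
}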