void set_task_stack_end_magic(struct task_struct *tsk)
{
	unsigned long *stackend;

	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */
}
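set_task_stack_end_magic() only plants the sentinel; detection happens wherever the kernel re-reads that word and compares it against STACK_END_MAGIC (the scheduler does this via task_stack_end_corrupted() and panics on a mismatch). A minimal sketch of such a probe, assuming the 0x57AC6E9D value from include/uapi/linux/magic.h; the helper name stack_magic_intact() is hypothetical, not a kernel API:

/*
 * Sketch only: an overflow probe in the spirit of the scheduler's
 * task_stack_end_corrupted() check. stack_magic_intact() is a
 * hypothetical helper, not a kernel API.
 */
static inline bool stack_magic_intact(struct task_struct *tsk)
{
	/*
	 * end_of_stack() points at the lowest word of the stack
	 * (kernel stacks grow down), which set_task_stack_end_magic()
	 * seeded at fork time. If the stack ever grows past it, the
	 * sentinel is overwritten and this comparison fails.
	 */
	return *end_of_stack(tsk) == STACK_END_MAGIC;
}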
static int __die(const char *str, int err, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	static int die_counter;
	int ret;

	pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
		 str, err, ++die_counter);

	/* trap and error numbers are mostly meaningless on ARM */
	ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
	if (ret == NOTIFY_STOP)
		return ret;

	print_modules();
	__show_regs(regs);
	pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
		 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));

	if (!user_mode(regs)) {
		dump_backtrace(regs, tsk);
		dump_instr(KERN_EMERG, regs);
	}

	return ret;
}
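__die() forwards the oops to the die notifier chain before dumping state, so out-of-tree code can observe crashes without patching this path. A small illustrative module using the real register_die_notifier()/unregister_die_notifier() API; the handler and the my_* names are examples only:

/*
 * Sketch: observing the DIE_OOPS event that __die() raises via
 * notify_die(). register_die_notifier() is a real kernel API;
 * everything named my_* here is illustrative.
 */
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int my_die_handler(struct notifier_block *nb,
			  unsigned long val, void *data)
{
	if (val == DIE_OOPS)
		pr_info("oops observed\n");
	return NOTIFY_DONE;	/* just observe; let the oops proceed */
}

static struct notifier_block my_die_nb = {
	.notifier_call = my_die_handler,
};

static int __init my_init(void)
{
	return register_die_notifier(&my_die_nb);
}

static void __exit my_exit(void)
{
	unregister_die_notifier(&my_die_nb);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");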
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long fp, lr;
	unsigned long stack_start, stack_end;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	if (IS_ENABLED(CONFIG_FRAME_POINTER)) {
		stack_start = (unsigned long)end_of_stack(p);
		stack_end = (unsigned long)task_stack_page(p) + THREAD_SIZE;

		fp = thread_saved_fp(p);
		do {
			if (fp < stack_start || fp > stack_end)
				return 0;
			lr = ((unsigned long *)fp)[0];
			if (!in_sched_functions(lr))
				return lr;
			fp = *(unsigned long *)(fp + 4);
		} while (count++ < 16);
	}
	return 0;
}
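get_wchan() is the backend for the wait-channel reporting userspace sees through procfs (e.g. /proc/<pid>/wchan and the WCHAN column of ps). A userspace sketch that reads it; this assumes the wchan file is present, which depends on the kernel configuration:

/*
 * Sketch: userspace consumer of get_wchan() via procfs.
 * Prints the symbol where the target task sleeps, or "0" if it
 * is runnable. Defaults to "self" when no pid is given.
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	char path[64], wchan[128];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%s/wchan",
		 argc > 1 ? argv[1] : "self");
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	/* the file holds a bare symbol name with no trailing newline */
	if (fgets(wchan, sizeof(wchan), f))
		printf("%s\n", wchan);
	fclose(f);
	return 0;
}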
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	unsigned long *stackend;
	int node = tsk_fork_get_node(orig);
	int err;

	prepare_to_copy(orig);

	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	ti = alloc_thread_info_node(tsk, node);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	err = arch_dup_task_struct(tsk, orig);
	if (err)
		goto out;

	tsk->stack = ti;

	err = prop_local_init_single(&tsk->dirties);
	if (err)
		goto out;

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/*
	 * One for us, one for whoever does the "release_task()" (usually
	 * parent)
	 */
	atomic_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;

	account_kernel_stack(ti, 1);

	return tsk;

out:
	free_thread_info(ti);
	free_task_struct(tsk);
	return NULL;
}
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	unsigned long *stackend;
	int node = tsk_fork_get_node(orig);
	int err;

	prepare_to_copy(orig);

	tsk = alloc_task_struct_node(node);
	if (!tsk) {
		printk("[%d:%s] fork fail at alloc_tsk_node, please check kmem_cache_alloc_node()\n",
		       current->pid, current->comm);
		return NULL;
	}

	ti = alloc_thread_info_node(tsk, node);
	if (!ti) {
		printk("[%d:%s] fork fail at alloc_t_info_node, please check alloc_pages_node()\n",
		       current->pid, current->comm);
		free_task_struct(tsk);
		return NULL;
	}

	err = arch_dup_task_struct(tsk, orig);
	if (err) {
		printk("[%d:%s] fork fail at arch_dup_task_struct, err:%d\n",
		       current->pid, current->comm, err);
		goto out;
	}

	tsk->stack = ti;

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/*
	 * One for us, one for whoever does the "release_task()" (usually
	 * parent)
	 */
	atomic_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;

	account_kernel_stack(ti, 1);

	return tsk;

out:
	free_thread_info(ti);
	free_task_struct(tsk);
	return NULL;
}
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	unsigned long *stackend;
	int node = tsk_fork_get_node(orig);
	int err;

	prepare_to_copy(orig);

	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	ti = alloc_thread_info_node(tsk, node);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	err = arch_dup_task_struct(tsk, orig);
	if (err)
		goto out;

	tsk->stack = ti;

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	atomic_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;

	account_kernel_stack(ti, 1);

	return tsk;

out:
	free_thread_info(ti);
	free_task_struct(tsk);
	return NULL;
}
static phys_addr_t stack_page1_virt_to_phys(struct cpu_suspend_ctx *ptr)
{
	struct thread_info *ti = current_thread_info();

	if (virt_is_valid_lowmem(ti)) {
		return virt_to_phys(ptr);
	} else if (is_vmalloc_addr(ti)) {
		unsigned long stack_page, low, high;

		stack_page = (unsigned long)task_stack_page(current);
		low = (unsigned long)end_of_stack(current);
		high = stack_page + PAGE_SIZE - sizeof(struct cpu_suspend_ctx);

		BUG_ON(((unsigned long)ptr < low) ||
		       ((unsigned long)ptr >= high));

		return ti->phys_addr + ((unsigned long)ptr - stack_page);
	} else {
		BUG();
	}
}
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	unsigned long *stackend;
	int node = tsk_fork_get_node(orig);
	int err;

	prepare_to_copy(orig);

	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	ti = alloc_thread_info_node(tsk, node);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	err = arch_dup_task_struct(tsk, orig);
	if (err)
		goto out;

	tsk->stack = ti;
#ifdef CONFIG_SECCOMP
	/*
	 * We must handle setting up seccomp filters once we're under
	 * the sighand lock in case orig has changed between now and
	 * then. Until then, filter must be NULL to avoid messing up
	 * the usage counts on the error path calling free_task.
	 */
	tsk->seccomp.filter = NULL;
#endif

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/*
	 * One for us, one for whoever does the "release_task()" (usually
	 * parent)
	 */
	atomic_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;

	account_kernel_stack(ti, 1);

	return tsk;

out:
	free_thread_info(ti);
	free_task_struct(tsk);
	return NULL;
}
/*
 * Called from do_fork(): allocates the task_struct, thread_info,
 * kernel stack, etc. for the new child process. On return the
 * child's state is identical to the parent's.
 */
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	unsigned long *stackend;
	int err;

	prepare_to_copy(orig);

	/* allocate memory for the task_struct */
	tsk = alloc_task_struct();
	if (!tsk)
		return NULL;

	/* allocate memory for the thread_info */
	ti = alloc_thread_info(tsk);
	if (!ti) {
		/*
		 * On failure, free the task_struct allocated above to
		 * avoid leaking it.
		 */
		free_task_struct(tsk);
		return NULL;
	}

	/*
	 * Copy orig into tsk; after the copy the child is a
	 * byte-for-byte duplicate of the parent, sharing the same
	 * pointer values (e.g. the address space).
	 */
	err = arch_dup_task_struct(tsk, orig);
	if (err)
		goto out;

	tsk->stack = ti;

	/* initialize the per-task dirty-page accounting state */
	err = prop_local_init_single(&tsk->dirties);
	if (err)
		goto out;

	/* set up the new task's thread_info on its kernel stack */
	setup_thread_stack(tsk, orig);

	/* clear flags inherited from the parent */
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for stack overflow detection */

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/*
	 * One for us, one for whoever does the "release_task()" (usually
	 * parent)
	 */
	atomic_set(&tsk->usage, 2);
	atomic_set(&tsk->fs_excl, 0);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;

	/* charge the kernel stack pages to the NR_KERNEL_STACK counter */
	account_kernel_stack(ti, 1);

	return tsk;

out:
	free_thread_info(ti);
	free_task_struct(tsk);
	return NULL;
}