static int collect_syscall(struct task_struct *target, struct syscall_info *info) { struct pt_regs *regs; if (!try_get_task_stack(target)) { /* Task has no stack, so the task isn't in a syscall. */ memset(info, 0, sizeof(*info)); info->data.nr = -1; return 0; } regs = task_pt_regs(target); if (unlikely(!regs)) { put_task_stack(target); return -EAGAIN; } info->sp = user_stack_pointer(regs); info->data.instruction_pointer = instruction_pointer(regs); info->data.nr = syscall_get_nr(target, regs); if (info->data.nr != -1L) syscall_get_arguments(target, regs, (unsigned long *)&info->data.args[0]); put_task_stack(target); return 0; }
/*
 * Scan the kernel stack for plausible return addresses and hand each one
 * to @fn until it returns true or the stack is exhausted.  The starting
 * SP/PC come from @regs if given, from the live frame for the current
 * task, or from the switched-out context saved in task->thread.
 */
static void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
{
	unsigned long sp, pc;
	unsigned long *slot;

	if (regs) {
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (!task || task == current) {
		const register unsigned long current_sp __asm__ ("sp");

		sp = current_sp;
		pc = (unsigned long)walk_stackframe;
	} else {
		/* task blocked in __switch_to */
		sp = task->thread.sp;
		pc = task->thread.ra;
	}

	/* A misaligned stack pointer means the saved state is garbage. */
	if (unlikely(sp & 0x7))
		return;

	for (slot = (unsigned long *)sp; !kstack_end(slot); ) {
		if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))
			break;
		/* Next candidate: stack word minus call-insn size. */
		pc = *slot++ - 0x4;
	}
}
static int collect_syscall(struct task_struct *target, long *callno, unsigned long args[6], unsigned int maxargs, unsigned long *sp, unsigned long *pc) { struct pt_regs *regs = task_pt_regs(target); if (unlikely(!regs)) return -EAGAIN; *sp = user_stack_pointer(regs); *pc = instruction_pointer(regs); *callno = syscall_get_nr(target, regs); if (*callno != -1L && maxargs > 0) syscall_get_arguments(target, regs, 0, maxargs, args); return 0; }
/*
 * Setup signal stack frame with siginfo structure
 *
 * Builds an rt_sigframe on the user stack, records the siginfo, signal
 * mask, altstack state and register context, then redirects user-mode
 * execution to ka->sa.sa_handler with the Hexagon rt-signal calling
 * convention (r0 = signum, r1 = &siginfo, r2 = &ucontext).
 *
 * Returns 0 on success; on any fault writing the frame, forces SIGSEGV
 * on the current task and returns -EFAULT.
 */
static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs)
{
	int err = 0;
	struct rt_sigframe __user *frame;
	struct hexagon_vdso *vdso = current->mm->context.vdso;

	/* Carve out frame space on the (possibly alternate) user stack. */
	frame = get_sigframe(ka, regs, sizeof(struct rt_sigframe));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(struct rt_sigframe)))
		goto sigsegv;

	if (copy_siginfo_to_user(&frame->info, info))
		goto sigsegv;

	/* The on-stack signal trampoline is no longer executed;
	 * however, the libgcc signal frame unwinding code checks for
	 * the presence of these two numeric magic values.
	 */
	err |= __put_user(0x7800d166, &frame->tramp[0]);
	err |= __put_user(0x5400c004, &frame->tramp[1]);
	err |= setup_sigcontext(regs, &frame->uc.uc_mcontext);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	/* Snapshot the altstack state as of the interrupted context. */
	err |= __save_altstack(&frame->uc.uc_stack, user_stack_pointer(regs));
	if (err)
		goto sigsegv;

	/* Load r0/r1 pair with signumber/siginfo pointer... */
	regs->r0100 = ((unsigned long long)((unsigned long)&frame->info) << 32) | (unsigned long long)signr;
	/* Third handler argument: pointer to the ucontext in the frame. */
	regs->r02 = (unsigned long) &frame->uc;
	/* Link register: VDSO trampoline that issues rt_sigreturn. */
	regs->r31 = (unsigned long) vdso->rt_signal_trampoline;
	/* Point the user SP at the new frame and resume at the handler. */
	pt_psp(regs) = (unsigned long) frame;
	pt_set_elr(regs, (unsigned long)ka->sa.sa_handler);

	return 0;

sigsegv:
	force_sigsegv(signr, current);
	return -EFAULT;
}
/*
 * Frame-pointer based stack walker: follow the fp/ra pairs saved at the
 * top of each stack frame, calling @fn for every return address until it
 * returns true or the chain leaves the current THREAD_SIZE stack.
 */
static void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
{
	unsigned long fp, sp, pc;

	if (regs) {
		fp = frame_pointer(regs);
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
		const register unsigned long current_sp __asm__ ("sp");

		fp = (unsigned long)__builtin_frame_address(0);
		sp = current_sp;
		pc = (unsigned long)walk_stackframe;
	} else {
		/* task blocked in __switch_to */
		fp = task->thread.s[0];
		sp = task->thread.sp;
		pc = task->thread.ra;
	}

	for (;;) {
		unsigned long low, high;
		struct stackframe *frame;

		if (unlikely(!__kernel_text_address(pc) || fn(pc, arg)))
			break;

		/* Validate frame pointer: inside this stack, 8-byte aligned. */
		low = sp + sizeof(struct stackframe);
		high = ALIGN(sp, THREAD_SIZE);
		if (unlikely(fp < low || fp > high || fp & 0x7))
			break;

		/* Unwind stack frame */
		frame = (struct stackframe *)fp - 1;
		sp = fp;
		fp = frame->fp;
		/*
		 * The return address for this frame lives in frame->ra.
		 * The previous code computed the slot as (fp - 8) AFTER fp
		 * had already been advanced to the caller's frame, handing
		 * ftrace_graph_ret_addr() a pointer into the wrong frame,
		 * so the original return address could fail to be restored
		 * while the function-graph tracer is active.
		 */
		pc = ftrace_graph_ret_addr(current, NULL, frame->ra, &frame->ra);
	}
}
/*
 * Get user stack entries up to the pcstack_limit; return the number of entries
 * acquired. If pcstack is NULL, return the number of entries potentially
 * acquirable.
 *
 * The user stack is scanned word-by-word from the top of stack (tos) to the
 * bottom (bos); any word that points into an executable mapping is treated
 * as a probable return address and recorded. Reads are done under
 * CPU_DTRACE_NOFAULT so a faulting stack page terminates the scan instead
 * of oopsing.
 */
unsigned long dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;
	unsigned long tos, bos, fpc;
	unsigned long *sp;
	unsigned long depth = 0;
	struct vm_area_struct *stack_vma;
	struct page *stack_page = NULL;
	struct pt_regs *regs = current_pt_regs();

	if (pcstack) {
		/* The first two slots are reserved for pid and tgid. */
		if (unlikely(pcstack_limit < 2)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return 0;
		}
		*pcstack++ = (uint64_t)p->pid;
		*pcstack++ = (uint64_t)p->tgid;
		pcstack_limit -= 2;
	}

	/* No user stack to walk when the trap came from kernel mode. */
	if (!user_mode(regs))
		goto out;

	/*
	 * There is always at least one address to report: the instruction
	 * pointer itself (frame 0).
	 */
	depth++;
	fpc = instruction_pointer(regs);
	if (pcstack) {
		*pcstack++ = (uint64_t)fpc;
		pcstack_limit--;
	}

	/*
	 * We cannot ustack() if this task has no mm, if this task is a kernel
	 * thread, or when someone else has the mmap_sem or the page_table_lock
	 * (because find_user_vma() ultimately does a __get_user_pages() and
	 * thence a follow_page(), which can take that lock).
	 */
	if (mm == NULL || (p->flags & PF_KTHREAD) || spin_is_locked(&mm->page_table_lock))
		goto out;

	if (!down_read_trylock(&mm->mmap_sem))
		goto out;
	/* Keep the mm alive for the duration of the walk. */
	atomic_inc(&mm->mm_users);

	/*
	 * The following construct can be replaced with:
	 *	tos = current_user_stack_pointer();
	 * once support for 4.0 is no longer necessary.
	 */
#ifdef CONFIG_X86_64
	tos = current_pt_regs()->sp;
#else
	tos = user_stack_pointer(current_pt_regs());
#endif

	stack_vma = find_user_vma(p, mm, NULL, (unsigned long) tos, 0);
	if (!stack_vma || stack_vma->vm_start > (unsigned long) tos)
		goto unlock_out;

#ifdef CONFIG_STACK_GROWSUP
#error This code does not yet work on STACK_GROWSUP platforms.
#endif
	bos = stack_vma->vm_end;
	if (stack_guard_page_end(stack_vma, bos))
		bos -= PAGE_SIZE;

	/*
	 * If we have a pcstack, loop as long as we are within the stack limit.
	 * Otherwise, loop until we run out of stack.
	 */
	for (sp = (unsigned long *)tos;
	     sp <= (unsigned long *)bos &&
	     ((pcstack && pcstack_limit > 0) || !pcstack);
	     sp++) {
		struct vm_area_struct *code_vma;
		unsigned long addr;

		/*
		 * Recheck for faultedness and pin at page boundaries.
		 * The boundary test must look at the offset within the page
		 * (~PAGE_MASK); the old "& PAGE_MASK" form could only be zero
		 * for addresses below PAGE_SIZE, so the page pinned on the
		 * first iteration was never refreshed as sp crossed into
		 * subsequent pages.
		 */
		if (!stack_page || (((unsigned long)sp & ~PAGE_MASK) == 0)) {
			if (stack_page) {
				put_page(stack_page);
				stack_page = NULL;
			}

			if (!find_user_vma(p, mm, &stack_page, (unsigned long) sp, 1))
				break;
		}

		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
		get_user(addr, sp);
		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
		if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT)) {
			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_BADADDR);
			break;
		}

		/* Frame 0 was already reported from the register state. */
		if (addr == fpc)
			continue;

		code_vma = find_user_vma(p, mm, NULL, addr, 0);
		if (!code_vma || code_vma->vm_start > addr)
			continue;

		if ((addr >= tos && addr <= bos) ||
		    (code_vma->vm_flags & VM_GROWSDOWN)) {
			/* stack address - may need it for the fpstack. */
		} else if (code_vma->vm_flags & VM_EXEC) {
			/* Points into executable text: probable return address. */
			if (pcstack) {
				*pcstack++ = addr;
				pcstack_limit--;
			}
			depth++;
		}
	}
	if (stack_page != NULL)
		put_page(stack_page);

unlock_out:
	atomic_dec(&mm->mm_users);
	up_read(&mm->mmap_sem);

out:
	/* Zero-fill the unused tail of the caller's buffer. */
	if (pcstack)
		while (pcstack_limit--)
			*pcstack++ = 0;

	return depth;
}