void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                         struct pt_regs *regs)
{
    if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
        /* We don't support guest os callchain now */
        return;
    }

    perf_callchain_store(entry, regs->pc);

    if (!compat_user_mode(regs)) {
        /* AARCH64 mode */
        struct frame_tail __user *tail;

        tail = (struct frame_tail __user *)regs->regs[29];

        while (entry->nr < entry->max_stack &&
               tail && !((unsigned long)tail & 0xf))
            tail = user_backtrace(tail, entry);
    } else {
#ifdef CONFIG_COMPAT
        /* AARCH32 compat mode */
        struct compat_frame_tail __user *tail;

        tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;

        while ((entry->nr < entry->max_stack) &&
               tail && !((unsigned long)tail & 0x3))
            tail = compat_user_backtrace(tail, entry);
#endif
    }
}
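/*
 * The user_backtrace() helper consumed by the AArch64 loop above is not
 * shown in this section.  The sketch below is an illustration only, not
 * the exact source: it assumes the usual AArch64 frame record of {fp, lr}
 * pairs linked through fp, and reuses the caller's perf_callchain_store()
 * and __copy_from_user_inatomic() interfaces.
 */
struct frame_tail {
    struct frame_tail __user *fp;
    unsigned long lr;
} __attribute__((packed));

static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
               struct perf_callchain_entry_ctx *entry)
{
    struct frame_tail buftail;
    unsigned long err;

    /* The frame record lives in user memory; check and copy it out. */
    if (!access_ok(tail, sizeof(buftail)))
        return NULL;

    pagefault_disable();
    err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
    pagefault_enable();

    if (err)
        return NULL;

    /* Record the saved return address for this frame. */
    perf_callchain_store(entry, buftail.lr);

    /*
     * Frame pointers should strictly progress back up the stack
     * (towards higher addresses); stop otherwise to avoid loops.
     */
    if (tail >= buftail.fp)
        return NULL;

    return buftail.fp;
}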
void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
{
    struct frame_tail *tail = ((struct frame_tail *) regs->ARM_fp) - 1;

    if (!user_mode(regs)) {
        unsigned long base = ((unsigned long)regs) & ~(THREAD_SIZE - 1);
        walk_stackframe(regs->ARM_fp, base, base + THREAD_SIZE,
                        report_trace, &depth);
        return;
    }

    while (depth-- && tail && !((unsigned long) tail & 3))
        tail = user_backtrace(tail);
}
void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
{
    int step = 0;
    void **frame = (void **)regs->ARM_fp;

    if (!user_mode(regs)) {
        unsigned long base = ((unsigned long)regs) & ~(THREAD_SIZE - 1);
        walk_stackframe(regs->ARM_fp, base, base + THREAD_SIZE,
                        report_trace, &depth);
        return;
    }

    while (depth-- && frame && !((unsigned long) frame & 3))
        frame = user_backtrace(regs, frame, step++);
}
static void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
{
    struct frame_tail *tail = ((struct frame_tail *) regs->ARM_fp) - 1;

    if (!user_mode(regs)) {
        struct stackframe frame;

        frame.fp = regs->ARM_fp;
        frame.sp = regs->ARM_sp;
        frame.lr = regs->ARM_lr;
        frame.pc = regs->ARM_pc;
        walk_stackframe(&frame, report_trace, &depth);
        return;
    }

    while (depth-- && tail && !((unsigned long) tail & 3))
        tail = user_backtrace(tail);
}
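/*
 * The three arm_backtrace() variants above all hand an APCS frame tail to
 * user_backtrace(), starting one record below ARM_fp (hence the "- 1").
 * The layout and helper below are an illustrative assumption, not the
 * exact source: a packed {fp, sp, lr} record walked one step at a time,
 * reporting each saved lr via oprofile_add_trace().
 */
struct frame_tail {
    struct frame_tail *fp;      /* caller's frame pointer */
    unsigned long sp;           /* caller's stack pointer */
    unsigned long lr;           /* return address for this frame */
} __attribute__((packed));

static struct frame_tail *user_backtrace(struct frame_tail *tail)
{
    struct frame_tail buftail;

    /* The record lives in user memory; validate and copy it out. */
    if (!access_ok(tail, sizeof(buftail)))
        return NULL;
    if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
        return NULL;

    oprofile_add_trace(buftail.lr);

    /* Frame pointers should strictly move towards higher addresses. */
    if (tail + 1 >= buftail.fp)
        return NULL;

    return buftail.fp - 1;
}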
void sh_backtrace(struct pt_regs * const regs, unsigned int depth)
{
    unsigned long *stackaddr;

    if (depth > backtrace_limit)
        depth = backtrace_limit;

    stackaddr = (unsigned long *)kernel_stack_pointer(regs);
    if (!user_mode(regs)) {
        if (depth)
            unwind_stack(NULL, regs, stackaddr,
                         &backtrace_ops, &depth);
        return;
    }

    while (depth-- && (stackaddr != NULL))
        stackaddr = user_backtrace(stackaddr, regs);
}
void sh_backtrace(struct pt_regs * const regs, unsigned int depth)
{
    unsigned long *stackaddr;

    /*
     * Paranoia - clip max depth as we could get lost in the weeds.
     */
    if (depth > backtrace_limit)
        depth = backtrace_limit;

    stackaddr = (unsigned long *)kernel_stack_pointer(regs);
    if (!user_mode(regs)) {
        if (depth)
            unwind_stack(NULL, regs, stackaddr,
                         &backtrace_ops, &depth);
        return;
    }

    while (depth-- && (stackaddr != NULL))
        stackaddr = user_backtrace(stackaddr, regs);
}
void backtrace(struct pt_regs * const orig_regs, unsigned int cpu,
               pid_t pid, pid_t tid, PXD32_CSS_Call_Stack_V2 *css_data)
{
    unsigned int u_depth = 0;
    unsigned int k_depth = 0;
    struct pt_regs *regs = orig_regs;

    css_data->depth = 0;

    /*
     * If this is the swapper process, don't backtrace it;
     * doing so triggers bug CPA-310.
     */
    if (current->tgid == 0) {
        css_data->depth = 1;
        css_data->cs[0].address = orig_regs->ARM_pc;
        css_data->cs[0].pid = pid;
        return;
    }

    /* kernel space call stack back trace */
    if (!user_mode(regs)) {
        k_depth = kernel_backtrace(regs, cpu, pid, tid, css_data);

        /*
         * If the last instruction executed in user mode was SWI, we are
         * in a system call; in that case continue the backtrace into
         * user space, otherwise stop here.
         */
        regs = GET_USERREG();
        if (!is_swi_instruction(regs->ARM_pc - arm_inst_size())) {
            g_sample_count++;
            return;
        }
    }

    u_depth = user_backtrace(regs, cpu, pid, tid, k_depth, css_data);
    g_sample_count++;
}
unsigned int quadd_get_user_callchain(struct pt_regs *regs,
                                      struct quadd_callchain *callchain_data)
{
    unsigned long fp, sp, pc, reg;
    struct vm_area_struct *vma, *vma_pc;
    unsigned long __user *tail = NULL;
    struct mm_struct *mm = current->mm;

    callchain_data->nr = 0;

    if (!regs || !user_mode(regs) || !mm)
        return 0;

    if (thumb_mode(regs))
        return 0;

    fp = regs->ARM_fp;
    sp = regs->ARM_sp;
    pc = regs->ARM_pc;

    if (fp == 0 || fp < sp || fp & 0x3)
        return 0;

    vma = find_vma(mm, sp);
    if (check_vma_address(fp, vma))
        return 0;

    if (__copy_from_user_inatomic(&reg, (unsigned long __user *)fp,
                                  sizeof(unsigned long)))
        return 0;

    if (reg > fp && !check_vma_address(reg, vma)) {
        unsigned long value;
        int read_lr = 0;

        if (!check_vma_address(fp + sizeof(unsigned long), vma)) {
            if (__copy_from_user_inatomic(
                    &value,
                    (unsigned long __user *)fp + 1,
                    sizeof(unsigned long)))
                return 0;

            vma_pc = find_vma(mm, pc);
            read_lr = 1;
        }

        if (!read_lr || check_vma_address(value, vma_pc)) {
            /* gcc: fp --> short frame tail (fp) */
            if (regs->ARM_lr < QUADD_USER_SPACE_MIN_ADDR)
                return 0;

            quadd_callchain_store(callchain_data, regs->ARM_lr);
            tail = (unsigned long __user *)reg;
        }
    }

    if (!tail)
        tail = (unsigned long __user *)fp;

    while (tail && !((unsigned long)tail & 0x3))
        tail = user_backtrace(tail, callchain_data, vma);

    return callchain_data->nr;
}
/*
 * call actual syscall routine
 * from the low-level syscall handler:
 * - all HPPA_FRAME_NARGS syscall's arguments supposed to be copied onto
 *   our stack, this wins compared to copyin just needed amount anyway
 * - register args are copied onto stack too
 */
void syscall(struct trapframe *frame, int *args)
{
    struct lwp *l;
    struct proc *p;
    const struct sysent *callp;
    int nsys, code, argsize, error;
    int tmp;
    int rval[2];

    uvmexp.syscalls++;

#ifdef DEBUG
    frame_sanity_check(frame, curlwp);
#endif /* DEBUG */

    if (!USERMODE(frame->tf_iioq_head))
        panic("syscall");

    l = curlwp;
    p = l->l_proc;
    l->l_md.md_regs = frame;
    nsys = p->p_emul->e_nsysent;
    callp = p->p_emul->e_sysent;
    code = frame->tf_t1;

    /*
     * Restarting a system call is touchy on the HPPA,
     * because syscall arguments are passed in registers
     * and the program counter of the syscall "point"
     * isn't easily divined.
     *
     * We handle the first problem by assuming that we
     * will have to restart this system call, so we
     * stuff the first four words of the original arguments
     * back into the frame as arg0...arg3, which is where
     * we found them in the first place.  Any further
     * arguments are (still) on the user's stack and the
     * syscall code will fetch them from there (again).
     *
     * The program counter problem is addressed below.
     */
    frame->tf_arg0 = args[0];
    frame->tf_arg1 = args[1];
    frame->tf_arg2 = args[2];
    frame->tf_arg3 = args[3];

    /*
     * Some special handling for the syscall(2) and
     * __syscall(2) system calls.
     */
    switch (code) {
    case SYS_syscall:
        code = *args;
        args += 1;
        break;
    case SYS___syscall:
        if (callp != sysent)
            break;
        /*
         * NB: even though __syscall(2) takes a quad_t
         * containing the system call number, because
         * our argument copying word-swaps 64-bit arguments,
         * the least significant word of that quad_t
         * is the first word in the argument array.
         */
        code = *args;
        args += 2;
    }

    /*
     * Stacks growing from lower addresses to higher
     * addresses are not really such a good idea, because
     * it makes it impossible to overlay a struct on top
     * of C stack arguments (the arguments appear in
     * reversed order).
     *
     * You can do the obvious thing (as locore.S does) and
     * copy argument words one by one, laying them out in
     * the "right" order in the destination buffer, but this
     * ends up word-swapping multi-word arguments (like off_t).
     *
     * To compensate, we have some automatically-generated
     * code that word-swaps these multi-word arguments.
     * Right now the script that generates this code is
     * in Perl, because I don't know awk.
     *
     * FIXME - this works only on native binaries and
     * will probably screw up any and all emulation.
     */
    switch (code) {
    /*
     * BEGIN automatically generated
     * by /home/fredette/project/hppa/makescargfix.pl
     * do not edit!
     */
    case SYS_pread:
        /*
         * syscallarg(int) fd;
         * syscallarg(void *) buf;
         * syscallarg(size_t) nbyte;
         * syscallarg(int) pad;
         * syscallarg(off_t) offset;
         */
        tmp = args[4];
        args[4] = args[4 + 1];
        args[4 + 1] = tmp;
        break;
    case SYS_pwrite:
        /*
         * syscallarg(int) fd;
         * syscallarg(const void *) buf;
         * syscallarg(size_t) nbyte;
         * syscallarg(int) pad;
         * syscallarg(off_t) offset;
         */
        tmp = args[4];
        args[4] = args[4 + 1];
        args[4 + 1] = tmp;
        break;
    case SYS_mmap:
        /*
         * syscallarg(void *) addr;
         * syscallarg(size_t) len;
         * syscallarg(int) prot;
         * syscallarg(int) flags;
         * syscallarg(int) fd;
         * syscallarg(long) pad;
         * syscallarg(off_t) pos;
         */
        tmp = args[6];
        args[6] = args[6 + 1];
        args[6 + 1] = tmp;
        break;
    case SYS_lseek:
        /*
         * syscallarg(int) fd;
         * syscallarg(int) pad;
         * syscallarg(off_t) offset;
         */
        tmp = args[2];
        args[2] = args[2 + 1];
        args[2 + 1] = tmp;
        break;
    case SYS_truncate:
        /*
         * syscallarg(const char *) path;
         * syscallarg(int) pad;
         * syscallarg(off_t) length;
         */
        tmp = args[2];
        args[2] = args[2 + 1];
        args[2 + 1] = tmp;
        break;
    case SYS_ftruncate:
        /*
         * syscallarg(int) fd;
         * syscallarg(int) pad;
         * syscallarg(off_t) length;
         */
        tmp = args[2];
        args[2] = args[2 + 1];
        args[2 + 1] = tmp;
        break;
    case SYS_preadv:
        /*
         * syscallarg(int) fd;
         * syscallarg(const struct iovec *) iovp;
         * syscallarg(int) iovcnt;
         * syscallarg(int) pad;
         * syscallarg(off_t) offset;
         */
        tmp = args[4];
        args[4] = args[4 + 1];
        args[4 + 1] = tmp;
        break;
    case SYS_pwritev:
        /*
         * syscallarg(int) fd;
         * syscallarg(const struct iovec *) iovp;
         * syscallarg(int) iovcnt;
         * syscallarg(int) pad;
         * syscallarg(off_t) offset;
         */
        tmp = args[4];
        args[4] = args[4 + 1];
        args[4 + 1] = tmp;
        break;
    default:
        break;
    /*
     * END automatically generated
     * by /home/fredette/project/hppa/makescargfix.pl
     * do not edit!
     */
    }

#ifdef USERTRACE
    if (0) {
        user_backtrace(frame, p, -1);
        frame->tf_ipsw |= PSW_R;
        frame->tf_rctr = 0;
        printf("r %08x", frame->tf_iioq_head);
        rctr_next_iioq = frame->tf_iioq_head + 4;
    }
#endif

    if (code < 0 || code >= nsys)
        callp += p->p_emul->e_nosys;    /* bad syscall # */
    else
        callp += code;
    argsize = callp->sy_argsize;

    if ((error = trace_enter(l, code, code, NULL, args)) != 0)
        goto bad;

    rval[0] = 0;
    rval[1] = 0;
    switch (error = (*callp->sy_call)(l, args, rval)) {
    case 0:
        l = curlwp;                     /* changes on exec() */
        frame = l->l_md.md_regs;
        frame->tf_ret0 = rval[0];
        frame->tf_ret1 = rval[1];
        frame->tf_t1 = 0;
        break;
    case ERESTART:
        /*
         * Now we have to wind back the instruction
         * offset queue to the point where the system
         * call will be made again.  This is inherently
         * tied to the SYSCALL macro.
         *
         * Currently, the part of the SYSCALL macro
         * that we want to rerun reads as:
         *
         *      ldil    L%SYSCALLGATE, r1
         *      ble     4(sr7, r1)
         *      ldi     __CONCAT(SYS_,x), t1
         *      ldw     HPPA_FRAME_ERP(sr0,sp), rp
         *
         * And our offset queue head points to the
         * final ldw instruction.  So we need to
         * subtract twelve to reach the ldil.
         */
        frame->tf_iioq_head -= 12;
        frame->tf_iioq_tail = frame->tf_iioq_head + 4;
        break;
    case EJUSTRETURN:
        p = curproc;
        break;
    default:
    bad:
        if (p->p_emul->e_errno)
            error = p->p_emul->e_errno[error];
        frame->tf_t1 = error;
        break;
    }

    trace_exit(l, code, args, rval, error);

    userret(l, frame->tf_iioq_head, 0);
#ifdef DEBUG
    frame_sanity_check(frame, l);
#endif /* DEBUG */
}
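/*
 * The generated word-swap cases above are easiest to see with a concrete
 * value.  The standalone userland illustration below is not part of the
 * source tree: it models the big-endian HPPA convention of a 64-bit
 * argument occupying two consecutive 32-bit slots, most significant word
 * first, and shows why exchanging the two slots restores an off_t that
 * was copied in the wrong word order.
 */
#include <assert.h>
#include <stdint.h>

/* Reassemble a 64-bit argument from its two 32-bit slots, MSW first. */
static uint64_t quad_from_slots(const uint32_t *slot)
{
    return ((uint64_t)slot[0] << 32) | slot[1];
}

int main(void)
{
    /*
     * Suppose lseek() was given offset 0x0000000200000001.  The
     * low-level argument copy delivers the two words swapped:
     */
    uint32_t args[2] = { 0x00000001, 0x00000002 };
    uint32_t tmp;

    assert(quad_from_slots(args) != 0x0000000200000001ULL);

    /* The generated fix-up in syscall() exchanges the two slots... */
    tmp = args[0];
    args[0] = args[1];
    args[1] = tmp;

    /* ...after which the handler sees the offset the caller intended. */
    assert(quad_from_slots(args) == 0x0000000200000001ULL);
    return 0;
}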
void trap(int type, struct trapframe *frame)
{
    struct lwp *l;
    struct proc *p;
    struct pcb *pcbp;
    vaddr_t va;
    struct vm_map *map;
    struct vmspace *vm;
    vm_prot_t vftype;
    pa_space_t space;
    u_int opcode;
    int ret;
    const char *tts;
    int type_raw;
#ifdef DIAGNOSTIC
    extern int emergency_stack_start, emergency_stack_end;
#endif

    type_raw = type & ~T_USER;
    opcode = frame->tf_iir;
    if (type_raw == T_ITLBMISS || type_raw == T_ITLBMISSNA) {
        va = frame->tf_iioq_head;
        space = frame->tf_iisq_head;
        vftype = VM_PROT_READ;  /* XXX VM_PROT_EXECUTE ??? */
    } else {
        va = frame->tf_ior;
        space = frame->tf_isr;
        vftype = inst_store(opcode) ? VM_PROT_WRITE : VM_PROT_READ;
    }

    if ((l = curlwp) == NULL)
        l = &lwp0;
    p = l->l_proc;

#ifdef DIAGNOSTIC
    /*
     * If we are on the emergency stack, then we either got
     * a fault on the kernel stack, or we're just handling
     * a trap for the machine check handler (which also
     * runs on the emergency stack).
     *
     * We *very crudely* differentiate between the two cases
     * by checking the faulting instruction: if it is the
     * function prologue instruction that stores the old
     * frame pointer and updates the stack pointer, we assume
     * that we faulted on the kernel stack.
     *
     * In this case, not completing that instruction will
     * probably confuse backtraces in kgdb/ddb.  Completing
     * it would be difficult, because we already faulted on
     * that part of the stack, so instead we fix up the
     * frame as if the function called has just returned.
     * This has peculiar knowledge about what values are in
     * what registers during the "normal gcc -g" prologue.
     */
    if (&type >= &emergency_stack_start &&
        &type < &emergency_stack_end &&
        type != T_IBREAK && STWM_R1_D_SR0_SP(opcode)) {
        /* Restore the caller's frame pointer. */
        frame->tf_r3 = frame->tf_r1;
        /* Restore the caller's instruction offsets. */
        frame->tf_iioq_head = frame->tf_rp;
        frame->tf_iioq_tail = frame->tf_iioq_head + 4;
        goto dead_end;
    }
#endif /* DIAGNOSTIC */

#ifdef DEBUG
    frame_sanity_check(frame, l);
#endif /* DEBUG */

    /* If this is a trap, not an interrupt, reenable interrupts. */
    if (type_raw != T_INTERRUPT)
        mtctl(frame->tf_eiem, CR_EIEM);

    if (frame->tf_flags & TFF_LAST)
        l->l_md.md_regs = frame;

    if ((type & ~T_USER) > trap_types)
        tts = "reserved";
    else
        tts = trap_type[type & ~T_USER];

#ifdef TRAPDEBUG
    if (type_raw != T_INTERRUPT && type_raw != T_IBREAK)
        printf("trap: %d, %s for %x:%x at %x:%x, fp=%p, rp=%x\n",
            type, tts, space, (u_int)va, frame->tf_iisq_head,
            frame->tf_iioq_head, frame, frame->tf_rp);
    else if (type_raw == T_IBREAK)
        printf("trap: break instruction %x:%x at %x:%x, fp=%p\n",
            break5(opcode), break13(opcode),
            frame->tf_iisq_head, frame->tf_iioq_head, frame);

    {
        extern int etext;
        if (frame < (struct trapframe *)&etext) {
            printf("trap: bogus frame ptr %p\n", frame);
            goto dead_end;
        }
    }
#endif

    switch (type) {
    case T_NONEXIST:
    case T_NONEXIST|T_USER:
#if !defined(DDB) && !defined(KGDB)
        /* we've got screwed up by the central scrutinizer */
        panic ("trap: elvis has just left the building!");
        break;
#else
        goto dead_end;
#endif
    case T_RECOVERY|T_USER:
#ifdef USERTRACE
        for (;;) {
            if (frame->tf_iioq_head != rctr_next_iioq)
                printf("-%08x\nr %08x",
                    rctr_next_iioq - 4, frame->tf_iioq_head);
            rctr_next_iioq = frame->tf_iioq_head + 4;
            if (frame->tf_ipsw & PSW_N) {
                /* Advance the program counter. */
                frame->tf_iioq_head = frame->tf_iioq_tail;
                frame->tf_iioq_tail = frame->tf_iioq_head + 4;
                /* Clear flags. */
                frame->tf_ipsw &=
                    ~(PSW_N|PSW_X|PSW_Y|PSW_Z|PSW_B|PSW_T|PSW_H|PSW_L);
                /* Simulate another trap. */
                continue;
            }
            break;
        }
        frame->tf_rctr = 0;
        break;
#endif /* USERTRACE */
    case T_RECOVERY:
#if !defined(DDB) && !defined(KGDB)
        /* XXX will implement later */
        printf ("trap: handicapped");
        break;
#else
        goto dead_end;
#endif

    case T_EMULATION | T_USER:
#ifdef FPEMUL
        hppa_fpu_emulate(frame, l);
#else /* !FPEMUL */
        /*
         * We don't have FPU emulation, so signal the
         * process with a SIGFPE.
         */
        hppa_trapsignal_hack(l, SIGFPE, frame->tf_iioq_head);
#endif /* !FPEMUL */
        break;

#ifdef DIAGNOSTIC
    case T_EXCEPTION:
        panic("FPU/SFU emulation botch");

        /* these just can't happen ever */
    case T_PRIV_OP:
    case T_PRIV_REG:
        /* these just can't make it to the trap() ever */
    case T_HPMC:
    case T_HPMC | T_USER:
    case T_EMULATION:
#endif
    case T_IBREAK:
    case T_DATALIGN:
    case T_DBREAK:
    dead_end:
        if (type & T_USER) {
#ifdef DEBUG
            user_backtrace(frame, l, type);
#endif
            hppa_trapsignal_hack(l, SIGILL, frame->tf_iioq_head);
            break;
        }
        if (trap_kdebug(type, va, frame))
            return;
        else if (type == T_DATALIGN)
            panic ("trap: %s at 0x%x", tts, (u_int) va);
        else
            panic ("trap: no debugger for \"%s\" (%d)", tts, type);
        break;

    case T_IBREAK | T_USER:
    case T_DBREAK | T_USER:
        /* pass to user debugger */
        break;

    case T_EXCEPTION | T_USER:      /* co-proc assist trap */
        hppa_trapsignal_hack(l, SIGFPE, va);
        break;

    case T_OVERFLOW | T_USER:
        hppa_trapsignal_hack(l, SIGFPE, va);
        break;

    case T_CONDITION | T_USER:
        break;

    case T_ILLEGAL | T_USER:
#ifdef DEBUG
        user_backtrace(frame, l, type);
#endif
        hppa_trapsignal_hack(l, SIGILL, va);
        break;

    case T_PRIV_OP | T_USER:
#ifdef DEBUG
        user_backtrace(frame, l, type);
#endif
        hppa_trapsignal_hack(l, SIGILL, va);
        break;

    case T_PRIV_REG | T_USER:
#ifdef DEBUG
        user_backtrace(frame, l, type);
#endif
        hppa_trapsignal_hack(l, SIGILL, va);
        break;

        /* these should never get here */
    case T_HIGHERPL | T_USER:
    case T_LOWERPL | T_USER:
        hppa_trapsignal_hack(l, SIGSEGV, va);
        break;

    case T_IPROT | T_USER:
    case T_DPROT | T_USER:
        hppa_trapsignal_hack(l, SIGSEGV, va);
        break;

    case T_DATACC:
    case T_USER | T_DATACC:
    case T_ITLBMISS:
    case T_USER | T_ITLBMISS:
    case T_DTLBMISS:
    case T_USER | T_DTLBMISS:
    case T_ITLBMISSNA:
    case T_USER | T_ITLBMISSNA:
    case T_DTLBMISSNA:
    case T_USER | T_DTLBMISSNA:
    case T_TLB_DIRTY:
    case T_USER | T_TLB_DIRTY:
        vm = p->p_vmspace;

        if (!vm) {
#ifdef TRAPDEBUG
            printf("trap: no vm, p=%p\n", p);
#endif
            goto dead_end;
        }

        /*
         * it could be a kernel map for exec_map faults
         */
        if (!(type & T_USER) && space == HPPA_SID_KERNEL)
            map = kernel_map;
        else {
            map = &vm->vm_map;
            if (l->l_flag & L_SA) {
                l->l_savp->savp_faultaddr = va;
                l->l_flag |= L_SA_PAGEFAULT;
            }
        }

        va = hppa_trunc_page(va);

        if (map->pmap->pmap_space != space) {
#ifdef TRAPDEBUG
            printf("trap: space mismatch %d != %d\n",
                space, map->pmap->pmap_space);
#endif
            /* actually dump the user, crap the kernel */
            goto dead_end;
        }

        /* Never call uvm_fault in interrupt context. */
        KASSERT(hppa_intr_depth == 0);

        ret = uvm_fault(map, va, 0, vftype);

#ifdef TRAPDEBUG
        printf("uvm_fault(%p, %x, %d, %d)=%d\n",
            map, (u_int)va, 0, vftype, ret);
#endif

        if (map != kernel_map)
            l->l_flag &= ~L_SA_PAGEFAULT;

        /*
         * If this was a stack access we keep track of the maximum
         * accessed stack size.  Also, if uvm_fault gets a protection
         * failure it is due to accessing the stack region outside
         * the current limit and we need to reflect that as an access
         * error.
         */
        if (va >= (vaddr_t)vm->vm_maxsaddr + vm->vm_ssize) {
            if (ret == 0) {
                vsize_t nss = btoc(va - USRSTACK + PAGE_SIZE);
                if (nss > vm->vm_ssize)
                    vm->vm_ssize = nss;
            } else if (ret == EACCES)
                ret = EFAULT;
        }

        if (ret != 0) {
            if (type & T_USER) {
                printf("trapsignal: uvm_fault(%p, %x, %d, %d)=%d\n",
                    map, (u_int)va, 0, vftype, ret);
#ifdef DEBUG
                user_backtrace(frame, l, type);
#endif
                hppa_trapsignal_hack(l, SIGSEGV, frame->tf_ior);
            } else {
                if (l && l->l_addr->u_pcb.pcb_onfault) {
#ifdef PMAPDEBUG
                    printf("trap: copyin/out %d\n", ret);
#endif
                    pcbp = &l->l_addr->u_pcb;
                    frame->tf_iioq_tail = 4 +
                        (frame->tf_iioq_head = pcbp->pcb_onfault);
                    pcbp->pcb_onfault = 0;
                    break;
                }
#if 1
                if (trap_kdebug(type, va, frame))
                    return;
#else
                panic("trap: uvm_fault(%p, %x, %d, %d): %d",
                    map, va, 0, vftype, ret);
#endif
            }
        }
        break;

    case T_DATALIGN | T_USER:
#ifdef DEBUG
        user_backtrace(frame, l, type);
#endif
        hppa_trapsignal_hack(l, SIGBUS, va);
        break;

    case T_INTERRUPT:
    case T_INTERRUPT|T_USER:
        hppa_intr(frame);
        mtctl(frame->tf_eiem, CR_EIEM);
#if 0
        if (trap_kdebug(type, va, frame))
            return;
#endif
        break;

    case T_LOWERPL:
    case T_DPROT:
    case T_IPROT:
    case T_OVERFLOW:
    case T_CONDITION:
    case T_ILLEGAL:
    case T_HIGHERPL:
    case T_TAKENBR:
    case T_POWERFAIL:
    case T_LPMC:
    case T_PAGEREF:
    case T_DATAPID:
    case T_DATAPID | T_USER:
        if (0 /* T-chip */) {
            break;
        }
        /* FALLTHROUGH to unimplemented */
    default:
#if 1
        if (trap_kdebug(type, va, frame))
            return;
#endif
        panic ("trap: unimplemented \'%s\' (%d)", tts, type);
    }

    if (type & T_USER)
        userret(l, l->l_md.md_regs->tf_iioq_head, 0);

#ifdef DEBUG
    frame_sanity_check(frame, l);
    if (frame->tf_flags & TFF_LAST && curlwp != NULL)
        frame_sanity_check(curlwp->l_md.md_regs, curlwp);
#endif /* DEBUG */
}