void __show_regs(struct pt_regs *regs, int all)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned long sp;
        unsigned short ss, gs;

        if (user_mode_vm(regs)) {
                sp = regs->sp;
                ss = regs->ss & 0xffff;
                gs = get_user_gs(regs);
        } else {
                sp = kernel_stack_pointer(regs);
                savesegment(ss, ss);
                savesegment(gs, gs);
        }

        printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
               (u16)regs->cs, regs->ip, regs->flags, smp_processor_id());
        print_symbol("EIP is at %s\n", regs->ip);

        printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
               regs->ax, regs->bx, regs->cx, regs->dx);
        printk(KERN_DEFAULT "ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
               regs->si, regs->di, regs->bp, sp);
        printk(KERN_DEFAULT " DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
               (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);

        if (!all)
                return;

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
        cr4 = read_cr4_safe();
        printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
               cr0, cr2, cr3, cr4);

        get_debugreg(d0, 0);
        get_debugreg(d1, 1);
        get_debugreg(d2, 2);
        get_debugreg(d3, 3);
        printk(KERN_DEFAULT "DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
               d0, d1, d2, d3);

        get_debugreg(d6, 6);
        get_debugreg(d7, 7);
        printk(KERN_DEFAULT "DR6: %08lx DR7: %08lx\n", d6, d7);
}
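For reference, a minimal caller sketch (the wrapper name is illustrative; real callers such as the oops path pass all=0 to skip the control/debug registers, as show_registers() below does):

/* Illustrative caller (sketch, not kernel source): dump the complete
 * register state, including CR0-CR4 and DR0-DR7, for a trap frame. */
static void example_show_all_regs(struct pt_regs *regs)
{
        __show_regs(regs, 1);   /* all != 0: also print control/debug regs */
}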
void x86_backtrace(struct pt_regs * const regs, unsigned int depth)
{
        struct frame_head *head = (struct frame_head *)frame_pointer(regs);

        if (!user_mode_vm(regs)) {
                unsigned long stack = kernel_stack_pointer(regs);

                if (depth)
                        dump_trace(NULL, regs, (unsigned long *)stack, 0,
                                   &backtrace_ops, &depth);
                return;
        }

        while (depth-- && head)
                head = dump_user_backtrace(head);
}
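dump_user_backtrace() is not shown here; the following is a reconstruction sketch of what such a helper conventionally does in oprofile-era code, assuming a packed user frame of saved frame pointer plus return address (the struct layout and safety checks are assumptions, not verbatim source):

struct frame_head {
        struct frame_head *bp;          /* saved caller frame pointer */
        unsigned long ret;              /* return address */
} __attribute__((packed));

static struct frame_head *dump_user_backtrace(struct frame_head *head)
{
        struct frame_head bufhead[2];

        /* Also check accessibility of one struct frame_head beyond. */
        if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
                return NULL;
        if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
                return NULL;

        oprofile_add_trace(bufhead[0].ret);

        /* Frame pointers must strictly progress up the stack (towards
         * higher addresses), or we could loop forever. */
        if (head >= bufhead[0].bp)
                return NULL;

        return bufhead[0].bp;
}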
void show_registers(struct pt_regs *regs)
{
        int i;

        print_modules();
        __show_regs(regs, 0);

        printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
               TASK_COMM_LEN, current->comm, task_pid_nr(current),
               current_thread_info(), current, task_thread_info(current));
        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault.
         */
        if (!user_mode_vm(regs)) {
                unsigned int code_prologue = code_bytes * 43 / 64;
                unsigned int code_len = code_bytes;
                unsigned char c;
                u8 *ip;

                printk(KERN_EMERG "Stack:\n");
                show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);

                printk(KERN_EMERG "Code: ");

                ip = (u8 *)regs->ip - code_prologue;
                if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
                        /* try starting at IP */
                        ip = (u8 *)regs->ip;
                        code_len = code_len - code_prologue + 1;
                }
                for (i = 0; i < code_len; i++, ip++) {
                        if (ip < (u8 *)PAGE_OFFSET ||
                            probe_kernel_address(ip, c)) {
                                printk(" Bad EIP value.");
                                break;
                        }
                        if (ip == (u8 *)regs->ip)
                                printk("<%02x> ", c);
                        else
                                printk("%02x ", c);
                }
        }
        printk("\n");
}
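For scale: with the default code_bytes of 64, code_prologue is 64 * 43 / 64 = 43, so the "Code:" dump covers the 43 bytes before the faulting EIP plus the 21 bytes from EIP onward, with the byte at EIP bracketed as <xx>.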
inline void smp_local_timer_interrupt(struct pt_regs *regs)
{
        profile_tick(CPU_PROFILING, regs);
#ifdef CONFIG_SMP
        update_process_times(user_mode_vm(regs));
#endif

        /*
         * We take the 'long' return path, and there every subsystem
         * grabs the appropriate locks (kernel lock/irq lock).
         *
         * We might want to decouple profiling from the 'long path',
         * and do the profiling totally in assembly.
         *
         * Currently this isn't too much of an issue (performance-wise):
         * we can take more than 100K local irqs per second on a 100 MHz P5.
         */
}
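The argument to update_process_times() is a user_tick flag: passing user_mode_vm(regs) attributes the tick to user time when the timer interrupted user context (including VM86 mode, which plain user_mode() would miss on 32-bit), and to system time otherwise.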
int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
#ifdef CONFIG_X86_32
        unsigned short ss;
        unsigned long sp;
#endif
        printk(KERN_DEFAULT "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
        printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
        printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
        printk("DEBUG_PAGEALLOC");
#endif
        printk("\n");
        sysfs_printk_last_file();
        if (notify_die(DIE_OOPS, str, regs, err,
                       current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
                return 1;

        show_registers(regs);
#ifdef CONFIG_X86_32
        if (user_mode_vm(regs)) {
                sp = regs->sp;
                ss = regs->ss & 0xffff;
        } else {
                sp = kernel_stack_pointer(regs);
                savesegment(ss, ss);
        }
        printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
        print_symbol("%s", regs->ip);
        printk(" SS:ESP %04x:%08lx\n", ss, sp);
#else
        /* Executive summary in case the oops scrolled away */
        printk(KERN_ALERT "RIP ");
        printk_address(regs->ip, 1);
        printk(" RSP <%016lx>\n", regs->sp);
#endif
        return 0;
}
/**
 * pt_regs_to_gdb_regs - Convert ptrace regs to GDB regs
 * @gdb_regs: A pointer to hold the registers in the order GDB wants.
 * @regs: The &struct pt_regs of the current process.
 *
 * Convert the pt_regs in @regs into the format for registers that
 * GDB expects, stored in @gdb_regs.
 */
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
#ifndef CONFIG_X86_32
        u32 *gdb_regs32 = (u32 *)gdb_regs;
#endif
        gdb_regs[GDB_AX]        = regs->ax;
        gdb_regs[GDB_BX]        = regs->bx;
        gdb_regs[GDB_CX]        = regs->cx;
        gdb_regs[GDB_DX]        = regs->dx;
        gdb_regs[GDB_SI]        = regs->si;
        gdb_regs[GDB_DI]        = regs->di;
        gdb_regs[GDB_BP]        = regs->bp;
        gdb_regs[GDB_PC]        = regs->ip;
#ifdef CONFIG_X86_32
        gdb_regs[GDB_PS]        = regs->flags;
        gdb_regs[GDB_DS]        = regs->ds;
        gdb_regs[GDB_ES]        = regs->es;
        gdb_regs[GDB_CS]        = regs->cs;
        gdb_regs[GDB_FS]        = 0xFFFF;
        gdb_regs[GDB_GS]        = 0xFFFF;
        if (user_mode_vm(regs)) {
                gdb_regs[GDB_SS] = regs->ss;
                gdb_regs[GDB_SP] = regs->sp;
        } else {
                gdb_regs[GDB_SS] = __KERNEL_DS;
                gdb_regs[GDB_SP] = kernel_stack_pointer(regs);
        }
#else
        gdb_regs[GDB_R8]        = regs->r8;
        gdb_regs[GDB_R9]        = regs->r9;
        gdb_regs[GDB_R10]       = regs->r10;
        gdb_regs[GDB_R11]       = regs->r11;
        gdb_regs[GDB_R12]       = regs->r12;
        gdb_regs[GDB_R13]       = regs->r13;
        gdb_regs[GDB_R14]       = regs->r14;
        gdb_regs[GDB_R15]       = regs->r15;
        gdb_regs32[GDB_PS]      = regs->flags;
        gdb_regs32[GDB_CS]      = regs->cs;
        gdb_regs32[GDB_SS]      = regs->ss;
        gdb_regs[GDB_SP]        = kernel_stack_pointer(regs);
#endif
}
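Note the u32 view taken through gdb_regs32 on x86-64: GDB's remote register layout there carries EFLAGS, CS and SS as 32-bit fields, so they are stored into the shared buffer at 32-bit granularity while the general-purpose registers remain 64-bit.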
static int crash_nmi_callback(struct notifier_block *self,
                              unsigned long val, void *data)
{
        struct pt_regs *regs;
#ifdef CONFIG_X86_32
        struct pt_regs fixed_regs;
#endif
        int cpu;

        if (val != DIE_NMI_IPI)
                return NOTIFY_OK;

        regs = ((struct die_args *)data)->regs;
        cpu = raw_smp_processor_id();

        /* Don't do anything if this handler is invoked on crashing cpu.
         * Otherwise, system will completely hang. Crashing cpu can get
         * an NMI if system was initially booted with nmi_watchdog parameter.
         */
        if (cpu == crashing_cpu)
                return NOTIFY_STOP;
        local_irq_disable();

#ifdef CONFIG_X86_32
        if (!user_mode_vm(regs)) {
                crash_fixup_ss_esp(&fixed_regs, regs);
                regs = &fixed_regs;
        }
#endif
        crash_save_cpu(regs, cpu);
        disable_local_APIC();
        atomic_dec(&waiting_for_crash_ipi);

        /* Assume hlt works */
        halt();
        for (;;)
                cpu_relax();

        return 1;
}
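For context: this callback is registered from the crash shutdown path (nmi_shootdown_cpus() in kernels of this vintage), which records crashing_cpu, raises an NMI on every other CPU, and waits for waiting_for_crash_ipi to drain before continuing with the kexec/kdump switch-over.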
/*
 * Probabilistic stack overflow check:
 *
 * Only check the stack in process context, because everything else
 * runs on the big interrupt stacks. Checking reliably is too expensive,
 * so we just check from interrupts.
 */
static inline void stack_overflow_check(struct pt_regs *regs)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
#define STACK_TOP_MARGIN        128
        struct orig_ist *oist;
        u64 irq_stack_top, irq_stack_bottom;
        u64 estack_top, estack_bottom;
        u64 curbase = (u64)task_stack_page(current);

        if (user_mode_vm(regs))
                return;

        if (regs->sp >= curbase + sizeof(struct thread_info) +
                        sizeof(struct pt_regs) + STACK_TOP_MARGIN &&
            regs->sp <= curbase + THREAD_SIZE)
                return;

        irq_stack_top = (u64)__get_cpu_var(irq_stack_union.irq_stack) +
                        STACK_TOP_MARGIN;
        irq_stack_bottom = (u64)__get_cpu_var(irq_stack_ptr);
        if (regs->sp >= irq_stack_top && regs->sp <= irq_stack_bottom)
                return;

        oist = &__get_cpu_var(orig_ist);
        estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN;
        estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1];
        if (regs->sp >= estack_top && regs->sp <= estack_bottom)
                return;

        WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx)\n",
                  current->comm, curbase, regs->sp,
                  irq_stack_top, irq_stack_bottom,
                  estack_top, estack_bottom);

        if (sysctl_panic_on_stackoverflow)
                panic("low stack detected by irq handler - check messages\n");
#endif
}
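A minimal sketch of the containment test repeated above for each candidate stack; the helper name is illustrative, not kernel API:

#include <linux/types.h>

/* Stacks grow down, so "top" is the lowest valid address (already
 * padded by STACK_TOP_MARGIN above) and "bottom" the highest. */
static inline bool sp_in_stack(u64 sp, u64 top, u64 bottom)
{
        return sp >= top && sp <= bottom;
}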
void x86_backtrace(struct pt_regs * const regs, unsigned int depth)
{
        struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);

        if (!user_mode_vm(regs)) {
                unsigned long stack = kernel_stack_pointer(regs);

                if (!((unsigned long)stack & (THREAD_SIZE - 1)))
                        stack = 0;

                if (depth)
                        dump_trace(NULL, regs, (unsigned long *)stack, 0,
                                   &backtrace_ops, &depth);
                return;
        }

        if (x86_backtrace_32(regs, depth))
                return;

        while (depth-- && head)
                head = dump_user_backtrace(head);
}
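Relative to the older variant above, this version apparently treats a kernel stack pointer that is exactly THREAD_SIZE-aligned as bogus (passing stack = 0 lets dump_trace() locate the stack itself), and it first tries x86_backtrace_32(), which in this era handled 32-bit compat user processes on 64-bit kernels using the IA-32 frame layout.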
void __show_regs(struct pt_regs *regs, int all)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned long sp;
        unsigned short ss, gs;
        const char *board;

        if (user_mode_vm(regs)) {
                sp = regs->sp;
                ss = regs->ss & 0xffff;
                gs = get_user_gs(regs);
        } else {
                sp = (unsigned long) (&regs->sp);
                savesegment(ss, ss);
                savesegment(gs, gs);
        }

        printk("\n");

        board = dmi_get_system_info(DMI_PRODUCT_NAME);
        if (!board)
                board = "";
        printk("Pid: %d, comm: %s %s (%s %.*s) %s\n",
               task_pid_nr(current), current->comm,
               print_tainted(), init_utsname()->release,
               (int)strcspn(init_utsname()->version, " "),
               init_utsname()->version, board);

        printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
               (u16)regs->cs, regs->ip, regs->flags, smp_processor_id());
        print_symbol("EIP is at %s\n", regs->ip);

        printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
               regs->ax, regs->bx, regs->cx, regs->dx);
        printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
               regs->si, regs->di, regs->bp, sp);
        printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
               (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);

        if (!all)
                return;

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
        cr4 = read_cr4_safe();
        printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
               cr0, cr2, cr3, cr4);

        get_debugreg(d0, 0);
        get_debugreg(d1, 1);
        get_debugreg(d2, 2);
        get_debugreg(d3, 3);
        printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
               d0, d1, d2, d3);

        get_debugreg(d6, 6);
        get_debugreg(d7, 7);
        printk("DR6: %08lx DR7: %08lx\n", d6, d7);
}
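In this older variant the kernel-mode stack pointer is recovered as &regs->sp rather than via kernel_stack_pointer(): on x86-32 a same-privilege trap pushes no SP/SS, so the saved frame ends where the interrupted stack resumed, and the address of the sp slot is the pre-trap stack pointer. kernel_stack_pointer() encapsulates this same idiom.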
int __ipipe_handle_exception(struct pt_regs *regs, long error_code, int vector)
{
        bool root_entry = false;
        unsigned long flags = 0;
        unsigned long cr2 = 0;

        if (ipipe_root_domain_p) {
                root_entry = true;

                local_save_flags(flags);
                /*
                 * Replicate hw interrupt state into the virtual mask
                 * before calling the I-pipe event handler over the
                 * root domain. Also required later when calling the
                 * Linux exception handler.
                 */
                if (irqs_disabled_hw())
                        local_irq_disable();
        }
#ifdef CONFIG_KGDB
        /* catch exception KGDB is interested in over non-root domains */
        else if (__ipipe_xlate_signo[vector] >= 0 &&
                 !kgdb_handle_exception(vector, __ipipe_xlate_signo[vector],
                                        error_code, regs))
                return 1;
#endif /* CONFIG_KGDB */

        if (vector == ex_do_page_fault)
                cr2 = native_read_cr2();

        if (unlikely(ipipe_trap_notify(vector, regs))) {
                if (root_entry)
                        local_irq_restore_nosync(flags);
                return 1;
        }

        if (likely(ipipe_root_domain_p)) {
                /*
                 * If root is not the topmost domain or in case we faulted in
                 * the iret path of x86-32, regs.flags does not match the root
                 * domain state. The fault handler or the low-level return
                 * code may evaluate it. So fix this up, either by the root
                 * state sampled on entry or, if we migrated to root, with the
                 * current state.
                 */
                __fixup_if(root_entry ? raw_irqs_disabled_flags(flags) :
                           raw_irqs_disabled(), regs);
        } else {
                /* Detect unhandled faults over non-root domains. */
                struct ipipe_domain *ipd = ipipe_current_domain;

                /* Switch to root so that Linux can handle the fault cleanly. */
                __ipipe_current_domain = ipipe_root_domain;

                ipipe_trace_panic_freeze();

                /* Always warn about user land and unfixable faults. */
                if (user_mode_vm(regs) ||
                    !search_exception_tables(instruction_pointer(regs))) {
                        printk(KERN_ERR "BUG: Unhandled exception over domain"
                               " %s at 0x%lx - switching to ROOT\n",
                               ipd->name, instruction_pointer(regs));
                        dump_stack();
                        ipipe_trace_panic_dump();
#ifdef CONFIG_IPIPE_DEBUG
                /* Also report fixable ones when debugging is enabled. */
                } else {
                        printk(KERN_WARNING "WARNING: Fixable exception over "
                               "domain %s at 0x%lx - switching to ROOT\n",
                               ipd->name, instruction_pointer(regs));
                        dump_stack();
                        ipipe_trace_panic_dump();
#endif /* CONFIG_IPIPE_DEBUG */
                }
        }

        if (vector == ex_do_page_fault)
                write_cr2(cr2);

        __ipipe_std_extable[vector](regs, error_code);

        /*
         * Relevant for 64-bit: Restore root domain state as the low-level
         * return code will not align it to regs.flags.
         */
        if (root_entry)
                local_irq_restore_nosync(flags);

        return 0;
}
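The CR2 save/restore bracketing ipipe_trap_notify() appears to exist because the notifier may run handlers, or even migrate domains, before Linux's page-fault handler executes, and anything that faults in between would clobber CR2; re-writing it just before dispatching through __ipipe_std_extable keeps do_page_fault() seeing the original faulting address.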
static inline void die_if_kernel(const char *str, struct pt_regs *regs,
                                 long err)
{
        if (!user_mode_vm(regs))
                die(str, regs, err);
}
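A hypothetical call-site sketch of the usual pattern this helper supports: oops on kernel-mode faults, signal the offending task on user-mode ones (the handler name and signal choice are illustrative, and force_sig() is used with its two-argument form from kernels of this era):

static void example_trap_handler(struct pt_regs *regs, long error_code)
{
        /* Kernel-mode fault: die() does not return to the bad context. */
        die_if_kernel("example trap", regs, error_code);

        /* User-mode fault: punish only the offending task. */
        force_sig(SIGSEGV, current);
}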
int vtss_stack_dump(struct vtss_transport_data* trnd, stack_control_t* stk,
                    struct task_struct* task, struct pt_regs* regs,
                    void* reg_fp, int in_irq)
{
        int rc;
        user_vm_accessor_t* acc;
        void* stack_base = stk->bp.vdp;
        void *reg_ip, *reg_sp;

        if (unlikely(regs == NULL)) {
                rc = snprintf(stk->dbgmsg, sizeof(stk->dbgmsg)-1,
                              "tid=0x%08x, cpu=0x%08x: incorrect regs",
                              task->pid, smp_processor_id());
                if (rc > 0 && rc < sizeof(stk->dbgmsg)-1) {
                        stk->dbgmsg[rc] = '\0';
                        vtss_record_debug_info(trnd, stk->dbgmsg, 0);
                }
                return -EFAULT;
        }
        stk->dbgmsg[0] = '\0';
        /* Get IP and SP registers from current space */
        reg_ip = (void*)REG(ip, regs);
        reg_sp = (void*)REG(sp, regs);
#if defined(CONFIG_X86_64) && defined(VTSS_AUTOCONF_STACKTRACE_OPS_WALK_STACK)
        { /* Unwind kernel stack and get user BP if possible */
                unsigned long bp = 0UL;
                unsigned long kstart = (unsigned long)__START_KERNEL_map +
                        ((CONFIG_PHYSICAL_START + (CONFIG_PHYSICAL_ALIGN - 1)) &
                         ~(CONFIG_PHYSICAL_ALIGN - 1));

#ifdef VTSS_AUTOCONF_DUMP_TRACE_HAVE_BP
                dump_trace(task, NULL, NULL, 0, &vtss_stack_ops, &bp);
#else
                dump_trace(task, NULL, NULL, &vtss_stack_ops, &bp);
#endif
                // TRACE("bp=0x%p <=> fp=0x%p", (void*)bp, reg_fp);
                reg_fp = bp ? (void*)bp : reg_fp;
#ifdef VTSS_DEBUG_TRACE
                if (reg_fp > (void*)kstart) {
                        printk("Warning: bp=0x%p in kernel\n", reg_fp);
                        dump_stack();
                        rc = snprintf(stk->dbgmsg, sizeof(stk->dbgmsg)-1,
                                      "tid=0x%08x, cpu=0x%08x, ip=0x%p, sp=[0x%p,0x%p]: User bp=0x%p inside kernel space",
                                      task->pid, smp_processor_id(), reg_ip,
                                      reg_sp, stack_base, reg_fp);
                        if (rc > 0 && rc < sizeof(stk->dbgmsg)-1) {
                                stk->dbgmsg[rc] = '\0';
                                vtss_record_debug_info(trnd, stk->dbgmsg, 0);
                        }
                }
#endif
        }
#endif /* CONFIG_X86_64 && VTSS_AUTOCONF_STACKTRACE_OPS_WALK_STACK */
        if (unlikely(!user_mode_vm(regs))) {
                /* kernel mode regs, so get a user mode regs */
#if defined(CONFIG_X86_64) || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)
                regs = task_pt_regs(task); /*< get user mode regs */
                if (regs == NULL || !user_mode_vm(regs))
#endif
                {
#ifdef VTSS_DEBUG_TRACE
                        strcat(stk->dbgmsg, "Cannot get user mode regs");
                        vtss_record_debug_info(trnd, stk->dbgmsg, 0);
                        printk("Warning: %s\n", stk->dbgmsg);
                        dump_stack();
#endif
                        return -EFAULT;
                }
        }
        /* Get IP and SP registers from user space */
        reg_ip = (void*)REG(ip, regs);
        reg_sp = (void*)REG(sp, regs);
        { /* Check for correct stack range in task->mm */
                struct vm_area_struct* vma;

#ifdef VTSS_CHECK_IP_IN_MAP
                /* Check IP in module map */
                vma = find_vma(task->mm, (unsigned long)reg_ip);
                if (likely(vma != NULL)) {
                        unsigned long vm_start = vma->vm_start;
                        unsigned long vm_end = vma->vm_end;

                        if ((unsigned long)reg_ip < vm_start ||
                            (!((vma->vm_flags & (VM_EXEC | VM_WRITE)) == VM_EXEC &&
                               vma->vm_file && vma->vm_file->f_dentry) &&
                             !(vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso))) {
#ifdef VTSS_DEBUG_TRACE
                                rc = snprintf(stk->dbgmsg, sizeof(stk->dbgmsg)-1,
                                              "tid=0x%08x, cpu=0x%08x, ip=0x%p, sp=[0x%p,0x%p], fp=0x%p, found_vma=[0x%lx,0x%lx]: Unable to find executable module",
                                              task->pid, smp_processor_id(), reg_ip,
                                              reg_sp, stack_base, reg_fp,
                                              vm_start, vm_end);
                                if (rc > 0 && rc < sizeof(stk->dbgmsg)-1) {
                                        stk->dbgmsg[rc] = '\0';
                                        vtss_record_debug_info(trnd, stk->dbgmsg, 0);
                                }
#endif
                                return -EFAULT;
                        }
                } else {
#ifdef VTSS_DEBUG_TRACE
                        rc = snprintf(stk->dbgmsg, sizeof(stk->dbgmsg)-1,
                                      "tid=0x%08x, cpu=0x%08x, ip=0x%p, sp=[0x%p,0x%p], fp=0x%p: Unable to find executable region",
                                      task->pid, smp_processor_id(), reg_ip,
                                      reg_sp, stack_base, reg_fp);
                        if (rc > 0 && rc < sizeof(stk->dbgmsg)-1) {
                                stk->dbgmsg[rc] = '\0';
                                vtss_record_debug_info(trnd, stk->dbgmsg, 0);
                        }
#endif
                        return -EFAULT;
                }
#endif /* VTSS_CHECK_IP_IN_MAP */
                /* Check SP in module map */
                vma = find_vma(task->mm, (unsigned long)reg_sp);
                if (likely(vma != NULL)) {
                        unsigned long vm_start = vma->vm_start +
                                ((vma->vm_flags & VM_GROWSDOWN) ? PAGE_SIZE : 0UL);
                        unsigned long vm_end = vma->vm_end;

                        // TRACE("vma=[0x%lx - 0x%lx], flags=0x%lx", vma->vm_start, vma->vm_end, vma->vm_flags);
                        if ((unsigned long)reg_sp < vm_start ||
                            (vma->vm_flags & (VM_READ | VM_WRITE)) != (VM_READ | VM_WRITE)) {
#ifdef VTSS_DEBUG_TRACE
                                rc = snprintf(stk->dbgmsg, sizeof(stk->dbgmsg)-1,
                                              "tid=0x%08x, cpu=0x%08x, ip=0x%p, sp=[0x%p,0x%p], fp=0x%p, found_vma=[0x%lx,0x%lx]: Unable to find user stack boundaries",
                                              task->pid, smp_processor_id(), reg_ip,
                                              reg_sp, stack_base, reg_fp,
                                              vm_start, vm_end);
                                if (rc > 0 && rc < sizeof(stk->dbgmsg)-1) {
                                        stk->dbgmsg[rc] = '\0';
                                        vtss_record_debug_info(trnd, stk->dbgmsg, 0);
                                }
#endif
                                return -EFAULT;
                        }
                        if (!((unsigned long)stack_base >= vm_start &&
                              (unsigned long)stack_base <= vm_end) ||
                            ((unsigned long)stack_base <= (unsigned long)reg_sp)) {
                                if ((unsigned long)stack_base != 0UL) {
                                        TRACE("Fixup stack base to 0x%lx instead of 0x%lx",
                                              vm_end, (unsigned long)stack_base);
                                }
                                stack_base = (void*)vm_end;
                                stk->clear(stk);
#ifdef VTSS_STACK_LIMIT
                                stack_base = (void*)min((unsigned long)reg_sp + VTSS_STACK_LIMIT, vm_end);
                                if ((unsigned long)stack_base != vm_end) {
                                        TRACE("Limiting stack base to 0x%lx instead of 0x%lx, drop 0x%lx bytes",
                                              (unsigned long)stack_base, vm_end,
                                              (vm_end - (unsigned long)stack_base));
                                }
                        } else {
                                stack_base = (void*)min((unsigned long)reg_sp + VTSS_STACK_LIMIT, vm_end);
                                if ((unsigned long)stack_base != vm_end) {
                                        TRACE("Limiting stack base to 0x%lx instead of 0x%lx, drop 0x%lx bytes",
                                              (unsigned long)stack_base, vm_end,
                                              (vm_end - (unsigned long)stack_base));
                                }
#endif /* VTSS_STACK_LIMIT */
                        }
                }
        }
#ifdef VTSS_DEBUG_TRACE
        /* Create a common header for debug message */
        rc = snprintf(stk->dbgmsg, sizeof(stk->dbgmsg)-1,
                      "tid=0x%08x, cpu=0x%08x, ip=0x%p, sp=[0x%p,0x%p], fp=0x%p: USER STACK: ",
                      task->pid, smp_processor_id(), reg_ip, reg_sp,
                      stack_base, reg_fp);
        if (!(rc > 0 && rc < sizeof(stk->dbgmsg)-1))
                rc = 0;
        stk->dbgmsg[rc] = '\0';
#else
        stk->dbgmsg[0] = '\0';
#endif
        if (stk->ip.vdp == reg_ip && stk->sp.vdp == reg_sp &&
            stk->bp.vdp == stack_base && stk->fp.vdp == reg_fp) {
                strcat(stk->dbgmsg, "The same context");
                vtss_record_debug_info(trnd, stk->dbgmsg, 0);
                return 0; /* Assume that nothing was changed */
        }
        /* Try to lock vm accessor */
        acc = vtss_user_vm_accessor_init(in_irq, vtss_time_limit);
        if (unlikely((acc == NULL) || acc->trylock(acc, task))) {
                vtss_user_vm_accessor_fini(acc);
                strcat(stk->dbgmsg, "Unable to lock vm accessor");
                vtss_record_debug_info(trnd, stk->dbgmsg, 0);
                return -EBUSY;
        }
        /* stk->setup(stk, acc, reg_ip, reg_sp, stack_base, reg_fp, stk->wow64); */
        stk->acc    = acc;
        stk->ip.vdp = reg_ip;
        stk->sp.vdp = reg_sp;
        stk->bp.vdp = stack_base;
        stk->fp.vdp = reg_fp;
        VTSS_PROFILE(unw, rc = stk->unwind(stk));
        /* Check unwind result */
        if (unlikely(rc == VTSS_ERR_NOMEMORY)) {
                /* Try again with realloced buffer */
                while (rc == VTSS_ERR_NOMEMORY && !stk->realloc(stk)) {
                        VTSS_PROFILE(unw, rc = stk->unwind(stk));
                }
                if (rc == VTSS_ERR_NOMEMORY) {
                        strcat(stk->dbgmsg, "Not enough memory - ");
                }
        }
        vtss_user_vm_accessor_fini(acc);
        if (unlikely(rc)) {
                stk->clear(stk);
                strcat(stk->dbgmsg, "Unwind error");
                vtss_record_debug_info(trnd, stk->dbgmsg, 0);
        }
        return rc;
}
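The grow-and-retry unwind near the end generalizes to a small pattern worth isolating; this sketch uses only the stack_control_t hooks visible above (the helper name is made up, and the VTSS_PROFILE timing wrapper is omitted for brevity):

static int vtss_unwind_with_growth(stack_control_t *stk)
{
        int rc = stk->unwind(stk);

        /* Retry while the unwinder reports an exhausted buffer and the
         * buffer can still be enlarged; stop on success or other error. */
        while (rc == VTSS_ERR_NOMEMORY && !stk->realloc(stk))
                rc = stk->unwind(stk);
        return rc;
}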