// Identify which guest stack is currently in use.
//
// With USE_STACK_HEURISTIC, a stack is identified by the pair
// (address-space id, canonical stack pointer): a stack pointer within
// MAX_STACK_DIFF of one we have already seen is treated as belonging to
// the same stack.  All kernel-mode stacks are lumped under asid 0.
// Without the heuristic, the current ASID alone identifies the stack.
//
// Fixes over the previous version:
//  - target_ulong is unsigned, so `std::abs(sp - cached_sp)` wrapped
//    around instead of yielding the magnitude of the difference; an
//    unsigned-safe distance helper is used instead.
//  - std::lower_bound could return end() (dereference was UB) or
//    begin() (the following decrement was UB); both edges are now
//    handled explicitly.
//  - the set's own O(log n) lower_bound replaces the generic algorithm,
//    which degrades to O(n) on bidirectional iterators.
static stackid get_stackid(CPUArchState* env) {
#ifdef USE_STACK_HEURISTIC
    target_ulong asid;

    // Track all kernel-mode stacks together
    if (in_kernelspace(env))
        asid = 0;
    else
        asid = panda_current_asid(ENV_GET_CPU(env));

    // Invalidate cached stack pointer on ASID change
    if (cached_asid == 0 || cached_asid != asid) {
        cached_sp = 0;
        cached_asid = asid;
    }

    target_ulong sp = get_stack_pointer(env);

    // Unsigned-safe |a - b|.
    auto udist = [](target_ulong a, target_ulong b) -> target_ulong {
        return a > b ? a - b : b - a;
    };

    // We can short-circuit the search in most cases
    if (udist(sp, cached_sp) < MAX_STACK_DIFF) {
        return std::make_pair(asid, cached_sp);
    }

    auto &stackset = stacks_seen[asid];
    if (stackset.empty()) {
        stackset.insert(sp);
        cached_sp = sp;
        return std::make_pair(asid, sp);
    } else {
        // Find the closest stack pointer we've seen.  The candidates are
        // the first element >= sp and its predecessor, when they exist.
        auto lb = stackset.lower_bound(sp);
        target_ulong stack;
        if (lb == stackset.end()) {
            // Every element is < sp; the predecessor is the closest.
            stack = *std::prev(lb);
        } else if (lb == stackset.begin()) {
            // Every element is >= sp; *lb is the closest.
            stack = *lb;
        } else {
            target_ulong stack1 = *lb;
            target_ulong stack2 = *std::prev(lb);
            stack = (udist(stack1, sp) < udist(stack2, sp)) ? stack1 : stack2;
        }
        if (udist(stack, sp) < MAX_STACK_DIFF) {
            return std::make_pair(asid, stack);
        } else {
            // Too far from any known stack: record sp as a new stack.
            stackset.insert(sp);
            cached_sp = sp;
            return std::make_pair(asid, sp);
        }
    }
#else
    return panda_current_asid(ENV_GET_CPU(env));
#endif
}
/*
 * Snapshot the tracee's register banks into CONTEXT and reset the
 * argument-allocation bookkeeping (core register number, stacked
 * argument address, allocation map).
 *
 * The VFP bank is fetched only for hard-float processes.
 *
 * Returns 0 on success, -1 if either ptrace fetch fails.
 */
static int fetch_register_banks(struct process *proc,
				struct fetch_context *context)
{
	if (ptrace(PTRACE_GETREGS, proc->pid, NULL, &context->regs) == -1)
		return -1;

	/* Floating-point registers matter only under the hard-float ABI. */
	if (context->hardfp
	    && ptrace(PTRACE_GETVFPREGS, proc->pid, NULL,
		      &context->fpregs) == -1)
		return -1;

	context->ncrn = 0;
	context->sp = get_stack_pointer(proc);
	context->nsaa = context->sp;
	memset(&context->alloc, 0, sizeof(context->alloc));

	return 0;
}
struct fetch_context * arch_fetch_arg_init(enum tof type, struct Process *proc, struct arg_type_info *ret_info) { struct fetch_context *context = malloc(sizeof(*context)); if (context == NULL || fetch_context_init(proc, context) < 0) { fprintf(stderr, "arch_fetch_arg_init: %s\n", strerror(errno)); free(context); return NULL; } context->stack_pointer = get_stack_pointer(proc) + (s390x(context) ? 160 : 96); if (ret_info->type == ARGTYPE_STRUCT) ++context->greg; return context; }
/*
 * React to a breakpoint trap in EVENT->proc.
 *
 * Two kinds of breakpoints are distinguished:
 *  - a return breakpoint: the trap address matches a return address on
 *    the process's recorded call stack, so we report the function
 *    return and unwind our shadow call stack to that frame;
 *  - an entry breakpoint: the trap address matches a breakpoint we
 *    planted on a library symbol, so we report the call and push a new
 *    shadow stack frame.
 * Anything else is reported as an unexpected breakpoint.
 */
static void handle_breakpoint(Event *event) {
	int i, j;
	Breakpoint *sbp;
	Process *leader = event->proc->leader;

	/* The leader has terminated. */
	if (leader == NULL) {
		continue_process(event->proc->pid);
		return;
	}

	debug(DEBUG_FUNCTION, "handle_breakpoint(pid=%d, addr=%p)",
	      event->proc->pid, event->e_un.brk_addr);
	debug(2, "event: breakpoint (%p)", event->e_un.brk_addr);

#ifdef __powerpc__
	/* Need to skip following NOP's to prevent a fake function from
	 * being stacked.  */
	long stub_addr = (long) get_count_register(event->proc);
	Breakpoint *stub_bp = NULL;
	char nop_instruction[] = PPC_NOP;

	stub_bp = address2bpstruct(leader, event->e_un.brk_addr);
	if (stub_bp) {
		unsigned char *bp_instruction = stub_bp->orig_value;

		/* If the instruction under the breakpoint is a NOP and the
		 * count register does not point here, this is a PLT stub
		 * pad: step over it instead of reporting a call. */
		if (memcmp(bp_instruction, nop_instruction,
			   PPC_NOP_LENGTH) == 0) {
			if (stub_addr != (long) event->e_un.brk_addr) {
				set_instruction_pointer(event->proc,
							event->e_un.brk_addr + 4);
				continue_process(event->proc->pid);
				return;
			}
		}
	}
#endif

	/* Is this a return breakpoint?  Scan the shadow call stack from the
	 * innermost frame outwards. */
	for (i = event->proc->callstack_depth - 1; i >= 0; i--) {
		if (event->e_un.brk_addr ==
		    event->proc->callstack[i].return_addr) {
#ifdef __powerpc__
			/*
			 * PPC HACK! (XXX FIXME TODO)
			 * The PLT gets modified during the first call,
			 * so be sure to re-enable the breakpoint.
			 */
			unsigned long a;
			struct library_symbol *libsym =
			    event->proc->callstack[i].c_un.libfunc;
			void *addr = sym2addr(event->proc, libsym);

			if (libsym->plt_type != LS_TOPLT_POINT) {
				unsigned char break_insn[] = BREAKPOINT_VALUE;

				sbp = address2bpstruct(leader, addr);
				assert(sbp);
				/* Re-plant the breakpoint only if the trap
				 * instruction is no longer in place. */
				a = ptrace(PTRACE_PEEKTEXT, event->proc->pid,
					   addr);
				if (memcmp(&a, break_insn, BREAKPOINT_LENGTH)) {
					sbp->enabled--;
					insert_breakpoint(event->proc, addr,
							  libsym, 1);
				}
			} else {
				sbp = dict_find_entry(leader->breakpoints, addr);
				/* On powerpc, the breakpoint address
				   may end up being actual entry point
				   of the library symbol, not the PLT
				   address we computed.  In that case,
				   sbp is NULL. */
				if (sbp == NULL || addr != sbp->addr) {
					insert_breakpoint(event->proc, addr,
							  libsym, 1);
				}
			}
#elif defined(__mips__)
			void *addr = NULL;
			struct library_symbol *sym =
			    event->proc->callstack[i].c_un.libfunc;
			struct library_symbol *new_sym;
			assert(sym);
			addr = sym2addr(event->proc, sym);
			sbp = dict_find_entry(leader->breakpoints, addr);
			if (sbp) {
				/* Resolved address moved: plant a fresh
				 * breakpoint at the real location. */
				if (addr != sbp->addr) {
					insert_breakpoint(event->proc, addr,
							  sym, 1);
				}
			} else {
				/* Clone the symbol onto the leader's list and
				 * breakpoint it.
				 * NOTE(review): the memcpy copies
				 * strlen(sym->name)+1 bytes past the struct —
				 * this assumes the name string is stored
				 * inline after the struct; confirm against
				 * how library_symbol is allocated. */
				new_sym = malloc(sizeof(*new_sym) +
						 strlen(sym->name) + 1);
				memcpy(new_sym, sym, sizeof(*new_sym) +
				       strlen(sym->name) + 1);
				new_sym->next = leader->list_of_symbols;
				leader->list_of_symbols = new_sym;
				insert_breakpoint(event->proc, addr,
						  new_sym, 1);
			}
#endif
			/* Pop every frame above the one we are returning to. */
			for (j = event->proc->callstack_depth - 1; j > i; j--) {
				callstack_pop(event->proc);
			}
			if (event->proc->state != STATE_IGNORED) {
				if (opt_T || options.summary) {
					calc_time_spent(event->proc);
				}
			}
			event->proc->return_addr = event->e_un.brk_addr;
			if (event->proc->state != STATE_IGNORED) {
				/* Report the function return. */
				mock_return(LT_TOF_FUNCTIONR, event->proc,
					    event->proc->callstack[i].c_un.
					    libfunc->name);
				output_right(LT_TOF_FUNCTIONR, event->proc,
					     event->proc->callstack[i].c_un.
					     libfunc->name);
			}
			callstack_pop(event->proc);
			sbp = address2bpstruct(leader, event->e_un.brk_addr);
			continue_after_breakpoint(event->proc, sbp);
			return;
		}
	}

	/* Not a return address: is it one of our planted breakpoints? */
	if ((sbp = address2bpstruct(leader, event->e_un.brk_addr))) {
		/* No symbol attached: an internal breakpoint; just resume. */
		if (sbp->libsym == NULL) {
			continue_after_breakpoint(event->proc, sbp);
			return;
		}
		/* An empty name marks the dynamic linker's debug hook. */
		if (strcmp(sbp->libsym->name, "") == 0) {
			debug(DEBUG_PROCESS,
			      "Hit _dl_debug_state breakpoint!\n");
			arch_check_dbg(leader);
		}
		if (event->proc->state != STATE_IGNORED) {
			/* Push a shadow frame and report the call. */
			event->proc->stack_pointer =
			    get_stack_pointer(event->proc);
			event->proc->return_addr =
			    get_return_addr(event->proc,
					    event->proc->stack_pointer);
			callstack_push_symfunc(event->proc, sbp->libsym);
			output_left(LT_TOF_FUNCTION, event->proc,
				    sbp->libsym->name);
		}
#ifdef PLT_REINITALISATION_BP
		/* Once the PLTs are set up, re-plant all breakpoints. */
		if (event->proc->need_to_reinitialize_breakpoints
		    && (strcmp(sbp->libsym->name,
			       PLTs_initialized_by_here) == 0))
			reinitialize_breakpoints(leader);
#endif
		continue_after_breakpoint(event->proc, sbp);
		return;
	}

	/* Neither a return nor a known breakpoint. */
	if (event->proc->state != STATE_IGNORED && !options.no_plt) {
		output_line(event->proc, "unexpected breakpoint at %p",
			    (void *)event->e_un.brk_addr);
	}
	continue_process(event->proc->pid);
}
/*
 * Start the Java VM: allocate the main Java stack, run the static
 * initialization phases (constants, exceptions, class initializers,
 * system class), interpret the main method, and report SUCCESS/ERROR.
 *
 * Note on structure: several `if (execp == -1) {` guards open inside
 * `#if` blocks and their closing braces appear inside the matching
 * `#endif` regions further down — the braces balance for every
 * configuration, but only when the `#if`s are taken into account.
 */
int run_vm(void) {
#if !defined(VM_CLOCKINTERRUPTHANDLER_INTERRUPT)
	Object temp;
	int32* mainMethodJavaStack;
#endif
	int16 execp = 0;

	/* Required for certain compilers. */
	init_compiler_specifics();

	/* Function below allocates the initial heap. This is done in
	 * initDefaultRAMAllocationPoint in allocation_point.c */
	init_vm();

#if defined(ENABLE_DEBUG)
	/* Block until the attached debugger tells us to resume. */
	connectToDebugger();
	sendStartEvent();
	while (awaitCommandFromDebugger(0, 0, 0) != RESUME_EVENT) {;}
#endif

	/* Allocating the main stack is delegated to the target specific
	 * function 'get_java_stack_base'. On some architectures/environments
	 * it is located at fixed positions in certain compiler specific
	 * sections. The implementor can allocate the stack in the heap if so
	 * desired. */
	mainMethodJavaStack = get_java_stack_base(JAVA_STACK_SIZE);

#if defined(VM_CLOCKINTERRUPTHANDLER_INTERRUPT)
	/* If more threads are started we give the main thread a new C stack
	 * pointer. In case of no other threads running the main thread just
	 * inherits the current C stack.
	 *
	 * In this case we save the current C stack so we may restore it
	 * later. This is required to terminate the process properly. */
	mainStackPointer = (pointer) get_stack_pointer();

	/* mainMethodJavaStack contains both Java and C stack. Java stack
	 * grows upwards from the beginning, C stack downwards from the
	 * end. */
	stackPointer = (pointer) &mainMethodJavaStack[JAVA_STACK_SIZE - 2];
	/* 'set_stack_pointer' sets the C stack */
	/* NOTE(review): the assignment below repeats the one above —
	 * the first looks redundant; confirm and remove. */
	stackPointer = (pointer) & mainMethodJavaStack[JAVA_STACK_SIZE - 2];
	set_stack_pointer();
#endif

#if defined(REPORTCYCLES)
	/* Warm up the cycle counter so later marks are comparable. */
	papi_start();
	papi_mark();
	papi_mark();
	papi_mark();
	papi_mark();
	papi_mark();
	papi_mark();
#endif

#if defined(LDC2_W_OPCODE_USED) || defined(LDC_W_OPCODE_USED) || defined(LDC_OPCODE_USED) || defined(HANDLELDCWITHINDEX_USED)
	/* -1 here means "no exception": continue with the next phase. */
	execp = initializeConstants(mainMethodJavaStack);
	if (execp == -1) {
#endif
	execp = initializeExceptions(mainMethodJavaStack);
	if (execp == -1) {
#if defined(INVOKECLASSINITIALIZERS)
	execp = invokeClassInitializers(mainMethodJavaStack);
	if (execp == -1) {
#endif
	/* This is only for testing. All tests will write 0 (null) to
	 * '*mainMethodJavaStack' if the test is successful. */
	*mainMethodJavaStack = (int32) (pointer) &temp;
#if defined(VM_CLOCKINTERRUPTHANDLER_ENABLE_USED)
	start_system_tick();
#endif
#if defined(DEVICES_SYSTEM_INITIALIZESYSTEMCLASS)
	execp = enterMethodInterpreter(
	    DEVICES_SYSTEM_INITIALIZESYSTEMCLASS, mainMethodJavaStack);
	if (execp == -1) {
#endif
	/* Start the VM */
	execp = enterMethodInterpreter(mainMethodIndex, mainMethodJavaStack);
#if defined(VM_CLOCKINTERRUPTHANDLER_ENABLE_USED)
	stop_system_tick();
#endif
#if defined(DEVICES_SYSTEM_INITIALIZESYSTEMCLASS)
	}
#endif
	}
	/* TODO: use executeWithStack instead */
#if defined(INVOKECLASSINITIALIZERS)
	}
#endif
#if defined(LDC2_W_OPCODE_USED) || defined(LDC_W_OPCODE_USED) || defined(LDC_OPCODE_USED) || defined(HANDLELDCWITHINDEX_USED)
	}
#endif

#if defined(REPORTCYCLES)
	papi_mark();
#endif

	mark_error();

	/* execp >= 0 means an uncaught exception escaped the interpreter. */
	if (execp >= 0) {
#if defined(JAVA_LANG_THROWABLE_INIT_)
		handleException(execp);
#endif
#if defined(VM_CLOCKINTERRUPTHANDLER_INTERRUPT)
		/* Restore C stack pointer. Otherwise we could not return from
		 * here properly */
		stackPointer = (pointer) mainStackPointer;
		set_stack_pointer();
#endif
		return ERROR;
	}

#if defined(VM_CLOCKINTERRUPTHANDLER_INTERRUPT)
	/* Restore C stack pointer. Otherwise we could not return from here
	 * properly */
	stackPointer = (pointer) mainStackPointer;
	set_stack_pointer();
#endif

#if defined(ENABLE_DEBUG)
	disconnectFromDebugger();
#endif

	/* Tests store 0 at the stack base on success (see above). */
	if (*mainMethodJavaStack) {
		return ERROR;
	} else {
		mark_success();
		return SUCCESS;
	}
	return 0;
}
void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, char *log_lvl) { struct unwind_state state; struct stack_info stack_info = {0}; unsigned long visit_mask = 0; int graph_idx = 0; printk("%sCall Trace:\n", log_lvl); unwind_start(&state, task, regs, stack); stack = stack ? : get_stack_pointer(task, regs); /* * Iterate through the stacks, starting with the current stack pointer. * Each stack has a pointer to the next one. * * x86-64 can have several stacks: * - task stack * - interrupt stack * - HW exception stacks (double fault, nmi, debug, mce) * * x86-32 can have up to three stacks: * - task stack * - softirq stack * - hardirq stack */ for (regs = NULL; stack; stack = stack_info.next_sp) { const char *stack_name; /* * If we overflowed the task stack into a guard page, jump back * to the bottom of the usable stack. */ if (task_stack_page(task) - (void *)stack < PAGE_SIZE) stack = task_stack_page(task); if (get_stack_info(stack, task, &stack_info, &visit_mask)) break; stack_name = stack_type_name(stack_info.type); if (stack_name) printk("%s <%s>\n", log_lvl, stack_name); /* * Scan the stack, printing any text addresses we find. At the * same time, follow proper stack frames with the unwinder. * * Addresses found during the scan which are not reported by * the unwinder are considered to be additional clues which are * sometimes useful for debugging and are prefixed with '?'. * This also serves as a failsafe option in case the unwinder * goes off in the weeds. */ for (; stack < stack_info.end; stack++) { unsigned long real_addr; int reliable = 0; unsigned long addr = READ_ONCE_NOCHECK(*stack); unsigned long *ret_addr_p = unwind_get_return_address_ptr(&state); if (!__kernel_text_address(addr)) continue; /* * Don't print regs->ip again if it was already printed * by __show_regs() below. 
*/ if (regs && stack == ®s->ip) { unwind_next_frame(&state); continue; } if (stack == ret_addr_p) reliable = 1; /* * When function graph tracing is enabled for a * function, its return address on the stack is * replaced with the address of an ftrace handler * (return_to_handler). In that case, before printing * the "real" address, we want to print the handler * address as an "unreliable" hint that function graph * tracing was involved. */ real_addr = ftrace_graph_ret_addr(task, &graph_idx, addr, stack); if (real_addr != addr) printk_stack_address(addr, 0, log_lvl); printk_stack_address(real_addr, reliable, log_lvl); if (!reliable) continue; /* * Get the next frame from the unwinder. No need to * check for an error: if anything goes wrong, the rest * of the addresses will just be printed as unreliable. */ unwind_next_frame(&state); /* if the frame has entry regs, print them */ regs = unwind_get_entry_regs(&state); if (regs) __show_regs(regs, 0); } if (stack_name) printk("%s </%s>\n", log_lvl, stack_name); } }