/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 *
 * @parent:    location of the caller's return address on the stack;
 *             overwritten so the traced function "returns" into
 *             parisc_return_to_handler instead.
 * @self_addr: address of the traced function itself.
 */
static void __hot prepare_ftrace_return(unsigned long *parent,
					unsigned long self_addr)
{
	unsigned long old;
	struct ftrace_graph_ent trace;
	extern int parisc_return_to_handler;

	/* Graph tracer shut down (e.g. after an internal error): do nothing. */
	if (unlikely(ftrace_graph_is_dead()))
		return;

	/* Tracing paused for this task: leave the return address alone. */
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace))
		return;

	/* Save the real return address; -EBUSY means the ret stack is full. */
	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
				     0) == -EBUSY)
		return;

	/* activate parisc_return_to_handler() as return point */
	*parent = (unsigned long) &parisc_return_to_handler;
}
/* * Hook the return address and push it in the stack of return addresses * in current thread info. */ unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent) { struct ftrace_graph_ent trace; if (unlikely(atomic_read(¤t->tracing_graph_pause))) goto out; if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY) goto out; trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN; /* Only trace if the calling function expects to. */ if (!ftrace_graph_entry(&trace)) { current->curr_ret_stack--; goto out; } parent = (unsigned long)return_to_handler; out: return parent; }
/*
 * Hook the return address and push it in the stack of return addresses
 * in current thread info.
 *
 * @parent: original return address of the traced function.
 * @ip:     address just past the mcount call site in the traced function.
 *
 * Returns the address the traced function should return to: the original
 * @parent if tracing is paused or the ret stack is full, otherwise
 * return_to_handler so function exit can be traced too.
 */
unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
					      unsigned long ip)
{
	struct ftrace_graph_ent trace;

	/* Tracing paused for this task: keep the original return address. */
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	/* -EBUSY: per-task return stack is full, cannot trace this call. */
	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
		goto out;
	/*
	 * Strip the 31-bit mode bit and back up over the mcount call to
	 * get the traced function's own address.
	 */
	trace.func = (ip & PSW_ADDR_INSN) - MCOUNT_OFFSET_RET;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace)) {
		/* Entry rejected: undo the push done above. */
		current->curr_ret_stack--;
		goto out;
	}
	parent = (unsigned long) return_to_handler;
out:
	return parent;
}
/*
 * Hook the return address and push it in the stack of return addresses
 * in current thread info.
 *
 * @parent: original return address of the traced function.
 * @ip:     address just past the mcount call site in the traced function.
 *
 * Returns the address the traced function should return to: the original
 * @parent if tracing is paused, rejected, or the ret stack is full,
 * otherwise return_to_handler so function exit can be traced too.
 */
unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
					      unsigned long ip)
{
	struct ftrace_graph_ent trace;

	/* Tracing paused for this task: keep the original return address. */
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	/*
	 * Strip the 31-bit mode bit and back up over the mcount call
	 * instruction to get the traced function's own address.
	 */
	ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
	trace.func = ip;
	trace.depth = current->curr_ret_stack + 1;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace))
		goto out;
	/* -EBUSY: per-task return stack is full, cannot trace this call. */
	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
		goto out;
	parent = (unsigned long) return_to_handler;
out:
	return parent;
}
/* * Hook the return address and push it in the stack of return addrs * in current thread info. */ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, unsigned long frame_pointer) { struct ftrace_graph_ent trace; unsigned long return_hooker = (unsigned long)&return_to_handler; if (unlikely(atomic_read(¤t->tracing_graph_pause))) return; if (ftrace_push_return_trace(*parent, self_addr, &trace.depth, frame_pointer) == -EBUSY) return; trace.func = self_addr; /* Only trace if the calling function expects to */ if (!ftrace_graph_entry(&trace)) { current->curr_ret_stack--; return; } /* all is well in the world ! hijack RETS ... */ *parent = return_hooker; }