unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs) { int rasize, ncopied; unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */ rasize = is_ia32_task() ? 4 : 8; ncopied = copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize); if (unlikely(ncopied)) return -1; /* check whether address has been already hijacked */ if (orig_ret_vaddr == trampoline_vaddr) return orig_ret_vaddr; ncopied = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize); if (likely(!ncopied)) return orig_ret_vaddr; if (ncopied != rasize) { pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, " "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip); force_sig_info(SIGSEGV, SEND_SIG_FORCED, current); } return -1; }
/*
 * Syscall-entry tracing hook.  Runs the fast phase-1 checks; only if
 * phase 1 reports outstanding work does the slower phase 2 run.
 *
 * Returns the syscall number to execute (regs->orig_ax, possibly
 * modified by phase 2).
 */
long syscall_trace_enter(struct pt_regs *regs)
{
	u32 arch = is_ia32_task() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
	unsigned long work = syscall_trace_enter_phase1(regs, arch);

	if (work)
		return syscall_trace_enter_phase2(regs, arch, work);

	/* no tracing work pending — run the syscall as-is */
	return regs->orig_ax;
}
/*
 * This function is called by arch_uprobe_post_xol() to adjust the return
 * address pushed by a call instruction executed out of line.
 *
 * Returns 0 on success, -EFAULT if the user stack slot could not be
 * read or written.
 */
static int adjust_ret_addr(unsigned long sp, long correction)
{
	/* a 32-bit task pushed a 4-byte return address, a 64-bit task 8 bytes */
	int rasize = is_ia32_task() ? 4 : 8;
	long ra = 0;

	if (copy_from_user(&ra, (void __user *)sp, rasize))
		return -EFAULT;

	/* relocate the return address back from the XOL slot */
	ra += correction;

	if (copy_to_user((void __user *)sp, &ra, rasize))
		return -EFAULT;

	return 0;
}