static void stack_mcontext ( struct vki_mcontext *mc,
                             ThreadState* tst,
                             Bool use_rt_sigreturn,
                             UInt fault_addr )
{
   VG_TRACK( pre_mem_write, Vg_CoreSignal, tst->tid, "signal frame mcontext",
             (Addr)mc, sizeof(struct vki_pt_regs) );
#  define DO(gpr)  mc->mc_gregs[VKI_PT_R0+gpr] = tst->arch.vex.guest_GPR##gpr
   DO(0);  DO(1);  DO(2);  DO(3);  DO(4);  DO(5);  DO(6);  DO(7);
   DO(8);  DO(9);  DO(10); DO(11); DO(12); DO(13); DO(14); DO(15);
   DO(16); DO(17); DO(18); DO(19); DO(20); DO(21); DO(22); DO(23);
   DO(24); DO(25); DO(26); DO(27); DO(28); DO(29); DO(30); DO(31);
#  undef DO

   mc->mc_gregs[VKI_PT_NIP]     = tst->arch.vex.guest_CIA;
   mc->mc_gregs[VKI_PT_MSR]     = 0xf032;   /* pretty arbitrary */
   mc->mc_gregs[VKI_PT_ORIG_R3] = tst->arch.vex.guest_GPR3;
   mc->mc_gregs[VKI_PT_CTR]     = tst->arch.vex.guest_CTR;
   mc->mc_gregs[VKI_PT_LNK]     = tst->arch.vex.guest_LR;
   mc->mc_gregs[VKI_PT_XER]     = LibVEX_GuestPPC32_get_XER(&tst->arch.vex);
   mc->mc_gregs[VKI_PT_CCR]     = LibVEX_GuestPPC32_get_CR(&tst->arch.vex);
   mc->mc_gregs[VKI_PT_MQ]      = 0;
   mc->mc_gregs[VKI_PT_TRAP]    = 0;
   mc->mc_gregs[VKI_PT_DAR]     = fault_addr;
   mc->mc_gregs[VKI_PT_DSISR]   = 0;
   mc->mc_gregs[VKI_PT_RESULT]  = 0;
   VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid,
             (Addr)mc, sizeof(struct vki_pt_regs) );

   /* XXX should do FP and vector regs */

   /* set up signal return trampoline */
   /* NB.  5 Sept 07.  mc->mc_pad[0..1] used to contain the code to
      which the signal handler returns, and it just did sys_sigreturn
      or sys_rt_sigreturn.  But this doesn't work if the stack is
      non-executable, and it isn't consistent with the x86-linux and
      amd64-linux scheme for removing the stack frame.  So instead be
      consistent and use a stub in m_trampoline.  Then it doesn't
      matter whether or not the (guest) stack is executable.  This
      fixes #149519 and #145837. */
   VG_TRACK(pre_mem_write, Vg_CoreSignal, tst->tid, "signal frame mcontext",
            (Addr)&mc->mc_pad, sizeof(mc->mc_pad));
   mc->mc_pad[0] = 0; /* invalid */
   mc->mc_pad[1] = 0; /* invalid */
   VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid,
             (Addr)&mc->mc_pad, sizeof(mc->mc_pad) );
   /* invalidate any translation of this area */
   VG_(discard_translations)( (Addr)&mc->mc_pad,
                              sizeof(mc->mc_pad), "stack_mcontext" );

   /* set the signal handler to return to the trampoline */
   SET_SIGNAL_LR(tst, (Addr)(use_rt_sigreturn
                               ? (Addr)&VG_(ppc32_linux_SUBST_FOR_rt_sigreturn)
                               : (Addr)&VG_(ppc32_linux_SUBST_FOR_sigreturn) ));
}

static void stack_mcontext ( struct vki_mcontext *mc,
                             ThreadState* tst,
                             Int ret,
                             UInt fault_addr )
{
   VG_TRACK( pre_mem_write, Vg_CoreSignal, tst->tid, "signal frame mcontext",
             (Addr)mc, sizeof(struct vki_pt_regs) );
#  define DO(gpr)  mc->mc_gregs[VKI_PT_R0+gpr] = tst->arch.vex.guest_GPR##gpr
   DO(0);  DO(1);  DO(2);  DO(3);  DO(4);  DO(5);  DO(6);  DO(7);
   DO(8);  DO(9);  DO(10); DO(11); DO(12); DO(13); DO(14); DO(15);
   DO(16); DO(17); DO(18); DO(19); DO(20); DO(21); DO(22); DO(23);
   DO(24); DO(25); DO(26); DO(27); DO(28); DO(29); DO(30); DO(31);
#  undef DO

   mc->mc_gregs[VKI_PT_NIP]     = tst->arch.vex.guest_CIA;
   mc->mc_gregs[VKI_PT_MSR]     = 0xf032;   /* pretty arbitrary */
   mc->mc_gregs[VKI_PT_ORIG_R3] = tst->arch.vex.guest_GPR3;
   mc->mc_gregs[VKI_PT_CTR]     = tst->arch.vex.guest_CTR;
   mc->mc_gregs[VKI_PT_LNK]     = tst->arch.vex.guest_LR;
   mc->mc_gregs[VKI_PT_XER]     = LibVEX_GuestPPC32_get_XER(&tst->arch.vex);
   mc->mc_gregs[VKI_PT_CCR]     = LibVEX_GuestPPC32_get_CR(&tst->arch.vex);
   mc->mc_gregs[VKI_PT_MQ]      = 0;
   mc->mc_gregs[VKI_PT_TRAP]    = 0;
   mc->mc_gregs[VKI_PT_DAR]     = fault_addr;
   mc->mc_gregs[VKI_PT_DSISR]   = 0;
   mc->mc_gregs[VKI_PT_RESULT]  = 0;
   VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid,
             (Addr)mc, sizeof(struct vki_pt_regs) );

   /* XXX should do FP and vector regs */

   /* set up signal return trampoline */
   VG_TRACK(pre_mem_write, Vg_CoreSignal, tst->tid, "signal frame mcontext",
            (Addr)&mc->mc_pad, sizeof(mc->mc_pad));
   mc->mc_pad[0] = 0x38000000U + ret;   /* li 0,ret */
   mc->mc_pad[1] = 0x44000002U;         /* sc */
   VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid,
             (Addr)&mc->mc_pad, sizeof(mc->mc_pad) );
   /* invalidate any translation of this area */
   VG_(discard_translations)( (Addr64)(Addr)&mc->mc_pad,
                              sizeof(mc->mc_pad), "stack_mcontext" );

   /* set the signal handler to return to the trampoline */
   SET_SIGNAL_LR(tst, (Addr) &mc->mc_pad[0]);
}

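/* Editor's illustration (not from the original sources): how the two
   trampoline words written into mc->mc_pad above are encoded.  On ppc32,
   0x38000000 is the "addi rD,rA,SIMM" major opcode (14 << 26) with rD = r0
   and rA = 0, which assemblers spell "li 0,SIMM"; adding 'ret' places the
   sigreturn syscall number in the low 16 bits.  0x44000002 is the fixed
   encoding of "sc".  The helper below is hypothetical and assumes the
   syscall number fits in the 16-bit immediate field. */
static UInt example_li_r0 ( UInt sysno )
{
   return 0x38000000U + (sysno & 0xFFFFU);   /* li 0,sysno */
}
/* e.g. example_li_r0(ret) == 0x38000000U + ret, matching mc_pad[0] above,
   while mc_pad[1] == 0x44000002U is the "sc" that re-enters the kernel. */
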
void ML_(fill_elfregs_from_tst)(struct vki_user_regs_struct* regs,
                                const ThreadArchState* arch)
{
#  define DO(n)  regs->gpr[n] = arch->vex.guest_GPR##n
   DO(0);  DO(1);  DO(2);  DO(3);  DO(4);  DO(5);  DO(6);  DO(7);
   DO(8);  DO(9);  DO(10); DO(11); DO(12); DO(13); DO(14); DO(15);
   DO(16); DO(17); DO(18); DO(19); DO(20); DO(21); DO(22); DO(23);
   DO(24); DO(25); DO(26); DO(27); DO(28); DO(29); DO(30); DO(31);
#  undef DO

   regs->nip       = arch->vex.guest_CIA;
   regs->msr       = 0xf032;   /* pretty arbitrary */
   regs->orig_gpr3 = arch->vex.guest_GPR3;
   regs->ctr       = arch->vex.guest_CTR;
   regs->link      = arch->vex.guest_LR;
   regs->xer       = LibVEX_GuestPPC32_get_XER( &((ThreadArchState*)arch)->vex );
   regs->ccr       = LibVEX_GuestPPC32_get_CR( &((ThreadArchState*)arch)->vex );
   regs->mq        = 0;
   regs->trap      = 0;
   regs->dar       = 0; /* should be fault address? */
   regs->dsisr     = 0;
   regs->result    = 0;
}

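/* Editor's illustration (hypothetical call site, not from the original
   file): fill a vki_user_regs_struct for thread 'tid', as a core-dump
   writer might do before emitting an NT_PRSTATUS-style note.  It only
   uses VG_(get_ThreadState) and the 'arch' field already used elsewhere
   in this listing; the surrounding note-writing machinery is omitted. */
static void example_fill_regs_for_tid ( ThreadId tid,
                                        struct vki_user_regs_struct* regs )
{
   ThreadState* tst = VG_(get_ThreadState)(tid);
   ML_(fill_elfregs_from_tst)( regs, &tst->arch );
}
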
static Int ptrace_setregs(Int pid, VexGuestArchState* vex)
{
#if defined(VGP_x86_linux)
   struct vki_user_regs_struct regs;
   VG_(memset)(&regs, 0, sizeof(regs));
   regs.cs     = vex->guest_CS;
   regs.ss     = vex->guest_SS;
   regs.ds     = vex->guest_DS;
   regs.es     = vex->guest_ES;
   regs.fs     = vex->guest_FS;
   regs.gs     = vex->guest_GS;
   regs.eax    = vex->guest_EAX;
   regs.ebx    = vex->guest_EBX;
   regs.ecx    = vex->guest_ECX;
   regs.edx    = vex->guest_EDX;
   regs.esi    = vex->guest_ESI;
   regs.edi    = vex->guest_EDI;
   regs.ebp    = vex->guest_EBP;
   regs.esp    = vex->guest_ESP;
   regs.eflags = LibVEX_GuestX86_get_eflags(vex);
   regs.eip    = vex->guest_EIP;
   return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &regs);

#elif defined(VGP_amd64_linux)
   struct vki_user_regs_struct regs;
   VG_(memset)(&regs, 0, sizeof(regs));
   regs.rax    = vex->guest_RAX;
   regs.rbx    = vex->guest_RBX;
   regs.rcx    = vex->guest_RCX;
   regs.rdx    = vex->guest_RDX;
   regs.rsi    = vex->guest_RSI;
   regs.rdi    = vex->guest_RDI;
   regs.rbp    = vex->guest_RBP;
   regs.rsp    = vex->guest_RSP;
   regs.r8     = vex->guest_R8;
   regs.r9     = vex->guest_R9;
   regs.r10    = vex->guest_R10;
   regs.r11    = vex->guest_R11;
   regs.r12    = vex->guest_R12;
   regs.r13    = vex->guest_R13;
   regs.r14    = vex->guest_R14;
   regs.r15    = vex->guest_R15;
   regs.eflags = LibVEX_GuestAMD64_get_rflags(vex);
   regs.rip    = vex->guest_RIP;
   /* Set %{c,d,e,f,s,g}s and %{fs,gs}_base (whatever those are) to
      values which don't fail the kernel's sanity checks.  I have no
      idea what these should really be set to.  Anyway, mostly it
      seems that zero is an allowable value, except for %cs and %ss
      which have to have their lowest 2 bits be 11.  See putreg() in
      linux-2.6.23/arch/x86_64/kernel/ptrace.c for the apparently
      relevant sanity checks.  This fixes #145622. */
   regs.cs      = 3;
   regs.ds      = 0;
   regs.es      = 0;
   regs.fs      = 0;
   regs.ss      = 3;
   regs.gs      = 0;
   regs.fs_base = 0;
   regs.gs_base = 0;
   return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &regs);

#elif defined(VGP_ppc32_linux)
   Int rc = 0;
   /* apparently the casting to void* is the Right Thing To Do */
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R0 * 4), (void*)vex->guest_GPR0);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R1 * 4), (void*)vex->guest_GPR1);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R2 * 4), (void*)vex->guest_GPR2);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R3 * 4), (void*)vex->guest_GPR3);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R4 * 4), (void*)vex->guest_GPR4);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R5 * 4), (void*)vex->guest_GPR5);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R6 * 4), (void*)vex->guest_GPR6);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R7 * 4), (void*)vex->guest_GPR7);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R8 * 4), (void*)vex->guest_GPR8);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R9 * 4), (void*)vex->guest_GPR9);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R10 * 4), (void*)vex->guest_GPR10);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R11 * 4), (void*)vex->guest_GPR11);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R12 * 4), (void*)vex->guest_GPR12);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R13 * 4), (void*)vex->guest_GPR13);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R14 * 4), (void*)vex->guest_GPR14);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R15 * 4), (void*)vex->guest_GPR15);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R16 * 4), (void*)vex->guest_GPR16);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R17 * 4), (void*)vex->guest_GPR17);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R18 * 4), (void*)vex->guest_GPR18);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R19 * 4), (void*)vex->guest_GPR19);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R20 * 4), (void*)vex->guest_GPR20);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R21 * 4), (void*)vex->guest_GPR21);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R22 * 4), (void*)vex->guest_GPR22);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R23 * 4), (void*)vex->guest_GPR23);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R24 * 4), (void*)vex->guest_GPR24);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R25 * 4), (void*)vex->guest_GPR25);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R26 * 4), (void*)vex->guest_GPR26);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R27 * 4), (void*)vex->guest_GPR27);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R28 * 4), (void*)vex->guest_GPR28);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R29 * 4), (void*)vex->guest_GPR29);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R30 * 4), (void*)vex->guest_GPR30);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R31 * 4), (void*)vex->guest_GPR31);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_NIP * 4), (void*)vex->guest_CIA);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CCR * 4), (void*)LibVEX_GuestPPC32_get_CR(vex));
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_LNK * 4), (void*)vex->guest_LR);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CTR * 4), (void*)vex->guest_CTR);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_XER * 4), (void*)LibVEX_GuestPPC32_get_XER(vex));
   return rc;

#elif defined(VGP_ppc64_linux)
   Int rc = 0;
   /* FRJ: copied nearly verbatim from the ppc32 case.  I compared the
      vki-ppc64-linux.h with its ppc32 counterpart and saw no
      appreciable differences, other than the registers being 8 bytes
      instead of 4.  No idea why we don't set all of the entries
      declared in vki_pt_regs, but ppc32 doesn't so there must be a
      reason.

      Finally, note that CR and XER are 32 bits even for ppc64 (see
      libvex_guest_ppc64.h), but the vki_pt_regs struct still gives
      them 64 bits. */
   /* apparently the casting to void* is the Right Thing To Do */
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R0 * 8), (void*)vex->guest_GPR0);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R1 * 8), (void*)vex->guest_GPR1);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R2 * 8), (void*)vex->guest_GPR2);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R3 * 8), (void*)vex->guest_GPR3);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R4 * 8), (void*)vex->guest_GPR4);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R5 * 8), (void*)vex->guest_GPR5);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R6 * 8), (void*)vex->guest_GPR6);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R7 * 8), (void*)vex->guest_GPR7);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R8 * 8), (void*)vex->guest_GPR8);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R9 * 8), (void*)vex->guest_GPR9);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R10 * 8), (void*)vex->guest_GPR10);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R11 * 8), (void*)vex->guest_GPR11);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R12 * 8), (void*)vex->guest_GPR12);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R13 * 8), (void*)vex->guest_GPR13);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R14 * 8), (void*)vex->guest_GPR14);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R15 * 8), (void*)vex->guest_GPR15);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R16 * 8), (void*)vex->guest_GPR16);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R17 * 8), (void*)vex->guest_GPR17);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R18 * 8), (void*)vex->guest_GPR18);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R19 * 8), (void*)vex->guest_GPR19);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R20 * 8), (void*)vex->guest_GPR20);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R21 * 8), (void*)vex->guest_GPR21);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R22 * 8), (void*)vex->guest_GPR22);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R23 * 8), (void*)vex->guest_GPR23);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R24 * 8), (void*)vex->guest_GPR24);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R25 * 8), (void*)vex->guest_GPR25);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R26 * 8), (void*)vex->guest_GPR26);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R27 * 8), (void*)vex->guest_GPR27);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R28 * 8), (void*)vex->guest_GPR28);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R29 * 8), (void*)vex->guest_GPR29);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R30 * 8), (void*)vex->guest_GPR30);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R31 * 8), (void*)vex->guest_GPR31);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_NIP * 8), (void*)vex->guest_CIA);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CCR * 8), (void*)(long)LibVEX_GuestPPC64_get_CR(vex));
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_LNK * 8), (void*)vex->guest_LR);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CTR * 8), (void*)vex->guest_CTR);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_XER * 8), (void*)(long)LibVEX_GuestPPC64_get_XER(vex));
   return rc;

#elif defined(VGP_arm_linux)
   struct vki_user_regs_struct uregs;
   VG_(memset)(&uregs, 0, sizeof(uregs));
   uregs.ARM_r0   = vex->guest_R0;
   uregs.ARM_r1   = vex->guest_R1;
   uregs.ARM_r2   = vex->guest_R2;
   uregs.ARM_r3   = vex->guest_R3;
   uregs.ARM_r4   = vex->guest_R4;
   uregs.ARM_r5   = vex->guest_R5;
   uregs.ARM_r6   = vex->guest_R6;
   uregs.ARM_r7   = vex->guest_R7;
   uregs.ARM_r8   = vex->guest_R8;
   uregs.ARM_r9   = vex->guest_R9;
   uregs.ARM_r10  = vex->guest_R10;
   uregs.ARM_fp   = vex->guest_R11;
   uregs.ARM_ip   = vex->guest_R12;
   uregs.ARM_sp   = vex->guest_R13;
   uregs.ARM_lr   = vex->guest_R14;
   // Remove the T bit from the bottom of R15T.  It will get shipped
   // over in CPSR.T instead, since LibVEX_GuestARM_get_cpsr copies
   // it from R15T[0].
   uregs.ARM_pc   = vex->guest_R15T & 0xFFFFFFFE;
   uregs.ARM_cpsr = LibVEX_GuestARM_get_cpsr(vex);
   return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &uregs);

#elif defined(VGP_arm64_linux)
   I_die_here; //ATC
   struct vki_user_pt_regs uregs;
   VG_(memset)(&uregs, 0, sizeof(uregs));
   uregs.regs[0]  = vex->guest_X0;
   uregs.regs[1]  = vex->guest_X1;
   uregs.regs[2]  = vex->guest_X2;
   uregs.regs[3]  = vex->guest_X3;
   uregs.regs[4]  = vex->guest_X4;
   uregs.regs[5]  = vex->guest_X5;
   uregs.regs[6]  = vex->guest_X6;
   uregs.regs[7]  = vex->guest_X7;
   uregs.regs[8]  = vex->guest_X8;
   uregs.regs[9]  = vex->guest_X9;
   uregs.regs[10] = vex->guest_X10;
   uregs.regs[11] = vex->guest_X11;
   uregs.regs[12] = vex->guest_X12;
   uregs.regs[13] = vex->guest_X13;
   uregs.regs[14] = vex->guest_X14;
   uregs.regs[15] = vex->guest_X15;
   uregs.regs[16] = vex->guest_X16;
   uregs.regs[17] = vex->guest_X17;
   uregs.regs[18] = vex->guest_X18;
   uregs.regs[19] = vex->guest_X19;
   uregs.regs[20] = vex->guest_X20;
   uregs.regs[21] = vex->guest_X21;
   uregs.regs[22] = vex->guest_X22;
   uregs.regs[23] = vex->guest_X23;
   uregs.regs[24] = vex->guest_X24;
   uregs.regs[25] = vex->guest_X25;
   uregs.regs[26] = vex->guest_X26;
   uregs.regs[27] = vex->guest_X27;
   uregs.regs[28] = vex->guest_X28;
   uregs.regs[29] = vex->guest_X29;
   uregs.regs[30] = vex->guest_X30;
   uregs.sp       = vex->guest_XSP;
   uregs.pc       = vex->guest_PC;
   uregs.pstate   = LibVEX_GuestARM64_get_nzcv(vex); /* is this correct? */
   return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &uregs);

#elif defined(VGP_x86_darwin)
   I_die_here;

#elif defined(VGP_amd64_darwin)
   I_die_here;

#elif defined(VGP_s390x_linux)
   struct vki_user_regs_struct regs;
   vki_ptrace_area pa;

   /* We don't set the psw mask and start at offset 8 */
   pa.vki_len = (unsigned long) &regs.per_info - (unsigned long) &regs.psw.addr;
   pa.vki_process_addr = (unsigned long) &regs.psw.addr;
   pa.vki_kernel_addr = 8;

   VG_(memset)(&regs, 0, sizeof(regs));
   regs.psw.addr = vex->guest_IA;

   /* We don't set the mask */
   regs.gprs[0] = vex->guest_r0;
   regs.gprs[1] = vex->guest_r1;
   regs.gprs[2] = vex->guest_r2;
   regs.gprs[3] = vex->guest_r3;
   regs.gprs[4] = vex->guest_r4;
   regs.gprs[5] = vex->guest_r5;
   regs.gprs[6] = vex->guest_r6;
   regs.gprs[7] = vex->guest_r7;
   regs.gprs[8] = vex->guest_r8;
   regs.gprs[9] = vex->guest_r9;
   regs.gprs[10] = vex->guest_r10;
   regs.gprs[11] = vex->guest_r11;
   regs.gprs[12] = vex->guest_r12;
   regs.gprs[13] = vex->guest_r13;
   regs.gprs[14] = vex->guest_r14;
   regs.gprs[15] = vex->guest_r15;

   regs.acrs[0] = vex->guest_a0;
   regs.acrs[1] = vex->guest_a1;
   regs.acrs[2] = vex->guest_a2;
   regs.acrs[3] = vex->guest_a3;
   regs.acrs[4] = vex->guest_a4;
   regs.acrs[5] = vex->guest_a5;
   regs.acrs[6] = vex->guest_a6;
   regs.acrs[7] = vex->guest_a7;
   regs.acrs[8] = vex->guest_a8;
   regs.acrs[9] = vex->guest_a9;
   regs.acrs[10] = vex->guest_a10;
   regs.acrs[11] = vex->guest_a11;
   regs.acrs[12] = vex->guest_a12;
   regs.acrs[13] = vex->guest_a13;
   regs.acrs[14] = vex->guest_a14;
   regs.acrs[15] = vex->guest_a15;

   /* only used for system call restart and friends, just use r2 */
   regs.orig_gpr2 = vex->guest_r2;

   regs.fp_regs.fprs[0].ui = vex->guest_f0;
   regs.fp_regs.fprs[1].ui = vex->guest_f1;
   regs.fp_regs.fprs[2].ui = vex->guest_f2;
   regs.fp_regs.fprs[3].ui = vex->guest_f3;
   regs.fp_regs.fprs[4].ui = vex->guest_f4;
   regs.fp_regs.fprs[5].ui = vex->guest_f5;
   regs.fp_regs.fprs[6].ui = vex->guest_f6;
   regs.fp_regs.fprs[7].ui = vex->guest_f7;
   regs.fp_regs.fprs[8].ui = vex->guest_f8;
   regs.fp_regs.fprs[9].ui = vex->guest_f9;
   regs.fp_regs.fprs[10].ui = vex->guest_f10;
   regs.fp_regs.fprs[11].ui = vex->guest_f11;
   regs.fp_regs.fprs[12].ui = vex->guest_f12;
   regs.fp_regs.fprs[13].ui = vex->guest_f13;
   regs.fp_regs.fprs[14].ui = vex->guest_f14;
   regs.fp_regs.fprs[15].ui = vex->guest_f15;
   regs.fp_regs.fpc = vex->guest_fpc;

   return VG_(ptrace)(VKI_PTRACE_POKEUSR_AREA, pid, &pa, NULL);

#elif defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
   struct vki_user_regs_struct regs;
   VG_(memset)(&regs, 0, sizeof(regs));
   regs.MIPS_r0 = vex->guest_r0;
   regs.MIPS_r1 = vex->guest_r1;
   regs.MIPS_r2 = vex->guest_r2;
   regs.MIPS_r3 = vex->guest_r3;
   regs.MIPS_r4 = vex->guest_r4;
   regs.MIPS_r5 = vex->guest_r5;
   regs.MIPS_r6 = vex->guest_r6;
   regs.MIPS_r7 = vex->guest_r7;
   regs.MIPS_r8 = vex->guest_r8;
   regs.MIPS_r9 = vex->guest_r9;
   regs.MIPS_r10 = vex->guest_r10;
   regs.MIPS_r11 = vex->guest_r11;
   regs.MIPS_r12 = vex->guest_r12;
   regs.MIPS_r13 = vex->guest_r13;
   regs.MIPS_r14 = vex->guest_r14;
   regs.MIPS_r15 = vex->guest_r15;
   regs.MIPS_r16 = vex->guest_r16;
   regs.MIPS_r17 = vex->guest_r17;
   regs.MIPS_r18 = vex->guest_r18;
   regs.MIPS_r19 = vex->guest_r19;
   regs.MIPS_r20 = vex->guest_r20;
   regs.MIPS_r21 = vex->guest_r21;
   regs.MIPS_r22 = vex->guest_r22;
   regs.MIPS_r23 = vex->guest_r23;
   regs.MIPS_r24 = vex->guest_r24;
   regs.MIPS_r25 = vex->guest_r25;
   regs.MIPS_r26 = vex->guest_r26;
   regs.MIPS_r27 = vex->guest_r27;
   regs.MIPS_r28 = vex->guest_r28;
   regs.MIPS_r29 = vex->guest_r29;
   regs.MIPS_r30 = vex->guest_r30;
   regs.MIPS_r31 = vex->guest_r31;
   return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &regs);

#else
#  error Unknown arch
#endif
}

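/* Editor's sketch of a typical caller (not the exact one from the original
   sources): attach to a stopped process, push the guest register state
   into it with ptrace_setregs(), then detach.  VKI_PTRACE_ATTACH,
   VKI_PTRACE_DETACH and VG_(waitpid) are assumed to be available wrappers
   for the usual Linux ptrace/waitpid operations; error handling is
   deliberately minimal. */
static Bool example_push_guest_regs ( Int pid, VexGuestArchState* vex )
{
   Int status = 0;
   if (VG_(ptrace)(VKI_PTRACE_ATTACH, pid, NULL, NULL) != 0)
      return False;
   VG_(waitpid)(pid, &status, 0);            /* wait for the attach stop */
   Bool ok = (ptrace_setregs(pid, vex) == 0);
   VG_(ptrace)(VKI_PTRACE_DETACH, pid, NULL, NULL);
   return ok;
}
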
static Int ptrace_setregs(Int pid, VexGuestArchState* vex)
{
#if defined(VGP_x86_linux)
   struct vki_user_regs_struct regs;
   regs.cs     = vex->guest_CS;
   regs.ss     = vex->guest_SS;
   regs.ds     = vex->guest_DS;
   regs.es     = vex->guest_ES;
   regs.fs     = vex->guest_FS;
   regs.gs     = vex->guest_GS;
   regs.eax    = vex->guest_EAX;
   regs.ebx    = vex->guest_EBX;
   regs.ecx    = vex->guest_ECX;
   regs.edx    = vex->guest_EDX;
   regs.esi    = vex->guest_ESI;
   regs.edi    = vex->guest_EDI;
   regs.ebp    = vex->guest_EBP;
   regs.esp    = vex->guest_ESP;
   regs.eflags = LibVEX_GuestX86_get_eflags(vex);
   regs.eip    = vex->guest_EIP;
   return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &regs);

#elif defined(VGP_amd64_linux)
   struct vki_user_regs_struct regs;
   regs.rax    = vex->guest_RAX;
   regs.rbx    = vex->guest_RBX;
   regs.rcx    = vex->guest_RCX;
   regs.rdx    = vex->guest_RDX;
   regs.rsi    = vex->guest_RSI;
   regs.rdi    = vex->guest_RDI;
   regs.rbp    = vex->guest_RBP;
   regs.rsp    = vex->guest_RSP;
   regs.r8     = vex->guest_R8;
   regs.r9     = vex->guest_R9;
   regs.r10    = vex->guest_R10;
   regs.r11    = vex->guest_R11;
   regs.r12    = vex->guest_R12;
   regs.r13    = vex->guest_R13;
   regs.r14    = vex->guest_R14;
   regs.r15    = vex->guest_R15;
   regs.eflags = LibVEX_GuestAMD64_get_rflags(vex);
   regs.rip    = vex->guest_RIP;
   return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &regs);

#elif defined(VGP_ppc32_linux)
   Int rc = 0;
   /* apparently the casting to void* is the Right Thing To Do */
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R0 * 4), (void*)vex->guest_GPR0);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R1 * 4), (void*)vex->guest_GPR1);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R2 * 4), (void*)vex->guest_GPR2);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R3 * 4), (void*)vex->guest_GPR3);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R4 * 4), (void*)vex->guest_GPR4);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R5 * 4), (void*)vex->guest_GPR5);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R6 * 4), (void*)vex->guest_GPR6);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R7 * 4), (void*)vex->guest_GPR7);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R8 * 4), (void*)vex->guest_GPR8);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R9 * 4), (void*)vex->guest_GPR9);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R10 * 4), (void*)vex->guest_GPR10);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R11 * 4), (void*)vex->guest_GPR11);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R12 * 4), (void*)vex->guest_GPR12);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R13 * 4), (void*)vex->guest_GPR13);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R14 * 4), (void*)vex->guest_GPR14);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R15 * 4), (void*)vex->guest_GPR15);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R16 * 4), (void*)vex->guest_GPR16);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R17 * 4), (void*)vex->guest_GPR17);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R18 * 4), (void*)vex->guest_GPR18);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R19 * 4), (void*)vex->guest_GPR19);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R20 * 4), (void*)vex->guest_GPR20);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R21 * 4), (void*)vex->guest_GPR21);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R22 * 4), (void*)vex->guest_GPR22);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R23 * 4), (void*)vex->guest_GPR23);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R24 * 4), (void*)vex->guest_GPR24);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R25 * 4), (void*)vex->guest_GPR25);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R26 * 4), (void*)vex->guest_GPR26);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R27 * 4), (void*)vex->guest_GPR27);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R28 * 4), (void*)vex->guest_GPR28);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R29 * 4), (void*)vex->guest_GPR29);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R30 * 4), (void*)vex->guest_GPR30);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R31 * 4), (void*)vex->guest_GPR31);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_NIP * 4), (void*)vex->guest_CIA);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CCR * 4), (void*)LibVEX_GuestPPC32_get_CR(vex));
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_LNK * 4), (void*)vex->guest_LR);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CTR * 4), (void*)vex->guest_CTR);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_XER * 4), (void*)LibVEX_GuestPPC32_get_XER(vex));
   return rc;

#elif defined(VGP_ppc64_linux)
   I_die_here;

#elif defined(VGP_ppc32_aix5)
   I_die_here;

#elif defined(VGP_ppc64_aix5)
   I_die_here;

#else
#  error Unknown arch
#endif
}

static Int ptrace_setregs(Int pid, VexGuestArchState* vex)
{
#if defined(VGP_x86_linux)
   struct vki_user_regs_struct regs;
   VG_(memset)(&regs, 0, sizeof(regs));
   regs.cs     = vex->guest_CS;
   regs.ss     = vex->guest_SS;
   regs.ds     = vex->guest_DS;
   regs.es     = vex->guest_ES;
   regs.fs     = vex->guest_FS;
   regs.gs     = vex->guest_GS;
   regs.eax    = vex->guest_EAX;
   regs.ebx    = vex->guest_EBX;
   regs.ecx    = vex->guest_ECX;
   regs.edx    = vex->guest_EDX;
   regs.esi    = vex->guest_ESI;
   regs.edi    = vex->guest_EDI;
   regs.ebp    = vex->guest_EBP;
   regs.esp    = vex->guest_ESP;
   regs.eflags = LibVEX_GuestX86_get_eflags(vex);
   regs.eip    = vex->guest_EIP;
   return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &regs);

#elif defined(VGP_amd64_linux)
   struct vki_user_regs_struct regs;
   VG_(memset)(&regs, 0, sizeof(regs));
   regs.rax    = vex->guest_RAX;
   regs.rbx    = vex->guest_RBX;
   regs.rcx    = vex->guest_RCX;
   regs.rdx    = vex->guest_RDX;
   regs.rsi    = vex->guest_RSI;
   regs.rdi    = vex->guest_RDI;
   regs.rbp    = vex->guest_RBP;
   regs.rsp    = vex->guest_RSP;
   regs.r8     = vex->guest_R8;
   regs.r9     = vex->guest_R9;
   regs.r10    = vex->guest_R10;
   regs.r11    = vex->guest_R11;
   regs.r12    = vex->guest_R12;
   regs.r13    = vex->guest_R13;
   regs.r14    = vex->guest_R14;
   regs.r15    = vex->guest_R15;
   regs.eflags = LibVEX_GuestAMD64_get_rflags(vex);
   regs.rip    = vex->guest_RIP;
   /* Set %{c,d,e,f,s,g}s and %{fs,gs}_base (whatever those are) to
      values which don't fail the kernel's sanity checks.  I have no
      idea what these should really be set to.  Anyway, mostly it
      seems that zero is an allowable value, except for %cs and %ss
      which have to have their lowest 2 bits be 11.  See putreg() in
      linux-2.6.23/arch/x86_64/kernel/ptrace.c for the apparently
      relevant sanity checks.  This fixes #145622. */
   regs.cs      = 3;
   regs.ds      = 0;
   regs.es      = 0;
   regs.fs      = 0;
   regs.ss      = 3;
   regs.gs      = 0;
   regs.fs_base = 0;
   regs.gs_base = 0;
   return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &regs);

#elif defined(VGP_ppc32_linux)
   Int rc = 0;
   /* apparently the casting to void* is the Right Thing To Do */
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R0 * 4), (void*)vex->guest_GPR0);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R1 * 4), (void*)vex->guest_GPR1);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R2 * 4), (void*)vex->guest_GPR2);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R3 * 4), (void*)vex->guest_GPR3);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R4 * 4), (void*)vex->guest_GPR4);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R5 * 4), (void*)vex->guest_GPR5);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R6 * 4), (void*)vex->guest_GPR6);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R7 * 4), (void*)vex->guest_GPR7);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R8 * 4), (void*)vex->guest_GPR8);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R9 * 4), (void*)vex->guest_GPR9);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R10 * 4), (void*)vex->guest_GPR10);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R11 * 4), (void*)vex->guest_GPR11);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R12 * 4), (void*)vex->guest_GPR12);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R13 * 4), (void*)vex->guest_GPR13);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R14 * 4), (void*)vex->guest_GPR14);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R15 * 4), (void*)vex->guest_GPR15);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R16 * 4), (void*)vex->guest_GPR16);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R17 * 4), (void*)vex->guest_GPR17);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R18 * 4), (void*)vex->guest_GPR18);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R19 * 4), (void*)vex->guest_GPR19);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R20 * 4), (void*)vex->guest_GPR20);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R21 * 4), (void*)vex->guest_GPR21);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R22 * 4), (void*)vex->guest_GPR22);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R23 * 4), (void*)vex->guest_GPR23);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R24 * 4), (void*)vex->guest_GPR24);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R25 * 4), (void*)vex->guest_GPR25);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R26 * 4), (void*)vex->guest_GPR26);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R27 * 4), (void*)vex->guest_GPR27);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R28 * 4), (void*)vex->guest_GPR28);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R29 * 4), (void*)vex->guest_GPR29);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R30 * 4), (void*)vex->guest_GPR30);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R31 * 4), (void*)vex->guest_GPR31);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_NIP * 4), (void*)vex->guest_CIA);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CCR * 4), (void*)LibVEX_GuestPPC32_get_CR(vex));
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_LNK * 4), (void*)vex->guest_LR);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CTR * 4), (void*)vex->guest_CTR);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_XER * 4), (void*)LibVEX_GuestPPC32_get_XER(vex));
   return rc;

#elif defined(VGP_ppc64_linux)
   Int rc = 0;
   /* FRJ: copied nearly verbatim from the ppc32 case.  I compared the
      vki-ppc64-linux.h with its ppc32 counterpart and saw no
      appreciable differences, other than the registers being 8 bytes
      instead of 4.  No idea why we don't set all of the entries
      declared in vki_pt_regs, but ppc32 doesn't so there must be a
      reason.

      Finally, note that CR and XER are 32 bits even for ppc64 (see
      libvex_guest_ppc64.h), but the vki_pt_regs struct still gives
      them 64 bits. */
   /* apparently the casting to void* is the Right Thing To Do */
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R0 * 8), (void*)vex->guest_GPR0);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R1 * 8), (void*)vex->guest_GPR1);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R2 * 8), (void*)vex->guest_GPR2);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R3 * 8), (void*)vex->guest_GPR3);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R4 * 8), (void*)vex->guest_GPR4);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R5 * 8), (void*)vex->guest_GPR5);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R6 * 8), (void*)vex->guest_GPR6);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R7 * 8), (void*)vex->guest_GPR7);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R8 * 8), (void*)vex->guest_GPR8);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R9 * 8), (void*)vex->guest_GPR9);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R10 * 8), (void*)vex->guest_GPR10);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R11 * 8), (void*)vex->guest_GPR11);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R12 * 8), (void*)vex->guest_GPR12);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R13 * 8), (void*)vex->guest_GPR13);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R14 * 8), (void*)vex->guest_GPR14);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R15 * 8), (void*)vex->guest_GPR15);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R16 * 8), (void*)vex->guest_GPR16);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R17 * 8), (void*)vex->guest_GPR17);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R18 * 8), (void*)vex->guest_GPR18);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R19 * 8), (void*)vex->guest_GPR19);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R20 * 8), (void*)vex->guest_GPR20);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R21 * 8), (void*)vex->guest_GPR21);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R22 * 8), (void*)vex->guest_GPR22);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R23 * 8), (void*)vex->guest_GPR23);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R24 * 8), (void*)vex->guest_GPR24);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R25 * 8), (void*)vex->guest_GPR25);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R26 * 8), (void*)vex->guest_GPR26);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R27 * 8), (void*)vex->guest_GPR27);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R28 * 8), (void*)vex->guest_GPR28);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R29 * 8), (void*)vex->guest_GPR29);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R30 * 8), (void*)vex->guest_GPR30);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R31 * 8), (void*)vex->guest_GPR31);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_NIP * 8), (void*)vex->guest_CIA);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CCR * 8), (void*)(long)LibVEX_GuestPPC64_get_CR(vex));
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_LNK * 8), (void*)vex->guest_LR);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CTR * 8), (void*)vex->guest_CTR);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_XER * 8), (void*)(long)LibVEX_GuestPPC64_get_XER(vex));
   return rc;

#elif defined(VGP_arm_linux)
   struct vki_user_regs_struct uregs;
   VG_(memset)(&uregs, 0, sizeof(uregs));
   uregs.ARM_r0   = vex->guest_R0;
   uregs.ARM_r1   = vex->guest_R1;
   uregs.ARM_r2   = vex->guest_R2;
   uregs.ARM_r3   = vex->guest_R3;
   uregs.ARM_r4   = vex->guest_R4;
   uregs.ARM_r5   = vex->guest_R5;
   uregs.ARM_r6   = vex->guest_R6;
   uregs.ARM_r7   = vex->guest_R7;
   uregs.ARM_r8   = vex->guest_R8;
   uregs.ARM_r9   = vex->guest_R9;
   uregs.ARM_r10  = vex->guest_R10;
   uregs.ARM_fp   = vex->guest_R11;
   uregs.ARM_ip   = vex->guest_R12;
   uregs.ARM_sp   = vex->guest_R13;
   uregs.ARM_lr   = vex->guest_R14;
   uregs.ARM_pc   = vex->guest_R15T;
   uregs.ARM_cpsr = LibVEX_GuestARM_get_cpsr(vex);
   return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &uregs);

#elif defined(VGP_ppc32_aix5)
   I_die_here;

#elif defined(VGP_ppc64_aix5)
   I_die_here;

#elif defined(VGP_x86_darwin)
   I_die_here;

#elif defined(VGP_amd64_darwin)
   I_die_here;

#else
#  error Unknown arch
#endif
}

/* store registers in the guest state (gdbserver_to_valgrind)
   or fetch registers from the guest state (valgrind_to_gdbserver). */
static void transfer_register (ThreadId tid, int abs_regno, void * buf,
                               transfer_direction dir, int size, Bool *mod)
{
   ThreadState* tst = VG_(get_ThreadState)(tid);
   int set = abs_regno / num_regs;
   int regno = abs_regno % num_regs;
   *mod = False;

   VexGuestPPC32State* ppc32 = (VexGuestPPC32State*) get_arch (set, tst);

   switch (regno) {
   // numbers here have to match the order of regs above
   // Attention: gdb order does not match valgrind order.
   case 0:  VG_(transfer) (&ppc32->guest_GPR0,  buf, dir, size, mod); break;
   case 1:  VG_(transfer) (&ppc32->guest_GPR1,  buf, dir, size, mod); break;
   case 2:  VG_(transfer) (&ppc32->guest_GPR2,  buf, dir, size, mod); break;
   case 3:  VG_(transfer) (&ppc32->guest_GPR3,  buf, dir, size, mod); break;
   case 4:  VG_(transfer) (&ppc32->guest_GPR4,  buf, dir, size, mod); break;
   case 5:  VG_(transfer) (&ppc32->guest_GPR5,  buf, dir, size, mod); break;
   case 6:  VG_(transfer) (&ppc32->guest_GPR6,  buf, dir, size, mod); break;
   case 7:  VG_(transfer) (&ppc32->guest_GPR7,  buf, dir, size, mod); break;
   case 8:  VG_(transfer) (&ppc32->guest_GPR8,  buf, dir, size, mod); break;
   case 9:  VG_(transfer) (&ppc32->guest_GPR9,  buf, dir, size, mod); break;
   case 10: VG_(transfer) (&ppc32->guest_GPR10, buf, dir, size, mod); break;
   case 11: VG_(transfer) (&ppc32->guest_GPR11, buf, dir, size, mod); break;
   case 12: VG_(transfer) (&ppc32->guest_GPR12, buf, dir, size, mod); break;
   case 13: VG_(transfer) (&ppc32->guest_GPR13, buf, dir, size, mod); break;
   case 14: VG_(transfer) (&ppc32->guest_GPR14, buf, dir, size, mod); break;
   case 15: VG_(transfer) (&ppc32->guest_GPR15, buf, dir, size, mod); break;
   case 16: VG_(transfer) (&ppc32->guest_GPR16, buf, dir, size, mod); break;
   case 17: VG_(transfer) (&ppc32->guest_GPR17, buf, dir, size, mod); break;
   case 18: VG_(transfer) (&ppc32->guest_GPR18, buf, dir, size, mod); break;
   case 19: VG_(transfer) (&ppc32->guest_GPR19, buf, dir, size, mod); break;
   case 20: VG_(transfer) (&ppc32->guest_GPR20, buf, dir, size, mod); break;
   case 21: VG_(transfer) (&ppc32->guest_GPR21, buf, dir, size, mod); break;
   case 22: VG_(transfer) (&ppc32->guest_GPR22, buf, dir, size, mod); break;
   case 23: VG_(transfer) (&ppc32->guest_GPR23, buf, dir, size, mod); break;
   case 24: VG_(transfer) (&ppc32->guest_GPR24, buf, dir, size, mod); break;
   case 25: VG_(transfer) (&ppc32->guest_GPR25, buf, dir, size, mod); break;
   case 26: VG_(transfer) (&ppc32->guest_GPR26, buf, dir, size, mod); break;
   case 27: VG_(transfer) (&ppc32->guest_GPR27, buf, dir, size, mod); break;
   case 28: VG_(transfer) (&ppc32->guest_GPR28, buf, dir, size, mod); break;
   case 29: VG_(transfer) (&ppc32->guest_GPR29, buf, dir, size, mod); break;
   case 30: VG_(transfer) (&ppc32->guest_GPR30, buf, dir, size, mod); break;
   case 31: VG_(transfer) (&ppc32->guest_GPR31, buf, dir, size, mod); break;
   case 32: VG_(transfer) (&ppc32->guest_VSR0,  buf, dir, size, mod); break;
   case 33: VG_(transfer) (&ppc32->guest_VSR1,  buf, dir, size, mod); break;
   case 34: VG_(transfer) (&ppc32->guest_VSR2,  buf, dir, size, mod); break;
   case 35: VG_(transfer) (&ppc32->guest_VSR3,  buf, dir, size, mod); break;
   case 36: VG_(transfer) (&ppc32->guest_VSR4,  buf, dir, size, mod); break;
   case 37: VG_(transfer) (&ppc32->guest_VSR5,  buf, dir, size, mod); break;
   case 38: VG_(transfer) (&ppc32->guest_VSR6,  buf, dir, size, mod); break;
   case 39: VG_(transfer) (&ppc32->guest_VSR7,  buf, dir, size, mod); break;
   case 40: VG_(transfer) (&ppc32->guest_VSR8,  buf, dir, size, mod); break;
   case 41: VG_(transfer) (&ppc32->guest_VSR9,  buf, dir, size, mod); break;
   case 42: VG_(transfer) (&ppc32->guest_VSR10, buf, dir, size, mod); break;
   case 43: VG_(transfer) (&ppc32->guest_VSR11, buf, dir, size, mod); break;
   case 44: VG_(transfer) (&ppc32->guest_VSR12, buf, dir, size, mod); break;
   case 45: VG_(transfer) (&ppc32->guest_VSR13, buf, dir, size, mod); break;
   case 46: VG_(transfer) (&ppc32->guest_VSR14, buf, dir, size, mod); break;
   case 47: VG_(transfer) (&ppc32->guest_VSR15, buf, dir, size, mod); break;
   case 48: VG_(transfer) (&ppc32->guest_VSR16, buf, dir, size, mod); break;
   case 49: VG_(transfer) (&ppc32->guest_VSR17, buf, dir, size, mod); break;
   case 50: VG_(transfer) (&ppc32->guest_VSR18, buf, dir, size, mod); break;
   case 51: VG_(transfer) (&ppc32->guest_VSR19, buf, dir, size, mod); break;
   case 52: VG_(transfer) (&ppc32->guest_VSR20, buf, dir, size, mod); break;
   case 53: VG_(transfer) (&ppc32->guest_VSR21, buf, dir, size, mod); break;
   case 54: VG_(transfer) (&ppc32->guest_VSR22, buf, dir, size, mod); break;
   case 55: VG_(transfer) (&ppc32->guest_VSR23, buf, dir, size, mod); break;
   case 56: VG_(transfer) (&ppc32->guest_VSR24, buf, dir, size, mod); break;
   case 57: VG_(transfer) (&ppc32->guest_VSR25, buf, dir, size, mod); break;
   case 58: VG_(transfer) (&ppc32->guest_VSR26, buf, dir, size, mod); break;
   case 59: VG_(transfer) (&ppc32->guest_VSR27, buf, dir, size, mod); break;
   case 60: VG_(transfer) (&ppc32->guest_VSR28, buf, dir, size, mod); break;
   case 61: VG_(transfer) (&ppc32->guest_VSR29, buf, dir, size, mod); break;
   case 62: VG_(transfer) (&ppc32->guest_VSR30, buf, dir, size, mod); break;
   case 63: VG_(transfer) (&ppc32->guest_VSR31, buf, dir, size, mod); break;
   case 64: VG_(transfer) (&ppc32->guest_CIA,   buf, dir, size, mod); break;
   case 65: *mod = False; break; // VEX does not model Machine State Register
   case 66: {
      UInt cr = LibVEX_GuestPPC32_get_CR (ppc32);
      if (dir == valgrind_to_gdbserver) {
         VG_(transfer) (&cr, buf, dir, size, mod);
      } else {
         UInt newcr;
         VG_(transfer) (&newcr, buf, dir, size, mod);
         *mod = newcr != cr;
         LibVEX_GuestPPC32_put_CR (newcr, ppc32);
      }
      break;
   }
   case 67: VG_(transfer) (&ppc32->guest_LR,  buf, dir, size, mod); break;
   case 68: VG_(transfer) (&ppc32->guest_CTR, buf, dir, size, mod); break;
   case 69: {
      UInt xer = LibVEX_GuestPPC32_get_XER (ppc32);
      if (dir == valgrind_to_gdbserver) {
         VG_(transfer) (&xer, buf, dir, size, mod);
      } else {
         UInt newxer;
         VG_(transfer) (&newxer, buf, dir, size, mod);
         *mod = newxer != xer;
         LibVEX_GuestPPC32_put_XER (newxer, ppc32);
      }
      break;
   }
   case 70: VG_(transfer) (&ppc32->guest_FPROUND, buf, dir, size, mod); break;
   case 71: *mod = False; break; // GDBTD???? VEX { "orig_r3", 3296, 32 },
   case 72: *mod = False; break; // GDBTD???? VEX { "trap", 3328, 32 },
   case 73: VG_(transfer) (&ppc32->guest_VSR32, buf, dir, size, mod); break;
   case 74: VG_(transfer) (&ppc32->guest_VSR33, buf, dir, size, mod); break;
   case 75: VG_(transfer) (&ppc32->guest_VSR34, buf, dir, size, mod); break;
   case 76: VG_(transfer) (&ppc32->guest_VSR35, buf, dir, size, mod); break;
   case 77: VG_(transfer) (&ppc32->guest_VSR36, buf, dir, size, mod); break;
   case 78: VG_(transfer) (&ppc32->guest_VSR37, buf, dir, size, mod); break;
   case 79: VG_(transfer) (&ppc32->guest_VSR38, buf, dir, size, mod); break;
   case 80: VG_(transfer) (&ppc32->guest_VSR39, buf, dir, size, mod); break;
   case 81: VG_(transfer) (&ppc32->guest_VSR40, buf, dir, size, mod); break;
   case 82: VG_(transfer) (&ppc32->guest_VSR41, buf, dir, size, mod); break;
   case 83: VG_(transfer) (&ppc32->guest_VSR42, buf, dir, size, mod); break;
   case 84: VG_(transfer) (&ppc32->guest_VSR43, buf, dir, size, mod); break;
   case 85: VG_(transfer) (&ppc32->guest_VSR44, buf, dir, size, mod); break;
   case 86: VG_(transfer) (&ppc32->guest_VSR45, buf, dir, size, mod); break;
   case 87: VG_(transfer) (&ppc32->guest_VSR46, buf, dir, size, mod); break;
   case 88: VG_(transfer) (&ppc32->guest_VSR47, buf, dir, size, mod); break;
   case 89: VG_(transfer) (&ppc32->guest_VSR48, buf, dir, size, mod); break;
   case 90: VG_(transfer) (&ppc32->guest_VSR49, buf, dir, size, mod); break;
   case 91: VG_(transfer) (&ppc32->guest_VSR50, buf, dir, size, mod); break;
   case 92: VG_(transfer) (&ppc32->guest_VSR51, buf, dir, size, mod); break;
   case 93: VG_(transfer) (&ppc32->guest_VSR52, buf, dir, size, mod); break;
   case 94: VG_(transfer) (&ppc32->guest_VSR53, buf, dir, size, mod); break;
   case 95: VG_(transfer) (&ppc32->guest_VSR54, buf, dir, size, mod); break;
   case 96: VG_(transfer) (&ppc32->guest_VSR55, buf, dir, size, mod); break;
   case 97: VG_(transfer) (&ppc32->guest_VSR56, buf, dir, size, mod); break;
   case 98: VG_(transfer) (&ppc32->guest_VSR57, buf, dir, size, mod); break;
   case 99: VG_(transfer) (&ppc32->guest_VSR58, buf, dir, size, mod); break;
   case 100: VG_(transfer) (&ppc32->guest_VSR59, buf, dir, size, mod); break;
   case 101: VG_(transfer) (&ppc32->guest_VSR60, buf, dir, size, mod); break;
   case 102: VG_(transfer) (&ppc32->guest_VSR61, buf, dir, size, mod); break;
   case 103: VG_(transfer) (&ppc32->guest_VSR62, buf, dir, size, mod); break;
   case 104: VG_(transfer) (&ppc32->guest_VSR63, buf, dir, size, mod); break;
   case 105: VG_(transfer) (&ppc32->guest_VSCR, buf, dir, size, mod); break;
   case 106: VG_(transfer) (&ppc32->guest_VRSAVE, buf, dir, size, mod); break;
   default: vg_assert(0);
   }
}
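/* Editor's illustration (hypothetical helper, not from the original file):
   fetch the guest program counter of thread 'tid' through the same path
   gdbserver uses.  In the table above, register 64 of set 0 is guest_CIA;
   the shadow-register sets selectable via 'set' are ignored here, so the
   absolute register number is simply 64. */
static UInt example_get_pc_via_transfer ( ThreadId tid )
{
   UInt pc = 0;
   Bool mod = False;
   /* abs_regno = set * num_regs + regno; here set 0, regno 64 (CIA). */
   transfer_register( tid, 64, &pc, valgrind_to_gdbserver, sizeof(pc), &mod );
   return pc;
}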