static Int ptrace_setregs(Int pid, VexGuestArchState* vex) { #if defined(VGP_x86_linux) struct vki_user_regs_struct regs; VG_(memset)(®s, 0, sizeof(regs)); regs.cs = vex->guest_CS; regs.ss = vex->guest_SS; regs.ds = vex->guest_DS; regs.es = vex->guest_ES; regs.fs = vex->guest_FS; regs.gs = vex->guest_GS; regs.eax = vex->guest_EAX; regs.ebx = vex->guest_EBX; regs.ecx = vex->guest_ECX; regs.edx = vex->guest_EDX; regs.esi = vex->guest_ESI; regs.edi = vex->guest_EDI; regs.ebp = vex->guest_EBP; regs.esp = vex->guest_ESP; regs.eflags = LibVEX_GuestX86_get_eflags(vex); regs.eip = vex->guest_EIP; return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, ®s); #elif defined(VGP_amd64_linux) struct vki_user_regs_struct regs; VG_(memset)(®s, 0, sizeof(regs)); regs.rax = vex->guest_RAX; regs.rbx = vex->guest_RBX; regs.rcx = vex->guest_RCX; regs.rdx = vex->guest_RDX; regs.rsi = vex->guest_RSI; regs.rdi = vex->guest_RDI; regs.rbp = vex->guest_RBP; regs.rsp = vex->guest_RSP; regs.r8 = vex->guest_R8; regs.r9 = vex->guest_R9; regs.r10 = vex->guest_R10; regs.r11 = vex->guest_R11; regs.r12 = vex->guest_R12; regs.r13 = vex->guest_R13; regs.r14 = vex->guest_R14; regs.r15 = vex->guest_R15; regs.eflags = LibVEX_GuestAMD64_get_rflags(vex); regs.rip = vex->guest_RIP; /* Set %{c,d,e,f,s,g}s and %{fs,gs}_base (whatever those are) to values which don't fail the kernel's sanity checks. I have no idea what these should really be set to. Anyway, mostly it seems that zero is an allowable value, except for %cs and %ss which have to have their lowest 2 bits be 11. See putreg() in linux-2.6.23/arch/x86_64/kernel/ptrace.c for the apparently relevant sanity checks. This fixes #145622. 
*/ regs.cs = 3; regs.ds = 0; regs.es = 0; regs.fs = 0; regs.ss = 3; regs.gs = 0; regs.fs_base = 0; regs.gs_base = 0; return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, ®s); #elif defined(VGP_ppc32_linux) Int rc = 0; /* apparently the casting to void* is the Right Thing To Do */ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R0 * 4), (void*)vex->guest_GPR0); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R1 * 4), (void*)vex->guest_GPR1); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R2 * 4), (void*)vex->guest_GPR2); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R3 * 4), (void*)vex->guest_GPR3); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R4 * 4), (void*)vex->guest_GPR4); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R5 * 4), (void*)vex->guest_GPR5); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R6 * 4), (void*)vex->guest_GPR6); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R7 * 4), (void*)vex->guest_GPR7); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R8 * 4), (void*)vex->guest_GPR8); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R9 * 4), (void*)vex->guest_GPR9); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R10 * 4), (void*)vex->guest_GPR10); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R11 * 4), (void*)vex->guest_GPR11); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R12 * 4), (void*)vex->guest_GPR12); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R13 * 4), (void*)vex->guest_GPR13); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R14 * 4), (void*)vex->guest_GPR14); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R15 * 4), (void*)vex->guest_GPR15); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R16 * 4), (void*)vex->guest_GPR16); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R17 * 4), (void*)vex->guest_GPR17); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, 
(void*)(VKI_PT_R18 * 4), (void*)vex->guest_GPR18); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R19 * 4), (void*)vex->guest_GPR19); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R20 * 4), (void*)vex->guest_GPR20); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R21 * 4), (void*)vex->guest_GPR21); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R22 * 4), (void*)vex->guest_GPR22); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R23 * 4), (void*)vex->guest_GPR23); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R24 * 4), (void*)vex->guest_GPR24); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R25 * 4), (void*)vex->guest_GPR25); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R26 * 4), (void*)vex->guest_GPR26); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R27 * 4), (void*)vex->guest_GPR27); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R28 * 4), (void*)vex->guest_GPR28); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R29 * 4), (void*)vex->guest_GPR29); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R30 * 4), (void*)vex->guest_GPR30); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R31 * 4), (void*)vex->guest_GPR31); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_NIP * 4), (void*)vex->guest_CIA); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CCR * 4), (void*)LibVEX_GuestPPC32_get_CR(vex)); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_LNK * 4), (void*)vex->guest_LR); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CTR * 4), (void*)vex->guest_CTR); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_XER * 4), (void*)LibVEX_GuestPPC32_get_XER(vex)); return rc; #elif defined(VGP_ppc64_linux) Int rc = 0; /* FRJ: copied nearly verbatim from the ppc32 case. 
I compared the vki-ppc64-linux.h with its ppc32 counterpart and saw no appreciable differences, other than the registers being 8 bytes instead of 4. No idea why we don't set all of the entries declared in vki_pt_regs, but ppc32 doesn't so there must be a reason. Finally, note that CR and XER are 32 bits even for ppc64 (see libvex_guest_ppc64.h), but the vki_pt_regs struct still gives them 64 bits. */ /* apparently the casting to void* is the Right Thing To Do */ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R0 * 8), (void*)vex->guest_GPR0); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R1 * 8), (void*)vex->guest_GPR1); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R2 * 8), (void*)vex->guest_GPR2); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R3 * 8), (void*)vex->guest_GPR3); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R4 * 8), (void*)vex->guest_GPR4); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R5 * 8), (void*)vex->guest_GPR5); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R6 * 8), (void*)vex->guest_GPR6); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R7 * 8), (void*)vex->guest_GPR7); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R8 * 8), (void*)vex->guest_GPR8); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R9 * 8), (void*)vex->guest_GPR9); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R10 * 8), (void*)vex->guest_GPR10); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R11 * 8), (void*)vex->guest_GPR11); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R12 * 8), (void*)vex->guest_GPR12); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R13 * 8), (void*)vex->guest_GPR13); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R14 * 8), (void*)vex->guest_GPR14); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R15 * 8), (void*)vex->guest_GPR15); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, 
(void*)(VKI_PT_R16 * 8), (void*)vex->guest_GPR16); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R17 * 8), (void*)vex->guest_GPR17); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R18 * 8), (void*)vex->guest_GPR18); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R19 * 8), (void*)vex->guest_GPR19); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R20 * 8), (void*)vex->guest_GPR20); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R21 * 8), (void*)vex->guest_GPR21); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R22 * 8), (void*)vex->guest_GPR22); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R23 * 8), (void*)vex->guest_GPR23); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R24 * 8), (void*)vex->guest_GPR24); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R25 * 8), (void*)vex->guest_GPR25); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R26 * 8), (void*)vex->guest_GPR26); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R27 * 8), (void*)vex->guest_GPR27); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R28 * 8), (void*)vex->guest_GPR28); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R29 * 8), (void*)vex->guest_GPR29); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R30 * 8), (void*)vex->guest_GPR30); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R31 * 8), (void*)vex->guest_GPR31); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_NIP * 8), (void*)vex->guest_CIA); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CCR * 8), (void*)(long)LibVEX_GuestPPC64_get_CR(vex)); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_LNK * 8), (void*)vex->guest_LR); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CTR * 8), (void*)vex->guest_CTR); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_XER * 8), (void*)(long)LibVEX_GuestPPC64_get_XER(vex)); return rc; #elif 
defined(VGP_arm_linux) struct vki_user_regs_struct uregs; VG_(memset)(&uregs, 0, sizeof(uregs)); uregs.ARM_r0 = vex->guest_R0; uregs.ARM_r1 = vex->guest_R1; uregs.ARM_r2 = vex->guest_R2; uregs.ARM_r3 = vex->guest_R3; uregs.ARM_r4 = vex->guest_R4; uregs.ARM_r5 = vex->guest_R5; uregs.ARM_r6 = vex->guest_R6; uregs.ARM_r7 = vex->guest_R7; uregs.ARM_r8 = vex->guest_R8; uregs.ARM_r9 = vex->guest_R9; uregs.ARM_r10 = vex->guest_R10; uregs.ARM_fp = vex->guest_R11; uregs.ARM_ip = vex->guest_R12; uregs.ARM_sp = vex->guest_R13; uregs.ARM_lr = vex->guest_R14; // Remove the T bit from the bottom of R15T. It will get shipped // over in CPSR.T instead, since LibVEX_GuestARM_get_cpsr copies // it from R15T[0]. uregs.ARM_pc = vex->guest_R15T & 0xFFFFFFFE; uregs.ARM_cpsr = LibVEX_GuestARM_get_cpsr(vex); return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &uregs); #elif defined(VGP_arm64_linux) I_die_here; //ATC struct vki_user_pt_regs uregs; VG_(memset)(&uregs, 0, sizeof(uregs)); uregs.regs[0] = vex->guest_X0; uregs.regs[1] = vex->guest_X1; uregs.regs[2] = vex->guest_X2; uregs.regs[3] = vex->guest_X3; uregs.regs[4] = vex->guest_X4; uregs.regs[5] = vex->guest_X5; uregs.regs[6] = vex->guest_X6; uregs.regs[7] = vex->guest_X7; uregs.regs[8] = vex->guest_X8; uregs.regs[9] = vex->guest_X9; uregs.regs[10] = vex->guest_X10; uregs.regs[11] = vex->guest_X11; uregs.regs[12] = vex->guest_X12; uregs.regs[13] = vex->guest_X13; uregs.regs[14] = vex->guest_X14; uregs.regs[15] = vex->guest_X15; uregs.regs[16] = vex->guest_X16; uregs.regs[17] = vex->guest_X17; uregs.regs[18] = vex->guest_X18; uregs.regs[19] = vex->guest_X19; uregs.regs[20] = vex->guest_X20; uregs.regs[21] = vex->guest_X21; uregs.regs[22] = vex->guest_X22; uregs.regs[23] = vex->guest_X23; uregs.regs[24] = vex->guest_X24; uregs.regs[25] = vex->guest_X25; uregs.regs[26] = vex->guest_X26; uregs.regs[27] = vex->guest_X27; uregs.regs[28] = vex->guest_X28; uregs.regs[29] = vex->guest_X29; uregs.regs[30] = vex->guest_X30; uregs.sp = 
vex->guest_XSP; uregs.pc = vex->guest_PC; uregs.pstate = LibVEX_GuestARM64_get_nzcv(vex); /* is this correct? */ return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &uregs); #elif defined(VGP_x86_darwin) I_die_here; #elif defined(VGP_amd64_darwin) I_die_here; #elif defined(VGP_s390x_linux) struct vki_user_regs_struct regs; vki_ptrace_area pa; /* We don't set the psw mask and start at offset 8 */ pa.vki_len = (unsigned long) ®s.per_info - (unsigned long) ®s.psw.addr; pa.vki_process_addr = (unsigned long) ®s.psw.addr; pa.vki_kernel_addr = 8; VG_(memset)(®s, 0, sizeof(regs)); regs.psw.addr = vex->guest_IA; /* We don't set the mask */ regs.gprs[0] = vex->guest_r0; regs.gprs[1] = vex->guest_r1; regs.gprs[2] = vex->guest_r2; regs.gprs[3] = vex->guest_r3; regs.gprs[4] = vex->guest_r4; regs.gprs[5] = vex->guest_r5; regs.gprs[6] = vex->guest_r6; regs.gprs[7] = vex->guest_r7; regs.gprs[8] = vex->guest_r8; regs.gprs[9] = vex->guest_r9; regs.gprs[10] = vex->guest_r10; regs.gprs[11] = vex->guest_r11; regs.gprs[12] = vex->guest_r12; regs.gprs[13] = vex->guest_r13; regs.gprs[14] = vex->guest_r14; regs.gprs[15] = vex->guest_r15; regs.acrs[0] = vex->guest_a0; regs.acrs[1] = vex->guest_a1; regs.acrs[2] = vex->guest_a2; regs.acrs[3] = vex->guest_a3; regs.acrs[4] = vex->guest_a4; regs.acrs[5] = vex->guest_a5; regs.acrs[6] = vex->guest_a6; regs.acrs[7] = vex->guest_a7; regs.acrs[8] = vex->guest_a8; regs.acrs[9] = vex->guest_a9; regs.acrs[10] = vex->guest_a10; regs.acrs[11] = vex->guest_a11; regs.acrs[12] = vex->guest_a12; regs.acrs[13] = vex->guest_a13; regs.acrs[14] = vex->guest_a14; regs.acrs[15] = vex->guest_a15; /* only used for system call restart and friends, just use r2 */ regs.orig_gpr2 = vex->guest_r2; regs.fp_regs.fprs[0].ui = vex->guest_f0; regs.fp_regs.fprs[1].ui = vex->guest_f1; regs.fp_regs.fprs[2].ui = vex->guest_f2; regs.fp_regs.fprs[3].ui = vex->guest_f3; regs.fp_regs.fprs[4].ui = vex->guest_f4; regs.fp_regs.fprs[5].ui = vex->guest_f5; regs.fp_regs.fprs[6].ui = 
vex->guest_f6; regs.fp_regs.fprs[7].ui = vex->guest_f7; regs.fp_regs.fprs[8].ui = vex->guest_f8; regs.fp_regs.fprs[9].ui = vex->guest_f9; regs.fp_regs.fprs[10].ui = vex->guest_f10; regs.fp_regs.fprs[11].ui = vex->guest_f11; regs.fp_regs.fprs[12].ui = vex->guest_f12; regs.fp_regs.fprs[13].ui = vex->guest_f13; regs.fp_regs.fprs[14].ui = vex->guest_f14; regs.fp_regs.fprs[15].ui = vex->guest_f15; regs.fp_regs.fpc = vex->guest_fpc; return VG_(ptrace)(VKI_PTRACE_POKEUSR_AREA, pid, &pa, NULL); #elif defined(VGP_mips32_linux) || defined(VGP_mips64_linux) struct vki_user_regs_struct regs; VG_(memset)(®s, 0, sizeof(regs)); regs.MIPS_r0 = vex->guest_r0; regs.MIPS_r1 = vex->guest_r1; regs.MIPS_r2 = vex->guest_r2; regs.MIPS_r3 = vex->guest_r3; regs.MIPS_r4 = vex->guest_r4; regs.MIPS_r5 = vex->guest_r5; regs.MIPS_r6 = vex->guest_r6; regs.MIPS_r7 = vex->guest_r7; regs.MIPS_r8 = vex->guest_r8; regs.MIPS_r9 = vex->guest_r9; regs.MIPS_r10 = vex->guest_r10; regs.MIPS_r11 = vex->guest_r11; regs.MIPS_r12 = vex->guest_r12; regs.MIPS_r13 = vex->guest_r13; regs.MIPS_r14 = vex->guest_r14; regs.MIPS_r15 = vex->guest_r15; regs.MIPS_r16 = vex->guest_r16; regs.MIPS_r17 = vex->guest_r17; regs.MIPS_r18 = vex->guest_r18; regs.MIPS_r19 = vex->guest_r19; regs.MIPS_r20 = vex->guest_r20; regs.MIPS_r21 = vex->guest_r21; regs.MIPS_r22 = vex->guest_r22; regs.MIPS_r23 = vex->guest_r23; regs.MIPS_r24 = vex->guest_r24; regs.MIPS_r25 = vex->guest_r25; regs.MIPS_r26 = vex->guest_r26; regs.MIPS_r27 = vex->guest_r27; regs.MIPS_r28 = vex->guest_r28; regs.MIPS_r29 = vex->guest_r29; regs.MIPS_r30 = vex->guest_r30; regs.MIPS_r31 = vex->guest_r31; return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, ®s); #else # error Unknown arch #endif }
static Int ptrace_setregs(Int pid, VexGuestArchState* vex) { #if defined(VGP_x86_linux) struct vki_user_regs_struct regs; VG_(memset)(®s, 0, sizeof(regs)); regs.cs = vex->guest_CS; regs.ss = vex->guest_SS; regs.ds = vex->guest_DS; regs.es = vex->guest_ES; regs.fs = vex->guest_FS; regs.gs = vex->guest_GS; regs.eax = vex->guest_EAX; regs.ebx = vex->guest_EBX; regs.ecx = vex->guest_ECX; regs.edx = vex->guest_EDX; regs.esi = vex->guest_ESI; regs.edi = vex->guest_EDI; regs.ebp = vex->guest_EBP; regs.esp = vex->guest_ESP; regs.eflags = LibVEX_GuestX86_get_eflags(vex); regs.eip = vex->guest_EIP; return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, ®s); #elif defined(VGP_amd64_linux) struct vki_user_regs_struct regs; VG_(memset)(®s, 0, sizeof(regs)); regs.rax = vex->guest_RAX; regs.rbx = vex->guest_RBX; regs.rcx = vex->guest_RCX; regs.rdx = vex->guest_RDX; regs.rsi = vex->guest_RSI; regs.rdi = vex->guest_RDI; regs.rbp = vex->guest_RBP; regs.rsp = vex->guest_RSP; regs.r8 = vex->guest_R8; regs.r9 = vex->guest_R9; regs.r10 = vex->guest_R10; regs.r11 = vex->guest_R11; regs.r12 = vex->guest_R12; regs.r13 = vex->guest_R13; regs.r14 = vex->guest_R14; regs.r15 = vex->guest_R15; regs.eflags = LibVEX_GuestAMD64_get_rflags(vex); regs.rip = vex->guest_RIP; /* Set %{c,d,e,f,s,g}s and %{fs,gs}_base (whatever those are) to values which don't fail the kernel's sanity checks. I have no idea what these should really be set to. Anyway, mostly it seems that zero is an allowable value, except for %cs and %ss which have to have their lowest 2 bits be 11. See putreg() in linux-2.6.23/arch/x86_64/kernel/ptrace.c for the apparently relevant sanity checks. This fixes #145622. 
*/ regs.cs = 3; regs.ds = 0; regs.es = 0; regs.fs = 0; regs.ss = 3; regs.gs = 0; regs.fs_base = 0; regs.gs_base = 0; return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, ®s); #elif defined(VGP_ppc32_linux) Int rc = 0; /* apparently the casting to void* is the Right Thing To Do */ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R0 * 4), (void*)vex->guest_GPR0); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R1 * 4), (void*)vex->guest_GPR1); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R2 * 4), (void*)vex->guest_GPR2); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R3 * 4), (void*)vex->guest_GPR3); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R4 * 4), (void*)vex->guest_GPR4); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R5 * 4), (void*)vex->guest_GPR5); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R6 * 4), (void*)vex->guest_GPR6); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R7 * 4), (void*)vex->guest_GPR7); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R8 * 4), (void*)vex->guest_GPR8); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R9 * 4), (void*)vex->guest_GPR9); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R10 * 4), (void*)vex->guest_GPR10); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R11 * 4), (void*)vex->guest_GPR11); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R12 * 4), (void*)vex->guest_GPR12); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R13 * 4), (void*)vex->guest_GPR13); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R14 * 4), (void*)vex->guest_GPR14); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R15 * 4), (void*)vex->guest_GPR15); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R16 * 4), (void*)vex->guest_GPR16); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R17 * 4), (void*)vex->guest_GPR17); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, 
(void*)(VKI_PT_R18 * 4), (void*)vex->guest_GPR18); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R19 * 4), (void*)vex->guest_GPR19); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R20 * 4), (void*)vex->guest_GPR20); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R21 * 4), (void*)vex->guest_GPR21); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R22 * 4), (void*)vex->guest_GPR22); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R23 * 4), (void*)vex->guest_GPR23); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R24 * 4), (void*)vex->guest_GPR24); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R25 * 4), (void*)vex->guest_GPR25); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R26 * 4), (void*)vex->guest_GPR26); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R27 * 4), (void*)vex->guest_GPR27); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R28 * 4), (void*)vex->guest_GPR28); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R29 * 4), (void*)vex->guest_GPR29); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R30 * 4), (void*)vex->guest_GPR30); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R31 * 4), (void*)vex->guest_GPR31); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_NIP * 4), (void*)vex->guest_CIA); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CCR * 4), (void*)LibVEX_GuestPPC32_get_CR(vex)); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_LNK * 4), (void*)vex->guest_LR); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CTR * 4), (void*)vex->guest_CTR); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_XER * 4), (void*)LibVEX_GuestPPC32_get_XER(vex)); return rc; #elif defined(VGP_ppc64_linux) Int rc = 0; /* FRJ: copied nearly verbatim from the ppc32 case. 
I compared the vki-ppc64-linux.h with its ppc32 counterpart and saw no appreciable differences, other than the registers being 8 bytes instead of 4. No idea why we don't set all of the entries declared in vki_pt_regs, but ppc32 doesn't so there must be a reason. Finally, note that CR and XER are 32 bits even for ppc64 (see libvex_guest_ppc64.h), but the vki_pt_regs struct still gives them 64 bits. */ /* apparently the casting to void* is the Right Thing To Do */ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R0 * 8), (void*)vex->guest_GPR0); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R1 * 8), (void*)vex->guest_GPR1); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R2 * 8), (void*)vex->guest_GPR2); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R3 * 8), (void*)vex->guest_GPR3); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R4 * 8), (void*)vex->guest_GPR4); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R5 * 8), (void*)vex->guest_GPR5); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R6 * 8), (void*)vex->guest_GPR6); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R7 * 8), (void*)vex->guest_GPR7); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R8 * 8), (void*)vex->guest_GPR8); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R9 * 8), (void*)vex->guest_GPR9); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R10 * 8), (void*)vex->guest_GPR10); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R11 * 8), (void*)vex->guest_GPR11); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R12 * 8), (void*)vex->guest_GPR12); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R13 * 8), (void*)vex->guest_GPR13); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R14 * 8), (void*)vex->guest_GPR14); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R15 * 8), (void*)vex->guest_GPR15); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, 
(void*)(VKI_PT_R16 * 8), (void*)vex->guest_GPR16); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R17 * 8), (void*)vex->guest_GPR17); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R18 * 8), (void*)vex->guest_GPR18); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R19 * 8), (void*)vex->guest_GPR19); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R20 * 8), (void*)vex->guest_GPR20); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R21 * 8), (void*)vex->guest_GPR21); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R22 * 8), (void*)vex->guest_GPR22); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R23 * 8), (void*)vex->guest_GPR23); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R24 * 8), (void*)vex->guest_GPR24); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R25 * 8), (void*)vex->guest_GPR25); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R26 * 8), (void*)vex->guest_GPR26); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R27 * 8), (void*)vex->guest_GPR27); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R28 * 8), (void*)vex->guest_GPR28); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R29 * 8), (void*)vex->guest_GPR29); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R30 * 8), (void*)vex->guest_GPR30); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R31 * 8), (void*)vex->guest_GPR31); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_NIP * 8), (void*)vex->guest_CIA); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CCR * 8), (void*)(long)LibVEX_GuestPPC64_get_CR(vex)); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_LNK * 8), (void*)vex->guest_LR); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CTR * 8), (void*)vex->guest_CTR); rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_XER * 8), (void*)(long)LibVEX_GuestPPC64_get_XER(vex)); return rc; #elif 
defined(VGP_arm_linux) struct vki_user_regs_struct uregs; VG_(memset)(&uregs, 0, sizeof(uregs)); uregs.ARM_r0 = vex->guest_R0; uregs.ARM_r1 = vex->guest_R1; uregs.ARM_r2 = vex->guest_R2; uregs.ARM_r3 = vex->guest_R3; uregs.ARM_r4 = vex->guest_R4; uregs.ARM_r5 = vex->guest_R5; uregs.ARM_r6 = vex->guest_R6; uregs.ARM_r7 = vex->guest_R7; uregs.ARM_r8 = vex->guest_R8; uregs.ARM_r9 = vex->guest_R9; uregs.ARM_r10 = vex->guest_R10; uregs.ARM_fp = vex->guest_R11; uregs.ARM_ip = vex->guest_R12; uregs.ARM_sp = vex->guest_R13; uregs.ARM_lr = vex->guest_R14; uregs.ARM_pc = vex->guest_R15T; uregs.ARM_cpsr = LibVEX_GuestARM_get_cpsr(vex); return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &uregs); #elif defined(VGP_ppc32_aix5) I_die_here; #elif defined(VGP_ppc64_aix5) I_die_here; #elif defined(VGP_x86_darwin) I_die_here; #elif defined(VGP_amd64_darwin) I_die_here; #else # error Unknown arch #endif }
/* Move one register between the gdbserver buffer |buf| and the guest
   state of thread |tid|:
     dir == gdbserver_to_valgrind : store buf into the guest state;
     dir == valgrind_to_gdbserver : fetch guest state into buf.
   |abs_regno| is decomposed into a register-set index (abs_regno /
   num_regs) and a per-set register number (abs_regno % num_regs).
   *mod is set to True by VG_(transfer) iff the destination changed.
   The regno values must match the register list declared elsewhere in
   this file (ARM: R0-R15, 9 placeholder slots, CPSR, D0-D31, FPSCR). */
static void transfer_register (ThreadId tid, int abs_regno, void * buf,
                               transfer_direction dir, int size, Bool *mod)
{
   ThreadState* tst = VG_(get_ThreadState)(tid);
   int set = abs_regno / num_regs;
   int regno = abs_regno % num_regs;
   *mod = False;

   VexGuestARMState* arm = (VexGuestARMState*) get_arch (set, tst);

   switch (regno) {
   /* Core registers R0-R14 map one-to-one onto guest_R0..guest_R14. */
   case 0:  VG_(transfer) (&arm->guest_R0,  buf, dir, size, mod); break;
   case 1:  VG_(transfer) (&arm->guest_R1,  buf, dir, size, mod); break;
   case 2:  VG_(transfer) (&arm->guest_R2,  buf, dir, size, mod); break;
   case 3:  VG_(transfer) (&arm->guest_R3,  buf, dir, size, mod); break;
   case 4:  VG_(transfer) (&arm->guest_R4,  buf, dir, size, mod); break;
   case 5:  VG_(transfer) (&arm->guest_R5,  buf, dir, size, mod); break;
   case 6:  VG_(transfer) (&arm->guest_R6,  buf, dir, size, mod); break;
   case 7:  VG_(transfer) (&arm->guest_R7,  buf, dir, size, mod); break;
   case 8:  VG_(transfer) (&arm->guest_R8,  buf, dir, size, mod); break;
   case 9:  VG_(transfer) (&arm->guest_R9,  buf, dir, size, mod); break;
   case 10: VG_(transfer) (&arm->guest_R10, buf, dir, size, mod); break;
   case 11: VG_(transfer) (&arm->guest_R11, buf, dir, size, mod); break;
   case 12: VG_(transfer) (&arm->guest_R12, buf, dir, size, mod); break;
   case 13: VG_(transfer) (&arm->guest_R13, buf, dir, size, mod); break;
   case 14: VG_(transfer) (&arm->guest_R14, buf, dir, size, mod); break;
   case 15: {
      VG_(transfer) (&arm->guest_R15T, buf, dir, size, mod);
      if (dir == gdbserver_to_valgrind && *mod) {
         /* gdb changed the PC: re-derive the Thumb bit for R15T. */
         arm->guest_R15T = thumb_pc(arm->guest_R15T);
      }
      break;
   }
   /* regno 16..24: placeholder slots with no guest-state backing;
      report them as unmodified. */
   case 16:
   case 17:
   case 18:
   case 19:
   case 20:
   case 21:
   case 22:
   case 23:
   case 24:
      *mod = False;
      break;
   case 25: {
      /* CPSR is synthesized from the guest state on fetch; writes
         from gdb are currently ignored (see the #if 0 below). */
      UInt cpsr = LibVEX_GuestARM_get_cpsr (arm);
      if (dir == valgrind_to_gdbserver) {
         VG_(transfer) (&cpsr, buf, dir, size, mod);
      } else {
#      if 0
         UInt newcpsr;
         VG_(transfer) (&newcpsr, buf, dir, size, mod);
         *mod = newcpsr != cpsr;
         LibVEX_GuestARM_put_flags (newcpsr, arm);
#      else
         *mod = False;
#      endif
      }
      break;
   }
   /* regno 26..57: VFP double registers D0-D31. */
   case 26: VG_(transfer) (&arm->guest_D0,  buf, dir, size, mod); break;
   case 27: VG_(transfer) (&arm->guest_D1,  buf, dir, size, mod); break;
   case 28: VG_(transfer) (&arm->guest_D2,  buf, dir, size, mod); break;
   case 29: VG_(transfer) (&arm->guest_D3,  buf, dir, size, mod); break;
   case 30: VG_(transfer) (&arm->guest_D4,  buf, dir, size, mod); break;
   case 31: VG_(transfer) (&arm->guest_D5,  buf, dir, size, mod); break;
   case 32: VG_(transfer) (&arm->guest_D6,  buf, dir, size, mod); break;
   case 33: VG_(transfer) (&arm->guest_D7,  buf, dir, size, mod); break;
   case 34: VG_(transfer) (&arm->guest_D8,  buf, dir, size, mod); break;
   case 35: VG_(transfer) (&arm->guest_D9,  buf, dir, size, mod); break;
   case 36: VG_(transfer) (&arm->guest_D10, buf, dir, size, mod); break;
   case 37: VG_(transfer) (&arm->guest_D11, buf, dir, size, mod); break;
   case 38: VG_(transfer) (&arm->guest_D12, buf, dir, size, mod); break;
   case 39: VG_(transfer) (&arm->guest_D13, buf, dir, size, mod); break;
   case 40: VG_(transfer) (&arm->guest_D14, buf, dir, size, mod); break;
   case 41: VG_(transfer) (&arm->guest_D15, buf, dir, size, mod); break;
   case 42: VG_(transfer) (&arm->guest_D16, buf, dir, size, mod); break;
   case 43: VG_(transfer) (&arm->guest_D17, buf, dir, size, mod); break;
   case 44: VG_(transfer) (&arm->guest_D18, buf, dir, size, mod); break;
   case 45: VG_(transfer) (&arm->guest_D19, buf, dir, size, mod); break;
   case 46: VG_(transfer) (&arm->guest_D20, buf, dir, size, mod); break;
   case 47: VG_(transfer) (&arm->guest_D21, buf, dir, size, mod); break;
   case 48: VG_(transfer) (&arm->guest_D22, buf, dir, size, mod); break;
   case 49: VG_(transfer) (&arm->guest_D23, buf, dir, size, mod); break;
   case 50: VG_(transfer) (&arm->guest_D24, buf, dir, size, mod); break;
   case 51: VG_(transfer) (&arm->guest_D25, buf, dir, size, mod); break;
   case 52: VG_(transfer) (&arm->guest_D26, buf, dir, size, mod); break;
   case 53: VG_(transfer) (&arm->guest_D27, buf, dir, size, mod); break;
   case 54: VG_(transfer) (&arm->guest_D28, buf, dir, size, mod); break;
   case 55: VG_(transfer) (&arm->guest_D29, buf, dir, size, mod); break;
   case 56: VG_(transfer) (&arm->guest_D30, buf, dir, size, mod); break;
   case 57: VG_(transfer) (&arm->guest_D31, buf, dir, size, mod); break;
   case 58: VG_(transfer) (&arm->guest_FPSCR, buf, dir, size, mod); break;
   default: vg_assert(0);
   }
}
/* store registers in the guest state (gdbserver_to_valgrind)
   or fetch register from the guest state (valgrind_to_gdbserver). */
/* |abs_regno| is decomposed into a register-set index (abs_regno /
   num_regs) and a per-set register number (abs_regno % num_regs);
   *mod is set to True by VG_(transfer) iff the destination changed. */
static void transfer_register (ThreadId tid, int abs_regno, void * buf,
                               transfer_direction dir, int size, Bool *mod)
{
   ThreadState* tst = VG_(get_ThreadState)(tid);
   int set = abs_regno / num_regs;
   int regno = abs_regno % num_regs;
   *mod = False;

   VexGuestARMState* arm = (VexGuestARMState*) get_arch (set, tst);

   switch (regno) {
   // numbers here have to match the order of regs above
   // Attention: gdb order does not match valgrind order.
   case 0:  VG_(transfer) (&arm->guest_R0,  buf, dir, size, mod); break;
   case 1:  VG_(transfer) (&arm->guest_R1,  buf, dir, size, mod); break;
   case 2:  VG_(transfer) (&arm->guest_R2,  buf, dir, size, mod); break;
   case 3:  VG_(transfer) (&arm->guest_R3,  buf, dir, size, mod); break;
   case 4:  VG_(transfer) (&arm->guest_R4,  buf, dir, size, mod); break;
   case 5:  VG_(transfer) (&arm->guest_R5,  buf, dir, size, mod); break;
   case 6:  VG_(transfer) (&arm->guest_R6,  buf, dir, size, mod); break;
   case 7:  VG_(transfer) (&arm->guest_R7,  buf, dir, size, mod); break;
   case 8:  VG_(transfer) (&arm->guest_R8,  buf, dir, size, mod); break;
   case 9:  VG_(transfer) (&arm->guest_R9,  buf, dir, size, mod); break;
   case 10: VG_(transfer) (&arm->guest_R10, buf, dir, size, mod); break;
   case 11: VG_(transfer) (&arm->guest_R11, buf, dir, size, mod); break;
   case 12: VG_(transfer) (&arm->guest_R12, buf, dir, size, mod); break;
   case 13: VG_(transfer) (&arm->guest_R13, buf, dir, size, mod); break;
   case 14: VG_(transfer) (&arm->guest_R14, buf, dir, size, mod); break;
   case 15: {
      VG_(transfer) (&arm->guest_R15T, buf, dir, size, mod);
      if (dir == gdbserver_to_valgrind && *mod) {
         // If gdb is changing the PC, we have to set the thumb bit
         // if needed.
         arm->guest_R15T = thumb_pc(arm->guest_R15T);
      }
      break;
   }
   case 16:
   case 17:
   case 18:
   case 19:
   case 20: /* 9 "empty registers". See struct reg regs above. */
   case 21:
   case 22:
   case 23:
   case 24:
      *mod = False;
      break;
   case 25: {
      /* CPSR is synthesized from the guest state on fetch; writes
         from gdb are currently ignored (see the #if 0 below). */
      UInt cpsr = LibVEX_GuestARM_get_cpsr (arm);
      if (dir == valgrind_to_gdbserver) {
         VG_(transfer) (&cpsr, buf, dir, size, mod);
      } else {
#      if 0
         UInt newcpsr;
         VG_(transfer) (&newcpsr, buf, dir, size, mod);
         *mod = newcpsr != cpsr;
         // GDBTD ???? see FIXME in guest_arm_helpers.c
         LibVEX_GuestARM_put_flags (newcpsr, arm);
#      else
         *mod = False;
#      endif
      }
      break;
   }
   /* regno 26..57: VFP double registers D0-D31. */
   case 26: VG_(transfer) (&arm->guest_D0,  buf, dir, size, mod); break;
   case 27: VG_(transfer) (&arm->guest_D1,  buf, dir, size, mod); break;
   case 28: VG_(transfer) (&arm->guest_D2,  buf, dir, size, mod); break;
   case 29: VG_(transfer) (&arm->guest_D3,  buf, dir, size, mod); break;
   case 30: VG_(transfer) (&arm->guest_D4,  buf, dir, size, mod); break;
   case 31: VG_(transfer) (&arm->guest_D5,  buf, dir, size, mod); break;
   case 32: VG_(transfer) (&arm->guest_D6,  buf, dir, size, mod); break;
   case 33: VG_(transfer) (&arm->guest_D7,  buf, dir, size, mod); break;
   case 34: VG_(transfer) (&arm->guest_D8,  buf, dir, size, mod); break;
   case 35: VG_(transfer) (&arm->guest_D9,  buf, dir, size, mod); break;
   case 36: VG_(transfer) (&arm->guest_D10, buf, dir, size, mod); break;
   case 37: VG_(transfer) (&arm->guest_D11, buf, dir, size, mod); break;
   case 38: VG_(transfer) (&arm->guest_D12, buf, dir, size, mod); break;
   case 39: VG_(transfer) (&arm->guest_D13, buf, dir, size, mod); break;
   case 40: VG_(transfer) (&arm->guest_D14, buf, dir, size, mod); break;
   case 41: VG_(transfer) (&arm->guest_D15, buf, dir, size, mod); break;
   case 42: VG_(transfer) (&arm->guest_D16, buf, dir, size, mod); break;
   case 43: VG_(transfer) (&arm->guest_D17, buf, dir, size, mod); break;
   case 44: VG_(transfer) (&arm->guest_D18, buf, dir, size, mod); break;
   case 45: VG_(transfer) (&arm->guest_D19, buf, dir, size, mod); break;
   case 46: VG_(transfer) (&arm->guest_D20, buf, dir, size, mod); break;
   case 47: VG_(transfer) (&arm->guest_D21, buf, dir, size, mod); break;
   case 48: VG_(transfer) (&arm->guest_D22, buf, dir, size, mod); break;
   case 49: VG_(transfer) (&arm->guest_D23, buf, dir, size, mod); break;
   case 50: VG_(transfer) (&arm->guest_D24, buf, dir, size, mod); break;
   case 51: VG_(transfer) (&arm->guest_D25, buf, dir, size, mod); break;
   case 52: VG_(transfer) (&arm->guest_D26, buf, dir, size, mod); break;
   case 53: VG_(transfer) (&arm->guest_D27, buf, dir, size, mod); break;
   case 54: VG_(transfer) (&arm->guest_D28, buf, dir, size, mod); break;
   case 55: VG_(transfer) (&arm->guest_D29, buf, dir, size, mod); break;
   case 56: VG_(transfer) (&arm->guest_D30, buf, dir, size, mod); break;
   case 57: VG_(transfer) (&arm->guest_D31, buf, dir, size, mod); break;
   case 58: VG_(transfer) (&arm->guest_FPSCR, buf, dir, size, mod); break;
   default: vg_assert(0);
   }
}