Example #1
0
static void x86_thread_state64_from_vex(x86_thread_state64_t *mach, 
                                        VexGuestAMD64State *vex)
{
    mach->__rax = vex->guest_RAX;
    mach->__rbx = vex->guest_RBX;
    mach->__rcx = vex->guest_RCX;
    mach->__rdx = vex->guest_RDX;
    mach->__rdi = vex->guest_RDI;
    mach->__rsi = vex->guest_RSI;
    mach->__rbp = vex->guest_RBP;
    mach->__rsp = vex->guest_RSP;
    mach->__rflags = LibVEX_GuestAMD64_get_rflags(vex);
    mach->__rip = vex->guest_RIP;
    mach->__r8  = vex->guest_R8;
    mach->__r9  = vex->guest_R9;
    mach->__r10 = vex->guest_R10;
    mach->__r11 = vex->guest_R11;
    mach->__r12 = vex->guest_R12;
    mach->__r13 = vex->guest_R13;
    mach->__r14 = vex->guest_R14;
    mach->__r15 = vex->guest_R15;
    /* GrP fixme
    mach->__cs = vex->guest_CS;
    mach->__fs = vex->guest_FS;
    mach->__gs = vex->guest_GS;
    */
}
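/* For reference, the reverse mapping (a hedged sketch, not taken from this
   file): copy each Mach register back into the VEX guest state; the flags
   word goes through LibVEX_GuestAMD64_put_rflags(), which unpacks it into
   the guest CC_* thunk fields. */
static void example_x86_thread_state64_to_vex(const x86_thread_state64_t *mach,
                                              VexGuestAMD64State *vex)
{
    vex->guest_RAX = mach->__rax;
    vex->guest_RBX = mach->__rbx;
    vex->guest_RCX = mach->__rcx;
    vex->guest_RDX = mach->__rdx;
    vex->guest_RDI = mach->__rdi;
    vex->guest_RSI = mach->__rsi;
    vex->guest_RBP = mach->__rbp;
    vex->guest_RSP = mach->__rsp;
    LibVEX_GuestAMD64_put_rflags(mach->__rflags, vex);
    vex->guest_RIP = mach->__rip;
    vex->guest_R8  = mach->__r8;
    vex->guest_R9  = mach->__r9;
    vex->guest_R10 = mach->__r10;
    vex->guest_R11 = mach->__r11;
    vex->guest_R12 = mach->__r12;
    vex->guest_R13 = mach->__r13;
    vex->guest_R14 = mach->__r14;
    vex->guest_R15 = mach->__r15;
}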
/* Create a plausible-looking sigcontext from the thread's
   Vex guest state.
*/
static 
void synth_ucontext(ThreadId tid, const vki_siginfo_t *si,
                    UWord trapno, UWord err, const vki_sigset_t *set, 
                    struct vki_ucontext *uc, struct _vki_fpstate *fpstate)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   struct vki_mcontext *sc = &uc->uc_mcontext;

   VG_(memset)(uc, 0, sizeof(*uc));

   uc->uc_flags = 0;
   uc->uc_link = 0;
   uc->uc_sigmask = *set;
   uc->uc_stack = tst->altstack;
   VG_(memcpy)(&sc->fpstate, fpstate, sizeof(*fpstate));

#  define SC2(reg,REG)  sc->reg = tst->arch.vex.guest_##REG
   SC2(r8,R8);
   SC2(r9,R9);
   SC2(r10,R10);
   SC2(r11,R11);
   SC2(r12,R12);
   SC2(r13,R13);
   SC2(r14,R14);
   SC2(r15,R15);
   SC2(rdi,RDI);
   SC2(rsi,RSI);
   SC2(rbp,RBP);
   SC2(rbx,RBX);
   SC2(rdx,RDX);
   SC2(rax,RAX);
   SC2(rcx,RCX);
   SC2(rsp,RSP);
/*
   SC2(cs,CS);
   SC2(gs,GS);
   XXX
*/
   SC2(rip,RIP);
   sc->addr = (UWord)si->si_addr;
   sc->err = err;
   sc->fpformat = VKI_FPFMT_NODEV;
   sc->len = sizeof(*sc);
   sc->ownedfp = VKI_FPOWNED_NONE;
   sc->rflags = LibVEX_GuestAMD64_get_rflags(&tst->arch.vex);
   sc->trapno = trapno;
#  undef SC2
}
/* Create a plausible-looking sigcontext from the thread's
   Vex guest state.  NOTE: does not fill in the FP or SSE
   bits of sigcontext at the moment.
*/
static
void synth_ucontext(ThreadId tid, const vki_siginfo_t *si,
                    UWord trapno, UWord err, const vki_sigset_t *set,
                    struct vki_ucontext *uc, struct _vki_fpstate *fpstate)
{
    ThreadState *tst = VG_(get_ThreadState)(tid);
    struct vki_sigcontext *sc = &uc->uc_mcontext;

    VG_(memset)(uc, 0, sizeof(*uc));

    uc->uc_flags = 0;
    uc->uc_link = 0;
    uc->uc_sigmask = *set;
    uc->uc_stack = tst->altstack;
    sc->fpstate = fpstate;

    // FIXME: save_i387(&tst->arch, fpstate);

#  define SC2(reg,REG)  sc->reg = tst->arch.vex.guest_##REG
    SC2(r8,R8);
    SC2(r9,R9);
    SC2(r10,R10);
    SC2(r11,R11);
    SC2(r12,R12);
    SC2(r13,R13);
    SC2(r14,R14);
    SC2(r15,R15);
    SC2(rdi,RDI);
    SC2(rsi,RSI);
    SC2(rbp,RBP);
    SC2(rbx,RBX);
    SC2(rdx,RDX);
    SC2(rax,RAX);
    SC2(rcx,RCX);
    SC2(rsp,RSP);

    SC2(rip,RIP);
    sc->eflags = LibVEX_GuestAMD64_get_rflags(&tst->arch.vex);
    // FIXME: SC2(cs,CS);
    // FIXME: SC2(gs,GS);
    // FIXME: SC2(fs,FS);
    sc->trapno = trapno;
    sc->err = err;
#  undef SC2

    sc->cr2 = (UWord)si->_sifields._sigfault._addr;
}
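/* A hypothetical caller sketch (the frame layout below is invented for
   illustration): a signal-frame builder reserves space on the guest stack
   and then fills in the ucontext and FP state via synth_ucontext(). */
struct example_rt_sigframe {
    struct vki_ucontext uContext;
    struct _vki_fpstate fpstate;
};

static void example_build_frame(ThreadId tid, const vki_siginfo_t *si,
                                UWord trapno, UWord err,
                                const vki_sigset_t *mask,
                                struct example_rt_sigframe *frame)
{
    synth_ucontext(tid, si, trapno, err, mask,
                   &frame->uContext, &frame->fpstate);
}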
static void fill_mcontext(ThreadState *tst, struct vki_mcontext *sc)
{
   sc->rax = tst->arch.vex.guest_RAX;
   sc->rcx = tst->arch.vex.guest_RCX;
   sc->rdx = tst->arch.vex.guest_RDX;
   sc->rbx = tst->arch.vex.guest_RBX;
   sc->rbp = tst->arch.vex.guest_RBP;
   sc->rsp = tst->arch.vex.guest_RSP;
   sc->rsi = tst->arch.vex.guest_RSI;
   sc->rdi = tst->arch.vex.guest_RDI;
   sc->r8 = tst->arch.vex.guest_R8;
   sc->r9 = tst->arch.vex.guest_R9;
   sc->r10 = tst->arch.vex.guest_R10;
   sc->r11 = tst->arch.vex.guest_R11;
   sc->r12 = tst->arch.vex.guest_R12;
   sc->r13 = tst->arch.vex.guest_R13;
   sc->r14 = tst->arch.vex.guest_R14;
   sc->r15 = tst->arch.vex.guest_R15;
   sc->rip = tst->arch.vex.guest_RIP;
/*
   Not supported by VEX.
   sc->cs = tst->arch.vex.guest_CS;
   sc->ss = tst->arch.vex.guest_SS;
   sc->ds = tst->arch.vex.guest_DS;
   sc->es = tst->arch.vex.guest_ES;
   sc->fs = tst->arch.vex.guest_FS;
   sc->gs = tst->arch.vex.guest_GS;
*/
   sc->rflags = LibVEX_GuestAMD64_get_rflags(&tst->arch.vex);
/*
   not yet.
   VG_(memcpy)(&sc->fpstate, fpstate, sizeof(*fpstate));
*/
   sc->fpformat = VKI_FPFMT_NODEV;
   sc->ownedfp = VKI_FPOWNED_NONE;
   sc->len = sizeof(*sc);
   VG_(memset)(sc->spare2, 0, sizeof(sc->spare2));
}
static Int ptrace_setregs(Int pid, VexGuestArchState* vex)
{
#if defined(VGP_x86_linux)
   struct vki_user_regs_struct regs;
   VG_(memset)(&regs, 0, sizeof(regs));
   regs.cs     = vex->guest_CS;
   regs.ss     = vex->guest_SS;
   regs.ds     = vex->guest_DS;
   regs.es     = vex->guest_ES;
   regs.fs     = vex->guest_FS;
   regs.gs     = vex->guest_GS;
   regs.eax    = vex->guest_EAX;
   regs.ebx    = vex->guest_EBX;
   regs.ecx    = vex->guest_ECX;
   regs.edx    = vex->guest_EDX;
   regs.esi    = vex->guest_ESI;
   regs.edi    = vex->guest_EDI;
   regs.ebp    = vex->guest_EBP;
   regs.esp    = vex->guest_ESP;
   regs.eflags = LibVEX_GuestX86_get_eflags(vex);
   regs.eip    = vex->guest_EIP;
   return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &regs);

#elif defined(VGP_amd64_linux)
   struct vki_user_regs_struct regs;
   VG_(memset)(&regs, 0, sizeof(regs));
   regs.rax    = vex->guest_RAX;
   regs.rbx    = vex->guest_RBX;
   regs.rcx    = vex->guest_RCX;
   regs.rdx    = vex->guest_RDX;
   regs.rsi    = vex->guest_RSI;
   regs.rdi    = vex->guest_RDI;
   regs.rbp    = vex->guest_RBP;
   regs.rsp    = vex->guest_RSP;
   regs.r8     = vex->guest_R8;
   regs.r9     = vex->guest_R9;
   regs.r10    = vex->guest_R10;
   regs.r11    = vex->guest_R11;
   regs.r12    = vex->guest_R12;
   regs.r13    = vex->guest_R13;
   regs.r14    = vex->guest_R14;
   regs.r15    = vex->guest_R15;
   regs.eflags = LibVEX_GuestAMD64_get_rflags(vex);
   regs.rip    = vex->guest_RIP;
   /* Set %{c,d,e,f,s,g}s and %{fs,gs}_base (whatever those are) to
      values which don't fail the kernel's sanity checks.  I have no
      idea what these should really be set to.  Anyway, mostly it
      seems that zero is an allowable value, except for %cs and %ss
      which have to have their lowest 2 bits be 11.  See putreg() in
      linux-2.6.23/arch/x86_64/kernel/ptrace.c for the apparently
      relevant sanity checks.  This fixes #145622. */
   regs.cs      = 3;
   regs.ds      = 0;
   regs.es      = 0;
   regs.fs      = 0;
   regs.ss      = 3;
   regs.gs      = 0;
   regs.fs_base = 0;
   regs.gs_base = 0;
   return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &regs);

#elif defined(VGP_ppc32_linux)
   Int rc = 0;
   /* apparently the casting to void* is the Right Thing To Do */
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R0  * 4), (void*)vex->guest_GPR0);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R1  * 4), (void*)vex->guest_GPR1);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R2  * 4), (void*)vex->guest_GPR2);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R3  * 4), (void*)vex->guest_GPR3);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R4  * 4), (void*)vex->guest_GPR4);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R5  * 4), (void*)vex->guest_GPR5);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R6  * 4), (void*)vex->guest_GPR6);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R7  * 4), (void*)vex->guest_GPR7);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R8  * 4), (void*)vex->guest_GPR8);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R9  * 4), (void*)vex->guest_GPR9);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R10 * 4), (void*)vex->guest_GPR10);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R11 * 4), (void*)vex->guest_GPR11);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R12 * 4), (void*)vex->guest_GPR12);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R13 * 4), (void*)vex->guest_GPR13);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R14 * 4), (void*)vex->guest_GPR14);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R15 * 4), (void*)vex->guest_GPR15);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R16 * 4), (void*)vex->guest_GPR16);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R17 * 4), (void*)vex->guest_GPR17);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R18 * 4), (void*)vex->guest_GPR18);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R19 * 4), (void*)vex->guest_GPR19);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R20 * 4), (void*)vex->guest_GPR20);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R21 * 4), (void*)vex->guest_GPR21);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R22 * 4), (void*)vex->guest_GPR22);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R23 * 4), (void*)vex->guest_GPR23);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R24 * 4), (void*)vex->guest_GPR24);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R25 * 4), (void*)vex->guest_GPR25);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R26 * 4), (void*)vex->guest_GPR26);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R27 * 4), (void*)vex->guest_GPR27);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R28 * 4), (void*)vex->guest_GPR28);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R29 * 4), (void*)vex->guest_GPR29);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R30 * 4), (void*)vex->guest_GPR30);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R31 * 4), (void*)vex->guest_GPR31);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_NIP * 4), (void*)vex->guest_CIA);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CCR * 4),
                     (void*)LibVEX_GuestPPC32_get_CR(vex));
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_LNK * 4), (void*)vex->guest_LR);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CTR * 4), (void*)vex->guest_CTR);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_XER * 4),
                     (void*)LibVEX_GuestPPC32_get_XER(vex));
   return rc;

#elif defined(VGP_ppc64_linux)
   Int rc = 0; 
   /* FRJ: copied nearly verbatim from the ppc32 case. I compared the 
      vki-ppc64-linux.h with its ppc32 counterpart and saw no 
      appreciable differences, other than the registers being 8 bytes 
      instead of 4. No idea why we don't set all of the entries 
      declared in vki_pt_regs, but ppc32 doesn't so there must be a 
      reason. 
 
      Finally, note that CR and XER are 32 bits even for ppc64 (see 
      libvex_guest_ppc64.h), but the vki_pt_regs struct still gives 
      them 64 bits. 
   */ 
   /* apparently the casting to void* is the Right Thing To Do */ 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R0  * 8), (void*)vex->guest_GPR0); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R1  * 8), (void*)vex->guest_GPR1); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R2  * 8), (void*)vex->guest_GPR2); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R3  * 8), (void*)vex->guest_GPR3); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R4  * 8), (void*)vex->guest_GPR4); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R5  * 8), (void*)vex->guest_GPR5); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R6  * 8), (void*)vex->guest_GPR6); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R7  * 8), (void*)vex->guest_GPR7); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R8  * 8), (void*)vex->guest_GPR8); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R9  * 8), (void*)vex->guest_GPR9); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R10 * 8), (void*)vex->guest_GPR10); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R11 * 8), (void*)vex->guest_GPR11); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R12 * 8), (void*)vex->guest_GPR12); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R13 * 8), (void*)vex->guest_GPR13); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R14 * 8), (void*)vex->guest_GPR14); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R15 * 8), (void*)vex->guest_GPR15); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R16 * 8), (void*)vex->guest_GPR16); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R17 * 8), (void*)vex->guest_GPR17); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R18 * 8), (void*)vex->guest_GPR18); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R19 * 8), (void*)vex->guest_GPR19); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R20 * 8), (void*)vex->guest_GPR20); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R21 * 8), (void*)vex->guest_GPR21); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R22 * 8), (void*)vex->guest_GPR22); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R23 * 8), (void*)vex->guest_GPR23); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R24 * 8), (void*)vex->guest_GPR24); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R25 * 8), (void*)vex->guest_GPR25); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R26 * 8), (void*)vex->guest_GPR26); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R27 * 8), (void*)vex->guest_GPR27); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R28 * 8), (void*)vex->guest_GPR28); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R29 * 8), (void*)vex->guest_GPR29); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R30 * 8), (void*)vex->guest_GPR30); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R31 * 8), (void*)vex->guest_GPR31); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_NIP * 8), (void*)vex->guest_CIA); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CCR * 8), 
                                              (void*)(long)LibVEX_GuestPPC64_get_CR(vex)); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_LNK * 8), (void*)vex->guest_LR); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CTR * 8), (void*)vex->guest_CTR); 
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_XER * 8), 
                                              (void*)(long)LibVEX_GuestPPC64_get_XER(vex)); 
   return rc; 

#elif defined(VGP_arm_linux)
   struct vki_user_regs_struct uregs;
   VG_(memset)(&uregs, 0, sizeof(uregs));
   uregs.ARM_r0   = vex->guest_R0; 
   uregs.ARM_r1   = vex->guest_R1; 
   uregs.ARM_r2   = vex->guest_R2; 
   uregs.ARM_r3   = vex->guest_R3; 
   uregs.ARM_r4   = vex->guest_R4; 
   uregs.ARM_r5   = vex->guest_R5; 
   uregs.ARM_r6   = vex->guest_R6; 
   uregs.ARM_r7   = vex->guest_R7; 
   uregs.ARM_r8   = vex->guest_R8; 
   uregs.ARM_r9   = vex->guest_R9; 
   uregs.ARM_r10  = vex->guest_R10; 
   uregs.ARM_fp   = vex->guest_R11; 
   uregs.ARM_ip   = vex->guest_R12; 
   uregs.ARM_sp   = vex->guest_R13; 
   uregs.ARM_lr   = vex->guest_R14; 
   // Remove the T bit from the bottom of R15T.  It will get shipped
   // over in CPSR.T instead, since LibVEX_GuestARM_get_cpsr copies
   // it from R15T[0].
   uregs.ARM_pc   = vex->guest_R15T & 0xFFFFFFFE;
   uregs.ARM_cpsr = LibVEX_GuestARM_get_cpsr(vex);
   return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &uregs);

#elif defined(VGP_arm64_linux)
   I_die_here;
   //ATC
   struct vki_user_pt_regs uregs;
   VG_(memset)(&uregs, 0, sizeof(uregs));
   uregs.regs[0]  = vex->guest_X0;
   uregs.regs[1]  = vex->guest_X1;
   uregs.regs[2]  = vex->guest_X2;
   uregs.regs[3]  = vex->guest_X3;
   uregs.regs[4]  = vex->guest_X4;
   uregs.regs[5]  = vex->guest_X5;
   uregs.regs[6]  = vex->guest_X6;
   uregs.regs[7]  = vex->guest_X7;
   uregs.regs[8]  = vex->guest_X8;
   uregs.regs[9]  = vex->guest_X9;
   uregs.regs[10] = vex->guest_X10;
   uregs.regs[11] = vex->guest_X11;
   uregs.regs[12] = vex->guest_X12;
   uregs.regs[13] = vex->guest_X13;
   uregs.regs[14] = vex->guest_X14;
   uregs.regs[15] = vex->guest_X15;
   uregs.regs[16] = vex->guest_X16;
   uregs.regs[17] = vex->guest_X17;
   uregs.regs[18] = vex->guest_X18;
   uregs.regs[19] = vex->guest_X19;
   uregs.regs[20] = vex->guest_X20;
   uregs.regs[21] = vex->guest_X21;
   uregs.regs[22] = vex->guest_X22;
   uregs.regs[23] = vex->guest_X23;
   uregs.regs[24] = vex->guest_X24;
   uregs.regs[25] = vex->guest_X25;
   uregs.regs[26] = vex->guest_X26;
   uregs.regs[27] = vex->guest_X27;
   uregs.regs[28] = vex->guest_X28;
   uregs.regs[29] = vex->guest_X29;
   uregs.regs[30] = vex->guest_X30;
   uregs.sp       = vex->guest_XSP;
   uregs.pc       = vex->guest_PC;
   uregs.pstate   = LibVEX_GuestARM64_get_nzcv(vex); /* is this correct? */
   return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &uregs);

#elif defined(VGP_x86_darwin)
   I_die_here;

#elif defined(VGP_amd64_darwin)
   I_die_here;

#elif defined(VGP_s390x_linux)
   struct vki_user_regs_struct regs;
   vki_ptrace_area pa;

   /* We don't set the psw mask and start at offset 8 */
   pa.vki_len = (unsigned long) &regs.per_info - (unsigned long) &regs.psw.addr;
   pa.vki_process_addr = (unsigned long) &regs.psw.addr;
   pa.vki_kernel_addr = 8;

   VG_(memset)(&regs, 0, sizeof(regs));
   regs.psw.addr = vex->guest_IA;

   /* We don't set the mask */
   regs.gprs[0] = vex->guest_r0;
   regs.gprs[1] = vex->guest_r1;
   regs.gprs[2] = vex->guest_r2;
   regs.gprs[3] = vex->guest_r3;
   regs.gprs[4] = vex->guest_r4;
   regs.gprs[5] = vex->guest_r5;
   regs.gprs[6] = vex->guest_r6;
   regs.gprs[7] = vex->guest_r7;
   regs.gprs[8] = vex->guest_r8;
   regs.gprs[9] = vex->guest_r9;
   regs.gprs[10] = vex->guest_r10;
   regs.gprs[11] = vex->guest_r11;
   regs.gprs[12] = vex->guest_r12;
   regs.gprs[13] = vex->guest_r13;
   regs.gprs[14] = vex->guest_r14;
   regs.gprs[15] = vex->guest_r15;

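   /* Access registers */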
   regs.acrs[0] = vex->guest_a0;
   regs.acrs[1] = vex->guest_a1;
   regs.acrs[2] = vex->guest_a2;
   regs.acrs[3] = vex->guest_a3;
   regs.acrs[4] = vex->guest_a4;
   regs.acrs[5] = vex->guest_a5;
   regs.acrs[6] = vex->guest_a6;
   regs.acrs[7] = vex->guest_a7;
   regs.acrs[8] = vex->guest_a8;
   regs.acrs[9] = vex->guest_a9;
   regs.acrs[10] = vex->guest_a10;
   regs.acrs[11] = vex->guest_a11;
   regs.acrs[12] = vex->guest_a12;
   regs.acrs[13] = vex->guest_a13;
   regs.acrs[14] = vex->guest_a14;
   regs.acrs[15] = vex->guest_a15;

   /* only used for system call restart and friends, just use r2 */
   regs.orig_gpr2 = vex->guest_r2;

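   /* Floating-point registers and the FP control register */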
   regs.fp_regs.fprs[0].ui = vex->guest_f0;
   regs.fp_regs.fprs[1].ui = vex->guest_f1;
   regs.fp_regs.fprs[2].ui = vex->guest_f2;
   regs.fp_regs.fprs[3].ui = vex->guest_f3;
   regs.fp_regs.fprs[4].ui = vex->guest_f4;
   regs.fp_regs.fprs[5].ui = vex->guest_f5;
   regs.fp_regs.fprs[6].ui = vex->guest_f6;
   regs.fp_regs.fprs[7].ui = vex->guest_f7;
   regs.fp_regs.fprs[8].ui = vex->guest_f8;
   regs.fp_regs.fprs[9].ui = vex->guest_f9;
   regs.fp_regs.fprs[10].ui = vex->guest_f10;
   regs.fp_regs.fprs[11].ui = vex->guest_f11;
   regs.fp_regs.fprs[12].ui = vex->guest_f12;
   regs.fp_regs.fprs[13].ui = vex->guest_f13;
   regs.fp_regs.fprs[14].ui = vex->guest_f14;
   regs.fp_regs.fprs[15].ui = vex->guest_f15;
   regs.fp_regs.fpc = vex->guest_fpc;

   return VG_(ptrace)(VKI_PTRACE_POKEUSR_AREA, pid,  &pa, NULL);

#elif defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
   struct vki_user_regs_struct regs;
   VG_(memset)(&regs, 0, sizeof(regs));
   regs.MIPS_r0     = vex->guest_r0;
   regs.MIPS_r1     = vex->guest_r1;
   regs.MIPS_r2     = vex->guest_r2;
   regs.MIPS_r3     = vex->guest_r3;
   regs.MIPS_r4     = vex->guest_r4;
   regs.MIPS_r5     = vex->guest_r5;
   regs.MIPS_r6     = vex->guest_r6;
   regs.MIPS_r7     = vex->guest_r7;
   regs.MIPS_r8     = vex->guest_r8;
   regs.MIPS_r9     = vex->guest_r9;
   regs.MIPS_r10    = vex->guest_r10;
   regs.MIPS_r11    = vex->guest_r11;
   regs.MIPS_r12    = vex->guest_r12;
   regs.MIPS_r13    = vex->guest_r13;
   regs.MIPS_r14    = vex->guest_r14;
   regs.MIPS_r15    = vex->guest_r15;
   regs.MIPS_r16    = vex->guest_r16;
   regs.MIPS_r17    = vex->guest_r17;
   regs.MIPS_r18    = vex->guest_r18;
   regs.MIPS_r19    = vex->guest_r19;
   regs.MIPS_r20    = vex->guest_r20;
   regs.MIPS_r21    = vex->guest_r21;
   regs.MIPS_r22    = vex->guest_r22;
   regs.MIPS_r23    = vex->guest_r23;
   regs.MIPS_r24    = vex->guest_r24;
   regs.MIPS_r25    = vex->guest_r25;
   regs.MIPS_r26    = vex->guest_r26;
   regs.MIPS_r27    = vex->guest_r27;
   regs.MIPS_r28    = vex->guest_r28;
   regs.MIPS_r29    = vex->guest_r29;
   regs.MIPS_r30    = vex->guest_r30;
   regs.MIPS_r31    = vex->guest_r31;
   return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &regs);

#else
#  error Unknown arch
#endif
}
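/* A hypothetical caller sketch (names invented for illustration): when
   handing a stopped child over to a debugger, the saved VEX guest state
   is pushed into the kernel's view of the thread like this. */
static void example_push_guest_state(Int pid, ThreadId tid)
{
   ThreadState* tst = VG_(get_ThreadState)(tid);
   if (ptrace_setregs(pid, &tst->arch.vex) != 0)
      VG_(message)(Vg_UserMsg,
                   "Cannot set registers in child process %d\n", pid);
}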
Example #6
0
static Int ptrace_setregs(Int pid, VexGuestArchState* vex)
{
#if defined(VGP_x86_linux)
   struct vki_user_regs_struct regs;
   VG_(memset)(&regs, 0, sizeof(regs));
   regs.cs     = vex->guest_CS;
   regs.ss     = vex->guest_SS;
   regs.ds     = vex->guest_DS;
   regs.es     = vex->guest_ES;
   regs.fs     = vex->guest_FS;
   regs.gs     = vex->guest_GS;
   regs.eax    = vex->guest_EAX;
   regs.ebx    = vex->guest_EBX;
   regs.ecx    = vex->guest_ECX;
   regs.edx    = vex->guest_EDX;
   regs.esi    = vex->guest_ESI;
   regs.edi    = vex->guest_EDI;
   regs.ebp    = vex->guest_EBP;
   regs.esp    = vex->guest_ESP;
   regs.eflags = LibVEX_GuestX86_get_eflags(vex);
   regs.eip    = vex->guest_EIP;
   return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &regs);

#elif defined(VGP_amd64_linux)
   struct vki_user_regs_struct regs;
   VG_(memset)(&regs, 0, sizeof(regs));
   regs.rax    = vex->guest_RAX;
   regs.rbx    = vex->guest_RBX;
   regs.rcx    = vex->guest_RCX;
   regs.rdx    = vex->guest_RDX;
   regs.rsi    = vex->guest_RSI;
   regs.rdi    = vex->guest_RDI;
   regs.rbp    = vex->guest_RBP;
   regs.rsp    = vex->guest_RSP;
   regs.r8     = vex->guest_R8;
   regs.r9     = vex->guest_R9;
   regs.r10    = vex->guest_R10;
   regs.r11    = vex->guest_R11;
   regs.r12    = vex->guest_R12;
   regs.r13    = vex->guest_R13;
   regs.r14    = vex->guest_R14;
   regs.r15    = vex->guest_R15;
   regs.eflags = LibVEX_GuestAMD64_get_rflags(vex);
   regs.rip    = vex->guest_RIP;
   return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &regs);

#elif defined(VGP_ppc32_linux)
   Int rc = 0;
   /* apparently the casting to void* is the Right Thing To Do */
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R0  * 4), (void*)vex->guest_GPR0);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R1  * 4), (void*)vex->guest_GPR1);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R2  * 4), (void*)vex->guest_GPR2);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R3  * 4), (void*)vex->guest_GPR3);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R4  * 4), (void*)vex->guest_GPR4);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R5  * 4), (void*)vex->guest_GPR5);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R6  * 4), (void*)vex->guest_GPR6);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R7  * 4), (void*)vex->guest_GPR7);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R8  * 4), (void*)vex->guest_GPR8);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R9  * 4), (void*)vex->guest_GPR9);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R10 * 4), (void*)vex->guest_GPR10);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R11 * 4), (void*)vex->guest_GPR11);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R12 * 4), (void*)vex->guest_GPR12);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R13 * 4), (void*)vex->guest_GPR13);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R14 * 4), (void*)vex->guest_GPR14);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R15 * 4), (void*)vex->guest_GPR15);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R16 * 4), (void*)vex->guest_GPR16);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R17 * 4), (void*)vex->guest_GPR17);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R18 * 4), (void*)vex->guest_GPR18);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R19 * 4), (void*)vex->guest_GPR19);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R20 * 4), (void*)vex->guest_GPR20);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R21 * 4), (void*)vex->guest_GPR21);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R22 * 4), (void*)vex->guest_GPR22);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R23 * 4), (void*)vex->guest_GPR23);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R24 * 4), (void*)vex->guest_GPR24);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R25 * 4), (void*)vex->guest_GPR25);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R26 * 4), (void*)vex->guest_GPR26);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R27 * 4), (void*)vex->guest_GPR27);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R28 * 4), (void*)vex->guest_GPR28);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R29 * 4), (void*)vex->guest_GPR29);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R30 * 4), (void*)vex->guest_GPR30);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R31 * 4), (void*)vex->guest_GPR31);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_NIP * 4), (void*)vex->guest_CIA);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CCR * 4),
                     (void*)LibVEX_GuestPPC32_get_CR(vex));
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_LNK * 4), (void*)vex->guest_LR);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CTR * 4), (void*)vex->guest_CTR);
   rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_XER * 4),
                     (void*)LibVEX_GuestPPC32_get_XER(vex));
   return rc;

#elif defined(VGP_ppc64_linux)
   I_die_here;

#elif defined(VGP_ppc32_aix5)
   I_die_here;

#elif defined(VGP_ppc64_aix5)
   I_die_here;

#else
#  error Unknown arch
#endif
}
Example #7
0
/* Architecture-specific part of VG_(restore_context). */
void ML_(restore_machine_context)(ThreadId tid, vki_ucontext_t *uc,
                                  CorePart part, Bool esp_is_thrptr)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   struct vki_fpchip_state *fs
      = &uc->uc_mcontext.fpregs.fp_reg_set.fpchip_state;

   /* CPU */
   if (uc->uc_flags & VKI_UC_CPU) {
      /* Common registers */
      tst->arch.vex.guest_RIP = uc->uc_mcontext.gregs[VKI_REG_RIP];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RIP], OFFSET_amd64_RIP,
               sizeof(UWord));
      tst->arch.vex.guest_RAX = uc->uc_mcontext.gregs[VKI_REG_RAX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RAX], OFFSET_amd64_RAX,
               sizeof(UWord));
      tst->arch.vex.guest_RBX = uc->uc_mcontext.gregs[VKI_REG_RBX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RBX], OFFSET_amd64_RBX,
               sizeof(UWord));
      tst->arch.vex.guest_RCX = uc->uc_mcontext.gregs[VKI_REG_RCX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RCX], OFFSET_amd64_RCX,
               sizeof(UWord));
      tst->arch.vex.guest_RDX = uc->uc_mcontext.gregs[VKI_REG_RDX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RDX], OFFSET_amd64_RDX,
               sizeof(UWord));
      tst->arch.vex.guest_RBP = uc->uc_mcontext.gregs[VKI_REG_RBP];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RBP], OFFSET_amd64_RBP,
               sizeof(UWord));
      tst->arch.vex.guest_RSI = uc->uc_mcontext.gregs[VKI_REG_RSI];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RSI], OFFSET_amd64_RSI,
               sizeof(UWord));
      tst->arch.vex.guest_RDI = uc->uc_mcontext.gregs[VKI_REG_RDI];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RDI], OFFSET_amd64_RDI,
               sizeof(UWord));
      tst->arch.vex.guest_R8 = uc->uc_mcontext.gregs[VKI_REG_R8];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R8], OFFSET_amd64_R8,
               sizeof(UWord));
      tst->arch.vex.guest_R9 = uc->uc_mcontext.gregs[VKI_REG_R9];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R9], OFFSET_amd64_R9,
               sizeof(UWord));
      tst->arch.vex.guest_R10 = uc->uc_mcontext.gregs[VKI_REG_R10];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R10], OFFSET_amd64_R10,
               sizeof(UWord));
      tst->arch.vex.guest_R11 = uc->uc_mcontext.gregs[VKI_REG_R11];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R11], OFFSET_amd64_R11,
               sizeof(UWord));
      tst->arch.vex.guest_R12 = uc->uc_mcontext.gregs[VKI_REG_R12];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R12], OFFSET_amd64_R12,
               sizeof(UWord));
      tst->arch.vex.guest_R13 = uc->uc_mcontext.gregs[VKI_REG_R13];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R13], OFFSET_amd64_R13,
               sizeof(UWord));
      tst->arch.vex.guest_R14 = uc->uc_mcontext.gregs[VKI_REG_R14];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R14], OFFSET_amd64_R14,
               sizeof(UWord));
      tst->arch.vex.guest_R15 = uc->uc_mcontext.gregs[VKI_REG_R15];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R15], OFFSET_amd64_R15,
               sizeof(UWord));
      tst->arch.vex.guest_RSP = uc->uc_mcontext.gregs[VKI_REG_RSP];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RSP], OFFSET_amd64_RSP,
               sizeof(UWord));

      /* Ignore ERR and TRAPNO. */

      /* Ignore segment registers. */

      /* Segment bases */
      tst->arch.vex.guest_FS_CONST = uc->uc_mcontext.gregs[VKI_REG_FSBASE];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_FSBASE],
               offsetof(VexGuestAMD64State, guest_FS_CONST), sizeof(UWord));

      /* Rflags.  Refer to the x86-solaris variant of this code for a detailed
         description. */
      {
         ULong rflags;
         ULong orig_rflags;
         ULong new_rflags;
         Bool ok_restore = False;

         VG_TRACK(pre_mem_read, part, tid,
                  "restore_machine_context(uc->uc_mcontext.gregs[VKI_REG_RFL])",
                  (Addr)&uc->uc_mcontext.gregs[VKI_REG_RFL], sizeof(UWord));
         rflags = uc->uc_mcontext.gregs[VKI_REG_RFL];
         orig_rflags = LibVEX_GuestAMD64_get_rflags(&tst->arch.vex);
         new_rflags = rflags;
         /* The kernel disallows the ID flag to be changed via the setcontext
            call, thus do the same. */
         if (orig_rflags & VKI_RFLAGS_ID_BIT)
            new_rflags |= VKI_RFLAGS_ID_BIT;
         else
            new_rflags &= ~VKI_RFLAGS_ID_BIT;
         LibVEX_GuestAMD64_put_rflags(new_rflags, &tst->arch.vex);
         VG_TRACK(post_reg_write, part, tid,
                  offsetof(VexGuestAMD64State, guest_CC_DEP1), sizeof(UWord));
         VG_TRACK(post_reg_write, part, tid,
                  offsetof(VexGuestAMD64State, guest_CC_DEP2), sizeof(UWord));

         if (rflags != ~VKI_UC_GUEST_RFLAGS_NEG(uc)) {
            VG_(debugLog)(1, "syswrap-solaris",
                             "The rflags value was restored from an "
                             "explicitly set value in thread %u.\n", tid);
            ok_restore = True;
         }
         else {
            ULong buf[5];
            ULong checksum;

            buf[0] = VKI_UC_GUEST_CC_OP(uc);
            buf[1] = VKI_UC_GUEST_CC_NDEP(uc);
            buf[2] = VKI_UC_GUEST_CC_DEP1(uc);
            buf[3] = VKI_UC_GUEST_CC_DEP2(uc);
            buf[4] = rflags;
            checksum = ML_(fletcher64)((UInt*)&buf,
                                       sizeof(buf) / sizeof(UInt));
            if (checksum == VKI_UC_GUEST_RFLAGS_CHECKSUM(uc)) {
               /* Check ok, the full restoration is possible. */
               VG_(debugLog)(1, "syswrap-solaris",
                                "The CC_* guest state values were fully "
                                "restored in thread %u.\n", tid);
               ok_restore = True;

               tst->arch.vex.guest_CC_OP = VKI_UC_GUEST_CC_OP(uc);
               tst->arch.vex.guest_CC_NDEP = VKI_UC_GUEST_CC_NDEP(uc);
               tst->arch.vex.guest_CC_DEP1 = VKI_UC_GUEST_CC_DEP1(uc);
               VG_TRACK(copy_mem_to_reg, part, tid,
                        (Addr)&VKI_UC_GUEST_CC_DEP1(uc),
                        offsetof(VexGuestAMD64State, guest_CC_DEP1),
                        sizeof(UWord));
               tst->arch.vex.guest_CC_DEP2 = VKI_UC_GUEST_CC_DEP2(uc);
               VG_TRACK(copy_mem_to_reg, part, tid,
                        (Addr)&VKI_UC_GUEST_CC_DEP2(uc),
                        offsetof(VexGuestAMD64State, guest_CC_DEP2),
                        sizeof(UWord));
            }
         }

         if (!ok_restore)
            VG_(debugLog)(1, "syswrap-solaris",
                             "Cannot fully restore the CC_* guest state "
                             "values, using approximate rflags in thread "
                             "%u.\n", tid);
      }
   }

   if (uc->uc_flags & VKI_UC_FPU) {
      /* FPU */
      VexEmNote note;
      SizeT i;

      /* x87 */
      /* Control word */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..cw)",
               (Addr)&fs->cw, sizeof(fs->cw));
      /* Status word */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..sw)",
               (Addr)&fs->sw, sizeof(fs->sw));
      /* Compressed tag word */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..fctw)",
               (Addr)&fs->fctw, sizeof(fs->fctw));
      /* Last x87 opcode */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..fop)",
               (Addr)&fs->fop, sizeof(fs->fop));
      /* Last x87 instruction pointer */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..rip)",
               (Addr)&fs->rip, sizeof(fs->rip));
      /* Last x87 data pointer */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..rdp)",
               (Addr)&fs->rdp, sizeof(fs->rdp));
      /* Media-instruction control and status register */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..mxcsr)",
               (Addr)&fs->mxcsr, sizeof(fs->mxcsr));
      /* Supported features in MXCSR */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..mxcsr_mask)",
               (Addr)&fs->mxcsr_mask, sizeof(fs->mxcsr_mask));

      /* ST registers */
      for (i = 0; i < 8; i++) {
         Addr addr = (Addr)&fs->st[i];
         VG_TRACK(copy_mem_to_reg, part, tid, addr,
                  offsetof(VexGuestAMD64State, guest_FPREG[i]), sizeof(ULong));
      }

      /* XMM registers */
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[0],
               offsetof(VexGuestAMD64State, guest_YMM0), sizeof(U128));
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[1],
               offsetof(VexGuestAMD64State, guest_YMM1), sizeof(U128));
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[2],
               offsetof(VexGuestAMD64State, guest_YMM2), sizeof(U128));
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[3],
               offsetof(VexGuestAMD64State, guest_YMM3), sizeof(U128));
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[4],
               offsetof(VexGuestAMD64State, guest_YMM4), sizeof(U128));
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[5],
               offsetof(VexGuestAMD64State, guest_YMM5), sizeof(U128));
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[6],
               offsetof(VexGuestAMD64State, guest_YMM6), sizeof(U128));
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[7],
               offsetof(VexGuestAMD64State, guest_YMM7), sizeof(U128));

      note = LibVEX_GuestAMD64_fxrstor((HWord)fs, &tst->arch.vex);
      if (note != EmNote_NONE)
         VG_(message)(Vg_UserMsg,
                      "Error restoring FP state in thread %u: %s.\n",
                      tid, LibVEX_EmNote_string(note));
   }
}
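/* An illustrative helper (assumption, mirroring the save path in the next
   example): the saver stashes the bitwise complement of rflags in a spare
   ucontext slot, so the code above can detect whether user code overwrote
   the rflags value before calling setcontext. */
static Bool example_rflags_was_modified(vki_ucontext_t *uc)
{
   ULong rfl = uc->uc_mcontext.gregs[VKI_REG_RFL];
   return rfl != ~VKI_UC_GUEST_RFLAGS_NEG(uc);
}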
Example #8
0
/* Architecture-specific part of VG_(save_context). */
void ML_(save_machine_context)(ThreadId tid, vki_ucontext_t *uc,
                               CorePart part)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   struct vki_fpchip_state *fs
      = &uc->uc_mcontext.fpregs.fp_reg_set.fpchip_state;
   SizeT i;

   /* CPU */
   /* Common registers */
   uc->uc_mcontext.gregs[VKI_REG_RIP] = tst->arch.vex.guest_RIP;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RIP,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RIP], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_RAX] = tst->arch.vex.guest_RAX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RAX,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RAX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_RBX] = tst->arch.vex.guest_RBX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RBX,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RBX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_RCX] = tst->arch.vex.guest_RCX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RCX,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RCX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_RDX] = tst->arch.vex.guest_RDX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RDX,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RDX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_RBP] = tst->arch.vex.guest_RBP;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RBP,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RBP], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_RSI] = tst->arch.vex.guest_RSI;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RSI,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RSI], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_RDI] = tst->arch.vex.guest_RDI;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RDI,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RDI], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_R8] = tst->arch.vex.guest_R8;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R8,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R8], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_R9] = tst->arch.vex.guest_R9;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R9,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R9], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_R10] = tst->arch.vex.guest_R10;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R10,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R10], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_R11] = tst->arch.vex.guest_R11;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R11,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R11], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_R12] = tst->arch.vex.guest_R12;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R12,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R12], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_R13] = tst->arch.vex.guest_R13;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R13,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R13], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_R14] = tst->arch.vex.guest_R14;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R14,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R14], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_R15] = tst->arch.vex.guest_R15;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R15,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R15], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_RSP] = tst->arch.vex.guest_RSP;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RSP,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RSP], sizeof(UWord));

   /* ERR and TRAPNO */
   uc->uc_mcontext.gregs[VKI_REG_ERR] = 0;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_ERR], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_TRAPNO] = 0;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_TRAPNO], sizeof(UWord));

   /* Segment registers */
   /* Valgrind does not support moves from/to segment registers on AMD64.  The
      values returned below are the ones that are set by the kernel when
      a program is started. */
   uc->uc_mcontext.gregs[VKI_REG_CS] = VKI_UCS_SEL;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_CS], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_DS] = 0;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_DS], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_SS] = VKI_UDS_SEL;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_SS], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_ES] = 0;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_ES], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_FS] = 0;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_FS], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_GS] = 0;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_GS], sizeof(UWord));

   /* Segment bases */
   uc->uc_mcontext.gregs[VKI_REG_FSBASE] = tst->arch.vex.guest_FS_CONST;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_FSBASE], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_GSBASE] = 0;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_GSBASE], sizeof(UWord));

   /* Handle rflags.  Refer to the x86-solaris variant of this code for
      a detailed description. */
   uc->uc_mcontext.gregs[VKI_REG_RFL] =
      LibVEX_GuestAMD64_get_rflags(&tst->arch.vex);
   VG_TRACK(post_mem_write, part, tid,
         (Addr)&uc->uc_mcontext.gregs[VKI_REG_RFL], sizeof(UWord));
   VKI_UC_GUEST_CC_OP(uc) = tst->arch.vex.guest_CC_OP;
   VKI_UC_GUEST_CC_NDEP(uc) = tst->arch.vex.guest_CC_NDEP;
   VKI_UC_GUEST_CC_DEP1(uc) = tst->arch.vex.guest_CC_DEP1;
   VG_TRACK(copy_reg_to_mem, part, tid,
            offsetof(VexGuestAMD64State, guest_CC_DEP1),
            (Addr)&VKI_UC_GUEST_CC_DEP1(uc), sizeof(UWord));
   VKI_UC_GUEST_CC_DEP2(uc) = tst->arch.vex.guest_CC_DEP2;
   VG_TRACK(copy_reg_to_mem, part, tid,
            offsetof(VexGuestAMD64State, guest_CC_DEP2),
            (Addr)&VKI_UC_GUEST_CC_DEP2(uc), sizeof(UWord));
   VKI_UC_GUEST_RFLAGS_NEG(uc) = ~uc->uc_mcontext.gregs[VKI_REG_RFL];
   /* Calculate a checksum. */
   {
      ULong buf[5];
      ULong checksum;

      buf[0] = VKI_UC_GUEST_CC_OP(uc);
      buf[1] = VKI_UC_GUEST_CC_NDEP(uc);
      buf[2] = VKI_UC_GUEST_CC_DEP1(uc);
      buf[3] = VKI_UC_GUEST_CC_DEP2(uc);
      buf[4] = uc->uc_mcontext.gregs[VKI_REG_RFL];
      checksum = ML_(fletcher64)((UInt*)&buf, sizeof(buf) / sizeof(UInt));
      VKI_UC_GUEST_RFLAGS_CHECKSUM(uc) = checksum;
   }

   /* FPU */
   /* The fpregset_t structure on amd64 follows the layout that is used by the
      FXSAVE instruction, therefore it is only necessary to call a VEX
      function that simulates this instruction. */
   LibVEX_GuestAMD64_fxsave(&tst->arch.vex, (HWord)fs);

   /* Control word */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->cw, sizeof(fs->cw));
   /* Status word */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->sw, sizeof(fs->sw));
   /* Compressed tag word */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->fctw, sizeof(fs->fctw));
   /* Unused */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->__fx_rsvd,
            sizeof(fs->__fx_rsvd));
   vg_assert(fs->__fx_rsvd == 0);
   /* Last x87 opcode */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->fop, sizeof(fs->fop));
   vg_assert(fs->fop == 0);
   /* Last x87 instruction pointer */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->rip, sizeof(fs->rip));
   vg_assert(fs->rip == 0);
   /* Last x87 data pointer */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->rdp, sizeof(fs->rdp));
   vg_assert(fs->rdp == 0);
   /* Media-instruction control and status register */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->mxcsr, sizeof(fs->mxcsr));
   /* Supported features in MXCSR */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->mxcsr_mask,
            sizeof(fs->mxcsr_mask));

   /* ST registers */
   for (i = 0; i < 8; i++) {
      Addr addr = (Addr)&fs->st[i];
      /* x87 uses 80b FP registers but VEX uses only 64b registers, thus we
         have to lie here. :< */
      VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
               guest_FPREG[i]), addr, sizeof(ULong));
      VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
               guest_FPREG[i]), addr + 8, sizeof(UShort));
   }

   /* XMM registers */
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
            guest_YMM0), (Addr)&fs->xmm[0], sizeof(U128));
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
            guest_YMM1), (Addr)&fs->xmm[1], sizeof(U128));
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
            guest_YMM2), (Addr)&fs->xmm[2], sizeof(U128));
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
            guest_YMM3), (Addr)&fs->xmm[3], sizeof(U128));
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
            guest_YMM4), (Addr)&fs->xmm[4], sizeof(U128));
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
            guest_YMM5), (Addr)&fs->xmm[5], sizeof(U128));
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
            guest_YMM6), (Addr)&fs->xmm[6], sizeof(U128));
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
            guest_YMM7), (Addr)&fs->xmm[7], sizeof(U128));

   /* Status word (sw) at exception */
   fs->status = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->status, sizeof(fs->status));

   /* MXCSR at exception */
   fs->xstatus = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->xstatus,
            sizeof(fs->xstatus));
}
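/* ML_(fletcher64) is used above to seal the CC_* values and rflags
   together.  A minimal sketch of such a checksum (assumption: the real
   Valgrind implementation may differ in detail): two running 32-bit sums,
   the second accumulating the first, packed into one 64-bit result. */
static ULong example_fletcher64(const UInt* buf, SizeT nwords)
{
   UInt sum1 = 0;
   UInt sum2 = 0;
   SizeT i;
   for (i = 0; i < nwords; i++) {
      sum1 += buf[i];   /* wraps mod 2^32 */
      sum2 += sum1;
   }
   return ((ULong)sum2 << 32) | (ULong)sum1;
}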
Example #9
0
static Int ptrace_setregs(Int pid, VexGuestArchState* vex)
{
#if defined(VGP_x86_linux)
    struct vki_user_regs_struct regs;
    VG_(memset)(&regs, 0, sizeof(regs));
    regs.cs     = vex->guest_CS;
    regs.ss     = vex->guest_SS;
    regs.ds     = vex->guest_DS;
    regs.es     = vex->guest_ES;
    regs.fs     = vex->guest_FS;
    regs.gs     = vex->guest_GS;
    regs.eax    = vex->guest_EAX;
    regs.ebx    = vex->guest_EBX;
    regs.ecx    = vex->guest_ECX;
    regs.edx    = vex->guest_EDX;
    regs.esi    = vex->guest_ESI;
    regs.edi    = vex->guest_EDI;
    regs.ebp    = vex->guest_EBP;
    regs.esp    = vex->guest_ESP;
    regs.eflags = LibVEX_GuestX86_get_eflags(vex);
    regs.eip    = vex->guest_EIP;
    return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &regs);

#elif defined(VGP_amd64_linux)
    struct vki_user_regs_struct regs;
    VG_(memset)(&regs, 0, sizeof(regs));
    regs.rax    = vex->guest_RAX;
    regs.rbx    = vex->guest_RBX;
    regs.rcx    = vex->guest_RCX;
    regs.rdx    = vex->guest_RDX;
    regs.rsi    = vex->guest_RSI;
    regs.rdi    = vex->guest_RDI;
    regs.rbp    = vex->guest_RBP;
    regs.rsp    = vex->guest_RSP;
    regs.r8     = vex->guest_R8;
    regs.r9     = vex->guest_R9;
    regs.r10    = vex->guest_R10;
    regs.r11    = vex->guest_R11;
    regs.r12    = vex->guest_R12;
    regs.r13    = vex->guest_R13;
    regs.r14    = vex->guest_R14;
    regs.r15    = vex->guest_R15;
    regs.eflags = LibVEX_GuestAMD64_get_rflags(vex);
    regs.rip    = vex->guest_RIP;
    /* Set %{c,d,e,f,s,g}s and %{fs,gs}_base (whatever those are) to
       values which don't fail the kernel's sanity checks.  I have no
       idea what these should really be set to.  Anyway, mostly it
       seems that zero is an allowable value, except for %cs and %ss
       which have to have their lowest 2 bits be 11.  See putreg() in
       linux-2.6.23/arch/x86_64/kernel/ptrace.c for the apparently
       relevant sanity checks.  This fixes #145622. */
    regs.cs      = 3;
    regs.ds      = 0;
    regs.es      = 0;
    regs.fs      = 0;
    regs.ss      = 3;
    regs.gs      = 0;
    regs.fs_base = 0;
    regs.gs_base = 0;
    return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &regs);

#elif defined(VGP_ppc32_linux)
    Int rc = 0;
    /* apparently the casting to void* is the Right Thing To Do */
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R0  * 4), (void*)vex->guest_GPR0);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R1  * 4), (void*)vex->guest_GPR1);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R2  * 4), (void*)vex->guest_GPR2);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R3  * 4), (void*)vex->guest_GPR3);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R4  * 4), (void*)vex->guest_GPR4);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R5  * 4), (void*)vex->guest_GPR5);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R6  * 4), (void*)vex->guest_GPR6);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R7  * 4), (void*)vex->guest_GPR7);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R8  * 4), (void*)vex->guest_GPR8);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R9  * 4), (void*)vex->guest_GPR9);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R10 * 4), (void*)vex->guest_GPR10);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R11 * 4), (void*)vex->guest_GPR11);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R12 * 4), (void*)vex->guest_GPR12);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R13 * 4), (void*)vex->guest_GPR13);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R14 * 4), (void*)vex->guest_GPR14);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R15 * 4), (void*)vex->guest_GPR15);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R16 * 4), (void*)vex->guest_GPR16);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R17 * 4), (void*)vex->guest_GPR17);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R18 * 4), (void*)vex->guest_GPR18);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R19 * 4), (void*)vex->guest_GPR19);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R20 * 4), (void*)vex->guest_GPR20);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R21 * 4), (void*)vex->guest_GPR21);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R22 * 4), (void*)vex->guest_GPR22);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R23 * 4), (void*)vex->guest_GPR23);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R24 * 4), (void*)vex->guest_GPR24);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R25 * 4), (void*)vex->guest_GPR25);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R26 * 4), (void*)vex->guest_GPR26);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R27 * 4), (void*)vex->guest_GPR27);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R28 * 4), (void*)vex->guest_GPR28);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R29 * 4), (void*)vex->guest_GPR29);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R30 * 4), (void*)vex->guest_GPR30);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R31 * 4), (void*)vex->guest_GPR31);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_NIP * 4), (void*)vex->guest_CIA);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CCR * 4),
                      (void*)LibVEX_GuestPPC32_get_CR(vex));
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_LNK * 4), (void*)vex->guest_LR);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CTR * 4), (void*)vex->guest_CTR);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_XER * 4),
                      (void*)LibVEX_GuestPPC32_get_XER(vex));
    return rc;

#elif defined(VGP_ppc64_linux)
    Int rc = 0;
    /* FRJ: copied nearly verbatim from the ppc32 case. I compared the
       vki-ppc64-linux.h with its ppc32 counterpart and saw no
       appreciable differences, other than the registers being 8 bytes
       instead of 4. No idea why we don't set all of the entries
       declared in vki_pt_regs, but ppc32 doesn't so there must be a
       reason.

       Finally, note that CR and XER are 32 bits even for ppc64 (see
       libvex_guest_ppc64.h), but the vki_pt_regs struct still gives
       them 64 bits.
    */
    /* apparently the casting to void* is the Right Thing To Do */
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R0  * 8), (void*)vex->guest_GPR0);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R1  * 8), (void*)vex->guest_GPR1);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R2  * 8), (void*)vex->guest_GPR2);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R3  * 8), (void*)vex->guest_GPR3);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R4  * 8), (void*)vex->guest_GPR4);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R5  * 8), (void*)vex->guest_GPR5);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R6  * 8), (void*)vex->guest_GPR6);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R7  * 8), (void*)vex->guest_GPR7);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R8  * 8), (void*)vex->guest_GPR8);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R9  * 8), (void*)vex->guest_GPR9);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R10 * 8), (void*)vex->guest_GPR10);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R11 * 8), (void*)vex->guest_GPR11);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R12 * 8), (void*)vex->guest_GPR12);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R13 * 8), (void*)vex->guest_GPR13);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R14 * 8), (void*)vex->guest_GPR14);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R15 * 8), (void*)vex->guest_GPR15);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R16 * 8), (void*)vex->guest_GPR16);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R17 * 8), (void*)vex->guest_GPR17);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R18 * 8), (void*)vex->guest_GPR18);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R19 * 8), (void*)vex->guest_GPR19);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R20 * 8), (void*)vex->guest_GPR20);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R21 * 8), (void*)vex->guest_GPR21);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R22 * 8), (void*)vex->guest_GPR22);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R23 * 8), (void*)vex->guest_GPR23);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R24 * 8), (void*)vex->guest_GPR24);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R25 * 8), (void*)vex->guest_GPR25);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R26 * 8), (void*)vex->guest_GPR26);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R27 * 8), (void*)vex->guest_GPR27);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R28 * 8), (void*)vex->guest_GPR28);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R29 * 8), (void*)vex->guest_GPR29);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R30 * 8), (void*)vex->guest_GPR30);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R31 * 8), (void*)vex->guest_GPR31);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_NIP * 8), (void*)vex->guest_CIA);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CCR * 8),
                      (void*)(long)LibVEX_GuestPPC64_get_CR(vex));
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_LNK * 8), (void*)vex->guest_LR);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CTR * 8), (void*)vex->guest_CTR);
    rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_XER * 8),
                      (void*)(long)LibVEX_GuestPPC64_get_XER(vex));
    return rc;

#elif defined(VGP_arm_linux)
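    /* Unlike the ppc cases above, which poke one register at a time,
       the ARM case fills in a complete vki_user_regs_struct and pushes
       it with a single PTRACE_SETREGS call. */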
    struct vki_user_regs_struct uregs;
    VG_(memset)(&uregs, 0, sizeof(uregs));
    uregs.ARM_r0   = vex->guest_R0;
    uregs.ARM_r1   = vex->guest_R1;
    uregs.ARM_r2   = vex->guest_R2;
    uregs.ARM_r3   = vex->guest_R3;
    uregs.ARM_r4   = vex->guest_R4;
    uregs.ARM_r5   = vex->guest_R5;
    uregs.ARM_r6   = vex->guest_R6;
    uregs.ARM_r7   = vex->guest_R7;
    uregs.ARM_r8   = vex->guest_R8;
    uregs.ARM_r9   = vex->guest_R9;
    uregs.ARM_r10  = vex->guest_R10;
    uregs.ARM_fp   = vex->guest_R11;
    uregs.ARM_ip   = vex->guest_R12;
    uregs.ARM_sp   = vex->guest_R13;
    uregs.ARM_lr   = vex->guest_R14;
    uregs.ARM_pc   = vex->guest_R15T;
    uregs.ARM_cpsr = LibVEX_GuestARM_get_cpsr(vex);
    return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &uregs);

#elif defined(VGP_ppc32_aix5)
    I_die_here;

#elif defined(VGP_ppc64_aix5)
    I_die_here;

#elif defined(VGP_x86_darwin)
    I_die_here;

#elif defined(VGP_amd64_darwin)
    I_die_here;

#else
#  error Unknown arch
#endif
}
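A minimal caller sketch, assuming the helper ending above is named ptrace_setregs (as in Valgrind's coregrind/m_debugger.c) and that pid is already ptrace-attached and stopped: every per-register POKEUSR (or SETREGS) result is folded into rc, so any nonzero return means at least one register write failed.

static Bool push_guest_state ( Int pid, ThreadArchState* arch )
{
   /* Hypothetical wrapper, not from the original source: push the Vex
      guest state into the real registers of a stopped, already-attached
      child.  ptrace_setregs is assumed to name the function above. */
   if (ptrace_setregs(pid, &arch->vex) != 0)
      return False;   /* at least one register write failed */
   return True;
}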
Example #10
/* Store registers into the guest state (gdbserver_to_valgrind)
   or fetch registers from the guest state (valgrind_to_gdbserver). */
static
void transfer_register (ThreadId tid, int abs_regno, void * buf,
                        transfer_direction dir, int size, Bool *mod)
{
    ThreadState* tst = VG_(get_ThreadState)(tid);
    int set = abs_regno / dyn_num_regs;    // 0 = real registers, >0 = shadow sets
    int regno = abs_regno % dyn_num_regs;  // register number within the set
    *mod = False;

    VexGuestAMD64State* amd64 = (VexGuestAMD64State*) get_arch (set, tst);

    switch (regno) {
    // The case numbers here have to match the order of the regs table above.
    // Attention: GDB's register order does not match Valgrind's guest-state order.
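    // For reference, the GDB amd64 numbering the cases below follow:
    //    0-15  rax rbx rcx rdx rsi rdi rbp rsp r8..r15
    //   16-17  rip, eflags
    //   18-23  cs ss ds es fs gs
    //   24-31  st0..st7
    //   32-39  fctrl fstat ftag fiseg fioff foseg fooff fop
    //   40-55  xmm0..xmm15
    //   56-57  mxcsr, orig_rax
    //   58-73  ymm0h..ymm15h (upper 128 bits, AVX)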
    case 0:
        VG_(transfer) (&amd64->guest_RAX, buf, dir, size, mod);
        break;
    case 1:
        VG_(transfer) (&amd64->guest_RBX, buf, dir, size, mod);
        break;
    case 2:
        VG_(transfer) (&amd64->guest_RCX, buf, dir, size, mod);
        break;
    case 3:
        VG_(transfer) (&amd64->guest_RDX, buf, dir, size, mod);
        break;
    case 4:
        VG_(transfer) (&amd64->guest_RSI, buf, dir, size, mod);
        break;
    case 5:
        VG_(transfer) (&amd64->guest_RDI, buf, dir, size, mod);
        break;
    case 6:
        VG_(transfer) (&amd64->guest_RBP, buf, dir, size, mod);
        break;
    case 7:
        VG_(transfer) (&amd64->guest_RSP, buf, dir, size, mod);
        break;
    case 8:
        VG_(transfer) (&amd64->guest_R8,  buf, dir, size, mod);
        break;
    case 9:
        VG_(transfer) (&amd64->guest_R9,  buf, dir, size, mod);
        break;
    case 10:
        VG_(transfer) (&amd64->guest_R10, buf, dir, size, mod);
        break;
    case 11:
        VG_(transfer) (&amd64->guest_R11, buf, dir, size, mod);
        break;
    case 12:
        VG_(transfer) (&amd64->guest_R12, buf, dir, size, mod);
        break;
    case 13:
        VG_(transfer) (&amd64->guest_R13, buf, dir, size, mod);
        break;
    case 14:
        VG_(transfer) (&amd64->guest_R14, buf, dir, size, mod);
        break;
    case 15:
        VG_(transfer) (&amd64->guest_R15, buf, dir, size, mod);
        break;
    case 16:
        VG_(transfer) (&amd64->guest_RIP, buf, dir, size, mod);
        break;
    case 17:
        if (dir == valgrind_to_gdbserver) {
            ULong rflags;
            /* we can only retrieve the real flags (set 0);
               retrieving shadow flags is not supported */
            if (set == 0)
                rflags = LibVEX_GuestAMD64_get_rflags (amd64);
            else
                rflags = 0;
            VG_(transfer) (&rflags, buf, dir, size, mod);
        } else {
            *mod = False; //GDBTD? how do we store rflags in libvex_guest_amd64.h ???
        }
        break;
    case 18:
        *mod = False;
        break; //GDBTD VG_(transfer) (&amd64->guest_CS, buf, dir, size, mod);
    case 19:
        *mod = False;
        break; //GDBTD VG_(transfer) (&amd64->guest_SS, buf, dir, size, mod);
    case 20:
        *mod = False;
        break; //GDBTD VG_(transfer) (&amd64->guest_DS, buf, dir, size, mod);
    case 21:
        *mod = False;
        break; //GDBTD VG_(transfer) (&amd64->guest_ES, buf, dir, size, mod);
    case 22:
        *mod = False;
        break; //GDBTD VG_(transfer) (&amd64->guest_FS, buf, dir, size, mod);
    case 23:
        VG_(transfer) (&amd64->guest_GS_0x60, buf, dir, size, mod);
        break; // unlike cs..fs above, gs has a guest-state field (guest_GS_0x60)
    case 24:
    case 25:
    case 26:
    case 27: /* registers 24 to 31 are the x87 float registers: 80 bits on the GDB side, but stored as 64-bit doubles in Valgrind */
    case 28:
    case 29:
    case 30:
    case 31:
        if (dir == valgrind_to_gdbserver) {
            UChar fpreg80[10];
            /* guest_FPREG has 8 entries; st0..st7 are registers 24..31 */
            convert_f64le_to_f80le ((UChar *)&amd64->guest_FPREG[regno-24],
                                    fpreg80);
            VG_(transfer) (&fpreg80, buf, dir, sizeof(fpreg80), mod);
        } else {
            ULong fpreg64;
            convert_f80le_to_f64le (buf, (UChar *)&fpreg64);
            VG_(transfer) (&amd64->guest_FPREG[regno-24], &fpreg64,
                           dir, sizeof(fpreg64), mod);
        }
        break;
    case 32:
        if (dir == valgrind_to_gdbserver) {
            // vex only models the rounding bits (see libvex_guest_amd64.h);
            // 0x037f is the x87 default control word (all exceptions
            // masked), with the RC (rounding) field in bits 10-11
            UWord value = 0x037f;
            value |= amd64->guest_FPROUND << 10;
            VG_(transfer)(&value, buf, dir, size, mod);
        } else {
            *mod = False; // GDBTD???? VEX equivalent fctrl
        }
        break;
    case 33:
        if (dir == valgrind_to_gdbserver) {
            UWord value = amd64->guest_FC3210;      // x87 C3..C0 condition bits
            value |= (amd64->guest_FTOP & 7) << 11; // TOP lives in bits 11-13
            VG_(transfer)(&value, buf, dir, size, mod);
        } else {
            *mod = False; // GDBTD???? VEX equivalent fstat
        }
        break;
    case 34:
        if (dir == valgrind_to_gdbserver) {
            // vex doesn't model these precisely: guest_FPTAG[i] is only
            // zero/nonzero, so synthesize the 2-bit tag-word fields
            // (00 = valid, 11 = empty) for each of the 8 registers
            UWord value =
                ((amd64->guest_FPTAG[0] ? 0 : 3) << 0)  |
                ((amd64->guest_FPTAG[1] ? 0 : 3) << 2)  |
                ((amd64->guest_FPTAG[2] ? 0 : 3) << 4)  |
                ((amd64->guest_FPTAG[3] ? 0 : 3) << 6)  |
                ((amd64->guest_FPTAG[4] ? 0 : 3) << 8)  |
                ((amd64->guest_FPTAG[5] ? 0 : 3) << 10) |
                ((amd64->guest_FPTAG[6] ? 0 : 3) << 12) |
                ((amd64->guest_FPTAG[7] ? 0 : 3) << 14);
            VG_(transfer)(&value, buf, dir, size, mod);
        } else {
            *mod = False; // GDBTD???? VEX equivalent ftag
        }
        break;
    case 35:
        *mod = False;
        break; // GDBTD ??? equivalent of fiseg
    case 36:
        *mod = False;
        break; // GDBTD ??? equivalent of fioff
    case 37:
        *mod = False;
        break; // GDBTD ??? equivalent of foseg
    case 38:
        *mod = False;
        break; // GDBTD ??? equivalent of fooff
    case 39:
        *mod = False;
        break; // GDBTD ??? equivalent of fop
    case 40:
        VG_(transfer) (&amd64->guest_YMM0[0],  buf, dir, size, mod);
        break;
    case 41:
        VG_(transfer) (&amd64->guest_YMM1[0],  buf, dir, size, mod);
        break;
    case 42:
        VG_(transfer) (&amd64->guest_YMM2[0],  buf, dir, size, mod);
        break;
    case 43:
        VG_(transfer) (&amd64->guest_YMM3[0],  buf, dir, size, mod);
        break;
    case 44:
        VG_(transfer) (&amd64->guest_YMM4[0],  buf, dir, size, mod);
        break;
    case 45:
        VG_(transfer) (&amd64->guest_YMM5[0],  buf, dir, size, mod);
        break;
    case 46:
        VG_(transfer) (&amd64->guest_YMM6[0],  buf, dir, size, mod);
        break;
    case 47:
        VG_(transfer) (&amd64->guest_YMM7[0],  buf, dir, size, mod);
        break;
    case 48:
        VG_(transfer) (&amd64->guest_YMM8[0],  buf, dir, size, mod);
        break;
    case 49:
        VG_(transfer) (&amd64->guest_YMM9[0],  buf, dir, size, mod);
        break;
    case 50:
        VG_(transfer) (&amd64->guest_YMM10[0], buf, dir, size, mod);
        break;
    case 51:
        VG_(transfer) (&amd64->guest_YMM11[0], buf, dir, size, mod);
        break;
    case 52:
        VG_(transfer) (&amd64->guest_YMM12[0], buf, dir, size, mod);
        break;
    case 53:
        VG_(transfer) (&amd64->guest_YMM13[0], buf, dir, size, mod);
        break;
    case 54:
        VG_(transfer) (&amd64->guest_YMM14[0], buf, dir, size, mod);
        break;
    case 55:
        VG_(transfer) (&amd64->guest_YMM15[0], buf, dir, size, mod);
        break;
    case 56:
        if (dir == valgrind_to_gdbserver) {
            // vex only models the rounding bits (see libvex_guest_amd64.h);
            // 0x1f80 is the MXCSR reset value (all exceptions masked),
            // with the RC (rounding) field in bits 13-14
            UWord value = 0x1f80;
            value |= amd64->guest_SSEROUND << 13;
            VG_(transfer)(&value, buf, dir, size, mod);
        } else {
            *mod = False;  // GDBTD???? VEX equivalent mxcsr
        }
        break;
    case 57:
        *mod = False;
        break; // GDBTD???? VEX equivalent of "orig_rax"
    case 58:
        VG_(transfer) (&amd64->guest_YMM0[4],  buf, dir, size, mod);
        break;
    case 59:
        VG_(transfer) (&amd64->guest_YMM1[4],  buf, dir, size, mod);
        break;
    case 60:
        VG_(transfer) (&amd64->guest_YMM2[4],  buf, dir, size, mod);
        break;
    case 61:
        VG_(transfer) (&amd64->guest_YMM3[4],  buf, dir, size, mod);
        break;
    case 62:
        VG_(transfer) (&amd64->guest_YMM4[4],  buf, dir, size, mod);
        break;
    case 63:
        VG_(transfer) (&amd64->guest_YMM5[4],  buf, dir, size, mod);
        break;
    case 64:
        VG_(transfer) (&amd64->guest_YMM6[4],  buf, dir, size, mod);
        break;
    case 65:
        VG_(transfer) (&amd64->guest_YMM7[4],  buf, dir, size, mod);
        break;
    case 66:
        VG_(transfer) (&amd64->guest_YMM8[4],  buf, dir, size, mod);
        break;
    case 67:
        VG_(transfer) (&amd64->guest_YMM9[4],  buf, dir, size, mod);
        break;
    case 68:
        VG_(transfer) (&amd64->guest_YMM10[4], buf, dir, size, mod);
        break;
    case 69:
        VG_(transfer) (&amd64->guest_YMM11[4], buf, dir, size, mod);
        break;
    case 70:
        VG_(transfer) (&amd64->guest_YMM12[4], buf, dir, size, mod);
        break;
    case 71:
        VG_(transfer) (&amd64->guest_YMM13[4], buf, dir, size, mod);
        break;
    case 72:
        VG_(transfer) (&amd64->guest_YMM14[4], buf, dir, size, mod);
        break;
    case 73:
        VG_(transfer) (&amd64->guest_YMM15[4], buf, dir, size, mod);
        break;
    default:
        vg_assert(0);
    }
}
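A small usage sketch, assuming dyn_num_regs and the transfer_register function above are in scope: abs_regno encodes set * dyn_num_regs + regno, where set 0 is the real register set and higher sets are the shadows, so fetching a thread's real RIP (register 16 in the numbering above) looks like this.

static ULong fetch_rip ( ThreadId tid )
{
   /* Hypothetical helper: read the real (set 0) RIP of a thread. */
   ULong rip = 0;
   Bool  mod = False;
   transfer_register (tid, 0 * dyn_num_regs + 16, &rip,
                      valgrind_to_gdbserver, sizeof(rip), &mod);
   return rip;
}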