Example #1
/* Note: this is VG_, not ML_. */
SysRes VG_(am_do_mmap_NO_NOTIFY)( Addr start, SizeT length, UInt prot, 
                                  UInt flags, Int fd, Off64T offset)
{
   SysRes res;
   aspacem_assert(VG_IS_PAGE_ALIGNED(offset));
#  if defined(VGP_x86_linux) || defined(VGP_ppc32_linux) \
      || defined(VGP_arm_linux)
   /* mmap2 uses 4096 chunks even if actual page size is bigger. */
   aspacem_assert((offset % 4096) == 0);
   res = VG_(do_syscall6)(__NR_mmap2, (UWord)start, length,
                          prot, flags, fd, offset / 4096);
#  elif defined(VGP_amd64_linux) || defined(VGP_ppc64_linux) \
        || defined(VGP_s390x_linux) || defined(VGP_mips32_linux)
   res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length, 
                         prot, flags, fd, offset);
#  elif defined(VGP_x86_darwin)
   if (fd == 0  &&  (flags & VKI_MAP_ANONYMOUS)) {
       fd = -1;  // MAP_ANON with fd==0 is EINVAL
   }
   res = VG_(do_syscall7)(__NR_mmap, (UWord)start, length,
                          prot, flags, fd, offset & 0xffffffff, offset >> 32);
#  elif defined(VGP_amd64_darwin)
   if (fd == 0  &&  (flags & VKI_MAP_ANONYMOUS)) {
       fd = -1;  // MAP_ANON with fd==0 is EINVAL
   }
   res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length,
                          prot, flags, (UInt)fd, offset);
#  else
#    error Unknown platform
#  endif
   return res;
}
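
The two offset encodings above are easy to misread: 32-bit Linux mmap2 takes the file offset in 4096-byte units, while the 7-argument Darwin mmap splits the 64-bit offset into low and high 32-bit halves. A minimal, self-contained sketch of just that arithmetic follows; the sample offset and the main wrapper are illustrative only, not part of Valgrind.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
   uint64_t offset = 0x123456000ULL;   /* sample page-aligned file offset */

   /* mmap2 (32-bit Linux) takes the offset in 4096-byte units, so the
      caller must check divisibility first, as the assert above does. */
   if (offset % 4096 != 0)
      return 1;
   uint32_t pgoff = (uint32_t)(offset / 4096);

   /* The 7-argument Darwin mmap passes the 64-bit offset as two 32-bit
      halves: low word first, then high word. */
   uint32_t off_lo = (uint32_t)(offset & 0xffffffff);
   uint32_t off_hi = (uint32_t)(offset >> 32);

   printf("pgoff=0x%x lo=0x%x hi=0x%x\n", pgoff, off_lo, off_hi);
   return 0;
}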
static void notify_tool_of_mmap(Addr a, SizeT len, UInt prot, ULong di_handle)
{
   Bool rr, ww, xx;

   /* 'a' is the return value from a real kernel mmap, hence: */
   vg_assert(VG_IS_PAGE_ALIGNED(a));
   /* whereas len is whatever the syscall supplied.  So: */
   len = VG_PGROUNDUP(len);

   rr = toBool(prot & VKI_PROT_READ);
   ww = toBool(prot & VKI_PROT_WRITE);
   xx = toBool(prot & VKI_PROT_EXEC);

   VG_TRACK( new_mem_mmap, a, len, rr, ww, xx, di_handle );
}
/* ---------------------------------------------------------------------
   mips handler for mmap and mmap2
   ------------------------------------------------------------------ */
static void notify_core_of_mmap(Addr a, SizeT len, UInt prot,
                                UInt flags, Int fd, Off64T offset)
{
   Bool d;

   /* 'a' is the return value from a real kernel mmap, hence: */
   vg_assert(VG_IS_PAGE_ALIGNED(a));
   /* whereas len is whatever the syscall supplied.  So: */
   len = VG_PGROUNDUP(len);

   d = VG_(am_notify_client_mmap)( a, len, prot, flags, fd, offset );

   if (d)
      VG_(discard_translations)( a, (ULong)len,
                                 "notify_core_of_mmap" );
}
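
Both notify functions lean on VG_IS_PAGE_ALIGNED and VG_PGROUNDUP. A minimal sketch of the usual power-of-two mask arithmetic behind such macros, assuming a 4096-byte page; the _DEMO names below are stand-ins, not the real Valgrind definitions.

#include <assert.h>
#include <stddef.h>

#define PAGE_SIZE_DEMO 4096UL   /* assumed power of two */

/* aligned <=> the low log2(page-size) bits are all zero */
#define IS_PAGE_ALIGNED_DEMO(a)  ((((size_t)(a)) & (PAGE_SIZE_DEMO-1)) == 0)

/* round down / up to a page boundary using the same mask */
#define PGROUNDDN_DEMO(a)  (((size_t)(a)) & ~(PAGE_SIZE_DEMO-1))
#define PGROUNDUP_DEMO(a)  PGROUNDDN_DEMO((size_t)(a) + PAGE_SIZE_DEMO-1)

int main(void)
{
   assert(  IS_PAGE_ALIGNED_DEMO(0x2000) );
   assert( !IS_PAGE_ALIGNED_DEMO(0x2001) );
   assert( PGROUNDUP_DEMO(1)      == 4096   );
   assert( PGROUNDUP_DEMO(4096)   == 4096   );  /* already aligned */
   assert( PGROUNDDN_DEMO(0x2fff) == 0x2000 );
   return 0;
}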
Example #4
/* Note: this is VG_, not ML_. */
SysRes VG_(am_do_mmap_NO_NOTIFY)( Addr start, SizeT length, UInt prot, 
                                  UInt flags, UInt fd, Off64T offset)
{
   SysRes res;
   aspacem_assert(VG_IS_PAGE_ALIGNED(offset));
#  if defined(VGP_x86_linux) || defined(VGP_ppc32_linux)
   /* mmap2 uses 4096 chunks even if actual page size is bigger. */
   aspacem_assert((offset % 4096) == 0);
   res = VG_(do_syscall6)(__NR_mmap2, (UWord)start, length,
                          prot, flags, fd, offset / 4096);
#  elif defined(VGP_amd64_linux) || defined(VGP_ppc64_linux) \
        || defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
   res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length, 
                         prot, flags, fd, offset);
#  else
#    error Unknown platform
#  endif
   return res;
}
Example #5
VgStack* VG_(am_alloc_VgStack)( /*OUT*/Addr* initial_sp )
{
   Int      szB;
   SysRes   sres;
   VgStack* stack;
   UInt*    p;
   Int      i;

   /* Allocate the stack. */
   szB = VG_STACK_GUARD_SZB 
         + VG_STACK_ACTIVE_SZB + VG_STACK_GUARD_SZB;

#if !defined(VGPV_ppc64_linux_bgq)
   sres = VG_(am_mmap_anon_float_valgrind)( szB );
   if (sr_isError(sres))
      return NULL;

   stack = (VgStack*)(AddrH)sr_Res(sres);

   aspacem_assert(VG_IS_PAGE_ALIGNED(szB));
   aspacem_assert(VG_IS_PAGE_ALIGNED(stack));

   /* Protect the guard areas. */
   sres = local_do_mprotect_NO_NOTIFY( 
             (Addr) &stack[0], 
             VG_STACK_GUARD_SZB, VKI_PROT_NONE 
          );
   if (sr_isError(sres)) goto protect_failed;
   VG_(am_notify_mprotect)( 
      (Addr) &stack->bytes[0], 
      VG_STACK_GUARD_SZB, VKI_PROT_NONE 
   );

   sres = local_do_mprotect_NO_NOTIFY( 
             (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB], 
             VG_STACK_GUARD_SZB, VKI_PROT_NONE 
          );
   if (sr_isError(sres)) goto protect_failed;
   VG_(am_notify_mprotect)( 
      (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB],
      VG_STACK_GUARD_SZB, VKI_PROT_NONE 
   );
#else
   { sres = VG_(am_mmap_anon_float_valgrind)( szB );
     if (sr_isError(sres))
        return NULL;
     stack = (VgStack*)sr_Res(sres);
   }
#endif

   /* Looks good.  Fill the active area with junk so we can later
      tell how much got used. */

   p = (UInt*)&stack->bytes[VG_STACK_GUARD_SZB];
   for (i = 0; i < VG_STACK_ACTIVE_SZB/sizeof(UInt); i++)
      p[i] = 0xDEADBEEF;

   *initial_sp = (Addr)&stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB];
   *initial_sp -= 8;
   *initial_sp &= ~((Addr)0x1F); /* 32-align it */

   VG_(debugLog)( 1,"aspacem","allocated thread stack at 0x%llx size %d\n",
                  (ULong)(Addr)stack, szB);
   ML_(am_do_sanity_check)();
   return stack;

  protect_failed:
   /* The stack was allocated, but we can't protect it.  Unmap it and
      return NULL (failure). */
   (void)ML_(am_do_munmap_NO_NOTIFY)( (Addr)stack, szB );
   ML_(am_do_sanity_check)();
   return NULL;
}
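
The allocation pattern above (guard area, active area, guard area, with both guards turned PROT_NONE after the mapping succeeds) can be reproduced with plain POSIX calls. A minimal sketch assuming 4096-byte pages; the _DEMO sizes are illustrative stand-ins for the VG_STACK_* constants.

#include <stdio.h>
#include <stdint.h>
#include <sys/mman.h>

#define GUARD_SZB_DEMO  4096          /* one guard page at each end */
#define ACTIVE_SZB_DEMO (64 * 4096)   /* usable stack area */

int main(void)
{
   size_t szB = GUARD_SZB_DEMO + ACTIVE_SZB_DEMO + GUARD_SZB_DEMO;

   unsigned char *stack = mmap(NULL, szB, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   if (stack == MAP_FAILED)
      return 1;

   /* Revoke all access to the two guard areas; an overrun of the
      active area then faults instead of silently corrupting memory. */
   if (mprotect(stack, GUARD_SZB_DEMO, PROT_NONE) != 0 ||
       mprotect(stack + GUARD_SZB_DEMO + ACTIVE_SZB_DEMO,
                GUARD_SZB_DEMO, PROT_NONE) != 0) {
      munmap(stack, szB);
      return 1;
   }

   /* Initial SP: top of the active area, minus a little headroom,
      aligned down to 32 bytes, exactly as the code above does. */
   uintptr_t sp = (uintptr_t)(stack + GUARD_SZB_DEMO + ACTIVE_SZB_DEMO);
   sp -= 8;
   sp &= ~(uintptr_t)0x1F;

   printf("stack at %p, initial sp 0x%lx\n", (void*)stack, (unsigned long)sp);
   munmap(stack, szB);
   return 0;
}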
/* Based on ML_(generic_PRE_sys_mmap) from syswrap-generic.c.
   If we are trying to do mmap with VKI_MAP_SHARED flag we need to align the
   start address on VKI_SHMLBA like we did in
   VG_(am_mmap_file_float_valgrind_flags)
 */
static SysRes mips_PRE_sys_mmap(ThreadId tid,
                                UWord arg1, UWord arg2, UWord arg3,
                                UWord arg4, UWord arg5, Off64T arg6)
{
   Addr       advised;
   SysRes     sres;
   MapRequest mreq;
   Bool       mreq_ok;

   if (arg2 == 0) {
      /* SuSV3 says: If len is zero, mmap() shall fail and no mapping
         shall be established. */
      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }

   if (!VG_IS_PAGE_ALIGNED(arg1)) {
      /* zap any misaligned addresses. */
      /* SuSV3 says misaligned addresses only cause the MAP_FIXED case
         to fail.   Here, we catch them all. */
      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }

   if (!VG_IS_PAGE_ALIGNED(arg6)) {
      /* zap any misaligned offsets. */
      /* SuSV3 says: The off argument is constrained to be aligned and
         sized according to the value returned by sysconf() when
         passed _SC_PAGESIZE or _SC_PAGE_SIZE. */
      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }

   /* Figure out what kind of allocation constraints there are
      (fixed/hint/any), and ask aspacem what we should do. */
   mreq.start = arg1;
   mreq.len   = arg2;
   if (arg4 & VKI_MAP_FIXED) {
      mreq.rkind = MFixed;
   } else
   if (arg1 != 0) {
      mreq.rkind = MHint;
   } else {
      mreq.rkind = MAny;
   }

   if ((VKI_SHMLBA > VKI_PAGE_SIZE) && (VKI_MAP_SHARED & arg4)
       && !(VKI_MAP_FIXED & arg4))
      mreq.len = arg2 + VKI_SHMLBA - VKI_PAGE_SIZE;

   /* Enquire ... */
   advised = VG_(am_get_advisory)( &mreq, True/*client*/, &mreq_ok );

   if ((VKI_SHMLBA > VKI_PAGE_SIZE) && (VKI_MAP_SHARED & arg4)
       && !(VKI_MAP_FIXED & arg4))
      advised = VG_ROUNDUP(advised, VKI_SHMLBA);

   if (!mreq_ok) {
      /* Our request was bounced, so we'd better fail. */
      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }

   /* Otherwise we're OK (so far).  Install aspacem's choice of
      address, and let the mmap go through.  */
   sres = VG_(am_do_mmap_NO_NOTIFY)(advised, arg2, arg3,
                                    arg4 | VKI_MAP_FIXED,
                                    arg5, arg6);

   /* A refinement: it may be that the kernel refused aspacem's choice
      of address.  If we were originally asked for a hinted mapping,
      there is still a last chance: try again at any address.
      Hence: */
   if (mreq.rkind == MHint && sr_isError(sres)) {
      mreq.start = 0;
      mreq.len   = arg2;
      mreq.rkind = MAny;
      advised = VG_(am_get_advisory)( &mreq, True/*client*/, &mreq_ok );
      if (!mreq_ok) {
         /* Our request was bounced, so we'd better fail. */
         return VG_(mk_SysRes_Error)( VKI_EINVAL );
      }
      /* and try again with the kernel */
      sres = VG_(am_do_mmap_NO_NOTIFY)(advised, arg2, arg3,
                                       arg4 | VKI_MAP_FIXED,
                                       arg5, arg6);
   }

   if (!sr_isError(sres)) {
      ULong di_handle;
      /* Notify aspacem. */
      notify_core_of_mmap(
         (Addr)sr_Res(sres), /* addr kernel actually assigned */
         arg2, /* length */
         arg3, /* prot */
         arg4, /* the original flags value */
         arg5, /* fd */
         arg6  /* offset */
      );
      /* Load symbols? */
      di_handle = VG_(di_notify_mmap)( (Addr)sr_Res(sres), 
                                       False/*allow_SkFileV*/, (Int)arg5 );
      /* Notify the tool. */
      notify_tool_of_mmap(
         (Addr)sr_Res(sres), /* addr kernel actually assigned */
         arg2, /* length */
         arg3, /* prot */
         di_handle /* so the tool can refer to the read debuginfo later,
                      if it wants. */
      );
   }

   /* Stay sane */
   if (!sr_isError(sres) && (arg4 & VKI_MAP_FIXED))
      vg_assert(sr_Res(sres) == arg1);

   return sres;
}
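
The SHMLBA handling above is a two-step trick: over-ask by VKI_SHMLBA - VKI_PAGE_SIZE so that some SHMLBA boundary must fall inside the advised range, then round the advised address up to that boundary. A sketch of the arithmetic with illustrative _DEMO constants in place of the real VKI_ values:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE_DEMO 4096UL
#define SHMLBA_DEMO    (4 * PAGE_SIZE_DEMO)   /* cache-alias granularity */

/* round x up to the next multiple of a (a is a power of two) */
static unsigned long roundup(unsigned long x, unsigned long a)
{
   return (x + a - 1) & ~(a - 1);
}

int main(void)
{
   unsigned long len     = 5 * PAGE_SIZE_DEMO;   /* requested length */
   unsigned long advised = 0x40007000UL;   /* page- but not SHMLBA-aligned */

   /* Over-ask so that an SHMLBA boundary must fall inside the range. */
   unsigned long padded = len + SHMLBA_DEMO - PAGE_SIZE_DEMO;

   /* Then the rounded-up start plus the original length still fits. */
   unsigned long start = roundup(advised, SHMLBA_DEMO);
   assert(start + len <= advised + padded);

   printf("advised 0x%lx -> aligned 0x%lx (padded len 0x%lx)\n",
          advised, start, padded);
   return 0;
}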
Example #7
void test_VG_IS_XYZ_ALIGNED(void)
{
   CHECK(   VG_IS_2_ALIGNED(0x0) );
   CHECK( ! VG_IS_2_ALIGNED(0x1) );
   CHECK(   VG_IS_2_ALIGNED(0x2) );
   CHECK( ! VG_IS_2_ALIGNED(0x3) );
   CHECK(   VG_IS_2_ALIGNED(0x4) );
   CHECK( ! VG_IS_2_ALIGNED(0x5) );
   CHECK(   VG_IS_2_ALIGNED(0x6) );
   CHECK( ! VG_IS_2_ALIGNED(0x7) );
   CHECK(   VG_IS_2_ALIGNED(0x8) );
   CHECK( ! VG_IS_2_ALIGNED(0x9) );
   CHECK(   VG_IS_2_ALIGNED(0xa) );
   CHECK( ! VG_IS_2_ALIGNED(0xb) );
   CHECK(   VG_IS_2_ALIGNED(0xc) );
   CHECK( ! VG_IS_2_ALIGNED(0xd) );
   CHECK(   VG_IS_2_ALIGNED(0xe) );
   CHECK( ! VG_IS_2_ALIGNED(0xf) );

   CHECK(   VG_IS_4_ALIGNED(0x0) );
   CHECK( ! VG_IS_4_ALIGNED(0x1) );
   CHECK( ! VG_IS_4_ALIGNED(0x2) );
   CHECK( ! VG_IS_4_ALIGNED(0x3) );
   CHECK(   VG_IS_4_ALIGNED(0x4) );
   CHECK( ! VG_IS_4_ALIGNED(0x5) );
   CHECK( ! VG_IS_4_ALIGNED(0x6) );
   CHECK( ! VG_IS_4_ALIGNED(0x7) );
   CHECK(   VG_IS_4_ALIGNED(0x8) );
   CHECK( ! VG_IS_4_ALIGNED(0x9) );
   CHECK( ! VG_IS_4_ALIGNED(0xa) );
   CHECK( ! VG_IS_4_ALIGNED(0xb) );
   CHECK(   VG_IS_4_ALIGNED(0xc) );
   CHECK( ! VG_IS_4_ALIGNED(0xd) );
   CHECK( ! VG_IS_4_ALIGNED(0xe) );
   CHECK( ! VG_IS_4_ALIGNED(0xf) );

   CHECK(   VG_IS_8_ALIGNED(0x0) );
   CHECK( ! VG_IS_8_ALIGNED(0x1) );
   CHECK( ! VG_IS_8_ALIGNED(0x2) );
   CHECK( ! VG_IS_8_ALIGNED(0x3) );
   CHECK( ! VG_IS_8_ALIGNED(0x4) );
   CHECK( ! VG_IS_8_ALIGNED(0x5) );
   CHECK( ! VG_IS_8_ALIGNED(0x6) );
   CHECK( ! VG_IS_8_ALIGNED(0x7) );
   CHECK(   VG_IS_8_ALIGNED(0x8) );
   CHECK( ! VG_IS_8_ALIGNED(0x9) );
   CHECK( ! VG_IS_8_ALIGNED(0xa) );
   CHECK( ! VG_IS_8_ALIGNED(0xb) );
   CHECK( ! VG_IS_8_ALIGNED(0xc) );
   CHECK( ! VG_IS_8_ALIGNED(0xd) );
   CHECK( ! VG_IS_8_ALIGNED(0xe) );
   CHECK( ! VG_IS_8_ALIGNED(0xf) );

   CHECK(   VG_IS_16_ALIGNED(0x0) );
   CHECK( ! VG_IS_16_ALIGNED(0x1) );
   CHECK( ! VG_IS_16_ALIGNED(0x2) );
   CHECK( ! VG_IS_16_ALIGNED(0x3) );
   CHECK( ! VG_IS_16_ALIGNED(0x4) );
   CHECK( ! VG_IS_16_ALIGNED(0x5) );
   CHECK( ! VG_IS_16_ALIGNED(0x6) );
   CHECK( ! VG_IS_16_ALIGNED(0x7) );
   CHECK( ! VG_IS_16_ALIGNED(0x8) );
   CHECK( ! VG_IS_16_ALIGNED(0x9) );
   CHECK( ! VG_IS_16_ALIGNED(0xa) );
   CHECK( ! VG_IS_16_ALIGNED(0xb) );
   CHECK( ! VG_IS_16_ALIGNED(0xc) );
   CHECK( ! VG_IS_16_ALIGNED(0xd) );
   CHECK( ! VG_IS_16_ALIGNED(0xe) );
   CHECK( ! VG_IS_16_ALIGNED(0xf) );

   CHECK(   VG_IS_WORD_ALIGNED(0x0) );
   CHECK( ! VG_IS_WORD_ALIGNED(0x1) );
   CHECK( ! VG_IS_WORD_ALIGNED(0x2) );
   CHECK( ! VG_IS_WORD_ALIGNED(0x3) );
   // 0x4 case below  
   CHECK( ! VG_IS_WORD_ALIGNED(0x5) );
   CHECK( ! VG_IS_WORD_ALIGNED(0x6) );
   CHECK( ! VG_IS_WORD_ALIGNED(0x7) );
   CHECK(   VG_IS_WORD_ALIGNED(0x8) );
   CHECK( ! VG_IS_WORD_ALIGNED(0x9) );
   CHECK( ! VG_IS_WORD_ALIGNED(0xa) );
   CHECK( ! VG_IS_WORD_ALIGNED(0xb) );
   // 0xc case below  
   CHECK( ! VG_IS_WORD_ALIGNED(0xd) );
   CHECK( ! VG_IS_WORD_ALIGNED(0xe) );
   CHECK( ! VG_IS_WORD_ALIGNED(0xf) );
   if        (4 == sizeof(void*)) {
      CHECK(   VG_IS_WORD_ALIGNED(0x4) );
      CHECK(   VG_IS_WORD_ALIGNED(0xc) );
   } else if (8 == sizeof(void*)) {
      CHECK( ! VG_IS_WORD_ALIGNED(0x4) );
      CHECK( ! VG_IS_WORD_ALIGNED(0xc) );
   } else {
      assert(0);
   }

   CHECK(   VG_IS_PAGE_ALIGNED(0x0) );
   CHECK( ! VG_IS_PAGE_ALIGNED(0x1) );
   CHECK( ! VG_IS_PAGE_ALIGNED(0x2) );
   CHECK( ! VG_IS_PAGE_ALIGNED(0x3) );
   CHECK( ! VG_IS_PAGE_ALIGNED(0x4) );
   CHECK( ! VG_IS_PAGE_ALIGNED(VKI_PAGE_SIZE-1) );
   CHECK(   VG_IS_PAGE_ALIGNED(VKI_PAGE_SIZE  ) );
   CHECK( ! VG_IS_PAGE_ALIGNED(VKI_PAGE_SIZE+1) );
}
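
The CHECK harness is not shown in this excerpt. A minimal sketch of a compatible definition, together with the kind of mask test it exercises; both are assumptions about the surrounding test file, not quotations from it.

#include <stdio.h>

static int failed = 0;

/* report the failing expression and location, but keep running */
#define CHECK(x) \
   do { if (!(x)) { failed = 1; \
        printf("FAIL: %s (%s:%d)\n", #x, __FILE__, __LINE__); } } while (0)

/* the kind of test VG_IS_4_ALIGNED performs: low two bits clear */
#define IS_4_ALIGNED_DEMO(a)  ((((unsigned long)(a)) & 0x3) == 0)

int main(void)
{
   CHECK(   IS_4_ALIGNED_DEMO(0x8) );
   CHECK( ! IS_4_ALIGNED_DEMO(0x9) );
   return failed;
}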
Example #8
static 
Addr setup_client_stack( void*  init_sp,
                         char** orig_envp, 
                         const ExeInfo* info,
                         Addr   clstack_end,
                         SizeT  clstack_max_size )
{
   char **cpp;
   char *strtab;		/* string table */
   char *stringbase;
   Addr *ptr;
   unsigned stringsize;		/* total size of strings in bytes */
   unsigned auxsize;		/* total size of auxv in bytes */
   Int argc;			/* total argc */
   Int envc;			/* total number of env vars */
   unsigned stacksize;		/* total client stack size */
   Addr client_SP;	        /* client stack base (initial SP) */
   Addr clstack_start;
   Int i;
   Bool have_exename;

   vg_assert(VG_IS_PAGE_ALIGNED(clstack_end+1));
   vg_assert( VG_(args_for_client) );

   /* ==================== compute sizes ==================== */

   /* first of all, work out how big the client stack will be */
   stringsize   = 0;
   auxsize = 0;
   have_exename = VG_(args_the_exename) != NULL;

   /* paste on the extra args if the loader needs them (ie, the #! 
      interpreter and its argument) */
   argc = 0;
   if (info->interp_name != NULL) {
      argc++;
      stringsize += VG_(strlen)(info->interp_name) + 1;
   }
   if (info->interp_args != NULL) {
      argc++;
      stringsize += VG_(strlen)(info->interp_args) + 1;
   }

   /* now scan the args we're given... */
   if (have_exename)
      stringsize += VG_(strlen)( VG_(args_the_exename) ) + 1;

   for (i = 0; i < VG_(sizeXA)( VG_(args_for_client) ); i++) {
      argc++;
      stringsize += VG_(strlen)( * (HChar**) 
                                   VG_(indexXA)( VG_(args_for_client), i ))
                    + 1;
   }

   /* ...and the environment */
   envc = 0;
   for (cpp = orig_envp; cpp && *cpp; cpp++) {
      envc++;
      stringsize += VG_(strlen)(*cpp) + 1;
   }

   /* Darwin executable_path + NULL */
   auxsize += 2 * sizeof(Word);
   if (info->executable_path) {
       stringsize += 1 + VG_(strlen)(info->executable_path);
   }

   /* Darwin mach_header */
   if (info->dynamic) auxsize += sizeof(Word);

   /* OK, now we know how big the client stack is */
   stacksize =
      sizeof(Word) +                          /* argc */
      (have_exename ? sizeof(char **) : 0) +  /* argv[0] == exename */
      sizeof(char **)*argc +                  /* argv */
      sizeof(char **) +	                      /* terminal NULL */
      sizeof(char **)*envc +                  /* envp */
      sizeof(char **) +	                      /* terminal NULL */
      auxsize +                               /* auxv */
      VG_ROUNDUP(stringsize, sizeof(Word));   /* strings (aligned) */

   if (0) VG_(printf)("stacksize = %d\n", stacksize);

   /* client_SP is the client's stack pointer */
   client_SP = clstack_end - stacksize;
   client_SP = VG_ROUNDDN(client_SP, 32); /* make stack 32 byte aligned */

   /* base of the string table (aligned) */
   stringbase = strtab = (char *)clstack_end
                         - VG_ROUNDUP(stringsize, sizeof(Word));

   /* The max stack size */
   clstack_max_size = VG_PGROUNDUP(clstack_max_size);

   /* Darwin stack is chosen by the ume loader */
   clstack_start = clstack_end - clstack_max_size;

   /* Record stack extent -- needed for stack-change code. */
   /* GrP fixme really? */
   VG_(clstk_base) = clstack_start;
   VG_(clstk_end)  = clstack_end;

   if (0)
      VG_(printf)("stringsize=%d auxsize=%d stacksize=%d maxsize=0x%x\n"
                  "clstack_start %p\n"
                  "clstack_end   %p\n",
	          stringsize, auxsize, stacksize, (Int)clstack_max_size,
                  (void*)clstack_start, (void*)clstack_end);

   /* ==================== allocate space ==================== */

   /* Stack was allocated by the ume loader. */

   /* ==================== create client stack ==================== */

   ptr = (Addr*)client_SP;

   /* --- mach_header --- */
   if (info->dynamic) *ptr++ = info->text;

   /* --- client argc --- */
   *ptr++ = (Addr)(argc + (have_exename ? 1 : 0));

   /* --- client argv --- */
   if (info->interp_name) {
      *ptr++ = (Addr)copy_str(&strtab, info->interp_name);
      VG_(free)(info->interp_name);
   }
   if (info->interp_args) {
      *ptr++ = (Addr)copy_str(&strtab, info->interp_args);
      VG_(free)(info->interp_args);
   }

   if (have_exename)
      *ptr++ = (Addr)copy_str(&strtab, VG_(args_the_exename));

   for (i = 0; i < VG_(sizeXA)( VG_(args_for_client) ); i++) {
      *ptr++ = (Addr)copy_str(
                       &strtab, 
                       * (HChar**) VG_(indexXA)( VG_(args_for_client), i )
                     );
   }
   *ptr++ = 0;

   /* --- envp --- */
   VG_(client_envp) = (Char **)ptr;
   for (cpp = orig_envp; cpp && *cpp; ptr++, cpp++)
      *ptr = (Addr)copy_str(&strtab, *cpp);
   *ptr++ = 0;

   /* --- executable_path + NULL --- */
   if (info->executable_path) 
       *ptr++ = (Addr)copy_str(&strtab, info->executable_path);
   else 
       *ptr++ = 0;
   *ptr++ = 0;

   vg_assert((strtab-stringbase) == stringsize);

   /* client_SP is pointing at client's argc/argv */

   if (0) VG_(printf)("startup SP = %#lx\n", client_SP);
   return client_SP;
}
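
setup_client_stack lays out, from low to high addresses: argc, argv[] plus a terminating NULL, envp[] plus a terminating NULL, the auxiliary words, and finally the string table that the argv/envp pointers refer into. A compact sketch of the same two-cursor technique (pointer words written upward from the base, strings copied into a table near the top); copy_str_demo and the buffer sizes are illustrative, not Valgrind's copy_str.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* copy s into the string table, advance the cursor, return old position */
static char *copy_str_demo(char **tab, const char *s)
{
   char *dst = *tab;
   strcpy(dst, s);
   *tab = dst + strlen(s) + 1;
   return dst;
}

int main(void)
{
   static uintptr_t stackbuf[512];   /* word-aligned backing store */
   const char *argv_in[] = { "prog", "-v" };
   const char *envp_in[] = { "HOME=/tmp" };

   /* strings grow upward from a table near the top of the block ... */
   char *strtab = (char *)(stackbuf + 512) - 64;
   /* ... while argc/argv/envp words are written upward from the base. */
   uintptr_t *ptr = stackbuf;

   *ptr++ = 2;                                   /* argc */
   for (int i = 0; i < 2; i++)                   /* argv */
      *ptr++ = (uintptr_t)copy_str_demo(&strtab, argv_in[i]);
   *ptr++ = 0;                                   /* terminal NULL */
   for (int i = 0; i < 1; i++)                   /* envp */
      *ptr++ = (uintptr_t)copy_str_demo(&strtab, envp_in[i]);
   *ptr++ = 0;                                   /* terminal NULL */

   printf("argc=%lu argv[0]=%s envp[0]=%s\n",
          (unsigned long)stackbuf[0],
          (char *)stackbuf[1], (char *)stackbuf[4]);
   return 0;
}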
Example #9
/* Just before starting the client, we may need to make final
   adjustments to its initial image.  Also we need to set up the VEX
   guest state for thread 1 (the root thread) and copy in essential
   starting values.  This is handed the IIFinaliseImageInfo created by
   VG_(ii_create_image).
*/
void VG_(ii_finalise_image)( IIFinaliseImageInfo iifii )
{
   UInt   adler32_act;
   SysRes sres;
   /* On AIX we get a block of 37 words telling us the initial state
      for (GPR0 .. GPR31, PC, CR, LR, CTR, XER), and we start with all
      the other registers zeroed. */

   ThreadArchState* arch = &VG_(threads)[1].arch;

#  if defined(VGP_ppc32_aix5)

   vg_assert(0 == sizeof(VexGuestPPC32State) % 8);

   /* Zero out the initial state, and set up the simulated FPU in a
      sane way. */
   LibVEX_GuestPPC32_initialise(&arch->vex);

   /* Zero out the shadow area. */
   VG_(memset)(&arch->vex_shadow, 0, sizeof(VexGuestPPC32State));

#  else /* defined(VGP_ppc64_aix5) */

   vg_assert(0 == sizeof(VexGuestPPC64State) % 8);

   /* Zero out the initial state, and set up the simulated FPU in a
      sane way. */
   LibVEX_GuestPPC64_initialise(&arch->vex);

   /* Zero out the shadow area. */
   VG_(memset)(&arch->vex_shadow, 0, sizeof(VexGuestPPC64State));

#  endif

   /* iifii.intregs37 contains the integer register state as it needs
      to be at client startup.  These values are supplied by the
      launcher.  The 37 regs are: GPR0 .. GPR31, PC, CR, LR, CTR,
      XER. */

   /* Put essential stuff into the new state. */
   arch->vex.guest_GPR0  =  (UWord)iifii.intregs37[0];
   arch->vex.guest_GPR1  =  (UWord)iifii.intregs37[1];
   arch->vex.guest_GPR2  =  (UWord)iifii.intregs37[2];
   arch->vex.guest_GPR3  =  (UWord)iifii.intregs37[3];
   arch->vex.guest_GPR4  =  (UWord)iifii.intregs37[4];
   arch->vex.guest_GPR5  =  (UWord)iifii.intregs37[5];
   arch->vex.guest_GPR6  =  (UWord)iifii.intregs37[6];
   arch->vex.guest_GPR7  =  (UWord)iifii.intregs37[7];
   arch->vex.guest_GPR8  =  (UWord)iifii.intregs37[8];
   arch->vex.guest_GPR9  =  (UWord)iifii.intregs37[9];
   arch->vex.guest_GPR10 =  (UWord)iifii.intregs37[10];
   arch->vex.guest_GPR11 =  (UWord)iifii.intregs37[11];
   arch->vex.guest_GPR12 =  (UWord)iifii.intregs37[12];
   arch->vex.guest_GPR13 =  (UWord)iifii.intregs37[13];
   arch->vex.guest_GPR14 =  (UWord)iifii.intregs37[14];
   arch->vex.guest_GPR15 =  (UWord)iifii.intregs37[15];
   arch->vex.guest_GPR16 =  (UWord)iifii.intregs37[16];
   arch->vex.guest_GPR17 =  (UWord)iifii.intregs37[17];
   arch->vex.guest_GPR18 =  (UWord)iifii.intregs37[18];
   arch->vex.guest_GPR19 =  (UWord)iifii.intregs37[19];
   arch->vex.guest_GPR20 =  (UWord)iifii.intregs37[20];
   arch->vex.guest_GPR21 =  (UWord)iifii.intregs37[21];
   arch->vex.guest_GPR22 =  (UWord)iifii.intregs37[22];
   arch->vex.guest_GPR23 =  (UWord)iifii.intregs37[23];
   arch->vex.guest_GPR24 =  (UWord)iifii.intregs37[24];
   arch->vex.guest_GPR25 =  (UWord)iifii.intregs37[25];
   arch->vex.guest_GPR26 =  (UWord)iifii.intregs37[26];
   arch->vex.guest_GPR27 =  (UWord)iifii.intregs37[27];
   arch->vex.guest_GPR28 =  (UWord)iifii.intregs37[28];
   arch->vex.guest_GPR29 =  (UWord)iifii.intregs37[29];
   arch->vex.guest_GPR30 =  (UWord)iifii.intregs37[30];
   arch->vex.guest_GPR31 =  (UWord)iifii.intregs37[31];

   arch->vex.guest_CIA      = (UWord)iifii.intregs37[32+0];
   arch->vex.guest_LR       = (UWord)iifii.intregs37[32+2];
   arch->vex.guest_CTR      = (UWord)iifii.intregs37[32+3];

#  if defined(VGP_ppc32_aix5)

   LibVEX_GuestPPC32_put_CR(  (UWord)iifii.intregs37[32+1], &arch->vex );
   LibVEX_GuestPPC32_put_XER( (UWord)iifii.intregs37[32+4], &arch->vex );

   /* Set the cache line size (KLUDGE) */
   VG_(machine_ppc32_set_clszB)( 128 );

#  else /* defined(VGP_ppc64_aix5) */

   LibVEX_GuestPPC64_put_CR(  (UWord)iifii.intregs37[32+1], &arch->vex );
   LibVEX_GuestPPC64_put_XER( (UWord)iifii.intregs37[32+4], &arch->vex );

   /* Set the cache line size (KLUDGE) */
   VG_(machine_ppc64_set_clszB)( 128 );

#  endif

   /* Fix up the client's command line.  Its argc/v/envp is in r3/4/5
      (32-bit AIX) or r14/15/16 (64-bit AIX), but that is for the
      Valgrind invocation as a whole.  Hence we need to decrement argc
      and advance argv to step over the args for Valgrind, and the
      name of the Valgrind tool exe bogusly inserted by the launcher
      (hence the "+1"). */

#  if defined(VGP_ppc32_aix5)

   { UWord n_vargs = VG_(sizeXA)( VG_(args_for_valgrind) );
     vg_assert(arch->vex.guest_GPR3 >= 1 + n_vargs);
     arch->vex.guest_GPR3 -= (1 + n_vargs);
     arch->vex.guest_GPR4 += sizeof(UWord) * (1 + n_vargs);
   }

#  else /* defined(VGP_ppc64_aix5) */

   { UWord n_vargs = VG_(sizeXA)( VG_(args_for_valgrind) );
     vg_assert(arch->vex.guest_GPR14 >= 1 + n_vargs);
     arch->vex.guest_GPR14 -= (1 + n_vargs);
     arch->vex.guest_GPR15 += sizeof(UWord) * (1 + n_vargs);
   }

#  endif

   /* At this point the guest register state is correct for client
      startup.  However, that's not where we want to start; in fact we
      want to start at VG_(ppc{32,64}_aix5_do_preloads_then_start_client),
      passing it iifii.preloadpage in r3.  This will load the core/tool
      preload .so's, then restore r2-r10 from what's stashed in the
      preloadpage, and then start the client really.  Hence: */

   /* Save r2-r10 and the client start point in preloadpage */
   iifii.preloadpage->r2  = (ULong)arch->vex.guest_GPR2;
   iifii.preloadpage->r3  = (ULong)arch->vex.guest_GPR3;
   iifii.preloadpage->r4  = (ULong)arch->vex.guest_GPR4;
   iifii.preloadpage->r5  = (ULong)arch->vex.guest_GPR5;
   iifii.preloadpage->r6  = (ULong)arch->vex.guest_GPR6;
   iifii.preloadpage->r7  = (ULong)arch->vex.guest_GPR7;
   iifii.preloadpage->r8  = (ULong)arch->vex.guest_GPR8;
   iifii.preloadpage->r9  = (ULong)arch->vex.guest_GPR9;
   iifii.preloadpage->r10 = (ULong)arch->vex.guest_GPR10;
   iifii.preloadpage->client_start = (ULong)arch->vex.guest_CIA;


#  if defined(VGP_ppc32_aix5)

   /* Set up to start at VG_(ppc32_aix5_do_preloads_then_start_client) */
   arch->vex.guest_CIA = (UWord)&VG_(ppc32_aix5_do_preloads_then_start_client);

#  else /* defined(VGP_ppc64_aix5) */

   /* Set up to start at VG_(ppc64_aix5_do_preloads_then_start_client) */
   arch->vex.guest_CIA = (UWord)&VG_(ppc64_aix5_do_preloads_then_start_client);

#  endif

   arch->vex.guest_GPR3 = (UWord)iifii.preloadpage;

   /* The rest of the preloadpage fields will already have been filled
      in by VG_(setup_client_initial_image).  So we're done. */

   /* Finally, decompress the page compressed by the launcher.  We
      can't do this any earlier, because the page is (effectively)
      decompressed in place, which trashes iifii.intregs37.  So we have
      to wait till this point, at which we're done with iifii.intregs37
      (to be precise, with what it points at). */
   VG_(debugLog)(1, "initimg", "decompressing page at %p\n", 
                    (void*)iifii.compressed_page);
   vg_assert(VG_IS_PAGE_ALIGNED(iifii.compressed_page));

   Huffman_Uncompress( (void*)iifii.compressed_page, unz_page,
                       VKI_PAGE_SIZE, VKI_PAGE_SIZE );
   adler32_act = compute_adler32(unz_page, VKI_PAGE_SIZE);

   VG_(debugLog)(1, "initimg", 
                    "decompress done, adler32s: act 0x%x, exp 0x%x\n",
                    adler32_act, iifii.adler32_exp );

   VG_(memcpy)((void*)iifii.compressed_page, unz_page, VKI_PAGE_SIZE);

   VG_(debugLog)(1, "initimg", "copy back done\n");

   /* Tell the tool that we just wrote to the registers. */
   VG_TRACK( post_reg_write, Vg_CoreStartup, /*tid*/1, /*offset*/0,
             sizeof(VexGuestArchState));

   /* Determine the brk limit. */
   VG_(debugLog)(1, "initimg", "establishing current brk ..\n");
   vg_assert(__NR_AIX5_sbrk != __NR_AIX5_UNKNOWN);
   sres = VG_(do_syscall1)(__NR_AIX5_sbrk, 0);
   vg_assert(sres.err == 0); /* assert no error */
   VG_(brk_base) = VG_(brk_limit) = sres.res;
   VG_(debugLog)(1, "initimg", ".. brk = %p\n", (void*)VG_(brk_base));
}
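
compute_adler32 is not shown in this excerpt, but Adler-32 itself is a standard checksum: two running sums taken modulo 65521, packed into one 32-bit word. A minimal sketch, not necessarily byte-for-byte the helper this file calls:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define ADLER_MOD 65521u   /* largest prime below 2^16 */

static uint32_t adler32_demo(const uint8_t *buf, size_t len)
{
   uint32_t a = 1, b = 0;
   for (size_t i = 0; i < len; i++) {
      a = (a + buf[i]) % ADLER_MOD;   /* sum of bytes (plus 1) */
      b = (b + a) % ADLER_MOD;        /* sum of the running sums */
   }
   return (b << 16) | a;
}

int main(void)
{
   const uint8_t msg[] = "Wikipedia";
   /* well-known test vector: Adler-32("Wikipedia") == 0x11E60398 */
   printf("0x%08X\n", adler32_demo(msg, sizeof(msg) - 1));
   return 0;
}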
Example #10
/* 
   Parse a LC_THREAD or LC_UNIXTHREAD command. 
   Return 0 on success, -1 on any failure.
   The stack address is returned in *stack. If the executable requested 
   a non-default stack address, *customstack is set to TRUE. The thread's 
   entry point is returned in *entry.
   The stack itself (if any) is not mapped.
   Other custom register settings are silently ignored (GrP fixme).
*/
static int 
load_genericthread(vki_uint8_t **stack_end, 
                   int *customstack, vki_uint8_t **entry, 
                   struct thread_command *threadcmd)
{
   unsigned int flavor;
   unsigned int count;
   unsigned int *p;
   unsigned int left; 

   p = (unsigned int *)(threadcmd + 1);
   left = (threadcmd->cmdsize - sizeof(struct thread_command)) / sizeof(*p);

   while (left > 0) {
      if (left < 2) {
         print("bad executable (invalid thread command)\n");
         return -1;
      }
      flavor = *p++; left--;
      count = *p++; left--;
      
      if (left < count) {
         print("bad executable (invalid thread command 2)\n");
         return -1;
      }

#if defined(VGA_x86)
      if (flavor == i386_THREAD_STATE && count == i386_THREAD_STATE_COUNT) {
         i386_thread_state_t *state = (i386_thread_state_t *)p;
         if (entry) *entry = (vki_uint8_t *)state->__eip;
         if (stack_end) {
            *stack_end = (vki_uint8_t *)(state->__esp ? state->__esp
                                                      : VKI_USRSTACK);
            vg_assert(VG_IS_PAGE_ALIGNED(*stack_end));
            (*stack_end)--;
         }
         if (customstack) *customstack = state->__esp;
         return 0;
      }

#elif defined(VGA_amd64)
      if (flavor == x86_THREAD_STATE64 && count == x86_THREAD_STATE64_COUNT){
         x86_thread_state64_t *state = (x86_thread_state64_t *)p;
         if (entry) *entry = (vki_uint8_t *)state->__rip;
         if (stack_end) {
            *stack_end = (vki_uint8_t *)(state->__rsp ? state->__rsp 
                                                      : VKI_USRSTACK64);
            vg_assert(VG_IS_PAGE_ALIGNED(*stack_end));
            (*stack_end)--;
         }
         if (customstack) *customstack = state->__rsp;
         return 0;
      }
      
#elif defined(VGA_arm)
      if (flavor == ARM_THREAD_STATE && count == ARM_THREAD_STATE_COUNT){
         arm_thread_state_t *state = (arm_thread_state_t *)p;
         if (entry) *entry = (vki_uint8_t *)state->__pc;
         if (stack_end) {
            *stack_end = (vki_uint8_t *)(state->__sp ? state->__sp 
                                                      : VKI_USRSTACK);
            vg_assert(VG_IS_PAGE_ALIGNED(*stack_end));
            (*stack_end)--;
         }
         if (customstack) *customstack = state->__sp;
         return 0;
      }

#else
# error unknown platform
#endif
      p += count;
      left -= count;
   }

   print("bad executable (no arch-compatible thread state)\n");
   return -1;
}
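
load_genericthread walks a sequence of (flavor, count, data[count]) records packed after the thread_command header, validating both header words before trusting the payload length. A stripped-down sketch of the same record-walking pattern over a plain array; only the record layout is carried over from the code above.

#include <stdio.h>

/* records: flavor word, count word, then `count` payload words */
static int walk_records(const unsigned *p, unsigned left)
{
   while (left > 0) {
      if (left < 2) return -1;            /* truncated header */
      unsigned flavor = *p++; left--;
      unsigned count  = *p++; left--;
      if (left < count) return -1;        /* truncated payload */
      printf("flavor %u, %u payload words\n", flavor, count);
      p += count; left -= count;          /* skip to the next record */
   }
   return 0;
}

int main(void)
{
   /* two records: (7, 2 words) and (3, 1 word) */
   unsigned buf[] = { 7, 2, 0xAA, 0xBB, 3, 1, 0xCC };
   return walk_records(buf, sizeof(buf)/sizeof(buf[0])) ? 1 : 0;
}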
VgStack* VG_(am_alloc_VgStack)( Addr* initial_sp )
{
   Int      szB;
   SysRes   sres;
   VgStack* stack;
   UInt*    p;
   Int      i;

   /* Allocate the stack. */
   szB = VG_STACK_GUARD_SZB 
         + VG_STACK_ACTIVE_SZB + VG_STACK_GUARD_SZB;

   sres = VG_(am_mmap_anon_float_valgrind)( szB );
   if (sr_isError(sres))
      return NULL;

   stack = (VgStack*)(AddrH)sr_Res(sres);

   aspacem_assert(VG_IS_PAGE_ALIGNED(szB));
   aspacem_assert(VG_IS_PAGE_ALIGNED(stack));

   /* Protect the guard areas. */
   sres = local_do_mprotect_NO_NOTIFY( 
             (Addr) &stack[0], 
             VG_STACK_GUARD_SZB, VKI_PROT_NONE 
          );
   if (sr_isError(sres)) goto protect_failed;
   VG_(am_notify_mprotect)( 
      (Addr) &stack->bytes[0], 
      VG_STACK_GUARD_SZB, VKI_PROT_NONE 
   );

   sres = local_do_mprotect_NO_NOTIFY( 
             (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB], 
             VG_STACK_GUARD_SZB, VKI_PROT_NONE 
          );
   if (sr_isError(sres)) goto protect_failed;
   VG_(am_notify_mprotect)( 
      (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB],
      VG_STACK_GUARD_SZB, VKI_PROT_NONE 
   );

   /* Fill the active area with junk so we can later
      tell how much got used. */
   p = (UInt*)&stack->bytes[VG_STACK_GUARD_SZB];
   for (i = 0; i < VG_STACK_ACTIVE_SZB/sizeof(UInt); i++)
      p[i] = 0xDEADBEEF;

   *initial_sp = (Addr)&stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB];
   *initial_sp -= 8;
   *initial_sp &= ~((Addr)0x1F); /* 32-align it */

   VG_(debugLog)( 1,"aspacem","allocated thread stack at 0x%llx size %d\n",
                  (ULong)(Addr)stack, szB);
   ML_(am_do_sanity_check)();
   return stack;

  protect_failed:
   /* The stack was allocated, but we can't protect it.  Unmap it and
      return NULL (failure). */
   (void)ML_(am_do_munmap_NO_NOTIFY)( (Addr)stack, szB );
   ML_(am_do_sanity_check)();
   return NULL;
}
Example #12
/* Note: this is VG_, not ML_. */
SysRes VG_(am_do_mmap_NO_NOTIFY)( Addr start, SizeT length, UInt prot, 
                                  UInt flags, Int fd, Off64T offset)
{
#define DEBUG_MYSELF 0
#if defined(VGO_l4re)
   void *val;
#endif
   SysRes res;
   aspacem_assert(VG_IS_PAGE_ALIGNED(offset));
#  if defined(VGP_x86_linux) || defined(VGP_ppc32_linux) \
      || defined(VGP_arm_linux)
   /* mmap2 uses 4096 chunks even if actual page size is bigger. */
   aspacem_assert((offset % 4096) == 0);
   res = VG_(do_syscall6)(__NR_mmap2, (UWord)start, length,
                          prot, flags, fd, offset / 4096);
#  elif defined(VGP_amd64_linux) || defined(VGP_ppc64_linux) \
        || defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5) \
        || defined(VGP_s390x_linux)
   res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length, 
                         prot, flags, fd, offset);
#  elif defined(VGP_x86_darwin)
   if (fd == 0  &&  (flags & VKI_MAP_ANONYMOUS)) {
       fd = -1;  // MAP_ANON with fd==0 is EINVAL
   }
   res = VG_(do_syscall7)(__NR_mmap, (UWord)start, length,
                          prot, flags, fd, offset & 0xffffffff, offset >> 32);
#  elif defined(VGP_amd64_darwin)
   if (fd == 0  &&  (flags & VKI_MAP_ANONYMOUS)) {
       fd = -1;  // MAP_ANON with fd==0 is EINVAL
   }
   res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length,
                          prot, flags, (UInt)fd, offset);
#  elif defined(VGO_l4re)
   #if DEBUG_MYSELF
   VG_(am_show_nsegments)(0,"before mmap");
   l4re_rm_show_lists();
   VG_(debugLog)(1, "aspacem",
                 "\033[34mmmap(start=%p, length=%d (0x%x), fd=%d,\033[0m\n",
                 (void *)start, (int) length, (int)length, fd);
   VG_(debugLog)(1, "aspacem", "\033[34moffset=%ld (0x%lx), prot=%d=%c%c%c, flags=%d=%s%s%s%s)\033[0m\n",
                 (long)offset, (long) offset,
                 prot,
                 prot & VKI_PROT_READ ? 'r' : '-',
                 prot & VKI_PROT_WRITE ? 'w' : '-',
                 prot & VKI_PROT_EXEC ? 'x' : '-',
                 flags,
                 flags & VKI_MAP_SHARED ? "MAP_SHARED " : "",
                 flags & VKI_MAP_PRIVATE ? "MAP_PRIVATE " : "",
                 flags & VKI_MAP_FIXED ? "MAP_FIXED " : "",
                 flags & VKI_MAP_ANONYMOUS ? "MAP_ANONYMOUS " : "");

   #endif

   val = mmap((void *)start, VG_PGROUNDUP(length), prot, flags, fd, offset);

   #if DEBUG_MYSELF
      VG_(debugLog)(1, "aspacem", "\033[34;1mmmap returns %p\n", val);
      VG_(am_show_nsegments)(0,"after mmap");
      l4re_rm_show_lists();
   #endif

   res._isError = (val == (void *)-1);
   if (sr_isError(res)) {
      res._err = - (int)val;
      res._res = -1;
      VG_(debugLog)(1, "aspacem", "mmap failed\n");
      enter_kdebug("ERROR: mmap failed");
   } else {
      res._err = 0;
      res._res = (int)val;
   }

#  else
#    error Unknown platform
#  endif
   return res;
}
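
The VGO_l4re branch above hand-packs the libc mmap result into a SysRes. A minimal sketch of that success/error split around mmap, using a stand-in struct rather than Valgrind's real SysRes; note it takes the error code from errno, whereas the branch above derives it from the returned value itself.

#include <stdio.h>
#include <stddef.h>
#include <errno.h>
#include <sys/mman.h>

/* stand-in for Valgrind's SysRes: either a result or an error code */
typedef struct { int isError; long err; long res; } DemoSysRes;

static DemoSysRes demo_mmap(size_t len)
{
   DemoSysRes r;
   void *val = mmap(NULL, len, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   r.isError = (val == MAP_FAILED);
   if (r.isError) {
      r.err = errno;   /* libc mmap reports the error code via errno */
      r.res = -1;
   } else {
      r.err = 0;
      r.res = (long)val;
   }
   return r;
}

int main(void)
{
   DemoSysRes r = demo_mmap(4096);
   if (r.isError)
      printf("mmap failed: err=%ld\n", r.err);
   else
      printf("mapped at 0x%lx\n", (unsigned long)r.res);
   return 0;
}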