Example #1
File: readmacho.c Project: Grindland/ppres
/* Map a given fat or thin object aboard, find the thin part if
   necessary, do some checks, and write details of both the fat and
   thin parts into *ii.  Returns False (and leaves the file unmapped)
   on failure.  Guarantees to return pointers to a valid(ish) Mach-O
   image if it succeeds. */
static Bool map_image_aboard ( DebugInfo* di, /* only for err msgs */
                               /*OUT*/ImageInfo* ii, UChar* filename )
{
   VG_(memset)(ii, 0, sizeof(*ii));

   /* First off, try to map the thing in. */
   { SizeT  size;
     SysRes fd, sres;
     struct vg_stat stat_buf;

     fd = VG_(stat)(filename, &stat_buf);
     if (sr_isError(fd)) {
        ML_(symerr)(di, True, "Can't stat image (to determine its size)?!");
        return False;
     }
     size = stat_buf.size;

     fd = VG_(open)(filename, VKI_O_RDONLY, 0);
     if (sr_isError(fd)) {
       ML_(symerr)(di, True, "Can't open image to read symbols?!");
        return False;
     }

     sres = VG_(am_mmap_file_float_valgrind)
               ( size, VKI_PROT_READ, sr_Res(fd), 0 );
     if (sr_isError(sres)) {
        ML_(symerr)(di, True, "Can't mmap image to read symbols?!");
        return False;
     }

     VG_(close)(sr_Res(fd));

     ii->img     = (UChar*)sr_Res(sres);
     ii->img_szB = size;
   }

   /* Now it's mapped in and we have .img and .img_szB set.  Look for
      the embedded Mach-O object.  If not findable, unmap and fail. */
   { struct fat_header*  fh_be;
     struct fat_header   fh;
     struct MACH_HEADER* mh;
     
     // Assume initially that we have a thin image, and update
     // these if it turns out to be fat.
     ii->macho_img     = ii->img;
     ii->macho_img_szB = ii->img_szB;

     // Check for fat header.
     if (ii->img_szB < sizeof(struct fat_header)) {
        ML_(symerr)(di, True, "Invalid Mach-O file (0 too small).");
        goto unmap_and_fail;
     }

     // Fat header is always BIG-ENDIAN
     fh_be = (struct fat_header *)ii->img;
     fh.magic = VG_(ntohl)(fh_be->magic);
     fh.nfat_arch = VG_(ntohl)(fh_be->nfat_arch);
     if (fh.magic == FAT_MAGIC) {
        // Look for a good architecture.
        struct fat_arch *arch_be;
        struct fat_arch arch;
        Int f;
        if (ii->img_szB < sizeof(struct fat_header)
                          + fh.nfat_arch * sizeof(struct fat_arch)) {
           ML_(symerr)(di, True, "Invalid Mach-O file (1 too small).");
           goto unmap_and_fail;
        }
        for (f = 0, arch_be = (struct fat_arch *)(fh_be+1); 
             f < fh.nfat_arch;
             f++, arch_be++) {
           Int cputype;
#          if defined(VGA_ppc)
           cputype = CPU_TYPE_POWERPC;
#          elif defined(VGA_ppc64)
           cputype = CPU_TYPE_POWERPC64;
#          elif defined(VGA_x86)
           cputype = CPU_TYPE_X86;
#          elif defined(VGA_amd64)
           cputype = CPU_TYPE_X86_64;
#          else
#            error "unknown architecture"
#          endif
           arch.cputype    = VG_(ntohl)(arch_be->cputype);
           arch.cpusubtype = VG_(ntohl)(arch_be->cpusubtype);
           arch.offset     = VG_(ntohl)(arch_be->offset);
           arch.size       = VG_(ntohl)(arch_be->size);
           if (arch.cputype == cputype) {
              if (ii->img_szB < arch.offset + arch.size) {
                 ML_(symerr)(di, True, "Invalid Mach-O file (2 too small).");
                 goto unmap_and_fail;
              }
              ii->macho_img     = ii->img + arch.offset;
              ii->macho_img_szB = arch.size;
              break;
           }
        }
        if (f == fh.nfat_arch) {
           ML_(symerr)(di, True,
                       "No acceptable architecture found in fat file.");
           goto unmap_and_fail;
        }
     }

     /* Sanity check what we found. */

     /* assured by logic above */
     vg_assert(ii->img_szB >= sizeof(struct fat_header));

     if (ii->macho_img_szB < sizeof(struct MACH_HEADER)) {
        ML_(symerr)(di, True, "Invalid Mach-O file (3 too small).");
        goto unmap_and_fail;
     }

     if (ii->macho_img_szB > ii->img_szB) {
        ML_(symerr)(di, True, "Invalid Mach-O file (thin bigger than fat).");
        goto unmap_and_fail;
     }

     if (ii->macho_img >= ii->img
         && ii->macho_img + ii->macho_img_szB <= ii->img + ii->img_szB) {
        /* thin entirely within fat, as expected */
     } else {
        ML_(symerr)(di, True, "Invalid Mach-O file (thin not inside fat).");
        goto unmap_and_fail;
     }

     mh = (struct MACH_HEADER *)ii->macho_img;
     if (mh->magic != MAGIC) {
        ML_(symerr)(di, True, "Invalid Mach-O file (bad magic).");
        goto unmap_and_fail;
     }

     if (ii->macho_img_szB < sizeof(struct MACH_HEADER) + mh->sizeofcmds) {
        ML_(symerr)(di, True, "Invalid Mach-O file (4 too small).");
        goto unmap_and_fail;
     }
   }

   vg_assert(ii->img);
   vg_assert(ii->macho_img);
   vg_assert(ii->img_szB > 0);
   vg_assert(ii->macho_img_szB > 0);
   vg_assert(ii->macho_img >= ii->img);
   vg_assert(ii->macho_img + ii->macho_img_szB <= ii->img + ii->img_szB);
   return True;  /* success */
   /*NOTREACHED*/

  unmap_and_fail:
   unmap_image(ii);
   return False; /* bah! */
}
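
The fat-header walk above is easier to see in isolation. Below is a minimal standalone sketch (plain C, not Valgrind code) of the same idea: the struct layouts and FAT_MAGIC value mirror <mach-o/fat.h>, be32() stands in for VG_(ntohl), and the size checks mirror the "too small" checks above.

#include <stddef.h>
#include <stdint.h>

#define FAT_MAGIC 0xcafebabe

struct fat_header { uint32_t magic, nfat_arch; };
struct fat_arch   { uint32_t cputype, cpusubtype, offset, size, align; };

/* Read a big-endian 32-bit value (the fat header is always big-endian). */
static uint32_t be32 ( const void* p )
{
   const uint8_t* b = p;
   return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16)
        | ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}

/* Find the thin slice for 'want_cputype' inside a mapped image of
   'img_szB' bytes.  Returns 1 and fills *off/*len on success, 0 on
   failure; a non-fat image is treated as a single thin slice. */
static int find_thin_slice ( const uint8_t* img, size_t img_szB,
                             uint32_t want_cputype,
                             size_t* off, size_t* len )
{
   if (img_szB < sizeof(struct fat_header)) return 0;
   if (be32(img) != FAT_MAGIC) { *off = 0; *len = img_szB; return 1; }

   uint32_t nfat = be32(img + 4);
   if (img_szB < sizeof(struct fat_header)
                 + (size_t)nfat * sizeof(struct fat_arch))
      return 0;

   const uint8_t* arch = img + sizeof(struct fat_header);
   for (uint32_t f = 0; f < nfat; f++, arch += sizeof(struct fat_arch)) {
      if (be32(arch + 0) != want_cputype) continue;    /* cputype field */
      size_t o = be32(arch + 8), s = be32(arch + 12);  /* offset, size */
      if (o + s > img_szB) return 0;   /* slice must lie inside the file */
      *off = o; *len = s; return 1;
   }
   return 0;  /* no acceptable architecture found */
}
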
Example #2
File: readmacho.c Project: Grindland/ppres
/* Read a symbol table (nlist).  Add the resulting candidate symbols
   to 'syms'; the caller will post-process them and hand them off to
   ML_(addSym) itself. */
static
void read_symtab( /*OUT*/XArray* /* DiSym */ syms,
                  struct _DebugInfo* di, 
                  struct NLIST* o_symtab, UInt o_symtab_count,
                  UChar*     o_strtab, UInt o_strtab_sz )
{
   Int    i;
   Addr   sym_addr;
   DiSym  risym;
   UChar* name;

   static UChar* s_a_t_v = NULL; /* do not make non-static */

   for (i = 0; i < o_symtab_count; i++) {
      struct NLIST *nl = o_symtab+i;
      if ((nl->n_type & N_TYPE) == N_SECT) {
         sym_addr = di->text_bias + nl->n_value;
    /*} else if ((nl->n_type & N_TYPE) == N_ABS) {
         GrP fixme don't ignore absolute symbols?
         sym_addr = nl->n_value; */
      } else {
         continue;
      }
      
      if (di->trace_symtab)
         VG_(printf)("nlist raw: avma %010lx  %s\n",
                     sym_addr, o_strtab + nl->n_un.n_strx );

      /* If no part of the symbol falls within the mapped range,
         ignore it. */
      if (sym_addr <= di->text_avma
          || sym_addr >= di->text_avma+di->text_size) {
         continue;
      }

      /* skip names which point outside the string table;
         following these risks segfaulting Valgrind */
      name = o_strtab + nl->n_un.n_strx;
      if (name < o_strtab || name >= o_strtab + o_strtab_sz)
         continue;

      /* skip nameless symbols; these appear to be common, but
         useless */
      if (*name == 0)
         continue;

      risym.tocptr = 0;
      risym.addr = sym_addr;
      risym.size = // let canonicalize fix it
                   di->text_avma+di->text_size - sym_addr;
      risym.name = ML_(addStr)(di, name, -1);
      risym.isText = True;
      risym.isIFunc = False;
      // Lots of user function names get prepended with an underscore.  Eg. the
      // function 'f' becomes the symbol '_f'.  And the "below main"
      // function is called "start".  So we skip the leading underscore, and
      // if we see 'start' and --show-below-main=no, we rename it as
      // "start_according_to_valgrind", which makes it easy to spot later
      // and display as "(below main)".
      if (risym.name[0] == '_') {
         risym.name++;
      } else if (!VG_(clo_show_below_main) && VG_STREQ(risym.name, "start")) {
         if (s_a_t_v == NULL)
            s_a_t_v = ML_(addStr)(di, "start_according_to_valgrind", -1);
         vg_assert(s_a_t_v);
         risym.name = s_a_t_v;
      }

      vg_assert(risym.name);
      VG_(addToXA)( syms, &risym );
   }
}
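
The underscore / "start" policy described in the comment above can be shown on its own. A minimal standalone sketch (plain libc, not Valgrind's interned string table):

#include <string.h>

/* Mach-O symbols carry a leading underscore ('_f' for 'f'); the
   below-main function is called 'start'.  Mirror the renaming above. */
static const char* normalise_sym_name ( const char* name,
                                        int show_below_main )
{
   if (name[0] == '_')
      return name + 1;                        /* "_f" -> "f" */
   if (!show_below_main && 0 == strcmp(name, "start"))
      return "start_according_to_valgrind";   /* shown as "(below main)" */
   return name;
}
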
Example #3
VgStack* VG_(am_alloc_VgStack)( Addr* initial_sp )
{
   Int      szB;
   SysRes   sres;
   VgStack* stack;
   UInt*    p;
   Int      i;

   
   szB = VG_STACK_GUARD_SZB 
         + VG_STACK_ACTIVE_SZB + VG_STACK_GUARD_SZB;

   sres = VG_(am_mmap_anon_float_valgrind)( szB );
   if (sr_isError(sres))
      return NULL;

   stack = (VgStack*)(AddrH)sr_Res(sres);

   aspacem_assert(VG_IS_PAGE_ALIGNED(szB));
   aspacem_assert(VG_IS_PAGE_ALIGNED(stack));

   
   sres = local_do_mprotect_NO_NOTIFY( 
             (Addr) &stack[0], 
             VG_STACK_GUARD_SZB, VKI_PROT_NONE 
          );
   if (sr_isError(sres)) goto protect_failed;
   VG_(am_notify_mprotect)( 
      (Addr) &stack->bytes[0], 
      VG_STACK_GUARD_SZB, VKI_PROT_NONE 
   );

   sres = local_do_mprotect_NO_NOTIFY( 
             (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB], 
             VG_STACK_GUARD_SZB, VKI_PROT_NONE 
          );
   if (sr_isError(sres)) goto protect_failed;
   VG_(am_notify_mprotect)( 
      (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB],
      VG_STACK_GUARD_SZB, VKI_PROT_NONE 
   );


   p = (UInt*)&stack->bytes[VG_STACK_GUARD_SZB];
   for (i = 0; i < VG_STACK_ACTIVE_SZB/sizeof(UInt); i++)
      p[i] = 0xDEADBEEF;

   *initial_sp = (Addr)&stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB];
   *initial_sp -= 8;
   *initial_sp &= ~((Addr)0x1F); 

   VG_(debugLog)( 1,"aspacem","allocated thread stack at 0x%llx size %d\n",
                  (ULong)(Addr)stack, szB);
   ML_(am_do_sanity_check)();
   return stack;

  protect_failed:
   (void)ML_(am_do_munmap_NO_NOTIFY)( (Addr)stack, szB );
   ML_(am_do_sanity_check)();
   return NULL;
}
Example #4
void ML_(am_barf) ( HChar* what )
{
   VG_(debugLog)(0, "aspacem", "Valgrind: FATAL: %s\n", what);
   VG_(debugLog)(0, "aspacem", "Exiting now.\n");
   ML_(am_exit)(1);
}
Example #5
static Addr build_rt_sigframe(ThreadState *tst,
			      Addr sp_top_of_frame,
			      const vki_siginfo_t *siginfo,
			      const struct vki_ucontext *siguc,
			      UInt flags,
			      const vki_sigset_t *mask,
			      void *restorer)
{
   struct rt_sigframe *frame;
   Addr sp = sp_top_of_frame;
   Int sigNo = siginfo->si_signo;

   vg_assert((flags & VKI_SA_SIGINFO) != 0);
   vg_assert((sizeof(*frame) & 7) == 0);
   vg_assert((sp & 7) == 0);

   sp -= sizeof(*frame);
   frame = (struct rt_sigframe *)sp;

   if (! ML_(sf_maybe_extend_stack)(tst, sp, sizeof(*frame), flags))
      return sp_top_of_frame;

   /* retcode, sigNo, sc, sregs fields are to be written */
   VG_TRACK( pre_mem_write, Vg_CoreSignal, tst->tid, "signal handler frame",
	     sp, offsetof(struct rt_sigframe, vg) );

   save_sigregs(tst, &frame->uc.uc_mcontext);

   if (flags & VKI_SA_RESTORER) {
      frame->retcode[0] = 0;
      frame->retcode[1] = 0;
      SET_SIGNAL_GPR(tst, 14, restorer);
   } else {
      frame->retcode[0] = 0x0a;
      frame->retcode[1] = __NR_rt_sigreturn;
      /* This normally should be &frame->retcode, but since there
         might be problems with a non-exec stack and we must discard
         the translation for the on-stack sigreturn, we just use the
         trampoline like x86/ppc.  We still fill in the retcode; let's
         just hope that nobody actually jumps here */
      SET_SIGNAL_GPR(tst, 14, (Addr)&VG_(s390x_linux_SUBST_FOR_rt_sigreturn));
   }

   VG_(memcpy)(&frame->info, siginfo, sizeof(vki_siginfo_t));
   frame->uc.uc_flags = 0;
   frame->uc.uc_link = 0;
   frame->uc.uc_sigmask = *mask;
   frame->uc.uc_stack = tst->altstack;

   SET_SIGNAL_GPR(tst, 2, siginfo->si_signo);
   SET_SIGNAL_GPR(tst, 3, &frame->info);
   SET_SIGNAL_GPR(tst, 4, &frame->uc);

   /* Set up backchain. */
   *((Addr *) sp) = sp_top_of_frame;

   VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid,
             sp, offsetof(struct rt_sigframe, vg) );

   build_vg_sigframe(&frame->vg, tst, flags, sigNo);
   return sp;
}
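
The two retcode bytes written in the non-SA_RESTORER branch above form an s390 SVC instruction: opcode 0x0a followed by an 8-bit immediate syscall number. A tiny sketch of that encoding, with the syscall number passed in as a parameter:

#include <stdint.h>

/* Encode "svc <sysno>": 0x0a is the s390 SVC opcode, the second byte
   is the immediate (here __NR_rt_sigreturn in the code above). */
static void encode_svc ( uint8_t retcode[2], uint8_t sysno )
{
   retcode[0] = 0x0a;
   retcode[1] = sysno;
}
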
Example #6
/* 
   When a client clones, we need to keep track of the new thread.  This means:
   1. allocate a ThreadId+ThreadState+stack for the thread

   2. initialize the thread's new VCPU state

   3. create the thread using the same args as the client requested,
   but using the scheduler entrypoint for EIP, and a separate stack
   for ESP.
 */
static SysRes do_clone ( ThreadId ptid, 
                         ULong flags, Addr rsp, 
                         Long* parent_tidptr, 
                         Long* child_tidptr, 
                         Addr tlsaddr )
{
   static const Bool debug = False;

   ThreadId     ctid = VG_(alloc_ThreadState)();
   ThreadState* ptst = VG_(get_ThreadState)(ptid);
   ThreadState* ctst = VG_(get_ThreadState)(ctid);
   UWord*       stack;
   NSegment*    seg;
   SysRes       res;
   Long         rax;
   vki_sigset_t blockall, savedmask;

   VG_(sigfillset)(&blockall);

   vg_assert(VG_(is_running_thread)(ptid));
   vg_assert(VG_(is_valid_tid)(ctid));

   stack = (UWord*)ML_(allocstack)(ctid);
   if (stack == NULL) {
      res = VG_(mk_SysRes_Error)( VKI_ENOMEM );
      goto out;
   }

   /* Copy register state

      Both parent and child return to the same place, and the code
      following the clone syscall works out which is which, so we
      don't need to worry about it.

      The parent gets the child's new tid returned from clone, but the
      child gets 0.

      If the clone call specifies a NULL rsp for the new thread, then
      it actually gets a copy of the parent's rsp.
   */
   setup_child( &ctst->arch, &ptst->arch );

   /* Make sys_clone appear to have returned Success(0) in the
      child. */
   ctst->arch.vex.guest_RAX = 0;

   if (rsp != 0)
      ctst->arch.vex.guest_RSP = rsp;

   ctst->os_state.parent = ptid;

   /* inherit signal mask */
   ctst->sig_mask = ptst->sig_mask;
   ctst->tmp_sig_mask = ptst->sig_mask;

   /* We don't really know where the client stack is, because its
      allocated by the client.  The best we can do is look at the
      memory mappings and try to derive some useful information.  We
      assume that esp starts near its highest possible value, and can
      only go down to the start of the mmaped segment. */
   seg = VG_(am_find_nsegment)((Addr)rsp);
   if (seg && seg->kind != SkResvn) {
      ctst->client_stack_highest_word = (Addr)VG_PGROUNDUP(rsp);
      ctst->client_stack_szB = ctst->client_stack_highest_word - seg->start;

      if (debug)
	 VG_(printf)("tid %d: guessed client stack range %p-%p\n",
		     ctid, seg->start, VG_PGROUNDUP(rsp));
   } else {
      VG_(message)(Vg_UserMsg, "!? New thread %d starts with RSP(%p) unmapped\n",
		   ctid, rsp);
      ctst->client_stack_szB  = 0;
   }

   if (flags & VKI_CLONE_SETTLS) {
      if (debug)
	 VG_(printf)("clone child has SETTLS: tls at %p\n", tlsaddr);
      ctst->arch.vex.guest_FS_ZERO = tlsaddr;
   }

   flags &= ~VKI_CLONE_SETTLS;

   /* start the thread with everything blocked */
   VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, &savedmask);

   /* Create the new thread */
   rax = do_syscall_clone_amd64_linux(
            ML_(start_thread_NORETURN), stack, flags, &VG_(threads)[ctid],
            child_tidptr, parent_tidptr, NULL
         );
   res = VG_(mk_SysRes_amd64_linux)( rax );

   VG_(sigprocmask)(VKI_SIG_SETMASK, &savedmask, NULL);

  out:
   if (res.isError) {
      /* clone failed */
      VG_(cleanup_thread)(&ctst->arch);
      ctst->status = VgTs_Empty;
   }

   return res;
}
/* When a client clones, we need to keep track of the new thread. This means:
   1. allocate a ThreadId+ThreadState+stack for the thread

   2. initialize the thread's new VCPU state

   3. create the thread using the same args as the client requested, but using
      the scheduler entrypoint for IP, and a separate stack for SP. */
static SysRes do_clone ( ThreadId ptid,
                         UInt flags, Addr sp,
                         Int* parent_tidptr,
                         Int* child_tidptr,
                         Addr child_tls )
{
   const Bool debug = False;
   ThreadId ctid = VG_ (alloc_ThreadState) ();
   ThreadState * ptst = VG_ (get_ThreadState) (ptid);
   ThreadState * ctst = VG_ (get_ThreadState) (ctid);
   UInt ret = 0;
   UWord * stack;
   SysRes res;
   vki_sigset_t blockall, savedmask;

   VG_(sigfillset)(&blockall);
   vg_assert(VG_(is_running_thread)(ptid));
   vg_assert(VG_(is_valid_tid)(ctid));
   stack = (UWord *)ML_(allocstack)(ctid);
   if (stack == NULL) {
      res = VG_(mk_SysRes_Error)(VKI_ENOMEM);
      goto out;
   }
   setup_child(&ctst->arch, &ptst->arch);

   /* on MIPS we need to set V0 and A3 to zero */
   ctst->arch.vex.guest_r2 = 0;
   ctst->arch.vex.guest_r7 = 0;
   if (sp != 0)
      ctst->arch.vex.guest_r29 = sp;

   ctst->os_state.parent = ptid;
   ctst->sig_mask = ptst->sig_mask;
   ctst->tmp_sig_mask = ptst->sig_mask;

   ctst->os_state.threadgroup = ptst->os_state.threadgroup;

   ML_(guess_and_register_stack) (sp, ctst);

   VG_TRACK(pre_thread_ll_create, ptid, ctid);
   if (flags & VKI_CLONE_SETTLS) {
       if (debug)
         VG_(printf)("clone child has SETTLS: tls at %#lx\n", child_tls);
       res = sys_set_tls(ctid, child_tls);
       if (sr_isError(res))
          goto out;
       ctst->arch.vex.guest_r27 = child_tls;
   }

   flags &= ~VKI_CLONE_SETTLS;
   VG_ (sigprocmask) (VKI_SIG_SETMASK, &blockall, &savedmask);
   /* Create the new thread */
   ret = do_syscall_clone_mips64_linux(ML_(start_thread_NORETURN),
                                       stack, flags, &VG_(threads)[ctid],
                                       parent_tidptr, NULL /*child_tls*/,
                                       child_tidptr);
   if (debug)
     VG_(printf)("ret: 0x%x\n", ret);

   res = VG_(mk_SysRes_mips64_linux)( /* val */ ret, 0, /* errflag */ 0);

   VG_(sigprocmask)(VKI_SIG_SETMASK, &savedmask, NULL);

   out:
   if (sr_isError (res)) {
      VG_ (cleanup_thread) (&ctst->arch);
      ctst->status = VgTs_Empty;
      VG_TRACK (pre_thread_ll_exit, ctid);
   }
   ptst->arch.vex.guest_r2 = 0;

   return res;
}
void pthread_hijack(Addr self, Addr kport, Addr func, Addr func_arg, 
                    Addr stacksize, Addr flags, Addr sp)
{
   vki_sigset_t blockall;
   ThreadState *tst = (ThreadState *)func_arg;
   VexGuestAMD64State *vex = &tst->arch.vex;

   // VG_(printf)("pthread_hijack pthread %p, machthread %p, func %p, arg %p, stack %p, flags %p, stack %p\n", self, kport, func, func_arg, stacksize, flags, sp);

   // Wait for parent thread's permission.
   // The parent thread holds V's lock on our behalf.
   semaphore_wait(tst->os_state.child_go);

   /* Start the thread with all signals blocked.  VG_(scheduler) will
      set the mask correctly when we finally get there. */
   VG_(sigfillset)(&blockall);
   VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, NULL);

   // Set thread's registers
   // Do this FIRST because some code below tries to collect a backtrace, 
   // which requires valid register data.
   LibVEX_GuestAMD64_initialise(vex);
   vex->guest_RIP = pthread_starter;
   vex->guest_RDI = self;
   vex->guest_RSI = kport;
   vex->guest_RDX = func;
   vex->guest_RCX = tst->os_state.func_arg;
   vex->guest_R8  = stacksize;
   vex->guest_R9  = flags;
   vex->guest_RSP = sp;

   // Record thread's stack and Mach port and pthread struct
   tst->os_state.pthread = self;
   tst->os_state.lwpid = kport;
   record_named_port(tst->tid, kport, MACH_PORT_RIGHT_SEND, "thread-%p");

   if ((flags & 0x01000000) == 0) {
      // kernel allocated stack - needs mapping
      Addr stack = VG_PGROUNDUP(sp) - stacksize;
      tst->client_stack_highest_byte = stack+stacksize-1;
      tst->client_stack_szB = stacksize;

      // pthread structure
      ML_(notify_core_and_tool_of_mmap)(
            stack+stacksize, pthread_structsize, 
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // stack contents
      ML_(notify_core_and_tool_of_mmap)(
            stack, stacksize, 
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // guard page
      ML_(notify_core_and_tool_of_mmap)(
            stack-VKI_PAGE_SIZE, VKI_PAGE_SIZE,
            0, VKI_MAP_PRIVATE, -1, 0);
   } else {
      // client allocated stack
      find_stack_segment(tst->tid, sp);
   }
   ML_(sync_mappings)("after", "pthread_hijack", 0);

   // DDD: should this be here rather than in POST(sys_bsdthread_create)?
   // But we don't have ptid here...
   //VG_TRACK ( pre_thread_ll_create, ptid, tst->tid );

   // Tell parent thread's POST(sys_bsdthread_create) that we're done 
   // initializing registers and mapping memory.
   semaphore_signal(tst->os_state.child_done);
   // LOCK IS GONE BELOW THIS POINT

   // Go!
   call_on_new_stack_0_1(tst->os_state.valgrind_stack_init_SP, 0, 
                         start_thread_NORETURN, (Word)tst);

   /*NOTREACHED*/
   vg_assert(0);
}
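
For the kernel-allocated case above, the three notified ranges are pure address arithmetic: the pthread struct sits just above the stack top, the stack spans stacksize bytes below it, and a single guard page sits below that. A standalone sketch of that layout computation (PAGE_SIZE and the rounding macro are stand-ins for VKI_PAGE_SIZE and VG_PGROUNDUP):

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096u
#define PGROUNDUP(a) (((a) + PAGE_SIZE - 1) & ~(uintptr_t)(PAGE_SIZE - 1))

typedef struct { uintptr_t start; size_t len; } Range;

/* Given the initial sp and stack size handed to pthread_hijack, compute
   the pthread-struct, stack and guard-page ranges that get notified. */
static void kernel_stack_layout ( uintptr_t sp, size_t stacksize,
                                  size_t pthread_structsize,
                                  Range* pth, Range* stk, Range* guard )
{
   uintptr_t stack = PGROUNDUP(sp) - stacksize;

   pth->start   = stack + stacksize;   /* pthread struct above the stack */
   pth->len     = pthread_structsize;
   stk->start   = stack;               /* stack contents */
   stk->len     = stacksize;
   guard->start = stack - PAGE_SIZE;   /* guard page below the stack */
   guard->len   = PAGE_SIZE;
}
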
Example #9
VgStack* VG_(am_alloc_VgStack)( /*OUT*/Addr* initial_sp )
{
   Int      szB;
   SysRes   sres;
   VgStack* stack;
   UInt*    p;
   Int      i;

   /* Allocate the stack. */
   szB = VG_STACK_GUARD_SZB 
         + VG_STACK_ACTIVE_SZB + VG_STACK_GUARD_SZB;

   sres = VG_(am_mmap_anon_float_valgrind)( szB );
   if (sr_isError(sres))
      return NULL;

   stack = (VgStack*)(AddrH)sr_Res(sres);

   aspacem_assert(VG_IS_PAGE_ALIGNED(szB));
   aspacem_assert(VG_IS_PAGE_ALIGNED(stack));

   /* Protect the guard areas. */
   sres = local_do_mprotect_NO_NOTIFY( 
             (Addr) &stack[0], 
             VG_STACK_GUARD_SZB, VKI_PROT_NONE 
          );
   if (sr_isError(sres)) goto protect_failed;
   VG_(am_notify_mprotect)( 
      (Addr) &stack->bytes[0], 
      VG_STACK_GUARD_SZB, VKI_PROT_NONE 
   );

   sres = local_do_mprotect_NO_NOTIFY( 
             (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB], 
             VG_STACK_GUARD_SZB, VKI_PROT_NONE 
          );
   if (sr_isError(sres)) goto protect_failed;
   VG_(am_notify_mprotect)( 
      (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB],
      VG_STACK_GUARD_SZB, VKI_PROT_NONE 
   );

   /* Looks good.  Fill the active area with junk so we can later
      tell how much got used. */

   p = (UInt*)&stack->bytes[VG_STACK_GUARD_SZB];
   for (i = 0; i < VG_STACK_ACTIVE_SZB/sizeof(UInt); i++)
      p[i] = 0xDEADBEEF;

   *initial_sp = (Addr)&stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB];
   *initial_sp -= 8;
   *initial_sp &= ~((Addr)0x1F); /* 32-align it */

   VG_(debugLog)( 1,"aspacem","allocated thread stack at 0x%llx size %d\n",
                  (ULong)(Addr)stack, szB);
   ML_(am_do_sanity_check)();
   return stack;

  protect_failed:
   /* The stack was allocated, but we can't protect it.  Unmap it and
      return NULL (failure). */
   (void)ML_(am_do_munmap_NO_NOTIFY)( (Addr)stack, szB );
   ML_(am_do_sanity_check)();
   return NULL;
}
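
The 0xDEADBEEF fill above exists so that a later scan can tell how much of the active area was ever written: the stack grows downwards, so the run of untouched words at its low end is the unused part. A minimal standalone sketch of such a scan (plain C, not the Valgrind helper):

#include <stddef.h>
#include <stdint.h>

/* 'active' points at the low end of the active area (just above the
   lower guard); returns how many bytes were never overwritten. */
static size_t stack_unused_bytes ( const uint32_t* active, size_t active_szB )
{
   size_t i, n = active_szB / sizeof(uint32_t);
   for (i = 0; i < n; i++)
      if (active[i] != 0xDEADBEEFu)   /* first word the program touched */
         break;
   return i * sizeof(uint32_t);
}
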
Example #10
/* 
   When a client clones, we need to keep track of the new thread.  This means:
   1. allocate a ThreadId+ThreadState+stack for the thread

   2. initialize the thread's new VCPU state

   3. create the thread using the same args as the client requested,
   but using the scheduler entrypoint for IP, and a separate stack
   for SP.
 */
static SysRes do_clone ( ThreadId ptid, 
                         UInt flags, Addr sp, 
                         Int *parent_tidptr, 
                         Int *child_tidptr, 
                         Addr child_tls)
{
   const Bool debug = False;

   ThreadId     ctid = VG_(alloc_ThreadState)();
   ThreadState* ptst = VG_(get_ThreadState)(ptid);
   ThreadState* ctst = VG_(get_ThreadState)(ctid);
   ULong        word64;
   UWord*       stack;
   SysRes       res;
   vki_sigset_t blockall, savedmask;

   VG_(sigfillset)(&blockall);

   vg_assert(VG_(is_running_thread)(ptid));
   vg_assert(VG_(is_valid_tid)(ctid));

   stack = (UWord*)ML_(allocstack)(ctid);
   if (stack == NULL) {
      res = VG_(mk_SysRes_Error)( VKI_ENOMEM );
      goto out;
   }

//?   /* make a stack frame */
//?   stack -= 16;
//?   *(UWord *)stack = 0;


   /* Copy register state

      Both parent and child return to the same place, and the code
      following the clone syscall works out which is which, so we
      don't need to worry about it.

      The parent gets the child's new tid returned from clone, but the
      child gets 0.

      If the clone call specifies a NULL SP for the new thread, then
      it actually gets a copy of the parent's SP.

      The child's TLS register (r2) gets set to the tlsaddr argument
      if the CLONE_SETTLS flag is set.
   */
   setup_child( &ctst->arch, &ptst->arch );

   /* Make sys_clone appear to have returned Success(0) in the
      child. */
   { UInt old_cr = LibVEX_GuestPPC64_get_CR( &ctst->arch.vex );
     /* %r3 = 0 */
     ctst->arch.vex.guest_GPR3 = 0;
     /* %cr0.so = 0 */
     LibVEX_GuestPPC64_put_CR( old_cr & ~(1<<28), &ctst->arch.vex );
   }

   if (sp != 0)
      ctst->arch.vex.guest_GPR1 = sp;

   ctst->os_state.parent = ptid;

   /* inherit signal mask */
   ctst->sig_mask = ptst->sig_mask;
   ctst->tmp_sig_mask = ptst->sig_mask;

   /* Start the child with its threadgroup being the same as the
      parent's.  This is so that any exit_group calls that happen
      after the child is created but before it sets its
      os_state.threadgroup field for real (in thread_wrapper in
      syswrap-linux.c), really kill the new thread.  a.k.a this avoids
      a race condition in which the thread is unkillable (via
      exit_group) because its threadgroup is not set.  The race window
      is probably only a few hundred or a few thousand cycles long.
      See #226116. */
   ctst->os_state.threadgroup = ptst->os_state.threadgroup;

   ML_(guess_and_register_stack) (sp, ctst);

   /* Assume the clone will succeed, and tell any tool that wants to
      know that this thread has come into existence.  If the clone
      fails, we'll send out a ll_exit notification for it at the out:
      label below, to clean up. */
   vg_assert(VG_(owns_BigLock_LL)(ptid));
   VG_TRACK ( pre_thread_ll_create, ptid, ctid );

   if (flags & VKI_CLONE_SETTLS) {
      if (debug)
         VG_(printf)("clone child has SETTLS: tls at %#lx\n", child_tls);
      ctst->arch.vex.guest_GPR13 = child_tls;
   }

   flags &= ~VKI_CLONE_SETTLS;

   /* start the thread with everything blocked */
   VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, &savedmask);

   /* Create the new thread */
   word64 = do_syscall_clone_ppc64_linux(
               ML_(start_thread_NORETURN),
               stack, flags, &VG_(threads)[ctid],
               child_tidptr, parent_tidptr, NULL
            );

   /* Low half word64 is syscall return value.  Hi half is
      the entire CR, from which we need to extract CR0.SO. */
   /* VG_(printf)("word64 = 0x%llx\n", word64); */
   res = VG_(mk_SysRes_ppc64_linux)( 
            /*val*/(UInt)(word64 & 0xFFFFFFFFULL), 
            /*errflag*/ (UInt)((word64 >> (32+28)) & 1)
         );

   VG_(sigprocmask)(VKI_SIG_SETMASK, &savedmask, NULL);

  out:
   if (sr_isError(res)) {
      /* clone failed */
      VG_(cleanup_thread)(&ctst->arch);
      ctst->status = VgTs_Empty;
      /* oops.  Better tell the tool the thread exited in a hurry :-) */
      VG_TRACK( pre_thread_ll_exit, ctid );
   }

   return res;
}
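
The decode at the end unpacks two things from word64: the low 32 bits are the syscall return value, the high 32 bits are the full CR, and CR0.SO (bit 28 of the 32-bit CR, the same bit cleared earlier with ~(1<<28)) flags an error. A one-function sketch of that unpacking:

#include <stdint.h>

/* Split the value returned by do_syscall_clone_ppc64_linux into the
   syscall result and the CR0.SO error flag. */
static void decode_clone_result ( uint64_t word64,
                                  uint32_t* val, uint32_t* errflag )
{
   *val     = (uint32_t)(word64 & 0xFFFFFFFFULL);
   *errflag = (uint32_t)((word64 >> (32 + 28)) & 1);   /* CR0.SO */
}
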
Example #11
/* Read stabs-format debug info.  This is all rather horrible because
   stabs is an underspecified, kludgy hack.
*/
void ML_(read_debuginfo_stabs) ( DebugInfo* di,
                                 UChar* stabC,   Int stab_sz, 
                                 HChar* stabstr, Int stabstr_sz )
{
   Int    i;
   Int    n_stab_entries;
   struct nlist* stab = (struct nlist*)stabC;
   HChar *next_stabstr = NULL;
   /* state for various things */
   struct {
      Addr     start;         /* start address */
      Addr     end;           /* end address */
      Int      line;          /* first line */
   } func = { 0, 0, -1 };
   struct {
      HChar   *name;
      Bool     same;
   } file = { NULL, True };
   struct {
      Int      prev;          /* prev line */
      Int      no;            /* current line */
      Int      ovf;           /* line wrap */
      Addr     addr;          /* start of this line */
      Bool     first;         /* first line in function */
   } line = { 0, 0, 0, 0, False };

   vg_assert (0);
   /* The stab reader has been broken since the debuginfo-server work
      (revision 13440); see the #if 0 around the call to
      ML_(read_debuginfo_stabs) in readelf.c.  If it is ever repaired,
      file.name above should be replaced by a fndn_ix for performance
      reasons. */

   /* Ok.  It all looks plausible.  Go on and read debug data. 
         stab kinds: 100   N_SO     a source file name
                      68   N_SLINE  a source line number
                      36   N_FUN    start of a function

      In this loop, we maintain a current file name, updated as 
      N_SO/N_SOLs appear, and a current function base address, 
      updated as N_FUNs appear.  Based on that, address ranges for 
      N_SLINEs are calculated, and stuffed into the line info table.

      Finding the instruction address range covered by an N_SLINE is
      complicated;  see the N_SLINE case below.
   */
   file.name     = ML_(addStr)(di,"???", -1);

   n_stab_entries = stab_sz/(int)sizeof(struct nlist);

   TRACE_SYMTAB("\n--- Reading STABS (%d entries) ---\n", n_stab_entries);

   for (i = 0; i < n_stab_entries; i++) {
      const struct nlist *st = &stab[i];
      HChar *string;

      TRACE_SYMTAB("%2d  type=%d   othr=%d   desc=%d   "
                   "value=0x%x   strx=%d  %s\n", i,
                   st->n_type, st->n_other, st->n_desc, 
                   (Int)st->n_value,
                   (Int)st->n_un.n_strx, 
                   stabstr + st->n_un.n_strx );

      /* handle continued string stabs */
      {
         Int   qbuflen = 0;
         Int   qidx = 0;
         HChar* qbuf = NULL;
         Int   qlen;
         Bool  qcontinuing = False;
         UInt  qstringidx;

         qstringidx = st->n_un.n_strx;
         string = stabstr + qstringidx;
         qlen = VG_(strlen)(string);

         while (string 
                && qlen > 0 
                && (qcontinuing || string[qlen-1] == '\\')) {
            /* Gak, we have a continuation. Skip forward through
               subsequent stabs to gather all the parts of the
               continuation.  Increment i, but keep st pointing at
               current stab. */

            qcontinuing = string[qlen-1] == '\\';

            /* remove trailing \ */
            while (qlen > 0 && string[qlen-1] == '\\')
               qlen--;

            TRACE_SYMTAB("cont: found extension string: \"%s\" "
                         "len=%d(%c) idx=%d buflen=%d\n", 
                         string, qlen, string[qlen-1], qidx, qbuflen);

            /* XXX this is silly.  The si->strtab should have a way of
               appending to the last added string... */
            if ((qidx + qlen) >= qbuflen) {
               HChar *n;
               
               if (qbuflen == 0)
                  qbuflen = 16;
               while ((qidx + qlen) >= qbuflen)
                  qbuflen *= 2;
               n = ML_(dinfo_zalloc)("di.readstabs.rds.1", qbuflen);
               VG_(memcpy)(n, qbuf, qidx);
               
               if (qbuf != NULL)
                  ML_(dinfo_free)(qbuf);
               qbuf = n;
            }

            VG_(memcpy)(&qbuf[qidx], string, qlen);
            qidx += qlen;
            if (di->trace_symtab) {
               qbuf[qidx] = '\0';
               TRACE_SYMTAB("cont: working buf=\"%s\"\n", qbuf);
            }

            i++;
            if (i >= n_stab_entries)
               break;

            if (stab[i].n_un.n_strx) {
               string = stabstr + stab[i].n_un.n_strx;
               qlen = VG_(strlen)(string);
            } else {
               string = NULL;
               qlen = 0;
            }
         }

         if (qbuf != NULL) {
            i--;                        /* overstepped */
            string = ML_(addStr)(di, qbuf, qidx);
            ML_(dinfo_free)(qbuf);
            TRACE_SYMTAB("cont: made composite: \"%s\"\n", string);
         }
      }

      switch(st->n_type) {
         case N_UNDEF:
            /* new string table base */
            if (next_stabstr != NULL) {
               stabstr_sz -= next_stabstr - stabstr;
               stabstr = next_stabstr;
               if (stabstr_sz <= 0) {
                  VG_(printf)(" @@ bad stabstr size %d\n", stabstr_sz);
                  return;
               }
            }
            next_stabstr = stabstr + st->n_value;
            break;

         case N_BINCL: {
            break;
         }

         case N_EINCL:
            break;

         case N_EXCL:
            break;

         case N_SOL:                /* sub-source (include) file */
            if (line.ovf != 0) 
               VG_(message)(Vg_UserMsg, 
                            "Warning: file %s is very big (> 65535 lines) "
                            "Line numbers and annotation for this file might "
                            "be wrong.  Sorry.\n",
                            file.name);
            /* FALLTHROUGH */

         case N_SO: {                /* new source file */
            HChar *nm = string;
            UInt len = VG_(strlen)(nm);
            Addr addr = func.start + st->n_value;

            if (line.addr != 0) {
               /* finish off previous line */
               ML_(addLineInfo)(di, 
                                ML_(addFnDn) (di,
                                              file.name,
                                              NULL),
                                line.addr,
                                addr, line.no + line.ovf * LINENO_OVERFLOW, i);
            }

            /* reset line state */
            line.ovf = 0;            
            line.addr = 0;
            line.prev = 0;
            line.no = 0;

            if (len > 0 && nm[len-1] != '/') {
               file.name = ML_(addStr)(di, nm, -1);
               TRACE_SYMTAB("new source: %s\n", file.name);
            } else if (len == 0)
               file.name = ML_(addStr)(di, "?1\0", -1);

            break;
         }

         case N_SLINE: {        /* line info */
            Addr addr = func.start + st->n_value;

            if (line.addr != 0) {
               /* there was a previous */
               ML_(addLineInfo)(di, 
                                ML_(addFnDn)(di,
                                             file.name,
                                             NULL), 
                                line.addr,
                                addr, line.no + line.ovf * LINENO_OVERFLOW, i);
            }

            line.addr = addr;
            line.prev = line.no;
            line.no = (Int)((UShort)st->n_desc);

            if (line.prev > line.no + OVERFLOW_DIFFERENCE && file.same) {
               VG_(message)(Vg_DebugMsg, 
                  "Line number overflow detected (%d --> %d) in %s\n", 
                  line.prev, line.no, file.name);
               line.ovf++;
            }
            file.same = True;

            /* This is pretty horrible.  If this is the first line of
               the function, then bind any unbound symbols to the arg
               scope, since they're probably arguments. */
            if (line.first) {
               line.first = False;
               
               /* remember first line of function */
               if (func.start != 0) {
                  func.line = line.no;
               }
            }
            break;
         }

         case N_FUN: {                /* function start/end */
            Addr addr = 0;        /* end address for prev line/scope */

            /* if this is the end of the function, or we haven't
               previously finished the previous function... */
            if (*string == '\0' || func.start != 0) {
               /* end of function */
               line.first = False;

               /* end line at end of function */
               addr = func.start + st->n_value;

               /* now between functions */
               func.start = 0;

               // XXXX DEAD POINT XXXX
            }

            if (*string != '\0') {
               /* new function */
               line.first = True;

               /* line ends at start of next function */
               addr = di->text_debug_bias + st->n_value;

               func.start = addr;
            }

            if (line.addr) {
               ML_(addLineInfo)(di, 
                                ML_(addFnDn) (di,
                                              file.name,
                                              NULL),
                                line.addr,
                                addr, line.no + line.ovf * LINENO_OVERFLOW, i);
               line.addr = 0;
            }

            //DEAD POINT
            //DEAD POINT
            break;
         }

         case N_LBRAC: {
            /* open new scope */
            // DEAD POINT
            break;
         }

         case N_RBRAC: {
            /* close scope */
            // DEAD POINT
            break;
         }

         case N_GSYM:                /* global variable */
         case N_STSYM:                /* static in data segment */
         case N_LCSYM:                /* static in bss segment */
         case N_PSYM:                /* function parameter */
         case N_LSYM:                /* stack variable */
         case N_RSYM:                  /* register variable */
            break;
      }
   }
}
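
The continuation handling in the loop above (stab strings ending in '\' are glued to the following entries) is the fiddliest part of the reader. A standalone sketch of the same joining logic over an ordinary string array, with the growing buffer managed by realloc instead of the hand-rolled doubling above:

#include <stdlib.h>
#include <string.h>

/* Join strs[*i], strs[*i + 1], ... for as long as each piece ends in a
   backslash, dropping the backslashes.  Advances *i past the pieces
   consumed and returns a malloc'd string (caller frees), or NULL on
   allocation failure. */
static char* join_continued ( const char** strs, int nstrs, int* i )
{
   size_t cap = 16, len = 0;
   char*  buf = malloc(cap);
   int    cont = 1;
   if (!buf) return NULL;

   while (cont && *i < nstrs) {
      const char* s = strs[(*i)++];
      size_t n = strlen(s);
      cont = (n > 0 && s[n-1] == '\\');
      while (n > 0 && s[n-1] == '\\')      /* strip trailing backslashes */
         n--;
      if (len + n + 1 > cap) {             /* grow the working buffer */
         while (len + n + 1 > cap) cap *= 2;
         char* nb = realloc(buf, cap);
         if (!nb) { free(buf); return NULL; }
         buf = nb;
      }
      memcpy(buf + len, s, n);
      len += n;
   }
   buf[len] = '\0';
   return buf;
}
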
/* EXPORTED */
void VG_(sigframe_create)( ThreadId tid, 
                           Addr sp_top_of_frame,
                           const vki_siginfo_t *siginfo,
                           const struct vki_ucontext *siguc,
                           void *handler, 
                           UInt flags,
                           const vki_sigset_t *mask,
                           void *restorer )
{
  Addr sp;
  ThreadState* tst = VG_(get_ThreadState)(tid);
  Int sigNo = siginfo->si_signo;
  struct vg_sig_private *priv;

  /* Stack must be 8-byte aligned */
  sp_top_of_frame &= ~0xf;

  if (flags & VKI_SA_SIGINFO)
    {
      sp = sp_top_of_frame - sizeof(struct rt_sigframe);
    }
  else
    {
      sp = sp_top_of_frame - sizeof(struct sigframe);
    }

  tst = VG_(get_ThreadState)(tid);
  if (! ML_(sf_maybe_extend_stack)(tst, sp, sp_top_of_frame - sp, flags))
    return;

  vg_assert(VG_IS_8_ALIGNED(sp));
      
  if (flags & VKI_SA_SIGINFO)
    {
      struct rt_sigframe *frame = (struct rt_sigframe *) sp;
      struct vki_ucontext *ucp = &frame->rs_uc;
      if (VG_(clo_trace_signals))
        VG_(printf)("rt_sigframe\n");
      /* Create siginfo.  */
      VG_TRACK( pre_mem_write, Vg_CoreSignal, tid, "signal frame siginfo",
               (Addr)&frame->rs_info, sizeof(frame->rs_info) );

      VG_(memcpy)(&frame->rs_info, siginfo, sizeof(*siginfo));

      VG_TRACK( post_mem_write, Vg_CoreSignal, tid, 
               (Addr)&frame->rs_info, sizeof(frame->rs_info) );

      /* Create the ucontext.  */
      VG_TRACK( pre_mem_write, Vg_CoreSignal, tid, "signal frame ucontext",
               (Addr)ucp, offsetof(struct vki_ucontext, uc_mcontext) );

      ucp->uc_flags = 0;  
      ucp->uc_link = 0;
      ucp->uc_stack = tst->altstack;

      VG_TRACK( post_mem_write, Vg_CoreSignal, tid, (Addr)ucp,
               offsetof(struct vki_ucontext, uc_mcontext) );

      struct vki_sigcontext *scp = &(frame->rs_uc.uc_mcontext);
      setup_sigcontext2(tst, &(scp), siginfo);

      ucp->uc_sigmask = tst->sig_mask;

      priv = &frame->priv;

      /*
       * Arguments to signal handler:
       *
       *   a0 = signal number
       *   a1 = 0 (should be cause)
       *   a2 = pointer to ucontext
       *
       * $25 and c0_epc point to the signal handler, $29 points to
       * the struct rt_sigframe.
       */

      tst->arch.vex.guest_r4 = siginfo->si_signo;
      tst->arch.vex.guest_r5 = (Addr) &frame->rs_info;
      tst->arch.vex.guest_r6 = (Addr) &frame->rs_uc;
      tst->arch.vex.guest_r29 = (Addr) frame;
      tst->arch.vex.guest_r25 = (Addr) handler;

      if (flags & VKI_SA_RESTORER)
        {
          tst->arch.vex.guest_r31 = (Addr) restorer;  
        }
      else
        {
          tst->arch.vex.guest_r31 = (Addr)&VG_(mips32_linux_SUBST_FOR_rt_sigreturn);
        }

    }
  else
    {
      if (VG_(clo_trace_signals))
/* Create a signal frame for thread 'tid'.  Make a 3-arg frame
   regardless of whether the client originally requested a 1-arg
   version (no SA_SIGINFO) or a 3-arg one (SA_SIGINFO) since in the
   former case, the amd64 calling conventions will simply cause the
   extra 2 args to be ignored (inside the handler).  (We hope!) */
void VG_(sigframe_create) ( ThreadId tid,
                            Addr sp_top_of_frame,
                            const vki_siginfo_t *siginfo,
                            const struct vki_ucontext *siguc,
                            void *handler,
                            UInt flags,
                            const vki_sigset_t *mask,
                            void *restorer )
{
   ThreadState* tst;
   Addr rsp;
   struct hacky_sigframe* frame;
   Int sigNo = siginfo->si_signo;

   vg_assert(VG_IS_16_ALIGNED(sizeof(struct hacky_sigframe)));

   sp_top_of_frame &= ~0xfUL;
   rsp = sp_top_of_frame - sizeof(struct hacky_sigframe);
   rsp -= 8; /* ELF ABI says that rsp+8 must be 16 aligned on
                entry to a function. */

   tst = VG_(get_ThreadState)(tid);
   if (! ML_(sf_extend_stack)(tst, rsp, sp_top_of_frame - rsp))
      return;

   vg_assert(VG_IS_16_ALIGNED(rsp+8));

   frame = (struct hacky_sigframe *) rsp;

   /* clear it (very conservatively) (why so conservatively??) */
   VG_(memset)(&frame->lower_guardzone, 0, sizeof frame->lower_guardzone);
   VG_(memset)(&frame->gst,      0, sizeof(VexGuestAMD64State));
   VG_(memset)(&frame->gshadow1, 0, sizeof(VexGuestAMD64State));
   VG_(memset)(&frame->gshadow2, 0, sizeof(VexGuestAMD64State));
   VG_(memset)(&frame->fake_siginfo,  0, sizeof(frame->fake_siginfo));
   VG_(memset)(&frame->fake_ucontext, 0, sizeof(frame->fake_ucontext));

   /* save stuff in frame */
   frame->gst           = tst->arch.vex;
   frame->gshadow1      = tst->arch.vex_shadow1;
   frame->gshadow2      = tst->arch.vex_shadow2;
   frame->sigNo_private = sigNo;
   frame->mask          = tst->sig_mask;
   frame->magicPI       = 0x31415927;

   /* Minimally fill in the siginfo and ucontext.  Note, utter
      lameness prevails.  Be underwhelmed, be very underwhelmed. */
   frame->fake_siginfo.si_signo = sigNo;
   frame->fake_siginfo.si_code  = siginfo->si_code;

   /* Set up stack pointer */
   vg_assert(rsp == (Addr)&frame->returnAddr);
   VG_(set_SP)(tid, rsp);
   VG_TRACK( post_reg_write, Vg_CoreSignal, tid, VG_O_STACK_PTR, sizeof(ULong));

   /* Set up program counter */
   VG_(set_IP)(tid, (ULong)handler);
   VG_TRACK( post_reg_write, Vg_CoreSignal, tid, VG_O_INSTR_PTR, sizeof(ULong));

   /* Set up RA and args for the frame */
   VG_TRACK( pre_mem_write, Vg_CoreSignal, tid, "signal handler frame",
             (Addr)frame, 1*sizeof(ULong) );
   frame->returnAddr  = (ULong)&VG_(amd64_darwin_SUBST_FOR_sigreturn);

   /* XXX should tell the tool that these regs got written */
   tst->arch.vex.guest_RDI = (ULong) sigNo;
   tst->arch.vex.guest_RSI = (Addr)  &frame->fake_siginfo;/* oh well */
   tst->arch.vex.guest_RDX = (Addr)  &frame->fake_ucontext; /* oh well */

   VG_TRACK( post_mem_write, Vg_CoreSignal, tid,
             (Addr)frame, 1*sizeof(ULong) );
   VG_TRACK( post_mem_write, Vg_CoreSignal, tid,
             (Addr)&frame->fake_siginfo, sizeof(frame->fake_siginfo));
   VG_TRACK( post_mem_write, Vg_CoreSignal, tid,
             (Addr)&frame->fake_ucontext, sizeof(frame->fake_ucontext));

   if (VG_(clo_trace_signals))
      VG_(message)(Vg_DebugMsg,
                   "sigframe_create (thread %d): "
                   "next EIP=%#lx, next ESP=%#lx\n",
                   tid, (Addr)handler, (Addr)frame );
}
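
What both sigframe builders above are arranging is the three-argument calling convention of an SA_SIGINFO handler: signal number, siginfo pointer, ucontext pointer (RDI/RSI/RDX on amd64, $4/$5/$6 on mips32). From the client's side that contract is the standard POSIX setup below, shown for context only:

#include <signal.h>
#include <stdio.h>
#include <string.h>

static void handler ( int signo, siginfo_t* info, void* ucontext )
{
   (void)info; (void)ucontext;
   /* illustrative only: printf is not async-signal-safe */
   printf("caught signal %d\n", signo);
}

int main ( void )
{
   struct sigaction sa;
   memset(&sa, 0, sizeof(sa));
   sa.sa_sigaction = handler;    /* 3-arg form selected by SA_SIGINFO */
   sa.sa_flags     = SA_SIGINFO;
   sigemptyset(&sa.sa_mask);
   sigaction(SIGUSR1, &sa, NULL);
   raise(SIGUSR1);
   return 0;
}
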
Example #14
File: readmacho.c Project: Grindland/ppres
/* Search for an existing dSYM file as a possible separate debug file.  
   Adapted from gdb. */
static Char *
find_separate_debug_file (const Char *executable_name)
{
   Char *basename_str;
   Char *dot_ptr;
   Char *slash_ptr;
   Char *dsymfile;
    
   /* Make sure the object file name itself doesn't contain ".dSYM" in it or we
      will end up with an infinite loop where after we add a dSYM symbol file,
      it will then enter this function asking if there is a debug file for the
      dSYM file itself.  */
   if (VG_(strcasestr) (executable_name, ".dSYM") == NULL)
   {
      /* Check for the existence of a .dSYM file for a given executable.  */
      basename_str = VG_(basename) (executable_name);
      dsymfile = ML_(dinfo_zalloc)("di.readmacho.dsymfile", 
                    VG_(strlen) (executable_name)
                    + VG_(strlen) (APPLE_DSYM_EXT_AND_SUBDIRECTORY)
                    + VG_(strlen) (basename_str)
                    + 1
                 );
        
      /* First try for the dSYM in the same directory as the original file.  */
      VG_(strcpy) (dsymfile, executable_name);
      VG_(strcat) (dsymfile, APPLE_DSYM_EXT_AND_SUBDIRECTORY);
      VG_(strcat) (dsymfile, basename_str);
        
      if (file_exists_p (dsymfile))
         return dsymfile;
        
      /* Now search for any parent directory that has a '.' in it so we can find
         Mac OS X applications, bundles, plugins, and any other kinds of files. 
         Mac OS X application bundles will have their program in
         "/some/path/MyApp.app/Contents/MacOS/MyApp" (or replace ".app" with
         ".bundle" or ".plugin" for other types of bundles).  So we look for any
         prior '.' character and try appending the apple dSYM extension and
         subdirectory and see if we find an existing dSYM file (in the above
         MyApp example the dSYM would be at either:
         "/some/path/MyApp.app.dSYM/Contents/Resources/DWARF/MyApp" or
         "/some/path/MyApp.dSYM/Contents/Resources/DWARF/MyApp".  */
      VG_(strcpy) (dsymfile, VG_(dirname) (executable_name));
      while ((dot_ptr = VG_(strrchr) (dsymfile, '.')))
      {
         /* Find the directory delimiter that follows the '.' character since
            we now look for a .dSYM that follows any bundle extension.  */
         slash_ptr = VG_(strchr) (dot_ptr, '/');
         if (slash_ptr)
         {
             /* NULL terminate the string at the '/' character and append
                the path down to the dSYM file.  */
            *slash_ptr = '\0';
            VG_(strcat) (slash_ptr, APPLE_DSYM_EXT_AND_SUBDIRECTORY);
            VG_(strcat) (slash_ptr, basename_str);
            if (file_exists_p (dsymfile))
               return dsymfile;
         }
         
         /* NULL terminate the string at the '.' character and append
            the path down to the dSYM file.  */
         *dot_ptr = '\0';
         VG_(strcat) (dot_ptr, APPLE_DSYM_EXT_AND_SUBDIRECTORY);
         VG_(strcat) (dot_ptr, basename_str);
         if (file_exists_p (dsymfile))
            return dsymfile;
         
         /* NULL terminate the string at the '.' located by the strrchr()
            function again.  */
         *dot_ptr = '\0';
         
         /* We found a previous extension '.' character but did not find a
            dSYM file, so now find the previous directory delimiter so we
            don't try multiple times on a file name that may have a version
            number in it, such as "/some/path/MyApp.6.0.4.app".  */
         slash_ptr = VG_(strrchr) (dsymfile, '/');
         if (!slash_ptr)
            break;
         /* NULL terminate the string at the previous directory character
            and search again.  */
         *slash_ptr = '\0';
      }
   }

   return NULL;
}
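
Concretely, for "/some/path/MyApp.app/Contents/MacOS/MyApp" the first candidate built above is the executable path, then the dSYM suffix, then the basename. A standalone sketch of just that first construction (the suffix value here is an assumption mirroring APPLE_DSYM_EXT_AND_SUBDIRECTORY):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DSYM_SUFFIX ".dSYM/Contents/Resources/DWARF/"

/* /some/path/MyApp -> /some/path/MyApp.dSYM/Contents/Resources/DWARF/MyApp */
static char* first_dsym_candidate ( const char* exe )
{
   const char* base = strrchr(exe, '/');
   base = base ? base + 1 : exe;

   size_t n = strlen(exe) + strlen(DSYM_SUFFIX) + strlen(base) + 1;
   char*  p = malloc(n);
   if (!p) return NULL;
   snprintf(p, n, "%s%s%s", exe, DSYM_SUFFIX, base);
   return p;   /* caller frees */
}
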
 /*
   When a client clones, we need to keep track of the new thread.  This means:
   1. allocate a ThreadId+ThreadState+stack for the thread
   2. initialize the thread's new VCPU state
   3. create the thread using the same args as the client requested,
   but using the scheduler entrypoint for IP, and a separate stack
   for SP.
 */
static SysRes do_clone ( ThreadId ptid,
                         Long flags, Addr sp,
                         Long * parent_tidptr,
                         Long * child_tidptr,
                         Addr child_tls )
{
  const Bool debug = False;
  ThreadId ctid = VG_ (alloc_ThreadState) ();
  ThreadState * ptst = VG_ (get_ThreadState) (ptid);
  ThreadState * ctst = VG_ (get_ThreadState) (ctid);
  Long ret = 0;
  Long * stack;
  SysRes res;
  vki_sigset_t blockall, savedmask;

  VG_ (sigfillset) (&blockall);
  vg_assert (VG_ (is_running_thread) (ptid));
  vg_assert (VG_ (is_valid_tid) (ctid));
  stack = (Long *) ML_ (allocstack) (ctid);
  if (stack == NULL) {
    res = VG_ (mk_SysRes_Error) (VKI_ENOMEM);
    goto out;
  }
  setup_child (&ctst->arch, &ptst->arch);

  /* On TILEGX we need to set r0 and r3 to zero */
  ctst->arch.vex.guest_r0 = 0;
  ctst->arch.vex.guest_r3 = 0;
  if (sp != 0)
    ctst->arch.vex.guest_r54 = sp;

  ctst->os_state.parent = ptid;
  ctst->sig_mask = ptst->sig_mask;
  ctst->tmp_sig_mask = ptst->sig_mask;

  /* Start the child with its threadgroup being the same as the
     parent's.  This is so that any exit_group calls that happen
     after the child is created but before it sets its
     os_state.threadgroup field for real (in thread_wrapper in
     syswrap-linux.c), really kill the new thread.  a.k.a this avoids
     a race condition in which the thread is unkillable (via
     exit_group) because its threadgroup is not set.  The race window
     is probably only a few hundred or a few thousand cycles long.
     See #226116. */

  ctst->os_state.threadgroup = ptst->os_state.threadgroup;
  ML_(guess_and_register_stack) (sp, ctst);

  VG_TRACK (pre_thread_ll_create, ptid, ctid);
  if (flags & VKI_CLONE_SETTLS) {
    if (debug)
      VG_(printf)("clone child has SETTLS: tls at %#lx\n", child_tls);
    ctst->arch.vex.guest_r53 = child_tls;
    res = sys_set_tls(ctid, child_tls);
    if (sr_isError(res))
      goto out;
  }

  flags &= ~VKI_CLONE_SETTLS;
  VG_ (sigprocmask) (VKI_SIG_SETMASK, &blockall, &savedmask);
  /* Create the new thread */
  ret = do_syscall_clone_tilegx_linux (ML_ (start_thread_NORETURN),
                                       stack, flags, &VG_ (threads)[ctid],
                                       child_tidptr, parent_tidptr,
                                       (Long)NULL /*child_tls*/);

  /* High half word64 is syscall return value. */
  if (debug)
    VG_(printf)("ret: 0x%llx\n", (ULong)ret);

  res = VG_(mk_SysRes_tilegx_linux) (/*val */ ret);

  VG_ (sigprocmask) (VKI_SIG_SETMASK, &savedmask, NULL);

 out:
  if (sr_isError (res)) {
    VG_(cleanup_thread) (&ctst->arch);
    ctst->status = VgTs_Empty;
    VG_TRACK (pre_thread_ll_exit, ctid);
  }
  ptst->arch.vex.guest_r0 = 0;

  return res;
}
Example #16
File: readmacho.c Project: Grindland/ppres
Bool ML_(read_macho_debug_info)( struct _DebugInfo* di )
{
   struct symtab_command *symcmd = NULL;
   struct dysymtab_command *dysymcmd = NULL;
   HChar* dsymfilename = NULL;
   Bool have_uuid = False;
   UChar uuid[16];
   ImageInfo ii;  /* main file */
   ImageInfo iid; /* auxiliary .dSYM file */
   Bool ok;

   /* mmap the object file to look for di->soname and di->text_bias 
      and uuid and nlist and STABS */

   if (VG_(clo_verbosity) > 1)
      VG_(message)(Vg_DebugMsg,
                   "%s (%#lx)\n", di->filename, di->rx_map_avma );

   /* This should be ensured by our caller. */
   vg_assert(di->have_rx_map);
   vg_assert(di->have_rw_map);

   VG_(memset)(&ii,   0, sizeof(ii));
   VG_(memset)(&iid,  0, sizeof(iid));
   VG_(memset)(&uuid, 0, sizeof(uuid));

   ok = map_image_aboard( di, &ii, di->filename );
   if (!ok) goto fail;

   vg_assert(ii.macho_img != NULL && ii.macho_img_szB > 0);

   /* Poke around in the Mach-O header, to find some important
      stuff. */
   // Find LC_SYMTAB and LC_DYSYMTAB, if present.
   // Read di->soname from LC_ID_DYLIB if present, 
   //    or from LC_ID_DYLINKER if present, 
   //    or use "NONE".
   // Get di->text_bias (aka slide) based on the corresponding LC_SEGMENT
   // Get uuid for later dsym search

   di->text_bias = 0;

   { struct MACH_HEADER *mh = (struct MACH_HEADER *)ii.macho_img;
      struct load_command *cmd;
      Int c;

      for (c = 0, cmd = (struct load_command *)(mh+1);
           c < mh->ncmds;
           c++, cmd = (struct load_command *)(cmd->cmdsize
                                              + (unsigned long)cmd)) {
         if (cmd->cmd == LC_SYMTAB) {
            symcmd = (struct symtab_command *)cmd;
         } 
         else if (cmd->cmd == LC_DYSYMTAB) {
            dysymcmd = (struct dysymtab_command *)cmd;
         } 
         else if (cmd->cmd == LC_ID_DYLIB && mh->filetype == MH_DYLIB) {
            // GrP fixme bundle?
            struct dylib_command *dcmd = (struct dylib_command *)cmd;
            UChar *dylibname = dcmd->dylib.name.offset + (UChar *)dcmd;
            UChar *soname = VG_(strrchr)(dylibname, '/');
            if (!soname) soname = dylibname;
            else soname++;
            di->soname = ML_(dinfo_strdup)("di.readmacho.dylibname",
                                           soname);
         }
         else if (cmd->cmd==LC_ID_DYLINKER  &&  mh->filetype==MH_DYLINKER) {
            struct dylinker_command *dcmd = (struct dylinker_command *)cmd;
            UChar *dylinkername = dcmd->name.offset + (UChar *)dcmd;
            UChar *soname = VG_(strrchr)(dylinkername, '/');
            if (!soname) soname = dylinkername;
            else soname++;
            di->soname = ML_(dinfo_strdup)("di.readmacho.dylinkername",
                                           soname);
         }

         // A comment from Julian about why varinfo[35] fail:
         //
         // My impression is, from comparing the output of otool -l for these
         // executables with the logic in ML_(read_macho_debug_info),
         // specifically the part that begins "else if (cmd->cmd ==
         // LC_SEGMENT_CMD) {", that it's a complete hack which just happens
         // to work ok for text symbols.  In particular, it appears to assume
         // that in a "struct load_command" of type LC_SEGMENT_CMD, the first
         // "struct SEGMENT_COMMAND" inside it is going to contain the info we
         // need.  However, otool -l shows, and also the Apple docs state,
         // that a struct load_command may contain an arbitrary number of
         // struct SEGMENT_COMMANDs, so I'm not sure why it's OK to merely
         // snarf the first.  But I'm not sure about this.
         //
         // The "Try for __DATA" block below simply adds acquisition of data
         // svma/bias values using the same assumption.  It also needs
         // (probably) to deal with bss sections, but I don't understand how
         // this all ties together really, so it requires further study.
         //
         // If you can get your head around the relationship between MachO
         // segments, sections and load commands, this might be relatively
         // easy to fix properly.
         //
         // Basically we need to come up with plausible numbers for di->
         // {text,data,bss}_{avma,svma}, from which the _bias numbers are
         // then trivially derived.  Then I think the debuginfo reader should
         // work pretty well.
         else if (cmd->cmd == LC_SEGMENT_CMD) {
            struct SEGMENT_COMMAND *seg = (struct SEGMENT_COMMAND *)cmd;
            /* Try for __TEXT */
            if (!di->text_present
                && 0 == VG_(strcmp)(seg->segname, "__TEXT")
                /* DDD: is the  next line a kludge? -- JRS */
                && seg->fileoff == 0 && seg->filesize != 0) {
               di->text_present = True;
               di->text_svma = (Addr)seg->vmaddr;
               di->text_avma = di->rx_map_avma;
               di->text_size = seg->vmsize;
               di->text_bias = di->text_avma - di->text_svma;
               /* Make the _debug_ values be the same as the
                  svma/bias for the primary object, since there is
                  no secondary (debuginfo) object, but nevertheless
                  downstream biasing of Dwarf3 relies on the
                  _debug_ values. */
               di->text_debug_svma = di->text_svma;
               di->text_debug_bias = di->text_bias;
            }
            /* Try for __DATA */
            if (!di->data_present
                && 0 == VG_(strcmp)(seg->segname, "__DATA")
                /* && DDD:seg->fileoff == 0 */ && seg->filesize != 0) {
               di->data_present = True;
               di->data_svma = (Addr)seg->vmaddr;
               di->data_avma = di->rw_map_avma;
               di->data_size = seg->vmsize;
               di->data_bias = di->data_avma - di->data_svma;
               di->data_debug_svma = di->data_svma;
               di->data_debug_bias = di->data_bias;
            }
         }
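         /* LC_UUID: remember the image's 16-byte UUID.  It is used
            further down to check that any dSYM we find really matches
            this object before its DWARF is trusted. */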
         else if (cmd->cmd == LC_UUID) {
             struct uuid_command *uuid_cmd = (struct uuid_command *)cmd;
             VG_(memcpy)(uuid, uuid_cmd->uuid, sizeof(uuid));
             have_uuid = True;
         }
      }
   }

   if (!di->soname) {
      di->soname = ML_(dinfo_strdup)("di.readmacho.noname", "NONE");
   }

   /* Now we have the base object to hand.  Read symbols from it. */

   if (ii.macho_img && ii.macho_img_szB > 0 && symcmd && dysymcmd) {

      /* Read nlist symbol table */
      struct NLIST *syms;
      UChar *strs;
      XArray* /* DiSym */ candSyms = NULL;
      Word i, nCandSyms;

      if (ii.macho_img_szB < symcmd->stroff + symcmd->strsize
          || ii.macho_img_szB < symcmd->symoff + symcmd->nsyms
                                                 * sizeof(struct NLIST)) {
         ML_(symerr)(di, False, "Invalid Mach-O file (5 too small).");
         goto fail;
      }   
      if (dysymcmd->ilocalsym + dysymcmd->nlocalsym > symcmd->nsyms
          || dysymcmd->iextdefsym + dysymcmd->nextdefsym > symcmd->nsyms) {
         ML_(symerr)(di, False, "Invalid Mach-O file (bad symbol table).");
         goto fail;
      }
      
      syms = (struct NLIST *)(ii.macho_img + symcmd->symoff);
      strs = (UChar *)(ii.macho_img + symcmd->stroff);
      
      if (VG_(clo_verbosity) > 1)
         VG_(message)(Vg_DebugMsg,
            "   reading syms   from primary file (%d %d)\n",
            dysymcmd->nextdefsym, dysymcmd->nlocalsym );

      /* Read candidate symbols into 'candSyms', so we can truncate
         overlapping ends and generally tidy up, before presenting
         them to ML_(addSym). */
      candSyms = VG_(newXA)(
                    ML_(dinfo_zalloc), "di.readmacho.candsyms.1",
                    ML_(dinfo_free), sizeof(DiSym)
                 );
      vg_assert(candSyms);

      // extern symbols
      read_symtab(candSyms,
                  di, 
                  syms + dysymcmd->iextdefsym, dysymcmd->nextdefsym, 
                  strs, symcmd->strsize);
      // static and private_extern symbols
      read_symtab(candSyms,
                  di, 
                  syms + dysymcmd->ilocalsym, dysymcmd->nlocalsym, 
                  strs, symcmd->strsize);

      /* tidy up the cand syms -- trim overlapping ends.  May resize
         candSyms. */
      tidy_up_cand_syms( candSyms, di->trace_symtab );

      /* and finally present them to ML_(addSym) */
      nCandSyms = VG_(sizeXA)( candSyms );
      for (i = 0; i < nCandSyms; i++) {
         DiSym* cand = (DiSym*) VG_(indexXA)( candSyms, i );
         if (di->trace_symtab)
            VG_(printf)("nlist final: acquire  avma %010lx-%010lx  %s\n",
                        cand->addr, cand->addr + cand->size - 1, cand->name );
         ML_(addSym)( di, cand );
      }
      VG_(deleteXA)( candSyms );
   }

   /* If there's no UUID in the primary, don't even bother to try and
      read any DWARF, since we won't be able to verify it matches.
      Our policy is not to load debug info unless we can verify that
      it matches the primary.  Just declare success at this point.
      And don't complain to the user, since that would cause us to
      complain on objects compiled without -g.  (Some versions of
      Xcode are observed to omit a UUID entry for objects linked(?)
      without -g.  Others don't appear to omit it.) */
   if (!have_uuid)
      goto success;

   /* mmap the dSYM file to look for DWARF debug info.  If successful,
      use the .macho_img and .macho_img_szB in iid. */

   dsymfilename = find_separate_debug_file( di->filename );

   /* Try to load it. */
   if (dsymfilename) {
      Bool valid;

      if (VG_(clo_verbosity) > 1)
         VG_(message)(Vg_DebugMsg, "   dSYM= %s\n", dsymfilename);

      ok = map_image_aboard( di, &iid, dsymfilename );
      if (!ok) goto fail;

      /* check it has the right uuid. */
      vg_assert(have_uuid);
      valid = iid.macho_img && iid.macho_img_szB > 0 
              && check_uuid_matches( (Addr)iid.macho_img,
                                     iid.macho_img_szB, uuid );
      if (valid)
         goto read_the_dwarf;

      if (VG_(clo_verbosity) > 1)
         VG_(message)(Vg_DebugMsg, "   dSYM does not have "
                                   "correct UUID (out of date?)\n");
   }

   /* There was no dsym file, or it doesn't match.  We'll have to try
      regenerating it, unless --dsymutil=no, in which case just complain
      instead. */

   /* If this looks like a lib that we shouldn't run dsymutil on, just
      give up.  (possible reasons: is system lib, or in /usr etc, or
      the dsym dir would not be writable by the user, or we're running
      as root) */
   vg_assert(di->filename);
   if (is_systemish_library_name(di->filename))
      goto success;

   if (!VG_(clo_dsymutil)) {
      if (VG_(clo_verbosity) == 1) {
         VG_(message)(Vg_DebugMsg, "%s:\n", di->filename);
      }
      if (VG_(clo_verbosity) > 0)
         VG_(message)(Vg_DebugMsg, "%sdSYM directory %s; consider using "
                      "--dsymutil=yes\n",
                      VG_(clo_verbosity) > 1 ? "   " : "",
                      dsymfilename ? "has wrong UUID" : "is missing"); 
      goto success;
   }

   /* Run dsymutil */

   { Int r;
     HChar* dsymutil = "/usr/bin/dsymutil ";
     HChar* cmd = ML_(dinfo_zalloc)( "di.readmacho.tmp1", 
                                     VG_(strlen)(dsymutil)
                                     + VG_(strlen)(di->filename)
                                     + 30 /* misc */ );
     VG_(strcpy)(cmd, dsymutil);
     if (0) VG_(strcat)(cmd, "--verbose ");
     VG_(strcat)(cmd, di->filename);
     VG_(message)(Vg_DebugMsg, "run: %s\n", cmd);
     r = VG_(system)( cmd );
     if (r)
        VG_(message)(Vg_DebugMsg, "run: %s FAILED\n", dsymutil);
     ML_(dinfo_free)(cmd);
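     /* Look again for the dSYM, which dsymutil should now have produced. */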
     dsymfilename = find_separate_debug_file(di->filename);
   }

   /* Try again to load it. */
   if (dsymfilename) {
      Bool valid;

      if (VG_(clo_verbosity) > 1)
         VG_(message)(Vg_DebugMsg, "   dsyms= %s\n", dsymfilename);

      ok = map_image_aboard( di, &iid, dsymfilename );
      if (!ok) goto fail;

      /* check it has the right uuid. */
      vg_assert(have_uuid);
      valid = iid.macho_img && iid.macho_img_szB > 0 
              && check_uuid_matches( (Addr)iid.macho_img,
                                     iid.macho_img_szB, uuid );
      if (!valid) {
         if (VG_(clo_verbosity) > 0) {
            VG_(message)(Vg_DebugMsg,
               "WARNING: did not find expected UUID %02X%02X%02X%02X"
               "-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X"
               " in dSYM dir\n",
               (UInt)uuid[0], (UInt)uuid[1], (UInt)uuid[2], (UInt)uuid[3],
               (UInt)uuid[4], (UInt)uuid[5], (UInt)uuid[6], (UInt)uuid[7],
               (UInt)uuid[8], (UInt)uuid[9], (UInt)uuid[10],
               (UInt)uuid[11], (UInt)uuid[12], (UInt)uuid[13],
               (UInt)uuid[14], (UInt)uuid[15] );
            VG_(message)(Vg_DebugMsg,
                         "WARNING: for %s\n", di->filename);
         }
         unmap_image( &iid );
         /* unmap_image zeroes iid's fields, so the 'if (iid.img)' test
            at the fail: label below will not unmap it a second time. */
         goto fail;
      }
   }

   /* Right.  Finally we have our best try at the dwarf image, so go
      on to reading stuff out of it. */

  read_the_dwarf:
   if (iid.macho_img && iid.macho_img_szB > 0) {
      UChar* debug_info_img = NULL;
      Word   debug_info_sz;
      UChar* debug_abbv_img;
      Word   debug_abbv_sz;
      UChar* debug_line_img;
      Word   debug_line_sz;
      UChar* debug_str_img;
      Word   debug_str_sz;
      UChar* debug_ranges_img;
      Word   debug_ranges_sz;
      UChar* debug_loc_img;
      Word   debug_loc_sz;
      UChar* debug_name_img;
      Word   debug_name_sz;

      debug_info_img = 
          getsectdata(iid.macho_img, iid.macho_img_szB, 
                      "__DWARF", "__debug_info", &debug_info_sz);
      debug_abbv_img = 
          getsectdata(iid.macho_img, iid.macho_img_szB, 
                      "__DWARF", "__debug_abbrev", &debug_abbv_sz);
      debug_line_img = 
          getsectdata(iid.macho_img, iid.macho_img_szB, 
                      "__DWARF", "__debug_line", &debug_line_sz);
      debug_str_img = 
          getsectdata(iid.macho_img, iid.macho_img_szB, 
                      "__DWARF", "__debug_str", &debug_str_sz);
      debug_ranges_img = 
          getsectdata(iid.macho_img, iid.macho_img_szB, 
                      "__DWARF", "__debug_ranges", &debug_ranges_sz);
      debug_loc_img = 
          getsectdata(iid.macho_img, iid.macho_img_szB, 
                      "__DWARF", "__debug_loc", &debug_loc_sz);
      debug_name_img = 
          getsectdata(iid.macho_img, iid.macho_img_szB, 
                      "__DWARF", "__debug_pubnames", &debug_name_sz);
   
      if (debug_info_img) {
         if (VG_(clo_verbosity) > 1) {
            if (0)
               VG_(message)(Vg_DebugMsg,
                            "Reading dwarf3 for %s (%#lx) from %s"
                            " (%ld %ld %ld %ld %ld %ld)\n",
                            di->filename, di->text_avma, dsymfilename,
                            debug_info_sz, debug_abbv_sz, debug_line_sz,
                            debug_str_sz, debug_ranges_sz, debug_loc_sz);
            VG_(message)(Vg_DebugMsg,
               "   reading dwarf3 from dsyms file\n");
         }
         /* The old reader: line numbers and unwind info only */
         ML_(read_debuginfo_dwarf3) ( di,
                                      debug_info_img, debug_info_sz,
                                      debug_abbv_img, debug_abbv_sz,
                                      debug_line_img, debug_line_sz,
                                      debug_str_img,  debug_str_sz );

         /* The new reader: read the DIEs in .debug_info to acquire
            information on variable types and locations.  But only if
            the tool asks for it, or the user requests it on the
            command line. */
         if (VG_(needs).var_info /* the tool requires it */
             || VG_(clo_read_var_info) /* the user asked for it */) {
            ML_(new_dwarf3_reader)(
               di, debug_info_img,   debug_info_sz,
                   debug_abbv_img,   debug_abbv_sz,
                   debug_line_img,   debug_line_sz,
                   debug_str_img,    debug_str_sz,
                   debug_ranges_img, debug_ranges_sz,
                   debug_loc_img,    debug_loc_sz
            );
         }
      }
   }

   if (dsymfilename) ML_(dinfo_free)(dsymfilename);

  success:
   if (ii.img)
      unmap_image(&ii);
   if (iid.img)
      unmap_image(&iid);
   return True;

   /* NOTREACHED */

  fail:
   ML_(symerr)(di, True, "Error reading Mach-O object.");
   if (ii.img)
      unmap_image(&ii);
   if (iid.img)
      unmap_image(&iid);
   return False;
}
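The avma/svma/bias bookkeeping that the LC_SEGMENT_CMD branch above (and the long comment before it) refers to boils down to simple pointer arithmetic: the bias is the difference between where a segment was actually mapped (avma) and the address stated in the file (svma), and every stated symbol address is rebased by that same bias.  The following standalone sketch illustrates only that arithmetic; the addresses are invented and it is not part of the Valgrind code.

/* Minimal sketch of the avma/svma/bias arithmetic (illustrative values). */
#include <stdio.h>

typedef unsigned long Addr;

int main(void)
{
   Addr text_svma = 0x100000000UL;   /* vmaddr stated in the __TEXT segment  */
   Addr text_avma = 0x10a374000UL;   /* where the loader actually mapped it  */
   Addr text_bias = text_avma - text_svma;

   /* A symbol's stated (file) address is rebased by the same bias. */
   Addr sym_svma = 0x100001f30UL;
   Addr sym_avma = sym_svma + text_bias;

   printf("text bias = %#lx, symbol mapped at %#lx\n", text_bias, sym_avma);
   return 0;
}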
Example #17
   LINX_(__NR_set_mempolicy,     sys_set_mempolicy),  // 238 
   LINXY(__NR_get_mempolicy,     sys_get_mempolicy),  // 239

   LINXY(__NR_mq_open,           sys_mq_open),        // 240 
   LINX_(__NR_mq_unlink,         sys_mq_unlink),      // 241 
   LINX_(__NR_mq_timedsend,      sys_mq_timedsend),   // 242 
   LINX_(__NR_mq_timedreceive,   sys_mq_timedreceive),// 243 
   LINX_(__NR_mq_notify,         sys_mq_notify),      // 244

   LINXY(__NR_mq_getsetattr,     sys_mq_getsetattr),  // 245 
   //   (__NR_kexec_load,        sys_ni_syscall),     // 246 
   LINXY(__NR_waitid,            sys_waitid),         // 247 
//   LINX_(__NR_add_key,           sys_add_key),        // 248
//   LINX_(__NR_request_key,       sys_request_key),    // 249

//   LINXY(__NR_keyctl,            sys_keyctl),         // 250
//   LINX_(__NR_ioprio_set,        sys_ioprio_set),     // 251
//   LINX_(__NR_ioprio_get,        sys_ioprio_get),     // 252
   LINX_(__NR_inotify_init,      sys_inotify_init),   // 253
   LINX_(__NR_inotify_add_watch, sys_inotify_add_watch), // 254

   LINX_(__NR_inotify_rm_watch,  sys_inotify_rm_watch), // 255
};

const UInt ML_(syscall_table_size) = 
            sizeof(ML_(syscall_table)) / sizeof(ML_(syscall_table)[0]);

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/
/*  wqthread note: The kernel may create or destroy pthreads in the 
    wqthread pool at any time with no userspace interaction, 
    and wqthread_start may be entered at any time with no userspace 
    interaction.
    To handle this in valgrind, we create and destroy a valgrind 
    thread for every work item.
*/
void wqthread_hijack(Addr self, Addr kport, Addr stackaddr, Addr workitem, 
                     Int reuse, Addr sp)
{
   ThreadState *tst;
   VexGuestAMD64State *vex;
   Addr stack;
   SizeT stacksize;
   vki_sigset_t blockall;

   /* When we enter here we hold no lock (!), so we better acquire it
      pronto.  Why do we hold no lock?  Because (presumably) the only
      way to get here is as a result of a SfMayBlock syscall
      "workq_ops(WQOPS_THREAD_RETURN)", which will have dropped the
      lock.  At least that's clear for the 'reuse' case.  The
      non-reuse case?  Dunno, perhaps it's a new thread the kernel
      pulled out of a hat.  In any case we still need to take a
      lock. */
   VG_(acquire_BigLock_LL)("wqthread_hijack");

   if (0) VG_(printf)(
             "wqthread_hijack: self %#lx, kport %#lx, "
	     "stackaddr %#lx, workitem %#lx, reuse/flags %x, sp %#lx\n", 
	     self, kport, stackaddr, workitem, reuse, sp);

   /* Start the thread with all signals blocked.  VG_(scheduler) will
      set the mask correctly when we finally get there. */
   VG_(sigfillset)(&blockall);
   VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, NULL);

   /* For 10.7 and earlier, |reuse| appeared to be used as a simple
      boolean.  In 10.8 and later its name changed to |flags| and has
      various other bits OR-d into it too, so it's necessary to fish
      out just the relevant parts.  Hence: */
#  if DARWIN_VERS <= DARWIN_10_7
   Bool is_reuse = reuse != 0;
#  elif DARWIN_VERS > DARWIN_10_7
   Bool is_reuse = (reuse & 0x20000 /* == WQ_FLAG_THREAD_REUSE */) != 0;
#  else
#    error "Unsupported Darwin version"
#  endif

   if (is_reuse) {

     /* For whatever reason, tst->os_state.pthread appears to have a
        constant offset of 96 on 10.7, but zero on 10.6 and 10.5.  No
        idea why. */
#      if DARWIN_VERS <= DARWIN_10_6
       UWord magic_delta = 0;
#      elif DARWIN_VERS == DARWIN_10_7 || DARWIN_VERS == DARWIN_10_8
       UWord magic_delta = 0x60;
#      elif DARWIN_VERS == DARWIN_10_9 || DARWIN_VERS == DARWIN_10_10
       UWord magic_delta = 0xE0;
#      elif DARWIN_VERS == DARWIN_10_11
       UWord magic_delta = 0x100;
#      else
#        error "magic_delta: to be computed on new OS version"
         // magic_delta = tst->os_state.pthread - self
#      endif

       // This thread already exists; we're merely re-entering 
       // after leaving via workq_ops(WQOPS_THREAD_RETURN). 
       // Don't allocate any V thread resources.
       // Do reset thread registers.
       ThreadId tid = VG_(lwpid_to_vgtid)(kport);
       vg_assert(VG_(is_valid_tid)(tid));
       vg_assert(mach_thread_self() == kport);

       tst = VG_(get_ThreadState)(tid);

       if (0) VG_(printf)("wqthread_hijack reuse %s: tid %d, tst %p, "
                          "tst->os_state.pthread %#lx, self %#lx\n",
                          tst->os_state.pthread == self ? "SAME" : "DIFF",
                          tid, tst, tst->os_state.pthread, self);

       vex = &tst->arch.vex;
       vg_assert(tst->os_state.pthread - magic_delta == self);
   }
   else {
       // This is a new thread.
       tst = VG_(get_ThreadState)(VG_(alloc_ThreadState)());        
       vex = &tst->arch.vex;
       allocstack(tst->tid);
       LibVEX_GuestAMD64_initialise(vex);
   }
       
   // Set thread's registers
   // Do this FIRST because some code below tries to collect a backtrace, 
   // which requires valid register data.
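   // The assignments below follow the SysV AMD64 calling convention
   // (arguments in RDI, RSI, RDX, RCX, R8, R9), so the guest resumes at
   // wqthread_starter as if it had been called with these six arguments.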
   vex->guest_RIP = wqthread_starter;
   vex->guest_RDI = self;
   vex->guest_RSI = kport;
   vex->guest_RDX = stackaddr;
   vex->guest_RCX = workitem;
   vex->guest_R8  = reuse;
   vex->guest_R9  = 0;
   vex->guest_RSP = sp;

   stacksize = 512*1024;  // wq stacks are always DEFAULT_STACK_SIZE
   stack = VG_PGROUNDUP(sp) - stacksize;
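   // The kernel hands us an sp near the top of the workqueue stack, so
   // rounding up to a page boundary and subtracting the fixed size
   // recovers the stack's base address.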

   if (is_reuse) {
      // Continue V's thread back in the scheduler. 
      // The client thread is of course in another location entirely.

      /* Drop the lock before going into
         ML_(wqthread_continue_NORETURN).  The latter will immediately
         attempt to reacquire it in non-LL mode, which is a bit
         wasteful but I don't think is harmful.  A better solution
         would be to not drop the lock but instead "upgrade" it from a
         LL lock to a full lock, but that's too much like hard work
         right now. */
      VG_(release_BigLock_LL)("wqthread_hijack(1)");
      ML_(wqthread_continue_NORETURN)(tst->tid);
   } 
   else {
      // Record thread's stack and Mach port and pthread struct
      tst->os_state.pthread = self;
      tst->os_state.lwpid = kport;
      record_named_port(tst->tid, kport, MACH_PORT_RIGHT_SEND, "wqthread-%p");
      
      // kernel allocated stack - needs mapping
      tst->client_stack_highest_byte = stack+stacksize-1;
      tst->client_stack_szB = stacksize;

      // GrP fixme scheduler lock?!
      
      // pthread structure
      ML_(notify_core_and_tool_of_mmap)(
            stack+stacksize, pthread_structsize, 
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // stack contents
      // GrP fixme uninitialized!
      ML_(notify_core_and_tool_of_mmap)(
            stack, stacksize, 
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // guard page
      // GrP fixme ban_mem_stack!
      ML_(notify_core_and_tool_of_mmap)(
            stack-VKI_PAGE_SIZE, VKI_PAGE_SIZE,
            0, VKI_MAP_PRIVATE, -1, 0);

      ML_(sync_mappings)("after", "wqthread_hijack", 0);

      // Go!
      /* Same comments as the 'release' in the then-clause.
         start_thread_NORETURN calls run_thread_NORETURN calls
         thread_wrapper which acquires the lock before continuing.
         Let's hope nothing non-thread-local happens until that point.

         DDD: I think this is plain wrong .. if we get to
         thread_wrapper not holding the lock, and someone has recycled
         this thread slot in the meantime, we're hosed.  Is that
         possible, though? */
      VG_(release_BigLock_LL)("wqthread_hijack(2)");
      call_on_new_stack_0_1(tst->os_state.valgrind_stack_init_SP, 0, 
                            start_thread_NORETURN, (Word)tst);
   }

   /*NOTREACHED*/
   vg_assert(0);
}
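The three ML_(notify_core_and_tool_of_mmap) calls above describe one contiguous layout: a guard page immediately below the stack base, the 512kB stack itself, and the pthread structure sitting just above the stack top.  The standalone sketch below only prints that layout for an invented sp value, assuming the 512kB default stack and 4kB pages used above; it is an illustration, not Valgrind code.

/* Sketch of the workqueue-thread address ranges registered above. */
#include <stdio.h>

#define PAGE_SIZE    0x1000UL
#define STACK_SIZE   (512UL * 1024UL)
#define PGROUNDUP(a) (((a) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
   unsigned long sp    = 0x700000103f70UL;      /* invented example value */
   unsigned long stack = PGROUNDUP(sp) - STACK_SIZE;

   printf("guard page    : %#lx .. %#lx\n", stack - PAGE_SIZE, stack - 1);
   printf("stack         : %#lx .. %#lx\n", stack, stack + STACK_SIZE - 1);
   printf("pthread struct: starts at %#lx\n", stack + STACK_SIZE);
   return 0;
}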
Example #19
/* 
   When a client clones, we need to keep track of the new thread.  This means:
   1. allocate a ThreadId+ThreadState+stack for the thread

   2. initialize the thread's new VCPU state

   3. create the thread using the same args as the client requested,
   but using the scheduler entrypoint for RIP, and a separate stack
   for RSP.
 */
static SysRes do_clone ( ThreadId ptid, 
                         ULong flags, Addr rsp, 
                         Long* parent_tidptr, 
                         Long* child_tidptr, 
                         Addr tlsaddr )
{
   static const Bool debug = False;

   ThreadId     ctid = VG_(alloc_ThreadState)();
   ThreadState* ptst = VG_(get_ThreadState)(ptid);
   ThreadState* ctst = VG_(get_ThreadState)(ctid);
   UWord*       stack;
   SysRes       res;
   Long         rax;
   vki_sigset_t blockall, savedmask;

   VG_(sigfillset)(&blockall);

   vg_assert(VG_(is_running_thread)(ptid));
   vg_assert(VG_(is_valid_tid)(ctid));

   stack = (UWord*)ML_(allocstack)(ctid);
   if (stack == NULL) {
      res = VG_(mk_SysRes_Error)( VKI_ENOMEM );
      goto out;
   }

   /* Copy register state

      Both parent and child return to the same place, and the code
      following the clone syscall works out which is which, so we
      don't need to worry about it.

      The parent gets the child's new tid returned from clone, but the
      child gets 0.

      If the clone call specifies a NULL rsp for the new thread, then
      it actually gets a copy of the parent's rsp.
   */
   setup_child( &ctst->arch, &ptst->arch );

   /* Make sys_clone appear to have returned Success(0) in the
      child. */
   ctst->arch.vex.guest_RAX = 0;

   if (rsp != 0)
      ctst->arch.vex.guest_RSP = rsp;

   ctst->os_state.parent = ptid;

   /* inherit signal mask */
   ctst->sig_mask = ptst->sig_mask;
   ctst->tmp_sig_mask = ptst->sig_mask;

   /* Start the child with its threadgroup being the same as the
      parent's.  This is so that any exit_group calls that happen
      after the child is created but before it sets its
      os_state.threadgroup field for real (in thread_wrapper in
      syswrap-linux.c), really kill the new thread.  In other words, this
      avoids a race condition in which the thread is unkillable (via
      exit_group) because its threadgroup is not set.  The race window
      is probably only a few hundred or a few thousand cycles long.
      See #226116. */
   ctst->os_state.threadgroup = ptst->os_state.threadgroup;

   ML_(guess_and_register_stack) (rsp, ctst);

   /* Assume the clone will succeed, and tell any tool that wants to
      know that this thread has come into existence.  If the clone
      fails, we'll send out a ll_exit notification for it at the out:
      label below, to clean up. */
   vg_assert(VG_(owns_BigLock_LL)(ptid));
   VG_TRACK ( pre_thread_ll_create, ptid, ctid );

   if (flags & VKI_CLONE_SETTLS) {
      if (debug)
	 VG_(printf)("clone child has SETTLS: tls at %#lx\n", tlsaddr);
      ctst->arch.vex.guest_FS_CONST = tlsaddr;
   }

   flags &= ~VKI_CLONE_SETTLS;
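   /* The flag is stripped because the guest's TLS has already been
      modelled above by setting guest_FS_CONST; the real clone below
      must not additionally install a TLS area for the host thread. */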

   /* start the thread with everything blocked */
   VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, &savedmask);

   /* Create the new thread */
   rax = do_syscall_clone_amd64_linux(
            ML_(start_thread_NORETURN), stack, flags, &VG_(threads)[ctid],
            child_tidptr, parent_tidptr, NULL
         );
   res = VG_(mk_SysRes_amd64_linux)( rax );

   VG_(sigprocmask)(VKI_SIG_SETMASK, &savedmask, NULL);

  out:
   if (sr_isError(res)) {
      /* clone failed */
      VG_(cleanup_thread)(&ctst->arch);
      ctst->status = VgTs_Empty;
      /* oops.  Better tell the tool the thread exited in a hurry :-) */
      VG_TRACK( pre_thread_ll_exit, ctid );
   }

   return res;
}
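For comparison, the user-space analogue of what do_clone hands to do_syscall_clone_amd64_linux (an entry function, a freshly allocated stack, and the clone flags) is glibc's clone(2) wrapper.  The sketch below only illustrates that calling pattern, with the parent receiving the new task id while the child starts on the supplied stack; it is not how Valgrind itself creates the thread, and the flag choice here is deliberately minimal.

/* Minimal clone(2) usage sketch (Linux, glibc). */
#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>

static int child_fn(void *arg)
{
   printf("child: started with arg \"%s\"\n", (const char *)arg);
   return 0;
}

int main(void)
{
   size_t stack_size = 1024 * 1024;
   char  *stack      = malloc(stack_size);
   if (!stack) return 1;

   /* glibc's wrapper wants the *top* of the stack (it grows downwards). */
   pid_t tid = clone(child_fn, stack + stack_size, CLONE_VM | SIGCHLD, "hello");
   if (tid == -1) { perror("clone"); return 1; }

   printf("parent: new task id is %d\n", (int)tid);
   waitpid(tid, NULL, 0);   /* SIGCHLD makes it waitable like a fork child */
   free(stack);
   return 0;
}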