/* A thread acquires a mutex.  Fails if:
    - thread is not blocked waiting for the mutex
    - mutex is held by another thread
 */
void VG_(tm_mutex_acquire)(ThreadId tid, Addr mutexp)
{
   struct mutex *mx;

   mx = mutex_check_initialized(tid, mutexp, "acquiring");   
   
   switch(mx->state) {
   case MX_Unlocking:		/* ownership transfer or relock */
      VG_TRACK( post_mutex_unlock, mx->owner, (void *)mutexp );
      if (mx->owner != tid)
	 thread_unblock_mutex(tid, mx, "acquiring mutex");
      break;

   case MX_Free:
      thread_unblock_mutex(tid, mx, "acquiring mutex");
      break;

   case MX_Locked:
      if (debug_mutex)
	 VG_(printf)("mutex=%p mx->state=%s\n", mutexp, pp_mutexstate(mx));
      VG_TRACK( post_mutex_unlock, mx->owner, (void *)mutexp );
      mutex_report(tid, mutexp, MXE_Locked, "acquiring");
      thread_unblock_mutex(tid, mx, "acquiring mutex");
      break;

   case MX_Init:
   case MX_Dead:
      vg_assert(0);
   } 
     
   mx->owner = tid;
   mutex_setstate(tid, mx, MX_Locked);

   VG_TRACK( post_mutex_lock, tid, (void *)mutexp );
}
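
The VG_TRACK calls above dispatch to whatever callbacks the running tool registered at startup. A minimal tool-side sketch (the track_* registration hooks are assumed from the tool interface; exact names and signatures vary across Valgrind versions):

static void my_post_mutex_lock(ThreadId tid, void* mutex)
{
   VG_(printf)("thread %u acquired mutex %p\n", tid, mutex);
}

static void my_post_mutex_unlock(ThreadId tid, void* mutex)
{
   VG_(printf)("thread %u released mutex %p\n", tid, mutex);
}

static void my_pre_clo_init(void)
{
   /* registration hooks assumed from pub_tool_tooliface.h */
   VG_(track_post_mutex_lock)  (my_post_mutex_lock);
   VG_(track_post_mutex_unlock)(my_post_mutex_unlock);
}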
Example #2
void VG_(unknown_esp_update)(Addr new_ESP)
{
   Addr old_ESP = VG_(get_archreg)(R_ESP);
   Int  delta   = (Int)new_ESP - (Int)old_ESP;

   if (delta < -(VG_HUGE_DELTA) || VG_HUGE_DELTA < delta) {
      /* %esp has changed by more than HUGE_DELTA.  We take this to mean
         that the application is switching to a new stack, for whatever
         reason. 
       
         JRS 20021001: following discussions with John Regehr, if a stack
         switch happens, it seems best not to mess at all with memory
         permissions.  Seems to work well with Netscape 4.X.  Really the
         only remaining difficulty is knowing exactly when a stack switch is
         happening. */
      if (VG_(clo_verbosity) > 1)
           VG_(message)(Vg_UserMsg, "Warning: client switching stacks?  "
                                    "%%esp: %p --> %p", old_ESP, new_ESP);
   } else if (delta < 0) {
      VG_TRACK( new_mem_stack, new_ESP, -delta );

   } else if (delta > 0) {
      VG_TRACK( die_mem_stack, old_ESP,  delta );
   }
}
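
On the tool side, new_mem_stack / die_mem_stack normally translate directly into addressability updates. A sketch of matching handlers, where mark_undefined and mark_noaccess are hypothetical stand-ins for the tool's own shadow-memory operations:

/* hypothetical helpers standing in for the tool's shadow-memory ops */
static void mark_undefined(Addr a, SizeT len) { (void)a; (void)len; }
static void mark_noaccess (Addr a, SizeT len) { (void)a; (void)len; }

static void my_new_mem_stack(Addr a, SizeT len)
{
   /* freshly exposed stack: addressable, contents undefined */
   mark_undefined(a, len);
}

static void my_die_mem_stack(Addr a, SizeT len)
{
   /* abandoned stack: no longer addressable */
   mark_noaccess(a, len);
}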
/* Allocate memory, noticing whether or not we are doing the full
   instrumentation thing. */
static __inline__
void* alloc_and_new_mem ( ThreadState* tst, UInt size, UInt alignment,
                          Bool is_zeroed, VgAllocKind kind )
{
   Addr p;

   VGP_PUSHCC(VgpCliMalloc);

   vg_cmalloc_n_mallocs ++;
   vg_cmalloc_bs_mallocd += size;

   vg_assert(alignment >= 4);
   if (alignment == 4)
      p = (Addr)VG_(arena_malloc)(VG_AR_CLIENT, size);
   else
      p = (Addr)VG_(arena_malloc_aligned)(VG_AR_CLIENT, alignment, size);

   if (needs_shadow_chunks())
      addShadowChunk ( tst, p, size, kind );

   VG_TRACK( ban_mem_heap, p-VG_AR_CLIENT_REDZONE_SZB, 
                           VG_AR_CLIENT_REDZONE_SZB );
   VG_TRACK( new_mem_heap, p, size, is_zeroed );
   VG_TRACK( ban_mem_heap, p+size, VG_AR_CLIENT_REDZONE_SZB );

   VGP_POPCC(VgpCliMalloc);
   return (void*)p;
}
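
The three VG_TRACK calls above announce the following client block layout (RZ = VG_AR_CLIENT_REDZONE_SZB); the redzones are banned so that any client access to them can be reported:

   p - RZ             p                  p + size           p + size + RZ
     |  ban_mem_heap    |  new_mem_heap     |  ban_mem_heap    |
     |  (redzone)       |  (user payload)   |  (redzone)       |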
void VG_(sigframe_destroy)( ThreadId tid, Bool isRT )
{
   Addr          esp;
   ThreadState*  tst;
   SizeT	 size;
   Int		 sigNo;

   tst = VG_(get_ThreadState)(tid);

   /* Correctly reestablish the frame base address. */
   esp   = tst->arch.vex.guest_ESP;

   if (!isRT)
      size = restore_sigframe(tst, (struct sigframe *)esp, &sigNo);
   else
      size = restore_rt_sigframe(tst, (struct rt_sigframe *)esp, &sigNo);

   VG_TRACK( die_mem_stack_signal, esp - VG_STACK_REDZONE_SZB,
             size + VG_STACK_REDZONE_SZB );

   if (VG_(clo_trace_signals))
      VG_(message)(
         Vg_DebugMsg, 
         "VG_(signal_return) (thread %d): isRT=%d valid magic; EIP=%#x\n",
         tid, isRT, tst->arch.vex.guest_EIP);

   /* tell the tools */
   VG_TRACK( post_deliver_signal, tid, sigNo );
}
/* EXPORTED */
void VG_(sigframe_destroy)( ThreadId tid, Bool isRT )
{
   Addr          sp;
   ThreadState*  tst;
   SizeT         size;
   Int           sigNo;

   tst = VG_(get_ThreadState)(tid);

   /* Correctly reestablish the frame base address. */
   sp   = tst->arch.vex.guest_SP;

   if (!isRT)
      size = restore_sigframe(tst, (struct sigframe *)sp, &sigNo);
   else
      size = restore_rt_sigframe(tst, (struct rt_sigframe *)sp, &sigNo);

   /* same as for creation: we must announce the full memory (including
      alignment), otherwise massif might fail on longjmp */
   VG_TRACK( die_mem_stack_signal, sp - VG_STACK_REDZONE_SZB,
             size + VG_STACK_REDZONE_SZB );

   if (VG_(clo_trace_signals))
      VG_(message)(
         Vg_DebugMsg,
         "VG_(sigframe_destroy) (thread %u): isRT=%d valid magic; IP=%#llx\n",
         tid, isRT, tst->arch.vex.guest_IA);

   /* tell the tools */
   VG_TRACK( post_deliver_signal, tid, sigNo );
}
void VG_(sigframe_destroy)( ThreadId tid )
{
   Addr          rsp;
   ThreadState*  tst;
   SizeT	 size;
   Int		 sigNo;

   tst = VG_(get_ThreadState)(tid);

   /* Correctly reestablish the frame base address. */
   rsp   = tst->arch.vex.guest_RSP;

   size = restore_sigframe(tst, (struct sigframe *)rsp, &sigNo);

   VG_TRACK( die_mem_stack_signal, rsp - VG_STACK_REDZONE_SZB,
             size + VG_STACK_REDZONE_SZB );

   if (VG_(clo_trace_signals))
      VG_(message)(
         Vg_DebugMsg, 
         "VG_(signal_return) (thread %d): valid magic; RIP=%#llx\n",
         tid, tst->arch.vex.guest_RIP);

   /* tell the tools */
   VG_TRACK( post_deliver_signal, tid, sigNo );
}
Example #7
static
void startup_segment_callback ( Addr start, UInt size, 
                                Char rr, Char ww, Char xx, 
                                UInt foffset, UChar* filename )
{
   UInt r_esp;
   Bool is_stack_segment;
   Bool verbose = False; /* set to True for debugging */

   if (verbose)
      VG_(message)(Vg_DebugMsg,
                   "initial map %8x-%8x %c%c%c? %8x (%d) (%s)",
                   start,start+size,rr,ww,xx,foffset,
                   size, filename?filename:(UChar*)"NULL");

   if (rr != 'r' && xx != 'x' && ww != 'w') {
      /* Implausible as it seems, R H 6.2 generates such segments:
      40067000-400ac000 r-xp 00000000 08:05 320686 /usr/X11R6/lib/libXt.so.6.0
      400ac000-400ad000 ---p 00045000 08:05 320686 /usr/X11R6/lib/libXt.so.6.0
      400ad000-400b0000 rw-p 00045000 08:05 320686 /usr/X11R6/lib/libXt.so.6.0
      when running xedit. So just ignore them. */
      if (0)
         VG_(printf)("No permissions on a segment mapped from %s\n", 
                     filename?filename:(UChar*)"NULL");
      return;
   }

   /* If this segment corresponds to something mmap'd /dev/zero by the
      low-level memory manager (vg_malloc2.c), skip it.  Clients
      should never have access to the segments which hold valgrind
      internal data.  And access to client data in the VG_AR_CLIENT
      arena is mediated by the skin, so we don't want to make it
      accessible at this stage. */
   if (VG_(is_inside_segment_mmapd_by_low_level_MM)( start )) {
      if (verbose)
         VG_(message)(Vg_DebugMsg,
                      "   skipping %8x-%8x (owned by our MM)", 
                      start, start+size );
      /* Don't announce it to the skin. */
      return;
   }
   
   /* This parallels what happens when we mmap some new memory */
   if (filename != NULL && xx == 'x') {
      VG_(new_exe_segment)( start, size );
   }

   VG_TRACK( new_mem_startup, start, size, rr=='r', ww=='w', xx=='x' );

   /* If this is the stack segment mark all below %esp as noaccess. */
   r_esp = VG_(baseBlock)[VGOFF_(m_esp)];
   is_stack_segment = start <= r_esp && r_esp < start+size;
   if (is_stack_segment) {
      if (0)
         VG_(message)(Vg_DebugMsg, "invalidating stack area: %x .. %x",
                      start,r_esp);
      VG_TRACK( die_mem_stack, start, r_esp-start );
   }
}
static 
void stack_mcontext ( struct vki_mcontext *mc, 
                      ThreadState* tst, 
                      Bool use_rt_sigreturn,
                      UInt fault_addr )
{
   VG_TRACK( pre_mem_write, Vg_CoreSignal, tst->tid, "signal frame mcontext",
             (Addr)mc, sizeof(struct vki_pt_regs) );

#  define DO(gpr)  mc->mc_gregs[VKI_PT_R0+gpr] = tst->arch.vex.guest_GPR##gpr
   DO(0);  DO(1);  DO(2);  DO(3);  DO(4);  DO(5);  DO(6);  DO(7);
   DO(8);  DO(9);  DO(10); DO(11); DO(12); DO(13); DO(14); DO(15);
   DO(16); DO(17); DO(18); DO(19); DO(20); DO(21); DO(22); DO(23);
   DO(24); DO(25); DO(26); DO(27); DO(28); DO(29); DO(30); DO(31);
#  undef DO

   mc->mc_gregs[VKI_PT_NIP]     = tst->arch.vex.guest_CIA;
   mc->mc_gregs[VKI_PT_MSR]     = 0xf032;   /* pretty arbitrary */
   mc->mc_gregs[VKI_PT_ORIG_R3] = tst->arch.vex.guest_GPR3;
   mc->mc_gregs[VKI_PT_CTR]     = tst->arch.vex.guest_CTR;
   mc->mc_gregs[VKI_PT_LNK]     = tst->arch.vex.guest_LR;
   mc->mc_gregs[VKI_PT_XER]     = LibVEX_GuestPPC32_get_XER(&tst->arch.vex);
   mc->mc_gregs[VKI_PT_CCR]     = LibVEX_GuestPPC32_get_CR(&tst->arch.vex);
   mc->mc_gregs[VKI_PT_MQ]      = 0;
   mc->mc_gregs[VKI_PT_TRAP]    = 0;
   mc->mc_gregs[VKI_PT_DAR]     = fault_addr;
   mc->mc_gregs[VKI_PT_DSISR]   = 0;
   mc->mc_gregs[VKI_PT_RESULT]  = 0;
   VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid, 
             (Addr)mc, sizeof(struct vki_pt_regs) );

   /* XXX should do FP and vector regs */

   /* set up signal return trampoline */
   /* NB.  5 Sept 07.  mc->mc_pad[0..1] used to contain the code to
      which the signal handler returns, and it just did sys_sigreturn
      or sys_rt_sigreturn.  But this doesn't work if the stack is
      non-executable, and it isn't consistent with the x86-linux and
      amd64-linux scheme for removing the stack frame.  So instead be
      consistent and use a stub in m_trampoline.  Then it doesn't
      matter whether or not the (guest) stack is executable.  This
      fixes #149519 and #145837. */
   VG_TRACK(pre_mem_write, Vg_CoreSignal, tst->tid, "signal frame mcontext",
            (Addr)&mc->mc_pad, sizeof(mc->mc_pad));
   mc->mc_pad[0] = 0; /* invalid */
   mc->mc_pad[1] = 0; /* invalid */
   VG_TRACK( post_mem_write,  Vg_CoreSignal, tst->tid, 
             (Addr)&mc->mc_pad, sizeof(mc->mc_pad) );
   /* invalidate any translation of this area */
   VG_(discard_translations)( (Addr)&mc->mc_pad, 
                              sizeof(mc->mc_pad), "stack_mcontext" );   

   /* set the signal handler to return to the trampoline */
   SET_SIGNAL_LR(tst, (Addr)(use_rt_sigreturn 
                               ? (Addr)&VG_(ppc32_linux_SUBST_FOR_rt_sigreturn)
                               : (Addr)&VG_(ppc32_linux_SUBST_FOR_sigreturn)
                      ));
}
Example #9
void VG_(unknown_SP_update)( Addr old_SP, Addr new_SP, UInt ecu )
{
   static Int moans = 3;
   Word delta  = (Word)new_SP - (Word)old_SP;

   /* Check if the stack pointer is still in the same stack as before. */
   if (current_stack == NULL ||
       new_SP < current_stack->start || new_SP > current_stack->end) {
      Stack* new_stack = find_stack_by_addr(new_SP);
      if (new_stack 
          && (current_stack == NULL || new_stack->id != current_stack->id)) { 
         /* The stack pointer is now in another stack.  Update the current
            stack information and return without doing anything else. */
         current_stack = new_stack;
         return;
      }
   }

   if (delta < -VG_(clo_max_stackframe) || VG_(clo_max_stackframe) < delta) {
      /* SP has changed by more than some threshold amount (by
         default, 2MB).  We take this to mean that the application is
         switching to a new stack, for whatever reason.
       
         JRS 20021001: following discussions with John Regehr, if a stack
         switch happens, it seems best not to mess at all with memory
         permissions.  Seems to work well with Netscape 4.X.  Really the
         only remaining difficulty is knowing exactly when a stack switch is
         happening. */
      if (VG_(clo_verbosity) > 0 && moans > 0) {
         moans--;
         VG_(message)(Vg_UserMsg,
            "Warning: client switching stacks?  "
            "SP change: %p --> %p", old_SP, new_SP);
         VG_(message)(Vg_UserMsg,
            "         to suppress, use: --max-stackframe=%ld or greater",
            (delta < 0 ? -delta : delta));
         if (moans == 0)
            VG_(message)(Vg_UserMsg,
                "         further instances of this message "
                "will not be shown.");
      }
   } else if (delta < 0) {
      VG_TRACK( new_mem_stack_w_ECU, new_SP, -delta, ecu );
      VG_TRACK( new_mem_stack,       new_SP, -delta );

   } else if (delta > 0) {
      VG_TRACK( die_mem_stack, old_SP,  delta );
   }
}
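
The threshold here is VG_(clo_max_stackframe), i.e. the --max-stackframe option, so a program that legitimately moves SP by a large amount in one step (a multi-megabyte alloca, say) can be silenced exactly as the warning suggests:

   valgrind --max-stackframe=4000000 ./myprog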
static Bool extend ( ThreadState *tst, Addr addr, SizeT size )
{
   ThreadId        tid = tst->tid;
   NSegment const* stackseg = NULL;

   if (VG_(extend_stack)(addr, tst->client_stack_szB)) {
      stackseg = VG_(am_find_nsegment)(addr);
      if (0 && stackseg)
	 VG_(printf)("frame=%#lx seg=%#lx-%#lx\n",
		     addr, stackseg->start, stackseg->end);
   }

   if (stackseg == NULL || !stackseg->hasR || !stackseg->hasW) {
      VG_(message)(
         Vg_UserMsg,
         "Can't extend stack to %#lx during signal delivery for thread %d:\n",
         addr, tid);
      if (stackseg == NULL)
         VG_(message)(Vg_UserMsg, "  no stack segment\n");
      else
         VG_(message)(Vg_UserMsg, "  too small or bad protection modes\n");

      /* Force a fatal SIGSEGV: reinstate the default handler and
         synthesise a mapping fault at the offending address. */
      VG_(set_default_handler)(VKI_SIGSEGV);
      VG_(synth_fault_mapping)(tid, addr);

      return False;
   }

   VG_TRACK( new_mem_stack_signal, addr - VG_STACK_REDZONE_SZB,
             size + VG_STACK_REDZONE_SZB, tid );

   return True;
}
void VG_(sigframe_create)( ThreadId tid, 
                            Addr rsp_top_of_frame,
                            const vki_siginfo_t *siginfo,
                            const struct vki_ucontext *siguc,
                            void *handler, 
                            UInt flags,
                            const vki_sigset_t *mask,
                            void *restorer )
{
   Addr rsp;
   struct sigframe *frame;
   ThreadState* tst = VG_(get_ThreadState)(tid);

   rsp = build_sigframe(tst, rsp_top_of_frame, siginfo, siguc, handler,
      flags, mask, restorer);
   frame = (struct sigframe *)rsp;

   /* Set the thread so it will next run the handler. */
   /* tst->m_rsp  = rsp;  also notify the tool we've updated RSP */
   VG_(set_SP)(tid, rsp);
   VG_TRACK( post_reg_write, Vg_CoreSignal, tid, VG_O_STACK_PTR, sizeof(Addr));

   //VG_(printf)("handler = %p\n", handler);
   tst->arch.vex.guest_RIP = (Addr) handler;
   tst->arch.vex.guest_RDI = (ULong) siginfo->si_signo;
   tst->arch.vex.guest_RSI = (Addr) &frame->sigInfo;
   tst->arch.vex.guest_RDX = (Addr) &frame->uContext;
   /* This thread needs to be marked runnable, but we leave that for
      the caller to do. */
}
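
The RDI/RSI/RDX assignments above are simply the first three AMD64 SysV argument registers, matching the three-argument sa_sigaction handler form:

   void handler(int signo, siginfo_t *info, void *ucontext);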
static Bool extend ( ThreadState *tst, Addr addr, SizeT size )
{
   ThreadId tid = tst->tid;
   VG_TRACK( new_mem_stack_signal,
             addr - VG_STACK_REDZONE_SZB, size, tid );
   return True;
}
/* EXPORTED */
void VG_(sigframe_create)( ThreadId tid,
			   Addr sp_top_of_frame,
			   const vki_siginfo_t *siginfo,
			   const struct vki_ucontext *siguc,
			   void *handler,
			   UInt flags,
			   const vki_sigset_t *mask,
			   void *restorer )
{
   Addr sp;
   ThreadState* tst = VG_(get_ThreadState)(tid);

   if (flags & VKI_SA_SIGINFO)
      sp = build_rt_sigframe(tst, sp_top_of_frame, siginfo, siguc,
			     flags, mask, restorer);
   else
      sp = build_sigframe(tst, sp_top_of_frame, siginfo, siguc,
			  flags, mask, restorer);

   /* Set the thread so it will next run the handler. */
   VG_(set_SP)(tid, sp);
   VG_TRACK( post_reg_write, Vg_CoreSignal, tid, VG_O_STACK_PTR, sizeof(Addr));

   tst->arch.vex.guest_IA = (Addr) handler;
   /* We might have interrupted a repeating instruction that uses the guest
      counter. Since our VEX requires that a new instruction will see a
      guest counter == 0, we have to set it here. The old value will be
      restored by restore_vg_sigframe. */
   tst->arch.vex.guest_counter = 0;
   /* This thread needs to be marked runnable, but we leave that for
      the caller to do. */
}
/* EXPORTED */
void VG_(sigframe_create)( ThreadId tid, 
                           Addr esp_top_of_frame,
                           const vki_siginfo_t *siginfo,
                           const struct vki_ucontext *siguc,
                           void *handler, 
                           UInt flags,
                           const vki_sigset_t *mask,
		           void *restorer )
{
   Addr		esp;
   ThreadState* tst = VG_(get_ThreadState)(tid);

   if (flags & VKI_SA_SIGINFO)
      esp = build_rt_sigframe(tst, esp_top_of_frame, siginfo, siguc,
                                   handler, flags, mask, restorer);
   else
      esp = build_sigframe(tst, esp_top_of_frame, siginfo, siguc,
                                handler, flags, mask, restorer);

   /* Set the thread so it will next run the handler. */
   /* tst->m_esp  = esp;  also notify the tool we've updated ESP */
   VG_(set_SP)(tid, esp);
   VG_TRACK( post_reg_write, Vg_CoreSignal, tid, VG_O_STACK_PTR, sizeof(Addr));

   //VG_(printf)("handler = %p\n", handler);
   tst->arch.vex.guest_EIP = (Addr) handler;
   /* This thread needs to be marked runnable, but we leave that for
      the caller to do. */

   if (0)
      VG_(printf)("pushed signal frame; %%ESP now = %#lx, "
                  "next %%EIP = %#x, status=%d\n",
		  esp, tst->arch.vex.guest_EIP, tst->status);
}
void VG_(sigframe_create)( ThreadId tid, 
                           Addr esp_top_of_frame,
                           const vki_siginfo_t *siginfo,
                           const struct vki_ucontext *siguc,
                           void *handler, 
                           UInt flags,
                           const vki_sigset_t *mask,
		           void *restorer )
{
   Addr		esp;
   ThreadState* tst = VG_(get_ThreadState)(tid);

   if (flags & VKI_SA_SIGINFO)
      esp = build_rt_sigframe(tst, esp_top_of_frame, siginfo, siguc,
                                   flags, mask, restorer);
   else
      esp = build_sigframe(tst, esp_top_of_frame, siginfo, siguc,
                                flags, mask, restorer);

   /* Set the thread so it will next run the handler. */
   /* tst->m_esp  = esp;  also notify the tool we've updated ESP */
   VG_(set_SP)(tid, esp);
   VG_TRACK( post_reg_write, Vg_CoreSignal, tid, VG_O_STACK_PTR, sizeof(Addr));

   tst->arch.vex.guest_EIP = (Addr) handler;

   if (0)
      VG_(printf)("pushed signal frame; %%ESP now = %#lx, "
                  "next %%EIP = %#x, status=%d\n",
		  esp, tst->arch.vex.guest_EIP, tst->status);
}
static 
void stack_mcontext ( struct vki_mcontext *mc, 
                      ThreadState* tst, 
                      Int ret,
                      UInt fault_addr )
{
   VG_TRACK( pre_mem_write, Vg_CoreSignal, tst->tid, "signal frame mcontext",
             (Addr)mc, sizeof(struct vki_pt_regs) );

#  define DO(gpr)  mc->mc_gregs[VKI_PT_R0+gpr] = tst->arch.vex.guest_GPR##gpr
   DO(0);  DO(1);  DO(2);  DO(3);  DO(4);  DO(5);  DO(6);  DO(7);
   DO(8);  DO(9);  DO(10); DO(11); DO(12); DO(13); DO(14); DO(15);
   DO(16); DO(17); DO(18); DO(19); DO(20); DO(21); DO(22); DO(23);
   DO(24); DO(25); DO(26); DO(27); DO(28); DO(29); DO(30); DO(31);
#  undef DO

   mc->mc_gregs[VKI_PT_NIP]     = tst->arch.vex.guest_CIA;
   mc->mc_gregs[VKI_PT_MSR]     = 0xf032;   /* pretty arbitrary */
   mc->mc_gregs[VKI_PT_ORIG_R3] = tst->arch.vex.guest_GPR3;
   mc->mc_gregs[VKI_PT_CTR]     = tst->arch.vex.guest_CTR;
   mc->mc_gregs[VKI_PT_LNK]     = tst->arch.vex.guest_LR;
   mc->mc_gregs[VKI_PT_XER]     = LibVEX_GuestPPC32_get_XER(&tst->arch.vex);
   mc->mc_gregs[VKI_PT_CCR]     = LibVEX_GuestPPC32_get_CR(&tst->arch.vex);
   mc->mc_gregs[VKI_PT_MQ]      = 0;
   mc->mc_gregs[VKI_PT_TRAP]    = 0;
   mc->mc_gregs[VKI_PT_DAR]     = fault_addr;
   mc->mc_gregs[VKI_PT_DSISR]   = 0;
   mc->mc_gregs[VKI_PT_RESULT]  = 0;
   VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid, 
             (Addr)mc, sizeof(struct vki_pt_regs) );

   /* XXX should do FP and vector regs */

   /* set up signal return trampoline */
   VG_TRACK(pre_mem_write, Vg_CoreSignal, tst->tid, "signal frame mcontext",
            (Addr)&mc->mc_pad, sizeof(mc->mc_pad));
   mc->mc_pad[0] = 0x38000000U + ret;   /* li 0,ret */
   mc->mc_pad[1] = 0x44000002U;         /* sc */
   VG_TRACK( post_mem_write,  Vg_CoreSignal, tst->tid, 
             (Addr)&mc->mc_pad, sizeof(mc->mc_pad) );
   /* invalidate any translation of this area */
   VG_(discard_translations)( (Addr64)(Addr)&mc->mc_pad, 
                              sizeof(mc->mc_pad), "stack_mcontext" );   

   /* set the signal handler to return to the trampoline */
   SET_SIGNAL_LR(tst, (Addr) &mc->mc_pad[0]);
}
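
For reference, the two trampoline words written into mc_pad decode as ordinary PPC32 instructions. A small sketch assembling the first one shows where the 0x38000000 constant comes from:

#include <stdint.h>

/* "li r0,imm" is the mnemonic for addi r0,0,imm: primary opcode 14
   in the top 6 bits, rD = rA = 0, 16-bit immediate in the low bits,
   i.e. 0x38000000 | imm.  0x44000002 is "sc" (system call). */
static uint32_t ppc32_li_r0(uint16_t imm)
{
   return (14u << 26) | imm;
}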
/* EXPORTED */
void VG_(sigframe_create)( ThreadId tid, 
                           Bool on_altstack,
                           Addr sp_top_of_frame,
                           const vki_siginfo_t *siginfo,
                           const struct vki_ucontext *siguc,
                           void *handler, 
                           UInt flags,
                           const vki_sigset_t *mask,
                           void *restorer )
{
   ThreadState *tst;
   Addr sp    = sp_top_of_frame;
   Int  sigNo = siginfo->si_signo;
   UInt size;

   tst = VG_(get_ThreadState)(tid);

   size = sizeof(struct rt_sigframe);

   sp -= size;
   sp = VG_ROUNDDN(sp, 16);

   if (! ML_(sf_maybe_extend_stack)(tst, sp, size, flags))
      return; // Give up.  No idea if this is correct

   struct rt_sigframe *rsf = (struct rt_sigframe *)sp;
      
   /* Track our writes to siginfo */
   VG_TRACK( pre_mem_write, Vg_CoreSignal, tst->tid,  /* VVVVV */
             "signal handler siginfo", (Addr)rsf, 
             offsetof(struct rt_sigframe, sig));

   VG_(memcpy)(&rsf->info, siginfo, sizeof(vki_siginfo_t));

   if (sigNo == VKI_SIGILL && siginfo->si_code > 0) {
      rsf->info._sifields._sigfault._addr
        = (Addr*)(tst)->arch.vex.guest_PC;
   }
   VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid, /* ^^^^^ */
         (Addr)rsf, offsetof(struct rt_sigframe, sig));

   build_sigframe(tst, &rsf->sig, siginfo, siguc,
                       handler, flags, mask, restorer);
   tst->arch.vex.guest_X1 = (Addr)&rsf->info;
   tst->arch.vex.guest_X2 = (Addr)&rsf->sig.uc;

   VG_(set_SP)(tid, sp);
   VG_TRACK( post_reg_write, Vg_CoreSignal, tid, VG_O_STACK_PTR,
             sizeof(Addr));
   tst->arch.vex.guest_X0 = sigNo; 

   if (flags & VKI_SA_RESTORER)
       tst->arch.vex.guest_X30 = (Addr)restorer; 
   else
       tst->arch.vex.guest_X30
          = (Addr)&VG_(arm64_linux_SUBST_FOR_rt_sigreturn);

   tst->arch.vex.guest_PC = (Addr)handler;
}
/* Remove a signal frame from thread 'tid's stack, and restore the CPU
   state from it.  Note, isRT is irrelevant here. */
void VG_(sigframe_destroy)( ThreadId tid, Bool isRT )
{
   ThreadState *tst;
   Addr esp;
   Int sigNo;
   struct hacky_sigframe* frame;
 
   vg_assert(VG_(is_valid_tid)(tid));
   tst = VG_(get_ThreadState)(tid);

   /* Check that the stack frame looks valid */
   esp = VG_(get_SP)(tid);

   /* why -4 ? because the signal handler's return will have popped
      the return address off the stack; and the return address is the
      lowest-addressed element of hacky_sigframe. */
   frame = (struct hacky_sigframe*)(esp - 4);
   vg_assert(frame->magicPI == 0x31415927);

   /* This +4 is because of the -4 referred to in the ELF ABI comment
      in VG_(sigframe_create) just above. */
   vg_assert(VG_IS_16_ALIGNED((Addr)frame + 4));

   /* restore the entire guest state, and shadows, from the
      frame.  Note, as per comments above, this is a kludge - should
      restore it from saved ucontext.  Oh well. */
   tst->arch.vex = frame->gst;
   tst->arch.vex_shadow1 = frame->gshadow1;
   tst->arch.vex_shadow2 = frame->gshadow2;
   tst->sig_mask = frame->mask;
   tst->tmp_sig_mask = frame->mask;
   sigNo = frame->sigNo_private;

   if (VG_(clo_trace_signals))
      VG_(message)(Vg_DebugMsg,
                   "sigframe_destroy (thread %d): "
                   "valid magic; next EIP=%#x\n",
                   tid, tst->arch.vex.guest_EIP);

   VG_TRACK( die_mem_stack_signal, 
             (Addr)frame - VG_STACK_REDZONE_SZB, 
             sizeof(struct hacky_sigframe) );

   /* tell the tools */
   VG_TRACK( post_deliver_signal, tid, sigNo );
}
/* Extend the stack segment downwards if needed so as to ensure the
   new signal frames are mapped to something.  Return a Bool
   indicating whether or not the operation was successful.
*/
static Bool extend ( ThreadState *tst, Addr addr, SizeT size )
{
   /* For tracking memory events, indicate the entire frame has been
      allocated.  Except, don't mess with the area which
      overlaps the previous frame's redzone. */
   VG_TRACK( new_mem_stack_signal, addr, size - VG_STACK_REDZONE_SZB );
   return True;
}
void VG_(sigframe_create)( ThreadId tid, 
                            Bool on_altstack,
                            Addr rsp_top_of_frame,
                            const vki_siginfo_t *siginfo,
                            const struct vki_ucontext *siguc,
                            void *handler, 
                            UInt flags,
                            const vki_sigset_t *mask,
                            void *restorer )
{
   Addr rsp;
   struct rt_sigframe *frame;
   ThreadState* tst = VG_(get_ThreadState)(tid);

   rsp = build_rt_sigframe(tst, rsp_top_of_frame, siginfo, siguc,
                                handler, flags, mask, restorer);
   frame = (struct rt_sigframe *)rsp;

   /* Set the thread so it will next run the handler. */
   /* tst->m_rsp  = rsp;  also notify the tool we've updated RSP */
   VG_(set_SP)(tid, rsp);
   VG_TRACK( post_reg_write, Vg_CoreSignal, tid, VG_O_STACK_PTR, sizeof(Addr));

   //VG_(printf)("handler = %p\n", handler);
   tst->arch.vex.guest_RIP = (Addr) handler;
   tst->arch.vex.guest_RDI = (ULong) siginfo->si_signo;
   tst->arch.vex.guest_RSI = (Addr) &frame->sigInfo;
   tst->arch.vex.guest_RDX = (Addr) &frame->uContext;
   /* And tell the tool that these registers have been written. */
   VG_TRACK( post_reg_write, Vg_CoreSignal, tst->tid,
             offsetof(VexGuestAMD64State,guest_RIP), sizeof(UWord) );
   VG_TRACK( post_reg_write, Vg_CoreSignal, tst->tid,
             offsetof(VexGuestAMD64State,guest_RDI), sizeof(UWord) );
   VG_TRACK( post_reg_write, Vg_CoreSignal, tst->tid,
             offsetof(VexGuestAMD64State,guest_RSI), sizeof(UWord) );
   VG_TRACK( post_reg_write, Vg_CoreSignal, tst->tid,
             offsetof(VexGuestAMD64State,guest_RDX), sizeof(UWord) );

   /* This thread needs to be marked runnable, but we leave that for
      the caller to do. */

   if (0)
      VG_(printf)("pushed signal frame; %%RSP now = %#lx, "
                  "next %%RIP = %#llx, status=%d\n",
		  rsp, tst->arch.vex.guest_RIP, tst->status);
}
Example #21
void VG_(unknown_SP_update)( Addr old_SP, Addr new_SP ) {
   IF_STACK_SWITCH_SET_current_stack_AND_RETURN;
   IF_BIG_DELTA_complaints_AND_RETURN;
   IF_SMALLER_STACK_die_mem_stack_AND_RETURN;
   if (delta < 0) { // IF_BIGGER_STACK
      VG_TRACK( new_mem_stack,      new_SP, -delta );
      return;
   }
   // SAME_STACK. nothing to do.
}
Example #22
// Run a thread from beginning to end and return the thread's
// scheduler-return-code.
static VgSchedReturnCode thread_wrapper(Word /*ThreadId*/ tidW)
{
   VgSchedReturnCode ret;
   ThreadId     tid = (ThreadId)tidW;
   ThreadState* tst = VG_(get_ThreadState)(tid);
   VG_(debugLog)(1, "syswrap-l4re", 
                    "thread_wrapper(tid=%lld): entry\n", 
                    (ULong)tidW);

   vg_assert(tst->status == VgTs_Init);

   /* make sure we get the CPU lock before doing anything significant */
   VG_(acquire_BigLock)(tid, "thread_wrapper(starting new thread)");

   if (0)
      VG_(printf)("thread tid %d started: stack = %p\n",
		  tid, &tid);

   VG_TRACK(pre_thread_first_insn, tid);

   tst->os_state.lwpid = VG_(gettid)();
   tst->os_state.threadgroup = VG_(getpid)();

   /* Thread created with all signals blocked; scheduler will set the
      appropriate mask */
#if defined(VGO_l4re)
   {
	   ThreadState* ctst = VG_(get_ThreadState)(tid);
	   VG_(debugLog)(1, "syswrap", "register contents at startup tid=%d\n", tid);
	   VG_(debugLog)(1, "syswrap", "eax = %x\n", ctst->arch.vex.guest_EAX);
	   VG_(debugLog)(1, "syswrap", "ebx = %x\n", ctst->arch.vex.guest_EBX);
	   VG_(debugLog)(1, "syswrap", "ecx = %x\n", ctst->arch.vex.guest_ECX);
	   VG_(debugLog)(1, "syswrap", "edx = %x\n", ctst->arch.vex.guest_EDX);
	   VG_(debugLog)(1, "syswrap", "esi = %x\n", ctst->arch.vex.guest_ESI);
	   VG_(debugLog)(1, "syswrap", "edi = %x\n", ctst->arch.vex.guest_EDI);
	   VG_(debugLog)(1, "syswrap", "ebp = %x\n", ctst->arch.vex.guest_EBP);
	   VG_(debugLog)(1, "syswrap", "esp = %x\n", ctst->arch.vex.guest_ESP);

   }
#endif

   ret = VG_(scheduler)(tid);

   vg_assert(VG_(is_exiting)(tid));
   
   vg_assert(tst->status == VgTs_Runnable);
   vg_assert(VG_(is_running_thread)(tid));

   VG_(debugLog)(1, "syswrap-l4re", 
                    "thread_wrapper(tid=%lld): exit\n", 
                    (ULong)tidW);

   /* Return to caller, still holding the lock. */
   return ret;
}
Example #23
/* Finish unlocking a Mutex.  The mutex can validly be in one of three
   states:
   - Unlocking
   - Locked, owned by someone else (someone else got it in the meantime)
   - Free (someone else completed a lock-unlock cycle)
 */
void VG_(tm_mutex_unlock)(ThreadId tid, Addr mutexp)
{
   struct mutex *mx;
   struct thread *th;

   mx = mutex_check_initialized(tid, mutexp, "unlocking mutex");

   th = thread_get(tid);

   if (th == NULL)
      thread_report(tid, THE_NotExist, "unlocking mutex");
   else {
      switch(th->state) {
      case TS_Alive:
      case TS_Running:		/* OK */
	 break;

      case TS_Dead:
      case TS_Zombie:
	 thread_report(tid, THE_NotAlive, "unlocking mutex");
	 break;

      case TS_JoinBlocked:
      case TS_CVBlocked:
      case TS_MutexBlocked:
	 thread_report(tid, THE_Blocked, "unlocking mutex");
	 do_thread_run(th);
	 break;
      }
   }

   switch(mx->state) {
   case MX_Locked:
      /* Someone else might have taken ownership in the meantime */
      if (mx->owner == tid)
	 mutex_report(tid, mutexp, MXE_Locked, "unlocking");
      break;

   case MX_Free:
      /* OK - nothing to do */
      break;

   case MX_Unlocking:
      /* OK - we need to complete the unlock */
      VG_TRACK( post_mutex_unlock, tid, (void *)mutexp );
      mutex_setstate(tid, mx, MX_Free);
      break;

   case MX_Init:
   case MX_Dead:
      vg_assert(0);
   }
}
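
Pulling the acquire and unlock cases together, the mutex state machine visible in these snippets is as follows (reconstructed from the code shown here; transitions handled elsewhere, such as whatever moves a mutex into MX_Unlocking, are omitted):

   acquire:  MX_Free      -> MX_Locked
             MX_Unlocking -> MX_Locked   (ownership transfer or relock)
             MX_Locked    -> MX_Locked   (MXE_Locked reported first)
   unlock:   MX_Unlocking -> MX_Free     (completes the unlock)
             MX_Locked, MX_Free: state unchanged (error reported if the
             unlocker still appears to own a locked mutex)
   MX_Init and MX_Dead are invalid in both operations (vg_assert(0)).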
/* Extend the stack segment downwards if needed so as to ensure the
   new signal frames are mapped to something.  Return a Bool
   indicating whether or not the operation was successful.
*/
static Bool extend ( ThreadState *tst, Addr addr, SizeT size )
{
   ThreadId tid = tst->tid;
   /* For tracking memory events, indicate the entire frame has been
      allocated.  Except, don't mess with the area which
      overlaps the previous frame's redzone. */
   /* XXX is the following call really right?  compared with the
      amd64-linux version, this doesn't appear to handle the redzone
      in the same way. */
   VG_TRACK( new_mem_stack_signal,
             addr, size - VG_STACK_REDZONE_SZB, tid );
   return True;
}
static
void die_and_free_mem ( ThreadState* tst, ShadowChunk* sc,
                        ShadowChunk** prev_chunks_next_ptr )
{
   /* Note: ban redzones again -- just in case user de-banned them
      with a client request... */
   VG_TRACK( ban_mem_heap, sc->data-VG_AR_CLIENT_REDZONE_SZB, 
                           VG_AR_CLIENT_REDZONE_SZB );
   VG_TRACK( die_mem_heap, sc->data, sc->size );
   VG_TRACK( ban_mem_heap, sc->data+sc->size, VG_AR_CLIENT_REDZONE_SZB );

   /* Remove sc from the malloclist using prev_chunks_next_ptr to
      avoid repeating the hash table lookup.  Can't remove until at least
      after free and free_mismatch errors are done because they use
      describe_addr() which looks for it in malloclist. */
   *prev_chunks_next_ptr = sc->next;

   if (VG_(needs).alternative_free)
      SK_(alt_free) ( sc, tst );
   else
      VG_(freeShadowChunk) ( sc );
}
Example #26
/* A thread attempts to lock a mutex.  If "blocking" then the thread
   is put into a blocked state until the lock is acquired.  Fails if:
    - tid is invalid
    - mutex has not been initialized
    - thread is blocked on another object (?)
    - blocking on this mutex could cause a deadlock
   (Lock rank detection?)
 */
void VG_(tm_mutex_trylock)(ThreadId tid, Addr mutexp)
{
   struct mutex *mx;

   mx = mutex_check_initialized(tid, mutexp, "trylocking");

   thread_block_mutex(tid, mx);

   if (mx->state == MX_Locked && mx->owner == tid) /* deadlock */
      mutex_report(tid, mutexp, MXE_Deadlock, "trylocking");

   VG_TRACK( pre_mutex_lock, tid, (void *)mutexp );
}
/* Remove a signal frame from thread 'tid's stack, and restore the CPU
   state from it.  Note, isRT is irrelevant here. */
void VG_(sigframe_destroy)( ThreadId tid, Bool isRT )
{
   ThreadState *tst;
   Addr sp;
   Int sigNo;
   struct hacky_sigframe* frame;
 
   vg_assert(VG_(is_valid_tid)(tid));
   tst = VG_(get_ThreadState)(tid);

   /* Check that the stack frame looks valid */
   sp = tst->arch.vex.guest_GPR1;
   vg_assert(VG_IS_16_ALIGNED(sp));

   frame = (struct hacky_sigframe*)(sp - 256);
   vg_assert(frame->magicPI == 0x31415927);

   /* restore the entire guest state, and shadows, from the
      frame.  Note, as per comments above, this is a kludge - should
      restore it from saved ucontext.  Oh well. */
   tst->arch.vex = frame->gst;
   tst->arch.vex_shadow1 = frame->gshadow1;
   tst->arch.vex_shadow2 = frame->gshadow2;
   sigNo = frame->sigNo_private;

   if (VG_(clo_trace_signals))
      VG_(message)(Vg_DebugMsg,
                   "vg_pop_signal_frame (thread %d): valid magic; CIA=%#x",
                   tid, tst->arch.vex.guest_CIA);

   VG_TRACK( die_mem_stack_signal, 
             (Addr)frame, 
             sizeof(struct hacky_sigframe) - VG_STACK_REDZONE_SZB );

   /* tell the tools */
   VG_TRACK( post_deliver_signal, tid, sigNo );
}
static void notify_tool_of_mmap(Addr a, SizeT len, UInt prot, ULong di_handle)
{
   Bool rr, ww, xx;

   /* 'a' is the return value from a real kernel mmap, hence: */
   vg_assert(VG_IS_PAGE_ALIGNED(a));
   /* whereas len is whatever the syscall supplied.  So: */
   len = VG_PGROUNDUP(len);

   rr = toBool(prot & VKI_PROT_READ);
   ww = toBool(prot & VKI_PROT_WRITE);
   xx = toBool(prot & VKI_PROT_EXEC);

   VG_TRACK( new_mem_mmap, a, len, rr, ww, xx, di_handle );
}
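
The rounding matters to tools: the kernel maps whole pages, so the tool must be told the whole of the last page is addressable even if the requested length was smaller. A standalone sketch of the same arithmetic, assuming 4 KiB pages (VG_PGROUNDUP itself uses the real page size):

#include <stdio.h>

#define PAGE_SZ      4096UL
#define PGROUNDUP(x) (((x) + PAGE_SZ - 1) & ~(PAGE_SZ - 1))

int main(void)
{
   /* an mmap of 5000 bytes occupies two pages */
   printf("%lu\n", PGROUNDUP(5000UL));   /* prints 8192 */
   return 0;
}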
void VG_(client_free) ( ThreadState* tst, void* p, VgAllocKind kind )
{
   ShadowChunk*  sc;
   ShadowChunk** prev_chunks_next_ptr;

   VGP_PUSHCC(VgpCliMalloc);

#  ifdef DEBUG_CLIENTMALLOC
   VG_(printf)("[m %d, f %d (%d)] client_free ( %p, %x )\n", 
               count_malloclists(), 
               0/*count_freelist()*/, 0/*vg_freed_list_volume*/,
               p, kind );
#  endif

   vg_cmalloc_n_frees ++;

   if (! needs_shadow_chunks()) {
      VG_(arena_free) ( VG_AR_CLIENT, p );

   } else {
      sc = getShadowChunk ( (Addr)p, &prev_chunks_next_ptr );

      if (sc == NULL) {
         VG_TRACK( bad_free, tst, (Addr)p );
         VGP_POPCC(VgpCliMalloc);
         return;
      }

      /* check if it's a matching free() / delete / delete [] */
      if (kind != sc->allockind)
         VG_TRACK( mismatched_free, tst, (Addr)p );

      die_and_free_mem ( tst, sc, prev_chunks_next_ptr );
   } 
   VGP_POPCC(VgpCliMalloc);
}
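
For illustration, client behaviour that reaches the bad_free path above is simply a free() of a pointer the client allocator never issued (deliberately invalid code; a free() of memory obtained with C++ new would analogously take the mismatched_free path):

#include <stdlib.h>

int main(void)
{
   int local;
   free(&local);   /* no ShadowChunk for this address -> bad_free */
   return 0;
}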