Example 1
/* If ptr is pointing to a heap-allocated block which hasn't been seen
   before, push it onto the mark stack.  Clique is the index of the
   clique leader; -1 if none. */
static void _lc_markstack_push(Addr ptr, Int clique)
{
   Int sh_no;

   if (!VG_(is_client_addr)(ptr)) /* quick filter */
      return;

   sh_no = find_shadow_for(ptr, lc_shadows, lc_n_shadows);

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("ptr=%p -> block %d\n", ptr, sh_no);

   if (sh_no == -1)
      return;

   tl_assert(sh_no >= 0 && sh_no < lc_n_shadows);
   tl_assert(ptr <= lc_shadows[sh_no]->data + lc_shadows[sh_no]->size);

   if (lc_markstack[sh_no].state == Unreached) {
      if (0)
	 VG_(printf)("pushing %p-%p\n", lc_shadows[sh_no]->data, 
		     lc_shadows[sh_no]->data + lc_shadows[sh_no]->size);

      tl_assert(lc_markstack[sh_no].next == -1);
      lc_markstack[sh_no].next = lc_markstack_top;
      lc_markstack_top = sh_no;
   }

   if (clique != -1) {
      if (0)
	 VG_(printf)("mopup: %d: %p is %d\n", 
		     sh_no, lc_shadows[sh_no]->data, lc_markstack[sh_no].state);

      /* An unmarked block - add it to the clique.  Add its size to
	 the clique-leader's indirect size.  If the new block was
	 itself a clique leader, it isn't any more, so add its
	 indirect to the new clique leader.

	 If this block *is* the clique leader, it means this is a
	 cyclic structure, so none of this applies. */
      if (lc_markstack[sh_no].state == Unreached) {
	 lc_markstack[sh_no].state = IndirectLeak;

	 if (sh_no != clique) {
	    if (VG_DEBUG_CLIQUE) {
	       if (lc_markstack[sh_no].indirect)
		  VG_(printf)("  clique %d joining clique %d adding %d+%d bytes\n", 
			      sh_no, clique, 
			      lc_shadows[sh_no]->size, lc_markstack[sh_no].indirect);
	       else
		  VG_(printf)("  %d joining %d adding %d\n", 
			      sh_no, clique, lc_shadows[sh_no]->size);
	    }

	    lc_markstack[clique].indirect += lc_shadows[sh_no]->size;
	    lc_markstack[clique].indirect += lc_markstack[sh_no].indirect;
	    lc_markstack[sh_no].indirect = 0; /* shouldn't matter */
	 }
      }
   } else if (ptr == lc_shadows[sh_no]->data) {
      lc_markstack[sh_no].state = Proper;
   } else {
      if (lc_markstack[sh_no].state == Unreached)
	 lc_markstack[sh_no].state = Interior;
   }
}
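
A worked illustration of the clique bookkeeping above, using made-up block numbers: suppose the checker is gathering clique 2 and the scan hands _lc_markstack_push a pointer into block 5.

   /* Illustrative trace (block numbers are hypothetical):
         _lc_markstack_push(ptr_into_block_5, 2)
            block 5 is Unreached          -> state becomes IndirectLeak
            lc_markstack[2].indirect      += lc_shadows[5]->size
            lc_markstack[2].indirect      += lc_markstack[5].indirect
            lc_markstack[5].indirect       = 0
      Had sh_no been 2 itself (a cycle back to the clique leader), the
      merge would have been skipped, as the comment in the code notes. */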
Example 2
/* Get and immediately print a StackTrace. */
void VG_(get_and_pp_StackTrace) ( ThreadId tid, UInt n_ips )
{
   Addr ips[n_ips];
   VG_(get_StackTrace)(tid, ips, n_ips);
   VG_(pp_StackTrace) (     ips, n_ips);
}
Example 3
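/* Non-reentrant front end to VG_(strtok_r): scanning state is kept
   between calls in 'olds', a variable defined outside this excerpt. */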
Char *
VG_(strtok) (Char *s, const Char *delim)
{
   return VG_(strtok_r) (s, delim, &olds);
}
Example 4
void malloc_trim ( void )
{ VG_(core_panic)("call to malloc_trim\n"); }
Example 5
Bool VG_(replacement_malloc_process_cmd_line_option)(Char* arg)
{
   if      (VG_CLO_STREQN(12, arg, "--alignment=")) {
      VG_(clo_alignment) = (Int)VG_(atoll)(&arg[12]);

      if (VG_(clo_alignment) < 4 
          || VG_(clo_alignment) > 4096
          || VG_(log2)( VG_(clo_alignment) ) == -1 /* not a power of 2 */) {
         VG_(message)(Vg_UserMsg, "");
         VG_(message)(Vg_UserMsg, 
            "Invalid --alignment= setting.  "
            "Should be a power of 2, >= 4, <= 4096.");
         VG_(bad_option)("--alignment");
      }
   }

   else if (VG_CLO_STREQ(arg, "--sloppy-malloc=yes"))
      VG_(clo_sloppy_malloc) = True;
   else if (VG_CLO_STREQ(arg, "--sloppy-malloc=no"))
      VG_(clo_sloppy_malloc) = False;

   else if (VG_CLO_STREQ(arg, "--trace-malloc=yes"))
      VG_(clo_trace_malloc) = True;
   else if (VG_CLO_STREQ(arg, "--trace-malloc=no"))
      VG_(clo_trace_malloc) = False;

   else 
      return False;

   return True;
}
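
The alignment check above rejects any value whose VG_(log2) is -1. A stand-alone equivalent of that power-of-two test (a sketch in Valgrind's style, not an actual Valgrind helper; Bool and UInt are the usual Valgrind typedefs) is:

/* Sketch: x is a power of 2 iff it is non-zero and clearing its
   lowest set bit (x & (x-1)) leaves nothing behind. */
static Bool is_power_of_two ( UInt x )
{
   return x != 0 && (x & (x-1)) == 0;
}

So 4096 passes, while 48 fails because 48 & 47 == 32, not 0.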
Example 6
static void load_one_suppressions_file ( Char* filename )
{
#  define N_BUF 200
   Int  fd;
   Bool eof;
   Char buf[N_BUF+1];
   fd = VG_(open_read)( filename );
   if (fd == -1) {
      VG_(message)(Vg_UserMsg, 
                   "FATAL: can't open suppressions file `%s'", 
                   filename );
      VG_(exit)(1);
   }

   while (True) {
      Suppression* supp;
      supp = VG_(malloc)(VG_AR_PRIVATE, sizeof(Suppression));
      supp->count = 0;
      supp->param = supp->caller0 = supp->caller1 
                  = supp->caller2 = supp->caller3 = NULL;

      eof = VG_(getLine) ( fd, buf, N_BUF );
      if (eof) break;

      if (!STREQ(buf, "{")) goto syntax_error;
      
      eof = VG_(getLine) ( fd, buf, N_BUF );
      if (eof || STREQ(buf, "}")) goto syntax_error;
      supp->sname = copyStr(buf);

      eof = VG_(getLine) ( fd, buf, N_BUF );
      if (eof) goto syntax_error;
      else if (STREQ(buf, "Param"))  supp->skind = Param;
      else if (STREQ(buf, "Value0")) supp->skind = Value0; /* backwards compat */
      else if (STREQ(buf, "Cond"))   supp->skind = Value0;
      else if (STREQ(buf, "Value1")) supp->skind = Value1;
      else if (STREQ(buf, "Value2")) supp->skind = Value2;
      else if (STREQ(buf, "Value4")) supp->skind = Value4;
      else if (STREQ(buf, "Value8")) supp->skind = Value8;
      else if (STREQ(buf, "Addr1"))  supp->skind = Addr1;
      else if (STREQ(buf, "Addr2"))  supp->skind = Addr2;
      else if (STREQ(buf, "Addr4"))  supp->skind = Addr4;
      else if (STREQ(buf, "Addr8"))  supp->skind = Addr8;
      else if (STREQ(buf, "Free"))   supp->skind = FreeS;
      else if (STREQ(buf, "PThread")) supp->skind = PThread;
      else goto syntax_error;

      if (supp->skind == Param) {
         eof = VG_(getLine) ( fd, buf, N_BUF );
         if (eof) goto syntax_error;
         supp->param = copyStr(buf);
      }

      eof = VG_(getLine) ( fd, buf, N_BUF );
      if (eof) goto syntax_error;
      supp->caller0 = copyStr(buf);
      if (!setLocationTy(&(supp->caller0), &(supp->caller0_ty)))
         goto syntax_error;

      eof = VG_(getLine) ( fd, buf, N_BUF );
      if (eof) goto syntax_error;
      if (!STREQ(buf, "}")) {
         supp->caller1 = copyStr(buf);
         if (!setLocationTy(&(supp->caller1), &(supp->caller1_ty)))
            goto syntax_error;
      
         eof = VG_(getLine) ( fd, buf, N_BUF );
         if (eof) goto syntax_error;
         if (!STREQ(buf, "}")) {
            supp->caller2 = copyStr(buf);
            if (!setLocationTy(&(supp->caller2), &(supp->caller2_ty)))
               goto syntax_error;

            eof = VG_(getLine) ( fd, buf, N_BUF );
            if (eof) goto syntax_error;
            if (!STREQ(buf, "}")) {
               supp->caller3 = copyStr(buf);
               if (!setLocationTy(&(supp->caller3), &(supp->caller3_ty)))
                  goto syntax_error;

               eof = VG_(getLine) ( fd, buf, N_BUF );
               if (eof || !STREQ(buf, "}")) goto syntax_error;
	    }
         }
      }

      supp->next = vg_suppressions;
      vg_suppressions = supp;
   }

   VG_(close)(fd);
   return;

  syntax_error:
   if (eof) {
      VG_(message)(Vg_UserMsg, 
                   "FATAL: in suppressions file `%s': unexpected EOF", 
                   filename );
   } else {
      VG_(message)(Vg_UserMsg, 
                   "FATAL: in suppressions file `%s': syntax error on: %s", 
                   filename, buf );
   }
   VG_(close)(fd);
   VG_(message)(Vg_UserMsg, "exiting now.");
   VG_(exit)(1);

#  undef N_BUF   
}
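
For reference, an entry this parser accepts has the shape below. The name and frame patterns are invented, and the obj:/fun: prefixes reflect what setLocationTy is presumed to recognise:

{
   hypothetical-suppression-name
   Cond
   fun:some_function_name
   obj:/usr/lib/some_library.so
}

The first line must be '{', followed by a name, then one of the kind keywords (Param, Cond/Value0, Value1/2/4/8, Addr1/2/4/8, Free, PThread); Param consumes one extra line naming the syscall parameter; between one and four caller lines follow, and '}' closes the entry.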
Example 7
/* HACK: We shouldn't call VG_(core_panic) or VG_(message) on the simulated
   CPU.  Really we should pass the request in the usual way, and
   Valgrind itself can do the panic.  Too tedious, however.  
*/
void pvalloc ( void )
{ VG_(core_panic)("call to pvalloc\n"); }
Example 8
void VG_(core_panic) ( const HChar* str )
{
   VG_(core_panic_at)(str, NULL);
}
Example 9
void VG_(tool_panic) ( const HChar* str )
{
   panic(VG_(details).name, VG_(details).bug_reports_to, str, NULL);
}
Example 10
// Print the scheduler status.
static void show_sched_status_wrk ( Bool host_stacktrace,
                                    Bool stack_usage,
                                    Bool exited_threads,
                                    const UnwindStartRegs* startRegsIN)
{
   Int i; 
   if (host_stacktrace) {
      const Bool save_clo_xml = VG_(clo_xml);
      Addr stacktop;
      Addr ips[BACKTRACE_DEPTH];
      Int  n_ips;
      ThreadState *tst 
         = VG_(get_ThreadState)( VG_(lwpid_to_vgtid)( VG_(gettid)() ) );
 
      // If necessary, fake up an ExeContext which is of our actual real CPU
      // state.  Could cause problems if we got the panic/exception within the
      // execontext/stack dump/symtab code.  But it's better than nothing.
      UnwindStartRegs startRegs;
      VG_(memset)(&startRegs, 0, sizeof(startRegs));
      
      if (startRegsIN == NULL) {
         GET_STARTREGS(&startRegs);
      } else {
         startRegs = *startRegsIN;
      }
 
      stacktop = tst->os_state.valgrind_stack_init_SP;

      n_ips = 
         VG_(get_StackTrace_wrk)(
            0/*tid is unknown*/, 
            ips, BACKTRACE_DEPTH, 
            NULL/*array to dump SP values in*/,
            NULL/*array to dump FP values in*/,
            &startRegs, stacktop
         );
      VG_(printf)("\nhost stacktrace:\n"); 
      VG_(clo_xml) = False;
      VG_(pp_StackTrace) (ips, n_ips);
      VG_(clo_xml) = save_clo_xml;
   }

   VG_(printf)("\nsched status:\n"); 
   VG_(printf)("  running_tid=%d\n", VG_(get_running_tid)());
   for (i = 1; i < VG_N_THREADS; i++) {
      VgStack* stack 
         = (VgStack*)VG_(threads)[i].os_state.valgrind_stack_base;
      /* If a thread slot was never used (yet), valgrind_stack_base is 0.
         If a thread slot is used by a thread or was used by a thread which
         has exited, then valgrind_stack_base points to the stack base. */
      if (VG_(threads)[i].status == VgTs_Empty
          && (!exited_threads || stack == 0)) continue;
      VG_(printf)("\nThread %d: status = %s\n", i, 
                  VG_(name_of_ThreadStatus)(VG_(threads)[i].status) );
      if (VG_(threads)[i].status != VgTs_Empty)
         VG_(get_and_pp_StackTrace)( i, BACKTRACE_DEPTH );
      if (stack_usage && VG_(threads)[i].client_stack_highest_byte != 0 ) {
         Addr start, end;
         
         start = end = 0;
         VG_(stack_limits)(VG_(threads)[i].client_stack_highest_byte,
                           &start, &end);
         if (start != end)
            VG_(printf)("client stack range: [%p %p] client SP: %p\n",
                        (void*)start, (void*)end, (void*)VG_(get_SP)(i));
         else
            VG_(printf)("client stack range: ???????\n");
      }
      if (stack_usage && stack != 0)
          VG_(printf)("valgrind stack top usage: %ld of %ld\n",
                      VG_(clo_valgrind_stacksize)
                      - VG_(am_get_VgStack_unused_szB)(stack,
                                                       VG_(clo_valgrind_stacksize)),
                      (SizeT) VG_(clo_valgrind_stacksize));
   }
   VG_(printf)("\n");
}
Example 11
void VG_(assert_fail) ( Bool isCore, const HChar* expr, const HChar* file, 
                        Int line, const HChar* fn, const HChar* format, ... )
{
   va_list vargs, vargs_copy;
   const HChar* component;
   const HChar* bugs_to;
   UInt written;

   static Bool entered = False;
   if (entered) 
      VG_(exit)(2);
   entered = True;

   if (isCore) {
      component = "valgrind";
      bugs_to   = VG_BUGS_TO;
   } else { 
      component = VG_(details).name;
      bugs_to   = VG_(details).bug_reports_to;
   }

   if (VG_(clo_xml))
      VG_(printf_xml)("</valgrindoutput>\n");

   // Treat vg_assert2(0, "foo") specially, as a panicky abort
   if (VG_STREQ(expr, "0")) {
      VG_(printf)("\n%s: %s:%d (%s): the 'impossible' happened.\n",
                  component, file, line, fn );
   } else {
      VG_(printf)("\n%s: %s:%d (%s): Assertion '%s' failed.\n",
                  component, file, line, fn, expr );
   }

   /* Check whether anything will be written */
   HChar buf[5];
   va_start(vargs, format);
   va_copy(vargs_copy, vargs);
   written = VG_(vsnprintf) ( buf, sizeof(buf), format, vargs );
   va_end(vargs);

   if (written > 0) {
      VG_(printf)("%s: ", component);
      VG_(vprintf)(format, vargs_copy);
      VG_(printf)("\n");
   }
   va_end(vargs_copy);

   report_and_quit(bugs_to, NULL);
}
Example 12
/* Top level entry point to leak detector.  Call here, passing in
   suitable address-validating functions (see comment at top of
   scan_all_valid_memory above).  All this is to avoid duplication
   of the leak-detection code for Memcheck and Addrcheck.
   Also pass in a tool-specific function to extract the .where field
   for allocated blocks, an indication of the resolution wanted for
   distinguishing different allocation points, and whether or not
   reachable blocks should be shown.
*/
void MAC_(do_detect_memory_leaks) (
   ThreadId tid, LeakCheckMode mode,
   Bool (*is_within_valid_secondary) ( Addr ),
   Bool (*is_valid_aligned_word)     ( Addr )
)
{
   Int i;
   
   tl_assert(mode != LC_Off);

   /* VG_(HT_to_array) allocates storage for shadows */
   lc_shadows = (MAC_Chunk**)VG_(HT_to_array)( MAC_(malloc_list),
                                               &lc_n_shadows );

   /* Sort the array. */
   VG_(ssort)((void*)lc_shadows, lc_n_shadows, sizeof(VgHashNode*), lc_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < lc_n_shadows-1; i++) {
      tl_assert( lc_shadows[i]->data <= lc_shadows[i+1]->data);
   }

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < lc_n_shadows-1; i++) {
      tl_assert( lc_shadows[i]->data + lc_shadows[i]->size
                 < lc_shadows[i+1]->data );
   }

   if (lc_n_shadows == 0) {
      tl_assert(lc_shadows == NULL);
      if (VG_(clo_verbosity) >= 1 && !VG_(clo_xml)) {
         VG_(message)(Vg_UserMsg, 
                      "No malloc'd blocks -- no leaks are possible.");
      }
      return;
   }

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
      VG_(message)(Vg_UserMsg, 
                   "searching for pointers to %d not-freed blocks.", 
                   lc_n_shadows );

   lc_min_mallocd_addr = lc_shadows[0]->data;
   lc_max_mallocd_addr = lc_shadows[lc_n_shadows-1]->data
                         + lc_shadows[lc_n_shadows-1]->size;

   lc_markstack = VG_(malloc)( lc_n_shadows * sizeof(*lc_markstack) );
   for (i = 0; i < lc_n_shadows; i++) {
      lc_markstack[i].next = -1;
      lc_markstack[i].state = Unreached;
      lc_markstack[i].indirect = 0;
   }
   lc_markstack_top = -1;

   lc_is_within_valid_secondary = is_within_valid_secondary;
   lc_is_valid_aligned_word     = is_valid_aligned_word;

   lc_scanned = 0;

   /* Do the scan of memory, pushing any pointers onto the mark stack */
   VG_(find_root_memory)(lc_scan_memory);

   /* Push registers onto mark stack */
   VG_(apply_to_GP_regs)(lc_markstack_push);

   /* Keep walking the heap until everything is found */
   lc_do_leakcheck(-1);

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
      VG_(message)(Vg_UserMsg, "checked %d bytes.", lc_scanned);

   blocks_leaked     = MAC_(bytes_leaked)     = 0;
   blocks_indirect   = MAC_(bytes_indirect)   = 0;
   blocks_dubious    = MAC_(bytes_dubious)    = 0;
   blocks_reachable  = MAC_(bytes_reachable)  = 0;
   blocks_suppressed = MAC_(bytes_suppressed) = 0;

   if (mode == LC_Full)
      full_report(tid);
   else
      make_summary();

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg, "");
      VG_(message)(Vg_UserMsg, "LEAK SUMMARY:");
      VG_(message)(Vg_UserMsg, "   definitely lost: %d bytes in %d blocks.", 
                               MAC_(bytes_leaked), blocks_leaked );
      if (blocks_indirect > 0)
	 VG_(message)(Vg_UserMsg, "   indirectly lost: %d bytes in %d blocks.", 
		      MAC_(bytes_indirect), blocks_indirect );
      VG_(message)(Vg_UserMsg, "     possibly lost: %d bytes in %d blocks.", 
                               MAC_(bytes_dubious), blocks_dubious );
      VG_(message)(Vg_UserMsg, "   still reachable: %d bytes in %d blocks.", 
                               MAC_(bytes_reachable), blocks_reachable );
      VG_(message)(Vg_UserMsg, "        suppressed: %d bytes in %d blocks.", 
                               MAC_(bytes_suppressed), blocks_suppressed );
      if (mode == LC_Summary && blocks_leaked > 0)
	 VG_(message)(Vg_UserMsg,
		      "Use --leak-check=full to see details of leaked memory.");
      else if (!MAC_(clo_show_reachable)) {
         VG_(message)(Vg_UserMsg, 
           "Reachable blocks (those to which a pointer was found) are not shown.");
         VG_(message)(Vg_UserMsg, 
            "To see them, rerun with: --show-reachable=yes");
      }
   }

   VG_(free) ( lc_shadows );
   VG_(free) ( lc_markstack );
}
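
A tool supplies its own validators when calling this entry point. A hypothetical Memcheck-style invocation, with the mc_* callback names assumed for illustration rather than taken from real sources:

   /* Hypothetical caller -- the mc_* callback names are assumptions: */
   MAC_(do_detect_memory_leaks)( tid, LC_Full,
                                 mc_is_within_valid_secondary,
                                 mc_is_valid_aligned_word );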
Example 13
static void full_report(ThreadId tid)
{
   Int i;
   Int    n_lossrecords;
   LossRecord* errlist;
   LossRecord* p;
   Bool   is_suppressed;
   LeakExtra leak_extra;

   /* Go through and group lost structures into cliques.  For each
      Unreached block, push it onto the mark stack, and find all the
      blocks linked to it.  These are marked IndirectLeak, and their
      size is added to the clique leader's indirect size.  If one of
      the found blocks was itself a clique leader (from a previous
      pass), then the cliques are merged. */
   for (i = 0; i < lc_n_shadows; i++) {
      if (VG_DEBUG_CLIQUE)
	 VG_(printf)("cliques: %d at %p -> %s\n",
		     i, lc_shadows[i]->data, str_lossmode(lc_markstack[i].state));
      if (lc_markstack[i].state != Unreached)
	 continue;

      tl_assert(lc_markstack_top == -1);

      if (VG_DEBUG_CLIQUE)
	 VG_(printf)("%d: gathering clique %p\n", i, lc_shadows[i]->data);
      
      _lc_markstack_push(lc_shadows[i]->data, i);

      lc_do_leakcheck(i);

      tl_assert(lc_markstack_top == -1);
      tl_assert(lc_markstack[i].state == IndirectLeak);

      lc_markstack[i].state = Unreached; /* Return to unreached state,
                                            to indicate it's a clique
                                            leader */
   }
      
   /* Common up the lost blocks so we can print sensible error messages. */
   n_lossrecords = 0;
   errlist       = NULL;
   for (i = 0; i < lc_n_shadows; i++) {
      ExeContext* where = lc_shadows[i]->where;

      for (p = errlist; p != NULL; p = p->next) {
         if (p->loss_mode == lc_markstack[i].state
             && VG_(eq_ExeContext) ( MAC_(clo_leak_resolution),
                                     p->allocated_at, 
                                     where) ) {
            break;
	 }
      }
      if (p != NULL) {
         p->num_blocks  ++;
         p->total_bytes += lc_shadows[i]->size;
	 p->indirect_bytes += lc_markstack[i].indirect;
      } else {
         n_lossrecords ++;
         p = VG_(malloc)(sizeof(LossRecord));
         p->loss_mode    = lc_markstack[i].state;
         p->allocated_at = where;
         p->total_bytes  = lc_shadows[i]->size;
	 p->indirect_bytes = lc_markstack[i].indirect;
         p->num_blocks   = 1;
         p->next         = errlist;
         errlist         = p;
      }
   }

   /* Print out the commoned-up blocks and collect summary stats. */
   for (i = 0; i < n_lossrecords; i++) {
      Bool        print_record;
      LossRecord* p_min = NULL;
      UInt        n_min = 0xFFFFFFFF;
      for (p = errlist; p != NULL; p = p->next) {
         if (p->num_blocks > 0 && p->total_bytes + p->indirect_bytes < n_min) {
            n_min = p->total_bytes + p->indirect_bytes;
            p_min = p;
         }
      }
      tl_assert(p_min != NULL);

      /* Ok to have tst==NULL;  it's only used if --gdb-attach=yes, and
         we disallow that when --leak-check=yes.  
         
         Prints the error if not suppressed, unless it's reachable (Proper
         or IndirectLeak) and --show-reachable=no */

      print_record = ( MAC_(clo_show_reachable) || 
		       Unreached == p_min->loss_mode || 
                       Interior == p_min->loss_mode );

      // Nb: because VG_(unique_error) does all the error processing
      // immediately, and doesn't save the error, leakExtra can be
      // stack-allocated.
      leak_extra.n_this_record   = i+1;
      leak_extra.n_total_records = n_lossrecords;
      leak_extra.lossRecord      = p_min;
      is_suppressed = 
         VG_(unique_error) ( tid, LeakErr, /*Addr*/0, /*s*/NULL,
                             /*extra*/&leak_extra, 
                             /*where*/p_min->allocated_at, print_record,
                             /*allow_GDB_attach*/False, /*count_error*/False );

      if (is_suppressed) {
         blocks_suppressed      += p_min->num_blocks;
         MAC_(bytes_suppressed) += p_min->total_bytes;

      } else if (Unreached  == p_min->loss_mode) {
         blocks_leaked      += p_min->num_blocks;
         MAC_(bytes_leaked) += p_min->total_bytes;

      } else if (IndirectLeak  == p_min->loss_mode) {
         blocks_indirect    += p_min->num_blocks;
         MAC_(bytes_indirect)+= p_min->total_bytes;

      } else if (Interior    == p_min->loss_mode) {
         blocks_dubious      += p_min->num_blocks;
         MAC_(bytes_dubious) += p_min->total_bytes;

      } else if (Proper        == p_min->loss_mode) {
         blocks_reachable      += p_min->num_blocks;
         MAC_(bytes_reachable) += p_min->total_bytes;

      } else {
         VG_(tool_panic)("generic_detect_memory_leaks: unknown loss mode");
      }
      p_min->num_blocks = 0;
   }
}
Example 14
/* Scan a block of memory between [start, start+len).  This range may
   be bogus, inaccessible, or otherwise strange; we deal with it.

   If clique != -1, it means we're gathering leaked memory into
   cliques, and clique is the index of the current clique leader. */
static void _lc_scan_memory(Addr start, SizeT len, Int clique)
{
   Addr ptr = VG_ROUNDUP(start, sizeof(Addr));
   Addr end = VG_ROUNDDN(start+len, sizeof(Addr));
   vki_sigset_t sigmask;

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("scan %p-%p\n", start, start+len);
   VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask);
   VG_(set_fault_catcher)(scan_all_valid_memory_catcher);

   lc_scanned += end-ptr;

   if (!VG_(is_client_addr)(ptr) ||
       !VG_(is_addressable)(ptr, sizeof(Addr), VKI_PROT_READ))
      ptr = VG_PGROUNDUP(ptr+1);	/* first page bad */

   while (ptr < end) {
      Addr addr;

      /* Skip invalid chunks */
      if (!(*lc_is_within_valid_secondary)(ptr)) {
	 ptr = VG_ROUNDUP(ptr+1, SECONDARY_SIZE);
	 continue;
      }

      /* Look to see if this page seems reasonable */
      if ((ptr % VKI_PAGE_SIZE) == 0) {
	 if (!VG_(is_client_addr)(ptr) ||
	     !VG_(is_addressable)(ptr, sizeof(Addr), VKI_PROT_READ))
	    ptr += VKI_PAGE_SIZE; /* bad page - skip it */
      }

      if (__builtin_setjmp(memscan_jmpbuf) == 0) {
	 if ((*lc_is_valid_aligned_word)(ptr)) {
	    addr = *(Addr *)ptr;
	    _lc_markstack_push(addr, clique);
	 } else if (0 && VG_DEBUG_LEAKCHECK)
	    VG_(printf)("%p not valid\n", ptr);
	 ptr += sizeof(Addr);
      } else {
	 /* We need to restore the signal mask, because we were
	    longjmped out of a signal handler. */
	 VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);

	 ptr = VG_PGROUNDUP(ptr+1);	/* bad page - skip it */
      }
   }

   VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
   VG_(set_fault_catcher)(NULL);
}
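
The two rounding macros at the top guarantee that only whole, aligned words inside [start, start+len) are ever dereferenced: with 4-byte words, for instance, VG_ROUNDUP(0x1001, 4) is 0x1004 and VG_ROUNDDN(0x100b, 4) is 0x1008, so a partial word at either end of the range is simply never scanned.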
Example 15
void VG_(record_address_error) ( Addr a, Int size, Bool isWrite )
{
   ErrContext ec;
   Bool       just_below_esp;
   if (vg_ignore_errors) return;

   just_below_esp 
      = VG_(is_just_below_ESP)( VG_(baseBlock)[VGOFF_(m_esp)], a );

   /* If this is caused by an access immediately below %ESP, and the
      user asks nicely, we just ignore it. */
   if (VG_(clo_workaround_gcc296_bugs) && just_below_esp)
      return;

   clear_ErrContext( &ec );
   ec.count   = 1;
   ec.next    = NULL;
   ec.where   = VG_(get_ExeContext)( False, VG_(baseBlock)[VGOFF_(m_eip)], 
                                            VG_(baseBlock)[VGOFF_(m_ebp)] );
   ec.ekind   = AddrErr;
   ec.axskind = isWrite ? WriteAxs : ReadAxs;
   ec.size    = size;
   ec.addr    = a;
   ec.tid     = VG_(get_current_tid)();
   ec.m_eip = VG_(baseBlock)[VGOFF_(m_eip)];
   ec.m_esp = VG_(baseBlock)[VGOFF_(m_esp)];
   ec.m_ebp = VG_(baseBlock)[VGOFF_(m_ebp)];
   ec.addrinfo.akind     = Undescribed;
   ec.addrinfo.maybe_gcc = just_below_esp;
   VG_(maybe_add_context) ( &ec );
}
Example 16
/* Print some helpful-ish text about unimplemented things, and give up. */
void VG_(unimplemented) ( const HChar* msg )
{
   if (VG_(clo_xml))
      VG_(printf_xml)("</valgrindoutput>\n");
   VG_(umsg)("\n");
   VG_(umsg)("Valgrind detected that your program requires\n");
   VG_(umsg)("the following unimplemented functionality:\n");
   VG_(umsg)("   %s\n", msg);
   VG_(umsg)("This may be because the functionality is hard to implement,\n");
   VG_(umsg)("or because no reasonable program would behave this way,\n");
   VG_(umsg)("or because nobody has yet needed it.  "
             "In any case, let us know at\n");
   VG_(umsg)("%s and/or try to work around the problem, if you can.\n",
             VG_BUGS_TO);
   VG_(umsg)("\n");
   VG_(umsg)("Valgrind has to exit now.  Sorry.  Bye!\n");
   VG_(umsg)("\n");
   VG_(show_sched_status)(False,  // host_stacktrace
                          False,  // stack_usage
                          False); // exited_threads
   VG_(exit)(1);
}
Example 17
void VG_(show_all_errors) ( void )
{
   Int         i, n_min;
   Int         n_err_contexts, n_supp_contexts;
   ErrContext  *p, *p_min;
   Suppression *su;
   Bool        any_supp;

   if (VG_(clo_verbosity) == 0)
      return;

   n_err_contexts = 0;
   for (p = vg_err_contexts; p != NULL; p = p->next) {
      if (p->supp == NULL)
         n_err_contexts++;
   }

   n_supp_contexts = 0;
   for (su = vg_suppressions; su != NULL; su = su->next) {
      if (su->count > 0)
         n_supp_contexts++;
   }

   VG_(message)(Vg_UserMsg,
                "ERROR SUMMARY: "
                "%d errors from %d contexts (suppressed: %d from %d)",
                vg_n_errs_found, n_err_contexts, 
                vg_n_errs_suppressed, n_supp_contexts );

   if (VG_(clo_verbosity) <= 1)
      return;

   /* Print the contexts in order of increasing error count. */
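   /* (A selection sort done in place: each pass picks the not-yet-printed
      context with the smallest count, prints it, then bumps its count to
      1 << 30 so it can never be picked again.) */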
   for (i = 0; i < n_err_contexts; i++) {
      n_min = (1 << 30) - 1;
      p_min = NULL;
      for (p = vg_err_contexts; p != NULL; p = p->next) {
         if (p->supp != NULL) continue;
         if (p->count < n_min) {
            n_min = p->count;
            p_min = p;
         }
      }
      if (p_min == NULL) VG_(panic)("pp_AllErrContexts");

      VG_(message)(Vg_UserMsg, "");
      VG_(message)(Vg_UserMsg, "%d errors in context %d of %d:",
                   p_min->count,
                   i+1, n_err_contexts);
      pp_ErrContext( p_min, False );

      if ((i+1 == VG_(clo_dump_error))) {
	VG_(translate) ( 0 /* dummy ThreadId; irrelevant due to below NULLs */,
                         p_min->where->eips[0], NULL, NULL, NULL );
      }

      p_min->count = 1 << 30;
   } 

   if (n_supp_contexts > 0) 
      VG_(message)(Vg_DebugMsg, "");
   any_supp = False;
   for (su = vg_suppressions; su != NULL; su = su->next) {
      if (su->count > 0) {
         any_supp = True;
         VG_(message)(Vg_DebugMsg, "supp: %4d %s", su->count, 
                                   su->sname);
      }
   }

   if (n_err_contexts > 0) {
      if (any_supp) 
         VG_(message)(Vg_UserMsg, "");
      VG_(message)(Vg_UserMsg,
                   "IN SUMMARY: "
                   "%d errors from %d contexts (suppressed: %d from %d)",
                   vg_n_errs_found, n_err_contexts, 
                   vg_n_errs_suppressed,
                   n_supp_contexts );
      VG_(message)(Vg_UserMsg, "");
   }
}
Example 18
/* store registers in the guest state (gdbserver_to_valgrind)
   or fetch register from the guest state (valgrind_to_gdbserver). */
static
void transfer_register (ThreadId tid, int abs_regno, void * buf,
                        transfer_direction dir, int size, Bool *mod)
{
    ThreadState* tst = VG_(get_ThreadState)(tid);
    int set = abs_regno / dyn_num_regs;
    int regno = abs_regno % dyn_num_regs;
    *mod = False;

    VexGuestAMD64State* amd64 = (VexGuestAMD64State*) get_arch (set, tst);

    switch (regno) {
    // numbers here have to match the order of regs above.
    // Attention: gdb order does not match valgrind order.
    case 0:
        VG_(transfer) (&amd64->guest_RAX, buf, dir, size, mod);
        break;
    case 1:
        VG_(transfer) (&amd64->guest_RBX, buf, dir, size, mod);
        break;
    case 2:
        VG_(transfer) (&amd64->guest_RCX, buf, dir, size, mod);
        break;
    case 3:
        VG_(transfer) (&amd64->guest_RDX, buf, dir, size, mod);
        break;
    case 4:
        VG_(transfer) (&amd64->guest_RSI, buf, dir, size, mod);
        break;
    case 5:
        VG_(transfer) (&amd64->guest_RDI, buf, dir, size, mod);
        break;
    case 6:
        VG_(transfer) (&amd64->guest_RBP, buf, dir, size, mod);
        break;
    case 7:
        VG_(transfer) (&amd64->guest_RSP, buf, dir, size, mod);
        break;
    case 8:
        VG_(transfer) (&amd64->guest_R8,  buf, dir, size, mod);
        break;
    case 9:
        VG_(transfer) (&amd64->guest_R9,  buf, dir, size, mod);
        break;
    case 10:
        VG_(transfer) (&amd64->guest_R10, buf, dir, size, mod);
        break;
    case 11:
        VG_(transfer) (&amd64->guest_R11, buf, dir, size, mod);
        break;
    case 12:
        VG_(transfer) (&amd64->guest_R12, buf, dir, size, mod);
        break;
    case 13:
        VG_(transfer) (&amd64->guest_R13, buf, dir, size, mod);
        break;
    case 14:
        VG_(transfer) (&amd64->guest_R14, buf, dir, size, mod);
        break;
    case 15:
        VG_(transfer) (&amd64->guest_R15, buf, dir, size, mod);
        break;
    case 16:
        VG_(transfer) (&amd64->guest_RIP, buf, dir, size, mod);
        break;
    case 17:
        if (dir == valgrind_to_gdbserver) {
            ULong rflags;
            /* we can only retrieve the real flags (set 0);
               retrieving shadow flags is not ok */
            if (set == 0)
                rflags = LibVEX_GuestAMD64_get_rflags (amd64);
            else
                rflags = 0;
            VG_(transfer) (&rflags, buf, dir, size, mod);
        } else {
            *mod = False; //GDBTD? how do we store rflags in libvex_guest_amd64.h ???
        }
        break;
    case 18:
        *mod = False;
        break; //GDBTD VG_(transfer) (&amd64->guest_CS, buf, dir, size, mod);
    case 19:
        *mod = False;
        break; //GDBTD VG_(transfer) (&amd64->guest_SS, buf, dir, size, mod);
    case 20:
        *mod = False;
        break; //GDBTD VG_(transfer) (&amd64->guest_DS, buf, dir, size, mod);
    case 21:
        *mod = False;
        break; //GDBTD VG_(transfer) (&amd64->guest_ES, buf, dir, size, mod);
    case 22:
        *mod = False;
        break; //GDBTD VG_(transfer) (&amd64->guest_FS, buf, dir, size, mod);
    case 23:
        VG_(transfer) (&amd64->guest_GS_0x60, buf, dir, size, mod);
        break;
    case 24:
    case 25:
    case 26:
    case 27: /* registers 24 to 31 are float registers: 80 bits in gdb but 64 bits in valgrind */
    case 28:
    case 29:
    case 30:
    case 31:
        if (dir == valgrind_to_gdbserver) {
            UChar fpreg80[10];
            convert_f64le_to_f80le ((UChar *)&amd64->guest_FPREG[regno-24],
                                    fpreg80);
            VG_(transfer) (&fpreg80, buf, dir, sizeof(fpreg80), mod);
        } else {
            ULong fpreg64;
            convert_f80le_to_f64le (buf, (UChar *)&fpreg64);
            VG_(transfer) (&amd64->guest_FPREG[regno-24], &fpreg64,
                           dir, sizeof(fpreg64), mod);
        }
        break;
    case 32:
        if (dir == valgrind_to_gdbserver) {
            // vex only models the rounding bits (see libvex_guest_amd64.h)
            UWord value = 0x037f;
            value |= amd64->guest_FPROUND << 10;
            VG_(transfer)(&value, buf, dir, size, mod);
        } else {
            *mod = False; // GDBTD???? VEX equivalent fcrtl
        }
        break;
    case 33:
        if (dir == valgrind_to_gdbserver) {
            UWord value = amd64->guest_FC3210;
            value |= (amd64->guest_FTOP & 7) << 11;
            VG_(transfer)(&value, buf, dir, size, mod);
        } else {
            *mod = False; // GDBTD???? VEX equivalent fstat
        }
        break;
    case 34:
        if (dir == valgrind_to_gdbserver) {
            // vex doesn't model these precisely
            UWord value =
                ((amd64->guest_FPTAG[0] ? 0 : 3) << 0)  |
                ((amd64->guest_FPTAG[1] ? 0 : 3) << 2)  |
                ((amd64->guest_FPTAG[2] ? 0 : 3) << 4)  |
                ((amd64->guest_FPTAG[3] ? 0 : 3) << 6)  |
                ((amd64->guest_FPTAG[4] ? 0 : 3) << 8)  |
                ((amd64->guest_FPTAG[5] ? 0 : 3) << 10) |
                ((amd64->guest_FPTAG[6] ? 0 : 3) << 12) |
                ((amd64->guest_FPTAG[7] ? 0 : 3) << 14);
            VG_(transfer)(&value, buf, dir, size, mod);
        } else {
            *mod = False; // GDBTD???? VEX equivalent ftag
        }
        break;
    case 35:
        *mod = False;
        break; // GDBTD ??? equivalent of fiseg
    case 36:
        *mod = False;
        break; // GDBTD ??? equivalent of fioff
    case 37:
        *mod = False;
        break; // GDBTD ??? equivalent of foseg
    case 38:
        *mod = False;
        break; // GDBTD ??? equivalent of fooff
    case 39:
        *mod = False;
        break; // GDBTD ??? equivalent of fop
    case 40:
        VG_(transfer) (&amd64->guest_YMM0[0],  buf, dir, size, mod);
        break;
    case 41:
        VG_(transfer) (&amd64->guest_YMM1[0],  buf, dir, size, mod);
        break;
    case 42:
        VG_(transfer) (&amd64->guest_YMM2[0],  buf, dir, size, mod);
        break;
    case 43:
        VG_(transfer) (&amd64->guest_YMM3[0],  buf, dir, size, mod);
        break;
    case 44:
        VG_(transfer) (&amd64->guest_YMM4[0],  buf, dir, size, mod);
        break;
    case 45:
        VG_(transfer) (&amd64->guest_YMM5[0],  buf, dir, size, mod);
        break;
    case 46:
        VG_(transfer) (&amd64->guest_YMM6[0],  buf, dir, size, mod);
        break;
    case 47:
        VG_(transfer) (&amd64->guest_YMM7[0],  buf, dir, size, mod);
        break;
    case 48:
        VG_(transfer) (&amd64->guest_YMM8[0],  buf, dir, size, mod);
        break;
    case 49:
        VG_(transfer) (&amd64->guest_YMM9[0],  buf, dir, size, mod);
        break;
    case 50:
        VG_(transfer) (&amd64->guest_YMM10[0], buf, dir, size, mod);
        break;
    case 51:
        VG_(transfer) (&amd64->guest_YMM11[0], buf, dir, size, mod);
        break;
    case 52:
        VG_(transfer) (&amd64->guest_YMM12[0], buf, dir, size, mod);
        break;
    case 53:
        VG_(transfer) (&amd64->guest_YMM13[0], buf, dir, size, mod);
        break;
    case 54:
        VG_(transfer) (&amd64->guest_YMM14[0], buf, dir, size, mod);
        break;
    case 55:
        VG_(transfer) (&amd64->guest_YMM15[0], buf, dir, size, mod);
        break;
    case 56:
        if (dir == valgrind_to_gdbserver) {
            // vex only models the rounding bits (see libvex_guest_amd64.h)
            UWord value = 0x1f80;
            value |= amd64->guest_SSEROUND << 13;
            VG_(transfer)(&value, buf, dir, size, mod);
        } else {
            *mod = False;  // GDBTD???? VEX equivalent mxcsr
        }
        break;
    case 57:
        *mod = False;
        break; // GDBTD???? VEX equivalent { "orig_rax"},
    case 58:
        VG_(transfer) (&amd64->guest_YMM0[4],  buf, dir, size, mod);
        break;
    case 59:
        VG_(transfer) (&amd64->guest_YMM1[4],  buf, dir, size, mod);
        break;
    case 60:
        VG_(transfer) (&amd64->guest_YMM2[4],  buf, dir, size, mod);
        break;
    case 61:
        VG_(transfer) (&amd64->guest_YMM3[4],  buf, dir, size, mod);
        break;
    case 62:
        VG_(transfer) (&amd64->guest_YMM4[4],  buf, dir, size, mod);
        break;
    case 63:
        VG_(transfer) (&amd64->guest_YMM5[4],  buf, dir, size, mod);
        break;
    case 64:
        VG_(transfer) (&amd64->guest_YMM6[4],  buf, dir, size, mod);
        break;
    case 65:
        VG_(transfer) (&amd64->guest_YMM7[4],  buf, dir, size, mod);
        break;
    case 66:
        VG_(transfer) (&amd64->guest_YMM8[4],  buf, dir, size, mod);
        break;
    case 67:
        VG_(transfer) (&amd64->guest_YMM9[4],  buf, dir, size, mod);
        break;
    case 68:
        VG_(transfer) (&amd64->guest_YMM10[4], buf, dir, size, mod);
        break;
    case 69:
        VG_(transfer) (&amd64->guest_YMM11[4], buf, dir, size, mod);
        break;
    case 70:
        VG_(transfer) (&amd64->guest_YMM12[4], buf, dir, size, mod);
        break;
    case 71:
        VG_(transfer) (&amd64->guest_YMM13[4], buf, dir, size, mod);
        break;
    case 72:
        VG_(transfer) (&amd64->guest_YMM14[4], buf, dir, size, mod);
        break;
    case 73:
        VG_(transfer) (&amd64->guest_YMM15[4], buf, dir, size, mod);
        break;
    default:
        vg_assert(0);
    }
}
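
A note on case 34 above: the hardware x87 tag word stores two bits per st register (00 = valid, 01 = zero, 10 = special, 11 = empty), whereas VEX's guest_FPTAG[] records only whether each slot is occupied. The code therefore synthesises 00 for an occupied slot and 11 for an empty one; that approximation is what the "vex doesn't model these precisely" comment refers to.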
Example 19
void VG_(replacement_malloc_print_debug_usage)(void)
{
   VG_(printf)(
"    --trace-malloc=no|yes     show client malloc details? [no]\n"
   );
}
Example 20
static Addr build_rt_sigframe(ThreadState *tst,
			      Addr sp_top_of_frame,
			      const vki_siginfo_t *siginfo,
			      const struct vki_ucontext *siguc,
			      UInt flags,
			      const vki_sigset_t *mask,
			      void *restorer)
{
   struct rt_sigframe *frame;
   Addr sp = sp_top_of_frame;
   Int sigNo = siginfo->si_signo;

   vg_assert((flags & VKI_SA_SIGINFO) != 0);
   vg_assert((sizeof(*frame) & 7) == 0);
   vg_assert((sp & 7) == 0);

   sp -= sizeof(*frame);
   frame = (struct rt_sigframe *)sp;

   if (! ML_(sf_maybe_extend_stack)(tst, sp, sizeof(*frame), flags))
      return sp_top_of_frame;

   /* retcode, sigNo, sc, sregs fields are to be written */
   VG_TRACK( pre_mem_write, Vg_CoreSignal, tst->tid, "signal handler frame",
	     sp, offsetof(struct rt_sigframe, vg) );

   save_sigregs(tst, &frame->uc.uc_mcontext);

   if (flags & VKI_SA_RESTORER) {
      frame->retcode[0] = 0;
      frame->retcode[1] = 0;
      SET_SIGNAL_GPR(tst, 14, restorer);
   } else {
      frame->retcode[0] = 0x0a;              /* 'svc' opcode on s390x */
      frame->retcode[1] = __NR_rt_sigreturn; /* svc operand */
      /* This should normally be &frame->retcode, but since there
         might be problems with a non-exec stack and we would have to
         discard the translation for the on-stack sigreturn, we just
         use the trampoline like x86 and ppc do.  We still fill in the
         retcode; let's just hope that nobody actually jumps here */
      SET_SIGNAL_GPR(tst, 14, (Addr)&VG_(s390x_linux_SUBST_FOR_rt_sigreturn));
   }

   VG_(memcpy)(&frame->info, siginfo, sizeof(vki_siginfo_t));
   frame->uc.uc_flags = 0;
   frame->uc.uc_link = 0;
   frame->uc.uc_sigmask = *mask;
   frame->uc.uc_stack = tst->altstack;

   SET_SIGNAL_GPR(tst, 2, siginfo->si_signo);
   SET_SIGNAL_GPR(tst, 3, &frame->info);
   SET_SIGNAL_GPR(tst, 4, &frame->uc);

   /* Set up backchain. */
   *((Addr *) sp) = sp_top_of_frame;

   VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid,
             sp, offsetof(struct rt_sigframe, vg) );

   build_vg_sigframe(&frame->vg, tst, flags, sigNo);
   return sp;
}
Example 21
void malloc_stats ( void )
{ VG_(core_panic)("call to malloc_stats\n"); }
Example 22
static void test__foreach_map(void)
{
   fprintf(stderr, "Calling VG_(foreach_map)()\n");
   VG_(foreach_map)(f, /*dummy*/NULL);
}
Example 23
void malloc_set_state ( void )
{ VG_(core_panic)("call to malloc_set_state\n"); }
Example 24
/* Does an error context match a suppression?  I.e. is this a
   suppressible error?  If so, return a pointer to the Suppression
   record, otherwise NULL.
   Tries to minimise the number of calls to what_fn_is_this since they
   are expensive.  
*/
static Suppression* is_suppressible_error ( ErrContext* ec )
{
#  define STREQ(s1,s2) (s1 != NULL && s2 != NULL \
                        && VG_(strcmp)((s1),(s2))==0)

   Char caller0_obj[M_VG_ERRTXT];
   Char caller0_fun[M_VG_ERRTXT];
   Char caller1_obj[M_VG_ERRTXT];
   Char caller1_fun[M_VG_ERRTXT];
   Char caller2_obj[M_VG_ERRTXT];
   Char caller2_fun[M_VG_ERRTXT];
   Char caller3_obj[M_VG_ERRTXT];
   Char caller3_fun[M_VG_ERRTXT];

   Suppression* su;
   Int          su_size;

   /* vg_what_fn_or_object_is_this returns:
         <function_name>      or
         <object_name>        or
         ???
      so the strings in the suppression file should match these.
   */

   /* Initialise these strs so they are always safe to compare, even
      if what_fn_or_object_is_this doesn't write anything to them. */
   caller0_obj[0] = caller1_obj[0] = caller2_obj[0] = caller3_obj[0] = 0;
   caller0_fun[0] = caller1_fun[0] = caller2_fun[0] = caller3_fun[0] = 0;

   VG_(what_obj_and_fun_is_this)
      ( ec->where->eips[0], caller0_obj, M_VG_ERRTXT,
                            caller0_fun, M_VG_ERRTXT );
   VG_(what_obj_and_fun_is_this)
      ( ec->where->eips[1], caller1_obj, M_VG_ERRTXT,
                            caller1_fun, M_VG_ERRTXT );

   if (VG_(clo_backtrace_size) > 2) {
      VG_(what_obj_and_fun_is_this)
         ( ec->where->eips[2], caller2_obj, M_VG_ERRTXT,
                               caller2_fun, M_VG_ERRTXT );

      if (VG_(clo_backtrace_size) > 3) {
         VG_(what_obj_and_fun_is_this)
            ( ec->where->eips[3], caller3_obj, M_VG_ERRTXT,
                                  caller3_fun, M_VG_ERRTXT );
      }
   }

   /* See if the error context matches any suppression. */
   for (su = vg_suppressions; su != NULL; su = su->next) {
      switch (su->skind) {
         case FreeS:  case PThread:
         case Param:  case Value0: su_size = 0; break;
         case Value1: case Addr1:  su_size = 1; break;
         case Value2: case Addr2:  su_size = 2; break;
         case Value4: case Addr4:  su_size = 4; break;
         case Value8: case Addr8:  su_size = 8; break;
         default: VG_(panic)("errcontext_matches_suppression");
      }
      switch (su->skind) {
         case Param:
            if (ec->ekind != ParamErr) continue;
            if (!STREQ(su->param, ec->syscall_param)) continue;
            break;
         case Value0: case Value1: case Value2: case Value4: case Value8:
            if (ec->ekind != ValueErr) continue;
            if (ec->size  != su_size)  continue;
            break;
         case Addr1: case Addr2: case Addr4: case Addr8:
            if (ec->ekind != AddrErr) continue;
            if (ec->size  != su_size) continue;
            break;
         case FreeS:
            if (ec->ekind != FreeErr 
                && ec->ekind != FreeMismatchErr) continue;
            break;
         case PThread:
            if (ec->ekind != PThreadErr) continue;
            break;
      }

      switch (su->caller0_ty) {
         case ObjName: if (!VG_(stringMatch)(su->caller0, 
                                             caller0_obj)) continue;
                       break;
         case FunName: if (!VG_(stringMatch)(su->caller0, 
                                             caller0_fun)) continue;
                       break;
         default: goto baaaad;
      }

      if (su->caller1 != NULL) {
         vg_assert(VG_(clo_backtrace_size) >= 2);
         switch (su->caller1_ty) {
            case ObjName: if (!VG_(stringMatch)(su->caller1, 
                                                caller1_obj)) continue;
                          break;
            case FunName: if (!VG_(stringMatch)(su->caller1, 
                                                caller1_fun)) continue;
                          break;
            default: goto baaaad;
         }
      }

      if (VG_(clo_backtrace_size) > 2 && su->caller2 != NULL) {
         switch (su->caller2_ty) {
            case ObjName: if (!VG_(stringMatch)(su->caller2, 
                                                caller2_obj)) continue;
                          break;
            case FunName: if (!VG_(stringMatch)(su->caller2, 
                                                caller2_fun)) continue;
                          break;
            default: goto baaaad;
         }
      }

      if (VG_(clo_backtrace_size) > 3 && su->caller3 != NULL) {
         switch (su->caller3_ty) {
            case ObjName: if (!VG_(stringMatch)(su->caller3,
                                                caller3_obj)) continue;
                          break;
            case FunName: if (!VG_(stringMatch)(su->caller3, 
                                                caller3_fun)) continue;
                          break;
            default: goto baaaad;
         }
      }

      return su;
   }

   return NULL;

  baaaad:
   VG_(panic)("is_suppressible_error");

#  undef STREQ
}
Example 25
UInt VG_(get_StackTrace) ( ThreadId tid, StackTrace ips, UInt n_ips )
{
   /* thread in thread table */
   Addr ip                 = VG_(get_IP)(tid);
   Addr fp                 = VG_(get_FP)(tid);
   Addr sp                 = VG_(get_SP)(tid);
   Addr lr                 = VG_(get_LR)(tid);
   Addr stack_highest_word = VG_(threads)[tid].client_stack_highest_word;

#  if defined(VGP_x86_linux)
   /* Nasty little hack to deal with syscalls - if libc is using its
      _dl_sysinfo_int80 function for syscalls (the TLS version does),
      then ip will always appear to be in that function when doing a
      syscall, not the actual libc function doing the syscall.  This
      check sees if IP is within that function, and pops the return
      address off the stack so that ip is placed within the library
      function calling the syscall.  This makes stack backtraces much
      more useful.

      The function is assumed to look like this (from glibc-2.3.6 sources):
         _dl_sysinfo_int80:
            int $0x80
            ret
      That is 3 (2+1) bytes long.  We could be more thorough and check
      the 3 bytes of the function are as expected, but I can't be
      bothered.
   */
   if (VG_(client__dl_sysinfo_int80) != 0 /* we know its address */
       && ip >= VG_(client__dl_sysinfo_int80)
       && ip < VG_(client__dl_sysinfo_int80)+3
       && VG_(am_is_valid_for_client)(sp, sizeof(Addr), VKI_PROT_READ)) {
      ip = *(Addr *)sp;
      sp += sizeof(Addr);
   }
#  endif

   if (0)
      VG_(printf)("tid %d: stack_highest=%p ip=%p sp=%p fp=%p\n",
		  tid, stack_highest_word, ip, sp, fp);

   return VG_(get_StackTrace2)(tid, ips, n_ips, ip, sp, fp, lr, sp, 
                                    stack_highest_word);
}
Example 26
static void pp_ErrContext ( ErrContext* ec, Bool printCount )
{
   if (printCount)
      VG_(message)(Vg_UserMsg, "Observed %d times:", ec->count );
   if (ec->tid > 1)
      VG_(message)(Vg_UserMsg, "Thread %d:", ec->tid );
   switch (ec->ekind) {
      case ValueErr:
         if (ec->size == 0) {
             VG_(message)(
                Vg_UserMsg,
                "Conditional jump or move depends on uninitialised value(s)");
         } else {
             VG_(message)(Vg_UserMsg,
                          "Use of uninitialised value of size %d",
                          ec->size);
         }
         VG_(pp_ExeContext)(ec->where);
         break;
      case AddrErr:
         switch (ec->axskind) {
            case ReadAxs:
               VG_(message)(Vg_UserMsg, "Invalid read of size %d", 
                                        ec->size ); 
               break;
            case WriteAxs:
               VG_(message)(Vg_UserMsg, "Invalid write of size %d", 
                                        ec->size ); 
               break;
            case ExecAxs:
               VG_(message)(Vg_UserMsg, "Jump to the invalid address "
                                        "stated on the next line");
               break;
            default: 
               VG_(panic)("pp_ErrContext(axskind)");
         }
         VG_(pp_ExeContext)(ec->where);
         pp_AddrInfo(ec->addr, &ec->addrinfo);
         break;
      case FreeErr:
         VG_(message)(Vg_UserMsg,"Invalid free() / delete / delete[]");
         /* fall through */
      case FreeMismatchErr:
         if (ec->ekind == FreeMismatchErr)
            VG_(message)(Vg_UserMsg, 
                         "Mismatched free() / delete / delete []");
         VG_(pp_ExeContext)(ec->where);
         pp_AddrInfo(ec->addr, &ec->addrinfo);
         break;
      case ParamErr:
         if (ec->isWriteableLack) {
            VG_(message)(Vg_UserMsg, 
               "Syscall param %s contains unaddressable byte(s)",
                ec->syscall_param );
         } else {
            VG_(message)(Vg_UserMsg, 
                "Syscall param %s contains uninitialised or "
                "unaddressable byte(s)",
                ec->syscall_param);
         }
         VG_(pp_ExeContext)(ec->where);
         pp_AddrInfo(ec->addr, &ec->addrinfo);
         break;
      case UserErr:
         if (ec->isWriteableLack) {
            VG_(message)(Vg_UserMsg, 
               "Unaddressable byte(s) found during client check request");
         } else {
            VG_(message)(Vg_UserMsg, 
               "Uninitialised or "
               "unaddressable byte(s) found during client check request");
         }
         VG_(pp_ExeContext)(ec->where);
         pp_AddrInfo(ec->addr, &ec->addrinfo);
         break;
      case PThreadErr:
         VG_(message)(Vg_UserMsg, "%s", ec->syscall_param );
         VG_(pp_ExeContext)(ec->where);
         break;
      default: 
         VG_(panic)("pp_ErrContext");
   }
}
Example 27
/* Take a snapshot of the client's stack, putting up to 'n_ips' IPs
   into 'ips'.  In order to be thread-safe, we pass in the thread's
   IP and SP, plus FP and LR if those are meaningful.  Returns the
   number of IPs put in 'ips'.

   If you know what the thread ID for this stack is, send that as the
   first parameter, else send zero.  This helps generate better stack
   traces on ppc64-linux and has no effect on other platforms.
*/
UInt VG_(get_StackTrace2) ( ThreadId tid_if_known,
                            Addr* ips, UInt n_ips, 
                            Addr ip, Addr sp, Addr fp, Addr lr,
                            Addr fp_min, Addr fp_max_orig )
{
#if defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux)
   Bool  lr_is_first_RA = False; /* ppc only */
#endif
   Bool  debug = False;
   Int   i;
   Addr  fp_max;
   UInt  n_found = 0;

   vg_assert(sizeof(Addr) == sizeof(UWord));
   vg_assert(sizeof(Addr) == sizeof(void*));

   /* Snaffle IPs from the client's stack into ips[0 .. n_ips-1],
      stopping when the trail goes cold, which we guess to be
      when FP is not a reasonable stack location. */

   // JRS 2002-sep-17: hack, to round up fp_max to the end of the
   // current page, at least.  Dunno if it helps.
   // NJN 2002-sep-17: seems to -- stack traces look like 1.0.X again
   fp_max = VG_PGROUNDUP(fp_max_orig);
   fp_max -= sizeof(Addr);

   if (debug)
      VG_(printf)("n_ips=%d fp_min=%p fp_max_orig=%p, fp_max=%p ip=%p fp=%p\n",
		  n_ips, fp_min, fp_max_orig, fp_max, ip, fp);

   /* Assertion broken before main() is reached in pthreaded programs;  the
    * offending stack traces only have one item.  --njn, 2002-aug-16 */
   /* vg_assert(fp_min <= fp_max);*/

   if (fp_min + VG_(clo_max_stackframe) <= fp_max) {
      /* If the stack is ridiculously big, don't poke around ... but
         don't bomb out either.  Needed to make John Regehr's
         user-space threads package work. JRS 20021001 */
      ips[0] = ip;
      return 1;
   } 

   /* Otherwise unwind the stack in a platform-specific way.  Trying
      to merge the x86, amd64, ppc32 and ppc64 logic into a single
      piece of code is just too confusing and difficult to
      performance-tune.  */

#  if defined(VGP_x86_linux)

   /*--------------------- x86 ---------------------*/

   /* fp is %ebp.  sp is %esp.  ip is %eip. */

   ips[0] = ip;
   i = 1;

   /* Loop unwinding the stack. Note that the IP value we get on
    * each pass (whether from CFI info or a stack frame) is a
    * return address so is actually after the calling instruction
    * in the calling function.
    *
    * Because of this we subtract one from the IP after each pass
    * of the loop so that we find the right CFI block on the next
    * pass - otherwise we can find the wrong CFI info if it happens
    * to change after the calling instruction and that will mean
    * that we will fail to unwind the next step.
    *
    * This most frequently happens at the end of a function when
    * a tail call occurs and we wind up using the CFI info for the
    * next function which is completely wrong.
    */
   while (True) {

      if (i >= n_ips)
         break;

      /* Try to derive a new (ip,sp,fp) triple from the current
         set. */

      /* On x86, first try the old-fashioned method of following the
         %ebp-chain.  Code which doesn't use this (that is, compiled
         with -fomit-frame-pointer) is not ABI compliant and so
         relatively rare.  Besides, trying the CFI first almost always
         fails, and is expensive. */
      /* Deal with frames resulting from functions which begin "pushl
         %ebp ; movl %esp, %ebp", the ABI-mandated preamble. */
      if (fp_min <= fp && fp <= fp_max) {
         /* fp looks sane, so use it. */
         ip = (((UWord*)fp)[1]);
         sp = fp + sizeof(Addr) /*saved %ebp*/ 
                 + sizeof(Addr) /*ra*/;
         fp = (((UWord*)fp)[0]);
         ips[i++] = ip;
         if (debug)
            VG_(printf)("     ipsF[%d]=%08p\n", i-1, ips[i-1]);
         ip = ip - 1;
         continue;
      }

      /* That didn't work out, so see if there is any CFI info to hand
         which can be used. */
      if ( VG_(use_CF_info)( &ip, &sp, &fp, fp_min, fp_max ) ) {
         ips[i++] = ip;
         if (debug)
            VG_(printf)("     ipsC[%d]=%08p\n", i-1, ips[i-1]);
         ip = ip - 1;
         continue;
      }

      /* No luck.  We have to give up. */
      break;
   }

#  elif defined(VGP_amd64_linux)

   /*--------------------- amd64 ---------------------*/

   /* fp is %rbp.  sp is %rsp.  ip is %rip. */

   ips[0] = ip;
   i = 1;

   /* Loop unwinding the stack. Note that the IP value we get on
    * each pass (whether from CFI info or a stack frame) is a
    * return address so is actually after the calling instruction
    * in the calling function.
    *
    * Because of this we subtract one from the IP after each pass
    * of the loop so that we find the right CFI block on the next
    * pass - otherwise we can find the wrong CFI info if it happens
    * to change after the calling instruction and that will mean
    * that we will fail to unwind the next step.
    *
    * This most frequently happens at the end of a function when
    * a tail call occurs and we wind up using the CFI info for the
    * next function which is completely wrong.
    */
   while (True) {

      if (i >= n_ips)
         break;

      /* Try to derive a new (ip,sp,fp) triple from the current
         set. */

      /* First off, see if there is any CFI info to hand which can
         be used. */
      if ( VG_(use_CF_info)( &ip, &sp, &fp, fp_min, fp_max ) ) {
         ips[i++] = ip;
         if (debug)
            VG_(printf)("     ipsC[%d]=%08p\n", i-1, ips[i-1]);
         ip = ip - 1;
         continue;
      }

      /* If VG_(use_CF_info) fails, it won't modify ip/sp/fp, so
         we can safely try the old-fashioned method. */
      /* This bit is supposed to deal with frames resulting from
         functions which begin "pushq %rbp ; movq %rsp, %rbp".
         Unfortunately, since we can't (easily) look at the insns at
         the start of the fn, like GDB does, there's no reliable way
         to tell.  Hence the hack of first trying out CFI, and if that
         fails, then use this as a fallback. */
      if (fp_min <= fp && fp <= fp_max) {
         /* fp looks sane, so use it. */
         ip = (((UWord*)fp)[1]);
         sp = fp + sizeof(Addr) /*saved %rbp*/ 
                 + sizeof(Addr) /*ra*/;
         fp = (((UWord*)fp)[0]);
         ips[i++] = ip;
         if (debug)
            VG_(printf)("     ipsF[%d]=%08p\n", i-1, ips[i-1]);
         ip = ip - 1;
         continue;
      }

      /* No luck there.  We have to give up. */
      break;
   }

#  elif defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux)

   /*--------------------- ppc32/64 ---------------------*/

   /* fp is %r1.  ip is %cia.  Note, ppc uses r1 as both the stack and
      frame pointers. */

#  if defined(VGP_ppc64_linux)
   /* Deal with bogus LR values caused by function
      interception/wrapping; see comment on similar code a few lines
      further down. */
   if (lr == (Addr)&VG_(ppc64_linux_magic_redirect_return_stub)
       && VG_(is_valid_tid)(tid_if_known)) {
      Long hsp = VG_(threads)[tid_if_known].arch.vex.guest_REDIR_SP;
      if (hsp >= 1 && hsp < VEX_GUEST_PPC64_REDIR_STACK_SIZE)
         lr = VG_(threads)[tid_if_known]
                 .arch.vex.guest_REDIR_STACK[hsp-1];
   }
#  endif

   /* Decide whether LR holds the first return address: if the symbol
      for LR differs from the symbol for IP, we are most likely in a
      prologue or a leaf routine where the return address has not yet
      been saved to the stack, so the first RA should be taken from LR
      itself. */
   lr_is_first_RA = False;
   {
#     define M_VG_ERRTXT 1000
      UChar buf_lr[M_VG_ERRTXT], buf_ip[M_VG_ERRTXT];
      if (VG_(get_fnname_nodemangle) (lr, buf_lr, M_VG_ERRTXT))
         if (VG_(get_fnname_nodemangle) (ip, buf_ip, M_VG_ERRTXT))
            if (VG_(strncmp)(buf_lr, buf_ip, M_VG_ERRTXT))
               lr_is_first_RA = True;
#     undef M_VG_ERRTXT
   }

   ips[0] = ip;
   i = 1;

   if (fp_min <= fp && fp < fp_max-VG_WORDSIZE+1) {

      /* initial FP is sane; keep going */
      fp = (((UWord*)fp)[0]);

      while (True) {

        /* on ppc64-linux (ppc64-elf, really), the lr save slot is 2
           words back from sp, whereas on ppc32-elf(?) it's only one
           word back. */
#        if defined(VGP_ppc64_linux)
         const Int lr_offset = 2;
#        else
         const Int lr_offset = 1;
#        endif
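         /* For reference: the ppc64 ELF stack frame header is
               sp[0] = back chain, sp[1] = saved CR, sp[2] = saved LR
            while the ppc32 SysV frame header is just
               sp[0] = back chain, sp[1] = saved LR
            which is where the offsets of 2 and 1 words come from. */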

         if (i >= n_ips)
            break;

         /* Try to derive a new (ip,fp) pair from the current set. */

         if (fp_min <= fp && fp <= fp_max) {
            /* fp looks sane, so use it. */

            if (i == 1 && lr_is_first_RA)
               ip = lr;
            else
               ip = (((UWord*)fp)[lr_offset]);

#           if defined(VGP_ppc64_linux)
            /* Nasty hack to do with function replacement/wrapping on
               ppc64-linux.  If LR points to our magic return stub,
               then we are in a wrapped or intercepted function, in
               which LR has been messed with.  The original LR will
               have been pushed onto the thread's hidden REDIR stack
               one down from the top (top element is the saved R2) and
               so we should restore the value from there instead. */
            if (i == 1 
                && ip == (Addr)&VG_(ppc64_linux_magic_redirect_return_stub)
                && VG_(is_valid_tid)(tid_if_known)) {
               Long hsp = VG_(threads)[tid_if_known].arch.vex.guest_REDIR_SP;
               if (hsp >= 1 && hsp < VEX_GUEST_PPC64_REDIR_STACK_SIZE)
                  ip = VG_(threads)[tid_if_known]
                          .arch.vex.guest_REDIR_STACK[hsp-1];
            }
#           endif

            fp = (((UWord*)fp)[0]);
            ips[i++] = ip;
            if (debug)
               VG_(printf)("     ipsF[%d]=%08p\n", i-1, ips[i-1]);
            continue;
         }

         /* No luck there.  We have to give up. */
         break;
      }
   }

#  else
#    error "Unknown platform"
#  endif

   n_found = i;
   return n_found;
}
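
Stripped of the CFI attempts and debug printing, the frame-pointer walk
above reduces to a small core loop.  A minimal standalone sketch follows
(hypothetical helper, simplified types and bounds; not part of Valgrind):

typedef unsigned long UWord;

/* Walk a chain of frames built by the standard "push fp ; mov sp, fp"
   preamble.  'lo' and 'hi' bound the plausible stack area, playing the
   role of fp_min/fp_max above.  Returns the number of IPs written. */
static int unwind_fp_chain(UWord ip, UWord fp, UWord lo, UWord hi,
                           UWord *ips, int max_ips)
{
   int i = 0;
   if (i < max_ips)
      ips[i++] = ip;
   while (i < max_ips && lo <= fp && fp <= hi - 2*sizeof(UWord)) {
      UWord *frame = (UWord*)fp;
      UWord  next  = frame[0];      /* saved frame pointer  -> next fp */
      ips[i++]     = frame[1];      /* saved return address -> next ip */
      if (next <= fp)               /* refuse cycles and bogus chains  */
         break;
      fp = next;
   }
   return i;
}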
Esempio n. 28
0
/* Top-level entry point to the error management subsystem.  All
   detected errors are notified here; this routine decides if/when the
   user should see the error. */
static void VG_(maybe_add_context) ( ErrContext* ec )
{
   ErrContext* p;
   ErrContext* p_prev;
   Bool        cheap_addr_cmp         = False;
   static Bool is_first_shown_context = True;
   static Bool stopping_message       = False;
   static Bool slowdown_message       = False;
   static Int  vg_n_errs_shown        = 0;

   vg_assert(ec->tid >= 0 && ec->tid < VG_N_THREADS);

   /* After M_VG_COLLECT_NO_ERRORS_AFTER_SHOWN different errors have
      been shown, or M_VG_COLLECT_NO_ERRORS_AFTER_FOUND total errors
      have been found, just refuse to collect any more.  This stops
      the error-management system from becoming an excessive burden in
      extremely buggy programs, although it does make it pretty
      pointless to continue the Valgrind run after this point. */
   if (VG_(clo_error_limit) 
       && (vg_n_errs_shown >= M_VG_COLLECT_NO_ERRORS_AFTER_SHOWN
           || vg_n_errs_found >= M_VG_COLLECT_NO_ERRORS_AFTER_FOUND)) {
      if (!stopping_message) {
         VG_(message)(Vg_UserMsg, "");

         if (vg_n_errs_shown >= M_VG_COLLECT_NO_ERRORS_AFTER_SHOWN) {
            VG_(message)(Vg_UserMsg, 
               "More than %d different errors detected.  "
               "I'm not reporting any more.",
               M_VG_COLLECT_NO_ERRORS_AFTER_SHOWN );
         } else {
            VG_(message)(Vg_UserMsg, 
               "More than %d total errors detected.  "
               "I'm not reporting any more.",
               M_VG_COLLECT_NO_ERRORS_AFTER_FOUND );
         }

         VG_(message)(Vg_UserMsg, 
            "Final error counts will be inaccurate.  Go fix your program!");
         VG_(message)(Vg_UserMsg, 
            "Rerun with --error-limit=no to disable this cutoff.  Note");
         VG_(message)(Vg_UserMsg, 
            "that your program may now segfault without prior warning from");
         VG_(message)(Vg_UserMsg, 
            "Valgrind, because errors are no longer being displayed.");
         VG_(message)(Vg_UserMsg, "");
         stopping_message = True;
         vg_ignore_errors = True;
      }
      return;
   }

   /* After M_VG_COLLECT_ERRORS_SLOWLY_AFTER different errors have
      been found, be much more conservative about collecting new
      ones. */
   if (vg_n_errs_shown >= M_VG_COLLECT_ERRORS_SLOWLY_AFTER) {
      cheap_addr_cmp = True;
      if (!slowdown_message) {
         VG_(message)(Vg_UserMsg, "");
         VG_(message)(Vg_UserMsg, 
            "More than %d errors detected.  Subsequent errors",
            M_VG_COLLECT_ERRORS_SLOWLY_AFTER);
         VG_(message)(Vg_UserMsg, 
            "will still be recorded, but in less detail than before.");
         slowdown_message = True;
      }
   }


   /* First, see if we've got an error record matching this one. */
   p      = vg_err_contexts;
   p_prev = NULL;
   while (p != NULL) {
      if (eq_ErrContext(cheap_addr_cmp, p, ec)) {
         /* Found it. */
         p->count++;
         if (p->supp != NULL) {
            /* Deal correctly with suppressed errors. */
            p->supp->count++;
            vg_n_errs_suppressed++;
         } else {
            vg_n_errs_found++;
         }

         /* Move p to the front of the list so that future searches
            for it are faster. */
         if (p_prev != NULL) {
            vg_assert(p_prev->next == p);
            p_prev->next    = p->next;
            p->next         = vg_err_contexts;
            vg_err_contexts = p;
         }
         return;
      }
      p_prev = p;
      p      = p->next;
   }

   /* Didn't see it.  Copy and add. */

   /* OK, we're really going to collect it.  First, describe any addr
      info in the error. */
   if (ec->addrinfo.akind == Undescribed)
      VG_(describe_addr) ( ec->addr, &ec->addrinfo );

   p = VG_(malloc)(VG_AR_ERRCTXT, sizeof(ErrContext));
   *p = *ec;
   p->next = vg_err_contexts;
   p->supp = is_suppressible_error(ec);
   vg_err_contexts = p;
   if (p->supp == NULL) {
      vg_n_errs_found++;
      if (!is_first_shown_context)
         VG_(message)(Vg_UserMsg, "");
      pp_ErrContext(p, False);      
      is_first_shown_context = False;
      vg_n_errs_shown++;
      /* Perhaps we want a GDB attach at this point? */
      if (vg_is_GDB_attach_requested()) {
         VG_(swizzle_esp_then_start_GDB)(
            ec->m_eip, ec->m_esp, ec->m_ebp);
      }
   } else {
      vg_n_errs_suppressed++;
      p->supp->count++;
   }
}
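
The move-to-front step in the search loop above is a classic
self-organizing-list trick: records for errors that keep recurring
migrate to the head of the list, so repeated errors are matched almost
immediately.  A minimal generic sketch (hypothetical node type, not the
ErrContext machinery):

#include <stddef.h>

struct node { int key; struct node *next; };

/* Search a singly-linked list for 'key'; on a hit, splice the node to
   the front so later lookups for the same key terminate quickly. */
static struct node* find_mtf(struct node **head, int key)
{
   struct node *p = *head, *prev = NULL;
   while (p != NULL) {
      if (p->key == key) {
         if (prev != NULL) {        /* not already at the front */
            prev->next = p->next;
            p->next    = *head;
            *head      = p;
         }
         return p;
      }
      prev = p;
      p    = p->next;
   }
   return NULL;
}
Esempio n. 29
0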
/* Create a signal frame for thread 'tid'.  Make a 3-arg frame
   regardless of whether the client originally requested a 1-arg
   version (no SA_SIGINFO) or a 3-arg one (SA_SIGINFO), since in the
   former case the amd64 calling conventions will simply cause the
   extra 2 args to be ignored (inside the handler).  (We hope!) */
void VG_(sigframe_create) ( ThreadId tid,
                            Addr sp_top_of_frame,
                            const vki_siginfo_t *siginfo,
                            const struct vki_ucontext *siguc,
                            void *handler,
                            UInt flags,
                            const vki_sigset_t *mask,
                            void *restorer )
{
   ThreadState* tst;
   Addr rsp;
   struct hacky_sigframe* frame;
   Int sigNo = siginfo->si_signo;

   vg_assert(VG_IS_16_ALIGNED(sizeof(struct hacky_sigframe)));

   sp_top_of_frame &= ~0xfUL;
   rsp = sp_top_of_frame - sizeof(struct hacky_sigframe);
   rsp -= 8; /* The ELF ABI says that rsp+8 must be 16-aligned on
                entry to a function. */
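   /* Worked example: sp_top_of_frame was rounded down to a 16-byte
      boundary, and sizeof(struct hacky_sigframe) is asserted to be a
      multiple of 16, so sp_top_of_frame - sizeof(frame) is still
      16-aligned; subtracting a further 8 leaves rsp 8 bytes below a
      16-byte boundary, i.e. rsp+8 is 16-aligned - exactly the state
      a function body sees immediately after a 'call'. */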

   tst = VG_(get_ThreadState)(tid);
   if (! ML_(sf_extend_stack)(tst, rsp, sp_top_of_frame - rsp))
      return;

   vg_assert(VG_IS_16_ALIGNED(rsp+8));

   frame = (struct hacky_sigframe *) rsp;

   /* clear it (very conservatively) (why so conservatively??) */
   VG_(memset)(&frame->lower_guardzone, 0, sizeof frame->lower_guardzone);
   VG_(memset)(&frame->gst,      0, sizeof(VexGuestAMD64State));
   VG_(memset)(&frame->gshadow1, 0, sizeof(VexGuestAMD64State));
   VG_(memset)(&frame->gshadow2, 0, sizeof(VexGuestAMD64State));
   VG_(memset)(&frame->fake_siginfo,  0, sizeof(frame->fake_siginfo));
   VG_(memset)(&frame->fake_ucontext, 0, sizeof(frame->fake_ucontext));

   /* save stuff in frame */
   frame->gst           = tst->arch.vex;
   frame->gshadow1      = tst->arch.vex_shadow1;
   frame->gshadow2      = tst->arch.vex_shadow2;
   frame->sigNo_private = sigNo;
   frame->mask          = tst->sig_mask;
   frame->magicPI       = 0x31415927;

   /* Minimally fill in the siginfo and ucontext.  Note, utter
      lameness prevails.  Be underwhelmed, be very underwhelmed. */
   frame->fake_siginfo.si_signo = sigNo;
   frame->fake_siginfo.si_code  = siginfo->si_code;

   /* Set up stack pointer */
   vg_assert(rsp == (Addr)&frame->returnAddr);
   VG_(set_SP)(tid, rsp);
   VG_TRACK( post_reg_write, Vg_CoreSignal, tid, VG_O_STACK_PTR, sizeof(ULong));

   /* Set up program counter */
   VG_(set_IP)(tid, (ULong)handler);
   VG_TRACK( post_reg_write, Vg_CoreSignal, tid, VG_O_INSTR_PTR, sizeof(ULong));

   /* Set up RA and args for the frame */
   VG_TRACK( pre_mem_write, Vg_CoreSignal, tid, "signal handler frame",
             (Addr)frame, 1*sizeof(ULong) );
   frame->returnAddr  = (ULong)&VG_(amd64_darwin_SUBST_FOR_sigreturn);

   /* XXX should tell the tool that these regs got written */
   tst->arch.vex.guest_RDI = (ULong) sigNo;
   tst->arch.vex.guest_RSI = (Addr)  &frame->fake_siginfo;/* oh well */
   tst->arch.vex.guest_RDX = (Addr)  &frame->fake_ucontext; /* oh well */

   VG_TRACK( post_mem_write, Vg_CoreSignal, tid,
             (Addr)frame, 1*sizeof(ULong) );
   VG_TRACK( post_mem_write, Vg_CoreSignal, tid,
             (Addr)&frame->fake_siginfo, sizeof(frame->fake_siginfo));
   VG_TRACK( post_mem_write, Vg_CoreSignal, tid,
             (Addr)&frame->fake_ucontext, sizeof(frame->fake_ucontext));

   if (VG_(clo_trace_signals))
      VG_(message)(Vg_DebugMsg,
                   "sigframe_create (thread %d): "
                   "next EIP=%#lx, next ESP=%#lx\n",
                   tid, (Addr)handler, (Addr)frame );
}
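
For context, loading RDI/RSI/RDX above follows the SysV AMD64 argument
registers, which is why one frame layout serves both handler flavours.
A hedged illustration of the two signatures a client might register
(standard POSIX, nothing Valgrind-specific):

#include <signal.h>

/* 1-arg handler (no SA_SIGINFO): the values left in RSI and RDX are
   simply never read. */
static void plain_handler(int signo)
{
   (void)signo;
}

/* 3-arg handler (SA_SIGINFO): receives signo from RDI, a siginfo_t*
   from RSI and a ucontext pointer from RDX. */
static void siginfo_handler(int signo, siginfo_t *info, void *ucontext)
{
   (void)signo; (void)info; (void)ucontext;
}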
Esempio n. 30
0
/* Used for printing leak errors; avoids exposing the LossRecord type
   (which comes in as void*, requiring a cast). */
void MAC_(pp_LeakError)(void* vextra)
{
   HChar* xpre  = VG_(clo_xml) ? "  <what>" : "";
   HChar* xpost = VG_(clo_xml) ? "</what>"  : "";

   LeakExtra* extra = (LeakExtra*)vextra;
   LossRecord* l    = extra->lossRecord;
   const Char *loss = str_lossmode(l->loss_mode);

   if (VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg, "  <kind>%t</kind>", xml_kind(l->loss_mode));
   } else {
      VG_(message)(Vg_UserMsg, "");
   }

   if (l->indirect_bytes) {
      VG_(message)(Vg_UserMsg, 
         "%s%d (%d direct, %d indirect) bytes in %d blocks"
         " are %s in loss record %d of %d%s",
         xpre,
         l->total_bytes + l->indirect_bytes, 
         l->total_bytes, l->indirect_bytes, l->num_blocks,
         loss, extra->n_this_record, extra->n_total_records,
         xpost
      );
      if (VG_(clo_xml)) {
         VG_(message)(Vg_UserMsg, "  <leakedbytes>%d</leakedbytes>", 
                                  l->total_bytes + l->indirect_bytes);
         VG_(message)(Vg_UserMsg, "  <leakedblocks>%d</leakedblocks>", 
                                  l->num_blocks);
      }
   } else {
      VG_(message)(
         Vg_UserMsg, 
         "%s%d bytes in %d blocks are %s in loss record %d of %d%s",
         xpre,
         l->total_bytes, l->num_blocks,
         loss, extra->n_this_record, extra->n_total_records,
         xpost
      );
      if (VG_(clo_xml)) {
         VG_(message)(Vg_UserMsg, "  <leakedbytes>%d</leakedbytes>", 
                                  l->total_bytes);
         VG_(message)(Vg_UserMsg, "  <leakedblocks>%d</leakedblocks>", 
                                  l->num_blocks);
      }
   }
   VG_(pp_ExeContext)(l->allocated_at);
}
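
str_lossmode() is not part of this excerpt.  A plausible sketch,
assuming the leak checker's Reachedness states and Memcheck's usual
wording (hypothetical reconstruction, for illustration only):

/* Hypothetical: map a block's reachedness to the phrase printed in
   leak reports.  Enum values assumed from the leak-checker code. */
static const Char* str_lossmode(Reachedness lossmode)
{
   switch (lossmode) {
      case Unreached:    return "definitely lost";
      case IndirectLeak: return "indirectly lost";
      case Interior:     return "possibly lost";
      case Proper:       return "still reachable";
      default:           return "of unknown status";
   }
}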