Example #1
// Process the mark stack until empty.
static void lc_process_markstack(Int clique)
{
   Int  top = -1;    // shut gcc up
   Bool is_prior_definite;

   while (lc_pop(&top)) {
      tl_assert(top >= 0 && top < lc_n_chunks);      

      // See comment about 'is_prior_definite' at the top to understand this.
      is_prior_definite = ( Possible != lc_extras[top].state );

      lc_scan_memory(lc_chunks[top]->data, lc_chunks[top]->szB,
                     is_prior_definite, clique);
   }
}
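
For context, lc_process_markstack depends on two helpers, lc_push and lc_pop, which this example does not show. Below is a minimal sketch of how such an intrusive mark stack can be threaded through the lc_markstack Int array used in Examples #3 and #4; the layout is inferred from the surrounding code, not copied from the original file.

/* Sketch only: lc_markstack[] threads a singly-linked stack through
   chunk indices; -1 means both "end of stack" and "not on the stack". */
static void lc_push(Int n, MC_Chunk* ch)
{
   if (ch->szB == 0)
      return;                        /* nothing to scan in an empty block */
   lc_markstack[n]  = lc_markstack_top;
   lc_markstack_top = n;
}

static Bool lc_pop(Int* ret)
{
   if (lc_markstack_top == -1)
      return False;                  /* stack empty: traversal finished */
   tl_assert(0 <= lc_markstack_top && lc_markstack_top < lc_n_chunks);
   *ret               = lc_markstack_top;
   lc_markstack_top   = lc_markstack[lc_markstack_top];
   lc_markstack[*ret] = -1;          /* detach the popped entry */
   return True;
}
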
Example #2
/* Top level entry point to leak detector.  Call here, passing in
   suitable address-validating functions (see comment at top of
   scan_all_valid_memory above).  These functions used to encapsulate the
   differences between Memcheck and Addrcheck;  they no longer do but it
   doesn't hurt to keep them here.
*/
void MC_(do_detect_memory_leaks) (
   ThreadId tid, LeakCheckMode mode,
   Bool (*is_within_valid_secondary) ( Addr ),
   Bool (*is_valid_aligned_word)     ( Addr )
)
{
   Int i;
   
   tl_assert(mode != LC_Off);

   lc_shadows = find_active_shadows(&lc_n_shadows);

   /* Sort the array. */
   VG_(ssort)((void*)lc_shadows, lc_n_shadows, sizeof(VgHashNode*), lc_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < lc_n_shadows-1; i++) {
      tl_assert( lc_shadows[i]->data <= lc_shadows[i+1]->data);
   }

   /* Sanity check -- make sure they don't overlap.  But do allow
      exact duplicates.  If this assertion fails, it may mean that the
      application has done something stupid with
      VALGRIND_MALLOCLIKE_BLOCK client requests, specifically, has
      made overlapping requests (which are nonsensical).  Another way
      to screw up is to use VALGRIND_MALLOCLIKE_BLOCK for stack
      locations; again nonsensical. */
   for (i = 0; i < lc_n_shadows-1; i++) {
      Bool nonsense_overlap = ! (
            /* normal case - no overlap */
            (lc_shadows[i]->data + lc_shadows[i]->szB <= lc_shadows[i+1]->data)
         ||
            /* degenerate case: exact duplicates */
              (lc_shadows[i]->data == lc_shadows[i+1]->data
            && lc_shadows[i]->szB == lc_shadows[i+1]->szB)
         );
      if (nonsense_overlap) {
         VG_(message)(Vg_UserMsg, "Block [0x%lx, 0x%lx) overlaps with block [0x%lx, 0x%lx)",
                      lc_shadows[   i]->data, (lc_shadows[   i]->data + lc_shadows[   i]->szB),
                      lc_shadows[1+ i]->data, (lc_shadows[1+ i]->data + lc_shadows[1+ i]->szB) );
      }
      tl_assert (!nonsense_overlap);
   }

   if (lc_n_shadows == 0) {
      tl_assert(lc_shadows == NULL);
      if (VG_(clo_verbosity) >= 1 && !VG_(clo_xml)) {
         VG_(message)(Vg_UserMsg, 
                      "All heap blocks were freed -- no leaks are possible.");
      }
      return;
   }

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
      VG_(message)(Vg_UserMsg, 
                   "searching for pointers to %'d not-freed blocks.",
                   lc_n_shadows );

   lc_min_mallocd_addr = lc_shadows[0]->data;
   lc_max_mallocd_addr = lc_shadows[lc_n_shadows-1]->data
                         + lc_shadows[lc_n_shadows-1]->szB;

   lc_markstack = VG_(malloc)( "mc.ddml.1",
                               lc_n_shadows * sizeof(*lc_markstack) );
   for (i = 0; i < lc_n_shadows; i++) {
      lc_markstack[i].next = -1;
      lc_markstack[i].state = Unreached;
      lc_markstack[i].indirect = 0;
   }
   lc_markstack_top = -1;

   lc_is_within_valid_secondary = is_within_valid_secondary;
   lc_is_valid_aligned_word     = is_valid_aligned_word;

   lc_scanned = 0;

   /* Push roots onto the mark stack.  Roots are:
      - the integer registers of all threads
      - all mappings belonging to the client, including stacks
      - .. but excluding any client heap segments.
      Client heap segments are excluded because we wish to differentiate
      client heap blocks which are referenced only from inside the heap
      from those outside.  This facilitates the indirect vs direct loss
      categorisation, which [if the users ever manage to understand it]
      is really useful for detecting lost cycles.
   */
   { Addr*     seg_starts;
     Int       n_seg_starts;
     seg_starts = get_seg_starts( &n_seg_starts );
     tl_assert(seg_starts && n_seg_starts > 0);
     /* VG_(am_show_nsegments)( 0,"leakcheck"); */
     for (i = 0; i < n_seg_starts; i++) {
        NSegment const* seg = VG_(am_find_nsegment)( seg_starts[i] );
        tl_assert(seg);
        if (seg->kind != SkFileC && seg->kind != SkAnonC) 
           continue;
        if (!(seg->hasR && seg->hasW))
           continue;
        if (seg->isCH)
           continue;

        /* Don't poke around in device segments as this may cause
           hangs.  Exclude /dev/zero just in case someone allocated
           memory by explicitly mapping /dev/zero. */
        if (seg->kind == SkFileC 
            && (VKI_S_ISCHR(seg->mode) || VKI_S_ISBLK(seg->mode))) {
           HChar* dev_name = VG_(am_get_filename)( (NSegment*)seg );
           if (dev_name && 0 == VG_(strcmp)(dev_name, "/dev/zero")) {
              /* don't skip /dev/zero */
           } else {
              /* skip this device mapping */
              continue;
           }
        }

        if (0)
           VG_(printf)("ACCEPT %2d  %#lx %#lx\n", i, seg->start, seg->end);
        lc_scan_memory(seg->start, seg->end+1 - seg->start);
     }
   }

   /* Push registers onto mark stack */
   VG_(apply_to_GP_regs)(lc_markstack_push);

   /* Keep walking the heap until everything is found */
   lc_do_leakcheck(-1);

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
      VG_(message)(Vg_UserMsg, "checked %'lu bytes.", lc_scanned);

   blocks_leaked     = MC_(bytes_leaked)     = 0;
   blocks_indirect   = MC_(bytes_indirect)   = 0;
   blocks_dubious    = MC_(bytes_dubious)    = 0;
   blocks_reachable  = MC_(bytes_reachable)  = 0;
   blocks_suppressed = MC_(bytes_suppressed) = 0;

   if (mode == LC_Full)
      full_report(tid);
   else
      make_summary();

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg, "");
      VG_(message)(Vg_UserMsg, "LEAK SUMMARY:");
      VG_(message)(Vg_UserMsg, "   definitely lost: %'lu bytes in %'lu blocks.",
                               MC_(bytes_leaked), blocks_leaked );
      if (blocks_indirect > 0)
         VG_(message)(Vg_UserMsg, "   indirectly lost: %'lu bytes in %'lu blocks.",
                      MC_(bytes_indirect), blocks_indirect );
      VG_(message)(Vg_UserMsg, "     possibly lost: %'lu bytes in %'lu blocks.",
                               MC_(bytes_dubious), blocks_dubious );
      VG_(message)(Vg_UserMsg, "   still reachable: %'lu bytes in %'lu blocks.",
                               MC_(bytes_reachable), blocks_reachable );
      VG_(message)(Vg_UserMsg, "        suppressed: %'lu bytes in %'lu blocks.",
                               MC_(bytes_suppressed), blocks_suppressed );
      if (mode == LC_Summary 
          && (blocks_leaked + blocks_indirect 
              + blocks_dubious + blocks_reachable) > 0) {
         VG_(message)(Vg_UserMsg,
                      "Rerun with --leak-check=full to see details of leaked memory.");
      }
      if (blocks_reachable > 0 && !MC_(clo_show_reachable) && mode == LC_Full) {
         VG_(message)(Vg_UserMsg, 
           "Reachable blocks (those to which a pointer was found) are not shown.");
         VG_(message)(Vg_UserMsg, 
            "To see them, rerun with: --leak-check=full --show-reachable=yes");
      }
   }

   VG_(free) ( lc_shadows );
   VG_(free) ( lc_markstack );
}
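
The overlap assertion in this example fires when a client misuses VALGRIND_MALLOCLIKE_BLOCK. A hypothetical client fragment (not from the Valgrind sources; arena and bad_client are made-up names) that would trip the nonsense_overlap check:

#include <valgrind/valgrind.h>

static char arena[1024];

void bad_client(void)
{
   /* Two overlapping registrations -- exactly the "nonsensical"
      overlapping requests the comment above warns about. */
   VALGRIND_MALLOCLIKE_BLOCK(arena,       512, /*rzB*/0, /*is_zeroed*/0);
   VALGRIND_MALLOCLIKE_BLOCK(arena + 256, 512, /*rzB*/0, /*is_zeroed*/0);
}
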
Example #3
void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckMode mode )
{
   Int i;
   
   tl_assert(mode != LC_Off);

   // Get the chunks, stop if there were none.
   lc_chunks = find_active_chunks(&lc_n_chunks);
   if (lc_n_chunks == 0) {
      tl_assert(lc_chunks == NULL);
      if (VG_(clo_verbosity) >= 1 && !VG_(clo_xml)) {
         VG_(UMSG)("All heap blocks were freed -- no leaks are possible.\n");
      }
      return;
   }

   // Sort the array so blocks are in ascending order in memory.
   VG_(ssort)(lc_chunks, lc_n_chunks, sizeof(VgHashNode*), compare_MC_Chunks);

   // Sanity check -- make sure they're in order.
   for (i = 0; i < lc_n_chunks-1; i++) {
      tl_assert( lc_chunks[i]->data <= lc_chunks[i+1]->data);
   }

   // Sanity check -- make sure they don't overlap.  But do allow exact
   // duplicates.  If this assertion fails, it may mean that the application
   // has done something stupid with VALGRIND_MALLOCLIKE_BLOCK client
   // requests, specifically, has made overlapping requests (which are
   // nonsensical).  Another way to screw up is to use
   // VALGRIND_MALLOCLIKE_BLOCK for stack locations; again nonsensical.
   for (i = 0; i < lc_n_chunks-1; i++) {
      MC_Chunk* ch1 = lc_chunks[i];
      MC_Chunk* ch2 = lc_chunks[i+1];
      Bool nonsense_overlap = ! (
            // Normal case - no overlap.
            (ch1->data + ch1->szB <= ch2->data) ||
            // Degenerate case: exact duplicates.
            (ch1->data == ch2->data && ch1->szB  == ch2->szB)
         );
      if (nonsense_overlap) {
         VG_(UMSG)("Block [0x%lx, 0x%lx) overlaps with block [0x%lx, 0x%lx)\n",
                   ch1->data, (ch1->data + ch1->szB),
                   ch2->data, (ch2->data + ch2->szB));
      }
      tl_assert (!nonsense_overlap);
   }

   // Initialise lc_extras.
   lc_extras = VG_(malloc)( "mc.dml.2", lc_n_chunks * sizeof(LC_Extra) );
   for (i = 0; i < lc_n_chunks; i++) {
      lc_extras[i].state        = Unreached;
      lc_extras[i].indirect_szB = 0;
   }

   // Initialise lc_markstack.
   lc_markstack = VG_(malloc)( "mc.dml.2", lc_n_chunks * sizeof(Int) );
   for (i = 0; i < lc_n_chunks; i++) {
      lc_markstack[i] = -1;
   }
   lc_markstack_top = -1;

   // Verbosity.
   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
      VG_(UMSG)( "searching for pointers to %'d not-freed blocks.\n",
                 lc_n_chunks );

   // Scan the memory root-set, pushing onto the mark stack any blocks
   // pointed to.
   {
      Int   n_seg_starts;
      Addr* seg_starts = VG_(get_segment_starts)( &n_seg_starts );

      tl_assert(seg_starts && n_seg_starts > 0);

      lc_scanned_szB = 0;

      // VG_(am_show_nsegments)( 0, "leakcheck");
      for (i = 0; i < n_seg_starts; i++) {
         SizeT seg_size;
         NSegment const* seg = VG_(am_find_nsegment)( seg_starts[i] );
         tl_assert(seg);

         if (seg->kind != SkFileC && seg->kind != SkAnonC) continue;
         if (!(seg->hasR && seg->hasW))                    continue;
         if (seg->isCH)                                    continue;

         // Don't poke around in device segments as this may cause
         // hangs.  Exclude /dev/zero just in case someone allocated
         // memory by explicitly mapping /dev/zero.
         if (seg->kind == SkFileC 
             && (VKI_S_ISCHR(seg->mode) || VKI_S_ISBLK(seg->mode))) {
            HChar* dev_name = VG_(am_get_filename)( (NSegment*)seg );
            if (dev_name && 0 == VG_(strcmp)(dev_name, "/dev/zero")) {
               // Don't skip /dev/zero.
            } else {
               // Skip this device mapping.
               continue;
            }
         }

         if (0)
            VG_(printf)("ACCEPT %2d  %#lx %#lx\n", i, seg->start, seg->end);

         // Scan the segment.  We use -1 for the clique number, because this
         // is a root-set.
         seg_size = seg->end - seg->start + 1;
         if (VG_(clo_verbosity) > 2) {
            VG_(message)(Vg_DebugMsg,
                         "  Scanning root segment: %#lx..%#lx (%lu)\n",
                         seg->start, seg->end, seg_size);
         }
         lc_scan_memory(seg->start, seg_size, /*is_prior_definite*/True, -1);
      }
   }

   // Scan GP registers for chunk pointers.
   VG_(apply_to_GP_regs)(lc_push_if_a_chunk_ptr_register);

   // Process the pushed blocks.  After this, every block that is reachable
   // from the root-set has been traced.
   lc_process_markstack(/*clique*/-1);

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
      VG_(UMSG)("checked %'lu bytes.\n", lc_scanned_szB);

   // Trace all the leaked blocks to determine which are directly leaked and
   // which are indirectly leaked.  For each Unreached block, push it onto
   // the mark stack, and find all the as-yet-Unreached blocks reachable
   // from it.  These form a clique and are marked IndirectLeak, and their
   // size is added to the clique leader's indirect size.  If one of the
   // found blocks was itself a clique leader (from a previous clique), then
   // the cliques are merged.
   for (i = 0; i < lc_n_chunks; i++) {
      MC_Chunk* ch = lc_chunks[i];
      LC_Extra* ex = &(lc_extras[i]);

      if (VG_DEBUG_CLIQUE)
         VG_(printf)("cliques: %d at %#lx -> Loss state %d\n",
                     i, ch->data, ex->state);

      tl_assert(lc_markstack_top == -1);

      if (ex->state == Unreached) {
         if (VG_DEBUG_CLIQUE)
            VG_(printf)("%d: gathering clique %#lx\n", i, ch->data);
         
         // Push this Unreached block onto the stack and process it.
         lc_push(i, ch);
         lc_process_markstack(i);

         tl_assert(lc_markstack_top == -1);
         tl_assert(ex->state == Unreached);
      }
   }
      
   print_results( tid, ( mode == LC_Full ? True : False ) );

   VG_(free) ( lc_chunks );
   VG_(free) ( lc_extras );
   VG_(free) ( lc_markstack );
}
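
To see what the clique pass computes, consider a small illustrative program (not part of the Valgrind sources) that produces both loss kinds:

#include <stdlib.h>

struct node { struct node* next; };

int main(void)
{
   struct node* head = malloc(sizeof *head);
   head->next       = malloc(sizeof *head->next);
   head->next->next = NULL;
   head = NULL;    /* drop the only external pointer */
   /* Both blocks are now Unreached.  The clique pass picks the first
      block as a clique leader ("definitely lost") and marks the second,
      reachable only from the first, as IndirectLeak ("indirectly lost"),
      adding its size to the leader's indirect_szB. */
   return 0;
}
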
Example #4
void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckMode mode )
{
   Int i, j;
   
   tl_assert(mode != LC_Off);

   // Get the chunks, stop if there were none.
   lc_chunks = find_active_chunks(&lc_n_chunks);
   if (lc_n_chunks == 0) {
      tl_assert(lc_chunks == NULL);
      if (VG_(clo_verbosity) >= 1 && !VG_(clo_xml)) {
         VG_(umsg)("All heap blocks were freed -- no leaks are possible\n");
         VG_(umsg)("\n");
      }
      return;
   }

   // Sort the array so blocks are in ascending order in memory.
   VG_(ssort)(lc_chunks, lc_n_chunks, sizeof(VgHashNode*), compare_MC_Chunks);

   // Sanity check -- make sure they're in order.
   for (i = 0; i < lc_n_chunks-1; i++) {
      tl_assert( lc_chunks[i]->data <= lc_chunks[i+1]->data);
   }

   // Sanity check -- make sure they don't overlap.  The one exception is that
   // we allow a MALLOCLIKE block to sit entirely within a malloc() block.
   // This is for bug 100628.  If this occurs, we ignore the malloc() block
   // for leak-checking purposes.  This is a hack and probably should be done
   // better, but at least it's consistent with mempools (which are treated
   // like this in find_active_chunks).  Mempools have a separate VgHashTable
   // for mempool chunks, but if custom-allocated blocks are put in a separate
   // table from normal heap blocks it makes free-mismatch checking more
   // difficult.
   //
   // If this check fails, it probably means that the application
   // has done something stupid with VALGRIND_MALLOCLIKE_BLOCK client
   // requests, eg. has made overlapping requests (which are
   // nonsensical), or used VALGRIND_MALLOCLIKE_BLOCK for stack locations;
   // again nonsensical.
   //
   for (i = 0; i < lc_n_chunks-1; i++) {
      MC_Chunk* ch1 = lc_chunks[i];
      MC_Chunk* ch2 = lc_chunks[i+1];

      Addr start1    = ch1->data;
      Addr start2    = ch2->data;
      Addr end1      = ch1->data + ch1->szB - 1;
      Addr end2      = ch2->data + ch2->szB - 1;
      Bool isCustom1 = ch1->allockind == MC_AllocCustom;
      Bool isCustom2 = ch2->allockind == MC_AllocCustom;

      if (end1 < start2) {
         // Normal case - no overlap.

      // We used to allow exact duplicates, I'm not sure why.  --njn
      //} else if (start1 == start2 && end1 == end2) {
         // Degenerate case: exact duplicates.

      } else if (start1 >= start2 && end1 <= end2 && isCustom1 && !isCustom2) {
         // Block i is MALLOCLIKE and entirely within block i+1.
         // Remove block i+1.
         for (j = i+1; j < lc_n_chunks-1; j++) {
            lc_chunks[j] = lc_chunks[j+1];
         }
         lc_n_chunks--;

      } else if (start2 >= start1 && end2 <= end1 && isCustom2 && !isCustom1) {
         // Block i+1 is MALLOCLIKE and entirely within block i.
         // Remove block i.
         for (j = i; j < lc_n_chunks-1; j++) {
            lc_chunks[j] = lc_chunks[j+1];
         }
         lc_n_chunks--;

      } else {
         VG_(umsg)("Block 0x%lx..0x%lx overlaps with block 0x%lx..0x%lx",
                   start1, end1, start1, end2);
         VG_(umsg)("This is usually caused by using VALGRIND_MALLOCLIKE_BLOCK");
         VG_(umsg)("in an inappropriate way.");
         tl_assert (0);
      }
   }

   // Initialise lc_extras.
   lc_extras = VG_(malloc)( "mc.dml.2", lc_n_chunks * sizeof(LC_Extra) );
   for (i = 0; i < lc_n_chunks; i++) {
      lc_extras[i].state        = Unreached;
      lc_extras[i].indirect_szB = 0;
   }

   // Initialise lc_markstack.
   lc_markstack = VG_(malloc)( "mc.dml.2", lc_n_chunks * sizeof(Int) );
   for (i = 0; i < lc_n_chunks; i++) {
      lc_markstack[i] = -1;
   }
   lc_markstack_top = -1;

   // Verbosity.
   if (VG_(clo_verbosity) > 1 && !VG_(clo_xml)) {
      VG_(umsg)( "Searching for pointers to %'d not-freed blocks\n",
                 lc_n_chunks );
   }

   // Scan the memory root-set, pushing onto the mark stack any blocks
   // pointed to.
   {
      Int   n_seg_starts;
      Addr* seg_starts = VG_(get_segment_starts)( &n_seg_starts );

      tl_assert(seg_starts && n_seg_starts > 0);

      lc_scanned_szB = 0;

      // VG_(am_show_nsegments)( 0, "leakcheck");
      for (i = 0; i < n_seg_starts; i++) {
         SizeT seg_size;
         NSegment const* seg = VG_(am_find_nsegment)( seg_starts[i] );
         tl_assert(seg);

         if (seg->kind != SkFileC && seg->kind != SkAnonC) continue;
         if (!(seg->hasR && seg->hasW))                    continue;
         if (seg->isCH)                                    continue;

         // Don't poke around in device segments as this may cause
         // hangs.  Exclude /dev/zero just in case someone allocated
         // memory by explicitly mapping /dev/zero.
         if (seg->kind == SkFileC 
             && (VKI_S_ISCHR(seg->mode) || VKI_S_ISBLK(seg->mode))) {
            HChar* dev_name = VG_(am_get_filename)( (NSegment*)seg );
            if (dev_name && 0 == VG_(strcmp)(dev_name, "/dev/zero")) {
               // Don't skip /dev/zero.
            } else {
               // Skip this device mapping.
               continue;
            }
         }

         if (0)
            VG_(printf)("ACCEPT %2d  %#lx %#lx\n", i, seg->start, seg->end);

         // Scan the segment.  We use -1 for the clique number, because this
         // is a root-set.
         seg_size = seg->end - seg->start + 1;
         if (VG_(clo_verbosity) > 2) {
            VG_(message)(Vg_DebugMsg,
                         "  Scanning root segment: %#lx..%#lx (%lu)\n",
                         seg->start, seg->end, seg_size);
         }
         lc_scan_memory(seg->start, seg_size, /*is_prior_definite*/True, -1);
      }
   }

   // Scan GP registers for chunk pointers.
   VG_(apply_to_GP_regs)(lc_push_if_a_chunk_ptr_register);

   // Process the pushed blocks.  After this, every block that is reachable
   // from the root-set has been traced.
   lc_process_markstack(/*clique*/-1);

   if (VG_(clo_verbosity) > 1 && !VG_(clo_xml)) {
      VG_(umsg)("Checked %'lu bytes\n", lc_scanned_szB);
      VG_(umsg)( "\n" );
   }

   // Trace all the leaked blocks to determine which are directly leaked and
   // which are indirectly leaked.  For each Unreached block, push it onto
   // the mark stack, and find all the as-yet-Unreached blocks reachable
   // from it.  These form a clique and are marked IndirectLeak, and their
   // size is added to the clique leader's indirect size.  If one of the
   // found blocks was itself a clique leader (from a previous clique), then
   // the cliques are merged.
   for (i = 0; i < lc_n_chunks; i++) {
      MC_Chunk* ch = lc_chunks[i];
      LC_Extra* ex = &(lc_extras[i]);

      if (VG_DEBUG_CLIQUE)
         VG_(printf)("cliques: %d at %#lx -> Loss state %d\n",
                     i, ch->data, ex->state);

      tl_assert(lc_markstack_top == -1);

      if (ex->state == Unreached) {
         if (VG_DEBUG_CLIQUE)
            VG_(printf)("%d: gathering clique %#lx\n", i, ch->data);
         
         // Push this Unreached block onto the stack and process it.
         lc_push(i, ch);
         lc_process_markstack(i);

         tl_assert(lc_markstack_top == -1);
         tl_assert(ex->state == Unreached);
      }
   }
      
   print_results( tid, ( mode == LC_Full ? True : False ) );

   VG_(free) ( lc_chunks );
   VG_(free) ( lc_extras );
   VG_(free) ( lc_markstack );
}
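
The bug-100628 handling in this example targets custom allocators that carve client blocks out of a malloc()ed pool. A hypothetical allocator of that shape (my_alloc, pool_base, and pool_used are illustrative names):

#include <stdlib.h>
#include <valgrind/valgrind.h>

static char*  pool_base;
static size_t pool_used;

void* my_alloc(size_t n)
{
   if (pool_base == NULL)
      pool_base = malloc(4096);        /* the enclosing malloc() block */
   void* p = pool_base + pool_used;    /* toy bump allocation, no limit check */
   pool_used += n;
   /* The registered block sits entirely within the malloc() block, so
      the dedup pass above drops the enclosing chunk rather than
      asserting, and the pool itself is not reported as a leak. */
   VALGRIND_MALLOCLIKE_BLOCK(p, n, /*rzB*/0, /*is_zeroed*/0);
   return p;
}
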
Example #5
/* Top level entry point to leak detector.  Call here, passing in
   suitable address-validating functions (see comment at top of
   scan_all_valid_memory above).  All this is to avoid duplication
   of the leak-detection code for Memcheck and Addrcheck.
   Also pass in a tool-specific function to extract the .where field
   for allocated blocks, an indication of the resolution wanted for
   distinguishing different allocation points, and whether or not
   reachable blocks should be shown.
*/
void MAC_(do_detect_memory_leaks) (
   ThreadId tid, LeakCheckMode mode,
   Bool (*is_within_valid_secondary) ( Addr ),
   Bool (*is_valid_aligned_word)     ( Addr )
)
{
   Int i;
   
   tl_assert(mode != LC_Off);

   /* VG_(HT_to_array) allocates storage for shadows */
   lc_shadows = (MAC_Chunk**)VG_(HT_to_array)( MAC_(malloc_list),
                                               &lc_n_shadows );

   /* Sort the array. */
   VG_(ssort)((void*)lc_shadows, lc_n_shadows, sizeof(VgHashNode*), lc_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < lc_n_shadows-1; i++) {
      tl_assert( lc_shadows[i]->data <= lc_shadows[i+1]->data);
   }

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < lc_n_shadows-1; i++) {
      tl_assert( lc_shadows[i]->data + lc_shadows[i]->size
                 < lc_shadows[i+1]->data );
   }

   if (lc_n_shadows == 0) {
      tl_assert(lc_shadows == NULL);
      if (VG_(clo_verbosity) >= 1 && !VG_(clo_xml)) {
         VG_(message)(Vg_UserMsg, 
                      "No malloc'd blocks -- no leaks are possible.");
      }
      return;
   }

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
      VG_(message)(Vg_UserMsg, 
                   "searching for pointers to %d not-freed blocks.", 
                   lc_n_shadows );

   lc_min_mallocd_addr = lc_shadows[0]->data;
   lc_max_mallocd_addr = lc_shadows[lc_n_shadows-1]->data
                         + lc_shadows[lc_n_shadows-1]->size;

   lc_markstack = VG_(malloc)( lc_n_shadows * sizeof(*lc_markstack) );
   for (i = 0; i < lc_n_shadows; i++) {
      lc_markstack[i].next = -1;
      lc_markstack[i].state = Unreached;
      lc_markstack[i].indirect = 0;
   }
   lc_markstack_top = -1;

   lc_is_within_valid_secondary = is_within_valid_secondary;
   lc_is_valid_aligned_word     = is_valid_aligned_word;

   lc_scanned = 0;

   /* Push roots onto the mark stack.  Roots are:
      - the integer registers of all threads
      - all mappings belonging to the client, including stacks
      - .. but excluding any client heap segments.
      Client heap segments are excluded because we wish to differentiate
      client heap blocks which are referenced only from inside the heap
      from those outside.  This facilitates the indirect vs direct loss
      categorisation, which [if the users ever manage to understand it]
      is really useful for detecting lost cycles.
   */
   { NSegment* seg;
     Addr*     seg_starts;
     Int       n_seg_starts;
     seg_starts = get_seg_starts( &n_seg_starts );
     tl_assert(seg_starts && n_seg_starts > 0);
     /* VG_(am_show_nsegments)( 0,"leakcheck"); */
     for (i = 0; i < n_seg_starts; i++) {
        seg = VG_(am_find_nsegment)( seg_starts[i] );
        tl_assert(seg);
        if (seg->kind != SkFileC && seg->kind != SkAnonC) 
           continue;
        if (!(seg->hasR && seg->hasW))
           continue;
        if (seg->isCH)
           continue;
        if (0)
           VG_(printf)("ACCEPT %2d  %p %p\n", i, seg->start, seg->end);
        lc_scan_memory(seg->start, seg->end+1 - seg->start);
     }
   }

   /* Push registers onto mark stack */
   VG_(apply_to_GP_regs)(lc_markstack_push);

   /* Keep walking the heap until everything is found */
   lc_do_leakcheck(-1);

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
      VG_(message)(Vg_UserMsg, "checked %d bytes.", lc_scanned);

   blocks_leaked     = MAC_(bytes_leaked)     = 0;
   blocks_indirect   = MAC_(bytes_indirect)   = 0;
   blocks_dubious    = MAC_(bytes_dubious)    = 0;
   blocks_reachable  = MAC_(bytes_reachable)  = 0;
   blocks_suppressed = MAC_(bytes_suppressed) = 0;

   if (mode == LC_Full)
      full_report(tid);
   else
      make_summary();

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg, "");
      VG_(message)(Vg_UserMsg, "LEAK SUMMARY:");
      VG_(message)(Vg_UserMsg, "   definitely lost: %d bytes in %d blocks.", 
                               MAC_(bytes_leaked), blocks_leaked );
      if (blocks_indirect > 0)
         VG_(message)(Vg_UserMsg, "   indirectly lost: %d bytes in %d blocks.",
                      MAC_(bytes_indirect), blocks_indirect );
      VG_(message)(Vg_UserMsg, "     possibly lost: %d bytes in %d blocks.", 
                               MAC_(bytes_dubious), blocks_dubious );
      VG_(message)(Vg_UserMsg, "   still reachable: %d bytes in %d blocks.", 
                               MAC_(bytes_reachable), blocks_reachable );
      VG_(message)(Vg_UserMsg, "        suppressed: %d bytes in %d blocks.", 
                               MAC_(bytes_suppressed), blocks_suppressed );
      if (mode == LC_Summary && blocks_leaked > 0)
         VG_(message)(Vg_UserMsg,
                      "Use --leak-check=full to see details of leaked memory.");
      else if (!MAC_(clo_show_reachable)) {
         VG_(message)(Vg_UserMsg, 
           "Reachable blocks (those to which a pointer was found) are not shown.");
         VG_(message)(Vg_UserMsg, 
            "To see them, rerun with: --show-reachable=yes");
      }
   }

   VG_(free) ( lc_shadows );
   VG_(free) ( lc_markstack );
}
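
The two function-pointer parameters let this shared MAC_ version ask the calling tool whether an address is addressable and whether a word is valid. A sketch of the tool-side wiring follows; the predicate bodies are placeholders, not the real Memcheck definitions, which consult its shadow memory.

static Bool mc_is_within_valid_secondary ( Addr a )
{
   return True;    /* placeholder: "does 'a' lie in addressable memory?" */
}

static Bool mc_is_valid_aligned_word ( Addr a )
{
   tl_assert(VG_IS_WORD_ALIGNED(a));
   return True;    /* placeholder: "is the word at 'a' defined?" */
}

/* Invocation, e.g. in response to a client leak-check request: */
MAC_(do_detect_memory_leaks)( tid, LC_Full,
                              mc_is_within_valid_secondary,
                              mc_is_valid_aligned_word );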