Example #1
void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckMode mode )
{
   Int i;
   
   tl_assert(mode != LC_Off);

   // Get the chunks, stop if there were none.
   lc_chunks = find_active_chunks(&lc_n_chunks);
   if (lc_n_chunks == 0) {
      tl_assert(lc_chunks == NULL);
      if (VG_(clo_verbosity) >= 1 && !VG_(clo_xml)) {
         VG_(UMSG)("All heap blocks were freed -- no leaks are possible.\n");
      }
      return;
   }

   // Sort the array so blocks are in ascending order in memory.
   VG_(ssort)(lc_chunks, lc_n_chunks, sizeof(VgHashNode*), compare_MC_Chunks);

   // Sanity check -- make sure they're in order.
   for (i = 0; i < lc_n_chunks-1; i++) {
      tl_assert( lc_chunks[i]->data <= lc_chunks[i+1]->data);
   }

   // Sanity check -- make sure they don't overlap.  But do allow exact
   // duplicates.  If this assertion fails, it may mean that the application
   // has done something stupid with VALGRIND_MALLOCLIKE_BLOCK client
   // requests, specifically, has made overlapping requests (which are
   // nonsensical).  Another way to screw up is to use
   // VALGRIND_MALLOCLIKE_BLOCK for stack locations; again nonsensical.
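   //
   // For instance, a (hypothetical) client that announced two overlapping
   // custom blocks would trip the assertion below:
   //
   //    static char buf[200];
   //    VALGRIND_MALLOCLIKE_BLOCK(buf,      100, /*rzB*/0, /*is_zeroed*/0);
   //    VALGRIND_MALLOCLIKE_BLOCK(buf + 50, 100, /*rzB*/0, /*is_zeroed*/0);
   //
   // The second request overlaps the first by 50 bytes, which is neither
   // the no-overlap case nor an exact duplicate.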
   for (i = 0; i < lc_n_chunks-1; i++) {
      MC_Chunk* ch1 = lc_chunks[i];
      MC_Chunk* ch2 = lc_chunks[i+1];
      Bool nonsense_overlap = ! (
            // Normal case - no overlap.
            (ch1->data + ch1->szB <= ch2->data) ||
            // Degenerate case: exact duplicates.
            (ch1->data == ch2->data && ch1->szB  == ch2->szB)
         );
      if (nonsense_overlap) {
         VG_(UMSG)("Block [0x%lx, 0x%lx) overlaps with block [0x%lx, 0x%lx)\n",
                   ch1->data, (ch1->data + ch1->szB),
                   ch2->data, (ch2->data + ch2->szB));
      }
      tl_assert (!nonsense_overlap);
   }

   // Initialise lc_extras.
   lc_extras = VG_(malloc)( "mc.dml.2", lc_n_chunks * sizeof(LC_Extra) );
   for (i = 0; i < lc_n_chunks; i++) {
      lc_extras[i].state        = Unreached;
      lc_extras[i].indirect_szB = 0;
   }

   // Initialise lc_markstack.
   lc_markstack = VG_(malloc)( "mc.dml.2", lc_n_chunks * sizeof(Int) );
   for (i = 0; i < lc_n_chunks; i++) {
      lc_markstack[i] = -1;
   }
   lc_markstack_top = -1;

   // Verbosity.
   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
      VG_(UMSG)( "searching for pointers to %'d not-freed blocks.\n",
                 lc_n_chunks );

   // Scan the memory root-set, pushing onto the mark stack any blocks
   // pointed to.
   {
      Int   n_seg_starts;
      Addr* seg_starts = VG_(get_segment_starts)( &n_seg_starts );

      tl_assert(seg_starts && n_seg_starts > 0);

      lc_scanned_szB = 0;

      // VG_(am_show_nsegments)( 0, "leakcheck");
      for (i = 0; i < n_seg_starts; i++) {
         SizeT seg_size;
         NSegment const* seg = VG_(am_find_nsegment)( seg_starts[i] );
         tl_assert(seg);

         if (seg->kind != SkFileC && seg->kind != SkAnonC) continue;
         if (!(seg->hasR && seg->hasW))                    continue;
         if (seg->isCH)                                    continue;

         // Don't poke around in device segments as this may cause
         // hangs.  Exclude /dev/zero just in case someone allocated
         // memory by explicitly mapping /dev/zero.
         if (seg->kind == SkFileC 
             && (VKI_S_ISCHR(seg->mode) || VKI_S_ISBLK(seg->mode))) {
            HChar* dev_name = VG_(am_get_filename)( (NSegment*)seg );
            if (dev_name && 0 == VG_(strcmp)(dev_name, "/dev/zero")) {
               // Don't skip /dev/zero.
            } else {
               // Skip this device mapping.
               continue;
            }
         }

         if (0)
            VG_(printf)("ACCEPT %2d  %#lx %#lx\n", i, seg->start, seg->end);

         // Scan the segment.  We use -1 for the clique number, because this
         // is a root-set.
         seg_size = seg->end - seg->start + 1;
         if (VG_(clo_verbosity) > 2) {
            VG_(message)(Vg_DebugMsg,
                         "  Scanning root segment: %#lx..%#lx (%lu)\n",
                         seg->start, seg->end, seg_size);
         }
         lc_scan_memory(seg->start, seg_size, /*is_prior_definite*/True, -1);
      }
   }

   // Scan GP registers for chunk pointers.
   VG_(apply_to_GP_regs)(lc_push_if_a_chunk_ptr_register);

   // Process the pushed blocks.  After this, every block that is reachable
   // from the root-set has been traced.
   lc_process_markstack(/*clique*/-1);

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
      VG_(UMSG)("checked %'lu bytes.\n", lc_scanned_szB);

   // Trace all the leaked blocks to determine which are directly leaked and
   // which are indirectly leaked.  For each Unreached block, push it onto
   // the mark stack, and find all the as-yet-Unreached blocks reachable
   // from it.  These form a clique and are marked IndirectLeak, and their
   // size is added to the clique leader's indirect size.  If one of the
   // found blocks was itself a clique leader (from a previous clique), then
   // the cliques are merged.  (A minimal illustration follows this
   // function.)
   for (i = 0; i < lc_n_chunks; i++) {
      MC_Chunk* ch = lc_chunks[i];
      LC_Extra* ex = &(lc_extras[i]);

      if (VG_DEBUG_CLIQUE)
         VG_(printf)("cliques: %d at %#lx -> Loss state %d\n",
                     i, ch->data, ex->state);

      tl_assert(lc_markstack_top == -1);

      if (ex->state == Unreached) {
         if (VG_DEBUG_CLIQUE)
            VG_(printf)("%d: gathering clique %#lx\n", i, ch->data);
         
         // Push this Unreached block onto the stack and process it.
         lc_push(i, ch);
         lc_process_markstack(i);

         tl_assert(lc_markstack_top == -1);
         tl_assert(ex->state == Unreached);
      }
   }
      
   print_results( tid, ( mode == LC_Full ? True : False ) );

   VG_(free) ( lc_chunks );
   VG_(free) ( lc_extras );
   VG_(free) ( lc_markstack );
}
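
As a concrete illustration of the clique logic above, here is a minimal,
hypothetical client program (not part of the file) that produces one clique
of three blocks: the root block is the clique leader and is reported as
"definitely lost", while the two child blocks are marked IndirectLeak and
their sizes are added to the leader's indirect_szB.

#include <stdlib.h>

struct node { struct node *left, *right; };

int main(void)
{
   struct node* root = malloc(sizeof *root);   // becomes the clique leader
   root->left  = malloc(sizeof *root);         // reachable only through root
   root->right = malloc(sizeof *root);         // reachable only through root
   root = NULL;   // all three blocks are now Unreached at exit
   return 0;
}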
Example #2
void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckMode mode )
{
   Int i, j;
   
   tl_assert(mode != LC_Off);

   // Get the chunks, stop if there were none.
   lc_chunks = find_active_chunks(&lc_n_chunks);
   if (lc_n_chunks == 0) {
      tl_assert(lc_chunks == NULL);
      if (VG_(clo_verbosity) >= 1 && !VG_(clo_xml)) {
         VG_(umsg)("All heap blocks were freed -- no leaks are possible\n");
         VG_(umsg)("\n");
      }
      return;
   }

   // Sort the array so blocks are in ascending order in memory.
   VG_(ssort)(lc_chunks, lc_n_chunks, sizeof(VgHashNode*), compare_MC_Chunks);

   // Sanity check -- make sure they're in order.
   for (i = 0; i < lc_n_chunks-1; i++) {
      tl_assert( lc_chunks[i]->data <= lc_chunks[i+1]->data);
   }

   // Sanity check -- make sure they don't overlap.  The one exception is that
   // we allow a MALLOCLIKE block to sit entirely within a malloc() block.
   // This is for bug 100628.  If this occurs, we ignore the malloc() block
   // for leak-checking purposes.  This is a hack and probably should be done
   // better, but at least it's consistent with mempools (which are treated
   // like this in find_active_chunks).  Mempools have a separate VgHashTable
   // for mempool chunks, but if custom-allocated blocks are put in a separate
   // table from normal heap blocks it makes free-mismatch checking more
   // difficult.  (A sketch of this nested-block pattern follows the
   // function.)
   //
   // If this check fails, it probably means that the application
   // has done something stupid with VALGRIND_MALLOCLIKE_BLOCK client
   // requests, eg. has made overlapping requests (which are
   // nonsensical), or used VALGRIND_MALLOCLIKE_BLOCK for stack locations;
   // again nonsensical.
   //
   for (i = 0; i < lc_n_chunks-1; i++) {
      MC_Chunk* ch1 = lc_chunks[i];
      MC_Chunk* ch2 = lc_chunks[i+1];

      Addr start1    = ch1->data;
      Addr start2    = ch2->data;
      Addr end1      = ch1->data + ch1->szB - 1;
      Addr end2      = ch2->data + ch2->szB - 1;
      Bool isCustom1 = ch1->allockind == MC_AllocCustom;
      Bool isCustom2 = ch2->allockind == MC_AllocCustom;

      if (end1 < start2) {
         // Normal case - no overlap.

      // We used to allow exact duplicates, I'm not sure why.  --njn
      //} else if (start1 == start2 && end1 == end2) {
         // Degenerate case: exact duplicates.

      } else if (start1 >= start2 && end1 <= end2 && isCustom1 && !isCustom2) {
         // Block i is MALLOCLIKE and entirely within block i+1.
         // Remove block i+1.
         for (j = i+1; j < lc_n_chunks-1; j++) {
            lc_chunks[j] = lc_chunks[j+1];
         }
         lc_n_chunks--;

      } else if (start2 >= start1 && end2 <= end1 && isCustom2 && !isCustom1) {
         // Block i+1 is MALLOCLIKE and entirely within block i.
         // Remove block i.
         for (j = i; j < lc_n_chunks-1; j++) {
            lc_chunks[j] = lc_chunks[j+1];
         }
         lc_n_chunks--;

      } else {
         VG_(umsg)("Block 0x%lx..0x%lx overlaps with block 0x%lx..0x%lx",
                   start1, end1, start1, end2);
         VG_(umsg)("This is usually caused by using VALGRIND_MALLOCLIKE_BLOCK");
         VG_(umsg)("in an inappropriate way.");
         tl_assert (0);
      }
   }

   // Initialise lc_extras.
   lc_extras = VG_(malloc)( "mc.dml.2", lc_n_chunks * sizeof(LC_Extra) );
   for (i = 0; i < lc_n_chunks; i++) {
      lc_extras[i].state        = Unreached;
      lc_extras[i].indirect_szB = 0;
   }

   // Initialise lc_markstack.
   lc_markstack = VG_(malloc)( "mc.dml.2", lc_n_chunks * sizeof(Int) );
   for (i = 0; i < lc_n_chunks; i++) {
      lc_markstack[i] = -1;
   }
   lc_markstack_top = -1;

   // Verbosity.
   if (VG_(clo_verbosity) > 1 && !VG_(clo_xml)) {
      VG_(umsg)( "Searching for pointers to %'d not-freed blocks\n",
                 lc_n_chunks );
   }

   // Scan the memory root-set, pushing onto the mark stack any blocks
   // pointed to.
   {
      Int   n_seg_starts;
      Addr* seg_starts = VG_(get_segment_starts)( &n_seg_starts );

      tl_assert(seg_starts && n_seg_starts > 0);

      lc_scanned_szB = 0;

      // VG_(am_show_nsegments)( 0, "leakcheck");
      for (i = 0; i < n_seg_starts; i++) {
         SizeT seg_size;
         NSegment const* seg = VG_(am_find_nsegment)( seg_starts[i] );
         tl_assert(seg);

         if (seg->kind != SkFileC && seg->kind != SkAnonC) continue;
         if (!(seg->hasR && seg->hasW))                    continue;
         if (seg->isCH)                                    continue;

         // Don't poke around in device segments as this may cause
         // hangs.  Exclude /dev/zero just in case someone allocated
         // memory by explicitly mapping /dev/zero.
         if (seg->kind == SkFileC 
             && (VKI_S_ISCHR(seg->mode) || VKI_S_ISBLK(seg->mode))) {
            HChar* dev_name = VG_(am_get_filename)( (NSegment*)seg );
            if (dev_name && 0 == VG_(strcmp)(dev_name, "/dev/zero")) {
               // Don't skip /dev/zero.
            } else {
               // Skip this device mapping.
               continue;
            }
         }

         if (0)
            VG_(printf)("ACCEPT %2d  %#lx %#lx\n", i, seg->start, seg->end);

         // Scan the segment.  We use -1 for the clique number, because this
         // is a root-set.
         seg_size = seg->end - seg->start + 1;
         if (VG_(clo_verbosity) > 2) {
            VG_(message)(Vg_DebugMsg,
                         "  Scanning root segment: %#lx..%#lx (%lu)\n",
                         seg->start, seg->end, seg_size);
         }
         lc_scan_memory(seg->start, seg_size, /*is_prior_definite*/True, -1);
      }
   }

   // Scan GP registers for chunk pointers.
   VG_(apply_to_GP_regs)(lc_push_if_a_chunk_ptr_register);

   // Process the pushed blocks.  After this, every block that is reachable
   // from the root-set has been traced.
   lc_process_markstack(/*clique*/-1);

   if (VG_(clo_verbosity) > 1 && !VG_(clo_xml)) {
      VG_(umsg)("Checked %'lu bytes\n", lc_scanned_szB);
      VG_(umsg)( "\n" );
   }

   // Trace all the leaked blocks to determine which are directly leaked and
   // which are indirectly leaked.  For each Unreached block, push it onto
   // the mark stack, and find all the as-yet-Unreached blocks reachable
   // from it.  These form a clique and are marked IndirectLeak, and their
   // size is added to the clique leader's indirect size.  If one of the
   // found blocks was itself a clique leader (from a previous clique), then
   // the cliques are merged.
   for (i = 0; i < lc_n_chunks; i++) {
      MC_Chunk* ch = lc_chunks[i];
      LC_Extra* ex = &(lc_extras[i]);

      if (VG_DEBUG_CLIQUE)
         VG_(printf)("cliques: %d at %#lx -> Loss state %d\n",
                     i, ch->data, ex->state);

      tl_assert(lc_markstack_top == -1);

      if (ex->state == Unreached) {
         if (VG_DEBUG_CLIQUE)
            VG_(printf)("%d: gathering clique %#lx\n", i, ch->data);
         
         // Push this Unreached block onto the stack and process it.
         lc_push(i, ch);
         lc_process_markstack(i);

         tl_assert(lc_markstack_top == -1);
         tl_assert(ex->state == Unreached);
      }
   }
      
   print_results( tid, ( mode == LC_Full ? True : False ) );

   VG_(free) ( lc_chunks );
   VG_(free) ( lc_extras );
   VG_(free) ( lc_markstack );
}
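
Finally, a minimal, hypothetical sketch of the bug-100628 pattern that
Example #2 now tolerates: a pool allocator malloc()s a large block and
announces a MALLOCLIKE sub-block sitting wholly inside it. Example #1 would
have failed the overlap assertion here; Example #2 instead drops the
enclosing malloc() chunk from the leak check.

#include <stdlib.h>
#include <valgrind/valgrind.h>   // for VALGRIND_MALLOCLIKE_BLOCK

int main(void)
{
   char* pool = malloc(1024);          // normal heap chunk
   char* obj  = pool + 16;             // sub-allocation inside the pool

   // Announce the sub-block; it lies entirely within the malloc()d chunk.
   VALGRIND_MALLOCLIKE_BLOCK(obj, 64, /*rzB*/0, /*is_zeroed*/0);

   // ... use obj ...

   VALGRIND_FREELIKE_BLOCK(obj, /*rzB*/0);
   free(pool);
   return 0;
}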