static
void die_and_free_mem ( MAC_Chunk* mc,
                        MAC_Chunk** prev_chunks_next_ptr, UInt rzB )
{
   /* Note: ban redzones again -- just in case the user de-banned them
      with a client request... */
   MAC_(ban_mem_heap)( mc->data-rzB, rzB );
   MAC_(die_mem_heap)( mc->data, mc->size );
   MAC_(ban_mem_heap)( mc->data+mc->size, rzB );

   /* Remove mc from the malloclist using prev_chunks_next_ptr, to
      avoid repeating the hash table lookup.  We can't remove it until
      after the free and free_mismatch errors have been reported, because
      they use describe_addr(), which looks the chunk up in the
      malloclist. */
   *prev_chunks_next_ptr = mc->next;

   /* Record where freed */
   mc->where = VG_(get_ExeContext) ( VG_(get_current_or_recent_tid)() );

   /* Put it out of harm's way for a while, if not from a client request */
   if (MAC_AllocCustom != mc->allockind)
      add_to_freed_queue ( mc );
   else
      VG_(free) ( mc );
}
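
The prev_chunks_next_ptr idiom above removes mc from its malloclist bucket without repeating the hash-table lookup: the caller passes the address of whichever next field currently points at mc, and the unlink is a single pointer assignment. Below is a minimal, standalone sketch of that pattern; the Node type and find_next_ptr are illustrative names, not Valgrind code.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: a tiny singly-linked list standing in for one
   bucket of the malloclist. */
typedef struct Node { int key; struct Node* next; } Node;

/* Return the address of the 'next' field that points at the matching
   node (the role played by prev_chunks_next_ptr), or NULL if absent. */
static Node** find_next_ptr ( Node** head, int key )
{
   Node** p = head;
   while (*p != NULL && (*p)->key != key)
      p = &(*p)->next;
   return (*p != NULL) ? p : NULL;
}

int main ( void )
{
   Node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
   Node* head = &a;

   Node** prev_next_ptr = find_next_ptr(&head, 2);
   if (prev_next_ptr != NULL) {
      Node* victim = *prev_next_ptr;
      *prev_next_ptr = victim->next;  /* same move as *prev_chunks_next_ptr = mc->next */
      printf("removed %d\n", victim->key);
   }
   for (Node* n = head; n != NULL; n = n->next)
      printf("%d ", n->key);
   printf("\n");
   return 0;
}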
Example 2
static
void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
{
   if (MC_(clo_free_fill) != -1) {
      tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
      VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
   }

   /* Note: make redzones noaccess again -- just in case the user made
      them accessible with a client request... */
   MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );

   /* Put it out of harm's way for a while, if not from a client request */
   if (MC_AllocCustom != mc->allockind) {
      /* Record where freed */
      mc->where = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
      add_to_freed_queue ( mc );
   } else {
      VG_(free) ( mc );
   }
}
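
MC_(clo_free_fill) holds the value of Memcheck's --free-fill option: when it is not -1, the payload of a freed block is overwritten with that byte before the block is quarantined, so stale reads through dangling pointers return a recognisable pattern instead of the old contents. A rough standalone sketch of the idea follows; free_fill and my_free are made-up names, not the tool's API, and the block is deliberately never released so the demo read stays well defined.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for --free-fill: -1 means "disabled",
   otherwise a byte value 0x00..0xFF. */
static int free_fill = 0x55;

static void my_free ( void* p, size_t szB )
{
   if (free_fill != -1)
      memset(p, free_fill, szB);  /* scribble over the payload, as VG_(memset) does above */
   /* A real tool would now quarantine the block; here we simply keep it
      allocated so reading it afterwards is still valid C. */
}

int main ( void )
{
   unsigned char* buf = malloc(16);
   memset(buf, 0xAA, 16);
   my_free(buf, 16);
   /* A "dangling" read now sees the fill pattern rather than old data. */
   printf("buf[0] after free-fill: 0x%02x\n", buf[0]);
   return 0;
}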
Example 3
static
void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
{
    if (MC_(clo_free_fill) != -1) {
        tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
        VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
    }

    /* Note: make redzones noaccess again -- just in case the user made
       them accessible with a client request... */
    MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );

    /* Record where freed */
    mc->where = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
    /* Put it out of harm's way for a while */
    add_to_freed_queue ( mc );
    /* If the free list volume is bigger than MC_(clo_freelist_vol),
       we wait till the next block allocation to release blocks.
       This increases the chance of discovering dangling pointer usage,
       even for big blocks being freed by the client. */
}
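
The closing comment describes the quarantine policy: freed blocks sit in the freed queue and are only truly released later, on a subsequent allocation, once their total volume exceeds the configured limit (Memcheck's --freelist-vol option). The sketch below shows one plausible shape for such a volume-capped FIFO; the names, the 1024-byte limit and release_oldest_until_under_limit are all illustrative, not the actual implementation.

#include <stdio.h>
#include <stdlib.h>

typedef struct QNode { void* payload; size_t szB; struct QNode* next; } QNode;

static QNode* q_head = NULL;        /* oldest quarantined block */
static QNode* q_tail = NULL;
static size_t q_volume = 0;         /* total payload bytes quarantined */
static size_t freelist_vol = 1024;  /* stand-in for --freelist-vol */

/* Quarantine a freed block instead of releasing it immediately. */
static void add_to_freed_queue_sketch ( void* payload, size_t szB )
{
   QNode* n = malloc(sizeof(QNode));
   n->payload = payload; n->szB = szB; n->next = NULL;
   if (q_tail) q_tail->next = n; else q_head = n;
   q_tail = n;
   q_volume += szB;
}

/* Run on the next allocation: release the oldest blocks until the
   quarantined volume drops back under the limit. */
static void release_oldest_until_under_limit ( void )
{
   while (q_volume > freelist_vol && q_head != NULL) {
      QNode* n = q_head;
      q_head = n->next;
      if (q_head == NULL) q_tail = NULL;
      q_volume -= n->szB;
      free(n->payload);
      free(n);
   }
}

int main ( void )
{
   for (int i = 0; i < 8; i++)
      add_to_freed_queue_sketch(malloc(256), 256);
   printf("quarantined before trim: %zu bytes\n", q_volume);
   release_oldest_until_under_limit();  /* would happen at the next malloc */
   printf("quarantined after trim:  %zu bytes\n", q_volume);
   return 0;
}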
Example 4
static
void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
{
   /* Note: we do not free-fill the custom allocs produced
      by MEMPOOL or by MALLOC/FREELIKE_BLOCK requests. */
   if (MC_(clo_free_fill) != -1 && MC_AllocCustom != mc->allockind ) {
      tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
      VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
   }

   /* Note: make redzones noaccess again -- just in case the user made
      them accessible with a client request... */
   MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );

   /* Record where freed */
   MC_(set_freed_at) (tid, mc);
   /* Put it out of harm's way for a while */
   add_to_freed_queue ( mc );
   /* If the free list volume is bigger than MC_(clo_freelist_vol),
      we wait till the next block allocation to release blocks.
      This increases the chance of discovering dangling pointer usage,
      even for big blocks being freed by the client. */
}
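
The single MC_(make_mem_noaccess) call above covers the leading redzone, the payload and the trailing redzone in one go: starting at mc->data - rzB and spanning mc->szB + 2*rzB bytes ends exactly one byte past the trailing redzone. A small worked example of that address arithmetic, with purely illustrative numbers:

#include <stdio.h>

int main ( void )
{
   /* Illustrative values: a 40-byte payload with 16-byte redzones. */
   unsigned long data = 0x1000;  /* start of payload, i.e. mc->data */
   unsigned long szB  = 40;      /* payload size,     i.e. mc->szB  */
   unsigned long rzB  = 16;      /* redzone size                    */

   unsigned long lo  = data - rzB;    /* start of leading redzone   */
   unsigned long len = szB + 2*rzB;   /* redzones + payload         */
   unsigned long hi  = lo + len;      /* one past trailing redzone  */

   printf("noaccess span: [0x%lx, 0x%lx)  (%lu bytes)\n", lo, hi, len);
   printf("payload:       [0x%lx, 0x%lx)\n", data, data + szB);
   return 0;
}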