예제 #1
0
/* Handle a client request to free the block at 'addr' belonging to
   mempool 'pool'.  An unknown pool or an unknown address is reported
   as an error and nothing is freed. */
void MC_(mempool_free)(Addr pool, Addr addr)
{
    ThreadId     tid = VG_(get_running_tid)();
    MC_Mempool*  mp  = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
    MC_Chunk*    mc;

    /* No such pool: report and bail out. */
    if (!mp) {
        MC_(record_illegal_mempool_error)(tid, pool);
        return;
    }

    if (VG_(clo_verbosity) > 2) {
        VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)\n", pool, addr);
        VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
    }

    if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);

    /* Detach the chunk from the pool's chunk table; a miss means the
       client is freeing something this pool never handed out. */
    mc = VG_(HT_remove)(mp->chunks, (UWord)addr);
    if (!mc) {
        MC_(record_free_error)(tid, (Addr)addr);
        return;
    }

    if (VG_(clo_verbosity) > 2) {
        VG_(message)(Vg_UserMsg,
                     "mempool_free(0x%lx, 0x%lx) freed chunk of %ld bytes\n",
                     pool, addr, mc->szB + 0UL);
    }

    die_and_free_mem ( tid, mc, mp->rzB );
    if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
}
예제 #2
0
/* Process a client free()/delete/delete[] of the block at 'p'.
   'rzB' is the red-zone size; 'kind' says which deallocation routine
   was used so mismatched alloc/free pairs can be flagged. */
__inline__
void MAC_(handle_free) ( Addr p, UInt rzB, MAC_AllocKind kind )
{
   ThreadId    tid = VG_(get_current_or_recent_tid)();
   MAC_Chunk** prev_next;
   MAC_Chunk*  chunk;

   VGP_PUSHCC(VgpCliMalloc);

   cmalloc_n_frees++;

   /* Locate the shadow chunk for 'p', and also the list slot that
      points at it, so die_and_free_mem can unlink it. */
   chunk = (MAC_Chunk*)VG_(HT_get_node) ( MAC_(malloc_list), (UInt)p,
                                          (VgHashNode***)&prev_next );
   if (chunk == NULL) {
      /* Freeing something we never saw allocated. */
      MAC_(record_free_error) ( tid, p );
      VGP_POPCC(VgpCliMalloc);
      return;
   }

   /* check if its a matching free() / delete / delete [] */
   if (kind != chunk->allockind) {
      MAC_(record_freemismatch_error) ( tid, p );
   }

   die_and_free_mem ( chunk, prev_next, rzB );
   VGP_POPCC(VgpCliMalloc);
}
예제 #3
0
/* Free the chunk at 'addr' belonging to mempool 'pool'.  Reports an
   error (and frees nothing) if either the pool or the chunk is
   unknown. */
void MAC_(mempool_free)(Addr pool, Addr addr)
{
   ThreadId      tid = VG_(get_running_tid)();
   MAC_Mempool*  pool_rec;
   MAC_Mempool** pool_slot;
   MAC_Chunk*    chunk;
   MAC_Chunk**   chunk_slot;

   /* Look the pool itself up first. */
   pool_rec = (MAC_Mempool*)VG_(HT_get_node)(MAC_(mempool_list), (UWord)pool,
                                             (void*)&pool_slot);
   if (pool_rec == NULL) {
      MAC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   /* Then the chunk within that pool. */
   chunk = (MAC_Chunk*)VG_(HT_get_node)(pool_rec->chunks, (UWord)addr,
                                        (void*)&chunk_slot);
   if (chunk == NULL) {
      MAC_(record_free_error)(tid, (Addr)addr);
      return;
   }

   die_and_free_mem ( tid, chunk, chunk_slot, pool_rec->rzB );
}
예제 #4
0
/* Process a client deallocation of the block at 'p'.  'rzB' is the
   red-zone size and 'kind' identifies which of free/delete/delete[]
   was used, so mismatches can be reported. */
void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind )
{
    MC_Chunk* chunk;

    cmalloc_n_frees++;

    /* Detach the shadow chunk; a miss means 'p' was never allocated
       by us (or was already freed). */
    chunk = VG_(HT_remove) ( MC_(malloc_list), (UWord)p );
    if (chunk == NULL) {
        MC_(record_free_error) ( tid, p );
        return;
    }

    /* check if it is a matching free() / delete / delete [] */
    if (kind != chunk->allockind) {
        tl_assert(p == chunk->data);
        MC_(record_freemismatch_error) ( tid, chunk );
    }
    die_and_free_mem ( tid, chunk, rzB );
}
예제 #5
0
/* Client free: when shadow chunks are not in use, hand the pointer
   straight to the arena allocator; otherwise locate the shadow chunk,
   check the alloc/free kind matches, and retire the memory. */
void VG_(client_free) ( ThreadState* tst, void* p, VgAllocKind kind )
{
   ShadowChunk*  sc;
   ShadowChunk** prev_next;

   VGP_PUSHCC(VgpCliMalloc);

#  ifdef DEBUG_CLIENTMALLOC
   VG_(printf)("[m %d, f %d (%d)] client_free ( %p, %x )\n", 
               count_malloclists(), 
               0/*count_freelist()*/, 0/*vg_freed_list_volume*/,
               p, kind );
#  endif

   vg_cmalloc_n_frees ++;

   if (! needs_shadow_chunks()) {
      /* No shadowing: a plain arena free suffices. */
      VG_(arena_free) ( VG_AR_CLIENT, p );
      VGP_POPCC(VgpCliMalloc);
      return;
   }

   sc = getShadowChunk ( (Addr)p, &prev_next );
   if (sc == NULL) {
      /* No chunk covers 'p': bogus free. */
      VG_TRACK( bad_free, tst, (Addr)p );
      VGP_POPCC(VgpCliMalloc);
      return;
   }

   /* check if its a matching free() / delete / delete [] */
   if (kind != sc->allockind) {
      VG_TRACK( mismatched_free, tst, (Addr)p );
   }

   die_and_free_mem ( tst, sc, prev_next );
   VGP_POPCC(VgpCliMalloc);
}
예제 #6
0
/* Client realloc().  Handles the unchanged, shrink and grow cases; on
   growth a fresh block is allocated, contents are copied, and the old
   block is retired.  Returns the (possibly new) payload address, or
   NULL on a silly size or an unknown block. */
void* SK_(realloc) ( void* p, Int new_size )
{
   MAC_Chunk  *mc;
   MAC_Chunk **prev_chunks_next_ptr;
   UInt        i;
   ThreadId    tid = VG_(get_current_or_recent_tid)();

   VGP_PUSHCC(VgpCliMalloc);

   /* realloc is accounted as one free plus one malloc. */
   cmalloc_n_frees ++;
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += new_size;

   if (new_size < 0) {
      VG_(message)(Vg_UserMsg, 
                   "Warning: silly arg (%d) to realloc()", new_size );
      /* BUGFIX: pop the cost centre pushed above.  The original early
         return on this path left VgpCliMalloc unbalanced, unlike every
         other return in this function. */
      VGP_POPCC(VgpCliMalloc);
      return NULL;
   }

   /* First try and find the block. */
   mc = (MAC_Chunk*)VG_(HT_get_node) ( MAC_(malloc_list), (UInt)p,
                                       (VgHashNode***)&prev_chunks_next_ptr );

   if (mc == NULL) {
      MAC_(record_free_error) ( tid, (Addr)p );
      /* Perhaps we should return to the program regardless. */
      VGP_POPCC(VgpCliMalloc);
      return NULL;
   }
  
   /* check if its a matching free() / delete / delete [] */
   if (MAC_AllocMalloc != mc->allockind) {
      /* can not realloc a range that was allocated with new or new [] */
      MAC_(record_freemismatch_error) ( tid, (Addr)p );
      /* but keep going anyway */
   }

   if (mc->size == new_size) {
      /* size unchanged: just refresh the allocation context. */
      mc->where = VG_(get_ExeContext)(tid);
      VGP_POPCC(VgpCliMalloc);
      return p;
      
   } else if (mc->size > new_size) {
      /* new size is smaller: mark the tail dead, keep the block. */
      MAC_(die_mem_heap)( mc->data+new_size, mc->size-new_size );
      mc->size = new_size;
      mc->where = VG_(get_ExeContext)(tid);
      VGP_POPCC(VgpCliMalloc);
      return p;

   } else {
      /* new size is bigger */
      Addr p_new;

      /* Get new memory */
      p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);

      /* First half kept and copied, second half new, 
         red zones as normal */
      MAC_(ban_mem_heap) ( p_new-VG_(vg_malloc_redzone_szB), 
                                 VG_(vg_malloc_redzone_szB) );
      MAC_(copy_mem_heap)( (Addr)p, p_new, mc->size );
      MAC_(new_mem_heap) ( p_new+mc->size, new_size-mc->size, /*inited*/False );
      MAC_(ban_mem_heap) ( p_new+new_size, VG_(vg_malloc_redzone_szB) );

      /* Copy from old to new */
      for (i = 0; i < mc->size; i++)
         ((UChar*)p_new)[i] = ((UChar*)p)[i];

      /* Free old memory */
      die_and_free_mem ( mc, prev_chunks_next_ptr,
                         VG_(vg_malloc_redzone_szB) );

      /* this has to be after die_and_free_mem, otherwise the
         former succeeds in shorting out the new block, not the
         old, in the case when both are on the same list.  */
      add_MAC_Chunk ( p_new, new_size, MAC_AllocMalloc );

      VGP_POPCC(VgpCliMalloc);
      return (void*)p_new;
   }  
}
예제 #7
0
/* Client realloc().  Without shadow chunks this is a straight arena
   realloc; with shadow chunks the unchanged/shrink cases reuse the
   block in place and only the grow case allocates new memory, copies
   the contents, and retires the old block.  Returns the (possibly
   new) payload address, or NULL for a free of an unknown block. */
void* VG_(client_realloc) ( ThreadState* tst, void* p, UInt new_size )
{
   ShadowChunk  *sc;
   ShadowChunk **prev_chunks_next_ptr;
   UInt          i;

   VGP_PUSHCC(VgpCliMalloc);

   /* realloc is accounted as one free plus one malloc. */
   vg_cmalloc_n_frees ++;
   vg_cmalloc_n_mallocs ++;
   vg_cmalloc_bs_mallocd += new_size;

   if (! needs_shadow_chunks()) {
      /* No shadowing: delegate directly to the arena allocator. */
      vg_assert(p != NULL && new_size != 0);
      p = VG_(arena_realloc) ( VG_AR_CLIENT, p, VG_(clo_alignment), 
                               new_size );
      VGP_POPCC(VgpCliMalloc);
      return p;

   } else {
      /* First try and find the block. */
      sc = getShadowChunk ( (Addr)p, &prev_chunks_next_ptr );

      if (sc == NULL) {
         /* No chunk covers 'p': report a bad free and give up. */
         VG_TRACK( bad_free, tst, (Addr)p );
         /* Perhaps we should return to the program regardless. */
         VGP_POPCC(VgpCliMalloc);
         return NULL;
      }
     
      /* check if its a matching free() / delete / delete [] */
      if (Vg_AllocMalloc != sc->allockind) {
         /* can not realloc a range that was allocated with new or new [] */
         VG_TRACK( mismatched_free, tst, (Addr)p );
         /* but keep going anyway */
      }

      if (sc->size == new_size) {
         /* size unchanged: nothing to do, return the same pointer. */
         VGP_POPCC(VgpCliMalloc);
         return p;
         
      } else if (sc->size > new_size) {
         /* new size is smaller: mark the dropped tail dead and shrink
            the shadow chunk in place. */
         VG_TRACK( die_mem_heap, sc->data+new_size, sc->size-new_size );
         sc->size = new_size;
         VGP_POPCC(VgpCliMalloc);
#        ifdef DEBUG_CLIENTMALLOC
         VG_(printf)("[m %d, f %d (%d)] client_realloc_smaller ( %p, %d ) = %p\n", 
                     count_malloclists(), 
                     0/*count_freelist()*/, 0/*vg_freed_list_volume*/,
                     p, new_size, p );
#        endif
         return p;

      } else {
         /* new size is bigger */
         Addr p_new;
         
         /* Get new memory */
         vg_assert(VG_(clo_alignment) >= 4);
         if (VG_(clo_alignment) == 4)
            p_new = (Addr)VG_(arena_malloc)(VG_AR_CLIENT, new_size);
         else
            p_new = (Addr)VG_(arena_malloc_aligned)(VG_AR_CLIENT, 
                                            VG_(clo_alignment), new_size);

         /* First half kept and copied, second half new, 
            red zones as normal */
         VG_TRACK( ban_mem_heap, p_new-VG_AR_CLIENT_REDZONE_SZB, 
                                 VG_AR_CLIENT_REDZONE_SZB );
         VG_TRACK( copy_mem_heap, (Addr)p, p_new, sc->size );
         VG_TRACK( new_mem_heap, p_new+sc->size, new_size-sc->size, 
                   /*inited=*/False );
         VG_TRACK( ban_mem_heap, p_new+new_size, VG_AR_CLIENT_REDZONE_SZB );

         /* Copy from old to new */
         for (i = 0; i < sc->size; i++)
            ((UChar*)p_new)[i] = ((UChar*)p)[i];

         /* Free old memory */
         die_and_free_mem ( tst, sc, prev_chunks_next_ptr );

         /* this has to be after die_and_free_mem, otherwise the
            former succeeds in shorting out the new block, not the
            old, in the case when both are on the same list.  */
         addShadowChunk ( tst, p_new, new_size, Vg_AllocMalloc );

         VGP_POPCC(VgpCliMalloc);
#        ifdef DEBUG_CLIENTMALLOC
         VG_(printf)("[m %d, f %d (%d)] client_realloc_bigger ( %p, %d ) = %p\n", 
                     count_malloclists(), 
                     0/*count_freelist()*/, 0/*vg_freed_list_volume*/,
                     p, new_size, (void*)p_new );
#        endif
         return (void*)p_new;
      }  
   }
}
예제 #8
0
/* Trim mempool 'pool' to the extent [addr, addr+szB): chunks entirely
   inside the extent are kept, chunks entirely outside it are freed,
   and chunks that straddle the boundary are clipped to the
   intersection (with the discarded parts marked noaccess). */
void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
{
    MC_Mempool*  mp;
    MC_Chunk*    mc;
    ThreadId     tid = VG_(get_running_tid)();
    UInt         n_shadows, i;
    VgHashNode** chunks;

    if (VG_(clo_verbosity) > 2) {
        VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %ld)\n",
                     pool, addr, szB);
        VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
    }

    mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
    if (mp == NULL) {
        MC_(record_illegal_mempool_error)(tid, pool);
        return;
    }

    check_mempool_sane(mp);
    /* Snapshot the chunk table: we cannot iterate it while removing
       and re-adding nodes below. */
    chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
    if (n_shadows == 0) {
        tl_assert(chunks == NULL);
        return;
    }

    tl_assert(chunks != NULL);
    for (i = 0; i < n_shadows; ++i) {

        Addr lo, hi, min, max;

        mc = (MC_Chunk*) chunks[i];

        /* [lo, hi] is the inclusive address range of this chunk. */
        lo = mc->data;
        hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;

#define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))

        if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {

            /* The current chunk is entirely within the trim extent: keep
               it. */

            continue;

        } else if ( (! EXTENT_CONTAINS(lo)) &&
                    (! EXTENT_CONTAINS(hi)) ) {

            /* The current chunk is entirely outside the trim extent:
               delete it. */

            if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
                MC_(record_free_error)(tid, (Addr)mc->data);
                VG_(free)(chunks);
                if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
                return;
            }
            die_and_free_mem ( tid, mc, mp->rzB );

        } else {

            /* The current chunk intersects the trim extent: remove,
               trim, and reinsert it. */

            tl_assert(EXTENT_CONTAINS(lo) ||
                      EXTENT_CONTAINS(hi));
            if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
                MC_(record_free_error)(tid, (Addr)mc->data);
                VG_(free)(chunks);
                if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
                return;
            }

            /* min/max span the union of chunk and trim extent; lo/hi
               span their intersection (the part we keep). */
            if (mc->data < addr) {
                min = mc->data;
                lo = addr;
            } else {
                min = addr;
                lo = mc->data;
            }

            /* BUGFIX: the chunk's end is mc->data + mc->szB, not
               mc->data + szB.  The original compared against the trim
               size, which degenerates to (mc->data > addr) and yields
               wrong max/hi whenever the chunk size differs from the
               trim size. */
            if (mc->data + mc->szB > addr + szB) {
                max = mc->data + mc->szB;
                hi = addr + szB;
            } else {
                max = addr + szB;
                hi = mc->data + mc->szB;
            }

            tl_assert(min <= lo);
            tl_assert(lo < hi);
            tl_assert(hi <= max);

            /* Mark the clipped-off head and tail inaccessible. */
            if (min < lo && !EXTENT_CONTAINS(min)) {
                MC_(make_mem_noaccess)( min, lo - min);
            }

            if (hi < max && !EXTENT_CONTAINS(max)) {
                MC_(make_mem_noaccess)( hi, max - hi );
            }

            mc->data = lo;
            mc->szB = (UInt) (hi - lo);
            VG_(HT_add_node)( mp->chunks, mc );
        }

#undef EXTENT_CONTAINS

    }
    check_mempool_sane(mp);
    VG_(free)(chunks);
}
예제 #9
0
/* Client realloc().  Always allocates a fresh block (even when
   shrinking) and retires the old one, so that later accesses through
   the stale pointer can be detected.  Returns the new payload
   address, or NULL on silly args, an unknown block, or allocation
   failure. */
void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
{
    MC_Chunk* mc;
    void*     p_new;
    SizeT     old_szB;

    if (complain_about_silly_args(new_szB, "realloc"))
        return NULL;

    /* realloc is accounted as one free plus one malloc. */
    cmalloc_n_frees ++;
    cmalloc_n_mallocs ++;
    cmalloc_bs_mallocd += (ULong)new_szB;

    /* Remove the old block */
    mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
    if (mc == NULL) {
        MC_(record_free_error) ( tid, (Addr)p_old );
        /* We return to the program regardless. */
        return NULL;
    }

    /* check if its a matching free() / delete / delete [] */
    if (MC_AllocMalloc != mc->allockind) {
        /* can not realloc a range that was allocated with new or new [] */
        tl_assert((Addr)p_old == mc->data);
        MC_(record_freemismatch_error) ( tid, mc );
        /* but keep going anyway */
    }

    old_szB = mc->szB;

    /* In all cases, even when the new size is smaller or unchanged, we
       reallocate and copy the contents, and make the old block
       inaccessible.  This is so as to guarantee to catch all cases of
       accesses via the old address after reallocation, regardless of
       the change in size.  (Of course the ability to detect accesses
       to the old block also depends on the size of the freed blocks
       queue). */

    if (new_szB <= old_szB) {
        /* new size is smaller or the same */
        Addr a_new;
        /* Get new memory */
        a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

        if (a_new) {
            ExeContext* ec;

            ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
            tl_assert(ec);

            /* Retained part is copied, red zones set as normal */
            MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB,
                                    MC_MALLOC_REDZONE_SZB );
            MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
            MC_(make_mem_noaccess)        ( a_new+new_szB, MC_MALLOC_REDZONE_SZB );

            /* Copy from old to new */
            VG_(memcpy)((void*)a_new, p_old, new_szB);

            /* Possibly fill freed area with specified junk. */
            if (MC_(clo_free_fill) != -1) {
                tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
                VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
            }

            /* Free old memory */
            /* Nb: we have to allocate a new MC_Chunk for the new memory rather
               than recycling the old one, so that any erroneous accesses to the
               old memory are reported. */
            die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB );

            // Allocate a new chunk.
            mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
        }

        /* If allocation failed, a_new is 0 and so p_new is NULL; the
           old chunk (still in 'mc') is re-added below unchanged. */
        p_new = (void*)a_new;

    } else {
        /* new size is bigger */
        Addr a_new;
        tl_assert(old_szB < new_szB);
        /* Get new memory */
        a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

        if (a_new) {
            UInt        ecu;
            ExeContext* ec;

            ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
            tl_assert(ec);
            /* ECU is needed to tag the newly-grown area as undefined
               with an origin. */
            ecu = VG_(get_ECU_from_ExeContext)(ec);
            tl_assert(VG_(is_plausible_ECU)(ecu));

            /* First half kept and copied, second half new, red zones as normal */
            MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB,
                                    MC_MALLOC_REDZONE_SZB );
            MC_(copy_address_range_state) ( (Addr)p_old, a_new, mc->szB );
            MC_(make_mem_undefined_w_otag)( a_new+mc->szB, new_szB-mc->szB,
                                            ecu | MC_OKIND_HEAP );
            MC_(make_mem_noaccess)        ( a_new+new_szB, MC_MALLOC_REDZONE_SZB );

            /* Possibly fill new area with specified junk */
            if (MC_(clo_malloc_fill) != -1) {
                tl_assert(MC_(clo_malloc_fill) >= 0x00
                          && MC_(clo_malloc_fill) <= 0xFF);
                VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill),
                            new_szB-old_szB);
            }

            /* Copy from old to new */
            VG_(memcpy)((void*)a_new, p_old, mc->szB);

            /* Possibly fill freed area with specified junk. */
            if (MC_(clo_free_fill) != -1) {
                tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
                VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
            }

            /* Free old memory */
            /* Nb: we have to allocate a new MC_Chunk for the new memory rather
               than recycling the old one, so that any erroneous accesses to the
               old memory are reported. */
            die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB );

            // Allocate a new chunk.
            mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
        }

        /* As above: on failure p_new is NULL and the old chunk is
           re-added unchanged. */
        p_new = (void*)a_new;
    }

    // Now insert the new mc (with a possibly new 'data' field) into
    // malloc_list.  If this realloc() did not increase the memory size, we
    // will have removed and then re-added mc unnecessarily.  But that's ok
    // because shrinking a block with realloc() is (presumably) much rarer
    // than growing it, and this way simplifies the growing case.
    VG_(HT_add_node)( MC_(malloc_list), mc );

    return p_new;
}
예제 #10
0
/* Client realloc().  Always allocates a fresh block (even when the
   size shrinks or stays the same) and retires the old one, so that
   later accesses through the stale pointer are caught.  On allocation
   failure the old chunk is re-inserted and NULL is returned. */
void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
{
   MC_Chunk* old_mc;
   MC_Chunk* new_mc;
   Addr      a_new; 
   SizeT     old_szB;

   if (MC_(record_fishy_value_error)(tid, "realloc", "size", new_szB))
      return NULL;

   /* realloc is accounted as one free plus one malloc. */
   cmalloc_n_frees ++;
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)new_szB;

   /* Remove the old block */
   old_mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
   if (old_mc == NULL) {
      MC_(record_free_error) ( tid, (Addr)p_old );
      /* We return to the program regardless. */
      return NULL;
   }

   /* check if its a matching free() / delete / delete [] */
   if (MC_AllocMalloc != old_mc->allockind) {
      /* can not realloc a range that was allocated with new or new [] */
      tl_assert((Addr)p_old == old_mc->data);
      /* NOTE(review): called without the MC_() namespace wrapper,
         unlike the other reporters here — presumably a file-local
         static; confirm against the rest of the file. */
      record_freemismatch_error ( tid, old_mc );
      /* but keep going anyway */
   }

   old_szB = old_mc->szB;

   /* Get new memory */
   a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

   if (a_new) {
      /* In all cases, even when the new size is smaller or unchanged, we
         reallocate and copy the contents, and make the old block
         inaccessible.  This is so as to guarantee to catch all cases of
         accesses via the old address after reallocation, regardless of
         the change in size.  (Of course the ability to detect accesses
         to the old block also depends on the size of the freed blocks
         queue). */

      // Allocate a new chunk.
      new_mc = create_MC_Chunk( tid, a_new, new_szB, MC_AllocMalloc );

      // Now insert the new mc (with a new 'data' field) into malloc_list.
      VG_(HT_add_node)( MC_(malloc_list), new_mc );

      /* Retained part is copied, red zones set as normal */

      /* Redzone at the front */
      MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_SzB), 
                              MC_(Malloc_Redzone_SzB) );

      /* payload */
      if (old_szB >= new_szB) {
         /* new size is smaller or the same */

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
         VG_(memcpy)((void*)a_new, p_old, new_szB);
      } else {
         /* new size is bigger */
         UInt        ecu;

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, old_szB );
         VG_(memcpy)((void*)a_new, p_old, old_szB);

         // If the block has grown, we mark the grown area as undefined.
         // We have to do that after VG_(HT_add_node) to ensure the ecu
         // execontext is for a fully allocated block.
         ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(new_mc));
         tl_assert(VG_(is_plausible_ECU)(ecu));
         MC_(make_mem_undefined_w_otag)( a_new+old_szB,
                                         new_szB-old_szB,
                                         ecu | MC_OKIND_HEAP );

         /* Possibly fill new area with specified junk */
         if (MC_(clo_malloc_fill) != -1) {
            tl_assert(MC_(clo_malloc_fill) >= 0x00
                      && MC_(clo_malloc_fill) <= 0xFF);
            VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill), 
                                                new_szB-old_szB);
         }
      }

      /* Redzone at the back. */
      MC_(make_mem_noaccess)        ( a_new+new_szB, MC_(Malloc_Redzone_SzB));

      /* Possibly fill freed area with specified junk. */
      if (MC_(clo_free_fill) != -1) {
         tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
         VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
      }

      /* Free old memory */
      /* Nb: we have to allocate a new MC_Chunk for the new memory rather
         than recycling the old one, so that any erroneous accesses to the
         old memory are reported. */
      die_and_free_mem ( tid, old_mc, MC_(Malloc_Redzone_SzB) );

   } else {
      /* Could not allocate new client memory.
         Re-insert the old_mc (with the old ptr) in the HT, as old_mc was
         unconditionally removed at the beginning of the function. */
      VG_(HT_add_node)( MC_(malloc_list), old_mc );
   }

   return (void*)a_new;
}