Example #1: MC_(new_block) and an earlier MC_(realloc) from Valgrind Memcheck's allocator wrappers (mc_malloc_wrappers.c).
/* Allocate memory and note change in memory available */
void* MC_(new_block) ( ThreadId tid,
                       Addr p, SizeT szB, SizeT alignB, UInt rzB,
                       Bool is_zeroed, MC_AllocKind kind, VgHashTable table)
{
   ExeContext* ec;

   cmalloc_n_mallocs ++;

   // Allocate and zero if necessary
   if (p) {
      tl_assert(MC_AllocCustom == kind);
   } else {
      tl_assert(MC_AllocCustom != kind);
      p = (Addr)VG_(cli_malloc)( alignB, szB );
      if (!p) {
         return NULL;
      }
      if (is_zeroed) {
         VG_(memset)((void*)p, 0, szB);
      } else 
      if (MC_(clo_malloc_fill) != -1) {
         tl_assert(MC_(clo_malloc_fill) >= 0x00 && MC_(clo_malloc_fill) <= 0xFF);
         VG_(memset)((void*)p, MC_(clo_malloc_fill), szB);
      }
   }

   // Only update this stat if allocation succeeded.
   cmalloc_bs_mallocd += (ULong)szB;

   ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   tl_assert(ec);

   VG_(HT_add_node)( table, create_MC_Chunk(ec, p, szB, kind) );

   if (is_zeroed)
      MC_(make_mem_defined)( p, szB );
   else {
      UInt ecu = VG_(get_ECU_from_ExeContext)(ec);
      tl_assert(VG_(is_plausible_ECU)(ecu));
      MC_(make_mem_undefined_w_otag)( p, szB, ecu | MC_OKIND_HEAP );
   }

   return (void*)p;
}
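
For context, MC_(new_block) is not called by client code directly; the allocator entry points wrap it. Below is a minimal sketch, modeled on Memcheck's own wrappers, of how a plain malloc would dispatch to it (exact signatures vary between Valgrind versions; the names used are taken from the code above or from the same source file):

/* Sketch: a malloc-style entry point calling MC_(new_block).  Passing
   p == 0 tells MC_(new_block) to do the allocation itself via
   VG_(cli_malloc); the resulting chunk is tracked in MC_(malloc_list). */
void* MC_(malloc) ( ThreadId tid, SizeT n )
{
   if (complain_about_silly_args(n, "malloc")) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
                              MC_MALLOC_REDZONE_SZB, /*is_zeroed*/False,
                              MC_AllocMalloc, MC_(malloc_list) );
   }
}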
void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
{
    MC_Chunk* mc;
    void*     p_new;
    SizeT     old_szB;

    if (complain_about_silly_args(new_szB, "realloc"))
        return NULL;

    cmalloc_n_frees ++;
    cmalloc_n_mallocs ++;
    cmalloc_bs_mallocd += (ULong)new_szB;

    /* Remove the old block */
    mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
    if (mc == NULL) {
        MC_(record_free_error) ( tid, (Addr)p_old );
        /* We return to the program regardless. */
        return NULL;
    }

    /* check if it's a matching free() / delete / delete [] */
    if (MC_AllocMalloc != mc->allockind) {
        /* cannot realloc a range that was allocated with new or new [] */
        tl_assert((Addr)p_old == mc->data);
        MC_(record_freemismatch_error) ( tid, mc );
        /* but keep going anyway */
    }

    old_szB = mc->szB;

    /* In all cases, even when the new size is smaller or unchanged, we
       reallocate and copy the contents, and make the old block
       inaccessible.  This is so as to guarantee to catch all cases of
       accesses via the old address after reallocation, regardless of
       the change in size.  (Of course the ability to detect accesses
       to the old block also depends on the size of the freed blocks
       queue). */

    if (new_szB <= old_szB) {
        /* new size is smaller or the same */
        Addr a_new;
        /* Get new memory */
        a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

        if (a_new) {
            ExeContext* ec;

            ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
            tl_assert(ec);

            /* Retained part is copied, red zones set as normal */
            MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB,
                                    MC_MALLOC_REDZONE_SZB );
            MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
            MC_(make_mem_noaccess)        ( a_new+new_szB, MC_MALLOC_REDZONE_SZB );

            /* Copy from old to new */
            VG_(memcpy)((void*)a_new, p_old, new_szB);

            /* Possibly fill freed area with specified junk. */
            if (MC_(clo_free_fill) != -1) {
                tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
                VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
            }

            /* Free old memory */
            /* Nb: we have to allocate a new MC_Chunk for the new memory rather
               than recycling the old one, so that any erroneous accesses to the
               old memory are reported. */
            die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB );

            // Allocate a new chunk.
            mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
        }

        p_new = (void*)a_new;

    } else {
        /* new size is bigger */
        Addr a_new;
        tl_assert(old_szB < new_szB);
        /* Get new memory */
        a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

        if (a_new) {
            UInt        ecu;
            ExeContext* ec;

            ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
            tl_assert(ec);
            ecu = VG_(get_ECU_from_ExeContext)(ec);
            tl_assert(VG_(is_plausible_ECU)(ecu));

            /* First half kept and copied, second half new, red zones as normal */
            MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB,
                                    MC_MALLOC_REDZONE_SZB );
            MC_(copy_address_range_state) ( (Addr)p_old, a_new, mc->szB );
            MC_(make_mem_undefined_w_otag)( a_new+mc->szB, new_szB-mc->szB,
                                            ecu | MC_OKIND_HEAP );
            MC_(make_mem_noaccess)        ( a_new+new_szB, MC_MALLOC_REDZONE_SZB );

            /* Possibly fill new area with specified junk */
            if (MC_(clo_malloc_fill) != -1) {
                tl_assert(MC_(clo_malloc_fill) >= 0x00
                          && MC_(clo_malloc_fill) <= 0xFF);
                VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill),
                            new_szB-old_szB);
            }

            /* Copy from old to new */
            VG_(memcpy)((void*)a_new, p_old, mc->szB);

            /* Possibly fill freed area with specified junk. */
            if (MC_(clo_free_fill) != -1) {
                tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
                VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
            }

            /* Free old memory */
            /* Nb: we have to allocate a new MC_Chunk for the new memory rather
               than recycling the old one, so that any erroneous accesses to the
               old memory are reported. */
            die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB );

            // Allocate a new chunk.
            mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
        }

        p_new = (void*)a_new;
    }

    // Now insert the new mc (with a possibly new 'data' field) into
    // malloc_list.  If this realloc() did not increase the memory size, we
    // will have removed and then re-added mc unnecessarily.  But that's ok
    // because shrinking a block with realloc() is (presumably) much rarer
    // than growing it, and this way simplifies the growing case.
    VG_(HT_add_node)( MC_(malloc_list), mc );

    return p_new;
}
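
Both versions defer the actual release to die_and_free_mem, which the comments only allude to ("the size of the freed blocks queue"). Here is a minimal sketch of the idea, inferred from those comments; the real helper lives elsewhere in the same file, add_to_freed_queue is a hypothetical name, and the details differ between versions:

/* Sketch of die_and_free_mem's job: poison the whole block (payload plus
   both redzones) and park the chunk on a freed-blocks queue rather than
   releasing it immediately, so stale accesses keep being reported for a
   while (real Memcheck bounds the queue with --freelist-vol). */
static void die_and_free_mem_sketch ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
{
   MC_(make_mem_noaccess)( mc->data - rzB, mc->szB + 2*rzB );
   /* Assumed field and helper: record the freeing context, then queue. */
   mc->where = VG_(record_ExeContext)( tid, 0/*first_ip_delta*/ );
   add_to_freed_queue( mc );
}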
Example #3: a refactored MC_(realloc) from the same file that unifies the shrink and grow paths and re-inserts the old chunk when allocation fails.
void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
{
   MC_Chunk* old_mc;
   MC_Chunk* new_mc;
   Addr      a_new; 
   SizeT     old_szB;

   if (MC_(record_fishy_value_error)(tid, "realloc", "size", new_szB))
      return NULL;

   cmalloc_n_frees ++;
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)new_szB;

   /* Remove the old block */
   old_mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
   if (old_mc == NULL) {
      MC_(record_free_error) ( tid, (Addr)p_old );
      /* We return to the program regardless. */
      return NULL;
   }

   /* check if it's a matching free() / delete / delete [] */
   if (MC_AllocMalloc != old_mc->allockind) {
      /* cannot realloc a range that was allocated with new or new [] */
      tl_assert((Addr)p_old == old_mc->data);
      record_freemismatch_error ( tid, old_mc );
      /* but keep going anyway */
   }

   old_szB = old_mc->szB;

   /* Get new memory */
   a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

   if (a_new) {
      /* In all cases, even when the new size is smaller or unchanged, we
         reallocate and copy the contents, and make the old block
         inaccessible.  This is so as to guarantee to catch all cases of
         accesses via the old address after reallocation, regardless of
         the change in size.  (Of course the ability to detect accesses
         to the old block also depends on the size of the freed blocks
         queue). */

      // Allocate a new chunk.
      new_mc = create_MC_Chunk( tid, a_new, new_szB, MC_AllocMalloc );

      // Now insert the new mc (with a new 'data' field) into malloc_list.
      VG_(HT_add_node)( MC_(malloc_list), new_mc );

      /* Retained part is copied, red zones set as normal */

      /* Redzone at the front */
      MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_SzB), 
                              MC_(Malloc_Redzone_SzB) );

      /* payload */
      if (old_szB >= new_szB) {
         /* new size is smaller or the same */

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
         VG_(memcpy)((void*)a_new, p_old, new_szB);
      } else {
         /* new size is bigger */
         UInt        ecu;

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, old_szB );
         VG_(memcpy)((void*)a_new, p_old, old_szB);

         // If the block has grown, we mark the grown area as undefined.
         // We have to do that after VG_(HT_add_node) to ensure the ecu
         // execontext is for a fully allocated block.
         ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(new_mc));
         tl_assert(VG_(is_plausible_ECU)(ecu));
         MC_(make_mem_undefined_w_otag)( a_new+old_szB,
                                         new_szB-old_szB,
                                         ecu | MC_OKIND_HEAP );

         /* Possibly fill new area with specified junk */
         if (MC_(clo_malloc_fill) != -1) {
            tl_assert(MC_(clo_malloc_fill) >= 0x00
                      && MC_(clo_malloc_fill) <= 0xFF);
            VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill), 
                                                new_szB-old_szB);
         }
      }

      /* Redzone at the back. */
      MC_(make_mem_noaccess)        ( a_new+new_szB, MC_(Malloc_Redzone_SzB));

      /* Possibly fill freed area with specified junk. */
      if (MC_(clo_free_fill) != -1) {
         tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
         VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
      }

      /* Free old memory */
      /* Nb: we have to allocate a new MC_Chunk for the new memory rather
         than recycling the old one, so that any erroneous accesses to the
         old memory are reported. */
      die_and_free_mem ( tid, old_mc, MC_(Malloc_Redzone_SzB) );

   } else {
      /* Could not allocate new client memory.
         Re-insert the old_mc (with the old ptr) in the HT, as old_mc was
         unconditionally removed at the beginning of the function. */
      VG_(HT_add_node)( MC_(malloc_list), old_mc );
   }

   return (void*)a_new;
}
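
The policy both versions implement (always move the block and make the old range inaccessible) is easy to observe from the client side. The following hypothetical test program is not from the Valgrind sources; run under valgrind --tool=memcheck, the read through the stale pointer should be reported as an invalid read:

/* test_realloc_stale.c: hypothetical client program.
   gcc -g test_realloc_stale.c && valgrind --tool=memcheck ./a.out */
#include <stdlib.h>
#include <stdio.h>

int main(void)
{
   char* old_p = malloc(16);
   old_p[0] = 'x';

   /* Even though the block shrinks, Memcheck's realloc moves it, so
      old_p now points at freed (noaccess) memory. */
   char* new_p = realloc(old_p, 8);

   printf("%c\n", old_p[0]);   /* invalid read of freed memory */

   free(new_p);
   return 0;
}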