void MC_(mempool_free)(Addr pool, Addr addr)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)\n", pool, addr);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);

   mc = VG_(HT_remove)(mp->chunks, (UWord)addr);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addr);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "mempool_free(0x%lx, 0x%lx) freed chunk of %ld bytes\n",
                   pool, addr, mc->szB + 0UL);
   }

   die_and_free_mem ( tid, mc, mp->rzB );
   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
}
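/* Client-side sketch (not part of this file): this function is reached via
   the VALGRIND_MEMPOOL_FREE client request from memcheck.h.  The MyPool
   type and allocator function below are hypothetical; only the macro is
   real.

      #include "memcheck.h"

      void my_pool_free ( MyPool* p, void* ptr )
      {
         // ... return ptr to the pool's internal free list ...
         VALGRIND_MEMPOOL_FREE(p, ptr);  // chunk is removed and the memory
                                         // re-marked noaccess, as above
      }
*/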
void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %ld)\n",
                   pool, addrA, addrB, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);

   mc = VG_(HT_remove)(mp->chunks, (UWord)addrA);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addrA);
      return;
   }

   mc->data = addrB;
   mc->szB  = szB;
   VG_(HT_add_node)( mp->chunks, mc );

   check_mempool_sane(mp);
}
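/* Client-side sketch: a pool allocator that moves and/or resizes a live
   block reports it with VALGRIND_MEMPOOL_CHANGE from memcheck.h, which
   lands here.  MyPool and pool_move_block are hypothetical; the macro is
   real.

      #include "memcheck.h"

      void* my_pool_realloc ( MyPool* p, void* old, size_t newSzB )
      {
         void* newp = pool_move_block(p, old, newSzB);  // hypothetical
         VALGRIND_MEMPOOL_CHANGE(p, old, newp, newSzB); // rekey the chunk
         return newp;
      }
*/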
void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %ld)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup) ( MC_(mempool_list), (UWord)pool );
   if (mp == NULL) {
      MC_(record_illegal_mempool_error) ( tid, pool );
   } else {
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
      MC_(new_block)(tid, addr, szB, /*ignored*/0, mp->is_zeroed,
                     MC_AllocCustom, mp->chunks);
      if (mp->rzB > 0) {
         // This is not needed if the user application has properly
         // marked the superblock noaccess when defining the mempool.
         // We however still mark the redzones noaccess to still catch
         // some bugs if user forgot.
         MC_(make_mem_noaccess) ( addr - mp->rzB, mp->rzB);
         MC_(make_mem_noaccess) ( addr + szB, mp->rzB);
      }
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   }
}
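/* Client-side sketch: an allocation carved out of the pool's superblock is
   announced with VALGRIND_MEMPOOL_ALLOC from memcheck.h, which is what
   drives this function.  MyPool and pool_carve are hypothetical; the macro
   is real.

      #include "memcheck.h"

      void* my_pool_alloc ( MyPool* p, size_t szB )
      {
         void* a = pool_carve(p, szB);      // hypothetical bump allocator
         VALGRIND_MEMPOOL_ALLOC(p, a, szB); // a..a+szB now undefined
                                            // (or defined, if is_zeroed)
         return a;
      }
*/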
void MC_(destroy_mempool)(Addr pool)
{
   MC_Chunk*   mc;
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "destroy_mempool(0x%lx)\n", pool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)pool );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, pool );
      return;
   }
   check_mempool_sane(mp);

   // Clean up the chunks, one by one
   VG_(HT_ResetIter)(mp->chunks);
   while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
      /* Note: make redzones noaccess again -- just in case user made them
         accessible with a client request... */
      MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->szB + 2*mp->rzB );
   }
   // Destroy the chunk table
   VG_(HT_destruct)(mp->chunks, (void (*)(void *))delete_MC_Chunk);

   VG_(free)(mp);
}
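/* Client-side sketch: tearing down a pool pairs the release of the
   superblock with VALGRIND_DESTROY_MEMPOOL from memcheck.h.  MyPool and
   its superblock field are hypothetical; the macro is real.

      #include "memcheck.h"

      void my_pool_destroy ( MyPool* p )
      {
         VALGRIND_DESTROY_MEMPOOL(p);  // drops all chunk bookkeeping above
         free(p->superblock);          // then release the real memory
         free(p);
      }
*/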
void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "create_mempool(0x%lx, %d, %d)\n",
                               pool, rzB, is_zeroed);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp != NULL) {
      VG_(tool_panic)("MC_(create_mempool): duplicate pool creation");
   }

   mp = VG_(malloc)("mc.cm.1", sizeof(MC_Mempool));
   mp->pool      = pool;
   mp->rzB       = rzB;
   mp->is_zeroed = is_zeroed;
   mp->chunks    = VG_(HT_construct)( "MC_(create_mempool)" );
   check_mempool_sane(mp);

   /* Paranoia ... ensure this area is off-limits to the client, so
      the mp->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by VG_(malloc)
      should be noaccess as far as the client is concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mp, sizeof(MC_Mempool), NULL )) {
      VG_(tool_panic)("MC_(create_mempool): shadow area is accessible");
   }

   VG_(HT_add_node)( MC_(mempool_list), mp );
}
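/* Client-side sketch: a pool is registered with VALGRIND_CREATE_MEMPOOL
   from memcheck.h, giving the redzone size and whether memory handed out
   of the pool should be treated as zeroed (defined).  MyPool is
   hypothetical; the macro is real.

      #include "memcheck.h"

      MyPool* my_pool_create ( size_t superSzB )
      {
         MyPool* p = malloc(sizeof(MyPool));
         p->superblock = malloc(superSzB);
         VALGRIND_CREATE_MEMPOOL(p, 16, 0);  // rzB=16, is_zeroed=False
         return p;
      }
*/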
void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();
   UInt         n_shadows, i;
   VgHashNode** chunks;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %ld)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);
   chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
   if (n_shadows == 0) {
      tl_assert(chunks == NULL);
      return;
   }

   tl_assert(chunks != NULL);
   for (i = 0; i < n_shadows; ++i) {

      Addr lo, hi, min, max;

      mc = (MC_Chunk*) chunks[i];

      lo = mc->data;
      hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;

#define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))

      if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {

         /* The current chunk is entirely within the trim extent: keep
            it. */

         continue;

      } else if ( (! EXTENT_CONTAINS(lo)) &&
                  (! EXTENT_CONTAINS(hi)) ) {

         /* The current chunk is entirely outside the trim extent:
            delete it. */

         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }
         die_and_free_mem ( tid, mc, mp->rzB );

      } else {

         /* The current chunk intersects the trim extent: remove,
            trim, and reinsert it. */

         tl_assert(EXTENT_CONTAINS(lo) ||
                   EXTENT_CONTAINS(hi));
         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }

         if (mc->data < addr) {
            min = mc->data;
            lo  = addr;
         } else {
            min = addr;
            lo  = mc->data;
         }

         if (mc->data + szB > addr + szB) {
            max = mc->data + szB;
            hi  = addr + szB;
         } else {
            max = addr + szB;
            hi  = mc->data + szB;
         }

         tl_assert(min <= lo);
         tl_assert(lo < hi);
         tl_assert(hi <= max);

         if (min < lo && !EXTENT_CONTAINS(min)) {
            MC_(make_mem_noaccess)( min, lo - min);
         }

         if (hi < max && !EXTENT_CONTAINS(max)) {
            MC_(make_mem_noaccess)( hi, max - hi );
         }

         mc->data = lo;
         mc->szB  = (UInt) (hi - lo);
         VG_(HT_add_node)( mp->chunks, mc );
      }

#undef EXTENT_CONTAINS

   }
   check_mempool_sane(mp);
   VG_(free)(chunks);
}
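/* Client-side sketch: VALGRIND_MEMPOOL_TRIM(pool, addr, size) from
   memcheck.h drives this function.  It tells Memcheck that only the range
   addr..addr+size of the pool survives: chunks wholly outside it are
   discarded and straddling chunks are cut down, as implemented above.
   MyPool and pool_shrink are hypothetical; the macro is real.

      #include "memcheck.h"

      void my_pool_shrink ( MyPool* p, void* keepBase, size_t keepSzB )
      {
         pool_shrink(p, keepBase, keepSzB);           // hypothetical
         VALGRIND_MEMPOOL_TRIM(p, keepBase, keepSzB); // drop the rest
      }
*/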