/* This prints out the message for the error types where Memcheck and
   Addrcheck have identical messages.  Anything not handled here is a
   hard internal error. */
void MAC_(pp_shared_SkinError) ( Error* err )
{
   MAC_Error* err_extra = VG_(get_error_extra)(err);

   switch (VG_(get_error_kind)(err)) {
      case FreeErr:
         VG_(message)(Vg_UserMsg, "Invalid free() / delete / delete[]");
         /* fall through */
      case FreeMismatchErr:
         /* Reached directly for FreeMismatchErr, and via the fall
            through above for FreeErr; the re-check of the error kind
            ensures the mismatch message only appears for the former.
            The exe context + address description tail is shared by
            both error kinds. */
         if (VG_(get_error_kind)(err) == FreeMismatchErr)
            VG_(message)(Vg_UserMsg,
                         "Mismatched free() / delete / delete []");
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         MAC_(pp_AddrInfo)(VG_(get_error_address)(err),
                           &err_extra->addrinfo);
         break;

      case LeakErr: {
         /* Totally abusing the types of these spare fields... oh
            well.  The record index and total are smuggled through the
            error's address and string slots respectively. */
         UInt n_this_record   = (UInt)VG_(get_error_address)(err);
         UInt n_total_records = (UInt)VG_(get_error_string) (err);
         MAC_(pp_LeakError)(err_extra, n_this_record, n_total_records);
         break;
      }

      default:
         VG_(printf)("Error:\n unknown Memcheck/Addrcheck error code %d\n",
                     VG_(get_error_kind)(err));
         VG_(skin_panic)("unknown error code in MAC_(pp_shared_SkinError)");
   }
}
/* Handle a client free()/delete/delete[] of the block at p, carrying
   redzone size rzB.  'kind' records which deallocator was used so we
   can detect mismatched alloc/free pairs. */
__inline__ void MAC_(handle_free) ( Addr p, UInt rzB, MAC_AllocKind kind )
{
   MAC_Chunk*  found;
   MAC_Chunk** link_ptr;
   ThreadId    tid = VG_(get_current_or_recent_tid)();

   VGP_PUSHCC(VgpCliMalloc);
   cmalloc_n_frees++;

   found = (MAC_Chunk*)VG_(HT_get_node) ( MAC_(malloc_list), (UInt)p,
                                          (VgHashNode***)&link_ptr );
   if (NULL == found) {
      /* Not a live heap block: report the bogus free and bail out. */
      MAC_(record_free_error) ( tid, p );
      VGP_POPCC(VgpCliMalloc);
      return;
   }

   /* Block exists -- but was it released with the matching
      free() / delete / delete [] ? */
   if (kind != found->allockind)
      MAC_(record_freemismatch_error) ( tid, p );

   die_and_free_mem ( found, link_ptr, rzB );
   VGP_POPCC(VgpCliMalloc);
}
/* Compute a quick summary of the leak check. */ static void make_summary() { Int i; for(i = 0; i < lc_n_shadows; i++) { SizeT size = lc_shadows[i]->size; switch(lc_markstack[i].state) { case Unreached: blocks_leaked++; MAC_(bytes_leaked) += size; break; case Proper: blocks_reachable++; MAC_(bytes_reachable) += size; break; case Interior: blocks_dubious++; MAC_(bytes_dubious) += size; break; case IndirectLeak: /* shouldn't happen */ blocks_indirect++; MAC_(bytes_indirect) += size; break; } } }
static void ac_pp_Error ( Error* err ) { MAC_Error* err_extra = VG_(get_error_extra)(err); switch (VG_(get_error_kind)(err)) { case CoreMemErr: VG_(message)(Vg_UserMsg, "%s contains unaddressable byte(s)", VG_(get_error_string)(err)); VG_(pp_ExeContext)( VG_(get_error_where)(err) ); break; case ParamErr: VG_(message)(Vg_UserMsg, "Syscall param %s contains unaddressable byte(s)", VG_(get_error_string)(err) ); VG_(pp_ExeContext)( VG_(get_error_where)(err) ); MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo); break; case UserErr: VG_(message)(Vg_UserMsg, "Unaddressable byte(s) found during client check request"); VG_(pp_ExeContext)( VG_(get_error_where)(err) ); MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo); break; default: MAC_(pp_shared_Error)(err); break; } }
/* Mark a heap block's memory as dead, unlink its shadow chunk from the
   malloc list, and either queue the chunk for delayed reuse or free it
   immediately (for client-custom blocks).  Statement order here is
   deliberate -- see the comments below. */
static void die_and_free_mem ( MAC_Chunk* mc,
                               MAC_Chunk** prev_chunks_next_ptr, UInt rzB )
{
   /* Note: ban redzones again -- just in case user de-banned them
      with a client request... */
   MAC_(ban_mem_heap)( mc->data-rzB, rzB );
   MAC_(die_mem_heap)( mc->data, mc->size );
   MAC_(ban_mem_heap)( mc->data+mc->size, rzB );

   /* Remove mc from the malloclist using prev_chunks_next_ptr to
      avoid repeating the hash table lookup.  Can't remove until at
      least after free and free_mismatch errors are done because they
      use describe_addr() which looks for it in malloclist. */
   *prev_chunks_next_ptr = mc->next;

   /* Record where freed */
   mc->where = VG_(get_ExeContext) ( VG_(get_current_or_recent_tid)() );

   /* Put it out of harm's way for a while, if not from a client
      request.  The freed queue lets use-after-free accesses still be
      described; custom chunks are released right away. */
   if (MAC_AllocCustom != mc->allockind)
      add_to_freed_queue ( mc );
   else
      VG_(free) ( mc );
}
/* Free a block previously handed out from mempool 'pool'.  Reports an
   error (rather than crashing) if the pool or the address is unknown. */
void MAC_(mempool_free)(Addr pool, Addr addr)
{
   MAC_Mempool*  mp;
   MAC_Mempool** prev_pool;
   MAC_Chunk*    mc;
   MAC_Chunk**   prev_chunk;
   ThreadId      tid = VG_(get_running_tid)();

   mp = (MAC_Mempool*)VG_(HT_get_node)(MAC_(mempool_list), (UWord)pool,
                                       (void*)&prev_pool);
   if (mp == NULL) {
      MAC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   mc = (MAC_Chunk*)VG_(HT_get_node)(mp->chunks, (UWord)addr,
                                     (void*)&prev_chunk);
   if (mc == NULL) {
      MAC_(record_free_error)(tid, (Addr)addr);
      return;
   }

   /* Fix: die_and_free_mem() takes (chunk, prev_link, rzB); the old
      call passed a spurious leading 'tid' argument that does not match
      the definition in this file. */
   die_and_free_mem ( mc, prev_chunk, mp->rzB );
}
/* Allocate memory and note change in memory available.  If 'p' is
   non-zero the client supplied the storage itself (custom allocator,
   MALLOCLIKE_BLOCK); otherwise we allocate on its behalf.  Returns
   NULL on allocation failure. */
__inline__
void* MAC_(new_block) ( ThreadId tid, Addr p, SizeT size, SizeT align,
                        UInt rzB, Bool is_zeroed, MAC_AllocKind kind,
                        VgHashTable table)
{
   VGP_PUSHCC(VgpCliMalloc);

   cmalloc_n_mallocs ++;

   if (0 == p) {
      /* We do the allocation; custom blocks never take this path. */
      tl_assert(MAC_AllocCustom != kind);
      p = (Addr)VG_(cli_malloc)( align, size );
      if (0 == p) {
         VGP_POPCC(VgpCliMalloc);
         return NULL;
      }
      if (is_zeroed)
         VG_(memset)((void*)p, 0, size);
   } else {
      /* Client-supplied memory: must be a custom allocation. */
      tl_assert(MAC_AllocCustom == kind);
   }

   /* Only update this stat if allocation succeeded. */
   cmalloc_bs_mallocd += size;

   add_MAC_Chunk( tid, p, size, kind, table );

   /* Redzones are off-limits; the payload itself becomes live. */
   MAC_(ban_mem_heap)( p - rzB,  rzB );
   MAC_(new_mem_heap)( p,        size, is_zeroed );
   MAC_(ban_mem_heap)( p + size, rzB );

   VGP_POPCC(VgpCliMalloc);

   return (void*)p;
}
/* Describe an address as best you can, for error messages, putting
   the result in ai.  The searches are tried in a fixed priority
   order: tool-specific describer, thread stacks, recently-freed
   blocks, then live malloc'd blocks; first hit wins. */
static void describe_addr ( Addr a, AddrInfo* ai )
{
   MAC_Chunk* sc;
   ThreadId   tid;

   /* Nested functions, yeah.  Need the lexical scoping of 'a'.
      (GCC extension -- these close over 'a' from the enclosing
      frame.) */

   /* Closure for searching thread stacks */
   Bool addr_is_in_bounds(Addr stack_min, Addr stack_max)
   {
      return (stack_min <= a && a <= stack_max);
   }
   /* Closure for searching free'd list */
   Bool addr_is_in_MAC_Chunk(MAC_Chunk* mc)
   {
      return VG_(addr_is_in_block)( a, mc->data, mc->size );
   }
   /* Closure for searching malloc'd lists */
   Bool addr_is_in_HashNode(VgHashNode* sh_ch)
   {
      return addr_is_in_MAC_Chunk( (MAC_Chunk*)sh_ch );
   }

   /* Perhaps it's a user-def'd block ?  (only check if requested,
      though -- the hook pointer is NULL when the tool doesn't
      provide one) */
   if (NULL != MAC_(describe_addr_supp)) {
      if (MAC_(describe_addr_supp)( a, ai ))
         return;
   }
   /* Perhaps it's on a thread's stack? */
   tid = VG_(first_matching_thread_stack)(addr_is_in_bounds);
   if (tid != VG_INVALID_THREADID) {
      ai->akind     = Stack;
      ai->stack_tid = tid;
      return;
   }
   /* Search for a recently freed block which might bracket it. */
   sc = MAC_(first_matching_freed_MAC_Chunk)(addr_is_in_MAC_Chunk);
   if (NULL != sc) {
      ai->akind      = Freed;
      ai->blksize    = sc->size;
      ai->rwoffset   = (Int)a - (Int)sc->data;
      ai->lastchange = sc->where;
      return;
   }
   /* Search for a currently malloc'd block which might bracket it. */
   sc = (MAC_Chunk*)VG_(HT_first_match)(MAC_(malloc_list),
                                        addr_is_in_HashNode);
   if (NULL != sc) {
      ai->akind      = Mallocd;
      ai->blksize    = sc->size;
      ai->rwoffset   = (Int)(a) - (Int)sc->data;
      ai->lastchange = sc->where;
      return;
   }
   /* Clueless ... */
   ai->akind = Unknown;
   return;
}
/* Client new[]: delegate to the common allocator with the NewVec
   allocation kind, so delete[] mismatches can be detected. */
void* MAC_(__builtin_vec_new) ( ThreadId tid, SizeT n )
{
   if (complain_about_silly_args(n, "__builtin_vec_new"))
      return NULL;

   return MAC_(new_block) ( tid, 0, n, VG_(clo_alignment),
                            MAC_MALLOC_REDZONE_SZB, /*is_zeroed*/False,
                            MAC_AllocNewVec, MAC_(malloc_list));
}
/* Client calloc(): zeroed allocation of nmemb elements of size1 bytes
   each.  Fails (returns NULL) on silly args or if the total size would
   overflow SizeT -- a real calloc() must not silently allocate a block
   that is far too small (CERT MEM07-C). */
void* MAC_(calloc) ( ThreadId tid, SizeT nmemb, SizeT size1 )
{
   if (complain_about_silly_args2(nmemb, size1))
      return NULL;

   /* Guard against nmemb*size1 wrapping round. */
   if (nmemb != 0 && size1 > ((SizeT)-1) / nmemb)
      return NULL;

   return MAC_(new_block) ( tid, 0, nmemb*size1, VG_(clo_alignment),
                            MAC_MALLOC_REDZONE_SZB, /*is_zeroed*/True,
                            MAC_AllocMalloc, MAC_(malloc_list));
}
/* Client memalign(): like malloc but with a caller-chosen alignment. */
void* MAC_(memalign) ( ThreadId tid, SizeT align, SizeT n )
{
   if (complain_about_silly_args(n, "memalign"))
      return NULL;

   return MAC_(new_block) ( tid, 0, n, align,
                            MAC_MALLOC_REDZONE_SZB, /*is_zeroed*/False,
                            MAC_AllocMalloc, MAC_(malloc_list));
}
/* Dump the memory-profiling event counters, ten to a row, printing
   only the events that actually fired. */
static void done_prof_mem ( void )
{
   Int ev;

   for (ev = 0; ev < N_PROF_EVENTS; ev++) {
      if (0 == (ev % 10))
         VG_(printf)("\n");
      if (MAC_(event_ctr)[ev] > 0)
         VG_(printf)( "prof mem event %2d: %d\n", ev, MAC_(event_ctr)[ev] );
   }
   VG_(printf)("\n");
}
/* Hash-table iteration callback used when destroying a mempool: mark
   one chunk's memory as dead, with its redzones re-banned.  'd' is
   the owning MAC_Mempool. */
static void destroy_mempool_nuke_chunk(VgHashNode *node, void *d)
{
   MAC_Chunk*   chunk = (MAC_Chunk*)node;
   MAC_Mempool* pool  = (MAC_Mempool*)d;

   /* Note: ban redzones again -- just in case user de-banned them
      with a client request... */
   MAC_(ban_mem_heap)(chunk->data - pool->rzB,  pool->rzB );
   MAC_(die_mem_heap)(chunk->data,              chunk->size );
   MAC_(ban_mem_heap)(chunk->data + chunk->size, pool->rzB );
}
/* Common shutdown path: print malloc statistics, optionally run the
   tool-supplied leak checker, and dump profiling counters. */
void MAC_(common_fini)(void (*leak_check)(void))
{
   MAC_(print_malloc_stats)();

   if (1 == VG_(clo_verbosity)) {
      /* At default verbosity, hint at the flags that reveal more. */
      if (!MAC_(clo_leak_check))
         VG_(message)(Vg_UserMsg,
             "For a detailed leak analysis, rerun with: --leak-check=yes");

      VG_(message)(Vg_UserMsg,
                   "For counts of detected errors, rerun with: -v");
   }

   if (MAC_(clo_leak_check))
      leak_check();

   done_prof_mem();
}
void MAC_(print_malloc_stats) ( void ) { MallocStats ms; ms.nblocks = 0; ms.nbytes = 0; if (VG_(clo_verbosity) == 0) return; if (VG_(clo_xml)) return; /* Count memory still in use. */ VG_(HT_apply_to_all_nodes)(MAC_(malloc_list), malloc_stats_count_chunk, &ms); VG_(message)(Vg_UserMsg, "malloc/free: in use at exit: %d bytes in %d blocks.", ms.nbytes, ms.nblocks); VG_(message)(Vg_UserMsg, "malloc/free: %d allocs, %d frees, %u bytes allocated.", cmalloc_n_mallocs, cmalloc_n_frees, cmalloc_bs_mallocd); if (VG_(clo_verbosity) > 1) VG_(message)(Vg_UserMsg, ""); }
/* Register an allocation of 'size' bytes at 'addr' from mempool
   'pool'.  Reports an error if the pool is unknown. */
void MAC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT size)
{
   MAC_Mempool*  mp;
   MAC_Mempool** prev_next;

   mp = (MAC_Mempool*)VG_(HT_get_node) ( MAC_(mempool_list),
                                         (UWord)pool, (void*)&prev_next );
   if (NULL == mp) {
      MAC_(record_illegal_mempool_error) ( tid, pool );
      return;
   }

   /* Alignment is ignored for mempool blocks -- the client already
      placed the block itself. */
   MAC_(new_block)(tid, addr, size, /*ignored*/0, mp->rzB, mp->is_zeroed,
                   MAC_AllocCustom, mp->chunks);
}
/* Allocate memory and note change in memory available.  Records a
   shadow chunk for the block at p and marks the payload live with its
   redzones banned. */
__inline__ void MAC_(new_block) ( Addr p, UInt size,
                                  UInt rzB, Bool is_zeroed,
                                  MAC_AllocKind kind )
{
   VGP_PUSHCC(VgpCliMalloc);

   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += size;

   add_MAC_Chunk( p, size, kind );

   MAC_(ban_mem_heap)( p - rzB,  rzB );
   MAC_(new_mem_heap)( p,        size, is_zeroed );
   MAC_(ban_mem_heap)( p + size, rzB );

   VGP_POPCC(VgpCliMalloc);
}
/* This is for memory errors in pthread functions, as opposed to
   pthread API errors which are found by the core. */
void MAC_(record_core_mem_error) ( ThreadState* tst, Bool isWrite, Char* msg )
{
   MAC_Error extra;

   MAC_(clear_MAC_Error)( &extra );
   extra.isWrite = isWrite;
   VG_(maybe_record_error)( tst, CoreMemErr, /*addr*/0, msg, &extra );
}
/* Record a mismatched malloc/free-family deallocation of address a. */
void MAC_(record_freemismatch_error) ( ThreadId tid, Addr a )
{
   MAC_Error extra;

   sk_assert(VG_INVALID_THREADID != tid);
   MAC_(clear_MAC_Error)( &extra );
   extra.addrinfo.akind = Undescribed;   /* described lazily, later */
   VG_(maybe_record_error)( tid, FreeMismatchErr, a, /*s*/NULL, &extra );
}
/* Record a mismatched malloc/free-family deallocation of address a.
   (ThreadState* variant.) */
void MAC_(record_freemismatch_error) ( ThreadState* tst, Addr a )
{
   MAC_Error extra;

   sk_assert(NULL != tst);
   MAC_(clear_MAC_Error)( &extra );
   extra.addrinfo.akind = Undescribed;   /* described lazily, later */
   VG_(maybe_record_error)( tst, FreeMismatchErr, a, /*s*/NULL, &extra );
}
/* Record a jump to (execution of) unaddressable memory at a. */
void MAC_(record_jump_error) ( ThreadId tid, Addr a )
{
   MAC_Error extra;

   sk_assert(VG_INVALID_THREADID != tid);
   MAC_(clear_MAC_Error)( &extra );
   extra.axskind        = ExecAxs;
   extra.addrinfo.akind = Undescribed;   /* described lazily, later */
   VG_(maybe_record_error)( tid, AddrErr, a, /*s*/NULL, &extra );
}
/* Record a syscall parameter pointing at bad memory; msg names the
   parameter in the report. */
void MAC_(record_param_error) ( ThreadId tid, Addr a, Bool isWrite,
                                Char* msg )
{
   MAC_Error extra;

   sk_assert(VG_INVALID_THREADID != tid);
   MAC_(clear_MAC_Error)( &extra );
   extra.addrinfo.akind = Undescribed;   /* described lazily, later */
   extra.isWrite        = isWrite;
   VG_(maybe_record_error)( tid, ParamErr, a, msg, &extra );
}
/* Record a jump to (execution of) unaddressable memory at a.
   (ThreadState* variant.) */
void MAC_(record_jump_error) ( ThreadState* tst, Addr a )
{
   MAC_Error extra;

   sk_assert(NULL != tst);
   MAC_(clear_MAC_Error)( &extra );
   extra.axskind        = ExecAxs;
   extra.addrinfo.akind = Undescribed;   /* described lazily, later */
   VG_(maybe_record_error)( tst, AddrErr, a, /*s*/NULL, &extra );
}
/* Record a syscall parameter pointing at bad memory; msg names the
   parameter in the report.  (ThreadState* variant.) */
void MAC_(record_param_error) ( ThreadState* tst, Addr a, Bool isWrite,
                                Char* msg )
{
   MAC_Error extra;

   sk_assert(NULL != tst);
   MAC_(clear_MAC_Error)( &extra );
   extra.addrinfo.akind = Undescribed;   /* described lazily, later */
   extra.isWrite        = isWrite;
   VG_(maybe_record_error)( tst, ParamErr, a, msg, &extra );
}
/* Record a read/write of 'size' bytes at unaddressable address a.
   Accesses just below the stack pointer may be suppressed as a
   workaround for a gcc 2.96 code-generation quirk. */
void MAC_(record_address_error) ( ThreadState* tst, Addr a, Int size,
                                  Bool isWrite )
{
   MAC_Error extra;
   Bool      below_sp = is_just_below_ESP( VG_(get_stack_pointer)(), a );

   /* If this is caused by an access immediately below %ESP, and the
      user asks nicely, we just ignore it. */
   if (MAC_(clo_workaround_gcc296_bugs) && below_sp)
      return;

   MAC_(clear_MAC_Error)( &extra );
   extra.axskind            = isWrite ? WriteAxs : ReadAxs;
   extra.size               = size;
   extra.addrinfo.akind     = Undescribed;
   extra.addrinfo.maybe_gcc = below_sp;
   VG_(maybe_record_error)( tst, AddrErr, a, /*s*/NULL, &extra );
}
/* Client malloc().  Rejects negative sizes with a warning; registers
   the block so it can be tracked and leak-checked. */
void* SK_(malloc) ( ThreadState* tst, Int n )
{
   Addr p;

   if (n < 0) {
      VG_(message)(Vg_UserMsg, "Warning: silly arg (%d) to malloc()", n );
      return NULL;
   }

   p = (Addr)VG_(cli_malloc)( VG_(clo_alignment), n );
   /* Fix: don't register a shadow chunk for a failed allocation --
      previously a NULL result was passed straight to MAC_(new_block). */
   if (!p)
      return NULL;
   MAC_(new_block) ( tst, p, n, VG_(vg_malloc_redzone_szB),
                     /*is_zeroed*/False, MAC_AllocMalloc );
   return (void*)p;
}
/* Client operator new.  Rejects negative sizes with a warning;
   registers the block with the New allocation kind so delete
   mismatches can be detected. */
void* SK_(__builtin_new) ( Int n )
{
   Addr p;

   if (n < 0) {
      VG_(message)(Vg_UserMsg, "Warning: silly arg (%d) to __builtin_new()",
                   n);
      return NULL;
   }

   p = (Addr)VG_(cli_malloc)( VG_(clo_alignment), n );
   /* Fix: don't register a shadow chunk for a failed allocation --
      previously a NULL result was passed straight to MAC_(new_block). */
   if (!p)
      return NULL;
   MAC_(new_block) ( p, n, VG_(vg_malloc_redzone_szB),
                     /*is_zeroed*/False, MAC_AllocNew );
   return (void*)p;
}
/* Client memalign().  Rejects negative sizes with a warning; registers
   the block so it can be tracked and leak-checked. */
void* SK_(memalign) ( Int align, Int n )
{
   Addr p;

   if (n < 0) {
      VG_(message)(Vg_UserMsg, "Warning: silly arg (%d) to memalign()", n);
      return NULL;
   }

   p = (Addr)VG_(cli_malloc)( align, n );
   /* Fix: don't register a shadow chunk for a failed allocation --
      previously a NULL result was passed straight to MAC_(new_block). */
   if (!p)
      return NULL;
   MAC_(new_block) ( p, n, VG_(vg_malloc_redzone_szB),
                     /*is_zeroed*/False, MAC_AllocMalloc );
   return (void*)p;
}
/* Dispatch the client requests handled identically by Memcheck and
   Addrcheck.  Returns True iff the request was recognised here. */
Bool MAC_(handle_common_client_requests)(ThreadState* tst, UInt* arg,
                                         UInt* ret )
{
   switch (arg[0]) {

      case VG_USERREQ__COUNT_LEAKS: { /* count leaked bytes */
         /* arg[1..4] are really UInt* out-params. */
         UInt** argp = (UInt**)arg;
         *argp[1] = MAC_(total_bytes_leaked);
         *argp[2] = MAC_(total_bytes_dubious);
         *argp[3] = MAC_(total_bytes_reachable);
         *argp[4] = MAC_(total_bytes_suppressed);
         *ret = 0;
         return True;
      }

      case VG_USERREQ__MALLOCLIKE_BLOCK: {
         Addr p         = (Addr)arg[1];
         UInt sizeB     =       arg[2];
         UInt rzB       =       arg[3];
         Bool is_zeroed = (Bool)arg[4];
         MAC_(new_block) ( tst, p, sizeB, rzB, is_zeroed, MAC_AllocCustom );
         return True;
      }

      case VG_USERREQ__FREELIKE_BLOCK: {
         Addr p   = (Addr)arg[1];
         UInt rzB =       arg[2];
         MAC_(handle_free) ( tst, p, rzB, MAC_AllocCustom );
         return True;
      }

      default:
         return False;
   }
}
/* Allocate its shadow chunk, put it on the appropriate list. */
static void add_MAC_Chunk ( Addr p, UInt size, MAC_AllocKind kind )
{
   MAC_Chunk* mc;

   mc            = VG_(malloc)(sizeof(MAC_Chunk));
   mc->data      = p;
   mc->size      = size;
   mc->allockind = kind;
   mc->where     = VG_(get_ExeContext)(VG_(get_current_or_recent_tid)());

   /* Paranoia ... ensure this area is off-limits to the client, so
      the mc->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by
      VG_(malloc) should be noaccess as far as the client is
      concerned. */
   if (!MAC_(check_noaccess)( (Addr)mc, sizeof(MAC_Chunk), NULL )) {
      VG_(skin_panic)("add_MAC_chunk: shadow area is accessible");
   }

   VG_(HT_add_node)( MAC_(malloc_list), (VgHashNode*)mc );
}