/* Handle a client free()/delete/delete[] of 'p'.  Looks up the shadow
   chunk for the address; reports a bad-free error if none exists, a
   mismatch error if the allocator family differs from 'kind', and
   finally releases both the memory and its metadata. */
__inline__ void MAC_(handle_free) ( Addr p, UInt rzB, MAC_AllocKind kind )
{
   MAC_Chunk*  found;
   MAC_Chunk** next_ptr_in_chain;
   ThreadId    tid = VG_(get_current_or_recent_tid)();

   VGP_PUSHCC(VgpCliMalloc);
   cmalloc_n_frees++;

   found = (MAC_Chunk*)VG_(HT_get_node) ( MAC_(malloc_list), (UInt)p,
                                          (VgHashNode***)&next_ptr_in_chain );
   if (NULL == found) {
      /* Freeing something we never handed out (or freed already). */
      MAC_(record_free_error) ( tid, p );
      VGP_POPCC(VgpCliMalloc);
      return;
   }

   /* free/delete/delete[] must match the allocating call. */
   if (kind != found->allockind)
      MAC_(record_freemismatch_error) ( tid, p );

   die_and_free_mem ( found, next_ptr_in_chain, rzB );
   VGP_POPCC(VgpCliMalloc);
}
/* Record a client heap allocation.  If 'p' is nonzero the tool supplied
   the memory itself (custom allocator); otherwise allocate it here,
   optionally zeroing.  Registers the chunk in 'table' and sets up
   redzone/addressability state around the payload. */
__inline__ void* MAC_(new_block) ( ThreadId tid, Addr p, SizeT size,
                                   SizeT align, UInt rzB, Bool is_zeroed,
                                   MAC_AllocKind kind, VgHashTable table)
{
   VGP_PUSHCC(VgpCliMalloc);
   cmalloc_n_mallocs ++;

   if (0 != p) {
      /* Caller-provided memory is only legal for custom allocators. */
      tl_assert(MAC_AllocCustom == kind);
   } else {
      tl_assert(MAC_AllocCustom != kind);
      p = (Addr)VG_(cli_malloc)( align, size );
      if (0 == p) {
         VGP_POPCC(VgpCliMalloc);
         return NULL;
      }
      if (is_zeroed) VG_(memset)((void*)p, 0, size);
   }

   /* Only update this stat if allocation succeeded. */
   cmalloc_bs_mallocd += size;

   add_MAC_Chunk( tid, p, size, kind, table );

   MAC_(ban_mem_heap)( p-rzB, rzB );
   MAC_(new_mem_heap)( p, size, is_zeroed );
   MAC_(ban_mem_heap)( p+size, rzB );

   VGP_POPCC(VgpCliMalloc);
   return (void*)p;
}
/* Allocate 'size' bytes for the client with the given alignment,
   record a shadow chunk when the tool tracks them, and notify the
   tool of the new heap range and its surrounding redzones. */
static __inline__
void* alloc_and_new_mem ( ThreadState* tst, UInt size, UInt alignment,
                          Bool is_zeroed, VgAllocKind kind )
{
   Addr addr;

   VGP_PUSHCC(VgpCliMalloc);

   vg_cmalloc_n_mallocs ++;
   vg_cmalloc_bs_mallocd += size;

   vg_assert(alignment >= 4);
   /* Word alignment is the arena default; larger alignments go
      through the aligned allocator. */
   addr = (4 == alignment)
        ? (Addr)VG_(arena_malloc)(VG_AR_CLIENT, size)
        : (Addr)VG_(arena_malloc_aligned)(VG_AR_CLIENT, alignment, size);

   if (needs_shadow_chunks())
      addShadowChunk ( tst, addr, size, kind );

   VG_TRACK( ban_mem_heap, addr-VG_AR_CLIENT_REDZONE_SZB,
                           VG_AR_CLIENT_REDZONE_SZB );
   VG_TRACK( new_mem_heap, addr, size, is_zeroed );
   VG_TRACK( ban_mem_heap, addr+size, VG_AR_CLIENT_REDZONE_SZB );

   VGP_POPCC(VgpCliMalloc);
   return (void*)addr;
}
/* Demangle 'orig' into 'result' (at most result_size bytes).  The C++
   demangler is consulted only when --demangle is enabled; on failure
   (or when disabled) the mangled name is copied through unchanged.
   NOTE(review): ADD_TO_RESULT is a macro defined elsewhere in this
   file -- it presumably appends into result[] using the local
   n_result and the result_size parameter; confirm before renaming
   any of those identifiers. */
void VG_(demangle) ( Char* orig, Char* result, Int result_size )
{
   Int   n_result  = 0;   /* bytes written into result[] so far */
   Char* demangled = NULL;

   VGP_PUSHCC(VgpDemangle);

   if (VG_(clo_demangle))
      demangled = VG_(cplus_demangle) ( orig, DMGL_ANSI | DMGL_PARAMS );

   if (demangled) {
      ADD_TO_RESULT(demangled, VG_(strlen)(demangled));
      /* The demangler allocated from VG_AR_DEMANGLE; release it. */
      VG_(arena_free) (VG_AR_DEMANGLE, demangled);
   } else {
      ADD_TO_RESULT(orig, VG_(strlen)(orig));
   }

   /* Check that the demangler isn't leaking. */
   /* 15 Feb 02: if this assertion fails, this is not a disaster.
      Comment it out, and let me know.  ([email protected]). */
   vg_assert(VG_(is_empty_arena)(VG_AR_DEMANGLE));

   /* VG_(show_all_arena_stats)(); */

   VGP_POPCC(VgpDemangle);
}
// Do a three step traversal: by file, then fn, then line. // Returns a pointer to the line CC, creates a new one if necessary. static LineCC* get_lineCC(Addr origAddr) { Char file[FILE_LEN], fn[FN_LEN]; Int line; CodeLoc loc; LineCC* lineCC; get_debug_info(origAddr, file, fn, &line); VGP_PUSHCC(VgpGetLineCC); loc.file = file; loc.fn = fn; loc.line = line; lineCC = VG_(OSet_Lookup)(CC_table, &loc); if (!lineCC) { // Allocate and zero a new node. lineCC = VG_(OSet_AllocNode)(CC_table, sizeof(LineCC)); lineCC->loc.file = get_perm_string(loc.file); lineCC->loc.fn = get_perm_string(loc.fn); lineCC->loc.line = loc.line; VG_(OSet_Insert)(CC_table, lineCC); } VGP_POPCC(VgpGetLineCC); return lineCC; }
/* Demangle 'orig' into 'result' (bounded by result_size).  Falls back
   to copying the mangled name when demangling is disabled or fails.
   Known aliases of "the frame below main()" are canonicalised to
   "(below main)" to keep output (and the testsuite) stable. */
void VG_(demangle) ( Char* orig, Char* result, Int result_size )
{
   Char* demangled = NULL;

   VGP_PUSHCC(VgpDemangle);

   if (VG_(clo_demangle))
      demangled = VG_(cplus_demangle) ( orig, DMGL_ANSI | DMGL_PARAMS );

   if (NULL == demangled) {
      VG_(strncpy_safely)(result, orig, result_size);
   } else {
      VG_(strncpy_safely)(result, demangled, result_size);
      VG_(arena_free) (VG_AR_DEMANGLE, demangled);
   }

   // 13 Mar 2005: We used to check here that the demangler wasn't
   // leaking by calling the (now-removed) VG_(is_empty_arena)().  But
   // very rarely (twice in 3 years) the demangler does leak; we can't
   // do much about it and it's not a disaster, so we let it slide.

   if (0==VG_(strcmp)("__libc_start_main", result)
       || 0==VG_(strcmp)("generic_start_main", result))
      VG_(strncpy_safely)(result, "(below main)", 13);

   VGP_POPCC(VgpDemangle);
}
/* Resize the block at 'ptr' in arena 'aid' to hold at least req_pszB
   payload bytes, aligned to req_alignB.  Shrink requests are served
   in place (the block keeps its old size); growth allocates a new
   block, copies the old payload, and frees the old block. */
void* VG_(arena_realloc) ( ArenaId aid, void* ptr,
                           Int req_alignB, Int req_pszB )
{
   Arena* a;
   Int    old_bszW, old_pszW, old_pszB, i;
   UChar  *p_old, *p_new;
   UInt*  ch;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   vg_assert(req_pszB >= 0);
   vg_assert(req_pszB < 0x7FFFFFF0);

   /* Recover the block header and sanity-check it. */
   ch = payload_to_first(a, ptr);
   vg_assert(blockSane(a, ch));

   old_bszW = get_bszW_lo(ch);
   vg_assert(is_inuse_bszW(old_bszW));
   old_bszW = mk_plain_bszW(old_bszW);
   old_pszW = bszW_to_pszW(a, old_bszW);
   old_pszB = old_pszW * VKI_BYTES_PER_WORD;

   if (req_pszB <= old_pszB) {
      /* New size already fits in the existing block: no-op. */
      VGP_POPCC(VgpMalloc);
      return ptr;
   }

   /* 8 appears to be the arena's default alignment; other values go
      through the aligned allocator. */
   if (req_alignB == 8)
      p_new = VG_(arena_malloc) ( aid, req_pszB );
   else
      p_new = VG_(arena_malloc_aligned) ( aid, req_alignB, req_pszB );

   /* Copy the old payload, then release the old block. */
   p_old = (UChar*)ptr;
   for (i = 0; i < old_pszB; i++)
      p_new[i] = p_old[i];

   VG_(arena_free)(aid, p_old);

   VGP_POPCC(VgpMalloc);
   return p_new;
}
/* Find the translation address for a given (original) code address.
   If found, update VG_(tt_fast) so subsequent lookups are fast.  If
   no translation can be found, return zero.  This routine is (the
   only one) called from vg_run_innerloop. */
Addr VG_(search_transtab) ( Addr original_addr )
{
   TTEntry* found;

   VGP_PUSHCC(VgpSlowFindT);
   found = search_tt ( original_addr );

   if (NULL == found) {
      /* Miss: vg_run_innerloop will have to request a translation. */
      VGP_POPCC(VgpSlowFindT);
      return (Addr)0;
   }

   /* Hit: prime the fast cache so the next lookup is cheap. */
   {
      UInt cno = (UInt)original_addr & VG_TT_FAST_MASK;
      VG_(tt_fast)[cno] = (Addr)(found->tcentry);
      n_tt_fast_misses++;
      VGP_POPCC(VgpSlowFindT);
      return (Addr)&(found->tcentry->payload[0]);
   }
}
/* Simulate one data write with no accompanying instruction fetch:
   run it through the D1 model and bump the write-access counter. */
void log_0I_1Dw_cache_access(InstrInfo* n, Addr data_addr, Word data_size)
{
   VGP_PUSHCC(VgpCacheSimulate);
   cachesim_D1_doref(data_addr, data_size,
                     &n->parent->Dw.m1, &n->parent->Dw.m2);
   n->parent->Dw.a++;
   VGP_POPCC(VgpCacheSimulate);
}
/* Simulate one instruction fetch with no data access: run it through
   the I1 model and bump the instruction-access counter. */
void log_1I_0D_cache_access(InstrInfo* n)
{
   VGP_PUSHCC(VgpCacheSimulate);
   cachesim_I1_doref(n->instr_addr, n->instr_len,
                     &n->parent->Ir.m1, &n->parent->Ir.m2);
   n->parent->Ir.a++;
   VGP_POPCC(VgpCacheSimulate);
}
// Do a three step traversal: by file, then fn, then line. // In all cases prepends new nodes to their chain. Returns a pointer to the // line node, creates a new one if necessary. static lineCC* get_lineCC(Addr origAddr) { fileCC *curr_fileCC; fnCC *curr_fnCC; lineCC *curr_lineCC; Char file[FILE_LEN], fn[FN_LEN]; Int line; UInt file_hash, fn_hash, line_hash; get_debug_info(origAddr, file, fn, &line); VGP_PUSHCC(VgpGetLineCC); // level 1 file_hash = hash(file, N_FILE_ENTRIES); curr_fileCC = CC_table[file_hash]; while (NULL != curr_fileCC && !VG_STREQ(file, curr_fileCC->file)) { curr_fileCC = curr_fileCC->next; } if (NULL == curr_fileCC) { CC_table[file_hash] = curr_fileCC = new_fileCC(file, CC_table[file_hash]); distinct_files++; } // level 2 fn_hash = hash(fn, N_FN_ENTRIES); curr_fnCC = curr_fileCC->fns[fn_hash]; while (NULL != curr_fnCC && !VG_STREQ(fn, curr_fnCC->fn)) { curr_fnCC = curr_fnCC->next; } if (NULL == curr_fnCC) { curr_fileCC->fns[fn_hash] = curr_fnCC = new_fnCC(fn, curr_fileCC->fns[fn_hash]); distinct_fns++; } // level 3 line_hash = line % N_LINE_ENTRIES; curr_lineCC = curr_fnCC->lines[line_hash]; while (NULL != curr_lineCC && line != curr_lineCC->line) { curr_lineCC = curr_lineCC->next; } if (NULL == curr_lineCC) { curr_fnCC->lines[line_hash] = curr_lineCC = new_lineCC(line, curr_fnCC->lines[line_hash]); distinct_lines++; } VGP_POPCC(VgpGetLineCC); return curr_lineCC; }
/* Simulate one instruction fetch followed by one data write, updating
   the I1 and D1 models and their access counters. */
void log_1I_1Dw_cache_access(instr_info* n, Addr data_addr)
{
   VGP_PUSHCC(VgpCacheSimulate);

   cachesim_I1_doref(n->instr_addr, n->instr_len,
                     &n->parent->Ir.m1, &n->parent->Ir.m2);
   n->parent->Ir.a++;

   cachesim_D1_doref(data_addr, n->data_size,
                     &n->parent->Dw.m1, &n->parent->Dw.m2);
   n->parent->Dw.a++;

   VGP_POPCC(VgpCacheSimulate);
}
/* Handle a client free()/delete/delete[].  With shadow chunks enabled
   the record for 'p' is looked up so bad or mismatched frees can be
   reported; otherwise the memory goes straight back to the arena. */
void VG_(client_free) ( ThreadState* tst, void* p, VgAllocKind kind )
{
   ShadowChunk*  sc;
   ShadowChunk** prev_chunks_next_ptr;

   VGP_PUSHCC(VgpCliMalloc);

#  ifdef DEBUG_CLIENTMALLOC
   VG_(printf)("[m %d, f %d (%d)] client_free ( %p, %x )\n",
               count_malloclists(), 0/*count_freelist()*/,
               0/*vg_freed_list_volume*/, p, kind );
#  endif

   vg_cmalloc_n_frees ++;

   if (needs_shadow_chunks()) {
      sc = getShadowChunk ( (Addr)p, &prev_chunks_next_ptr );
      if (NULL == sc) {
         /* No record of this address: bogus free. */
         VG_TRACK( bad_free, tst, (Addr)p );
         VGP_POPCC(VgpCliMalloc);
         return;
      }
      /* check if its a matching free() / delete / delete [] */
      if (kind != sc->allockind)
         VG_TRACK( mismatched_free, tst, (Addr)p );
      die_and_free_mem ( tst, sc, prev_chunks_next_ptr );
   } else {
      VG_(arena_free) ( VG_AR_CLIENT, p );
   }

   VGP_POPCC(VgpCliMalloc);
}
/* Simulate one instruction fetch followed by one data read, updating
   the I1 and D1 models and their access counters. */
void log_1I_1Dr_cache_access(InstrInfo* n, Addr data_addr, Word data_size)
{
   VGP_PUSHCC(VgpCacheSimulate);

   cachesim_I1_doref(n->instr_addr, n->instr_len,
                     &n->parent->Ir.m1, &n->parent->Ir.m2);
   n->parent->Ir.a++;

   cachesim_D1_doref(data_addr, data_size,
                     &n->parent->Dr.m1, &n->parent->Dr.m2);
   n->parent->Dr.a++;

   VGP_POPCC(VgpCacheSimulate);
}
/* Simulate two consecutive instruction fetches with no data access,
   updating each instruction's own cost centre. */
void log_2I_0D_cache_access(InstrInfo* n, InstrInfo* n2)
{
   VGP_PUSHCC(VgpCacheSimulate);

   cachesim_I1_doref(n->instr_addr, n->instr_len,
                     &n->parent->Ir.m1, &n->parent->Ir.m2);
   n->parent->Ir.a++;

   cachesim_I1_doref(n2->instr_addr, n2->instr_len,
                     &n2->parent->Ir.m1, &n2->parent->Ir.m2);
   n2->parent->Ir.a++;

   VGP_POPCC(VgpCacheSimulate);
}
/* Record a fresh client heap block of 'size' bytes at 'p': bump the
   allocation stats, register the shadow chunk, and mark the redzones
   forbidden and the payload addressable. */
__inline__ void MAC_(new_block) ( Addr p, UInt size,
                                  UInt rzB, Bool is_zeroed, MAC_AllocKind kind )
{
   VGP_PUSHCC(VgpCliMalloc);

   cmalloc_bs_mallocd += size;
   cmalloc_n_mallocs ++;

   add_MAC_Chunk( p, size, kind );

   MAC_(ban_mem_heap)( p-rzB, rzB );
   MAC_(new_mem_heap)( p, size, is_zeroed );
   MAC_(ban_mem_heap)( p+size, rzB );

   VGP_POPCC(VgpCliMalloc);
}
/* Allocate nmemb*nbytes zeroed bytes from arena 'aid', aligned to
   alignB (8 is the default alignment; other values use the aligned
   allocator).
   Fix: the product nmemb*nbytes is computed in signed Int, so a large
   request could overflow (undefined behavior) and wrap to a small
   positive value, sailing past the old "size >= 0" assertion and
   under-allocating.  Guard the multiplication explicitly first. */
void* VG_(arena_calloc) ( ArenaId aid, Int alignB, Int nmemb, Int nbytes )
{
   Int    i, size;
   UChar* p;

   VGP_PUSHCC(VgpMalloc);

   /* Reject negative counts and any product that would overflow Int. */
   vg_assert(nmemb >= 0 && nbytes >= 0);
   vg_assert(nbytes == 0 || nmemb <= ((Int)0x7FFFFFFF) / nbytes);

   size = nmemb * nbytes;
   vg_assert(size >= 0);

   if (alignB == 8)
      p = VG_(arena_malloc) ( aid, size );
   else
      p = VG_(arena_malloc_aligned) ( aid, alignB, size );

   /* calloc semantics: zero the whole payload. */
   for (i = 0; i < size; i++) p[i] = 0;

   VGP_POPCC(VgpMalloc);

   return p;
}
/* Take a snapshot of the client's stack, putting the up to 'n_ips'
   IPs into 'ips'.  In order to be thread-safe, we pass in the
   thread's IP SP, FP if that's meaningful, and LR if that's
   meaningful.  Returns number of IPs put in 'ips'. */
UInt VG_(get_StackTrace2) ( Addr* ips, UInt n_ips,
                            Addr ip, Addr sp, Addr fp, Addr lr,
                            Addr fp_min, Addr fp_max_orig )
{
#if defined(VGP_ppc32_linux)
   Bool  lr_is_first_RA = False; /* ppc only */
#endif
   Bool  debug = False;
   Int   i;
   Addr  fp_max;
   UInt  n_found = 0;

   VGP_PUSHCC(VgpExeContext);

   vg_assert(sizeof(Addr) == sizeof(UWord));
   vg_assert(sizeof(Addr) == sizeof(void*));

   /* Snaffle IPs from the client's stack into ips[0 .. n_ips-1],
      putting zeroes in when the trail goes cold, which we guess to be
      when FP is not a reasonable stack location. */
   for (i = 0; i < n_ips; i++)
      ips[i] = 0;

   // JRS 2002-sep-17: hack, to round up fp_max to the end of the
   // current page, at least.  Dunno if it helps.
   // NJN 2002-sep-17: seems to -- stack traces look like 1.0.X again
   fp_max = VG_PGROUNDUP(fp_max_orig);
   fp_max -= sizeof(Addr);

   if (debug)
      VG_(printf)("n_ips=%d fp_min=%p fp_max_orig=%p, fp_max=%p ip=%p fp=%p\n",
                  n_ips, fp_min, fp_max_orig, fp_max, ip, fp);

   /* Assertion broken before main() is reached in pthreaded programs;
      the offending stack traces only have one item.  --njn, 2002-aug-16 */
   /* vg_assert(fp_min <= fp_max);*/

   if (fp_min + VG_(clo_max_stackframe) <= fp_max) {
      /* If the stack is ridiculously big, don't poke around ... but
         don't bomb out either.  Needed to make John Regehr's
         user-space threads package work.  JRS 20021001 */
      ips[0] = ip;
      VGP_POPCC(VgpExeContext);
      return 1;
   }

   /* Otherwise unwind the stack in a platform-specific way.  Trying
      to merge the x86, amd64 and ppc32 logic into a single piece of
      code is just too confusing and difficult to performance-tune. */

#  if defined(VGP_x86_linux)
   /*--------------------- x86 ---------------------*/
   /* fp is %ebp.  sp is %esp.  ip is %eip. */
   ips[0] = ip;
   i = 1;

   /* Loop unwinding the stack.  Note that the IP value we get on each
      pass (whether from CFI info or a stack frame) is a return
      address, so is actually after the calling instruction in the
      calling function.
      Because of this we subtract one from the IP after each pass of
      the loop, so that we find the right CFI block on the next pass
      -- otherwise we can pick up the wrong CFI info if it changes
      right after the calling instruction, and then fail to unwind the
      next step.  This most frequently happens at the end of a
      function when a tail call occurs and we would otherwise use the
      CFI info of the *next* function, which is completely wrong. */
   while (True) {
      if (i >= n_ips)
         break;

      /* Try to derive a new (ip,sp,fp) triple from the current set. */

      /* On x86, first try the old-fashioned method of following the
         %ebp-chain.  Code which doesn't use this (that is, compiled
         with -fomit-frame-pointer) is not ABI compliant and so
         relatively rare.  Besides, trying the CFI first almost always
         fails, and is expensive. */

      /* Deal with frames resulting from functions which begin "pushl
         %ebp ; movl %esp, %ebp" which is the ABI-mandated preamble. */
      if (fp_min <= fp && fp <= fp_max) {
         /* fp looks sane, so use it. */
         ip = (((UWord*)fp)[1]);
         sp = fp + sizeof(Addr) /*saved %ebp*/ + sizeof(Addr) /*ra*/;
         fp = (((UWord*)fp)[0]);
         ips[i++] = ip;
         if (debug)
            VG_(printf)(" ipsF[%d]=%08p\n", i-1, ips[i-1]);
         ip = ip - 1;   /* point at the call, not the return addr */
         continue;
      }

      /* That didn't work out, so see if there is any CFI info to hand
         which can be used. */
      if ( VG_(use_CFI_info)( &ip, &sp, &fp, fp_min, fp_max ) ) {
         ips[i++] = ip;
         if (debug)
            VG_(printf)(" ipsC[%d]=%08p\n", i-1, ips[i-1]);
         ip = ip - 1;
         continue;
      }

      /* No luck.  We have to give up. */
      break;
   }

#  elif defined(VGP_amd64_linux)
   /*--------------------- amd64 ---------------------*/
   /* fp is %rbp.  sp is %rsp.  ip is %rip. */
   ips[0] = ip;
   i = 1;

   /* Loop unwinding the stack.  Same return-address / minus-one
      subtlety as described for the x86 case above. */
   while (True) {
      if (i >= n_ips)
         break;

      /* Try to derive a new (ip,sp,fp) triple from the current set. */

      /* First off, see if there is any CFI info to hand which can
         be used. */
      if ( VG_(use_CFI_info)( &ip, &sp, &fp, fp_min, fp_max ) ) {
         ips[i++] = ip;
         if (debug)
            VG_(printf)(" ipsC[%d]=%08p\n", i-1, ips[i-1]);
         ip = ip - 1;
         continue;
      }

      /* If VG_(use_CFI_info) fails, it won't modify ip/sp/fp, so we
         can safely try the old-fashioned method. */
      /* This bit is supposed to deal with frames resulting from
         functions which begin "pushq %rbp ; movq %rsp, %rbp".
         Unfortunately, since we can't (easily) look at the insns at
         the start of the fn, like GDB does, there's no reliable way
         to tell.  Hence the hack of first trying out CFI, and if that
         fails, then use this as a fallback. */
      if (fp_min <= fp && fp <= fp_max) {
         /* fp looks sane, so use it. */
         ip = (((UWord*)fp)[1]);
         sp = fp + sizeof(Addr) /*saved %rbp*/ + sizeof(Addr) /*ra*/;
         fp = (((UWord*)fp)[0]);
         ips[i++] = ip;
         if (debug)
            VG_(printf)(" ipsF[%d]=%08p\n", i-1, ips[i-1]);
         ip = ip - 1;
         continue;
      }

      /* No luck there.  We have to give up. */
      break;
   }

#  elif defined(VGP_ppc32_linux)
   /*--------------------- ppc32 ---------------------*/
   /* fp is %r1.  ip is %cia.  Note, ppc uses r1 as both the stack and
      frame pointers. */

   /* Decide whether the LR should supply the first return address:
      it does when LR and IP resolve to different function names
      (i.e. we appear to be at the start of a call). */
   lr_is_first_RA = False;
   {
#     define M_VG_ERRTXT 1000
      UChar buf_lr[M_VG_ERRTXT], buf_ip[M_VG_ERRTXT];
      if (VG_(get_fnname_nodemangle) (lr, buf_lr, M_VG_ERRTXT))
         if (VG_(get_fnname_nodemangle) (ip, buf_ip, M_VG_ERRTXT))
            if (VG_(strncmp)(buf_lr, buf_ip, M_VG_ERRTXT))
               lr_is_first_RA = True;
#     undef M_VG_ERRTXT
   }

   ips[0] = ip;
   i = 1;
   if (fp_min <= fp && fp < fp_max-4+1) {

      /* initial FP is sane; keep going */
      fp = (((UWord*)fp)[0]);

      while (True) {
         if (i >= n_ips)
            break;

         /* Try to derive a new (ip,fp) pair from the current set. */
         if (fp_min <= fp && fp <= fp_max) {
            /* fp looks sane, so use it. */
            if (i == 1 && lr_is_first_RA)
               ip = lr;
            else
               ip = (((UWord*)fp)[1]);
            fp = (((UWord*)fp)[0]);
            ips[i++] = ip;
            if (debug)
               VG_(printf)(" ipsF[%d]=%08p\n", i-1, ips[i-1]);
            continue;
         }

         /* No luck there.  We have to give up. */
         break;
      }
   }

#  else
#    error "Unknown platform"
#  endif

   n_found = i;
   VGP_POPCC(VgpExeContext);
   return n_found;
}
/* Print the final cachegrind summary: I-cache, D-cache and unified L2
   totals plus miss rates, followed by internal statistics at higher
   verbosity levels.  Column widths are derived from the magnitude of
   the reference counts so the numbers line up. */
static void cg_fini(Int exitcode)
{
   static Char buf1[128], buf2[128], buf3[128], fmt [128];

   CC D_total;
   ULong L2_total_m, L2_total_mr, L2_total_mw,
         L2_total, L2_total_r, L2_total_w;
   Int l1, l2, l3;
   Int p;   /* NOTE(review): looks vestigial -- not passed to
               VG_(percentify) in this version; confirm before removal */

   fprint_CC_table_and_calc_totals();

   if (VG_(clo_verbosity) == 0)
      return;

   /* I cache results.  Use the I_refs value to determine the first
      column width. */
   l1 = ULong_width(Ir_total.a);
   l2 = ULong_width(Dr_total.a);
   l3 = ULong_width(Dw_total.a);

   /* Make format string, getting width right for numbers */
   VG_(sprintf)(fmt, "%%s %%,%dllu", l1);

   VG_(message)(Vg_UserMsg, fmt, "I refs: ", Ir_total.a);
   VG_(message)(Vg_UserMsg, fmt, "I1 misses: ", Ir_total.m1);
   VG_(message)(Vg_UserMsg, fmt, "L2i misses: ", Ir_total.m2);

   p = 100;

   /* Avoid division by zero in the rate calculations. */
   if (0 == Ir_total.a) Ir_total.a = 1;
   VG_(percentify)(Ir_total.m1, Ir_total.a, 2, l1+1, buf1);
   VG_(message)(Vg_UserMsg, "I1 miss rate: %s", buf1);

   VG_(percentify)(Ir_total.m2, Ir_total.a, 2, l1+1, buf1);
   VG_(message)(Vg_UserMsg, "L2i miss rate: %s", buf1);
   VG_(message)(Vg_UserMsg, "");

   /* D cache results.  Use the D_refs.rd and D_refs.wr values to
      determine the width of columns 2 & 3. */
   D_total.a  = Dr_total.a  + Dw_total.a;
   D_total.m1 = Dr_total.m1 + Dw_total.m1;
   D_total.m2 = Dr_total.m2 + Dw_total.m2;

   /* Make format string, getting width right for numbers */
   VG_(sprintf)(fmt, "%%s %%,%dllu (%%,%dllu rd + %%,%dllu wr)", l1, l2, l3);

   VG_(message)(Vg_UserMsg, fmt, "D refs: ",
                            D_total.a, Dr_total.a, Dw_total.a);
   VG_(message)(Vg_UserMsg, fmt, "D1 misses: ",
                            D_total.m1, Dr_total.m1, Dw_total.m1);
   VG_(message)(Vg_UserMsg, fmt, "L2d misses: ",
                            D_total.m2, Dr_total.m2, Dw_total.m2);

   p = 10;

   if (0 == D_total.a)  D_total.a = 1;
   if (0 == Dr_total.a) Dr_total.a = 1;
   if (0 == Dw_total.a) Dw_total.a = 1;
   VG_(percentify)( D_total.m1,  D_total.a, 1, l1+1, buf1);
   VG_(percentify)(Dr_total.m1, Dr_total.a, 1, l2+1, buf2);
   VG_(percentify)(Dw_total.m1, Dw_total.a, 1, l3+1, buf3);
   VG_(message)(Vg_UserMsg, "D1 miss rate: %s (%s + %s )", buf1, buf2,buf3);

   VG_(percentify)( D_total.m2,  D_total.a, 1, l1+1, buf1);
   VG_(percentify)(Dr_total.m2, Dr_total.a, 1, l2+1, buf2);
   VG_(percentify)(Dw_total.m2, Dw_total.a, 1, l3+1, buf3);
   VG_(message)(Vg_UserMsg, "L2d miss rate: %s (%s + %s )", buf1, buf2,buf3);
   VG_(message)(Vg_UserMsg, "");

   /* L2 overall results */
   L2_total   = Dr_total.m1 + Dw_total.m1 + Ir_total.m1;
   L2_total_r = Dr_total.m1 + Ir_total.m1;
   L2_total_w = Dw_total.m1;
   VG_(message)(Vg_UserMsg, fmt, "L2 refs: ",
                            L2_total, L2_total_r, L2_total_w);

   L2_total_m  = Dr_total.m2 + Dw_total.m2 + Ir_total.m2;
   L2_total_mr = Dr_total.m2 + Ir_total.m2;
   L2_total_mw = Dw_total.m2;
   VG_(message)(Vg_UserMsg, fmt, "L2 misses: ",
                            L2_total_m, L2_total_mr, L2_total_mw);

   VG_(percentify)(L2_total_m,  (Ir_total.a + D_total.a),  1, l1+1, buf1);
   VG_(percentify)(L2_total_mr, (Ir_total.a + Dr_total.a), 1, l2+1, buf2);
   VG_(percentify)(L2_total_mw, Dw_total.a,                1, l3+1, buf3);
   VG_(message)(Vg_UserMsg, "L2 miss rate: %s (%s + %s )", buf1, buf2,buf3);

   // Various stats
   if (VG_(clo_verbosity) > 1) {
      Int debug_lookups = full_debugs + fn_debugs + file_line_debugs
                        + no_debugs;

      VG_(message)(Vg_DebugMsg, "");
      VG_(message)(Vg_DebugMsg, "cachegrind: distinct files: %d",
                   distinct_files);
      VG_(message)(Vg_DebugMsg, "cachegrind: distinct fns: %d",
                   distinct_fns);
      VG_(message)(Vg_DebugMsg, "cachegrind: distinct lines: %d",
                   distinct_lines);
      VG_(message)(Vg_DebugMsg, "cachegrind: distinct instrs:%d",
                   distinct_instrs);
      VG_(message)(Vg_DebugMsg, "cachegrind: debug lookups : %d",
                   debug_lookups);
      VG_(message)(Vg_DebugMsg, "cachegrind: with full info:%3d%% (%d)",
                   full_debugs * 100 / debug_lookups, full_debugs);
      VG_(message)(Vg_DebugMsg, "cachegrind: with file/line info:%3d%% (%d)",
                   file_line_debugs * 100 / debug_lookups, file_line_debugs);
      VG_(message)(Vg_DebugMsg, "cachegrind: with fn name info:%3d%% (%d)",
                   fn_debugs * 100 / debug_lookups, fn_debugs);
      VG_(message)(Vg_DebugMsg, "cachegrind: with zero info:%3d%% (%d)",
                   no_debugs * 100 / debug_lookups, no_debugs);
      VG_(message)(Vg_DebugMsg, "cachegrind: string table size: %u",
                   VG_(OSet_Size)(stringTable));
      VG_(message)(Vg_DebugMsg, "cachegrind: CC table size: %u",
                   VG_(OSet_Size)(CC_table));
      VG_(message)(Vg_DebugMsg, "cachegrind: InstrInfo table size: %u",
                   VG_(OSet_Size)(instrInfoTable));
   }
   VGP_POPCC(VgpCacheResults);
}
/* Client realloc().  Looks up the existing chunk for 'p'; reports a
   free error if unknown and a mismatch error if it was not allocated
   with malloc-family.  Size-unchanged and shrink requests are served
   in place; growth allocates a new block, copies, and frees the old.
   Fix: the early return for a negative new_size previously left the
   VgpCliMalloc profiling cost centre pushed (VGP_PUSHCC without a
   matching VGP_POPCC); it now pops it before returning. */
void* SK_(realloc) ( void* p, Int new_size )
{
   MAC_Chunk  *mc;
   MAC_Chunk **prev_chunks_next_ptr;
   UInt        i;
   ThreadId    tid = VG_(get_current_or_recent_tid)();

   VGP_PUSHCC(VgpCliMalloc);

   /* A realloc counts as one free plus one malloc. */
   cmalloc_n_frees ++;
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += new_size;

   if (new_size < 0) {
      VG_(message)(Vg_UserMsg,
                   "Warning: silly arg (%d) to realloc()", new_size );
      VGP_POPCC(VgpCliMalloc);   /* fix: balance the push above */
      return NULL;
   }

   /* First try and find the block. */
   mc = (MAC_Chunk*)VG_(HT_get_node) ( MAC_(malloc_list), (UInt)p,
                                       (VgHashNode***)&prev_chunks_next_ptr );

   if (mc == NULL) {
      MAC_(record_free_error) ( tid, (Addr)p );
      /* Perhaps we should return to the program regardless. */
      VGP_POPCC(VgpCliMalloc);
      return NULL;
   }

   /* check if its a matching free() / delete / delete [] */
   if (MAC_AllocMalloc != mc->allockind) {
      /* can not realloc a range that was allocated with new or new [] */
      MAC_(record_freemismatch_error) ( tid, (Addr)p );
      /* but keep going anyway */
   }

   if (mc->size == new_size) {
      /* size unchanged */
      mc->where = VG_(get_ExeContext)(tid);
      VGP_POPCC(VgpCliMalloc);
      return p;

   } else if (mc->size > new_size) {
      /* new size is smaller: mark the tail dead, shrink in place */
      MAC_(die_mem_heap)( mc->data+new_size, mc->size-new_size );
      mc->size = new_size;
      mc->where = VG_(get_ExeContext)(tid);
      VGP_POPCC(VgpCliMalloc);
      return p;

   } else {
      /* new size is bigger */
      Addr p_new;

      /* Get new memory */
      p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);

      /* First half kept and copied, second half new,
         red zones as normal */
      MAC_(ban_mem_heap) ( p_new-VG_(vg_malloc_redzone_szB),
                                 VG_(vg_malloc_redzone_szB) );
      MAC_(copy_mem_heap)( (Addr)p, p_new, mc->size );
      MAC_(new_mem_heap) ( p_new+mc->size, new_size-mc->size,
                           /*inited*/False );
      MAC_(ban_mem_heap) ( p_new+new_size, VG_(vg_malloc_redzone_szB) );

      /* Copy from old to new */
      for (i = 0; i < mc->size; i++)
         ((UChar*)p_new)[i] = ((UChar*)p)[i];

      /* Free old memory */
      die_and_free_mem ( mc, prev_chunks_next_ptr,
                         VG_(vg_malloc_redzone_szB) );

      /* this has to be after die_and_free_mem, otherwise the former
         succeeds in shorting out the new block, not the old, in the
         case when both are on the same list. */
      add_MAC_Chunk ( p_new, new_size, MAC_AllocMalloc );

      VGP_POPCC(VgpCliMalloc);
      return (void*)p_new;
   }
}
/* Print the final cachegrind summary (older variant): I-cache,
   D-cache and unified L2 totals and miss rates, then internal BB /
   debug-info statistics at higher verbosity.  'p' is the decimal
   scaling factor handed to percentify (100 = 2 decimal places for the
   I-cache figures, 10 = 1 for the rest). */
static void cg_fini(Int exitcode)
{
   static char buf1[128], buf2[128], buf3[128], fmt [128];

   CC D_total;
   ULong L2_total_m, L2_total_mr, L2_total_mw,
         L2_total, L2_total_r, L2_total_w;
   Int l1, l2, l3;
   Int p;

   fprint_CC_table_and_calc_totals();

   if (VG_(clo_verbosity) == 0)
      return;

   /* I cache results.  Use the I_refs value to determine the first
      column width. */
   l1 = ULong_width(Ir_total.a);
   l2 = ULong_width(Dr_total.a);
   l3 = ULong_width(Dw_total.a);

   /* Make format string, getting width right for numbers */
   VG_(sprintf)(fmt, "%%s %%,%dld", l1);

   VG_(message)(Vg_UserMsg, fmt, "I refs: ", Ir_total.a);
   VG_(message)(Vg_UserMsg, fmt, "I1 misses: ", Ir_total.m1);
   VG_(message)(Vg_UserMsg, fmt, "L2i misses: ", Ir_total.m2);

   p = 100;

   /* Guard against division by zero below. */
   if (0 == Ir_total.a) Ir_total.a = 1;
   percentify(Ir_total.m1 * 100 * p / Ir_total.a, p, l1+1, buf1);
   VG_(message)(Vg_UserMsg, "I1 miss rate: %s", buf1);

   percentify(Ir_total.m2 * 100 * p / Ir_total.a, p, l1+1, buf1);
   VG_(message)(Vg_UserMsg, "L2i miss rate: %s", buf1);
   VG_(message)(Vg_UserMsg, "");

   /* D cache results.  Use the D_refs.rd and D_refs.wr values to
      determine the width of columns 2 & 3. */
   D_total.a  = Dr_total.a  + Dw_total.a;
   D_total.m1 = Dr_total.m1 + Dw_total.m1;
   D_total.m2 = Dr_total.m2 + Dw_total.m2;

   /* Make format string, getting width right for numbers */
   VG_(sprintf)(fmt, "%%s %%,%dld (%%,%dld rd + %%,%dld wr)", l1, l2, l3);

   VG_(message)(Vg_UserMsg, fmt, "D refs: ",
                            D_total.a, Dr_total.a, Dw_total.a);
   VG_(message)(Vg_UserMsg, fmt, "D1 misses: ",
                            D_total.m1, Dr_total.m1, Dw_total.m1);
   VG_(message)(Vg_UserMsg, fmt, "L2d misses: ",
                            D_total.m2, Dr_total.m2, Dw_total.m2);

   p = 10;

   if (0 == D_total.a)  D_total.a = 1;
   if (0 == Dr_total.a) Dr_total.a = 1;
   if (0 == Dw_total.a) Dw_total.a = 1;
   percentify( D_total.m1 * 100 * p /  D_total.a, p, l1+1, buf1);
   percentify(Dr_total.m1 * 100 * p / Dr_total.a, p, l2+1, buf2);
   percentify(Dw_total.m1 * 100 * p / Dw_total.a, p, l3+1, buf3);
   VG_(message)(Vg_UserMsg, "D1 miss rate: %s (%s + %s )", buf1, buf2,buf3);

   percentify( D_total.m2 * 100 * p /  D_total.a, p, l1+1, buf1);
   percentify(Dr_total.m2 * 100 * p / Dr_total.a, p, l2+1, buf2);
   percentify(Dw_total.m2 * 100 * p / Dw_total.a, p, l3+1, buf3);
   VG_(message)(Vg_UserMsg, "L2d miss rate: %s (%s + %s )", buf1, buf2,buf3);
   VG_(message)(Vg_UserMsg, "");

   /* L2 overall results */
   L2_total   = Dr_total.m1 + Dw_total.m1 + Ir_total.m1;
   L2_total_r = Dr_total.m1 + Ir_total.m1;
   L2_total_w = Dw_total.m1;
   VG_(message)(Vg_UserMsg, fmt, "L2 refs: ",
                            L2_total, L2_total_r, L2_total_w);

   L2_total_m  = Dr_total.m2 + Dw_total.m2 + Ir_total.m2;
   L2_total_mr = Dr_total.m2 + Ir_total.m2;
   L2_total_mw = Dw_total.m2;
   VG_(message)(Vg_UserMsg, fmt, "L2 misses: ",
                            L2_total_m, L2_total_mr, L2_total_mw);

   percentify(L2_total_m  * 100 * p / (Ir_total.a + D_total.a),  p, l1+1, buf1);
   percentify(L2_total_mr * 100 * p / (Ir_total.a + Dr_total.a), p, l2+1, buf2);
   percentify(L2_total_mw * 100 * p / Dw_total.a,                p, l3+1, buf3);
   VG_(message)(Vg_UserMsg, "L2 miss rate: %s (%s + %s )", buf1, buf2,buf3);

   // Various stats
   if (VG_(clo_verbosity) > 1) {
      int BB_lookups = full_debug_BBs + fn_debug_BBs +
                       file_line_debug_BBs + no_debug_BBs;

      VG_(message)(Vg_DebugMsg, "");
      VG_(message)(Vg_DebugMsg, "Distinct files: %d", distinct_files);
      VG_(message)(Vg_DebugMsg, "Distinct fns: %d", distinct_fns);
      VG_(message)(Vg_DebugMsg, "Distinct lines: %d", distinct_lines);
      VG_(message)(Vg_DebugMsg, "Distinct instrs: %d", distinct_instrs);
      VG_(message)(Vg_DebugMsg, "BB lookups: %d", BB_lookups);
      VG_(message)(Vg_DebugMsg, "With full debug info:%3d%% (%d)",
                   full_debug_BBs * 100 / BB_lookups, full_debug_BBs);
      VG_(message)(Vg_DebugMsg, "With file/line debug info:%3d%% (%d)",
                   file_line_debug_BBs * 100 / BB_lookups, file_line_debug_BBs);
      VG_(message)(Vg_DebugMsg, "With fn name debug info:%3d%% (%d)",
                   fn_debug_BBs * 100 / BB_lookups, fn_debug_BBs);
      VG_(message)(Vg_DebugMsg, "With no debug info:%3d%% (%d)",
                   no_debug_BBs * 100 / BB_lookups, no_debug_BBs);
      VG_(message)(Vg_DebugMsg, "BBs Retranslated: %d", BB_retranslations);
   }
   VGP_POPCC(VgpCacheResults);
}
/* Client realloc().  Without shadow chunks this is a straight arena
   realloc.  With shadow chunks the block is looked up so bad frees
   and allocator-family mismatches can be reported; size-unchanged and
   shrink requests are served in place, growth allocates a fresh block,
   copies, and frees the old one. */
void* VG_(client_realloc) ( ThreadState* tst, void* p, UInt new_size )
{
   ShadowChunk  *sc;
   ShadowChunk **prev_chunks_next_ptr;
   UInt          i;

   VGP_PUSHCC(VgpCliMalloc);

   /* A realloc counts as one free plus one malloc. */
   vg_cmalloc_n_frees ++;
   vg_cmalloc_n_mallocs ++;
   vg_cmalloc_bs_mallocd += new_size;

   if (! needs_shadow_chunks()) {
      vg_assert(p != NULL && new_size != 0);
      p = VG_(arena_realloc) ( VG_AR_CLIENT, p, VG_(clo_alignment),
                               new_size );
      VGP_POPCC(VgpCliMalloc);
      return p;

   } else {
      /* First try and find the block. */
      sc = getShadowChunk ( (Addr)p, &prev_chunks_next_ptr );

      if (sc == NULL) {
         VG_TRACK( bad_free, tst, (Addr)p );
         /* Perhaps we should return to the program regardless. */
         VGP_POPCC(VgpCliMalloc);
         return NULL;
      }

      /* check if its a matching free() / delete / delete [] */
      if (Vg_AllocMalloc != sc->allockind) {
         /* can not realloc a range that was allocated with new or new [] */
         VG_TRACK( mismatched_free, tst, (Addr)p );
         /* but keep going anyway */
      }

      if (sc->size == new_size) {
         /* size unchanged */
         VGP_POPCC(VgpCliMalloc);
         return p;

      } else if (sc->size > new_size) {
         /* new size is smaller: mark the tail dead, shrink in place */
         VG_TRACK( die_mem_heap, sc->data+new_size, sc->size-new_size );
         sc->size = new_size;
         VGP_POPCC(VgpCliMalloc);
#        ifdef DEBUG_CLIENTMALLOC
         VG_(printf)("[m %d, f %d (%d)] client_realloc_smaller ( %p, %d ) = %p\n",
                     count_malloclists(), 0/*count_freelist()*/,
                     0/*vg_freed_list_volume*/, p, new_size, p );
#        endif
         return p;

      } else {
         /* new size is bigger */
         Addr p_new;

         /* Get new memory */
         vg_assert(VG_(clo_alignment) >= 4);
         if (VG_(clo_alignment) == 4)
            p_new = (Addr)VG_(arena_malloc)(VG_AR_CLIENT, new_size);
         else
            p_new = (Addr)VG_(arena_malloc_aligned)(VG_AR_CLIENT,
                                            VG_(clo_alignment), new_size);

         /* First half kept and copied, second half new,
            red zones as normal */
         VG_TRACK( ban_mem_heap, p_new-VG_AR_CLIENT_REDZONE_SZB,
                                 VG_AR_CLIENT_REDZONE_SZB );
         VG_TRACK( copy_mem_heap, (Addr)p, p_new, sc->size );
         VG_TRACK( new_mem_heap, p_new+sc->size, new_size-sc->size,
                   /*inited=*/False );
         VG_TRACK( ban_mem_heap, p_new+new_size, VG_AR_CLIENT_REDZONE_SZB );

         /* Copy from old to new */
         for (i = 0; i < sc->size; i++)
            ((UChar*)p_new)[i] = ((UChar*)p)[i];

         /* Free old memory */
         die_and_free_mem ( tst, sc, prev_chunks_next_ptr );

         /* this has to be after die_and_free_mem, otherwise the former
            succeeds in shorting out the new block, not the old, in the
            case when both are on the same list. */
         addShadowChunk ( tst, p_new, new_size, Vg_AllocMalloc );

         VGP_POPCC(VgpCliMalloc);
#        ifdef DEBUG_CLIENTMALLOC
         VG_(printf)("[m %d, f %d (%d)] client_realloc_bigger ( %p, %d ) = %p\n",
                     count_malloclists(), 0/*count_freelist()*/,
                     0/*vg_freed_list_volume*/, p, new_size, (void*)p_new );
#        endif
         return (void*)p_new;
      }
   }
}
/* Free a block back to arena 'aid'.  The block is re-inserted on the
   appropriate free list, then eagerly coalesced with its successor and/or
   predecessor if either is also free (classic boundary-tag scheme: sizes
   are stored in both the first and last words of each block, so both
   neighbours can be inspected in O(1)).  free(NULL) is a no-op. */
void VG_(arena_free) ( ArenaId aid, void* ptr )
{
   Superblock* sb;
   UInt*       sb_payl_firstw;
   UInt*       sb_payl_lastw;
   UInt*       other;
   UInt*       ch;
   Int         ch_bszW, ch_pszW, other_bszW, ch_listno;
   Arena*      a;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   if (ptr == NULL) {
      VGP_POPCC(VgpMalloc);
      return;
   }

   /* Step back from the payload to the block header. */
   ch = payload_to_first(a, ptr);

#  ifdef DEBUG_MALLOC
   vg_assert(blockSane(a,ch));
#  endif

   /* Account the payload (in bytes) off the loan total. */
   a->bytes_on_loan -= sizeof(Word)
                       * bszW_to_pszW(a, mk_plain_bszW(get_bszW_lo(ch)));

   /* Find the containing superblock so we know the legal payload range
      for the neighbour probes below. */
   sb             = findSb( a, ch );
   sb_payl_firstw = &(sb->payload_words[0]);
   sb_payl_lastw  = &(sb->payload_words[sb->n_payload_words-1]);

   /* Put this chunk back on a list somewhere. */
   ch_bszW   = get_bszW_lo(ch);
   ch_pszW   = bszW_to_pszW(a, ch_bszW);
   ch_listno = pszW_to_listNo(ch_pszW);
   mkFreeBlock( a, ch, ch_bszW, ch_listno );

   /* See if this block can be merged with the following one. */
   other = ch + ch_bszW;
   /* overhead_szW(a) is the smallest possible bszW for this arena.
      So the nearest possible end to the block beginning at other is
      other+overhead_szW(a)-1.  Hence the test below. */
   if (other+overhead_szW(a)-1 <= sb_payl_lastw) {
      other_bszW = get_bszW_lo(other);
      if (!is_inuse_bszW(other_bszW)) {
         /* VG_(printf)( "merge-successor\n"); */
         other_bszW = mk_plain_bszW(other_bszW);
#        ifdef DEBUG_MALLOC
         vg_assert(blockSane(a, other));
#        endif
         /* Unlink both fragments, then re-link the merged block on the
            list matching its combined size. */
         unlinkBlock( a, ch, ch_listno );
         unlinkBlock( a, other,
                      pszW_to_listNo(bszW_to_pszW(a,other_bszW)) );
         ch_bszW += other_bszW;
         ch_listno = pszW_to_listNo(bszW_to_pszW(a, ch_bszW));
         mkFreeBlock( a, ch, ch_bszW, ch_listno );
      }
   }

   /* See if this block can be merged with its predecessor.  The size of
      the predecessor is read from its trailing boundary word at ch-1. */
   if (ch-overhead_szW(a) >= sb_payl_firstw) {
      other_bszW = get_bszW_hi_from_last_word( ch-1 );
      if (!is_inuse_bszW(other_bszW)) {
         /* VG_(printf)( "merge-predecessor\n"); */
         other = last_to_first( ch-1 );
         other_bszW = mk_plain_bszW(other_bszW);
         unlinkBlock( a, ch, ch_listno );
         unlinkBlock( a, other,
                      pszW_to_listNo(bszW_to_pszW(a, other_bszW)) );
         /* The merged block now starts at the predecessor. */
         ch = other;
         ch_bszW += other_bszW;
         ch_listno = pszW_to_listNo(bszW_to_pszW(a, ch_bszW));
         mkFreeBlock( a, ch, ch_bszW, ch_listno );
      }
   }

#  ifdef DEBUG_MALLOC
   mallocSanityCheckArena(aid);
#  endif

   VGP_POPCC(VgpMalloc);
}
/* The idea for malloc_aligned() is to allocate a big block, base, and
   then split it into two parts: frag, which is returned back to the
   free pool, and align, which is the bit we're really after.  Here's
   a picture.  L and H denote the block lower and upper overheads, in
   words.  The details are gruesome.  Note it is slightly complicated
   because the initial request to generate base may return a bigger
   block than we asked for, so it is important to distinguish the base
   request size and the base actual size.

   frag_b                   align_b
   |                        |
   |    frag_p              |    align_p
   |    |                   |    |
   v    v                   v    v

   +---+                +---+---+               +---+
   | L |----------------| H | L |---------------| H |
   +---+                +---+---+               +---+

   ^    ^                        ^
   |    |                        :
   |    base_p                   this addr must be aligned
   |
   base_b

   .    .               .   .   .               .   .
   <------ frag_bszW ------->   .               .   .
   .    <------------- base_pszW_act ----------->   .
   .    .               .   .   .               .   .

   Returns a payload pointer aligned to req_alignB bytes, or NULL if the
   underlying allocation fails (only possible for the client arena;
   other arenas abort on OOM inside VG_(arena_malloc)). */
void* VG_(arena_malloc_aligned) ( ArenaId aid, Int req_alignB, Int req_pszB )
{
   Int    req_alignW, req_pszW, base_pszW_req, base_pszW_act, frag_bszW;
   Word   *base_b, *base_p, *align_p;
   UInt   saved_bytes_on_loan;
   Arena* a;
   void*  v;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   vg_assert(req_pszB >= 0);
   vg_assert(req_pszB < 0x7FFFFFF0);

   /* Check that the requested alignment seems reasonable; that is, is
      a power of 2.  */
   switch (req_alignB) {
      case 4: case 8: case 16: case 32: case 64: case 128: case 256:
      case 512: case 1024: case 2048: case 4096: case 8192: case 16384:
      case 32768: case 65536: case 131072: case 262144:
      case 1048576:
         /* can't be bothered to calculate larger ones */
         break;
      default:
         VG_(printf)("vg_malloc_aligned(%p, %d, %d)\nbad alignment request",
                     a, req_alignB, req_pszB );
         VG_(core_panic)("vg_malloc_aligned");
         /*NOTREACHED*/
   }

   /* Required alignment, in words.  Since it's constrained to be a
      power of 2 >= word size, no need to align the alignment.  Still,
      we check.  Alignments below 8 are bumped up to 8, the block
      layout's natural minimum. */
   if (req_alignB == 4) req_alignB = 8;
   req_alignW = req_alignB / VKI_BYTES_PER_WORD;
   vg_assert(req_alignB == req_alignW * VKI_BYTES_PER_WORD);

   /* Required payload size for the aligned chunk. */
   req_pszW = req_pszB_to_req_pszW(req_pszB);

   /* Payload size to request for the big block that we will split up.
      Oversized so that a suitably-aligned sub-block is guaranteed to
      exist inside it, with room for a fragment in front. */
   base_pszW_req = req_pszW + overhead_szW(a) + req_alignW;

   /* Payload ptr for the block we are going to split.  Note this
      changes a->bytes_on_loan; we save and restore it ourselves, since
      the loan accounting for the final aligned block is done below. */
   saved_bytes_on_loan = a->bytes_on_loan;
   base_p = VG_(arena_malloc) ( aid, base_pszW_req * VKI_BYTES_PER_WORD );
   a->bytes_on_loan = saved_bytes_on_loan;

   /* FIX: VG_(arena_malloc) returns NULL on OOM for the client arena;
      previously this fell straight through to payload_to_first(a, NULL)
      and scribbled block headers near address zero.  Propagate the
      failure to the caller instead. */
   if (base_p == NULL) {
      vg_assert(VG_AR_CLIENT == aid);
      VGP_POPCC(VgpMalloc);
      return NULL;
   }

   /* Block ptr for the block we are going to split. */
   base_b = payload_to_first ( a, base_p );

   /* Pointer to the payload of the aligned block we are going to
      return.  This has to be suitably aligned. */
   align_p = align_upwards ( base_b + 2 * overhead_szW_lo(a)
                                    + overhead_szW_hi(a),
                             req_alignB );

   /* The block size of the fragment we will create.  This must be big
      enough to actually create a fragment. */
   frag_bszW = align_p - overhead_szW_lo(a) - base_b;
   vg_assert(frag_bszW >= overhead_szW(a));

   /* The actual payload size of the block we are going to split. */
   base_pszW_act = bszW_to_pszW(a, mk_plain_bszW(get_bszW_lo(base_b)));

   /* Create the fragment block, and put it back on the relevant free
      list. */
   mkFreeBlock ( a, base_b, frag_bszW,
                 pszW_to_listNo(bszW_to_pszW(a, frag_bszW)) );

   /* Create the aligned block: everything left in base after the
      fragment. */
   mkInuseBlock ( a, align_p - overhead_szW_lo(a),
                  base_p + base_pszW_act + overhead_szW_hi(a)
                         - (align_p - overhead_szW_lo(a)) );

   /* Final sanity checks. */
   vg_assert(( (UInt)align_p % req_alignB) == 0);

   vg_assert(is_inuse_bszW(get_bszW_lo(payload_to_first(a, align_p))));

   vg_assert(req_pszW
             <=
             bszW_to_pszW(a, mk_plain_bszW(get_bszW_lo(
                                payload_to_first(a, align_p)))) );

   /* Now account the aligned block (only) against the loan total. */
   a->bytes_on_loan
      += sizeof(Word)
         * bszW_to_pszW(a, mk_plain_bszW(get_bszW_lo(
                               payload_to_first(a, align_p))));
   if (a->bytes_on_loan > a->bytes_on_loan_max)
      a->bytes_on_loan_max = a->bytes_on_loan;

#  ifdef DEBUG_MALLOC
   mallocSanityCheckArena(aid);
#  endif

   VGP_POPCC(VgpMalloc);

   v = (void*)align_p;
   vg_assert( (((UInt)v) % req_alignB) == 0 );
   return v;
}
/* Allocate req_pszB bytes from arena 'aid'.  Searches the segregated
   free lists (smallest suitable list first) for a big-enough block; if
   none exists, maps a fresh superblock.  The chosen block is split if
   the remainder would form a viable free fragment, otherwise handed out
   whole.  Returns an 8-byte-aligned payload pointer, or NULL on OOM —
   which can only happen for the client arena (newSuperblock aborts for
   all others). */
void* VG_(arena_malloc) ( ArenaId aid, Int req_pszB )
{
   Int         req_pszW, req_bszW, frag_bszW, b_bszW, lno;
   Superblock* new_sb;
   Word*       b;
   Arena*      a;
   void*       v;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   vg_assert(req_pszB >= 0);
   vg_assert(req_pszB < 0x7FFFFFF0);

   req_pszW = req_pszB_to_req_pszW(req_pszB);

   /* Keep gcc -O happy: */
   b = NULL;

   /* Start searching at this list. */
   lno = pszW_to_listNo(req_pszW);

   /* This loop finds a list which has a block big enough, or sets
      lno to VG_N_MALLOC_LISTS if no such block exists. */
   while (True) {
      if (lno == VG_N_MALLOC_LISTS) break;
      /* If this list is empty, try the next one. */
      if (a->freelist[lno] == NULL) {
         lno++;
         continue;
      }
      /* Scan a->freelist[lno] (a circular list) to find a big-enough
         chunk. */
      b = a->freelist[lno];
      b_bszW = mk_plain_bszW(get_bszW_lo(b));
      while (True) {
         if (bszW_to_pszW(a, b_bszW) >= req_pszW) break;
         b = get_next_p(b);
         b_bszW = mk_plain_bszW(get_bszW_lo(b));
         /* Wrapped around without finding one. */
         if (b == a->freelist[lno]) break;
      }
      if (bszW_to_pszW(a, b_bszW) >= req_pszW) break;
      /* No luck?  Try a larger list. */
      lno++;
   }

   /* Either lno < VG_N_MALLOC_LISTS and b points to the selected
      block, or lno == VG_N_MALLOC_LISTS, and we have to allocate a
      new superblock. */
   if (lno == VG_N_MALLOC_LISTS) {
      req_bszW = pszW_to_bszW(a, req_pszW);
      new_sb = newSuperblock(a, req_bszW);
      if (NULL == new_sb) {
         /* Should only fail if for client, otherwise, should have
            aborted already. */
         vg_assert(VG_AR_CLIENT == aid);
         /* FIX: this early return previously skipped VGP_POPCC,
            leaving the profiling cost-center stack unbalanced; every
            other exit path pops. */
         VGP_POPCC(VgpMalloc);
         return NULL;
      }
      new_sb->next = a->sblocks;
      a->sblocks = new_sb;
      b = &(new_sb->payload_words[0]);
      lno = pszW_to_listNo(bszW_to_pszW(a, new_sb->n_payload_words));
      mkFreeBlock ( a, b, new_sb->n_payload_words, lno);
   }

   /* Ok, we can allocate from b, which lives in list lno. */
   vg_assert(b != NULL);
   vg_assert(lno >= 0 && lno < VG_N_MALLOC_LISTS);
   vg_assert(a->freelist[lno] != NULL);
   b_bszW = mk_plain_bszW(get_bszW_lo(b));
   req_bszW = pszW_to_bszW(a, req_pszW);
   /* req_bszW is the size of the block we are after.  b_bszW is the
      size of what we've actually got. */
   vg_assert(b_bszW >= req_bszW);

   /* Could we split this block and still get a useful fragment?
      Where "useful" means that the payload size of the frag is at
      least one word.  */
   frag_bszW = b_bszW - req_bszW;
   if (frag_bszW > overhead_szW(a)) {
      splitChunk(a, b, lno, req_bszW);
   } else {
      /* No, mark as in use and use as-is. */
      unlinkBlock(a, b, lno);
      mkInuseBlock(a, b, b_bszW);
   }
   vg_assert(req_bszW <= mk_plain_bszW(get_bszW_lo(b)));

   /* Account the full block actually handed out (post-split size). */
   a->bytes_on_loan
      += sizeof(Word)
         * bszW_to_pszW(a, mk_plain_bszW(get_bszW_lo(b)));
   if (a->bytes_on_loan > a->bytes_on_loan_max)
      a->bytes_on_loan_max = a->bytes_on_loan;

#  ifdef DEBUG_MALLOC
   mallocSanityCheckArena(aid);
#  endif

   VGP_POPCC(VgpMalloc);
   v = first_to_payload(a, b);
   vg_assert( (((UInt)v) & 7) == 0 );
   return v;
}