/*
 * Debugging front end for free().  Dispatches to the user-installed
 * __free_hook when one is registered, otherwise releases the pointer
 * through the low-level _free().  file/line identify the call site
 * for diagnostics.
 */
void
__dbfree(const char *file, int line, void *cptr)
{
    ENTER();
    MALLOC_INIT();

    if (__free_hook == NULL)
        _free(cptr);
    else
        (*__free_hook)(file, line, cptr);

    LEAVE();
}
/*
 * Debugging front end for malloc().  Routes the request through the
 * user-installed __malloc_hook when one is registered, otherwise
 * through the low-level _malloc().  Returns the allocated pointer
 * (or whatever the hook/_malloc returned on failure).
 */
void *
__dbmalloc(const char *file, int line, size_t size)
{
    void *result;

    ENTER();
    MALLOC_INIT();

    if (__malloc_hook == NULL)
        result = _malloc(size);
    else
        result = (*__malloc_hook)(file, line, size);

    LEAVE();
    return result;
}
/*
 * Public entry point for the heap mark pass.  Takes the thread-hold
 * lock, then delegates to DBFmalloc_mark() using the address of a
 * local variable as the top-of-stack estimate for root scanning.
 * Returns the status reported by DBFmalloc_mark().
 */
int
DBmalloc_mark(const char *file, int line, int todo)
{
    char *stack_probe = NULL;
    ulong_t stack_top = (ulong_t)&stack_probe;
    int result;

    ENTER();
    MALLOC_INIT();

    _malloc_hold_threads();
    result = DBFmalloc_mark("malloc_mark", file, line, stack_top, todo);
    _malloc_release_threads();

    LEAVE();
    return result;
}
/*
 * Allocation smoke test: allocate `num` random-sized buffers through
 * je_malloc(), touch every byte, then free them all.
 *
 * Fixes over the original:
 *  - je_malloc() results are checked before memset(); a NULL return
 *    previously caused undefined behavior on allocation failure.
 *  - the size computation is guarded so that max_size == min_size no
 *    longer performs `rand() % 0` (undefined behavior).
 */
int
main(void)
{
    size_t i;
    char *ptrs[num];
    size_t range = max_size - min_size;  /* assumes max_size >= min_size */

    MALLOC_INIT();

    for (i = 0; i < num; i++) {
        size_t size = min_size + (range != 0 ? (size_t)rand() % range : 0);

        ptrs[i] = je_malloc(size);
        if (ptrs[i] == NULL)
            return 1;  /* allocation failed; abort the test */
        memset(ptrs[i], 1, size);
    }

    for (i = 0; i < num; i++)
        je_free(ptrs[i]);

    return 0;
}
/*
 * Run a mark pass and then dump all heap elements that were not
 * reached from any root (i.e. apparent leaks) to file descriptor fd.
 *
 * Holds the thread lock for the duration, redirects malloc output to
 * fd, and restores it before returning.  Returns the status of the
 * mark pass.
 *
 * Fix over the original: the return value of write() was silently
 * ignored; it is now checked (and deliberately discarded as
 * best-effort, since the banner is diagnostic output only).
 */
int
DBmalloc_dump_unreferenced(int fd, const char *file, int line, int todo)
{
    static char *title = "Dump of Unreferenced Heap Elements\n";
    int rtn = 0;
    char *temp = NULL;
    ulong_t top = (ulong_t)&temp;   /* top-of-stack estimate for root scan */

    ENTER();
    MALLOC_INIT();
    _malloc_hold_threads();

    __malloc_outfd = fd;
    rtn = DBFmalloc_mark("malloc_check_unreferenced", file, line, top, todo);

    /* Best-effort banner: the dump below still runs if the write fails. */
    if (write(fd, title, strlen(title)) < 0) {
        /* ignored: diagnostic output only */
    }
    malloc_list_items(fd, 3, 0L, 0L);  /* 3 == unreferenced items only */

    __malloc_outfd = -1;
    _malloc_release_threads();
    LEAVE();
    return rtn;
}
/* * Must be called with the mutex locked! */ int DBFmalloc_chain_check(const char *func,const char *file,int line,int todo) { Arena * ap; Block * bp; Dtail * dt; Dhead * dp, *dpend; int nb; int rtn = 0; int cont = 1; arena_range_t range; MALLOC_INIT(); /* * Go through malloc chains for each Arena */ for (ap = __arenas.a_next; cont && ap != &__arenas; ap = ap->a_next) { if (ap->a_size < sizeof(*ap)) panic("a_size"); dt = &ap->a_dtail; dp = (Dhead *)(ap + 1); dpend = (Dhead *)((char *)ap + ap->a_size - sizeof(Dtail)); range.r_start = (char *)dp; range.r_end = (char *)dpend; range.r_type = RANGE_ARENA; range.un.r_arena = ap; rtn += DBFmalloc_chain_check_helper(func,file,line,todo, &range, &cont); } /* * Go through malloc chains for every Block in every Band */ for (nb = 0; cont && nb < *__pnband; nb++) { Band *band; band = __pBands[nb]; /* * For each Block on the allocated list */ for (bp = band->alist; cont && bp; bp = bp->next) { int esize = bp->nbpe + SB_OVERHEAD(); range.r_start = (char *)(bp + 1); range.r_end = range.r_start + esize * band->nalloc; range.r_type = RANGE_BLOCK; range.un.r_block = bp; rtn += DBFmalloc_chain_check_helper(func,file,line,todo,&range,&cont); } /* * For each Block on the depleted list */ for (bp = band->dlist; cont && bp; bp = bp->next) { int esize = bp->nbpe + SB_OVERHEAD(); range.r_start = (char *)(bp + 1); range.r_end = range.r_start + esize * band->nalloc; range.r_type = RANGE_BLOCK; range.un.r_block = bp; rtn += DBFmalloc_chain_check_helper(func,file,line,todo,&range,&cont); } } return(rtn); } /* malloc_chain_check(... */
// we assume we are already locked when inside here // so we DO not call ENTER / LEAVE int DBFmalloc_mark(const char *func, const char *file, int line, ulong_t top, int todo) { int rtn = 0; mse_t *roots; int nroots; Block * bp; int nb; int i = 0; unsigned int start, end; int count=0; int cache = 0; extern int _malloc_scan_start(); extern int _malloc_scan_finish(); MALLOC_INIT(); /* * Mark all the nodes as unreferenced at the start. * Make sure the malloc chain is valid or we will be in for trouble */ if ((rtn = DBFmalloc_mark_all("DBFmalloc_mark", file, line, todo, 0)) != 0) { return rtn; } if ((roots = alloca(MAX_ROOTS * sizeof *roots)) == NULL) { return 0; } mc_cache_clear(); cache = mc_get_cache_size(); mc_set_cache(0); nroots = _malloc_scan_start(roots, MAX_ROOTS, top); _mark_stack_create(); /* * We go through each root of the root set. * We never mark more than one page of a root at a time to avoid * having its unmarked children overflowing the mark stack. */ start = roots[i].mse_start; end = roots[i].mse_end; if (end > start + __pagesize) end = start + __pagesize; while (i < nroots) { _mark_stack_push(start, end); count++; /* _mark will find every heap node reachable from the root * that was pushed onto the mark stack. 
*/ rtn += _mark(func, file, line, todo); start += __pagesize; if (start >= roots[i].mse_end) { i++; start = roots[i].mse_start; end = roots[i].mse_end; if (end > start + __pagesize) end = start + __pagesize; } else { end += __pagesize; if (end > roots[i].mse_end) end = roots[i].mse_end; } } _mark_stack_destroy(); // /* // * Go through every Block in every Band and mark it // * - this is because it's an internal structure and shouldn't be // * reported as a leak // */ // for (nb = 0; nb < *__pnband; nb++) // { // Band *band; // // band = __pBands[nb]; // // /* // * For each Block on the allocated list // */ // for (bp = band->alist; bp; bp = bp->next) // { // Dhead *dh = (Dhead *)bp - 1; // dh->d_debug.flag |= M_REFERENCED; // } // // /* // * For each Block on the depleted list // */ // for (bp = band->dlist; bp; bp = bp->next) // { // Dhead *dh = (Dhead *)bp - 1; // dh->d_debug.flag |= M_REFERENCED; // } // } _malloc_scan_finish(); mc_set_cache(cache); return rtn; } /* malloc_mark(... */
/*
 * DBmalloc_size -- validate a debug-malloc pointer and return the
 * size recorded for it.
 *
 * cptr must be a pointer previously returned by this malloc package.
 * The pointer is checked for: range/alignment within the malloc
 * region, the magic number in its header, the in-use flag, and
 * doubly-linked-list consistency; the fill regions are then checked
 * for overflow.  On any validation failure malloc_errno is set, a
 * malloc warning is issued, and (SIZETYPE) -1 is returned; otherwise
 * the segment's recorded request size (ptr->r_size) is returned.
 */
SIZETYPE DBmalloc_size(
    CONST char     * file,
    int              line,
    CONST DATATYPE * cptr )
{
    char *func = "malloc_size";
    register struct mlist *ptr;

    // initialize the malloc sub-system.
    MALLOC_INIT();

#ifdef MALLOC_PTHREAD
    // NOTE(review): malloc_preamble appears to suppress locking during
    // early startup -- confirm against the rest of the package.
    if ( !malloc_preamble )
        pthread_mutex_lock( &malloc_mutex );
#endif

    // IF malloc chain checking is on, go do it.
    if (malloc_opts & MOPT_CKCHAIN)
    {
        VOIDCAST DBFmalloc_chain_check(func, file, line, 1);
    }

    //
    // verify that cptr is within the malloc region and that it is on
    // the correct alignment
    //
    if (    (cptr < malloc_data_start)
         || (cptr > malloc_data_end)
         || ((((long) cptr) & malloc_round) != 0))
    {
        malloc_errno = M_CODE_BAD_PTR;
        malloc_warning(func, file, line, (struct mlist *) NULL);
#ifdef MALLOC_PTHREAD
        if ( !malloc_preamble )
            pthread_mutex_unlock( &malloc_mutex );
#endif
        return( (SIZETYPE) -1 );
    }

    //
    // convert pointer to mlist struct pointer.  To do this we must
    // move the pointer backwards the correct number of bytes...
    //
    ptr = DATATOMLIST(cptr);

    // check the magic number
    if ((ptr->flag & M_MAGIC_BITS) != M_MAGIC)
    {
        malloc_errno = M_CODE_BAD_MAGIC;
        malloc_warning(func, file, line, (struct mlist *) NULL);
#ifdef MALLOC_PTHREAD
        if ( !malloc_preamble )
            pthread_mutex_unlock( &malloc_mutex );
#endif
        return( (SIZETYPE) -1 );
    }

    // if this segment is not flagged as being in use
    if (!(ptr->flag & M_INUSE))
    {
        malloc_errno = M_CODE_NOT_INUSE;
        malloc_warning(func, file, line, ptr);
#ifdef MALLOC_PTHREAD
        if ( !malloc_preamble )
            pthread_mutex_unlock( &malloc_mutex );
#endif
        return( (SIZETYPE) -1 );
    }

    // check to see that the pointers are still connected
    // (a node with both links NULL is also considered disconnected)
    if (   (ptr->prev && (ptr->prev->next != ptr))
        || (ptr->next && (ptr->next->prev != ptr))
        || ((ptr->next == NULL) && (ptr->prev == NULL)))
    {
        malloc_errno = M_CODE_BAD_CONNECT;
        malloc_warning(func, file, line, ptr);
#ifdef MALLOC_PTHREAD
        if ( !malloc_preamble )
            pthread_mutex_unlock( &malloc_mutex );
#endif
        return( (SIZETYPE) -1 );
    }

    // check fill regions for overflow
    VOIDCAST FILLCHECK(func, file, line, ptr, SHOWERRORS);

#ifdef MALLOC_PTHREAD
    if ( !malloc_preamble )
        pthread_mutex_unlock( &malloc_mutex );
#endif
    return( ptr->r_size );
} // end of DBmalloc_size(...)