/*
 * Terminate the allocator: free the entire allocation tree rooted at "app"
 * in a carefully ordered sequence, then release the app block itself.
 * After this call, "app" and everything allocated from it are invalid.
 */
void mprAllocTerm(MprApp *app)
{
    MprSlab     *slabs;
    MprBlk      *appBlk, *slabBlk;

    /*
     *  Must do a carefully ordered cleanup. Need to free all children blocks
     *  before freeing the slab memory. Save a local pointer to the slabs.
     */
    slabs = app->alloc.slabs;

    /*
     *  Free the app and all children. Set DONT_OS_FREE to prevent free() being
     *  called on app itself. We need that so we can free the slabs below.
     */
    appBlk = GET_HDR(app);
    appBlk->flags |= ALLOC_FLAGS_DONT_OS_FREE;
    mprFree(app);

    /*
     *  Slabs are initially marked don't free. We must preserve them while all
     *  other blocks are freed. Then we clear the don't free flag and free.
     *  Now we don't have an app structure which is used by mprFree. We must
     *  fake it.
     */
    slabBlk = GET_HDR(slabs);
    slabBlk->flags &= ~ALLOC_FLAGS_KEEP;
    mprFree(slabs);

    /*
     *  Now we can finally free the memory for the app structure
     */
    free(appBlk);
}
/*!
 * Free memory chunk
 * \param mpool Memory pool to be used (if NULL default pool is used)
 * \param chunk Chunk location (starting address)
 * \return 0 if successful, -1 otherwise
 */
int ffs_free ( ffs_mpool_t *mpool, void *chunk_to_be_freed )
{
	ffs_hdr_t *chunk, *before, *after;

	ASSERT ( mpool && chunk_to_be_freed );

	/* step back from user data to the chunk header; arithmetic on void*
	   is a GCC extension that treats it as byte-sized */
	chunk = chunk_to_be_freed - sizeof (size_t);
	MARK_FREE ( chunk ); /* mark it as free */

	/* join with left? */
	/* "before" first points at the left neighbour's tail word; CHECK_FREE
	   reads the free bit stored there, and GET_HDR maps the tail back to
	   that neighbour's header -- TODO confirm against macro definitions */
	before = ( (void *) chunk ) - sizeof(size_t);
	if ( CHECK_FREE ( before ) )
	{
		before = GET_HDR ( before );
		ffs_remove_chunk ( mpool, before );
		before->size += chunk->size; /* join */
		chunk = before;
	}

	/* join with right? */
	after = GET_AFTER ( chunk );
	if ( CHECK_FREE ( after ) )
	{
		ffs_remove_chunk ( mpool, after );
		chunk->size += after->size; /* join */
	}

	/* insert chunk in free list */
	ffs_insert_chunk ( mpool, chunk );

	/* set chunk tail */
	CLONE_SIZE_TO_TAIL ( chunk );

	return 0;
}
/*
 * Return the total number of blocks in the allocation subtree rooted at
 * "ptr", including ptr's own block. Returns 0 for a null pointer.
 */
uint mprGetAllocBlockCount(MprCtx ptr)
{
    MprBlk  *bp, *firstChild, *cp;
    uint    count;

    /*
     * Check for null before asserting validity. Previously the assert ran
     * first, so a null pointer would fire the assert in debug builds and
     * defeat the graceful "return 0" below.
     */
    if (ptr == 0) {
        return 0;
    }
    mprAssert(VALID_BLK(ptr));

    bp = GET_HDR(ptr);
    mprAssert(VALID_HDR(bp));

    /*
     * Add one for itself
     */
    count = 1;
    if ((firstChild = bp->children) != 0) {
        cp = firstChild;
        do {
            count += mprGetAllocBlockCount(GET_PTR(cp));
            cp = cp->next;
        } while (cp != firstChild);
    }
    return count;
}
/*
 * Return true (non-zero) if "ptr" refers to a block with a valid header.
 * A null pointer is reported as invalid.
 */
int mprIsAllocBlockValid(MprCtx ptr)
{
    MprBlk  *bp;

    /*
     * Guard against null: GET_HDR(0) would yield a bogus non-null header
     * pointer and VALID_HDR would then read unmapped memory.
     */
    if (ptr == 0) {
        return 0;
    }
    bp = GET_HDR(ptr);
    return (bp && VALID_HDR(bp));
}
/*
 * Non-macro version of the header location routine: return the header
 * descriptor for the heap block containing address h.
 */
GC_INNER hdr * GC_find_header(ptr_t h)
{
#   ifdef HASH_TL
        hdr * hhdr;

        GET_HDR(h, hhdr);
        return hhdr;
#   else
        return HDR_INNER(h);
#   endif
}
/*
 * Debug validation of a single block: check its own header, then walk its
 * child list verifying that each child's sibling links are consistent and
 * that every child points back to this block as its parent.
 * Returns 0 (validation failures fire asserts in debug builds).
 */
int mprValidateBlock(MprCtx ptr)
{
    MprBlk  *bp, *parent, *cp, *firstChild;
    int     count;

    mprAssert(ptr);
    mprAssert(VALID_BLK(ptr));

    bp = GET_HDR(ptr);

    mprAssert(bp);
    mprAssert(VALID_HDR(bp));
    mprAssert(VALID_HDR(bp->parent));

    if (ptr != bp->app) {
        mprAssert(bp != bp->parent);
    }
    mprAssert(! (bp->flags & ALLOC_FLAGS_FREE));
    mprAssert(! (bp->flags & ALLOC_FLAGS_FREEING));

    count = 0;
    parent = bp->parent;

    /*
     * Validate bp's own sibling links once (the old code repeated these
     * same bp checks on every loop iteration).
     */
    mprAssert(bp->next->prev == bp);
    mprAssert(bp->prev->next == bp);
    mprAssert(bp->prev->parent == parent);
    mprAssert(bp->next->parent == parent);

    if ((firstChild = bp->children) != 0) {
        cp = firstChild;
        mprAssert((int) cp != 0xfeefee);
        do {
            /*
             * Fix: validate each child "cp" rather than "bp". The previous
             * code referenced bp inside this loop, so children were counted
             * but never actually checked.
             */
            mprAssert(cp->next->prev == cp);
            mprAssert(cp->prev->next == cp);
            mprAssert(cp->prev->parent == bp);
            mprAssert(cp->next->parent == bp);
            count++;
            if (cp->next == cp) {
                /*
                 * Singleton list: cp must be its own prev and the sole child
                 */
                mprAssert(cp->prev == cp);
                mprAssert(bp->children == cp);
            }
            if (cp->prev == cp) {
                mprAssert(cp->next == cp);
                mprAssert(bp->children == cp);
            }
            cp = cp->next;
        } while (cp != firstChild);
    }
    return 0;
}
/*
 * Recursively log the allocation tree rooted at "ptr", indenting children
 * by two spaces per level. Returns the aggregate byte total (block sizes
 * plus headers) of the blocks it printed.
 */
int mprPrintAllocBlocks(MprCtx ptr, int indent)
{
    MprBlk      *bp, *firstChild, *cp;
    const char  *location;
    int         subTotal, size, indentSpaces, code;

    subTotal = 0;
    bp = GET_HDR(ptr);

    if (! (bp->flags & ALLOC_FLAGS_REQUIRED)) {
        size = bp->size + HDR_SIZE;

        /*
         * Take one level off because we don't trace app
         */
        indentSpaces = indent;
        /*
         * NOTE(review): this branch is inside the !ALLOC_FLAGS_REQUIRED
         * guard above, so code can never be 'R' here -- dead branch, left
         * in place pending confirmation of the intended behavior.
         */
        if (bp->flags & ALLOC_FLAGS_REQUIRED) {
            code = 'R';
        } else if (bp->flags & ALLOC_FLAGS_IS_SLAB) {
            code = 'S';
        } else {
            code = ' ';
        }

#if BLD_FEATURE_ALLOC_LEAK_TRACK
        location = bp->location;
#else
        location = "";
#endif
        mprLog(bp->app, 0,
            "%c %.*s %-16s %.*s size %5d has %3d deps, total %6d",
            code,
            indentSpaces, "                                                ",
            mprGetBaseName(location),
            8 - indent, "                ",
            size,
            mprGetAllocBlockCount(GET_PTR(bp)),
            mprGetAllocBlockMemory(GET_PTR(bp))
            /* (uint) bp */ );
        subTotal += size;
    }

    if ((firstChild = bp->children) != 0) {
        cp = firstChild;
        do {
            subTotal += mprPrintAllocBlocks(GET_PTR(cp), indent + 2);
            cp = cp->next;
        } while (cp != firstChild);
    }
    return subTotal;
}
/*
 * Return the source location recorded when the block "ptr" was allocated,
 * or 0 if ptr is null or leak tracking is compiled out.
 */
const char *mprGetAllocLocation(MprCtx ptr)
{
    MprBlk  *bp;

    if (ptr == 0) {
        return 0;
    }
    mprAssert(VALID_BLK(ptr));
    bp = GET_HDR(ptr);
    mprAssert(VALID_HDR(bp));

#if BLD_FEATURE_ALLOC_LEAK_TRACK
    return bp->location;
#else
    /*
     * Every other access to bp->location in this file is guarded by
     * BLD_FEATURE_ALLOC_LEAK_TRACK; without the guard this function
     * would not compile when the feature is disabled.
     */
    return 0;
#endif
}
/*
 * Slow path of the header-cache lookup: resolve the block header for "p"
 * after a cache miss, updating the cache entry "hce" on success.
 * NOTE(review): the return type and an alternate #if signature precede this
 * chunk (note the bare #endif after the parameter list); "source" used in
 * GC_ADD_TO_BLACK_LIST_NORMAL is presumably a parameter/macro bound in that
 * hidden part -- confirm against the full file.
 */
GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce)
#endif
{
  hdr *hhdr;
  HC_MISS();

  GET_HDR(p, hhdr);
  if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
    if (GC_all_interior_pointers) {
      if (hhdr != 0) {
        ptr_t current = p;

        current = (ptr_t)HBLKPTR(current);
        /*
         * Walk forwarding counts backwards to find the header near the
         * start of the (large) object containing p.
         */
        do {
            current = current - HBLKSIZE*(word)hhdr;
            hhdr = HDR(current);
        } while(IS_FORWARDING_ADDR_OR_NIL(hhdr));
        /* current points to near the start of the large object */
        if (hhdr -> hb_flags & IGNORE_OFF_PAGE)
            return 0;
        if (HBLK_IS_FREE(hhdr)
            || p - current >= (ptrdiff_t)(hhdr->hb_sz)) {
            GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
            /* Pointer past the end of the block */
            return 0;
        }
      } else {
        GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
        /* And return zero: */
      }
      GC_ASSERT(hhdr == 0 || !HBLK_IS_FREE(hhdr));
      return hhdr;
      /* Pointers past the first page are probably too rare     */
      /* to add them to the cache.  We don't.                   */
      /* And correctness relies on the fact that we don't.      */
    } else {
      if (hhdr == 0) {
        GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
      }
      return 0;
    }
  } else {
    if (HBLK_IS_FREE(hhdr)) {
      GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
      return 0;
    } else {
      /* Valid pointer into a live block: refresh the cache entry. */
      hce -> block_addr = (word)(p) >> LOG_HBLKSIZE;
      hce -> hce_hdr = hhdr;
      return hhdr;
    }
  }
}
/*
 * Return the MprApp that owns the allocation context "ptr".
 */
MprApp *mprGetApp(MprCtx ptr)
{
    MprBlk  *blk;

    mprAssert(ptr);

    blk = GET_HDR(ptr);
    mprAssert(VALID_HDR(blk));
    CHECK_HDR(blk);
    mprAssert(blk->app->magic == APP_MAGIC);

    return blk->app;
}
/*
 * Allocate a block of "size" bytes from context "ctx" and zero-fill it.
 * Returns the new block, or 0 on allocation failure.
 */
void *mprAllocZeroedBlock(MPR_LOC_DEC(ctx, loc), uint size)
{
    void    *newBlock;

    /*
     * Removed the unused local "bp" and its dead GET_HDR(ctx) call;
     * mprAllocBlock performs its own header validation of ctx.
     */
    mprAssert(VALID_BLK(ctx));

    newBlock = mprAllocBlock(MPR_LOC_PASS(ctx, loc), size);
    if (newBlock) {
        memset(newBlock, 0, size);
    }
    return newBlock;
}
/*
 * Mark the block "ptr" as required (exempt from leak reporting). When
 * "recurse" is true, every descendant block is marked as well.
 */
void mprSetRequiredAlloc(MprCtx ptr, bool recurse)
{
    MprBlk  *blk, *head, *child;

    blk = GET_HDR(ptr);
    blk->flags |= ALLOC_FLAGS_REQUIRED;

    if (!recurse) {
        return;
    }
    head = blk->children;
    if (head == 0) {
        return;
    }
    child = head;
    do {
        mprSetRequiredAlloc(GET_PTR(child), recurse);
        child = child->next;
    } while (child != head);
}
/*
 * Return the user-data size (excluding the header) of the block "ptr".
 * Returns 0 for a null pointer.
 */
uint mprGetAllocBlockSize(MprCtx ptr)
{
    MprBlk  *bp;

    /*
     * Null check must precede the validity assert; previously a null ptr
     * would trip the assert in debug builds instead of returning 0.
     */
    if (ptr == 0) {
        return 0;
    }
    mprAssert(VALID_BLK(ptr));

    bp = GET_HDR(ptr);
    mprAssert(VALID_HDR(bp));
    CHECK_HDR(bp);
    return bp->size;
}
/*
 * Free a chunk back to the best-fit pool. The chunk is coalesced with its
 * left and right neighbours when they are free, then re-inserted into the
 * free list, which bf_insert_chunk keeps sorted by size (smallest block
 * first, largest last).
 * \param mpool Memory pool to be used
 * \param chunk_to_be_freed Chunk location (starting address)
 * \return 0 if successful
 */
int bf_free ( bf_mpool_t *mpool, void *chunk_to_be_freed )
{
	bf_hdr_t *chunk, *before, *after;

	ASSERT ( mpool && chunk_to_be_freed );

	/* step back from user data to the chunk header */
	chunk = chunk_to_be_freed - sizeof (size_t);
	MARK_FREE ( chunk ); /* mark it as free */

	/* join with left? */
	before = ( (void *) chunk ) - sizeof(size_t);
	if ( CHECK_FREE ( before ) )
	{
		before = GET_HDR ( before );
		bf_remove_chunk ( mpool, before );
		before->size += chunk->size; /* join */
		chunk = before;
	}

	/* join with right? */
	after = GET_AFTER ( chunk );
	if ( CHECK_FREE ( after ) )
	{
		bf_remove_chunk ( mpool, after );
		chunk->size += after->size; /* join */
	}

	/* insert the free chunk into the (sorted) free list */
	bf_insert_chunk ( mpool, chunk );

	/* set chunk tail */
	CLONE_SIZE_TO_TAIL ( chunk );

	/*
	 * Removed leftover debug code that printed the entire free list on
	 * every free ("Ispis slobodnih blokova" loop).
	 */
	return 0;
}
/*
 * Return the user pointer of the parent block of "ptr", or 0 for null.
 */
void *mprGetAllocParent(MprCtx ptr)
{
    MprBlk  *bp;

    /*
     * Null check must precede the validity assert; previously a null ptr
     * would trip the assert in debug builds instead of returning 0.
     */
    if (ptr == 0) {
        return 0;
    }
    mprAssert(VALID_BLK(ptr));

    bp = GET_HDR(ptr);
    mprAssert(VALID_HDR(bp));
    CHECK_HDR(bp);
    return GET_PTR(bp->parent);
}
/*
 * Recursively validate the allocation subtree rooted at "ptr". Compiled to
 * a no-op unless VALIDATE_ALLOC is enabled. Always returns 0.
 */
int mprValidateAllocTree(MprCtx ptr)
{
#if VALIDATE_ALLOC
    MprBlk  *blk, *child, *head;

    mprAssert(ptr);
    mprAssert(VALID_BLK(ptr));

    blk = GET_HDR(ptr);
    mprValidateBlock(GET_PTR(blk));

    head = blk->children;
    if (head != 0) {
        child = head;
        do {
            mprValidateAllocTree(GET_PTR(child));
            child = child->next;
        } while (child != head);
    }
#endif
    return 0;
}
/*
 * Return the total memory (user sizes plus headers) consumed by the
 * allocation subtree rooted at "ptr". Returns 0 for a null pointer.
 */
uint mprGetAllocBlockMemory(MprCtx ptr)
{
    MprBlk  *bp, *firstChild, *cp;
    uint    count;

    /*
     * Null check must precede the validity assert; previously a null ptr
     * would trip the assert in debug builds instead of returning 0.
     */
    if (ptr == 0) {
        return 0;
    }
    mprAssert(VALID_BLK(ptr));

    bp = GET_HDR(ptr);
    mprAssert(VALID_HDR(bp));

    count = bp->size + HDR_SIZE;
    if ((firstChild = bp->children) != 0) {
        cp = firstChild;
        do {
            count += mprGetAllocBlockMemory(GET_PTR(cp));
            cp = cp->next;
        } while (cp != firstChild);
    }
    return count;
}
/*
 * Install "destructor" as the function to run when block "ptr" is freed.
 * Returns the previously installed destructor (0 if none, or on null ptr).
 */
MprDestructor mprSetDestructor(MprCtx ptr, MprDestructor destructor)
{
    MprDestructor   old;
    MprBlk          *bp;

    /*
     * Null check must precede the validity assert; previously a null ptr
     * would trip the assert in debug builds instead of returning 0.
     */
    if (ptr == 0) {
        return 0;
    }
    mprAssert(VALID_BLK(ptr));

    bp = GET_HDR(ptr);
    mprAssert(bp);
    mprAssert(VALID_HDR(bp));
    mprAssert(ptr != mprGetAllocParent(ptr));
    CHECK_HDR(bp);

    old = bp->destructor;
    bp->destructor = destructor;
    return old;
}
/*
 * Re-parent block "ptr" from its current parent onto "ctx": unlink it from
 * the old parent's child ring, then append it to the end of ctx's child
 * ring. The block's contents and children are untouched. Returns 0.
 */
int mprStealAllocBlock(MPR_LOC_DEC(ctx, loc), const void *ptr)
{
    MprBlk  *bp, *parent;

    if (ptr == 0) {
        return 0;
    }

    mprAssert(VALID_BLK(ctx));
    mprAssert(VALID_BLK(ptr));

    bp = GET_HDR(ptr);

#if BLD_DEBUG && !BREW
    if (bp == stopAlloc) {
        mprBreakpoint(MPR_LOC, "breakOnAddr");
    }
#endif

    mprAssert(bp);
    mprAssert(VALID_HDR(bp));
    mprAssert(ptr != mprGetAllocParent(ptr));
    CHECK_HDR(bp);

    mprAssert(bp->prev);
    mprAssert(bp->prev->next);
    mprAssert(bp->next);
    mprAssert(bp->next->prev);

    parent = bp->parent;
    mprAssert(VALID_HDR(parent));

    mprLock(bp->app->allocLock);

    /*
     * Unlink from the old parent: advance the parent's child head past bp
     * (or clear it if bp was the only child), then splice bp out of the
     * circular sibling list.
     */
    if (parent->children == bp) {
        if (bp->next == bp) {
            parent->children = 0;
        } else {
            parent->children = bp->next;
        }
    }
    bp->prev->next = bp->next;
    bp->next->prev = bp->prev;

    /*
     * Link into the new parent ctx. Appending before the head preserves
     * allocation order in the ring.
     */
    parent = GET_HDR(ctx);
    mprAssert(VALID_HDR(parent));
    bp->parent = parent;

    if (parent->children == 0) {
        parent->children = bp;
        bp->next = bp->prev = bp;
    } else {
        bp->next = parent->children;
        bp->prev = parent->children->prev;
        parent->children->prev->next = bp;
        parent->children->prev = bp;
    }
#if BLD_FEATURE_ALLOC_LEAK_TRACK
    bp->location = loc;
#endif
    VALIDATE_BLOCK(GET_PTR(bp));
    mprUnlock(bp->app->allocLock);
    return 0;
}
/* finalized when this finalizer is invoked. */
/*
 * Register a finalizer "fn" (with client data "cd") for object "obj",
 * replace an existing registration, or remove one when fn == 0. Previous
 * fn/cd are returned through *ofn/*ocd when non-null. "mp" selects the
 * mark procedure used to trace from finalizable objects.
 */
GC_API void GC_register_finalizer_inner(void * obj, GC_finalization_proc fn,
                                        void *cd, GC_finalization_proc *ofn,
                                        void **ocd, finalization_mark_proc mp)
{
    ptr_t base;
    struct finalizable_object * curr_fo, * prev_fo;
    size_t index;
    struct finalizable_object *new_fo;
    hdr *hhdr;
    DCL_LOCK_STATE;

#   ifdef THREADS
        LOCK();
#   endif
    /* Grow the hash table when the load factor exceeds 1. */
    if (log_fo_table_size == -1
        || GC_fo_entries > ((word)1 << log_fo_table_size)) {
        GC_grow_table((struct hash_chain_entry ***)(&fo_head),
                      &log_fo_table_size);
        if (GC_print_stats) {
            GC_log_printf("Grew fo table to %u entries\n",
                          (1 << log_fo_table_size));
        }
    }
    /* in the THREADS case signals are disabled and we hold allocation */
    /* lock; otherwise neither is true. Proceed carefully.             */
    base = (ptr_t)obj;
    index = HASH2(base, log_fo_table_size);
    prev_fo = 0; curr_fo = fo_head[index];
    /* Search the chain for an existing registration of base. */
    while (curr_fo != 0) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        if (curr_fo -> fo_hidden_base == HIDE_POINTER(base)) {
            /* Interruption by a signal in the middle of this   */
            /* should be safe. The client may see only *ocd     */
            /* updated, but we'll declare that to be his        */
            /* problem.                                         */
            if (ocd) *ocd = (void *) (curr_fo -> fo_client_data);
            if (ofn) *ofn = curr_fo -> fo_fn;
            /* Delete the structure for base. */
            if (prev_fo == 0) {
                fo_head[index] = fo_next(curr_fo);
            } else {
                fo_set_next(prev_fo, fo_next(curr_fo));
            }
            if (fn == 0) {
                GC_fo_entries--;
                /* May not happen if we get a signal. But a high  */
                /* estimate will only make the table larger than  */
                /* necessary.                                     */
#               if !defined(THREADS) && !defined(DBG_HDRS_ALL)
                    GC_free((void *)curr_fo);
#               endif
            } else {
                curr_fo -> fo_fn = fn;
                curr_fo -> fo_client_data = (ptr_t)cd;
                curr_fo -> fo_mark_proc = mp;
                /* Reinsert it. We deleted it first to maintain */
                /* consistency in the event of a signal.        */
                if (prev_fo == 0) {
                    fo_head[index] = curr_fo;
                } else {
                    fo_set_next(prev_fo, curr_fo);
                }
            }
#           ifdef THREADS
                UNLOCK();
#           endif
            return;
        }
        prev_fo = curr_fo;
        curr_fo = fo_next(curr_fo);
    }
    /* Not previously registered. */
    if (ofn) *ofn = 0;
    if (ocd) *ocd = 0;
    if (fn == 0) {
#       ifdef THREADS
            UNLOCK();
#       endif
        return;
    }
    GET_HDR(base, hhdr);
    if (0 == hhdr) {
        /* We won't collect it, hence finalizer wouldn't be run. */
#       ifdef THREADS
            UNLOCK();
#       endif
        return;
    }
    new_fo = (struct finalizable_object *)
        GC_INTERNAL_MALLOC(sizeof(struct finalizable_object),NORMAL);
    if (EXPECT(0 == new_fo, FALSE)) {
#       ifdef THREADS
            UNLOCK();
#       endif
        new_fo = (struct finalizable_object *)
                GC_oom_fn(sizeof(struct finalizable_object));
        if (0 == new_fo) {
            GC_finalization_failures++;
            return;
        }
        /* It's not likely we'll make it here, but ... */
        /* NOTE(review): after re-LOCKing, "index" may be stale if the   */
        /* table grew while unlocked; later versions of this routine     */
        /* recompute it -- confirm against upstream history.             */
#       ifdef THREADS
            LOCK();
#       endif
    }
    GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object));
    new_fo -> fo_hidden_base = (word)HIDE_POINTER(base);
    new_fo -> fo_fn = fn;
    new_fo -> fo_client_data = (ptr_t)cd;
    new_fo -> fo_object_size = hhdr -> hb_sz;
    new_fo -> fo_mark_proc = mp;
    /* Push the new record onto the head of its hash chain. */
    fo_set_next(new_fo, fo_head[index]);
    GC_fo_entries++;
    fo_head[index] = new_fo;
#   ifdef THREADS
        UNLOCK();
#   endif
}
/* finalized when this finalizer is invoked. */
/*
 * Register a finalizer "fn" (with client data "cd") for object "obj",
 * replace an existing registration, or remove one when fn == 0. Previous
 * fn/cd are returned through *ofn/*ocd when non-null. This newer variant
 * retries via an outer for(;;) loop so that the hash index is recomputed
 * (and the chain re-searched) after the lock is dropped for GC_oom_fn.
 */
STATIC void GC_register_finalizer_inner(void * obj,
                                        GC_finalization_proc fn, void *cd,
                                        GC_finalization_proc *ofn, void **ocd,
                                        finalization_mark_proc mp)
{
    ptr_t base;
    struct finalizable_object * curr_fo, * prev_fo;
    size_t index;
    struct finalizable_object *new_fo = 0;
    hdr *hhdr = NULL; /* initialized to prevent warning. */
    GC_oom_func oom_fn;
    DCL_LOCK_STATE;

    LOCK();
    /* Grow the hash table when the load factor exceeds 1. */
    if (log_fo_table_size == -1
        || GC_fo_entries > ((word)1 << log_fo_table_size)) {
        GC_grow_table((struct hash_chain_entry ***)&GC_fnlz_roots.fo_head,
                      &log_fo_table_size);
        GC_COND_LOG_PRINTF("Grew fo table to %u entries\n",
                           1 << (unsigned)log_fo_table_size);
    }
    /* in the THREADS case we hold allocation lock. */
    base = (ptr_t)obj;
    for (;;) {
      index = HASH2(base, log_fo_table_size);
      prev_fo = 0;
      curr_fo = GC_fnlz_roots.fo_head[index];
      /* Search the chain for an existing registration of base. */
      while (curr_fo != 0) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        if (curr_fo -> fo_hidden_base == GC_HIDE_POINTER(base)) {
          /* Interruption by a signal in the middle of this       */
          /* should be safe. The client may see only *ocd         */
          /* updated, but we'll declare that to be his problem.   */
          if (ocd) *ocd = (void *) (curr_fo -> fo_client_data);
          if (ofn) *ofn = curr_fo -> fo_fn;
          /* Delete the structure for base. */
          if (prev_fo == 0) {
            GC_fnlz_roots.fo_head[index] = fo_next(curr_fo);
          } else {
            fo_set_next(prev_fo, fo_next(curr_fo));
          }
          if (fn == 0) {
            GC_fo_entries--;
            /* May not happen if we get a signal. But a high    */
            /* estimate will only make the table larger than    */
            /* necessary.                                       */
#           if !defined(THREADS) && !defined(DBG_HDRS_ALL)
              GC_free((void *)curr_fo);
#           endif
          } else {
            curr_fo -> fo_fn = fn;
            curr_fo -> fo_client_data = (ptr_t)cd;
            curr_fo -> fo_mark_proc = mp;
            /* Reinsert it. We deleted it first to maintain */
            /* consistency in the event of a signal.        */
            if (prev_fo == 0) {
              GC_fnlz_roots.fo_head[index] = curr_fo;
            } else {
              fo_set_next(prev_fo, curr_fo);
            }
          }
          UNLOCK();
#         ifndef DBG_HDRS_ALL
            if (EXPECT(new_fo != 0, FALSE)) {
              /* Free unused new_fo returned by GC_oom_fn() */
              GC_free((void *)new_fo);
            }
#         endif
          return;
        }
        prev_fo = curr_fo;
        curr_fo = fo_next(curr_fo);
      }
      if (EXPECT(new_fo != 0, FALSE)) {
        /* new_fo is returned by GC_oom_fn(), so fn != 0 and hhdr != 0. */
        break;
      }
      if (fn == 0) {
        /* Removal requested but base was never registered. */
        if (ocd) *ocd = 0;
        if (ofn) *ofn = 0;
        UNLOCK();
        return;
      }
      GET_HDR(base, hhdr);
      if (EXPECT(0 == hhdr, FALSE)) {
        /* We won't collect it, hence finalizer wouldn't be run. */
        if (ocd) *ocd = 0;
        if (ofn) *ofn = 0;
        UNLOCK();
        return;
      }
      new_fo = (struct finalizable_object *)
                GC_INTERNAL_MALLOC(sizeof(struct finalizable_object),NORMAL);
      if (EXPECT(new_fo != 0, TRUE))
        break;
      /* OOM: drop the lock, call the client OOM handler, and retry. */
      oom_fn = GC_oom_fn;
      UNLOCK();
      new_fo = (struct finalizable_object *)
                (*oom_fn)(sizeof(struct finalizable_object));
      if (0 == new_fo) {
        /* No enough memory. *ocd and *ofn remains unchanged. */
        return;
      }
      /* It's not likely we'll make it here, but ... */
      LOCK();
      /* Recalculate index since the table may grow and   */
      /* check again that our finalizer is not in the table. */
    }
    GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object));
    if (ocd) *ocd = 0;
    if (ofn) *ofn = 0;
    new_fo -> fo_hidden_base = GC_HIDE_POINTER(base);
    new_fo -> fo_fn = fn;
    new_fo -> fo_client_data = (ptr_t)cd;
    new_fo -> fo_object_size = hhdr -> hb_sz;
    new_fo -> fo_mark_proc = mp;
    /* Push the new record onto the head of its hash chain. */
    fo_set_next(new_fo, GC_fnlz_roots.fo_head[index]);
    GC_fo_entries++;
    GC_fnlz_roots.fo_head[index] = new_fo;
    UNLOCK();
}
/*
 * Allocate a block of "size" bytes from a pre-allocated slab, falling back
 * to mprAllocBlock when slabs are disabled or the size has no slab class.
 * "inc" is the pre-allocation increment used if the slab must be grown.
 * Returns the user pointer, or 0 on failure.
 */
void *mprSlabAllocBlock(MPR_LOC_DEC(ctx, loc), uint size, uint inc)
{
#if NO_SLAB
    return mprAllocBlock(MPR_LOC_PASS(ctx, loc), size);
#else
    MprBlk          *parent, *bp;
    MprSlabBlock    *sb;
    MprApp          *app;
    MprSlab         *slab;
    int             slabIndex;

    if (ctx == 0) {
        mprAssert(ctx);
        return 0;
    }
    mprAssert(size > 0);
    mprAssert(VALID_BLK(ctx));

    parent = GET_HDR(ctx);
    mprAssert(VALID_HDR(parent));
    CHECK_HDR(parent);

    size = SLAB_ALIGN(size);

    app = parent->app;
    mprAssert(app);

    /*
     * Sizes outside the slab classes fall back to the general allocator.
     */
    slabIndex = GET_SLAB(size);
    if (slabIndex < 0 || slabIndex >= MPR_MAX_SLAB) {
        return mprAllocBlock(MPR_LOC_PASS(ctx, loc), size);
    }

    /*
     * Dequeue a block from the slab. "sb" will point to the user data
     * portion of the block (i.e. after the MprBlk header). Slabs must be
     * allocated off the "slabs" context to ensure they don't get freed
     * until after all other blocks are freed.
     */
    mprLock(app->allocLock);
    slab = &app->alloc.slabs[slabIndex];
    if ((sb = slab->next) == 0) {
        /* Free list empty: grow the slab by the pre-allocation increment */
        if (growSlab(MPR_LOC_ARGS(parent->app->alloc.slabs),
                slab, size, inc) < 0) {
            mprUnlock(app->allocLock);
            return 0;
        }
        sb = slab->next;
    }
    mprAssert(sb);

    /*
     * Dequeue the block
     */
    slab->next = sb->next;

#if BLD_FEATURE_ALLOC_STATS
    {
        MprSlabStats    *slabStats;

        /*
         * Update the slab stats
         */
        slabStats = &slab->stats;

        slabStats->totalAllocCount++;
        slabStats->freeCount--;
        slabStats->allocCount++;
        if (slabStats->allocCount > slabStats->peakAllocCount) {
            slabStats->peakAllocCount = slabStats->allocCount;
        }
    }
#endif /* BLD_FEATURE_ALLOC_STATS */

    bp = GET_HDR(sb);

#if BLD_DEBUG && !BREW
    if (bp == stopAlloc) {
        mprBreakpoint(MPR_LOC, "breakOnAddr");
    }
#endif

    /*
     * Initialize the header and link the block into the parent's
     * circular child list.
     */
    bp->size = size;
    bp->flags = ALLOC_MAGIC | ALLOC_FLAGS_SLAB_BLOCK;
    bp->destructor = 0;
    bp->parent = parent;

    if (parent->children == 0) {
        parent->children = bp;
        bp->next = bp->prev = bp;

    } else {
        /*
         * Append to the end of the list. Preserve alloc order
         */
        bp->next = parent->children;
        bp->prev = parent->children->prev;
        parent->children->prev->next = bp;
        parent->children->prev = bp;
    }

    bp->children = 0;
    bp->app = app;

#if BLD_FEATURE_ALLOC_LEAK_TRACK
    bp->location = loc;
#endif

    mprUnlock(app->allocLock);

    return GET_PTR(bp);
#endif
}
/*
 * Grow the block "ptr" to at least "size" bytes, preserving its contents,
 * its children and its position in the allocation tree. Allocates a new
 * block, copies the data, re-links children and parent, then frees the
 * original. Returns the (possibly new) user pointer, or 0 on failure --
 * in which case the original block remains valid and untouched.
 */
void *mprReallocBlock(MPR_LOC_DEC(ctx, loc), void *ptr, uint size)
{
    MprBlk  *bp, *newbp, *firstChild, *cp;
    MprApp  *app;
    void    *newPtr;

    mprAssert(VALID_BLK(ctx));
    mprAssert(size > 0);

    if (ptr == 0) {
        return mprAllocBlock(MPR_LOC_PASS(ctx, loc), size);
    }

    mprAssert(VALID_BLK(ptr));

    bp = GET_HDR(ptr);
    mprAssert(bp);
    mprAssert(VALID_HDR(bp));
    CHECK_HDR(bp);

    /*
     * Block is already big enough: nothing to do
     */
    if (size < bp->size) {
        return ptr;
    }

    newPtr = mprAllocBlock(MPR_LOC_PASS(ctx, loc), size);
    if (newPtr == 0) {
        /*
         * Fix: on failure, leave the original block intact (realloc
         * semantics). The previous code freed bp here with raw free(),
         * destroying the caller's data, orphaning bp's children and
         * leaving dangling pointers in the parent/sibling lists.
         */
        return 0;
    }

    newbp = GET_HDR(newPtr);
    mprAssert(newbp->size >= size);

    /*
     * Copy just the user data; mprAllocBlock already built the new header
     */
    memcpy((char*) newbp + HDR_SIZE, (char*) bp + HDR_SIZE, bp->size);
    mprAssert(newbp->size >= size);

    /*
     * Fix the next / prev pointers
     */
    app = bp->app;
    mprLock(app->allocLock);

    newbp->next->prev = newbp;
    newbp->prev->next = newbp;

    /*
     * Need to fix the parent pointer of all children
     */
    if ((firstChild = newbp->children) != 0) {
        cp = firstChild;
        do {
            cp->parent = newbp;
            cp = cp->next;
        } while (cp != firstChild);
    }

    /*
     * May need to set the children pointer of our parent
     */
    if (newbp->parent->children == bp) {
        newbp->parent->children = newbp;
    }

    /*
     * Free the original block
     */
    mprFree(ptr);

    mprUnlock(app->allocLock);
    return GET_PTR(newbp);
}
/*
 * Free the block "ptr" and, recursively, all its children. Runs the
 * block's destructor first (aborting the free if it returns < 0), unlinks
 * the block from its parent and sibling ring, then returns the memory to
 * its slab or to the O/S. Returns 0 on success (including null/duplicate
 * frees), -1 if a destructor vetoed the free.
 */
int mprFree(void *ptr)
{
    MprAllocStats   *stats;
    MprBlk          *bp, *parent, *cp, *firstChild, *prev;
    MprApp          *app;

    if (ptr == 0) {
        return 0;
    }

    mprAssert(VALID_BLK(ptr));
    VALIDATE_BLOCK(ptr);

    bp = GET_HDR(ptr);

#if BLD_DEBUG && !BREW
    if (bp == stopAlloc) {
        mprBreakpoint(MPR_LOC, "breakOnAddr");
    }
#endif

    mprAssert(bp);
    mprAssert(VALID_HDR(bp));

    CHECK_HDR(bp);

    /*
     * Test if already freed
     */
    mprAssert(! (bp->flags & ALLOC_FLAGS_FREE));
    if (bp->flags & ALLOC_FLAGS_FREE) {
        return 0;
    }

    /*
     * Return if recursive freeing or this is a permanent block
     */
    app = bp->app;
    mprLock(app->allocLock);
    if (bp->flags & (ALLOC_FLAGS_FREEING | ALLOC_FLAGS_KEEP)) {
        mprUnlock(app->allocLock);
        return 0;
    }
    bp->flags |= ALLOC_FLAGS_FREEING;

    /*
     * Call any destructors. The lock is dropped around the callback so the
     * destructor may itself use the allocator.
     */
    if (bp->destructor) {
        mprUnlock(app->allocLock);
        if ((bp->destructor)(ptr) < 0) {
            /* Destructor vetoed the free; block stays allocated */
            return -1;
        }
        mprLock(app->allocLock);
        bp->destructor = 0;
    }

    /*
     * Free the children. Free in reverse order so firstChild is preserved
     * during the list scan as an end of list marker.
     */
    if ((firstChild = bp->children) != 0) {
        cp = firstChild->prev;
        while (cp != firstChild) {
            mprAssert(VALID_HDR(cp));
            VALIDATE_BLOCK(GET_PTR(cp));
            prev = cp->prev;
            /*
             * FUTURE - OPT. Make this inline
             */
            mprFree(GET_PTR(cp));
            cp = prev;
        }
        mprFree(GET_PTR(firstChild));
        /*
         * Just for clarity
         */
        bp->children = 0;
    }

    parent = bp->parent;
    mprAssert(VALID_HDR(parent));

    /*
     * Unlink from the parent
     */
    if (parent->children == bp) {
        if (bp->next == bp) {
            parent->children = 0;
        } else {
            parent->children = bp->next;
        }
    }

    /*
     * Remove from the sibling chain
     */
    bp->prev->next = bp->next;
    bp->next->prev = bp->prev;
    bp->flags |= ALLOC_FLAGS_FREE;

    /*
     * Release the memory. If from a slab, return to the slab. Otherwise,
     * return to the O/S.
     */
    if (bp->flags & ALLOC_FLAGS_SLAB_BLOCK) {
        slabFree(bp);

    } else {
        mprAssert(bp);

        /*
         * Update the stats
         */
        stats = &bp->app->alloc.stats;
        stats->bytesAllocated -= (bp->size + HDR_SIZE);
        mprAssert(stats->bytesAllocated >= 0);
        stats->allocCount--;
        mprAssert(stats->allocCount >= 0);

#if BLD_DEBUG && !BREW
        if (bp == stopAlloc) {
            mprBreakpoint(MPR_LOC, "breakOnAddr");
        }
#endif
        /*
         * Return to the O/S. DONT_OS_FREE is set by mprAllocTerm so the
         * app header can be freed after the slabs.
         */
        if (! (bp->flags & ALLOC_FLAGS_DONT_OS_FREE)) {
            free(bp);
        }
    }

    /* OPT */
    /*
     * Skip unlock when freeing the app itself -- the lock lived inside it
     */
    if (app != ptr) {
        mprUnlock(app->allocLock);
    }
    return 0;
}
/*
 * Core allocator: allocate "size" bytes from the O/S, link the new block
 * as the last child of context "ctx", and update allocation statistics.
 * Enforces the configured maximum and red-line memory limits via
 * mprAllocException. Returns the user pointer, or 0 on failure.
 */
void *mprAllocBlock(MPR_LOC_DEC(ctx, loc), uint size)
{
    MprAllocStats   *stats;
    MprBlk          *bp, *parent;
    MprApp          *app;
    int             diff;

    mprAssert(size > 0);

    if (ctx == 0) {
#if BREW
        mprAssert(ctx);
        return 0;
#else
        ctx = rootCtx;
#endif
    }
    if (size == 0) {
        size = 1;
    }

    mprAssert(VALID_BLK(ctx));
    parent = GET_HDR(ctx);
    mprAssert(VALID_HDR(parent));
    CHECK_HDR(parent);

    size = ALLOC_ALIGN(size);

    app = parent->app;
    stats = &app->alloc.stats;

    mprLock(app->allocLock);
    stats->bytesAllocated += size + HDR_SIZE;
    if (stats->bytesAllocated > stats->peakAllocated) {
        stats->peakAllocated = stats->bytesAllocated;
    }

    /*
     * Prevent allocation if over the maximum
     */
    if (stats->maxMemory && stats->bytesAllocated > stats->maxMemory) {
        stats->bytesAllocated -= (size + HDR_SIZE);
        mprUnlock(app->allocLock);
        if (mprAllocException(MPR_LOC_PASS(ctx, loc), size, 0) < 0) {
            return 0;
        }
        mprLock(app->allocLock);
    }

    if ((bp = malloc(size + HDR_SIZE)) == 0) {
        mprAssert(bp);
        stats->errors++;
        mprUnlock(app->allocLock);
        mprAllocException(MPR_LOC_PASS(ctx, loc), size, 0);
        return 0;
    }

#if BLD_DEBUG
    /* Poison fill so use of uninitialized memory is visible in debug */
    memset(bp, 0xf7, size + HDR_SIZE);
#endif

#if BLD_DEBUG && !BREW
    if (bp == stopAlloc) {
        mprBreakpoint(MPR_LOC, "breakOnAddr");
    }
#endif

    /*
     * Warn if allocation puts us over the red line
     */
    if (stats->redLine && stats->bytesAllocated > stats->redLine) {
        mprUnlock(app->allocLock);
        if (mprAllocException(MPR_LOC_PASS(ctx, loc), size, 1) < 0) {
            return 0;
        }
        mprLock(app->allocLock);
    }

    /*
     * Initialize the header and link the block into the parent's
     * circular child list.
     */
    bp->size = size;
    bp->flags = ALLOC_MAGIC;
    bp->destructor = 0;
    bp->parent = parent;

    if (parent->children == 0) {
        parent->children = bp;
        bp->next = bp->prev = bp;

    } else {
        /*
         * Append to the end of the list. Preserve alloc order
         */
        bp->next = parent->children;
        bp->prev = parent->children->prev;
        parent->children->prev->next = bp;
        parent->children->prev = bp;
    }

    bp->children = 0;

#if BLD_FEATURE_ALLOC_LEAK_TRACK
    bp->location = loc;
#endif

    bp->app = parent->app;
    VALIDATE_BLOCK(GET_PTR(bp));

    stats->allocCount++;

    /*
     * Monitor stack usage.
     * NOTE(review): casting pointers through (int) truncates on LP64
     * platforms -- this stack tracking presumably assumes 32-bit
     * pointers; confirm target platforms.
     */
    diff = (int) bp->app->stackStart - (int) &stats;
    if (diff < 0) {
        app->maxStack -= diff;
        app->stackStart = (void*) &stats;
        diff = 0;
    }
    if ((uint) diff > app->maxStack) {
        app->maxStack = diff;
    }

    mprUnlock(app->allocLock);
    return GET_PTR(bp);
}
/*
 * Bootstrap the allocator: hand-build the root MprApp block (its header
 * cannot come from mprAllocBlock, which needs an app), then allocate the
 * slab table off it and initialize allocation statistics. "cback" is the
 * allocation-failure callback. Returns the new app, or 0 on failure.
 */
MprApp *mprAllocInit(MprAllocCback cback)
{
    MprAllocStats   *stats;
    MprApp          *app;
    MprSlab         *slab;
    MprBlk          *bp, *sp;
    int             i;

    bp = malloc(sizeof(MprApp) + HDR_SIZE);
    mprAssert(bp);
    if (bp == 0) {
        if (cback) {
            /* Report the failed bootstrap allocation to the callback */
            (*cback)(0, sizeof(MprApp), 0, 0);
        }
        return 0;
    }

    /*
     * Build the root header by hand: the app is its own parent and its
     * own (only) sibling.
     */
    memset(bp, 0, sizeof(MprApp) + HDR_SIZE);

    bp->parent = bp;
    bp->size = sizeof(MprApp);
    bp->flags = ALLOC_MAGIC;

    bp->next = bp->prev = bp;

#if BLD_FEATURE_ALLOC_LEAK_TRACK
    bp->location = MPR_LOC;
#endif

    app = (MprApp*) GET_PTR(bp);
    app->magic = APP_MAGIC;

    app->alloc.cback = cback;
    app->stackStart = (void*) &app;

    bp->app = app;

    app->alloc.slabs = mprAllocZeroedBlock(MPR_LOC_PASS(app, MPR_LOC),
        sizeof(MprSlab) * MPR_MAX_SLAB);
    if (app->alloc.slabs == 0) {
        mprFree(app);
        return 0;
    }

    /*
     * The slab control structures must not be freed. Set keep to safeguard
     * against accidents.
     */
    sp = GET_HDR(app->alloc.slabs);
    sp->flags |= ALLOC_FLAGS_KEEP;

    for (i = 0; i < MPR_MAX_SLAB; i++) {
        /*
         * This is overriden by requestors calling slabAlloc
         */
        slab = &app->alloc.slabs[i];
        slab->preAllocateIncr = MPR_SLAB_DEFAULT_INC;
    }

    /*
     * Keep aggregated stats even in production code
     */
    stats = &app->alloc.stats;
    stats->bytesAllocated += sizeof(MprApp);
    if (stats->bytesAllocated > stats->peakAllocated) {
        stats->peakAllocated = stats->bytesAllocated;
    }
    stats->allocCount++;

#if !BREW
    rootCtx = app;
#endif

#if (WIN || BREW_SIMULATOR) && BLD_DEBUG
    _CrtSetReportHook(crtReportHook);
#endif

    return app;
}