/*
 * Analyzer test driver: repeatedly init / torture / tear down an arena
 * pool under nondeterministic iteration counts, plotting heap snapshots
 * at each phase.  Call order and plot labels are part of the test.
 */
int main() {
    while (___sl_get_nondet_int()) {
        PLArenaPool ap;

        while (___sl_get_nondet_int()) {
            /* set up the pool, hammer it, then tear it down */
            PL_InitArenaPool(&ap, "cool pool", 0x1000, 0x10);
            torture_arena(&ap);
            ___sl_plot("01-torture_arena");

            PL_FreeArenaPool(&ap);
            ___sl_plot("02-PL_FreeArenaPool");

            PL_FinishArenaPool(&ap);
            ___sl_plot("03-PL_FinishArenaPool");
        }
        ___sl_plot("04-done");

        /* drop the global freelist kept by the arena allocator */
        PL_ArenaFinish();
        ___sl_plot("05-PL_ArenaFinish");
    }
    return 0;
}
/* # 404 "arena.c" */ NSSArena * nssArena_Create ( void ) { NSSArena *rv = (NSSArena *)((void *)0); rv = ((NSSArena *)nss_ZAlloc(((NSSArena *)((void *)0)), sizeof(NSSArena))); if( (NSSArena *)((void *)0) == rv ) { nss_SetError(NSS_ERROR_NO_MEMORY); return (NSSArena *)((void *)0); } rv->lock = PR_NewLock(); if( (PRLock *)((void *)0) == rv->lock ) { (void)nss_ZFreeIf(rv); nss_SetError(NSS_ERROR_NO_MEMORY); return (NSSArena *)((void *)0); } /* # 442 "arena.c" */ PL_InitArenaPool(&rv->pool, "NSS", 2048, sizeof(double)); /* # 457 "arena.c" */ return rv; }
/* ** Test Arena allocation. */ static void ArenaAllocate( void ) { PLArenaPool ap; void *ptr; PRInt32 i; PL_InitArenaPool( &ap, "AllocArena", 2048, sizeof(double)); PR_LOG( tLM, PR_LOG_DEBUG, ("AA, InitPool -- Pool: %p. first: %p, current: %p, size: %d", &ap, ap.first, ap.current, ap.arenasize )); for( i = 0; i < 150; i++ ) { PL_ARENA_ALLOCATE( ptr, &ap, 512 ); PR_LOG( tLM, PR_LOG_DEBUG,("AA, after alloc -- Pool: %p. first: %p, current: %p, size: %d", &ap, ap.first, ap.current, ap.arenasize )); PR_LOG( tLM, PR_LOG_DEBUG,( "AA -- Pool: %p. alloc: %p ", &ap, ptr )); } PL_FreeArenaPool( &ap ); for( i = 0; i < 221; i++ ) { PL_ARENA_ALLOCATE( ptr, &ap, 512 ); PR_LOG( tLM, PR_LOG_DEBUG,("AA, after alloc -- Pool: %p. first: %p, current: %p, size: %d", &ap, ap.first, ap.current, ap.arenasize )); PR_LOG( tLM, PR_LOG_DEBUG,( "AA -- Pool: %p. alloc: %p ", &ap, ptr )); } PL_FreeArenaPool( &ap ); return; } /* end ArenaGrow() */
// Construct a display-list builder rooted at aReferenceFrame.
// Sets up the arena pool used for display items, queries the pres shell
// for paint suppression, and captures the normal selection when the
// pres context is rendering only the selection.
nsDisplayListBuilder::nsDisplayListBuilder(nsIFrame* aReferenceFrame,
    PRBool aIsForEvents, PRBool aBuildCaret)
    : mReferenceFrame(aReferenceFrame),
      mMovingFrame(nsnull),
      mIgnoreScrollFrame(nsnull),
      mCurrentTableItem(nsnull),
      mBuildCaret(aBuildCaret),
      mEventDelivery(aIsForEvents),
      mIsAtRootOfPseudoStackingContext(PR_FALSE),
      mPaintAllFrames(PR_FALSE) {
  // Arena backing all display items built by this builder.
  // NOTE(review): alignment argument is sizeof(void*)-1, not a power of
  // two as arena alignment usually is -- confirm this is intentional.
  PL_InitArenaPool(&mPool, "displayListArena", 1024, sizeof(void*)-1);

  nsPresContext* pc = aReferenceFrame->PresContext();
  nsIPresShell *shell = pc->PresShell();
  PRBool suppressed;
  shell->IsPaintingSuppressed(&suppressed);
  // While painting is suppressed we only draw backgrounds.
  mIsBackgroundOnly = suppressed;
  if (pc->IsRenderingOnlySelection()) {
    nsCOMPtr<nsISelectionController> selcon(do_QueryInterface(shell));
    if (selcon) {
      selcon->GetSelection(nsISelectionController::SELECTION_NORMAL,
                           getter_AddRefs(mBoundingSelection));
    }
  }

  // No caret while we are background-only.
  if (mIsBackgroundOnly) {
    mBuildCaret = PR_FALSE;
  }
}
// Construct the string-bundle service: an LRU cache of loaded bundles
// (list + arena pool for the cache entries) plus the error service used
// for formatting failures.
nsStringBundleService::nsStringBundleService() :
  mBundleMap(MAX_CACHED_BUNDLES, PR_TRUE)
{
#ifdef DEBUG_tao_
  printf("\n++ nsStringBundleService::nsStringBundleService ++\n");
#endif
  PR_INIT_CLIST(&mBundleCache);
  // One arena chunk is sized to hold all MAX_CACHED_BUNDLES entries, so
  // cache entries should never require a second arena allocation.
  PL_InitArenaPool(&mCacheEntryPool, "srEntries",
                   sizeof(bundleCacheEntry_t)*MAX_CACHED_BUNDLES,
                   sizeof(bundleCacheEntry_t));

  mErrorService = do_GetService(kErrorServiceCID);
  NS_ASSERTION(mErrorService, "Couldn't get error service");
}
/*
 * Analyzer test driver: frees the same arena pool twice on purpose to
 * check that the analyzer tolerates / reports the pattern.  Do not
 * "fix" the duplicated PL_FreeArenaPool -- it is the test.
 */
int main() {
    PLArenaPool ap;

    PL_InitArenaPool(&ap, "cool pool", 0x1000, 0x10);
    ___sl_plot("PL_InitArenaPool");

    /* first free is legitimate */
    PL_FreeArenaPool(&ap);
    ___sl_plot("PL_FreeArenaPool-01");

    /* intentional second free of the same pool */
    PL_FreeArenaPool(&ap);
    ___sl_plot("PL_FreeArenaPool-02");

    PL_FinishArenaPool(&ap);
    ___sl_plot("PL_FinishArenaPool-00");

    PL_ArenaFinish();
    return 0;
}
PLArenaPool * PORT_NewArena(unsigned long chunksize) { PORTArenaPool *pool; pool = PORT_ZNew(PORTArenaPool); if (!pool) { return NULL; } pool->magic = ARENAPOOL_MAGIC; pool->lock = PZ_NewLock(nssILockArena); if (!pool->lock) { ++port_allocFailures; PORT_Free(pool); return NULL; } PL_InitArenaPool(&pool->arena, "security", chunksize, sizeof(double)); return(&pool->arena); }
int main() { // initialize arena pool PLArenaPool pool; PL_InitArenaPool(&pool, "cool pool", 0x1000, 0x10); // trigger allocation of one arena void *ptr = PL_ArenaAllocate(&pool, 0x100); __VERIFIER_plot("01-PL_ArenaAllocate", &ptr); // free the arena pool PL_FreeArenaPool(&pool); __VERIFIER_plot("02-PL_FreeArenaPool"); PL_ArenaFinish(); __VERIFIER_plot("03-PL_ArenaFinish"); return 0; }
/*
 * Analyzer test driver: nondeterministically repeated init / torture /
 * free / finish cycles on a stack-allocated pool, with the global
 * arena freelist dropped after each outer iteration.
 */
int main() {
    while (___sl_get_nondet_int()) {
        PLArenaPool ap;

        while (___sl_get_nondet_int()) {
            /* full lifecycle of the pool in each inner iteration */
            PL_InitArenaPool(&ap, "cool pool", 0x1000, sizeof(double));
            torture_arena(&ap);
            PL_FreeArenaPool(&ap);
            PL_FinishArenaPool(&ap);
        }

        PL_ArenaFinish();
    }
    return 0;
}
/*
** Test arena grow.
** Allocates one 512-byte block, then grows it to 7000 bytes ten times,
** logging the pool and pointer before and after each growth.
**
** BUG FIX: the original returned without ever releasing the pool --
** unlike the sibling tests (ArenaAllocate, StressThread,
** MarkAndRelease), which all free and/or finish their pools -- so the
** arenas allocated here leaked.  The pool is now freed and finished
** before returning.
*/
static void ArenaGrow( void )
{
    PLArenaPool ap;
    void *ptr;
    PRInt32 i;

    PL_InitArenaPool( &ap, "TheArena", 4096, sizeof(double));

    PL_ARENA_ALLOCATE( ptr, &ap, 512 );
    PR_LOG( tLM, PR_LOG_DEBUG, ("Before growth -- Pool: %p. alloc: %p ", &ap, ptr ));

    for( i = 0; i < 10; i++ )
    {
        PL_ARENA_GROW( ptr, &ap, 512, 7000 );
        PR_LOG( tLM, PR_LOG_DEBUG, ("After growth -- Pool: %p. alloc: %p ", &ap, ptr ));
    }

    /* release the arenas and tear down the pool (was missing: leak) */
    PL_FreeArenaPool( &ap );
    PL_FinishArenaPool( &ap );

    return;
} /* end ArenaGrow() */
int main() { // initialize arena pool PLArenaPool pool; PL_InitArenaPool(&pool, "cool pool", 0x1000, 0x10); // trigger allocation of one arena void *ptr1 = PL_ArenaAllocate(&pool, 0x100); // attempt to reuse the existing arena void *ptr2 = PL_ArenaAllocate(&pool, 0x100); // free the arena pool twice PL_FreeArenaPool(&pool); PL_FreeArenaPool(&pool); ___sl_plot("01-PL_FreeArenaPool"); ptr1 = PL_ArenaAllocate(&pool, 0x100); ptr2 = PL_ArenaAllocate(&pool, 0x100); ___sl_plot("02-PL_ArenaAllocate"); // free the arena pool PL_FreeArenaPool(&pool); ___sl_plot("04-PL_FreeArenaPool", &ptr1, &ptr2); PL_ArenaFinish(); ___sl_plot("05-PL_ArenaFinish"); // XXX: this is misuse of the NSPR API void *ptr0 = PL_ArenaAllocate(&pool, 0x100); ___sl_plot("06-PL_ArenaAllocate"); // free the arena pool PL_FreeArenaPool(&pool); ___sl_plot("07-PL_FreeArenaPool"); PL_ArenaFinish(); ___sl_plot("08-PL_ArenaFinish"); return 0; }
/*
** StressThread()
** Several of these run concurrently, each beating on its own private
** arena pool; this exercises the protection of the shared free_list.
** Each iteration allocates random-size blocks until maxAlloc bytes are
** reached (or allocation fails), then frees the pool.  On exit the
** thread finishes its pool and notifies the monitor so main() can
** count terminations.
*/
static void PR_CALLBACK StressThread( void *arg )
{
    PLArenaPool pool;
    PRIntn iteration;
    PRIntn size;
    void *mem;
    PRThread *self = PR_GetCurrentThread();

    PR_LOG( tLM, PR_LOG_DEBUG, ("Stress Thread %p started\n", PR_GetCurrentThread()));
    PL_InitArenaPool( &pool, "TheArena", RandSize( poolMin, poolMax), sizeof(double));

    for ( iteration = 0; iteration < stressIterations; iteration++ )
    {
        PRIntn total = 0;
        while ( total < maxAlloc )
        {
            size = RandSize( arenaMin, arenaMax );
            PL_ARENA_ALLOCATE( mem, &pool, size );
            if ( mem == NULL )
            {
                PR_LOG( tLM, PR_LOG_ERROR,
                    ("ARENA_ALLOCATE() returned NULL\n\tAllocated: %d\n", total));
                break;
            }
            total += size;
        }
        PR_LOG( tLM, PR_LOG_DEBUG, ("Stress thread %p finished one iteration\n", self));
        PL_FreeArenaPool( &pool );
    }

    PR_LOG( tLM, PR_LOG_DEBUG, ("Stress thread %p finished all iteration\n", self));
    PL_FinishArenaPool( &pool );
    PR_LOG( tLM, PR_LOG_DEBUG, ("Stress thread %p after FinishArenaPool()\n", self));

    /* That's all folks! let's quit */
    PR_EnterMonitor(tMon);
    threadCount--;
    PR_Notify(tMon);
    PR_ExitMonitor(tMon);
    return;
}
/********************* Arena code follows *****************************
 * ArenaPools are like heaps.  The memory in them consists of large blocks,
 * called arenas, which are allocated from the/a system heap.  Inside an
 * ArenaPool, the arenas are organized as if they were in a stack.  Newly
 * allocated arenas are "pushed" on that stack.  When you attempt to
 * allocate memory from an ArenaPool, the code first looks to see if there
 * is enough unused space in the top arena on the stack to satisfy your
 * request, and if so, your request is satisfied from that arena.
 * Otherwise, a new arena is allocated (or taken from NSPR's list of freed
 * arenas) and pushed on to the stack.  The new arena is always big enough
 * to satisfy the request, and is also at least a minimum size that is
 * established at the time that the ArenaPool is created.
 *
 * The ArenaMark function returns the address of a marker in the arena at
 * the top of the arena stack.  It is the address of the place in the arena
 * on the top of the arena stack from which the next block of memory will
 * be allocated.  Each ArenaPool has its own separate stack, and hence
 * marks are only relevant to the ArenaPool from which they are gotten.
 * Marks may be nested.  That is, a thread can get a mark, and then get
 * another mark.
 *
 * It is intended that all the marks in an ArenaPool may only be owned by a
 * single thread.  In DEBUG builds, this is enforced.  In non-DEBUG builds,
 * it is not.  In DEBUG builds, when a thread gets a mark from an
 * ArenaPool, no other thread may acquire a mark in that ArenaPool while
 * that mark exists, that is, until that mark is unmarked or released.
 * Therefore, it is important that every mark be unmarked or released when
 * the creating thread has no further need for exclusive ownership of the
 * right to manage the ArenaPool.
 *
 * The ArenaUnmark function discards the ArenaMark at the address given,
 * and all marks nested inside that mark (that is, acquired from that same
 * ArenaPool while that mark existed).  It is an error for a thread other
 * than the mark's creator to try to unmark it.  When a thread has unmarked
 * all its marks from an ArenaPool, then another thread is able to set
 * marks in that ArenaPool.  ArenaUnmark does not deallocate (or "pop") any
 * memory allocated from the ArenaPool since the mark was created.
 *
 * ArenaRelease "pops" the stack back to the mark, deallocating all the
 * memory allocated from the arenas in the ArenaPool since that mark was
 * created, and removing any arenas from the ArenaPool that have no
 * remaining active allocations when that is done.  It implicitly releases
 * any marks nested inside the mark being explicitly released.  It is the
 * only operation, other than destroying the arenapool, that potentially
 * reduces the number of arenas on the stack.  Otherwise, the stack grows
 * until the arenapool is destroyed, at which point all the arenas are
 * freed or returned to a "free arena list", depending on their sizes.
 */

/*
 * PORT_NewArena: heap-allocate a PORTArenaPool, validate the requested
 * chunk size, create its lock, and initialize the embedded PLArenaPool.
 * Returns a pointer to the embedded arena, or NULL on failure.
 */
PLArenaPool *
PORT_NewArena(unsigned long chunksize)
{
    PORTArenaPool *pool;

    /* reject absurd chunk sizes up front */
    if (chunksize > MAX_SIZE) {
        PORT_SetError(SEC_ERROR_NO_MEMORY);
        return NULL;
    }
    pool = PORT_ZNew(PORTArenaPool);
    if (!pool) {
        return NULL;
    }
    /* magic tags this as a PORT-wrapped arena pool */
    pool->magic = ARENAPOOL_MAGIC;
    pool->lock = PZ_NewLock(nssILockArena);
    if (!pool->lock) {
        /* lock creation failed: undo the descriptor allocation */
        PORT_Free(pool);
        return NULL;
    }
    PL_InitArenaPool(&pool->arena, "security", chunksize, sizeof(double));
    /* callers receive the embedded arena, not the wrapper */
    return (&pool->arena);
}
/*
 * pool_create
 *
 * Heap-allocate a PLArenaPool and initialize it with the given initial
 * chunk size.  Returns the new pool, or NULL if the descriptor
 * allocation fails.  Caller owns the returned pool.
 *
 * BUG FIX: the original passed the malloc() result to
 * PL_InitArenaPool() without checking it, dereferencing NULL on OOM.
 *
 * NOTE(review): 4096 is passed as the alignment argument of
 * PL_InitArenaPool; other callers in this codebase use small values
 * like sizeof(double) -- confirm 4096 is really meant as alignment.
 */
pool_t* pool_create(size_t initial_size)
{
    PLArenaPool *pool = (PLArenaPool*)malloc(sizeof(PLArenaPool));
    if (pool == NULL) {
        return NULL; /* propagate OOM instead of crashing */
    }
    PL_InitArenaPool(pool, "dta pool", initial_size, 4096);
    return pool;
}
/*
 * PORT_InitCheapArena
 *
 * Initialize a "cheap" arena pool embedded in caller-provided storage:
 * just the embedded PLArenaPool plus its magic tag.  No lock is created
 * here (contrast with PORT_NewArena, which allocates one).
 */
void
PORT_InitCheapArena(PORTCheapArenaPool *pool, unsigned long chunksize)
{
    PL_InitArenaPool(&pool->arena, "security", chunksize, sizeof(double));
    pool->magic = CHEAP_ARENAPOOL_MAGIC;
}
/*
** Test arena Mark and Release.
** Takes a mark at the start (mark0) and after 201 allocations (mark1),
** releases back to mark1, allocates some more, releases to mark1 again,
** then releases to mark0 and tears the pool down, logging pool state
** throughout.
*/
static void MarkAndRelease( void )
{
    PLArenaPool ap;
    void *ptr = NULL;
    void *mark0, *mark1;
    PRIntn i;

    PL_InitArenaPool( &ap, "TheArena", 4096, sizeof(double));
    /* mark0: position before anything is allocated */
    mark0 = PL_ARENA_MARK( &ap );
    PR_LOG( tLM, PR_LOG_DEBUG,
        ("mark0. ap: %p, ap.f: %p, ap.c: %p, ap.siz: %d, alloc: %p, m0: %p",
            &ap, ap.first.next, ap.current, ap.arenasize, ptr, mark0 ));

    for( i = 0; i < 201; i++ )
    {
        PL_ARENA_ALLOCATE( ptr, &ap, 512 );
        PR_LOG( tLM, PR_LOG_DEBUG,
            ("mr. ap: %p, ap.f: %p, ap.c: %p, ap.siz: %d, alloc: %p",
                &ap, ap.first.next, ap.current, ap.arenasize, ptr ));
    }

    /* mark1: position after the first batch of allocations */
    mark1 = PL_ARENA_MARK( &ap );
    PR_LOG( tLM, PR_LOG_DEBUG,
        ("mark1. ap: %p, ap.f: %p, ap.c: %p, ap.siz: %d, alloc: %p, m1: %p",
            &ap, ap.first.next, ap.current, ap.arenasize, ptr, mark1 ));

    for( i = 0; i < 225; i++ )
    {
        PL_ARENA_ALLOCATE( ptr, &ap, 512 );
        PR_LOG( tLM, PR_LOG_DEBUG,
            ("mr. ap: %p, ap.f: %p, ap.c: %p, ap.siz: %d, alloc: %p",
                &ap, ap.first.next, ap.current, ap.arenasize, ptr ));
    }

    /* pop everything allocated since mark1 */
    PL_ARENA_RELEASE( &ap, mark1 );
    PR_LOG( tLM, PR_LOG_DEBUG,
        ("Release-1: %p -- Pool: %p. first: %p, current: %p, size: %d",
            mark1, &ap, ap.first, ap.current, ap.arenasize ));

    for( i = 0; i < 20; i++ )
    {
        PL_ARENA_ALLOCATE( ptr, &ap, 512 );
        PR_LOG( tLM, PR_LOG_DEBUG,
            ("mr. ap: %p, ap.f: %p, ap.c: %p, ap.siz: %d, alloc: %p",
                &ap, ap.first.next, ap.current, ap.arenasize, ptr ));
    }

    /* NOTE(review): mark1 was already released above; this second
    ** release of the same mark appears deliberate (re-popping the 20
    ** allocations just made) -- confirm against the test's intent. */
    PL_ARENA_RELEASE( &ap, mark1 );
    PR_LOG( tLM, PR_LOG_DEBUG,
        ("Release-1. ap: %p, ap.f: %p, ap.c: %p, ap.siz: %d, alloc: %p",
            &ap, ap.first.next, ap.current, ap.arenasize, ptr ));

    /* pop back to the very beginning */
    PL_ARENA_RELEASE( &ap, mark0 );
    PR_LOG( tLM, PR_LOG_DEBUG,
        ("Release-0. ap: %p, ap.f: %p, ap.c: %p, ap.siz: %d, alloc: %p",
            &ap, ap.first.next, ap.current, ap.arenasize, ptr ));

    PL_FreeArenaPool( &ap );
    PR_LOG( tLM, PR_LOG_DEBUG,
        ("Free. ap: %p, ap.f: %p, ap.c: %p, ap.siz: %d, alloc: %p",
            &ap, ap.first.next, ap.current, ap.arenasize, ptr ));

    PL_FinishArenaPool( &ap );
    PR_LOG( tLM, PR_LOG_DEBUG,
        ("Finish. ap: %p, ap.f: %p, ap.c: %p, ap.siz: %d, alloc: %p",
            &ap, ap.first.next, ap.current, ap.arenasize, ptr ));

    return;
} /* end MarkAndRelease() */