/*
** Test Arena allocation.
**
** Exercises PL_ARENA_ALLOCATE twice against the same pool: a first pass of
** 150 512-byte allocations, then PL_FreeArenaPool(), then a second pass of
** 221 allocations to confirm the pool is still usable after a free.  State
** of the pool is logged after every step.
*/
static void ArenaAllocate( void )
{
    PLArenaPool ap;
    void *ptr;
    PRInt32 i;

    /* 2048-byte arenas, double alignment */
    PL_InitArenaPool( &ap, "AllocArena", 2048, sizeof(double));
    PR_LOG( tLM, PR_LOG_DEBUG, ("AA, InitPool -- Pool: %p. first: %p, current: %p, size: %d",
        &ap, ap.first, ap.current, ap.arenasize ));

    /* First pass: force the pool to chain multiple arenas. */
    for( i = 0; i < 150; i++ )
    {
        PL_ARENA_ALLOCATE( ptr, &ap, 512 );
        PR_LOG( tLM, PR_LOG_DEBUG,("AA, after alloc -- Pool: %p. first: %p, current: %p, size: %d",
            &ap, ap.first, ap.current, ap.arenasize ));
        PR_LOG( tLM, PR_LOG_DEBUG,( "AA -- Pool: %p. alloc: %p ", &ap, ptr ));
    }
    PL_FreeArenaPool( &ap );

    /* Second pass: deliberately reuses the pool after PL_FreeArenaPool();
    ** the pool header stays valid after a free, so this is legal. */
    for( i = 0; i < 221; i++ )
    {
        PL_ARENA_ALLOCATE( ptr, &ap, 512 );
        PR_LOG( tLM, PR_LOG_DEBUG,("AA, after alloc -- Pool: %p. first: %p, current: %p, size: %d",
            &ap, ap.first, ap.current, ap.arenasize ));
        PR_LOG( tLM, PR_LOG_DEBUG,( "AA -- Pool: %p. alloc: %p ", &ap, ptr ));
    }
    PL_FreeArenaPool( &ap );
    return;
} /* end ArenaAllocate() */
/*
 * Allocate `size` zero-initialized bytes from the arena pool `pool`.
 *
 * @param pool  a PLArenaPool*, passed as void* to fit a generic allocator
 *              callback signature.
 * @param size  number of bytes to allocate.
 * @return pointer to zeroed memory, or NULL if the arena allocation failed.
 *
 * Fix: the original called memset() on the result unconditionally, which
 * dereferences NULL when PL_ARENA_ALLOCATE fails (out of memory).  Zero the
 * block only on success and let the caller see the NULL.
 */
static void *arena_alloc(void* pool, PRSize size)
{
    PLArenaPool* arena = (PLArenaPool*)pool;
    void* result;
    PL_ARENA_ALLOCATE(result, arena, size);
    if (result) {
        memset(result, 0, size);
    }
    return result;
}
// equivalent to strdup() - does no error checking, // we're assuming we're only called with a valid pointer static char *ArenaStrDup(const char* str, PLArenaPool* aArena) { void* mem; PRUint32 len = strlen(str); PL_ARENA_ALLOCATE(mem, aArena, len+1); if (mem) memcpy(mem, str, len+1); return static_cast<char*>(mem); }
// Allocate an aSize-byte object tagged with type code aCode.  Objects are
// recycled from a per-type free list when possible; otherwise fresh memory
// is carved out of the arena.  Aborts (never returns NULL) on OOM.
void* nsPresArena::Allocate(uint32_t aCode, size_t aSize)
{
  NS_ABORT_IF_FALSE(aSize > 0, "PresArena cannot allocate zero bytes");

  // We only hand out aligned sizes
  aSize = PL_ARENA_ALIGN(&mPool, aSize);

  // If there is no free-list entry for this type already, we have
  // to create one now, to record its size.
  FreeList* list = mFreeLists.PutEntry(aCode);

  nsTArray<void*>::index_type len = list->mEntries.Length();
  if (list->mEntrySize == 0) {
    // First allocation of this type code: remember the (aligned) size so we
    // can verify every later request for the same code matches it.
    NS_ABORT_IF_FALSE(len == 0, "list with entries but no recorded size");
    list->mEntrySize = aSize;
  } else {
    NS_ABORT_IF_FALSE(list->mEntrySize == aSize,
                      "different sizes for same object type code");
  }

  void* result;
  if (len > 0) {
    // LIFO behavior for best cache utilization
    result = list->mEntries.ElementAt(len - 1);
    list->mEntries.RemoveElementAt(len - 1);
#if defined(DEBUG)
    {
      // Freed objects are filled with the poison pattern; verify nothing
      // wrote into this block while it sat on the free list (would indicate
      // a use-after-free elsewhere).
      MOZ_MAKE_MEM_DEFINED(result, list->mEntrySize);
      char* p = reinterpret_cast<char*>(result);
      char* limit = p + list->mEntrySize;
      for (; p < limit; p += sizeof(uintptr_t)) {
        uintptr_t val = *reinterpret_cast<uintptr_t*>(p);
        NS_ABORT_IF_FALSE(val == mozPoisonValue(),
                          nsPrintfCString("PresArena: poison overwritten; "
                                          "wanted %.16llx "
                                          "found %.16llx "
                                          "errors in bits %.16llx",
                                          uint64_t(mozPoisonValue()),
                                          uint64_t(val),
                                          uint64_t(mozPoisonValue() ^ val)
                                          ).get());
      }
    }
#endif
    // Tell valgrind/ASan the recycled block is uninitialized again.
    MOZ_MAKE_MEM_UNDEFINED(result, list->mEntrySize);
    return result;
  }

  // Allocate a new chunk from the arena
  list->mEntriesEverAllocated++;
  PL_ARENA_ALLOCATE(result, &mPool, aSize);
  if (!result) {
    NS_ABORT_OOM(aSize);
  }
  return result;
}
// equivalent to strdup() - does no error checking, // we're assuming we're only called with a valid pointer static char * ArenaStrDup(const char* str, PLArenaPool* aArena) { void* mem; const PRUint32 size = strlen(str) + 1; PL_ARENA_ALLOCATE(mem, aArena, size); if (mem) memcpy(mem, str, size); return static_cast<char*>(mem); }
/*
 * Allocate `size` bytes from an NSS arena.  Handles both PORT-wrapped
 * arenas (recognized by ARENAPOOL_MAGIC, allocated under the pool lock)
 * and plain NSPR arenas (no lock).  Sets SEC_ERROR_NO_MEMORY and returns
 * NULL on failure.
 */
void *
PORT_ArenaAlloc(PLArenaPool *arena, size_t size)
{
    void *p = NULL;
    PORTArenaPool *pool = (PORTArenaPool *)arena;

    /* size_t is unsigned, so this only catches size == 0; round it up to 1
     * so a zero-byte request still yields a distinct pointer. */
    if (size <= 0) {
        size = 1;
    }

    if (size > MAX_SIZE) {
        /* you lose. */
        /* p stays NULL; the failure path below reports SEC_ERROR_NO_MEMORY */
    } else
    /* Is it one of ours? Assume so and check the magic */
    if (ARENAPOOL_MAGIC == pool->magic ) {
        PZ_Lock(pool->lock);
#ifdef THREADMARK
        /* Most likely one of ours. Is there a thread id? */
        if (pool->marking_thread  &&
            pool->marking_thread != PR_GetCurrentThread() ) {
            /* Another thread holds a mark in this arena */
            PZ_Unlock(pool->lock);
            PORT_SetError(SEC_ERROR_NO_MEMORY);
            PORT_Assert(0);
            return NULL;
        } /* tid != null */
#endif /* THREADMARK */
        PL_ARENA_ALLOCATE(p, arena, size);
        PZ_Unlock(pool->lock);
    } else {
        /* Not a PORT arena: no lock to take, allocate directly. */
        PL_ARENA_ALLOCATE(p, arena, size);
    }

    if (!p) {
        ++port_allocFailures;
        PORT_SetError(SEC_ERROR_NO_MEMORY);
    }
    return(p);
}
static char* ArenaStrdup(const nsAFlatCString& aString, PLArenaPool* aArena) { void *mem; // add one to include the null terminator PRInt32 len = (aString.Length()+1) * sizeof(char); PL_ARENA_ALLOCATE(mem, aArena, len); NS_ASSERTION(mem, "Couldn't allocate space!\n"); if (mem) memcpy(mem, aString.get(), len); return static_cast<char*>(mem); }
/*
 * Append a certificate-verification error to `log`, keeping the list sorted
 * by ascending chain depth.  The node is allocated from the log's arena;
 * throws OutOfMemoryError into the JVM on allocation failure.
 *
 * env   - JNI environment used only to raise OUT_OF_MEMORY_ERROR.
 * log   - verify log to append to (must be non-NULL; asserted).
 * cert  - certificate at fault; the node holds its own reference
 *         via CERT_DupCertificate().
 * error - error code recorded for this certificate.
 * depth - position of the certificate in the chain; used as the sort key.
 */
static void
addToVerifyLog(JNIEnv *env, CERTVerifyLog *log, CERTCertificate *cert,
        unsigned long error, unsigned int depth)
{

    CERTVerifyLogNode *node, *tnode;

    PR_ASSERT(log != NULL);

    node = (CERTVerifyLogNode *)PORT_ArenaAlloc(log->arena,
                                                sizeof(CERTVerifyLogNode));
    if ( node == NULL ) {
        JSS_throw(env, OUT_OF_MEMORY_ERROR);
        return;
    }
    node->cert = CERT_DupCertificate(cert);
    node->error = error;
    node->depth = depth;
    node->arg = NULL;

    if ( log->tail == NULL ) {
        /* empty list */
        log->head = log->tail = node;
        node->prev = NULL;
        node->next = NULL;
    } else if ( depth >= log->tail->depth ) {
        /* add to tail */
        node->prev = log->tail;
        log->tail->next = node;
        log->tail = node;
        node->next = NULL;
    } else if ( depth < log->head->depth ) {
        /* add at head */
        node->prev = NULL;
        node->next = log->head;
        log->head->prev = node;
        log->head = node;
    } else {
        /* add in middle */
        /* Walk backwards from the tail; the head/tail checks above guarantee
         * a node with depth <= `depth` exists, so the loop always inserts.
         * tnode->next is non-NULL here because the tail case was handled. */
        tnode = log->tail;
        while ( tnode != NULL ) {
            if ( depth >= tnode->depth ) {
                /* insert after tnode */
                node->prev = tnode;
                node->next = tnode->next;
                tnode->next->prev = node;
                tnode->next = node;
                break;
            }

            tnode = tnode->prev;
        }
    }

    log->count++;
}
//--------------------------------------------- // nsZipArchive::CreateZipItem //--------------------------------------------- nsZipItem* nsZipArchive::CreateZipItem(PRUint16 namelen) { // sizeof(nsZipItem) includes space for name's null byte #ifndef STANDALONE // Arena allocate the nsZipItem void *mem; PL_ARENA_ALLOCATE(mem, &mArena, sizeof(nsZipItem)+namelen); return (nsZipItem*)mem; #else return (nsZipItem*)malloc(sizeof(nsZipItem)+namelen); #endif }
/*
 * Place a mark in an NSS arena for later release via PORT_ArenaRelease/
 * PORT_ArenaUnmark.  For PORT-wrapped pools (ARENAPOOL_MAGIC) the mark is
 * taken under the pool lock; with THREADMARK builds, marks are additionally
 * owned by the first marking thread and a bookkeeping record is appended to
 * the pool's mark list.  Returns the mark, or NULL on error (wrong thread
 * or OOM), with SEC_ERROR_NO_MEMORY set.
 */
void *
PORT_ArenaMark(PLArenaPool *arena)
{
    void * result;

    PORTArenaPool *pool = (PORTArenaPool *)arena;
    if (ARENAPOOL_MAGIC == pool->magic ) {
        PZ_Lock(pool->lock);
#ifdef THREADMARK
        {
          threadmark_mark *tm, **pw;
          PRThread * currentThread = PR_GetCurrentThread();

            if (! pool->marking_thread ) {
                /* First mark */
                pool->marking_thread = currentThread;
            } else if (currentThread != pool->marking_thread ) {
                /* Only the thread that placed the first mark may mark again. */
                PZ_Unlock(pool->lock);
                PORT_SetError(SEC_ERROR_NO_MEMORY);
                PORT_Assert(0);
                return NULL;
            }

            result = PL_ARENA_MARK(arena);
            /* NOTE: the threadmark record is allocated AFTER taking the mark,
             * so releasing to `result` also discards the record itself. */
            PL_ARENA_ALLOCATE(tm, arena, sizeof(threadmark_mark));
            if (!tm) {
                PZ_Unlock(pool->lock);
                PORT_SetError(SEC_ERROR_NO_MEMORY);
                return NULL;
            }

            tm->mark = result;
            tm->next = (threadmark_mark *)NULL;

            /* Append to the end of the pool's mark list. */
            pw = &pool->first_mark;
            while( *pw ) {
                 pw = &(*pw)->next;
            }

            *pw = tm;
        }
#else /* THREADMARK */
        result = PL_ARENA_MARK(arena);
#endif /* THREADMARK */
        PZ_Unlock(pool->lock);
    } else {
        /* a "pure" NSPR arena */
        result = PL_ARENA_MARK(arena);
    }
    return result;
}
// Insert aBundle into the bundle cache keyed by aHashKey and return its
// cache entry.  While the cache is below MAX_CACHED_BUNDLES a fresh entry is
// arena-allocated; once full, the least-recently-used entry (list tail) is
// evicted and its storage recycled.  The returned entry owns a reference to
// the bundle and a clone of the key.
bundleCacheEntry_t *
nsStringBundleService::insertIntoCache(nsIStringBundle* aBundle,
                                       nsCStringKey* aHashKey)
{
  bundleCacheEntry_t *cacheEntry;

  if (mBundleMap.Count() < MAX_CACHED_BUNDLES) {
    // cache not full - create a new entry
    // NOTE(review): PL_ARENA_ALLOCATE can yield NULL on OOM and the result
    // is dereferenced below without a check — confirm callers/infallible
    // allocator assumptions before relying on this path.
    void *cacheEntryArena;
    PL_ARENA_ALLOCATE(cacheEntryArena, &mCacheEntryPool, sizeof(bundleCacheEntry_t));
    cacheEntry = (bundleCacheEntry_t*)cacheEntryArena;
  } else {
    // cache is full
    // take the last entry in the list, and recycle it.
    cacheEntry = (bundleCacheEntry_t*)PR_LIST_TAIL(&mBundleCache);

    // remove it from the hash table and linked list
    NS_ASSERTION(mBundleMap.Exists(cacheEntry->mHashKey),
                 "Element will not be removed!");
#ifdef DEBUG_alecf
    NS_WARNING(nsPrintfCString(300,
                               "Booting %s to make room for %s\n",
                               cacheEntry->mHashKey->GetString(),
                               aHashKey->GetString()).get());
#endif
    mBundleMap.Remove(cacheEntry->mHashKey);
    PR_REMOVE_LINK((PRCList*)cacheEntry);

    // free up excess memory (drops the old bundle ref and key clone)
    recycleEntry(cacheEntry);
  }

  // at this point we have a new cacheEntry that doesn't exist
  // in the hashtable, so set up the cacheEntry
  cacheEntry->mBundle = aBundle;
  NS_ADDREF(cacheEntry->mBundle);

  cacheEntry->mHashKey = (nsCStringKey*)aHashKey->Clone();

  // insert the entry into the cache and map, make it the MRU
  mBundleMap.Put(cacheEntry->mHashKey, cacheEntry);

  return cacheEntry;
}
  // Allocate an aSize-byte object tagged with type code aCode.  Recycles
  // from the per-type free list (LIFO) when possible, otherwise carves new
  // memory from the arena.  Returns nsnull on failure.
  void* Allocate(PRUint32 aCode, size_t aSize)
  {
    NS_ABORT_IF_FALSE(aSize > 0, "PresArena cannot allocate zero bytes");

    // We only hand out aligned sizes
    aSize = PL_ARENA_ALIGN(&mPool, aSize);

    // If there is no free-list entry for this type already, we have
    // to create one now, to record its size.
    FreeList* list = mFreeLists.PutEntry(aCode);
    if (!list) {
      return nsnull;
    }

    nsTArray<void*>::index_type len = list->mEntries.Length();
    if (list->mEntrySize == 0) {
      // First allocation for this type code: record the aligned size so all
      // later requests with the same code can be validated against it.
      NS_ABORT_IF_FALSE(len == 0, "list with entries but no recorded size");
      list->mEntrySize = aSize;
    } else {
      NS_ABORT_IF_FALSE(list->mEntrySize == aSize,
                        "different sizes for same object type code");
    }

    void* result;
    if (len > 0) {
      // LIFO behavior for best cache utilization
      result = list->mEntries.ElementAt(len - 1);
      list->mEntries.RemoveElementAt(len - 1);
#ifdef DEBUG
      {
        // Freed blocks were filled with ARENA_POISON; any other word means
        // something wrote through a dangling pointer while the block was on
        // the free list.
        char* p = reinterpret_cast<char*>(result);
        char* limit = p + list->mEntrySize;
        for (; p < limit; p += sizeof(PRUword)) {
          NS_ABORT_IF_FALSE(*reinterpret_cast<PRUword*>(p) == ARENA_POISON,
                            "PresArena: poison overwritten");
        }
      }
#endif
      return result;
    }

    // Allocate a new chunk from the arena
    PL_ARENA_ALLOCATE(result, &mPool, aSize);
    return result;
  }
/* ** Test Arena grow. */ static void ArenaGrow( void ) { PLArenaPool ap; void *ptr; PRInt32 i; PL_InitArenaPool( &ap, "TheArena", 4096, sizeof(double)); PL_ARENA_ALLOCATE( ptr, &ap, 512 ); PR_LOG( tLM, PR_LOG_DEBUG, ("Before growth -- Pool: %p. alloc: %p ", &ap, ptr )); for( i = 0; i < 10; i++ ) { PL_ARENA_GROW( ptr, &ap, 512, 7000 ); PR_LOG( tLM, PR_LOG_DEBUG, ("After growth -- Pool: %p. alloc: %p ", &ap, ptr )); } return; } /* end ArenaGrow() */
/*
** StressThread()
**
** Worker body for the arena stress test: each thread owns a private pool
** and repeatedly fills it with randomly-sized allocations up to maxAlloc
** bytes per iteration, freeing the pool between iterations.  This tests
** the free_list protection.  On completion the thread decrements
** threadCount under tMon and notifies the waiter.
*/
static void PR_CALLBACK StressThread( void *arg )
{
    PLArenaPool ap;
    PRIntn iteration;
    PRIntn chunk;
    void *mem;
    PRThread *self = PR_GetCurrentThread();

    PR_LOG( tLM, PR_LOG_DEBUG, ("Stress Thread %p started\n", PR_GetCurrentThread()));
    PL_InitArenaPool( &ap, "TheArena", RandSize( poolMin, poolMax), sizeof(double));

    for ( iteration = 0; iteration < stressIterations; iteration++ )
    {
        PRIntn allocated;

        /* Fill the pool with random-sized chunks until the budget is hit. */
        for ( allocated = 0; allocated < maxAlloc; allocated += chunk )
        {
            chunk = RandSize( arenaMin, arenaMax );
            PL_ARENA_ALLOCATE( mem, &ap, chunk );
            if ( mem == NULL )
            {
                PR_LOG( tLM, PR_LOG_ERROR,
                    ("ARENA_ALLOCATE() returned NULL\n\tAllocated: %d\n", allocated));
                break;
            }
        }
        PR_LOG( tLM, PR_LOG_DEBUG,
            ("Stress thread %p finished one iteration\n", self));
        PL_FreeArenaPool( &ap );
    }
    PR_LOG( tLM, PR_LOG_DEBUG,
        ("Stress thread %p finished all iteration\n", self));

    PL_FinishArenaPool( &ap );
    PR_LOG( tLM, PR_LOG_DEBUG,
        ("Stress thread %p after FinishArenaPool()\n", self));

    /* That's all folks! let's quit */
    PR_EnterMonitor(tMon);
    threadCount--;
    PR_Notify(tMon);
    PR_ExitMonitor(tMon);
    return;
}
// Carve aSize raw bytes out of the builder's arena pool.  May return NULL
// if the underlying arena allocation fails.
void* nsDisplayListBuilder::Allocate(size_t aSize)
{
  void *mem;
  PL_ARENA_ALLOCATE(mem, &mPool, aSize);
  return mem;
}
/*
** Test arena Mark and Release.
**
** Takes a mark (mark0) on a fresh pool, allocates 201 blocks, takes a
** second mark (mark1), allocates 225 more, then releases back to mark1,
** allocates again, releases to mark1 a second time (an already-released
** mark -- deliberately exercising release idempotence), releases to mark0,
** and finally frees and finishes the pool, logging pool state throughout.
*/
static void MarkAndRelease( void )
{
    PLArenaPool ap;
    void *ptr = NULL;
    void *mark0, *mark1;
    PRIntn i;

    PL_InitArenaPool( &ap, "TheArena", 4096, sizeof(double));
    mark0 = PL_ARENA_MARK( &ap );
    PR_LOG( tLM, PR_LOG_DEBUG,
        ("mark0. ap: %p, ap.f: %p, ap.c: %p, ap.siz: %d, alloc: %p, m0: %p",
            &ap, ap.first.next, ap.current, ap.arenasize, ptr, mark0 ));

    /* Fill past several arenas before taking the second mark. */
    for( i = 0; i < 201; i++ )
    {
        PL_ARENA_ALLOCATE( ptr, &ap, 512 );
        PR_LOG( tLM, PR_LOG_DEBUG,
            ("mr. ap: %p, ap.f: %p, ap.c: %p, ap.siz: %d, alloc: %p",
                &ap, ap.first.next, ap.current, ap.arenasize, ptr ));
    }

    mark1 = PL_ARENA_MARK( &ap );
    PR_LOG( tLM, PR_LOG_DEBUG,
        ("mark1. ap: %p, ap.f: %p, ap.c: %p, ap.siz: %d, alloc: %p, m1: %p",
            &ap, ap.first.next, ap.current, ap.arenasize, ptr, mark1 ));

    for( i = 0; i < 225; i++ )
    {
        PL_ARENA_ALLOCATE( ptr, &ap, 512 );
        PR_LOG( tLM, PR_LOG_DEBUG,
            ("mr. ap: %p, ap.f: %p, ap.c: %p, ap.siz: %d, alloc: %p",
                &ap, ap.first.next, ap.current, ap.arenasize, ptr ));
    }

    /* Roll the pool back to the second mark. */
    PL_ARENA_RELEASE( &ap, mark1 );
    PR_LOG( tLM, PR_LOG_DEBUG,
        ("Release-1: %p -- Pool: %p. first: %p, current: %p, size: %d",
            mark1, &ap, ap.first, ap.current, ap.arenasize ));

    for( i = 0; i < 20; i++ )
    {
        PL_ARENA_ALLOCATE( ptr, &ap, 512 );
        PR_LOG( tLM, PR_LOG_DEBUG,
            ("mr. ap: %p, ap.f: %p, ap.c: %p, ap.siz: %d, alloc: %p",
                &ap, ap.first.next, ap.current, ap.arenasize, ptr ));
    }

    /* Release to mark1 again after reallocating past it. */
    PL_ARENA_RELEASE( &ap, mark1 );
    PR_LOG( tLM, PR_LOG_DEBUG,
        ("Release-1. ap: %p, ap.f: %p, ap.c: %p, ap.siz: %d, alloc: %p",
            &ap, ap.first.next, ap.current, ap.arenasize, ptr ));

    /* Roll all the way back to the initial mark. */
    PL_ARENA_RELEASE( &ap, mark0 );
    PR_LOG( tLM, PR_LOG_DEBUG,
        ("Release-0. ap: %p, ap.f: %p, ap.c: %p, ap.siz: %d, alloc: %p",
            &ap, ap.first.next, ap.current, ap.arenasize, ptr ));

    PL_FreeArenaPool( &ap );
    PR_LOG( tLM, PR_LOG_DEBUG,
        ("Free. ap: %p, ap.f: %p, ap.c: %p, ap.siz: %d, alloc: %p",
            &ap, ap.first.next, ap.current, ap.arenasize, ptr ));

    PL_FinishArenaPool( &ap );
    /* NOTE(review): the "Finish. " literal below is split across a raw line
    ** break in the original source -- looks like extraction mangling; verify
    ** against the upstream file. Preserved byte-for-byte here. */
    PR_LOG( tLM, PR_LOG_DEBUG,
        ("Finish. 
ap: %p, ap.f: %p, ap.c: %p, ap.siz: %d, alloc: %p",
            &ap, ap.first.next, ap.current, ap.arenasize, ptr ));

    return;
} /* end MarkAndRelease() */