static void js_free_temp_space(void *priv, void *item, size_t size) { if (size >= TEMP_SIZE_LIMIT) return; JSCompiler *jsc = (JSCompiler *) priv; int bin = JS_CeilingLog2(size) - TEMP_SIZE_START_LOG2; JS_ASSERT(unsigned(bin) < NUM_TEMP_FREELISTS); *(void **)item = jsc->tempFreeList[bin]; jsc->tempFreeList[bin] = item; }
/*
 * Walk every live entry in bucket order, invoking f(entry, index, arg) on
 * each.  f's return value is a flag word:
 *   HT_ENUMERATE_REMOVE - unlink and free the current entry;
 *   HT_ENUMERATE_STOP   - abort the walk after handling this entry.
 * Returns the number of entries visited.  If removals left the table
 * underloaded, it is shrunk before returning.
 */
JS_HashTableEnumerateEntries(JSHashTable *ht, JSHashEnumerator f, void *arg)
{
    JSHashEntry *he, **hep, **bucket;
    uint32 nlimit, n, nbuckets, newlog2;
    int rv;

    /* Snapshot the entry count: the walk visits exactly nlimit entries even
       if f removes some along the way. */
    nlimit = ht->nentries;
    n = 0;
    for (bucket = ht->buckets; n != nlimit; ++bucket) {
        hep = bucket;
        while ((he = *hep) != NULL) {
            JS_ASSERT(n < nlimit);
            rv = f(he, n, arg);
            n++;
            if (rv & HT_ENUMERATE_REMOVE) {
                /* Unlink through hep so the chain stays intact; hep is NOT
                   advanced, since *hep is now the following entry. */
                *hep = he->next;
                ht->allocOps->freeEntry(ht->allocPriv, he, HT_FREE_ENTRY);
                --ht->nentries;
            } else {
                hep = &he->next;
            }
            if (rv & HT_ENUMERATE_STOP) {
                goto out;
            }
        }
    }

out:
    /* Shrink table if removal of entries made it underloaded */
    if (ht->nentries != nlimit) {
        JS_ASSERT(ht->nentries < nlimit);
        nbuckets = NBUCKETS(ht);
        if (MINBUCKETS < nbuckets && ht->nentries < UNDERLOADED(nbuckets)) {
            newlog2 = JS_CeilingLog2(ht->nentries);
            if (newlog2 < MINBUCKETSLOG2)
                newlog2 = MINBUCKETSLOG2;

            /* Check that we really shrink the table. */
            JS_ASSERT(JS_HASH_BITS - ht->shift > newlog2);
            Resize(ht, JS_HASH_BITS - newlog2);
        }
    }
    return (int)n;
}
/*
 * Initialize an arena pool.  size is the net payload size per arena, align
 * the allocation alignment (0 selects JS_ARENA_DEFAULT_ALIGN), and quotap,
 * if non-null, points to a byte quota consulted by allocations from this
 * pool.
 */
JS_InitArenaPool(JSArenaPool *pool, const char *name, size_t size,
                 size_t align, size_t *quotap)
{
    if (align == 0)
        align = JS_ARENA_DEFAULT_ALIGN;
    /* Round-to-alignment mask; must be set before JS_ARENA_ALIGN below. */
    pool->mask = JS_BITMASK(JS_CeilingLog2(align));

    /* The built-in head arena is empty: base == avail == limit, all three
       sitting just past the (aligned) arena header. */
    pool->first.next = NULL;
    pool->first.limit = JS_ARENA_ALIGN(pool, &pool->first + 1);
    pool->first.avail = pool->first.limit;
    pool->first.base = pool->first.avail;
    pool->current = &pool->first;
    pool->arenasize = size;
    pool->quotap = quotap;
#ifdef JS_ARENAMETER
    memset(&pool->stats, 0, sizeof pool->stats);
    pool->stats.name = strdup(name);
    pool->stats.next = arena_stats_list;
    arena_stats_list = &pool->stats;
#endif
}
static void * js_alloc_temp_space(void *priv, size_t size) { JSCompiler *jsc = (JSCompiler *) priv; void *space; if (size < TEMP_SIZE_LIMIT) { int bin = JS_CeilingLog2(size) - TEMP_SIZE_START_LOG2; JS_ASSERT(unsigned(bin) < NUM_TEMP_FREELISTS); space = jsc->tempFreeList[bin]; if (space) { jsc->tempFreeList[bin] = *(void **)space; return space; } } JS_ARENA_ALLOCATE(space, &jsc->context->tempPool, size); if (!space) js_ReportOutOfScriptQuota(jsc->context); return space; }
/*
 * Create a hash table sized for about n entries.  keyHash/keyCompare/
 * valueCompare define hashing and equality; allocOps (NULL selects
 * defaultHashAllocOps) and allocPriv control allocation.  Returns NULL if
 * n is out of range or any allocation fails.
 */
JS_NewHashTable(uint32 n, JSHashFunction keyHash,
                JSHashComparator keyCompare, JSHashComparator valueCompare,
                JSHashAllocOps *allocOps, void *allocPriv)
{
    JSHashTable *ht;
    size_t nb;

    /* n is reused as the bucket-count log2 from here on. */
    if (n <= MINBUCKETS) {
        n = MINBUCKETSLOG2;
    } else {
        n = JS_CeilingLog2(n);
        if ((int32)n < 0)
            return NULL;
    }

    if (!allocOps)
        allocOps = &defaultHashAllocOps;

    ht = (JSHashTable*) allocOps->allocTable(allocPriv, sizeof *ht);
    if (!ht)
        return NULL;
    memset(ht, 0, sizeof *ht);
    ht->shift = JS_HASH_BITS - n;
    n = JS_BIT(n);
    nb = n * sizeof(JSHashEntry *);
    ht->buckets = (JSHashEntry**) allocOps->allocTable(allocPriv, nb);
    if (!ht->buckets) {
        /*
         * Bug fix: free the table header with its own size (sizeof *ht),
         * not nb -- nb is the bucket-array size that was requested but
         * never allocated.  A size-aware freeTable would otherwise
         * mis-account or mis-free.
         */
        allocOps->freeTable(allocPriv, ht, sizeof *ht);
        return NULL;
    }
    memset(ht->buckets, 0, nb);

    ht->keyHash = keyHash;
    ht->keyCompare = keyCompare;
    ht->valueCompare = valueCompare;
    ht->allocOps = allocOps;
    ht->allocPriv = allocPriv;
    return ht;
}
/*
 * Initialize an arena pool (legacy signature, no quota pointer).  size is
 * the net payload size per arena; align is the allocation alignment, with
 * 0 selecting JS_ARENA_DEFAULT_ALIGN.
 */
JS_InitArenaPool(JSArenaPool *pool, const char *name, size_t size,
                 size_t align)
{
#ifdef JS_THREADSAFE
    /* Must come through here once in primordial thread to init safely! */
    if (!arena_freelist_lock) {
        arena_freelist_lock = JS_NEW_LOCK();
        JS_ASSERT(arena_freelist_lock);
    }
#endif
    if (align == 0)
        align = JS_ARENA_DEFAULT_ALIGN;
    /* Round-to-alignment mask; must be set before JS_ARENA_ALIGN below. */
    pool->mask = JS_BITMASK(JS_CeilingLog2(align));

    /* The built-in head arena is empty: base == avail == limit, all three
       sitting just past the (aligned) arena header. */
    pool->first.next = NULL;
    pool->first.limit = JS_ARENA_ALIGN(pool, &pool->first + 1);
    pool->first.avail = pool->first.limit;
    pool->first.base = pool->first.avail;
    pool->current = &pool->first;
    pool->arenasize = size;
#ifdef JS_ARENAMETER
    memset(&pool->stats, 0, sizeof pool->stats);
    pool->stats.name = strdup(name);
    pool->stats.next = arena_stats_list;
    arena_stats_list = &pool->stats;
#endif
}