/*
 * When the jschars reside in a freshly allocated buffer the memory can be used
 * as a new JSAtom's storage without copying. The contract is that the caller no
 * longer owns the memory and this method is responsible for freeing the memory.
 */
MOZ_ALWAYS_INLINE
static JSAtom *
AtomizeAndTakeOwnership(ExclusiveContext *cx, jschar *tbchars, size_t length,
                        InternBehavior ib)
{
    JS_ASSERT(tbchars[length] == 0);

    if (JSAtom *s = cx->staticStrings().lookup(tbchars, length)) {
        js_free(tbchars);
        return s;
    }

    AtomHasher::Lookup lookup(tbchars, length);

    AtomSet::Ptr pp = cx->permanentAtoms().readonlyThreadsafeLookup(lookup);
    if (pp) {
        js_free(tbchars);
        return pp->asPtr();
    }

    AutoLockForExclusiveAccess lock(cx);

    /*
     * If a GC occurs at js_NewStringCopy then |p| will still have the correct
     * hash, allowing us to avoid rehashing it. Even though the hash is
     * unchanged, we need to re-lookup the table position because a last-ditch
     * GC will potentially free some table entries.
     */
    AtomSet& atoms = cx->atoms();
    AtomSet::AddPtr p = atoms.lookupForAdd(lookup);
    if (p) {
        JSAtom *atom = p->asPtr();
        p->setTagged(bool(ib));
        js_free(tbchars);
        return atom;
    }

    AutoCompartment ac(cx, cx->atomsCompartment());

    JSFlatString *flat = js_NewString<NoGC>(cx, tbchars, length);
    if (!flat) {
        js_free(tbchars);
        js_ReportOutOfMemory(cx);
        return nullptr;
    }

    JSAtom *atom = flat->morphAtomizedStringIntoAtom();

    if (!atoms.relookupOrAdd(p, lookup, AtomStateEntry(atom, bool(ib)))) {
        js_ReportOutOfMemory(cx); /* SystemAllocPolicy does not report OOM. */
        return nullptr;
    }

    return atom;
}
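/*
 * Illustrative sketch (not part of the original source): the ownership
 * contract above as seen from the caller's side. The helper name and the use
 * of cx->pod_malloc are assumptions for the example; the point is only that
 * the buffer must be null-terminated at |length| and must never be freed by
 * the caller afterwards -- AtomizeAndTakeOwnership frees it on every path.
 */
static JSAtom *
ExampleAtomizeOwnedBuffer(ExclusiveContext *cx, const jschar *src, size_t length)
{
    jschar *buf = cx->pod_malloc<jschar>(length + 1);   /* assumed allocator */
    if (!buf)
        return nullptr;
    mozilla::PodCopy(buf, src, length);
    buf[length] = 0;                /* callee asserts tbchars[length] == 0 */

    /* From here on the buffer belongs to the callee; no js_free(buf) here. */
    return AtomizeAndTakeOwnership(cx, buf, length, DoNotInternAtom);
}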
/*
 * Callers passing OwnChars have freshly allocated *pchars and thus this
 * memory can be used as a new JSAtom's buffer without copying. When this flag
 * is set, the contract is that callers will free *pchars iff *pchars != NULL.
 */
JS_ALWAYS_INLINE
static JSAtom *
AtomizeInline(JSContext *cx, const jschar **pchars, size_t length,
              InternBehavior ib, OwnCharsBehavior ocb = CopyChars)
{
    const jschar *chars = *pchars;

    if (JSAtom *s = JSAtom::lookupStatic(chars, length))
        return s;

    AutoLockAtomsCompartment lock(cx);

    AtomSet &atoms = cx->runtime->atomState.atoms;
    AtomSet::AddPtr p = atoms.lookupForAdd(AtomHasher::Lookup(chars, length));
    if (p) {
        JSAtom *atom = p->asPtr();
        p->setTagged(bool(ib));
        return atom;
    }

    SwitchToCompartment sc(cx, cx->runtime->atomsCompartment);

    JSFixedString *key;
    if (ocb == TakeCharOwnership) {
        key = js_NewString(cx, const_cast<jschar *>(chars), length);
        if (!key)
            return NULL;
        *pchars = NULL; /* Caller should not free *pchars. */
    } else {
        JS_ASSERT(ocb == CopyChars);
        key = js_NewStringCopyN(cx, chars, length);
        if (!key)
            return NULL;
    }

    /*
     * We have to relookup the key as the last ditch GC invoked from the
     * string allocation or OOM handling may unlock the atomsCompartment.
     *
     * N.B. this avoids recomputing the hash but still has a potential
     * (# collisions * # chars) comparison cost in the case of a hash
     * collision!
     */
    AtomHasher::Lookup lookup(chars, length);
    if (!atoms.relookupOrAdd(p, lookup, AtomStateEntry((JSAtom *) key, bool(ib)))) {
        JS_ReportOutOfMemory(cx); /* SystemAllocPolicy does not report OOM. */
        return NULL;
    }

    return key->morphAtomizedStringIntoAtom();
}
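/*
 * Illustrative sketch (not part of the original source): how a caller honors
 * the *pchars contract above when passing TakeCharOwnership. The helper name
 * is hypothetical. On the paths that consume the buffer the callee nulls
 * *pchars; on every other path (static-atom hit, existing-atom hit, failure)
 * *pchars is left intact and the caller must free it.
 */
static JSAtom *
ExampleAtomizeInlineOwned(JSContext *cx, jschar *chars, size_t length)
{
    const jschar *pchars = chars;
    JSAtom *atom = AtomizeInline(cx, &pchars, length, DoNotInternAtom,
                                 TakeCharOwnership);
    if (pchars)
        js_free((void *) pchars);   /* ownership was not consumed */
    return atom;
}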
template <typename CharT>
MOZ_ALWAYS_INLINE
static JSAtom *
AtomizeAndCopyChars(ExclusiveContext *cx, const CharT *tbchars, size_t length,
                    InternBehavior ib)
{
    if (JSAtom *s = cx->staticStrings().lookup(tbchars, length))
        return s;

    AtomHasher::Lookup lookup(tbchars, length);

    // Note: when this function is called while the permanent atoms table is
    // being initialized (in initializeAtoms()), |permanentAtoms| is not yet
    // initialized so this lookup is always skipped. Only once
    // transformToPermanentAtoms() is called does |permanentAtoms| get
    // initialized and then this lookup will go ahead.
    if (cx->isPermanentAtomsInitialized()) {
        AtomSet::Ptr pp = cx->permanentAtoms().readonlyThreadsafeLookup(lookup);
        if (pp)
            return pp->asPtr();
    }

    AutoLockForExclusiveAccess lock(cx);

    AtomSet& atoms = cx->atoms();
    AtomSet::AddPtr p = atoms.lookupForAdd(lookup);
    if (p) {
        JSAtom *atom = p->asPtr();
        p->setTagged(bool(ib));
        return atom;
    }

    AutoCompartment ac(cx, cx->atomsCompartment());

    JSFlatString *flat = NewStringCopyN<NoGC>(cx, tbchars, length);
    if (!flat) {
        // Grudgingly forgo last-ditch GC. The alternative would be to release
        // the lock, manually GC here, and retry from the top. If you fix this,
        // please also fix or comment the similar case in Symbol::new_.
        ReportOutOfMemory(cx);
        return nullptr;
    }

    JSAtom *atom = flat->morphAtomizedStringIntoAtom();

    // We have held the lock since looking up p, and the operations we've done
    // since then can't GC; therefore the atoms table has not been modified and
    // p is still valid.
    if (!atoms.add(p, AtomStateEntry(atom, bool(ib)))) {
        ReportOutOfMemory(cx); /* SystemAllocPolicy does not report OOM. */
        return nullptr;
    }

    return atom;
}
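/*
 * Illustrative sketch (not part of the original source): the template above
 * is meant to be instantiated once per character width. These hypothetical
 * wrappers assume the Latin1Char/char16_t era of this revision; the real
 * entry points differ in name and plumbing.
 */
static JSAtom *
ExampleAtomizeLatin1(ExclusiveContext *cx, const Latin1Char *chars, size_t length)
{
    return AtomizeAndCopyChars<Latin1Char>(cx, chars, length, DoNotInternAtom);
}

static JSAtom *
ExampleAtomizeTwoByte(ExclusiveContext *cx, const char16_t *chars, size_t length)
{
    return AtomizeAndCopyChars<char16_t>(cx, chars, length, DoNotInternAtom);
}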
/*
 * When the jschars reside in a freshly allocated buffer the memory can be used
 * as a new JSAtom's storage without copying. The contract is that the caller no
 * longer owns the memory and this method is responsible for freeing the memory.
 */
JS_ALWAYS_INLINE
static JSAtom *
AtomizeAndTakeOwnership(JSContext *cx, const jschar *tbchars, size_t length,
                        InternBehavior ib)
{
    JS_ASSERT(tbchars[length] == 0);

    if (JSAtom *s = cx->runtime->staticStrings.lookup(tbchars, length)) {
        js_free((void*)tbchars);
        return s;
    }

    /*
     * If a GC occurs at js_NewStringCopy then |p| will still have the correct
     * hash, allowing us to avoid rehashing it. Even though the hash is
     * unchanged, we need to re-lookup the table position because a last-ditch
     * GC will potentially free some table entries.
     */
    AtomSet& atoms = cx->runtime->atoms;
    AtomSet::AddPtr p = atoms.lookupForAdd(AtomHasher::Lookup(tbchars, length));
    SkipRoot skipHash(cx, &p); /* Prevent the hash from being poisoned. */

    if (p) {
        JSAtom *atom = p->asPtr();
        p->setTagged(bool(ib));
        js_free((void*)tbchars);
        return atom;
    }

    AutoEnterAtomsCompartment ac(cx);

    JSFlatString *flat = js_NewString<CanGC>(cx, const_cast<jschar*>(tbchars), length);
    if (!flat) {
        js_free((void*)tbchars);
        return NULL;
    }

    JSAtom *atom = flat->morphAtomizedStringIntoAtom();

    if (!atoms.relookupOrAdd(p, AtomHasher::Lookup(tbchars, length),
                             AtomStateEntry(atom, bool(ib)))) {
        JS_ReportOutOfMemory(cx); /* SystemAllocPolicy does not report OOM. */
        return NULL;
    }

    return atom;
}
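/*
 * Illustrative sketch (not part of the original source): the reason the
 * comment above can promise that |p| "will still have the correct hash" is
 * that the lookup key carries a precomputed hash, so the re-lookup after a
 * possible last-ditch GC rehashes nothing. A simplified shape of such a key
 * might look like this; the real AtomHasher::Lookup differs in detail, and
 * mozilla::HashString is assumed as the hash helper.
 */
struct ExampleLookup
{
    const jschar *chars;
    size_t length;
    HashNumber hash;   /* computed once, reused by relookupOrAdd */

    ExampleLookup(const jschar *chars, size_t length)
      : chars(chars), length(length),
        hash(mozilla::HashString(chars, length)) {}
};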
template <AllowGC allowGC>
JS_ALWAYS_INLINE
static JSAtom *
AtomizeAndCopyChars(ExclusiveContext *cx, const jschar *tbchars, size_t length,
                    InternBehavior ib)
{
    if (JSAtom *s = cx->staticStrings().lookup(tbchars, length))
        return s;

    /*
     * If a GC occurs at js_NewStringCopy then |p| will still have the correct
     * hash, allowing us to avoid rehashing it. Even though the hash is
     * unchanged, we need to re-lookup the table position because a last-ditch
     * GC will potentially free some table entries.
     */
    AutoLockForExclusiveAccess lock(cx);

    AtomSet& atoms = cx->atoms();
    AtomSet::AddPtr p = atoms.lookupForAdd(AtomHasher::Lookup(tbchars, length));
    SkipRoot skipHash(cx, &p); /* Prevent the hash from being poisoned. */

    if (p) {
        JSAtom *atom = p->asPtr();
        p->setTagged(bool(ib));
        return atom;
    }

    AutoCompartment ac(cx, cx->atomsCompartment());

    JSFlatString *flat = js_NewStringCopyN<allowGC>(cx, tbchars, length);
    if (!flat)
        return NULL;

    JSAtom *atom = flat->morphAtomizedStringIntoAtom();

    if (!atoms.relookupOrAdd(p, AtomHasher::Lookup(tbchars, length),
                             AtomStateEntry(atom, bool(ib)))) {
        if (allowGC)
            js_ReportOutOfMemory(cx); /* SystemAllocPolicy does not report OOM. */
        return NULL;
    }

    return atom;
}
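/*
 * Illustrative sketch (not part of the original source): the usual calling
 * pattern for an AllowGC-templated helper like the one above. A caller that
 * cannot tolerate GC tries the NoGC instantiation first and, on failure,
 * falls back to the CanGC instantiation from a point where GC is safe. The
 * wrapper name is hypothetical.
 */
static JSAtom *
ExampleAtomizeNoGCFirst(ExclusiveContext *cx, const jschar *chars, size_t length)
{
    /* Fast path: may fail (returns NULL) instead of GC'ing or reporting. */
    if (JSAtom *atom = AtomizeAndCopyChars<NoGC>(cx, chars, length, DoNotInternAtom))
        return atom;

    /* Slow path: allowed to run a last-ditch GC and report OOM. */
    return AtomizeAndCopyChars<CanGC>(cx, chars, length, DoNotInternAtom);
}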