template <AllowGC allowGC>
JSString *
js::ConcatStrings(ThreadSafeContext *cx,
                  typename MaybeRooted<JSString*, allowGC>::HandleType left,
                  typename MaybeRooted<JSString*, allowGC>::HandleType right)
{
    JS_ASSERT_IF(!left->isAtom(), cx->isInsideCurrentZone(left));
    JS_ASSERT_IF(!right->isAtom(), cx->isInsideCurrentZone(right));

    size_t leftLen = left->length();
    if (leftLen == 0)
        return right;

    size_t rightLen = right->length();
    if (rightLen == 0)
        return left;

    size_t wholeLength = leftLen + rightLen;
    if (!JSString::validateLength(cx, wholeLength))
        return nullptr;

    bool isLatin1 = left->hasLatin1Chars() && right->hasLatin1Chars();
    bool canUseFatInline = isLatin1
                           ? JSFatInlineString::latin1LengthFits(wholeLength)
                           : JSFatInlineString::twoByteLengthFits(wholeLength);
    if (canUseFatInline && cx->isJSContext()) {
        JSFatInlineString *str = js_NewGCFatInlineString<allowGC>(cx);
        if (!str)
            return nullptr;

        AutoCheckCannotGC nogc;
        ScopedThreadSafeStringInspector leftInspector(left);
        ScopedThreadSafeStringInspector rightInspector(right);
        if (!leftInspector.ensureChars(cx, nogc) || !rightInspector.ensureChars(cx, nogc))
            return nullptr;

        if (isLatin1) {
            Latin1Char *buf = str->initLatin1(wholeLength);
            PodCopy(buf, leftInspector.latin1Chars(), leftLen);
            PodCopy(buf + leftLen, rightInspector.latin1Chars(), rightLen);
            buf[wholeLength] = 0;
        } else {
            jschar *buf = str->initTwoByte(wholeLength);
            if (leftInspector.hasTwoByteChars())
                PodCopy(buf, leftInspector.twoByteChars(), leftLen);
            else
                CopyAndInflateChars(buf, leftInspector.latin1Chars(), leftLen);
            if (rightInspector.hasTwoByteChars())
                PodCopy(buf + leftLen, rightInspector.twoByteChars(), rightLen);
            else
                CopyAndInflateChars(buf + leftLen, rightInspector.latin1Chars(), rightLen);
            buf[wholeLength] = 0;
        }

        return str;
    }

    return JSRope::new_<allowGC>(cx, left, right, wholeLength);
}
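All of these snippets lean on mozilla::PodCopy from mfbt/PodOperations.h. Roughly, it is a typed wrapper over memcpy for plain-old-data element types: it copies element-wise for small counts, calls memcpy for larger ones, and asserts that the ranges do not overlap (PodMove handles overlap). A minimal sketch of that behavior, not the shipped implementation:

#include <cstring>
#include <cstddef>

// Sketch of PodCopy: copy |nelem| elements of POD type T from |src| to |dst|.
// The real mfbt version is only valid for non-overlapping ranges.
template <typename T>
static void
PodCopySketch(T* dst, const T* src, size_t nelem)
{
    memcpy(dst, src, nelem * sizeof(T));
}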
bool
StackFrame::copyRawFrameSlots(AutoValueVector *vec)
{
    if (!vec->resize(numFormalArgs() + script()->nfixed()))
        return false;

    PodCopy(vec->begin(), argv(), numFormalArgs());
    PodCopy(vec->begin() + numFormalArgs(), slots(), script()->nfixed());
    return true;
}
template <typename CharT>
JSFlatString *
JSDependentString::undependInternal(ExclusiveContext *cx)
{
    /*
     * We destroy the base() pointer in undepend, so we need a pre-barrier. We
     * don't need a post-barrier because there aren't any outgoing pointers
     * afterwards.
     */
    JSString::writeBarrierPre(base());

    size_t n = length();
    CharT *s = cx->pod_malloc<CharT>(n + 1);
    if (!s)
        return nullptr;

    AutoCheckCannotGC nogc;
    PodCopy(s, nonInlineChars<CharT>(nogc), n);
    s[n] = '\0';
    setNonInlineChars<CharT>(s);

    /*
     * Transform *this into an undepended string so 'base' will remain rooted
     * for the benefit of any other dependent string that depends on *this.
     */
    if (IsSame<CharT, Latin1Char>::value)
        d.u1.flags = UNDEPENDED_FLAGS | LATIN1_CHARS_BIT;
    else
        d.u1.flags = UNDEPENDED_FLAGS;

    return &this->asFlat();
}
JSFlatString *
JSDependentString::undepend(ExclusiveContext *cx)
{
    JS_ASSERT(JSString::isDependent());

    /*
     * We destroy the base() pointer in undepend, so we need a pre-barrier. We
     * don't need a post-barrier because there aren't any outgoing pointers
     * afterwards.
     */
    JSString::writeBarrierPre(base());

    size_t n = length();
    size_t size = (n + 1) * sizeof(jschar);
    jschar *s = (jschar *) cx->malloc_(size);
    if (!s)
        return nullptr;

    PodCopy(s, nonInlineChars(), n);
    s[n] = 0;
    d.s.u2.nonInlineCharsTwoByte = s;

    /*
     * Transform *this into an undepended string so 'base' will remain rooted
     * for the benefit of any other dependent string that depends on *this.
     */
    d.u1.flags = UNDEPENDED_FLAGS;

    return &this->asFlat();
}
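Both undepend variants open with the same pre-barrier reasoning, which is an incremental-GC invariant: before a pointer field is overwritten while an incremental mark is in progress, the old referent must be marked, or the collector may never see it. A conceptual sketch only; |ZoneNeedsBarrier| and |MarkStringForBarrier| are hypothetical stand-ins for the real zone check and marking call inside JSString::writeBarrierPre:

// Conceptual shape of a pre-write (deletion) barrier on a string field.
static void
WriteBarrierPreSketch(JSString *old)
{
    if (old && ZoneNeedsBarrier(old))   // hypothetical: is incremental marking active?
        MarkStringForBarrier(old);      // hypothetical: keep 'old' reachable for this cycle
}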
NS_IMETHODIMP
nsBinaryInputStream::ReadArrayBuffer(uint32_t aLength, JS::Handle<JS::Value> aBuffer,
                                     JSContext* aCx, uint32_t* aReadLength)
{
  if (!aBuffer.isObject()) {
    return NS_ERROR_FAILURE;
  }
  JS::RootedObject buffer(aCx, &aBuffer.toObject());
  if (!JS_IsArrayBufferObject(buffer)) {
    return NS_ERROR_FAILURE;
  }

  uint32_t bufferLength = JS_GetArrayBufferByteLength(buffer);
  if (bufferLength < aLength) {
    return NS_ERROR_FAILURE;
  }

  uint32_t bufSize = std::min<uint32_t>(aLength, 4096);
  UniquePtr<char[]> buf = MakeUnique<char[]>(bufSize);

  uint32_t pos = 0;
  *aReadLength = 0;
  do {
    // Read data into temporary buffer.
    uint32_t bytesRead;
    uint32_t amount = std::min(aLength - pos, bufSize);
    nsresult rv = Read(buf.get(), amount, &bytesRead);
    if (NS_WARN_IF(NS_FAILED(rv))) {
      return rv;
    }
    MOZ_ASSERT(bytesRead <= amount);

    if (bytesRead == 0) {
      break;
    }

    // Copy data into the actual buffer.
    JS::AutoCheckCannotGC nogc;
    bool isShared;
    if (bufferLength != JS_GetArrayBufferByteLength(buffer)) {
      return NS_ERROR_FAILURE;
    }

    char* data = reinterpret_cast<char*>(JS_GetArrayBufferData(buffer, &isShared, nogc));
    MOZ_ASSERT(!isShared);  // Implied by JS_GetArrayBufferData()
    if (!data) {
      return NS_ERROR_FAILURE;
    }

    *aReadLength += bytesRead;
    PodCopy(data + pos, buf.get(), bytesRead);

    pos += bytesRead;
  } while (pos < aLength);

  return NS_OK;
}
void
CopyChars(jschar *dest, const JSLinearString &str)
{
    AutoCheckCannotGC nogc;
    if (str.hasTwoByteChars())
        PodCopy(dest, str.twoByteChars(nogc), str.length());
    else
        CopyAndInflateChars(dest, str.latin1Chars(nogc), str.length());
}
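CopyChars has to bridge the width mismatch when a Latin-1 source feeds a two-byte destination, which is what CopyAndInflateChars handles. A minimal sketch of the inflation (illustrative only, not the shipped implementation): widen each Latin-1 byte to a 16-bit code unit, which is lossless because Latin-1 maps one-to-one onto U+0000..U+00FF.

// Illustrative inflation loop; 'jschar' is SpiderMonkey's 16-bit char type.
static void
CopyAndInflateCharsSketch(jschar *dst, const Latin1Char *src, size_t n)
{
    for (size_t i = 0; i < n; i++)
        dst[i] = jschar(src[i]);   // zero-extend each byte
}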
bool
MatchPairs::initArrayFrom(MatchPairs& copyFrom)
{
    MOZ_ASSERT(copyFrom.pairCount() > 0);

    if (!allocOrExpandArray(copyFrom.pairCount()))
        return false;

    PodCopy(pairs_, copyFrom.pairs_, pairCount_);
    return true;
}
template <AllowGC allowGC>
JSString *
js::ConcatStrings(ThreadSafeContext *cx,
                  typename MaybeRooted<JSString*, allowGC>::HandleType left,
                  typename MaybeRooted<JSString*, allowGC>::HandleType right)
{
    JS_ASSERT_IF(!left->isAtom(), cx->isInsideCurrentZone(left));
    JS_ASSERT_IF(!right->isAtom(), cx->isInsideCurrentZone(right));

    size_t leftLen = left->length();
    if (leftLen == 0)
        return right;

    size_t rightLen = right->length();
    if (rightLen == 0)
        return left;

    size_t wholeLength = leftLen + rightLen;
    if (!JSString::validateLength(cx, wholeLength))
        return nullptr;

    if (JSFatInlineString::twoByteLengthFits(wholeLength) && cx->isJSContext()) {
        JSFatInlineString *str = js_NewGCFatInlineString<allowGC>(cx);
        if (!str)
            return nullptr;

        ScopedThreadSafeStringInspector leftInspector(left);
        ScopedThreadSafeStringInspector rightInspector(right);
        if (!leftInspector.ensureChars(cx) || !rightInspector.ensureChars(cx))
            return nullptr;

        jschar *buf = str->init(wholeLength);
        PodCopy(buf, leftInspector.chars(), leftLen);
        PodCopy(buf + leftLen, rightInspector.chars(), rightLen);
        buf[wholeLength] = 0;
        return str;
    }

    return JSRope::new_<allowGC>(cx, left, right, wholeLength);
}
bool
CacheableChars::clone(JSContext* cx, CacheableChars* out) const
{
    uint32_t length = NullableStringLength(get());

    UniqueChars chars(cx->pod_calloc<char>(length + 1));
    if (!chars)
        return false;

    PodCopy(chars.get(), get(), length);

    *out = Move(chars);
    return true;
}
bool
JSDependentString::copyNonPureCharsZ(ThreadSafeContext *cx, ScopedJSFreePtr<jschar> &out) const
{
    JS_ASSERT(JSString::isDependent());

    size_t n = length();
    jschar *s = cx->pod_malloc<jschar>(n + 1);
    if (!s)
        return false;

    PodCopy(s, nonInlineChars(), n);
    s[n] = 0;

    out.reset(s);
    return true;
}
static bool
DecodeFuncBody(JSContext* cx, Decoder& d, ModuleGenerator& mg, FunctionGenerator& fg,
               uint32_t funcIndex)
{
    const uint8_t* bodyBegin = d.currentPosition();

    FunctionDecoder f(cx, d, mg, fg, funcIndex);
    if (!DecodeExpr(f, f.ret()))
        return false;

    const uint8_t* bodyEnd = d.currentPosition();
    uintptr_t bodyLength = bodyEnd - bodyBegin;
    if (!fg.bytecode().resize(bodyLength))
        return false;

    PodCopy(fg.bytecode().begin(), bodyBegin, bodyLength);
    return true;
}
bool
JSRope::copyNonPureCharsInternal(ThreadSafeContext *cx, ScopedJSFreePtr<jschar> &out,
                                 bool nullTerminate) const
{
    /*
     * Perform a non-destructive post-order traversal of the rope, splatting
     * each node's characters into a contiguous buffer.
     */

    size_t n = length();
    if (cx)
        out.reset(cx->pod_malloc<jschar>(n + 1));
    else
        out.reset(js_pod_malloc<jschar>(n + 1));

    if (!out)
        return false;

    Vector<const JSString *, 8, SystemAllocPolicy> nodeStack;
    const JSString *str = this;
    jschar *pos = out;
    while (true) {
        if (str->isRope()) {
            if (!nodeStack.append(str->asRope().rightChild()))
                return false;
            str = str->asRope().leftChild();
        } else {
            size_t len = str->length();
            PodCopy(pos, str->asLinear().chars(), len);
            pos += len;
            if (nodeStack.empty())
                break;
            str = nodeStack.popCopy();
        }
    }

    JS_ASSERT(pos == out + n);

    if (nullTerminate)
        out[n] = 0;

    return true;
}
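The loop above is the iterative form of a straightforward recursive copy. For contrast, a sketch of the recursive equivalent (a hypothetical helper, not in the tree, and unsafe for deep ropes because it can overflow the C stack, which is exactly why the real code keeps an explicit Vector of pending right children):

// Hypothetical recursive equivalent of the copy loop above.
static jschar *
CopyRopeCharsRecursive(const JSString *str, jschar *pos)
{
    if (str->isRope()) {
        pos = CopyRopeCharsRecursive(str->asRope().leftChild(), pos);
        return CopyRopeCharsRecursive(str->asRope().rightChild(), pos);
    }
    size_t len = str->length();
    PodCopy(pos, str->asLinear().chars(), len);
    return pos + len;
}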
template <typename CharT>
JSFlatString*
JSDependentString::undependInternal(ExclusiveContext* cx)
{
    size_t n = length();
    CharT* s = cx->pod_malloc<CharT>(n + 1);
    if (!s)
        return nullptr;

    AutoCheckCannotGC nogc;
    PodCopy(s, nonInlineChars<CharT>(nogc), n);
    s[n] = '\0';
    setNonInlineChars<CharT>(s);

    /*
     * Transform *this into an undepended string so 'base' will remain rooted
     * for the benefit of any other dependent string that depends on *this.
     */
    if (IsSame<CharT, Latin1Char>::value)
        d.u1.flags = UNDEPENDED_FLAGS | LATIN1_CHARS_BIT;
    else
        d.u1.flags = UNDEPENDED_FLAGS;

    return &this->asFlat();
}
static bool
Snapshot(JSContext* cx, HandleObject pobj_, unsigned flags, AutoIdVector* props)
{
    // We initialize |ht| lazily (in Enumerate()) because it ends up unused
    // anywhere from 67--99.9% of the time.
    Maybe<IdSet> ht;
    RootedObject pobj(cx, pobj_);

    do {
        if (JSNewEnumerateOp enumerate = pobj->getOps()->enumerate) {
            // This hook has full control over what gets enumerated.
            AutoIdVector properties(cx);
            if (!enumerate(cx, pobj, properties))
                return false;

            for (size_t n = 0; n < properties.length(); n++) {
                if (!Enumerate(cx, pobj, properties[n], true, flags, ht, props))
                    return false;
            }

            if (pobj->isNative()) {
                if (!EnumerateNativeProperties(cx, pobj.as<NativeObject>(), flags, ht, props))
                    return false;
            }
        } else if (pobj->isNative()) {
            // Give the object a chance to resolve all lazy properties.
            if (JSEnumerateOp enumerate = pobj->getClass()->enumerate) {
                if (!enumerate(cx, pobj.as<NativeObject>()))
                    return false;
            }
            if (!EnumerateNativeProperties(cx, pobj.as<NativeObject>(), flags, ht, props))
                return false;
        } else if (pobj->is<ProxyObject>()) {
            AutoIdVector proxyProps(cx);
            if (flags & JSITER_HIDDEN || flags & JSITER_SYMBOLS) {
                // This gets all property keys, both strings and symbols. The
                // call to Enumerate in the loop below will filter out unwanted
                // keys, per the flags.
                if (!Proxy::ownPropertyKeys(cx, pobj, proxyProps))
                    return false;

                Rooted<PropertyDescriptor> desc(cx);
                for (size_t n = 0, len = proxyProps.length(); n < len; n++) {
                    bool enumerable = false;

                    // We need to filter if the caller just wants enumerable
                    // symbols.
                    if (!(flags & JSITER_HIDDEN)) {
                        if (!Proxy::getOwnPropertyDescriptor(cx, pobj, proxyProps[n], &desc))
                            return false;
                        enumerable = desc.enumerable();
                    }

                    if (!Enumerate(cx, pobj, proxyProps[n], enumerable, flags, ht, props))
                        return false;
                }
            } else {
                // Returns enumerable property names (no symbols).
                if (!Proxy::getOwnEnumerablePropertyKeys(cx, pobj, proxyProps))
                    return false;

                for (size_t n = 0, len = proxyProps.length(); n < len; n++) {
                    if (!Enumerate(cx, pobj, proxyProps[n], true, flags, ht, props))
                        return false;
                }
            }
        } else {
            MOZ_CRASH("non-native objects must have an enumerate op");
        }

        if (flags & JSITER_OWNONLY)
            break;

        if (!GetPrototype(cx, pobj, &pobj))
            return false;

    } while (pobj != nullptr);

#ifdef JS_MORE_DETERMINISTIC

    /*
     * In some cases the enumeration order for an object depends on the
     * execution mode (interpreter vs. JIT), especially for native objects
     * with a class enumerate hook (where resolving a property changes the
     * resulting enumeration order). These aren't really bugs, but the
     * differences can change the generated output and confuse correctness
     * fuzzers, so we sort the ids if such a fuzzer is running.
     *
     * We don't do this in the general case because (a) doing so is slow,
     * and (b) it also breaks the web, which expects enumeration order to
     * follow the order in which properties are added, in certain cases.
     * Since ECMA does not specify an enumeration order for objects, both
     * behaviors are technically correct to do.
     */

    jsid* ids = props->begin();
    size_t n = props->length();

    AutoIdVector tmp(cx);
    if (!tmp.resize(n))
        return false;
    PodCopy(tmp.begin(), ids, n);

    if (!MergeSort(ids, n, tmp.begin(), SortComparatorIds(cx)))
        return false;

#endif /* JS_MORE_DETERMINISTIC */

    return true;
}
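The PodCopy into |tmp| before sorting is not a stray copy: js::MergeSort (js/src/ds/Sort.h) is an out-of-place merge sort that needs caller-provided scratch storage of at least |n| elements, and it ping-pongs runs between the input and the scratch buffer. The same idiom recurs with SortComparatorIntegerIds in EnumerateNativeProperties below. A minimal, self-contained illustration of the pattern with a hypothetical element type and comparator (names here are illustrative, not from the tree):

// Hypothetical standalone use of the MergeSort-with-scratch idiom.
struct CompareUint32
{
    bool operator()(uint32_t a, uint32_t b, bool *lessOrEqual) {
        *lessOrEqual = a <= b;   // js::MergeSort comparators report <= via outparam
        return true;             // 'true' means the comparison itself succeeded
    }
};

static bool
SortUint32s(uint32_t *elems, size_t n)
{
    js::Vector<uint32_t, 0, js::SystemAllocPolicy> scratch;
    if (!scratch.resize(n))      // scratch must be at least as large as the input
        return false;
    return js::MergeSort(elems, n, scratch.begin(), CompareUint32());
}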
void
CopyChars(Latin1Char *dest, const JSLinearString &str)
{
    AutoCheckCannotGC nogc;
    PodCopy(dest, str.latin1Chars(nogc), str.length());
}
template <AllowGC allowGC>
JSString*
js::ConcatStrings(ExclusiveContext* cx,
                  typename MaybeRooted<JSString*, allowGC>::HandleType left,
                  typename MaybeRooted<JSString*, allowGC>::HandleType right)
{
    MOZ_ASSERT_IF(!left->isAtom(), cx->isInsideCurrentZone(left));
    MOZ_ASSERT_IF(!right->isAtom(), cx->isInsideCurrentZone(right));

    size_t leftLen = left->length();
    if (leftLen == 0)
        return right;

    size_t rightLen = right->length();
    if (rightLen == 0)
        return left;

    size_t wholeLength = leftLen + rightLen;
    if (!JSString::validateLength(cx, wholeLength))
        return nullptr;

    bool isLatin1 = left->hasLatin1Chars() && right->hasLatin1Chars();
    bool canUseInline = isLatin1
                        ? JSInlineString::lengthFits<Latin1Char>(wholeLength)
                        : JSInlineString::lengthFits<char16_t>(wholeLength);
    if (canUseInline && cx->isJSContext()) {
        Latin1Char* latin1Buf = nullptr;  // initialize to silence GCC warning
        char16_t* twoByteBuf = nullptr;   // initialize to silence GCC warning
        JSInlineString* str = isLatin1
                              ? AllocateInlineString<allowGC>(cx, wholeLength, &latin1Buf)
                              : AllocateInlineString<allowGC>(cx, wholeLength, &twoByteBuf);
        if (!str)
            return nullptr;

        AutoCheckCannotGC nogc;
        JSLinearString* leftLinear = left->ensureLinear(cx);
        if (!leftLinear)
            return nullptr;
        JSLinearString* rightLinear = right->ensureLinear(cx);
        if (!rightLinear)
            return nullptr;

        if (isLatin1) {
            PodCopy(latin1Buf, leftLinear->latin1Chars(nogc), leftLen);
            PodCopy(latin1Buf + leftLen, rightLinear->latin1Chars(nogc), rightLen);
            latin1Buf[wholeLength] = 0;
        } else {
            if (leftLinear->hasTwoByteChars())
                PodCopy(twoByteBuf, leftLinear->twoByteChars(nogc), leftLen);
            else
                CopyAndInflateChars(twoByteBuf, leftLinear->latin1Chars(nogc), leftLen);
            if (rightLinear->hasTwoByteChars())
                PodCopy(twoByteBuf + leftLen, rightLinear->twoByteChars(nogc), rightLen);
            else
                CopyAndInflateChars(twoByteBuf + leftLen, rightLinear->latin1Chars(nogc), rightLen);
            twoByteBuf[wholeLength] = 0;
        }

        return str;
    }

    return JSRope::new_<allowGC>(cx, left, right, wholeLength);
}
static bool
EnumerateNativeProperties(JSContext* cx, HandleNativeObject pobj, unsigned flags,
                          Maybe<IdSet>& ht, AutoIdVector* props,
                          Handle<UnboxedPlainObject*> unboxed = nullptr)
{
    bool enumerateSymbols;
    if (flags & JSITER_SYMBOLSONLY) {
        enumerateSymbols = true;
    } else {
        /* Collect any dense elements from this object. */
        size_t firstElemIndex = props->length();
        size_t initlen = pobj->getDenseInitializedLength();
        const Value* vp = pobj->getDenseElements();
        bool hasHoles = false;
        for (size_t i = 0; i < initlen; ++i, ++vp) {
            if (vp->isMagic(JS_ELEMENTS_HOLE)) {
                hasHoles = true;
            } else {
                /* Dense arrays never get so large that i would not fit into an integer id. */
                if (!Enumerate(cx, pobj, INT_TO_JSID(i), /* enumerable = */ true, flags, ht, props))
                    return false;
            }
        }

        /* Collect any typed array or shared typed array elements from this object. */
        if (pobj->is<TypedArrayObject>()) {
            size_t len = pobj->as<TypedArrayObject>().length();
            for (size_t i = 0; i < len; i++) {
                if (!Enumerate(cx, pobj, INT_TO_JSID(i), /* enumerable = */ true, flags, ht, props))
                    return false;
            }
        }

        // Collect any sparse elements from this object.
        bool isIndexed = pobj->isIndexed();
        if (isIndexed) {
            // If the dense elements didn't have holes, we don't need to include
            // them in the sort.
            if (!hasHoles)
                firstElemIndex = props->length();

            for (Shape::Range<NoGC> r(pobj->lastProperty()); !r.empty(); r.popFront()) {
                Shape& shape = r.front();
                jsid id = shape.propid();
                uint32_t dummy;
                if (IdIsIndex(id, &dummy)) {
                    if (!Enumerate(cx, pobj, id, shape.enumerable(), flags, ht, props))
                        return false;
                }
            }

            MOZ_ASSERT(firstElemIndex <= props->length());

            jsid* ids = props->begin() + firstElemIndex;
            size_t n = props->length() - firstElemIndex;

            AutoIdVector tmp(cx);
            if (!tmp.resize(n))
                return false;
            PodCopy(tmp.begin(), ids, n);

            if (!MergeSort(ids, n, tmp.begin(), SortComparatorIntegerIds))
                return false;
        }

        if (unboxed) {
            // If |unboxed| is set then |pobj| is the expando for an unboxed
            // plain object we are enumerating. Add the unboxed properties
            // themselves here since they are all property names that were
            // given to the object before any of the expando's properties.
            MOZ_ASSERT(pobj->is<UnboxedExpandoObject>());
            if (!EnumerateExtraProperties(cx, unboxed, flags, ht, props))
                return false;
        }

        size_t initialLength = props->length();

        /* Collect all unique property names from this object's shape. */
        bool symbolsFound = false;
        Shape::Range<NoGC> r(pobj->lastProperty());
        for (; !r.empty(); r.popFront()) {
            Shape& shape = r.front();
            jsid id = shape.propid();

            if (JSID_IS_SYMBOL(id)) {
                symbolsFound = true;
                continue;
            }

            uint32_t dummy;
            if (isIndexed && IdIsIndex(id, &dummy))
                continue;

            if (!Enumerate(cx, pobj, id, shape.enumerable(), flags, ht, props))
                return false;
        }
        ::Reverse(props->begin() + initialLength, props->end());

        enumerateSymbols = symbolsFound && (flags & JSITER_SYMBOLS);
    }

    if (enumerateSymbols) {
        // Do a second pass to collect symbols. ES6 draft rev 25 (2014 May 22)
        // 9.1.12 requires that all symbols appear after all strings in the
        // result.
        size_t initialLength = props->length();
        for (Shape::Range<NoGC> r(pobj->lastProperty()); !r.empty(); r.popFront()) {
            Shape& shape = r.front();
            jsid id = shape.propid();
            if (JSID_IS_SYMBOL(id)) {
                if (!Enumerate(cx, pobj, id, shape.enumerable(), flags, ht, props))
                    return false;
            }
        }
        ::Reverse(props->begin() + initialLength, props->end());
    }

    return true;
}
static bool
Snapshot(JSContext* cx, HandleObject pobj_, unsigned flags, AutoIdVector* props)
{
    // We initialize |ht| lazily (in Enumerate()) because it ends up unused
    // anywhere from 67--99.9% of the time.
    Maybe<IdSet> ht;
    RootedObject pobj(cx, pobj_);

    // Don't check for duplicates if we're only interested in own properties.
    // This does the right thing for most objects: native objects don't have
    // duplicate property ids and we allow the [[OwnPropertyKeys]] proxy trap
    // to return duplicates.
    //
    // The only special case is when the object has a newEnumerate hook: it
    // can return duplicate properties and we have to filter them. This is
    // handled below.
    bool checkForDuplicates = !(flags & JSITER_OWNONLY);

    do {
        if (pobj->getClass()->getNewEnumerate()) {
            if (pobj->is<UnboxedPlainObject>() &&
                pobj->as<UnboxedPlainObject>().maybeExpando())
            {
                // Special case unboxed objects with an expando object.
                RootedNativeObject expando(cx, pobj->as<UnboxedPlainObject>().maybeExpando());
                if (!EnumerateNativeProperties(cx, expando, flags, ht, props,
                                               checkForDuplicates,
                                               pobj.as<UnboxedPlainObject>()))
                {
                    return false;
                }
            } else {
                // The newEnumerate hook may return duplicates. Whitelist the
                // unboxed object hooks because we know they are well-behaved.
                if (!pobj->is<UnboxedPlainObject>())
                    checkForDuplicates = true;

                if (checkForDuplicates) {
                    if (!EnumerateExtraProperties<true>(cx, pobj, flags, ht, props))
                        return false;
                } else {
                    if (!EnumerateExtraProperties<false>(cx, pobj, flags, ht, props))
                        return false;
                }

                if (pobj->isNative()) {
                    if (!EnumerateNativeProperties(cx, pobj.as<NativeObject>(), flags, ht,
                                                   props, checkForDuplicates))
                    {
                        return false;
                    }
                }
            }
        } else if (pobj->isNative()) {
            // Give the object a chance to resolve all lazy properties.
            if (JSEnumerateOp enumerate = pobj->getClass()->getEnumerate()) {
                if (!enumerate(cx, pobj.as<NativeObject>()))
                    return false;
            }
            if (!EnumerateNativeProperties(cx, pobj.as<NativeObject>(), flags, ht,
                                           props, checkForDuplicates))
            {
                return false;
            }
        } else if (pobj->is<ProxyObject>()) {
            if (checkForDuplicates) {
                if (!EnumerateProxyProperties<true>(cx, pobj, flags, ht, props))
                    return false;
            } else {
                if (!EnumerateProxyProperties<false>(cx, pobj, flags, ht, props))
                    return false;
            }
        } else {
            MOZ_CRASH("non-native objects must have an enumerate op");
        }

        if (flags & JSITER_OWNONLY)
            break;

        if (!GetPrototype(cx, pobj, &pobj))
            return false;

    } while (pobj != nullptr);

#ifdef JS_MORE_DETERMINISTIC

    /*
     * In some cases the enumeration order for an object depends on the
     * execution mode (interpreter vs. JIT), especially for native objects
     * with a class enumerate hook (where resolving a property changes the
     * resulting enumeration order). These aren't really bugs, but the
     * differences can change the generated output and confuse correctness
     * fuzzers, so we sort the ids if such a fuzzer is running.
     *
     * We don't do this in the general case because (a) doing so is slow,
     * and (b) it also breaks the web, which expects enumeration order to
     * follow the order in which properties are added, in certain cases.
     * Since ECMA does not specify an enumeration order for objects, both
     * behaviors are technically correct to do.
     */

    jsid* ids = props->begin();
    size_t n = props->length();

    AutoIdVector tmp(cx);
    if (!tmp.resize(n))
        return false;
    PodCopy(tmp.begin(), ids, n);

    if (!MergeSort(ids, n, tmp.begin(), SortComparatorIds(cx)))
        return false;

#endif /* JS_MORE_DETERMINISTIC */

    return true;
}
template<JSRope::UsingBarrier b>
JSFlatString *
JSRope::flattenInternal(ExclusiveContext *maybecx)
{
    /*
     * Perform a depth-first dag traversal, splatting each node's characters
     * into a contiguous buffer. Visit each rope node three times:
     *   1. record position in the buffer and recurse into left child;
     *   2. recurse into the right child;
     *   3. transform the node into a dependent string.
     * To avoid maintaining a stack, tree nodes are mutated to indicate how many
     * times they have been visited. Since ropes can be dags, a node may be
     * encountered multiple times during traversal. However, step 3 above leaves
     * a valid dependent string, so everything works out.
     *
     * While ropes avoid all sorts of quadratic cases with string concatenation,
     * they can't help when ropes are immediately flattened. One idiomatic case
     * that we'd like to keep linear (and has traditionally been linear in SM
     * and other JS engines) is:
     *
     *   while (...) {
     *     s += ...
     *     s.flatten
     *   }
     *
     * To do this, when the buffer for a to-be-flattened rope is allocated, the
     * allocation size is rounded up. Then, if the resulting flat string is the
     * left-hand side of a new rope that gets flattened and there is enough
     * capacity, the rope is flattened into the same buffer, thereby avoiding
     * copying the left-hand side. Clearing the 'extensible' bit turns off this
     * optimization. This is necessary, e.g., when the JSAPI hands out the raw
     * null-terminated char array of a flat string.
     *
     * N.B. This optimization can create chains of dependent strings.
     */
    const size_t wholeLength = length();
    size_t wholeCapacity;
    jschar *wholeChars;
    JSString *str = this;
    jschar *pos;

    /*
     * JSString::flattenData is a tagged pointer to the parent node.
     * The tag indicates what to do when we return to the parent.
     */
    static const uintptr_t Tag_Mask = 0x3;
    static const uintptr_t Tag_FinishNode = 0x0;
    static const uintptr_t Tag_VisitRightChild = 0x1;

    /* Find the leftmost string, containing the first string. */
    JSRope *leftMostRope = this;
    while (leftMostRope->leftChild()->isRope())
        leftMostRope = &leftMostRope->leftChild()->asRope();

    if (leftMostRope->leftChild()->isExtensible()) {
        JSExtensibleString &left = leftMostRope->leftChild()->asExtensible();
        size_t capacity = left.capacity();
        if (capacity >= wholeLength) {
            /*
             * Simulate a left-most traversal from the root to leftMost->leftChild()
             * via first_visit_node.
             */
            JS_ASSERT(str->isRope());
            while (str != leftMostRope) {
                if (b == WithIncrementalBarrier) {
                    JSString::writeBarrierPre(str->d.s.u2.left);
                    JSString::writeBarrierPre(str->d.s.u3.right);
                }
                JSString *child = str->d.s.u2.left;
                JS_ASSERT(child->isRope());
                str->d.s.u2.nonInlineCharsTwoByte = left.nonInlineChars();
                child->d.u1.flattenData = uintptr_t(str) | Tag_VisitRightChild;
                str = child;
            }
            if (b == WithIncrementalBarrier) {
                JSString::writeBarrierPre(str->d.s.u2.left);
                JSString::writeBarrierPre(str->d.s.u3.right);
            }
            str->d.s.u2.nonInlineCharsTwoByte = left.nonInlineChars();
            wholeCapacity = capacity;
            wholeChars = const_cast<jschar *>(left.nonInlineChars());
            pos = wholeChars + left.d.u1.length;
            JS_STATIC_ASSERT(!(EXTENSIBLE_FLAGS & DEPENDENT_FLAGS));
            left.d.u1.flags ^= (EXTENSIBLE_FLAGS | DEPENDENT_FLAGS);
            left.d.s.u3.base = (JSLinearString *)this;  /* will be true on exit */
            StringWriteBarrierPostRemove(maybecx, &left.d.s.u2.left);
            StringWriteBarrierPost(maybecx, (JSString **)&left.d.s.u3.base);
            goto visit_right_child;
        }
    }

    if (!AllocChars(maybecx, wholeLength, &wholeChars, &wholeCapacity))
        return nullptr;

    pos = wholeChars;
  first_visit_node: {
        if (b == WithIncrementalBarrier) {
            JSString::writeBarrierPre(str->d.s.u2.left);
            JSString::writeBarrierPre(str->d.s.u3.right);
        }

        JSString &left = *str->d.s.u2.left;
        str->d.s.u2.nonInlineCharsTwoByte = pos;
        StringWriteBarrierPostRemove(maybecx, &str->d.s.u2.left);
        if (left.isRope()) {
            /* Return to this node when 'left' done, then goto visit_right_child. */
            left.d.u1.flattenData = uintptr_t(str) | Tag_VisitRightChild;
            str = &left;
            goto first_visit_node;
        }
        size_t len = left.length();
        PodCopy(pos, left.asLinear().chars(), len);
        pos += len;
    }
  visit_right_child: {
        JSString &right = *str->d.s.u3.right;
        if (right.isRope()) {
            /* Return to this node when 'right' done, then goto finish_node. */
            right.d.u1.flattenData = uintptr_t(str) | Tag_FinishNode;
            str = &right;
            goto first_visit_node;
        }
        size_t len = right.length();
        PodCopy(pos, right.asLinear().chars(), len);
        pos += len;
    }
  finish_node: {
        if (str == this) {
            JS_ASSERT(pos == wholeChars + wholeLength);
            *pos = '\0';
            str->d.u1.length = wholeLength;
            str->d.u1.flags = EXTENSIBLE_FLAGS;
            str->d.s.u2.nonInlineCharsTwoByte = wholeChars;
            str->d.s.u3.capacity = wholeCapacity;
            StringWriteBarrierPostRemove(maybecx, &str->d.s.u2.left);
            StringWriteBarrierPostRemove(maybecx, &str->d.s.u3.right);
            return &this->asFlat();
        }
        uintptr_t flattenData = str->d.u1.flattenData;
        str->d.u1.flags = DEPENDENT_FLAGS;
        str->d.u1.length = pos - str->d.s.u2.nonInlineCharsTwoByte;
        str->d.s.u3.base = (JSLinearString *)this;  /* will be true on exit */
        StringWriteBarrierPost(maybecx, (JSString **)&str->d.s.u3.base);
        str = (JSString *)(flattenData & ~Tag_Mask);
        if ((flattenData & Tag_Mask) == Tag_VisitRightChild)
            goto visit_right_child;
        JS_ASSERT((flattenData & Tag_Mask) == Tag_FinishNode);
        goto finish_node;
    }
}
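The extensible-string fast path above is what keeps the classic append-then-flatten loop linear. A hedged sketch of the pattern from the caller's side (|BuildByAppending| and |piece| are illustrative names, not from the tree, and error handling is reduced to early returns):

// Repeated concatenation builds a rope in O(1) per step; each flatten then
// reuses the extensible left-hand buffer instead of recopying it.
static JSFlatString *
BuildByAppending(JSContext *cx, HandleString base, HandleString piece, int reps)
{
    RootedString s(cx, base);
    for (int i = 0; i < reps; i++) {
        s = ConcatStrings<CanGC>(cx, s, piece);  // O(1): allocates a rope node
        if (!s || !s->ensureFlat(cx))            // flatten each iteration, as in
            return nullptr;                      // the 's += ...; s.flatten' idiom
    }
    return s->ensureFlat(cx);
}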