// Detach and return the runtime's source hook, leaving the runtime's slot
// empty (Move() transfers ownership out of rt->sourceHook).
// NOTE(review): the return type is declared on a preceding line outside this
// view -- confirm against the declaration before relying on it.
js::ForgetSourceHook(JSRuntime* rt) { return Move(rt->sourceHook); }
// Exercise BinarySearch() over an int Vector (whole range, sub-range [1,3),
// empty range, empty vector) and over a projected view (GetAge(v3)): for each
// probe the assert checks both the found/not-found result and the insertion
// index written to |m|.
int main() { size_t m; Vector<int> v1; v1.append(2); v1.append(4); v1.append(6); v1.append(8); MOZ_RELEASE_ASSERT(!BinarySearch(v1, 0, v1.length(), 1, &m) && m == 0); MOZ_RELEASE_ASSERT( BinarySearch(v1, 0, v1.length(), 2, &m) && m == 0); MOZ_RELEASE_ASSERT(!BinarySearch(v1, 0, v1.length(), 3, &m) && m == 1); MOZ_RELEASE_ASSERT( BinarySearch(v1, 0, v1.length(), 4, &m) && m == 1); MOZ_RELEASE_ASSERT(!BinarySearch(v1, 0, v1.length(), 5, &m) && m == 2); MOZ_RELEASE_ASSERT( BinarySearch(v1, 0, v1.length(), 6, &m) && m == 2); MOZ_RELEASE_ASSERT(!BinarySearch(v1, 0, v1.length(), 7, &m) && m == 3); MOZ_RELEASE_ASSERT( BinarySearch(v1, 0, v1.length(), 8, &m) && m == 3); MOZ_RELEASE_ASSERT(!BinarySearch(v1, 0, v1.length(), 9, &m) && m == 4); MOZ_RELEASE_ASSERT(!BinarySearch(v1, 1, 3, 1, &m) && m == 1); MOZ_RELEASE_ASSERT(!BinarySearch(v1, 1, 3, 2, &m) && m == 1); MOZ_RELEASE_ASSERT(!BinarySearch(v1, 1, 3, 3, &m) && m == 1); MOZ_RELEASE_ASSERT( BinarySearch(v1, 1, 3, 4, &m) && m == 1); MOZ_RELEASE_ASSERT(!BinarySearch(v1, 1, 3, 5, &m) && m == 2); MOZ_RELEASE_ASSERT( BinarySearch(v1, 1, 3, 6, &m) && m == 2); MOZ_RELEASE_ASSERT(!BinarySearch(v1, 1, 3, 7, &m) && m == 3); MOZ_RELEASE_ASSERT(!BinarySearch(v1, 1, 3, 8, &m) && m == 3); MOZ_RELEASE_ASSERT(!BinarySearch(v1, 1, 3, 9, &m) && m == 3); MOZ_RELEASE_ASSERT(!BinarySearch(v1, 0, 0, 0, &m) && m == 0); MOZ_RELEASE_ASSERT(!BinarySearch(v1, 0, 0, 9, &m) && m == 0); Vector<int> v2; MOZ_RELEASE_ASSERT(!BinarySearch(v2, 0, 0, 0, &m) && m == 0); MOZ_RELEASE_ASSERT(!BinarySearch(v2, 0, 0, 9, &m) && m == 0); Vector<Person> v3; v3.append(Person(2, 42)); v3.append(Person(4, 13)); v3.append(Person(6, 360)); MOZ_RELEASE_ASSERT(!BinarySearch(GetAge(v3), 0, v3.length(), 1, &m) && m == 0); MOZ_RELEASE_ASSERT( BinarySearch(GetAge(v3), 0, v3.length(), 2, &m) && m == 0); MOZ_RELEASE_ASSERT(!BinarySearch(GetAge(v3), 0, v3.length(), 3, &m) && m == 1); MOZ_RELEASE_ASSERT( BinarySearch(GetAge(v3), 0, v3.length(), 4, &m) && m == 1); 
MOZ_RELEASE_ASSERT(!BinarySearch(GetAge(v3), 0, v3.length(), 5, &m) && m == 2); MOZ_RELEASE_ASSERT( BinarySearch(GetAge(v3), 0, v3.length(), 6, &m) && m == 2); MOZ_RELEASE_ASSERT(!BinarySearch(GetAge(v3), 0, v3.length(), 7, &m) && m == 3); }
// Convert a decoded linear-memory address into its AST representation.
// Wasm stores the byte alignment; the AST form carries its log2.
static AstLoadStoreAddress
AstDecodeLoadStoreAddress(const LinearMemoryAddress<AstDecodeStackItem>& addr)
{
    const uint32_t alignLog2 = FloorLog2(addr.align);
    return AstLoadStoreAddress(addr.base.expr, alignLog2, addr.offset);
}
// JSRuntime constructor.  The member-initializer list must match the field
// declaration order in JSRuntime; the body then performs only infallible
// setup so a failed initialization can still be torn down via
// JS_DestroyRuntime (see the comment in the body).
JSRuntime::JSRuntime(JSRuntime *parentRuntime) : JS::shadow::Runtime( #ifdef JSGC_GENERATIONAL &gc.storeBuffer #endif ), mainThread(this), parentRuntime(parentRuntime), interrupt(false), interruptPar(false), handlingSignal(false), interruptCallback(nullptr), interruptLock(nullptr), interruptLockOwner(nullptr), exclusiveAccessLock(nullptr), exclusiveAccessOwner(nullptr), mainThreadHasExclusiveAccess(false), numExclusiveThreads(0), numCompartments(0), localeCallbacks(nullptr), defaultLocale(nullptr), defaultVersion_(JSVERSION_DEFAULT), ownerThread_(nullptr), tempLifoAlloc(TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE), freeLifoAlloc(TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE), execAlloc_(nullptr), jitRuntime_(nullptr), selfHostingGlobal_(nullptr), nativeStackBase(0), cxCallback(nullptr), destroyCompartmentCallback(nullptr), destroyZoneCallback(nullptr), sweepZoneCallback(nullptr), compartmentNameCallback(nullptr), activityCallback(nullptr), activityCallbackArg(nullptr), requestDepth(0), #ifdef DEBUG checkRequestDepth(0), activeContext(nullptr), #endif gc(thisFromCtor()), gcInitialized(false), #if defined(JS_ARM_SIMULATOR) || defined(JS_MIPS_SIMULATOR) simulatorRuntime_(nullptr), #endif scriptAndCountsVector(nullptr), NaNValue(DoubleNaNValue()), negativeInfinityValue(DoubleValue(NegativeInfinity<double>())), positiveInfinityValue(DoubleValue(PositiveInfinity<double>())), emptyString(nullptr), #ifdef NIGHTLY_BUILD assertOnScriptEntryHook_(nullptr), #endif debugMode(false), spsProfiler(thisFromCtor()), profilingScripts(false), hadOutOfMemory(false), haveCreatedContext(false), data(nullptr), signalHandlersInstalled_(false), canUseSignalHandlers_(false), defaultFreeOp_(thisFromCtor()), debuggerMutations(0), securityCallbacks(const_cast<JSSecurityCallbacks *>(&NullSecurityCallbacks)), DOMcallbacks(nullptr), destroyPrincipals(nullptr), structuredCloneCallbacks(nullptr), telemetryCallback(nullptr), propertyRemovals(0), #if !EXPOSE_INTL_API thousandsSeparator(0), decimalSeparator(0), 
numGrouping(0), #endif mathCache_(nullptr), activeCompilations_(0), keepAtoms_(0), trustedPrincipals_(nullptr), beingDestroyed_(false), atoms_(nullptr), atomsCompartment_(nullptr), staticStrings(nullptr), commonNames(nullptr), permanentAtoms(nullptr), wellKnownSymbols(nullptr), wrapObjectCallbacks(&DefaultWrapObjectCallbacks), preserveWrapperCallback(nullptr), jitSupportsFloatingPoint(false), ionPcScriptCache(nullptr), threadPool(this), defaultJSContextCallback(nullptr), ctypesActivityCallback(nullptr), forkJoinWarmup(0), offthreadIonCompilationEnabled_(true), parallelParsingEnabled_(true), #ifdef DEBUG enteredPolicy(nullptr), #endif largeAllocationFailureCallback(nullptr), oomCallback(nullptr) { liveRuntimesCount++; /* Initialize infallibly first, so we can goto bad and JS_DestroyRuntime. */ JS_INIT_CLIST(&onNewGlobalObjectWatchers); PodArrayZero(nativeStackQuota); PodZero(&asmJSCacheOps); }
// Generate all remaining stub code for the module (export entries, import
// interp/jit exits, jump targets, the bad-indirect-call exit and the
// interrupt stub) in a separate MacroAssembler, merge it into masm_, record
// code ranges, fill in StaticLinkData offsets (including function-pointer
// tables), then convert/patch out-of-range thunks and finish the assembler.
// Returns false on OOM.  The ordering of the phases below matters: thunk
// conversion must come after all jump-emitting codegen, and patching after
// all thunks exist.
bool ModuleGenerator::finishCodegen(StaticLinkData* link) { uint32_t offsetInWhole = masm_.size(); // Generate stubs in a separate MacroAssembler since, otherwise, for modules // larger than the JumpImmediateRange, even local uses of Label will fail // due to the large absolute offsets temporarily stored by Label::bind(). Vector<Offsets> entries(cx_); Vector<ProfilingOffsets> interpExits(cx_); Vector<ProfilingOffsets> jitExits(cx_); EnumeratedArray<JumpTarget, JumpTarget::Limit, Offsets> jumpTargets; ProfilingOffsets badIndirectCallExit; Offsets interruptExit; { TempAllocator alloc(&lifo_); MacroAssembler masm(MacroAssembler::AsmJSToken(), alloc); if (!entries.resize(numExports())) return false; for (uint32_t i = 0; i < numExports(); i++) { uint32_t target = exportMap_->exportFuncIndices[i]; const Sig& sig = module_->exports[i].sig(); entries[i] = GenerateEntry(masm, target, sig, usesHeap()); } if (!interpExits.resize(numImports())) return false; if (!jitExits.resize(numImports())) return false; for (uint32_t i = 0; i < numImports(); i++) { interpExits[i] = GenerateInterpExit(masm, module_->imports[i], i); jitExits[i] = GenerateJitExit(masm, module_->imports[i], usesHeap()); } for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit)) jumpTargets[target] = GenerateJumpTarget(masm, target); badIndirectCallExit = GenerateBadIndirectCallExit(masm); interruptExit = GenerateInterruptStub(masm); if (masm.oom() || !masm_.asmMergeWith(masm)) return false; } // Adjust each of the resulting Offsets (to account for being merged into // masm_) and then create code ranges for all the stubs. 
for (uint32_t i = 0; i < numExports(); i++) { entries[i].offsetBy(offsetInWhole); module_->exports[i].initStubOffset(entries[i].begin); if (!module_->codeRanges.emplaceBack(CodeRange::Entry, entries[i])) return false; } for (uint32_t i = 0; i < numImports(); i++) { interpExits[i].offsetBy(offsetInWhole); module_->imports[i].initInterpExitOffset(interpExits[i].begin); if (!module_->codeRanges.emplaceBack(CodeRange::ImportInterpExit, interpExits[i])) return false; jitExits[i].offsetBy(offsetInWhole); module_->imports[i].initJitExitOffset(jitExits[i].begin); if (!module_->codeRanges.emplaceBack(CodeRange::ImportJitExit, jitExits[i])) return false; } for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit)) { jumpTargets[target].offsetBy(offsetInWhole); if (!module_->codeRanges.emplaceBack(CodeRange::Inline, jumpTargets[target])) return false; } badIndirectCallExit.offsetBy(offsetInWhole); if (!module_->codeRanges.emplaceBack(CodeRange::ErrorExit, badIndirectCallExit)) return false; interruptExit.offsetBy(offsetInWhole); if (!module_->codeRanges.emplaceBack(CodeRange::Inline, interruptExit)) return false; // Fill in StaticLinkData with the offsets of these stubs. 
link->pod.outOfBoundsOffset = jumpTargets[JumpTarget::OutOfBounds].begin; link->pod.interruptOffset = interruptExit.begin; for (uint32_t sigIndex = 0; sigIndex < numSigs_; sigIndex++) { const TableModuleGeneratorData& table = shared_->sigToTable[sigIndex]; if (table.elemFuncIndices.empty()) continue; Uint32Vector elemOffsets; if (!elemOffsets.resize(table.elemFuncIndices.length())) return false; for (size_t i = 0; i < table.elemFuncIndices.length(); i++) { uint32_t funcIndex = table.elemFuncIndices[i]; if (funcIndex == BadIndirectCall) elemOffsets[i] = badIndirectCallExit.begin; else elemOffsets[i] = funcEntry(funcIndex); } if (!link->funcPtrTables.emplaceBack(table.globalDataOffset, Move(elemOffsets))) return false; } // Only call convertOutOfRangeBranchesToThunks after all other codegen that may // emit new jumps to JumpTargets has finished. if (!convertOutOfRangeBranchesToThunks()) return false; // Now that all thunks have been generated, patch all the thunks. for (CallThunk& callThunk : module_->callThunks) { uint32_t funcIndex = callThunk.u.funcIndex; callThunk.u.codeRangeIndex = funcIndexToCodeRange_[funcIndex]; masm_.patchThunk(callThunk.offset, funcEntry(funcIndex)); } for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit)) { for (uint32_t thunkOffset : jumpThunks_[target]) masm_.patchThunk(thunkOffset, jumpTargets[target].begin); } // Code-generation is complete! masm_.finish(); return !masm_.oom(); }
// Flatten this rope into a single linear char buffer using an iterative
// depth-first traversal that stores the return path in each node's
// flattenData (tagged parent pointer) instead of an explicit stack; the
// detailed algorithm and the extensible-left-hand-side reuse optimization
// are described in the comment at the top of the body.
// NOTE(review): |b| is a parameter declared outside this view (it selects
// whether incremental write barriers are emitted) -- confirm against the
// enclosing template declaration.
JSFlatString * JSRope::flattenInternal(ExclusiveContext *maybecx) { /* * Perform a depth-first dag traversal, splatting each node's characters * into a contiguous buffer. Visit each rope node three times: * 1. record position in the buffer and recurse into left child; * 2. recurse into the right child; * 3. transform the node into a dependent string. * To avoid maintaining a stack, tree nodes are mutated to indicate how many * times they have been visited. Since ropes can be dags, a node may be * encountered multiple times during traversal. However, step 3 above leaves * a valid dependent string, so everything works out. * * While ropes avoid all sorts of quadratic cases with string * concatenation, they can't help when ropes are immediately flattened. * One idiomatic case that we'd like to keep linear (and has traditionally * been linear in SM and other JS engines) is: * * while (...) { * s += ... * s.flatten * } * * To do this, when the buffer for a to-be-flattened rope is allocated, the * allocation size is rounded up. Then, if the resulting flat string is the * left-hand side of a new rope that gets flattened and there is enough * capacity, the rope is flattened into the same buffer, thereby avoiding * copying the left-hand side. Clearing the 'extensible' bit turns off this * optimization. This is necessary, e.g., when the JSAPI hands out the raw * null-terminated char array of a flat string. * * N.B. This optimization can create chains of dependent strings. */ const size_t wholeLength = length(); size_t wholeCapacity; jschar *wholeChars; JSString *str = this; jschar *pos; /* * JSString::flattenData is a tagged pointer to the parent node. * The tag indicates what to do when we return to the parent. */ static const uintptr_t Tag_Mask = 0x3; static const uintptr_t Tag_FinishNode = 0x0; static const uintptr_t Tag_VisitRightChild = 0x1; /* Find the left most string, containing the first string. 
*/ JSRope *leftMostRope = this; while (leftMostRope->leftChild()->isRope()) leftMostRope = &leftMostRope->leftChild()->asRope(); if (leftMostRope->leftChild()->isExtensible()) { JSExtensibleString &left = leftMostRope->leftChild()->asExtensible(); size_t capacity = left.capacity(); if (capacity >= wholeLength) { /* * Simulate a left-most traversal from the root to leftMost->leftChild() * via first_visit_node */ JS_ASSERT(str->isRope()); while (str != leftMostRope) { if (b == WithIncrementalBarrier) { JSString::writeBarrierPre(str->d.s.u2.left); JSString::writeBarrierPre(str->d.s.u3.right); } JSString *child = str->d.s.u2.left; JS_ASSERT(child->isRope()); str->d.s.u2.nonInlineCharsTwoByte = left.nonInlineChars(); child->d.u1.flattenData = uintptr_t(str) | Tag_VisitRightChild; str = child; } if (b == WithIncrementalBarrier) { JSString::writeBarrierPre(str->d.s.u2.left); JSString::writeBarrierPre(str->d.s.u3.right); } str->d.s.u2.nonInlineCharsTwoByte = left.nonInlineChars(); wholeCapacity = capacity; wholeChars = const_cast<jschar *>(left.nonInlineChars()); pos = wholeChars + left.d.u1.length; JS_STATIC_ASSERT(!(EXTENSIBLE_FLAGS & DEPENDENT_FLAGS)); left.d.u1.flags ^= (EXTENSIBLE_FLAGS | DEPENDENT_FLAGS); left.d.s.u3.base = (JSLinearString *)this; /* will be true on exit */ StringWriteBarrierPostRemove(maybecx, &left.d.s.u2.left); StringWriteBarrierPost(maybecx, (JSString **)&left.d.s.u3.base); goto visit_right_child; } } if (!AllocChars(maybecx, wholeLength, &wholeChars, &wholeCapacity)) return nullptr; pos = wholeChars; first_visit_node: { if (b == WithIncrementalBarrier) { JSString::writeBarrierPre(str->d.s.u2.left); JSString::writeBarrierPre(str->d.s.u3.right); } JSString &left = *str->d.s.u2.left; str->d.s.u2.nonInlineCharsTwoByte = pos; StringWriteBarrierPostRemove(maybecx, &str->d.s.u2.left); if (left.isRope()) { /* Return to this node when 'left' done, then goto visit_right_child. 
*/ left.d.u1.flattenData = uintptr_t(str) | Tag_VisitRightChild; str = &left; goto first_visit_node; } size_t len = left.length(); PodCopy(pos, left.asLinear().chars(), len); pos += len; } visit_right_child: { JSString &right = *str->d.s.u3.right; if (right.isRope()) { /* Return to this node when 'right' done, then goto finish_node. */ right.d.u1.flattenData = uintptr_t(str) | Tag_FinishNode; str = &right; goto first_visit_node; } size_t len = right.length(); PodCopy(pos, right.asLinear().chars(), len); pos += len; } finish_node: { if (str == this) { JS_ASSERT(pos == wholeChars + wholeLength); *pos = '\0'; str->d.u1.length = wholeLength; str->d.u1.flags = EXTENSIBLE_FLAGS; str->d.s.u2.nonInlineCharsTwoByte = wholeChars; str->d.s.u3.capacity = wholeCapacity; StringWriteBarrierPostRemove(maybecx, &str->d.s.u2.left); StringWriteBarrierPostRemove(maybecx, &str->d.s.u3.right); return &this->asFlat(); } uintptr_t flattenData = str->d.u1.flattenData; str->d.u1.flags = DEPENDENT_FLAGS; str->d.u1.length = pos - str->d.s.u2.nonInlineCharsTwoByte; str->d.s.u3.base = (JSLinearString *)this; /* will be true on exit */ StringWriteBarrierPost(maybecx, (JSString **)&str->d.s.u3.base); str = (JSString *)(flattenData & ~Tag_Mask); if ((flattenData & Tag_Mask) == Tag_VisitRightChild) goto visit_right_child; JS_ASSERT((flattenData & Tag_Mask) == Tag_FinishNode); goto finish_node; } }
// Mark the top-level document associated with |channel| as having had
// tracking content blocked: sets the document flag, raises
// STATE_BLOCKED_TRACKING_CONTENT through the docShell's security event sink,
// and logs a console warning naming the blocked URI.  Every failure path
// deliberately returns NS_OK -- this notification is best-effort and must
// not affect the channel's cancellation.
// static nsresult nsChannelClassifier::SetBlockedTrackingContent(nsIChannel *channel) { // Can be called in EITHER the parent or child process. nsCOMPtr<nsIParentChannel> parentChannel; NS_QueryNotificationCallbacks(channel, parentChannel); if (parentChannel) { // This channel is a parent-process proxy for a child process request. The // actual channel will be notified via the status passed to // nsIRequest::Cancel and do this for us. return NS_OK; } nsresult rv; nsCOMPtr<nsIDOMWindow> win; nsCOMPtr<mozIThirdPartyUtil> thirdPartyUtil = do_GetService(THIRDPARTYUTIL_CONTRACTID, &rv); NS_ENSURE_SUCCESS(rv, NS_OK); rv = thirdPartyUtil->GetTopWindowForChannel(channel, getter_AddRefs(win)); NS_ENSURE_SUCCESS(rv, NS_OK); nsCOMPtr<nsPIDOMWindow> pwin = do_QueryInterface(win, &rv); NS_ENSURE_SUCCESS(rv, NS_OK); nsCOMPtr<nsIDocShell> docShell = pwin->GetDocShell(); if (!docShell) { return NS_OK; } nsCOMPtr<nsIDocument> doc = do_GetInterface(docShell, &rv); NS_ENSURE_SUCCESS(rv, NS_OK); // This event might come after the user has navigated to another page. // To prevent showing the TrackingProtection UI on the wrong page, we need to // check that the loading URI for the channel is the same as the URI currently // loaded in the document. if (!SameLoadingURI(doc, channel)) { return NS_OK; } // Notify nsIWebProgressListeners of this security event. // Can be used to change the UI state. nsCOMPtr<nsISecurityEventSink> eventSink = do_QueryInterface(docShell, &rv); NS_ENSURE_SUCCESS(rv, NS_OK); uint32_t state = 0; nsCOMPtr<nsISecureBrowserUI> securityUI; docShell->GetSecurityUI(getter_AddRefs(securityUI)); if (!securityUI) { return NS_OK; } doc->SetHasTrackingContentBlocked(true); securityUI->GetState(&state); state |= nsIWebProgressListener::STATE_BLOCKED_TRACKING_CONTENT; eventSink->OnSecurityChange(nullptr, state); // Log a warning to the web console. 
nsCOMPtr<nsIURI> uri; channel->GetURI(getter_AddRefs(uri)); nsCString utf8spec; uri->GetSpec(utf8spec); NS_ConvertUTF8toUTF16 spec(utf8spec); const char16_t* params[] = { spec.get() }; nsContentUtils::ReportToConsole(nsIScriptError::warningFlag, NS_LITERAL_CSTRING("Tracking Protection"), doc, nsContentUtils::eNECKO_PROPERTIES, "TrackingUriBlocked", params, ArrayLength(params)); return NS_OK; }
// Serialize one already-filtered value |v| onto scx->sb, implementing the
// ES5 15.12.3 Str algorithm (see the comment in the body for deviations:
// property retrieval and step 11 live in the callers to support streaming).
// Objects recurse through JA/JO with scx->depth tracking nesting.
static bool Str(JSContext *cx, const Value &v, StringifyContext *scx) { /* Step 11 must be handled by the caller. */ JS_ASSERT(!IsFilteredValue(v)); JS_CHECK_RECURSION(cx, return false); /* * This method implements the Str algorithm in ES5 15.12.3, but: * * * We move property retrieval (step 1) into callers to stream the * stringification process and avoid constantly copying strings. * * We move the preprocessing in steps 2-4 into a helper function to * allow both JO and JA to use this method. While JA could use it * without this move, JO must omit any |undefined|-valued property per * so it can't stream out a value using the Str method exactly as * defined by ES5. * * We move step 11 into callers, again to ease streaming. */ /* Step 8. */ if (v.isString()) return Quote(cx, scx->sb, v.toString()); /* Step 5. */ if (v.isNull()) return scx->sb.append("null"); /* Steps 6-7. */ if (v.isBoolean()) return v.toBoolean() ? scx->sb.append("true") : scx->sb.append("false"); /* Step 9. */ if (v.isNumber()) { if (v.isDouble()) { if (!IsFinite(v.toDouble())) return scx->sb.append("null"); } StringBuffer sb(cx); if (!NumberValueToStringBuffer(cx, v, sb)) return false; return scx->sb.append(sb.begin(), sb.length()); } /* Step 10. */ JS_ASSERT(v.isObject()); RootedObject obj(cx, &v.toObject()); scx->depth++; bool ok; if (ObjectClassIs(obj, ESClass_Array, cx)) ok = JA(cx, obj, scx); else ok = JO(cx, obj, scx); scx->depth--; return ok; }
// Exercise Maybe<T>'s core API: default state is Nothing; emplace() and the
// accessors (operator*, value(), ref(), ptr(), operator->); reset();
// emplace(T1); and assignment from Some()/Nothing().  BasicValue's status
// tag is used to check which constructor actually ran, and the
// static_asserts pin the accessors' return types (const-qualified through
// the const-ref alias at the end).
static bool TestBasicFeatures() { // Check that a Maybe<T> is initialized to Nothing. Maybe<BasicValue> mayValue; static_assert(IsSame<BasicValue, DECLTYPE(mayValue)::ValueType>::value, "Should have BasicValue ValueType"); MOZ_RELEASE_ASSERT(!mayValue); MOZ_RELEASE_ASSERT(!mayValue.isSome()); MOZ_RELEASE_ASSERT(mayValue.isNothing()); // Check that emplace() default constructs and the accessors work. mayValue.emplace(); MOZ_RELEASE_ASSERT(mayValue); MOZ_RELEASE_ASSERT(mayValue.isSome()); MOZ_RELEASE_ASSERT(!mayValue.isNothing()); MOZ_RELEASE_ASSERT(*mayValue == BasicValue()); MOZ_RELEASE_ASSERT(mayValue.value() == BasicValue()); static_assert(IsSame<BasicValue, DECLTYPE(mayValue.value())>::value, "value() should return a BasicValue"); MOZ_RELEASE_ASSERT(mayValue.ref() == BasicValue()); static_assert(IsSame<BasicValue&, DECLTYPE(mayValue.ref())>::value, "ref() should return a BasicValue&"); MOZ_RELEASE_ASSERT(mayValue.ptr() != nullptr); static_assert(IsSame<BasicValue*, DECLTYPE(mayValue.ptr())>::value, "ptr() should return a BasicValue*"); MOZ_RELEASE_ASSERT(mayValue->GetStatus() == eWasDefaultConstructed); // Check that reset() works. mayValue.reset(); MOZ_RELEASE_ASSERT(!mayValue); MOZ_RELEASE_ASSERT(!mayValue.isSome()); MOZ_RELEASE_ASSERT(mayValue.isNothing()); // Check that emplace(T1) calls the correct constructor. mayValue.emplace(1); MOZ_RELEASE_ASSERT(mayValue); MOZ_RELEASE_ASSERT(mayValue->GetStatus() == eWasConstructed); MOZ_RELEASE_ASSERT(mayValue->GetTag() == 1); mayValue.reset(); MOZ_RELEASE_ASSERT(!mayValue); // Check that Some() and Nothing() work. mayValue = Some(BasicValue(2)); MOZ_RELEASE_ASSERT(mayValue); MOZ_RELEASE_ASSERT(mayValue->GetStatus() == eWasMoveConstructed); MOZ_RELEASE_ASSERT(mayValue->GetTag() == 2); mayValue = Nothing(); MOZ_RELEASE_ASSERT(!mayValue); // Check that the accessors work through a const ref. 
mayValue.emplace(); const Maybe<BasicValue>& mayValueCRef = mayValue; MOZ_RELEASE_ASSERT(mayValueCRef); MOZ_RELEASE_ASSERT(mayValueCRef.isSome()); MOZ_RELEASE_ASSERT(!mayValueCRef.isNothing()); MOZ_RELEASE_ASSERT(*mayValueCRef == BasicValue()); MOZ_RELEASE_ASSERT(mayValueCRef.value() == BasicValue()); static_assert(IsSame<BasicValue, DECLTYPE(mayValueCRef.value())>::value, "value() should return a BasicValue"); MOZ_RELEASE_ASSERT(mayValueCRef.ref() == BasicValue()); static_assert(IsSame<const BasicValue&, DECLTYPE(mayValueCRef.ref())>::value, "ref() should return a const BasicValue&"); MOZ_RELEASE_ASSERT(mayValueCRef.ptr() != nullptr); static_assert(IsSame<const BasicValue*, DECLTYPE(mayValueCRef.ptr())>::value, "ptr() should return a const BasicValue*"); MOZ_RELEASE_ASSERT(mayValueCRef->GetStatus() == eWasDefaultConstructed); mayValue.reset(); return true; }
/*
 * gcparam(name[, value]) shell testing function.
 *
 * With one argument, returns the current value of the named GC parameter.
 * With two arguments, sets the parameter after validating it: JSGC_NUMBER
 * and JSGC_BYTES are read-only, and JSGC_MAX_BYTES may not be lowered below
 * the current gcBytes.  Returns false (with a pending error report) on any
 * invalid input.
 *
 * Fix: the unknown-parameter error message read "the first argument
 * argument must be ..." -- the duplicated word is removed.
 */
static JSBool
GCParameter(JSContext *cx, unsigned argc, jsval *vp)
{
    JSString *str;
    if (argc == 0) {
        // No name given: stringify |undefined|, which cannot fail.
        str = JS_ValueToString(cx, JSVAL_VOID);
        JS_ASSERT(str);
    } else {
        str = JS_ValueToString(cx, vp[2]);
        if (!str)
            return JS_FALSE;
        // Root the converted string in the argument slot.
        vp[2] = STRING_TO_JSVAL(str);
    }

    JSFlatString *flatStr = JS_FlattenString(cx, str);
    if (!flatStr)
        return false;

    // Look the name up in paramMap; report an error if it is unknown.
    size_t paramIndex = 0;
    for (;; paramIndex++) {
        if (paramIndex == ArrayLength(paramMap)) {
            JS_ReportError(cx,
                           "the first argument must be maxBytes, "
                           "maxMallocBytes, gcStackpoolLifespan, gcBytes or "
                           "gcNumber");
            return false;
        }
        if (JS_FlatStringEqualsAscii(flatStr, paramMap[paramIndex].name))
            break;
    }
    JSGCParamKey param = paramMap[paramIndex].param;

    // One argument: just report the current setting.
    if (argc == 1) {
        uint32_t value = JS_GetGCParameter(cx->runtime(), param);
        vp[0] = JS_NumberValue(value);
        return true;
    }

    if (param == JSGC_NUMBER || param == JSGC_BYTES) {
        JS_ReportError(cx, "Attempt to change read-only parameter %s",
                       paramMap[paramIndex].name);
        return false;
    }

    uint32_t value;
    if (!JS_ValueToECMAUint32(cx, vp[3], &value)) {
        JS_ReportError(cx,
                       "the second argument must be convertable to uint32_t "
                       "with non-zero value");
        return false;
    }

    // maxBytes may not be set below the heap's current usage.
    if (param == JSGC_MAX_BYTES) {
        uint32_t gcBytes = JS_GetGCParameter(cx->runtime(), JSGC_BYTES);
        if (value < gcBytes) {
            JS_ReportError(cx,
                           "attempt to set maxBytes to the value less than the current "
                           "gcBytes (%u)",
                           gcBytes);
            return false;
        }
    }

    JS_SetGCParameter(cx->runtime(), param, value);
    *vp = JSVAL_VOID;
    return true;
}
// countHeap([start[, kindName]]) shell testing function: traces the whole
// runtime heap (or only the graph reachable from |start|), counting visited
// things -- optionally restricted to one trace kind named in the second
// argument -- and returns the count as a number.  Visited nodes are queued
// on countTracer.traceList and recycled via recycleList to bound malloc
// traffic; countTracer.ok records mid-trace OOM.
static JSBool CountHeap(JSContext *cx, unsigned argc, jsval *vp) { jsval v; int32_t traceKind; JSString *str; JSCountHeapTracer countTracer; JSCountHeapNode *node; size_t counter; RootedValue startValue(cx, UndefinedValue()); if (argc > 0) { v = JS_ARGV(cx, vp)[0]; if (JSVAL_IS_TRACEABLE(v)) { startValue = v; } else if (!JSVAL_IS_NULL(v)) { JS_ReportError(cx, "the first argument is not null or a heap-allocated " "thing"); return JS_FALSE; } } traceKind = -1; if (argc > 1) { str = JS_ValueToString(cx, JS_ARGV(cx, vp)[1]); if (!str) return JS_FALSE; JSFlatString *flatStr = JS_FlattenString(cx, str); if (!flatStr) return JS_FALSE; for (size_t i = 0; ;) { if (JS_FlatStringEqualsAscii(flatStr, traceKindNames[i].name)) { traceKind = traceKindNames[i].kind; break; } if (++i == ArrayLength(traceKindNames)) { JSAutoByteString bytes(cx, str); if (!!bytes) JS_ReportError(cx, "trace kind name '%s' is unknown", bytes.ptr()); return JS_FALSE; } } } JS_TracerInit(&countTracer.base, JS_GetRuntime(cx), CountHeapNotify); if (!countTracer.visited.init()) { JS_ReportOutOfMemory(cx); return JS_FALSE; } countTracer.ok = true; countTracer.traceList = NULL; countTracer.recycleList = NULL; if (startValue.isUndefined()) { JS_TraceRuntime(&countTracer.base); } else { JS_CallValueTracer(&countTracer.base, startValue.address(), "root"); } counter = 0; while ((node = countTracer.traceList) != NULL) { if (traceKind == -1 || node->kind == traceKind) counter++; countTracer.traceList = node->next; node->next = countTracer.recycleList; countTracer.recycleList = node; JS_TraceChildren(&countTracer.base, node->thing, node->kind); } while ((node = countTracer.recycleList) != NULL) { countTracer.recycleList = node->next; js_free(node); } if (!countTracer.ok) { JS_ReportOutOfMemory(cx); return false; } *vp = JS_NumberValue((double) counter); return true; }
// Exercise NotNull<int*> and NotNull<MyPtr<int>>: construction via
// WrapNotNull, assignment, equality/inequality against raw pointers,
// MyPtr, and other NotNulls, implicit/explicit conversions as function
// arguments, '->' and '*' dereferencing, and array indexing.  The
// commented-out lines document combinations that must NOT compile
// (default construction, nullptr/zero, boolean conversion, etc.).
void TestNotNullWithMyPtr() { int i4 = 4; int i5 = 5; MyPtr<int> my4 = &i4; MyPtr<int> my5 = &i5; NotNull<int*> nni4 = WrapNotNull(&i4); NotNull<int*> nni5 = WrapNotNull(&i5); NotNull<MyPtr<int>> nnmy4 = WrapNotNull(my4); //WrapNotNull(nullptr); // no wrapping from nullptr //WrapNotNull(0); // no wrapping from zero // NotNull<int*> construction combinations //NotNull<int*> nni4a; // no default //NotNull<int*> nni4a(nullptr); // no nullptr //NotNull<int*> nni4a(0); // no zero //NotNull<int*> nni4a(&i4); // no int* //NotNull<int*> nni4a(my4); // no MyPtr<int> NotNull<int*> nni4b(WrapNotNull(&i4)); // WrapNotNull(int*) NotNull<int*> nni4c(WrapNotNull(my4)); // WrapNotNull(MyPtr<int>) NotNull<int*> nni4d(nni4); // NotNull<int*> NotNull<int*> nni4e(nnmy4); // NotNull<MyPtr<int>> CHECK(*nni4b == 4); CHECK(*nni4c == 4); CHECK(*nni4d == 4); CHECK(*nni4e == 4); // NotNull<MyPtr<int>> construction combinations //NotNull<MyPtr<int>> nnmy4a; // no default //NotNull<MyPtr<int>> nnmy4a(nullptr); // no nullptr //NotNull<MyPtr<int>> nnmy4a(0); // no zero //NotNull<MyPtr<int>> nnmy4a(&i4); // no int* //NotNull<MyPtr<int>> nnmy4a(my4); // no MyPtr<int> NotNull<MyPtr<int>> nnmy4b(WrapNotNull(&i4)); // WrapNotNull(int*) NotNull<MyPtr<int>> nnmy4c(WrapNotNull(my4)); // WrapNotNull(MyPtr<int>) NotNull<MyPtr<int>> nnmy4d(nni4); // NotNull<int*> NotNull<MyPtr<int>> nnmy4e(nnmy4); // NotNull<MyPtr<int>> CHECK(*nnmy4b == 4); CHECK(*nnmy4c == 4); CHECK(*nnmy4d == 4); CHECK(*nnmy4e == 4); // NotNull<int*> assignment combinations //nni4b = nullptr; // no nullptr //nni4b = 0; // no zero //nni4a = &i4; // no int* //nni4a = my4; // no MyPtr<int> nni4b = WrapNotNull(&i4); // WrapNotNull(int*) nni4c = WrapNotNull(my4); // WrapNotNull(MyPtr<int>) nni4d = nni4; // NotNull<int*> nni4e = nnmy4; // NotNull<MyPtr<int>> CHECK(*nni4b == 4); CHECK(*nni4c == 4); CHECK(*nni4d == 4); CHECK(*nni4e == 4); // NotNull<MyPtr<int>> assignment combinations //nnmy4a = nullptr; // no nullptr //nnmy4a = 0; // no zero 
//nnmy4a = &i4; // no int* //nnmy4a = my4; // no MyPtr<int> nnmy4b = WrapNotNull(&i4); // WrapNotNull(int*) nnmy4c = WrapNotNull(my4); // WrapNotNull(MyPtr<int>) nnmy4d = nni4; // NotNull<int*> nnmy4e = nnmy4; // NotNull<MyPtr<int>> CHECK(*nnmy4b == 4); CHECK(*nnmy4c == 4); CHECK(*nnmy4d == 4); CHECK(*nnmy4e == 4); NotNull<MyPtr<int>> nnmy5 = WrapNotNull(&i5); CHECK(*nnmy5 == 5); CHECK(nnmy5 == &i5); // NotNull<MyPtr<int>> == int* CHECK(nnmy5 == my5); // NotNull<MyPtr<int>> == MyPtr<int> CHECK(nnmy5 == nni5); // NotNull<MyPtr<int>> == NotNull<int*> CHECK(nnmy5 == nnmy5); // NotNull<MyPtr<int>> == NotNull<MyPtr<int>> CHECK(&i5 == nnmy5); // int* == NotNull<MyPtr<int>> CHECK(my5 == nnmy5); // MyPtr<int> == NotNull<MyPtr<int>> CHECK(nni5 == nnmy5); // NotNull<int*> == NotNull<MyPtr<int>> CHECK(nnmy5 == nnmy5); // NotNull<MyPtr<int>> == NotNull<MyPtr<int>> //CHECK(nni5 == nullptr); // no comparisons with nullptr //CHECK(nullptr == nni5); // no comparisons with nullptr //CHECK(nni5 == 0); // no comparisons with zero //CHECK(0 == nni5); // no comparisons with zero CHECK(*nnmy5 == 5); CHECK(nnmy5 != &i4); // NotNull<MyPtr<int>> != int* CHECK(nnmy5 != my4); // NotNull<MyPtr<int>> != MyPtr<int> CHECK(nnmy5 != nni4); // NotNull<MyPtr<int>> != NotNull<int*> CHECK(nnmy5 != nnmy4); // NotNull<MyPtr<int>> != NotNull<MyPtr<int>> CHECK(&i4 != nnmy5); // int* != NotNull<MyPtr<int>> CHECK(my4 != nnmy5); // MyPtr<int> != NotNull<MyPtr<int>> CHECK(nni4 != nnmy5); // NotNull<int*> != NotNull<MyPtr<int>> CHECK(nnmy4 != nnmy5); // NotNull<MyPtr<int>> != NotNull<MyPtr<int>> //CHECK(nni4 != nullptr); // no comparisons with nullptr //CHECK(nullptr != nni4); // no comparisons with nullptr //CHECK(nni4 != 0); // no comparisons with zero //CHECK(0 != nni4); // no comparisons with zero // int* parameter f_i(&i4); // identity int* --> int* f_i(my4); // implicit MyPtr<int> --> int* f_i(my4.get()); // explicit MyPtr<int> --> int* f_i(nni4); // implicit NotNull<int*> --> int* f_i(nni4.get()); // 
explicit NotNull<int*> --> int* //f_i(nnmy4); // no implicit NotNull<MyPtr<int>> --> int* f_i(nnmy4.get()); // explicit NotNull<MyPtr<int>> --> int* f_i(nnmy4.get().get());// doubly-explicit NotNull<MyPtr<int>> --> int* // MyPtr<int> parameter f_my(&i4); // implicit int* --> MyPtr<int> f_my(my4); // identity MyPtr<int> --> MyPtr<int> f_my(my4.get()); // explicit MyPtr<int> --> MyPtr<int> //f_my(nni4); // no implicit NotNull<int*> --> MyPtr<int> f_my(nni4.get()); // explicit NotNull<int*> --> MyPtr<int> f_my(nnmy4); // implicit NotNull<MyPtr<int>> --> MyPtr<int> f_my(nnmy4.get()); // explicit NotNull<MyPtr<int>> --> MyPtr<int> f_my(nnmy4.get().get());// doubly-explicit NotNull<MyPtr<int>> --> MyPtr<int> // NotNull<int*> parameter f_nni(nni4); // identity NotNull<int*> --> NotNull<int*> f_nni(nnmy4); // implicit NotNull<MyPtr<int>> --> NotNull<int*> // NotNull<MyPtr<int>> parameter f_nnmy(nni4); // implicit NotNull<int*> --> NotNull<MyPtr<int>> f_nnmy(nnmy4); // identity NotNull<MyPtr<int>> --> NotNull<MyPtr<int>> //CHECK(nni4); // disallow boolean conversion / unary expression usage //CHECK(nnmy4); // ditto // '->' dereferencing. Blah blah; MyPtr<Blah> myblah = &blah; NotNull<Blah*> nnblah = WrapNotNull(&blah); NotNull<MyPtr<Blah>> nnmyblah = WrapNotNull(myblah); (&blah)->blah(); // int* myblah->blah(); // MyPtr<int> nnblah->blah(); // NotNull<int*> nnmyblah->blah(); // NotNull<MyPtr<int>> (&blah)->mX = 1; CHECK((&blah)->mX == 1); myblah->mX = 2; CHECK(myblah->mX == 2); nnblah->mX = 3; CHECK(nnblah->mX == 3); nnmyblah->mX = 4; CHECK(nnmyblah->mX == 4); // '*' dereferencing (lvalues and rvalues) *(&i4) = 7; // int* CHECK(*(&i4) == 7); *my4 = 6; // MyPtr<int> CHECK(*my4 == 6); *nni4 = 5; // NotNull<int*> CHECK(*nni4 == 5); *nnmy4 = 4; // NotNull<MyPtr<int>> CHECK(*nnmy4 == 4); // Non-null arrays. 
static const int N = 20; int a[N]; NotNull<int*> nna = WrapNotNull(a); for (int i = 0; i < N; i++) { nna[i] = i; } for (int i = 0; i < N; i++) { nna[i] *= 2; } for (int i = 0; i < N; i++) { CHECK(nna[i] == i * 2); } }
// Copy |str|'s Latin-1 characters into |dest|.  The AutoCheckCannotGC
// guard is required to get at the raw character pointer.
void
CopyChars(Latin1Char *dest, const JSLinearString &str)
{
    AutoCheckCannotGC nogc;
    const Latin1Char *src = str.latin1Chars(nogc);
    const size_t count = str.length();
    PodCopy(dest, src, count);
}
static bool TestMap() { // Check that map handles the 'Nothing' case. Maybe<BasicValue> mayValue; MOZ_RELEASE_ASSERT(mayValue.map(&TimesTwo) == Nothing()); static_assert(IsSame<Maybe<int>, DECLTYPE(mayValue.map(&TimesTwo))>::value, "map(TimesTwo) should return a Maybe<int>"); MOZ_RELEASE_ASSERT(mayValue.map(&TimesTwoAndResetOriginal) == Nothing()); MOZ_RELEASE_ASSERT(mayValue.map(&TimesNum, 3) == Nothing()); static_assert(IsSame<Maybe<int>, DECLTYPE(mayValue.map(&TimesNum, 3))>::value, "map(TimesNum, 3) should return a Maybe<int>"); MOZ_RELEASE_ASSERT(mayValue.map(&TimesNumAndResetOriginal, 3) == Nothing()); // Check that map handles the 'Some' case. mayValue = Some(BasicValue(2)); MOZ_RELEASE_ASSERT(mayValue.map(&TimesTwo) == Some(4)); MOZ_RELEASE_ASSERT(mayValue.map(&TimesTwoAndResetOriginal) == Some(4)); MOZ_RELEASE_ASSERT(mayValue->GetTag() == 1); mayValue = Some(BasicValue(2)); MOZ_RELEASE_ASSERT(mayValue.map(&TimesNum, 3) == Some(6)); MOZ_RELEASE_ASSERT(mayValue.map(&TimesNumAndResetOriginal, 3) == Some(6)); MOZ_RELEASE_ASSERT(mayValue->GetTag() == 1); // Check that map works with a const reference. mayValue->SetTag(2); const Maybe<BasicValue>& mayValueCRef = mayValue; MOZ_RELEASE_ASSERT(mayValueCRef.map(&TimesTwo) == Some(4)); static_assert(IsSame<Maybe<int>, DECLTYPE(mayValueCRef.map(&TimesTwo))>::value, "map(TimesTwo) should return a Maybe<int>"); MOZ_RELEASE_ASSERT(mayValueCRef.map(&TimesNum, 3) == Some(6)); static_assert(IsSame<Maybe<int>, DECLTYPE(mayValueCRef.map(&TimesNum, 3))>::value, "map(TimesNum, 3) should return a Maybe<int>"); // Check that map works with functors. // XXX(seth): Support for functors will be added in bug 1054115; it had to be // ripped out temporarily because of incompatibilities with GCC 4.4. 
/* MultiplyTagFunctor tagMultiplier; MOZ_RELEASE_ASSERT(tagMultiplier.mBy.GetStatus() == eWasConstructed); MOZ_RELEASE_ASSERT(mayValue.map(tagMultiplier) == Some(4)); MOZ_RELEASE_ASSERT(tagMultiplier.mBy.GetStatus() == eWasConstructed); MOZ_RELEASE_ASSERT(mayValue.map(tagMultiplier, BasicValue(3)) == Some(6)); MOZ_RELEASE_ASSERT(tagMultiplier.mBy.GetStatus() == eWasConstructed); MOZ_RELEASE_ASSERT(tagMultiplier.mArgMoved == true); BasicValue multiplyBy(3); MOZ_RELEASE_ASSERT(mayValue.map(tagMultiplier, multiplyBy) == Some(6)); MOZ_RELEASE_ASSERT(tagMultiplier.mBy.GetStatus() == eWasConstructed); MOZ_RELEASE_ASSERT(tagMultiplier.mArgMoved == false); */ return true; }
// Collect into |props| the ids of the properties to be enumerated for
// |pobj_|, walking the prototype chain unless JSITER_OWNONLY is set.
// Three object flavors are handled per link of the chain: objects with a
// JSNewEnumerateOp hook, native objects, and proxies; anything else crashes.
// Returns false on error (OOM or a failing hook/proxy trap).
static bool
Snapshot(JSContext* cx, HandleObject pobj_, unsigned flags, AutoIdVector* props)
{
    // We initialize |ht| lazily (in Enumerate()) because it ends up unused
    // anywhere from 67--99.9% of the time.
    Maybe<IdSet> ht;
    RootedObject pobj(cx, pobj_);

    do {
        if (JSNewEnumerateOp enumerate = pobj->getOps()->enumerate) {
            // This hook has the full control over what gets enumerated.
            AutoIdVector properties(cx);
            if (!enumerate(cx, pobj, properties))
                return false;

            RootedId id(cx);
            for (size_t n = 0; n < properties.length(); n++) {
                id = properties[n];
                bool enumerable = true;

                // The enumerate hook does not indicate whether the properties
                // it returns are enumerable or not. There is no non-effectful
                // way to determine this from the object, so carve out
                // exceptions here for places where the property is not
                // enumerable.
                if (pobj->is<UnboxedArrayObject>() && id == NameToId(cx->names().length))
                    enumerable = false;

                if (!Enumerate(cx, pobj, id, enumerable, flags, ht, props))
                    return false;
            }

            if (pobj->isNative()) {
                if (!EnumerateNativeProperties(cx, pobj.as<NativeObject>(), flags, ht, props))
                    return false;
            }
        } else if (pobj->isNative()) {
            // Give the object a chance to resolve all lazy properties
            if (JSEnumerateOp enumerate = pobj->getClass()->enumerate) {
                if (!enumerate(cx, pobj.as<NativeObject>()))
                    return false;
            }
            if (!EnumerateNativeProperties(cx, pobj.as<NativeObject>(), flags, ht, props))
                return false;
        } else if (pobj->is<ProxyObject>()) {
            AutoIdVector proxyProps(cx);
            if (flags & JSITER_HIDDEN || flags & JSITER_SYMBOLS) {
                // This gets all property keys, both strings and
                // symbols. The call to Enumerate in the loop below
                // will filter out unwanted keys, per the flags.
                if (!Proxy::ownPropertyKeys(cx, pobj, proxyProps))
                    return false;

                Rooted<PropertyDescriptor> desc(cx);
                for (size_t n = 0, len = proxyProps.length(); n < len; n++) {
                    bool enumerable = false;

                    // We need to filter, if the caller just wants enumerable
                    // symbols.
                    if (!(flags & JSITER_HIDDEN)) {
                        if (!Proxy::getOwnPropertyDescriptor(cx, pobj, proxyProps[n], &desc))
                            return false;
                        enumerable = desc.enumerable();
                    }

                    if (!Enumerate(cx, pobj, proxyProps[n], enumerable, flags, ht, props))
                        return false;
                }
            } else {
                // Returns enumerable property names (no symbols).
                if (!Proxy::getOwnEnumerablePropertyKeys(cx, pobj, proxyProps))
                    return false;

                for (size_t n = 0, len = proxyProps.length(); n < len; n++) {
                    if (!Enumerate(cx, pobj, proxyProps[n], true, flags, ht, props))
                        return false;
                }
            }
        } else {
            MOZ_CRASH("non-native objects must have an enumerate op");
        }

        if (flags & JSITER_OWNONLY)
            break;

        if (!GetPrototype(cx, pobj, &pobj))
            return false;

    } while (pobj != nullptr);

#ifdef JS_MORE_DETERMINISTIC

    /*
     * In some cases the enumeration order for an object depends on the
     * execution mode (interpreter vs. JIT), especially for native objects
     * with a class enumerate hook (where resolving a property changes the
     * resulting enumeration order). These aren't really bugs, but the
     * differences can change the generated output and confuse correctness
     * fuzzers, so we sort the ids if such a fuzzer is running.
     *
     * We don't do this in the general case because (a) doing so is slow,
     * and (b) it also breaks the web, which expects enumeration order to
     * follow the order in which properties are added, in certain cases.
     * Since ECMA does not specify an enumeration order for objects, both
     * behaviors are technically correct to do.
     */

    jsid* ids = props->begin();
    size_t n = props->length();

    AutoIdVector tmp(cx);
    if (!tmp.resize(n))
        return false;
    PodCopy(tmp.begin(), ids, n);

    if (!MergeSort(ids, n, tmp.begin(), SortComparatorIds(cx)))
        return false;

#endif /* JS_MORE_DETERMINISTIC */

    return true;
}
// Unit test for mozilla::Maybe's copy/move behavior: moves are preferred when
// the payload type supports them, copies are used otherwise, and move-only /
// neither-copyable-nor-movable types still work via emplace. The GetStatus()
// checks rely on BasicValue & friends recording how they were constructed.
static bool
TestCopyAndMove()
{
  // Check that we get moves when possible for types that can support both moves
  // and copies.
  Maybe<BasicValue> mayBasicValue = Some(BasicValue(1));
  MOZ_RELEASE_ASSERT(mayBasicValue->GetStatus() == eWasMoveConstructed);
  MOZ_RELEASE_ASSERT(mayBasicValue->GetTag() == 1);
  mayBasicValue = Some(BasicValue(2));
  MOZ_RELEASE_ASSERT(mayBasicValue->GetStatus() == eWasMoveAssigned);
  MOZ_RELEASE_ASSERT(mayBasicValue->GetTag() == 2);
  mayBasicValue.reset();
  mayBasicValue.emplace(BasicValue(3));
  MOZ_RELEASE_ASSERT(mayBasicValue->GetStatus() == eWasMoveConstructed);
  MOZ_RELEASE_ASSERT(mayBasicValue->GetTag() == 3);

  // Check that we get copies when moves aren't possible (the source is an
  // lvalue here, so it must not be moved from).
  Maybe<BasicValue> mayBasicValue2 = Some(*mayBasicValue);
  MOZ_RELEASE_ASSERT(mayBasicValue2->GetStatus() == eWasCopyConstructed);
  MOZ_RELEASE_ASSERT(mayBasicValue2->GetTag() == 3);
  mayBasicValue->SetTag(4);
  mayBasicValue2 = mayBasicValue;
  // This test should work again when we fix bug 1052940.
  //MOZ_RELEASE_ASSERT(mayBasicValue2->GetStatus() == eWasCopyAssigned);
  MOZ_RELEASE_ASSERT(mayBasicValue2->GetTag() == 4);
  mayBasicValue->SetTag(5);
  mayBasicValue2.reset();
  mayBasicValue2.emplace(*mayBasicValue);
  MOZ_RELEASE_ASSERT(mayBasicValue2->GetStatus() == eWasCopyConstructed);
  MOZ_RELEASE_ASSERT(mayBasicValue2->GetTag() == 5);

  // Check that Move() works. (Another sanity check for move support.)
  Maybe<BasicValue> mayBasicValue3 = Some(Move(*mayBasicValue));
  MOZ_RELEASE_ASSERT(mayBasicValue3->GetStatus() == eWasMoveConstructed);
  MOZ_RELEASE_ASSERT(mayBasicValue3->GetTag() == 5);
  MOZ_RELEASE_ASSERT(mayBasicValue->GetStatus() == eWasMovedFrom);
  mayBasicValue2->SetTag(6);
  mayBasicValue3 = Some(Move(*mayBasicValue2));
  MOZ_RELEASE_ASSERT(mayBasicValue3->GetStatus() == eWasMoveAssigned);
  MOZ_RELEASE_ASSERT(mayBasicValue3->GetTag() == 6);
  MOZ_RELEASE_ASSERT(mayBasicValue2->GetStatus() == eWasMovedFrom);
  Maybe<BasicValue> mayBasicValue4;
  mayBasicValue4.emplace(Move(*mayBasicValue3));
  MOZ_RELEASE_ASSERT(mayBasicValue4->GetStatus() == eWasMoveConstructed);
  MOZ_RELEASE_ASSERT(mayBasicValue4->GetTag() == 6);
  MOZ_RELEASE_ASSERT(mayBasicValue3->GetStatus() == eWasMovedFrom);

  // Check that we always get copies for types that don't support moves.
  // XXX(seth): These tests fail but probably shouldn't. For now we'll just
  // consider using Maybe with types that allow copies but have deleted or
  // private move constructors, or which do not support copy assignment, to
  // be supported only to the extent that we need for existing code to work.
  // These tests should work again when we fix bug 1052940.
  /*
  Maybe<UnmovableValue> mayUnmovableValue = Some(UnmovableValue());
  MOZ_RELEASE_ASSERT(mayUnmovableValue->GetStatus() == eWasCopyConstructed);
  mayUnmovableValue = Some(UnmovableValue());
  MOZ_RELEASE_ASSERT(mayUnmovableValue->GetStatus() == eWasCopyAssigned);
  mayUnmovableValue.reset();
  mayUnmovableValue.emplace(UnmovableValue());
  MOZ_RELEASE_ASSERT(mayUnmovableValue->GetStatus() == eWasCopyConstructed);
  */

  // Check that types that only support moves, but not copies, work.
  Maybe<UncopyableValue> mayUncopyableValue = Some(UncopyableValue());
  MOZ_RELEASE_ASSERT(mayUncopyableValue->GetStatus() == eWasMoveConstructed);
  mayUncopyableValue = Some(UncopyableValue());
  MOZ_RELEASE_ASSERT(mayUncopyableValue->GetStatus() == eWasMoveAssigned);
  mayUncopyableValue.reset();
  mayUncopyableValue.emplace(UncopyableValue());
  MOZ_RELEASE_ASSERT(mayUncopyableValue->GetStatus() == eWasMoveConstructed);

  // Check that types that support neither moves or copies work (only
  // in-place construction via emplace is possible for them).
  Maybe<UncopyableUnmovableValue> mayUncopyableUnmovableValue;
  mayUncopyableUnmovableValue.emplace();
  MOZ_RELEASE_ASSERT(mayUncopyableUnmovableValue->GetStatus() == eWasDefaultConstructed);
  mayUncopyableUnmovableValue.reset();
  mayUncopyableUnmovableValue.emplace(0);
  MOZ_RELEASE_ASSERT(mayUncopyableUnmovableValue->GetStatus() == eWasConstructed);

  return true;
}
/*
 * Since memory has been exhausted, avoid the normal error-handling path which
 * allocates an error object, report and callstack. If code is running, simply
 * throw the static atom "out of memory". If code is not running, call the
 * error reporter directly.
 *
 * Furthermore, callers of js_ReportOutOfMemory (viz., malloc) assume a GC does
 * not occur, so GC must be avoided or suppressed.
 */
void
js_ReportOutOfMemory(ThreadSafeContext *cxArg)
{
#ifdef JS_MORE_DETERMINISTIC
    /*
     * OOMs are non-deterministic, especially across different execution modes
     * (e.g. interpreter vs JIT). In more-deterministic builds, print to stderr
     * so that the fuzzers can detect this.
     */
    fprintf(stderr, "js_ReportOutOfMemory called\n");
#endif

    // Fork-join (parallel) contexts record a fatal bailout instead of
    // throwing; non-JSContext threads have nowhere to report at all.
    if (cxArg->isForkJoinContext()) {
        cxArg->asForkJoinContext()->setPendingAbortFatal(ParallelBailoutOutOfMemory);
        return;
    }

    if (!cxArg->isJSContext())
        return;
    JSContext *cx = cxArg->asJSContext();

    cx->runtime()->hadOutOfMemory = true;

    /* Report the oom. */
    if (JS::OutOfMemoryCallback oomCallback = cx->runtime()->oomCallback) {
        AutoSuppressGC suppressGC(cx);
        oomCallback(cx, cx->runtime()->oomCallbackData);
    }

    // While code runs, throw the preallocated "out of memory" atom, which
    // needs no allocation.
    if (JS_IsRunning(cx)) {
        cx->setPendingException(StringValue(cx->names().outOfMemory));
        return;
    }

    /* Get the message for this error, but we don't expand any arguments. */
    const JSErrorFormatString *efs =
        js_GetLocalizedErrorMessage(cx, nullptr, nullptr, JSMSG_OUT_OF_MEMORY);
    const char *msg = efs ? efs->format : "Out of memory";

    /* Fill out the report, but don't do anything that requires allocation. */
    JSErrorReport report;
    PodZero(&report);
    report.flags = JSREPORT_ERROR;
    report.errorNumber = JSMSG_OUT_OF_MEMORY;
    PopulateReportBlame(cx, &report);

    /* Report the error. */
    if (JSErrorReporter onError = cx->errorReporter) {
        AutoSuppressGC suppressGC(cx);
        onError(cx, msg, &report);
    }

    /*
     * We would like to enforce the invariant that any exception reported
     * during an OOM situation does not require wrapping. Besides avoiding
     * allocation when memory is low, this reduces the number of places where
     * we might need to GC.
     *
     * When JS code is running, we set the pending exception to an atom, which
     * does not need wrapping. If no JS code is running, no exception should be
     * set at all.
     */
    JS_ASSERT(!cx->isExceptionPending());
}
// Unit test for Maybe's functional accessors (valueOr / valueOrFrom,
// ptrOr / ptrOrFrom, refOr / refOrFrom) in four configurations:
// Some vs. Nothing, each through both a mutable value and a const reference.
// The const-reference cases additionally check that the pointer/reference
// returning accessors become const-qualified.
static bool
TestFunctionalAccessors()
{
  BasicValue value(9);
  sStaticBasicValue = new BasicValue(9);

  // Check that the 'some' case of functional accessors works.
  Maybe<BasicValue> someValue = Some(BasicValue(3));
  MOZ_RELEASE_ASSERT(someValue.valueOr(value) == BasicValue(3));
  static_assert(IsSame<BasicValue,
                       DECLTYPE(someValue.valueOr(value))>::value,
                "valueOr should return a BasicValue");
  MOZ_RELEASE_ASSERT(someValue.valueOrFrom(&MakeBasicValue) == BasicValue(3));
  static_assert(IsSame<BasicValue,
                       DECLTYPE(someValue.valueOrFrom(&MakeBasicValue))>::value,
                "valueOrFrom should return a BasicValue");
  MOZ_RELEASE_ASSERT(someValue.ptrOr(&value) != &value);
  static_assert(IsSame<BasicValue*,
                       DECLTYPE(someValue.ptrOr(&value))>::value,
                "ptrOr should return a BasicValue*");
  MOZ_RELEASE_ASSERT(*someValue.ptrOrFrom(&MakeBasicValuePtr) == BasicValue(3));
  static_assert(IsSame<BasicValue*,
                       DECLTYPE(someValue.ptrOrFrom(&MakeBasicValuePtr))>::value,
                "ptrOrFrom should return a BasicValue*");
  MOZ_RELEASE_ASSERT(someValue.refOr(value) == BasicValue(3));
  static_assert(IsSame<BasicValue&,
                       DECLTYPE(someValue.refOr(value))>::value,
                "refOr should return a BasicValue&");
  MOZ_RELEASE_ASSERT(someValue.refOrFrom(&MakeBasicValueRef) == BasicValue(3));
  static_assert(IsSame<BasicValue&,
                       DECLTYPE(someValue.refOrFrom(&MakeBasicValueRef))>::value,
                "refOrFrom should return a BasicValue&");

  // Check that the 'some' case works through a const reference.
  const Maybe<BasicValue>& someValueCRef = someValue;
  MOZ_RELEASE_ASSERT(someValueCRef.valueOr(value) == BasicValue(3));
  static_assert(IsSame<BasicValue,
                       DECLTYPE(someValueCRef.valueOr(value))>::value,
                "valueOr should return a BasicValue");
  MOZ_RELEASE_ASSERT(someValueCRef.valueOrFrom(&MakeBasicValue) == BasicValue(3));
  static_assert(IsSame<BasicValue,
                       DECLTYPE(someValueCRef.valueOrFrom(&MakeBasicValue))>::value,
                "valueOrFrom should return a BasicValue");
  MOZ_RELEASE_ASSERT(someValueCRef.ptrOr(&value) != &value);
  static_assert(IsSame<const BasicValue*,
                       DECLTYPE(someValueCRef.ptrOr(&value))>::value,
                "ptrOr should return a const BasicValue*");
  MOZ_RELEASE_ASSERT(*someValueCRef.ptrOrFrom(&MakeBasicValuePtr) == BasicValue(3));
  static_assert(IsSame<const BasicValue*,
                       DECLTYPE(someValueCRef.ptrOrFrom(&MakeBasicValuePtr))>::value,
                "ptrOrFrom should return a const BasicValue*");
  MOZ_RELEASE_ASSERT(someValueCRef.refOr(value) == BasicValue(3));
  static_assert(IsSame<const BasicValue&,
                       DECLTYPE(someValueCRef.refOr(value))>::value,
                "refOr should return a const BasicValue&");
  MOZ_RELEASE_ASSERT(someValueCRef.refOrFrom(&MakeBasicValueRef) == BasicValue(3));
  static_assert(IsSame<const BasicValue&,
                       DECLTYPE(someValueCRef.refOrFrom(&MakeBasicValueRef))>::value,
                "refOrFrom should return a const BasicValue&");

  // Check that the 'none' case of functional accessors works (the fallback
  // value/pointer/reference or the *From factory result must be used).
  Maybe<BasicValue> noneValue;
  MOZ_RELEASE_ASSERT(noneValue.valueOr(value) == BasicValue(9));
  static_assert(IsSame<BasicValue,
                       DECLTYPE(noneValue.valueOr(value))>::value,
                "valueOr should return a BasicValue");
  MOZ_RELEASE_ASSERT(noneValue.valueOrFrom(&MakeBasicValue) == BasicValue(9));
  static_assert(IsSame<BasicValue,
                       DECLTYPE(noneValue.valueOrFrom(&MakeBasicValue))>::value,
                "valueOrFrom should return a BasicValue");
  MOZ_RELEASE_ASSERT(noneValue.ptrOr(&value) == &value);
  static_assert(IsSame<BasicValue*,
                       DECLTYPE(noneValue.ptrOr(&value))>::value,
                "ptrOr should return a BasicValue*");
  MOZ_RELEASE_ASSERT(*noneValue.ptrOrFrom(&MakeBasicValuePtr) == BasicValue(9));
  static_assert(IsSame<BasicValue*,
                       DECLTYPE(noneValue.ptrOrFrom(&MakeBasicValuePtr))>::value,
                "ptrOrFrom should return a BasicValue*");
  MOZ_RELEASE_ASSERT(noneValue.refOr(value) == BasicValue(9));
  static_assert(IsSame<BasicValue&,
                       DECLTYPE(noneValue.refOr(value))>::value,
                "refOr should return a BasicValue&");
  MOZ_RELEASE_ASSERT(noneValue.refOrFrom(&MakeBasicValueRef) == BasicValue(9));
  static_assert(IsSame<BasicValue&,
                       DECLTYPE(noneValue.refOrFrom(&MakeBasicValueRef))>::value,
                "refOrFrom should return a BasicValue&");

  // Check that the 'none' case works through a const reference.
  const Maybe<BasicValue>& noneValueCRef = noneValue;
  MOZ_RELEASE_ASSERT(noneValueCRef.valueOr(value) == BasicValue(9));
  static_assert(IsSame<BasicValue,
                       DECLTYPE(noneValueCRef.valueOr(value))>::value,
                "valueOr should return a BasicValue");
  MOZ_RELEASE_ASSERT(noneValueCRef.valueOrFrom(&MakeBasicValue) == BasicValue(9));
  static_assert(IsSame<BasicValue,
                       DECLTYPE(noneValueCRef.valueOrFrom(&MakeBasicValue))>::value,
                "valueOrFrom should return a BasicValue");
  MOZ_RELEASE_ASSERT(noneValueCRef.ptrOr(&value) == &value);
  static_assert(IsSame<const BasicValue*,
                       DECLTYPE(noneValueCRef.ptrOr(&value))>::value,
                "ptrOr should return a const BasicValue*");
  MOZ_RELEASE_ASSERT(*noneValueCRef.ptrOrFrom(&MakeBasicValuePtr) == BasicValue(9));
  static_assert(IsSame<const BasicValue*,
                       DECLTYPE(noneValueCRef.ptrOrFrom(&MakeBasicValuePtr))>::value,
                "ptrOrFrom should return a const BasicValue*");
  MOZ_RELEASE_ASSERT(noneValueCRef.refOr(value) == BasicValue(9));
  static_assert(IsSame<const BasicValue&,
                       DECLTYPE(noneValueCRef.refOr(value))>::value,
                "refOr should return a const BasicValue&");
  MOZ_RELEASE_ASSERT(noneValueCRef.refOrFrom(&MakeBasicValueRef) == BasicValue(9));
  static_assert(IsSame<const BasicValue&,
                       DECLTYPE(noneValueCRef.refOrFrom(&MakeBasicValueRef))>::value,
                "refOrFrom should return a const BasicValue&");

  // Clean up so the undestroyed objects count stays accurate.
  delete sStaticBasicValue;
  sStaticBasicValue = nullptr;

  return true;
}
// Report how many bytes this stream will occupy when serialized: exactly
// the current length of the underlying string data, widened to uint64_t.
Maybe<uint64_t> nsStringInputStream::ExpectedSerializedLength() { const uint64_t length = static_cast<uint64_t>(Length()); return Some(length); }
// Unit test for mozilla::Maybe::map with function pointers, functors, and
// (capture-by-reference and capture-by-copy) lambdas, over both the Nothing
// and Some cases and through a const reference. TimesTwoAndResetOriginal
// mutates the stored value, so the asserts are order-sensitive.
static bool
TestMap()
{
  // Check that map handles the 'Nothing' case: the function must not run and
  // the result is Nothing().
  Maybe<BasicValue> mayValue;
  MOZ_RELEASE_ASSERT(mayValue.map(&TimesTwo) == Nothing());
  static_assert(IsSame<Maybe<int>, DECLTYPE(mayValue.map(&TimesTwo))>::value,
                "map(TimesTwo) should return a Maybe<int>");
  MOZ_RELEASE_ASSERT(mayValue.map(&TimesTwoAndResetOriginal) == Nothing());

  // Check that map handles the 'Some' case; GetTag() == 1 confirms that
  // TimesTwoAndResetOriginal really reset the stored value.
  mayValue = Some(BasicValue(2));
  MOZ_RELEASE_ASSERT(mayValue.map(&TimesTwo) == Some(4));
  MOZ_RELEASE_ASSERT(mayValue.map(&TimesTwoAndResetOriginal) == Some(4));
  MOZ_RELEASE_ASSERT(mayValue->GetTag() == 1);
  mayValue = Some(BasicValue(2));

  // Check that map works with a const reference.
  mayValue->SetTag(2);
  const Maybe<BasicValue>& mayValueCRef = mayValue;
  MOZ_RELEASE_ASSERT(mayValueCRef.map(&TimesTwo) == Some(4));
  static_assert(IsSame<Maybe<int>, DECLTYPE(mayValueCRef.map(&TimesTwo))>::value,
                "map(TimesTwo) should return a Maybe<int>");

  // Check that map works with functors.
  MultiplyTagFunctor tagMultiplier;
  MOZ_RELEASE_ASSERT(tagMultiplier.mBy.GetStatus() == eWasConstructed);
  MOZ_RELEASE_ASSERT(mayValue.map(tagMultiplier) == Some(4));
  MOZ_RELEASE_ASSERT(tagMultiplier.mBy.GetStatus() == eWasConstructed);

  // Check that map works with lambda expressions.
  int two = 2;
  mayValue = Some(BasicValue(2));
  Maybe<int> mappedValue =
    mayValue.map([&](const BasicValue& aVal) {
      return aVal.GetTag() * two;
    });
  MOZ_RELEASE_ASSERT(mappedValue == Some(4));
  mappedValue =
    mayValue.map([=](const BasicValue& aVal) {
      return aVal.GetTag() * two;
    });
  MOZ_RELEASE_ASSERT(mappedValue == Some(4));
  mappedValue =
    mayValueCRef.map([&](const BasicValue& aVal) {
      return aVal.GetTag() * two;
    });
  MOZ_RELEASE_ASSERT(mappedValue == Some(4));

  return true;
}
// Construct GC statistics for |rt|: zero all counters and per-phase timing
// arrays, lazily (once per process, guarded by a local static) build the
// static table of DAG descendants and per-phase tree depths, then consult the
// MOZ_GCTIMER environment variable to decide where timing output goes
// (nowhere, stdout, stderr, or an append-mode file).
Statistics::Statistics(JSRuntime* rt)
  : runtime(rt),
    startupTime(PRMJ_Now()),
    fp(nullptr),
    fullFormat(false),
    gcDepth(0),
    nonincrementalReason(nullptr),
    timedGCStart(0),
    preBytes(0),
    maxPauseInInterval(0),
    phaseNestingDepth(0),
    activeDagSlot(PHASE_DAG_NONE),
    suspendedPhaseNestingDepth(0),
    sliceCallback(nullptr),
    aborted(false)
{
    PodArrayZero(phaseTotals);
    PodArrayZero(counts);
    PodArrayZero(phaseStartTimes);
    for (size_t d = 0; d < MAX_MULTIPARENT_PHASES + 1; d++)
        PodArrayZero(phaseTimes[d]);

    // NOTE(review): this one-time init is not thread-safe if two runtimes are
    // constructed concurrently — presumably callers serialize runtime
    // creation; confirm.
    static bool initialized = false;
    if (!initialized) {
        initialized = true;

        // Sanity-check that the phases table is ordered by its own indices.
        for (size_t i = 0; i < PHASE_LIMIT; i++)
            MOZ_ASSERT(phases[i].index == i);

        // Create a static table of descendants for every phase with multiple
        // children. This assumes that all descendants come linearly in the
        // list, which is reasonable since full dags are not supported; any
        // path from the leaf to the root must encounter at most one node with
        // multiple parents.
        size_t dagSlot = 0;
        for (size_t i = 0; i < mozilla::ArrayLength(dagChildEdges); i++) {
            Phase parent = dagChildEdges[i].parent;
            if (!phaseExtra[parent].dagSlot)
                phaseExtra[parent].dagSlot = ++dagSlot;

            Phase child = dagChildEdges[i].child;
            MOZ_ASSERT(phases[child].parent == PHASE_MULTI_PARENTS);
            int j = child;
            do {
                dagDescendants[phaseExtra[parent].dagSlot].append(Phase(j));
                j++;
            } while (j != PHASE_LIMIT && phases[j].parent != PHASE_MULTI_PARENTS);
        }
        MOZ_ASSERT(dagSlot <= MAX_MULTIPARENT_PHASES);

        // Fill in the depth of each node in the tree. Multi-parented nodes
        // have depth 0.
        mozilla::Vector<Phase> stack;
        stack.append(PHASE_LIMIT); // Dummy entry to avoid special-casing the first node
        for (int i = 0; i < PHASE_LIMIT; i++) {
            if (phases[i].parent == PHASE_NO_PARENT ||
                phases[i].parent == PHASE_MULTI_PARENTS)
            {
                stack.clear();
            } else {
                while (stack.back() != phases[i].parent)
                    stack.popBack();
            }
            phaseExtra[i].depth = stack.length();
            stack.append(Phase(i));
        }
    }

    char* env = getenv("MOZ_GCTIMER");
    if (!env || strcmp(env, "none") == 0) {
        fp = nullptr;
        return;
    }

    if (strcmp(env, "stdout") == 0) {
        fullFormat = false;
        fp = stdout;
    } else if (strcmp(env, "stderr") == 0) {
        fullFormat = false;
        fp = stderr;
    } else {
        fullFormat = true;
        fp = fopen(env, "a");
        MOZ_ASSERT(fp);
    }
}
// Compile-time/behavioral test helper: Some(nullptr) deduces a Maybe of
// nullptr type that must convert to Maybe<int*>. Kept in this exact form —
// rewriting it (e.g. via a typed local) would defeat what it exercises.
static Maybe<int*>
ReturnSomeNullptr()
{
  return Some(nullptr);
}
// Walk all not-yet-patched internal callsites: patch calls whose callee is
// defined and within JumpRange() directly; otherwise route the call through a
// thunk, creating at most one thunk per callee (tracked in |alreadyThunked|).
// Then emit a thunk for every pending jump to a stub target (stubs are
// generated at the end of the module, so all such jumps are out of range by
// construction) and flush the jump sites. Returns false on OOM.
bool
ModuleGenerator::convertOutOfRangeBranchesToThunks()
{
    masm_.haltingAlign(CodeAlignment);

    // Create thunks for callsites that have gone out of range. Use a map to
    // create one thunk for each callee since there is often high reuse.

    OffsetMap alreadyThunked(cx_);
    if (!alreadyThunked.init())
        return false;

    for (; lastPatchedCallsite_ < masm_.callSites().length(); lastPatchedCallsite_++) {
        const CallSiteAndTarget& cs = masm_.callSites()[lastPatchedCallsite_];
        if (!cs.isInternal())
            continue;

        uint32_t callerOffset = cs.returnAddressOffset();
        MOZ_RELEASE_ASSERT(callerOffset < INT32_MAX);

        if (funcIsDefined(cs.targetIndex())) {
            uint32_t calleeOffset = funcEntry(cs.targetIndex());
            MOZ_RELEASE_ASSERT(calleeOffset < INT32_MAX);

            // Close enough to reach directly? Then no thunk is needed.
            if (uint32_t(abs(int32_t(calleeOffset) - int32_t(callerOffset))) < JumpRange()) {
                masm_.patchCall(callerOffset, calleeOffset);
                continue;
            }
        }

        OffsetMap::AddPtr p = alreadyThunked.lookupForAdd(cs.targetIndex());
        if (!p) {
            Offsets offsets;
            offsets.begin = masm_.currentOffset();
            uint32_t thunkOffset = masm_.thunkWithPatch().offset();
            if (masm_.oom())
                return false;
            offsets.end = masm_.currentOffset();

            if (!module_->codeRanges.emplaceBack(CodeRange::CallThunk, offsets))
                return false;
            if (!module_->callThunks.emplaceBack(thunkOffset, cs.targetIndex()))
                return false;
            if (!alreadyThunked.add(p, cs.targetIndex(), offsets.begin))
                return false;
        }

        // |p| now points at this callee's thunk (new or reused).
        masm_.patchCall(callerOffset, p->value());
    }

    // Create thunks for jumps to stubs. Stubs are always generated at the end
    // so unconditionally thunk all existing jump sites.

    for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit)) {
        if (masm_.jumpSites()[target].empty())
            continue;

        // Bind every recorded jump site for this target to the thunk emitted
        // just below.
        for (uint32_t jumpSite : masm_.jumpSites()[target]) {
            RepatchLabel label;
            label.use(jumpSite);
            masm_.bind(&label);
        }

        Offsets offsets;
        offsets.begin = masm_.currentOffset();
        uint32_t thunkOffset = masm_.thunkWithPatch().offset();
        if (masm_.oom())
            return false;
        offsets.end = masm_.currentOffset();

        if (!module_->codeRanges.emplaceBack(CodeRange::Inline, offsets))
            return false;
        if (!jumpThunks_[target].append(thunkOffset))
            return false;
    }

    // Unlike callsites, which need to be persisted in the Module, we can simply
    // flush jump sites after each patching pass.
    masm_.clearJumpSites();

    return true;
}
// Test helper: builds a Maybe<Derived*> via Some(d) and returns it as
// Maybe<Base*>, exercising Maybe's converting construction across a pointer
// upcast. The Derived-typed local is the point of the test — keep it.
static Maybe<Base*>
ReturnDerivedPointer()
{
  Derived* d = nullptr;
  return Some(d);
}
// Register |aTemplate| with this stylesheet: take ownership of its
// instruction list, record it under its name (if named), and — if it has a
// match pattern — insert one MatchableTemplate per simple sub-pattern into
// |aImportFrame|'s per-mode list, kept sorted by descending priority.
// Union patterns are split into their sub-patterns, each inserted separately.
nsresult
txStylesheet::addTemplate(txTemplateItem* aTemplate,
                          ImportFrame* aImportFrame)
{
    NS_ASSERTION(aTemplate, "missing template");

    txInstruction* instr = aTemplate->mFirstInstruction;
    nsresult rv = mTemplateInstructions.add(instr);
    NS_ENSURE_SUCCESS(rv, rv);

    // mTemplateInstructions now owns the instructions
    aTemplate->mFirstInstruction.forget();

    if (!aTemplate->mName.isNull()) {
        // A duplicate name (NS_ERROR_XSLT_ALREADY_SET) is tolerated here;
        // any other failure propagates.
        rv = mNamedTemplates.add(aTemplate->mName, instr);
        NS_ENSURE_TRUE(NS_SUCCEEDED(rv) || rv == NS_ERROR_XSLT_ALREADY_SET, rv);
    }

    if (!aTemplate->mMatch) {
        // This is no error, see section 6 Named Templates
        return NS_OK;
    }

    // get the txList for the right mode
    nsTArray<MatchableTemplate>* templates =
        aImportFrame->mMatchableTemplates.get(aTemplate->mMode);

    if (!templates) {
        // First template for this mode: create and register a new list.
        nsAutoPtr< nsTArray<MatchableTemplate> > newList(
            new nsTArray<MatchableTemplate>);
        NS_ENSURE_TRUE(newList, NS_ERROR_OUT_OF_MEMORY);

        rv = aImportFrame->mMatchableTemplates.set(aTemplate->mMode, newList);
        NS_ENSURE_SUCCESS(rv, rv);

        // The hashtable now owns the list; keep a raw pointer for use below.
        templates = newList.forget();
    }

    // Add the simple patterns to the list of matchable templates, according
    // to default priority
    nsAutoPtr<txPattern> simple = Move(aTemplate->mMatch);
    nsAutoPtr<txPattern> unionPattern;
    if (simple->getType() == txPattern::UNION_PATTERN) {
        unionPattern = Move(simple);
        simple = unionPattern->getSubPatternAt(0);
        unionPattern->setSubPatternAt(0, nullptr);
    }
    uint32_t unionPos = 1; // only used when unionPattern is set
    while (simple) {
        double priority = aTemplate->mPrio;
        if (mozilla::IsNaN(priority)) {
            // No explicit priority on the template: use the pattern's default.
            priority = simple->getDefaultPriority();
            NS_ASSERTION(!mozilla::IsNaN(priority),
                         "simple pattern without default priority");
        }

        // Find the insertion point that keeps the list sorted by
        // descending priority.
        uint32_t i, len = templates->Length();
        for (i = 0; i < len; ++i) {
            if (priority > (*templates)[i].mPriority) {
                break;
            }
        }

        MatchableTemplate* nt = templates->InsertElementAt(i);
        NS_ENSURE_TRUE(nt, NS_ERROR_OUT_OF_MEMORY);

        nt->mFirstInstruction = instr;
        nt->mMatch = Move(simple);
        nt->mPriority = priority;

        if (unionPattern) {
            // Detach the next sub-pattern of the union, if any, for the
            // next loop iteration.
            simple = unionPattern->getSubPatternAt(unionPos);
            if (simple) {
                unionPattern->setSubPatternAt(unionPos, nullptr);
            }
            ++unionPos;
        }
    }

    return NS_OK;
}
// Generate all remaining stubs (entries, import exits, jump targets, the
// interrupt stub) in a temporary MacroAssembler, merge them into masm_,
// rebase their offsets by the pre-merge size, record code ranges and link
// data, thunk any out-of-range branches, patch all thunks, and finalize
// masm_. Returns false on OOM.
bool
ModuleGenerator::finishCodegen()
{
    uint32_t offsetInWhole = masm_.size();

    uint32_t numFuncExports = metadata_->funcExports.length();
    MOZ_ASSERT(numFuncExports == exportedFuncs_.count());

    // Generate stubs in a separate MacroAssembler since, otherwise, for modules
    // larger than the JumpImmediateRange, even local uses of Label will fail
    // due to the large absolute offsets temporarily stored by Label::bind().

    OffsetVector entries;
    ProfilingOffsetVector interpExits;
    ProfilingOffsetVector jitExits;
    EnumeratedArray<JumpTarget, JumpTarget::Limit, Offsets> jumpTargets;
    Offsets interruptExit;

    {
        TempAllocator alloc(&lifo_);
        MacroAssembler masm(MacroAssembler::AsmJSToken(), alloc);

        if (!entries.resize(numFuncExports))
            return false;
        for (uint32_t i = 0; i < numFuncExports; i++)
            entries[i] = GenerateEntry(masm, metadata_->funcExports[i]);

        if (!interpExits.resize(numFuncImports()))
            return false;
        if (!jitExits.resize(numFuncImports()))
            return false;
        for (uint32_t i = 0; i < numFuncImports(); i++) {
            interpExits[i] = GenerateInterpExit(masm, metadata_->funcImports[i], i);
            jitExits[i] = GenerateJitExit(masm, metadata_->funcImports[i]);
        }

        for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit))
            jumpTargets[target] = GenerateJumpTarget(masm, target);

        interruptExit = GenerateInterruptStub(masm);

        if (masm.oom() || !masm_.asmMergeWith(masm))
            return false;
    }

    // Adjust each of the resulting Offsets (to account for being merged into
    // masm_) and then create code ranges for all the stubs.

    for (uint32_t i = 0; i < numFuncExports; i++) {
        entries[i].offsetBy(offsetInWhole);
        metadata_->funcExports[i].initEntryOffset(entries[i].begin);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::Entry, entries[i]))
            return false;
    }

    for (uint32_t i = 0; i < numFuncImports(); i++) {
        interpExits[i].offsetBy(offsetInWhole);
        metadata_->funcImports[i].initInterpExitOffset(interpExits[i].begin);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::ImportInterpExit, interpExits[i]))
            return false;

        jitExits[i].offsetBy(offsetInWhole);
        metadata_->funcImports[i].initJitExitOffset(jitExits[i].begin);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::ImportJitExit, jitExits[i]))
            return false;
    }

    for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit)) {
        jumpTargets[target].offsetBy(offsetInWhole);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, jumpTargets[target]))
            return false;
    }

    interruptExit.offsetBy(offsetInWhole);
    if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, interruptExit))
        return false;

    // Fill in LinkData with the offsets of these stubs.

    linkData_.interruptOffset = interruptExit.begin;
    linkData_.outOfBoundsOffset = jumpTargets[JumpTarget::OutOfBounds].begin;
    linkData_.unalignedAccessOffset = jumpTargets[JumpTarget::UnalignedAccess].begin;
    linkData_.badIndirectCallOffset = jumpTargets[JumpTarget::BadIndirectCall].begin;

    // Only call convertOutOfRangeBranchesToThunks after all other codegen that may
    // emit new jumps to JumpTargets has finished.

    if (!convertOutOfRangeBranchesToThunks())
        return false;

    // Now that all thunks have been generated, patch all the thunks.

    for (CallThunk& callThunk : metadata_->callThunks) {
        uint32_t funcIndex = callThunk.u.funcIndex;
        callThunk.u.codeRangeIndex = funcIndexToCodeRange_[funcIndex];
        masm_.patchThunk(callThunk.offset, funcCodeRange(funcIndex).funcNonProfilingEntry());
    }

    for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit)) {
        for (uint32_t thunkOffset : jumpThunks_[target])
            masm_.patchThunk(thunkOffset, jumpTargets[target].begin);
    }

    // Code-generation is complete!

    masm_.finish();
    return !masm_.oom();
}
// Fire this timer's callback once. Handles cancellation, debug-log delay
// statistics, safe invocation of the three callback kinds (function,
// nsITimerCallback interface, nsIObserver), callbacks that re-initialize the
// timer from within Fire(), and rescheduling of repeating timers.
void nsTimerImpl::Fire()
{
  // A canceled timer must never run its callback.
  if (mCanceled) {
    return;
  }

#if !defined(MOZILLA_XPCOMRT_API)
  PROFILER_LABEL("Timer", "Fire", js::ProfileEntry::Category::OTHER);
#endif

  TimeStamp now = TimeStamp::Now();
  // When timer debug logging is enabled, accumulate statistics about how far
  // the actual firing time drifted from the requested delay.
  if (MOZ_LOG_TEST(GetTimerLog(), LogLevel::Debug)) {
    TimeDuration a = now - mStart; // actual delay in intervals
    TimeDuration b = TimeDuration::FromMilliseconds(mDelay); // expected delay in intervals
    TimeDuration delta = (a > b) ? a - b : b - a;
    uint32_t d = delta.ToMilliseconds(); // delta in ms
    sDeltaSum += d;
    sDeltaSumSquared += double(d) * double(d);
    sDeltaNum++;

    MOZ_LOG(GetTimerLog(), LogLevel::Debug,
            ("[this=%p] expected delay time %4ums\n", this, mDelay));
    MOZ_LOG(GetTimerLog(), LogLevel::Debug,
            ("[this=%p] actual delay time %fms\n", this, a.ToMilliseconds()));
    MOZ_LOG(GetTimerLog(), LogLevel::Debug,
            ("[this=%p] (mType is %d) -------\n", this, mType));
    MOZ_LOG(GetTimerLog(), LogLevel::Debug,
            ("[this=%p] delta %4dms\n", this, (a > b) ? (int32_t)d : -(int32_t)d));

    // Shift the start-time history for the next delta measurement.
    mStart = mStart2;
    mStart2 = TimeStamp();
  }

  TimeStamp timeout = mTimeout;
  if (IsRepeatingPrecisely()) {
    // Precise repeating timers advance mTimeout by mDelay without fail before
    // calling Fire().
    timeout -= TimeDuration::FromMilliseconds(mDelay);
  }
  // NOTE(review): |timeout| is not read again anywhere in this function —
  // looks like dead code left behind by a refactor; confirm before removing.

  // Expose the interface callback while firing so Cancel() from another
  // consumer can see which callback is currently running.
  if (mCallbackType == CallbackType::Interface) {
    mTimerCallbackWhileFiring = mCallback.i;
  }
  mFiring = true;

  // Handle callbacks that re-init the timer, but avoid leaking.
  // See bug 330128.
  // Take a local, owning copy of the callback before releasing the member,
  // so a callback that re-initializes this timer can't destroy the object
  // we are about to invoke.
  CallbackUnion callback = mCallback;
  CallbackType callbackType = mCallbackType;
  if (callbackType == CallbackType::Interface) {
    NS_ADDREF(callback.i);
  } else if (callbackType == CallbackType::Observer) {
    NS_ADDREF(callback.o);
  }
  ReleaseCallback();

  if (MOZ_LOG_TEST(GetTimerFiringsLog(), LogLevel::Debug)) {
    LogFiring(callbackType, callback);
  }

  // Dispatch on the callback kind; CallbackType::Unknown (and anything else)
  // deliberately does nothing.
  switch (callbackType) {
    case CallbackType::Function:
      callback.c(this, mClosure);
      break;
    case CallbackType::Interface:
      callback.i->Notify(this);
      break;
    case CallbackType::Observer:
      callback.o->Observe(static_cast<nsITimer*>(this), NS_TIMER_CALLBACK_TOPIC,
                          nullptr);
      break;
    default:
      ;
  }

  // If the callback didn't re-init the timer, and it's not a one-shot timer,
  // restore the callback state.
  if (mCallbackType == CallbackType::Unknown &&
      mType != TYPE_ONE_SHOT && !mCanceled) {
    mCallback = callback;
    mCallbackType = callbackType;
  } else {
    // The timer was a one-shot, or the callback was reinitialized.
    // Drop the reference we took above; the union no longer owns it.
    if (callbackType == CallbackType::Interface) {
      NS_RELEASE(callback.i);
    } else if (callbackType == CallbackType::Observer) {
      NS_RELEASE(callback.o);
    }
  }

  mFiring = false;
  mTimerCallbackWhileFiring = nullptr;

  MOZ_LOG(GetTimerLog(), LogLevel::Debug,
          ("[this=%p] Took %fms to fire timer callback\n",
           this, (TimeStamp::Now() - now).ToMilliseconds()));

  // Reschedule repeating timers, but make sure that we aren't armed already
  // (which can happen if the callback reinitialized the timer).
  if (IsRepeating() && !mArmed) {
    if (mType == TYPE_REPEATING_SLACK) {
      SetDelayInternal(mDelay); // force mTimeout to be recomputed. For
    }                           // REPEATING_PRECISE_CAN_SKIP timers this has
                                // already happened.
    if (gThread) {
      gThread->AddTimer(this);
    }
  }
}
// Construct an empty argument holder: zero accumulated length, zero
// arguments, nothing heap-allocated yet, and the args_ array zero-filled.
AutoMessageArgs()
  : totalLength_(0),
    count_(0),
    allocatedElements_(false)
{
    PodArrayZero(args_);
}
// Concatenate |left| and |right| into a new string. Empty operands short-
// circuit to the other string. If the combined characters fit in an inline
// string (and we're on the main JSContext), both sides are linearized and
// their characters copied into a freshly allocated inline string; otherwise
// a rope node referencing both halves is returned.
// NOTE(review): |allowGC| is a template parameter declared above this
// excerpt — confirm against the full definition.
JSString *
js::ConcatStrings(ExclusiveContext *cx,
                  typename MaybeRooted<JSString*, allowGC>::HandleType left,
                  typename MaybeRooted<JSString*, allowGC>::HandleType right)
{
    // Non-atom strings must belong to the current zone.
    MOZ_ASSERT_IF(!left->isAtom(), cx->isInsideCurrentZone(left));
    MOZ_ASSERT_IF(!right->isAtom(), cx->isInsideCurrentZone(right));

    // Concatenating with an empty string is the identity operation.
    size_t leftLen = left->length();
    if (leftLen == 0)
        return right;

    size_t rightLen = right->length();
    if (rightLen == 0)
        return left;

    // Reject results longer than the engine's maximum string length.
    size_t wholeLength = leftLen + rightLen;
    if (!JSString::validateLength(cx, wholeLength))
        return nullptr;

    // The result can stay Latin-1 only if both inputs are Latin-1.
    bool isLatin1 = left->hasLatin1Chars() && right->hasLatin1Chars();
    bool canUseInline = isLatin1
                        ? JSInlineString::lengthFits<Latin1Char>(wholeLength)
                        : JSInlineString::lengthFits<char16_t>(wholeLength);
    if (canUseInline && cx->isJSContext()) {
        Latin1Char *latin1Buf = nullptr; // initialize to silence GCC warning
        char16_t *twoByteBuf = nullptr;  // initialize to silence GCC warning
        JSInlineString *str = isLatin1
            ? AllocateInlineString<allowGC>(cx, wholeLength, &latin1Buf)
            : AllocateInlineString<allowGC>(cx, wholeLength, &twoByteBuf);
        if (!str)
            return nullptr;

        // From here on we hold raw character pointers into GC things, so no
        // GC may occur until the copies below are finished.
        AutoCheckCannotGC nogc;
        JSLinearString *leftLinear = left->ensureLinear(cx);
        if (!leftLinear)
            return nullptr;
        JSLinearString *rightLinear = right->ensureLinear(cx);
        if (!rightLinear)
            return nullptr;

        if (isLatin1) {
            // Both sides are Latin-1: straight byte copies, then the
            // terminating NUL.
            PodCopy(latin1Buf, leftLinear->latin1Chars(nogc), leftLen);
            PodCopy(latin1Buf + leftLen, rightLinear->latin1Chars(nogc), rightLen);
            latin1Buf[wholeLength] = 0;
        } else {
            // Two-byte result: copy two-byte inputs directly, inflate any
            // Latin-1 input to two-byte on the way in.
            if (leftLinear->hasTwoByteChars())
                PodCopy(twoByteBuf, leftLinear->twoByteChars(nogc), leftLen);
            else
                CopyAndInflateChars(twoByteBuf, leftLinear->latin1Chars(nogc), leftLen);
            if (rightLinear->hasTwoByteChars())
                PodCopy(twoByteBuf + leftLen, rightLinear->twoByteChars(nogc), rightLen);
            else
                CopyAndInflateChars(twoByteBuf + leftLen, rightLinear->latin1Chars(nogc), rightLen);
            twoByteBuf[wholeLength] = 0;
        }

        return str;
    }

    // Too long for an inline string (or no JSContext): build a rope that
    // defers the character copy until the string is flattened.
    return JSRope::new_<allowGC>(cx, left, right, wholeLength);
}
// Install |hook| as the runtime's source hook, transferring ownership to the
// runtime and destroying any previously installed hook.
js::SetSourceHook(JSRuntime* rt, UniquePtr<SourceHook> hook)
{
    rt->sourceHook = Move(hook);
}