// Compile this regexp's pattern. For sticky ('y') regexps the engine has no
// native support, so the pattern is wrapped as "^(?:pattern)" and |::execute|
// pseudo-slices the input at the sticky start position. Returns false on OOM.
bool RegExpShared::compile(JSContext *cx, JSAtom *source) {
    // Non-sticky: compile the pattern text as-is.
    if (!sticky())
        return code.compile(cx, *source, &parenCount, getFlags());

    /*
     * The sticky case we implement hackily by prepending a caret onto the front
     * and relying on |::execute| to pseudo-slice the string when it sees a sticky regexp.
     */
    static const jschar prefix[] = {'^', '(', '?', ':'};
    static const jschar postfix[] = {')'};

    using mozilla::ArrayLength;
    StringBuffer sb(cx);
    // Reserve the full length up front so the appends below are infallible.
    if (!sb.reserve(ArrayLength(prefix) + source->length() + ArrayLength(postfix)))
        return false;
    sb.infallibleAppend(prefix, ArrayLength(prefix));
    sb.infallibleAppend(source->chars(), source->length());
    sb.infallibleAppend(postfix, ArrayLength(postfix));

    // Atomize the wrapped pattern and compile that instead of |source|.
    JSAtom *fakeySource = sb.finishAtom();
    if (!fakeySource)
        return false;
    return code.compile(cx, *fakeySource, &parenCount, getFlags());
}
// Compile this regexp from its stored |source| pattern, logging the compile in
// the trace logger. Sticky regexps are rewritten as "^(?:pattern)" (see below)
// before delegating to the pattern-taking overload. Returns false on OOM.
bool RegExpShared::compile(JSContext *cx, bool matchOnly, const jschar *sampleChars, size_t sampleLength) {
    TraceLogger *logger = TraceLoggerForMainThread(cx->runtime());
    AutoTraceLog logCompile(logger, TraceLogger::IrregexpCompile);

    // Non-sticky: compile the stored pattern directly (rooted for GC safety).
    if (!sticky()) {
        RootedAtom pattern(cx, source);
        return compile(cx, pattern, matchOnly, sampleChars, sampleLength);
    }

    /*
     * The sticky case we implement hackily by prepending a caret onto the front
     * and relying on |::execute| to pseudo-slice the string when it sees a sticky regexp.
     */
    static const jschar prefix[] = {'^', '(', '?', ':'};
    static const jschar postfix[] = {')'};

    using mozilla::ArrayLength;
    StringBuffer sb(cx);
    // Reserve the full length up front so the appends below are infallible.
    if (!sb.reserve(ArrayLength(prefix) + source->length() + ArrayLength(postfix)))
        return false;
    sb.infallibleAppend(prefix, ArrayLength(prefix));
    sb.infallibleAppend(source->chars(), source->length());
    sb.infallibleAppend(postfix, ArrayLength(postfix));

    RootedAtom fakeySource(cx, sb.finishAtom());
    if (!fakeySource)
        return false;
    return compile(cx, fakeySource, matchOnly, sampleChars, sampleLength);
}
// Compile this regexp against |input|, logging the compile in the trace
// logger. Sticky regexps are rewritten as "^(?:pattern)" (see below) before
// delegating to the pattern-taking overload. Returns false on OOM.
bool RegExpShared::compile(JSContext* cx, HandleLinearString input, CompilationMode mode, ForceByteCodeEnum force) {
    TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime());
    AutoTraceLog logCompile(logger, TraceLogger_IrregexpCompile);

    // Non-sticky: compile the stored pattern directly (rooted for GC safety).
    if (!sticky()) {
        RootedAtom pattern(cx, source);
        return compile(cx, pattern, input, mode, force);
    }

    /*
     * The sticky case we implement hackily by prepending a caret onto the front
     * and relying on |::execute| to pseudo-slice the string when it sees a sticky regexp.
     */
    static const char prefix[] = {'^', '(', '?', ':'};
    static const char postfix[] = {')'};

    using mozilla::ArrayLength;
    StringBuffer sb(cx);
    // Reserve the full length up front; prefix/postfix appends are then
    // infallible. Appending |source| itself may still inflate to two-byte
    // chars, hence the fallible append.
    if (!sb.reserve(ArrayLength(prefix) + source->length() + ArrayLength(postfix)))
        return false;
    sb.infallibleAppend(prefix, ArrayLength(prefix));
    if (!sb.append(source))
        return false;
    sb.infallibleAppend(postfix, ArrayLength(postfix));

    RootedAtom fakeySource(cx, sb.finishAtom());
    if (!fakeySource)
        return false;
    return compile(cx, fakeySource, input, mode, force);
}
// Decide during GC sweeping whether this RegExpShared should be destroyed.
// Returns true when the object must be swept (deleted by the caller); returns
// false — after clearing the mark bit — when it survives this collection.
bool RegExpShared::needsSweep(JSRuntime* rt) {
    // Sometimes RegExpShared instances are marked without the compartment
    // being subsequently cleared. This can happen if a GC is restarted while
    // in progress (i.e. performing a full GC in the middle of an incremental
    // GC) or if a RegExpShared referenced via the stack is traced but is not
    // in a zone being collected.
    //
    // Because of this we only treat the marked_ bit as a hint, and destroy the
    // RegExpShared if it was accidentally marked earlier but wasn't marked by
    // the current trace.
    bool keep = marked() && IsMarked(&source);
    for (size_t i = 0; i < ArrayLength(compilationArray); i++) {
        RegExpShared::RegExpCompilation& compilation = compilationArray[i];
        // Any dying jit code invalidates the whole shared regexp.
        if (compilation.jitCode && gc::IsAboutToBeFinalized(&compilation.jitCode))
            keep = false;
    }

    MOZ_ASSERT(rt->isHeapMajorCollecting());
    // During compaction nothing is freed, so always keep the object alive;
    // in either survival case reset the mark bit for the next trace.
    if (keep || rt->gc.isHeapCompacting()) {
        clearMarked();
        return false;
    }

    return true;
}
// Shell testing function: gcparam(name[, value]).
// With one argument, returns the current value of the named GC parameter;
// with two, sets it. Read-only parameters (gcNumber, gcBytes) and a maxBytes
// value below the current gcBytes are rejected with an error.
static bool GCParameter(JSContext *cx, unsigned argc, Value *vp) {
    CallArgs args = CallArgsFromVp(argc, vp);

    JSString *str = ToString(cx, args.get(0));
    if (!str)
        return false;
    JSFlatString *flatStr = JS_FlattenString(cx, str);
    if (!flatStr)
        return false;

    // Linear search of the name -> JSGCParamKey table.
    size_t paramIndex = 0;
    for (;; paramIndex++) {
        if (paramIndex == ArrayLength(paramMap)) {
            JS_ReportError(cx, "the first argument must be one of " GC_PARAMETER_ARGS_LIST);
            return false;
        }
        if (JS_FlatStringEqualsAscii(flatStr, paramMap[paramIndex].name))
            break;
    }
    JSGCParamKey param = paramMap[paramIndex].param;

    // Request mode.
    if (args.length() == 1) {
        uint32_t value = JS_GetGCParameter(cx->runtime(), param);
        args.rval().setNumber(value);
        return true;
    }

    if (param == JSGC_NUMBER || param == JSGC_BYTES) {
        JS_ReportError(cx, "Attempt to change read-only parameter %s", paramMap[paramIndex].name);
        return false;
    }

    // NOTE(review): the message below claims a non-zero value is required, but
    // no zero check is performed here — confirm whether value==0 should be
    // rejected or the message reworded.
    uint32_t value;
    if (!ToUint32(cx, args[1], &value)) {
        JS_ReportError(cx, "the second argument must be convertable to uint32_t " "with non-zero value");
        return false;
    }

    // Shrinking maxBytes below the current heap usage would be inconsistent.
    if (param == JSGC_MAX_BYTES) {
        uint32_t gcBytes = JS_GetGCParameter(cx->runtime(), JSGC_BYTES);
        if (value < gcBytes) {
            JS_ReportError(cx, "attempt to set maxBytes to the value less than the current " "gcBytes (%u)", gcBytes);
            return false;
        }
    }

    JS_SetGCParameter(cx->runtime(), param, value);
    args.rval().setUndefined();
    return true;
}
// static nsresult nsChannelClassifier::SetBlockedTrackingContent(nsIChannel *channel) { // Can be called in EITHER the parent or child process. nsCOMPtr<nsIParentChannel> parentChannel; NS_QueryNotificationCallbacks(channel, parentChannel); if (parentChannel) { // This channel is a parent-process proxy for a child process request. The // actual channel will be notified via the status passed to // nsIRequest::Cancel and do this for us. return NS_OK; } nsresult rv; nsCOMPtr<nsIDOMWindow> win; nsCOMPtr<mozIThirdPartyUtil> thirdPartyUtil = do_GetService(THIRDPARTYUTIL_CONTRACTID, &rv); NS_ENSURE_SUCCESS(rv, NS_OK); rv = thirdPartyUtil->GetTopWindowForChannel(channel, getter_AddRefs(win)); NS_ENSURE_SUCCESS(rv, NS_OK); nsCOMPtr<nsPIDOMWindow> pwin = do_QueryInterface(win, &rv); NS_ENSURE_SUCCESS(rv, NS_OK); nsCOMPtr<nsIDocShell> docShell = pwin->GetDocShell(); if (!docShell) { return NS_OK; } nsCOMPtr<nsIDocument> doc = do_GetInterface(docShell, &rv); NS_ENSURE_SUCCESS(rv, NS_OK); // Notify nsIWebProgressListeners of this security event. // Can be used to change the UI state. nsCOMPtr<nsISecurityEventSink> eventSink = do_QueryInterface(docShell, &rv); NS_ENSURE_SUCCESS(rv, NS_OK); uint32_t state = 0; nsCOMPtr<nsISecureBrowserUI> securityUI; docShell->GetSecurityUI(getter_AddRefs(securityUI)); if (!securityUI) { return NS_OK; } doc->SetHasTrackingContentBlocked(true); securityUI->GetState(&state); state |= nsIWebProgressListener::STATE_BLOCKED_TRACKING_CONTENT; eventSink->OnSecurityChange(nullptr, state); // Log a warning to the web console. nsCOMPtr<nsIURI> uri; channel->GetURI(getter_AddRefs(uri)); nsCString utf8spec; uri->GetSpec(utf8spec); NS_ConvertUTF8toUTF16 spec(utf8spec); const char16_t* params[] = { spec.get() }; nsContentUtils::ReportToConsole(nsIScriptError::warningFlag, NS_LITERAL_CSTRING("Tracking Protection"), doc, nsContentUtils::eNECKO_PROPERTIES, "TrackingUriBlocked", params, ArrayLength(params)); return NS_OK; }
// GC sweep hook for the compartment's regexp table: deletes RegExpShared
// entries whose source atom or jit code is about to be finalized, and drops
// the cached match-result template object if it is dying.
void RegExpCompartment::sweep(JSRuntime *rt) {
    for (Set::Enum e(set_); !e.empty(); e.popFront()) {
        RegExpShared *shared = e.front();

        // Sometimes RegExpShared instances are marked without the
        // compartment being subsequently cleared. This can happen if a GC is
        // restarted while in progress (i.e. performing a full GC in the
        // middle of an incremental GC) or if a RegExpShared referenced via the
        // stack is traced but is not in a zone being collected.
        //
        // Because of this we only treat the marked_ bit as a hint, and destroy
        // the RegExpShared if it was accidentally marked earlier but wasn't
        // marked by the current trace.
        bool keep = shared->marked() && !IsStringAboutToBeFinalized(shared->source.unsafeGet());
        for (size_t i = 0; i < ArrayLength(shared->compilationArray); i++) {
            RegExpShared::RegExpCompilation &compilation = shared->compilationArray[i];
            // Once one compilation's jit code is dying, stop checking: the
            // shared regexp cannot be kept.
            if (keep && compilation.jitCode)
                keep = !IsJitCodeAboutToBeFinalized(compilation.jitCode.unsafeGet());
        }
        if (keep) {
            // Survivor: reset the mark hint for the next collection.
            shared->clearMarked();
        } else {
            js_delete(shared);
            e.removeFront();
        }
    }

    // The template object is a weak cache; recreate it lazily if collected.
    if (matchResultTemplateObject_ && IsObjectAboutToBeFinalized(matchResultTemplateObject_.unsafeGet())) {
        matchResultTemplateObject_.set(nullptr);
    }
}
// Main loop for a helper (background) thread: waits on the global helper
// thread state lock for work, then dispatches one task at a time until asked
// to terminate. The helper-state lock is held except while a handle* method
// runs the task.
void HelperThread::threadLoop() {
    MOZ_ASSERT(CanUseExtraThreads());

    JS::AutoSuppressGCAnalysis nogc;
    AutoLockHelperThreadState lock;

    js::TlsPerThreadData.set(threadData.ptr());

    // Compute the thread's stack limit, for over-recursed checks.
    uintptr_t stackLimit = GetNativeStackBase();
#if JS_STACK_GROWTH_DIRECTION > 0
    stackLimit += HELPER_STACK_QUOTA;
#else
    stackLimit -= HELPER_STACK_QUOTA;
#endif
    for (size_t i = 0; i < ArrayLength(threadData->nativeStackLimit); i++)
        threadData->nativeStackLimit[i] = stackLimit;

    while (true) {
        MOZ_ASSERT(idle());

        // Block until a task is available. Save the value of whether we are
        // going to do an Ion compile, in case the value returned by the method
        // changes.
        bool ionCompile = false;
        while (true) {
            if (terminate)
                return;
            if (HelperThreadState().canStartAsmJSCompile() ||
                (ionCompile = HelperThreadState().pendingIonCompileHasSufficientPriority()) ||
                HelperThreadState().canStartParseTask() ||
                HelperThreadState().canStartCompressionTask() ||
                HelperThreadState().canStartGCHelperTask() ||
                HelperThreadState().canStartGCParallelTask())
            {
                break;
            }
            // Releases the lock while sleeping; reacquires before returning.
            HelperThreadState().wait(GlobalHelperThreadState::PRODUCER);
        }

        // Dispatch tasks, prioritizing AsmJS work.
        if (HelperThreadState().canStartAsmJSCompile())
            handleAsmJSWorkload();
        else if (ionCompile)
            handleIonWorkload();
        else if (HelperThreadState().canStartParseTask())
            handleParseWorkload();
        else if (HelperThreadState().canStartCompressionTask())
            handleCompressionWorkload();
        else if (HelperThreadState().canStartGCHelperTask())
            handleGCHelperWorkload();
        else if (HelperThreadState().canStartGCParallelTask())
            handleGCParallelWorkload();
        else
            MOZ_CRASH("No task to perform");
    }
}
// Serialize (XDR_ENCODE) or deserialize (decode) an atom as a uint32 length
// followed by its jschars. On little-endian targets decoding reads the chars
// directly out of the XDR buffer; on big-endian targets they are copied
// through a temporary buffer for byte-order conversion. Returns false on OOM
// or XDR buffer failure.
bool js::XDRAtom(XDRState<mode> *xdr, MutableHandleAtom atomp) {
    AssertCanGC();

    if (mode == XDR_ENCODE) {
        uint32_t nchars = atomp->length();
        if (!xdr->codeUint32(&nchars))
            return false;

        jschar *chars = const_cast<jschar *>(atomp->getChars(xdr->cx()));
        if (!chars)
            return false;

        return xdr->codeChars(chars, nchars);
    }

    /* Avoid JSString allocation for already existing atoms. See bug 321985. */
    uint32_t nchars;
    if (!xdr->codeUint32(&nchars))
        return false;

    JSContext *cx = xdr->cx();
    JSAtom *atom;
#if IS_LITTLE_ENDIAN
    /* Directly access the little endian chars in the XDR buffer. */
    const jschar *chars = reinterpret_cast<const jschar *>(xdr->buf.read(nchars * sizeof(jschar)));
    atom = AtomizeChars<CanGC>(cx, chars, nchars);
#else
    /*
     * We must copy chars to a temporary buffer to convert between little and
     * big endian data.
     */
    jschar *chars;
    jschar stackChars[256];
    if (nchars <= ArrayLength(stackChars)) {
        // Common case: small atoms fit in the stack buffer, no allocation.
        chars = stackChars;
    } else {
        /*
         * This is very uncommon. Don't use the tempLifoAlloc arena for this as
         * most allocations here will be bigger than tempLifoAlloc's default
         * chunk size.
         */
        chars = cx->runtime->pod_malloc<jschar>(nchars);
        if (!chars)
            return false;
    }

    // codeChars on a decode from a live buffer cannot fail here.
    JS_ALWAYS_TRUE(xdr->codeChars(chars, nchars));
    atom = AtomizeChars<CanGC>(cx, chars, nchars);
    if (chars != stackChars)
        js_free(chars);
#endif /* !IS_LITTLE_ENDIAN */

    if (!atom)
        return false;
    atomp.set(atom);
    return true;
}
// ES6 (22 May, 2014) 9.5.14 Proxy.[[Construct]] bool ScriptedDirectProxyHandler::construct(JSContext* cx, HandleObject proxy, const CallArgs& args) const { // step 1 RootedObject handler(cx, GetDirectProxyHandlerObject(proxy)); // step 2 if (!handler) { JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_PROXY_REVOKED); return false; } // step 3 RootedObject target(cx, proxy->as<ProxyObject>().target()); MOZ_ASSERT(target->isConstructor()); // step 7 RootedObject argsArray(cx, NewDenseCopiedArray(cx, args.length(), args.array())); if (!argsArray) return false; // step 4-5 RootedValue trap(cx); if (!GetProperty(cx, handler, handler, cx->names().construct, &trap)) return false; // step 6 if (trap.isUndefined()) { ConstructArgs cargs(cx); if (!FillArgumentsFromArraylike(cx, cargs, args)) return false; RootedValue targetv(cx, ObjectValue(*target)); RootedObject obj(cx); if (!Construct(cx, targetv, cargs, args.newTarget(), &obj)) return false; args.rval().setObject(*obj); return true; } // step 8-9 Value constructArgv[] = { ObjectValue(*target), ObjectValue(*argsArray), args.newTarget() }; RootedValue thisValue(cx, ObjectValue(*handler)); if (!Invoke(cx, thisValue, trap, ArrayLength(constructArgv), constructArgv, args.rval())) return false; // step 10 if (!args.rval().isObject()) { JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_PROXY_CONSTRUCT_OBJECT); return false; } return true; }
// ES6 draft rev 32 (2 Feb 2014) 9.5.10 Proxy.[[Delete]](P) bool ScriptedDirectProxyHandler::delete_(JSContext* cx, HandleObject proxy, HandleId id, ObjectOpResult& result) const { // step 2 RootedObject handler(cx, GetDirectProxyHandlerObject(proxy)); // step 3 if (!handler) { JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_PROXY_REVOKED); return false; } // steps 4-5 RootedObject target(cx, proxy->as<ProxyObject>().target()); // steps 6-7 RootedValue trap(cx); if (!GetProperty(cx, handler, handler, cx->names().deleteProperty, &trap)) return false; // step 8 if (trap.isUndefined()) return DeleteProperty(cx, target, id, result); // steps 9-10 RootedValue value(cx); if (!IdToStringOrSymbol(cx, id, &value)) return false; Value argv[] = { ObjectValue(*target), value }; RootedValue trapResult(cx); if (!Invoke(cx, ObjectValue(*handler), trap, ArrayLength(argv), argv, &trapResult)) return false; // step 11 if (!ToBoolean(trapResult)) return result.fail(JSMSG_PROXY_DELETE_RETURNED_FALSE); // steps 12-13 Rooted<PropertyDescriptor> desc(cx); if (!GetOwnPropertyDescriptor(cx, target, id, &desc)) return false; // step 14-15 if (desc.object() && !desc.configurable()) { RootedValue v(cx, IdToValue(id)); ReportValueError(cx, JSMSG_CANT_DELETE, JSDVG_IGNORE_STACK, v, nullptr); return false; } // step 16 return result.succeed(); }
// ES6 (22 May, 2014) 9.5.4 Proxy.[[PreventExtensions]]() bool ScriptedDirectProxyHandler::preventExtensions(JSContext *cx, HandleObject proxy) const { // step 1 RootedObject handler(cx, GetDirectProxyHandlerObject(proxy)); // step 2 if (!handler) { JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr, JSMSG_PROXY_REVOKED); return false; } // step 3 RootedObject target(cx, proxy->as<ProxyObject>().target()); // step 4-5 RootedValue trap(cx); if (!JSObject::getProperty(cx, handler, handler, cx->names().preventExtensions, &trap)) return false; // step 6 if (trap.isUndefined()) return DirectProxyHandler::preventExtensions(cx, proxy); // step 7, 9 Value argv[] = { ObjectValue(*target) }; RootedValue trapResult(cx); if (!Invoke(cx, ObjectValue(*handler), trap, ArrayLength(argv), argv, &trapResult)) return false; // step 8 bool success = ToBoolean(trapResult); if (success) { // step 10 bool extensible; if (!JSObject::isExtensible(cx, target, &extensible)) return false; if (extensible) { JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr, JSMSG_CANT_REPORT_AS_NON_EXTENSIBLE); return false; } // step 11 "return true" return true; } // step 11 "return false" // This actually corresponds to 19.1.2.5 step 4. We cannot pass the failure back, so throw here // directly instead. JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr, JSMSG_CANT_CHANGE_EXTENSIBILITY); return false; }
// Trace hook: marks the GC things owned by this RegExpShared (its source
// atom and any compiled jit code). When visited by a marking tracer, records
// the fact in marked_ so the sweep phase can distinguish reachable instances.
void RegExpShared::trace(JSTracer* trc) {
    if (trc->isMarkingTracer())
        marked_ = true;

    TraceNullableEdge(trc, &source, "RegExpShared source");

    // One compilation slot per compilation mode; each may hold jit code.
    for (RegExpCompilation& entry : compilationArray)
        TraceNullableEdge(trc, &entry.jitCode, "RegExpShared code");
}
// ES6 (5 April, 2014) 9.5.3 Proxy.[[IsExtensible]]() bool ScriptedDirectProxyHandler::isExtensible(JSContext* cx, HandleObject proxy, bool* extensible) const { // step 1 RootedObject handler(cx, GetDirectProxyHandlerObject(proxy)); // step 2 if (!handler) { JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_PROXY_REVOKED); return false; } // step 3 RootedObject target(cx, proxy->as<ProxyObject>().target()); // step 4-5 RootedValue trap(cx); if (!GetProperty(cx, handler, handler, cx->names().isExtensible, &trap)) return false; // step 6 if (trap.isUndefined()) return IsExtensible(cx, target, extensible); // step 7, 9 Value argv[] = { ObjectValue(*target) }; RootedValue trapResult(cx); if (!Invoke(cx, ObjectValue(*handler), trap, ArrayLength(argv), argv, &trapResult)) return false; // step 8 bool booleanTrapResult = ToBoolean(trapResult); // step 10-11 bool targetResult; if (!IsExtensible(cx, target, &targetResult)) return false; // step 12 if (targetResult != booleanTrapResult) { JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_PROXY_EXTENSIBILITY); return false; } // step 13 *extensible = booleanTrapResult; return true; }
// ES6 20141014 9.5.4 Proxy.[[PreventExtensions]]() bool ScriptedDirectProxyHandler::preventExtensions(JSContext *cx, HandleObject proxy, bool *succeeded) const { // Steps 1-3. RootedObject handler(cx, GetDirectProxyHandlerObject(proxy)); if (!handler) { JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr, JSMSG_PROXY_REVOKED); return false; } // Step 4. RootedObject target(cx, proxy->as<ProxyObject>().target()); // Steps 5-6. RootedValue trap(cx); if (!JSObject::getProperty(cx, handler, handler, cx->names().preventExtensions, &trap)) return false; // Step 7. if (trap.isUndefined()) return DirectProxyHandler::preventExtensions(cx, proxy, succeeded); // Steps 8, 10. Value argv[] = { ObjectValue(*target) }; RootedValue trapResult(cx); if (!Invoke(cx, ObjectValue(*handler), trap, ArrayLength(argv), argv, &trapResult)) return false; // Step 9. bool booleanTrapResult = ToBoolean(trapResult); // Step 11. if (booleanTrapResult) { bool extensible; if (!JSObject::isExtensible(cx, target, &extensible)) return false; if (extensible) { JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr, JSMSG_CANT_REPORT_AS_NON_EXTENSIBLE); return false; } } // Step 12. *succeeded = booleanTrapResult; return true; }
// Trace hook: marks the GC things owned by this RegExpShared (its source
// atom and any compiled jit code). When visited by a marking tracer, records
// the fact in marked_ so the sweep phase can distinguish reachable instances.
void RegExpShared::trace(JSTracer* trc) {
    if (IS_GC_MARKING_TRACER(trc))
        marked_ = true;

    if (source)
        MarkString(trc, &source, "RegExpShared source");

    // One compilation slot per compilation mode; each may hold jit code.
    for (size_t i = 0; i < ArrayLength(compilationArray); i++) {
        RegExpCompilation& compilation = compilationArray[i];
        if (compilation.jitCode)
            MarkJitCode(trc, &compilation.jitCode, "RegExpShared code");
    }
}
// Set up this runtime's atom tables. Child runtimes share the permanent
// atoms, static strings and common names of their parent; a top-level runtime
// builds them itself by atomizing a compile-time table of common property and
// prototype names. Returns false on OOM.
bool JSRuntime::initializeAtoms(JSContext *cx) {
    atoms_ = cx->new_<AtomSet>();
    if (!atoms_ || !atoms_->init(JS_STRING_HASH_COUNT))
        return false;

    // Child runtime: borrow the parent's shared immutable tables.
    if (parentRuntime) {
        staticStrings = parentRuntime->staticStrings;
        commonNames = parentRuntime->commonNames;
        emptyString = parentRuntime->emptyString;
        permanentAtoms = parentRuntime->permanentAtoms;
        return true;
    }

    permanentAtoms = cx->new_<AtomSet>();
    if (!permanentAtoms || !permanentAtoms->init(JS_STRING_HASH_COUNT))
        return false;

    staticStrings = cx->new_<StaticStrings>();
    if (!staticStrings || !staticStrings->init(cx))
        return false;

    // Table of (chars, length) pairs generated from the common-property-name
    // and prototype-name macro lists; order must match JSAtomState's fields.
    static const CommonNameInfo cachedNames[] = {
#define COMMON_NAME_INFO(idpart, id, text) { js_##idpart##_str, sizeof(text) - 1 },
        FOR_EACH_COMMON_PROPERTYNAME(COMMON_NAME_INFO)
#undef COMMON_NAME_INFO
#define COMMON_NAME_INFO(name, code, init, clasp) { js_##name##_str, sizeof(#name) - 1 },
        JS_FOR_EACH_PROTOTYPE(COMMON_NAME_INFO)
#undef COMMON_NAME_INFO
    };

    commonNames = cx->new_<JSAtomState>();
    if (!commonNames)
        return false;

    // JSAtomState is laid out as a flat run of FixedHeapPtr<PropertyName>
    // fields, so fill it by walking it as an array in table order.
    FixedHeapPtr<PropertyName> *names = reinterpret_cast<FixedHeapPtr<PropertyName> *>(commonNames);
    for (size_t i = 0; i < ArrayLength(cachedNames); i++, names++) {
        JSAtom *atom = Atomize(cx, cachedNames[i].str, cachedNames[i].length, InternAtom);
        if (!atom)
            return false;
        names->init(atom->asPropertyName());
    }
    // Verify we filled exactly the whole JSAtomState struct.
    JS_ASSERT(uintptr_t(names) == uintptr_t(commonNames + 1));

    emptyString = commonNames->empty;
    return true;
}
// Memory-reporter hook: returns the malloc-heap bytes attributable to this
// RegExpShared — the object itself, its interpreter bytecode buffers, and
// the character tables produced by compilation.
size_t RegExpShared::sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) {
    size_t total = mallocSizeOf(this);

    // Each compilation mode may own a heap-allocated bytecode buffer.
    for (const RegExpCompilation& entry : compilationArray) {
        if (entry.byteCode)
            total += mallocSizeOf(entry.byteCode);
    }

    // The vector's own storage, then each heap-allocated table it points at.
    total += tables.sizeOfExcludingThis(mallocSizeOf);
    for (size_t idx = 0; idx < tables.length(); idx++)
        total += mallocSizeOf(tables[idx]);

    return total;
}
// ES6 draft rev 32 (2 Feb 2015) 9.5.4 Proxy.[[PreventExtensions]]() bool ScriptedDirectProxyHandler::preventExtensions(JSContext* cx, HandleObject proxy, ObjectOpResult& result) const { // Steps 1-3. RootedObject handler(cx, GetDirectProxyHandlerObject(proxy)); if (!handler) { JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_PROXY_REVOKED); return false; } // Step 4. RootedObject target(cx, proxy->as<ProxyObject>().target()); // Steps 5-6. RootedValue trap(cx); if (!GetProperty(cx, handler, handler, cx->names().preventExtensions, &trap)) return false; // Step 7. if (trap.isUndefined()) return PreventExtensions(cx, target, result); // Steps 8-9. Value argv[] = { ObjectValue(*target) }; RootedValue trapResult(cx); if (!Invoke(cx, ObjectValue(*handler), trap, ArrayLength(argv), argv, &trapResult)) return false; // Steps 10-11. if (ToBoolean(trapResult)) { bool extensible; if (!IsExtensible(cx, target, &extensible)) return false; if (extensible) { JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_CANT_REPORT_AS_NON_EXTENSIBLE); return false; } return result.succeed(); } return result.fail(JSMSG_PROXY_PREVENTEXTENSIONS_RETURNED_FALSE); }
// Map a Definition::Kind to a printable keyword for diagnostics. The table
// below must stay in sync with the Kind enumeration's declaration order.
const char* Definition::kindString(Kind kind) {
    static const char* const kindNames[] = {
        "", js_var_str, js_const_str, js_const_str, js_let_str,
        "argument", js_function_str, "unknown", js_import_str
    };
    MOZ_ASSERT(kind < ArrayLength(kindNames));
    return kindNames[kind];
}
// ES6 (14 October, 2014) 9.5.11 Proxy.[[Enumerate]] bool ScriptedDirectProxyHandler::enumerate(JSContext* cx, HandleObject proxy, MutableHandleObject objp) const { // step 1 RootedObject handler(cx, GetDirectProxyHandlerObject(proxy)); // step 2 if (!handler) { JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_PROXY_REVOKED); return false; } // step 3: unnecessary assert // step 4 RootedObject target(cx, proxy->as<ProxyObject>().target()); // step 5-6 RootedValue trap(cx); if (!GetProperty(cx, handler, handler, cx->names().enumerate, &trap)) return false; // step 7 if (trap.isUndefined()) return GetIterator(cx, target, 0, objp); // step 8-9 Value argv[] = { ObjectOrNullValue(target) }; RootedValue trapResult(cx); if (!Invoke(cx, ObjectValue(*handler), trap, ArrayLength(argv), argv, &trapResult)) return false; // step 10 if (trapResult.isPrimitive()) { ReportInvalidTrapResult(cx, proxy, cx->names().enumerate); return false; } // step 11 objp.set(&trapResult.toObject()); return true; }
// Construct a CSS tokenizer over |aBuffer|, which must outlive the scanner
// (only a pointer into it is kept). |aLineNumber| seeds both the current and
// token line counters; the pushback buffer starts empty, backed by the
// fixed-size local array.
nsCSSScanner::nsCSSScanner(const nsAString& aBuffer, uint32_t aLineNumber)
  : mReadPointer(aBuffer.BeginReading())
  , mOffset(0)
  , mCount(aBuffer.Length())
  , mPushback(mLocalPushback)          // use the inline buffer until it overflows
  , mPushbackCount(0)
  , mPushbackSize(ArrayLength(mLocalPushback))
  , mLineNumber(aLineNumber)
  , mLineOffset(0)
  , mTokenLineNumber(aLineNumber)
  , mTokenLineOffset(0)
  , mTokenOffset(0)
  , mRecordStartOffset(0)
  , mReporter(nullptr)
  , mSVGMode(false)
  , mRecording(false)
{
    MOZ_COUNT_CTOR(nsCSSScanner);
}
bool PendingLookup::IsBinaryFile() { nsString fileName; nsresult rv = mQuery->GetSuggestedFileName(fileName); if (NS_FAILED(rv)) { LOG(("No suggested filename [this = %p]", this)); return false; } LOG(("Suggested filename: %s [this = %p]", NS_ConvertUTF16toUTF8(fileName).get(), this)); for (size_t i = 0; i < ArrayLength(kBinaryFileExtensions); ++i) { if (StringEndsWith(fileName, nsDependentString(kBinaryFileExtensions[i]))) { return true; } } return false; }
// ES6 (22 May, 2014) 9.5.13 Proxy.[[Call]] bool ScriptedDirectProxyHandler::call(JSContext* cx, HandleObject proxy, const CallArgs& args) const { // step 1 RootedObject handler(cx, GetDirectProxyHandlerObject(proxy)); // step 2 if (!handler) { JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_PROXY_REVOKED); return false; } // step 3 RootedObject target(cx, proxy->as<ProxyObject>().target()); MOZ_ASSERT(target->isCallable()); // step 7 RootedObject argsArray(cx, NewDenseCopiedArray(cx, args.length(), args.array())); if (!argsArray) return false; // step 4-5 RootedValue trap(cx); if (!GetProperty(cx, handler, handler, cx->names().apply, &trap)) return false; // step 6 if (trap.isUndefined()) { RootedValue targetv(cx, ObjectValue(*target)); return Invoke(cx, args.thisv(), targetv, args.length(), args.array(), args.rval()); } // step 8 Value argv[] = { ObjectValue(*target), args.thisv(), ObjectValue(*argsArray) }; RootedValue thisValue(cx, ObjectValue(*handler)); return Invoke(cx, thisValue, trap, ArrayLength(argv), argv, args.rval()); }
// Native watchpoint handler: invokes the user's watch callback (passed via
// |closure|) as callable(id, oldValue, newValue) with the watched object as
// this-value, and stores the callback's return value back through |nvp| so it
// becomes the property's new value.
bool js::WatchHandler(JSContext* cx, JSObject* obj_, jsid id_, JS::Value old, JS::Value* nvp, void* closure) {
    RootedObject obj(cx, obj_);
    RootedId id(cx, id_);

    /* Avoid recursion on (obj, id) already being watched on cx. */
    AutoResolving resolving(cx, obj, id, AutoResolving::WATCH);
    if (resolving.alreadyStarted())
        return true;

    JSObject* callable = (JSObject*)closure;
    Value argv[] = { IdToValue(id), old, *nvp };
    RootedValue rv(cx);
    if (!Invoke(cx, ObjectValue(*obj), ObjectOrNullValue(callable), ArrayLength(argv), argv, &rv))
        return false;

    // The handler's return value replaces the value being assigned.
    *nvp = rv;
    return true;
}
// Atomize the engine's common property and prototype names and install them
// into the runtime's atom-state struct, which is laid out as a flat run of
// FixedHeapPtr<PropertyName> fields matching the table order below. Returns
// false on OOM.
bool js::InitCommonNames(JSContext *cx) {
    // Table of (chars, length) pairs generated from the common-property-name
    // and prototype-name macro lists.
    static const CommonNameInfo cachedNames[] = {
#define COMMON_NAME_INFO(idpart, id, text) { js_##idpart##_str, sizeof(text) - 1 },
        FOR_EACH_COMMON_PROPERTYNAME(COMMON_NAME_INFO)
#undef COMMON_NAME_INFO
#define COMMON_NAME_INFO(name, code, init) { js_##name##_str, sizeof(#name) - 1 },
        JS_FOR_EACH_PROTOTYPE(COMMON_NAME_INFO)
#undef COMMON_NAME_INFO
    };

    // Fill the runtime's cached-name fields by walking them as an array.
    FixedHeapPtr<PropertyName> *names = &cx->runtime->firstCachedName;
    for (size_t i = 0; i < ArrayLength(cachedNames); i++, names++) {
        JSAtom *atom = Atomize(cx, cachedNames[i].str, cachedNames[i].length, InternAtom);
        if (!atom)
            return false;
        names->init(atom->asPropertyName());
    }
    // Verify we filled exactly up to the end of the atom-state struct.
    JS_ASSERT(uintptr_t(names) == uintptr_t(&cx->runtime->atomState + 1));

    cx->runtime->emptyString = cx->names().empty;
    return true;
}
// Unroll the two-block loop bounded by |bound| UnrollCount times. The
// original loop is kept as an epilogue that handles the remaining (fewer
// than UnrollCount) iterations; a new unrolled header/backedge pair is
// spliced in before it, guarded by the inequality
// iterationBound - iterationCount - UnrollCount >= 0. Bails out (returns
// without changes) whenever any precondition fails; OOM during graph
// mutation is unrecoverable and crashes.
void LoopUnroller::go(LoopIterationBound *bound) {
    // For now we always unroll loops the same number of times.
    static const size_t UnrollCount = 10;

    JitSpew(JitSpew_Unrolling, "Attempting to unroll loop");

    header = bound->header;

    // UCE might have determined this isn't actually a loop.
    if (!header->isLoopHeader())
        return;

    backedge = header->backedge();
    oldPreheader = header->loopPredecessor();

    JS_ASSERT(oldPreheader->numSuccessors() == 1);

    // Only unroll loops with two blocks: an initial one ending with the
    // bound's test, and the body ending with the backedge.
    MTest *test = bound->test;
    if (header->lastIns() != test)
        return;
    if (test->ifTrue() == backedge) {
        if (test->ifFalse()->id() <= backedge->id())
            return;
    } else if (test->ifFalse() == backedge) {
        if (test->ifTrue()->id() <= backedge->id())
            return;
    } else {
        return;
    }
    if (backedge->numPredecessors() != 1 || backedge->numSuccessors() != 1)
        return;
    JS_ASSERT(backedge->phisEmpty());

    MBasicBlock *bodyBlocks[] = { header, backedge };

    // All instructions in the header and body must be clonable.
    for (size_t i = 0; i < ArrayLength(bodyBlocks); i++) {
        MBasicBlock *block = bodyBlocks[i];
        for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
            MInstruction *ins = *iter;
            if (ins->canClone())
                continue;
            // Control-flow instructions are regenerated rather than cloned.
            if (ins->isTest() || ins->isGoto() || ins->isInterruptCheck())
                continue;
#ifdef DEBUG
            JitSpew(JitSpew_Unrolling, "Aborting: can't clone instruction %s", ins->opName());
#endif
            return;
        }
    }

    // Compute the linear inequality we will use for exiting the unrolled loop:
    //
    // iterationBound - iterationCount - UnrollCount >= 0
    //
    LinearSum remainingIterationsInequality(bound->boundSum);
    if (!remainingIterationsInequality.add(bound->currentSum, -1))
        return;
    if (!remainingIterationsInequality.add(-int32_t(UnrollCount)))
        return;

    // Terms in the inequality need to be either loop invariant or phis from
    // the original header.
    for (size_t i = 0; i < remainingIterationsInequality.numTerms(); i++) {
        MDefinition *def = remainingIterationsInequality.term(i).term;
        if (def->block()->id() < header->id())
            continue;
        if (def->block() == header && def->isPhi())
            continue;
        return;
    }

    // OK, we've checked everything, now unroll the loop.

    JitSpew(JitSpew_Unrolling, "Unrolling loop");

    // The old preheader will go before the unrolled loop, and the old loop
    // will need a new empty preheader.
    CompileInfo &info = oldPreheader->info();
    // Blocks with a tracked pc come from normal script compilation; asm.js
    // blocks have no pc and use the NewAsmJS constructor.
    if (header->trackedSite().pc()) {
        unrolledHeader = MBasicBlock::New(graph, nullptr, info, oldPreheader, header->trackedSite(), MBasicBlock::LOOP_HEADER);
        unrolledBackedge = MBasicBlock::New(graph, nullptr, info, unrolledHeader, backedge->trackedSite(), MBasicBlock::NORMAL);
        newPreheader = MBasicBlock::New(graph, nullptr, info, unrolledHeader, oldPreheader->trackedSite(), MBasicBlock::NORMAL);
    } else {
        unrolledHeader = MBasicBlock::NewAsmJS(graph, info, oldPreheader, MBasicBlock::LOOP_HEADER);
        unrolledBackedge = MBasicBlock::NewAsmJS(graph, info, unrolledHeader, MBasicBlock::NORMAL);
        newPreheader = MBasicBlock::NewAsmJS(graph, info, unrolledHeader, MBasicBlock::NORMAL);
    }

    unrolledHeader->discardAllResumePoints();
    unrolledBackedge->discardAllResumePoints();
    newPreheader->discardAllResumePoints();

    // Insert new blocks at their RPO position, and update block ids.
    graph.insertBlockAfter(oldPreheader, unrolledHeader);
    graph.insertBlockAfter(unrolledHeader, unrolledBackedge);
    graph.insertBlockAfter(unrolledBackedge, newPreheader);
    graph.renumberBlocksAfter(oldPreheader);

    if (!unrolledDefinitions.init())
        CrashAtUnhandlableOOM("LoopUnroller::go");

    // Add phis to the unrolled loop header which correspond to the phis in the
    // original loop header.
    JS_ASSERT(header->getPredecessor(0) == oldPreheader);
    for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++) {
        MPhi *old = *iter;
        JS_ASSERT(old->numOperands() == 2);
        MPhi *phi = MPhi::New(alloc);
        phi->setResultType(old->type());
        phi->setResultTypeSet(old->resultTypeSet());
        phi->setRange(old->range());

        unrolledHeader->addPhi(phi);

        if (!phi->reserveLength(2))
            CrashAtUnhandlableOOM("LoopUnroller::go");

        // Set the first input for the phi for now. We'll set the second after
        // finishing the unroll.
        phi->addInput(old->getOperand(0));

        // The old phi will now take the value produced by the unrolled loop.
        old->replaceOperand(0, phi);

        if (!unrolledDefinitions.putNew(old, phi))
            CrashAtUnhandlableOOM("LoopUnroller::go");
    }

    // The loop condition can bail out on e.g. integer overflow, so make a
    // resume point based on the initial resume point of the original header.
    MResumePoint *headerResumePoint = header->entryResumePoint();
    if (headerResumePoint) {
        MResumePoint *rp = makeReplacementResumePoint(unrolledHeader, headerResumePoint);
        unrolledHeader->setEntryResumePoint(rp);

        // Perform an interrupt check at the start of the unrolled loop.
        unrolledHeader->add(MInterruptCheck::New(alloc));
    }

    // Generate code for the test in the unrolled loop.
    for (size_t i = 0; i < remainingIterationsInequality.numTerms(); i++) {
        MDefinition *def = remainingIterationsInequality.term(i).term;
        MDefinition *replacement = getReplacementDefinition(def);
        remainingIterationsInequality.replaceTerm(i, replacement);
    }
    MCompare *compare = ConvertLinearInequality(alloc, unrolledHeader, remainingIterationsInequality);
    MTest *unrolledTest = MTest::New(alloc, compare, unrolledBackedge, newPreheader);
    unrolledHeader->end(unrolledTest);

    // Make an entry resume point for the unrolled body. The unrolled header
    // does not have side effects on stack values, even if the original loop
    // header does, so use the same resume point as for the unrolled header.
    if (headerResumePoint) {
        MResumePoint *rp = makeReplacementResumePoint(unrolledBackedge, headerResumePoint);
        unrolledBackedge->setEntryResumePoint(rp);
    }

    // Make an entry resume point for the new preheader. There are no
    // instructions which use this but some other stuff wants one to be here.
    if (headerResumePoint) {
        MResumePoint *rp = makeReplacementResumePoint(newPreheader, headerResumePoint);
        newPreheader->setEntryResumePoint(rp);
    }

    // Generate the unrolled code.
    JS_ASSERT(UnrollCount > 1);
    size_t unrollIndex = 0;
    while (true) {
        // Clone the contents of the original loop into the unrolled loop body.
        for (size_t i = 0; i < ArrayLength(bodyBlocks); i++) {
            MBasicBlock *block = bodyBlocks[i];
            for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
                MInstruction *ins = *iter;
                if (ins->canClone()) {
                    makeReplacementInstruction(*iter);
                } else {
                    // Control instructions are handled separately.
                    JS_ASSERT(ins->isTest() || ins->isGoto() || ins->isInterruptCheck());
                }
            }
        }

        // Compute the value of each loop header phi after the execution of
        // this unrolled iteration.
        MDefinitionVector phiValues(alloc);
        JS_ASSERT(header->getPredecessor(1) == backedge);
        for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++) {
            MPhi *old = *iter;
            MDefinition *oldInput = old->getOperand(1);
            if (!phiValues.append(getReplacementDefinition(oldInput)))
                CrashAtUnhandlableOOM("LoopUnroller::go");
        }

        unrolledDefinitions.clear();

        if (unrollIndex == UnrollCount - 1) {
            // We're at the end of the last unrolled iteration, set the
            // backedge input for the unrolled loop phis.
            size_t phiIndex = 0;
            for (MPhiIterator iter(unrolledHeader->phisBegin()); iter != unrolledHeader->phisEnd(); iter++) {
                MPhi *phi = *iter;
                phi->addInput(phiValues[phiIndex++]);
            }
            JS_ASSERT(phiIndex == phiValues.length());
            break;
        }

        // Update the map for the phis in the next iteration.
        size_t phiIndex = 0;
        for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++) {
            MPhi *old = *iter;
            if (!unrolledDefinitions.putNew(old, phiValues[phiIndex++]))
                CrashAtUnhandlableOOM("LoopUnroller::go");
        }
        JS_ASSERT(phiIndex == phiValues.length());

        unrollIndex++;
    }

    MGoto *backedgeJump = MGoto::New(alloc, unrolledHeader);
    unrolledBackedge->end(backedgeJump);

    // Place the old preheader before the unrolled loop.
    JS_ASSERT(oldPreheader->lastIns()->isGoto());
    oldPreheader->discardLastIns();
    oldPreheader->end(MGoto::New(alloc, unrolledHeader));

    // Place the new preheader before the original loop.
    newPreheader->end(MGoto::New(alloc, header));

    // Cleanup the MIR graph.
    if (!unrolledHeader->addPredecessorWithoutPhis(unrolledBackedge))
        CrashAtUnhandlableOOM("LoopUnroller::go");
    header->replacePredecessor(oldPreheader, newPreheader);
    oldPreheader->setSuccessorWithPhis(unrolledHeader, 0);
    newPreheader->setSuccessorWithPhis(header, 0);
    unrolledBackedge->setSuccessorWithPhis(unrolledHeader, 1);
}
/*
 * Shell testing native: countHeap([start[, kind]]).
 *
 * Walks the GC heap with a counting tracer (JSCountHeapTracer) and returns
 * the number of heap-allocated things reached.  With no arguments the walk
 * starts from the runtime's roots; the optional first argument (a traceable
 * value, or null) restricts the walk to things reachable from that value,
 * and the optional second argument (a string naming a trace kind; see
 * traceKindNames, defined elsewhere in this file) restricts which things are
 * counted.
 *
 * Returns JS_FALSE on bad arguments or OOM; otherwise JS_TRUE, with the
 * count delivered as the call's numeric return value via *vp.
 */
static JSBool
CountHeap(JSContext *cx, unsigned argc, jsval *vp)
{
    jsval v;
    int32_t traceKind;          /* -1 means "count things of every kind". */
    JSString *str;
    JSCountHeapTracer countTracer;
    JSCountHeapNode *node;
    size_t counter;

    /* Optional argument 1: the root to start tracing from. */
    Value startValue = UndefinedValue();
    if (argc > 0) {
        v = JS_ARGV(cx, vp)[0];
        if (JSVAL_IS_TRACEABLE(v)) {
            startValue = v;
        } else if (!JSVAL_IS_NULL(v)) {
            JS_ReportError(cx,
                           "the first argument is not null or a heap-allocated "
                           "thing");
            return JS_FALSE;
        }
    }

    /* Optional argument 2: a trace-kind name used to filter the count. */
    traceKind = -1;
    if (argc > 1) {
        str = JS_ValueToString(cx, JS_ARGV(cx, vp)[1]);
        if (!str)
            return JS_FALSE;
        JSFlatString *flatStr = JS_FlattenString(cx, str);
        if (!flatStr)
            return JS_FALSE;
        /* Linear scan of the name table; unknown names are reported as errors. */
        for (size_t i = 0; ;) {
            if (JS_FlatStringEqualsAscii(flatStr, traceKindNames[i].name)) {
                traceKind = traceKindNames[i].kind;
                break;
            }
            if (++i == ArrayLength(traceKindNames)) {
                /* bytes may fail to encode under OOM; then report nothing. */
                JSAutoByteString bytes(cx, str);
                if (!!bytes)
                    JS_ReportError(cx, "trace kind name '%s' is unknown", bytes.ptr());
                return JS_FALSE;
            }
        }
    }

    /*
     * Initialize the counting tracer.  CountHeapNotify (defined elsewhere in
     * this file) is the tracer callback; from the loop below it evidently
     * appends newly visited things as nodes on countTracer.traceList.
     */
    JS_TracerInit(&countTracer.base, JS_GetRuntime(cx), CountHeapNotify);
    if (!countTracer.visited.init()) {
        JS_ReportOutOfMemory(cx);
        return JS_FALSE;
    }
    countTracer.ok = true;
    countTracer.traceList = NULL;
    countTracer.recycleList = NULL;

    /* Seed the walk: the whole runtime, or just the requested start value. */
    if (startValue.isUndefined()) {
        JS_TraceRuntime(&countTracer.base);
    } else {
        JS_CallValueTracer(&countTracer.base, startValue, "root");
    }

    /*
     * Worklist walk: pop each node off traceList, count it if it matches the
     * requested kind, park the node on recycleList (so the callback can reuse
     * its memory), then trace its children, which may push more nodes onto
     * traceList.
     */
    counter = 0;
    while ((node = countTracer.traceList) != NULL) {
        if (traceKind == -1 || node->kind == traceKind)
            counter++;
        countTracer.traceList = node->next;
        node->next = countTracer.recycleList;
        countTracer.recycleList = node;
        JS_TraceChildren(&countTracer.base, node->thing, node->kind);
    }
    /* Free the recycled nodes even if the trace failed partway through. */
    while ((node = countTracer.recycleList) != NULL) {
        countTracer.recycleList = node->next;
        js_free(node);
    }
    /* countTracer.ok is cleared by the callback on allocation failure. */
    if (!countTracer.ok) {
        JS_ReportOutOfMemory(cx);
        return false;
    }

    *vp = JS_NumberValue((double) counter);
    return true;
}
/*
 * Shell testing native: gcparam(name[, value]).
 *
 * With one argument, returns the current value of the named GC parameter as
 * a number.  With two arguments, sets the named parameter to the given
 * uint32 value; the read-only parameters gcBytes and gcNumber are rejected,
 * and maxBytes may not be lowered below the currently used gcBytes.
 *
 * Old-style JSNative calling convention: vp[2] and vp[3] hold the first and
 * second actual arguments, and the return value is written through *vp.
 *
 * Fixes relative to the previous revision: the unknown-parameter message
 * contained a doubled word ("argument argument"), the uint32 conversion
 * message misspelled "convertible", and the JS_FALSE/false and JS_TRUE/true
 * return styles were mixed; returns now uniformly use the JSBool macros.
 */
static JSBool
GCParameter(JSContext *cx, unsigned argc, jsval *vp)
{
    JSString *str;
    if (argc == 0) {
        /* ToString(undefined) cannot fail, hence the assert-only check. */
        str = JS_ValueToString(cx, JSVAL_VOID);
        JS_ASSERT(str);
    } else {
        str = JS_ValueToString(cx, vp[2]);
        if (!str)
            return JS_FALSE;
        /* Keep the converted string rooted by writing it back into the arg slot. */
        vp[2] = STRING_TO_JSVAL(str);
    }
    JSFlatString *flatStr = JS_FlattenString(cx, str);
    if (!flatStr)
        return JS_FALSE;

    /* Look the name up in paramMap; error out if it names no known parameter. */
    size_t paramIndex = 0;
    for (;; paramIndex++) {
        if (paramIndex == ArrayLength(paramMap)) {
            JS_ReportError(cx,
                           "the first argument must be maxBytes, "
                           "maxMallocBytes, gcStackpoolLifespan, gcBytes or "
                           "gcNumber");
            return JS_FALSE;
        }
        if (JS_FlatStringEqualsAscii(flatStr, paramMap[paramIndex].name))
            break;
    }
    JSGCParamKey param = paramMap[paramIndex].param;

    /* One argument: read the parameter and return it. */
    if (argc == 1) {
        uint32_t value = JS_GetGCParameter(cx->runtime, param);
        vp[0] = JS_NumberValue(value);
        return JS_TRUE;
    }

    if (param == JSGC_NUMBER ||
        param == JSGC_BYTES) {
        JS_ReportError(cx, "Attempt to change read-only parameter %s",
                       paramMap[paramIndex].name);
        return JS_FALSE;
    }

    uint32_t value;
    if (!JS_ValueToECMAUint32(cx, vp[3], &value)) {
        JS_ReportError(cx,
                       "the second argument must be convertible to uint32_t "
                       "with non-zero value");
        return JS_FALSE;
    }

    /* Refuse to shrink the heap limit below what is currently in use. */
    if (param == JSGC_MAX_BYTES) {
        uint32_t gcBytes = JS_GetGCParameter(cx->runtime, JSGC_BYTES);
        if (value < gcBytes) {
            JS_ReportError(cx,
                           "attempt to set maxBytes to the value less than the current "
                           "gcBytes (%u)",
                           gcBytes);
            return JS_FALSE;
        }
    }

    JS_SetGCParameter(cx->runtime, param, value);
    *vp = JSVAL_VOID;
    return JS_TRUE;
}
// Generate a stub that is called via the internal ABI derived from the
// signature of the import and calls into an appropriate InvokeImport C++
// function, having boxed all the ABI arguments into a homogeneous Value array.
//
// Returns false only on assembler OOM.  On success, offsets->end records the
// offset just past the generated code.  A failed import call (the InvokeImport_*
// callee returning zero) branches to throwLabel at runtime.
static bool
GenerateInterpExitStub(ModuleGenerator& mg, unsigned importIndex, Label* throwLabel,
                       ProfilingOffsets* offsets)
{
    MacroAssembler& masm = mg.masm();
    const Sig& sig = *mg.import(importIndex).sig;

    masm.setFramePushed(0);

    // Argument types for InvokeImport_*:
    static const MIRType typeArray[] = { MIRType_Pointer,   // ImportExit
                                         MIRType_Int32,     // argc
                                         MIRType_Pointer }; // argv
    MIRTypeVector invokeArgTypes;
    MOZ_ALWAYS_TRUE(invokeArgTypes.append(typeArray, ArrayLength(typeArray)));

    // At the point of the call, the stack layout shall be (sp grows to the left):
    //   | stack args | padding | Value argv[] | padding | retaddr | caller stack args |
    // The padding between stack args and argv ensures that argv is aligned. The
    // padding between argv and retaddr ensures that sp is aligned.
    unsigned argOffset = AlignBytes(StackArgBytes(invokeArgTypes), sizeof(double));
    // Reserve at least one Value slot even for zero-argument imports, so argv
    // always points at valid (and result-writable) storage.
    unsigned argBytes = Max<size_t>(1, sig.args().length()) * sizeof(Value);
    unsigned framePushed = StackDecrementForCall(masm, ABIStackAlignment, argOffset + argBytes);

    GenerateExitPrologue(masm, framePushed, ExitReason::ImportInterp, offsets);

    // Fill the argument array: box the caller's ABI arguments into the Value
    // array at [sp + argOffset].
    unsigned offsetToCallerStackArgs = sizeof(AsmJSFrame) + masm.framePushed();
    Register scratch = ABIArgGenerator::NonArgReturnReg0;
    FillArgumentArray(masm, sig.args(), argOffset, offsetToCallerStackArgs, scratch);

    // Prepare the arguments for the call to InvokeImport_*.
    ABIArgMIRTypeIter i(invokeArgTypes);

    // argument 0: importIndex (passed in the pointer-sized first slot; the
    // typeArray comment labels this slot "ImportExit" -- NOTE(review): the
    // value actually stored here is the import index, confirm against the
    // InvokeImport_* definitions)
    if (i->kind() == ABIArg::GPR)
        masm.mov(ImmWord(importIndex), i->gpr());
    else
        masm.store32(Imm32(importIndex), Address(masm.getStackPointer(), i->offsetFromArgBase()));
    i++;

    // argument 1: argc
    unsigned argc = sig.args().length();
    if (i->kind() == ABIArg::GPR)
        masm.mov(ImmWord(argc), i->gpr());
    else
        masm.store32(Imm32(argc), Address(masm.getStackPointer(), i->offsetFromArgBase()));
    i++;

    // argument 2: argv (address of the boxed-argument array built above)
    Address argv(masm.getStackPointer(), argOffset);
    if (i->kind() == ABIArg::GPR) {
        masm.computeEffectiveAddress(argv, i->gpr());
    } else {
        masm.computeEffectiveAddress(argv, scratch);
        masm.storePtr(scratch, Address(masm.getStackPointer(), i->offsetFromArgBase()));
    }
    i++;
    MOZ_ASSERT(i.done());

    // Make the call, test whether it succeeded, and extract the return value.
    // The InvokeImport_* callees signal failure with a zero in ReturnReg, in
    // which case we branch to the throw stub.  For value-returning imports the
    // boxed result is read back out of argv[0] -- presumably the callee stores
    // it there; confirm against the InvokeImport_* implementations.
    AssertStackAlignment(masm, ABIStackAlignment);
    switch (sig.ret()) {
      case ExprType::Void:
        masm.call(SymbolicAddress::InvokeImport_Void);
        masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
        break;
      case ExprType::I32:
        masm.call(SymbolicAddress::InvokeImport_I32);
        masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
        masm.unboxInt32(argv, ReturnReg);
        break;
      case ExprType::I64:
        MOZ_CRASH("no int64 in asm.js");
      case ExprType::F32:
        MOZ_CRASH("Float32 shouldn't be returned from a FFI");
      case ExprType::F64:
        masm.call(SymbolicAddress::InvokeImport_F64);
        masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
        masm.loadDouble(argv, ReturnDoubleReg);
        break;
      case ExprType::I32x4:
      case ExprType::F32x4:
      case ExprType::B32x4:
        MOZ_CRASH("SIMD types shouldn't be returned from a FFI");
    }

    GenerateExitEpilogue(masm, framePushed, ExitReason::ImportInterp, offsets);

    if (masm.oom())
        return false;
    offsets->end = masm.currentOffset();
    return true;
}