// Kicks off the asynchronous load for this resource handle.
// Must be called exactly once, on a freshly-created handle.
void ResourceHandleInternal::start()
{
    // Hard check: starting twice (or after teardown) is a fatal logic error.
    if (m_state != ConnectionStateNew)
        CRASH();
    m_state = ConnectionStateStarted;

    m_loader = adoptPtr(Platform::current()->createURLLoader());
    ASSERT(m_loader);

    // Wrap the request and propagate the credentials policy before handing
    // it to the platform loader; `this` receives the loader callbacks.
    WrappedResourceRequest wrappedRequest(m_request);
    wrappedRequest.setAllowStoredCredentials(allowStoredCredentials());
    m_loader->loadAsynchronously(wrappedRequest, this);
}
// Reserves the fixed virtual-memory pool used for JIT-generated code.
FixedVMPoolExecutableAllocator()
    : MetaAllocator(32) // round up all allocations to 32 bytes
{
    m_reservation = PageReservation::reserveWithGuardPages(fixedPoolSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
#if !ENABLE(CLASSIC_INTERPRETER)
    // Without the classic interpreter there is no non-JIT fallback, so
    // failing to reserve executable memory is fatal.
    if (!m_reservation)
        CRASH();
#endif
    if (m_reservation) {
        ASSERT(m_reservation.size() == fixedPoolSize);
        addFreshFreeSpace(m_reservation.base(), m_reservation.size());
    }
}
// Records `exception` as the VM's pending exception, notifying the debugger.
void VM::throwException(ExecState* exec, Exception* exception)
{
    // Debugging aid: with Options::breakOnThrow() set, deliberately crash at
    // the throw site so it can be inspected in a debugger.
    if (Options::breakOnThrow()) {
        dataLog("In call frame ", RawPointer(exec), " for code block ", *exec->codeBlock(), "\n");
        CRASH();
    }

    // The throw must originate from the top call frame or a global exec state.
    ASSERT(exec == topCallFrame || exec == exec->lexicalGlobalObject()->globalExec() || exec == exec->vmEntryGlobalObject()->globalExec());

    interpreter->notifyDebuggerOfExceptionToBeThrown(exec, exception);

    setException(exception);
}
// Allocates `sizeInBytes` of executable memory. On exhaustion, either returns
// null (JITCompilationCanFail) or frees existing JIT code and retries once;
// a second failure is fatal.
PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(JSGlobalData& globalData, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
{
    RefPtr<ExecutableMemoryHandle> result = allocator->allocate(sizeInBytes, ownerUID);
    if (!result) {
        // Callers that have a fallback path opt in to receiving null.
        if (effort == JITCompilationCanFail)
            return result;
        // Last resort: jettison existing executable memory and retry.
        releaseExecutableMemory(globalData);
        result = allocator->allocate(sizeInBytes, ownerUID);
        if (!result)
            CRASH();
    }
    return result.release();
}
// Returns the connection to the network process, re-establishing it first if
// it was lost. Failure to re-establish is unrecoverable, hence the CRASH().
NetworkProcessConnection* WebProcess::networkConnection()
{
    ASSERT(m_usesNetworkProcess);

    // If we've lost our connection to the network process (e.g. it crashed) try to re-establish it.
    if (!m_networkProcessConnection)
        ensureNetworkProcessConnection();

    // If we failed to re-establish it then we are beyond recovery and should crash.
    if (!m_networkProcessConnection)
        CRASH();

    return m_networkProcessConnection.get();
}
// Delegate callback: a chunk of response data arrived. Forwards it to
// addData() and notifies the frame loader when callbacks are enabled.
void ResourceLoader::didReceiveData(const char* data, int length, long long encodedDataLength, bool allAtOnce)
{
    // Hard liveness checks (fastMallocSize() returns 0 for memory it does not
    // own): a stale application cache host or frame pointer here would be a
    // use-after-free, so crash immediately instead.
    if (!m_cancelled && !fastMallocSize(documentLoader()->applicationCacheHost()))
        CRASH();
    if (!m_cancelled && !fastMallocSize(documentLoader()->frame()))
        CRASH();

    // The following assertions are not quite valid here, since a subclass
    // might override didReceiveData in a way that invalidates them. This
    // happens with the steps listed in 3266216
    // ASSERT(con == connection);
    // ASSERT(!m_reachedTerminalState);

    // Protect this in this delegate method since the additional processing can do
    // anything including possibly derefing this; one example of this is Radar 3266216.
    RefPtr<ResourceLoader> protector(this);

    addData(data, length, allAtOnce);
    // FIXME: If we get a resource with more than 2B bytes, this code won't do the right thing.
    // However, with today's computers and networking speeds, this won't happen in practice.
    // Could be an issue with a giant local file.
    if (m_options.sendLoadCallbacks == SendCallbacks && m_frame)
        frameLoader()->notifier()->didReceiveData(this, data, length, static_cast<int>(encodedDataLength));
}
static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs) { #if OS(DARWIN) #if CPU(X86) unsigned user_count = sizeof(regs)/sizeof(int); thread_state_flavor_t flavor = i386_THREAD_STATE; #elif CPU(X86_64) unsigned user_count = x86_THREAD_STATE64_COUNT; thread_state_flavor_t flavor = x86_THREAD_STATE64; #elif CPU(PPC) unsigned user_count = PPC_THREAD_STATE_COUNT; thread_state_flavor_t flavor = PPC_THREAD_STATE; #elif CPU(PPC64) unsigned user_count = PPC_THREAD_STATE64_COUNT; thread_state_flavor_t flavor = PPC_THREAD_STATE64; #elif CPU(ARM) unsigned user_count = ARM_THREAD_STATE_COUNT; thread_state_flavor_t flavor = ARM_THREAD_STATE; #else #error Unknown Architecture #endif kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)®s, &user_count); if (result != KERN_SUCCESS) { WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result); CRASH(); } return user_count * sizeof(usword_t); // end OS(DARWIN) #elif OS(WINDOWS) regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS; GetThreadContext(platformThread, ®s); return sizeof(CONTEXT); #elif USE(PTHREADS) pthread_attr_init(®s); #if HAVE(PTHREAD_NP_H) || OS(NETBSD) // e.g. on FreeBSD 5.4, [email protected] pthread_attr_get_np(platformThread, ®s); #else // FIXME: this function is non-portable; other POSIX systems may have different np alternatives pthread_getattr_np(platformThread, ®s); #endif return 0; #else #error Need a way to get thread registers on this platform #endif }
// Swaps the default malloc zone's malloc/realloc entry points for checked
// wrappers (checkedMalloc/checkedRealloc), saving the originals.
void makeLargeMallocFailSilently()
{
    malloc_zone_t* zone = malloc_default_zone();

#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
    // On 10.7+ the zone structure may live on write-protected pages, so add
    // VM_PROT_WRITE to the page(s) spanning it before patching the function
    // pointers, and restore the original protection afterwards.
    mach_vm_address_t pageStart = reinterpret_cast<vm_address_t>(zone) & static_cast<vm_size_t>(~(getpagesize() - 1));
    vm_prot_t initialProtection = protectionOfRegion(pageStart);
    vm_size_t len = reinterpret_cast<vm_address_t>(zone) - pageStart + sizeof(malloc_zone_t);
    if (mach_vm_protect(mach_task_self(), pageStart, len, 0, initialProtection | VM_PROT_WRITE))
        CRASH();
#endif

    savedMalloc = zone->malloc;
    savedRealloc = zone->realloc;
    zone->malloc = checkedMalloc;
    zone->realloc = checkedRealloc;

#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
    if (mach_vm_protect(mach_task_self(), pageStart, len, 0, initialProtection))
        CRASH();
#endif
}
// Reads one byte of AVR data memory at `addr`. An address past ramend is a
// fatal emulation error; GDB read watchpoints are notified when attached.
uint8_t avr_core_watch_read(avr_t *avr, uint16_t addr)
{
	if (addr > avr->ramend) {
		// Log PC, SP and the current opcode word before crashing so the
		// faulting instruction can be identified.
		AVR_LOG(avr, LOG_ERROR, FONT_RED
				"CORE: *** Invalid read address PC=%04x SP=%04x O=%04x Address %04x out of ram (%04x)\n"
				FONT_DEFAULT,
				avr->pc, _avr_sp_get(avr),
				avr->flash[avr->pc + 1] | (avr->flash[avr->pc]<<8),
				addr, avr->ramend);
		CRASH();
	}
	if (avr->gdb) {
		avr_gdb_handle_watchpoints(avr, addr, AVR_GDB_WATCH_READ);
	}
	return avr->data[addr];
}
// Returns the token `offset` positions ahead without consuming it.
// offset must be in [1, TOKEN_BUFFER_SIZE]; anything else is a fatal error.
token_t * peek_token(lexer_state * state, int offset)
{
    if (offset > TOKEN_BUFFER_SIZE)
        CRASH("offset > TOKEN_BUFFER_SIZE");
    if (offset <= 0)
        CRASH("offset <= 0");

    int ahead = state->ahead;
    token_list * b = state->token_buffer;
    // Walk the buffered token list; positions beyond what has already been
    // read ahead (i >= ahead) are lexed on demand and cached in the buffer.
    for (int i = 0; i < offset; i++) {
        b = b->next;
        if (i >= ahead) {
            b->token = read_token(state);
            state->ahead++;
        }
    }
    return b->token;
}
void OSAllocator::decommit(void* address, size_t bytes) { // According to http://msdn.microsoft.com/en-us/library/aa366892(VS.85).aspx, // bytes (i.e. dwSize) being 0 when dwFreeType is MEM_DECOMMIT means that we'll // decommit the entire region allocated by VirtualAlloc() instead of decommitting // nothing as we would expect. Hence, we should check if bytes is 0 and handle it // appropriately before calling VirtualFree(). // See: https://bugs.webkit.org/show_bug.cgi?id=121972. if (!bytes) return; bool result = VirtualFree(address, bytes, MEM_DECOMMIT); if (!result) CRASH(); }
// Swaps the default malloc zone's malloc/realloc entry points for checked
// wrappers (checkedMalloc/checkedRealloc), saving the originals.
void makeLargeMallocFailSilently()
{
    malloc_zone_t* zone = malloc_default_zone();

#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) && !defined(BUILDING_ON_SNOW_LEOPARD)
    // On Lion and later the zone structure may live on write-protected pages,
    // so add VM_PROT_WRITE to the page(s) spanning it before patching the
    // function pointers, and restore the original protection afterwards.
    mach_vm_address_t pageStart = reinterpret_cast<vm_address_t>(zone) & static_cast<vm_size_t>(~(getpagesize() - 1));
    vm_prot_t initialProtection = protectionOfRegion(pageStart);
    vm_size_t len = reinterpret_cast<vm_address_t>(zone) - pageStart + sizeof(malloc_zone_t);
    if (mach_vm_protect(mach_task_self(), pageStart, len, 0, initialProtection | VM_PROT_WRITE))
        CRASH();
#endif

    savedMalloc = zone->malloc;
    savedRealloc = zone->realloc;
    zone->malloc = checkedMalloc;
    zone->realloc = checkedRealloc;

#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) && !defined(BUILDING_ON_SNOW_LEOPARD)
    if (mach_vm_protect(mach_task_self(), pageStart, len, 0, initialProtection))
        CRASH();
#endif
}
// Uploads the dirty portions of each tile intersecting m_updateRect into the
// tiles' textures via the texture updater.
void TiledLayerChromium::updateCompositorResources(GraphicsContext3D* context)
{
    // Painting could cause compositing to get turned off, which may cause the tiler to become invalidated mid-update.
    if (m_skipsDraw || m_updateRect.isEmpty() || !m_tiler->numTiles())
        return;

    int left, top, right, bottom;
    m_tiler->contentRectToTileIndices(m_updateRect, left, top, right, bottom);
    for (int j = top; j <= bottom; ++j) {
        for (int i = left; i <= right; ++i) {
            UpdatableTile* tile = tileAt(i, j);
            if (!tile)
                tile = createTile(i, j);
            else if (!tile->dirty())
                continue;

            // Calculate page-space rectangle to copy from.
            IntRect sourceRect = m_tiler->tileContentRect(tile);
            const IntPoint anchor = sourceRect.location();
            sourceRect.intersect(m_tiler->layerRectToContentRect(tile->m_dirtyLayerRect));
            // Paint rect not guaranteed to line up on tile boundaries, so
            // make sure that sourceRect doesn't extend outside of it.
            sourceRect.intersect(m_paintRect);
            if (sourceRect.isEmpty())
                continue;

            ASSERT(tile->texture()->isReserved());

            // Calculate tile-space rectangle to upload into.
            IntRect destRect(IntPoint(sourceRect.x() - anchor.x(), sourceRect.y() - anchor.y()), sourceRect.size());
            // Hard CRASH() bounds checks: an out-of-range upload rectangle
            // would read/write outside the paint buffer.
            if (destRect.x() < 0)
                CRASH();
            if (destRect.y() < 0)
                CRASH();

            // Offset from paint rectangle to this tile's dirty rectangle.
            IntPoint paintOffset(sourceRect.x() - m_paintRect.x(), sourceRect.y() - m_paintRect.y());
            if (paintOffset.x() < 0)
                CRASH();
            if (paintOffset.y() < 0)
                CRASH();
            if (paintOffset.x() + destRect.width() > m_paintRect.width())
                CRASH();
            if (paintOffset.y() + destRect.height() > m_paintRect.height())
                CRASH();

            tile->texture()->bindTexture(context);
            // Border texels require linear filtering; otherwise nearest is used.
            const GC3Dint filter = m_tiler->hasBorderTexels() ? GraphicsContext3D::LINEAR : GraphicsContext3D::NEAREST;
            GLC(context, context->texParameteri(GraphicsContext3D::TEXTURE_2D, GraphicsContext3D::TEXTURE_MIN_FILTER, filter));
            GLC(context, context->texParameteri(GraphicsContext3D::TEXTURE_2D, GraphicsContext3D::TEXTURE_MAG_FILTER, filter));
            GLC(context, context->bindTexture(GraphicsContext3D::TEXTURE_2D, 0));

            textureUpdater()->updateTextureRect(context, tile->texture(), sourceRect, destRect);
            tile->clearDirty();
        }
    }
}
// Unwinds the stack for a thrown exception and records, in the VM, the frame
// and code address at which execution should resume (the catch handler, or
// the uncaught-exception routine).
void genericUnwind(VM* vm, ExecState* callFrame, UnwindStart unwindStart)
{
    // Debugging aid: with Options::breakOnThrow() set, crash at the throw
    // site so it can be inspected in a debugger.
    if (Options::breakOnThrow()) {
        CodeBlock* codeBlock = callFrame->codeBlock();
        if (codeBlock)
            dataLog("In call frame ", RawPointer(callFrame), " for code block ", *codeBlock, "\n");
        else
            dataLog("In call frame ", RawPointer(callFrame), " with null CodeBlock\n");
        CRASH();
    }

    // Log the throw to the ShadowChicken shadow stack, starting from the
    // caller frame when unwinding begins there.
    ExecState* shadowChickenTopFrame = callFrame;
    if (unwindStart == UnwindFromCallerFrame) {
        VMEntryFrame* topVMEntryFrame = vm->topVMEntryFrame;
        shadowChickenTopFrame = callFrame->callerFrame(topVMEntryFrame);
    }
    vm->shadowChicken().log(*vm, shadowChickenTopFrame, ShadowChicken::Packet::throwPacket());

    Exception* exception = vm->exception();
    RELEASE_ASSERT(exception);
    HandlerInfo* handler = vm->interpreter->unwind(*vm, callFrame, exception, unwindStart); // This may update callFrame.

    void* catchRoutine;
    Instruction* catchPCForInterpreter = 0;
    if (handler) {
        // handler->target is meaningless for getting a code offset when catching
        // the exception in a DFG/FTL frame. This bytecode target offset could be
        // something that's in an inlined frame, which means an array access
        // with this bytecode offset in the machine frame is utterly meaningless
        // and can cause an overflow. OSR exit properly exits to handler->target
        // in the proper frame.
        if (!JITCode::isOptimizingJIT(callFrame->codeBlock()->jitType()))
            catchPCForInterpreter = &callFrame->codeBlock()->instructions()[handler->target];
#if ENABLE(JIT)
        catchRoutine = handler->nativeCode.executableAddress();
#else
        catchRoutine = catchPCForInterpreter->u.pointer;
#endif
    } else
        catchRoutine = LLInt::getCodePtr(handleUncaughtException);

    ASSERT(bitwise_cast<uintptr_t>(callFrame) < bitwise_cast<uintptr_t>(vm->topVMEntryFrame));

    // Publish where execution resumes after the unwind.
    vm->callFrameForCatch = callFrame;
    vm->targetMachinePCForThrow = catchRoutine;
    vm->targetInterpreterPCForThrow = catchPCForInterpreter;

    RELEASE_ASSERT(catchRoutine);
}
void cryptographicallyRandomValuesFromOS(unsigned char* buffer, size_t length) { #if OS(UNIX) int fd = open("/dev/urandom", O_RDONLY, 0); if (fd < 0) CRASH(); // We need /dev/urandom for this API to work... if (read(fd, buffer, length) != static_cast<ssize_t>(length)) CRASH(); close(fd); #elif OS(WINDOWS) HCRYPTPROV hCryptProv = 0; if (!CryptAcquireContext(&hCryptProv, 0, MS_DEF_PROV, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT)) CRASH(); if (!CryptGenRandom(hCryptProv, length, buffer)) CRASH(); CryptReleaseContext(hCryptProv, 0); #else #error "This configuration doesn't have a strong source of randomness." // WARNING: When adding new sources of OS randomness, the randomness must // be of cryptographic quality! #endif }
// SIGSYS handler invoked when a seccomp filter traps a syscall: resolves the
// syscall locally when possible, otherwise forwards it to the broker process.
static void SIGSYSHandler(int signal, siginfo_t* info, void* data)
{
    // This handler must only ever run for seccomp-generated SIGSYS.
    if (signal != SIGSYS || info->si_code != SYS_SECCOMP)
        CRASH();

    ucontext_t* ucontext = static_cast<ucontext_t*>(data);
    if (!ucontext)
        CRASH();

    SeccompBrokerClient* client = &SeccompBrokerClient::shared();

    // Special-cased syscall that can be answered directly from the context.
    if (client->handleIfOpeningOnlineCPUCount(&ucontext->uc_mcontext))
        return;

    // createFromContext might return a nullptr if it is able to resolve the
    // syscall locally without sending it to the broker process. In this case,
    // we just return. Examples of locally resolved syscalls are the ones
    // with cached resources and invalid arguments.
    std::unique_ptr<Syscall> syscall = Syscall::createFromContext(ucontext);
    if (!syscall)
        return;

    client->dispatch(syscall.get());
}
// Moves the handle's node onto the list (immediate/weak/strong) that matches
// the category of the value being stored into the slot.
void HandleHeap::writeBarrier(HandleSlot slot, const JSValue& value)
{
    // Forbid assignment to handles during the finalization phase, since it would violate many GC invariants.
    // File a bug with stack trace if you hit this.
    if (m_nextToFinalize)
        CRASH();

    // If the slot keeps the same category (same emptiness and same cell-ness),
    // it is already on the correct list and nothing needs to move.
    if (!value == !*slot && slot->isCell() == value.isCell())
        return;

    Node* node = toNode(slot);
#if ENABLE(GC_VALIDATION)
    if (!isLiveNode(node))
        CRASH();
#endif
    SentinelLinkedList<Node>::remove(node);

    // Empty or non-cell values go on the immediate list.
    if (!value || !value.isCell()) {
        m_immediateList.push(node);
        return;
    }

    // Cell values go on the weak or strong list depending on the node.
    if (node->isWeak()) {
        m_weakList.push(node);
#if ENABLE(GC_VALIDATION)
        if (!isLiveNode(node))
            CRASH();
#endif
        return;
    }

    m_strongList.push(node);
#if ENABLE(GC_VALIDATION)
    if (!isLiveNode(node))
        CRASH();
#endif
}
int cmp(const GF2m& a, const GF2m& b) { // return 0 - equal; 1 - non equal if (a.BitLength > b.BitLength) CRASH("fields sizes are not equal"); uint32* pn1 = a.n; uint32* pn2 = b.n; for (int32 i=a.GetInt32Length(); i >= 0; i--) { if (pn1[i] != pn2[i]) return 1; } return 0; }
// Reserves the fixed virtual-memory pool used for JIT-generated code and
// records its base address for later range checks.
FixedVMPoolExecutableAllocator()
    : MetaAllocator(jitAllocationGranule) // round up all allocations to the JIT allocation granule
{
    m_reservation = PageReservation::reserveWithGuardPages(fixedExecutableMemoryPoolSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
#if !(ENABLE(CLASSIC_INTERPRETER) || ENABLE(LLINT))
    // Without an interpreter fallback, failing to reserve JIT memory is fatal.
    if (!m_reservation)
        CRASH();
#endif
    if (m_reservation) {
        ASSERT(m_reservation.size() == fixedExecutableMemoryPoolSize);
        addFreshFreeSpace(m_reservation.base(), m_reservation.size());

        startOfFixedExecutableMemoryPool = reinterpret_cast<uintptr_t>(m_reservation.base());
    }
}
// Deep-copies this rule by dispatching to the concrete subclass's copy()
// based on the rule type. Types with no copy support fall through to CRASH().
PassRef<StyleRuleBase> StyleRuleBase::copy() const
{
    switch (type()) {
    case Style:
        return static_cast<const StyleRule*>(this)->copy();
    case Page:
        return static_cast<const StyleRulePage*>(this)->copy();
    case FontFace:
        return static_cast<const StyleRuleFontFace*>(this)->copy();
    case Media:
        return static_cast<const StyleRuleMedia*>(this)->copy();
#if ENABLE(CSS3_CONDITIONAL_RULES)
    case Supports:
        return static_cast<const StyleRuleSupports*>(this)->copy();
#endif
#if ENABLE(CSS_REGIONS)
    case Region:
        return static_cast<const StyleRuleRegion*>(this)->copy();
#endif
    case Keyframes:
        return static_cast<const StyleRuleKeyframes*>(this)->copy();
#if ENABLE(SHADOW_DOM)
    case HostInternal:
        return static_cast<const StyleRuleHost*>(this)->copy();
#endif
#if ENABLE(CSS_DEVICE_ADAPTATION)
    case Viewport:
        return static_cast<const StyleRuleViewport*>(this)->copy();
#endif
#if ENABLE(CSS_SHADERS)
    case Filter:
        return static_cast<const StyleRuleFilter*>(this)->copy();
#endif
    case Import:
        // FIXME: Copy import rules.
        break;
    case Unknown:
    case Charset:
    case Keyframe:
#if !ENABLE(CSS_REGIONS)
    case Region:
#endif
        break;
    }
    CRASH();
    // HACK: EFL won't build without this (old GCC with crappy -Werror=return-type)
    // Unreachable: CRASH() above never returns.
    return PassRef<StyleRuleBase>(*static_cast<StyleRuleBase*>(nullptr));
}
// Conservatively scans the register file for GC roots, marking each one and
// adding it to `roots`.
void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
{
    ASSERT(isValidThreadState(m_globalData));
    // No other heap operation may be in progress while we scan.
    if (m_operationInProgress != NoOperation)
        CRASH();
    m_operationInProgress = Collection;

    ConservativeRoots registerFileRoots(&m_objectSpace.blocks());
    registerFile().gatherConservativeRoots(registerFileRoots);
    size_t registerFileRootCount = registerFileRoots.size();
    JSCell** registerRoots = registerFileRoots.roots();
    for (size_t i = 0; i < registerFileRootCount; i++) {
        setMarked(registerRoots[i]);
        roots.add(registerRoots[i]);
    }

    m_operationInProgress = NoOperation;
}
// Backend success callback carrying a serialized value: deserializes it and
// forwards to onSuccessInternal().
void IDBRequest::onSuccess(PassRefPtr<SerializedScriptValue> serializedScriptValue)
{
    IDB_TRACE("IDBRequest::onSuccess(SerializedScriptValue)");
    if (!shouldEnqueueEvent())
        return;

#if USE(V8)
    // Deserialization must run inside the request's V8 context; a missing
    // context here is fatal.
    v8::HandleScope handleScope;
    v8::Local<v8::Context> context = toV8Context(scriptExecutionContext(), m_worldContextHandle);
    if (context.IsEmpty())
        CRASH();
    v8::Context::Scope contextScope(context);
#endif

    ScriptValue value = deserializeIDBValue(scriptExecutionContext(), serializedScriptValue);
    onSuccessInternal(value);
}
// Returns the physical pages backing [address, address + bytes) to the OS,
// using the best mechanism the platform offers.
void OSAllocator::decommit(void* address, size_t bytes)
{
#if OS(LINUX)
    // Drop the pages, then remove all access so any touch of decommitted
    // memory faults instead of silently re-committing it.
    madvise(address, bytes, MADV_DONTNEED);
    if (mprotect(address, bytes, PROT_NONE))
        CRASH();
#elif HAVE(MADV_FREE_REUSE)
    // Retry while the kernel reports EAGAIN.
    while (madvise(address, bytes, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
#elif HAVE(MADV_FREE)
    while (madvise(address, bytes, MADV_FREE) == -1 && errno == EAGAIN) { }
#elif HAVE(MADV_DONTNEED)
    while (madvise(address, bytes, MADV_DONTNEED) == -1 && errno == EAGAIN) { }
#else
    // No decommit primitive on this platform; the memory simply stays committed.
    UNUSED_PARAM(address);
    UNUSED_PARAM(bytes);
#endif
}
// Prints the human-readable name of a profiler compilation kind.
// An unknown kind is a fatal programming error.
void printInternal(PrintStream& out, JSC::Profiler::CompilationKind kind)
{
    const char* name;
    switch (kind) {
    case JSC::Profiler::LLInt:
        name = "LLInt";
        break;
    case JSC::Profiler::Baseline:
        name = "Baseline";
        break;
    case JSC::Profiler::DFG:
        name = "DFG";
        break;
    default:
        CRASH();
        return;
    }
    out.print(name);
}
static bool enableAssembler(ExecutableAllocator& executableAllocator) { if (!Options::useJIT() && !Options::useRegExpJIT()) return false; if (!executableAllocator.isValid()) { if (Options::crashIfCantAllocateJITMemory()) CRASH(); return false; } #if USE(CF) || OS(UNIX) char* canUseJITString = getenv("JavaScriptCoreUseJIT"); return !canUseJITString || atoi(canUseJITString); #else return true; #endif }
// Creates a UStringImpl of `length` code units with uninitialized contents,
// returning a pointer to the character storage through `data`.
PassRefPtr<UStringImpl> UStringImpl::createUninitialized(unsigned length, UChar*& data)
{
    if (!length) {
        data = 0;
        return empty();
    }

    // Allocate a single buffer large enough to contain the StringImpl
    // struct as well as the data which it contains. This removes one
    // heap allocation from this call.
    // Guard against overflow in the size computation below.
    if (length > ((std::numeric_limits<size_t>::max() - sizeof(UStringImpl)) / sizeof(UChar)))
        CRASH();
    size_t size = sizeof(UStringImpl) + length * sizeof(UChar);
    UStringImpl* string = static_cast<UStringImpl*>(fastMalloc(size));

    // The character storage starts immediately after the header struct.
    data = reinterpret_cast<UChar*>(string + 1);
    return adoptRef(new (string) UStringImpl(length));
}
// Runs the flush-liveness analysis to a fixpoint over the SSA graph, then
// validates the result at the root block. Always returns true.
bool run()
{
    ASSERT(m_graph.m_form == SSA);

    // Liveness is a backwards analysis; the roots are the blocks that
    // end in a terminal (Return/Unreachable). For now, we
    // use a fixpoint formulation since liveness is a rapid analysis with
    // convergence guaranteed after O(connectivity).

    // Start by assuming that everything is dead.
    for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
        BasicBlock* block = m_graph.block(blockIndex);
        if (!block)
            continue;
        block->ssa->flushFormatAtHead.fill(DeadFlush);
        block->ssa->flushFormatAtTail.fill(DeadFlush);
    }

    // Iterate backwards over all blocks until nothing changes.
    do {
        m_changed = false;
        for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;)
            process(blockIndex);
    } while (m_changed);

    // Sanity check at the root block: arguments may be dead or flushed as
    // JSValue; everything else must be dead. Any other result indicates a
    // broken analysis, so dump the IR and crash.
    Operands<FlushFormat>& root = m_graph.block(0)->ssa->flushFormatAtHead;
    for (unsigned i = root.size(); i--;) {
        if (root.isArgument(i)) {
            if (root[i] == DeadFlush || root[i] == FlushedJSValue)
                continue;
        } else {
            if (root[i] == DeadFlush)
                continue;
        }
        dataLog(
            "Bad flush liveness analysis result: bad flush liveness at root: ",
            root, "\n");
        dataLog("IR at time of error:\n");
        m_graph.dump();
        CRASH();
    }

    return true;
}
// Backend success callback for a cursor advance: deserializes the value,
// re-attaches the pending cursor with the new position, and enqueues the
// success event.
void IDBRequest::onSuccess(PassRefPtr<IDBKey> key, PassRefPtr<IDBKey> primaryKey, PassRefPtr<SerializedScriptValue> serializedValue)
{
    IDB_TRACE("IDBRequest::onSuccess(key, primaryKey, value)");
    if (!shouldEnqueueEvent())
        return;

#if USE(V8)
    // Deserialization must run inside the request's V8 context; a missing
    // context here is fatal.
    v8::HandleScope handleScope;
    v8::Local<v8::Context> context = toV8Context(scriptExecutionContext(), m_worldContextHandle);
    if (context.IsEmpty())
        CRASH();
    v8::Context::Scope contextScope(context);
#endif

    ScriptValue value = deserializeIDBValue(scriptExecutionContext(), serializedValue);
    // This overload is only used for cursor results, so a cursor must be pending.
    ASSERT(m_pendingCursor);
    setResultCursor(m_pendingCursor.release(), key, primaryKey, value);
    enqueueEvent(createSuccessEvent());
}
// Encodes a UTF-16 buffer as UTF-8. The UnencodableHandling parameter is
// unused because UTF-8 can represent every code point.
CString TextCodecUTF8::encode(const UChar* characters, size_t length, UnencodableHandling)
{
    // Worst case is 3 UTF-8 bytes per UTF-16 code unit: BMP characters are
    // one code unit and need at most 3 bytes; non-BMP characters are two
    // code units and need at most 4 bytes (i.e. 2 bytes per unit).
    if (length > numeric_limits<size_t>::max() / 3)
        CRASH();
    Vector<uint8_t> output(length * 3);

    size_t inputIndex = 0;
    size_t outputLength = 0;
    while (inputIndex < length) {
        // U16_NEXT advances inputIndex past one code point (1 or 2 units).
        UChar32 codePoint;
        U16_NEXT(characters, inputIndex, length, codePoint);
        U8_APPEND_UNSAFE(output.data(), outputLength, codePoint);
    }

    return CString(reinterpret_cast<char*>(output.data()), outputLength);
}
static void initializeSQLiteIfNecessary() { static std::once_flag flag; std::call_once(flag, [] { // It should be safe to call this outside of std::call_once, since it is documented to be // completely threadsafe. But in the past it was not safe, and the SQLite developers still // aren't confident that it really is, and we still support ancient versions of SQLite. So // std::call_once is used to stay on the safe side. See bug #143245. int ret = sqlite3_initialize(); if (ret != SQLITE_OK) { #if SQLITE_VERSION_NUMBER >= 3007015 WTFLogAlways("Failed to initialize SQLite: %s", sqlite3_errstr(ret)); #else WTFLogAlways("Failed to initialize SQLite"); #endif CRASH(); } }); }