Example #1
0
void EventListenerMap::clear()
{
    auto locker = holdLock(m_lock);
    
    assertNoActiveIterators();

    m_entries.clear();
}
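For context: every example here uses WTF's holdLock, which returns a RAII locker that releases the lock when it goes out of scope. A minimal sketch of that idiom, assuming a Lock type with lock()/unlock(); this is an illustration of the pattern, not WTF's actual implementation:

template<typename LockType>
class Locker {
public:
    explicit Locker(LockType& lock)
        : m_lock(lock)
    {
        m_lock.lock();
    }
    ~Locker() { m_lock.unlock(); }
    Locker(const Locker&) = delete;
    Locker& operator=(const Locker&) = delete;
private:
    LockType& m_lock;
};

template<typename LockType>
Locker<LockType> holdLock(LockType& lock)
{
    // Relies on C++17 guaranteed copy elision, since Locker is not movable.
    return Locker<LockType>(lock);
}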
void ConcurrentPtrHashSet::clear()
{
    // This is just in case. It does not make it OK for other threads to call add(). But it might prevent
    // some bad crashes if we did make that mistake.
    auto locker = holdLock(m_lock); 
    
    m_allTables.clear();
    initialize();
}
void resetInstructionCacheOnAllThreads()
{
    auto locker = holdLock(wasmThreads().getLock());
    for (auto& thread : wasmThreads().threads(locker)) {
        sendMessage(thread.get(), [] (const PlatformRegisters&) {
            // It's likely that the signal handler will already reset the instruction cache but we might as well be sure.
            WTF::crossModifyingCodeFence();
        });
    }
}
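Note how the locker itself is passed to threads(locker): several of these examples use the locker object as proof that the lock is held. A minimal self-contained sketch of that convention; AbstractLocker, MutexLocker, and WasmThreads here are hypothetical stand-ins, not WTF's real types:

#include <mutex>
#include <vector>

class AbstractLocker {
protected:
    AbstractLocker() = default;
};

class MutexLocker : public AbstractLocker {
public:
    explicit MutexLocker(std::mutex& lock)
        : m_lock(lock)
    {
        m_lock.lock();
    }
    ~MutexLocker() { m_lock.unlock(); }
private:
    std::mutex& m_lock;
};

class WasmThreads {
public:
    std::mutex& getLock() { return m_lock; }

    // Requiring an AbstractLocker& makes it hard to call this accessor without
    // first taking the lock; the parameter carries no data and is unused.
    std::vector<int>& threads(const AbstractLocker&) { return m_threads; }

private:
    std::mutex m_lock;
    std::vector<int> m_threads;
};

int main()
{
    WasmThreads wasmThreads;
    MutexLocker locker(wasmThreads.getLock());
    wasmThreads.threads(locker).push_back(1);
}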
void ConcurrentPtrHashSet::deleteOldTables()
{
    // This is just in case. It does not make it OK for other threads to call add(). But it might prevent
    // some bad crashes if we did make that mistake.
    auto locker = holdLock(m_lock); 
    
    m_allTables.removeAllMatching(
        [&] (std::unique_ptr<Table>& table) -> bool {
            return table.get() != m_table.loadRelaxed();
        });
}
MarkedBlock::Handle* run() override
{
    // Unlocked fast path: m_done only ever flips from false to true, so a stale
    // read just means taking the locked path below one extra time.
    if (m_done)
        return nullptr;
    auto locker = holdLock(m_lock);
    m_index = m_directory.m_markingNotEmpty.findBit(m_index, true);
    if (m_index >= m_directory.m_blocks.size()) {
        m_done = true;
        return nullptr;
    }
    return m_directory.m_blocks[m_index++];
}
void JSSegmentedVariableObject::visitChildren(JSCell* cell, SlotVisitor& slotVisitor)
{
    JSSegmentedVariableObject* thisObject = jsCast<JSSegmentedVariableObject*>(cell);
    ASSERT_GC_OBJECT_INHERITS(thisObject, info());
    Base::visitChildren(thisObject, slotVisitor);
    
    // FIXME: We could avoid locking here if SegmentedVector was lock-free. It could be made lock-free
    // relatively easily.
    auto locker = holdLock(thisObject->m_lock);
    for (unsigned i = thisObject->m_variables.size(); i--;)
        slotVisitor.appendHidden(thisObject->m_variables[i]);
}
void BlockDirectory::stopAllocatingForGood()
{
    if (false)
        dataLog(RawPointer(this), ": BlockDirectory::stopAllocatingForGood!\n");
    
    m_localAllocators.forEach(
        [&] (LocalAllocator* allocator) {
            allocator->stopAllocatingForGood();
        });

    auto locker = holdLock(m_localAllocatorsLock);
    while (!m_localAllocators.isEmpty())
        m_localAllocators.begin()->remove();
}
Example #8
0
void EventListenerMap::replace(const AtomicString& eventType, EventListener& oldListener, Ref<EventListener>&& newListener, const RegisteredEventListener::Options& options)
{
    auto locker = holdLock(m_lock);
    
    assertNoActiveIterators();

    auto* listeners = find(eventType);
    ASSERT(listeners);
    size_t index = findListener(*listeners, oldListener, options.capture);
    ASSERT(index != notFound);
    auto& registeredListener = listeners->at(index);
    registeredListener->markAsRemoved();
    registeredListener = RegisteredEventListener::create(WTFMove(newListener), options);
}
Example #9
0
void EventListenerMap::removeFirstEventListenerCreatedFromMarkup(const AtomicString& eventType)
{
    auto locker = holdLock(m_lock);
    
    assertNoActiveIterators();

    for (unsigned i = 0; i < m_entries.size(); ++i) {
        if (m_entries[i].first == eventType) {
            removeFirstListenerCreatedFromMarkup(*m_entries[i].second);
            if (m_entries[i].second->isEmpty())
                m_entries.remove(i);
            return;
        }
    }
}
bool BlockDirectory::isPagedOut(MonotonicTime deadline)
{
    unsigned itersSinceLastTimeCheck = 0;
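    // Note that holdLock()'s return value is discarded, so each block's lock is
    // acquired and released immediately. The point is presumably just to touch
    // each block's memory in turn, so the deadline check below measures how
    // long paged-out blocks take to fault back in.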
    for (auto* block : m_blocks) {
        if (block)
            holdLock(block->block().lock());
        ++itersSinceLastTimeCheck;
        if (itersSinceLastTimeCheck >= Heap::s_timeCheckResolution) {
            MonotonicTime currentTime = MonotonicTime::now();
            if (currentTime > deadline)
                return true;
            itersSinceLastTimeCheck = 0;
        }
    }
    return false;
}
Example #11
0
bool EventListenerMap::remove(const AtomicString& eventType, EventListener& listener, bool useCapture)
{
    auto locker = holdLock(m_lock);
    
    assertNoActiveIterators();

    for (unsigned i = 0; i < m_entries.size(); ++i) {
        if (m_entries[i].first == eventType) {
            bool wasRemoved = removeListenerFromVector(*m_entries[i].second, listener, useCapture);
            if (m_entries[i].second->isEmpty())
                m_entries.remove(i);
            return wasRemoved;
        }
    }

    return false;
}
Example #12
0
bool EventListenerMap::add(const AtomicString& eventType, Ref<EventListener>&& listener, const RegisteredEventListener::Options& options)
{
    auto locker = holdLock(m_lock);
    
    assertNoActiveIterators();

    if (auto* listeners = find(eventType)) {
        if (findListener(*listeners, listener, options.capture) != notFound)
            return false; // Duplicate listener.
        listeners->append(RegisteredEventListener::create(WTFMove(listener), options));
        return true;
    }

    auto listeners = std::make_unique<EventListenerVector>();
    listeners->uncheckedAppend(RegisteredEventListener::create(WTFMove(listener), options));
    m_entries.append({ eventType, WTFMove(listeners) });
    return true;
}
void BlockDirectory::removeBlock(MarkedBlock::Handle* block)
{
    ASSERT(block->directory() == this);
    ASSERT(m_blocks[block->index()] == block);
    
    subspace()->didRemoveBlock(block->index());
    
    m_blocks[block->index()] = nullptr;
    m_freeBlockIndices.append(block->index());
    
    // The temporary locker returned by holdLock() lives until the end of the
    // full expression, so m_bitvectorLock stays held across the entire
    // forEachBitVector() call.
    forEachBitVector(
        holdLock(m_bitvectorLock),
        [&] (FastBitVector& vector) {
            vector[block->index()] = false;
        });
    
    block->didRemoveFromDirectory();
}
Example #14
0
void JSDOMGlobalObject::visitChildren(JSCell* cell, SlotVisitor& visitor)
{
    JSDOMGlobalObject* thisObject = jsCast<JSDOMGlobalObject*>(cell);
    ASSERT_GC_OBJECT_INHERITS(thisObject, info());
    Base::visitChildren(thisObject, visitor);
    
    {
        auto locker = holdLock(thisObject->m_gcLock);
        
        for (auto& structure : thisObject->structures(locker).values())
            visitor.append(&structure);
        
        for (auto& constructor : thisObject->constructors(locker).values())
            visitor.append(&constructor);
        
        for (auto& deferredPromise : thisObject->deferredPromises(locker))
            deferredPromise->visitAggregate(visitor);
    }

    thisObject->m_builtinInternalFunctions.visit(visitor);
}
void ConcurrentPtrHashSet::resizeIfNecessary()
{
    auto locker = holdLock(m_lock);
    Table* table = m_table.loadRelaxed();
    if (table->load.loadRelaxed() < table->maxLoad())
        return;
    
    std::unique_ptr<Table> newTable = Table::create(table->size * 2);
    unsigned mask = newTable->mask;
    unsigned load = 0;
    for (unsigned i = 0; i < table->size; ++i) {
        void* ptr = table->array[i].loadRelaxed();
        if (!ptr)
            continue;
        
        unsigned startIndex = hash(ptr) & mask;
        unsigned index = startIndex;
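        // Linear probing: scan forward from the hashed slot until a free entry
        // is found. The set never stores duplicates, so re-encountering ptr, or
        // wrapping all the way back to startIndex, would indicate corruption.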
        for (;;) {
            Atomic<void*>& entryRef = newTable->array[index];
            void* entry = entryRef.loadRelaxed();
            if (!entry) {
                entryRef.storeRelaxed(ptr);
                break;
            }
            RELEASE_ASSERT(entry != ptr);
            index = (index + 1) & mask;
            RELEASE_ASSERT(index != startIndex);
        }
        
        load++;
    }
    
    newTable->load.storeRelaxed(load);
    
    // Publish the new table, but keep the old one alive in m_allTables:
    // concurrent readers may still be probing it. deleteOldTables() reclaims
    // retired tables later, once the caller knows no reader can hold them.
    m_table.store(newTable.get());
    m_allTables.append(WTFMove(newTable));
}
ThreadGroup::~ThreadGroup()
{
    auto locker = holdLock(m_lock);
    for (auto& thread : m_threads)
        thread->removeFromThreadGroup(locker, *this);
}
Example #17
0
// We are creating a bunch of threads that touch the main thread's stack. This will make ASAN unhappy.
// The reason this is OK is that we guarantee that the main thread doesn't continue until all threads
// that could touch its stack are done executing.
SUPPRESS_ASAN 
void Plan::run()
{
    if (!parseAndValidateModule())
        return;

    auto tryReserveCapacity = [this] (auto& vector, size_t size, const char* what) {
        if (UNLIKELY(!vector.tryReserveCapacity(size))) {
            StringBuilder builder;
            builder.appendLiteral("Failed allocating enough space for ");
            builder.appendNumber(size);
            builder.append(what);
            m_errorMessage = builder.toString();
            return false;
        }
        return true;
    };

    if (!tryReserveCapacity(m_wasmExitStubs, m_moduleInformation->importFunctionSignatureIndices.size(), " WebAssembly to JavaScript stubs")
        || !tryReserveCapacity(m_unlinkedWasmToWasmCalls, m_functionLocationInBinary.size(), " unlinked WebAssembly to WebAssembly calls")
        || !tryReserveCapacity(m_wasmInternalFunctions, m_functionLocationInBinary.size(), " WebAssembly functions")
        || !tryReserveCapacity(m_compilationContexts, m_functionLocationInBinary.size(), " compilation contexts"))
        return;

    m_unlinkedWasmToWasmCalls.resize(m_functionLocationInBinary.size());
    m_wasmInternalFunctions.resize(m_functionLocationInBinary.size());
    m_compilationContexts.resize(m_functionLocationInBinary.size());

    for (unsigned importIndex = 0; importIndex < m_moduleInformation->imports.size(); ++importIndex) {
        Import* import = &m_moduleInformation->imports[importIndex];
        if (import->kind != ExternalKind::Function)
            continue;
        unsigned importFunctionIndex = m_wasmExitStubs.size();
        if (verbose)
            dataLogLn("Processing import function number ", importFunctionIndex, ": ", import->module, ": ", import->field);
        SignatureIndex signatureIndex = m_moduleInformation->importFunctionSignatureIndices.at(import->kindIndex);
        m_wasmExitStubs.uncheckedAppend(exitStubGenerator(m_vm, m_callLinkInfos, signatureIndex, importFunctionIndex));
    }

    m_currentIndex = 0;

    auto doWork = [this] {
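        // Worker loop: claim the next unprocessed function index under m_lock,
        // then parse and compile that function without holding the lock.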
        while (true) {
            uint32_t functionIndex;
            {
                auto locker = holdLock(m_lock);
                if (m_currentIndex >= m_functionLocationInBinary.size())
                    return;
                functionIndex = m_currentIndex;
                ++m_currentIndex;
            }

            const uint8_t* functionStart = m_source + m_functionLocationInBinary[functionIndex].start;
            size_t functionLength = m_functionLocationInBinary[functionIndex].end - m_functionLocationInBinary[functionIndex].start;
            ASSERT(functionLength <= m_sourceLength);
            SignatureIndex signatureIndex = m_moduleInformation->internalFunctionSignatureIndices[functionIndex];
            const Signature* signature = SignatureInformation::get(m_vm, signatureIndex);
            unsigned functionIndexSpace = m_wasmExitStubs.size() + functionIndex;
            ASSERT_UNUSED(functionIndexSpace, m_moduleInformation->signatureIndexFromFunctionIndexSpace(functionIndexSpace) == signatureIndex);
            ASSERT(validateFunction(m_vm, functionStart, functionLength, signature, *m_moduleInformation, m_moduleSignatureIndicesToUniquedSignatureIndices));

            m_unlinkedWasmToWasmCalls[functionIndex] = Vector<UnlinkedWasmToWasmCall>();
            auto parseAndCompileResult = parseAndCompile(*m_vm, m_compilationContexts[functionIndex], functionStart, functionLength, signature, m_unlinkedWasmToWasmCalls[functionIndex], *m_moduleInformation, m_moduleSignatureIndicesToUniquedSignatureIndices);

            if (UNLIKELY(!parseAndCompileResult)) {
                auto locker = holdLock(m_lock);
                if (!m_errorMessage) {
                    // Multiple compiles could fail simultaneously. We arbitrarily choose the first.
                    m_errorMessage = makeString(parseAndCompileResult.error(), ", in function at index ", String::number(functionIndex)); // FIXME make this an Expected.
                }
                m_currentIndex = m_functionLocationInBinary.size();

                // We will terminate on the next execution.
                continue; 
            }

            m_wasmInternalFunctions[functionIndex] = WTFMove(*parseAndCompileResult);
        }
    };

    MonotonicTime startTime;
    if (verbose || Options::reportCompileTimes())
        startTime = MonotonicTime::now();

    uint32_t threadCount = Options::useConcurrentJIT() ? WTF::numberOfProcessorCores() : 1;
    uint32_t numWorkerThreads = threadCount - 1;
    Vector<ThreadIdentifier> threads;
    threads.reserveCapacity(numWorkerThreads);
    for (uint32_t i = 0; i < numWorkerThreads; i++)
        threads.uncheckedAppend(createThread("jsc.wasm-b3-compilation.thread", doWork));

    doWork(); // Let the main thread do some work too.

    for (uint32_t i = 0; i < numWorkerThreads; i++)
        waitForThreadCompletion(threads[i]);

    for (uint32_t functionIndex = 0; functionIndex < m_functionLocationInBinary.size(); functionIndex++) {
        {
            CompilationContext& context = m_compilationContexts[functionIndex];
            SignatureIndex signatureIndex = m_moduleInformation->internalFunctionSignatureIndices[functionIndex];
            String signatureDescription = SignatureInformation::get(m_vm, signatureIndex)->toString();
            {
                LinkBuffer linkBuffer(*m_vm, *context.wasmEntrypointJIT, nullptr);
                m_wasmInternalFunctions[functionIndex]->wasmEntrypoint.compilation =
                    std::make_unique<B3::Compilation>(FINALIZE_CODE(linkBuffer, ("WebAssembly function[%i] %s", functionIndex, signatureDescription.ascii().data())), WTFMove(context.wasmEntrypointByproducts));
            }

            {
                LinkBuffer linkBuffer(*m_vm, *context.jsEntrypointJIT, nullptr);
                linkBuffer.link(context.jsEntrypointToWasmEntrypointCall, FunctionPtr(m_wasmInternalFunctions[functionIndex]->wasmEntrypoint.compilation->code().executableAddress()));

                m_wasmInternalFunctions[functionIndex]->jsToWasmEntrypoint.compilation =
                    std::make_unique<B3::Compilation>(FINALIZE_CODE(linkBuffer, ("JavaScript->WebAssembly entrypoint[%i] %s", functionIndex, signatureDescription.ascii().data())), WTFMove(context.jsEntrypointByproducts));
            }
        }
    }

    if (verbose || Options::reportCompileTimes()) {
        dataLogLn("Took ", (MonotonicTime::now() - startTime).microseconds(),
            " us to compile and link the module");
    }

    // Patch the call sites for each WebAssembly function.
    for (auto& unlinked : m_unlinkedWasmToWasmCalls) {
        for (auto& call : unlinked) {
            void* executableAddress;
            if (m_moduleInformation->isImportedFunctionFromFunctionIndexSpace(call.functionIndex)) {
                // FIXME imports could have been linked in B3, instead of generating a patchpoint. This condition should be replaced by a RELEASE_ASSERT. https://bugs.webkit.org/show_bug.cgi?id=166462
                executableAddress = call.target == UnlinkedWasmToWasmCall::Target::ToJs
                    ? m_wasmExitStubs.at(call.functionIndex).wasmToJs.code().executableAddress()
                    : m_wasmExitStubs.at(call.functionIndex).wasmToWasm.code().executableAddress();
            } else {
                ASSERT(call.target != UnlinkedWasmToWasmCall::Target::ToJs);
                executableAddress = m_wasmInternalFunctions.at(call.functionIndex - m_wasmExitStubs.size())->wasmEntrypoint.compilation->code().executableAddress();
            }
            MacroAssembler::repatchCall(call.callLocation, CodeLocationLabel(executableAddress));
        }
    }

    m_failed = false;
}
BlockDirectory::~BlockDirectory()
{
    auto locker = holdLock(m_localAllocatorsLock);
    while (!m_localAllocators.isEmpty())
        m_localAllocators.begin()->remove();
}
ThreadGroupAddResult ThreadGroup::add(Thread& thread)
{
    auto locker = holdLock(m_lock);
    return add(locker, thread);
}