Example No. 1
void AutoplayUmaHelper::onVisibilityChangedForMutedVideoOffscreenDuration(
    bool isVisible) {
  if (isVisible == m_isVisible)
    return;

  if (isVisible)
    m_mutedVideoAutoplayOffscreenDurationMS +=
        static_cast<int64_t>(monotonicallyIncreasingTimeMS()) -
        m_mutedVideoAutoplayOffscreenStartTimeMS;
  else
    m_mutedVideoAutoplayOffscreenStartTimeMS =
        static_cast<int64_t>(monotonicallyIncreasingTimeMS());

  m_isVisible = isVisible;
}
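
Below is a minimal standalone sketch of the same offscreen-duration bookkeeping (see also Examples No. 5 and No. 7 further down), assuming plain C++17 with std::chrono::steady_clock standing in for monotonicallyIncreasingTimeMS(); the OffscreenStopwatch class is illustrative and not part of Blink.

#include <chrono>
#include <cstdint>

class OffscreenStopwatch {
 public:
  // Mirrors maybeStartRecordingMutedVideoOffscreenDuration() (Example No. 7):
  // recording begins with the element assumed offscreen.
  OffscreenStopwatch()
      : m_isVisible(false),
        m_offscreenStart(std::chrono::steady_clock::now()) {}

  // Mirrors onVisibilityChangedForMutedVideoOffscreenDuration() above: time
  // accumulates only while the element is not visible.
  void onVisibilityChanged(bool isVisible) {
    if (isVisible == m_isVisible)
      return;

    if (isVisible)
      m_offscreenDuration +=
          std::chrono::steady_clock::now() - m_offscreenStart;
    else
      m_offscreenStart = std::chrono::steady_clock::now();

    m_isVisible = isVisible;
  }

  int64_t offscreenDurationMS() const {
    return std::chrono::duration_cast<std::chrono::milliseconds>(
               m_offscreenDuration)
        .count();
  }

 private:
  bool m_isVisible;
  std::chrono::steady_clock::time_point m_offscreenStart;
  std::chrono::steady_clock::duration m_offscreenDuration{};
};

offscreenDurationMS() here corresponds to the value that Example No. 5 clamps and records into the UMA histogram.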
Example No. 2
void NotificationImageLoader::didFinishLoading(unsigned long resourceIdentifier,
                                               double finishTime) {
  // If this has been stopped, it is not desirable to trigger further work;
  // a shutdown of some sort is in progress.
  if (m_stopped)
    return;

  DEFINE_THREAD_SAFE_STATIC_LOCAL(
      CustomCountHistogram, finishedTimeHistogram,
      new CustomCountHistogram("Notifications.Icon.LoadFinishTime", 1,
                               1000 * 60 * 60 /* 1 hour max */,
                               50 /* buckets */));
  finishedTimeHistogram.count(monotonicallyIncreasingTimeMS() - m_startTime);

  if (m_data) {
    DEFINE_THREAD_SAFE_STATIC_LOCAL(
        CustomCountHistogram, fileSizeHistogram,
        new CustomCountHistogram("Notifications.Icon.FileSize", 1,
                                 10000000 /* ~10mb max */, 50 /* buckets */));
    fileSizeHistogram.count(m_data->size());

    std::unique_ptr<ImageDecoder> decoder = ImageDecoder::create(
        m_data, true /* dataComplete */, ImageDecoder::AlphaPremultiplied,
        ImageDecoder::ColorSpaceApplied);
    if (decoder) {
      // The |ImageFrame*| is owned by the decoder.
      ImageFrame* imageFrame = decoder->frameBufferAtIndex(0);
      if (imageFrame) {
        (*m_imageCallback)(imageFrame->bitmap());
        return;
      }
    }
  }
  runCallbackWithEmptyBitmap();
}
Example No. 3
void NotificationImageLoader::start(
    ExecutionContext* executionContext,
    const KURL& url,
    std::unique_ptr<ImageCallback> imageCallback) {
  DCHECK(!m_stopped);

  m_startTime = monotonicallyIncreasingTimeMS();
  m_imageCallback = std::move(imageCallback);

  ThreadableLoaderOptions threadableLoaderOptions;
  threadableLoaderOptions.preflightPolicy = PreventPreflight;
  threadableLoaderOptions.crossOriginRequestPolicy = AllowCrossOriginRequests;
  threadableLoaderOptions.timeoutMilliseconds = kImageFetchTimeoutInMs;

  // TODO(mvanouwerkerk): Add an entry for notifications to
  // FetchInitiatorTypeNames and use it.
  ResourceLoaderOptions resourceLoaderOptions;
  resourceLoaderOptions.allowCredentials = AllowStoredCredentials;
  if (executionContext->isWorkerGlobalScope())
    resourceLoaderOptions.requestInitiatorContext = WorkerContext;

  ResourceRequest resourceRequest(url);
  resourceRequest.setRequestContext(WebURLRequest::RequestContextImage);
  resourceRequest.setPriority(ResourceLoadPriorityMedium);
  resourceRequest.setRequestorOrigin(executionContext->getSecurityOrigin());

  m_threadableLoader = ThreadableLoader::create(
      *executionContext, this, threadableLoaderOptions, resourceLoaderOptions);
  m_threadableLoader->start(resourceRequest);
}
Example No. 4
void NotificationImageLoader::didFail(const ResourceError& error)
{
    DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, failedTimeHistogram,
        new CustomCountHistogram("Notifications.Icon.LoadFailTime", 1,
            1000 * 60 * 60 /* 1 hour max */, 50 /* buckets */));
    failedTimeHistogram.count(monotonicallyIncreasingTimeMS() - m_startTime);

    runCallbackWithEmptyBitmap();
}
Example No. 5
void AutoplayUmaHelper::maybeStopRecordingMutedVideoOffscreenDuration() {
  if (!m_mutedVideoOffscreenDurationVisibilityObserver)
    return;

  if (!m_isVisible)
    m_mutedVideoAutoplayOffscreenDurationMS +=
        static_cast<int64_t>(monotonicallyIncreasingTimeMS()) -
        m_mutedVideoAutoplayOffscreenStartTimeMS;

  // Since histograms use int32_t, the duration needs to be limited to
  // std::numeric_limits<int32_t>::max().
  int32_t boundedTime = static_cast<int32_t>(
      std::min<int64_t>(m_mutedVideoAutoplayOffscreenDurationMS,
                        std::numeric_limits<int32_t>::max()));

  if (m_source == AutoplaySource::Attribute) {
    DEFINE_STATIC_LOCAL(
        CustomCountHistogram, durationHistogram,
        ("Media.Video.Autoplay.Muted.Attribute.OffscreenDuration", 1,
         maxOffscreenDurationUmaMS, offscreenDurationUmaBucketCount));
    durationHistogram.count(boundedTime);
  } else {
    DEFINE_STATIC_LOCAL(
        CustomCountHistogram, durationHistogram,
        ("Media.Video.Autoplay.Muted.PlayMethod.OffscreenDuration", 1,
         maxOffscreenDurationUmaMS, offscreenDurationUmaBucketCount));
    durationHistogram.count(boundedTime);
  }
  m_mutedVideoOffscreenDurationVisibilityObserver->stop();
  m_mutedVideoOffscreenDurationVisibilityObserver = nullptr;
  m_mutedVideoAutoplayOffscreenDurationMS = 0;
  m_element->removeEventListener(EventTypeNames::pause, this, false);
  maybeUnregisterUnloadListener();
}
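
For reference, the saturating int64_t-to-int32_t conversion above in isolation, as a minimal sketch assuming plain C++; clampDurationToInt32 is an illustrative helper name, not a Blink API.

#include <algorithm>
#include <cstdint>
#include <limits>

int32_t clampDurationToInt32(int64_t durationMS) {
  // Count histograms store int32_t samples, so saturate anything larger.
  return static_cast<int32_t>(std::min<int64_t>(
      durationMS, std::numeric_limits<int32_t>::max()));
}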
Example No. 6
void FileReader::didReceiveData()
{
    // Fire the progress event at least every 50ms.
    double now = monotonicallyIncreasingTimeMS();
    if (!m_lastProgressNotificationTimeMS)
        m_lastProgressNotificationTimeMS = now;
    else if (now - m_lastProgressNotificationTimeMS > progressNotificationIntervalMS) {
        fireEvent(eventNames().progressEvent);
        m_lastProgressNotificationTimeMS = now;
    }
}
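
A minimal standalone sketch of the same throttling pattern, assuming plain C++17; the ProgressThrottle class is illustrative, with std::chrono::steady_clock standing in for monotonicallyIncreasingTimeMS() and the notification interval supplied by the caller.

#include <chrono>
#include <functional>
#include <utility>

class ProgressThrottle {
public:
    ProgressThrottle(std::chrono::milliseconds interval,
                     std::function<void()> fireProgress)
        : m_interval(interval), m_fireProgress(std::move(fireProgress)) {}

    // Call on every chunk of received data; fires the callback at most once
    // per interval, mirroring FileReader::didReceiveData() above.
    void didReceiveData()
    {
        auto now = std::chrono::steady_clock::now();
        if (!m_hasLastNotification) {
            m_lastNotification = now;
            m_hasLastNotification = true;
        } else if (now - m_lastNotification > m_interval) {
            m_fireProgress();
            m_lastNotification = now;
        }
    }

private:
    std::chrono::milliseconds m_interval;
    std::function<void()> m_fireProgress;
    std::chrono::steady_clock::time_point m_lastNotification;
    bool m_hasLastNotification = false;
};

A FileReader-like owner would construct it with the 50 ms interval from Example No. 6 and a callback that fires the progress event.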
Example No. 7
void AutoplayUmaHelper::maybeStartRecordingMutedVideoOffscreenDuration() {
  if (!m_element->isHTMLVideoElement() || !m_element->muted())
    return;

  // Start recording muted video playing offscreen duration.
  m_mutedVideoAutoplayOffscreenStartTimeMS =
      static_cast<int64_t>(monotonicallyIncreasingTimeMS());
  m_isVisible = false;
  m_mutedVideoOffscreenDurationVisibilityObserver =
      new ElementVisibilityObserver(
          m_element,
          WTF::bind(&AutoplayUmaHelper::
                        onVisibilityChangedForMutedVideoOffscreenDuration,
                    wrapWeakPersistent(this)));
  m_mutedVideoOffscreenDurationVisibilityObserver->start();
  m_element->addEventListener(EventTypeNames::pause, this, false);
  if (m_element->document().domWindow())
    m_element->document().domWindow()->addEventListener(EventTypeNames::unload,
                                                        this, false);
}
Example No. 8
void FileWriter::didWrite(long long bytes, bool complete)
{
    if (m_operationInProgress == OperationAbort) {
        completeAbort();
        return;
    }
    ASSERT(m_readyState == WRITING);
    ASSERT(m_truncateLength == -1);
    ASSERT(m_operationInProgress == OperationWrite);
    ASSERT(!m_bytesToWrite || bytes + m_bytesWritten > 0);
    ASSERT(bytes + m_bytesWritten <= m_bytesToWrite);
    m_bytesWritten += bytes;
    ASSERT((m_bytesWritten == m_bytesToWrite) || !complete);
    setPosition(position() + bytes);
    if (position() > length())
        setLength(position());
    if (complete) {
        m_blobBeingWritten.clear();
        m_operationInProgress = OperationNone;
    }

    int numAborts = m_numAborts;
    // We could get an abort in the handler for this event. If we do, it's
    // already handled the cleanup and signalCompletion call.
    double now = monotonicallyIncreasingTimeMS();
    if (complete || !m_lastProgressNotificationTimeMS || (now - m_lastProgressNotificationTimeMS > progressNotificationIntervalMS)) {
        m_lastProgressNotificationTimeMS = now;
        fireEvent(eventNames().progressEvent);
    }

    if (complete) {
        if (numAborts == m_numAborts)
            signalCompletion(FileError::OK);
        unsetPendingActivity(this);
    }
}
Example No. 9
Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState)
{
    cleanMustHandleValuesIfNecessary();
    
    if (verboseCompilationEnabled(mode) && osrEntryBytecodeIndex != UINT_MAX) {
        dataLog("\n");
        dataLog("Compiler must handle OSR entry from bc#", osrEntryBytecodeIndex, " with values: ", mustHandleValues, "\n");
        dataLog("\n");
    }
    
    Graph dfg(*vm, *this, longLivedState);
    
    if (!parse(dfg)) {
        finalizer = std::make_unique<FailedFinalizer>(*this);
        return FailPath;
    }

    codeBlock->setCalleeSaveRegisters(RegisterSet::dfgCalleeSaveRegisters());
    
    // By this point the DFG bytecode parser will have potentially mutated various tables
    // in the CodeBlock. This is a good time to perform an early shrink, which is more
    // powerful than a late one. It's safe to do so because we haven't generated any code
    // that references any of the tables directly, yet.
    codeBlock->shrinkToFit(CodeBlock::EarlyShrink);

    if (validationEnabled())
        validate(dfg);
    
    if (Options::dumpGraphAfterParsing()) {
        dataLog("Graph after parsing:\n");
        dfg.dump();
    }

    performLiveCatchVariablePreservationPhase(dfg);

    if (Options::useMaximalFlushInsertionPhase())
        performMaximalFlushInsertion(dfg);
    
    performCPSRethreading(dfg);
    performUnification(dfg);
    performPredictionInjection(dfg);
    
    performStaticExecutionCountEstimation(dfg);
    
    if (mode == FTLForOSREntryMode) {
        bool result = performOSREntrypointCreation(dfg);
        if (!result) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }
        performCPSRethreading(dfg);
    }
    
    if (validationEnabled())
        validate(dfg);
    
    performBackwardsPropagation(dfg);
    performPredictionPropagation(dfg);
    performFixup(dfg);
    performStructureRegistration(dfg);
    performInvalidationPointInjection(dfg);
    performTypeCheckHoisting(dfg);
    
    dfg.m_fixpointState = FixpointNotConverged;
    
    // For now we're back to avoiding a fixpoint. Note that we've ping-ponged on this decision
    // many times. For maximum throughput, it's best to fixpoint. But the throughput benefit is
    // small and not likely to show up in FTL anyway. On the other hand, not fixpointing means
    // that the compiler compiles more quickly. We want the third tier to compile quickly, which
    // not fixpointing accomplishes; and the fourth tier shouldn't need a fixpoint.
    if (validationEnabled())
        validate(dfg);
        
    performStrengthReduction(dfg);
    performCPSRethreading(dfg);
    performCFA(dfg);
    performConstantFolding(dfg);
    bool changed = false;
    changed |= performCFGSimplification(dfg);
    changed |= performLocalCSE(dfg);
    
    if (validationEnabled())
        validate(dfg);
    
    performCPSRethreading(dfg);
    if (!isFTL(mode)) {
        // Only run this if we're not FTLing, because currently for a LoadVarargs that is forwardable and
        // in a non-varargs inlined call frame, this will generate ForwardVarargs while the FTL
        // ArgumentsEliminationPhase will create a sequence of GetStack+PutStacks. The GetStack+PutStack
        // sequence then gets sunk, eliminating anything that looks like an escape for subsequent phases,
        // while the ForwardVarargs doesn't get simplified until later (or not at all) and looks like an
        // escape for all of the arguments. This then disables object allocation sinking.
        //
        // So, for now, we just disable this phase for the FTL.
        //
        // If we wanted to enable it, we'd have to do any of the following:
        // - Enable ForwardVarargs->GetStack+PutStack strength reduction, and have that run before
        //   PutStack sinking and object allocation sinking.
        // - Make VarargsForwarding emit a GetLocal+SetLocal sequence, that we can later turn into
        //   GetStack+PutStack.
        //
        // But, it's not super valuable to enable those optimizations, since the FTL
        // ArgumentsEliminationPhase does everything that this phase does, and it doesn't introduce this
        // pathology.
        
        changed |= performVarargsForwarding(dfg); // Do this after CFG simplification and CPS rethreading.
    }
    if (changed) {
        performCFA(dfg);
        performConstantFolding(dfg);
    }
    
    // If we're doing validation, then run some analyses, to give them an opportunity
    // to self-validate. Now is as good a time as any to do this.
    if (validationEnabled()) {
        dfg.ensureDominators();
        dfg.ensureNaturalLoops();
        dfg.ensurePrePostNumbering();
    }

    switch (mode) {
    case DFGMode: {
        dfg.m_fixpointState = FixpointConverged;
    
        performTierUpCheckInjection(dfg);

        performFastStoreBarrierInsertion(dfg);
        performStoreBarrierClustering(dfg);
        performCleanUp(dfg);
        performCPSRethreading(dfg);
        performDCE(dfg);
        performPhantomInsertion(dfg);
        performStackLayout(dfg);
        performVirtualRegisterAllocation(dfg);
        performWatchpointCollection(dfg);
        dumpAndVerifyGraph(dfg, "Graph after optimization:");
        
        JITCompiler dataFlowJIT(dfg);
        if (codeBlock->codeType() == FunctionCode)
            dataFlowJIT.compileFunction();
        else
            dataFlowJIT.compile();
        
        return DFGPath;
    }
    
    case FTLMode:
    case FTLForOSREntryMode: {
#if ENABLE(FTL_JIT)
        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }
        
        performCleanUp(dfg); // Reduce the graph size a bit.
        performCriticalEdgeBreaking(dfg);
        if (Options::createPreHeaders())
            performLoopPreHeaderCreation(dfg);
        performCPSRethreading(dfg);
        performSSAConversion(dfg);
        performSSALowering(dfg);
        
        // Ideally, these would be run to fixpoint with the object allocation sinking phase.
        performArgumentsElimination(dfg);
        if (Options::usePutStackSinking())
            performPutStackSinking(dfg);
        
        performConstantHoisting(dfg);
        performGlobalCSE(dfg);
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performConstantFolding(dfg);
        performCleanUp(dfg); // Reduce the graph size a lot.
        changed = false;
        changed |= performStrengthReduction(dfg);
        if (Options::useObjectAllocationSinking()) {
            changed |= performCriticalEdgeBreaking(dfg);
            changed |= performObjectAllocationSinking(dfg);
        }
        if (changed) {
            // State-at-tail and state-at-head will be invalid if we did strength reduction since
            // it might increase live ranges.
            performLivenessAnalysis(dfg);
            performCFA(dfg);
            performConstantFolding(dfg);
        }
        
        // Currently, this relies on pre-headers still being valid. That precludes running CFG
        // simplification before it, unless we re-created the pre-headers. There wouldn't be anything
        // wrong with running LICM earlier, if we wanted to put other CFG transforms above this point.
        // Alternatively, we could run loop pre-header creation after SSA conversion - but if we did that
        // then we'd need to do some simple SSA fix-up.
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performLICM(dfg);

        // FIXME: Currently: IntegerRangeOptimization *must* be run after LICM.
        //
        // IntegerRangeOptimization makes changes to nodes based on preceding
        // blocks and nodes. LICM moves nodes, which can invalidate assumptions
        // used by IntegerRangeOptimization.
        //
        // Ideally, the dependencies should be explicit. See https://bugs.webkit.org/show_bug.cgi?id=157534.
        performLivenessAnalysis(dfg);
        performIntegerRangeOptimization(dfg);
        
        performCleanUp(dfg);
        performIntegerCheckCombining(dfg);
        performGlobalCSE(dfg);
        
        // At this point we're not allowed to do any further code motion because our reasoning
        // about code motion assumes that it's OK to insert GC points in random places.
        dfg.m_fixpointState = FixpointConverged;
        
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performGlobalStoreBarrierInsertion(dfg);
        performStoreBarrierClustering(dfg);
        if (Options::useMovHintRemoval())
            performMovHintRemoval(dfg);
        performCleanUp(dfg);
        performDCE(dfg); // We rely on this to kill dead code that won't be recognized as dead by B3.
        performStackLayout(dfg);
        performLivenessAnalysis(dfg);
        performOSRAvailabilityAnalysis(dfg);
        performWatchpointCollection(dfg);
        
        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }

        dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:", shouldDumpDisassembly(mode));

        // Flash a safepoint in case the GC wants some action.
        Safepoint::Result safepointResult;
        {
            GraphSafepoint safepoint(dfg, safepointResult);
        }
        if (safepointResult.didGetCancelled())
            return CancelPath;

        FTL::State state(dfg);
        FTL::lowerDFGToB3(state);
        
        if (UNLIKELY(computeCompileTimes()))
            m_timeBeforeFTL = monotonicallyIncreasingTimeMS();
        
        if (Options::b3AlwaysFailsBeforeCompile()) {
            FTL::fail(state);
            return FTLPath;
        }
        
        FTL::compile(state, safepointResult);
        if (safepointResult.didGetCancelled())
            return CancelPath;
        
        if (Options::b3AlwaysFailsBeforeLink()) {
            FTL::fail(state);
            return FTLPath;
        }
        
        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::link(state);
        
        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }
        
        return FTLPath;
#else
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
#endif // ENABLE(FTL_JIT)
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
    }
}
Example No. 10
void Plan::compileInThread(LongLivedState& longLivedState, ThreadData* threadData)
{
    this->threadData = threadData;
    
    double before = 0;
    CString codeBlockName;
    if (UNLIKELY(computeCompileTimes()))
        before = monotonicallyIncreasingTimeMS();
    if (UNLIKELY(reportCompileTimes()))
        codeBlockName = toCString(*codeBlock);
    
    CompilationScope compilationScope;

    if (logCompilationChanges(mode) || Options::reportDFGPhaseTimes())
        dataLog("DFG(Plan) compiling ", *codeBlock, " with ", mode, ", number of instructions = ", codeBlock->instructionCount(), "\n");

    CompilationPath path = compileInThreadImpl(longLivedState);

    RELEASE_ASSERT(path == CancelPath || finalizer);
    RELEASE_ASSERT((path == CancelPath) == (stage == Cancelled));
    
    double after = 0;
    if (UNLIKELY(computeCompileTimes())) {
        after = monotonicallyIncreasingTimeMS();
    
        if (Options::reportTotalCompileTimes()) {
            if (isFTL(mode)) {
                totalFTLCompileTime += after - before;
                totalFTLDFGCompileTime += m_timeBeforeFTL - before;
                totalFTLB3CompileTime += after - m_timeBeforeFTL;
            } else
                totalDFGCompileTime += after - before;
        }
    }
    const char* pathName = nullptr;
    switch (path) {
    case FailPath:
        pathName = "N/A (fail)";
        break;
    case DFGPath:
        pathName = "DFG";
        break;
    case FTLPath:
        pathName = "FTL";
        break;
    case CancelPath:
        pathName = "Cancelled";
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
    if (codeBlock) { // codeBlock will be null if the compilation was cancelled.
        if (path == FTLPath)
            CODEBLOCK_LOG_EVENT(codeBlock, "ftlCompile", ("took ", after - before, " ms (DFG: ", m_timeBeforeFTL - before, ", B3: ", after - m_timeBeforeFTL, ") with ", pathName));
        else
            CODEBLOCK_LOG_EVENT(codeBlock, "dfgCompile", ("took ", after - before, " ms with ", pathName));
    }
    if (UNLIKELY(reportCompileTimes())) {
        dataLog("Optimized ", codeBlockName, " using ", mode, " with ", pathName, " into ", finalizer ? finalizer->codeSize() : 0, " bytes in ", after - before, " ms");
        if (path == FTLPath)
            dataLog(" (DFG: ", m_timeBeforeFTL - before, ", B3: ", after - m_timeBeforeFTL, ")");
        dataLog(".\n");
    }
}
Example No. 11
void Plan::compileInThread(LongLivedState& longLivedState, ThreadData* threadData)
{
    this->threadData = threadData;
    
    double before = 0;
    CString codeBlockName;
    if (computeCompileTimes())
        before = monotonicallyIncreasingTimeMS();
    if (reportCompileTimes())
        codeBlockName = toCString(*codeBlock);
    
    SamplingRegion samplingRegion("DFG Compilation (Plan)");
    CompilationScope compilationScope;

    if (logCompilationChanges(mode))
        dataLog("DFG(Plan) compiling ", *codeBlock, " with ", mode, ", number of instructions = ", codeBlock->instructionCount(), "\n");

    CompilationPath path = compileInThreadImpl(longLivedState);

    RELEASE_ASSERT(path == CancelPath || finalizer);
    RELEASE_ASSERT((path == CancelPath) == (stage == Cancelled));
    
    double after = 0;
    if (computeCompileTimes())
        after = monotonicallyIncreasingTimeMS();
    
    if (Options::reportTotalCompileTimes()) {
        if (isFTL(mode)) {
            totalFTLCompileTime += after - before;
            totalFTLDFGCompileTime += m_timeBeforeFTL - before;
            totalFTLB3CompileTime += after - m_timeBeforeFTL;
        } else
            totalDFGCompileTime += after - before;
    }
    
    if (reportCompileTimes()) {
        const char* pathName;
        switch (path) {
        case FailPath:
            pathName = "N/A (fail)";
            break;
        case DFGPath:
            pathName = "DFG";
            break;
        case FTLPath:
            pathName = "FTL";
            break;
        case CancelPath:
            pathName = "Cancelled";
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            pathName = "";
#endif
            break;
        }
        dataLog("Optimized ", codeBlockName, " using ", mode, " with ", pathName, " into ", finalizer ? finalizer->codeSize() : 0, " bytes in ", after - before, " ms");
        if (path == FTLPath)
            dataLog(" (DFG: ", m_timeBeforeFTL - before, ", B3: ", after - m_timeBeforeFTL, ")");
        dataLog(".\n");
    }
}