const ScaledImageFragment* ImageFrameGenerator::decodeAndScale(const SkISize& scaledSize, size_t index)
{
    // Prevents concurrent decode or scale operations on the same image data.
    // Multiple LazyDecodingPixelRefs can call this method at the same time.
    MutexLocker lock(m_decodeMutex);
    if (m_decodeFailedAndEmpty)
        return 0;

    const ScaledImageFragment* cachedImage = 0;

    cachedImage = tryToLockCompleteCache(scaledSize, index);
    if (cachedImage)
        return cachedImage;

    TRACE_EVENT2("webkit", "ImageFrameGenerator::decodeAndScale", "generator", this, "decodeCount", static_cast<int>(m_decodeCount));

    cachedImage = tryToScale(0, scaledSize, index);
    if (cachedImage)
        return cachedImage;

    cachedImage = tryToResumeDecodeAndScale(scaledSize, index);
    if (cachedImage)
        return cachedImage;
    return 0;
}
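// Illustrative sketch, not Blink API: the function above serializes all decode
// and scale work for one image behind a single mutex, then tries progressively
// more expensive paths (locked cache hit, rescale, resumed decode). The class
// and member names below are hypothetical stand-ins for that structure.
#include <map>
#include <mutex>
#include <optional>

class HypotheticalFrameGenerator {
public:
    std::optional<int> decodeAndScale(int sizeKey)
    {
        std::lock_guard<std::mutex> lock(m_decodeMutex); // serialize all decodes
        if (m_decodeFailed)
            return std::nullopt;
        if (auto hit = tryCache(sizeKey)) // cheapest path: already cached
            return hit;
        return resumeDecode(sizeKey); // most expensive path: run the decoder
    }

private:
    std::optional<int> tryCache(int sizeKey) const
    {
        auto it = m_cache.find(sizeKey);
        if (it == m_cache.end())
            return std::nullopt;
        return it->second;
    }

    std::optional<int> resumeDecode(int sizeKey)
    {
        int decoded = sizeKey * 2; // stand-in for real pixel work
        m_cache[sizeKey] = decoded;
        return decoded;
    }

    std::mutex m_decodeMutex;
    std::map<int, int> m_cache;
    bool m_decodeFailed = false;
};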
void VisualViewport::setSize(const IntSize& size)
{
    // When the main frame is remote, we won't have an associated frame.
    if (!mainFrame())
        return;

    if (m_size == size)
        return;

    bool autosizerNeedsUpdating = (size.width() != m_size.width())
        && mainFrame()->settings()
        && mainFrame()->settings()->textAutosizingEnabled();

    TRACE_EVENT2("blink", "VisualViewport::setSize", "width", size.width(), "height", size.height());

    m_size = size;

    if (m_innerViewportContainerLayer) {
        m_innerViewportContainerLayer->setSize(m_size);

        // Need to re-compute sizes for the overlay scrollbars.
        initializeScrollbars();
    }

    if (autosizerNeedsUpdating) {
        // This needs to happen after setting the m_size member since it'll be read in the update call.
        if (TextAutosizer* textAutosizer = mainFrame()->document()->textAutosizer())
            textAutosizer->updatePageInfoInAllFrames();
    }
}
void WebCompositorInputHandlerImpl::scrollBy(const IntPoint& increment)
{
    if (increment == IntPoint::zero())
        return;

    TRACE_EVENT2("cc", "WebCompositorInputHandlerImpl::scrollBy", "x", increment.x(), "y", increment.y());

    WebMouseWheelEvent event;
    event.type = WebInputEvent::MouseWheel;
    event.deltaX = -increment.x();
    event.deltaY = -increment.y();
    event.hasPreciseScrollingDeltas = true;
    event.x = m_wheelFlingPoint.x();
    event.y = m_wheelFlingPoint.y();

    WebCompositorInputHandlerImpl::EventDisposition disposition = handleInputEventInternal(event);
    switch (disposition) {
    case DidHandle:
    case DropEvent:
        break;
    case DidNotHandle:
        TRACE_EVENT_INSTANT0("cc", "WebCompositorInputHandlerImpl::scrollBy::AbortFling");
        // FIXME: If we got a DidNotHandle, that means we need to deliver wheels on the main thread.
        // In this case we need to schedule a commit and transfer the fling curve over to the main
        // thread and run the rest of the wheels from there.
        // This can happen when flinging a page that contains a scrollable subarea that we can't
        // scroll on the thread if the fling starts outside the subarea but then is flung "under" the
        // pointer.
        // For now, just abort the fling.
        cancelCurrentFling();
    }
}
bool ImageFrameGenerator::getYUVComponentSizes(SkISize componentSizes[3])
{
    TRACE_EVENT2("blink", "ImageFrameGenerator::getYUVComponentSizes", "width", m_fullSize.width(), "height", m_fullSize.height());

    if (m_yuvDecodingFailed)
        return false;

    SharedBuffer* data = 0;
    bool allDataReceived = false;
    m_data->data(&data, &allDataReceived);

    // FIXME: YUV decoding does not currently support progressive decoding.
    if (!allDataReceived)
        return false;

    OwnPtr<ImageDecoder> decoder = ImageDecoder::create(*data, ImageDecoder::AlphaPremultiplied, ImageDecoder::GammaAndColorProfileApplied);
    if (!decoder)
        return false;

    // Setting a dummy ImagePlanes object signals to the decoder that we want to do YUV decoding.
    decoder->setData(data, allDataReceived);
    OwnPtr<ImagePlanes> dummyImagePlanes = adoptPtr(new ImagePlanes);
    decoder->setImagePlanes(dummyImagePlanes.release());

    ASSERT(componentSizes);
    return updateYUVComponentSizes(decoder.get(), componentSizes, ImageDecoder::SizeForMemoryAllocation);
}
void VisualViewport::setSize(const IntSize& size)
{
    if (m_size == size)
        return;

    TRACE_EVENT2("blink", "VisualViewport::setSize", "width", size.width(), "height", size.height());

    bool widthDidChange = size.width() != m_size.width();
    m_size = size;

    if (m_innerViewportContainerLayer) {
        m_innerViewportContainerLayer->setSize(FloatSize(m_size));

        // Need to re-compute sizes for the overlay scrollbars.
        initializeScrollbars();
    }

    if (!mainFrame())
        return;

    enqueueResizeEvent();

    bool autosizerNeedsUpdating = widthDidChange
        && mainFrame()->settings()
        && mainFrame()->settings()->textAutosizingEnabled();

    if (autosizerNeedsUpdating) {
        // This needs to happen after setting the m_size member since it'll be read
        // in the update call.
        if (TextAutosizer* textAutosizer = mainFrame()->document()->textAutosizer())
            textAutosizer->updatePageInfoInAllFrames();
    }
}
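// Illustrative sketch, not Blink API: the ordering the comment above insists
// on. The "did it change" check must read the old size before it is
// overwritten, and the follow-up update must run after the write because it
// reads the new size. Minimal hypothetical stand-in.
#include <cassert>

struct HypotheticalViewport {
    int width = 0;

    void setWidth(int newWidth)
    {
        bool widthDidChange = newWidth != width; // compare against the old value first
        width = newWidth;                        // then commit the new size
        if (widthDidChange)
            onWidthChanged();                    // safe: reads the already-updated width
    }

    void onWidthChanged() { assert(width != 0); }
};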
bool ImageFrameGenerator::getYUVComponentSizes(SkISize componentSizes[3])
{
    ASSERT(componentSizes);

    TRACE_EVENT2("webkit", "ImageFrameGenerator::getYUVComponentSizes", "width", m_fullSize.width(), "height", m_fullSize.height());

    SharedBuffer* data = 0;
    bool allDataReceived = false;
    m_data.data(&data, &allDataReceived);

    // FIXME: YUV decoding does not currently support progressive decoding.
    if (!allDataReceived)
        return false;

    OwnPtr<ImageDecoder> decoder = ImageDecoder::create(*data, ImageSource::AlphaPremultiplied, ImageSource::GammaAndColorProfileApplied);
    if (!decoder)
        return false;

    // JPEG images support YUV decoding: other decoders do not. So don't pump data into decoders
    // that always return false to updateYUVComponentSizes() requests.
    if (decoder->filenameExtension() != "jpg")
        return false;

    // Setting a dummy ImagePlanes object signals to the decoder that we want to do YUV decoding.
    decoder->setData(data, allDataReceived);
    OwnPtr<ImagePlanes> dummyImagePlanes = adoptPtr(new ImagePlanes);
    decoder->setImagePlanes(dummyImagePlanes.release());

    return updateYUVComponentSizes(decoder.get(), componentSizes, ImageDecoder::SizeForMemoryAllocation);
}
bool ImageFrameGenerator::decode(size_t index, ImageDecoder** decoder, SkBitmap* bitmap)
{
    TRACE_EVENT2("blink", "ImageFrameGenerator::decode", "width", m_fullSize.width(), "height", m_fullSize.height());

    ASSERT(decoder);
    SharedBuffer* data = 0;
    bool allDataReceived = false;
    bool newDecoder = false;
    m_data.data(&data, &allDataReceived);

    // Try to create an ImageDecoder if we are not given one.
    if (!*decoder) {
        newDecoder = true;
        if (m_imageDecoderFactory)
            *decoder = m_imageDecoderFactory->create().leakPtr();

        if (!*decoder)
            *decoder = ImageDecoder::create(*data, ImageSource::AlphaPremultiplied, ImageSource::GammaAndColorProfileApplied).leakPtr();

        if (!*decoder)
            return false;
    }

    if (!m_isMultiFrame && newDecoder && allDataReceived) {
        // If we're using an external memory allocator that means we're decoding
        // directly into the output memory and we can save one memcpy.
        ASSERT(m_externalAllocator.get());
        (*decoder)->setMemoryAllocator(m_externalAllocator.get());
    }

    (*decoder)->setData(data, allDataReceived);

    ImageFrame* frame = (*decoder)->frameBufferAtIndex(index);

    // For multi-frame image decoders, we need to know how many frames are
    // in that image in order to release the decoder when all frames are
    // decoded. frameCount() is reliable only if all data is received and set in
    // decoder, particularly with GIF.
    if (allDataReceived)
        m_frameCount = (*decoder)->frameCount();

    (*decoder)->setData(0, false); // Unref SharedBuffer from ImageDecoder.
    (*decoder)->clearCacheExceptFrame(index);
    (*decoder)->setMemoryAllocator(0);

    if (!frame || frame->status() == ImageFrame::FrameEmpty)
        return false;

    // A cache object is considered complete if we can decode a complete frame.
    // Or we have received all data. The image might not be fully decoded in
    // the latter case.
    const bool isDecodeComplete = frame->status() == ImageFrame::FrameComplete || allDataReceived;
    SkBitmap fullSizeBitmap = frame->getSkBitmap();
    if (!fullSizeBitmap.isNull()) {
        ASSERT(fullSizeBitmap.width() == m_fullSize.width() && fullSizeBitmap.height() == m_fullSize.height());
        setHasAlpha(index, !fullSizeBitmap.isOpaque());
    }
    *bitmap = fullSizeBitmap;
    return isDecodeComplete;
}
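// Illustrative sketch, not Blink API: why an external allocator saves a
// memcpy. If the decoder's frame buffer is handed out from memory the caller
// already owns, the decoded pixels land in place and no final copy is needed.
// All names below are hypothetical.
#include <cstddef>
#include <cstring>
#include <vector>

struct Allocator {
    virtual ~Allocator() = default;
    virtual void* allocate(size_t bytes) = 0;
};

// Hands out the caller's destination buffer instead of fresh memory.
struct ExternalAllocatorSketch : Allocator {
    void* dst;
    explicit ExternalAllocatorSketch(void* d) : dst(d) {}
    void* allocate(size_t) override { return dst; }
};

void decodeInto(Allocator& allocator, size_t bytes)
{
    // The "decode" writes straight into whatever the allocator returned.
    unsigned char* pixels = static_cast<unsigned char*>(allocator.allocate(bytes));
    std::memset(pixels, 0xFF, bytes); // stand-in for real pixel work
}

int main()
{
    std::vector<unsigned char> callerOwnedMemory(64);
    ExternalAllocatorSketch allocator(callerOwnedMemory.data());
    decodeInto(allocator, callerOwnedMemory.size()); // no copy afterwards
}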
PassOwnPtr<ScaledImageFragment> ImageFrameGenerator::decode(ImageDecoder** decoder)
{
    TRACE_EVENT2("webkit", "ImageFrameGenerator::decode", "width", m_fullSize.width(), "height", m_fullSize.height());

    ASSERT(decoder);
    SharedBuffer* data = 0;
    bool allDataReceived = false;
    m_data.data(&data, &allDataReceived);

    // Try to create an ImageDecoder if we are not given one.
    if (!*decoder) {
        *decoder = ImageDecoder::create(*data, ImageSource::AlphaPremultiplied, ImageSource::GammaAndColorProfileApplied).leakPtr();
        if (!*decoder && m_imageDecoderFactory)
            *decoder = m_imageDecoderFactory->create().leakPtr();
        if (!*decoder)
            return nullptr;
    }

    // TODO: this is very ugly. We need to refactor the way we pass a
    // memory allocator to image decoders.
    (*decoder)->setMemoryAllocator(&m_allocator);
    (*decoder)->setData(data, allDataReceived);

    // If this call returns a newly allocated DiscardablePixelRef, then
    // ImageFrame::m_bitmap and the contained DiscardablePixelRef are locked.
    // They will be unlocked when ImageDecoder is destroyed since ImageDecoder
    // owns the ImageFrame. Partially decoded SkBitmap is thus inserted into the
    // ImageDecodingStore while locked.
    ImageFrame* frame = (*decoder)->frameBufferAtIndex(0);
    (*decoder)->setData(0, false); // Unref SharedBuffer from ImageDecoder.

    if (!frame || frame->status() == ImageFrame::FrameEmpty)
        return nullptr;

    bool isComplete = frame->status() == ImageFrame::FrameComplete;
    SkBitmap fullSizeBitmap = frame->getSkBitmap();
    {
        MutexLocker lock(m_alphaMutex);
        m_hasAlpha = !fullSizeBitmap.isOpaque();
    }
    ASSERT(fullSizeBitmap.width() == m_fullSize.width() && fullSizeBitmap.height() == m_fullSize.height());

    return ScaledImageFragment::create(m_fullSize, fullSizeBitmap, isComplete);
}
void PinchViewport::setSize(const IntSize& size)
{
    if (m_size == size)
        return;

    TRACE_EVENT2("blink", "PinchViewport::setSize", "width", size.width(), "height", size.height());
    m_size = size;

    // Make sure we clamp the offset to within the new bounds.
    setLocation(m_offset);

    if (m_innerViewportContainerLayer) {
        m_innerViewportContainerLayer->setSize(m_size);

        // Need to re-compute sizes for the overlay scrollbars.
        setupScrollbar(WebScrollbar::Horizontal);
        setupScrollbar(WebScrollbar::Vertical);
    }
}
bool ImageFrameGenerator::decodeToYUV(SkISize componentSizes[3], void* planes[3], size_t rowBytes[3])
{
    // This method is called to populate a discardable memory owned by Skia.

    // Prevents concurrent decode or scale operations on the same image data.
    MutexLocker lock(m_decodeMutex);

    if (m_decodeFailedAndEmpty)
        return false;

    TRACE_EVENT2("blink", "ImageFrameGenerator::decodeToYUV", "generator", this, "decodeCount", static_cast<int>(m_decodeCount));

    if (!planes || !planes[0] || !planes[1] || !planes[2]
        || !rowBytes || !rowBytes[0] || !rowBytes[1] || !rowBytes[2]) {
        return false;
    }

    SharedBuffer* data = 0;
    bool allDataReceived = false;
    m_data.data(&data, &allDataReceived);

    // FIXME: YUV decoding does not currently support progressive decoding.
    ASSERT(allDataReceived);

    OwnPtr<ImageDecoder> decoder = ImageDecoder::create(*data, ImageSource::AlphaPremultiplied, ImageSource::GammaAndColorProfileApplied);
    if (!decoder)
        return false;

    decoder->setData(data, allDataReceived);

    OwnPtr<ImagePlanes> imagePlanes = adoptPtr(new ImagePlanes(planes, rowBytes));
    decoder->setImagePlanes(imagePlanes.release());

    bool sizeUpdated = updateYUVComponentSizes(decoder.get(), componentSizes, ImageDecoder::ActualSize);
    RELEASE_ASSERT(sizeUpdated);

    bool yuvDecoded = decoder->decodeToYUV();
    if (yuvDecoded)
        setHasAlpha(0, false); // YUV is always opaque
    return yuvDecoded;
}
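// Illustrative sketch, not Blink API: what the three componentSizes/planes
// arrays mean for a 4:2:0 image, the common JPEG case. The Y plane is full
// resolution; the U and V planes are subsampled by two in each dimension.
// The helper below is hypothetical.
#include <cstdio>

struct PlaneSize { int width; int height; };

void computeYUV420PlaneSizes(int imageWidth, int imageHeight, PlaneSize sizes[3])
{
    sizes[0] = { imageWidth, imageHeight };                     // Y: full resolution
    sizes[1] = { (imageWidth + 1) / 2, (imageHeight + 1) / 2 }; // U: half resolution
    sizes[2] = { (imageWidth + 1) / 2, (imageHeight + 1) / 2 }; // V: half resolution
}

int main()
{
    PlaneSize sizes[3];
    computeYUV420PlaneSizes(1920, 1080, sizes);
    for (int i = 0; i < 3; ++i)
        std::printf("plane %d: %dx%d\n", i, sizes[i].width, sizes[i].height);
    // Prints: plane 0: 1920x1080, plane 1: 960x540, plane 2: 960x540.
}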
void WebCompositorInputHandlerImpl::scrollBy(const WebPoint& increment)
{
    if (increment == WebPoint())
        return;

    TRACE_EVENT2("webkit", "WebCompositorInputHandlerImpl::scrollBy", "x", increment.x, "y", increment.y);

    bool didScroll = false;

    switch (m_flingParameters.sourceDevice) {
    case WebGestureEvent::Touchpad:
        didScroll = touchpadFlingScroll(increment);
        break;
    case WebGestureEvent::Touchscreen:
        didScroll = m_inputHandlerClient->scrollByIfPossible(m_flingParameters.point, IntSize(-increment.x, -increment.y));
        break;
    }

    if (didScroll) {
        m_flingParameters.cumulativeScroll.width += increment.x;
        m_flingParameters.cumulativeScroll.height += increment.y;
    }
}
void WebCompositorInputHandlerImpl::scrollBy(const IntPoint& increment)
{
    if (increment == IntPoint::zero())
        return;

    TRACE_EVENT2("cc", "WebCompositorInputHandlerImpl::scrollBy", "x", increment.x(), "y", increment.y());

    WebMouseWheelEvent syntheticWheel;
    syntheticWheel.type = WebInputEvent::MouseWheel;
    syntheticWheel.deltaX = increment.x();
    syntheticWheel.deltaY = increment.y();
    syntheticWheel.hasPreciseScrollingDeltas = true;
    syntheticWheel.x = m_wheelFlingParameters.point.x;
    syntheticWheel.y = m_wheelFlingParameters.point.y;
    syntheticWheel.globalX = m_wheelFlingParameters.globalPoint.x;
    syntheticWheel.globalY = m_wheelFlingParameters.globalPoint.y;
    syntheticWheel.modifiers = m_wheelFlingParameters.modifiers;

    WebCompositorInputHandlerImpl::EventDisposition disposition = handleInputEventInternal(syntheticWheel);
    switch (disposition) {
    case DidHandle:
        m_wheelFlingParameters.cumulativeScroll.width += increment.x();
        m_wheelFlingParameters.cumulativeScroll.height += increment.y();
        // Intentional fall-through: a handled wheel, like a dropped one, needs no further action.
    case DropEvent:
        break;
    case DidNotHandle:
        TRACE_EVENT_INSTANT0("cc", "WebCompositorInputHandlerImpl::scrollBy::AbortFling");
        // If we got a DidNotHandle, that means we need to deliver wheels on the main thread.
        // In this case we need to schedule a commit and transfer the fling curve over to the main
        // thread and run the rest of the wheels from there.
        // This can happen when flinging a page that contains a scrollable subarea that we can't
        // scroll on the thread if the fling starts outside the subarea but then is flung "under" the
        // pointer.
        m_client->transferActiveWheelFlingAnimation(m_wheelFlingParameters);
        cancelCurrentFling();
        break;
    }
}
bool ImageFrameGenerator::decodeAndScale(const SkImageInfo& info, size_t index, void* pixels, size_t rowBytes)
{
    // This method is called to populate a discardable memory owned by Skia.

    // Prevents concurrent decode or scale operations on the same image data.
    MutexLocker lock(m_decodeMutex);

    // This implementation does not support scaling so check the requested size.
    SkISize scaledSize = SkISize::Make(info.width(), info.height());
    ASSERT(m_fullSize == scaledSize);

    if (m_decodeFailedAndEmpty)
        return false;

    TRACE_EVENT2("blink", "ImageFrameGenerator::decodeAndScale", "generator", this, "decodeCount", m_decodeCount);

    m_externalAllocator = adoptPtr(new ExternalMemoryAllocator(info, pixels, rowBytes));

    SkBitmap bitmap = tryToResumeDecode(scaledSize, index);
    if (bitmap.isNull())
        return false;

    // Don't keep the allocator because it contains a pointer to memory
    // that we do not own.
    m_externalAllocator.clear();

    ASSERT(bitmap.width() == scaledSize.width());
    ASSERT(bitmap.height() == scaledSize.height());

    bool result = true;
    SkAutoLockPixels bitmapLock(bitmap);
    // Check to see if decoder has written directly to the memory provided
    // by Skia. If not make a copy.
    if (bitmap.getPixels() != pixels)
        result = bitmap.copyPixelsTo(pixels, rowBytes * info.height(), rowBytes);
    return result;
}
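// Illustrative sketch, not Skia API: the final copy above is conditional. If
// the decoder already wrote into the caller's buffer through the external
// allocator, the pixel pointers alias and nothing is copied; otherwise the
// pixels are copied row by row so differing row strides are handled. All
// names below are hypothetical.
#include <cstddef>
#include <cstring>

bool finishInto(const void* decodedPixels, size_t decodedRowBytes,
                void* dstPixels, size_t dstRowBytes, int height)
{
    if (decodedPixels == dstPixels)
        return true; // decoder wrote in place; nothing to do

    if (decodedRowBytes > dstRowBytes)
        return false; // destination rows too small to hold a source row

    const unsigned char* src = static_cast<const unsigned char*>(decodedPixels);
    unsigned char* dst = static_cast<unsigned char*>(dstPixels);
    for (int y = 0; y < height; ++y)
        std::memcpy(dst + y * dstRowBytes, src + y * decodedRowBytes, decodedRowBytes);
    return true;
}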
void ThreadTimers::sharedTimerFiredInternal()
{
    // Do a re-entrancy check.
    if (m_firingTimers)
        return;
    m_firingTimers = true;
    m_pendingSharedTimerFireTime = 0;

    double fireTime = monotonicallyIncreasingTime();
    double timeToQuit = fireTime + maxDurationOfFiringTimers;

    while (!m_timerHeap.isEmpty() && m_timerHeap.first()->m_nextFireTime <= fireTime) {
        TimerBase& timer = *m_timerHeap.first();
        timer.m_nextFireTime = 0;
        timer.m_unalignedNextFireTime = 0;
        timer.heapDeleteMin();

        double interval = timer.repeatInterval();
        timer.setNextFireTime(interval ? fireTime + interval : 0);

        TRACE_EVENT2("blink", "ThreadTimers::sharedTimerFiredInternal", "src_file", timer.location().fileName(), "src_func", timer.location().functionName());

        // Once the timer has been fired, it may be deleted, so do nothing else with it after this point.
        timer.fired();

        // Catch the case where the timer asked timers to fire in a nested event loop, or we are over time limit.
        if (!m_firingTimers || timeToQuit < monotonicallyIncreasingTime() || (isMainThread() && Platform::current()->currentThread()->scheduler()->shouldYieldForHighPriorityWork()))
            break;
    }

    m_firingTimers = false;

    updateSharedTimer();
}
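// Illustrative sketch, not Blink API: the core pattern above is "pop every
// timer that is due, but stop early once a time budget is exhausted", so one
// burst of due timers cannot starve the rest of the event loop. The budget
// value and types below are hypothetical stand-ins.
#include <chrono>
#include <functional>
#include <queue>
#include <vector>

using Clock = std::chrono::steady_clock;

struct PendingTimer {
    Clock::time_point fireTime;
    std::function<void()> callback;
    // Earliest-deadline-first ordering for the min-heap below.
    bool operator>(const PendingTimer& other) const { return fireTime > other.fireTime; }
};

using TimerHeap = std::priority_queue<PendingTimer, std::vector<PendingTimer>, std::greater<PendingTimer>>;

void fireDueTimers(TimerHeap& heap)
{
    const auto now = Clock::now();
    const auto timeToQuit = now + std::chrono::milliseconds(50); // assumed budget

    while (!heap.empty() && heap.top().fireTime <= now) {
        PendingTimer timer = heap.top();
        heap.pop();
        timer.callback(); // may take arbitrarily long

        // Over budget: leave remaining due timers for the next shared-timer fire.
        if (Clock::now() > timeToQuit)
            break;
    }
}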
void CCOverdrawMetrics::recordMetricsInternal(MetricsType metricsType, const LayerTreeHostType* layerTreeHost) const
{
    const char* histogramOpaqueName = 0;
    const char* histogramTranslucentName = 0;
    const char* histogramCulledName = 0;
    const char* cullCounterName = 0;
    const char* opaqueCounterName = 0;
    const char* translucentCounterName = 0;
    switch (metricsType) {
    case DRAWING:
        histogramOpaqueName = "Renderer4.drawPixelCountOpaque";
        histogramTranslucentName = "Renderer4.drawPixelCountTranslucent";
        histogramCulledName = "Renderer4.drawPixelCountCulled";
        cullCounterName = "DrawPixelsCulled";
        opaqueCounterName = "PixelsDrawnOpaque";
        translucentCounterName = "PixelsDrawnTranslucent";
        break;
    case PAINTING:
        histogramOpaqueName = "Renderer4.paintPixelCountOpaque";
        histogramTranslucentName = "Renderer4.paintPixelCountTranslucent";
        histogramCulledName = "Renderer4.paintPixelCountCulled";
        cullCounterName = "PaintPixelsCulled";
        opaqueCounterName = "PixelsPaintedOpaque";
        translucentCounterName = "PixelsPaintedTranslucent";
        break;
    }
    ASSERT(histogramOpaqueName);

    float normalization = 1000.f / (layerTreeHost->viewportSize().width() * layerTreeHost->viewportSize().height());

    PlatformSupport::histogramCustomCounts(histogramOpaqueName, static_cast<int>(normalization * m_pixelsDrawnOpaque), 100, 1000000, 50);
    PlatformSupport::histogramCustomCounts(histogramTranslucentName, static_cast<int>(normalization * m_pixelsDrawnTranslucent), 100, 1000000, 50);
    PlatformSupport::histogramCustomCounts(histogramCulledName, static_cast<int>(normalization * m_pixelsCulled), 100, 1000000, 50);

    TRACE_COUNTER_ID1("webkit", cullCounterName, layerTreeHost, m_pixelsCulled);
    TRACE_EVENT2("webkit", "CCOverdrawMetrics", opaqueCounterName, m_pixelsDrawnOpaque, translucentCounterName, m_pixelsDrawnTranslucent);
}
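// Worked example of the normalization above: the recorded histogram value is
// 1000 * pixelCount / viewportArea, i.e. the overdraw factor scaled by 1000,
// which keeps the metric comparable across viewport sizes.
#include <cstdio>

int main()
{
    const float viewportWidth = 1280.f, viewportHeight = 720.f;
    const float normalization = 1000.f / (viewportWidth * viewportHeight);

    // Drawing every viewport pixel exactly twice (2x overdraw)...
    const float pixelsDrawnOpaque = 2.f * viewportWidth * viewportHeight;

    // ...records 2000: "2000 pixels drawn per 1000 viewport pixels".
    std::printf("%d\n", static_cast<int>(normalization * pixelsDrawnOpaque));
}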
void WebCompositorInputHandlerImpl::notifyCurrentFlingVelocity(const WebFloatSize& velocity)
{
    TRACE_EVENT2("webkit", "WebCompositorInputHandlerImpl::notifyCurrentFlingVelocity", "vx", velocity.width, "vy", velocity.height);
    m_inputHandlerClient->notifyCurrentFlingVelocity(toClientScrollIncrement(velocity));
}
// Update the existing display items by removing invalidated entries, updating
// repainted ones, and appending new items.
// - For cached drawing display item, copy the corresponding cached DrawingDisplayItem;
// - For cached subsequence display item, copy the cached display items between the
//   corresponding SubsequenceDisplayItem and EndSubsequenceDisplayItem (incl.);
// - Otherwise, copy the new display item.
//
// The algorithm is O(|m_currentDisplayItemList| + |m_newDisplayItemList|).
// Coefficients are related to the ratio of out-of-order CachedDisplayItems
// and the average number of (Drawing|Subsequence)DisplayItems per client.
void PaintController::commitNewDisplayItemsInternal()
{
    TRACE_EVENT2("blink,benchmark", "PaintController::commitNewDisplayItems",
        "current_display_list_size", (int)m_currentPaintArtifact.displayItemList().size(),
        "num_non_cached_new_items", (int)m_newDisplayItemList.size() - m_numCachedNewItems);
    m_numCachedNewItems = 0;

    if (RuntimeEnabledFeatures::slimmingPaintV2Enabled())
        m_clientsCheckedPaintInvalidation.clear();

    // These data structures are used during painting only.
    ASSERT(m_scopeStack.isEmpty());
    m_scopeStack.clear();
    m_nextScope = 1;
    ASSERT(!skippingCache());
#if ENABLE(ASSERT)
    m_newDisplayItemIndicesByClient.clear();
    m_clientsWithPaintOffsetInvalidations.clear();
    m_invalidations.clear();
#endif

    if (m_currentPaintArtifact.isEmpty()) {
#if ENABLE(ASSERT)
        for (const auto& item : m_newDisplayItemList)
            ASSERT(!item.isCached());
#endif
        m_currentPaintArtifact = PaintArtifact(std::move(m_newDisplayItemList), m_newPaintChunks.releasePaintChunks());
        m_newDisplayItemList = DisplayItemList(kInitialDisplayItemListCapacityBytes);
        m_validlyCachedClientsDirty = true;
        return;
    }

    updateValidlyCachedClientsIfNeeded();

    // Stores indices to valid DrawingDisplayItems in m_currentDisplayItems that have not been matched
    // by CachedDisplayItems during synchronized matching. The indexed items will be matched
    // by later out-of-order CachedDisplayItems in m_newDisplayItemList. This ensures that when
    // out-of-order CachedDisplayItems occur, we only traverse at most once over m_currentDisplayItems
    // looking for potential matches. Thus we can ensure that the algorithm runs in linear time.
    OutOfOrderIndexContext outOfOrderIndexContext(m_currentPaintArtifact.displayItemList().begin());

    // TODO(jbroman): Consider revisiting this heuristic.
    DisplayItemList updatedList(std::max(m_currentPaintArtifact.displayItemList().usedCapacityInBytes(), m_newDisplayItemList.usedCapacityInBytes()));
    Vector<PaintChunk> updatedPaintChunks;
    DisplayItemList::iterator currentIt = m_currentPaintArtifact.displayItemList().begin();
    DisplayItemList::iterator currentEnd = m_currentPaintArtifact.displayItemList().end();
    for (DisplayItemList::iterator newIt = m_newDisplayItemList.begin(); newIt != m_newDisplayItemList.end(); ++newIt) {
        const DisplayItem& newDisplayItem = *newIt;
        const DisplayItem::Id newDisplayItemId = newDisplayItem.nonCachedId();
        bool newDisplayItemHasCachedType = newDisplayItem.type() != newDisplayItemId.type;

        bool isSynchronized = currentIt != currentEnd && newDisplayItemId.matches(*currentIt);

        if (newDisplayItemHasCachedType) {
            ASSERT(newDisplayItem.isCached());
            ASSERT(clientCacheIsValid(newDisplayItem.client())
                || (RuntimeEnabledFeatures::slimmingPaintOffsetCachingEnabled() && !paintOffsetWasInvalidated(newDisplayItem.client())));
            if (!isSynchronized) {
                currentIt = findOutOfOrderCachedItem(newDisplayItemId, outOfOrderIndexContext);

                if (currentIt == currentEnd) {
#ifndef NDEBUG
                    showDebugData();
                    WTFLogAlways("%s not found in m_currentDisplayItemList\n", newDisplayItem.asDebugString().utf8().data());
#endif
                    ASSERT_NOT_REACHED();

                    // We did not find the cached display item. This should be impossible, but may occur if there is a bug
                    // in the system, such as under-invalidation, incorrect cache checking or duplicate display ids.
                    // In this case, attempt to recover rather than crashing or bailing on display of the rest of the display list.
                    continue;
                }
            }
#if ENABLE(ASSERT)
            if (RuntimeEnabledFeatures::slimmingPaintUnderInvalidationCheckingEnabled()) {
                DisplayItemList::iterator temp = currentIt;
                checkUnderInvalidation(newIt, temp);
            }
#endif
            if (newDisplayItem.isCachedDrawing()) {
                updatedList.appendByMoving(*currentIt);
                ++currentIt;
            } else {
                ASSERT(newDisplayItem.type() == DisplayItem::CachedSubsequence);
                copyCachedSubsequence(currentIt, updatedList);
                ASSERT(updatedList.last().type() == DisplayItem::EndSubsequence);
            }
        } else {
            ASSERT(!newDisplayItem.isDrawing()
                || newDisplayItem.skippedCache()
                || !clientCacheIsValid(newDisplayItem.client())
                || (RuntimeEnabledFeatures::slimmingPaintOffsetCachingEnabled() && paintOffsetWasInvalidated(newDisplayItem.client())));
            updatedList.appendByMoving(*newIt);

            if (isSynchronized)
                ++currentIt;
        }
        // Items before currentIt should have been copied so we don't need to index them.
        if (currentIt - outOfOrderIndexContext.nextItemToIndex > 0)
            outOfOrderIndexContext.nextItemToIndex = currentIt;
    }

#if ENABLE(ASSERT)
    if (RuntimeEnabledFeatures::slimmingPaintUnderInvalidationCheckingEnabled())
        checkNoRemainingCachedDisplayItems();
#endif // ENABLE(ASSERT)

    // TODO(jbroman): When subsequence caching applies to SPv2, we'll need to
    // merge the paint chunks as well.
    m_currentPaintArtifact = PaintArtifact(std::move(updatedList), m_newPaintChunks.releasePaintChunks());
    m_newDisplayItemList = DisplayItemList(kInitialDisplayItemListCapacityBytes);
    m_validlyCachedClientsDirty = true;
}
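// Illustrative sketch, not the Blink implementation: the core of the merge
// above is matching "cached" references in the new list against items in the
// old list in overall linear time. In-order matches advance a cursor;
// out-of-order matches consult an index that is built lazily, so each old
// item is indexed at most once. Items are modeled as distinct string ids here;
// a full implementation also removes consumed entries from the index.
#include <algorithm>
#include <cstddef>
#include <string>
#include <unordered_map>
#include <vector>

std::vector<std::string> mergeCached(const std::vector<std::string>& oldItems,
                                     const std::vector<std::string>& newItems)
{
    std::unordered_map<std::string, size_t> outOfOrderIndex;
    size_t cursor = 0;      // next unmatched old item
    size_t nextToIndex = 0; // old items below this are already indexed or consumed

    std::vector<std::string> updated;
    for (const auto& id : newItems) {
        if (cursor < oldItems.size() && oldItems[cursor] == id) {
            updated.push_back(oldItems[cursor++]); // synchronized (in-order) match
        } else {
            // Lazily extend the index until this id is found or the list ends.
            auto it = outOfOrderIndex.find(id);
            while (it == outOfOrderIndex.end() && nextToIndex < oldItems.size()) {
                outOfOrderIndex.emplace(oldItems[nextToIndex], nextToIndex);
                ++nextToIndex;
                it = outOfOrderIndex.find(id);
            }
            if (it != outOfOrderIndex.end())
                updated.push_back(oldItems[it->second]); // out-of-order match
            // else: genuinely new item; a real merge would append it here.
        }
        // Items before the cursor are already consumed; never index them.
        nextToIndex = std::max(nextToIndex, cursor);
    }
    return updated;
}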
PassOwnPtr<ScaledImageFragment> ImageFrameGenerator::decode(size_t index, ImageDecoder** decoder)
{
    TRACE_EVENT2("webkit", "ImageFrameGenerator::decode", "width", m_fullSize.width(), "height", m_fullSize.height());

    ASSERT(decoder);
    SharedBuffer* data = 0;
    bool allDataReceived = false;
    m_data.data(&data, &allDataReceived);

    // Try to create an ImageDecoder if we are not given one.
    if (!*decoder) {
        if (m_imageDecoderFactory)
            *decoder = m_imageDecoderFactory->create().leakPtr();

        if (!*decoder)
            *decoder = ImageDecoder::create(*data, ImageSource::AlphaPremultiplied, ImageSource::GammaAndColorProfileApplied).leakPtr();

        if (!*decoder)
            return nullptr;
    }

    // TODO: this is very ugly. We need to refactor the way we pass a
    // memory allocator to image decoders.
    if (!m_isMultiFrame)
        (*decoder)->setMemoryAllocator(&m_allocator);
    (*decoder)->setData(data, allDataReceived);

    // If this call returns a newly allocated DiscardablePixelRef, then
    // ImageFrame::m_bitmap and the contained DiscardablePixelRef are locked.
    // They will be unlocked when ImageDecoder is destroyed since ImageDecoder
    // owns the ImageFrame. Partially decoded SkBitmap is thus inserted into the
    // ImageDecodingStore while locked.
    ImageFrame* frame = (*decoder)->frameBufferAtIndex(index);
    (*decoder)->setData(0, false); // Unref SharedBuffer from ImageDecoder.
    (*decoder)->clearCacheExceptFrame(index);

    if (!frame || frame->status() == ImageFrame::FrameEmpty)
        return nullptr;

    const bool isComplete = frame->status() == ImageFrame::FrameComplete;
    SkBitmap fullSizeBitmap = frame->getSkBitmap();
    {
        MutexLocker lock(m_alphaMutex);
        if (index >= m_hasAlpha.size()) {
            const size_t oldSize = m_hasAlpha.size();
            m_hasAlpha.resize(index + 1);
            for (size_t i = oldSize; i < m_hasAlpha.size(); ++i)
                m_hasAlpha[i] = true;
        }
        m_hasAlpha[index] = !fullSizeBitmap.isOpaque();
    }
    ASSERT(fullSizeBitmap.width() == m_fullSize.width() && fullSizeBitmap.height() == m_fullSize.height());

    if (isComplete)
        return ScaledImageFragment::createComplete(m_fullSize, index, fullSizeBitmap);

    // If the image is partial we need to return a copy. This is to avoid future
    // decode operations writing to the same bitmap.
    SkBitmap copyBitmap;
    fullSizeBitmap.copyTo(&copyBitmap, fullSizeBitmap.config(), &m_allocator);
    return ScaledImageFragment::createPartial(m_fullSize, index, nextGenerationId(), copyBitmap);
}
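// Illustrative sketch, not Blink API: why a *partial* frame is copied before
// caching. The decoder keeps writing into its own bitmap as more data
// arrives, so the cache must hold an independent snapshot; a complete frame
// never changes again, so sharing its buffer is safe. Types below are
// hypothetical stand-ins.
#include <memory>
#include <vector>

using Pixels = std::vector<unsigned char>;

struct Frame {
    std::shared_ptr<Pixels> pixels = std::make_shared<Pixels>();
    bool complete = false;
};

std::shared_ptr<Pixels> snapshotForCache(const Frame& frame)
{
    if (frame.complete)
        return frame.pixels; // frame is final; safe to share the same buffer
    return std::make_shared<Pixels>(*frame.pixels); // deep copy of still-mutable data
}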
// Update the existing display items by removing invalidated entries, updating
// repainted ones, and appending new items.
// - For CachedDisplayItem, copy the corresponding cached DrawingDisplayItem;
// - For SubtreeCachedDisplayItem, copy the cached display items between the
//   corresponding BeginSubtreeDisplayItem and EndSubtreeDisplayItem (incl.);
// - Otherwise, copy the new display item.
//
// The algorithm is O(|m_currentDisplayItems| + |m_newDisplayItems|).
// Coefficients are related to the ratio of out-of-order [Subtree]CachedDisplayItems
// and the average number of (Drawing|BeginSubtree)DisplayItems per client.
//
// TODO(pdr): Implement the DisplayListDiff algorithm for SlimmingPaintV2.
void DisplayItemList::commitNewDisplayItems(DisplayListDiff*)
{
    TRACE_EVENT2("blink,benchmark", "DisplayItemList::commitNewDisplayItems",
        "current_display_list_size", (int)m_currentDisplayItems.size(),
        "num_non_cached_new_items", (int)m_newDisplayItems.size() - m_numCachedItems);

    // These data structures are used during painting only.
    ASSERT(m_scopeStack.isEmpty());
    m_scopeStack.clear();
    m_nextScope = 1;
    ASSERT(!skippingCache());
#if ENABLE(ASSERT)
    m_newDisplayItemIndicesByClient.clear();
#endif

    if (m_currentDisplayItems.isEmpty()) {
#if ENABLE(ASSERT)
        for (const auto& item : m_newDisplayItems)
            ASSERT(!item.isCached());
#endif
        m_currentDisplayItems.swap(m_newDisplayItems);
        m_validlyCachedClientsDirty = true;
        m_numCachedItems = 0;
        return;
    }

    updateValidlyCachedClientsIfNeeded();

    // Stores indices to valid DrawingDisplayItems in m_currentDisplayItems that have not been matched
    // by CachedDisplayItems during synchronized matching. The indexed items will be matched
    // by later out-of-order CachedDisplayItems in m_newDisplayItems. This ensures that when
    // out-of-order CachedDisplayItems occur, we only traverse at most once over m_currentDisplayItems
    // looking for potential matches. Thus we can ensure that the algorithm runs in linear time.
    OutOfOrderIndexContext outOfOrderIndexContext(m_currentDisplayItems.begin());

#if ENABLE(ASSERT)
    if (RuntimeEnabledFeatures::slimmingPaintUnderInvalidationCheckingEnabled()) {
        // Under-invalidation checking requires a full index of m_currentDisplayItems.
        size_t i = 0;
        for (const auto& item : m_currentDisplayItems) {
            addItemToIndexIfNeeded(item, i, outOfOrderIndexContext.displayItemIndicesByClient);
            ++i;
        }
    }
#endif // ENABLE(ASSERT)

    // TODO(jbroman): Consider revisiting this heuristic.
    DisplayItems updatedList(kMaximumDisplayItemSize, std::max(m_currentDisplayItems.usedCapacityInBytes(), m_newDisplayItems.usedCapacityInBytes()));
    DisplayItems::iterator currentIt = m_currentDisplayItems.begin();
    DisplayItems::iterator currentEnd = m_currentDisplayItems.end();
    for (DisplayItems::iterator newIt = m_newDisplayItems.begin(); newIt != m_newDisplayItems.end(); ++newIt) {
        const DisplayItem& newDisplayItem = *newIt;
        const DisplayItem::Id newDisplayItemId = newDisplayItem.nonCachedId();
        bool newDisplayItemHasCachedType = newDisplayItem.type() != newDisplayItemId.type;

        bool isSynchronized = currentIt != currentEnd && newDisplayItemId.matches(*currentIt);

        if (newDisplayItemHasCachedType) {
            ASSERT(!RuntimeEnabledFeatures::slimmingPaintUnderInvalidationCheckingEnabled());
            ASSERT(newDisplayItem.isCached());
            ASSERT(clientCacheIsValid(newDisplayItem.client()));
            if (!isSynchronized) {
                DisplayItems::iterator foundIt = findOutOfOrderCachedItem(currentIt, newDisplayItemId, outOfOrderIndexContext);

                if (foundIt == currentEnd) {
#ifndef NDEBUG
                    showDebugData();
                    WTFLogAlways("%s not found in m_currentDisplayItems\n", newDisplayItem.asDebugString().utf8().data());
#endif
                    ASSERT_NOT_REACHED();
                    // If foundIt == currentEnd, it means that we did not find the cached display item. This should be impossible, but may occur
                    // if there is a bug in the system, such as under-invalidation, incorrect cache checking or duplicate display ids. In this case,
                    // attempt to recover rather than crashing or bailing on display of the rest of the display list.
                    continue;
                }

                ASSERT(foundIt != currentIt); // because we are in 'if (!isSynchronized)'
                currentIt = foundIt;
            }

            if (newDisplayItem.isCachedDrawing()) {
                updatedList.appendByMoving(*currentIt, currentIt->derivedSize());
                ++currentIt;
            } else {
                ASSERT(newDisplayItem.isCachedSubtree());
                copyCachedSubtree(currentIt, updatedList);
                ASSERT(updatedList.last().isEndSubtree());
            }
        } else {
#if ENABLE(ASSERT)
            if (RuntimeEnabledFeatures::slimmingPaintUnderInvalidationCheckingEnabled())
                checkCachedDisplayItemIsUnchanged(newDisplayItem, outOfOrderIndexContext.displayItemIndicesByClient);
            else
                ASSERT(!newDisplayItem.isDrawing() || newDisplayItem.skippedCache() || !clientCacheIsValid(newDisplayItem.client()));
#endif // ENABLE(ASSERT)
            updatedList.appendByMoving(*newIt, newIt->derivedSize());

            if (isSynchronized)
                ++currentIt;
        }
    }

#if ENABLE(ASSERT)
    if (RuntimeEnabledFeatures::slimmingPaintUnderInvalidationCheckingEnabled())
        checkNoRemainingCachedDisplayItems();
#endif // ENABLE(ASSERT)

    m_newDisplayItems.clear();
    m_validlyCachedClientsDirty = true;
    m_currentDisplayItems.swap(updatedList);
    m_numCachedItems = 0;
}