// Find forward for the item and index all skipped indexable items. DisplayItems::iterator DisplayItemList::findOutOfOrderCachedItemForward(const DisplayItem::Id& id, OutOfOrderIndexContext& context) { DisplayItems::iterator currentEnd = m_currentDisplayItems.end(); for (; context.nextItemToIndex != currentEnd; ++context.nextItemToIndex) { const DisplayItem& item = *context.nextItemToIndex; ASSERT(item.isValid()); if (item.isCacheable() && clientCacheIsValid(item.client())) { if (id.matches(item)) return context.nextItemToIndex++; addItemToIndexIfNeeded(item, context.nextItemToIndex - m_currentDisplayItems.begin(), context.displayItemIndicesByClient); } } return currentEnd; }
// Looks up |id| in the per-client index and returns the position of the first
// still-valid entry that matches it, or kNotFound if the client has no indexed
// entries or none of them match.
size_t DisplayItemList::findMatchingItemFromIndex(const DisplayItem::Id& id, const DisplayItemIndicesByClientMap& displayItemIndicesByClient, const DisplayItems& list)
{
    DisplayItemIndicesByClientMap::const_iterator indicesIt = displayItemIndicesByClient.find(id.client);
    if (indicesIt == displayItemIndicesByClient.end())
        return kNotFound;

    // All indexed positions for this client; entries may have been
    // invalidated (moved out) since they were indexed, so re-check validity.
    for (size_t position : indicesIt->value) {
        const DisplayItem& candidate = list[position];
        ASSERT(!candidate.isValid() || candidate.client() == id.client);
        if (candidate.isValid() && id.matches(candidate))
            return position;
    }
    return kNotFound;
}
// Update the existing display items by removing invalidated entries, updating
// repainted ones, and appending new items.
// - For CachedDisplayItem, copy the corresponding cached DrawingDisplayItem;
// - For SubtreeCachedDisplayItem, copy the cached display items between the
//   corresponding BeginSubtreeDisplayItem and EndSubtreeDisplayItem (incl.);
// - Otherwise, copy the new display item.
//
// The algorithm is O(|m_currentDisplayItems| + |m_newDisplayItems|).
// Coefficients are related to the ratio of out-of-order [Subtree]CachedDisplayItems
// and the average number of (Drawing|BeginSubtree)DisplayItems per client.
//
// TODO(pdr): Implement the DisplayListDiff algorithm for SlimmingPaintV2.
void DisplayItemList::commitNewDisplayItems(DisplayListDiff*)
{
    TRACE_EVENT2("blink,benchmark", "DisplayItemList::commitNewDisplayItems",
        "current_display_list_size", (int)m_currentDisplayItems.size(),
        "num_non_cached_new_items", (int)m_newDisplayItems.size() - m_numCachedItems);

    // These data structures are used during painting only; they must be empty
    // (balanced begin/end scopes) by the time we commit.
    ASSERT(m_scopeStack.isEmpty());
    m_scopeStack.clear();
    m_nextScope = 1;
    ASSERT(!skippingCache());
#if ENABLE(ASSERT)
    m_newDisplayItemIndicesByClient.clear();
#endif

    // Fast path: nothing cached from a previous paint, so the new list can be
    // adopted wholesale (and must not contain any Cached* placeholders).
    if (m_currentDisplayItems.isEmpty()) {
#if ENABLE(ASSERT)
        for (const auto& item : m_newDisplayItems)
            ASSERT(!item.isCached());
#endif
        m_currentDisplayItems.swap(m_newDisplayItems);
        m_validlyCachedClientsDirty = true;
        m_numCachedItems = 0;
        return;
    }

    updateValidlyCachedClientsIfNeeded();

    // Stores indices to valid DrawingDisplayItems in m_currentDisplayItems that
    // have not been matched by CachedDisplayItems during synchronized matching.
    // The indexed items will be matched by later out-of-order CachedDisplayItems
    // in m_newDisplayItems. This ensures that when out-of-order
    // CachedDisplayItems occur, we only traverse at most once over
    // m_currentDisplayItems looking for potential matches. Thus we can ensure
    // that the algorithm runs in linear time.
    OutOfOrderIndexContext outOfOrderIndexContext(m_currentDisplayItems.begin());

#if ENABLE(ASSERT)
    if (RuntimeEnabledFeatures::slimmingPaintUnderInvalidationCheckingEnabled()) {
        // Under-invalidation checking requires a full index of m_currentDisplayItems.
        size_t i = 0;
        for (const auto& item : m_currentDisplayItems) {
            addItemToIndexIfNeeded(item, i, outOfOrderIndexContext.displayItemIndicesByClient);
            ++i;
        }
    }
#endif // ENABLE(ASSERT)

    // Size the merged list to the larger of the two inputs to limit reallocation.
    // TODO(jbroman): Consider revisiting this heuristic.
    DisplayItems updatedList(
        kMaximumDisplayItemSize,
        std::max(m_currentDisplayItems.usedCapacityInBytes(), m_newDisplayItems.usedCapacityInBytes()));
    DisplayItems::iterator currentIt = m_currentDisplayItems.begin();
    DisplayItems::iterator currentEnd = m_currentDisplayItems.end();
    for (DisplayItems::iterator newIt = m_newDisplayItems.begin(); newIt != m_newDisplayItems.end(); ++newIt) {
        const DisplayItem& newDisplayItem = *newIt;
        const DisplayItem::Id newDisplayItemId = newDisplayItem.nonCachedId();
        // A Cached* item's type differs from its non-cached id's type.
        bool newDisplayItemHasCachedType = newDisplayItem.type() != newDisplayItemId.type;

        // "Synchronized": the cursor into the old list is already pointing at
        // the item this new item refers to, so no out-of-order search is needed.
        bool isSynchronized = currentIt != currentEnd && newDisplayItemId.matches(*currentIt);

        if (newDisplayItemHasCachedType) {
            ASSERT(!RuntimeEnabledFeatures::slimmingPaintUnderInvalidationCheckingEnabled());
            ASSERT(newDisplayItem.isCached());
            ASSERT(clientCacheIsValid(newDisplayItem.client()));
            if (!isSynchronized) {
                DisplayItems::iterator foundIt = findOutOfOrderCachedItem(currentIt, newDisplayItemId, outOfOrderIndexContext);

                if (foundIt == currentEnd) {
#ifndef NDEBUG
                    showDebugData();
                    WTFLogAlways("%s not found in m_currentDisplayItems\n", newDisplayItem.asDebugString().utf8().data());
#endif
                    ASSERT_NOT_REACHED();
                    // We did not find the cached display item. This should be
                    // impossible, but may occur if there is a bug in the system,
                    // such as under-invalidation, incorrect cache checking or
                    // duplicate display ids. In this case, attempt to recover
                    // rather than crashing or bailing on display of the rest of
                    // the display list.
                    continue;
                }

                ASSERT(foundIt != currentIt); // because we are in 'if (!isSynchronized)'
                currentIt = foundIt;
            }

            if (newDisplayItem.isCachedDrawing()) {
                // Reuse the single cached drawing in place of the placeholder.
                updatedList.appendByMoving(*currentIt, currentIt->derivedSize());
                ++currentIt;
            } else {
                // Reuse the whole cached subtree (Begin..EndSubtree inclusive).
                ASSERT(newDisplayItem.isCachedSubtree());
                copyCachedSubtree(currentIt, updatedList);
                ASSERT(updatedList.last().isEndSubtree());
            }
        } else {
#if ENABLE(ASSERT)
            if (RuntimeEnabledFeatures::slimmingPaintUnderInvalidationCheckingEnabled())
                checkCachedDisplayItemIsUnchanged(newDisplayItem, outOfOrderIndexContext.displayItemIndicesByClient);
            else
                // A freshly painted drawing implies its client's cache was
                // invalid (or caching was explicitly skipped).
                ASSERT(!newDisplayItem.isDrawing() || newDisplayItem.skippedCache() || !clientCacheIsValid(newDisplayItem.client()));
#endif // ENABLE(ASSERT)
            // Not a Cached* placeholder: take the newly painted item as-is.
            updatedList.appendByMoving(*newIt, newIt->derivedSize());

            if (isSynchronized)
                ++currentIt;
        }
    }

#if ENABLE(ASSERT)
    if (RuntimeEnabledFeatures::slimmingPaintUnderInvalidationCheckingEnabled())
        checkNoRemainingCachedDisplayItems();
#endif // ENABLE(ASSERT)

    m_newDisplayItems.clear();
    m_validlyCachedClientsDirty = true;
    m_currentDisplayItems.swap(updatedList);
    m_numCachedItems = 0;
}
// Update the existing display items by removing invalidated entries, updating // repainted ones, and appending new items. // - For cached drawing display item, copy the corresponding cached DrawingDisplayItem; // - For cached subsequence display item, copy the cached display items between the // corresponding SubsequenceDisplayItem and EndSubsequenceDisplayItem (incl.); // - Otherwise, copy the new display item. // // The algorithm is O(|m_currentDisplayItemList| + |m_newDisplayItemList|). // Coefficients are related to the ratio of out-of-order CachedDisplayItems // and the average number of (Drawing|Subsequence)DisplayItems per client. // void PaintController::commitNewDisplayItemsInternal() { TRACE_EVENT2("blink,benchmark", "PaintController::commitNewDisplayItems", "current_display_list_size", (int)m_currentPaintArtifact.displayItemList().size(), "num_non_cached_new_items", (int)m_newDisplayItemList.size() - m_numCachedNewItems); m_numCachedNewItems = 0; if (RuntimeEnabledFeatures::slimmingPaintV2Enabled()) m_clientsCheckedPaintInvalidation.clear(); // These data structures are used during painting only. ASSERT(m_scopeStack.isEmpty()); m_scopeStack.clear(); m_nextScope = 1; ASSERT(!skippingCache()); #if ENABLE(ASSERT) m_newDisplayItemIndicesByClient.clear(); m_clientsWithPaintOffsetInvalidations.clear(); m_invalidations.clear(); #endif if (m_currentPaintArtifact.isEmpty()) { #if ENABLE(ASSERT) for (const auto& item : m_newDisplayItemList) ASSERT(!item.isCached()); #endif m_currentPaintArtifact = PaintArtifact(std::move(m_newDisplayItemList), m_newPaintChunks.releasePaintChunks()); m_newDisplayItemList = DisplayItemList(kInitialDisplayItemListCapacityBytes); m_validlyCachedClientsDirty = true; return; } updateValidlyCachedClientsIfNeeded(); // Stores indices to valid DrawingDisplayItems in m_currentDisplayItems that have not been matched // by CachedDisplayItems during synchronized matching. 
The indexed items will be matched // by later out-of-order CachedDisplayItems in m_newDisplayItemList. This ensures that when // out-of-order CachedDisplayItems occur, we only traverse at most once over m_currentDisplayItems // looking for potential matches. Thus we can ensure that the algorithm runs in linear time. OutOfOrderIndexContext outOfOrderIndexContext(m_currentPaintArtifact.displayItemList().begin()); // TODO(jbroman): Consider revisiting this heuristic. DisplayItemList updatedList(std::max(m_currentPaintArtifact.displayItemList().usedCapacityInBytes(), m_newDisplayItemList.usedCapacityInBytes())); Vector<PaintChunk> updatedPaintChunks; DisplayItemList::iterator currentIt = m_currentPaintArtifact.displayItemList().begin(); DisplayItemList::iterator currentEnd = m_currentPaintArtifact.displayItemList().end(); for (DisplayItemList::iterator newIt = m_newDisplayItemList.begin(); newIt != m_newDisplayItemList.end(); ++newIt) { const DisplayItem& newDisplayItem = *newIt; const DisplayItem::Id newDisplayItemId = newDisplayItem.nonCachedId(); bool newDisplayItemHasCachedType = newDisplayItem.type() != newDisplayItemId.type; bool isSynchronized = currentIt != currentEnd && newDisplayItemId.matches(*currentIt); if (newDisplayItemHasCachedType) { ASSERT(newDisplayItem.isCached()); ASSERT(clientCacheIsValid(newDisplayItem.client()) || (RuntimeEnabledFeatures::slimmingPaintOffsetCachingEnabled() && !paintOffsetWasInvalidated(newDisplayItem.client()))); if (!isSynchronized) { currentIt = findOutOfOrderCachedItem(newDisplayItemId, outOfOrderIndexContext); if (currentIt == currentEnd) { #ifndef NDEBUG showDebugData(); WTFLogAlways("%s not found in m_currentDisplayItemList\n", newDisplayItem.asDebugString().utf8().data()); #endif ASSERT_NOT_REACHED(); // We did not find the cached display item. This should be impossible, but may occur if there is a bug // in the system, such as under-invalidation, incorrect cache checking or duplicate display ids. 
// In this case, attempt to recover rather than crashing or bailing on display of the rest of the display list. continue; } } #if ENABLE(ASSERT) if (RuntimeEnabledFeatures::slimmingPaintUnderInvalidationCheckingEnabled()) { DisplayItemList::iterator temp = currentIt; checkUnderInvalidation(newIt, temp); } #endif if (newDisplayItem.isCachedDrawing()) { updatedList.appendByMoving(*currentIt); ++currentIt; } else { ASSERT(newDisplayItem.type() == DisplayItem::CachedSubsequence); copyCachedSubsequence(currentIt, updatedList); ASSERT(updatedList.last().type() == DisplayItem::EndSubsequence); } } else { ASSERT(!newDisplayItem.isDrawing() || newDisplayItem.skippedCache() || !clientCacheIsValid(newDisplayItem.client()) || (RuntimeEnabledFeatures::slimmingPaintOffsetCachingEnabled() && paintOffsetWasInvalidated(newDisplayItem.client()))); updatedList.appendByMoving(*newIt); if (isSynchronized) ++currentIt; } // Items before currentIt should have been copied so we don't need to index them. if (currentIt - outOfOrderIndexContext.nextItemToIndex > 0) outOfOrderIndexContext.nextItemToIndex = currentIt; } #if ENABLE(ASSERT) if (RuntimeEnabledFeatures::slimmingPaintUnderInvalidationCheckingEnabled()) checkNoRemainingCachedDisplayItems(); #endif // ENABLE(ASSERT) // TODO(jbroman): When subsequence caching applies to SPv2, we'll need to // merge the paint chunks as well. m_currentPaintArtifact = PaintArtifact(std::move(updatedList), m_newPaintChunks.releasePaintChunks()); m_newDisplayItemList = DisplayItemList(kInitialDisplayItemListCapacityBytes); m_validlyCachedClientsDirty = true; }