// Latches the next queued buffer from the SurfaceTexture into mActiveBuffer
// and returns the resulting dirty region, transformed into window-manager
// space.  Sets recomputeVisibleRegions whenever something that affects
// visibility changed (buffer size, crop, transform, scaling mode, opacity).
Region Layer::latchBuffer(bool& recomputeVisibleRegions) {
    ATRACE_CALL();
    Region outDirtyRegion;
    if (mQueuedFrames > 0) {
        // if we've already called updateTexImage() without going through
        // a composition step, we have to skip this layer at this point
        // because we cannot call updateTexImage() without a corresponding
        // compositionComplete() call.
        // we'll trigger an update in onPreComposition().
        if (mRefreshPending) {
            return outDirtyRegion;
        }

        // Capture the old state of the layer for comparisons later
        const bool oldOpacity = isOpaque();
        sp<GraphicBuffer> oldActiveBuffer = mActiveBuffer;

        // signal another event if we have more frames pending
        if (android_atomic_dec(&mQueuedFrames) > 1) {
            mFlinger->signalLayerUpdate();
        }

        // Rejecter passed to updateTexImage(): decides whether the incoming
        // buffer may be latched, resolving any pending size transaction as a
        // side effect (front/current are the drawing/current Layer states).
        struct Reject : public SurfaceTexture::BufferRejecter {
            Layer::State& front;
            Layer::State& current;
            bool& recomputeVisibleRegions;
            Reject(Layer::State& front, Layer::State& current,
                    bool& recomputeVisibleRegions)
                : front(front), current(current),
                  recomputeVisibleRegions(recomputeVisibleRegions) {
            }

            // Returns true to reject the buffer (a non-fixed-size buffer whose
            // dimensions don't match the active state), false to accept it.
            virtual bool reject(const sp<GraphicBuffer>& buf,
                    const BufferQueue::BufferItem& item) {
                if (buf == NULL) {
                    return false;
                }

                uint32_t bufWidth  = buf->getWidth();
                uint32_t bufHeight = buf->getHeight();

                // check that we received a buffer of the right size
                // (Take the buffer's orientation into account)
                if (item.mTransform & Transform::ROT_90) {
                    swap(bufWidth, bufHeight);
                }

                bool isFixedSize = item.mScalingMode != NATIVE_WINDOW_SCALING_MODE_FREEZE;
                if (front.active != front.requested) {
                    if (isFixedSize ||
                            (bufWidth == front.requested.w &&
                             bufHeight == front.requested.h)) {
                        // Here we pretend the transaction happened by updating the
                        // current and drawing states. Drawing state is only accessed
                        // in this thread, no need to have it locked
                        front.active = front.requested;

                        // We also need to update the current state so that
                        // we don't end-up overwriting the drawing state with
                        // this stale current state during the next transaction
                        //
                        // NOTE: We don't need to hold the transaction lock here
                        // because State::active is only accessed from this thread.
                        current.active = front.active;

                        // recompute visible region
                        recomputeVisibleRegions = true;
                    }

                    ALOGD_IF(DEBUG_RESIZE,
                            "latchBuffer/reject: buffer (%ux%u, tr=%02x), scalingMode=%d\n"
                            " drawing={ active ={ wh={%4u,%4u} crop={%4d,%4d,%4d,%4d} (%4d,%4d) }\n"
                            " requested={ wh={%4u,%4u} crop={%4d,%4d,%4d,%4d} (%4d,%4d) }}\n",
                            bufWidth, bufHeight, item.mTransform, item.mScalingMode,
                            front.active.w, front.active.h,
                            front.active.crop.left,
                            front.active.crop.top,
                            front.active.crop.right,
                            front.active.crop.bottom,
                            front.active.crop.getWidth(),
                            front.active.crop.getHeight(),
                            front.requested.w, front.requested.h,
                            front.requested.crop.left,
                            front.requested.crop.top,
                            front.requested.crop.right,
                            front.requested.crop.bottom,
                            front.requested.crop.getWidth(),
                            front.requested.crop.getHeight());
                }

                if (!isFixedSize) {
                    if (front.active.w != bufWidth ||
                        front.active.h != bufHeight) {
                        // reject this buffer
                        return true;
                    }
                }
                return false;
            }
        };

        Reject r(mDrawingState, currentState(), recomputeVisibleRegions);

        // NOTE(review): STE_HARDWARE builds appear to pass an extra flag to
        // updateTexImage() — semantics of the third argument are defined by
        // the vendor SurfaceTexture; confirm against that header.
#ifndef STE_HARDWARE
        if (mSurfaceTexture->updateTexImage(&r, true) < NO_ERROR) {
#else
        if (mSurfaceTexture->updateTexImage(&r, true, true) < NO_ERROR) {
#endif
            // something happened!
            recomputeVisibleRegions = true;
            return outDirtyRegion;
        }

        // update the active buffer
        mActiveBuffer = mSurfaceTexture->getCurrentBuffer();
        if (mActiveBuffer == NULL) {
            // this can only happen if the very first buffer was rejected.
            return outDirtyRegion;
        }

        mRefreshPending = true;
        mFrameLatencyNeeded = true;
        if (oldActiveBuffer == NULL) {
            // the first time we receive a buffer, we need to trigger a
            // geometry invalidation.
            recomputeVisibleRegions = true;
        }

        // Re-read crop/transform/scaling from the consumer; any change
        // invalidates the computed visible regions.
        Rect crop(mSurfaceTexture->getCurrentCrop());
        const uint32_t transform(mSurfaceTexture->getCurrentTransform());
        const uint32_t scalingMode(mSurfaceTexture->getCurrentScalingMode());
        if ((crop != mCurrentCrop) ||
            (transform != mCurrentTransform) ||
            (scalingMode != mCurrentScalingMode)) {
            mCurrentCrop = crop;
            mCurrentTransform = transform;
            mCurrentScalingMode = scalingMode;
            recomputeVisibleRegions = true;
        }

        // A buffer size change also invalidates visible regions.
        if (oldActiveBuffer != NULL) {
            uint32_t bufWidth  = mActiveBuffer->getWidth();
            uint32_t bufHeight = mActiveBuffer->getHeight();
            if (bufWidth != uint32_t(oldActiveBuffer->width) ||
                bufHeight != uint32_t(oldActiveBuffer->height)) {
                recomputeVisibleRegions = true;
            }
        }

        // Opacity is derived from the new buffer's pixel format.
        mCurrentOpacity = getOpacityForFormat(mActiveBuffer->format);
        if (oldOpacity != isOpaque()) {
            recomputeVisibleRegions = true;
        }

        glTexParameterx(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        glTexParameterx(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

        // FIXME: postedRegion should be dirty & bounds
        const Layer::State& front(drawingState());
        Region dirtyRegion(Rect(front.active.w, front.active.h));

        // transform the dirty region to window-manager space
        outDirtyRegion = (front.transform.transform(dirtyRegion));
    }
    return outDirtyRegion;
}

// Appends a one-line summary of this layer (format, active buffer geometry,
// queued-frame count, refresh-pending flag) to `result`, then delegates to
// the SurfaceTexture's own dump.  `buffer`/`SIZE` are a caller-provided
// scratch buffer for snprintf.
void Layer::dump(String8& result, char* buffer, size_t SIZE) const {
    LayerBaseClient::dump(result, buffer, SIZE);

    sp<const GraphicBuffer> buf0(mActiveBuffer);
    uint32_t w0=0, h0=0, s0=0, f0=0;
    if (buf0 != 0) {
        w0 = buf0->getWidth();
        h0 = buf0->getHeight();
        s0 = buf0->getStride();
        f0 = buf0->format;
    }
    snprintf(buffer, SIZE,
            " "
            "format=%2d, activeBuffer=[%4ux%4u:%4u,%3X],"
            " queued-frames=%d, mRefreshPending=%d\n",
            mFormat, w0, h0, s0,f0,
            mQueuedFrames, mRefreshPending);

    result.append(buffer);

    if (mSurfaceTexture != 0) {
        mSurfaceTexture->dump(result, " ", buffer, SIZE);
    }
}
QRegion QSGAbstractSoftwareRenderer::optimizeRenderList()
{
    // Pass 1: walk the render list front-to-back.  Propagate accumulated
    // dirt onto each node, strip anything hidden behind opaque content,
    // and keep a running region of what opaque nodes obscure.
    for (auto frontIt = m_renderableNodes.rbegin(); frontIt != m_renderableNodes.rend(); ++frontIt) {
        auto renderable = *frontIt;
        // Dirt gathered from nodes in front of this one applies to it too.
        if (!m_dirtyRegion.isEmpty())
            renderable->addDirtyRegion(m_dirtyRegion, true);
        // No point painting pixels covered by opaque content above.
        if (!m_obscuredRegion.isEmpty())
            renderable->subtractDirtyRegion(m_obscuredRegion);
        // An opaque node hides whatever sits beneath its minimal bounds.
        if (renderable->isOpaque())
            m_obscuredRegion += renderable->boundingRectMin();

        if (renderable->isDirty()) {
            // Clip the node's dirt to the rendering area when it sticks out.
            if (!m_background->rect().toRect().contains(renderable->boundingRectMax(), /*proper*/ true)) {
                QRegion renderArea(m_background->rect().toRect());
                QRegion clippedAway = renderable->dirtyRegion().subtracted(renderArea);
                if (!clippedAway.isEmpty())
                    renderable->subtractDirtyRegion(clippedAway);
            }
            // Opaque nodes absorb dirt under them; blended nodes add their
            // own dirt for the nodes behind to repaint.
            if (renderable->isOpaque())
                m_dirtyRegion -= renderable->boundingRectMin();
            else
                m_dirtyRegion += renderable->dirtyRegion();
            // Dirt the node left behind last frame (e.g. where it used to
            // be) must also be repainted by whatever is underneath.
            QRegion lastFrameDirt = renderable->previousDirtyRegion();
            if (!lastFrameDirt.isNull())
                m_dirtyRegion += lastFrameDirt;
        }
    }

    // The whole scene is opaque exactly when the opaque nodes cover the
    // entire rendering area.
    m_isOpaque = m_obscuredRegion.contains(m_background->rect().toAlignedRect());

    // Reset the accumulators before the second pass.
    m_dirtyRegion = QRegion();
    m_obscuredRegion = QRegion();

    // Pass 2: walk back-to-front.  Any blended node must repaint wherever
    // something beneath it is dirty; meanwhile collect the total dirt.
    for (auto backIt = m_renderableNodes.begin(); backIt != m_renderableNodes.end(); ++backIt) {
        auto renderable = *backIt;
        if (!renderable->isOpaque() && !m_dirtyRegion.isEmpty())
            renderable->addDirtyRegion(m_dirtyRegion, true);
        m_dirtyRegion += renderable->dirtyRegion();
    }

    QRegion updateRegion = m_dirtyRegion;

    // Leave both accumulators empty for the next frame.
    m_dirtyRegion = QRegion();
    m_obscuredRegion = QRegion();

    return updateRegion;
}
// Concurrently refines one dirty card: cleans the card, then scans the heap
// range it covers, updating remembered sets.  When check_for_refs_into_cset
// is true the scan also records references into the collection set via
// _cset_rs_update_cl[worker_i].  Returns the trigger closure's value, i.e.
// whether the card was found to contain a reference into the collection set.
bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
                                            bool check_for_refs_into_cset) {
  // Construct the region representing the card.
  HeapWord* start = _ct_bs->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(start);
  assert(r != NULL, "unexpected null");

  HeapWord* end = _ct_bs->addr_for(card_ptr + 1);
  MemRegion dirtyRegion(start, end);

#if CARD_REPEAT_HISTO
  init_ct_freq_table(_g1->max_capacity());
  ct_freq_note_card(_ct_bs->index_for(start));
#endif

  assert(!check_for_refs_into_cset || _cset_rs_update_cl[worker_i] != NULL, "sanity");
  UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1, _g1->g1_rem_set(),
                                               _cset_rs_update_cl[worker_i],
                                               check_for_refs_into_cset,
                                               worker_i);
  update_rs_oop_cl.set_from(r);

  // Closure chain: the trigger fires when a ref into the collection set is
  // seen; mux applies both the trigger path and the remembered-set update.
  TriggerClosure trigger_cl;
  FilterIntoCSClosure into_cs_cl(NULL, _g1, &trigger_cl);
  InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);
  Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);

  FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r,
                        (check_for_refs_into_cset ?
                                (OopClosure*)&mux :
                                (OopClosure*)&update_rs_oop_cl));

  // Undirty the card.
  *card_ptr = CardTableModRefBS::clean_card_val();
  // We must complete this write before we do any of the reads below.
  OrderAccess::storeload();
  // And process it, being careful of unallocated portions of TLAB's.

  // The region for the current card may be a young region. The
  // current card may have been a card that was evicted from the
  // card cache. When the card was inserted into the cache, we had
  // determined that its region was non-young. While in the cache,
  // the region may have been freed during a cleanup pause, reallocated
  // and tagged as young.
  //
  // We wish to filter out cards for such a region but the current
  // thread, if we're running concurrently, may "see" the young type
  // change at any time (so an earlier "is_young" check may pass or
  // fail arbitrarily). We tell the iteration code to perform this
  // filtering when it has been determined that there has been an actual
  // allocation in this region and making it safe to check the young type.
  bool filter_young = true;

  HeapWord* stop_point =
    r->oops_on_card_seq_iterate_careful(dirtyRegion,
                                        &filter_then_update_rs_oop_cl,
                                        filter_young);

  // If stop_point is non-null, then we encountered an unallocated region
  // (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the
  // card and re-enqueue: if we put off the card until a GC pause, then the
  // unallocated portion will be filled in. Alternatively, we might try
  // the full complexity of the technique used in "regular" precleaning.
  if (stop_point != NULL) {
    // The card might have gotten re-dirtied and re-enqueued while we
    // worked. (In fact, it's pretty likely.)
    if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
      *card_ptr = CardTableModRefBS::dirty_card_val();
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      DirtyCardQueue* sdcq =
        JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
      sdcq->enqueue(card_ptr);
    }
  } else {
    out_of_histo.add_entry(filter_then_update_rs_oop_cl.out_of_region());
    _conc_refine_cards++;
  }

  return trigger_cl.value();
}
// Latches the next queued buffer from the SurfaceFlingerConsumer into
// mActiveBuffer and returns the resulting dirty region in window-manager
// space.  Sets recomputeVisibleRegions when anything affecting visibility
// changed (size, crop, transform, scaling mode, opacity, transparent region).
Region Layer::latchBuffer(bool& recomputeVisibleRegions) {
    ATRACE_CALL();
    Region outDirtyRegion;
    if (mQueuedFrames > 0) {
        // if we've already called updateTexImage() without going through
        // a composition step, we have to skip this layer at this point
        // because we cannot call updateTexImage() without a corresponding
        // compositionComplete() call.
        // we'll trigger an update in onPreComposition().
        if (mRefreshPending) {
            return outDirtyRegion;
        }

        // Capture the old state of the layer for comparisons later
        const bool oldOpacity = isOpaque();
        sp<GraphicBuffer> oldActiveBuffer = mActiveBuffer;

        // Rejecter passed to updateTexImage(): decides whether the incoming
        // buffer may be latched, resolving any pending size transaction and
        // latching the transparent region as side effects.
        struct Reject : public SurfaceFlingerConsumer::BufferRejecter {
            Layer::State& front;
            Layer::State& current;
            bool& recomputeVisibleRegions;
            Reject(Layer::State& front, Layer::State& current,
                    bool& recomputeVisibleRegions)
                : front(front), current(current),
                  recomputeVisibleRegions(recomputeVisibleRegions) {
            }

            // Returns true to reject the buffer (a non-fixed-size buffer whose
            // dimensions don't match the active state), false to accept it.
            virtual bool reject(const sp<GraphicBuffer>& buf,
                    const IGraphicBufferConsumer::BufferItem& item) {
                if (buf == NULL) {
                    return false;
                }

                uint32_t bufWidth  = buf->getWidth();
                uint32_t bufHeight = buf->getHeight();

                // check that we received a buffer of the right size
                // (Take the buffer's orientation into account)
                if (item.mTransform & Transform::ROT_90) {
                    swap(bufWidth, bufHeight);
                }

                bool isFixedSize = item.mScalingMode != NATIVE_WINDOW_SCALING_MODE_FREEZE;
                if (front.active != front.requested) {
                    if (isFixedSize ||
                            (bufWidth == front.requested.w &&
                             bufHeight == front.requested.h)) {
                        // Here we pretend the transaction happened by updating the
                        // current and drawing states. Drawing state is only accessed
                        // in this thread, no need to have it locked
                        front.active = front.requested;

                        // We also need to update the current state so that
                        // we don't end-up overwriting the drawing state with
                        // this stale current state during the next transaction
                        //
                        // NOTE: We don't need to hold the transaction lock here
                        // because State::active is only accessed from this thread.
                        current.active = front.active;

                        // recompute visible region
                        recomputeVisibleRegions = true;
                    }

                    ALOGD_IF(DEBUG_RESIZE,
                            "latchBuffer/reject: buffer (%ux%u, tr=%02x), scalingMode=%d\n"
                            " drawing={ active ={ wh={%4u,%4u} crop={%4d,%4d,%4d,%4d} (%4d,%4d) }\n"
                            " requested={ wh={%4u,%4u} crop={%4d,%4d,%4d,%4d} (%4d,%4d) }}\n",
                            bufWidth, bufHeight, item.mTransform, item.mScalingMode,
                            front.active.w, front.active.h,
                            front.active.crop.left,
                            front.active.crop.top,
                            front.active.crop.right,
                            front.active.crop.bottom,
                            front.active.crop.getWidth(),
                            front.active.crop.getHeight(),
                            front.requested.w, front.requested.h,
                            front.requested.crop.left,
                            front.requested.crop.top,
                            front.requested.crop.right,
                            front.requested.crop.bottom,
                            front.requested.crop.getWidth(),
                            front.requested.crop.getHeight());
                }

                if (!isFixedSize) {
                    if (front.active.w != bufWidth ||
                        front.active.h != bufHeight) {
                        // reject this buffer
                        //ALOGD("rejecting buffer: bufWidth=%d, bufHeight=%d, front.active.{w=%d, h=%d}",
                        //        bufWidth, bufHeight, front.active.w, front.active.h);
                        return true;
                    }
                }

                // if the transparent region has changed (this test is
                // conservative, but that's fine, worst case we're doing
                // a bit of extra work), we latch the new one and we
                // trigger a visible-region recompute.
                if (!front.activeTransparentRegion.isTriviallyEqual(
                        front.requestedTransparentRegion)) {
                    front.activeTransparentRegion = front.requestedTransparentRegion;

                    // We also need to update the current state so that
                    // we don't end-up overwriting the drawing state with
                    // this stale current state during the next transaction
                    //
                    // NOTE: We don't need to hold the transaction lock here
                    // because State::active is only accessed from this thread.
                    current.activeTransparentRegion = front.activeTransparentRegion;

                    // recompute visible region
                    recomputeVisibleRegions = true;
                }

                return false;
            }
        };

        Reject r(mDrawingState, getCurrentState(), recomputeVisibleRegions);

        status_t updateResult = mSurfaceFlingerConsumer->updateTexImage(&r);
        if (updateResult == BufferQueue::PRESENT_LATER) {
            // Producer doesn't want buffer to be displayed yet. Signal a
            // layer update so we check again at the next opportunity.
            mFlinger->signalLayerUpdate();
            return outDirtyRegion;
        }

        // Decrement the queued-frames count. Signal another event if we
        // have more frames pending.
        if (android_atomic_dec(&mQueuedFrames) > 1) {
            mFlinger->signalLayerUpdate();
        }

        if (updateResult != NO_ERROR) {
            // something happened!
            recomputeVisibleRegions = true;
            return outDirtyRegion;
        }

        // update the active buffer
        mActiveBuffer = mSurfaceFlingerConsumer->getCurrentBuffer();
        if (mActiveBuffer == NULL) {
            // this can only happen if the very first buffer was rejected.
            return outDirtyRegion;
        }

        mRefreshPending = true;
        mFrameLatencyNeeded = true;
        if (oldActiveBuffer == NULL) {
            // the first time we receive a buffer, we need to trigger a
            // geometry invalidation.
            recomputeVisibleRegions = true;
        }

        // Re-read crop/transform/scaling from the consumer; any change
        // invalidates the computed visible regions.
        Rect crop(mSurfaceFlingerConsumer->getCurrentCrop());
        const uint32_t transform(mSurfaceFlingerConsumer->getCurrentTransform());
        const uint32_t scalingMode(mSurfaceFlingerConsumer->getCurrentScalingMode());
        if ((crop != mCurrentCrop) ||
            (transform != mCurrentTransform) ||
            (scalingMode != mCurrentScalingMode)) {
            mCurrentCrop = crop;
            mCurrentTransform = transform;
            mCurrentScalingMode = scalingMode;
            recomputeVisibleRegions = true;
        }

        // A buffer size change also invalidates visible regions.
        if (oldActiveBuffer != NULL) {
            uint32_t bufWidth  = mActiveBuffer->getWidth();
            uint32_t bufHeight = mActiveBuffer->getHeight();
            if (bufWidth != uint32_t(oldActiveBuffer->width) ||
                bufHeight != uint32_t(oldActiveBuffer->height)) {
                recomputeVisibleRegions = true;
            }
        }

        // Opacity is derived from the new buffer's pixel format.
        mCurrentOpacity = getOpacityForFormat(mActiveBuffer->format);
        if (oldOpacity != isOpaque()) {
            recomputeVisibleRegions = true;
        }

        // FIXME: postedRegion should be dirty & bounds
        const Layer::State& s(getDrawingState());
        Region dirtyRegion(Rect(s.active.w, s.active.h));

        // transform the dirty region to window-manager space
        outDirtyRegion = (s.transform.transform(dirtyRegion));
    }
    return outDirtyRegion;
}
// Refines one dirty card: filters out cards that need no work (already
// clean, young region, collection-set region, absorbed by the hot card
// cache), then cleans the card and scans the heap range it covers to
// update remembered sets.  Returns whether the card was found to contain
// references into the collection set (only possible at a safepoint, when
// check_for_refs_into_cset is true).
bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
                           bool check_for_refs_into_cset) {
  assert(_g1->is_in_exact(_ct_bs->addr_for(card_ptr)),
         err_msg("Card at "PTR_FORMAT" index "SIZE_FORMAT" representing heap at "PTR_FORMAT" (%u) must be in committed heap",
                 p2i(card_ptr),
                 _ct_bs->index_for(_ct_bs->addr_for(card_ptr)),
                 _ct_bs->addr_for(card_ptr),
                 _g1->addr_to_region(_ct_bs->addr_for(card_ptr))));

  // If the card is no longer dirty, nothing to do.
  if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
    // No need to return that this card contains refs that point
    // into the collection set.
    return false;
  }

  // Construct the region representing the card.
  HeapWord* start = _ct_bs->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(start);

  // Why do we have to check here whether a card is on a young region,
  // given that we dirty young regions and, as a result, the
  // post-barrier is supposed to filter them out and never to enqueue
  // them? When we allocate a new region as the "allocation region" we
  // actually dirty its cards after we release the lock, since card
  // dirtying while holding the lock was a performance bottleneck. So,
  // as a result, it is possible for other threads to actually
  // allocate objects in the region (after the acquire the lock)
  // before all the cards on the region are dirtied. This is unlikely,
  // and it doesn't happen often, but it can happen. So, the extra
  // check below filters out those cards.
  if (r->is_young()) {
    return false;
  }

  // While we are processing RSet buffers during the collection, we
  // actually don't want to scan any cards on the collection set,
  // since we don't want to update remembered sets with entries that
  // point into the collection set, given that live objects from the
  // collection set are about to move and such entries will be stale
  // very soon. This change also deals with a reliability issue which
  // involves scanning a card in the collection set and coming across
  // an array that was being chunked and looking malformed. Note,
  // however, that if evacuation fails, we have to scan any objects
  // that were not moved and create any missing entries.
  if (r->in_collection_set()) {
    return false;
  }

  // The result from the hot card cache insert call is either:
  //   * pointer to the current card
  //     (implying that the current card is not 'hot'),
  //   * null
  //     (meaning we had inserted the card ptr into the "hot" card cache,
  //     which had some headroom),
  //   * a pointer to a "hot" card that was evicted from the "hot" cache.
  //
  G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
  if (hot_card_cache->use_cache()) {
    assert(!check_for_refs_into_cset, "sanity");
    assert(!SafepointSynchronize::is_at_safepoint(), "sanity");

    card_ptr = hot_card_cache->insert(card_ptr);
    if (card_ptr == NULL) {
      // There was no eviction. Nothing to do.
      return false;
    }

    // card_ptr may now refer to a different (evicted) card; recompute
    // its heap address and region.
    start = _ct_bs->addr_for(card_ptr);
    r = _g1->heap_region_containing(start);

    // Checking whether the region we got back from the cache
    // is young here is inappropriate. The region could have been
    // freed, reallocated and tagged as young while in the cache.
    // Hence we could see its young type change at any time.
  }

  // Don't use addr_for(card_ptr + 1) which can ask for
  // a card beyond the heap. This is not safe without a perm
  // gen at the upper end of the heap.
  HeapWord* end = start + CardTableModRefBS::card_size_in_words;
  MemRegion dirtyRegion(start, end);

#if CARD_REPEAT_HISTO
  init_ct_freq_table(_g1->max_capacity());
  ct_freq_note_card(_ct_bs->index_for(start));
#endif

  G1ParPushHeapRSClosure* oops_in_heap_closure = NULL;
  if (check_for_refs_into_cset) {
    // ConcurrentG1RefineThreads have worker numbers larger than what
    // _cset_rs_update_cl[] is set up to handle. But those threads should
    // only be active outside of a collection which means that when they
    // reach here they should have check_for_refs_into_cset == false.
    assert((size_t)worker_i < n_workers(), "index of worker larger than _cset_rs_update_cl[].length");
    oops_in_heap_closure = _cset_rs_update_cl[worker_i];
  }
  G1UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
                                                 _g1->g1_rem_set(),
                                                 oops_in_heap_closure,
                                                 check_for_refs_into_cset,
                                                 worker_i);
  update_rs_oop_cl.set_from(r);

  // Closure chain: the trigger fires when a ref into the collection set is
  // seen; mux applies both the trigger path and the remembered-set update.
  G1TriggerClosure trigger_cl;
  FilterIntoCSClosure into_cs_cl(NULL, _g1, &trigger_cl);
  G1InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);
  G1Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);

  FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r,
                        (check_for_refs_into_cset ?
                                (OopClosure*)&mux :
                                (OopClosure*)&update_rs_oop_cl));

  // The region for the current card may be a young region. The
  // current card may have been a card that was evicted from the
  // card cache. When the card was inserted into the cache, we had
  // determined that its region was non-young. While in the cache,
  // the region may have been freed during a cleanup pause, reallocated
  // and tagged as young.
  //
  // We wish to filter out cards for such a region but the current
  // thread, if we're running concurrently, may "see" the young type
  // change at any time (so an earlier "is_young" check may pass or
  // fail arbitrarily). We tell the iteration code to perform this
  // filtering when it has been determined that there has been an actual
  // allocation in this region and making it safe to check the young type.
  bool filter_young = true;

  HeapWord* stop_point =
    r->oops_on_card_seq_iterate_careful(dirtyRegion,
                                        &filter_then_update_rs_oop_cl,
                                        filter_young,
                                        card_ptr);

  // If stop_point is non-null, then we encountered an unallocated region
  // (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the
  // card and re-enqueue: if we put off the card until a GC pause, then the
  // unallocated portion will be filled in. Alternatively, we might try
  // the full complexity of the technique used in "regular" precleaning.
  if (stop_point != NULL) {
    // The card might have gotten re-dirtied and re-enqueued while we
    // worked. (In fact, it's pretty likely.)
    if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
      *card_ptr = CardTableModRefBS::dirty_card_val();
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      DirtyCardQueue* sdcq =
        JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
      sdcq->enqueue(card_ptr);
    }
  } else {
    _conc_refine_cards++;
  }

  // This gets set to true if the card being refined has
  // references that point into the collection set.
  bool has_refs_into_cset = trigger_cl.triggered();

  // We should only be detecting that the card contains references
  // that point into the collection set if the current thread is
  // a GC worker thread.
  assert(!has_refs_into_cset || SafepointSynchronize::is_at_safepoint(),
         "invalid result at non safepoint");

  return has_refs_into_cset;
}