Example #1
void FenceTime::applyTrustedSnapshot(const Snapshot& src) {
    if (CC_UNLIKELY(src.state == Snapshot::State::FENCE)) {
        // Applying a Snapshot::State::FENCE could change the valid state of the
        // FenceTime, which is not allowed. Callers should create a new
        // FenceTime from the snapshot instead.
        ALOGE("applyTrustedSnapshot: Unexpected fence.");
        return;
    }

    if (src.state == Snapshot::State::EMPTY) {
        return;
    }

    nsecs_t signalTime = mSignalTime.load(std::memory_order_relaxed);
    if (signalTime != Fence::SIGNAL_TIME_PENDING) {
        // We should always get the same signalTime here that we did in
        // getSignalTime(). This check races with getSignalTime(), but it is
        // only a sanity check so that's okay.
        if (CC_UNLIKELY(signalTime != src.signalTime)) {
            ALOGE("FenceTime::applyTrustedSnapshot: signalTime mismatch. "
                    "(%" PRId64 " (old) != %" PRId64 " (new))",
                    signalTime, src.signalTime);
        }
        return;
    }

    std::lock_guard<std::mutex> lock(mMutex);
    mFence.clear();
    mSignalTime.store(src.signalTime, std::memory_order_relaxed);
}
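
All of the snippets in this collection rely on the CC_LIKELY/CC_UNLIKELY branch-prediction hints. For reference, here is a minimal sketch of how such macros are typically defined, in the style of Android's <cutils/compiler.h>; treat the exact spelling as an assumption, not a verbatim copy of that header.

// Minimal sketch of branch-prediction hint macros in the style of
// <cutils/compiler.h>; an approximation for reference, not the actual header.
#ifndef CC_LIKELY
#define CC_LIKELY(exp)   (__builtin_expect(!!(exp), true))
#endif
#ifndef CC_UNLIKELY
#define CC_UNLIKELY(exp) (__builtin_expect(!!(exp), false))
#endif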
ssize_t AudioBufferProviderSource::read(void *buffer,
                                        size_t count,
                                        int64_t readPTS)
{
    if (CC_UNLIKELY(!mNegotiated)) {
        return NEGOTIATE;
    }
    if (CC_UNLIKELY(mBuffer.raw == NULL)) {
        mBuffer.frameCount = count;
        status_t status = mProvider->getNextBuffer(&mBuffer, readPTS);
        if (status != OK) {
            return status == NOT_ENOUGH_DATA ? (ssize_t) WOULD_BLOCK : (ssize_t) status;
        }
        ALOG_ASSERT(mBuffer.raw != NULL);
        // mConsumed is 0 either from constructor or after releaseBuffer()
    }
    size_t available = mBuffer.frameCount - mConsumed;
    if (CC_UNLIKELY(count > available)) {
        count = available;
    }
    // count could be zero, either because count was zero on entry or
    // available is zero, but both are unlikely so don't check for that
    memcpy(buffer, (char *) mBuffer.raw + (mConsumed << mBitShift), count << mBitShift);
    if (CC_UNLIKELY((mConsumed += count) >= mBuffer.frameCount)) {
        mProvider->releaseBuffer(&mBuffer);
        mBuffer.raw = NULL;
        mConsumed = 0;
    }
    mFramesRead += count;
    // For better responsiveness with large values of count,
    // return a short count rather than continuing with next buffer.
    // This gives the caller a chance to interpolate other actions.
    return count;
}
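
A hypothetical caller sketch for the read() path above, showing how the short-count behavior and the error returns might be handled; the helper name, the frameSize parameter, and the simplified readPTS handling are illustrative assumptions, not part of the original API.

// Hypothetical caller loop for AudioBufferProviderSource::read(); names are
// illustrative only, and the readPTS handling is deliberately simplified.
static ssize_t drainFrames(AudioBufferProviderSource& source, char* buf,
                           size_t framesWanted, int64_t readPTS, size_t frameSize) {
    size_t done = 0;
    while (done < framesWanted) {
        ssize_t n = source.read(buf + done * frameSize, framesWanted - done, readPTS);
        if (n == NEGOTIATE) {
            return NEGOTIATE;               // format must be (re-)negotiated first
        }
        if (n < 0) {
            return done > 0 ? (ssize_t) done : n;   // WOULD_BLOCK or provider error
        }
        if (n == 0) {
            break;                          // no data currently available
        }
        done += n;                          // read() intentionally returns short counts
    }
    return (ssize_t) done;
}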
void ShadowTessellator::tessellateSpotShadow(bool isCasterOpaque,
        const Vector3* casterPolygon, int casterVertexCount, const Vector3& casterCentroid,
        const mat4& receiverTransform, const Vector3& lightCenter, int lightRadius,
        const Rect& casterBounds, const Rect& localClip, VertexBuffer& shadowVertexBuffer) {
    ATRACE_CALL();

    Caches& caches = Caches::getInstance();

    Vector3 adjustedLightCenter(lightCenter);
    if (CC_UNLIKELY(caches.propertyLightPosY > 0)) {
        adjustedLightCenter.y = - caches.propertyLightPosY; // negated since this shifts up
    }
    if (CC_UNLIKELY(caches.propertyLightPosZ > 0)) {
        adjustedLightCenter.z = caches.propertyLightPosZ;
    }

#if DEBUG_SHADOW
    ALOGD("light center %f %f %f",
            adjustedLightCenter.x, adjustedLightCenter.y, adjustedLightCenter.z);
#endif

    // light position (because it's in local space) needs to compensate for receiver transform
    // TODO: should apply to light orientation, not just position
    Matrix4 reverseReceiverTransform;
    reverseReceiverTransform.loadInverse(receiverTransform);
    reverseReceiverTransform.mapPoint3d(adjustedLightCenter);

    const int lightVertexCount = 8;
    if (CC_UNLIKELY(caches.propertyLightDiameter > 0)) {
        lightRadius = caches.propertyLightDiameter;
    }

    // Now light and caster are both in local space, we will check whether
    // the shadow is within the clip area.
    Rect lightRect = Rect(adjustedLightCenter.x - lightRadius, adjustedLightCenter.y - lightRadius,
            adjustedLightCenter.x + lightRadius, adjustedLightCenter.y + lightRadius);
    lightRect.unionWith(localClip);
    if (!lightRect.intersects(casterBounds)) {
#if DEBUG_SHADOW
        ALOGD("Spot shadow is out of clip rect!");
#endif
        return;
    }

    SpotShadow::createSpotShadow(isCasterOpaque, adjustedLightCenter, lightRadius,
            casterPolygon, casterVertexCount, casterCentroid, shadowVertexBuffer);

#if DEBUG_SHADOW
    if (shadowVertexBuffer.getVertexCount() <= 0) {
        ALOGD("Spot shadow generation failed %d", shadowVertexBuffer.getVertexCount());
    }
#endif
}
void Snapshot::applyClip(const ClipBase* recordedClip, const Matrix4& transform) {
    if (CC_UNLIKELY(recordedClip && recordedClip->intersectWithRoot)) {
        // current clip is being replaced, but must intersect with clip root
        *mClipArea = *(getClipRoot(this)->mClipArea);
    }
    mClipArea->applyClip(recordedClip, transform);
}
void ShadowTessellator::tessellateAmbientShadow(bool isCasterOpaque,
        const Vector3* casterPolygon, int casterVertexCount,
        const Vector3& centroid3d, const Rect& casterBounds,
        const Rect& localClip, float maxZ, VertexBuffer& shadowVertexBuffer) {
    ATRACE_CALL();

    // A bunch of parameters to tweak the shadow.
    // TODO: Allow some of these to be changed by debug settings or APIs.
    float heightFactor = 1.0f / 128;
    const float geomFactor = 64;

    Caches& caches = Caches::getInstance();
    if (CC_UNLIKELY(caches.propertyAmbientRatio > 0.0f)) {
        heightFactor *= caches.propertyAmbientRatio;
    }

    Rect ambientShadowBounds(casterBounds);
    ambientShadowBounds.outset(maxZ * geomFactor * heightFactor);

    if (!localClip.intersects(ambientShadowBounds)) {
#if DEBUG_SHADOW
        ALOGD("Ambient shadow is out of clip rect!");
#endif
        return;
    }

    AmbientShadow::createAmbientShadow(isCasterOpaque, casterPolygon,
            casterVertexCount, centroid3d, heightFactor, geomFactor,
            shadowVertexBuffer);
}
void FrameInfoVisualizer::setDensity(float density) {
    if (CC_UNLIKELY(mDensity != density)) {
        mDensity = density;
        mVerticalUnit = dpToPx(PROFILE_DRAW_DP_PER_MS, density);
        mThresholdStroke = dpToPx(PROFILE_DRAW_THRESHOLD_STROKE_WIDTH, density);
    }
}
/**
 * CameraBuffer
 *
 * Constructor for buffers allocated using mmap
 *
 * \param fd [IN] File descriptor to map
 * \param length [IN] amount of data to map
 * \param v4l2fmt [IN] Pixel format in V4L2 enum
 * \param offset [IN] offset from the beginning of the file (mmap param)
 * \param prot [IN] memory protection (mmap param)
 * \param flags [IN] flags (mmap param)
 *
 * Success of the mmap can be queried by checking the size of the resulting
 * buffer
 */
CameraBuffer::CameraBuffer(int fd, int length, int v4l2fmt, int offset,
                           int prot, int flags):
        mWidth(1),
        mHeight(length),
        mSize(0),
        mFormat(0),
        mV4L2Fmt(v4l2fmt),
        mStride(1),
        mInit(false),
        mLocked(false),
        mType(BUF_TYPE_MMAP),
        mOwner(NULL),
        mDataPtr(NULL),
        mRequestID(0)
{

    mDataPtr = mmap(NULL, length, prot, flags, fd, offset);
    if (CC_UNLIKELY(mDataPtr == MAP_FAILED)) {
        LOGE("Failed to MMAP the buffer %s", strerror(errno));
        mDataPtr = NULL;
        return;
    }
    mLocked = true;
    mInit = true;
    mSize = length;
    CLEAR(mUserBuffer);
    mUserBuffer.release_fence = -1;
    mUserBuffer.acquire_fence = -1;
    LOG1("mmaped address for  %p length %d", mDataPtr, mSize);
}
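
The constructor above follows the usual POSIX mmap error-check idiom: failure is reported through MAP_FAILED, not a null pointer. A standalone sketch of that pattern, independent of the CameraBuffer class:

#include <sys/mman.h>   // mmap, munmap, MAP_FAILED
#include <cstdio>
#include <cerrno>
#include <cstring>

// Standalone illustration of the mmap/munmap pattern used by the
// CameraBuffer constructor above; fd/length/offset are caller-supplied.
static void* mapBuffer(int fd, size_t length, off_t offset) {
    void* addr = mmap(nullptr, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, offset);
    if (addr == MAP_FAILED) {
        // mmap reports failure through MAP_FAILED, never through nullptr
        fprintf(stderr, "mmap failed: %s\n", strerror(errno));
        return nullptr;
    }
    return addr;
}

static void unmapBuffer(void* addr, size_t length) {
    if (addr != nullptr) {
        munmap(addr, length);
    }
}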
status_t ResultProcessor::handleRegisterRequest(Message &msg)
{
    status_t status = NO_ERROR;
    RequestState_t* reqState;
    int reqId = msg.request->getId();
    /**
     * Check whether the request is already registered. We may receive duplicated
     * registration requests for requests that are held by the PSL.
     */
    if (mRequestsInTransit.indexOfKey(reqId) != NAME_NOT_FOUND) {
        return NO_ERROR;
    }

    status = mReqStatePool.acquireItem(&reqState);
    if (status != NO_ERROR) {
        LOGE("Could not acquire an empty reqState from the pool");
        return status;
    }

    reqState->init(msg.request);
    mRequestsInTransit.add(reqState->reqId, reqState);
    LOGR("<request %d> registered @ ResultProcessor", reqState->reqId);
    /**
     * Get the number of partial results the request may return. This is not
     * going to change once the camera is open, so query it only once.
     * The value is initialized to 0; the minimum valid value is 1.
     */
    if (CC_UNLIKELY(mPartialResultCount == 0)) {
        mPartialResultCount = msg.request->getpartialResultCount();
    }
    return status;
}
void BakedOpRenderer::endFrame(const Rect& repaintRect) {
    if (CC_UNLIKELY(Properties::debugOverdraw)) {
        ClipRect overdrawClip(repaintRect);
        Rect viewportRect(mRenderTarget.viewportWidth, mRenderTarget.viewportHeight);
        // overdraw visualization
        for (int i = 1; i <= 4; i++) {
            if (i < 4) {
                // nth level of overdraw tests for n+1 draws per pixel
                mRenderState.stencil().enableDebugTest(i + 1, false);
            } else {
                // 4th level tests for 4 or higher draws per pixel
                mRenderState.stencil().enableDebugTest(4, true);
            }

            SkPaint paint;
            paint.setColor(mCaches.getOverdrawColor(i));
            Glop glop;
            GlopBuilder(mRenderState, mCaches, &glop)
                    .setRoundRectClipState(nullptr)
                    .setMeshUnitQuad()
                    .setFillPaint(paint, 1.0f)
                    .setTransform(Matrix4::identity(), TransformFlags::None)
                    .setModelViewMapUnitToRect(viewportRect)
                    .build();
            renderGlop(nullptr, &overdrawClip, glop);
        }
        mRenderState.stencil().disable();
    }

    // Note: we leave FBO 0 renderable here, for post-frame-content decoration
}
        virtual void onPositionLost(RenderNode& node, const TreeInfo* info) override {
            if (CC_UNLIKELY(!mWeakRef || (info && !info->updateWindowPositions))) return;

            ATRACE_NAME("SurfaceView position lost");
            JNIEnv* env = jnienv();
            jobject localref = env->NewLocalRef(mWeakRef);
            if (CC_UNLIKELY(!localref)) {
                jnienv()->DeleteWeakGlobalRef(mWeakRef);
                mWeakRef = nullptr;
                return;
            }

            env->CallVoidMethod(localref, gSurfaceViewPositionLostMethod,
                    info ? info->canvasContext.getFrameNumber() : 0);
            env->DeleteLocalRef(localref);
        }
        virtual void onPositionUpdated(RenderNode& node, const TreeInfo& info) override {
            if (CC_UNLIKELY(!mWeakRef || !info.updateWindowPositions)) return;

            Matrix4 transform;
            info.damageAccumulator->computeCurrentTransform(&transform);
            const RenderProperties& props = node.properties();
            uirenderer::Rect bounds(props.getWidth(), props.getHeight());
            transform.mapRect(bounds);
            bounds.left -= info.windowInsetLeft;
            bounds.right -= info.windowInsetLeft;
            bounds.top -= info.windowInsetTop;
            bounds.bottom -= info.windowInsetTop;

            if (CC_LIKELY(transform.isPureTranslate())) {
                // snap/round the computed bounds, so they match the rounding behavior
                // of the clear done in SurfaceView#draw().
                bounds.snapToPixelBoundaries();
            } else {
                // Conservatively round out so the punched hole (in the ZOrderOnTop = true case)
                // doesn't extend beyond the other window
                bounds.roundOut();
            }

            incStrong(0);
            auto functor = std::bind(
                std::mem_fn(&SurfaceViewPositionUpdater::doUpdatePositionAsync), this,
                (jlong) info.canvasContext.getFrameNumber(),
                (jint) bounds.left, (jint) bounds.top,
                (jint) bounds.right, (jint) bounds.bottom);

            info.canvasContext.enqueueFrameWork(std::move(functor));
        }
ssize_t AudioBufferProviderSource::availableToRead()
{
    if (CC_UNLIKELY(!mNegotiated)) {
        return NEGOTIATE;
    }
    return mBuffer.raw != NULL ? mBuffer.frameCount - mConsumed : 0;
}
bool egl_display_t::HibernationMachine::incWakeCount(WakeRefStrength strength) {
    Mutex::Autolock _l(mLock);
    ALOGE_IF(mWakeCount < 0 || mWakeCount == INT32_MAX,
             "Invalid WakeCount (%d) on enter\n", mWakeCount);

    mWakeCount++;
    if (strength == STRONG)
        mAttemptHibernation = false;

    if (CC_UNLIKELY(mHibernating)) {
        ALOGV("Awakening\n");
        egl_connection_t* const cnx = &gEGLImpl;

        // These conditions should be guaranteed before entering hibernation;
        // we don't want to get into a state where we can't wake up.
        ALOGD_IF(!mDpyValid || !cnx->egl.eglAwakenProcessIMG,
                 "Invalid hibernation state, unable to awaken\n");

        if (!cnx->egl.eglAwakenProcessIMG()) {
            ALOGE("Failed to awaken EGL implementation\n");
            return false;
        }
        mHibernating = false;
    }
    return true;
}
bool EglManager::swapBuffers(const Frame& frame, const SkRect& screenDirty) {
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        fence();
    }

    EGLint rects[4];
    frame.map(screenDirty, rects);
    eglSwapBuffersWithDamageKHR(mEglDisplay, frame.mSurface, rects, screenDirty.isEmpty() ? 0 : 1);

    EGLint err = eglGetError();
    if (CC_LIKELY(err == EGL_SUCCESS)) {
        return true;
    }
    if (err == EGL_BAD_SURFACE || err == EGL_BAD_NATIVE_WINDOW) {
        // For some reason our surface was destroyed out from under us
        // This really shouldn't happen, but if it does we can recover easily
        // by just not trying to use the surface anymore
        ALOGW("swapBuffers encountered EGL error %d on %p, halting rendering...", err,
              frame.mSurface);
        return false;
    }
    LOG_ALWAYS_FATAL("Encountered EGL error %d %s during rendering", err, egl_error_str(err));
    // Impossible to hit this, but the compiler doesn't know that
    return false;
}
Example #15
BpBinder* BpBinder::create(int32_t handle) {
    int32_t trackedUid = -1;
    if (sCountByUidEnabled) {
        trackedUid = IPCThreadState::self()->getCallingUid();
        AutoMutex _l(sTrackingLock);
        uint32_t trackedValue = sTrackingMap[trackedUid];
        if (CC_UNLIKELY(trackedValue & LIMIT_REACHED_MASK)) {
            if (sBinderProxyThrottleCreate) {
                return nullptr;
            }
        } else {
            if ((trackedValue & COUNTING_VALUE_MASK) >= sBinderProxyCountHighWatermark) {
                ALOGE("Too many binder proxy objects sent to uid %d from uid %d (%d proxies held)",
                      getuid(), trackedUid, trackedValue);
                sTrackingMap[trackedUid] |= LIMIT_REACHED_MASK;
                if (sLimitCallback) sLimitCallback(trackedUid);
                if (sBinderProxyThrottleCreate) {
                    ALOGI("Throttling binder proxy creates from uid %d in uid %d until binder proxy"
                          " count drops below %d",
                          trackedUid, getuid(), sBinderProxyCountLowWatermark);
                    return nullptr;
                }
            }
        }
        sTrackingMap[trackedUid]++;
    }
    return new BpBinder(handle, trackedUid);
}
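
create() and the ~BpBinder() destructor later in this collection share a simple bitmask bookkeeping scheme: one "limit reached" flag bit plus count bits, latched when the count crosses the high watermark and cleared again at the low watermark. A small sketch of that scheme follows; the mask values and helper names are assumptions for illustration, not quoted from BpBinder.h.

#include <cstdint>

// Illustrative masks: one flag bit plus 31 count bits (assumed layout).
enum : uint32_t {
    kLimitReachedMask  = 0x80000000,   // set once the high watermark is crossed
    kCountingValueMask = 0x7fffffff,   // low bits hold the per-uid proxy count
};

static inline uint32_t proxyCount(uint32_t v)   { return v & kCountingValueMask; }
static inline bool     limitReached(uint32_t v) { return (v & kLimitReachedMask) != 0; }

// Latch the flag when the count crosses the high watermark (as in create()).
static inline void maybeLatchLimit(uint32_t& v, uint32_t highWatermark) {
    if (!limitReached(v) && proxyCount(v) >= highWatermark) {
        v |= kLimitReachedMask;
    }
}

// Clear the flag once the count falls back to the low watermark (as in ~BpBinder()).
static inline void maybeResetLimit(uint32_t& v, uint32_t lowWatermark) {
    if (limitReached(v) && proxyCount(v) <= lowWatermark) {
        v &= ~kLimitReachedMask;
    }
}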
sp<GraphicBuffer> GLConsumer::getDebugTexImageBuffer() {
    Mutex::Autolock _l(sStaticInitLock);
    if (CC_UNLIKELY(sReleasedTexImageBuffer == NULL)) {
        // The first time, create the debug texture in case the application
        // continues to use it.
        sp<GraphicBuffer> buffer = new GraphicBuffer(
                kDebugData.width, kDebugData.height, PIXEL_FORMAT_RGBA_8888,
                GraphicBuffer::USAGE_SW_WRITE_RARELY);
        uint32_t* bits;
        buffer->lock(GraphicBuffer::USAGE_SW_WRITE_RARELY, reinterpret_cast<void**>(&bits));
        uint32_t stride = buffer->getStride();
        uint32_t height = buffer->getHeight();
        memset(bits, 0, stride * height * 4);
        for (uint32_t y = 0; y < kDebugData.height; y++) {
            for (uint32_t x = 0; x < kDebugData.width; x++) {
                bits[x] = (kDebugData.bits[y * kDebugData.width + x] == 'X') ?
                    0xFF000000 : 0xFFFFFFFF;
            }
            bits += stride;
        }
        buffer->unlock();
        sReleasedTexImageBuffer = buffer;
    }
    return sReleasedTexImageBuffer;
}
bool Snapshot::clipTransformed(const Rect& r, SkRegion::Op op) {
    bool clipped = false;

    switch (op) {
        case SkRegion::kIntersect_Op: {
            if (CC_UNLIKELY(!clipRegion->isEmpty())) {
                ensureClipRegion();
                clipped = clipRegionOp(r.left, r.top, r.right, r.bottom, SkRegion::kIntersect_Op);
            } else {
                clipped = clipRect->intersect(r);
                if (!clipped) {
                    clipRect->setEmpty();
                    clipped = true;
                }
            }
            break;
        }
        case SkRegion::kReplace_Op: {
            setClip(r.left, r.top, r.right, r.bottom);
            clipped = true;
            break;
        }
        default: {
            ensureClipRegion();
            clipped = clipRegionOp(r.left, r.top, r.right, r.bottom, op);
            break;
        }
    }

    if (clipped) {
        flags |= Snapshot::kFlagClipSet;
    }

    return clipped;
}
void LayerBuilder::flushLayerClears(LinearAllocator& allocator) {
    if (CC_UNLIKELY(!mClearRects.empty())) {
        const int vertCount = mClearRects.size() * 4;
        // put the verts in the frame allocator, since
        //     1) SimpleRectsOps needs verts, not rects
        //     2) even if mClearRects stored verts, std::vectors will move their contents
        Vertex* const verts = (Vertex*) allocator.create_trivial_array<Vertex>(vertCount);

        Vertex* currentVert = verts;
        Rect bounds = mClearRects[0];
        for (auto&& rect : mClearRects) {
            bounds.unionWith(rect);
            Vertex::set(currentVert++, rect.left, rect.top);
            Vertex::set(currentVert++, rect.right, rect.top);
            Vertex::set(currentVert++, rect.left, rect.bottom);
            Vertex::set(currentVert++, rect.right, rect.bottom);
        }
        mClearRects.clear(); // discard rects before drawing so this method isn't reentrant

        // One or more unclipped saveLayers have been enqueued, with deferred clears.
        // Flush all of these clears with a single draw
        SkPaint* paint = allocator.create<SkPaint>();
        paint->setXfermodeMode(SkXfermode::kClear_Mode);
        SimpleRectsOp* op = allocator.create_trivial<SimpleRectsOp>(bounds,
                Matrix4::identity(), nullptr, paint,
                verts, vertCount);
        BakedOpState* bakedState = BakedOpState::directConstruct(allocator,
                &repaintClip, bounds, *op);
        deferUnmergeableOp(allocator, bakedState, OpBatchType::Vertices);
    }
}
status_t CameraMetadata::checkType(uint32_t tag, uint8_t expectedType) {
    int tagType = get_camera_metadata_tag_type(tag);
    if (CC_UNLIKELY(tagType == -1)) {
        ALOGE("Update metadata entry: Unknown tag %d", tag);
        return INVALID_OPERATION;
    }
    if (CC_UNLIKELY(tagType != expectedType)) {
        ALOGE("Mismatched tag type when updating entry %s (%d) of type %s; "
                "got type %s data instead ",
                get_camera_metadata_tag_name(tag), tag,
                camera_metadata_type_names[tagType],
                camera_metadata_type_names[expectedType]);
        return INVALID_OPERATION;
    }
    return OK;
}
Example #20
BpBinder::~BpBinder()
{
    ALOGV("Destroying BpBinder %p handle %d\n", this, mHandle);

    IPCThreadState* ipc = IPCThreadState::self();

    if (mTrackedUid >= 0) {
        AutoMutex _l(sTrackingLock);
        uint32_t trackedValue = sTrackingMap[mTrackedUid];
        if (CC_UNLIKELY((trackedValue & COUNTING_VALUE_MASK) == 0)) {
            ALOGE("Unexpected Binder Proxy tracking decrement in %p handle %d\n", this, mHandle);
        } else {
            if (CC_UNLIKELY(
                (trackedValue & LIMIT_REACHED_MASK) &&
                ((trackedValue & COUNTING_VALUE_MASK) <= sBinderProxyCountLowWatermark)
                )) {
                ALOGI("Limit reached bit reset for uid %d (fewer than %d proxies from uid %d held)",
                                   getuid(), mTrackedUid, sBinderProxyCountLowWatermark);
                sTrackingMap[mTrackedUid] &= ~LIMIT_REACHED_MASK;
            }
            if (--sTrackingMap[mTrackedUid] == 0) {
                sTrackingMap.erase(mTrackedUid);
            }
        }
    }

    mLock.lock();
    Vector<Obituary>* obits = mObituaries;
    if (obits != NULL) {
        if (ipc) ipc->clearDeathNotification(mHandle, this);
        mObituaries = NULL;
    }
    mLock.unlock();

    if (obits != NULL) {
        // XXX Should we tell any remaining DeathRecipient
        // objects that the last strong ref has gone away, so they
        // are no longer linked?
        delete obits;
    }

    if (ipc) {
        ipc->expungeHandle(mHandle, this);
        ipc->decWeakHandle(mHandle);
    }
}
void DrawProfiler::setDensity(float density) {
    if (CC_UNLIKELY(mDensity != density)) {
        mDensity = density;
        mVerticalUnit = dpToPx(PROFILE_DRAW_DP_PER_MS, density);
        mHorizontalUnit = dpToPx(PROFILE_DRAW_WIDTH, density);
        mThresholdStroke = dpToPx(PROFILE_DRAW_THRESHOLD_STROKE_WIDTH, density);
    }
}
void Layer::onDraw(const Region& clip) const
{
    if (CC_UNLIKELY(mActiveBuffer == 0)) {
        // the texture has not been created yet, this Layer has
        // in fact never been drawn into. This happens frequently with
        // SurfaceView because the WindowManager can't know when the client
        // has drawn the first time.

        // If there is nothing under us, we paint the screen in black, otherwise
        // we just skip this update.

        // figure out if there is something below us
        Region under;
        const SurfaceFlinger::LayerVector& drawingLayers(
                mFlinger->mDrawingState.layersSortedByZ);
        const size_t count = drawingLayers.size();
        for (size_t i=0 ; i<count ; ++i) {
            const sp<LayerBase>& layer(drawingLayers[i]);
            if (layer.get() == static_cast<LayerBase const*>(this))
                break;
            under.orSelf(layer->visibleRegionScreen);
        }
        // if not everything below us is covered, we plug the holes!
        Region holes(clip.subtract(under));
        if (!holes.isEmpty()) {
            clearWithOpenGL(holes, 0, 0, 0, 1);
        }
        return;
    }

    if (!isProtected()) {
        glBindTexture(GL_TEXTURE_EXTERNAL_OES, mTextureName);
        GLenum filter = GL_NEAREST;
        if (getFiltering() || needsFiltering() || isFixedSize() || isCropped()) {
            // TODO: we could be more subtle with isFixedSize()
            filter = GL_LINEAR;
        }
        glTexParameterx(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, filter);
        glTexParameterx(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, filter);
        glMatrixMode(GL_TEXTURE);
        glLoadMatrixf(mTextureMatrix);
        glMatrixMode(GL_MODELVIEW);
        glDisable(GL_TEXTURE_2D);
        glEnable(GL_TEXTURE_EXTERNAL_OES);
    } else {
        glBindTexture(GL_TEXTURE_2D, mFlinger->getProtectedTexName());
        glMatrixMode(GL_TEXTURE);
        glLoadIdentity();
        glMatrixMode(GL_MODELVIEW);
        glDisable(GL_TEXTURE_EXTERNAL_OES);
        glEnable(GL_TEXTURE_2D);
    }

    drawWithOpenGL(clip);

    glDisable(GL_TEXTURE_EXTERNAL_OES);
    glDisable(GL_TEXTURE_2D);
}
ssize_t MonoPipeReader::availableToRead()
{
    if (CC_UNLIKELY(!mNegotiated)) {
        return NEGOTIATE;
    }
    ssize_t ret = android_atomic_acquire_load(&mPipe->mRear) - mPipe->mFront;
    ALOG_ASSERT((0 <= ret) && (ret <= mPipe->mMaxFrames));
    return ret;
}
void GpuMemoryTracker::onGLContextDestroyed() {
    gGpuThread = 0;
    if (CC_UNLIKELY(gObjectSet.size() > 0)) {
        std::stringstream os;
        dump(os);
        ALOGE("%s", os.str().c_str());
        LOG_ALWAYS_FATAL("Leaked %zd GPU objects!", gObjectSet.size());
    }
}
void PicturePileLayerContent::draw(SkCanvas* canvas)
{
    TRACE_METHOD();
    android::Mutex::Autolock lock(m_drawLock);
    m_picturePile.draw(canvas);

    if (CC_UNLIKELY(!m_hasContent))
        ALOGW("Warning: painting PicturePile without content!");
}
Example #26
FenceTime::FenceTime(nsecs_t signalTime)
  : mState(Fence::isValidTimestamp(signalTime) ? State::VALID : State::INVALID),
    mFence(nullptr),
    mSignalTime(signalTime) {
    if (CC_UNLIKELY(mSignalTime == Fence::SIGNAL_TIME_PENDING)) {
        ALOGE("Pending signal time not allowed after signal.");
        mSignalTime = Fence::SIGNAL_TIME_INVALID;
    }
}
camera_metadata_ro_entry_t CameraMetadata::find(uint32_t tag) const {
    status_t res;
    camera_metadata_ro_entry entry;
    res = find_camera_metadata_ro_entry(mBuffer, tag, &entry);
    if (CC_UNLIKELY( res != OK )) {
        entry.count = 0;
        entry.data.u8 = NULL;
    }
    return entry;
}
ssize_t MonoPipe::availableToWrite() const
{
    if (CC_UNLIKELY(!mNegotiated)) {
        return NEGOTIATE;
    }
    // uses mMaxFrames not mReqFrames, so allows "over-filling" the pipe beyond requested limit
    ssize_t ret = mMaxFrames - (mRear - android_atomic_acquire_load(&mFront));
    ALOG_ASSERT((0 <= ret) && (ret <= mMaxFrames));
    return ret;
}
ssize_t MonoPipeReader::read(void *buffer, size_t count, int64_t readPTS)
{
    // Compute the "next read PTS" and cache it.  Callers of read pass a read
    // PTS indicating the local time for which they are requesting data along
    // with a count (which is the number of audio frames they are going to
    // ultimately pass to the next stage of the pipeline).  Offsetting readPTS
    // by the duration of count will give us the readPTS which will be passed to
    // us next time, assuming the system continues to operate in steady state
    // with no discontinuities.  We stash this value so it can be used by the
    // MonoPipe writer to implement getNextWriteTimestamp.
    int64_t nextReadPTS;
    nextReadPTS = mPipe->offsetTimestampByAudioFrames(readPTS, count);

    // count == 0 is unlikely and not worth checking for explicitly; will be handled automatically
    ssize_t red = availableToRead();
    if (CC_UNLIKELY(red <= 0)) {
        // Uh-oh, looks like we are underflowing.  Update the next read PTS and
        // get out.
        mPipe->updateFrontAndNRPTS(mPipe->mFront, nextReadPTS);
        return red;
    }
    if (CC_LIKELY((size_t) red > count)) {
        red = count;
    }
    size_t front = mPipe->mFront & (mPipe->mMaxFrames - 1);
    size_t part1 = mPipe->mMaxFrames - front;
    if (part1 > (size_t) red) {
        part1 = red;
    }
    if (CC_LIKELY(part1 > 0)) {
        memcpy(buffer, (char *) mPipe->mBuffer + (front << mBitShift), part1 << mBitShift);
        if (CC_UNLIKELY(front + part1 == mPipe->mMaxFrames)) {
            size_t part2 = red - part1;
            if (CC_LIKELY(part2 > 0)) {
                memcpy((char *) buffer + (part1 << mBitShift), mPipe->mBuffer, part2 << mBitShift);
            }
        }
        mPipe->updateFrontAndNRPTS(red + mPipe->mFront, nextReadPTS);
        mFramesRead += red;
    }
    return red;
}
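
The wrap-around copy above relies on mMaxFrames being a power of two, so front & (maxFrames - 1) acts as a cheap modulo. A small standalone sketch of that split-copy index math; the function and parameter names here are illustrative, not part of MonoPipeReader.

#include <cstring>
#include <cstddef>
#include <algorithm>

// Standalone sketch of the split copy used by MonoPipeReader::read():
// maxFrames must be a power of two so that (index & (maxFrames - 1))
// behaves like (index % maxFrames) without a division.
static size_t copyFromRing(void* dst, const char* ring, size_t maxFrames,
                           size_t frontIndex, size_t frames, size_t frameSize) {
    size_t front = frontIndex & (maxFrames - 1);          // wrap the absolute index
    size_t part1 = std::min(frames, maxFrames - front);   // contiguous part up to the end
    memcpy(dst, ring + front * frameSize, part1 * frameSize);
    size_t part2 = frames - part1;                        // remainder wraps to the start
    if (part2 > 0) {
        memcpy((char*) dst + part1 * frameSize, ring, part2 * frameSize);
    }
    return frames;
}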
const ClipBase* Snapshot::serializeIntersectedClip(LinearAllocator& allocator,
        const ClipBase* recordedClip, const Matrix4& recordedClipTransform) {
    auto target = this;
    if (CC_UNLIKELY(recordedClip && recordedClip->intersectWithRoot)) {
        // Clip must be intersected with root, instead of current clip.
        target = getClipRoot(this);
    }

    return target->mClipArea->serializeIntersectedClip(allocator,
            recordedClip, recordedClipTransform);
}