// Constructs a Looper: creates the non-blocking wake pipe used to interrupt
// epoll_wait(), then creates the epoll instance and registers the pipe's
// read end. Any failure here is unrecoverable and aborts the process.
Looper::Looper(bool allowNonCallbacks) :
        mAllowNonCallbacks(allowNonCallbacks), mSendingMessage(false),
        mResponseIndex(0), mNextMessageUptime(LLONG_MAX) {
    int pipeFds[2];
    int rc = pipe(pipeFds);
    LOG_ALWAYS_FATAL_IF(rc != 0, "Could not create wake pipe. errno=%d", errno);

    mWakeReadPipeFd = pipeFds[0];
    mWakeWritePipeFd = pipeFds[1];

    // Both ends are made non-blocking so wake()/reads can never stall the looper.
    rc = fcntl(mWakeReadPipeFd, F_SETFL, O_NONBLOCK);
    LOG_ALWAYS_FATAL_IF(rc != 0, "Could not make wake read pipe non-blocking. errno=%d",
            errno);

    rc = fcntl(mWakeWritePipeFd, F_SETFL, O_NONBLOCK);
    LOG_ALWAYS_FATAL_IF(rc != 0, "Could not make wake write pipe non-blocking. errno=%d",
            errno);

    mIdling = false;

    // Allocate the epoll instance and register the wake pipe.
    mEpollFd = epoll_create(EPOLL_SIZE_HINT);
    LOG_ALWAYS_FATAL_IF(mEpollFd < 0, "Could not create epoll instance. errno=%d", errno);

    struct epoll_event eventItem;
    memset(&eventItem, 0, sizeof(epoll_event)); // zero out unused members of data field union
    eventItem.events = EPOLLIN;
    eventItem.data.fd = mWakeReadPipeFd;
    rc = epoll_ctl(mEpollFd, EPOLL_CTL_ADD, mWakeReadPipeFd, &eventItem);
    LOG_ALWAYS_FATAL_IF(rc != 0, "Could not add wake read pipe to epoll instance. errno=%d",
            errno);
}
void Looper::rebuildEpollLocked() { // Close old epoll instance if we have one. if (mEpollFd >= 0) { #if DEBUG_CALLBACKS ALOGD("%p ~ rebuildEpollLocked - rebuilding epoll set", this); #endif close(mEpollFd); } // Allocate the new epoll instance and register the wake pipe. mEpollFd = epoll_create(EPOLL_SIZE_HINT); LOG_ALWAYS_FATAL_IF(mEpollFd < 0, "Could not create epoll instance. errno=%d", errno); struct epoll_event eventItem; memset(& eventItem, 0, sizeof(epoll_event)); // zero out unused members of data field union eventItem.events = EPOLLIN; eventItem.data.fd = mWakeEventFd; int result = epoll_ctl(mEpollFd, EPOLL_CTL_ADD, mWakeEventFd, & eventItem); LOG_ALWAYS_FATAL_IF(result != 0, "Could not add wake event fd to epoll instance. errno=%d", errno); for (size_t i = 0; i < mRequests.size(); i++) { const Request& request = mRequests.valueAt(i); struct epoll_event eventItem; request.initEventItem(&eventItem); int epollResult = epoll_ctl(mEpollFd, EPOLL_CTL_ADD, request.fd, & eventItem); if (epollResult < 0) { ALOGE("Error adding epoll events for fd %d while rebuilding epoll set, errno=%d", request.fd, errno); } } }
// Copies the mime type, encoding, and input stream out of a Java
// android.webkit.WebResourceResponse into this native object.
// Aborts via LOG_ALWAYS_FATAL_IF if the class or any expected field is
// missing (an unexpected framework class definition).
UrlInterceptResponse::UrlInterceptResponse(JNIEnv* env, jobject response) {
    ScopedLocalRef<jclass> javaResponse(env, env->FindClass("android/webkit/WebResourceResponse"));
    LOG_ALWAYS_FATAL_IF(!javaResponse.get());
    jfieldID mimeType = env->GetFieldID(javaResponse.get(), "mMimeType", "Ljava/lang/String;");
    LOG_ALWAYS_FATAL_IF(!mimeType);
    jfieldID encoding = env->GetFieldID(javaResponse.get(), "mEncoding", "Ljava/lang/String;");
    LOG_ALWAYS_FATAL_IF(!encoding);
    jfieldID inputStream = env->GetFieldID(javaResponse.get(), "mInputStream", "Ljava/io/InputStream;");
    LOG_ALWAYS_FATAL_IF(!inputStream);

    // A null stream field is legal: only wrap it when present.
    ScopedLocalRef<jobject> stream(env, env->GetObjectField(response, inputStream));
    if (stream.get())
        m_inputStream.set(new JavaInputStreamWrapper(env, stream.get()));

    ScopedLocalRef<jstring> mimeStr(env,
            static_cast<jstring>(env->GetObjectField(response, mimeType)));
    ScopedLocalRef<jstring> encodingStr(env,
            static_cast<jstring>(env->GetObjectField(response, encoding)));

    // Copy the Java strings into std::string members. GetStringUTFLength
    // returns the modified-UTF-8 byte length, which matches the byte count
    // produced by GetStringUTFChars.
    if (mimeStr.get()) {
        const char* s = env->GetStringUTFChars(mimeStr.get(), NULL);
        m_mimeType.assign(s, env->GetStringUTFLength(mimeStr.get()));
        env->ReleaseStringUTFChars(mimeStr.get(), s);
    }
    if (encodingStr.get()) {
        const char* s = env->GetStringUTFChars(encodingStr.get(), NULL);
        m_encoding.assign(s, env->GetStringUTFLength(encodingStr.get()));
        env->ReleaseStringUTFChars(encodingStr.get(), s);
    }
}
// Registers the DrawGLFunctor JNI methods on kClassName.
// Aborts if the class cannot be found or registration fails.
void RegisterDrawGLFunctor(JNIEnv* env) {
    jclass functorClass = env->FindClass(kClassName);
    LOG_ALWAYS_FATAL_IF(!functorClass, "Unable to find class '%s'", kClassName);

    const int status = env->RegisterNatives(functorClass, kJniMethods, NELEM(kJniMethods));
    LOG_ALWAYS_FATAL_IF(status < 0, "register native methods failed: res=%d", status);
}
// Handles loss of the underlying graphics contexts: marks the display
// invalidated, waits until no color buffers are locked, then deletes every
// color buffer texture and invalidates every GLES context so they are
// rebuilt on next use. Takes and releases lock_ around the whole sequence.
void EglDisplayImpl::OnGraphicsContextsLost() {
  lock_.Lock();
  LOG_ALWAYS_FATAL_IF(invalidated_);  // must not already be invalidated
  invalidated_ = true;
  // Block until all locked color buffers have been released; the release
  // path signals cond_no_locked_buffers_.
  while (color_buffers_locked_) {
    cond_no_locked_buffers_.Wait(lock_);
  }
  BindLocked();
  LOG_ALWAYS_FATAL_IF(!invalidated_);
  // color_buffers_ is not locked by display lock.
  ColorBufferRegistry::ObjectList color_buffers = color_buffers_.GetAllObjects();
  for (ColorBufferRegistry::ObjectList::const_iterator iter = color_buffers.begin();
       iter != color_buffers.end(); ++iter) {
    (*iter)->DeleteTextureLocked();
  }
  ContextRegistry::ObjectList contexts = contexts_.GetAllObjects();
  for (ContextRegistry::ObjectList::const_iterator iter_context = contexts.begin();
       iter_context != contexts.end(); ++iter_context) {
    (*iter_context)->GetGlesContext()->Invalidate();
  }
  UnbindLocked();
  lock_.Unlock();
}
// Locates and loads an OpenGL ES driver for the connection: first tries a
// monolithic "GLES" library (EGL + GLESv1_CM + GLESv2 in one), then falls
// back to separate EGL/GLESv1_CM/GLESv2 libraries. Also loads the system
// wrapper libraries. Aborts if no implementation can be found.
void* Loader::open(egl_connection_t* cnx) {
    driver_t* handle = 0;

    // Prefer a single combined driver library when available.
    void* driver = load_driver("GLES", cnx, EGL | GLESv1_CM | GLESv2);
    if (driver) {
        handle = new driver_t(driver);
    } else {
        // Always load EGL first
        driver = load_driver("EGL", cnx, EGL);
        if (driver) {
            handle = new driver_t(driver);
            handle->set(load_driver("GLESv1_CM", cnx, GLESv1_CM), GLESv1_CM);
            handle->set(load_driver("GLESv2", cnx, GLESv2), GLESv2);
        }
    }
    LOG_ALWAYS_FATAL_IF(!handle, "couldn't find an OpenGL ES implementation");

    // The wrapper libraries route application calls into the driver.
    cnx->libGles2 = load_wrapper("/system/lib/libGLESv2.so");
    cnx->libGles1 = load_wrapper("/system/lib/libGLESv1_CM.so");
    LOG_ALWAYS_FATAL_IF(!cnx->libGles2 || !cnx->libGles1,
            "couldn't load system OpenGL ES wrapper libraries");

    return (void*)handle;
}
// Ensures the connection to sensorservice is alive, (re)connecting and
// refetching the sensor list if the service is missing or its binder is
// dead. Caller must hold the manager's lock (hence the Locked suffix).
status_t SensorManager::assertStateLocked() const {
    // Reconnect if we never connected, or if the existing binder is dead.
    bool needsInit = (mSensorServer == NULL);
    if (!needsInit) {
        // Ping binder to check if sensorservice is alive.
        needsInit = (IInterface::asBinder(mSensorServer)->pingBinder() != NO_ERROR);
    }
    if (!needsInit) {
        return NO_ERROR;
    }

    // try for 300 seconds (60*5(getService() tries for 5 seconds)) before giving up ...
    const String16 name("sensorservice");
    for (int attempt = 0; attempt < 60; attempt++) {
        status_t err = getService(name, &mSensorServer);
        if (err == NAME_NOT_FOUND) {
            sleep(1);
            continue;
        }
        if (err != NO_ERROR) {
            return err;
        }
        break;
    }

    // Local death observer that notifies the manager when sensorservice dies.
    class DeathObserver : public IBinder::DeathRecipient {
        SensorManager& mSensorManager;
        virtual void binderDied(const wp<IBinder>& who) {
            ALOGW("sensorservice died [%p]", who.unsafe_get());
            mSensorManager.sensorManagerDied();
        }
    public:
        explicit DeathObserver(SensorManager& mgr) : mSensorManager(mgr) { }
    };

    LOG_ALWAYS_FATAL_IF(mSensorServer.get() == NULL, "getService(SensorService) NULL");

    mDeathObserver = new DeathObserver(*const_cast<SensorManager *>(this));
    IInterface::asBinder(mSensorServer)->linkToDeath(mDeathObserver);

    // Cache the sensor list as an array of pointers into mSensors.
    mSensors = mSensorServer->getSensorList(mOpPackageName);
    size_t count = mSensors.size();
    mSensorList = static_cast<Sensor const**>(malloc(count * sizeof(Sensor*)));
    LOG_ALWAYS_FATAL_IF(mSensorList == NULL, "mSensorList NULL");
    for (size_t i = 0; i < count; i++) {
        mSensorList[i] = mSensors.array() + i;
    }
    return NO_ERROR;
}
void RenderThread::initializeDisplayEventReceiver() { LOG_ALWAYS_FATAL_IF(mDisplayEventReceiver, "Initializing a second DisplayEventReceiver?"); mDisplayEventReceiver = new DisplayEventReceiver(); status_t status = mDisplayEventReceiver->initCheck(); LOG_ALWAYS_FATAL_IF(status != NO_ERROR, "Initialization of DisplayEventReceiver " "failed with status: %d", status); // Register the FD mLooper->addFd(mDisplayEventReceiver->getFd(), 0, Looper::EVENT_INPUT, RenderThread::displayEventReceiverCallback, this); }
// Wraps a java.io.InputStream for native use: pins a global reference to
// the stream object and caches the method ids for read([B)I and close()V.
// Aborts if the global ref, class, or either method cannot be obtained.
JavaInputStreamWrapper(JNIEnv* env, jobject inputStream)
        : m_inputStream(env->NewGlobalRef(inputStream))
        , m_buffer(NULL) {
    // Checks the NewGlobalRef result, which is null on OOM as well as for
    // a null inputStream argument.
    LOG_ALWAYS_FATAL_IF(!m_inputStream);
    ScopedLocalRef<jclass> inputStreamClass(env, env->FindClass("java/io/InputStream"));
    LOG_ALWAYS_FATAL_IF(!inputStreamClass.get());
    m_read = env->GetMethodID(inputStreamClass.get(), "read", "([B)I");
    LOG_ALWAYS_FATAL_IF(!m_read);
    m_close = env->GetMethodID(inputStreamClass.get(), "close", "()V");
    LOG_ALWAYS_FATAL_IF(!m_close);
}
// Wraps a java.io.InputStream for native use: pins a global reference to
// the stream object and caches the method ids for read([B)I and close()V.
// Aborts if the global ref, class, or either method cannot be obtained.
JavaInputStreamWrapper(JNIEnv* env, jobject inputStream)
        : m_inputStream(env->NewGlobalRef(inputStream))
        , m_buffer(0) {
    // Check the global reference itself rather than the raw parameter:
    // NewGlobalRef() returns null both when inputStream is null AND when
    // the VM is out of memory, so testing only the parameter would miss
    // the OOM case. (Matches the ScopedLocalRef variant of this ctor.)
    LOG_ALWAYS_FATAL_IF(!m_inputStream);
    jclass inputStreamClass = env->FindClass("java/io/InputStream");
    LOG_ALWAYS_FATAL_IF(!inputStreamClass);
    m_read = env->GetMethodID(inputStreamClass, "read", "([B)I");
    LOG_ALWAYS_FATAL_IF(!m_read);
    m_close = env->GetMethodID(inputStreamClass, "close", "()V");
    LOG_ALWAYS_FATAL_IF(!m_close);
    // Drop the local class reference; cached method ids stay valid.
    env->DeleteLocalRef(inputStreamClass);
}
// Copies the CpuConsumer's next buffer into |bmp| (RGBA_8888, premul, sRGB).
// Returns true on success. When no buffer is ready (BAD_VALUE), erases the
// bitmap to transparent and returns false; returns false on other errors.
bool capturePixels(SkBitmap* bmp) {
    sk_sp<SkColorSpace> colorSpace = SkColorSpace::MakeSRGB();
    SkImageInfo destinationConfig =
        SkImageInfo::Make(mSize.width(), mSize.height(),
                          kRGBA_8888_SkColorType, kPremul_SkAlphaType,
                          colorSpace);
    bmp->allocPixels(destinationConfig);
    // Pre-fill with red so any unwritten region is visually obvious.
    android_memset32((uint32_t*) bmp->getPixels(), SK_ColorRED,
                     mSize.width() * mSize.height() * 4);

    android::CpuConsumer::LockedBuffer nativeBuffer;
    android::status_t retval = mCpuConsumer->lockNextBuffer(&nativeBuffer);
    if (retval == android::BAD_VALUE) {
        SkDebugf("write_canvas_png() got no buffer; returning transparent");
        // No buffer ready to read - commonly triggered by dm sending us
        // a no-op source, or calling code that doesn't do anything on this
        // backend.
        bmp->eraseColor(SK_ColorTRANSPARENT);
        return false;
    } else if (retval) {
        SkDebugf("Failed to lock buffer to read pixels: %d.", retval);
        return false;
    }

    // Move the pixels into the destination SkBitmap
    LOG_ALWAYS_FATAL_IF(nativeBuffer.format != android::PIXEL_FORMAT_RGBA_8888,
                        "Native buffer not RGBA!");
    SkImageInfo nativeConfig =
        SkImageInfo::Make(nativeBuffer.width, nativeBuffer.height,
                          kRGBA_8888_SkColorType, kPremul_SkAlphaType);

    // Android stride is in pixels, Skia stride is in bytes
    SkBitmap nativeWrapper;
    bool success = nativeWrapper.installPixels(nativeConfig, nativeBuffer.data,
                                               nativeBuffer.stride * 4);
    if (!success) {
        SkDebugf("Failed to wrap HWUI buffer in a SkBitmap");
        // BUGFIX: the buffer was locked above and must be returned on every
        // exit path after a successful lock, or it leaks and starves the
        // consumer's buffer queue.
        mCpuConsumer->unlockBuffer(nativeBuffer);
        return false;
    }

    LOG_ALWAYS_FATAL_IF(bmp->colorType() != kRGBA_8888_SkColorType,
                        "Destination buffer not RGBA!");
    success = nativeWrapper.readPixels(destinationConfig,
                                       bmp->getPixels(), bmp->rowBytes(), 0, 0);
    if (!success) {
        SkDebugf("Failed to extract pixels from HWUI buffer");
        // BUGFIX: unlock before bailing out (see above).
        mCpuConsumer->unlockBuffer(nativeBuffer);
        return false;
    }

    mCpuConsumer->unlockBuffer(nativeBuffer);
    return true;
}
// Releases frames previously obtained from a static (single shared buffer)
// track: advances the playback position, handling loop wraparound and
// end-of-buffer flags, then publishes the new position/loop state and
// raises any control-block flags for the client.
void StaticAudioTrackServerProxy::releaseBuffer(Buffer* buffer)
{
    size_t stepCount = buffer->mFrameCount;
    // Cannot release more than was reported ready or than is outstanding.
    LOG_ALWAYS_FATAL_IF(!((int64_t) stepCount <= mFramesReady));
    LOG_ALWAYS_FATAL_IF(!(stepCount <= mUnreleased));
    if (stepCount == 0) {
        // prevent accidental re-use of buffer
        buffer->mRaw = NULL;
        buffer->mNonContig = 0;
        return;
    }
    mUnreleased -= stepCount;
    audio_track_cblk_t* cblk = mCblk;
    size_t position = mState.mPosition;
    size_t newPosition = position + stepCount;
    int32_t setFlags = 0;
    if (!(position <= newPosition && newPosition <= mFrameCount)) {
        // Out-of-range advance: warn and clamp to the end of the buffer.
        ALOGW("%s newPosition %zu outside [%zu, %zu]",
                __func__, newPosition, position, mFrameCount);
        newPosition = mFrameCount;
    } else if (mState.mLoopCount != 0 && newPosition == mState.mLoopEnd) {
        // Reached the loop end: wrap back to the loop start.
        // A loop count of -1 means loop forever; otherwise decrement and
        // distinguish an intermediate cycle from the final one.
        newPosition = mState.mLoopStart;
        if (mState.mLoopCount == -1 || --mState.mLoopCount != 0) {
            setFlags = CBLK_LOOP_CYCLE;
        } else {
            setFlags = CBLK_LOOP_FINAL;
        }
    }
    if (newPosition == mFrameCount) {
        setFlags |= CBLK_BUFFER_END;
    }
    mState.mPosition = newPosition;
    if (mFramesReady != INT64_MAX) {
        mFramesReady -= stepCount;
    }
    mFramesReadySafe = clampToSize(mFramesReady);

    cblk->mServer += stepCount;
    // This may overflow, but client is not supposed to rely on it

    // Publish the new position/loop state for the client side to observe.
    StaticAudioTrackPosLoop posLoop;
    posLoop.mBufferPosition = mState.mPosition;
    posLoop.mLoopCount = mState.mLoopCount;
    mPosLoopMutator.push(posLoop);
    if (setFlags != 0) {
        (void) android_atomic_or(setFlags, &cblk->mFlags);
        // this would be a good place to wake a futex
    }

    buffer->mFrameCount = 0;
    buffer->mRaw = NULL;
    buffer->mNonContig = 0;
}
// Returns frames obtained via obtainBuffer() back to the shared FIFO:
// advances the appropriate FIFO index (front for playback, rear for
// capture) and wakes the client futex when enough space/data is available.
void ServerProxy::releaseBuffer(Buffer* buffer)
{
    LOG_ALWAYS_FATAL_IF(buffer == NULL);
    size_t stepCount = buffer->mFrameCount;
    if (stepCount == 0 || mIsShutdown) {
        // prevent accidental re-use of buffer
        buffer->mFrameCount = 0;
        buffer->mRaw = NULL;
        buffer->mNonContig = 0;
        return;
    }
    // Cannot release more than was obtained, nor more than the buffer holds.
    LOG_ALWAYS_FATAL_IF(!(stepCount <= mUnreleased && mUnreleased <= mFrameCount));
    mUnreleased -= stepCount;
    audio_track_cblk_t* cblk = mCblk;
    if (mIsOut) {
        // Playback: server consumes from the front of the FIFO.
        int32_t front = cblk->u.mStreaming.mFront;
        android_atomic_release_store(stepCount + front, &cblk->u.mStreaming.mFront);
    } else {
        // Capture: server produces at the rear of the FIFO.
        int32_t rear = cblk->u.mStreaming.mRear;
        android_atomic_release_store(stepCount + rear, &cblk->u.mStreaming.mRear);
    }

    cblk->mServer += stepCount;

    // Compute the client wakeup threshold: cblk->mMinimum clamped into
    // [1, mFrameCount/2] (with a floor of 1 for degenerate frame counts).
    size_t half = mFrameCount / 2;
    if (half == 0) {
        half = 1;
    }
    size_t minimum = (size_t) cblk->mMinimum;
    if (minimum == 0) {
        minimum = mIsOut ? half : 1;
    } else if (minimum > half) {
        minimum = half;
    }
    // FIXME AudioRecord wakeup needs to be optimized; it currently wakes up client every time
    if (!mIsOut || (mAvailToClient + stepCount >= minimum)) {
        ALOGV("mAvailToClient=%zu stepCount=%zu minimum=%zu", mAvailToClient, stepCount, minimum);
        // Only issue the futex syscall if the wake bit was not already set.
        int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
        if (!(old & CBLK_FUTEX_WAKE)) {
            (void) syscall(__NR_futex, &cblk->mFutex,
                    mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, 1);
        }
    }

    buffer->mFrameCount = 0;
    buffer->mRaw = NULL;
    buffer->mNonContig = 0;
}
// Returns how many frames the server may consume. After a client flush it
// over-estimates (returns mFrameCount); returns 0 once shut down, and shuts
// the proxy down if the shared control block appears corrupt.
size_t AudioTrackServerProxy::framesReady()
{
    LOG_ALWAYS_FATAL_IF(!mIsOut);   // only meaningful for output (playback) tracks

    if (mIsShutdown) {
        return 0;
    }
    audio_track_cblk_t* cblk = mCblk;

    int32_t flush = cblk->u.mStreaming.mFlush;
    if (flush != mFlush) {
        // FIXME should return an accurate value, but over-estimate is better than under-estimate
        return mFrameCount;
    }
    // the acquire might not be necessary since not doing a subsequent read
    int32_t rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
    ssize_t filled = rear - cblk->u.mStreaming.mFront;
    // pipe should not already be overfull
    if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
        // Shared memory is writable by the client, so treat an impossible
        // fill level as corruption and stop using the track.
        ALOGE("Shared memory control block is corrupt (filled=%zd); shutting down", filled);
        mIsShutdown = true;
        return 0;
    }
    // cache this value for later use by obtainBuffer(), with added barrier
    // and racy if called by normal mixer thread
    // ignores flush(), so framesReady() may report a larger mFrameCount than obtainBuffer()
    return filled;
}
// Opens the binder driver (via open_driver() in the init list) and maps the
// driver's transaction receive area into this process. Failure to open the
// driver is fatal: the process cannot participate in binder IPC without it.
ProcessState::ProcessState()
    : mDriverFD(open_driver())
    , mVMStart(MAP_FAILED)
    , mManagesContexts(false)
    , mBinderContextCheckFunc(NULL)
    , mBinderContextUserData(NULL)
    , mThreadPoolStarted(false)
    , mThreadPoolSeq(1)
{
    if (mDriverFD >= 0) {
        // XXX Ideally, there should be a specific define for whether we
        // have mmap (or whether we could possibly have the kernel module
        // availabla).
#if !defined(HAVE_WIN32_IPC)
        // mmap the binder, providing a chunk of virtual address space to receive transactions.
        mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
        if (mVMStart == MAP_FAILED) {
            // *sigh*
            // mmap failed: close the driver fd so the fatal check below fires.
            ALOGE("Using /dev/binder failed: unable to mmap transaction memory.\n");
            close(mDriverFD);
            mDriverFD = -1;
        }
#else
        // No mmap support on this platform: binder is unusable.
        mDriverFD = -1;
#endif
    }

    LOG_ALWAYS_FATAL_IF(mDriverFD < 0, "Binder driver could not be opened. Terminating.");
}
// If a precache task is pending, blocks until it completes, adopts the
// resulting buffer (fatal if it failed), and drops the task reference.
void blockOnPrecache() {
    if (mTask == NULL) {
        return;
    }
    mBuffer = mTask->getResult();
    LOG_ALWAYS_FATAL_IF(mBuffer == NULL, "Failed to precache");
    mTask.clear();
}
// Registers |recipient| to be notified when the remote binder dies.
// The first registration requests a death notification from the driver and
// takes a weak reference on this proxy so it survives until notification.
// Returns DEAD_OBJECT if obituaries were already sent (remote already died).
status_t BpBinder::linkToDeath(
    const sp<DeathRecipient>& recipient, void* cookie, uint32_t flags)
{
    Obituary ob;
    ob.recipient = recipient;
    ob.cookie = cookie;
    ob.flags = flags;

    LOG_ALWAYS_FATAL_IF(recipient == NULL,
                        "linkToDeath(): recipient must be non-NULL");

    {
        AutoMutex _l(mLock);

        if (!mObitsSent) {
            // Death notice not yet delivered; registration is still possible.
            if (!mObituaries) {
                mObituaries = new Vector<Obituary>;
                if (!mObituaries) {
                    return NO_MEMORY;
                }
                ALOGV("Requesting death notification: %p handle %d\n", this, mHandle);
                // Keep a weak ref on ourselves until the notification is
                // cleared, then ask the driver to watch the remote handle.
                getWeakRefs()->incWeak(this);
                IPCThreadState* self = IPCThreadState::self();
                self->requestDeathNotification(mHandle, this);
                self->flushCommands();
            }
            ssize_t res = mObituaries->add(ob);
            // add() returns the new index on success or a negative error.
            return res >= (ssize_t)NO_ERROR ? (status_t)NO_ERROR : res;
        }
    }

    return DEAD_OBJECT;
}
// Registers StrictJarFile's native methods and caches the ZipEntry
// constructor method id; aborts if the constructor cannot be found.
void register_java_util_jar_StrictJarFile(JNIEnv* env) {
    jniRegisterNativeMethods(env, "java/util/jar/StrictJarFile", gMethods, NELEM(gMethods));

    zipEntryCtor = env->GetMethodID(JniConstants::zipEntryClass, "<init>",
            "(Ljava/lang/String;Ljava/lang/String;JJJIII[BJJ)V");
    LOG_ALWAYS_FATAL_IF(!zipEntryCtor, "Unable to find ZipEntry.<init>");
}
// Applies a deferred layer update (fatal on failure) and, if another update
// is already scheduled for the backing layer, queues it on the canvas.
void CanvasContext::processLayerUpdate(DeferredLayerUpdater* layerUpdater) {
    const bool applied = layerUpdater->apply();
    LOG_ALWAYS_FATAL_IF(!applied, "Failed to update layer!");

    auto* backing = layerUpdater->backingLayer();
    if (backing->deferredUpdateScheduled) {
        mCanvas->pushLayerUpdate(backing);
    }
}
// Destructor. Depending on the lifetime mode, the shared weakref_impl may
// need to be freed here rather than by the final decWeak().
RefBase::~RefBase()
{
    int32_t flags = mRefs->mFlags.load(std::memory_order_relaxed);
    // Life-time of this object is extended to WEAK, in
    // which case weakref_impl doesn't out-live the object and we
    // can free it now.
    if ((flags & OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_WEAK) {
        // It's possible that the weak count is not 0 if the object
        // re-acquired a weak reference in its destructor
        if (mRefs->mWeak.load(std::memory_order_relaxed) == 0) {
            delete mRefs;
        }
    } else if (mRefs->mStrong.load(std::memory_order_relaxed) == INITIAL_STRONG_VALUE) {
        // We never acquired a strong reference on this object.
        LOG_ALWAYS_FATAL_IF(mRefs->mWeak.load() != 0,
                "RefBase: Explicit destruction with non-zero weak "
                "reference count");
        // TODO: Always report if we get here. Currently MediaMetadataRetriever
        // C++ objects are inconsistently managed and sometimes get here.
        // There may be other cases, but we believe they should all be fixed.
        delete mRefs;
    }
    // For debugging purposes, clear mRefs. Ineffective against outstanding wp's.
    const_cast<weakref_impl*&>(mRefs) = NULL;
}
// Constructs an input channel over pre-created ashmem and pipe fds, making
// both pipe ends non-blocking; failure to do so is fatal.
InputChannel::InputChannel(const String8& name, int32_t ashmemFd, int32_t receivePipeFd,
        int32_t sendPipeFd) :
        mName(name), mAshmemFd(ashmemFd), mReceivePipeFd(receivePipeFd),
        mSendPipeFd(sendPipeFd) {
#if DEBUG_CHANNEL_LIFECYCLE
    ALOGD("Input channel constructed: name='%s', ashmemFd=%d, receivePipeFd=%d, sendPipeFd=%d",
            mName.string(), ashmemFd, receivePipeFd, sendPipeFd);
#endif

    int rc = fcntl(mReceivePipeFd, F_SETFL, O_NONBLOCK);
    LOG_ALWAYS_FATAL_IF(rc != 0, "channel '%s' ~ Could not make receive pipe "
            "non-blocking. errno=%d", mName.string(), errno);

    rc = fcntl(mSendPipeFd, F_SETFL, O_NONBLOCK);
    LOG_ALWAYS_FATAL_IF(rc != 0, "channel '%s' ~ Could not make send pipe "
            "non-blocking. errno=%d", mName.string(), errno);
}
// Acquires a pooled offscreen buffer of the given size and begins a repaint
// of its full area. Fatal if a layer is already active on the render target.
OffscreenBuffer* BakedOpRenderer::startTemporaryLayer(uint32_t width, uint32_t height) {
    LOG_ALWAYS_FATAL_IF(mRenderTarget.offscreenBuffer, "already has layer...");

    OffscreenBuffer* layerBuffer = mRenderState.layerPool().get(mRenderState, width, height);
    startRepaintLayer(layerBuffer, Rect(width, height));
    return layerBuffer;
}
// Probes the EGL implementation for sync-related extensions and records
// which are available, plus a human-readable summary in mString.
SyncFeatures::SyncFeatures() : Singleton<SyncFeatures>(),
        mHasNativeFenceSync(false),
        mHasFenceSync(false),
        mHasWaitSync(false) {
    EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
    // This can only be called after EGL has been initialized; otherwise the
    // check below will abort.
    const char* extensions = eglQueryStringImplementationANDROID(display, EGL_EXTENSIONS);
    LOG_ALWAYS_FATAL_IF(extensions == NULL, "eglQueryStringImplementationANDROID failed");

    if (strstr(extensions, "EGL_ANDROID_native_fence_sync")) {
        // This makes GLConsumer use the EGL_ANDROID_native_fence_sync
        // extension to create Android native fences to signal when all
        // GLES reads for a given buffer have completed.
        mHasNativeFenceSync = true;
    }
    mHasFenceSync = (strstr(extensions, "EGL_KHR_fence_sync") != NULL);
    mHasWaitSync = (strstr(extensions, "EGL_KHR_wait_sync") != NULL);

    // Summarize which sync mechanisms are actually in use.
    mString.append("[using:");
    if (useNativeFenceSync()) {
        mString.append(" EGL_ANDROID_native_fence_sync");
    }
    if (useFenceSync()) {
        mString.append(" EGL_KHR_fence_sync");
    }
    if (useWaitSync()) {
        mString.append(" EGL_KHR_wait_sync");
    }
    mString.append("]");
}
// Registers the android.os.Trace native methods; aborts on failure.
// Always returns 0.
int register_android_os_Trace(JNIEnv* env) {
    const int status = jniRegisterNativeMethods(env, "android/os/Trace",
            gTraceMethods, NELEM(gTraceMethods));
    LOG_ALWAYS_FATAL_IF(status < 0, "Unable to register native methods.");
    return 0;
}
void Initialize() { // Read list of public native libraries from the config file. std::string file_content; LOG_ALWAYS_FATAL_IF(!base::ReadFileToString(kPublicNativeLibrariesConfig, &file_content), "Error reading public native library list from \"%s\": %s", kPublicNativeLibrariesConfig, strerror(errno)); std::vector<std::string> lines = base::Split(file_content, "\n"); std::vector<std::string> sonames; for (const auto& line : lines) { auto trimmed_line = base::Trim(line); if (trimmed_line[0] == '#' || trimmed_line.empty()) { continue; } sonames.push_back(trimmed_line); } public_libraries_ = base::Join(sonames, ':'); // android_init_namespaces() expects all the public libraries // to be loaded so that they can be found by soname alone. for (const auto& soname : sonames) { dlopen(soname.c_str(), RTLD_NOW | RTLD_NODELETE); } }
// Builds the vertex list for the clip's rectangle list and hands it to
// setupStencilQuads(). Rect-preserving transforms are applied to the whole
// bounds up front (and intersected with the clip rect); general transforms
// are applied to each of the four corners individually after insertion.
void BakedOpRenderer::setupStencilRectList(const ClipBase* clip) {
    LOG_ALWAYS_FATAL_IF(clip->mode != ClipMode::RectangleList,
            "can't rectlist clip without rectlist");
    auto&& rectList = reinterpret_cast<const ClipRectList*>(clip)->rectList;
    int quadCount = rectList.getTransformedRectanglesCount();
    std::vector<Vertex> rectangleVertices;
    rectangleVertices.reserve(quadCount * 4);  // 4 corners per rectangle
    for (int i = 0; i < quadCount; i++) {
        const TransformedRectangle& tr(rectList.getTransformedRectangle(i));
        const Matrix4& transform = tr.getTransform();
        Rect bounds = tr.getBounds();
        if (transform.rectToRect()) {
            // If rectToRect, can simply map bounds before storing verts
            transform.mapRect(bounds);
            bounds.doIntersect(clip->rect);
            if (bounds.isEmpty()) {
                continue; // will be outside of scissor, skip
            }
        }

        rectangleVertices.push_back(Vertex{bounds.left, bounds.top});
        rectangleVertices.push_back(Vertex{bounds.right, bounds.top});
        rectangleVertices.push_back(Vertex{bounds.left, bounds.bottom});
        rectangleVertices.push_back(Vertex{bounds.right, bounds.bottom});

        if (!transform.rectToRect()) {
            // If not rectToRect, must map each point individually
            // (iterates over the 4 vertices just appended).
            for (auto cur = rectangleVertices.end() - 4;
                    cur < rectangleVertices.end(); cur++) {
                transform.mapPoint(cur->x, cur->y);
            }
        }
    }
    setupStencilQuads(rectangleVertices, rectList.getTransformedRectanglesCount());
}
// Finalizes code generation: patches every recorded branch instruction with
// its resolved pc-relative offset, trims the assembly buffer to the emitted
// size, logs generation stats, and optionally disassembles when the
// debug.pf.disasm system property is set. Returns OK.
int ARMAssembler::generate(const char* name)
{
    // fixup all the branches
    size_t count = mBranchTargets.size();
    while (count--) {
        const branch_target_t& bt = mBranchTargets[count];
        uint32_t* target_pc = mLabels.valueFor(bt.label);
        LOG_ALWAYS_FATAL_IF(!target_pc,
                "error resolving branch targets, target_pc is null");
        // Offset is computed relative to bt.pc + 2 words and the low 24 bits
        // are OR'ed into the branch instruction.
        int32_t offset = int32_t(target_pc - (bt.pc+2));
        *bt.pc |= offset & 0xFFFFFF;
    }

    // Shrink the buffer to the exact number of emitted instruction bytes.
    mAssembly->resize( int(pc()-base())*4 );

    // the instruction cache is flushed by CodeCache
    const int64_t duration = ggl_system_time() - mDuration;

    const char * const format = "generated %s (%d ins) at [%p:%p] in %lld ns\n";
    ALOGI(format, name, int(pc()-base()), base(), pc(), duration);

    char value[PROPERTY_VALUE_MAX];
    property_get("debug.pf.disasm", value, "0");
    if (atoi(value) != 0) {
        printf(format, name, int(pc()-base()), base(), pc(), duration);
        disassemble(name);
    }
    return OK;
}
void BakedOpRenderer::drawRects(const float* rects, int count, const SkPaint* paint) { std::vector<Vertex> vertices; vertices.reserve(count); Vertex* vertex = vertices.data(); for (int index = 0; index < count; index += 4) { float l = rects[index + 0]; float t = rects[index + 1]; float r = rects[index + 2]; float b = rects[index + 3]; Vertex::set(vertex++, l, t); Vertex::set(vertex++, r, t); Vertex::set(vertex++, l, b); Vertex::set(vertex++, r, b); } LOG_ALWAYS_FATAL_IF(mRenderTarget.frameBufferId != 0, "decoration only supported for FBO 0"); // TODO: Currently assume full FBO damage, due to FrameInfoVisualizer::unionDirty. // Should should scissor/set mHasDrawn safely. mRenderState.scissor().setEnabled(false); mHasDrawn = true; Glop glop; GlopBuilder(mRenderState, mCaches, &glop) .setRoundRectClipState(nullptr) .setMeshIndexedQuads(vertices.data(), count / 4) .setFillPaint(*paint, 1.0f) .setTransform(Matrix4::identity(), TransformFlags::None) .setModelViewIdentityEmptyBounds() .build(); mRenderState.render(glop, mRenderTarget.orthoMatrix); }
// Drops one strong reference. When the last strong reference is released,
// notifies the object via onLastStrongRef() and, in STRONG lifetime mode,
// deletes it. Always ends by dropping the implicit weak reference that
// every strong reference holds.
void RefBase::decStrong(const void* id) const
{
    weakref_impl* const refs = mRefs;
    refs->removeStrongRef(id);
    // fetch_sub returns the count BEFORE the decrement.
    const int32_t c = refs->mStrong.fetch_sub(1, std::memory_order_release);
#if PRINT_REFS
    ALOGD("decStrong of %p from %p: cnt=%d\n", this, id, c);
#endif
    LOG_ALWAYS_FATAL_IF(BAD_STRONG(c), "decStrong() called on %p too many times",
            refs);
    if (c == 1) {
        // Acquire fence pairs with the release decrement above so this
        // thread sees all writes made while other threads held references.
        std::atomic_thread_fence(std::memory_order_acquire);
        refs->mBase->onLastStrongRef(id);
        int32_t flags = refs->mFlags.load(std::memory_order_relaxed);
        if ((flags&OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_STRONG) {
            delete this;
            // The destructor does not delete refs in this case.
        }
    }
    // Note that even with only strong reference operations, the thread
    // deallocating this may not be the same as the thread deallocating refs.
    // That's OK: all accesses to this happen before its deletion here,
    // and all accesses to refs happen before its deletion in the final decWeak.
    // The destructor can safely access mRefs because either it's deleting
    // mRefs itself, or it's running entirely before the final mWeak decrement.
    refs->decWeak(id);
}
// Called (with the display lock held) when a locked color buffer has been
// released; signals waiters once no buffers remain locked.
void EglDisplayImpl::OnColorBufferReleasedLocked() {
  --color_buffers_locked_;
  LOG_ALWAYS_FATAL_IF(color_buffers_locked_ < 0);  // release without a matching lock
  if (color_buffers_locked_ == 0) {
    cond_no_locked_buffers_.Signal();
  }
}