// Proxy-side heap destructor: releases the local fd and, depending on how the
// heap was obtained, either unmaps the memory or drops it from the cache list.
BpMemoryHeap::~BpMemoryHeap() {
    // mHeapId == -1 means the heap was never bound to a local fd; nothing to release.
    if (mHeapId != -1) {
        close(mHeapId);
        if (mRealHeap) {
            // by construction we're the last one
            // Only unmap if the mapping actually succeeded earlier.
            if (mBase != MAP_FAILED) {
                sp<IBinder> binder = const_cast<BpMemoryHeap*>(this)->asBinder();
                if (VERBOSE) {
                    ALOGD("UNMAPPING binder=%p, heap=%p, size=%d, fd=%d",
                            binder.get(), this, mSize, mHeapId);
                    // Verbose diagnostics: log who is tearing the mapping down.
                    CallStack stack;
                    stack.update();
                    stack.dump("callstack");
                }
                munmap(mBase, mSize);
            }
        } else {
            // remove from list only if it was mapped before
            // Not the real heap: just unregister this binder from the
            // process-wide heap cache instead of unmapping.
            sp<IBinder> binder = const_cast<BpMemoryHeap*>(this)->asBinder();
            free_heap(binder);
        }
    }
}
/**
 * Diagnostic hook invoked when an ExecutionManager thread terminates:
 * logs the event together with the current call stack (up to 100 frames,
 * skipping 0 frames).
 *
 * @param param Unused; the signature matches the thread-exit callback type.
 */
void endOfThread(void *param) {
    ALOGD("*********** THREAD HAS STOPPPED ***********");
    // Fix: the original heap-allocated the CallStack with `new` and never
    // deleted it, leaking on every thread exit. An automatic (stack) object
    // is released when this function returns.
    CallStack cs;
    cs.update(0, 100);
    cs.dump("ExecutionManager terminated: ");
    // exit(1);
}
// Release a gralloc buffer. The actual free is deferred to the
// BufferLiberatorThread, so this call always reports success.
status_t GraphicBufferAllocator::free(buffer_handle_t handle)
{
    // [MTK] {{{
    // When call-stack dumping is enabled, record who freed this handle.
    if (mIsDumpCallStack) {
        XLOGD("[GraphicBufferAllocator::free] handle:%p", handle);
        CallStack callStack;
        callStack.update();
        callStack.dump(" ");
    }
    // [MTK] }}}

    // Hand the buffer over for asynchronous release.
    BufferLiberatorThread::queueCaptiveBuffer(handle);
    return NO_ERROR;
}
static int gl_no_context() { if (egl_tls_t::logNoContextCall()) { ALOGE("call to OpenGL ES API with no current context " "(logged once per thread)"); char value[PROPERTY_VALUE_MAX]; property_get("debug.egl.callstack", value, "0"); if (atoi(value)) { CallStack stack; stack.update(); stack.dump(); } } return 0; }
// Destructor sanity check for the debug ref-tracking implementation: if any
// tracked strong or weak references are still outstanding, report them
// (fatally or as errors depending on DEBUG_REFS_FATAL_SANITY_CHECKS) and
// dump the current call stack.
// NOTE(review): the check is skipped when mRetain is set — presumably a
// "keep records" mode; confirm against the rest of weakref_impl.
~weakref_impl() {
    bool dumpStack = false;
    // Outstanding strong references at destruction time.
    if (!mRetain && mStrongRefs != NULL) {
        dumpStack = true;
#if DEBUG_REFS_FATAL_SANITY_CHECKS
        LOG_ALWAYS_FATAL("Strong references remain!");
#else
        ALOGE("Strong references remain:");
#endif
        // Walk the tracked entries: '+' marks an increment record,
        // '-' a decrement record (sign of refs->ref).
        ref_entry* refs = mStrongRefs;
        while (refs) {
            char inc = refs->ref >= 0 ? '+' : '-';
            ALOGD("\t%c ID %p (ref %d):", inc, refs->id, refs->ref);
#if DEBUG_REFS_CALLSTACK_ENABLED
            refs->stack.dump();
#endif
            refs = refs->next;
        }
    }
    // Same report for outstanding weak references.
    if (!mRetain && mWeakRefs != NULL) {
        dumpStack = true;
#if DEBUG_REFS_FATAL_SANITY_CHECKS
        LOG_ALWAYS_FATAL("Weak references remain:");
#else
        ALOGE("Weak references remain!");
#endif
        ref_entry* refs = mWeakRefs;
        while (refs) {
            char inc = refs->ref >= 0 ? '+' : '-';
            ALOGD("\t%c ID %p (ref %d):", inc, refs->id, refs->ref);
#if DEBUG_REFS_CALLSTACK_ENABLED
            refs->stack.dump();
#endif
            refs = refs->next;
        }
    }
    // If anything leaked, also log where this destructor ran from.
    if (dumpStack) {
        ALOGE("above errors at:");
        CallStack stack;
        stack.update();
        stack.dump();
    }
}
static int gl_no_context() { if (egl_tls_t::logNoContextCall()) { char const* const error = "call to OpenGL ES API with " "no current context (logged once per thread)"; if (LOG_NDEBUG) { ALOGE(error); } else { LOG_ALWAYS_FATAL(error); } char value[PROPERTY_VALUE_MAX]; property_get("debug.egl.callstack", value, "0"); if (atoi(value)) { CallStack stack; stack.update(); stack.dump(); } } return 0; }
// Unregister a buffer handle with the gralloc allocator module and
// propagate its status, warning on failure.
status_t GraphicBufferMapper::unregisterBuffer(buffer_handle_t handle)
{
    // [MTK] {{{
    // When call-stack dumping is enabled, record who unregistered this handle.
    if (mIsDumpCallStack) {
        XLOGD("[GraphicBufferMapper::unregisterBuffer] handle:%p", handle);
        CallStack callStack;
        callStack.update();
        callStack.dump(" ");
    }
    // [MTK] }}}

    ATRACE_CALL();

    const status_t result = mAllocMod->unregisterBuffer(mAllocMod, handle);
    ALOGW_IF(result, "unregisterBuffer(%p) failed %d (%s)",
            handle, result, strerror(-result));
    return result;
}
// Remove the tracking entry for 'id' from the given singly-linked ref list.
// If no matching entry exists, report the inconsistency (fatally when
// DEBUG_REFS_FATAL_SANITY_CHECKS is set), dump the remaining entries, and
// dump the current call stack.
void removeRef(ref_entry** refs, const void* id)
{
    if (!mTrackEnabled)
        return;

    AutoMutex _l(mMutex);
    ref_entry* const head = *refs;

    // Unlink and free the first entry whose id matches.
    for (ref_entry* cur = *refs; cur != NULL; cur = *refs) {
        if (cur->id == id) {
            *refs = cur->next;
            delete cur;
            return;
        }
        refs = &cur->next;
    }

    // No entry matched: this is a ref-counting bookkeeping error.
#if DEBUG_REFS_FATAL_SANITY_CHECKS
    LOG_ALWAYS_FATAL("RefBase: removing id %p on RefBase %p"
            "(weakref_type %p) that doesn't exist!",
            id, mBase, this);
#endif

    ALOGE("RefBase: removing id %p on RefBase %p"
            "(weakref_type %p) that doesn't exist!",
            id, mBase, this);

    // Dump every remaining entry ('+' increment record, '-' decrement).
    for (ref_entry* cur = head; cur != NULL; cur = cur->next) {
        char sign = cur->ref >= 0 ? '+' : '-';
        ALOGD("\t%c ID %p (ref %d):", sign, cur->id, cur->ref);
    }

    CallStack stack;
    stack.update();
    stack.dump();
}
// Allocate a gralloc buffer of the requested size/format/usage, retrying once
// after flushing pending asynchronous frees. On success, records the
// allocation in the process-wide sAllocList for debugging/accounting.
status_t GraphicBufferAllocator::alloc(uint32_t w, uint32_t h,
        PixelFormat format, int usage,
        buffer_handle_t* handle, int32_t* stride)
{
    ATRACE_CALL();

    // make sure to not allocate a N x 0 or 0 x N buffer, since this is
    // allowed from an API stand-point allocate a 1x1 buffer instead.
    if (!w || !h)
        w = h = 1;

    // we have a h/w allocator and h/w buffer is requested
    status_t err;

    // If too many async frees are queued up then wait for some of them to
    // complete before attempting to allocate more memory. This is exercised
    // by the android.opengl.cts.GLSurfaceViewTest CTS test.
    BufferLiberatorThread::maybeWaitForLiberation();

    err = mAllocDev->alloc(mAllocDev, w, h, format, usage, handle, stride);

    if (err != NO_ERROR) {
        // First attempt failed: block until all queued frees finish, then
        // retry exactly once.
        ALOGW("WOW! gralloc alloc failed, waiting for pending frees!");
        BufferLiberatorThread::waitForLiberation();
        err = mAllocDev->alloc(mAllocDev, w, h, format, usage, handle, stride);
    }

    ALOGW_IF(err, "alloc(%u, %u, %d, %08x, ...) failed %d (%s)",
            w, h, format, usage, err, strerror(-err));

    if (err == NO_ERROR) {
        // Record the successful allocation in the global list (sLock guards it).
        Mutex::Autolock _l(sLock);
        KeyedVector<buffer_handle_t, alloc_rec_t>& list(sAllocList);
        int bpp = bytesPerPixel(format);
        if (bpp < 0) {
            // probably a HAL custom format. in any case, we don't know
            // what its pixel size is.
            bpp = 0;
        }
        alloc_rec_t rec;
        rec.w = w;
        rec.h = h;
        rec.s = *stride;
        rec.format = format;
        rec.usage = usage;
        // NOTE(review): size is recorded as 0 for custom formats (bpp == 0);
        // stride[0] is the same value as *stride above.
        rec.size = h * stride[0] * bpp;
        // [MTK] {{{
        // Track which process requested this buffer.
        rec.pid = IPCThreadState::self()->getCallingPid();
        // [MTK] }}}
        list.add(*handle, rec);
    }

    // [MTK] {{{
    // dump call stack here after handle value got
    if (true == mIsDumpCallStack) {
        XLOGD("[GraphicBufferAllocator::alloc] handle:%p", *handle);
        CallStack cs;
        cs.update();
        cs.dump(" ");
    }
    // [MTK] }}}

    return err;
}