//-------------------------------------------------------------------------
// BacktraceThread functions.
//-------------------------------------------------------------------------
static void SignalHandler(int n __attribute__((unused)), siginfo_t* siginfo,
                          void* sigcontext) {
  if (pthread_mutex_lock(&g_entry_mutex) == 0) {
    pid_t pid = getpid();
    pid_t tid = gettid();
    ThreadEntry* cur_entry = g_list;
    while (cur_entry) {
      if (cur_entry->Match(pid, tid)) {
        break;
      }
      cur_entry = cur_entry->next;
    }
    pthread_mutex_unlock(&g_entry_mutex);
    if (!cur_entry) {
      BACK_LOGW("Unable to find pid %d tid %d information", pid, tid);
      return;
    }

    if (android_atomic_acquire_cas(STATE_WAITING, STATE_DUMPING, &cur_entry->state) == 0) {
      cur_entry->thread_intf->ThreadUnwind(siginfo, sigcontext,
                                           cur_entry->num_ignore_frames);
    }
    android_atomic_release_store(STATE_DONE, &cur_entry->state);
  }
}
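// SignalHandler() is the target-thread half of a three-state handshake: the
// requesting thread arms an entry with STATE_WAITING, and the target claims
// the dump by CASing it to STATE_DUMPING before release-storing STATE_DONE.
// Below is a minimal, hypothetical sketch of that handler side using C11
// <stdatomic.h> in place of the android_atomic_* calls (this sketch assumes
// they behave like acquire/release compare-exchange and store); the enum
// values and do_unwind() are illustrative names, not from the source.
#include <stdatomic.h>

enum DumpState { WAITING, DUMPING, DONE, CANCEL };

static atomic_int dump_state;

static void handler_side(void) {
  int expected = WAITING;
  // Claim the dump; if the requester already CASed WAITING to CANCEL,
  // this fails and the unwind is skipped.
  if (atomic_compare_exchange_strong_explicit(&dump_state, &expected, DUMPING,
                                              memory_order_acquire,
                                              memory_order_relaxed)) {
    /* do_unwind(); */  // Walk the stack while the requester polls.
  }
  // Store DONE unconditionally, as the source does, so the requester's
  // final acquire-load loop always terminates once the signal lands.
  atomic_store_explicit(&dump_state, DONE, memory_order_release);
}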
/*
 * Exercise several of the atomic ops.
 */
static void doAtomicTest(int num)
{
    int addVal = (num & 0x01) + 1;

    int i;
    for (i = 0; i < ITERATION_COUNT; i++) {
        if (USE_ATOMIC) {
            android_atomic_inc(&incTest);
            android_atomic_dec(&decTest);
            android_atomic_add(addVal, &addTest);

            int val;
            do {
                val = casTest;
            } while (android_atomic_release_cas(val, val + 3, &casTest) != 0);
            do {
                val = casTest;
            } while (android_atomic_acquire_cas(val, val - 1, &casTest) != 0);

            int64_t wval;
            do {
                wval = dvmQuasiAtomicRead64(&wideCasTest);
            } while (dvmQuasiAtomicCas64(wval, wval + 0x0000002000000001LL,
                                         &wideCasTest) != 0);
            do {
                wval = dvmQuasiAtomicRead64(&wideCasTest);
            } while (dvmQuasiAtomicCas64(wval, wval - 0x0000002000000001LL,
                                         &wideCasTest) != 0);
        } else {
            incr();
            decr();
            add(addVal);

            int val;
            do {
                val = casTest;
            } while (compareAndSwap(val, val + 3, &casTest) != 0);
            do {
                val = casTest;
            } while (compareAndSwap(val, val - 1, &casTest) != 0);

            int64_t wval;
            do {
                wval = wideCasTest;
            } while (compareAndSwapWide(wval, wval + 0x0000002000000001LL,
                                        &wideCasTest) != 0);
            do {
                wval = wideCasTest;
            } while (compareAndSwapWide(wval, wval - 0x0000002000000001LL,
                                        &wideCasTest) != 0);
        }
    }
}
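/*
 * The read-modify-CAS loops above are the canonical lock-free update
 * pattern: snapshot the value, compute the new value, retry if another
 * thread raced in between. A minimal, self-contained sketch of the same
 * pattern with C11 <stdatomic.h>; note the android_atomic_*_cas calls
 * return 0 on success, while compare_exchange returns true on success,
 * and on failure reloads the observed value into `val` for the retry.
 * `casDemo` and `atomicAddThree` are illustrative names, not from the
 * source.
 */
#include <stdatomic.h>

static atomic_int casDemo;

static void atomicAddThree(void)
{
    int val = atomic_load_explicit(&casDemo, memory_order_relaxed);
    /* Retry until no other thread modified casDemo between load and CAS. */
    while (!atomic_compare_exchange_weak_explicit(&casDemo, &val, val + 3,
                                                  memory_order_release,
                                                  memory_order_relaxed)) {
        /* val now holds the freshly observed value; just retry. */
    }
}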
static void unwind_backtrace_thread_signal_handler(int n __attribute__((unused)),
        siginfo_t* siginfo, void* sigcontext) {
    if (!android_atomic_acquire_cas(gettid(), STATE_DUMPING, &g_unwind_signal_state.tid_state)) {
        g_unwind_signal_state.returned_frames = unwind_backtrace_signal_arch(
                siginfo, sigcontext,
                g_unwind_signal_state.map_info_list,
                g_unwind_signal_state.backtrace,
                g_unwind_signal_state.ignore_depth,
                g_unwind_signal_state.max_depth);
        android_atomic_release_store(STATE_DONE, &g_unwind_signal_state.tid_state);
    } else {
        ALOGV("Received spurious SIGURG on thread %d that was intended for thread %d.",
                gettid(), android_atomic_acquire_load(&g_unwind_signal_state.tid_state));
    }
}
bool BacktraceThread::TriggerUnwindOnThread(ThreadEntry* entry) {
  entry->state = STATE_WAITING;

  if (tgkill(Pid(), Tid(), SIGURG) != 0) {
    BACK_LOGW("tgkill failed %s", strerror(errno));
    return false;
  }

  // Allow up to ten seconds for the dump to start.
  int wait_millis = 10000;
  int32_t state;
  while (true) {
    state = android_atomic_acquire_load(&entry->state);
    if (state != STATE_WAITING) {
      break;
    }
    if (wait_millis--) {
      usleep(1000);
    } else {
      break;
    }
  }

  bool cancelled = false;
  if (state == STATE_WAITING) {
    if (android_atomic_acquire_cas(state, STATE_CANCEL, &entry->state) == 0) {
      BACK_LOGW("Cancelled dump of thread %d", entry->tid);
      state = STATE_CANCEL;
      cancelled = true;
    } else {
      state = android_atomic_acquire_load(&entry->state);
    }
  }

  // Wait for at most ten seconds for the cancel or dump to finish.
  wait_millis = 10000;
  while (android_atomic_acquire_load(&entry->state) != STATE_DONE) {
    if (wait_millis--) {
      usleep(1000);
    } else {
      BACK_LOGW("Didn't finish thread unwind in 10 seconds.");
      break;
    }
  }
  return !cancelled;
}
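// TriggerUnwindOnThread() is the requester half of the handshake sketched
// after SignalHandler() above: arm the state, signal the target, poll on a
// millisecond budget (10000 iterations of usleep(1000), i.e. the ten seconds
// the comments promise), then race a CAS to STATE_CANCEL so exactly one side
// wins if the signal arrives late. A hypothetical C11 sketch of the requester
// side; it redeclares the same illustrative names as the handler sketch, and
// send_dump_signal() stands in for the tgkill() call.
#include <stdatomic.h>
#include <stdbool.h>
#include <unistd.h>

enum DumpState { WAITING, DUMPING, DONE, CANCEL };

static atomic_int dump_state;

static bool requester_side(void) {
  atomic_store_explicit(&dump_state, WAITING, memory_order_release);
  /* send_dump_signal(); */  // e.g. tgkill(pid, tid, SIGURG)

  // Bounded poll for the handler to claim the dump.
  int wait_millis = 10000;
  while (atomic_load_explicit(&dump_state, memory_order_acquire) == WAITING &&
         wait_millis-- > 0) {
    usleep(1000);
  }

  // Race the late handler for the WAITING slot: if our CAS wins, the
  // handler's CAS fails and the unwind is skipped.
  int expected = WAITING;
  bool cancelled = atomic_compare_exchange_strong_explicit(
      &dump_state, &expected, CANCEL, memory_order_acquire,
      memory_order_relaxed);

  // Either way, the handler's unconditional release-store of DONE
  // eventually lets this loop exit.
  wait_millis = 10000;
  while (atomic_load_explicit(&dump_state, memory_order_acquire) != DONE &&
         wait_millis-- > 0) {
    usleep(1000);
  }
  return !cancelled;
}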
ssize_t unwind_backtrace_thread(pid_t tid, backtrace_frame_t* backtrace,
        size_t ignore_depth, size_t max_depth) {
    if (tid == gettid()) {
        return unwind_backtrace(backtrace, ignore_depth + 1, max_depth);
    }

    ALOGV("Unwinding thread %d from thread %d.", tid, gettid());

    // TODO: there's no tgkill(2) on Mac OS, so we'd either need the
    // mach_port_t or the pthread_t rather than the tid.
#if defined(CORKSCREW_HAVE_ARCH) && !defined(__APPLE__)
    struct sigaction act;
    struct sigaction oact;
    memset(&act, 0, sizeof(act));
    act.sa_sigaction = unwind_backtrace_thread_signal_handler;
    act.sa_flags = SA_RESTART | SA_SIGINFO | SA_ONSTACK;
    sigemptyset(&act.sa_mask);

    pthread_mutex_lock(&g_unwind_signal_mutex);
    map_info_t* milist = acquire_my_map_info_list();

    ssize_t frames = -1;
    if (!sigaction(SIGURG, &act, &oact)) {
        g_unwind_signal_state.map_info_list = milist;
        g_unwind_signal_state.backtrace = backtrace;
        g_unwind_signal_state.ignore_depth = ignore_depth;
        g_unwind_signal_state.max_depth = max_depth;
        g_unwind_signal_state.returned_frames = 0;
        android_atomic_release_store(tid, &g_unwind_signal_state.tid_state);

        // Signal the specific thread that we want to dump.
        int32_t tid_state = tid;
        if (tgkill(getpid(), tid, SIGURG)) {
            ALOGV("Failed to send SIGURG to thread %d.", tid);
        } else {
            // Wait for the other thread to start dumping the stack, or time out.
            int wait_millis = 250;
            for (;;) {
                tid_state = android_atomic_acquire_load(&g_unwind_signal_state.tid_state);
                if (tid_state != tid) {
                    break;
                }
                if (wait_millis--) {
                    ALOGV("Waiting for thread %d to start dumping the stack...", tid);
                    usleep(1000);
                } else {
                    ALOGV("Timed out waiting for thread %d to start dumping the stack.", tid);
                    break;
                }
            }
        }

        // Try to cancel the dump if it has not started yet.
        if (tid_state == tid) {
            if (!android_atomic_acquire_cas(tid, STATE_CANCEL, &g_unwind_signal_state.tid_state)) {
                ALOGV("Canceled thread %d stack dump.", tid);
                tid_state = STATE_CANCEL;
            } else {
                tid_state = android_atomic_acquire_load(&g_unwind_signal_state.tid_state);
            }
        }

        // Wait indefinitely for the dump to finish or be canceled.
        // We cannot apply a timeout here because the other thread is accessing
        // state that is owned by this thread, such as milist. It should not
        // take very long to take the dump once started.
        while (tid_state == STATE_DUMPING) {
            ALOGV("Waiting for thread %d to finish dumping the stack...", tid);
            usleep(1000);
            tid_state = android_atomic_acquire_load(&g_unwind_signal_state.tid_state);
        }

        if (tid_state == STATE_DONE) {
            frames = g_unwind_signal_state.returned_frames;
        }

        sigaction(SIGURG, &oact, NULL);
    }

    release_my_map_info_list(milist);
    pthread_mutex_unlock(&g_unwind_signal_mutex);
    return frames;
#else
    return -1;
#endif
}
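// A minimal caller-side sketch of the API above: allocate a frame buffer,
// request another thread's stack, and symbolize the result with corkscrew's
// get_backtrace_symbols()/format_backtrace_line()/free_backtrace_symbols()
// helpers, which this sketch assumes are declared alongside
// unwind_backtrace_thread() in <corkscrew/backtrace.h>. kMaxDepth and
// dump_thread_stack() are illustrative, not from this file.
#include <corkscrew/backtrace.h>
#include <stdio.h>

enum { kMaxDepth = 32 };  // Assumed depth limit for this sketch.

static void dump_thread_stack(pid_t target_tid) {
    backtrace_frame_t frames[kMaxDepth];
    ssize_t count = unwind_backtrace_thread(target_tid, frames, 0, kMaxDepth);
    if (count < 0) {
        fprintf(stderr, "unwind failed for tid %d\n", target_tid);
        return;
    }

    backtrace_symbol_t symbols[kMaxDepth];
    get_backtrace_symbols(frames, count, symbols);
    for (ssize_t i = 0; i < count; i++) {
        char line[1024];
        format_backtrace_line((unsigned) i, &frames[i], &symbols[i],
                              line, sizeof(line));
        printf("%s\n", line);
    }
    free_backtrace_symbols(symbols, count);
}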
/*
 * Implements monitorenter for "synchronized" stuff.
 *
 * This does not fail or throw an exception (unless deadlock prediction
 * is enabled and set to "err" mode).
 */
void dvmLockObject(Thread* self, Object *obj)
{
    volatile u4 *thinp;
    ThreadStatus oldStatus;
    struct timespec tm;
    long sleepDelayNs;
    long minSleepDelayNs = 1000000;  /* 1 millisecond */
    long maxSleepDelayNs = 1000000000;  /* 1 second */
    u4 thin, newThin, threadId;

    assert(self != NULL);
    assert(obj != NULL);
    threadId = self->threadId;
    thinp = &obj->lock;

retry:
    thin = *thinp;
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /*
         * The lock is a thin lock. The owner field is used to
         * determine the acquire method, ordered by cost.
         */
        if (LW_LOCK_OWNER(thin) == threadId) {
            /*
             * The calling thread owns the lock. Increment the
             * value of the recursion count field.
             */
            obj->lock += 1 << LW_LOCK_COUNT_SHIFT;
            if (LW_LOCK_COUNT(obj->lock) == LW_LOCK_COUNT_MASK) {
                /*
                 * The reacquisition limit has been reached. Inflate
                 * the lock so the next acquire will not overflow the
                 * recursion count field.
                 */
                inflateMonitor(self, obj);
            }
        } else if (LW_LOCK_OWNER(thin) == 0) {
            /*
             * The lock is unowned. Install the thread id of the
             * calling thread into the owner field. This is the
             * common case. In performance critical code the JIT
             * will have tried this before calling out to the VM.
             */
            newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
            if (android_atomic_acquire_cas(thin, newThin,
                    (int32_t*)thinp) != 0) {
                /*
                 * The acquire failed. Try again.
                 */
                goto retry;
            }
        } else {
            ALOGV("(%d) spin on lock %p: %#x (%#x) %#x",
                  threadId, &obj->lock, 0, *thinp, thin);
            /*
             * The lock is owned by another thread. Notify the VM
             * that we are about to wait.
             */
            oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
            /*
             * Spin until the thin lock is released or inflated.
             */
            sleepDelayNs = 0;
            for (;;) {
                thin = *thinp;
                /*
                 * Check the shape of the lock word. Another thread
                 * may have inflated the lock while we were waiting.
                 */
                if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
                    if (LW_LOCK_OWNER(thin) == 0) {
                        /*
                         * The lock has been released. Install the
                         * thread id of the calling thread into the
                         * owner field.
                         */
                        newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
                        if (android_atomic_acquire_cas(thin, newThin,
                                (int32_t *)thinp) == 0) {
                            /*
                             * The acquire succeeded. Break out of the
                             * loop and proceed to inflate the lock.
                             */
                            break;
                        }
                    } else {
                        /*
                         * The lock has not been released. Yield so
                         * the owning thread can run.
                         */
                        if (sleepDelayNs == 0) {
                            sched_yield();
                            sleepDelayNs = minSleepDelayNs;
                        } else {
                            tm.tv_sec = 0;
                            tm.tv_nsec = sleepDelayNs;
                            nanosleep(&tm, NULL);
                            /*
                             * Prepare the next delay value. Wrap back to
                             * the minimum to avoid polling only once a
                             * second for eternity.
                             */
                            if (sleepDelayNs < maxSleepDelayNs / 2) {
                                sleepDelayNs *= 2;
                            } else {
                                sleepDelayNs = minSleepDelayNs;
                            }
                        }
                    }
                } else {
                    /*
                     * The thin lock was inflated by another thread.
                     * Let the VM know we are no longer waiting and
                     * try again.
                     */
                    ALOGV("(%d) lock %p surprise-fattened",
                          threadId, &obj->lock);
                    dvmChangeStatus(self, oldStatus);
                    goto retry;
                }
            }
            ALOGV("(%d) spin on lock done %p: %#x (%#x) %#x",
                  threadId, &obj->lock, 0, *thinp, thin);
            /*
             * We have acquired the thin lock. Let the VM know that
             * we are no longer waiting.
             */
            dvmChangeStatus(self, oldStatus);
            /*
             * Fatten the lock.
             */
            inflateMonitor(self, obj);
            ALOGV("(%d) lock %p fattened", threadId, &obj->lock);
        }
    } else {
        /*
         * The lock is a fat lock.
         */
        assert(LW_MONITOR(obj->lock) != NULL);
        lockMonitor(self, LW_MONITOR(obj->lock));
    }
}
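/*
 * The contended path above combines three ideas: an acquire-CAS on the
 * owner field, sched_yield() on first contention, and exponentially
 * growing nanosleep() backoff capped at one second. A self-contained,
 * hypothetical model of that backoff spinlock using C11 <stdatomic.h>;
 * the lock word here is just an owner id (0 == unowned), not Dalvik's
 * real shape/recursion-count layout, and the names are illustrative.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <sched.h>
#include <time.h>

#define MIN_SLEEP_NS 1000000L     /* 1 millisecond */
#define MAX_SLEEP_NS 1000000000L  /* 1 second */

static atomic_uint_fast32_t lockWord;  /* 0 means unowned. */

static void spinLockAcquire(uint_fast32_t threadId)
{
    long sleepDelayNs = 0;
    for (;;) {
        uint_fast32_t expected = 0;
        /* Acquire-CAS: install our id only if the lock is unowned. */
        if (atomic_compare_exchange_weak_explicit(&lockWord, &expected,
                threadId, memory_order_acquire, memory_order_relaxed)) {
            return;
        }
        if (sleepDelayNs == 0) {
            /* First failure: give the owning thread a chance to run. */
            sched_yield();
            sleepDelayNs = MIN_SLEEP_NS;
        } else {
            struct timespec tm = { 0, sleepDelayNs };
            nanosleep(&tm, NULL);
            /* Double the delay, wrapping back to the minimum so we
             * never settle into once-a-second polls forever. */
            sleepDelayNs = (sleepDelayNs < MAX_SLEEP_NS / 2)
                    ? sleepDelayNs * 2 : MIN_SLEEP_NS;
        }
    }
}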