void ServerProxy::releaseBuffer(Buffer* buffer)
{
    LOG_ALWAYS_FATAL_IF(buffer == NULL);
    size_t stepCount = buffer->mFrameCount;
    if (stepCount == 0 || mIsShutdown) {
        // prevent accidental re-use of buffer
        buffer->mFrameCount = 0;
        buffer->mRaw = NULL;
        buffer->mNonContig = 0;
        return;
    }
    LOG_ALWAYS_FATAL_IF(!(stepCount <= mUnreleased && mUnreleased <= mFrameCount));
    mUnreleased -= stepCount;
    audio_track_cblk_t* cblk = mCblk;
    if (mIsOut) {
        int32_t front = cblk->u.mStreaming.mFront;
        android_atomic_release_store(stepCount + front, &cblk->u.mStreaming.mFront);
    } else {
        int32_t rear = cblk->u.mStreaming.mRear;
        android_atomic_release_store(stepCount + rear, &cblk->u.mStreaming.mRear);
    }

    cblk->mServer += stepCount;

    size_t half = mFrameCount / 2;
    if (half == 0) {
        half = 1;
    }
    size_t minimum = (size_t) cblk->mMinimum;
    if (minimum == 0) {
        minimum = mIsOut ? half : 1;
    } else if (minimum > half) {
        minimum = half;
    }
    // FIXME AudioRecord wakeup needs to be optimized; it currently wakes up client every time
    if (!mIsOut || (mAvailToClient + stepCount >= minimum)) {
        ALOGV("mAvailToClient=%zu stepCount=%zu minimum=%zu", mAvailToClient, stepCount, minimum);
        int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
        if (!(old & CBLK_FUTEX_WAKE)) {
            (void) syscall(__NR_futex, &cblk->mFutex,
                    mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, 1);
        }
    }

    buffer->mFrameCount = 0;
    buffer->mRaw = NULL;
    buffer->mNonContig = 0;
}
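// Worked example of the wakeup threshold above (numbers are illustrative):
// with mFrameCount = 1024 and cblk->mMinimum = 0, an output track computes
// half = 512 and minimum = 512, so it issues a futex wake only once at least
// half the buffer is available to the client, batching wakeups; a capture
// track (mIsOut == false) takes the "!mIsOut" shortcut and wakes the client
// on every release, which is exactly what the FIXME above complains about.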
/*
 * Implements monitorexit for "synchronized" stuff.
 *
 * On failure, throws an exception and returns "false".
 */
bool dvmUnlockObject(Thread* self, Object *obj)
{
    u4 thin;

    assert(self != NULL);
    assert(self->status == THREAD_RUNNING);
    assert(obj != NULL);
    /*
     * Cache the lock word as its value can change while we are
     * examining its state.
     */
    thin = *(volatile u4 *)&obj->lock;
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /*
         * The lock is thin.  We must ensure that the lock is owned
         * by the given thread before unlocking it.
         */
        if (LW_LOCK_OWNER(thin) == self->threadId) {
            /*
             * We are the lock owner.  It is safe to update the lock
             * without CAS as lock ownership guards the lock itself.
             */
            if (LW_LOCK_COUNT(thin) == 0) {
                /*
                 * The lock was not recursively acquired, the common
                 * case.  Unlock by clearing all bits except for the
                 * hash state.
                 */
                thin &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
                android_atomic_release_store(thin, (int32_t*)&obj->lock);
            } else {
                /*
                 * The object was recursively acquired.  Decrement the
                 * lock recursion count field.
                 */
                obj->lock -= 1 << LW_LOCK_COUNT_SHIFT;
            }
        } else {
            /*
             * We do not own the lock.  The JVM spec requires that we
             * throw an exception in this case.
             */
            dvmThrowIllegalMonitorStateException("unlock of unowned monitor");
            return false;
        }
    } else {
        /*
         * The lock is fat.  We must check to see if unlockMonitor has
         * raised any exceptions before continuing.
         */
        assert(LW_MONITOR(obj->lock) != NULL);
        if (!unlockMonitor(self, LW_MONITOR(obj->lock))) {
            /*
             * An exception has been raised.  Do not fall through.
             */
            return false;
        }
    }
    return true;
}
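/*
 * A minimal sketch of how a diagnostic helper might classify a lock word
 * using the same LW_* macros; "describeLockWord" is illustrative only and
 * is not part of Monitor.cpp.
 */
static void describeLockWord(u4 lock)
{
    if (LW_SHAPE(lock) == LW_SHAPE_THIN) {
        /* thin: the owner thread id and recursion count live in the word itself */
        LOGV("thin lock: owner=%d count=%d\n",
            (int) LW_LOCK_OWNER(lock), (int) LW_LOCK_COUNT(lock));
    } else {
        /* fat: the word holds a pointer to an out-of-line Monitor */
        LOGV("fat lock: monitor=%p\n", (void*) LW_MONITOR(lock));
    }
}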
ssize_t audio_utils_fifo_write(struct audio_utils_fifo *fifo, const void *buffer, size_t count)
{
    int32_t front = android_atomic_acquire_load(&fifo->mFront);
    int32_t rear = fifo->mRear;
    size_t availToWrite = fifo->mFrameCount - audio_utils_fifo_diff(fifo, rear, front);
    if (availToWrite > count) {
        availToWrite = count;
    }
    rear &= fifo->mFrameCountP2 - 1;
    size_t part1 = fifo->mFrameCount - rear;
    if (part1 > availToWrite) {
        part1 = availToWrite;
    }
    if (part1 > 0) {
        memcpy((char *) fifo->mBuffer + (rear * fifo->mFrameSize), buffer,
                part1 * fifo->mFrameSize);
        size_t part2 = availToWrite - part1;
        if (part2 > 0) {
            memcpy(fifo->mBuffer, (char *) buffer + (part1 * fifo->mFrameSize),
                    part2 * fifo->mFrameSize);
        }
        android_atomic_release_store(audio_utils_fifo_sum(fifo, fifo->mRear, availToWrite),
                &fifo->mRear);
    }
    return availToWrite;
}
ssize_t audio_utils_fifo_read(struct audio_utils_fifo *fifo, void *buffer, size_t count)
{
    int32_t rear = android_atomic_acquire_load(&fifo->mRear);
    int32_t front = fifo->mFront;
    size_t availToRead = audio_utils_fifo_diff(fifo, rear, front);
    if (availToRead > count) {
        availToRead = count;
    }
    front &= fifo->mFrameCountP2 - 1;
    size_t part1 = fifo->mFrameCount - front;
    if (part1 > availToRead) {
        part1 = availToRead;
    }
    if (part1 > 0) {
        memcpy(buffer, (char *) fifo->mBuffer + (front * fifo->mFrameSize),
                part1 * fifo->mFrameSize);
        size_t part2 = availToRead - part1;
        if (part2 > 0) {
            memcpy((char *) buffer + (part1 * fifo->mFrameSize), fifo->mBuffer,
                    part2 * fifo->mFrameSize);
        }
        android_atomic_release_store(audio_utils_fifo_sum(fifo, fifo->mFront, availToRead),
                &fifo->mFront);
    }
    return availToRead;
}
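/*
 * A minimal usage sketch for the single-reader, single-writer FIFO above,
 * assuming the audio_utils_fifo_init(fifo, frameCount, frameSize, buffer)
 * initializer from the same library; the local names are illustrative, and
 * in real use the write and read calls run on two different threads.
 */
static void fifo_example(void)
{
    static int16_t storage[256];    // 256 frames of one int16_t each
    struct audio_utils_fifo fifo;
    audio_utils_fifo_init(&fifo, 256, sizeof(int16_t), storage);

    int16_t in[32] = {0};
    ssize_t written = audio_utils_fifo_write(&fifo, in, 32);    // writer thread

    int16_t out[32];
    ssize_t read = audio_utils_fifo_read(&fifo, out, 32);       // reader thread
    (void) written;
    (void) read;
}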
//-------------------------------------------------------------------------
// BacktraceThread functions.
//-------------------------------------------------------------------------
static void SignalHandler(int n __attribute__((unused)), siginfo_t* siginfo,
                          void* sigcontext) {
  if (pthread_mutex_lock(&g_entry_mutex) == 0) {
    pid_t pid = getpid();
    pid_t tid = gettid();
    ThreadEntry* cur_entry = g_list;
    while (cur_entry) {
      if (cur_entry->Match(pid, tid)) {
        break;
      }
      cur_entry = cur_entry->next;
    }
    pthread_mutex_unlock(&g_entry_mutex);
    if (!cur_entry) {
      BACK_LOGW("Unable to find pid %d tid %d information", pid, tid);
      return;
    }

    if (android_atomic_acquire_cas(STATE_WAITING, STATE_DUMPING, &cur_entry->state) == 0) {
      cur_entry->thread_intf->ThreadUnwind(siginfo, sigcontext,
                                           cur_entry->num_ignore_frames);
    }
    android_atomic_release_store(STATE_DONE, &cur_entry->state);
  }
}
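// Hedged sketch of the requesting thread's wait that pairs with the release
// store of STATE_DONE above; "WaitForDump" is a hypothetical helper name,
// not the actual libbacktrace entry point.
static bool WaitForDump(ThreadEntry* entry, int timeout_ms) {
  while (android_atomic_acquire_load(&entry->state) != STATE_DONE) {
    if (timeout_ms-- <= 0) {
      return false;     // gave up; the signal handler may still be running
    }
    usleep(1000);       // poll at roughly 1 ms intervals
  }
  return true;
}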
void NBLog::Writer::log(const NBLog::Entry *entry, bool trusted)
{
    if (!mEnabled) {
        return;
    }
    if (!trusted) {
        log(entry->mEvent, entry->mData, entry->mLength);
        return;
    }
    size_t rear = mRear & (mSize - 1);
    size_t written = mSize - rear;      // bytes that fit before the buffer wraps
    size_t need = entry->mLength + 3;   // total bytes needed: mEvent, mLength, data[length], mLength
    if (written > need) {
        written = need;
    }
    size_t i;
    // FIXME optimize this using memcpy for the data part of the Entry.
    // The Entry could have a method copyTo(ptr, offset, size) to optimize the copy.
    for (i = 0; i < written; ++i) {
        mShared->mBuffer[rear + i] = entry->readAt(i);
    }
    if (rear + written == mSize && (need -= written) > 0) {
        // wrap around and copy the remainder to the front of the buffer
        for (i = 0; i < need; ++i) {
            mShared->mBuffer[i] = entry->readAt(written + i);
        }
        written += need;
    }
    android_atomic_release_store(mRear += written, &mShared->mRear);
}
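// For reference, the serialized byte layout that readAt(i) walks, as implied
// by "need = mLength + 3" above:
//   offset 0                  mEvent  (event code)
//   offset 1                  mLength (payload length)
//   offsets 2 .. mLength+1    payload bytes (mData)
//   offset mLength+2          mLength again, so a reader can also scan backwards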
void FastCapture::onWork()
{
    const FastCaptureState * const current = (const FastCaptureState *) mCurrent;
    FastCaptureDumpState * const dumpState = (FastCaptureDumpState *) mDumpState;
    const FastCaptureState::Command command = mCommand;
    const size_t frameCount = current->mFrameCount;

    if ((command & FastCaptureState::READ) /*&& isWarm*/) {
        ALOG_ASSERT(mInputSource != NULL);
        ALOG_ASSERT(mReadBuffer != NULL);
        dumpState->mReadSequence++;
        ATRACE_BEGIN("read");
        ssize_t framesRead = mInputSource->read(mReadBuffer, frameCount,
                AudioBufferProvider::kInvalidPTS);
        ATRACE_END();
        dumpState->mReadSequence++;
        if (framesRead >= 0) {
            LOG_ALWAYS_FATAL_IF((size_t) framesRead > frameCount);
            mTotalNativeFramesRead += framesRead;
            dumpState->mFramesRead = mTotalNativeFramesRead;
            mReadBufferState = framesRead;
        } else {
            dumpState->mReadErrors++;
            mReadBufferState = 0;
        }
        // FIXME rename to attemptedIO
        mAttemptedWrite = true;
    }

    if (command & FastCaptureState::WRITE) {
        ALOG_ASSERT(mPipeSink != NULL);
        ALOG_ASSERT(mReadBuffer != NULL);
        if (mReadBufferState < 0) {
            // the last read failed, so feed silence downstream instead
            memset(mReadBuffer, 0, frameCount * Format_frameSize(mFormat));
            mReadBufferState = frameCount;
        }
        if (mReadBufferState > 0) {
            ssize_t framesWritten = mPipeSink->write(mReadBuffer, mReadBufferState);
            // FIXME This supports at most one fast capture client.
            //       To handle multiple clients this could be converted to an array,
            //       or with a lot more work the control block could be shared by all clients.
            audio_track_cblk_t* cblk = current->mCblk;
            if (cblk != NULL && framesWritten > 0) {
                int32_t rear = cblk->u.mStreaming.mRear;
                android_atomic_release_store(framesWritten + rear, &cblk->u.mStreaming.mRear);
                cblk->mServer += framesWritten;
                int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
                if (!(old & CBLK_FUTEX_WAKE)) {
                    // client is never in server process, so don't use FUTEX_WAKE_PRIVATE
                    (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, 1);
                }
            }
        }
    }
}
/*
 * public native void putIntVolatile(Object obj, long offset, int newValue);
 */
static void Dalvik_sun_misc_Unsafe_putIntVolatile(const u4 *args, JValue *pResult)
{
    // We ignore the this pointer in args[0].
    Object *obj = (Object *) args[1];
    s8 offset = GET_ARG_LONG(args, 2);
    s4 value = (s4) args[4];
    volatile int32_t *address = (volatile int32_t *) (((u1 *) obj) + offset);
    android_atomic_release_store(value, address);
    RETURN_VOID();
}
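/*
 * For symmetry, a hedged sketch of the matching volatile read; the real
 * Dalvik implementation lives alongside the function above, and this body
 * is illustrative:
 *
 * public native int getIntVolatile(Object obj, long offset);
 */
static void Dalvik_sun_misc_Unsafe_getIntVolatile(const u4 *args, JValue *pResult)
{
    // We ignore the this pointer in args[0].
    Object *obj = (Object *) args[1];
    s8 offset = GET_ARG_LONG(args, 2);
    volatile int32_t *address = (volatile int32_t *) (((u1 *) obj) + offset);
    // acquire load pairs with the release store in putIntVolatile
    RETURN_INT(android_atomic_acquire_load(address));
}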
/*
 * Determine if "method" is a "privileged" invocation, i.e. is it one
 * of the variations of AccessController.doPrivileged().
 *
 * Because the security stuff pulls in a pile of stuff that we may not
 * want or need, we don't do the class/method lookups at init time, but
 * instead on first use.
 */
bool dvmIsPrivilegedMethod(const Method* method)
{
    int i;

    assert(method != NULL);

    if (!gDvm.javaSecurityAccessControllerReady) {
        /*
         * Populate on first use.  No concurrency risk since we're just
         * finding pointers to fixed structures.
         */
        static const char* kSignatures[NUM_DOPRIV_FUNCS] = {
            "(Ljava/security/PrivilegedAction;)Ljava/lang/Object;",
            "(Ljava/security/PrivilegedExceptionAction;)Ljava/lang/Object;",
            "(Ljava/security/PrivilegedAction;Ljava/security/AccessControlContext;)Ljava/lang/Object;",
            "(Ljava/security/PrivilegedExceptionAction;Ljava/security/AccessControlContext;)Ljava/lang/Object;",
        };
        ClassObject* clazz;

        clazz = dvmFindClassNoInit("Ljava/security/AccessController;", NULL);
        if (clazz == NULL) {
            LOGW("Couldn't find java/security/AccessController\n");
            return false;
        }

        assert(NELEM(gDvm.methJavaSecurityAccessController_doPrivileged) ==
               NELEM(kSignatures));    /* verify init */

        for (i = 0; i < NUM_DOPRIV_FUNCS; i++) {
            gDvm.methJavaSecurityAccessController_doPrivileged[i] =
                dvmFindDirectMethodByDescriptor(clazz, "doPrivileged", kSignatures[i]);
            if (gDvm.methJavaSecurityAccessController_doPrivileged[i] == NULL) {
                LOGW("Warning: couldn't find java/security/AccessController"
                     ".doPrivileged %s\n", kSignatures[i]);
                return false;
            }
        }

        /* all good, raise volatile readiness flag */
        android_atomic_release_store(true, &gDvm.javaSecurityAccessControllerReady);
    }

    for (i = 0; i < NUM_DOPRIV_FUNCS; i++) {
        if (gDvm.methJavaSecurityAccessController_doPrivileged[i] == method) {
            //LOGI("+++ doPriv match\n");
            return true;
        }
    }
    return false;
}
void AudioTrackClientProxy::flush()
{
    // This works for mFrameCountP2 <= 2^30
    size_t increment = mFrameCountP2 << 1;
    size_t mask = increment - 1;
    audio_track_cblk_t* cblk = mCblk;
    // mFlush is 32 bits concatenated as [ flush_counter ] [ newfront_offset ]
    // Should newFlush = cblk->u.mStreaming.mRear?  Only problem is
    // if you want to flush twice to the same rear location after a 32 bit wrap.
    int32_t newFlush = (cblk->u.mStreaming.mRear & mask) |
                       ((cblk->u.mStreaming.mFlush & ~mask) + increment);
    android_atomic_release_store(newFlush, &cblk->u.mStreaming.mFlush);
}
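// A minimal sketch of how the packed mFlush word above could be split back
// into its two fields; "decodeFlush" is a hypothetical helper, not AOSP API
// (the real consumer is ServerProxy::obtainBuffer(), shown later).
static void decodeFlush(int32_t flush, size_t frameCountP2,
        int32_t *counter, int32_t *newFrontOffset)
{
    size_t mask = (frameCountP2 << 1) - 1;
    *newFrontOffset = flush & (int32_t) mask;   // low bits: new front offset
    *counter = flush & ~(int32_t) mask;         // high bits: bumped once per flush()
}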
static void unwind_backtrace_thread_signal_handler(int n __attribute__((unused)),
        siginfo_t* siginfo, void* sigcontext) {
    if (!android_atomic_acquire_cas(gettid(), STATE_DUMPING,
            &g_unwind_signal_state.tid_state)) {
        g_unwind_signal_state.returned_frames = unwind_backtrace_signal_arch(
                siginfo, sigcontext,
                g_unwind_signal_state.map_info_list,
                g_unwind_signal_state.backtrace,
                g_unwind_signal_state.ignore_depth,
                g_unwind_signal_state.max_depth);
        android_atomic_release_store(STATE_DONE, &g_unwind_signal_state.tid_state);
    } else {
        ALOGV("Received spurious SIGURG on thread %d that was intended for thread %d.",
                gettid(), android_atomic_acquire_load(&g_unwind_signal_state.tid_state));
    }
}
void ClientProxy::releaseBuffer(Buffer* buffer)
{
    LOG_ALWAYS_FATAL_IF(buffer == NULL);
    size_t stepCount = buffer->mFrameCount;
    if (stepCount == 0 || mIsShutdown) {
        // prevent accidental re-use of buffer
        buffer->mFrameCount = 0;
        buffer->mRaw = NULL;
        buffer->mNonContig = 0;
        return;
    }
    LOG_ALWAYS_FATAL_IF(!(stepCount <= mUnreleased && mUnreleased <= mFrameCount));
    mUnreleased -= stepCount;
    audio_track_cblk_t* cblk = mCblk;
    // Both of these barriers are required
    if (mIsOut) {
        int32_t rear = cblk->u.mStreaming.mRear;
        android_atomic_release_store(stepCount + rear, &cblk->u.mStreaming.mRear);
    } else {
        int32_t front = cblk->u.mStreaming.mFront;
        android_atomic_release_store(stepCount + front, &cblk->u.mStreaming.mFront);
    }
}
static void atrace_init_once()
{
    atrace_marker_fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);
    if (atrace_marker_fd == -1) {
        ALOGE("Error opening trace file: %s (%d)", strerror(errno), errno);
        atrace_enabled_tags = 0;
        goto done;
    }

    atrace_enabled_tags = atrace_get_property();

done:
    android_atomic_release_store(1, &atrace_is_ready);
}
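// Hedged sketch of the matching reader: before trusting atrace_enabled_tags,
// a caller would pair the release store above with an acquire load, roughly
// as below; "atrace_init_done" is a hypothetical name for illustration.
static inline bool atrace_init_done()
{
    // true only once atrace_init_once() has published its results
    return android_atomic_acquire_load(&atrace_is_ready) != 0;
}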
/*
 * Changes the shape of a monitor from thin to fat, preserving the
 * internal lock state.  The calling thread must own the lock.
 */
static void inflateMonitor(Thread *self, Object *obj)
{
    Monitor *mon;
    u4 thin;

    assert(self != NULL);
    assert(obj != NULL);
    assert(LW_SHAPE(obj->lock) == LW_SHAPE_THIN);
    assert(LW_LOCK_OWNER(obj->lock) == self->threadId);

    /* Allocate and acquire a new monitor. */
    mon = dvmCreateMonitor(obj);
    lockMonitor(self, mon);

    /* Propagate the lock state. */
    thin = obj->lock;
    mon->lockCount = LW_LOCK_COUNT(thin);
    thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
    thin |= (u4)mon | LW_SHAPE_FAT;

    /* Publish the updated lock word. */
    android_atomic_release_store(thin, (int32_t *)&obj->lock);
}
void MonoPipe::updateFrontAndNRPTS(int32_t newFront, int64_t newNextRdPTS)
{
    // Set the MSB of the update sequence number to indicate that there is a
    // multi-variable update in progress.  Use an atomic store with an "acquire"
    // barrier to make sure that the next operations cannot be re-ordered and
    // take place before the change to mUpdateSeq is committed.
    int32_t tmp = mUpdateSeq | 0x80000000;
    android_atomic_acquire_store(tmp, &mUpdateSeq);

    // Update mFront and mNextRdPTS
    mFront = newFront;
    mNextRdPTS = newNextRdPTS;

    // We are finished with the update.  Compute the next sequence number (which
    // should be the old sequence number, plus one, and with the MSB cleared)
    // and then store it in mUpdateSeq using an atomic store with a "release"
    // barrier so our update operations cannot be re-ordered past the update of
    // the sequence number.
    tmp = (tmp + 1) & 0x7FFFFFFF;
    android_atomic_release_store(tmp, &mUpdateSeq);
}
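// Hedged reader-side sketch of the sequence-lock pattern above; the method
// name is hypothetical (the real consumer is MonoPipeReader), and this
// simplified loop relies on the acquire loads alone, whereas a production
// reader would also need a read barrier between the data reads and the
// second sequence load.
void MonoPipe::snapshotFrontAndNRPTS(int32_t* front, int64_t* nextRdPTS)
{
    int32_t seq1, seq2;
    do {
        seq1 = android_atomic_acquire_load(&mUpdateSeq);
        *front = mFront;
        *nextRdPTS = mNextRdPTS;
        seq2 = android_atomic_acquire_load(&mUpdateSeq);
        // retry while an update is in progress (MSB set) or one slipped in between
    } while ((seq1 & 0x80000000) != 0 || seq1 != seq2);
}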
status_t ServerProxy::obtainBuffer(Buffer* buffer, bool ackFlush)
{
    LOG_ALWAYS_FATAL_IF(buffer == NULL || buffer->mFrameCount == 0);
    if (mIsShutdown) {
        goto no_init;
    }
    {
    audio_track_cblk_t* cblk = mCblk;
    // compute number of frames available to write (AudioTrack) or read (AudioRecord),
    // or use previous cached value from framesReady(), with added barrier if it omits.
    int32_t front;
    int32_t rear;
    // See notes on barriers at ClientProxy::obtainBuffer()
    if (mIsOut) {
        int32_t flush = cblk->u.mStreaming.mFlush;
        rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
        front = cblk->u.mStreaming.mFront;
        if (flush != mFlush) {
            mFlush = flush;
            // effectively obtain then release whatever is in the buffer
            android_atomic_release_store(rear, &cblk->u.mStreaming.mFront);
            if (front != rear) {
                int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
                if (!(old & CBLK_FUTEX_WAKE)) {
                    (void) __futex_syscall3(&cblk->mFutex,
                            mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, 1);
                }
            }
            front = rear;
        }
    } else {
        front = android_atomic_acquire_load(&cblk->u.mStreaming.mFront);
        rear = cblk->u.mStreaming.mRear;
    }
    ssize_t filled = rear - front;
    // pipe should not already be overfull
    if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
        ALOGE("Shared memory control block is corrupt (filled=%zd); shutting down", filled);
        mIsShutdown = true;
    }
    if (mIsShutdown) {
        goto no_init;
    }
    // don't allow filling pipe beyond the nominal size
    size_t availToServer;
    if (mIsOut) {
        availToServer = filled;
        mAvailToClient = mFrameCount - filled;
    } else {
        availToServer = mFrameCount - filled;
        mAvailToClient = filled;
    }
    // 'availToServer' may be non-contiguous, so return only the first contiguous chunk
    size_t part1;
    // Use the modulo operator instead of the mask operator:
    // x &= (y - 1) equals x % y only when y is a power of two, so the modulo
    // form generalizes to arbitrary frame counts.
    // This is needed for tunnel voip and tunnel encode usecases.
    if (mIsOut) {
        front %= mFrameCountP2;
        part1 = mFrameCountP2 - front;
    } else {
        rear %= mFrameCountP2;
        part1 = mFrameCountP2 - rear;
    }
    if (part1 > availToServer) {
        part1 = availToServer;
    }
    size_t ask = buffer->mFrameCount;
    if (part1 > ask) {
        part1 = ask;
    }
    // is assignment redundant in some cases?
    buffer->mFrameCount = part1;
    buffer->mRaw = part1 > 0 ?
            &((char *) mBuffers)[(mIsOut ? front : rear) * mFrameSize] : NULL;
    buffer->mNonContig = availToServer - part1;
    // After flush(), allow releaseBuffer() on a previously obtained buffer;
    // see "Acknowledge any pending flush()" in audioflinger/Tracks.cpp.
    if (!ackFlush) {
        mUnreleased = part1;
    }
    return part1 > 0 ? NO_ERROR : WOULD_BLOCK;
    }
no_init:
    buffer->mFrameCount = 0;
    buffer->mRaw = NULL;
    buffer->mNonContig = 0;
    mUnreleased = 0;
    return NO_INIT;
}
status_t ServerProxy::obtainBuffer(Buffer* buffer, bool ackFlush)
{
    LOG_ALWAYS_FATAL_IF(buffer == NULL || buffer->mFrameCount == 0);
    if (mIsShutdown) {
        goto no_init;
    }
    {
    audio_track_cblk_t* cblk = mCblk;
    // compute number of frames available to write (AudioTrack) or read (AudioRecord),
    // or use previous cached value from framesReady(), with added barrier if it omits.
    int32_t front;
    int32_t rear;
    // See notes on barriers at ClientProxy::obtainBuffer()
    if (mIsOut) {
        int32_t flush = cblk->u.mStreaming.mFlush;
        rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
        front = cblk->u.mStreaming.mFront;
        if (flush != mFlush) {
            // effectively obtain then release whatever is in the buffer
            const size_t overflowBit = mFrameCountP2 << 1;
            const size_t mask = overflowBit - 1;
            int32_t newFront = (front & ~mask) | (flush & mask);
            ssize_t filled = rear - newFront;
            if (filled >= (ssize_t)overflowBit) {
                // front and rear offsets span the overflow bit of the p2 mask
                // so rebasing newFront on the front offset is off by the overflow bit.
                // adjust newFront to match rear offset.
                ALOGV("flush wrap: filled %zx >= overflowBit %zx", filled, overflowBit);
                newFront += overflowBit;
                filled -= overflowBit;
            }
            // Rather than shutting down on a corrupt flush, just treat it as a full flush
            if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
                ALOGE("mFlush %#x -> %#x, front %#x, rear %#x, mask %#x, newFront %#x, "
                        "filled %zd=%#x",
                        mFlush, flush, front, rear, (unsigned)mask, newFront,
                        filled, (unsigned)filled);
                newFront = rear;
            }
            mFlush = flush;
            android_atomic_release_store(newFront, &cblk->u.mStreaming.mFront);
            // There is no danger from a false positive, so err on the side of caution
            if (true /*front != newFront*/) {
                int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
                if (!(old & CBLK_FUTEX_WAKE)) {
                    (void) syscall(__NR_futex, &cblk->mFutex,
                            mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, 1);
                }
            }
            front = newFront;
        }
    } else {
        front = android_atomic_acquire_load(&cblk->u.mStreaming.mFront);
        rear = cblk->u.mStreaming.mRear;
    }
    ssize_t filled = rear - front;
    // pipe should not already be overfull
    if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
        ALOGE("Shared memory control block is corrupt (filled=%zd); shutting down", filled);
        mIsShutdown = true;
    }
    if (mIsShutdown) {
        goto no_init;
    }
    // don't allow filling pipe beyond the nominal size
    size_t availToServer;
    if (mIsOut) {
        availToServer = filled;
        mAvailToClient = mFrameCount - filled;
    } else {
        availToServer = mFrameCount - filled;
        mAvailToClient = filled;
    }
    // 'availToServer' may be non-contiguous, so return only the first contiguous chunk
    size_t part1;
    if (mIsOut) {
        front &= mFrameCountP2 - 1;
        part1 = mFrameCountP2 - front;
    } else {
        rear &= mFrameCountP2 - 1;
        part1 = mFrameCountP2 - rear;
    }
    if (part1 > availToServer) {
        part1 = availToServer;
    }
    size_t ask = buffer->mFrameCount;
    if (part1 > ask) {
        part1 = ask;
    }
    // is assignment redundant in some cases?
    buffer->mFrameCount = part1;
    buffer->mRaw = part1 > 0 ?
            &((char *) mBuffers)[(mIsOut ? front : rear) * mFrameSize] : NULL;
    buffer->mNonContig = availToServer - part1;
    // After flush(), allow releaseBuffer() on a previously obtained buffer;
    // see "Acknowledge any pending flush()" in audioflinger/Tracks.cpp.
    if (!ackFlush) {
        mUnreleased = part1;
    }
    return part1 > 0 ? NO_ERROR : WOULD_BLOCK;
    }
no_init:
    buffer->mFrameCount = 0;
    buffer->mRaw = NULL;
    buffer->mNonContig = 0;
    mUnreleased = 0;
    return NO_INIT;
}
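// Worked example of the wrap adjustment above, with illustrative numbers:
// suppose mFrameCountP2 = 0x1000, so overflowBit = 0x2000 and mask = 0x1fff.
// With front = 0x3f00 and a flush word carrying offset 0x0100:
//   newFront = (0x3f00 & ~0x1fff) | 0x0100 = 0x2100
// and with rear = 0x4200, filled = 0x4200 - 0x2100 = 0x2100 >= overflowBit,
// so newFront += 0x2000 -> 0x4100 and filled -= 0x2000 -> 0x0100, which is
// once again a sane fill level (0 <= filled <= mFrameCount).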
/*
 * Entry point for JDWP thread.  The thread was created through the VM
 * mechanisms, so there is a java/lang/Thread associated with us.
 */
static void* jdwpThreadStart(void* arg)
{
    JdwpState* state = (JdwpState*) arg;

    LOGV("JDWP: thread running\n");

    /*
     * Finish initializing "state", then notify the creating thread that
     * we're running.
     */
    state->debugThreadHandle = dvmThreadSelf()->handle;
    state->run = true;
    android_atomic_release_store(true, &state->debugThreadStarted);

    dvmDbgLockMutex(&state->threadStartLock);
    dvmDbgCondBroadcast(&state->threadStartCond);
    dvmDbgUnlockMutex(&state->threadStartLock);

    /* set the thread state to VMWAIT so GCs don't wait for us */
    dvmDbgThreadWaiting();

    /*
     * Loop forever if we're in server mode, processing connections.  In
     * non-server mode, we bail out of the thread when the debugger drops
     * us.
     *
     * We broadcast a notification when a debugger attaches, after we
     * successfully process the handshake.
     */
    while (state->run) {
        bool first;

        if (state->params.server) {
            /*
             * Block forever, waiting for a connection.  To support the
             * "timeout=xxx" option we'll need to tweak this.
             */
            if (!dvmJdwpAcceptConnection(state))
                break;
        } else {
            /*
             * If we're not acting as a server, we need to connect out to the
             * debugger.  To support the "timeout=xxx" option we need to
             * have a timeout if the handshake reply isn't received in a
             * reasonable amount of time.
             */
            if (!dvmJdwpEstablishConnection(state)) {
                /* wake anybody who was waiting for us to succeed */
                dvmDbgLockMutex(&state->attachLock);
                dvmDbgCondBroadcast(&state->attachCond);
                dvmDbgUnlockMutex(&state->attachLock);
                break;
            }
        }

        /* prep debug code to handle the new connection */
        dvmDbgConnected();

        /* process requests until the debugger drops */
        first = true;
        while (true) {
            // sanity check -- shouldn't happen?
            if (dvmThreadSelf()->status != THREAD_VMWAIT) {
                LOGE("JDWP thread no longer in VMWAIT (now %d); resetting\n",
                    dvmThreadSelf()->status);
                dvmDbgThreadWaiting();
            }

            if (!dvmJdwpProcessIncoming(state))     /* blocking read */
                break;

            if (first && !dvmJdwpAwaitingHandshake(state)) {
                /* handshake worked, tell the interpreter that we're active */
                first = false;

                /* set thread ID; requires object registry to be active */
                state->debugThreadId = dvmDbgGetThreadSelfId();

                /* wake anybody who's waiting for us */
                dvmDbgLockMutex(&state->attachLock);
                dvmDbgCondBroadcast(&state->attachCond);
                dvmDbgUnlockMutex(&state->attachLock);
            }
        }

        dvmJdwpCloseConnection(state);

        if (state->ddmActive) {
            state->ddmActive = false;

            /* broadcast the disconnect; must be in RUNNING state */
            dvmDbgThreadRunning();
            dvmDbgDdmDisconnected();
            dvmDbgThreadWaiting();
        }

        /* release session state, e.g. remove breakpoint instructions */
        dvmJdwpResetState(state);

        /* tell the interpreter that the debugger is no longer around */
        dvmDbgDisconnected();

        /* if we had threads suspended, resume them now */
        dvmUndoDebuggerSuspensions();

        /* if we connected out, this was a one-shot deal */
        if (!state->params.server)
            state->run = false;
    }

    /* back to running, for thread shutdown */
    dvmDbgThreadRunning();

    LOGV("JDWP: thread exiting\n");
    return NULL;
}
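/*
 * Hedged sketch of the creating thread's side, pairing with the release
 * store of debugThreadStarted above; "waitForDebugThread" is a hypothetical
 * helper name, and the real wait loop lives in the JDWP startup path.
 */
static void waitForDebugThread(JdwpState* state)
{
    dvmDbgLockMutex(&state->threadStartLock);
    while (!state->debugThreadStarted) {
        /* the broadcast in jdwpThreadStart() releases this wait */
        dvmDbgCondWait(&state->threadStartCond, &state->threadStartLock);
    }
    dvmDbgUnlockMutex(&state->threadStartLock);
}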
// Set whether tracing is enabled in this process.  This is used to prevent
// the Zygote process from tracing.
void atrace_set_tracing_enabled(bool enabled)
{
    android_atomic_release_store(enabled ? 1 : 0, &atrace_is_enabled);
    atrace_update_tags();
}
ssize_t MonoPipe::write(const void *buffer, size_t count)
{
    if (CC_UNLIKELY(!mNegotiated)) {
        return NEGOTIATE;
    }
    size_t totalFramesWritten = 0;
    while (count > 0) {
        // can't return a negative value, as we already checked for !mNegotiated
        size_t avail = availableToWrite();
        size_t written = avail;
        if (CC_LIKELY(written > count)) {
            written = count;
        }
        size_t rear = mRear & (mMaxFrames - 1);
        size_t part1 = mMaxFrames - rear;
        if (part1 > written) {
            part1 = written;
        }
        if (CC_LIKELY(part1 > 0)) {
            memcpy((char *) mBuffer + (rear << mBitShift), buffer, part1 << mBitShift);
            if (CC_UNLIKELY(rear + part1 == mMaxFrames)) {
                size_t part2 = written - part1;
                if (CC_LIKELY(part2 > 0)) {
                    memcpy(mBuffer, (char *) buffer + (part1 << mBitShift),
                            part2 << mBitShift);
                }
            }
            android_atomic_release_store(written + mRear, &mRear);
            totalFramesWritten += written;
        }
        if (!mWriteCanBlock || mIsShutdown) {
            break;
        }
        count -= written;
        buffer = (char *) buffer + (written << mBitShift);
        // Simulate blocking I/O by sleeping at different rates, depending on a throttle.
        // The throttle tries to keep the mean pipe depth near the setpoint, with a slight jitter.
        uint32_t ns;
        if (written > 0) {
            size_t filled = (mMaxFrames - avail) + written;
            // FIXME cache these values to avoid re-computation
            if (filled <= mSetpoint / 2) {
                // pipe is (nearly) empty, fill quickly
                ns = written * ( 500000000 / Format_sampleRate(mFormat));
            } else if (filled <= (mSetpoint * 3) / 4) {
                // pipe is below setpoint, fill at slightly faster rate
                ns = written * ( 750000000 / Format_sampleRate(mFormat));
            } else if (filled <= (mSetpoint * 5) / 4) {
                // pipe is at setpoint, fill at nominal rate
                ns = written * (1000000000 / Format_sampleRate(mFormat));
            } else if (filled <= (mSetpoint * 3) / 2) {
                // pipe is above setpoint, fill at slightly slower rate
                ns = written * (1150000000 / Format_sampleRate(mFormat));
            } else if (filled <= (mSetpoint * 7) / 4) {
                // pipe is overflowing, fill slowly
                ns = written * (1350000000 / Format_sampleRate(mFormat));
            } else {
                // pipe is severely overflowing
                ns = written * (1750000000 / Format_sampleRate(mFormat));
            }
        } else {
            ns = count * (1350000000 / Format_sampleRate(mFormat));
        }
        if (ns > 999999999) {
            ns = 999999999;
        }
        struct timespec nowTs;
        bool nowTsValid = !clock_gettime(CLOCK_MONOTONIC, &nowTs);
        // deduct the elapsed time since previous write() completed
        if (nowTsValid && mWriteTsValid) {
            time_t sec = nowTs.tv_sec - mWriteTs.tv_sec;
            long nsec = nowTs.tv_nsec - mWriteTs.tv_nsec;
            ALOGE_IF(sec < 0 || (sec == 0 && nsec < 0),
                    "clock_gettime(CLOCK_MONOTONIC) failed: was %ld.%09ld but now %ld.%09ld",
                    mWriteTs.tv_sec, mWriteTs.tv_nsec, nowTs.tv_sec, nowTs.tv_nsec);
            if (nsec < 0) {
                --sec;
                nsec += 1000000000;
            }
            if (sec == 0) {
                if ((long) ns > nsec) {
                    ns -= nsec;
                } else {
                    ns = 0;
                }
            }
        }
        if (ns > 0) {
            const struct timespec req = {0, ns};
            nanosleep(&req, NULL);
        }
        // record the time that this write() completed
        if (nowTsValid) {
            mWriteTs = nowTs;
            if ((mWriteTs.tv_nsec += ns) >= 1000000000) {
                mWriteTs.tv_nsec -= 1000000000;
                ++mWriteTs.tv_sec;
            }
        }
        mWriteTsValid = nowTsValid;
    }
    mFramesWritten += totalFramesWritten;
    return totalFramesWritten;
}
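// Worked example of the throttle above, at a 48 kHz sample rate: writing 480
// frames with the pipe sitting at its setpoint picks the nominal branch and
// sleeps about 480 * (1000000000 / 48000) ns = 10 ms, i.e. exactly the
// duration of the audio written, so the writer paces itself to real time;
// the other branches deliberately run a little fast or slow to steer the
// mean pipe depth back toward mSetpoint.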
ssize_t unwind_backtrace_thread(pid_t tid, backtrace_frame_t* backtrace,
        size_t ignore_depth, size_t max_depth) {
    if (tid == gettid()) {
        return unwind_backtrace(backtrace, ignore_depth + 1, max_depth);
    }

    ALOGV("Unwinding thread %d from thread %d.", tid, gettid());

    // TODO: there's no tgkill(2) on Mac OS, so we'd either need the
    // mach_port_t or the pthread_t rather than the tid.
#if defined(CORKSCREW_HAVE_ARCH) && !defined(__APPLE__)
    struct sigaction act;
    struct sigaction oact;
    memset(&act, 0, sizeof(act));
    act.sa_sigaction = unwind_backtrace_thread_signal_handler;
    act.sa_flags = SA_RESTART | SA_SIGINFO | SA_ONSTACK;
    sigemptyset(&act.sa_mask);

    pthread_mutex_lock(&g_unwind_signal_mutex);
    map_info_t* milist = acquire_my_map_info_list();

    ssize_t frames = -1;
    if (!sigaction(SIGURG, &act, &oact)) {
        g_unwind_signal_state.map_info_list = milist;
        g_unwind_signal_state.backtrace = backtrace;
        g_unwind_signal_state.ignore_depth = ignore_depth;
        g_unwind_signal_state.max_depth = max_depth;
        g_unwind_signal_state.returned_frames = 0;
        android_atomic_release_store(tid, &g_unwind_signal_state.tid_state);

        // Signal the specific thread that we want to dump.
        int32_t tid_state = tid;
        if (tgkill(getpid(), tid, SIGURG)) {
            ALOGV("Failed to send SIGURG to thread %d.", tid);
        } else {
            // Wait for the other thread to start dumping the stack, or time out.
            int wait_millis = 250;
            for (;;) {
                tid_state = android_atomic_acquire_load(&g_unwind_signal_state.tid_state);
                if (tid_state != tid) {
                    break;
                }
                if (wait_millis--) {
                    ALOGV("Waiting for thread %d to start dumping the stack...", tid);
                    usleep(1000);
                } else {
                    ALOGV("Timed out waiting for thread %d to start dumping the stack.",
                            tid);
                    break;
                }
            }
        }

        // Try to cancel the dump if it has not started yet.
        if (tid_state == tid) {
            if (!android_atomic_acquire_cas(tid, STATE_CANCEL,
                    &g_unwind_signal_state.tid_state)) {
                ALOGV("Canceled thread %d stack dump.", tid);
                tid_state = STATE_CANCEL;
            } else {
                tid_state = android_atomic_acquire_load(&g_unwind_signal_state.tid_state);
            }
        }

        // Wait indefinitely for the dump to finish or be canceled.
        // We cannot apply a timeout here because the other thread is accessing state that
        // is owned by this thread, such as milist.  It should not take very
        // long to take the dump once started.
        while (tid_state == STATE_DUMPING) {
            ALOGV("Waiting for thread %d to finish dumping the stack...", tid);
            usleep(1000);
            tid_state = android_atomic_acquire_load(&g_unwind_signal_state.tid_state);
        }

        if (tid_state == STATE_DONE) {
            frames = g_unwind_signal_state.returned_frames;
        }

        sigaction(SIGURG, &oact, NULL);
    }

    release_my_map_info_list(milist);
    pthread_mutex_unlock(&g_unwind_signal_mutex);
    return frames;
#else
    return -1;
#endif
}
template<typename T> bool StateQueue<T>::push(StateQueue<T>::block_t block)
{
#define PUSH_BLOCK_ACK_NS    3000000L   // 3 ms: time between checks for ack in push()
                                        // FIXME should be configurable
    static const struct timespec req = {0, PUSH_BLOCK_ACK_NS};

    ALOG_ASSERT(!mInMutation, "push() called when in a mutation");

#ifdef STATE_QUEUE_DUMP
    if (block == BLOCK_UNTIL_ACKED) {
        mMutatorDump->mPushAck++;
    }
#endif

    if (mIsDirty) {
#ifdef STATE_QUEUE_DUMP
        mMutatorDump->mPushDirty++;
#endif
        // wait for prior push to be acknowledged
        if (mExpecting != NULL) {
#ifdef STATE_QUEUE_DUMP
            unsigned count = 0;
#endif
            for (;;) {
                const T *ack = (const T *) mAck;    // no additional barrier needed
                if (ack == mExpecting) {
                    // unnecessary as we're about to rewrite
                    //mExpecting = NULL;
                    break;
                }
                if (block == BLOCK_NEVER) {
                    return false;
                }
#ifdef STATE_QUEUE_DUMP
                if (count == 1) {
                    mMutatorDump->mBlockedSequence++;
                }
                ++count;
#endif
                nanosleep(&req, NULL);
            }
#ifdef STATE_QUEUE_DUMP
            if (count > 1) {
                mMutatorDump->mBlockedSequence++;
            }
#endif
        }

        // publish
#ifdef __LP64__
        android_atomic_release_store64((int64_t) mMutating, (volatile int64_t *) &mNext);
#else
        android_atomic_release_store((int32_t) mMutating, (volatile int32_t *) &mNext);
#endif
        mExpecting = mMutating;

        // copy with circular wraparound
        if (++mMutating >= &mStates[kN]) {
            mMutating = &mStates[0];
        }
        *mMutating = *mExpecting;
        mIsDirty = false;
    }

    // optionally wait for this push or a prior push to be acknowledged
    if (block == BLOCK_UNTIL_ACKED) {
        if (mExpecting != NULL) {
#ifdef STATE_QUEUE_DUMP
            unsigned count = 0;
#endif
            for (;;) {
                const T *ack = (const T *) mAck;    // no additional barrier needed
                if (ack == mExpecting) {
                    mExpecting = NULL;
                    break;
                }
#ifdef STATE_QUEUE_DUMP
                if (count == 1) {
                    mMutatorDump->mBlockedSequence++;
                }
                ++count;
#endif
                nanosleep(&req, NULL);
            }
#ifdef STATE_QUEUE_DUMP
            if (count > 1) {
                mMutatorDump->mBlockedSequence++;
            }
#endif
        }
    }

    return true;
}
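// Hedged sketch of the observer side that pairs with the release store in
// push(): the observer takes the new state with an acquire load of mNext and
// acknowledges it by publishing the same pointer through mAck.  This mirrors
// StateQueue<T>::poll() in spirit, but the body here is illustrative, and it
// assumes a 64-bit acquire load matching the release_store64 used above.
template<typename T> const T* StateQueue<T>::poll()
{
#ifdef __LP64__
    const T *next = (const T *) android_atomic_acquire_load64((volatile int64_t *) &mNext);
#else
    const T *next = (const T *) android_atomic_acquire_load((volatile int32_t *) &mNext);
#endif
    if (next != mCurrent) {
        mAck = next;    // the mutator only compares pointers, so a plain store suffices
        mCurrent = next;
    }
    return next;
}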