/**
 * Given a thread ID, return a handle to the corresponding DThread. If the returned
 * pointer is non-NULL, it is the responsibility of the caller to close the handle.
 *
 * @pre caller must be in thread critical section
 * @post if a non-NULL value is returned then a handle to the thread has been
 *       opened on the caller's behalf
 * @param aThreadId ID of the thread to return a handle for
 * @return a DThread* to the appropriate thread, or NULL if a handle could not be
 *         opened to the specified thread
 */
DThread* DebugUtils::OpenThreadHandle(TUint64 aThreadId)
	{
	__ASSERT_CRITICAL;
	LOG_MSG2("DebugUtils::OpenThreadHandle(0x%lx)", aThreadId);
	DObjectCon& threads = *Kern::Containers()[EThread];	// Get the container holding threads
	threads.Wait();	// Obtain the container mutex so the list doesn't get changed under us
	DThread* thread = Kern::ThreadFromId(aThreadId);
	// Open a handle to the thread so that it doesn't exit while we are processing
	if (thread)
		{
		// if opening a handle fails then set thread to NULL
		if (KErrNone != thread->Open())
			{
			LOG_MSG2("\tCould not open handle to thread %d", (TUint32)aThreadId);
			thread = NULL;
			}
		}
	else
		{
		LOG_MSG2("\tThread with ID %d is NULL", (TUint32)aThreadId);
		}
	threads.Signal();	// Release the container mutex
	return thread;
	}
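// A minimal caller sketch (not from the original source) showing how the contract
// above is typically honoured. The helper name and its argument are assumptions;
// the ThreadEnterCS()/ThreadLeaveCS() bracketing and the Close(NULL) that balances
// the Open() made on the caller's behalf follow the documented pre/post conditions.
void ExampleUseOfOpenThreadHandle(TUint64 aThreadId)
	{
	NKern::ThreadEnterCS();		// satisfy the @pre: enter a thread critical section
	DThread* thread = DebugUtils::OpenThreadHandle(aThreadId);
	if (thread)
		{
		// ... the open reference keeps the thread alive while it is inspected ...
		thread->Close(NULL);	// balance the Open() performed on our behalf
		}
	NKern::ThreadLeaveCS();
	}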
//
// The implementation of RBMDriver::SetAbsPriority() call.
//
TInt DBMLChannel::SetAbsPriority(TInt aThreadHandle, TInt aNewPrio, TInt* aOldPrio)
	{
	NKern::LockSystem();
	//
	// Under the system lock find the DThread object and increment its ref-count (i.e. Open())
	//
	DThread* thr = (DThread*) Kern::ObjectFromHandle(&Kern::CurrentThread(), aThreadHandle, EThread);
	TInt r;
	if (!thr)
		{
		r = EBadHandle;
		}
	else
		{
		r = thr->Open();
		}
	//
	// Now it's safe to release the system lock and to work with the object.
	//
	NKern::ThreadEnterCS();
	NKern::UnlockSystem();
	if (r != KErrNone)
		{
		NKern::ThreadLeaveCS();
		return r;
		}
	*aOldPrio = thr->iDefaultPriority;
	Kern::SetThreadPriority(aNewPrio, thr);
	//
	// Work is done - close the object.
	//
	thr->Close(NULL);
	NKern::ThreadLeaveCS();
	return KErrNone;
	}
void DPowerManager::NotifyWakeupEvent(TInt aReason)
	{ // private
	NKern::LockSystem();
	DThread* client = iClient;
	if (!client)
		{
		NKern::UnlockSystem();
		return;
		}
	// Detach the client under the system lock and queue completion of its request;
	// the reference taken when the request was made is dropped after the lock is released.
	iClient = NULL;
	Kern::QueueRequestComplete(client, iRequest, aReason);
	NKern::UnlockSystem();
	client->Close(NULL);
	}
void DThread_Wrapper( void *object )
{
	DThread *self = (DThread*)object;

	self->running = 1;
	/* Lazily allocate the thread-specific data block on first entry and
	 * remember which DThread owns it: */
	if( self->thdSpecData == NULL ){
		self->thdSpecData = (DThreadData*)GlobalAlloc( GPTR, sizeof(DThreadData) );
		self->thdSpecData->thdObject = self;
	}
	self->thdSpecData->state = 0;
	/* Make the data reachable from this native thread via TLS: */
	TlsSetValue( thdSpecKey, self->thdSpecData );

	/* Run the task, then terminate the thread: */
	if( self->taskFunc ) self->taskFunc( self->taskArg );
	DThread_Exit( self );
}
void DThread_Wrapper( void *object )
{
	DThread *self = (DThread*)object;

	self->running = 1;
	if( self->thdSpecData == NULL ){
		self->thdSpecData = DThreadData_New();
		self->thdSpecData->thdObject = self;
	}
	self->state = 0;
	self->vmpause = 0;
	self->vmpaused = 0;
	self->vmstop = 0;
	self->vmstopped = 0;

	if( self->taskFunc ) self->taskFunc( self->taskArg );
	DThread_Exit( self );
}
TInt DKdaChannel::GetThreadInfo(TUint aTid, TAny* aInfo)
	{
	// Open a reference on the thread with the given ID so it can't exit under us
	TInt r = OpenTempObject(aTid, EThread);
	if (r == KErrNone)
		{
		DThread* pT = (DThread*)iTempObj;
		// Gather the debug information and copy it to the user-side buffer
		TDbgThreadInfo info;
		pT->FullName(info.iFullName);
		info.iPid = pT->iOwningProcess->iId;
		info.iStackBase = pT->iUserStackRunAddress;
		info.iStackSize = pT->iUserStackSize;
		info.iExitCategory = pT->iExitCategory;
		info.iExitReason = pT->iExitReason;
		GetThreadCpuInfo(pT, info.iCpu);
		umemput32(aInfo, &info, sizeof(info));
		CloseTempObject();
		}
	return r;
	}
static void* DThread_Wrapper( void *p )
{
	DThread *self = (DThread*) p;

	/* Lazily allocate the thread-specific data block on first entry: */
	if( self->thdSpecData == NULL ){
		self->thdSpecData = (DThreadData*)dao_calloc( 1, sizeof(DThreadData) );
		self->thdSpecData->thdObject = self;
	}
	self->thdSpecData->state = 0;
	pthread_setspecific( thdSpecKey, self->thdSpecData );

	if( self->cleaner ){
		/* Register the cleanup handler so it also runs if the task is cancelled: */
		pthread_cleanup_push( self->cleaner, self->taskArg );
		if( self->taskFunc ) self->taskFunc( self->taskArg );
		pthread_cleanup_pop( 1 );
	}else{
		if( self->taskFunc ) self->taskFunc( self->taskArg );
	}
	pthread_exit( 0 );
	return NULL;
}
void DumpExcInfoX(TArmExcInfo& a)
	{
	DumpExcInfo(a);
	NThread* nthread = NCurrentThread();
	if (nthread == NULL)
		Kern::Printf("No current thread");
	else
		{
		DThread* thread = Kern::NThreadToDThread(NCurrentThread());
		if (thread)
			{
			TFullName thread_name;
			thread->TraceAppendFullName(thread_name, EFalse);
			Kern::Printf("Thread full name=%S", &thread_name);
			Kern::Printf("Thread ID=%d, KernCSLocked=%d", TheCurrentThread->iId, NKern::KernelLocked());
			}
		else
			Kern::Printf("Thread N/A, KernCSLocked=%d", NKern::KernelLocked());
		}
	}
/**
 * Opens a reference to the first thread of the given process. Returns NULL if
 * there are no threads remaining in the process or if the thread couldn't be opened.
 *
 * @pre Caller must be in thread context, in critical section, no fast mutexes held.
 * @post if result is non-NULL caller is responsible for closing the handle
 * @param aProcess The process whose first thread is to be opened
 * @return an Open()ed pointer to the first thread in the process, or NULL.
 */
DThread* DebugUtils::OpenFirstThreadForProcess(DProcess* aProcess)
	{
	__ASSERT_CRITICAL;
	// Copied from memspy's DMemSpyDriverOSAdaptionDProcess::OpenFirstThread()
	// It appears that the system lock needs to be held while manipulating the iThreadQ
	DThread* result = NULL;
	NKern::LockSystem();
	// We don't use DProcess::FirstThread() as that doesn't appear to do any checking
	// of whether the list is empty, ie if there are no threads at all
	SDblQueLink* threadLink = aProcess->iThreadQ.First();
	if (threadLink != NULL && threadLink != &aProcess->iThreadQ.iA)
		{
		result = _LOFF(threadLink, DThread, iProcessLink);
		if (result->Open() != KErrNone)
			{
			result = NULL;
			}
		}
	NKern::UnlockSystem();
	return result;
	}
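// A minimal caller sketch (not from the original source): the surrounding helper is
// an assumption, but pairing the Open()ed result with Close(NULL), inside a thread
// critical section as the preconditions require, follows the contract documented above.
void ExampleUseOfOpenFirstThread(DProcess* aProcess)
	{
	NKern::ThreadEnterCS();		// @pre: thread context, critical section, no fast mutexes held
	DThread* first = DebugUtils::OpenFirstThreadForProcess(aProcess);
	if (first)
		{
		// ... inspect the thread while the open reference keeps it alive ...
		first->Close(NULL);		// caller is responsible for closing the handle
		}
	NKern::ThreadLeaveCS();
	}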
static void* DThread_Wrapper( void *p )
{
	DThread *self = (DThread*) p;

	if( self->thdSpecData == NULL ){
		self->thdSpecData = DThreadData_New();
		self->thdSpecData->thdObject = self;
	}
	self->state = 0;
	self->vmpause = 0;
	self->vmpaused = 0;
	self->vmstop = 0;
	self->vmstopped = 0;

	if( self->cleaner ){
		pthread_cleanup_push( self->cleaner, self->taskArg );
		if( self->taskFunc ) self->taskFunc( self->taskArg );
		pthread_cleanup_pop( 1 );
	}else{
		if( self->taskFunc ) self->taskFunc( self->taskArg );
	}
	pthread_exit( 0 );
	return NULL;
}
void DPowerManager::RequestWakeupEventNotification(TRequestStatus* aStatus)
	{ // called by ExecHandler
	__KTRACE_OPT(KPOWER,Kern::Printf("PowerManger::RequestWakeupEventNotification()"));
	Lock();			// we acquire this lock to avoid new requests while in PowerDown
	NKern::LockSystem();
	if (iClient || iRequest->SetStatus(aStatus) != KErrNone)
		Kern::RequestComplete(aStatus, KErrInUse);
	else
		{
		iClient = TheCurrentThread;
		iClient->Open();
		}
	NKern::UnlockSystem();
	Unlock();
	__KTRACE_OPT(KPOWER,Kern::Printf("<PowerManger::RequestWakeupEventNotification()"));
	}
inline TInt DPriSamplerImpl::EncodeChunkName(DThread& t)
	{
	// the size of the following name is in the first byte
	TUint8* size = &sample[0];
	*size = 0;
	this->sampleDescriptor.Zero();

	t.TraceAppendFullName(this->sampleDescriptor, false);
	*size += this->sampleDescriptor.Size();

	// copy the 4 bytes from the thread id field
	this->sampleDescriptor.Append((TUint8*)&(t.iId), sizeof(TUint));
	*size += sizeof(TUint);

	// the size is the descriptor length + the size field
	LOGSTRING2("Name size - %d", *size);
	return ((TInt)(*size)) + 1;
	}
/**
Handle the requests for this channel.

@param aFunction	The operation the LDD should perform.
@param a1			The first argument for the operation.
@param a2			The second argument for the operation.
@return KErrNone on success or one of the system wide error codes.
*/
TInt DDefragChannel::Request(TInt aFunction, TAny* a1, TAny* a2)
	{
	TInt r = KErrNone;
	NKern::ThreadEnterCS();

	Kern::SemaphoreWait(*iDefragSemaphore);
	if (!iDefragDfcFree && aFunction != RDefragChannel::EControlGeneralDefragDfcComplete)
		{// Only allow a single defrag operation at a time.
		r = KErrInUse;
		goto exit;
		}

	switch (aFunction)
		{
		case RDefragChannel::EControlGeneralDefragDfc:
			// Queue a defrag operation so that on completion it queues a
			// DFC on this driver.
			iRequestThread = &Kern::CurrentThread();
			iRequestThread->Open();

			// Open a reference on this channel to stop the destructor running before
			// the defrag request has completed.
			Open();

			r = iCompleteReq->SetStatus((TRequestStatus*)a1);
			if (r == KErrNone)
				r = iDefragReq.DefragRam(&iDefragCompleteDfc, KDefragRamThreadPriority);
			if (r != KErrNone)
				{// defrag operation didn't start so close all opened handles
				AsyncClose();
				iRequestThread->AsyncClose();
				iRequestThread = NULL;
				}
			else
				iDefragDfcFree = EFalse;
			break;

		case RDefragChannel::EControlGeneralDefragDfcComplete:
			if (iRequestThread != NULL)
				{// The defrag dfc hasn't completed so this shouldn't have been invoked.
				r = KErrGeneral;
				}
			else
				{
				iDefragDfcFree = ETrue;
				}
			break;

		case RDefragChannel::EControlGeneralDefragSem:
			{// Queue a defrag operation so that it will signal a fast semaphore once
			// it has completed.
			NFastSemaphore sem;
			NKern::FSSetOwner(&sem, 0);
			r = iDefragReq.DefragRam(&sem, KDefragRamThreadPriority);

			if (r != KErrNone)
				{// Error occurred attempting to queue the defrag operation.
				break;
				}

			// Defrag operation has now been queued so wait for it to finish.
			// Could do some extra kernel side work here before waiting on the
			// semaphore.
			NKern::FSWait(&sem);
			r = iDefragReq.Result();
			}
			break;

		case RDefragChannel::EControlGeneralDefrag:
			// Synchronously perform a defrag.
			{
			r = iDefragReq.DefragRam(KDefragRamThreadPriority);
			}
			break;

		case RDefragChannel::EControlAllocLowestZone:
			// Allocate from the lowest preference zone
			r = DoAllocLowestZone();
			break;

		case RDefragChannel::EControlClaimLowestZone:
			// Claims the lowest preference zone
			r = DoClaimLowestZone();
			break;

		case RDefragChannel::EControlCloseChunk:
			// Have finished with the chunk so close it then free the RAM mapped by it
			r = DoChunkClose();
			TRACE( if (r != KErrNone) {Kern::Printf("ChunkClose returns %d", r);});
			break;

		default:
			r = KErrNotSupported;
			break;
		}

exit:
	// Balance the SemaphoreWait() and ThreadEnterCS() performed on entry.
	Kern::SemaphoreSignal(*iDefragSemaphore);
	NKern::ThreadLeaveCS();
	return r;
	}