// Destructor: tears down the mutex guarding the name->mutex map, then
// destroys every mutex still registered in the map.
avtExecutionManager::~avtExecutionManager()
{
    // The map's own guard goes first; the meaning of the 'false' flag is
    // defined by this MutexDestroy overload (declared elsewhere).
    MutexDestroy( &mutexMapLock, false );

    // Destroy any mutexes in the list.
    std::map<MUTEX_ID, MUTEX *>::iterator it;
    for(it=mutexMap.begin(); it != mutexMap.end(); ++it)
    {
        MutexDestroy( it->second );
    }
}
//////////////////////////////////////////////////////////// /// Destroy all users //////////////////////////////////////////////////////////// bool UsersDestroy() { struct UserData * Iterator = UserList.First; // Remove all users while (Iterator != NULL) { struct UserData * TempUser = Iterator; Iterator = Iterator->Next; // Close the socket MutexLock(&TempUser->MutexData); SocketClose(&TempUser->Connection.Socket); MutexUnlock(&TempUser->MutexData); // Destroy the mutex MutexDestroy(&TempUser->MutexData); // Free it free(TempUser); TempUser = NULL; } return true; }
// Initialise a pool: record the zone size, create the pool's mutex, and
// allocate the first zone (which becomes both first and current zone).
// Returns TRUE on success.  On zone-allocation failure the freshly made
// mutex is destroyed again so the caller sees no half-built pool.
bool PoolCreate(TPool * const poolP,
                uint32_t const zonesize)
{
    TPoolZone * firstZoneP;

    poolP->zonesize = zonesize;

    if (!MutexCreate(&poolP->mutexP))
        return FALSE;

    firstZoneP = PoolZoneAlloc(zonesize);
    if (firstZoneP == NULL) {
        // Undo the mutex before reporting failure.
        MutexDestroy(poolP->mutexP);
        return FALSE;
    }

    poolP->firstzone = firstZoneP;
    poolP->currentzone = firstZoneP;
    return TRUE;
}
//////////////////////////////////////////////////////////// /// Close (destroy) the window. //////////////////////////////////////////////////////////// void WindowClose() { // Destroy the custom icon, if any if (WindowIcon) DestroyIcon(WindowIcon); if (!WindowCallback) { // Destroy the window if (WindowhWnd) DestroyWindow(WindowhWnd); // Unregister window class if we were the last window if (HasUnicodeSupport()) { UnregisterClassW(WindowClassNameW, GetModuleHandle(NULL)); } else { UnregisterClassA(WindowClassNameA, GetModuleHandle(NULL)); } } else { // The window is external : remove the hook on its message callback SetWindowLongPtr(WindowhWnd, GWLP_WNDPROC, WindowCallback); } // Set window open state to false WindowIsOpened = false; // Destroy the event mutex MutexDestroy(&WindowEventMutex); }
// Destroy the mutex registered under the given string ID.
//
// RemoveMutex() (declared elsewhere) presumably detaches the mutex from
// the bookkeeping map and returns it; the pointer-taking MutexDestroy
// overload then releases it.  When tPool is NULL no mutexes were ever
// created, so this is a no-op.
void avtExecutionManager::MutexDestroy( const MUTEX_ID stringID )
{
    if (tPool == NULL)
        return;

    MutexDestroy( RemoveMutex(stringID) );
}
// Event handler for the mpd module.
//
// event - lifecycle event being dispatched (init / uninit / arg parsing)
// argi  - for MW_PARSE_ARGS: the argument count; otherwise event-specific
// argp  - for MW_PARSE_ARGS: the argv vector; otherwise event-specific
//
// Returns 0 in all cases.
int ehMpd(MW_EVENT event, int argi, void* argp)
{
	switch (event) {
	case MW_INIT:
		if (mpConsoleMutex) return 0;	// already inited
		memset(&mpx,0,sizeof(mpx));
		MutexCreate(&mpConsoleMutex);
		if (loopclip) ThreadCreate(&mpThreadHandle, mpThread, 0);
		break;
	case MW_UNINIT:
		MutexDestroy(&mpConsoleMutex);
		mpClose();
		break;
	case MW_PARSE_ARGS:
		{
			int i = 0;
			char** argv = (char**)argp;
			for (i = 0; i < argi; i++) {
				if (!strcmp(argv[i], "--mploop")) {
					// BUGFIX: only consume the option value if one exists;
					// previously "--mploop" as the final argument read
					// argv[argi], one element past the end of the vector.
					if (i + 1 < argi) loopclip = argv[++i];
					break;
				} else if (!strcmp(argv[i], "--mpbin")) {
					// BUGFIX: same out-of-bounds guard as above.
					if (i + 1 < argi) mpbin = argv[++i];
				}
			}
		}
		break;
	}
	return 0;
}
// Shut the build queue down: signal quit, wake every worker, join the
// helper threads, destroy per-thread state and release all queue storage.
void BuildQueueDestroy(BuildQueue* queue)
{
    Log(kDebug, "destroying build queue");
    const BuildQueueConfig* config = &queue->m_Config;

    // Flag shutdown under the lock so sleeping workers observe it...
    MutexLock(&queue->m_Lock);
    queue->m_QuitSignalled = true;
    MutexUnlock(&queue->m_Lock);

    // ...then wake them all up.
    CondBroadcast(&queue->m_WorkAvailable);

    for (int i = 0, thread_count = config->m_ThreadCount; i < thread_count; ++i)
    {
        // Thread 0 is never joined — presumably slot 0 is the calling/main
        // thread acting as a worker; confirm against the queue init code.
        if (i > 0)
        {
            Log(kDebug, "joining with build thread %d", i);
            ThreadJoin(queue->m_Threads[i]);
        }

        // Per-thread state is destroyed for every slot, including 0.
        ThreadStateDestroy(&queue->m_ThreadState[i]);
    }

    // Deallocate storage.
    MemAllocHeap* heap = queue->m_Config.m_Heap;
    HeapFree(heap, queue->m_ExpensiveWaitList);
    HeapFree(heap, queue->m_Queue);

    CondDestroy(&queue->m_WorkAvailable);
    MutexDestroy(&queue->m_Lock);

    // Unblock all signals on the main thread.
    SignalHandlerSetCondition(nullptr);
    SignalBlockThread(false);
}
/**
 * Cleanup after a tspinit() has been used.
 *
 * Each teardown step checks its own guard, so a second call is harmless.
 * Releases, in order: the optional mutex (USE_TSP_MUTEX builds only),
 * the output-filename buffer, and the open output file.
 */
void tspshutdown( void )
{
    if ( init )
    {
        // #ifdef GUCEF_CORE_DEBUG_MODE
        // FreeConsole();
        // #endif

        init = 0;

        #ifdef USE_TSP_MUTEX
        MutexDestroy( lock );
        lock = NULL;
        #endif /* USE_TSP_MUTEX ? */
    }

    if ( coutfile )
    {
        free( coutfile );
        coutfile = NULL;
    }

    if ( fptr )
    {
        fclose( fptr );
        fptr = NULL;
    }
}
// Tear the text interface down: delete the curses window, leave curses
// mode, and destroy the interface lock.
// NOTE(review): the function body is guarded by #ifdef BODY_DEF with no
// matching #endif visible in this chunk — presumably it is closed further
// down the file; confirm before restructuring.
void INTDestroy(void)
#ifdef BODY_DEF
{
    delwin(m_pWin);
    endwin();   /* End curses mode */
    MutexDestroy(&m_Lock);
}
// Log destructor: if this instance is currently installed as the global
// default logger, detach it first, then release the timer and mutex it
// owns.
Log::~Log()
{
    const bool wasDefault = (gs_Log == this);
    if (wasDefault)
        LogSetDefault(nullptr);

    TimerDestroy(timer);
    MutexDestroy(mutex);
}
// Stop the worker thread and release everything this module owns:
// the thread handle, the mutex, the client socket, and the networking
// subsystem.
void cleanUp()
{
    threadRun = false;        // ask the thread's loop to exit
    ThreadWait(t);            // block until it has finished
    ThreadDestroy(t);
    MutexDestroy(mutex);
    getClientSocket().close();
    Net::cleanup();
}
// Close the server's log, if it was ever opened: shut the file, destroy
// the associated log mutex, and clear the open flag so a second call is
// a no-op.
static void logClose(struct _TServer * const srvP)
{
    if (!srvP->logfileisopen)
        return;

    FileClose(srvP->logfileP);
    MutexDestroy(srvP->logmutexP);
    srvP->logfileisopen = FALSE;
}
//////////////////////////////////////////////////////////// /// Remove user from the list //////////////////////////////////////////////////////////// void UserRemove(struct UserData * User) { if (UserContains(User)) { struct UserData * UserPtr; // Remove from the list MutexLock(&User->MutexData); // Update the previous node if (User->Previous == NULL) { MutexLock(&UserList.First->MutexData); UserPtr = UserList.First; UserList.First = User->Next; } else { MutexLock(&User->Previous->Next->MutexData); UserPtr = User->Previous->Next; User->Previous->Next = User->Next; } MutexUnlock(&UserPtr->MutexData); // Update the next node if (User->Next == NULL) { MutexLock(&UserList.Last->MutexData); UserPtr = UserList.Last; UserList.Last = User->Previous; } else { MutexLock(&User->Next->Previous->MutexData); UserPtr = User->Next->Previous; User->Next->Previous = User->Previous; } MutexUnlock(&UserPtr->MutexData); // Close the socket SocketClose(&User->Connection.Socket); // Unlock the mutex MutexUnlock(&User->MutexData); // Destroy the mutex MutexDestroy(&User->MutexData); // Delete it free(User); User = NULL; } }
// Destructor: releases the mutex backing this condition variable, then
// (on POSIX builds only) destroys and frees the underlying cond_t that
// _condition points to.
Condition::~Condition(void)
{
    MutexDestroy(_mutex);

#if defined(POSIX_THREADS)
    cond_destroy((cond_t *)_condition);
    free(_condition);
#endif
}
// Unlock the mutex (if this wrapper still holds it) and then destroy it.
// Compiles to a no-op on non-POSIX builds.
void MutexLock::unlock_and_destroy(void)
{
#if defined(POSIX_THREADS)
    // Release first if held — destroying a locked mutex is undefined
    // behaviour under POSIX.
    if (_locked)
    {
        mutex_unlock((mutex_t *)_mutex);
    }
    _locked = 0;

    MutexDestroy(_mutex);
#endif
}
void HeapDestroy(MemAllocHeap* heap) { if (heap->m_Flags & HeapFlags::kThreadSafe) { MutexDestroy(&heap->m_Lock); } #if ENABLED(USE_DLMALLOC) destroy_mspace(heap->m_MemSpace); heap->m_MemSpace = nullptr; #endif }
// Release every zone in the pool's chain, then destroy the pool's mutex.
// Each node's successor is captured before free() so the walk never
// touches freed memory.
void PoolFree(TPool * const poolP)
{
    TPoolZone * curP = poolP->firstzone;

    while (curP != NULL) {
        TPoolZone * const successorP = curP->next;
        free(curP);
        curP = successorP;
    }

    MutexDestroy(poolP->mutexP);
}
/*----------------------------------------------------------------------------*
 * NAME
 *     SchedDeinit
 *
 * DESCRIPTION
 *     Deinitialise the scheduler.  Ignores any instance other than the one
 *     recorded in the module-level 'instance' pointer.  For every thread
 *     slot in use it drains the free message list, frees the task table and
 *     destroys the per-thread queue mutex and event; finally the scheduler's
 *     background mutex, event and the instance itself are released.
 *
 * RETURNS
 *     void
 *
 *----------------------------------------------------------------------------*/
void SchedDeinit(void *data)
{
    SchedulerInstanceType *inst;

    inst = (SchedulerInstanceType *) data;

    /* Only the registered instance may be deinitialised. */
    if (instance != inst)
    {
        return;
    }

    if (inst != NULL)
    {
        uint8 i;

        for (i = 0; i < _SCHED_MAX_SEGMENTS; i ++)
        {
            if (inst->thread[i].inUse)
            {
                MessageQueueEntryType *msg, *msgNext;

                /* Free the chain of recycled message entries; the
                 * successor is captured before each node is freed. */
                for (msg = inst->thread[i].messageFreeList; msg; msg = msgNext)
                {
                    msgNext = msg->next;
                    MemFree(msg);
                    msg = NULL;
                }

                MemFree(inst->thread[i].tasks);
                inst->thread[i].tasks = NULL;

                MutexDestroy(&inst->thread[i].qMutex);
                EventDestroy(&inst->thread[i].eventHandle);
            }
        }

        MutexDestroy(&inst->bgMutex);
        EventDestroy(&inst->eventHandle);

        /* Clear the registration before the memory goes away. */
        instance = NULL;
        MemFree(inst);
        inst = NULL;
    }
}
// // SmaUnload // // This routine should release resources allocated in the SmaLoad function. // // INPUTS: // // None. // // OUTPUTS: // // None. // // RETURNS: // // FSUCCESS - Successful unload of SMA. // // This routine is called at IRQL_PASSIVE_LEVEL. // void SMAUnload(void) { GLOBAL_MEM_LIST *pMemList; _DBG_ENTER_LVL(_DBG_LVL_MAIN, SmaUnload); // there should be no CAs at this point, all VPD must be unloaded // before IbAccess ASSERT(0 == g_Sma->NumCa); ASSERT(NULL == g_Sma->CaObj); // Free up global memory // //Note: Global Grh gets freed automatically here for (pMemList = g_Sma->Bin.MemList; NULL != pMemList; ) { GLOBAL_MEM_LIST *pMemListNext; pMemListNext = (GLOBAL_MEM_LIST*)pMemList->Next; MemoryDeallocate( pMemList->VirtualAddr ); // registered SMP emm if (pMemList->HdrAddr ) MemoryDeallocate( pMemList->HdrAddr ); // SmpBlock memory MemoryDeallocate( pMemList ); pMemList = pMemListNext; } _TRC_UNREGISTER(); // Remove user list if ( NULL != g_Sma->SmUserTbl ) MemoryDeallocate( g_Sma->SmUserTbl ); IbtDestroyNotifyGroup(); // Locks MutexDestroy( &g_Sma->Bin.Mutex ); SpinLockDestroy( &g_Sma->Bin.Lock ); SpinLockDestroy( &g_Sma->RQ.Lock ); SpinLockDestroy( &g_Sma->CaListLock ); MemoryDeallocate( g_Sma ); _DBG_LEAVE_LVL(_DBG_LVL_MAIN); }
/**
 * Cleanup a readers/writers lock.
 *
 * Sets the delete flag (presumably honoured by the lock/unlock routines
 * to refuse new entrants — confirm), polls until every active reader
 * and writer has drained, then frees the data mutex and the lock
 * structure itself.
 */
void rwl_destroy( TRWLock *rwlock )
{
    /* Mark the lock as going away. */
    rwlock->delflag = 1;

    /* Poll until the lock is completely idle. */
    for (;;)
    {
        if ( !rwlock->rcount && !rwlock->wcount )
            break;
        ThreadDelay( 10 );
    }

    /* Release allocated storage. */
    MutexDestroy( rwlock->datalock );
    free( rwlock );
}
/*******************************************************************
 * fm_zonemap_pol_get_lineage_get_zonemap():
 *
 *	Find (and, when missing or stale, load) the hash-table entry for
 *	the zonemap named pszTarget.  On a hit, the entry's revision is
 *	re-checked against the database once 'interval' seconds have
 *	elapsed since the last check; when the matrix changed, the stale
 *	entry is unlinked, freed and reloaded.  On a miss, a new entry is
 *	allocated and loaded from the database.
 *
 *	Caller must already hold HashTableLock.
 *
 *	Returns the entry, or NULL on error / allocation failure (ebufp
 *	is set in those cases).
 *******************************************************************/
static ZonemapHashEntry_t *
fm_zonemap_pol_get_lineage_get_zonemap(
	pcm_context_t *pCtx,
	poid_t *pRoutingPoid,
	poid_t *pBrandPoid,
	ZonemapHashEntry_t **apHashTable,
	const char *pszTarget,
	pin_errbuf_t *ebufp)
{
	int32			nTargetHash;
	int32			nLoadStatus;
	ZonemapHashEntry_t	*pHashEntry;
	ZonemapHashEntry_t	*pBucket;
	ZonemapHashEntry_t	*pCurNode;
	Blob_t			**ppBuffer;
	poid_t			**ppMatrixPoid;
	int32			*pnSearchMode;
	int32			nLoadFromDB;
	int32			nUnlockNode = PIN_BOOLEAN_FALSE;
	time_t			current_time = 0;
	PIN_HEAP_VAR;

	/* The HashTableLock has already been acquired by the time
	 * this procedure is called.
	 */

	pHashEntry = pBucket = NULL;

	/* Propagate an earlier error without doing any work. */
	if (PIN_ERR_IS_ERR(ebufp)) {
		return NULL;
	}

	/* Debug */
	PIN_ERR_LOG_MSG(PIN_ERR_LEVEL_DEBUG,
		"fm_zonemap_pol_get_lineage_get_zonemap starting");

	/*
	 * Hash zone name
	 */
	nTargetHash = fm_zonemap_pol_get_lineage_hash_zone_name(pszTarget);

	/* Get bucket */
	pBucket = apHashTable[nTargetHash];

	/* Assume we don't have to load from DB */
	nLoadFromDB = PIN_BOOLEAN_FALSE;

	/*
	 * Have we already loaded this matrix?
	 */
	pHashEntry = fm_zonemap_pol_get_lineage_find_bucket(pBucket,
		pszTarget, ebufp);
	if (pHashEntry != NULL) {
		/*
		 * do a revision check only if a certain time has
		 * elapsed
		 */
		current_time = pin_virtual_time((time_t*)NULL);
		if(current_time > (pHashEntry->lastUpdate + interval)) {
			/*
			 * reset the timer
			 */
			pHashEntry->lastUpdate = current_time;
			/*
			 * Yes: Has it changed since last load?
			 */
			if (fm_zonemap_pol_get_lineage_rev_changed(pCtx,
				pHashEntry->pMatrixPoid, ebufp)) {
				/*
				 * Yes: Free old data & reload matrix from DB
				 */

				/* Lock the node */
				MutexLock(pHashEntry->Lock);

				/* Remove node from bucket */
				for (pCurNode = pBucket; pCurNode != NULL;
					pCurNode = pCurNode->pNext) {
					if (pCurNode->pNext == pHashEntry) {
						pCurNode->pNext = pHashEntry->pNext;
						break;
					}
				}

				PIN_SET_GLOBAL_HEAP;

				/* Free buffer */
				free(pHashEntry->pBuffer);
				pHashEntry->pBuffer = NULL;

				/* Free matrix poid */
				PIN_POID_DESTROY(pHashEntry->pMatrixPoid, NULL);
				pHashEntry->pMatrixPoid = NULL;

				PIN_RESET_GLOBAL_HEAP;

				/* Indicate that we need to load from DB */
				nLoadFromDB = PIN_BOOLEAN_TRUE;

				/* Indicate that we need to unlock the node */
				nUnlockNode = PIN_BOOLEAN_TRUE;
			}
		}
	} else {
		/*
		 * No: Add new node to bucket
		 */
		PIN_SET_GLOBAL_HEAP;

		/* Create & init new bucket */
		pHashEntry = (ZonemapHashEntry_t *)
			malloc(sizeof(ZonemapHashEntry_t));

		/* Verify that memory was allocated */
		if (pHashEntry != NULL) {
			pHashEntry->pszZonemapName = malloc(strlen(pszTarget) + 1);
			if (pHashEntry->pszZonemapName == NULL) {
				pin_set_err(ebufp, PIN_ERRLOC_FM,
					PIN_ERRCLASS_SYSTEM_DETERMINATE,
					PIN_ERR_NO_MEM, 0, 0, 0);
				PIN_ERR_LOG_EBUF(PIN_ERR_LEVEL_ERROR,
					"fm_zonemap_pol_get_lineage_get_zonemap: "
					"failed to allocate memory for zonemap name",
					ebufp);
				free(pHashEntry);
				pHashEntry = NULL;
				/* NOTE(review): this early exit skips the
				 * PIN_RESET_GLOBAL_HEAP below — confirm the
				 * heap-macro pairing is still safe here. */
				goto Done;
			}
			strcpy((char*) pHashEntry->pszZonemapName, pszTarget);
			pHashEntry->pNext = NULL;
			pHashEntry->pBuffer = NULL;

			/* Init semaphore */
			MutexInit(pHashEntry->Lock);
		}

		PIN_RESET_GLOBAL_HEAP;

		/* Verify that memory was allocated */
		if (pHashEntry == NULL) {
			pin_set_err(ebufp, PIN_ERRLOC_FM,
				PIN_ERRCLASS_SYSTEM_DETERMINATE,
				PIN_ERR_NO_MEM, 0, 0, 0);
			PIN_ERR_LOG_EBUF(PIN_ERR_LEVEL_ERROR,
				"fm_zonemap_pol_get_lineage_get_zonemap: "
				"failed to allocate memory for hash entry",
				ebufp);
			goto Done;
		}

		/* Indicate that we need to load from DB */
		nLoadFromDB = PIN_BOOLEAN_TRUE;
	}

	/* Do we need to load from the database?
	 */
	if (nLoadFromDB == PIN_BOOLEAN_TRUE) {
		/*
		 * Yes: Load matrix from DB
		 */

		/* Copy trie address from global to local mem */
		ppBuffer = &(pHashEntry->pBuffer);
		ppMatrixPoid = &(pHashEntry->pMatrixPoid);
		pnSearchMode = &(pHashEntry->nDefaultSearchMode);

		/* Attempt to read matrix from DB */
		nLoadStatus = fm_zonemap_pol_get_lineage_load_zonemap(
			pCtx, pszTarget, pRoutingPoid, ppMatrixPoid,pBrandPoid,
			ppBuffer, pnSearchMode, ebufp);

		PIN_SET_GLOBAL_HEAP;

		/* Did we load a matrix? */
		if (nLoadStatus == PIN_BOOLEAN_FALSE) {
			/* Failed: set return value */

			/* If this happened on a RELOAD, we have more
			 * work to do */
			if (nUnlockNode == PIN_BOOLEAN_TRUE) {
				/*
				 * Yes: Unlock the node
				 */
				MutexUnlock(pHashEntry->Lock);
				nUnlockNode = PIN_BOOLEAN_FALSE;
			}

			free((char*)pHashEntry->pszZonemapName);
			pHashEntry->pszZonemapName = NULL;

			/* Destroy the mutex */
			MutexDestroy(pHashEntry->Lock);

			free(pHashEntry);
			pHashEntry = NULL;
		} else {
			/*
			 * Add bucket to hash table
			 */
			pHashEntry->pNext = apHashTable[nTargetHash];
			apHashTable[nTargetHash] = pHashEntry;
			pHashEntry->lastUpdate = pin_virtual_time((time_t *)NULL);
		}

		PIN_RESET_GLOBAL_HEAP;
	}

Done:
	/* Do we need to unlock the node? */
	if (nUnlockNode == PIN_BOOLEAN_TRUE) {
		/*
		 * Yes: Unlock the node
		 */
		MutexUnlock(pHashEntry->Lock);
	}

	/* Debug */
	PIN_ERR_LOG_MSG(PIN_ERR_LEVEL_DEBUG,
		"fm_zonemap_pol_get_lineage_get_zonemap returning");

	return pHashEntry;
}
// Destructor: releases the lock owned by this object.
UvdState::~UvdState()
{
    MutexDestroy(&m_lock);
}
// TestCond411
//
// Exercises teardown of pthread synchronisation objects.  Two worker
// threads (EThread1/EThread2) are driven through a shared ThreadData
// control block around a test mutex and condition variable; afterwards
// the statically-initialised mutexes/condvar and the attribute objects
// are destroyed.  Any destroy result other than EINVAL is accumulated
// into errsum.  Returns retval + errsum (0 indicates full success).
TInt CTestCondwait::TestCond411()
{
	int errsum=0, err = 0;
	int retval = 0;
	ThreadData lThreadData;

	// Synchronisation objects handed to the worker threads.
	sem_t lSignalSemaphore;
	sem_t lSuspendSemaphore;
	sem_t lTestSemaphore;
	pthread_mutex_t lTestMutex;
	pthread_cond_t lTestCondVar;
	pthread_condattr_t lCondAttr;
	pthread_mutexattr_t lTestMutexAttr;

	// One attribute object per mutex type under test.
	pthread_mutexattr_t defaultattr;
	pthread_mutexattr_t errorcheckattr;
	pthread_mutexattr_t recursiveattr;
	pthread_mutexattr_init(&defaultattr);
	pthread_mutexattr_init(&errorcheckattr);
	pthread_mutexattr_init(&recursiveattr);
	pthread_mutexattr_settype(&errorcheckattr,PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutexattr_settype(&recursiveattr,PTHREAD_MUTEX_RECURSIVE);

	// Statically-initialised variants, destroyed at the end of the test.
	pthread_mutex_t l_staticmutex = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t l_errorcheckmutex = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
	pthread_mutex_t l_recursivemutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
	pthread_cond_t l_staticcondvar = PTHREAD_COND_INITIALIZER;

	CommonData lCommonData;
	lCommonData.iStaticMutex = &l_staticmutex;
	lCommonData.iErrorCheckMutex = &l_errorcheckmutex;
	lCommonData.iRecursiveMutex = &l_recursivemutex;
	lCommonData.iStaticCondVar = &l_staticcondvar;

	// Rendezvous semaphores; bail out early on failure since nothing
	// else has been created yet.
	retval = sem_init(&lSignalSemaphore,0,0);
	if(retval != 0)
	{
		return retval;
	}
	retval = sem_init(&lSuspendSemaphore,0,0);
	if(retval != 0)
	{
		return retval;
	}

	// Wire everything into the shared control block.
	lThreadData.iSignalSemaphore = &lSignalSemaphore;
	lThreadData.iSuspendSemaphore = &lSuspendSemaphore;
	lThreadData.iTestSemaphore = &lTestSemaphore;
	lThreadData.iTestMutex = &lTestMutex;
	lThreadData.iTestMutexAttr = &lTestMutexAttr;
	lThreadData.iTestCondVar = &lTestCondVar;
	lThreadData.iDefaultAttr = &defaultattr;
	lThreadData.iErrorcheckAttr = &errorcheckattr;
	lThreadData.iRecursiveAttr = &recursiveattr;
	lThreadData.iCondAttr = &lCondAttr;

	for (int loop = 0; loop < EThreadMain; loop++)
	{
		g_spinFlag[loop] = true;
	}
	lThreadData.iSuspending = false;
	lThreadData.iSpinCounter = 0;
	lThreadData.iCurrentCommand = -1;
	lThreadData.iSelf = EThreadMain;
	lThreadData.iValue = 0;
	lThreadData.iRetValue = 0;
	lThreadData.ierrno = 0;
	lThreadData.iExpectederrno = 0;
	lThreadData.iTimes = 0;
	lThreadData.iStopped = false;
	lThreadData.iCommonData = &lCommonData;

	// Create the condvar and a NULL-attribute mutex, run the two worker
	// threads, then tear everything down again.
	retval = CondInit(&lThreadData);
	retval = MutexInitNULL(&lThreadData);
	fp=func;
	retval = ThreadCreate(&lThreadData, (void*) EThread1);
	WaitTillSuspended(&lThreadData, (void*) EThread1);
	fp=func1;
	retval = ThreadCreate(&lThreadData, (void*) EThread2);
	retval = ThreadDestroy(&lThreadData, (void*) EThread1);
	retval = ThreadDestroy(&lThreadData, (void*) EThread2);
	retval = MutexDestroy(&lThreadData);
	retval = CondDestroy(&lThreadData);
	StopThread(&lThreadData);

	// Destroy the static objects and the attributes; accumulate any
	// error code other than EINVAL.
	err = pthread_cond_destroy(&l_staticcondvar);
	if(err != EINVAL)
	{
		errsum += err;
	}
	err = pthread_mutex_destroy(&l_recursivemutex);
	if(err != EINVAL)
	{
		errsum += err;
	}
	err = pthread_mutex_destroy(&l_errorcheckmutex);
	if(err != EINVAL)
	{
		errsum += err;
	}
	err = pthread_mutex_destroy(&l_staticmutex);
	if(err != EINVAL)
	{
		errsum += err;
	}
	err = pthread_mutexattr_destroy(&recursiveattr);
	if(err != EINVAL)
	{
		errsum += err;
	}
	err = pthread_mutexattr_destroy(&errorcheckattr);
	if(err != EINVAL)
	{
		errsum += err;
	}
	err = pthread_mutexattr_destroy(&defaultattr);
	if(err != EINVAL)
	{
		errsum += err;
	}
	err = sem_destroy(&lSignalSemaphore);
	if(err != EINVAL)
	{
		errsum += err;
	}
	err = sem_destroy(&lSuspendSemaphore);
	if(err != EINVAL)
	{
		errsum += err;
	}

	return retval+errsum;
}
//
// SMALoad
//
// Driver entry point: registers the SMA's dispatch functions with the
// IBT framework, reads global settings, then allocates and initialises
// the global SMA state (g_Sma), its locks and the global GRH buffer.
//
// INPUTS:
//
//	ComponentInfo - dispatch table to fill in (AddDevice / RemoveDevice
//	                / Unload).
//
// RETURNS:
//
//	FSUCCESS on success; FINSUFFICIENT_RESOURCES when any allocation
//	fails (partially-built state is unwound via the fail* labels).
//
FSTATUS
SMALoad(
	IN IBT_COMPONENT_INFO	*ComponentInfo
	)
{
	FSTATUS status = FSUCCESS;

	_DBG_ENTER_LVL(_DBG_LVL_MAIN, SmaLoad);
	_DBG_INIT;
	_TRC_REGISTER();

#if !defined(VXWORKS)
	_DBG_PRINT(_DBG_LVL_MAIN, (" InfiniBand Subnet Management Agent. Built %s %s\n",\
		__DATE__, __TIME__ ));
#else
	_DBG_PRINT(_DBG_LVL_MAIN, (" InfiniBand Subnet Management Agent. Built %s %s\n",\
		_DBG_PTR(__DATE__), _DBG_PTR(__TIME__) ));
#endif

	// Establish dispatch entry points for the functions supported.
	MemoryClear( ComponentInfo, sizeof(IBT_COMPONENT_INFO) );
	ComponentInfo->AddDevice = SmaAddDevice;
	ComponentInfo->RemoveDevice = SmaRemoveDevice;
	ComponentInfo->Unload = SMAUnload;

	// Read Global settings for the driver which may be set in a
	// OS specific way.
	SmaInitGlobalSettings();

	// Allocate space for Global data
	g_Sma = (SMA_GLOBAL_INFO*)MemoryAllocate2AndClear(sizeof(SMA_GLOBAL_INFO),
		IBA_MEM_FLAG_PREMPTABLE, SMA_MEM_TAG);
	if ( NULL == g_Sma )
	{
		_DBG_ERROR(("MemAlloc failed for g_Sma!\n"));
		// BUGFIX: previously fell through to 'done' with status still
		// FSUCCESS, reporting a successful load after the allocation
		// failed.
		status = FINSUFFICIENT_RESOURCES;
		goto done;
	}

	// initialize global data
	g_Sma->NumCa = 0;
	g_Sma->CaObj = NULL;
	g_Sma->WorkReqRecv = g_Sma->WorkReqSend = NULL;
	g_Sma->SmUserTbl = NULL;
	g_Sma->NumUser = 0;

	// SpinLockInitState( &g_Sma->Lock )
	// SpinLockInit( &g_Sma->Lock )

	// Init Storage area for MADs
	g_Sma->Bin.NumBlocks = 0;
	g_Sma->Bin.Head = g_Sma->Bin.Tail = NULL;
	g_Sma->Bin.MemList = NULL;
	g_Sma->Bin.CurrentIndex = 0;		// set start mem index

	// Locks
	SpinLockInitState( &g_Sma->CaListLock );
	SpinLockInit( &g_Sma->CaListLock );

	SpinLockInitState( &g_Sma->Bin.Lock );
	SpinLockInit( &g_Sma->Bin.Lock );

	MutexInitState( &g_Sma->Bin.Mutex );
	MutexInit( &g_Sma->Bin.Mutex );

	SpinLockInitState( &g_Sma->RQ.Lock );
	SpinLockInit( &g_Sma->RQ.Lock );

	// Init Global Ibt user group for notifications
	IbtInitNotifyGroup(NotifyIbtCallback);

	// Allocate memory for Global GRH since the SMA does not need a GRH.
	// This memory will automatically get mapped to all CA registrations.
	g_Sma->GlobalGrh = CreateGlobalMemList( 0, sizeof(IB_GRH), 0, FALSE );
	if ( NULL == g_Sma->GlobalGrh )
	{
		status = FINSUFFICIENT_RESOURCES;
		goto failmemlist;
	}

	g_Sma->GlobalGrh->VirtualAddr = MemoryAllocate2AndClear(sizeof(IB_GRH),
		IBA_MEM_FLAG_PREMPTABLE, SMA_MEM_TAG);
	if ( NULL == g_Sma->GlobalGrh->VirtualAddr )
	{
		status = FINSUFFICIENT_RESOURCES;
		goto failgrhvirt;
	}

	g_Sma->GlobalGrh->AccessControl.AsUINT16 = 0;
	g_Sma->GlobalGrh->AccessControl.s.LocalWrite = 1;
	g_Sma->GlobalGrh->CaMemIndex = 0;

	// Increment index for future allocations
	g_Sma->Bin.CurrentIndex++;

done:
	_DBG_LEAVE_LVL(_DBG_LVL_MAIN);
	return status;

	// Failure unwind: release everything built so far, newest first.
failgrhvirt:
	MemoryDeallocate( g_Sma->GlobalGrh );
failmemlist:
	IbtDestroyNotifyGroup();
	SpinLockDestroy( &g_Sma->RQ.Lock );
	MutexDestroy( &g_Sma->Bin.Mutex );
	SpinLockDestroy( &g_Sma->Bin.Lock );
	SpinLockDestroy( &g_Sma->CaListLock );
	MemoryDeallocate( g_Sma );
	goto done;
}
// Destructor: releases the underlying OS mutex via MutexDestroy().
gxMutex::~gxMutex()
{
    // PC-lint 09/08/2005: Function may throw exception in destructor
    MutexDestroy();
}
//----------------------------------------------------------------------------- C700Driver::~C700Driver() { MutexDestroy(mMIDIEvtMtx); MutexDestroy(mREGLOGEvtMtx); }
// Destructor: releases the mutex owned by this object.
SafeScalarImpl::~SafeScalarImpl(void)
{
    MutexDestroy(_mutex);
}