/*
 * Tear down a kernel mutex.  The caller must guarantee the mutex is
 * unowned and has no waiters; violations panic the system rather than
 * silently corrupting lock/turnstile state.
 */
void mutex_destroy(kmutex_t *mp)
{
	mutex_impl_t *lp = (mutex_impl_t *)mp;

	/* Fast path: unowned with no waiters -- safe for either lock type. */
	if (lp->m_owner == 0 && !MUTEX_HAS_WAITERS(lp)) {
		MUTEX_DESTROY(lp);
	} else if (MUTEX_TYPE_SPIN(lp)) {
		/* Spin locks carry no owner/waiter state to validate here. */
		LOCKSTAT_RECORD0(LS_MUTEX_DESTROY_RELEASE, lp);
		MUTEX_DESTROY(lp);
	} else if (MUTEX_TYPE_ADAPTIVE(lp)) {
		LOCKSTAT_RECORD0(LS_MUTEX_DESTROY_RELEASE, lp);
		/* A held adaptive mutex may only be destroyed by its owner. */
		if (MUTEX_OWNER(lp) != curthread)
			mutex_panic("mutex_destroy: not owner", lp);
		if (MUTEX_HAS_WAITERS(lp)) {
			/*
			 * Consult the turnstile to see whether threads are
			 * really blocked on this lock; destroying a mutex
			 * with waiters would strand them.
			 */
			turnstile_t *ts = turnstile_lookup(lp);
			turnstile_exit(lp);
			if (ts != NULL)
				mutex_panic("mutex_destroy: has waiters", lp);
		}
		MUTEX_DESTROY(lp);
	} else {
		/* Neither spin nor adaptive: the lock word is corrupt. */
		mutex_panic("mutex_destroy: bad mutex", lp);
	}
}
/*
 * Tear down a client: stop both worker threads, close the socket, and
 * only then destroy the mutexes those threads were using.
 *
 * BUG FIX: the mutexes were previously destroyed *before* the send/recv
 * threads were terminated, so a still-running thread could lock (or be
 * holding) an already-destroyed mutex.  Resources are now released in
 * dependency order.
 *
 * NOTE(review): TerminateThread() kills a thread at an arbitrary point and
 * can leak whatever locks/state it held; prefer signalling the threads to
 * exit and waiting on their handles -- confirm against the thread bodies.
 */
Client::~Client(){
	// Stop the workers first so nothing can touch the mutexes or socket below.
	TerminateThread(client_send_thread.ThreadHandle, 0);
	TerminateThread(client_recv_thread.ThreadHandle, 0);
	closesocket(mysocket);
	// Safe to destroy now: no thread of ours can still be using them.
	MUTEX_DESTROY(mutex_sendto);
	MUTEX_DESTROY(mutex_print);
}
/*
 * Tear down the load balancer: stop the packet and heartbeat threads,
 * close both sockets, and only then destroy the mutexes the threads used.
 *
 * BUG FIX: the mutexes were previously destroyed *before* the worker
 * threads were terminated, so a still-running thread could lock (or be
 * holding) an already-destroyed mutex.  Resources are now released in
 * dependency order.
 *
 * NOTE(review): TerminateThread() kills a thread at an arbitrary point and
 * can leak whatever locks/state it held; prefer signalling the threads to
 * exit and waiting on their handles -- confirm against the thread bodies.
 */
LoadBalancer::~LoadBalancer(){
	// Stop the workers first so nothing can touch the mutexes or sockets below.
	TerminateThread(pkt_thread.ThreadHandle, 0);
	TerminateThread(send_heartbeat_thread.ThreadHandle, 0);
	closesocket(client_socket);
	closesocket(server_socket);
	// Safe to destroy now: no thread of ours can still be using them.
	MUTEX_DESTROY(mutex_sendto);
	MUTEX_DESTROY(mutex_print);
}
/*
 * Destroy the monitor's underlying OS mutex.
 *
 * On Windows the MUTEX_DESTROY macro yields no status to check; on other
 * platforms a non-zero return indicates a failed destroy and is treated
 * as a fatal assertion.
 */
void OMR::Monitor::destroy()
   {
#ifdef WIN32
   MUTEX_DESTROY(_monitor);
#else
   int32_t rc = MUTEX_DESTROY(_monitor);
   TR_ASSERT(rc == 0, "error destroying monitor\n");
#endif
   }
/*
 * Destroy the monitor's underlying OS mutex.
 *
 * On Windows the MUTEX_DESTROY macro yields no status to check; on other
 * platforms a non-zero return indicates a failed destroy and is treated
 * as a fatal assertion.
 */
void OMR::Monitor::destroy()
   {
#if defined(OMR_OS_WINDOWS)
   MUTEX_DESTROY(_monitor);
#else
   int32_t rc = MUTEX_DESTROY(_monitor);
   TR_ASSERT(rc == 0, "error destroying monitor\n");
#endif /* defined(OMR_OS_WINDOWS) */
   }
/*
 * clean xmap
 *
 * Release every resource owned by an XMAP: the index trees, queue and key
 * trie, the db, the three mmap'd regions (disk/meta/state) with their file
 * descriptors, both mutexes, the logger, and finally the XMAP structure
 * itself.  A NULL pointer is a no-op.
 */
void xmap_clean(XMAP *xmap)
{
    if(xmap)
    {
        /* Close the in-memory index structures first. */
        mmtree_close(xmap->tree);
        mmtree64_close(xmap->tree64);
        mmqueue_clean(xmap->queue);
        mmtrie_clean(xmap->kmap);
        /* Reset, then clean, the db -- done before unmapping the regions
         * it may reference. */
        cdb_reset(xmap->db);
        cdb_clean(xmap->db);
        /* Unmap and close the disk, meta and state I/O regions.  Pointers
         * and descriptors are zeroed so a double clean is harmless. */
        if(xmap->diskio.map)
        {
            munmap(xmap->diskio.map, xmap->diskio.size);
            xmap->diskio.map = NULL;
        }
        if(xmap->diskio.fd > 0)
        {
            close(xmap->diskio.fd);
            xmap->diskio.fd = 0;
        }
        if(xmap->metaio.map)
        {
            munmap(xmap->metaio.map, xmap->metaio.size);
            xmap->metaio.map = NULL;
        }
        if(xmap->metaio.fd > 0)
        {
            close(xmap->metaio.fd);
            xmap->metaio.fd = 0;
        }
        if(xmap->stateio.map)
        {
            munmap(xmap->stateio.map, xmap->stateio.size);
            xmap->stateio.map = NULL;
        }
        if(xmap->stateio.fd > 0)
        {
            close(xmap->stateio.fd);
            xmap->stateio.fd = 0;
        }
        /* Mutexes and logger go last, after everything that might use them. */
        MUTEX_DESTROY(xmap->mutex);
        MUTEX_DESTROY(xmap->cmutex);
        LOGGER_CLEAN(xmap->logger);
        xmm_free(xmap, sizeof(XMAP));
    }
    return ;
}
/*
 * Tear down the channel-manager subsystem: free every remaining channel
 * manager, then destroy the global init/list mutexes.
 *
 * NOTE(review): the loop terminates only if freerdp_chanman_free() unlinks
 * the freed entry from g_chan_man_list; otherwise it spins forever --
 * confirm against that function.
 *
 * Returns 0 unconditionally.
 */
int freerdp_chanman_uninit(void)
{
	while (g_chan_man_list)
	{
		freerdp_chanman_free(g_chan_man_list->chan_man);
	}
	/* Destroyed last, after no manager remains that could take them. */
	MUTEX_DESTROY(g_mutex_init);
	MUTEX_DESTROY(g_mutex_list);
	return 0;
}
/*
 * Destroy an AMS object: release both mutex/condvar pairs, free the heap
 * allocations backing them, and finally free the object itself.
 *
 * Robustness fix: a NULL handle is now tolerated as a no-op instead of
 * dereferencing NULL, making destroy safe on a never-created or
 * already-destroyed object.
 *
 * Returns MC_SUCCESS always.
 */
int ams_Destroy(ams_p ams)
{
	if (ams == NULL)
		return MC_SUCCESS;

	/* Each primitive is destroyed before its storage is freed. */
	MUTEX_DESTROY(ams->runflag_lock);
	free(ams->runflag_lock);
	COND_DESTROY(ams->runflag_cond);
	free(ams->runflag_cond);

	MUTEX_DESTROY(ams->waiting_lock);
	free(ams->waiting_lock);
	COND_DESTROY(ams->waiting_cond);
	free(ams->waiting_cond);

	free(ams);
	return MC_SUCCESS;
}
/* Unload the RealAudio proxy: release its lock if init actually ran. */
void ippr_raudio_fini()
{
	if (raudio_proxy_init != 1)
		return;
	raudio_proxy_init = 0;
	MUTEX_DESTROY(&raudiofr.fr_lock);
}
/*---------------------------------------------------------------------*/
/*
 * Close an open archive handle.
 *
 * Closes every open event file, stops the purge thread (write mode only),
 * persists the final state, closes the state file, and releases all
 * per-archive resources before re-initializing the slot for reuse.
 *
 * Returns TRUE on success; FALSE with last_error set on failure.
 */
BOOL ArchiveClose(H_ARCHIVE harchive)
{
    NODE *node;
    STREAM *stream;

    if (!ValidateHandle(harchive))
        return (FALSE);

    _archive[harchive].last_error = ARC_NO_ERROR;
    _archive_error = ARC_NO_ERROR;

    ArchiveLog(ARC_LOG_VERBOSE, "Close archive: %s", _archive[harchive].path);

    /* Close all open event files on archive... */
    /* In write mode the file is renamed into place as part of the close. */
    if ((node = FirstNode(&_archive[harchive].streams))) {
        do {
            stream = (STREAM *) node->data;
            if (_archive[harchive].access == ARC_WRITE)
                CloseEventFileAndRename(harchive, stream);
            else
                CloseEventFile(stream);
        } while ((node = NextNode(node)) != NULL);
    }

    /* If open for write... */
    if (_archive[harchive].access == ARC_WRITE) {
        /* Stop the purge thread: set the stop flag under the purge mutex,
         * release the mutex before waking/joining so the thread can make
         * progress, then wait for it to exit. */
        MUTEX_LOCK(&_archive[harchive].purge.mutex);
        if(_archive[harchive].purge.active ) {
            _archive[harchive].purge.stop = TRUE;
            MUTEX_UNLOCK(&_archive[harchive].purge.mutex);
            ArchiveLog(ARC_LOG_VERBOSE, "Stopping purge thread");
            SEM_POST(&_archive[harchive].purge.semaphore);
            THREAD_JOIN(&_archive[harchive].purge.thread_id);
        }
        else
            MUTEX_UNLOCK(&_archive[harchive].purge.mutex);
        /* Mark as closed and write state to disk */
        _archive[harchive].state.write = FALSE;
        if (!WriteState(harchive))
            return (FALSE);
    }

    /* Close the state file */
    if (!FileClose(_archive[harchive].file)) {
        _archive[harchive].last_error = ARC_FILE_IO_ERROR;
        return (FALSE);
    }

    _n_archives--;

    /* Clear state */
    MUTEX_DESTROY(&_archive[harchive].mutex);
    DestroyList(&_archive[harchive].streams);
    DestroyPurge(&_archive[harchive].purge);
    InitArchive(harchive);

    return (TRUE);
}
/* Unload the rcmd proxy: release its lock if init actually ran. */
void ippr_rcmd_fini()
{
	if (rcmd_proxy_init != 1)
		return;
	rcmd_proxy_init = 0;
	MUTEX_DESTROY(&rcmdfr.fr_lock);
}
/**
 * Discards a lock object.
 * An lock must be discarded to free the resources associated with it.
 *
 * Removes the lock's tracing record from the extensions' tracing pool
 * (under the pool mutex), then destroys the underlying primitive --
 * either a custom spinlock or an OS mutex, depending on build flags.
 *
 * @note A lock must not be destroyed if threads are waiting on it, or
 * if it is currently owned.
 */
void
MM_LightweightNonReentrantLock::tearDown()
{
	if(NULL != _extensions) {
		if(NULL != _tracing) {
			if (NULL != _tracing->monitor_name) {
				/* Name storage is owned elsewhere; just drop the reference. */
				_tracing->monitor_name = NULL;
			}
			J9Pool* tracingPool = _extensions->_lightweightNonReentrantLockPool;
			if(NULL != tracingPool) {
				/* Pool mutations must happen under the pool's monitor. */
				omrthread_monitor_enter(_extensions->_lightweightNonReentrantLockPoolMutex);
				pool_removeElement(tracingPool, _tracing);
				omrthread_monitor_exit(_extensions->_lightweightNonReentrantLockPoolMutex);
			}
			_tracing = NULL;
		}
	}

	if (_initialized) {
		/* Destroy whichever primitive initialize() created for this build. */
#if defined(J9MODRON_USE_CUSTOM_SPINLOCKS)
		omrgc_spinlock_destroy(&_spinlock);
#else /* J9MODRON_USE_CUSTOM_SPINLOCKS */
		MUTEX_DESTROY(_mutex);
#endif /* J9MODRON_USE_CUSTOM_SPINLOCKS */
		_initialized = false;
	}
}
void xqueue_clean(void *xqueue) { XQUEUE *q = (XQUEUE *)xqueue; XQNODE *node = NULL; int i = 0; if(q) { //fprintf(stdout, "%s::%d q:%p nleft:%d qtotal:%d qleft:%p\n", __FILE__, __LINE__, q, q->nleft, q->qtotal, q->left); for(i = 0; i < q->nlist; i++); { if(q->list[i]) xmm_free(q->list[i], XQ_NODES_MAX * sizeof(XQNODE)); } /* while((node = q->left)) { q->left = node->next; xmm_free(node, sizeof(XQNODE)); } */ MUTEX_DESTROY(q->mutex); xmm_free(q, sizeof(XQUEUE)); } return ; }
/**
 * free a work node.
 *
 * @param[in] node  work node object
 *
 * @return operation status
 *    @retval 0 success
 *    @retval AFS_WQ_ERROR node is still queued/scheduled/running/blocked
 *
 * @internal
 */
static int
_afs_wq_node_free(struct afs_work_queue_node * node)
{
    int ret = 0;

    /* Refuse to free a node the queue machinery may still touch. */
    if (queue_IsOnQueue(node) ||
	(node->state == AFS_WQ_NODE_STATE_SCHEDULED) ||
	(node->state == AFS_WQ_NODE_STATE_RUNNING) ||
	(node->state == AFS_WQ_NODE_STATE_BLOCKED)) {
	ret = AFS_WQ_ERROR;
	goto error;
    }

    /* Dependencies must be released before the node's own resources. */
    ret = _afs_wq_node_free_deps(node);
    if (ret) {
	goto error;
    }

    MUTEX_DESTROY(&node->lock);
    CV_DESTROY(&node->state_cv);

    /* Give the caller-supplied destructor a chance to release the rock. */
    if (node->rock_dtor) {
	(*node->rock_dtor) (node->rock);
    }

    free(node);

 error:
    return ret;
}
// Smoke test: a mutex can be initialized, acquired, released and
// destroyed without error.
TEST(Mutex, test)
{
	MUTEX lock;
	MUTEX_INIT(lock);
	MUTEX_LOCK(lock);
	MUTEX_UNLOCK(lock);
	MUTEX_DESTROY(lock);
}
/* Unload the NetBIOS proxy: release its lock if init actually ran. */
void ippr_netbios_fini()
{
	if (netbios_proxy_init != 1)
		return;
	netbios_proxy_init = 0;
	MUTEX_DESTROY(&netbiosfr.fr_lock);
}
/* Unload the IRC proxy: release its lock if init actually ran. */
void ippr_irc_fini()
{
	if (irc_proxy_init != 1)
		return;
	irc_proxy_init = 0;
	MUTEX_DESTROY(&ircnatfr.fr_lock);
}
/*
 * Tear down the debug subsystem: destroy the output-lock mutex and free
 * the global debug state.  A failed destroy is treated as unrecoverable.
 */
void dbg_deinit()
{
    int rc = MUTEX_DESTROY(&(dbg_g->dbg_fd_out_lock));
    if (rc != 0)
    {
        dbg_exit(EC__EXIT__NO_RECOVER);
    }
    free(dbg_g);
}
/*
 * Destroy a message queue.
 *
 * The queue mutex is held while the node list and condvar are torn down,
 * released, and only then destroyed -- a mutex must not be destroyed
 * while held.
 *
 * NOTE(review): assumes no other thread is still waiting on the condvar
 * or about to take the mutex -- confirm at the call site.
 */
void S_queue_destroy(pTHX_ message_queue* queue) {
	MUTEX_LOCK(&queue->mutex);
	node_destroy(&queue->front);
	COND_DESTROY(&queue->condvar);
	MUTEX_UNLOCK(&queue->mutex);
	MUTEX_DESTROY(&queue->mutex);
}
/*
 * Shut down the secure-allocation subsystem: destroy the allocation mutex
 * and clear the kernel-data pointer.
 *
 * NOTE(review): MUTEX_DESTROY( allocation ) presumably dereferences
 * krnlData, hence the destroy happens before krnlData is cleared --
 * confirm against the macro definition before reordering.
 */
void endAllocation( void )
	{
	/* Destroy any data structures required to make the allocation thread-
	   safe */
	MUTEX_DESTROY( allocation );

	krnlData = NULL;
	}
/* Unload the RealAudio proxy: release its lock if init actually ran. */
void ipf_p_raudio_main_unload()
{
	if (raudio_proxy_init != 1)
		return;
	raudio_proxy_init = 0;
	MUTEX_DESTROY(&raudiofr.fr_lock);
}
/* Unload the PPTP proxy: release its lock if init actually ran. */
void ipf_p_pptp_main_unload(void)
{
	if (pptp_proxy_init != 1)
		return;
	pptp_proxy_init = 0;
	MUTEX_DESTROY(&pptpfr.fr_lock);
}
/*
 * Global teardown: free the principal account mail string, flush cached
 * state (recent-mutex table, database, DNS cache), then destroy the DB
 * refresh mutex.
 *
 * NOTE(review): the mutex is destroyed last -- presumably the flush
 * helpers above may still take it; confirm against their implementations.
 */
void cleanup()
{
	free(COMPTE_PRINCIPAL_MAIL);
	flushRecentMutex();
	flushDB();
	releaseDNSCache();
	MUTEX_DESTROY(DBRefreshMutex);
}
/* Clean event: release the mutex owned by an event; NULL is a no-op. */
void event_clean(EVENT *event)
{
    if(event == NULL)
        return;
    MUTEX_DESTROY(event->mutex);
}
/* Unload the PPTP proxy: release its lock if init actually ran. */
void ippr_pptp_fini(void)
{
	if (pptp_proxy_init != 1)
		return;
	pptp_proxy_init = 0;
	MUTEX_DESTROY(&pptpfr.fr_lock);
}
/* Unload the IRC proxy: release its lock if init actually ran. */
void ipf_p_irc_main_unload()
{
	if (irc_proxy_init != 1)
		return;
	irc_proxy_init = 0;
	MUTEX_DESTROY(&ircnatfr.fr_lock);
}
/* -------------------------------------------------------------------- */
/* Unload the RPCB proxy: release its lock if init actually ran. */
void ippr_rpcb_fini()
{
	if (rpcb_proxy_init != 1)
		return;
	rpcb_proxy_init = 0;
	MUTEX_DESTROY(&rpcbfr.fr_lock);
}
/*
 * Destroy a priority queue: drop every queued item, free the backing
 * binary heap, destroy the queue lock and release the structure itself.
 * The caller must guarantee no other thread still uses pq.
 */
void pqueue_destroy(pqueue_t *pq)
{
    /* Release remaining items before their container goes away. */
    pqueue_drop_items(pq, binheap_count(pq->heap));
    binheap_destroy(pq->heap);
    MUTEX_DESTROY(pq->lock);
    free(pq);
}
/* Unload the TFTP proxy: release its lock if init actually ran. */
void ipf_p_tftp_main_unload(void)
{
	if (tftp_proxy_init != 1)
		return;
	tftp_proxy_init = 0;
	MUTEX_DESTROY(&tftpfr.fr_lock);
}
/* exit:
 * Shuts down the driver, freeing allocated resources, etc.  All channels
 * will already have been destroyed by the `destroy_channel' function, so
 * normally all this needs to do is tidy up anything `init' left untidy.
 * Return zero if successful, non-zero otherwise.
 */
static int local_exit(void)
{
    /* Drain the port list under its lock.  NOTE(review): the loop
     * terminates only if destroy_port() unlinks the head from port_list
     * -- confirm against that function. */
    MUTEX_LOCK(port_list);
    while (port_list)
        destroy_port(port_list);
    MUTEX_UNLOCK(port_list);
    MUTEX_DESTROY(port_list);
    return 0;
}