/**
 * free a work node.
 *
 * @param[in] node  work node object
 *
 * @return operation status
 *    @retval 0 success
 *    @retval AFS_WQ_ERROR node is still queued, scheduled, or running
 *
 * @internal
 */
static int
_afs_wq_node_free(struct afs_work_queue_node * node)
{
    int ret = 0;

    /* refuse to free a node that is still queued or in an active state */
    if (queue_IsOnQueue(node) ||
	(node->state == AFS_WQ_NODE_STATE_SCHEDULED) ||
	(node->state == AFS_WQ_NODE_STATE_RUNNING) ||
	(node->state == AFS_WQ_NODE_STATE_BLOCKED)) {
	ret = AFS_WQ_ERROR;
	goto error;
    }

    ret = _afs_wq_node_free_deps(node);
    if (ret) {
	goto error;
    }

    MUTEX_DESTROY(&node->lock);
    CV_DESTROY(&node->state_cv);

    /* give the caller's destructor a chance to clean up its rock */
    if (node->rock_dtor) {
	(*node->rock_dtor) (node->rock);
    }

    free(node);

 error:
    return ret;
}
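/*
 * Illustrative sketch: a work node normally reaches _afs_wq_node_free()
 * once its last reference is dropped.  The refcount field name and the
 * _afs_wq_node_put_r() name/signature below are assumptions made for
 * illustration, not the verbatim work_queue.c internals.
 */
static int
_afs_wq_node_put_r(struct afs_work_queue_node * node)
{
    int refc;

    osi_Assert(node->refcount > 0);	/* refcount: assumed field name */
    refc = --node->refcount;
    MUTEX_EXIT(&node->lock);		/* caller entered node->lock */
    if (!refc) {
	/* last reference gone; _afs_wq_node_free() still re-checks
	 * that the node is not queued, scheduled, or running */
	return _afs_wq_node_free(node);
    }
    return 0;
}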
/**
 * destroy a node list object.
 *
 * @param[in] list  list object
 *
 * @return operation status
 *    @retval 0 success
 *    @retval AFS_WQ_ERROR list not empty
 *
 * @internal
 */
static int
_afs_wq_node_list_destroy(struct afs_work_queue_node_list * list)
{
    int ret = 0;

    if (queue_IsNotEmpty(&list->list)) {
	ret = AFS_WQ_ERROR;
	goto error;
    }

    MUTEX_DESTROY(&list->lock);
    CV_DESTROY(&list->cv);

 error:
    return ret;
}
/*
 * Terminate a multiRx operation, ignoring (discarding the error codes
 * of) all calls that have not yet completed.
 */
void
multi_Finalize_Ignore(struct multi_handle *mh)
{
    int i;
    int nCalls = mh->nConns;

    /* end every outstanding call without harvesting its result */
    for (i = 0; i < nCalls; i++) {
	struct rx_call *call = mh->calls[i];
	if (call)
	    rx_EndCall(call, 0);
    }
#ifdef RX_ENABLE_LOCKS
    MUTEX_DESTROY(&mh->lock);
    CV_DESTROY(&mh->cv);
#endif /* RX_ENABLE_LOCKS */
    /* the sizes passed to osi_Free must mirror the original allocations */
    osi_Free(mh->calls, sizeof(struct rx_call *) * nCalls);
    osi_Free(mh->ready, sizeof(short *) * nCalls);
    osi_Free(mh, sizeof(struct multi_handle));
}
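/*
 * Usage sketch: multi_Finalize_Ignore() is normally reached through the
 * multi_End_Ignore macro at the end of a multi_Rx block rather than
 * called directly.  multi_SAMPLE_Ping is a stand-in for an
 * rxgen-generated multi_* stub and mark_up() is a hypothetical
 * callback; everything else uses the real rx_multi.h macros.
 */
#include <rx/rx.h>
#include <rx/rx_multi.h>

static void
mark_up(int idx)	/* hypothetical: record that host idx answered */
{
    /* ... */
}

static void
ping_all(struct rx_connection **conns, int nConns)
{
    multi_Rx(conns, nConns) {
	multi_SAMPLE_Ping();
	if (!multi_error)	/* multi_error: result of the completed call */
	    mark_up(multi_i);	/* multi_i: index of that call/connection */
    } multi_End_Ignore;		/* tears down via multi_Finalize_Ignore() */
}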
/**
 * shut down volume group cache subsystem.
 *
 * @return operation status
 *    @retval 0 success
 *    @retval EOPNOTSUPP shutdown not yet fully implemented
 *
 * @todo implement
 */
int
VVGCache_PkgShutdown(void)
{
    int i;

    /* fix it later */

    /* free hash table */
    free(VVGCache_hash_table.hash_buckets);
    VVGCache_hash_table.hash_buckets = NULL;

    /* destroy per-partition VVGC state */
    for (i = 0; i <= VOLMAXPARTS; i++) {
	VVGCache.part[i].state = VVGC_PART_STATE_INVALID;
	CV_DESTROY(&VVGCache.part[i].cv);
    }

    return EOPNOTSUPP;
}