/*
 * Tear down and free the pool's crypto keystore.
 *
 * Takes sk_lock as writer, drains the sk_dslkeys AVL tree node by
 * node, destroys each node's keychain and wrapping key, then frees
 * the keystore itself and clears spa->spa_keystore.  No-op if the
 * keystore was never allocated.  Cannot fail.
 */
void zcrypt_keystore_fini(spa_t *spa)
{
	void *cookie;
	avl_tree_t *tree;
	zcrypt_keystore_node_t *node;

	if (spa->spa_keystore == NULL)
		return;

	rw_enter(&spa->spa_keystore->sk_lock, RW_WRITER);

	/*
	 * Note we don't bother with the refcnt of the keys in here
	 * because this function can't return failure so we just need to
	 * destroy everything.
	 */
	cookie = NULL;
	tree = &spa->spa_keystore->sk_dslkeys;
	while ((node = avl_destroy_nodes(tree, &cookie)) != NULL) {
		mutex_enter(&node->skn_lock);
		(void) zcrypt_keychain_fini(node->skn_keychain);
		zcrypt_key_free(node->skn_wrapkey);
		mutex_exit(&node->skn_lock);
		/* scrub key material from memory before freeing the node */
		bzero(node, sizeof (zcrypt_keystore_node_t));
		kmem_free(node, sizeof (zcrypt_keystore_node_t));
	}
	avl_destroy(tree);
	/* drop the lock before destroying it, then free the keystore */
	rw_exit(&spa->spa_keystore->sk_lock);
	rw_destroy(&spa->spa_keystore->sk_lock);
	kmem_free(spa->spa_keystore, sizeof (zcrypt_keystore_t));
	spa->spa_keystore = NULL;
}
/*
 * Dismantle the FUID domain and index lookup trees.
 *
 * The domain-tree pass only drops the ksid reference held by each
 * entry; node memory is released while draining the index tree.
 * NOTE(review): this assumes both trees index the same fuid_domain_t
 * nodes — confirm against the table-construction code.
 */
void zfs_fuid_table_destroy(avl_tree_t *idx_tree, avl_tree_t *domain_tree)
{
	fuid_domain_t *dom;
	void *cookie = NULL;

	while ((dom = avl_destroy_nodes(domain_tree, &cookie)) != NULL)
		ksiddomain_rele(dom->f_ksid);
	avl_destroy(domain_tree);

	cookie = NULL;
	while ((dom = avl_destroy_nodes(idx_tree, &cookie)) != NULL)
		kmem_free(dom, sizeof (fuid_domain_t));
	avl_destroy(idx_tree);
}
/*
 * Destroy a uu_avl handle.
 *
 * In debug mode, panics if the tree still contains nodes or if any
 * walker is still outstanding (the null-walk sentinel must point only
 * at itself).  Unlinks the handle from its pool's doubly linked list
 * of avls (links are stored pointer-encoded) under the pool lock,
 * then destroys the underlying tree and frees the handle.
 */
void uu_avl_destroy(uu_avl_t *ap)
{
	uu_avl_pool_t *pp = ap->ua_pool;

	if (ap->ua_debug) {
		if (avl_numnodes(&ap->ua_tree) != 0) {
			uu_panic("uu_avl_destroy(%p): tree not empty\n", ap);
		}
		if (ap->ua_null_walk.uaw_next != &ap->ua_null_walk ||
		    ap->ua_null_walk.uaw_prev != &ap->ua_null_walk) {
			uu_panic("uu_avl_destroy(%p): outstanding walkers\n",
			    ap);
		}
	}
	/* unlink from the pool's list of avls (encoded prev/next links) */
	(void) pthread_mutex_lock(&pp->uap_lock);
	UU_AVL_PTR(ap->ua_next_enc)->ua_prev_enc = ap->ua_prev_enc;
	UU_AVL_PTR(ap->ua_prev_enc)->ua_next_enc = ap->ua_next_enc;
	(void) pthread_mutex_unlock(&pp->uap_lock);
	/* poison the handle before freeing it */
	ap->ua_prev_enc = UU_PTR_ENCODE(NULL);
	ap->ua_next_enc = UU_PTR_ENCODE(NULL);
	ap->ua_pool = NULL;
	avl_destroy(&ap->ua_tree);
	uu_free(ap);
}
/*
 * Free a zilog structure and everything it owns.
 *
 * Stops further syncing via zl_stop_sync, drains and frees the lwb
 * list (including each lwb's data buffer), then destroys the vdev
 * tree, itx list, lock and condvars before freeing the zilog itself.
 * The itx list must already be empty.
 */
void zil_free(zilog_t *zilog)
{
	lwb_t *lwb;

	zilog->zl_stop_sync = 1;

	/* drain and free every log write block still on the list */
	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
		list_remove(&zilog->zl_lwb_list, lwb);
		if (lwb->lwb_buf != NULL)
			zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
		kmem_cache_free(zil_lwb_cache, lwb);
	}
	list_destroy(&zilog->zl_lwb_list);

	avl_destroy(&zilog->zl_vdev_tree);
	mutex_destroy(&zilog->zl_vdev_lock);

	/* caller must have processed all intent-log transactions */
	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
	list_destroy(&zilog->zl_itx_list);
	mutex_destroy(&zilog->zl_lock);
	cv_destroy(&zilog->zl_cv_writer);
	cv_destroy(&zilog->zl_cv_suspend);
	kmem_free(zilog, sizeof (zilog_t));
}
/* Shutdown the connection pool.
 * No possible errors.
 * Assert Class: 1
 *
 * Fix: the definition used an old-style empty parameter list `()`,
 * which declares a function with unspecified parameters (and is
 * invalid C23); it now uses an explicit `(void)` prototype.
 */
void pool_shutdown(void)
{
	xa_debug(1, "DEBUG: Closing the pool.");
	/* flush any idle/stale connections before tearing the tree down */
	pool_cleaner();
	avl_destroy(pool, NULL);
	xa_debug(1, "DEBUG: Pool closed.");
}
/*
 * Deallocator for avl_tree objects: tear down the underlying AVL
 * tree, drop the reference held on the comparison callable, and
 * release the wrapper object itself.  Fields are nulled as they are
 * released so the struct never holds dangling pointers.
 */
Py_LOCAL(void) avl_tree_dealloc(avl_tree_Object *self)
{
	avl_destroy(self->tree);
	self->tree = NULL;

	Py_DECREF(self->compare_func);
	self->compare_func = NULL;

	PyObject_DEL(self);
}
/*
 * Release all resources held by a db handle: the package tree (each
 * entry destroyed via destroypkg), the backing file descriptor, and
 * the handle itself.
 *
 * Fix: the original wrote `d->pkgtree = NULL; d->file = NULL;` AFTER
 * `free(d)` — a use-after-free (undefined behavior).  Those stores,
 * and the pointless `d = NULL;` on the local copy, are removed; the
 * caller's pointer is dead after this call regardless.
 */
void cleanup(db *d)
{
	avl_destroy(d->pkgtree, &destroypkg);
	close(d->file);
	free(d);
}
/*
 * Destroy a stSortedSet.
 *
 * If an element destructor was registered, it is passed to the
 * underlying avl_destroy via the file-global
 * st_sortedSet_destruct_destructElementFn trampoline (so the avl
 * item-callback signature can be adapted).  The avl_param block is
 * captured before the tree is destroyed and freed afterwards.
 * NOTE: this trampoline-through-a-global is not thread-safe.
 */
void stSortedSet_destruct(stSortedSet *sortedSet)
{
#if 0 // FIXME
	// this breaks the tests, which leak iterators. Need to revisit with
	// Benedict and also figure out how to tests this. In the mean time,
	// this is for an urgent bug.
	checkModifiable(sortedSet);
#endif
	/* save avl_param now; the tree structure is gone after avl_destroy */
	void *a = sortedSet->sortedSet->avl_param;
	if(sortedSet->destructElementFn != NULL) {
		st_sortedSet_destruct_destructElementFn = sortedSet->destructElementFn;
		avl_destroy(sortedSet->sortedSet, (void (*)(void *, void *))st_sortedSet_destructP);
	} else {
		avl_destroy(sortedSet->sortedSet, NULL);
	}
	free(a);
	free(sortedSet);
}
/*
 * Release all string-pool state owned by an MP instance: the string
 * AVL tree (if one exists), the current-string buffer, and the
 * associated length/size bookkeeping.
 */
void mp_dealloc_strings(MP mp)
{
	if (mp->strings != NULL)
		avl_destroy(mp->strings);
	mp->strings = NULL;

	mp_xfree(mp->cur_string);
	mp->cur_string = NULL;
	mp->cur_length = 0;
	mp->cur_string_size = 0;
}
/*
 * Free a product catalogue: destroy each per-letter index tree
 * (entries released via freeCodigoProduto), then the catalogue
 * itself.  Indices 0..26 are 27 trees — presumably one per letter
 * plus an extra bucket; confirm against the catalogue layout.
 * A NULL catalogue is tolerated.
 */
void freeCatalogoProdutos(CATALOGO_PRODUTOS catalogo)
{
	int i;

	if (catalogo == NULL)
		return; /* original's free(NULL) was a no-op anyway */

	for (i = 0; i <= 26; i++)
		avl_destroy(catalogo->indice[i], freeCodigoProduto);

	free(catalogo);
}
/*
 * Discard memory associated with the inverted fragments tree created
 * by report_dups() via invert_frags().  For each inode node, its
 * embedded fragments tree is drained and destroyed first, then the
 * inode node itself is freed; finally the outer tree is destroyed.
 */
static void free_invert_frags(avl_tree_t *tree)
{
	void *outer_cookie = NULL;
	inode_dup_t *inode_dup;

	while ((inode_dup = avl_destroy_nodes(tree, &outer_cookie)) != NULL) {
		void *inner_cookie = NULL;
		reference_t *ref_dup;

		while ((ref_dup = avl_destroy_nodes(&inode_dup->id_fragments,
		    &inner_cookie)) != NULL)
			free(ref_dup);
		avl_destroy(&inode_dup->id_fragments);
		free(inode_dup);
	}
	avl_destroy(tree);
}
/*
 * Drain an AVL cache tree, destroying every node via
 * fuse_avl_cache_node_destroy, then destroy the tree itself.
 */
static void fuse_avl_destroy(avl_tree_t *tree_p)
{
	void *cookie = NULL;
	fuse_avl_cache_node_t *entry;

	for (;;) {
		entry = avl_destroy_nodes(tree_p, &cookie);
		if (entry == NULL)
			break;
		fuse_avl_cache_node_destroy(entry);
	}
	avl_destroy(tree_p);
}
/*
 * Release all storage owned by a version-2 graph: node and edge AVL
 * trees (with their cancel callbacks), the flat node/edge buffers,
 * and the edge/node prioritizer trees.  Always succeeds (returns 0)
 * and clears the graph's error code.
 */
int dgl_release_V2(dglGraph_s *pgraph)
{
	pgraph->iErrno = 0;

	if (pgraph->pNodeTree)
		avl_destroy(pgraph->pNodeTree, dglTreeNodeCancel);
	if (pgraph->pEdgeTree)
		avl_destroy(pgraph->pEdgeTree, dglTreeEdgeCancel);

	/* free(NULL) is a no-op, so no guards needed here */
	free(pgraph->pNodeBuffer);
	free(pgraph->pEdgeBuffer);

	if (pgraph->edgePrioritizer.pvAVL)
		avl_destroy(pgraph->edgePrioritizer.pvAVL,
		    dglTreeEdgePri32Cancel);
	if (pgraph->nodePrioritizer.pvAVL)
		avl_destroy(pgraph->nodePrioritizer.pvAVL,
		    dglTreeNodePri32Cancel);

	return 0;
}
/*
 * Discard all memory allocations associated with the current
 * duplicates table: for each fragment node, drain and destroy its
 * claimants tree, free the fragment, then destroy the outer
 * dup_frags tree.
 */
void free_dup_state(void)
{
	void *frag_cookie = NULL;
	fragment_t *fragv;

	while ((fragv = avl_destroy_nodes(&dup_frags, &frag_cookie)) != NULL) {
		void *claim_cookie = NULL;
		claimant_t *claimv;

		while ((claimv = avl_destroy_nodes(&fragv->fr_claimants,
		    &claim_cookie)) != NULL)
			free(claimv);
		avl_destroy(&fragv->fr_claimants);
		free(fragv);
	}
	avl_destroy(&dup_frags);
}
/*
 * Tear down the SBC (direct-access) per-LU state.
 *
 * Flushes any outstanding mmap overlaps, destroys the overlap tree,
 * then frees the reserved-I/O buffer and the disk params block.
 * NOTE(review): no NULL check on l_dtype_params — callers presumably
 * guarantee it is set; confirm against the init path.
 */
void sbc_fini_common(t10_lu_common_t *lu)
{
	disk_params_t *d = lu->l_dtype_params;

	sbc_overlap_flush(d);
	/* destroy the tree before freeing the struct that contains it */
	avl_destroy(&d->d_mmap_overlaps);
	free(d->d_io_reserved);
	free(lu->l_dtype_params);
}
/*
 * Free a client catalogue: destroy each per-letter index tree
 * (entries released via freeCodigoCliente), then the catalogue
 * itself.  Indices 0..26 are 27 trees — presumably one per letter
 * plus an extra bucket; confirm against the catalogue layout.
 * A NULL catalogue is tolerated.
 */
void freeCatalogoClientes(CATALOGO_CLIENTES catalogo)
{
	int i;

	if (catalogo == NULL)
		return; /* original's free(NULL) was a no-op anyway */

	for (i = 0; i <= 26; i++)
		avl_destroy(catalogo->indice[i], freeCodigoCliente);

	free(catalogo);
}
/*
 * Free every cached microzap entry and destroy the in-core AVL tree
 * that indexes them.
 */
static void mze_destroy(zap_t *zap)
{
	void *cookie = NULL;
	mzap_ent_t *mze;

	/* explicit != NULL avoids the bare assignment-as-condition */
	while ((mze = avl_destroy_nodes(&zap->zap_m.zap_avl,
	    &cookie)) != NULL)
		kmem_free(mze, sizeof (mzap_ent_t));
	avl_destroy(&zap->zap_m.zap_avl);
}
/*
 * Tear down a vdev's I/O queue: destroy the four scheduling trees,
 * free every pre-allocated vdev_io_t on the free list, then destroy
 * the list and the queue lock.  The queue is assumed idle.
 */
void vdev_queue_fini(vdev_t *vd)
{
	vdev_queue_t *vq = &vd->vdev_queue;
	vdev_io_t *vi;

	avl_destroy(&vq->vq_deadline_tree);
	avl_destroy(&vq->vq_read_tree);
	avl_destroy(&vq->vq_write_tree);
	avl_destroy(&vq->vq_pending_tree);

	for (vi = list_head(&vq->vq_io_list); vi != NULL;
	    vi = list_head(&vq->vq_io_list)) {
		list_remove(&vq->vq_io_list, vi);
		zio_vdev_free(vi);
	}
	list_destroy(&vq->vq_io_list);

	mutex_destroy(&vq->vq_lock);
}
/*
 * Tear down a vdev's I/O queue: destroy the per-priority queued
 * trees and the active tree, free every vdev_io_t left on the free
 * list, then destroy the list and the queue lock.
 */
void vdev_queue_fini(vdev_t *vd)
{
	vdev_queue_t *vq = &vd->vdev_queue;
	vdev_io_t *vi;
	zio_priority_t prio;

	for (prio = 0; prio < ZIO_PRIORITY_NUM_QUEUEABLE; prio++)
		avl_destroy(&vq->vq_class[prio].vqc_queued_tree);
	avl_destroy(&vq->vq_active_tree);

	for (vi = list_head(&vq->vq_io_list); vi != NULL;
	    vi = list_head(&vq->vq_io_list)) {
		list_remove(&vq->vq_io_list, vi);
		zio_vdev_free(vi);
	}
	list_destroy(&vq->vq_io_list);

	mutex_destroy(&vq->vq_lock);
}
/*
 * Free the billing structure: destroy each per-letter product tree
 * (indices 0..25, entries released via libertar_avl_faturacao), then
 * free the structure itself.  A NULL argument is a no-op.
 */
void libertar_faturacao(Faturacao f)
{
	int letra;

	/* nothing to free */
	if (f == NULL)
		return;

	for (letra = 0; letra <= 25; letra++)
		avl_destroy(f->produtos[letra], libertar_avl_faturacao);

	free(f);
}
/*
 * Drain and destroy a ZIL DVA tree, freeing every node.
 */
static void zil_dva_tree_fini(avl_tree_t *t)
{
	void *cookie = NULL;
	zil_dva_node_t *zn;

	for (zn = avl_destroy_nodes(t, &cookie); zn != NULL;
	    zn = avl_destroy_nodes(t, &cookie))
		kmem_free(zn, sizeof (zil_dva_node_t));
	avl_destroy(t);
}
/*
 * Destroy a range tree.  The tree must be empty (rt_space == 0).
 * Gives the ops vector, if any, a chance to clean up its private
 * state before the root tree and the structure itself are freed.
 */
void range_tree_destroy(range_tree_t *rt)
{
	VERIFY0(rt->rt_space);

	if (rt->rt_ops != NULL)
		rt->rt_ops->rtop_destroy(rt, rt->rt_arg);

	avl_destroy(&rt->rt_root);
	kmem_free(rt, sizeof (*rt));
}
/*
 * Destroy a leaf vdev's TRIM map: flush remaining inflight frees,
 * drain the queued-frees list/tree, then destroy all trees, lists,
 * and the lock before freeing the map itself.
 *
 * Fix: the original called kmem_free(ts, ...) and THEN evaluated
 * TRIM_MAP_SDEC(tm, ts->ts_end - ts->ts_start) — reading ts after it
 * was freed (use-after-free).  The accounting macros now run before
 * the segment is freed.
 */
void trim_map_destroy(vdev_t *vd)
{
	trim_map_t *tm;
	trim_seg_t *ts;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	if (!zfs_trim_enabled)
		return;

	tm = vd->vdev_trimmap;
	if (tm == NULL)
		return;

	/*
	 * We may have been called before trim_map_vdev_commit_done()
	 * had a chance to run, so do it now to prune the remaining
	 * inflight frees.
	 */
	trim_map_vdev_commit_done(vd->vdev_spa, vd);

	mutex_enter(&tm->tm_lock);
	while ((ts = list_head(&tm->tm_head)) != NULL) {
		avl_remove(&tm->tm_queued_frees, ts);
		list_remove(&tm->tm_head, ts);
		/* update accounting while ts is still valid */
		TRIM_MAP_SDEC(tm, ts->ts_end - ts->ts_start);
		TRIM_MAP_QDEC(tm);
		kmem_free(ts, sizeof (*ts));
	}
	mutex_exit(&tm->tm_lock);

	avl_destroy(&tm->tm_queued_frees);
	avl_destroy(&tm->tm_inflight_frees);
	avl_destroy(&tm->tm_inflight_writes);
	list_destroy(&tm->tm_pending_writes);
	list_destroy(&tm->tm_head);
	mutex_destroy(&tm->tm_lock);
	kmem_free(tm, sizeof (*tm));
	vd->vdev_trimmap = NULL;
}
/* Call by 'a+b' --> new object.
 *
 * Builds a new avl_tree object containing copies of both operands:
 * duplicates self's tree into the new object, duplicates the other
 * operand's tree into a temporary handle, concatenates the two, and
 * shares (INCREFs) self's comparison function.  On any failure the
 * partially built object is torn down and NULL is returned with an
 * exception set (TypeError for a non-avl operand, avlError for an
 * aborted concatenation).
 */
Py_LOCAL(PyObject *)
avl_tree_concat(avl_tree_Object * self, PyObject * tree_object)
{
	if (!is_avl_tree_Object(tree_object)) {
		PyErr_SetString(PyExc_TypeError,
		    "Bad argument type to avl_tree_concat: expected avl_tree object");
	} else {
		avl_tree_Object *rv;
		avl_tree t1;

		rv = PyObject_NEW(avl_tree_Object, &avl_tree_Type);
		if (rv == NULL)
			goto abort;
		/* copy self's tree; rv becomes the owner */
		if ((rv->tree = avl_dup(self->tree, (void *) rv)) == NULL)
			goto clear;
		/* copy the right-hand operand into a temporary tree */
		if ((t1 = avl_dup(((avl_tree_Object *) tree_object)->tree, (void *) 0)) == NULL) {
			avl_destroy(rv->tree);
			rv->tree = NULL;
			goto clear;
		}
		avl_cat(rv->tree, t1);
		avl_destroy(t1); /* free temporary handle */
		/* new object shares the comparison callable with self */
		rv->compare_func = self->compare_func;
		Py_INCREF(rv->compare_func);
		return (PyObject *) rv;
	clear:
		PyObject_DEL(rv);
	abort:
		PyErr_SetString(avlErrorObject, "Sorry, concatenation aborted");
	}
	return NULL;
}
/*
 * Cleanup then free a zvol_state_t which was created by zvol_alloc().
 *
 * Destroys the range-lock AVL tree and its mutex, unregisters the
 * gendisk from the block layer, tears down the request queue, drops
 * the disk reference, and finally frees the state structure.  The
 * block-layer calls are ordered del_gendisk -> blk_cleanup_queue ->
 * put_disk, matching zvol_alloc()'s setup in reverse.
 */
static void zvol_free(zvol_state_t *zv)
{
	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	del_gendisk(zv->zv_disk);
	blk_cleanup_queue(zv->zv_queue);
	put_disk(zv->zv_disk);

	kmem_free(zv, sizeof (zvol_state_t));
}
/*
 * Drain a timezone-entry AVL tree, freeing each entry's target and
 * path strings along with the entry itself, then destroy the tree.
 */
static void tzcheck_free_tze_avl(avl_tree_t *t)
{
	void *cookie = NULL;
	tzent_t *ent;

	while ((ent = avl_destroy_nodes(t, &cookie)) != NULL) {
		free(ent->tze_target);
		free(ent->tze_path);
		free(ent);
	}
	avl_destroy(t);
}
/*
 * Drop all blocks owned by the block manager: free the aligned block
 * array, reset the count, destroy the block index (nodes released
 * via blidxnode_destroy), and replace it with a fresh empty index.
 */
void blman_dealloc_blocks(blman *self)
{
	/* release the block storage */
	_ALIGNED_FREE(self->blocks);
	self->blocks = NULL;
	self->blockCount = 0;

	/* rebuild an empty index in place of the old one */
	avl_destroy(self->blockIndex, &blidxnode_destroy);
	self->blockIndex = avl_create(&blockindex_cmp, NULL, &my_avl_allocator);
}
/*
 * Free all data associated with an ace_list: drain and destroy the
 * user and group acevals trees, then free the container itself.
 * A NULL list is a no-op.
 */
static void ace_list_free(ace_list_t *al)
{
	acevals_t *node;
	void *cookie;

	if (al == NULL)
		return;

	cookie = NULL;
	while ((node = avl_destroy_nodes(&al->user, &cookie)) != NULL)
		cacl_free(node, sizeof (acevals_t));
	avl_destroy(&al->user);

	cookie = NULL;
	while ((node = avl_destroy_nodes(&al->group, &cookie)) != NULL)
		cacl_free(node, sizeof (acevals_t));
	avl_destroy(&al->group);

	/* free the container itself */
	cacl_free(al, sizeof (ace_list_t));
}
/*
 * AVL item destructor for management products: destroys the 26
 * per-letter client trees (entries released via
 * libertar_clientes_string), then the product code and the node
 * itself.  The parametros argument is unused; NULL items are a no-op.
 */
void libertar_produtoGestao(void *item, void *parametros)
{
	ProdutoGestao pg = (ProdutoGestao) item;
	int letra;

	if (pg == NULL)
		return;

	for (letra = 0; letra < 26; letra++)
		avl_destroy(pg->clientes[letra], libertar_clientes_string);

	free(pg->prod);
	free(pg);
}
/*
 * AVL item destructor for clients: destroys the 26 per-letter
 * product trees (entries released via libertar_produtos), then the
 * client code and the node itself.  The parametros argument is
 * unused; NULL items are a no-op.
 */
void libertar_clientes(void *item, void *parametros)
{
	Cliente c = (Cliente) item;
	int letra;

	if (c == NULL)
		return;

	for (letra = 0; letra < 26; letra++)
		avl_destroy(c->produtos[letra], libertar_produtos);

	free(c->cli);
	free(c);
}