/*
 * Remove the node whose data equals 'key' from the AVL subtree rooted at
 * 'tree', updating *parent (the owning child pointer) in place.
 *
 * Returns 0 on success; any non-zero value propagated from a deeper
 * recursive call is returned unchanged and suppresses rebalancing.
 * If the key is absent, the recursion bottoms out on a NULL child and
 * also returns 0 (absence is not treated as an error).
 */
static int avl_remove (node* tree, node** parent, data_t key) {
    if (!tree || !parent)
        return 0;
    int err_prev;
    if (tree -> data == key) {
        /* No right child: promote the left subtree directly. */
        if (!tree -> child [right]) {
            node* tmp = tree -> child [left];
            free (tree);
            *parent = tmp;
            return 0;
        }
        /*
         * General case: replace the removed node with the minimum of its
         * right subtree (the in-order successor), detached via avl_cut_min.
         */
        node *tmp_left = tree -> child [left],
             *tmp_right = tree -> child [right],
             *tmp_min = avl_get_min (tmp_right);
        tmp_min -> child [right] = avl_cut_min (tmp_right);
        tmp_min -> child [left] = tmp_left;
        free (tree);
        /*
         * BUG FIX: the spliced-in successor acquired new children, so its
         * cached height must be recomputed and the node rebalanced, exactly
         * as is done for every ancestor on the recursive path below.
         * Previously tmp_min was installed with a stale height and without
         * a balance pass.
         */
        avl_rep_height (tmp_min);
        *parent = avl_balance (tmp_min);
        return 0;
    }
    else if (tree -> data > key)
        err_prev = avl_remove (tree -> child [left], &(tree -> child [left]), key);
    else
        err_prev = avl_remove (tree -> child [right], &(tree -> child [right]), key);
    /* Fix up height and balance on the way back out of the recursion. */
    if (!err_prev) {
        avl_rep_height (tree);
        *parent = avl_balance (tree);
    }
    return err_prev;
}
/*
 * Remove a zfs_snapentry_t from both the zfs_snapshots_by_name and
 * zfs_snapshots_by_objsetid trees.  Upon removal a reference is dropped,
 * this can result in the structure being freed if that was the last
 * remaining reference.  Caller must hold zfs_snapshot_lock as writer.
 */
static void
zfsctl_snapshot_remove(zfs_snapentry_t *se)
{
	ASSERT(RW_WRITE_HELD(&zfs_snapshot_lock));

	avl_remove(&zfs_snapshots_by_name, se);
	avl_remove(&zfs_snapshots_by_objsetid, se);
	/* Drop the reference last: this call may free 'se'. */
	zfsctl_snapshot_rele(se);
}
/*
 * Detach a metadata cache block from both the per-object cache tree and
 * the container-wide cache tree.  Does not free the cache entry itself.
 */
void remove_obj_cache(object_info_t *obj_info, ofs_block_cache_t *cache)
{
    container_handle_t *ct = obj_info->ct;

    /*
     * NOTE(review): only the container-wide tree is protected by
     * metadata_cache_lock below; presumably the per-object tree is guarded
     * by a lock the caller already holds -- confirm against callers.
     */
    avl_remove(&obj_info->caches, cache);   // remove from object
    OS_RWLOCK_WRLOCK(&ct->metadata_cache_lock);
    avl_remove(&ct->metadata_cache, cache); // remove from fs
    OS_RWLOCK_WRUNLOCK(&ct->metadata_cache_lock);
}
/*
 * Evict the specified entry from the cache: unlink it from both the
 * last-used (LRU) and offset trees, free its data buffer, and free the
 * entry itself.  Caller must hold vc_lock, and the entry must not have a
 * fill I/O in flight.
 */
static void
vdev_cache_evict(vdev_cache_t *vc, vdev_cache_entry_t *ve)
{
	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT3P(ve->ve_fill_io, ==, NULL);
	ASSERT3P(ve->ve_abd, !=, NULL);

	avl_remove(&vc->vc_lastused_tree, ve);
	avl_remove(&vc->vc_offset_tree, ve);
	abd_free(ve->ve_abd);
	kmem_free(ve, sizeof (vdev_cache_entry_t));
}
/*
 * pppt_disable_svc
 *
 * clean up all existing sessions and deregister targets from STMF
 */
static void
pppt_disable_svc(void)
{
	pppt_tgt_t *tgt, *next_tgt;
	avl_tree_t delete_target_list;

	ASSERT(pppt_global.global_svc_state == PSS_DISABLING);

	/* Private list to park targets while their async deletes complete. */
	avl_create(&delete_target_list,
	    pppt_tgt_avl_compare, sizeof (pppt_tgt_t),
	    offsetof(pppt_tgt_t, target_global_ln));

	/*
	 * Move every target off the global list and start asynchronous
	 * deletion for each while holding the global lock.
	 */
	PPPT_GLOBAL_LOCK();
	for (tgt = avl_first(&pppt_global.global_target_list); tgt != NULL;
	    tgt = next_tgt) {
		next_tgt = AVL_NEXT(&pppt_global.global_target_list, tgt);
		avl_remove(&pppt_global.global_target_list, tgt);
		avl_add(&delete_target_list, tgt);
		pppt_tgt_async_delete(tgt);
	}
	PPPT_GLOBAL_UNLOCK();

	/*
	 * Wait for each target to quiesce (refcount drained and state
	 * TS_DELETING) before destroying it.
	 */
	for (tgt = avl_first(&delete_target_list); tgt != NULL;
	    tgt = next_tgt) {
		next_tgt = AVL_NEXT(&delete_target_list, tgt);

		mutex_enter(&tgt->target_mutex);
		while ((tgt->target_refcount > 0) ||
		    (tgt->target_state != TS_DELETING)) {
			cv_wait(&tgt->target_cv, &tgt->target_mutex);
		}
		mutex_exit(&tgt->target_mutex);

		avl_remove(&delete_target_list, tgt);
		pppt_tgt_destroy(tgt);
	}

	/*
	 * Tear down global task queues, lists and STMF registrations.
	 * NOTE(review): delete_target_list itself is never avl_destroy'd
	 * here -- harmless for a stack-local tree, but verify intent.
	 */
	taskq_destroy(pppt_global.global_sess_taskq);
	taskq_destroy(pppt_global.global_dispatch_taskq);
	avl_destroy(&pppt_global.global_sess_list);
	avl_destroy(&pppt_global.global_target_list);

	(void) stmf_deregister_port_provider(pppt_global.global_pp);
	stmf_free(pppt_global.global_dbuf_store);
	pppt_global.global_dbuf_store = NULL;
	stmf_free(pppt_global.global_pp);
	pppt_global.global_pp = NULL;
}
/*
 * Evict the specified entry from the cache: unlink it from both the
 * last-used (LRU) and offset trees, free its data buffer, and free the
 * entry itself.  Caller must hold vc_lock, and the entry must not have a
 * fill I/O in flight.
 */
static void
vdev_cache_evict(vdev_cache_t *vc, vdev_cache_entry_t *ve)
{
	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT(ve->ve_fill_io == NULL);
	ASSERT(ve->ve_data != NULL);

	avl_remove(&vc->vc_lastused_tree, ve);
	avl_remove(&vc->vc_offset_tree, ve);
	zio_buf_free(ve->ve_data, VCBS);
	kmem_free(ve, sizeof (vdev_cache_entry_t));
}
/*
 * Remove a queued zio from both the deadline tree and its per-vdev tree,
 * and record its departure from the wait-queue kstat (if one exists).
 */
static void
vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	avl_remove(&vq->vq_deadline_tree, zio);
	avl_remove(zio->io_vdev_tree, zio);

	if (spa->spa_iokstat != NULL) {
		mutex_enter(&spa->spa_iokstat_lock);
		kstat_waitq_exit(spa->spa_iokstat->ks_data);
		mutex_exit(&spa->spa_iokstat_lock);
	}
}
/*
 * Remove a prefix and its entire sibling ring from the tree and free them.
 * Siblings are linked through vp_sibling; the walk stops when it comes
 * back around to the starting element (or hits NULL).
 */
static void
vg_prefix_delete(avl_root_t *rootp, vg_prefix_t *vp)
{
	vg_prefix_t *sibp, *delp;

	avl_remove(rootp, &vp->vp_item);
	sibp = vp->vp_sibling;
	while (sibp && sibp != vp) {
		avl_remove(rootp, &sibp->vp_item);
		/* Advance before freeing the current sibling. */
		delp = sibp;
		sibp = sibp->vp_sibling;
		vg_prefix_free(delp);
	}
	vg_prefix_free(vp);
}
/*
 * Remove a queued (not yet issued) zio from its priority-class and type
 * trees, and account its departure in the per-priority queue statistics
 * and the wait-queue kstat.
 */
static void
vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
	avl_remove(vdev_queue_type_tree(vq, zio->io_type), zio);

	mutex_enter(&spa->spa_iokstat_lock);
	ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_queued, >, 0);
	spa->spa_queue_stats[zio->io_priority].spa_queued--;
	if (spa->spa_iokstat != NULL)
		kstat_waitq_exit(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
}
/*
 * Issue TRIM zios for all queued frees on this leaf vdev that are old
 * enough (their txg is at least trim_txg_limit behind the syncing or
 * freeze txg).  Each eligible segment is moved from the queued tree to
 * the in-flight tree before its TRIM zio is dispatched.
 */
static void
trim_map_vdev_commit(spa_t *spa, zio_t *zio, vdev_t *vd)
{
	trim_map_t *tm = vd->vdev_trimmap;
	trim_seg_t *ts;
	uint64_t txglimit;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	if (tm == NULL)
		return;

	/*
	 * NOTE(review): if the minimum txg is smaller than trim_txg_limit
	 * this unsigned subtraction wraps around -- confirm callers
	 * guarantee otherwise.
	 */
	txglimit = MIN(spa->spa_syncing_txg, spa_freeze_txg(spa)) -
	    trim_txg_limit;

	mutex_enter(&tm->tm_lock);
	/*
	 * Loop until we send all frees up to the txglimit.
	 */
	while ((ts = trim_map_first(tm, txglimit)) != NULL) {
		list_remove(&tm->tm_head, ts);
		avl_remove(&tm->tm_queued_frees, ts);
		/* Track as in-flight until the TRIM zio completes. */
		avl_add(&tm->tm_inflight_frees, ts);
		zio_nowait(zio_trim(zio, spa, vd, ts->ts_start,
		    ts->ts_end - ts->ts_start));
	}
	mutex_exit(&tm->tm_lock);
}
/*
 * Remove the root of the AVL tree t.
 * Warning: dumps core if t is empty.
 *
 * Return value: -1 in the trivial cases (root had at most one child);
 * otherwise the change flag from the internal avl_remove, or 0 when the
 * spliced-in replacement is non-balanced.  NOTE(review): this mirrors a
 * Bool-returning variant where True means "depth shrank"; here -1 appears
 * to play that role -- confirm the caller's convention.
 */
int avl_removeroot(avl_tree* t)
{
	int ch;
	avl* a;

	if (!t->root->left) {
		if (!t->root->right) {
			/* Root was the only node. */
			t->root = 0;
			return -1;
		}
		/* No left child: the right subtree becomes the tree. */
		t->root = t->root->right;
		return -1;
	}
	if (!t->root->right) {
		/* No right child: the left subtree becomes the tree. */
		t->root = t->root->left;
		return -1;
	}
	if (t->root->balance < 0) {
		/* remove from the left subtree */
		a = t->root->left;
		while (a->right)
			a = a->right;
	} else {
		/* remove from the right subtree */
		a = t->root->right;
		while (a->left)
			a = a->left;
	}
	/*
	 * Detach the in-order neighbour 'a', then splice it in as the new
	 * root, inheriting the old root's children and balance factor.
	 */
	ch = avl_remove(t, a);
	a->left = t->root->left;
	a->right = t->root->right;
	a->balance = t->root->balance;
	t->root = a;
	if (a->balance == 0)
		return ch;
	return 0;
}
// Remove the root of the AVL tree t. // Returns True if the depth of the tree has shrunk. static Bool avl_removeroot(AvlTree* t) { Bool ch; AvlNode* n; if (!t->root->left) { if (!t->root->right) { t->root = NULL; return True; } t->root = t->root->right; return True; } if (!t->root->right) { t->root = t->root->left; return True; } if (t->root->balance < 0) { // Remove from the left subtree n = t->root->left; while (n->right) n = n->right; } else { // Remove from the right subtree n = t->root->right; while (n->left) n = n->left; } ch = avl_remove(t, n); n->left = t->root->left; n->right = t->root->right; n->balance = t->root->balance; t->root = n; if (n->balance == 0) return ch; return False; }
/*
 * Remove a queued (not yet issued) zio from its priority-class and type
 * trees, and record its departure from the per-SPA I/O-history wait-queue
 * kstat (if present).
 */
static void
vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	spa_stats_history_t *ssh = &spa->spa_stats.io_history;

	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
	avl_remove(vdev_queue_type_tree(vq, zio->io_type), zio);

	if (ssh->kstat != NULL) {
		mutex_enter(&ssh->lock);
		kstat_waitq_exit(ssh->kstat->ks_data);
		mutex_exit(&ssh->lock);
	}
}
/*
 * Tear down SMS state: stop watching the extensions directory via inotify
 * (or cancel the polling task when inotify was not in use), then free
 * every cached SMS entry in the tree.
 */
static void sms_cleanup( void )
{
	if (extensions_fd > -1) {
		if( extensions_wd > -1) {
			inotify_rm_watch(extensions_fd, extensions_wd);
			extensions_wd = -1;
		}
		set_fd_hook(extensions_fd, json_inotify_event_hook, DEL);
		close(extensions_fd);
		extensions_fd = -1;
	} else {
		/* No inotify fd: a periodic polling task was used instead. */
		task_remove(check_for_changed_sms, NULL);
	}

	/* Drain the tree; the negative constants are memory-debug tags. */
	while (json_sms_tree.items) {
		struct json_sms *sms = avl_first_item(&json_sms_tree);
		avl_remove(&json_sms_tree, sms->name, -300381);
		debugFree(sms, -300382);
	}
}
/*
 * Evict the specified entry from the cache: log the eviction, unlink the
 * entry from both the last-used (LRU) and offset trees, free its data
 * buffer, and free the entry itself.  Caller must hold vc_lock, and the
 * entry must not have a fill I/O in flight.
 */
static void
vdev_cache_evict(vdev_cache_t *vc, vdev_cache_entry_t *ve)
{
	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT(ve->ve_fill_io == NULL);
	ASSERT(ve->ve_data != NULL);

	dprintf("evicting %p, off %llx, LRU %llu, age %lu, hits %u, stale %u\n",
	    vc, ve->ve_offset, ve->ve_lastused, lbolt - ve->ve_lastused,
	    ve->ve_hits, ve->ve_missed_update);

	avl_remove(&vc->vc_lastused_tree, ve);
	avl_remove(&vc->vc_offset_tree, ve);
	zio_buf_free(ve->ve_data, VCBS);
	kmem_free(ve, sizeof (vdev_cache_entry_t));
}
/*
 * Traverse all mounted snapshots and attempt to unmount them.  This
 * is best effort, on failure EEXIST is returned and count will be set
 * to the number of file snapshots which could not be unmounted.
 */
int
zfsctl_unmount_snapshots(zfs_sb_t *zsb, int flags, int *count)
{
	zfs_snapentry_t *sep, *next;
	int error = 0;

	*count = 0;

	ASSERT(zsb->z_ctldir != NULL);
	mutex_enter(&zsb->z_ctldir_lock);

	sep = avl_first(&zsb->z_ctldir_snaps);
	while (sep != NULL) {
		/* Capture the successor before the tree is modified. */
		next = AVL_NEXT(&zsb->z_ctldir_snaps, sep);
		/*
		 * Remove the entry first, then drop the lock across the
		 * (possibly blocking) unmount; on EBUSY it is re-added.
		 */
		avl_remove(&zsb->z_ctldir_snaps, sep);
		mutex_exit(&zsb->z_ctldir_lock);
		error = __zfsctl_unmount_snapshot(sep, flags);
		mutex_enter(&zsb->z_ctldir_lock);

		if (error == EBUSY) {
			avl_add(&zsb->z_ctldir_snaps, sep);
			(*count)++;
		} else {
			zfsctl_sep_free(sep);
		}

		sep = next;
	}

	mutex_exit(&zsb->z_ctldir_lock);

	return ((*count > 0) ? EEXIST : 0);
}
/*
 * Account the completion of an active (issued) zio: decrement the active
 * count for its priority class, unlink it from the active tree, and (on
 * illumos) update the per-SPA I/O kstats including read/write byte
 * counters.  Caller must hold vq_lock.
 */
static void
vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	ASSERT(MUTEX_HELD(&vq->vq_lock));
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	vq->vq_class[zio->io_priority].vqc_active--;
	avl_remove(&vq->vq_active_tree, zio);

#ifdef illumos
	mutex_enter(&spa->spa_iokstat_lock);
	ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_active, >, 0);
	spa->spa_queue_stats[zio->io_priority].spa_active--;
	if (spa->spa_iokstat != NULL) {
		kstat_io_t *ksio = spa->spa_iokstat->ks_data;

		/*
		 * Use the local 'ksio' consistently for every kstat update
		 * (previously kstat_runq_exit re-derived the same pointer
		 * from spa->spa_iokstat->ks_data).
		 */
		kstat_runq_exit(ksio);
		if (zio->io_type == ZIO_TYPE_READ) {
			ksio->reads++;
			ksio->nread += zio->io_size;
		} else if (zio->io_type == ZIO_TYPE_WRITE) {
			ksio->writes++;
			ksio->nwritten += zio->io_size;
		}
	}
	mutex_exit(&spa->spa_iokstat_lock);
#endif
}
/*
 * Account the completion of an active (issued) zio: decrement the active
 * count for its priority class, unlink it from the active tree, and (on
 * Linux) update the per-SPA I/O-history kstat including read/write byte
 * counters.  Caller must hold vq_lock.
 */
static void
vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
{
#ifdef LINUX
	spa_t *spa = zio->io_spa;
	spa_stats_history_t *ssh = &spa->spa_stats.io_history;
#endif

	ASSERT(MUTEX_HELD(&vq->vq_lock));
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	vq->vq_class[zio->io_priority].vqc_active--;
	avl_remove(&vq->vq_active_tree, zio);

#ifdef LINUX
	if (ssh->kstat != NULL) {
		kstat_io_t *ksio = ssh->kstat->ks_data;

		mutex_enter(&ssh->lock);
		kstat_runq_exit(ksio);
		if (zio->io_type == ZIO_TYPE_READ) {
			ksio->reads++;
			ksio->nread += zio->io_size;
		} else if (zio->io_type == ZIO_TYPE_WRITE) {
			ksio->writes++;
			ksio->nwritten += zio->io_size;
		}
		mutex_exit(&ssh->lock);
	}
#endif
}
/*
 * Delete the event identified by (eventId, server) and free its storage.
 * The composite key packs the server index into the id by multiplying by
 * MAX_HTSP_SERVERS.  Missing events are silently ignored.
 */
void event_delete(uint32_t eventId, int server)
{
	eventId = eventId * MAX_HTSP_SERVERS + server;

	pthread_mutex_lock(&events_mutex);
#ifdef USE_AVL
	struct event_t* event = event_get_nolock(eventId);
	//fprintf(stderr,"DELETING EVENT:\n");
	//event_dump(event);
	if (event) {
		avl_remove(&events,(struct avl*)event);
		event_free_items(event);
		free(event);
	}
#else
	/* Flat-array fallback when AVL support is compiled out. */
	if (eventId < MAX_EVENT_ID) {
		if (events[eventId]) {
			event_free(events[eventId]);
			events[eventId] = NULL;
		}
	}
#endif
	pthread_mutex_unlock(&events_mutex);
}
/*
 * Completion hook for a vdev-queued zio: optionally apply injected delay,
 * unlink the zio from the pending tree, record completion timestamps,
 * and issue up to zfs_vdev_ramp_rate replacement I/Os.
 */
void
vdev_queue_io_done(zio_t *zio)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	int i;

	/* Fault injection: optionally delay this completion. */
	if (zio_injection_enabled)
		delay(SEC_TO_TICK(zio_handle_io_delay(zio)));

	mutex_enter(&vq->vq_lock);

	avl_remove(&vq->vq_pending_tree, zio);

	zio->io_delta = gethrtime() - zio->io_timestamp;
	vq->vq_io_complete_ts = gethrtime();
	vq->vq_io_delta_ts = vq->vq_io_complete_ts - zio->io_timestamp;

	for (i = 0; i < zfs_vdev_ramp_rate; i++) {
		zio_t *nio = vdev_queue_io_to_issue(vq, zfs_vdev_max_pending);
		if (nio == NULL) {
			break;
		}
		/* Drop the queue lock while handing off the new I/O. */
		mutex_exit(&vq->vq_lock);
		if (nio->io_done == vdev_queue_agg_io_done) {
			zio_nowait(nio);
		} else {
			zio_vdev_io_reissue(nio);
			zio_execute(nio);
		}
		mutex_enter(&vq->vq_lock);
	}

	mutex_exit(&vq->vq_lock);
}
/*
 * Remove the range [start, end) from an existing trim segment.  Depending
 * on how the range overlaps the segment this either splits it in two,
 * trims one edge, or removes the segment entirely.  Caller must hold
 * tm_lock.
 */
static void
trim_map_segment_remove(trim_map_t *tm, trim_seg_t *ts, uint64_t start,
    uint64_t end)
{
	trim_seg_t *nts;
	boolean_t left_over, right_over;

	ASSERT(MUTEX_HELD(&tm->tm_lock));

	left_over = (ts->ts_start < start);
	right_over = (ts->ts_end > end);

	TRIM_MAP_SDEC(tm, end - start);
	if (left_over && right_over) {
		/* Range is interior: split the segment in two. */
		nts = kmem_alloc(sizeof (*nts), KM_SLEEP);
		nts->ts_start = end;
		nts->ts_end = ts->ts_end;
		nts->ts_txg = ts->ts_txg;
		nts->ts_time = ts->ts_time;
		ts->ts_end = start;
		avl_insert_here(&tm->tm_queued_frees, nts, ts, AVL_AFTER);
		list_insert_after(&tm->tm_head, ts, nts);
		TRIM_MAP_QINC(tm);
	} else if (left_over) {
		/* Range covers the tail: shorten from the right. */
		ts->ts_end = start;
	} else if (right_over) {
		/* Range covers the head: shorten from the left. */
		ts->ts_start = end;
	} else {
		/* Range covers the entire segment: drop and free it. */
		avl_remove(&tm->tm_queued_frees, ts);
		list_remove(&tm->tm_head, ts);
		TRIM_MAP_QDEC(tm);
		kmem_free(ts, sizeof (*ts));
	}
}
/*
 * Write-completion hook: drop the zio from the in-flight-writes tree if
 * it was ever linked into it.
 */
void
trim_map_write_done(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	trim_map_t *tm = vd->vdev_trimmap;

	/*
	 * Don't check for vdev_notrim, since the write could have
	 * started before vdev_notrim was set.
	 */
	if (!zfs_trim_enabled || tm == NULL)
		return;

	mutex_enter(&tm->tm_lock);
	/*
	 * Don't fail if the write isn't in the tree, since the write
	 * could have started after vdev_notrim was set.  Membership is
	 * detected by inspecting the embedded AVL node directly: a linked
	 * node has a child or a parent, or is the tree's root.
	 */
	if (zio->io_trim_node.avl_child[0] ||
	    zio->io_trim_node.avl_child[1] ||
	    AVL_XPARENT(&zio->io_trim_node) ||
	    tm->tm_inflight_writes.avl_root == &zio->io_trim_node)
		avl_remove(&tm->tm_inflight_writes, zio);
	mutex_exit(&tm->tm_lock);
}
/*
 * Unmount the named snapshot if it is currently mounted.  Returns ENOENT
 * when no entry with that name exists, otherwise the unmount result (the
 * entry is re-added on EBUSY, freed on any other outcome).
 */
int
zfsctl_unmount_snapshot(zfs_sb_t *zsb, char *name, int flags)
{
	zfs_snapentry_t search;
	zfs_snapentry_t *sep;
	int error = 0;

	mutex_enter(&zsb->z_ctldir_lock);

	search.se_name = name;
	sep = avl_find(&zsb->z_ctldir_snaps, &search, NULL);
	if (sep) {
		/* Drop the lock across the (possibly blocking) unmount. */
		avl_remove(&zsb->z_ctldir_snaps, sep);
		mutex_exit(&zsb->z_ctldir_lock);

		error = __zfsctl_unmount_snapshot(sep, flags);

		mutex_enter(&zsb->z_ctldir_lock);
		if (error == EBUSY)
			avl_add(&zsb->z_ctldir_snaps, sep);
		else
			zfsctl_sep_free(sep);
	} else {
		error = ENOENT;
	}

	mutex_exit(&zsb->z_ctldir_lock);
	ASSERT3S(error, >=, 0);

	return (error);
}
/*
 * Iterator callback that collects snapshot handles into an AVL tree for
 * sorting.  Always returns 0 so iteration continues over all snapshots.
 */
static int
zfs_sort_snaps(zfs_handle_t *zhp, void *data)
{
	avl_tree_t *avl = data;
	zfs_node_t *node;
	zfs_node_t search;

	search.zn_handle = zhp;
	node = avl_find(avl, &search, NULL);
	if (node) {
		/*
		 * If this snapshot was renamed while we were creating the
		 * AVL tree, it's possible that we already inserted it under
		 * its old name. Remove the old handle before adding the new
		 * one.
		 */
		zfs_close(node->zn_handle);
		avl_remove(avl, node);
		free(node);
	}

	node = zfs_alloc(zhp->zfs_hdl, sizeof (zfs_node_t));
	node->zn_handle = zhp;
	avl_add(avl, node);

	return (0);
}
/*
 * Completion hook for a vdev-queued zio: unlink it from the pending tree
 * and issue up to zfs_vdev_ramp_rate replacement I/Os from the queue.
 */
void
vdev_queue_io_done(zio_t *zio)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;

	mutex_enter(&vq->vq_lock);

	avl_remove(&vq->vq_pending_tree, zio);

	for (int i = 0; i < zfs_vdev_ramp_rate; i++) {
		zio_t *nio = vdev_queue_io_to_issue(vq, zfs_vdev_max_pending);
		if (nio == NULL)
			break;
		/* Drop the queue lock while handing off the new I/O. */
		mutex_exit(&vq->vq_lock);
		if (nio->io_done == vdev_queue_agg_io_done) {
			zio_nowait(nio);
		} else {
			zio_vdev_io_reissue(nio);
			zio_execute(nio);
		}
		mutex_enter(&vq->vq_lock);
	}

	mutex_exit(&vq->vq_lock);
}
/*
 * Release an AVL-backed MDT index: drain and free every tree node, then
 * tear down the tree, the index name, the node slice allocator (if any),
 * and the index object itself.
 */
static void mdt_idx_free(ACL_MDT_IDX *idx)
{
	ACL_MDT_IDX_AVL *avl_idx = (ACL_MDT_IDX_AVL *) idx;
	TREE_NODE *node;

	/* Pop nodes off the front of the tree until it is empty. */
	while ((node = (TREE_NODE *) avl_first(&avl_idx->avl)) != NULL) {
		avl_remove(&avl_idx->avl, node);
		/* Keys are owned by the index unless key-memory reuse is on. */
		if (!(idx->flag & ACL_MDT_FLAG_KMR))
			acl_myfree(node->key.key);
		if (avl_idx->slice)
			acl_slice_free2(avl_idx->slice, node);
		else
			acl_myfree(node);
	}

	avl_destroy(&avl_idx->avl);
	acl_myfree(idx->name);
	if (avl_idx->slice)
		acl_slice_destroy(avl_idx->slice);
	acl_myfree(avl_idx);
}
/*
 * Mark a task as done.  Returns PPPT_STATUS_ABORTED if the task was
 * already aborted, success otherwise (repeat calls are idempotent).
 * Only the PTS_ACTIVE -> PTS_DONE transition removes the task from its
 * session's task list, and that removal happens after pt_mutex is
 * dropped, taking only ps_mutex.
 */
pppt_status_t
pppt_task_done(pppt_task_t *ptask)
{
	pppt_status_t pppt_status = PPPT_STATUS_SUCCESS;
	boolean_t remove = B_FALSE;

	mutex_enter(&ptask->pt_mutex);

	switch (ptask->pt_state) {
	case PTS_ACTIVE:
		remove = B_TRUE;
		pppt_task_update_state(ptask, PTS_DONE);
		break;
	case PTS_ABORTED:
		pppt_status = PPPT_STATUS_ABORTED;
		break;
	case PTS_DONE:
		/* Repeat calls are OK. Do nothing, return success */
		break;
	default:
		ASSERT(0);
	}

	mutex_exit(&ptask->pt_mutex);

	if (remove) {
		mutex_enter(&ptask->pt_sess->ps_mutex);
		avl_remove(&ptask->pt_sess->ps_task_list, ptask);
		mutex_exit(&ptask->pt_sess->ps_mutex);
	}

	return (pppt_status);
}
/*
 * Unlink a microzap entry from the in-core AVL mirror and free it.
 * Caller must hold zap_rwlock as writer, and the zap must be in micro
 * form.
 */
static void
mze_remove(zap_t *zap, mzap_ent_t *mze)
{
	ASSERT(zap->zap_ismicro);
	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));

	avl_remove(&zap->zap_m.zap_avl, mze);
	kmem_free(mze, sizeof (mzap_ent_t));
}
/*
 * Drain the public-key-batch tree, freeing every batch except the one the
 * caller wants to keep.  Every batch, including save_pkb, is unlinked
 * from the tree; only save_pkb escapes being freed.
 */
void
free_pkb_tree(avl_root_t *rootp, pubkeybatch_t *save_pkb)
{
	for (;;) {
		pubkeybatch_t *batch = pubkeybatch_avl_first(rootp);

		if (batch == NULL)
			break;
		avl_remove(rootp, &batch->avlent);
		/* The saved batch is unlinked but intentionally not freed. */
		if (batch != save_pkb)
			server_batch_free(batch);
	}
}
/*
 * Free all in-memory resources attached to a container handle and unlink
 * it from the global container list.
 *
 * NOTE(review): the handle is removed from g_container_list only AFTER
 * its trees and locks are destroyed -- confirm no other thread can reach
 * 'ct' through the list at this point.  The internal AVL trees are
 * assumed to be empty here (avl_destroy does not free members).
 */
void destroy_container_resource(container_handle_t *ct)
{
	avl_destroy(&ct->obj_info_list);
	avl_destroy(&ct->metadata_cache);
	OS_RWLOCK_DESTROY(&ct->ct_lock);
	OS_RWLOCK_DESTROY(&ct->metadata_cache_lock);
	avl_remove(g_container_list, ct);
	OS_FREE(ct);
}