/*
 * Given a tree of fragment_ts, each element of which has an integral
 * sub-tree of claimant_ts, produce a tree of inode_dup_ts, each element
 * of which has an integral sub-tree of reference_ts.
 */
static void
invert_frags(avl_tree_t *source, avl_tree_t *target)
{
	fragment_t *src_frag;
	claimant_t *src_claim;
	inode_dup_t *tgt_inode;
	inode_dup_t tgt_inode_key;
	reference_t *tgt_ref;
	reference_t tgt_ref_key;
	avl_index_t where;

	avl_create(target, by_ino_cmp, sizeof (inode_dup_t),
	    OFFSETOF(inode_dup_t, id_avl));

	src_frag = avl_first(source);
	while (src_frag != NULL) {
		src_claim = avl_first(&src_frag->fr_claimants);
		while (src_claim != NULL) {
			/*
			 * Have we seen this inode before?
			 */
			tgt_inode_key.id_ino = src_claim->cl_inode;
			tgt_inode = avl_find(target, (void *)&tgt_inode_key,
			    &where);
			if (tgt_inode == NULL) {
				/*
				 * No, so set up a record for it.
				 */
				tgt_inode = new_inode_dup(src_claim->cl_inode);
				avl_insert(target, (void *)tgt_inode, where);
			}
			/*
			 * Now, how about this logical fragment?  In
			 * theory, we should never see a duplicate, since
			 * a given lfn only exists once for a given inode.
			 * As such, we ignore duplicate hits.
			 */
			tgt_ref_key.ref_lfn = src_claim->cl_lfn;
			tgt_ref = avl_find(&tgt_inode->id_fragments,
			    (void *)&tgt_ref_key, &where);
			if (tgt_ref == NULL) {
				/*
				 * Haven't seen it, add it.
				 */
				tgt_ref = (reference_t *)malloc(
				    sizeof (reference_t));
				if (tgt_ref == NULL)
					errexit("Out of memory in "
					    "invert_frags\n");
				tgt_ref->ref_lfn = src_claim->cl_lfn;
				tgt_ref->ref_pfn = src_frag->fr_pfn;
				avl_insert(&tgt_inode->id_fragments,
				    (void *)tgt_ref, where);
			}
			src_claim = AVL_NEXT(&src_frag->fr_claimants,
			    src_claim);
		}
		src_frag = AVL_NEXT(source, src_frag);
	}
}
/*
 * pppt_disable_svc
 *
 * clean up all existing sessions and deregister targets from STMF
 */
static void
pppt_disable_svc(void)
{
	pppt_tgt_t *tgt, *next_tgt;
	avl_tree_t delete_target_list;

	ASSERT(pppt_global.global_svc_state == PSS_DISABLING);

	avl_create(&delete_target_list,
	    pppt_tgt_avl_compare, sizeof (pppt_tgt_t),
	    offsetof(pppt_tgt_t, target_global_ln));

	PPPT_GLOBAL_LOCK();
	for (tgt = avl_first(&pppt_global.global_target_list);
	    tgt != NULL;
	    tgt = next_tgt) {
		next_tgt = AVL_NEXT(&pppt_global.global_target_list, tgt);
		avl_remove(&pppt_global.global_target_list, tgt);
		avl_add(&delete_target_list, tgt);
		pppt_tgt_async_delete(tgt);
	}
	PPPT_GLOBAL_UNLOCK();

	for (tgt = avl_first(&delete_target_list);
	    tgt != NULL;
	    tgt = next_tgt) {
		next_tgt = AVL_NEXT(&delete_target_list, tgt);
		mutex_enter(&tgt->target_mutex);
		while ((tgt->target_refcount > 0) ||
		    (tgt->target_state != TS_DELETING)) {
			cv_wait(&tgt->target_cv, &tgt->target_mutex);
		}
		mutex_exit(&tgt->target_mutex);

		avl_remove(&delete_target_list, tgt);
		pppt_tgt_destroy(tgt);
	}

	taskq_destroy(pppt_global.global_sess_taskq);
	taskq_destroy(pppt_global.global_dispatch_taskq);

	avl_destroy(&pppt_global.global_sess_list);
	avl_destroy(&pppt_global.global_target_list);

	(void) stmf_deregister_port_provider(pppt_global.global_pp);

	stmf_free(pppt_global.global_dbuf_store);
	pppt_global.global_dbuf_store = NULL;

	stmf_free(pppt_global.global_pp);
	pppt_global.global_pp = NULL;
}
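/*
 * The two loops above rely on a standard libavl idiom: avl_remove()
 * unlinks the node, so the successor must be captured with AVL_NEXT()
 * *before* the removal.  A minimal userland sketch of that idiom,
 * assuming a hypothetical node_t keyed by an integer (all names here
 * are illustrative, not part of the code above):
 */
#include <sys/types.h>
#include <sys/avl.h>
#include <stdlib.h>

typedef struct node {
	int		n_key;
	avl_node_t	n_link;		/* embedded AVL linkage */
} node_t;

/* Remove and free every node matching pred() during an in-order walk. */
static void
prune(avl_tree_t *tree, boolean_t (*pred)(const node_t *))
{
	node_t *n, *next;

	for (n = avl_first(tree); n != NULL; n = next) {
		next = AVL_NEXT(tree, n);	/* capture before unlink */
		if (pred(n)) {
			avl_remove(tree, n);
			free(n);
		}
	}
}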
static int
vdev_initialize_ranges(vdev_t *vd, abd_t *data)
{
	avl_tree_t *rt = &vd->vdev_initialize_tree->rt_root;

	for (range_seg_t *rs = avl_first(rt); rs != NULL;
	    rs = AVL_NEXT(rt, rs)) {
		uint64_t size = rs->rs_end - rs->rs_start;

		/* Split range into legally-sized physical chunks */
		uint64_t writes_required =
		    ((size - 1) / zfs_initialize_chunk_size) + 1;

		for (uint64_t w = 0; w < writes_required; w++) {
			int error;

			error = vdev_initialize_write(vd,
			    VDEV_LABEL_START_SIZE + rs->rs_start +
			    (w * zfs_initialize_chunk_size),
			    MIN(size - (w * zfs_initialize_chunk_size),
			    zfs_initialize_chunk_size), data);
			if (error != 0)
				return (error);
		}
	}
	return (0);
}
static void
sa_find_layout(objset_t *os, uint64_t hash, sa_attr_type_t *attrs,
    int count, dmu_tx_t *tx, sa_lot_t **lot)
{
	sa_lot_t *tb, tbsearch;
	avl_index_t loc;
	sa_os_t *sa = os->os_sa;
	boolean_t found = B_FALSE;

	mutex_enter(&sa->sa_lock);
	tbsearch.lot_hash = hash;
	tbsearch.lot_instance = 0;
	tb = avl_find(&sa->sa_layout_hash_tree, &tbsearch, &loc);
	if (tb) {
		for (; tb && tb->lot_hash == hash;
		    tb = AVL_NEXT(&sa->sa_layout_hash_tree, tb)) {
			if (sa_layout_equal(tb, attrs, count) == 0) {
				found = B_TRUE;
				break;
			}
		}
	}
	if (!found) {
		tb = sa_add_layout_entry(os, attrs, count,
		    avl_numnodes(&sa->sa_layout_num_tree), hash, B_TRUE, tx);
	}
	mutex_exit(&sa->sa_lock);
	*lot = tb;
}
/*
 * Iterate the cache using the given cursor.
 *
 * Data is copied to the given buffer ('data') using the copy function
 * specified at cache creation time.
 *
 * If the cache is modified while an iteration is in progress it causes
 * the iteration to finish prematurely.  This is to avoid the need to lock
 * the whole cache while it is being iterated.
 */
boolean_t
smb_cache_iterate(smb_cache_t *chandle, smb_cache_cursor_t *cursor, void *data)
{
	smb_cache_node_t *node;

	assert(data);

	if (smb_cache_rdlock(chandle) != 0)
		return (B_FALSE);

	if (cursor->cc_sequence != chandle->ch_sequence) {
		smb_cache_unlock(chandle);
		return (B_FALSE);
	}

	if (cursor->cc_next == NULL)
		node = avl_first(&chandle->ch_cache);
	else
		node = AVL_NEXT(&chandle->ch_cache, cursor->cc_next);

	if (node != NULL)
		chandle->ch_copy(node->cn_data, data, chandle->ch_datasz);

	cursor->cc_next = node;
	smb_cache_unlock(chandle);

	return (node != NULL);
}
static uint32_t
mze_find_unused_cd(zap_t *zap, uint64_t hash)
{
	mzap_ent_t mze_tofind;
	mzap_ent_t *mze;
	avl_index_t idx;
	avl_tree_t *avl = &zap->zap_m.zap_avl;
	uint32_t cd;

	ASSERT(zap->zap_ismicro);
	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));

	mze_tofind.mze_hash = hash;
	mze_tofind.mze_phys.mze_cd = 0;

	cd = 0;
	for (mze = avl_find(avl, &mze_tofind, &idx);
	    mze && mze->mze_hash == hash; mze = AVL_NEXT(avl, mze)) {
		if (mze->mze_phys.mze_cd != cd)
			break;
		cd++;
	}

	return (cd);
}
static mzap_ent_t *
mze_find(zap_t *zap, const char *name, uint64_t hash)
{
	mzap_ent_t mze_tofind;
	mzap_ent_t *mze;
	avl_index_t idx;
	avl_tree_t *avl = &zap->zap_m.zap_avl;

	ASSERT(zap->zap_ismicro);
	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
	ASSERT3U(zap_hash(zap, name), ==, hash);

	if (strlen(name) >= sizeof (mze_tofind.mze_phys.mze_name))
		return (NULL);

	mze_tofind.mze_hash = hash;
	mze_tofind.mze_phys.mze_cd = 0;

	mze = avl_find(avl, &mze_tofind, &idx);
	if (mze == NULL)
		mze = avl_nearest(avl, idx, AVL_AFTER);
	for (; mze && mze->mze_hash == hash; mze = AVL_NEXT(avl, mze)) {
		if (strcmp(name, mze->mze_phys.mze_name) == 0)
			return (mze);
	}
	return (NULL);
}
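/*
 * mze_find() shows the standard "range scan" idiom: probe with
 * avl_find(), and on a miss convert the returned insertion point into
 * the first node sorting after the search key via avl_nearest(...,
 * AVL_AFTER), then walk forward with AVL_NEXT() while the key still
 * matches.  A minimal sketch of the same pattern over a hypothetical
 * tree of rec_t entries sorted by (r_hash, r_seq); all names are
 * illustrative:
 */
#include <sys/types.h>
#include <sys/avl.h>

typedef struct rec {
	uint64_t	r_hash;		/* primary key */
	uint64_t	r_seq;		/* secondary key */
	avl_node_t	r_link;
} rec_t;

/* Visit every rec_t whose r_hash equals the given hash. */
static void
visit_hash_run(avl_tree_t *tree, uint64_t hash, void (*visit)(const rec_t *))
{
	rec_t search, *r;
	avl_index_t where;

	search.r_hash = hash;
	search.r_seq = 0;		/* lowest possible secondary key */

	r = avl_find(tree, &search, &where);
	if (r == NULL)
		r = avl_nearest(tree, where, AVL_AFTER);

	for (; r != NULL && r->r_hash == hash; r = AVL_NEXT(tree, r))
		visit(r);
}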
/*
 * Traverse all mounted snapshots and attempt to unmount them.  This
 * is best effort, on failure EEXIST is returned and count will be set
 * to the number of snapshots which could not be unmounted.
 */
int
zfsctl_unmount_snapshots(zfs_sb_t *zsb, int flags, int *count)
{
	zfs_snapentry_t *sep, *next;
	int error = 0;

	*count = 0;

	ASSERT(zsb->z_ctldir != NULL);
	mutex_enter(&zsb->z_ctldir_lock);

	sep = avl_first(&zsb->z_ctldir_snaps);
	while (sep != NULL) {
		next = AVL_NEXT(&zsb->z_ctldir_snaps, sep);
		avl_remove(&zsb->z_ctldir_snaps, sep);
		mutex_exit(&zsb->z_ctldir_lock);

		error = __zfsctl_unmount_snapshot(sep, flags);

		mutex_enter(&zsb->z_ctldir_lock);
		if (error == EBUSY) {
			avl_add(&zsb->z_ctldir_snaps, sep);
			(*count)++;
		} else {
			zfsctl_sep_free(sep);
		}

		sep = next;
	}

	mutex_exit(&zsb->z_ctldir_lock);

	return ((*count > 0) ? EEXIST : 0);
}
/*
 * Update cache contents upon write completion.
 */
void
vdev_cache_write(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, ve_search;
	uint64_t io_start = zio->io_offset;
	uint64_t io_end = io_start + zio->io_size;
	uint64_t min_offset = P2ALIGN(io_start, VCBS);
	uint64_t max_offset = P2ROUNDUP(io_end, VCBS);
	avl_index_t where;

	ASSERT(zio->io_type == ZIO_TYPE_WRITE);

	mutex_enter(&vc->vc_lock);

	ve_search.ve_offset = min_offset;
	ve = avl_find(&vc->vc_offset_tree, &ve_search, &where);

	if (ve == NULL)
		ve = avl_nearest(&vc->vc_offset_tree, where, AVL_AFTER);

	while (ve != NULL && ve->ve_offset < max_offset) {
		uint64_t start = MAX(ve->ve_offset, io_start);
		uint64_t end = MIN(ve->ve_offset + VCBS, io_end);

		if (ve->ve_fill_io != NULL) {
			ve->ve_missed_update = 1;
		} else {
			bcopy((char *)zio->io_data + start - io_start,
			    ve->ve_data + start - ve->ve_offset, end - start);
		}
		ve = AVL_NEXT(&vc->vc_offset_tree, ve);
	}
	mutex_exit(&vc->vc_lock);
}
/*
 * Dump the duplicates table in a relatively user-friendly form.
 * The idea is that the output can be useful when trying to manually
 * work out which block belongs to which of the claiming inodes.
 *
 * What we have is a tree of duplicates indexed by physical
 * fragment number.  What we want to report is:
 *
 * Inode %d:
 *	Logical Offset 0x%08llx, Physical Fragment %d
 *	Logical Offsets 0x%08llx - 0x%08llx, Physical Fragments %d - %d
 *	...
 * Inode %d:
 *	Logical Offsets 0x%08llx - 0x%08llx, Physical Fragments %d - %d
 *	...
 */
int
report_dups(int quiet)
{
	int overlaps;
	inode_dup_t *inode;
	fragment_t *dup;
	avl_tree_t inode_frags;

	overlaps = 0;
	ASSERT(have_dups());
	/*
	 * Figure out how many actual dups are still around.
	 * This tells us whether or not we can mark the
	 * filesystem clean.
	 */
	dup = avl_first(&dup_frags);
	while (dup != NULL) {
		if (avl_numnodes(&dup->fr_claimants) > 1) {
			overlaps++;
			break;
		}
		dup = AVL_NEXT(&dup_frags, dup);
	}

	/*
	 * Now report on every object that still exists that
	 * had *any* dups associated with it.
	 */
	if (!quiet) {
		(void) puts("\nSome blocks that were found to be in "
		    "multiple files are still\nassigned to "
		    "file(s).\nFragments sorted by inode and "
		    "logical offsets:");

		invert_frags(&dup_frags, &inode_frags);
		inode = avl_first(&inode_frags);
		while (inode != NULL) {
			report_inode_dups(inode);
			inode = AVL_NEXT(&inode_frags, inode);
		}
		(void) printf("\n");

		free_invert_frags(&inode_frags);
	}

	return (overlaps);
}
void
range_tree_walk(range_tree_t *rt, range_tree_func_t *func, void *arg)
{
	range_seg_t *rs;

	for (rs = avl_first(&rt->rt_root); rs; rs = AVL_NEXT(&rt->rt_root, rs))
		func(arg, rs->rs_start, rs->rs_end - rs->rs_start);
}
static sa_lot_t *
sa_add_layout_entry(objset_t *os, sa_attr_type_t *attrs, int attr_count,
    uint64_t lot_num, uint64_t hash, boolean_t zapadd, dmu_tx_t *tx)
{
	sa_os_t *sa = os->os_sa;
	sa_lot_t *tb, *findtb;
	int i;
	avl_index_t loc;

	ASSERT(MUTEX_HELD(&sa->sa_lock));
	tb = kmem_zalloc(sizeof (sa_lot_t), KM_SLEEP);
	tb->lot_attr_count = attr_count;
	tb->lot_attrs = kmem_alloc(sizeof (sa_attr_type_t) * attr_count,
	    KM_SLEEP);
	bcopy(attrs, tb->lot_attrs, sizeof (sa_attr_type_t) * attr_count);
	tb->lot_num = lot_num;
	tb->lot_hash = hash;
	tb->lot_instance = 0;

	if (zapadd) {
		char attr_name[8];

		if (sa->sa_layout_attr_obj == 0) {
			sa->sa_layout_attr_obj = zap_create(os,
			    DMU_OT_SA_ATTR_LAYOUTS, DMU_OT_NONE, 0, tx);
			VERIFY(zap_add(os, sa->sa_master_obj, SA_LAYOUTS, 8, 1,
			    &sa->sa_layout_attr_obj, tx) == 0);
		}

		(void) snprintf(attr_name, sizeof (attr_name),
		    "%d", (int)lot_num);
		VERIFY(0 == zap_update(os, os->os_sa->sa_layout_attr_obj,
		    attr_name, 2, attr_count, attrs, tx));
	}

	list_create(&tb->lot_idx_tab, sizeof (sa_idx_tab_t),
	    offsetof(sa_idx_tab_t, sa_next));

	for (i = 0; i != attr_count; i++) {
		if (sa->sa_attr_table[tb->lot_attrs[i]].sa_length == 0)
			tb->lot_var_sizes++;
	}

	avl_add(&sa->sa_layout_num_tree, tb);

	/* verify we don't have a hash collision */
	if ((findtb = avl_find(&sa->sa_layout_hash_tree, tb, &loc)) != NULL) {
		for (; findtb && findtb->lot_hash == hash;
		    findtb = AVL_NEXT(&sa->sa_layout_hash_tree, findtb)) {
			if (findtb->lot_instance != tb->lot_instance)
				break;
			tb->lot_instance++;
		}
	}
	avl_add(&sa->sa_layout_hash_tree, tb);
	return (tb);
}
/*
 * NLM_FREE_ALL, NLM4_FREE_ALL
 *
 * Destroy all lock state for the calling client.
 */
void
nlm_do_free_all(nlm4_notify *argp, void *res, struct svc_req *sr)
{
	struct nlm_globals *g;
	struct nlm_host_list host_list;
	struct nlm_host *hostp;

	TAILQ_INIT(&host_list);
	g = zone_getspecific(nlm_zone_key, curzone);

	/* Serialize calls to clean locks. */
	mutex_enter(&g->clean_lock);

	/*
	 * Find all hosts that have the given node name and put them on a
	 * local list.
	 */
	mutex_enter(&g->lock);
	for (hostp = avl_first(&g->nlm_hosts_tree); hostp != NULL;
	    hostp = AVL_NEXT(&g->nlm_hosts_tree, hostp)) {
		if (strcasecmp(hostp->nh_name, argp->name) == 0) {
			/*
			 * If needed take the host out of the idle list since
			 * we are taking a reference.
			 */
			if (hostp->nh_flags & NLM_NH_INIDLE) {
				TAILQ_REMOVE(&g->nlm_idle_hosts, hostp,
				    nh_link);
				hostp->nh_flags &= ~NLM_NH_INIDLE;
			}
			hostp->nh_refs++;

			TAILQ_INSERT_TAIL(&host_list, hostp, nh_link);
		}
	}
	mutex_exit(&g->lock);

	/* Free locks for all hosts on the local list. */
	while (!TAILQ_EMPTY(&host_list)) {
		hostp = TAILQ_FIRST(&host_list);
		TAILQ_REMOVE(&host_list, hostp, nh_link);

		/*
		 * Note that this does not do client-side cleanup.
		 * We want to do that ONLY if statd tells us the
		 * server has restarted.
		 */
		nlm_host_notify_server(hostp, argp->state);
		nlm_host_release(g, hostp);
	}

	mutex_exit(&g->clean_lock);

	(void) res;
	(void) sr;
}
int
zfsctl_lookup_objset(struct super_block *sb, uint64_t objsetid,
    zfs_sb_t **zsbp)
{
	zfs_sb_t *zsb = sb->s_fs_info;
	struct super_block *sbp;
	zfs_snapentry_t *sep;
	uint64_t id;
	int error;

	ASSERT(zsb->z_ctldir != NULL);

	mutex_enter(&zsb->z_ctldir_lock);

	/*
	 * Verify that the snapshot is mounted.
	 */
	sep = avl_first(&zsb->z_ctldir_snaps);
	while (sep != NULL) {
		error = dmu_snapshot_lookup(zsb->z_os, sep->se_name, &id);
		if (error)
			goto out;

		if (id == objsetid)
			break;

		sep = AVL_NEXT(&zsb->z_ctldir_snaps, sep);
	}

	if (sep != NULL) {
		/*
		 * Lookup the mounted root rather than the covered mount
		 * point.  This may fail if the snapshot has just been
		 * unmounted by an unrelated user space process.  This
		 * race cannot occur to an expired mount point because
		 * we hold the zsb->z_ctldir_lock to prevent the race.
		 */
		sbp = zpl_sget(&zpl_fs_type, zfsctl_test_super,
		    zfsctl_set_super, 0, &id);
		if (IS_ERR(sbp)) {
			error = -PTR_ERR(sbp);
		} else {
			*zsbp = sbp->s_fs_info;
			deactivate_super(sbp);
		}
	} else {
		error = EINVAL;
	}
out:
	mutex_exit(&zsb->z_ctldir_lock);
	ASSERT3S(error, >=, 0);

	return (error);
}
pppt_task_t *
pppt_task_lookup(stmf_ic_msgid_t msgid)
{
	pppt_tgt_t *tgt;
	pppt_sess_t *sess;
	pppt_task_t lookup_task;
	pppt_task_t *result;

	bzero(&lookup_task, sizeof (lookup_task));
	lookup_task.pt_task_id = msgid;
	PPPT_GLOBAL_LOCK();
	for (tgt = avl_first(&pppt_global.global_target_list); tgt != NULL;
	    tgt = AVL_NEXT(&pppt_global.global_target_list, tgt)) {
		mutex_enter(&tgt->target_mutex);
		for (sess = avl_first(&tgt->target_sess_list); sess != NULL;
		    sess = AVL_NEXT(&tgt->target_sess_list, sess)) {
			mutex_enter(&sess->ps_mutex);
			if ((result = avl_find(&sess->ps_task_list,
			    &lookup_task, NULL)) != NULL) {
				if (pppt_task_hold(result) !=
				    PPPT_STATUS_SUCCESS) {
					result = NULL;
				}
				mutex_exit(&sess->ps_mutex);
				mutex_exit(&tgt->target_mutex);
				PPPT_GLOBAL_UNLOCK();
				return (result);
			}
			mutex_exit(&sess->ps_mutex);
		}
		mutex_exit(&tgt->target_mutex);
	}
	PPPT_GLOBAL_UNLOCK();

	return (NULL);
}
static void *
_avl_walk_advance(uu_avl_walk_t *wp, uu_avl_t *ap)
{
	void *np = wp->uaw_next_result;
	avl_tree_t *t = &ap->ua_tree;

	if (np == NULL)
		return (NULL);

	wp->uaw_next_result = (wp->uaw_dir > 0) ?
	    AVL_NEXT(t, np) : AVL_PREV(t, np);

	return (np);
}
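/*
 * _avl_walk_advance() pre-fetches the next result so the walk survives
 * removal of the node it just returned, and selects its direction with
 * AVL_NEXT() or AVL_PREV().  A minimal sketch of a direction-agnostic
 * walk built the same way (visit() and the void pointers are
 * illustrative; this is not the uu_avl API):
 */
#include <sys/avl.h>

/* Walk in comparator order when dir > 0, in reverse otherwise. */
static void
walk_tree(avl_tree_t *tree, int dir, void (*visit)(void *))
{
	void *node, *next;

	node = (dir > 0) ? avl_first(tree) : avl_last(tree);
	while (node != NULL) {
		/* Fetch the successor first; visit() may avl_remove(node). */
		next = (dir > 0) ? AVL_NEXT(tree, node) : AVL_PREV(tree, node);
		visit(node);
		node = next;
	}
}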
/*
 * Return the next string in container [zsp].
 * Return NULL after the last string, or on error.
 * This must be called after zed_strings_first().
 * XXX: Not thread-safe.
 */
const char *
zed_strings_next(zed_strings_t *zsp)
{
	if (!zsp) {
		errno = EINVAL;
		return (NULL);
	}
	if (!zsp->iteratorp)
		return (NULL);

	zsp->iteratorp = AVL_NEXT(&zsp->tree, zsp->iteratorp);
	if (!zsp->iteratorp)
		return (NULL);

	return (((zed_strings_node_t *)zsp->iteratorp)->string);
}
static void
pkgdump(void)
{
	FILE *cnts;
	int err = 0;
	pkgentry_t *p;

	if (read_only)
		return;

	/* We cannot dump when the current transaction is not complete. */
	if (sync_needed)
		return;

	cnts = fopen(TCONTENTS, "w");
	if (cnts == NULL)
		exit(99);

	for (p = avl_first(list); p != NULL; p = AVL_NEXT(list, p)) {
		if (fprintf(cnts, "%s\n", p->line) < 0)
			err++;
	}
	if (ccmnt[0] != NULL)
		(void) fprintf(cnts, "%s\n", ccmnt[0]);
	if (ccmnt[1] != NULL)
		(void) fprintf(cnts, "%s\n", ccmnt[1]);

	if (err != 0 || fflush(cnts) == EOF || fsync(fileno(cnts)) != 0 ||
	    fclose(cnts) == EOF || rename(TCONTENTS, CONTENTS) != 0) {
		err++;
	}

	if (err != 0) {
		progerr("cannot rewrite the contents file");
		exit(2);
	}

	(void) fclose(log);
	(void) unlink(PKGLOG);
	log = NULL;
	ndumps++;
	logcount = 0;
}
/*ARGSUSED*/
static void
sbc_reserve(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	disk_params_t	*p = (disk_params_t *)T10_PARAMS_AREA(cmd);
	t10_lu_impl_t	*lu;

	if (p == NULL)
		return;

	if (cdb[1] & 0xe0 || SAM_CONTROL_BYTE_RESERVED(cdb[5])) {
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	if ((p->d_reserve_owner != NULL) && (p->d_reserve_owner != cmd->c_lu)) {
		trans_send_complete(cmd, STATUS_RESERVATION_CONFLICT);
		return;
	} else if (p->d_reserve_owner == cmd->c_lu) {
		/*
		 * According to SPC-2 revision 20, section 7.21.2,
		 * it shall be permissible for an initiator to
		 * reserve a logical unit that is currently reserved
		 * by that initiator.
		 */
		trans_send_complete(cmd, STATUS_GOOD);
	} else {
		lu = avl_first(&cmd->c_lu->l_common->l_all_open);
		do {
			if (lu != cmd->c_lu)
				lu->l_cmd = sbc_cmd_reserved;
			lu = AVL_NEXT(&cmd->c_lu->l_common->l_all_open, lu);
		} while (lu != NULL);
		p->d_reserve_owner = cmd->c_lu;
		trans_send_complete(cmd, STATUS_GOOD);
	}
}
void
sbc_fini_per(t10_lu_impl_t *itl)
{
	disk_params_t	*d = (disk_params_t *)itl->l_common->l_dtype_params;
	t10_lu_impl_t	*lu;

	if (d->d_reserve_owner == itl) {
		/*
		 * Since we currently own the reservation, drop it,
		 * and restore everyone else's command pointer.
		 */
		lu = avl_first(&itl->l_common->l_all_open);
		do {
			lu->l_cmd = sbc_cmd;
			lu = AVL_NEXT(&itl->l_common->l_all_open, lu);
		} while (lu != NULL);
		d->d_reserve_owner = NULL;
	}
}
static int
zcrypt_keychain_fini(avl_tree_t keychain)
{
	void *cookie = NULL;
	zcrypt_keychain_node_t *node = NULL;

#if 0
	/* Refuse to tear down the keychain while any key is referenced. */
	for (node = avl_first(&keychain); node != NULL;
	    node = AVL_NEXT(&keychain, node)) {
		if (!refcount_is_zero(&node->dkn_key->zk_refcnt))
			return (EBUSY);
	}
#endif

	while ((node = avl_destroy_nodes(&keychain, &cookie)) != NULL) {
		zcrypt_key_free(node->dkn_key);
		kmem_free(node, sizeof (zcrypt_keychain_node_t));
	}
	avl_destroy(&keychain);

	return (0);
}
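/*
 * The avl_destroy_nodes() loop above is the canonical O(n) teardown:
 * each call hands back one node (in no particular order) through the
 * opaque cookie without rebalancing the tree, and avl_destroy()
 * releases the now-empty tree.  A minimal userland sketch, assuming a
 * hypothetical item_t with an embedded avl_node_t (illustrative only):
 */
#include <sys/avl.h>
#include <stdlib.h>

typedef struct item {
	int		it_key;
	avl_node_t	it_link;
} item_t;

/* Empty and tear down a tree; no per-node avl_remove() required. */
static void
tree_fini(avl_tree_t *tree)
{
	void *cookie = NULL;
	item_t *it;

	while ((it = avl_destroy_nodes(tree, &cookie)) != NULL)
		free(it);
	avl_destroy(tree);
}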
static void
report_inode_dups(inode_dup_t *inode)
{
	reference_t *dup;
	daddr32_t first_lfn, last_lfn, first_pfn, last_pfn;

	(void) printf("Inode %d:\n", inode->id_ino);

	dup = avl_first(&inode->id_fragments);
	first_lfn = last_lfn = dup->ref_lfn;
	first_pfn = last_pfn = dup->ref_pfn;

	while ((dup = AVL_NEXT(&inode->id_fragments, dup)) != NULL) {
		if (((last_lfn + 1) != dup->ref_lfn) ||
		    ((last_pfn + 1) != dup->ref_pfn)) {
			/* Not contiguous; report the completed run. */
			report_dup_lfn_pfn(first_lfn, last_lfn,
			    first_pfn, last_pfn);
			first_lfn = last_lfn = dup->ref_lfn;
			first_pfn = last_pfn = dup->ref_pfn;
		} else {
			/* Extend the current run of contiguous fragments. */
			last_lfn = dup->ref_lfn;
			last_pfn = dup->ref_pfn;
		}
	}
	report_dup_lfn_pfn(first_lfn, last_lfn, first_pfn, last_pfn);
}
/*
 * smb_pwd_iterate
 *
 * Scans through the users cache using the given iterator.
 */
smb_luser_t *
smb_pwd_iterate(smb_pwditer_t *iter)
{
	smb_ucnode_t *ucnode;

	if (iter == NULL)
		return (NULL);

	if (smb_pwd_ops.pwop_iterate != NULL)
		return (smb_pwd_ops.pwop_iterate(iter));

	if (iter->spi_next == NULL)
		ucnode = avl_first(&smb_uch.uc_cache);
	else
		ucnode = AVL_NEXT(&smb_uch.uc_cache, iter->spi_next);

	if ((iter->spi_next = ucnode) != NULL)
		return (&ucnode->cn_user);

	return (NULL);
}
/*
 * For each entry in the proto AVL, make sure we found the corresponding entry
 * in the manifest.  Note we don't check the reverse (that every entry in the
 * manifest is here) as that is the job of the general build tools.
 */
static void
ucc_check_proto(ucodecheck_t *ucc)
{
	ucode_ent_t *ent;

	for (ent = avl_first(&ucc->ucc_proto_ents); ent != NULL;
	    ent = AVL_NEXT(&ucc->ucc_proto_ents, ent)) {
		ucode_ent_t *manifest;

		manifest = avl_find(&ucc->ucc_manifest_ents, ent, NULL);
		if (manifest == NULL) {
			(void) fprintf(stderr, "missing from manifest: %s\n",
			    ent->uce_name);
			ucc->ucc_errors++;
			continue;
		}

		if (ucc->ucc_verbose) {
			(void) printf("%s OK\n", ent->uce_name);
		}
	}
}
static void
dsl_keychain_clone_phys(dsl_dataset_t *src, dsl_dir_t *dd, dmu_tx_t *tx,
    zcrypt_key_t *dwkey)
{
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	uint64_t keychain = dsl_dir_phys(dd)->dd_keychain_obj;
	caddr_t wrappedkey = NULL;
	size_t wkeylen = 0;
	zcrypt_keystore_node_t *kn;
	zcrypt_keychain_node_t *n;
	uint64_t newest_txg = dsl_dataset_phys(src)->ds_creation_txg;

	kn = zcrypt_keystore_find_node(dsl_dataset_get_spa(src),
	    src->ds_object, B_FALSE);
	if (kn == NULL) {
		kn = zcrypt_keystore_find_node(dsl_dataset_get_spa(src),
		    dsl_dir_phys(src->ds_dir)->dd_head_dataset_obj, B_FALSE);
	}
	ASSERT(kn != NULL);
	ASSERT(dwkey != NULL);

	/*
	 * Walk the in-memory AVL tree representation of the keychain,
	 * creating new keychain entries using our wrapping key, stopping
	 * when we reach keychain entries created after the snapshot we
	 * are cloning from.
	 */
	mutex_enter(&kn->skn_lock);
	for (n = avl_first(&kn->skn_keychain);
	    n != NULL && n->dkn_txg <= newest_txg;
	    n = AVL_NEXT(&kn->skn_keychain, n)) {
		VERIFY(zcrypt_wrap_key(dwkey, n->dkn_key,
		    &wrappedkey, &wkeylen,
		    zio_crypt_select_wrap(dwkey->zk_crypt)) == 0);
		VERIFY(zap_update_uint64(mos, keychain, &n->dkn_txg, 1, 1,
		    wkeylen, wrappedkey, tx) == 0);
		kmem_free(wrappedkey, wkeylen);
	}
	mutex_exit(&kn->skn_lock);
}
void
pppt_sess_close_locked(pppt_sess_t *ps)
{
	pppt_tgt_t	*tgt = ps->ps_target;
	pppt_task_t	*ptask;

	stmf_trace("pppt", "Session close %p", (void *)ps);

	ASSERT(mutex_owned(&pppt_global.global_lock));
	ASSERT(mutex_owned(&tgt->target_mutex));
	ASSERT(mutex_owned(&ps->ps_mutex));
	ASSERT(!ps->ps_closed); /* Caller should ensure session is not closed */

	ps->ps_closed = B_TRUE;
	for (ptask = avl_first(&ps->ps_task_list); ptask != NULL;
	    ptask = AVL_NEXT(&ps->ps_task_list, ptask)) {
		mutex_enter(&ptask->pt_mutex);
		if (ptask->pt_state == PTS_ACTIVE) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, ptask->pt_stmf_task,
			    STMF_ABORTED, NULL);
		}
		mutex_exit(&ptask->pt_mutex);
	}

	/*
	 * Now that all the tasks are aborting the session refcnt should
	 * go to 0.
	 */
	while (ps->ps_refcnt != 0) {
		cv_wait(&ps->ps_cv, &ps->ps_mutex);
	}

	avl_remove(&tgt->target_sess_list, ps);
	avl_remove(&pppt_global.global_sess_list, ps);
	(void) taskq_dispatch(pppt_global.global_sess_taskq,
	    &pppt_sess_destroy_task, ps, KM_SLEEP);

	stmf_trace("pppt", "Session close complete %p", (void *)ps);
}
/*ARGSUSED*/
static void
sbc_release(t10_cmd_t *cmd, uint8_t *cdb, size_t cdb_len)
{
	disk_params_t	*p = (disk_params_t *)T10_PARAMS_AREA(cmd);
	t10_lu_impl_t	*lu;

	if (p == NULL)
		return;

	if (cdb[1] & 0xe0 || cdb[3] || cdb[4] ||
	    SAM_CONTROL_BYTE_RESERVED(cdb[5])) {
		spc_sense_create(cmd, KEY_ILLEGAL_REQUEST, 0);
		spc_sense_ascq(cmd, SPC_ASC_INVALID_CDB, 0x00);
		trans_send_complete(cmd, STATUS_CHECK);
		return;
	}

	if (p->d_reserve_owner == NULL) {
		/*
		 * If nobody is the owner this command is successful.
		 */
		trans_send_complete(cmd, STATUS_GOOD);
		return;
	}

	/*
	 * At this point the only way to get in here is to be the owner
	 * of the reservation.
	 */
	lu = avl_first(&cmd->c_lu->l_common->l_all_open);
	do {
		lu->l_cmd = sbc_cmd;
		lu = AVL_NEXT(&cmd->c_lu->l_common->l_all_open, lu);
	} while (lu != NULL);
	p->d_reserve_owner = NULL;
	trans_send_complete(cmd, STATUS_GOOD);
}
/*
 * When a .zfs/snapshot/<snapshot> inode is evicted it must be removed
 * from the snapshot list.  This will normally happen as part of the auto
 * unmount, however in the case of a manual snapshot unmount this will be
 * the only notification we receive.
 */
void
zfsctl_snapdir_inactive(struct inode *ip)
{
	zfs_sb_t *zsb = ITOZSB(ip);
	zfs_snapentry_t *sep, *next;

	mutex_enter(&zsb->z_ctldir_lock);

	sep = avl_first(&zsb->z_ctldir_snaps);
	while (sep != NULL) {
		next = AVL_NEXT(&zsb->z_ctldir_snaps, sep);

		if (sep->se_inode == ip) {
			avl_remove(&zsb->z_ctldir_snaps, sep);
			taskq_cancel_id(zfs_expire_taskq, sep->se_taskqid);
			zfsctl_sep_free(sep);
			break;
		}
		sep = next;
	}

	mutex_exit(&zsb->z_ctldir_lock);
}
int
zfs_iter_snapshots_sorted(zfs_handle_t *zhp, zfs_iter_f callback, void *data)
{
	int ret = 0;
	zfs_node_t *node;
	avl_tree_t avl;
	void *cookie = NULL;

	avl_create(&avl, zfs_snapshot_compare,
	    sizeof (zfs_node_t), offsetof(zfs_node_t, zn_avlnode));

	ret = zfs_iter_snapshots(zhp, B_FALSE, zfs_sort_snaps, &avl);

	for (node = avl_first(&avl); node != NULL; node = AVL_NEXT(&avl, node))
		ret |= callback(node->zn_handle, data);

	while ((node = avl_destroy_nodes(&avl, &cookie)) != NULL)
		free(node);

	avl_destroy(&avl);

	return (ret);
}
void
range_tree_stat_verify(range_tree_t *rt)
{
	range_seg_t *rs;
	uint64_t hist[RANGE_TREE_HISTOGRAM_SIZE] = { 0 };
	int i;

	for (rs = avl_first(&rt->rt_root); rs != NULL;
	    rs = AVL_NEXT(&rt->rt_root, rs)) {
		uint64_t size = rs->rs_end - rs->rs_start;
		int idx = highbit64(size) - 1;

		hist[idx]++;
		ASSERT3U(hist[idx], !=, 0);
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
		if (hist[i] != rt->rt_histogram[i]) {
			zfs_dbgmsg("i=%d, hist=%p, hist=%llu, rt_hist=%llu",
			    i, hist, hist[i], rt->rt_histogram[i]);
		}
		VERIFY3U(hist[i], ==, rt->rt_histogram[i]);
	}
}