/*
 * Translate a (dataset, path) pair into the (objset, object) pair recorded
 * in 'record'.  The object number is simply the file's inode number, taken
 * from the caller-supplied stat buffer.  Returns 0 on success, -1 (with a
 * message on stderr) if the dataset cannot be owned.
 */
/* ARGSUSED */
static int
object_from_path(const char *dataset, const char *path, struct stat64 *statbuf,
    zinject_record_t *record)
{
	objset_t *os;
	int err;

	/*
	 * Before doing any libzpool operations, call sync() to ensure that the
	 * on-disk state is consistent with the in-core state.
	 */
	sync();

	err = dmu_objset_own(dataset, DMU_OST_ZFS, B_TRUE, FTAG, &os);
	if (err == 0) {
		record->zi_objset = dmu_objset_id(os);
		record->zi_object = statbuf->st_ino;
		dmu_objset_disown(os, FTAG);
		return (0);
	}

	(void) fprintf(stderr, "cannot open dataset '%s': %s\n",
	    dataset, strerror(err));
	return (-1);
}
/*
 * Format one dbuf hash-table entry into 'buf' (at most 'size' bytes,
 * including the NUL terminator) and return the number of characters
 * actually stored, excluding the terminator.
 *
 * Bug fix: the previous code did 'size = snprintf(buf, size - 1, ...)'
 * followed by 'buf[size] = '\0''.  snprintf() returns the length the
 * fully-formatted string WOULD have had, so on truncation that store was
 * out of bounds.  We now let snprintf() bound and terminate the string
 * itself and clamp the returned length.
 */
int
__dbuf_stats_hash_table_data(char *buf, size_t size, dmu_buf_impl_t *db)
{
	arc_buf_info_t abi = { 0 };
	dmu_object_info_t doi = { 0 };
	dnode_t *dn = DB_DNODE(db);
	size_t nwritten;

	if (size == 0)
		return (0);

	if (db->db_buf)
		arc_buf_info(db->db_buf, &abi, zfs_dbuf_state_index);

	/*
	 * NOTE(review): 'dn' is checked for NULL here, but it is also
	 * dereferenced unconditionally below (spa_name(), dn_holds) —
	 * presumably DB_DNODE() never returns NULL in practice; confirm.
	 */
	if (dn)
		__dmu_object_info_from_dnode(dn, &doi);

	nwritten = snprintf(buf, size,
	    "%-16s %-8llu %-8lld %-8lld %-8lld %-8llu %-8llu %-5d %-5d %-5lu | "
	    "%-5d %-5d %-6lld 0x%-6x %-6lu %-8llu %-12llu "
	    "%-6lu %-6lu %-6lu %-6lu %-6lu %-8llu %-8llu %-8d %-5lu | "
	    "%-6d %-6d %-8lu %-8lu %-6llu %-6lu %-5lu %-8llu %-8llu\n",
	    /* dmu_buf_impl_t */
	    spa_name(dn->dn_objset->os_spa),
	    (u_longlong_t)dmu_objset_id(db->db_objset),
	    (longlong_t)db->db.db_object,
	    (longlong_t)db->db_level,
	    (longlong_t)db->db_blkid,
	    (u_longlong_t)db->db.db_offset,
	    (u_longlong_t)db->db.db_size,
	    !!dbuf_is_metadata(db),
	    db->db_state,
	    (ulong_t)refcount_count(&db->db_holds),
	    /* arc_buf_info_t */
	    abi.abi_state_type,
	    abi.abi_state_contents,
	    (longlong_t)abi.abi_state_index,
	    abi.abi_flags,
	    (ulong_t)abi.abi_datacnt,
	    (u_longlong_t)abi.abi_size,
	    (u_longlong_t)abi.abi_access,
	    (ulong_t)abi.abi_mru_hits,
	    (ulong_t)abi.abi_mru_ghost_hits,
	    (ulong_t)abi.abi_mfu_hits,
	    (ulong_t)abi.abi_mfu_ghost_hits,
	    (ulong_t)abi.abi_l2arc_hits,
	    (u_longlong_t)abi.abi_l2arc_dattr,
	    (u_longlong_t)abi.abi_l2arc_asize,
	    abi.abi_l2arc_compress,
	    (ulong_t)abi.abi_holds,
	    /* dmu_object_info_t */
	    doi.doi_type,
	    doi.doi_bonus_type,
	    (ulong_t)doi.doi_data_block_size,
	    (ulong_t)doi.doi_metadata_block_size,
	    (u_longlong_t)doi.doi_bonus_size,
	    (ulong_t)doi.doi_indirection,
	    (ulong_t)refcount_count(&dn->dn_holds),
	    (u_longlong_t)doi.doi_fill_count,
	    (u_longlong_t)doi.doi_max_offset);

	/* Clamp to the characters that actually fit (output was truncated). */
	if (nwritten >= size)
		nwritten = size - 1;

	return (nwritten);
}
/*
 * sget() callback: return non-zero iff this super block belongs to the
 * objset whose id is pointed to by 'objsetidp'.
 */
static int
zfsctl_test_super(struct super_block *sb, void *objsetidp)
{
	zfs_sb_t *zsb = sb->s_fs_info;

	return (dmu_objset_id(zsb->z_os) == *(uint64_t *)objsetidp);
}
/*
 * Seed the checksum fields of a fresh ZIL log chain: two random GUID
 * words identify the chain, the objset id ties it to its dataset, and
 * the sequence number starts at 1.
 */
static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
	zio_cksum_t *cksum = &bp->blk_cksum;

	cksum->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
	cksum->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
	cksum->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
	cksum->zc_word[ZIL_ZC_SEQ] = 1ULL;
}
/*
 * dcache ->d_revalidate callback: decide whether a cached dentry is still
 * trustworthy.  Returns 1 (valid), 0 (invalidate), or -ECHILD when called
 * in RCU-walk mode (we require ref-walk).  The signature differs by kernel
 * version, hence the #ifdef on HAVE_D_REVALIDATE_NAMEIDATA.
 */
static int
#ifdef HAVE_D_REVALIDATE_NAMEIDATA
zpl_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	unsigned int flags = (nd ? nd->flags : 0);
#else
zpl_revalidate(struct dentry *dentry, unsigned int flags)
{
#endif /* HAVE_D_REVALIDATE_NAMEIDATA */
	zfs_sb_t *zsb = dentry->d_sb->s_fs_info;
	int error;

	/* Cannot take locks in RCU-walk mode; ask the VFS to retry. */
	if (flags & LOOKUP_RCU)
		return (-ECHILD);

	/*
	 * Automounted snapshots rely on periodic dentry revalidation
	 * to defer snapshots from being automatically unmounted.
	 * Re-arm the unmount delay at most every zfs_expire_snapshot/2
	 * seconds (but no more often than once per jiffy tick).
	 */
	if (zsb->z_issnap) {
		if (time_after(jiffies, zsb->z_snap_defer_time +
		    MAX(zfs_expire_snapshot * HZ / 2, HZ))) {
			zsb->z_snap_defer_time = jiffies;
			zfsctl_snapshot_unmount_delay(zsb->z_os->os_spa,
			    dmu_objset_id(zsb->z_os), zfs_expire_snapshot);
		}
	}

	/*
	 * After a rollback negative dentries created before the rollback
	 * time must be invalidated.  Otherwise they can obscure files which
	 * are only present in the rolled back dataset.
	 */
	if (dentry->d_inode == NULL) {
		/* d_lock protects d_time against concurrent updates. */
		spin_lock(&dentry->d_lock);
		error = time_before(dentry->d_time, zsb->z_rollback_time);
		spin_unlock(&dentry->d_lock);

		if (error)
			return (0);
	}

	/*
	 * The dentry may reference a stale inode if a mounted file system
	 * was rolled back to a point in time where the object didn't exist.
	 */
	if (dentry->d_inode && ITOZ(dentry->d_inode)->z_is_stale)
		return (0);

	return (1);
}
/*
 * Upgrade the on-disk ZPL version of dataset 'name' to 'newvers'.
 * The new version must be within [ZPL_VERSION_INITIAL, ZPL_VERSION] and
 * not older than the current version.  Returns 0 on success or an errno
 * value on failure.
 */
int
zfs_set_version(const char *name, uint64_t newvers)
{
	int error;
	objset_t *os;
	dmu_tx_t *tx;
	uint64_t curvers;

	/*
	 * XXX for now, require that the filesystem be unmounted.  Would
	 * be nice to find the zfsvfs_t and just update that if
	 * possible.
	 */

	if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
		return (EINVAL);

	error = dmu_objset_open(name, DMU_OST_ZFS, DS_MODE_PRIMARY, &os);
	if (error)
		return (error);

	/* Read the current ZPL version from the master node. */
	error = zap_lookup(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
	    8, 1, &curvers);
	if (error)
		goto out;
	/* Downgrades are not supported. */
	if (newvers < curvers) {
		error = EINVAL;
		goto out;
	}

	/* Update the version in a transaction, logging the upgrade. */
	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, 0, ZPL_VERSION_STR);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		goto out;
	}
	error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR, 8, 1,
	    &newvers, tx);

	spa_history_internal_log(LOG_DS_UPGRADE,
	    dmu_objset_spa(os), tx, CRED(),
	    "oldver=%llu newver=%llu dataset = %llu", curvers, newvers,
	    dmu_objset_id(os));

	dmu_tx_commit(tx);

out:
	dmu_objset_close(os);
	return (error);
}
/*
 * Read in the data for the dmu_sync()ed block, and change the log
 * record to write this whole block.  On return lr_offset is aligned
 * down to the block boundary and lr_length covers the full block.
 */
void
zil_get_replay_data(zilog_t *zilog, lr_write_t *lr)
{
	blkptr_t *wbp = &lr->lr_blkptr;
	char *wbuf = (char *)(lr + 1); /* data follows lr_write_t */
	uint64_t blksz;

	if (BP_IS_HOLE(wbp)) {	/* compressed to a hole */
		blksz = BP_GET_LSIZE(&lr->lr_blkptr);
		/*
		 * If the blksz is zero then we must be replaying a log
		 * from a version prior to setting the blksize of null blocks.
		 * So we just zero the actual write size requested.
		 */
		if (blksz == 0) {
			bzero(wbuf, lr->lr_length);
			return;
		}
		bzero(wbuf, blksz);
	} else {
		/*
		 * A subsequent write may have overwritten this block, in which
		 * case wbp may have been freed and reallocated, and our
		 * read of wbp may fail with a checksum error.  We can safely
		 * ignore this because the later write will provide the
		 * correct data.
		 */
		zbookmark_t zb;

		zb.zb_objset = dmu_objset_id(zilog->zl_os);
		zb.zb_object = lr->lr_foid;
		zb.zb_level = 0;
		zb.zb_blkid = -1; /* unknown */

		blksz = BP_GET_LSIZE(&lr->lr_blkptr);

		/* Best-effort read; errors intentionally ignored (above). */
		(void) zio_wait(zio_read(NULL, zilog->zl_spa, wbp, wbuf, blksz,
		    NULL, NULL, ZIO_PRIORITY_SYNC_READ,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb));
	}

	/* Rewrite the record to cover the whole (aligned) block. */
	lr->lr_offset -= lr->lr_offset % blksz;
	lr->lr_length = blksz;
}
/*
 * zdb: pretty-print a TX_WRITE intent-log record.  At verbosity >= 5 the
 * record's data is dumped (printable bytes as characters, others as hex);
 * if the record carries a block pointer rather than inline data, the block
 * is read from the pool first.
 *
 * Fix: isprint() was called with a plain (possibly signed) char, which is
 * undefined behavior for negative values — the argument must be an
 * unsigned char value or EOF (CERT STR37-C).
 *
 * NOTE(review): 'buf[SPA_MAXBLOCKSIZE]' is a very large stack allocation
 * (up to the pool's maximum block size).  zdb appears to run on a regular
 * userland stack, so this is presumably tolerated today, but a heap
 * buffer would be safer — confirm against the surrounding file's
 * allocation conventions before changing.
 */
/* ARGSUSED */
static void
zil_prt_rec_write(zilog_t *zilog, int txtype, lr_write_t *lr)
{
	char *data, *dlimit;
	blkptr_t *bp = &lr->lr_blkptr;
	zbookmark_phys_t zb;
	char buf[SPA_MAXBLOCKSIZE];
	int verbose = MAX(dump_opt['d'], dump_opt['i']);
	int error;

	(void) printf("%sfoid %llu, offset %llx, length %llx\n", prefix,
	    (u_longlong_t)lr->lr_foid, (u_longlong_t)lr->lr_offset,
	    (u_longlong_t)lr->lr_length);

	/* TX_WRITE2 carries no data, and low verbosity skips the dump. */
	if (txtype == TX_WRITE2 || verbose < 5)
		return;

	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		/* Record holds a blkptr; the data lives in the pool. */
		(void) printf("%shas blkptr, %s\n", prefix,
		    !BP_IS_HOLE(bp) &&
		    bp->blk_birth >= spa_first_txg(zilog->zl_spa) ?
		    "will claim" : "won't claim");
		print_log_bp(bp, prefix);

		if (BP_IS_HOLE(bp)) {
			(void) printf("\t\t\tLSIZE 0x%llx\n",
			    (u_longlong_t)BP_GET_LSIZE(bp));
			bzero(buf, sizeof (buf));
			(void) printf("%s<hole>\n", prefix);
			return;
		}
		if (bp->blk_birth < zilog->zl_header->zh_claim_txg) {
			(void) printf("%s<block already committed>\n", prefix);
			return;
		}

		SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os),
		    lr->lr_foid, ZB_ZIL_LEVEL,
		    lr->lr_offset / BP_GET_LSIZE(bp));

		error = zio_wait(zio_read(NULL, zilog->zl_spa,
		    bp, buf, BP_GET_LSIZE(bp), NULL, NULL,
		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &zb));
		if (error)
			return;
		data = buf;
	} else {
		/* Data is stored inline, immediately after the record. */
		data = (char *)(lr + 1);
	}

	/* Dump at most 20 bytes below verbosity 6, the whole block above. */
	dlimit = data + MIN(lr->lr_length,
	    (verbose < 6 ? 20 : SPA_MAXBLOCKSIZE));

	(void) printf("%s", prefix);
	while (data < dlimit) {
		if (isprint((unsigned char)*data))
			(void) printf("%c ", *data);
		else
			(void) printf("%2hhX", *data);
		data++;
	}
	(void) printf("\n");
}
/*
 * Lookup entry point for the .zfs/snapshot directory.  Resolves 'nm' to
 * the root vnode of the named snapshot, automounting the snapshot if it
 * is not already mounted.  On success *vpp holds the (held) snapshot root
 * vnode; on failure *vpp is NULL and an errno value is returned.
 */
/* ARGSUSED */
static int
zfsctl_snapdir_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp,
    int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
    int *direntflags, pathname_t *realpnp)
{
	zfsctl_snapdir_t *sdp = dvp->v_data;
	objset_t *snap;
	char snapname[MAXNAMELEN];
	char real[MAXNAMELEN];
	char *mountpoint;
	zfs_snapentry_t *sep, search;
	struct mounta margs;
	vfs_t *vfsp;
	size_t mountpoint_len;
	avl_index_t where;
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	int err;

	/*
	 * No extended attributes allowed under .zfs
	 */
	if (flags & LOOKUP_XATTR)
		return (EINVAL);

	ASSERT(dvp->v_type == VDIR);

	/*
	 * If we get a recursive call, that means we got called
	 * from the domount() code while it was trying to look up the
	 * spec (which looks like a local path for zfs). We need to
	 * add some flag to domount() to tell it not to do this lookup.
	 */
	if (MUTEX_HELD(&sdp->sd_lock))
		return (ENOENT);

	ZFS_ENTER(zfsvfs);

	/* "." and ".." are handled by the GFS layer. */
	if (gfs_lookup_dot(vpp, dvp, zfsvfs->z_ctldir, nm) == 0) {
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	/* Case-insensitive lookup: map 'nm' to the snapshot's real name. */
	if (flags & FIGNORECASE) {
		boolean_t conflict = B_FALSE;

		err = dmu_snapshot_realname(zfsvfs->z_os, nm, real,
		    MAXNAMELEN, &conflict);
		if (err == 0) {
			nm = real;
		} else if (err != ENOTSUP) {
			ZFS_EXIT(zfsvfs);
			return (err);
		}
		if (realpnp)
			(void) strlcpy(realpnp->pn_buf, nm,
			    realpnp->pn_bufsize);
		if (conflict && direntflags)
			*direntflags = ED_CASE_CONFLICT;
	}

	/* Check whether the snapshot is already known (and mounted). */
	mutex_enter(&sdp->sd_lock);
	search.se_name = (char *)nm;
	if ((sep = avl_find(&sdp->sd_snaps, &search, &where)) != NULL) {
		*vpp = sep->se_root;
		VN_HOLD(*vpp);
		err = traverse(vpp);
		if (err) {
			VN_RELE(*vpp);
			*vpp = NULL;
		} else if (*vpp == sep->se_root) {
			/*
			 * The snapshot was unmounted behind our backs,
			 * try to remount it.
			 */
			goto domount;
		} else {
			/*
			 * VROOT was set during the traverse call.  We need
			 * to clear it since we're pretending to be part
			 * of our parent's vfs.
			 */
			(*vpp)->v_flag &= ~VROOT;
		}
		mutex_exit(&sdp->sd_lock);
		ZFS_EXIT(zfsvfs);
		return (err);
	}

	/*
	 * The requested snapshot is not currently mounted, look it up.
	 */
	err = zfsctl_snapshot_zname(dvp, nm, MAXNAMELEN, snapname);
	if (err) {
		mutex_exit(&sdp->sd_lock);
		ZFS_EXIT(zfsvfs);
		/*
		 * handle "ls *" or "?" in a graceful manner,
		 * forcing EILSEQ to ENOENT.
		 * Since shell ultimately passes "*" or "?" as name to lookup
		 */
		return (err == EILSEQ ? ENOENT : err);
	}
	if (dmu_objset_hold(snapname, FTAG, &snap) != 0) {
		mutex_exit(&sdp->sd_lock);
		ZFS_EXIT(zfsvfs);
		return (ENOENT);
	}

	/* Record the snapshot in the AVL tree and create its GFS node. */
	sep = kmem_alloc(sizeof (zfs_snapentry_t), KM_SLEEP);
	sep->se_name = kmem_alloc(strlen(nm) + 1, KM_SLEEP);
	(void) strcpy(sep->se_name, nm);
	*vpp = sep->se_root = zfsctl_snapshot_mknode(dvp, dmu_objset_id(snap));
	avl_insert(&sdp->sd_snaps, sep, where);

	dmu_objset_rele(snap, FTAG);
domount:
	/* Build ".../.zfs/snapshot/<name>" and mount the snapshot there. */
	mountpoint_len = strlen(refstr_value(dvp->v_vfsp->vfs_mntpt)) +
	    strlen("/.zfs/snapshot/") + strlen(nm) + 1;
	mountpoint = kmem_alloc(mountpoint_len, KM_SLEEP);
	(void) snprintf(mountpoint, mountpoint_len,
	    "%s/.zfs/snapshot/%s",
	    refstr_value(dvp->v_vfsp->vfs_mntpt), nm);

	margs.spec = snapname;
	margs.dir = mountpoint;
	margs.flags = MS_SYSSPACE | MS_NOMNTTAB;
	margs.fstype = "zfs";
	margs.dataptr = NULL;
	margs.datalen = 0;
	margs.optptr = NULL;
	margs.optlen = 0;

	err = domount("zfs", &margs, *vpp, kcred, &vfsp);
	kmem_free(mountpoint, mountpoint_len);

	if (err == 0) {
		/*
		 * Return the mounted root rather than the covered mount
		 * point.  Takes the GFS vnode at .zfs/snapshot/<snapname>
		 * and returns the ZFS vnode mounted on top of the GFS node.
		 * This ZFS vnode is the root of the newly created vfsp.
		 */
		VFS_RELE(vfsp);
		err = traverse(vpp);
	}

	if (err == 0) {
		/*
		 * Fix up the root vnode mounted on .zfs/snapshot/<snapname>.
		 *
		 * This is where we lie about our v_vfsp in order to
		 * make .zfs/snapshot/<snapname> accessible over NFS
		 * without requiring manual mounts of <snapname>.
		 */
		ASSERT(VTOZ(*vpp)->z_zfsvfs != zfsvfs);
		VTOZ(*vpp)->z_zfsvfs->z_parent = zfsvfs;
		(*vpp)->v_vfsp = zfsvfs->z_vfs;
		(*vpp)->v_flag &= ~VROOT;
	}
	mutex_exit(&sdp->sd_lock);
	ZFS_EXIT(zfsvfs);

	/*
	 * If we had an error, drop our hold on the vnode and
	 * zfsctl_snapshot_inactive() will clean up.
	 */
	if (err) {
		VN_RELE(*vpp);
		*vpp = NULL;
	}
	return (err);
}
/*
 * Create and install the per-dataset kstats for 'objset', attaching them
 * to 'dk'.  Snapshots are skipped, as are datasets whose kstat names
 * would be truncated.  All failures are silent apart from a dbgmsg entry;
 * callers must tolerate dk->dk_kstats remaining NULL.
 */
void
dataset_kstats_create(dataset_kstats_t *dk, objset_t *objset)
{
	/*
	 * There should not be anything wrong with having kstats for
	 * snapshots. Since we are not sure how useful they would be
	 * though nor how much their memory overhead would matter in
	 * a filesystem with many snapshots, we skip them for now.
	 */
	if (dmu_objset_is_snapshot(objset))
		return;

	/*
	 * At the time of this writing, KSTAT_STRLEN is 255 in Linux,
	 * and the spa_name can theoretically be up to 256 characters.
	 * In reality though the spa_name can be 240 characters max
	 * [see origin directory name check in pool_namecheck()]. Thus,
	 * the naming scheme for the module name below should not cause
	 * any truncations. In the event that a truncation does happen
	 * though, due to some future change, we silently skip creating
	 * the kstat and log the event.
	 */
	char kstat_module_name[KSTAT_STRLEN];
	int n = snprintf(kstat_module_name, sizeof (kstat_module_name),
	    "zfs/%s", spa_name(dmu_objset_spa(objset)));
	if (n < 0) {
		zfs_dbgmsg("failed to create dataset kstat for objset %lld: "
		    " snprintf() for kstat module name returned %d",
		    (unsigned long long)dmu_objset_id(objset), n);
		return;
	} else if (n >= KSTAT_STRLEN) {
		zfs_dbgmsg("failed to create dataset kstat for objset %lld: "
		    "kstat module name length (%d) exceeds limit (%d)",
		    (unsigned long long)dmu_objset_id(objset),
		    n, KSTAT_STRLEN);
		return;
	}

	/* The kstat name is derived from the (stable) objset id. */
	char kstat_name[KSTAT_STRLEN];
	n = snprintf(kstat_name, sizeof (kstat_name), "objset-0x%llx",
	    (unsigned long long)dmu_objset_id(objset));
	if (n < 0) {
		zfs_dbgmsg("failed to create dataset kstat for objset %lld: "
		    " snprintf() for kstat name returned %d",
		    (unsigned long long)dmu_objset_id(objset), n);
		return;
	}
	ASSERT3U(n, <, KSTAT_STRLEN);

	kstat_t *kstat = kstat_create(kstat_module_name, 0, kstat_name,
	    "dataset", KSTAT_TYPE_NAMED,
	    sizeof (empty_dataset_kstats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (kstat == NULL)
		return;

	/* Start from a zeroed template, then fill in the dataset name. */
	dataset_kstat_values_t *dk_kstats =
	    kmem_alloc(sizeof (empty_dataset_kstats), KM_SLEEP);
	bcopy(&empty_dataset_kstats, dk_kstats,
	    sizeof (empty_dataset_kstats));

	char *ds_name = kmem_zalloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
	dsl_dataset_name(objset->os_dsl_dataset, ds_name);
	KSTAT_NAMED_STR_PTR(&dk_kstats->dkv_ds_name) = ds_name;
	KSTAT_NAMED_STR_BUFLEN(&dk_kstats->dkv_ds_name) =
	    ZFS_MAX_DATASET_NAME_LEN;

	kstat->ks_data = dk_kstats;
	kstat->ks_update = dataset_kstats_update;
	kstat->ks_private = dk;

	kstat_install(kstat);
	dk->dk_kstats = kstat;

	/* Aggregated I/O counters, all starting at zero. */
	aggsum_init(&dk->dk_aggsums.das_writes, 0);
	aggsum_init(&dk->dk_aggsums.das_nwritten, 0);
	aggsum_init(&dk->dk_aggsums.das_reads, 0);
	aggsum_init(&dk->dk_aggsums.das_nread, 0);
	aggsum_init(&dk->dk_aggsums.das_nunlinks, 0);
	aggsum_init(&dk->dk_aggsums.das_nunlinked, 0);
}
/*
 * Given a full path to a file, translate into a dataset name and a relative
 * path within the dataset. 'dataset' must be at least MAXNAMELEN characters,
 * and 'relpath' must be at least MAXPATHLEN characters. We also pass a stat
 * buffer, which we need later to get the object ID.
 */
static int
parse_pathname(const char *inpath, char *dataset, char *relpath,
    struct stat *statbuf)
{
	struct extmnttab mp;
	FILE *fp;
	int match;
	const char *rel;
	char fullpath[MAXPATHLEN];

	compress_slashes(inpath, fullpath);

	if (fullpath[0] != '/') {
		(void) fprintf(stderr, "invalid object '%s': must be full "
		    "path\n", fullpath);
		usage();
		return (-1);
	}

	if (strlen(fullpath) >= MAXPATHLEN) {
		(void) fprintf(stderr, "invalid object; pathname too long\n");
		return (-1);
	}

	if (stat(fullpath, statbuf) != 0) {
		(void) fprintf(stderr, "cannot open '%s': %s\n",
		    fullpath, strerror(errno));
		return (-1);
	}

	/* Walk the mount table looking for the device the file lives on. */
#ifdef HAVE_SETMNTENT
	if ((fp = setmntent(MNTTAB, "r")) == NULL) {
#else
	if ((fp = fopen(MNTTAB, "r")) == NULL) {
#endif
		(void) fprintf(stderr, "cannot open /etc/mtab\n");
		return (-1);
	}

	match = 0;
	while (getextmntent(fp, &mp, sizeof (mp)) == 0) {
		if (makedev(mp.mnt_major, mp.mnt_minor) == statbuf->st_dev) {
			match = 1;
			break;
		}
	}

	if (!match) {
		(void) fprintf(stderr, "cannot find mountpoint for '%s'\n",
		    fullpath);
		return (-1);
	}

	if (strcmp(mp.mnt_fstype, MNTTYPE_ZFS) != 0) {
		(void) fprintf(stderr, "invalid path '%s': not a ZFS "
		    "filesystem\n", fullpath);
		return (-1);
	}

	if (strncmp(fullpath, mp.mnt_mountp, strlen(mp.mnt_mountp)) != 0) {
		(void) fprintf(stderr, "invalid path '%s': mountpoint "
		    "doesn't match path\n", fullpath);
		return (-1);
	}

	/*
	 * NOTE(review): these strcpy()s rely on the documented caller
	 * contract that 'dataset' is >= MAXNAMELEN and 'relpath' is
	 * >= MAXPATHLEN — confirm callers honor it.
	 */
	(void) strcpy(dataset, mp.mnt_special);

	rel = fullpath + strlen(mp.mnt_mountp);
	if (rel[0] == '/')
		rel++;
	(void) strcpy(relpath, rel);

	return (0);
}
#endif
//From FreeBSD
/*
 * FreeBSD variant of parse_pathname(): uses statfs(2) instead of the
 * mount-table scan to locate the containing filesystem.
 */
static int
parse_pathname(const char *inpath, char *dataset, char *relpath,
    struct stat *statbuf)
{
	struct statfs sfs;
	const char *rel;
	char fullpath[MAXPATHLEN];

	compress_slashes(inpath, fullpath);

	if (fullpath[0] != '/') {
		(void) fprintf(stderr, "invalid object '%s': must be full "
		    "path\n", fullpath);
		usage();
		return (-1);
	}

	if (strlen(fullpath) >= MAXPATHLEN) {
		(void) fprintf(stderr, "invalid object; pathname too long\n");
		return (-1);
	}

	if (stat(fullpath, statbuf) != 0) {
		(void) fprintf(stderr, "cannot open '%s': %s\n",
		    fullpath, strerror(errno));
		return (-1);
	}

	if (statfs(fullpath, &sfs) == -1) {
		(void) fprintf(stderr, "cannot find mountpoint for '%s': %s\n",
		    fullpath, strerror(errno));
		return (-1);
	}

	if (strcmp(sfs.f_fstypename, MNTTYPE_ZFS) != 0) {
		(void) fprintf(stderr, "invalid path '%s': not a ZFS "
		    "filesystem\n", fullpath);
		return (-1);
	}

	if (strncmp(fullpath, sfs.f_mntonname, strlen(sfs.f_mntonname)) != 0) {
		(void) fprintf(stderr, "invalid path '%s': mountpoint "
		    "doesn't match path\n", fullpath);
		return (-1);
	}

	(void) strcpy(dataset, sfs.f_mntfromname);

	rel = fullpath + strlen(sfs.f_mntonname);
	if (rel[0] == '/')
		rel++;
	(void) strcpy(relpath, rel);

	return (0);
}

/*
 * Convert from a (dataset, path) pair into a (objset, object) pair. Note that
 * we grab the object number from the inode number, since looking this up via
 * libzpool is a real pain.
 */
/* ARGSUSED */
static int
object_from_path(const char *dataset, const char *path, struct stat *statbuf,
    zinject_record_t *record)
{
	objset_t *os;
	int err;

	/*
	 * Before doing any libzpool operations, call sync() to ensure that the
	 * on-disk state is consistent with the in-core state.
	 */
	sync();

	err = dmu_objset_own(dataset, DMU_OST_ZFS, B_TRUE, B_FALSE, FTAG, &os);
	if (err != 0) {
		(void) fprintf(stderr, "cannot open dataset '%s': %s\n",
		    dataset, strerror(err));
		return (-1);
	}

	/* The object number is the file's inode number. */
	record->zi_objset = dmu_objset_id(os);
	record->zi_object = statbuf->st_ino;

	dmu_objset_disown(os, B_FALSE, FTAG);

	return (0);
}
/*
 * Replay a single intent-log record against the dataset.  Records that
 * predate the claim txg or the last replayed sequence number are skipped.
 * On unrecoverable replay errors the whole replay is abandoned
 * (zl_replay is cleared) and a warning is logged.
 */
static void
zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
{
	zil_replay_arg_t *zr = zra;
	const zil_header_t *zh = zilog->zl_header;
	uint64_t reclen = lr->lrc_reclen;
	uint64_t txtype = lr->lrc_txtype;
	char *name;
	int pass, error;

	if (!zilog->zl_replay)	/* giving up */
		return;

	if (lr->lrc_txg < claim_txg)	/* already committed */
		return;

	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
		return;

	/* Strip case-insensitive bit, still present in log record */
	txtype &= ~TX_CI;

	if (txtype == 0 || txtype >= TX_MAX_TYPE) {
		error = EINVAL;
		goto bad;
	}

	/*
	 * Make a copy of the data so we can revise and extend it.
	 */
	bcopy(lr, zr->zr_lrbuf, reclen);

	/*
	 * The log block containing this lr may have been byteswapped
	 * so that we can easily examine common fields like lrc_txtype.
	 * However, the log is a mix of different data types, and only the
	 * replay vectors know how to byteswap their records.  Therefore, if
	 * the lr was byteswapped, undo it before invoking the replay vector.
	 */
	if (zr->zr_byteswap)
		byteswap_uint64_array(zr->zr_lrbuf, reclen);

	/*
	 * If this is a TX_WRITE with a blkptr, suck in the data.
	 */
	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
		lr_write_t *lrw = (lr_write_t *)lr;
		blkptr_t *wbp = &lrw->lr_blkptr;
		uint64_t wlen = lrw->lr_length;
		char *wbuf = zr->zr_lrbuf + reclen;

		if (BP_IS_HOLE(wbp)) {	/* compressed to a hole */
			bzero(wbuf, wlen);
		} else {
			/*
			 * A subsequent write may have overwritten this block,
			 * in which case wbp may have been freed and
			 * reallocated, and our read of wbp may fail with a
			 * checksum error.  We can safely ignore this because
			 * the later write will provide the correct data.
			 */
			zbookmark_t zb;

			zb.zb_objset = dmu_objset_id(zilog->zl_os);
			zb.zb_object = lrw->lr_foid;
			zb.zb_level = -1;
			zb.zb_blkid = lrw->lr_offset / BP_GET_LSIZE(wbp);

			(void) zio_wait(zio_read(NULL, zilog->zl_spa,
			    wbp, wbuf, BP_GET_LSIZE(wbp), NULL, NULL,
			    ZIO_PRIORITY_SYNC_READ,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb));
			(void) memmove(wbuf, wbuf + lrw->lr_blkoff, wlen);
		}
	}

	/*
	 * We must now do two things atomically: replay this log record,
	 * and update the log header sequence number to reflect the fact that
	 * we did so. At the end of each replay function the sequence number
	 * is updated if we are in replay mode.
	 */
	for (pass = 1; pass <= 2; pass++) {
		zilog->zl_replaying_seq = lr->lrc_seq;
		/* Only byteswap (if needed) on the 1st pass.  */
		error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lrbuf,
		    zr->zr_byteswap && pass == 1);

		if (!error)
			return;

		/*
		 * The DMU's dnode layer doesn't see removes until the txg
		 * commits, so a subsequent claim can spuriously fail with
		 * EEXIST. So if we receive any error we try syncing out
		 * any removes then retry the transaction.
		 */
		if (pass == 1)
			txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
	}

bad:
	ASSERT(error);
	name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	dmu_objset_name(zr->zr_os, name);
	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
	    "dataset %s, seq 0x%llx, txtype %llu %s\n",
	    error, name,
	    (u_longlong_t)lr->lrc_seq, (u_longlong_t)txtype,
	    (lr->lrc_txtype & TX_CI) ? "CI" : "");
	zilog->zl_replay = B_FALSE;
	kmem_free(name, MAXNAMELEN);
}
ASSERT3U(spa_version(dmu_objset_spa(zsb->z_os)), >=, SPA_VERSION_SA); sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE, DMU_OT_NONE, 0, tx); error = zap_add(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1, &sa_obj, tx); ASSERT0(error); VERIFY(0 == sa_set_sa_object(os, sa_obj)); sa_register_update_callback(os, zfs_sa_upgrade); } spa_history_log_internal(LOG_DS_UPGRADE, dmu_objset_spa(os), tx, "oldver=%llu newver=%llu dataset = %llu", zsb->z_version, newvers, dmu_objset_id(os)); dmu_tx_commit(tx); zsb->z_version = newvers; if (zsb->z_version >= ZPL_VERSION_FUID) zfs_set_fuid_feature(zsb); return (0); } EXPORT_SYMBOL(zfs_set_version); /* * Read a property stored within the master node. */