/*
 * Snapshot the dataset 'fsname' under the snapshot name 'snapname';
 * when 'recursive' is set, also snapshot every descendant filesystem
 * (DS_FIND_CHILDREN).  All snapshots are created in a single sync task
 * group so they happen atomically in one txg.
 *
 * On failure, 'fsname' is overwritten with the name of a dataset that
 * caused the error so the caller can report it.
 * Returns 0 on success or an errno value.
 */
int
dmu_objset_snapshot(char *fsname, char *snapname, boolean_t recursive)
{
	dsl_sync_task_t *dst;
	struct osnode *osn;
	struct snaparg sn = { 0 };
	spa_t *spa;
	int err;

	/*
	 * Pre-load the failure buffer with the top-level name in case we
	 * fail before any individual dataset is processed.
	 */
	(void) strcpy(sn.failed, fsname);

	err = spa_open(fsname, &spa, FTAG);
	if (err)
		return (err);

	sn.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	sn.snapname = snapname;
	list_create(&sn.objsets, sizeof (struct osnode),
	    offsetof(struct osnode, node));

	if (recursive) {
		sn.checkperms = B_TRUE;
		err = dmu_objset_find(fsname,
		    dmu_objset_snapshot_one, &sn, DS_FIND_CHILDREN);
	} else {
		sn.checkperms = B_FALSE;
		err = dmu_objset_snapshot_one(fsname, &sn);
	}

	if (err)
		goto out;

	err = dsl_sync_task_group_wait(sn.dstg);

	/* Record the name of a dataset whose snapshot task failed. */
	for (dst = list_head(&sn.dstg->dstg_tasks); dst;
	    dst = list_next(&sn.dstg->dstg_tasks, dst)) {
		dsl_dataset_t *ds = dst->dst_arg1;
		if (dst->dst_err)
			dsl_dataset_name(ds, sn.failed);
	}

out:
	/*
	 * Resume ZIL activity on, and close, every objset that
	 * dmu_objset_snapshot_one() suspended and queued.
	 * NOTE: parenthesize the assignment-as-condition explicitly
	 * (was a bare "while (osn = ...)", which draws -Wparentheses).
	 */
	while ((osn = list_head(&sn.objsets)) != NULL) {
		list_remove(&sn.objsets, osn);
		zil_resume(dmu_objset_zil(osn->os));
		dmu_objset_close(osn->os);
		kmem_free(osn, sizeof (struct osnode));
	}
	list_destroy(&sn.objsets);

	if (err)
		(void) strcpy(fsname, sn.failed);

	dsl_sync_task_group_destroy(sn.dstg);
	spa_close(spa, FTAG);
	return (err);
}
/*
 * Snapshot the dataset 'fsname' as 'snapname', applying the properties
 * in 'props' to each new snapshot; when 'recursive' is set, also
 * snapshot every descendant filesystem (DS_FIND_CHILDREN).  All
 * snapshots run in one sync task group, i.e. atomically in a single txg.
 *
 * On failure, 'fsname' is overwritten with the name of a dataset that
 * caused the error.  Returns 0 on success or an errno value.
 */
int
dmu_objset_snapshot(char *fsname, char *snapname,
    nvlist_t *props, boolean_t recursive)
{
	dsl_sync_task_t *dst;
	struct snaparg *sn;
	spa_t *spa;
	int err;

	/*
	 * snaparg is heap-allocated here (the earlier variant kept it on
	 * the stack) — presumably to keep its name buffer off the kernel
	 * stack; confirm against struct snaparg's definition.
	 */
	sn = kmem_alloc(sizeof (struct snaparg), KM_SLEEP);

	/* Pre-load the failure buffer in case we fail early. */
	(void) strcpy(sn->failed, fsname);

	err = spa_open(fsname, &spa, FTAG);
	if (err) {
		kmem_free(sn, sizeof (struct snaparg));
		return (err);
	}

	sn->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	sn->snapname = snapname;
	sn->props = props;

	if (recursive) {
		sn->checkperms = B_TRUE;
		err = dmu_objset_find(fsname,
		    dmu_objset_snapshot_one, sn, DS_FIND_CHILDREN);
	} else {
		sn->checkperms = B_FALSE;
		err = dmu_objset_snapshot_one(fsname, sn);
	}

	if (err == 0)
		err = dsl_sync_task_group_wait(sn->dstg);

	/*
	 * Walk every queued task: record the name of a failing dataset,
	 * and unconditionally resume its ZIL and close its objset —
	 * cleanup happens for every task whether or not it failed.
	 */
	for (dst = list_head(&sn->dstg->dstg_tasks); dst;
	    dst = list_next(&sn->dstg->dstg_tasks, dst)) {
		objset_t *os = dst->dst_arg1;
		dsl_dataset_t *ds = os->os->os_dsl_dataset;
		if (dst->dst_err)
			dsl_dataset_name(ds, sn->failed);
		zil_resume(dmu_objset_zil(os));
		dmu_objset_close(os);
	}

	if (err)
		(void) strcpy(fsname, sn->failed);

	dsl_sync_task_group_destroy(sn->dstg);
	spa_close(spa, FTAG);
	kmem_free(sn, sizeof (struct snaparg));
	return (err);
}
/*
 * Syncing-context entry point for a sync task group: verify there is
 * enough pool space for the group's declared usage, run every task's
 * check function under the config lock, and only if all checks pass
 * run every task's sync function.  The first/last error seen is
 * latched in dstg->dstg_err for the thread blocked in
 * dsl_sync_task_group_wait().
 */
void
dsl_sync_task_group_sync(dsl_sync_task_group_t *dstg, dmu_tx_t *tx)
{
	dsl_sync_task_t *dst;
	dsl_pool_t *dp = dstg->dstg_pool;
	uint64_t quota, used;

	ASSERT0(dstg->dstg_err);

	/*
	 * Check for sufficient space.  We just check against what's
	 * on-disk; we don't want any in-flight accounting to get in our
	 * way, because open context may have already used up various
	 * in-core limits (arc_tempreserve, dsl_pool_tempreserve).
	 */
	quota = dsl_pool_adjustedsize(dp, B_FALSE) -
	    metaslab_class_get_deferred(spa_normal_class(dp->dp_spa));
	used = dp->dp_root_dir->dd_phys->dd_used_bytes;

	/* MOS space is triple-dittoed, so we multiply by 3. */
	if (dstg->dstg_space > 0 && used + dstg->dstg_space * 3 > quota) {
		dstg->dstg_err = ENOSPC;
		return;
	}

	/*
	 * Check for errors by calling checkfuncs.
	 */
	rw_enter(&dp->dp_config_rwlock, RW_WRITER);
	for (dst = list_head(&dstg->dstg_tasks); dst;
	    dst = list_next(&dstg->dstg_tasks, dst)) {
		dst->dst_err =
		    dst->dst_checkfunc(dst->dst_arg1, dst->dst_arg2, tx);
		if (dst->dst_err)
			dstg->dstg_err = dst->dst_err;
	}

	if (dstg->dstg_err == 0) {
		/*
		 * Execute sync tasks.
		 */
		for (dst = list_head(&dstg->dstg_tasks); dst;
		    dst = list_next(&dstg->dstg_tasks, dst)) {
			dst->dst_syncfunc(dst->dst_arg1, dst->dst_arg2, tx);
		}
	}
	rw_exit(&dp->dp_config_rwlock);

	/* No thread is waiting on this group, so it cleans itself up. */
	if (dstg->dstg_nowaiter)
		dsl_sync_task_group_destroy(dstg);
}
/*
 * Convenience wrapper: run a single checkfunc/syncfunc pair as a
 * one-task sync task group, wait for it to execute in syncing context,
 * then tear the group down.  'blocks_modified' is the task's declared
 * space usage, passed through to dsl_sync_task_create().
 * Returns the error reported by the task group wait.
 */
int
dsl_sync_task_do(dsl_pool_t *dp, dsl_checkfunc_t *checkfunc,
    dsl_syncfunc_t *syncfunc, void *arg1, void *arg2, int blocks_modified)
{
	dsl_sync_task_group_t *group = dsl_sync_task_group_create(dp);
	int error;

	dsl_sync_task_create(group, checkfunc, syncfunc,
	    arg1, arg2, blocks_modified);
	error = dsl_sync_task_group_wait(group);
	dsl_sync_task_group_destroy(group);

	return (error);
}
/*
 * Syncing-context entry point for a sync task group (tempreserve
 * variant): reserve space in the MOS dir for the group's declared
 * usage, run every task's check function under the config lock, and
 * only if all checks pass run every task's sync function (passing the
 * caller's credentials, dstg_cr).  Any error is latched in
 * dstg->dstg_err for the thread blocked in dsl_sync_task_group_wait().
 */
void
dsl_sync_task_group_sync(dsl_sync_task_group_t *dstg, dmu_tx_t *tx)
{
	dsl_sync_task_t *dst;
	void *tr_cookie;

	ASSERT3U(dstg->dstg_err, ==, 0);

	/*
	 * Check for sufficient space.
	 */
	dstg->dstg_err = dsl_dir_tempreserve_space(dstg->dstg_pool->dp_mos_dir,
	    dstg->dstg_space, dstg->dstg_space * 3, 0, 0, &tr_cookie, tx);

	/* don't bother trying again */
	if (dstg->dstg_err == ERESTART)
		dstg->dstg_err = EAGAIN;
	if (dstg->dstg_err)
		return;

	/*
	 * Check for errors by calling checkfuncs.
	 */
	rw_enter(&dstg->dstg_pool->dp_config_rwlock, RW_WRITER);
	for (dst = list_head(&dstg->dstg_tasks); dst;
	    dst = list_next(&dstg->dstg_tasks, dst)) {
		dst->dst_err =
		    dst->dst_checkfunc(dst->dst_arg1, dst->dst_arg2, tx);
		if (dst->dst_err)
			dstg->dstg_err = dst->dst_err;
	}

	if (dstg->dstg_err == 0) {
		/*
		 * Execute sync tasks.
		 */
		for (dst = list_head(&dstg->dstg_tasks); dst;
		    dst = list_next(&dstg->dstg_tasks, dst)) {
			dst->dst_syncfunc(dst->dst_arg1, dst->dst_arg2,
			    dstg->dstg_cr, tx);
		}
	}
	rw_exit(&dstg->dstg_pool->dp_config_rwlock);

	/* Release the reservation taken above on every exit path past it. */
	dsl_dir_tempreserve_clear(tr_cookie, tx);

	/* No thread is waiting on this group, so it cleans itself up. */
	if (dstg->dstg_nowaiter)
		dsl_sync_task_group_destroy(dstg);
}