Example #1
int __osd_xattr_del(const struct lu_env *env, struct osd_object *obj,
		    const char *name, struct osd_thandle *oh)
{
	struct osd_device *osd = osd_obj2dev(obj);
	uint64_t           xa_data_obj;
	int                rc;

	/* Try to remove the xattr from the SA first. */
	rc = __osd_sa_xattr_del(env, obj, name, oh);
	if (rc != -ENOENT)
		return rc;

	if (obj->oo_xattr == ZFS_NO_OBJECT)
		return 0;

	rc = -zap_lookup(osd->od_os, obj->oo_xattr, name, sizeof(uint64_t), 1,
			&xa_data_obj);
	if (rc == -ENOENT) {
		rc = 0;
	} else if (rc == 0) {
		/*
		 * Entry exists.
		 * We'll delete the existing object and ZAP entry.
		 */
		rc = -dmu_object_free(osd->od_os, xa_data_obj, oh->ot_tx);
		if (rc)
			return rc;

		rc = -zap_remove(osd->od_os, obj->oo_xattr, name, oh->ot_tx);
	}

	return rc;
}
Example #2
static int
zpios_dmu_object_free(run_args_t *run_args, objset_t *os, uint64_t obj)
{
	struct dmu_tx *tx;
	int rc;

	tx = dmu_tx_create(os);
	dmu_tx_hold_free(tx, obj, 0, DMU_OBJECT_END);
	rc = dmu_tx_assign(tx, TXG_WAIT);
	if (rc) {
		zpios_print(run_args->file,
			    "dmu_tx_assign() failed: %d\n", rc);
		dmu_tx_abort(tx);
		return (rc);
	}

	rc = dmu_object_free(os, obj, tx);
	if (rc) {
		zpios_print(run_args->file,
			    "dmu_object_free() failed: %d\n", rc);
		dmu_tx_abort(tx);
		return (rc);
	}

	dmu_tx_commit(tx);

	return (0);
}
Example #3
void
dsl_dir_destroy_sync(void *arg1, void *tag, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	uint64_t val, obj;

	ASSERT(RW_WRITE_HELD(&dd->dd_pool->dp_config_rwlock));
	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);

	/* Remove our reservation. */
	val = 0;
	dsl_dir_set_reservation_sync(dd, &val, cr, tx);
	ASSERT3U(dd->dd_used_bytes, ==, 0);
	ASSERT3U(dd->dd_phys->dd_reserved, ==, 0);

	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_child_dir_zapobj, tx));
	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_props_zapobj, tx));
	VERIFY(0 == dsl_deleg_destroy(mos, dd->dd_phys->dd_deleg_zapobj, tx));
	VERIFY(0 == zap_remove(mos,
	    dd->dd_parent->dd_phys->dd_child_dir_zapobj, dd->dd_myname, tx));

	obj = dd->dd_object;
	dsl_dir_close(dd, tag);
	VERIFY(0 == dmu_object_free(mos, obj, tx));
}
Example #4
void
dsl_dir_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	uint64_t obj;
	dd_used_t t;

	ASSERT(RW_WRITE_HELD(&dd->dd_pool->dp_config_rwlock));
	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);

	/*
	 * Remove our reservation. The impl() routine avoids setting the
	 * actual property, which would require the (already destroyed) ds.
	 */
	dsl_dir_set_reservation_sync_impl(dd, 0, tx);

	ASSERT0(dd->dd_phys->dd_used_bytes);
	ASSERT0(dd->dd_phys->dd_reserved);
	for (t = 0; t < DD_USED_NUM; t++)
		ASSERT0(dd->dd_phys->dd_used_breakdown[t]);

	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_child_dir_zapobj, tx));
	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_props_zapobj, tx));
	VERIFY(0 == dsl_deleg_destroy(mos, dd->dd_phys->dd_deleg_zapobj, tx));
	VERIFY(0 == zap_remove(mos,
	    dd->dd_parent->dd_phys->dd_child_dir_zapobj, dd->dd_myname, tx));

	obj = dd->dd_object;
	dsl_dir_close(dd, tag);
	VERIFY(0 == dmu_object_free(mos, obj, tx));
}
Example #5
int osd_xattrs_destroy(const struct lu_env *env,
		       struct osd_object *obj, struct osd_thandle *oh)
{
	struct osd_device *osd = osd_obj2dev(obj);
	dmu_tx_t	  *tx = oh->ot_tx;
	zap_attribute_t	  *za = &osd_oti_get(env)->oti_za;
	zap_cursor_t	  *zc;
	uint64_t	   xid;
	int		   rc;

	/* The transaction must have been assigned to a transaction group. */
	LASSERT(tx->tx_txg != 0);

	if (obj->oo_xattr == ZFS_NO_OBJECT)
		return 0; /* Nothing to do for SA xattrs */

	/* Free the ZAP holding the xattrs */
	rc = osd_zap_cursor_init(&zc, osd->od_os, obj->oo_xattr, 0);
	if (rc)
		return rc;

	while (zap_cursor_retrieve(zc, za) == 0) {
		LASSERT(za->za_num_integers == 1);
		LASSERT(za->za_integer_length == sizeof(uint64_t));

		rc = -zap_lookup(osd->od_os, obj->oo_xattr, za->za_name,
				 sizeof(uint64_t), 1, &xid);
		if (rc) {
			CERROR("%s: lookup xattr %s failed: rc = %d\n",
			       osd->od_svname, za->za_name, rc);
		} else {
			rc = -dmu_object_free(osd->od_os, xid, tx);
			if (rc)
				CERROR("%s: free xattr %s failed: rc = %d\n",
				       osd->od_svname, za->za_name, rc);
		}
		zap_cursor_advance(zc);
	}
	osd_zap_cursor_fini(zc);

	rc = -dmu_object_free(osd->od_os, obj->oo_xattr, tx);
	if (rc)
		CERROR("%s: free xattr "LPU64" failed: rc = %d\n",
		       osd->od_svname, obj->oo_xattr, rc);

	return rc;
}
Example #6
void
zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	objset_t *os = zsb->z_os;
	uint64_t obj = zp->z_id;
	uint64_t acl_obj = zfs_external_acl(zp);

	ZFS_OBJ_HOLD_ENTER(zsb, obj);
	if (acl_obj) {
		VERIFY(!zp->z_is_sa);
		VERIFY(0 == dmu_object_free(os, acl_obj, tx));
	}
	VERIFY(0 == dmu_object_free(os, obj, tx));
	zfs_znode_dmu_fini(zp);
	ZFS_OBJ_HOLD_EXIT(zsb, obj);
}
Example #7
int __osd_object_free(udmu_objset_t *uos, uint64_t oid, dmu_tx_t *tx)
{
	LASSERT(uos->objects != 0);
	spin_lock(&uos->lock);
	uos->objects--;
	spin_unlock(&uos->lock);

	return -dmu_object_free(uos->os, oid, tx);
}
Example #8
/*
 * The transaction passed to this routine must have
 * dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT) called and then assigned
 * to a transaction group.
 */
int __osd_object_create(const struct lu_env *env, struct osd_object *obj,
			dmu_buf_t **dbp, dmu_tx_t *tx, struct lu_attr *la,
			uint64_t parent)
{
	uint64_t	     oid;
	int		     rc;
	struct osd_device   *osd = osd_obj2dev(obj);
	const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
	dmu_object_type_t    type = DMU_OT_PLAIN_FILE_CONTENTS;

	/* Assert that the transaction has been assigned to a
	   transaction group. */
	LASSERT(tx->tx_txg != 0);

	/* Use DMU_OTN_UINT8_METADATA for local objects so their data blocks
	 * would get an additional ditto copy */
	if (unlikely(S_ISREG(la->la_mode) &&
		     fid_seq_is_local_file(fid_seq(fid))))
		type = DMU_OTN_UINT8_METADATA;

	/* Create a new DMU object. */
	oid = dmu_object_alloc(osd->od_os, type, 0,
			       DMU_OT_SA, DN_MAX_BONUSLEN, tx);
	rc = -sa_buf_hold(osd->od_os, oid, osd_obj_tag, dbp);
	LASSERTF(rc == 0, "sa_buf_hold "LPU64" failed: %d\n", oid, rc);

	LASSERT(la->la_valid & LA_MODE);
	la->la_size = 0;
	la->la_nlink = 1;

	rc = __osd_attr_init(env, osd, oid, tx, la, parent);
	if (rc != 0) {
		sa_buf_rele(*dbp, osd_obj_tag);
		*dbp = NULL;
		dmu_object_free(osd->od_os, oid, tx);
		return rc;
	}

	return 0;
}
Example #9
void
dsl_dir_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	dsl_prop_setarg_t psa;
	uint64_t value = 0;
	uint64_t obj;
	dd_used_t t;

	ASSERT(RW_WRITE_HELD(&dd->dd_pool->dp_config_rwlock));
	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);

	/* Remove our reservation. */
	dsl_prop_setarg_init_uint64(&psa, "reservation",
	    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
	    &value);
	psa.psa_effective_value = 0;	/* predict default value */

	dsl_dir_set_reservation_sync(ds, &psa, tx);

	ASSERT3U(dd->dd_phys->dd_used_bytes, ==, 0);
	ASSERT3U(dd->dd_phys->dd_reserved, ==, 0);
	for (t = 0; t < DD_USED_NUM; t++)
		ASSERT3U(dd->dd_phys->dd_used_breakdown[t], ==, 0);

	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_child_dir_zapobj, tx));
	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_props_zapobj, tx));
	VERIFY(0 == dsl_deleg_destroy(mos, dd->dd_phys->dd_deleg_zapobj, tx));
	VERIFY(0 == zap_remove(mos,
	    dd->dd_parent->dd_phys->dd_child_dir_zapobj, dd->dd_myname, tx));

	obj = dd->dd_object;
	dsl_dir_close(dd, tag);
	VERIFY(0 == dmu_object_free(mos, obj, tx));
}
Example #10
int __osd_sa_xattr_set(const struct lu_env *env, struct osd_object *obj,
		       const struct lu_buf *buf, const char *name, int fl,
		       struct osd_thandle *oh)
{
	dmu_buf_impl_t *db;
	uchar_t *nv_value;
	size_t  size;
	int	nv_size;
	int	rc;
	int	too_big = 0;

	LASSERT(obj->oo_sa_hdl);
	if (obj->oo_sa_xattr == NULL) {
		rc = __osd_xattr_cache(env, obj);
		if (rc)
			return rc;
	}

	LASSERT(obj->oo_sa_xattr);
	/* Limited to 32k to keep nvpair memory allocations small */
	if (buf->lb_len > DXATTR_MAX_ENTRY_SIZE) {
		too_big = 1;
	} else {
		/* Prevent the DXATTR SA from consuming the entire SA
		 * region */
		rc = -nvlist_size(obj->oo_sa_xattr, &size, NV_ENCODE_XDR);
		if (rc)
			return rc;

		if (size + buf->lb_len > DXATTR_MAX_SA_SIZE)
			too_big = 1;
	}

	/* Even in the -EFBIG case we must look up the xattr and check
	 * whether we can rewrite it, and only then delete it from the SA */
	rc = -nvlist_lookup_byte_array(obj->oo_sa_xattr, name, &nv_value,
					&nv_size);
	if (rc == 0) {
		if (fl & LU_XATTR_CREATE) {
			return -EEXIST;
		} else if (too_big) {
			rc = -nvlist_remove(obj->oo_sa_xattr, name,
						DATA_TYPE_BYTE_ARRAY);
			if (rc < 0)
				return rc;
			rc = __osd_sa_xattr_schedule_update(env, obj, oh);
			return rc == 0 ? -EFBIG : rc;
		}
	} else if (rc == -ENOENT) {
		if (fl & LU_XATTR_REPLACE)
			return -ENODATA;
		else if (too_big)
			return -EFBIG;
	} else {
		return rc;
	}

	/* Ensure xattr doesn't exist in ZAP */
	if (obj->oo_xattr != ZFS_NO_OBJECT) {
		struct osd_device *osd = osd_obj2dev(obj);
		uint64_t           objid;
		rc = -zap_lookup(osd->od_os, obj->oo_xattr,
				 name, 8, 1, &objid);
		if (rc == 0) {
			rc = -dmu_object_free(osd->od_os, objid, oh->ot_tx);
			if (rc == 0)
				zap_remove(osd->od_os, obj->oo_xattr,
					   name, oh->ot_tx);
		}
	}

	rc = -nvlist_add_byte_array(obj->oo_sa_xattr, name,
				    (uchar_t *)buf->lb_buf, buf->lb_len);
	if (rc)
		return rc;

	/* Batch updates only for just-created dnodes, where we
	 * typically set a number of EAs in a single transaction */
	db = (dmu_buf_impl_t *)obj->oo_db;
	if (DB_DNODE(db)->dn_allocated_txg == oh->ot_tx->tx_txg)
		rc = __osd_sa_xattr_schedule_update(env, obj, oh);
	else
		rc = __osd_sa_xattr_update(env, obj, oh);

	return rc;
}
Example #11
void
zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
{
	dmu_buf_t *db = sa_get_db(hdl);
	znode_t *zp = sa_get_userdata(hdl);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	sa_bulk_attr_t bulk[20];
	int count = 0;
	sa_bulk_attr_t sa_attrs[20] = { { 0 } };
	zfs_acl_locator_cb_t locate = { 0 };
	uint64_t uid, gid, mode, rdev, xattr, parent;
	uint64_t crtime[2], mtime[2], ctime[2];
	zfs_acl_phys_t znode_acl;
	char scanstamp[AV_SCANSTAMP_SZ];
	boolean_t drop_lock = B_FALSE;

	/*
	 * No upgrade if the ACL isn't cached, since we won't know which
	 * locks are held and reading the ACL would require special
	 * "locked" interfaces that would be messy
	 */
	if (zp->z_acl_cached == NULL || vnode_islnk(ZTOV(zp)))
		return;

	/*
	 * If the z_lock is held and we aren't the owner, just return,
	 * since we don't want to deadlock trying to update the status
	 * of z_is_sa.  This file can then be upgraded at a later time.
	 *
	 * Otherwise, we know we are doing the sa_update() that caused
	 * us to enter this function.
	 */
	if (mutex_owner(&zp->z_lock) != curthread) {
		if (mutex_tryenter(&zp->z_lock) == 0)
			return;
		else
			drop_lock = B_TRUE;
	}

	/* First do a bulk query of the attributes that aren't cached */
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &crtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_XATTR(zfsvfs), NULL, &xattr, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL, &rdev, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL, &uid, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL, &gid, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
	    &znode_acl, 88);

	if (sa_bulk_lookup_locked(hdl, bulk, count) != 0)
		goto done;


	/*
	 * While the order here doesn't matter, it's best to try to organize
	 * it in such a way as to pick up an already existing layout number
	 */
	count = 0;
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_GEN(zfsvfs),
	    NULL, &zp->z_gen, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_UID(zfsvfs), NULL, &uid, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_GID(zfsvfs), NULL, &gid, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_PARENT(zfsvfs),
	    NULL, &parent, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_ATIME(zfsvfs), NULL,
	    zp->z_atime, 16);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MTIME(zfsvfs), NULL,
	    &mtime, 16);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_CTIME(zfsvfs), NULL,
	    &ctime, 16);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_CRTIME(zfsvfs), NULL,
	    &crtime, 16);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_LINKS(zfsvfs), NULL,
	    &zp->z_links, 8);
	if (vnode_isblk(zp->z_vnode) || vnode_islnk(zp->z_vnode))
		SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_RDEV(zfsvfs), NULL,
		    &rdev, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
	    &zp->z_acl_cached->z_acl_count, 8);

	if (zp->z_acl_cached->z_version < ZFS_ACL_VERSION_FUID)
		zfs_acl_xform(zp, zp->z_acl_cached, CRED());

	locate.cb_aclp = zp->z_acl_cached;
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_DACL_ACES(zfsvfs),
	    zfs_acl_data_locator, &locate, zp->z_acl_cached->z_acl_bytes);

	if (xattr)
		SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_XATTR(zfsvfs),
		    NULL, &xattr, 8);

	/* if scanstamp then add scanstamp */

	if (zp->z_pflags & ZFS_BONUS_SCANSTAMP) {
		bcopy((caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
		    scanstamp, AV_SCANSTAMP_SZ);
		SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SCANSTAMP(zfsvfs),
		    NULL, scanstamp, AV_SCANSTAMP_SZ);
		zp->z_pflags &= ~ZFS_BONUS_SCANSTAMP;
	}

	VERIFY(dmu_set_bonustype(db, DMU_OT_SA, tx) == 0);
	VERIFY(sa_replace_all_by_template_locked(hdl, sa_attrs,
	    count, tx) == 0);
	if (znode_acl.z_acl_extern_obj)
		VERIFY(0 == dmu_object_free(zfsvfs->z_os,
		    znode_acl.z_acl_extern_obj, tx));

	zp->z_is_sa = B_TRUE;
done:
	if (drop_lock)
		mutex_exit(&zp->z_lock);
}
Example #12
/* ARGSUSED */
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
	static const char *old_names[] = {
		"scrub_bookmark",
		"scrub_ddt_bookmark",
		"scrub_ddt_class_max",
		"scrub_queue",
		"scrub_min_txg",
		"scrub_max_txg",
		"scrub_func",
		"scrub_errors",
		NULL
	};

	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	int i;

	/* Remove any remnants of an old-style scrub. */
	for (i = 0; old_names[i]; i++) {
		(void) zap_remove(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
	}

	if (scn->scn_phys.scn_queue_obj != 0) {
		VERIFY(0 == dmu_object_free(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, tx));
		scn->scn_phys.scn_queue_obj = 0;
	}

	/*
	 * If we were "restarted" from a stopped state, don't bother
	 * with anything else.
	 */
	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (complete)
		scn->scn_phys.scn_state = DSS_FINISHED;
	else
		scn->scn_phys.scn_state = DSS_CANCELED;

	spa_history_log_internal(spa, "scan done", tx,
	    "complete=%u", complete);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight > 0) {
			cv_wait(&spa->spa_scrub_io_cv,
			    &spa->spa_scrub_lock);
		}
		mutex_exit(&spa->spa_scrub_lock);
		spa->spa_scrub_started = B_FALSE;
		spa->spa_scrub_active = B_FALSE;

		/*
		 * If the scrub/resilver completed, update all DTLs to
		 * reflect this.  Whether it succeeded or not, vacate
		 * all temporary scrub DTLs.
		 */
		vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
		    complete ? scn->scn_phys.scn_max_txg : 0, B_TRUE);
		if (complete) {
			spa_event_notify(spa, NULL, scn->scn_phys.scn_min_txg ?
			    ESC_ZFS_RESILVER_FINISH : ESC_ZFS_SCRUB_FINISH);
		}
		spa_errlog_rotate(spa);

		/*
		 * We may have finished replacing a device.
		 * Let the async thread assess this and handle the detach.
		 */
		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
	}

	scn->scn_phys.scn_end_time = gethrestime_sec();
}
Example #13
/* ARGSUSED */
static void
dsl_pool_scrub_cancel_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_pool_t *dp = arg1;
	boolean_t *completep = arg2;

	if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
		return;

	mutex_enter(&dp->dp_scrub_cancel_lock);

	if (dp->dp_scrub_restart) {
		dp->dp_scrub_restart = B_FALSE;
		*completep = B_FALSE;
	}

	/* XXX this is scrub-clean specific */
	mutex_enter(&dp->dp_spa->spa_scrub_lock);
	while (dp->dp_spa->spa_scrub_inflight > 0) {
		cv_wait(&dp->dp_spa->spa_scrub_io_cv,
		    &dp->dp_spa->spa_scrub_lock);
	}
	mutex_exit(&dp->dp_spa->spa_scrub_lock);
	dp->dp_spa->spa_scrub_active = B_FALSE;

	dp->dp_scrub_func = SCRUB_FUNC_NONE;
	VERIFY(0 == dmu_object_free(dp->dp_meta_objset,
	    dp->dp_scrub_queue_obj, tx));
	dp->dp_scrub_queue_obj = 0;
	bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));
	bzero(&dp->dp_scrub_ddt_bookmark, sizeof (ddt_bookmark_t));

	VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_QUEUE, tx));
	VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_MIN_TXG, tx));
	VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_MAX_TXG, tx));
	VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_BOOKMARK, tx));
	VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_FUNC, tx));
	VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_ERRORS, tx));

	(void) zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_DDT_BOOKMARK, tx);
	(void) zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_DDT_CLASS_MAX, tx);

	spa_history_internal_log(LOG_POOL_SCRUB_DONE, dp->dp_spa, tx, cr,
	    "complete=%u", *completep);

	/* below is scrub-clean specific */
	vdev_scrub_stat_update(dp->dp_spa->spa_root_vdev, POOL_SCRUB_NONE,
	    *completep);
	/*
	 * If the scrub/resilver completed, update all DTLs to reflect this.
	 * Whether it succeeded or not, vacate all temporary scrub DTLs.
	 */
	vdev_dtl_reassess(dp->dp_spa->spa_root_vdev, tx->tx_txg,
	    *completep ? dp->dp_scrub_max_txg : 0, B_TRUE);
	dp->dp_spa->spa_scrub_started = B_FALSE;
	if (*completep)
		spa_event_notify(dp->dp_spa, NULL, dp->dp_scrub_min_txg ?
		    ESC_ZFS_RESILVER_FINISH : ESC_ZFS_SCRUB_FINISH);
	spa_errlog_rotate(dp->dp_spa);

	/*
	 * We may have finished replacing a device.
	 * Let the async thread assess this and handle the detach.
	 */
	spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER_DONE);

	dp->dp_scrub_min_txg = dp->dp_scrub_max_txg = 0;
	mutex_exit(&dp->dp_scrub_cancel_lock);
}
Example #14
void
vdev_indirect_births_free(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
	VERIFY0(dmu_object_free(os, object, tx));
}
Example #15
static int osd_object_destroy(const struct lu_env *env,
			      struct dt_object *dt, struct thandle *th)
{
	struct osd_thread_info	*info = osd_oti_get(env);
	char			*buf = info->oti_str;
	struct osd_object	*obj = osd_dt_obj(dt);
	struct osd_device	*osd = osd_obj2dev(obj);
	const struct lu_fid	*fid = lu_object_fid(&dt->do_lu);
	struct osd_thandle	*oh;
	int			 rc;
	uint64_t		 oid, zapid;
	ENTRY;

	down_write(&obj->oo_guard);

	if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
		GOTO(out, rc = -ENOENT);

	LASSERT(obj->oo_db != NULL);

	oh = container_of0(th, struct osd_thandle, ot_super);
	LASSERT(oh != NULL);
	LASSERT(oh->ot_tx != NULL);

	/* remove obj ref from index dir (it depends) */
	zapid = osd_get_name_n_idx(env, osd, fid, buf, sizeof(info->oti_str));
	rc = -zap_remove(osd->od_os, zapid, buf, oh->ot_tx);
	if (rc) {
		CERROR("%s: zap_remove(%s) failed: rc = %d\n",
		       osd->od_svname, buf, rc);
		GOTO(out, rc);
	}

	rc = osd_xattrs_destroy(env, obj, oh);
	if (rc) {
		CERROR("%s: cannot destroy xattrs for %s: rc = %d\n",
		       osd->od_svname, buf, rc);
		GOTO(out, rc);
	}

	/* Remove object from inode accounting. It is not fatal for the destroy
	 * operation if something goes wrong while updating accounting, but we
	 * still log an error message to notify the administrator */
	rc = -zap_increment_int(osd->od_os, osd->od_iusr_oid,
				obj->oo_attr.la_uid, -1, oh->ot_tx);
	if (rc)
		CERROR("%s: failed to remove "DFID" from accounting ZAP for usr"
		       " %d: rc = %d\n", osd->od_svname, PFID(fid),
		       obj->oo_attr.la_uid, rc);
	rc = -zap_increment_int(osd->od_os, osd->od_igrp_oid,
				obj->oo_attr.la_gid, -1, oh->ot_tx);
	if (rc)
		CERROR("%s: failed to remove "DFID" from accounting ZAP for grp"
		       " %d: rc = %d\n", osd->od_svname, PFID(fid),
		       obj->oo_attr.la_gid, rc);

	oid = obj->oo_db->db_object;
	if (unlikely(obj->oo_destroy == OSD_DESTROY_NONE)) {
		/* this may happen if the destroy wasn't declared
		 * e.g. when the object is created and then destroyed
		 * in the same transaction - we don't need additional
		 * space for destroy specifically */
		LASSERT(obj->oo_attr.la_size <= osd_sync_destroy_max_size);
		rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to free %s %llu: rc = %d\n",
			       osd->od_svname, buf, oid, rc);
	} else if (obj->oo_destroy == OSD_DESTROY_SYNC) {
		rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to free %s %llu: rc = %d\n",
			       osd->od_svname, buf, oid, rc);
	} else { /* asynchronous destroy */
		rc = osd_object_unlinked_add(obj, oh);
		if (rc)
			GOTO(out, rc);

		rc = -zap_add_int(osd->od_os, osd->od_unlinkedid,
				  oid, oh->ot_tx);
		if (rc)
			CERROR("%s: zap_add_int() failed %s %llu: rc = %d\n",
			       osd->od_svname, buf, oid, rc);
	}

out:
	/* not needed in the cache anymore */
	set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
	if (rc == 0)
		obj->oo_destroyed = 1;
	up_write(&obj->oo_guard);
	RETURN (0);
}
Example #16
/*
 * Concurrency: @dt is write locked.
 */
static int osd_object_create(const struct lu_env *env, struct dt_object *dt,
			     struct lu_attr *attr,
			     struct dt_allocation_hint *hint,
			     struct dt_object_format *dof,
			     struct thandle *th)
{
	struct osd_thread_info	*info = osd_oti_get(env);
	struct lustre_mdt_attrs	*lma = &info->oti_mdt_attrs;
	struct zpl_direntry	*zde = &info->oti_zde.lzd_reg;
	const struct lu_fid	*fid = lu_object_fid(&dt->do_lu);
	struct osd_object	*obj = osd_dt_obj(dt);
	struct osd_device	*osd = osd_obj2dev(obj);
	char			*buf = info->oti_str;
	struct osd_thandle	*oh;
	dmu_buf_t		*db = NULL;
	uint64_t		 zapid, parent = 0;
	int			 rc;

	ENTRY;

	/* concurrent create declarations should not see
	 * the object inconsistent (db, attr, etc).
	 * in regular cases acquisition should be cheap */
	down_write(&obj->oo_guard);

	if (unlikely(dt_object_exists(dt)))
		GOTO(out, rc = -EEXIST);

	LASSERT(osd_invariant(obj));
	LASSERT(dof != NULL);

	LASSERT(th != NULL);
	oh = container_of0(th, struct osd_thandle, ot_super);

	/*
	 * XXX missing: Quota handling.
	 */

	LASSERT(obj->oo_db == NULL);

	/* to follow ZFS on-disk format we need
	 * to initialize parent dnode properly */
	if (hint != NULL && hint->dah_parent != NULL &&
	    !dt_object_remote(hint->dah_parent))
		parent = osd_dt_obj(hint->dah_parent)->oo_db->db_object;

	/* we may fix some attributes, better do not change the source */
	obj->oo_attr = *attr;
	obj->oo_attr.la_valid |= LA_SIZE | LA_NLINK | LA_TYPE;

	db = osd_create_type_f(dof->dof_type)(env, obj, &obj->oo_attr, oh);
	if (IS_ERR(db)) {
		rc = PTR_ERR(db);
		db = NULL;
		GOTO(out, rc);
	}

	zde->zde_pad = 0;
	zde->zde_dnode = db->db_object;
	zde->zde_type = IFTODT(attr->la_mode & S_IFMT);

	zapid = osd_get_name_n_idx(env, osd, fid, buf, sizeof(info->oti_str));

	rc = -zap_add(osd->od_os, zapid, buf, 8, 1, zde, oh->ot_tx);
	if (rc)
		GOTO(out, rc);

	/* Now add in all of the "SA" attributes */
	rc = -sa_handle_get(osd->od_os, db->db_object, NULL,
			    SA_HDL_PRIVATE, &obj->oo_sa_hdl);
	if (rc)
		GOTO(out, rc);

	/* configure new osd object */
	obj->oo_db = db;
	parent = parent != 0 ? parent : zapid;
	rc = __osd_attr_init(env, osd, obj->oo_sa_hdl, oh->ot_tx,
			     &obj->oo_attr, parent);
	if (rc)
		GOTO(out, rc);

	/* XXX: oo_lma_flags */
	obj->oo_dt.do_lu.lo_header->loh_attr |= obj->oo_attr.la_mode & S_IFMT;
	smp_mb();
	obj->oo_dt.do_lu.lo_header->loh_attr |= LOHA_EXISTS;
	if (likely(!fid_is_acct(lu_object_fid(&obj->oo_dt.do_lu))))
		/* no body operations for accounting objects */
		obj->oo_dt.do_body_ops = &osd_body_ops;

	rc = -nvlist_alloc(&obj->oo_sa_xattr, NV_UNIQUE_NAME, KM_SLEEP);
	if (rc)
		GOTO(out, rc);

	/* initialize LMA */
	lustre_lma_init(lma, lu_object_fid(&obj->oo_dt.do_lu), 0, 0);
	lustre_lma_swab(lma);
	rc = -nvlist_add_byte_array(obj->oo_sa_xattr, XATTR_NAME_LMA,
				    (uchar_t *)lma, sizeof(*lma));
	if (rc)
		GOTO(out, rc);
	rc = __osd_sa_xattr_update(env, obj, oh);
	if (rc)
		GOTO(out, rc);

	/* Add new object to inode accounting.
	 * Errors are not considered as fatal */
	rc = -zap_increment_int(osd->od_os, osd->od_iusr_oid,
				(attr->la_valid & LA_UID) ? attr->la_uid : 0, 1,
				oh->ot_tx);
	if (rc)
		CERROR("%s: failed to add "DFID" to accounting ZAP for usr %d "
			"(%d)\n", osd->od_svname, PFID(fid), attr->la_uid, rc);
	rc = -zap_increment_int(osd->od_os, osd->od_igrp_oid,
				(attr->la_valid & LA_GID) ? attr->la_gid : 0, 1,
				oh->ot_tx);
	if (rc)
		CERROR("%s: failed to add "DFID" to accounting ZAP for grp %d "
			"(%d)\n", osd->od_svname, PFID(fid), attr->la_gid, rc);

out:
	if (unlikely(rc && db)) {
		dmu_object_free(osd->od_os, db->db_object, oh->ot_tx);
		sa_buf_rele(db, osd_obj_tag);
		obj->oo_db = NULL;
	}
	up_write(&obj->oo_guard);
	RETURN(rc);
}
Example #17
void
bplist_destroy(objset_t *mos, uint64_t object, dmu_tx_t *tx)
{
	VERIFY(dmu_object_free(mos, object, tx) == 0);
}
Example #18
static int osd_object_destroy(const struct lu_env *env,
			      struct dt_object *dt, struct thandle *th)
{
	char			*buf = osd_oti_get(env)->oti_str;
	struct osd_object	*obj = osd_dt_obj(dt);
	struct osd_device	*osd = osd_obj2dev(obj);
	const struct lu_fid	*fid = lu_object_fid(&dt->do_lu);
	struct osd_thandle	*oh;
	int			 rc;
	uint64_t		 oid, zapid;
	ENTRY;

	LASSERT(obj->oo_db != NULL);
	LASSERT(dt_object_exists(dt));
	LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));

	oh = container_of0(th, struct osd_thandle, ot_super);
	LASSERT(oh != NULL);
	LASSERT(oh->ot_tx != NULL);

	/* remove obj ref from index dir (it depends) */
	zapid = osd_get_name_n_idx(env, osd, fid, buf);
	rc = -zap_remove(osd->od_os, zapid, buf, oh->ot_tx);
	if (rc) {
		CERROR("%s: zap_remove(%s) failed: rc = %d\n",
		       osd->od_svname, buf, rc);
		GOTO(out, rc);
	}

	rc = osd_xattrs_destroy(env, obj, oh);
	if (rc) {
		CERROR("%s: cannot destroy xattrs for %s: rc = %d\n",
		       osd->od_svname, buf, rc);
		GOTO(out, rc);
	}

	/* Remove object from inode accounting. It is not fatal for the destroy
	 * operation if something goes wrong while updating accounting, but we
	 * still log an error message to notify the administrator */
	rc = -zap_increment_int(osd->od_os, osd->od_iusr_oid,
				obj->oo_attr.la_uid, -1, oh->ot_tx);
	if (rc)
		CERROR("%s: failed to remove "DFID" from accounting ZAP for usr"
		       " %d: rc = %d\n", osd->od_svname, PFID(fid),
		       obj->oo_attr.la_uid, rc);
	rc = -zap_increment_int(osd->od_os, osd->od_igrp_oid,
				obj->oo_attr.la_gid, -1, oh->ot_tx);
	if (rc)
		CERROR("%s: failed to remove "DFID" from accounting ZAP for grp"
		       " %d: rc = %d\n", osd->od_svname, PFID(fid),
		       obj->oo_attr.la_gid, rc);

	oid = obj->oo_db->db_object;
	if (obj->oo_destroy == OSD_DESTROY_SYNC) {
		rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to free %s "LPU64": rc = %d\n",
			       osd->od_svname, buf, oid, rc);
	} else { /* asynchronous destroy */
		rc = osd_object_unlinked_add(obj, oh);
		if (rc)
			GOTO(out, rc);

		rc = -zap_add_int(osd->od_os, osd->od_unlinkedid,
				  oid, oh->ot_tx);
		if (rc)
			CERROR("%s: zap_add_int() failed %s "LPU64": rc = %d\n",
			       osd->od_svname, buf, oid, rc);
	}

out:
	/* not needed in the cache anymore */
	set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
	if (rc == 0)
		obj->oo_destroyed = 1;
	RETURN (0);
}
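
All of the examples above follow the same DMU transaction discipline: declare the free with dmu_tx_hold_free(), assign the transaction to a txg, call dmu_object_free(), then commit. Below is a minimal sketch of that pattern, distilled from Example #2; the helper name free_one_object() is hypothetical, and it assumes os is a held objset and obj names an allocated object.

#include <sys/dmu.h>

/*
 * Minimal sketch (not taken from any of the examples above): free a
 * single DMU object in its own transaction.  The helper name is
 * hypothetical; os must be a held objset and obj an allocated object.
 */
static int
free_one_object(objset_t *os, uint64_t obj)
{
	dmu_tx_t *tx;
	int rc;

	tx = dmu_tx_create(os);
	/* Declare that all of the object's blocks will be freed. */
	dmu_tx_hold_free(tx, obj, 0, DMU_OBJECT_END);

	rc = dmu_tx_assign(tx, TXG_WAIT);
	if (rc != 0) {
		/* An unassigned transaction is aborted, not committed. */
		dmu_tx_abort(tx);
		return (rc);
	}

	rc = dmu_object_free(os, obj, tx);
	/* The assigned transaction is committed here even if the free failed. */
	dmu_tx_commit(tx);

	return (rc);
}

Where the free is only one step of a larger update, as in the Lustre osd and DSL examples above, the same dmu_object_free() call is simply issued against the caller's already-assigned transaction instead of creating a new one.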