Example #1
/*
 * Delete the entire contents of a directory.  Return a count
 * of the number of entries that could not be deleted. If we encounter
 * an error, return a count of at least one so that the directory stays
 * in the unlinked set.
 *
 * NOTE: this function assumes that the directory is inactive,
 *	so there is no need to lock its entries before deletion.
 *	Also, it assumes the directory contents are *only* regular
 *	files.
 */
static int
zfs_purgedir(znode_t *dzp)
{
	zap_cursor_t	zc;
	zap_attribute_t	zap;
	znode_t		*xzp;
	dmu_tx_t	*tx;
	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
	zfs_dirlock_t	dl;
	int skipped = 0;
	int error;

	for (zap_cursor_init(&zc, zfsvfs->z_os, dzp->z_id);
	    (error = zap_cursor_retrieve(&zc, &zap)) == 0;
	    zap_cursor_advance(&zc)) {
		error = zfs_zget(zfsvfs,
		    ZFS_DIRENT_OBJ(zap.za_first_integer), &xzp);
		if (error) {
			skipped += 1;
			continue;
		}

		/*
		 * ASSERT((ZTOV(xzp)->v_type == VREG) ||
		 *    (ZTOV(xzp)->v_type == VLNK));
		 */

		tx = dmu_tx_create(zfsvfs->z_os);
		dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
		dmu_tx_hold_zap(tx, dzp->z_id, FALSE, zap.za_name);
		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
		dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
		/* Is this really needed? */
		zfs_sa_upgrade_txholds(tx, xzp);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			VN_RELE_ASYNC(ZTOV(xzp),
			    dsl_pool_vnrele_taskq(dmu_objset_pool(zfsvfs->z_os)));
			skipped += 1;
			continue;
		}
		bzero(&dl, sizeof (dl));
		dl.dl_dzp = dzp;
		dl.dl_name = zap.za_name;

		error = zfs_link_destroy(&dl, xzp, tx, 0, NULL);
		if (error)
			skipped += 1;
		dmu_tx_commit(tx);

		VN_RELE_ASYNC(ZTOV(xzp),
		    dsl_pool_vnrele_taskq(dmu_objset_pool(zfsvfs->z_os)));
	}
	zap_cursor_fini(&zc);
	if (error != ENOENT)
		skipped += 1;
	return (skipped);
}
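
Example #1 and the variants that follow all share one DMU transaction
skeleton: create the tx, declare every object the change may dirty with
dmu_tx_hold_*(), assign it to a transaction group, then commit, or abort on
failure. The following is a minimal editorial sketch of that skeleton, using
only DMU calls that appear in these examples; the helper name and its
argument list are illustrative, not part of ZFS.

/*
 * Minimal sketch of the dmu_tx lifecycle shared by these examples.
 * tx_skeleton_sketch() is a hypothetical helper, not a ZFS function.
 */
static int
tx_skeleton_sketch(objset_t *os, znode_t *dzp, const char *name)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int error;

	/* Declare all holds before assigning the tx to a txg. */
	dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, (char *)name);

	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		/* An unassigned tx must be aborted, never committed. */
		dmu_tx_abort(tx);
		return (error);
	}

	/* ... apply the declared changes under the assigned txg ... */

	dmu_tx_commit(tx);
	return (0);
}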
Example #2
/*
 * Delete the entire contents of a directory.  Return a count
 * of the number of entries that could not be deleted. If we encounter
 * an error, return a count of at least one so that the directory stays
 * in the unlinked set.
 *
 * NOTE: this function assumes that the directory is inactive,
 *	so there is no need to lock its entries before deletion.
 *	Also, it assumes the directory contents are *only* regular
 *	files.
 */
static int
zfs_purgedir(znode_t *dzp)
{
	zap_cursor_t	zc;
	zap_attribute_t	zap;
	znode_t		*xzp;
	dmu_tx_t	*tx;
	zfsvfs_t	*zfsvfs = ZTOZSB(dzp);
	zfs_dirlock_t	dl;
	int skipped = 0;
	int error;

	for (zap_cursor_init(&zc, zfsvfs->z_os, dzp->z_id);
	    (error = zap_cursor_retrieve(&zc, &zap)) == 0;
	    zap_cursor_advance(&zc)) {
		error = zfs_zget(zfsvfs,
		    ZFS_DIRENT_OBJ(zap.za_first_integer), &xzp);
		if (error) {
			skipped += 1;
			continue;
		}

		ASSERT(S_ISREG(ZTOI(xzp)->i_mode) ||
		    S_ISLNK(ZTOI(xzp)->i_mode));

		tx = dmu_tx_create(zfsvfs->z_os);
		dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
		dmu_tx_hold_zap(tx, dzp->z_id, FALSE, zap.za_name);
		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
		dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
		/* Is this really needed? */
		zfs_sa_upgrade_txholds(tx, xzp);
		dmu_tx_mark_netfree(tx);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			zfs_iput_async(ZTOI(xzp));
			skipped += 1;
			continue;
		}
		bzero(&dl, sizeof (dl));
		dl.dl_dzp = dzp;
		dl.dl_name = zap.za_name;

		error = zfs_link_destroy(&dl, xzp, tx, 0, NULL);
		if (error)
			skipped += 1;
		dmu_tx_commit(tx);

		zfs_iput_async(ZTOI(xzp));
	}
	zap_cursor_fini(&zc);
	if (error != ENOENT)
		skipped += 1;
	return (skipped);
}
Example #3
/*
 * Delete the entire contents of a directory.  Return a count
 * of the number of entries that could not be deleted. If we encounter
 * an error, return a count of at least one so that the directory stays
 * in the unlinked set.
 *
 * NOTE: this function assumes that the directory is inactive,
 *	so there is no need to lock its entries before deletion.
 *	Also, it assumes the directory contents are *only* regular
 *	files.
 */
static int
zfs_purgedir(znode_t *dzp)
{
    zap_cursor_t	zc;
    zap_attribute_t	zap;
    znode_t		*xzp;
    dmu_tx_t	*tx;
    zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
    int skipped = 0;
    int error;

    for (zap_cursor_init(&zc, zfsvfs->z_os, dzp->z_id);
            (error = zap_cursor_retrieve(&zc, &zap)) == 0;
            zap_cursor_advance(&zc)) {
        error = zfs_zget(zfsvfs,
                         ZFS_DIRENT_OBJ(zap.za_first_integer), &xzp);
        if (error) {
            skipped += 1;
            continue;
        }

        vn_lock(ZTOV(xzp), LK_EXCLUSIVE | LK_RETRY);
        ASSERT((ZTOV(xzp)->v_type == VREG) ||
               (ZTOV(xzp)->v_type == VLNK));

        tx = dmu_tx_create(zfsvfs->z_os);
        dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
        dmu_tx_hold_zap(tx, dzp->z_id, FALSE, zap.za_name);
        dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
        dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
        /* Is this really needed? */
        zfs_sa_upgrade_txholds(tx, xzp);
        dmu_tx_mark_netfree(tx);
        error = dmu_tx_assign(tx, TXG_WAIT);
        if (error) {
            dmu_tx_abort(tx);
            vput(ZTOV(xzp));
            skipped += 1;
            continue;
        }

        error = zfs_link_destroy(dzp, zap.za_name, xzp, tx, 0, NULL);
        if (error)
            skipped += 1;
        dmu_tx_commit(tx);

        vput(ZTOV(xzp));
    }
    zap_cursor_fini(&zc);
    if (error != ENOENT)
        skipped += 1;
    return (skipped);
}
Example #4
static void
__osd_xattr_declare_del(const struct lu_env *env, struct osd_object *obj,
			const char *name, struct osd_thandle *oh)
{
	struct osd_device *osd = osd_obj2dev(obj);
	dmu_tx_t          *tx = oh->ot_tx;
	uint64_t           xa_data_obj;
	int                rc;

	/* update SA_ZPL_DXATTR if xattr was in SA */
	dmu_tx_hold_sa(tx, obj->oo_sa_hdl, 0);

	if (obj->oo_xattr == ZFS_NO_OBJECT)
		return;

	rc = -zap_lookup(osd->od_os, obj->oo_xattr, name, 8, 1, &xa_data_obj);
	if (rc == 0) {
		/*
		 * Entry exists.
		 * We'll delete the existing object and ZAP entry.
		 */
		dmu_tx_hold_bonus(tx, xa_data_obj);
		dmu_tx_hold_free(tx, xa_data_obj, 0, DMU_OBJECT_END);
		dmu_tx_hold_zap(tx, obj->oo_xattr, FALSE, (char *) name);
		return;
	} else if (rc == -ENOENT) {
		/*
		 * Entry doesn't exist, nothing to be changed.
		 */
		return;
	}

	/* An error happened */
	tx->tx_err = -rc;
}
Example #5
void
zfs_sa_upgrade_txholds(dmu_tx_t *tx, znode_t *zp)
{
	if (!zp->z_zfsvfs->z_use_sa || zp->z_is_sa)
		return;

	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);

	if (zfs_external_acl(zp)) {
		dmu_tx_hold_free(tx, zfs_external_acl(zp), 0,
		    DMU_OBJECT_END);
	}
}
Example #6
/*
 * TX_WRITE2 records are only generated when dmu_sync() returns EALREADY,
 * meaning the pool block is already being synced.  So now that we always
 * write out full blocks, all we have to do is expand the EOF if
 * the file has grown.
 */
static int
zfs_replay_write2(zfsvfs_t *zfsvfs, void *data, boolean_t byteswap)
{
#ifndef TODO_OSV
	kprintf("TX_WRITE2\n");
	return EOPNOTSUPP;
#else
	lr_write_t *lr = data;
	znode_t	*zp;
	int error;
	uint64_t end;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
		return (error);

top:
	end = lr->lr_offset + lr->lr_length;
	if (end > zp->z_size) {
		dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);

		zp->z_size = end;
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			VN_RELE(ZTOV(zp));
			if (error == ERESTART) {
				dmu_tx_wait(tx);
				dmu_tx_abort(tx);
				goto top;
			}
			dmu_tx_abort(tx);
			return (error);
		}
		(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
		    (void *)&zp->z_size, sizeof (uint64_t), tx);

		/* Ensure the replayed seq is updated */
		(void) zil_replaying(zfsvfs->z_log, tx);

		dmu_tx_commit(tx);
	}

	VN_RELE(ZTOV(zp));

	return (error);
#endif
}
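
Examples #14, #17, #21, and #26 below assign with TXG_NOWAIT and retry on
ERESTART; the OSv replay path above keeps TXG_WAIT but handles ERESTART the
same way. Here is a minimal editorial sketch of that retry idiom, with a
single placeholder SA hold; the helper name is hypothetical.

/*
 * Sketch of the TXG_NOWAIT/ERESTART retry idiom used in several of
 * these examples.  assign_retry_sketch() is illustrative only.
 */
static int
assign_retry_sketch(objset_t *os, znode_t *zp)
{
	dmu_tx_t *tx;
	int error;
top:
	tx = dmu_tx_create(os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		if (error == ERESTART) {
			dmu_tx_wait(tx);	/* wait for txg space */
			dmu_tx_abort(tx);	/* then rebuild the holds */
			goto top;
		}
		dmu_tx_abort(tx);
		return (error);
	}

	/* ... apply the declared changes ... */

	dmu_tx_commit(tx);
	return (0);
}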
Example #7
/*
 * TX_WRITE2 records are only generated when dmu_sync() returns EALREADY,
 * meaning the pool block is already being synced.  So now that we always
 * write out full blocks, all we have to do is expand the EOF if
 * the file has grown.
 */
static int
zfs_replay_write2(void *arg1, void *arg2, boolean_t byteswap)
{
	zfsvfs_t *zfsvfs = arg1;
	lr_write_t *lr = arg2;
	znode_t	*zp;
	int error;
	uint64_t end;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
		return (error);

top:
	end = lr->lr_offset + lr->lr_length;
	if (end > zp->z_size) {
		dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);

		zp->z_size = end;
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			iput(ZTOI(zp));
			if (error == ERESTART) {
				dmu_tx_wait(tx);
				dmu_tx_abort(tx);
				goto top;
			}
			dmu_tx_abort(tx);
			return (error);
		}
		(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
		    (void *)&zp->z_size, sizeof (uint64_t), tx);

		/* Ensure the replayed seq is updated */
		(void) zil_replaying(zfsvfs->z_log, tx);

		dmu_tx_commit(tx);
	}

	iput(ZTOI(zp));

	return (error);
}
Example #8
static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
				const struct lu_buf *buf, loff_t pos,
				struct thandle *th)
{
	struct osd_object  *obj  = osd_dt_obj(dt);
	struct osd_device  *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	uint64_t            oid;
	ENTRY;

	oh = container_of0(th, struct osd_thandle, ot_super);

	/* in some cases declare can race with creation (e.g. llog)
	 * and we need to wait till the object is initialized. notice
	 * LOHA_EXISTS is supposed to be the last step in the
	 * initialization */

	/* declare a possible size change. notice we can't check the
	 * current size here as another thread can change it */

	if (dt_object_exists(dt)) {
		LASSERT(obj->oo_db);
		oid = obj->oo_db->db_object;

		dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);
	} else {
		oid = DMU_NEW_OBJECT;
		dmu_tx_hold_sa_create(oh->ot_tx, ZFS_SA_BASE_ATTR_SIZE);
	}

	/* XXX: we still lack append declaration support in ZFS.
	 *	-1 means append, which is used mostly by llog; llog
	 *	can grow up to LLOG_MIN_CHUNK_SIZE*8 records */
	if (pos == -1)
		pos = max_t(loff_t, 256 * 8 * LLOG_MIN_CHUNK_SIZE,
			    obj->oo_attr.la_size + (2 << 20));
	dmu_tx_hold_write(oh->ot_tx, oid, pos, buf->lb_len);

	/* dt_declare_write() is usually called for system objects, such
	 * as llog or last_rcvd files. We needn't enforce quota on those
	 * objects, so always set the lqi_space as 0. */
	RETURN(osd_declare_quota(env, osd, obj->oo_attr.la_uid,
				 obj->oo_attr.la_gid, 0, oh, true, NULL,
				 false));
}
Example #9
int
zfs_sa_set_xattr(znode_t *zp)
{
    zfs_sb_t *zsb = ZTOZSB(zp);
    dmu_tx_t *tx;
    char *obj;
    size_t size;
    int error;

    ASSERT(RW_WRITE_HELD(&zp->z_xattr_lock));
    ASSERT(zp->z_xattr_cached);
    ASSERT(zp->z_is_sa);

    error = nvlist_size(zp->z_xattr_cached, &size, NV_ENCODE_XDR);
    if (error)
        goto out;

    obj = zio_buf_alloc(size);

    error = nvlist_pack(zp->z_xattr_cached, &obj, &size,
                        NV_ENCODE_XDR, KM_SLEEP);
    if (error)
        goto out_free;

    tx = dmu_tx_create(zsb->z_os);
    dmu_tx_hold_sa_create(tx, size);
    dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);

    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        dmu_tx_abort(tx);
    } else {
        error = sa_update(zp->z_sa_hdl, SA_ZPL_DXATTR(zsb),
                          obj, size, tx);
        if (error)
            dmu_tx_abort(tx);
        else
            dmu_tx_commit(tx);
    }
out_free:
    zio_buf_free(obj, size);
out:
    return (error);
}
Example #10
int
zfs_sa_set_xattr(znode_t *zp)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	dmu_tx_t *tx;
	char *obj;
	size_t size;
	int error;

	ASSERT(RW_WRITE_HELD(&zp->z_xattr_lock));
	ASSERT(zp->z_xattr_cached);
	ASSERT(zp->z_is_sa);

	error = nvlist_size(zp->z_xattr_cached, &size, NV_ENCODE_XDR);
	if ((error == 0) && (size > SA_ATTR_MAX_LEN))
		error = EFBIG;
	if (error)
		goto out;

	obj = vmem_alloc(size, KM_SLEEP);

	error = nvlist_pack(zp->z_xattr_cached, &obj, &size,
	    NV_ENCODE_XDR, KM_SLEEP);
	if (error)
		goto out_free;

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa_create(tx, size);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);

	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_DXATTR(zfsvfs),
		    obj, size, tx));
		dmu_tx_commit(tx);
	}
out_free:
	vmem_free(obj, size);
out:
	return (error);
}
Example #11
static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
				const loff_t size, loff_t pos,
				struct thandle *th)
{
	struct osd_object  *obj  = osd_dt_obj(dt);
	struct osd_device  *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	uint64_t            oid;
	ENTRY;

	oh = container_of0(th, struct osd_thandle, ot_super);

	/* in some cases declare can race with creation (e.g. llog)
	 * and we need to wait till the object is initialized. notice
	 * LOHA_EXISTS is supposed to be the last step in the
	 * initialization */

	/* declare a possible size change. notice we can't check the
	 * current size here as another thread can change it */

	if (dt_object_exists(dt)) {
		LASSERT(obj->oo_db);
		oid = obj->oo_db->db_object;

		dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);
	} else {
		oid = DMU_NEW_OBJECT;
		dmu_tx_hold_sa_create(oh->ot_tx, ZFS_SA_BASE_ATTR_SIZE);
	}

	dmu_tx_hold_write(oh->ot_tx, oid, pos, size);

	/* dt_declare_write() is usually called for system objects, such
	 * as llog or last_rcvd files. We needn't enforce quota on those
	 * objects, so always set the lqi_space as 0. */
	RETURN(osd_declare_quota(env, osd, obj->oo_attr.la_uid,
				 obj->oo_attr.la_gid, 0, oh, true, NULL,
				 false));
}
Example #12
int
zfs_make_xattrdir(znode_t *zp, vattr_t *vap, struct inode **xipp, cred_t *cr)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	znode_t *xzp;
	dmu_tx_t *tx;
	int error;
	zfs_acl_ids_t acl_ids;
	boolean_t fuid_dirtied;
#ifdef DEBUG
	uint64_t parent;
#endif

	*xipp = NULL;

	if ((error = zfs_zaccess(zp, ACE_WRITE_NAMED_ATTRS, 0, B_FALSE, cr)))
		return (error);

	if ((error = zfs_acl_ids_create(zp, IS_XATTR, vap, cr, NULL,
	    &acl_ids)) != 0)
		return (error);
	if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
		zfs_acl_ids_free(&acl_ids);
		return (SET_ERROR(EDQUOT));
	}

	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	fuid_dirtied = zsb->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zsb, tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		return (error);
	}
	zfs_mknode(zp, vap, tx, cr, IS_XATTR, &xzp, &acl_ids);

	if (fuid_dirtied)
		zfs_fuid_sync(zsb, tx);

#ifdef DEBUG
	error = sa_lookup(xzp->z_sa_hdl, SA_ZPL_PARENT(zsb),
	    &parent, sizeof (parent));
	ASSERT(error == 0 && parent == zp->z_id);
#endif

	VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zsb), &xzp->z_id,
	    sizeof (xzp->z_id), tx));

	if (!zp->z_unlinked)
		(void) zfs_log_create(zsb->z_log, tx, TX_MKXATTR, zp,
		    xzp, "", NULL, acl_ids.z_fuidp, vap);

	zfs_acl_ids_free(&acl_ids);
	dmu_tx_commit(tx);

	*xipp = ZTOI(xzp);

	return (0);
}
Example #13
void
zfs_rmnode(znode_t *zp)
{
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
	objset_t	*os = zfsvfs->z_os;
	znode_t		*xzp = NULL;
	dmu_tx_t	*tx;
	uint64_t	acl_obj;
	uint64_t	xattr_obj;
	int		error;

	ASSERT(zp->z_links == 0);
	ASSERT(ZTOV(zp)->v_count == 0);

	/*
	 * If this is an attribute directory, purge its contents.
	 */
	if (ZTOV(zp)->v_type == VDIR && (zp->z_pflags & ZFS_XATTR)) {
		if (zfs_purgedir(zp) != 0) {
			/*
			 * Not enough space to delete some xattrs.
			 * Leave it in the unlinked set.
			 */
			zfs_znode_dmu_fini(zp);
			zfs_znode_free(zp);
			return;
		}
	}

	/*
	 * Free up all the data in the file.
	 */
	error = dmu_free_long_range(os, zp->z_id, 0, DMU_OBJECT_END);
	if (error) {
		/*
		 * Not enough space.  Leave the file in the unlinked set.
		 */
		zfs_znode_dmu_fini(zp);
		zfs_znode_free(zp);
		return;
	}

	/*
	 * If the file has extended attributes, we're going to unlink
	 * the xattr dir.
	 */
	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
	    &xattr_obj, sizeof (xattr_obj));
	if (error == 0 && xattr_obj) {
		error = zfs_zget(zfsvfs, xattr_obj, &xzp);
		ASSERT(error == 0);
	}

	acl_obj = zfs_external_acl(zp);

	/*
	 * Set up the final transaction.
	 */
	tx = dmu_tx_create(os);
	dmu_tx_hold_free(tx, zp->z_id, 0, DMU_OBJECT_END);
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
	if (xzp) {
		dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, TRUE, NULL);
		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
	}
	if (acl_obj)
		dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);

	zfs_sa_upgrade_txholds(tx, zp);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		/*
		 * Not enough space to delete the file.  Leave it in the
		 * unlinked set, leaking it until the fs is remounted (at
		 * which point we'll call zfs_unlinked_drain() to process it).
		 */
		dmu_tx_abort(tx);
		zfs_znode_dmu_fini(zp);
		zfs_znode_free(zp);
		goto out;
	}

	if (xzp) {
		ASSERT(error == 0);
		mutex_enter(&xzp->z_lock);
		xzp->z_unlinked = B_TRUE;	/* mark xzp for deletion */
		xzp->z_links = 0;	/* no more links to it */
		VERIFY(0 == sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
		    &xzp->z_links, sizeof (xzp->z_links), tx));
		mutex_exit(&xzp->z_lock);
		zfs_unlinked_add(xzp, tx);
	}

	/* Remove this znode from the unlinked set */
	VERIFY3U(0, ==,
	    zap_remove_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx));

	zfs_znode_delete(zp, tx);

	dmu_tx_commit(tx);
out:
	if (xzp)
		VN_RELE(ZTOV(xzp));
}
Example #14
/*
 * Create an extended attribute directory for zp.
 */
int
zfs_make_xattrdir(znode_t *zp, vattr_t *vap, vnode_t **xvpp, cred_t *cr)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	znode_t *xzp;
	dmu_tx_t *tx;
	int error;
	zfs_acl_ids_t acl_ids;
	boolean_t fuid_dirtied;
	uint64_t parent;

	*xvpp = NULL;

	if ((error = zfs_zaccess(zp, ACE_WRITE_NAMED_ATTRS, 0, B_FALSE, cr)))
		return (error);

	if ((error = zfs_acl_ids_create(zp, IS_XATTR, vap, cr, NULL,
	    &acl_ids)) != 0)
		return (error);
	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
		zfs_acl_ids_free(&acl_ids);
		return (EDQUOT);
	}

top:
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	fuid_dirtied = zfsvfs->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		return (error);
	}
	zfs_mknode(zp, vap, tx, cr, IS_XATTR, &xzp, &acl_ids);

#ifdef HAVE_ZPL
	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);
#endif /* HAVE_ZPL */

#ifdef DEBUG
	error = sa_lookup(xzp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
	    &parent, sizeof (parent));
	ASSERT(error == 0 && parent == zp->z_id);
#endif

	VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xzp->z_id,
	    sizeof (xzp->z_id), tx));

	(void) zfs_log_create(zfsvfs->z_log, tx, TX_MKXATTR, zp,
	    xzp, "", NULL, acl_ids.z_fuidp, vap);

	zfs_acl_ids_free(&acl_ids);
	dmu_tx_commit(tx);

	*xvpp = ZTOV(xzp);

	return (0);
}
Example #15
static int osd_declare_attr_set(const struct lu_env *env,
				struct dt_object *dt,
				const struct lu_attr *attr,
				struct thandle *handle)
{
	struct osd_thread_info	*info = osd_oti_get(env);
	char			*buf = osd_oti_get(env)->oti_str;
	struct osd_object	*obj = osd_dt_obj(dt);
	struct osd_device	*osd = osd_obj2dev(obj);
	struct osd_thandle	*oh;
	uint64_t		 bspace;
	uint32_t		 blksize;
	int			 rc = 0;
	ENTRY;

	LASSERT(handle != NULL);
	LASSERT(osd_invariant(obj));

	oh = container_of0(handle, struct osd_thandle, ot_super);

	down_read(&obj->oo_guard);
	if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
		GOTO(out, rc = 0);

	LASSERT(obj->oo_sa_hdl != NULL);
	LASSERT(oh->ot_tx != NULL);
	dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);
	if (oh->ot_tx->tx_err != 0)
		GOTO(out, rc = -oh->ot_tx->tx_err);

	sa_object_size(obj->oo_sa_hdl, &blksize, &bspace);
	bspace = toqb(bspace * blksize);

	__osd_xattr_declare_set(env, obj, sizeof(struct lustre_mdt_attrs),
				XATTR_NAME_LMA, oh);

	if (attr && attr->la_valid & LA_UID) {
		/* account for user inode tracking ZAP update */
		dmu_tx_hold_bonus(oh->ot_tx, osd->od_iusr_oid);
		dmu_tx_hold_zap(oh->ot_tx, osd->od_iusr_oid, TRUE, buf);

		/* quota enforcement for user */
		if (attr->la_uid != obj->oo_attr.la_uid) {
			rc = qsd_transfer(env, osd->od_quota_slave,
					  &oh->ot_quota_trans, USRQUOTA,
					  obj->oo_attr.la_uid, attr->la_uid,
					  bspace, &info->oti_qi);
			if (rc)
				GOTO(out, rc);
		}
	}
	if (attr && attr->la_valid & LA_GID) {
		/* account for user inode tracking ZAP update */
		dmu_tx_hold_bonus(oh->ot_tx, osd->od_igrp_oid);
		dmu_tx_hold_zap(oh->ot_tx, osd->od_igrp_oid, TRUE, buf);

		/* quota enforcement for group */
		if (attr->la_gid != obj->oo_attr.la_gid) {
			rc = qsd_transfer(env, osd->od_quota_slave,
					  &oh->ot_quota_trans, GRPQUOTA,
					  obj->oo_attr.la_gid, attr->la_gid,
					  bspace, &info->oti_qi);
			if (rc)
				GOTO(out, rc);
		}
	}

out:
	up_read(&obj->oo_guard);
	RETURN(rc);
}
Example #16
void __osd_xattr_declare_set(const struct lu_env *env, struct osd_object *obj,
			     int vallen, const char *name,
			     struct osd_thandle *oh)
{
	struct osd_device *osd = osd_obj2dev(obj);
	dmu_buf_t         *db = obj->oo_db;
	dmu_tx_t          *tx = oh->ot_tx;
	uint64_t           xa_data_obj;
	int                rc = 0;
	int                here;

	if (unlikely(obj->oo_destroyed))
		return;

	here = dt_object_exists(&obj->oo_dt);

	/* object may be not yet created */
	if (here) {
		LASSERT(db);
		LASSERT(obj->oo_sa_hdl);
		/* we might just update SA_ZPL_DXATTR */
		dmu_tx_hold_sa(tx, obj->oo_sa_hdl, 1);

		if (obj->oo_xattr == ZFS_NO_OBJECT)
			rc = -ENOENT;
	}

	if (!here || rc == -ENOENT) {
		/* we'll be updating SA_ZPL_XATTR */
		if (here) {
			LASSERT(obj->oo_sa_hdl);
			dmu_tx_hold_sa(tx, obj->oo_sa_hdl, 1);
		}
		/* xattr zap + entry */
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, TRUE, (char *) name);
		/* xattr value obj */
		dmu_tx_hold_sa_create(tx, ZFS_SA_BASE_ATTR_SIZE);
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, vallen);
		return;
	}

	rc = -zap_lookup(osd->od_os, obj->oo_xattr, name, sizeof(uint64_t), 1,
			&xa_data_obj);
	if (rc == 0) {
		/*
		 * Entry already exists.
		 * We'll truncate the existing object.
		 */
		dmu_tx_hold_bonus(tx, xa_data_obj);
		dmu_tx_hold_free(tx, xa_data_obj, vallen, DMU_OBJECT_END);
		dmu_tx_hold_write(tx, xa_data_obj, 0, vallen);
		return;
	} else if (rc == -ENOENT) {
		/*
		 * Entry doesn't exist, we need to create a new one and a new
		 * object to store the value.
		 */
		dmu_tx_hold_bonus(tx, obj->oo_xattr);
		dmu_tx_hold_zap(tx, obj->oo_xattr, TRUE, (char *) name);
		dmu_tx_hold_sa_create(tx, ZFS_SA_BASE_ATTR_SIZE);
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, vallen);
		return;
	}

	/* An error happened */
	tx->tx_err = -rc;
}
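
For context, a declare-phase caller would run the helper above against the
thandle's open tx before dmu_tx_assign(), then surface any error that the
zap_lookup() calls parked in tx_err, exactly as osd_declare_attr_set() in
Example #15 checks oh->ot_tx->tx_err. The caller below is a hypothetical
editorial sketch, not code from the source tree:

/*
 * Hypothetical caller sketch: declare an xattr set on behalf of a
 * Lustre osd_thandle, reusing the tx_err convention shown above.
 */
static int
osd_declare_xattr_set_sketch(const struct lu_env *env,
			     struct osd_object *obj,
			     const struct lu_buf *buf,
			     const char *name,
			     struct osd_thandle *oh)
{
	__osd_xattr_declare_set(env, obj, buf->lb_len, name, oh);

	/* lookup failures were recorded in tx_err, not returned */
	return (oh->ot_tx->tx_err ? -oh->ot_tx->tx_err : 0);
}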
Example #17
/*
 * Lookup/Create an extended attribute entry.
 *
 * Input arguments:
 *	dzp	- znode for hidden attribute directory
 *	name	- name of attribute
 *	flag	- ZNEW: if the entry already exists, fail with EEXIST.
 *		  ZEXISTS: if the entry does not exist, fail with ENOENT.
 *
 * Output arguments:
 *	vpp	- pointer to the vnode for the entry (NULL if there isn't one)
 *
 * Return value: 0 on success or errno value on failure.
 */
int
zfs_obtain_xattr(znode_t *dzp, const char *name, mode_t mode, cred_t *cr,
                 vnode_t **vpp, int flag)
{
	znode_t  *xzp = NULL;
	zfsvfs_t  *zfsvfs = dzp->z_zfsvfs;
	zilog_t  *zilog;
	zfs_dirlock_t  *dl;
	dmu_tx_t  *tx;
	struct vnode_attr  vattr;
	int error;
	struct componentname cn;
	zfs_acl_ids_t	acl_ids;

	/* zfs_dirent_lock() expects a component name */
	bzero(&cn, sizeof (cn));
	cn.cn_nameiop = LOOKUP;
	cn.cn_flags = ISLASTCN;
	cn.cn_nameptr = (char *)name;
	cn.cn_namelen = strlen(name);

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	zilog = zfsvfs->z_log;

	VATTR_INIT(&vattr);
	VATTR_SET(&vattr, va_type, VREG);
	VATTR_SET(&vattr, va_mode, mode & ~S_IFMT);

	if ((error = zfs_acl_ids_create(dzp, 0, &vattr, cr, NULL,
	    &acl_ids)) != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}
top:
	/* Lock the attribute entry name. */
	if ((error = zfs_dirent_lock(&dl, dzp, (char *)name, &xzp, flag,
	    NULL, &cn)) != 0) {
		goto out;
	}
	/* If the name already exists, we're done. */
	if (xzp != NULL) {
		zfs_dirent_unlock(dl);
		goto out;
	}
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, (char *)name);

#if 1 // FIXME
	if (dzp->z_pflags & ZFS_INHERIT_ACE) {
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, SPA_MAXBLOCKSIZE);
	}
#endif
	zfs_sa_upgrade_txholds(tx, dzp);
	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);
		goto out;
	}

	zfs_mknode(dzp, &vattr, tx, cr, 0, &xzp, &acl_ids);

	(void) zfs_link_create(dl, xzp, tx, ZNEW);
	zfs_log_create(zilog, tx, TX_CREATE, dzp, xzp, (char *)name,
	    NULL /* vsecp */, 0 /* acl_ids.z_fuidp */, &vattr);
	zfs_acl_ids_free(&acl_ids);
	dmu_tx_commit(tx);
	zfs_znode_wait_vnode(xzp);

	zfs_dirent_unlock(dl);
out:
	if (error == EEXIST)
		error = ENOATTR;
	if (xzp)
		*vpp = ZTOV(xzp);

	ZFS_EXIT(zfsvfs);
	return (error);
}
Example #18
int
zfs_make_xattrdir(znode_t *zp, vattr_t *vap, vnode_t **xvpp, cred_t *cr)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	znode_t *xzp;
	dmu_tx_t *tx;
	int error;
	zfs_acl_ids_t acl_ids;
	boolean_t fuid_dirtied;
	uint64_t parent;

	*xvpp = NULL;

	/*
	 * In FreeBSD, access checking for creating an EA is done
	 * in zfs_setextattr().
	 */
#ifndef __FreeBSD_kernel__
	if ((error = zfs_zaccess(zp, ACE_WRITE_NAMED_ATTRS, 0, B_FALSE,
	    cr)) != 0)
		return (error);
#endif

	if ((error = zfs_acl_ids_create(zp, IS_XATTR, vap, cr, NULL,
	    &acl_ids)) != 0)
		return (error);
	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
		zfs_acl_ids_free(&acl_ids);
		return (SET_ERROR(EDQUOT));
	}

	getnewvnode_reserve(1);

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	fuid_dirtied = zfsvfs->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		return (error);
	}
	zfs_mknode(zp, vap, tx, cr, IS_XATTR, &xzp, &acl_ids);

	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);

#ifdef DEBUG
	error = sa_lookup(xzp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
	    &parent, sizeof (parent));
	ASSERT(error == 0 && parent == zp->z_id);
#endif

	VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xzp->z_id,
	    sizeof (xzp->z_id), tx));

	(void) zfs_log_create(zfsvfs->z_log, tx, TX_MKXATTR, zp,
	    xzp, "", NULL, acl_ids.z_fuidp, vap);

	zfs_acl_ids_free(&acl_ids);
	dmu_tx_commit(tx);

	getnewvnode_drop_reserve();

	*xvpp = ZTOV(xzp);

	return (0);
}
Example #19
void
zfs_rmnode(znode_t *zp)
{
    zfs_sb_t	*zsb = ZTOZSB(zp);
    objset_t	*os = zsb->z_os;
    znode_t		*xzp = NULL;
    dmu_tx_t	*tx;
    uint64_t	acl_obj;
    uint64_t	xattr_obj;
    uint64_t	count;
    int		error;

    ASSERT(zp->z_links == 0);
    ASSERT(atomic_read(&ZTOI(zp)->i_count) == 0);

    /*
     * If this is an attribute directory, purge its contents.
     */
    if (S_ISDIR(ZTOI(zp)->i_mode) && (zp->z_pflags & ZFS_XATTR)) {
        error = zap_count(os, zp->z_id, &count);
        if (error) {
            zfs_znode_dmu_fini(zp);
            return;
        }

        if (count > 0) {
            taskq_t *taskq;

            /*
             * There are still directory entries in this xattr
             * directory.  Let zfs_unlinked_drain() deal with
             * them to avoid deadlocking this process in the
             * zfs_purgedir()->zfs_zget()->ilookup() callpath
             * on the xattr inode's I_FREEING bit.
             */
            taskq = dsl_pool_iput_taskq(dmu_objset_pool(os));
            taskq_dispatch(taskq, (task_func_t *)
                           zfs_unlinked_drain, zsb, TQ_SLEEP);

            zfs_znode_dmu_fini(zp);
            return;
        }
    }

    /*
     * Free up all the data in the file.
     */
    error = dmu_free_long_range(os, zp->z_id, 0, DMU_OBJECT_END);
    if (error) {
        /*
         * Not enough space.  Leave the file in the unlinked set.
         */
        zfs_znode_dmu_fini(zp);
        return;
    }

    /*
     * If the file has extended attributes, we're going to unlink
     * the xattr dir.
     */
    error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
                      &xattr_obj, sizeof (xattr_obj));
    if (error == 0 && xattr_obj) {
        error = zfs_zget(zsb, xattr_obj, &xzp);
        ASSERT(error == 0);
    }

    acl_obj = zfs_external_acl(zp);

    /*
     * Set up the final transaction.
     */
    tx = dmu_tx_create(os);
    dmu_tx_hold_free(tx, zp->z_id, 0, DMU_OBJECT_END);
    dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);
    if (xzp) {
        dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, TRUE, NULL);
        dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
    }
    if (acl_obj)
        dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);

    zfs_sa_upgrade_txholds(tx, zp);
    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        /*
         * Not enough space to delete the file.  Leave it in the
         * unlinked set, leaking it until the fs is remounted (at
         * which point we'll call zfs_unlinked_drain() to process it).
         */
        dmu_tx_abort(tx);
        zfs_znode_dmu_fini(zp);
        goto out;
    }

    if (xzp) {
        ASSERT(error == 0);
        mutex_enter(&xzp->z_lock);
        xzp->z_unlinked = B_TRUE;	/* mark xzp for deletion */
        xzp->z_links = 0;	/* no more links to it */
        VERIFY(0 == sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zsb),
                              &xzp->z_links, sizeof (xzp->z_links), tx));
        mutex_exit(&xzp->z_lock);
        zfs_unlinked_add(xzp, tx);
    }

    /* Remove this znode from the unlinked set */
    VERIFY3U(0, ==,
             zap_remove_int(zsb->z_os, zsb->z_unlinkedobj, zp->z_id, tx));

    zfs_znode_delete(zp, tx);

    dmu_tx_commit(tx);
out:
    if (xzp)
        iput(ZTOI(xzp));
}
Example #20
void
zfs_rmnode(znode_t *zp)
{
	zfsvfs_t	*zfsvfs = ZTOZSB(zp);
	objset_t	*os = zfsvfs->z_os;
	znode_t		*xzp = NULL;
	dmu_tx_t	*tx;
	uint64_t	acl_obj;
	uint64_t	xattr_obj;
	uint64_t	links;
	int		error;

	ASSERT(ZTOI(zp)->i_nlink == 0);
	ASSERT(atomic_read(&ZTOI(zp)->i_count) == 0);

	/*
	 * If this is an attribute directory, purge its contents.
	 */
	if (S_ISDIR(ZTOI(zp)->i_mode) && (zp->z_pflags & ZFS_XATTR)) {
		if (zfs_purgedir(zp) != 0) {
			/*
			 * Not enough space to delete some xattrs.
			 * Leave it in the unlinked set.
			 */
			zfs_znode_dmu_fini(zp);

			return;
		}
	}

	/*
	 * Free up all the data in the file.  We don't do this for directories
	 * because we need truncate and remove to be in the same tx, like in
	 * zfs_znode_delete(). Otherwise, if we crash here we'll end up with
	 * an inconsistent truncated zap object in the delete queue.  Note a
	 * truncated file is harmless since it only contains user data.
	 */
	if (S_ISREG(ZTOI(zp)->i_mode)) {
		error = dmu_free_long_range(os, zp->z_id, 0, DMU_OBJECT_END);
		if (error) {
			/*
			 * Not enough space or we were interrupted by unmount.
			 * Leave the file in the unlinked set.
			 */
			zfs_znode_dmu_fini(zp);
			return;
		}
	}

	/*
	 * If the file has extended attributes, we're going to unlink
	 * the xattr dir.
	 */
	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
	    &xattr_obj, sizeof (xattr_obj));
	if (error == 0 && xattr_obj) {
		error = zfs_zget(zfsvfs, xattr_obj, &xzp);
		ASSERT(error == 0);
	}

	acl_obj = zfs_external_acl(zp);

	/*
	 * Set up the final transaction.
	 */
	tx = dmu_tx_create(os);
	dmu_tx_hold_free(tx, zp->z_id, 0, DMU_OBJECT_END);
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
	if (xzp) {
		dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, TRUE, NULL);
		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
	}
	if (acl_obj)
		dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);

	zfs_sa_upgrade_txholds(tx, zp);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		/*
		 * Not enough space to delete the file.  Leave it in the
		 * unlinked set, leaking it until the fs is remounted (at
		 * which point we'll call zfs_unlinked_drain() to process it).
		 */
		dmu_tx_abort(tx);
		zfs_znode_dmu_fini(zp);
		goto out;
	}

	if (xzp) {
		ASSERT(error == 0);
		mutex_enter(&xzp->z_lock);
		xzp->z_unlinked = B_TRUE;	/* mark xzp for deletion */
		clear_nlink(ZTOI(xzp));		/* no more links to it */
		links = 0;
		VERIFY(0 == sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
		    &links, sizeof (links), tx));
		mutex_exit(&xzp->z_lock);
		zfs_unlinked_add(xzp, tx);
	}

	/* Remove this znode from the unlinked set */
	VERIFY3U(0, ==,
	    zap_remove_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx));

	zfs_znode_delete(zp, tx);

	dmu_tx_commit(tx);
out:
	if (xzp)
		zfs_iput_async(ZTOI(xzp));
}
Example #21
int
zfs_make_xattrdir(znode_t *zp, vattr_t *vap, vnode_t **xvpp, cred_t *cr)
{
    zfsvfs_t *zfsvfs = zp->z_zfsvfs;
    znode_t *xzp;
    dmu_tx_t *tx;
    int error;
    zfs_acl_ids_t acl_ids;
    boolean_t fuid_dirtied;

    *xvpp = NULL;

#ifndef __APPLE__
    /* In Mac OS X access preflighting is done above the file system. */
    if ((error = zfs_zaccess(zp, ACE_WRITE_NAMED_ATTRS, cr)) != 0)
        return (error);
#endif /*!__APPLE__*/

    if ((error = zfs_acl_ids_create(zp, IS_XATTR, vap, cr, NULL,
                                    &acl_ids)) != 0)
        return (error);
    if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
        zfs_acl_ids_free(&acl_ids);
        return (EDQUOT);
    }
top:
    tx = dmu_tx_create(zfsvfs->z_os);
    dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
                          ZFS_SA_BASE_ATTR_SIZE);
    dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
    dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
    fuid_dirtied = zfsvfs->z_fuid_dirty;
    if (fuid_dirtied)
        zfs_fuid_txhold(zfsvfs, tx);
    error = dmu_tx_assign(tx, TXG_NOWAIT);
    if (error) {
        if (error == ERESTART) {
            dmu_tx_wait(tx);
            dmu_tx_abort(tx);
            goto top;
        }
        zfs_acl_ids_free(&acl_ids);
        dmu_tx_abort(tx);
        return (error);
    }
    zfs_mknode(zp, vap, tx, cr, IS_XATTR, &xzp, &acl_ids);

    if (fuid_dirtied)
        zfs_fuid_sync(zfsvfs, tx);
    VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xzp->z_id,
                          sizeof (xzp->z_id), tx));

    (void) zfs_log_create(zfsvfs->z_log, tx, TX_MKXATTR, zp,
                          xzp, "", NULL, acl_ids.z_fuidp, vap);

    zfs_acl_ids_free(&acl_ids);
    dmu_tx_commit(tx);

    /* Cleanup any znode we consumed during zfs_mknode() */
    printf("ZFS_POSTPROCESS_ZP(xzp);\n");
    printf("zfs_dir attach 2\n");
    zfs_attach_vnode(xzp);

    *xvpp = ZTOV(xzp);

    return (0);
}
Example #22
static int osd_declare_attr_set(const struct lu_env *env,
				struct dt_object *dt,
				const struct lu_attr *attr,
				struct thandle *handle)
{
	struct osd_thread_info	*info = osd_oti_get(env);
	char			*buf = osd_oti_get(env)->oti_str;
	struct osd_object	*obj = osd_dt_obj(dt);
	struct osd_device	*osd = osd_obj2dev(obj);
	struct osd_thandle	*oh;
	uint64_t		 bspace;
	uint32_t		 blksize;
	int			 rc;
	ENTRY;

	if (!dt_object_exists(dt)) {
		/* XXX: sanity check that object creation is declared */
		RETURN(0);
	}

	LASSERT(handle != NULL);
	LASSERT(osd_invariant(obj));

	oh = container_of0(handle, struct osd_thandle, ot_super);

	LASSERT(obj->oo_sa_hdl != NULL);
	dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);

	sa_object_size(obj->oo_sa_hdl, &blksize, &bspace);
	bspace = toqb(bspace * blksize);

	if (attr && attr->la_valid & LA_UID) {
		/* account for user inode tracking ZAP update */
		dmu_tx_hold_bonus(oh->ot_tx, osd->od_iusr_oid);
		dmu_tx_hold_zap(oh->ot_tx, osd->od_iusr_oid, TRUE, buf);

		/* quota enforcement for user */
		if (attr->la_uid != obj->oo_attr.la_uid) {
			rc = qsd_transfer(env, osd->od_quota_slave,
					  &oh->ot_quota_trans, USRQUOTA,
					  obj->oo_attr.la_uid, attr->la_uid,
					  bspace, &info->oti_qi);
			if (rc)
				RETURN(rc);
		}
	}
	if (attr && attr->la_valid & LA_GID) {
		/* account for user inode tracking ZAP update */
		dmu_tx_hold_bonus(oh->ot_tx, osd->od_igrp_oid);
		dmu_tx_hold_zap(oh->ot_tx, osd->od_igrp_oid, TRUE, buf);

		/* quota enforcement for group */
		if (attr->la_gid != obj->oo_attr.la_gid) {
			rc = qsd_transfer(env, osd->od_quota_slave,
					  &oh->ot_quota_trans, GRPQUOTA,
					  obj->oo_attr.la_gid, attr->la_gid,
					  bspace, &info->oti_qi);
			if (rc)
				RETURN(rc);
		}
	}

	RETURN(0);
}
Example #23
/*
 * Delete the entire contents of a directory.  Return a count
 * of the number of entries that could not be deleted.
 *
 * NOTE: this function assumes that the directory is inactive,
 *	so there is no need to lock its entries before deletion.
 *	Also, it assumes the directory contents are *only* regular
 *	files.
 */
static int
zfs_purgedir(znode_t *dzp)
{
    zap_cursor_t	zc;
    zap_attribute_t	zap;
    znode_t		*xzp;
    dmu_tx_t	*tx;
    zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
    zfs_dirlock_t	dl;
    int skipped = 0;
    int error;

    for (zap_cursor_init(&zc, zfsvfs->z_os, dzp->z_id);
            (error = zap_cursor_retrieve(&zc, &zap)) == 0;
            zap_cursor_advance(&zc)) {
#ifdef __APPLE__
        error = zfs_zget_sans_vnode(zfsvfs,
                         ZFS_DIRENT_OBJ(zap.za_first_integer), &xzp);
        ASSERT3U(error, ==, 0);

#else
        error = zfs_zget(zfsvfs,
                         ZFS_DIRENT_OBJ(zap.za_first_integer), &xzp);
        ASSERT3U(error, ==, 0);

        ASSERT((ZTOV(xzp)->v_type == VREG) ||
               (ZTOV(xzp)->v_type == VLNK));
#endif /* __APPLE__ */
        tx = dmu_tx_create(zfsvfs->z_os);
        dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
        dmu_tx_hold_zap(tx, dzp->z_id, FALSE, zap.za_name);
        dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
        dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
        /* Is this really needed? */
        zfs_sa_upgrade_txholds(tx, xzp);

        error = dmu_tx_assign(tx, TXG_WAIT);
        if (error) {
            dmu_tx_abort(tx);
#ifdef __APPLE__
            if (ZTOV(xzp) == NULL) {
                zfs_zinactive(xzp);
            } else {
                VN_RELE(ZTOV(xzp));
            }
#else
            VN_RELE(ZTOV(xzp));
#endif /* __APPLE__ */
            skipped += 1;
            continue;
        }
        bzero(&dl, sizeof (dl));
        dl.dl_dzp = dzp;
        dl.dl_name = zap.za_name;

        error = zfs_link_destroy(&dl, xzp, tx, 0, NULL);
        ASSERT3U(error, ==, 0);
        dmu_tx_commit(tx);

#ifdef __APPLE__
        if (ZTOV(xzp) == NULL) {
            zfs_zinactive(xzp);
        } else {
            VN_RELE(ZTOV(xzp));
        }
#else
        VN_RELE(ZTOV(xzp));
#endif /* __APPLE__ */
    }
    zap_cursor_fini(&zc);
    ASSERT(error == ENOENT);
    return (skipped);
}
Example #24
/*
 * Increase the file length
 *
 *	IN:	zp	- znode of file to free data in.
 *		end	- new end-of-file
 *
 * 	RETURN:	0 on success, error code on failure
 */
static int
zfs_extend(znode_t *zp, uint64_t end)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	dmu_tx_t *tx;
	rl_t *rl;
	uint64_t newblksz;
	int error;

	/*
	 * We will change zp_size, lock the whole file.
	 */
	rl = zfs_range_lock(zp, 0, UINT64_MAX, RL_WRITER);

	/*
	 * Nothing to do if file already at desired length.
	 */
	if (end <= zp->z_size) {
		zfs_range_unlock(rl);
		return (0);
	}
	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	if (end > zp->z_blksz &&
	    (!ISP2(zp->z_blksz) || zp->z_blksz < zsb->z_max_blksz)) {
		/*
		 * We are growing the file past the current block size.
		 */
		if (zp->z_blksz > ZTOZSB(zp)->z_max_blksz) {
			ASSERT(!ISP2(zp->z_blksz));
			newblksz = MIN(end, SPA_MAXBLOCKSIZE);
		} else {
			newblksz = MIN(end, ZTOZSB(zp)->z_max_blksz);
		}
		dmu_tx_hold_write(tx, zp->z_id, 0, newblksz);
	} else {
		newblksz = 0;
	}

	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_range_unlock(rl);
		return (error);
	}

	if (newblksz)
		zfs_grow_blocksize(zp, newblksz, tx);

	zp->z_size = end;

	VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
	    &zp->z_size, sizeof (zp->z_size), tx));

	zfs_range_unlock(rl);

	dmu_tx_commit(tx);

	return (0);
}
Example #25
/*
 * Free space in a file
 *
 *	IN:	zp	- znode of file to free data in.
 *		off	- start of range
 *		len	- length of range (0 => to EOF)
 *		flag	- current file open mode flags.
 *		log	- TRUE if this action should be logged
 *
 * 	RETURN:	0 on success, error code on failure
 */
int
zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
{
	struct inode *ip = ZTOI(zp);
	dmu_tx_t *tx;
	zfs_sb_t *zsb = ZTOZSB(zp);
	zilog_t *zilog = zsb->z_log;
	uint64_t mode;
	uint64_t mtime[2], ctime[2];
	sa_bulk_attr_t bulk[3];
	int count = 0;
	int error;

	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zsb), &mode,
	    sizeof (mode))) != 0)
		return (error);

	if (off > zp->z_size) {
		error = zfs_extend(zp, off+len);
		if (error == 0 && log)
			goto log;
		else
			return (error);
	}

	/*
	 * Check for any locks in the region to be freed.
	 */
	if (ip->i_flock && mandatory_lock(ip)) {
		uint64_t length = (len ? len : zp->z_size - off);
		if (!lock_may_write(ip, off, length))
			return (SET_ERROR(EAGAIN));
	}

	if (len == 0) {
		error = zfs_trunc(zp, off);
	} else {
		if ((error = zfs_free_range(zp, off, len)) == 0 &&
		    off + len > zp->z_size)
			error = zfs_extend(zp, off+len);
	}
	if (error || !log)
		return (error);
log:
	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb),
	    NULL, &zp->z_pflags, 8);
	zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime, B_TRUE);
	error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
	ASSERT(error == 0);

	zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);

	dmu_tx_commit(tx);
	zfs_inode_update(zp);
	return (0);
}
Example #26
/*
 * Truncate a file
 *
 *	IN:	zp	- znode of file to free data in.
 *		end	- new end-of-file.
 *
 * 	RETURN:	0 on success, error code on failure
 */
static int
zfs_trunc(znode_t *zp, uint64_t end)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	dmu_tx_t *tx;
	rl_t *rl;
	int error;
	sa_bulk_attr_t bulk[2];
	int count = 0;

	/*
	 * We will change zp_size, lock the whole file.
	 */
	rl = zfs_range_lock(zp, 0, UINT64_MAX, RL_WRITER);

	/*
	 * Nothing to do if file already at desired length.
	 */
	if (end >= zp->z_size) {
		zfs_range_unlock(rl);
		return (0);
	}

	error = dmu_free_long_range(zsb->z_os, zp->z_id, end, -1);
	if (error) {
		zfs_range_unlock(rl);
		return (error);
	}
top:
	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);
		zfs_range_unlock(rl);
		return (error);
	}

	zp->z_size = end;
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb),
	    NULL, &zp->z_size, sizeof (zp->z_size));

	if (end == 0) {
		zp->z_pflags &= ~ZFS_SPARSE;
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb),
		    NULL, &zp->z_pflags, 8);
	}
	VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);

	dmu_tx_commit(tx);

	zfs_range_unlock(rl);

	return (0);
}