Example #1
int __osd_attr_init(const struct lu_env *env, udmu_objset_t *uos,
		    uint64_t oid, dmu_tx_t *tx, struct lu_attr *la)
{
	sa_bulk_attr_t	*bulk;
	sa_handle_t	*sa_hdl;
	struct osa_attr	*osa = &osd_oti_get(env)->oti_osa;
	uint64_t	 gen;
	uint64_t	 parent;
	uint64_t	 crtime[2];
	timestruc_t	 now;
	int		 cnt;
	int		 rc;

	gethrestime(&now);
	gen = dmu_tx_get_txg(tx);

	ZFS_TIME_ENCODE(&now, crtime);
	/* XXX: this should be the real parent id for ZPL access, but we have
	 * no such info in OSD; it could probably be part of dt_object_format */
	parent = 0;

	osa->atime[0] = la->la_atime;
	osa->ctime[0] = la->la_ctime;
	osa->mtime[0] = la->la_mtime;
	osa->mode = la->la_mode;
	osa->uid = la->la_uid;
	osa->gid = la->la_gid;
	osa->rdev = la->la_rdev;
	osa->nlink = la->la_nlink;
	osa->flags = la->la_flags;
	osa->size  = la->la_size;

	/* Now add in all of the "SA" attributes */
	rc = -sa_handle_get(uos->os, oid, NULL, SA_HDL_PRIVATE, &sa_hdl);
	if (rc)
		return rc;

	OBD_ALLOC(bulk, sizeof(sa_bulk_attr_t) * 13);
	if (bulk == NULL) {
		rc = -ENOMEM;
		goto out;
	}
	/*
	 * we need to create all of the SAs below upon object creation.
	 *
	 * XXX The attribute order matters since the accounting callback relies
	 * on static offsets (i.e. SA_*_OFFSET, see zfs_space_delta_cb()) to
	 * look up the UID/GID attributes. Moreover, the callback does not seem
	 * to support the spill block.
	 * We define attributes in the same order as SA_*_OFFSET in order to
	 * work around the problem. See ORI-610.
	 */
	cnt = 0;
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(uos), NULL, &osa->mode, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(uos), NULL, &osa->size, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GEN(uos), NULL, &gen, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(uos), NULL, &osa->uid, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(uos), NULL, &osa->gid, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PARENT(uos), NULL, &parent, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(uos), NULL, &osa->flags, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(uos), NULL, osa->atime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(uos), NULL, osa->mtime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(uos), NULL, osa->ctime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CRTIME(uos), NULL, crtime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(uos), NULL, &osa->nlink, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(uos), NULL, &osa->rdev, 8);

	rc = -sa_replace_all_by_template(sa_hdl, bulk, cnt, tx);

	OBD_FREE(bulk, sizeof(sa_bulk_attr_t) * 13);
out:
	sa_handle_destroy(sa_hdl);
	return rc;
}
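
For reference, every example on this page fills an array of sa_bulk_attr_t descriptors via SA_ADD_BULK_ATTR() and then hands the array to sa_bulk_lookup(), sa_bulk_update() or sa_replace_all_by_template(). The sketch below shows what the descriptor and the macro look like; it is reconstructed from the OpenZFS sys/sa.h headers from memory, so field names and the exact macro body may differ slightly between ZFS versions and should be treated as illustrative rather than authoritative.

typedef struct sa_bulk_attr {
	void			*sa_data;	/* caller buffer (or locator argument) */
	sa_data_locator_t	*sa_data_func;	/* optional locator callback, else NULL */
	uint16_t		 sa_length;	/* attribute length in bytes */
	sa_attr_type_t		 sa_attr;	/* which attribute, e.g. SA_ZPL_MODE */
	/* remaining fields are private to the SA framework */
} sa_bulk_attr_t;

/*
 * SA_ADD_BULK_ATTR(b, idx, attr, func, data, len) fills the next slot of
 * the descriptor array and post-increments the index, roughly as follows.
 */
#define	SA_ADD_BULK_ATTR(b, idx, attr, func, data, len)	\
{								\
	b[idx].sa_attr = attr;					\
	b[idx].sa_data_func = func;				\
	b[idx].sa_data = data;					\
	b[idx++].sa_length = len;				\
}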
Example #2
/*
 * Retrieve the attributes of a DMU object
 */
int __osd_object_attr_get(const struct lu_env *env, udmu_objset_t *uos,
			  struct osd_object *obj, struct lu_attr *la)
{
	struct osa_attr	*osa = &osd_oti_get(env)->oti_osa;
	sa_handle_t	*sa_hdl;
	sa_bulk_attr_t	*bulk;
	int		 cnt = 0;
	int		 rc;
	ENTRY;

	LASSERT(obj->oo_db != NULL);

	rc = -sa_handle_get(uos->os, obj->oo_db->db_object, NULL,
			    SA_HDL_PRIVATE, &sa_hdl);
	if (rc)
		RETURN(rc);

	OBD_ALLOC(bulk, sizeof(sa_bulk_attr_t) * 9);
	if (bulk == NULL)
		GOTO(out_sa, rc = -ENOMEM);

	la->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE | LA_TYPE |
			LA_SIZE | LA_UID | LA_GID | LA_FLAGS | LA_NLINK;

	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(uos), NULL, osa->atime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(uos), NULL, osa->mtime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(uos), NULL, osa->ctime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(uos), NULL, &osa->mode, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(uos), NULL, &osa->size, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(uos), NULL, &osa->nlink, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(uos), NULL, &osa->uid, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(uos), NULL, &osa->gid, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(uos), NULL, &osa->flags, 8);

	rc = -sa_bulk_lookup(sa_hdl, bulk, cnt);
	if (rc)
		GOTO(out_bulk, rc);

	la->la_atime = osa->atime[0];
	la->la_mtime = osa->mtime[0];
	la->la_ctime = osa->ctime[0];
	la->la_mode = osa->mode;
	la->la_uid = osa->uid;
	la->la_gid = osa->gid;
	la->la_nlink = osa->nlink;
	la->la_flags = osa->flags;
	la->la_size = osa->size;

	if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode)) {
		rc = -sa_lookup(sa_hdl, SA_ZPL_RDEV(uos), &osa->rdev, 8);
		if (rc)
			GOTO(out_bulk, rc);
		la->la_rdev = osa->rdev;
		la->la_valid |= LA_RDEV;
	}
out_bulk:
	OBD_FREE(bulk, sizeof(sa_bulk_attr_t) * 9);
out_sa:
	sa_handle_destroy(sa_hdl);

	RETURN(rc);
}
Example #3
File: sa.c Project: kelsieflynn/SamFlynnOS
/*
 * add/remove/replace a single attribute and then rewrite the entire set
 * of attributes.
 */
static int
sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
    sa_data_op_t action, sa_data_locator_t *locator, void *datastart,
    uint16_t buflen, dmu_tx_t *tx)
{
	sa_os_t *sa = hdl->sa_os->os_sa;
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
	dnode_t *dn;
	sa_bulk_attr_t *attr_desc;
	void *old_data[2];
	int bonus_attr_count = 0;
	int bonus_data_size, spill_data_size;
	int spill_attr_count = 0;
	int error;
	uint16_t length;
	int i, j, k, length_idx;
	sa_hdr_phys_t *hdr;
	sa_idx_tab_t *idx_tab;
	int attr_count;
	int count;

	ASSERT(MUTEX_HELD(&hdl->sa_lock));

	/* First make a copy of the old data */

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn->dn_bonuslen != 0) {
		bonus_data_size = hdl->sa_bonus->db_size;
		old_data[0] = kmem_alloc(bonus_data_size, KM_SLEEP);
		bcopy(hdl->sa_bonus->db_data, old_data[0],
		    hdl->sa_bonus->db_size);
		bonus_attr_count = hdl->sa_bonus_tab->sa_layout->lot_attr_count;
	} else {
		old_data[0] = NULL;
	}
	DB_DNODE_EXIT(db);

	/* Bring spill buffer online if it isn't currently */

	if ((error = sa_get_spill(hdl)) == 0) {
		spill_data_size = hdl->sa_spill->db_size;
		old_data[1] = kmem_alloc(spill_data_size, KM_SLEEP);
		bcopy(hdl->sa_spill->db_data, old_data[1],
		    hdl->sa_spill->db_size);
		spill_attr_count =
		    hdl->sa_spill_tab->sa_layout->lot_attr_count;
	} else if (error && error != ENOENT) {
		if (old_data[0])
			kmem_free(old_data[0], bonus_data_size);
		return (error);
	} else {
		old_data[1] = NULL;
	}

	/* build descriptor of all attributes */

	attr_count = bonus_attr_count + spill_attr_count;
	if (action == SA_ADD)
		attr_count++;
	else if (action == SA_REMOVE)
		attr_count--;

	attr_desc = kmem_zalloc(sizeof (sa_bulk_attr_t) * attr_count, KM_SLEEP);

	/*
	 * loop through bonus and spill buffer if it exists, and
	 * build up new attr_descriptor to reset the attributes
	 */
	k = j = 0;
	count = bonus_attr_count;
	hdr = SA_GET_HDR(hdl, SA_BONUS);
	idx_tab = SA_IDX_TAB_GET(hdl, SA_BONUS);
	for (; k != 2; k++) {
		/* iterate over each attribute in layout */
		for (i = 0, length_idx = 0; i != count; i++) {
			sa_attr_type_t attr;

			attr = idx_tab->sa_layout->lot_attrs[i];
			if (attr == newattr) {
				if (action == SA_REMOVE) {
					j++;
					continue;
				}
				ASSERT(SA_REGISTERED_LEN(sa, attr) == 0);
				ASSERT(action == SA_REPLACE);
				SA_ADD_BULK_ATTR(attr_desc, j, attr,
				    locator, datastart, buflen);
			} else {
				length = SA_REGISTERED_LEN(sa, attr);
				if (length == 0) {
					length = hdr->sa_lengths[length_idx++];
				}

				SA_ADD_BULK_ATTR(attr_desc, j, attr,
				    NULL, (void *)
				    (TOC_OFF(idx_tab->sa_idx_tab[attr]) +
				    (uintptr_t)old_data[k]), length);
			}
		}
		if (k == 0 && hdl->sa_spill) {
			hdr = SA_GET_HDR(hdl, SA_SPILL);
			idx_tab = SA_IDX_TAB_GET(hdl, SA_SPILL);
			count = spill_attr_count;
		} else {
			break;
		}
	}
	if (action == SA_ADD) {
		length = SA_REGISTERED_LEN(sa, newattr);
		if (length == 0) {
			length = buflen;
		}
		SA_ADD_BULK_ATTR(attr_desc, j, newattr, locator,
		    datastart, buflen);
	}

	error = sa_build_layouts(hdl, attr_desc, attr_count, tx);

	if (old_data[0])
		kmem_free(old_data[0], bonus_data_size);
	if (old_data[1])
		kmem_free(old_data[1], spill_data_size);
	kmem_free(attr_desc, sizeof (sa_bulk_attr_t) * attr_count);

	return (error);
}
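
sa_modify_attrs() above is static to sa.c; ordinary callers reach it indirectly through the public SA API, typically sa_update(), whenever a single attribute is added, replaced or resized and the whole attribute set has to be rewritten. A minimal, hypothetical caller is sketched below; the helper name is invented, while sa_update(), SA_ZPL_SCANSTAMP() and AV_SCANSTAMP_SZ appear elsewhere in these examples. It assumes a held SA handle and an assigned transaction.

/*
 * Hypothetical helper: store an anti-virus scanstamp as a single SA.
 * sa_update() takes the handle lock and, when the attribute is new or
 * its size changes, ends up in sa_modify_attrs() to rewrite the set.
 */
static int
example_set_scanstamp(sa_handle_t *hdl, zfsvfs_t *zfsvfs,
    uint8_t *stamp, dmu_tx_t *tx)
{
	return (sa_update(hdl, SA_ZPL_SCANSTAMP(zfsvfs),
	    stamp, AV_SCANSTAMP_SZ, tx));
}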
Example #4
void
zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
{
	dmu_buf_t *db = sa_get_db(hdl);
	znode_t *zp = sa_get_userdata(hdl);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	sa_bulk_attr_t bulk[20];
	int count = 0;
	sa_bulk_attr_t sa_attrs[20] = { 0 };
	zfs_acl_locator_cb_t locate = { 0 };
	uint64_t uid, gid, mode, rdev, xattr, parent;
	uint64_t crtime[2], mtime[2], ctime[2];
	zfs_acl_phys_t znode_acl;
	char scanstamp[AV_SCANSTAMP_SZ];
	boolean_t drop_lock = B_FALSE;

	/*
	 * No upgrade if the ACL isn't cached, since we won't know which
	 * locks are held and reading the ACL would require special "locked"
	 * interfaces that would be messy.
	 */
	if (zp->z_acl_cached == NULL || ZTOV(zp)->v_type == VLNK)
		return;

	/*
	 * If the z_lock is held and we aren't the owner
	 * then just return since we don't want to deadlock
	 * trying to update the status of z_is_sa.  This
	 * file can then be upgraded at a later time.
	 *
	 * Otherwise, we know we are doing the
	 * sa_update() that caused us to enter this function.
	 */
	if (mutex_owner(&zp->z_lock) != curthread) {
		if (mutex_tryenter(&zp->z_lock) == 0)
			return;
		else
			drop_lock = B_TRUE;
	}

	/* First do a bulk query of the attributes that aren't cached */
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &crtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_XATTR(zfsvfs), NULL, &xattr, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL, &rdev, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL, &uid, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL, &gid, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
	    &znode_acl, 88);

	if (sa_bulk_lookup_locked(hdl, bulk, count) != 0)
		goto done;


	/*
	 * While the order here doesn't matter, it's best to try and organize
	 * it in such a way as to pick up an already existing layout number
	 */
	count = 0;
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_GEN(zfsvfs),
	    NULL, &zp->z_gen, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_UID(zfsvfs), NULL, &uid, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_GID(zfsvfs), NULL, &gid, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_PARENT(zfsvfs),
	    NULL, &parent, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_ATIME(zfsvfs), NULL,
	    zp->z_atime, 16);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MTIME(zfsvfs), NULL,
	    &mtime, 16);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_CTIME(zfsvfs), NULL,
	    &ctime, 16);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_CRTIME(zfsvfs), NULL,
	    &crtime, 16);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_LINKS(zfsvfs), NULL,
	    &zp->z_links, 8);
	if (zp->z_vnode->v_type == VBLK || zp->z_vnode->v_type == VCHR)
		SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_RDEV(zfsvfs), NULL,
		    &rdev, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
	    &zp->z_acl_cached->z_acl_count, 8);

	if (zp->z_acl_cached->z_version < ZFS_ACL_VERSION_FUID)
		zfs_acl_xform(zp, zp->z_acl_cached, CRED());

	locate.cb_aclp = zp->z_acl_cached;
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_DACL_ACES(zfsvfs),
	    zfs_acl_data_locator, &locate, zp->z_acl_cached->z_acl_bytes);

	if (xattr)
		SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_XATTR(zfsvfs),
		    NULL, &xattr, 8);

	/* if scanstamp then add scanstamp */

	if (zp->z_pflags & ZFS_BONUS_SCANSTAMP) {
		bcopy((caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
		    scanstamp, AV_SCANSTAMP_SZ);
		SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SCANSTAMP(zfsvfs),
		    NULL, scanstamp, AV_SCANSTAMP_SZ);
		zp->z_pflags &= ~ZFS_BONUS_SCANSTAMP;
	}

	VERIFY(dmu_set_bonustype(db, DMU_OT_SA, tx) == 0);
	VERIFY(sa_replace_all_by_template_locked(hdl, sa_attrs,
	    count, tx) == 0);
	if (znode_acl.z_acl_extern_obj)
		VERIFY(0 == dmu_object_free(zfsvfs->z_os,
		    znode_acl.z_acl_extern_obj, tx));

	zp->z_is_sa = B_TRUE;
done:
	if (drop_lock)
		mutex_exit(&zp->z_lock);
}
Example #5
int __osd_attr_init(const struct lu_env *env, struct osd_device *osd,
		    sa_handle_t *sa_hdl, dmu_tx_t *tx,
		    struct lu_attr *la, uint64_t parent)
{
	sa_bulk_attr_t	*bulk = osd_oti_get(env)->oti_attr_bulk;
	struct osa_attr	*osa = &osd_oti_get(env)->oti_osa;
	uint64_t	 gen;
	uint64_t	 crtime[2];
	timestruc_t	 now;
	int		 cnt;
	int		 rc;

	LASSERT(sa_hdl);

	gen = dmu_tx_get_txg(tx);
	gethrestime(&now);
	ZFS_TIME_ENCODE(&now, crtime);

	osa->atime[0] = la->la_atime;
	osa->ctime[0] = la->la_ctime;
	osa->mtime[0] = la->la_mtime;
	osa->mode = la->la_mode;
	osa->uid = la->la_uid;
	osa->gid = la->la_gid;
	osa->rdev = la->la_rdev;
	osa->nlink = la->la_nlink;
	osa->flags = attrs_fs2zfs(la->la_flags);
	osa->size  = la->la_size;

	/*
	 * we need to create all of the SAs below upon object creation.
	 *
	 * XXX The attribute order matters since the accounting callback relies
	 * on static offsets (i.e. SA_*_OFFSET, see zfs_space_delta_cb()) to
	 * look up the UID/GID attributes. Moreover, the callback does not seem
	 * to support the spill block.
	 * We define attributes in the same order as SA_*_OFFSET in order to
	 * work around the problem. See ORI-610.
	 */
	cnt = 0;
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL, &osa->mode, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL, &osa->size, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GEN(osd), NULL, &gen, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL, &osa->uid, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL, &osa->gid, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PARENT(osd), NULL, &parent, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL, &osa->flags, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL, osa->atime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL, osa->mtime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL, osa->ctime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CRTIME(osd), NULL, crtime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL, &osa->nlink, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL, &osa->rdev, 8);
	LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));

	rc = -sa_replace_all_by_template(sa_hdl, bulk, cnt, tx);

	return rc;
}
Example #6
/*
 * Create a new DMU object to hold a zfs znode.
 *
 *	IN:	dzp	- parent directory for new znode
 *		vap	- file attributes for new znode
 *		tx	- dmu transaction id for zap operations
 *		cr	- credentials of caller
 *		flag	- flags:
 *			  IS_ROOT_NODE	- new object will be root
 *			  IS_XATTR	- new object is an attribute
 *		bonuslen - length of bonus buffer
 *		setaclp  - File/Dir initial ACL
 *		fuidp	 - Tracks fuid allocation.
 *
 *	OUT:	zpp	- allocated znode
 *
 */
void
zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
    uint_t flag, znode_t **zpp, zfs_acl_ids_t *acl_ids)
{
	uint64_t	crtime[2], atime[2], mtime[2], ctime[2];
	uint64_t	mode, size, links, parent, pflags;
	uint64_t	dzp_pflags = 0;
	uint64_t	rdev = 0;
	zfs_sb_t	*zsb = ZTOZSB(dzp);
	dmu_buf_t	*db;
	timestruc_t	now;
	uint64_t	gen, obj;
	int		bonuslen;
	sa_handle_t	*sa_hdl;
	dmu_object_type_t obj_type;
	sa_bulk_attr_t	*sa_attrs;
	int		cnt = 0;
	zfs_acl_locator_cb_t locate = { 0 };

	if (zsb->z_replay) {
		obj = vap->va_nodeid;
		now = vap->va_ctime;		/* see zfs_replay_create() */
		gen = vap->va_nblocks;		/* ditto */
	} else {
		obj = 0;
		gethrestime(&now);
		gen = dmu_tx_get_txg(tx);
	}

	obj_type = zsb->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;
	bonuslen = (obj_type == DMU_OT_SA) ?
	    DN_MAX_BONUSLEN : ZFS_OLD_ZNODE_PHYS_SIZE;

	/*
	 * Create a new DMU object.
	 */
	/*
	 * There's currently no mechanism for pre-reading the blocks that will
	 * be needed to allocate a new object, so we accept the small chance
	 * that there will be an i/o error and we will fail one of the
	 * assertions below.
	 */
	if (S_ISDIR(vap->va_mode)) {
		if (zsb->z_replay) {
			VERIFY0(zap_create_claim_norm(zsb->z_os, obj,
			    zsb->z_norm, DMU_OT_DIRECTORY_CONTENTS,
			    obj_type, bonuslen, tx));
		} else {
			obj = zap_create_norm(zsb->z_os,
			    zsb->z_norm, DMU_OT_DIRECTORY_CONTENTS,
			    obj_type, bonuslen, tx);
		}
	} else {
		if (zsb->z_replay) {
			VERIFY0(dmu_object_claim(zsb->z_os, obj,
			    DMU_OT_PLAIN_FILE_CONTENTS, 0,
			    obj_type, bonuslen, tx));
		} else {
			obj = dmu_object_alloc(zsb->z_os,
			    DMU_OT_PLAIN_FILE_CONTENTS, 0,
			    obj_type, bonuslen, tx);
		}
	}

	ZFS_OBJ_HOLD_ENTER(zsb, obj);
	VERIFY(0 == sa_buf_hold(zsb->z_os, obj, NULL, &db));

	/*
	 * If this is the root, fix up the half-initialized parent pointer
	 * to reference the just-allocated physical data area.
	 */
	if (flag & IS_ROOT_NODE) {
		dzp->z_id = obj;
	} else {
		dzp_pflags = dzp->z_pflags;
	}

	/*
	 * If parent is an xattr, so am I.
	 */
	if (dzp_pflags & ZFS_XATTR) {
		flag |= IS_XATTR;
	}

	if (zsb->z_use_fuids)
		pflags = ZFS_ARCHIVE | ZFS_AV_MODIFIED;
	else
		pflags = 0;

	if (S_ISDIR(vap->va_mode)) {
		size = 2;		/* contents ("." and "..") */
		links = (flag & (IS_ROOT_NODE | IS_XATTR)) ? 2 : 1;
	} else {
		size = links = 0;
	}

	if (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))
		rdev = vap->va_rdev;

	parent = dzp->z_id;
	mode = acl_ids->z_mode;
	if (flag & IS_XATTR)
		pflags |= ZFS_XATTR;

	/*
	 * No execs denied will be determined when zfs_mode_compute() is called.
	 */
	pflags |= acl_ids->z_aclp->z_hints &
	    (ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|ZFS_ACL_AUTO_INHERIT|
	    ZFS_ACL_DEFAULTED|ZFS_ACL_PROTECTED);

	ZFS_TIME_ENCODE(&now, crtime);
	ZFS_TIME_ENCODE(&now, ctime);

	if (vap->va_mask & ATTR_ATIME) {
		ZFS_TIME_ENCODE(&vap->va_atime, atime);
	} else {
		ZFS_TIME_ENCODE(&now, atime);
	}

	if (vap->va_mask & ATTR_MTIME) {
		ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
	} else {
		ZFS_TIME_ENCODE(&now, mtime);
	}

	/* Now add in all of the "SA" attributes */
	VERIFY(0 == sa_handle_get_from_db(zsb->z_os, db, NULL, SA_HDL_SHARED,
	    &sa_hdl));

	/*
	 * Setup the array of attributes to be replaced/set on the new file
	 *
	 * order for DMU_OT_ZNODE is critical since it needs to be constructed
	 * in the old znode_phys_t format.  Don't change this ordering.
	 */
	sa_attrs = kmem_alloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);

	if (obj_type == DMU_OT_ZNODE) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zsb),
		    NULL, &atime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zsb),
		    NULL, &mtime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zsb),
		    NULL, &ctime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zsb),
		    NULL, &crtime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zsb),
		    NULL, &gen, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zsb),
		    NULL, &mode, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zsb),
		    NULL, &size, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zsb),
		    NULL, &parent, 8);
	} else {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zsb),
		    NULL, &mode, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zsb),
		    NULL, &size, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zsb),
		    NULL, &gen, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zsb),
		    NULL, &acl_ids->z_fuid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zsb),
		    NULL, &acl_ids->z_fgid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zsb),
		    NULL, &parent, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zsb),
		    NULL, &pflags, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zsb),
		    NULL, &atime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zsb),
		    NULL, &mtime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zsb),
		    NULL, &ctime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zsb),
		    NULL, &crtime, 16);
	}

	SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zsb), NULL, &links, 8);

	if (obj_type == DMU_OT_ZNODE) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zsb), NULL,
		    &empty_xattr, 8);
	}
	if (obj_type == DMU_OT_ZNODE ||
	    (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zsb),
		    NULL, &rdev, 8);
	}
	if (obj_type == DMU_OT_ZNODE) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zsb),
		    NULL, &pflags, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zsb), NULL,
		    &acl_ids->z_fuid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zsb), NULL,
		    &acl_ids->z_fgid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zsb), NULL, pad,
		    sizeof (uint64_t) * 4);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zsb), NULL,
		    &acl_phys, sizeof (zfs_acl_phys_t));
	} else if (acl_ids->z_aclp->z_version >= ZFS_ACL_VERSION_FUID) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zsb), NULL,
		    &acl_ids->z_aclp->z_acl_count, 8);
		locate.cb_aclp = acl_ids->z_aclp;
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zsb),
		    zfs_acl_data_locator, &locate,
		    acl_ids->z_aclp->z_acl_bytes);
		mode = zfs_mode_compute(mode, acl_ids->z_aclp, &pflags,
		    acl_ids->z_fuid, acl_ids->z_fgid);
	}

	VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0);

	if (!(flag & IS_ROOT_NODE)) {
		*zpp = zfs_znode_alloc(zsb, db, 0, obj_type, obj, sa_hdl,
		    ZTOI(dzp));
		VERIFY(*zpp != NULL);
		VERIFY(dzp != NULL);
	} else {
		/*
		 * If we are creating the root node, the "parent" we
		 * passed in is the znode for the root.
		 */
		*zpp = dzp;

		(*zpp)->z_sa_hdl = sa_hdl;
	}

	(*zpp)->z_pflags = pflags;
	(*zpp)->z_mode = mode;

	if (obj_type == DMU_OT_ZNODE ||
	    acl_ids->z_aclp->z_version < ZFS_ACL_VERSION_FUID) {
		VERIFY0(zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx));
	}
	kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
	ZFS_OBJ_HOLD_EXIT(zsb, obj);
}
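
zfs_mknode() expects the caller to have created and assigned a transaction that already holds everything the new znode will touch. Below is a simplified calling sequence, loosely modeled on zfs_create() and shown only as an illustration: the helper name is invented, the hold sizes are indicative, and real callers also handle quota checks, ACL setup and the ERESTART retry loop.

/*
 * Illustrative only: wire up the transaction around zfs_mknode().
 */
static int
example_create(znode_t *dzp, char *name, vattr_t *vap, cred_t *cr,
    zfs_dirlock_t *dl, zfs_acl_ids_t *acl_ids, znode_t **zpp)
{
	zfs_sb_t	*zsb = ZTOZSB(dzp);
	dmu_tx_t	*tx;
	int		error;

	tx = dmu_tx_create(zsb->z_os);
	/* room for the new object's SA bonus buffer */
	dmu_tx_hold_sa_create(tx, acl_ids->z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);
	/* the directory entry we are about to add */
	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
	/* the parent's size/link/timestamp updates */
	dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);

	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	zfs_mknode(dzp, vap, tx, cr, 0, zpp, acl_ids);
	error = zfs_link_create(dl, *zpp, tx, ZNEW);
	dmu_tx_commit(tx);

	return (error);
}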
Example #7
/*
 * Free space in a file
 *
 *	IN:	zp	- znode of file to free data in.
 *		off	- start of range
 *		len	- end of range (0 => EOF)
 *		flag	- current file open mode flags.
 *		log	- TRUE if this action should be logged
 *
 * 	RETURN:	0 on success, error code on failure
 */
int
zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
{
	struct inode *ip = ZTOI(zp);
	dmu_tx_t *tx;
	zfs_sb_t *zsb = ZTOZSB(zp);
	zilog_t *zilog = zsb->z_log;
	uint64_t mode;
	uint64_t mtime[2], ctime[2];
	sa_bulk_attr_t bulk[3];
	int count = 0;
	int error;

	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zsb), &mode,
	    sizeof (mode))) != 0)
		return (error);

	if (off > zp->z_size) {
		error =  zfs_extend(zp, off+len);
		if (error == 0 && log)
			goto log;
		else
			return (error);
	}

	/*
	 * Check for any locks in the region to be freed.
	 */
	if (ip->i_flock && mandatory_lock(ip)) {
		uint64_t length = (len ? len : zp->z_size - off);
		if (!lock_may_write(ip, off, length))
			return (SET_ERROR(EAGAIN));
	}

	if (len == 0) {
		error = zfs_trunc(zp, off);
	} else {
		if ((error = zfs_free_range(zp, off, len)) == 0 &&
		    off + len > zp->z_size)
			error = zfs_extend(zp, off+len);
	}
	if (error || !log)
		return (error);
log:
	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb),
	    NULL, &zp->z_pflags, 8);
	zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime, B_TRUE);
	error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
	ASSERT(error == 0);

	zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);

	dmu_tx_commit(tx);
	zfs_inode_update(zp);
	return (0);
}
Example #8
File: zfs_dir.c Project: ColinIanKing/zfs
/*
 * Unlink zp from dl, and mark zp for deletion if this was the last link. Can
 * fail if zp is a mount point (EBUSY) or a non-empty directory (ENOTEMPTY).
 * If 'unlinkedp' is NULL, we put unlinked znodes on the unlinked list.
 * If it's non-NULL, we use it to indicate whether the znode needs deletion,
 * and it's the caller's job to do it.
 */
int
zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
    boolean_t *unlinkedp)
{
	znode_t *dzp = dl->dl_dzp;
	zfsvfs_t *zfsvfs = ZTOZSB(dzp);
	int zp_is_dir = S_ISDIR(ZTOI(zp)->i_mode);
	boolean_t unlinked = B_FALSE;
	sa_bulk_attr_t bulk[5];
	uint64_t mtime[2], ctime[2];
	uint64_t links;
	int count = 0;
	int error;

#ifdef HAVE_DNLC
	dnlc_remove(ZTOI(dzp), dl->dl_name);
#endif /* HAVE_DNLC */

	if (!(flag & ZRENAMING)) {
		mutex_enter(&zp->z_lock);

		if (zp_is_dir && !zfs_dirempty(zp)) {
			mutex_exit(&zp->z_lock);
			return (SET_ERROR(ENOTEMPTY));
		}

		/*
		 * If we get here, we are going to try to remove the object.
		 * First try removing the name from the directory; if that
		 * fails, return the error.
		 */
		error = zfs_dropname(dl, zp, dzp, tx, flag);
		if (error != 0) {
			mutex_exit(&zp->z_lock);
			return (error);
		}

		if (ZTOI(zp)->i_nlink <= zp_is_dir) {
			zfs_panic_recover("zfs: link count on %lu is %u, "
			    "should be at least %u", zp->z_id,
			    (int)ZTOI(zp)->i_nlink, zp_is_dir + 1);
			set_nlink(ZTOI(zp), zp_is_dir + 1);
		}
		drop_nlink(ZTOI(zp));
		if (ZTOI(zp)->i_nlink == zp_is_dir) {
			zp->z_unlinked = B_TRUE;
			clear_nlink(ZTOI(zp));
			unlinked = B_TRUE;
		} else {
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs),
			    NULL, &ctime, sizeof (ctime));
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
			    NULL, &zp->z_pflags, sizeof (zp->z_pflags));
			zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime,
			    ctime);
		}
		links = ZTOI(zp)->i_nlink;
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
		    NULL, &links, sizeof (links));
		error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
		count = 0;
		ASSERT(error == 0);
		mutex_exit(&zp->z_lock);
	} else {
		error = zfs_dropname(dl, zp, dzp, tx, flag);
		if (error != 0)
			return (error);
	}

	mutex_enter(&dzp->z_lock);
	dzp->z_size--;		/* one dirent removed */
	if (zp_is_dir)
		drop_nlink(ZTOI(dzp));	/* ".." link from zp */
	links = ZTOI(dzp)->i_nlink;
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
	    NULL, &links, sizeof (links));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
	    NULL, &dzp->z_size, sizeof (dzp->z_size));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs),
	    NULL, ctime, sizeof (ctime));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
	    NULL, mtime, sizeof (mtime));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
	    NULL, &dzp->z_pflags, sizeof (dzp->z_pflags));
	zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime);
	error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
	ASSERT(error == 0);
	mutex_exit(&dzp->z_lock);

	if (unlinkedp != NULL)
		*unlinkedp = unlinked;
	else if (unlinked)
		zfs_unlinked_add(zp, tx);

	return (0);
}
Example #9
File: zfs_dir.c Project: BjoKaSH/zfs-osx
/*
 * Unlink zp from dl, and mark zp for deletion if this was the last link.
 * Can fail if zp is a mount point (EBUSY) or a non-empty directory (EEXIST).
 * If 'unlinkedp' is NULL, we put unlinked znodes on the unlinked list.
 * If it's non-NULL, we use it to indicate whether the znode needs deletion,
 * and it's the caller's job to do it.
 */
int
zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
                 boolean_t *unlinkedp)
{
    znode_t *dzp = dl->dl_dzp;
    zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
    vnode_t *vp = ZTOV(zp);
#ifdef __APPLE__
    //int zp_is_dir = S_ISDIR(zp->z_phys->zp_mode);
    int zp_is_dir = S_ISDIR(zp->z_mode);
#else
    int zp_is_dir = (vp->v_type == VDIR);
#endif
    boolean_t unlinked = B_FALSE;
    sa_bulk_attr_t bulk[5];
    uint64_t mtime[2], ctime[2];
    int count = 0;
    int error;

#ifndef __APPLE__
    dnlc_remove(ZTOV(dzp), dl->dl_name);
#endif

    if (!(flag & ZRENAMING)) {

        if (vp) {
            if (vn_vfswlock(vp))		/* prevent new mounts on zp */
                return (EBUSY);

            if (vn_ismntpt(vp)) {		/* don't remove mount point */
                vn_vfsunlock(vp);
                return (EBUSY);
            }
        } /* if (vp) */

        mutex_enter(&zp->z_lock);
        if (zp_is_dir && !zfs_dirempty(zp)) {	/* dir not empty */
            mutex_exit(&zp->z_lock);
#ifdef __APPLE__
            return (ENOTEMPTY);
#else
            vn_vfsunlock(vp);
            return (EEXIST);
#endif
        }


        /*
         * If we get here, we are going to try to remove the object.
         * First try removing the name from the directory; if that
         * fails, return the error.
         */
        error = zfs_dropname(dl, zp, dzp, tx, flag);
        if (error != 0) {
            mutex_exit(&zp->z_lock);
            //vn_vfsunlock(vp);
            return (error);
        }


        if (zp->z_links <= zp_is_dir) {
#ifndef __APPLE__
            zfs_panic_recover("zfs: link count on %s is %u, "
                              "should be at least %u",
                              zp->z_vnode->v_path ? zp->z_vnode->v_path :
                              "<unknown>", (int)zp->z_links,
                              zp_is_dir + 1);
#endif
            zp->z_links = zp_is_dir + 1;
        }
        if (--zp->z_links == zp_is_dir) {
            zp->z_unlinked = B_TRUE;
            zp->z_links = 0;
            unlinked = B_TRUE;
        } else {
            SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs),
                             NULL, &ctime, sizeof (ctime));
            SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
                             NULL, &zp->z_pflags, sizeof (zp->z_pflags));
            zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime,
                                    B_TRUE);
        }
        SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
                         NULL, &zp->z_links, sizeof (zp->z_links));
        error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
        count = 0;
        ASSERT(error == 0);
        mutex_exit(&zp->z_lock);
#ifndef __APPLE__
        vn_vfsunlock(vp);
#endif
    } else {
        error = zfs_dropname(dl, zp, dzp, tx, flag);
        if (error != 0)
            return (error);
    }

    mutex_enter(&dzp->z_lock);
    dzp->z_size--;		/* one dirent removed */
    dzp->z_links -= zp_is_dir;	/* ".." link from zp */
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
                     NULL, &dzp->z_links, sizeof (dzp->z_links));
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
                     NULL, &dzp->z_size, sizeof (dzp->z_size));
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs),
                     NULL, ctime, sizeof (ctime));
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
                     NULL, mtime, sizeof (mtime));
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
                     NULL, &dzp->z_pflags, sizeof (dzp->z_pflags));
    zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime, B_TRUE);
    error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
    ASSERT(error == 0);
    mutex_exit(&dzp->z_lock);

    /* Inform VFS to dump this znode as soon as possible... */
    if (unlinked && ZTOV(zp))
        vnode_recycle(ZTOV(zp));

    //error = zap_remove(zp->z_zfsvfs->z_os, dzp->z_id, dl->dl_name, tx);
    //ASSERT(error == 0);

    if (unlinkedp != NULL)
        *unlinkedp = unlinked;
    else if (unlinked)
        zfs_unlinked_add(zp, tx);

    return (0);
}
Example #10
/*
 * Truncate a file
 *
 *	IN:	zp	- znode of file to free data in.
 *		end	- new end-of-file.
 *
 * 	RETURN:	0 on success, error code on failure
 */
static int
zfs_trunc(znode_t *zp, uint64_t end)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	dmu_tx_t *tx;
	rl_t *rl;
	int error;
	sa_bulk_attr_t bulk[2];
	int count = 0;

	/*
	 * We will change zp_size, lock the whole file.
	 */
	rl = zfs_range_lock(zp, 0, UINT64_MAX, RL_WRITER);

	/*
	 * Nothing to do if file already at desired length.
	 */
	if (end >= zp->z_size) {
		zfs_range_unlock(rl);
		return (0);
	}

	error = dmu_free_long_range(zsb->z_os, zp->z_id, end,  -1);
	if (error) {
		zfs_range_unlock(rl);
		return (error);
	}
top:
	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);
		zfs_range_unlock(rl);
		return (error);
	}

	zp->z_size = end;
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb),
	    NULL, &zp->z_size, sizeof (zp->z_size));

	if (end == 0) {
		zp->z_pflags &= ~ZFS_SPARSE;
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb),
		    NULL, &zp->z_pflags, 8);
	}
	VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);

	dmu_tx_commit(tx);

	zfs_range_unlock(rl);

	return (0);
}
Example #11
File: zfs_dir.c Project: BjoKaSH/zfs-osx
/*
 * Link zp into dl.  Can only fail if zp has been unlinked.
 */
int
zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
{
    znode_t *dzp = dl->dl_dzp;
    zfsvfs_t *zfsvfs = zp->z_zfsvfs;
#ifdef __APPLE__
    uint64_t value;
    /* OSX - don't access the vnode here since it might not be attached yet. */
    //int zp_is_dir = S_ISDIR(zp->z_phys->zp_mode);
    int zp_is_dir = S_ISDIR(zp->z_mode);
#else
    vnode_t *vp = ZTOV(zp);
    uint64_t value;
    int zp_is_dir = (vp->v_type == VDIR);
#endif
    sa_bulk_attr_t bulk[5];
    uint64_t mtime[2], ctime[2];
    int count = 0;
    int error;

    mutex_enter(&zp->z_lock);

    if (!(flag & ZRENAMING)) {
        if (zp->z_unlinked) {	/* no new links to unlinked zp */
            ASSERT(!(flag & (ZNEW | ZEXISTS)));
            mutex_exit(&zp->z_lock);
            return (ENOENT);
        }
        zp->z_links++;
        SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
                         &zp->z_links, sizeof (zp->z_links));
    }

    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
                     &dzp->z_id, sizeof (dzp->z_id));
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
                     &zp->z_pflags, sizeof (zp->z_pflags));

    if (!(flag & ZNEW)) {
        SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
                         ctime, sizeof (ctime));
        zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime,
                                ctime, B_TRUE);
    }
    error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
    // Needed?
#ifdef __APPLE__
    zp->z_parent = dzp->z_id;
#endif

    mutex_exit(&zp->z_lock);


    mutex_enter(&dzp->z_lock);
    dzp->z_size++;
    dzp->z_links += zp_is_dir;

    count = 0;
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
                     &dzp->z_size, sizeof (dzp->z_size));
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
                     &dzp->z_links, sizeof (dzp->z_links));
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
                     mtime, sizeof (mtime));
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
                     ctime, sizeof (ctime));
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
                     &dzp->z_pflags, sizeof (dzp->z_pflags));
    zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime, B_TRUE);
    error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
    mutex_exit(&dzp->z_lock);

    value = zfs_dirent(zp, zp->z_mode);
    error = zap_add(zp->z_zfsvfs->z_os, dzp->z_id, dl->dl_name,
                    8, 1, &value, tx);
    ASSERT(error == 0);

#ifndef __APPLE__
    /* On Mac OS X, this is done up in VFS layer. */
    dnlc_update(ZTOV(dzp), dl->dl_name, vp);
#endif
    return (0);
}
Example #12
/*
 * Set the attributes of an object
 *
 * The transaction passed to this routine must have
 * dmu_tx_hold_bonus(tx, oid) called and then assigned
 * to a transaction group.
 */
static int osd_attr_set(const struct lu_env *env, struct dt_object *dt,
			const struct lu_attr *la, struct thandle *handle)
{
	struct osd_thread_info	*info = osd_oti_get(env);
	sa_bulk_attr_t		*bulk = osd_oti_get(env)->oti_attr_bulk;
	struct osd_object	*obj = osd_dt_obj(dt);
	struct osd_device	*osd = osd_obj2dev(obj);
	struct osd_thandle	*oh;
	struct osa_attr		*osa = &info->oti_osa;
	__u64			 valid = la->la_valid;
	int			 cnt;
	int			 rc = 0;

	ENTRY;

	down_read(&obj->oo_guard);
	if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
		GOTO(out, rc = -ENOENT);

	LASSERT(handle != NULL);
	LASSERT(osd_invariant(obj));
	LASSERT(obj->oo_sa_hdl);

	oh = container_of0(handle, struct osd_thandle, ot_super);
	/* Assert that the transaction has been assigned to a
	   transaction group. */
	LASSERT(oh->ot_tx->tx_txg != 0);

	/* Only allow set size for regular file */
	if (!S_ISREG(dt->do_lu.lo_header->loh_attr))
		valid &= ~(LA_SIZE | LA_BLOCKS);

	if (valid & LA_CTIME && la->la_ctime == obj->oo_attr.la_ctime)
		valid &= ~LA_CTIME;

	if (valid & LA_MTIME && la->la_mtime == obj->oo_attr.la_mtime)
		valid &= ~LA_MTIME;

	if (valid & LA_ATIME && la->la_atime == obj->oo_attr.la_atime)
		valid &= ~LA_ATIME;

	if (valid == 0)
		GOTO(out, rc = 0);

	if (valid & LA_FLAGS) {
		struct lustre_mdt_attrs *lma;
		struct lu_buf buf;

		if (la->la_flags & LUSTRE_LMA_FL_MASKS) {
			CLASSERT(sizeof(info->oti_buf) >= sizeof(*lma));
			lma = (struct lustre_mdt_attrs *)&info->oti_buf;
			buf.lb_buf = lma;
			buf.lb_len = sizeof(info->oti_buf);
			rc = osd_xattr_get(env, &obj->oo_dt, &buf,
					   XATTR_NAME_LMA);
			if (rc > 0) {
				lma->lma_incompat =
					le32_to_cpu(lma->lma_incompat);
				lma->lma_incompat |=
					lustre_to_lma_flags(la->la_flags);
				lma->lma_incompat =
					cpu_to_le32(lma->lma_incompat);
				buf.lb_buf = lma;
				buf.lb_len = sizeof(*lma);
				rc = osd_xattr_set_internal(env, obj, &buf,
							    XATTR_NAME_LMA,
							    LU_XATTR_REPLACE,
							    oh);
			}
			if (rc < 0) {
				CWARN("%s: failed to set LMA flags: rc = %d\n",
				       osd->od_svname, rc);
				RETURN(rc);
			}
		}
	}

	/* do both accounting updates outside oo_attr_lock below */
	if ((valid & LA_UID) && (la->la_uid != obj->oo_attr.la_uid)) {
		/* Update user accounting. Failure isn't fatal, but we still
		 * log an error message */
		rc = -zap_increment_int(osd->od_os, osd->od_iusr_oid,
					la->la_uid, 1, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to update accounting ZAP for user "
				"%d (%d)\n", osd->od_svname, la->la_uid, rc);
		rc = -zap_increment_int(osd->od_os, osd->od_iusr_oid,
					obj->oo_attr.la_uid, -1, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to update accounting ZAP for user "
				"%d (%d)\n", osd->od_svname,
				obj->oo_attr.la_uid, rc);
	}
	if ((valid & LA_GID) && (la->la_gid != obj->oo_attr.la_gid)) {
		/* Update group accounting. Failure isn't fatal, but we still
		 * log an error message */
		rc = -zap_increment_int(osd->od_os, osd->od_igrp_oid,
					la->la_gid, 1, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to update accounting ZAP for user "
				"%d (%d)\n", osd->od_svname, la->la_gid, rc);
		rc = -zap_increment_int(osd->od_os, osd->od_igrp_oid,
					obj->oo_attr.la_gid, -1, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to update accounting ZAP for user "
				"%d (%d)\n", osd->od_svname,
				obj->oo_attr.la_gid, rc);
	}

	write_lock(&obj->oo_attr_lock);
	cnt = 0;
	if (valid & LA_ATIME) {
		osa->atime[0] = obj->oo_attr.la_atime = la->la_atime;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL,
				 osa->atime, 16);
	}
	if (valid & LA_MTIME) {
		osa->mtime[0] = obj->oo_attr.la_mtime = la->la_mtime;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL,
				 osa->mtime, 16);
	}
	if (valid & LA_CTIME) {
		osa->ctime[0] = obj->oo_attr.la_ctime = la->la_ctime;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL,
				 osa->ctime, 16);
	}
	if (valid & LA_MODE) {
		/* mode is stored along with type, so read it first */
		obj->oo_attr.la_mode = (obj->oo_attr.la_mode & S_IFMT) |
			(la->la_mode & ~S_IFMT);
		osa->mode = obj->oo_attr.la_mode;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL,
				 &osa->mode, 8);
	}
	if (valid & LA_SIZE) {
		osa->size = obj->oo_attr.la_size = la->la_size;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL,
				 &osa->size, 8);
	}
	if (valid & LA_NLINK) {
		osa->nlink = obj->oo_attr.la_nlink = la->la_nlink;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL,
				 &osa->nlink, 8);
	}
	if (valid & LA_RDEV) {
		osa->rdev = obj->oo_attr.la_rdev = la->la_rdev;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL,
				 &osa->rdev, 8);
	}
	if (valid & LA_FLAGS) {
		osa->flags = attrs_fs2zfs(la->la_flags);
		/* many flags are not supported by zfs, so ensure a good cached
		 * copy */
		obj->oo_attr.la_flags = attrs_zfs2fs(osa->flags);
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL,
				 &osa->flags, 8);
	}
	if (valid & LA_UID) {
		osa->uid = obj->oo_attr.la_uid = la->la_uid;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL,
				 &osa->uid, 8);
	}
	if (valid & LA_GID) {
		osa->gid = obj->oo_attr.la_gid = la->la_gid;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL,
				 &osa->gid, 8);
	}
	obj->oo_attr.la_valid |= valid;
	write_unlock(&obj->oo_attr_lock);

	LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
	rc = osd_object_sa_bulk_update(obj, bulk, cnt, oh);

out:
	up_read(&obj->oo_guard);
	RETURN(rc);
}
Example #13
/*
 * Retrieve the attributes of a DMU object
 */
int __osd_object_attr_get(const struct lu_env *env, struct osd_device *o,
			  struct osd_object *obj, struct lu_attr *la)
{
	struct osa_attr	*osa = &osd_oti_get(env)->oti_osa;
	sa_bulk_attr_t	*bulk = osd_oti_get(env)->oti_attr_bulk;
	sa_handle_t	*sa_hdl;
	int		 cnt = 0;
	int		 rc;
	ENTRY;

	LASSERT(obj->oo_db != NULL);

	rc = -sa_handle_get(o->od_os, obj->oo_db->db_object, NULL,
			    SA_HDL_PRIVATE, &sa_hdl);
	if (rc)
		RETURN(rc);

	la->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE | LA_TYPE |
			LA_SIZE | LA_UID | LA_GID | LA_FLAGS | LA_NLINK;

	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(o), NULL, osa->atime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(o), NULL, osa->mtime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(o), NULL, osa->ctime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(o), NULL, &osa->mode, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(o), NULL, &osa->size, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(o), NULL, &osa->nlink, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(o), NULL, &osa->uid, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(o), NULL, &osa->gid, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(o), NULL, &osa->flags, 8);
	LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));

	rc = -sa_bulk_lookup(sa_hdl, bulk, cnt);
	if (rc)
		GOTO(out_sa, rc);

	la->la_atime = osa->atime[0];
	la->la_mtime = osa->mtime[0];
	la->la_ctime = osa->ctime[0];
	la->la_mode = osa->mode;
	la->la_uid = osa->uid;
	la->la_gid = osa->gid;
	la->la_nlink = osa->nlink;
	la->la_flags = attrs_zfs2fs(osa->flags);
	la->la_size = osa->size;

	/* Try to get the extra flag from LMA. Right now, only the LMAI_ORPHAN
	 * flag is stored in LMA, and it is only for orphan directories. */
	if (S_ISDIR(la->la_mode) && dt_object_exists(&obj->oo_dt)) {
		struct osd_thread_info *info = osd_oti_get(env);
		struct lustre_mdt_attrs *lma;
		struct lu_buf buf;

		lma = (struct lustre_mdt_attrs *)info->oti_buf;
		buf.lb_buf = lma;
		buf.lb_len = sizeof(info->oti_buf);
		rc = osd_xattr_get(env, &obj->oo_dt, &buf, XATTR_NAME_LMA);
		if (rc > 0) {
			rc = 0;
			lma->lma_incompat = le32_to_cpu(lma->lma_incompat);
			obj->oo_lma_flags =
				lma_to_lustre_flags(lma->lma_incompat);

		} else if (rc == -ENODATA) {
			rc = 0;
		}
	}

	if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode)) {
		rc = -sa_lookup(sa_hdl, SA_ZPL_RDEV(o), &osa->rdev, 8);
		if (rc)
			GOTO(out_sa, rc);
		la->la_rdev = osa->rdev;
		la->la_valid |= LA_RDEV;
	}
out_sa:
	sa_handle_destroy(sa_hdl);

	RETURN(rc);
}
Example #14
/*
 * Set the attributes of an object
 *
 * The transaction passed to this routine must have
 * dmu_tx_hold_bonus(tx, oid) called and then assigned
 * to a transaction group.
 */
static int osd_attr_set(const struct lu_env *env, struct dt_object *dt,
			const struct lu_attr *la, struct thandle *handle,
			struct lustre_capa *capa)
{
	struct osd_object	*obj = osd_dt_obj(dt);
	struct osd_device	*osd = osd_obj2dev(obj);
	udmu_objset_t		*uos = &osd->od_objset;
	struct osd_thandle	*oh;
	struct osa_attr		*osa = &osd_oti_get(env)->oti_osa;
	sa_bulk_attr_t		*bulk;
	int			 cnt;
	int			 rc = 0;

	ENTRY;
	LASSERT(handle != NULL);
	LASSERT(dt_object_exists(dt));
	LASSERT(osd_invariant(obj));
	LASSERT(obj->oo_sa_hdl);

	oh = container_of0(handle, struct osd_thandle, ot_super);
	/* Assert that the transaction has been assigned to a
	   transaction group. */
	LASSERT(oh->ot_tx->tx_txg != 0);

	if (la->la_valid == 0)
		RETURN(0);

	OBD_ALLOC(bulk, sizeof(sa_bulk_attr_t) * 10);
	if (bulk == NULL)
		RETURN(-ENOMEM);

	/* do both accounting updates outside oo_attr_lock below */
	if ((la->la_valid & LA_UID) && (la->la_uid != obj->oo_attr.la_uid)) {
		/* Update user accounting. Failure isn't fatal, but we still
		 * log an error message */
		rc = -zap_increment_int(osd->od_objset.os, osd->od_iusr_oid,
					la->la_uid, 1, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to update accounting ZAP for user "
				"%d (%d)\n", osd->od_svname, la->la_uid, rc);
		rc = -zap_increment_int(osd->od_objset.os, osd->od_iusr_oid,
					obj->oo_attr.la_uid, -1, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to update accounting ZAP for user "
				"%d (%d)\n", osd->od_svname,
				obj->oo_attr.la_uid, rc);
	}
	if ((la->la_valid & LA_GID) && (la->la_gid != obj->oo_attr.la_gid)) {
		/* Update group accounting. Failure isn't fatal, but we still
		 * log an error message */
		rc = -zap_increment_int(osd->od_objset.os, osd->od_igrp_oid,
					la->la_gid, 1, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to update accounting ZAP for user "
				"%d (%d)\n", osd->od_svname, la->la_gid, rc);
		rc = -zap_increment_int(osd->od_objset.os, osd->od_igrp_oid,
					obj->oo_attr.la_gid, -1, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to update accounting ZAP for user "
				"%d (%d)\n", osd->od_svname,
				obj->oo_attr.la_gid, rc);
	}

	write_lock(&obj->oo_attr_lock);
	cnt = 0;
	if (la->la_valid & LA_ATIME) {
		osa->atime[0] = obj->oo_attr.la_atime = la->la_atime;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(uos), NULL,
				 osa->atime, 16);
	}
	if (la->la_valid & LA_MTIME) {
		osa->mtime[0] = obj->oo_attr.la_mtime = la->la_mtime;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(uos), NULL,
				 osa->mtime, 16);
	}
	if (la->la_valid & LA_CTIME) {
		osa->ctime[0] = obj->oo_attr.la_ctime = la->la_ctime;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(uos), NULL,
				 osa->ctime, 16);
	}
	if (la->la_valid & LA_MODE) {
		/* mode is stored along with type, so read it first */
		obj->oo_attr.la_mode = (obj->oo_attr.la_mode & S_IFMT) |
			(la->la_mode & ~S_IFMT);
		osa->mode = obj->oo_attr.la_mode;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(uos), NULL,
				 &osa->mode, 8);
	}
	if (la->la_valid & LA_SIZE) {
		osa->size = obj->oo_attr.la_size = la->la_size;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(uos), NULL,
				 &osa->size, 8);
	}
	if (la->la_valid & LA_NLINK) {
		osa->nlink = obj->oo_attr.la_nlink = la->la_nlink;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(uos), NULL,
				 &osa->nlink, 8);
	}
	if (la->la_valid & LA_RDEV) {
		osa->rdev = obj->oo_attr.la_rdev = la->la_rdev;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(uos), NULL,
				 &osa->rdev, 8);
	}
	if (la->la_valid & LA_FLAGS) {
		osa->flags = obj->oo_attr.la_flags = la->la_flags;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(uos), NULL,
				 &osa->flags, 8);
	}
	if (la->la_valid & LA_UID) {
		osa->uid = obj->oo_attr.la_uid = la->la_uid;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(uos), NULL,
				 &osa->uid, 8);
	}
	if (la->la_valid & LA_GID) {
		osa->gid = obj->oo_attr.la_gid = la->la_gid;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(uos), NULL,
				 &osa->gid, 8);
	}
	obj->oo_attr.la_valid |= la->la_valid;
	write_unlock(&obj->oo_attr_lock);

	rc = osd_object_sa_bulk_update(obj, bulk, cnt, oh);

	OBD_FREE(bulk, sizeof(sa_bulk_attr_t) * 10);
	RETURN(rc);
}
Example #15
int
zfs_rezget(znode_t *zp)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	dmu_object_info_t doi;
	dmu_buf_t *db;
	uint64_t obj_num = zp->z_id;
	uint64_t mode;
	sa_bulk_attr_t bulk[8];
	int err;
	int count = 0;
	uint64_t gen;

	ZFS_OBJ_HOLD_ENTER(zsb, obj_num);

	mutex_enter(&zp->z_acl_lock);
	if (zp->z_acl_cached) {
		zfs_acl_free(zp->z_acl_cached);
		zp->z_acl_cached = NULL;
	}
	mutex_exit(&zp->z_acl_lock);

	rw_enter(&zp->z_xattr_lock, RW_WRITER);
	if (zp->z_xattr_cached) {
		nvlist_free(zp->z_xattr_cached);
		zp->z_xattr_cached = NULL;
	}

	if (zp->z_xattr_parent) {
		iput(ZTOI(zp->z_xattr_parent));
		zp->z_xattr_parent = NULL;
	}
	rw_exit(&zp->z_xattr_lock);

	ASSERT(zp->z_sa_hdl == NULL);
	err = sa_buf_hold(zsb->z_os, obj_num, NULL, &db);
	if (err) {
		ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
		return (err);
	}

	dmu_object_info_from_db(db, &doi);
	if (doi.doi_bonus_type != DMU_OT_SA &&
	    (doi.doi_bonus_type != DMU_OT_ZNODE ||
	    (doi.doi_bonus_type == DMU_OT_ZNODE &&
	    doi.doi_bonus_size < sizeof (znode_phys_t)))) {
		sa_buf_rele(db, NULL);
		ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
		return (SET_ERROR(EINVAL));
	}

	zfs_znode_sa_init(zsb, zp, db, doi.doi_bonus_type, NULL);

	/* reload cached values */
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL,
	    &gen, sizeof (gen));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL,
	    &zp->z_size, sizeof (zp->z_size));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL,
	    &zp->z_links, sizeof (zp->z_links));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
	    &zp->z_pflags, sizeof (zp->z_pflags));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
	    &zp->z_atime, sizeof (zp->z_atime));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL,
	    &zp->z_uid, sizeof (zp->z_uid));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL,
	    &zp->z_gid, sizeof (zp->z_gid));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL,
	    &mode, sizeof (mode));

	if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
		zfs_znode_dmu_fini(zp);
		ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
		return (SET_ERROR(EIO));
	}

	zp->z_mode = mode;

	if (gen != zp->z_gen) {
		zfs_znode_dmu_fini(zp);
		ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
		return (SET_ERROR(EIO));
	}

	zp->z_unlinked = (zp->z_links == 0);
	zp->z_blksz = doi.doi_data_block_size;
	zfs_inode_update(zp);

	ZFS_OBJ_HOLD_EXIT(zsb, obj_num);

	return (0);
}
Example #16
File: zfs_dir.c Project: ColinIanKing/zfs
/*
 * Link zp into dl.  Can only fail if zp has been unlinked.
 */
int
zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
{
	znode_t *dzp = dl->dl_dzp;
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	uint64_t value;
	int zp_is_dir = S_ISDIR(ZTOI(zp)->i_mode);
	sa_bulk_attr_t bulk[5];
	uint64_t mtime[2], ctime[2];
	uint64_t links;
	int count = 0;
	int error;

	mutex_enter(&zp->z_lock);

	if (!(flag & ZRENAMING)) {
		if (zp->z_unlinked) {	/* no new links to unlinked zp */
			ASSERT(!(flag & (ZNEW | ZEXISTS)));
			mutex_exit(&zp->z_lock);
			return (SET_ERROR(ENOENT));
		}
		if (!(flag & ZNEW)) {
			/*
			 * ZNEW nodes come from zfs_mknode() where the link
			 * count has already been initialised
			 */
			inc_nlink(ZTOI(zp));
			links = ZTOI(zp)->i_nlink;
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
			    NULL, &links, sizeof (links));
		}
	}
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
	    &dzp->z_id, sizeof (dzp->z_id));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, sizeof (zp->z_pflags));

	if (!(flag & ZNEW)) {
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
		    ctime, sizeof (ctime));
		zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime,
		    ctime);
	}
	error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
	ASSERT(error == 0);

	mutex_exit(&zp->z_lock);

	mutex_enter(&dzp->z_lock);
	dzp->z_size++;
	if (zp_is_dir)
		inc_nlink(ZTOI(dzp));
	links = ZTOI(dzp)->i_nlink;
	count = 0;
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &dzp->z_size, sizeof (dzp->z_size));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
	    &links, sizeof (links));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
	    mtime, sizeof (mtime));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
	    ctime, sizeof (ctime));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &dzp->z_pflags, sizeof (dzp->z_pflags));
	zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime);
	error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
	ASSERT(error == 0);
	mutex_exit(&dzp->z_lock);

	value = zfs_dirent(zp, zp->z_mode);
	error = zap_add(ZTOZSB(zp)->z_os, dzp->z_id, dl->dl_name,
	    8, 1, &value, tx);
	ASSERT(error == 0);

	return (0);
}
Example #17
/*
 * Construct a znode+inode and initialize.
 *
 * This does not do a call to dmu_set_user(); that is
 * up to the caller to do, in case you don't want to
 * return the znode.
 */
static znode_t *
zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
    dmu_object_type_t obj_type, uint64_t obj, sa_handle_t *hdl,
    struct inode *dip)
{
	znode_t	*zp;
	struct inode *ip;
	uint64_t mode;
	uint64_t parent;
	sa_bulk_attr_t bulk[9];
	int count = 0;

	ASSERT(zsb != NULL);

	ip = new_inode(zsb->z_sb);
	if (ip == NULL)
		return (NULL);

	zp = ITOZ(ip);
	ASSERT(zp->z_dirlocks == NULL);
	ASSERT3P(zp->z_acl_cached, ==, NULL);
	ASSERT3P(zp->z_xattr_cached, ==, NULL);
	ASSERT3P(zp->z_xattr_parent, ==, NULL);
	zp->z_moved = 0;
	zp->z_sa_hdl = NULL;
	zp->z_unlinked = 0;
	zp->z_atime_dirty = 0;
	zp->z_mapcnt = 0;
	zp->z_id = db->db_object;
	zp->z_blksz = blksz;
	zp->z_seq = 0x7A4653;
	zp->z_sync_cnt = 0;
	zp->z_is_zvol = B_FALSE;
	zp->z_is_mapped = B_FALSE;
	zp->z_is_ctldir = B_FALSE;
	zp->z_is_stale = B_FALSE;

	zfs_znode_sa_init(zsb, zp, db, obj_type, hdl);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL, &mode, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL, &zp->z_gen, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL, &zp->z_links, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
	    &zp->z_pflags, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zsb), NULL,
	    &parent, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
	    &zp->z_atime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL, &zp->z_uid, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL, &zp->z_gid, 8);

	if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || zp->z_gen == 0) {
		if (hdl == NULL)
			sa_handle_destroy(zp->z_sa_hdl);

		goto error;
	}

	zp->z_mode = mode;

	/*
	 * xattr znodes hold a reference on their unique parent
	 */
	if (dip && zp->z_pflags & ZFS_XATTR) {
		igrab(dip);
		zp->z_xattr_parent = ITOZ(dip);
	}

	ip->i_ino = obj;
	zfs_inode_update(zp);
	zfs_inode_set_ops(zsb, ip);

	/*
	 * The only way insert_inode_locked() can fail is if the ip->i_ino
	 * number is already hashed for this super block.  This can never
	 * happen because the inode numbers map 1:1 with the object numbers.
	 *
	 * The one exception is rolling back a mounted file system, but in
	 * this case all the active inode are unhashed during the rollback.
	 */
	VERIFY3S(insert_inode_locked(ip), ==, 0);

	mutex_enter(&zsb->z_znodes_lock);
	list_insert_tail(&zsb->z_all_znodes, zp);
	zsb->z_nr_znodes++;
	membar_producer();
	mutex_exit(&zsb->z_znodes_lock);

	unlock_new_inode(ip);
	return (zp);

error:
	unlock_new_inode(ip);
	iput(ip);
	return (NULL);
}
Example #18
/*
 * Link zp into dl.  Can only fail if zp has been unlinked.
 */
int
zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
{
	znode_t *dzp = dl->dl_dzp;
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	vnode_t *vp = ZTOV(zp);
	uint64_t value;
	int zp_is_dir = (vnode_isdir(vp));
	sa_bulk_attr_t bulk[5];
	uint64_t mtime[2], ctime[2];
	int count = 0;
	int error;

	mutex_enter(&zp->z_lock);

	if (!(flag & ZRENAMING)) {
		if (zp->z_unlinked) {	/* no new links to unlinked zp */
			ASSERT(!(flag & (ZNEW | ZEXISTS)));
			mutex_exit(&zp->z_lock);
			return (SET_ERROR(ENOENT));
		}
		zp->z_links++;
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
		    &zp->z_links, sizeof (zp->z_links));

	}
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
	    &dzp->z_id, sizeof (dzp->z_id));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, sizeof (zp->z_pflags));

	if (!(flag & ZNEW)) {
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
		    ctime, sizeof (ctime));
		zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime,
		    ctime, B_TRUE);
	}
	error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
	ASSERT(error == 0);

	mutex_exit(&zp->z_lock);

	mutex_enter(&dzp->z_lock);
	dzp->z_size++;
	dzp->z_links += zp_is_dir;
	count = 0;
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &dzp->z_size, sizeof (dzp->z_size));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
	    &dzp->z_links, sizeof (dzp->z_links));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
	    mtime, sizeof (mtime));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
	    ctime, sizeof (ctime));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &dzp->z_pflags, sizeof (dzp->z_pflags));
	zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime, B_TRUE);
	error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
	ASSERT(error == 0);
	mutex_exit(&dzp->z_lock);

	value = zfs_dirent(zp, zp->z_mode);
	error = zap_add(zp->z_zfsvfs->z_os, dzp->z_id, dl->dl_name,
	    8, 1, &value, tx);
	ASSERT(error == 0);

	dnlc_update(ZTOV(dzp), dl->dl_name, vp);

	return (0);
}
Example #19
File: zfs_znode.c Project: rosscameron/zfs
/*
 * Construct a znode+inode and initialize.
 *
 * This does not do a call to dmu_set_user(); that is
 * up to the caller to do, in case you don't want to
 * return the znode.
 */
static znode_t *
zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
                dmu_object_type_t obj_type, uint64_t obj, sa_handle_t *hdl,
                struct dentry *dentry, struct inode *dip)
{
    znode_t	*zp;
    struct inode *ip;
    uint64_t parent;
    sa_bulk_attr_t bulk[9];
    int count = 0;

    ASSERT(zsb != NULL);

    ip = new_inode(zsb->z_sb);
    if (ip == NULL)
        return (NULL);

    zp = ITOZ(ip);
    ASSERT(zp->z_dirlocks == NULL);
    ASSERT3P(zp->z_acl_cached, ==, NULL);
    ASSERT3P(zp->z_xattr_cached, ==, NULL);
    zp->z_moved = 0;
    zp->z_sa_hdl = NULL;
    zp->z_unlinked = 0;
    zp->z_atime_dirty = 0;
    zp->z_mapcnt = 0;
    zp->z_id = db->db_object;
    zp->z_blksz = blksz;
    zp->z_seq = 0x7A4653;
    zp->z_sync_cnt = 0;
    zp->z_is_zvol = B_FALSE;
    zp->z_is_mapped = B_FALSE;
    zp->z_is_ctldir = B_FALSE;

    zfs_znode_sa_init(zsb, zp, db, obj_type, hdl);

    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL, &zp->z_mode, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL, &zp->z_gen, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, &zp->z_size, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL, &zp->z_links, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
                     &zp->z_pflags, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zsb), NULL,
                     &parent, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
                     &zp->z_atime, 16);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL, &zp->z_uid, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL, &zp->z_gid, 8);

    if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || zp->z_gen == 0) {
        if (hdl == NULL)
            sa_handle_destroy(zp->z_sa_hdl);

        goto error;
    }

    ip->i_ino = obj;
    zfs_inode_update(zp);
    zfs_inode_set_ops(zsb, ip);

    if (insert_inode_locked(ip))
        goto error;

    if (dentry) {
        if (zpl_xattr_security_init(ip, dip, &dentry->d_name))
            goto error;

        d_instantiate(dentry, ip);
    }

    mutex_enter(&zsb->z_znodes_lock);
    list_insert_tail(&zsb->z_all_znodes, zp);
    zsb->z_nr_znodes++;
    membar_producer();
    mutex_exit(&zsb->z_znodes_lock);

    unlock_new_inode(ip);
    return (zp);

error:
    unlock_new_inode(ip);
    iput(ip);
    return NULL;
}