Example #1
/*
 * trim a range of the filesystem.
 *
 * Note: the parameters passed from userspace are byte ranges into the
 * filesystem, which do not match the format we use for filesystem block
 * addressing. FSB addressing is sparse (AGNO|AGBNO), while the incoming
 * format is a linear address range. Hence we need to use DADDR based
 * conversions and comparisons for determining the correct offset and
 * regions to trim.
 */
int
xfs_ioc_trim(
	struct xfs_mount		*mp,
	struct fstrim_range __user	*urange)
{
	struct request_queue	*q = bdev_get_queue(mp->m_ddev_targp->bt_bdev);
	unsigned int		granularity = q->limits.discard_granularity;
	struct fstrim_range	range;
	xfs_daddr_t		start, end, minlen;
	xfs_agnumber_t		start_agno, end_agno, agno;
	__uint64_t		blocks_trimmed = 0;
	int			error, last_error = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);
	if (!blk_queue_discard(q))
		return -XFS_ERROR(EOPNOTSUPP);
	if (copy_from_user(&range, urange, sizeof(range)))
		return -XFS_ERROR(EFAULT);

	/*
	 * Truncating down the len isn't actually quite correct, but using
	 * BBTOB would mean we trivially get overflows for values
	 * of ULLONG_MAX or slightly lower.  And ULLONG_MAX is the default
	 * used by the fstrim application.  In the end it really doesn't
	 * matter as trimming blocks is an advisory interface.
	 */
	if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) ||
	    range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)) ||
	    range.len < mp->m_sb.sb_blocksize)
		return -XFS_ERROR(EINVAL);

	start = BTOBB(range.start);
	end = start + BTOBBT(range.len) - 1;
	minlen = BTOBB(max_t(u64, granularity, range.minlen));

	if (end > XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1)
		end = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1;

	start_agno = xfs_daddr_to_agno(mp, start);
	end_agno = xfs_daddr_to_agno(mp, end);

	for (agno = start_agno; agno <= end_agno; agno++) {
		error = -xfs_trim_extents(mp, agno, start, end, minlen,
					  &blocks_trimmed);
		if (error)
			last_error = error;
	}

	if (last_error)
		return last_error;

	range.len = XFS_FSB_TO_B(mp, blocks_trimmed);
	if (copy_to_user(urange, &range, sizeof(range)))
		return -XFS_ERROR(EFAULT);
	return 0;
}
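
For reference, the byte-to-basic-block conversion macros used throughout these examples are defined in the XFS headers essentially as follows (a "basic block" is always 512 bytes; BTOBB rounds up, BTOBBT truncates):

#define BBSHIFT		9
#define BBSIZE		(1 << BBSHIFT)	/* 512-byte basic block */
#define BTOBB(bytes)	(((__u64)(bytes) + BBSIZE - 1) >> BBSHIFT)	/* round up */
#define BTOBBT(bytes)	((__u64)(bytes) >> BBSHIFT)			/* truncate */
#define BBTOB(bbs)	((bbs) << BBSHIFT)				/* back to bytes */
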
Example #2
/*
 * Calculate the minimum valid log size for the given superblock configuration.
 * Used to calculate the minimum log size at mkfs time, and to determine if
 * the log is large enough or not at mount time. Returns the minimum size in
 * filesystem block size units.
 */
int
xfs_log_calc_minimum_size(
	struct xfs_mount	*mp)
{
	struct xfs_trans_res	tres = {0};
	int			max_logres;
	int			min_logblks = 0;
	int			lsunit = 0;

	xfs_log_get_max_trans_res(mp, &tres);

	max_logres = xfs_log_calc_unit_res(mp, tres.tr_logres);
	if (tres.tr_logcount > 1)
		max_logres *= tres.tr_logcount;

	if (xfs_sb_version_haslogv2(&mp->m_sb) && mp->m_sb.sb_logsunit > 1)
		lsunit = BTOBB(mp->m_sb.sb_logsunit);

	/*
	 * Two factors should be taken into account for calculating the minimum
	 * log space.
	 * 1) The fundamental limitation is that no single transaction can be
	 *    larger than half size of the log.
	 *
	 *    From mkfs.xfs, this is captured by the XFS_MIN_LOG_FACTOR
	 *    define, which is set to 3. That means the log can definitely
	 *    fit two maximally sized transactions. We'll use this same
	 *    value here.
	 *
	 * 2) If the lsunit option is specified, a transaction requires 2 LSU
	 *    for the reservation because there are two log writes that can
	 *    require padding - the transaction data and the commit record which
	 *    are written separately and both can require padding to the LSU.
	 *    Consider that we can have an active CIL reservation holding
	 *    2*LSU while the CIL is not over its push threshold. In this
	 *    case, if there is not enough log space for at least one new
	 *    transaction, which includes another 2*LSU in its reservation,
	 *    the log space grant procedure, i.e. xlog_grant_head_wait(),
	 *    will loop forever.
	 *
	 *    Hence the log size needs to be able to contain two maximally sized
	 *    and padded transactions, which is (2 * (2 * LSU + maxlres)).
	 *
	 * Also, the log size should be a multiple of the log stripe unit, round
	 * it up to lsunit boundary if lsunit is specified.
	 */
	if (lsunit) {
		min_logblks = roundup_64(BTOBB(max_logres), lsunit) +
			      2 * lsunit;
	} else
		min_logblks = BTOBB(max_logres) + 2 * BBSIZE;
	min_logblks *= XFS_MIN_LOG_FACTOR;

	return XFS_BB_TO_FSB(mp, min_logblks);
}
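
The roundup_64() helper used above rounds a value up to the next multiple of the log stripe unit; outside the kernel its behavior can be sketched as:

/* sketch of roundup_64() semantics: round x up to a multiple of y */
static inline unsigned long long
roundup_64_sketch(unsigned long long x, unsigned int y)
{
	return ((x + y - 1) / y) * y;
}
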
Example #3
STATIC int
xfs_vn_fiemap(
	struct inode		*inode,
	struct fiemap_extent_info *fieinfo,
	u64			start,
	u64			length)
{
	xfs_inode_t		*ip = XFS_I(inode);
	struct getbmapx		bm;
	int			error;

	error = fiemap_check_flags(fieinfo, XFS_FIEMAP_FLAGS);
	if (error)
		return error;

	/* Set up bmap header for xfs internal routine */
	bm.bmv_offset = BTOBB(start);
	/* Special case for whole file */
	if (length == FIEMAP_MAX_OFFSET)
		bm.bmv_length = -1LL;
	else
		bm.bmv_length = BTOBB(length);

	/* We add one because in getbmap world count includes the header */
	bm.bmv_count = !fieinfo->fi_extents_max ? MAXEXTNUM :
					fieinfo->fi_extents_max + 1;
	bm.bmv_count = min_t(__s32, bm.bmv_count,
			     (PAGE_SIZE * 16 / sizeof(struct getbmapx)));
	bm.bmv_iflags = BMV_IF_PREALLOC | BMV_IF_NO_HOLES;
	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR)
		bm.bmv_iflags |= BMV_IF_ATTRFORK;
	if (!(fieinfo->fi_flags & FIEMAP_FLAG_SYNC))
		bm.bmv_iflags |= BMV_IF_DELALLOC;

	error = xfs_getbmap(ip, &bm, xfs_fiemap_format, fieinfo);
	if (error)
		return -error;

	return 0;
}
Example #4
STATIC int
xfs_vn_fiemap(
	struct inode		*inode,
	struct fiemap_extent_info *fieinfo,
	u64			start,
	u64			length)
{
	xfs_inode_t		*ip = XFS_I(inode);
	struct getbmapx		bm;
	int			error;

	error = fiemap_check_flags(fieinfo, XFS_FIEMAP_FLAGS);
	if (error)
		return error;

	/* Set up bmap header for xfs internal routine */
	bm.bmv_offset = BTOBB(start);
	/* Special case for whole file */
	if (length == FIEMAP_MAX_OFFSET)
		bm.bmv_length = -1LL;
	else
		bm.bmv_length = BTOBB(length);

	/* our formatter will tell xfs_getbmap when to stop. */
	bm.bmv_count = MAXEXTNUM;
	bm.bmv_iflags = BMV_IF_PREALLOC;
	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR)
		bm.bmv_iflags |= BMV_IF_ATTRFORK;
	if (!(fieinfo->fi_flags & FIEMAP_FLAG_SYNC))
		bm.bmv_iflags |= BMV_IF_DELALLOC;

	error = xfs_getbmap(ip, &bm, xfs_fiemap_format, fieinfo);
	if (error)
		return -error;

	return 0;
}
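
For context, both versions of this handler are reached from userspace through the FS_IOC_FIEMAP ioctl. A minimal caller sketch (error handling elided; the file path is hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(void)
{
	/* hypothetical test file */
	int fd = open("/mnt/testfile", O_RDONLY);
	struct fiemap *fm = calloc(1, sizeof(*fm) +
				   32 * sizeof(struct fiemap_extent));

	fm->fm_start = 0;
	fm->fm_length = FIEMAP_MAX_OFFSET;	/* map the whole file */
	fm->fm_extent_count = 32;		/* room for 32 extents */
	if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0)
		printf("%u extents mapped\n", fm->fm_mapped_extents);
	free(fm);
	close(fd);
	return 0;
}
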
Example #5
STATIC void
xfs_attr_rmtval_copyin(
	struct xfs_mount *mp,
	struct xfs_buf	*bp,
	xfs_ino_t	ino,
	int		*offset,
	int		*valuelen,
	__uint8_t	**src)
{
	char		*dst = bp->b_addr;
	xfs_daddr_t	bno = bp->b_bn;
	int		len = BBTOB(bp->b_length);
	int		blksize = mp->m_attr_geo->blksize;

	ASSERT(len >= blksize);

	while (len > 0 && *valuelen > 0) {
		int hdr_size;
		int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, blksize);

		byte_cnt = min(*valuelen, byte_cnt);
		hdr_size = xfs_attr3_rmt_hdr_set(mp, dst, ino, *offset,
						 byte_cnt, bno);

		memcpy(dst + hdr_size, *src, byte_cnt);

		/*
		 * If this is the last block, zero the remainder of it.
		 * Check that we are actually the last block, too.
		 */
		if (byte_cnt + hdr_size < blksize) {
			ASSERT(*valuelen - byte_cnt == 0);
			ASSERT(len == blksize);
			memset(dst + hdr_size + byte_cnt, 0,
					blksize - hdr_size - byte_cnt);
		}

		/* roll buffer forwards */
		len -= blksize;
		dst += blksize;
		bno += BTOBB(blksize);

		/* roll attribute data forwards */
		*valuelen -= byte_cnt;
		*src += byte_cnt;
		*offset += byte_cnt;
	}
}
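
Each block in the loop above carries XFS_ATTR3_RMT_BUF_SPACE(mp, blksize) bytes of value, i.e. the block size minus the v5 remote-attribute header. A hedged sketch of the resulting block-count arithmetic (the names are illustrative, not XFS API):

/* sketch: blocks needed for a remote attribute value of valuelen
 * bytes when each block loses hdr_size bytes to its header */
static int
rmt_blocks_needed(int valuelen, int blksize, int hdr_size)
{
	int space = blksize - hdr_size;		/* usable payload per block */

	return (valuelen + space - 1) / space;	/* round up */
}
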
Example #6
/*
 * Helper functions to copy attribute data in and out of the one disk extents
 */
STATIC int
xfs_attr_rmtval_copyout(
	struct xfs_mount *mp,
	struct xfs_buf	*bp,
	xfs_ino_t	ino,
	int		*offset,
	int		*valuelen,
	__uint8_t	**dst)
{
	char		*src = bp->b_addr;
	xfs_daddr_t	bno = bp->b_bn;
	int		len = BBTOB(bp->b_length);
	int		blksize = mp->m_attr_geo->blksize;

	ASSERT(len >= blksize);

	while (len > 0 && *valuelen > 0) {
		int hdr_size = 0;
		int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, blksize);

		byte_cnt = min(*valuelen, byte_cnt);

		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			if (!xfs_attr3_rmt_hdr_ok(src, ino, *offset,
						  byte_cnt, bno)) {
				xfs_alert(mp,
"remote attribute header mismatch bno/off/len/owner (0x%llx/0x%x/Ox%x/0x%llx)",
					bno, *offset, byte_cnt, ino);
				return -EFSCORRUPTED;
			}
			hdr_size = sizeof(struct xfs_attr3_rmt_hdr);
		}

		memcpy(*dst, src + hdr_size, byte_cnt);

		/* roll buffer forwards */
		len -= blksize;
		src += blksize;
		bno += BTOBB(blksize);

		/* roll attribute data forwards */
		*valuelen -= byte_cnt;
		*dst += byte_cnt;
		*offset += byte_cnt;
	}
	return 0;
}
Example #7
static void
xfs_attr3_rmt_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	xfs_failaddr_t	fa;
	int		blksize = mp->m_attr_geo->blksize;
	char		*ptr;
	int		len;
	xfs_daddr_t	bno;

	/* no verification of non-crc buffers */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	ptr = bp->b_addr;
	bno = bp->b_bn;
	len = BBTOB(bp->b_length);
	ASSERT(len >= blksize);

	while (len > 0) {
		struct xfs_attr3_rmt_hdr *rmt = (struct xfs_attr3_rmt_hdr *)ptr;

		fa = xfs_attr3_rmt_verify(mp, ptr, blksize, bno);
		if (fa) {
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
			return;
		}

		/*
		 * Ensure we aren't writing bogus LSNs to disk. See
		 * xfs_attr3_rmt_hdr_set() for the explanation.
		 */
		if (rmt->rm_lsn != cpu_to_be64(NULLCOMMITLSN)) {
			xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
			return;
		}
		xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF);

		len -= blksize;
		ptr += blksize;
		bno += BTOBB(blksize);
	}

	if (len != 0)
		xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
}
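
These verifiers are attached to metadata buffers through an xfs_buf_ops table; the wiring looks roughly like this (simplified from the kernel source):

const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = {
	.name = "xfs_attr3_rmt",
	.verify_read = xfs_attr3_rmt_read_verify,
	.verify_write = xfs_attr3_rmt_write_verify,
};
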
Example #8
static int
__xfs_attr3_rmt_read_verify(
	struct xfs_buf	*bp,
	bool		check_crc,
	xfs_failaddr_t	*failaddr)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	char		*ptr;
	int		len;
	xfs_daddr_t	bno;
	int		blksize = mp->m_attr_geo->blksize;

	/* no verification of non-crc buffers */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return 0;

	ptr = bp->b_addr;
	bno = bp->b_bn;
	len = BBTOB(bp->b_length);
	ASSERT(len >= blksize);

	while (len > 0) {
		if (check_crc &&
		    !xfs_verify_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF)) {
			*failaddr = __this_address;
			return -EFSBADCRC;
		}
		*failaddr = xfs_attr3_rmt_verify(mp, ptr, blksize, bno);
		if (*failaddr)
			return -EFSCORRUPTED;
		len -= blksize;
		ptr += blksize;
		bno += BTOBB(blksize);
	}

	if (len != 0) {
		*failaddr = __this_address;
		return -EFSCORRUPTED;
	}

	return 0;
}
Example #9
static void
xfs_attr3_rmt_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_fspriv;
	char		*ptr;
	int		len;
	xfs_daddr_t	bno;
	int		blksize = mp->m_attr_geo->blksize;

	/* no verification of non-crc buffers */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	ptr = bp->b_addr;
	bno = bp->b_bn;
	len = BBTOB(bp->b_length);
	ASSERT(len >= blksize);

	while (len > 0) {
		if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) {
			xfs_buf_ioerror(bp, -EFSCORRUPTED);
			xfs_verifier_error(bp);
			return;
		}
		if (bip) {
			struct xfs_attr3_rmt_hdr *rmt;

			rmt = (struct xfs_attr3_rmt_hdr *)ptr;
			rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn);
		}
		xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF);

		len -= blksize;
		ptr += blksize;
		bno += BTOBB(blksize);
	}
	ASSERT(len == 0);
}
Example #10
static void
xfs_attr3_rmt_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	char		*ptr;
	int		len;
	xfs_daddr_t	bno;
	int		blksize = mp->m_attr_geo->blksize;

	/* no verification of non-crc buffers */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	ptr = bp->b_addr;
	bno = bp->b_bn;
	len = BBTOB(bp->b_length);
	ASSERT(len >= blksize);

	while (len > 0) {
		if (!xfs_verify_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF)) {
			xfs_buf_ioerror(bp, -EFSBADCRC);
			break;
		}
		if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) {
			xfs_buf_ioerror(bp, -EFSCORRUPTED);
			break;
		}
		len -= blksize;
		ptr += blksize;
		bno += BTOBB(blksize);
	}

	if (bp->b_error)
		xfs_verifier_error(bp);
	else
		ASSERT(len == 0);
}
Example #11
/* ----- Kernel only functions below ----- */
STATIC int
xfs_readlink_bmap(
	struct xfs_inode	*ip,
	char			*link)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	mval[XFS_SYMLINK_MAPS];
	struct xfs_buf		*bp;
	xfs_daddr_t		d;
	char			*cur_chunk;
	int			pathlen = ip->i_d.di_size;
	int			nmaps = XFS_SYMLINK_MAPS;
	int			byte_cnt;
	int			n;
	int			error = 0;
	int			fsblocks = 0;
	int			offset;

	fsblocks = xfs_symlink_blocks(mp, pathlen);
	error = xfs_bmapi_read(ip, 0, fsblocks, mval, &nmaps, 0);
	if (error)
		goto out;

	offset = 0;
	for (n = 0; n < nmaps; n++) {
		d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
		byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);

		bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), 0,
				  &xfs_symlink_buf_ops);
		if (!bp)
			return XFS_ERROR(ENOMEM);
		error = bp->b_error;
		if (error) {
			xfs_buf_ioerror_alert(bp, __func__);
			xfs_buf_relse(bp);

			/* bad CRC means corrupted metadata */
			if (error == EFSBADCRC)
				error = EFSCORRUPTED;
			goto out;
		}
		byte_cnt = XFS_SYMLINK_BUF_SPACE(mp, byte_cnt);
		if (pathlen < byte_cnt)
			byte_cnt = pathlen;

		cur_chunk = bp->b_addr;
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			if (!xfs_symlink_hdr_ok(mp, ip->i_ino, offset,
							byte_cnt, bp)) {
				error = EFSCORRUPTED;
				xfs_alert(mp,
"symlink header does not match required off/len/owner (0x%x/Ox%x,0x%llx)",
					offset, byte_cnt, ip->i_ino);
				xfs_buf_relse(bp);
				goto out;

			}

			cur_chunk += sizeof(struct xfs_dsymlink_hdr);
		}

		/* copy from cur_chunk so the v5 header is skipped */
		memcpy(link + offset, cur_chunk, byte_cnt);

		pathlen -= byte_cnt;
		offset += byte_cnt;

		xfs_buf_relse(bp);
	}
	ASSERT(pathlen == 0);

	link[ip->i_d.di_size] = '\0';
	error = 0;

 out:
	return error;
}
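
The xfs_symlink_hdr_ok() call above validates the per-block v5 header. Conceptually (this is a sketch, not the kernel's exact signature), the check is:

/* conceptual sketch of the v5 symlink header validation */
static bool
symlink_hdr_ok_sketch(struct xfs_dsymlink_hdr *hdr, xfs_ino_t ino,
		      uint32_t offset, uint32_t size)
{
	if (hdr->sl_magic != cpu_to_be32(XFS_SYMLINK_MAGIC))
		return false;
	if (be64_to_cpu(hdr->sl_owner) != ino)
		return false;
	if (be32_to_cpu(hdr->sl_offset) != offset)
		return false;
	if (be32_to_cpu(hdr->sl_bytes) != size)
		return false;
	return true;
}
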
Example #12
int
xfs_symlink(
	struct xfs_inode	*dp,
	struct xfs_name		*link_name,
	const char		*target_path,
	umode_t			mode,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_trans	*tp = NULL;
	struct xfs_inode	*ip = NULL;
	int			error = 0;
	int			pathlen;
	struct xfs_bmap_free	free_list;
	xfs_fsblock_t		first_block;
	bool			unlock_dp_on_error = false;
	uint			cancel_flags;
	int			committed;
	xfs_fileoff_t		first_fsb;
	xfs_filblks_t		fs_blocks;
	int			nmaps;
	struct xfs_bmbt_irec	mval[XFS_SYMLINK_MAPS];
	xfs_daddr_t		d;
	const char		*cur_chunk;
	int			byte_cnt;
	int			n;
	xfs_buf_t		*bp;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	uint			resblks;

	*ipp = NULL;

	trace_xfs_symlink(dp, link_name);

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	/*
	 * Check component lengths of the target path name.
	 */
	pathlen = strlen(target_path);
	if (pathlen >= MAXPATHLEN)      /* total string too long */
		return XFS_ERROR(ENAMETOOLONG);

	udqp = gdqp = NULL;
	if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
		prid = xfs_get_projid(dp);
	else
		prid = XFS_PROJID_DEFAULT;

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp,
			xfs_kuid_to_uid(current_fsuid()),
			xfs_kgid_to_gid(current_fsgid()), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		goto std_return;

	tp = xfs_trans_alloc(mp, XFS_TRANS_SYMLINK);
	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
	/*
	 * Check whether the symlink will fit into the inode data fork.
	 * There can't be any attributes yet, so we get the whole variable
	 * part.
	 */
	if (pathlen <= XFS_LITINO(mp, dp->i_d.di_version))
		fs_blocks = 0;
	else
		fs_blocks = xfs_symlink_blocks(mp, pathlen);
	resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_symlink, resblks, 0);
	if (error == ENOSPC && fs_blocks == 0) {
		resblks = 0;
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_symlink, 0, 0);
	}
	if (error) {
		cancel_flags = 0;
		goto error_return;
	}

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = true;

	/*
	 * Check whether the directory allows new symlinks or not.
	 */
	if (dp->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) {
		error = XFS_ERROR(EPERM);
		goto error_return;
	}

	/*
	 * Reserve disk quota : blocks and inode.
	 */
	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
						pdqp, resblks, 1, 0);
	if (error)
		goto error_return;

	/*
	 * Check for ability to enter directory entry, if no space reserved.
	 */
	error = xfs_dir_canenter(tp, dp, link_name, resblks);
	if (error)
		goto error_return;
	/*
	 * Initialize the bmap freelist prior to calling either
	 * bmapi or the directory create code.
	 */
	xfs_bmap_init(&free_list, &first_block);

	/*
	 * Allocate an inode for the symlink.
	 */
	error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0,
			       prid, resblks > 0, &ip, NULL);
	if (error) {
		if (error == ENOSPC)
			goto error_return;
		goto error1;
	}

	/*
	 * An error after we've joined dp to the transaction will result in the
	 * transaction cancel unlocking dp so don't do it explicitly in the
	 * error path.
	 */
	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	unlock_dp_on_error = false;

	/*
	 * Also attach the dquot(s) to it, if applicable.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	if (resblks)
		resblks -= XFS_IALLOC_SPACE_RES(mp);
	/*
	 * If the symlink will fit into the inode, write it inline.
	 */
	if (pathlen <= XFS_IFORK_DSIZE(ip)) {
		xfs_idata_realloc(ip, pathlen, XFS_DATA_FORK);
		memcpy(ip->i_df.if_u1.if_data, target_path, pathlen);
		ip->i_d.di_size = pathlen;

		/*
		 * The inode was initially created in extent format.
		 */
		ip->i_df.if_flags &= ~(XFS_IFEXTENTS | XFS_IFBROOT);
		ip->i_df.if_flags |= XFS_IFINLINE;

		ip->i_d.di_format = XFS_DINODE_FMT_LOCAL;
		xfs_trans_log_inode(tp, ip, XFS_ILOG_DDATA | XFS_ILOG_CORE);

	} else {
		int	offset;

		first_fsb = 0;
		nmaps = XFS_SYMLINK_MAPS;

		error = xfs_bmapi_write(tp, ip, first_fsb, fs_blocks,
				  XFS_BMAPI_METADATA, &first_block, resblks,
				  mval, &nmaps, &free_list);
		if (error)
			goto error2;

		if (resblks)
			resblks -= fs_blocks;
		ip->i_d.di_size = pathlen;
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

		cur_chunk = target_path;
		offset = 0;
		for (n = 0; n < nmaps; n++) {
			char	*buf;

			d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
			byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
			bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
					       BTOBB(byte_cnt), 0);
			if (!bp) {
				error = ENOMEM;
				goto error2;
			}
			bp->b_ops = &xfs_symlink_buf_ops;

			byte_cnt = XFS_SYMLINK_BUF_SPACE(mp, byte_cnt);
			byte_cnt = min(byte_cnt, pathlen);

			buf = bp->b_addr;
			buf += xfs_symlink_hdr_set(mp, ip->i_ino, offset,
						   byte_cnt, bp);

			memcpy(buf, cur_chunk, byte_cnt);

			cur_chunk += byte_cnt;
			pathlen -= byte_cnt;
			offset += byte_cnt;

			xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SYMLINK_BUF);
			xfs_trans_log_buf(tp, bp, 0, (buf + byte_cnt - 1) -
							(char *)bp->b_addr);
		}
		ASSERT(pathlen == 0);
	}

	/*
	 * Create the directory entry for the symlink.
	 */
	error = xfs_dir_createname(tp, dp, link_name, ip->i_ino,
					&first_block, &free_list, resblks);
	if (error)
		goto error2;
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

	/*
	 * If this is a synchronous mount, make sure that the
	 * symlink transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
		xfs_trans_set_sync(tp);
	}

	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error) {
		goto error2;
	}
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

 error2:
	IRELE(ip);
 error1:
	xfs_bmap_cancel(&free_list);
	cancel_flags |= XFS_TRANS_ABORT;
 error_return:
	xfs_trans_cancel(tp, cancel_flags);
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	if (unlock_dp_on_error)
		xfs_iunlock(dp, XFS_ILOCK_EXCL);
 std_return:
	return error;
}
Example #13
int
xfs_symlink(
	struct xfs_inode	*dp,
	struct xfs_name		*link_name,
	const char		*target_path,
	umode_t			mode,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_trans	*tp = NULL;
	struct xfs_inode	*ip = NULL;
	int			error = 0;
	int			pathlen;
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		first_block;
	bool                    unlock_dp_on_error = false;
	xfs_fileoff_t		first_fsb;
	xfs_filblks_t		fs_blocks;
	int			nmaps;
	struct xfs_bmbt_irec	mval[XFS_SYMLINK_MAPS];
	xfs_daddr_t		d;
	const char		*cur_chunk;
	int			byte_cnt;
	int			n;
	xfs_buf_t		*bp;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	uint			resblks;

	*ipp = NULL;

	trace_xfs_symlink(dp, link_name);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * Check component lengths of the target path name.
	 */
	pathlen = strlen(target_path);
	if (pathlen >= MAXPATHLEN)      /* total string too long */
		return -ENAMETOOLONG;

	udqp = gdqp = NULL;
	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp,
			xfs_kuid_to_uid(current_fsuid()),
			xfs_kgid_to_gid(current_fsgid()), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	/*
	 * Check whether the symlink will fit into the inode data fork.
	 * There can't be any attributes yet, so we get the whole variable
	 * part.
	 */
	if (pathlen <= XFS_LITINO(mp, dp->i_d.di_version))
		fs_blocks = 0;
	else
		fs_blocks = xfs_symlink_blocks(mp, pathlen);
	resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_symlink, resblks, 0, 0, &tp);
	if (error == -ENOSPC && fs_blocks == 0) {
		resblks = 0;
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_symlink, 0, 0, 0,
				&tp);
	}
	if (error)
		goto out_release_inode;

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = true;

	/*
	 * Check whether the directory allows new symlinks or not.
	 */
	if (dp->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) {
		error = -EPERM;
		goto out_trans_cancel;
	}

	/*
	 * Reserve disk quota : blocks and inode.
	 */
	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
						pdqp, resblks, 1, 0);
	if (error)
		goto out_trans_cancel;

	/*
	 * Check for ability to enter directory entry, if no space reserved.
	 */
	if (!resblks) {
		error = xfs_dir_canenter(tp, dp, link_name);
		if (error)
			goto out_trans_cancel;
	}
	/*
	 * Initialize the bmap freelist prior to calling either
	 * bmapi or the directory create code.
	 */
	xfs_defer_init(&dfops, &first_block);

	/*
	 * Allocate an inode for the symlink.
	 */
	error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0,
			       prid, resblks > 0, &ip, NULL);
	if (error)
		goto out_trans_cancel;

	/*
	 * Now we join the directory inode to the transaction.  We do not do it
	 * earlier because xfs_dir_ialloc might commit the previous transaction
	 * (and release all the locks).  An error from here on will result in
	 * the transaction cancel unlocking dp so don't do it explicitly in the
	 * error path.
	 */
	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	unlock_dp_on_error = false;

	/*
	 * Also attach the dquot(s) to it, if applicable.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	if (resblks)
		resblks -= XFS_IALLOC_SPACE_RES(mp);
	/*
	 * If the symlink will fit into the inode, write it inline.
	 */
	if (pathlen <= XFS_IFORK_DSIZE(ip)) {
		xfs_init_local_fork(ip, XFS_DATA_FORK, target_path, pathlen);

		ip->i_d.di_size = pathlen;
		ip->i_d.di_format = XFS_DINODE_FMT_LOCAL;
		xfs_trans_log_inode(tp, ip, XFS_ILOG_DDATA | XFS_ILOG_CORE);
	} else {
		int	offset;

		first_fsb = 0;
		nmaps = XFS_SYMLINK_MAPS;

		error = xfs_bmapi_write(tp, ip, first_fsb, fs_blocks,
				  XFS_BMAPI_METADATA, &first_block, resblks,
				  mval, &nmaps, &dfops);
		if (error)
			goto out_bmap_cancel;

		if (resblks)
			resblks -= fs_blocks;
		ip->i_d.di_size = pathlen;
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

		cur_chunk = target_path;
		offset = 0;
		for (n = 0; n < nmaps; n++) {
			char	*buf;

			d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
			byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
			bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
					       BTOBB(byte_cnt), 0);
			if (!bp) {
				error = -ENOMEM;
				goto out_bmap_cancel;
			}
			bp->b_ops = &xfs_symlink_buf_ops;

			byte_cnt = XFS_SYMLINK_BUF_SPACE(mp, byte_cnt);
			byte_cnt = min(byte_cnt, pathlen);

			buf = bp->b_addr;
			buf += xfs_symlink_hdr_set(mp, ip->i_ino, offset,
						   byte_cnt, bp);

			memcpy(buf, cur_chunk, byte_cnt);

			cur_chunk += byte_cnt;
			pathlen -= byte_cnt;
			offset += byte_cnt;

			xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SYMLINK_BUF);
			xfs_trans_log_buf(tp, bp, 0, (buf + byte_cnt - 1) -
							(char *)bp->b_addr);
		}
		ASSERT(pathlen == 0);
	}

	/*
	 * Create the directory entry for the symlink.
	 */
	error = xfs_dir_createname(tp, dp, link_name, ip->i_ino,
					&first_block, &dfops, resblks);
	if (error)
		goto out_bmap_cancel;
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

	/*
	 * If this is a synchronous mount, make sure that the
	 * symlink transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
		xfs_trans_set_sync(tp);
	}

	error = xfs_defer_finish(&tp, &dfops, NULL);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
out_trans_cancel:
	xfs_trans_cancel(tp);
out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode.  This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		IRELE(ip);
	}

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	if (unlock_dp_on_error)
		xfs_iunlock(dp, XFS_ILOCK_EXCL);
	return error;
}
Example #14
static void
zero_log(
	struct xfs_mount	*mp)
{
	int			error;
	xfs_daddr_t		head_blk;
	xfs_daddr_t		tail_blk;
	struct xlog		*log = mp->m_log;

	memset(log, 0, sizeof(struct xlog));
	/* "x" is xfs_repair's global libxfs init structure */
	x.logBBsize = XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
	x.logBBstart = XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart);
	x.lbsize = BBSIZE;
	if (xfs_sb_version_hassector(&mp->m_sb))
		x.lbsize <<= (mp->m_sb.sb_logsectlog - BBSHIFT);

	log->l_dev = mp->m_logdev_targp;
	log->l_logBBsize = x.logBBsize;
	log->l_logBBstart = x.logBBstart;
	log->l_sectBBsize  = BTOBB(x.lbsize);
	log->l_mp = mp;
	if (xfs_sb_version_hassector(&mp->m_sb)) {
		log->l_sectbb_log = mp->m_sb.sb_logsectlog - BBSHIFT;
		ASSERT(log->l_sectbb_log <= mp->m_sectbb_log);
		/* for larger sector sizes, must have v2 or external log */
		ASSERT(log->l_sectbb_log == 0 ||
			log->l_logBBstart == 0 ||
			xfs_sb_version_haslogv2(&mp->m_sb));
		ASSERT(mp->m_sb.sb_logsectlog >= BBSHIFT);
	}
	log->l_sectbb_mask = (1 << log->l_sectbb_log) - 1;

	/*
	 * Find the log head and tail and alert the user to the situation if the
	 * log appears corrupted or contains data. In either case, we do not
	 * proceed past this point unless the user explicitly requests to zap
	 * the log.
	 */
	error = xlog_find_tail(log, &head_blk, &tail_blk);
	if (error) {
		do_warn(
		_("zero_log: cannot find log head/tail (xlog_find_tail=%d)\n"),
			error);
		if (!no_modify && !zap_log)
			do_error(_(
"ERROR: The log head and/or tail cannot be discovered. Attempt to mount the\n"
"filesystem to replay the log or use the -L option to destroy the log and\n"
"attempt a repair.\n"));
	} else {
		if (verbose) {
			do_warn(
	_("zero_log: head block %" PRId64 " tail block %" PRId64 "\n"),
				head_blk, tail_blk);
		}
		if (!no_modify && head_blk != tail_blk) {
			if (zap_log) {
				do_warn(_(
"ALERT: The filesystem has valuable metadata changes in a log which is being\n"
"destroyed because the -L option was used.\n"));
			} else {
				do_warn(_(
"ERROR: The filesystem has valuable metadata changes in a log which needs to\n"
"be replayed.  Mount the filesystem to replay the log, and unmount it before\n"
"re-running xfs_repair.  If you are unable to mount the filesystem, then use\n"
"the -L option to destroy the log and attempt a repair.\n"
"Note that destroying the log may cause corruption -- please attempt a mount\n"
"of the filesystem before doing this.\n"));
				exit(2);
			}
		}
	}

	/*
	 * Only clear the log when explicitly requested. Doing so is unnecessary
	 * unless something is wrong. Further, this resets the current LSN of
	 * the filesystem and creates more work for repair of v5 superblock
	 * filesystems.
	 */
	if (!no_modify && zap_log) {
		libxfs_log_clear(log->l_dev, NULL,
			XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart),
			(xfs_extlen_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks),
			&mp->m_sb.sb_uuid,
			xfs_sb_version_haslogv2(&mp->m_sb) ? 2 : 1,
			mp->m_sb.sb_logsunit, XLOG_FMT, XLOG_INIT_CYCLE, true);

		/* update the log data structure with new state */
		error = xlog_find_tail(log, &head_blk, &tail_blk);
		if (error || head_blk != tail_blk)
			do_error(_("failed to clear log"));
	}

	/*
	 * Finally, seed the max LSN from the current state of the log if this
	 * is a v5 filesystem.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb))
		libxfs_max_lsn = log->l_last_sync_lsn;
}
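
A worked example of the sector-size math above, assuming 4 KiB log sectors:

/* sb_logsectlog = 12, BBSHIFT = 9:
 *   lbsize       = BBSIZE << (12 - 9) = 512 << 3 = 4096 bytes
 *   l_sectbb_log = 12 - 9 = 3
 *   l_sectBBsize = BTOBB(4096) = 8 basic blocks
 */
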
Example #15
int
xlog_print_record(
	struct xlog		*log,
	int			fd,
	int			num_ops,
	int			len,
	int			*read_type,
	char			**partial_buf,
	xlog_rec_header_t	*rhead,
	xlog_rec_ext_header_t	*xhdrs,
	int			bad_hdr_warn)
{
    char		*buf, *ptr;
    int			read_len, skip, lost_context = 0;
    int			ret, n, i, j, k;

    if (print_no_print)
	    return NO_ERROR;

    if (!len) {
	printf("\n");
	return NO_ERROR;
    }

    /* read_len must read up to some block boundary */
    read_len = (int) BBTOB(BTOBB(len));

    /* read_type => don't malloc() new buffer, use old one */
    if (*read_type == FULL_READ) {
	if ((ptr = buf = malloc(read_len)) == NULL) {
	    fprintf(stderr, _("%s: xlog_print_record: malloc failed\n"), progname);
	    exit(1);
	}
    } else {
	read_len -= *read_type;
	buf = (char *)((intptr_t)(*partial_buf) + (intptr_t)(*read_type));
	ptr = *partial_buf;
    }
    if ((ret = (int) read(fd, buf, read_len)) == -1) {
	fprintf(stderr, _("%s: xlog_print_record: read error\n"), progname);
	exit(1);
    }
    /* Did we overflow the end? */
    if (*read_type == FULL_READ &&
	BLOCK_LSN(be64_to_cpu(rhead->h_lsn)) + BTOBB(read_len) >=
		logBBsize) {
	*read_type = BBTOB(logBBsize - BLOCK_LSN(be64_to_cpu(rhead->h_lsn))-1);
	*partial_buf = buf;
	return PARTIAL_READ;
    }

    /* Did we read everything? */
    if ((ret == 0 && read_len != 0) || ret != read_len) {
	*read_type = ret;
	*partial_buf = buf;
	return PARTIAL_READ;
    }
    if (*read_type != FULL_READ)
	read_len += *read_type;

    /* Everything read in.  Start from beginning of buffer
     * Unpack the data, by putting the saved cycle-data back
     * into the first word of each BB.
     * Do some checks.
     */
    buf = ptr;
    for (i = 0; ptr < buf + read_len; ptr += BBSIZE, i++) {
	xlog_rec_header_t *rechead = (xlog_rec_header_t *)ptr;

	/* sanity checks */
	if (be32_to_cpu(rechead->h_magicno) == XLOG_HEADER_MAGIC_NUM) {
	    /* data should not have the magic number as its first word;
	     * it should be the cycle #
	     */
	    free(buf);
	    return -1;
	} else {
	    /* verify cycle#
	     * FIXME: cycle+1 should be a macro pv#900369
	     */
	    if (be32_to_cpu(rhead->h_cycle) !=
			be32_to_cpu(*(__be32 *)ptr)) {
		if ((*read_type == FULL_READ) ||
		    (be32_to_cpu(rhead->h_cycle) + 1 !=
				be32_to_cpu(*(__be32 *)ptr))) {
			free(buf);
			return -1;
		}
	    }
	}

	/* copy back the data from the header */
	if (i < XLOG_HEADER_CYCLE_SIZE / BBSIZE) {
		/* from 1st header */
		*(__be32 *)ptr = rhead->h_cycle_data[i];
	}
	else {
		ASSERT(xhdrs != NULL);
		/* from extra headers */
		j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
		k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
		*(__be32 *)ptr = xhdrs[j-1].xh_cycle_data[k];
	}

    }

    ptr = buf;
    for (i=0; i<num_ops; i++) {
	int continued;

	xlog_op_header_t *op_head = (xlog_op_header_t *)ptr;

	print_xlog_op_line();
	xlog_print_op_header(op_head, i, &ptr);
	continued = ((op_head->oh_flags & XLOG_WAS_CONT_TRANS) ||
		     (op_head->oh_flags & XLOG_CONTINUE_TRANS));

	if (continued && be32_to_cpu(op_head->oh_len) == 0)
		continue;

	if (print_no_data) {
	    for (n = 0; n < be32_to_cpu(op_head->oh_len); n++) {
		printf("0x%02x ", (unsigned int)*ptr);
		if (n % 16 == 15)
			printf("\n");
		ptr++;
	    }
	    printf("\n");
	    continue;
	}

	/* print transaction data */
	if (xlog_print_find_tid(be32_to_cpu(op_head->oh_tid),
				op_head->oh_flags & XLOG_WAS_CONT_TRANS)) {
	    printf(_("Left over region from split log item\n"));
	    /* Skip this leftover bit */
	    ptr += be32_to_cpu(op_head->oh_len);
	    /* We've lost context; don't complain if next one looks bad too */
	    lost_context = 1;
	    continue;
	}

	if (be32_to_cpu(op_head->oh_len) != 0) {
	    if (*(uint *)ptr == XFS_TRANS_HEADER_MAGIC) {
		skip = xlog_print_trans_header(&ptr,
					be32_to_cpu(op_head->oh_len));
	    } else {
		switch (*(unsigned short *)ptr) {
		    case XFS_LI_BUF: {
			skip = xlog_print_trans_buffer(&ptr,
					be32_to_cpu(op_head->oh_len),
					&i, num_ops);
			break;
		    }
		    case XFS_LI_ICREATE: {
			skip = xlog_print_trans_icreate(&ptr,
					be32_to_cpu(op_head->oh_len),
					&i, num_ops);
			break;
		    }
		    case XFS_LI_INODE: {
			skip = xlog_print_trans_inode(log, &ptr,
					be32_to_cpu(op_head->oh_len),
					&i, num_ops, continued);
			break;
		    }
		    case XFS_LI_DQUOT: {
			skip = xlog_print_trans_dquot(&ptr,
					be32_to_cpu(op_head->oh_len),
					&i, num_ops);
			break;
		    }
		    case XFS_LI_EFI: {
			skip = xlog_print_trans_efi(&ptr,
					be32_to_cpu(op_head->oh_len),
					continued);
			break;
		    }
		    case XFS_LI_EFD: {
			skip = xlog_print_trans_efd(&ptr,
					be32_to_cpu(op_head->oh_len));
			break;
		    }
		    case XFS_LI_QUOTAOFF: {
			skip = xlog_print_trans_qoff(&ptr,
					be32_to_cpu(op_head->oh_len));
			break;
		    }
		    case XLOG_UNMOUNT_TYPE: {
			printf(_("Unmount filesystem\n"));
			skip = 0;
			break;
		    }
		    default: {
			if (bad_hdr_warn && !lost_context) {
				fprintf(stderr,
			_("%s: unknown log operation type (%x)\n"),
					progname, *(unsigned short *)ptr);
				if (print_exit) {
					free(buf);
					return BAD_HEADER;
				}
			} else {
				printf(
			_("Left over region from split log item\n"));
			}
			skip = 0;
			ptr += be32_to_cpu(op_head->oh_len);
			lost_context = 0;
		    }
		} /* switch */
	    } /* else */
	    if (skip != 0)
		xlog_print_add_to_trans(be32_to_cpu(op_head->oh_tid), skip);
	}
    }
    printf("\n");
    free(buf);
    return NO_ERROR;
}	/* xlog_print_record */
Example #16
/*
 * This code is gross and needs to be rewritten.
 */
void xfs_log_print(struct xlog  *log,
		   int          fd,
		   int		print_block_start)
{
    char			hbuf[XLOG_HEADER_SIZE];
    xlog_rec_header_t		*hdr = (xlog_rec_header_t *)&hbuf[0];
    xlog_rec_ext_header_t 	*xhdrs = NULL;
    int				num_ops, len, num_hdrs = 1;
    xfs_daddr_t			block_end = 0, block_start, blkno, error;
    xfs_daddr_t			zeroed_blkno = 0, cleared_blkno = 0;
    int				read_type = FULL_READ;
    char			*partial_buf;
    int				zeroed = 0;
    int				cleared = 0;
    int				first_hdr_found = 0;

    logBBsize = log->l_logBBsize;

    /*
     * Normally, block_start and block_end are the same value since we
     * are printing the entire log.  However, if the start block is given,
     * we still end at the end of the logical log.
     */
    if ((error = xlog_print_find_oldest(log, &block_end))) {
	fprintf(stderr, _("%s: problem finding oldest LR\n"), progname);
	return;
    }
    if (print_block_start == -1)
	block_start = block_end;
    else
	block_start = print_block_start;
    xlog_print_lseek(log, fd, block_start, SEEK_SET);
    blkno = block_start;

    for (;;) {
	if (read(fd, hbuf, 512) == 0) {
	    printf(_("%s: physical end of log\n"), progname);
	    print_xlog_record_line();
	    break;
	}
	if (print_only_data) {
	    printf(_("BLKNO: %lld\n"), (long long)blkno);
	    xlog_recover_print_data(hbuf, 512);
	    blkno++;
	    goto loop;
	}
	num_ops = xlog_print_rec_head(hdr, &len, first_hdr_found);
	blkno++;

	if (zeroed && num_ops != ZEROED_LOG) {
	    printf(_("%s: after %d zeroed blocks\n"), progname, zeroed);
	    /* once we find zeroed blocks - that's all we expect */
	    print_xlog_bad_zeroed(blkno-1);
	    /* reset count since we're assuming previous zeroed blocks
	     * were bad
	     */
	    zeroed = 0;
	}

	if (num_ops == ZEROED_LOG ||
	    num_ops == CLEARED_BLKS ||
	    num_ops == BAD_HEADER) {
	    if (num_ops == ZEROED_LOG) {
		if (zeroed == 0)
		    zeroed_blkno = blkno-1;
		zeroed++;
	    }
	    else if (num_ops == CLEARED_BLKS) {
		if (cleared == 0)
		    cleared_blkno = blkno-1;
		cleared++;
	    } else {
		if (!first_hdr_found)
			block_start = blkno;
		else
			print_xlog_bad_header(blkno-1, hbuf);
	    }

	    goto loop;
	}

	if (be32_to_cpu(hdr->h_version) == 2) {
	    if (xlog_print_extended_headers(fd, len, &blkno, hdr, &num_hdrs, &xhdrs) != 0)
		break;
	}

	error =	xlog_print_record(log, fd, num_ops, len, &read_type, &partial_buf,
				  hdr, xhdrs, first_hdr_found);
	first_hdr_found++;
	switch (error) {
	    case 0: {
		blkno += BTOBB(len);
		if (print_block_start != -1 &&
		    blkno >= block_end)		/* If start specified, we */
			goto end;		/* end early */
		break;
	    }
	    case -1: {
		print_xlog_bad_data(blkno-1);
		if (print_block_start != -1 &&
		    blkno >= block_end)		/* If start specified, */
			goto end;		/* we end early */
		xlog_print_lseek(log, fd, blkno, SEEK_SET);
		goto loop;
	    }
	    case PARTIAL_READ: {
		print_xlog_record_line();
		printf(_("%s: physical end of log\n"), progname);
		print_xlog_record_line();
		blkno = 0;
		xlog_print_lseek(log, fd, 0, SEEK_SET);
		/*
		 * We may have hit the end of the log when we started at 0.
		 * In this case, just end.
		 */
		if (block_start == 0)
			goto end;
		goto partial_log_read;
	    }
	    default: xlog_panic(_("illegal value"));
	}
	print_xlog_record_line();
loop:
	if (blkno >= logBBsize) {
	    if (cleared) {
		printf(_("%s: skipped %d cleared blocks in range: %lld - %lld\n"),
			progname, cleared,
			(long long)(cleared_blkno),
			(long long)(cleared + cleared_blkno - 1));
		if (cleared == logBBsize)
		    printf(_("%s: totally cleared log\n"), progname);

		cleared=0;
	    }
	    if (zeroed) {
		printf(_("%s: skipped %d zeroed blocks in range: %lld - %lld\n"),
			progname, zeroed,
			(long long)(zeroed_blkno),
			(long long)(zeroed + zeroed_blkno - 1));
		if (zeroed == logBBsize)
		    printf(_("%s: totally zeroed log\n"), progname);

		zeroed=0;
	    }
	    printf(_("%s: physical end of log\n"), progname);
	    print_xlog_record_line();
	    break;
	}
    }

    /* Do we need to print the first part of physical log? */
    if (block_start != 0) {
	blkno = 0;
	xlog_print_lseek(log, fd, 0, SEEK_SET);
	for (;;) {
	    if (read(fd, hbuf, 512) == 0) {
		xlog_panic(_("xlog_find_head: bad read"));
	    }
	    if (print_only_data) {
		printf(_("BLKNO: %lld\n"), (long long)blkno);
		xlog_recover_print_data(hbuf, 512);
		blkno++;
		goto loop2;
	    }
	    num_ops = xlog_print_rec_head(hdr, &len, first_hdr_found);
	    blkno++;

	    if (num_ops == ZEROED_LOG ||
		num_ops == CLEARED_BLKS ||
		num_ops == BAD_HEADER) {
		/* we only expect zeroed log entries or cleared log
		 * entries at the end of the _physical_ log,
		 * so treat them the same as bad blocks here
		 */
		print_xlog_bad_header(blkno-1, hbuf);

		if (blkno >= block_end)
		    break;
		continue;
	    }

	    if (be32_to_cpu(hdr->h_version) == 2) {
		if (xlog_print_extended_headers(fd, len, &blkno, hdr, &num_hdrs, &xhdrs) != 0)
		    break;
	    }

partial_log_read:
	    error= xlog_print_record(log, fd, num_ops, len, &read_type,
				    &partial_buf, (xlog_rec_header_t *)hbuf,
				    xhdrs, first_hdr_found);
	    if (read_type != FULL_READ)
		len -= read_type;
	    read_type = FULL_READ;
	    if (!error)
		blkno += BTOBB(len);
	    else {
		print_xlog_bad_data(blkno-1);
		xlog_print_lseek(log, fd, blkno, SEEK_SET);
		goto loop2;
	    }
	    print_xlog_record_line();
loop2:
	    if (blkno >= block_end)
		break;
	}
    }

end:
    printf(_("%s: logical end of log\n"), progname);
    print_xlog_record_line();
}
Example #17
/* for V2 logs read each extra hdr and print it out */
static int
xlog_print_extended_headers(
	int			fd,
	int			len,
	xfs_daddr_t		*blkno,
	xlog_rec_header_t	*hdr,
	int 			*ret_num_hdrs,
	xlog_rec_ext_header_t	**ret_xhdrs)
{
	int			i, j;
	int			coverage_bb;
	int 			num_hdrs;
	int 			num_required;
	char			xhbuf[XLOG_HEADER_SIZE];
	xlog_rec_ext_header_t	*x;

	num_required = howmany(len, XLOG_HEADER_CYCLE_SIZE);
	num_hdrs = be32_to_cpu(hdr->h_size) / XLOG_HEADER_CYCLE_SIZE;
	if (be32_to_cpu(hdr->h_size) % XLOG_HEADER_CYCLE_SIZE)
		num_hdrs++;

	if (num_required > num_hdrs) {
	    print_xlog_bad_reqd_hdrs((*blkno)-1, num_required, num_hdrs);
	}

	if (num_hdrs == 1) {
	    free(*ret_xhdrs);
	    *ret_xhdrs = NULL;
	    *ret_num_hdrs = 1;
	    return 0;
	}

	if (*ret_xhdrs == NULL || num_hdrs > *ret_num_hdrs) {
	    xlog_reallocate_xhdrs(num_hdrs, ret_xhdrs);
	}

	*ret_num_hdrs = num_hdrs;

	/* don't include 1st header */
	for (i = 1, x = *ret_xhdrs; i < num_hdrs; i++, (*blkno)++, x++) {
	    /* read one extra header blk */
	    if (read(fd, xhbuf, 512) == 0) {
		printf(_("%s: physical end of log\n"), progname);
		print_xlog_record_line();
		/* reached the end so return 1 */
		return 1;
	    }
	    if (print_only_data) {
		printf(_("BLKNO: %lld\n"), (long long)*blkno);
		xlog_recover_print_data(xhbuf, 512);
	    }
	    else {
		if (i == num_hdrs - 1) {
		    /* last header */
		    coverage_bb = BTOBB(len) %
				    (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
		}
		else {
		    /* earlier header */
		    coverage_bb = XLOG_HEADER_CYCLE_SIZE / BBSIZE;
		}
		xlog_print_rec_xhead((xlog_rec_ext_header_t*)xhbuf, coverage_bb);
	    }

	    /* Copy from buffer into xhdrs array for later.
	     * Could endian-convert here, but then the code later on
	     * would look asymmetric with the normal 1-header case,
	     * which does the endian conversion on access.
	     */
	    x->xh_cycle = ((xlog_rec_ext_header_t*)xhbuf)->xh_cycle;
	    for (j = 0; j < XLOG_HEADER_CYCLE_SIZE / BBSIZE; j++) {
		x->xh_cycle_data[j] =
		    ((xlog_rec_ext_header_t*)xhbuf)->xh_cycle_data[j];
	    }
	}
	return 0;
}
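
For scale: XLOG_HEADER_CYCLE_SIZE is 32 KiB, so each header's cycle-data array covers 64 basic blocks of record data, and a record of len bytes needs howmany(len, XLOG_HEADER_CYCLE_SIZE) headers in total:

/* worked example: XLOG_HEADER_CYCLE_SIZE = 32 KiB = 64 basic blocks.
 * For a 100 KiB record: howmany(102400, 32768) = 4 headers total,
 * i.e. 1 normal record header + 3 extended headers.
 */
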
Example #18
extern int
HsmInitFileContext(
	hsm_f_ctxt_t	*contextp,
const	xfs_bstat_t	*statp)
{
	dmf_f_ctxt_t	*dmf_f_ctxtp = (dmf_f_ctxt_t *)contextp;
	XFSattrvalue1_t	*dmfattrp;
	int		attrlen;
	void		*hanp;
	size_t		hlen;
	dm_ino_t	ino;
	dm_igen_t	igen;
	int		fd;
	int		error;

	dmf_f_ctxtp->candidate = 0; /* assume file will NOT be of interest */

	/* Try and rule out a dualstate inode by doing some quick tests. */

	if ((statp->bs_mode & S_IFMT) != S_IFREG) {
		return 0;	/* not a regular file */
	}
	if ((statp->bs_xflags & XFS_XFLAG_HASATTR) == 0) {
		return 0;	/* no DMF attribute exists */
	}
	if ((statp->bs_dmevmask & DUALSTATE_BITS) != DUALSTATE_BITS) {
		return 0;	/* non-dualstate managed region bits */
	}

	/* We have a likely candidate, so we have to pay the price and look
	   for the DMF attribute.  (It could be in a disk block separate from
	   the inode.)
	*/

	ino = (dm_ino_t)statp->bs_ino;
	igen = (dm_igen_t)statp->bs_gen;
	if (dm_make_handle(&dmf_f_ctxtp->fsys.fsid, &ino, &igen, &hanp, &hlen) != 0) {
		return 0;	/* can't make a proper handle */
	}

	/* The following code should eventually be replaced with the
	   attr_multif-by-handle call when it is available.
	*/

	fd = open_by_handle(hanp, hlen, O_RDONLY);
	dm_handle_free(hanp, hlen);
	if (fd < 0) {
		return 0;
	}
	attrlen = sizeof(dmf_f_ctxtp->attrval);
	error = attr_getf(fd, DMF_ATTR_NAME, dmf_f_ctxtp->attrval,
		&attrlen, ATTR_ROOT);
	(void)close(fd);
	if (error) {
		return 0;
	}

	if (attrlen != DMF_ATTR_LEN) {
		return 0;	/* not the right length to be the attribute */
	}

	dmfattrp = (XFSattrvalue1_t *)dmf_f_ctxtp->attrval;
	if (dmfattrp->version != XFS_ATTR_VERSION_1) {
		return 0;	/* we don't support this version */
	}
	if (dmfattrp->state[0] != '\0') {
		return 0;	/* should be zero */
	}
	if (dmfattrp->state[1] != DMF_ST_DUALSTATE &&
	    dmfattrp->state[1] != DMF_ST_UNMIGRATING) {
		return 0;
	}

	/* We have a DMF dual state file. */

	dmf_f_ctxtp->candidate = 1;
	dmf_f_ctxtp->filesize = BTOBB(statp->bs_size);
	return 0;
}
Example #19
/*
 * Zero file bytes between startoff and endoff inclusive.
 * The iolock is held exclusive and no blocks are buffered.
 *
 * This function is used by xfs_free_file_space() to zero
 * partial blocks when the range to free is not block aligned.
 * When unreserving space with boundaries that are not block
 * aligned we round up the start and round down the end
 * boundaries and then use this function to zero the parts of
 * the blocks that got dropped during the rounding.
 */
STATIC int
xfs_zero_remaining_bytes(
	xfs_inode_t		*ip,
	xfs_off_t		startoff,
	xfs_off_t		endoff)
{
	xfs_bmbt_irec_t		imap;
	xfs_fileoff_t		offset_fsb;
	xfs_off_t		lastoffset;
	xfs_off_t		offset;
	xfs_buf_t		*bp;
	xfs_mount_t		*mp = ip->i_mount;
	int			nimap;
	int			error = 0;

	/*
	 * Avoid doing I/O beyond eof - it's not necessary
	 * since nothing can read beyond eof.  The space will
	 * be zeroed when the file is extended anyway.
	 */
	if (startoff >= XFS_ISIZE(ip))
		return 0;

	if (endoff > XFS_ISIZE(ip))
		endoff = XFS_ISIZE(ip);

	bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp,
				  BTOBB(mp->m_sb.sb_blocksize), 0);
	if (!bp)
		return XFS_ERROR(ENOMEM);

	xfs_buf_unlock(bp);

	for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
		uint lock_mode;

		offset_fsb = XFS_B_TO_FSBT(mp, offset);
		nimap = 1;

		lock_mode = xfs_ilock_data_map_shared(ip);
		error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
		xfs_iunlock(ip, lock_mode);

		if (error || nimap < 1)
			break;
		ASSERT(imap.br_blockcount >= 1);
		ASSERT(imap.br_startoff == offset_fsb);
		lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
		if (lastoffset > endoff)
			lastoffset = endoff;
		if (imap.br_startblock == HOLESTARTBLOCK)
			continue;
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
		if (imap.br_state == XFS_EXT_UNWRITTEN)
			continue;
		XFS_BUF_UNDONE(bp);
		XFS_BUF_UNWRITE(bp);
		XFS_BUF_READ(bp);
		XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));

		if (XFS_FORCED_SHUTDOWN(mp)) {
			error = XFS_ERROR(EIO);
			break;
		}
		xfs_buf_iorequest(bp);
		error = xfs_buf_iowait(bp);
		if (error) {
			xfs_buf_ioerror_alert(bp,
					"xfs_zero_remaining_bytes(read)");
			break;
		}
		memset(bp->b_addr +
			(offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
		      0, lastoffset - offset + 1);
		XFS_BUF_UNDONE(bp);
		XFS_BUF_UNREAD(bp);
		XFS_BUF_WRITE(bp);

		if (XFS_FORCED_SHUTDOWN(mp)) {
			error = XFS_ERROR(EIO);
			break;
		}
		xfs_buf_iorequest(bp);
		error = xfs_buf_iowait(bp);
		if (error) {
			xfs_buf_ioerror_alert(bp,
					"xfs_zero_remaining_bytes(write)");
			break;
		}
	}
	xfs_buf_free(bp);
	return error;
}
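
A hedged sketch of the caller-side rounding described in the header comment: xfs_free_file_space() frees only the block-aligned interior and hands the partial-block edges to this function for zeroing (names hypothetical; block size assumed a power of two):

/* sketch: split an arbitrary byte range into a block-aligned middle
 * plus partial-block edges that must be zeroed instead of freed */
static void
split_free_range(unsigned long long start, unsigned long long end,
		 unsigned long long blksize,
		 unsigned long long *aligned_start,
		 unsigned long long *aligned_end)
{
	*aligned_start = (start + blksize - 1) & ~(blksize - 1);
	*aligned_end = end & ~(blksize - 1);
	/* [start, *aligned_start) and [*aligned_end, end] lie in
	 * partial blocks and go to xfs_zero_remaining_bytes() */
}
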
Example #20
/* copy out attributes from cache entry */
int
afs_CopyOutAttrs(struct vcache *avc, struct vattr *attrs)
{
    struct volume *tvp;
    struct cell *tcell;
#if defined(AFS_FBSD_ENV) || defined(AFS_DFBSD_ENV)
    struct vnode *vp = AFSTOV(avc);
#endif
    int fakedir = 0;

    AFS_STATCNT(afs_CopyOutAttrs);
    if (afs_fakestat_enable && avc->mvstat == AFS_MVSTAT_MTPT)
	fakedir = 1;
    attrs->va_type = fakedir ? VDIR : vType(avc);
#if defined(AFS_SGI_ENV) || defined(AFS_AIX32_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_DARWIN_ENV)
    attrs->va_mode = fakedir ? S_IFDIR | 0755 : (mode_t) (avc->f.m.Mode & 0xffff);
#else
    attrs->va_mode = fakedir ? VDIR | 0755 : avc->f.m.Mode;
#endif

    if (avc->f.m.Mode & (VSUID | VSGID)) {
	/* setuid or setgid, make sure we're allowed to run them from this cell */
	tcell = afs_GetCell(avc->f.fid.Cell, 0);
	if (tcell && (tcell->states & CNoSUID))
	    attrs->va_mode &= ~(VSUID | VSGID);
    }
#if defined(AFS_DARWIN_ENV)
    {
	if (!afs_darwin_realmodes) {
	    /* Mac OS X uses the mode bits to determine whether a file or
	     * directory is accessible, and believes them, even though under
	     * AFS they're almost assuredly wrong, especially if the local uid
	     * does not match the AFS ID.  So we set the mode bits
	     * conservatively.
	     */
	    if (S_ISDIR(attrs->va_mode)) {
		/* all access bits need to be set for directories, since even
		 * a mode 0 directory can still be used normally.
		 */
		attrs->va_mode |= ACCESSPERMS;
	    } else {
		/* for other files, replicate the user bits to group and other */
		mode_t ubits = (attrs->va_mode & S_IRWXU) >> 6;
		attrs->va_mode |= ubits | (ubits << 3);
	    }
	}
    }
#endif /* AFS_DARWIN_ENV */
    attrs->va_uid = fakedir ? 0 : avc->f.m.Owner;
    attrs->va_gid = fakedir ? 0 : avc->f.m.Group;	/* yeah! */
#if defined(AFS_SUN5_ENV)
    attrs->va_fsid = avc->v.v_vfsp->vfs_fsid.val[0];
#elif defined(AFS_DARWIN80_ENV)
    VATTR_RETURN(attrs, va_fsid, vfs_statfs(vnode_mount(AFSTOV(avc)))->f_fsid.val[0]);
#elif defined(AFS_DARWIN_ENV)
    attrs->va_fsid = avc->v->v_mount->mnt_stat.f_fsid.val[0];
#else /* ! AFS_DARWIN_ENV */
    attrs->va_fsid = 1;
#endif 
    if (avc->mvstat == AFS_MVSTAT_ROOT) {
	tvp = afs_GetVolume(&avc->f.fid, 0, READ_LOCK);
	/* The mount point's vnode. */
	if (tvp) {
	    attrs->va_nodeid =
	      afs_calc_inum(tvp->mtpoint.Cell,
			    tvp->mtpoint.Fid.Volume,
			    tvp->mtpoint.Fid.Vnode);
	    if (FidCmp(&afs_rootFid, &avc->f.fid) && !attrs->va_nodeid)
		attrs->va_nodeid = 2;
	    afs_PutVolume(tvp, READ_LOCK);
	} else
	    attrs->va_nodeid = 2;
    } else
	attrs->va_nodeid = 
	      afs_calc_inum(avc->f.fid.Cell,
			    avc->f.fid.Fid.Volume,
			    avc->f.fid.Fid.Vnode);
    attrs->va_nodeid &= 0x7fffffff;	/* Saber C hates negative inode #s! */
    attrs->va_nlink = fakedir ? 100 : avc->f.m.LinkCount;
    attrs->va_size = fakedir ? 4096 : avc->f.m.Length;
#if defined(AFS_FBSD_ENV) || defined(AFS_DFBSD_ENV)
        vnode_pager_setsize(vp, (u_long) attrs->va_size);
#endif
    attrs->va_atime.tv_sec = attrs->va_mtime.tv_sec = attrs->va_ctime.tv_sec =
	fakedir ? 0 : (int)avc->f.m.Date;
    /* set microseconds to be dataversion # so that we approximate NFS-style
     * use of mtime as a dataversion #.  We take it mod 512K because
     * microseconds *must* be less than a million, and 512K is the biggest
     * power of 2 below that.  DataVersions are typically pretty small
     * anyway, so the difference between 512K and 1000000 shouldn't matter
     * much, and "&" is a lot faster than "%".
     */
#if defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
    /* nfs on these systems puts an 0 in nsec and stores the nfs usec (aka 
     * dataversion) in va_gen */

    attrs->va_atime.tv_nsec = attrs->va_mtime.tv_nsec =
	attrs->va_ctime.tv_nsec = 0;
    attrs->va_gen = hgetlo(avc->f.m.DataVersion);
#elif defined(AFS_SGI_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_AIX41_ENV) || defined(AFS_OBSD_ENV) || defined(AFS_NBSD_ENV)
    attrs->va_atime.tv_nsec = attrs->va_mtime.tv_nsec =
	attrs->va_ctime.tv_nsec =
	(hgetlo(avc->f.m.DataVersion) & 0x7ffff) * 1000;
#else
    attrs->va_atime.tv_usec = attrs->va_mtime.tv_usec =
	attrs->va_ctime.tv_usec = (hgetlo(avc->f.m.DataVersion) & 0x7ffff);
#endif
#if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
    attrs->va_flags = 0;
#endif
#if defined(AFS_SGI_ENV) || defined(AFS_SUN5_ENV)
    attrs->va_blksize = AFS_BLKSIZE;	/* XXX Was 8192 XXX */
#else
    attrs->va_blocksize = AFS_BLKSIZE;	/* XXX Was 8192 XXX */
#endif
    attrs->va_rdev = 1;
#if defined(AFS_HPUX110_ENV)
    if (afs_globalVFS)
	attrs->va_fstype = afs_globalVFS->vfs_mtype;
#endif

    /*
     * Below we return 0 (and not 1) blocks if the file is zero length.
     * This conforms better with the other filesystems, which do return 0.
     */
#if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
    attrs->va_bytes = (attrs->va_size ? (attrs->va_size + 1023) : 1024);
#ifdef	va_bytes_rsv
    attrs->va_bytes_rsv = -1;
#endif
#elif defined(AFS_HPUX_ENV)
    attrs->va_blocks = (attrs->va_size ? ((attrs->va_size + 1023)>>10) : 0);
#elif defined(AFS_SGI_ENV)
    attrs->va_blocks = BTOBB(attrs->va_size);
#elif defined(AFS_SUN5_ENV)
    attrs->va_nblocks = (attrs->va_size ? ((attrs->va_size + 1023)>>10)<<1:0);
#else /* everything else */
    attrs->va_blocks = (attrs->va_size ? ((attrs->va_size + 1023)>>10)<<1:0);
#endif
    return 0;
}
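
A quick check of the mask trick in the comment above: 0x7ffff is 524287 (512K - 1), so the result always fits in a microseconds field:

#include <assert.h>

/* sketch: dataversion folded into tv_usec, as the comment describes */
static unsigned int
dv_to_usec(unsigned int dataversion)
{
	unsigned int usec = dataversion & 0x7ffff;	/* <= 524287 */

	assert(usec < 1000000);	/* always a valid microseconds value */
	return usec;
}
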
Example #21
int
xlog_print_rec_head(xlog_rec_header_t *head, int *len, int bad_hdr_warn)
{
    int i;
    char uub[64];
    int datalen, bbs;

    if (print_no_print)
	    return be32_to_cpu(head->h_num_logops);

    if (!head->h_magicno)
	return ZEROED_LOG;

    if (be32_to_cpu(head->h_magicno) != XLOG_HEADER_MAGIC_NUM) {
	if (bad_hdr_warn)
		printf(_("Header 0x%x wanted 0x%x\n"),
			be32_to_cpu(head->h_magicno),
			XLOG_HEADER_MAGIC_NUM);
	return BAD_HEADER;
    }

    /* check for cleared blocks written by xlog_clear_stale_blocks() */
    if (!head->h_len && !head->h_crc && !head->h_prev_block &&
	!head->h_num_logops && !head->h_size)
	return CLEARED_BLKS;

    datalen = be32_to_cpu(head->h_len);
    bbs = BTOBB(datalen);

    printf(_("cycle: %d	version: %d	"),
	    be32_to_cpu(head->h_cycle),
	    be32_to_cpu(head->h_version));
    print_lsn("	lsn", &head->h_lsn);
    print_lsn("	tail_lsn", &head->h_tail_lsn);
    printf("\n");
    printf(_("length of Log Record: %d	prev offset: %d		num ops: %d\n"),
	   datalen,
	    be32_to_cpu(head->h_prev_block),
	    be32_to_cpu(head->h_num_logops));

    if (print_overwrite) {
	printf(_("cycle num overwrites: "));
	for (i=0; i< MIN(bbs, XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++)
	    printf("%d - 0x%x  ",
		    i,
		    be32_to_cpu(head->h_cycle_data[i]));
	printf("\n");
    }

    platform_uuid_unparse(&head->h_fs_uuid, uub);
    printf(_("uuid: %s   format: "), uub);
    switch (be32_to_cpu(head->h_fmt)) {
	case XLOG_FMT_UNKNOWN:
	    printf(_("unknown\n"));
	    break;
	case XLOG_FMT_LINUX_LE:
	    printf(_("little endian linux\n"));
	    break;
	case XLOG_FMT_LINUX_BE:
	    printf(_("big endian linux\n"));
	    break;
	case XLOG_FMT_IRIX_BE:
	    printf(_("big endian irix\n"));
	    break;
	default:
	    printf("? (%d)\n", be32_to_cpu(head->h_fmt));
	    break;
    }
    printf(_("h_size: %d\n"), be32_to_cpu(head->h_size));

    *len = be32_to_cpu(head->h_len);
    return be32_to_cpu(head->h_num_logops);
}	/* xlog_print_rec_head */
Example #22
/*
 * The data and realtime block counts returned (count, used, and
 * free) are all in basic block units.
 */
static int
mount_free_space_data(
	struct fs_path		*mount,
	__uint64_t		*bcount,
	__uint64_t		*bused,
	__uint64_t		*bfree,
	__uint64_t		*icount,
	__uint64_t		*iused,
	__uint64_t		*ifree,
	__uint64_t		*rcount,
	__uint64_t		*rused,
	__uint64_t		*rfree)
{
	struct xfs_fsop_counts	fscounts;
	struct xfs_fsop_geom	fsgeo;
	struct statfs		st;
	__uint64_t		logsize, count, free;
	int			fd;

	if ((fd = open(mount->fs_dir, O_RDONLY)) < 0) {
		exitcode = 1;
		fprintf(stderr, "%s: cannot open %s: %s\n",
			progname, mount->fs_dir, strerror(errno));
		return 0;
	}

	if (platform_fstatfs(fd, &st) < 0) {
		perror("fstatfs");
		close(fd);
		return 0;
	}
	if ((xfsctl(mount->fs_dir, fd, XFS_IOC_FSGEOMETRY_V1, &fsgeo)) < 0) {
		perror("XFS_IOC_FSGEOMETRY_V1");
		close(fd);
		return 0;
	}
	if ((xfsctl(mount->fs_dir, fd, XFS_IOC_FSCOUNTS, &fscounts)) < 0) {
		perror("XFS_IOC_FSCOUNTS");
		close(fd);
		return 0;
	}

	logsize = fsgeo.logstart ? fsgeo.logblocks : 0;
	count = (fsgeo.datablocks - logsize) * fsgeo.blocksize;
	free  = fscounts.freedata * fsgeo.blocksize;
	*bcount = BTOBB(count);
	*bfree  = BTOBB(free);
	*bused  = BTOBB(count - free);

	*icount = st.f_files;
	*ifree  = st.f_ffree;
	*iused  = st.f_files - st.f_ffree;

	count = fsgeo.rtextents * fsgeo.rtextsize * fsgeo.blocksize;
	free  = fscounts.freertx * fsgeo.rtextsize * fsgeo.blocksize;
	*rcount = BTOBB(count);
	*rfree  = BTOBB(free);
	*rused  = BTOBB(count - free);

	close(fd);
	return 1;
}
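
A quick sanity check of the unit handling above, assuming 4096-byte filesystem blocks:

/* e.g. fsgeo.blocksize = 4096, datablocks - logsize = 1000:
 *   count   = 1000 * 4096 = 4,096,000 bytes
 *   *bcount = BTOBB(4096000) = 8000 basic blocks of 512 bytes each
 */
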