Example #1
static xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	if (XFS_IS_REALTIME_INODE(ip))
		return XFS_FSB_TO_BB(ip->i_mount, fsb);
	return XFS_FSB_TO_DADDR(ip->i_mount, (fsb));
}
/*
 * Convert the given file system block to a disk block.  We have to treat it
 * differently based on whether the file is a real time file or not, because the
 * bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	return (XFS_IS_REALTIME_INODE(ip) ? \
		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
}
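For context on the conversion this helper performs (shown above in a newer and an older variant): a daddr is a 512-byte basic block, so XFS_FSB_TO_BB is a plain shift by the mount's block-to-basic-block log, while XFS_FSB_TO_DADDR must first split the AG-encoded block number. A hedged sketch, paraphrased from the XFS headers (exact file placement varies by version):

/* Paraphrased from the XFS headers -- a sketch, not the verbatim source. */
#define XFS_FSB_TO_BB(mp, fsbno)	((fsbno) << (mp)->m_blkbb_log)

#define XFS_FSB_TO_DADDR(mp, fsbno)	\
	XFS_AGB_TO_DADDR(mp, XFS_FSB_TO_AGNO(mp, fsbno), \
			 XFS_FSB_TO_AGBNO(mp, fsbno))

Realtime files address the realtime device with plain linear block numbers, which is why the realtime branch needs only the shift.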
Example #3
/*
 * Invalidate buffers for per-AG btree blocks we're dumping.  This function
 * is not intended for use with file data repairs; we have bunmapi for that.
 */
int
xrep_invalidate_blocks(
	struct xfs_scrub	*sc,
	struct xfs_bitmap	*bitmap)
{
	struct xfs_bitmap_range	*bmr;
	struct xfs_bitmap_range	*n;
	struct xfs_buf		*bp;
	xfs_fsblock_t		fsbno;

	/*
	 * For each block in each extent, see if there's an incore buffer for
	 * exactly that block; if so, invalidate it.  The buffer cache only
	 * lets us look for one buffer at a time, so we have to look one block
	 * at a time.  Avoid invalidating AG headers and post-EOFS blocks
	 * because we never own those; and if we can't TRYLOCK the buffer we
	 * assume it's owned by someone else.
	 */
	for_each_xfs_bitmap_block(fsbno, bmr, n, bitmap) {
		/* Skip AG headers and post-EOFS blocks */
		if (!xfs_verify_fsbno(sc->mp, fsbno))
			continue;
		bp = xfs_buf_incore(sc->mp->m_ddev_targp,
				XFS_FSB_TO_DADDR(sc->mp, fsbno),
				XFS_FSB_TO_BB(sc->mp, 1), XBF_TRYLOCK);
		if (bp) {
			xfs_trans_bjoin(sc->tp, bp);
			xfs_trans_binval(sc->tp, bp);
		}
	}

	return 0;
}
Example #4
/*
 * Read in the in-core dquot's on-disk metadata and return the buffer.
 * Returns ENOENT to signal a hole.
 */
STATIC int
xfs_dquot_disk_read(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_bmbt_irec	map;
	struct xfs_buf		*bp;
	struct xfs_inode	*quotip = xfs_quota_inode(mp, dqp->dq_flags);
	uint			lock_mode;
	int			nmaps = 1;
	int			error;

	lock_mode = xfs_ilock_data_map_shared(quotip);
	if (!xfs_this_quota_on(mp, dqp->dq_flags)) {
		/*
		 * Return if this type of quota was turned off while we
		 * didn't have the quota inode lock.
		 */
		xfs_iunlock(quotip, lock_mode);
		return -ESRCH;
	}

	/*
	 * Find the block map; no allocations yet
	 */
	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
			XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
	xfs_iunlock(quotip, lock_mode);
	if (error)
		return error;

	ASSERT(nmaps == 1);
	ASSERT(map.br_blockcount >= 1);
	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
	if (map.br_startblock == HOLESTARTBLOCK)
		return -ENOENT;

	trace_xfs_dqtobp_read(dqp);

	/*
	 * store the blkno etc so that we don't have to do the
	 * mapping all the time
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
			mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			&xfs_dquot_buf_ops);
	if (error) {
		ASSERT(bp == NULL);
		return error;
	}

	ASSERT(xfs_buf_islocked(bp));
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);
	*bpp = bp;

	return 0;
}
Example #5
/*
 * Read the value associated with an attribute from the out-of-line buffer
 * that we stored it in.
 */
int
xfs_attr_rmtval_get(
	struct xfs_da_args	*args)
{
	struct xfs_bmbt_irec	map[ATTR_RMTVALUE_MAPSIZE];
	struct xfs_mount	*mp = args->dp->i_mount;
	struct xfs_buf		*bp;
	xfs_dablk_t		lblkno = args->rmtblkno;
	__uint8_t		*dst = args->value;
	int			valuelen;
	int			nmap;
	int			error;
	int			blkcnt = args->rmtblkcnt;
	int			i;
	int			offset = 0;

	trace_xfs_attr_rmtval_get(args);

	ASSERT(!(args->flags & ATTR_KERNOVAL));
	ASSERT(args->rmtvaluelen == args->valuelen);

	valuelen = args->rmtvaluelen;
	while (valuelen > 0) {
		nmap = ATTR_RMTVALUE_MAPSIZE;
		error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
				       blkcnt, map, &nmap,
				       XFS_BMAPI_ATTRFORK);
		if (error)
			return error;
		ASSERT(nmap >= 1);

		for (i = 0; (i < nmap) && (valuelen > 0); i++) {
			xfs_daddr_t	dblkno;
			int		dblkcnt;

			ASSERT((map[i].br_startblock != DELAYSTARTBLOCK) &&
			       (map[i].br_startblock != HOLESTARTBLOCK));
			dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
			dblkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
						   dblkno, dblkcnt, 0, &bp,
						   &xfs_attr3_rmt_buf_ops);
			if (error)
				return error;

			error = xfs_attr_rmtval_copyout(mp, bp, args->dp->i_ino,
							&offset, &valuelen,
							&dst);
			xfs_buf_relse(bp);
			if (error)
				return error;

			/* roll attribute extent map forwards */
			lblkno += map[i].br_blockcount;
			blkcnt -= map[i].br_blockcount;
		}
	}
	ASSERT(valuelen == 0);
	return 0;
}
Example #6
/* Initialize a new AG btree root block with zero entries. */
int
xrep_init_btblock(
	struct xfs_scrub		*sc,
	xfs_fsblock_t			fsb,
	struct xfs_buf			**bpp,
	xfs_btnum_t			btnum,
	const struct xfs_buf_ops	*ops)
{
	struct xfs_trans		*tp = sc->tp;
	struct xfs_mount		*mp = sc->mp;
	struct xfs_buf			*bp;

	trace_xrep_init_btblock(mp, XFS_FSB_TO_AGNO(mp, fsb),
			XFS_FSB_TO_AGBNO(mp, fsb), btnum);

	ASSERT(XFS_FSB_TO_AGNO(mp, fsb) == sc->sa.agno);
	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, XFS_FSB_TO_DADDR(mp, fsb),
			XFS_FSB_TO_BB(mp, 1), 0);
	xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
	xfs_btree_init_block(mp, bp, btnum, 0, 0, sc->sa.agno, 0);
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_BTREE_BUF);
	xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
	bp->b_ops = ops;
	*bpp = bp;

	return 0;
}
Example #7
static void
xfs_bmbt_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	struct xfs_bmbt_irec	*imap)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (imap->br_startblock == HOLESTARTBLOCK) {
		iomap->blkno = IOMAP_NULL_BLOCK;
		iomap->type = IOMAP_HOLE;
	} else if (imap->br_startblock == DELAYSTARTBLOCK) {
		iomap->blkno = IOMAP_NULL_BLOCK;
		iomap->type = IOMAP_DELALLOC;
	} else {
		iomap->blkno =
			XFS_FSB_TO_DADDR(ip->i_mount, imap->br_startblock);
		if (imap->br_state == XFS_EXT_UNWRITTEN)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
}
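Worth noting about the helper above: iomap->blkno is filled with the 512-byte basic-block address that XFS_FSB_TO_DADDR produces, while iomap->offset and iomap->length are converted all the way to bytes with XFS_FSB_TO_B. Holes and delayed-allocation extents have no disk address yet, so blkno is set to IOMAP_NULL_BLOCK and only the type field distinguishes them.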
Example #8
static int
sb_logzero(uuid_t *uuidp)
{
	int	cycle = XLOG_INIT_CYCLE;
	int	error;

	if (!sb_logcheck())
		return 0;

	/*
	 * The log must always move forward on v5 superblocks. Bump it to the
	 * next cycle.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb))
		cycle = mp->m_log->l_curr_cycle + 1;

	dbprintf(_("Clearing log and setting UUID\n"));

	error = libxfs_log_clear(mp->m_logdev_targp, NULL,
			XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart),
			(xfs_extlen_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks),
			uuidp,
			xfs_sb_version_haslogv2(&mp->m_sb) ? 2 : 1,
			mp->m_sb.sb_logsunit, XLOG_FMT, cycle);
	if (error) {
		dbprintf(_("ERROR: cannot clear the log\n"));
		return 0;
	}

	return 1;
}
Example #9
/*
 * Get a buffer for the bitmap or summary file block specified.
 * The buffer is returned read and locked.
 */
int
xfs_rtbuf_get(
	xfs_mount_t	*mp,		/* file system mount structure */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_rtblock_t	block,		/* block number in bitmap or summary */
	int		issum,		/* is summary not bitmap */
	xfs_buf_t	**bpp)		/* output: buffer for the block */
{
	xfs_buf_t	*bp;		/* block buffer, result */
	xfs_inode_t	*ip;		/* bitmap or summary inode */
	xfs_bmbt_irec_t	map;
	int		nmap = 1;
	int		error;		/* error value */

	ip = issum ? mp->m_rsumip : mp->m_rbmip;

	error = xfs_bmapi_read(ip, block, 1, &map, &nmap, XFS_DATA_FORK);
	if (error)
		return error;

	if (nmap == 0 || !xfs_bmap_is_real_extent(&map))
		return -EFSCORRUPTED;

	ASSERT(map.br_startblock != NULLFSBLOCK);
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
				   XFS_FSB_TO_DADDR(mp, map.br_startblock),
				   mp->m_bsize, 0, &bp, &xfs_rtbuf_ops);
	if (error)
		return error;

	xfs_trans_buf_set_type(tp, bp, issum ? XFS_BLFT_RTSUMMARY_BUF
					     : XFS_BLFT_RTBITMAP_BUF);
	*bpp = bp;
	return 0;
}
Example #10
static int
newfile(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_bmap_free_t	*flist,
	xfs_fsblock_t	*first,
	int		dolocal,
	int		logit,
	char		*buf,
	int		len)
{
	xfs_buf_t	*bp;
	xfs_daddr_t	d;
	int		error;
	int		flags;
	xfs_bmbt_irec_t	map;
	xfs_mount_t	*mp;
	xfs_extlen_t	nb;
	int		nmap;

	flags = 0;
	mp = ip->i_mount;
	if (dolocal && len <= XFS_IFORK_DSIZE(ip)) {
		xfs_idata_realloc(ip, len, XFS_DATA_FORK);
		if (buf)
			memmove(ip->i_df.if_u1.if_data, buf, len);
		ip->i_d.di_size = len;
		ip->i_df.if_flags &= ~XFS_IFEXTENTS;
		ip->i_df.if_flags |= XFS_IFINLINE;
		ip->i_d.di_format = XFS_DINODE_FMT_LOCAL;
		flags = XFS_ILOG_DDATA;
	} else if (len > 0) {
		nb = XFS_B_TO_FSB(mp, len);
		nmap = 1;
		error = libxfs_bmapi(tp, ip, 0, nb, XFS_BMAPI_WRITE, first, nb,
				&map, &nmap, flist);
		if (error) {
			fail(_("error allocating space for a file"), error);
		}
		if (nmap != 1) {
			fprintf(stderr,
				_("%s: cannot allocate space for file\n"),
				progname);
			exit(1);
		}
		d = XFS_FSB_TO_DADDR(mp, map.br_startblock);
		bp = libxfs_trans_get_buf(logit ? tp : 0, mp->m_dev, d,
			nb << mp->m_blkbb_log, 0);
		memmove(XFS_BUF_PTR(bp), buf, len);
		if (len < XFS_BUF_COUNT(bp))
			memset(XFS_BUF_PTR(bp) + len, 0, XFS_BUF_COUNT(bp) - len);
		if (logit)
			libxfs_trans_log_buf(tp, bp, 0, XFS_BUF_COUNT(bp) - 1);
		else
			libxfs_writebuf(bp, LIBXFS_EXIT_ON_FAILURE);
	}
	ip->i_d.di_size = len;
	return flags;
}
Example #11
int copy_extent_to_buffer(xfs_mount_t *mp, xfs_bmbt_irec_t rec, void *buffer, off_t offset, size_t len) {
    xfs_buf_t *block_buffer;
    int64_t copylen;
    int64_t block, start, end;          /* extent-relative block indices */
    char *src;
    char *dst = buffer;

    xfs_off_t block_start, copy_start;                      /* byte offsets */
    int64_t block_size = XFS_FSB_TO_B(mp, 1);               /* bytes per fs block */
    xfs_off_t extent_start = XFS_FSB_TO_B(mp, rec.br_startoff);

    /* compute a block to start reading from */
    if (offset >= extent_start) {
        start = XFS_B_TO_FSBT(mp, offset - extent_start);
    } else {
        /* request starts before this extent; leave a gap in the output */
        dst += extent_start - offset;
        start = 0;
    }

    end = min(rec.br_blockcount, XFS_B_TO_FSBT(mp, offset + len - extent_start - 1) + 1);

    for (block = start; block < end; block++) {
        block_start = XFS_FSB_TO_B(mp, rec.br_startoff + block);
        block_buffer = libxfs_readbuf(mp->m_dev, XFS_FSB_TO_DADDR(mp, rec.br_startblock + block),
                                      XFS_FSB_TO_BB(mp, 1), 0);
        if (block_buffer == NULL) {
            printf("Buffer error\n");
            return XFS_ERROR(EIO);
        }

        /* default: copy the whole block */
        src = block_buffer->b_addr;
        copy_start = block_start;
        copylen = block_size;
        /* clip the front if the request starts inside this block */
        if (block_start < offset) {
            copylen = block_size + block_start - offset;
            copy_start = offset;
            src = (char *)block_buffer->b_addr + (block_size - copylen);
        }
        /* clip the back if the request ends inside this block */
        if ((block_start + block_size) > (offset + len)) {
            copylen = offset + len - copy_start;
        }

        if (copylen > 0) {
            memcpy(dst, src, copylen);
            dst += copylen;
        }
        libxfs_putbuf(block_buffer);
    }

    return 0;
}
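A minimal, hypothetical caller for the helper above (names here are illustrative): offset is an absolute file offset in bytes, and the extent's own byte range is derived from br_startoff the same way as inside the function.

/* Hypothetical usage: copy the first 4 KiB of a mapped extent, where mp and
 * rec come from an earlier bmap walk. */
char data[4096];
off_t file_off = XFS_FSB_TO_B(mp, rec.br_startoff);

if (copy_extent_to_buffer(mp, rec, data, file_off, sizeof(data)) != 0)
    fprintf(stderr, "extent copy failed\n");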
Example #12
static void
zero_log(xfs_mount_t *mp, libxfs_init_t *args)
{
    int logdev = (mp->m_sb.sb_logstart == 0) ? args->logdev : args->ddev;

    libxfs_log_clear(logdev,
                     XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart),
                     (xfs_extlen_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks),
                     &mp->m_sb.sb_uuid,
                     XLOG_FMT);
}
Example #13
/* This routine brings in blocks from disk one by one and assembles them
 * in the value buffer. If get_bmapi gets smarter later to return an extent
 * or list of extents, that would be great. For now, we don't expect too
 * many blocks per remote value, so one by one is sufficient.
 */
static int
rmtval_get(xfs_mount_t *mp, xfs_ino_t ino, blkmap_t *blkmap,
		xfs_dablk_t blocknum, int valuelen, char* value)
{
	xfs_fsblock_t	bno;
	xfs_buf_t	*bp;
	int		clearit = 0, i = 0, length = 0, amountdone = 0;
	int		hdrsize = 0;

	if (xfs_sb_version_hascrc(&mp->m_sb))
		hdrsize = sizeof(struct xfs_attr3_rmt_hdr);

	/* ASSUMPTION: valuelen is a valid number, so use it for looping */
	/* Note that valuelen is not a multiple of blocksize */
	while (amountdone < valuelen) {
		bno = blkmap_get(blkmap, blocknum + i);
		if (bno == NULLFSBLOCK) {
			do_warn(
	_("remote block for attributes of inode %" PRIu64 " is missing\n"), ino);
			clearit = 1;
			break;
		}
		bp = libxfs_readbuf(mp->m_dev, XFS_FSB_TO_DADDR(mp, bno),
				    XFS_FSB_TO_BB(mp, 1), 0,
				    &xfs_attr3_rmt_buf_ops);
		if (!bp) {
			do_warn(
	_("can't read remote block for attributes of inode %" PRIu64 "\n"), ino);
			clearit = 1;
			break;
		}

		if (bp->b_error == -EFSBADCRC || bp->b_error == -EFSCORRUPTED) {
			do_warn(
	_("Corrupt remote block for attributes of inode %" PRIu64 "\n"), ino);
			clearit = 1;
			break;
		}

		ASSERT(mp->m_sb.sb_blocksize == XFS_BUF_COUNT(bp));

		length = MIN(XFS_BUF_COUNT(bp) - hdrsize, valuelen - amountdone);
		memmove(value, bp->b_addr + hdrsize, length);
		amountdone += length;
		value += length;
		i++;
		libxfs_putbuf(bp);
	}
	return (clearit);
}
Example #14
void
make_bbmap(
	bbmap_t		*bbmap,
	int		nex,
	bmap_ext_t	*bmp)
{
	int		i;

	for (i = 0; i < nex; i++) {
		bbmap->b[i].bm_bn = XFS_FSB_TO_DADDR(mp, bmp[i].startblock);
		bbmap->b[i].bm_len = XFS_FSB_TO_BB(mp, bmp[i].blockcount);
	}
	bbmap->nmaps = nex;
}
Example #15
/* Transform a rmapbt irec into a fsmap */
STATIC int
xfs_getfsmap_datadev_helper(
	struct xfs_btree_cur		*cur,
	struct xfs_rmap_irec		*rec,
	void				*priv)
{
	struct xfs_mount		*mp = cur->bc_mp;
	struct xfs_getfsmap_info	*info = priv;
	xfs_fsblock_t			fsb;
	xfs_daddr_t			rec_daddr;

	fsb = XFS_AGB_TO_FSB(mp, cur->bc_private.a.agno, rec->rm_startblock);
	rec_daddr = XFS_FSB_TO_DADDR(mp, fsb);

	return xfs_getfsmap_helper(cur->bc_tp, info, rec, rec_daddr);
}
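The detour through an fsblock here is equivalent to calling XFS_AGB_TO_DADDR(mp, agno, rec->rm_startblock) directly, since XFS_FSB_TO_DADDR merely splits the fsblock back into its (AG number, AG block) halves; going via XFS_AGB_TO_FSB keeps the intermediate value in the same xfs_fsblock_t form the fsmap code uses elsewhere.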
Example #16
STATIC int
xfs_qm_dqiter_bufs(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error;
	int			type;

	ASSERT(blkcnt > 0);
	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
	error = 0;

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);
		if (error)
			break;

		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);
		/*
		 * Go to the next block.
		 */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}
Example #17
static void
scan_lbtree(
	xfs_fsblock_t	root,
	int		nlevels,
	scan_lbtree_f_t	func,
	extmap_t	**extmapp,
	typnm_t		btype)
{
	push_cur();
	set_cur(&typtab[btype], XFS_FSB_TO_DADDR(mp, root), blkbb, DB_RING_IGN,
		NULL);
	if (iocur_top->data == NULL) {
		dbprintf(_("can't read btree block %u/%u\n"),
			XFS_FSB_TO_AGNO(mp, root),
			XFS_FSB_TO_AGBNO(mp, root));
		return;
	}
	(*func)(iocur_top->data, nlevels - 1, extmapp, btype);
	pop_cur();
}
Example #18
void
make_bbmap(
	bbmap_t		*bbmap,
	int		nex,
	bmap_ext_t	*bmp)
{
	int		d;
	xfs_dfsbno_t	dfsbno;
	int		i;
	int		j;
	int		k;

	for (i = 0, d = 0; i < nex; i++) {
		dfsbno = bmp[i].startblock;
		for (j = 0; j < bmp[i].blockcount; j++, dfsbno++) {
			for (k = 0; k < blkbb; k++)
				bbmap->b[d++] =
					XFS_FSB_TO_DADDR(mp, dfsbno) + k;
		}
	}
}
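Compared with the variant in Example #14, which stores one (bm_bn, bm_len) pair per extent, this older version flattens each extent into one entry per 512-byte basic block. A rough worked example, assuming 4 KiB filesystem blocks (so blkbb == 8 basic blocks per block): an extent with startblock 100 and blockcount 2 expands into 16 consecutive entries, XFS_FSB_TO_DADDR(mp, 100) + 0 through + 15.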
Example #19
extern void readbitmap(char* device, image_head image_hdr, unsigned long* bitmap, int pui)
{

    xfs_agnumber_t  agno = 0;
    xfs_agblock_t   first_agbno;
    xfs_agnumber_t  num_ags;
    ag_header_t     ag_hdr;
    xfs_daddr_t     read_ag_off;
    int             read_ag_length;
    void            *read_ag_buf = NULL;
    xfs_off_t	    read_ag_position;            /* xfs_types.h: typedef __s64 */
    uint64_t	    sk, s_pos = 0;
    ssize_t	    res;			/* signed so read() errors are detectable */
    void            *btree_buf_data = NULL;
    int		    btree_buf_length;
    xfs_off_t	    btree_buf_position;
    xfs_agblock_t   bno;
    uint	    current_level;
    uint	    btree_levels;
    xfs_daddr_t     begin, next_begin, ag_begin, new_begin, ag_end;
						/* xfs_types.h: typedef __s64*/
    xfs_off_t       pos;
    xfs_alloc_ptr_t *ptr;
    xfs_alloc_rec_t *rec_ptr;
    int		    length;
    int		    i;
    uint64_t        size, sizeb;
    xfs_off_t	    w_position;
    int		    w_length;
    int		    wblocks;
    int		    w_size = 1 * 1024 * 1024;
    uint64_t        numblocks = 0;

    xfs_off_t	    logstart, logend;
    xfs_off_t	    logstart_pos, logend_pos;
    int		    log_length;

    struct xfs_btree_block *block;
    uint64_t current_block, block_count, prog_cur_block = 0;

    int start = 0;
    int bit_size = 1;
    progress_bar        prog;

    uint64_t bused = 0;
    uint64_t bfree = 0;

    /// init progress
    progress_init(&prog, start, image_hdr.totalblock, image_hdr.totalblock, BITMAP, bit_size);

    fs_open(device);

    first_agbno = (((XFS_AGFL_DADDR(mp) + 1) * source_sectorsize) + first_residue) / source_blocksize;

    num_ags = mp->m_sb.sb_agcount;

    log_mesg(1, 0, 0, fs_opt.debug, "ags = %i\n", num_ags);
    for (agno = 0; agno < num_ags ; agno++)  {
	/* read in first blocks of the ag */

	/* initial settings */
	log_mesg(2, 0, 0, fs_opt.debug, "read ag %i header\n", agno);

	read_ag_off = XFS_AG_DADDR(mp, agno, XFS_SB_DADDR);
	read_ag_length = first_agbno * source_blocksize;
	read_ag_position = (xfs_off_t) read_ag_off * (xfs_off_t) BBSIZE;
	read_ag_buf = malloc(read_ag_length);
	if(read_ag_buf == NULL){
	    log_mesg(0, 1, 1, fs_opt.debug, "%s, %i, ERROR:%s", __func__, __LINE__, strerror(errno));
	}
	memset(read_ag_buf, 0, read_ag_length);

	log_mesg(2, 0, 0, fs_opt.debug, "seek to read_ag_position %lli\n", read_ag_position);
	sk = lseek(source_fd, read_ag_position, SEEK_SET);
	current_block = (sk/source_blocksize);
	block_count = (read_ag_length/source_blocksize);
	set_bitmap(bitmap, sk, read_ag_length);
	log_mesg(2, 0, 0, fs_opt.debug, "read ag header fd = %llu(%i), length = %i(%i)\n", sk, current_block, read_ag_length, block_count);
	if ((res = read(source_fd, read_ag_buf, read_ag_length)) < 0)  {
	    log_mesg(1, 0, 1, fs_opt.debug, "read failure at offset %lld\n", read_ag_position);
	}

	ag_hdr.xfs_sb = (xfs_dsb_t *) (read_ag_buf);
	ASSERT(be32_to_cpu(ag_hdr.xfs_sb->sb_magicnum) == XFS_SB_MAGIC);
	ag_hdr.xfs_agf = (xfs_agf_t *) (read_ag_buf + source_sectorsize);
	ASSERT(be32_to_cpu(ag_hdr.xfs_agf->agf_magicnum) == XFS_AGF_MAGIC);
	ag_hdr.xfs_agi = (xfs_agi_t *) (read_ag_buf + 2 * source_sectorsize);
	ASSERT(be32_to_cpu(ag_hdr.xfs_agi->agi_magicnum) == XFS_AGI_MAGIC);
	ag_hdr.xfs_agfl = (xfs_agfl_t *) (read_ag_buf + 3 * source_sectorsize);
	log_mesg(2, 0, 0, fs_opt.debug, "ag header read ok\n");


	/* save what we need (agf) in the btree buffer */

	btree_buf_data = malloc(source_blocksize);
	if(btree_buf_data == NULL){
	    log_mesg(0, 1, 1, fs_opt.debug, "%s, %i, ERROR:%s", __func__, __LINE__, strerror(errno));
	}
	memset(btree_buf_data, 0, source_blocksize);
	memmove(btree_buf_data, ag_hdr.xfs_agf, source_sectorsize);
	ag_hdr.xfs_agf = (xfs_agf_t *) btree_buf_data;
	btree_buf_length = source_blocksize;

	///* traverse btree until we get to the leftmost leaf node */

	bno = be32_to_cpu(ag_hdr.xfs_agf->agf_roots[XFS_BTNUM_BNOi]);
	current_level = 0;
	btree_levels = be32_to_cpu(ag_hdr.xfs_agf->agf_levels[XFS_BTNUM_BNOi]);

	ag_end = XFS_AGB_TO_DADDR(mp, agno, be32_to_cpu(ag_hdr.xfs_agf->agf_length) - 1) + source_blocksize / BBSIZE;

	for (;;) {
	    /* none of this touches the w_buf buffer */

	    current_level++;

	    btree_buf_position = pos = (xfs_off_t)XFS_AGB_TO_DADDR(mp,agno,bno) << BBSHIFT;
	    btree_buf_length = source_blocksize;

	    sk = lseek(source_fd, btree_buf_position, SEEK_SET);
	    current_block = (sk/source_blocksize);
	    block_count = (btree_buf_length/source_blocksize);
	    set_bitmap(bitmap, sk, btree_buf_length);
	    log_mesg(2, 0, 0, fs_opt.debug, "read btree sf = %llu(%i), length = %i(%i)\n", sk, current_block, btree_buf_length, block_count);
	    read(source_fd, btree_buf_data, btree_buf_length);
	    block = (struct xfs_btree_block *)((char *)btree_buf_data + pos - btree_buf_position);

	    if (be16_to_cpu(block->bb_level) == 0)
		break;

	    ptr = XFS_ALLOC_PTR_ADDR(mp, block, 1, mp->m_alloc_mxr[1]);
	    bno = be32_to_cpu(ptr[0]);
	}
	log_mesg(2, 0, 0, fs_opt.debug, "btree read done\n");

	/* align first data copy but don't overwrite ag header */

	pos = read_ag_position >> BBSHIFT;
	length = read_ag_length >> BBSHIFT;
	next_begin = pos + length;
	ag_begin = next_begin;


	///* handle the rest of the ag */

	for (;;) {
	    if (be16_to_cpu(block->bb_level) != 0)  {
		log_mesg(0, 1, 1, fs_opt.debug, "WARNING:  source filesystem inconsistent.\nA leaf btree rec isn't a leaf. Aborting now.\n");
	    }

	    rec_ptr = XFS_ALLOC_REC_ADDR(mp, block, 1);
	    for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++, rec_ptr++)  {
		/* calculate in daddr's */

		begin = next_begin;

		/*
		 * protect against pathological case of a
		 * hole right after the ag header in a
		 * mis-aligned case
		 */

		if (begin < ag_begin)
		    begin = ag_begin;

		/*
		 * round size up to ensure we copy a
		 * range bigger than required
		 */

		log_mesg(3, 0, 0, fs_opt.debug, "XFS_AGB_TO_DADDR = %llu, agno = %i, be32_to_cpu=%llu\n", XFS_AGB_TO_DADDR(mp, agno, be32_to_cpu(rec_ptr->ar_startblock)), agno, be32_to_cpu(rec_ptr->ar_startblock));
		sizeb = XFS_AGB_TO_DADDR(mp, agno, be32_to_cpu(rec_ptr->ar_startblock)) - begin;
		size = roundup(sizeb << BBSHIFT, source_sectorsize);
		log_mesg(3, 0, 0, fs_opt.debug, "BB = %i size %llu and sizeb %llu begin = %llu\n", BBSHIFT, size, sizeb, begin);

		if (size > 0)  {
		    /* copy extent */

		    log_mesg(2, 0, 0, fs_opt.debug, "copy extent\n");
		    w_position = (xfs_off_t)begin << BBSHIFT;

		    while (size > 0)  {
			/*
			 * let lower layer do alignment
			 */
			if (size > w_size)  {
			    w_length = w_size;
			    wblocks = w_length >> BBSHIFT;	/* basic blocks consumed this pass */
			    size -= w_size;
			    sizeb -= wblocks;
			    numblocks += wblocks;
			} else  {
			    w_length = size;
			    numblocks += sizeb;
			    size = 0;
			}

			//read_wbuf(source_fd, &w_buf, mp);
			sk = lseek(source_fd, w_position, SEEK_SET);
			current_block = (sk/source_blocksize);
			block_count = (w_length/source_blocksize);
			set_bitmap(bitmap, sk, w_length);
			log_mesg(2, 0, 0, fs_opt.debug, "read ext sourcefd to w_buf source_fd=%llu(%i), length=%i(%i)\n", sk, current_block, w_length, block_count);
			sk = lseek(source_fd, w_length, SEEK_CUR);

			w_position += w_length;
		    }
		}

		/* round next starting point down */

		new_begin = XFS_AGB_TO_DADDR(mp, agno, be32_to_cpu(rec_ptr->ar_startblock) + be32_to_cpu(rec_ptr->ar_blockcount));
		next_begin = rounddown(new_begin, source_sectorsize >> BBSHIFT);
	    }

	    if (be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK){
		log_mesg(2, 0, 0, fs_opt.debug, "NULLAGBLOCK\n");
		break;
	    }

	    /* read in next btree record block */

	    btree_buf_position = pos = (xfs_off_t)XFS_AGB_TO_DADDR(mp, agno, be32_to_cpu(block->bb_u.s.bb_rightsib)) << BBSHIFT;
	    btree_buf_length = source_blocksize;

	    /* let read_wbuf handle alignment */

	    //read_wbuf(source_fd, &btree_buf, mp);
	    sk = lseek(source_fd, btree_buf_position, SEEK_SET);
	    current_block = (sk/source_blocksize);
	    block_count = (btree_buf_length/source_blocksize);
	    set_bitmap(bitmap, sk, btree_buf_length);
	    log_mesg(2, 0, 0, fs_opt.debug, "read btreebuf fd = %llu(%i), length = %i(%i) \n", sk, current_block, btree_buf_length, block_count);
	    read(source_fd, btree_buf_data, btree_buf_length);

	    block = (struct xfs_btree_block *)((char *) btree_buf_data + pos - btree_buf_position);
	    ASSERT(be32_to_cpu(block->bb_magic) == XFS_ABTB_MAGIC);

	}
	/*
	 * write out range of used blocks after last range
	 * of free blocks in AG
	 */
	if (next_begin < ag_end)  {
	    begin = next_begin;

	    sizeb = ag_end - begin;
	    size = roundup(sizeb << BBSHIFT, source_sectorsize);

	    if (size > 0)  {
		/* copy extent */

		w_position = (xfs_off_t) begin << BBSHIFT;

		while (size > 0)  {
		    /*
		     * let lower layer do alignment
		     */
		    if (size > w_size)  {
			w_length = w_size;
			wblocks = w_length >> BBSHIFT;	/* basic blocks consumed this pass */
			size -= w_size;
			sizeb -= wblocks;
			numblocks += wblocks;
		    } else  {
			w_length = size;
			numblocks += sizeb;
			size = 0;
		    }

		    sk = lseek(source_fd, w_position, SEEK_SET);
		    current_block = (sk/source_blocksize);
		    block_count = (w_length/source_blocksize);
		    set_bitmap(bitmap, sk, w_length);
		    log_mesg(2, 0, 0, fs_opt.debug, "read ext fd = %llu(%i), length = %i(%i)\n", sk, current_block, w_length, block_count);
		    //read_wbuf(source_fd, &w_buf, mp);
		    lseek(source_fd, w_length, SEEK_CUR);
		    w_position += w_length;
		}
	    }
	}

	log_mesg(2, 0, 0, fs_opt.debug, "write a clean log\n");
	log_length = 1 * 1024 * 1024;
	logstart = XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart) << BBSHIFT;
	logstart_pos = rounddown(logstart, (xfs_off_t)log_length);
	if (logstart % log_length)  {  /* unaligned */
	    sk = lseek(source_fd, logstart_pos, SEEK_SET);
	    current_block = (sk/source_blocksize);
	    block_count = (log_length/source_blocksize);
	    set_bitmap(bitmap, sk, log_length);
	    log_mesg(2, 0, 0, fs_opt.debug, "read log start from %llu(%i) %i(%i)\n", sk, current_block, log_length, block_count);
	    sk = lseek(source_fd, log_length, SEEK_CUR);
	}

	logend = XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart) << BBSHIFT;
	logend += XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks);
	logend_pos = rounddown(logend, (xfs_off_t)log_length);
	if (logend % log_length)  {    /* unaligned */
	    sk = lseek(source_fd, logend_pos, SEEK_SET);
	    current_block = (sk/source_blocksize);
	    block_count = (log_length/source_blocksize);
	    set_bitmap(bitmap, sk, log_length);
	    log_mesg(2, 0, 0, fs_opt.debug, "read log end  from %llu(%i) %i(%i)\n", sk, current_block, log_length, block_count);
	    sk = lseek(source_fd, log_length, SEEK_CUR);
	}

	log_mesg(2, 0, 0, fs_opt.debug, "write a clean log done\n");

	prog_cur_block = image_hdr.totalblock/num_ags*(agno+1)-1;
	update_pui(&prog, prog_cur_block, prog_cur_block, 0);
    }

    for(current_block = 0; current_block < image_hdr.totalblock; current_block++){
	if(pc_test_bit(current_block, bitmap))
	    bused++;
	else
	    bfree++;
    }
    log_mesg(0, 0, 0, fs_opt.debug, "bused = %lli, bfree = %lli\n", bused, bfree);

    fs_close();
    update_pui(&prog, 1, 1, 1);

}
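The log-range computation near the end of readbitmap() relies on a daddr being a 512-byte basic block (BBSHIFT == 9): shifting the daddr left by BBSHIFT yields an absolute byte offset on the device. A minimal sketch of that idiom:

/* Sketch: absolute byte offset of a filesystem block on the data device.
 * XFS_FSB_TO_DADDR yields 512-byte basic blocks; BBSHIFT is 9. */
static xfs_off_t fsb_to_byte_offset(xfs_mount_t *mp, xfs_fsblock_t fsbno)
{
    return (xfs_off_t)XFS_FSB_TO_DADDR(mp, fsbno) << BBSHIFT;
}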
Example #20
static void
zero_log(xfs_mount_t *mp)
{
	int error;
	xlog_t	log;
	xfs_daddr_t head_blk, tail_blk;
	dev_t logdev = (mp->m_sb.sb_logstart == 0) ? x.logdev : x.ddev;

	memset(&log, 0, sizeof(log));
	if (!x.logdev)
		x.logdev = x.ddev;
	x.logBBsize = XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
	x.logBBstart = XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart);

	log.l_dev = logdev;
	log.l_logsize = BBTOB(x.logBBsize);
	log.l_logBBsize = x.logBBsize;
	log.l_logBBstart = x.logBBstart;
	log.l_mp = mp;
	if (xfs_sb_version_hassector(&mp->m_sb)) {
		log.l_sectbb_log = mp->m_sb.sb_logsectlog - BBSHIFT;
		ASSERT(log.l_sectbb_log <= mp->m_sectbb_log);
		/* for larger sector sizes, must have v2 or external log */
		ASSERT(log.l_sectbb_log == 0 ||
			log.l_logBBstart == 0 ||
			xfs_sb_version_haslogv2(&mp->m_sb));
		ASSERT(mp->m_sb.sb_logsectlog >= BBSHIFT);
	}
	log.l_sectbb_mask = (1 << log.l_sectbb_log) - 1;

	if ((error = xlog_find_tail(&log, &head_blk, &tail_blk))) {
		do_warn(_("zero_log: cannot find log head/tail "
			  "(xlog_find_tail=%d), zeroing it anyway\n"),
			error);
	} else {
		if (verbose) {
			do_warn(_("zero_log: head block %lld tail block %lld\n"),
				head_blk, tail_blk);
		}
		if (head_blk != tail_blk) {
			if (zap_log) {
				do_warn(_(
"ALERT: The filesystem has valuable metadata changes in a log which is being\n"
"destroyed because the -L option was used.\n"));
			} else {
				do_warn(_(
"ERROR: The filesystem has valuable metadata changes in a log which needs to\n"
"be replayed.  Mount the filesystem to replay the log, and unmount it before\n"
"re-running xfs_repair.  If you are unable to mount the filesystem, then use\n"
"the -L option to destroy the log and attempt a repair.\n"
"Note that destroying the log may cause corruption -- please attempt a mount\n"
"of the filesystem before doing this.\n"));
				exit(2);
			}
		}
	}

	libxfs_log_clear(logdev,
		XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart),
		(xfs_extlen_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks),
		&mp->m_sb.sb_uuid,
		xfs_sb_version_haslogv2(&mp->m_sb) ? 2 : 1,
		mp->m_sb.sb_logsunit, XLOG_FMT);
}
Example #21
int
xfs_symlink(
	struct xfs_inode	*dp,
	struct xfs_name		*link_name,
	const char		*target_path,
	umode_t			mode,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_trans	*tp = NULL;
	struct xfs_inode	*ip = NULL;
	int			error = 0;
	int			pathlen;
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		first_block;
	bool                    unlock_dp_on_error = false;
	xfs_fileoff_t		first_fsb;
	xfs_filblks_t		fs_blocks;
	int			nmaps;
	struct xfs_bmbt_irec	mval[XFS_SYMLINK_MAPS];
	xfs_daddr_t		d;
	const char		*cur_chunk;
	int			byte_cnt;
	int			n;
	xfs_buf_t		*bp;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	uint			resblks;

	*ipp = NULL;

	trace_xfs_symlink(dp, link_name);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * Check component lengths of the target path name.
	 */
	pathlen = strlen(target_path);
	if (pathlen >= MAXPATHLEN)      /* total string too long */
		return -ENAMETOOLONG;

	udqp = gdqp = NULL;
	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp,
			xfs_kuid_to_uid(current_fsuid()),
			xfs_kgid_to_gid(current_fsgid()), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	/*
	 * The symlink will fit into the inode data fork?
	 * There can't be any attributes so we get the whole variable part.
	 */
	if (pathlen <= XFS_LITINO(mp, dp->i_d.di_version))
		fs_blocks = 0;
	else
		fs_blocks = xfs_symlink_blocks(mp, pathlen);
	resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_symlink, resblks, 0, 0, &tp);
	if (error == -ENOSPC && fs_blocks == 0) {
		resblks = 0;
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_symlink, 0, 0, 0,
				&tp);
	}
	if (error)
		goto out_release_inode;

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = true;

	/*
	 * Check whether the directory allows new symlinks or not.
	 */
	if (dp->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) {
		error = -EPERM;
		goto out_trans_cancel;
	}

	/*
	 * Reserve disk quota : blocks and inode.
	 */
	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
						pdqp, resblks, 1, 0);
	if (error)
		goto out_trans_cancel;

	/*
	 * Check for ability to enter directory entry, if no space reserved.
	 */
	if (!resblks) {
		error = xfs_dir_canenter(tp, dp, link_name);
		if (error)
			goto out_trans_cancel;
	}
	/*
	 * Initialize the bmap freelist prior to calling either
	 * bmapi or the directory create code.
	 */
	xfs_defer_init(&dfops, &first_block);

	/*
	 * Allocate an inode for the symlink.
	 */
	error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0,
			       prid, resblks > 0, &ip, NULL);
	if (error)
		goto out_trans_cancel;

	/*
	 * Now we join the directory inode to the transaction.  We do not do it
	 * earlier because xfs_dir_ialloc might commit the previous transaction
	 * (and release all the locks).  An error from here on will result in
	 * the transaction cancel unlocking dp so don't do it explicitly in the
	 * error path.
	 */
	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	unlock_dp_on_error = false;

	/*
	 * Also attach the dquot(s) to it, if applicable.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	if (resblks)
		resblks -= XFS_IALLOC_SPACE_RES(mp);
	/*
	 * If the symlink will fit into the inode, write it inline.
	 */
	if (pathlen <= XFS_IFORK_DSIZE(ip)) {
		xfs_init_local_fork(ip, XFS_DATA_FORK, target_path, pathlen);

		ip->i_d.di_size = pathlen;
		ip->i_d.di_format = XFS_DINODE_FMT_LOCAL;
		xfs_trans_log_inode(tp, ip, XFS_ILOG_DDATA | XFS_ILOG_CORE);
	} else {
		int	offset;

		first_fsb = 0;
		nmaps = XFS_SYMLINK_MAPS;

		error = xfs_bmapi_write(tp, ip, first_fsb, fs_blocks,
				  XFS_BMAPI_METADATA, &first_block, resblks,
				  mval, &nmaps, &dfops);
		if (error)
			goto out_bmap_cancel;

		if (resblks)
			resblks -= fs_blocks;
		ip->i_d.di_size = pathlen;
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

		cur_chunk = target_path;
		offset = 0;
		for (n = 0; n < nmaps; n++) {
			char	*buf;

			d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
			byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
			bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
					       BTOBB(byte_cnt), 0);
			if (!bp) {
				error = -ENOMEM;
				goto out_bmap_cancel;
			}
			bp->b_ops = &xfs_symlink_buf_ops;

			byte_cnt = XFS_SYMLINK_BUF_SPACE(mp, byte_cnt);
			byte_cnt = min(byte_cnt, pathlen);

			buf = bp->b_addr;
			buf += xfs_symlink_hdr_set(mp, ip->i_ino, offset,
						   byte_cnt, bp);

			memcpy(buf, cur_chunk, byte_cnt);

			cur_chunk += byte_cnt;
			pathlen -= byte_cnt;
			offset += byte_cnt;

			xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SYMLINK_BUF);
			xfs_trans_log_buf(tp, bp, 0, (buf + byte_cnt - 1) -
							(char *)bp->b_addr);
		}
		ASSERT(pathlen == 0);
	}

	/*
	 * Create the directory entry for the symlink.
	 */
	error = xfs_dir_createname(tp, dp, link_name, ip->i_ino,
					&first_block, &dfops, resblks);
	if (error)
		goto out_bmap_cancel;
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

	/*
	 * If this is a synchronous mount, make sure that the
	 * symlink transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
		xfs_trans_set_sync(tp);
	}

	error = xfs_defer_finish(&tp, &dfops, NULL);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
out_trans_cancel:
	xfs_trans_cancel(tp);
out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode.  This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		IRELE(ip);
	}

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	if (unlock_dp_on_error)
		xfs_iunlock(dp, XFS_ILOCK_EXCL);
	return error;
}
Example #22
/*
 * Process a refcount update intent item that was recovered from the log.
 * We need to update the refcountbt.
 */
int
xfs_cui_recover(
	struct xfs_mount		*mp,
	struct xfs_cui_log_item		*cuip)
{
	int				i;
	int				error = 0;
	unsigned int			refc_type;
	struct xfs_phys_extent		*refc;
	xfs_fsblock_t			startblock_fsb;
	bool				op_ok;
	struct xfs_cud_log_item		*cudp;
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*rcur = NULL;
	enum xfs_refcount_intent_type	type;
	xfs_fsblock_t			firstfsb;
	xfs_fsblock_t			new_fsb;
	xfs_extlen_t			new_len;
	struct xfs_bmbt_irec		irec;
	struct xfs_defer_ops		dfops;
	bool				requeue_only = false;

	ASSERT(!test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags));

	/*
	 * First check the validity of the extents described by the
	 * CUI.  If any are bad, then assume that all are bad and
	 * just toss the CUI.
	 */
	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
		refc = &cuip->cui_format.cui_extents[i];
		startblock_fsb = XFS_BB_TO_FSB(mp,
				   XFS_FSB_TO_DADDR(mp, refc->pe_startblock));
		switch (refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK) {
		case XFS_REFCOUNT_INCREASE:
		case XFS_REFCOUNT_DECREASE:
		case XFS_REFCOUNT_ALLOC_COW:
		case XFS_REFCOUNT_FREE_COW:
			op_ok = true;
			break;
		default:
			op_ok = false;
			break;
		}
		if (!op_ok || startblock_fsb == 0 ||
		    refc->pe_len == 0 ||
		    startblock_fsb >= mp->m_sb.sb_dblocks ||
		    refc->pe_len >= mp->m_sb.sb_agblocks ||
		    (refc->pe_flags & ~XFS_REFCOUNT_EXTENT_FLAGS)) {
			/*
			 * This will pull the CUI from the AIL and
			 * free the memory associated with it.
			 */
			set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
			xfs_cui_release(cuip);
			return -EIO;
		}
	}

	/*
	 * Under normal operation, refcount updates are deferred, so we
	 * wouldn't be adding them directly to a transaction.  All
	 * refcount updates manage reservation usage internally and
	 * dynamically by deferring work that won't fit in the
	 * transaction.  Normally, any work that needs to be deferred
	 * gets attached to the same defer_ops that scheduled the
	 * refcount update.  However, we're in log recovery here, so we
	 * create our own defer_ops and use that to finish up any
	 * work that doesn't fit.
	 */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error)
		return error;
	cudp = xfs_trans_get_cud(tp, cuip);

	xfs_defer_init(&dfops, &firstfsb);
	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
		refc = &cuip->cui_format.cui_extents[i];
		refc_type = refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
		switch (refc_type) {
		case XFS_REFCOUNT_INCREASE:
		case XFS_REFCOUNT_DECREASE:
		case XFS_REFCOUNT_ALLOC_COW:
		case XFS_REFCOUNT_FREE_COW:
			type = refc_type;
			break;
		default:
			error = -EFSCORRUPTED;
			goto abort_error;
		}
		if (requeue_only) {
			new_fsb = refc->pe_startblock;
			new_len = refc->pe_len;
		} else
			error = xfs_trans_log_finish_refcount_update(tp, cudp,
				&dfops, type, refc->pe_startblock, refc->pe_len,
				&new_fsb, &new_len, &rcur);
		if (error)
			goto abort_error;

		/* Requeue what we didn't finish. */
		if (new_len > 0) {
			irec.br_startblock = new_fsb;
			irec.br_blockcount = new_len;
			switch (type) {
			case XFS_REFCOUNT_INCREASE:
				error = xfs_refcount_increase_extent(
						tp->t_mountp, &dfops, &irec);
				break;
			case XFS_REFCOUNT_DECREASE:
				error = xfs_refcount_decrease_extent(
						tp->t_mountp, &dfops, &irec);
				break;
			case XFS_REFCOUNT_ALLOC_COW:
				error = xfs_refcount_alloc_cow_extent(
						tp->t_mountp, &dfops,
						irec.br_startblock,
						irec.br_blockcount);
				break;
			case XFS_REFCOUNT_FREE_COW:
				error = xfs_refcount_free_cow_extent(
						tp->t_mountp, &dfops,
						irec.br_startblock,
						irec.br_blockcount);
				break;
			default:
				ASSERT(0);
			}
			if (error)
				goto abort_error;
			requeue_only = true;
		}
	}

	xfs_refcount_finish_one_cleanup(tp, rcur, error);
	error = xfs_defer_finish(&tp, &dfops, NULL);
	if (error)
		goto abort_defer;
	set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
	error = xfs_trans_commit(tp);
	return error;

abort_error:
	xfs_refcount_finish_one_cleanup(tp, rcur, error);
abort_defer:
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
	return error;
}
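The startblock_fsb round-trip at the top of xfs_cui_recover() is easy to misread: an xfs_fsblock_t on the data device encodes an (AG number, AG block) pair, so it cannot be compared against sb_dblocks directly. Mapping it to a daddr and back through XFS_BB_TO_FSB linearizes it. A sketch of just the startblock half of the check, with a hypothetical helper name:

/* Hypothetical helper mirroring the recovery-time startblock check above. */
static bool refc_startblock_ok(struct xfs_mount *mp, xfs_fsblock_t fsb)
{
	/* AG-encoded fsblock -> linear daddr -> linear fsblock count */
	xfs_fsblock_t	linear = XFS_BB_TO_FSB(mp, XFS_FSB_TO_DADDR(mp, fsb));

	return linear != 0 && linear < mp->m_sb.sb_dblocks;
}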
Example #23
/*
 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
 * caller supplied function for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_dqiterate(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_d.di_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	do {
		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		xfs_ilock(qip, XFS_ILOCK_SHARED);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, XFS_ILOCK_SHARED);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);


			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt =  map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
					       XFS_FSB_TO_DADDR(mp, rablkno),
					       mp->m_quotainfo->qi_dqchunklen,
					       NULL);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_dqiter_bufs(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   flags, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kmem_free(map);
	return error;
}
Example #24
xfs_daddr_t
xfs_fsb_to_daddr(xfs_mount_t *mp, xfs_fsblock_t fsbno)
{
    return XFS_FSB_TO_DADDR(mp, fsbno);
}
/*
 * Maps a dquot to the buffer containing its on-disk version.
 * This returns a ptr to the buffer containing the on-disk dquot
 * in the bpp param, and a ptr to the on-disk dquot within that buffer
 */
STATIC int
xfs_qm_dqtobp(
	xfs_trans_t		**tpp,
	xfs_dquot_t		*dqp,
	xfs_disk_dquot_t	**O_ddpp,
	xfs_buf_t		**O_bpp,
	uint			flags)
{
	xfs_bmbt_irec_t map;
	int		nmaps, error;
	xfs_buf_t	*bp;
	xfs_inode_t	*quotip;
	xfs_mount_t	*mp;
	xfs_disk_dquot_t *ddq;
	xfs_dqid_t	id;
	boolean_t	newdquot;
	xfs_trans_t	*tp = (tpp ? *tpp : NULL);

	mp = dqp->q_mount;
	id = be32_to_cpu(dqp->q_core.d_id);
	nmaps = 1;
	newdquot = B_FALSE;

	/*
	 * If we don't know where the dquot lives, find out.
	 */
	if (dqp->q_blkno == (xfs_daddr_t) 0) {
		/* We use the id as an index */
		dqp->q_fileoffset = (xfs_fileoff_t)id / XFS_QM_DQPERBLK(mp);
		nmaps = 1;
		quotip = XFS_DQ_TO_QIP(dqp);
		xfs_ilock(quotip, XFS_ILOCK_SHARED);
		/*
		 * Return if this type of quota was turned off while we didn't
		 * have an inode lock
		 */
		if (XFS_IS_THIS_QUOTA_OFF(dqp)) {
			xfs_iunlock(quotip, XFS_ILOCK_SHARED);
			return (ESRCH);
		}
		/*
		 * Find the block map; no allocations yet
		 */
		error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset,
				  XFS_DQUOT_CLUSTER_SIZE_FSB,
				  XFS_BMAPI_METADATA,
				  NULL, 0, &map, &nmaps, NULL);

		xfs_iunlock(quotip, XFS_ILOCK_SHARED);
		if (error)
			return (error);
		ASSERT(nmaps == 1);
		ASSERT(map.br_blockcount == 1);

		/*
		 * offset of dquot in the (fixed sized) dquot chunk.
		 */
		dqp->q_bufoffset = (id % XFS_QM_DQPERBLK(mp)) *
			sizeof(xfs_dqblk_t);
		if (map.br_startblock == HOLESTARTBLOCK) {
			/*
			 * We don't allocate unless we're asked to
			 */
			if (!(flags & XFS_QMOPT_DQALLOC))
				return (ENOENT);

			ASSERT(tp);
			if ((error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
						dqp->q_fileoffset, &bp)))
				return (error);
			tp = *tpp;
			newdquot = B_TRUE;
		} else {
			/*
			 * store the blkno etc so that we don't have to do the
			 * mapping all the time
			 */
			dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
		}
	}
	ASSERT(dqp->q_blkno != DELAYSTARTBLOCK);
	ASSERT(dqp->q_blkno != HOLESTARTBLOCK);

	/*
	 * Read in the buffer, unless we've just done the allocation
	 * (in which case we already have the buf).
	 */
	if (!newdquot) {
		xfs_dqtrace_entry(dqp, "DQTOBP READBUF");
		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
					   dqp->q_blkno,
					   XFS_QI_DQCHUNKLEN(mp),
					   0, &bp);
		if (error || !bp)
			return XFS_ERROR(error);
	}
	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);

	/*
	 * calculate the location of the dquot inside the buffer.
	 */
	ddq = (xfs_disk_dquot_t *)((char *)XFS_BUF_PTR(bp) + dqp->q_bufoffset);

	/*
	 * A simple sanity check in case we got a corrupted dquot...
	 */
	if (xfs_qm_dqcheck(ddq, id, dqp->dq_flags & XFS_DQ_ALLTYPES,
			   flags & (XFS_QMOPT_DQREPAIR|XFS_QMOPT_DOWARN),
			   "dqtobp")) {
		if (!(flags & XFS_QMOPT_DQREPAIR)) {
			xfs_trans_brelse(tp, bp);
			return XFS_ERROR(EIO);
		}
		XFS_BUF_BUSY(bp); /* We dirtied this */
	}

	*O_bpp = bp;
	*O_ddpp = ddq;

	return (0);
}
/*
 * Allocate a block and fill it with dquots.
 * This is called when the bmapi finds a hole.
 */
STATIC int
xfs_qm_dqalloc(
	xfs_trans_t	**tpp,
	xfs_mount_t	*mp,
	xfs_dquot_t	*dqp,
	xfs_inode_t	*quotip,
	xfs_fileoff_t	offset_fsb,
	xfs_buf_t	**O_bpp)
{
	xfs_fsblock_t	firstblock;
	xfs_bmap_free_t flist;
	xfs_bmbt_irec_t map;
	int		nmaps, error, committed;
	xfs_buf_t	*bp;
	xfs_trans_t	*tp = *tpp;

	ASSERT(tp != NULL);
	xfs_dqtrace_entry(dqp, "DQALLOC");

	/*
	 * Initialize the bmap freelist prior to calling bmapi code.
	 */
	XFS_BMAP_INIT(&flist, &firstblock);
	xfs_ilock(quotip, XFS_ILOCK_EXCL);
	/*
	 * Return if this type of quota was turned off while we didn't
	 * have an inode lock
	 */
	if (XFS_IS_THIS_QUOTA_OFF(dqp)) {
		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
		return (ESRCH);
	}

	/*
	 * xfs_trans_commit normally decrements the vnode ref count
	 * when it unlocks the inode. Since we want to keep the quota
	 * inode around, we bump the vnode ref count now.
	 */
	VN_HOLD(XFS_ITOV(quotip));

	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
	nmaps = 1;
	if ((error = xfs_bmapi(tp, quotip,
			      offset_fsb, XFS_DQUOT_CLUSTER_SIZE_FSB,
			      XFS_BMAPI_METADATA | XFS_BMAPI_WRITE,
			      &firstblock,
			      XFS_QM_DQALLOC_SPACE_RES(mp),
			      &map, &nmaps, &flist))) {
		goto error0;
	}
	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(nmaps == 1);
	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
	       (map.br_startblock != HOLESTARTBLOCK));

	/*
	 * Keep track of the blkno to save a lookup later
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	/* now we can just get the buffer (there's nothing to read yet) */
	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			       dqp->q_blkno,
			       XFS_QI_DQCHUNKLEN(mp),
			       0);
	if (!bp || (error = XFS_BUF_GETERROR(bp)))
		goto error1;
	/*
	 * Make a chunk of dquots out of this buffer and log
	 * the entire thing.
	 */
	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);

	/*
	 * xfs_bmap_finish() may commit the current transaction and
	 * start a second transaction if the freelist is not empty.
	 *
	 * Since we still want to modify this buffer, we need to
	 * ensure that the buffer is not released on commit of
	 * the first transaction and ensure the buffer is added to the
	 * second transaction.
	 *
	 * If there is only one transaction then don't stop the buffer
	 * from being released when it commits later on.
	 */

	xfs_trans_bhold(tp, bp);

	if ((error = xfs_bmap_finish(tpp, &flist, firstblock, &committed))) {
		goto error1;
	}

	if (committed) {
		tp = *tpp;
		xfs_trans_bjoin(tp, bp);
	} else {
		xfs_trans_bhold_release(tp, bp);
	}

	*O_bpp = bp;
	return 0;

      error1:
	xfs_bmap_cancel(&flist);
      error0:
	xfs_iunlock(quotip, XFS_ILOCK_EXCL);

	return (error);
}
Example #27
/* ----- Kernel only functions below ----- */
STATIC int
xfs_readlink_bmap(
	struct xfs_inode	*ip,
	char			*link)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	mval[XFS_SYMLINK_MAPS];
	struct xfs_buf		*bp;
	xfs_daddr_t		d;
	char			*cur_chunk;
	int			pathlen = ip->i_d.di_size;
	int			nmaps = XFS_SYMLINK_MAPS;
	int			byte_cnt;
	int			n;
	int			error = 0;
	int			fsblocks = 0;
	int			offset;

	fsblocks = xfs_symlink_blocks(mp, pathlen);
	error = xfs_bmapi_read(ip, 0, fsblocks, mval, &nmaps, 0);
	if (error)
		goto out;

	offset = 0;
	for (n = 0; n < nmaps; n++) {
		d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
		byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);

		bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), 0,
				  &xfs_symlink_buf_ops);
		if (!bp)
			return XFS_ERROR(ENOMEM);
		error = bp->b_error;
		if (error) {
			xfs_buf_ioerror_alert(bp, __func__);
			xfs_buf_relse(bp);

			/* bad CRC means corrupted metadata */
			if (error == EFSBADCRC)
				error = EFSCORRUPTED;
			goto out;
		}
		byte_cnt = XFS_SYMLINK_BUF_SPACE(mp, byte_cnt);
		if (pathlen < byte_cnt)
			byte_cnt = pathlen;

		cur_chunk = bp->b_addr;
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			if (!xfs_symlink_hdr_ok(mp, ip->i_ino, offset,
							byte_cnt, bp)) {
				error = EFSCORRUPTED;
				xfs_alert(mp,
"symlink header does not match required off/len/owner (0x%x/Ox%x,0x%llx)",
					offset, byte_cnt, ip->i_ino);
				xfs_buf_relse(bp);
				goto out;

			}

			cur_chunk += sizeof(struct xfs_dsymlink_hdr);
		}

		memcpy(link + offset, cur_chunk, byte_cnt);

		pathlen -= byte_cnt;
		offset += byte_cnt;

		xfs_buf_relse(bp);
	}
	ASSERT(pathlen == 0);

	link[ip->i_d.di_size] = '\0';
	error = 0;

 out:
	return error;
}
Example #28
/*
 * Free a symlink that has blocks associated with it.
 */
STATIC int
xfs_inactive_symlink_rmt(
	struct xfs_inode *ip)
{
	xfs_buf_t	*bp;
	int		committed;
	int		done;
	int		error;
	xfs_fsblock_t	first_block;
	xfs_bmap_free_t	free_list;
	int		i;
	xfs_mount_t	*mp;
	xfs_bmbt_irec_t	mval[XFS_SYMLINK_MAPS];
	int		nmaps;
	int		size;
	xfs_trans_t	*tp;

	mp = ip->i_mount;
	ASSERT(ip->i_df.if_flags & XFS_IFEXTENTS);
	/*
	 * We're freeing a symlink that has some
	 * blocks allocated to it.  Free the
	 * blocks here.  We know that we've got
	 * either 1 or 2 extents and that we can
	 * free them all in one bunmapi call.
	 */
	ASSERT(ip->i_d.di_nextents > 0 && ip->i_d.di_nextents <= 2);

	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Lock the inode, fix the size, and join it to the transaction.
	 * Hold it so in the normal path, we still have it locked for
	 * the second transaction.  In the error paths we need it
	 * held so the cancel won't rele it, see below.
	 */
	size = (int)ip->i_d.di_size;
	ip->i_d.di_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	/*
	 * Find the block(s) so we can inval and unmap them.
	 */
	done = 0;
	xfs_bmap_init(&free_list, &first_block);
	nmaps = ARRAY_SIZE(mval);
	error = xfs_bmapi_read(ip, 0, xfs_symlink_blocks(mp, size),
				mval, &nmaps, 0);
	if (error)
		goto error_trans_cancel;
	/*
	 * Invalidate the block(s). No validation is done.
	 */
	for (i = 0; i < nmaps; i++) {
		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, mval[i].br_startblock),
			XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0);
		if (!bp) {
			error = ENOMEM;
			goto error_bmap_cancel;
		}
		xfs_trans_binval(tp, bp);
	}
	/*
	 * Unmap the dead block(s) to the free_list.
	 */
	error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps,
			    &first_block, &free_list, &done);
	if (error)
		goto error_bmap_cancel;
	ASSERT(done);
	/*
	 * Commit the first transaction.  This logs the EFI and the inode.
	 */
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error)
		goto error_bmap_cancel;
	/*
	 * The transaction must have been committed, since there were
	 * actually extents freed by xfs_bunmapi.  See xfs_bmap_finish.
	 * The new tp has the extent freeing and EFDs.
	 */
	ASSERT(committed);
	/*
	 * The first xact was committed, so add the inode to the new one.
	 * Mark it dirty so it will be logged and moved forward in the log as
	 * part of every commit.
	 */
	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	/*
	 * Commit the transaction containing extent freeing and EFDs.
	 */
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		goto error_unlock;
	}

	/*
	 * Remove the memory for extent descriptions (just bookkeeping).
	 */
	if (ip->i_df.if_bytes)
		xfs_idata_realloc(ip, -ip->i_df.if_bytes, XFS_DATA_FORK);
	ASSERT(ip->i_df.if_bytes == 0);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;

error_bmap_cancel:
	xfs_bmap_cancel(&free_list);
error_trans_cancel:
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
error_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
Example #29
int
xfs_symlink(
	struct xfs_inode	*dp,
	struct xfs_name		*link_name,
	const char		*target_path,
	umode_t			mode,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_trans	*tp = NULL;
	struct xfs_inode	*ip = NULL;
	int			error = 0;
	int			pathlen;
	struct xfs_bmap_free	free_list;
	xfs_fsblock_t		first_block;
	bool			unlock_dp_on_error = false;
	uint			cancel_flags;
	int			committed;
	xfs_fileoff_t		first_fsb;
	xfs_filblks_t		fs_blocks;
	int			nmaps;
	struct xfs_bmbt_irec	mval[XFS_SYMLINK_MAPS];
	xfs_daddr_t		d;
	const char		*cur_chunk;
	int			byte_cnt;
	int			n;
	xfs_buf_t		*bp;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	uint			resblks;

	*ipp = NULL;

	trace_xfs_symlink(dp, link_name);

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	/*
	 * Check component lengths of the target path name.
	 */
	pathlen = strlen(target_path);
	if (pathlen >= MAXPATHLEN)      /* total string too long */
		return XFS_ERROR(ENAMETOOLONG);

	udqp = gdqp = NULL;
	if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
		prid = xfs_get_projid(dp);
	else
		prid = XFS_PROJID_DEFAULT;

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp,
			xfs_kuid_to_uid(current_fsuid()),
			xfs_kgid_to_gid(current_fsgid()), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		goto std_return;

	tp = xfs_trans_alloc(mp, XFS_TRANS_SYMLINK);
	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
	/*
	 * The symlink will fit into the inode data fork?
	 * There can't be any attributes so we get the whole variable part.
	 */
	if (pathlen <= XFS_LITINO(mp, dp->i_d.di_version))
		fs_blocks = 0;
	else
		fs_blocks = xfs_symlink_blocks(mp, pathlen);
	resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_symlink, resblks, 0);
	if (error == ENOSPC && fs_blocks == 0) {
		resblks = 0;
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_symlink, 0, 0);
	}
	if (error) {
		cancel_flags = 0;
		goto error_return;
	}

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = true;

	/*
	 * Check whether the directory allows new symlinks or not.
	 */
	if (dp->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) {
		error = XFS_ERROR(EPERM);
		goto error_return;
	}

	/*
	 * Reserve disk quota : blocks and inode.
	 */
	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
						pdqp, resblks, 1, 0);
	if (error)
		goto error_return;

	/*
	 * Check for ability to enter directory entry, if no space reserved.
	 */
	error = xfs_dir_canenter(tp, dp, link_name, resblks);
	if (error)
		goto error_return;
	/*
	 * Initialize the bmap freelist prior to calling either
	 * bmapi or the directory create code.
	 */
	xfs_bmap_init(&free_list, &first_block);

	/*
	 * Allocate an inode for the symlink.
	 */
	error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0,
			       prid, resblks > 0, &ip, NULL);
	if (error) {
		if (error == ENOSPC)
			goto error_return;
		goto error1;
	}

	/*
	 * An error after we've joined dp to the transaction will result in the
	 * transaction cancel unlocking dp so don't do it explicitly in the
	 * error path.
	 */
	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	unlock_dp_on_error = false;

	/*
	 * Also attach the dquot(s) to it, if applicable.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	if (resblks)
		resblks -= XFS_IALLOC_SPACE_RES(mp);
	/*
	 * If the symlink will fit into the inode, write it inline.
	 */
	if (pathlen <= XFS_IFORK_DSIZE(ip)) {
		xfs_idata_realloc(ip, pathlen, XFS_DATA_FORK);
		memcpy(ip->i_df.if_u1.if_data, target_path, pathlen);
		ip->i_d.di_size = pathlen;

		/*
		 * The inode was initially created in extent format.
		 */
		ip->i_df.if_flags &= ~(XFS_IFEXTENTS | XFS_IFBROOT);
		ip->i_df.if_flags |= XFS_IFINLINE;

		ip->i_d.di_format = XFS_DINODE_FMT_LOCAL;
		xfs_trans_log_inode(tp, ip, XFS_ILOG_DDATA | XFS_ILOG_CORE);

	} else {
		int	offset;

		first_fsb = 0;
		nmaps = XFS_SYMLINK_MAPS;

		error = xfs_bmapi_write(tp, ip, first_fsb, fs_blocks,
				  XFS_BMAPI_METADATA, &first_block, resblks,
				  mval, &nmaps, &free_list);
		if (error)
			goto error2;

		if (resblks)
			resblks -= fs_blocks;
		ip->i_d.di_size = pathlen;
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

		cur_chunk = target_path;
		offset = 0;
		for (n = 0; n < nmaps; n++) {
			char	*buf;

			d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
			byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
			bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
					       BTOBB(byte_cnt), 0);
			if (!bp) {
				error = ENOMEM;
				goto error2;
			}
			bp->b_ops = &xfs_symlink_buf_ops;

			byte_cnt = XFS_SYMLINK_BUF_SPACE(mp, byte_cnt);
			byte_cnt = min(byte_cnt, pathlen);

			buf = bp->b_addr;
			buf += xfs_symlink_hdr_set(mp, ip->i_ino, offset,
						   byte_cnt, bp);

			memcpy(buf, cur_chunk, byte_cnt);

			cur_chunk += byte_cnt;
			pathlen -= byte_cnt;
			offset += byte_cnt;

			xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SYMLINK_BUF);
			xfs_trans_log_buf(tp, bp, 0, (buf + byte_cnt - 1) -
							(char *)bp->b_addr);
		}
		ASSERT(pathlen == 0);
	}

	/*
	 * Create the directory entry for the symlink.
	 */
	error = xfs_dir_createname(tp, dp, link_name, ip->i_ino,
					&first_block, &free_list, resblks);
	if (error)
		goto error2;
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

	/*
	 * If this is a synchronous mount, make sure that the
	 * symlink transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
		xfs_trans_set_sync(tp);
	}

	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error) {
		goto error2;
	}
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

 error2:
	IRELE(ip);
 error1:
	xfs_bmap_cancel(&free_list);
	cancel_flags |= XFS_TRANS_ABORT;
 error_return:
	xfs_trans_cancel(tp, cancel_flags);
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	if (unlock_dp_on_error)
		xfs_iunlock(dp, XFS_ILOCK_EXCL);
 std_return:
	return error;
}
Example #30
/*
 * Process a bmap update intent item that was recovered from the log.
 * We need to update some inode's bmbt.
 */
int
xfs_bui_recover(
	struct xfs_mount		*mp,
	struct xfs_bui_log_item		*buip,
	struct xfs_defer_ops		*dfops)
{
	int				error = 0;
	unsigned int			bui_type;
	struct xfs_map_extent		*bmap;
	xfs_fsblock_t			startblock_fsb;
	xfs_fsblock_t			inode_fsb;
	xfs_filblks_t			count;
	bool				op_ok;
	struct xfs_bud_log_item		*budp;
	enum xfs_bmap_intent_type	type;
	int				whichfork;
	xfs_exntst_t			state;
	struct xfs_trans		*tp;
	struct xfs_inode		*ip = NULL;
	struct xfs_bmbt_irec		irec;

	ASSERT(!test_bit(XFS_BUI_RECOVERED, &buip->bui_flags));

	/* Only one mapping operation per BUI... */
	if (buip->bui_format.bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) {
		set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
		xfs_bui_release(buip);
		return -EIO;
	}

	/*
	 * First check the validity of the extent described by the
	 * BUI.  If anything is bad, then toss the BUI.
	 */
	bmap = &buip->bui_format.bui_extents[0];
	startblock_fsb = XFS_BB_TO_FSB(mp,
			   XFS_FSB_TO_DADDR(mp, bmap->me_startblock));
	inode_fsb = XFS_BB_TO_FSB(mp, XFS_FSB_TO_DADDR(mp,
			XFS_INO_TO_FSB(mp, bmap->me_owner)));
	switch (bmap->me_flags & XFS_BMAP_EXTENT_TYPE_MASK) {
	case XFS_BMAP_MAP:
	case XFS_BMAP_UNMAP:
		op_ok = true;
		break;
	default:
		op_ok = false;
		break;
	}
	if (!op_ok || startblock_fsb == 0 ||
	    bmap->me_len == 0 ||
	    inode_fsb == 0 ||
	    startblock_fsb >= mp->m_sb.sb_dblocks ||
	    bmap->me_len >= mp->m_sb.sb_agblocks ||
	    inode_fsb >= mp->m_sb.sb_dblocks ||
	    (bmap->me_flags & ~XFS_BMAP_EXTENT_FLAGS)) {
		/*
		 * This will pull the BUI from the AIL and
		 * free the memory associated with it.
		 */
		set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
		xfs_bui_release(buip);
		return -EIO;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 0, 0, &tp);
	if (error)
		return error;
	budp = xfs_trans_get_bud(tp, buip);

	/* Grab the inode. */
	error = xfs_iget(mp, tp, bmap->me_owner, 0, XFS_ILOCK_EXCL, &ip);
	if (error)
		goto err_inode;

	if (VFS_I(ip)->i_nlink == 0)
		xfs_iflags_set(ip, XFS_IRECOVERY);

	/* Process deferred bmap item. */
	state = (bmap->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ?
			XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
	whichfork = (bmap->me_flags & XFS_BMAP_EXTENT_ATTR_FORK) ?
			XFS_ATTR_FORK : XFS_DATA_FORK;
	bui_type = bmap->me_flags & XFS_BMAP_EXTENT_TYPE_MASK;
	switch (bui_type) {
	case XFS_BMAP_MAP:
	case XFS_BMAP_UNMAP:
		type = bui_type;
		break;
	default:
		error = -EFSCORRUPTED;
		goto err_inode;
	}
	xfs_trans_ijoin(tp, ip, 0);

	count = bmap->me_len;
	error = xfs_trans_log_finish_bmap_update(tp, budp, dfops, type,
			ip, whichfork, bmap->me_startoff,
			bmap->me_startblock, &count, state);
	if (error)
		goto err_inode;

	if (count > 0) {
		ASSERT(type == XFS_BMAP_UNMAP);
		irec.br_startblock = bmap->me_startblock;
		irec.br_blockcount = count;
		irec.br_startoff = bmap->me_startoff;
		irec.br_state = state;
		error = xfs_bmap_unmap_extent(tp->t_mountp, dfops, ip, &irec);
		if (error)
			goto err_inode;
	}

	set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);

	return error;

err_inode:
	xfs_trans_cancel(tp);
	if (ip) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		IRELE(ip);
	}
	return error;
}