Example #1
0
static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	struct gfs2_inode *dip = GFS2_I(dir), *ip;
	struct gfs2_sbd *sdp = GFS2_SB(dir);
	struct gfs2_holder ghs[2];
	struct inode *inode;
	struct buffer_head *dibh;
	int error;

	gfs2_holder_init(dip->i_gl, 0, 0, ghs);

	inode = gfs2_createi(ghs, &dentry->d_name, S_IFDIR | mode, 0);
	if (IS_ERR(inode)) {
		gfs2_holder_uninit(ghs);
		return PTR_ERR(inode);
	}

	ip = ghs[1].gh_gl->gl_object;

	ip->i_inode.i_nlink = 2;
	ip->i_disksize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);
	ip->i_diskflags |= GFS2_DIF_JDATA;
	ip->i_entries = 2;

	error = gfs2_meta_inode_buffer(ip, &dibh);

	if (!gfs2_assert_withdraw(sdp, !error)) {
		struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;
		struct gfs2_dirent *dent = (struct gfs2_dirent *)(di+1);
		struct qstr str;

		gfs2_str2qstr(&str, ".");
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_qstr2dirent(&str, GFS2_DIRENT_SIZE(str.len), dent);
		dent->de_inum = di->di_num; /* already GFS2 endian */
		dent->de_type = cpu_to_be16(DT_DIR);
		di->di_entries = cpu_to_be32(1);

		gfs2_str2qstr(&str, "..");
		dent = (struct gfs2_dirent *)((char*)dent + GFS2_DIRENT_SIZE(1));
		gfs2_qstr2dirent(&str, dibh->b_size - GFS2_DIRENT_SIZE(1) - sizeof(struct gfs2_dinode), dent);

		gfs2_inum_out(dip, dent);
		dent->de_type = cpu_to_be16(DT_DIR);

		gfs2_dinode_out(ip, di);

		brelse(dibh);
	}

	error = gfs2_change_nlink(dip, +1);
	gfs2_assert_withdraw(sdp, !error); /* dip already pinned */

	gfs2_trans_end(sdp);
	if (dip->i_alloc->al_rgd)
		gfs2_inplace_release(dip);
	gfs2_quota_unlock(dip);
	gfs2_alloc_put(dip);

	gfs2_glock_dq_uninit_m(2, ghs);

	d_instantiate(dentry, inode);
	mark_inode_dirty(inode);

	return 0;
}
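The mkdir path above hand-rolls the "." and ".." entries directly inside the new directory's inode block. As a rough illustration of the sizing arithmetic involved, here is a minimal standalone sketch; the stand-in struct, the 232-byte dinode header and the 8-byte rounding are assumptions made for illustration, not GFS2's real on-disk definitions.

#include <stdint.h>
#include <stdio.h>

struct fake_dirent {			/* stand-in for struct gfs2_dirent */
	uint64_t de_inum;
	uint32_t de_hash;
	uint16_t de_rec_len;
	uint16_t de_name_len;
	uint16_t de_type;
};

/* assumed behaviour of GFS2_DIRENT_SIZE: header + name, 8-byte aligned */
size_t dirent_size(size_t name_len)
{
	return (sizeof(struct fake_dirent) + name_len + 7) & ~(size_t)7;
}

int main(void)
{
	size_t avail = 4096 - 232;	/* block size minus dinode header (hypothetical) */
	size_t dot = dirent_size(1);	/* "." gets a minimal record... */

	/* ...and ".." is handed all the remaining space in the block */
	printf(". -> %zu bytes, .. -> %zu bytes\n", dot, avail - dot);
	return 0;
}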
Example #2
0
/*
 * ext2fs_rename_recalculate_fulr: If we have just entered a directory
 * into dvp at tulr, and we were about to remove one at fulr for an
 * entry named fcnp, fulr may be invalid.  So, if necessary,
 * recalculate it.
 */
static int
ext2fs_rename_recalculate_fulr(struct vnode *dvp,
    struct ufs_lookup_results *fulr, const struct ufs_lookup_results *tulr,
    const struct componentname *fcnp)
{
	struct mount *mp;
	struct ufsmount *ump;
	/* XXX int is a silly type for this; blame ufsmount::um_dirblksiz.  */
	int dirblksiz;
	doff_t search_start, search_end;
	doff_t offset;		/* Offset of entry we're examining.  */
	struct buf *bp;		/* I/O block we're examining.  */
	char *dirbuf;		/* Pointer into directory at search_start.  */
	struct ext2fs_direct *ep; /* Pointer to the entry we're examining.  */
	/* XXX direct::d_reclen is 16-bit;
	 * ufs_lookup_results::ulr_reclen is 32-bit.  Blah.  */
	uint32_t reclen;	/* Length of the entry we're examining.  */
	uint32_t prev_reclen;	/* Length of the preceding entry.  */
	int error;

	KASSERT(dvp != NULL);
	KASSERT(dvp->v_mount != NULL);
	KASSERT(VTOI(dvp) != NULL);
	KASSERT(fulr != NULL);
	KASSERT(tulr != NULL);
	KASSERT(fulr != tulr);
	KASSERT(ext2fs_rename_ulr_overlap_p(fulr, tulr));

	mp = dvp->v_mount;
	ump = VFSTOUFS(mp);
	KASSERT(ump != NULL);
	KASSERT(ump == VTOI(dvp)->i_ump);

	dirblksiz = ump->um_dirblksiz;
	KASSERT(0 < dirblksiz);
	KASSERT((dirblksiz & (dirblksiz - 1)) == 0);

	/* A directory block may not span across multiple I/O blocks.  */
	KASSERT(dirblksiz <= mp->mnt_stat.f_iosize);

	/* Find the bounds of the search.  */
	search_start = tulr->ulr_offset;
	KASSERT(fulr->ulr_reclen < (EXT2FS_MAXDIRSIZE - fulr->ulr_offset));
	search_end = (fulr->ulr_offset + fulr->ulr_reclen);

	/* Compaction must happen only within a directory block. (*)  */
	KASSERT(search_start <= search_end);
	KASSERT((search_end - (search_start &~ (dirblksiz - 1))) <= dirblksiz);

	dirbuf = NULL;
	bp = NULL;
	error = ext2fs_blkatoff(dvp, (off_t)search_start, &dirbuf, &bp);
	if (error)
		return error;
	KASSERT(dirbuf != NULL);
	KASSERT(bp != NULL);

	/*
	 * Guarantee we sha'n't go past the end of the buffer we got.
	 * dirbuf is bp->b_data + (search_start & (iosize - 1)), and
	 * the valid range is [bp->b_data, bp->b_data + bp->b_bcount).
	 */
	KASSERT((search_end - search_start) <=
	    (bp->b_bcount - (search_start & (mp->mnt_stat.f_iosize - 1))));

	prev_reclen = fulr->ulr_count;
	offset = search_start;

	/*
	 * Search from search_start to search_end for the entry matching
	 * fcnp, which must be there because we found it before and it
	 * can only have moved earlier, if at all.
	 */
	for (;;) {
		KASSERT(search_start <= offset);
		KASSERT(offset < search_end);

		/*
		 * Examine the directory entry at offset.
		 */
		ep = (struct ext2fs_direct *)
		    (dirbuf + (offset - search_start));
		reclen = fs2h16(ep->e2d_reclen);

		if (ep->e2d_ino == 0)
			goto next;	/* Entry is unused.  */

		if (fs2h32(ep->e2d_ino) == UFS_WINO)
			goto next;	/* Entry is whiteout.  */

		if (fcnp->cn_namelen != ep->e2d_namlen)
			goto next;	/* Wrong name length.  */

		if (memcmp(ep->e2d_name, fcnp->cn_nameptr, fcnp->cn_namelen))
			goto next;	/* Wrong name.  */

		/* Got it!  */
		break;

next:
		if (! ((reclen < search_end) &&
			(offset < (search_end - reclen)))) {
			brelse(bp, 0);
			return EIO;	/* XXX Panic?  What?  */
		}

		/* We may not move past the search end.  */
		KASSERT(reclen < search_end);
		KASSERT(offset < (search_end - reclen));

		/*
		 * We may not move across a directory block boundary;
		 * see (*) above.
		 */
		KASSERT((offset &~ (dirblksiz - 1)) ==
		    ((offset + reclen) &~ (dirblksiz - 1)));

		prev_reclen = reclen;
		offset += reclen;
	}

	/*
	 * Found the entry.  Record where.
	 */
	fulr->ulr_offset = offset;
	fulr->ulr_reclen = reclen;

	/*
	 * Record the preceding record length, but not if we're at the
	 * start of a directory block.
	 */
	fulr->ulr_count = ((offset & (dirblksiz - 1))? prev_reclen : 0);

	brelse(bp, 0);
	return 0;
}
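For reference, the reclen-chained walk the loop above performs can be reduced to the following sketch. struct mini_dirent is a simplified stand-in for struct ext2fs_direct (no byte-swapping, no whiteout handling), so this is only the shape of the algorithm, not the real layout.

#include <stdint.h>
#include <string.h>

struct mini_dirent {
	uint32_t ino;			/* 0 means the slot is unused */
	uint16_t reclen;		/* distance to the next entry */
	uint8_t  namelen;
	char     name[];
};

/* Return the offset of "name" within buf[0..len), or -1 if absent. */
long scan_dirents(const char *buf, size_t len, const char *name, size_t nl)
{
	size_t off = 0;

	while (off + sizeof(struct mini_dirent) <= len) {
		const struct mini_dirent *ep =
		    (const struct mini_dirent *)(buf + off);

		if (ep->reclen == 0)
			break;		/* corrupt chain; bail out */
		if (ep->ino != 0 && ep->namelen == nl &&
		    memcmp(ep->name, name, nl) == 0)
			return (long)off;
		off += ep->reclen;	/* hop to the next entry */
	}
	return -1;
}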
Example #3
0
static int gfs2_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct buffer_head *dibh;
	struct gfs2_qadata *qa = ip->i_qadata;
	unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned int to = from + len;
	int ret;
	int i_size_changed = 0;

	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(ret)) {
		unlock_page(page);
		page_cache_release(page);
		goto failed;
	}

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);

	if (gfs2_is_stuffed(ip))
		return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

	if (!gfs2_is_writeback(ip))
		gfs2_page_add_databufs(ip, page, from, to);

	/* inlined bits of generic_write_end to avoid marking the inode dirty
	   a second time: */
	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (pos + ret > ip->i_disksize) {
		ip->i_disksize = pos + ret;
		i_size_write(inode, pos + ret);
		i_size_changed = 1;
	}

	unlock_page(page);
	page_cache_release(page);

	if (i_size_changed) {
		gfs2_dinode_out(ip, dibh->b_data);
		mark_inode_dirty(inode);
	}

	if (inode == sdp->sd_rindex) {
		adjust_fs_space(inode);
		sdp->sd_rindex_uptodate = 0;
	}

	brelse(dibh);
	gfs2_trans_end(sdp);
failed:
	if (ip->i_res)
		gfs2_inplace_release(ip);
	if (qa) {
		gfs2_quota_unlock(ip);
		gfs2_qadata_put(ip);
	}
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return ret;
}
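gfs2_is_stuffed() above distinguishes small files whose data lives entirely in the dinode block from files with separate data blocks. A one-line sketch of the size test involved, with a hypothetical header size (the real one comes from struct gfs2_dinode):

#include <stdbool.h>
#include <stdint.h>

/* Can a file of fsize bytes stay "stuffed" in its inode block?
 * dinode_sz is the on-disk inode header size, a parameter here. */
bool fits_stuffed(uint64_t fsize, unsigned bsize, unsigned dinode_sz)
{
	return fsize <= (uint64_t)(bsize - dinode_sz);
}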
Example #4
0
static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
			      handle_t *handle,
			      u32 bits_wanted,
			      u32 min_bits,
			      u16 *bit_off,
			      unsigned int *num_bits,
			      u64 *bg_blkno,
			      u16 *bits_left)
{
	int status;
	u16 chain, tmp_bits;
	u32 tmp_used;
	u64 next_group;
	struct inode *alloc_inode = ac->ac_inode;
	struct buffer_head *group_bh = NULL;
	struct buffer_head *prev_group_bh = NULL;
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) ac->ac_bh->b_data;
	struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;
	struct ocfs2_group_desc *bg;

	chain = ac->ac_chain;
	mlog(0, "trying to alloc %u bits from chain %u, inode %llu\n",
	     bits_wanted, chain,
	     (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno);

	status = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb),
				  le64_to_cpu(cl->cl_recs[chain].c_blkno),
				  &group_bh, OCFS2_BH_CACHED, alloc_inode);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	bg = (struct ocfs2_group_desc *) group_bh->b_data;
	status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg);
	if (status) {
		mlog_errno(status);
		goto bail;
	}

	status = -ENOSPC;
	/* for now, the chain search is a bit simplistic. We just use
	 * the 1st group with any empty bits. */
	while ((status = ac->ac_group_search(alloc_inode, group_bh,
					     bits_wanted, min_bits, bit_off,
					     &tmp_bits)) == -ENOSPC) {
		if (!bg->bg_next_group)
			break;

		if (prev_group_bh) {
			brelse(prev_group_bh);
			prev_group_bh = NULL;
		}
		next_group = le64_to_cpu(bg->bg_next_group);
		prev_group_bh = group_bh;
		group_bh = NULL;
		status = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb),
					  next_group, &group_bh,
					  OCFS2_BH_CACHED, alloc_inode);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
		bg = (struct ocfs2_group_desc *) group_bh->b_data;
		status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg);
		if (status) {
			mlog_errno(status);
			goto bail;
		}
	}
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	mlog(0, "alloc succeeds: we give %u bits from block group %llu\n",
	     tmp_bits, (unsigned long long)le64_to_cpu(bg->bg_blkno));

	*num_bits = tmp_bits;

	BUG_ON(*num_bits == 0);

	/*
	 * Keep track of previous block descriptor read. When
	 * we find a target, if we have read more than X
	 * number of descriptors, and the target is reasonably
	 * empty, relink him to top of his chain.
	 *
	 * We've read 0 extra blocks and only send one more to
	 * the transaction, yet the next guy to search has a
	 * much easier time.
	 *
	 * Do this *after* figuring out how many bits we're taking out
	 * of our target group.
	 */
	if (ac->ac_allow_chain_relink &&
	    (prev_group_bh) &&
	    (ocfs2_block_group_reasonably_empty(bg, *num_bits))) {
		status = ocfs2_relink_block_group(handle, alloc_inode,
						  ac->ac_bh, group_bh,
						  prev_group_bh, chain);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	/* Ok, claim our bits now: set the info on dinode, chainlist
	 * and then the group */
	status = ocfs2_journal_access(handle,
				      alloc_inode,
				      ac->ac_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
	fe->id1.bitmap1.i_used = cpu_to_le32(*num_bits + tmp_used);
	le32_add_cpu(&cl->cl_recs[chain].c_free, -(*num_bits));

	status = ocfs2_journal_dirty(handle,
				     ac->ac_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_block_group_set_bits(handle,
					    alloc_inode,
					    bg,
					    group_bh,
					    *bit_off,
					    *num_bits);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	mlog(0, "Allocated %u bits from suballocator %llu\n", *num_bits,
	     (unsigned long long)le64_to_cpu(fe->i_blkno));

	*bg_blkno = le64_to_cpu(bg->bg_blkno);
	*bits_left = le16_to_cpu(bg->bg_free_bits_count);
bail:
	if (group_bh)
		brelse(group_bh);
	if (prev_group_bh)
		brelse(prev_group_bh);

	mlog_exit(status);
	return status;
}
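The -ENOSPC loop above is essentially a linked-list walk over block groups that also remembers the previously visited group, so a later relink can splice the hit to the head of its chain. Reduced to an in-memory sketch (struct node is a stand-in, not an OCFS2 type):

#include <stddef.h>

struct node {
	struct node *next;
	int has_space;		/* stands in for ac_group_search() succeeding */
};

/* Return the first node with space, recording its predecessor (NULL
 * when the head itself matches), as prev_group_bh does above. */
struct node *find_with_prev(struct node *head, struct node **prevp)
{
	struct node *prev = NULL, *cur = head;

	while (cur != NULL && !cur->has_space) {
		prev = cur;
		cur = cur->next;
	}
	*prevp = prev;
	return cur;		/* NULL plays the role of -ENOSPC */
}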
Example #5
0
/*
 * We expect the block group allocator to already be locked.
 */
static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
				   struct inode *alloc_inode,
				   struct buffer_head *bh)
{
	int status, credits;
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data;
	struct ocfs2_chain_list *cl;
	struct ocfs2_alloc_context *ac = NULL;
	handle_t *handle = NULL;
	u32 bit_off, num_bits;
	u16 alloc_rec;
	u64 bg_blkno;
	struct buffer_head *bg_bh = NULL;
	struct ocfs2_group_desc *bg;

	BUG_ON(ocfs2_is_cluster_bitmap(alloc_inode));

	mlog_entry_void();

	cl = &fe->id2.i_chain;
	status = ocfs2_reserve_clusters(osb,
					le16_to_cpu(cl->cl_cpg),
					&ac);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	credits = ocfs2_calc_group_alloc_credits(osb->sb,
						 le16_to_cpu(cl->cl_cpg));
	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		handle = NULL;
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_claim_clusters(osb,
				      handle,
				      ac,
				      le16_to_cpu(cl->cl_cpg),
				      &bit_off,
				      &num_bits);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	alloc_rec = ocfs2_find_smallest_chain(cl);

	/* setup the group */
	bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off);
	mlog(0, "new descriptor, record %u, at block %llu\n",
	     alloc_rec, (unsigned long long)bg_blkno);

	bg_bh = sb_getblk(osb->sb, bg_blkno);
	if (!bg_bh) {
		status = -EIO;
		mlog_errno(status);
		goto bail;
	}
	ocfs2_set_new_buffer_uptodate(alloc_inode, bg_bh);

	status = ocfs2_block_group_fill(handle,
					alloc_inode,
					bg_bh,
					bg_blkno,
					alloc_rec,
					cl);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	bg = (struct ocfs2_group_desc *) bg_bh->b_data;

	status = ocfs2_journal_access(handle, alloc_inode,
				      bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	le32_add_cpu(&cl->cl_recs[alloc_rec].c_free,
		     le16_to_cpu(bg->bg_free_bits_count));
	le32_add_cpu(&cl->cl_recs[alloc_rec].c_total, le16_to_cpu(bg->bg_bits));
	cl->cl_recs[alloc_rec].c_blkno  = cpu_to_le64(bg_blkno);
	if (le16_to_cpu(cl->cl_next_free_rec) < le16_to_cpu(cl->cl_count))
		le16_add_cpu(&cl->cl_next_free_rec, 1);

	le32_add_cpu(&fe->id1.bitmap1.i_used, le16_to_cpu(bg->bg_bits) -
					le16_to_cpu(bg->bg_free_bits_count));
	le32_add_cpu(&fe->id1.bitmap1.i_total, le16_to_cpu(bg->bg_bits));
	le32_add_cpu(&fe->i_clusters, le16_to_cpu(cl->cl_cpg));

	status = ocfs2_journal_dirty(handle, bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	spin_lock(&OCFS2_I(alloc_inode)->ip_lock);
	OCFS2_I(alloc_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	fe->i_size = cpu_to_le64(ocfs2_clusters_to_bytes(alloc_inode->i_sb,
					     le32_to_cpu(fe->i_clusters)));
	spin_unlock(&OCFS2_I(alloc_inode)->ip_lock);
	i_size_write(alloc_inode, le64_to_cpu(fe->i_size));
	alloc_inode->i_blocks = ocfs2_inode_sector_count(alloc_inode);

	status = 0;
bail:
	if (handle)
		ocfs2_commit_trans(osb, handle);

	if (ac)
		ocfs2_free_alloc_context(ac);

	if (bg_bh)
		brelse(bg_bh);

	mlog_exit(status);
	return status;
}
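The le16/le32_add_cpu() calls above are endian-safe read-modify-write updates of disk-endian counters. A user-space approximation of the 32-bit case — the kernel's real helpers come from its byteorder headers; this is only a sketch of the idea:

#include <stdint.h>

uint32_t le32_to_host(uint32_t v)
{
	const uint8_t *p = (const uint8_t *)&v;

	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

uint32_t host_to_le32(uint32_t v)
{
	uint32_t out;
	uint8_t *p = (uint8_t *)&out;

	p[0] = v;
	p[1] = v >> 8;
	p[2] = v >> 16;
	p[3] = v >> 24;
	return out;
}

/* add delta to a little-endian on-disk counter, like le32_add_cpu() */
void le32_add(uint32_t *field, int32_t delta)
{
	*field = host_to_le32(le32_to_host(*field) + delta);
}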
Example #6
0
static int
ext2_indirtrunc(struct inode *ip, daddr_t lbn, daddr_t dbn,
    daddr_t lastbn, int level, e4fs_daddr_t *countp)
{
	struct buf *bp;
	struct m_ext2fs *fs = ip->i_e2fs;
	struct vnode *vp;
	e2fs_daddr_t *bap, *copy;
	int i, nblocks, error = 0, allerror = 0;
	e2fs_lbn_t nb, nlbn, last;
	e4fs_daddr_t blkcount, factor, blocksreleased = 0;

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->e2fs_bsize);
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update the on-disk copy first.  Since
	 * double (triple) indirect blocks are freed before single (double)
	 * indirect blocks, calls to bmap on these blocks will fail.
	 * However, we already have the on-disk address, so we have to set
	 * the b_blkno field explicitly instead of letting bread do
	 * everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lbn, (int)fs->e2fs_bsize, 0, 0, 0);
	if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
		bp->b_iocmd = BIO_READ;
		if (bp->b_bcount > bp->b_bufsize)
			panic("ext2_indirtrunc: bad buffer size");
		bp->b_blkno = dbn;
		vfs_busy_pages(bp, 0);
		bp->b_iooffset = dbtob(bp->b_blkno);
		bstrategy(bp);
		error = bufwait(bp);
	}
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}

	bap = (e2fs_daddr_t *)bp->b_data;
	copy = malloc(fs->e2fs_bsize, M_TEMP, M_WAITOK);
	bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->e2fs_bsize);
	bzero((caddr_t)&bap[last + 1],
	  (NINDIR(fs) - (last + 1)) * sizeof(e2fs_daddr_t));
	if (last == -1)
		bp->b_flags |= B_INVAL;
	if (DOINGASYNC(vp)) {
		bdwrite(bp);
	} else {
		error = bwrite(bp);
		if (error)
			allerror = error;
	}
	bap = copy;

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = bap[i];
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			if ((error = ext2_indirtrunc(ip, nlbn,
			    fsbtodb(fs, nb), (int32_t)-1, level - 1, &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
		ext2_blkfree(ip, nb, fs->e2fs_bsize);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = bap[i];
		if (nb != 0) {
			if ((error = ext2_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    last, level - 1, &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	free(copy, M_TEMP);
	*countp = blocksreleased;
	return (allerror);
}
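The `factor' computed at the top of ext2_indirtrunc() is the number of data blocks covered by one pointer slot at the current indirection level. A worked sketch with a hypothetical NINDIR of 1024 (4096-byte blocks, 4-byte block pointers):

#include <stdio.h>

int main(void)
{
	long nindir = 4096 / 4;		/* hypothetical NINDIR(fs) */
	long factor = 1;
	int level;

	/* mirrors: for (i = SINGLE; i < level; i++) factor *= NINDIR(fs) */
	for (level = 1; level <= 3; level++) {
		printf("indirection level %d: one slot spans %ld data blocks\n",
		    level, factor);
		factor *= nindir;
	}
	return 0;
}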
Example #7
0
/*
 *	isofs_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the inode number of the found entry, or 0 on error.
 */
static unsigned long
isofs_find_entry(struct inode *dir, struct dentry *dentry,
	unsigned long *block_rv, unsigned long *offset_rv,
	char *tmpname, struct iso_directory_record *tmpde)
{
	unsigned long bufsize = ISOFS_BUFFER_SIZE(dir);
	unsigned char bufbits = ISOFS_BUFFER_BITS(dir);
	unsigned long block, f_pos, offset, block_saved, offset_saved;
	struct buffer_head *bh = NULL;
	struct isofs_sb_info *sbi = ISOFS_SB(dir->i_sb);

	if (!ISOFS_I(dir)->i_first_extent)
		return 0;

	f_pos = 0;
	offset = 0;
	block = 0;

	while (f_pos < dir->i_size) {
		struct iso_directory_record *de;
		int de_len, match, i, dlen;
		char *dpnt;

		if (!bh) {
			bh = isofs_bread(dir, block);
			if (!bh)
				return 0;
		}

		de = (struct iso_directory_record *) (bh->b_data + offset);

		de_len = *(unsigned char *) de;
		if (!de_len) {
			brelse(bh);
			bh = NULL;
			f_pos = (f_pos + ISOFS_BLOCK_SIZE) & ~(ISOFS_BLOCK_SIZE - 1);
			block = f_pos >> bufbits;
			offset = 0;
			continue;
		}

		block_saved = bh->b_blocknr;
		offset_saved = offset;
		offset += de_len;
		f_pos += de_len;

		/* Make sure we have a full directory entry */
		if (offset >= bufsize) {
			int slop = bufsize - offset + de_len;
			memcpy(tmpde, de, slop);
			offset &= bufsize - 1;
			block++;
			brelse(bh);
			bh = NULL;
			if (offset) {
				bh = isofs_bread(dir, block);
				if (!bh)
					return 0;
				memcpy((void *) tmpde + slop, bh->b_data, offset);
			}
			de = tmpde;
		}

		dlen = de->name_len[0];
		dpnt = de->name;
		/* Basic sanity check: the name must not exceed the dir entry */
		if (de_len < dlen + sizeof(struct iso_directory_record)) {
			printk(KERN_NOTICE "iso9660: Corrupted directory entry"
			       " in block %lu of inode %lu\n", block,
			       dir->i_ino);
			brelse(bh);
			return 0;
		}

		if (sbi->s_rock &&
		    ((i = get_rock_ridge_filename(de, tmpname, dir)))) {
			dlen = i;	/* possibly -1 */
			dpnt = tmpname;
#ifdef CONFIG_JOLIET
		} else if (sbi->s_joliet_level) {
			dlen = get_joliet_filename(de, tmpname, dir);
			dpnt = tmpname;
#endif
		} else if (sbi->s_mapping == 'a') {
			dlen = get_acorn_filename(de, tmpname, dir);
			dpnt = tmpname;
		} else if (sbi->s_mapping == 'n') {
			dlen = isofs_name_translate(de, tmpname, dir);
			dpnt = tmpname;
		}

		/*
		 * Skip hidden or associated files unless hide or showassoc,
		 * respectively, is set
		 */
		match = 0;
		if (dlen > 0 &&
			(!sbi->s_hide ||
				(!(de->flags[-sbi->s_high_sierra] & 1))) &&
			(sbi->s_showassoc ||
				(!(de->flags[-sbi->s_high_sierra] & 4)))) {
			match = (isofs_cmp(dentry, dpnt, dlen) == 0);
		}
		if (match) {
			isofs_normalize_block_and_offset(de,
							 &block_saved,
							 &offset_saved);
			*block_rv = block_saved;
			*offset_rv = offset_saved;
			brelse(bh);
			return 1;
		}
	}
	brelse(bh);
	return 0;
}
Example #8
0
int
msdosfs_readdir(void *v)
{
	struct vop_readdir_args *ap = v;
	int error = 0;
	int diff;
	long n;
	int blsize;
	long on;
	long lost;
	long count;
	uint32_t dirsperblk;
	uint32_t cn, lbn;
	uint32_t fileno;
	long bias = 0;
	daddr64_t bn;
	struct buf *bp;
	struct denode *dep = VTODE(ap->a_vp);
	struct msdosfsmount *pmp = dep->de_pmp;
	struct direntry *dentp;
	struct dirent dirbuf;
	struct uio *uio = ap->a_uio;
	u_long *cookies = NULL;
	int ncookies = 0;
	off_t offset, wlast = -1;
	int chksum = -1;

#ifdef MSDOSFS_DEBUG
	printf("msdosfs_readdir(): vp %08x, uio %08x, cred %08x, eofflagp %08x\n",
	    ap->a_vp, uio, ap->a_cred, ap->a_eofflag);
#endif

	/*
	 * msdosfs_readdir() won't operate properly on regular files since
	 * it does i/o only with the filesystem vnode, and hence can
	 * retrieve the wrong block from the buffer cache for a plain file.
	 * So, fail attempts to readdir() on a plain file.
	 */
	if ((dep->de_Attributes & ATTR_DIRECTORY) == 0)
		return (ENOTDIR);

	/*
	 * To be safe, initialize dirbuf
	 */
	bzero(dirbuf.d_name, sizeof(dirbuf.d_name));

	/*
	 * If the user buffer is smaller than the size of one dos directory
	 * entry or the file offset is not a multiple of the size of a
	 * directory entry, then we fail the read.
	 */
	count = uio->uio_resid & ~(sizeof(struct direntry) - 1);
	offset = uio->uio_offset;
	if (count < sizeof(struct direntry) ||
	    (offset & (sizeof(struct direntry) - 1)))
		return (EINVAL);
	lost = uio->uio_resid - count;
	uio->uio_resid = count;

	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / sizeof(struct direntry) + 3;
		cookies = malloc(ncookies * sizeof(u_long), M_TEMP, M_WAITOK);
		*ap->a_cookies = cookies;
		*ap->a_ncookies = ncookies;
	}

	dirsperblk = pmp->pm_BytesPerSec / sizeof(struct direntry);

	/*
	 * If they are reading from the root directory then, we simulate
	 * the . and .. entries since these don't exist in the root
	 * directory.  We also set the offset bias to make up for having to
	 * simulate these entries. By this I mean that at file offset 64 we
	 * read the first entry in the root directory that lives on disk.
	 */
	if (dep->de_StartCluster == MSDOSFSROOT
	    || (FAT32(pmp) && dep->de_StartCluster == pmp->pm_rootdirblk)) {
#if 0
		printf("msdosfs_readdir(): going after . or .. in root dir, offset %d\n",
		    offset);
#endif
		bias = 2 * sizeof(struct direntry);
		if (offset < bias) {
			for (n = (int)offset / sizeof(struct direntry);
			     n < 2; n++) {
			        if (FAT32(pmp))
				        dirbuf.d_fileno = pmp->pm_rootdirblk;
				else
				        dirbuf.d_fileno = 1;
				dirbuf.d_type = DT_DIR;
				switch (n) {
				case 0:
					dirbuf.d_namlen = 1;
					strlcpy(dirbuf.d_name, ".",
					    sizeof dirbuf.d_name);
					break;
				case 1:
					dirbuf.d_namlen = 2;
					strlcpy(dirbuf.d_name, "..",
					    sizeof dirbuf.d_name);
					break;
				}
				dirbuf.d_reclen = DIRENT_SIZE(&dirbuf);
				if (uio->uio_resid < dirbuf.d_reclen)
					goto out;
				error = uiomove((caddr_t) &dirbuf,
						dirbuf.d_reclen, uio);
				if (error)
					goto out;
				offset += sizeof(struct direntry);
				if (cookies) {
					*cookies++ = offset;
					if (--ncookies <= 0)
						goto out;
				}
			}
		}
	}

	while (uio->uio_resid > 0) {
		lbn = de_cluster(pmp, offset - bias);
		on = (offset - bias) & pmp->pm_crbomask;
		n = min(pmp->pm_bpcluster - on, uio->uio_resid);
		diff = dep->de_FileSize - (offset - bias);
		if (diff <= 0)
			break;
		n = min(n, diff);
		if ((error = pcbmap(dep, lbn, &bn, &cn, &blsize)) != 0)
			break;
		error = bread(pmp->pm_devvp, bn, blsize, NOCRED, &bp);
		if (error) {
			brelse(bp);
			return (error);
		}
		n = min(n, blsize - bp->b_resid);

		/*
		 * Convert from dos directory entries to fs-independent
		 * directory entries.
		 */
		for (dentp = (struct direntry *)(bp->b_data + on);
		     (char *)dentp < bp->b_data + on + n;
		     dentp++, offset += sizeof(struct direntry)) {
#if 0
			printf("rd: dentp %08x prev %08x crnt %08x deName %02x attr %02x\n",
			    dentp, prev, crnt, dentp->deName[0], dentp->deAttributes);
#endif
			/*
			 * If this is an unused entry, we can stop.
			 */
			if (dentp->deName[0] == SLOT_EMPTY) {
				brelse(bp);
				goto out;
			}
			/*
			 * Skip deleted entries.
			 */
			if (dentp->deName[0] == SLOT_DELETED) {
				chksum = -1;
				wlast = -1;
				continue;
			}

			/*
			 * Handle Win95 long directory entries
			 */
			if (dentp->deAttributes == ATTR_WIN95) {
				struct winentry *wep;
				if (pmp->pm_flags & MSDOSFSMNT_SHORTNAME)
					continue;
				wep = (struct winentry *)dentp;
				chksum = win2unixfn(wep, &dirbuf, chksum);
				if (wep->weCnt & WIN_LAST)
					wlast = offset;
				continue;
			}

			/*
			 * Skip volume labels
			 */
			if (dentp->deAttributes & ATTR_VOLUME) {
				chksum = -1;
				wlast = -1;
				continue;
			}

			/*
			 * This computation of d_fileno must match
			 * the computation of va_fileid in
			 * msdosfs_getattr.
			 */
			fileno = getushort(dentp->deStartCluster);
			if (FAT32(pmp))
			    fileno |= getushort(dentp->deHighClust) << 16;

			if (dentp->deAttributes & ATTR_DIRECTORY) {
				/* Special-case root */
				if (fileno == MSDOSFSROOT)  {
					fileno = FAT32(pmp) ?
					    pmp->pm_rootdirblk : 1;
				}

				dirbuf.d_fileno = fileno;
				dirbuf.d_type = DT_DIR;
			} else {
				if (getulong(dentp->deFileSize) == 0) {
					uint64_t fileno64;

					fileno64 = (cn == MSDOSFSROOT) ?
					    roottobn(pmp, 0) : cntobn(pmp, cn);

					fileno64 *= dirsperblk;
					fileno64 += dentp -
					    (struct direntry *)bp->b_data;

					fileno = fileidhash(fileno64);
				}

				dirbuf.d_fileno = fileno;
				dirbuf.d_type = DT_REG;
			}

			if (chksum != winChksum(dentp->deName))
				dirbuf.d_namlen = dos2unixfn(dentp->deName,
				    (u_char *)dirbuf.d_name,
				    pmp->pm_flags & MSDOSFSMNT_SHORTNAME);
			else
				dirbuf.d_name[dirbuf.d_namlen] = 0;
			chksum = -1;
			dirbuf.d_reclen = DIRENT_SIZE(&dirbuf);
			if (uio->uio_resid < dirbuf.d_reclen) {
				brelse(bp);
				/* Remember long-name offset. */
				if (wlast != -1)
					offset = wlast;
				goto out;
			}
			wlast = -1;
			error = uiomove((caddr_t) &dirbuf,
					dirbuf.d_reclen, uio);
			if (error) {
				brelse(bp);
				goto out;
			}
			if (cookies) {
				*cookies++ = offset + sizeof(struct direntry);
				if (--ncookies <= 0) {
					brelse(bp);
					goto out;
				}
			}
		}
		brelse(bp);
	}

out:
	/* Subtract unused cookies */
	if (ap->a_ncookies)
		*ap->a_ncookies -= ncookies;

	uio->uio_offset = offset;
	uio->uio_resid += lost;
	if (dep->de_FileSize - (offset - bias) <= 0)
		*ap->a_eofflag = 1;
	else
		*ap->a_eofflag = 0;
	return (error);
}
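The `bias' logic above deserves a concrete illustration: FAT root directories store no "." or ".." on disk, so readdir fabricates two 32-byte entries and shifts every on-disk entry by that amount. A standalone sketch of the offset mapping (32 being sizeof(struct direntry) on FAT):

#include <stdio.h>

#define DIRENTRY_SIZE 32		/* sizeof(struct direntry) on FAT */

int main(void)
{
	long bias = 2 * DIRENTRY_SIZE;	/* the synthesized "." and ".." */
	long offset;

	for (offset = 0; offset <= 3 * DIRENTRY_SIZE; offset += DIRENTRY_SIZE)
		printf("uio offset %3ld -> %s\n", offset,
		    offset < bias ? "synthesized . or .. entry"
				  : "on-disk entry at offset - bias");
	return 0;
}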
Example #9
0
int
msdosfs_read(void *v)
{
	struct vop_read_args *ap = v;
	int error = 0;
	uint32_t diff;
	int blsize;
	int isadir;
	uint32_t n;
	long on;
	daddr64_t lbn, rablock, rablkno;
	struct buf *bp;
	struct vnode *vp = ap->a_vp;
	struct denode *dep = VTODE(vp);
	struct msdosfsmount *pmp = dep->de_pmp;
	struct uio *uio = ap->a_uio;

	/*
	 * If they didn't ask for any data, then we are done.
	 */
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)
		return (EINVAL);

	isadir = dep->de_Attributes & ATTR_DIRECTORY;
	do {
		if (uio->uio_offset >= dep->de_FileSize)
			return (0);

		lbn = de_cluster(pmp, uio->uio_offset);
		on = uio->uio_offset & pmp->pm_crbomask;
		n = min((uint32_t) (pmp->pm_bpcluster - on), uio->uio_resid);

		/*
		 * de_FileSize is uint32_t, and we know that uio_offset <
		 * de_FileSize, so uio->uio_offset < 2^32.  Therefore
		 * the cast to uint32_t on the next line is safe.
		 */
		diff = dep->de_FileSize - (uint32_t)uio->uio_offset;
		if (diff < n)
			n = diff;

		/* convert cluster # to block # if a directory */
		if (isadir) {
			error = pcbmap(dep, lbn, &lbn, 0, &blsize);
			if (error)
				return (error);
		}
		/*
		 * If we are operating on a directory file then be sure to
		 * do i/o with the vnode for the filesystem instead of the
		 * vnode for the directory.
		 */
		if (isadir) {
			error = bread(pmp->pm_devvp, lbn, blsize, NOCRED, &bp);
		} else {
			rablock = lbn + 1;
			rablkno = de_cn2bn(pmp, rablock);
			if (dep->de_lastr + 1 == lbn &&
			    de_cn2off(pmp, rablock) < dep->de_FileSize)
				error = breadn(vp, de_cn2bn(pmp, lbn),
				    pmp->pm_bpcluster, &rablkno,
				    &pmp->pm_bpcluster, 1, NOCRED, &bp);
			else
				error = bread(vp, de_cn2bn(pmp, lbn),
				    pmp->pm_bpcluster, NOCRED, &bp);
			dep->de_lastr = lbn;
		}
		if (error) {
			brelse(bp);
			return (error);
		}
		n = min(n, pmp->pm_bpcluster - bp->b_resid);
		error = uiomove(bp->b_data + on, (int) n, uio);
		brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n != 0);
	if (!isadir && !(vp->v_mount->mnt_flag & MNT_NOATIME))
		dep->de_flag |= DE_ACCESS;
	return (error);
}
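The per-iteration arithmetic above splits the file offset into a cluster number and a byte offset within that cluster; pm_crbomask is assumed to be pm_bpcluster - 1, which holds because cluster sizes are powers of two. A tiny standalone version:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t bpcluster = 4096;		/* hypothetical cluster size */
	uint32_t crbomask = bpcluster - 1;	/* assumed pm_crbomask */
	uint64_t off = 10000;

	printf("offset %llu -> cluster %llu, offset-in-cluster %llu\n",
	    (unsigned long long)off,
	    (unsigned long long)(off / bpcluster),
	    (unsigned long long)(off & crbomask));
	return 0;
}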
Example #10
0
static int simplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct buffer_head *bh;
	struct simplefs_super *rsb;
	struct simplefs_super_info *sbi;
	struct inode *root;
	int i, j, cnt, ret = 0;

	printk(KERN_INFO "simplefs_fill_super\n");
	if (!(bh = sb_bread(sb, SIMPLEFS_SUPER_BNO))) {
		printk(KERN_ERR "Simplefs: unable to read superblock\n");
		return -EIO;
	}
	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi) {
		ret = -ENOMEM;
		goto out;
	}
	sbi->s_sb = bh;
	memcpy(&sbi->raw_super, bh->b_data, sizeof(sbi->raw_super));
	sb->s_fs_info = sbi;
	sb->s_blocksize = SIMPLEFS_BLOCKSIZE;
	sb->s_flags = sb->s_flags & ~MS_POSIXACL;

	/*
	 * set up enough so that it can read an inode
	 */
	sb->s_op = &simplefs_super_operations;
	root = simplefs_iget(sb, SIMPLEFS_ROOT_INO);
	if (!root) {
		printk(KERN_ERR "Simplefs: corrupt root inode\n");
		ret = -EINVAL;
		goto failed_root;
	}
	printk(KERN_INFO "simplefs_fill_super -> simplefs_iget ok: %ld\n", root->i_ino);
	
	sb->s_root = d_alloc_root(root);
	if (!sb->s_root) {
		iput(root);
		printk(KERN_ERR "Simplefs: get root dentry failed\n");
		ret = -ENOMEM;
		goto failed_root;
	}
	printk(KERN_INFO "simplefs_fill_super -> d_alloc_root ok\n");

	
	cnt = sbi->raw_super.s_inode_bitmap_blknr + sbi->raw_super.s_block_bitmap_blknr;
	sbi->s_bitmaps = kzalloc(sizeof(struct buffer_head *) * cnt, GFP_KERNEL);
	if (!sbi->s_bitmaps) {
		ret = -ENOMEM;
		goto failed_bitmap;
	}
	for (i = 0, j = 0; i < cnt; i++) {
		if (!(bh = bitmap_load(sb, SIMPLEFS_SUPER_BNO + 1 + i))) {
			ret = -EIO;
			goto failed_load;
		}
		sbi->s_bitmaps[j++] = bh;
		printk(KERN_INFO "simplefs_fill_super bitmap ok:%d -> %d\n", cnt, i);
	}
	rsb = &sbi->raw_super;
	printk("fill super ok: (inode %d %d %d) (block %d %d %d)\n",
	       rsb->s_inode_bitmap_blknr, rsb->s_inode_blknr, rsb->s_free_inodes_count,
	       rsb->s_block_bitmap_blknr, rsb->s_block_blknr, rsb->s_free_blocks_count);
	return 0;
 failed_load:
	for (i = 0; i < j; i++)
		brelse(sbi->s_bitmaps[i]);
	kfree(sbi->s_bitmaps);
	bh = sbi->s_sb;	/* the loop reused bh; release the super block below */
 failed_bitmap:
	dput(sb->s_root);
 failed_root:
	kfree(sbi);
	sb->s_fs_info = NULL;
 out:
	brelse(bh);
	printk("simplefs get sb failed.\n");
	return ret;
}
Example #11
0
/*
 * Indirect blocks are now on the vnode for the file.  They are given negative
 * logical block numbers.  Indirect blocks are addressed by the negative
 * address of the first data block to which they point.  Double indirect blocks
 * are addressed by one less than the address of the first indirect block to
 * which they point.  Triple indirect blocks are addressed by one less than
 * the address of the first double indirect block to which they point.
 *
 * ext2_bmaparray does the bmap conversion, and if requested returns the
 * array of logical blocks which must be traversed to get to a block.
 * Each entry contains the offset into that block that gets you to the
 * next block and the disk address of the block (if it is assigned).
 */
static
int
ext2_bmaparray(struct vnode *vp, ext2_daddr_t bn, ext2_daddr_t *bnp,
	      struct indir *ap, int *nump, int *runp, int *runb)
{
	struct inode *ip;
	struct buf *bp;
	struct ext2mount *ump;
	struct mount *mp;
	struct ext2_sb_info *fs;
	struct indir a[NIADDR+1], *xap;
	ext2_daddr_t daddr;
	long metalbn;
	int error, maxrun, num;

	ip = VTOI(vp);
	mp = vp->v_mount;
	ump = VFSTOEXT2(mp);
	fs = ip->i_e2fs;
#ifdef DIAGNOSTIC
	if ((ap != NULL && nump == NULL) || (ap == NULL && nump != NULL))
		panic("ext2_bmaparray: invalid arguments");
#endif

	if (runp) {
		*runp = 0;
	}

	if (runb) {
		*runb = 0;
	}

	maxrun = mp->mnt_iosize_max / mp->mnt_stat.f_iosize - 1;

	xap = ap == NULL ? a : ap;
	if (!nump)
		nump = &num;
	error = ext2_getlbns(vp, bn, xap, nump);
	if (error)
		return (error);

	num = *nump;
	if (num == 0) {
		*bnp = blkptrtodb(ump, ip->i_db[bn]);
		if (*bnp == 0)
			*bnp = -1;
		else if (runp) {
			daddr_t bnb = bn;
			for (++bn; bn < NDADDR && *runp < maxrun &&
			    is_sequential(ump, ip->i_db[bn - 1], ip->i_db[bn]);
			    ++bn, ++*runp);
			bn = bnb;
			if (runb && (bn > 0)) {
				for (--bn; (bn >= 0) && (*runb < maxrun) &&
					is_sequential(ump, ip->i_db[bn],
						ip->i_db[bn+1]);
						--bn, ++*runb);
			}
		}
		return (0);
	}


	/* Get disk address out of indirect block array */
	daddr = ip->i_ib[xap->in_off];

	for (bp = NULL, ++xap; --num; ++xap) {
		/*
		 * Exit the loop if there is no disk address assigned yet and
		 * the indirect block isn't in the cache, or if we were
		 * looking for an indirect block and we've found it.
		 */

		metalbn = xap->in_lbn;
		if ((daddr == 0 &&
		     !findblk(vp, dbtodoff(fs, metalbn), FINDBLK_TEST)) ||
		    metalbn == bn) {
			break;
		}
		/*
		 * If we get here, we've either got the block in the cache
		 * or we have a disk address for it, go fetch it.
		 */
		if (bp)
			bqrelse(bp);

		xap->in_exists = 1;
		bp = getblk(vp, lblktodoff(fs, metalbn),
			    mp->mnt_stat.f_iosize, 0, 0);
		if ((bp->b_flags & B_CACHE) == 0) {
#ifdef DIAGNOSTIC
			if (!daddr)
				panic("ext2_bmaparray: indirect block not in cache");
#endif
			/*
			 * This runs through ext2_strategy using bio2 to
			 * cache the disk offset, then comes back through
			 * bio1.  So we want to wait on bio1
			 */
			bp->b_bio1.bio_done = biodone_sync;
			bp->b_bio1.bio_flags |= BIO_SYNC;
			bp->b_bio2.bio_offset = fsbtodoff(fs, daddr);
			bp->b_flags &= ~(B_INVAL|B_ERROR);
			bp->b_cmd = BUF_CMD_READ;
			vfs_busy_pages(bp->b_vp, bp);
			vn_strategy(bp->b_vp, &bp->b_bio1);
			error = biowait(&bp->b_bio1, "biord");
			if (error) {
				brelse(bp);
				return (error);
			}
		}

		daddr = ((ext2_daddr_t *)bp->b_data)[xap->in_off];
		if (num == 1 && daddr && runp) {
			for (bn = xap->in_off + 1;
			    bn < MNINDIR(ump) && *runp < maxrun &&
			    is_sequential(ump,
			    ((ext2_daddr_t *)bp->b_data)[bn - 1],
			    ((ext2_daddr_t *)bp->b_data)[bn]);
			    ++bn, ++*runp);
			bn = xap->in_off;
			if (runb && bn) {
				for(--bn; bn >= 0 && *runb < maxrun &&
			    		is_sequential(ump, ((daddr_t *)bp->b_data)[bn],
					    ((daddr_t *)bp->b_data)[bn+1]);
			    		--bn, ++*runb);
			}
		}
	}
	if (bp)
		bqrelse(bp);

	daddr = blkptrtodb(ump, daddr);
	*bnp = daddr == 0 ? -1 : daddr;
	return (0);
}
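The negative-lbn convention described in the comment above ext2_bmaparray() can be made concrete. With a hypothetical geometry of 12 direct pointers and NINDIR = 1024, the metadata addresses come out as below; the sketch only evaluates the rule stated in the comment, and real values depend on the filesystem.

#include <stdio.h>

int main(void)
{
	long ndaddr = 12, nindir = 1024;	/* hypothetical geometry */
	long first_single = ndaddr;		/* first single-indirect data lbn */
	long first_double = ndaddr + nindir;
	long first_triple = ndaddr + nindir + nindir * nindir;

	/* indirect: negated address of the first data block it covers;
	 * each further level is one less than its first child's address */
	printf("single indirect block -> lbn %ld\n", -first_single);
	printf("double indirect block -> lbn %ld\n", -(first_double + 1));
	printf("triple indirect block -> lbn %ld\n", -(first_triple + 2));
	return 0;
}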
Example #12
0
static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_sbd *sdp = GFS2_SB(dir);
	struct inode *inode = old_dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder ghs[2];
	struct buffer_head *dibh;
	int alloc_required;
	int error;

	if (S_ISDIR(inode->i_mode))
		return -EPERM;

	error = gfs2_rs_alloc(dip);
	if (error)
		return error;

	gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);

	error = gfs2_glock_nq(ghs); /* parent */
	if (error)
		goto out_parent;

	error = gfs2_glock_nq(ghs + 1); /* child */
	if (error)
		goto out_child;

	error = -ENOENT;
	if (inode->i_nlink == 0)
		goto out_gunlock;

	error = gfs2_permission(dir, MAY_WRITE | MAY_EXEC);
	if (error)
		goto out_gunlock;

	error = gfs2_dir_check(dir, &dentry->d_name, NULL);
	switch (error) {
	case -ENOENT:
		break;
	case 0:
		error = -EEXIST;
	default:
		goto out_gunlock;
	}

	error = -EINVAL;
	if (!dip->i_inode.i_nlink)
		goto out_gunlock;
	error = -EFBIG;
	if (dip->i_entries == (u32)-1)
		goto out_gunlock;
	error = -EPERM;
	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		goto out_gunlock;
	error = -EINVAL;
	if (!ip->i_inode.i_nlink)
		goto out_gunlock;
	error = -EMLINK;
	if (ip->i_inode.i_nlink == (u32)-1)
		goto out_gunlock;

	alloc_required = error = gfs2_diradd_alloc_required(dir, &dentry->d_name);
	if (error < 0)
		goto out_gunlock;
	error = 0;

	if (alloc_required) {
		error = gfs2_quota_lock_check(dip);
		if (error)
			goto out_gunlock;

		error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres, 0);
		if (error)
			goto out_gunlock_q;

		error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
					 gfs2_rg_blocks(dip, sdp->sd_max_dirres) +
					 2 * RES_DINODE + RES_STATFS +
					 RES_QUOTA, 0);
		if (error)
			goto out_ipres;
	} else {
		error = gfs2_trans_begin(sdp, 2 * RES_DINODE + RES_LEAF, 0);
		if (error)
			goto out_ipres;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out_end_trans;

	error = gfs2_dir_add(dir, &dentry->d_name, ip);
	if (error)
		goto out_brelse;

	gfs2_trans_add_meta(ip->i_gl, dibh);
	inc_nlink(&ip->i_inode);
	ip->i_inode.i_ctime = CURRENT_TIME;
	ihold(inode);
	d_instantiate(dentry, inode);
	mark_inode_dirty(inode);

out_brelse:
	brelse(dibh);
out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	if (alloc_required)
		gfs2_inplace_release(dip);
out_gunlock_q:
	if (alloc_required)
		gfs2_quota_unlock(dip);
out_gunlock:
	gfs2_glock_dq(ghs + 1);
out_child:
	gfs2_glock_dq(ghs);
out_parent:
	gfs2_holder_uninit(ghs);
	gfs2_holder_uninit(ghs + 1);
	return error;
}
Example #13
0
static int
fuse_write_biobackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh, int ioflag)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct buf *bp;
	daddr_t lbn;
	int bcount;
	int n, on, err = 0;

	const int biosize = fuse_iosize(vp);

	KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode"));
	FS_DEBUG("resid=%zx offset=%jx fsize=%jx\n",
	    uio->uio_resid, uio->uio_offset, fvdat->filesize);
	if (vp->v_type != VREG)
		return (EIO);
	if (uio->uio_offset < 0)
		return (EINVAL);
	if (uio->uio_resid == 0)
		return (0);
	if (ioflag & IO_APPEND)
		uio_setoffset(uio, fvdat->filesize);

	/*
         * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
         * would exceed the local maximum per-file write commit size when
         * combined with those, we must decide whether to flush,
         * go synchronous, or return err.  We don't bother checking
         * IO_UNIT -- we just make all writes atomic anyway, as there's
         * no point optimizing for something that really won't ever happen.
         */
	do {
		if (fuse_isdeadfs(vp)) {
			err = ENXIO;
			break;
		}
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);
		n = MIN((unsigned)(biosize - on), uio->uio_resid);

		FS_DEBUG2G("lbn %ju, on %d, n %d, uio offset %ju, uio resid %zd\n",
			(uintmax_t)lbn, on, n, 
			(uintmax_t)uio->uio_offset, uio->uio_resid);

again:
		/*
	         * Handle direct append and file extension cases, calculate
	         * unaligned buffer size.
	         */
		if (uio->uio_offset == fvdat->filesize && n) {
			/*
	                 * Get the buffer (in its pre-append state to maintain
	                 * B_CACHE if it was previously set).  Resize the
	                 * nfsnode after we have locked the buffer to prevent
	                 * readers from reading garbage.
	                 */
			bcount = on;
			FS_DEBUG("getting block from OS, bcount %d\n", bcount);
			bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);

			if (bp != NULL) {
				long save;

				err = fuse_vnode_setsize(vp, cred, 
							 uio->uio_offset + n);
				if (err) {
					brelse(bp);
					break;
				}
				save = bp->b_flags & B_CACHE;
				bcount += n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
			}
		} else {
			/*
	                 * Obtain the locked cache block first, and then
	                 * adjust the file's size as appropriate.
	                 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < fvdat->filesize) {
				if ((off_t)(lbn + 1) * biosize < fvdat->filesize)
					bcount = biosize;
				else
					bcount = fvdat->filesize - 
					  (off_t)lbn *biosize;
			}
			FS_DEBUG("getting block from OS, bcount %d\n", bcount);
			bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);
			if (bp && uio->uio_offset + n > fvdat->filesize) {
				err = fuse_vnode_setsize(vp, cred, 
							 uio->uio_offset + n);
				if (err) {
					brelse(bp);
					break;
				}
			}
		}

		if (!bp) {
			err = EINTR;
			break;
		}
		/*
	         * Issue a READ if B_CACHE is not set.  In special-append
	         * mode, B_CACHE is based on the buffer prior to the write
	         * op and is typically set, avoiding the read.  If a read
	         * is required in special append mode, the server will
	         * probably send us a short-read since we extended the file
	         * on our end, resulting in b_resid == 0 and, thusly,
	         * B_CACHE getting set.
	         *
	         * We can also avoid issuing the read if the write covers
	         * the entire buffer.  We have to make sure the buffer state
	         * is reasonable in this case since we will not be initiating
	         * I/O.  See the comments in kern/vfs_bio.c's getblk() for
	         * more information.
	         *
	         * B_CACHE may also be set due to the buffer being cached
	         * normally.
	         */

		if (on == 0 && n == bcount) {
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}
		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			fuse_io_strategy(vp, bp);
			if ((err = bp->b_error)) {
				brelse(bp);
				break;
			}
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);

		/*
	         * If dirtyend exceeds file size, chop it down.  This should
	         * not normally occur but there is an append race where it
	         * might occur XXX, so we log it.
	         *
	         * If the chopping creates a reverse-indexed or degenerate
	         * situation with dirtyoff/end, we 0 both of them.
	         */

		if (bp->b_dirtyend > bcount) {
			FS_DEBUG("FUSE append race @%lx:%d\n",
			    (long)bp->b_blkno * biosize,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}
		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
	         * If the new write will leave a contiguous dirty
	         * area, just update the b_dirtyoff and b_dirtyend,
	         * otherwise force a write rpc of the old dirty area.
	         *
	         * While it is possible to merge discontiguous writes due to
	         * our having a B_CACHE buffer ( and thus valid read data
	         * for the hole), we don't because it could lead to
	         * significant cache coherency problems with multiple clients,
	         * especially if locking is implemented later on.
	         *
	         * as an optimization we could theoretically maintain
	         * a linked list of discontinuous areas, but we would still
	         * have to commit them separately so there isn't much
	         * advantage to it except perhaps a bit of asynchronization.
	         */

		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			/*
	                 * Yes, we mean it. Write out everything to "storage"
	                 * immediately, without hesitation. (Apart from other
	                 * reasons: the only way to know if a write is valid
	                 * is if it's actually written out.)
	                 */
			bwrite(bp);
			if (bp->b_error == EINTR) {
				err = EINTR;
				break;
			}
			goto again;
		}
		err = uiomove((char *)bp->b_data + on, n, uio);

		/*
	         * Since this block is being modified, it must be written
	         * again and not just committed.  Since write clustering does
	         * not work for the stage 1 data write, only the stage 2
	         * commit rpc, we have to clear B_CLUSTEROK as well.
	         */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		if (err) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = err;
			brelse(bp);
			break;
		}
		/*
	         * Only update dirtyoff/dirtyend if not a degenerate
	         * condition.
	         */
		if (n) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = MIN(on, bp->b_dirtyoff);
				bp->b_dirtyend = MAX((on + n), bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on + n;
			}
			vfs_bio_set_valid(bp, on, n);
		}
		err = bwrite(bp);
		if (err)
			break;
	} while (uio->uio_resid > 0 && n > 0);

	if (fuse_sync_resize && (fvdat->flag & FN_SIZECHANGE) != 0)
		fuse_vnode_savesize(vp, cred);

	return (err);
}
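The dirty-region policy in the large comment above boils down to two small operations: decide whether the pending dirty range must be flushed before the new write, and otherwise merge the two ranges. A sketch of just that logic, detached from struct buf:

#include <stdbool.h>

struct dirty {
	int off, end;		/* dirty range [off, end); end == 0 means clean */
};

/* Same test as the bwrite() branch above: flush first when the new
 * write [on, on + n) would leave a discontiguous dirty area. */
bool must_flush_first(const struct dirty *d, int on, int n)
{
	return d->end > 0 && (on > d->end || on + n < d->off);
}

/* Otherwise the ranges stay contiguous and can simply be merged. */
void merge_dirty(struct dirty *d, int on, int n)
{
	if (d->end > 0) {
		d->off = on < d->off ? on : d->off;
		d->end = on + n > d->end ? on + n : d->end;
	} else {
		d->off = on;
		d->end = on + n;
	}
}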
Example #14
0
static int
fuse_read_biobackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh)
{
	struct buf *bp;
	daddr_t lbn;
	int bcount;
	int err = 0, n = 0, on = 0;
	off_t filesize;

	const int biosize = fuse_iosize(vp);

	FS_DEBUG("resid=%zx offset=%jx fsize=%jx\n",
	    uio->uio_resid, uio->uio_offset, VTOFUD(vp)->filesize);

	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)
		return (EINVAL);

	bcount = MIN(MAXBSIZE, biosize);
	filesize = VTOFUD(vp)->filesize;

	do {
		if (fuse_isdeadfs(vp)) {
			err = ENXIO;
			break;
		}
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);

		FS_DEBUG2G("biosize %d, lbn %d, on %d\n", biosize, (int)lbn, on);

		/*
	         * Obtain the buffer cache block.  Figure out the buffer size
	         * when we are at EOF.  If we are modifying the size of the
	         * buffer based on an EOF condition we need to hold
	         * nfs_rslock() through obtaining the buffer to prevent
	         * a potential writer-appender from messing with n_size.
	         * Otherwise we may accidentally truncate the buffer and
	         * lose dirty data.
	         *
	         * Note that bcount is *not* DEV_BSIZE aligned.
	         */
		if ((off_t)lbn * biosize >= filesize) {
			bcount = 0;
		} else if ((off_t)(lbn + 1) * biosize > filesize) {
			bcount = filesize - (off_t)lbn *biosize;
		}
		bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);

		if (!bp)
			return (EINTR);

		/*
	         * If B_CACHE is not set, we must issue the read.  If this
	         * fails, we return an error.
	         */

		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			err = fuse_io_strategy(vp, bp);
			if (err) {
				brelse(bp);
				return (err);
			}
		}
		/*
	         * on is the offset into the current bp.  Figure out how many
	         * bytes we can copy out of the bp.  Note that bcount is
	         * NOT DEV_BSIZE aligned.
	         *
	         * Then figure out how many bytes we can copy into the uio.
	         */

		n = 0;
		if (on < bcount)
			n = MIN((unsigned)(bcount - on), uio->uio_resid);
		if (n > 0) {
			FS_DEBUG2G("feeding buffeater with %d bytes of buffer %p,"
				" saying %d was asked for\n",
				n, bp->b_data + on, n + (int)bp->b_resid);
			err = uiomove(bp->b_data + on, n, uio);
		}
		brelse(bp);
		FS_DEBUG2G("end of turn, err %d, uio->uio_resid %zd, n %d\n",
		    err, uio->uio_resid, n);
	} while (err == 0 && uio->uio_resid > 0 && n > 0);

	return (err);
}
Example #15
0
static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *next_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 next, prev;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; 
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr);
		goto out_cp;
	}
	if (!nilfs_checkpoint_snapshot(cp)) {
		ret = 0;
		kunmap_atomic(kaddr);
		goto out_cp;
	}

	list = &cp->cp_snapshot_list;
	next = le64_to_cpu(list->ssl_next);
	prev = le64_to_cpu(list->ssl_prev);
	kunmap_atomic(kaddr);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	if (next != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0,
							&next_bh);
		if (ret < 0)
			goto out_header;
	} else {
		next_bh = header_bh;
		get_bh(next_bh);
	}
	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_next;
	} else {
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	kaddr = kmap_atomic(next_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, next, next_bh, kaddr);
	list->ssl_prev = cpu_to_le64(prev);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(prev_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(next);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
	nilfs_checkpoint_clear_snapshot(cp);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(header_bh->b_page);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, -1);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(next_bh);
	mark_buffer_dirty(prev_bh);
	mark_buffer_dirty(cp_bh);
	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_next:
	brelse(next_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
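Stripped of the buffer_head and kmap plumbing, the snapshot removal above is a plain unlink from a circular doubly linked list in which checkpoint number 0 (the header) acts as the sentinel. In-memory form:

struct snap {
	struct snap *prev, *next;	/* the list header is its own sentinel */
};

/* Unlink cp, mirroring the next_bh/prev_bh pointer updates above;
 * pointing cp at itself stands in for zeroing its ssl fields. */
void snap_unlink(struct snap *cp)
{
	cp->next->prev = cp->prev;
	cp->prev->next = cp->next;
	cp->next = cp->prev = cp;
}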
Example #16
0
/*
 * Write data to a file or directory.
 */
int
msdosfs_write(void *v)
{
	struct vop_write_args *ap = v;
	int n;
	int croffset;
	int resid;
	uint32_t osize;
	int error = 0;
	uint32_t count, lastcn;
	daddr64_t bn;
	struct buf *bp;
	int ioflag = ap->a_ioflag;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct vnode *vp = ap->a_vp;
	struct vnode *thisvp;
	struct denode *dep = VTODE(vp);
	struct msdosfsmount *pmp = dep->de_pmp;
	struct ucred *cred = ap->a_cred;

#ifdef MSDOSFS_DEBUG
	printf("msdosfs_write(vp %08x, uio %08x, ioflag %08x, cred %08x\n",
	    vp, uio, ioflag, cred);
	printf("msdosfs_write(): diroff %d, dirclust %d, startcluster %d\n",
	    dep->de_diroffset, dep->de_dirclust, dep->de_StartCluster);
#endif

	switch (vp->v_type) {
	case VREG:
		if (ioflag & IO_APPEND)
			uio->uio_offset = dep->de_FileSize;
		thisvp = vp;
		break;
	case VDIR:
		return EISDIR;
	default:
		panic("msdosfs_write(): bad file type");
	}

	if (uio->uio_offset < 0)
		return (EINVAL);

	if (uio->uio_resid == 0)
		return (0);

	/* Don't bother to try to write files larger than the f/s limit */
	if (uio->uio_offset + uio->uio_resid > MSDOSFS_FILESIZE_MAX)
		return (EFBIG);

	/*
	 * If they've exceeded their filesize limit, tell them about it.
	 */
	if (p &&
	    ((uio->uio_offset + uio->uio_resid) >
	    p->p_rlimit[RLIMIT_FSIZE].rlim_cur)) {
		psignal(p, SIGXFSZ);
		return (EFBIG);
	}

	/*
	 * If the offset we are starting the write at is beyond the end of
	 * the file, then they've done a seek.  Unix filesystems allow
	 * files with holes in them, DOS doesn't so we must fill the hole
	 * with zeroed blocks.
	 */
	if (uio->uio_offset > dep->de_FileSize) {
		if ((error = deextend(dep, uio->uio_offset, cred)) != 0)
			return (error);
	}

	/*
	 * Remember some values in case the write fails.
	 */
	resid = uio->uio_resid;
	osize = dep->de_FileSize;

	/*
	 * If we write beyond the end of the file, extend it to its ultimate
	 * size ahead of time to hopefully get a contiguous area.
	 */
	if (uio->uio_offset + resid > osize) {
		count = de_clcount(pmp, uio->uio_offset + resid) -
			de_clcount(pmp, osize);
		if ((error = extendfile(dep, count, NULL, NULL, 0)) &&
		    (error != ENOSPC || (ioflag & IO_UNIT)))
			goto errexit;
		lastcn = dep->de_fc[FC_LASTFC].fc_frcn;
	} else
		lastcn = de_clcount(pmp, osize) - 1;

	do {
		if (de_cluster(pmp, uio->uio_offset) > lastcn) {
			error = ENOSPC;
			break;
		}

		bn = de_blk(pmp, uio->uio_offset);
		if ((uio->uio_offset & pmp->pm_crbomask) == 0
		    && (de_blk(pmp, uio->uio_offset + uio->uio_resid) > de_blk(pmp, uio->uio_offset)
			|| uio->uio_offset + uio->uio_resid >= dep->de_FileSize)) {
			/*
			 * If either the whole cluster gets written,
			 * or we write the cluster from its start beyond EOF,
			 * then no need to read data from disk.
			 */
			bp = getblk(thisvp, bn, pmp->pm_bpcluster, 0, 0);
			clrbuf(bp);
			/*
			 * Do the bmap now, since pcbmap needs buffers
			 * for the fat table. (see msdosfs_strategy)
			 */
			if (bp->b_blkno == bp->b_lblkno) {
				error = pcbmap(dep,
					       de_bn2cn(pmp, bp->b_lblkno),
					       &bp->b_blkno, 0, 0);
				if (error)
					bp->b_blkno = -1;
			}
			if (bp->b_blkno == -1) {
				brelse(bp);
				if (!error)
					error = EIO;		/* XXX */
				break;
			}
		} else {
			/*
			 * The block we need to write into exists, so read it in.
			 */
			error = bread(thisvp, bn, pmp->pm_bpcluster,
				      NOCRED, &bp);
			if (error) {
				brelse(bp);
				break;
			}
		}

		croffset = uio->uio_offset & pmp->pm_crbomask;
		n = min(uio->uio_resid, pmp->pm_bpcluster - croffset);
		if (uio->uio_offset + n > dep->de_FileSize) {
			dep->de_FileSize = uio->uio_offset + n;
			uvm_vnp_setsize(vp, dep->de_FileSize);
		}
		uvm_vnp_uncache(vp);
		/*
		 * Should these vnode_pager_* functions be done on dir
		 * files?
		 */

		/*
		 * Copy the data from user space into the buf header.
		 */
		error = uiomove(bp->b_data + croffset, n, uio);

		/*
		 * If they want this synchronous then write it and wait for
		 * it.  Otherwise, if on a cluster boundary write it
		 * asynchronously so we can move on to the next block
		 * without delay.  Otherwise do a delayed write because we
		 * may want to write some more into the block later.
		 */
		if (ioflag & IO_SYNC)
			(void) bwrite(bp);
		else if (n + croffset == pmp->pm_bpcluster)
			bawrite(bp);
		else
			bdwrite(bp);
		dep->de_flag |= DE_UPDATE;
	} while (error == 0 && uio->uio_resid > 0);

	/*
	 * If the write failed and they want us to, truncate the file back
	 * to the size it was before the write was attempted.
	 */
errexit:
	if (error) {
		if (ioflag & IO_UNIT) {
			detrunc(dep, osize, ioflag & IO_SYNC, NOCRED, NULL);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		} else {
			detrunc(dep, dep->de_FileSize, ioflag & IO_SYNC, NOCRED, NULL);
			if (uio->uio_resid != resid)
				error = 0;
		}
	} else if (ioflag & IO_SYNC)
		error = deupdat(dep, 1);
	return (error);
}
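The read-avoidance test in the loop above is worth isolating: a cluster needs to be read before being written only when the write neither starts on a cluster boundary and covers the whole cluster, nor runs from that boundary to (or past) EOF. A sketch under the assumption that pm_crbomask == pm_bpcluster - 1 and that the block/cluster distinction can be ignored:

#include <stdbool.h>
#include <stdint.h>

/* Must the cluster containing the write at [off, off + resid) be read
 * from disk first?  bpcluster is assumed to be a power of two. */
bool need_read(uint32_t off, uint32_t resid, uint32_t fsize,
    uint32_t bpcluster)
{
	if ((off & (bpcluster - 1)) != 0)
		return true;		/* write starts mid-cluster */
	if (off / bpcluster != (off + resid) / bpcluster)
		return false;		/* the whole cluster is overwritten */
	if (off + resid >= fsize)
		return false;		/* written from its start up to old EOF */
	return true;
}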
Example #17
0
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct buffer_head *raw_super_buf;
	struct inode *root;
	long err = -EINVAL;
	int i;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sb, &raw_super, &raw_super_buf);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, BG_GC);

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
	/* parse mount options */
	err = parse_options(sb, (char *)data);
	if (err)
		goto free_sb_buf;

	sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->raw_super_buf = raw_super_buf;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	mutex_init(&sbi->node_write);
	sbi->por_doing = false;
	spin_lock_init(&sbi->stat_lock);

	init_rwsem(&sbi->read_io.io_rwsem);
	sbi->read_io.sbi = sbi;
	sbi->read_io.bio = NULL;
	for (i = 0; i < NR_PAGE_TYPE; i++) {
		init_rwsem(&sbi->write_io[i].io_rwsem);
		sbi->write_io[i].sbi = sbi;
		sbi->write_io[i].bio = NULL;
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_sb_buf;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* sanity checking of checkpoint */
	err = -EINVAL;
	if (sanity_check_ckpt(sbi)) {
		f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
		goto free_cp;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	INIT_LIST_HEAD(&sbi->dir_inode_list);
	spin_lock_init(&sbi->dir_inode_lock);

	init_orphan_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	/* if there are any orphan nodes, free them */
	recover_orphan_inodes(sbi);

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		err = -EINVAL;
		goto free_root_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_root_inode;

	if (f2fs_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

	if (sbi->s_proc)
		proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_info_fops, sb);

	if (test_opt(sbi, DISCARD)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		if (!blk_queue_discard(q))
			f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
	}

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
							"%s", sb->s_id);
	if (err)
		goto free_proc;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		err = recover_fsync_data(sbi);
		if (err)
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%ld", err);
	}

	/*
	 * If the filesystem is not mounted read-only, then
	 * start the gc_thread.
	 */
	if (!(sb->s_flags & MS_RDONLY)) {
		/* After POR, we can run the background GC thread. */
		err = start_gc_thread(sbi);
		if (err)
			goto free_kobj;
	}
	return 0;

free_kobj:
	kobject_del(&sbi->s_kobj);
free_proc:
	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	f2fs_destroy_stats(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	iput(sbi->node_inode);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_cp:
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_sb_buf:
	brelse(raw_super_buf);
free_sbi:
	kfree(sbi);
	return err;
}
Exemple #18
0
/*
 * Renames on files require moving the denode to a new hash queue since the
 * denode's location is used to compute which hash queue to put the file
 * in. Unless it is a rename in place.  For example "mv a b".
 *
 * What follows is the basic algorithm:
 *
 * if (file move) {
 *	if (dest file exists) {
 *		remove dest file
 *	}
 *	if (dest and src in same directory) {
 *		rewrite name in existing directory slot
 *	} else {
 *		write new entry in dest directory
 *		update offset and dirclust in denode
 *		move denode to new hash chain
 *		clear old directory entry
 *	}
 * } else {
 *	directory move
 *	if (dest directory exists) {
 *		if (dest is not empty) {
 *			return ENOTEMPTY
 *		}
 *		remove dest directory
 *	}
 *	if (dest and src in same directory) {
 *		rewrite name in existing entry
 *	} else {
 *		be sure dest is not a child of src directory
 *		write entry in dest directory
 *		update "." and ".." in moved directory
 *		update offset and dirclust in denode
 *		move denode to new hash chain
 *		clear old directory entry for moved directory
 *	}
 * }
 *
 * On entry:
 *	source's parent directory is unlocked
 *	source file or directory is unlocked
 *	destination's parent directory is locked
 *	destination file or directory is locked if it exists
 *
 * On exit:
 *	all denodes should be released
 *
 * Notes:
 * I'm not sure how the memory containing the pathnames pointed at by the
 * componentname structures is freed; there may be some memory bleeding
 * for each rename done.
 */
int
msdosfs_rename(void *v)
{
	struct vop_rename_args *ap = v;
	struct vnode *tvp = ap->a_tvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *fdvp = ap->a_fdvp;
	struct componentname *tcnp = ap->a_tcnp;
	struct componentname *fcnp = ap->a_fcnp;
	struct proc *p = curproc; /* XXX */
	struct denode *ip, *xp, *dp, *zp;
	u_char toname[11], oldname[11];
	uint32_t from_diroffset, to_diroffset;
	u_char to_count;
	int doingdirectory = 0, newparent = 0;
	int error;
	uint32_t cn, pcl;
	daddr64_t bn;
	struct msdosfsmount *pmp;
	struct direntry *dotdotp;
	struct buf *bp;

	pmp = VFSTOMSDOSFS(fdvp->v_mount);

#ifdef DIAGNOSTIC
	if ((tcnp->cn_flags & HASBUF) == 0 ||
	    (fcnp->cn_flags & HASBUF) == 0)
		panic("msdosfs_rename: no name");
#endif
	/*
	 * Check for cross-device rename.
	 */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {
		error = EXDEV;
abortit:
		VOP_ABORTOP(tdvp, tcnp);
		if (tdvp == tvp)
			vrele(tdvp);
		else
			vput(tdvp);
		if (tvp)
			vput(tvp);
		VOP_ABORTOP(fdvp, fcnp);
		vrele(fdvp);
		vrele(fvp);
		return (error);
	}
	
	/*
	 * If source and dest are the same, do nothing.
	 */
	if (tvp == fvp) {
		error = 0;
		goto abortit;
	}

	/* Lock the source vnode before we examine it. */
	if ((error = vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, p)) != 0)
		goto abortit;
	dp = VTODE(fdvp);
	ip = VTODE(fvp);

	/*
	 * Be sure we are not renaming ".", "..", or an alias of ".". This
	 * leads to a crippled directory tree.  It's pretty tough to do a
	 * "ls" or "pwd" with the "." directory entry missing, and "cd .."
	 * doesn't work if the ".." entry is missing.
	 */
	if (ip->de_Attributes & ATTR_DIRECTORY) {
		/*
		 * Avoid ".", "..", and aliases of "." for obvious reasons.
		 */
		if ((fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.') ||
		    dp == ip ||
		    (fcnp->cn_flags & ISDOTDOT) ||
		    (tcnp->cn_flags & ISDOTDOT) ||
		    (ip->de_flag & DE_RENAME)) {
			VOP_UNLOCK(fvp, 0, p);
			error = EINVAL;
			goto abortit;
		}
		ip->de_flag |= DE_RENAME;
		doingdirectory++;
	}

	/*
	 * When the target exists, both the directory
	 * and target vnodes are returned locked.
	 */
	dp = VTODE(tdvp);
	xp = tvp ? VTODE(tvp) : NULL;
	/*
	 * Remember direntry place to use for destination
	 */
	to_diroffset = dp->de_fndoffset;
	to_count = dp->de_fndcnt;

	/*
	 * If ".." must be changed (ie the directory gets a new
	 * parent) then the source directory must not be in the
	 * directory hierarchy above the target, as this would
	 * orphan everything below the source directory. Also
	 * the user must have write permission in the source so
	 * as to be able to change "..". We must repeat the call
	 * to namei, as the parent directory is unlocked by the
	 * call to doscheckpath().
	 */
	error = VOP_ACCESS(fvp, VWRITE, tcnp->cn_cred, tcnp->cn_proc);
	VOP_UNLOCK(fvp, 0, p);
	if (VTODE(fdvp)->de_StartCluster != VTODE(tdvp)->de_StartCluster)
		newparent = 1;
	vrele(fdvp);
	if (doingdirectory && newparent) {
		if (error)	/* write access check above */
			goto bad1;
		if (xp != NULL)
			vput(tvp);
		/*
		 * doscheckpath() vput()'s dp,
		 * so we have to do a relookup afterwards
		 */
		if ((error = doscheckpath(ip, dp)) != 0)
			goto out;
		if ((tcnp->cn_flags & SAVESTART) == 0)
			panic("msdosfs_rename: lost to startdir");
		if ((error = vfs_relookup(tdvp, &tvp, tcnp)) != 0)
			goto out;
		dp = VTODE(tdvp);
		xp = tvp ? VTODE(tvp) : NULL;
	}

	if (xp != NULL) {
		/*
		 * Target must be empty if a directory and have no links
		 * to it. Also, ensure source and target are compatible
		 * (both directories, or both not directories).
		 */
		if (xp->de_Attributes & ATTR_DIRECTORY) {
			if (!dosdirempty(xp)) {
				error = ENOTEMPTY;
				goto bad1;
			}
			if (!doingdirectory) {
				error = ENOTDIR;
				goto bad1;
			}
			cache_purge(tdvp);
		} else if (doingdirectory) {
			error = EISDIR;
			goto bad1;
		}
		if ((error = removede(dp, xp)) != 0)
			goto bad1;
		vput(tvp);
		xp = NULL;
	}

	/*
	 * Convert the filename in tcnp into a dos filename. We copy this
	 * into the denode and directory entry for the destination
	 * file/directory.
	 */
	if ((error = uniqdosname(VTODE(tdvp), tcnp, toname)) != 0)
		goto bad1;

	/*
	 * Since from wasn't locked at various places above,
	 * have to do a relookup here.
	 */
	fcnp->cn_flags &= ~MODMASK;
	fcnp->cn_flags |= LOCKPARENT | LOCKLEAF;
	if ((fcnp->cn_flags & SAVESTART) == 0)
		panic("msdosfs_rename: lost from startdir");
	if (!newparent)
		VOP_UNLOCK(tdvp, 0, p);
	(void) vfs_relookup(fdvp, &fvp, fcnp);
	if (fvp == NULL) {
		/*
		 * From name has disappeared.
		 */
		if (doingdirectory)
			panic("rename: lost dir entry");
		vrele(ap->a_fvp);
		if (newparent)
			VOP_UNLOCK(tdvp, 0, p);
		vrele(tdvp);
		return 0;
	}
	xp = VTODE(fvp);
	zp = VTODE(fdvp);
	from_diroffset = zp->de_fndoffset;

	/*
	 * Ensure that the directory entry still exists and has not
	 * changed till now. If the source is a file the entry may
	 * have been unlinked or renamed. In either case there is
	 * no further work to be done. If the source is a directory
	 * then it cannot have been rmdir'ed or renamed; this is
	 * prohibited by the DE_RENAME flag.
	 */
	if (xp != ip) {
		if (doingdirectory)
			panic("rename: lost dir entry");
		vrele(ap->a_fvp);
		if (newparent)
			VOP_UNLOCK(fdvp, 0, p);
		xp = NULL;
	} else {
		vrele(fvp);
		xp = NULL;

		/*
		 * First write a new entry in the destination
		 * directory and mark the entry in the source directory
		 * as deleted.  Then move the denode to the correct hash
		 * chain for its new location in the filesystem.  And, if
		 * we moved a directory, then update its .. entry to point
		 * to the new parent directory.
		 */
		bcopy(ip->de_Name, oldname, 11);
		bcopy(toname, ip->de_Name, 11);	/* update denode */
		dp->de_fndoffset = to_diroffset;
		dp->de_fndcnt = to_count;
		error = createde(ip, dp, (struct denode **)0, tcnp);
		if (error) {
			bcopy(oldname, ip->de_Name, 11);
			if (newparent)
				VOP_UNLOCK(fdvp, 0, p);
			goto bad;
		}
		ip->de_refcnt++;
		zp->de_fndoffset = from_diroffset;
		if ((error = removede(zp, ip)) != 0) {
			/* XXX should really panic here, fs is corrupt */
			if (newparent)
				VOP_UNLOCK(fdvp, 0, p);
			goto bad;
		}

		cache_purge(fvp);

		if (!doingdirectory) {
			error = pcbmap(dp, de_cluster(pmp, to_diroffset), 0,
				       &ip->de_dirclust, 0);
			if (error) {
				/* XXX should really panic here, fs is corrupt */
				if (newparent)
					VOP_UNLOCK(fdvp, 0, p);
				goto bad;
			}
			if (ip->de_dirclust != MSDOSFSROOT)
				ip->de_diroffset = to_diroffset & pmp->pm_crbomask;
		}
		reinsert(ip);
		if (newparent)
			VOP_UNLOCK(fdvp, 0, p);
	}

	/*
	 * If we moved a directory to a new parent directory, then we must
	 * fixup the ".." entry in the moved directory.
	 */
	if (doingdirectory && newparent) {
		cn = ip->de_StartCluster;
		if (cn == MSDOSFSROOT) {
			/* this should never happen */
			panic("msdosfs_rename: updating .. in root directory?");
		} else
			bn = cntobn(pmp, cn);
		error = bread(pmp->pm_devvp, bn, pmp->pm_bpcluster,
			      NOCRED, &bp);
		if (error) {
			/* XXX should really panic here, fs is corrupt */
			brelse(bp);
			goto bad;
		}
		dotdotp = (struct direntry *)bp->b_data;
		putushort(dotdotp[0].deStartCluster, cn);
		pcl = dp->de_StartCluster;
		if (FAT32(pmp) && pcl == pmp->pm_rootdirblk)
			pcl = 0;
		putushort(dotdotp[1].deStartCluster, pcl);
		if (FAT32(pmp)) {
			putushort(dotdotp[0].deHighClust, cn >> 16);
			putushort(dotdotp[1].deHighClust, pcl >> 16);
		}
		if ((error = bwrite(bp)) != 0) {
			/* XXX should really panic here, fs is corrupt */
			goto bad;
		}
	}
bad:
	VOP_UNLOCK(fvp, 0, p);
bad1:
	if (xp)
		vput(tvp);
	vput(tdvp);
out:
	ip->de_flag &= ~DE_RENAME;
	vrele(fdvp);
	vrele(fvp);
	return (error);
}
Exemple #19
0
/*
 * Write disk label back to device after modification.
 */
static int
l32_writedisklabel(cdev_t dev, struct diskslices *ssp, struct diskslice *sp,
		   disklabel_t lpx)
{
	struct disklabel32 *lp;
	struct disklabel32 *dlp;
	struct buf *bp;
	const char *msg;
	int error = 0;

	lp = lpx.lab32;

	if (lp->d_partitions[RAW_PART].p_offset != 0)
		return (EXDEV);			/* not quite right */

	bp = geteblk((int)lp->d_secsize);
	bp->b_bio1.bio_offset = (off_t)LABELSECTOR32 * lp->d_secsize;
	bp->b_bio1.bio_done = biodone_sync;
	bp->b_bio1.bio_flags |= BIO_SYNC;
	bp->b_bcount = lp->d_secsize;

#if 1
	/*
	 * We read the label first to see if it's there,
	 * in which case we will put ours at the same offset into the block..
	 * (I think this is stupid [Julian])
	 * Note that you can't write a label out over a corrupted label!
	 * (also stupid.. how do you write the first one? by raw writes?)
	 */
	bp->b_flags &= ~B_INVAL;
	bp->b_cmd = BUF_CMD_READ;
	KKASSERT(dkpart(dev) == WHOLE_SLICE_PART);
	dev_dstrategy(dev, &bp->b_bio1);
	error = biowait(&bp->b_bio1, "labrd");
	if (error)
		goto done;
	for (dlp = (struct disklabel32 *)bp->b_data;
	    dlp <= (struct disklabel32 *)
	      ((char *)bp->b_data + lp->d_secsize - sizeof(*dlp));
	    dlp = (struct disklabel32 *)((char *)dlp + sizeof(long))) {
		if (dlp->d_magic == DISKMAGIC32 &&
		    dlp->d_magic2 == DISKMAGIC32 && dkcksum32(dlp) == 0) {
			*dlp = *lp;
			lpx.lab32 = dlp;
			msg = l32_fixlabel(NULL, sp, lpx, TRUE);
			if (msg) {
				error = EINVAL;
			} else {
				bp->b_cmd = BUF_CMD_WRITE;
				bp->b_bio1.bio_done = biodone_sync;
				bp->b_bio1.bio_flags |= BIO_SYNC;
				KKASSERT(dkpart(dev) == WHOLE_SLICE_PART);
				dev_dstrategy(dev, &bp->b_bio1);
				error = biowait(&bp->b_bio1, "labwr");
			}
			goto done;
		}
	}
	error = ESRCH;
done:
#else
	bzero(bp->b_data, lp->d_secsize);
	dlp = (struct disklabel32 *)bp->b_data;
	*dlp = *lp;
	bp->b_flags &= ~B_INVAL;
	bp->b_cmd = BUF_CMD_WRITE;
	bp->b_bio1.bio_done = biodone_sync;
	bp->b_bio1.bio_flags |= BIO_SYNC;
	BUF_STRATEGY(bp, 1);
	error = biowait(&bp->b_bio1, "labwr");
#endif
	bp->b_flags |= B_INVAL | B_AGE;
	brelse(bp);
	return (error);
}
Exemple #20
0
int
readliflabel(struct buf *bp, void (*strat)(struct buf *),
    struct disklabel *lp, int *partoffp, int spoofonly)
{
	struct buf *dbp = NULL;
	struct lifdir *p;
	struct lifvol *lvp;
	int error = 0;
	int fsoff = 0, openbsdstart = MAXLIFSPACE, i;

	/* read LIF volume header */
	bp->b_blkno = btodb(LIF_VOLSTART);
	bp->b_bcount = lp->d_secsize;
	CLR(bp->b_flags, B_READ | B_WRITE | B_DONE);
	SET(bp->b_flags, B_BUSY | B_READ | B_RAW);
	(*strat)(bp);
	if (biowait(bp))
		return bp->b_error;

	lvp = (struct lifvol *)bp->b_data;
	if (lvp->vol_id != LIF_VOL_ID) {
		error = EINVAL;		/* no LIF volume header */
		goto done;
	}

	dbp = geteblk(LIF_DIRSIZE);
	dbp->b_dev = bp->b_dev;

	/* read LIF directory */
	dbp->b_blkno = lifstodb(lvp->vol_addr);
	dbp->b_bcount = lp->d_secsize;
	CLR(dbp->b_flags, B_READ | B_WRITE | B_DONE);
	SET(dbp->b_flags, B_BUSY | B_READ | B_RAW);
	(*strat)(dbp);
	if (biowait(dbp)) {
		error = dbp->b_error;
		goto done;
	}

	/* scan for LIF_DIR_FS dir entry */
	for (i=0, p=(struct lifdir *)dbp->b_data; i < LIF_NUMDIR; p++, i++) {
		if (p->dir_type == LIF_DIR_FS || p->dir_type == LIF_DIR_HPLBL)
			break;
	}

	if (p->dir_type == LIF_DIR_FS) {
		fsoff = lifstodb(p->dir_addr);
		openbsdstart = 0;
		goto finished;
	}

	/* Only came here to find the offset... */
	if (partoffp)
		goto finished;

	if (p->dir_type == LIF_DIR_HPLBL) {
		struct hpux_label *hl;
		struct partition *pp;
		u_int8_t fstype;
		int i;

		/* read LIF directory */
		dbp->b_blkno = lifstodb(p->dir_addr);
		dbp->b_bcount = lp->d_secsize;
		CLR(dbp->b_flags, B_READ | B_WRITE | B_DONE);
		SET(dbp->b_flags, B_BUSY | B_READ | B_RAW);
		(*strat)(dbp);

		if (biowait(dbp)) {
			error = dbp->b_error;
			goto done;
		}

		hl = (struct hpux_label *)dbp->b_data;
		if (hl->hl_magic1 != hl->hl_magic2 ||
		    hl->hl_magic != HPUX_MAGIC || hl->hl_version != 1) {
			error = EINVAL;	 /* HPUX label magic mismatch */
			goto done;
		}

		lp->d_bbsize = 8192;
		lp->d_sbsize = 8192;
		for (i = 0; i < MAXPARTITIONS; i++) {
			DL_SETPSIZE(&lp->d_partitions[i], 0);
			DL_SETPOFFSET(&lp->d_partitions[i], 0);
			lp->d_partitions[i].p_fstype = 0;
		}

		for (i = 0; i < HPUX_MAXPART; i++) {
			if (!hl->hl_flags[i])
				continue;
			if (hl->hl_flags[i] == HPUX_PART_ROOT) {
				pp = &lp->d_partitions[0];
				fstype = FS_BSDFFS;
			} else if (hl->hl_flags[i] == HPUX_PART_SWAP) {
				pp = &lp->d_partitions[1];
				fstype = FS_SWAP;
			} else if (hl->hl_flags[i] == HPUX_PART_BOOT) {
				pp = &lp->d_partitions[RAW_PART + 1];
				fstype = FS_BSDFFS;
			} else
				continue;

			DL_SETPSIZE(pp, hl->hl_parts[i].hlp_length * 2);
			DL_SETPOFFSET(pp, hl->hl_parts[i].hlp_start * 2);
			pp->p_fstype = fstype;
		}

		DL_SETPSIZE(&lp->d_partitions[RAW_PART], DL_GETDSIZE(lp));
		DL_SETPOFFSET(&lp->d_partitions[RAW_PART], 0);
		lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
		lp->d_npartitions = MAXPARTITIONS;
		lp->d_magic = DISKMAGIC;
		lp->d_magic2 = DISKMAGIC;
		lp->d_version = 1;
		lp->d_checksum = 0;
		lp->d_checksum = dkcksum(lp);
		/* drop through */
	}

finished:
	/* record the OpenBSD partition's placement for the caller */
	if (partoffp)
		*partoffp = fsoff;
	else {
		DL_SETBSTART(lp, openbsdstart);
		DL_SETBEND(lp, DL_GETDSIZE(lp));	/* XXX */
	}

	/* don't read the on-disk label if we are in spoofed-only mode */
	if (spoofonly)
		goto done;

	bp->b_blkno = fsoff + LABELSECTOR;
	bp->b_bcount = lp->d_secsize;
	CLR(bp->b_flags, B_READ | B_WRITE | B_DONE);
	SET(bp->b_flags, B_BUSY | B_READ | B_RAW);
	(*strat)(bp);
	if (biowait(bp)) {
		error = bp->b_error;
		goto done;
	}

	error = checkdisklabel(bp->b_data + LABELOFFSET, lp, openbsdstart,
	    DL_GETDSIZE(lp));	/* XXX */

done:
	if (dbp) {
		dbp->b_flags |= B_INVAL;
		brelse(dbp);
	}
	return (error);
}
Exemple #21
0
int
hpfs_removedirent (
	struct hpfsmount *hpmp,
	lsn_t lsn,
	char *name,
	int namelen,
	int *retp)
{
#if 0
	struct buf *bp;
	dirblk_t *dbp;
	struct hpfsdirent *dep;
	int deoff;
	int error, res, ret;

	dprintf(("hpfs_removedirent(0x%x, %.*s, %d): \n",
		 lsn, namelen, name, namelen));

	error = hpfs_breaddirblk (hpmp, lsn, &bp);
	if (error)
		return (error);

	dbp = (dirblk_t *) bp->b_data;
	deoff = sizeof(dirblk_t);
	dep = DB_DIRENT(dbp);

	while(!(dep->de_flag & DE_END)) {
		dprintf(("no: 0x%x, size: %d, name: %2d:%.*s, flag: 0x%x\n",
			dep->de_fnode, dep->de_size, dep->de_namelen,
			dep->de_namelen, dep->de_name, dep->de_flag));

		res = hpfs_cmpfname(hpmp, name, namelen,
				dep->de_name, dep->de_namelen, dep->de_cpid);
		if (res == 0) {
			if (dep->de_flag & DE_DOWN) {
				/*XXXXXX*/
			} else {
				/* XXX we can copy less */
				bcopy (DE_NEXTDE(dep), dep, DB_BSIZE - deoff - dep->de_reclen);
				dbp->d_freeoff -= dep->de_reclen;
				*retp = 0;
			}
			bdwrite (bp);
			return (0);
		} else if (res < 0)
			break;

		deoff += dep->de_reclen;
		dep = DB_NEXTDE(dep);
	}

	if (dep->de_flag & DE_DOWN) {
		error = hpfs_removede (hpmp, DE_DOWNLSN(dep), name, namelen, &ret);
		if (error) {
			brelse (bp);
			return (error);
		}
		if (ret == 0) {
			if (deoff > sizeof (dirblk_t)) {
			} else if (deoff + dep->de_reclen < dbp->d_freeoff) {
			}
		}
	} else {
		error = ENOENT;
	}

	brelse (bp);
	return (error);
#endif
	return (EOPNOTSUPP);
}
Exemple #22
0
void nilfs_cpfile_put_checkpoint(struct inode *cpfile, __u64 cno,
				 struct buffer_head *bh)
{
	kunmap(bh->b_page);
	brelse(bh);
}
Exemple #23
0
/*
 * expects the suballoc inode to already be locked.
 */
static int ocfs2_free_suballoc_bits(handle_t *handle,
				    struct inode *alloc_inode,
				    struct buffer_head *alloc_bh,
				    unsigned int start_bit,
				    u64 bg_blkno,
				    unsigned int count)
{
	int status = 0;
	u32 tmp_used;
	struct ocfs2_super *osb = OCFS2_SB(alloc_inode->i_sb);
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) alloc_bh->b_data;
	struct ocfs2_chain_list *cl = &fe->id2.i_chain;
	struct buffer_head *group_bh = NULL;
	struct ocfs2_group_desc *group;

	mlog_entry_void();

	if (!OCFS2_IS_VALID_DINODE(fe)) {
		OCFS2_RO_ON_INVALID_DINODE(alloc_inode->i_sb, fe);
		status = -EIO;
		goto bail;
	}
	BUG_ON((count + start_bit) > ocfs2_bits_per_group(cl));

	mlog(0, "%llu: freeing %u bits from group %llu, starting at %u\n",
	     (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno, count,
	     (unsigned long long)bg_blkno, start_bit);

	status = ocfs2_read_block(osb, bg_blkno, &group_bh, OCFS2_BH_CACHED,
				  alloc_inode);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	group = (struct ocfs2_group_desc *) group_bh->b_data;
	status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, group);
	if (status) {
		mlog_errno(status);
		goto bail;
	}
	BUG_ON((count + start_bit) > le16_to_cpu(group->bg_bits));

	status = ocfs2_block_group_clear_bits(handle, alloc_inode,
					      group, group_bh,
					      start_bit, count);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_journal_access(handle, alloc_inode, alloc_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	le32_add_cpu(&cl->cl_recs[le16_to_cpu(group->bg_chain)].c_free,
		     count);
	tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
	fe->id1.bitmap1.i_used = cpu_to_le32(tmp_used - count);

	status = ocfs2_journal_dirty(handle, alloc_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

bail:
	if (group_bh)
		brelse(group_bh);

	mlog_exit(status);
	return status;
}
Exemple #24
0
int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
				    __u64 start,
				    __u64 end)
{
	struct buffer_head *header_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	__u64 cno;
	void *kaddr;
	unsigned long tnicps;
	int ret, ncps, nicps, count, i;

	if (unlikely(start == 0 || start > end)) {
		printk(KERN_ERR "%s: invalid range of checkpoint numbers: "
		       "[%llu, %llu)\n", __func__,
		       (unsigned long long)start, (unsigned long long)end);
		return -EINVAL;
	}

	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_sem;
	tnicps = 0;

	for (cno = start; cno < end; cno += ncps) {
		ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, end);
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				break;
			/* skip hole */
			ret = 0;
			continue;
		}

		kaddr = kmap_atomic(cp_bh->b_page);
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, cno, cp_bh, kaddr);
		nicps = 0;
		for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) {
			WARN_ON(nilfs_checkpoint_snapshot(cp));
			if (!nilfs_checkpoint_invalid(cp)) {
				nilfs_checkpoint_set_invalid(cp);
				nicps++;
			}
		}
		if (nicps > 0) {
			tnicps += nicps;
			mark_buffer_dirty(cp_bh);
			nilfs_mdt_mark_dirty(cpfile);
			if (!nilfs_cpfile_is_in_first(cpfile, cno)) {
				count =
				  nilfs_cpfile_block_sub_valid_checkpoints(
						cpfile, cp_bh, kaddr, nicps);
				if (count == 0) {
					/* make hole */
					kunmap_atomic(kaddr);
					brelse(cp_bh);
					ret =
					  nilfs_cpfile_delete_checkpoint_block(
								   cpfile, cno);
					if (ret == 0)
						continue;
					printk(KERN_ERR
					       "%s: cannot delete block\n",
					       __func__);
					break;
				}
			}
		}

		kunmap_atomic(kaddr);
		brelse(cp_bh);
	}

	if (tnicps > 0) {
		kaddr = kmap_atomic(header_bh->b_page);
		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
						       kaddr);
		le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
		mark_buffer_dirty(header_bh);
		nilfs_mdt_mark_dirty(cpfile);
		kunmap_atomic(kaddr);
	}

	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
Exemple #25
0
static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
				       struct ocfs2_alloc_context *ac,
				       int type,
				       u32 slot)
{
	int status;
	u32 bits_wanted = ac->ac_bits_wanted;
	struct inode *alloc_inode;
	struct buffer_head *bh = NULL;
	struct ocfs2_dinode *fe;
	u32 free_bits;

	mlog_entry_void();

	alloc_inode = ocfs2_get_system_file_inode(osb, type, slot);
	if (!alloc_inode) {
		mlog_errno(-EINVAL);
		return -EINVAL;
	}

	mutex_lock(&alloc_inode->i_mutex);

	status = ocfs2_meta_lock(alloc_inode, &bh, 1);
	if (status < 0) {
		mutex_unlock(&alloc_inode->i_mutex);
		iput(alloc_inode);

		mlog_errno(status);
		return status;
	}

	ac->ac_inode = alloc_inode;

	fe = (struct ocfs2_dinode *) bh->b_data;
	if (!OCFS2_IS_VALID_DINODE(fe)) {
		OCFS2_RO_ON_INVALID_DINODE(alloc_inode->i_sb, fe);
		status = -EIO;
		goto bail;
	}
	if (!(fe->i_flags & cpu_to_le32(OCFS2_CHAIN_FL))) {
		ocfs2_error(alloc_inode->i_sb, "Invalid chain allocator %llu",
			    (unsigned long long)le64_to_cpu(fe->i_blkno));
		status = -EIO;
		goto bail;
	}

	free_bits = le32_to_cpu(fe->id1.bitmap1.i_total) -
		le32_to_cpu(fe->id1.bitmap1.i_used);

	if (bits_wanted > free_bits) {
		/* cluster bitmap never grows */
		if (ocfs2_is_cluster_bitmap(alloc_inode)) {
			mlog(0, "Disk Full: wanted=%u, free_bits=%u\n",
			     bits_wanted, free_bits);
			status = -ENOSPC;
			goto bail;
		}

		status = ocfs2_block_group_alloc(osb, alloc_inode, bh);
		if (status < 0) {
			if (status != -ENOSPC)
				mlog_errno(status);
			goto bail;
		}
		atomic_inc(&osb->alloc_stats.bg_extends);

		/* You should never ask for this much metadata */
		BUG_ON(bits_wanted >
		       (le32_to_cpu(fe->id1.bitmap1.i_total)
			- le32_to_cpu(fe->id1.bitmap1.i_used)));
	}

	get_bh(bh);
	ac->ac_bh = bh;
bail:
	if (bh)
		brelse(bh);

	mlog_exit(status);
	return status;
}
Exemple #26
0
static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
					  void *buf, unsigned cisz, size_t nci)
{
	struct buffer_head *bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_cpinfo *ci = buf;
	__u64 curr = *cnop, next;
	unsigned long curr_blkoff, next_blkoff;
	void *kaddr;
	int n = 0, ret;

	down_read(&NILFS_MDT(cpfile)->mi_sem);

	if (curr == 0) {
		ret = nilfs_cpfile_get_header_block(cpfile, &bh);
		if (ret < 0)
			goto out;
		kaddr = kmap_atomic(bh->b_page);
		header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
		curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
		kunmap_atomic(kaddr);
		brelse(bh);
		if (curr == 0) {
			ret = 0;
			goto out;
		}
	} else if (unlikely(curr == ~(__u64)0)) {
		ret = 0;
		goto out;
	}

	curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr);
	ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh);
	if (unlikely(ret < 0)) {
		if (ret == -ENOENT)
			ret = 0; /* No snapshots (started from a hole block) */
		goto out;
	}
	kaddr = kmap_atomic(bh->b_page);
	while (n < nci) {
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
		curr = ~(__u64)0; /* Terminator */
		if (unlikely(nilfs_checkpoint_invalid(cp) ||
			     !nilfs_checkpoint_snapshot(cp)))
			break;
		nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci);
		ci = (void *)ci + cisz;
		n++;
		next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
		if (next == 0)
			break; /* reach end of the snapshot list */

		next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
		if (curr_blkoff != next_blkoff) {
			kunmap_atomic(kaddr);
			brelse(bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
								0, &bh);
			if (unlikely(ret < 0)) {
				WARN_ON(ret == -ENOENT);
				goto out;
			}
			kaddr = kmap_atomic(bh->b_page);
		}
		curr = next;
		curr_blkoff = next_blkoff;
	}
	kunmap_atomic(kaddr);
	brelse(bh);
	*cnop = curr;
	ret = n;

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
Exemple #27
0
static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
			   struct buffer_head *bh_map, struct metapath *mp,
			   const unsigned int sheight,
			   const unsigned int height,
			   const size_t maxlen)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct super_block *sb = sdp->sd_vfs;
	struct buffer_head *dibh = mp->mp_bh[0];
	u64 bn, dblock = 0;
	unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
	unsigned dblks = 0;
	unsigned ptrs_per_blk;
	const unsigned end_of_metadata = height - 1;
	int ret;
	int eob = 0;
	enum alloc_state state;
	__be64 *ptr;
	__be64 zero_bn = 0;

	BUG_ON(sheight < 1);
	BUG_ON(dibh == NULL);

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (height == sheight) {
		struct buffer_head *bh;
		/* Bottom indirect block exists, find unalloced extent size */
		ptr = metapointer(end_of_metadata, mp);
		bh = mp->mp_bh[end_of_metadata];
		dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen,
					   &eob);
		BUG_ON(dblks < 1);
		state = ALLOC_DATA;
	} else {
		/* Need to allocate indirect blocks */
		ptrs_per_blk = height > 1 ? sdp->sd_inptrs : sdp->sd_diptrs;
		dblks = min(maxlen, (size_t)(ptrs_per_blk -
					     mp->mp_list[end_of_metadata]));
		if (height == ip->i_height) {
			/* Writing into existing tree, extend tree down */
			iblks = height - sheight;
			state = ALLOC_GROW_DEPTH;
		} else {
			/* Building up tree height */
			state = ALLOC_GROW_HEIGHT;
			iblks = height - ip->i_height;
			branch_start = metapath_branch_start(mp);
			iblks += (height - branch_start);
		}
	}

	/* start of the second part of the function (state machine) */

	blks = dblks + iblks;
	i = sheight;
	do {
		int error;
		n = blks - alloced;
		error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
		if (error)
			return error;
		alloced += n;
		if (state != ALLOC_DATA || gfs2_is_jdata(ip))
			gfs2_trans_add_unrevoke(sdp, bn, n);
		switch (state) {
		/* Growing height of tree */
		case ALLOC_GROW_HEIGHT:
			if (i == 1) {
				ptr = (__be64 *)(dibh->b_data +
						 sizeof(struct gfs2_dinode));
				zero_bn = *ptr;
			}
			for (; i - 1 < height - ip->i_height && n > 0; i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
			if (i - 1 == height - ip->i_height) {
				i--;
				gfs2_buffer_copy_tail(mp->mp_bh[i],
						sizeof(struct gfs2_meta_header),
						dibh, sizeof(struct gfs2_dinode));
				gfs2_buffer_clear_tail(dibh,
						sizeof(struct gfs2_dinode) +
						sizeof(__be64));
				ptr = (__be64 *)(mp->mp_bh[i]->b_data +
					sizeof(struct gfs2_meta_header));
				*ptr = zero_bn;
				state = ALLOC_GROW_DEPTH;
				for(i = branch_start; i < height; i++) {
					if (mp->mp_bh[i] == NULL)
						break;
					brelse(mp->mp_bh[i]);
					mp->mp_bh[i] = NULL;
				}
				i = branch_start;
			}
			if (n == 0)
				break;
		/* Branching from existing tree */
		case ALLOC_GROW_DEPTH:
			if (i > 1 && i < height)
				gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
			for (; i < height && n > 0; i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i,
						   mp->mp_list[i-1], bn++);
			if (i == height)
				state = ALLOC_DATA;
			if (n == 0)
				break;
		/* Tree complete, adding data blocks */
		case ALLOC_DATA:
			BUG_ON(n > dblks);
			BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
			gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
			dblks = n;
			ptr = metapointer(end_of_metadata, mp);
			dblock = bn;
			while (n-- > 0)
				*ptr++ = cpu_to_be64(bn++);
			if (buffer_zeronew(bh_map)) {
				ret = sb_issue_zeroout(sb, dblock, dblks,
						       GFP_NOFS);
				if (ret) {
					fs_err(sdp,
					       "Failed to zero data buffers\n");
					clear_buffer_zeronew(bh_map);
				}
			}
			break;
		}
	} while ((state != ALLOC_DATA) || !dblock);

	ip->i_height = height;
	gfs2_add_inode_blocks(&ip->i_inode, alloced);
	gfs2_dinode_out(ip, mp->mp_bh[0]->b_data);
	map_bh(bh_map, inode->i_sb, dblock);
	bh_map->b_size = dblks << inode->i_blkbits;
	set_buffer_new(bh_map);
	return 0;
}
Exemple #28
0
static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *curr_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 curr, prev;
	unsigned long curr_blkoff, prev_blkoff;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr);
		goto out_cp;
	}
	if (nilfs_checkpoint_snapshot(cp)) {
		ret = 0;
		kunmap_atomic(kaddr);
		goto out_cp;
	}
	kunmap_atomic(kaddr);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	kaddr = kmap_atomic(header_bh->b_page);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	list = &header->ch_snapshot_list;
	curr_bh = header_bh;
	get_bh(curr_bh);
	curr = 0;
	curr_blkoff = 0;
	prev = le64_to_cpu(list->ssl_prev);
	while (prev > cno) {
		prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
		curr = prev;
		if (curr_blkoff != prev_blkoff) {
			kunmap_atomic(kaddr);
			brelse(curr_bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
								0, &curr_bh);
			if (ret < 0)
				goto out_header;
			kaddr = kmap_atomic(curr_bh->b_page);
		}
		curr_blkoff = prev_blkoff;
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, curr, curr_bh, kaddr);
		list = &cp->cp_snapshot_list;
		prev = le64_to_cpu(list->ssl_prev);
	}
	kunmap_atomic(kaddr);

	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_curr;
	} else {
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	kaddr = kmap_atomic(curr_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, curr, curr_bh, kaddr);
	list->ssl_prev = cpu_to_le64(cno);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
	nilfs_checkpoint_set_snapshot(cp);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(prev_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(cno);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(header_bh->b_page);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, 1);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(prev_bh);
	mark_buffer_dirty(curr_bh);
	mark_buffer_dirty(cp_bh);
	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_curr:
	brelse(curr_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
Exemple #29
0
static int ext2_fill_super(struct super_block *sb, void *data, int silent)
{
	struct buffer_head * bh;
	struct ext2_sb_info * sbi;
	struct ext2_super_block * es;
	struct inode *root;
	unsigned long block;
	unsigned long sb_block = get_sb_block(&data);
	unsigned long logic_sb_block;
	unsigned long offset = 0;
	unsigned long def_mount_opts;
	long ret = -EINVAL;
	int blocksize = BLOCK_SIZE;
	int db_count;
	int i, j;
	__le32 features;
	int err;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock) {
		kfree(sbi);
		return -ENOMEM;
	}
	sb->s_fs_info = sbi;
	sbi->s_sb_block = sb_block;

	/*
	 * See what the current blocksize for the device is, and
	 * use that as the blocksize.  Otherwise (or if the blocksize
	 * is smaller than the default) use the default.
	 * This is important for devices that have a hardware
	 * sectorsize that is larger than the default.
	 */
	blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
	if (!blocksize) {
		printk ("EXT2-fs: unable to set blocksize\n");
		goto failed_sbi;
	}

	/*
	 * If the superblock doesn't start on a hardware sector boundary,
	 * calculate the offset.  
	 */
	if (blocksize != BLOCK_SIZE) {
		logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
		offset = (sb_block*BLOCK_SIZE) % blocksize;
	} else {
		logic_sb_block = sb_block;
	}

	if (!(bh = sb_bread(sb, logic_sb_block))) {
		printk ("EXT2-fs: unable to read superblock\n");
		goto failed_sbi;
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 *       some ext2 macro-instructions depend on its value
	 */
	es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
	sbi->s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);

	if (sb->s_magic != EXT2_SUPER_MAGIC)
		goto cantfind_ext2;

	/* Set defaults before we parse the mount options */
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
	if (def_mount_opts & EXT2_DEFM_DEBUG)
		set_opt(sbi->s_mount_opt, DEBUG);
	if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
		set_opt(sbi->s_mount_opt, GRPID);
	if (def_mount_opts & EXT2_DEFM_UID16)
		set_opt(sbi->s_mount_opt, NO_UID32);
#ifdef CONFIG_EXT2_FS_XATTR
	if (def_mount_opts & EXT2_DEFM_XATTR_USER)
		set_opt(sbi->s_mount_opt, XATTR_USER);
#endif
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	if (def_mount_opts & EXT2_DEFM_ACL)
		set_opt(sbi->s_mount_opt, POSIX_ACL);
#endif
	
	if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
		set_opt(sbi->s_mount_opt, ERRORS_PANIC);
	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
		set_opt(sbi->s_mount_opt, ERRORS_CONT);
	else
		set_opt(sbi->s_mount_opt, ERRORS_RO);

	sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
	sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
	
	set_opt(sbi->s_mount_opt, RESERVATION);

	if (!parse_options ((char *) data, sbi))
		goto failed_mount;

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
		 MS_POSIXACL : 0);

	ext2_xip_verify_sb(sb); /* see if bdev supports xip, unset
				    EXT2_MOUNT_XIP if not */

	if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
	    (EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
	     EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
	     EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
		printk("EXT2-fs warning: feature flags set on rev 0 fs, "
		       "running e2fsck is recommended\n");
	/*
	 * Check feature flags regardless of the revision level, since we
	 * previously didn't change the revision level when setting the flags,
	 * so there is a chance incompat flags are set on a rev 0 filesystem.
	 */
	features = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP);
	if (features) {
		printk("EXT2-fs: %s: couldn't mount because of "
		       "unsupported optional features (%x).\n",
		       sb->s_id, le32_to_cpu(features));
		goto failed_mount;
	}
	if (!(sb->s_flags & MS_RDONLY) &&
	    (features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))){
		printk("EXT2-fs: %s: couldn't mount RDWR because of "
		       "unsupported optional features (%x).\n",
		       sb->s_id, le32_to_cpu(features));
		goto failed_mount;
	}

	blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);

	if (ext2_use_xip(sb) && blocksize != PAGE_SIZE) {
		if (!silent)
			printk("XIP: Unsupported blocksize\n");
		goto failed_mount;
	}

	/* If the blocksize doesn't match, re-read the thing.. */
	if (sb->s_blocksize != blocksize) {
		brelse(bh);

		if (!sb_set_blocksize(sb, blocksize)) {
			printk(KERN_ERR "EXT2-fs: blocksize too small for device.\n");
			goto failed_sbi;
		}

		logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
		offset = (sb_block*BLOCK_SIZE) % blocksize;
		bh = sb_bread(sb, logic_sb_block);
		if(!bh) {
			printk("EXT2-fs: Couldn't read superblock on "
			       "2nd try.\n");
			goto failed_sbi;
		}
		es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
		sbi->s_es = es;
		if (es->s_magic != cpu_to_le16(EXT2_SUPER_MAGIC)) {
			printk ("EXT2-fs: Magic mismatch, very weird !\n");
			goto failed_mount;
		}
	}

	sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits);

	if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
		sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
		sbi->s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
	} else {
		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
		if ((sbi->s_inode_size < EXT2_GOOD_OLD_INODE_SIZE) ||
		    !is_power_of_2(sbi->s_inode_size) ||
		    (sbi->s_inode_size > blocksize)) {
			printk ("EXT2-fs: unsupported inode size: %d\n",
				sbi->s_inode_size);
			goto failed_mount;
		}
	}

	sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
				   le32_to_cpu(es->s_log_frag_size);
	if (sbi->s_frag_size == 0)
		goto cantfind_ext2;
	sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;

	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
	sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);

	if (EXT2_INODE_SIZE(sb) == 0)
		goto cantfind_ext2;
	sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
	if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0)
		goto cantfind_ext2;
	sbi->s_itb_per_group = sbi->s_inodes_per_group /
					sbi->s_inodes_per_block;
	sbi->s_desc_per_block = sb->s_blocksize /
					sizeof (struct ext2_group_desc);
	sbi->s_sbh = bh;
	sbi->s_mount_state = le16_to_cpu(es->s_state);
	sbi->s_addr_per_block_bits =
		ilog2 (EXT2_ADDR_PER_BLOCK(sb));
	sbi->s_desc_per_block_bits =
		ilog2 (EXT2_DESC_PER_BLOCK(sb));

	if (sb->s_magic != EXT2_SUPER_MAGIC)
		goto cantfind_ext2;

	if (sb->s_blocksize != bh->b_size) {
		if (!silent)
			printk ("VFS: Unsupported blocksize on dev "
				"%s.\n", sb->s_id);
		goto failed_mount;
	}

	if (sb->s_blocksize != sbi->s_frag_size) {
		printk ("EXT2-fs: fragsize %lu != blocksize %lu (not supported yet)\n",
			sbi->s_frag_size, sb->s_blocksize);
		goto failed_mount;
	}

	if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
		printk ("EXT2-fs: #blocks per group too big: %lu\n",
			sbi->s_blocks_per_group);
		goto failed_mount;
	}
	if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
		printk ("EXT2-fs: #fragments per group too big: %lu\n",
			sbi->s_frags_per_group);
		goto failed_mount;
	}
	if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
		printk ("EXT2-fs: #inodes per group too big: %lu\n",
			sbi->s_inodes_per_group);
		goto failed_mount;
	}

	if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
		goto cantfind_ext2;
 	sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
 				le32_to_cpu(es->s_first_data_block) - 1)
 					/ EXT2_BLOCKS_PER_GROUP(sb)) + 1;
	db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
		   EXT2_DESC_PER_BLOCK(sb);
	sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
	if (sbi->s_group_desc == NULL) {
		printk ("EXT2-fs: not enough memory\n");
		goto failed_mount;
	}
	bgl_lock_init(sbi->s_blockgroup_lock);
	sbi->s_debts = kcalloc(sbi->s_groups_count, sizeof(*sbi->s_debts), GFP_KERNEL);
	if (!sbi->s_debts) {
		printk ("EXT2-fs: not enough memory\n");
		goto failed_mount_group_desc;
	}
	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logic_sb_block, i);
		sbi->s_group_desc[i] = sb_bread(sb, block);
		if (!sbi->s_group_desc[i]) {
			for (j = 0; j < i; j++)
				brelse (sbi->s_group_desc[j]);
			printk ("EXT2-fs: unable to read group descriptors\n");
			goto failed_mount_group_desc;
		}
	}
	if (!ext2_check_descriptors (sb)) {
		printk ("EXT2-fs: group descriptors corrupted!\n");
		goto failed_mount2;
	}
	sbi->s_gdb_count = db_count;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
	spin_lock_init(&sbi->s_next_gen_lock);

	/* per filesystem reservation list head & lock */
	spin_lock_init(&sbi->s_rsv_window_lock);
	sbi->s_rsv_window_root = RB_ROOT;
	/*
	 * Add a single, static dummy reservation to the start of the
	 * reservation window list --- it gives us a placeholder for
	 * append-at-start-of-list which makes the allocation logic
	 * _much_ simpler.
	 */
	sbi->s_rsv_window_head.rsv_start = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
	sbi->s_rsv_window_head.rsv_end = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
	sbi->s_rsv_window_head.rsv_alloc_hit = 0;
	sbi->s_rsv_window_head.rsv_goal_size = 0;
	ext2_rsv_window_add(sb, &sbi->s_rsv_window_head);

	err = percpu_counter_init(&sbi->s_freeblocks_counter,
				ext2_count_free_blocks(sb));
	if (!err) {
		err = percpu_counter_init(&sbi->s_freeinodes_counter,
				ext2_count_free_inodes(sb));
	}
	if (!err) {
		err = percpu_counter_init(&sbi->s_dirs_counter,
				ext2_count_dirs(sb));
	}
	if (err) {
		printk(KERN_ERR "EXT2-fs: insufficient memory\n");
		goto failed_mount3;
	}
	/*
	 * set up enough so that it can read an inode
	 */
	sb->s_op = &ext2_sops;
	sb->s_export_op = &ext2_export_ops;
	sb->s_xattr = ext2_xattr_handlers;
	root = ext2_iget(sb, EXT2_ROOT_INO);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto failed_mount3;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		printk(KERN_ERR "EXT2-fs: corrupt root inode, run e2fsck\n");
		goto failed_mount3;
	}

	sb->s_root = d_alloc_root(root);
	if (!sb->s_root) {
		iput(root);
		printk(KERN_ERR "EXT2-fs: get root inode failed\n");
		ret = -ENOMEM;
		goto failed_mount3;
	}
	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
		ext2_warning(sb, __func__,
			"mounting ext3 filesystem as ext2");
	ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY);
	return 0;

cantfind_ext2:
	if (!silent)
		printk("VFS: Can't find an ext2 filesystem on dev %s.\n",
		       sb->s_id);
	goto failed_mount;
failed_mount3:
	percpu_counter_destroy(&sbi->s_freeblocks_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
failed_mount2:
	for (i = 0; i < db_count; i++)
		brelse(sbi->s_group_desc[i]);
failed_mount_group_desc:
	kfree(sbi->s_group_desc);
	kfree(sbi->s_debts);
failed_mount:
	brelse(bh);
failed_sbi:
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
	kfree(sbi);
	return ret;
}
Exemple #30
0
/*
 * Balloc defines the structure of filesystem storage
 * by allocating the physical blocks on a device given
 * the inode and the logical block number in a file.
 */
int
ext2_balloc(struct inode *ip, e2fs_lbn_t lbn, int size, struct ucred *cred,
    struct buf **bpp, int flags)
{
	struct m_ext2fs *fs;
	struct ext2mount *ump;
	struct buf *bp, *nbp;
	struct vnode *vp = ITOV(ip);
	struct indir indirs[NIADDR + 2];
	e4fs_daddr_t nb, newb;
	e2fs_daddr_t *bap, pref;
	int osize, nsize, num, i, error;

	*bpp = NULL;
	if (lbn < 0)
		return (EFBIG);
	fs = ip->i_e2fs;
	ump = ip->i_ump;

	/*
	 * check if this is a sequential block allocation. 
	 * If so, increment next_alloc fields to allow ext2_blkpref 
	 * to make a good guess
	 */
	if (lbn == ip->i_next_alloc_block + 1) {
		ip->i_next_alloc_block++;
		ip->i_next_alloc_goal++;
	}

	/*
	 * The first NDADDR blocks are direct blocks
	 */
	if (lbn < NDADDR) {
		nb = ip->i_db[lbn];
		/* no new block is to be allocated, and no need to expand
		   the file */
		if (nb != 0 && ip->i_size >= (lbn + 1) * fs->e2fs_bsize) {
			error = bread(vp, lbn, fs->e2fs_bsize, NOCRED, &bp);
			if (error) {
				brelse(bp);
				return (error);
			}
			bp->b_blkno = fsbtodb(fs, nb);
			*bpp = bp;
			return (0);
		}
		if (nb != 0) {
			/*
			 * Consider need to reallocate a fragment.
			 */
			osize = fragroundup(fs, blkoff(fs, ip->i_size));
			nsize = fragroundup(fs, size);
			if (nsize <= osize) {
				error = bread(vp, lbn, osize, NOCRED, &bp);
				if (error) {
					brelse(bp);
					return (error);
				}
				bp->b_blkno = fsbtodb(fs, nb);
			} else {
			/* Godmar thinks: this shouldn't happen w/o fragments */
				printf("nsize %d(%d) > osize %d(%d) nb %d\n", 
					(int)nsize, (int)size, (int)osize, 
					(int)ip->i_size, (int)nb);
				panic(
				    "ext2_balloc: Something is terribly wrong");
/*
 * please note there haven't been any changes from here on -
 * FFS seems to work.
 */
			}
		} else {
			if (ip->i_size < (lbn + 1) * fs->e2fs_bsize)
				nsize = fragroundup(fs, size);
			else
				nsize = fs->e2fs_bsize;
			EXT2_LOCK(ump);
			error = ext2_alloc(ip, lbn,
			    ext2_blkpref(ip, lbn, (int)lbn, &ip->i_db[0], 0),
			    nsize, cred, &newb);
			if (error)
				return (error);
			bp = getblk(vp, lbn, nsize, 0, 0, 0);
			bp->b_blkno = fsbtodb(fs, newb);
			if (flags & BA_CLRBUF)
				vfs_bio_clrbuf(bp);
		}
		ip->i_db[lbn] = dbtofsb(fs, bp->b_blkno);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bpp = bp;
		return (0);
	}
	/*
	 * Determine the number of levels of indirection.
	 */
	pref = 0;
	if ((error = ext2_getlbns(vp, lbn, indirs, &num)) != 0)
		return (error);
#ifdef INVARIANTS
	if (num < 1)
		panic ("ext2_balloc: ext2_getlbns returned indirect block");
#endif
	/*
	 * Fetch the first indirect block allocating if necessary.
	 */
	--num;
	nb = ip->i_ib[indirs[0].in_off];
	if (nb == 0) {
		EXT2_LOCK(ump);
		pref = ext2_blkpref(ip, lbn, indirs[0].in_off + 
					     EXT2_NDIR_BLOCKS, &ip->i_db[0], 0);
		if ((error = ext2_alloc(ip, lbn, pref, fs->e2fs_bsize, cred,
			&newb)))
			return (error);
		nb = newb;
		bp = getblk(vp, indirs[1].in_lbn, fs->e2fs_bsize, 0, 0, 0);
		bp->b_blkno = fsbtodb(fs, newb);
		vfs_bio_clrbuf(bp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */
		if ((error = bwrite(bp)) != 0) {
			ext2_blkfree(ip, nb, fs->e2fs_bsize);
			return (error);
		}
		ip->i_ib[indirs[0].in_off] = newb;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */
	for (i = 1;;) {
		error = bread(vp,
		    indirs[i].in_lbn, (int)fs->e2fs_bsize, NOCRED, &bp);
		if (error) {
			brelse(bp);
			return (error);
		}
		bap = (e2fs_daddr_t *)bp->b_data;
		nb = bap[indirs[i].in_off];
		if (i == num)
			break;
		i += 1;
		if (nb != 0) {
			bqrelse(bp);
			continue;
		}
		EXT2_LOCK(ump);
		if (pref == 0)
			pref = ext2_blkpref(ip, lbn, indirs[i].in_off, bap,
						bp->b_lblkno);
		error =  ext2_alloc(ip, lbn, pref, (int)fs->e2fs_bsize, cred, &newb);
		if (error) {
			brelse(bp);
			return (error);
		}
		nb = newb;
		nbp = getblk(vp, indirs[i].in_lbn, fs->e2fs_bsize, 0, 0, 0);
		nbp->b_blkno = fsbtodb(fs, nb);
		vfs_bio_clrbuf(nbp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */
		if ((error = bwrite(nbp)) != 0) {
			ext2_blkfree(ip, nb, fs->e2fs_bsize);
			EXT2_UNLOCK(ump);
			brelse(bp);
			return (error);
		}
		bap[indirs[i - 1].in_off] = nb;
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & IO_SYNC) {
			bwrite(bp);
		} else {
			if (bp->b_bufsize == fs->e2fs_bsize)
				bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
	}
	/*
	 * Get the data block, allocating if necessary.
	 */
	if (nb == 0) {
		EXT2_LOCK(ump);
		pref = ext2_blkpref(ip, lbn, indirs[i].in_off, &bap[0], 
				bp->b_lblkno);
		if ((error = ext2_alloc(ip,
		    lbn, pref, (int)fs->e2fs_bsize, cred, &newb)) != 0) {
			brelse(bp);
			return (error);
		}
		nb = newb;
		nbp = getblk(vp, lbn, fs->e2fs_bsize, 0, 0, 0);
		nbp->b_blkno = fsbtodb(fs, nb);
		if (flags & BA_CLRBUF)
			vfs_bio_clrbuf(nbp);
		bap[indirs[i].in_off] = nb;
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & IO_SYNC) {
			bwrite(bp);
		} else {
			if (bp->b_bufsize == fs->e2fs_bsize)
				bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
		*bpp = nbp;
		return (0);
	}
	brelse(bp);
	if (flags & BA_CLRBUF) {
		int seqcount = (flags & BA_SEQMASK) >> BA_SEQSHIFT;
		if (seqcount && (vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
			error = cluster_read(vp, ip->i_size, lbn,
			    (int)fs->e2fs_bsize, NOCRED,
			    MAXBSIZE, seqcount, &nbp);
		} else {
			error = bread(vp, lbn, (int)fs->e2fs_bsize, NOCRED, &nbp);
		}
		if (error) {
			brelse(nbp);
			return (error);
		}
	} else {