Exemplo n.º 1
0
/*
 * Mark segment 'seg' for garbage collection by setting the GC flag in
 * its segment usage entry in the sufile.  Takes the sufile node lock
 * itself, so the caller must not hold it.
 *
 * Returns 0 on success or an errno from reading the sufile block.
 */
int
nandfs_markgc_segment(struct nandfs_device *fsdev, uint64_t seg)
{
	struct nandfs_node *su_node;
	struct nandfs_segment_usage *su_usage;
	struct buf *bp;
	uint64_t blk, offset;
	int error;

	su_node = fsdev->nd_su_node;

	VOP_LOCK(NTOV(su_node), LK_EXCLUSIVE);

	/* Translate segment number into sufile block and in-block offset. */
	nandfs_seg_usage_blk_offset(fsdev, seg, &blk, &offset);

	error = nandfs_bread(su_node, blk, NOCRED, 0, &bp);
	if (error) {
		brelse(bp);
		VOP_UNLOCK(NTOV(su_node), 0);
		return (error);
	}

	su_usage = SU_USAGE_OFF(bp, offset);
	/* A segment must not be marked for GC twice. */
	MPASS((su_usage->su_flags & NANDFS_SEGMENT_USAGE_GC) == 0);
	su_usage->su_flags |= NANDFS_SEGMENT_USAGE_GC;

	/*
	 * NOTE(review): the buffer is released without being marked dirty,
	 * so the GC flag appears to live in-core only — confirm this is
	 * intentional.
	 */
	brelse(bp);
	VOP_UNLOCK(NTOV(su_node), 0);

	DPRINTF(SEG, ("%s: seg:%#jx\n", __func__, (uintmax_t)seg));

	return (0);
}
Exemplo n.º 2
0
/*
 * Translate the DAT-file offsets of the block descriptors in 'bd'
 * into physical block numbers, storing the result in bd_blocknr.
 * Stops at the first lookup failure and returns its error; entries
 * past the failing one are left untouched.
 */
int
nandfs_get_dat_bdescs(struct nandfs_device *nffsdev, struct nandfs_bdesc *bd,
    uint32_t nmembs)
{
	struct nandfs_node *dat_node;
	uint64_t blocknr;
	uint32_t idx;
	int error;

	dat_node = nffsdev->nd_dat_node;
	error = 0;

	VOP_LOCK(NTOV(dat_node), LK_EXCLUSIVE);

	for (idx = 0; idx < nmembs; idx++) {
		DPRINTF(CLEAN,
		    ("%s: bd ino:%#jx oblk:%#jx blocknr:%#jx off:%#jx\n",
		    __func__, (uintmax_t)bd[idx].bd_ino,
		    (uintmax_t)bd[idx].bd_oblocknr,
		    (uintmax_t)bd[idx].bd_blocknr,
		    (uintmax_t)bd[idx].bd_offset));

		/* Resolve the block backing this DAT offset. */
		error = nandfs_bmap_lookup(dat_node, bd[idx].bd_offset,
		    &blocknr);
		if (error != 0)
			break;
		bd[idx].bd_blocknr = blocknr;
	}

	VOP_UNLOCK(NTOV(dat_node), 0);
	return (error);
}
Exemplo n.º 3
0
/*
 * Re-dirty the DAT blocks referenced by the still-live block
 * descriptors in 'bd' so the syncer rewrites them during cleaning.
 * Descriptors with bd_alive == 0 are skipped.
 *
 * Returns 0 on success or the first read error.  Uses a single
 * goto-based exit so the DAT node lock is released on exactly one
 * path (the original duplicated unlock+return in both error arms).
 */
static int
nandfs_process_bdesc(struct nandfs_device *nffsdev, struct nandfs_bdesc *bd,
    uint64_t nmembs)
{
	struct nandfs_node *dat_node;
	struct buf *bp;
	uint64_t i;
	int error;

	dat_node = nffsdev->nd_dat_node;

	VOP_LOCK(NTOV(dat_node), LK_EXCLUSIVE);

	error = 0;
	for (i = 0; i < nmembs; i++) {
		if (!bd[i].bd_alive)
			continue;
		DPRINTF(CLEAN, ("%s: idx %jx offset %jx\n",
		    __func__, i, bd[i].bd_offset));
		if (bd[i].bd_level) {
			/*
			 * Indirect (meta) block: read through the meta
			 * path and mark both it and its bmap path dirty.
			 */
			error = nandfs_bread_meta(dat_node, bd[i].bd_offset,
			    NULL, 0, &bp);
			if (error) {
				nandfs_error("%s: cannot read dat node "
				    "level:%d\n", __func__, bd[i].bd_level);
				brelse(bp);
				goto out;
			}
			nandfs_dirty_buf_meta(bp, 1);
			nandfs_bmap_dirty_blocks(VTON(bp->b_vp), bp, 1);
		} else {
			/* Leaf data block: plain read plus forced dirty. */
			error = nandfs_bread(dat_node, bd[i].bd_offset, NULL,
			    0, &bp);
			if (error) {
				nandfs_error("%s: cannot read dat node\n",
				    __func__);
				brelse(bp);
				goto out;
			}
			nandfs_dirty_buf(bp, 1);
		}
		DPRINTF(CLEAN, ("%s: bp: %p\n", __func__, bp));
	}

out:
	VOP_UNLOCK(NTOV(dat_node), 0);

	return (error);
}
Exemplo n.º 4
0
/*
 * Update block count of segment: add 'nblks' to segment 'seg's usage
 * entry, stamp its last-modified time and set the DIRTY flag.  The
 * caller must hold the sufile node lock.
 *
 * Returns 0 on success or an errno from reading the sufile block.
 */
int
nandfs_update_segment(struct nandfs_device *fsdev, uint64_t seg, uint32_t nblks)
{
	struct nandfs_node *su_node;
	struct nandfs_segment_usage *su_usage;
	struct buf *bp;
	uint64_t blk, offset;
	int error;

	su_node = fsdev->nd_su_node;
	ASSERT_VOP_LOCKED(NTOV(su_node), __func__);

	/* Locate the sufile block and offset of this segment's entry. */
	nandfs_seg_usage_blk_offset(fsdev, seg, &blk, &offset);

	error = nandfs_bread(su_node, blk, NOCRED, 0, &bp);
	if (error) {
		nandfs_error("%s: read block:%jx to update\n",
		    __func__, blk);
		brelse(bp);
		return (error);
	}

	su_usage = SU_USAGE_OFF(bp, offset);
	su_usage->su_lastmod = fsdev->nd_ts.tv_sec;
	su_usage->su_flags = NANDFS_SEGMENT_USAGE_DIRTY;
	su_usage->su_nblocks += nblks;

	DPRINTF(SEG, ("%s: seg:%#jx inc:%#x cur:%#x\n",  __func__,
	    (uintmax_t)seg, nblks, su_usage->su_nblocks));

	/*
	 * Force the buffer dirty; the return value is not checked here —
	 * TODO(review): confirm that is intentional.
	 */
	nandfs_dirty_buf(bp, 1);

	return (0);
}
Exemplo n.º 5
0
/*
 * Mark segment 'seg' as bad: stamp its usage entry with the current
 * time, set the ERROR flag and force the sufile buffer dirty.  The
 * caller must hold the sufile node lock.
 */
static int
nandfs_bad_segment(struct nandfs_device *fsdev, uint64_t seg)
{
	struct nandfs_segment_usage *usage;
	struct nandfs_node *node;
	struct buf *bp;
	uint64_t blkno, off;
	int err;

	node = fsdev->nd_su_node;
	ASSERT_VOP_LOCKED(NTOV(node), __func__);

	/* Find the sufile block and offset of this segment's entry. */
	nandfs_seg_usage_blk_offset(fsdev, seg, &blkno, &off);

	err = nandfs_bread(node, blkno, NOCRED, 0, &bp);
	if (err != 0) {
		brelse(bp);
		return (err);
	}

	usage = SU_USAGE_OFF(bp, off);
	usage->su_lastmod = fsdev->nd_ts.tv_sec;
	usage->su_flags = NANDFS_SEGMENT_USAGE_ERROR;

	DPRINTF(SEG, ("%s: seg:%#jx\n", __func__, (uintmax_t)seg));

	/* Force dirty so the error flag reaches the media. */
	nandfs_dirty_buf(bp, 1);

	return (0);
}
Exemplo n.º 6
0
/*
 * Look up the on-media inode for 'ino' in the ifile.  On success
 * '*inode' points into the buffer data and '*bp' is the held buffer
 * backing it; the pointer is only valid while the buffer is held and
 * the caller is responsible for releasing it.  On error neither
 * output is set.
 */
int
nandfs_get_node_entry(struct nandfsmount *nmp, struct nandfs_inode **inode,
    uint64_t ino, struct buf **bp)
{
	struct nandfs_alloc_request req;
	struct nandfs_mdt *mdt;
	struct nandfs_node *ifile;
	struct vnode *vp;
	uint32_t index;
	int error = 0;

	req.entrynum = ino;
	mdt = &nmp->nm_nandfsdev->nd_ifile_mdt;
	ifile = nmp->nm_ifile_node;
	vp = NTOV(ifile);

	VOP_LOCK(vp, LK_EXCLUSIVE);
	/* Fetch the ifile block holding this inode; 'index' is its slot. */
	error = nandfs_get_entry_block(mdt, ifile, &req, &index, 0);
	if (error) {
		VOP_UNLOCK(vp, 0);
		return (error);
	}

	*inode = ((struct nandfs_inode *) req.bp_entry->b_data) + index;
	*bp = req.bp_entry;
	VOP_UNLOCK(vp, 0);
	return (0);
}
Exemplo n.º 7
0
/*
 * Write the in-core inode of 'node' back into its slot in the ifile
 * and mark the backing buffer dirty.  The caller must hold the ifile
 * node lock.
 *
 * Returns 0 on success, or an errno from the ifile block lookup or
 * from nandfs_dirty_buf.
 */
int
nandfs_node_update(struct nandfs_node *node)
{
	struct nandfs_alloc_request req;
	struct nandfsmount *nmp;
	struct nandfs_mdt *mdt;
	struct nandfs_node *ifile;
	struct nandfs_inode *inode;
	uint32_t index;
	int error = 0;

	nmp = node->nn_nmp;
	ifile = nmp->nm_ifile_node;
	ASSERT_VOP_LOCKED(NTOV(ifile), __func__);

	req.entrynum = node->nn_ino;
	mdt = &nmp->nm_nandfsdev->nd_ifile_mdt;

	DPRINTF(IFILE, ("%s: node:%p ino:%#jx\n",
	    __func__, &node->nn_inode, (uintmax_t)node->nn_ino));

	/* Fetch the ifile block that holds this inode's entry. */
	error = nandfs_get_entry_block(mdt, ifile, &req, &index, 0);
	if (error) {
		printf("nandfs_get_entry_block returned with ERROR=%d\n",
		    error);
		return (error);
	}

	/* Copy the whole in-core inode over its on-media slot. */
	inode = ((struct nandfs_inode *) req.bp_entry->b_data) + index;
	memcpy(inode, &node->nn_inode, sizeof(*inode));
	error = nandfs_dirty_buf(req.bp_entry, 0);

	return (error);
}
Exemplo n.º 8
0
/*
 * Fill in the start/end checkpoint numbers and the physical block
 * number for each virtual-block info record in 'vinfo' by reading the
 * corresponding DAT entries.  Stops at the first lookup error and
 * returns it; 0 on success.
 */
int
nandfs_get_dat_vinfo(struct nandfs_device *nandfsdev, struct nandfs_vinfo *vinfo,
    uint32_t nmembs)
{
	struct nandfs_alloc_request req;
	struct nandfs_dat_entry *entries;
	struct nandfs_node *dat;
	struct nandfs_mdt *mdt;
	uint32_t n, slot;
	int error;

	dat = nandfsdev->nd_dat_node;
	mdt = &nandfsdev->nd_dat_mdt;
	error = 0;

	DPRINTF(DAT, ("%s: nmembs %#x\n", __func__, nmembs));

	VOP_LOCK(NTOV(dat), LK_EXCLUSIVE);

	for (n = 0; n < nmembs; n++) {
		req.entrynum = vinfo[n].nvi_vblocknr;

		/* Fetch the DAT block holding this virtual block's entry. */
		error = nandfs_get_entry_block(mdt, dat, &req, &slot, 0);
		if (error != 0)
			break;

		entries = (struct nandfs_dat_entry *)req.bp_entry->b_data;
		vinfo[n].nvi_start = entries[slot].de_start;
		vinfo[n].nvi_end = entries[slot].de_end;
		vinfo[n].nvi_blocknr = entries[slot].de_blocknr;

		DPRINTF(DAT, ("%s: vinfo: %jx[%jx-%jx]->%jx\n",
		    __func__, vinfo[n].nvi_vblocknr, vinfo[n].nvi_start,
		    vinfo[n].nvi_end, vinfo[n].nvi_blocknr));

		brelse(req.bp_entry);
	}

	VOP_UNLOCK(NTOV(dat), 0);
	return (error);
}
Exemplo n.º 9
0
/*
 * Allocate a fresh inode in the ifile, initialize it with 'mode' and
 * return the in-core node through '*node'.
 *
 * Returns 0 on success or an errno from entry allocation or node
 * instantiation.  On failure '*node' is not valid and must not be
 * used.
 *
 * Fix: the final DPRINTF used to dereference (*node)->nn_ino even
 * when nandfs_get_node failed, reading through an invalid pointer
 * under debug builds; it is now emitted only on success.
 */
int
nandfs_node_create(struct nandfsmount *nmp, struct nandfs_node **node,
    uint16_t mode)
{
	struct nandfs_alloc_request req;
	struct nandfs_device *nandfsdev;
	struct nandfs_mdt *mdt;
	struct nandfs_node *ifile;
	struct nandfs_inode *inode;
	struct vnode *vp;
	uint32_t entry;
	int error = 0;

	nandfsdev = nmp->nm_nandfsdev;
	mdt = &nandfsdev->nd_ifile_mdt;
	ifile = nmp->nm_ifile_node;
	vp = NTOV(ifile);

	VOP_LOCK(vp, LK_EXCLUSIVE);
	/* Allocate new inode in ifile; search starts after the last ino. */
	req.entrynum = nandfsdev->nd_last_ino + 1;
	error = nandfs_find_free_entry(mdt, ifile, &req);
	if (error) {
		VOP_UNLOCK(vp, 0);
		return (error);
	}

	/* Read or create the ifile block that will hold the new inode. */
	error = nandfs_get_entry_block(mdt, ifile, &req, &entry, 1);
	if (error) {
		VOP_UNLOCK(vp, 0);
		return (error);
	}

	/* Inode initialization */
	inode = ((struct nandfs_inode *) req.bp_entry->b_data) + entry;
	nandfs_inode_init(inode, mode);

	/* Commit the allocation in the ifile. */
	error = nandfs_alloc_entry(mdt, &req);
	if (error) {
		VOP_UNLOCK(vp, 0);
		return (error);
	}

	VOP_UNLOCK(vp, 0);

	nandfsdev->nd_last_ino = req.entrynum;
	error = nandfs_get_node(nmp, req.entrynum, node);
	if (error == 0) {
		/* Only dereference '*node' once nandfs_get_node succeeded. */
		DPRINTF(IFILE, ("%s: node: %p ino: %#jx\n",
		    __func__, node, (uintmax_t)((*node)->nn_ino)));
	}

	return (error);
}
Exemplo n.º 10
0
/*
 * End the lifetime of virtual block 'vblock': store the device's last
 * checkpoint number into the DAT entry's de_end field and force the
 * buffer dirty.  The DAT node is locked here only if the caller does
 * not already hold the lock.
 */
int
nandfs_vblock_end(struct nandfs_device *nandfsdev, nandfs_daddr_t vblock)
{
	struct nandfs_node *dat;
	struct nandfs_mdt *mdt;
	struct nandfs_alloc_request req;
	struct nandfs_dat_entry *dat_entry;
	uint64_t end;
	uint32_t entry;
	int locked, error;

	dat = nandfsdev->nd_dat_node;
	mdt = &nandfsdev->nd_dat_mdt;
	end = nandfsdev->nd_last_cno;

	/* Avoid recursing on the DAT lock if the caller already holds it. */
	locked = NANDFS_VOP_ISLOCKED(NTOV(dat));
	if (!locked)
		VOP_LOCK(NTOV(dat), LK_EXCLUSIVE);
	req.entrynum = vblock;

	error = nandfs_get_entry_block(mdt, dat, &req, &entry, 0);
	if (!error) {
		dat_entry = (struct nandfs_dat_entry *) req.bp_entry->b_data;
		dat_entry[entry].de_end = end;
		DPRINTF(DAT, ("%s: end vblock %#jx at checkpoint %#jx\n",
		    __func__, (uintmax_t)vblock, (uintmax_t)end));

		/*
		 * It is mostly called from syncer() so
		 * we want to force making buf dirty
		 */
		error = nandfs_dirty_buf(req.bp_entry, 1);
	}

	if (!locked)
		VOP_UNLOCK(NTOV(dat), 0);

	return (error);
}
Exemplo n.º 11
0
/*
 * Release the DAT entry for virtual block 'vblock', returning it to
 * the free pool.
 *
 * Returns 0 on success or an errno if the entry cannot be found or
 * freed.  Fix: the return value of nandfs_free_entry was previously
 * discarded, so a failed free was reported as success; it is now
 * propagated to the caller.
 */
int
nandfs_vblock_free(struct nandfs_device *nandfsdev, nandfs_daddr_t vblock)
{
	struct nandfs_node *dat;
	struct nandfs_mdt *mdt;
	struct nandfs_alloc_request req;
	int error;

	dat = nandfsdev->nd_dat_node;
	mdt = &nandfsdev->nd_dat_mdt;

	VOP_LOCK(NTOV(dat), LK_EXCLUSIVE);
	req.entrynum = vblock;

	error = nandfs_find_entry(mdt, dat, &req);
	if (!error) {
		DPRINTF(DAT, ("%s: vblk %#jx\n", __func__, (uintmax_t)vblock));
		error = nandfs_free_entry(mdt, &req);
	}

	VOP_UNLOCK(NTOV(dat), 0);
	return (error);
}
Exemplo n.º 12
0
/*
 * Fill '*nss' with segment usage statistics taken from the sufile
 * header block and the current device state.
 *
 * Returns 0 on success.  NOTE(review): on a read failure this returns
 * -1 rather than the errno from nandfs_bread — confirm that callers
 * expect that convention.
 */
int
nandfs_get_seg_stat(struct nandfs_device *nandfsdev,
    struct nandfs_seg_stat *nss)
{
	struct nandfs_sufile_header *suhdr;
	struct nandfs_node *su_node;
	struct buf *bp;
	int err;

	su_node = nandfsdev->nd_su_node;

	NANDFS_WRITELOCK(nandfsdev);
	VOP_LOCK(NTOV(su_node), LK_SHARED);
	/* Block 0 of the sufile holds the header. */
	err = nandfs_bread(nandfsdev->nd_su_node, 0, NOCRED, 0, &bp);
	if (err) {
		brelse(bp);
		VOP_UNLOCK(NTOV(su_node), 0);
		NANDFS_WRITEUNLOCK(nandfsdev);
		return (-1);
	}

	suhdr = (struct nandfs_sufile_header *)bp->b_data;
	nss->nss_nsegs = nandfsdev->nd_fsdata.f_nsegments;
	nss->nss_ncleansegs = suhdr->sh_ncleansegs;
	nss->nss_ndirtysegs = suhdr->sh_ndirtysegs;
	nss->nss_ctime = 0;
	nss->nss_nongc_ctime = nandfsdev->nd_ts.tv_sec;
	nss->nss_prot_seq = nandfsdev->nd_seg_sequence;

	brelse(bp);
	VOP_UNLOCK(NTOV(su_node), 0);

	NANDFS_WRITEUNLOCK(nandfsdev);

	return (0);
}
Exemplo n.º 13
0
/*
 * Remove 'node' from the ifile: locate its allocation entry, destroy
 * the in-core inode contents and free the ifile entry.
 *
 * Returns 0 on success or an errno from the entry lookup or free.
 * Fix: corrected the "freing" typo in the free-failure error message.
 */
int
nandfs_node_destroy(struct nandfs_node *node)
{
	struct nandfs_alloc_request req;
	struct nandfsmount *nmp;
	struct nandfs_mdt *mdt;
	struct nandfs_node *ifile;
	struct vnode *vp;
	int error = 0;

	nmp = node->nn_nmp;
	req.entrynum = node->nn_ino;
	mdt = &nmp->nm_nandfsdev->nd_ifile_mdt;
	ifile = nmp->nm_ifile_node;
	vp = NTOV(ifile);

	DPRINTF(IFILE, ("%s: destroy node: %p ino: %#jx\n",
	    __func__, node, (uintmax_t)node->nn_ino));
	VOP_LOCK(vp, LK_EXCLUSIVE);

	error = nandfs_find_entry(mdt, ifile, &req);
	if (error) {
		nandfs_error("%s: finding entry error:%d node %p(%jx)",
		    __func__, error, node, node->nn_ino);
		VOP_UNLOCK(vp, 0);
		return (error);
	}

	/* Clear the inode body before releasing its ifile slot. */
	nandfs_inode_destroy(&node->nn_inode);

	error = nandfs_free_entry(mdt, &req);
	if (error) {
		nandfs_error("%s: freeing entry error:%d node %p(%jx)",
		    __func__, error, node, node->nn_ino);
		VOP_UNLOCK(vp, 0);
		return (error);
	}

	VOP_UNLOCK(vp, 0);
	DPRINTF(IFILE, ("%s: freed node %p ino %#jx\n",
	    __func__, node, (uintmax_t)node->nn_ino));
	return (error);
}
Exemplo n.º 14
0
/*
 * Make buffer dirty, it will be updated soon but first it need to be
 * gathered by syncer.
 */
int
nandfs_touch_segment(struct nandfs_device *fsdev, uint64_t seg)
{
	struct nandfs_node *node;
	struct buf *bp;
	uint64_t blkno, off;
	int err;

	node = fsdev->nd_su_node;
	ASSERT_VOP_LOCKED(NTOV(node), __func__);

	/* Find the sufile block and offset of this segment's usage entry. */
	nandfs_seg_usage_blk_offset(fsdev, seg, &blkno, &off);

	err = nandfs_bread(node, blkno, NOCRED, 0, &bp);
	if (err != 0) {
		brelse(bp);
		nandfs_error("%s: cannot preallocate new segment\n", __func__);
		return (err);
	}

	/* Force-dirty so the syncer gathers the block. */
	nandfs_dirty_buf(bp, 1);

	DPRINTF(SEG, ("%s: seg:%#jx\n", __func__, (uintmax_t)seg));
	return (0);
}
Exemplo n.º 15
0
/*
 * Make segment free: clear segment 'seg's usage entry, bump the
 * clean counter / drop the dirty counter in the sufile header and
 * refresh the device's free block count.  Caller must hold the
 * sufile node lock.
 */
int
nandfs_free_segment(struct nandfs_device *fsdev, uint64_t seg)
{
	struct nandfs_node *su_node;
	struct nandfs_sufile_header *su_header;
	struct nandfs_segment_usage *su_usage;
	struct buf *bp_header, *bp;
	uint64_t blk, offset;
	int error;

	su_node = fsdev->nd_su_node;
	ASSERT_VOP_LOCKED(NTOV(su_node), __func__);

	/* Read su header */
	error = nandfs_bread(su_node, 0, NOCRED, 0, &bp_header);
	if (error) {
		brelse(bp_header);
		return (error);
	}

	su_header = (struct nandfs_sufile_header *)bp_header->b_data;
	nandfs_seg_usage_blk_offset(fsdev, seg, &blk, &offset);

	/* Read su usage block if other than su header block */
	if (blk != 0) {
		error = nandfs_bread(su_node, blk, NOCRED, 0, &bp);
		if (error) {
			brelse(bp);
			brelse(bp_header);
			return (error);
		}
	} else
		/* Usage entry lives in the header block itself; alias it. */
		bp = bp_header;

	/* Reset su usage data */
	su_usage = SU_USAGE_OFF(bp, offset);
	su_usage->su_lastmod = fsdev->nd_ts.tv_sec;
	su_usage->su_nblocks = 0;
	su_usage->su_flags = 0;

	/* Update clean/dirty counter in header */
	su_header->sh_ncleansegs++;
	su_header->sh_ndirtysegs--;

	/*
	 *  Make buffers dirty, called by cleaner
	 *  so force dirty even if no much space left
	 *  on device
	 */
	nandfs_dirty_buf(bp_header, 1);
	if (bp != bp_header)
		nandfs_dirty_buf(bp, 1);

	/* Update free block count */
	fsdev->nd_super.s_free_blocks_count = su_header->sh_ncleansegs *
	    fsdev->nd_fsdata.f_blocks_per_segment;
	fsdev->nd_clean_segs++;

	DPRINTF(SEG, ("%s: seg:%#jx\n", __func__, (uintmax_t)seg));

	return (0);
}
Exemplo n.º 16
0
/*
 * Allocate a new virtual block number in the DAT and return it through
 * '*vblock'.  The new entry's lifetime starts at the next checkpoint
 * (de_start) and is left open-ended (de_end = UINTMAX_MAX) with no
 * physical block assigned yet.  The DAT node is locked here only if
 * the caller does not already hold the lock.
 */
int
nandfs_vblock_alloc(struct nandfs_device *nandfsdev, nandfs_daddr_t *vblock)
{
	struct nandfs_node *dat;
	struct nandfs_mdt *mdt;
	struct nandfs_alloc_request req;
	struct nandfs_dat_entry *dat_entry;
	uint64_t start;
	uint32_t entry;
	int locked, error;

	dat = nandfsdev->nd_dat_node;
	mdt = &nandfsdev->nd_dat_mdt;
	start = nandfsdev->nd_last_cno + 1;

	/* Avoid recursing on the DAT lock if the caller already holds it. */
	locked = NANDFS_VOP_ISLOCKED(NTOV(dat));
	if (!locked)
		VOP_LOCK(NTOV(dat), LK_EXCLUSIVE);
	req.entrynum = 0;

	/* Alloc vblock number */
	error = nandfs_find_free_entry(mdt, dat, &req);
	if (error) {
		nandfs_error("%s: cannot find free vblk entry\n",
		    __func__);
		if (!locked)
			VOP_UNLOCK(NTOV(dat), 0);
		return (error);
	}

	/* Read/create buffer */
	error = nandfs_get_entry_block(mdt, dat, &req, &entry, 1);
	if (error) {
		nandfs_error("%s: cannot get free vblk entry\n",
		    __func__);
		/* Roll back the reservation made by find_free_entry. */
		nandfs_abort_entry(&req);
		if (!locked)
			VOP_UNLOCK(NTOV(dat), 0);
		return (error);
	}

	/* Fill out vblock data */
	dat_entry = (struct nandfs_dat_entry *) req.bp_entry->b_data;
	dat_entry[entry].de_start = start;
	dat_entry[entry].de_end = UINTMAX_MAX;
	dat_entry[entry].de_blocknr = 0;

	/* Commit allocation */
	error = nandfs_alloc_entry(mdt, &req);
	if (error) {
		nandfs_error("%s: cannot get free vblk entry\n",
		    __func__);
		if (!locked)
			VOP_UNLOCK(NTOV(dat), 0);
		return (error);
	}

	/* Return allocated vblock */
	*vblock = req.entrynum;
	DPRINTF(DAT, ("%s: allocated vblock %#jx\n",
	    __func__, (uintmax_t)*vblock));

	if (!locked)
		VOP_UNLOCK(NTOV(dat), 0);
	return (error);
}
Exemplo n.º 17
0
/*
 * Collect segment usage info for up to 'nmembs' segments starting at
 * 'segment', storing records in 'nsi' and the count through '*nsegs'
 * (when non-NULL).  A segment is reported only if its flags contain
 * at least one bit of 'filter' (when non-zero) and no bit of
 * 'nfilter' (when non-zero).  The current and next segments get the
 * ACTIVE flag added on the fly.  Scanning stops at the last segment
 * on the device or after 'nmembs' matches.
 */
int
nandfs_get_segment_info_filter(struct nandfs_device *fsdev,
    struct nandfs_suinfo *nsi, uint32_t nmembs, uint64_t segment,
    uint64_t *nsegs, uint32_t filter, uint32_t nfilter)
{
	struct nandfs_segment_usage *su;
	struct nandfs_node *su_node;
	struct buf *bp;
	uint64_t curr, blocknr, blockoff, i;
	uint32_t flags;
	int err = 0;

	/* Sentinel: no sufile block cached yet. */
	curr = ~(0);

	/* Keep segment construction from changing state underneath us. */
	lockmgr(&fsdev->nd_seg_const, LK_EXCLUSIVE, NULL);
	su_node = fsdev->nd_su_node;

	VOP_LOCK(NTOV(su_node), LK_SHARED);

	bp = NULL;
	if (nsegs !=  NULL)
		*nsegs = 0;
	for (i = 0; i < nmembs; segment++) {
		if (segment == fsdev->nd_fsdata.f_nsegments)
			break;

		nandfs_seg_usage_blk_offset(fsdev, segment, &blocknr,
		    &blockoff);

		/* Re-read the sufile block only when it changes. */
		if (i == 0 || curr != blocknr) {
			if (bp != NULL)
				brelse(bp);
			err = nandfs_bread(su_node, blocknr, NOCRED,
			    0, &bp);
			if (err) {
				goto out;
			}
			curr = blocknr;
		}

		su = SU_USAGE_OFF(bp, blockoff);
		flags = su->su_flags;
		if (segment == fsdev->nd_seg_num ||
		    segment == fsdev->nd_next_seg_num)
			flags |= NANDFS_SEGMENT_USAGE_ACTIVE;

		/* Apply negative filter first, then positive filter. */
		if (nfilter != 0 && (flags & nfilter) != 0)
			continue;
		if (filter != 0 && (flags & filter) == 0)
			continue;

		nsi->nsi_num = segment;
		nsi->nsi_lastmod = su->su_lastmod;
		nsi->nsi_blocks = su->su_nblocks;
		nsi->nsi_flags = flags;
		nsi++;
		i++;
		if (nsegs != NULL)
			(*nsegs)++;
	}

out:
	if (bp != NULL)
		brelse(bp);
	VOP_UNLOCK(NTOV(su_node), 0);
	lockmgr(&fsdev->nd_seg_const, LK_RELEASE, NULL);

	return (err);
}
Exemplo n.º 18
0
/*
 * Alloc new segment: scan the sufile for a segment usage entry with
 * no flags set, starting just after the last allocated segment and
 * wrapping around, claim it and update the header counters.  Caller
 * must hold the sufile node lock.
 *
 * Returns 0 on success with the segment number in '*seg', 1 when no
 * free segment exists, or an errno on I/O failure.
 *
 * Fix: both early error returns inside the scan loop previously
 * leaked bp_header (returned without brelse); the header buffer is
 * now released on every error path.
 */
int
nandfs_alloc_segment(struct nandfs_device *fsdev, uint64_t *seg)
{
	struct nandfs_node *su_node;
	struct nandfs_sufile_header *su_header;
	struct nandfs_segment_usage *su_usage;
	struct buf *bp_header, *bp;
	uint64_t blk, vblk, offset, i, rest, nsegments;
	uint16_t seg_size;
	int error, found;

	seg_size = fsdev->nd_fsdata.f_segment_usage_size;
	nsegments = fsdev->nd_fsdata.f_nsegments;

	su_node = fsdev->nd_su_node;
	ASSERT_VOP_LOCKED(NTOV(su_node), __func__);

	/* Read header buffer */
	error = nandfs_bread(su_node, 0, NOCRED, 0, &bp_header);
	if (error) {
		brelse(bp_header);
		return (error);
	}

	su_header = (struct nandfs_sufile_header *)bp_header->b_data;

	/* Get last allocated segment */
	i = su_header->sh_last_alloc + 1;

	found = 0;
	bp = NULL;
	while (!found) {
		nandfs_seg_usage_blk_offset(fsdev, i, &blk, &offset);
		if (blk != 0) {
			error = nandfs_bmap_lookup(su_node, blk, &vblk);
			if (error) {
				nandfs_error("%s: cannot find vblk for blk "
				    "blk:%jx\n", __func__, blk);
				/* Don't leak the header buffer on error. */
				brelse(bp_header);
				return (error);
			}
			/* Existing block is read; an unmapped one created. */
			if (vblk)
				error = nandfs_bread(su_node, blk, NOCRED, 0,
				    &bp);
			else
				error = nandfs_bcreate(su_node, blk, NOCRED, 0,
				    &bp);
			if (error) {
				nandfs_error("%s: cannot create/read "
				    "vblk:%jx\n", __func__, vblk);
				if (bp)
					brelse(bp);
				/* Don't leak the header buffer on error. */
				brelse(bp_header);
				return (error);
			}

			su_usage = SU_USAGE_OFF(bp, offset);
		} else {
			/* Entry lives in the header block itself. */
			su_usage = SU_USAGE_OFF(bp_header, offset);
			bp = bp_header;
		}

		rest = (fsdev->nd_blocksize - offset) / seg_size;
		/* Go through all su usage in block */
		while (rest) {
			/* When last check start from beggining */
			if (i == nsegments)
				break;

			/* Flag-free entry means the segment is free. */
			if (!su_usage->su_flags) {
				su_usage->su_flags = 1;
				found = 1;
				break;
			}
			su_usage++;
			i++;

			/* If all checked return error */
			if (i == su_header->sh_last_alloc) {
				DPRINTF(SEG, ("%s: cannot allocate segment \n",
				    __func__));
				brelse(bp_header);
				if (blk != 0)
					brelse(bp);
				return (1);
			}
			rest--;
		}
		if (!found) {
			/* Otherwise read another block */
			if (blk != 0)
				brelse(bp);
			if (i == nsegments) {
				/* Wrap the scan back to the header block. */
				blk = 0;
				i = 0;
			} else
				blk++;
			offset = 0;
		}
	}

	if (found) {
		*seg = i;
		su_header->sh_last_alloc = i;
		su_header->sh_ncleansegs--;
		su_header->sh_ndirtysegs++;

		fsdev->nd_super.s_free_blocks_count = su_header->sh_ncleansegs *
		    fsdev->nd_fsdata.f_blocks_per_segment;
		fsdev->nd_clean_segs--;

		/*
		 * It is mostly called from syncer() so we want to force
		 * making buf dirty.
		 */
		error = nandfs_dirty_buf(bp_header, 1);
		if (error) {
			if (bp && bp != bp_header)
				brelse(bp);
			return (error);
		}
		if (bp && bp != bp_header)
			nandfs_dirty_buf(bp, 1);

		DPRINTF(SEG, ("%s: seg:%#jx\n", __func__, (uintmax_t)i));

		return (0);
	}

	DPRINTF(SEG, ("%s: failed\n", __func__));

	return (1);
}
Exemplo n.º 19
0
/*
 * Finish a cleaner pass: move live blocks into the GC node, delete
 * the requested checkpoint ranges, free dead virtual blocks, re-dirty
 * the surviving block descriptors and finally queue the cleaned
 * segments for freeing.
 *
 * Returns 0 on success or the first error; on error the remaining
 * phases are skipped.
 */
static int
nandfs_cleaner_clean_segments(struct nandfs_device *nffsdev,
    struct nandfs_vinfo *vinfo, uint32_t nvinfo,
    struct nandfs_period *pd, uint32_t npd,
    struct nandfs_bdesc *bdesc, uint32_t nbdesc,
    uint64_t *segments, uint32_t nsegs)
{
	struct nandfs_node *gc;
	struct buf *bp;
	uint32_t i;
	int error = 0;

	gc = nffsdev->nd_gc_node;

	DPRINTF(CLEAN, ("%s: enter\n", __func__));

	/* Phase 1: tag each live block's buffer with its vblock number. */
	VOP_LOCK(NTOV(gc), LK_EXCLUSIVE);
	for (i = 0; i < nvinfo; i++) {
		if (!vinfo[i].nvi_alive)
			continue;
		DPRINTF(CLEAN, ("%s: read vblknr:%#jx blk:%#jx\n",
		    __func__, (uintmax_t)vinfo[i].nvi_vblocknr,
		    (uintmax_t)vinfo[i].nvi_blocknr));
		error = nandfs_bread(nffsdev->nd_gc_node, vinfo[i].nvi_blocknr,
		    NULL, 0, &bp);
		if (error) {
			nandfs_error("%s:%d", __FILE__, __LINE__);
			VOP_UNLOCK(NTOV(gc), 0);
			goto out;
		}
		nandfs_vblk_set(bp, vinfo[i].nvi_vblocknr);
		nandfs_buf_set(bp, NANDFS_VBLK_ASSIGNED);
		nandfs_dirty_buf(bp, 1);
	}
	VOP_UNLOCK(NTOV(gc), 0);

	/* Delete checkpoints */
	for (i = 0; i < npd; i++) {
		DPRINTF(CLEAN, ("delete checkpoint: %jx\n",
		    (uintmax_t)pd[i].p_start));
		error = nandfs_delete_cp(nffsdev->nd_cp_node, pd[i].p_start,
		    pd[i].p_end);
		if (error) {
			nandfs_error("%s:%d", __FILE__, __LINE__);
			goto out;
		}
	}

	/* Update vblocks */
	for (i = 0; i < nvinfo; i++) {
		if (vinfo[i].nvi_alive)
			continue;
		DPRINTF(CLEAN, ("freeing vblknr: %jx\n", vinfo[i].nvi_vblocknr));
		error = nandfs_vblock_free(nffsdev, vinfo[i].nvi_vblocknr);
		if (error) {
			nandfs_error("%s:%d", __FILE__, __LINE__);
			goto out;
		}
	}

	/* Re-dirty the DAT blocks behind the live block descriptors. */
	error = nandfs_process_bdesc(nffsdev, bdesc, nbdesc);
	if (error) {
		nandfs_error("%s:%d", __FILE__, __LINE__);
		goto out;
	}

	/* Add segments to clean */
	if (nffsdev->nd_free_count) {
		/* Grow the pending-free list; M_WAITOK sleeps, never fails. */
		nffsdev->nd_free_base = realloc(nffsdev->nd_free_base,
		    (nffsdev->nd_free_count + nsegs) * sizeof(uint64_t),
		    M_NANDFSTEMP, M_WAITOK | M_ZERO);
		memcpy(&nffsdev->nd_free_base[nffsdev->nd_free_count], segments,
		    nsegs * sizeof(uint64_t));
		nffsdev->nd_free_count += nsegs;
	} else {
		/* First batch: allocate a fresh pending-free list. */
		nffsdev->nd_free_base = malloc(nsegs * sizeof(uint64_t),
		    M_NANDFSTEMP, M_WAITOK|M_ZERO);
		memcpy(nffsdev->nd_free_base, segments,
		    nsegs * sizeof(uint64_t));
		nffsdev->nd_free_count = nsegs;
	}

out:

	DPRINTF(CLEAN, ("%s: exit error %d\n", __func__, error));

	return (error);
}