int
nandfs_node_update(struct nandfs_node *node)
{
	struct nandfs_alloc_request req;
	struct nandfsmount *nmp;
	struct nandfs_mdt *mdt;
	struct nandfs_node *ifile;
	struct nandfs_inode *inode;
	uint32_t index;
	int error = 0;

	nmp = node->nn_nmp;
	ifile = nmp->nm_ifile_node;
	ASSERT_VOP_LOCKED(NTOV(ifile), __func__);

	req.entrynum = node->nn_ino;
	mdt = &nmp->nm_nandfsdev->nd_ifile_mdt;

	DPRINTF(IFILE, ("%s: node:%p ino:%#jx\n",
	    __func__, &node->nn_inode, (uintmax_t)node->nn_ino));

	error = nandfs_get_entry_block(mdt, ifile, &req, &index, 0);
	if (error) {
		printf("nandfs_get_entry_block returned with ERROR=%d\n",
		    error);
		return (error);
	}

	inode = ((struct nandfs_inode *) req.bp_entry->b_data) + index;
	memcpy(inode, &node->nn_inode, sizeof(*inode));
	error = nandfs_dirty_buf(req.bp_entry, 0);

	return (error);
}

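/*
 * The segment-usage (SUFILE) helpers below share a common pattern:
 * nandfs_seg_usage_blk_offset() translates a segment number into the
 * SUFILE block holding its usage entry plus the byte offset within that
 * block, the block is read with nandfs_bread(), and SU_USAGE_OFF()
 * points at the struct nandfs_segment_usage that is modified before the
 * buffer is marked dirty.
 */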
static int
nandfs_bad_segment(struct nandfs_device *fsdev, uint64_t seg)
{
	struct nandfs_node *su_node;
	struct nandfs_segment_usage *su_usage;
	struct buf *bp;
	uint64_t blk, offset;
	int error;

	su_node = fsdev->nd_su_node;
	ASSERT_VOP_LOCKED(NTOV(su_node), __func__);

	nandfs_seg_usage_blk_offset(fsdev, seg, &blk, &offset);

	error = nandfs_bread(su_node, blk, NOCRED, 0, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}

	su_usage = SU_USAGE_OFF(bp, offset);
	su_usage->su_lastmod = fsdev->nd_ts.tv_sec;
	su_usage->su_flags = NANDFS_SEGMENT_USAGE_ERROR;

	DPRINTF(SEG, ("%s: seg:%#jx\n", __func__, (uintmax_t)seg));

	nandfs_dirty_buf(bp, 1);

	return (0);
}

/* Update block count of segment */
int
nandfs_update_segment(struct nandfs_device *fsdev, uint64_t seg, uint32_t nblks)
{
	struct nandfs_node *su_node;
	struct nandfs_segment_usage *su_usage;
	struct buf *bp;
	uint64_t blk, offset;
	int error;

	su_node = fsdev->nd_su_node;
	ASSERT_VOP_LOCKED(NTOV(su_node), __func__);

	nandfs_seg_usage_blk_offset(fsdev, seg, &blk, &offset);

	error = nandfs_bread(su_node, blk, NOCRED, 0, &bp);
	if (error) {
		nandfs_error("%s: cannot read block %jx to update\n",
		    __func__, blk);
		brelse(bp);
		return (error);
	}

	su_usage = SU_USAGE_OFF(bp, offset);
	su_usage->su_lastmod = fsdev->nd_ts.tv_sec;
	su_usage->su_flags = NANDFS_SEGMENT_USAGE_DIRTY;
	su_usage->su_nblocks += nblks;

	DPRINTF(SEG, ("%s: seg:%#jx inc:%#x cur:%#x\n", __func__,
	    (uintmax_t)seg, nblks, su_usage->su_nblocks));

	nandfs_dirty_buf(bp, 1);

	return (0);
}

int
nandfs_free_entry(struct nandfs_mdt *mdt, struct nandfs_alloc_request *req)
{
	struct nandfs_block_group_desc *descriptors;
	uint64_t bitmap_idx, bitmap_off;
	uint64_t group;
	uint32_t *mask, maskrw;

	nandfs_calc_idx_entry(mdt, req->entrynum, &group, &bitmap_idx,
	    &bitmap_off);

	DPRINTF(ALLOC, ("nandfs_free_entry: req->entrynum=%jx bitmap_idx=%jx"
	    " bitmap_off=%jx group=%jx\n",
	    (uintmax_t)req->entrynum, (uintmax_t)bitmap_idx,
	    (uintmax_t)bitmap_off, (uintmax_t)group));

	/* Update counter of free entries for group */
	descriptors = (struct nandfs_block_group_desc *) req->bp_desc->b_data;
	descriptors[group].bg_nfrees++;

	/* Clear bit to indicate that entry is free again */
	mask = (uint32_t *)req->bp_bitmap->b_data;
	maskrw = mask[bitmap_idx];
	KASSERT(maskrw & (1 << bitmap_off), ("freeing unallocated vblock"));
	maskrw &= ~(1 << bitmap_off);
	mask[bitmap_idx] = maskrw;

	/* Make descriptor, bitmap and entry buffer dirty */
	if (nandfs_dirty_buf(req->bp_desc, 0) == 0) {
		nandfs_dirty_buf(req->bp_bitmap, 1);
		nandfs_dirty_buf(req->bp_entry, 1);
	} else {
		brelse(req->bp_bitmap);
		brelse(req->bp_entry);
		return (-1);
	}

	return (0);
}

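/*
 * Re-read and dirty the DAT blocks named by the cleaner's block
 * descriptors.  Descriptors that are no longer alive are skipped; live
 * ones are read either through the meta path (bd_level != 0) or the
 * regular path and marked dirty, presumably so that the next segment
 * construction copies them out of the segment being reclaimed.
 */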
static int
nandfs_process_bdesc(struct nandfs_device *nffsdev, struct nandfs_bdesc *bd,
    uint64_t nmembs)
{
	struct nandfs_node *dat_node;
	struct buf *bp;
	uint64_t i;
	int error;

	dat_node = nffsdev->nd_dat_node;

	VOP_LOCK(NTOV(dat_node), LK_EXCLUSIVE);

	for (i = 0; i < nmembs; i++) {
		if (!bd[i].bd_alive)
			continue;
		DPRINTF(CLEAN, ("%s: idx %jx offset %jx\n",
		    __func__, (uintmax_t)i, (uintmax_t)bd[i].bd_offset));
		if (bd[i].bd_level) {
			error = nandfs_bread_meta(dat_node, bd[i].bd_offset,
			    NULL, 0, &bp);
			if (error) {
				nandfs_error("%s: cannot read dat node "
				    "level:%d\n", __func__, bd[i].bd_level);
				brelse(bp);
				VOP_UNLOCK(NTOV(dat_node), 0);
				return (error);
			}
			nandfs_dirty_buf_meta(bp, 1);
			nandfs_bmap_dirty_blocks(VTON(bp->b_vp), bp, 1);
		} else {
			error = nandfs_bread(dat_node, bd[i].bd_offset, NULL,
			    0, &bp);
			if (error) {
				nandfs_error("%s: cannot read dat node\n",
				    __func__);
				brelse(bp);
				VOP_UNLOCK(NTOV(dat_node), 0);
				return (error);
			}
			nandfs_dirty_buf(bp, 1);
		}
		DPRINTF(CLEAN, ("%s: bp: %p\n", __func__, bp));
	}

	VOP_UNLOCK(NTOV(dat_node), 0);

	return (0);
}

int
nandfs_update_dirent(struct vnode *dvp, struct nandfs_node *fnode,
    struct nandfs_node *tnode)
{
	struct nandfs_node *dir_node;
	struct nandfs_dir_entry *dirent;
	struct buf *bp;
	uint64_t file_size, blocknr;
	uint32_t blocksize, off;
	uint8_t *pos;
	int error;

	dir_node = VTON(dvp);
	file_size = dir_node->nn_inode.i_size;
	if (!file_size)
		return (0);

	DPRINTF(LOOKUP,
	    ("chg direntry dvp %p ino %#jx to ino %#jx at off %#jx\n",
	    dvp, (uintmax_t)tnode->nn_ino, (uintmax_t)fnode->nn_ino,
	    (uintmax_t)tnode->nn_diroff));

	blocksize = dir_node->nn_nandfsdev->nd_blocksize;
	blocknr = tnode->nn_diroff / blocksize;
	off = tnode->nn_diroff % blocksize;

	error = nandfs_bread(dir_node, blocknr, NOCRED, 0, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}

	pos = bp->b_data;
	dirent = (struct nandfs_dir_entry *) (pos + off);
	KASSERT((dirent->inode == tnode->nn_ino), ("direntry mismatch"));

	dirent->inode = fnode->nn_ino;
	error = nandfs_dirty_buf(bp, 0);
	if (error)
		return (error);

	return (0);
}

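/*
 * Close a virtual block's lifetime in the DAT: look up its DAT entry
 * and record the current checkpoint number in de_end.  The DAT node is
 * locked here only if the caller does not already hold the lock.
 */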
int
nandfs_vblock_end(struct nandfs_device *nandfsdev, nandfs_daddr_t vblock)
{
	struct nandfs_node *dat;
	struct nandfs_mdt *mdt;
	struct nandfs_alloc_request req;
	struct nandfs_dat_entry *dat_entry;
	uint64_t end;
	uint32_t entry;
	int locked, error;

	dat = nandfsdev->nd_dat_node;
	mdt = &nandfsdev->nd_dat_mdt;
	end = nandfsdev->nd_last_cno;

	locked = NANDFS_VOP_ISLOCKED(NTOV(dat));
	if (!locked)
		VOP_LOCK(NTOV(dat), LK_EXCLUSIVE);
	req.entrynum = vblock;

	error = nandfs_get_entry_block(mdt, dat, &req, &entry, 0);
	if (!error) {
		dat_entry = (struct nandfs_dat_entry *) req.bp_entry->b_data;
		dat_entry[entry].de_end = end;
		DPRINTF(DAT, ("%s: end vblock %#jx at checkpoint %#jx\n",
		    __func__, (uintmax_t)vblock, (uintmax_t)end));

		/*
		 * This is mostly called from the syncer, so force the
		 * buffer to be marked dirty.
		 */
		error = nandfs_dirty_buf(req.bp_entry, 1);
	}

	if (!locked)
		VOP_UNLOCK(NTOV(dat), 0);

	return (error);
}

int
nandfs_update_parent_dir(struct vnode *dvp, uint64_t newparent)
{
	struct nandfs_dir_entry *dirent;
	struct nandfs_node *dir_node;
	struct buf *bp;
	int error;

	dir_node = VTON(dvp);
	error = nandfs_bread(dir_node, 0, NOCRED, 0, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	dirent = (struct nandfs_dir_entry *)bp->b_data;
	dirent->inode = newparent;
	error = nandfs_dirty_buf(bp, 0);
	if (error)
		return (error);

	return (0);
}

/*
 * Make the buffer dirty; it will be updated soon, but first it needs to
 * be gathered by the syncer.
 */
int
nandfs_touch_segment(struct nandfs_device *fsdev, uint64_t seg)
{
	struct nandfs_node *su_node;
	struct buf *bp;
	uint64_t blk, offset;
	int error;

	su_node = fsdev->nd_su_node;
	ASSERT_VOP_LOCKED(NTOV(su_node), __func__);

	nandfs_seg_usage_blk_offset(fsdev, seg, &blk, &offset);

	error = nandfs_bread(su_node, blk, NOCRED, 0, &bp);
	if (error) {
		brelse(bp);
		nandfs_error("%s: cannot preallocate new segment\n", __func__);
		return (error);
	} else
		nandfs_dirty_buf(bp, 1);

	DPRINTF(SEG, ("%s: seg:%#jx\n", __func__, (uintmax_t)seg));

	return (error);
}

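/*
 * Segment allocation scans the SUFILE for a usage entry with no flags
 * set, starting just past sh_last_alloc and wrapping around to segment
 * zero when the last segment is reached.  The scan gives up once it
 * comes back around to sh_last_alloc.  On success the entry is flagged
 * in-use, the clean/dirty counters and the superblock free-block count
 * are updated, and the buffers are forcibly dirtied.
 */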
/* Alloc new segment */
int
nandfs_alloc_segment(struct nandfs_device *fsdev, uint64_t *seg)
{
	struct nandfs_node *su_node;
	struct nandfs_sufile_header *su_header;
	struct nandfs_segment_usage *su_usage;
	struct buf *bp_header, *bp;
	uint64_t blk, vblk, offset, i, rest, nsegments;
	uint16_t seg_size;
	int error, found;

	seg_size = fsdev->nd_fsdata.f_segment_usage_size;
	nsegments = fsdev->nd_fsdata.f_nsegments;

	su_node = fsdev->nd_su_node;
	ASSERT_VOP_LOCKED(NTOV(su_node), __func__);

	/* Read header buffer */
	error = nandfs_bread(su_node, 0, NOCRED, 0, &bp_header);
	if (error) {
		brelse(bp_header);
		return (error);
	}

	su_header = (struct nandfs_sufile_header *)bp_header->b_data;

	/* Get last allocated segment */
	i = su_header->sh_last_alloc + 1;

	found = 0;
	bp = NULL;
	while (!found) {
		nandfs_seg_usage_blk_offset(fsdev, i, &blk, &offset);
		if (blk != 0) {
			error = nandfs_bmap_lookup(su_node, blk, &vblk);
			if (error) {
				nandfs_error("%s: cannot find vblk for "
				    "blk:%jx\n", __func__, blk);
				brelse(bp_header);
				return (error);
			}
			if (vblk)
				error = nandfs_bread(su_node, blk, NOCRED, 0,
				    &bp);
			else
				error = nandfs_bcreate(su_node, blk, NOCRED, 0,
				    &bp);
			if (error) {
				nandfs_error("%s: cannot create/read "
				    "vblk:%jx\n", __func__, vblk);
				if (bp)
					brelse(bp);
				brelse(bp_header);
				return (error);
			}

			su_usage = SU_USAGE_OFF(bp, offset);
		} else {
			su_usage = SU_USAGE_OFF(bp_header, offset);
			bp = bp_header;
		}

		rest = (fsdev->nd_blocksize - offset) / seg_size;
		/* Go through all segment usage entries in this block */
		while (rest) {
			/* Wrap around to the beginning after the last segment */
			if (i == nsegments)
				break;

			if (!su_usage->su_flags) {
				su_usage->su_flags = 1;
				found = 1;
				break;
			}
			su_usage++;
			i++;

			/* If all segments were checked, return an error */
			if (i == su_header->sh_last_alloc) {
				DPRINTF(SEG, ("%s: cannot allocate segment\n",
				    __func__));
				brelse(bp_header);
				if (blk != 0)
					brelse(bp);
				return (1);
			}
			rest--;
		}

		if (!found) {
			/* Otherwise read another block */
			if (blk != 0)
				brelse(bp);
			if (i == nsegments) {
				blk = 0;
				i = 0;
			} else
				blk++;
			offset = 0;
		}
	}

	if (found) {
		*seg = i;
		su_header->sh_last_alloc = i;
		su_header->sh_ncleansegs--;
		su_header->sh_ndirtysegs++;

		fsdev->nd_super.s_free_blocks_count = su_header->sh_ncleansegs *
		    fsdev->nd_fsdata.f_blocks_per_segment;
		fsdev->nd_clean_segs--;

		/*
		 * This is mostly called from the syncer, so force the
		 * buffers to be marked dirty.
		 */
		error = nandfs_dirty_buf(bp_header, 1);
		if (error) {
			if (bp && bp != bp_header)
				brelse(bp);
			return (error);
		}
		if (bp && bp != bp_header)
			nandfs_dirty_buf(bp, 1);

		DPRINTF(SEG, ("%s: seg:%#jx\n", __func__, (uintmax_t)i));

		return (0);
	}

	DPRINTF(SEG, ("%s: failed\n", __func__));

	return (1);
}

/* Make segment free */
int
nandfs_free_segment(struct nandfs_device *fsdev, uint64_t seg)
{
	struct nandfs_node *su_node;
	struct nandfs_sufile_header *su_header;
	struct nandfs_segment_usage *su_usage;
	struct buf *bp_header, *bp;
	uint64_t blk, offset;
	int error;

	su_node = fsdev->nd_su_node;
	ASSERT_VOP_LOCKED(NTOV(su_node), __func__);

	/* Read su header */
	error = nandfs_bread(su_node, 0, NOCRED, 0, &bp_header);
	if (error) {
		brelse(bp_header);
		return (error);
	}

	su_header = (struct nandfs_sufile_header *)bp_header->b_data;
	nandfs_seg_usage_blk_offset(fsdev, seg, &blk, &offset);

	/* Read su usage block if other than su header block */
	if (blk != 0) {
		error = nandfs_bread(su_node, blk, NOCRED, 0, &bp);
		if (error) {
			brelse(bp);
			brelse(bp_header);
			return (error);
		}
	} else
		bp = bp_header;

	/* Reset su usage data */
	su_usage = SU_USAGE_OFF(bp, offset);
	su_usage->su_lastmod = fsdev->nd_ts.tv_sec;
	su_usage->su_nblocks = 0;
	su_usage->su_flags = 0;

	/* Update clean/dirty counters in header */
	su_header->sh_ncleansegs++;
	su_header->sh_ndirtysegs--;

	/*
	 * Make the buffers dirty.  This is called by the cleaner, so
	 * force the dirtying even if there is not much space left on
	 * the device.
	 */
	nandfs_dirty_buf(bp_header, 1);
	if (bp != bp_header)
		nandfs_dirty_buf(bp, 1);

	/* Update free block count */
	fsdev->nd_super.s_free_blocks_count = su_header->sh_ncleansegs *
	    fsdev->nd_fsdata.f_blocks_per_segment;
	fsdev->nd_clean_segs++;

	DPRINTF(SEG, ("%s: seg:%#jx\n", __func__, (uintmax_t)seg));

	return (0);
}

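/*
 * Main work of one cleaner pass: read the still-live blocks of the
 * victim segments through the GC node, tag them with their virtual
 * block numbers and mark them dirty, delete the checkpoints in the
 * reclaimed periods, free the virtual blocks that are no longer alive,
 * dirty the affected DAT blocks, and finally record the victim segments
 * in nd_free_base/nd_free_count for later processing.
 */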
static int
nandfs_cleaner_clean_segments(struct nandfs_device *nffsdev,
    struct nandfs_vinfo *vinfo, uint32_t nvinfo,
    struct nandfs_period *pd, uint32_t npd,
    struct nandfs_bdesc *bdesc, uint32_t nbdesc,
    uint64_t *segments, uint32_t nsegs)
{
	struct nandfs_node *gc;
	struct buf *bp;
	uint32_t i;
	int error = 0;

	gc = nffsdev->nd_gc_node;

	DPRINTF(CLEAN, ("%s: enter\n", __func__));

	VOP_LOCK(NTOV(gc), LK_EXCLUSIVE);
	for (i = 0; i < nvinfo; i++) {
		if (!vinfo[i].nvi_alive)
			continue;
		DPRINTF(CLEAN, ("%s: read vblknr:%#jx blk:%#jx\n",
		    __func__, (uintmax_t)vinfo[i].nvi_vblocknr,
		    (uintmax_t)vinfo[i].nvi_blocknr));
		error = nandfs_bread(nffsdev->nd_gc_node, vinfo[i].nvi_blocknr,
		    NULL, 0, &bp);
		if (error) {
			nandfs_error("%s:%d", __FILE__, __LINE__);
			VOP_UNLOCK(NTOV(gc), 0);
			goto out;
		}
		nandfs_vblk_set(bp, vinfo[i].nvi_vblocknr);
		nandfs_buf_set(bp, NANDFS_VBLK_ASSIGNED);
		nandfs_dirty_buf(bp, 1);
	}
	VOP_UNLOCK(NTOV(gc), 0);

	/* Delete checkpoints */
	for (i = 0; i < npd; i++) {
		DPRINTF(CLEAN, ("delete checkpoint: %jx\n",
		    (uintmax_t)pd[i].p_start));
		error = nandfs_delete_cp(nffsdev->nd_cp_node, pd[i].p_start,
		    pd[i].p_end);
		if (error) {
			nandfs_error("%s:%d", __FILE__, __LINE__);
			goto out;
		}
	}

	/* Update vblocks */
	for (i = 0; i < nvinfo; i++) {
		if (vinfo[i].nvi_alive)
			continue;
		DPRINTF(CLEAN, ("freeing vblknr: %jx\n",
		    (uintmax_t)vinfo[i].nvi_vblocknr));
		error = nandfs_vblock_free(nffsdev, vinfo[i].nvi_vblocknr);
		if (error) {
			nandfs_error("%s:%d", __FILE__, __LINE__);
			goto out;
		}
	}

	error = nandfs_process_bdesc(nffsdev, bdesc, nbdesc);
	if (error) {
		nandfs_error("%s:%d", __FILE__, __LINE__);
		goto out;
	}

	/* Add segments to clean */
	if (nffsdev->nd_free_count) {
		nffsdev->nd_free_base = realloc(nffsdev->nd_free_base,
		    (nffsdev->nd_free_count + nsegs) * sizeof(uint64_t),
		    M_NANDFSTEMP, M_WAITOK | M_ZERO);
		memcpy(&nffsdev->nd_free_base[nffsdev->nd_free_count],
		    segments, nsegs * sizeof(uint64_t));
		nffsdev->nd_free_count += nsegs;
	} else {
		nffsdev->nd_free_base = malloc(nsegs * sizeof(uint64_t),
		    M_NANDFSTEMP, M_WAITOK | M_ZERO);
		memcpy(nffsdev->nd_free_base, segments,
		    nsegs * sizeof(uint64_t));
		nffsdev->nd_free_count = nsegs;
	}

out:
	DPRINTF(CLEAN, ("%s: exit error %d\n", __func__, error));

	return (error);
}

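/*
 * Append a new directory entry.  The last directory block is scanned
 * for trailing free space; if the new entry does not fit there, a fresh
 * block is created and the entry takes up the whole of it.  Purely as
 * an illustrative sketch (the exact arguments depend on the caller,
 * e.g. a create or link operation holding dvp locked):
 *
 *	error = nandfs_add_dirent(dvp, node->nn_ino, cnp->cn_nameptr,
 *	    cnp->cn_namelen, IFTODT(node->nn_inode.i_mode));
 */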
int
nandfs_add_dirent(struct vnode *dvp, uint64_t ino, char *nameptr, long namelen,
    uint8_t type)
{
	struct nandfs_node *dir_node = VTON(dvp);
	struct nandfs_dir_entry *dirent, *pdirent;
	uint32_t blocksize = dir_node->nn_nandfsdev->nd_blocksize;
	uint64_t filesize = dir_node->nn_inode.i_size;
	uint64_t inode_blks = dir_node->nn_inode.i_blocks;
	uint32_t off, rest;
	uint8_t *pos;
	struct buf *bp;
	int error;

	pdirent = NULL;
	bp = NULL;
	if (inode_blks) {
		error = nandfs_bread(dir_node, inode_blks - 1, NOCRED, 0, &bp);
		if (error) {
			brelse(bp);
			return (error);
		}

		pos = bp->b_data;
		off = 0;
		while (off < blocksize) {
			pdirent = (struct nandfs_dir_entry *) (pos + off);
			if (!pdirent->rec_len) {
				pdirent = NULL;
				break;
			}
			off += pdirent->rec_len;
		}

		if (pdirent)
			rest = pdirent->rec_len -
			    NANDFS_DIR_REC_LEN(pdirent->name_len);
		else
			rest = blocksize;

		if (rest < NANDFS_DIR_REC_LEN(namelen)) {
			/* Do not update pdirent as new block is created */
			pdirent = NULL;
			brelse(bp);
			/* Set to NULL to create new */
			bp = NULL;
			filesize += rest;
		}
	}

	/* If no bp found create new */
	if (!bp) {
		error = nandfs_bcreate(dir_node, inode_blks, NOCRED, 0, &bp);
		if (error)
			return (error);
		off = 0;
		pos = bp->b_data;
	}

	/* Modify pdirent if it exists */
	if (pdirent) {
		DPRINTF(LOOKUP, ("modify pdirent %p\n", pdirent));
		/* Shrink the last dir entry to its actual size */
		off -= pdirent->rec_len;
		pdirent->rec_len = NANDFS_DIR_REC_LEN(pdirent->name_len);
		off += pdirent->rec_len;
	}

	/* Create new dirent */
	dirent = (struct nandfs_dir_entry *) (pos + off);
	dirent->rec_len = blocksize - off;
	dirent->inode = ino;
	dirent->name_len = namelen;
	memset(dirent->name, 0, NANDFS_DIR_NAME_LEN(namelen));
	memcpy(dirent->name, nameptr, namelen);
	dirent->file_type = type;

	filesize += NANDFS_DIR_REC_LEN(dirent->name_len);

	DPRINTF(LOOKUP, ("create dir_entry '%.*s' at %p with size %x "
	    "new filesize: %jx\n",
	    (int)namelen, dirent->name, dirent, dirent->rec_len,
	    (uintmax_t)filesize));

	error = nandfs_dirty_buf(bp, 0);
	if (error)
		return (error);

	dir_node->nn_inode.i_size = filesize;
	dir_node->nn_flags |= IN_CHANGE | IN_UPDATE;
	vnode_pager_setsize(dvp, filesize);

	return (0);
}

int
nandfs_remove_dirent(struct vnode *dvp, struct nandfs_node *node,
    struct componentname *cnp)
{
	struct nandfs_node *dir_node;
	struct nandfs_dir_entry *dirent, *pdirent;
	struct buf *bp;
	uint64_t filesize, blocknr, ino, offset;
	uint32_t blocksize, limit, off;
	uint16_t newsize;
	uint8_t *pos;
	int error, found;

	dir_node = VTON(dvp);
	filesize = dir_node->nn_inode.i_size;
	if (!filesize)
		return (0);

	if (node) {
		offset = node->nn_diroff;
		ino = node->nn_ino;
	} else {
		offset = dir_node->nn_diroff;
		ino = NANDFS_WHT_INO;
	}

	dirent = pdirent = NULL;
	blocksize = dir_node->nn_nandfsdev->nd_blocksize;
	blocknr = offset / blocksize;

	DPRINTF(LOOKUP, ("rm direntry dvp %p node %p ino %#jx at off %#jx\n",
	    dvp, node, (uintmax_t)ino, (uintmax_t)offset));

	error = nandfs_bread(dir_node, blocknr, NOCRED, 0, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}

	pos = bp->b_data;
	off = 0;
	found = 0;
	limit = offset % blocksize;
	pdirent = (struct nandfs_dir_entry *) bp->b_data;
	while (off <= limit) {
		dirent = (struct nandfs_dir_entry *) (pos + off);

		if ((off == limit) && (dirent->inode == ino)) {
			found = 1;
			break;
		}
		if (dirent->inode != 0)
			pdirent = dirent;
		off += dirent->rec_len;
	}

	if (!found) {
		nandfs_error("cannot find entry to remove");
		brelse(bp);
		return (error);
	}

	DPRINTF(LOOKUP, ("rm dirent ino %#jx at %#x with size %#x\n",
	    (uintmax_t)dirent->inode, off, dirent->rec_len));

	/* Merge the removed entry's space into the previous entry */
	newsize = (uintptr_t)dirent - (uintptr_t)pdirent;
	newsize += dirent->rec_len;
	pdirent->rec_len = newsize;
	dirent->inode = 0;

	error = nandfs_dirty_buf(bp, 0);
	if (error)
		return (error);

	dir_node->nn_flags |= IN_CHANGE | IN_UPDATE;

	/* If it was the last entry, shrink the directory size */
	if ((offset + NANDFS_DIR_REC_LEN(dirent->name_len)) == filesize) {
		filesize = blocknr * blocksize +
		    ((uintptr_t)pdirent - (uintptr_t)pos) +
		    NANDFS_DIR_REC_LEN(pdirent->name_len);
		dir_node->nn_inode.i_size = filesize;
	}

	return (0);
}