static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
{
	union nilfs_bmap_ptr_req req;
	struct inode *dat = NULL;
	struct buffer_head *bh;
	int ret;

	if (key > NILFS_DIRECT_KEY_MAX)
		return -ENOENT;
	if (nilfs_direct_get_ptr(bmap, key) != NILFS_BMAP_INVALID_PTR)
		return -EEXIST;

	if (NILFS_BMAP_USE_VBN(bmap)) {
		req.bpr_ptr = nilfs_direct_find_target_v(bmap, key);
		dat = nilfs_bmap_get_dat(bmap);
	}
	ret = nilfs_bmap_prepare_alloc_ptr(bmap, &req, dat);
	if (!ret) {
		/* ptr must be a pointer to a buffer head. */
		bh = (struct buffer_head *)((unsigned long)ptr);
		set_buffer_nilfs_volatile(bh);

		nilfs_bmap_commit_alloc_ptr(bmap, &req, dat);
		nilfs_direct_set_ptr(bmap, key, req.bpr_ptr);

		if (!nilfs_bmap_dirty(bmap))
			nilfs_bmap_set_dirty(bmap);

		if (NILFS_BMAP_USE_VBN(bmap))
			nilfs_bmap_set_target_v(bmap, key, req.bpr_ptr);

		nilfs_inode_add_blocks(bmap->b_inode, 1);
	}
	return ret;
}
static int nilfs_direct_assign(struct nilfs_bmap *bmap,
			       struct buffer_head **bh,
			       sector_t blocknr,
			       union nilfs_binfo *binfo)
{
	__u64 key;
	__u64 ptr;

	key = nilfs_bmap_data_get_key(bmap, *bh);
	if (unlikely(key > NILFS_DIRECT_KEY_MAX)) {
		printk(KERN_CRIT "%s: invalid key: %llu\n", __func__,
		       (unsigned long long)key);
		return -EINVAL;
	}
	ptr = nilfs_direct_get_ptr(bmap, key);
	if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) {
		printk(KERN_CRIT "%s: invalid pointer: %llu\n", __func__,
		       (unsigned long long)ptr);
		return -EINVAL;
	}

	return NILFS_BMAP_USE_VBN(bmap) ?
		nilfs_direct_assign_v(bmap, key, ptr, bh, blocknr, binfo) :
		nilfs_direct_assign_p(bmap, key, ptr, bh, blocknr, binfo);
}
static int nilfs_direct_propagate(struct nilfs_bmap *bmap,
				  struct buffer_head *bh)
{
	struct nilfs_palloc_req oldreq, newreq;
	struct inode *dat;
	__u64 key;
	__u64 ptr;
	int ret;

	if (!NILFS_BMAP_USE_VBN(bmap))
		return 0;

	dat = nilfs_bmap_get_dat(bmap);
	key = nilfs_bmap_data_get_key(bmap, bh);
	ptr = nilfs_direct_get_ptr(bmap, key);
	if (!buffer_nilfs_volatile(bh)) {
		oldreq.pr_entry_nr = ptr;
		newreq.pr_entry_nr = ptr;
		ret = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
		if (ret < 0)
			return ret;
		nilfs_dat_commit_update(dat, &oldreq, &newreq,
					bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
		set_buffer_nilfs_volatile(bh);
		nilfs_direct_set_ptr(bmap, key, newreq.pr_entry_nr);
	} else
		ret = nilfs_dat_mark_dirty(dat, ptr);

	return ret;
}
static int nilfs_direct_propagate(const struct nilfs_bmap *bmap,
				  struct buffer_head *bh)
{
	struct nilfs_direct *direct = (struct nilfs_direct *)bmap;

	return NILFS_BMAP_USE_VBN(bmap) ?
		nilfs_direct_propagate_v(direct, bh) : 0;
}
static int nilfs_direct_prepare_insert(struct nilfs_direct *direct,
				       __u64 key,
				       union nilfs_bmap_ptr_req *req,
				       struct nilfs_bmap_stats *stats)
{
	int ret;

	if (NILFS_BMAP_USE_VBN(&direct->d_bmap))
		req->bpr_ptr = nilfs_direct_find_target_v(direct, key);
	ret = nilfs_bmap_prepare_alloc_ptr(&direct->d_bmap, req);
	if (ret < 0)
		return ret;

	stats->bs_nblocks = 1;
	return 0;
}
static void nilfs_direct_commit_insert(struct nilfs_direct *direct,
				       union nilfs_bmap_ptr_req *req,
				       __u64 key, __u64 ptr)
{
	struct buffer_head *bh;

	/* ptr must be a pointer to a buffer head. */
	bh = (struct buffer_head *)((unsigned long)ptr);
	set_buffer_nilfs_volatile(bh);

	nilfs_bmap_commit_alloc_ptr(&direct->d_bmap, req);
	nilfs_direct_set_ptr(direct, key, req->bpr_ptr);

	if (!nilfs_bmap_dirty(&direct->d_bmap))
		nilfs_bmap_set_dirty(&direct->d_bmap);

	if (NILFS_BMAP_USE_VBN(&direct->d_bmap))
		nilfs_direct_set_target_v(direct, key, req->bpr_ptr);
}
static int nilfs_direct_lookup_contig(const struct nilfs_bmap *bmap,
				      __u64 key, __u64 *ptrp,
				      unsigned maxblocks)
{
	struct nilfs_direct *direct = (struct nilfs_direct *)bmap;
	struct inode *dat = NULL;
	__u64 ptr, ptr2;
	sector_t blocknr;
	int ret, cnt;

	if (key > NILFS_DIRECT_KEY_MAX)
		return -ENOENT;
	ptr = nilfs_direct_get_ptr(direct, key);
	if (ptr == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	if (NILFS_BMAP_USE_VBN(bmap)) {
		dat = nilfs_bmap_get_dat(bmap);
		ret = nilfs_dat_translate(dat, ptr, &blocknr);
		if (ret < 0)
			return ret;
		ptr = blocknr;
	}

	maxblocks = min_t(unsigned, maxblocks, NILFS_DIRECT_KEY_MAX - key + 1);
	for (cnt = 1; cnt < maxblocks &&
	     (ptr2 = nilfs_direct_get_ptr(direct, key + cnt)) !=
	     NILFS_BMAP_INVALID_PTR;
	     cnt++) {
		if (dat) {
			ret = nilfs_dat_translate(dat, ptr2, &blocknr);
			if (ret < 0)
				return ret;
			ptr2 = blocknr;
		}
		if (ptr2 != ptr + cnt)
			break;
	}
	*ptrp = ptr;
	return cnt;
}
/**
 * nilfs_bmap_lookup_at_level - find a data block or node block
 * @bmap: bmap
 * @key: key
 * @level: level
 * @ptrp: place to store the value associated to @key
 *
 * Description: nilfs_bmap_lookup_at_level() finds a record whose key
 * matches @key in the block at @level of the bmap.
 *
 * Return Value: On success, 0 is returned and the record associated with @key
 * is stored in the place pointed by @ptrp. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - A record associated with @key does not exist.
 */
int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level,
			       __u64 *ptrp)
{
	sector_t blocknr;
	int ret;

	down_read(&bmap->b_sem);
	ret = bmap->b_ops->bop_lookup(bmap, key, level, ptrp);
	if (ret < 0)
		goto out;
	if (NILFS_BMAP_USE_VBN(bmap)) {
		ret = nilfs_dat_translate(nilfs_bmap_get_dat(bmap), *ptrp,
					  &blocknr);
		if (!ret)
			*ptrp = blocknr;
	}

 out:
	up_read(&bmap->b_sem);
	return ret;
}
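/*
 * Usage sketch (an illustrative assumption, not part of the original
 * source): a minimal wrapper showing how a caller might resolve the block
 * address mapped to @key through nilfs_bmap_lookup_at_level().  The wrapper
 * name nilfs_bmap_lookup_example and the choice of level 1 for the lowest
 * (data) level are assumptions made here for the example only.
 */
static inline int nilfs_bmap_lookup_example(struct nilfs_bmap *bmap, __u64 key,
					    __u64 *ptrp)
{
	/* Look up at the lowest level; *ptrp receives the block address. */
	return nilfs_bmap_lookup_at_level(bmap, key, 1, ptrp);
}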
static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key)
{
	union nilfs_bmap_ptr_req req;
	struct inode *dat;
	int ret;

	if (key > NILFS_DIRECT_KEY_MAX ||
	    nilfs_direct_get_ptr(bmap, key) == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL;
	req.bpr_ptr = nilfs_direct_get_ptr(bmap, key);

	ret = nilfs_bmap_prepare_end_ptr(bmap, &req, dat);
	if (!ret) {
		nilfs_bmap_commit_end_ptr(bmap, &req, dat);
		nilfs_direct_set_ptr(bmap, key, NILFS_BMAP_INVALID_PTR);
		nilfs_inode_sub_blocks(bmap->b_inode, 1);
	}
	return ret;
}