/*
 * Python binding: Filesystem.blocks_in_bytes(bytes)
 *
 * Converts a byte count to the number of filesystem blocks needed to
 * hold it, via ocfs2_blocks_in_bytes().  Returns a new Python integer,
 * or NULL with a TypeError set if argument parsing fails.
 */
static PyObject *
fs_blocks_in_bytes (Filesystem *self, PyObject *args, PyObject *kwargs)
{
	static char *kwlist[] = { "bytes", NULL };
	unsigned long long num_bytes;

	if (!PyArg_ParseTupleAndKeywords (args, kwargs, "K:blocks_in_bytes",
					  kwlist, &num_bytes))
		return NULL;

	return PyLong_FromUnsignedLongLong (ocfs2_blocks_in_bytes (self->fs,
								   num_bytes));
}
/*
 * Walk every directory block recorded in the fsck state's rb-tree and
 * hand each entry to 'func' along with 'priv_data'.  Iteration stops
 * early if the callback returns a value with OCFS2_DIRENT_ABORT set.
 *
 * As an optimization, when caching is worthwhile we pre-read runs of
 * directory blocks (up to 1MB at a time) into a scratch buffer so the
 * reads done by the callback hit the I/O cache instead of the disk.
 * Failure to allocate the scratch buffer is non-fatal; we simply
 * iterate without pre-caching.
 */
void o2fsck_dir_block_iterate(o2fsck_state *ost, dirblock_iterator func,
			      void *priv_data)
{
	ocfs2_filesys *fs = ost->ost_fs;
	o2fsck_dirblocks *dirblocks = &ost->ost_dirblocks;
	int buf_blocks = ocfs2_blocks_in_bytes(fs, 1024 * 1024);
	char *cache_buf = NULL;
	int blocks_left = 0;
	o2fsck_dirblock_entry *dbe;
	struct rb_node *node;
	unsigned iret;
	errcode_t err;

	o2fsck_reset_blocks_cached();
	if (o2fsck_worth_caching(1)) {
		err = ocfs2_malloc_blocks(fs->fs_io, buf_blocks, &cache_buf);
		if (err)
			verbosef("Unable to allocate dirblock pre-cache "
				 "buffer, %s\n", "ignoring");
	}

	for (node = rb_first(&dirblocks->db_root); node;
	     node = rb_next(node)) {
		/* Refill the pre-cache once the previous run is consumed */
		if (!blocks_left && cache_buf)
			blocks_left = try_to_cache(fs, node, cache_buf,
						   buf_blocks);

		dbe = rb_entry(node, o2fsck_dirblock_entry, e_node);
		iret = func(dbe, priv_data);
		if (iret & OCFS2_DIRENT_ABORT)
			break;

		if (blocks_left)
			blocks_left--;
	}

	if (cache_buf)
		ocfs2_free(&cache_buf);
}
/*
 * Truncate the file down to new_i_size: release every cluster beyond
 * the one containing new_i_size, and zero the bytes in that final
 * cluster that lie past new_i_size.
 *
 * 'free_clusters', if non-NULL, is invoked (via truncate_iterate) for
 * each freed range, with 'free_data' passed through.  On success,
 * *new_clusters (if non-NULL) receives the resulting cluster count.
 */
static errcode_t ocfs2_zero_tail_and_truncate_full(ocfs2_filesys *fs,
				ocfs2_cached_inode *ci,
				uint64_t new_i_size,
				uint32_t *new_clusters,
				errcode_t (*free_clusters)(ocfs2_filesys *fs,
							   uint32_t len,
							   uint64_t start,
							   void *free_data),
				void *free_data)
{
	struct truncate_ctxt ctxt;
	uint64_t size_in_blocks = ocfs2_blocks_in_bytes(fs, new_i_size);
	errcode_t ret;

	ctxt.ino = ci->ci_blkno;
	ctxt.new_i_clusters = ci->ci_inode->i_clusters;
	ctxt.new_size_in_clusters = ocfs2_clusters_in_blocks(fs,
							     size_in_blocks);
	ctxt.free_clusters = free_clusters;
	ctxt.free_data = free_data;

	/* Depth-first walk frees extents past the new size */
	ret = ocfs2_extent_iterate_inode(fs, ci->ci_inode,
					 OCFS2_EXTENT_FLAG_DEPTH_TRAVERSE,
					 NULL, truncate_iterate, &ctxt);
	if (ret)
		goto out;

	/* Zero the tail of the last remaining cluster */
	ret = ocfs2_zero_tail_for_truncate(ci, new_i_size);
	if (ret)
		goto out;

	if (new_clusters)
		*new_clusters = ctxt.new_i_clusters;
out:
	return ret;
}
/*
 * Read an entire regular file into a freshly allocated buffer.
 *
 * On success, *buf points to the file data (caller frees it with
 * ocfs2_free()) and *len is the number of bytes read.  Files larger
 * than INT_MAX are rejected with OCFS2_ET_INVALID_ARGUMENT.  Files
 * with inline data are copied straight out of the inode.  On failure
 * (or if nothing was read) *buf is NULL and *len is 0.
 *
 * Fix: the inline-data path used to 'return' directly, leaking
 * inode_buf; it now routes through out_free.  ctx.errcode is zeroed
 * on that path so the shared success exit returns 0 instead of
 * reading an uninitialized field.
 */
errcode_t ocfs2_read_whole_file(ocfs2_filesys *fs,
				uint64_t blkno,
				char **buf,
				int *len)
{
	struct read_whole_context ctx;
	errcode_t retval;
	char *inode_buf;
	struct ocfs2_dinode *di;

	/* So the caller can see nothing was read */
	*len = 0;
	*buf = NULL;

	retval = ocfs2_malloc_block(fs->fs_io, &inode_buf);
	if (retval)
		return retval;

	retval = ocfs2_read_inode(fs, blkno, inode_buf);
	if (retval)
		goto out_free;

	di = (struct ocfs2_dinode *)inode_buf;

	/* Arbitrary limit for our malloc */
	retval = OCFS2_ET_INVALID_ARGUMENT;
	if (di->i_size > INT_MAX)
		goto out_free;

	retval = ocfs2_malloc_blocks(fs->fs_io,
				     ocfs2_blocks_in_bytes(fs, di->i_size),
				     buf);
	if (retval)
		goto out_free;

	if (di->i_dyn_features & OCFS2_INLINE_DATA_FL) {
		/*
		 * Inline data lives in the inode itself; copy it out and
		 * take the common exit so inode_buf is freed.
		 */
		retval = ocfs2_inline_data_read(di, *buf, di->i_size,
						0, (uint32_t *)len);
		ctx.errcode = 0;
		goto out_free;
	}

	ctx.buf = *buf;
	ctx.ptr = *buf;
	ctx.size = di->i_size;
	ctx.offset = 0;
	ctx.errcode = 0;
	retval = ocfs2_block_iterate(fs, blkno, 0, read_whole_func, &ctx);

	/* A short read (sparse tail) truncates the reported length */
	*len = ctx.size;
	if (ctx.offset < ctx.size)
		*len = ctx.offset;

out_free:
	ocfs2_free(&inode_buf);

	if (!(*len)) {
		ocfs2_free(buf);
		*buf = NULL;
	}

	if (retval)
		return retval;
	return ctx.errcode;
}
/*
 * Size and initialize the I/O cache according to the caller's hint.
 *
 * O2FSCK_CACHE_MODE_FULL wants enough cache for every block in the
 * filesystem, while leaving headroom for other allocations;
 * O2FSCK_CACHE_MODE_JOURNAL wants only enough for the journal data of
 * all slots (guessed at 256MB per journal); O2FSCK_CACHE_MODE_NONE
 * skips caching entirely.
 *
 * We repeatedly halve the request until io_init_cache() (and, for
 * caches above 512 blocks, io_mlock_cache()) succeeds.
 *
 * Fix: the journal-size product was computed in int, which overflows
 * (undefined behavior) once max_slots * 256MB exceeds INT_MAX, i.e.
 * for max_slots >= 8.  It is now computed in uint64_t.
 */
void o2fsck_init_cache(o2fsck_state *ost, enum o2fsck_cache_hint hint)
{
	errcode_t ret;
	uint64_t blocks_wanted;
	int leave_room;
	ocfs2_filesys *fs = ost->ost_fs;
	int max_slots = OCFS2_RAW_SB(fs->fs_super)->s_max_slots;

	switch (hint) {
		case O2FSCK_CACHE_MODE_FULL:
			leave_room = 1;
			blocks_wanted = fs->fs_blocks;
			break;

		case O2FSCK_CACHE_MODE_JOURNAL:
			/*
			 * We need enough blocks for all the journal
			 * data. Let's guess at 256M journals.
			 * Widen before multiplying to avoid signed int
			 * overflow for max_slots >= 8.
			 */
			leave_room = 0;
			blocks_wanted = ocfs2_blocks_in_bytes(fs,
				(uint64_t)max_slots * 1024 * 1024 * 256);
			break;

		case O2FSCK_CACHE_MODE_NONE:
			return;

		default:
			assert(0);
	}

	verbosef("Want %"PRIu64" blocks for the I/O cache\n",
		 blocks_wanted);

	/*
	 * leave_room means that we don't want our cache to be taking
	 * all available memory.  So we try to get twice as much as we
	 * want; if that works, we know that getting exactly as much as
	 * we want is going to be safe.
	 */
	if (leave_room)
		blocks_wanted <<= 1;

	if (blocks_wanted > INT_MAX)
		blocks_wanted = INT_MAX;

	while (blocks_wanted > 0) {
		io_destroy_cache(fs->fs_io);
		verbosef("Asking for %"PRIu64" blocks of I/O cache\n",
			 blocks_wanted);
		ret = io_init_cache(fs->fs_io, blocks_wanted);
		if (!ret) {
			/*
			 * We want to pin our cache; there's no point in
			 * having a large cache if half of it is in swap.
			 * However, some callers may not be privileged
			 * enough, so once we get down to a small enough
			 * number (512 blocks), we'll stop caring.
			 */
			ret = io_mlock_cache(fs->fs_io);
			if (ret && (blocks_wanted <= 512))
				ret = 0;
		}
		if (!ret) {
			verbosef("Got %"PRIu64" blocks\n", blocks_wanted);
			/*
			 * We've found an allocation that works.  If
			 * we're not leaving room, we're done.  But if
			 * we're leaving room, we clear leave_room and go
			 * around again.  We expect to succeed there.
			 */
			if (!leave_room) {
				cache_blocks = blocks_wanted;
				break;
			}

			verbosef("Leaving room for other %s\n",
				 "allocations");
			leave_room = 0;
		}

		blocks_wanted >>= 1;
	}
}