int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh, struct inode *inode) { int ret = 0; mlog_entry("(bh->b_blocknr = %llu, inode=%p)\n", (unsigned long long)bh->b_blocknr, inode); BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO); BUG_ON(buffer_jbd(bh)); /* No need to check for a soft readonly file system here. non * journalled writes are only ever done on system files which * can get modified during recovery even if read-only. */ if (ocfs2_is_hard_readonly(osb)) { ret = -EROFS; goto out; } mutex_lock(&OCFS2_I(inode)->ip_io_mutex); lock_buffer(bh); set_buffer_uptodate(bh); /* remove from dirty list before I/O. */ clear_buffer_dirty(bh); get_bh(bh); /* for end_buffer_write_sync() */ bh->b_end_io = end_buffer_write_sync; submit_bh(WRITE, bh); wait_on_buffer(bh); if (buffer_uptodate(bh)) { ocfs2_set_buffer_uptodate(inode, bh); } else { /* We don't need to remove the clustered uptodate * information for this bh as it's not marked locally * uptodate. */ ret = -EIO; put_bh(bh); } mutex_unlock(&OCFS2_I(inode)->ip_io_mutex); out: mlog_exit(ret); return ret; }
/*
 * Write a single metadata block synchronously, bypassing the journal.
 *
 * @osb: superblock context (used only for the readonly check).
 * @bh:  buffer to write; must not be managed by jbd.
 * @ci:  metadata cache the block belongs to; its io lock serializes
 *       this raw write against journalled I/O on the same cache.
 *
 * Returns 0 on success, -EROFS if the device is hard read-only, or
 * -EIO if the write failed.
 */
int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
		      struct ocfs2_caching_info *ci)
{
	int ret = 0;

	trace_ocfs2_write_block((unsigned long long)bh->b_blocknr, ci);

	/* Never allow a raw overwrite of blocks below the superblock. */
	BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
	BUG_ON(buffer_jbd(bh));

	/* No need to check for a soft readonly file system here. non
	 * journalled writes are only ever done on system files which
	 * can get modified during recovery even if read-only. */
	if (ocfs2_is_hard_readonly(osb)) {
		ret = -EROFS;
		mlog_errno(ret);
		goto out;
	}

	ocfs2_metadata_cache_io_lock(ci);

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	/* Extra reference for end_buffer_write_sync(), which drops it
	 * when the I/O completes. */
	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(REQ_OP_WRITE, 0, bh);

	wait_on_buffer(bh);

	if (buffer_uptodate(bh)) {
		/* Record the block as clusterwide up to date. */
		ocfs2_set_buffer_uptodate(ci, bh);
	} else {
		/* We don't need to remove the clustered uptodate
		 * information for this bh as it's not marked locally
		 * uptodate. */
		ret = -EIO;
		mlog_errno(ret);
	}

	ocfs2_metadata_cache_io_unlock(ci);
out:
	return ret;
}
int ocfs2_should_update_atime(struct inode *inode, struct vfsmount *vfsmnt) { struct timespec now; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) return 0; if ((inode->i_flags & S_NOATIME) || ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))) return 0; /* * We can be called with no vfsmnt structure - NFSD will * sometimes do this. * * Note that our action here is different than touch_atime() - * if we can't tell whether this is a noatime mount, then we * don't know whether to trust the value of s_atime_quantum. */ if (vfsmnt == NULL) return 0; if ((vfsmnt->mnt_flags & MNT_NOATIME) || ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))) return 0; if (vfsmnt->mnt_flags & MNT_RELATIME) { if ((timespec_compare(&inode->i_atime, &inode->i_mtime) <= 0) || (timespec_compare(&inode->i_atime, &inode->i_ctime) <= 0)) return 1; return 0; } now = CURRENT_TIME; if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum)) return 0; else return 1; }
/*
 * For local mounts, add heartbeat=none.
 * For userspace clusterstack, add cluster_stack=xxxx.
 * For o2cb with local heartbeat, add heartbeat=local.
 * For o2cb with global heartbeat, add heartbeat=global.
 */
static errcode_t add_mount_options(ocfs2_filesys *fs,
				   struct o2cb_cluster_desc *cluster,
				   char **optstr)
{
	char *joined, *opt;
	char stackstr[strlen(OCFS2_CLUSTER_STACK_ARG) +
		      OCFS2_STACK_LABEL_LEN + 1];
	struct ocfs2_super_block *sb = OCFS2_RAW_SB(fs->fs_super);

	/* Pick the single option string to append. */
	if (ocfs2_mount_local(fs) || ocfs2_is_hard_readonly(fs)) {
		opt = OCFS2_HB_NONE;
	} else if (cluster->c_stack &&
		   strcmp(cluster->c_stack, OCFS2_CLASSIC_CLUSTER_STACK)) {
		snprintf(stackstr, sizeof(stackstr), "%s%s",
			 OCFS2_CLUSTER_STACK_ARG, cluster->c_stack);
		opt = stackstr;
	} else if (ocfs2_cluster_o2cb_global_heartbeat(sb)) {
		opt = OCFS2_HB_GLOBAL;
	} else {
		opt = OCFS2_HB_LOCAL;
	}

	/* Append to any caller-supplied options, comma separated. */
	if (*optstr && *(*optstr))
		joined = xstrconcat3(*optstr, ",", opt);
	else
		joined = xstrndup(opt, strlen(opt));

	if (!joined)
		return OCFS2_ET_NO_MEMORY;

	*optstr = joined;
	return 0;
}
/* pass it NULL and it will allocate a new handle object for you. If * you pass it a handle however, it may still return error, in which * case it has free'd the passed handle for you. */ handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs) { journal_t *journal = osb->journal->j_journal; handle_t *handle; BUG_ON(!osb || !osb->journal->j_journal); if (ocfs2_is_hard_readonly(osb)) return ERR_PTR(-EROFS); BUG_ON(osb->journal->j_state == OCFS2_JOURNAL_FREE); BUG_ON(max_buffs <= 0); /* JBD might support this, but our journalling code doesn't yet. */ if (journal_current_handle()) { mlog(ML_ERROR, "Recursive transaction attempted!\n"); BUG(); } down_read(&osb->journal->j_trans_barrier); handle = journal_start(journal, max_buffs); if (IS_ERR(handle)) { up_read(&osb->journal->j_trans_barrier); mlog_errno(PTR_ERR(handle)); if (is_journal_aborted(journal)) { ocfs2_abort(osb->sb, "Detected aborted journal"); handle = ERR_PTR(-EROFS); } } else { if (!ocfs2_mount_local(osb)) atomic_inc(&(osb->journal->j_num_trans)); } return handle; }
/* * Write super block and backups doesn't need to collaborate with journal, * so we don't need to lock ip_io_mutex and inode doesn't need to bea passed * into this function. */ int ocfs2_write_super_or_backup(struct ocfs2_super *osb, struct buffer_head *bh) { int ret = 0; mlog_entry_void(); BUG_ON(buffer_jbd(bh)); ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr); if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) { ret = -EROFS; goto out; } lock_buffer(bh); set_buffer_uptodate(bh); /* remove from dirty list before I/O. */ clear_buffer_dirty(bh); get_bh(bh); /* for end_buffer_write_sync() */ bh->b_end_io = end_buffer_write_sync; submit_bh(WRITE, bh); wait_on_buffer(bh); if (!buffer_uptodate(bh)) { ret = -EIO; put_bh(bh); } out: mlog_exit(ret); return ret; }
handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs) { journal_t *journal = osb->journal->j_journal; handle_t *handle; BUG_ON(!osb || !osb->journal->j_journal); if (ocfs2_is_hard_readonly(osb)) return ERR_PTR(-EROFS); BUG_ON(osb->journal->j_state == OCFS2_JOURNAL_FREE); BUG_ON(max_buffs <= 0); if (journal_current_handle()) return jbd2_journal_start(journal, max_buffs); down_read(&osb->journal->j_trans_barrier); handle = jbd2_journal_start(journal, max_buffs); if (IS_ERR(handle)) { up_read(&osb->journal->j_trans_barrier); mlog_errno(PTR_ERR(handle)); if (is_journal_aborted(journal)) { ocfs2_abort(osb->sb, "Detected aborted journal"); handle = ERR_PTR(-EROFS); } } else { if (!ocfs2_mount_local(osb)) atomic_inc(&(osb->journal->j_num_trans)); } return handle; }
/*
 * Write super block and backups doesn't need to collaborate with journal,
 * so we don't need to lock ip_io_mutex and ci doesn't need to be passed
 * into this function.
 */
int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
				struct buffer_head *bh)
{
	int ret = 0;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;

	BUG_ON(buffer_jbd(bh));
	/* Assert bh->b_blocknr really is the superblock or a backup. */
	ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr);

	/* Superblock writes are refused on both readonly flavors. */
	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) {
		ret = -EROFS;
		mlog_errno(ret);
		goto out;
	}

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	/* Extra reference for end_buffer_write_sync(), which drops it
	 * when the I/O completes. */
	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	/* Refresh the metaecc checksum over the final block contents. */
	ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
	submit_bh(REQ_OP_WRITE, 0, bh);

	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		ret = -EIO;
		mlog_errno(ret);
	}

out:
	return ret;
}
/* Add a new group descriptor to global_bitmap.
 *
 * @inode: any inode on the filesystem (used to reach the superblock and
 *         as the cache for the freshly-read group buffer).
 * @input: caller-supplied description of the new group (block, chain,
 *         cluster counts) - validated by ocfs2_verify_group_and_input().
 *
 * Links the new group descriptor at the head of the requested chain in
 * the global bitmap inode and updates the bitmap totals, all within a
 * single journal transaction.  Returns 0 on success, negative errno on
 * failure.
 */
int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
{
	int ret;
	handle_t *handle;
	struct buffer_head *main_bm_bh = NULL;
	struct inode *main_bm_inode = NULL;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *group_bh = NULL;
	struct ocfs2_group_desc *group = NULL;
	struct ocfs2_chain_list *cl;
	struct ocfs2_chain_rec *cr;
	u16 cl_bpc;

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	main_bm_inode = ocfs2_get_system_file_inode(osb,
						    GLOBAL_BITMAP_SYSTEM_INODE,
						    OCFS2_INVALID_SLOT);
	if (!main_bm_inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto out;
	}

	/* Lock order: i_mutex, then the cluster inode lock. */
	mutex_lock(&main_bm_inode->i_mutex);

	ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_mutex;
	}

	fe = (struct ocfs2_dinode *)main_bm_bh->b_data;

	/* Online resize requires full-size group bitmaps; older/smaller
	 * layouts must go through offline resize instead. */
	if (le16_to_cpu(fe->id2.i_chain.cl_cpg) !=
		ocfs2_group_bitmap_size(osb->sb, 0,
					osb->s_feature_incompat) * 8) {
		mlog(ML_ERROR, "The disk is too old and small."
		     " Force to do offline resize.");
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Read the new group descriptor straight from disk (it was just
	 * written by userspace and isn't in our cache yet). */
	ret = ocfs2_read_blocks_sync(osb, input->group, 1, &group_bh);
	if (ret < 0) {
		mlog(ML_ERROR, "Can't read the group descriptor # %llu "
		     "from the device.",
		     (unsigned long long)input->group);
		goto out_unlock;
	}

	ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), group_bh);

	ret = ocfs2_verify_group_and_input(main_bm_inode, fe, input, group_bh);
	if (ret) {
		mlog_errno(ret);
		goto out_unlock;
	}

	trace_ocfs2_group_add((unsigned long long)input->group,
			      input->chain, input->clusters, input->frees);

	handle = ocfs2_start_trans(osb, OCFS2_GROUP_ADD_CREDITS);
	if (IS_ERR(handle)) {
		mlog_errno(PTR_ERR(handle));
		ret = -EINVAL;
		goto out_unlock;
	}

	cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc);
	cl = &fe->id2.i_chain;
	cr = &cl->cl_recs[input->chain];

	/* Link the new group in front of the chain's current head. */
	ret = ocfs2_journal_access_gd(handle, INODE_CACHE(main_bm_inode),
				      group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_commit;
	}

	group = (struct ocfs2_group_desc *)group_bh->b_data;
	group->bg_next_group = cr->c_blkno;
	ocfs2_journal_dirty(handle, group_bh);

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(main_bm_inode),
				      main_bm_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_commit;
	}

	/* Brand-new chain: bump the record count and start clean. */
	if (input->chain == le16_to_cpu(cl->cl_next_free_rec)) {
		le16_add_cpu(&cl->cl_next_free_rec, 1);
		memset(cr, 0, sizeof(struct ocfs2_chain_rec));
	}

	/* Point the chain record at the new group and fold the new
	 * bits into the chain and bitmap totals. */
	cr->c_blkno = cpu_to_le64(input->group);
	le32_add_cpu(&cr->c_total, input->clusters * cl_bpc);
	le32_add_cpu(&cr->c_free, input->frees * cl_bpc);

	le32_add_cpu(&fe->id1.bitmap1.i_total, input->clusters * cl_bpc);
	le32_add_cpu(&fe->id1.bitmap1.i_used,
		     (input->clusters - input->frees) * cl_bpc);
	le32_add_cpu(&fe->i_clusters, input->clusters);

	ocfs2_journal_dirty(handle, main_bm_bh);

	/* Mirror the new size into the in-memory inode. */
	spin_lock(&OCFS2_I(main_bm_inode)->ip_lock);
	OCFS2_I(main_bm_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	le64_add_cpu(&fe->i_size, input->clusters << osb->s_clustersize_bits);
	spin_unlock(&OCFS2_I(main_bm_inode)->ip_lock);
	i_size_write(main_bm_inode, le64_to_cpu(fe->i_size));

	ocfs2_update_super_and_backups(main_bm_inode, input->clusters);

out_commit:
	ocfs2_commit_trans(osb, handle);
out_unlock:
	brelse(group_bh);
	brelse(main_bm_bh);

	ocfs2_inode_unlock(main_bm_inode, 1);

out_mutex:
	mutex_unlock(&main_bm_inode->i_mutex);
	iput(main_bm_inode);

out:
	return ret;
}
/*
 * Extend the filesystem to the new number of clusters specified.  This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group.
 *
 * @inode:        any inode on the filesystem (used to reach the superblock).
 * @new_clusters: number of clusters to append to the last group; 0 is a
 *                no-op, negative is rejected with -EINVAL.
 *
 * Returns 0 on success, negative errno on failure.
 */
int ocfs2_group_extend(struct inode * inode, int new_clusters)
{
	int ret;
	handle_t *handle;
	struct buffer_head *main_bm_bh = NULL;
	struct buffer_head *group_bh = NULL;
	struct inode *main_bm_inode = NULL;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_group_desc *group = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	u16 cl_bpc;
	u32 first_new_cluster;
	u64 lgd_blkno;

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	if (new_clusters < 0)
		return -EINVAL;
	else if (new_clusters == 0)
		return 0;

	main_bm_inode = ocfs2_get_system_file_inode(osb,
						    GLOBAL_BITMAP_SYSTEM_INODE,
						    OCFS2_INVALID_SLOT);
	if (!main_bm_inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto out;
	}

	/* Lock order: i_mutex, then the cluster inode lock. */
	mutex_lock(&main_bm_inode->i_mutex);

	ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_mutex;
	}

	fe = (struct ocfs2_dinode *)main_bm_bh->b_data;

	/* main_bm_bh is validated by inode read inside ocfs2_inode_lock(),
	 * so any corruption is a code bug. */
	BUG_ON(!OCFS2_IS_VALID_DINODE(fe));

	/* Online resize requires full-size group bitmaps; older/smaller
	 * layouts must go through offline resize instead. */
	if (le16_to_cpu(fe->id2.i_chain.cl_cpg) !=
		ocfs2_group_bitmap_size(osb->sb, 0,
					osb->s_feature_incompat) * 8) {
		mlog(ML_ERROR, "The disk is too old and small. "
		     "Force to do offline resize.");
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Find the group that currently holds the last cluster; that is
	 * the group the new clusters get appended to. */
	first_new_cluster = le32_to_cpu(fe->i_clusters);
	lgd_blkno = ocfs2_which_cluster_group(main_bm_inode,
					      first_new_cluster - 1);

	ret = ocfs2_read_group_descriptor(main_bm_inode, fe, lgd_blkno,
					  &group_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_unlock;
	}

	group = (struct ocfs2_group_desc *)group_bh->b_data;

	/* The extended group must not exceed clusters-per-group. */
	cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc);
	if (le16_to_cpu(group->bg_bits) / cl_bpc + new_clusters >
		le16_to_cpu(fe->id2.i_chain.cl_cpg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	trace_ocfs2_group_extend(
	     (unsigned long long)le64_to_cpu(group->bg_blkno), new_clusters);

	handle = ocfs2_start_trans(osb, OCFS2_GROUP_EXTEND_CREDITS);
	if (IS_ERR(handle)) {
		mlog_errno(PTR_ERR(handle));
		ret = -EINVAL;
		goto out_unlock;
	}

	/* update the last group descriptor and inode. */
	ret = ocfs2_update_last_group_and_inode(handle, main_bm_inode,
						main_bm_bh, group_bh,
						first_new_cluster,
						new_clusters);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ocfs2_update_super_and_backups(main_bm_inode, new_clusters);

out_commit:
	ocfs2_commit_trans(osb, handle);
out_unlock:
	brelse(group_bh);
	brelse(main_bm_bh);

	ocfs2_inode_unlock(main_bm_inode, 1);

out_mutex:
	mutex_unlock(&main_bm_inode->i_mutex);
	iput(main_bm_inode);

out:
	return ret;
}
int main(int argc, char **argv) { errcode_t ret = 0; struct mount_options mo; ocfs2_filesys *fs = NULL; struct o2cb_cluster_desc cluster; struct o2cb_region_desc desc; int clustered = 1; int group_join = 0; struct stat statbuf; const char *spec; char *opts_string = NULL; initialize_ocfs_error_table(); initialize_o2dl_error_table(); initialize_o2cb_error_table(); setbuf(stdout, NULL); setbuf(stderr, NULL); if (signal(SIGTERM, handle_signal) == SIG_ERR) { fprintf(stderr, "Could not set SIGTERM\n"); exit(1); } if (signal(SIGINT, handle_signal) == SIG_ERR) { fprintf(stderr, "Could not set SIGINT\n"); exit(1); } memset(&mo, 0, sizeof(mo)); read_options (argc, argv, &mo); ret = process_options(&mo); if (ret) goto bail; ret = ocfs2_open(mo.dev, OCFS2_FLAG_RO, 0, 0, &fs); //O_EXCL? if (ret) { com_err(progname, ret, "while opening device %s", mo.dev); goto bail; } clustered = (0 == ocfs2_mount_local(fs)); if (ocfs2_is_hard_readonly(fs) && (clustered || !(mo.flags & MS_RDONLY))) { ret = OCFS2_ET_IO; com_err(progname, ret, "while mounting read-only device in %s mode", (clustered ? 
"clustered" : "read-write")); goto bail; } if (verbose) printf("device=%s\n", mo.dev); ret = o2cb_setup_stack((char *)OCFS2_RAW_SB(fs->fs_super)->s_cluster_info.ci_stack); if (ret) { com_err(progname, ret, "while setting up stack\n"); goto bail; } if (clustered) { ret = o2cb_init(); if (ret) { com_err(progname, ret, "while trying initialize cluster"); goto bail; } ret = ocfs2_fill_cluster_desc(fs, &cluster); if (ret) { com_err(progname, ret, "while trying to determine cluster information"); goto bail; } ret = ocfs2_fill_heartbeat_desc(fs, &desc); if (ret) { com_err(progname, ret, "while trying to determine heartbeat information"); goto bail; } desc.r_persist = 1; desc.r_service = OCFS2_FS_NAME; } ret = add_mount_options(fs, &cluster, &mo.xtra_opts); if (ret) { com_err(progname, ret, "while adding mount options"); goto bail; } /* validate mount dir */ if (lstat(mo.dir, &statbuf)) { com_err(progname, 0, "mount directory %s does not exist", mo.dir); goto bail; } else if (stat(mo.dir, &statbuf)) { com_err(progname, 0, "mount directory %s is a broken symbolic " "link", mo.dir); goto bail; } else if (!S_ISDIR(statbuf.st_mode)) { com_err(progname, 0, "mount directory %s is not a directory", mo.dir); goto bail; } block_signals (SIG_BLOCK); if (clustered && !(mo.flags & MS_REMOUNT)) { ret = o2cb_begin_group_join(&cluster, &desc); if (ret) { block_signals (SIG_UNBLOCK); com_err(progname, ret, "while trying to join the group"); goto bail; } group_join = 1; } spec = canonicalize(mo.dev); ret = mount(spec, mo.dir, OCFS2_FS_NAME, mo.flags & ~MS_NOSYS, mo.xtra_opts); if (ret) { ret = errno; if (group_join) { /* We ignore the return code because the mount * failure is the important error. 
* complete_group_join() will handle cleaning up */ o2cb_complete_group_join(&cluster, &desc, errno); } block_signals (SIG_UNBLOCK); if (ret == -EROFS) com_err(progname, OCFS2_ET_RO_FILESYS, "while mounting %s " "on %s, please try fixing this by fsck.ocfs2 and then " "retry mounting", mo.dev, mo.dir); else com_err(progname, OCFS2_ET_INTERNAL_FAILURE, "while mounting %s on %s. Check 'dmesg' for more " "information on this error %d.", mo.dev, mo.dir, (int)ret); goto bail; } if (group_join) { ret = o2cb_complete_group_join(&cluster, &desc, 0); if (ret) { com_err(progname, ret, "while completing group join (WARNING)"); /* * XXX: GFS2 allows the mount to continue, so we * will do the same. I don't know how clean that * is, but I don't have a better solution. */ ret = 0; } } change_local_hb_io_priority(fs, mo.dev); opts_string = fix_opts_string(((mo.flags & ~MS_NOMTAB) | (clustered ? MS_NETDEV : 0)), mo.xtra_opts, NULL); update_mtab_entry(mo.dev, mo.dir, OCFS2_FS_NAME, opts_string, mo.flags, 0, 0); block_signals (SIG_UNBLOCK); bail: if (fs) ocfs2_close(fs); if (mo.dev) free(mo.dev); if (mo.dir) free(mo.dir); if (mo.opts) free(mo.opts); if (mo.xtra_opts) free(mo.xtra_opts); if (mo.type) free(mo.type); if (opts_string) free(opts_string); return ret ? 1 : 0; }
static int ocfs2_filecheck_repair_inode_block(struct super_block *sb, struct buffer_head *bh) { int changed = 0; struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data; if (!ocfs2_filecheck_validate_inode_block(sb, bh)) return 0; trace_ocfs2_filecheck_repair_inode_block( (unsigned long long)bh->b_blocknr); if (ocfs2_is_hard_readonly(OCFS2_SB(sb)) || ocfs2_is_soft_readonly(OCFS2_SB(sb))) { mlog(ML_ERROR, "Filecheck: cannot repair dinode #%llu " "on readonly filesystem\n", (unsigned long long)bh->b_blocknr); return -OCFS2_FILECHECK_ERR_READONLY; } if (buffer_jbd(bh)) { mlog(ML_ERROR, "Filecheck: cannot repair dinode #%llu, " "its buffer is in jbd\n", (unsigned long long)bh->b_blocknr); return -OCFS2_FILECHECK_ERR_INJBD; } if (!OCFS2_IS_VALID_DINODE(di)) { /* Cannot fix invalid inode block */ return -OCFS2_FILECHECK_ERR_INVALIDINO; } if (!(di->i_flags & cpu_to_le32(OCFS2_VALID_FL))) { /* Cannot just add VALID_FL flag back as a fix, * need more things to check here. */ return -OCFS2_FILECHECK_ERR_VALIDFLAG; } if (le64_to_cpu(di->i_blkno) != bh->b_blocknr) { di->i_blkno = cpu_to_le64(bh->b_blocknr); changed = 1; mlog(ML_ERROR, "Filecheck: reset dinode #%llu: i_blkno to %llu\n", (unsigned long long)bh->b_blocknr, (unsigned long long)le64_to_cpu(di->i_blkno)); } if (le32_to_cpu(di->i_fs_generation) != OCFS2_SB(sb)->fs_generation) { di->i_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation); changed = 1; mlog(ML_ERROR, "Filecheck: reset dinode #%llu: fs_generation to %u\n", (unsigned long long)bh->b_blocknr, le32_to_cpu(di->i_fs_generation)); } if (changed || ocfs2_validate_meta_ecc(sb, bh->b_data, &di->i_check)) { ocfs2_compute_meta_ecc(sb, bh->b_data, &di->i_check); mark_buffer_dirty(bh); mlog(ML_ERROR, "Filecheck: reset dinode #%llu: compute meta ecc\n", (unsigned long long)bh->b_blocknr); } return 0; }