/*
 * Map a FID to its inode number/generation.  Special FIDs (LAST_ID, OST
 * objects, llogs, local files, accounting, IGIF) are dispatched to their
 * dedicated lookup paths; everything else goes through the OI tables.
 */
int osd_oi_lookup(struct osd_thread_info *info, struct osd_device *osd,
		  const struct lu_fid *fid, struct osd_inode_id *id,
		  enum oi_check_flags flags)
{
	if (unlikely(fid_is_last_id(fid)))
		return osd_obj_spec_lookup(info, osd, fid, id);

	if (fid_is_on_ost(info, osd, fid, flags) || fid_is_llog(fid))
		return osd_obj_map_lookup(info, osd, fid, id);

	if (unlikely(fid_seq(fid) == FID_SEQ_LOCAL_FILE)) {
		int rc;

		if (fid_is_fs_root(fid)) {
			osd_id_gen(id, osd_sb(osd)->s_root->d_inode->i_ino,
				   osd_sb(osd)->s_root->d_inode->i_generation);
			return 0;
		}

		if (unlikely(fid_is_acct(fid)))
			return osd_acct_obj_lookup(info, osd, fid, id);

		/* For other special FIDs, try OI first, then do spec lookup */
		rc = __osd_oi_lookup(info, osd, fid, id);
		if (rc == -ENOENT)
			return osd_obj_spec_lookup(info, osd, fid, id);
		return rc;
	}

	if (!osd->od_igif_inoi && fid_is_igif(fid)) {
		osd_id_gen(id, lu_igif_ino(fid), lu_igif_gen(fid));
		return 0;
	}

	return __osd_oi_lookup(info, osd, fid, id);
}

int osd_oi_update(struct osd_thread_info *info, struct osd_device *osd,
		  const struct lu_fid *fid, const struct osd_inode_id *id,
		  handle_t *th, enum oi_check_flags flags)
{
	struct lu_fid *oi_fid = &info->oti_fid2;
	struct osd_inode_id *oi_id = &info->oti_id2;
	int rc = 0;

	if (unlikely(fid_is_last_id(fid)))
		return osd_obj_spec_update(info, osd, fid, id, th);

	if (fid_is_on_ost(info, osd, fid, flags) || fid_is_llog(fid))
		return osd_obj_map_update(info, osd, fid, id, th);

	fid_cpu_to_be(oi_fid, fid);
	osd_id_pack(oi_id, id);
	rc = osd_oi_iam_refresh(info, osd_fid2oi(osd, fid),
				(const struct dt_rec *)oi_id,
				(const struct dt_key *)oi_fid, th, false);
	if (rc != 0)
		return rc;

	if (unlikely(fid_seq(fid) == FID_SEQ_LOCAL_FILE))
		rc = osd_obj_spec_update(info, osd, fid, id, th);
	return rc;
}

/*
 * Return 1 if the FID names an OST object, 0 otherwise.  The answer comes
 * from the caller-supplied flags, the FID type itself, or (when
 * OI_CHECK_FLD is set) an FLD lookup of the sequence.
 */
int fid_is_on_ost(struct osd_thread_info *info, struct osd_device *osd,
		  const struct lu_fid *fid, enum oi_check_flags flags)
{
	struct lu_seq_range *range = &info->oti_seq_range;
	int rc;
	ENTRY;

	if (flags & OI_KNOWN_ON_OST)
		RETURN(1);

	if (unlikely(fid_is_local_file(fid) || fid_is_igif(fid) ||
		     fid_is_llog(fid)) || fid_is_name_llog(fid) ||
	    fid_is_quota(fid))
		RETURN(0);

	if (fid_is_idif(fid) || fid_is_last_id(fid))
		RETURN(1);

	if (!(flags & OI_CHECK_FLD))
		RETURN(0);

	rc = osd_fld_lookup(info->oti_env, osd, fid_seq(fid), range);
	if (rc != 0) {
		if (rc != -ENOENT)
			CERROR("%s: lookup FLD "DFID": rc = %d\n",
			       osd_name(osd), PFID(fid), rc);
		RETURN(0);
	}

	if (fld_range_is_ost(range))
		RETURN(1);

	RETURN(0);
}

int osd_oi_lookup(struct osd_thread_info *info, struct osd_device *osd,
		  const struct lu_fid *fid, struct osd_inode_id *id,
		  enum oi_check_flags flags)
{
	if (unlikely(fid_is_last_id(fid)))
		return osd_obj_spec_lookup(info, osd, fid, id);

	if (fid_is_on_ost(info, osd, fid, flags) || fid_is_llog(fid))
		return osd_obj_map_lookup(info, osd, fid, id);

	if (fid_is_fs_root(fid)) {
		osd_id_gen(id, osd_sb(osd)->s_root->d_inode->i_ino,
			   osd_sb(osd)->s_root->d_inode->i_generation);
		return 0;
	}

	if (unlikely(fid_is_acct(fid)))
		return osd_acct_obj_lookup(info, osd, fid, id);

	if (!osd->od_igif_inoi && fid_is_igif(fid)) {
		osd_id_gen(id, lu_igif_ino(fid), lu_igif_gen(fid));
		return 0;
	}

	return __osd_oi_lookup(info, osd, fid, id);
}

int osd_oi_delete(struct osd_thread_info *info, struct osd_device *osd,
		  const struct lu_fid *fid, handle_t *th,
		  enum oi_check_flags flags)
{
	struct lu_fid *oi_fid = &info->oti_fid2;

	/* clear idmap cache */
	if (lu_fid_eq(fid, &info->oti_cache.oic_fid))
		fid_zero(&info->oti_cache.oic_fid);

	if (fid_is_last_id(fid))
		return 0;

	if (fid_is_on_ost(info, osd, fid, flags) || fid_is_llog(fid))
		return osd_obj_map_delete(info, osd, fid, th);

	fid_cpu_to_be(oi_fid, fid);
	return osd_oi_iam_delete(info, osd_fid2oi(osd, fid),
				 (const struct dt_key *)oi_fid, th);
}

int osd_oi_insert(struct osd_thread_info *info, struct osd_device *osd,
		  const struct lu_fid *fid, const struct osd_inode_id *id,
		  handle_t *th, enum oi_check_flags flags)
{
	struct lu_fid *oi_fid = &info->oti_fid2;
	struct osd_inode_id *oi_id = &info->oti_id2;
	int rc = 0;

	if (unlikely(fid_is_last_id(fid)))
		return osd_obj_spec_insert(info, osd, fid, id, th);

	if (fid_is_on_ost(info, osd, fid, flags) || fid_is_llog(fid))
		return osd_obj_map_insert(info, osd, fid, id, th);

	fid_cpu_to_be(oi_fid, fid);
	osd_id_pack(oi_id, id);
	rc = osd_oi_iam_refresh(info, osd_fid2oi(osd, fid),
				(const struct dt_rec *)oi_id,
				(const struct dt_key *)oi_fid, th, true);
	if (rc != 0) {
		struct inode *inode;
		struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;

		if (rc != -EEXIST)
			return rc;

		rc = osd_oi_lookup(info, osd, fid, oi_id, 0);
		if (rc != 0)
			return rc;

		if (unlikely(osd_id_eq(id, oi_id)))
			return 1;

		/* Check whether the mapping for oi_id is valid or not. */
		inode = osd_iget(info, osd, oi_id);
		if (IS_ERR(inode)) {
			rc = PTR_ERR(inode);
			if (rc == -ENOENT || rc == -ESTALE)
				goto update;
			return rc;
		}

		/* The EA inode should NOT be in the OI; an old OI scrub may
		 * have added such a mapping by mistake, so replace it. */
		if (unlikely(osd_is_ea_inode(inode))) {
			iput(inode);
			goto update;
		}

		rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
		iput(inode);
		if (rc == -ENODATA)
			goto update;

		if (rc != 0)
			return rc;

		if (!(lma->lma_compat & LMAC_NOT_IN_OI) &&
		    lu_fid_eq(fid, &lma->lma_self_fid)) {
			CERROR("%.16s: the FID "DFID" is used by two objects: "
			       "%u/%u %u/%u\n",
			       LDISKFS_SB(osd_sb(osd))->s_es->s_volume_name,
			       PFID(fid), oi_id->oii_ino, oi_id->oii_gen,
			       id->oii_ino, id->oii_gen);
			return -EEXIST;
		}

update:
		osd_id_pack(oi_id, id);
		rc = osd_oi_iam_refresh(info, osd_fid2oi(osd, fid),
					(const struct dt_rec *)oi_id,
					(const struct dt_key *)oi_fid, th,
					false);
		if (rc != 0)
			return rc;
	}

	if (unlikely(fid_seq(fid) == FID_SEQ_LOCAL_FILE))
		rc = osd_obj_spec_insert(info, osd, fid, id, th);
	return rc;
}

static int lfsck_master_oit_engine(const struct lu_env *env,
				   struct lfsck_instance *lfsck)
{
	struct lfsck_thread_info *info = lfsck_env_info(env);
	const struct dt_it_ops *iops =
		&lfsck->li_obj_oit->do_index_ops->dio_it;
	struct dt_it *di = lfsck->li_di_oit;
	struct lu_fid *fid = &info->lti_fid;
	struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
	struct ptlrpc_thread *thread = &lfsck->li_thread;
	__u32 idx = lfsck_dev_idx(lfsck->li_bottom);
	int rc;
	ENTRY;

	do {
		struct dt_object *target;
		bool update_lma = false;

		if (lfsck->li_di_dir != NULL) {
			rc = lfsck_master_dir_engine(env, lfsck);
			if (rc <= 0)
				RETURN(rc);
		}

		if (unlikely(lfsck->li_oit_over))
			RETURN(1);

		if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY1) &&
		    cfs_fail_val > 0) {
			struct l_wait_info lwi;

			lwi = LWI_TIMEOUT(cfs_time_seconds(cfs_fail_val),
					  NULL, NULL);
			l_wait_event(thread->t_ctl_waitq,
				     !thread_is_running(thread), &lwi);
		}

		if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_CRASH))
			RETURN(0);

		lfsck->li_current_oit_processed = 1;
		lfsck->li_new_scanned++;
		rc = iops->rec(env, di, (struct dt_rec *)fid, 0);
		if (rc != 0) {
			lfsck_fail(env, lfsck, true);
			if (rc < 0 && bk->lb_param & LPF_FAILOUT)
				RETURN(rc);
			else
				goto checkpoint;
		}

		if (fid_is_idif(fid)) {
			__u32 idx1 = fid_idif_ost_idx(fid);

			LASSERT(!lfsck->li_master);

			/* It is an old format device, update the LMA. */
			if (idx != idx1) {
				struct ost_id *oi = &info->lti_oi;

				fid_to_ostid(fid, oi);
				ostid_to_fid(fid, oi, idx);
				update_lma = true;
			}
		} else if (!fid_is_norm(fid) && !fid_is_igif(fid) &&
			   !fid_is_last_id(fid) && !fid_is_root(fid) &&
			   !fid_seq_is_dot(fid_seq(fid))) {
			/* If the FID/object is only used locally and is
			 * invisible to external nodes, then LFSCK will not
			 * handle it. */
			goto checkpoint;
		}

		target = lfsck_object_find(env, lfsck, fid);
		if (target == NULL) {
			goto checkpoint;
		} else if (IS_ERR(target)) {
			lfsck_fail(env, lfsck, true);
			if (bk->lb_param & LPF_FAILOUT)
				RETURN(PTR_ERR(target));
			else
				goto checkpoint;
		}

		/* XXX: Currently we skip remote objects; their consistency
		 * will be processed in LFSCK phase III. */
		if (dt_object_exists(target) && !dt_object_remote(target)) {
			if (update_lma)
				rc = lfsck_update_lma(env, lfsck, target);
			if (rc == 0)
				rc = lfsck_exec_oit(env, lfsck, target);
		}
		lfsck_object_put(env, target);
		if (rc != 0 && bk->lb_param & LPF_FAILOUT)
			RETURN(rc);

checkpoint:
		rc = lfsck_checkpoint(env, lfsck);
		if (rc != 0 && bk->lb_param & LPF_FAILOUT)
			RETURN(rc);

		/* Rate control. */
		lfsck_control_speed(lfsck);
		if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_FATAL1)) {
			spin_lock(&lfsck->li_lock);
			thread_set_flags(thread, SVC_STOPPING);
			spin_unlock(&lfsck->li_lock);
			RETURN(-EINVAL);
		}

		rc = iops->next(env, di);
		if (unlikely(rc > 0))
			lfsck->li_oit_over = 1;
		else if (likely(rc == 0))
			lfsck->li_current_oit_processed = 0;

		if (unlikely(!thread_is_running(thread)))
			RETURN(0);
	} while (rc == 0 || lfsck->li_di_dir != NULL);

	RETURN(rc);
}