/*
 * Convert an on-disk (little-endian) lfsck_bookmark into host byte order.
 *
 * \param des  destination record, filled in host byte order
 * \param src  source record in little-endian (wire/disk) byte order
 */
static void lfsck_bookmark_le_to_cpu(struct lfsck_bookmark *des,
				     struct lfsck_bookmark *src)
{
	/* Identification fields. */
	des->lb_magic = le32_to_cpu(src->lb_magic);
	des->lb_version = le16_to_cpu(src->lb_version);
	des->lb_param = le16_to_cpu(src->lb_param);

	/* Tunables. */
	des->lb_speed_limit = le32_to_cpu(src->lb_speed_limit);
	des->lb_async_windows = le16_to_cpu(src->lb_async_windows);

	/* Embedded FIDs are converted by their own helper. */
	fid_le_to_cpu(&des->lb_lpf_fid, &src->lb_lpf_fid);
	fid_le_to_cpu(&des->lb_last_fid, &src->lb_last_fid);
}
static int out_destroy(struct tgt_session_info *tsi) { struct tgt_thread_info *tti = tgt_th_info(tsi->tsi_env); struct update *update = tti->tti_u.update.tti_update; struct dt_object *obj = tti->tti_u.update.tti_dt_object; struct lu_fid *fid; int rc; ENTRY; fid = &update->u_fid; fid_le_to_cpu(fid, fid); if (!fid_is_sane(fid)) { CERROR("%s: invalid FID "DFID": rc = %d\n", tgt_name(tsi->tsi_tgt), PFID(fid), -EPROTO); RETURN(err_serious(-EPROTO)); } if (!lu_object_exists(&obj->do_lu)) RETURN(-ENOENT); rc = out_tx_destroy(tsi->tsi_env, obj, &tti->tti_tea, tti->tti_u.update.tti_update_reply, tti->tti_u.update.tti_update_reply_index); RETURN(rc); }
/*
 * Convert an on-disk (little-endian) lfsck_namespace record into host
 * byte order.  Scalar counters and timestamps use leXX_to_cpu(); the
 * embedded scan positions and FID are converted by their own helpers.
 *
 * \param des  destination record, filled in host byte order
 * \param src  source record in little-endian (wire/disk) byte order
 */
static void lfsck_namespace_le_to_cpu(struct lfsck_namespace *des,
				      struct lfsck_namespace *src)
{
	/* Identification and state. */
	des->ln_magic = le32_to_cpu(src->ln_magic);
	des->ln_status = le32_to_cpu(src->ln_status);
	des->ln_flags = le32_to_cpu(src->ln_flags);
	des->ln_success_count = le32_to_cpu(src->ln_success_count);

	/* Run times and timestamps. */
	des->ln_run_time_phase1 = le32_to_cpu(src->ln_run_time_phase1);
	des->ln_run_time_phase2 = le32_to_cpu(src->ln_run_time_phase2);
	des->ln_time_last_complete = le64_to_cpu(src->ln_time_last_complete);
	des->ln_time_latest_start = le64_to_cpu(src->ln_time_latest_start);
	des->ln_time_last_checkpoint =
				le64_to_cpu(src->ln_time_last_checkpoint);

	/* Scan positions (converted by the position helper). */
	lfsck_position_le_to_cpu(&des->ln_pos_latest_start,
				 &src->ln_pos_latest_start);
	lfsck_position_le_to_cpu(&des->ln_pos_last_checkpoint,
				 &src->ln_pos_last_checkpoint);
	lfsck_position_le_to_cpu(&des->ln_pos_first_inconsistent,
				 &src->ln_pos_first_inconsistent);

	/* Phase 1 statistics. */
	des->ln_items_checked = le64_to_cpu(src->ln_items_checked);
	des->ln_items_repaired = le64_to_cpu(src->ln_items_repaired);
	des->ln_items_failed = le64_to_cpu(src->ln_items_failed);
	des->ln_dirs_checked = le64_to_cpu(src->ln_dirs_checked);
	des->ln_mlinked_checked = le64_to_cpu(src->ln_mlinked_checked);

	/* Phase 2 statistics. */
	des->ln_objs_checked_phase2 = le64_to_cpu(src->ln_objs_checked_phase2);
	des->ln_objs_repaired_phase2 =
				le64_to_cpu(src->ln_objs_repaired_phase2);
	des->ln_objs_failed_phase2 = le64_to_cpu(src->ln_objs_failed_phase2);
	des->ln_objs_nlink_repaired = le64_to_cpu(src->ln_objs_nlink_repaired);
	des->ln_objs_lost_found = le64_to_cpu(src->ln_objs_lost_found);
	fid_le_to_cpu(&des->ln_fid_latest_scanned_phase2,
		      &src->ln_fid_latest_scanned_phase2);
}
static int out_create(struct tgt_session_info *tsi) { struct tgt_thread_info *tti = tgt_th_info(tsi->tsi_env); struct update *update = tti->tti_u.update.tti_update; struct dt_object *obj = tti->tti_u.update.tti_dt_object; struct dt_object_format *dof = &tti->tti_u.update.tti_update_dof; struct obdo *lobdo = &tti->tti_u.update.tti_obdo; struct lu_attr *attr = &tti->tti_attr; struct lu_fid *fid = NULL; struct obdo *wobdo; int size; int rc; ENTRY; wobdo = update_param_buf(update, 0, &size); if (wobdo == NULL || size != sizeof(*wobdo)) { CERROR("%s: obdo is NULL, invalid RPC: rc = %d\n", tgt_name(tsi->tsi_tgt), -EPROTO); RETURN(err_serious(-EPROTO)); } obdo_le_to_cpu(wobdo, wobdo); lustre_get_wire_obdo(NULL, lobdo, wobdo); la_from_obdo(attr, lobdo, lobdo->o_valid); dof->dof_type = dt_mode_to_dft(attr->la_mode); if (S_ISDIR(attr->la_mode)) { int size; fid = update_param_buf(update, 1, &size); if (fid == NULL || size != sizeof(*fid)) { CERROR("%s: invalid fid: rc = %d\n", tgt_name(tsi->tsi_tgt), -EPROTO); RETURN(err_serious(-EPROTO)); } fid_le_to_cpu(fid, fid); if (!fid_is_sane(fid)) { CERROR("%s: invalid fid "DFID": rc = %d\n", tgt_name(tsi->tsi_tgt), PFID(fid), -EPROTO); RETURN(err_serious(-EPROTO)); } } if (lu_object_exists(&obj->do_lu)) RETURN(-EEXIST); rc = out_tx_create(tsi->tsi_env, obj, attr, fid, dof, &tti->tti_tea, tti->tti_u.update.tti_update_reply, tti->tti_u.update.tti_update_reply_index); RETURN(rc); }
/*
 * Convert all wire (little-endian) fields of a directory entry to host
 * byte order in place, and return the entry hash through @cookie for use
 * as the iteration position.
 */
static void lfsck_unpack_ent(struct lu_dirent *ent, __u64 *cookie)
{
	ent->lde_attrs = le32_to_cpu(ent->lde_attrs);
	ent->lde_reclen = le16_to_cpu(ent->lde_reclen);
	ent->lde_namelen = le16_to_cpu(ent->lde_namelen);
	fid_le_to_cpu(&ent->lde_fid, &ent->lde_fid);
	*cookie = le64_to_cpu(ent->lde_hash);

	/* NUL-terminate the name in place.  This may clobber the data
	 * (type) stored right after ent::lde_name, but that is fine --
	 * the callers do not use it. */
	ent->lde_name[ent->lde_namelen] = 0;
}
static int out_create(struct mdt_thread_info *info) { struct update *update = info->mti_u.update.mti_update; struct dt_object *obj = info->mti_u.update.mti_dt_object; struct dt_object_format *dof = &info->mti_u.update.mti_update_dof; struct obdo *lobdo = &info->mti_u.update.mti_obdo; struct lu_attr *attr = &info->mti_attr.ma_attr; struct lu_fid *fid = NULL; struct obdo *wobdo; int size; int rc; ENTRY; wobdo = update_param_buf(update, 0, &size); if (wobdo == NULL || size != sizeof(*wobdo)) { CERROR("%s: obdo is NULL, invalid RPC: rc = %d\n", mdt_obd_name(info->mti_mdt), -EPROTO); RETURN(err_serious(-EPROTO)); } obdo_le_to_cpu(wobdo, wobdo); lustre_get_wire_obdo(lobdo, wobdo); la_from_obdo(attr, lobdo, lobdo->o_valid); dof->dof_type = dt_mode_to_dft(attr->la_mode); if (S_ISDIR(attr->la_mode)) { int size; fid = update_param_buf(update, 1, &size); if (fid == NULL || size != sizeof(*fid)) { CERROR("%s: invalid fid: rc = %d\n", mdt_obd_name(info->mti_mdt), -EPROTO); RETURN(err_serious(-EPROTO)); } fid_le_to_cpu(fid, fid); if (!fid_is_sane(fid)) { CERROR("%s: invalid fid "DFID": rc = %d\n", mdt_obd_name(info->mti_mdt), PFID(fid), -EPROTO); RETURN(err_serious(-EPROTO)); } } rc = out_tx_create(info, obj, attr, fid, dof, &info->mti_handle, info->mti_u.update.mti_update_reply, info->mti_u.update.mti_update_reply_index); RETURN(rc); }
/*
 * readdir callback used to find the name of an inode by FID: compare each
 * entry's FID against the one we are looking for (lgd_fid) and capture the
 * entry name on a match.
 *
 * Returns non-zero (lgd_found) once the name has been found, which stops
 * the directory walk.
 */
static int ll_nfs_get_name_filldir(void *cookie, const char *name,
				   int namelen, loff_t hash, u64 ino,
				   unsigned type)
{
	struct ll_getname_data *lgd = cookie;
	/* It is a hack to access lde_fid for comparison with lgd_fid.
	 * So the input 'name' must be part of the 'lu_dirent'. */
	struct lu_dirent *lde = container_of0(name, struct lu_dirent,
					      lde_name);
	struct lu_fid entry_fid;

	fid_le_to_cpu(&entry_fid, &lde->lde_fid);
	if (lu_fid_eq(&entry_fid, &lgd->lgd_fid)) {
		memcpy(lgd->lgd_name, name, namelen);
		lgd->lgd_name[namelen] = 0;
		lgd->lgd_found = 1;
	}

	return lgd->lgd_found;
}
void mdt_dump_lmv(unsigned int level, const union lmv_mds_md *lmv) { const struct lmv_mds_md_v1 *lmm1; int i; if (likely(!cfs_cdebug_show(level, DEBUG_SUBSYSTEM))) return; lmm1 = &lmv->lmv_md_v1; CDEBUG(level, "magic 0x%08X, master %#X stripe_count %#x\n", le32_to_cpu(lmm1->lmv_magic), le32_to_cpu(lmm1->lmv_master_mdt_index), le32_to_cpu(lmm1->lmv_stripe_count)); for (i = 0; i < le32_to_cpu(lmm1->lmv_stripe_count); i++) { struct lu_fid fid; fid_le_to_cpu(&fid, &lmm1->lmv_stripe_fids[i]); CDEBUG(level, "idx %u subobj "DFID"\n", i, PFID(&fid)); } }
static int out_index_insert(struct tgt_session_info *tsi) { struct tgt_thread_info *tti = tgt_th_info(tsi->tsi_env); struct update *update = tti->tti_u.update.tti_update; struct dt_object *obj = tti->tti_u.update.tti_dt_object; struct lu_fid *fid; char *name; int rc = 0; int size; ENTRY; name = (char *)update_param_buf(update, 0, NULL); if (name == NULL) { CERROR("%s: empty name for index insert: rc = %d\n", tgt_name(tsi->tsi_tgt), -EPROTO); RETURN(err_serious(-EPROTO)); } fid = (struct lu_fid *)update_param_buf(update, 1, &size); if (fid == NULL || size != sizeof(*fid)) { CERROR("%s: invalid fid: rc = %d\n", tgt_name(tsi->tsi_tgt), -EPROTO); RETURN(err_serious(-EPROTO)); } fid_le_to_cpu(fid, fid); if (!fid_is_sane(fid)) { CERROR("%s: invalid FID "DFID": rc = %d\n", tgt_name(tsi->tsi_tgt), PFID(fid), -EPROTO); RETURN(err_serious(-EPROTO)); } rc = out_tx_index_insert(tsi->tsi_env, obj, name, fid, &tti->tti_tea, tti->tti_u.update.tti_update_reply, tti->tti_u.update.tti_update_reply_index); RETURN(rc); }
static int out_index_insert(struct mdt_thread_info *info) { struct update *update = info->mti_u.update.mti_update; struct dt_object *obj = info->mti_u.update.mti_dt_object; struct lu_fid *fid; char *name; int rc = 0; int size; ENTRY; name = (char *)update_param_buf(update, 0, NULL); if (name == NULL) { CERROR("%s: empty name for index insert: rc = %d\n", mdt_obd_name(info->mti_mdt), -EPROTO); RETURN(err_serious(-EPROTO)); } fid = (struct lu_fid *)update_param_buf(update, 1, &size); if (fid == NULL || size != sizeof(*fid)) { CERROR("%s: invalid fid: rc = %d\n", mdt_obd_name(info->mti_mdt), -EPROTO); RETURN(err_serious(-EPROTO)); } fid_le_to_cpu(fid, fid); if (!fid_is_sane(fid)) { CERROR("%s: invalid FID "DFID": rc = %d\n", mdt_obd_name(info->mti_mdt), PFID(fid), -EPROTO); RETURN(err_serious(-EPROTO)); } rc = out_tx_index_insert(info, obj, name, fid, &info->mti_handle, info->mti_u.update.mti_update_reply, info->mti_u.update.mti_update_reply_index); RETURN(rc); }
/**
 * delete unreferenced files and directories in the PENDING directory
 *
 * Files that remain in PENDING after client->MDS recovery has completed
 * have to be referenced (opened) by some client during recovery, or they
 * will be deleted here (for clients that did not complete recovery).
 *
 * \param env  execution environment
 * \param mdd  MDD device finishing recovery
 *
 * \retval 0 success
 * \retval -ve error
 */
static int orph_index_iterate(const struct lu_env *env,
			      struct mdd_device *mdd)
{
	struct dt_object	*dor = mdd->mdd_orphans;
	struct lu_dirent	*ent = &mdd_env_info(env)->mti_ent;
	const struct dt_it_ops	*iops;
	struct dt_it		*it;
	struct lu_fid		 fid;
	int			 key_sz = 0;
	int			 rc;
	__u64			 cookie;
	ENTRY;

	/* In recovery phase, do not need for any lock here */
	iops = &dor->do_index_ops->dio_it;
	it = iops->init(env, dor, LUDA_64BITHASH, BYPASS_CAPA);
	if (IS_ERR(it)) {
		rc = PTR_ERR(it);
		CERROR("%s: cannot clean PENDING: rc = %d\n",
		       mdd2obd_dev(mdd)->obd_name, rc);
		GOTO(out, rc);
	}

	/* Position the iterator at the first record.  load() > 0 means
	 * positioned on a record; 0 means the key was not found. */
	rc = iops->load(env, it, 0);
	if (rc < 0)
		GOTO(out_put, rc);
	if (rc == 0) {
		CERROR("%s: error loading iterator to clean PENDING\n",
		       mdd2obd_dev(mdd)->obd_name);
		/* Index contains no zero key? */
		GOTO(out_put, rc = -EIO);
	}

	do {
		key_sz = iops->key_size(env, it);
		/* filter out "." and ".." entries from PENDING dir. */
		if (key_sz < 8)
			goto next;

		rc = iops->rec(env, it, (struct dt_rec *)ent, LUDA_64BITHASH);
		if (rc != 0) {
			CERROR("%s: fail to get FID for orphan it: rc = %d\n",
			       mdd2obd_dev(mdd)->obd_name, rc);
			goto next;
		}

		fid_le_to_cpu(&fid, &ent->lde_fid);
		if (!fid_is_sane(&fid)) {
			CERROR("%s: bad FID "DFID" cleaning PENDING\n",
			       mdd2obd_dev(mdd)->obd_name, PFID(&fid));
			goto next;
		}

		/* kill orphan object.  Remember the cursor position and
		 * release the iterator around the deletion, since the
		 * delete invalidates it. */
		cookie = iops->store(env, it);
		iops->put(env, it);
		rc = orph_key_test_and_del(env, mdd, &fid,
					   (struct dt_key *)ent->lde_name);

		/* after index delete reset iterator: on success restart
		 * from the smallest key, otherwise reload the saved
		 * position.
		 *
		 * NOTE(review): rc from get()/load() here is immediately
		 * overwritten by next() below, so positioning failures are
		 * silently ignored -- presumably intentional best-effort
		 * cleanup; confirm. */
		if (rc == 0)
			rc = iops->get(env, it, (const void *)"");
		else
			rc = iops->load(env, it, cookie);
next:
		rc = iops->next(env, it);
	} while (rc == 0);

	/* Iteration ran to the end (or next() failed); report success. */
	GOTO(out_put, rc = 0);
out_put:
	iops->put(env, it);
	iops->fini(env, it);
out:
	return rc;
}
/**
 * Object updates between Targets. Because all the updates has been
 * dis-assemblied into object updates at sender side, so OUT will
 * call OSD API directly to execute these updates.
 *
 * In DNE phase I all of the updates in the request need to be executed
 * in one transaction, and the transaction has to be synchronously.
 *
 * Please refer to lustre/include/lustre/lustre_idl.h for req/reply
 * format.
 */
int out_handle(struct tgt_session_info *tsi)
{
	const struct lu_env	 *env = tsi->tsi_env;
	struct tgt_thread_info	 *tti = tgt_th_info(env);
	struct thandle_exec_args *ta = &tti->tti_tea;
	struct req_capsule	 *pill = tsi->tsi_pill;
	struct dt_device	 *dt = tsi->tsi_tgt->lut_bottom;
	struct update_buf	 *ubuf;
	struct update		 *update;
	struct update_reply	 *update_reply;
	int			  bufsize;
	int			  count;
	int			  old_batchid = -1;
	unsigned		  off;
	int			  i;
	int			  rc = 0;
	int			  rc1 = 0;
	ENTRY;

	req_capsule_set(pill, &RQF_UPDATE_OBJ);

	/* Validate the incoming update buffer: size, presence, magic and
	 * update count, before touching any of its contents. */
	bufsize = req_capsule_get_size(pill, &RMF_UPDATE, RCL_CLIENT);
	if (bufsize != UPDATE_BUFFER_SIZE) {
		CERROR("%s: invalid bufsize %d: rc = %d\n",
		       tgt_name(tsi->tsi_tgt), bufsize, -EPROTO);
		RETURN(err_serious(-EPROTO));
	}

	ubuf = req_capsule_client_get(pill, &RMF_UPDATE);
	if (ubuf == NULL) {
		CERROR("%s: No buf!: rc = %d\n", tgt_name(tsi->tsi_tgt),
		       -EPROTO);
		RETURN(err_serious(-EPROTO));
	}

	if (le32_to_cpu(ubuf->ub_magic) != UPDATE_BUFFER_MAGIC) {
		CERROR("%s: invalid magic %x expect %x: rc = %d\n",
		       tgt_name(tsi->tsi_tgt), le32_to_cpu(ubuf->ub_magic),
		       UPDATE_BUFFER_MAGIC, -EPROTO);
		RETURN(err_serious(-EPROTO));
	}

	count = le32_to_cpu(ubuf->ub_count);
	if (count <= 0) {
		CERROR("%s: No update!: rc = %d\n", tgt_name(tsi->tsi_tgt),
		       -EPROTO);
		RETURN(err_serious(-EPROTO));
	}

	req_capsule_set_size(pill, &RMF_UPDATE_REPLY, RCL_SERVER,
			     UPDATE_BUFFER_SIZE);
	rc = req_capsule_server_pack(pill);
	if (rc != 0) {
		CERROR("%s: Can't pack response: rc = %d\n",
		       tgt_name(tsi->tsi_tgt), rc);
		RETURN(rc);
	}

	/* Prepare the update reply buffer */
	update_reply = req_capsule_server_get(pill, &RMF_UPDATE_REPLY);
	if (update_reply == NULL)
		RETURN(err_serious(-EPROTO));
	update_init_reply_buf(update_reply, count);
	tti->tti_u.update.tti_update_reply = update_reply;

	rc = out_tx_start(env, dt, ta);
	if (rc != 0)
		RETURN(rc);

	tti->tti_mult_trans = !req_is_replay(tgt_ses_req(tsi));

	/* Walk through updates in the request to execute them synchronously */
	off = cfs_size_round(offsetof(struct update_buf, ub_bufs[0]));
	for (i = 0; i < count; i++) {
		struct tgt_handler *h;
		struct dt_object   *dt_obj;

		update = (struct update *)((char *)ubuf + off);

		/* Updates sharing a batchid run in one transaction; when
		 * the batchid changes, end the current transaction and
		 * start a new one. */
		if (old_batchid == -1) {
			old_batchid = update->u_batchid;
		} else if (old_batchid != update->u_batchid) {
			/* Stop the current update transaction,
			 * create a new one */
			rc = out_tx_end(env, ta);
			if (rc != 0)
				RETURN(rc);
			rc = out_tx_start(env, dt, ta);
			if (rc != 0)
				RETURN(rc);
			old_batchid = update->u_batchid;
		}

		/* Convert the target FID in place; the per-opcode handlers
		 * below see it already in CPU byte order. */
		fid_le_to_cpu(&update->u_fid, &update->u_fid);
		if (!fid_is_sane(&update->u_fid)) {
			CERROR("%s: invalid FID "DFID": rc = %d\n",
			       tgt_name(tsi->tsi_tgt), PFID(&update->u_fid),
			       -EPROTO);
			GOTO(out, rc = err_serious(-EPROTO));
		}

		dt_obj = dt_locate(env, dt, &update->u_fid);
		if (IS_ERR(dt_obj))
			GOTO(out, rc = PTR_ERR(dt_obj));

		tti->tti_u.update.tti_dt_object = dt_obj;
		tti->tti_u.update.tti_update = update;
		tti->tti_u.update.tti_update_reply_index = i;

		h = out_handler_find(update->u_type);
		if (likely(h != NULL)) {
			/* For real modification RPC, check if the update
			 * has been executed */
			if (h->th_flags & MUTABOR) {
				struct ptlrpc_request *req = tgt_ses_req(tsi);

				if (out_check_resent(env, dt, dt_obj, req,
						     out_reconstruct,
						     update_reply, i))
					GOTO(next, rc);
			}

			rc = h->th_act(tsi);
		} else {
			CERROR("%s: The unsupported opc: 0x%x\n",
			       tgt_name(tsi->tsi_tgt), update->u_type);
			lu_object_put(env, &dt_obj->do_lu);
			GOTO(out, rc = -ENOTSUPP);
		}
next:
		lu_object_put(env, &dt_obj->do_lu);
		if (rc < 0)
			GOTO(out, rc);
		off += cfs_size_round(update_size(update));
	}
out:
	/* Always close the final transaction; its status replaces rc only
	 * when the loop itself succeeded. */
	rc1 = out_tx_end(env, ta);
	if (rc == 0)
		rc = rc1;
	RETURN(rc);
}
/*
 * liblustre readdir: fill @buf with up to @nbytes of directory entries
 * from @dir, starting at cursor *@basep (a hash-based directory offset).
 * On return *@basep holds the new cursor.  Returns the number of bytes
 * filled, or a negative error.
 */
ssize_t llu_iop_filldirentries(struct inode *dir, _SYSIO_OFF_T *basep,
			       char *buf, size_t nbytes)
{
	struct llu_inode_info *lli = llu_i2info(dir);
	struct intnl_stat *st = llu_i2stat(dir);
	loff_t pos = *basep;
	struct ll_dir_chain chain;
	struct page *page;
	int filled = 0;
	int rc;
	int done;
	__u16 type;
	ENTRY;

	liblustre_wait_event(0);

	if (st->st_size == 0) {
		CWARN("dir size is 0?\n");
		RETURN(0);
	}

	if (pos == MDS_DIR_END_OFF)
		/*
		 * end-of-file.
		 */
		RETURN(0);

	rc = 0;
	done = 0;
	ll_dir_chain_init(&chain);

	page = llu_dir_read_page(dir, pos, 0, &chain);
	while (rc == 0 && !done) {
		struct lu_dirpage *dp;
		struct lu_dirent  *ent;

		if (!IS_ERR(page)) {
			/*
			 * If page is empty (end of directory is reached),
			 * use this value.
			 */
			__u64 hash = MDS_DIR_END_OFF;
			__u64 next;

			dp = page->addr;
			for (ent = lu_dirent_start(dp); ent != NULL && !done;
			     ent = lu_dirent_next(ent)) {
				char          *name;
				int            namelen;
				struct lu_fid  fid;
				__u64          ino;

				hash = le64_to_cpu(ent->lde_hash);
				namelen = le16_to_cpu(ent->lde_namelen);

				if (hash < pos)
					/*
					 * Skip until we find target hash
					 * value.
					 */
					continue;

				if (namelen == 0)
					/*
					 * Skip dummy record.
					 */
					continue;

				/* Convert the entry FID to CPU order in a
				 * local copy and build the inode number
				 * from it. */
				fid  = ent->lde_fid;
				name = ent->lde_name;
				fid_le_to_cpu(&fid, &fid);
				ino = cl_fid_build_ino(&fid, 0);
				type = ll_dirent_type_get(ent);
				/* filldir() returns non-zero when the
				 * output buffer is full. */
				done = filldir(buf, nbytes, name, namelen,
					       (loff_t)hash, ino, type,
					       &filled);
			}
			next = le64_to_cpu(dp->ldp_hash_end);
			OBD_PAGE_FREE(page);
			if (!done) {
				pos = next;
				if (pos == MDS_DIR_END_OFF)
					/*
					 * End of directory reached.
					 */
					done = 1;
				else if (1 /* chain is exhausted*/)
					/*
					 * Normal case: continue to the next
					 * page.
					 */
					page = llu_dir_read_page(dir, pos, 1,
								 &chain);
				else {
					/*
					 * go into overflow page.
					 */
				}
			} else {
				/* Buffer full: resume from this hash next
				 * time.  filled == 0 means even the first
				 * entry did not fit. */
				pos = hash;
				if (filled == 0)
					GOTO(out, filled = -EINVAL);
			}
		} else {
			rc = PTR_ERR(page);
			CERROR("error reading dir "DFID" at %lu: rc %d\n",
			       PFID(&lli->lli_fid), (unsigned long)pos, rc);
		}
	}
	lli->lli_dir_pos = (loff_t)pos;
	*basep = lli->lli_dir_pos;
out:
	ll_dir_chain_fini(&chain);
	liblustre_wait_event(0);
	RETURN(filled);
}
int ll_dir_read(struct inode *inode, __u64 *ppos, struct md_op_data *op_data, struct dir_context *ctx) { struct ll_sb_info *sbi = ll_i2sbi(inode); __u64 pos = *ppos; int is_api32 = ll_need_32bit_api(sbi); int is_hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH; struct page *page; bool done = false; int rc = 0; page = ll_get_dir_page(inode, op_data, pos); while (rc == 0 && !done) { struct lu_dirpage *dp; struct lu_dirent *ent; __u64 hash; __u64 next; if (IS_ERR(page)) { rc = PTR_ERR(page); break; } hash = MDS_DIR_END_OFF; dp = page_address(page); for (ent = lu_dirent_start(dp); ent && !done; ent = lu_dirent_next(ent)) { __u16 type; int namelen; struct lu_fid fid; __u64 lhash; __u64 ino; hash = le64_to_cpu(ent->lde_hash); if (hash < pos) /* * Skip until we find target hash * value. */ continue; namelen = le16_to_cpu(ent->lde_namelen); if (namelen == 0) /* * Skip dummy record. */ continue; if (is_api32 && is_hash64) lhash = hash >> 32; else lhash = hash; fid_le_to_cpu(&fid, &ent->lde_fid); ino = cl_fid_build_ino(&fid, is_api32); type = ll_dirent_type_get(ent); ctx->pos = lhash; /* For 'll_nfs_get_name_filldir()', it will try * to access the 'ent' through its 'lde_name', * so the parameter 'name' for 'ctx->actor()' * must be part of the 'ent'. */ done = !dir_emit(ctx, ent->lde_name, namelen, ino, type); } if (done) { pos = hash; ll_release_page(inode, page, false); break; } next = le64_to_cpu(dp->ldp_hash_end); pos = next; if (pos == MDS_DIR_END_OFF) { /* * End of directory reached. */ done = 1; ll_release_page(inode, page, false); } else { /* * Normal case: continue to the next * page. */ ll_release_page(inode, page, le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE); next = pos; page = ll_get_dir_page(inode, op_data, pos); } }
/** * Object updates between Targets. Because all the updates has been * dis-assemblied into object updates in master MDD layer, so out * will skip MDD layer, and call OSD API directly to execute these * updates. * * In phase I, all of the updates in the request need to be executed * in one transaction, and the transaction has to be synchronously. * * Please refer to lustre/include/lustre/lustre_idl.h for req/reply * format. */ int out_handle(struct mdt_thread_info *info) { struct req_capsule *pill = info->mti_pill; struct update_buf *ubuf; struct update *update; struct thandle_exec_args *th = &info->mti_handle; int bufsize; int count; unsigned off; int i; int rc = 0; int rc1 = 0; ENTRY; req_capsule_set(pill, &RQF_UPDATE_OBJ); bufsize = req_capsule_get_size(pill, &RMF_UPDATE, RCL_CLIENT); if (bufsize != UPDATE_BUFFER_SIZE) { CERROR("%s: invalid bufsize %d: rc = %d\n", mdt_obd_name(info->mti_mdt), bufsize, -EPROTO); RETURN(err_serious(-EPROTO)); } ubuf = req_capsule_client_get(pill, &RMF_UPDATE); if (ubuf == NULL) { CERROR("%s: No buf!: rc = %d\n", mdt_obd_name(info->mti_mdt), -EPROTO); RETURN(err_serious(-EPROTO)); } if (le32_to_cpu(ubuf->ub_magic) != UPDATE_BUFFER_MAGIC) { CERROR("%s: invalid magic %x expect %x: rc = %d\n", mdt_obd_name(info->mti_mdt), le32_to_cpu(ubuf->ub_magic), UPDATE_BUFFER_MAGIC, -EPROTO); RETURN(err_serious(-EPROTO)); } count = le32_to_cpu(ubuf->ub_count); if (count <= 0) { CERROR("%s: No update!: rc = %d\n", mdt_obd_name(info->mti_mdt), -EPROTO); RETURN(err_serious(-EPROTO)); } req_capsule_set_size(pill, &RMF_UPDATE_REPLY, RCL_SERVER, UPDATE_BUFFER_SIZE); rc = req_capsule_server_pack(pill); if (rc != 0) { CERROR("%s: Can't pack response: rc = %d\n", mdt_obd_name(info->mti_mdt), rc); RETURN(rc); } /* Prepare the update reply buffer */ info->mti_u.update.mti_update_reply = req_capsule_server_get(pill, &RMF_UPDATE_REPLY); update_init_reply_buf(info->mti_u.update.mti_update_reply, count); rc = out_tx_start(info->mti_env, info->mti_mdt, th); if 
(rc != 0) RETURN(rc); /* Walk through updates in the request to execute them synchronously */ off = cfs_size_round(offsetof(struct update_buf, ub_bufs[0])); for (i = 0; i < count; i++) { struct out_handler *h; struct dt_object *dt_obj; update = (struct update *)((char *)ubuf + off); fid_le_to_cpu(&update->u_fid, &update->u_fid); if (!fid_is_sane(&update->u_fid)) { CERROR("%s: invalid FID "DFID": rc = %d\n", mdt_obd_name(info->mti_mdt), PFID(&update->u_fid), -EPROTO); GOTO(out, rc = err_serious(-EPROTO)); } dt_obj = out_object_find(info, &update->u_fid); if (IS_ERR(dt_obj)) GOTO(out, rc = PTR_ERR(dt_obj)); info->mti_u.update.mti_dt_object = dt_obj; info->mti_u.update.mti_update = update; info->mti_u.update.mti_update_reply_index = i; h = mdt_handler_find(update->u_type, out_handlers); if (likely(h != NULL)) { rc = h->mh_act(info); } else { CERROR("%s: The unsupported opc: 0x%x\n", mdt_obd_name(info->mti_mdt), update->u_type); lu_object_put(info->mti_env, &dt_obj->do_lu); GOTO(out, rc = -ENOTSUPP); } lu_object_put(info->mti_env, &dt_obj->do_lu); if (rc < 0) GOTO(out, rc); off += cfs_size_round(update_size(update)); } out: rc1 = out_tx_end(info, th); rc = rc == 0 ? rc1 : rc; info->mti_fail_id = OBD_FAIL_UPDATE_OBJ_NET; RETURN(rc); }