/**
 * Inserts (key, value) pair in \a directory object.
 *
 * \param dt osd index object
 * \param key key for index
 * \param rec record reference
 * \param th transaction handler
 * \param capa capability descriptor
 * \param ignore_quota update should not affect quota
 *
 * \retval 0 success
 * \retval -ve failure
 */
static int osd_dir_insert(const struct lu_env *env, struct dt_object *dt,
			  const struct dt_rec *rec, const struct dt_key *key,
			  struct thandle *th, struct lustre_capa *capa,
			  int ignore_quota)
{
	struct osd_thread_info *oti = osd_oti_get(env);
	struct osd_object *parent = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(parent);
	/* the record passed by the caller is really the FID of the child */
	struct lu_fid *fid = (struct lu_fid *)rec;
	struct osd_thandle *oh;
	struct osd_object *child;
	__u32 attr;
	int rc;
	ENTRY;

	LASSERT(parent->oo_db);
	LASSERT(udmu_object_is_zap(parent->oo_db));

	LASSERT(dt_object_exists(dt));
	LASSERT(osd_invariant(parent));

	/*
	 * zfs_readdir() generates ./.. on fly, but
	 * we want own entries (.. at least) with a fid
	 */
#if LUSTRE_VERSION_CODE >= OBD_OCD_VERSION(2, 3, 61, 0)
#warning "fix '.' and '..' handling"
#endif

	LASSERT(th != NULL);
	oh = container_of0(th, struct osd_thandle, ot_super);

	/* hold a reference on the child so its dnode number stays valid
	 * while we build and insert the directory entry below */
	child = osd_object_find(env, dt, fid);
	if (IS_ERR(child))
		RETURN(PTR_ERR(child));

	LASSERT(child->oo_db);

	/* the ZAP entry is stored as an array of 8-byte words, so the
	 * entry structure must be a whole multiple of 8 bytes */
	CLASSERT(sizeof(oti->oti_zde.lzd_reg) == 8);
	CLASSERT(sizeof(oti->oti_zde) % 8 == 0);
	attr = child->oo_dt.do_lu.lo_header->loh_attr;
	oti->oti_zde.lzd_reg.zde_type = IFTODT(attr & S_IFMT);
	oti->oti_zde.lzd_reg.zde_dnode = child->oo_db->db_object;
	oti->oti_zde.lzd_fid = *fid;

	/* Insert (key,oid) into ZAP */
	rc = -zap_add(osd->od_objset.os, parent->oo_db->db_object,
		      (char *)key, 8, sizeof(oti->oti_zde) / 8,
		      (void *)&oti->oti_zde, oh->ot_tx);

	osd_object_put(env, child);
	RETURN(rc);
}
/*
 * Compile-time checks that the LNet selftest wire structures keep their
 * expected sizes and field offsets; a change here would break protocol
 * compatibility with peers running older code.
 */
void lnet_selftest_structure_assertion(void)
{
	CLASSERT(sizeof(struct srpc_msg) == 160);
	CLASSERT(sizeof(struct srpc_test_reqst) == 70);
	CLASSERT(offsetof(struct srpc_msg, msg_body.tes_reqst.tsr_concur) == 72);
	CLASSERT(offsetof(struct srpc_msg, msg_body.tes_reqst.tsr_ndest) == 78);
	CLASSERT(sizeof(struct srpc_stat_reply) == 136);
	CLASSERT(sizeof(struct srpc_stat_reqst) == 28);
}
/*
 * Unpack a REINT setattr record from the request capsule into the
 * per-thread attribute (mti_attr) and user credential structures.
 *
 * \retval 0       success
 * \retval -EFAULT the reint record is missing from the request
 */
static int mdt_setattr_unpack_rec(struct mdt_thread_info *info)
{
	struct lu_ucred *uc = mdt_ucred(info);
	struct md_attr *ma = &info->mti_attr;
	struct lu_attr *la = &ma->ma_attr;
	struct req_capsule *pill = info->mti_pill;
	struct mdt_reint_record *rr = &info->mti_rr;
	struct mdt_rec_setattr *rec;
	ENTRY;

	/* all reint record variants share one wire size */
	CLASSERT(sizeof(struct mdt_rec_setattr) == sizeof(struct mdt_rec_reint));
	rec = req_capsule_client_get(pill, &RMF_REC_REINT);
	if (rec == NULL)
		RETURN(-EFAULT);

	/* This prior initialization is needed for old_init_ucred_reint() */
	uc->uc_fsuid = rec->sa_fsuid;
	uc->uc_fsgid = rec->sa_fsgid;
	uc->uc_cap = rec->sa_cap;
	uc->uc_suppgids[0] = rec->sa_suppgid;
	uc->uc_suppgids[1] = -1;

	rr->rr_fid1 = &rec->sa_fid;
	la->la_valid = mdt_attr_valid_xlate(rec->sa_valid, rr, ma);
	/* If MDS_ATTR_xTIME is set without MDS_ATTR_xTIME_SET and
	 * the client does not have OBD_CONNECT_FULL20, convert it
	 * to LA_xTIME. LU-3036 */
	if (!(exp_connect_flags(info->mti_exp) & OBD_CONNECT_FULL20)) {
		if (!(rec->sa_valid & MDS_ATTR_ATIME_SET) &&
		    (rec->sa_valid & MDS_ATTR_ATIME))
			la->la_valid |= LA_ATIME;
		if (!(rec->sa_valid & MDS_ATTR_MTIME_SET) &&
		    (rec->sa_valid & MDS_ATTR_MTIME))
			la->la_valid |= LA_MTIME;
		if (!(rec->sa_valid & MDS_ATTR_CTIME_SET) &&
		    (rec->sa_valid & MDS_ATTR_CTIME))
			la->la_valid |= LA_CTIME;
	}
	la->la_mode = rec->sa_mode;
	la->la_flags = rec->sa_attr_flags;
	la->la_uid = rec->sa_uid;
	la->la_gid = rec->sa_gid;
	la->la_size = rec->sa_size;
	la->la_blocks = rec->sa_blocks;
	la->la_ctime = rec->sa_ctime;
	la->la_atime = rec->sa_atime;
	la->la_mtime = rec->sa_mtime;
	ma->ma_valid = MA_INODE;

	if (rec->sa_bias & MDS_DATA_MODIFIED)
		ma->ma_attr_flags |= MDS_DATA_MODIFIED;
	else
		ma->ma_attr_flags &= ~MDS_DATA_MODIFIED;

	if (req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
		mdt_set_capainfo(info, 0, rr->rr_fid1,
				 req_capsule_client_get(pill, &RMF_CAPA1));

	RETURN(0);
}
/*
 * Pack a REINT_UNLINK (or REINT_RMENTRY when CLI_RM_ENTRY is set) record,
 * the victim name, and any SELinux policy info into the request capsule.
 */
void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
{
	struct mdt_rec_unlink *rec;

	CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_unlink));
	rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
	LASSERT(rec != NULL);

	rec->ul_opcode = op_data->op_cli_flags & CLI_RM_ENTRY ?
					REINT_RMENTRY : REINT_UNLINK;
	rec->ul_fsuid = op_data->op_fsuid;
	rec->ul_fsgid = op_data->op_fsgid;
	rec->ul_cap = op_data->op_cap;
	rec->ul_mode = op_data->op_mode;
	rec->ul_suppgid1 = op_data->op_suppgids[0];
	rec->ul_suppgid2 = -1;	/* only one supplementary group applies */
	rec->ul_fid1 = op_data->op_fid1;
	rec->ul_fid2 = op_data->op_fid2;
	rec->ul_time = op_data->op_mod_time;
	rec->ul_bias = op_data->op_bias;

	mdc_pack_name(req, &RMF_NAME, op_data->op_name, op_data->op_namelen);

	/* pack SELinux policy info if any */
	mdc_file_sepol_pack(req);
}
/*
 * Pack a REINT_UNLINK (or REINT_RMENTRY when CLI_RM_ENTRY is set) record,
 * the parent capability, and the victim name into the request capsule.
 */
void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
{
	struct mdt_rec_unlink *rec;
	char *tmp;

	CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_unlink));
	rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
	LASSERT(rec != NULL);

	rec->ul_opcode = op_data->op_cli_flags & CLI_RM_ENTRY ?
					REINT_RMENTRY : REINT_UNLINK;
	rec->ul_fsuid = op_data->op_fsuid;
	rec->ul_fsgid = op_data->op_fsgid;
	rec->ul_cap = op_data->op_cap;
	rec->ul_mode = op_data->op_mode;
	rec->ul_suppgid1 = op_data->op_suppgids[0];
	rec->ul_suppgid2 = -1;	/* only one supplementary group applies */
	rec->ul_fid1 = op_data->op_fid1;
	rec->ul_fid2 = op_data->op_fid2;
	rec->ul_time = op_data->op_mod_time;
	rec->ul_bias = op_data->op_bias;

	mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1);

	tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
	LASSERT(tmp != NULL);
	LOGL0(op_data->op_name, op_data->op_namelen, tmp);
}
/*
 * Unpack a REINT rename record: credentials, both FIDs, source/target
 * names, capabilities and DLM request. The same record layout also
 * carries symlink targets via RMF_SYMTGT.
 *
 * \retval 0   success
 * \retval -ve unpack failure (missing record, bad name, ...)
 */
static int mdt_rename_unpack(struct mdt_thread_info *info)
{
	struct lu_ucred *uc = mdt_ucred(info);
	struct mdt_rec_rename *rec;
	struct md_attr *ma = &info->mti_attr;
	struct lu_attr *attr = &info->mti_attr.ma_attr;
	struct mdt_reint_record *rr = &info->mti_rr;
	struct req_capsule *pill = info->mti_pill;
	int rc;
	ENTRY;

	CLASSERT(sizeof(struct mdt_rec_rename) == sizeof(struct mdt_rec_reint));
	rec = req_capsule_client_get(pill, &RMF_REC_REINT);
	if (rec == NULL)
		RETURN(-EFAULT);

	/* This prior initialization is needed for old_init_ucred_reint() */
	uc->uc_fsuid = rec->rn_fsuid;
	uc->uc_fsgid = rec->rn_fsgid;
	uc->uc_cap = rec->rn_cap;
	uc->uc_suppgids[0] = rec->rn_suppgid1;
	uc->uc_suppgids[1] = rec->rn_suppgid2;

	attr->la_uid = rec->rn_fsuid;
	attr->la_gid = rec->rn_fsgid;
	rr->rr_fid1 = &rec->rn_fid1;
	rr->rr_fid2 = &rec->rn_fid2;
	attr->la_ctime = rec->rn_time;
	attr->la_mtime = rec->rn_time;
	/* rename_tgt contains the mode already */
	attr->la_mode = rec->rn_mode;
	attr->la_valid = LA_UID | LA_GID | LA_CTIME | LA_MTIME | LA_MODE;

	if (req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
		mdt_set_capainfo(info, 0, rr->rr_fid1,
				 req_capsule_client_get(pill, &RMF_CAPA1));
	if (req_capsule_get_size(pill, &RMF_CAPA2, RCL_CLIENT))
		mdt_set_capainfo(info, 1, rr->rr_fid2,
				 req_capsule_client_get(pill, &RMF_CAPA2));

	rc = mdt_name_unpack(pill, &RMF_NAME, &rr->rr_name, 0);
	if (rc < 0)
		RETURN(rc);

	rc = mdt_name_unpack(pill, &RMF_SYMTGT, &rr->rr_tgt_name, 0);
	if (rc < 0)
		RETURN(rc);

	if (rec->rn_bias & MDS_VTX_BYPASS)
		ma->ma_attr_flags |= MDS_VTX_BYPASS;
	else
		ma->ma_attr_flags &= ~MDS_VTX_BYPASS;

	/* during replay the object must already exist; don't recreate it */
	info->mti_spec.no_create = !!req_is_replay(mdt_info_req(info));

	rc = mdt_dlmreq_unpack(info);
	RETURN(rc);
}
/*
 * Read and validate the LMA (Lustre Metadata Attributes) xattr of \a obj.
 *
 * \retval 0          LMA is valid, compatible, or simply not present yet
 * \retval -EOPNOTSUPP LMA carries incompatible feature flags we don't support
 * \retval -ve         other xattr read error
 */
static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
{
	struct osd_thread_info *info = osd_oti_get(env);
	struct lu_buf buf;
	int rc;
	struct lustre_mdt_attrs *lma;
	ENTRY;

	/* reuse the per-thread scratch buffer for the xattr value */
	CLASSERT(sizeof(info->oti_buf) >= sizeof(*lma));
	lma = (struct lustre_mdt_attrs *)info->oti_buf;
	buf.lb_buf = lma;
	buf.lb_len = sizeof(info->oti_buf);

	rc = osd_xattr_get(env, &obj->oo_dt, &buf, XATTR_NAME_LMA);
	if (rc > 0) {
		rc = 0;
		/* the on-disk LMA is little-endian; swab to host order */
		lustre_lma_swab(lma);
		if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
			     CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
			CWARN("%s: unsupported incompat LMA feature(s) %#x for "
			      "fid = "DFID"\n", osd_obj2dev(obj)->od_svname,
			      lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
			      PFID(lu_object_fid(&obj->oo_dt.do_lu)));
			rc = -EOPNOTSUPP;
		}
	} else if (rc == -ENODATA) {
		/* haven't initialize LMA xattr */
		rc = 0;
	}

	RETURN(rc);
}
void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data) { struct mdt_rec_link *rec; char *tmp; CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_link)); rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT); LASSERT (rec != NULL); rec->lk_opcode = REINT_LINK; rec->lk_fsuid = op_data->op_fsuid;//current->fsuid; rec->lk_fsgid = op_data->op_fsgid;//current->fsgid; rec->lk_cap = op_data->op_cap;//current->cap_effective; rec->lk_suppgid1 = op_data->op_suppgids[0]; rec->lk_suppgid2 = op_data->op_suppgids[1]; rec->lk_fid1 = op_data->op_fid1; rec->lk_fid2 = op_data->op_fid2; rec->lk_time = op_data->op_mod_time; rec->lk_bias = op_data->op_bias; mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1); mdc_pack_capa(req, &RMF_CAPA2, op_data->op_capa2); tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME); LOGL0(op_data->op_name, op_data->op_namelen, tmp); }
/*
 * Fill one LNet MD describing the \a mdidx-th LNET_MAX_IOV-sized slice of
 * a bulk descriptor, using either the kiov or kvec fragment array (and the
 * encrypted copy of it, when present).
 */
void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
			 int mdidx)
{
	int offset = mdidx * LNET_MAX_IOV;

	CLASSERT(PTLRPC_MAX_BRW_PAGES < LI_POISON);

	LASSERT(mdidx < desc->bd_md_max_brw);
	LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
	LASSERT(!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV |
				 LNET_MD_PHYS)));

	/* number of fragments in this slice, clamped to [0, LNET_MAX_IOV] */
	md->length = max(0, desc->bd_iov_count - mdidx * LNET_MAX_IOV);
	md->length = min_t(unsigned int, LNET_MAX_IOV, md->length);

	if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) {
		md->options |= LNET_MD_KIOV;
		/* prefer the encrypted fragment array when it exists */
		if (GET_ENC_KIOV(desc))
			md->start = &BD_GET_ENC_KIOV(desc, offset);
		else
			md->start = &BD_GET_KIOV(desc, offset);
	} else {
		md->options |= LNET_MD_IOVEC;
		if (GET_ENC_KVEC(desc))
			md->start = &BD_GET_ENC_KVEC(desc, offset);
		else
			md->start = &BD_GET_KVEC(desc, offset);
	}
}
/*
 * Pack a REINT setattr record plus optional LOV EA into the request.
 * With \a ealen == 0 no EA buffer is touched; with \a ea == NULL an
 * "empty" lov_user_md is packed to request LOV EA removal.
 */
void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
		      void *ea, size_t ealen)
{
	struct mdt_rec_setattr *rec;
	struct lov_user_md *lum = NULL;

	CLASSERT(sizeof(struct mdt_rec_reint) ==
		 sizeof(struct mdt_rec_setattr));
	rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
	mdc_setattr_pack_rec(rec, op_data);

	mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1);

	if (ealen == 0)
		return;

	lum = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
	if (ea == NULL) { /* Remove LOV EA */
		lum->lmm_magic = cpu_to_le32(LOV_USER_MAGIC_V1);
		lum->lmm_stripe_size = 0;
		lum->lmm_stripe_count = 0;
		lum->lmm_stripe_offset =
			(typeof(lum->lmm_stripe_offset))(-1);
	} else {
		memcpy(lum, ea, ealen);
	}
}
/**
 * Core of osc_dlm_blocking_ast() logic.
 *
 * \param dlmlock the DLM lock the blocking AST arrived for
 * \param olck    the osc lock wrapping \a dlmlock
 * \param blocking non-zero when called for a blocking (vs. cancel) AST
 */
static void osc_lock_blocking(const struct lu_env *env,
			      struct ldlm_lock *dlmlock,
			      struct osc_lock *olck, int blocking)
{
	struct cl_lock *lock = olck->ols_cl.cls_lock;

	LASSERT(olck->ols_lock == dlmlock);
	CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
	LASSERT(!osc_lock_is_lockless(olck));

	/*
	 * Lock might be still addref-ed here, if e.g., blocking ast
	 * is sent for a failed lock.
	 */
	osc_lock_unhold(olck);

	if (blocking && olck->ols_state < OLS_BLOCKED)
		/*
		 * Move osc_lock into OLS_BLOCKED before canceling the lock,
		 * because it recursively re-enters osc_lock_blocking(), with
		 * the state set to OLS_CANCELLED.
		 */
		olck->ols_state = OLS_BLOCKED;

	/*
	 * cancel and destroy lock at least once no matter how blocking ast is
	 * entered (see comment above osc_ldlm_blocking_ast() for use
	 * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
	 */
	cl_lock_cancel(env, lock);
	cl_lock_delete(env, lock);
}
/** * to load a directory entry at a time and stored it in * iterator's in-memory data structure. * * \param di, struct osd_it_ea, iterator's in memory structure * * \retval +ve, iterator reached to end * \retval 0, iterator not reached to end * \retval -ve, on error */ static int osd_dir_it_next(const struct lu_env *env, struct dt_it *di) { struct osd_zap_it *it = (struct osd_zap_it *)di; zap_attribute_t *za = &osd_oti_get(env)->oti_za; int rc; /* temp. storage should be enough for any key supported by ZFS */ CLASSERT(sizeof(za->za_name) <= sizeof(it->ozi_name)); /* * the first ->next() moves the cursor to . * the second ->next() moves the cursor to .. * then we get to the real records and have to verify any exist */ if (it->ozi_pos <= 2) { it->ozi_pos++; if (it->ozi_pos <=2) RETURN(0); } zap_cursor_advance(it->ozi_zc); /* * According to current API we need to return error if its last entry. * zap_cursor_advance() does not return any value. So we need to call * retrieve to check if there is any record. We should make * changes to Iterator API to not return status for this API */ rc = osd_index_retrieve_skip_dots(it, za); if (rc == -ENOENT) /* end of dir */ RETURN(+1); RETURN(rc); }
/*
 * Unpack a REINT rename record (pre-lu_ucred variant): credentials, both
 * FIDs, source and target names, capabilities and the DLM request.
 *
 * \retval 0       success
 * \retval -EFAULT a required capsule field is missing
 */
static int mdt_rename_unpack(struct mdt_thread_info *info)
{
	struct md_ucred *uc = mdt_ucred(info);
	struct mdt_rec_rename *rec;
	struct md_attr *ma = &info->mti_attr;
	struct lu_attr *attr = &info->mti_attr.ma_attr;
	struct mdt_reint_record *rr = &info->mti_rr;
	struct req_capsule *pill = info->mti_pill;
	int rc;
	ENTRY;

	CLASSERT(sizeof(struct mdt_rec_rename) == sizeof(struct mdt_rec_reint));
	rec = req_capsule_client_get(pill, &RMF_REC_REINT);
	if (rec == NULL)
		RETURN(-EFAULT);

	uc->mu_fsuid = rec->rn_fsuid;
	uc->mu_fsgid = rec->rn_fsgid;
	uc->mu_cap = rec->rn_cap;
	uc->mu_suppgids[0] = rec->rn_suppgid1;
	uc->mu_suppgids[1] = rec->rn_suppgid2;

	attr->la_uid = rec->rn_fsuid;
	attr->la_gid = rec->rn_fsgid;
	rr->rr_fid1 = &rec->rn_fid1;
	rr->rr_fid2 = &rec->rn_fid2;
	attr->la_ctime = rec->rn_time;
	attr->la_mtime = rec->rn_time;
	/* rename_tgt contains the mode already */
	attr->la_mode = rec->rn_mode;
	attr->la_valid = LA_UID | LA_GID | LA_CTIME | LA_MTIME | LA_MODE;

	if (req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
		mdt_set_capainfo(info, 0, rr->rr_fid1,
				 req_capsule_client_get(pill, &RMF_CAPA1));
	if (req_capsule_get_size(pill, &RMF_CAPA2, RCL_CLIENT))
		mdt_set_capainfo(info, 1, rr->rr_fid2,
				 req_capsule_client_get(pill, &RMF_CAPA2));

	info->mti_spec.sp_ck_split = !!(rec->rn_bias & MDS_CHECK_SPLIT);
	info->mti_cross_ref = !!(rec->rn_bias & MDS_CROSS_REF);
	rr->rr_name = req_capsule_client_get(pill, &RMF_NAME);
	rr->rr_tgt = req_capsule_client_get(pill, &RMF_SYMTGT);
	if (rr->rr_name == NULL || rr->rr_tgt == NULL)
		RETURN(-EFAULT);
	/* wire strings are NUL-terminated; drop the terminator from len */
	rr->rr_namelen = req_capsule_get_size(pill, &RMF_NAME, RCL_CLIENT) - 1;
	rr->rr_tgtlen = req_capsule_get_size(pill, &RMF_SYMTGT, RCL_CLIENT) - 1;
	if (!info->mti_cross_ref)
		LASSERT(rr->rr_namelen > 0 && rr->rr_tgtlen > 0);

	if (rec->rn_bias & MDS_VTX_BYPASS)
		ma->ma_attr_flags |= MDS_VTX_BYPASS;
	else
		ma->ma_attr_flags &= ~MDS_VTX_BYPASS;

	/* during replay the object must already exist; don't recreate it */
	info->mti_spec.no_create = !!req_is_replay(mdt_info_req(info));

	rc = mdt_dlmreq_unpack(info);
	RETURN(rc);
}
/*
 * Initialize a lustre_ost_attrs structure: zero every field that follows
 * the embedded LMA (starting at loa_parent_fid), then initialize the LMA
 * itself with \a fid and the given feature flags.
 */
void lustre_loa_init(struct lustre_ost_attrs *loa, const struct lu_fid *fid,
		     __u32 compat, __u32 incompat)
{
	size_t tail_off = offsetof(typeof(*loa), loa_parent_fid);

	/* the extended attrs must keep the legacy on-disk LMA size */
	CLASSERT(sizeof(*loa) == LMA_OLD_SIZE);

	memset(&loa->loa_parent_fid, 0, sizeof(*loa) - tail_off);
	lustre_lma_init(&loa->loa_lma, fid, compat, incompat);
}
/*
 * Create the first registered demo via createFuncs[0], store it in the
 * global m_pDemo, and run its init/reset sequence.
 *
 * NOTE(review): assumes createFuncs[0] is always populated before this is
 * called — CLASSERT only catches a NULL result; confirm against callers.
 */
void initDemo( DeviceDataBase* deviceData = NULL )
{
	m_pDemo = NULL;
	m_pDemo = createFuncs[0]( deviceData );
	CLASSERT( m_pDemo );
	m_pDemo->init();
	m_pDemo->reset();
}
/**
 * Pack LOV striping metadata for disk storage format (in little
 * endian byte order).
 *
 * This follows the getxattr() conventions. If \a buf_size is zero
 * then return the size needed. If \a buf_size is too small then
 * return -ERANGE. Otherwise return the size of the result.
 */
ssize_t lov_lsm_pack_v1v3(const struct lov_stripe_md *lsm, void *buf,
			  size_t buf_size)
{
	struct lov_mds_md_v1 *lmmv1 = buf;
	struct lov_mds_md_v3 *lmmv3 = buf;
	struct lov_ost_data_v1 *lmm_objects;
	size_t lmm_size;
	unsigned int i;
	ENTRY;

	lmm_size = lov_mds_md_size(lsm->lsm_entries[0]->lsme_stripe_count,
				   lsm->lsm_magic);
	if (buf_size == 0)
		RETURN(lmm_size);

	if (buf_size < lmm_size)
		RETURN(-ERANGE);

	/* lmmv1 and lmmv3 point to the same struct and have the
	 * same first fields */
	lmmv1->lmm_magic = cpu_to_le32(lsm->lsm_magic);
	lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi);
	lmmv1->lmm_stripe_size = cpu_to_le32(
				lsm->lsm_entries[0]->lsme_stripe_size);
	lmmv1->lmm_stripe_count = cpu_to_le16(
				lsm->lsm_entries[0]->lsme_stripe_count);
	lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_entries[0]->lsme_pattern);
	lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen);

	if (lsm->lsm_magic == LOV_MAGIC_V3) {
		CLASSERT(sizeof(lsm->lsm_entries[0]->lsme_pool_name) ==
			 sizeof(lmmv3->lmm_pool_name));
		strlcpy(lmmv3->lmm_pool_name,
			lsm->lsm_entries[0]->lsme_pool_name,
			sizeof(lmmv3->lmm_pool_name));
		lmm_objects = lmmv3->lmm_objects;
	} else {
		lmm_objects = lmmv1->lmm_objects;
	}

	/* released files carry no per-object stripe data */
	if (lsm->lsm_is_released)
		RETURN(lmm_size);

	for (i = 0; i < lsm->lsm_entries[0]->lsme_stripe_count; i++) {
		struct lov_oinfo *loi = lsm->lsm_entries[0]->lsme_oinfo[i];

		ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
		lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
		lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
	}

	RETURN(lmm_size);
}
/*
 * Format a binary class_uuid_t as the canonical textual UUID form
 * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" into \a out.
 */
void class_uuid_unparse(class_uuid_t uu, struct obd_uuid *out)
{
	/* view the uuid as eight 16-bit words for formatting */
	__u16 segs[sizeof(class_uuid_t) / sizeof(__u16)];

	CLASSERT(ARRAY_SIZE(segs) == 8);

	uuid_unpack(uu, segs, ARRAY_SIZE(segs));
	sprintf(out->uuid, "%04x%04x-%04x-%04x-%04x-%04x%04x%04x",
		segs[0], segs[1], segs[2], segs[3],
		segs[4], segs[5], segs[6], segs[7]);
}
/*
 * Send non-intent quota request to master.
 *
 * \param env - the environment passed by the caller
 * \param exp - is the export to use to send the acquire RPC
 * \param qbody - quota body to be packed in request
 * \param sync - synchronous or asynchronous
 * \param completion - completion callback
 * \param qqi - is the qsd_qtype_info structure to pass to the completion
 *              function
 * \param lqe - is the qid entry to be processed
 *
 * \retval 0    - success
 * \retval -ve  - appropriate errors
 */
int qsd_send_dqacq(const struct lu_env *env, struct obd_export *exp,
		   struct quota_body *qbody, bool sync,
		   qsd_req_completion_t completion, struct qsd_qtype_info *qqi,
		   struct lustre_handle *lockh, struct lquota_entry *lqe)
{
	struct ptlrpc_request *req;
	struct quota_body *req_qbody;
	struct qsd_async_args *aa;
	int rc;
	ENTRY;

	LASSERT(exp);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_QUOTA_DQACQ);
	if (req == NULL)
		GOTO(out, rc = -ENOMEM);

	/* quota acquires must fail fast rather than wait for recovery */
	req->rq_no_resend = req->rq_no_delay = 1;
	req->rq_no_retry_einprogress = 1;
	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, QUOTA_DQACQ);
	if (rc) {
		ptlrpc_request_free(req);
		GOTO(out, rc);
	}

	req->rq_request_portal = MDS_READPAGE_PORTAL;
	req_qbody = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_BODY);
	*req_qbody = *qbody;

	ptlrpc_request_set_replen(req);

	/* stash the callback context in the request's async-args area */
	CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
	aa = ptlrpc_req_async_args(req);
	aa->aa_exp = exp;
	aa->aa_qqi = qqi;
	aa->aa_arg = (void *)lqe;
	aa->aa_completion = completion;
	lustre_handle_copy(&aa->aa_lockh, lockh);

	if (sync) {
		rc = ptlrpc_queue_wait(req);
		rc = qsd_dqacq_interpret(env, req, aa, rc);
		ptlrpc_req_finished(req);
	} else {
		req->rq_interpret_reply = qsd_dqacq_interpret;
		ptlrpcd_add_req(req);
	}

	RETURN(rc);
out:
	/* request could not be sent; run the completion with the error */
	completion(env, qqi, qbody, NULL, lockh, NULL, lqe, rc);
	return rc;
}
/*
 * Register one lprocfs counter (offset by \a num_private_stats) for every
 * obd_ops method. The trailing CLASSERT ties NUM_OBD_STATS to the last
 * initialized operation so a newly added obd_op cannot be forgotten here.
 */
void lprocfs_init_ops_stats(int num_private_stats, struct lprocfs_stats *stats)
{
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, iocontrol);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, get_info);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, set_info_async);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, setup);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, precleanup);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, cleanup);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, process_config);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, postrecov);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, add_conn);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, del_conn);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, connect);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, reconnect);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, disconnect);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, fid_init);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, fid_fini);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, fid_alloc);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, statfs);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, statfs_async);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, packmd);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, unpackmd);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, create);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, destroy);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, setattr);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, setattr_async);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, getattr);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, getattr_async);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, preprw);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, commitrw);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, change_cbdata);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, find_cbdata);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, init_export);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, destroy_export);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, llog_init);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, llog_finish);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, import_event);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, notify);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, health_check);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, get_uuid);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, quotacheck);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, quotactl);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, ping);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, pool_new);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, pool_rem);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, pool_add);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, pool_del);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, getref);
	LPROCFS_OBD_OP_INIT(num_private_stats, stats, putref);

	CLASSERT(NUM_OBD_STATS == OBD_COUNTER_OFFSET(putref) + 1);
}
/*
 * Start an asynchronous OST_GETATTR RPC to fetch the object's data
 * version, optionally asking the server to flush dirty/partial data first
 * (LL_DV_RD_FLUSH / LL_DV_WR_FLUSH). Completion is signalled through
 * cbargs->opc_sync by osc_data_version_interpret().
 *
 * \retval 0    RPC queued successfully
 * \retval -ve  allocation or packing failure
 */
static int osc_io_data_version_start(const struct lu_env *env,
				     const struct cl_io_slice *slice)
{
	struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
	struct osc_io *oio = cl2osc_io(env, slice);
	struct obdo *oa = &oio->oi_oa;
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	struct osc_object *obj = cl2osc(slice->cis_obj);
	struct lov_oinfo *loi = obj->oo_oinfo;
	struct obd_export *exp = osc_export(obj);
	struct ptlrpc_request *req;
	struct ost_body *body;
	struct osc_data_version_args *dva;
	int rc;
	ENTRY;

	memset(oa, 0, sizeof(*oa));
	oa->o_oi = loi->loi_oi;
	oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;

	/* request a server-side flush before reading the version if asked */
	if (dv->dv_flags & (LL_DV_RD_FLUSH | LL_DV_WR_FLUSH)) {
		oa->o_valid |= OBD_MD_FLFLAGS;
		oa->o_flags |= OBD_FL_SRVLOCK;
		if (dv->dv_flags & LL_DV_WR_FLUSH)
			oa->o_flags |= OBD_FL_FLUSH;
	}

	init_completion(&cbargs->opc_sync);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
	if (rc < 0) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

	ptlrpc_request_set_replen(req);
	req->rq_interpret_reply = osc_data_version_interpret;
	/* stash the io context in the request's async-args area */
	CLASSERT(sizeof(*dva) <= sizeof(req->rq_async_args));
	dva = ptlrpc_req_async_args(req);
	dva->dva_oio = oio;

	ptlrpcd_add_req(req);

	RETURN(0);
}
/* packing of MDS records */

/*
 * Pack a REINT_OPEN record, optional name, security contexts and LOV EA
 * into the request capsule. \a op_data may be NULL for a bare open.
 */
void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
		   umode_t mode, __u64 rdev, __u64 flags, const void *lmm,
		   size_t lmmlen)
{
	struct mdt_rec_create *rec;
	char *tmp;
	__u64 cr_flags;

	CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_create));
	rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);

	/* XXX do something about time, uid, gid */
	rec->cr_opcode = REINT_OPEN;
	rec->cr_fsuid = from_kuid(&init_user_ns, current_fsuid());
	rec->cr_fsgid = from_kgid(&init_user_ns, current_fsgid());
	rec->cr_cap = cfs_curproc_cap_pack();
	rec->cr_mode = mode;
	cr_flags = mds_pack_open_flags(flags);
	rec->cr_rdev = rdev;
	rec->cr_umask = current_umask();
	if (op_data != NULL) {
		rec->cr_fid1 = op_data->op_fid1;
		rec->cr_fid2 = op_data->op_fid2;
		rec->cr_time = op_data->op_mod_time;
		rec->cr_suppgid1 = op_data->op_suppgids[0];
		rec->cr_suppgid2 = op_data->op_suppgids[1];
		rec->cr_bias = op_data->op_bias;
		rec->cr_open_handle_old = op_data->op_open_handle;

		if (op_data->op_name) {
			mdc_pack_name(req, &RMF_NAME, op_data->op_name,
				      op_data->op_namelen);

			if (op_data->op_bias & MDS_CREATE_VOLATILE)
				cr_flags |= MDS_OPEN_VOLATILE;
		}

		mdc_file_secctx_pack(req, op_data->op_file_secctx_name,
				     op_data->op_file_secctx,
				     op_data->op_file_secctx_size);

		/* pack SELinux policy info if any */
		mdc_file_sepol_pack(req);
	}

	if (lmm) {
		cr_flags |= MDS_OPEN_HAS_EA;
		tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
		memcpy(tmp, lmm, lmmlen);
	}
	set_mrc_cr_flags(rec, cr_flags);
}
/*
 * Unpack a REINT setxattr record: credentials, target fid, xattr name
 * and (optional) xattr value from the request capsule.
 *
 * \retval 0       success
 * \retval -EFAULT a required capsule field is missing
 */
static int mdt_setxattr_unpack(struct mdt_thread_info *info)
{
	struct mdt_reint_record *rr = &info->mti_rr;
	struct md_ucred *uc = mdt_ucred(info);
	struct lu_attr *attr = &info->mti_attr.ma_attr;
	struct req_capsule *pill = info->mti_pill;
	struct mdt_rec_setxattr *rec;
	ENTRY;

	CLASSERT(sizeof(struct mdt_rec_setxattr) ==
		 sizeof(struct mdt_rec_reint));
	rec = req_capsule_client_get(pill, &RMF_REC_REINT);
	if (rec == NULL)
		RETURN(-EFAULT);

	uc->mu_fsuid = rec->sx_fsuid;
	uc->mu_fsgid = rec->sx_fsgid;
	uc->mu_cap = rec->sx_cap;
	uc->mu_suppgids[0] = rec->sx_suppgid1;
	uc->mu_suppgids[1] = -1;

	rr->rr_opcode = rec->sx_opcode;
	rr->rr_fid1 = &rec->sx_fid;
	attr->la_valid = rec->sx_valid;
	attr->la_ctime = rec->sx_time;
	attr->la_size = rec->sx_size;
	attr->la_flags = rec->sx_flags;

	if (req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
		mdt_set_capainfo(info, 0, rr->rr_fid1,
				 req_capsule_client_get(pill, &RMF_CAPA1));
	else
		mdt_set_capainfo(info, 0, rr->rr_fid1, BYPASS_CAPA);

	rr->rr_name = req_capsule_client_get(pill, &RMF_NAME);
	if (rr->rr_name == NULL)
		RETURN(-EFAULT);
	/* wire strings are NUL-terminated; drop the terminator from len */
	rr->rr_namelen = req_capsule_get_size(pill, &RMF_NAME, RCL_CLIENT) - 1;
	LASSERT(rr->rr_namelen > 0);

	/* the value buffer is only present for set (vs. remove) operations */
	rr->rr_eadatalen = req_capsule_get_size(pill, &RMF_EADATA, RCL_CLIENT);
	if (rr->rr_eadatalen > 0) {
		rr->rr_eadata = req_capsule_client_get(pill, &RMF_EADATA);
		if (rr->rr_eadata == NULL)
			RETURN(-EFAULT);
	}

	RETURN(0);
}
/* packing of MDS records */

/*
 * Pack a REINT_OPEN record, parent capability, optional name and LOV EA
 * into the request capsule. \a op_data may be NULL for a bare open.
 */
void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
		   umode_t mode, __u64 rdev, __u64 flags, const void *lmm,
		   size_t lmmlen)
{
	struct mdt_rec_create *rec;
	char *tmp;
	__u64 cr_flags;

	CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_create));
	rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);

	/* XXX do something about time, uid, gid */
	rec->cr_opcode = REINT_OPEN;
	rec->cr_fsuid = from_kuid(&init_user_ns, current_fsuid());
	rec->cr_fsgid = from_kgid(&init_user_ns, current_fsgid());
	rec->cr_cap = cfs_curproc_cap_pack();
	rec->cr_mode = mode;
	cr_flags = mds_pack_open_flags(flags);
	rec->cr_rdev = rdev;
	rec->cr_umask = current_umask();
	if (op_data != NULL) {
		rec->cr_fid1 = op_data->op_fid1;
		rec->cr_fid2 = op_data->op_fid2;
		rec->cr_time = op_data->op_mod_time;
		rec->cr_suppgid1 = op_data->op_suppgids[0];
		rec->cr_suppgid2 = op_data->op_suppgids[1];
		rec->cr_bias = op_data->op_bias;
		rec->cr_old_handle = op_data->op_handle;

		mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1);
		/* the next buffer is child capa, which is used for replay,
		 * will be packed from the data in reply message. */

		if (op_data->op_name) {
			mdc_pack_name(req, &RMF_NAME, op_data->op_name,
				      op_data->op_namelen);

			if (op_data->op_bias & MDS_CREATE_VOLATILE)
				cr_flags |= MDS_OPEN_VOLATILE;
		}
	}

	if (lmm) {
		cr_flags |= MDS_OPEN_HAS_EA;
		tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
		memcpy(tmp, lmm, lmmlen);
	}
	set_mrc_cr_flags(rec, cr_flags);
}
/*
 * Unpack a REINT link record (pre-lu_ucred variant): credentials, source
 * and target FIDs, link name, capabilities and DLM request.
 *
 * \retval 0       success
 * \retval -EFAULT a required capsule field is missing
 */
static int mdt_link_unpack(struct mdt_thread_info *info)
{
	struct md_ucred *uc = mdt_ucred(info);
	struct mdt_rec_link *rec;
	struct lu_attr *attr = &info->mti_attr.ma_attr;
	struct mdt_reint_record *rr = &info->mti_rr;
	struct req_capsule *pill = info->mti_pill;
	int rc;
	ENTRY;

	CLASSERT(sizeof(struct mdt_rec_link) == sizeof(struct mdt_rec_reint));
	rec = req_capsule_client_get(pill, &RMF_REC_REINT);
	if (rec == NULL)
		RETURN(-EFAULT);

	uc->mu_fsuid = rec->lk_fsuid;
	uc->mu_fsgid = rec->lk_fsgid;
	uc->mu_cap = rec->lk_cap;
	uc->mu_suppgids[0] = rec->lk_suppgid1;
	uc->mu_suppgids[1] = rec->lk_suppgid2;

	attr->la_uid = rec->lk_fsuid;
	attr->la_gid = rec->lk_fsgid;
	rr->rr_fid1 = &rec->lk_fid1;
	rr->rr_fid2 = &rec->lk_fid2;
	attr->la_ctime = rec->lk_time;
	attr->la_mtime = rec->lk_time;
	attr->la_valid = LA_UID | LA_GID | LA_CTIME | LA_MTIME;

	if (req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
		mdt_set_capainfo(info, 0, rr->rr_fid1,
				 req_capsule_client_get(pill, &RMF_CAPA1));
	if (req_capsule_get_size(pill, &RMF_CAPA2, RCL_CLIENT))
		mdt_set_capainfo(info, 1, rr->rr_fid2,
				 req_capsule_client_get(pill, &RMF_CAPA2));

	info->mti_spec.sp_ck_split = !!(rec->lk_bias & MDS_CHECK_SPLIT);
	info->mti_cross_ref = !!(rec->lk_bias & MDS_CROSS_REF);

	rr->rr_name = req_capsule_client_get(pill, &RMF_NAME);
	if (rr->rr_name == NULL)
		RETURN(-EFAULT);
	/* wire strings are NUL-terminated; drop the terminator from len */
	rr->rr_namelen = req_capsule_get_size(pill, &RMF_NAME, RCL_CLIENT) - 1;
	if (!info->mti_cross_ref)
		LASSERT(rr->rr_namelen > 0);

	rc = mdt_dlmreq_unpack(info);
	RETURN(rc);
}
/*
 * Unpack a REINT link record: credentials, source and target FIDs, link
 * name, capabilities and DLM request.
 *
 * \retval 0       success
 * \retval -EFAULT a required capsule field is missing
 */
static int mdt_link_unpack(struct mdt_thread_info *info)
{
	struct lu_ucred *uc = mdt_ucred(info);
	struct mdt_rec_link *rec;
	struct lu_attr *attr = &info->mti_attr.ma_attr;
	struct mdt_reint_record *rr = &info->mti_rr;
	struct req_capsule *pill = info->mti_pill;
	int rc;
	ENTRY;

	CLASSERT(sizeof(struct mdt_rec_link) == sizeof(struct mdt_rec_reint));
	rec = req_capsule_client_get(pill, &RMF_REC_REINT);
	if (rec == NULL)
		RETURN(-EFAULT);

	/* This prior initialization is needed for old_init_ucred_reint() */
	uc->uc_fsuid = rec->lk_fsuid;
	uc->uc_fsgid = rec->lk_fsgid;
	uc->uc_cap = rec->lk_cap;
	uc->uc_suppgids[0] = rec->lk_suppgid1;
	uc->uc_suppgids[1] = rec->lk_suppgid2;

	attr->la_uid = rec->lk_fsuid;
	attr->la_gid = rec->lk_fsgid;
	rr->rr_fid1 = &rec->lk_fid1;
	rr->rr_fid2 = &rec->lk_fid2;
	attr->la_ctime = rec->lk_time;
	attr->la_mtime = rec->lk_time;
	attr->la_valid = LA_UID | LA_GID | LA_CTIME | LA_MTIME;

	if (req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
		mdt_set_capainfo(info, 0, rr->rr_fid1,
				 req_capsule_client_get(pill, &RMF_CAPA1));
	if (req_capsule_get_size(pill, &RMF_CAPA2, RCL_CLIENT))
		mdt_set_capainfo(info, 1, rr->rr_fid2,
				 req_capsule_client_get(pill, &RMF_CAPA2));

	rr->rr_name = req_capsule_client_get(pill, &RMF_NAME);
	if (rr->rr_name == NULL)
		RETURN(-EFAULT);
	/* wire strings are NUL-terminated; drop the terminator from len */
	rr->rr_namelen = req_capsule_get_size(pill, &RMF_NAME, RCL_CLIENT) - 1;
	LASSERT(rr->rr_namelen > 0);

	rc = mdt_dlmreq_unpack(info);
	RETURN(rc);
}
/*
 * Query state of the network interface \a name.
 *
 * \param name the interface name (must be shorter than IFNAMSIZ)
 * \param up   set to 1 if the interface is up, 0 otherwise
 * \param ip   set to the interface's IPv4 address in host byte order,
 *             or 0 when the interface is down
 *
 * \retval 0    success
 * \retval -ve  invalid name or ioctl failure
 */
int libcfs_ipif_query (char *name, int *up, __u32 *ip)
{
	struct ifreq ifr;
	int nob;
	int rc;
	__u32 val;

	nob = strlen(name);
	if (nob >= IFNAMSIZ) {
		CERROR("Interface name %s too long\n", name);
		return -EINVAL;
	}

	/* strcpy below is safe: nob < IFNAMSIZ <= sizeof(ifr.ifr_name) */
	CLASSERT(sizeof(ifr.ifr_name) >= IFNAMSIZ);
	strcpy(ifr.ifr_name, name);

	rc = libcfs_sock_ioctl(SIOCGIFFLAGS, (unsigned long)&ifr);
	if (rc != 0) {
		CERROR("Can't get flags for interface %s\n", name);
		return rc;
	}

	if ((ifr.ifr_flags & IFF_UP) == 0) {
		CDEBUG(D_NET, "Interface %s down\n", name);
		*up = 0;
		*ip = 0;
		return 0;
	}

	*up = 1;

	/* re-seed ifr: the flags ioctl reused the same union storage */
	strcpy(ifr.ifr_name, name);
	ifr.ifr_addr.sa_family = AF_INET;
	rc = libcfs_sock_ioctl(SIOCGIFADDR, (unsigned long)&ifr);
	if (rc != 0) {
		CERROR("Can't get IP address for interface %s\n", name);
		return rc;
	}

	val = ((struct sockaddr_in *)&ifr.ifr_addr)->sin_addr.s_addr;
	*ip = ntohl(val);

	return 0;
}
/*
 * Fill one LNet MD describing the \a mdidx-th LNET_MAX_IOV-sized slice of
 * a bulk descriptor's kiov array, preferring the encrypted copy when the
 * bulk has been encrypted.
 */
void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
			 int mdidx)
{
	CLASSERT(PTLRPC_MAX_BRW_PAGES < LI_POISON);

	LASSERT(mdidx < desc->bd_md_max_brw);
	LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
	LASSERT(!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV |
				 LNET_MD_PHYS)));

	md->options |= LNET_MD_KIOV;
	/* number of fragments in this slice, clamped to [0, LNET_MAX_IOV] */
	md->length = max(0, desc->bd_iov_count - mdidx * LNET_MAX_IOV);
	md->length = min_t(unsigned int, LNET_MAX_IOV, md->length);
	if (desc->bd_enc_iov)
		md->start = &desc->bd_enc_iov[mdidx * LNET_MAX_IOV];
	else
		md->start = &desc->bd_iov[mdidx * LNET_MAX_IOV];
}
/* packing of MDS records */

/*
 * Pack a REINT_CREATE record, the new entry name, optional EA data and
 * security contexts into the request capsule.
 */
void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
		     const void *data, size_t datalen, umode_t mode,
		     uid_t uid, gid_t gid, cfs_cap_t cap_effective, __u64 rdev)
{
	struct mdt_rec_create *rec;
	char *tmp;
	__u64 flags;

	CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_create));
	rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);

	rec->cr_opcode = REINT_CREATE;
	rec->cr_fsuid = uid;
	rec->cr_fsgid = gid;
	rec->cr_cap = cap_effective;
	rec->cr_fid1 = op_data->op_fid1;
	rec->cr_fid2 = op_data->op_fid2;
	rec->cr_mode = mode;
	rec->cr_rdev = rdev;
	rec->cr_time = op_data->op_mod_time;
	rec->cr_suppgid1 = op_data->op_suppgids[0];
	rec->cr_suppgid2 = op_data->op_suppgids[1];
	flags = 0;
	if (op_data->op_bias & MDS_CREATE_VOLATILE)
		flags |= MDS_OPEN_VOLATILE;
	set_mrc_cr_flags(rec, flags);
	rec->cr_bias = op_data->op_bias;
	rec->cr_umask = current_umask();

	mdc_pack_name(req, &RMF_NAME, op_data->op_name, op_data->op_namelen);

	if (data) {
		tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
		memcpy(tmp, data, datalen);
	}

	mdc_file_secctx_pack(req, op_data->op_file_secctx_name,
			     op_data->op_file_secctx,
			     op_data->op_file_secctx_size);

	/* pack SELinux policy info if any */
	mdc_file_sepol_pack(req);
}
/*
 * Pack intent-close data (lease handle, data version, target fid) into
 * the close request; for resync-done closes also pack the resync id
 * array, inline when it fits and in the RMF_U32 buffer otherwise.
 */
static void mdc_close_intent_pack(struct ptlrpc_request *req,
				  struct md_op_data *op_data)
{
	struct close_data *data;
	struct ldlm_lock *lock;
	enum mds_op_bias bias = op_data->op_bias;

	if (!(bias & (MDS_CLOSE_INTENT | MDS_CLOSE_MIGRATE)))
		return;

	data = req_capsule_client_get(&req->rq_pill, &RMF_CLOSE_DATA);
	LASSERT(data != NULL);

	lock = ldlm_handle2lock(&op_data->op_lease_handle);
	if (lock != NULL) {
		data->cd_handle = lock->l_remote_handle;
		LDLM_LOCK_PUT(lock);
	}
	/* drop the local lease lock; the server takes over from here */
	ldlm_cli_cancel(&op_data->op_lease_handle, LCF_LOCAL);

	data->cd_data_version = op_data->op_data_version;
	data->cd_fid = op_data->op_fid2;

	if (bias & MDS_CLOSE_LAYOUT_SPLIT) {
		data->cd_mirror_id = op_data->op_mirror_id;
	} else if (bias & MDS_CLOSE_RESYNC_DONE) {
		struct close_data_resync_done *sync = &data->cd_resync;

		CLASSERT(sizeof(data->cd_resync) <= sizeof(data->cd_reserved));
		sync->resync_count = op_data->op_data_size / sizeof(__u32);
		if (sync->resync_count <= INLINE_RESYNC_ARRAY_SIZE) {
			memcpy(sync->resync_ids_inline, op_data->op_data,
			       op_data->op_data_size);
		} else {
			size_t count = sync->resync_count;

			memcpy(req_capsule_client_get(&req->rq_pill, &RMF_U32),
			       op_data->op_data, count * sizeof(__u32));
		}
	}
}
/* packing of MDS records */

/*
 * Pack a REINT_CREATE record (pre-2.x variant), the parent capability,
 * the new entry name and optional EA data into the request capsule.
 */
void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
		     const void *data, int datalen, __u32 mode, __u32 uid,
		     __u32 gid, cfs_cap_t cap_effective, __u64 rdev)
{
	struct mdt_rec_create *rec;
	char *tmp;
	__u64 flags;

	CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_create));
	rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);

	rec->cr_opcode = REINT_CREATE;
	rec->cr_fsuid = uid;
	rec->cr_fsgid = gid;
	rec->cr_cap = cap_effective;
	rec->cr_fid1 = op_data->op_fid1;
	rec->cr_fid2 = op_data->op_fid2;
	rec->cr_mode = mode;
	rec->cr_rdev = rdev;
	rec->cr_time = op_data->op_mod_time;
	rec->cr_suppgid1 = op_data->op_suppgids[0];
	rec->cr_suppgid2 = op_data->op_suppgids[1];
	flags = op_data->op_flags & MF_SOM_LOCAL_FLAGS;
	if (op_data->op_bias & MDS_CREATE_VOLATILE)
		flags |= MDS_OPEN_VOLATILE;
	set_mrc_cr_flags(rec, flags);
	rec->cr_bias = op_data->op_bias;
	rec->cr_umask = current_umask();

	mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1);

	tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
	LOGL0(op_data->op_name, op_data->op_namelen, tmp);

	if (data) {
		tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
		memcpy(tmp, data, datalen);
	}
}