static int llog_client_destroy(const struct lu_env *env,
			       struct llog_handle *loghandle)
{
	struct obd_import *imp;
	struct ptlrpc_request *req = NULL;
	struct llogd_body *body;
	int rc;

	LLOG_CLIENT_ENTRY(loghandle->lgh_ctxt, imp);
	req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_DESTROY,
					LUSTRE_LOG_VERSION,
					LLOG_ORIGIN_HANDLE_DESTROY);
	if (req == NULL)
		GOTO(err_exit, rc = -ENOMEM);

	body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
	body->lgd_logid = loghandle->lgh_id;
	body->lgd_llh_flags = loghandle->lgh_hdr->llh_flags;

	if (!(body->lgd_llh_flags & LLOG_F_IS_PLAIN))
		CERROR("%s: wrong llog flags %x\n",
		       imp->imp_obd->obd_name, body->lgd_llh_flags);

	ptlrpc_request_set_replen(req);
	rc = ptlrpc_queue_wait(req);

	ptlrpc_req_finished(req);
err_exit:
	LLOG_CLIENT_EXIT(loghandle->lgh_ctxt, imp);
	return rc;
}
int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
		   struct obd_quotactl *oqctl)
{
	struct client_obd *cli = &exp->exp_obd->u.cli;
	struct ptlrpc_request *req;
	struct obd_quotactl *body;
	int rc;
	ENTRY;

	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
					&RQF_OST_QUOTACHECK, LUSTRE_OST_VERSION,
					OST_QUOTACHECK);
	if (req == NULL)
		RETURN(-ENOMEM);

	body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
	*body = *oqctl;

	ptlrpc_request_set_replen(req);

	/* the next poll will find -ENODATA, that means quotacheck is
	 * going on */
	cli->cl_qchk_stat = -ENODATA;
	rc = ptlrpc_queue_wait(req);
	if (rc)
		cli->cl_qchk_stat = rc;
	ptlrpc_req_finished(req);
	RETURN(rc);
}
static int llog_client_next_block(const struct lu_env *env,
				  struct llog_handle *loghandle,
				  int *cur_idx, int next_idx,
				  __u64 *cur_offset, void *buf, int len)
{
	struct obd_import *imp;
	struct ptlrpc_request *req = NULL;
	struct llogd_body *body;
	void *ptr;
	int rc;

	LLOG_CLIENT_ENTRY(loghandle->lgh_ctxt, imp);
	req = ptlrpc_request_alloc_pack(imp,
					&RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK,
					LUSTRE_LOG_VERSION,
					LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
	if (!req) {
		rc = -ENOMEM;
		goto err_exit;
	}

	body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
	body->lgd_logid = loghandle->lgh_id;
	body->lgd_ctxt_idx = loghandle->lgh_ctxt->loc_idx - 1;
	body->lgd_llh_flags = loghandle->lgh_hdr->llh_flags;
	body->lgd_index = next_idx;
	body->lgd_saved_index = *cur_idx;
	body->lgd_len = len;
	body->lgd_cur_offset = *cur_offset;

	req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER, len);
	ptlrpc_request_set_replen(req);
	rc = ptlrpc_queue_wait(req);
	if (rc)
		goto out;

	body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
	if (!body) {
		rc = -EFAULT;
		goto out;
	}

	/* The log records are swabbed as they are processed */
	ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA);
	if (!ptr) {
		rc = -EFAULT;
		goto out;
	}

	*cur_idx = body->lgd_saved_index;
	*cur_offset = body->lgd_cur_offset;

	memcpy(buf, ptr, len);
out:
	ptlrpc_req_finished(req);
err_exit:
	LLOG_CLIENT_EXIT(loghandle->lgh_ctxt, imp);
	return rc;
}
static int llog_client_read_header(const struct lu_env *env,
				   struct llog_handle *handle)
{
	struct obd_import *imp;
	struct ptlrpc_request *req = NULL;
	struct llogd_body *body;
	struct llog_log_hdr *hdr;
	struct llog_rec_hdr *llh_hdr;
	int rc;

	LLOG_CLIENT_ENTRY(handle->lgh_ctxt, imp);
	req = ptlrpc_request_alloc_pack(imp,
					&RQF_LLOG_ORIGIN_HANDLE_READ_HEADER,
					LUSTRE_LOG_VERSION,
					LLOG_ORIGIN_HANDLE_READ_HEADER);
	if (req == NULL) {
		rc = -ENOMEM;
		goto err_exit;
	}

	body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
	body->lgd_logid = handle->lgh_id;
	body->lgd_ctxt_idx = handle->lgh_ctxt->loc_idx - 1;
	body->lgd_llh_flags = handle->lgh_hdr->llh_flags;

	ptlrpc_request_set_replen(req);
	rc = ptlrpc_queue_wait(req);
	if (rc)
		goto out;

	hdr = req_capsule_server_get(&req->rq_pill, &RMF_LLOG_LOG_HDR);
	if (hdr == NULL) {
		rc = -EFAULT;
		goto out;
	}

	memcpy(handle->lgh_hdr, hdr, sizeof(*hdr));
	handle->lgh_last_idx = handle->lgh_hdr->llh_tail.lrt_index;

	/* sanity checks */
	llh_hdr = &handle->lgh_hdr->llh_hdr;
	if (llh_hdr->lrh_type != LLOG_HDR_MAGIC) {
		CERROR("bad log header magic: %#x (expecting %#x)\n",
		       llh_hdr->lrh_type, LLOG_HDR_MAGIC);
		rc = -EIO;
	} else if (llh_hdr->lrh_len != LLOG_CHUNK_SIZE) {
		CERROR("incorrectly sized log header: %#x (expecting %#x)\n",
		       llh_hdr->lrh_len, LLOG_CHUNK_SIZE);
		CERROR("you may need to re-run lconf --write_conf.\n");
		rc = -EIO;
	}
out:
	ptlrpc_req_finished(req);
err_exit:
	LLOG_CLIENT_EXIT(handle->lgh_ctxt, imp);
	return rc;
}
/*
 * Send non-intent quota request to master.
 *
 * \param env    - the environment passed by the caller
 * \param exp    - is the export to use to send the acquire RPC
 * \param qbody  - quota body to be packed in request
 * \param sync   - synchronous or asynchronous
 * \param completion - completion callback
 * \param qqi    - is the qsd_qtype_info structure to pass to the completion
 *                 function
 * \param lockh  - is the lock handle to pass to the completion function
 * \param lqe    - is the qid entry to be processed
 *
 * \retval 0     - success
 * \retval -ve   - appropriate errors
 */
int qsd_send_dqacq(const struct lu_env *env, struct obd_export *exp,
		   struct quota_body *qbody, bool sync,
		   qsd_req_completion_t completion, struct qsd_qtype_info *qqi,
		   struct lustre_handle *lockh, struct lquota_entry *lqe)
{
	struct ptlrpc_request *req;
	struct quota_body *req_qbody;
	struct qsd_async_args *aa;
	int rc;
	ENTRY;

	LASSERT(exp);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_QUOTA_DQACQ);
	if (req == NULL)
		GOTO(out, rc = -ENOMEM);

	req->rq_no_resend = req->rq_no_delay = 1;
	req->rq_no_retry_einprogress = 1;
	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, QUOTA_DQACQ);
	if (rc) {
		ptlrpc_request_free(req);
		GOTO(out, rc);
	}

	req->rq_request_portal = MDS_READPAGE_PORTAL;
	req_qbody = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_BODY);
	*req_qbody = *qbody;

	ptlrpc_request_set_replen(req);

	CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
	aa = ptlrpc_req_async_args(req);
	aa->aa_exp = exp;
	aa->aa_qqi = qqi;
	aa->aa_arg = (void *)lqe;
	aa->aa_completion = completion;
	lustre_handle_copy(&aa->aa_lockh, lockh);

	if (sync) {
		rc = ptlrpc_queue_wait(req);
		rc = qsd_dqacq_interpret(env, req, aa, rc);
		ptlrpc_req_finished(req);
	} else {
		req->rq_interpret_reply = qsd_dqacq_interpret;
		ptlrpcd_add_req(req);
	}

	RETURN(rc);
out:
	completion(env, qqi, qbody, NULL, lockh, NULL, lqe, rc);
	return rc;
}
static int llog_client_prev_block(const struct lu_env *env,
				  struct llog_handle *loghandle,
				  int prev_idx, void *buf, int len)
{
	struct obd_import *imp;
	struct ptlrpc_request *req = NULL;
	struct llogd_body *body;
	void *ptr;
	int rc;

	LLOG_CLIENT_ENTRY(loghandle->lgh_ctxt, imp);
	req = ptlrpc_request_alloc_pack(imp,
					&RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK,
					LUSTRE_LOG_VERSION,
					LLOG_ORIGIN_HANDLE_PREV_BLOCK);
	if (!req) {
		rc = -ENOMEM;
		goto err_exit;
	}

	body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
	body->lgd_logid = loghandle->lgh_id;
	body->lgd_ctxt_idx = loghandle->lgh_ctxt->loc_idx - 1;
	body->lgd_llh_flags = loghandle->lgh_hdr->llh_flags;
	body->lgd_index = prev_idx;
	body->lgd_len = len;

	req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER, len);
	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc)
		goto out;

	body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
	if (!body) {
		rc = -EFAULT;
		goto out;
	}

	ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA);
	if (!ptr) {
		rc = -EFAULT;
		goto out;
	}

	memcpy(buf, ptr, len);
out:
	ptlrpc_req_finished(req);
err_exit:
	LLOG_CLIENT_EXIT(loghandle->lgh_ctxt, imp);
	return rc;
}
/* mdc_setattr does its own semaphore handling */
static int mdc_reint(struct ptlrpc_request *request, int level)
{
	int rc;

	request->rq_send_state = level;

	mdc_get_mod_rpc_slot(request, NULL);
	rc = ptlrpc_queue_wait(request);
	mdc_put_mod_rpc_slot(request, NULL);
	if (rc)
		CDEBUG(D_INFO, "error in handling %d\n", rc);
	else if (!req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY))
		rc = -EPROTO;

	return rc;
}
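/*
 * All of the synchronous senders in this file follow the same ptlrpc idiom:
 * allocate and pack a request against a request-format descriptor, fill the
 * client-side capsule buffers, fix the reply length, block in
 * ptlrpc_queue_wait(), unpack the server-side buffers, and unconditionally
 * drop the request reference with ptlrpc_req_finished(). A minimal sketch of
 * that skeleton follows; RQF_EXAMPLE, RMF_EXAMPLE_BODY, EXAMPLE_VERSION,
 * EXAMPLE_OPC and struct example_body are hypothetical placeholders, not
 * real descriptors from the tree.
 */
static int example_sync_rpc(struct obd_export *exp, struct example_body *in,
			    struct example_body *out)
{
	struct ptlrpc_request *req;
	struct example_body *body;
	int rc;

	/* allocate the request and pack the message buffers in one step */
	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_EXAMPLE,
					EXAMPLE_VERSION, EXAMPLE_OPC);
	if (req == NULL)
		return -ENOMEM;

	/* fill the client-side buffer described by the request format */
	body = req_capsule_client_get(&req->rq_pill, &RMF_EXAMPLE_BODY);
	*body = *in;

	/* fix the reply buffer sizes before sending */
	ptlrpc_request_set_replen(req);

	/* send and block until the reply (or an error) comes back */
	rc = ptlrpc_queue_wait(req);
	if (rc == 0) {
		body = req_capsule_server_get(&req->rq_pill,
					      &RMF_EXAMPLE_BODY);
		if (body == NULL)
			rc = -EPROTO;
		else
			*out = *body;
	}

	/* drop our reference; the request is freed once unused */
	ptlrpc_req_finished(req);
	return rc;
}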
int ptlrpc_obd_ping(struct obd_device *obd)
{
	int rc;
	struct ptlrpc_request *req;

	req = ptlrpc_prep_ping(obd->u.cli.cl_import);
	if (!req)
		return -ENOMEM;

	req->rq_send_state = LUSTRE_IMP_FULL;

	rc = ptlrpc_queue_wait(req);

	ptlrpc_req_finished(req);

	return rc;
}
int ptlrpc_obd_ping(struct obd_device *obd)
{
	int rc;
	struct ptlrpc_request *req;
	ENTRY;

	req = ptlrpc_prep_ping(obd->u.cli.cl_import);
	if (req == NULL)
		RETURN(-ENOMEM);

	req->rq_send_state = LUSTRE_IMP_FULL;

	rc = ptlrpc_queue_wait(req);

	ptlrpc_req_finished(req);

	RETURN(rc);
}
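/*
 * A hedged usage sketch for the ping helper above: a hypothetical health
 * check issued against the import behind a client obd. Only
 * ptlrpc_obd_ping() is from the source; the caller and its error handling
 * are illustrative.
 */
static int example_check_server(struct obd_device *obd)
{
	int rc;

	rc = ptlrpc_obd_ping(obd);
	if (rc)
		CERROR("%s: ping failed: rc = %d\n", obd->obd_name, rc);
	return rc;
}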
int client_quota_check(struct obd_device *unused, struct obd_export *exp,
		       struct obd_quotactl *oqctl)
{
	struct client_obd *cli = &exp->exp_obd->u.cli;
	struct ptlrpc_request *req;
	struct obd_quotactl *body;
	const struct req_format *rf;
	int ver, opc, rc;
	ENTRY;

	if (!strcmp(exp->exp_obd->obd_type->typ_name, LUSTRE_MDC_NAME)) {
		rf = &RQF_MDS_QUOTACHECK;
		ver = LUSTRE_MDS_VERSION;
		opc = MDS_QUOTACHECK;
	} else if (!strcmp(exp->exp_obd->obd_type->typ_name,
			   LUSTRE_OSC_NAME)) {
		rf = &RQF_OST_QUOTACHECK;
		ver = LUSTRE_OST_VERSION;
		opc = OST_QUOTACHECK;
	} else {
		RETURN(-EINVAL);
	}

	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), rf, ver, opc);
	if (req == NULL)
		RETURN(-ENOMEM);

	body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
	*body = *oqctl;

	ptlrpc_request_set_replen(req);

	/* the next poll will find -ENODATA, that means quotacheck is
	 * going on */
	cli->cl_qchk_stat = -ENODATA;
	rc = ptlrpc_queue_wait(req);
	if (rc)
		cli->cl_qchk_stat = rc;
	ptlrpc_req_finished(req);
	RETURN(rc);
}
static int target_quotacheck_callback(struct obd_export *exp,
				      struct obd_quotactl *oqctl)
{
	struct ptlrpc_request *req;
	struct obd_quotactl *body;
	int rc;
	ENTRY;

	req = ptlrpc_request_alloc_pack(exp->exp_imp_reverse,
					&RQF_QC_CALLBACK, LUSTRE_OBD_VERSION,
					OBD_QC_CALLBACK);
	if (req == NULL)
		RETURN(-ENOMEM);

	body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
	*body = *oqctl;

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	ptlrpc_req_finished(req);

	RETURN(rc);
}
int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
		 struct obd_quotactl *oqctl)
{
	struct ptlrpc_request *req;
	struct obd_quotactl *oqc;
	int rc;
	ENTRY;

	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
					&RQF_OST_QUOTACTL, LUSTRE_OST_VERSION,
					OST_QUOTACTL);
	if (req == NULL)
		RETURN(-ENOMEM);

	oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
	*oqc = *oqctl;

	ptlrpc_request_set_replen(req);
	ptlrpc_at_set_req_timeout(req);
	req->rq_no_resend = 1;

	rc = ptlrpc_queue_wait(req);
	if (rc)
		CERROR("ptlrpc_queue_wait failed, rc: %d\n", rc);

	if (req->rq_repmsg &&
	    (oqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL))) {
		*oqctl = *oqc;
	} else if (!rc) {
		CERROR("Can't unpack obd_quotactl\n");
		rc = -EPROTO;
	}

	ptlrpc_req_finished(req);
	RETURN(rc);
}
/*
 * Fetch a global or slave index from the QMT.
 *
 * \param env    - the environment passed by the caller
 * \param exp    - is the export to use to issue the OBD_IDX_READ RPC
 * \param ii     - is the index information to be packed in the request;
 *                 on success, the index information returned by the server
 *                 is copied there.
 * \param npages - is the number of pages in the pages array
 * \param pages  - is an array of @npages pages
 * \param need_swab - set on return if the records transferred into the
 *                 pages still need to be swabbed
 *
 * \retval 0     - success
 * \retval -ve   - appropriate errors
 */
int qsd_fetch_index(const struct lu_env *env, struct obd_export *exp,
		    struct idx_info *ii, unsigned int npages,
		    struct page **pages, bool *need_swab)
{
	struct ptlrpc_request *req;
	struct idx_info *req_ii;
	struct ptlrpc_bulk_desc *desc;
	int rc, i;
	ENTRY;

	LASSERT(exp);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OBD_IDX_READ);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, OBD_IDX_READ);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	req->rq_request_portal = MDS_READPAGE_PORTAL;
	ptlrpc_at_set_req_timeout(req);

	/* allocate bulk descriptor */
	desc = ptlrpc_prep_bulk_imp(req, npages, 1, BULK_PUT_SINK,
				    MDS_BULK_PORTAL);
	if (desc == NULL) {
		ptlrpc_request_free(req);
		RETURN(-ENOMEM);
	}

	/* req now owns desc and will free it when it gets freed */
	for (i = 0; i < npages; i++)
		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);

	/* pack index information in request */
	req_ii = req_capsule_client_get(&req->rq_pill, &RMF_IDX_INFO);
	*req_ii = *ii;

	ptlrpc_request_set_replen(req);

	/* send request to master and wait for RPC to complete */
	rc = ptlrpc_queue_wait(req);
	if (rc)
		GOTO(out, rc);

	rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk,
					  req->rq_bulk->bd_nob_transferred);
	if (rc < 0)
		GOTO(out, rc);
	else
		/* sptlrpc_cli_unwrap_bulk_read() returns the number of bytes
		 * transferred */
		rc = 0;

	req_ii = req_capsule_server_get(&req->rq_pill, &RMF_IDX_INFO);
	*ii = *req_ii;

	*need_swab = ptlrpc_rep_need_swab(req);

	EXIT;
out:
	ptlrpc_req_finished(req);
	return rc;
}
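/*
 * A hypothetical caller sketch for qsd_fetch_index(): allocate a small page
 * array to back the bulk transfer, issue the read, then release the pages.
 * alloc_page()/__free_page() are standard kernel calls; the idx_info setup
 * and the parsing of the returned records are elided.
 */
static int example_fetch(const struct lu_env *env, struct obd_export *exp,
			 struct idx_info *ii)
{
	struct page *pages[4] = { NULL };
	bool need_swab;
	int i, rc = 0;

	/* back the bulk descriptor with a few freshly allocated pages */
	for (i = 0; i < 4; i++) {
		pages[i] = alloc_page(GFP_NOFS);
		if (pages[i] == NULL) {
			rc = -ENOMEM;
			goto out;
		}
	}

	rc = qsd_fetch_index(env, exp, ii, 4, pages, &need_swab);
	/* on success the index records now live in the pages; a real caller
	 * would parse them here (and swab them if need_swab is set) */
out:
	for (i = 0; i < 4; i++)
		if (pages[i] != NULL)
			__free_page(pages[i]);
	return rc;
}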
/**
 * asks OST to clean up precreate orphans
 * and gets next id for new objects
 */
static int osp_precreate_cleanup_orphans(struct lu_env *env,
					 struct osp_device *d)
{
	struct osp_thread_info *osi = osp_env_info(env);
	struct lu_fid *last_fid = &osi->osi_fid;
	struct ptlrpc_request *req = NULL;
	struct obd_import *imp;
	struct ost_body *body;
	struct l_wait_info lwi = { 0 };
	int update_status = 0;
	int rc;
	int diff;
	ENTRY;

	/*
	 * wait for local recovery to finish, so we can cleanup orphans.
	 * orphans are all objects since "last used" (assigned), but
	 * there might be objects reserved and in some cases they won't
	 * be used. we can't clean them up till we're sure they won't be
	 * used. nor can we allow new reservations, because they may end
	 * up as orphans being cleaned up below. so we block new
	 * reservations and wait till all reserved objects are either
	 * used or released.
	 */
	spin_lock(&d->opd_pre_lock);
	d->opd_pre_recovering = 1;
	spin_unlock(&d->opd_pre_lock);
	/*
	 * The locking above makes sure the opd_pre_reserved check below will
	 * catch all osp_precreate_reserve() calls that find
	 * "!opd_pre_recovering".
	 */
	l_wait_event(d->opd_pre_waitq,
		     (!d->opd_pre_reserved && d->opd_recovery_completed) ||
		     !osp_precreate_running(d) || d->opd_got_disconnected,
		     &lwi);
	if (!osp_precreate_running(d) || d->opd_got_disconnected)
		GOTO(out, rc = -EAGAIN);

	CDEBUG(D_HA, "%s: going to cleanup orphans since "DFID"\n",
	       d->opd_obd->obd_name, PFID(&d->opd_last_used_fid));

	*last_fid = d->opd_last_used_fid;
	/* The OSP should already get the valid seq now */
	LASSERT(!fid_is_zero(last_fid));
	if (fid_oid(&d->opd_last_used_fid) < 2) {
		/* lastfid looks strange... ask OST */
		rc = osp_get_lastfid_from_ost(env, d);
		if (rc)
			GOTO(out, rc);
	}

	imp = d->opd_obd->u.cli.cl_import;
	LASSERT(imp);

	req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
	if (req == NULL)
		GOTO(out, rc = -ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
	if (rc) {
		ptlrpc_request_free(req);
		req = NULL;
		GOTO(out, rc);
	}

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out, rc = -EPROTO);

	body->oa.o_flags = OBD_FL_DELORPHAN;
	body->oa.o_valid = OBD_MD_FLFLAGS | OBD_MD_FLGROUP;

	fid_to_ostid(&d->opd_last_used_fid, &body->oa.o_oi);

	ptlrpc_request_set_replen(req);

	/* Don't resend the delorphan req */
	req->rq_no_resend = req->rq_no_delay = 1;

	rc = ptlrpc_queue_wait(req);
	if (rc) {
		update_status = 1;
		GOTO(out, rc);
	}

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out, rc = -EPROTO);

	/*
	 * OST provides us with the id the new pool starts from,
	 * in body->oa.o_oi
	 */
	ostid_to_fid(last_fid, &body->oa.o_oi, d->opd_index);

	spin_lock(&d->opd_pre_lock);
	diff = lu_fid_diff(&d->opd_last_used_fid, last_fid);
	if (diff > 0) {
		d->opd_pre_grow_count = OST_MIN_PRECREATE + diff;
		d->opd_pre_last_created_fid = d->opd_last_used_fid;
	} else {
		d->opd_pre_grow_count = OST_MIN_PRECREATE;
		d->opd_pre_last_created_fid = *last_fid;
	}
	/*
	 * This empties the pre-creation pool and effectively blocks any new
	 * reservations.
	 */
	LASSERT(fid_oid(&d->opd_pre_last_created_fid) <=
		LUSTRE_DATA_SEQ_MAX_WIDTH);
	d->opd_pre_used_fid = d->opd_pre_last_created_fid;
	d->opd_pre_grow_slow = 0;
	spin_unlock(&d->opd_pre_lock);

	CDEBUG(D_HA, "%s: Got last_id "DFID" from OST, last_created "DFID
	       " last_used is "DFID"\n", d->opd_obd->obd_name, PFID(last_fid),
	       PFID(&d->opd_pre_last_created_fid),
	       PFID(&d->opd_last_used_fid));
out:
	if (req)
		ptlrpc_req_finished(req);

	spin_lock(&d->opd_pre_lock);
	d->opd_pre_recovering = 0;
	spin_unlock(&d->opd_pre_lock);

	/*
	 * If rc is zero, the pre-creation window should have been emptied.
	 * Since waking up the herd would be useless without pre-created
	 * objects, we defer the signal to osp_precreate_send() in that case.
	 */
	if (rc != 0) {
		if (update_status) {
			CERROR("%s: cannot cleanup orphans: rc = %d\n",
			       d->opd_obd->obd_name, rc);
			/*
			 * we can't proceed from here, the OST seems to
			 * be in bad shape. better to wait for a new
			 * instance of the server and repeat from the
			 * beginning. notify possible waiters that this
			 * OSP isn't quite functional yet.
			 */
			osp_pre_update_status(d, rc);
		} else {
			wake_up(&d->opd_pre_user_waitq);
		}
	}

	RETURN(rc);
}
static int seq_client_rpc(struct lu_client_seq *seq,
			  struct lu_seq_range *output, __u32 opc,
			  const char *opcname)
{
	struct obd_export *exp = seq->lcs_exp;
	struct ptlrpc_request *req;
	struct lu_seq_range *out, *in;
	__u32 *op;
	unsigned int debug_mask;
	int rc;
	ENTRY;

	LASSERT(exp != NULL && !IS_ERR(exp));
	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY,
					LUSTRE_MDS_VERSION, SEQ_QUERY);
	if (req == NULL)
		RETURN(-ENOMEM);

	/* Init operation code */
	op = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_OPC);
	*op = opc;

	/* Zero out input range, this is not recovery yet. */
	in = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_RANGE);
	lu_seq_range_init(in);

	ptlrpc_request_set_replen(req);

	in->lsr_index = seq->lcs_space.lsr_index;
	if (seq->lcs_type == LUSTRE_SEQ_METADATA)
		fld_range_set_mdt(in);
	else
		fld_range_set_ost(in);

	if (opc == SEQ_ALLOC_SUPER) {
		req->rq_request_portal = SEQ_CONTROLLER_PORTAL;
		req->rq_reply_portal = MDC_REPLY_PORTAL;
		/* During allocating super sequence for data object,
		 * the current thread might hold the export of MDT0
		 * (MDT0 precreating objects on this OST), and it will
		 * send the request to MDT0 here, so we can not keep
		 * resending the request here, otherwise if MDT0 is
		 * failed (umounted), it can not release the export
		 * of MDT0 */
		if (seq->lcs_type == LUSTRE_SEQ_DATA)
			req->rq_no_delay = req->rq_no_resend = 1;
		debug_mask = D_CONSOLE;
	} else {
		if (seq->lcs_type == LUSTRE_SEQ_METADATA) {
			req->rq_reply_portal = MDC_REPLY_PORTAL;
			req->rq_request_portal = SEQ_METADATA_PORTAL;
		} else {
			req->rq_reply_portal = OSC_REPLY_PORTAL;
			req->rq_request_portal = SEQ_DATA_PORTAL;
		}
		debug_mask = D_INFO;
	}

	/* Allow seq client RPC during recovery time. */
	req->rq_allow_replay = 1;

	ptlrpc_at_set_req_timeout(req);

	rc = ptlrpc_queue_wait(req);
	if (rc)
		GOTO(out_req, rc);

	out = req_capsule_server_get(&req->rq_pill, &RMF_SEQ_RANGE);
	*output = *out;

	if (!lu_seq_range_is_sane(output)) {
		CERROR("%s: Invalid range received from server: "
		       DRANGE"\n", seq->lcs_name, PRANGE(output));
		GOTO(out_req, rc = -EINVAL);
	}

	if (lu_seq_range_is_exhausted(output)) {
		CERROR("%s: Range received from server is exhausted: "
		       DRANGE"]\n", seq->lcs_name, PRANGE(output));
		GOTO(out_req, rc = -EINVAL);
	}

	CDEBUG_LIMIT(debug_mask, "%s: Allocated %s-sequence "DRANGE"]\n",
		     seq->lcs_name, opcname, PRANGE(output));

	EXIT;
out_req:
	ptlrpc_req_finished(req);
	return rc;
}
static int osp_precreate_send(const struct lu_env *env, struct osp_device *d)
{
	struct osp_thread_info *oti = osp_env_info(env);
	struct ptlrpc_request *req;
	struct obd_import *imp;
	struct ost_body *body;
	int rc, grow, diff;
	struct lu_fid *fid = &oti->osi_fid;
	ENTRY;

	/* don't precreate new objects till OST healthy and has free space */
	if (unlikely(d->opd_pre_status)) {
		CDEBUG(D_INFO, "%s: don't send new precreate: rc = %d\n",
		       d->opd_obd->obd_name, d->opd_pre_status);
		RETURN(0);
	}

	/*
	 * if connection/initialization is not completed yet, ignore
	 */
	imp = d->opd_obd->u.cli.cl_import;
	LASSERT(imp);

	req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
	if (req == NULL)
		RETURN(-ENOMEM);
	req->rq_request_portal = OST_CREATE_PORTAL;
	/* we should not resend create request - anyway we will have delorphan
	 * and kill these objects */
	req->rq_no_delay = req->rq_no_resend = 1;

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	spin_lock(&d->opd_pre_lock);
	if (d->opd_pre_grow_count > d->opd_pre_max_grow_count / 2)
		d->opd_pre_grow_count = d->opd_pre_max_grow_count / 2;
	grow = d->opd_pre_grow_count;
	spin_unlock(&d->opd_pre_lock);

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);

	*fid = d->opd_pre_last_created_fid;
	rc = osp_precreate_fids(env, d, fid, &grow);
	if (rc == 1) {
		/* Current seq has been used up */
		if (!osp_is_fid_client(d)) {
			osp_pre_update_status(d, -ENOSPC);
			rc = -ENOSPC;
		}
		wake_up(&d->opd_pre_waitq);
		GOTO(out_req, rc);
	}

	if (!osp_is_fid_client(d)) {
		/* Non-FID client will always send seq 0 because of
		 * compatibility */
		LASSERTF(fid_is_idif(fid), "Invalid fid "DFID"\n", PFID(fid));
		fid->f_seq = 0;
	}

	fid_to_ostid(fid, &body->oa.o_oi);
	body->oa.o_valid = OBD_MD_FLGROUP;

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc) {
		CERROR("%s: can't precreate: rc = %d\n",
		       d->opd_obd->obd_name, rc);
		GOTO(out_req, rc);
	}
	LASSERT(req->rq_transno == 0);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out_req, rc = -EPROTO);

	ostid_to_fid(fid, &body->oa.o_oi, d->opd_index);
	LASSERTF(lu_fid_diff(fid, &d->opd_pre_used_fid) > 0,
		 "reply fid "DFID" pre used fid "DFID"\n", PFID(fid),
		 PFID(&d->opd_pre_used_fid));

	diff = lu_fid_diff(fid, &d->opd_pre_last_created_fid);

	spin_lock(&d->opd_pre_lock);
	if (diff < grow) {
		/* the OST has not managed to create all the
		 * objects we asked for */
		d->opd_pre_grow_count = max(diff, OST_MIN_PRECREATE);
		d->opd_pre_grow_slow = 1;
	} else {
		/* the OST is able to keep up with the work,
		 * we could consider increasing grow_count
		 * next time if needed */
		d->opd_pre_grow_slow = 0;
	}
	d->opd_pre_last_created_fid = *fid;
	spin_unlock(&d->opd_pre_lock);

	CDEBUG(D_HA, "%s: current precreated pool: "DFID"-"DFID"\n",
	       d->opd_obd->obd_name, PFID(&d->opd_pre_used_fid),
	       PFID(&d->opd_pre_last_created_fid));
out_req:
	/* now we can wake up all users waiting for objects */
	osp_pre_update_status(d, rc);
	wake_up(&d->opd_pre_user_waitq);
	ptlrpc_req_finished(req);
	RETURN(rc);
}
static int osp_get_lastfid_from_ost(const struct lu_env *env,
				    struct osp_device *d)
{
	struct ptlrpc_request *req = NULL;
	struct obd_import *imp;
	struct lu_fid *last_fid;
	char *tmp;
	int rc;
	ENTRY;

	imp = d->opd_obd->u.cli.cl_import;
	LASSERT(imp);

	req = ptlrpc_request_alloc(imp, &RQF_OST_GET_INFO_LAST_FID);
	if (req == NULL)
		RETURN(-ENOMEM);

	req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_KEY, RCL_CLIENT,
			     sizeof(KEY_LAST_FID));

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_KEY);
	memcpy(tmp, KEY_LAST_FID, sizeof(KEY_LAST_FID));
	req->rq_no_delay = req->rq_no_resend = 1;
	last_fid = req_capsule_client_get(&req->rq_pill, &RMF_FID);
	fid_cpu_to_le(last_fid, &d->opd_last_used_fid);

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc) {
		/* bad-bad OST.. let sysadm sort this out */
		if (rc == -ENOTSUPP)
			CERROR("%s: server does not support FID: rc = %d\n",
			       d->opd_obd->obd_name, -ENOTSUPP);
		ptlrpc_set_import_active(imp, 0);
		GOTO(out, rc);
	}

	last_fid = req_capsule_server_get(&req->rq_pill, &RMF_FID);
	if (last_fid == NULL) {
		CERROR("%s: failed to get last_fid\n", d->opd_obd->obd_name);
		GOTO(out, rc = -EPROTO);
	}

	if (!fid_is_sane(last_fid)) {
		CERROR("%s: Got insane last_fid "DFID"\n",
		       d->opd_obd->obd_name, PFID(last_fid));
		GOTO(out, rc = -EPROTO);
	}

	/* Only update the last used fid, if the OST has objects for
	 * this sequence, i.e. fid_oid > 0 */
	if (fid_oid(last_fid) > 0)
		d->opd_last_used_fid = *last_fid;

	CDEBUG(D_HA, "%s: Got last_fid "DFID"\n", d->opd_obd->obd_name,
	       PFID(last_fid));
out:
	ptlrpc_req_finished(req);
	RETURN(rc);
}
static int seq_client_rpc(struct lu_client_seq *seq,
			  struct lu_seq_range *output, __u32 opc,
			  const char *opcname)
{
	struct obd_export *exp = seq->lcs_exp;
	struct ptlrpc_request *req;
	struct lu_seq_range *out, *in;
	__u32 *op;
	int rc;
	ENTRY;

	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY,
					LUSTRE_MDS_VERSION, SEQ_QUERY);
	if (req == NULL)
		RETURN(-ENOMEM);

	/* Init operation code */
	op = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_OPC);
	*op = opc;

	/* Zero out input range, this is not recovery yet. */
	in = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_RANGE);
	range_init(in);

	ptlrpc_request_set_replen(req);

	if (seq->lcs_type == LUSTRE_SEQ_METADATA) {
		req->rq_request_portal = SEQ_METADATA_PORTAL;
		in->lsr_flags = LU_SEQ_RANGE_MDT;
	} else {
		LASSERTF(seq->lcs_type == LUSTRE_SEQ_DATA,
			 "unknown lcs_type %u\n", seq->lcs_type);
		req->rq_request_portal = SEQ_DATA_PORTAL;
		in->lsr_flags = LU_SEQ_RANGE_OST;
	}

	if (opc == SEQ_ALLOC_SUPER) {
		/* Update index field of *in, it is required for
		 * FLD update on super sequence allocator node. */
		in->lsr_index = seq->lcs_space.lsr_index;
		req->rq_request_portal = SEQ_CONTROLLER_PORTAL;
	} else {
		LASSERTF(opc == SEQ_ALLOC_META,
			 "unknown opcode %u\n", opc);
	}

	ptlrpc_at_set_req_timeout(req);

	mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
	rc = ptlrpc_queue_wait(req);
	mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
	if (rc)
		GOTO(out_req, rc);

	out = req_capsule_server_get(&req->rq_pill, &RMF_SEQ_RANGE);
	*output = *out;

	if (!range_is_sane(output)) {
		CERROR("%s: Invalid range received from server: "
		       DRANGE"\n", seq->lcs_name, PRANGE(output));
		GOTO(out_req, rc = -EINVAL);
	}

	if (range_is_exhausted(output)) {
		CERROR("%s: Range received from server is exhausted: "
		       DRANGE"]\n", seq->lcs_name, PRANGE(output));
		GOTO(out_req, rc = -EINVAL);
	}

	CDEBUG(D_INFO, "%s: Allocated %s-sequence "DRANGE"]\n",
	       seq->lcs_name, opcname, PRANGE(output));

	EXIT;
out_req:
	ptlrpc_req_finished(req);
	return rc;
}
int osp_object_truncate(const struct lu_env *env, struct dt_object *dt,
			__u64 size)
{
	struct osp_device *d = lu2osp_dev(dt->do_lu.lo_dev);
	struct ptlrpc_request *req = NULL;
	struct obd_import *imp;
	struct ost_body *body;
	struct obdo *oa = NULL;
	int rc;
	ENTRY;

	imp = d->opd_obd->u.cli.cl_import;
	LASSERT(imp);

	req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
	if (req == NULL)
		RETURN(-ENOMEM);

	/* XXX: capa support? */
	/* osc_set_capa_size(req, &RMF_CAPA1, capa); */
	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	/*
	 * XXX: decide what to do about resend here:
	 * if we don't resend, the client may see a wrong file size;
	 * if we do resend, an MDS thread can get stuck for quite long
	 */
	req->rq_no_resend = req->rq_no_delay = 1;

	req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
	ptlrpc_at_set_req_timeout(req);

	OBD_ALLOC_PTR(oa);
	if (oa == NULL)
		GOTO(out, rc = -ENOMEM);

	rc = fid_to_ostid(lu_object_fid(&dt->do_lu), &oa->o_oi);
	LASSERT(rc == 0);
	oa->o_size = size;
	oa->o_blocks = OBD_OBJECT_EOF;
	oa->o_valid = OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
		      OBD_MD_FLID | OBD_MD_FLGROUP;

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);
	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

	/* XXX: capa support? */
	/* osc_pack_capa(req, body, capa); */

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc)
		CERROR("can't punch object: %d\n", rc);
out:
	ptlrpc_req_finished(req);
	if (oa)
		OBD_FREE_PTR(oa);
	RETURN(rc);
}
static int osc_object_fiemap(const struct lu_env *env, struct cl_object *obj,
			     struct ll_fiemap_info_key *fmkey,
			     struct fiemap *fiemap, size_t *buflen)
{
	struct obd_export *exp = osc_export(cl2osc(obj));
	struct ldlm_res_id resid;
	union ldlm_policy_data policy;
	struct lustre_handle lockh;
	enum ldlm_mode mode = LCK_MINMODE;
	struct ptlrpc_request *req;
	struct fiemap *reply;
	char *tmp;
	int rc;
	ENTRY;

	fmkey->lfik_oa.o_oi = cl2osc(obj)->oo_oinfo->loi_oi;
	if (!(fmkey->lfik_fiemap.fm_flags & FIEMAP_FLAG_SYNC))
		goto skip_locking;

	policy.l_extent.start = fmkey->lfik_fiemap.fm_start & PAGE_MASK;

	if (OBD_OBJECT_EOF - fmkey->lfik_fiemap.fm_length <=
	    fmkey->lfik_fiemap.fm_start + PAGE_SIZE - 1)
		policy.l_extent.end = OBD_OBJECT_EOF;
	else
		policy.l_extent.end = (fmkey->lfik_fiemap.fm_start +
				       fmkey->lfik_fiemap.fm_length +
				       PAGE_SIZE - 1) & PAGE_MASK;

	ostid_build_res_name(&fmkey->lfik_oa.o_oi, &resid);
	mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
			       LDLM_FL_BLOCK_GRANTED | LDLM_FL_LVB_READY,
			       &resid, LDLM_EXTENT, &policy,
			       LCK_PR | LCK_PW, &lockh, 0);
	if (mode) { /* lock is cached on client */
		if (mode != LCK_PR) {
			ldlm_lock_addref(&lockh, LCK_PR);
			ldlm_lock_decref(&lockh, LCK_PW);
		}
	} else { /* no cached lock, needs acquire lock on server side */
		fmkey->lfik_oa.o_valid |= OBD_MD_FLFLAGS;
		fmkey->lfik_oa.o_flags |= OBD_FL_SRVLOCK;
	}

skip_locking:
	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_OST_GET_INFO_FIEMAP);
	if (req == NULL)
		GOTO(drop_lock, rc = -ENOMEM);

	req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY, RCL_CLIENT,
			     sizeof(*fmkey));
	req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_CLIENT,
			     *buflen);
	req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_SERVER,
			     *buflen);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
	if (rc != 0) {
		ptlrpc_request_free(req);
		GOTO(drop_lock, rc);
	}

	tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
	memcpy(tmp, fmkey, sizeof(*fmkey));
	tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
	memcpy(tmp, fiemap, *buflen);
	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc != 0)
		GOTO(fini_req, rc);

	reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
	if (reply == NULL)
		GOTO(fini_req, rc = -EPROTO);

	memcpy(fiemap, reply, *buflen);
fini_req:
	ptlrpc_req_finished(req);
drop_lock:
	if (mode)
		ldlm_lock_decref(&lockh, LCK_PR);
	RETURN(rc);
}
static ssize_t osp_md_read(const struct lu_env *env, struct dt_object *dt,
			   struct lu_buf *rbuf, loff_t *pos)
{
	struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
	struct dt_device *dt_dev = &osp->opd_dt_dev;
	struct lu_buf *lbuf = &osp_env_info(env)->osi_lb2;
	char *ptr = rbuf->lb_buf;
	struct osp_update_request *update;
	struct ptlrpc_request *req = NULL;
	struct out_read_reply *orr;
	struct ptlrpc_bulk_desc *desc;
	struct object_update_reply *reply;
	__u32 left_size;
	int nbufs;
	int i;
	int rc;
	ENTRY;

	/* Because it needs to send the update buffer right away,
	 * just create an update buffer, instead of attaching the
	 * update_remote list of the thandle. */
	update = osp_update_request_create(dt_dev);
	if (IS_ERR(update))
		RETURN(PTR_ERR(update));

	rc = osp_update_rpc_pack(env, read, update, OUT_READ,
				 lu_object_fid(&dt->do_lu),
				 rbuf->lb_len, *pos);
	if (rc != 0) {
		CERROR("%s: cannot insert update: rc = %d\n",
		       dt_dev->dd_lu_dev.ld_obd->obd_name, rc);
		GOTO(out_update, rc);
	}

	CDEBUG(D_INFO, "%s "DFID" read offset %llu size %zu\n",
	       dt_dev->dd_lu_dev.ld_obd->obd_name,
	       PFID(lu_object_fid(&dt->do_lu)), *pos, rbuf->lb_len);

	rc = osp_prep_update_req(env, osp->opd_obd->u.cli.cl_import, update,
				 &req);
	if (rc != 0)
		GOTO(out_update, rc);

	nbufs = (rbuf->lb_len + OUT_BULK_BUFFER_SIZE - 1) /
					OUT_BULK_BUFFER_SIZE;
	/* allocate bulk descriptor */
	desc = ptlrpc_prep_bulk_imp(req, nbufs, 1,
				    PTLRPC_BULK_PUT_SINK |
				    PTLRPC_BULK_BUF_KVEC,
				    MDS_BULK_PORTAL, &ptlrpc_bulk_kvec_ops);
	if (desc == NULL)
		GOTO(out, rc = -ENOMEM);

	/* split the buffer into small chunk size */
	left_size = rbuf->lb_len;
	for (i = 0; i < nbufs; i++) {
		int read_size;

		read_size = left_size > OUT_BULK_BUFFER_SIZE ?
			    OUT_BULK_BUFFER_SIZE : left_size;
		desc->bd_frag_ops->add_iov_frag(desc, ptr, read_size);
		ptr += read_size;
		left_size -= read_size;
	}

	/* This will only be called with read-only update, and these updates
	 * might be used to retrieve update log during recovery process, so
	 * it will be allowed to send during recovery process */
	req->rq_allow_replay = 1;
	req->rq_bulk_read = 1;

	/* send request to master and wait for RPC to complete */
	rc = ptlrpc_queue_wait(req);
	if (rc != 0)
		GOTO(out, rc);

	rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk,
					  req->rq_bulk->bd_nob_transferred);
	if (rc < 0)
		GOTO(out, rc);

	reply = req_capsule_server_sized_get(&req->rq_pill,
					     &RMF_OUT_UPDATE_REPLY,
					     OUT_UPDATE_REPLY_SIZE);
	if (reply->ourp_magic != UPDATE_REPLY_MAGIC) {
		CERROR("%s: invalid update reply magic %x expected %x: "
		       "rc = %d\n", dt_dev->dd_lu_dev.ld_obd->obd_name,
		       reply->ourp_magic, UPDATE_REPLY_MAGIC, -EPROTO);
		GOTO(out, rc = -EPROTO);
	}

	rc = object_update_result_data_get(reply, lbuf, 0);
	if (rc < 0)
		GOTO(out, rc);

	if (lbuf->lb_len < sizeof(*orr))
		GOTO(out, rc = -EPROTO);

	orr = lbuf->lb_buf;
	orr_le_to_cpu(orr, orr);
	rc = orr->orr_size;
	*pos = orr->orr_offset;
out:
	ptlrpc_req_finished(req);
out_update:
	osp_update_request_destroy(update);

	RETURN(rc);
}
int gss_do_ctx_init_rpc(char __user *buffer, unsigned long count)
{
	struct obd_import *imp;
	struct ptlrpc_request *req;
	struct lgssd_ioctl_param param;
	struct obd_device *obd;
	char obdname[64];
	long lsize;
	int rc;

	if (count != sizeof(param)) {
		CERROR("ioctl size %lu, expect %lu, please check lgss_keyring "
		       "version\n", count, (unsigned long) sizeof(param));
		RETURN(-EINVAL);
	}
	if (copy_from_user(&param, buffer, sizeof(param))) {
		CERROR("failed copy data from lgssd\n");
		RETURN(-EFAULT);
	}

	if (param.version != GSSD_INTERFACE_VERSION) {
		CERROR("gssd interface version %d (expect %d)\n",
		       param.version, GSSD_INTERFACE_VERSION);
		RETURN(-EINVAL);
	}

	/* take name */
	if (strncpy_from_user(obdname, param.uuid, sizeof(obdname)) <= 0) {
		CERROR("Invalid obdname pointer\n");
		RETURN(-EFAULT);
	}

	obd = class_name2obd(obdname);
	if (!obd) {
		CERROR("no such obd %s\n", obdname);
		RETURN(-EINVAL);
	}

	if (unlikely(!obd->obd_set_up)) {
		CERROR("obd %s not setup\n", obdname);
		RETURN(-EINVAL);
	}

	spin_lock(&obd->obd_dev_lock);
	if (obd->obd_stopping) {
		CERROR("obd %s has stopped\n", obdname);
		spin_unlock(&obd->obd_dev_lock);
		RETURN(-EINVAL);
	}

	if (strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) &&
	    strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME) &&
	    strcmp(obd->obd_type->typ_name, LUSTRE_MGC_NAME)) {
		CERROR("obd %s is not a client device\n", obdname);
		spin_unlock(&obd->obd_dev_lock);
		RETURN(-EINVAL);
	}
	spin_unlock(&obd->obd_dev_lock);

	down_read(&obd->u.cli.cl_sem);
	if (obd->u.cli.cl_import == NULL) {
		CERROR("obd %s: import has gone\n", obd->obd_name);
		up_read(&obd->u.cli.cl_sem);
		RETURN(-EINVAL);
	}
	imp = class_import_get(obd->u.cli.cl_import);
	up_read(&obd->u.cli.cl_sem);

	if (imp->imp_deactive) {
		CERROR("import has been deactivated\n");
		class_import_put(imp);
		RETURN(-EINVAL);
	}

	req = ptlrpc_request_alloc_pack(imp, &RQF_SEC_CTX, LUSTRE_OBD_VERSION,
					SEC_CTX_INIT);
	if (req == NULL) {
		param.status = -ENOMEM;
		goto out_copy;
	}

	if (req->rq_cli_ctx->cc_sec->ps_id != param.secid) {
		CWARN("original secid %d, now has changed to %d, "
		      "cancel this negotiation\n", param.secid,
		      req->rq_cli_ctx->cc_sec->ps_id);
		param.status = -EINVAL;
		goto out_copy;
	}

	/* get token */
	rc = ctx_init_pack_request(imp, req,
				   param.lustre_svc,
				   param.uid, param.gid,
				   param.send_token_size,
				   param.send_token);
	if (rc) {
		param.status = rc;
		goto out_copy;
	}

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc) {
		/* If any _real_ denial is made, we expect the server to
		 * return an -EACCES reply, or to return success but
		 * indicate a gss error inside the reply message. All other
		 * errors are treated as timeout; the caller might try the
		 * negotiation repeatedly, leaving recovery decisions to the
		 * general ptlrpc layer.
		 *
		 * FIXME maybe some other error code shouldn't be treated
		 * as timeout. */
		param.status = rc;
		if (rc != -EACCES)
			param.status = -ETIMEDOUT;
		goto out_copy;
	}

	LASSERT(req->rq_repdata);
	lsize = ctx_init_parse_reply(req->rq_repdata,
				     ptlrpc_rep_need_swab(req),
				     param.reply_buf, param.reply_buf_size);
	if (lsize < 0) {
		param.status = (int) lsize;
		goto out_copy;
	}

	param.status = 0;
	param.reply_length = lsize;

out_copy:
	if (copy_to_user(buffer, &param, sizeof(param)))
		rc = -EFAULT;
	else
		rc = 0;

	class_import_put(imp);
	ptlrpc_req_finished(req);
	RETURN(rc);
}
/* This is a callback from the llog_* functions.
 * Assumes caller has already pushed us into the kernel context. */
static int llog_client_open(const struct lu_env *env,
			    struct llog_handle *lgh,
			    struct llog_logid *logid, char *name,
			    enum llog_open_param open_param)
{
	struct obd_import *imp;
	struct llogd_body *body;
	struct llog_ctxt *ctxt = lgh->lgh_ctxt;
	struct ptlrpc_request *req = NULL;
	int rc;

	LLOG_CLIENT_ENTRY(ctxt, imp);

	/* client cannot create llog */
	LASSERTF(open_param != LLOG_OPEN_NEW, "%#x\n", open_param);
	LASSERT(lgh);

	req = ptlrpc_request_alloc(imp, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
	if (!req) {
		rc = -ENOMEM;
		goto out;
	}

	if (name)
		req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
				     strlen(name) + 1);

	rc = ptlrpc_request_pack(req, LUSTRE_LOG_VERSION,
				 LLOG_ORIGIN_HANDLE_CREATE);
	if (rc) {
		ptlrpc_request_free(req);
		req = NULL;
		goto out;
	}
	ptlrpc_request_set_replen(req);

	body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
	if (logid)
		body->lgd_logid = *logid;
	body->lgd_ctxt_idx = ctxt->loc_idx - 1;

	if (name) {
		char *tmp;

		tmp = req_capsule_client_sized_get(&req->rq_pill, &RMF_NAME,
						   strlen(name) + 1);
		LASSERT(tmp);
		strcpy(tmp, name);
	}

	rc = ptlrpc_queue_wait(req);
	if (rc)
		goto out;

	body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
	if (!body) {
		rc = -EFAULT;
		goto out;
	}

	lgh->lgh_id = body->lgd_logid;
	lgh->lgh_ctxt = ctxt;
out:
	LLOG_CLIENT_EXIT(ctxt, imp);
	ptlrpc_req_finished(req);
	return rc;
}
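/*
 * The llog client handlers above are normally exported through an
 * operations table rather than called directly. A sketch of that wiring,
 * using the lop_* field names of struct llog_operations; the table name is
 * illustrative (the tree conventionally calls it llog_client_ops), and any
 * handler not defined above, such as a close method, is omitted here.
 */
static struct llog_operations example_llog_client_ops = {
	.lop_open	 = llog_client_open,
	.lop_destroy	 = llog_client_destroy,
	.lop_next_block	 = llog_client_next_block,
	.lop_prev_block	 = llog_client_prev_block,
	.lop_read_header = llog_client_read_header,
};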
/*
 * Get intent per-ID lock or global-index lock from master.
 *
 * \param env    - the environment passed by the caller
 * \param exp    - is the export to use to send the intent RPC
 * \param qbody  - quota body to be packed in request
 * \param sync   - synchronous or asynchronous (pre-acquire)
 * \param it_op  - IT_QUOTA_DQACQ or IT_QUOTA_CONN
 * \param completion - completion callback
 * \param qqi    - is the qsd_qtype_info structure to pass to the completion
 *                 function
 * \param lvb    - is the lvb associated with the lock and returned by the
 *                 server
 * \param arg    - is an opaque argument passed to the completion callback
 *
 * \retval 0     - success
 * \retval -ve   - appropriate errors
 */
int qsd_intent_lock(const struct lu_env *env, struct obd_export *exp,
		    struct quota_body *qbody, bool sync, int it_op,
		    qsd_req_completion_t completion,
		    struct qsd_qtype_info *qqi, struct lquota_lvb *lvb,
		    void *arg)
{
	struct qsd_thread_info *qti = qsd_info(env);
	struct ptlrpc_request *req;
	struct qsd_async_args *aa = NULL;
	struct ldlm_intent *lit;
	struct quota_body *req_qbody;
	__u64 flags = LDLM_FL_HAS_INTENT;
	int rc;
	ENTRY;

	LASSERT(exp != NULL);
	LASSERT(!lustre_handle_is_used(&qbody->qb_lockh));

	memset(&qti->qti_lockh, 0, sizeof(qti->qti_lockh));

	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_LDLM_INTENT_QUOTA);
	if (req == NULL)
		GOTO(out, rc = -ENOMEM);

	req->rq_no_retry_einprogress = 1;
	rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
	if (rc) {
		ptlrpc_request_free(req);
		GOTO(out, rc);
	}

	lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
	lit->opc = (__u64)it_op;

	req_qbody = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_BODY);
	*req_qbody = *qbody;

	req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
			     sizeof(*lvb));
	ptlrpc_request_set_replen(req);

	switch (it_op) {
	case IT_QUOTA_CONN:
		/* build resource name associated with global index */
		fid_build_reg_res_name(&qbody->qb_fid, &qti->qti_resid);

		/* copy einfo template and fill ei_cbdata with qqi pointer */
		memcpy(&qti->qti_einfo, &qsd_glb_einfo,
		       sizeof(qti->qti_einfo));
		qti->qti_einfo.ei_cbdata = qqi;

		/* don't cancel global lock on memory pressure */
		flags |= LDLM_FL_NO_LRU;
		break;
	case IT_QUOTA_DQACQ:
		/* build resource name associated for per-ID quota lock */
		fid_build_quota_res_name(&qbody->qb_fid, &qbody->qb_id,
					 &qti->qti_resid);

		/* copy einfo template and fill ei_cbdata with lqe pointer */
		memcpy(&qti->qti_einfo, &qsd_id_einfo,
		       sizeof(qti->qti_einfo));
		qti->qti_einfo.ei_cbdata = arg;
		break;
	default:
		LASSERTF(0, "invalid it_op %d", it_op);
	}

	/* build lock enqueue request */
	rc = ldlm_cli_enqueue(exp, &req, &qti->qti_einfo, &qti->qti_resid,
			      NULL, &flags, (void *)lvb, sizeof(*lvb),
			      LVB_T_LQUOTA, &qti->qti_lockh, 1);
	if (rc < 0) {
		ptlrpc_req_finished(req);
		GOTO(out, rc);
	}

	/* grab reference on backend structure for the new lock */
	switch (it_op) {
	case IT_QUOTA_CONN:
		/* grab reference on qqi for new lock */
#ifdef USE_LU_REF
	{
		struct ldlm_lock *lock;

		lock = ldlm_handle2lock(&qti->qti_lockh);
		if (lock == NULL) {
			ptlrpc_req_finished(req);
			GOTO(out, rc = -ENOLCK);
		}
		lu_ref_add(&qqi->qqi_reference, "glb_lock", lock);
		LDLM_LOCK_PUT(lock);
	}
#endif
		qqi_getref(qqi);
		break;
	case IT_QUOTA_DQACQ:
		/* grab reference on lqe for new lock */
		lqe_getref((struct lquota_entry *)arg);
		/* all acquire/release request are sent with no_resend and
		 * no_delay flag */
		req->rq_no_resend = req->rq_no_delay = 1;
		break;
	default:
		break;
	}

	CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
	aa = ptlrpc_req_async_args(req);
	aa->aa_exp = exp;
	aa->aa_qqi = qqi;
	aa->aa_arg = arg;
	aa->aa_lvb = lvb;
	aa->aa_completion = completion;
	lustre_handle_copy(&aa->aa_lockh, &qti->qti_lockh);

	if (sync) {
		/* send lock enqueue request and wait for completion */
		rc = ptlrpc_queue_wait(req);
		rc = qsd_intent_interpret(env, req, aa, rc);
		ptlrpc_req_finished(req);
	} else {
		/* queue lock request and return */
		req->rq_interpret_reply = qsd_intent_interpret;
		ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
	}

	RETURN(rc);
out:
	completion(env, qqi, qbody, NULL, &qti->qti_lockh, lvb, arg, rc);
	return rc;
}
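/*
 * Both qsd_send_dqacq() and qsd_intent_lock() hand the final status to a
 * qsd_req_completion_t callback, whether the RPC ran synchronously or via
 * ptlrpcd. A hedged sketch of what such a callback can look like, with the
 * argument list inferred from the eight-argument call sites above (env,
 * qqi, request body, reply body, lock handle, lvb, opaque argument,
 * status); the void return type and the body are assumptions.
 */
static void example_dqacq_completion(const struct lu_env *env,
				     struct qsd_qtype_info *qqi,
				     struct quota_body *reqbody,
				     struct quota_body *repbody,
				     struct lustre_handle *lockh,
				     struct lquota_lvb *lvb,
				     void *arg, int rc)
{
	/* by convention the opaque argument carries the quota entry */
	struct lquota_entry *lqe = arg;

	if (rc)
		CDEBUG(D_QUOTA, "lqe %p: dqacq completion rc = %d\n",
		       lqe, rc);
	/* a real callback would update the lquota_entry state here and
	 * release the references taken before the RPC was sent */
}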