/*
 * Glimpse callback handler for all quota locks. This function extracts
 * information from the glimpse request.
 *
 * \param req  - is the glimpse ptlrpc request
 * \param desc - on success, points to the glimpse descriptor describing the
 *               purpose of the glimpse request (extracted from \a req)
 * \param lvb  - on success, points to the lvb in the reply buffer
 *
 * \retval 0 on success and \a desc & \a lvb point to valid structures,
 *         appropriate error on failure
 */
static int qsd_common_glimpse_ast(struct ptlrpc_request *req,
				  struct ldlm_gl_lquota_desc **desc, void **lvb)
{
	int rc;
	ENTRY;

	/* only ever invoked for glimpse callbacks */
	LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);

	/* glimpse on quota locks always packs a glimpse descriptor */
	req_capsule_extend(&req->rq_pill, &RQF_LDLM_GL_DESC_CALLBACK);

	/* extract glimpse descriptor */
	*desc = req_capsule_client_get(&req->rq_pill, &RMF_DLM_GL_DESC);
	if (*desc == NULL)
		RETURN(-EFAULT);

	/* prepare reply: reserve room for the lvb before packing */
	req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
			     sizeof(struct lquota_lvb));
	rc = req_capsule_server_pack(&req->rq_pill);
	if (rc != 0) {
		CERROR("Can't pack response, rc %d\n", rc);
		RETURN(rc);
	}

	/* extract lvb pointer from the freshly-packed reply buffer */
	*lvb = req_capsule_server_get(&req->rq_pill, &RMF_DLM_LVB);

	RETURN(0);
}
/**
 * Send request reply from request \a req reply buffer.
 * \a flags defines reply types
 *
 * Takes a reference on the reply state for the network send and drops it
 * again if the send could not be initiated.  The connection reference
 * acquired here is released on every exit path.
 *
 * Returns 0 on success or error code
 */
int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
{
	struct ptlrpc_reply_state *rs = req->rq_reply_state;
	struct ptlrpc_connection *conn;
	int rc;

	/* We must already have a reply buffer (only ptlrpc_error() may be
	 * called without one). The reply generated by sptlrpc layer (e.g.
	 * error notify, etc.) might have NULL rq->reqmsg; Otherwise we must
	 * have a request buffer which is either the actual (swabbed) incoming
	 * request, or a saved copy if this is a req saved in
	 * target_queue_final_reply().
	 */
	LASSERT(req->rq_no_reply == 0);
	LASSERT(req->rq_reqbuf != NULL);
	LASSERT(rs != NULL);
	LASSERT((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
	LASSERT(req->rq_repmsg != NULL);
	LASSERT(req->rq_repmsg == rs->rs_msg);
	LASSERT(rs->rs_cb_id.cbid_fn == reply_out_callback);
	LASSERT(rs->rs_cb_id.cbid_arg == rs);

	/* There may be no rq_export during failover */

	if (unlikely(req->rq_export && req->rq_export->exp_obd &&
		     req->rq_export->exp_obd->obd_fail)) {
		/* Failed obd's only send ENODEV */
		req->rq_type = PTL_RPC_MSG_ERR;
		req->rq_status = -ENODEV;
		CDEBUG(D_HA, "sending ENODEV from failed obd %d\n",
		       req->rq_export->exp_obd->obd_minor);
	}

	/* In order to keep interoperability with the client (< 2.3) which
	 * doesn't have pb_jobid in ptlrpc_body, We have to shrink the
	 * ptlrpc_body in reply buffer to ptlrpc_body_v2, otherwise, the
	 * reply buffer on client will be overflow.
	 *
	 * XXX Remove this whenever we drop the interoperability with
	 * such client.
	 */
	req->rq_replen = lustre_shrink_msg(req->rq_repmsg, 0,
					   sizeof(struct ptlrpc_body_v2), 1);

	/* preserve an error type already set (e.g. by the obd_fail path
	 * above); everything else becomes a normal reply */
	if (req->rq_type != PTL_RPC_MSG_ERR)
		req->rq_type = PTL_RPC_MSG_REPLY;

	lustre_msg_set_type(req->rq_repmsg, req->rq_type);
	/* status travels in network byte order */
	lustre_msg_set_status(req->rq_repmsg,
			      ptlrpc_status_hton(req->rq_status));
	/* rq_reqmsg may be NULL for sptlrpc-generated replies; use opc 0 */
	lustre_msg_set_opc(req->rq_repmsg,
			   req->rq_reqmsg ?
			   lustre_msg_get_opc(req->rq_reqmsg) : 0);

	target_pack_pool_reply(req);

	ptlrpc_at_set_reply(req, flags);

	/* no export (or no connection on it) can happen during failover;
	 * fall back to looking the connection up by peer */
	if (req->rq_export == NULL || req->rq_export->exp_connection == NULL)
		conn = ptlrpc_connection_get(req->rq_peer, req->rq_self, NULL);
	else
		conn = ptlrpc_connection_addref(req->rq_export->exp_connection);

	if (unlikely(conn == NULL)) {
		CERROR("not replying on NULL connection\n"); /* bug 9635 */
		return -ENOTCONN;
	}
	ptlrpc_rs_addref(rs); /* +1 ref for the network */

	rc = sptlrpc_svc_wrap_reply(req);
	if (unlikely(rc))
		goto out;

	req->rq_sent = ktime_get_real_seconds();

	/* "difficult" replies that need an ack request one from LNet unless
	 * acks were explicitly disabled for this reply state */
	rc = ptl_send_buf(&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
			  (rs->rs_difficult && !rs->rs_no_ack) ?
			  LNET_ACK_REQ : LNET_NOACK_REQ,
			  &rs->rs_cb_id, conn,
			  ptlrpc_req2svc(req)->srv_rep_portal,
			  req->rq_xid, req->rq_reply_off);
out:
	/* drop the network ref taken above if the send never started */
	if (unlikely(rc != 0))
		ptlrpc_req_drop_rs(req);
	ptlrpc_connection_put(conn);
	return rc;
}
/* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */
/*
 * Main service handler for LDLM callback requests (BL/CP/GL ASTs) arriving
 * on a client, plus a handful of legacy llog/quota opcodes that share the
 * same service.  Always returns 0; per-request errors are reported back to
 * the peer via ldlm_callback_reply().
 */
static int ldlm_callback_handler(struct ptlrpc_request *req)
{
	struct ldlm_namespace *ns;
	struct ldlm_request *dlm_req;
	struct ldlm_lock *lock;
	int rc;

	/* Requests arrive in sender's byte order. The ptlrpc service
	 * handler has already checked and, if necessary, byte-swapped the
	 * incoming request message body, but I am responsible for the
	 * message buffers. */

	/* do nothing for sec context finalize */
	if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI)
		return 0;

	req_capsule_init(&req->rq_pill, req, RCL_SERVER);

	if (req->rq_export == NULL) {
		rc = ldlm_callback_reply(req, -ENOTCONN);
		ldlm_callback_errmsg(req, "Operate on unconnected server",
				     rc, NULL);
		return 0;
	}

	LASSERT(req->rq_export != NULL);
	LASSERT(req->rq_export->exp_obd != NULL);

	/* First dispatch: fail-injection drops for AST opcodes (which fall
	 * through to the common lock handling below) and full handling for
	 * the non-AST opcodes, each of which returns directly. */
	switch (lustre_msg_get_opc(req->rq_reqmsg)) {
	case LDLM_BL_CALLBACK:
		if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
			return 0;
		break;
	case LDLM_CP_CALLBACK:
		if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK_NET))
			return 0;
		break;
	case LDLM_GL_CALLBACK:
		if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GL_CALLBACK_NET))
			return 0;
		break;
	case LDLM_SET_INFO:
		rc = ldlm_handle_setinfo(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case OBD_LOG_CANCEL: /* remove this eventually - for 1.4.0 compat */
		CERROR("shouldn't be handling OBD_LOG_CANCEL on DLM thread\n");
		req_capsule_set(&req->rq_pill, &RQF_LOG_CANCEL);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
			return 0;
		rc = llog_origin_handle_cancel(req);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_REP))
			return 0;
		ldlm_callback_reply(req, rc);
		return 0;
	case LLOG_ORIGIN_HANDLE_CREATE:
		req_capsule_set(&req->rq_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
			return 0;
		rc = llog_origin_handle_open(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
		req_capsule_set(&req->rq_pill,
				&RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
			return 0;
		rc = llog_origin_handle_next_block(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case LLOG_ORIGIN_HANDLE_READ_HEADER:
		req_capsule_set(&req->rq_pill,
				&RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
			return 0;
		rc = llog_origin_handle_read_header(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case LLOG_ORIGIN_HANDLE_CLOSE:
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
			return 0;
		rc = llog_origin_handle_close(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case OBD_QC_CALLBACK:
		req_capsule_set(&req->rq_pill, &RQF_QC_CALLBACK);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_QC_CALLBACK_NET))
			return 0;
		rc = ldlm_handle_qc_callback(req);
		ldlm_callback_reply(req, rc);
		return 0;
	default:
		CERROR("unknown opcode %u\n",
		       lustre_msg_get_opc(req->rq_reqmsg));
		ldlm_callback_reply(req, -EPROTO);
		return 0;
	}

	/* From here on we only see LDLM_{BL,CP,GL}_CALLBACK */
	ns = req->rq_export->exp_obd->obd_namespace;
	LASSERT(ns != NULL);

	req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);

	dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
	if (dlm_req == NULL) {
		rc = ldlm_callback_reply(req, -EPROTO);
		ldlm_callback_errmsg(req, "Operate without parameter", rc,
				     NULL);
		return 0;
	}

	/* Force a known safe race, send a cancel to the server for a lock
	 * which the server has already started a blocking callback on. */
	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) &&
	    lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
		rc = ldlm_cli_cancel(&dlm_req->lock_handle[0], 0);
		if (rc < 0)
			CERROR("ldlm_cli_cancel: %d\n", rc);
	}

	lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0);
	if (!lock) {
		CDEBUG(D_DLMTRACE, "callback on lock "LPX64" - lock "
		       "disappeared\n", dlm_req->lock_handle[0].cookie);
		rc = ldlm_callback_reply(req, -EINVAL);
		ldlm_callback_errmsg(req, "Operate with invalid parameter", rc,
				     &dlm_req->lock_handle[0]);
		return 0;
	}

	if ((lock->l_flags & LDLM_FL_FAIL_LOC) &&
	    lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
		OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);

	/* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
	lock_res_and_lock(lock);
	lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
					      LDLM_AST_FLAGS);
	if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
		/* If somebody cancels lock and cache is already dropped,
		 * or lock is failed before cp_ast received on client,
		 * we can tell the server we have no lock. Otherwise, we
		 * should send cancel after dropping the cache. */
		if (((lock->l_flags & LDLM_FL_CANCELING) &&
		     (lock->l_flags & LDLM_FL_BL_DONE)) ||
		    (lock->l_flags & LDLM_FL_FAILED)) {
			LDLM_DEBUG(lock, "callback on lock "
				   LPX64" - lock disappeared\n",
				   dlm_req->lock_handle[0].cookie);
			/* drop the res lock before releasing the lock ref */
			unlock_res_and_lock(lock);
			LDLM_LOCK_RELEASE(lock);
			rc = ldlm_callback_reply(req, -EINVAL);
			ldlm_callback_errmsg(req, "Operate on stale lock", rc,
					     &dlm_req->lock_handle[0]);
			return 0;
		}
		/* BL_AST locks are not needed in LRU.
		 * Let ldlm_cancel_lru() be fast. */
		ldlm_lock_remove_from_lru(lock);
		lock->l_flags |= LDLM_FL_BL_AST;
	}
	unlock_res_and_lock(lock);

	/* We want the ost thread to get this reply so that it can respond
	 * to ost requests (write cache writeback) that might be triggered
	 * in the callback.
	 *
	 * But we'd also like to be able to indicate in the reply that we're
	 * cancelling right now, because it's unused, or have an intent result
	 * in the reply, so we might have to push the responsibility for
	 * sending the reply down into the AST handlers, alas. */
	switch (lustre_msg_get_opc(req->rq_reqmsg)) {
	case LDLM_BL_CALLBACK:
		CDEBUG(D_INODE, "blocking ast\n");
		req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
		/* cancel-on-block locks get no reply; the cancel itself acts
		 * as the acknowledgement */
		if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)) {
			rc = ldlm_callback_reply(req, 0);
			if (req->rq_no_reply || rc)
				ldlm_callback_errmsg(req, "Normal process", rc,
						     &dlm_req->lock_handle[0]);
		}
		/* hand off to the blocking thread; fall back to handling it
		 * inline if that fails */
		if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock))
			ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
		break;
	case LDLM_CP_CALLBACK:
		CDEBUG(D_INODE, "completion ast\n");
		req_capsule_extend(&req->rq_pill, &RQF_LDLM_CP_CALLBACK);
		ldlm_callback_reply(req, 0);
		ldlm_handle_cp_callback(req, ns, dlm_req, lock);
		break;
	case LDLM_GL_CALLBACK:
		CDEBUG(D_INODE, "glimpse ast\n");
		req_capsule_extend(&req->rq_pill, &RQF_LDLM_GL_CALLBACK);
		/* glimpse handler sends its own reply (carries the lvb) */
		ldlm_handle_gl_callback(req, ns, dlm_req, lock);
		break;
	default:
		LBUG(); /* checked above */
	}

	return 0;
}
/* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */
/*
 * Top-level MGS request dispatcher: routes each incoming ptlrpc request to
 * the handler for its opcode and sends the reply via target_send_reply().
 * Always returns 0 except for unsupported opcodes, where ptlrpc_error()'s
 * return value is propagated.
 */
int mgs_handle(struct ptlrpc_request *req)
{
	int fail = OBD_FAIL_MGS_ALL_REPLY_NET;
	int opc, rc = 0;
	ENTRY;

	req_capsule_init(&req->rq_pill, req, RCL_SERVER);
	/* fail-injection hooks: optional delay, optional drop of everything */
	CFS_FAIL_TIMEOUT_MS(OBD_FAIL_MGS_PAUSE_REQ, cfs_fail_val);
	if (CFS_FAIL_CHECK(OBD_FAIL_MGS_ALL_REQUEST_NET))
		RETURN(0);

	/* we must not be inside a journal transaction here */
	LASSERT(current->journal_info == NULL);
	opc = lustre_msg_get_opc(req->rq_reqmsg);

	/* security-context opcodes need no handling here, just a reply */
	if (opc == SEC_CTX_INIT || opc == SEC_CTX_INIT_CONT ||
	    opc == SEC_CTX_FINI)
		GOTO(out, rc = 0);

	/* everything except the initial connect requires a live export */
	if (opc != MGS_CONNECT) {
		if (!class_connected_export(req->rq_export)) {
			DEBUG_REQ(D_MGS, req, "operation on unconnected MGS\n");
			req->rq_status = -ENOTCONN;
			GOTO(out, rc = -ENOTCONN);
		}
	}

	switch (opc) {
	case MGS_CONNECT:
		DEBUG_REQ(D_MGS, req, "connect");
		/* MGS and MDS have same request format for connect */
		req_capsule_set(&req->rq_pill, &RQF_MDS_CONNECT);
		rc = target_handle_connect(req);
		if (rc == 0)
			rc = mgs_connect_check_sptlrpc(req);

		if (!rc && (lustre_msg_get_conn_cnt(req->rq_reqmsg) > 1))
			/* Make clients trying to reconnect after a MGS restart
			   happy; also requires obd_replayable */
			lustre_msg_add_op_flags(req->rq_repmsg,
						MSG_CONNECT_RECONNECT);
		break;
	case MGS_DISCONNECT:
		DEBUG_REQ(D_MGS, req, "disconnect");
		/* MGS and MDS have same request format for disconnect */
		req_capsule_set(&req->rq_pill, &RQF_MDS_DISCONNECT);
		rc = target_handle_disconnect(req);
		req->rq_status = rc; /* superfluous? */
		break;
	case MGS_EXCEPTION:
		DEBUG_REQ(D_MGS, req, "exception");
		rc = mgs_handle_exception(req);
		break;
	case MGS_TARGET_REG:
		DEBUG_REQ(D_MGS, req, "target add");
		req_capsule_set(&req->rq_pill, &RQF_MGS_TARGET_REG);
		rc = mgs_handle_target_reg(req);
		break;
	case MGS_TARGET_DEL:
		DEBUG_REQ(D_MGS, req, "target del");
		rc = mgs_handle_target_del(req);
		break;
	case MGS_SET_INFO:
		DEBUG_REQ(D_MGS, req, "set_info");
		req_capsule_set(&req->rq_pill, &RQF_MGS_SET_INFO);
		rc = mgs_set_info_rpc(req);
		break;
	case MGS_CONFIG_READ:
		DEBUG_REQ(D_MGS, req, "read config");
		req_capsule_set(&req->rq_pill, &RQF_MGS_CONFIG_READ);
		rc = mgs_config_read(req);
		break;
	case LDLM_ENQUEUE:
		DEBUG_REQ(D_MGS, req, "enqueue");
		req_capsule_set(&req->rq_pill, &RQF_LDLM_ENQUEUE);
		rc = ldlm_handle_enqueue(req, ldlm_server_completion_ast,
					 ldlm_server_blocking_ast, NULL);
		break;
	case LDLM_BL_CALLBACK:
	case LDLM_CP_CALLBACK:
		/* the MGS never enqueues locks on other servers, so it should
		 * never receive AST callbacks */
		DEBUG_REQ(D_MGS, req, "callback");
		CERROR("callbacks should not happen on MGS\n");
		LBUG();
		break;
	case OBD_PING:
		DEBUG_REQ(D_INFO, req, "ping");
		req_capsule_set(&req->rq_pill, &RQF_OBD_PING);
		rc = target_handle_ping(req);
		break;
	case OBD_LOG_CANCEL:
		DEBUG_REQ(D_MGS, req, "log cancel");
		rc = -ENOTSUPP; /* la la la */
		break;
	case LLOG_ORIGIN_HANDLE_CREATE:
		DEBUG_REQ(D_MGS, req, "llog_init");
		req_capsule_set(&req->rq_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
		rc = llog_origin_handle_create(req);
		/* best-effort; its result is deliberately ignored */
		if (rc == 0)
			(void)mgs_handle_fslog_hack(req);
		break;
	case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
		DEBUG_REQ(D_MGS, req, "llog next block");
		req_capsule_set(&req->rq_pill,
				&RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
		rc = llog_origin_handle_next_block(req);
		break;
	case LLOG_ORIGIN_HANDLE_READ_HEADER:
		DEBUG_REQ(D_MGS, req, "llog read header");
		req_capsule_set(&req->rq_pill,
				&RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
		rc = llog_origin_handle_read_header(req);
		break;
	case LLOG_ORIGIN_HANDLE_CLOSE:
		DEBUG_REQ(D_MGS, req, "llog close");
		rc = llog_origin_handle_close(req);
		break;
	case LLOG_CATINFO:
		DEBUG_REQ(D_MGS, req, "llog catinfo");
		req_capsule_set(&req->rq_pill, &RQF_LLOG_CATINFO);
		rc = llog_catinfo(req);
		break;
	default:
		req->rq_status = -ENOTSUPP;
		rc = ptlrpc_error(req);
		RETURN(rc);
	}

	LASSERT(current->journal_info == NULL);

	if (rc)
		CERROR("MGS handle cmd=%d rc=%d\n", opc, rc);

out:
	/* send the reply (fail is a reply-drop fail-injection site) */
	target_send_reply(req, rc, fail);
	RETURN(0);
}
/*
 * Maintain the uid/gid idmap for remote clients on security-context and
 * connect requests.
 *
 * For SEC_CTX_INIT/SEC_CTX_INIT_CONT/MDS_CONNECT a mapping between the
 * client-supplied ids and the locally authorized identity is added; for
 * SEC_CTX_FINI it is removed.  All other opcodes, local clients, and
 * requests without an export are passed through untouched.
 *
 * \param info - MDT per-thread request context
 *
 * \retval 0 on success (or when no idmap handling applies),
 *         -EACCES on authorization/identity failures, or the error from
 *         lustre_idmap_add()/lustre_idmap_del()
 */
int mdt_handle_idmap(struct mdt_thread_info *info)
{
	struct ptlrpc_request *req = mdt_info_req(info);
	struct mdt_device *mdt = info->mti_mdt;
	struct mdt_export_data *med;
	struct ptlrpc_user_desc *pud = req->rq_user_desc;
	struct md_identity *identity;
	__u32 opc;
	int rc = 0;
	ENTRY;

	if (!req->rq_export)
		RETURN(0);

	med = mdt_req2med(req);
	/* idmapping only applies to remote clients */
	if (!exp_connect_rmtclient(info->mti_exp))
		RETURN(0);

	opc = lustre_msg_get_opc(req->rq_reqmsg);
	/* Bypass other opc */
	if ((opc != SEC_CTX_INIT) && (opc != SEC_CTX_INIT_CONT) &&
	    (opc != SEC_CTX_FINI) && (opc != MDS_CONNECT))
		RETURN(0);

	/* remote-client exports are expected to carry an idmap table */
	LASSERT(med->med_idmap);

	if (unlikely(!pud)) {
		CDEBUG(D_SEC, "remote client must run with rq_user_desc "
		       "present\n");
		RETURN(-EACCES);
	}

	if (req->rq_auth_mapped_uid == INVALID_UID) {
		CDEBUG(D_SEC, "invalid authorized mapped uid, please check "
		       "/etc/lustre/idmap.conf!\n");
		RETURN(-EACCES);
	}

	if (is_identity_get_disabled(mdt->mdt_identity_cache)) {
		CDEBUG(D_SEC, "remote client must run with identity_get "
		       "enabled!\n");
		RETURN(-EACCES);
	}

	/* look up the local identity for the authorized mapped uid; the
	 * reference is dropped below via mdt_identity_put() */
	identity = mdt_identity_get(mdt->mdt_identity_cache,
				    req->rq_auth_mapped_uid);
	if (IS_ERR(identity)) {
		CDEBUG(D_SEC, "can't get mdt identity(%u), no mapping added\n",
		       req->rq_auth_mapped_uid);
		RETURN(-EACCES);
	}

	switch (opc) {
	case SEC_CTX_INIT:
	case SEC_CTX_INIT_CONT:
	case MDS_CONNECT:
		/* add client-id -> local-id mapping */
		rc = lustre_idmap_add(med->med_idmap,
				      pud->pud_uid, identity->mi_uid,
				      pud->pud_gid, identity->mi_gid);
		break;
	case SEC_CTX_FINI:
		/* context teardown: remove the mapping */
		rc = lustre_idmap_del(med->med_idmap,
				      pud->pud_uid, identity->mi_uid,
				      pud->pud_gid, identity->mi_gid);
		break;
	}

	mdt_identity_put(mdt->mdt_identity_cache, identity);

	if (rc)
		RETURN(rc);

	/* security context changed: any locks granted under the old context
	 * must be revoked (not done for MDS_CONNECT) */
	switch (opc) {
	case SEC_CTX_INIT:
	case SEC_CTX_INIT_CONT:
	case SEC_CTX_FINI:
		mdt_revoke_export_locks(req->rq_export);
		break;
	}

	RETURN(0);
}