/**
 * Send request reply from request \a req reply buffer.
 * \a flags defines reply types (e.g. PTLRPC_REPLY_MAYBE_DIFFICULT).
 *
 * Packs the reply message headers, resolves the peer connection, wraps the
 * reply through the sptlrpc layer and posts it on the wire via ptl_send_buf().
 *
 * \param req    request whose rq_reply_state holds the prepared reply buffer
 * \param flags  reply flags, also forwarded to ptlrpc_at_set_reply()
 *
 * Returns 0 on success or error code (-ENOTCONN if no connection could be
 * resolved, or the error from sptlrpc wrapping / buffer send).
 */
int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
{
	struct ptlrpc_reply_state *rs = req->rq_reply_state;
	struct ptlrpc_connection *conn;
	int rc;

	/* We must already have a reply buffer (only ptlrpc_error() may be
	 * called without one). The reply generated by sptlrpc layer (e.g.
	 * error notify, etc.) might have NULL rq->reqmsg; Otherwise we must
	 * have a request buffer which is either the actual (swabbed) incoming
	 * request, or a saved copy if this is a req saved in
	 * target_queue_final_reply().
	 */
	LASSERT(req->rq_no_reply == 0);
	LASSERT(req->rq_reqbuf != NULL);
	LASSERT(rs != NULL);
	/* A "difficult" reply (one needing an ACK) is only legal when the
	 * caller explicitly allowed it via the flags. */
	LASSERT((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
	LASSERT(req->rq_repmsg != NULL);
	LASSERT(req->rq_repmsg == rs->rs_msg);
	/* The reply state must be wired for the reply_out_callback event
	 * handler before we hand its buffer to the network. */
	LASSERT(rs->rs_cb_id.cbid_fn == reply_out_callback);
	LASSERT(rs->rs_cb_id.cbid_arg == rs);

	/* There may be no rq_export during failover */

	if (unlikely(req->rq_export && req->rq_export->exp_obd &&
		     req->rq_export->exp_obd->obd_fail)) {
		/* Failed obd's only send ENODEV */
		req->rq_type = PTL_RPC_MSG_ERR;
		req->rq_status = -ENODEV;
		CDEBUG(D_HA, "sending ENODEV from failed obd %d\n",
		       req->rq_export->exp_obd->obd_minor);
	}

	/* In order to keep interoperability with the client (< 2.3) which
	 * doesn't have pb_jobid in ptlrpc_body, We have to shrink the
	 * ptlrpc_body in reply buffer to ptlrpc_body_v2, otherwise, the
	 * reply buffer on client will be overflow.
	 *
	 * XXX Remove this whenever we drop the interoperability with
	 * such client.
	 */
	req->rq_replen = lustre_shrink_msg(req->rq_repmsg, 0,
					   sizeof(struct ptlrpc_body_v2), 1);

	/* Preserve an error type already set (e.g. by the obd_fail branch
	 * above); everything else goes out as a normal reply. */
	if (req->rq_type != PTL_RPC_MSG_ERR)
		req->rq_type = PTL_RPC_MSG_REPLY;

	lustre_msg_set_type(req->rq_repmsg, req->rq_type);
	/* Status travels in host-to-network encoding. */
	lustre_msg_set_status(req->rq_repmsg,
			      ptlrpc_status_hton(req->rq_status));
	/* rq_reqmsg may be NULL for sptlrpc-generated replies (see comment
	 * at top); use opcode 0 in that case. */
	lustre_msg_set_opc(req->rq_repmsg,
			   req->rq_reqmsg ?
			   lustre_msg_get_opc(req->rq_reqmsg) : 0);

	target_pack_pool_reply(req);

	ptlrpc_at_set_reply(req, flags);

	/* Prefer the export's established connection; fall back to looking
	 * one up by peer NID (no export during failover / early connect). */
	if (req->rq_export == NULL || req->rq_export->exp_connection == NULL)
		conn = ptlrpc_connection_get(req->rq_peer, req->rq_self, NULL);
	else
		conn = ptlrpc_connection_addref(req->rq_export->exp_connection);

	if (unlikely(conn == NULL)) {
		CERROR("not replying on NULL connection\n"); /* bug 9635 */
		return -ENOTCONN;
	}
	ptlrpc_rs_addref(rs); /* +1 ref for the network */

	rc = sptlrpc_svc_wrap_reply(req);
	if (unlikely(rc))
		goto out;

	req->rq_sent = ktime_get_real_seconds();

	/* Difficult replies need a peer ACK (unless explicitly suppressed)
	 * so the reply state can be committed/freed on confirmation. */
	rc = ptl_send_buf(&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
			  (rs->rs_difficult && !rs->rs_no_ack) ?
			  LNET_ACK_REQ : LNET_NOACK_REQ,
			  &rs->rs_cb_id, conn,
			  ptlrpc_req2svc(req)->srv_rep_portal,
			  req->rq_xid, req->rq_reply_off);
out:
	/* On failure the network callback will never fire, so drop the
	 * reference taken for it above. */
	if (unlikely(rc != 0))
		ptlrpc_req_drop_rs(req);
	ptlrpc_connection_put(conn);
	return rc;
}
/**
 * Pack a GSS SEC_CTX_INIT request into \a req's pre-allocated request buffer.
 *
 * Layout written into req->rq_reqbuf:
 *   buffer 0        - struct gss_header (PTLRPC_GSS_PROC_INIT, null service)
 *   buffer 1        - (reserved by caller; not touched here)
 *   buffer 'offset' - optional ptlrpc_user_desc when rq_pack_udesc is set
 *   last buffer     - security payload: svc type, target uuid, reverse
 *                     context handle, then the user-space GSS token
 *
 * \param imp         import this context-init request is sent on
 * \param req         request with rq_reqbuf already sized for the above
 * \param lustre_srv  lustre service type, stored little-endian in the payload
 * \param uid         uid to place in the user descriptor (also fsuid)
 * \param gid         gid to place in the user descriptor (also fsgid)
 * \param token_size  length in bytes of \a token
 * \param token       user-space buffer holding the GSS token
 *
 * Returns 0 on success, -EFAULT if the token cannot be copied from
 * user space. Buffer-size violations trigger LASSERT/LBUG — the caller
 * is expected to have allocated the message large enough.
 */
static int ctx_init_pack_request(struct obd_import *imp,
				 struct ptlrpc_request *req,
				 int lustre_srv,
				 uid_t uid, gid_t gid,
				 long token_size,
				 char __user *token)
{
	struct lustre_msg *msg = req->rq_reqbuf;
	struct gss_sec *gsec;
	struct gss_header *ghdr;
	struct ptlrpc_user_desc *pud;
	/* offset starts at 2: buf 0 = gss header, buf 1 is reserved by the
	 * caller, the optional user desc and the payload follow. */
	__u32 *p, size, offset = 2;
	rawobj_t obj;

	LASSERT(msg->lm_bufcount <= 4);
	LASSERT(req->rq_cli_ctx);
	LASSERT(req->rq_cli_ctx->cc_sec);

	/* gss hdr */
	ghdr = lustre_msg_buf(msg, 0, sizeof(*ghdr));

	ghdr->gh_version = PTLRPC_GSS_VERSION;
	ghdr->gh_sp = (__u8) imp->imp_sec->ps_part;
	ghdr->gh_flags = 0;
	ghdr->gh_proc = PTLRPC_GSS_PROC_INIT;
	ghdr->gh_seq = 0;
	ghdr->gh_svc = SPTLRPC_SVC_NULL;
	/* No established context yet, so no handle to send. */
	ghdr->gh_handle.len = 0;

	/* fix the user desc */
	if (req->rq_pack_udesc) {
		ghdr->gh_flags |= LUSTRE_GSS_PACK_USER;

		pud = lustre_msg_buf(msg, offset, sizeof(*pud));
		LASSERT(pud);
		pud->pud_uid = pud->pud_fsuid = uid;
		pud->pud_gid = pud->pud_fsgid = gid;
		pud->pud_cap = 0;
		pud->pud_ngroups = 0;
		offset++;
	}

	/* security payload: p walks forward through the buffer while size
	 * counts the bytes remaining. */
	p = lustre_msg_buf(msg, offset, 0);
	size = msg->lm_buflens[offset];
	LASSERT(p);

	/* 1. lustre svc type */
	LASSERT(size > 4);
	*p++ = cpu_to_le32(lustre_srv);
	size -= 4;

	/* 2. target uuid */
	obj.len = strlen(imp->imp_obd->u.cli.cl_target_uuid.uuid) + 1;
	obj.data = imp->imp_obd->u.cli.cl_target_uuid.uuid;
	/* rawobj_serialize advances p and decrements size; failure means
	 * the caller sized the buffer wrong — treat as a fatal bug. */
	if (rawobj_serialize(&obj, &p, &size))
		LBUG();

	/* 3. reverse context handle. actually only needed by root user,
	 * but we send it anyway.
	 */
	gsec = sec2gsec(req->rq_cli_ctx->cc_sec);
	obj.len = sizeof(gsec->gs_rvs_hdl);
	obj.data = (__u8 *) &gsec->gs_rvs_hdl;
	if (rawobj_serialize(&obj, &p, &size))
		LBUG();

	/* 4. now the token */
	LASSERT(size >= (sizeof(__u32) + token_size));
	*p++ = cpu_to_le32(((__u32) token_size));
	if (copy_from_user(p, token, token_size)) {
		CERROR("can't copy token\n");
		return -EFAULT;
	}
	/* Account for the length word plus the token rounded up to a
	 * 4-byte boundary; 'size' is now the unused tail of the buffer. */
	size -= sizeof(__u32) + cfs_size_round4(token_size);

	/* Shrink the payload buffer to the bytes actually used so the
	 * request goes out at its real length. */
	req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, offset,
						msg->lm_buflens[offset] - size,
						0);
	return 0;
}