int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed) { struct ptlrpc_bulk_sec_desc *bsd; int size = msg->lm_buflens[offset]; bsd = lustre_msg_buf(msg, offset, sizeof(*bsd)); if (!bsd) { CERROR("Invalid bulk sec desc: size %d\n", size); return -EINVAL; } if (swabbed) __swab32s(&bsd->bsd_nob); if (unlikely(bsd->bsd_version != 0)) { CERROR("Unexpected version %u\n", bsd->bsd_version); return -EPROTO; } if (unlikely(bsd->bsd_type >= SPTLRPC_BULK_MAX)) { CERROR("Invalid type %u\n", bsd->bsd_type); return -EPROTO; } /* FIXME more sanity check here */ if (unlikely(bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL && bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG && bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)) { CERROR("Invalid svc %u\n", bsd->bsd_svc); return -EPROTO; } return 0; }
int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req, struct ptlrpc_bulk_desc *desc) { struct gss_cli_ctx *gctx; struct lustre_msg *msg; struct ptlrpc_bulk_sec_desc *bsd; rawobj_t token; __u32 maj; int offset; int rc; ENTRY; LASSERT(req->rq_pack_bulk); LASSERT(req->rq_bulk_read || req->rq_bulk_write); LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type)); gctx = container_of(ctx, struct gss_cli_ctx, gc_base); LASSERT(gctx->gc_mechctx); switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) { case SPTLRPC_SVC_NULL: LASSERT(req->rq_reqbuf->lm_bufcount >= 3); msg = req->rq_reqbuf; offset = msg->lm_bufcount - 1; break; case SPTLRPC_SVC_AUTH: case SPTLRPC_SVC_INTG: LASSERT(req->rq_reqbuf->lm_bufcount >= 4); msg = req->rq_reqbuf; offset = msg->lm_bufcount - 2; break; case SPTLRPC_SVC_PRIV: LASSERT(req->rq_clrbuf->lm_bufcount >= 2); msg = req->rq_clrbuf; offset = msg->lm_bufcount - 1; break; default: LBUG(); } bsd = lustre_msg_buf(msg, offset, sizeof(*bsd)); bsd->bsd_version = 0; bsd->bsd_flags = 0; bsd->bsd_type = SPTLRPC_BULK_DEFAULT; bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc); if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL) RETURN(0); LASSERT(bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG || bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV); if (req->rq_bulk_read) { /* * bulk read: prepare receiving pages only for privacy mode. */ if (bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV) return gss_cli_prep_bulk(req, desc); } else { /* * bulk write: sign or encrypt bulk pages. 
*/ bsd->bsd_nob = desc->bd_nob; if (bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG) { /* integrity mode */ token.data = bsd->bsd_data; token.len = lustre_msg_buflen(msg, offset) - sizeof(*bsd); maj = lgss_get_mic(gctx->gc_mechctx, 0, NULL, desc->bd_iov_count, GET_KIOV(desc), &token); if (maj != GSS_S_COMPLETE) { CWARN("failed to sign bulk data: %x\n", maj); RETURN(-EACCES); } } else { /* privacy mode */ if (desc->bd_iov_count == 0) RETURN(0); rc = sptlrpc_enc_pool_get_pages(desc); if (rc) { CERROR("bulk write: failed to allocate " "encryption pages: %d\n", rc); RETURN(rc); } token.data = bsd->bsd_data; token.len = lustre_msg_buflen(msg, offset) - sizeof(*bsd); maj = lgss_wrap_bulk(gctx->gc_mechctx, desc, &token, 0); if (maj != GSS_S_COMPLETE) { CWARN("fail to encrypt bulk data: %x\n", maj); RETURN(-EACCES); } } } RETURN(0); }
int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req, struct ptlrpc_bulk_desc *desc) { struct gss_cli_ctx *gctx; struct lustre_msg *rmsg, *vmsg; struct ptlrpc_bulk_sec_desc *bsdr, *bsdv; rawobj_t token; __u32 maj; int roff, voff; ENTRY; LASSERT(req->rq_pack_bulk); LASSERT(req->rq_bulk_read || req->rq_bulk_write); LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type)); switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) { case SPTLRPC_SVC_NULL: vmsg = req->rq_repdata; LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 3); voff = vmsg->lm_bufcount - 1; rmsg = req->rq_reqbuf; LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 3); roff = rmsg->lm_bufcount - 1; /* last segment */ break; case SPTLRPC_SVC_AUTH: case SPTLRPC_SVC_INTG: vmsg = req->rq_repdata; LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 4); voff = vmsg->lm_bufcount - 2; rmsg = req->rq_reqbuf; LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 4); roff = rmsg->lm_bufcount - 2; /* second last segment */ break; case SPTLRPC_SVC_PRIV: vmsg = req->rq_repdata; LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 2); voff = vmsg->lm_bufcount - 1; rmsg = req->rq_clrbuf; LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 2); roff = rmsg->lm_bufcount - 1; /* last segment */ break; default: LBUG(); } bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr)); bsdv = lustre_msg_buf(vmsg, voff, sizeof(*bsdv)); LASSERT(bsdr && bsdv); if (bsdr->bsd_version != bsdv->bsd_version || bsdr->bsd_type != bsdv->bsd_type || bsdr->bsd_svc != bsdv->bsd_svc) { CERROR("bulk security descriptor mismatch: " "(%u,%u,%u) != (%u,%u,%u)\n", bsdr->bsd_version, bsdr->bsd_type, bsdr->bsd_svc, bsdv->bsd_version, bsdv->bsd_type, bsdv->bsd_svc); RETURN(-EPROTO); } LASSERT(bsdv->bsd_svc == SPTLRPC_BULK_SVC_NULL || bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG || bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV); /* * in privacy mode if return success, make sure bd_nob_transferred * is the actual size of the clear text, otherwise upper layer * may be surprised. 
*/ if (req->rq_bulk_write) { if (bsdv->bsd_flags & BSD_FL_ERR) { CERROR("server reported bulk i/o failure\n"); RETURN(-EIO); } if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV) desc->bd_nob_transferred = desc->bd_nob; } else { /* * bulk read, upon return success, bd_nob_transferred is * the size of plain text actually received. */ gctx = container_of(ctx, struct gss_cli_ctx, gc_base); LASSERT(gctx->gc_mechctx); if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG) { int i, nob; /* fix the actual data size */ for (i = 0, nob = 0; i < desc->bd_iov_count; i++) { if (BD_GET_KIOV(desc, i).kiov_len + nob > desc->bd_nob_transferred) { BD_GET_KIOV(desc, i).kiov_len = desc->bd_nob_transferred - nob; } nob += BD_GET_KIOV(desc, i).kiov_len; } token.data = bsdv->bsd_data; token.len = lustre_msg_buflen(vmsg, voff) - sizeof(*bsdv); maj = lgss_verify_mic(gctx->gc_mechctx, 0, NULL, desc->bd_iov_count, GET_KIOV(desc), &token); if (maj != GSS_S_COMPLETE) { CERROR("failed to verify bulk read: %x\n", maj); RETURN(-EACCES); } } else if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV) { desc->bd_nob = bsdv->bsd_nob; if (desc->bd_nob == 0) RETURN(0); token.data = bsdv->bsd_data; token.len = lustre_msg_buflen(vmsg, voff) - sizeof(*bsdr); maj = lgss_unwrap_bulk(gctx->gc_mechctx, desc, &token, 1); if (maj != GSS_S_COMPLETE) { CERROR("failed to decrypt bulk read: %x\n", maj); RETURN(-EACCES); } desc->bd_nob_transferred = desc->bd_nob; } } RETURN(0); }
/*
 * Pack a gss context-init (PTLRPC_GSS_PROC_INIT) request into
 * req->rq_reqbuf on behalf of a userspace caller.
 *
 * The security payload segment is laid out sequentially:
 *   1. lustre service type (__u32, little-endian)
 *   2. target uuid (length-prefixed rawobj)
 *   3. reverse context handle (length-prefixed rawobj)
 *   4. the gss token, copied from the user buffer @token
 * and the message is then shrunk to the bytes actually used.
 *
 * \param lustre_srv  lustre service type to advertise in the payload
 * \param uid/gid     credentials packed into the user desc (if requested)
 * \param token_size  byte length of @token
 * \param token       userspace pointer to the gss token
 *
 * \retval 0        on success
 * \retval -EFAULT  the token could not be copied from user space
 *
 * NOTE(review): token_size is only bounds-checked by LASSERT, which is
 * compiled out in non-debug builds — presumably callers validate it
 * against the preallocated buffer size; confirm at the call site.
 */
static int ctx_init_pack_request(struct obd_import *imp,
				 struct ptlrpc_request *req,
				 int lustre_srv,
				 uid_t uid, gid_t gid,
				 long token_size,
				 char __user *token)
{
	struct lustre_msg *msg = req->rq_reqbuf;
	struct gss_sec *gsec;
	struct gss_header *ghdr;
	struct ptlrpc_user_desc *pud;
	__u32 *p, size, offset = 2;
	rawobj_t obj;

	LASSERT(msg->lm_bufcount <= 4);
	LASSERT(req->rq_cli_ctx);
	LASSERT(req->rq_cli_ctx->cc_sec);

	/* gss hdr: a fresh INIT request, no sequence or handle yet */
	ghdr = lustre_msg_buf(msg, 0, sizeof(*ghdr));
	ghdr->gh_version = PTLRPC_GSS_VERSION;
	ghdr->gh_sp = (__u8) imp->imp_sec->ps_part;
	ghdr->gh_flags = 0;
	ghdr->gh_proc = PTLRPC_GSS_PROC_INIT;
	ghdr->gh_seq = 0;
	ghdr->gh_svc = SPTLRPC_SVC_NULL;
	ghdr->gh_handle.len = 0;

	/* fix the user desc; its presence shifts the payload to segment 3 */
	if (req->rq_pack_udesc) {
		ghdr->gh_flags |= LUSTRE_GSS_PACK_USER;

		pud = lustre_msg_buf(msg, offset, sizeof(*pud));
		LASSERT(pud);
		pud->pud_uid = pud->pud_fsuid = uid;
		pud->pud_gid = pud->pud_fsgid = gid;
		pud->pud_cap = 0;
		pud->pud_ngroups = 0;
		offset++;
	}

	/* security payload: p walks the segment, size tracks bytes left */
	p = lustre_msg_buf(msg, offset, 0);
	size = msg->lm_buflens[offset];
	LASSERT(p);

	/* 1. lustre svc type */
	LASSERT(size > 4);
	*p++ = cpu_to_le32(lustre_srv);
	size -= 4;

	/* 2. target uuid (including the trailing NUL) */
	obj.len = strlen(imp->imp_obd->u.cli.cl_target_uuid.uuid) + 1;
	obj.data = imp->imp_obd->u.cli.cl_target_uuid.uuid;
	if (rawobj_serialize(&obj, &p, &size))
		LBUG();

	/* 3. reverse context handle. actually only needed by root user,
	 * but we send it anyway. */
	gsec = sec2gsec(req->rq_cli_ctx->cc_sec);
	obj.len = sizeof(gsec->gs_rvs_hdl);
	obj.data = (__u8 *) &gsec->gs_rvs_hdl;
	if (rawobj_serialize(&obj, &p, &size))
		LBUG();

	/* 4. now the token: length word, then the raw bytes from userspace;
	 * the consumed space is rounded up to a 4-byte boundary */
	LASSERT(size >= (sizeof(__u32) + token_size));
	*p++ = cpu_to_le32(((__u32) token_size));
	if (copy_from_user(p, token, token_size)) {
		CERROR("can't copy token\n");
		return -EFAULT;
	}
	size -= sizeof(__u32) + cfs_size_round4(token_size);

	/* trim the payload segment down to the bytes actually written */
	req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, offset,
						msg->lm_buflens[offset] - size,
						0);
	return 0;
}
/*
 * Send a best-effort SEC_CTX_FINI rpc to tell the peer this gss context
 * is being destroyed.
 *
 * If the context is in error or no longer uptodate, the destroy rpc is
 * skipped entirely (the context will just be dropped locally).  Failures
 * to allocate or send the rpc are logged and tolerated — the context is
 * destroyed locally regardless, so the returned error is informational.
 *
 * \retval 0        rpc sent (or deliberately skipped)
 * \retval negative allocation/pack/send error; context still torn down
 */
int gss_do_ctx_fini_rpc(struct gss_cli_ctx *gctx)
{
	struct ptlrpc_cli_ctx *ctx = &gctx->gc_base;
	struct obd_import *imp = ctx->cc_sec->ps_import;
	struct ptlrpc_request *req;
	struct ptlrpc_user_desc *pud;
	int rc;
	ENTRY;

	LASSERT(atomic_read(&ctx->cc_refcount) > 0);

	/* a dead/stale context cannot produce a valid destroy rpc */
	if (cli_ctx_is_error(ctx) || !cli_ctx_is_uptodate(ctx)) {
		CDEBUG(D_SEC, "ctx %p(%u->%s) not uptodate, "
		       "don't send destroy rpc\n", ctx,
		       ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
		RETURN(0);
	}

	might_sleep();

	CWARN("%s ctx %p idx "LPX64" (%u->%s)\n",
	      sec_is_reverse(ctx->cc_sec) ?
	      "server finishing reverse" : "client finishing forward",
	      ctx, gss_handle_to_u64(&gctx->gc_handle),
	      ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));

	/* mark the context so the request is wrapped as a DESTROY proc */
	gctx->gc_proc = PTLRPC_GSS_PROC_DESTROY;

	req = ptlrpc_request_alloc(imp, &RQF_SEC_CTX);
	if (req == NULL) {
		CWARN("ctx %p(%u): fail to prepare rpc, destroy locally\n",
		      ctx, ctx->cc_vcred.vc_uid);
		GOTO(out, rc = -ENOMEM);
	}

	rc = ptlrpc_request_bufs_pack(req, LUSTRE_OBD_VERSION, SEC_CTX_FINI,
				      NULL, ctx);
	if (rc) {
		ptlrpc_request_free(req);
		GOTO(out_ref, rc);
	}

	/* fix the user desc */
	if (req->rq_pack_udesc) {
		/* we rely the fact that this request is in AUTH mode,
		 * and user_desc at offset 2. */
		pud = lustre_msg_buf(req->rq_reqbuf, 2, sizeof(*pud));
		LASSERT(pud);
		pud->pud_uid = pud->pud_fsuid = ctx->cc_vcred.vc_uid;
		pud->pud_gid = pud->pud_fsgid = ctx->cc_vcred.vc_gid;
		pud->pud_cap = 0;
		pud->pud_ngroups = 0;
	}

	/* fire-and-forget: send directly without waiting for the reply */
	req->rq_phase = RQ_PHASE_RPC;
	rc = ptl_send_rpc(req, 1);
	if (rc)
		CWARN("ctx %p(%u->%s): rpc error %d, destroy locally\n", ctx,
		      ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec), rc);

out_ref:
	ptlrpc_req_finished(req);
out:
	RETURN(rc);
}
/*
 * Parse a gss context-init reply and serialize it into the userspace
 * buffer @outbuf in the layout the gss daemon expects:
 *
 *   status(4) | gh_major(4) | gh_minor(4) | gh_seqwin(4) |
 *   handle_len(4) | handle (padded to 4) |
 *   token_len(4)  | token  (padded to 4)
 *
 * \param swabbed  non-zero if the reply arrived in foreign endianness
 * \param outlen   capacity of @outbuf in bytes
 *
 * \retval >=0      number of bytes written to @outbuf
 * \retval -EPROTO  malformed reply (bufcount, header, or version)
 * \retval -EFAULT  @outbuf too small, or copy_to_user failed
 */
static int ctx_init_parse_reply(struct lustre_msg *msg, int swabbed,
				char __user *outbuf, long outlen)
{
	struct gss_rep_header *ghdr;
	__u32 obj_len, round_len;
	__u32 status, effective;

	if (msg->lm_bufcount != 3) {
		CERROR("unexpected bufcount %u\n", msg->lm_bufcount);
		return -EPROTO;
	}

	ghdr = (struct gss_rep_header *) gss_swab_header(msg, 0, swabbed);
	if (ghdr == NULL) {
		CERROR("unable to extract gss reply header\n");
		return -EPROTO;
	}

	if (ghdr->gh_version != PTLRPC_GSS_VERSION) {
		CERROR("invalid gss version %u\n", ghdr->gh_version);
		return -EPROTO;
	}

	/* 4 fixed words + 2 length words + rounded handle + rounded token */
	if (outlen < (4 + 2) * 4 + cfs_size_round4(ghdr->gh_handle.len) +
		     cfs_size_round4(msg->lm_buflens[2])) {
		CERROR("output buffer size %ld too small\n", outlen);
		return -EFAULT;
	}

	/* fixed: effective was initialized both at declaration and here;
	 * keep the single explicit initialization next to its use */
	status = 0;
	effective = 0;

	if (copy_to_user(outbuf, &status, 4))
		return -EFAULT;
	outbuf += 4;
	if (copy_to_user(outbuf, &ghdr->gh_major, 4))
		return -EFAULT;
	outbuf += 4;
	if (copy_to_user(outbuf, &ghdr->gh_minor, 4))
		return -EFAULT;
	outbuf += 4;
	if (copy_to_user(outbuf, &ghdr->gh_seqwin, 4))
		return -EFAULT;
	outbuf += 4;
	effective += 4 * 4;

	/* handle */
	obj_len = ghdr->gh_handle.len;
	round_len = (obj_len + 3) & ~3;
	/* NOTE(review): round_len bytes are copied from a buffer obj_len
	 * bytes long — up to 3 padding bytes reach user space; presumably
	 * lustre message buffers are padded/zeroed past obj_len — confirm */
	if (copy_to_user(outbuf, &obj_len, 4))
		return -EFAULT;
	outbuf += 4;
	if (copy_to_user(outbuf, (char *) ghdr->gh_handle.data, round_len))
		return -EFAULT;
	outbuf += round_len;
	effective += 4 + round_len;

	/* out token */
	obj_len = msg->lm_buflens[2];
	round_len = (obj_len + 3) & ~3;
	if (copy_to_user(outbuf, &obj_len, 4))
		return -EFAULT;
	outbuf += 4;
	if (copy_to_user(outbuf, lustre_msg_buf(msg, 2, 0), round_len))
		return -EFAULT;
	outbuf += round_len;
	effective += 4 + round_len;

	return effective;
}