/*
 * Fill in one LNet memory descriptor covering the \a mdidx'th slice of
 * the bulk descriptor \a desc.  Each MD carries at most LNET_MAX_IOV
 * fragments; the slice starts at fragment mdidx * LNET_MAX_IOV.
 *
 * \a md must arrive without any of the IOVEC/KIOV/PHYS option bits set;
 * the appropriate one is added here based on the descriptor type.
 */
void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
			 int mdidx)
{
	int frag0 = mdidx * LNET_MAX_IOV;	/* first fragment of slice */
	int nfrags;

	CLASSERT(PTLRPC_MAX_BRW_PAGES < LI_POISON);

	LASSERT(mdidx < desc->bd_md_max_brw);
	LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
	LASSERT(!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV |
				 LNET_MD_PHYS)));

	/* fragments remaining from frag0, clamped to one MD's worth */
	nfrags = desc->bd_iov_count - frag0;
	if (nfrags < 0)
		nfrags = 0;
	if (nfrags > LNET_MAX_IOV)
		nfrags = LNET_MAX_IOV;
	md->length = nfrags;

	if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) {
		md->options |= LNET_MD_KIOV;
		if (GET_ENC_KIOV(desc))
			md->start = &BD_GET_ENC_KIOV(desc, frag0);
		else
			md->start = &BD_GET_KIOV(desc, frag0);
	} else {
		md->options |= LNET_MD_IOVEC;
		if (GET_ENC_KVEC(desc))
			md->start = &BD_GET_ENC_KVEC(desc, frag0);
		else
			md->start = &BD_GET_KVEC(desc, frag0);
	}
}
/*
 * Compute the checksum of the bulk pages in \a desc using hash algorithm
 * \a alg and store up to \a buflen bytes of the digest in \a buf.
 *
 * If the algorithm's digest is larger than \a buflen, the stored value is
 * truncated to \a buflen bytes.  The caller must supply at least 4 bytes.
 *
 * \retval 0 on success, negative error code on failure.
 */
int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
			      void *buf, int buflen)
{
	struct cfs_crypto_hash_desc *hdesc;
	int hashsize;
	unsigned int bufsize;
	int i, err;

	LASSERT(alg > BULK_HASH_ALG_NULL && alg < BULK_HASH_ALG_MAX);
	LASSERT(buflen >= 4);

	hdesc = cfs_crypto_hash_init(cfs_hash_alg_id[alg], NULL, 0);
	if (IS_ERR(hdesc)) {
		CERROR("Unable to initialize checksum hash %s\n",
		       cfs_crypto_hash_name(cfs_hash_alg_id[alg]));
		return PTR_ERR(hdesc);
	}

	hashsize = cfs_crypto_hash_digestsize(cfs_hash_alg_id[alg]);

	for (i = 0; i < desc->bd_iov_count; i++) {
		/* feed each fragment; mask keeps only the in-page offset */
		cfs_crypto_hash_update_page(hdesc,
					    BD_GET_KIOV(desc, i).bv_page,
					    BD_GET_KIOV(desc, i).bv_offset &
					    ~PAGE_MASK,
					    BD_GET_KIOV(desc, i).bv_len);
	}

	if (hashsize > buflen) {
		/* digest larger than caller's buffer: finalize into a
		 * max-sized stack buffer, then return a truncated copy */
		unsigned char hashbuf[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];

		bufsize = sizeof(hashbuf);
		LASSERTF(bufsize >= hashsize,
			 "bufsize = %u < hashsize %u\n",
			 bufsize, hashsize);
		err = cfs_crypto_hash_final(hdesc, hashbuf, &bufsize);
		/* copy only on success: on failure hashbuf contains
		 * uninitialized stack bytes which must not leak to the
		 * caller's buffer */
		if (err == 0)
			memcpy(buf, hashbuf, buflen);
	} else {
		bufsize = buflen;
		err = cfs_crypto_hash_final(hdesc, buf, &bufsize);
	}

	return err;
}
/*
 * Client-side unwrap of bulk i/o protection carried in an RPC reply.
 *
 * Locates the bulk security descriptor in both the request we sent and
 * the reply we received, checks they agree, then — for bulk reads —
 * either verifies the server's integrity MIC over the received pages
 * (SPTLRPC_BULK_SVC_INTG) or decrypts the pages in place
 * (SPTLRPC_BULK_SVC_PRIV).  For bulk writes it only checks the server's
 * error flag and fixes up bd_nob_transferred in privacy mode.
 *
 * \retval 0        success (or nothing to verify)
 * \retval -EPROTO  request/reply bulk security descriptors disagree
 * \retval -EIO     server flagged a bulk i/o failure (write case)
 * \retval -EACCES  MIC verification or decryption failed (read case)
 */
int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
			    struct ptlrpc_request *req,
			    struct ptlrpc_bulk_desc *desc)
{
	struct gss_cli_ctx *gctx;
	struct lustre_msg *rmsg, *vmsg;
	struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
	rawobj_t token;
	__u32 maj;
	int roff, voff;
	ENTRY;

	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_bulk_read || req->rq_bulk_write);
	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));

	/* locate the bulk security descriptor segment in both the request
	 * buffer (rmsg/roff) and the reply (vmsg/voff); which segment it
	 * occupies depends on the RPC-level service flavor */
	switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
	case SPTLRPC_SVC_NULL:
		vmsg = req->rq_repdata;
		LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 3);
		voff = vmsg->lm_bufcount - 1;

		rmsg = req->rq_reqbuf;
		LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 3);
		roff = rmsg->lm_bufcount - 1; /* last segment */
		break;
	case SPTLRPC_SVC_AUTH:
	case SPTLRPC_SVC_INTG:
		vmsg = req->rq_repdata;
		LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 4);
		voff = vmsg->lm_bufcount - 2;

		rmsg = req->rq_reqbuf;
		LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 4);
		roff = rmsg->lm_bufcount - 2; /* second last segment */
		break;
	case SPTLRPC_SVC_PRIV:
		vmsg = req->rq_repdata;
		LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 2);
		voff = vmsg->lm_bufcount - 1;

		/* in privacy mode the request descriptor lives in the
		 * clear-text buffer, not rq_reqbuf */
		rmsg = req->rq_clrbuf;
		LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 2);
		roff = rmsg->lm_bufcount - 1; /* last segment */
		break;
	default:
		LBUG();
	}

	bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr));
	bsdv = lustre_msg_buf(vmsg, voff, sizeof(*bsdv));
	LASSERT(bsdr && bsdv);

	/* the reply's descriptor must match what we requested */
	if (bsdr->bsd_version != bsdv->bsd_version ||
	    bsdr->bsd_type != bsdv->bsd_type ||
	    bsdr->bsd_svc != bsdv->bsd_svc) {
		CERROR("bulk security descriptor mismatch: "
		       "(%u,%u,%u) != (%u,%u,%u)\n",
		       bsdr->bsd_version, bsdr->bsd_type, bsdr->bsd_svc,
		       bsdv->bsd_version, bsdv->bsd_type, bsdv->bsd_svc);
		RETURN(-EPROTO);
	}

	LASSERT(bsdv->bsd_svc == SPTLRPC_BULK_SVC_NULL ||
		bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
		bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV);

	/*
	 * in privacy mode if return success, make sure bd_nob_transferred
	 * is the actual size of the clear text, otherwise upper layer
	 * may be surprised.
	 */
	if (req->rq_bulk_write) {
		if (bsdv->bsd_flags & BSD_FL_ERR) {
			CERROR("server reported bulk i/o failure\n");
			RETURN(-EIO);
		}

		if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
			desc->bd_nob_transferred = desc->bd_nob;
	} else {
		/*
		 * bulk read, upon return success, bd_nob_transferred is
		 * the size of plain text actually received.
		 */
		gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
		LASSERT(gctx->gc_mechctx);

		if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
			int i, nob;

			/* fix the actual data size: trim fragment lengths
			 * so their sum does not exceed bd_nob_transferred
			 * before computing the MIC over them */
			for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
				if (BD_GET_KIOV(desc, i).kiov_len + nob >
				    desc->bd_nob_transferred) {
					BD_GET_KIOV(desc, i).kiov_len =
						desc->bd_nob_transferred - nob;
				}
				nob += BD_GET_KIOV(desc, i).kiov_len;
			}

			/* MIC token follows the descriptor header in the
			 * reply segment */
			token.data = bsdv->bsd_data;
			token.len = lustre_msg_buflen(vmsg, voff) -
				    sizeof(*bsdv);

			maj = lgss_verify_mic(gctx->gc_mechctx, 0, NULL,
					      desc->bd_iov_count,
					      GET_KIOV(desc),
					      &token);
			if (maj != GSS_S_COMPLETE) {
				CERROR("failed to verify bulk read: %x\n",
				       maj);
				RETURN(-EACCES);
			}
		} else if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV) {
			desc->bd_nob = bsdv->bsd_nob;
			if (desc->bd_nob == 0)
				RETURN(0);

			token.data = bsdv->bsd_data;
			token.len = lustre_msg_buflen(vmsg, voff) -
				    sizeof(*bsdr);

			maj = lgss_unwrap_bulk(gctx->gc_mechctx, desc,
					       &token, 1);
			if (maj != GSS_S_COMPLETE) {
				CERROR("failed to decrypt bulk read: %x\n",
				       maj);
				RETURN(-EACCES);
			}

			/* decryption succeeded: report clear-text size */
			desc->bd_nob_transferred = desc->bd_nob;
		}
	}

	RETURN(0);
}