Example 1
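/*
 * Fill one LNet memory descriptor for a bulk transfer. Each MD covers at
 * most LNET_MAX_IOV fragments of the descriptor, starting at fragment
 * mdidx * LNET_MAX_IOV; the encrypted kiov/kvec array is used when one
 * has been set up, otherwise the plain array.
 */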
void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
			 int mdidx)
{
	int offset = mdidx * LNET_MAX_IOV;

	CLASSERT(PTLRPC_MAX_BRW_PAGES < LI_POISON);

	LASSERT(mdidx < desc->bd_md_max_brw);
	LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
	LASSERT(!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV |
				 LNET_MD_PHYS)));

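	/* this MD covers the fragments left over after the preceding MDs,
	 * capped at LNET_MAX_IOV */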
	md->length = max(0, desc->bd_iov_count - mdidx * LNET_MAX_IOV);
	md->length = min_t(unsigned int, LNET_MAX_IOV, md->length);

	if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) {
		md->options |= LNET_MD_KIOV;
		if (GET_ENC_KIOV(desc))
			md->start = &BD_GET_ENC_KIOV(desc, offset);
		else
			md->start = &BD_GET_KIOV(desc, offset);
	} else {
		md->options |= LNET_MD_IOVEC;
		if (GET_ENC_KVEC(desc))
			md->start = &BD_GET_ENC_KVEC(desc, offset);
		else
			md->start = &BD_GET_KVEC(desc, offset);
	}
}
Example 2
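/*
 * Return the encryption pages attached to a bulk descriptor to the shared
 * page pool, then free and clear the descriptor's encrypted kiov array.
 */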
void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
{
	int p_idx, g_idx;
	int i;

	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));

	if (!GET_ENC_KIOV(desc))
		return;

	LASSERT(desc->bd_iov_count > 0);

	spin_lock(&page_pools.epp_lock);

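	/* pool pages live in a two-level array: split the current free-page
	 * count into a pool index and an index within that pool to find the
	 * first empty slot */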
	p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
	g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

	LASSERT(page_pools.epp_free_pages + desc->bd_iov_count <=
		page_pools.epp_total_pages);
	LASSERT(page_pools.epp_pools[p_idx]);

	for (i = 0; i < desc->bd_iov_count; i++) {
		LASSERT(BD_GET_ENC_KIOV(desc, i).bv_page);
		LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
		LASSERT(!page_pools.epp_pools[p_idx][g_idx]);

		page_pools.epp_pools[p_idx][g_idx] =
			BD_GET_ENC_KIOV(desc, i).bv_page;

		if (++g_idx == PAGES_PER_POOL) {
			p_idx++;
			g_idx = 0;
		}
	}

	page_pools.epp_free_pages += desc->bd_iov_count;

	enc_pools_wakeup();

	spin_unlock(&page_pools.epp_lock);

	kfree(GET_ENC_KIOV(desc));
	GET_ENC_KIOV(desc) = NULL;
}
Example 3
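/*
 * Client side: prepare the bulk security descriptor of an outgoing request.
 * For bulk reads, receiving pages are pre-allocated only in privacy mode;
 * for bulk writes, the pages are signed (integrity mode) or encrypted
 * (privacy mode) before the transfer.
 */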
int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
                          struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
	struct gss_cli_ctx              *gctx;
	struct lustre_msg               *msg;
	struct ptlrpc_bulk_sec_desc     *bsd;
	rawobj_t                         token;
	__u32                            maj;
	int                              offset;
	int                              rc;
	ENTRY;

	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_bulk_read || req->rq_bulk_write);
	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));

	gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
	LASSERT(gctx->gc_mechctx);

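	/* find the message segment that holds the bulk security descriptor;
	 * its position depends on the service part of the RPC flavor */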
	switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
	case SPTLRPC_SVC_NULL:
		LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
		msg = req->rq_reqbuf;
		offset = msg->lm_bufcount - 1;
		break;
	case SPTLRPC_SVC_AUTH:
	case SPTLRPC_SVC_INTG:
		LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
		msg = req->rq_reqbuf;
		offset = msg->lm_bufcount - 2;
		break;
	case SPTLRPC_SVC_PRIV:
		LASSERT(req->rq_clrbuf->lm_bufcount >= 2);
		msg = req->rq_clrbuf;
		offset = msg->lm_bufcount - 1;
		break;
	default:
		LBUG();
	}

	bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
	bsd->bsd_version = 0;
	bsd->bsd_flags = 0;
	bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);

	if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
		RETURN(0);

	LASSERT(bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
		bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV);

	if (req->rq_bulk_read) {
		/*
		 * bulk read: prepare receiving pages only for privacy mode.
		 */
		if (bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
			return gss_cli_prep_bulk(req, desc);
	} else {
		/*
		 * bulk write: sign or encrypt bulk pages.
		 */
		bsd->bsd_nob = desc->bd_nob;

		if (bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
			/* integrity mode */
			token.data = bsd->bsd_data;
			token.len = lustre_msg_buflen(msg, offset) -
				    sizeof(*bsd);

			maj = lgss_get_mic(gctx->gc_mechctx, 0, NULL,
					   desc->bd_iov_count,
					   GET_KIOV(desc),
					   &token);
			if (maj != GSS_S_COMPLETE) {
				CWARN("failed to sign bulk data: %x\n", maj);
				RETURN(-EACCES);
			}
		} else {
			/* privacy mode */
			if (desc->bd_iov_count == 0)
				RETURN(0);

			rc = sptlrpc_enc_pool_get_pages(desc);
			if (rc) {
				CERROR("bulk write: failed to allocate "
				       "encryption pages: %d\n", rc);
				RETURN(rc);
			}

			token.data = bsd->bsd_data;
			token.len = lustre_msg_buflen(msg, offset) -
				    sizeof(*bsd);

			maj = lgss_wrap_bulk(gctx->gc_mechctx, desc, &token, 0);
			if (maj != GSS_S_COMPLETE) {
				CWARN("fail to encrypt bulk data: %x\n", maj);
				RETURN(-EACCES);
			}
		}
	}

	RETURN(0);
}
Example 4
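/*
 * Server side: protect the bulk data of a bulk-read reply. In integrity
 * mode a MIC is computed over the pages; in privacy mode encryption pages
 * are allocated and the data is encrypted into them before the transfer.
 */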
int gss_svc_wrap_bulk(struct ptlrpc_request *req,
                      struct ptlrpc_bulk_desc *desc)
{
        struct gss_svc_reqctx        *grctx;
        struct ptlrpc_bulk_sec_desc  *bsdr, *bsdv;
        rawobj_t                      token;
        __u32                         maj;
        int                           rc;
        ENTRY;

        LASSERT(req->rq_svc_ctx);
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_read);
	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));

        grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);

        LASSERT(grctx->src_reqbsd);
        LASSERT(grctx->src_repbsd);
        LASSERT(grctx->src_ctx);
        LASSERT(grctx->src_ctx->gsc_mechctx);

        bsdr = grctx->src_reqbsd;
        bsdv = grctx->src_repbsd;

        /* bsdr has been sanity checked during unpacking */
        bsdv->bsd_version = 0;
        bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsdv->bsd_svc = bsdr->bsd_svc;
        bsdv->bsd_flags = 0;

        switch (bsdv->bsd_svc) {
        case SPTLRPC_BULK_SVC_INTG:
                token.data = bsdv->bsd_data;
                token.len = grctx->src_repbsd_size - sizeof(*bsdv);

		maj = lgss_get_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
				   desc->bd_iov_count,
				   GET_KIOV(desc), &token);
		if (maj != GSS_S_COMPLETE) {
                        bsdv->bsd_flags |= BSD_FL_ERR;
                        CERROR("failed to sign bulk data: %x\n", maj);
                        RETURN(-EACCES);
                }
                break;
        case SPTLRPC_BULK_SVC_PRIV:
                bsdv->bsd_nob = desc->bd_nob;

                if (desc->bd_iov_count == 0) {
                        LASSERT(desc->bd_nob == 0);
                        break;
                }

                rc = sptlrpc_enc_pool_get_pages(desc);
                if (rc) {
                        bsdv->bsd_flags |= BSD_FL_ERR;
                        CERROR("bulk read: failed to allocate encryption "
                               "pages: %d\n", rc);
                        RETURN(rc);
                }

                token.data = bsdv->bsd_data;
                token.len = grctx->src_repbsd_size - sizeof(*bsdv);

                maj = lgss_wrap_bulk(grctx->src_ctx->gsc_mechctx,
                                     desc, &token, 1);
                if (maj != GSS_S_COMPLETE) {
                        bsdv->bsd_flags |= BSD_FL_ERR;
                        CERROR("failed to encrypt bulk data: %x\n", maj);
                        RETURN(-EACCES);
                }
                break;
        }

        RETURN(0);
}
Example 5
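/*
 * Server side: verify the bulk data received for a bulk write. In
 * integrity mode the client's MIC over the pages is verified; in privacy
 * mode the received data is decrypted and bd_nob_transferred is set to
 * the clear-text size.
 */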
int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
                        struct ptlrpc_bulk_desc *desc)
{
        struct gss_svc_reqctx        *grctx;
        struct ptlrpc_bulk_sec_desc  *bsdr, *bsdv;
        rawobj_t                      token;
        __u32                         maj;
        ENTRY;

        LASSERT(req->rq_svc_ctx);
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_write);
	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));

        grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);

        LASSERT(grctx->src_reqbsd);
        LASSERT(grctx->src_repbsd);
        LASSERT(grctx->src_ctx);
        LASSERT(grctx->src_ctx->gsc_mechctx);

        bsdr = grctx->src_reqbsd;
        bsdv = grctx->src_repbsd;

        /* bsdr has been sanity checked during unpacking */
        bsdv->bsd_version = 0;
        bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsdv->bsd_svc = bsdr->bsd_svc;
        bsdv->bsd_flags = 0;

        switch (bsdv->bsd_svc) {
        case SPTLRPC_BULK_SVC_INTG:
                token.data = bsdr->bsd_data;
                token.len = grctx->src_reqbsd_size - sizeof(*bsdr);

		maj = lgss_verify_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
				      desc->bd_iov_count,
				      GET_KIOV(desc), &token);
                if (maj != GSS_S_COMPLETE) {
                        bsdv->bsd_flags |= BSD_FL_ERR;
                        CERROR("failed to verify bulk signature: %x\n", maj);
                        RETURN(-EACCES);
                }
                break;
        case SPTLRPC_BULK_SVC_PRIV:
                if (bsdr->bsd_nob != desc->bd_nob) {
                        bsdv->bsd_flags |= BSD_FL_ERR;
                        CERROR("prepared nob %d doesn't match the actual "
                               "nob %d\n", desc->bd_nob, bsdr->bsd_nob);
                        RETURN(-EPROTO);
                }

                if (desc->bd_iov_count == 0) {
                        LASSERT(desc->bd_nob == 0);
                        break;
                }

                token.data = bsdr->bsd_data;
                token.len = grctx->src_reqbsd_size - sizeof(*bsdr);

                maj = lgss_unwrap_bulk(grctx->src_ctx->gsc_mechctx,
                                       desc, &token, 0);
                if (maj != GSS_S_COMPLETE) {
                        bsdv->bsd_flags |= BSD_FL_ERR;
                        CERROR("failed decrypt bulk data: %x\n", maj);
                        RETURN(-EACCES);
                }

		/* mimic gss_cli_ctx_unwrap_bulk */
		desc->bd_nob_transferred = desc->bd_nob;

                break;
        }

        RETURN(0);
}
Example 6
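/*
 * Client side: check the server's bulk security descriptor after a bulk
 * transfer completes. For bulk writes only the server-reported status is
 * checked; for bulk reads the received pages are verified (integrity mode)
 * or decrypted (privacy mode) and bd_nob_transferred is adjusted to the
 * clear-text size.
 */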
int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
                            struct ptlrpc_request *req,
                            struct ptlrpc_bulk_desc *desc)
{
        struct gss_cli_ctx              *gctx;
        struct lustre_msg               *rmsg, *vmsg;
        struct ptlrpc_bulk_sec_desc     *bsdr, *bsdv;
        rawobj_t                         token;
        __u32                            maj;
        int                              roff, voff;
        ENTRY;

        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_read || req->rq_bulk_write);
	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));

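        /* locate the bulk security descriptors in the request (bsdr) and
         * reply (bsdv) buffers; the segment offsets depend on the service
         * part of the RPC flavor */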
        switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
        case SPTLRPC_SVC_NULL:
                vmsg = req->rq_repdata;
		LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 3);
                voff = vmsg->lm_bufcount - 1;

                rmsg = req->rq_reqbuf;
		LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 3);
                roff = rmsg->lm_bufcount - 1; /* last segment */
                break;
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                vmsg = req->rq_repdata;
		LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 4);
                voff = vmsg->lm_bufcount - 2;

                rmsg = req->rq_reqbuf;
		LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 4);
                roff = rmsg->lm_bufcount - 2; /* second last segment */
                break;
        case SPTLRPC_SVC_PRIV:
                vmsg = req->rq_repdata;
		LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 2);
                voff = vmsg->lm_bufcount - 1;

                rmsg = req->rq_clrbuf;
		LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 2);
                roff = rmsg->lm_bufcount - 1; /* last segment */
                break;
        default:
                LBUG();
        }

        bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr));
        bsdv = lustre_msg_buf(vmsg, voff, sizeof(*bsdv));
        LASSERT(bsdr && bsdv);

        if (bsdr->bsd_version != bsdv->bsd_version ||
            bsdr->bsd_type != bsdv->bsd_type ||
            bsdr->bsd_svc != bsdv->bsd_svc) {
                CERROR("bulk security descriptor mismatch: "
                       "(%u,%u,%u) != (%u,%u,%u)\n",
                       bsdr->bsd_version, bsdr->bsd_type, bsdr->bsd_svc,
                       bsdv->bsd_version, bsdv->bsd_type, bsdv->bsd_svc);
                RETURN(-EPROTO);
        }

        LASSERT(bsdv->bsd_svc == SPTLRPC_BULK_SVC_NULL ||
                bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
                bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV);

        /*
         * in privacy mode if return success, make sure bd_nob_transferred
         * is the actual size of the clear text, otherwise upper layer
         * may be surprised.
         */
        if (req->rq_bulk_write) {
                if (bsdv->bsd_flags & BSD_FL_ERR) {
                        CERROR("server reported bulk i/o failure\n");
                        RETURN(-EIO);
                }

                if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
                        desc->bd_nob_transferred = desc->bd_nob;
        } else {
                /*
                 * bulk read, upon return success, bd_nob_transferred is
                 * the size of plain text actually received.
                 */
                gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
                LASSERT(gctx->gc_mechctx);

		if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
			int i, nob;

			/* fix the actual data size */
			for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
				if (BD_GET_KIOV(desc, i).kiov_len + nob >
				    desc->bd_nob_transferred) {
					BD_GET_KIOV(desc, i).kiov_len =
						desc->bd_nob_transferred - nob;
				}
				nob += BD_GET_KIOV(desc, i).kiov_len;
			}

			token.data = bsdv->bsd_data;
			token.len = lustre_msg_buflen(vmsg, voff) -
				    sizeof(*bsdv);

			maj = lgss_verify_mic(gctx->gc_mechctx, 0, NULL,
					      desc->bd_iov_count,
					      GET_KIOV(desc),
					      &token);
                        if (maj != GSS_S_COMPLETE) {
                                CERROR("failed to verify bulk read: %x\n", maj);
                                RETURN(-EACCES);
                        }
                } else if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV) {
                        desc->bd_nob = bsdv->bsd_nob;
                        if (desc->bd_nob == 0)
                                RETURN(0);

                        token.data = bsdv->bsd_data;
                        token.len = lustre_msg_buflen(vmsg, voff) -
                                    sizeof(*bsdr);

                        maj = lgss_unwrap_bulk(gctx->gc_mechctx, desc,
                                               &token, 1);
                        if (maj != GSS_S_COMPLETE) {
                                CERROR("failed to decrypt bulk read: %x\n",
                                       maj);
                                RETURN(-EACCES);
                        }

                        desc->bd_nob_transferred = desc->bd_nob;
                }
        }

        RETURN(0);
}