/*
 * Tally the buffer lengths of an IOV chain: *data_length_p receives the
 * total length of DATA plus SIGN_ONLY buffers, *assoc_data_length_p the
 * length of SIGN_ONLY buffers alone.
 */
void
kg_iov_msglen(gss_iov_buffer_desc *iov, int iov_count,
              size_t *data_length_p, size_t *assoc_data_length_p)
{
    size_t total_len = 0, assoc_len = 0;
    int n;

    assert(iov != GSS_C_NO_IOV_BUFFER);

    *data_length_p = *assoc_data_length_p = 0;

    for (n = 0; n < iov_count; n++) {
        switch (GSS_IOV_BUFFER_TYPE(iov[n].type)) {
        case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
            assoc_len += iov[n].buffer.length;
            /* fallthrough: SIGN_ONLY also counts toward the data total */
        case GSS_IOV_BUFFER_TYPE_DATA:
            total_len += iov[n].buffer.length;
            break;
        default:
            break;
        }
    }

    *data_length_p = total_len;
    *assoc_data_length_p = assoc_len;
}
/*
 * Return the first buffer in the IOV chain whose (masked) type matches,
 * or NULL when no such buffer exists.
 */
gss_iov_buffer_desc *
_gk_find_buffer(gss_iov_buffer_desc *iov, int iov_count, OM_uint32 type)
{
    gss_iov_buffer_desc *cur = iov;
    gss_iov_buffer_desc *end = iov + iov_count;

    while (cur < end) {
        if (GSS_IOV_BUFFER_TYPE(cur->type) == type)
            return cur;
        cur++;
    }

    return NULL;
}
/*
 * Report whether the IOV chain is integrity-only, i.e. contains no DATA
 * buffer that would carry confidential payload.
 */
krb5_boolean
kg_integ_only_iov(gss_iov_buffer_desc *iov, int iov_count)
{
    int n;

    assert(iov != GSS_C_NO_IOV_BUFFER);

    for (n = 0; n < iov_count; n++) {
        if (GSS_IOV_BUFFER_TYPE(iov[n].type) == GSS_IOV_BUFFER_TYPE_DATA)
            return FALSE;
    }

    return TRUE;
}
/*
 * Return nonzero when the IOV chain carries no DATA buffer, i.e. the
 * message is integrity-protected only.
 */
int
gssEapIsIntegrityOnly(gss_iov_buffer_desc *iov, int iov_count)
{
    int idx = 0;

    GSSEAP_ASSERT(iov != GSS_C_NO_IOV_BUFFER);

    while (idx < iov_count) {
        if (GSS_IOV_BUFFER_TYPE(iov[idx].type) == GSS_IOV_BUFFER_TYPE_DATA)
            return FALSE;
        idx++;
    }

    return TRUE;
}
/*
 * Locate the single buffer of the given type in the IOV chain.  Returns
 * GSS_C_NO_IOV_BUFFER when the chain is absent, the type does not occur,
 * or the type occurs more than once (ambiguous).
 */
gss_iov_buffer_t
kg_locate_iov(gss_iov_buffer_desc *iov, int iov_count, OM_uint32 type)
{
    gss_iov_buffer_t found = GSS_C_NO_IOV_BUFFER;
    int n;

    if (iov == GSS_C_NO_IOV_BUFFER)
        return GSS_C_NO_IOV_BUFFER;

    for (n = 0; n < iov_count; n++) {
        if (GSS_IOV_BUFFER_TYPE(iov[n].type) != type)
            continue;
        if (found != GSS_C_NO_IOV_BUFFER)
            return GSS_C_NO_IOV_BUFFER; /* duplicate match: ambiguous */
        found = &iov[n];
    }

    return found;
}
/*
 * Map a GSS IOV buffer type onto the krb5 crypto IOV type used when
 * building a krb5_crypto_iov array: DATA/PADDING encrypt as DATA,
 * SIGN_ONLY is integrity-protected only, everything else is EMPTY.
 */
krb5_cryptotype
kg_translate_flag_iov(OM_uint32 type)
{
    OM_uint32 masked = GSS_IOV_BUFFER_TYPE(type);

    if (masked == GSS_IOV_BUFFER_TYPE_DATA ||
        masked == GSS_IOV_BUFFER_TYPE_PADDING)
        return KRB5_CRYPTO_TYPE_DATA;

    if (masked == GSS_IOV_BUFFER_TYPE_SIGN_ONLY)
        return KRB5_CRYPTO_TYPE_SIGN_ONLY;

    return KRB5_CRYPTO_TYPE_EMPTY;
}
/*
 * Translate a GSS IOV buffer type into the corresponding krb5 crypto
 * IOV type (DATA/PADDING -> DATA, SIGN_ONLY -> SIGN_ONLY, else EMPTY).
 */
int
gssEapMapCryptoFlag(OM_uint32 type)
{
    switch (GSS_IOV_BUFFER_TYPE(type)) {
    case GSS_IOV_BUFFER_TYPE_DATA:
    case GSS_IOV_BUFFER_TYPE_PADDING:
        return KRB5_CRYPTO_TYPE_DATA;
    case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
        return KRB5_CRYPTO_TYPE_SIGN_ONLY;
    default:
        return KRB5_CRYPTO_TYPE_EMPTY;
    }
}
/*
 * Implement wrap-with-associated-data on top of a mechanism's IOV entry
 * points.  Builds a five-element IOV (HEADER | SIGN_ONLY | DATA |
 * PADDING | TRAILER), sizes it with gss_wrap_iov_length, allocates one
 * contiguous output token, points the IOV buffers into it, then wraps
 * in place with gss_wrap_iov.  The associated data is integrity
 * protected but is NOT copied into the output token.
 */
static OM_uint32
gssint_wrap_aead_iov_shim(gss_mechanism mech,
                          OM_uint32 *minor_status,
                          gss_ctx_id_t context_handle,
                          int conf_req_flag,
                          gss_qop_t qop_req,
                          gss_buffer_t input_assoc_buffer,
                          gss_buffer_t input_payload_buffer,
                          int *conf_state,
                          gss_buffer_t output_message_buffer)
{
    gss_iov_buffer_desc iov[5];
    OM_uint32 status;
    size_t offset;
    int i = 0, iov_count;

    /* HEADER | SIGN_ONLY_DATA | DATA | PADDING | TRAILER */
    iov[i].type = GSS_IOV_BUFFER_TYPE_HEADER;
    iov[i].buffer.value = NULL;
    iov[i].buffer.length = 0;
    i++;
    if (input_assoc_buffer != GSS_C_NO_BUFFER) {
        /* SIGN_ONLY slot is only present when there is associated data */
        iov[i].type = GSS_IOV_BUFFER_TYPE_SIGN_ONLY;
        iov[i].buffer = *input_assoc_buffer;
        i++;
    }
    iov[i].type = GSS_IOV_BUFFER_TYPE_DATA;
    iov[i].buffer = *input_payload_buffer;
    i++;
    iov[i].type = GSS_IOV_BUFFER_TYPE_PADDING;
    iov[i].buffer.value = NULL;
    iov[i].buffer.length = 0;
    i++;
    iov[i].type = GSS_IOV_BUFFER_TYPE_TRAILER;
    iov[i].buffer.value = NULL;
    iov[i].buffer.length = 0;
    i++;
    iov_count = i;

    assert(mech->gss_wrap_iov_length);

    /* Ask the mechanism how large HEADER/PADDING/TRAILER must be. */
    status = mech->gss_wrap_iov_length(minor_status, context_handle,
                                       conf_req_flag, qop_req,
                                       NULL, iov, iov_count);
    if (status != GSS_S_COMPLETE) {
        map_error(minor_status, mech);
        return status;
    }

    /* Format output token (does not include associated data) */
    for (i = 0, output_message_buffer->length = 0; i < iov_count; i++) {
        if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_SIGN_ONLY)
            continue;
        output_message_buffer->length += iov[i].buffer.length;
    }
    output_message_buffer->value = malloc(output_message_buffer->length);
    if (output_message_buffer->value == NULL) {
        *minor_status = ENOMEM;
        return GSS_S_FAILURE;
    }

    /*
     * Re-point each token buffer into the contiguous output allocation,
     * in token order; the SIGN_ONLY slot keeps its caller-owned storage
     * and is simply skipped.
     */
    i = 0, offset = 0;

    /* HEADER */
    iov[i].buffer.value = (unsigned char *)output_message_buffer->value + offset;
    offset += iov[i].buffer.length;
    i++;

    /* SIGN_ONLY_DATA */
    if (input_assoc_buffer != GSS_C_NO_BUFFER)
        i++;

    /* DATA */
    iov[i].buffer.value = (unsigned char *)output_message_buffer->value + offset;
    offset += iov[i].buffer.length;

    /* Copy the plaintext in; gss_wrap_iov below transforms it in place. */
    memcpy(iov[i].buffer.value,
           input_payload_buffer->value, iov[i].buffer.length);
    i++;

    /* PADDING */
    iov[i].buffer.value = (unsigned char *)output_message_buffer->value + offset;
    offset += iov[i].buffer.length;
    i++;

    /* TRAILER */
    iov[i].buffer.value = (unsigned char *)output_message_buffer->value + offset;
    offset += iov[i].buffer.length;
    i++;

    assert(offset == output_message_buffer->length);

    assert(mech->gss_wrap_iov);

    status = mech->gss_wrap_iov(minor_status, context_handle,
                                conf_req_flag, qop_req,
                                conf_state, iov, iov_count);
    if (status != GSS_S_COMPLETE) {
        OM_uint32 minor;

        /* On failure the partially-built token must not leak. */
        map_error(minor_status, mech);
        gss_release_buffer(&minor, output_message_buffer);
    }

    return status;
}
/*
 * Split a STREAM | SIGN_DATA | DATA into
 * HEADER | SIGN_DATA | DATA | PADDING | TRAILER
 *
 * Parses the token header inside the caller's STREAM buffer, builds a
 * temporary IOV (tiov) whose HEADER/PADDING/TRAILER buffers alias into
 * the STREAM storage, and hands it to kg_unseal_iov_token().  On
 * success the caller's DATA buffer is overwritten with the located
 * (or, with GSS_IOV_BUFFER_FLAG_ALLOCATE, freshly copied) plaintext.
 */
static OM_uint32
kg_unseal_stream_iov(OM_uint32 *minor_status,
                     krb5_gss_ctx_id_rec *ctx,
                     int *conf_state,
                     gss_qop_t *qop_state,
                     gss_iov_buffer_desc *iov,
                     int iov_count,
                     int toktype)
{
    unsigned char *ptr;
    unsigned int bodysize;
    OM_uint32 code = 0, major_status = GSS_S_FAILURE;
    krb5_context context = ctx->k5_context;
    int conf_req_flag, toktype2;
    int i = 0, j;
    gss_iov_buffer_desc *tiov = NULL;
    gss_iov_buffer_t stream, data = NULL;
    gss_iov_buffer_t theader, tdata = NULL, tpadding, ttrailer;

    assert(toktype == KG_TOK_WRAP_MSG);

    if (toktype != KG_TOK_WRAP_MSG || (ctx->gss_flags & GSS_C_DCE_STYLE)) {
        code = EINVAL;
        goto cleanup;
    }

    stream = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_STREAM);
    assert(stream != NULL);

    ptr = (unsigned char *)stream->buffer.value;

    /* Strip the generic GSS framing; ptr advances past it. */
    code = g_verify_token_header(ctx->mech_used, &bodysize, &ptr, -1,
                                 stream->buffer.length, 0);
    if (code != 0) {
        major_status = GSS_S_DEFECTIVE_TOKEN;
        goto cleanup;
    }

    if (bodysize < 2) {
        *minor_status = (OM_uint32)G_BAD_TOK_HEADER;
        return GSS_S_DEFECTIVE_TOKEN;
    }

    /* Inner token ID selects CFX (KG2_*) vs pre-CFX (KG_*) layout. */
    toktype2 = load_16_be(ptr);

    ptr += 2;
    bodysize -= 2;

    /* iov_count + 2 leaves room for the synthesized PADDING and TRAILER. */
    tiov = (gss_iov_buffer_desc *)calloc((size_t)iov_count + 2,
                                         sizeof(gss_iov_buffer_desc));
    if (tiov == NULL) {
        code = ENOMEM;
        goto cleanup;
    }

    /* HEADER */
    theader = &tiov[i++];
    theader->type = GSS_IOV_BUFFER_TYPE_HEADER;
    theader->buffer.value = stream->buffer.value;
    theader->buffer.length = ptr - (unsigned char *)stream->buffer.value;
    if (bodysize < 14 ||
        stream->buffer.length != theader->buffer.length + bodysize) {
        major_status = GSS_S_DEFECTIVE_TOKEN;
        goto cleanup;
    }
    /* 14 = remainder of the fixed 16-byte token header after TOK_ID. */
    theader->buffer.length += 14;

    /* n[SIGN_DATA] | DATA | m[SIGN_DATA] */
    for (j = 0; j < iov_count; j++) {
        OM_uint32 type = GSS_IOV_BUFFER_TYPE(iov[j].type);

        if (type == GSS_IOV_BUFFER_TYPE_DATA) {
            if (data != NULL) {
                /* only a single DATA buffer can appear */
                code = EINVAL;
                goto cleanup;
            }

            data = &iov[j];
            tdata = &tiov[i];
        }
        if (type == GSS_IOV_BUFFER_TYPE_DATA ||
            type == GSS_IOV_BUFFER_TYPE_SIGN_ONLY)
            tiov[i++] = iov[j];
    }

    if (data == NULL) {
        /* a single DATA buffer must be present */
        code = EINVAL;
        goto cleanup;
    }

    /* PADDING | TRAILER */
    tpadding = &tiov[i++];
    tpadding->type = GSS_IOV_BUFFER_TYPE_PADDING;
    tpadding->buffer.length = 0;
    tpadding->buffer.value = NULL;

    ttrailer = &tiov[i++];
    ttrailer->type = GSS_IOV_BUFFER_TYPE_TRAILER;

    switch (toktype2) {
    case KG2_TOK_MIC_MSG:
    case KG2_TOK_WRAP_MSG:
    case KG2_TOK_DEL_CTX: {
        /* CFX (RFC 4121) token: EC/RRC fields at fixed header offsets. */
        size_t ec, rrc;
        krb5_enctype enctype;
        unsigned int k5_headerlen = 0;
        unsigned int k5_trailerlen = 0;

        if (ctx->have_acceptor_subkey)
            enctype = ctx->acceptor_subkey->keyblock.enctype;
        else
            enctype = ctx->subkey->keyblock.enctype;
        conf_req_flag = ((ptr[0] & FLAG_WRAP_CONFIDENTIAL) != 0);
        ec = conf_req_flag ? load_16_be(ptr + 2) : 0;
        rrc = load_16_be(ptr + 4);

        /* Undo any right-rotation so the trailer sits at the end. */
        if (rrc != 0) {
            if (!gss_krb5int_rotate_left((unsigned char *)stream->buffer.value + 16,
                                         stream->buffer.length - 16, rrc)) {
                code = ENOMEM;
                goto cleanup;
            }
            store_16_be(0, ptr + 4); /* set RRC to zero */
        }

        if (conf_req_flag) {
            code = krb5_c_crypto_length(context, enctype,
                                        KRB5_CRYPTO_TYPE_HEADER,
                                        &k5_headerlen);
            if (code != 0)
                goto cleanup;
            theader->buffer.length += k5_headerlen; /* length validated later */
        }

        /* no PADDING for CFX, EC is used instead */
        code = krb5_c_crypto_length(context, enctype,
                                    conf_req_flag ? KRB5_CRYPTO_TYPE_TRAILER
                                                  : KRB5_CRYPTO_TYPE_CHECKSUM,
                                    &k5_trailerlen);
        if (code != 0)
            goto cleanup;

        ttrailer->buffer.length = ec +
            (conf_req_flag ? 16 : 0 /* E(Header) */) + k5_trailerlen;
        ttrailer->buffer.value = (unsigned char *)stream->buffer.value +
            stream->buffer.length - ttrailer->buffer.length;
        break;
    }
    case KG_TOK_MIC_MSG:
    case KG_TOK_WRAP_MSG:
    case KG_TOK_DEL_CTX:
        /* Pre-CFX (RFC 1964-style): checksum+confounder follow header. */
        theader->buffer.length += ctx->cksum_size +
            kg_confounder_size(context, ctx->enc->keyblock.enctype);

        /*
         * we can't set the padding accurately until decryption;
         * kg_fixup_padding_iov() will take care of this
         */
        tpadding->buffer.length = 1;
        tpadding->buffer.value = (unsigned char *)stream->buffer.value +
            stream->buffer.length - 1;

        /* no TRAILER for pre-CFX */
        ttrailer->buffer.length = 0;
        ttrailer->buffer.value = NULL;
        break;
    default:
        code = (OM_uint32)G_BAD_TOK_HEADER;
        major_status = GSS_S_DEFECTIVE_TOKEN;
        goto cleanup;
        break;
    }

    /* IOV: -----------0-------------+---1---+--2--+----------------3--------------*/
    /* Old: GSS-Header | Conf        | Data  | Pad |                               */
    /* CFX: GSS-Header | Kerb-Header | Data  |     | EC | E(Header) | Kerb-Trailer */
    /* GSS: -------GSS-HEADER--------+-DATA--+-PAD-+----------GSS-TRAILER----------*/

    /* validate lengths */
    if (stream->buffer.length < theader->buffer.length +
        tpadding->buffer.length + ttrailer->buffer.length) {
        code = (OM_uint32)KRB5_BAD_MSIZE;
        major_status = GSS_S_DEFECTIVE_TOKEN;
        goto cleanup;
    }

    /* setup data: DATA is whatever remains between header and trailer. */
    tdata->buffer.length = stream->buffer.length - ttrailer->buffer.length -
        tpadding->buffer.length - theader->buffer.length;

    assert(data != NULL);

    if (data->type & GSS_IOV_BUFFER_FLAG_ALLOCATE) {
        /* Caller wants its own copy rather than a pointer into STREAM. */
        code = kg_allocate_iov(tdata, tdata->buffer.length);
        if (code != 0)
            goto cleanup;
        memcpy(tdata->buffer.value,
               (unsigned char *)stream->buffer.value + theader->buffer.length,
               tdata->buffer.length);
    } else
        tdata->buffer.value = (unsigned char *)stream->buffer.value +
            theader->buffer.length;

    assert(i <= iov_count + 2);

    major_status = kg_unseal_iov_token(&code, ctx, conf_state, qop_state,
                                       tiov, i, toktype);
    if (major_status == GSS_S_COMPLETE)
        *data = *tdata;
    else
        kg_release_iov(tdata, 1);

cleanup:
    if (tiov != NULL)
        free(tiov);

    *minor_status = code;

    return major_status;
}
/*
 * AEAD wrap API for a single piece of associated data, for compatibility
 * with MIT and as specified by draft-howard-gssapi-aead-00.txt.
 *
 * Builds a five-element IOV (HEADER | SIGN_ONLY | DATA | PADDING |
 * TRAILER), sizes it with gss_wrap_iov_length, copies the payload into
 * a single contiguous output token, and wraps it in place with
 * gss_wrap_iov.  The associated data is integrity-protected but not
 * included in the output token.
 *
 * @ingroup gssapi
 */
GSSAPI_LIB_FUNCTION OM_uint32 GSSAPI_LIB_CALL
gss_wrap_aead(OM_uint32 *minor_status,
              gss_ctx_id_t context_handle,
              int conf_req_flag,
              gss_qop_t qop_req,
              gss_buffer_t input_assoc_buffer,
              gss_buffer_t input_payload_buffer,
              int *conf_state,
              gss_buffer_t output_message_buffer)
{
    OM_uint32 major_status, tmp, flags = 0;
    gss_iov_buffer_desc iov[5];
    size_t i;
    unsigned char *p;

    memset(iov, 0, sizeof(iov));

    iov[0].type = GSS_IOV_BUFFER_TYPE_HEADER;

    iov[1].type = GSS_IOV_BUFFER_TYPE_SIGN_ONLY;
    if (input_assoc_buffer)
        iov[1].buffer = *input_assoc_buffer;

    iov[2].type = GSS_IOV_BUFFER_TYPE_DATA;
    if (input_payload_buffer)
        iov[2].buffer.length = input_payload_buffer->length;

    /*
     * NOTE(review): the return status is ignored; on failure `flags`
     * stays 0 and the context is treated as non-DCE below — confirm
     * this fallback is intentional.
     */
    gss_inquire_context(minor_status, context_handle, NULL, NULL,
                        NULL, NULL, &flags, NULL, NULL);

    /* krb5 mech rejects padding/trailer if DCE-style is set */
    iov[3].type = (flags & GSS_C_DCE_STYLE) ? GSS_IOV_BUFFER_TYPE_EMPTY
                                            : GSS_IOV_BUFFER_TYPE_PADDING;
    iov[4].type = (flags & GSS_C_DCE_STYLE) ? GSS_IOV_BUFFER_TYPE_EMPTY
                                            : GSS_IOV_BUFFER_TYPE_TRAILER;

    /* Let the mechanism fill in HEADER/PADDING/TRAILER lengths. */
    major_status = gss_wrap_iov_length(minor_status, context_handle,
                                       conf_req_flag, qop_req, conf_state,
                                       iov, 5);
    if (GSS_ERROR(major_status))
        return major_status;

    /* Token length excludes SIGN_ONLY: associated data stays external. */
    for (i = 0, output_message_buffer->length = 0; i < 5; i++) {
        if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_SIGN_ONLY)
            continue;

        output_message_buffer->length += iov[i].buffer.length;
    }

    output_message_buffer->value = malloc(output_message_buffer->length);
    if (output_message_buffer->value == NULL) {
        *minor_status = ENOMEM;
        return GSS_S_FAILURE;
    }

    /*
     * Point each token buffer at its slice of the contiguous output and
     * copy the plaintext into the DATA slice; gss_wrap_iov transforms
     * the token in place.
     */
    for (i = 0, p = output_message_buffer->value; i < 5; i++) {
        if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_SIGN_ONLY)
            continue;
        else if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA)
            memcpy(p, input_payload_buffer->value,
                   input_payload_buffer->length);

        iov[i].buffer.value = p;
        p += iov[i].buffer.length;
    }

    major_status = gss_wrap_iov(minor_status, context_handle, conf_req_flag,
                                qop_req, conf_state, iov, 5);
    if (GSS_ERROR(major_status))
        gss_release_buffer(&tmp, output_message_buffer);

    return major_status;
}
OM_uint32 _gssapi_wrap_cfx_iov(OM_uint32 *minor_status, gsskrb5_ctx ctx, krb5_context context, int conf_req_flag, int *conf_state, gss_iov_buffer_desc *iov, int iov_count) { OM_uint32 major_status, junk; gss_iov_buffer_desc *header, *trailer, *padding; size_t gsshsize, k5hsize; size_t gsstsize, k5tsize; size_t rrc = 0, ec = 0; int i; gss_cfx_wrap_token token; krb5_error_code ret; int32_t seq_number; unsigned usage; krb5_crypto_iov *data = NULL; header = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER); if (header == NULL) { *minor_status = EINVAL; return GSS_S_FAILURE; } padding = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING); if (padding != NULL) { padding->buffer.length = 0; } trailer = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER); major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer); if (major_status != GSS_S_COMPLETE) { return major_status; } if (conf_req_flag) { size_t k5psize = 0; size_t k5pbase = 0; size_t k5bsize = 0; size_t size = 0; for (i = 0; i < iov_count; i++) { switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) { case GSS_IOV_BUFFER_TYPE_DATA: size += iov[i].buffer.length; break; default: break; } } size += sizeof(gss_cfx_wrap_token_desc); *minor_status = krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_HEADER, &k5hsize); if (*minor_status) return GSS_S_FAILURE; *minor_status = krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_TRAILER, &k5tsize); if (*minor_status) return GSS_S_FAILURE; *minor_status = krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_PADDING, &k5pbase); if (*minor_status) return GSS_S_FAILURE; if (k5pbase > 1) { k5psize = k5pbase - (size % k5pbase); } else { k5psize = 0; } if (k5psize == 0 && IS_DCE_STYLE(ctx)) { *minor_status = krb5_crypto_getblocksize(context, ctx->crypto, &k5bsize); if (*minor_status) return GSS_S_FAILURE; ec = k5bsize; } else { ec = k5psize; } gsshsize = sizeof(gss_cfx_wrap_token_desc) + k5hsize; gsstsize = 
sizeof(gss_cfx_wrap_token_desc) + ec + k5tsize; } else { if (IS_DCE_STYLE(ctx)) { *minor_status = EINVAL; return GSS_S_FAILURE; } k5hsize = 0; *minor_status = krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_CHECKSUM, &k5tsize); if (*minor_status) return GSS_S_FAILURE; gsshsize = sizeof(gss_cfx_wrap_token_desc); gsstsize = k5tsize; } /* * */ if (trailer == NULL) { rrc = gsstsize; if (IS_DCE_STYLE(ctx)) rrc -= ec; gsshsize += gsstsize; gsstsize = 0; } else if (GSS_IOV_BUFFER_FLAGS(trailer->type) & GSS_IOV_BUFFER_FLAG_ALLOCATE) { major_status = _gk_allocate_buffer(minor_status, trailer, gsstsize); if (major_status) goto failure; } else if (trailer->buffer.length < gsstsize) { *minor_status = KRB5_BAD_MSIZE; major_status = GSS_S_FAILURE; goto failure; } else trailer->buffer.length = gsstsize; /* * */ if (GSS_IOV_BUFFER_FLAGS(header->type) & GSS_IOV_BUFFER_FLAG_ALLOCATE) { major_status = _gk_allocate_buffer(minor_status, header, gsshsize); if (major_status != GSS_S_COMPLETE) goto failure; } else if (header->buffer.length < gsshsize) { *minor_status = KRB5_BAD_MSIZE; major_status = GSS_S_FAILURE; goto failure; } else header->buffer.length = gsshsize; token = (gss_cfx_wrap_token)header->buffer.value; token->TOK_ID[0] = 0x05; token->TOK_ID[1] = 0x04; token->Flags = 0; token->Filler = 0xFF; if ((ctx->more_flags & LOCAL) == 0) token->Flags |= CFXSentByAcceptor; if (ctx->more_flags & ACCEPTOR_SUBKEY) token->Flags |= CFXAcceptorSubkey; if (ctx->more_flags & LOCAL) usage = KRB5_KU_USAGE_INITIATOR_SEAL; else usage = KRB5_KU_USAGE_ACCEPTOR_SEAL; if (conf_req_flag) { /* * In Wrap tokens with confidentiality, the EC field is * used to encode the size (in bytes) of the random filler. */ token->Flags |= CFXSealed; token->EC[0] = (ec >> 8) & 0xFF; token->EC[1] = (ec >> 0) & 0xFF; } else {