OM_uint32
kg_unseal_iov(OM_uint32 *minor_status,
              gss_ctx_id_t context_handle,
              int *conf_state,
              gss_qop_t *qop_state,
              gss_iov_buffer_desc *iov,
              int iov_count,
              int toktype)
{
    krb5_gss_ctx_id_rec *ctx;
    OM_uint32 code;

    ctx = (krb5_gss_ctx_id_rec *)context_handle;
    if (!ctx->established) {
        *minor_status = KG_CTX_INCOMPLETE;
        return GSS_S_NO_CONTEXT;
    }

    if (kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_STREAM) != NULL) {
        code = kg_unseal_stream_iov(minor_status, ctx, conf_state, qop_state,
                                    iov, iov_count, toktype);
    } else {
        code = kg_unseal_iov_token(minor_status, ctx, conf_state, qop_state,
                                   iov, iov_count, toktype);
    }

    return code;
}
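/*
 * Illustrative sketch (not part of the original file): how a caller might
 * exercise the STREAM path above through the public gssapi_ext.h API.
 * With a STREAM buffer present, gss_unwrap_iov() ends up in
 * kg_unseal_stream_iov(), which splits the token in place; on success the
 * DATA buffer aliases into the stream unless GSS_IOV_BUFFER_FLAG_ALLOCATE
 * requested a copy. The helper name example_unwrap_stream is hypothetical.
 */
static OM_uint32
example_unwrap_stream(gss_ctx_id_t ctx, unsigned char *token,
                      size_t token_len,
                      gss_buffer_t plaintext /* aliases into token */)
{
    OM_uint32 major, minor;
    int conf_state;
    gss_iov_buffer_desc iov[2];

    iov[0].type = GSS_IOV_BUFFER_TYPE_STREAM;
    iov[0].buffer.value = token;
    iov[0].buffer.length = token_len;

    iov[1].type = GSS_IOV_BUFFER_TYPE_DATA; /* filled in on success */
    iov[1].buffer.value = NULL;
    iov[1].buffer.length = 0;

    major = gss_unwrap_iov(&minor, ctx, &conf_state, NULL, iov, 2);
    if (major == GSS_S_COMPLETE)
        *plaintext = iov[1].buffer;
    return major;
}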
static krb5_error_code
make_seal_token_v1_iov(krb5_context context,
                       krb5_gss_ctx_id_rec *ctx,
                       int conf_req_flag,
                       int *conf_state,
                       gss_iov_buffer_desc *iov,
                       int iov_count,
                       int toktype)
{
    krb5_error_code code = 0;
    gss_iov_buffer_t header;
    gss_iov_buffer_t padding;
    gss_iov_buffer_t trailer;
    krb5_checksum md5cksum;
    krb5_checksum cksum;
    size_t k5_headerlen = 0, k5_trailerlen = 0;
    size_t data_length = 0, assoc_data_length = 0;
    size_t tmsglen = 0, tlen;
    unsigned char *ptr;
    krb5_keyusage sign_usage = KG_USAGE_SIGN;

    assert(toktype == KG_TOK_WRAP_MSG);

    md5cksum.length = cksum.length = 0;
    md5cksum.contents = cksum.contents = NULL;

    header = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
    if (header == NULL)
        return EINVAL;

    padding = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
    if (padding == NULL && (ctx->gss_flags & GSS_C_DCE_STYLE) == 0)
        return EINVAL;

    trailer = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
    if (trailer != NULL)
        trailer->buffer.length = 0;

    /* Determine confounder length */
    if (toktype == KG_TOK_WRAP_MSG || conf_req_flag)
        k5_headerlen = kg_confounder_size(context, ctx->enc);

    /* Check padding length */
    if (toktype == KG_TOK_WRAP_MSG) {
        size_t k5_padlen = (ctx->sealalg == SEAL_ALG_MICROSOFT_RC4) ? 1 : 8;
        size_t gss_padlen;
        size_t conf_data_length;

        kg_iov_msglen(iov, iov_count, &data_length, &assoc_data_length);
        conf_data_length = k5_headerlen + data_length - assoc_data_length;

        if (k5_padlen == 1)
            gss_padlen = 1; /* one byte to indicate one byte of padding */
        else
            gss_padlen = k5_padlen - (conf_data_length % k5_padlen);

        if (ctx->gss_flags & GSS_C_DCE_STYLE) {
            /* DCE will pad the actual data itself; padding buffer optional
             * and will be zeroed */
            gss_padlen = 0;

            if (conf_data_length % k5_padlen)
                code = KRB5_BAD_MSIZE;
        } else if (padding->type & GSS_IOV_BUFFER_FLAG_ALLOCATE) {
            code = kg_allocate_iov(padding, gss_padlen);
        } else if (padding->buffer.length < gss_padlen) {
            code = KRB5_BAD_MSIZE;
        }
        if (code != 0)
            goto cleanup;

        /* Initialize padding buffer to pad itself */
        if (padding != NULL) {
            padding->buffer.length = gss_padlen;
            memset(padding->buffer.value, (int)gss_padlen, gss_padlen);
        }

        if (ctx->gss_flags & GSS_C_DCE_STYLE)
            tmsglen = k5_headerlen; /* confounder length */
        else
            tmsglen = conf_data_length + padding->buffer.length +
                assoc_data_length;
    }

    /* Determine token size */
    tlen = g_token_size(ctx->mech_used, 14 + ctx->cksum_size + tmsglen);

    k5_headerlen += tlen - tmsglen;

    if (header->type & GSS_IOV_BUFFER_FLAG_ALLOCATE)
        code = kg_allocate_iov(header, k5_headerlen);
    else if (header->buffer.length < k5_headerlen)
        code = KRB5_BAD_MSIZE;
    if (code != 0)
        goto cleanup;

    header->buffer.length = k5_headerlen;

    ptr = (unsigned char *)header->buffer.value;
    g_make_token_header(ctx->mech_used, 14 + ctx->cksum_size + tmsglen,
                        &ptr, toktype);

    /* 0..1 SIGN_ALG */
    store_16_le(ctx->signalg, &ptr[0]);

    /* 2..3 SEAL_ALG or Filler */
    if (toktype == KG_TOK_WRAP_MSG && conf_req_flag) {
        store_16_le(ctx->sealalg, &ptr[2]);
    } else {
        /* No seal */
        ptr[2] = 0xFF;
        ptr[3] = 0xFF;
    }

    /* 4..5 Filler */
    ptr[4] = 0xFF;
    ptr[5] = 0xFF;

    /* pad the plaintext, encrypt if needed, and stick it in the token */

    /* initialize the checksum */
    switch (ctx->signalg) {
    case SGN_ALG_DES_MAC_MD5:
    case SGN_ALG_MD2_5:
        md5cksum.checksum_type = CKSUMTYPE_RSA_MD5;
        break;
    case SGN_ALG_HMAC_SHA1_DES3_KD:
        md5cksum.checksum_type = CKSUMTYPE_HMAC_SHA1_DES3;
        break;
    case SGN_ALG_HMAC_MD5:
        md5cksum.checksum_type = CKSUMTYPE_HMAC_MD5_ARCFOUR;
        if (toktype != KG_TOK_WRAP_MSG)
            sign_usage = 15;
        break;
    default:
    case SGN_ALG_DES_MAC:
        abort();
    }

    code = krb5_c_checksum_length(context, md5cksum.checksum_type,
                                  &k5_trailerlen);
    if (code != 0)
        goto cleanup;
    md5cksum.length = k5_trailerlen;

    if (k5_headerlen != 0) {
        code = kg_make_confounder(context, ctx->enc,
                                  ptr + 14 + ctx->cksum_size);
        if (code != 0)
            goto cleanup;
    }

    /* compute the checksum */
    code = kg_make_checksum_iov_v1(context, md5cksum.checksum_type,
                                   ctx->cksum_size, ctx->seq, ctx->enc,
                                   sign_usage, iov, iov_count, toktype,
                                   &md5cksum);
    if (code != 0)
        goto cleanup;

    switch (ctx->signalg) {
    case SGN_ALG_DES_MAC_MD5:
    case SGN_ALG_3:
        code = kg_encrypt(context, ctx->seq, KG_USAGE_SEAL,
                          (g_OID_equal(ctx->mech_used, gss_mech_krb5_old) ?
                           ctx->seq->contents : NULL),
                          md5cksum.contents, md5cksum.contents, 16);
        if (code != 0)
            goto cleanup;

        cksum.length = ctx->cksum_size;
        cksum.contents = md5cksum.contents + 16 - cksum.length;

        memcpy(ptr + 14, cksum.contents, cksum.length);
        break;
    case SGN_ALG_HMAC_SHA1_DES3_KD:
        assert(md5cksum.length == ctx->cksum_size);
        memcpy(ptr + 14, md5cksum.contents, md5cksum.length);
        break;
    case SGN_ALG_HMAC_MD5:
        memcpy(ptr + 14, md5cksum.contents, ctx->cksum_size);
        break;
    }

    /* create the seq_num */
    code = kg_make_seq_num(context, ctx->seq, ctx->initiate ? 0 : 0xFF,
                           (OM_uint32)ctx->seq_send, ptr + 14, ptr + 6);
    if (code != 0)
        goto cleanup;

    if (conf_req_flag) {
        if (ctx->sealalg == SEAL_ALG_MICROSOFT_RC4) {
            unsigned char bigend_seqnum[4];
            krb5_keyblock *enc_key;
            size_t i;

            store_32_be(ctx->seq_send, bigend_seqnum);

            code = krb5_copy_keyblock(context, ctx->enc, &enc_key);
            if (code != 0)
                goto cleanup;

            assert(enc_key->length == 16);

            for (i = 0; i < enc_key->length; i++)
                ((char *)enc_key->contents)[i] ^= 0xF0;

            code = kg_arcfour_docrypt_iov(context, enc_key, 0,
                                          bigend_seqnum, 4,
                                          iov, iov_count);
            krb5_free_keyblock(context, enc_key);
        } else {
            code = kg_encrypt_iov(context, ctx->proto,
                                  ((ctx->gss_flags & GSS_C_DCE_STYLE) != 0),
                                  0 /*EC*/, 0 /*RRC*/,
                                  ctx->enc, KG_USAGE_SEAL, NULL,
                                  iov, iov_count);
        }
        if (code != 0)
            goto cleanup;
    }

    ctx->seq_send++;
    ctx->seq_send &= 0xFFFFFFFFL;

    code = 0;

    if (conf_state != NULL)
        *conf_state = conf_req_flag;

cleanup:
    if (code != 0)
        kg_release_iov(iov, iov_count);
    krb5_free_checksum_contents(context, &md5cksum);

    return code;
}
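/*
 * Layout of the pre-CFX (RFC 1964) wrap token body built above, relative to
 * ptr (which points just past the generic token framing emitted by
 * g_make_token_header()); the offsets match the loads in kg_unseal_v1_iov():
 *
 *   ptr[0..1]    SIGN_ALG (little-endian)
 *   ptr[2..3]    SEAL_ALG, or 0xFFFF when there is no confidentiality
 *   ptr[4..5]    filler (0xFF 0xFF)
 *   ptr[6..13]   encrypted sequence number (kg_make_seq_num())
 *   ptr[14..]    checksum (ctx->cksum_size bytes), then the confounder,
 *                followed by the (possibly encrypted) message and padding.
 */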
OM_uint32
kg_seal_iov_length(OM_uint32 *minor_status,
                   gss_ctx_id_t context_handle,
                   int conf_req_flag,
                   gss_qop_t qop_req,
                   int *conf_state,
                   gss_iov_buffer_desc *iov,
                   int iov_count)
{
    krb5_gss_ctx_id_rec *ctx;
    gss_iov_buffer_t header, trailer, padding;
    size_t data_length, assoc_data_length;
    size_t gss_headerlen, gss_padlen, gss_trailerlen;
    unsigned int k5_headerlen = 0, k5_trailerlen = 0, k5_padlen = 0;
    krb5_error_code code;
    krb5_context context;
    int dce_style;

    if (qop_req != GSS_C_QOP_DEFAULT) {
        *minor_status = (OM_uint32)G_UNKNOWN_QOP;
        return GSS_S_FAILURE;
    }

    if (!kg_validate_ctx_id(context_handle)) {
        *minor_status = (OM_uint32)G_VALIDATE_FAILED;
        return GSS_S_NO_CONTEXT;
    }

    ctx = (krb5_gss_ctx_id_rec *)context_handle;
    if (!ctx->established) {
        *minor_status = KG_CTX_INCOMPLETE;
        return GSS_S_NO_CONTEXT;
    }

    header = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
    if (header == NULL) {
        *minor_status = EINVAL;
        return GSS_S_FAILURE;
    }
    INIT_IOV_DATA(header);

    trailer = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
    if (trailer != NULL) {
        INIT_IOV_DATA(trailer);
    }

    dce_style = ((ctx->gss_flags & GSS_C_DCE_STYLE) != 0);

    /* For CFX, EC is used instead of padding, and is placed in header or
     * trailer */
    padding = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
    if (padding == NULL) {
        if (conf_req_flag && ctx->proto == 0 && !dce_style) {
            *minor_status = EINVAL;
            return GSS_S_FAILURE;
        }
    } else {
        INIT_IOV_DATA(padding);
    }

    kg_iov_msglen(iov, iov_count, &data_length, &assoc_data_length);

    if (conf_req_flag && kg_integ_only_iov(iov, iov_count))
        conf_req_flag = FALSE;

    context = ctx->k5_context;

    gss_headerlen = gss_padlen = gss_trailerlen = 0;

    if (ctx->proto == 1) {
        krb5_enctype enctype;
        size_t ec;

        if (ctx->have_acceptor_subkey)
            enctype = ctx->acceptor_subkey->enctype;
        else
            enctype = ctx->subkey->enctype;

        code = krb5_c_crypto_length(context, enctype,
                                    conf_req_flag ?
                                    KRB5_CRYPTO_TYPE_TRAILER :
                                    KRB5_CRYPTO_TYPE_CHECKSUM,
                                    &k5_trailerlen);
        if (code != 0) {
            *minor_status = code;
            return GSS_S_FAILURE;
        }

        if (conf_req_flag) {
            code = krb5_c_crypto_length(context, enctype,
                                        KRB5_CRYPTO_TYPE_HEADER,
                                        &k5_headerlen);
            if (code != 0) {
                *minor_status = code;
                return GSS_S_FAILURE;
            }
        }

        gss_headerlen = 16; /* Header */
        if (conf_req_flag) {
            gss_headerlen += k5_headerlen; /* Kerb-Header */
            gss_trailerlen = 16 /* E(Header) */ +
                k5_trailerlen; /* Kerb-Trailer */

            code = krb5_c_padding_length(context, enctype,
                                         data_length - assoc_data_length +
                                         16 /* E(Header) */, &k5_padlen);
            if (code != 0) {
                *minor_status = code;
                return GSS_S_FAILURE;
            }

            if (k5_padlen == 0 && dce_style) {
                /* Windows rejects AEAD tokens with non-zero EC */
                code = krb5_c_block_size(context, enctype, &ec);
                if (code != 0) {
                    *minor_status = code;
                    return GSS_S_FAILURE;
                }
            } else
                ec = k5_padlen;

            gss_trailerlen += ec;
        } else {
            gss_trailerlen = k5_trailerlen; /* Kerb-Checksum */
        }
    } else if (!dce_style) {
        k5_padlen = (ctx->sealalg == SEAL_ALG_MICROSOFT_RC4) ? 1 : 8;

        if (k5_padlen == 1)
            gss_padlen = 1;
        else
            gss_padlen = k5_padlen -
                ((data_length - assoc_data_length) % k5_padlen);
    }

    data_length += gss_padlen;

    if (ctx->proto == 0) {
        /* Header | Checksum | Confounder | Data | Pad */
        size_t data_size;

        k5_headerlen = kg_confounder_size(context, ctx->enc);

        data_size = 14 /* Header */ + ctx->cksum_size + k5_headerlen;

        if (!dce_style)
            data_size += data_length;

        gss_headerlen = g_token_size(ctx->mech_used, data_size);

        /* g_token_size() will include data_size as well as the overhead, so
         * subtract data_length just to get the overhead (ie. token size) */
        if (!dce_style)
            gss_headerlen -= data_length;
    }

    if (minor_status != NULL)
        *minor_status = 0;

    if (trailer == NULL)
        gss_headerlen += gss_trailerlen;
    else
        trailer->buffer.length = gss_trailerlen;

    assert(gss_padlen == 0 || padding != NULL);

    if (padding != NULL)
        padding->buffer.length = gss_padlen;

    header->buffer.length = gss_headerlen;

    if (conf_state != NULL)
        *conf_state = conf_req_flag;

    return GSS_S_COMPLETE;
}
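/*
 * Illustrative sketch (not part of the original file): the intended two-pass
 * pattern for the length function above, via the public gssapi_ext.h
 * wrappers gss_wrap_iov_length() and gss_wrap_iov(). The first pass only
 * fills in the required HEADER/PADDING/TRAILER lengths; the caller then
 * allocates them (or sets GSS_IOV_BUFFER_FLAG_ALLOCATE instead) and wraps.
 * The helper name example_wrap_in_place is hypothetical; assumes <stdlib.h>.
 */
static OM_uint32
example_wrap_in_place(gss_ctx_id_t ctx, unsigned char *data, size_t data_len)
{
    OM_uint32 major, minor;
    int conf_state;
    gss_iov_buffer_desc iov[4];

    iov[0].type = GSS_IOV_BUFFER_TYPE_HEADER;
    iov[1].type = GSS_IOV_BUFFER_TYPE_DATA;
    iov[1].buffer.value = data;
    iov[1].buffer.length = data_len;
    iov[2].type = GSS_IOV_BUFFER_TYPE_PADDING;
    iov[3].type = GSS_IOV_BUFFER_TYPE_TRAILER;

    /* Pass 1: compute the space needed for each non-DATA buffer. */
    major = gss_wrap_iov_length(&minor, ctx, 1, GSS_C_QOP_DEFAULT,
                                &conf_state, iov, 4);
    if (GSS_ERROR(major))
        return major;

    /* Pass 2: allocate to the lengths set above; DATA is encrypted in
     * place. (Error/cleanup handling elided in this sketch.) */
    iov[0].buffer.value = malloc(iov[0].buffer.length);
    iov[2].buffer.value = malloc(iov[2].buffer.length);
    iov[3].buffer.value = malloc(iov[3].buffer.length);
    if (iov[0].buffer.value == NULL || iov[2].buffer.value == NULL ||
        iov[3].buffer.value == NULL)
        return GSS_S_FAILURE;

    return gss_wrap_iov(&minor, ctx, 1, GSS_C_QOP_DEFAULT, &conf_state,
                        iov, 4);
}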
/*
 * Split a STREAM | SIGN_DATA | DATA into
 *   HEADER | SIGN_DATA | DATA | PADDING | TRAILER
 */
static OM_uint32
kg_unseal_stream_iov(OM_uint32 *minor_status,
                     krb5_gss_ctx_id_rec *ctx,
                     int *conf_state,
                     gss_qop_t *qop_state,
                     gss_iov_buffer_desc *iov,
                     int iov_count,
                     int toktype)
{
    unsigned char *ptr;
    unsigned int bodysize;
    OM_uint32 code = 0, major_status = GSS_S_FAILURE;
    krb5_context context = ctx->k5_context;
    int conf_req_flag, toktype2;
    int i = 0, j;
    gss_iov_buffer_desc *tiov = NULL;
    gss_iov_buffer_t stream, data = NULL;
    gss_iov_buffer_t theader, tdata = NULL, tpadding, ttrailer;

    assert(toktype == KG_TOK_WRAP_MSG);

    if (toktype != KG_TOK_WRAP_MSG || (ctx->gss_flags & GSS_C_DCE_STYLE)) {
        code = EINVAL;
        goto cleanup;
    }

    stream = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_STREAM);
    assert(stream != NULL);

    ptr = (unsigned char *)stream->buffer.value;

    code = g_verify_token_header(ctx->mech_used, &bodysize, &ptr, -1,
                                 stream->buffer.length, 0);
    if (code != 0) {
        major_status = GSS_S_DEFECTIVE_TOKEN;
        goto cleanup;
    }

    if (bodysize < 2) {
        *minor_status = (OM_uint32)G_BAD_TOK_HEADER;
        return GSS_S_DEFECTIVE_TOKEN;
    }

    toktype2 = load_16_be(ptr);

    ptr += 2;
    bodysize -= 2;

    tiov = (gss_iov_buffer_desc *)calloc((size_t)iov_count + 2,
                                         sizeof(gss_iov_buffer_desc));
    if (tiov == NULL) {
        code = ENOMEM;
        goto cleanup;
    }

    /* HEADER */
    theader = &tiov[i++];
    theader->type = GSS_IOV_BUFFER_TYPE_HEADER;
    theader->buffer.value = stream->buffer.value;
    theader->buffer.length = ptr - (unsigned char *)stream->buffer.value;
    if (bodysize < 14 ||
        stream->buffer.length != theader->buffer.length + bodysize) {
        major_status = GSS_S_DEFECTIVE_TOKEN;
        goto cleanup;
    }
    theader->buffer.length += 14;

    /* n[SIGN_DATA] | DATA | m[SIGN_DATA] */
    for (j = 0; j < iov_count; j++) {
        OM_uint32 type = GSS_IOV_BUFFER_TYPE(iov[j].type);

        if (type == GSS_IOV_BUFFER_TYPE_DATA) {
            if (data != NULL) {
                /* only a single DATA buffer can appear */
                code = EINVAL;
                goto cleanup;
            }

            data = &iov[j];
            tdata = &tiov[i];
        }
        if (type == GSS_IOV_BUFFER_TYPE_DATA ||
            type == GSS_IOV_BUFFER_TYPE_SIGN_ONLY)
            tiov[i++] = iov[j];
    }

    if (data == NULL) {
        /* a single DATA buffer must be present */
        code = EINVAL;
        goto cleanup;
    }

    /* PADDING | TRAILER */
    tpadding = &tiov[i++];
    tpadding->type = GSS_IOV_BUFFER_TYPE_PADDING;
    tpadding->buffer.length = 0;
    tpadding->buffer.value = NULL;

    ttrailer = &tiov[i++];
    ttrailer->type = GSS_IOV_BUFFER_TYPE_TRAILER;

    switch (toktype2) {
    case KG2_TOK_MIC_MSG:
    case KG2_TOK_WRAP_MSG:
    case KG2_TOK_DEL_CTX: {
        size_t ec, rrc;
        krb5_enctype enctype;
        unsigned int k5_headerlen = 0;
        unsigned int k5_trailerlen = 0;

        if (ctx->have_acceptor_subkey)
            enctype = ctx->acceptor_subkey->keyblock.enctype;
        else
            enctype = ctx->subkey->keyblock.enctype;

        conf_req_flag = ((ptr[0] & FLAG_WRAP_CONFIDENTIAL) != 0);
        ec = conf_req_flag ? load_16_be(ptr + 2) : 0;
        rrc = load_16_be(ptr + 4);

        if (rrc != 0) {
            if (!gss_krb5int_rotate_left((unsigned char *)
                                         stream->buffer.value + 16,
                                         stream->buffer.length - 16, rrc)) {
                code = ENOMEM;
                goto cleanup;
            }
            store_16_be(0, ptr + 4); /* set RRC to zero */
        }

        if (conf_req_flag) {
            code = krb5_c_crypto_length(context, enctype,
                                        KRB5_CRYPTO_TYPE_HEADER,
                                        &k5_headerlen);
            if (code != 0)
                goto cleanup;
            theader->buffer.length += k5_headerlen; /* length validated
                                                     * later */
        }

        /* no PADDING for CFX, EC is used instead */
        code = krb5_c_crypto_length(context, enctype,
                                    conf_req_flag ?
                                    KRB5_CRYPTO_TYPE_TRAILER :
                                    KRB5_CRYPTO_TYPE_CHECKSUM,
                                    &k5_trailerlen);
        if (code != 0)
            goto cleanup;

        ttrailer->buffer.length = ec +
            (conf_req_flag ? 16 : 0 /* E(Header) */) + k5_trailerlen;
        ttrailer->buffer.value = (unsigned char *)stream->buffer.value +
            stream->buffer.length - ttrailer->buffer.length;
        break;
    }
    case KG_TOK_MIC_MSG:
    case KG_TOK_WRAP_MSG:
    case KG_TOK_DEL_CTX:
        theader->buffer.length += ctx->cksum_size +
            kg_confounder_size(context, ctx->enc->keyblock.enctype);

        /*
         * we can't set the padding accurately until decryption;
         * kg_fixup_padding_iov() will take care of this
         */
        tpadding->buffer.length = 1;
        tpadding->buffer.value = (unsigned char *)stream->buffer.value +
            stream->buffer.length - 1;

        /* no TRAILER for pre-CFX */
        ttrailer->buffer.length = 0;
        ttrailer->buffer.value = NULL;
        break;
    default:
        code = (OM_uint32)G_BAD_TOK_HEADER;
        major_status = GSS_S_DEFECTIVE_TOKEN;
        goto cleanup;
        break;
    }

    /* IOV: -----------0-------------+---1---+--2--+----------------3--------------*/
    /* Old: GSS-Header | Conf        | Data  | Pad |                               */
    /* CFX: GSS-Header | Kerb-Header | Data  |     | EC | E(Header) | Kerb-Trailer */
    /* GSS: -------GSS-HEADER--------+-DATA--+-PAD-+----------GSS-TRAILER----------*/

    /* validate lengths */
    if (stream->buffer.length < theader->buffer.length +
        tpadding->buffer.length + ttrailer->buffer.length) {
        code = (OM_uint32)KRB5_BAD_MSIZE;
        major_status = GSS_S_DEFECTIVE_TOKEN;
        goto cleanup;
    }

    /* setup data */
    tdata->buffer.length = stream->buffer.length - ttrailer->buffer.length -
        tpadding->buffer.length - theader->buffer.length;

    assert(data != NULL);

    if (data->type & GSS_IOV_BUFFER_FLAG_ALLOCATE) {
        code = kg_allocate_iov(tdata, tdata->buffer.length);
        if (code != 0)
            goto cleanup;
        memcpy(tdata->buffer.value,
               (unsigned char *)stream->buffer.value + theader->buffer.length,
               tdata->buffer.length);
    } else
        tdata->buffer.value = (unsigned char *)stream->buffer.value +
            theader->buffer.length;

    assert(i <= iov_count + 2);

    major_status = kg_unseal_iov_token(&code, ctx, conf_state, qop_state,
                                       tiov, i, toktype);
    if (major_status == GSS_S_COMPLETE)
        *data = *tdata;
    else
        kg_release_iov(tdata, 1);

cleanup:
    if (tiov != NULL)
        free(tiov);

    *minor_status = code;

    return major_status;
}
/*
 * Caller must provide TOKEN | DATA | PADDING | TRAILER, except
 * for DCE in which case it can just provide TOKEN | DATA (must
 * guarantee that DATA is padded)
 */
static OM_uint32
kg_unseal_iov_token(OM_uint32 *minor_status,
                    krb5_gss_ctx_id_rec *ctx,
                    int *conf_state,
                    gss_qop_t *qop_state,
                    gss_iov_buffer_desc *iov,
                    int iov_count,
                    int toktype)
{
    krb5_error_code code;
    krb5_context context = ctx->k5_context;
    unsigned char *ptr;
    gss_iov_buffer_t header;
    gss_iov_buffer_t padding;
    gss_iov_buffer_t trailer;
    size_t input_length;
    unsigned int bodysize;
    int toktype2;

    header = kg_locate_header_iov(iov, iov_count, toktype);
    if (header == NULL) {
        *minor_status = EINVAL;
        return GSS_S_FAILURE;
    }

    padding = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
    trailer = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);

    ptr = (unsigned char *)header->buffer.value;
    input_length = header->buffer.length;

    if ((ctx->gss_flags & GSS_C_DCE_STYLE) == 0 &&
        toktype == KG_TOK_WRAP_MSG) {
        size_t data_length, assoc_data_length;

        kg_iov_msglen(iov, iov_count, &data_length, &assoc_data_length);

        input_length += data_length - assoc_data_length;

        if (padding != NULL)
            input_length += padding->buffer.length;

        if (trailer != NULL)
            input_length += trailer->buffer.length;
    }

    code = g_verify_token_header(ctx->mech_used, &bodysize, &ptr, -1,
                                 input_length, 0);
    if (code != 0) {
        *minor_status = code;
        return GSS_S_DEFECTIVE_TOKEN;
    }

    if (bodysize < 2) {
        *minor_status = (OM_uint32)G_BAD_TOK_HEADER;
        return GSS_S_DEFECTIVE_TOKEN;
    }

    toktype2 = load_16_be(ptr);

    ptr += 2;
    bodysize -= 2;

    switch (toktype2) {
    case KG2_TOK_MIC_MSG:
    case KG2_TOK_WRAP_MSG:
    case KG2_TOK_DEL_CTX:
        code = gss_krb5int_unseal_v3_iov(context, minor_status, ctx, iov,
                                         iov_count, conf_state, qop_state,
                                         toktype);
        break;
    case KG_TOK_MIC_MSG:
    case KG_TOK_WRAP_MSG:
    case KG_TOK_DEL_CTX:
        code = kg_unseal_v1_iov(context, minor_status, ctx, iov, iov_count,
                                (size_t)(ptr - (unsigned char *)
                                         header->buffer.value),
                                conf_state, qop_state, toktype);
        break;
    default:
        *minor_status = (OM_uint32)G_BAD_TOK_HEADER;
        code = GSS_S_DEFECTIVE_TOKEN;
        break;
    }

    if (code != 0)
        save_error_info(*minor_status, context);

    return code;
}
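/*
 * Note on the dispatch above: the KG_TOK_* IDs are the pre-CFX token types
 * from RFC 1964, handled by kg_unseal_v1_iov() below, while the KG2_TOK_*
 * IDs are the CFX token types from RFC 4121, handled by
 * gss_krb5int_unseal_v3_iov(). Both parsers receive the same IOV list; only
 * the header layout and cryptographic framing differ.
 */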
static OM_uint32
kg_unseal_v1_iov(krb5_context context,
                 OM_uint32 *minor_status,
                 krb5_gss_ctx_id_rec *ctx,
                 gss_iov_buffer_desc *iov,
                 int iov_count,
                 size_t token_wrapper_len,
                 int *conf_state,
                 gss_qop_t *qop_state,
                 int toktype)
{
    OM_uint32 code;
    gss_iov_buffer_t header;
    gss_iov_buffer_t trailer;
    unsigned char *ptr;
    int sealalg;
    int signalg;
    krb5_checksum cksum;
    krb5_checksum md5cksum;
    size_t cksum_len = 0;
    size_t conflen = 0;
    int direction;
    krb5_ui_4 seqnum;
    OM_uint32 retval;
    size_t sumlen;
    krb5_keyusage sign_usage = KG_USAGE_SIGN;

    md5cksum.length = cksum.length = 0;
    md5cksum.contents = cksum.contents = NULL;

    header = kg_locate_header_iov(iov, iov_count, toktype);
    assert(header != NULL);

    trailer = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
    if (trailer != NULL && trailer->buffer.length != 0) {
        *minor_status = (OM_uint32)KRB5_BAD_MSIZE;
        return GSS_S_DEFECTIVE_TOKEN;
    }

    if (header->buffer.length < token_wrapper_len + 14) {
        *minor_status = 0;
        return GSS_S_DEFECTIVE_TOKEN;
    }

    ptr = (unsigned char *)header->buffer.value + token_wrapper_len;

    signalg = ptr[0];
    signalg |= ptr[1] << 8;

    sealalg = ptr[2];
    sealalg |= ptr[3] << 8;

    if (ptr[4] != 0xFF || ptr[5] != 0xFF) {
        *minor_status = 0;
        return GSS_S_DEFECTIVE_TOKEN;
    }

    if (toktype != KG_TOK_WRAP_MSG && sealalg != 0xFFFF) {
        *minor_status = 0;
        return GSS_S_DEFECTIVE_TOKEN;
    }

    if (toktype == KG_TOK_WRAP_MSG &&
        !(sealalg == 0xFFFF || sealalg == ctx->sealalg)) {
        *minor_status = 0;
        return GSS_S_DEFECTIVE_TOKEN;
    }

    if ((ctx->sealalg == SEAL_ALG_NONE && signalg > 1) ||
        (ctx->sealalg == SEAL_ALG_1 && signalg != SGN_ALG_3) ||
        (ctx->sealalg == SEAL_ALG_DES3KD &&
         signalg != SGN_ALG_HMAC_SHA1_DES3_KD) ||
        (ctx->sealalg == SEAL_ALG_MICROSOFT_RC4 &&
         signalg != SGN_ALG_HMAC_MD5)) {
        *minor_status = 0;
        return GSS_S_DEFECTIVE_TOKEN;
    }

    switch (signalg) {
    case SGN_ALG_DES_MAC_MD5:
    case SGN_ALG_MD2_5:
    case SGN_ALG_HMAC_MD5:
        cksum_len = 8;
        if (toktype != KG_TOK_WRAP_MSG)
            sign_usage = 15;
        break;
    case SGN_ALG_3:
        cksum_len = 16;
        break;
    case SGN_ALG_HMAC_SHA1_DES3_KD:
        cksum_len = 20;
        break;
    default:
        *minor_status = 0;
        return GSS_S_DEFECTIVE_TOKEN;
    }

    /* get the token parameters */
    code = kg_get_seq_num(context, ctx->seq, ptr + 14, ptr + 6, &direction,
                          &seqnum);
    if (code != 0) {
        *minor_status = code;
        return GSS_S_BAD_SIG;
    }

    /* decode the message, if SEAL */
    if (toktype == KG_TOK_WRAP_MSG) {
        if (sealalg != 0xFFFF) {
            if (ctx->sealalg == SEAL_ALG_MICROSOFT_RC4) {
                unsigned char bigend_seqnum[4];
                krb5_keyblock *enc_key;
                size_t i;

                store_32_be(seqnum, bigend_seqnum);

                code = krb5_k_key_keyblock(context, ctx->enc, &enc_key);
                if (code != 0) {
                    retval = GSS_S_FAILURE;
                    goto cleanup;
                }

                assert(enc_key->length == 16);

                for (i = 0; i < enc_key->length; i++)
                    ((char *)enc_key->contents)[i] ^= 0xF0;

                code = kg_arcfour_docrypt_iov(context, enc_key, 0,
                                              &bigend_seqnum[0], 4,
                                              iov, iov_count);
                krb5_free_keyblock(context, enc_key);
            } else {
                code = kg_decrypt_iov(context, 0,
                                      ((ctx->gss_flags &
                                        GSS_C_DCE_STYLE) != 0),
                                      0 /*EC*/, 0 /*RRC*/,
                                      ctx->enc, KG_USAGE_SEAL, NULL,
                                      iov, iov_count);
            }
            if (code != 0) {
                retval = GSS_S_FAILURE;
                goto cleanup;
            }
        }
        conflen = kg_confounder_size(context, ctx->enc->keyblock.enctype);
    }

    if (header->buffer.length != token_wrapper_len + 14 + cksum_len +
        conflen) {
        retval = GSS_S_DEFECTIVE_TOKEN;
        goto cleanup;
    }

    /* compute the checksum of the message */

    /* initialize the checksum */
    switch (signalg) {
    case SGN_ALG_DES_MAC_MD5:
    case SGN_ALG_MD2_5:
    case SGN_ALG_DES_MAC:
    case SGN_ALG_3:
        md5cksum.checksum_type = CKSUMTYPE_RSA_MD5;
        break;
    case SGN_ALG_HMAC_MD5:
        md5cksum.checksum_type = CKSUMTYPE_HMAC_MD5_ARCFOUR;
        break;
    case SGN_ALG_HMAC_SHA1_DES3_KD:
        md5cksum.checksum_type = CKSUMTYPE_HMAC_SHA1_DES3;
        break;
    default:
        abort();
    }

    code = krb5_c_checksum_length(context, md5cksum.checksum_type, &sumlen);
    if (code != 0) {
        retval = GSS_S_FAILURE;
        goto cleanup;
    }
    md5cksum.length = sumlen;

    /* compute the checksum of the message */
    code = kg_make_checksum_iov_v1(context, md5cksum.checksum_type,
                                   cksum_len, ctx->seq, ctx->enc,
                                   sign_usage, iov, iov_count, toktype,
                                   &md5cksum);
    if (code != 0) {
        retval = GSS_S_FAILURE;
        goto cleanup;
    }

    switch (signalg) {
    case SGN_ALG_DES_MAC_MD5:
    case SGN_ALG_3:
        code = kg_encrypt_inplace(context, ctx->seq, KG_USAGE_SEAL,
                                  (g_OID_equal(ctx->mech_used,
                                               gss_mech_krb5_old) ?
                                   ctx->seq->keyblock.contents : NULL),
                                  md5cksum.contents, 16);
        if (code != 0) {
            retval = GSS_S_FAILURE;
            goto cleanup;
        }

        cksum.length = cksum_len;
        cksum.contents = md5cksum.contents + 16 - cksum.length;

        code = k5_bcmp(cksum.contents, ptr + 14, cksum.length);
        break;
    case SGN_ALG_HMAC_SHA1_DES3_KD:
    case SGN_ALG_HMAC_MD5:
        code = k5_bcmp(md5cksum.contents, ptr + 14, cksum_len);
        break;
    default:
        code = 0;
        retval = GSS_S_DEFECTIVE_TOKEN;
        goto cleanup;
        break;
    }

    if (code != 0) {
        code = 0;
        retval = GSS_S_BAD_SIG;
        goto cleanup;
    }

    /*
     * For GSS_C_DCE_STYLE, the caller manages the padding, because the
     * pad length is in the RPC PDU. The value of the padding may be
     * uninitialized. For normal GSS, the last bytes of the decrypted
     * data contain the pad length. kg_fixup_padding_iov() will find
     * this and fixup the last data IOV appropriately.
     */
    if (toktype == KG_TOK_WRAP_MSG &&
        (ctx->gss_flags & GSS_C_DCE_STYLE) == 0) {
        retval = kg_fixup_padding_iov(&code, iov, iov_count);
        if (retval != GSS_S_COMPLETE)
            goto cleanup;
    }

    if (conf_state != NULL)
        *conf_state = (sealalg != 0xFFFF);

    if (qop_state != NULL)
        *qop_state = GSS_C_QOP_DEFAULT;

    if ((ctx->initiate && direction != 0xff) ||
        (!ctx->initiate && direction != 0)) {
        /* Fail here; otherwise retval would be overwritten by the sequence
         * check below. The cleanup path copies code into *minor_status. */
        code = (OM_uint32)G_BAD_DIRECTION;
        retval = GSS_S_BAD_SIG;
        goto cleanup;
    }

    code = 0;
    retval = g_order_check(&ctx->seqstate, (gssint_uint64)seqnum);

cleanup:
    krb5_free_checksum_contents(context, &md5cksum);

    *minor_status = code;

    return retval;
}
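/*
 * After a pre-CFX unwrap, the real pad length is only known from the last
 * decrypted byte. The helper below shrinks the DATA buffer by however many
 * pad bytes it absorbed and empties the PADDING buffer accordingly.
 */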
OM_uint32
kg_fixup_padding_iov(OM_uint32 *minor_status,
                     gss_iov_buffer_desc *iov,
                     int iov_count)
{
    gss_iov_buffer_t padding = NULL;
    gss_iov_buffer_t data = NULL;
    size_t padlength, relative_padlength;
    unsigned char *p;
    OM_uint32 minor;

    data = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_DATA);
    padding = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);

    if (data == NULL) {
        *minor_status = 0;
        return GSS_S_COMPLETE;
    }

    if (padding == NULL || padding->buffer.length == 0) {
        *minor_status = EINVAL;
        return GSS_S_FAILURE;
    }

    p = (unsigned char *)padding->buffer.value;
    padlength = p[padding->buffer.length - 1];

    if (data->buffer.length + padding->buffer.length < padlength ||
        padlength == 0) {
        *minor_status = (OM_uint32)KRB5_BAD_MSIZE;
        return GSS_S_DEFECTIVE_TOKEN;
    }

    /*
     * kg_unseal_stream_iov() will place one byte of padding in the
     * padding buffer; its true value is unknown until after decryption.
     *
     * relative_padlength contains the number of bytes to compensate the
     * padding and data buffers by; it will be zero if the caller manages
     * the padding length.
     *
     * eg. if the buffers are structured as follows:
     *
     *      +---DATA---+-PAD-+
     *      | ABCDE444 |  4  |
     *      +----------+-----+
     *
     * after compensation they would look like:
     *
     *      +-DATA--+-PAD--+
     *      | ABCDE | NULL |
     *      +-------+------+
     */
    relative_padlength = padlength - padding->buffer.length;

    assert(data->buffer.length >= relative_padlength);

    data->buffer.length -= relative_padlength;

    if (padding->type & GSS_IOV_BUFFER_FLAG_ALLOCATED) {
        gss_release_buffer(&minor, &padding->buffer);
        padding->type &= ~(GSS_IOV_BUFFER_FLAG_ALLOCATED);
    }

    padding->buffer.length = 0;
    padding->buffer.value = NULL;

    *minor_status = 0;

    return GSS_S_COMPLETE;
}
/*
 * DCE_STYLE indicates actual RRC is EC + RRC.
 * EC is the extra rotate count for DCE_STYLE, pad length otherwise.
 * RRC is the rotate count.
 */
static krb5_error_code
kg_translate_iov_v3(krb5_context context,
                    int dce_style,
                    size_t ec,
                    size_t rrc,
                    krb5_enctype enctype,
                    gss_iov_buffer_desc *iov,
                    int iov_count,
                    krb5_crypto_iov **pkiov,
                    size_t *pkiov_count)
{
    gss_iov_buffer_t header;
    gss_iov_buffer_t trailer;
    int i = 0, j;
    size_t kiov_count;
    krb5_crypto_iov *kiov;
    unsigned int k5_headerlen = 0, k5_trailerlen = 0;
    size_t gss_headerlen, gss_trailerlen;
    krb5_error_code code;

    *pkiov = NULL;
    *pkiov_count = 0;

    header = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
    assert(header != NULL);

    trailer = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
    assert(trailer == NULL || rrc == 0);

    code = krb5_c_crypto_length(context, enctype, KRB5_CRYPTO_TYPE_HEADER,
                                &k5_headerlen);
    if (code != 0)
        return code;

    code = krb5_c_crypto_length(context, enctype, KRB5_CRYPTO_TYPE_TRAILER,
                                &k5_trailerlen);
    if (code != 0)
        return code;

    /* Check header and trailer sizes */
    gss_headerlen = 16 /* GSS-Header */ + k5_headerlen; /* Kerb-Header */
    gss_trailerlen = ec + 16 /* E(GSS-Header) */ +
        k5_trailerlen; /* Kerb-Trailer */

    /* If we're called without a trailer, we must rotate by trailer length */
    if (trailer == NULL) {
        size_t actual_rrc = rrc;

        if (dce_style)
            actual_rrc += ec; /* compensate for Windows bug */

        if (actual_rrc != gss_trailerlen)
            return KRB5_BAD_MSIZE;

        gss_headerlen += gss_trailerlen;
        gss_trailerlen = 0;
    } else {
        if (trailer->buffer.length != gss_trailerlen)
            return KRB5_BAD_MSIZE;
    }

    if (header->buffer.length != gss_headerlen)
        return KRB5_BAD_MSIZE;

    kiov_count = 3 + iov_count;
    kiov = (krb5_crypto_iov *)malloc(kiov_count * sizeof(krb5_crypto_iov));
    if (kiov == NULL)
        return ENOMEM;

    /* The krb5 header is located at the end of the GSS header. */
    kiov[i].flags = KRB5_CRYPTO_TYPE_HEADER;
    kiov[i].data.length = k5_headerlen;
    kiov[i].data.data = (char *)header->buffer.value +
        header->buffer.length - k5_headerlen;
    i++;

    for (j = 0; j < iov_count; j++) {
        kiov[i].flags = kg_translate_flag_iov(iov[j].type);
        if (kiov[i].flags == KRB5_CRYPTO_TYPE_EMPTY)
            continue;

        kiov[i].data.length = iov[j].buffer.length;
        kiov[i].data.data = (char *)iov[j].buffer.value;
        i++;
    }

    /*
     * The EC and encrypted GSS header are placed in the trailer, which may
     * be rotated directly after the plaintext header if no trailer buffer
     * is provided.
     */
    kiov[i].flags = KRB5_CRYPTO_TYPE_DATA;
    kiov[i].data.length = ec + 16; /* E(Header) */
    if (trailer == NULL)
        kiov[i].data.data = (char *)header->buffer.value + 16;
    else
        kiov[i].data.data = (char *)trailer->buffer.value;
    i++;

    /*
     * The krb5 trailer is placed after the encrypted copy of the
     * krb5 header (which may be in the GSS header or trailer).
     */
    kiov[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
    kiov[i].data.length = k5_trailerlen;
    kiov[i].data.data = kiov[i - 1].data.data + ec + 16; /* E(Header) */
    i++;

    *pkiov = kiov;
    *pkiov_count = i;

    return 0;
}
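/*
 * Pre-CFX translation, by contrast: raw enctypes have no krb5 header or
 * trailer, so the corresponding krb5_crypto_iov slots below are left empty
 * and the confounder (which lives at the end of the GSS header) is passed
 * through as ordinary data.
 */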
/* AEAD */
static krb5_error_code
kg_translate_iov_v1(krb5_context context,
                    krb5_enctype enctype,
                    gss_iov_buffer_desc *iov,
                    int iov_count,
                    krb5_crypto_iov **pkiov,
                    size_t *pkiov_count)
{
    gss_iov_buffer_desc *header;
    gss_iov_buffer_desc *trailer;
    int i = 0, j;
    size_t kiov_count;
    krb5_crypto_iov *kiov;
    size_t conf_len;

    *pkiov = NULL;
    *pkiov_count = 0;

    conf_len = kg_confounder_size(context, enctype);

    header = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
    assert(header != NULL);

    if (header->buffer.length < conf_len)
        return KRB5_BAD_MSIZE;

    trailer = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
    assert(trailer == NULL || trailer->buffer.length == 0);

    kiov_count = 3 + iov_count;
    kiov = (krb5_crypto_iov *)malloc(kiov_count * sizeof(krb5_crypto_iov));
    if (kiov == NULL)
        return ENOMEM;

    /* For pre-CFX (raw enctypes) there is no krb5 header */
    kiov[i].flags = KRB5_CRYPTO_TYPE_HEADER;
    kiov[i].data.length = 0;
    kiov[i].data.data = NULL;
    i++;

    /* For pre-CFX, the confounder is at the end of the GSS header */
    kiov[i].flags = KRB5_CRYPTO_TYPE_DATA;
    kiov[i].data.length = conf_len;
    kiov[i].data.data = (char *)header->buffer.value +
        header->buffer.length - conf_len;
    i++;

    for (j = 0; j < iov_count; j++) {
        kiov[i].flags = kg_translate_flag_iov(iov[j].type);
        if (kiov[i].flags == KRB5_CRYPTO_TYPE_EMPTY)
            continue;

        kiov[i].data.length = iov[j].buffer.length;
        kiov[i].data.data = (char *)iov[j].buffer.value;
        i++;
    }

    kiov[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
    kiov[i].data.length = 0;
    kiov[i].data.data = NULL;
    i++;

    *pkiov = kiov;
    *pkiov_count = i;

    return 0;
}
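/*
 * CFX (RFC 4121) token header layout produced by the builder below, all
 * 16 bytes:
 *
 *   outbuf[0..1]   TOK_ID (big-endian)
 *   outbuf[2]      flags (FLAG_SENDER_IS_ACCEPTOR, FLAG_WRAP_CONFIDENTIAL,
 *                  FLAG_ACCEPTOR_SUBKEY)
 *   outbuf[3]      filler (0xFF)
 *   outbuf[4..5]   EC: extra count (pad length, or checksum size for
 *                  integrity-only wrap tokens; 0xFFFF for MIC/DEL)
 *   outbuf[6..7]   RRC: right rotation count (0xFFFF for MIC/DEL)
 *   outbuf[8..15]  64-bit sequence number (big-endian)
 */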
krb5_error_code
gss_krb5int_make_seal_token_v3_iov(krb5_context context,
                                   krb5_gss_ctx_id_rec *ctx,
                                   int conf_req_flag,
                                   int *conf_state,
                                   gss_iov_buffer_desc *iov,
                                   int iov_count,
                                   int toktype)
{
    krb5_error_code code = 0;
    gss_iov_buffer_t header;
    gss_iov_buffer_t padding;
    gss_iov_buffer_t trailer;
    unsigned char acceptor_flag;
    unsigned short tok_id;
    unsigned char *outbuf = NULL;
    unsigned char *tbuf = NULL;
    int key_usage;
    size_t rrc = 0;
    unsigned int gss_headerlen, gss_trailerlen;
    krb5_key key;
    krb5_cksumtype cksumtype;
    size_t data_length, assoc_data_length;

    assert(ctx->big_endian == 0);
    assert(ctx->proto == 1);

    acceptor_flag = ctx->initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
    key_usage = (toktype == KG_TOK_WRAP_MSG
                 ? (ctx->initiate
                    ? KG_USAGE_INITIATOR_SEAL
                    : KG_USAGE_ACCEPTOR_SEAL)
                 : (ctx->initiate
                    ? KG_USAGE_INITIATOR_SIGN
                    : KG_USAGE_ACCEPTOR_SIGN));
    if (ctx->have_acceptor_subkey) {
        key = ctx->acceptor_subkey;
        cksumtype = ctx->acceptor_subkey_cksumtype;
    } else {
        key = ctx->subkey;
        cksumtype = ctx->cksumtype;
    }
    assert(key != NULL);
    assert(cksumtype != 0);

    kg_iov_msglen(iov, iov_count, &data_length, &assoc_data_length);

    header = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
    if (header == NULL)
        return EINVAL;

    padding = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
    if (padding != NULL)
        padding->buffer.length = 0;

    trailer = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);

    if (toktype == KG_TOK_WRAP_MSG && conf_req_flag) {
        unsigned int k5_headerlen, k5_trailerlen, k5_padlen;
        size_t ec = 0;
        size_t conf_data_length = data_length - assoc_data_length;

        code = krb5_c_crypto_length(context, key->keyblock.enctype,
                                    KRB5_CRYPTO_TYPE_HEADER, &k5_headerlen);
        if (code != 0)
            goto cleanup;

        code = krb5_c_padding_length(context, key->keyblock.enctype,
                                     conf_data_length + 16 /* E(Header) */,
                                     &k5_padlen);
        if (code != 0)
            goto cleanup;

        if (k5_padlen == 0 && (ctx->gss_flags & GSS_C_DCE_STYLE)) {
            /* Windows rejects AEAD tokens with non-zero EC */
            code = krb5_c_block_size(context, key->keyblock.enctype, &ec);
            if (code != 0)
                goto cleanup;
        } else
            ec = k5_padlen;

        code = krb5_c_crypto_length(context, key->keyblock.enctype,
                                    KRB5_CRYPTO_TYPE_TRAILER,
                                    &k5_trailerlen);
        if (code != 0)
            goto cleanup;

        gss_headerlen = 16 /* Header */ + k5_headerlen;
        gss_trailerlen = ec + 16 /* E(Header) */ + k5_trailerlen;

        if (trailer == NULL) {
            rrc = gss_trailerlen;
            /* Workaround for Windows bug where it rotates by EC + RRC */
            if (ctx->gss_flags & GSS_C_DCE_STYLE)
                rrc -= ec;
            gss_headerlen += gss_trailerlen;
        }

        if (header->type & GSS_IOV_BUFFER_FLAG_ALLOCATE)
            code = kg_allocate_iov(header, (size_t)gss_headerlen);
        else if (header->buffer.length < gss_headerlen)
            code = KRB5_BAD_MSIZE;
        if (code != 0)
            goto cleanup;
        outbuf = (unsigned char *)header->buffer.value;
        header->buffer.length = (size_t)gss_headerlen;

        if (trailer != NULL) {
            if (trailer->type & GSS_IOV_BUFFER_FLAG_ALLOCATE)
                code = kg_allocate_iov(trailer, (size_t)gss_trailerlen);
            else if (trailer->buffer.length < gss_trailerlen)
                code = KRB5_BAD_MSIZE;
            if (code != 0)
                goto cleanup;
            trailer->buffer.length = (size_t)gss_trailerlen;
        }

        /* TOK_ID */
        store_16_be(KG2_TOK_WRAP_MSG, outbuf);
        /* flags */
        outbuf[2] = (acceptor_flag
                     | (conf_req_flag ? FLAG_WRAP_CONFIDENTIAL : 0)
                     | (ctx->have_acceptor_subkey
                        ? FLAG_ACCEPTOR_SUBKEY : 0));
        /* filler */
        outbuf[3] = 0xFF;
        /* EC */
        store_16_be(ec, outbuf + 4);
        /* RRC */
        store_16_be(0, outbuf + 6);
        store_64_be(ctx->seq_send, outbuf + 8);

        /* EC | copy of header to be encrypted, located in (possibly
         * rotated) trailer */
        if (trailer == NULL)
            tbuf = (unsigned char *)header->buffer.value + 16; /* Header */
        else
            tbuf = (unsigned char *)trailer->buffer.value;

        memset(tbuf, 0xFF, ec);
        memcpy(tbuf + ec, header->buffer.value, 16);

        code = kg_encrypt_iov(context, ctx->proto,
                              ((ctx->gss_flags & GSS_C_DCE_STYLE) != 0),
                              ec, rrc, key, key_usage, 0, iov, iov_count);
        if (code != 0)
            goto cleanup;

        /* RRC */
        store_16_be(rrc, outbuf + 6);

        ctx->seq_send++;
    } else if (toktype == KG_TOK_WRAP_MSG && !conf_req_flag) {
        tok_id = KG2_TOK_WRAP_MSG;

    wrap_with_checksum:

        gss_headerlen = 16;

        code = krb5_c_crypto_length(context, key->keyblock.enctype,
                                    KRB5_CRYPTO_TYPE_CHECKSUM,
                                    &gss_trailerlen);
        if (code != 0)
            goto cleanup;

        assert(gss_trailerlen <= 0xFFFF);

        if (trailer == NULL) {
            rrc = gss_trailerlen;
            gss_headerlen += gss_trailerlen;
        }

        if (header->type & GSS_IOV_BUFFER_FLAG_ALLOCATE)
            code = kg_allocate_iov(header, (size_t)gss_headerlen);
        else if (header->buffer.length < gss_headerlen)
            code = KRB5_BAD_MSIZE;
        if (code != 0)
            goto cleanup;
        outbuf = (unsigned char *)header->buffer.value;
        header->buffer.length = (size_t)gss_headerlen;

        if (trailer != NULL) {
            if (trailer->type & GSS_IOV_BUFFER_FLAG_ALLOCATE)
                code = kg_allocate_iov(trailer, (size_t)gss_trailerlen);
            else if (trailer->buffer.length < gss_trailerlen)
                code = KRB5_BAD_MSIZE;
            if (code != 0)
                goto cleanup;
            trailer->buffer.length = (size_t)gss_trailerlen;
        }

        /* TOK_ID */
        store_16_be(tok_id, outbuf);
        /* flags */
        outbuf[2] = (acceptor_flag
                     | (ctx->have_acceptor_subkey
                        ? FLAG_ACCEPTOR_SUBKEY : 0));
        /* filler */
        outbuf[3] = 0xFF;
        if (toktype == KG_TOK_WRAP_MSG) {
            /* Use 0 for checksum calculation, substitute
             * checksum length later. */
            /* EC */
            store_16_be(0, outbuf + 4);
            /* RRC */
            store_16_be(0, outbuf + 6);
        } else {
            /* MIC and DEL store 0xFF in EC and RRC */
            store_16_be(0xFFFF, outbuf + 4);
            store_16_be(0xFFFF, outbuf + 6);
        }
        store_64_be(ctx->seq_send, outbuf + 8);

        code = kg_make_checksum_iov_v3(context, cksumtype, rrc, key,
                                       key_usage, iov, iov_count);
        if (code != 0)
            goto cleanup;

        ctx->seq_send++;

        if (toktype == KG_TOK_WRAP_MSG) {
            /* Fix up EC field */
            store_16_be(gss_trailerlen, outbuf + 4);
            /* Fix up RRC field */
            store_16_be(rrc, outbuf + 6);
        }
    } else if (toktype == KG_TOK_MIC_MSG) {
        tok_id = KG2_TOK_MIC_MSG;
        trailer = NULL;
        goto wrap_with_checksum;
    } else if (toktype == KG_TOK_DEL_CTX) {
        tok_id = KG2_TOK_DEL_CTX;
        goto wrap_with_checksum;
    } else {
        abort();
    }

    code = 0;

    if (conf_state != NULL)
        *conf_state = conf_req_flag;

cleanup:
    if (code != 0)
        kg_release_iov(iov, iov_count);

    return code;
}
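/*
 * Verification counterpart of the token builder above. When the caller
 * supplies no TRAILER buffer, the sender must have rotated the trailing
 * EC | E(Header) | Kerb-Trailer bytes up against the 16-byte header (the
 * RRC field records by how much). kg_unseal_stream_iov() has already
 * rotated stream tokens back and zeroed RRC, so a non-zero RRC is only
 * accepted here for the trailerless layout.
 */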
OM_uint32
gss_krb5int_unseal_v3_iov(krb5_context context,
                          OM_uint32 *minor_status,
                          krb5_gss_ctx_id_rec *ctx,
                          gss_iov_buffer_desc *iov,
                          int iov_count,
                          int *conf_state,
                          gss_qop_t *qop_state,
                          int toktype)
{
    OM_uint32 code;
    gss_iov_buffer_t header;
    gss_iov_buffer_t padding;
    gss_iov_buffer_t trailer;
    unsigned char acceptor_flag;
    unsigned char *ptr = NULL;
    int key_usage;
    size_t rrc, ec;
    size_t data_length, assoc_data_length;
    krb5_key key;
    gssint_uint64 seqnum;
    krb5_boolean valid;
    krb5_cksumtype cksumtype;
    int conf_flag = 0;

    if (ctx->big_endian != 0)
        return GSS_S_DEFECTIVE_TOKEN;

    if (qop_state != NULL)
        *qop_state = GSS_C_QOP_DEFAULT;

    header = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
    assert(header != NULL);

    padding = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
    if (padding != NULL && padding->buffer.length != 0)
        return GSS_S_DEFECTIVE_TOKEN;

    trailer = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);

    acceptor_flag = ctx->initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;
    key_usage = (toktype == KG_TOK_WRAP_MSG
                 ? (!ctx->initiate
                    ? KG_USAGE_INITIATOR_SEAL
                    : KG_USAGE_ACCEPTOR_SEAL)
                 : (!ctx->initiate
                    ? KG_USAGE_INITIATOR_SIGN
                    : KG_USAGE_ACCEPTOR_SIGN));

    kg_iov_msglen(iov, iov_count, &data_length, &assoc_data_length);

    ptr = (unsigned char *)header->buffer.value;

    if (header->buffer.length < 16) {
        *minor_status = 0;
        return GSS_S_DEFECTIVE_TOKEN;
    }

    if ((ptr[2] & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
        *minor_status = (OM_uint32)G_BAD_DIRECTION;
        return GSS_S_BAD_SIG;
    }

    if (ctx->have_acceptor_subkey && (ptr[2] & FLAG_ACCEPTOR_SUBKEY)) {
        key = ctx->acceptor_subkey;
        cksumtype = ctx->acceptor_subkey_cksumtype;
    } else {
        key = ctx->subkey;
        cksumtype = ctx->cksumtype;
    }
    assert(key != NULL);

    if (toktype == KG_TOK_WRAP_MSG) {
        unsigned int k5_trailerlen;

        if (load_16_be(ptr) != KG2_TOK_WRAP_MSG)
            goto defective;
        conf_flag = ((ptr[2] & FLAG_WRAP_CONFIDENTIAL) != 0);
        if (ptr[3] != 0xFF)
            goto defective;
        ec = load_16_be(ptr + 4);
        rrc = load_16_be(ptr + 6);
        seqnum = load_64_be(ptr + 8);

        code = krb5_c_crypto_length(context, key->keyblock.enctype,
                                    conf_flag ?
                                    KRB5_CRYPTO_TYPE_TRAILER :
                                    KRB5_CRYPTO_TYPE_CHECKSUM,
                                    &k5_trailerlen);
        if (code != 0) {
            *minor_status = code;
            return GSS_S_FAILURE;
        }

        /* Deal with RRC */
        if (trailer == NULL) {
            size_t desired_rrc = k5_trailerlen;

            if (conf_flag) {
                desired_rrc += 16; /* E(Header) */

                if ((ctx->gss_flags & GSS_C_DCE_STYLE) == 0)
                    desired_rrc += ec;
            }

            /* According to MS, we only need to deal with a fixed RRC for
             * DCE */
            if (rrc != desired_rrc)
                goto defective;
        } else if (rrc != 0) {
            /* Should have been rotated by kg_unseal_stream_iov() */
            goto defective;
        }

        if (conf_flag) {
            unsigned char *althdr;

            /* Decrypt */
            code = kg_decrypt_iov(context, ctx->proto,
                                  ((ctx->gss_flags & GSS_C_DCE_STYLE) != 0),
                                  ec, rrc, key, key_usage, 0,
                                  iov, iov_count);
            if (code != 0) {
                *minor_status = code;
                return GSS_S_BAD_SIG;
            }

            /* Validate header integrity */
            if (trailer == NULL)
                althdr = (unsigned char *)header->buffer.value + 16 + ec;
            else
                althdr = (unsigned char *)trailer->buffer.value + ec;

            if (load_16_be(althdr) != KG2_TOK_WRAP_MSG ||
                althdr[2] != ptr[2] ||
                althdr[3] != ptr[3] ||
                memcmp(althdr + 8, ptr + 8, 8) != 0) {
                *minor_status = 0;
                return GSS_S_BAD_SIG;
            }
        } else {
            /* Verify checksum: note EC is checksum size here, not padding */
            if (ec != k5_trailerlen)
                goto defective;

            /* Zero EC, RRC before computing checksum */
            store_16_be(0, ptr + 4);
            store_16_be(0, ptr + 6);

            code = kg_verify_checksum_iov_v3(context, cksumtype, rrc, key,
                                             key_usage, iov, iov_count,
                                             &valid);
            if (code != 0 || valid == FALSE) {
                *minor_status = code;
                return GSS_S_BAD_SIG;
            }
        }

        code = g_order_check(&ctx->seqstate, seqnum);
    } else if (toktype == KG_TOK_MIC_MSG) {
        if (load_16_be(ptr) != KG2_TOK_MIC_MSG)
            goto defective;

    verify_mic_1:
        if (ptr[3] != 0xFF)
            goto defective;
        seqnum = load_64_be(ptr + 8);

        code = kg_verify_checksum_iov_v3(context, cksumtype, 0, key,
                                         key_usage, iov, iov_count, &valid);
        if (code != 0 || valid == FALSE) {
            *minor_status = code;
            return GSS_S_BAD_SIG;
        }

        code = g_order_check(&ctx->seqstate, seqnum);
    } else if (toktype == KG_TOK_DEL_CTX) {
        if (load_16_be(ptr) != KG2_TOK_DEL_CTX)
            goto defective;
        goto verify_mic_1;
    } else {
        goto defective;
    }

    *minor_status = 0;

    if (conf_state != NULL)
        *conf_state = conf_flag;

    return code;

defective:
    *minor_status = 0;

    return GSS_S_DEFECTIVE_TOKEN;
}
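/*
 * Illustrative sketch (not part of the original file): SIGN_ONLY buffers let
 * a caller bind cleartext (e.g. an RPC PDU header) to the encrypted payload,
 * which is how kg_iov_msglen() ends up with distinct data and
 * associated-data lengths in the functions above. Uses the public
 * gssapi_ext.h API; the helper name example_wrap_with_aad is hypothetical.
 */
static OM_uint32
example_wrap_with_aad(gss_ctx_id_t ctx,
                      gss_buffer_t aad,      /* integrity-protected only */
                      gss_buffer_t payload)  /* encrypted in place */
{
    OM_uint32 minor;
    int conf_state;
    gss_iov_buffer_desc iov[5];

    /* Let the mechanism allocate HEADER/PADDING/TRAILER for us; free them
     * afterwards with gss_release_iov_buffer(). */
    iov[0].type = GSS_IOV_BUFFER_TYPE_HEADER | GSS_IOV_BUFFER_FLAG_ALLOCATE;
    iov[1].type = GSS_IOV_BUFFER_TYPE_SIGN_ONLY;
    iov[1].buffer = *aad;
    iov[2].type = GSS_IOV_BUFFER_TYPE_DATA;
    iov[2].buffer = *payload;
    iov[3].type = GSS_IOV_BUFFER_TYPE_PADDING | GSS_IOV_BUFFER_FLAG_ALLOCATE;
    iov[4].type = GSS_IOV_BUFFER_TYPE_TRAILER | GSS_IOV_BUFFER_FLAG_ALLOCATE;

    /* The application conveys each buffer per its own protocol; the
     * SIGN_ONLY region is not part of the wrap token proper, but must be
     * presented identically at gss_unwrap_iov() time. */
    return gss_wrap_iov(&minor, ctx, 1, GSS_C_QOP_DEFAULT, &conf_state,
                        iov, 5);
}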