/*
 * Serialize a set of inner context tokens into a single wire buffer.
 *
 * Each inner token is encoded as an 8-byte header (32-bit big-endian
 * type word, then a 32-bit big-endian length) followed by the raw
 * token octets.  On success the caller owns buffer->value; on error
 * the buffer is released and left empty.
 */
OM_uint32
gssEapEncodeInnerTokens(OM_uint32 *minor,
                        struct gss_eap_token_buffer_set *tokens,
                        gss_buffer_t buffer)
{
    OM_uint32 major, tmpMinor;
    size_t totalSize = 0;
    size_t i;
    unsigned char *wire;

    buffer->value = NULL;
    buffer->length = 0;

    /* First pass: work out how much space the encoding needs. */
    for (i = 0; i < tokens->buffers.count; i++)
        totalSize += 8 + tokens->buffers.elements[i].length;

    /*
     * We must always return a non-NULL token otherwise the calling state
     * machine assumes we are finished. Hence care in case malloc(0) does
     * return NULL.
     */
    buffer->value = GSSEAP_MALLOC(totalSize ? totalSize : 1);
    if (buffer->value == NULL) {
        major = GSS_S_FAILURE;
        *minor = ENOMEM;
        goto cleanup;
    }

    buffer->length = totalSize;
    wire = (unsigned char *)buffer->value;

    /* Second pass: emit each token as type, length, value. */
    for (i = 0; i < tokens->buffers.count; i++) {
        gss_buffer_t innerToken = &tokens->buffers.elements[i];

        GSSEAP_ASSERT((tokens->types[i] & ITOK_FLAG_VERIFIED) == 0); /* private flag */

        /*
         * Extensions are encoded as type-length-value, where the upper
         * bit of the type indicates criticality.
         */
        store_uint32_be(tokens->types[i], &wire[0]);
        store_uint32_be(innerToken->length, &wire[4]);
        memcpy(&wire[8], innerToken->value, innerToken->length);

        wire += 8 + innerToken->length;
    }

    GSSEAP_ASSERT(wire == (unsigned char *)buffer->value + totalSize);
    GSSEAP_ASSERT(buffer->value != NULL);

    major = GSS_S_COMPLETE;
    *minor = 0;

cleanup:
    if (GSS_ERROR(major))
        gss_release_buffer(&tmpMinor, buffer);

    return major;
}
/*
 * Serialize the partial (not yet established) acceptor context state:
 * the current RADIUS server name followed by the opaque RADIUS state
 * blob, each prefixed with its 32-bit big-endian length.  On success
 * the caller owns token->value; on error the token is released.
 */
static OM_uint32
gssEapExportPartialContext(OM_uint32 *minor,
                           gss_ctx_id_t ctx,
                           gss_buffer_t token)
{
    OM_uint32 major, tmpMinor;
    size_t totalLen;
    size_t serverLen = 0;
    unsigned char *p;
    char serverBuf[MAXHOSTNAMELEN];

    /* Capture the name of the RADIUS peer we are currently talking to. */
    if (ctx->acceptorCtx.radConn != NULL) {
        if (rs_conn_get_current_peer(ctx->acceptorCtx.radConn,
                                     serverBuf, sizeof(serverBuf)) != 0) {
#if 0
            return gssEapRadiusMapError(minor,
                                        rs_err_conn_pop(ctx->acceptorCtx.radConn));
#else
            serverBuf[0] = '\0'; /* not implemented yet */
#endif
        }
        serverLen = strlen(serverBuf);
    }

    totalLen = 4 + serverLen + 4 + ctx->acceptorCtx.state.length;

    token->value = GSSEAP_MALLOC(totalLen);
    if (token->value == NULL) {
        major = GSS_S_FAILURE;
        *minor = ENOMEM;
        goto cleanup;
    }
    token->length = totalLen;

    p = (unsigned char *)token->value;

    /* Server name, length-prefixed. */
    store_uint32_be(serverLen, p);
    p += 4;
    if (serverLen != 0) {
        memcpy(p, serverBuf, serverLen);
        p += serverLen;
    }

    /* Opaque acceptor state blob, length-prefixed. */
    store_uint32_be(ctx->acceptorCtx.state.length, p);
    p += 4;
    if (ctx->acceptorCtx.state.length != 0) {
        memcpy(p, ctx->acceptorCtx.state.value,
               ctx->acceptorCtx.state.length);
        p += ctx->acceptorCtx.state.length;
    }

    GSSEAP_ASSERT(p == (unsigned char *)token->value + token->length);

    major = GSS_S_COMPLETE;
    *minor = 0;

cleanup:
    if (GSS_ERROR(major))
        gss_release_buffer(&tmpMinor, token);

    return major;
}
/*
 * Serialize an EAP security context into a flat token for export.
 * Wire layout (all integers big-endian):
 *
 *   version | state | flags | gssFlags | mechanismUsed OID |
 *   checksumType | encryptionType | RFC3961 key | initiator name |
 *   acceptor name | expiryTime | sendSeq | recvSeq | seqState |
 *   [partial acceptor context, acceptor builds only]
 *
 * On error the partially built token is released; the scratch name and
 * partial-context buffers are always released.
 */
OM_uint32
gssEapExportSecContext(OM_uint32 *minor,
                       gss_ctx_id_t ctx,
                       gss_buffer_t token)
{
    OM_uint32 major, tmpMinor;
    size_t length;
    gss_buffer_desc initiatorName = GSS_C_EMPTY_BUFFER;
    gss_buffer_desc acceptorName = GSS_C_EMPTY_BUFFER;
    gss_buffer_desc partialCtx = GSS_C_EMPTY_BUFFER;
    gss_buffer_desc key;
    unsigned char *p;

    /*
     * Unestablished initiator contexts cannot be exported, nor can any
     * context that has not yet negotiated a mechanism OID.
     */
    if ((CTX_IS_INITIATOR(ctx) && !CTX_IS_ESTABLISHED(ctx)) ||
        ctx->mechanismUsed == GSS_C_NO_OID) {
        *minor = GSSEAP_CONTEXT_INCOMPLETE;
        return GSS_S_NO_CONTEXT;
    }

    /* key aliases the context's RFC3961 key; it is not copied here. */
    key.length = KRB_KEY_LENGTH(&ctx->rfc3961Key);
    key.value = KRB_KEY_DATA(&ctx->rfc3961Key);

    /* Names are exported in composite (attribute-carrying) form. */
    if (ctx->initiatorName != GSS_C_NO_NAME) {
        major = gssEapExportNameInternal(minor, ctx->initiatorName,
                                         &initiatorName,
                                         EXPORT_NAME_FLAG_COMPOSITE);
        if (GSS_ERROR(major))
            goto cleanup;
    }

    if (ctx->acceptorName != GSS_C_NO_NAME) {
        major = gssEapExportNameInternal(minor, ctx->acceptorName,
                                         &acceptorName,
                                         EXPORT_NAME_FLAG_COMPOSITE);
        if (GSS_ERROR(major))
            goto cleanup;
    }

#ifdef GSSEAP_ENABLE_ACCEPTOR
    /*
     * The partial context is only transmitted for unestablished acceptor
     * contexts.
     */
    if (!CTX_IS_INITIATOR(ctx) && !CTX_IS_ESTABLISHED(ctx) &&
        (ctx->flags & CTX_FLAG_KRB_REAUTH) == 0) {
        major = gssEapExportPartialContext(minor, ctx, &partialCtx);
        if (GSS_ERROR(major))
            goto cleanup;
    }
#endif

    /* Size pass: must stay in lockstep with the stores below. */
    length = 16;                                /* version, state, flags, */
    length += 4 + ctx->mechanismUsed->length;   /* mechanismUsed */
    length += 12 + key.length;                  /* rfc3961Key.value */
    length += 4 + initiatorName.length;         /* initiatorName.value */
    length += 4 + acceptorName.length;          /* acceptorName.value */
    length += 24 + sequenceSize(ctx->seqState); /* seqState */
    if (partialCtx.value != NULL)
        length += 4 + partialCtx.length;        /* partialCtx.value */

    token->value = GSSEAP_MALLOC(length);
    if (token->value == NULL) {
        major = GSS_S_FAILURE;
        *minor = ENOMEM;
        goto cleanup;
    }
    token->length = length;

    p = (unsigned char *)token->value;

    store_uint32_be(EAP_EXPORT_CONTEXT_V1, &p[0]); /* version */
    store_uint32_be(GSSEAP_SM_STATE(ctx), &p[4]);
    store_uint32_be(ctx->flags, &p[8]);
    store_uint32_be(ctx->gssFlags, &p[12]);
    p = store_oid(ctx->mechanismUsed, &p[16]);

    store_uint32_be(ctx->checksumType, &p[0]);
    store_uint32_be(ctx->encryptionType, &p[4]);
    p = store_buffer(&key, &p[8], FALSE);

    p = store_buffer(&initiatorName, p, FALSE);
    p = store_buffer(&acceptorName, p, FALSE);

    /* Timestamps and sequence numbers are 64-bit big-endian. */
    store_uint64_be(ctx->expiryTime, &p[0]);
    store_uint64_be(ctx->sendSeq, &p[8]);
    store_uint64_be(ctx->recvSeq, &p[16]);
    p += 24;

    /* sequenceExternalize advances p past the replay-state encoding. */
    major = sequenceExternalize(minor, ctx->seqState, &p, &length);
    if (GSS_ERROR(major))
        goto cleanup;

    if (partialCtx.value != NULL)
        p = store_buffer(&partialCtx, p, FALSE);

    GSSEAP_ASSERT(p == (unsigned char *)token->value + token->length);

    major = GSS_S_COMPLETE;
    *minor = 0;

cleanup:
    if (GSS_ERROR(major))
        gss_release_buffer(&tmpMinor, token);
    gss_release_buffer(&tmpMinor, &initiatorName);
    gss_release_buffer(&tmpMinor, &acceptorName);
    gss_release_buffer(&tmpMinor, &partialCtx);

    return major;
}
static OM_uint32 makeErrorToken(OM_uint32 *minor, OM_uint32 majorStatus, OM_uint32 minorStatus, struct gss_eap_token_buffer_set *token) { OM_uint32 major, tmpMinor; unsigned char errorData[8]; gss_buffer_desc errorBuffer; GSSEAP_ASSERT(GSS_ERROR(majorStatus)); /* * Only return error codes that the initiator could have caused, * to avoid information leakage. */ #if MECH_EAP if (IS_RADIUS_ERROR(minorStatus)) { /* Squash RADIUS error codes */ minorStatus = GSSEAP_RADIUS_PROT_FAILURE; } else if (!IS_WIRE_ERROR(minorStatus)) { #else if (!IS_WIRE_ERROR(minorStatus)) { #endif /* Don't return non-wire error codes */ return GSS_S_COMPLETE; } minorStatus -= ERROR_TABLE_BASE_eapg; store_uint32_be(majorStatus, &errorData[0]); store_uint32_be(minorStatus, &errorData[4]); major = gssEapAllocInnerTokens(&tmpMinor, 1, token); if (GSS_ERROR(major)) { *minor = tmpMinor; return major; } errorBuffer.length = sizeof(errorData); errorBuffer.value = errorData; major = duplicateBuffer(&tmpMinor, &errorBuffer, &token->buffers.elements[0]); if (GSS_ERROR(major)) { gssEapReleaseInnerTokens(&tmpMinor, token, 1); *minor = tmpMinor; return major; } token->buffers.count = 1; token->types[0] = ITOK_TYPE_CONTEXT_ERR | ITOK_FLAG_CRITICAL; *minor = 0; return GSS_S_COMPLETE; } OM_uint32 gssEapSmStep(OM_uint32 *minor, gss_cred_id_t cred, gss_ctx_id_t ctx, gss_name_t target, gss_OID mech, OM_uint32 reqFlags, OM_uint32 timeReq, gss_channel_bindings_t chanBindings, gss_buffer_t inputToken, gss_buffer_t outputToken, struct gss_eap_sm *sm, /* ordered by state */ size_t smCount) { OM_uint32 major, tmpMajor, tmpMinor; struct gss_eap_token_buffer_set inputTokens = { { 0, GSS_C_NO_BUFFER }, NULL }; struct gss_eap_token_buffer_set outputTokens = { { 0, GSS_C_NO_BUFFER }, NULL }; gss_buffer_desc unwrappedInputToken = GSS_C_EMPTY_BUFFER; gss_buffer_desc unwrappedOutputToken = GSS_C_EMPTY_BUFFER; unsigned int smFlags = 0; size_t i, j; int initialContextToken = 0; enum gss_eap_token_type tokType; 
GSSEAP_ASSERT(smCount > 0); *minor = 0; outputToken->length = 0; outputToken->value = NULL; if (inputToken != GSS_C_NO_BUFFER && inputToken->length != 0) { major = gssEapVerifyToken(minor, ctx, inputToken, &tokType, &unwrappedInputToken); if (GSS_ERROR(major)) goto cleanup; if (tokType != (CTX_IS_INITIATOR(ctx) ? TOK_TYPE_ACCEPTOR_CONTEXT : TOK_TYPE_INITIATOR_CONTEXT)) { major = GSS_S_DEFECTIVE_TOKEN; *minor = GSSEAP_WRONG_TOK_ID; goto cleanup; } } else if (!CTX_IS_INITIATOR(ctx) || ctx->state != GSSEAP_STATE_INITIAL) { major = GSS_S_DEFECTIVE_TOKEN; *minor = GSSEAP_WRONG_SIZE; goto cleanup; } else { initialContextToken = 1; } if (CTX_IS_ESTABLISHED(ctx)) { major = GSS_S_BAD_STATUS; *minor = GSSEAP_CONTEXT_ESTABLISHED; goto cleanup; } GSSEAP_ASSERT(ctx->state < GSSEAP_STATE_ESTABLISHED); major = gssEapDecodeInnerTokens(minor, &unwrappedInputToken, &inputTokens); if (GSS_ERROR(major)) goto cleanup; major = gssEapAllocInnerTokens(minor, smCount, &outputTokens); if (GSS_ERROR(major)) goto cleanup; ctx->inputTokens = &inputTokens; ctx->outputTokens = &outputTokens; /* Process all the tokens that are valid for the current state. */ for (i = 0; i < smCount; i++) { struct gss_eap_sm *smp = &sm[i]; int processToken = 0; gss_buffer_t innerInputToken = GSS_C_NO_BUFFER; OM_uint32 *inputTokenType = NULL; gss_buffer_desc innerOutputToken = GSS_C_EMPTY_BUFFER; if ((smp->validStates & ctx->state) == 0) continue; /* * We special case the first call to gss_init_sec_context so that * all token providers have the opportunity to generate an initial * context token. Providers where inputTokenType is ITOK_TYPE_NONE * are always called and generally act on state transition boundaries, * for example to advance the state after a series of optional tokens * (as is the case with the extension token exchange) or to generate * a new token after the state was advanced by a provider which did * not emit a token. 
*/ if (smp->inputTokenType == ITOK_TYPE_NONE || initialContextToken) { processToken = 1; } else if ((smFlags & SM_FLAG_TRANSITED) == 0) { /* Don't regurgitate a token which belonds to a previous state. */ for (j = 0; j < inputTokens.buffers.count; j++) { if ((inputTokens.types[j] & ITOK_TYPE_MASK) == smp->inputTokenType) { if (processToken) { /* Check for duplicate inner tokens */ major = GSS_S_DEFECTIVE_TOKEN; *minor = GSSEAP_DUPLICATE_ITOK; break; } processToken = 1; innerInputToken = &inputTokens.buffers.elements[j]; inputTokenType = &inputTokens.types[j]; } } if (GSS_ERROR(major)) break; } if (processToken) { enum gss_eap_state oldState = ctx->state; smFlags = 0; if (inputTokenType != NULL && (*inputTokenType & ITOK_FLAG_CRITICAL)) smFlags |= SM_FLAG_INPUT_TOKEN_CRITICAL; major = smp->processToken(minor, cred, ctx, target, mech, reqFlags, timeReq, chanBindings, innerInputToken, &innerOutputToken, &smFlags); if (GSS_ERROR(major)) break; if (inputTokenType != NULL) *inputTokenType |= ITOK_FLAG_VERIFIED; if (ctx->state < oldState) i = 0; /* restart */ else if (ctx->state != oldState) smFlags |= SM_FLAG_TRANSITED; if (innerOutputToken.value != NULL) { outputTokens.buffers.elements[outputTokens.buffers.count] = innerOutputToken; GSSEAP_ASSERT(smp->outputTokenType != ITOK_TYPE_NONE); outputTokens.types[outputTokens.buffers.count] = smp->outputTokenType; if (smFlags & SM_FLAG_OUTPUT_TOKEN_CRITICAL) outputTokens.types[outputTokens.buffers.count] |= ITOK_FLAG_CRITICAL; outputTokens.buffers.count++; } /* * Break out if we made a state transition and have some tokens to send. 
*/ if ((smFlags & SM_FLAG_TRANSITED) && ((smFlags & SM_FLAG_FORCE_SEND_TOKEN) || outputTokens.buffers.count != 0)) { SM_ASSERT_VALID(ctx, major); break; } } else if ((smp->itokFlags & SM_ITOK_FLAG_REQUIRED) && smp->inputTokenType != ITOK_TYPE_NONE) { /* Check for required inner tokens */ major = GSS_S_DEFECTIVE_TOKEN; *minor = GSSEAP_MISSING_REQUIRED_ITOK; break; } } GSSEAP_ASSERT(outputTokens.buffers.count <= smCount); /* Check we understood all critical tokens sent by peer */ if (!GSS_ERROR(major)) { for (j = 0; j < inputTokens.buffers.count; j++) { if ((inputTokens.types[j] & ITOK_FLAG_CRITICAL) && (inputTokens.types[j] & ITOK_FLAG_VERIFIED) == 0) { major = GSS_S_UNAVAILABLE; *minor = GSSEAP_CRIT_ITOK_UNAVAILABLE; goto cleanup; } } } /* Optionaly emit an error token if we are the acceptor */ if (GSS_ERROR(major)) { if (CTX_IS_INITIATOR(ctx)) goto cleanup; /* return error directly to caller */ /* replace any emitted tokens with error token */ gssEapReleaseInnerTokens(&tmpMinor, &outputTokens, 1); tmpMajor = makeErrorToken(&tmpMinor, major, *minor, &outputTokens); if (GSS_ERROR(tmpMajor)) { major = tmpMajor; *minor = tmpMinor; goto cleanup; } } /* Format output token from inner tokens */ if (outputTokens.buffers.count != 0 || /* inner tokens to send */ !CTX_IS_INITIATOR(ctx) || /* any leg acceptor */ !CTX_IS_ESTABLISHED(ctx)) { /* non-last leg initiator */ tmpMajor = gssEapEncodeInnerTokens(&tmpMinor, &outputTokens, &unwrappedOutputToken); if (tmpMajor == GSS_S_COMPLETE) { if (CTX_IS_INITIATOR(ctx)) tokType = TOK_TYPE_INITIATOR_CONTEXT; else tokType = TOK_TYPE_ACCEPTOR_CONTEXT; tmpMajor = gssEapMakeToken(&tmpMinor, ctx, &unwrappedOutputToken, tokType, outputToken); if (GSS_ERROR(tmpMajor)) { major = tmpMajor; *minor = tmpMinor; goto cleanup; } } } /* If the context is established, empty tokens only to be emitted by initiator */ GSSEAP_ASSERT(!CTX_IS_ESTABLISHED(ctx) || ((outputToken->length == 0) == CTX_IS_INITIATOR(ctx))); SM_ASSERT_VALID(ctx, major); cleanup: 
gssEapReleaseInnerTokens(&tmpMinor, &inputTokens, 0); gssEapReleaseInnerTokens(&tmpMinor, &inputTokens, 1); gss_release_buffer(&tmpMinor, &unwrappedOutputToken); ctx->inputTokens = NULL; ctx->outputTokens = NULL; return major; }
/*
 * gss_pseudo_random() implementation: derive prf_out->length bytes of
 * keyed pseudo-random output from the context's RFC3961 key by
 * generating blocks T(i) = PRF(key, counter(i) || prf_in), where the
 * 32-bit big-endian block counter is prepended to the caller's input.
 *
 * NOTE(review): prf_out->value is written without being allocated here —
 * presumably the mechanism glue allocates it to prf_out->length bytes
 * before calling; confirm against the caller.  On failure the buffer is
 * released.
 */
OM_uint32
gssEapPseudoRandom(OM_uint32 *minor,
                   gss_const_ctx_id_t ctx,
                   int prf_key,
                   const gss_buffer_t prf_in,
                   gss_buffer_t prf_out)
{
    krb5_error_code code;
    int i;
    OM_uint32 tmpMinor;
    size_t prflen;
    krb5_data t, ns;
    unsigned char *p;
    krb5_context krbContext;
    ssize_t desired_output_len = prf_out->length;
#ifdef HAVE_HEIMDAL_VERSION
    krb5_crypto krbCrypto = NULL;
#endif

    *minor = 0;

    GSSEAP_KRB_INIT(&krbContext);

    KRB_DATA_INIT(&t);
    KRB_DATA_INIT(&ns);

    /* Only the two standard PRF key usages are accepted. */
    if (prf_key != GSS_C_PRF_KEY_PARTIAL &&
        prf_key != GSS_C_PRF_KEY_FULL) {
        code = GSSEAP_BAD_PRF_KEY;
        goto cleanup;
    }

    /* Size of a single PRF output block for the context's enctype. */
#ifdef HAVE_HEIMDAL_VERSION
    code = krb5_crypto_prf_length(krbContext, ctx->encryptionType, &prflen);
#else
    code = krb5_c_prf_length(krbContext, ctx->encryptionType, &prflen);
#endif
    if (code != 0)
        goto cleanup;

    /* ns = 4-byte counter || prf_in; counter is filled in per block. */
    ns.length = 4 + prf_in->length;
    ns.data = GSSEAP_MALLOC(ns.length);
    if (ns.data == NULL) {
        code = ENOMEM;
        goto cleanup;
    }

#ifdef HAVE_HEIMDAL_VERSION
    /* Heimdal: krb5_crypto_prf allocates t itself via the crypto handle. */
    code = krb5_crypto_init(krbContext, &ctx->rfc3961Key, 0, &krbCrypto);
    if (code != 0)
        goto cleanup;
#else
    /* MIT: the caller must provide the output block buffer. */
    t.length = prflen;
    t.data = GSSEAP_MALLOC(t.length);
    if (t.data == NULL) {
        code = ENOMEM;
        goto cleanup;
    }
#endif

    memcpy((unsigned char *)ns.data + 4, prf_in->value, prf_in->length);

    /*
     * Generate T(0), T(1), ... until enough output has been produced;
     * the final block is truncated to the remaining requested length.
     */
    i = 0;
    p = (unsigned char *)prf_out->value;
    while (desired_output_len > 0) {
        store_uint32_be(i, ns.data);

#ifdef HAVE_HEIMDAL_VERSION
        code = krb5_crypto_prf(krbContext, krbCrypto, &ns, &t);
#else
        code = krb5_c_prf(krbContext, &ctx->rfc3961Key, &ns, &t);
#endif
        if (code != 0)
            goto cleanup;

        memcpy(p, t.data, MIN(t.length, desired_output_len));

        p += t.length;
        desired_output_len -= t.length;
        i++;
    }

cleanup:
    if (code != 0)
        gss_release_buffer(&tmpMinor, prf_out);
    if (ns.data != NULL) {
        /* Scrub the PRF input (may be sensitive) before freeing. */
        memset(ns.data, 0, ns.length);
        GSSEAP_FREE(ns.data);
    }
#ifdef HAVE_HEIMDAL_VERSION
    krb5_crypto_destroy(krbContext, krbCrypto);
    krb5_data_free(&t);
#else
    if (t.data != NULL) {
        /* Scrub the last PRF output block before freeing. */
        memset(t.data, 0, t.length);
        GSSEAP_FREE(t.data);
    }
#endif

    *minor = code;

    return (code == 0) ? GSS_S_COMPLETE : GSS_S_FAILURE;
}