/*
 * Serialize a set of inner context tokens into a single contiguous
 * wire buffer. Each inner token is written as a TLV: 4-byte type
 * (big-endian, upper bit = criticality), 4-byte length, then the
 * token payload. On success the caller owns buffer->value.
 */
OM_uint32
gssEapEncodeInnerTokens(OM_uint32 *minor,
                        struct gss_eap_token_buffer_set *tokens,
                        gss_buffer_t buffer)
{
    OM_uint32 major, tmpMinor;
    size_t totalSize = 0;
    size_t i;
    unsigned char *dst;

    buffer->value = NULL;
    buffer->length = 0;

    for (i = 0; i < tokens->buffers.count; i++)
        totalSize += 8 + tokens->buffers.elements[i].length;

    /*
     * We must always return a non-NULL token otherwise the calling state
     * machine assumes we are finished. Hence care in case malloc(0) does
     * return NULL.
     */
    buffer->value = GSSEAP_MALLOC(totalSize ? totalSize : 1);
    if (buffer->value == NULL) {
        major = GSS_S_FAILURE;
        *minor = ENOMEM;
        goto cleanup;
    }

    buffer->length = totalSize;
    dst = (unsigned char *)buffer->value;

    for (i = 0; i < tokens->buffers.count; i++) {
        gss_buffer_t tok = &tokens->buffers.elements[i];

        /* ITOK_FLAG_VERIFIED is an in-memory-only flag; it must never
         * appear on the wire. */
        GSSEAP_ASSERT((tokens->types[i] & ITOK_FLAG_VERIFIED) == 0);

        /*
         * Extensions are encoded as type-length-value, where the upper
         * bit of the type indicates criticality.
         */
        store_uint32_be(tokens->types[i], &dst[0]);
        store_uint32_be(tok->length, &dst[4]);
        memcpy(&dst[8], tok->value, tok->length);

        dst += 8 + tok->length;
    }

    GSSEAP_ASSERT(dst == (unsigned char *)buffer->value + totalSize);
    GSSEAP_ASSERT(buffer->value != NULL);

    major = GSS_S_COMPLETE;
    *minor = 0;

cleanup:
    if (GSS_ERROR(major)) {
        gss_release_buffer(&tmpMinor, buffer);
    }

    return major;
}
/*
 * Advance the context state machine to a new state.
 * The state must lie within [GSSEAP_STATE_INITIAL, GSSEAP_STATE_ESTABLISHED].
 */
void
gssEapSmTransition(gss_ctx_id_t ctx, enum gss_eap_state state)
{
    GSSEAP_ASSERT(state >= GSSEAP_STATE_INITIAL);
    GSSEAP_ASSERT(state <= GSSEAP_STATE_ESTABLISHED);

#ifdef GSSEAP_DEBUG
    /*
     * Trace transitions only in debug builds; a library should not write
     * to stderr unconditionally (matches the GSSEAP_DEBUG gating used by
     * the other debug checks in this file).
     */
    fprintf(stderr, "GSS-EAP: state transition %s->%s\n",
            gssEapStateToString(GSSEAP_SM_STATE(ctx)),
            gssEapStateToString(state));
#endif

    ctx->state = state;
}
/*
 * Compute the total message length and the associated (sign-only) data
 * length across an IOV. SIGN_ONLY buffers count toward both totals;
 * DATA buffers count toward the message total only.
 */
void
gssEapIovMessageLength(gss_iov_buffer_desc *iov,
                       int iov_count,
                       size_t *data_length_p,
                       size_t *assoc_data_length_p)
{
    size_t dataLen = 0;
    size_t signOnlyLen = 0;
    int idx;

    GSSEAP_ASSERT(iov != GSS_C_NO_IOV_BUFFER);

    *data_length_p = 0;
    *assoc_data_length_p = 0;

    for (idx = 0; idx < iov_count; idx++) {
        OM_uint32 bufType = GSS_IOV_BUFFER_TYPE(iov[idx].type);

        if (bufType == GSS_IOV_BUFFER_TYPE_SIGN_ONLY)
            signOnlyLen += iov[idx].buffer.length;

        if (bufType == GSS_IOV_BUFFER_TYPE_DATA ||
            bufType == GSS_IOV_BUFFER_TYPE_SIGN_ONLY)
            dataLen += iov[idx].buffer.length;
    }

    *data_length_p = dataLen;
    *assoc_data_length_p = signOnlyLen;
}
/*
 * Serialize the acceptor's partial (RADIUS) context state into a token:
 *   [4-byte server-name length][server name][4-byte state length][state blob]
 * All integers are big-endian. On success the caller owns token->value;
 * on failure the token is released.
 */
static OM_uint32
gssEapExportPartialContext(OM_uint32 *minor,
                           gss_ctx_id_t ctx,
                           gss_buffer_t token)
{
    OM_uint32 major, tmpMinor;
    size_t length, serverLen = 0;
    unsigned char *p;
    char serverBuf[MAXHOSTNAMELEN];

    if (ctx->acceptorCtx.radConn != NULL) {
        if (rs_conn_get_current_peer(ctx->acceptorCtx.radConn,
                                     serverBuf, sizeof(serverBuf)) != 0) {
#if 0
            /* Disabled: would surface the libradsec connection error. */
            return gssEapRadiusMapError(minor,
                                        rs_err_conn_pop(ctx->acceptorCtx.radConn));
#else
            serverBuf[0] = '\0'; /* not implemented yet */
#endif
        }
        serverLen = strlen(serverBuf);
    }

    /* 4-byte length prefix for each of the two variable-length fields. */
    length = 4 + serverLen + 4 + ctx->acceptorCtx.state.length;

    token->value = GSSEAP_MALLOC(length);
    if (token->value == NULL) {
        major = GSS_S_FAILURE;
        *minor = ENOMEM;
        goto cleanup;
    }
    token->length = length;

    p = (unsigned char *)token->value;

    /* Selected RADIUS server name (may be empty). */
    store_uint32_be(serverLen, p);
    p += 4;

    if (serverLen != 0) {
        memcpy(p, serverBuf, serverLen);
        p += serverLen;
    }

    /* Opaque RADIUS state blob (may be empty). */
    store_uint32_be(ctx->acceptorCtx.state.length, p);
    p += 4;

    if (ctx->acceptorCtx.state.length != 0) {
        memcpy(p, ctx->acceptorCtx.state.value,
               ctx->acceptorCtx.state.length);
        p += ctx->acceptorCtx.state.length;
    }

    /* Everything accounted for: cursor must land exactly at the end. */
    GSSEAP_ASSERT(p == (unsigned char *)token->value + token->length);

    major = GSS_S_COMPLETE;
    *minor = 0;

cleanup:
    if (GSS_ERROR(major))
        gss_release_buffer(&tmpMinor, token);

    return major;
}
/*
 * Allocate storage of the requested size for an IOV buffer that has the
 * ALLOCATE flag set, marking it ALLOCATED on success so it is later freed
 * by gssEapReleaseIov(). Returns 0 on success or ENOMEM.
 */
int
gssEapAllocIov(gss_iov_buffer_t iov, size_t size)
{
    GSSEAP_ASSERT(iov != GSS_C_NO_IOV_BUFFER);
    GSSEAP_ASSERT(iov->type & GSS_IOV_BUFFER_FLAG_ALLOCATE);

    iov->buffer.value = GSSEAP_MALLOC(size);
    if (iov->buffer.value == NULL) {
        iov->buffer.length = 0;
        return ENOMEM;
    }

    iov->buffer.length = size;
    iov->type |= GSS_IOV_BUFFER_FLAG_ALLOCATED;

    return 0;
}
/*
 * One-shot wrapper that runs initiator initialization and asserts it
 * succeeded; presumably used where no error can be propagated (e.g. a
 * thread-once callback) — TODO confirm against caller.
 */
static void
gssEapInitiatorInitAssert(void)
{
    OM_uint32 rc, ignoredMinor;

    rc = gssEapInitiatorInit(&ignoredMinor);

    GSSEAP_ASSERT(!GSS_ERROR(rc));
}
/*
 * Release every IOV buffer that this library allocated (ALLOCATED flag
 * set), clearing the flag afterwards. Caller-provided buffers are left
 * untouched.
 */
void
gssEapReleaseIov(gss_iov_buffer_desc *iov, int iov_count)
{
    OM_uint32 tmpMinor;
    int idx;

    GSSEAP_ASSERT(iov != GSS_C_NO_IOV_BUFFER);

    for (idx = 0; idx < iov_count; idx++) {
        if ((iov[idx].type & GSS_IOV_BUFFER_FLAG_ALLOCATED) == 0)
            continue;

        gss_release_buffer(&tmpMinor, &iov[idx].buffer);
        iov[idx].type &= ~(GSS_IOV_BUFFER_FLAG_ALLOCATED);
    }
}
/*
 * Return TRUE if the credential can be used with the given mechanism.
 * No lock needs to be acquired by the caller because the credential's
 * mechanism list is immutable.
 */
int
gssEapCredAvailable(gss_cred_id_t cred, gss_OID mech)
{
    OM_uint32 minor;
    int isMember = 0;

    GSSEAP_ASSERT(mech != GSS_C_NO_OID);

    /* A default credential (or one with no mechanism set) matches any
     * mechanism. */
    if (cred == GSS_C_NO_CREDENTIAL || cred->mechanisms == GSS_C_NO_OID_SET)
        return 1;

    gss_test_oid_set_member(&minor, mech, cred->mechanisms, &isMember);

    return isMember;
}
/*
 * eapol_callbacks setter: store an integer EAPOL variable on the
 * initiator context. Unknown variables are ignored.
 */
static void
peerSetInt(void *data, enum eapol_int_var variable, unsigned int value)
{
    gss_ctx_id_t ctx = data;

    if (ctx == GSS_C_NO_CONTEXT)
        return;

    GSSEAP_ASSERT(CTX_IS_INITIATOR(ctx));

    switch (variable) {
    case EAPOL_idleWhile:
        ctx->initiatorCtx.idleWhile = value;
        break;
    default:
        /* Explicitly ignore variables we do not track; the default case
         * also silences -Wswitch if the enum grows. */
        break;
    }
}
/*
 * Return TRUE if the IOV contains no DATA buffers, i.e. the message is
 * integrity-protected only and carries nothing to be encrypted.
 */
int
gssEapIsIntegrityOnly(gss_iov_buffer_desc *iov, int iov_count)
{
    int idx;

    GSSEAP_ASSERT(iov != GSS_C_NO_IOV_BUFFER);

    for (idx = 0; idx < iov_count; idx++) {
        if (GSS_IOV_BUFFER_TYPE(iov[idx].type) == GSS_IOV_BUFFER_TYPE_DATA)
            return FALSE;
    }

    return TRUE;
}
/*
 * eapol_callbacks getter: read an integer EAPOL variable from the
 * initiator context. Returns 0 (FALSE) for a NULL context or an
 * unknown variable.
 */
static unsigned int
peerGetInt(void *data, enum eapol_int_var variable)
{
    gss_ctx_id_t ctx = data;

    if (ctx == GSS_C_NO_CONTEXT)
        return FALSE;

    GSSEAP_ASSERT(CTX_IS_INITIATOR(ctx));

    switch (variable) {
    case EAPOL_idleWhile:
        /* The break after this return was unreachable; removed. */
        return ctx->initiatorCtx.idleWhile;
    default:
        break;
    }

    return 0;
}
bool gss_eap_radius_attr_provider::initWithGssContext(const gss_eap_attr_ctx *manager, const gss_cred_id_t gssCred, const gss_ctx_id_t gssCtx) { if (!gss_eap_attr_provider::initWithGssContext(manager, gssCred, gssCtx)) return false; if (gssCtx != GSS_C_NO_CONTEXT) { if (gssCtx->acceptorCtx.vps != NULL) { m_vps = copyAvps(gssCtx->acceptorCtx.vps); if (m_vps == NULL) return false; /* We assume libradsec validated this for us */ GSSEAP_ASSERT(rs_avp_find(m_vps, PW_MESSAGE_AUTHENTICATOR, 0) != NULL); m_authenticated = true; } } return true; }
/*
 * Map a libradsec error into a GSS major/minor status pair, saving the
 * libradsec message for later display. Consumes (frees) err on failure
 * mapping; RSE_OK maps to GSS_S_COMPLETE.
 */
OM_uint32
gssEapRadiusMapError(OM_uint32 *minor, struct rs_error *err)
{
    int rsCode;

    GSSEAP_ASSERT(err != NULL);

    rsCode = rs_err_code(err, 0);
    if (rsCode == RSE_OK) {
        *minor = 0;
        return GSS_S_COMPLETE;
    }

    *minor = RS_MAP_ERROR(rsCode);

    /* Stash the human-readable message so gss_display_status can find it. */
    gssEapSaveStatusInfo(*minor, "%s", rs_err_msg(err));
    rs_err_free(err);

    return GSS_S_FAILURE;
}
/*
 * Convert a RADIUS attribute-value pair into its JSON representation:
 * {"value": ..., "type": <attr>, ["vendor": <pec>]}. Octet-typed values
 * are base64-encoded. Throws std::bad_alloc if encoding fails.
 */
static JSONObject
avpToJson(rs_const_avp *vp)
{
    JSONObject json;
    gss_eap_attrid id;

    GSSEAP_ASSERT(rs_avp_length(vp) <= RS_MAX_STRING_LEN);

    switch (rs_avp_typeof(vp)) {
    case RS_TYPE_INTEGER:
        json.set("value", rs_avp_integer_value(vp));
        break;
    case RS_TYPE_DATE:
        json.set("value", rs_avp_date_value(vp));
        break;
    case RS_TYPE_STRING:
        json.set("value", rs_avp_string_value(vp));
        break;
    default: {
        /* Opaque octets: base64-encode before embedding in JSON. */
        char *encoded;

        if (base64Encode(rs_avp_octets_value_const_ptr(vp),
                         rs_avp_length(vp), &encoded) < 0)
            throw std::bad_alloc();
        json.set("value", encoded);
        GSSEAP_FREE(encoded);
        break;
    }
    }

    id = avpToAttrId(vp);

    json.set("type", id.second);
    if (id.first != 0)
        json.set("vendor", id.first);

    return json;
}
static bool isInternalAttributeP(const gss_eap_attrid &attrid) { bool bInternalAttribute = false; /* should have been filtered */ GSSEAP_ASSERT(!isSecretAttributeP(attrid)); switch (attrid.first) { case VENDORPEC_UKERNA: switch (attrid.second) { case PW_SAML_AAA_ASSERTION: bInternalAttribute = true; break; default: break; } break; case 0: switch (attrid.second) { case PW_GSS_ACCEPTOR_SERVICE_NAME: case PW_GSS_ACCEPTOR_HOST_NAME: case PW_GSS_ACCEPTOR_SERVICE_SPECIFICS: case PW_GSS_ACCEPTOR_REALM_NAME: bInternalAttribute = true; break; default: break; } break; default: break; } return bInternalAttribute; }
/*
 * Serialize a security context into an exported-context token
 * (EAP_EXPORT_CONTEXT_V1 layout): version/state/flags/gssFlags header,
 * mechanism OID, checksum+encryption types with the RFC 3961 key,
 * initiator and acceptor names, expiry and sequence numbers, the
 * externalized replay-detection state, and (acceptor only) the partial
 * RADIUS context. On failure the token is released before returning.
 */
OM_uint32
gssEapExportSecContext(OM_uint32 *minor,
                       gss_ctx_id_t ctx,
                       gss_buffer_t token)
{
    OM_uint32 major, tmpMinor;
    size_t length;
    gss_buffer_desc initiatorName = GSS_C_EMPTY_BUFFER;
    gss_buffer_desc acceptorName = GSS_C_EMPTY_BUFFER;
    gss_buffer_desc partialCtx = GSS_C_EMPTY_BUFFER;
    gss_buffer_desc key;
    unsigned char *p;

    /* Initiators may only export established contexts; either side needs
     * a negotiated mechanism OID. */
    if ((CTX_IS_INITIATOR(ctx) && !CTX_IS_ESTABLISHED(ctx)) ||
        ctx->mechanismUsed == GSS_C_NO_OID) {
        *minor = GSSEAP_CONTEXT_INCOMPLETE;
        return GSS_S_NO_CONTEXT;
    }

    /* Aliases the context key; not an allocation. */
    key.length = KRB_KEY_LENGTH(&ctx->rfc3961Key);
    key.value = KRB_KEY_DATA(&ctx->rfc3961Key);

    if (ctx->initiatorName != GSS_C_NO_NAME) {
        major = gssEapExportNameInternal(minor, ctx->initiatorName,
                                         &initiatorName,
                                         EXPORT_NAME_FLAG_COMPOSITE);
        if (GSS_ERROR(major))
            goto cleanup;
    }

    if (ctx->acceptorName != GSS_C_NO_NAME) {
        major = gssEapExportNameInternal(minor, ctx->acceptorName,
                                         &acceptorName,
                                         EXPORT_NAME_FLAG_COMPOSITE);
        if (GSS_ERROR(major))
            goto cleanup;
    }

#ifdef GSSEAP_ENABLE_ACCEPTOR
    /*
     * The partial context is only transmitted for unestablished acceptor
     * contexts.
     */
    if (!CTX_IS_INITIATOR(ctx) && !CTX_IS_ESTABLISHED(ctx) &&
        (ctx->flags & CTX_FLAG_KRB_REAUTH) == 0) {
        major = gssEapExportPartialContext(minor, ctx, &partialCtx);
        if (GSS_ERROR(major))
            goto cleanup;
    }
#endif

    /* Pre-compute the exact token size; the trailing assert below checks
     * this accounting against the bytes actually written. */
    length = 16;                                  /* version, state, flags, */
    length += 4 + ctx->mechanismUsed->length;     /* mechanismUsed */
    length += 12 + key.length;                    /* rfc3961Key.value */
    length += 4 + initiatorName.length;           /* initiatorName.value */
    length += 4 + acceptorName.length;            /* acceptorName.value */
    length += 24 + sequenceSize(ctx->seqState);   /* seqState */
    if (partialCtx.value != NULL)
        length += 4 + partialCtx.length;          /* partialCtx.value */

    token->value = GSSEAP_MALLOC(length);
    if (token->value == NULL) {
        major = GSS_S_FAILURE;
        *minor = ENOMEM;
        goto cleanup;
    }
    token->length = length;

    p = (unsigned char *)token->value;

    store_uint32_be(EAP_EXPORT_CONTEXT_V1, &p[0]); /* version */
    store_uint32_be(GSSEAP_SM_STATE(ctx), &p[4]);
    store_uint32_be(ctx->flags, &p[8]);
    store_uint32_be(ctx->gssFlags, &p[12]);
    p = store_oid(ctx->mechanismUsed, &p[16]);

    store_uint32_be(ctx->checksumType, &p[0]);
    store_uint32_be(ctx->encryptionType, &p[4]);
    p = store_buffer(&key, &p[8], FALSE);

    p = store_buffer(&initiatorName, p, FALSE);
    p = store_buffer(&acceptorName, p, FALSE);

    store_uint64_be(ctx->expiryTime, &p[0]);
    store_uint64_be(ctx->sendSeq, &p[8]);
    store_uint64_be(ctx->recvSeq, &p[16]);
    p += 24;

    major = sequenceExternalize(minor, ctx->seqState, &p, &length);
    if (GSS_ERROR(major))
        goto cleanup;

    if (partialCtx.value != NULL)
        p = store_buffer(&partialCtx, p, FALSE);

    /* Cursor must land exactly at the end of the sized buffer. */
    GSSEAP_ASSERT(p == (unsigned char *)token->value + token->length);

    major = GSS_S_COMPLETE;
    *minor = 0;

cleanup:
    if (GSS_ERROR(major))
        gss_release_buffer(&tmpMinor, token);
    gss_release_buffer(&tmpMinor, &initiatorName);
    gss_release_buffer(&tmpMinor, &acceptorName);
    gss_release_buffer(&tmpMinor, &partialCtx);

    return major;
}
/*
 * Deserialize the acceptor's partial (RADIUS) context state produced by
 * gssEapExportPartialContext(). *pBuf/*pRemain are advanced past the
 * consumed bytes. NOTE: the CHECK_REMAIN/UPDATE_REMAIN macros implicitly
 * read and mutate the local `p` and `remain` (and set *minor/return on
 * underrun) — see their definitions.
 */
static OM_uint32
gssEapImportPartialContext(OM_uint32 *minor,
                           unsigned char **pBuf,
                           size_t *pRemain,
                           gss_ctx_id_t ctx)
{
    OM_uint32 major;
    unsigned char *p = *pBuf;
    size_t remain = *pRemain;
    gss_buffer_desc buf;
    size_t ctxLength, serverLen;

    /* Length of partial RADIUS context */
    CHECK_REMAIN(4);
    ctxLength = load_uint32_be(p);
    UPDATE_REMAIN(4);

    CHECK_REMAIN(ctxLength);
    /* From here on, bounds-check against the inner context length only. */
    remain = ctxLength; /* check against partial context length */

    /* Selected RADIUS server */
    CHECK_REMAIN(4);
    serverLen = load_uint32_be(p);
    UPDATE_REMAIN(4);

    if (serverLen != 0) {
        CHECK_REMAIN(serverLen);

        /* +1 for the NUL terminator; the wire form is not terminated. */
        ctx->acceptorCtx.radServer = GSSEAP_MALLOC(serverLen + 1);
        if (ctx->acceptorCtx.radServer == NULL) {
            *minor = ENOMEM;
            return GSS_S_FAILURE;
        }

        memcpy(ctx->acceptorCtx.radServer, p, serverLen);
        ctx->acceptorCtx.radServer[serverLen] = '\0';

        UPDATE_REMAIN(serverLen);
    }

    /* RADIUS state blob */
    CHECK_REMAIN(4);
    buf.length = load_uint32_be(p);
    UPDATE_REMAIN(4);

    if (buf.length != 0) {
        CHECK_REMAIN(buf.length);

        buf.value = p;

        major = duplicateBuffer(minor, &buf, &ctx->acceptorCtx.state);
        if (GSS_ERROR(major))
            return major;

        UPDATE_REMAIN(buf.length);
    }

#ifdef GSSEAP_DEBUG
    /* The inner context must be fully consumed. */
    GSSEAP_ASSERT(remain == 0);
#endif

    /* Report consumption of the length prefix plus the whole inner
     * context to the caller. */
    *pBuf = p;
    *pRemain -= 4 + ctxLength;

    return GSS_S_COMPLETE;
}
/*
 * Deserialize a security context from an exported-context token
 * (inverse of gssEapExportSecContext). Returns GSS_S_DEFECTIVE_TOKEN
 * with an appropriate minor code on any malformed or truncated input.
 *
 * Fix: the early defective-token returns (state range, initiator
 * partial-context, missing established names) previously did not set
 * *minor, leaving callers with a stale/uninitialized minor status.
 */
OM_uint32
gssEapImportContext(OM_uint32 *minor,
                    gss_buffer_t token,
                    gss_ctx_id_t ctx)
{
    OM_uint32 major;
    unsigned char *p = (unsigned char *)token->value;
    size_t remain = token->length;

    /* Fixed header: version, state, flags, gssFlags (4 bytes each). */
    if (remain < 16) {
        *minor = GSSEAP_TOK_TRUNC;
        return GSS_S_DEFECTIVE_TOKEN;
    }

    if (load_uint32_be(&p[0]) != EAP_EXPORT_CONTEXT_V1) {
        *minor = GSSEAP_BAD_CONTEXT_TOKEN;
        return GSS_S_DEFECTIVE_TOKEN;
    }

    ctx->state = load_uint32_be(&p[4]);
    ctx->flags = load_uint32_be(&p[8]);
    ctx->gssFlags = load_uint32_be(&p[12]);

    p += 16;
    remain -= 16;

    /* Validate state */
    if (GSSEAP_SM_STATE(ctx) < GSSEAP_STATE_INITIAL ||
        GSSEAP_SM_STATE(ctx) > GSSEAP_STATE_ESTABLISHED) {
        *minor = GSSEAP_BAD_CONTEXT_TOKEN;
        return GSS_S_DEFECTIVE_TOKEN;
    }

    /* Only acceptor can export partial context tokens */
    if (CTX_IS_INITIATOR(ctx) && !CTX_IS_ESTABLISHED(ctx)) {
        *minor = GSSEAP_BAD_CONTEXT_TOKEN;
        return GSS_S_DEFECTIVE_TOKEN;
    }

    major = importMechanismOid(minor, &p, &remain, &ctx->mechanismUsed);
    if (GSS_ERROR(major))
        return major;

    major = importKerberosKey(minor, &p, &remain,
                              &ctx->checksumType,
                              &ctx->encryptionType,
                              &ctx->rfc3961Key);
    if (GSS_ERROR(major))
        return major;

    /* Initiator name OID matches the context mechanism, so it's not encoded */
    major = importName(minor, ctx->mechanismUsed, &p, &remain,
                       &ctx->initiatorName);
    if (GSS_ERROR(major))
        return major;

    major = importName(minor, GSS_C_NO_OID, &p, &remain,
                       &ctx->acceptorName);
    if (GSS_ERROR(major))
        return major;

    /* Check that, if context is established, names are valid */
    if (CTX_IS_ESTABLISHED(ctx) &&
        (CTX_IS_INITIATOR(ctx) ? ctx->acceptorName == GSS_C_NO_NAME
                               : ctx->initiatorName == GSS_C_NO_NAME)) {
        *minor = GSSEAP_BAD_CONTEXT_TOKEN;
        return GSS_S_DEFECTIVE_TOKEN;
    }

    /* expiryTime, sendSeq, recvSeq plus externalized sequence state. */
    if (remain < 24 + sequenceSize(ctx->seqState)) {
        *minor = GSSEAP_TOK_TRUNC;
        return GSS_S_DEFECTIVE_TOKEN;
    }

    ctx->expiryTime = (time_t)load_uint64_be(&p[0]);
    ctx->sendSeq = load_uint64_be(&p[8]);
    ctx->recvSeq = load_uint64_be(&p[16]);

    p += 24;
    remain -= 24;

    major = sequenceInternalize(minor, &ctx->seqState, &p, &remain);
    if (GSS_ERROR(major))
        return major;

#ifdef GSSEAP_ENABLE_ACCEPTOR
    /*
     * The partial context should only be expected for unestablished
     * acceptor contexts.
     */
    if (!CTX_IS_INITIATOR(ctx) && !CTX_IS_ESTABLISHED(ctx) &&
        (ctx->flags & CTX_FLAG_KRB_REAUTH) == 0) {
        major = gssEapImportPartialContext(minor, &p, &remain, ctx);
        if (GSS_ERROR(major))
            return major;
    }

#ifdef GSSEAP_DEBUG
    GSSEAP_ASSERT(remain == 0);
#endif
#endif /* GSSEAP_ENABLE_ACCEPTOR */

    major = GSS_S_COMPLETE;
    *minor = 0;

    return major;
}
/*
 * DCE_STYLE indicates actual RRC is EC + RRC
 * EC is extra rotate count for DCE_STYLE, pad length otherwise
 * RRC is rotate count.
 *
 * Translate a GSS IOV array into a krb5_crypto_iov array laid out as:
 *   [krb5 header][caller DATA/SIGN_ONLY buffers...][EC + E(GSS-Header)]
 *   [krb5 trailer]
 * On success *pkiov (caller frees) and *pkiov_count are set; on failure
 * a krb5 error code is returned and no allocation is leaked.
 */
static krb5_error_code
mapIov(krb5_context context, int dce_style, size_t ec, size_t rrc,
#ifdef HAVE_HEIMDAL_VERSION
       krb5_crypto crypto,
#else
       krb5_keyblock *crypto,
#endif
       gss_iov_buffer_desc *iov,
       int iov_count,
       krb5_crypto_iov **pkiov,
       size_t *pkiov_count)
{
    gss_iov_buffer_t header;
    gss_iov_buffer_t trailer;
    int i = 0, j;
    size_t kiov_count;
    krb5_crypto_iov *kiov;
    size_t k5_headerlen = 0, k5_trailerlen = 0;
    size_t gss_headerlen, gss_trailerlen;
    krb5_error_code code;

    *pkiov = NULL;
    *pkiov_count = 0;

    header = gssEapLocateIov(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
    GSSEAP_ASSERT(header != NULL);

    /* A non-zero rotate count only makes sense when there is no
     * separate trailer buffer. */
    trailer = gssEapLocateIov(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
    GSSEAP_ASSERT(trailer == NULL || rrc == 0);

    code = krbCryptoLength(context, crypto, KRB5_CRYPTO_TYPE_HEADER,
                           &k5_headerlen);
    if (code != 0)
        return code;

    code = krbCryptoLength(context, crypto, KRB5_CRYPTO_TYPE_TRAILER,
                           &k5_trailerlen);
    if (code != 0)
        return code;

    /* Check header and trailer sizes */
    gss_headerlen = 16 /* GSS-Header */ + k5_headerlen; /* Kerb-Header */
    gss_trailerlen = ec + 16 /* E(GSS-Header) */ + k5_trailerlen; /* Kerb-Trailer */

    /* If we're caller without a trailer, we must rotate by trailer length */
    if (trailer == NULL) {
        size_t actual_rrc = rrc;

        if (dce_style)
            actual_rrc += ec; /* compensate for Windows bug */

        if (actual_rrc != gss_trailerlen)
            return KRB5_BAD_MSIZE;

        /* Trailer bytes live at the end of the header buffer instead. */
        gss_headerlen += gss_trailerlen;
    } else {
        if (trailer->buffer.length != gss_trailerlen)
            return KRB5_BAD_MSIZE;
    }

    if (header->buffer.length != gss_headerlen)
        return KRB5_BAD_MSIZE;

    /* krb5 header + E(header)+EC slot + krb5 trailer, plus one entry per
     * caller buffer (SIGN_ONLY/DATA; EMPTY-mapped ones are skipped). */
    kiov_count = 3 + iov_count;
    kiov = (krb5_crypto_iov *)GSSEAP_MALLOC(kiov_count * sizeof(krb5_crypto_iov));
    if (kiov == NULL)
        return ENOMEM;

    /*
     * The krb5 header is located at the end of the GSS header.
     */
    kiov[i].flags = KRB5_CRYPTO_TYPE_HEADER;
    kiov[i].data.length = k5_headerlen;
    kiov[i].data.data = (char *)header->buffer.value + header->buffer.length - k5_headerlen;
    i++;

    for (j = 0; j < iov_count; j++) {
        kiov[i].flags = gssEapMapCryptoFlag(iov[j].type);
        if (kiov[i].flags == KRB5_CRYPTO_TYPE_EMPTY)
            continue;

        kiov[i].data.length = iov[j].buffer.length;
        kiov[i].data.data = (char *)iov[j].buffer.value;
        i++;
    }

    /*
     * The EC and encrypted GSS header are placed in the trailer, which may
     * be rotated directly after the plaintext header if no trailer buffer
     * is provided.
     */
    kiov[i].flags = KRB5_CRYPTO_TYPE_DATA;
    kiov[i].data.length = ec + 16; /* E(Header) */
    if (trailer == NULL)
        kiov[i].data.data = (char *)header->buffer.value + 16;
    else
        kiov[i].data.data = (char *)trailer->buffer.value;
    i++;

    /*
     * The krb5 trailer is placed after the encrypted copy of the
     * krb5 header (which may be in the GSS header or trailer).
     */
    kiov[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
    kiov[i].data.length = k5_trailerlen;
    kiov[i].data.data = (char *)kiov[i - 1].data.data + ec + 16; /* E(Header) */
    i++;

    /* i may be less than kiov_count if some entries mapped to EMPTY. */
    *pkiov = kiov;
    *pkiov_count = i;

    return 0;
}
static OM_uint32 makeErrorToken(OM_uint32 *minor, OM_uint32 majorStatus, OM_uint32 minorStatus, struct gss_eap_token_buffer_set *token) { OM_uint32 major, tmpMinor; unsigned char errorData[8]; gss_buffer_desc errorBuffer; GSSEAP_ASSERT(GSS_ERROR(majorStatus)); /* * Only return error codes that the initiator could have caused, * to avoid information leakage. */ #if MECH_EAP if (IS_RADIUS_ERROR(minorStatus)) { /* Squash RADIUS error codes */ minorStatus = GSSEAP_RADIUS_PROT_FAILURE; } else if (!IS_WIRE_ERROR(minorStatus)) { #else if (!IS_WIRE_ERROR(minorStatus)) { #endif /* Don't return non-wire error codes */ return GSS_S_COMPLETE; } minorStatus -= ERROR_TABLE_BASE_eapg; store_uint32_be(majorStatus, &errorData[0]); store_uint32_be(minorStatus, &errorData[4]); major = gssEapAllocInnerTokens(&tmpMinor, 1, token); if (GSS_ERROR(major)) { *minor = tmpMinor; return major; } errorBuffer.length = sizeof(errorData); errorBuffer.value = errorData; major = duplicateBuffer(&tmpMinor, &errorBuffer, &token->buffers.elements[0]); if (GSS_ERROR(major)) { gssEapReleaseInnerTokens(&tmpMinor, token, 1); *minor = tmpMinor; return major; } token->buffers.count = 1; token->types[0] = ITOK_TYPE_CONTEXT_ERR | ITOK_FLAG_CRITICAL; *minor = 0; return GSS_S_COMPLETE; } OM_uint32 gssEapSmStep(OM_uint32 *minor, gss_cred_id_t cred, gss_ctx_id_t ctx, gss_name_t target, gss_OID mech, OM_uint32 reqFlags, OM_uint32 timeReq, gss_channel_bindings_t chanBindings, gss_buffer_t inputToken, gss_buffer_t outputToken, struct gss_eap_sm *sm, /* ordered by state */ size_t smCount) { OM_uint32 major, tmpMajor, tmpMinor; struct gss_eap_token_buffer_set inputTokens = { { 0, GSS_C_NO_BUFFER }, NULL }; struct gss_eap_token_buffer_set outputTokens = { { 0, GSS_C_NO_BUFFER }, NULL }; gss_buffer_desc unwrappedInputToken = GSS_C_EMPTY_BUFFER; gss_buffer_desc unwrappedOutputToken = GSS_C_EMPTY_BUFFER; unsigned int smFlags = 0; size_t i, j; int initialContextToken = 0; enum gss_eap_token_type tokType; 
GSSEAP_ASSERT(smCount > 0); *minor = 0; outputToken->length = 0; outputToken->value = NULL; if (inputToken != GSS_C_NO_BUFFER && inputToken->length != 0) { major = gssEapVerifyToken(minor, ctx, inputToken, &tokType, &unwrappedInputToken); if (GSS_ERROR(major)) goto cleanup; if (tokType != (CTX_IS_INITIATOR(ctx) ? TOK_TYPE_ACCEPTOR_CONTEXT : TOK_TYPE_INITIATOR_CONTEXT)) { major = GSS_S_DEFECTIVE_TOKEN; *minor = GSSEAP_WRONG_TOK_ID; goto cleanup; } } else if (!CTX_IS_INITIATOR(ctx) || ctx->state != GSSEAP_STATE_INITIAL) { major = GSS_S_DEFECTIVE_TOKEN; *minor = GSSEAP_WRONG_SIZE; goto cleanup; } else { initialContextToken = 1; } if (CTX_IS_ESTABLISHED(ctx)) { major = GSS_S_BAD_STATUS; *minor = GSSEAP_CONTEXT_ESTABLISHED; goto cleanup; } GSSEAP_ASSERT(ctx->state < GSSEAP_STATE_ESTABLISHED); major = gssEapDecodeInnerTokens(minor, &unwrappedInputToken, &inputTokens); if (GSS_ERROR(major)) goto cleanup; major = gssEapAllocInnerTokens(minor, smCount, &outputTokens); if (GSS_ERROR(major)) goto cleanup; ctx->inputTokens = &inputTokens; ctx->outputTokens = &outputTokens; /* Process all the tokens that are valid for the current state. */ for (i = 0; i < smCount; i++) { struct gss_eap_sm *smp = &sm[i]; int processToken = 0; gss_buffer_t innerInputToken = GSS_C_NO_BUFFER; OM_uint32 *inputTokenType = NULL; gss_buffer_desc innerOutputToken = GSS_C_EMPTY_BUFFER; if ((smp->validStates & ctx->state) == 0) continue; /* * We special case the first call to gss_init_sec_context so that * all token providers have the opportunity to generate an initial * context token. Providers where inputTokenType is ITOK_TYPE_NONE * are always called and generally act on state transition boundaries, * for example to advance the state after a series of optional tokens * (as is the case with the extension token exchange) or to generate * a new token after the state was advanced by a provider which did * not emit a token. 
*/ if (smp->inputTokenType == ITOK_TYPE_NONE || initialContextToken) { processToken = 1; } else if ((smFlags & SM_FLAG_TRANSITED) == 0) { /* Don't regurgitate a token which belonds to a previous state. */ for (j = 0; j < inputTokens.buffers.count; j++) { if ((inputTokens.types[j] & ITOK_TYPE_MASK) == smp->inputTokenType) { if (processToken) { /* Check for duplicate inner tokens */ major = GSS_S_DEFECTIVE_TOKEN; *minor = GSSEAP_DUPLICATE_ITOK; break; } processToken = 1; innerInputToken = &inputTokens.buffers.elements[j]; inputTokenType = &inputTokens.types[j]; } } if (GSS_ERROR(major)) break; } if (processToken) { enum gss_eap_state oldState = ctx->state; smFlags = 0; if (inputTokenType != NULL && (*inputTokenType & ITOK_FLAG_CRITICAL)) smFlags |= SM_FLAG_INPUT_TOKEN_CRITICAL; major = smp->processToken(minor, cred, ctx, target, mech, reqFlags, timeReq, chanBindings, innerInputToken, &innerOutputToken, &smFlags); if (GSS_ERROR(major)) break; if (inputTokenType != NULL) *inputTokenType |= ITOK_FLAG_VERIFIED; if (ctx->state < oldState) i = 0; /* restart */ else if (ctx->state != oldState) smFlags |= SM_FLAG_TRANSITED; if (innerOutputToken.value != NULL) { outputTokens.buffers.elements[outputTokens.buffers.count] = innerOutputToken; GSSEAP_ASSERT(smp->outputTokenType != ITOK_TYPE_NONE); outputTokens.types[outputTokens.buffers.count] = smp->outputTokenType; if (smFlags & SM_FLAG_OUTPUT_TOKEN_CRITICAL) outputTokens.types[outputTokens.buffers.count] |= ITOK_FLAG_CRITICAL; outputTokens.buffers.count++; } /* * Break out if we made a state transition and have some tokens to send. 
*/ if ((smFlags & SM_FLAG_TRANSITED) && ((smFlags & SM_FLAG_FORCE_SEND_TOKEN) || outputTokens.buffers.count != 0)) { SM_ASSERT_VALID(ctx, major); break; } } else if ((smp->itokFlags & SM_ITOK_FLAG_REQUIRED) && smp->inputTokenType != ITOK_TYPE_NONE) { /* Check for required inner tokens */ major = GSS_S_DEFECTIVE_TOKEN; *minor = GSSEAP_MISSING_REQUIRED_ITOK; break; } } GSSEAP_ASSERT(outputTokens.buffers.count <= smCount); /* Check we understood all critical tokens sent by peer */ if (!GSS_ERROR(major)) { for (j = 0; j < inputTokens.buffers.count; j++) { if ((inputTokens.types[j] & ITOK_FLAG_CRITICAL) && (inputTokens.types[j] & ITOK_FLAG_VERIFIED) == 0) { major = GSS_S_UNAVAILABLE; *minor = GSSEAP_CRIT_ITOK_UNAVAILABLE; goto cleanup; } } } /* Optionaly emit an error token if we are the acceptor */ if (GSS_ERROR(major)) { if (CTX_IS_INITIATOR(ctx)) goto cleanup; /* return error directly to caller */ /* replace any emitted tokens with error token */ gssEapReleaseInnerTokens(&tmpMinor, &outputTokens, 1); tmpMajor = makeErrorToken(&tmpMinor, major, *minor, &outputTokens); if (GSS_ERROR(tmpMajor)) { major = tmpMajor; *minor = tmpMinor; goto cleanup; } } /* Format output token from inner tokens */ if (outputTokens.buffers.count != 0 || /* inner tokens to send */ !CTX_IS_INITIATOR(ctx) || /* any leg acceptor */ !CTX_IS_ESTABLISHED(ctx)) { /* non-last leg initiator */ tmpMajor = gssEapEncodeInnerTokens(&tmpMinor, &outputTokens, &unwrappedOutputToken); if (tmpMajor == GSS_S_COMPLETE) { if (CTX_IS_INITIATOR(ctx)) tokType = TOK_TYPE_INITIATOR_CONTEXT; else tokType = TOK_TYPE_ACCEPTOR_CONTEXT; tmpMajor = gssEapMakeToken(&tmpMinor, ctx, &unwrappedOutputToken, tokType, outputToken); if (GSS_ERROR(tmpMajor)) { major = tmpMajor; *minor = tmpMinor; goto cleanup; } } } /* If the context is established, empty tokens only to be emitted by initiator */ GSSEAP_ASSERT(!CTX_IS_ESTABLISHED(ctx) || ((outputToken->length == 0) == CTX_IS_INITIATOR(ctx))); SM_ASSERT_VALID(ctx, major); cleanup: 
gssEapReleaseInnerTokens(&tmpMinor, &inputTokens, 0); gssEapReleaseInnerTokens(&tmpMinor, &inputTokens, 1); gss_release_buffer(&tmpMinor, &unwrappedOutputToken); ctx->inputTokens = NULL; ctx->outputTokens = NULL; return major; }