/*
 * Import a krb5 interprocess token as an IAKERB context.  Only fully
 * established krb5 contexts can be imported; the resulting IAKERB context
 * wraps the imported krb5 context and is marked established.
 */
OM_uint32 KRB5_CALLCONV
iakerb_gss_import_sec_context(OM_uint32 *minor_status,
                              gss_buffer_t interprocess_token,
                              gss_ctx_id_t *context_handle)
{
    OM_uint32 major, tmp;
    krb5_error_code code;
    gss_ctx_id_t kctx_handle;
    krb5_gss_ctx_id_t kctx;
    iakerb_ctx_id_t ictx;

    major = krb5_gss_import_sec_context(minor_status, interprocess_token,
                                        &kctx_handle);
    if (major != GSS_S_COMPLETE)
        return major;

    kctx = (krb5_gss_ctx_id_t)kctx_handle;
    if (!kctx->established) {
        /* We don't currently support importing partially established
         * contexts. */
        krb5_gss_delete_sec_context(&tmp, &kctx_handle, GSS_C_NO_BUFFER);
        return GSS_S_FAILURE;
    }

    code = iakerb_alloc_context(&ictx, kctx->initiate);
    if (code != 0) {
        /* Discard the imported krb5 context and report the allocation
         * failure. */
        krb5_gss_delete_sec_context(&tmp, &kctx_handle, GSS_C_NO_BUFFER);
        *minor_status = code;
        return GSS_S_FAILURE;
    }

    /* Hand ownership of the krb5 context to the IAKERB wrapper. */
    ictx->gssc = kctx_handle;
    ictx->established = 1;
    *context_handle = (gss_ctx_id_t)ictx;
    return GSS_S_COMPLETE;
}
/*
 * Delete an IAKERB context.  This can also accept Kerberos context
 * handles.  The heuristic is similar to SPNEGO's delete_sec_context.
 */
OM_uint32 KRB5_CALLCONV
iakerb_gss_delete_sec_context(OM_uint32 *minor_status,
                              gss_ctx_id_t *context_handle,
                              gss_buffer_t output_token)
{
    iakerb_ctx_id_t ctx;

    /* GSS-API requires an empty output token on deletion. */
    if (output_token != GSS_C_NO_BUFFER) {
        output_token->length = 0;
        output_token->value = NULL;
    }

    *minor_status = 0;
    if (*context_handle == GSS_C_NO_CONTEXT)
        return GSS_S_COMPLETE;

    ctx = (iakerb_ctx_id_t)*context_handle;
    if (ctx->magic != KG_IAKERB_CONTEXT) {
        /* Not ours; it must be a raw krb5 context handle. */
        assert(ctx->magic == KG_CONTEXT);
        return krb5_gss_delete_sec_context(minor_status, context_handle,
                                           output_token);
    }

    iakerb_release_context(ctx);
    *context_handle = GSS_C_NO_CONTEXT;
    return GSS_S_COMPLETE;
}
/* Delete the krb5 security context ctx, aborting via check() on failure. */
static void
cleanup_context(gss_ctx_id_t ctx)
{
    OM_uint32 maj_stat, min_stat;

    maj_stat = krb5_gss_delete_sec_context(&min_stat, &ctx, GSS_C_NO_BUFFER);
    check(maj_stat, min_stat, "gss_delete_sec_context");
}
/*
 * Export a "lucid" representation of the krb5 security context
 * *context_handle at the requested structure version, storing a pointer to
 * it in *kctx.  On success the GSS context is deleted and *context_handle
 * is set to GSS_C_NO_CONTEXT; the caller owns the lucid structure and must
 * free it with the matching release function.
 *
 * Fix: the malformed-result validation path previously returned without
 * releasing data_set, leaking the buffer set returned by
 * gss_inquire_sec_context_by_oid().
 */
OM_uint32 KRB5_CALLCONV
gss_krb5_export_lucid_sec_context(OM_uint32 *minor_status,
                                  gss_ctx_id_t *context_handle,
                                  OM_uint32 version,
                                  void **kctx)
{
    unsigned char oid_buf[GSS_KRB5_EXPORT_LUCID_SEC_CONTEXT_OID_LENGTH + 6];
    gss_OID_desc req_oid;
    OM_uint32 major_status, minor;
    gss_buffer_set_t data_set = GSS_C_NO_BUFFER_SET;

    if (kctx == NULL)
        return GSS_S_CALL_INACCESSIBLE_WRITE;
    *kctx = NULL;

    /* Compose the export-lucid OID with the requested version appended. */
    req_oid.elements = oid_buf;
    req_oid.length = sizeof(oid_buf);
    major_status = generic_gss_oid_compose(minor_status,
                                           GSS_KRB5_EXPORT_LUCID_SEC_CONTEXT_OID,
                                           GSS_KRB5_EXPORT_LUCID_SEC_CONTEXT_OID_LENGTH,
                                           (int)version,
                                           &req_oid);
    if (GSS_ERROR(major_status))
        return major_status;

    major_status = gss_inquire_sec_context_by_oid(minor_status,
                                                  *context_handle,
                                                  &req_oid,
                                                  &data_set);
    if (GSS_ERROR(major_status))
        return major_status;

    /* Expect exactly one buffer holding a pointer to the lucid structure. */
    if (data_set == GSS_C_NO_BUFFER_SET ||
        data_set->count != 1 ||
        data_set->elements[0].length != sizeof(void *)) {
        /* Release the buffer set; this path previously leaked it. */
        generic_gss_release_buffer_set(&minor, &data_set);
        *minor_status = EINVAL;
        return GSS_S_FAILURE;
    }

    *kctx = *((void **)data_set->elements[0].value);

    /* Clean up the context state (it is an error for
     * someone to attempt to use this context again) */
    (void)krb5_gss_delete_sec_context(minor_status, context_handle, NULL);
    *context_handle = GSS_C_NO_CONTEXT;

    generic_gss_release_buffer_set(&minor, &data_set);

    return GSS_S_COMPLETE;
}
/*
 * Release all resources held by the IAKERB context ctx and free it.
 * A NULL ctx is a no-op.
 */
static void
iakerb_release_context(iakerb_ctx_id_t ctx)
{
    OM_uint32 tmpmin;

    if (ctx == NULL)
        return;

    /* Release credential and in-progress acquisition state. */
    krb5_gss_release_cred(&tmpmin, &ctx->defcred);
    krb5_init_creds_free(ctx->k5c, ctx->icc);
    krb5_tkt_creds_free(ctx->k5c, ctx->tcc);

    /* Release the wrapped krb5 GSS context and conversation buffer. */
    krb5_gss_delete_sec_context(&tmpmin, &ctx->gssc, NULL);
    krb5_free_data_contents(ctx->k5c, &ctx->conv);
    krb5_get_init_creds_opt_free(ctx->k5c, ctx->gic_opts);

    /* The krb5 library context goes last since the calls above use it. */
    krb5_free_context(ctx->k5c);
    free(ctx);
}
/* Import a lucid context structure, creating a krb5 GSS context structure
 * sufficient for use by wrap/unwrap/get_mic/verify_mic operations.
 * lctx supplies the initiator flag, expiry time, sequence numbers, protocol
 * (0 = RFC 1964, 1 = CFX) and keys.  On success the new handle is stored in
 * *context_handle_out; on failure the partially built context is freed and a
 * krb5 error code is returned. */
static krb5_error_code
import_lucid_sec_context_v1(const gss_krb5_lucid_context_v1_t *lctx,
                            gss_ctx_id_t *context_handle_out)
{
    krb5_error_code ret;
    krb5_gss_ctx_id_t gctx;
    OM_uint32 tmpmin;
    krb5_key key = NULL;

    /* k5alloc zeroes the allocation and sets ret on failure. */
    gctx = k5alloc(sizeof(*gctx), &ret);
    if (gctx == NULL)
        return ret;

    /* Copy the basic context attributes from the lucid structure. */
    gctx->initiate = lctx->initiate;
    gctx->krb_times.endtime = lctx->endtime;
    gctx->seq_send = lctx->send_seq;
    gctx->seq_recv = lctx->recv_seq;
    gctx->proto = lctx->protocol;
    if (lctx->protocol == 0) {
        /* Ignore sign_alg and seal_alg since they follow from the enctype. */
        ret = lkey_to_key(&lctx->rfc1964_kd.ctx_key, &key);
        if (ret)
            goto cleanup;
        /* For raw enctypes, choose an enctype expected by kg_setup_keys. */
        if (key->keyblock.enctype == ENCTYPE_DES_CBC_RAW)
            key->keyblock.enctype = ENCTYPE_DES_CBC_CRC;
        else if (key->keyblock.enctype == ENCTYPE_DES3_CBC_RAW)
            key->keyblock.enctype = ENCTYPE_DES3_CBC_SHA1;
        ret = kg_setup_keys(NULL, gctx, key, &gctx->cksumtype);
        if (ret)
            goto cleanup;
        /* kg_setup_keys may have updated gctx->proto based on the enctype;
         * a pre-CFX context must still be pre-CFX after key setup. */
        if (gctx->proto != 0) {
            /* ctx_key did not have a pre-CFX enctype. */
            ret = EINVAL;
            goto cleanup;
        }
    } else if (lctx->protocol == 1) {
        /* CFX: install the context key (and acceptor subkey if present)
         * directly, along with their mandatory checksum types. */
        ret = lkey_to_key(&lctx->cfx_kd.ctx_key, &gctx->subkey);
        if (ret)
            goto cleanup;
        ret = get_cksumtype(gctx->subkey, &gctx->cksumtype);
        if (ret)
            goto cleanup;
        if (lctx->cfx_kd.have_acceptor_subkey) {
            gctx->have_acceptor_subkey = 1;
            ret = lkey_to_key(&lctx->cfx_kd.acceptor_subkey,
                              &gctx->acceptor_subkey);
            if (ret)
                goto cleanup;
            ret = get_cksumtype(gctx->acceptor_subkey,
                                &gctx->acceptor_subkey_cksumtype);
            if (ret)
                goto cleanup;
        }
    }
    /* NOTE(review): protocol values other than 0 or 1 fall through here
     * with no keys installed and ret still 0 from k5alloc — presumably the
     * caller validates lctx->protocol; verify. */

    gctx->seed_init = 0;
    gctx->established = 1;
    gctx->mech_used = (gss_OID_desc *)gss_mech_krb5;

    /*
     * The lucid context doesn't convey the gss_flags which indicate whether
     * the protocol needs replay or sequence protection.  Assume we don't
     * (because RPCSEC_GSS doesn't).
     */
    g_seqstate_init(&gctx->seqstate, gctx->seq_recv, 0, 0, gctx->proto);

    *context_handle_out = (gss_ctx_id_t)gctx;
    /* Clear gctx so the cleanup delete below becomes a no-op on success. */
    gctx = NULL;

cleanup:
    krb5_k_free_key(NULL, key);
    krb5_gss_delete_sec_context(&tmpmin, (gss_ctx_id_t *)&gctx, NULL);
    return ret;
}