/* Solaris Kerberos */
/*
 * krb5_open_pkcs11_session
 *
 * Locate a PKCS#11 slot that supports the mechanisms Kerberos needs and
 * open a serial session against it.  On success 0 is returned and
 * *hSession holds the new session handle; on any failure PKCS_ERR is
 * returned.
 */
krb5_error_code
krb5_open_pkcs11_session(CK_SESSION_HANDLE *hSession)
{
	krb5_error_code retval = 0;
	CK_RV rv;
	CK_SLOT_ID_PTR slotlist = NULL_PTR;
	CK_ULONG slotcount;
	CK_ULONG slot;

	/* First call with a NULL buffer just counts the available slots. */
	rv = C_GetSlotList(FALSE, NULL_PTR, &slotcount);
	if (rv != CKR_OK) {
		KRB5_LOG(KRB5_ERR, "C_GetSlotList failed with 0x%x.", rv);
		retval = PKCS_ERR;
		goto cleanup;
	}
	if (slotcount == 0) {
		KRB5_LOG0(KRB5_ERR, "No slot is found in PKCS11.");
		retval = PKCS_ERR;
		goto cleanup;
	}

	slotlist = malloc(slotcount * sizeof (CK_SLOT_ID));
	if (slotlist == NULL) {
		KRB5_LOG0(KRB5_ERR, "malloc failed for slotcount.");
		retval = PKCS_ERR;
		goto cleanup;
	}

	/* Second call fills in the actual slot IDs. */
	rv = C_GetSlotList(FALSE, slotlist, &slotcount);
	if (rv != CKR_OK) {
		KRB5_LOG(KRB5_ERR, "C_GetSlotList failed with 0x%x", rv);
		retval = PKCS_ERR;
		goto cleanup;
	}

	/* Take the first slot that advertises the Kerberos mechanisms. */
	for (slot = 0; slot < slotcount; slot++) {
		if (slot_supports_krb5(&slotlist[slot]))
			break;
	}
	if (slot == slotcount) {
		KRB5_LOG0(KRB5_ERR,
		    "Could not find slot which supports Kerberos");
		retval = PKCS_ERR;
		goto cleanup;
	}

	rv = C_OpenSession(slotlist[slot], CKF_SERIAL_SESSION, NULL_PTR,
	    NULL_PTR, hSession);
	if (rv != CKR_OK)
		retval = PKCS_ERR;

cleanup:
	/* free(NULL) is a no-op, so no guard is needed. */
	free(slotlist);
	return (retval);
}
/*ARGSUSED*/
/*
 * krb5_c_random_make_octets
 *
 * Fill data->data with data->length random bytes.  User space draws the
 * bytes from the PKCS#11 provider via C_GenerateRandom(); kernel code
 * uses the native Solaris randomness pool instead.
 */
krb5_error_code KRB5_CALLCONV
krb5_c_random_make_octets(krb5_context context, krb5_data *data)
{
	/*
	 * Solaris kerberos uses /dev/[u]random
	 */
#ifndef _KERNEL
	/* User space code */
	CK_RV rv;

	KRB5_LOG0(KRB5_INFO, "krb5_c_random_make_octets() start, user space using "
		"krb5_get_random_octets()\n");

	rv = C_GenerateRandom(krb_ctx_hSession(context),
	    (CK_BYTE_PTR)data->data, (CK_ULONG)data->length);

	if (rv != CKR_OK) {
		KRB5_LOG(KRB5_ERR, "C_GenerateRandom failed in "
			"krb5_c_random_make_octets: rv = 0x%x.", rv);
		KRB5_LOG0(KRB5_ERR, "krb5_c_random_make_octets() end, error");
		return (PKCS_ERR);
	}
#else /* Kernel code section */
	/*
	 * Solaris Kerberos: for kernel code we use the randomness generator
	 * native to Solaris 9.  We avoid global variables and other nastiness
	 * this way.
	 *
	 * Using random_get_pseudo_bytes() instead of random_get_bytes()
	 * because it will not return an error code if there isn't enough
	 * entropy but will use a pseudo random algorithm to produce
	 * randomness.  Most of the time it should be as good as
	 * random_get_bytes() and we don't have to worry about dealing with a
	 * non-fatal error.
	 */
	KRB5_LOG0(KRB5_INFO, "krb5_c_random_make_octets() start, kernel using "
		"random_get_pseudo_bytes()\n ");

	if (random_get_pseudo_bytes((uint8_t *)data->data, data->length) != 0) {
		KRB5_LOG0(KRB5_ERR, "krb5_c_random_make_octets() end, "
			"random_get_pseudo_bytes() error.\n");
		return (KRB5_CRYPTO_INTERNAL);
	}
#endif /* !_KERNEL */

	KRB5_LOG0(KRB5_INFO, "krb5_c_random_make_octets() end\n");
	return (0);
}
/* ARGSUSED */
/*
 * krb5_hmac (kernel)
 *
 * HMAC 'input' under 'key' using the context's KEF hash mechanism and
 * store the MAC in 'output'.  Both data buffers are mandatory; if either
 * is missing, CRYPTO_FAILED is returned.  Otherwise the KEF status from
 * crypto_mac() is returned (CRYPTO_SUCCESS on success).
 */
krb5_error_code
krb5_hmac(krb5_context context, const krb5_keyblock *key,
    krb5_const krb5_data *input, krb5_data *output)
{
	int rv = CRYPTO_FAILED;
	crypto_mechanism_t mac_mech;
	crypto_data_t din;
	crypto_data_t dmac;

	KRB5_LOG0(KRB5_INFO, "krb5_hmac() start");

	/* Validate output first, then input (original check order). */
	if (output == NULL || output->data == NULL) {
		KRB5_LOG0(KRB5_INFO, "krb5_hmac() NULL output");
		return (rv);
	}
	if (input == NULL || input->data == NULL) {
		KRB5_LOG0(KRB5_INFO, "krb5_hmac() NULL input");
		return (rv);
	}

	/* Describe the MAC output buffer for the KEF. */
	dmac.cd_format = CRYPTO_DATA_RAW;
	dmac.cd_offset = 0;
	dmac.cd_length = output->length;
	dmac.cd_raw.iov_base = (char *)output->data;
	dmac.cd_raw.iov_len = output->length;

	/* Describe the message being authenticated. */
	din.cd_format = CRYPTO_DATA_RAW;
	din.cd_offset = 0;
	din.cd_length = input->length;
	din.cd_raw.iov_base = (char *)input->data;
	din.cd_raw.iov_len = input->length;

	mac_mech.cm_type = context->kef_hash_mt;
	mac_mech.cm_param = NULL;
	mac_mech.cm_param_len = 0;

	rv = crypto_mac(&mac_mech, &din, (crypto_key_t *)&key->kef_key,
	    key->key_tmpl, &dmac, NULL);

	if (rv != CRYPTO_SUCCESS) {
		KRB5_LOG(KRB5_ERR,"crypto_mac error: %0x", rv);
	}

	KRB5_LOG(KRB5_INFO, "krb5_hmac() end ret=%d\n", rv);
	return (rv);
}
/*
 * k5_ef_mac
 *
 * Compute a keyed MAC over 'input' with the context's KEF hash
 * mechanism, passing 'ivec' as the mechanism parameter, and write the
 * result to 'output'.  Returns the KEF status code (CRYPTO_SUCCESS on
 * success, CRYPTO_FAILED if no mechanism is configured).
 */
int
k5_ef_mac(krb5_context context, krb5_keyblock *key, krb5_data *ivec,
    const krb5_data *input, krb5_data *output)
{
	int rv;
	iovec_t iov_in, iov_out;
	crypto_data_t din, dout;
	crypto_mechanism_t mech;

	KRB5_LOG0(KRB5_INFO, "k5_ef_mac() start");

	ASSERT(input != NULL);
	ASSERT(ivec != NULL);
	ASSERT(output != NULL);

	/* Refuse to proceed with an unconfigured hash mechanism. */
	mech.cm_type = context->kef_hash_mt;
	if (mech.cm_type == CRYPTO_MECH_INVALID) {
		KRB5_LOG(KRB5_ERR,
		    "k5_ef_mac() invalid mech specified: 0x%llx",
		    (long long)context->kef_hash_mt);
		return (CRYPTO_FAILED);
	}
	mech.cm_param = ivec->data;
	mech.cm_param_len = ivec->length;

	bzero(&din, sizeof (din));
	bzero(&dout, sizeof (dout));

	/* Message to authenticate. */
	iov_in.iov_base = (void *)input->data;
	iov_in.iov_len = input->length;
	din.cd_format = CRYPTO_DATA_RAW;
	din.cd_offset = 0;
	din.cd_length = input->length;
	din.cd_raw = iov_in;

	/* MAC destination buffer. */
	iov_out.iov_base = (void *)output->data;
	iov_out.iov_len = output->length;
	dout.cd_format = CRYPTO_DATA_RAW;
	dout.cd_offset = 0;
	dout.cd_length = output->length;
	dout.cd_raw = iov_out;

	rv = crypto_mac(&mech, &din, &key->kef_key, key->key_tmpl,
	    &dout, NULL);
	if (rv != CRYPTO_SUCCESS) {
		KRB5_LOG(KRB5_ERR, "k5_ef_mac(): crypto_mac error: %0x", rv);
	}
	return (rv);
}
/*
 * krb5_free_context
 *
 * Tear down a krb5 context: release the EF/PKCS#11 state and the
 * configured TGS enctype table (user space only), the OS-specific
 * context state, the enctype request lists, the default realm string,
 * the serialization table, and finally the context structure itself.
 */
void KRB5_CALLCONV
krb5_free_context(krb5_context ctx)
{
	KRB5_LOG0(KRB5_INFO,"krb5_free_context() start");

	/*
	 * Tolerate a NULL context so callers can free unconditionally;
	 * previously a NULL ctx was dereferenced below.
	 */
	if (ctx == NULL)
		return;

#ifndef _KERNEL
	krb5_free_ef_handle(ctx);

	if (ctx->conf_tgs_ktypes) {
		FREE(ctx->conf_tgs_ktypes,
		    sizeof(krb5_enctype) * (ctx->conf_tgs_ktypes_count));
		ctx->conf_tgs_ktypes = 0;
		ctx->conf_tgs_ktypes_count = 0;
	}
	krb5_clear_error_message(ctx);
#endif
	krb5_os_free_context(ctx);

	if (ctx->in_tkt_ktypes) {
		/* +1: the list carries a terminator slot beyond the count. */
		FREE(ctx->in_tkt_ktypes,
		    sizeof(krb5_enctype) * (ctx->in_tkt_ktype_count+1));
		ctx->in_tkt_ktypes = 0;
	}

	if (ctx->tgs_ktypes) {
		FREE(ctx->tgs_ktypes,
		    sizeof(krb5_enctype) * (ctx->tgs_ktype_count+1));
		ctx->tgs_ktypes = 0;
	}

	if (ctx->default_realm) {
		FREE(ctx->default_realm, strlen(ctx->default_realm) + 1);
		ctx->default_realm = 0;
	}

	if (ctx->ser_ctx_count && ctx->ser_ctx) {
		FREE(ctx->ser_ctx,
		    sizeof(krb5_ser_entry) * (ctx->ser_ctx_count));
		ctx->ser_ctx = 0;
		ctx->ser_ctx_count = 0;
	}

	/* Poison the magic so a use-after-free is detectable. */
	ctx->magic = 0;
	FREE(ctx, sizeof(struct _krb5_context));
}
/*
 * k5_ef_hash (user space, PKCS#11)
 *
 * Digest the 'icount' buffers in 'input' (in order) with the given
 * PKCS#11 mechanism and store the result in 'output', updating
 * output->length to the digest's actual size.  Returns 0 on success or
 * PKCS_ERR on any C_Digest* failure.
 */
krb5_error_code
k5_ef_hash(krb5_context context, CK_MECHANISM *mechanism,
    unsigned int icount, krb5_const krb5_data *input, krb5_data *output)
{
	CK_RV rv;
	/* unsigned to match icount (was int: signed/unsigned comparison) */
	unsigned int i;
	CK_ULONG outlen = output->length;

	if ((rv = C_DigestInit(krb_ctx_hSession(context), mechanism))
	    != CKR_OK) {
		KRB5_LOG(KRB5_ERR, "C_DigestInit failed in k5_ef_hash: "
		    "rv = 0x%x.", rv);
		return (PKCS_ERR);
	}

	/* Feed every input buffer into the running digest. */
	for (i = 0; i < icount; i++) {
		if ((rv = C_DigestUpdate(krb_ctx_hSession(context),
		    (CK_BYTE_PTR)input[i].data,
		    (CK_ULONG)input[i].length)) != CKR_OK) {
			KRB5_LOG(KRB5_ERR,
			    "C_DigestUpdate failed in k5_ef_hash: "
			    "rv = 0x%x", rv);
			return (PKCS_ERR);
		}
	}

	if ((rv = C_DigestFinal(krb_ctx_hSession(context),
	    (CK_BYTE_PTR)output->data, &outlen)) != CKR_OK) {
		KRB5_LOG(KRB5_ERR, "C_DigestFinal failed in k5_ef_hash: "
		    "rv = 0x%x", rv);
		return (PKCS_ERR);
	}

	/* Narrowing conversion OK because hashes are much smaller than 2^32 */
	output->length = outlen;

	KRB5_LOG0(KRB5_INFO, "k5_ef_hash() end");
	return (0);
}
static krb5_error_code krb5_dk_decrypt_maybe_trunc_hmac( krb5_context context, const struct krb5_enc_provider *enc, const struct krb5_hash_provider *hash, const krb5_keyblock *key, krb5_keyusage usage, const krb5_data *ivec, const krb5_data *input, krb5_data *output, size_t hmacsize) { krb5_error_code ret; size_t hashsize, blocksize, enclen, plainlen; unsigned char *plaindata = NULL, *cksum = NULL, *cn; krb5_data d1, d2; krb5_keyblock *derived_encr_key = NULL; krb5_keyblock *derived_hmac_key = NULL; KRB5_LOG0(KRB5_INFO, "krb5_dk_decrypt() start\n"); /* * Derive the encryption and hmac keys. * This routine is optimized to fetch the DK * from the original key's DK list. */ ret = init_derived_keydata(context, enc, (krb5_keyblock *)key, usage, &derived_encr_key, &derived_hmac_key); if (ret) return (ret); hashsize = hash->hashsize; blocksize = enc->block_size; if (hmacsize == 0) hmacsize = hashsize; else if (hmacsize > hashsize) return (KRB5KRB_AP_ERR_BAD_INTEGRITY); enclen = input->length - hmacsize; if ((plaindata = (unsigned char *) MALLOC(enclen)) == NULL) { ret = ENOMEM; goto cleanup; } /* decrypt the ciphertext */ d1.length = enclen; d1.data = input->data; d2.length = enclen; d2.data = (char *) plaindata; if ((ret = ((*(enc->decrypt))(context, derived_encr_key, ivec, &d1, &d2))) != 0) goto cleanup; if (ivec != NULL && ivec->length == blocksize) { cn = (unsigned char *) d1.data + d1.length - blocksize; } else { cn = NULL; } /* verify the hash */ if ((cksum = (unsigned char *) MALLOC(hashsize)) == NULL) { ret = ENOMEM; goto cleanup; } d1.length = hashsize; d1.data = (char *) cksum; #ifdef _KERNEL if ((ret = krb5_hmac(context, derived_hmac_key, &d2, &d1)) != 0) goto cleanup; #else if ((ret = krb5_hmac(context, hash, derived_hmac_key, 1, &d2, &d1)) != 0) goto cleanup; #endif /* _KERNEL */ if (memcmp(cksum, input->data+enclen, hmacsize) != 0) { ret = KRB5KRB_AP_ERR_BAD_INTEGRITY; goto cleanup; } /* because this encoding isn't self-describing wrt length, the best we can 
do here is to compute the length minus the confounder. */ plainlen = enclen - blocksize; if (output->length < plainlen) { ret = KRB5_BAD_MSIZE; goto cleanup; } output->length = plainlen; (void) memcpy(output->data, d2.data+blocksize, output->length); /* * AES crypto updates the ivec differently, it is handled * in the AES crypto routines directly. */ if (cn != NULL && key->enctype != ENCTYPE_AES128_CTS_HMAC_SHA1_96 && key->enctype != ENCTYPE_AES256_CTS_HMAC_SHA1_96) { (void) memcpy(ivec->data, cn, blocksize); } ret = 0; cleanup: if (plaindata) { (void) memset(plaindata, 0, enclen); FREE(plaindata, enclen); } if (cksum) { (void) memset(cksum, 0, hashsize); FREE(cksum, hashsize); } KRB5_LOG(KRB5_INFO, "krb5_dk_decrypt() end, ret=%d\n", ret); return(ret); }
/*
 * k5_ef_hash (kernel)
 *
 * Digest the 'icount' buffers in 'input' (in order) using the context's
 * KEF checksum mechanism (kef_cksum_mt) and store the result in
 * 'output'.  Returns a KEF status code (CRYPTO_SUCCESS on success).
 */
int
k5_ef_hash(krb5_context context, int icount, const krb5_data *input,
    krb5_data *output)
{
	int i;
	int rv = CRYPTO_FAILED;
	iovec_t v1, v2;
	crypto_data_t d1, d2;
	crypto_mechanism_t mech;
	crypto_context_t ctxp;

	KRB5_LOG0(KRB5_INFO, "k5_ef_hash() start");

	bzero(&d1, sizeof (d1));
	bzero(&d2, sizeof (d2));

	/* Digest destination buffer. */
	v2.iov_base = (void *)output->data;
	v2.iov_len = output->length;

	d2.cd_format = CRYPTO_DATA_RAW;
	d2.cd_offset = 0;
	d2.cd_length = output->length;
	d2.cd_raw = v2;

	mech.cm_type = context->kef_cksum_mt;
	if (mech.cm_type == CRYPTO_MECH_INVALID) {
		/*
		 * Fix: log the checksum mechanism being validated here;
		 * the original logged kef_hash_mt, which is a different
		 * field and could mislead debugging.
		 */
		KRB5_LOG(KRB5_ERR,
		    "k5_ef_hash() invalid mech specified: 0x%llx",
		    (long long)context->kef_cksum_mt);
		return (CRYPTO_FAILED);
	}
	mech.cm_param = 0;
	mech.cm_param_len = 0;

	rv = crypto_digest_init(&mech, &ctxp, NULL);
	if (rv != CRYPTO_SUCCESS) {
		KRB5_LOG(KRB5_ERR, "crypto_digest_init error: %0x", rv);
		return (rv);
	}

	/* Feed each input buffer into the running digest. */
	for (i = 0; i < icount; i++) {
		v1.iov_base = (void *)input[i].data;
		v1.iov_len = input[i].length;
		d1.cd_length = input[i].length;
		d1.cd_format = CRYPTO_DATA_RAW;
		d1.cd_offset = 0;
		d1.cd_raw = v1;

		rv = crypto_digest_update(ctxp, &d1, NULL);
		if (rv != CRYPTO_SUCCESS) {
			KRB5_LOG(KRB5_ERR,
			    "crypto_digest_update error: %0x", rv);
			crypto_cancel_ctx(ctxp);
			return (rv);
		}
	}

	rv = crypto_digest_final(ctxp, &d2, NULL);

	/*
	 * crypto_digest_final() internally destroys the context.  So, we
	 * do not use the context any more.  This means we do not call
	 * crypto_cancel_ctx() for the failure case here unlike the failure
	 * case of crypto_digest_update() where we do.
	 */
	if (rv != CRYPTO_SUCCESS) {
		KRB5_LOG(KRB5_ERR, "crypto_digest_final error: %0x", rv);
	}

	return (rv);
}
/*
 * krb5_c_make_checksum
 *
 * Compute a checksum of type 'cksumtype' over 'input', keyed with 'key'
 * and 'usage' when the checksum type requires a key.  On success
 * cksum->contents is allocated (caller frees) and cksum->length set,
 * truncated to the type's trunc_size when one is defined.  On failure
 * the contents buffer is zeroized and freed before returning.
 */
krb5_error_code KRB5_CALLCONV
krb5_c_make_checksum(krb5_context context, krb5_cksumtype cksumtype,
    const krb5_keyblock *key, krb5_keyusage usage, const krb5_data *input,
    krb5_checksum *cksum)
{
	int i, e1, e2;
	krb5_data data;
	krb5_error_code ret = 0;
	size_t cksumlen;

	KRB5_LOG0(KRB5_INFO, "krb5_c_make_checksum() start.");

	/* Find the checksum type in the registry. */
	for (i=0; i<krb5_cksumtypes_length; i++) {
		if (krb5_cksumtypes_list[i].ctype == cksumtype)
			break;
	}
	if (i == krb5_cksumtypes_length)
		return(KRB5_BAD_ENCTYPE);

	if (krb5_cksumtypes_list[i].keyhash)
		cksumlen = krb5_cksumtypes_list[i].keyhash->hashsize;
	else
		cksumlen = krb5_cksumtypes_list[i].hash->hashsize;

#ifdef _KERNEL
	context->kef_cksum_mt = krb5_cksumtypes_list[i].kef_cksum_mt;
#endif
	cksum->length = cksumlen;

	if ((cksum->contents = (krb5_octet *) MALLOC(cksum->length)) == NULL)
		return(ENOMEM);

	data.length = cksum->length;
	data.data = (char *) cksum->contents;

	if (krb5_cksumtypes_list[i].keyhash) {
		/* check if key is compatible */
		if (krb5_cksumtypes_list[i].keyed_etype) {
			for (e1=0; e1<krb5_enctypes_length; e1++)
				if (krb5_enctypes_list[e1].etype ==
				    krb5_cksumtypes_list[i].keyed_etype)
					break;
			for (e2=0; e2<krb5_enctypes_length; e2++)
				if (krb5_enctypes_list[e2].etype ==
				    key->enctype)
					break;
			if ((e1 == krb5_enctypes_length) ||
			    (e2 == krb5_enctypes_length) ||
			    (krb5_enctypes_list[e1].enc !=
			    krb5_enctypes_list[e2].enc)) {
				ret = KRB5_BAD_ENCTYPE;
				goto cleanup;
			}
		}
#ifdef _KERNEL
		/*
		 * NOTE(review): e1 is only assigned when keyed_etype is
		 * non-zero above; if a keyhash entry ever has
		 * keyed_etype == 0 this reads e1 uninitialized — confirm
		 * all keyhash checksum entries define keyed_etype.
		 */
		context->kef_cipher_mt = krb5_enctypes_list[e1].kef_cipher_mt;
		context->kef_hash_mt = krb5_enctypes_list[e1].kef_hash_mt;
		if (key->kef_key.ck_data == NULL) {
			if ((ret = init_key_kef(context->kef_cipher_mt,
			    (krb5_keyblock *)key)))
				goto cleanup;
		}
#else
		/*
		 * Fix: route the failure through cleanup so the contents
		 * buffer allocated above is not leaked (was "return (ret)").
		 */
		if ((ret = init_key_uef(krb_ctx_hSession(context),
		    (krb5_keyblock *)key)))
			goto cleanup;
#endif /* _KERNEL */

		ret = (*(krb5_cksumtypes_list[i].keyhash->hash))(context,
		    key, usage, 0, input, &data);
	} else if (krb5_cksumtypes_list[i].flags & KRB5_CKSUMFLAG_DERIVE) {
#ifdef _KERNEL
		context->kef_cipher_mt = get_cipher_mech_type(context,
		    (krb5_keyblock *)key);
		context->kef_hash_mt = get_hash_mech_type(context,
		    (krb5_keyblock *)key);
		/*
		 * If the hash_mt is invalid, try using the cksum_mt
		 * because "hash" and "checksum" are overloaded terms
		 * in some places.
		 */
		if (context->kef_hash_mt == CRYPTO_MECH_INVALID)
			context->kef_hash_mt = context->kef_cksum_mt;
#else
		/* Fix: goto cleanup rather than leaking cksum->contents. */
		ret = init_key_uef(krb_ctx_hSession(context),
		    (krb5_keyblock *)key);
		if (ret)
			goto cleanup;
#endif /* _KERNEL */
		ret = krb5_dk_make_checksum(context,
		    krb5_cksumtypes_list[i].hash, key, usage, input, &data);
	} else {
		/*
		 * No key is used, hash and cksum are synonymous
		 * in this case
		 */
#ifdef _KERNEL
		context->kef_hash_mt = context->kef_cksum_mt;
#endif /* _KERNEL */
		ret = (*(krb5_cksumtypes_list[i].hash->hash))(context, 1,
		    input, &data);
	}

	if (!ret) {
		cksum->magic = KV5M_CHECKSUM;
		cksum->checksum_type = cksumtype;
		if (krb5_cksumtypes_list[i].trunc_size) {
			krb5_octet *trunc;
			size_t old_len = cksum->length;

			/*
			 * Solaris Kerberos:
			 * The Kernel does not like 'realloc' (which is what
			 * MIT code does here), so we do our own "realloc".
			 */
			cksum->length = krb5_cksumtypes_list[i].trunc_size;
			trunc = (krb5_octet *) MALLOC(cksum->length);
			if (trunc) {
				(void) memcpy(trunc, cksum->contents,
				    cksum->length);
				FREE(cksum->contents, old_len);
				cksum->contents = trunc;
			} else {
				ret = ENOMEM;
				/*
				 * Fix: restore the real allocation size so
				 * the cleanup FREE below releases the buffer
				 * with the length it was allocated with
				 * (kernel FREE requires the exact size).
				 */
				cksum->length = old_len;
			}
		}
	}

cleanup:
	if (ret) {
		(void) memset(cksum->contents, 0, cksum->length);
		FREE(cksum->contents, cksum->length);
		cksum->length = 0;
		cksum->contents = NULL;
	}
	KRB5_LOG(KRB5_INFO, "krb5_c_make_checksum() end ret = %d\n", ret);
	return(ret);
}
krb5_error_code krb5_hmac(krb5_context context, krb5_const struct krb5_hash_provider *hash, krb5_const krb5_keyblock *key, krb5_const unsigned int icount, krb5_const krb5_data *input, krb5_data *output) { size_t hashsize, blocksize; unsigned char *xorkey, *ihash; int i; krb5_data *hashin, hashout; krb5_error_code ret; /* Solaris Kerberos */ KRB5_LOG0(KRB5_INFO, "krb5_hmac() start\n"); if (hash == NULL) { KRB5_LOG0(KRB5_ERR, "krb5_hmac() error hash == NULL\n"); return(EINVAL); } if (key == NULL) { KRB5_LOG0(KRB5_ERR, "krb5_hmac() error key == NULL\n"); return(EINVAL); } if (input == NULL) { KRB5_LOG0(KRB5_ERR, "krb5_hmac() error input == NULL\n"); return(EINVAL); } if (output == NULL) { KRB5_LOG0(KRB5_ERR, "krb5_hmac() error output == NULL\n"); return(EINVAL); } hashsize = hash->hashsize; blocksize = hash->blocksize; if (key->length > blocksize) return(KRB5_CRYPTO_INTERNAL); if (output->length < hashsize) return(KRB5_BAD_MSIZE); /* if this isn't > 0, then there won't be enough space in this array to compute the outer hash */ if (icount == 0) return(KRB5_CRYPTO_INTERNAL); /* allocate space for the xor key, hash input vector, and inner hash */ if ((xorkey = (unsigned char *) MALLOC(blocksize)) == NULL) return(ENOMEM); if ((ihash = (unsigned char *) MALLOC(hashsize)) == NULL) { FREE(xorkey, blocksize); return(ENOMEM); } if ((hashin = (krb5_data *)MALLOC(sizeof(krb5_data)*(icount+1))) == NULL) { FREE(ihash, hashsize); FREE(xorkey, blocksize); return(ENOMEM); } /* create the inner padded key */ /* Solaris Kerberos */ (void) memset(xorkey, 0x36, blocksize); for (i=0; i<key->length; i++) xorkey[i] ^= key->contents[i]; /* compute the inner hash */ for (i=0; i<icount; i++) { hashin[0].length = blocksize; hashin[0].data = (char *) xorkey; hashin[i+1] = input[i]; } hashout.length = hashsize; hashout.data = (char *) ihash; /* Solaris Kerberos */ if ((ret = ((*(hash->hash))(context, icount+1, hashin, &hashout)))) goto cleanup; /* create the outer padded key */ /* Solaris 
Kerberos */ (void) memset(xorkey, 0x5c, blocksize); for (i=0; i<key->length; i++) xorkey[i] ^= key->contents[i]; /* compute the outer hash */ hashin[0].length = blocksize; hashin[0].data = (char *) xorkey; hashin[1] = hashout; output->length = hashsize; /* Solaris Kerberos */ if ((ret = ((*(hash->hash))(context, 2, hashin, output)))) (void) memset(output->data, 0, output->length); /* ret is set correctly by the prior call */ cleanup: /* Solaris Kerberos */ (void) memset(xorkey, 0, blocksize); (void) memset(ihash, 0, hashsize); FREE(hashin, sizeof(krb5_data)*(icount+1)); FREE(ihash, hashsize); FREE(xorkey, blocksize); /* Solaris Kerberos */ KRB5_LOG(KRB5_INFO, "krb5_hmac() end ret=%d\n", ret); return(ret); }
/*
 * foreach_localaddr
 *
 * Enumerate local interface addresses via SIOCGLIFNUM/SIOCGLIFFLAGS for
 * each address family of interest, invoking pass1fn on each usable
 * address, then betweenfn once, then pass2fn on the surviving addresses.
 * Returns 0 on success, an errno value on ioctl/alloc failure, or the
 * socket error when no family yielded a working socket.
 */
int
foreach_localaddr (void *data,
		   int (*pass1fn) (void *, struct sockaddr *),
		   int (*betweenfn) (void *),
		   int (*pass2fn) (void *, struct sockaddr *))
{
    /* Okay, this is kind of odd.  We have to use each of the address
       families we care about, because with an AF_INET socket, extra
       interfaces like hme0:1 that have only AF_INET6 addresses will
       cause errors.  Similarly, if hme0 has more AF_INET addresses
       than AF_INET6 addresses, we won't be able to retrieve all of
       the AF_INET addresses if we use an AF_INET6 socket.  Since
       neither family is guaranteed to have the greater number of
       addresses, we should use both.

       If it weren't for this little quirk, we could use one socket of
       any type, and ask for addresses of all types.  At least, it
       seems to work that way.  */

    /* Solaris kerberos: avoid using AF_NS if no define */
#if defined (KRB5_USE_INET6) && defined (KRB5_USE_NS)
    static const int afs[] = { AF_INET, AF_NS, AF_INET6 };
#elif defined (KRB5_USE_INET6)
    static const int afs[] = { AF_INET, AF_INET6 };
#else
    static const int afs[] = { AF_INET };
#endif
#define N_AFS (sizeof (afs) / sizeof (afs[0]))
    /* Per-family enumeration state: one socket and one raw lifreq buffer. */
    struct {
	int af;
	int sock;
	void *buf;
	size_t buf_size;
	struct lifnum lifnum;
    } afp[N_AFS];
    int code, i, j;
    int retval = 0, afidx;
    krb5_error_code sock_err = 0;
    struct lifreq *lifr, lifreq, *lifr2;
#define FOREACH_AF() for (afidx = 0; afidx < N_AFS; afidx++)
#define P (afp[afidx])

    KRB5_LOG0(KRB5_INFO, "foreach_localaddr() start");

    /* init */
    FOREACH_AF () {
	P.af = afs[afidx];
	P.sock = -1;
	P.buf = 0;
    }

    /* first pass: get raw data, discard uninteresting addresses, callback */
    FOREACH_AF () {
	KRB5_LOG (KRB5_INFO, "foreach_localaddr() trying af %d", P.af);
	P.sock = socket (P.af, USE_TYPE, USE_PROTO);
	if (P.sock < 0) {
	    /* This family may simply be unsupported; remember the error
	       and keep going — failure is only fatal if *every* family
	       fails (checked after this loop). */
	    sock_err = SOCKET_ERROR;
	    Tperror ("socket");
	    continue;
	}

	P.lifnum.lifn_family = P.af;
	P.lifnum.lifn_flags = 0;
	P.lifnum.lifn_count = 0;
	code = ioctl (P.sock, SIOCGLIFNUM, &P.lifnum);
	if (code) {
	    Tperror ("ioctl(SIOCGLIFNUM)");
	    retval = errno;
	    goto punt;
	}

	KRB5_LOG (KRB5_INFO, "foreach_localaddr() lifn_count %d",
		P.lifnum.lifn_count);

	/* x2: slack in case interfaces are added between the count
	   ioctl and the SIOCGLIFCONF fetch. */
	P.buf_size = P.lifnum.lifn_count * sizeof (struct lifreq) * 2;
	P.buf = malloc (P.buf_size);
	if (P.buf == NULL) {
	    retval = errno;
	    goto punt;
	}

	code = get_lifconf (P.af, P.sock, &P.buf_size, P.buf);
	if (code < 0) {
	    retval = errno;
	    goto punt;
	}

	/* Walk the returned lifreq records.  NOTE(review): 'i' is int
	   vs size_t buf_size — signed/unsigned comparison; harmless for
	   realistic interface counts. */
	for (i = 0; i < P.buf_size; i+= sizeof (*lifr)) {
	    /*LINTED*/
	    lifr = (struct lifreq *)((caddr_t) P.buf+i);

	    /* NOTE(review): strncpy does not guarantee NUL termination
	       if the source name fills the buffer — presumed OK because
	       both sides are lifr_name-sized; confirm. */
	    strncpy(lifreq.lifr_name, lifr->lifr_name,
		sizeof (lifreq.lifr_name));
	    KRB5_LOG (KRB5_INFO, "foreach_localaddr() interface %s",
		    lifreq.lifr_name);
	    /* ioctl unknown to lclint */
	    if (ioctl (P.sock, SIOCGLIFFLAGS, (char *)&lifreq) < 0) {
		Tperror ("ioctl(SIOCGLIFFLAGS)");
	    skip:
		KRB5_LOG (KRB5_INFO,
			"foreach_localaddr() skipping interface %s",
			lifr->lifr_name);
		/* mark for next pass */
		lifr->lifr_name[0] = '\0';
		continue;
	    }
#ifdef IFF_LOOPBACK
	    /* None of the current callers want loopback addresses. */
	    if (lifreq.lifr_flags & IFF_LOOPBACK) {
		Tprintf ((" loopback\n"));
		goto skip;
	    }
#endif
	    /* Ignore interfaces that are down.  */
	    if ((lifreq.lifr_flags & IFF_UP) == 0) {
		Tprintf ((" down\n"));
		goto skip;
	    }

	    /* Make sure we didn't process this address already.  */
	    for (j = 0; j < i; j += sizeof (*lifr2)) {
		/*LINTED*/
		lifr2 = (struct lifreq *)((caddr_t) P.buf+j);
		if (lifr2->lifr_name[0] == '\0')
		    continue;
		if (lifr2->lifr_addr.ss_family == lifr->lifr_addr.ss_family
		    /* Compare address info.  If this isn't good enough
		       -- i.e., if random padding bytes turn out to
		       differ when the addresses are the same -- then
		       we'll have to do it on a per address family
		       basis.  */
		    && !memcmp (&lifr2->lifr_addr, &lifr->lifr_addr,
			sizeof (*lifr))) {
		    Tprintf ((" duplicate addr\n"));
		    KRB5_LOG0 (KRB5_INFO, "foreach_localaddr() dup addr");
		    goto skip;
		}
	    }

	    if ((*pass1fn) (data, ss2sa (&lifr->lifr_addr)))
		goto punt;
	}
    }

    /* Did we actually get any working sockets?  */
    FOREACH_AF ()
	if (P.sock != -1)
	    goto have_working_socket;
    retval = sock_err;
    goto punt;
have_working_socket:

    if (betweenfn != NULL && (*betweenfn)(data))
	goto punt;

    /* Second pass over the same buffers, visiting only entries whose
       name was not cleared ('\0') during pass one. */
    if (pass2fn)
	FOREACH_AF ()
	    if (P.sock >= 0) {
		for (i = 0; i < P.buf_size; i+= sizeof (*lifr)) {
		    /*LINTED*/
		    lifr = (struct lifreq *)((caddr_t) P.buf+i);

		    if (lifr->lifr_name[0] == '\0')
			/* Marked in first pass to be ignored.  */
			continue;

		    KRB5_LOG (KRB5_INFO,
			    "foreach_localaddr() doing pass2fn i = %d", i);
		    if ((*pass2fn) (data, ss2sa (&lifr->lifr_addr)))
			goto punt;
		}
	    }
punt:
    /* NOTE(review): runs even for families whose socket() failed
       (P.sock == -1); close(-1) fails harmlessly with EBADF and
       free(NULL) is a no-op — consider guarding the close anyway. */
    FOREACH_AF () {
	closesocket(P.sock);
	free (P.buf);
    }

    return retval;
}
/*
 * krb5_do_preauth
 *
 * Process the KDC's preauthentication data (in_padata) for an AS
 * request.  First all "informational" padata handlers are run (PA_INFO
 * pass), then the first applicable "real" preauth handler (PA_REAL
 * pass).  ETYPE-INFO/ETYPE-INFO2 entries select the enctype, salt and
 * s2kparams (written through *etype, salt, s2kparams) before the
 * handler table is consulted.  On success *out_padata receives a
 * NULL-terminated list of generated padata (caller frees via
 * krb5_free_pa_data), or NULL if none was produced.
 *
 * NOTE(review): the handler table 'pa_types[]' is file-static and not
 * visible in this chunk; entries are presumed terminated by a negative
 * 'type' given the loop condition below — confirm against its
 * definition.
 */
krb5_error_code
krb5_do_preauth(krb5_context context, krb5_kdc_req *request,
		krb5_pa_data **in_padata, krb5_pa_data ***out_padata,
		krb5_data *salt, krb5_data *s2kparams, krb5_enctype *etype,
		krb5_keyblock *as_key, krb5_prompter_fct prompter,
		void *prompter_data, krb5_gic_get_as_key_fct gak_fct,
		void *gak_data)
{
    int h, i, j, out_pa_list_size;
    int seen_etype_info2 = 0;
    krb5_pa_data *out_pa = NULL, **out_pa_list = NULL;
    krb5_data scratch;
    krb5_etype_info etype_info = NULL;
    krb5_error_code ret;
    static const int paorder[] = { PA_INFO, PA_REAL };
    int realdone;

    KRB5_LOG0(KRB5_INFO, "krb5_do_preauth() start");

    /* No preauth data from the KDC: nothing to do. */
    if (in_padata == NULL) {
	*out_padata = NULL;
	return(0);
    }

#ifdef DEBUG
    if (salt && salt->data && salt->length > 0) {
	fprintf (stderr, "salt len=%d", salt->length);
	if (salt->length > 0)
	    fprintf (stderr, " '%*s'", salt->length, salt->data);
	fprintf (stderr, "; preauth data types:");
	for (i = 0; in_padata[i]; i++) {
	    fprintf (stderr, " %d", in_padata[i]->pa_type);
	}
	fprintf (stderr, "\n");
    }
#endif

    out_pa_list = NULL;
    out_pa_list_size = 0;

    /* first do all the informational preauths, then the first real one */

    for (h=0; h<(sizeof(paorder)/sizeof(paorder[0])); h++) {
	realdone = 0;
	/* 'realdone' stops the scan once one PA_REAL handler has fired. */
	for (i=0; in_padata[i] && !realdone; i++) {
	    int k, l, etype_found, valid_etype_found;
	    /*
	     * This is really gross, but is necessary to prevent
	     * lossge when talking to a 1.0.x KDC, which returns an
	     * erroneous PA-PW-SALT when it returns a KRB-ERROR
	     * requiring additional preauth.
	     */
	    switch (in_padata[i]->pa_type) {
	    case KRB5_PADATA_ETYPE_INFO:
	    case KRB5_PADATA_ETYPE_INFO2:
	    {
		krb5_preauthtype pa_type = in_padata[i]->pa_type;
		/* ETYPE-INFO2 takes precedence over ETYPE-INFO; once an
		   ETYPE-INFO2 has been decoded, later entries are
		   ignored. */
		if (etype_info) {
		    if (seen_etype_info2 || pa_type != KRB5_PADATA_ETYPE_INFO2)
			continue;
		    if (pa_type == KRB5_PADATA_ETYPE_INFO2) {
			krb5_free_etype_info( context, etype_info);
			etype_info = NULL;
		    }
		}

		scratch.length = in_padata[i]->length;
		scratch.data = (char *) in_padata[i]->contents;
		if (pa_type == KRB5_PADATA_ETYPE_INFO2) {
		    seen_etype_info2++;
		    ret = decode_krb5_etype_info2(&scratch, &etype_info);
		} else
		    ret = decode_krb5_etype_info(&scratch, &etype_info);
		if (ret) {
		    ret = 0; /*Ignore error and etype_info element*/
		    krb5_free_etype_info( context, etype_info);
		    etype_info = NULL;
		    continue;
		}
		if (etype_info[0] == NULL) {
		    krb5_free_etype_info(context, etype_info);
		    etype_info = NULL;
		    break;
		}
		/*
		 * Select first etype in our request which is also in
		 * etype-info (preferring client request ktype order).
		 */
		for (etype_found = 0, valid_etype_found = 0, k = 0;
		     !etype_found && k < request->nktypes; k++) {
		    for (l = 0; etype_info[l]; l++) {
			if (etype_info[l]->etype == request->ktype[k]) {
			    etype_found++;
			    break;
			}
			/* check if program has support for this etype for more
			 * precise error reporting.
			 */
			if (valid_enctype(etype_info[l]->etype))
			    valid_etype_found++;
		    }
		}
		if (!etype_found) {
		    KRB5_LOG(KRB5_ERR, "error !etype_found, "
			    "valid_etype_found = %d", valid_etype_found);
		    if (valid_etype_found) {
			/* supported enctype but not requested */
			ret = KRB5_CONFIG_ETYPE_NOSUPP;
			goto cleanup;
		    } else {
			/* unsupported enctype */
			ret = KRB5_PROG_ETYPE_NOSUPP;
			goto cleanup;
		    }
		}
		/* On break above, etype_info[l] is the matching entry:
		   adopt its salt, etype and s2kparams. */
		scratch.data = (char *) etype_info[l]->salt;
		scratch.length = etype_info[l]->length;
		krb5_free_data_contents(context, salt);
		if (scratch.length == KRB5_ETYPE_NO_SALT)
		    salt->data = NULL;
		else
		    if ((ret = krb5int_copy_data_contents( context, &scratch,
			    salt)) != 0)
			goto cleanup;
		*etype = etype_info[l]->etype;
		krb5_free_data_contents(context, s2kparams);
		if ((ret = krb5int_copy_data_contents(context,
			&etype_info[l]->s2kparams, s2kparams)) != 0)
		    goto cleanup;
		break;
	    }
	    case KRB5_PADATA_PW_SALT:
	    case KRB5_PADATA_AFS3_SALT:
		/* ETYPE-INFO's salt wins over PW-SALT/AFS3-SALT (the
		   1.0.x-KDC workaround mentioned above). */
		if (etype_info)
		    continue;
		break;
	    default:
		;
	    }

	    /* Dispatch to every handler registered for this pa_type in
	       the current pass (PA_INFO or PA_REAL). */
	    for (j=0; pa_types[j].type >= 0; j++) {
		if ((in_padata[i]->pa_type == pa_types[j].type) &&
		    (pa_types[j].flags & paorder[h])) {
		    out_pa = NULL;

		    if ((ret = ((*pa_types[j].fct)(context, request,
						   in_padata[i], &out_pa,
						   salt, s2kparams, etype,
						   as_key,
						   prompter, prompter_data,
						   gak_fct, gak_data)))) {
			goto cleanup;
		    }

		    if (out_pa) {
			if (out_pa_list == NULL) {
			    /* 2 slots: one entry + NULL terminator. */
			    if ((out_pa_list =
				 (krb5_pa_data **)
				 malloc(2*sizeof(krb5_pa_data *)))
				== NULL) {
				ret = ENOMEM;
				goto cleanup;
			    }
			} else {
			    if ((out_pa_list =
				 (krb5_pa_data **)
				 realloc(out_pa_list,
					 (out_pa_list_size+2)*
					 sizeof(krb5_pa_data *)))
				== NULL) {
				/* XXX this will leak the pointers which
				   have already been allocated.  oh well. */
				ret = ENOMEM;
				goto cleanup;
			    }
			}

			out_pa_list[out_pa_list_size++] = out_pa;
		    }
		    if (paorder[h] == PA_REAL)
			realdone = 1;
		}
	    }
	}
    }

    /* NULL-terminate the produced list before handing it back. */
    if (out_pa_list)
	out_pa_list[out_pa_list_size++] = NULL;
    *out_padata = out_pa_list;
    if (etype_info)
	krb5_free_etype_info(context, etype_info);

    KRB5_LOG0(KRB5_INFO, "krb5_do_preauth() end");
    return(0);
 cleanup:
    if (out_pa_list) {
	out_pa_list[out_pa_list_size++] = NULL;
	krb5_free_pa_data(context, out_pa_list);
    }
    if (etype_info)
	krb5_free_etype_info(context, etype_info);
    KRB5_LOG0(KRB5_INFO, "krb5_do_preauth() end");
    return (ret);
}