/*
 * XOR the block at *in into the block at *out, one 32-bit word at a time.
 * Unaligned word loads/stores are used deliberately: on x86 they measured
 * faster than per-byte access even for unaligned data (about 2.4% on
 * 1024-byte messages) and are far better for aligned data.  A platform with
 * expensive unaligned or byte accesses might instead want an alignment test
 * here or in the load/store helpers, possibly conditionalized on CPU type.
 */
static void
xorblock(unsigned char *out, const unsigned char *in)
{
    unsigned char *op = out;
    const unsigned char *ip = in;
    int i;

    for (i = 0; i < BLOCK_SIZE / 4; i++, op += 4, ip += 4)
        store_32_n(load_32_n(op) ^ load_32_n(ip), op);
}
/*
 * Read four bytes from the cache file and store their value in *out as a
 * 32-bit unsigned integer, using the byte order dictated by the file format
 * version (native order before version 3, big-endian from version 3 on).
 * If buf is non-null, also append the raw bytes to it.  The per-cache lock
 * must be held.  Returns 0 on success or an error from read_bytes().
 */
static krb5_error_code
read32(krb5_context context, krb5_ccache id, struct k5buf *buf, uint32_t *out)
{
    krb5_error_code retval;
    char data[4];

    k5_cc_mutex_assert_locked(context, &((fcc_data *)id->data)->lock);

    retval = read_bytes(context, id, data, 4);
    if (retval != 0)
        return retval;
    if (buf != NULL)
        k5_buf_add_len(buf, data, 4);
    if (version(id) < 3)
        *out = load_32_n(data);
    else
        *out = load_32_be(data);
    return 0;
}
/*
 * Test some low-level assumptions the Kerberos code depends on: the
 * fixed-endian and native-order load/store helpers must work at unaligned
 * offsets and produce exactly the expected byte patterns.
 *
 * Fix: the original wrapped each store_*() call (and the poison writes that
 * precede it) inside assert((store…, !memcmp…)).  Side effects inside
 * assert() disappear when the program is built with NDEBUG (CERT EXP31-C),
 * silently turning the test into a no-op.  The stores and poison writes are
 * now plain statements; only the pure memcmp check stays in the assert.
 */
int main()
{
    union {
        uint64_t n64;
        uint32_t n32;
        uint16_t n16;
        unsigned char b[9];
    } u;
    static unsigned char buf[9] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };

    /* Fixed-endian loads at deliberately unaligned offsets. */
    assert(load_64_be(buf + 1) == 0x0102030405060708LL);
    assert(load_64_le(buf + 1) == 0x0807060504030201LL);
    assert(load_32_le(buf + 2) == 0x05040302);
    assert(load_32_be(buf + 2) == 0x02030405);
    assert(load_16_be(buf + 3) == 0x0304);
    assert(load_16_le(buf + 3) == 0x0403);

    /*
     * Fixed-endian stores: poison one byte inside the target range before
     * each store so a store that writes too few bytes is caught, then
     * compare all nine bytes against the reference pattern.
     */
    u.b[0] = 0;
    store_64_be(0x0102030405060708LL, u.b + 1);
    assert(memcmp(buf, u.b, 9) == 0);
    u.b[1] = 9;
    store_64_le(0x0807060504030201LL, u.b + 1);
    assert(memcmp(buf, u.b, 9) == 0);
    u.b[2] = 10;
    store_32_be(0x02030405, u.b + 2);
    assert(memcmp(buf, u.b, 9) == 0);
    u.b[3] = 11;
    store_32_le(0x05040302, u.b + 2);
    assert(memcmp(buf, u.b, 9) == 0);
    u.b[4] = 12;
    store_16_be(0x0304, u.b + 3);
    assert(memcmp(buf, u.b, 9) == 0);
    u.b[4] = 13;
    store_16_le(0x0403, u.b + 3);
    assert(memcmp(buf, u.b, 9) == 0);

    /* Verify that load_*_n properly does native format.  Assume the
     * unaligned thing is okay. */
    u.n64 = 0x090a0b0c0d0e0f00LL;
    assert(load_64_n((unsigned char *)&u.n64) == 0x090a0b0c0d0e0f00LL);
    u.n32 = 0x06070809;
    assert(load_32_n((unsigned char *)&u.n32) == 0x06070809);
    u.n16 = 0x0a0b;
    assert(load_16_n((unsigned char *)&u.n16) == 0x0a0b);

    return 0;
}
/*
 * Encode *request into *encoded_request using encoder.  If state carries no
 * armor key, the request is encoded directly.  Otherwise the request is
 * wrapped as a FAST armored request: the inner request is encoded, a
 * checksum over *to_be_checksummed is made with the armor key, the encoded
 * inner request is encrypted in the armor key, and the result is attached
 * as a PA-FX-FAST padata element to state->fast_outer_request, which is
 * what actually gets encoded for the caller.  Returns 0 on success or a
 * krb5 error code; on success the caller owns *encoded_request.
 */
krb5_error_code
krb5int_fast_prep_req(krb5_context context,
                      struct krb5int_fast_request_state *state,
                      krb5_kdc_req *request,
                      const krb5_data *to_be_checksummed,
                      kdc_req_encoder_proc encoder,
                      krb5_data **encoded_request)
{
    krb5_error_code retval = 0;
    krb5_pa_data *pa_array[2];          /* one element plus NULL terminator */
    krb5_pa_data pa[2];                 /* stack storage for the element */
    krb5_fast_req fast_req;
    krb5_fast_armored_req *armored_req = NULL;
    krb5_data *encoded_fast_req = NULL;
    krb5_data *encoded_armored_req = NULL;
    krb5_data *local_encoded_result = NULL;
    krb5_data random_data;
    char random_buf[4];

    assert(state != NULL);
    assert(state->fast_outer_request.padata == NULL);
    memset(pa_array, 0, sizeof pa_array);

    /* Without an armor key, FAST is not in use; encode the request as-is. */
    if (state->armor_key == NULL) {
        return encoder(request, encoded_request);
    }

    TRACE_FAST_ENCODE(context);

    /* Fill in a fresh random nonce for each inner request. */
    random_data.length = 4;
    random_data.data = (char *)random_buf;
    retval = krb5_c_random_make_octets(context, &random_data);
    if (retval == 0) {
        /* Mask off the top bit so the nonce stays within the positive
         * 32-bit signed range; remember it in state for later use. */
        request->nonce = 0x7fffffff & load_32_n(random_buf);
        state->nonce = request->nonce;
    }
    fast_req.req_body = request;
    if (fast_req.req_body->padata == NULL) {
        /* Give the inner request an empty NULL-terminated padata list. */
        fast_req.req_body->padata = calloc(1, sizeof(krb5_pa_data *));
        if (fast_req.req_body->padata == NULL)
            retval = ENOMEM;
    }
    fast_req.fast_options = state->fast_options;

    /* Each remaining step runs only if every earlier step succeeded. */
    if (retval == 0)
        retval = encode_krb5_fast_req(&fast_req, &encoded_fast_req);
    if (retval == 0) {
        armored_req = calloc(1, sizeof(krb5_fast_armored_req));
        if (armored_req == NULL)
            retval = ENOMEM;
    }
    if (retval == 0)
        armored_req->armor = state->armor;  /* aliased; state keeps ownership */
    if (retval == 0)
        retval = krb5_c_make_checksum(context, 0, state->armor_key,
                                      KRB5_KEYUSAGE_FAST_REQ_CHKSUM,
                                      to_be_checksummed,
                                      &armored_req->req_checksum);
    if (retval == 0)
        retval = krb5_encrypt_helper(context, state->armor_key,
                                     KRB5_KEYUSAGE_FAST_ENC,
                                     encoded_fast_req,
                                     &armored_req->enc_part);
    if (retval == 0)
        retval = encode_krb5_pa_fx_fast_request(armored_req,
                                                &encoded_armored_req);
    if (retval == 0) {
        /* Point the padata element at the encoded armored request; the
         * contents are borrowed from encoded_armored_req, not copied. */
        pa[0].pa_type = KRB5_PADATA_FX_FAST;
        pa[0].contents = (unsigned char *) encoded_armored_req->data;
        pa[0].length = encoded_armored_req->length;
        pa_array[0] = &pa[0];
    }
    /* pa_array and pa live on this stack frame, so the outer request must be
     * detached from them before returning (done at the bottom). */
    state->fast_outer_request.padata = pa_array;
    if (retval == 0)
        retval = encoder(&state->fast_outer_request, &local_encoded_result);
    if (retval == 0) {
        /* Success: transfer ownership of the encoded result to the caller. */
        *encoded_request = local_encoded_result;
        local_encoded_result = NULL;
    }

    /* Cleanup, shared by success and error paths. */
    if (encoded_armored_req)
        krb5_free_data(context, encoded_armored_req);
    if (armored_req) {
        armored_req->armor = NULL; /* owned by state */
        krb5_free_fast_armored_req(context, armored_req);
    }
    if (encoded_fast_req)
        krb5_free_data(context, encoded_fast_req);
    if (local_encoded_result)
        krb5_free_data(context, local_encoded_result);
    state->fast_outer_request.padata = NULL;
    return retval;
}