/* * Perform the MPPE rekey algorithm, from RFC 3078, sec. 7.3. * Well, not what's written there, but rather what they meant. */ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) { struct scatterlist sg_in[1], sg_out[1]; get_new_key_from_sha(state); if (!initial_key) { crypto_cipher_setkey(state->arc4, state->sha1_digest, state->keylen); setup_sg(sg_in, state->sha1_digest, state->keylen); setup_sg(sg_out, state->session_key, state->keylen); if (crypto_cipher_encrypt(state->arc4, sg_out, sg_in, state->keylen) != 0) { printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n"); } } else { memcpy(state->session_key, state->sha1_digest, state->keylen); } if (state->keylen == 8) { /* See RFC 3078 */ state->session_key[0] = 0xd1; state->session_key[1] = 0x26; state->session_key[2] = 0x9e; } crypto_cipher_setkey(state->arc4, state->session_key, state->keylen); }
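/*
 * The mppe_rekey() snippet above relies on a setup_sg() helper that is not
 * shown. A minimal sketch of such a helper, assuming the pre-2.6.24
 * struct scatterlist layout used by this era of the kernel crypto API
 * (the exact helper in ppp_mppe.c may differ):
 */
static inline void
setup_sg(struct scatterlist *sg, const void *address, unsigned int length)
{
	sg[0].page = virt_to_page(address);    /* page backing the buffer */
	sg[0].offset = offset_in_page(address); /* offset within that page */
	sg[0].length = length;                  /* number of bytes to process */
}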
/** Run AES performance benchmarks. */ static void bench_aes(void) { int len, i; char *b1, *b2; crypto_cipher_t *c; uint64_t start, end; const int bytes_per_iter = (1<<24); reset_perftime(); c = crypto_cipher_new(NULL); for (len = 1; len <= 8192; len *= 2) { int iters = bytes_per_iter / len; b1 = tor_malloc_zero(len); b2 = tor_malloc_zero(len); start = perftime(); for (i = 0; i < iters; ++i) { crypto_cipher_encrypt(c, b1, b2, len); } end = perftime(); tor_free(b1); tor_free(b2); printf("%d bytes: %.2f nsec per byte\n", len, NANOCOUNT(start, end, iters*len)); } crypto_cipher_free(c); }
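/*
 * The benchmark above reports "nsec per byte" via the NANOCOUNT() macro,
 * which is not shown in the snippet. A plausible definition, assuming
 * perftime() returns a monotonic timestamp in nanoseconds (an assumption,
 * not taken from the snippet): elapsed nanoseconds divided by the number
 * of bytes processed.
 */
#define NANOCOUNT(start, end, iters) \
  (((double)((end) - (start))) / (iters))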
int ikev2_encr_encrypt(int alg, const u8 *key, size_t key_len, const u8 *iv, const u8 *plain, u8 *crypt, size_t len) { struct crypto_cipher *cipher; int encr_alg; switch (alg) { case ENCR_3DES: encr_alg = CRYPTO_CIPHER_ALG_3DES; break; case ENCR_AES_CBC: encr_alg = CRYPTO_CIPHER_ALG_AES; break; default: wpa_printf(MSG_DEBUG, "IKEV2: Unsupported encr alg %d", alg); return -1; } cipher = crypto_cipher_init(encr_alg, iv, key, key_len); if (cipher == NULL) { wpa_printf(MSG_INFO, "IKEV2: Failed to initialize cipher"); return -1; } if (crypto_cipher_encrypt(cipher, plain, crypt, len) < 0) { wpa_printf(MSG_INFO, "IKEV2: Encryption failed"); crypto_cipher_deinit(cipher); return -1; } crypto_cipher_deinit(cipher); return 0; }
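/*
 * A hypothetical caller sketch (the buffers, key, and IV below are
 * illustrative placeholders, not from the source): ENCR_AES_CBC expects a
 * key_len matching the AES key size and a len that is a multiple of the
 * 16-byte block size.
 */
static int ikev2_encr_encrypt_example(void)
{
	u8 key[16] = { 0 };   /* placeholder 128-bit key */
	u8 iv[16] = { 0 };    /* placeholder CBC IV */
	u8 plain[16] = { 0 }; /* exactly one cipher block of plaintext */
	u8 crypt[16];

	return ikev2_encr_encrypt(ENCR_AES_CBC, key, sizeof(key), iv,
				  plain, crypt, sizeof(plain));
}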
int ikev2_encr_encrypt(int alg, const u8 *key, size_t key_len, const u8 *iv, const u8 *plain, u8 *crypt, size_t len) { struct crypto_cipher *cipher; int encr_alg; #ifdef CCNS_PL if (alg == ENCR_3DES) { struct des3_key_s des3key; size_t i, blocks; u8 *pos; /* ECB mode is used incorrectly for 3DES!? */ if (key_len != 24) { wpa_printf(MSG_INFO, "IKEV2: Invalid encr key length"); return -1; } des3_key_setup(key, &des3key); blocks = len / 8; pos = crypt; for (i = 0; i < blocks; i++) { des3_encrypt(pos, &des3key, pos); pos += 8; } } else { #endif /* CCNS_PL */ switch (alg) { case ENCR_3DES: encr_alg = CRYPTO_CIPHER_ALG_3DES; break; case ENCR_AES_CBC: encr_alg = CRYPTO_CIPHER_ALG_AES; break; default: wpa_printf(MSG_DEBUG, "IKEV2: Unsupported encr alg %d", alg); return -1; } cipher = crypto_cipher_init(encr_alg, iv, key, key_len); if (cipher == NULL) { wpa_printf(MSG_INFO, "IKEV2: Failed to initialize cipher"); return -1; } if (crypto_cipher_encrypt(cipher, plain, crypt, len) < 0) { wpa_printf(MSG_INFO, "IKEV2: Encryption failed"); crypto_cipher_deinit(cipher); return -1; } crypto_cipher_deinit(cipher); #ifdef CCNS_PL } #endif /* CCNS_PL */ return 0; }
/* Perform WEP encryption on given skb that has at least 4 bytes of headroom * for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be transmitted, * so the payload length increases by 8 bytes. * * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data)) */ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct prism2_wep_data *wep = priv; u32 klen, len; u8 key[WEP_KEY_LEN + 3]; u8 *pos; cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); #if((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)) || (OPENSUSE_SLED)) struct blkcipher_desc desc = {.tfm = wep->tx_tfm}; #endif u32 crc; u8 *icv; struct scatterlist sg; if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 || skb->len < hdr_len) return -1; len = skb->len - hdr_len; pos = skb_push(skb, 4); memmove(pos, pos + 4, hdr_len); pos += hdr_len; klen = 3 + wep->key_len; wep->iv++; /* Fluhrer, Mantin, and Shamir have reported weaknesses in the key * scheduling algorithm of RC4. At least IVs (KeyByte + 3, 0xff, N) * can be used to speed up attacks, so avoid using them. */ if ((wep->iv & 0xff00) == 0xff00) { u8 B = (wep->iv >> 16) & 0xff; if (B >= 3 && B < klen) wep->iv += 0x0100; } /* Prepend 24-bit IV to RC4 key and TX frame */ *pos++ = key[0] = (wep->iv >> 16) & 0xff; *pos++ = key[1] = (wep->iv >> 8) & 0xff; *pos++ = key[2] = wep->iv & 0xff; *pos++ = wep->key_idx << 6; /* Copy rest of the WEP key (the secret part) */ memcpy(key + 3, wep->key, wep->key_len); if (!tcb_desc->bHwSec) { /* Append little-endian CRC32 and encrypt it to produce ICV */ #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)) crc = ~crc32_le(~0, pos, len); #else crc = ~ether_crc_le(len, pos); #endif icv = skb_put(skb, 4); icv[0] = crc; icv[1] = crc >> 8; icv[2] = crc >> 16; icv[3] = crc >> 24; #if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED)) crypto_cipher_setkey(wep->tfm, key, klen); sg.page = virt_to_page(pos); sg.offset = offset_in_page(pos); sg.length = len + 4; crypto_cipher_encrypt(wep->tfm, &sg, &sg, len + 4); return 0; #else crypto_blkcipher_setkey(wep->tx_tfm, key, klen); #if(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) sg.page = virt_to_page(pos); sg.offset = offset_in_page(pos); sg.length = len + 4; #else sg_init_one(&sg, pos, len+4); #endif return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); #endif } return 0; }
/** Run unit tests for our AES functionality */ static void test_crypto_aes(void *arg) { char *data1 = NULL, *data2 = NULL, *data3 = NULL; crypto_cipher_t *env1 = NULL, *env2 = NULL; int i, j; char *mem_op_hex_tmp=NULL; int use_evp = !strcmp(arg,"evp"); evaluate_evp_for_aes(use_evp); evaluate_ctr_for_aes(); data1 = tor_malloc(1024); data2 = tor_malloc(1024); data3 = tor_malloc(1024); /* Now, test encryption and decryption with stream cipher. */ data1[0]='\0'; for (i = 1023; i>0; i -= 35) strncat(data1, "Now is the time for all good onions", i); memset(data2, 0, 1024); memset(data3, 0, 1024); env1 = crypto_cipher_new(NULL); test_neq_ptr(env1, 0); env2 = crypto_cipher_new(crypto_cipher_get_key(env1)); test_neq_ptr(env2, 0); /* Try encrypting 512 chars. */ crypto_cipher_encrypt(env1, data2, data1, 512); crypto_cipher_decrypt(env2, data3, data2, 512); test_memeq(data1, data3, 512); test_memneq(data1, data2, 512); /* Now encrypt 1 at a time, and get 1 at a time. */ for (j = 512; j < 560; ++j) { crypto_cipher_encrypt(env1, data2+j, data1+j, 1); } for (j = 512; j < 560; ++j) { crypto_cipher_decrypt(env2, data3+j, data2+j, 1); } test_memeq(data1, data3, 560); /* Now encrypt 3 at a time, and get 5 at a time. */ for (j = 560; j < 1024-5; j += 3) { crypto_cipher_encrypt(env1, data2+j, data1+j, 3); } for (j = 560; j < 1024-5; j += 5) { crypto_cipher_decrypt(env2, data3+j, data2+j, 5); } test_memeq(data1, data3, 1024-5); /* Now make sure that when we encrypt with different chunk sizes, we get the same results. */ crypto_cipher_free(env2); env2 = NULL; memset(data3, 0, 1024); env2 = crypto_cipher_new(crypto_cipher_get_key(env1)); test_neq_ptr(env2, NULL); for (j = 0; j < 1024-16; j += 17) { crypto_cipher_encrypt(env2, data3+j, data1+j, 17); } for (j= 0; j < 1024-16; ++j) { if (data2[j] != data3[j]) { printf("%d: %d\t%d\n", j, (int) data2[j], (int) data3[j]); } } test_memeq(data2, data3, 1024-16); crypto_cipher_free(env1); env1 = NULL; crypto_cipher_free(env2); env2 = NULL; /* NIST test vector for aes. */ /* IV starts at 0 */ env1 = crypto_cipher_new("\x80\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x00"); crypto_cipher_encrypt(env1, data1, "\x00\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x00", 16); test_memeq_hex(data1, "0EDD33D3C621E546455BD8BA1418BEC8"); /* Now test rollover. All these values are originally from a python * script. */ crypto_cipher_free(env1); env1 = crypto_cipher_new_with_iv( "\x80\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x00", "\x00\x00\x00\x00\x00\x00\x00\x00" "\xff\xff\xff\xff\xff\xff\xff\xff"); memset(data2, 0, 1024); crypto_cipher_encrypt(env1, data1, data2, 32); test_memeq_hex(data1, "335fe6da56f843199066c14a00a40231" "cdd0b917dbc7186908a6bfb5ffd574d3"); crypto_cipher_free(env1); env1 = crypto_cipher_new_with_iv( "\x80\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x00", "\x00\x00\x00\x00\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff"); memset(data2, 0, 1024); crypto_cipher_encrypt(env1, data1, data2, 32); test_memeq_hex(data1, "e627c6423fa2d77832a02b2794094b73" "3e63c721df790d2c6469cc1953a3ffac"); crypto_cipher_free(env1); env1 = crypto_cipher_new_with_iv( "\x80\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x00", "\xff\xff\xff\xff\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff"); memset(data2, 0, 1024); crypto_cipher_encrypt(env1, data1, data2, 32); test_memeq_hex(data1, "2aed2bff0de54f9328efd070bf48f70a" "0EDD33D3C621E546455BD8BA1418BEC8"); /* Now check rollover on inplace cipher. 
*/ crypto_cipher_free(env1); env1 = crypto_cipher_new_with_iv( "\x80\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x00", "\xff\xff\xff\xff\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff"); crypto_cipher_crypt_inplace(env1, data2, 64); test_memeq_hex(data2, "2aed2bff0de54f9328efd070bf48f70a" "0EDD33D3C621E546455BD8BA1418BEC8" "93e2c5243d6839eac58503919192f7ae" "1908e67cafa08d508816659c2e693191"); crypto_cipher_free(env1); env1 = crypto_cipher_new_with_iv( "\x80\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x00", "\xff\xff\xff\xff\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff"); crypto_cipher_crypt_inplace(env1, data2, 64); test_assert(tor_mem_is_zero(data2, 64)); done: tor_free(mem_op_hex_tmp); if (env1) crypto_cipher_free(env1); if (env2) crypto_cipher_free(env2); tor_free(data1); tor_free(data2); tor_free(data3); }
/* * Compress (encrypt) a packet. * It's strange to call this a compressor, since the output is always * MPPE_OVHD + 2 bytes larger than the input. */ static int mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf, int isize, int osize) { struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; int proto; struct scatterlist sg_in[1], sg_out[1]; /* * Check that the protocol is in the range we handle. */ proto = PPP_PROTOCOL(ibuf); if (proto < 0x0021 || proto > 0x00fa) return 0; /* Make sure we have enough room to generate an encrypted packet. */ if (osize < isize + MPPE_OVHD + 2) { /* Drop the packet if we should encrypt it, but can't. */ printk(KERN_DEBUG "mppe_compress[%d]: osize too small! " "(have: %d need: %d)\n", state->unit, osize, isize + MPPE_OVHD + 2); return -1; } osize = isize + MPPE_OVHD + 2; /* * Copy over the PPP header and set control bits. */ obuf[0] = PPP_ADDRESS(ibuf); obuf[1] = PPP_CONTROL(ibuf); obuf[2] = PPP_COMP >> 8; /* isize + MPPE_OVHD + 1 */ obuf[3] = PPP_COMP; /* isize + MPPE_OVHD + 2 */ obuf += PPP_HDRLEN; state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; if (state->debug >= 7) printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit, state->ccount); obuf[0] = state->ccount >> 8; obuf[1] = state->ccount & 0xff; if (!state->stateful || /* stateless mode */ ((state->ccount & 0xff) == 0xff) || /* "flag" packet */ (state->bits & MPPE_BIT_FLUSHED)) { /* CCP Reset-Request */ /* We must rekey */ if (state->debug && state->stateful) printk(KERN_DEBUG "mppe_compress[%d]: rekeying\n", state->unit); mppe_rekey(state, 0); state->bits |= MPPE_BIT_FLUSHED; } obuf[0] |= state->bits; state->bits &= ~MPPE_BIT_FLUSHED; /* reset for next xmit */ obuf += MPPE_OVHD; ibuf += 2; /* skip to proto field */ isize -= 2; /* Encrypt packet */ setup_sg(sg_in, ibuf, isize); setup_sg(sg_out, obuf, osize); if (crypto_cipher_encrypt(state->arc4, sg_out, sg_in, isize) != 0) { printk(KERN_DEBUG "crypto_cipher_encrypt failed\n"); return -1; } state->stats.unc_bytes += isize; state->stats.unc_packets++; state->stats.comp_bytes += osize; state->stats.comp_packets++; return osize; }
/** * tlsv1_record_send - TLS record layer: Send a message * @rl: Pointer to TLS record layer data * @content_type: Content type (TLS_CONTENT_TYPE_*) * @buf: Buffer to send (with TLS_RECORD_HEADER_LEN octets reserved in the * beginning for record layer to fill in; payload filled in after this and * extra space in the end for HMAC). * @buf_size: Maximum buf size * @payload_len: Length of the payload * @out_len: Buffer for returning the used buf length * Returns: 0 on success, -1 on failure * * This function fills in the TLS record layer header, adds HMAC, and encrypts * the data using the current write cipher. */ int tlsv1_record_send(struct tlsv1_record_layer *rl, u8 content_type, u8 *buf, size_t buf_size, size_t payload_len, size_t *out_len) { u8 *pos, *ct_start, *length, *payload; struct crypto_hash *hmac; size_t clen; pos = buf; /* ContentType type */ ct_start = pos; *pos++ = content_type; /* ProtocolVersion version */ WPA_PUT_BE16(pos, TLS_VERSION); pos += 2; /* uint16 length */ length = pos; WPA_PUT_BE16(length, payload_len); pos += 2; /* opaque fragment[TLSPlaintext.length] */ payload = pos; pos += payload_len; if (rl->write_cipher_suite != TLS_NULL_WITH_NULL_NULL) { hmac = crypto_hash_init(rl->hash_alg, rl->write_mac_secret, rl->hash_size); if (hmac == NULL) { wpa_printf(MSG_DEBUG, "TLSv1: Record Layer - Failed " "to initialize HMAC"); return -1; } crypto_hash_update(hmac, rl->write_seq_num, TLS_SEQ_NUM_LEN); /* type + version + length + fragment */ crypto_hash_update(hmac, ct_start, pos - ct_start); clen = buf + buf_size - pos; if (clen < rl->hash_size) { wpa_printf(MSG_DEBUG, "TLSv1: Record Layer - Not " "enough room for MAC"); crypto_hash_finish(hmac, NULL, NULL); return -1; } if (crypto_hash_finish(hmac, pos, &clen) < 0) { wpa_printf(MSG_DEBUG, "TLSv1: Record Layer - Failed " "to calculate HMAC"); return -1; } wpa_hexdump(MSG_MSGDUMP, "TLSv1: Record Layer - Write HMAC", pos, clen); pos += clen; if (rl->iv_size) { size_t len = pos - payload; size_t pad; pad = (len + 1) % rl->iv_size; if (pad) pad = rl->iv_size - pad; if (pos + pad + 1 > buf + buf_size) { wpa_printf(MSG_DEBUG, "TLSv1: No room for " "block cipher padding"); return -1; } os_memset(pos, pad, pad + 1); pos += pad + 1; } if (crypto_cipher_encrypt(rl->write_cbc, payload, payload, pos - payload) < 0) return -1; } WPA_PUT_BE16(length, pos - length - 2); inc_byte_array(rl->write_seq_num, TLS_SEQ_NUM_LEN); *out_len = pos - buf; return 0; }
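/*
 * Worked instance of the CBC padding rule used in tlsv1_record_send() above
 * (TLS GenericBlockCipher: every pad byte, and the trailing pad-length byte,
 * carry the same value). The helper name and the example numbers are
 * illustrative only, not taken from the source.
 */
static size_t tls_cbc_pad_bytes(size_t len, size_t block_size)
{
	size_t pad = (len + 1) % block_size;  /* +1 accounts for the pad-length byte */

	if (pad)
		pad = block_size - pad;
	/* e.g. len = 20, block_size = 16: pad = 11, so 12 bytes are appended
	 * and the encrypted fragment grows to 32 bytes. */
	return pad + 1;
}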
int esp_output(struct sk_buff *skb) { int err; struct dst_entry *dst = skb->dst; struct xfrm_state *x = dst->xfrm; struct iphdr *iph, *top_iph; struct ip_esp_hdr *esph; struct crypto_tfm *tfm; struct esp_data *esp; struct sk_buff *trailer; struct udphdr *uh = NULL; struct xfrm_encap_tmpl *encap = NULL; int blksize; int clen; int alen; int nfrags; union { struct iphdr iph; char buf[60]; } tmp_iph; /* First, if the skb is not checksummed, complete checksum. */ if (skb->ip_summed == CHECKSUM_HW && skb_checksum_help(skb) == NULL) { err = -EINVAL; goto error_nolock; } spin_lock_bh(&x->lock); err = xfrm_check_output(x, skb, AF_INET); if (err) goto error; err = -ENOMEM; /* Strip IP header in transport mode. Save it. */ if (!x->props.mode) { iph = skb->nh.iph; memcpy(&tmp_iph, iph, iph->ihl*4); __skb_pull(skb, iph->ihl*4); } /* Now skb is pure payload to encrypt */ /* Round to block size */ clen = skb->len; esp = x->data; alen = esp->auth.icv_trunc_len; tfm = esp->conf.tfm; blksize = (crypto_tfm_alg_blocksize(tfm) + 3) & ~3; clen = (clen + 2 + blksize-1)&~(blksize-1); if (esp->conf.padlen) clen = (clen + esp->conf.padlen-1)&~(esp->conf.padlen-1); if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0) goto error; /* Fill padding... */ do { int i; for (i=0; i<clen-skb->len - 2; i++) *(u8*)(trailer->tail + i) = i+1; } while (0); *(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2; pskb_put(skb, trailer, clen - skb->len); encap = x->encap; iph = skb->nh.iph; if (x->props.mode) { top_iph = (struct iphdr*)skb_push(skb, x->props.header_len); esph = (struct ip_esp_hdr*)(top_iph+1); if (encap && encap->encap_type) { switch (encap->encap_type) { case UDP_ENCAP_ESPINUDP: uh = (struct udphdr*) esph; esph = (struct ip_esp_hdr*)(uh+1); top_iph->protocol = IPPROTO_UDP; break; default: printk(KERN_INFO "esp_output(): Unhandled encap: %u\n", encap->encap_type); top_iph->protocol = IPPROTO_ESP; break; } } else top_iph->protocol = IPPROTO_ESP; *(u8*)(trailer->tail - 1) = IPPROTO_IPIP; top_iph->ihl = 5; top_iph->version = 4; top_iph->tos = iph->tos; /* DS disclosed */ if (x->props.flags & XFRM_STATE_NOECN) IP_ECN_clear(top_iph); top_iph->tot_len = htons(skb->len + alen); top_iph->frag_off = iph->frag_off&htons(IP_DF); if (!(top_iph->frag_off)) ip_select_ident(top_iph, dst, 0); top_iph->ttl = iph->ttl; /* TTL disclosed */ top_iph->check = 0; top_iph->saddr = x->props.saddr.a4; top_iph->daddr = x->id.daddr.a4; memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); } else { esph = (struct ip_esp_hdr*)skb_push(skb, x->props.header_len); top_iph = (struct iphdr*)skb_push(skb, iph->ihl*4); memcpy(top_iph, &tmp_iph, iph->ihl*4); if (encap && encap->encap_type) { switch (encap->encap_type) { case UDP_ENCAP_ESPINUDP: uh = (struct udphdr*) esph; esph = (struct ip_esp_hdr*)(uh+1); top_iph->protocol = IPPROTO_UDP; break; default: printk(KERN_INFO "esp_output(): Unhandled encap: %u\n", encap->encap_type); top_iph->protocol = IPPROTO_ESP; break; } } else top_iph->protocol = IPPROTO_ESP; iph = &tmp_iph.iph; top_iph->tot_len = htons(skb->len + alen); top_iph->check = 0; top_iph->frag_off = iph->frag_off; *(u8*)(trailer->tail - 1) = iph->protocol; } /* this is non-NULL only with UDP Encapsulation */ if (encap && uh) { uh->source = encap->encap_sport; uh->dest = encap->encap_dport; uh->len = htons(skb->len + alen - sizeof(struct iphdr)); uh->check = 0; } esph->spi = x->id.spi; esph->seq_no = htonl(++x->replay.oseq); if (esp->conf.ivlen) crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); 
do { struct scatterlist sgbuf[nfrags>MAX_SG_ONSTACK ? 0 : nfrags]; struct scatterlist *sg = sgbuf; if (unlikely(nfrags > MAX_SG_ONSTACK)) { sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC); if (!sg) goto error; } skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen); crypto_cipher_encrypt(tfm, sg, sg, clen); if (unlikely(sg != sgbuf)) kfree(sg); } while (0); if (esp->conf.ivlen) { memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); } if (esp->auth.icv_full_len) { esp->auth.icv(esp, skb, (u8*)esph-skb->data, sizeof(struct ip_esp_hdr) + esp->conf.ivlen+clen, trailer->tail); pskb_put(skb, trailer, alen); } ip_send_check(top_iph); skb->nh.raw = skb->data; x->curlft.bytes += skb->len; x->curlft.packets++; spin_unlock_bh(&x->lock); if ((skb->dst = dst_pop(dst)) == NULL) { err = -EHOSTUNREACH; goto error_nolock; } return NET_XMIT_BYPASS; error: spin_unlock_bh(&x->lock); error_nolock: kfree_skb(skb); return err; }
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) { int err; int hdr_len; struct ipv6hdr *top_iph; struct ipv6_esp_hdr *esph; struct crypto_tfm *tfm; struct esp_data *esp; struct sk_buff *trailer; int blksize; int clen; int alen; int nfrags; esp = x->data; hdr_len = skb->h.raw - skb->data + sizeof(*esph) + esp->conf.ivlen; /* Strip IP+ESP header. */ __skb_pull(skb, hdr_len); /* Now skb is pure payload to encrypt */ err = -ENOMEM; /* Round to block size */ clen = skb->len; alen = esp->auth.icv_trunc_len; tfm = esp->conf.tfm; blksize = ALIGN(crypto_tfm_alg_blocksize(tfm), 4); clen = ALIGN(clen + 2, blksize); if (esp->conf.padlen) clen = ALIGN(clen, esp->conf.padlen); if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0) { goto error; } /* Fill padding... */ do { int i; for (i=0; i<clen-skb->len - 2; i++) *(u8*)(trailer->tail + i) = i+1; } while (0); *(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2; pskb_put(skb, trailer, clen - skb->len); top_iph = (struct ipv6hdr *)__skb_push(skb, hdr_len); esph = (struct ipv6_esp_hdr *)skb->h.raw; top_iph->payload_len = htons(skb->len + alen - sizeof(*top_iph)); *(u8*)(trailer->tail - 1) = *skb->nh.raw; *skb->nh.raw = IPPROTO_ESP; esph->spi = x->id.spi; esph->seq_no = htonl(++x->replay.oseq); if (esp->conf.ivlen) crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); do { struct scatterlist *sg = &esp->sgbuf[0]; if (unlikely(nfrags > ESP_NUM_FAST_SG)) { sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC); if (!sg) goto error; } skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen); crypto_cipher_encrypt(tfm, sg, sg, clen); if (unlikely(sg != &esp->sgbuf[0])) kfree(sg); } while (0); if (esp->conf.ivlen) { memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); } if (esp->auth.icv_full_len) { esp->auth.icv(esp, skb, (u8*)esph-skb->data, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen+clen, trailer->tail); pskb_put(skb, trailer, alen); } err = 0; error: return err; }
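/*
 * Worked instance of the ESP trailer sizing used in the esp_output/esp6_output
 * snippets above (numbers are illustrative, not from the source): with a
 * 37-byte payload and a 16-byte cipher block, clen = ALIGN(37 + 2, 16) = 48,
 * so 9 pad bytes (1, 2, ... 9) are written, followed by the pad-length byte
 * (9) and the next-header byte. The helper name is hypothetical.
 */
static inline int esp_clen_example(int payload_len, int blksize, int padlen)
{
	int clen = ALIGN(payload_len + 2, blksize); /* payload + pad-len + next-hdr */

	if (padlen)
		clen = ALIGN(clen, padlen);         /* optional extra alignment */
	return clen;
}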
/** Run AES performance benchmarks. */ static void bench_aes(void) { int len, i; char *b1, *b2, *temp; crypto_cipher_t *c; uint64_t start, end; const int bytes_per_iter = (1<<24); reset_perftime(); c = crypto_cipher_new(NULL); memmgr_destroy(); memmgr_init_check_shared_mem(SHARED_SIZE, UIO_DEVICE, BASE_ADDRESS); FPGA_AES *cipher1 = NULL; char default_iv[] = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"; cipher1 = fpga_aes_new_short_16((char*)crypto_cipher_get_key(c), default_iv, 2); if(cipher1 == NULL){ printf("\nCould not allocate cipher"); return; } b1 = memmgr_alloc(8192*2+16); /* tor_malloc_zero(len); */ b2 = memmgr_alloc(8192*2+16); /* tor_malloc_zero(len); */ temp = memmgr_alloc(8192*2+16); printf("\nKey: 0x"); for(i=0; i<16; i++){ printf("%02x", cipher1->key[i]); } printf("\n"); /* printf("\nBytes per iter: %i", bytes_per_iter); */ for (len = 2; len <= 8192; len *= 2) { int iters = bytes_per_iter / len; /* printf("\nIterations: %i\n", iters); */ memset(b1, 0, len); memset(b2, 0, len); memset(temp, 0, len); printf("\nb2: 0x"); for(i=0; i<len; i++){ printf("%02x", b2[i]); } start = perftime(); for (i = 0; i < iters; ++i) { /* crypto_cipher_encrypt(c, b1, b2, len); if(i == iters - 1){ printf(" i: %i ", i); } */ Aes_encrypt_memmgr(cipher1, b1, b2, len); } end = perftime(); memset(b2, 0, len); printf("\nFinal iv/iters: 0x%08x", iters); /* for(i=1; i<16; i++){ printf("%02x", cipher1->iv[i]); } */ for(i=0; i<iters; ++i){ crypto_cipher_encrypt(c, temp, b2, len); } int incorrect = 0; printf("\nb1: 0x"); for(i=0; i<len; i++){ printf("%02x", b2[i]); } for(i=0; i<len; i++){ if(temp[i] != b1[i]){ incorrect++; printf("\nIncorrect: 0x%02x - 0x%02x fabric", temp[i], b1[i]); } } /* tor_free(b1); tor_free(b2); */ printf("%d bytes: %.2f nsec per byte\n", len, NANOCOUNT(start, end, iters*len)); printf("Num incorrect: %i\n", incorrect); /* printf("start: %lu, end: %lu\n", start, end); */ } memmgr_free(b1); memmgr_free(b2); memmgr_free(temp); crypto_cipher_free(c); fpga_aes_free(cipher1); }
static int esp_output(struct sk_buff *skb) { int err; struct dst_entry *dst = skb->dst; struct xfrm_state *x = dst->xfrm; struct iphdr *top_iph; struct ip_esp_hdr *esph; struct crypto_tfm *tfm; struct esp_data *esp; struct sk_buff *trailer; int blksize; int clen; int alen; int nfrags; /* Strip IP+ESP header. */ __skb_pull(skb, skb->h.raw - skb->data); /* Now skb is pure payload to encrypt */ err = -ENOMEM; /* Round to block size */ clen = skb->len; esp = x->data; alen = esp->auth.icv_trunc_len; tfm = esp->conf.tfm; blksize = (crypto_tfm_alg_blocksize(tfm) + 3) & ~3; clen = (clen + 2 + blksize-1)&~(blksize-1); if (esp->conf.padlen) clen = (clen + esp->conf.padlen-1)&~(esp->conf.padlen-1); if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0) goto error; /* Fill padding... */ do { int i; for (i=0; i<clen-skb->len - 2; i++) *(u8*)(trailer->tail + i) = i+1; } while (0); *(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2; pskb_put(skb, trailer, clen - skb->len); __skb_push(skb, skb->data - skb->nh.raw); top_iph = skb->nh.iph; esph = (struct ip_esp_hdr *)(skb->nh.raw + top_iph->ihl*4); top_iph->tot_len = htons(skb->len + alen); *(u8*)(trailer->tail - 1) = top_iph->protocol; /* this is non-NULL only with UDP Encapsulation */ if (x->encap) { struct xfrm_encap_tmpl *encap = x->encap; struct udphdr *uh; u32 *udpdata32; uh = (struct udphdr *)esph; uh->source = encap->encap_sport; uh->dest = encap->encap_dport; uh->len = htons(skb->len + alen - top_iph->ihl*4); uh->check = 0; switch (encap->encap_type) { default: case UDP_ENCAP_ESPINUDP: esph = (struct ip_esp_hdr *)(uh + 1); break; case UDP_ENCAP_ESPINUDP_NON_IKE: udpdata32 = (u32 *)(uh + 1); udpdata32[0] = udpdata32[1] = 0; esph = (struct ip_esp_hdr *)(udpdata32 + 2); break; } top_iph->protocol = IPPROTO_UDP; } else top_iph->protocol = IPPROTO_ESP; esph->spi = x->id.spi; esph->seq_no = htonl(++x->replay.oseq); if (esp->conf.ivlen) crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); do { struct scatterlist *sg = &esp->sgbuf[0]; if (unlikely(nfrags > ESP_NUM_FAST_SG)) { sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC); if (!sg) goto error; } skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen); crypto_cipher_encrypt(tfm, sg, sg, clen); if (unlikely(sg != &esp->sgbuf[0])) kfree(sg); } while (0); if (esp->conf.ivlen) { memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); } if (esp->auth.icv_full_len) { esp->auth.icv(esp, skb, (u8*)esph-skb->data, sizeof(struct ip_esp_hdr) + esp->conf.ivlen+clen, trailer->tail); pskb_put(skb, trailer, alen); } ip_send_check(top_iph); err = 0; error: return err; }
static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct prism2_wep_data *wep = priv; u32 klen, len; u8 key[WEP_KEY_LEN + 3]; u8 *pos; cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); #if((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)) || (OPENSUSE_SLED)) struct blkcipher_desc desc = {.tfm = wep->tx_tfm}; #endif u32 crc; u8 *icv; struct scatterlist sg; if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 || skb->len < hdr_len) return -1; len = skb->len - hdr_len; pos = skb_push(skb, 4); memmove(pos, pos + 4, hdr_len); pos += hdr_len; klen = 3 + wep->key_len; wep->iv++; if ((wep->iv & 0xff00) == 0xff00) { u8 B = (wep->iv >> 16) & 0xff; if (B >= 3 && B < klen) wep->iv += 0x0100; } *pos++ = key[0] = (wep->iv >> 16) & 0xff; *pos++ = key[1] = (wep->iv >> 8) & 0xff; *pos++ = key[2] = wep->iv & 0xff; *pos++ = wep->key_idx << 6; memcpy(key + 3, wep->key, wep->key_len); if (!tcb_desc->bHwSec) { #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)) crc = ~crc32_le(~0, pos, len); #else crc = ~ether_crc_le(len, pos); #endif icv = skb_put(skb, 4); icv[0] = crc; icv[1] = crc >> 8; icv[2] = crc >> 16; icv[3] = crc >> 24; #if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED)) crypto_cipher_setkey(wep->tfm, key, klen); sg.page = virt_to_page(pos); sg.offset = offset_in_page(pos); sg.length = len + 4; crypto_cipher_encrypt(wep->tfm, &sg, &sg, len + 4); return 0; #else crypto_blkcipher_setkey(wep->tx_tfm, key, klen); #if(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) sg.page = virt_to_page(pos); sg.offset = offset_in_page(pos); sg.length = len + 4; #else sg_init_one(&sg, pos, len+4); #endif return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); #endif } return 0; }
/* Perform WEP encryption on given skb that has at least 4 bytes of headroom * for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be transmitted, * so the payload length increases by 8 bytes. * * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data)) */ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct prism2_wep_data *wep = priv; u32 klen, len; u8 key[WEP_KEY_LEN + 3]; u8 *pos; cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); #if ( !defined(BUILT_IN_CRYPTO) && ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)) || (OPENSUSE_SLED)) ) struct blkcipher_desc desc = {.tfm = wep->tx_tfm}; #endif u32 crc; u8 *icv; #ifdef _RTL8192_EXT_PATCH_ u8 broadcastaddr[6] = {0xff,0xff,0xff,0xff,0xff,0xff}; struct rtllib_hdr_3addr* tmp_header = (struct rtllib_hdr_3addr*)(skb->data); u8 is_broadcast_data = 0; u8 is_multicast_data = 0; #endif struct scatterlist sg; if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 || skb->len < hdr_len){ printk("Error!!!headroom=%d tailroom=%d skblen=%d hdr_len=%d\n",skb_headroom(skb),skb_tailroom(skb),skb->len,hdr_len); return -1; } #ifdef _RTL8192_EXT_PATCH_ if(tcb_desc->badhoc==0){ if(memcmp(tmp_header->addr1,broadcastaddr,6) == 0){ is_broadcast_data = 1; tcb_desc->bHwSec = 0; } if(is_multicast_ether_addr(tmp_header->addr1)){ is_multicast_data = 1; tcb_desc->bHwSec = 0; } } #endif len = skb->len - hdr_len; pos = skb_push(skb, 4); memmove(pos, pos + 4, hdr_len); pos += hdr_len; klen = 3 + wep->key_len; wep->iv++; /* Fluhrer, Mantin, and Shamir have reported weaknesses in the key * scheduling algorithm of RC4. At least IVs (KeyByte + 3, 0xff, N) * can be used to speed up attacks, so avoid using them. */ if ((wep->iv & 0xff00) == 0xff00) { u8 B = (wep->iv >> 16) & 0xff; if (B >= 3 && B < klen) wep->iv += 0x0100; } /* Prepend 24-bit IV to RC4 key and TX frame */ *pos++ = key[0] = (wep->iv >> 16) & 0xff; *pos++ = key[1] = (wep->iv >> 8) & 0xff; *pos++ = key[2] = wep->iv & 0xff; *pos++ = wep->key_idx << 6; /* Copy rest of the WEP key (the secret part) */ memcpy(key + 3, wep->key, wep->key_len); if (!tcb_desc->bHwSec) { /* Append little-endian CRC32 and encrypt it to produce ICV */ #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)) crc = ~crc32_le(~0, pos, len); #else crc = ~ether_crc_le(len, pos); #endif icv = skb_put(skb, 4); icv[0] = crc; icv[1] = crc >> 8; icv[2] = crc >> 16; icv[3] = crc >> 24; #if(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) sg.page = virt_to_page(pos); sg.offset = offset_in_page(pos); sg.length = len + 4; #else sg_init_one(&sg, pos, len+4); #endif #if ( defined(BUILT_IN_CRYPTO) || ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED)) ) crypto_cipher_setkey(wep->tfm, key, klen); crypto_cipher_encrypt(wep->tfm, &sg, &sg, len + 4); return 0; #else crypto_blkcipher_setkey(wep->tx_tfm, key, klen); return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); #endif } return 0; }
int esp6_output(struct sk_buff *skb) { int err; int hdr_len = 0; struct dst_entry *dst = skb->dst; struct xfrm_state *x = dst->xfrm; struct ipv6hdr *iph = NULL, *top_iph; struct ipv6_esp_hdr *esph; struct crypto_tfm *tfm; struct esp_data *esp; struct sk_buff *trailer; int blksize; int clen; int alen; int nfrags; u8 *prevhdr; u8 nexthdr = 0; /* First, if the skb is not checksummed, complete checksum. */ if (skb->ip_summed == CHECKSUM_HW && skb_checksum_help(skb) == NULL) { err = -EINVAL; goto error_nolock; } spin_lock_bh(&x->lock); err = xfrm_check_output(x, skb, AF_INET6); if (err) goto error; err = -ENOMEM; /* Strip IP header in transport mode. Save it. */ if (!x->props.mode) { hdr_len = ip6_find_1stfragopt(skb, &prevhdr); nexthdr = *prevhdr; *prevhdr = IPPROTO_ESP; iph = kmalloc(hdr_len, GFP_ATOMIC); if (!iph) { err = -ENOMEM; goto error; } memcpy(iph, skb->nh.raw, hdr_len); __skb_pull(skb, hdr_len); } /* Now skb is pure payload to encrypt */ /* Round to block size */ clen = skb->len; esp = x->data; alen = esp->auth.icv_trunc_len; tfm = esp->conf.tfm; blksize = (crypto_tfm_alg_blocksize(tfm) + 3) & ~3; clen = (clen + 2 + blksize-1)&~(blksize-1); if (esp->conf.padlen) clen = (clen + esp->conf.padlen-1)&~(esp->conf.padlen-1); if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0) { if (!x->props.mode && iph) kfree(iph); goto error; } /* Fill padding... */ do { int i; for (i=0; i<clen-skb->len - 2; i++) *(u8*)(trailer->tail + i) = i+1; } while (0); *(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2; pskb_put(skb, trailer, clen - skb->len); if (x->props.mode) { iph = skb->nh.ipv6h; top_iph = (struct ipv6hdr*)skb_push(skb, x->props.header_len); esph = (struct ipv6_esp_hdr*)(top_iph+1); *(u8*)(trailer->tail - 1) = IPPROTO_IPV6; top_iph->version = 6; top_iph->priority = iph->priority; top_iph->flow_lbl[0] = iph->flow_lbl[0]; top_iph->flow_lbl[1] = iph->flow_lbl[1]; top_iph->flow_lbl[2] = iph->flow_lbl[2]; if (x->props.flags & XFRM_STATE_NOECN) IP6_ECN_clear(top_iph); top_iph->nexthdr = IPPROTO_ESP; top_iph->payload_len = htons(skb->len + alen - sizeof(struct ipv6hdr)); top_iph->hop_limit = iph->hop_limit; ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr); ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr); } else { esph = (struct ipv6_esp_hdr*)skb_push(skb, x->props.header_len); skb->h.raw = (unsigned char*)esph; top_iph = (struct ipv6hdr*)skb_push(skb, hdr_len); memcpy(top_iph, iph, hdr_len); kfree(iph); top_iph->payload_len = htons(skb->len + alen - sizeof(struct ipv6hdr)); *(u8*)(trailer->tail - 1) = nexthdr; } esph->spi = x->id.spi; esph->seq_no = htonl(++x->replay.oseq); if (esp->conf.ivlen) crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); do { struct scatterlist sgbuf[nfrags>MAX_SG_ONSTACK ? 
0 : nfrags]; struct scatterlist *sg = sgbuf; if (unlikely(nfrags > MAX_SG_ONSTACK)) { sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC); if (!sg) goto error; } skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen); crypto_cipher_encrypt(tfm, sg, sg, clen); if (unlikely(sg != sgbuf)) kfree(sg); } while (0); if (esp->conf.ivlen) { memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); } if (esp->auth.icv_full_len) { esp->auth.icv(esp, skb, (u8*)esph-skb->data, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen+clen, trailer->tail); pskb_put(skb, trailer, alen); } skb->nh.raw = skb->data; x->curlft.bytes += skb->len; x->curlft.packets++; spin_unlock_bh(&x->lock); if ((skb->dst = dst_pop(dst)) == NULL) { err = -EHOSTUNREACH; goto error_nolock; } return NET_XMIT_BYPASS; error: spin_unlock_bh(&x->lock); error_nolock: kfree_skb(skb); return err; }
/** * tlsv1_record_send - TLS record layer: Send a message * @rl: Pointer to TLS record layer data * @content_type: Content type (TLS_CONTENT_TYPE_*) * @buf: Buffer for the generated TLS message (needs to have extra space for * header, IV (TLS v1.1), and HMAC) * @buf_size: Maximum buf size * @payload: Payload to be sent * @payload_len: Length of the payload * @out_len: Buffer for returning the used buf length * Returns: 0 on success, -1 on failure * * This function fills in the TLS record layer header, adds HMAC, and encrypts * the data using the current write cipher. */ int tlsv1_record_send(struct tlsv1_record_layer *rl, u8 content_type, u8 *buf, size_t buf_size, const u8 *payload, size_t payload_len, size_t *out_len) { u8 *pos, *ct_start, *length, *cpayload; struct crypto_hash *hmac; size_t clen; int explicit_iv; pos = buf; if (pos + TLS_RECORD_HEADER_LEN > buf + buf_size) return -1; /* ContentType type */ ct_start = pos; *pos++ = content_type; /* ProtocolVersion version */ WPA_PUT_BE16(pos, rl->tls_version); pos += 2; /* uint16 length */ length = pos; WPA_PUT_BE16(length, payload_len); pos += 2; cpayload = pos; explicit_iv = rl->write_cipher_suite != TLS_NULL_WITH_NULL_NULL && rl->iv_size && rl->tls_version == TLS_VERSION_1_1; if (explicit_iv) { /* opaque IV[Cipherspec.block_length] */ if (pos + rl->iv_size > buf + buf_size) return -1; /* * Use random number R per the RFC 4346, 6.2.3.2 CBC Block * Cipher option 2a. */ if (os_get_random(pos, rl->iv_size)) return -1; pos += rl->iv_size; } /* * opaque fragment[TLSPlaintext.length] * (opaque content[TLSCompressed.length] in GenericBlockCipher) */ if (pos + payload_len > buf + buf_size) return -1; os_memmove(pos, payload, payload_len); pos += payload_len; if (rl->write_cipher_suite != TLS_NULL_WITH_NULL_NULL) { /* * MAC calculated over seq_num + TLSCompressed.type + * TLSCompressed.version + TLSCompressed.length + * TLSCompressed.fragment */ hmac = crypto_hash_init(rl->hash_alg, rl->write_mac_secret, rl->hash_size); if (hmac == NULL) { wpa_printf(MSG_DEBUG, "TLSv1: Record Layer - Failed " "to initialize HMAC"); return -1; } crypto_hash_update(hmac, rl->write_seq_num, TLS_SEQ_NUM_LEN); /* type + version + length + fragment */ crypto_hash_update(hmac, ct_start, TLS_RECORD_HEADER_LEN); crypto_hash_update(hmac, payload, payload_len); clen = buf + buf_size - pos; if (clen < rl->hash_size) { wpa_printf(MSG_DEBUG, "TLSv1: Record Layer - Not " "enough room for MAC"); crypto_hash_finish(hmac, NULL, NULL); return -1; } if (crypto_hash_finish(hmac, pos, &clen) < 0) { wpa_printf(MSG_DEBUG, "TLSv1: Record Layer - Failed " "to calculate HMAC"); return -1; } wpa_hexdump(MSG_MSGDUMP, "TLSv1: Record Layer - Write HMAC", pos, clen); pos += clen; if (rl->iv_size) { size_t len = pos - cpayload; size_t pad; pad = (len + 1) % rl->iv_size; if (pad) pad = rl->iv_size - pad; if (pos + pad + 1 > buf + buf_size) { wpa_printf(MSG_DEBUG, "TLSv1: No room for " "block cipher padding"); return -1; } os_memset(pos, pad, pad + 1); pos += pad + 1; } if (crypto_cipher_encrypt(rl->write_cbc, cpayload, cpayload, pos - cpayload) < 0) return -1; } WPA_PUT_BE16(length, pos - length - 2); inc_byte_array(rl->write_seq_num, TLS_SEQ_NUM_LEN); *out_len = pos - buf; return 0; }