static int padlock_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
                              const unsigned char *in_arg, size_t nbytes)
{
    struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
    size_t chunk;

    /*
     * ctx->num is maintained in byte-oriented modes, such as CFB and OFB...
     */
    if ((chunk = EVP_CIPHER_CTX_num(ctx))) {   /* borrow chunk variable */
        unsigned char *ivp = EVP_CIPHER_CTX_iv_noconst(ctx);

        if (chunk >= AES_BLOCK_SIZE)
            return 0;           /* bogus value */

        /* Consume keystream bytes left over from the previous call. */
        while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
            *(out_arg++) = *(in_arg++) ^ ivp[chunk];
            chunk++, nbytes--;
        }

        EVP_CIPHER_CTX_set_num(ctx, chunk % AES_BLOCK_SIZE);
    }

    if (nbytes == 0)
        return 1;

    memcpy(cdata->iv, EVP_CIPHER_CTX_iv(ctx), AES_BLOCK_SIZE);

    /* Process all complete blocks through the PadLock engine. */
    if ((chunk = nbytes & ~(AES_BLOCK_SIZE - 1))) {
        if (!padlock_ofb_encrypt(out_arg, in_arg, cdata, chunk))
            return 0;
        nbytes -= chunk;
    }

    /* Generate one more keystream block for the trailing partial block. */
    if (nbytes) {
        unsigned char *ivp = cdata->iv;

        out_arg += chunk;
        in_arg += chunk;
        EVP_CIPHER_CTX_set_num(ctx, nbytes);

        padlock_reload_key();   /* empirically found */
        padlock_aes_block(ivp, ivp, cdata);
        padlock_reload_key();   /* empirically found */

        while (nbytes) {
            *(out_arg++) = *(in_arg++) ^ *ivp;
            ivp++, nbytes--;
        }
    }

    memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), cdata->iv, AES_BLOCK_SIZE);

    return 1;
}
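/*
 * For reference, the ctx->num bookkeeping above follows the usual software
 * convention for byte-oriented modes: num records how many bytes of the
 * current keystream block have already been consumed.  The sketch below is
 * a hypothetical, hardware-free illustration of one OFB step under that
 * convention; encrypt_block() stands in for a single-block AES encryption
 * primitive and is not part of the engine code.
 */
static void ofb_step_sketch(unsigned char *out, const unsigned char *in,
                            size_t len, unsigned char iv[16], int *num,
                            void (*encrypt_block)(unsigned char block[16]))
{
    size_t i;

    for (i = 0; i < len; i++) {
        if (*num == 0)
            encrypt_block(iv);          /* refill keystream: iv = E_K(iv) */
        out[i] = in[i] ^ iv[*num];      /* XOR one keystream byte */
        *num = (*num + 1) % 16;         /* wrap at the block boundary */
    }
}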
int
padlock_aes_cipher_setkey (void *_ctx, const void *userkey, size_t keysize)
{
  struct padlock_ctx *ctx = _ctx;
  struct padlock_cipher_data *pce;
#ifdef HAVE_LIBNETTLE
  struct aes_ctx nc;
#endif

  memset (_ctx, 0, sizeof (struct padlock_cipher_data));

  pce = ALIGN16 (&ctx->expanded_key);

  pce->cword.b.encdec = (ctx->enc == 0);

  switch (keysize)
    {
    case 16:
      pce->cword.b.ksize = 0;
      pce->cword.b.rounds = 10;
      memcpy (pce->ks.rd_key, userkey, 16);
      pce->cword.b.keygen = 0;
      break;
#ifdef HAVE_LIBNETTLE
    case 24:
      pce->cword.b.ksize = 1;
      pce->cword.b.rounds = 12;
      goto common_24_32;
    case 32:
      pce->cword.b.ksize = 2;
      pce->cword.b.rounds = 14;
    common_24_32:
      /* expand key using nettle */
      if (ctx->enc)
        aes_set_encrypt_key (&nc, keysize, userkey);
      else
        aes_set_decrypt_key (&nc, keysize, userkey);

      memcpy (pce->ks.rd_key, nc.keys, sizeof (nc.keys));
      pce->ks.rounds = nc.nrounds;

      pce->cword.b.keygen = 1;
      break;
#endif
    default:
      return gnutls_assert_val (GNUTLS_E_ENCRYPTION_FAILED);
    }

  padlock_reload_key ();

  return 0;
}
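/*
 * The switch above encodes the standard AES parameters into the PadLock
 * control word: 16-byte keys use 10 rounds (ksize 0), 24-byte keys 12
 * rounds (ksize 1) and 32-byte keys 14 rounds (ksize 2).  Only the 128-bit
 * schedule can be expanded by the hardware itself (keygen = 0); longer keys
 * are expanded in software via nettle and loaded verbatim (keygen = 1).
 * The stand-alone helper below merely restates that mapping for
 * illustration; it is not part of the GnuTLS code.
 */
static int
aes_cword_params_sketch (size_t keysize, int *ksize, int *rounds, int *keygen)
{
  switch (keysize)
    {
    case 16:
      *ksize = 0; *rounds = 10; *keygen = 0;  /* hardware key expansion */
      return 0;
    case 24:
      *ksize = 1; *rounds = 12; *keygen = 1;  /* software key expansion */
      return 0;
    case 32:
      *ksize = 2; *rounds = 14; *keygen = 1;  /* software key expansion */
      return 0;
    default:
      return -1;                              /* unsupported key length */
    }
}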
/* Prepare the encryption key for PadLock usage */
static int padlock_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                                const unsigned char *iv, int enc)
{
    struct padlock_cipher_data *cdata;
    int key_len = EVP_CIPHER_CTX_key_length(ctx) * 8;
    unsigned long mode = EVP_CIPHER_CTX_mode(ctx);

    if (key == NULL)
        return 0;               /* ERROR */

    cdata = ALIGNED_CIPHER_DATA(ctx);
    memset(cdata, 0, sizeof(*cdata));

    /* Prepare Control word. */
    if (mode == EVP_CIPH_OFB_MODE || mode == EVP_CIPH_CTR_MODE)
        cdata->cword.b.encdec = 0;
    else
        cdata->cword.b.encdec = (EVP_CIPHER_CTX_encrypting(ctx) == 0);
    cdata->cword.b.rounds = 10 + (key_len - 128) / 32;
    cdata->cword.b.ksize = (key_len - 128) / 64;

    switch (key_len) {
    case 128:
        /*
         * PadLock can generate an extended key for AES128 in hardware
         */
        memcpy(cdata->ks.rd_key, key, AES_KEY_SIZE_128);
        cdata->cword.b.keygen = 0;
        break;

    case 192:
    case 256:
        /*
         * Generate an extended AES key in software. Needed for AES192/AES256
         */
        /*
         * Well, the above applies to Stepping 8 CPUs and is listed as
         * hardware errata. They most likely will fix it at some point and
         * then a check for stepping would be due here.
         */
        if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE) && !enc)
            AES_set_decrypt_key(key, key_len, &cdata->ks);
        else
            AES_set_encrypt_key(key, key_len, &cdata->ks);
# ifndef AES_ASM
        /*
         * OpenSSL C functions use byte-swapped extended key.
         */
        padlock_key_bswap(&cdata->ks);
# endif
        cdata->cword.b.keygen = 1;
        break;

    default:
        /* ERROR */
        return 0;
    }

    /*
     * This is done to cover for cases when user reuses the
     * context for new key. The catch is that if we don't do
     * this, padlock_aes_cipher might proceed with old key...
     */
    padlock_reload_key();

    return 1;
}
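/*
 * padlock_key_bswap() above exists because OpenSSL's C key schedule stores
 * round-key words in a byte order different from what the PadLock engine
 * consumes.  A minimal in-place 32-bit byte swap over the expanded key is
 * sketched below; the word count and swap direction are assumptions for
 * illustration, not the engine's actual definition.
 */
#include <stdint.h>

/*
 * Hypothetical stand-in for padlock_key_bswap(): reverse the bytes of
 * every 32-bit word in the key schedule.  For AES, nwords would be
 * 4 * (rounds + 1).
 */
static void key_bswap_sketch(uint32_t *ks, size_t nwords)
{
    size_t i;

    for (i = 0; i < nwords; i++) {
        uint32_t w = ks[i];
        ks[i] = (w << 24) | ((w & 0xff00U) << 8) |
                ((w >> 8) & 0xff00U) | (w >> 24);
    }
}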
static int padlock_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
                              const unsigned char *in_arg, size_t nbytes)
{
    struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
    size_t chunk;

    /* Finish a partial block left over from the previous call. */
    if ((chunk = EVP_CIPHER_CTX_num(ctx))) {   /* borrow chunk variable */
        unsigned char *ivp = EVP_CIPHER_CTX_iv_noconst(ctx);

        if (chunk >= AES_BLOCK_SIZE)
            return 0;           /* bogus value */

        if (EVP_CIPHER_CTX_encrypting(ctx)) {
            while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
                ivp[chunk] = *(out_arg++) = *(in_arg++) ^ ivp[chunk];
                chunk++, nbytes--;
            }
        } else {
            while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
                unsigned char c = *(in_arg++);
                *(out_arg++) = c ^ ivp[chunk];
                ivp[chunk++] = c, nbytes--;
            }
        }

        EVP_CIPHER_CTX_set_num(ctx, chunk % AES_BLOCK_SIZE);
    }

    if (nbytes == 0)
        return 1;

    memcpy(cdata->iv, EVP_CIPHER_CTX_iv(ctx), AES_BLOCK_SIZE);

    /* Process all complete blocks through the PadLock engine. */
    if ((chunk = nbytes & ~(AES_BLOCK_SIZE - 1))) {
        if (!padlock_cfb_encrypt(out_arg, in_arg, cdata, chunk))
            return 0;
        nbytes -= chunk;
    }

    if (nbytes) {
        unsigned char *ivp = cdata->iv;

        out_arg += chunk;
        in_arg += chunk;
        EVP_CIPHER_CTX_set_num(ctx, nbytes);
        if (cdata->cword.b.encdec) {
            /*
             * The keystream block is always computed in the encrypt
             * direction, so temporarily flip the control word when
             * decrypting.
             */
            cdata->cword.b.encdec = 0;
            padlock_reload_key();
            padlock_aes_block(ivp, ivp, cdata);
            cdata->cword.b.encdec = 1;
            padlock_reload_key();
            while (nbytes) {
                unsigned char c = *(in_arg++);
                *(out_arg++) = c ^ *ivp;
                *(ivp++) = c, nbytes--;
            }
        } else {
            padlock_reload_key();
            padlock_aes_block(ivp, ivp, cdata);
            padlock_reload_key();
            while (nbytes) {
                *ivp = *(out_arg++) = *(in_arg++) ^ *ivp;
                ivp++, nbytes--;
            }
        }
    }

    memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), cdata->iv, AES_BLOCK_SIZE);

    return 1;
}
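/*
 * The two tail loops above differ only in what is fed back into the IV:
 * CFB feeds ciphertext back, so encryption stores the freshly produced
 * output byte into the IV buffer, while decryption must save the incoming
 * ciphertext byte before it can be overwritten (in and out may alias).
 * The hypothetical sketch below shows the same asymmetry without the
 * PadLock specifics; ks holds the current keystream block and is left
 * holding the ciphertext for the next feedback step.
 */
static void cfb_tail_sketch(unsigned char *out, const unsigned char *in,
                            size_t len, unsigned char *ks, int enc)
{
    size_t i;

    for (i = 0; i < len; i++) {
        if (enc) {
            ks[i] = out[i] = in[i] ^ ks[i]; /* feed ciphertext back */
        } else {
            unsigned char c = in[i];        /* save ciphertext first */
            out[i] = c ^ ks[i];
            ks[i] = c;                      /* ...then feed it back */
        }
    }
}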