/* We are actually checking for SHA512 */
static int check_phe_sha512(unsigned int edx)
{
	return ((edx & (0x3 << 25)) == (0x3 << 25));
}
static int check_padlock(unsigned int edx)
{
	return ((edx & (0x3 << 6)) == (0x3 << 6));
}
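/*
 * Each PadLock feature is reported as a pair of adjacent EDX bits: the low
 * bit means "present in hardware" and the high bit means "enabled".  That
 * is why the tests above mask with (0x3 << N) and require both bits to be
 * set.  A minimal sketch of a shared helper; the PADLOCK_HAS() name is
 * illustrative and not part of the real source.
 */
#define PADLOCK_HAS(edx, bit) (((edx) & (0x3u << (bit))) == (0x3u << (bit)))
/* e.g. check_padlock() above reduces to PADLOCK_HAS(edx, 6), the SHA512
 * test to PADLOCK_HAS(edx, 25), and the RNG test below to
 * PADLOCK_HAS(edx, 2). */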
/*
 * Load supported features of the CPU to see if the PadLock is available.
 */
static int padlock_available(void)
{
	unsigned int edx = padlock_capability();

	/* Fill up some flags */
	padlock_use_ace = ((edx & (0x3 << 6)) == (0x3 << 6));
	padlock_use_rng = ((edx & (0x3 << 2)) == (0x3 << 2));

	return padlock_use_ace + padlock_use_rng;
}
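/*
 * A sketch of what padlock_capability() might look like, assuming a
 * GCC-style compiler on x86; the real GnuTLS routine is implemented in
 * assembly.  The PadLock flags live in EDX of the Centaur extended CPUID
 * leaf 0xC0000001, which is only valid when leaf 0xC0000000 reports it
 * as supported.  The padlock_capability_sketch() name is illustrative.
 */
static unsigned int padlock_capability_sketch(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0xC0000000 returns the highest supported Centaur leaf. */
	__asm__ volatile("cpuid"
			 : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
			 : "a"(0xC0000000));
	if (eax < 0xC0000001)
		return 0;

	/* Leaf 0xC0000001: PadLock feature flags are returned in EDX. */
	__asm__ volatile("cpuid"
			 : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
			 : "a"(0xC0000001));
	return edx;
}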
static void register_x86_padlock_crypto(unsigned capabilities)
{
	int ret, phe;
	unsigned edx;

	if (check_via() == 0)
		return;

	if (capabilities == 0)
		edx = padlock_capability();
	else
		edx = capabilities_to_via_edx(capabilities);

	if (check_padlock(edx)) {
		_gnutls_debug_log("Padlock AES accelerator was detected\n");
		ret = gnutls_crypto_single_cipher_register
		    (GNUTLS_CIPHER_AES_128_CBC, 80, &_gnutls_aes_padlock, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		/* register GCM ciphers */
		ret = gnutls_crypto_single_cipher_register
		    (GNUTLS_CIPHER_AES_128_GCM, 80, &_gnutls_aes_gcm_padlock, 0);
		if (ret < 0) {
			gnutls_assert();
		}
#ifdef HAVE_LIBNETTLE
		ret = gnutls_crypto_single_cipher_register
		    (GNUTLS_CIPHER_AES_192_CBC, 80, &_gnutls_aes_padlock, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_cipher_register
		    (GNUTLS_CIPHER_AES_256_CBC, 80, &_gnutls_aes_padlock, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_cipher_register
		    (GNUTLS_CIPHER_AES_256_GCM, 80, &_gnutls_aes_gcm_padlock, 0);
		if (ret < 0) {
			gnutls_assert();
		}
#endif
	}
#ifdef HAVE_LIBNETTLE
	phe = check_phe(edx);

	if (phe && check_phe_partial()) {
		_gnutls_debug_log
		    ("Padlock SHA1 and SHA256 (partial) accelerator was detected\n");
		if (check_phe_sha512(edx)) {
			_gnutls_debug_log
			    ("Padlock SHA512 (partial) accelerator was detected\n");
			ret = gnutls_crypto_single_digest_register
			    (GNUTLS_DIG_SHA384, 80, &_gnutls_sha_padlock_nano, 0);
			if (ret < 0) {
				gnutls_assert();
			}

			ret = gnutls_crypto_single_digest_register
			    (GNUTLS_DIG_SHA512, 80, &_gnutls_sha_padlock_nano, 0);
			if (ret < 0) {
				gnutls_assert();
			}

			ret = gnutls_crypto_single_mac_register
			    (GNUTLS_MAC_SHA384, 80,
			     &_gnutls_hmac_sha_padlock_nano, 0);
			if (ret < 0) {
				gnutls_assert();
			}

			ret = gnutls_crypto_single_mac_register
			    (GNUTLS_MAC_SHA512, 80,
			     &_gnutls_hmac_sha_padlock_nano, 0);
			if (ret < 0) {
				gnutls_assert();
			}
		}

		ret = gnutls_crypto_single_digest_register
		    (GNUTLS_DIG_SHA1, 80, &_gnutls_sha_padlock_nano, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_digest_register
		    (GNUTLS_DIG_SHA224, 80, &_gnutls_sha_padlock_nano, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_digest_register
		    (GNUTLS_DIG_SHA256, 80, &_gnutls_sha_padlock_nano, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_mac_register
		    (GNUTLS_MAC_SHA1, 80, &_gnutls_hmac_sha_padlock_nano, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		/* we don't register MAC_SHA224 because it is not used by TLS */

		ret = gnutls_crypto_single_mac_register
		    (GNUTLS_MAC_SHA256, 80, &_gnutls_hmac_sha_padlock_nano, 0);
		if (ret < 0) {
			gnutls_assert();
		}
	} else if (phe) {
		/* Original padlock PHE. Does not support incremental operations.
		 */
		_gnutls_debug_log
		    ("Padlock SHA1 and SHA256 accelerator was detected\n");

		ret = gnutls_crypto_single_digest_register
		    (GNUTLS_DIG_SHA1, 80, &_gnutls_sha_padlock, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_digest_register
		    (GNUTLS_DIG_SHA256, 80, &_gnutls_sha_padlock, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_mac_register
		    (GNUTLS_MAC_SHA1, 80, &_gnutls_hmac_sha_padlock, 0);
		if (ret < 0) {
			gnutls_assert();
		}

		ret = gnutls_crypto_single_mac_register
		    (GNUTLS_MAC_SHA256, 80, &_gnutls_hmac_sha_padlock, 0);
		if (ret < 0) {
			gnutls_assert();
		}
	}
#endif

	return;
}
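/*
 * A sketch of the check_via() gate used above, under the assumption that
 * it simply matches the CPUID vendor string: leaf 0 returns the vendor in
 * EBX:EDX:ECX, which reads "CentaurHauls" on VIA/Centaur CPUs.  The
 * check_via_sketch() name is illustrative; the real helper lives elsewhere
 * in the x86 backend.
 */
static unsigned check_via_sketch(void)
{
	unsigned int eax, ebx, ecx, edx;

	__asm__ volatile("cpuid"
			 : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
			 : "a"(0));

	/* "Cent" "aurH" "auls", packed little-endian */
	if (ebx == 0x746e6543 && edx == 0x48727561 && ecx == 0x736c7561)
		return 1;

	return 0;
}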