/* arm64: register the scalar SHA-256 algorithms first, then the NEON
 * variants when Advanced SIMD is present; if the NEON registration
 * fails, unwind the scalar one. */
static int __init sha256_mod_init(void)
{
	int ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));

	if (ret)
		return ret;

	if (elf_hwcap & HWCAP_ASIMD) {
		ret = crypto_register_shashes(neon_algs,
					      ARRAY_SIZE(neon_algs));
		if (ret)
			crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
	}

	return ret;
}
/* x86: pick the fastest usable SHA-512 transform, preferring AVX2 over
 * AVX over SSSE3, and register only if at least one is available. */
static int __init sha512_ssse3_mod_init(void)
{
	/* test for SSSE3 first */
	if (cpu_has_ssse3)
		sha512_transform_asm = sha512_transform_ssse3;

#ifdef CONFIG_AS_AVX
	/* allow AVX to override SSSE3, it's a little faster */
	if (avx_usable()) {
#ifdef CONFIG_AS_AVX2
		if (boot_cpu_has(X86_FEATURE_AVX2))
			sha512_transform_asm = sha512_transform_rorx;
		else
#endif
			sha512_transform_asm = sha512_transform_avx;
	}
#endif

	if (sha512_transform_asm) {
#ifdef CONFIG_AS_AVX
		if (sha512_transform_asm == sha512_transform_avx)
			pr_info("Using AVX optimized SHA-512 implementation\n");
#ifdef CONFIG_AS_AVX2
		else if (sha512_transform_asm == sha512_transform_rorx)
			pr_info("Using AVX2 optimized SHA-512 implementation\n");
#endif
		else
#endif
			pr_info("Using SSSE3 optimized SHA-512 implementation\n");
		return crypto_register_shashes(algs, ARRAY_SIZE(algs));
	}
	pr_info("Neither AVX nor SSSE3 is available/usable.\n");

	return -ENODEV;
}
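/*
 * Note: avx_usable(), called above, is not defined in this snippet. A
 * minimal sketch of what such a helper might look like, assuming the
 * kernel's cpu_has_xfeatures() interface; the exact mask names and the
 * message text are illustrative, not taken from the source:
 */
static bool avx_usable(void)
{
	/* AVX is only safe once the OS enables SSE+YMM xstate saving */
	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
		if (boot_cpu_has(X86_FEATURE_AVX))
			pr_info("AVX detected but unusable.\n");
		return false;
	}
	return true;
}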
static int register_sha512_avx2(void)
{
	if (avx2_usable())
		return crypto_register_shashes(sha512_avx2_algs,
					       ARRAY_SIZE(sha512_avx2_algs));
	return 0;
}
static int register_sha512_ssse3(void)
{
	if (boot_cpu_has(X86_FEATURE_SSSE3))
		return crypto_register_shashes(sha512_ssse3_algs,
					       ARRAY_SIZE(sha512_ssse3_algs));
	return 0;
}
static int register_sha256_ni(void)
{
	if (boot_cpu_has(X86_FEATURE_SHA_NI))
		return crypto_register_shashes(sha256_ni_algs,
					       ARRAY_SIZE(sha256_ni_algs));
	return 0;
}
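/*
 * The register_* helpers above deliberately return 0 when the CPU
 * feature is absent, so a module init can chain them and unwind only
 * what actually got registered. A hedged sketch of such a composition;
 * the function name sha512_x86_mod_init and the unregister_*
 * counterparts (assumed to mirror the helpers above using
 * crypto_unregister_shashes()) are hypothetical, not from the source:
 */
static int __init sha512_x86_mod_init(void)
{
	if (register_sha512_ssse3())
		goto fail;

	if (register_sha512_avx2()) {
		unregister_sha512_ssse3();	/* assumed counterpart */
		goto fail;
	}

	return 0;
fail:
	return -ENODEV;
}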
/* 32-bit ARM: the scalar SHA-512 drivers are always registered; the
 * NEON variants only when the kernel supports kernel-mode NEON and the
 * CPU actually has it. */
static int __init sha512_arm_mod_init(void)
{
	int err;

	err = crypto_register_shashes(sha512_arm_algs,
				      ARRAY_SIZE(sha512_arm_algs));
	if (err)
		return err;

	if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon()) {
		err = crypto_register_shashes(sha512_neon_algs,
					      ARRAY_SIZE(sha512_neon_algs));
		if (err)
			goto err_unregister;
	}

	return 0;

err_unregister:
	crypto_unregister_shashes(sha512_arm_algs,
				  ARRAY_SIZE(sha512_arm_algs));
	return err;
}
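/*
 * A module that registers conditionally must unregister the same way.
 * A minimal sketch of the matching module_exit, assuming
 * cpu_has_neon() reports the same answer at unload time as at init;
 * the fini function itself is not part of the source above:
 */
static void __exit sha512_arm_mod_fini(void)
{
	/* unregister in reverse order, skipping what was never added */
	if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon())
		crypto_unregister_shashes(sha512_neon_algs,
					  ARRAY_SIZE(sha512_neon_algs));
	crypto_unregister_shashes(sha512_arm_algs,
				  ARRAY_SIZE(sha512_arm_algs));
}
module_exit(sha512_arm_mod_fini);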
/* arm64: install the PMULL-accelerated CRC32/CRC32C update hooks when
 * kernel-mode NEON and the PMULL hwcap are both present, choosing the
 * best scalar fallback; without PMULL, the CRC32 instructions are
 * required. */
static int __init crc32_pmull_mod_init(void)
{
	if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
	    (elf_hwcap & HWCAP_PMULL)) {
		crc32_pmull_algs[0].update = crc32_pmull_update;
		crc32_pmull_algs[1].update = crc32c_pmull_update;

		if (elf_hwcap & HWCAP_CRC32) {
			fallback_crc32 = crc32_armv8_le;
			fallback_crc32c = crc32c_armv8_le;
		} else {
			fallback_crc32 = crc32_le;
			fallback_crc32c = __crc32c_le;
		}
	} else if (!(elf_hwcap & HWCAP_CRC32)) {
		return -ENODEV;
	}

	return crypto_register_shashes(crc32_pmull_algs,
				       ARRAY_SIZE(crc32_pmull_algs));
}
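/*
 * The fallback_crc32 pointer chosen above is consumed by the update
 * hook installed on crc32_pmull_algs[0]. A rough sketch of that shape,
 * assuming a crc32_pmull_le() assembly routine and a PMULL_MIN_LEN
 * threshold; both names and the simplified length handling are
 * illustrative, not taken from the source:
 */
static int crc32_pmull_update(struct shash_desc *desc, const u8 *data,
			      unsigned int length)
{
	u32 *crc = shash_desc_ctx(desc);

	/* PMULL only pays off for long inputs, and only when NEON is
	 * usable in the current context; otherwise use the scalar
	 * fallback picked at init time. */
	if (length >= PMULL_MIN_LEN && may_use_simd()) {
		kernel_neon_begin();
		*crc = crc32_pmull_le(data, length, *crc);
		kernel_neon_end();
	} else {
		*crc = fallback_crc32(*crc, data, length);
	}
	return 0;
}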
/* arm64 Crypto Extensions variant: note the absence of any hwcap test
 * in the function itself (see the note below). */
static int __init sha2_ce_mod_init(void)
{
	return crypto_register_shashes(algs, ARRAY_SIZE(algs));
}
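/*
 * Unlike the 32-bit variant further down, this init does no feature
 * test of its own. On arm64 such modules are typically bound to a CPU
 * feature at load time instead, e.g. via module_cpu_feature_match();
 * a sketch, with the SHA2 feature token assumed rather than taken from
 * the source:
 */
module_cpu_feature_match(SHA2, sha2_ce_mod_init);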
static int __init sha256_generic_mod_init(void)
{
	return crypto_register_shashes(sha256_algs,
				       ARRAY_SIZE(sha256_algs));
}
/* 32-bit ARM Crypto Extensions variant: bail out unless the SHA2
 * hwcap is present. */
static int __init sha2_ce_mod_init(void)
{
	if (!(elf_hwcap2 & HWCAP2_SHA2))
		return -ENODEV;

	return crypto_register_shashes(algs, ARRAY_SIZE(algs));
}
static int __init crc_vx_mod_init(void)
{
	return crypto_register_shashes(crc32_vx_algs,
				       ARRAY_SIZE(crc32_vx_algs));
}