/*
 * p8_init() - register the P8/VMX accelerated algorithms.
 *
 * Walks the NULL-terminated algs[] table registering each entry; on a
 * failure the already-registered entries are unwound before bailing out.
 * Finally registers the GHASH shash; if that fails, every alg registered
 * from algs[] is rolled back as well.
 *
 * Returns 0 on success or the first negative error code encountered.
 */
int __init p8_init(void)
{
	int ret = 0;
	struct crypto_alg **alg_it;

	for (alg_it = algs; *alg_it; alg_it++) {
		ret = crypto_register_alg(*alg_it);
		printk(KERN_INFO "crypto_register_alg '%s' = %d\n",
		       (*alg_it)->cra_name, ret);
		if (ret) {
			/*
			 * BUG FIX: the old unwind loop
			 * "for (alg_it--; alg_it >= algs; alg_it--)" formed a
			 * pointer before the start of algs[] on its final
			 * comparison, which is undefined behaviour.  Walk back
			 * with a pre-decrement so no out-of-range pointer is
			 * ever computed.
			 */
			while (alg_it > algs) {
				alg_it--;
				crypto_unregister_alg(*alg_it);
			}
			break;
		}
	}
	if (ret)
		return ret;

	ret = crypto_register_shash(&p8_ghash_alg);
	if (ret) {
		/* GHASH failed: roll back everything registered above. */
		for (alg_it = algs; *alg_it; alg_it++)
			crypto_unregister_alg(*alg_it);
	}
	return ret;
}
/*
 * Register the four "null" algorithms (cipher, skcipher, digest,
 * compress).  Failures unwind in reverse registration order.
 */
static int __init crypto_null_mod_init(void)
{
	int ret;

	ret = crypto_register_alg(&cipher_null);
	if (ret < 0)
		return ret;

	ret = crypto_register_alg(&skcipher_null);
	if (ret < 0)
		goto err_unregister_cipher;

	ret = crypto_register_shash(&digest_null);
	if (ret < 0)
		goto err_unregister_skcipher;

	ret = crypto_register_alg(&compress_null);
	if (ret < 0)
		goto err_unregister_digest;

	return 0;

err_unregister_digest:
	crypto_unregister_shash(&digest_null);
err_unregister_skcipher:
	crypto_unregister_alg(&skcipher_null);
err_unregister_cipher:
	crypto_unregister_alg(&cipher_null);
	return ret;
}
/*
 * Module exit: drop both registered AES modes (ECB then CBC), then
 * shut the engine hardware down.
 */
static void __exit AesEngineExit(void)
{
	crypto_unregister_alg(&mcrypto_aes_ecb_alg);
	crypto_unregister_alg(&mcrypto_aes_cbc_alg);
	aes_engine_uninit();
}
/* Module exit: unregister the null algs in reverse registration order. */
static void __exit crypto_null_mod_fini(void)
{
	crypto_unregister_alg(&compress_null);
	crypto_unregister_shash(&digest_null);
	crypto_unregister_alg(&skcipher_null);
	crypto_unregister_alg(&cipher_null);
}
/*
 * Module exit: unregister the PRNG algorithm(s).  The FIPS variant is
 * only registered (and thus only unregistered) when CONFIG_CRYPTO_FIPS
 * is set.
 */
static void __exit prng_mod_fini(void)
{
	crypto_unregister_alg(&rng_alg);
#ifdef CONFIG_CRYPTO_FIPS
	crypto_unregister_alg(&fips_rng_alg);
#endif
	/* NOTE: redundant trailing "return;" in a void function removed. */
}
/*
 * Module exit: unregister CTR first (and release its counter-block
 * bounce page), then XTS, CBC, ECB and finally the base cipher.
 */
static void __exit aes_s390_fini(void)
{
	crypto_unregister_alg(&ctr_aes_alg);
	free_page((unsigned long) ctrblk);
	crypto_unregister_alg(&xts_aes_alg);
	crypto_unregister_alg(&cbc_aes_alg);
	crypto_unregister_alg(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}
/*
 * geode_aes_probe() - PCI probe for the Geode AES engine.
 *
 * Enables the PCI device, claims its regions, maps BAR0 into _iobase,
 * clears any pending interrupt state, then registers the three
 * crypto_algs (raw cipher, ECB, CBC).  Any failure unwinds every
 * earlier step in reverse order through the goto chain.
 *
 * Returns 0 on success or a negative error code.
 */
static int __devinit geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int ret;

	ret = pci_enable_device(dev);
	if (ret)
		return ret;

	ret = pci_request_regions(dev, "geode-aes");
	if (ret)
		goto eenable;

	_iobase = pci_iomap(dev, 0, 0);
	if (_iobase == NULL) {
		ret = -ENOMEM;
		goto erequest;
	}

	spin_lock_init(&lock);

	/* Clear any pending activity */
	iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);

	ret = crypto_register_alg(&geode_alg);
	if (ret)
		goto eiomap;

	ret = crypto_register_alg(&geode_ecb_alg);
	if (ret)
		goto ealg;

	ret = crypto_register_alg(&geode_cbc_alg);
	if (ret)
		goto eecb;

	printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
	return 0;

	/* Error unwind: each label undoes the step acquired just above it. */
eecb:
	crypto_unregister_alg(&geode_ecb_alg);
ealg:
	crypto_unregister_alg(&geode_alg);
eiomap:
	pci_iounmap(dev, _iobase);
erequest:
	pci_release_regions(dev);
eenable:
	pci_disable_device(dev);

	printk(KERN_ERR "geode-aes: GEODE AES initialization failed.\n");
	return ret;
}
/*
 * PCI remove: unregister the three algs, then unmap and release the
 * device resources acquired in the probe path.
 */
static void geode_aes_remove(struct pci_dev *dev)
{
	crypto_unregister_alg(&geode_alg);
	crypto_unregister_alg(&geode_ecb_alg);
	crypto_unregister_alg(&geode_cbc_alg);

	pci_iounmap(dev, _iobase);
	_iobase = NULL;

	pci_release_regions(dev);
	pci_disable_device(dev);
}
/*
 * xlr_crypt_alg_init() - register the XLR hardware AES/DES/3DES algs.
 *
 * Registers nine algorithms (base, ECB and CBC variants for AES, DES
 * and 3DES).  A failure anywhere unwinds every earlier registration by
 * falling through the goto chain.  The CTR-AES registration and its
 * matching unwind label are currently disabled (commented out).
 *
 * Returns 0 on success or the failing registration's error code.
 */
static int __init xlr_crypt_alg_init(void)
{
	int ret;

	if ((ret = crypto_register_alg(&xlr_aes_alg)))
		goto err_out;
	if ((ret = crypto_register_alg(&xlr_ecb_aes_alg)))
		goto err1;
	if ((ret = crypto_register_alg(&xlr_cbc_aes_alg)))
		goto err2;
	if ((ret = crypto_register_alg(&xlr_des_alg)))
		goto err3;
	if ((ret = crypto_register_alg(&xlr_ecb_des_alg)))
		goto err4;
	if ((ret = crypto_register_alg(&xlr_cbc_des_alg)))
		goto err5;
	if ((ret = crypto_register_alg(&xlr_des3_alg)))
		goto err6;
	if ((ret = crypto_register_alg(&xlr_ecb_des3_alg)))
		goto err7;
	if ((ret = crypto_register_alg(&xlr_cbc_des3_alg)))
		goto err8;
//	if ((ret = crypto_register_alg(&xlr_ctr_aes_alg)))
//		goto err9;

	printk(KERN_NOTICE "Using XLR hardware for AES/DES/3DES algorithm.\n");
	return 0;

	/* Unwind chain: each label falls through to undo earlier steps. */
//err9:
//	crypto_unregister_alg(&xlr_cbc_des3_alg);
err8:
	crypto_unregister_alg(&xlr_ecb_des3_alg);
err7:
	crypto_unregister_alg(&xlr_des3_alg);
err6:
	crypto_unregister_alg(&xlr_cbc_des_alg);
err5:
	crypto_unregister_alg(&xlr_ecb_des_alg);
err4:
	crypto_unregister_alg(&xlr_des_alg);
err3:
	crypto_unregister_alg(&xlr_cbc_aes_alg);
err2:
	crypto_unregister_alg(&xlr_ecb_aes_alg);
err1:
	crypto_unregister_alg(&xlr_aes_alg);
err_out:
	printk(KERN_ERR "XLR hardware AES/DES/3DES initialization failed.\n");
	return ret;
}
/*! \fn void __exit ifxdeu_fini_aes (void)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief Unregister the AES driver: drop all five registered AES modes
 *         and, in power-save + DMA builds, release the DMA bounce buffers.
 */
void __exit ifxdeu_fini_aes(void)
{
	crypto_unregister_alg(&ifxdeu_aes_alg);
	crypto_unregister_alg(&ifxdeu_ecb_aes_alg);
	crypto_unregister_alg(&ifxdeu_cbc_aes_alg);
	crypto_unregister_alg(&ifxdeu_ctr_basic_aes_alg);
	crypto_unregister_alg(&ifxdeu_ctr_rfc3686_aes_alg);

#ifdef CONFIG_CRYPTO_DEV_PWR_SAVE_MODE
#ifdef CONFIG_CRYPTO_DEV_DMA
	FREE_MEMORY(aes_buff_in);
	FREE_MEMORY(aes_buff_out);
#endif
#endif /* CONFIG_CRYPTO_DEV_PWR_SAVE_MODE */
}
static int mv_remove(struct platform_device *pdev) { struct crypto_priv *cp = platform_get_drvdata(pdev); crypto_unregister_alg(&mv_aes_alg_ecb); crypto_unregister_alg(&mv_aes_alg_cbc); kthread_stop(cp->queue_th); free_irq(cp->irq, cp); memset(cp->sram, 0, cp->sram_size); iounmap(cp->sram); iounmap(cp->reg); kfree(cp); cpg = NULL; return 0; }
/*
 * Probe for the VIA PadLock Hash Engine and register the SHA1/SHA256
 * algorithms backed by it.  Returns -ENODEV when the engine is absent
 * or disabled, otherwise the registration result.
 */
static int __init padlock_init(void)
{
	int rc;

	if (!cpu_has_phe) {
		printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_phe_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	rc = crypto_register_alg(&sha1_alg);
	if (rc)
		goto out;

	rc = crypto_register_alg(&sha256_alg);
	if (rc)
		goto out_unreg1;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");
	return 0;

out_unreg1:
	crypto_unregister_alg(&sha1_alg);
out:
	printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
	return rc;
}
/*
 * Register all cipher algs, then all ahash algs, for this CESA device.
 * On failure every alg registered so far is rolled back; the ahash
 * error path falls through to also unwind the full cipher set.
 */
static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
{
	int ret;
	int idx, done;

	for (idx = 0; idx < cesa->caps->ncipher_algs; idx++) {
		ret = crypto_register_alg(cesa->caps->cipher_algs[idx]);
		if (ret)
			goto err_unregister_crypto;
	}

	for (idx = 0; idx < cesa->caps->nahash_algs; idx++) {
		ret = crypto_register_ahash(cesa->caps->ahash_algs[idx]);
		if (ret)
			goto err_unregister_ahash;
	}

	return 0;

err_unregister_ahash:
	for (done = 0; done < idx; done++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[done]);
	/* All ciphers were registered; unwind the complete set below. */
	idx = cesa->caps->ncipher_algs;
err_unregister_crypto:
	for (done = 0; done < idx; done++)
		crypto_unregister_alg(cesa->caps->cipher_algs[done]);

	return ret;
}
/*
 * des_s390_exit() - unregister every DES alg recorded in
 * des_s390_algs_ptr[], walking the table backwards (reverse of the
 * order they were appended), then release the counter-block page if
 * one was allocated.  Note the post-decrement while leaves
 * des_s390_algs_num at -1 when done.
 */
static void des_s390_exit(void)
{
	while (des_s390_algs_num--)
		crypto_unregister_alg(des_s390_algs_ptr[des_s390_algs_num]);
	if (ctrblk)
		free_page((unsigned long) ctrblk);
}
/*
 * rk_crypto_register() - bind crypto_info to every rk_cipher_algs[]
 * entry and register it as either a cipher or an ahash.
 *
 * On failure all previously registered entries are unregistered and
 * the error code is returned.
 */
static int rk_crypto_register(struct rk_crypto_info *crypto_info)
{
	unsigned int i, k;
	int err = 0;

	for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
		rk_cipher_algs[i]->dev = crypto_info;
		if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
			err = crypto_register_alg(
					&rk_cipher_algs[i]->alg.crypto);
		else
			err = crypto_register_ahash(
					&rk_cipher_algs[i]->alg.hash);
		if (err)
			goto err_cipher_algs;
	}
	return 0;

err_cipher_algs:
	/*
	 * BUG FIX: the cleanup loop previously inspected and unregistered
	 * rk_cipher_algs[i] — the entry that just FAILED to register —
	 * instead of rk_cipher_algs[k], leaving every successfully
	 * registered entry registered.  Index with k throughout.
	 */
	for (k = 0; k < i; k++) {
		if (rk_cipher_algs[k]->type == ALG_TYPE_CIPHER)
			crypto_unregister_alg(&rk_cipher_algs[k]->alg.crypto);
		else
			crypto_unregister_ahash(&rk_cipher_algs[k]->alg.hash);
	}
	return err;
}
/* Unregister every AES alg this driver registered from aes_algs[]. */
static void sahara_unregister_algs(struct sahara_dev *dev)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(aes_algs); idx++)
		crypto_unregister_alg(&aes_algs[idx]);
}
/*
 * Module exit: unregister the nine XLR algorithms, CBC/ECB/base per
 * family (AES, then DES, then 3DES).  The CTR-AES entry stays
 * commented out to mirror its disabled registration.
 */
static void __exit xlr_crypt_alg_fini(void)
{
	/* AES */
	crypto_unregister_alg(&xlr_cbc_aes_alg);
	crypto_unregister_alg(&xlr_ecb_aes_alg);
	crypto_unregister_alg(&xlr_aes_alg);
	/* DES */
	crypto_unregister_alg(&xlr_cbc_des_alg);
	crypto_unregister_alg(&xlr_ecb_des_alg);
	crypto_unregister_alg(&xlr_des_alg);
	/* 3DES */
	crypto_unregister_alg(&xlr_cbc_des3_alg);
	crypto_unregister_alg(&xlr_ecb_des3_alg);
	crypto_unregister_alg(&xlr_des3_alg);
//	crypto_unregister_alg(&xlr_ctr_aes_alg);
}
/*
 * Unregister the CRC32 alg.  It was registered through the shash API
 * where the kernel provides struct shash_alg, and through the legacy
 * crypto_alg API otherwise — mirror that here.
 */
void cfs_crypto_crc32_unregister(void)
{
#ifdef HAVE_STRUCT_SHASH_ALG
	crypto_unregister_shash(&alg);
#else
	crypto_unregister_alg(&alg);
#endif
}
/*
 * aes_init() - probe CPACF AES support and register the s390 AES algs.
 *
 * Records which key lengths the hardware handles; bails out with
 * -EOPNOTSUPP when none are available.  On 128-bit-only machines the
 * max key sizes of all three algs are clamped before registration.
 * Failures unwind previously registered algs via the goto chain.
 */
static int __init aes_init(void)
{
	int ret;

	if (crypt_s390_func_available(KM_AES_128_ENCRYPT))
		keylen_flag |= AES_KEYLEN_128;
	if (crypt_s390_func_available(KM_AES_192_ENCRYPT))
		keylen_flag |= AES_KEYLEN_192;
	if (crypt_s390_func_available(KM_AES_256_ENCRYPT))
		keylen_flag |= AES_KEYLEN_256;

	if (!keylen_flag)
		return -EOPNOTSUPP;

	/* z9 109 and z9 BC/EC only support 128 bit key length */
	if (keylen_flag == AES_KEYLEN_128) {
		aes_alg.cra_u.cipher.cia_max_keysize = AES_MIN_KEY_SIZE;
		ecb_aes_alg.cra_u.blkcipher.max_keysize = AES_MIN_KEY_SIZE;
		cbc_aes_alg.cra_u.blkcipher.max_keysize = AES_MIN_KEY_SIZE;
		/*
		 * BUG FIX: the adjacent string literals used to concatenate
		 * to "...available for128 bit keys" — missing space added.
		 */
		printk(KERN_INFO
		       "aes_s390: hardware acceleration only available for "
		       "128 bit keys\n");
	}

	ret = crypto_register_alg(&aes_alg);
	if (ret)
		goto aes_err;

	ret = crypto_register_alg(&ecb_aes_alg);
	if (ret)
		goto ecb_aes_err;

	ret = crypto_register_alg(&cbc_aes_alg);
	if (ret)
		goto cbc_aes_err;

out:
	return ret;

cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	goto out;
}
/* Unregister all sunxi SS cipher algs, then all of its hash algs. */
static void sunxi_ss_alg_unregister(void)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(sunxi_ss_algs); idx++)
		crypto_unregister_alg(&sunxi_ss_algs[idx]);

	for (idx = 0; idx < ARRAY_SIZE(sunxi_ss_algs_hash); idx++)
		crypto_unregister_ahash(&sunxi_ss_algs_hash[idx]);
}
/*
 * Remove every alg registered in mv_cesa_add_algs(): ahash entries
 * first, then the cipher entries.
 */
static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
{
	int idx;

	for (idx = 0; idx < cesa->caps->nahash_algs; idx++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[idx]);

	for (idx = 0; idx < cesa->caps->ncipher_algs; idx++)
		crypto_unregister_alg(cesa->caps->cipher_algs[idx]);
}
/*
 * Module exit: unregister each alg in the NULL-terminated algs[]
 * table (logging each removal), then drop the GHASH shash.
 */
void __exit p8_exit(void)
{
	struct crypto_alg **cur;

	for (cur = algs; *cur; cur++) {
		printk(KERN_INFO "Removing '%s'\n", (*cur)->cra_name);
		crypto_unregister_alg(*cur);
	}

	crypto_unregister_shash(&p8_ghash_alg);
}
/*
 * Probe for the VIA PadLock xcrypt unit and register the three AES
 * algs backed by it.  Returns -ENODEV when the unit is absent or
 * disabled; registration failures unwind via the goto chain.
 */
static int __init padlock_init(void)
{
	int ret;
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (!cpu_has_xcrypt) {
		printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_xcrypt_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	ret = crypto_register_alg(&aes_alg);
	if (ret)
		goto aes_err;
	ret = crypto_register_alg(&ecb_aes_alg);
	if (ret)
		goto ecb_aes_err;
	ret = crypto_register_alg(&cbc_aes_alg);
	if (ret)
		goto cbc_aes_err;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

	/* Per the message below: VIA Nano stepping 2 gets larger fetches. */
	if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
	}

out:
	return ret;

cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
	goto out;
}
/*
 * init() - register the s390 DES/3DES algorithms.
 *
 * BUG FIX: the old version registered all three algs regardless of
 * earlier failures, folded the results into a bitmask, and on any
 * failure unregistered all three — including algs that had never been
 * successfully registered.  Register sequentially instead and unwind
 * only what actually succeeded, returning the real error code.
 */
static int init(void)
{
	int ret;

	if (!crypt_s390_func_available(KM_DEA_ENCRYPT) ||
	    !crypt_s390_func_available(KM_TDEA_128_ENCRYPT) ||
	    !crypt_s390_func_available(KM_TDEA_192_ENCRYPT))
		return -ENOSYS;

	ret = crypto_register_alg(&des_alg);
	if (ret)
		goto out;
	ret = crypto_register_alg(&des3_128_alg);
	if (ret)
		goto out_unreg_des;
	ret = crypto_register_alg(&des3_192_alg);
	if (ret)
		goto out_unreg_des3_128;
	return 0;

out_unreg_des3_128:
	crypto_unregister_alg(&des3_128_alg);
out_unreg_des:
	crypto_unregister_alg(&des_alg);
out:
	return ret;
}
/*
 * Unregister every entry in rk_cipher_algs[], dispatching on its type:
 * cipher entries go through crypto_unregister_alg(), hash entries
 * through crypto_unregister_ahash().
 */
static void rk_crypto_unregister(void)
{
	unsigned int idx;

	for (idx = 0; idx < ARRAY_SIZE(rk_cipher_algs); idx++) {
		if (rk_cipher_algs[idx]->type == ALG_TYPE_CIPHER)
			crypto_unregister_alg(&rk_cipher_algs[idx]->alg.crypto);
		else
			crypto_unregister_ahash(&rk_cipher_algs[idx]->alg.hash);
	}
}
/*
 * aes_s390_init() - probe CPACF AES support and register the base,
 * ECB and CBC algs.  Returns -EOPNOTSUPP when no key length is
 * hardware-accelerated; registration failures unwind in reverse order.
 */
static int __init aes_s390_init(void)
{
	int ret;

	if (crypt_s390_func_available(KM_AES_128_ENCRYPT))
		keylen_flag |= AES_KEYLEN_128;
	if (crypt_s390_func_available(KM_AES_192_ENCRYPT))
		keylen_flag |= AES_KEYLEN_192;
	if (crypt_s390_func_available(KM_AES_256_ENCRYPT))
		keylen_flag |= AES_KEYLEN_256;

	if (!keylen_flag)
		return -EOPNOTSUPP;

	/* z9 109 and z9 BC/EC only support 128 bit key length */
	if (keylen_flag == AES_KEYLEN_128)
		pr_info("AES hardware acceleration is only available for"
			" 128-bit keys\n");

	ret = crypto_register_alg(&aes_alg);
	if (ret)
		return ret;

	ret = crypto_register_alg(&ecb_aes_alg);
	if (ret)
		goto ecb_aes_err;

	ret = crypto_register_alg(&cbc_aes_alg);
	if (ret)
		goto cbc_aes_err;

	return 0;

cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
	return ret;
}
/*
 * Module exit: the CTR algs (and their shared counter-block page) are
 * only torn down when ctrblk was allocated — mirroring the init path —
 * then the remaining algs are unregistered in reverse order.
 */
static void __exit des_s390_exit(void)
{
	if (ctrblk) {
		crypto_unregister_alg(&ctr_des_alg);
		crypto_unregister_alg(&ctr_des3_alg);
		free_page((unsigned long) ctrblk);
	}
	crypto_unregister_alg(&cbc_des3_alg);
	crypto_unregister_alg(&ecb_des3_alg);
	crypto_unregister_alg(&des3_alg);
	crypto_unregister_alg(&cbc_des_alg);
	crypto_unregister_alg(&ecb_des_alg);
	crypto_unregister_alg(&des_alg);
}
/*
 * AesEngineInit() - module init for the MTK AES engine.
 *
 * Resets the hardware, allocates the descriptor ring, optionally hooks
 * the engine IRQ (interrupt-driven builds only), starts the engine and
 * registers the CBC and ECB algs.  Registration failures unwind the
 * already-registered alg and shut the engine back down.
 *
 * Returns 0 on success or a negative error code.
 */
static int __init AesEngineInit(void)
{
	int err;

	spin_lock_init(&AES_Entry.page_lock);

	aes_engine_reset();

	printk("MTK AES Engine Module, HW verson: %02X\n", sysRegRead(AES_INFO) >> 28);

	err = aes_engine_desc_init();
	if (err != 0) {
		printk(KERN_WARNING "%s: ring_alloc FAILED!\n", AES_MODNAME);
		return err;
	}

#if defined (CONFIG_CRYPTO_DEV_MTK_AES_INT)
	/* Interrupt-driven completion: claim the engine IRQ. */
	init_completion(&AES_Entry.op_complete);
	err = request_irq(SURFBOARDINT_AESENGINE, AesEngineIrqHandler, IRQF_DISABLED, "aes_engine", NULL);
	if (err) {
		printk("%s: IRQ %d is not free!\n", AES_MODNAME, SURFBOARDINT_AESENGINE);
		aes_engine_desc_free();
		return err;
	}
#endif

	aes_engine_start();

	printk("%s: register %s crypto api\n", AES_MODNAME, mcrypto_aes_cbc_alg.cra_name);
	err = crypto_register_alg(&mcrypto_aes_cbc_alg);
	if (err) {
		printk("%s: register %s crypto api failed!\n", AES_MODNAME, mcrypto_aes_cbc_alg.cra_name);
		goto init_failed;
	}

	printk("%s: register %s crypto api\n", AES_MODNAME, mcrypto_aes_ecb_alg.cra_name);
	err = crypto_register_alg(&mcrypto_aes_ecb_alg);
	if (err) {
		printk("%s: register %s crypto api failed!\n", AES_MODNAME, mcrypto_aes_ecb_alg.cra_name);
		/* ECB failed after CBC succeeded: roll CBC back. */
		crypto_unregister_alg(&mcrypto_aes_cbc_alg);
		goto init_failed;
	}

	return 0;

init_failed:
	aes_engine_uninit();
	return err;
}
/*
 * Register the SHA-512 alg and then SHA-384 (which shares the KIMD
 * SHA-512 facility).  If SHA-384 fails, SHA-512 is rolled back and its
 * error code is returned.
 */
static int __init init(void)
{
	int ret;

	if (!crypt_s390_func_available(KIMD_SHA_512))
		return -EOPNOTSUPP;

	ret = crypto_register_alg(&sha512_alg);
	if (ret < 0)
		goto out;

	ret = crypto_register_alg(&sha384_alg);
	if (ret < 0)
		crypto_unregister_alg(&sha512_alg);
out:
	return ret;
}
/*
 * Register the legacy crc32c alg followed by the new one; if the new
 * registration fails, the legacy alg is rolled back and the error is
 * returned.
 */
static int __init crc32c_mod_init(void)
{
	int err = crypto_register_alg(&old_alg);

	if (err)
		return err;

	err = crypto_register_alg(&alg);
	if (err)
		crypto_unregister_alg(&old_alg);

	return err;
}