/*! \fn int md5_hmac_update(struct shash_desc *desc, const u8 *data, unsigned int len)
 * \ingroup IFX_MD5_HMAC_FUNCTIONS
 * \brief on-the-fly md5 hmac computation: buffers input into 64-byte blocks
 *        and feeds each complete block to md5_hmac_transform()
 * \param desc linux crypto shash descriptor
 * \param data input data
 * \param len size of input data
*/
static int md5_hmac_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{
	struct md5_hmac_ctx *mctx = crypto_shash_ctx(desc->tfm);
	const u32 offset = mctx->byte_count & 0x3f;	/* bytes already buffered */
	const u32 room = sizeof(mctx->block) - offset;	/* space left in block */

	mctx->byte_count += len;

	/* Input fits in the partial block: stash it and return. */
	if (room > len) {
		memcpy((char *)mctx->block + offset, data, len);
		return 0;
	}

	/* Complete the buffered block and flush it. */
	memcpy((char *)mctx->block + offset, data, room);
	md5_hmac_transform(desc, mctx->block);
	data += room;
	len -= room;

	/* Consume every remaining full 64-byte block. */
	while (len >= sizeof(mctx->block)) {
		memcpy(mctx->block, data, sizeof(mctx->block));
		md5_hmac_transform(desc, mctx->block);
		data += sizeof(mctx->block);
		len -= sizeof(mctx->block);
	}

	/* Keep the tail for the next update/final call. */
	memcpy(mctx->block, data, len);

	return 0;
}
/* One-shot digest: run finup over the whole input, seeded with the tfm key. */
static int chksum_digest(struct shash_desc *desc, const u8 *data,
			 unsigned int length, u8 *out)
{
	struct chksum_ctx *ctx = crypto_shash_ctx(desc->tfm);

	return __chksum_finup(&ctx->key, data, length, out);
}
/*
 * ghash_update - fold @srclen bytes of @src into the running GHASH state.
 *
 * The PCLMULQDQ helpers may only run with the FPU/SSE state owned by the
 * kernel, hence the kernel_fpu_begin()/kernel_fpu_end() bracket around
 * every clmul_* call.  dctx->buffer holds a partially filled 16-byte block
 * between calls; dctx->bytes is how many bytes are still MISSING from it.
 */
static int ghash_update(struct shash_desc *desc, const u8 *src, unsigned int srclen)
{
	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
	u8 *dst = dctx->buffer;

	kernel_fpu_begin();
	if (dctx->bytes) {
		/* Top up the partial block left over from the previous call. */
		int n = min(srclen, dctx->bytes);
		u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);

		dctx->bytes -= n;
		srclen -= n;

		while (n--)
			*pos++ ^= *src++;

		/* Block completed: multiply it into the hash state. */
		if (!dctx->bytes)
			clmul_ghash_mul(dst, &ctx->shash);
	}

	/* NOTE(review): presumably clmul_ghash_update() processes only the
	 * full 16-byte blocks of srclen and ignores the tail, since the
	 * remainder is re-buffered below — confirm against the asm helper. */
	clmul_ghash_update(dst, src, srclen, &ctx->shash);
	kernel_fpu_end();

	/* Buffer the sub-block tail (no FPU needed for plain XOR). */
	if (srclen & 0xf) {
		src += srclen - (srclen & 0xf);
		srclen &= 0xf;
		dctx->bytes = GHASH_BLOCK_SIZE - srclen;
		while (srclen--)
			*dst++ ^= *src++;
	}

	return 0;
}
/* Seed the per-request CRC accumulator from the tfm-wide key/seed. */
static int crc32c_pclmul_init(struct shash_desc *desc)
{
	u32 *seed = crypto_shash_ctx(desc->tfm);
	u32 *crc = shash_desc_ctx(desc);

	*crc = *seed;

	return 0;
}
/* Initialize the per-request CRC state with the seed stored in the tfm. */
static int crc32_vx_init(struct shash_desc *desc)
{
	struct crc_desc_ctx *state = shash_desc_ctx(desc);
	struct crc_ctx *seed = crypto_shash_ctx(desc->tfm);

	state->crc = seed->key;

	return 0;
}
/* Start a new Adler-32 computation from the tfm's configured seed. */
static int adler32_init(struct shash_desc *desc)
{
	u32 *seed = crypto_shash_ctx(desc->tfm);
	u32 *sum = shash_desc_ctx(desc);

	*sum = *seed;

	return 0;
}
/* Restore a previously exported hash state into the software fallback. */
static int padlock_sha_import(struct shash_desc *desc, const void *in)
{
	struct padlock_sha_desc *fb_desc = shash_desc_ctx(desc);
	struct padlock_sha_ctx *padlock = crypto_shash_ctx(desc->tfm);

	/* Point the fallback descriptor at the fallback tfm, propagating
	 * only the may-sleep request flag from the caller. */
	fb_desc->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	fb_desc->fallback.tfm = padlock->fallback;

	return crypto_shash_import(&fb_desc->fallback, in);
}
/*! \fn int md5_hmac_init(struct shash_desc *desc)
 * \ingroup IFX_MD5_HMAC_FUNCTIONS
 * \brief initialize md5 hmac context and program the key into the hardware
 * \param desc linux crypto shash descriptor
*/
static int md5_hmac_init(struct shash_desc *desc)
{
	struct md5_hmac_ctx *ctx = crypto_shash_ctx(desc->tfm);

	/* dbn workaround: restart the queued-block counter for this request */
	ctx->dbn = 0;
	md5_hmac_setkey_hw(ctx->key, ctx->keylen);

	return 0;
}
/* Flush any buffered partial block, then emit the 16-byte GHASH digest. */
static int ghash_final(struct shash_desc *desc, u8 *dst)
{
	struct ghash_desc_ctx *state = shash_desc_ctx(desc);
	struct ghash_ctx *key = crypto_shash_ctx(desc->tfm);

	ghash_flush(key, state);
	memcpy(dst, state->buffer, GHASH_BLOCK_SIZE);

	return 0;
}
/* Install a 4-byte big-endian seed; reject any other key length. */
static int crc32be_vx_setkey(struct crypto_shash *tfm, const u8 *newkey,
			     unsigned int newkeylen)
{
	struct crc_ctx *mctx = crypto_shash_ctx(tfm);

	if (sizeof(mctx->key) != newkeylen) {
		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	/* Key is stored big-endian on the wire; keep it native in the ctx. */
	mctx->key = be32_to_cpu(*(__be32 *)newkey);

	return 0;
}
/* Set the Adler-32 starting value; only a 4-byte key is accepted. */
static int adler32_setkey(struct crypto_shash *hash, const u8 *key,
			  unsigned int keylen)
{
	u32 *seed = crypto_shash_ctx(hash);

	if (sizeof(u32) != keylen) {
		crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	*seed = *(u32 *)key;

	return 0;
}
/* * Setting the seed allows arbitrary accumulators and flexible XOR policy * If your algorithm starts with ~0, then XOR with ~0 before you set * the seed. */ static int crc32c_pclmul_setkey(struct crypto_shash *hash, const u8 *key, unsigned int keylen) { u32 *mctx = crypto_shash_ctx(hash); if (keylen != sizeof(u32)) { crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } *mctx = le32_to_cpup((__le32 *)key); return 0; }
/* * Setting the seed allows arbitrary accumulators and flexible XOR policy * If your algorithm starts with ~0, then XOR with ~0 before you set * the seed. */ static int chksum_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { struct chksum_ctx *mctx = crypto_shash_ctx(tfm); if (keylen != sizeof(mctx->key)) { crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } mctx->key = le32_to_cpu(*(__le32 *)key); return 0; }
/*! \fn static void md5_hmac_transform(struct shash_desc *desc, u32 const *in)
 * \ingroup IFX_MD5_HMAC_FUNCTIONS
 * \brief queue one 64-byte input block into temp[] for later processing
 *        by md5_hmac_final() (dbn workaround)
 * \param desc linux crypto shash descriptor
 * \param in 64-byte block of input
*/
static void md5_hmac_transform(struct shash_desc *desc, u32 const *in)
{
	struct md5_hmac_ctx *mctx = crypto_shash_ctx(desc->tfm);

	/* Each queued block occupies 16 u32 words; mctx->dbn << 4 is the
	 * word index of the next free slot.
	 *
	 * FIX: bounds-check BEFORE copying.  The old code memcpy'd first and
	 * only warned afterwards, by which point temp[] had already been
	 * overrun.  On overflow we warn and drop the block instead of
	 * corrupting memory.
	 * NOTE(review): this assumes MD5_HMAC_DBN_TEMP_SIZE is measured in
	 * u32 words, matching the original word-index comparison — confirm. */
	if (((mctx->dbn + 1) << 4) > MD5_HMAC_DBN_TEMP_SIZE) {
		printk("MD5_HMAC_DBN_TEMP_SIZE exceeded\n");
		return;
	}

	memcpy(&temp[mctx->dbn << 4], in, 64);
	mctx->dbn += 1;
}
/* Install a 16-byte hash subkey H and precompute the CLMUL key schedule. */
static int ghash_setkey(struct crypto_shash *tfm, const u8 *key,
			unsigned int keylen)
{
	struct ghash_ctx *ctx = crypto_shash_ctx(tfm);

	if (GHASH_BLOCK_SIZE != keylen) {
		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	clmul_ghash_setkey(&ctx->shash, key);

	return 0;
}
static int ghash_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { struct ghash_ctx *ctx = crypto_shash_ctx(tfm); if (keylen != GHASH_BLOCK_SIZE) { crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } if (ctx->gf128) gf128mul_free_4k(ctx->gf128); ctx->gf128 = gf128mul_init_4k_lle((be128 *)key); if (!ctx->gf128) return -ENOMEM; return 0; }
/*! \fn int md5_hmac_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) * \ingroup IFX_MD5_HMAC_FUNCTIONS * \brief sets md5 hmac key * \param tfm linux crypto algo transform * \param key input key * \param keylen key length greater than 64 bytes IS NOT SUPPORTED */ static int md5_hmac_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { struct md5_hmac_ctx *mctx = crypto_shash_ctx(tfm); volatile struct deu_hash_t *hash = (struct deu_hash_t *) HASH_START; //printk("copying keys to context with length %d\n", keylen); if (keylen > MAX_HASH_KEYLEN) { printk("Key length more than what DEU hash can handle\n"); return -EINVAL; } hash->KIDX |= 0x80000000; // reset all 16 words of the key to '0' memcpy(&mctx->key, key, keylen); mctx->keylen = keylen; return 0; }
static int ghash_update(struct shash_desc *desc, const u8 *src, unsigned int srclen) { struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); u8 *dst = dctx->buffer; if (!ctx->gf128) return -ENOKEY; if (dctx->bytes) { int n = min(srclen, dctx->bytes); u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes); dctx->bytes -= n; srclen -= n; while (n--) *pos++ ^= *src++; if (!dctx->bytes) gf128mul_4k_lle((be128 *)dst, ctx->gf128); } while (srclen >= GHASH_BLOCK_SIZE) { crypto_xor(dst, src, GHASH_BLOCK_SIZE); gf128mul_4k_lle((be128 *)dst, ctx->gf128); src += GHASH_BLOCK_SIZE; srclen -= GHASH_BLOCK_SIZE; } if (srclen) { dctx->bytes = GHASH_BLOCK_SIZE - srclen; while (srclen--) *dst++ ^= *src++; } return 0; }
/*! \fn void md5_hmac_final(struct crypto_tfm *tfm, u8 *out)
 * \ingroup IFX_MD5_HMAC_FUNCTIONS
 * \brief compute final md5 hmac value: applies MD5 padding, then replays
 *        every block queued in temp[] through the DEU hash engine and reads
 *        back the digest registers
 * \param tfm linux crypto algo transform
 * \param out final md5 hmac output value
*/
static int md5_hmac_final(struct shash_desc *desc, u8 *out)
{
	struct md5_hmac_ctx *mctx = crypto_shash_ctx(desc->tfm);
	const unsigned int offset = mctx->byte_count & 0x3f;
	char *p = (char *)mctx->block + offset;
	int padding = 56 - (offset + 1);
	volatile struct deu_hash_t *hashs = (struct deu_hash_t *) HASH_START;
	unsigned long flag;	/* NOTE(review): unused — presumably left over
				 * from a CRTCL_SECT variant taking a flags arg */
	int i = 0;
	int dbn;
	u32 *in = &temp[0];

	/* Standard MD5 padding: 0x80, zeros, then the 64-bit bit length. */
	*p++ = 0x80;
	if (padding < 0) {
		/* No room for the length in this block: pad it out, flush,
		 * and continue padding in a fresh block. */
		memset(p, 0x00, padding + sizeof (u64));
		md5_hmac_transform(desc, mctx->block);
		p = (char *)mctx->block;
		padding = 56;
	}

	memset(p, 0, padding);
	mctx->block[14] = endian_swap((mctx->byte_count + 64) << 3); // need to add 512 bit of the IPAD operation
	mctx->block[15] = 0x00000000;

	md5_hmac_transform(desc, mctx->block);

	CRTCL_SECT_START;

	//printk("\ndbn = %d\n", mctx->dbn);
	hashs->DBN = mctx->dbn;
	asm("sync");

	*IFX_HASH_CON = 0x0703002D; //khs, go, init, ndc, endi, kyue, hmen, md5

	//wait for processing
	while (hashs->controlr.BSY) {
		// this will not take long
	}

	/* Feed each queued 16-word block to the engine and kick it off. */
	for (dbn = 0; dbn < mctx->dbn; dbn++) {
		for (i = 0; i < 16; i++) {
			hashs->MR = in[i];
		};

		hashs->controlr.GO = 1;
		asm("sync");

		//wait for processing
		while (hashs->controlr.BSY) {
			// this will not take long
		}

		in += 16;
	}

#if 1
	//wait for digest ready
	while (! hashs->controlr.DGRY) {
		// this will not take long
	}
#endif

	/* Read the 160-bit digest out of the result registers. */
	*((u32 *) out + 0) = hashs->D1R;
	*((u32 *) out + 1) = hashs->D2R;
	*((u32 *) out + 2) = hashs->D3R;
	*((u32 *) out + 3) = hashs->D4R;
	*((u32 *) out + 4) = hashs->D5R;

	/* reset the context after we finish with the hash */
	mctx->byte_count = 0;
	/* NOTE(review): sizeof(MD5_HASH_WORDS) / sizeof(MD5_BLOCK_WORDS)
	 * take the size of the macro's value type, not of the arrays —
	 * looks like it clears far fewer bytes than intended; confirm the
	 * macro definitions.  Same question for passing the raw word count
	 * MD5_HMAC_DBN_TEMP_SIZE as a byte count to memset below. */
	memset(&mctx->hash[0], 0, sizeof(MD5_HASH_WORDS));
	memset(&mctx->block[0], 0, sizeof(MD5_BLOCK_WORDS));
	memset(&temp[0], 0, MD5_HMAC_DBN_TEMP_SIZE);

	CRTCL_SECT_END;

	return 0;
}
/* One-shot digest: finup over the whole input using the tfm's seed. */
static int crc32c_pclmul_digest(struct shash_desc *desc, const u8 *data,
				unsigned int len, u8 *out)
{
	u32 *seed = crypto_shash_ctx(desc->tfm);

	return __crc32c_pclmul_finup(seed, data, len, out);
}
/* One-shot digest: finup over the whole input using the tfm's seed. */
static int adler32_digest(struct shash_desc *desc, const u8 *data,
			  unsigned int len, u8 *out)
{
	u32 *seed = crypto_shash_ctx(desc->tfm);

	return __adler32_finup(seed, data, len, out);
}