void s390_sha_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
{
	struct s390_sha_ctx *ctx = crypto_tfm_ctx(tfm);
	unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
	unsigned int index;
	int ret;

	/* how much is already in the buffer? */
	index = ctx->count & (bsize - 1);
	ctx->count += len;

	if ((index + len) < bsize)
		goto store;

	/* process one stored block */
	if (index) {
		memcpy(ctx->buf + index, data, bsize - index);
		ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, bsize);
		BUG_ON(ret != bsize);
		data += bsize - index;
		len -= bsize - index;
		index = 0;	/* any tail now belongs at the buffer start */
	}

	/* process as many blocks as possible */
	if (len >= bsize) {
		ret = crypt_s390_kimd(ctx->func, ctx->state, data,
				      len & ~(bsize - 1));
		BUG_ON(ret != (len & ~(bsize - 1)));
		data += ret;
		len -= ret;
	}

store:
	if (len)
		memcpy(ctx->buf + index, data, len);
}
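/*
 * The buffering above relies on the block size being a power of two:
 * count & (bsize - 1) is the partial-block offset and len & ~(bsize - 1)
 * rounds down to whole blocks. A minimal stand-alone sketch (hypothetical
 * user-space check, not driver code) of the same arithmetic:
 */
#include <assert.h>

int main(void)
{
	const unsigned int bsize = 64;	/* SHA-1/SHA-256 block size */
	unsigned int v;

	for (v = 0; v < 4 * bsize; v++) {
		assert((v & (bsize - 1)) == v % bsize);		/* offset */
		assert((v & ~(bsize - 1)) == v - v % bsize);	/* blocks */
	}
	return 0;
}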
static void sha512_update(struct crypto_tfm *tfm, const u8 *data,
			  unsigned int len)
{
	struct s390_sha512_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned int bsize = SHA512_BLOCK_SIZE;
	unsigned int index;
	int ret;

	/* how much is already in the buffer? */
	index = sctx->count & (bsize - 1);
	sctx->count += len;

	if ((index + len) < bsize)
		goto store;

	/* process one stored block */
	if (index) {
		memcpy(sctx->buf + index, data, bsize - index);
		ret = crypt_s390_kimd(KIMD_SHA_512, sctx->state, sctx->buf,
				      bsize);
		BUG_ON(ret != bsize);
		data += bsize - index;
		len -= bsize - index;
		index = 0;	/* any tail now belongs at the buffer start */
	}

	/* process as many blocks as possible */
	if (len >= bsize) {
		ret = crypt_s390_kimd(KIMD_SHA_512, sctx->state, data,
				      len & ~(bsize - 1));
		BUG_ON(ret != (len & ~(bsize - 1)));
		data += ret;
		len -= ret;
	}

store:
	/* anything left? */
	if (len)
		memcpy(sctx->buf + index, data, len);
}
static void sha1_update(struct crypto_tfm *tfm, const u8 *data,
			unsigned int len)
{
	struct crypt_s390_sha1_ctx *sctx;
	long imd_len;

	sctx = crypto_tfm_ctx(tfm);
	sctx->count += len * 8;		//message bit length

	//anything in buffer yet? -> must be completed
	if (sctx->buf_len && (sctx->buf_len + len) >= SHA1_BLOCK_SIZE) {
		//complete full block and hash
		memcpy(sctx->buffer + sctx->buf_len, data,
		       SHA1_BLOCK_SIZE - sctx->buf_len);
		crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer,
				SHA1_BLOCK_SIZE);
		data += SHA1_BLOCK_SIZE - sctx->buf_len;
		len -= SHA1_BLOCK_SIZE - sctx->buf_len;
		sctx->buf_len = 0;
	}

	//rest of data contains full blocks? (0x3f == SHA1_BLOCK_SIZE - 1)
	imd_len = len & ~0x3ful;
	if (imd_len) {
		crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len);
		data += imd_len;
		len -= imd_len;
	}

	//anything left? store in buffer
	if (len) {
		memcpy(sctx->buffer + sctx->buf_len, data, len);
		sctx->buf_len += len;
	}
}
/* Add padding and return the message digest. */
static void sha1_final(struct crypto_tfm *tfm, u8 *out)
{
	struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
	u64 bits;
	unsigned int index, end;
	int ret;

	/* must perform manual padding */
	index = sctx->count & 0x3f;
	end = (index < 56) ? SHA1_BLOCK_SIZE : (2 * SHA1_BLOCK_SIZE);

	/* start pad with 1 */
	sctx->buf[index] = 0x80;

	/* pad with zeros */
	index++;
	memset(sctx->buf + index, 0x00, end - index - 8);

	/* append message length */
	bits = sctx->count * 8;
	memcpy(sctx->buf + end - 8, &bits, sizeof(bits));

	ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buf, end);
	BUG_ON(ret != end);

	/* copy digest to out */
	memcpy(out, sctx->state, SHA1_DIGEST_SIZE);

	/* wipe context */
	memset(sctx, 0, sizeof *sctx);
}
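/*
 * Two details above are easy to miss. First, end is chosen so the 0x80
 * byte and the 8-byte length always fit: with index >= 56 the padding
 * spills into a second block. Second, memcpy() stores bits in host byte
 * order; FIPS 180 requires big-endian, which is only correct because
 * s390 is big-endian. A portable variant (a sketch, nothing s390-specific)
 * would spell the byte order out; kernel code would normally reach for
 * cpu_to_be64() or put_unaligned_be64() instead.
 */
static void put_be64(u8 *dst, u64 val)
{
	int i;

	/* store val big-endian, independent of host endianness */
	for (i = 7; i >= 0; i--) {
		dst[i] = val & 0xff;
		val >>= 8;
	}
}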
static void sha512_final(struct crypto_tfm *tfm, u8 *out)
{
	struct s390_sha512_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned int bsize = SHA512_BLOCK_SIZE;
	u64 bits;
	unsigned int index, end;
	int ret;

	/* must perform manual padding */
	index = sctx->count & (bsize - 1);
	end = (index < bsize - 16) ? bsize : (2 * bsize);

	sctx->buf[index] = 0x80;	/* start pad with 1 */
	index++;

	/* pad with zeros */
	memset(sctx->buf + index, 0x00, end - index - 8);

	/*
	 * Append message length. SHA-512 wants a 128 bit length value;
	 * nevertheless we use a u64, since a 64 bit message length should
	 * be enough for now. The high 64 bits of the length field were
	 * already zeroed by the memset above.
	 */
	bits = sctx->count * 8;
	memcpy(sctx->buf + end - 8, &bits, sizeof(bits));

	ret = crypt_s390_kimd(KIMD_SHA_512, sctx->state, sctx->buf, end);
	BUG_ON(ret != end);

	memcpy(out, sctx->state, SHA512_DIGEST_SIZE);	/* copy digest to out */
	memset(sctx, 0, sizeof *sctx);			/* wipe context */
}
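/*
 * A hypothetical helper that writes the full 128 bit SHA-512 length field
 * explicitly (same big-endian-host assumption as the code above): the bit
 * count count * 8 splits into (count >> 61) high and (count << 3) low.
 */
static void sha512_put_len(u8 *buf, unsigned int end, u64 count)
{
	u64 hi = count >> 61;	/* high 64 bits of count * 8 */
	u64 lo = count << 3;	/* low 64 bits of count * 8 */

	memcpy(buf + end - 16, &hi, sizeof(hi));
	memcpy(buf + end - 8, &lo, sizeof(lo));
}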
/* Add padding and return the message digest. */
static void sha1_final(struct crypto_tfm *tfm, u8 *out)
{
	struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);

	//must perform manual padding
	pad_message(sctx);
	crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len);

	//copy digest to out
	memcpy(out, sctx->state, SHA1_DIGEST_SIZE);

	//wipe context
	memset(sctx, 0, sizeof *sctx);
}
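/*
 * pad_message() is not shown in this excerpt. From how sha1_final() uses
 * it (the buffer is hashed with length sctx->buf_len, and sctx->count
 * already holds the message length in bits), a plausible reconstruction
 * follows; treat it as an illustration of the contract, not the original
 * code. The SHA-256 variant further below evidently maintains sctx->count
 * instead, since its final() hashes sctx->count / 8 bytes.
 */
static void pad_message(struct crypt_s390_sha1_ctx *sctx)
{
	int index = sctx->buf_len;
	int end = (index < 56) ? SHA1_BLOCK_SIZE : 2 * SHA1_BLOCK_SIZE;

	sctx->buffer[index++] = 0x80;	/* start pad with 1 */
	memset(sctx->buffer + index, 0x00, end - index - 8);
	/* append 64 bit length; assumes a big-endian host and a u64 count */
	memcpy(sctx->buffer + end - 8, &sctx->count, 8);
	sctx->buf_len = end;		/* final() hashes buf_len bytes */
}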
static void sha256_update(struct crypto_tfm *tfm, const u8 *data,
			  unsigned int len)
{
	struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned int index;
	int ret;

	/* how much is already in the buffer? */
	index = sctx->count / 8 & 0x3f;

	/* update message bit length */
	sctx->count += len * 8;

	if ((index + len) < SHA256_BLOCK_SIZE)
		goto store;

	/* process one stored block */
	if (index) {
		memcpy(sctx->buf + index, data, SHA256_BLOCK_SIZE - index);
		ret = crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf,
				      SHA256_BLOCK_SIZE);
		BUG_ON(ret != SHA256_BLOCK_SIZE);
		data += SHA256_BLOCK_SIZE - index;
		len -= SHA256_BLOCK_SIZE - index;
		index = 0;	/* any tail now belongs at the buffer start */
	}

	/* process as many blocks as possible */
	if (len >= SHA256_BLOCK_SIZE) {
		ret = crypt_s390_kimd(KIMD_SHA_256, sctx->state, data,
				      len & ~(SHA256_BLOCK_SIZE - 1));
		BUG_ON(ret != (len & ~(SHA256_BLOCK_SIZE - 1)));
		data += ret;
		len -= ret;
	}

store:
	/* anything left? */
	if (len)
		memcpy(sctx->buf + index, data, len);
}
/* Add padding and return the message digest */
static void sha256_final(struct crypto_tfm *tfm, u8 *out)
{
	struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);

	/* must perform manual padding */
	pad_message(sctx);
	crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf,
			sctx->count / 8);

	/* copy digest to out */
	memcpy(out, sctx->state, SHA256_DIGEST_SIZE);

	/* wipe context */
	memset(sctx, 0, sizeof *sctx);
}
static int __init init(void)
{
	struct crypt_s390_query_status status = {
		.high = 0,
		.low = 0
	};

	printk(KERN_INFO "crypt_s390: querying available crypto functions\n");
	crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
	printk(KERN_INFO "KM:\t%016llx %016llx\n",
	       (unsigned long long) status.high,
	       (unsigned long long) status.low);

	status.high = status.low = 0;
	crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0);
	printk(KERN_INFO "KMC:\t%016llx %016llx\n",
	       (unsigned long long) status.high,
	       (unsigned long long) status.low);

	status.high = status.low = 0;
	crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0);
	printk(KERN_INFO "KIMD:\t%016llx %016llx\n",
	       (unsigned long long) status.high,
	       (unsigned long long) status.low);

	status.high = status.low = 0;
	crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0);
	printk(KERN_INFO "KLMD:\t%016llx %016llx\n",
	       (unsigned long long) status.high,
	       (unsigned long long) status.low);

	status.high = status.low = 0;
	crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
	printk(KERN_INFO "KMAC:\t%016llx %016llx\n",
	       (unsigned long long) status.high,
	       (unsigned long long) status.low);

	query_available_functions();

	/* query-only module: fail the load on purpose so it never stays */
	return -ECANCELED;
}

static void __exit cleanup(void)
{
}

module_init(init);
module_exit(cleanup);

MODULE_LICENSE("GPL");
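/*
 * Each *_QUERY call fills a 128 bit status block in which bit n, counted
 * from the leftmost bit, is set when function code n is available. A
 * hypothetical helper for testing one function code against the printed
 * high/low words:
 */
static int cpacf_func_available(u64 high, u64 low, unsigned int fc)
{
	/* bit 0 is the most significant bit of 'high' */
	if (fc < 64)
		return (high >> (63 - fc)) & 1;
	return (low >> (127 - fc)) & 1;
}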
int s390_sha_final(struct shash_desc *desc, u8 *out)
{
	struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
	unsigned int bsize = crypto_shash_blocksize(desc->tfm);
	u64 bits;
	unsigned int index, end, plen;
	int ret;

	/* SHA-512 uses 128 bit padding length */
	plen = (bsize > SHA256_BLOCK_SIZE) ? 16 : 8;

	/* must perform manual padding */
	index = ctx->count & (bsize - 1);
	end = (index < bsize - plen) ? bsize : (2 * bsize);

	/* start pad with 1 */
	ctx->buf[index] = 0x80;
	index++;

	/* pad with zeros; for SHA-512 this also zeroes the high 64 bits
	 * of the 128 bit length field */
	memset(ctx->buf + index, 0x00, end - index - 8);

	/*
	 * Append message length. Well, SHA-512 wants a 128 bit length value,
	 * nevertheless we use u64, should be enough for now...
	 */
	bits = ctx->count * 8;
	memcpy(ctx->buf + end - 8, &bits, sizeof(bits));

	ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, end);
	BUG_ON(ret != end);

	/* copy digest to out */
	memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm));

	/* wipe context */
	memset(ctx, 0, sizeof *ctx);
	return 0;
}
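/*
 * For completeness: these helpers are not called directly, but wired into
 * a struct shash_alg and reached through the generic crypto API. A
 * minimal, hypothetical in-kernel caller (assuming a kernel that provides
 * SHASH_DESC_ON_STACK(); error handling abbreviated) might look like:
 */
#include <crypto/hash.h>
#include <linux/err.h>

static int demo_sha256_digest(const u8 *msg, unsigned int len, u8 *digest)
{
	struct crypto_shash *tfm;
	int ret;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		/* init/update/final end up in the s390 driver when loaded */
		ret = crypto_shash_digest(desc, msg, len, digest);
		shash_desc_zero(desc);
	}

	crypto_free_shash(tfm);
	return ret;
}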