void do_integrity_check(void) { u8 *rbuf = (u8 *) ZIMAGE_ADDR; u32 len; u8 hmac[SHA256_DIGEST_SIZE]; struct hash_desc desc; struct scatterlist sg; u8* key="12345678"; printk(KERN_INFO "FIPS: do kernel integrity check\n"); if (integrity_checked || in_fips_err()) return; if ( *((u32*) &rbuf[36]) != 0x016F2818) { printk(KERN_ERR "FIPS: invalid zImage magic number."); set_in_fips_err(); goto err; } len = *(u32*)&rbuf[44] - *(u32*)&rbuf[40]; if (len < 0) { printk(KERN_ERR "FIPS: invalid zImage calculated len"); set_in_fips_err(); goto err; } desc.tfm = crypto_alloc_hash("hmac(sha256)",0,0); if (IS_ERR(desc.tfm)) { printk(KERN_ERR "FIPS: integ failed to allocate tfm %ld\n",PTR_ERR(desc.tfm)); set_in_fips_err(); goto err; } sg_init_one(&sg, rbuf, len); crypto_hash_setkey(desc.tfm,key,strlen(key)); crypto_hash_digest(&desc,&sg,len,hmac); if (!strncmp(hmac,&rbuf[len],SHA256_DIGEST_SIZE)) { printk(KERN_INFO "FIPS: integrity check passed\n"); } else { printk(KERN_ERR "FIPS: integrity check failed\n"); set_in_fips_err(); } err: integrity_checked=true; crypto_free_hash(desc.tfm); return; }
/*
 * Finish the current step of a block-cipher scatterlist walk.
 *
 * @desc: cipher descriptor (supplies flags and the caller-visible IV slot)
 * @walk: walk state being advanced
 * @err:  on entry, the number of bytes the caller left unprocessed in this
 *        step (>= 0), or a negative error code
 *
 * Returns blkcipher_walk_next() while data remains, 0 when the walk has
 * completed cleanly, or a negative error code.
 */
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	unsigned int nbytes = 0;

#ifdef CONFIG_CRYPTO_FIPS
	/* Refuse all crypto work once the module is in the FIPS error state. */
	if (unlikely(in_fips_err()))
		return (-EACCES);
#endif

	if (likely(err >= 0)) {
		/* n = bytes actually consumed in this step. */
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			/*
			 * Slow-path (bounce-buffered) steps must be consumed
			 * in full; a partial result here is a caller bug.
			 * Jump past scatterwalk_done() so the bogus state is
			 * not flushed back into the scatterlists.
			 */
			err = -EINVAL;
			goto err;
		} else
			n = blkcipher_done_slow(walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	/*
	 * Walk complete (or aborted): propagate the working IV back to the
	 * caller's slot if it was relocated, then release scratch storage.
	 */
	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
/*
 * Single-block encrypt entry point that tolerates buffers which do not
 * meet the transform's alignment mask: misaligned requests are bounced
 * through cipher_crypt_unaligned(), aligned ones call the cipher
 * directly.
 */
static void cipher_encrypt_unaligned(struct crypto_tfm *tfm,
				     u8 *dst, const u8 *src)
{
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
	unsigned long mask = crypto_tfm_alg_alignmask(tfm);
	unsigned long misaligned =
		((unsigned long)dst | (unsigned long)src) & mask;

#ifdef CONFIG_CRYPTO_FIPS
	/* No output is produced while in the FIPS error state. */
	if (unlikely(in_fips_err()))
		return;
#endif

	if (unlikely(misaligned)) {
		cipher_crypt_unaligned(cipher->cia_encrypt, tfm, dst, src);
		return;
	}

	cipher->cia_encrypt(tfm, dst, src);
}
/*
 * Run a one-block cipher operation through an aligned stack bounce
 * buffer for callers whose src/dst do not satisfy the transform's
 * alignment mask.
 *
 * NOTE(review): the bounce buffer is a VLA sized from blocksize +
 * alignmask; presumably both are small, bounded values for registered
 * ciphers — confirm, since upstream later replaced such VLAs with a
 * fixed MAX_CIPHER_BLOCKSIZE buffer.
 */
static void cipher_crypt_unaligned(void (*fn)(struct crypto_tfm *, u8 *,
					      const u8 *),
				   struct crypto_tfm *tfm,
				   u8 *dst, const u8 *src)
{
	unsigned long mask = crypto_tfm_alg_alignmask(tfm);
	unsigned int blk = crypto_tfm_alg_blocksize(tfm);
	u8 stack_buf[blk + mask];
	/* Round up inside the oversized buffer to the required alignment. */
	u8 *bounce = (u8 *)ALIGN((unsigned long)stack_buf, mask + 1);

#ifdef CONFIG_CRYPTO_FIPS
	/* No output is produced while in the FIPS error state. */
	if (unlikely(in_fips_err()))
		return;
#endif

	memcpy(bounce, src, blk);
	fn(tfm, bounce, bounce);
	memcpy(dst, bounce, blk);
}
/*
 * Instance-init hook for IV-generator templates: grab the wrapped
 * skcipher from the instance's spawn and extend the request size by
 * the child's requirement.
 */
int skcipher_geniv_init(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_ablkcipher *child;

#ifdef CONFIG_CRYPTO_FIPS
	/* Refuse to instantiate anything in the FIPS error state. */
	if (unlikely(in_fips_err()))
		return (-EACCES);
#endif

	child = crypto_spawn_skcipher(crypto_instance_ctx(inst));
	if (IS_ERR(child))
		return PTR_ERR(child);

	tfm->crt_ablkcipher.base = child;
	tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(child);

	return 0;
}
static int crypto_check_alg(struct crypto_alg *alg) { // [email protected] - FIPS mode self test failure error - starts if(unlikely(in_fips_err())) { printk(KERN_ERR "crypto_check_alg failed due to FIPS error: %s", alg->cra_name); return -EACCES; } // [email protected] - FIPS mode self test failure error - ends if (alg->cra_alignmask & (alg->cra_alignmask + 1)) return -EINVAL; if (alg->cra_blocksize > PAGE_SIZE / 8) return -EINVAL; if (alg->cra_priority < 0) return -EINVAL; return crypto_set_driver_name(alg); }
/*
 * Sanity-check an algorithm's static parameters before registration.
 *
 * Returns 0 (via crypto_set_driver_name()) on success, -EACCES when the
 * module is in the FIPS error state, or -EINVAL for malformed
 * alignmask/blocksize/priority values.
 */
static int crypto_check_alg(struct crypto_alg *alg)
{
#ifdef CONFIG_CRYPTO_FIPS
	/*
	 * Fix: the guard previously read `#ifdef CRYPTO_FIPS`, but Kconfig
	 * exposes the symbol to C as CONFIG_CRYPTO_FIPS (as every other
	 * FIPS guard in this file uses), so the error-state check was
	 * silently compiled out.  Also added the missing '\n' so the log
	 * line is not merged with the next printk.
	 */
	if (unlikely(in_fips_err())) {
		printk(KERN_ERR "crypto_check_alg failed due to FIPS error: %s\n",
		       alg->cra_name);
		return -EACCES;
	}
#endif

	/* alignmask must be one less than a power of two. */
	if (alg->cra_alignmask & (alg->cra_alignmask + 1))
		return -EINVAL;

	if (alg->cra_blocksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (alg->cra_priority < 0)
		return -EINVAL;

	return crypto_set_driver_name(alg);
}
/*
 * Returns DEFAULT_BLK_SZ bytes of random data per call
 * returns 0 if generation succeeded, <0 if something went wrong
 *
 * Implements one iteration of the ANSI X9.31-style generator: three
 * chained encryptions over the DT counter, the secret vector V, and the
 * previous output, producing ctx->rand_data and a fresh V.  When
 * cont_test is set, a repeated output block drives the module into the
 * FIPS error state instead of panicking.
 */
static int _get_more_prng_bytes(struct prng_context *ctx, int cont_test)
{
	int i;
	unsigned char tmp[DEFAULT_BLK_SZ];
	unsigned char *output = NULL;

	dbgprint(KERN_CRIT "Calling _get_more_prng_bytes for context %p\n",
		 ctx);

	hexdump("Input DT: ", ctx->DT, DEFAULT_BLK_SZ);
	hexdump("Input I: ", ctx->I, DEFAULT_BLK_SZ);
	hexdump("Input V: ", ctx->V, DEFAULT_BLK_SZ);

	/*
	 * This algorithm is a 3 stage state machine
	 */
	for (i = 0; i < 3; i++) {
		switch (i) {
		case 0:
			/*
			 * Start by encrypting the counter value
			 * This gives us an intermediate value I
			 */
			memcpy(tmp, ctx->DT, DEFAULT_BLK_SZ);
			output = ctx->I;
			hexdump("tmp stage 0: ", tmp, DEFAULT_BLK_SZ);
			break;
		case 1:
			/*
			 * Next xor I with our secret vector V
			 * encrypt that result to obtain our
			 * pseudo random data which we output
			 */
			xor_vectors(ctx->I, ctx->V, tmp, DEFAULT_BLK_SZ);
			hexdump("tmp stage 1: ", tmp, DEFAULT_BLK_SZ);
			output = ctx->rand_data;
			break;
		case 2:
			// [email protected] - disable kernel panic in FIPS mode - starts
			/* Once in the FIPS error state, refuse to emit output. */
			if (in_fips_err()) {
				return -EINVAL;
			}
			// [email protected] - disable kernel panic in FIPS mode - ends
			/*
			 * First check that we didn't produce the same
			 * random data that we did last time around through this
			 */
			if (!memcmp(ctx->rand_data, ctx->last_rand_data,
				    DEFAULT_BLK_SZ)) {
				if (cont_test) {
					// [email protected] - disable kernel panic in FIPS mode - starts
					/* Continuity-test failure: latch the FIPS
					 * error state rather than panicking. */
					set_in_fips_err();
					// [email protected] - disable kernel panic in FIPS mode - ends
				}

				printk(KERN_ERR
				       "ctx %p Failed repetition check!\n",
				       ctx);

				ctx->flags |= PRNG_NEED_RESET;
				return -EINVAL;
			}
			memcpy(ctx->last_rand_data, ctx->rand_data,
			       DEFAULT_BLK_SZ);

			/*
			 * Lastly xor the random data with I
			 * and encrypt that to obtain a new secret vector V
			 */
			xor_vectors(ctx->rand_data, ctx->I, tmp,
				    DEFAULT_BLK_SZ);
			output = ctx->V;
			hexdump("tmp stage 2: ", tmp, DEFAULT_BLK_SZ);
			break;
		}

		/* do the encryption */
		crypto_cipher_encrypt_one(ctx->tfm, output, tmp);
	}

	/*
	 * Now update our DT value
	 * (big-endian increment with carry propagation)
	 */
	for (i = DEFAULT_BLK_SZ - 1; i >= 0; i--) {
		ctx->DT[i] += 1;
		if (ctx->DT[i] != 0)
			break;
	}

	dbgprint("Returning new block for context %p\n", ctx);
	ctx->rand_data_valid = 0;

	hexdump("Output DT: ", ctx->DT, DEFAULT_BLK_SZ);
	hexdump("Output I: ", ctx->I, DEFAULT_BLK_SZ);
	hexdump("Output V: ", ctx->V, DEFAULT_BLK_SZ);
	hexdump("New Random Data: ", ctx->rand_data, DEFAULT_BLK_SZ);

	return 0;
}
void do_integrity_check(void) { u8 *rbuf = 0; u32 len; u8 hmac[SHA256_DIGEST_SIZE]; struct hash_desc desc; struct scatterlist sg; u8 *key = "12345678"; int i, step_len = PAGE_SIZE, err; u8 *pAllocBuf = 0; printk(KERN_INFO "FIPS: integrity start\n"); if (unlikely(!need_integrity_check || in_fips_err())) { printk(KERN_INFO "FIPS: integrity check not needed\n"); return; } rbuf = (u8*)phys_to_virt((unsigned long)CONFIG_CRYPTO_FIPS_INTEG_COPY_ADDRESS); if (*((u32 *) &rbuf[36]) != 0x016F2818) { printk(KERN_ERR "FIPS: invalid zImage magic number."); set_in_fips_err(); goto err1; } if (*(u32 *) &rbuf[44] <= *(u32 *) &rbuf[40]) { printk(KERN_ERR "FIPS: invalid zImage calculated len"); set_in_fips_err(); goto err1; } len = *(u32 *) &rbuf[44] - *(u32 *) &rbuf[40]; printk(KERN_INFO "FIPS: integrity actual zImageLen = %d\n", len); printk(KERN_INFO "FIPS: do kernel integrity check address: %lx \n", (unsigned long)rbuf); desc.tfm = crypto_alloc_hash("hmac(sha256)", 0, 0); if (IS_ERR(desc.tfm)) { printk(KERN_ERR "FIPS: integ failed to allocate tfm %ld\n", PTR_ERR(desc.tfm)); set_in_fips_err(); goto err1; } #if FIPS_FUNC_TEST == 2 rbuf[1024] = rbuf[1024] + 1; #endif crypto_hash_setkey(desc.tfm, key, strlen(key)); pAllocBuf = kmalloc(step_len,GFP_KERNEL); if (!pAllocBuf) { printk(KERN_INFO "Fail to alloc memory, length %d\n", step_len); set_in_fips_err(); goto err1; } err = crypto_hash_init(&desc); if (err) { printk(KERN_INFO "fail at crypto_hash_init\n"); set_in_fips_err(); kfree(pAllocBuf); goto err1; } for (i = 0; i < len; i += step_len) { //last is reached if (i + step_len >= len - 1) { memcpy(pAllocBuf, &rbuf[i], len - i); sg_init_one(&sg, pAllocBuf, len - i); err = crypto_hash_update(&desc, &sg, len - i); if (err) { printk(KERN_INFO "Fail to crypto_hash_update1\n"); set_in_fips_err(); goto err; } err = crypto_hash_final(&desc, hmac); if (err) { printk(KERN_INFO "Fail to crypto_hash_final\n"); set_in_fips_err(); goto err; } } else { memcpy(pAllocBuf, &rbuf[i], step_len); 
sg_init_one(&sg, pAllocBuf, step_len); err = crypto_hash_update(&desc, &sg, step_len); if (err) { printk(KERN_INFO "Fail to crypto_hash_update\n"); set_in_fips_err(); goto err; } } } #if FIPS_FUNC_TEST == 2 rbuf[1024] = rbuf[1024] - 1; #endif if (!strncmp(hmac, &rbuf[len], SHA256_DIGEST_SIZE)) { printk(KERN_INFO "FIPS: integrity check passed\n"); } else { printk(KERN_ERR "FIPS: integrity check failed. hmac:%lx, buf:%lx.\n",(long) hmac, (long)rbuf[len] ); set_in_fips_err(); } err: kfree(pAllocBuf); crypto_free_hash(desc.tfm); err1: need_integrity_check = false; /* if(integrity_mem_reservoir != 0) { printk(KERN_NOTICE "FIPS free integrity_mem_reservoir = %ld\n", integrity_mem_reservoir); free_bootmem((unsigned long)CONFIG_CRYPTO_FIPS_INTEG_COPY_ADDRESS, integrity_mem_reservoir); } */ }
/*
 * Async encrypt entry point: bridges an ablkcipher request onto the
 * underlying synchronous blkcipher implementation via an on-stack desc.
 */
static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

#ifdef CONFIG_CRYPTO_FIPS
	/* Refuse all crypto work once the module is in the FIPS error state. */
	if (unlikely(in_fips_err()))
		return (-EACCES);
#endif
	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

/* Async decrypt entry point; mirror of async_encrypt(). */
static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

#ifdef CONFIG_CRYPTO_FIPS
	if (unlikely(in_fips_err()))
		return (-EACCES);
#endif
	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

/*
 * Context size for a blkcipher tfm.  Synchronous users (type mask fully
 * set) with an IV get extra space appended after the aligned context so
 * crypto_init_blkcipher_ops_sync() can place the IV there.
 */
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

/* Wire up the async (ablkcipher) ops for a blkcipher algorithm. */
static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	if (!alg->ivsize) {
		/* IV-less cipher: the null IV generators suffice. */
		crt->givencrypt = skcipher_null_givencrypt;
		crt->givdecrypt = skcipher_null_givdecrypt;
	}
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

/*
 * Wire up the sync ops.  The IV is stored in the aligned region right
 * after the tfm context — the space reserved by crypto_blkcipher_ctxsize().
 */
static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

/* Choose sync vs async initialization from the requested type mask. */
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

#ifdef CONFIG_NET
/* Report blkcipher parameters over the crypto netlink interface. */
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

/* /proc/crypto pretty-printer; 'unused' attribute covers !CONFIG_PROC_FS. */
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type : blkcipher\n");
	seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv : %s\n", alg->cra_blkcipher.geniv ?: "<default>");
}

/* Type descriptor registered for all blkcipher algorithms. */
const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

/*
 * Look up a geniv-capable cipher by name and attach it to the spawn,
 * dropping the module reference once the spawn holds its own.
 */
static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
				 const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}

/*
 * Allocate a template instance that wraps a block cipher with an IV
 * generator.  `balg` normalizes the fields that differ between the
 * blkcipher and ablkcipher variants of the wrapped algorithm.
 */
struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
					     struct rtattr **tb, u32 type,
					     u32 mask)
{
	struct {
		int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int keylen);
		int (*encrypt)(struct ablkcipher_request *req);
		int (*decrypt)(struct ablkcipher_request *req);

		unsigned int min_keysize;
		unsigned int max_keysize;
		unsigned int ivsize;

		const char *geniv;
	} balg;
	const char *name;
	struct crypto_skcipher_spawn *spawn;
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

#ifdef CONFIG_CRYPTO_FIPS
	/* No new instances may be built in the FIPS error state. */
	if (unlikely(in_fips_err()))
		return ERR_PTR(-EACCES);
#endif

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	/* Caller must be asking for a givcipher with the GENIV flag. */
	if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
	    algt->mask)
		return ERR_PTR(-EINVAL);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return ERR_CAST(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = crypto_instance_ctx(inst);

	/* Ignore async algorithms if necessary. */
	mask |= crypto_requires_sync(algt->type, algt->mask);

	crypto_set_skcipher_spawn(spawn, inst);
	err = crypto_grab_nivcipher(spawn, name, type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER) {
		/* Sync blkcipher: route through the async bridge ops. */
		balg.ivsize = alg->cra_blkcipher.ivsize;
		balg.min_keysize = alg->cra_blkcipher.min_keysize;
		balg.max_keysize = alg->cra_blkcipher.max_keysize;

		balg.setkey = async_setkey;
		balg.encrypt = async_encrypt;
		balg.decrypt = async_decrypt;

		balg.geniv = alg->cra_blkcipher.geniv;
	} else {
		/* Already an ablkcipher: use its ops directly. */
		balg.ivsize = alg->cra_ablkcipher.ivsize;
		balg.min_keysize = alg->cra_ablkcipher.min_keysize;
		balg.max_keysize = alg->cra_ablkcipher.max_keysize;

		balg.setkey = alg->cra_ablkcipher.setkey;
		balg.encrypt = alg->cra_ablkcipher.encrypt;
		balg.decrypt = alg->cra_ablkcipher.decrypt;

		balg.geniv = alg->cra_ablkcipher.geniv;
	}

	err = -EINVAL;
	if (!balg.ivsize)
		goto err_drop_alg;

	/*
	 * This is only true if we're constructing an algorithm with its
	 * default IV generator. For the default generator we elide the
	 * template name and double-check the IV generator.
	 */
	if (algt->mask & CRYPTO_ALG_GENIV) {
		if (!balg.geniv)
			balg.geniv = crypto_default_geniv(alg);
		err = -EAGAIN;
		if (strcmp(tmpl->name, balg.geniv))
			goto err_drop_alg;

		memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
		memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
		       CRYPTO_MAX_ALG_NAME);
	} else {
		err = -ENAMETOOLONG;
		if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
		if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_driver_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
	}

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_givcipher_type;

	inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
	inst->alg.cra_ablkcipher.geniv = balg.geniv;

	inst->alg.cra_ablkcipher.setkey = balg.setkey;
	inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
	inst->alg.cra_ablkcipher.decrypt = balg.decrypt;

out:
	return inst;

err_drop_alg:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);

/* Tear down an instance created by skcipher_geniv_alloc(). */
void skcipher_geniv_free(struct crypto_instance *inst)
{
	crypto_drop_skcipher(crypto_instance_ctx(inst));
	kfree(inst);
}