/*
 * crypto_hmac_final - finish an HMAC computation (RFC 2104).
 * @tfm:    digest transform carrying the HMAC block buffer
 * @key:    HMAC key; replaced by its digest if longer than one block
 * @keylen: in/out key length, updated when the key is hashed down
 * @out:    receives the final HMAC value (digest size bytes)
 *
 * Finalises the inner hash into @out, then computes
 * H((K ^ opad) || inner_digest) back into @out.
 */
void crypto_hmac_final(struct crypto_tfm *tfm, u8 *key,
		       unsigned int *keylen, u8 *out)
{
	unsigned int i;
	struct scatterlist tmp;
	char *opad = tfm->crt_digest.dit_hmac_block;

	/* RFC 2104: keys longer than the block size are hashed first. */
	if (*keylen > crypto_tfm_alg_blocksize(tfm)) {
		hash_key(tfm, key, *keylen);
		*keylen = crypto_tfm_alg_digestsize(tfm);
	}

	/* Finish the inner hash; its digest lands in out. */
	crypto_digest_final(tfm, out);

	/* Build the opad block: key padded with zeroes, XORed with 0x5c. */
	memset(opad, 0, crypto_tfm_alg_blocksize(tfm));
	memcpy(opad, key, *keylen);

	for (i = 0; i < crypto_tfm_alg_blocksize(tfm); i++)
		opad[i] ^= 0x5c;

	/*
	 * Consistency fix: use sg_set_buf() as crypto_hmac_init() does
	 * instead of open-coding virt_to_page()/offset_in_page()/length.
	 */
	sg_set_buf(&tmp, opad, crypto_tfm_alg_blocksize(tfm));

	crypto_digest_init(tfm);
	crypto_digest_update(tfm, &tmp, 1);

	/* Outer hash over (K ^ opad) || inner digest. */
	sg_set_buf(&tmp, out, crypto_tfm_alg_digestsize(tfm));

	crypto_digest_update(tfm, &tmp, 1);
	crypto_digest_final(tfm, out);
}
/*
 * Start the KCF session, load the key.
 *
 * Returns 0 when the KCF digest context was initialised successfully,
 * -1 otherwise.
 */
int
smb_md5_init(smb_sign_ctx_t *ctxp, smb_sign_mech_t *mech)
{
	if (crypto_digest_init(mech, ctxp, NULL) != CRYPTO_SUCCESS)
		return (-1);

	return (0);
}
static inline void iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask) { struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; BUG_ON(!tcp_conn->data_tx_tfm); crypto_digest_init(tcp_conn->data_tx_tfm); tcp_ctask->digest_count = 4; }
/*
 * digsig_sha1_init - attach the shared SHA-1 transform to @ctx and
 * start a fresh digest.
 *
 * The transform is allocated lazily on first use and cached in the
 * file-scope sha1_tfm.  Returns 0 on success, -1 on a NULL @ctx or if
 * the transform could not be allocated.
 */
static int digsig_sha1_init(SIGCTX * ctx)
{
	if (ctx == NULL)
		return -1;

	/* Allocate the shared transform lazily on first use. */
	if (sha1_tfm == NULL)
		sha1_tfm = crypto_alloc_tfm("sha1", 0);

	ctx->tfm = sha1_tfm;

	/*
	 * Bug fix: the legacy crypto_alloc_tfm() returns NULL on failure,
	 * not an ERR_PTR value, so IS_ERR() alone never caught a failed
	 * allocation and crypto_digest_init() would oops on a NULL tfm.
	 * Check for NULL as well.
	 */
	if (ctx->tfm == NULL || IS_ERR(ctx->tfm)) {
		DSM_ERROR("tfm allocation failed\n");
		return -1;
	}

	crypto_digest_init(ctx->tfm);
	return 0;
}
/*
 * crypto_hmac_init - begin an HMAC computation (RFC 2104).
 * @tfm:    digest transform carrying the HMAC block buffer
 * @key:    HMAC key; replaced by its digest if longer than one block
 * @keylen: in/out key length, updated when the key is hashed down
 *
 * Builds the ipad block (key XOR 0x36) and feeds it into a freshly
 * initialised digest; callers then stream the message in with
 * crypto_digest_update() and finish via crypto_hmac_final().
 */
void crypto_hmac_init(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen)
{
	struct scatterlist sg;
	char *ipad = tfm->crt_digest.dit_hmac_block;
	unsigned int i;

	/* Keys longer than one block are replaced by their digest. */
	if (*keylen > crypto_tfm_alg_blocksize(tfm)) {
		hash_key(tfm, key, *keylen);
		*keylen = crypto_tfm_alg_digestsize(tfm);
	}

	/* Zero-pad the key to a full block, then XOR in the ipad byte. */
	memset(ipad, 0, crypto_tfm_alg_blocksize(tfm));
	memcpy(ipad, key, *keylen);

	for (i = 0; i < crypto_tfm_alg_blocksize(tfm); i++)
		ipad[i] ^= 0x36;

	sg_set_buf(&sg, ipad, crypto_tfm_alg_blocksize(tfm));

	crypto_digest_init(tfm);
	crypto_digest_update(tfm, &sg, 1);
}
/*
 * verify a module's signature
 *
 * Locates the ".module_sig" PROGBITS section, canonicalises the section
 * order, digests the relevant section contents (name + selected header
 * fields + data) into an SHA1 transform and hands the digest plus the
 * embedded signature to ksign_verify_signature().
 *
 * Returns:
 *   1       - signature present and verified (a 0 from ksign is mapped
 *             to 1 below; presumably so callers can distinguish
 *             "verified" from "no signature, allowed" == 0 - TODO confirm)
 *   0       - no signature and unsigned modules are permitted
 *   -EPERM  - no SHA1 transform, or unsigned module rejected
 *   -ELIBBAD- malformed relocation data
 *   other   - error from module_verify_canonicalise() or ksign
 *
 * NOTE(review): mvdata->digest is freed here only on the format_error
 * path; presumably ksign_verify_signature() consumes/frees the tfm on
 * the other paths - verify against its implementation.
 */
int module_verify_signature(struct module_verify_data *mvdata)
{
	const Elf_Shdr *sechdrs = mvdata->sections;
	const char *secstrings = mvdata->secstrings;
	const char *sig;
	unsigned sig_size;
	int i, ret;

	/* Scan the section table for the signature section. */
	for (i = 1; i < mvdata->nsects; i++) {
		switch (sechdrs[i].sh_type) {
		case SHT_PROGBITS:
			if (strcmp(mvdata->secstrings + sechdrs[i].sh_name,
				   ".module_sig") == 0) {
				mvdata->sig_index = i;
			}
			break;
		}
	}

	if (mvdata->sig_index <= 0)
		goto no_signature;

	sig = mvdata->buffer + sechdrs[mvdata->sig_index].sh_offset;
	sig_size = sechdrs[mvdata->sig_index].sh_size;

	_debug("sig in section %d (size %d)\n",
	       mvdata->sig_index, sig_size);

	/* produce a canonicalisation map for the sections */
	ret = module_verify_canonicalise(mvdata);
	if (ret < 0)
		return ret;

	/* grab an SHA1 transformation context
	 * - !!! if this tries to load the sha1.ko module, we will deadlock!!!
	 */
	mvdata->digest = crypto_alloc_tfm2("sha1", 0, 1);
	if (!mvdata->digest) {
		printk("Couldn't load module - SHA1 transform unavailable\n");
		return -EPERM;
	}

	crypto_digest_init(mvdata->digest);

#ifdef MODSIGN_DEBUG
	mvdata->xcsum = 0;
#endif

	/* load data from each relevant section into the digest */
	for (i = 1; i < mvdata->nsects; i++) {
		unsigned long sh_type = sechdrs[i].sh_type;
		unsigned long sh_info = sechdrs[i].sh_info;
		unsigned long sh_size = sechdrs[i].sh_size;
		unsigned long sh_flags = sechdrs[i].sh_flags;
		const char *sh_name = secstrings + sechdrs[i].sh_name;
		const void *data = mvdata->buffer + sechdrs[i].sh_offset;

		/* the signature section itself is never digested */
		if (i == mvdata->sig_index)
			continue;

#ifdef MODSIGN_DEBUG
		mvdata->csum = 0;
#endif

		/* it would be nice to include relocation sections as-is, but
		 * the act of adding a signature to the module seems to change
		 * their contents, because the symtab gets changed when
		 * sections are added or removed */
		if (sh_type == SHT_REL || sh_type == SHT_RELA) {
			/* only digest relocations that target a section we
			 * digested, using its canonicalised index so the
			 * result is stable across section renumbering */
			if (mvdata->canonlist[sh_info]) {
				uint32_t xsh_info = mvdata->canonmap[sh_info];

				crypto_digest_update_data(mvdata, sh_name,
							  strlen(sh_name));
				crypto_digest_update_val(mvdata,
							 sechdrs[i].sh_type);
				crypto_digest_update_val(mvdata,
							 sechdrs[i].sh_flags);
				crypto_digest_update_val(mvdata,
							 sechdrs[i].sh_size);
				crypto_digest_update_val(mvdata,
							 sechdrs[i].sh_addralign);
				crypto_digest_update_val(mvdata, xsh_info);

				if (sh_type == SHT_RELA)
					ret = extract_elf_rela(
						mvdata, i, data,
						sh_size / sizeof(Elf_Rela),
						sh_name);
				else
					ret = extract_elf_rel(
						mvdata, i, data,
						sh_size / sizeof(Elf_Rel),
						sh_name);

				if (ret < 0)
					goto format_error;
			}
			continue;
		}

		/* include allocatable loadable sections */
		if (sh_type != SHT_NOBITS && sh_flags & SHF_ALLOC)
			goto include_section;
		continue;

	include_section:
		crypto_digest_update_data(mvdata, sh_name, strlen(sh_name));
		crypto_digest_update_val(mvdata, sechdrs[i].sh_type);
		crypto_digest_update_val(mvdata, sechdrs[i].sh_flags);
		crypto_digest_update_val(mvdata, sechdrs[i].sh_size);
		crypto_digest_update_val(mvdata, sechdrs[i].sh_addralign);
		crypto_digest_update_data(mvdata, data, sh_size);

		_debug("%08zx %02x digested the %s section, size %ld\n",
		       mvdata->signed_size, mvdata->csum, sh_name, sh_size);

		mvdata->canonlist[i] = 1;
	}

	_debug("Contributed %zu bytes to the digest (csum 0x%02x)\n",
	       mvdata->signed_size, mvdata->xcsum);

	/* do the actual signature verification */
	i = ksign_verify_signature(sig, sig_size, mvdata->digest);

	_debug("verify-sig : %d\n", i);

	/* map "verified OK" (0 from ksign) onto a positive return */
	if (i == 0)
		i = 1;
	return i;

format_error:
	crypto_free_tfm(mvdata->digest);
	return -ELIBBAD;

	/* deal with the case of an unsigned module */
no_signature:
	if (!signedonly)
		return 0;
	printk("An attempt to load unsigned module was rejected\n");
	return -EPERM;

} /* end module_verify_signature() */
/**
 * iscsi_tcp_data_recv - TCP receive in sendfile fashion
 * @rd_desc: read descriptor
 * @skb: socket buffer
 * @offset: offset in skb
 * @len: skb->len - offset
 *
 * Drives the per-connection receive state machine
 * (WAIT_HEADER/HEADER_GATHER -> DATA_RECV -> DDIGEST_RECV) over the
 * bytes available in @skb, looping via the "more" label until the skb
 * is consumed or -EAGAIN forces a partial return.  Returns the number
 * of bytes processed, or 0 after reporting a connection failure.
 *
 * NOTE(review): conn->rxdata_octets is only accounted on the "again"
 * (-EAGAIN) exit, not on the "nomore" exit - presumably the full-PDU
 * accounting happens elsewhere; confirm against the callers.
 **/
static int
iscsi_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
		    unsigned int offset, size_t len)
{
	int rc;
	struct iscsi_conn *conn = rd_desc->arg.data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int processed;
	char pad[ISCSI_PAD_LEN];
	struct scatterlist sg;

	/*
	 * Save current SKB and its offset in the corresponding
	 * connection context.
	 */
	tcp_conn->in.copy = skb->len - offset;
	tcp_conn->in.offset = offset;
	tcp_conn->in.skb = skb;
	tcp_conn->in.len = tcp_conn->in.copy;
	BUG_ON(tcp_conn->in.copy <= 0);
	debug_tcp("in %d bytes\n", tcp_conn->in.copy);

more:
	tcp_conn->in.copied = 0;
	rc = 0;

	if (unlikely(conn->suspend_rx)) {
		debug_tcp("conn %d Rx suspended!\n", conn->id);
		return 0;
	}

	if (tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER ||
	    tcp_conn->in_progress == IN_PROGRESS_HEADER_GATHER) {
		/* pull the (possibly split) BHS out of the skb */
		rc = iscsi_hdr_extract(tcp_conn);
		if (rc) {
			if (rc == -EAGAIN)
				goto nomore;
			else {
				iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
				return 0;
			}
		}

		/*
		 * Verify and process incoming PDU header.
		 */
		rc = tcp_conn->ops->hdr_recv(conn);
		if (!rc && tcp_conn->in.datalen) {
			/* a data segment follows; start its digest now */
			if (conn->datadgst_en) {
				BUG_ON(!tcp_conn->data_rx_tfm);
				crypto_digest_init(tcp_conn->data_rx_tfm);
			}
			tcp_conn->in_progress = IN_PROGRESS_DATA_RECV;
		} else if (rc) {
			iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
			return 0;
		}
	}

	if (tcp_conn->in_progress == IN_PROGRESS_DDIGEST_RECV) {
		uint32_t recv_digest;

		/* consume the 4-byte data digest trailing the segment and
		 * compare it with the digest computed during DATA_RECV */
		debug_tcp("extra data_recv offset %d copy %d\n",
			  tcp_conn->in.offset, tcp_conn->in.copy);
		skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
			      &recv_digest, 4);
		tcp_conn->in.offset += 4;
		tcp_conn->in.copy -= 4;
		if (recv_digest != tcp_conn->in.datadgst) {
			debug_tcp("iscsi_tcp: data digest error!"
				  "0x%x != 0x%x\n", recv_digest,
				  tcp_conn->in.datadgst);
			iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
			return 0;
		} else {
			debug_tcp("iscsi_tcp: data digest match!"
				  "0x%x == 0x%x\n", recv_digest,
				  tcp_conn->in.datadgst);
			tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
		}
	}

	if (unlikely(conn->suspend_rx)) {
		debug_tcp("conn %d Rx suspended!\n", conn->id);
		goto nomore;
	}

	if (tcp_conn->in_progress == IN_PROGRESS_DATA_RECV &&
	    tcp_conn->in.copy) {
		debug_tcp("data_recv offset %d copy %d\n",
			  tcp_conn->in.offset, tcp_conn->in.copy);
		rc = tcp_conn->ops->data_recv(conn);
		if (rc) {
			if (rc == -EAGAIN)
				goto again;
			iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
			return 0;
		}
		/* skip the pad bytes that round the segment up to 4 bytes */
		tcp_conn->in.copy -= tcp_conn->in.padding;
		tcp_conn->in.offset += tcp_conn->in.padding;
		if (conn->datadgst_en) {
			/* padding is included in the digest computation */
			if (tcp_conn->in.padding) {
				debug_tcp("padding -> %d\n",
					  tcp_conn->in.padding);
				memset(pad, 0, tcp_conn->in.padding);
				sg_init_one(&sg, pad, tcp_conn->in.padding);
				crypto_digest_update(tcp_conn->data_rx_tfm,
						     &sg, 1);
			}
			crypto_digest_final(tcp_conn->data_rx_tfm,
					    (u8 *) & tcp_conn->in.datadgst);
			debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst);
			tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
		} else
			tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
	}

	debug_tcp("f, processed %d from out of %d padding %d\n",
		  tcp_conn->in.offset - offset, (int)len,
		  tcp_conn->in.padding);
	BUG_ON(tcp_conn->in.offset - offset > len);

	/* keep going while unconsumed bytes remain in this skb */
	if (tcp_conn->in.offset - offset != len) {
		debug_tcp("continue to process %d bytes\n",
			  (int)len - (tcp_conn->in.offset - offset));
		goto more;
	}

nomore:
	processed = tcp_conn->in.offset - offset;
	BUG_ON(processed == 0);
	return processed;

again:
	processed = tcp_conn->in.offset - offset;
	debug_tcp("c, processed %d from out of %d rd_desc_cnt %d\n",
		  processed, (int)len, (int)rd_desc->count);
	BUG_ON(processed == 0);
	BUG_ON(processed > len);

	conn->rxdata_octets += processed;
	return processed;
}
/*
 * k5_ef_hash - hash @icount input buffers through the Kernel Crypto
 * Framework using the checksum mechanism configured in @context
 * (context->kef_cksum_mt).
 *
 * @output->data must point at a buffer of @output->length bytes that
 * receives the digest.  Returns CRYPTO_SUCCESS or a KCF error code.
 */
int
k5_ef_hash(krb5_context context, int icount, const krb5_data *input,
	krb5_data *output)
{
	int i;
	int rv = CRYPTO_FAILED;
	iovec_t v1, v2;
	crypto_data_t d1, d2;
	crypto_mechanism_t mech;
	crypto_context_t ctxp;

	KRB5_LOG0(KRB5_INFO, "k5_ef_hash() start");

	bzero(&d1, sizeof (d1));
	bzero(&d2, sizeof (d2));

	/* Describe the caller's output buffer for KCF. */
	v2.iov_base = (void *)output->data;
	v2.iov_len = output->length;

	d2.cd_format = CRYPTO_DATA_RAW;
	d2.cd_offset = 0;
	d2.cd_length = output->length;
	d2.cd_raw = v2;

	mech.cm_type = context->kef_cksum_mt;
	if (mech.cm_type == CRYPTO_MECH_INVALID) {
		/*
		 * Bug fix: this message previously logged
		 * context->kef_hash_mt although the field actually
		 * validated is kef_cksum_mt; log the checked value.
		 */
		KRB5_LOG(KRB5_ERR,
		    "k5_ef_hash() invalid mech specified: 0x%llx",
		    (long long)context->kef_cksum_mt);
		return (CRYPTO_FAILED);
	}
	mech.cm_param = 0;
	mech.cm_param_len = 0;

	rv = crypto_digest_init(&mech, &ctxp, NULL);
	if (rv != CRYPTO_SUCCESS) {
		KRB5_LOG(KRB5_ERR, "crypto_digest_init error: %0x", rv);
		return (rv);
	}

	/* Feed every input buffer into the digest in order. */
	for (i = 0; i < icount; i++) {
		v1.iov_base = (void *)input[i].data;
		v1.iov_len = input[i].length;
		d1.cd_length = input[i].length;
		d1.cd_format = CRYPTO_DATA_RAW;
		d1.cd_offset = 0;
		d1.cd_raw = v1;

		rv = crypto_digest_update(ctxp, &d1, NULL);
		if (rv != CRYPTO_SUCCESS) {
			KRB5_LOG(KRB5_ERR,
			    "crypto_digest_update error: %0x", rv);
			/* update failed: the context must be cancelled */
			crypto_cancel_ctx(ctxp);
			return (rv);
		}
	}

	rv = crypto_digest_final(ctxp, &d2, NULL);

	/*
	 * crypto_digest_final() internally destroys the context.  So, we
	 * do not use the context any more.  This means we do not call
	 * crypto_cancel_ctx() for the failure case here unlike the failure
	 * case of crypto_digest_update() where we do.
	 */
	if (rv != CRYPTO_SUCCESS) {
		KRB5_LOG(KRB5_ERR, "crypto_digest_final error: %0x", rv);
	}

	return (rv);
}
int plaintext_to_sha1(unsigned char *hash, const char *plaintext, unsigned int len) { struct page *page = NULL; char *data = NULL; int offset = 0; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) struct crypto_hash *tfm = NULL; struct scatterlist sg = {0}; struct hash_desc desc = {0}; page = alloc_page(GFP_KERNEL); if (!page) { return -ENOMEM; } data = (char *)page_address(page); tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) { __free_page(page); return -EINVAL; } desc.tfm = tfm; desc.flags = 0; crypto_hash_init(&desc); sg_init_one(&sg, (void *)data, PAGE_SIZE); for (offset = 0; offset < len; offset += PAGE_SIZE) { memset(data, 0x00, PAGE_SIZE); /* Check if the data is page size or part of page */ if ((len - offset) >= PAGE_SIZE) { memcpy(data, plaintext + offset, PAGE_SIZE); crypto_hash_update(&desc, &sg, PAGE_SIZE); } else { memcpy(data, plaintext + offset, (len - offset)); sg_init_one(&sg, (void *)data, (len - offset)); crypto_hash_update(&desc, &sg, (len - offset)); } } crypto_hash_final(&desc, hash); crypto_free_hash(tfm); #else struct crypto_tfm *tfm; struct scatterlist sg; page = alloc_page(GFP_KERNEL); if (!page) { return -ENOMEM; } data = (char *)page_address(page); tfm = crypto_alloc_tfm("sha1", CRYPTO_TFM_REQ_MAY_SLEEP); if (tfm == NULL) { __free_page(page); return -EINVAL; } crypto_digest_init(tfm); sg_init_one(&sg, (u8 *)data, PAGE_SIZE); for (offset = 0; offset < len; offset += PAGE_SIZE) { memset(data, 0x00, PAGE_SIZE); if ((len - offset) >= PAGE_SIZE) { memcpy(data, plaintext + offset, PAGE_SIZE); crypto_digest_update(tfm, &sg, 1); } else { memcpy(data, plaintext + offset, (len - offset)); sg_init_one(&sg, (u8 *)data, (len - offset)); crypto_digest_update(tfm, &sg, 1); } } crypto_digest_final(tfm, hash); crypto_free_tfm(tfm); #endif __free_page(page); return 0; }