void crypto_hmac_final(struct crypto_tfm *tfm, u8 *key,
		       unsigned int *keylen, u8 *out)
{
	unsigned int i;
	struct scatterlist tmp;
	char *opad = tfm->crt_digest.dit_hmac_block;

	/* Keys longer than the block size are first reduced by hashing. */
	if (*keylen > crypto_tfm_alg_blocksize(tfm)) {
		hash_key(tfm, key, *keylen);
		*keylen = crypto_tfm_alg_digestsize(tfm);
	}

	/* Finish the inner hash; its digest lands in out. */
	crypto_digest_final(tfm, out);

	/* Build opad: the key padded with zeros, XORed with 0x5c. */
	memset(opad, 0, crypto_tfm_alg_blocksize(tfm));
	memcpy(opad, key, *keylen);

	for (i = 0; i < crypto_tfm_alg_blocksize(tfm); i++)
		opad[i] ^= 0x5c;

	tmp.page = virt_to_page(opad);
	tmp.offset = offset_in_page(opad);
	tmp.length = crypto_tfm_alg_blocksize(tfm);

	/* Outer hash: H(opad || inner digest), written back into out. */
	crypto_digest_init(tfm);
	crypto_digest_update(tfm, &tmp, 1);

	tmp.page = virt_to_page(out);
	tmp.offset = offset_in_page(out);
	tmp.length = crypto_tfm_alg_digestsize(tfm);

	crypto_digest_update(tfm, &tmp, 1);
	crypto_digest_final(tfm, out);
}
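/*
 * Hedged usage sketch (not from the source above): how a caller on the
 * same pre-2.6.19 kernel crypto API would drive the init/update/final
 * HMAC sequence that ends in crypto_hmac_final(). The function name and
 * buffers are illustrative.
 */
static int hmac_sha1_example(u8 *key, unsigned int keylen,
			     void *data, unsigned int len, u8 *out)
{
	struct crypto_tfm *tfm;
	struct scatterlist sg;

	tfm = crypto_alloc_tfm("sha1", 0);	/* old API: NULL on failure */
	if (tfm == NULL)
		return -ENOMEM;

	sg_init_one(&sg, data, len);

	crypto_hmac_init(tfm, key, &keylen);	/* inner hash, keyed with ipad */
	crypto_hmac_update(tfm, &sg, 1);	/* absorb the message */
	crypto_hmac_final(tfm, key, &keylen, out); /* outer hash, keyed with opad */

	crypto_free_tfm(tfm);
	return 0;
}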
static int digsig_sha1_final(SIGCTX *ctx, char *digest)
{
	/*
	 * TODO: check the length of the signature; it should equal the
	 * length of the modulus.
	 */
	if (ctx == NULL)
		return -EINVAL;

	crypto_digest_final(ctx->tfm, digest);
	return 0;
}
/*
 * Get the final digest.
 */
int
smb_md5_final(smb_sign_ctx_t ctx, uint8_t *digest16)
{
	crypto_data_t out;
	int rv;

	bzero(&out, sizeof (out));
	out.cd_format = CRYPTO_DATA_RAW;
	out.cd_length = MD5_DIGEST_LENGTH;
	out.cd_raw.iov_len = MD5_DIGEST_LENGTH;
	out.cd_raw.iov_base = (void *)digest16;

	rv = crypto_digest_final(ctx, &out, 0);
	return (rv == CRYPTO_SUCCESS ? 0 : -1);
}
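/*
 * Hedged sketch (not part of the module above): the full init/update/
 * final sequence around smb_md5_final(), assuming the companion
 * smb_md5_init()/smb_md5_update() helpers from the same signing code
 * and an already-resolved mech. The wrapper name is illustrative.
 */
static int
smb_md5_one_shot(smb_sign_mech_t *mech, void *buf, size_t len,
    uint8_t *digest16)
{
	smb_sign_ctx_t ctx;

	if (smb_md5_init(&ctx, mech) != 0)
		return (-1);
	if (smb_md5_update(ctx, buf, len) != 0)
		return (-1);
	return (smb_md5_final(ctx, digest16));
}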
int do_digest(char *code, char **result)
{
	struct crypto_tfm *tfm;
	struct scatterlist sg;
	char *ret;
	int len = strlen(code);

	tfm = crypto_alloc_tfm("sha1", 0);
	if (tfm == NULL)
		return -1;

	sg_init_one(&sg, code, len);

	ret = kmalloc(50, GFP_KERNEL);
	if (!ret) {
		crypto_free_tfm(tfm);
		return -1;
	}
	memset(ret, 0, 50);

	/* One-shot SHA-1: init, hash the buffer, write the digest into ret. */
	crypto_digest_init(tfm);
	crypto_digest_update(tfm, &sg, 1);
	crypto_digest_final(tfm, ret);

	crypto_free_tfm(tfm);
	*result = ret;
	return 0;
}
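/*
 * Hedged caller sketch for do_digest(): the function hands back a
 * kmalloc'ed buffer holding the raw 20-byte SHA-1 digest, which the
 * caller must kfree(). Everything here is illustrative.
 */
static void do_digest_example(void)
{
	char *digest;
	int i;

	if (do_digest("hello world", &digest) == 0) {
		for (i = 0; i < 20; i++)	/* SHA-1 yields 20 bytes */
			printk(KERN_CONT "%02x", (unsigned char)digest[i]);
		printk(KERN_CONT "\n");
		kfree(digest);
	}
}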
/**
 * iscsi_tcp_data_recv - TCP receive in sendfile fashion
 * @rd_desc: read descriptor
 * @skb: socket buffer
 * @offset: offset in skb
 * @len: skb->len - offset
 **/
static int
iscsi_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
		    unsigned int offset, size_t len)
{
	int rc;
	struct iscsi_conn *conn = rd_desc->arg.data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int processed;
	char pad[ISCSI_PAD_LEN];
	struct scatterlist sg;

	/*
	 * Save current SKB and its offset in the corresponding
	 * connection context.
	 */
	tcp_conn->in.copy = skb->len - offset;
	tcp_conn->in.offset = offset;
	tcp_conn->in.skb = skb;
	tcp_conn->in.len = tcp_conn->in.copy;
	BUG_ON(tcp_conn->in.copy <= 0);
	debug_tcp("in %d bytes\n", tcp_conn->in.copy);

more:
	tcp_conn->in.copied = 0;
	rc = 0;

	if (unlikely(conn->suspend_rx)) {
		debug_tcp("conn %d Rx suspended!\n", conn->id);
		return 0;
	}

	if (tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER ||
	    tcp_conn->in_progress == IN_PROGRESS_HEADER_GATHER) {
		rc = iscsi_hdr_extract(tcp_conn);
		if (rc) {
			if (rc == -EAGAIN)
				goto nomore;
			else {
				iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
				return 0;
			}
		}

		/*
		 * Verify and process incoming PDU header.
		 */
		rc = tcp_conn->ops->hdr_recv(conn);
		if (!rc && tcp_conn->in.datalen) {
			if (conn->datadgst_en) {
				BUG_ON(!tcp_conn->data_rx_tfm);
				crypto_digest_init(tcp_conn->data_rx_tfm);
			}
			tcp_conn->in_progress = IN_PROGRESS_DATA_RECV;
		} else if (rc) {
			iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
			return 0;
		}
	}

	if (tcp_conn->in_progress == IN_PROGRESS_DDIGEST_RECV) {
		uint32_t recv_digest;

		debug_tcp("extra data_recv offset %d copy %d\n",
			  tcp_conn->in.offset, tcp_conn->in.copy);
		skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
			      &recv_digest, 4);
		tcp_conn->in.offset += 4;
		tcp_conn->in.copy -= 4;
		if (recv_digest != tcp_conn->in.datadgst) {
			debug_tcp("iscsi_tcp: data digest error!"
				  "0x%x != 0x%x\n", recv_digest,
				  tcp_conn->in.datadgst);
			iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
			return 0;
		} else {
			debug_tcp("iscsi_tcp: data digest match!"
				  "0x%x == 0x%x\n", recv_digest,
				  tcp_conn->in.datadgst);
			tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
		}
	}

	if (unlikely(conn->suspend_rx)) {
		debug_tcp("conn %d Rx suspended!\n", conn->id);
		goto nomore;
	}

	if (tcp_conn->in_progress == IN_PROGRESS_DATA_RECV &&
	    tcp_conn->in.copy) {
		debug_tcp("data_recv offset %d copy %d\n",
			  tcp_conn->in.offset, tcp_conn->in.copy);
		rc = tcp_conn->ops->data_recv(conn);
		if (rc) {
			if (rc == -EAGAIN)
				goto again;
			iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
			return 0;
		}
		tcp_conn->in.copy -= tcp_conn->in.padding;
		tcp_conn->in.offset += tcp_conn->in.padding;
		if (conn->datadgst_en) {
			if (tcp_conn->in.padding) {
				debug_tcp("padding -> %d\n",
					  tcp_conn->in.padding);
				memset(pad, 0, tcp_conn->in.padding);
				sg_init_one(&sg, pad, tcp_conn->in.padding);
				crypto_digest_update(tcp_conn->data_rx_tfm,
						     &sg, 1);
			}
			crypto_digest_final(tcp_conn->data_rx_tfm,
					    (u8 *)&tcp_conn->in.datadgst);
			debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst);
			tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
		} else
			tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
	}

	debug_tcp("f, processed %d from out of %d padding %d\n",
		  tcp_conn->in.offset - offset, (int)len,
		  tcp_conn->in.padding);
	BUG_ON(tcp_conn->in.offset - offset > len);

	if (tcp_conn->in.offset - offset != len) {
		debug_tcp("continue to process %d bytes\n",
			  (int)len - (tcp_conn->in.offset - offset));
		goto more;
	}

nomore:
	processed = tcp_conn->in.offset - offset;
	BUG_ON(processed == 0);
	return processed;

again:
	processed = tcp_conn->in.offset - offset;
	debug_tcp("c, processed %d from out of %d rd_desc_cnt %d\n",
		  processed, (int)len, (int)rd_desc->count);
	BUG_ON(processed == 0);
	BUG_ON(processed > len);
	conn->rxdata_octets += processed;
	return processed;
}
int
k5_ef_hash(krb5_context context, int icount, const krb5_data *input,
	krb5_data *output)
{
	int i;
	int rv = CRYPTO_FAILED;
	iovec_t v1, v2;
	crypto_data_t d1, d2;
	crypto_mechanism_t mech;
	crypto_context_t ctxp;

	KRB5_LOG0(KRB5_INFO, "k5_ef_hash() start");

	bzero(&d1, sizeof (d1));
	bzero(&d2, sizeof (d2));

	v2.iov_base = (void *)output->data;
	v2.iov_len = output->length;

	d2.cd_format = CRYPTO_DATA_RAW;
	d2.cd_offset = 0;
	d2.cd_length = output->length;
	d2.cd_raw = v2;

	mech.cm_type = context->kef_cksum_mt;
	if (mech.cm_type == CRYPTO_MECH_INVALID) {
		KRB5_LOG(KRB5_ERR,
		    "k5_ef_hash() invalid mech specified: 0x%llx",
		    (long long)context->kef_cksum_mt);
		return (CRYPTO_FAILED);
	}
	mech.cm_param = 0;
	mech.cm_param_len = 0;

	rv = crypto_digest_init(&mech, &ctxp, NULL);
	if (rv != CRYPTO_SUCCESS) {
		KRB5_LOG(KRB5_ERR, "crypto_digest_init error: %0x", rv);
		return (rv);
	}

	for (i = 0; i < icount; i++) {
		v1.iov_base = (void *)input[i].data;
		v1.iov_len = input[i].length;
		d1.cd_length = input[i].length;
		d1.cd_format = CRYPTO_DATA_RAW;
		d1.cd_offset = 0;
		d1.cd_raw = v1;

		rv = crypto_digest_update(ctxp, &d1, NULL);
		if (rv != CRYPTO_SUCCESS) {
			KRB5_LOG(KRB5_ERR,
			    "crypto_digest_update error: %0x", rv);
			crypto_cancel_ctx(ctxp);
			return (rv);
		}
	}

	rv = crypto_digest_final(ctxp, &d2, NULL);

	/*
	 * crypto_digest_final() internally destroys the context, so we
	 * must not use it again. This is why the failure case here,
	 * unlike that of crypto_digest_update() above, does not call
	 * crypto_cancel_ctx().
	 */
	if (rv != CRYPTO_SUCCESS)
		KRB5_LOG(KRB5_ERR, "crypto_digest_final error: %0x", rv);

	return (rv);
}
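/*
 * Hedged caller sketch (not from the source above): hashing two
 * discontiguous chunks with k5_ef_hash(). Assumes a krb5_context whose
 * kef_cksum_mt has already been resolved; names are illustrative.
 */
static int
hash_two_chunks(krb5_context ctx, const krb5_data *a, const krb5_data *b,
    char *buf, unsigned int buflen)
{
	krb5_data in[2];
	krb5_data out;

	in[0] = *a;
	in[1] = *b;
	out.data = buf;
	out.length = buflen;

	/* One call drives the whole init/update/final sequence. */
	return (k5_ef_hash(ctx, 2, in, &out));
}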
int plaintext_to_sha1(unsigned char *hash, const char *plaintext,
		      unsigned int len)
{
	struct page *page = NULL;
	char *data = NULL;
	int offset = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
	struct crypto_hash *tfm = NULL;
	struct scatterlist sg = {0};
	struct hash_desc desc = {0};

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;
	data = (char *)page_address(page);

	tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		__free_page(page);
		return -EINVAL;
	}
	desc.tfm = tfm;
	desc.flags = 0;

	crypto_hash_init(&desc);
	sg_init_one(&sg, (void *)data, PAGE_SIZE);

	for (offset = 0; offset < len; offset += PAGE_SIZE) {
		memset(data, 0x00, PAGE_SIZE);
		/* Check if the data is a full page or a partial page. */
		if ((len - offset) >= PAGE_SIZE) {
			memcpy(data, plaintext + offset, PAGE_SIZE);
			crypto_hash_update(&desc, &sg, PAGE_SIZE);
		} else {
			memcpy(data, plaintext + offset, (len - offset));
			sg_init_one(&sg, (void *)data, (len - offset));
			crypto_hash_update(&desc, &sg, (len - offset));
		}
	}
	crypto_hash_final(&desc, hash);
	crypto_free_hash(tfm);
#else
	struct crypto_tfm *tfm;
	struct scatterlist sg;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;
	data = (char *)page_address(page);

	tfm = crypto_alloc_tfm("sha1", CRYPTO_TFM_REQ_MAY_SLEEP);
	if (tfm == NULL) {
		__free_page(page);
		return -EINVAL;
	}

	crypto_digest_init(tfm);
	sg_init_one(&sg, (u8 *)data, PAGE_SIZE);

	for (offset = 0; offset < len; offset += PAGE_SIZE) {
		memset(data, 0x00, PAGE_SIZE);
		if ((len - offset) >= PAGE_SIZE) {
			memcpy(data, plaintext + offset, PAGE_SIZE);
			crypto_digest_update(tfm, &sg, 1);
		} else {
			memcpy(data, plaintext + offset, (len - offset));
			sg_init_one(&sg, (u8 *)data, (len - offset));
			crypto_digest_update(tfm, &sg, 1);
		}
	}
	crypto_digest_final(tfm, hash);
	crypto_free_tfm(tfm);
#endif

	__free_page(page);
	return 0;
}
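/*
 * Hedged usage sketch: SHA-1 digests are 20 bytes; the constant and the
 * caller below are illustrative, not part of the function above.
 */
#define SHA1_LEN 20

static int sha1_example(const char *msg)
{
	unsigned char hash[SHA1_LEN];
	int rc;

	rc = plaintext_to_sha1(hash, msg, strlen(msg));
	if (rc)
		return rc;

	print_hex_dump(KERN_DEBUG, "sha1: ", DUMP_PREFIX_NONE,
		       16, 1, hash, sizeof(hash), false);
	return 0;
}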