/* Transfer single request. */ static uint32_t transfer_user_req(uint32_t flags, uint32_t context, struct kfips_request_context *rctx, struct kfips_ctrl __user *ctrl, void __user *data) { /* Note: rctx and rctx->req->src must be valid. */ /* Control information. */ long comb; struct ablkcipher_request *req = rctx->req; struct kfips_transform_context *ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); uint32_t len = req->nbytes; uint32_t keylen = ctx->keylen; comb = __copy_to_user(&ctrl->key, ctx->key, ctx->keylen); comb |= __copy_to_user(&ctrl->iv, req->info, AES_BLOCK_SIZE); comb |= __put_user(len, &ctrl->len) == -EFAULT; comb |= __put_user(keylen, &ctrl->keylen) == -EFAULT; comb |= sg_copy_to_user_buffer(req->src, sg_count(req->src, req->nbytes), data, req->nbytes); if (!comb) return keylen; /* Success, return key length. */ else return 0; /* Failure, return 0. */ }
static int dcp_aes_cbc_decrypt(struct ablkcipher_request *req) { struct crypto_tfm *tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); struct dcp_op *ctx = crypto_ablkcipher_ctx( crypto_ablkcipher_reqtfm(req)); if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { int err = 0; ablkcipher_request_set_tfm(req, ctx->fallback); err = crypto_ablkcipher_decrypt(req); ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); return err; } return dcp_aes_cbc_crypt(req, DCP_AES | DCP_DEC | DCP_CBC); }
static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt) { struct crypto_tfm *tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); struct qce_alg_template *tmpl = to_cipher_tmpl(tfm); int ret; rctx->flags = tmpl->alg_flags; rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT; if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 && ctx->enc_keylen != AES_KEYSIZE_256) { SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); skcipher_request_set_sync_tfm(subreq, ctx->fallback); skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL); skcipher_request_set_crypt(subreq, req->src, req->dst, req->nbytes, req->info); ret = encrypt ? crypto_skcipher_encrypt(subreq) : crypto_skcipher_decrypt(subreq); skcipher_request_zero(subreq); return ret; } return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base); }
static int crypto_rfc3686_crypt(struct ablkcipher_request *req) { struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); struct crypto_rfc3686_ctx *ctx = crypto_ablkcipher_ctx(tfm); struct crypto_ablkcipher *child = ctx->child; unsigned long align = crypto_ablkcipher_alignmask(tfm); struct crypto_rfc3686_req_ctx *rctx = (void *)PTR_ALIGN((u8 *)ablkcipher_request_ctx(req), align + 1); struct ablkcipher_request *subreq = &rctx->subreq; u8 *iv = rctx->iv; /* set up counter block */ memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE); memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->info, CTR_RFC3686_IV_SIZE); /* initialize counter portion of counter block */ *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); ablkcipher_request_set_tfm(subreq, child); ablkcipher_request_set_callback(subreq, req->base.flags, req->base.complete, req->base.data); ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->nbytes, iv); return crypto_ablkcipher_encrypt(subreq); }
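The counter block assembled in crypto_rfc3686_crypt() is always exactly one AES block: a 4-byte nonce, the caller's 8-byte IV, and a 4-byte big-endian block counter that starts at 1 (4 + 8 + 4 = 16). The standalone sketch below is illustrative only; it is not part of any driver, and the macro and function names (RFC3686_NONCE_SIZE, rfc3686_build_ctrblk, ...) are made up for this example rather than taken from the kernel.
#include <stdint.h>
#include <string.h>

#define RFC3686_NONCE_SIZE 4	/* corresponds to CTR_RFC3686_NONCE_SIZE */
#define RFC3686_IV_SIZE    8	/* corresponds to CTR_RFC3686_IV_SIZE */
#define RFC3686_BLOCK_SIZE 16	/* corresponds to AES_BLOCK_SIZE */

/* Build the RFC 3686 counter block: nonce || iv || 32-bit big-endian counter = 1. */
static void rfc3686_build_ctrblk(uint8_t ctrblk[RFC3686_BLOCK_SIZE],
				 const uint8_t nonce[RFC3686_NONCE_SIZE],
				 const uint8_t iv[RFC3686_IV_SIZE])
{
	memcpy(ctrblk, nonce, RFC3686_NONCE_SIZE);
	memcpy(ctrblk + RFC3686_NONCE_SIZE, iv, RFC3686_IV_SIZE);
	/* counter occupies bytes 12..15, big-endian, initialized to 1 */
	ctrblk[12] = 0x00;
	ctrblk[13] = 0x00;
	ctrblk[14] = 0x00;
	ctrblk[15] = 0x01;
}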
static int ctr_aes_decrypt(struct ablkcipher_request *areq) { struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher); return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_DECRYPT, 4); }
static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode) { struct sahara_ctx *ctx = crypto_ablkcipher_ctx( crypto_ablkcipher_reqtfm(req)); struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req); struct sahara_dev *dev = dev_ptr; int err = 0; int busy; dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n", req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC)); if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) { dev_err(dev->device, "request size is not exact amount of AES blocks\n"); return -EINVAL; } ctx->dev = dev; rctx->mode = mode; spin_lock_bh(&dev->lock); err = ablkcipher_enqueue_request(&dev->queue, req); busy = test_and_set_bit(FLAGS_BUSY, &dev->flags); spin_unlock_bh(&dev->lock); if (!busy) tasklet_schedule(&dev->queue_task); return err; }
static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req) { struct crypto_tfm *tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); struct sahara_ctx *ctx = crypto_ablkcipher_ctx( crypto_ablkcipher_reqtfm(req)); int err; if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { ablkcipher_request_set_tfm(req, ctx->fallback); err = crypto_ablkcipher_decrypt(req); ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); return err; } return sahara_aes_crypt(req, FLAGS_CBC); }
static int aes_encrypt (struct ablkcipher_request *areq) { struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher); return lq_aes_queue_mgr(ctx, areq, NULL, CRYPTO_DIR_ENCRYPT, 5); }
static int aes_dma_stop(struct aes_hwa_ctx *ctx) { struct tf_crypto_aes_operation_state *state = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(ctx->req)); int err = 0; size_t count; dprintk(KERN_INFO "aes_dma_stop(%p)\n", ctx); tf_aes_save_registers(state); if (!AES_CTRL_IS_MODE_ECB(state->CTRL)) { u32 *ptr = (u32 *) ctx->req->info; ptr[0] = state->AES_IV_0; ptr[1] = state->AES_IV_1; ptr[2] = state->AES_IV_2; ptr[3] = state->AES_IV_3; } OUTREG32(&paes_reg->AES_SYSCONFIG, 0); omap_stop_dma(ctx->dma_lch_in); omap_stop_dma(ctx->dma_lch_out); tf_crypto_disable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG); if (!(ctx->flags & FLAGS_FAST)) { dma_sync_single_for_device(NULL, ctx->dma_addr_out, ctx->dma_size, DMA_FROM_DEVICE); #ifdef CONFIG_TF_DRIVER_FAULT_INJECTION tf_aes_fault_injection(paes_reg->AES_CTRL, ctx->buf_out); #endif /* Copy data */ count = sg_copy(&ctx->out_sg, &ctx->out_offset, ctx->buf_out, ctx->buflen, ctx->dma_size, 1); if (count != ctx->dma_size) err = -EINVAL; } else { dma_unmap_sg(NULL, ctx->out_sg, 1, DMA_FROM_DEVICE); dma_unmap_sg(NULL, ctx->in_sg, 1, DMA_TO_DEVICE); #ifdef CONFIG_TF_DRIVER_FAULT_INJECTION tf_aes_fault_injection(paes_reg->AES_CTRL, sg_virt(ctx->out_sg)); #endif } if (err || !ctx->total) ctx->req->base.complete(&ctx->req->base, err); return err; }
static void sahara_aes_queue_task(unsigned long data) { struct sahara_dev *dev = (struct sahara_dev *)data; struct crypto_async_request *async_req, *backlog; struct sahara_ctx *ctx; struct sahara_aes_reqctx *rctx; struct ablkcipher_request *req; int ret; spin_lock(&dev->lock); backlog = crypto_get_backlog(&dev->queue); async_req = crypto_dequeue_request(&dev->queue); if (!async_req) clear_bit(FLAGS_BUSY, &dev->flags); spin_unlock(&dev->lock); if (!async_req) return; if (backlog) backlog->complete(backlog, -EINPROGRESS); req = ablkcipher_request_cast(async_req); /* Request is ready to be dispatched by the device */ dev_dbg(dev->device, "dispatch request (nbytes=%d, src=%p, dst=%p)\n", req->nbytes, req->src, req->dst); /* assign new request to device */ dev->req = req; dev->total = req->nbytes; dev->in_sg = req->src; dev->out_sg = req->dst; rctx = ablkcipher_request_ctx(req); ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); rctx->mode &= FLAGS_MODE_MASK; dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode; if ((dev->flags & FLAGS_CBC) && req->info) memcpy(dev->iv_base, req->info, AES_KEYSIZE_128); /* assign new context to device */ ctx->dev = dev; dev->ctx = ctx; ret = sahara_hw_descriptor_create(dev); if (ret < 0) { spin_lock(&dev->lock); clear_bit(FLAGS_BUSY, &dev->flags); spin_unlock(&dev->lock); dev->req->base.complete(&dev->req->base, ret); } }
int crypto4xx_decrypt(struct ablkcipher_request *req) { struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); unsigned int ivlen = crypto_ablkcipher_ivsize( crypto_ablkcipher_reqtfm(req)); __le32 iv[ivlen]; if (ivlen) crypto4xx_memcpy_to_le32(iv, req->info, ivlen); return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, req->nbytes, iv, ivlen, ctx->sa_in, ctx->sa_len, 0); }
int __ablk_encrypt(struct ablkcipher_request *req) { struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); struct blkcipher_desc desc; desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); desc.info = req->info; desc.flags = 0; return crypto_blkcipher_crt(desc.tfm)->encrypt( &desc, req->dst, req->src, req->nbytes); }
static int sahara_aes_process(struct ablkcipher_request *req) { struct sahara_dev *dev = dev_ptr; struct sahara_ctx *ctx; struct sahara_aes_reqctx *rctx; int ret; unsigned long timeout; /* Request is ready to be dispatched by the device */ dev_dbg(dev->device, "dispatch request (nbytes=%d, src=%p, dst=%p)\n", req->nbytes, req->src, req->dst); /* assign new request to device */ dev->total = req->nbytes; dev->in_sg = req->src; dev->out_sg = req->dst; rctx = ablkcipher_request_ctx(req); ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); rctx->mode &= FLAGS_MODE_MASK; dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode; if ((dev->flags & FLAGS_CBC) && req->info) memcpy(dev->iv_base, req->info, AES_KEYSIZE_128); /* assign new context to device */ dev->ctx = ctx; reinit_completion(&dev->dma_completion); ret = sahara_hw_descriptor_create(dev); if (ret) return -EINVAL; timeout = wait_for_completion_timeout(&dev->dma_completion, msecs_to_jiffies(SAHARA_TIMEOUT_MS)); if (!timeout) { dev_err(dev->device, "AES timeout\n"); return -ETIMEDOUT; } /* unmap with the same directions used when the sgs were mapped: src to device, dst from device */ dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg, DMA_FROM_DEVICE); dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE); return 0; }
static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode) { struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req); struct s5p_aes_dev *dev = ctx->dev; if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) { pr_err("request size is not exact amount of AES blocks\n"); return -EINVAL; } reqctx->mode = mode; return s5p_aes_handle_req(dev, req); }
int ablk_encrypt(struct ablkcipher_request *req) { struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); if (!may_use_simd()) { struct ablkcipher_request *cryptd_req = ablkcipher_request_ctx(req); memcpy(cryptd_req, req, sizeof(*req)); ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); return crypto_ablkcipher_encrypt(cryptd_req); } else { return __ablk_encrypt(req); } }
static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req, crypto_completion_t complete) { struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req); struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); struct cryptd_state *state = cryptd_get_state(crypto_ablkcipher_tfm(tfm)); int err; rctx->complete = req->base.complete; req->base.complete = complete; spin_lock_bh(&state->lock); err = ablkcipher_enqueue_request(&state->queue, req); spin_unlock_bh(&state->lock); wake_up_process(state->task); return err; }
static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req) { struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req); struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm); struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req); struct virtio_crypto *vcrypto = ctx->vcrypto; int ret; /* Use the first data virtqueue as default */ struct data_queue *data_vq = &vcrypto->data_vq[0]; vc_req->ablkcipher_ctx = ctx; vc_req->ablkcipher_req = req; ret = __virtio_crypto_ablkcipher_do_req(vc_req, req, data_vq, 1); if (ret < 0) { pr_err("virtio_crypto: Encryption failed!\n"); return ret; } return -EINPROGRESS; }
static int rfc3686_aes_encrypt(struct ablkcipher_request *areq) { struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher); int ret; u8 *info = areq->info; u8 rfc3686_iv[16]; memcpy(rfc3686_iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE); memcpy(rfc3686_iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE); /* initialize counter portion of counter block */ *(__be32 *)(rfc3686_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); areq->info = rfc3686_iv; ret = lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_ENCRYPT, 4); areq->info = info; return ret; }
static int ablk_decrypt(struct ablkcipher_request *req) { struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); if (!irq_fpu_usable()) { struct ablkcipher_request *cryptd_req = ablkcipher_request_ctx(req); memcpy(cryptd_req, req, sizeof(*req)); ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); return crypto_ablkcipher_decrypt(cryptd_req); } else { struct blkcipher_desc desc; desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); desc.info = req->info; desc.flags = 0; return crypto_blkcipher_crt(desc.tfm)->decrypt( &desc, req->dst, req->src, req->nbytes); } }
static void dcp_queue_task(unsigned long data) { struct dcp_dev *dev = (struct dcp_dev *) data; struct crypto_async_request *async_req, *backlog; struct crypto_ablkcipher *tfm; struct dcp_op *ctx; struct dcp_dev_req_ctx *rctx; struct ablkcipher_request *req; unsigned long flags; spin_lock_irqsave(&dev->queue_lock, flags); backlog = crypto_get_backlog(&dev->queue); async_req = crypto_dequeue_request(&dev->queue); spin_unlock_irqrestore(&dev->queue_lock, flags); if (!async_req) goto ret_nothing_done; if (backlog) backlog->complete(backlog, -EINPROGRESS); req = ablkcipher_request_cast(async_req); tfm = crypto_ablkcipher_reqtfm(req); rctx = ablkcipher_request_ctx(req); ctx = crypto_ablkcipher_ctx(tfm); if (!req->src || !req->dst) goto ret_nothing_done; ctx->flags |= rctx->mode; ctx->req = req; dcp_crypt(dev, ctx); return; ret_nothing_done: clear_bit(DCP_FLAG_BUSY, &dev->flags); }
static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt) { struct crypto_tfm *tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); struct qce_alg_template *tmpl = to_cipher_tmpl(tfm); int ret; rctx->flags = tmpl->alg_flags; rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT; if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 && ctx->enc_keylen != AES_KEYSIZE_256) { ablkcipher_request_set_tfm(req, ctx->fallback); ret = encrypt ? crypto_ablkcipher_encrypt(req) : crypto_ablkcipher_decrypt(req); ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); return ret; } return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base); }
void unmap_ablkcipher_request(struct device *dev, struct ablkcipher_request *req) { struct ablkcipher_req_ctx *areq_ctx; unsigned int iv_size; areq_ctx = ablkcipher_request_ctx(req); iv_size = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req)); if (likely(areq_ctx->gen_ctx.iv_dma_addr != 0)) { DX_LOG_DEBUG("Unmapped iv: iv_dma_addr=0x%08lX iv_size=%d\n", (unsigned long)areq_ctx->gen_ctx.iv_dma_addr, iv_size); dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr, iv_size, DMA_TO_DEVICE); } /*In case a pool was set, a table was allocated and should be released */ if (areq_ctx->dma_buf_type == DX_DMA_BUF_MLLI) { dma_pool_free(areq_ctx->mlli_params.curr_pool, areq_ctx->mlli_params.mlli_virt_addr, areq_ctx->mlli_params.mlli_dma_addr); } if (areq_ctx->sec_dir != DX_SRC_DMA_IS_SECURE) { dma_unmap_sg(dev, req->src, areq_ctx->in_nents, DMA_BIDIRECTIONAL); } DX_LOG_DEBUG("Unmapped sg src: req->src=0x%08lX\n", (unsigned long)sg_virt(req->src)); if (likely(req->src != req->dst)) { if (areq_ctx->sec_dir != DX_DST_DMA_IS_SECURE) { dma_unmap_sg(dev, req->dst, areq_ctx->out_nents, DMA_BIDIRECTIONAL); DX_LOG_DEBUG("Unmapped sg dst: req->dst=0x%08lX\n", (unsigned long)sg_virt(req->dst)); } } }
int ablk_decrypt(struct ablkcipher_request *req) { struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); if (!may_use_simd()) { struct ablkcipher_request *cryptd_req = ablkcipher_request_ctx(req); *cryptd_req = *req; ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); return crypto_ablkcipher_decrypt(cryptd_req); } else { struct blkcipher_desc desc; desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); desc.info = req->info; desc.flags = 0; return crypto_blkcipher_crt(desc.tfm)->decrypt( &desc, req->dst, req->src, req->nbytes); } }
static int ss_aes_one_req(sunxi_ss_t *sss, struct ablkcipher_request *req) { int ret = 0; struct crypto_ablkcipher *tfm = NULL; ss_aes_ctx_t *ctx = NULL; ss_aes_req_ctx_t *req_ctx = NULL; int key_map_flag = 0; int iv_map_flag = 0; SS_ENTER(); if (!req->src || !req->dst) { SS_ERR("Invalid sg: src = %p, dst = %p\n", req->src, req->dst); return -EINVAL; } ss_dev_lock(); tfm = crypto_ablkcipher_reqtfm(req); req_ctx = ablkcipher_request_ctx(req); ctx = crypto_ablkcipher_ctx(tfm); /* A31 SS need update key each cycle in decryption. */ if ((ctx->comm.flags & SS_FLAG_NEW_KEY) || (req_ctx->dir == SS_DIR_DECRYPT)) { SS_DBG("KEY address = %p, size = %d\n", ctx->key, ctx->key_size); ss_key_set(ctx->key, ctx->key_size); dma_map_single(&sss->pdev->dev, ctx->key, ctx->key_size, DMA_MEM_TO_DEV); key_map_flag = 1; ctx->comm.flags &= ~SS_FLAG_NEW_KEY; } #ifdef SS_CTS_MODE_ENABLE if (((req_ctx->mode == SS_AES_MODE_CBC) || (req_ctx->mode == SS_AES_MODE_CTS)) && (req->info != NULL)) { #else if ((req_ctx->mode == SS_AES_MODE_CBC) && (req->info != NULL)) { #endif SS_DBG("IV address = %p, size = %d\n", req->info, crypto_ablkcipher_ivsize(tfm)); memcpy(ctx->iv, req->info, crypto_ablkcipher_ivsize(tfm)); ss_iv_set(ctx->iv, crypto_ablkcipher_ivsize(tfm)); dma_map_single(&sss->pdev->dev, ctx->iv, crypto_ablkcipher_ivsize(tfm), DMA_MEM_TO_DEV); iv_map_flag = 1; } #ifdef SS_CTR_MODE_ENABLE if (req_ctx->mode == SS_AES_MODE_CTR) { SS_DBG("Cnt address = %p, size = %d\n", req->info, crypto_ablkcipher_ivsize(tfm)); if (ctx->cnt == 0) memcpy(ctx->iv, req->info, crypto_ablkcipher_ivsize(tfm)); SS_DBG("CNT: %08x %08x %08x %08x \n", *(int *)&ctx->iv[0], *(int *)&ctx->iv[4], *(int *)&ctx->iv[8], *(int *)&ctx->iv[12]); ss_cnt_set(ctx->iv, crypto_ablkcipher_ivsize(tfm)); dma_map_single(&sss->pdev->dev, ctx->iv, crypto_ablkcipher_ivsize(tfm), DMA_MEM_TO_DEV); iv_map_flag = 1; } #endif if (req_ctx->type == SS_METHOD_RSA) ss_rsa_width_set(crypto_ablkcipher_ivsize(tfm)); req_ctx->dma_src.sg = req->src; req_ctx->dma_dst.sg = req->dst; ret = ss_aes_start(ctx, req_ctx, req->nbytes); if (ret < 0) SS_ERR("ss_aes_start fail(%d)\n", ret); ss_dev_unlock(); if (req->base.complete) req->base.complete(&req->base, ret); if (key_map_flag == 1) dma_unmap_single(&ss_dev->pdev->dev, virt_to_phys(ctx->key), ctx->key_size, DMA_MEM_TO_DEV); if (iv_map_flag == 1) dma_unmap_single(&sss->pdev->dev, virt_to_phys(ctx->iv), crypto_ablkcipher_ivsize(tfm), DMA_MEM_TO_DEV); #ifdef SS_CTR_MODE_ENABLE if (req_ctx->mode == SS_AES_MODE_CTR) { ss_cnt_get(ctx->comm.flow, ctx->iv, crypto_ablkcipher_ivsize(tfm)); SS_DBG("CNT: %08x %08x %08x %08x \n", *(int *)&ctx->iv[0], *(int *)&ctx->iv[4], *(int *)&ctx->iv[8], *(int *)&ctx->iv[12]); } #endif ctx->cnt += req->nbytes; return ret; } static int ss_hash_one_req(sunxi_ss_t *sss, struct ahash_request *req) { int ret = 0; ss_aes_req_ctx_t *req_ctx = NULL; ss_hash_ctx_t *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); SS_ENTER(); if (!req->src) { SS_ERR("Invalid sg: src = %p\n", req->src); return -EINVAL; } ss_dev_lock(); req_ctx = ahash_request_ctx(req); req_ctx->dma_src.sg = req->src; ss_hash_padding_data_prepare(ctx, req->result, req->nbytes); ret = ss_hash_start(ctx, req_ctx, req->nbytes); if (ret < 0) SS_ERR("ss_hash_start fail(%d)\n", ret); ss_dev_unlock(); if (req->base.complete) req->base.complete(&req->base, ret); return ret; }
static int aes_dma_start(struct aes_hwa_ctx *ctx) { int err, fast = 0, in, out; size_t count; dma_addr_t addr_in, addr_out; struct omap_dma_channel_params dma_params; struct tf_crypto_aes_operation_state *state = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(ctx->req)); static size_t last_count; unsigned long flags; in = IS_ALIGNED((u32)ctx->in_sg->offset, sizeof(u32)); out = IS_ALIGNED((u32)ctx->out_sg->offset, sizeof(u32)); fast = in && out; if (fast) { count = min(ctx->total, sg_dma_len(ctx->in_sg)); count = min(count, sg_dma_len(ctx->out_sg)); if (count != ctx->total) return -EINVAL; /* Only call dma_map_sg if it has not yet been done */ if (!(ctx->req->base.flags & CRYPTO_TFM_REQ_DMA_VISIBLE)) { err = dma_map_sg(NULL, ctx->in_sg, 1, DMA_TO_DEVICE); if (!err) return -EINVAL; err = dma_map_sg(NULL, ctx->out_sg, 1, DMA_FROM_DEVICE); if (!err) { dma_unmap_sg( NULL, ctx->in_sg, 1, DMA_TO_DEVICE); return -EINVAL; } } ctx->req->base.flags &= ~CRYPTO_TFM_REQ_DMA_VISIBLE; addr_in = sg_dma_address(ctx->in_sg); addr_out = sg_dma_address(ctx->out_sg); ctx->flags |= FLAGS_FAST; } else { count = sg_copy(&ctx->in_sg, &ctx->in_offset, ctx->buf_in, ctx->buflen, ctx->total, 0); addr_in = ctx->dma_addr_in; addr_out = ctx->dma_addr_out; ctx->flags &= ~FLAGS_FAST; } ctx->total -= count; /* Configure HWA */ tf_crypto_enable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG); tf_aes_restore_registers(state, ctx->flags & FLAGS_ENCRYPT ? 1 : 0); OUTREG32(&paes_reg->AES_SYSCONFIG, INREG32(&paes_reg->AES_SYSCONFIG) | AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT | AES_SYSCONFIG_DMA_REQ_IN_EN_BIT); ctx->dma_size = count; if (!fast) dma_sync_single_for_device(NULL, addr_in, count, DMA_TO_DEVICE); dma_params.data_type = OMAP_DMA_DATA_TYPE_S32; dma_params.frame_count = count / AES_BLOCK_SIZE; dma_params.elem_count = DMA_CEN_Elts_per_Frame_AES; dma_params.src_ei = 0; dma_params.src_fi = 0; dma_params.dst_ei = 0; dma_params.dst_fi = 0; dma_params.sync_mode = OMAP_DMA_SYNC_FRAME; dma_params.read_prio = 0; dma_params.write_prio = 0; /* IN */ dma_params.trigger = ctx->dma_in; dma_params.src_or_dst_synch = OMAP_DMA_DST_SYNC; dma_params.dst_start = AES1_REGS_HW_ADDR + 0x60; dma_params.dst_amode = OMAP_DMA_AMODE_CONSTANT; dma_params.src_start = addr_in; dma_params.src_amode = OMAP_DMA_AMODE_POST_INC; if (reconfigure_dma) { omap_set_dma_params(ctx->dma_lch_in, &dma_params); omap_set_dma_dest_burst_mode(ctx->dma_lch_in, OMAP_DMA_DATA_BURST_8); omap_set_dma_src_burst_mode(ctx->dma_lch_in, OMAP_DMA_DATA_BURST_8); omap_set_dma_src_data_pack(ctx->dma_lch_in, 1); } else { if (last_count != count) omap_set_dma_transfer_params(ctx->dma_lch_in, dma_params.data_type, dma_params.elem_count, dma_params.frame_count, dma_params.sync_mode, dma_params.trigger, dma_params.src_or_dst_synch); /* Configure input start address */ __raw_writel(dma_params.src_start, omap_dma_base + (0x60 * (ctx->dma_lch_in) + 0x9c)); } /* OUT */ dma_params.trigger = ctx->dma_out; dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC; dma_params.src_start = AES1_REGS_HW_ADDR + 0x60; dma_params.src_amode = OMAP_DMA_AMODE_CONSTANT; dma_params.dst_start = addr_out; dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC; if (reconfigure_dma) { omap_set_dma_params(ctx->dma_lch_out, &dma_params); omap_set_dma_dest_burst_mode(ctx->dma_lch_out, OMAP_DMA_DATA_BURST_8); omap_set_dma_src_burst_mode(ctx->dma_lch_out, OMAP_DMA_DATA_BURST_8); omap_set_dma_dest_data_pack(ctx->dma_lch_out, 1); reconfigure_dma = false; } else { if (last_count != count) { omap_set_dma_transfer_params(ctx->dma_lch_out, 
dma_params.data_type, dma_params.elem_count, dma_params.frame_count, dma_params.sync_mode, dma_params.trigger, dma_params.src_or_dst_synch); last_count = count; } /* Configure output start address */ __raw_writel(dma_params.dst_start, omap_dma_base + (0x60 * (ctx->dma_lch_out) + 0xa0)); } /* Is this really needed? */ omap_enable_dma_irq(ctx->dma_lch_in, OMAP_DMA_BLOCK_IRQ); omap_enable_dma_irq(ctx->dma_lch_out, OMAP_DMA_BLOCK_IRQ); wmb(); omap_start_dma(ctx->dma_lch_in); omap_start_dma(ctx->dma_lch_out); spin_lock_irqsave(&ctx->lock, flags); if (ctx->next_req) { struct ablkcipher_request *req = ablkcipher_request_cast(ctx->next_req); if (!(ctx->next_req->flags & CRYPTO_TFM_REQ_DMA_VISIBLE)) { err = dma_map_sg(NULL, req->src, 1, DMA_TO_DEVICE); if (!err) { /* Silently fail for now... */ spin_unlock_irqrestore(&ctx->lock, flags); return 0; } err = dma_map_sg(NULL, req->dst, 1, DMA_FROM_DEVICE); if (!err) { dma_unmap_sg(NULL, req->src, 1, DMA_TO_DEVICE); /* Silently fail for now... */ spin_unlock_irqrestore(&ctx->lock, flags); return 0; } ctx->next_req->flags |= CRYPTO_TFM_REQ_DMA_VISIBLE; ctx->next_req = NULL; } } if (ctx->backlog) { ctx->backlog->complete(ctx->backlog, -EINPROGRESS); ctx->backlog = NULL; } spin_unlock_irqrestore(&ctx->lock, flags); return 0; }
int map_ablkcipher_request(struct device *dev, struct ablkcipher_request *req) { struct ablkcipher_req_ctx *areq_ctx = ablkcipher_request_ctx(req); unsigned int iv_size = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req)); struct mlli_params *mlli_params = &areq_ctx->mlli_params; struct sg_data_array sg_data; struct buff_mgr_handle *buff_mgr = crypto_drvdata->buff_mgr_handle; int dummy = 0; int rc = 0; areq_ctx->sec_dir = 0; areq_ctx->dma_buf_type = DX_DMA_BUF_DLLI; mlli_params->curr_pool = NULL; sg_data.num_of_sg = 0; /* Map IV buffer */ if (likely(iv_size != 0) ) { dump_byte_array("iv", (uint8_t *)req->info, iv_size); areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, (void *)req->info, iv_size, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr))) { DX_LOG_ERR("Mapping iv %u B at va=0x%08lX " "for DMA failed\n",iv_size, (unsigned long)req->info); return -ENOMEM; } DX_LOG_DEBUG("Mapped iv %u B at va=0x%08lX to dma=0x%08lX\n", iv_size, (unsigned long)req->info, (unsigned long)areq_ctx->gen_ctx.iv_dma_addr); } else { areq_ctx->gen_ctx.iv_dma_addr = 0; } /* Map the src sg */ if ( sg_is_last(req->src) && (sg_page(req->src) == NULL) && sg_dma_address(req->src)) { /* The source is secure no mapping is needed */ areq_ctx->sec_dir = DX_SRC_DMA_IS_SECURE; areq_ctx->in_nents = 1; } else { if ( unlikely( dx_map_sg( dev,req->src, req->nbytes, DMA_BIDIRECTIONAL, &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy))){ rc = -ENOMEM; goto fail_unmap_iv; } if ( areq_ctx->in_nents > 1 ) { areq_ctx->dma_buf_type = DX_DMA_BUF_MLLI; } } if ( unlikely(req->src == req->dst)) { if ( areq_ctx->sec_dir == DX_SRC_DMA_IS_SECURE ) { DX_LOG_ERR("Secure key inplace operation " "is not supported \n"); /* both sides are secure */ rc = -ENOMEM; goto fail_unmap_din; } /* Handle inplace operation */ if ( unlikely(areq_ctx->dma_buf_type == DX_DMA_BUF_MLLI) ) { areq_ctx->out_nents = 0; buffer_mgr_set_sg_entry(&sg_data, areq_ctx->in_nents, req->src, req->nbytes, true); } } else { if ( sg_is_last(req->dst) && (sg_page(req->dst) == NULL) && sg_dma_address(req->dst)) { if ( areq_ctx->sec_dir == DX_SRC_DMA_IS_SECURE ) { DX_LOG_ERR("Secure key in both sides is" "not supported \n"); /* both sides are secure */ rc = -ENOMEM; goto fail_unmap_din; } /* The dest is secure no mapping is needed */ areq_ctx->sec_dir = DX_DST_DMA_IS_SECURE; areq_ctx->out_nents = 1; } else { /* Map the dst sg */ if ( unlikely( dx_map_sg(dev,req->dst, req->nbytes, DMA_BIDIRECTIONAL, &areq_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy))){ rc = -ENOMEM; goto fail_unmap_din; } if ( areq_ctx->out_nents > 1 ) { areq_ctx->dma_buf_type = DX_DMA_BUF_MLLI; } } if ( unlikely( (areq_ctx->dma_buf_type == DX_DMA_BUF_MLLI) ) ) { if (areq_ctx->sec_dir != DX_SRC_DMA_IS_SECURE) { buffer_mgr_set_sg_entry(&sg_data, areq_ctx->in_nents, req->src, req->nbytes, true); } if (areq_ctx->sec_dir != DX_DST_DMA_IS_SECURE) { buffer_mgr_set_sg_entry(&sg_data, areq_ctx->out_nents, req->dst, req->nbytes, true); } } /*few entries */ } /* !inplace */ if (unlikely(areq_ctx->dma_buf_type == DX_DMA_BUF_MLLI)) { #if (DX_DEV_SIGNATURE == DX_CC441P_SIG) if (areq_ctx->sec_dir) { /* one of the sides is secure, can't use MLLI*/ rc = -EINVAL; goto fail_unmap_dout; } #endif mlli_params->curr_pool = buff_mgr->mlli_buffs_pool; if (unlikely(buffer_mgr_build_mlli(dev, &sg_data, mlli_params))) { rc = -ENOMEM; goto fail_unmap_dout; } } /*MLLI case*/ DX_LOG_DEBUG(" buf type = %s \n", dx_get_buff_type(areq_ctx->dma_buf_type)); return 0; fail_unmap_dout: if 
(areq_ctx->sec_dir != DX_DST_DMA_IS_SECURE) { dma_unmap_sg(dev, req->dst, areq_ctx->out_nents, DMA_BIDIRECTIONAL); } fail_unmap_din: if (areq_ctx->sec_dir != DX_SRC_DMA_IS_SECURE) { dma_unmap_sg(dev, req->src, areq_ctx->in_nents, DMA_BIDIRECTIONAL); } fail_unmap_iv: if (areq_ctx->gen_ctx.iv_dma_addr != 0) { dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr, iv_size, DMA_TO_DEVICE); } return rc; }
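map_ablkcipher_request() above treats a scatterlist entry as "secure" (already set up for DMA and not to be passed to dma_map_sg) when it is a single, final entry with no backing page but a DMA address already assigned. A minimal sketch of that predicate, assuming only the standard scatterlist accessors; the helper name sg_is_secure_buffer is hypothetical and not part of the driver:
#include <linux/types.h>
#include <linux/scatterlist.h>

/* Hypothetical predicate mirroring the "secure buffer" check used in
 * map_ablkcipher_request(): one last sg entry, no page backing, but a
 * DMA address already populated by whoever prepared the buffer. */
static bool sg_is_secure_buffer(struct scatterlist *sg)
{
	return sg_is_last(sg) && !sg_page(sg) && sg_dma_address(sg);
}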
static int qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req) { struct ablkcipher_request *req = ablkcipher_request_cast(async_req); struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm); struct qce_device *qce = tmpl->qce; enum dma_data_direction dir_src, dir_dst; struct scatterlist *sg; bool diff_dst; gfp_t gfp; int ret; rctx->iv = req->info; rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher); rctx->cryptlen = req->nbytes; diff_dst = (req->src != req->dst) ? true : false; dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL; rctx->src_nents = sg_nents_for_len(req->src, req->nbytes); if (diff_dst) rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes); else rctx->dst_nents = rctx->src_nents; if (rctx->src_nents < 0) { dev_err(qce->dev, "Invalid numbers of src SG.\n"); return rctx->src_nents; } if (rctx->dst_nents < 0) { dev_err(qce->dev, "Invalid numbers of dst SG.\n"); return rctx->dst_nents; } rctx->dst_nents += 1; /* one extra entry for the result buffer sg appended to dst_tbl below */ gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp); if (ret) return ret; sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ); sg = qce_sgtable_add(&rctx->dst_tbl, req->dst); if (IS_ERR(sg)) { ret = PTR_ERR(sg); goto error_free; } sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg); if (IS_ERR(sg)) { ret = PTR_ERR(sg); goto error_free; } sg_mark_end(sg); rctx->dst_sg = rctx->dst_tbl.sgl; ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst); if (ret < 0) goto error_free; if (diff_dst) { ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src); if (ret < 0) goto error_unmap_dst; rctx->src_sg = req->src; } else { rctx->src_sg = rctx->dst_sg; } ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents, rctx->dst_sg, rctx->dst_nents, qce_ablkcipher_done, async_req); if (ret) goto error_unmap_src; qce_dma_issue_pending(&qce->dma); ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0); if (ret) goto error_terminate; return 0; error_terminate: qce_dma_terminate_all(&qce->dma); error_unmap_src: if (diff_dst) dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src); error_unmap_dst: dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst); error_free: sg_free_table(&rctx->dst_tbl); return ret; }
static int __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req, struct ablkcipher_request *req, struct data_queue *data_vq, __u8 op) { struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); unsigned int ivsize = crypto_ablkcipher_ivsize(tfm); struct virtio_crypto_ablkcipher_ctx *ctx = vc_req->ablkcipher_ctx; struct virtio_crypto *vcrypto = ctx->vcrypto; struct virtio_crypto_op_data_req *req_data; int src_nents, dst_nents; int err; unsigned long flags; struct scatterlist outhdr, iv_sg, status_sg, **sgs; int i; u64 dst_len; unsigned int num_out = 0, num_in = 0; int sg_total; uint8_t *iv; src_nents = sg_nents_for_len(req->src, req->nbytes); dst_nents = sg_nents(req->dst); pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n", src_nents, dst_nents); /* Why 3? outhdr + iv + inhdr */ sg_total = src_nents + dst_nents + 3; sgs = kzalloc_node(sg_total * sizeof(*sgs), GFP_ATOMIC, dev_to_node(&vcrypto->vdev->dev)); if (!sgs) return -ENOMEM; req_data = kzalloc_node(sizeof(*req_data), GFP_ATOMIC, dev_to_node(&vcrypto->vdev->dev)); if (!req_data) { kfree(sgs); return -ENOMEM; } vc_req->req_data = req_data; vc_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER; /* Head of operation */ if (op) { req_data->header.session_id = cpu_to_le64(ctx->enc_sess_info.session_id); req_data->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT); } else { req_data->header.session_id = cpu_to_le64(ctx->dec_sess_info.session_id); req_data->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT); } req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER); req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize); req_data->u.sym_req.u.cipher.para.src_data_len = cpu_to_le32(req->nbytes); dst_len = virtio_crypto_alg_sg_nents_length(req->dst); if (unlikely(dst_len > U32_MAX)) { pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n"); err = -EINVAL; goto free; } pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n", req->nbytes, dst_len); if (unlikely(req->nbytes + dst_len + ivsize + sizeof(vc_req->status) > vcrypto->max_size)) { pr_err("virtio_crypto: The length is too big\n"); err = -EINVAL; goto free; } req_data->u.sym_req.u.cipher.para.dst_data_len = cpu_to_le32((uint32_t)dst_len); /* Outhdr */ sg_init_one(&outhdr, req_data, sizeof(*req_data)); sgs[num_out++] = &outhdr; /* IV */ /* * Avoid to do DMA from the stack, switch to using * dynamically-allocated for the IV */ iv = kzalloc_node(ivsize, GFP_ATOMIC, dev_to_node(&vcrypto->vdev->dev)); if (!iv) { err = -ENOMEM; goto free; } memcpy(iv, req->info, ivsize); sg_init_one(&iv_sg, iv, ivsize); sgs[num_out++] = &iv_sg; vc_req->iv = iv; /* Source data */ for (i = 0; i < src_nents; i++) sgs[num_out++] = &req->src[i]; /* Destination data */ for (i = 0; i < dst_nents; i++) sgs[num_out + num_in++] = &req->dst[i]; /* Status */ sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status)); sgs[num_out + num_in++] = &status_sg; vc_req->sgs = sgs; spin_lock_irqsave(&data_vq->lock, flags); err = virtqueue_add_sgs(data_vq->vq, sgs, num_out, num_in, vc_req, GFP_ATOMIC); virtqueue_kick(data_vq->vq); spin_unlock_irqrestore(&data_vq->lock, flags); if (unlikely(err < 0)) goto free_iv; return 0; free_iv: kzfree(iv); free: kzfree(req_data); kfree(sgs); return err; }
int ss_aes_one_req(sunxi_ss_t *sss, struct ablkcipher_request *req) { int ret = 0; struct crypto_ablkcipher *tfm = NULL; ss_aes_ctx_t *ctx = NULL; ss_aes_req_ctx_t *req_ctx = NULL; SS_ENTER(); if (!req->src || !req->dst) { SS_ERR("Invalid sg: src = %p, dst = %p\n", req->src, req->dst); return -EINVAL; } ss_dev_lock(); tfm = crypto_ablkcipher_reqtfm(req); req_ctx = ablkcipher_request_ctx(req); ctx = crypto_ablkcipher_ctx(tfm); /* A31 SS need update key each cycle in decryption. */ if ((ctx->comm.flags & SS_FLAG_NEW_KEY) || (req_ctx->dir == SS_DIR_DECRYPT)) { SS_DBG("KEY address = %p, size = %d\n", ctx->key, ctx->key_size); ss_key_set(ctx->key, ctx->key_size); ctx->comm.flags &= ~SS_FLAG_NEW_KEY; } #ifdef SS_CTS_MODE_ENABLE if (((req_ctx->mode == SS_AES_MODE_CBC) || (req_ctx->mode == SS_AES_MODE_CTS)) && (req->info != NULL)) { #else if ((req_ctx->mode == SS_AES_MODE_CBC) && (req->info != NULL)) { #endif SS_DBG("IV address = %p, size = %d\n", req->info, crypto_ablkcipher_ivsize(tfm)); ss_iv_set(req->info, crypto_ablkcipher_ivsize(tfm)); } #ifdef SS_CTR_MODE_ENABLE if (req_ctx->mode == SS_AES_MODE_CTR) { SS_DBG("Cnt address = %p, size = %d\n", req->info, crypto_ablkcipher_ivsize(tfm)); if (ctx->cnt == 0) memcpy(ctx->iv, req->info, crypto_ablkcipher_ivsize(tfm)); SS_DBG("CNT: %08x %08x %08x %08x \n", *(int *)&ctx->iv[0], *(int *)&ctx->iv[4], *(int *)&ctx->iv[8], *(int *)&ctx->iv[12]); ss_cnt_set(ctx->iv, crypto_ablkcipher_ivsize(tfm)); } #endif req_ctx->dma_src.sg = req->src; req_ctx->dma_dst.sg = req->dst; ret = ss_aes_start(ctx, req_ctx, req->nbytes); if (ret < 0) SS_ERR("ss_aes_start fail(%d)\n", ret); ss_dev_unlock(); #ifdef SS_CTR_MODE_ENABLE if (req_ctx->mode == SS_AES_MODE_CTR) { ss_cnt_get(ctx->comm.flow, ctx->iv, crypto_ablkcipher_ivsize(tfm)); SS_DBG("CNT: %08x %08x %08x %08x \n", *(int *)&ctx->iv[0], *(int *)&ctx->iv[4], *(int *)&ctx->iv[8], *(int *)&ctx->iv[12]); } #endif ctx->cnt += req->nbytes; if (req->base.complete) req->base.complete(&req->base, ret); return ret; } irqreturn_t sunxi_ss_irq_handler(int irq, void *dev_id) { sunxi_ss_t *sss = (sunxi_ss_t *)dev_id; unsigned long flags = 0; int pending = 0; spin_lock_irqsave(&sss->lock, flags); pending = ss_pending_get(); SS_DBG("SS pending %#x\n", pending); spin_unlock_irqrestore(&sss->lock, flags); return IRQ_HANDLED; }
static int process_next_packet(struct aes_container *aes_con, struct ablkcipher_request *areq, int state) { u8 *iv; int mode, dir, err = -EINVAL; unsigned long queue_flag; u32 inc, nbytes, remain, chunk_size; struct scatterlist *src = NULL; struct scatterlist *dst = NULL; struct crypto_ablkcipher *cipher; struct aes_ctx *ctx; spin_lock_irqsave(&aes_queue->lock, queue_flag); dir = aes_con->encdec; mode = aes_con->mode; iv = aes_con->iv; if (state & PROCESS_SCATTER) { src = scatterwalk_sg_next(areq->src); dst = scatterwalk_sg_next(areq->dst); if (!src || !dst) { spin_unlock_irqrestore(&aes_queue->lock, queue_flag); return 1; } } else if (state & PROCESS_NEW_PACKET) { src = areq->src; dst = areq->dst; } remain = aes_con->bytes_processed; chunk_size = src->length; if (remain > DEU_MAX_PACKET_SIZE) inc = DEU_MAX_PACKET_SIZE; else if (remain > chunk_size) inc = chunk_size; else inc = remain; remain -= inc; aes_con->nbytes = inc; if (state & PROCESS_SCATTER) { aes_con->src_buf += aes_con->nbytes; aes_con->dst_buf += aes_con->nbytes; } lq_sg_init(aes_con, src, dst); nbytes = aes_con->nbytes; //printk("debug - Line: %d, func: %s, reqsize: %d, scattersize: %d\n", // __LINE__, __func__, nbytes, chunk_size); cipher = crypto_ablkcipher_reqtfm(areq); ctx = crypto_ablkcipher_ctx(cipher); if (aes_queue->hw_status == AES_IDLE) aes_queue->hw_status = AES_STARTED; aes_con->bytes_processed -= aes_con->nbytes; err = ablkcipher_enqueue_request(&aes_queue->list, &aes_con->arequest); if (err == -EBUSY) { spin_unlock_irqrestore(&aes_queue->lock, queue_flag); printk("Failed to enqueue request, ln: %d, err: %d\n", __LINE__, err); return -EINVAL; } spin_unlock_irqrestore(&aes_queue->lock, queue_flag); err = lq_deu_aes_core(ctx, aes_con->dst_buf, aes_con->src_buf, iv, nbytes, dir, mode); return err; }
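For reference, the chunk-size selection in process_next_packet() above reduces to the small cascade sketched below: the DEU packet limit wins first, then the length of the current scatterlist entry, otherwise the remaining byte count is used as-is. The helper name next_chunk_len is illustrative and not part of the driver.
#include <stdint.h>

/* Illustrative helper mirroring the chunk-size cascade in
 * process_next_packet(): cap at the hardware packet limit first,
 * then at the current scatterlist entry, else take the remainder. */
static uint32_t next_chunk_len(uint32_t remain, uint32_t sg_len,
			       uint32_t max_packet)
{
	if (remain > max_packet)
		return max_packet;
	if (remain > sg_len)
		return sg_len;
	return remain;
}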