/*
 * Initialise the AFALG cipher context for an EVP cipher operation:
 * validate the EVP context, map the cipher NID to a kernel salg name,
 * create and key the AF_ALG socket, and set up the AIO state used for
 * async processing (plus the zero-copy pipe when ALG_ZERO_COPY is set).
 *
 * Returns 1 on success, 0 on failure. On failure after socket creation,
 * the opened fds are closed before returning.
 */
static int afalg_cipher_init(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                             const unsigned char *iv, int enc)
{
    int ciphertype;
    int ret;
    afalg_ctx *actx;
    char ciphername[ALG_MAX_SALG_NAME];

    if (ctx == NULL || key == NULL) {
        ALG_WARN("%s: Null Parameter\n", __func__);
        return 0;
    }

    if (EVP_CIPHER_CTX_cipher(ctx) == NULL) {
        ALG_WARN("%s: Cipher object NULL\n", __func__);
        return 0;
    }

    actx = EVP_CIPHER_CTX_get_cipher_data(ctx);
    if (actx == NULL) {
        ALG_WARN("%s: Cipher data NULL\n", __func__);
        return 0;
    }

    /* Map the EVP cipher NID to the kernel crypto API algorithm name */
    ciphertype = EVP_CIPHER_CTX_nid(ctx);
    switch (ciphertype) {
    case NID_aes_128_cbc:
        strncpy(ciphername, "cbc(aes)", ALG_MAX_SALG_NAME);
        break;
    default:
        ALG_WARN("%s: Unsupported Cipher type %d\n", __func__, ciphertype);
        return 0;
    }
    /* strncpy() does not guarantee termination; force it */
    ciphername[ALG_MAX_SALG_NAME - 1] = '\0';

    if (ALG_AES_IV_LEN != EVP_CIPHER_CTX_iv_length(ctx)) {
        ALG_WARN("%s: Unsupported IV length :%d\n", __func__,
                 EVP_CIPHER_CTX_iv_length(ctx));
        return 0;
    }

    /* Setup AFALG socket for crypto processing */
    ret = afalg_create_sk(actx, "skcipher", ciphername);
    if (ret < 1)
        return 0;

    ret = afalg_set_key(actx, key, EVP_CIPHER_CTX_key_length(ctx));
    if (ret < 1)
        goto err;

    /* Setup AIO ctx to allow async AFALG crypto processing */
    if (afalg_init_aio(&actx->aio) == 0)
        goto err;

# ifdef ALG_ZERO_COPY
    /*
     * BUG FIX: the original ignored pipe()'s return value; on failure the
     * later vmsplice()/splice() calls in afalg_start_cipher_sk() would
     * operate on uninitialised fds. Fail the init instead.
     */
    if (pipe(actx->zc_pipe) != 0) {
        ALG_PERR("%s: pipe failed : ", __func__);
        goto err;
    }
# endif

    actx->init_done = MAGIC_INIT_NUM;

    return 1;

 err:
    close(actx->sfd);
    close(actx->bfd);
    return 0;
}
/*
 * Submit an AIO read on the AF_ALG socket (which triggers the async crypto
 * operation in the kernel) and wait for its completion via the eventfd,
 * yielding through ASYNC_pause_job() while the operation is in flight.
 * -EBUSY completions are resubmitted up to 3 times.
 *
 * Returns 1 when the operation completed successfully, 0 on any failure.
 */
static int afalg_fin_cipher_aio(afalg_aio *aio, int sfd, unsigned char *buf,
                                size_t len)
{
    int r;
    int retry = 0;
    unsigned int done = 0;
    struct iocb *cb;
    struct timespec timeout;
    struct io_event events[MAX_INFLIGHTS];
    u_int64_t eval = 0;

    /* Non-blocking poll of the completion ring */
    timeout.tv_sec = 0;
    timeout.tv_nsec = 0;

    /* if efd has not been initialised yet do it here */
    if (aio->mode == MODE_UNINIT) {
        r = afalg_setup_async_event_notification(aio);
        if (r == 0)
            return 0;
    }

    /* Only one request in flight at a time: always slot 0 */
    cb = &(aio->cbt[0 % MAX_INFLIGHTS]);
    memset(cb, '\0', sizeof(*cb));
    cb->aio_fildes = sfd;
    cb->aio_lio_opcode = IOCB_CMD_PREAD;
    /*
     * The pointer has to be converted to unsigned value first to avoid
     * sign extension on cast to 64 bit value in 32-bit builds
     */
    cb->aio_buf = (size_t)buf;
    cb->aio_offset = 0;
    cb->aio_data = 0;
    cb->aio_nbytes = len;
    /* Completion is signalled on the eventfd so we can wait on it */
    cb->aio_flags = IOCB_FLAG_RESFD;
    cb->aio_resfd = aio->efd;

    /*
     * Perform AIO read on AFALG socket, this in turn performs an async
     * crypto operation in kernel space
     */
    r = io_read(aio->aio_ctx, 1, &cb);
    if (r < 0) {
        ALG_PWARN("%s(%d): io_read failed : ", __FILE__, __LINE__);
        return 0;
    }

    do {
        /* While AIO read is being performed pause job */
        ASYNC_pause_job();

        /* Check for completion of AIO read */
        r = read(aio->efd, &eval, sizeof(eval));
        if (r < 0) {
            /* Nothing signalled yet; go back to waiting */
            if (errno == EAGAIN || errno == EWOULDBLOCK)
                continue;
            ALG_PERR("%s(%d): read failed for event fd : ", __FILE__, __LINE__);
            return 0;
        } else if (r == 0 || eval <= 0) {
            /*
             * BUG FIX: "%lu" mismatches u_int64_t on 32-bit builds (where
             * u_int64_t is unsigned long long) — undefined behaviour per
             * C11 fprintf. Cast explicitly and use "%llu".
             */
            ALG_WARN("%s(%d): eventfd read %d bytes, eval = %llu\n", __FILE__,
                     __LINE__, r, (unsigned long long)eval);
        }
        if (eval > 0) {

            /* Get results of AIO read */
            r = io_getevents(aio->aio_ctx, 1, MAX_INFLIGHTS,
                             events, &timeout);
            if (r > 0) {
                /*
                 * events.res indicates the actual status of the operation.
                 * Handle the error condition first.
                 */
                if (events[0].res < 0) {
                    /*
                     * Underlying operation cannot be completed at the time
                     * of previous submission. Resubmit for the operation.
                     */
                    if (events[0].res == -EBUSY && retry++ < 3) {
                        r = io_read(aio->aio_ctx, 1, &cb);
                        if (r < 0) {
                            ALG_PERR("%s(%d): retry %d for io_read failed : ",
                                     __FILE__, __LINE__, retry);
                            return 0;
                        }
                        continue;
                    } else {
                        /*
                         * Retries exceed for -EBUSY or unrecoverable error
                         * condition for this instance of operation.
                         */
                        ALG_WARN
                            ("%s(%d): Crypto Operation failed with code %lld\n",
                             __FILE__, __LINE__, events[0].res);
                        return 0;
                    }
                }
                /* Operation successful. */
                done = 1;
            } else if (r < 0) {
                ALG_PERR("%s(%d): io_getevents failed : ", __FILE__, __LINE__);
                return 0;
            } else {
                ALG_WARN("%s(%d): io_geteventd read 0 bytes\n", __FILE__,
                         __LINE__);
            }
        }
    } while (!done);

    return 1;
}
/*
 * Send the cipher operation request to the kernel over the AF_ALG socket:
 * the direction (encrypt/decrypt) and IV travel as sendmsg() ancillary
 * data; the input buffer is either copied in the same sendmsg() call or,
 * when ALG_ZERO_COPY is defined, pinned via vmsplice()/splice() through
 * the context's pipe to avoid the user-to-kernel copy.
 *
 * Returns 1 on success, 0 on failure.
 */
static int afalg_start_cipher_sk(afalg_ctx *actx, const unsigned char *in,
                                 size_t inl, const unsigned char *iv,
                                 unsigned int enc)
{
    struct msghdr msg = { 0 };
    struct cmsghdr *cmsg;
    struct iovec iov;
    ssize_t sbytes;
# ifdef ALG_ZERO_COPY
    int ret;
# endif
    char cbuf[CMSG_SPACE(ALG_IV_LEN(ALG_AES_IV_LEN)) + CMSG_SPACE(ALG_OP_LEN)];

    memset(cbuf, 0, sizeof(cbuf));
    msg.msg_control = cbuf;
    msg.msg_controllen = sizeof(cbuf);

    /*
     * cipher direction (i.e. encrypt or decrypt) and iv are sent to the
     * kernel as part of sendmsg()'s ancillary data
     */
    cmsg = CMSG_FIRSTHDR(&msg);
    afalg_set_op_sk(cmsg, enc);
    cmsg = CMSG_NXTHDR(&msg, cmsg);
    afalg_set_iv_sk(cmsg, iv, ALG_AES_IV_LEN);

    /* iov that describes input data */
    iov.iov_base = (unsigned char *)in;
    iov.iov_len = inl;

    /*
     * BUG FIX: the original assigned msg.msg_flags = MSG_MORE. For
     * sendmsg() the msg_flags field is ignored by the kernel (it is an
     * output field used only by recvmsg()), so the assignment was a
     * misleading no-op and has been removed. If MSG_MORE semantics were
     * ever intended, the flag must be passed in sendmsg()'s flags
     * argument instead.
     */

# ifdef ALG_ZERO_COPY
    /*
     * ZERO_COPY mode
     * Works best when buffer is 4k aligned
     * OPENS: out of place processing (i.e. out != in)
     */

    /* Input data is not sent as part of call to sendmsg() */
    msg.msg_iovlen = 0;
    msg.msg_iov = NULL;

    /* Sendmsg() sends iv and cipher direction to the kernel */
    sbytes = sendmsg(actx->sfd, &msg, 0);
    if (sbytes < 0) {
        ALG_PERR("%s: sendmsg failed for zero copy cipher operation : ",
                 __func__);
        return 0;
    }

    /*
     * vmsplice and splice are used to pin the user space input buffer for
     * kernel space processing avoiding copys from user to kernel space
     */
    ret = vmsplice(actx->zc_pipe[1], &iov, 1, SPLICE_F_GIFT);
    if (ret < 0) {
        ALG_PERR("%s: vmsplice failed : ", __func__);
        return 0;
    }

    ret = splice(actx->zc_pipe[0], NULL, actx->sfd, NULL, inl, 0);
    if (ret < 0) {
        ALG_PERR("%s: splice failed : ", __func__);
        return 0;
    }
# else
    msg.msg_iovlen = 1;
    msg.msg_iov = &iov;

    /* Sendmsg() sends iv, cipher direction and input data to the kernel */
    sbytes = sendmsg(actx->sfd, &msg, 0);
    if (sbytes < 0) {
        ALG_PERR("%s: sendmsg failed for cipher operation : ", __func__);
        return 0;
    }

    if (sbytes != (ssize_t) inl) {
        /*
         * BUG FIX: "%zd" is for signed ssize_t; inl is size_t, which
         * requires "%zu" (format/argument mismatch is UB per C11).
         */
        ALG_WARN("Cipher operation send bytes %zd != inlen %zu\n", sbytes,
                 inl);
        return 0;
    }
# endif

    return 1;
}