static void virtio_crypto_handle_dataq_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
    VirtIOCryptoQueue *q =
         &vcrypto->vqs[virtio_crypto_vq2q(virtio_get_queue_index(vq))];

    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->dataq_bh);
}
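/*
 * Hedged sketch, not part of the original code: one plausible way the data
 * queues and their bottom halves could be wired together at realize time,
 * so that a guest kick handled by virtio_crypto_handle_dataq_bh() above ends
 * up scheduling per-queue work.  The max_queues count, the dataq/dataq_bh/
 * vcrypto fields of VirtIOCryptoQueue and the bottom-half callback are
 * assumptions made for illustration.
 */
static void virtio_crypto_dataq_bh_sketch(void *opaque); /* see sketch below */

static void virtio_crypto_dataq_setup_sketch(VirtIOCrypto *vcrypto)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
    uint32_t i;

    for (i = 0; i < vcrypto->max_queues; i++) {
        /* Guest kicks on this queue are routed to the vq handler above */
        vcrypto->vqs[i].dataq =
            virtio_add_queue(vdev, 1024, virtio_crypto_handle_dataq_bh);
        /* The bottom half performs the actual request processing */
        vcrypto->vqs[i].dataq_bh =
            qemu_bh_new(virtio_crypto_dataq_bh_sketch, &vcrypto->vqs[i]);
        vcrypto->vqs[i].vcrypto = vcrypto;
    }
}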
static int virtio_crypto_handle_request(VirtIOCryptoReq *request)
{
    VirtIOCrypto *vcrypto = request->vcrypto;
    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
    VirtQueueElement *elem = &request->elem;
    int queue_index = virtio_crypto_vq2q(virtio_get_queue_index(request->vq));
    struct virtio_crypto_op_data_req req;
    int ret;
    struct iovec *in_iov;
    struct iovec *out_iov;
    unsigned in_num;
    unsigned out_num;
    uint32_t opcode;
    uint8_t status = VIRTIO_CRYPTO_ERR;
    uint64_t session_id;
    CryptoDevBackendSymOpInfo *sym_op_info = NULL;
    Error *local_err = NULL;

    if (elem->out_num < 1 || elem->in_num < 1) {
        virtio_error(vdev, "virtio-crypto dataq missing headers");
        return -1;
    }

    out_num = elem->out_num;
    out_iov = elem->out_sg;
    in_num = elem->in_num;
    in_iov = elem->in_sg;
    if (unlikely(iov_to_buf(out_iov, out_num, 0, &req, sizeof(req))
                 != sizeof(req))) {
        virtio_error(vdev, "virtio-crypto request outhdr too short");
        return -1;
    }
    iov_discard_front(&out_iov, &out_num, sizeof(req));

    if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_crypto_inhdr)) {
        virtio_error(vdev, "virtio-crypto request inhdr too short");
        return -1;
    }
    /* We always touch the last byte, so just see how big in_iov is. */
    request->in_len = iov_size(in_iov, in_num);
    request->in = (void *)in_iov[in_num - 1].iov_base
              + in_iov[in_num - 1].iov_len
              - sizeof(struct virtio_crypto_inhdr);
    iov_discard_back(in_iov, &in_num, sizeof(struct virtio_crypto_inhdr));

    /*
     * The length of operation result, including dest_data
     * and digest_result if exists.
     */
    request->in_num = in_num;
    request->in_iov = in_iov;

    opcode = ldl_le_p(&req.header.opcode);
    session_id = ldq_le_p(&req.header.session_id);

    switch (opcode) {
    case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
    case VIRTIO_CRYPTO_CIPHER_DECRYPT:
        ret = virtio_crypto_handle_sym_req(vcrypto,
                         &req.u.sym_req,
                         &sym_op_info,
                         out_iov, out_num);
        /* Serious errors, need to reset virtio crypto device */
        if (ret == -EFAULT) {
            return -1;
        } else if (ret == -VIRTIO_CRYPTO_NOTSUPP) {
            virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
            virtio_crypto_free_request(request);
        } else {
            sym_op_info->session_id = session_id;

            /* Set request's parameter */
            request->flags = CRYPTODEV_BACKEND_ALG_SYM;
            request->u.sym_op_info = sym_op_info;
            ret = cryptodev_backend_crypto_operation(vcrypto->cryptodev,
                                    request, queue_index, &local_err);
            if (ret < 0) {
                status = -ret;
                if (local_err) {
                    error_report_err(local_err);
                }
            } else { /* ret == VIRTIO_CRYPTO_OK */
                status = ret;
            }
            virtio_crypto_req_complete(request, status);
            virtio_crypto_free_request(request);
        }
        break;
    case VIRTIO_CRYPTO_HASH:
    case VIRTIO_CRYPTO_MAC:
    case VIRTIO_CRYPTO_AEAD_ENCRYPT:
    case VIRTIO_CRYPTO_AEAD_DECRYPT:
    default:
        error_report("virtio-crypto unsupported dataq opcode: %u",
                     opcode);
        virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
        virtio_crypto_free_request(request);
    }

    return 0;
}
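/*
 * Hedged sketch, not part of the original code: a bottom-half body that
 * drains the data queue and feeds each popped element to
 * virtio_crypto_handle_request() above.  It assumes that elem is the first
 * field of VirtIOCryptoReq (so virtqueue_pop() can allocate the request
 * directly) and that VirtIOCryptoQueue carries dataq and vcrypto pointers;
 * both are assumptions made for illustration.
 */
static void virtio_crypto_dataq_bh_sketch(void *opaque)
{
    VirtIOCryptoQueue *q = opaque;
    VirtIOCrypto *vcrypto = q->vcrypto;
    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
    VirtIOCryptoReq *req;

    /* Nothing to do if the device stopped while the BH was pending */
    if (!vdev->vm_running) {
        return;
    }

    for (;;) {
        /* Pop descriptor chains until the queue is drained */
        while ((req = virtqueue_pop(q->dataq, sizeof(VirtIOCryptoReq)))) {
            req->vcrypto = vcrypto;
            req->vq = q->dataq;
            if (virtio_crypto_handle_request(req) < 0) {
                /* Fatal error: the device has been marked broken */
                virtqueue_detach_element(q->dataq, &req->elem, 0);
                g_free(req);
                return;
            }
        }

        virtio_queue_set_notification(q->dataq, 1);
        /* Re-check: the guest may have added buffers before we re-enabled */
        if (virtio_queue_empty(q->dataq)) {
            break;
        }
        virtio_queue_set_notification(q->dataq, 0);
    }
}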
/* Raise an interrupt to signal guest, if necessary */
void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq)
{
    set_bit(virtio_get_queue_index(vq), s->batch_notify_vqs);
    qemu_bh_schedule(s->bh);
}
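/*
 * Hedged sketch, not part of the original code: a batch-notify bottom half
 * that consumes the bits set by virtio_blk_data_plane_notify() above and
 * raises one interrupt per queue that completed work.  The num_queues count
 * and the s->vdev field are assumptions made for illustration.
 */
static void virtio_blk_notify_guest_bh_sketch(void *opaque)
{
    VirtIOBlockDataPlane *s = opaque;
    unsigned i;

    for (i = 0; i < s->num_queues; i++) {
        /* Clear the pending bit and notify only the queues marked above */
        if (test_and_clear_bit(i, s->batch_notify_vqs)) {
            virtio_notify_irqfd(s->vdev, virtio_get_queue(s->vdev, i));
        }
    }
}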