/*
 * Send a control event to the guest.
 */
static size_t send_control_event(VirtIOCrypto *crdev, uint16_t event,
                                 uint16_t value)
{
    struct virtio_crypto_control cpkt;
    VirtQueueElement elem;
    VirtQueue *vq;

    FUNC_IN;

    stw_p(&cpkt.event, event);
    stw_p(&cpkt.value, value);

    vq = crdev->c_ivq;
    if (!virtio_queue_ready(vq)) {
        return 0;
    }
    if (!virtqueue_pop(vq, &elem)) {
        return 0;
    }

    memcpy(elem.in_sg[0].iov_base, &cpkt, sizeof(cpkt));
    virtqueue_push(vq, &elem, sizeof(cpkt));
    virtio_notify(&crdev->vdev, vq);

    FUNC_OUT;
    return sizeof(cpkt);
}
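/*
 * For reference, a minimal sketch of the control packet written by the two
 * stw_p() stores above. The field names and ordering here are assumptions
 * inferred from this function, not a definition taken from a shared header.
 */
struct virtio_crypto_control {
    uint16_t event;   /* event type, stored guest-endian by stw_p() */
    uint16_t value;   /* event argument, stored guest-endian by stw_p() */
};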
static void virtio_balloon_stat(void *opaque, BalloonInfo *info)
{
    VirtIOBalloon *dev = opaque;

#if 0
    /* Disable guest-provided stats for now. For more details please check:
     * https://bugzilla.redhat.com/show_bug.cgi?id=623903
     *
     * If you do enable it (which is probably not going to happen as we
     * need a new command for it), remember that you also need to fill the
     * appropriate members of the BalloonInfo structure so that the stats
     * are returned to the client.
     */
    if (dev->vdev.guest_features & (1 << VIRTIO_BALLOON_F_STATS_VQ)) {
        virtqueue_push(dev->svq, &dev->stats_vq_elem, dev->stats_vq_offset);
        virtio_notify(&dev->vdev, dev->svq);
        return;
    }
#endif

    /* Stats are not supported. Clear out any stale values that might
     * have been set by a more featureful guest kernel.
     */
    reset_stats(dev);

    info->actual = ram_size - ((uint64_t) dev->actual <<
                               VIRTIO_BALLOON_PFN_SHIFT);
}
static void virtio_balloon_stat(void *opaque, MonitorCompletion cb,
                                void *cb_data)
{
    VirtIOBalloon *dev = opaque;

    /* For now, only allow one request at a time. This restriction can be
     * removed later by queueing callback and data pairs.
     */
    if (dev->stats_callback != NULL) {
        return;
    }
    dev->stats_callback = cb;
    dev->stats_opaque_callback_data = cb_data;

    if (ENABLE_GUEST_STATS &&
        (dev->vdev.guest_features & (1 << VIRTIO_BALLOON_F_STATS_VQ))) {
        virtqueue_push(dev->svq, &dev->stats_vq_elem, dev->stats_vq_offset);
        virtio_notify(&dev->vdev, dev->svq);
        return;
    }

    /* Stats are not supported. Clear out any stale values that might
     * have been set by a more featureful guest kernel.
     */
    reset_stats(dev);
    complete_stats_request(dev);
}
static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBalloon *s = to_virtio_balloon(vdev);
    VirtQueueElement elem;

    while (virtqueue_pop(vq, &elem)) {
        size_t offset = 0;
        uint32_t pfn;

        while (iov_to_buf(elem.out_sg, elem.out_num, &pfn, offset, 4) == 4) {
            ram_addr_t pa;
            ram_addr_t addr;

            pa = (ram_addr_t)ldl_p(&pfn) << VIRTIO_BALLOON_PFN_SHIFT;
            offset += 4;

            addr = cpu_get_physical_page_desc(pa);
            if ((addr & ~TARGET_PAGE_MASK) != IO_MEM_RAM)
                continue;

            /* Using qemu_get_ram_ptr is bending the rules a bit, but
               should be OK because we only want a single page.  */
            balloon_page(qemu_get_ram_ptr(addr), !!(vq == s->dvq));
        }

        virtqueue_push(vq, &elem, offset);
        virtio_notify(vdev, vq);
    }
}
static size_t send_buffer(VirtIOCrypto *crdev, const uint8_t *buf, size_t size)
{
    VirtQueueElement elem;
    VirtQueue *vq;
    unsigned int len;

    vq = crdev->ivq;

    /* Is the virtqueue ready? */
    if (!virtio_queue_ready(vq)) {
        return 0;
    }

    /* Is there a buffer in the queue? If not, we cannot send data
     * to the guest. */
    if (!virtqueue_pop(vq, &elem)) {
        return 0;
    }

    memcpy(elem.in_sg[0].iov_base, buf, size);
    virtqueue_push(vq, &elem, size);
    virtio_notify(&crdev->vdev, vq);

    len = size;
    return len;
}
/*
 * Handler for the ovq virtqueue.
 * Called when the guest sends data to the host.
 */
static void handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOCrypto *crdev = DO_UPCAST(VirtIOCrypto, vdev, vdev);
    VirtQueueElement elem;
    uint8_t *buf;
    size_t buf_size;

    FUNC_IN;

    /* Pop a buffer from the virtqueue and check the return value. */
    if (!virtqueue_pop(vq, &elem)) {
        return;
    }

    /* FIXME: Are we sure all data is in one sg list? */
    buf = elem.out_sg[0].iov_base;
    buf_size = elem.out_sg[0].iov_len;

    crypto_handle_ioctl_packet(crdev, buf, buf_size);

    /* Put the buffer back and notify the guest. */
    virtqueue_push(vq, &elem, 0);
    virtio_notify(vdev, vq);

    FUNC_OUT;
}
/*
 * Handler for the control c_ovq virtqueue.
 * Called when the guest sends a control message to the host.
 */
static void control_out(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtQueueElement elem;
    VirtIOCrypto *crdev;
    uint8_t *buf;
    size_t len;

    FUNC_IN;

    crdev = DO_UPCAST(VirtIOCrypto, vdev, vdev);
    len = 0;
    buf = NULL;

    if (!virtqueue_pop(vq, &elem)) {
        return;
    }

    len = iov_size(elem.out_sg, elem.out_num);
    buf = g_malloc(len);
    iov_to_buf(elem.out_sg, elem.out_num, 0, buf, len);

    handle_control_message(crdev, buf, len);

    virtqueue_push(vq, &elem, 0);
    virtio_notify(vdev, vq);

    g_free(buf);
    FUNC_OUT;
}
static void vhost_vsock_send_transport_reset(VHostVSock *vsock)
{
    VirtQueueElement *elem;
    VirtQueue *vq = vsock->event_vq;
    struct virtio_vsock_event event = {
        .id = cpu_to_le32(VIRTIO_VSOCK_EVENT_TRANSPORT_RESET),
    };

    elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
    if (!elem) {
        error_report("vhost-vsock missed transport reset event");
        return;
    }

    if (elem->out_num) {
        error_report("invalid vhost-vsock event virtqueue element with "
                     "out buffers");
        goto out;
    }

    if (iov_from_buf(elem->in_sg, elem->in_num, 0,
                     &event, sizeof(event)) != sizeof(event)) {
        error_report("vhost-vsock event virtqueue element is too short");
        goto out;
    }

    virtqueue_push(vq, elem, sizeof(event));
    virtio_notify(VIRTIO_DEVICE(vsock), vq);

out:
    g_free(elem);
}
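/*
 * For reference, the event payload copied above: per the virtio spec, a
 * virtio-vsock event is a single little-endian 32-bit event id, and
 * VIRTIO_VSOCK_EVENT_TRANSPORT_RESET tells the guest that all of its
 * connections were dropped (e.g. across live migration).
 */
struct virtio_vsock_event {
    uint32_t id;   /* le32 event id */
};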
static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
    VirtQueueElement elem;
    MemoryRegionSection section;

    while (virtqueue_pop(vq, &elem)) {
        size_t offset = 0;
        uint32_t pfn;

        while (iov_to_buf(elem.out_sg, elem.out_num, offset, &pfn, 4) == 4) {
            ram_addr_t pa;
            ram_addr_t addr;
            int p = virtio_ldl_p(vdev, &pfn);

            pa = (ram_addr_t) p << VIRTIO_BALLOON_PFN_SHIFT;
            offset += 4;

            /* FIXME: remove get_system_memory(), but how? */
            section = memory_region_find(get_system_memory(), pa, 1);
            if (!int128_nz(section.size) || !memory_region_is_ram(section.mr))
                continue;

            /* Using memory_region_get_ram_ptr is bending the rules a bit, but
               should be OK because we only want a single page.  */
            addr = section.offset_within_region;
            balloon_page(memory_region_get_ram_ptr(section.mr) + addr,
                         !!(vq == s->dvq));
            memory_region_unref(section.mr);
        }

        virtqueue_push(vq, &elem, offset);
        virtio_notify(vdev, vq);
    }
}
static void virtio_blk_req_complete(VirtIOBlockReq *req, int status)
{
    VirtIOBlock *s = req->dev;

    req->in->status = status;
    virtqueue_push(s->vq, &req->elem, req->qiov.size + sizeof(*req->in));
    virtio_notify(&s->vdev, s->vq);

    qemu_free(req);
}
static void virtio_blk_req_complete(VirtIOBlockReq *req, int status)
{
    VirtIOBlock *s = req->dev;

    trace_virtio_blk_req_complete(req, status);

    stb_p(&req->in->status, status);
    virtqueue_push(s->vq, &req->elem, req->qiov.size + sizeof(*req->in));
    virtio_notify(&s->vdev, s->vq);
}
static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBlock *s = to_virtio_blk(vdev);
    VirtIOBlockReq *req;

    while ((req = virtio_blk_get_request(s))) {
        int i;

        if (req->elem.out_num < 1 || req->elem.in_num < 1) {
            fprintf(stderr, "virtio-blk missing headers\n");
            exit(1);
        }

        if (req->elem.out_sg[0].iov_len < sizeof(*req->out) ||
            req->elem.in_sg[req->elem.in_num - 1].iov_len < sizeof(*req->in)) {
            fprintf(stderr, "virtio-blk header not in correct element\n");
            exit(1);
        }

        req->out = (void *)req->elem.out_sg[0].iov_base;
        req->in = (void *)req->elem.in_sg[req->elem.in_num - 1].iov_base;

        if (req->out->type & VIRTIO_BLK_T_SCSI_CMD) {
            unsigned int len = sizeof(*req->in);

            req->in->status = VIRTIO_BLK_S_UNSUPP;
            virtqueue_push(vq, &req->elem, len);
            virtio_notify(vdev, vq);
            qemu_free(req);
        } else if (req->out->type & VIRTIO_BLK_T_OUT) {
            if (virtio_blk_handle_write(req) < 0)
                break;
        } else {
            for (i = 0; i < req->elem.in_num - 1; i++)
                req->size += req->elem.in_sg[i].iov_len;

            req->buffer = qemu_memalign(512, req->size);
            if (req->buffer == NULL) {
                qemu_free(req);
                break;
            }

            bdrv_aio_read(s->bs, req->out->sector, req->buffer,
                          req->size / 512, virtio_blk_rw_complete, req);
        }
    }
    /*
     * FIXME: Want to check for completions before returning to guest mode,
     * so cached reads and writes are reported as quickly as possible. But
     * that should be done in the generic block layer.
     */
}
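/*
 * For reference, the request framing the header checks above rely on, as
 * laid out by the virtio-blk spec: out_sg[0] starts with the request header,
 * and the last in_sg element carries a one-byte status that the device
 * fills in on completion.
 */
struct virtio_blk_outhdr {
    uint32_t type;     /* VIRTIO_BLK_T_IN, VIRTIO_BLK_T_OUT, ... */
    uint32_t ioprio;   /* request priority hint */
    uint64_t sector;   /* offset in 512-byte sectors */
};

struct virtio_blk_inhdr {
    uint8_t status;    /* VIRTIO_BLK_S_OK, VIRTIO_BLK_S_IOERR, ... */
};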
static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
    VirtQueueElement *elem;
    MemoryRegionSection section;

    for (;;) {
        size_t offset = 0;
        uint32_t pfn;

        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            return;
        }

        while (iov_to_buf(elem->out_sg, elem->out_num, offset, &pfn, 4) == 4) {
            hwaddr pa;
            int p = virtio_ldl_p(vdev, &pfn);

            pa = (hwaddr) p << VIRTIO_BALLOON_PFN_SHIFT;
            offset += 4;

            section = memory_region_find(get_system_memory(), pa,
                                         BALLOON_PAGE_SIZE);
            if (!section.mr) {
                trace_virtio_balloon_bad_addr(pa);
                continue;
            }
            if (!memory_region_is_ram(section.mr) ||
                memory_region_is_rom(section.mr) ||
                memory_region_is_romd(section.mr)) {
                trace_virtio_balloon_bad_addr(pa);
                memory_region_unref(section.mr);
                continue;
            }

            trace_virtio_balloon_handle_output(memory_region_name(section.mr),
                                               pa);
            if (!qemu_balloon_is_inhibited()) {
                if (vq == s->ivq) {
                    balloon_inflate_page(s, section.mr,
                                         section.offset_within_region);
                } else if (vq == s->dvq) {
                    balloon_deflate_page(s, section.mr,
                                         section.offset_within_region);
                } else {
                    g_assert_not_reached();
                }
            }
            memory_region_unref(section.mr);
        }

        virtqueue_push(vq, elem, offset);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}
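/*
 * For reference, the address arithmetic used by the balloon handlers above:
 * each 32-bit entry in the guest's buffer is a page frame number in fixed
 * 4 KiB units, so with VIRTIO_BALLOON_PFN_SHIFT == 12 a pfn of 0x12345
 * names guest physical address 0x12345000. The helper name below is
 * illustrative only, not part of the code above.
 */
static inline hwaddr balloon_pfn_to_addr(uint32_t pfn)
{
    return (hwaddr)pfn << VIRTIO_BALLOON_PFN_SHIFT;   /* pfn * 4096 */
}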
void
vioconstart(struct tty *tp)
{
    struct viocon_softc *sc = dev2sc(tp->t_dev);
    struct virtio_softc *vsc;
    struct viocon_port *vp = dev2port(tp->t_dev);
    struct virtqueue *vq;
    u_char *buf;
    int s, cnt, slot, ret, ndone;

    vsc = sc->sc_virtio;
    vq = vp->vp_tx;

    s = spltty();

    ndone = viocon_tx_drain(vp, vq);
    if (ISSET(tp->t_state, TS_BUSY)) {
        if (ndone > 0)
            CLR(tp->t_state, TS_BUSY);
        else
            goto out;
    }

    if (ISSET(tp->t_state, TS_TIMEOUT | TS_TTSTOP))
        goto out;
    if (tp->t_outq.c_cc == 0)
        goto out;

    ndone = 0;

    while (tp->t_outq.c_cc > 0) {
        ret = virtio_enqueue_prep(vq, &slot);
        if (ret == EAGAIN)
            break;
        KASSERT(ret == 0);
        ret = virtio_enqueue_reserve(vq, slot, 1);
        KASSERT(ret == 0);
        buf = vp->vp_tx_buf + slot * BUFSIZE;
        cnt = q_to_b(&tp->t_outq, buf, BUFSIZE);
        bus_dmamap_sync(vsc->sc_dmat, vp->vp_dmamap,
            vp->vp_tx_buf - vp->vp_rx_buf + slot * BUFSIZE, cnt,
            BUS_DMASYNC_PREWRITE);
        virtio_enqueue_p(vq, slot, vp->vp_dmamap,
            vp->vp_tx_buf - vp->vp_rx_buf + slot * BUFSIZE, cnt, 1);
        virtio_enqueue_commit(vsc, vq, slot, 0);
        ndone++;
    }

    if (ret == EAGAIN)
        SET(tp->t_state, TS_BUSY);

    if (ndone > 0)
        virtio_notify(vsc, vq);
    ttwakeupwr(tp);
out:
    splx(s);
}
static void virtio_blk_complete_request(VirtIOBlockReq *req,
                                        unsigned char status)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    trace_virtio_blk_req_complete(req, status);

    stb_p(&req->in->status, status);
    virtqueue_push(s->vq, &req->elem, req->in_len);
    virtio_notify(vdev, s->vq);
}
static void virtio_crypto_req_complete(VirtIOCryptoReq *req, uint8_t status)
{
    VirtIOCrypto *vcrypto = req->vcrypto;
    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);

    if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
        virtio_crypto_sym_input_data_helper(vdev, req, status,
                                            req->u.sym_op_info);
    }
    stb_p(&req->in->status, status);
    virtqueue_push(req->vq, &req->elem, req->in_len);
    virtio_notify(vdev, req->vq);
}
static void virtio_balloon_receive_stats(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
    VirtQueueElement *elem;
    VirtIOBalloonStat stat;
    size_t offset = 0;
    qemu_timeval tv;

    elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
    if (!elem) {
        goto out;
    }

    if (s->stats_vq_elem != NULL) {
        /* This should never happen if the driver follows the spec. */
        virtqueue_push(vq, s->stats_vq_elem, 0);
        virtio_notify(vdev, vq);
        g_free(s->stats_vq_elem);
    }

    s->stats_vq_elem = elem;

    /* Initialize the stats to get rid of any stale values. This is only
     * needed to handle the case where a guest supports fewer stats than it
     * used to (ie. it has booted into an old kernel).
     */
    reset_stats(s);

    while (iov_to_buf(elem->out_sg, elem->out_num, offset, &stat, sizeof(stat))
           == sizeof(stat)) {
        uint16_t tag = virtio_tswap16(vdev, stat.tag);
        uint64_t val = virtio_tswap64(vdev, stat.val);

        offset += sizeof(stat);
        if (tag < VIRTIO_BALLOON_S_NR)
            s->stats[tag] = val;
    }
    s->stats_vq_offset = offset;

    if (qemu_gettimeofday(&tv) < 0) {
        fprintf(stderr, "warning: %s: failed to get time of day\n", __func__);
        goto out;
    }

    s->stats_last_update = tv.tv_sec;

out:
    if (balloon_stats_enabled(s)) {
        balloon_stats_change_timer(s, s->stats_poll_interval);
    }
}
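/*
 * For reference, the layout of each stat entry parsed above. Both fields
 * are guest-endian on the wire, hence the virtio_tswap16/64 calls.
 */
typedef struct VirtIOBalloonStat {
    uint16_t tag;   /* VIRTIO_BALLOON_S_* index */
    uint64_t val;   /* counter value */
} QEMU_PACKED VirtIOBalloonStat;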
static void balloon_stats_poll_cb(void *opaque)
{
    VirtIOBalloon *s = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (!balloon_stats_supported(s)) {
        /* re-schedule */
        balloon_stats_change_timer(s, s->stats_poll_interval);
        return;
    }

    virtqueue_push(s->svq, &s->stats_vq_elem, s->stats_vq_offset);
    virtio_notify(vdev, s->svq);
}
void virtio_9p_push_and_notify(V9fsPDU *pdu)
{
    V9fsState *s = pdu->s;
    V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
    VirtQueueElement *elem = v->elems[pdu->idx];

    /* push onto queue and notify */
    virtqueue_push(v->vq, elem, pdu->size);
    g_free(elem);
    v->elems[pdu->idx] = NULL;

    /* FIXME: we should batch these completions */
    virtio_notify(VIRTIO_DEVICE(v), v->vq);
}
static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    trace_virtio_blk_req_complete(vdev, req, status);

    stb_p(&req->in->status, status);
    virtqueue_push(req->vq, &req->elem, req->in_len);
    if (s->dataplane_started && !s->dataplane_disabled) {
        virtio_blk_data_plane_notify(s->dataplane, req->vq);
    } else {
        virtio_notify(vdev, req->vq);
    }
}
static void virtio_console_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOConsole *s = to_virtio_console(vdev);
    VirtQueueElement elem;

    while (virtqueue_pop(vq, &elem)) {
        ssize_t len = 0;
        int d;

        for (d = 0; d < elem.out_num; d++)
            len += qemu_chr_write(s->chr, elem.out_sg[d].iov_base,
                                  elem.out_sg[d].iov_len);

        virtqueue_push(vq, &elem, len);
        virtio_notify(vdev, vq);
    }
}
/* TX */
static void virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
{
    VirtQueueElement elem;
    int has_vnet_hdr = 0;

    if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    while (virtqueue_pop(vq, &elem)) {
        ssize_t len = 0;
        unsigned int out_num = elem.out_num;
        struct iovec *out_sg = &elem.out_sg[0];
        unsigned hdr_len;

        /* hdr_len refers to the header received from the guest */
        hdr_len = n->mergeable_rx_bufs ?
            sizeof(struct virtio_net_hdr_mrg_rxbuf) :
            sizeof(struct virtio_net_hdr);

        if (out_num < 1 || out_sg->iov_len != hdr_len) {
            fprintf(stderr, "virtio-net header not in first element\n");
            exit(1);
        }

        /* ignore the header if GSO is not supported */
        if (!has_vnet_hdr) {
            out_num--;
            out_sg++;
            len += hdr_len;
        } else if (n->mergeable_rx_bufs) {
            /* tapfd expects a struct virtio_net_hdr */
            hdr_len -= sizeof(struct virtio_net_hdr);
            out_sg->iov_len -= hdr_len;
            len += hdr_len;
        }

        len += qemu_sendv_packet(n->vc, out_sg, out_num);

        virtqueue_push(vq, &elem, len);
        virtio_notify(&n->vdev, vq);
    }
}
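/*
 * For reference, the per-packet header whose size is computed above, as
 * defined by the virtio-net spec; the mergeable-rx-buffers variant appends
 * a count of merged descriptors.
 */
struct virtio_net_hdr {
    uint8_t  flags;        /* e.g. VIRTIO_NET_HDR_F_NEEDS_CSUM */
    uint8_t  gso_type;     /* e.g. VIRTIO_NET_HDR_GSO_NONE */
    uint16_t hdr_len;      /* length of headers to checksum/segment */
    uint16_t gso_size;     /* segment size for GSO */
    uint16_t csum_start;   /* checksum start offset */
    uint16_t csum_offset;  /* checksum field offset from csum_start */
};

struct virtio_net_hdr_mrg_rxbuf {
    struct virtio_net_hdr hdr;
    uint16_t num_buffers;  /* number of merged rx descriptors */
};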
static void virtio_ballloon_get_free_page_hints(void *opaque)
{
    VirtIOBalloon *dev = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtQueue *vq = dev->free_page_vq;
    bool continue_to_get_hints;

    do {
        qemu_mutex_lock(&dev->free_page_lock);
        virtio_queue_set_notification(vq, 0);
        continue_to_get_hints = get_free_page_hints(dev);
        qemu_mutex_unlock(&dev->free_page_lock);
        virtio_notify(vdev, vq);
        /*
         * Start to poll the vq once the reporting started. Otherwise, continue
         * only when there are entries on the vq, which need to be given back.
         */
    } while (continue_to_get_hints ||
             dev->free_page_report_status == FREE_PAGE_REPORT_S_START);
    virtio_queue_set_notification(vq, 1);
}
void
viocon_rx_fill(struct viocon_port *vp)
{
    struct virtqueue *vq = vp->vp_rx;
    struct virtio_softc *vsc = vp->vp_sc->sc_virtio;
    int r, slot, ndone = 0;

    while ((r = virtio_enqueue_prep(vq, &slot)) == 0) {
        if (virtio_enqueue_reserve(vq, slot, 1) != 0)
            break;
        bus_dmamap_sync(vsc->sc_dmat, vp->vp_dmamap, slot * BUFSIZE,
            BUFSIZE, BUS_DMASYNC_PREREAD);
        virtio_enqueue_p(vq, slot, vp->vp_dmamap, slot * BUFSIZE,
            BUFSIZE, 0);
        virtio_enqueue_commit(vsc, vq, slot, 0);
        ndone++;
    }
    KASSERT(r == 0 || r == EAGAIN);
    if (ndone > 0)
        virtio_notify(vsc, vq);
}
static void vcon_read(void *opaque, const uint8_t *buf, int size)
{
    VirtIOConsole *s = (VirtIOConsole *) opaque;
    VirtQueueElement elem;
    int offset = 0;

    /* The current kernel implementation has only one outstanding input
     * buffer of PAGE_SIZE. Nevertheless, this function is prepared to
     * handle multiple buffers with multiple sg elements for input. */
    while (offset < size) {
        int i = 0;

        if (!virtqueue_pop(s->ivq, &elem))
            break;

        while (offset < size && i < elem.in_num) {
            int len = MIN(elem.in_sg[i].iov_len, size - offset);
            memcpy(elem.in_sg[i].iov_base, buf + offset, len);
            offset += len;
            i++;
        }
        virtqueue_push(s->ivq, &elem, size);
    }
    virtio_notify(&s->vdev, s->ivq);
}
/* Send data from a char device over to the guest */
static void chr_read(void *opaque, const void *buf, size_t size)
{
    VirtIORNG *vrng = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(vrng);
    VirtQueueElement *elem;
    size_t len;
    int offset;

    if (!is_guest_ready(vrng)) {
        return;
    }

    vrng->quota_remaining -= size;

    offset = 0;
    while (offset < size) {
        elem = virtqueue_pop(vrng->vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }
        len = iov_from_buf(elem->in_sg, elem->in_num,
                           0, buf + offset, size - offset);
        offset += len;

        virtqueue_push(vrng->vq, elem, len);
        trace_virtio_rng_pushed(vrng, len);
        g_free(elem);
    }
    virtio_notify(vdev, vrng->vq);

    if (!virtio_queue_empty(vrng->vq)) {
        /* If we didn't drain the queue, call virtio_rng_process
         * to take care of asking for more data as appropriate.
         */
        virtio_rng_process(vrng);
    }
}
static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
    struct virtio_crypto_op_ctrl_req ctrl;
    VirtQueueElement *elem;
    struct iovec *in_iov;
    struct iovec *out_iov;
    unsigned in_num;
    unsigned out_num;
    uint32_t queue_id;
    uint32_t opcode;
    struct virtio_crypto_session_input input;
    int64_t session_id;
    uint8_t status;
    size_t s;

    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }
        if (elem->out_num < 1 || elem->in_num < 1) {
            virtio_error(vdev, "virtio-crypto ctrl missing headers");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }

        out_num = elem->out_num;
        out_iov = elem->out_sg;
        in_num = elem->in_num;
        in_iov = elem->in_sg;
        if (unlikely(iov_to_buf(out_iov, out_num, 0, &ctrl, sizeof(ctrl))
                    != sizeof(ctrl))) {
            virtio_error(vdev, "virtio-crypto request ctrl_hdr too short");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }
        iov_discard_front(&out_iov, &out_num, sizeof(ctrl));
        opcode = ldl_le_p(&ctrl.header.opcode);
        queue_id = ldl_le_p(&ctrl.header.queue_id);

        switch (opcode) {
        case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION:
            memset(&input, 0, sizeof(input));
            session_id = virtio_crypto_create_sym_session(vcrypto,
                             &ctrl.u.sym_create_session,
                             queue_id, opcode,
                             out_iov, out_num);
            /* Serious errors, need to reset virtio crypto device */
            if (session_id == -EFAULT) {
                virtqueue_detach_element(vq, elem, 0);
                break;
            } else if (session_id == -VIRTIO_CRYPTO_NOTSUPP) {
                stl_le_p(&input.status, VIRTIO_CRYPTO_NOTSUPP);
            } else if (session_id == -VIRTIO_CRYPTO_ERR) {
                stl_le_p(&input.status, VIRTIO_CRYPTO_ERR);
            } else {
                /* Set the session id */
                stq_le_p(&input.session_id, session_id);
                stl_le_p(&input.status, VIRTIO_CRYPTO_OK);
            }

            s = iov_from_buf(in_iov, in_num, 0, &input, sizeof(input));
            if (unlikely(s != sizeof(input))) {
                virtio_error(vdev, "virtio-crypto input incorrect");
                virtqueue_detach_element(vq, elem, 0);
                break;
            }
            virtqueue_push(vq, elem, sizeof(input));
            virtio_notify(vdev, vq);
            break;
        case VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION:
        case VIRTIO_CRYPTO_HASH_DESTROY_SESSION:
        case VIRTIO_CRYPTO_MAC_DESTROY_SESSION:
        case VIRTIO_CRYPTO_AEAD_DESTROY_SESSION:
            status = virtio_crypto_handle_close_session(vcrypto,
                   &ctrl.u.destroy_session, queue_id);
            /* The status occupies only one byte, so we can use it directly */
            s = iov_from_buf(in_iov, in_num, 0, &status, sizeof(status));
            if (unlikely(s != sizeof(status))) {
                virtio_error(vdev, "virtio-crypto status incorrect");
                virtqueue_detach_element(vq, elem, 0);
                break;
            }
            virtqueue_push(vq, elem, sizeof(status));
            virtio_notify(vdev, vq);
            break;
        case VIRTIO_CRYPTO_HASH_CREATE_SESSION:
        case VIRTIO_CRYPTO_MAC_CREATE_SESSION:
        case VIRTIO_CRYPTO_AEAD_CREATE_SESSION:
        default:
            error_report("virtio-crypto unsupported ctrl opcode: %d", opcode);
            memset(&input, 0, sizeof(input));
            stl_le_p(&input.status, VIRTIO_CRYPTO_NOTSUPP);
            s = iov_from_buf(in_iov, in_num, 0, &input, sizeof(input));
            if (unlikely(s != sizeof(input))) {
                virtio_error(vdev, "virtio-crypto input incorrect");
                virtqueue_detach_element(vq, elem, 0);
                break;
            }
            virtqueue_push(vq, elem, sizeof(input));
            virtio_notify(vdev, vq);
            break;
        } /* end switch case */

        g_free(elem);
    } /* end for loop */
}
/* Command queue. */
static void virtio_audio_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOAudio *s = to_virtio_audio(vdev);
    VirtIOAudioStream *stream;
    VirtQueueElement elem;
    int out_n;
    uint32_t *p;
    int len;
    size_t out_bytes;
    uint32_t value;

    while (virtqueue_pop(s->cmd_vq, &elem)) {
        size_t bytes_transferred = 0;

        for (out_n = 0; out_n < elem.out_num; out_n++) {
            p = (uint32_t *)elem.out_sg[out_n].iov_base;
            len = elem.out_sg[out_n].iov_len;
            while (len > 0) {
                if (len < 12) {
                    BADF("Bad command length\n");
                    break;
                }
                DPRINTF("Command %d %d %d\n",
                        ldl_p(p), ldl_p(p + 1), ldl_p(p + 2));
                value = ldl_p(p + 1);
                if (value >= NUM_STREAMS)
                    break;
                stream = &s->stream[value];
                value = ldl_p(p + 2);
                switch (ldl_p(p)) {
                case VIRTIO_AUDIO_CMD_SET_ENDIAN:
                    stream->fmt.endianness = value;
                    break;
                case VIRTIO_AUDIO_CMD_SET_CHANNELS:
                    stream->fmt.nchannels = value;
                    break;
                case VIRTIO_AUDIO_CMD_SET_FMT:
                    stream->fmt.fmt = value;
                    break;
                case VIRTIO_AUDIO_CMD_SET_FREQ:
                    stream->fmt.freq = value;
                    break;
                case VIRTIO_AUDIO_CMD_INIT:
                    out_bytes = 0;
                    if (value == 1) {
                        if (stream->out_voice) {
                            AUD_close_out(&s->card, stream->out_voice);
                            stream->out_voice = NULL;
                        }
                        stream->in_voice =
                            AUD_open_in(&s->card, stream->in_voice,
                                        "virtio-audio.in", stream,
                                        virtio_audio_callback, &stream->fmt);
                        virtio_audio_cmd_result(0, &elem, &out_bytes);
                    } else if (value == 0) {
                        if (stream->in_voice) {
                            AUD_close_in(&s->card, stream->in_voice);
                            stream->in_voice = NULL;
                        }
                        stream->out_voice =
                            AUD_open_out(&s->card, stream->out_voice,
                                         "virtio-audio.out", stream,
                                         virtio_audio_callback, &stream->fmt);
                        value = AUD_get_buffer_size_out(stream->out_voice);
                        virtio_audio_cmd_result(value, &elem, &out_bytes);
                    } else {
                        /* close everything down */
                        if (stream->out_voice) {
                            AUD_close_out(&s->card, stream->out_voice);
                            stream->out_voice = NULL;
                        }
                        if (stream->in_voice) {
                            AUD_close_in(&s->card, stream->in_voice);
                            stream->in_voice = NULL;
                        }
                    }
                    bytes_transferred += out_bytes;
                    break;
                case VIRTIO_AUDIO_CMD_RUN:
                    if (stream->in_voice) {
                        AUD_set_active_in(stream->in_voice, value);
                    } else if (stream->out_voice) {
                        AUD_set_active_out(stream->out_voice, value);
                    } else {
                        DPRINTF("Cannot execute CMD_RUN as no voice is active\n");
                    }
                    break;
                }
                p += 3;
                len -= 12;
                bytes_transferred += 12;
            }
        }
        virtqueue_push(s->cmd_vq, &elem, bytes_transferred);
        virtio_notify(vdev, s->cmd_vq);
    }
}
static void vq_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtQueueElement elem;

    while (virtqueue_pop(vq, &elem)) {
        struct param *p = elem.out_sg[0].iov_base;

        /* For all library routines: get the required arguments from the
         * buffer, execute, and push the results back into the virtqueue. */
        switch (p->syscall_type) {
        case CUINIT: {
            p->result = cuInit(p->flags);
            break;
        }
        case CUDRIVERGETVERSION: {
            p->result = cuDriverGetVersion(&p->val1);
            break;
        }
        case CUDEVICEGETCOUNT: {
            p->result = cuDeviceGetCount(&p->val1);
            break;
        }
        case CUDEVICEGET: {
            p->result = cuDeviceGet(&p->device, p->val1);
            break;
        }
        case CUDEVICECOMPUTECAPABILITY: {
            p->result = cuDeviceComputeCapability(&p->val1, &p->val2,
                                                  p->device);
            break;
        }
        case CUDEVICEGETNAME: {
            p->result = cuDeviceGetName(elem.in_sg[0].iov_base, p->val1,
                                        p->device);
            break;
        }
        case CUDEVICEGETATTRIBUTE: {
            p->result = cuDeviceGetAttribute(&p->val1, p->attrib, p->device);
            break;
        }
        case CUCTXCREATE: {
            p->result = cuCtxCreate(&p->ctx, p->flags, p->device);
            break;
        }
        case CUCTXDESTROY: {
            p->result = cuCtxDestroy(p->ctx);
            break;
        }
        case CUCTXGETCURRENT: {
            p->result = cuCtxGetCurrent(&p->ctx);
            break;
        }
        case CUCTXGETDEVICE: {
            p->result = cuCtxGetDevice(&p->device);
            break;
        }
        case CUCTXPOPCURRENT: {
            p->result = cuCtxPopCurrent(&p->ctx);
            break;
        }
        case CUCTXSETCURRENT: {
            p->result = cuCtxSetCurrent(p->ctx);
            break;
        }
        case CUCTXSYNCHRONIZE: {
            p->result = cuCtxSynchronize();
            break;
        }
        case CUMODULELOAD: {
            /* Hardcoded path - needs improvement. All .cubin files should be
             * stored in $QEMU_NFS_PATH - currently $QEMU_NFS_PATH is shared
             * between host and guest with NFS. */
            char *binname = malloc((strlen((char *)elem.out_sg[1].iov_base) +
                                    strlen(getenv("QEMU_NFS_PATH")) + 1) *
                                   sizeof(char));
            if (!binname) {
                p->result = 0;
                virtqueue_push(vq, &elem, 0);
                break;
            }
            strcpy(binname, getenv("QEMU_NFS_PATH"));
            strcat(binname, (char *)elem.out_sg[1].iov_base);
            /* Change the current CUDA context. Each CUDA context has its own
             * virtual memory space - isolation is ensured by switching
             * contexts. */
            if (cuCtxSetCurrent(p->ctx) != 0) {
                p->result = 999;
                break;
            }
            p->result = cuModuleLoad(&p->module, binname);
            free(binname);
            break;
        }
        case CUMODULEGETGLOBAL: {
            char *name = malloc(100 * sizeof(char));
            if (!name) {
                p->result = 999;
                break;
            }
            strcpy(name, (char *)elem.out_sg[1].iov_base);
            p->result = cuModuleGetGlobal(&p->dptr, &p->size1, p->module,
                                          (const char *)name);
            break;
        }
        case CUMODULEUNLOAD: {
            p->result = cuModuleUnload(p->module);
            break;
        }
        case CUMEMALLOC: {
            if (cuCtxSetCurrent(p->ctx) != 0) {
                p->result = 999;
                break;
            }
            p->result = cuMemAlloc(&p->dptr, p->bytesize);
            break;
        }
        case CUMEMALLOCPITCH: {
            if (cuCtxSetCurrent(p->ctx) != 0) {
                p->result = 999;
                break;
            }
            p->result = cuMemAllocPitch(&p->dptr, &p->size3, p->size1,
                                        p->size2, p->bytesize);
            break;
        }
        /* Large buffers are allocated in smaller chunks in guest kernel
         * space; get each chunk separately and copy it to device memory. */
        case CUMEMCPYHTOD: {
            int i;
            size_t offset;
            unsigned long s, nr_pages = p->nr_pages;

            if (cuCtxSetCurrent(p->ctx) != 0) {
                p->result = 999;
                break;
            }
            offset = 0;
            for (i = 0; i < nr_pages; i++) {
                s = *(long *)elem.out_sg[1 + 2 * i + 1].iov_base;
                p->result = cuMemcpyHtoD(p->dptr + offset,
                                         elem.out_sg[1 + 2 * i].iov_base, s);
                if (p->result != 0)
                    break;
                offset += s;
            }
            break;
        }
        case CUMEMCPYHTODASYNC: {
            int i;
            size_t offset;
            unsigned long s, nr_pages = p->nr_pages;

            if (cuCtxSetCurrent(p->ctx) != 0) {
                p->result = 999;
                break;
            }
            offset = 0;
            for (i = 0; i < nr_pages; i++) {
                s = *(long *)elem.out_sg[1 + 2 * i + 1].iov_base;
                p->result = cuMemcpyHtoDAsync(p->dptr + offset,
                                              elem.out_sg[1 + 2 * i].iov_base,
                                              s, p->stream);
                if (p->result != 0)
                    break;
                offset += s;
            }
            break;
        }
        case CUMEMCPYDTODASYNC: {
            p->result = cuMemcpyDtoDAsync(p->dptr, p->dptr1, p->size1,
                                          p->stream);
            break;
        }
        case CUMEMCPYDTOH: {
            int i;
            unsigned long s, nr_pages = p->nr_pages;

            if (cuCtxSetCurrent(p->ctx) != 0) {
                p->result = 999;
                break;
            }
            size_t offset = 0;
            for (i = 0; i < nr_pages; i++) {
                s = *(long *)elem.in_sg[0 + 2 * i + 1].iov_base;
                p->result = cuMemcpyDtoH(elem.in_sg[0 + 2 * i].iov_base,
                                         p->dptr + offset, s);
                if (p->result != 0)
                    break;
                offset += s;
            }
            break;
        }
        case CUMEMCPYDTOHASYNC: {
            int i;
            unsigned long s, nr_pages = p->nr_pages;

            if (cuCtxSetCurrent(p->ctx) != 0) {
                p->result = 999;
                break;
            }
            size_t offset = 0;
            for (i = 0; i < nr_pages; i++) {
                s = *(long *)elem.in_sg[0 + 2 * i + 1].iov_base;
                p->result = cuMemcpyDtoHAsync(elem.in_sg[0 + 2 * i].iov_base,
                                              p->dptr + offset, s, p->stream);
                if (p->result != 0)
                    break;
                offset += s;
            }
            break;
        }
        case CUMEMSETD32: {
            p->result = cuMemsetD32(p->dptr, p->bytecount, p->bytesize);
            break;
        }
        case CUMEMFREE: {
            p->result = cuMemFree(p->dptr);
            break;
        }
        case CUMODULEGETFUNCTION: {
            char *name = (char *)elem.out_sg[1].iov_base;
            name[p->length] = '\0';
            if (cuCtxSetCurrent(p->ctx) != 0) {
                p->result = 999;
                break;
            }
            p->result = cuModuleGetFunction(&p->function, p->module, name);
            break;
        }
        case CULAUNCHKERNEL: {
            void **args = malloc(p->val1 * sizeof(void *));
            if (!args) {
                p->result = 9999;
                break;
            }
            int i;
            for (i = 0; i < p->val1; i++) {
                args[i] = elem.out_sg[1 + i].iov_base;
            }
            if (cuCtxSetCurrent(p->ctx) != 0) {
                p->result = 999;
                break;
            }
            p->result = cuLaunchKernel(p->function,
                                       p->gridDimX, p->gridDimY, p->gridDimZ,
                                       p->blockDimX, p->blockDimY, p->blockDimZ,
                                       p->bytecount, 0, args, 0);
            free(args);
            break;
        }
        case CUEVENTCREATE: {
            p->result = cuEventCreate(&p->event1, p->flags);
            break;
        }
        case CUEVENTDESTROY: {
            p->result = cuEventDestroy(p->event1);
            break;
        }
        case CUEVENTRECORD: {
            p->result = cuEventRecord(p->event1, p->stream);
            break;
        }
        case CUEVENTSYNCHRONIZE: {
            p->result = cuEventSynchronize(p->event1);
            break;
        }
        case CUEVENTELAPSEDTIME: {
            p->result = cuEventElapsedTime(&p->pMilliseconds,
                                           p->event1, p->event2);
            break;
        }
        case CUSTREAMCREATE: {
            p->result = cuStreamCreate(&p->stream, 0);
            break;
        }
        case CUSTREAMSYNCHRONIZE: {
            p->result = cuStreamSynchronize(p->stream);
            break;
        }
        case CUSTREAMQUERY: {
            p->result = cuStreamQuery(p->stream);
            break;
        }
        case CUSTREAMDESTROY: {
            p->result = cuStreamDestroy(p->stream);
            break;
        }
        default:
            printf("Unknown syscall_type\n");
        }
        virtqueue_push(vq, &elem, 0);
    }

    /* Notify the frontend - trigger a virtual interrupt. */
    virtio_notify(vdev, vq);
    return;
}
static void virtio_net_receive(void *opaque, const uint8_t *buf, int size)
{
    VirtIONet *n = opaque;
    struct virtio_net_hdr_mrg_rxbuf *mhdr = NULL;
    size_t hdr_len, offset, i;

    if (!do_virtio_net_can_receive(n, size))
        return;

    /* hdr_len refers to the header we supply to the guest */
    hdr_len = n->mergeable_rx_bufs ?
        sizeof(struct virtio_net_hdr_mrg_rxbuf) :
        sizeof(struct virtio_net_hdr);

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement elem;
        int len, total;
        struct iovec sg[VIRTQUEUE_MAX_SIZE];

        len = total = 0;

        if ((i != 0 && !n->mergeable_rx_bufs) ||
            virtqueue_pop(n->rx_vq, &elem) == 0) {
            if (i == 0)
                return;
            fprintf(stderr, "virtio-net truncating packet\n");
            exit(1);
        }

        if (elem.in_num < 1) {
            fprintf(stderr,
                    "virtio-net receive queue contains no in buffers\n");
            exit(1);
        }

        if (!n->mergeable_rx_bufs && elem.in_sg[0].iov_len != hdr_len) {
            fprintf(stderr, "virtio-net header not in first element\n");
            exit(1);
        }

        memcpy(&sg, &elem.in_sg[0], sizeof(sg[0]) * elem.in_num);

        if (i == 0) {
            if (n->mergeable_rx_bufs)
                mhdr = (struct virtio_net_hdr_mrg_rxbuf *)sg[0].iov_base;

            offset += receive_header(n, sg, elem.in_num,
                                     buf + offset, size - offset, hdr_len);
            total += hdr_len;
        }

        /* copy in packet.  ugh */
        len = iov_fill(sg, elem.in_num, buf + offset, size - offset);
        total += len;

        /* signal other side */
        virtqueue_fill(n->rx_vq, &elem, total, i++);

        offset += len;
    }

    if (mhdr)
        mhdr->num_buffers = i;

    virtqueue_flush(n->rx_vq, i);
    virtio_notify(&n->vdev, n->rx_vq);
}