/*
 * Send a control event to the guest on the control-in virtqueue (c_ivq).
 *
 * @crdev: the virtio-crypto device state
 * @event/@value: 16-bit event id and payload, stored guest-endian via stw_p()
 *
 * Returns the number of bytes delivered to the guest, or 0 if the queue is
 * not ready, has no buffer available, or the guest buffer is too small.
 */
static size_t send_control_event(VirtIOCrypto *crdev, uint16_t event,
                                 uint16_t value)
{
    struct virtio_crypto_control cpkt;
    VirtQueueElement elem;
    VirtQueue *vq;

    FUNC_IN;

    stw_p(&cpkt.event, event);
    stw_p(&cpkt.value, value);

    vq = crdev->c_ivq;
    if (!virtio_queue_ready(vq)) {
        return 0;
    }
    if (!virtqueue_pop(vq, &elem)) {
        return 0;
    }

    /* Fix: the sg list is guest-controlled; the old code copied
     * sizeof(cpkt) bytes into in_sg[0] unconditionally, overflowing the
     * guest buffer whenever it was smaller than the control packet (or
     * absent entirely). */
    if (elem.in_num < 1 || elem.in_sg[0].iov_len < sizeof(cpkt)) {
        /* Return the unusable buffer to the guest with zero bytes used. */
        virtqueue_push(vq, &elem, 0);
        virtio_notify(&crdev->vdev, vq);
        return 0;
    }

    memcpy(elem.in_sg[0].iov_base, &cpkt, sizeof(cpkt));
    virtqueue_push(vq, &elem, sizeof(cpkt));
    virtio_notify(&crdev->vdev, vq);

    FUNC_OUT;
    return sizeof(cpkt);
}
/*
 * Inject a VIRTIO_VSOCK_EVENT_TRANSPORT_RESET event into the guest's event
 * virtqueue so the driver learns that existing connections were lost
 * (e.g. across migration).  If the guest posted no event buffer, the reset
 * event is dropped with an error report.
 */
static void vhost_vsock_send_transport_reset(VHostVSock *vsock)
{
    VirtQueue *event_vq = vsock->event_vq;
    struct virtio_vsock_event reset_event = {
        .id = cpu_to_le32(VIRTIO_VSOCK_EVENT_TRANSPORT_RESET),
    };
    VirtQueueElement *req;
    size_t copied;

    req = virtqueue_pop(event_vq, sizeof(VirtQueueElement));
    if (!req) {
        error_report("vhost-vsock missed transport reset event");
        return;
    }

    /* Event buffers are device-writable only; out buffers are a
     * driver bug. */
    if (req->out_num) {
        error_report("invalid vhost-vsock event virtqueue element with "
                     "out buffers");
        goto done;
    }

    copied = iov_from_buf(req->in_sg, req->in_num, 0,
                          &reset_event, sizeof(reset_event));
    if (copied != sizeof(reset_event)) {
        error_report("vhost-vsock event virtqueue element is too short");
        goto done;
    }

    virtqueue_push(event_vq, req, sizeof(reset_event));
    virtio_notify(VIRTIO_DEVICE(vsock), event_vq);

done:
    g_free(req);
}
/*
 * Stats virtqueue handler: parse the tag/value stat records the guest
 * placed in the popped element's out sg list and store them in s->stats[].
 * NOTE(review): the popped element is kept in s->stats_vq_elem and is NOT
 * pushed back here — presumably complete_stats_request() returns it to the
 * guest later; confirm against the rest of this device.
 */
static void virtio_balloon_receive_stats(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBalloon *s = DO_UPCAST(VirtIOBalloon, vdev, vdev);
    VirtQueueElement *elem = &s->stats_vq_elem;
    VirtIOBalloonStat stat;
    size_t offset = 0;

    /* No stats buffer queued by the guest yet. */
    if (!virtqueue_pop(vq, elem)) {
        return;
    }

    /* Initialize the stats to get rid of any stale values.  This is only
     * needed to handle the case where a guest supports fewer stats than it
     * used to (ie. it has booted into an old kernel). */
    reset_stats(s);

    /* Old-style iov_to_buf(sg, num, buf, offset, size) signature: walk the
     * out buffers one fixed-size stat record at a time until short read. */
    while (iov_to_buf(elem->out_sg, elem->out_num, &stat, offset,
                      sizeof(stat)) == sizeof(stat)) {
        uint16_t tag = tswap16(stat.tag);
        uint64_t val = tswap64(stat.val);

        offset += sizeof(stat);
        /* Silently ignore tags newer than this QEMU knows about. */
        if (tag < VIRTIO_BALLOON_S_NR)
            s->stats[tag] = val;
    }
    s->stats_vq_offset = offset;

    complete_stats_request(s);
}
/*
 * Inflate/deflate queue handler: each popped element carries an array of
 * 32-bit guest page frame numbers; balloon each referenced page.  Whether
 * this is a deflate is derived from the queue identity (vq == s->dvq).
 */
static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
    VirtQueueElement elem;
    MemoryRegionSection section;

    while (virtqueue_pop(vq, &elem)) {
        size_t offset = 0;
        uint32_t pfn;

        /* Consume one little-endian PFN (4 bytes) per iteration. */
        while (iov_to_buf(elem.out_sg, elem.out_num, offset, &pfn, 4) == 4) {
            ram_addr_t pa;
            ram_addr_t addr;

            pa = (ram_addr_t)ldl_p(&pfn) << VIRTIO_BALLOON_PFN_SHIFT;
            offset += 4;

            /* FIXME: remove get_system_memory(), but how? */
            section = memory_region_find(get_system_memory(), pa, 1);
            if (!int128_nz(section.size) ||
                !memory_region_is_ram(section.mr)) {
                /* Fix: memory_region_find() takes a reference on the
                 * region it returns; the old code skipped the unref on
                 * this path and leaked a reference for every bogus PFN
                 * (memory_region_unref() tolerates a NULL region). */
                memory_region_unref(section.mr);
                continue;
            }

            /* Using memory_region_get_ram_ptr is bending the rules a bit,
             * but should be OK because we only want a single page. */
            addr = section.offset_within_region;
            balloon_page(memory_region_get_ram_ptr(section.mr) + addr,
                         !!(vq == s->dvq));
            memory_region_unref(section.mr);
        }

        virtqueue_push(vq, &elem, offset);
        virtio_notify(vdev, vq);
    }
}
/*
 * Copy @size bytes of host data into the next guest receive buffer on the
 * data-in virtqueue (ivq) and notify the guest.
 *
 * Returns the number of bytes actually delivered (0 if the queue is not
 * ready or no buffer is available; may be less than @size if the guest
 * buffer is smaller).
 */
static size_t send_buffer(VirtIOCrypto *crdev, const uint8_t *buf, size_t size)
{
    VirtQueueElement elem;
    VirtQueue *vq;
    unsigned int len;

    vq = crdev->ivq;

    /* Is the virtqueue ready? */
    if (!virtio_queue_ready(vq)) {
        return 0;
    }
    /* Is there a buffer in the queue?  If not we can not send data to
     * the guest. */
    if (!virtqueue_pop(vq, &elem)) {
        return 0;
    }

    /* Fix: the sg list is guest-controlled.  The old code dereferenced
     * in_sg[0] without checking in_num, and memcpy'd the full 'size'
     * regardless of the guest buffer's length — overflowing it whenever
     * size > iov_len.  Bound the copy by what the guest provided. */
    if (elem.in_num < 1) {
        virtqueue_push(vq, &elem, 0);
        virtio_notify(&crdev->vdev, vq);
        return 0;
    }
    if (size > elem.in_sg[0].iov_len) {
        size = elem.in_sg[0].iov_len;
    }

    memcpy(elem.in_sg[0].iov_base, buf, size);
    virtqueue_push(vq, &elem, size);
    virtio_notify(&crdev->vdev, vq);

    len = size;
    return len;
}
/*
 * Handler for ovq virtqueue.
 * Called when guest sends data to the host.
 */
static void handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOCrypto *crdev = DO_UPCAST(VirtIOCrypto, vdev, vdev);
    VirtQueueElement elem;
    uint8_t *buf;
    size_t buf_size;

    FUNC_IN;

    /* Pop buffer from virtqueue; nothing to do if the queue is empty. */
    if (!virtqueue_pop(vq, &elem)) {
        return;
    }

    /* Fix for the old FIXME ("are we sure all data is in one sg list?"):
     * the request may span several out sg entries, so linearize the whole
     * out list into one contiguous buffer — same pattern the sibling
     * control_out() handler uses. */
    buf_size = iov_size(elem.out_sg, elem.out_num);
    buf = g_malloc(buf_size);
    iov_to_buf(elem.out_sg, elem.out_num, 0, buf, buf_size);

    crypto_handle_ioctl_packet(crdev, buf, buf_size);
    g_free(buf);

    /* Put buffer back (no bytes written to the guest) and notify. */
    virtqueue_push(vq, &elem, 0);
    virtio_notify(vdev, vq);

    FUNC_OUT;
}
/*
 * Handler for control c_ovq virtqueue.
 * Called when guest sends a control message to the host.
 */
static void control_out(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOCrypto *crdev = DO_UPCAST(VirtIOCrypto, vdev, vdev);
    VirtQueueElement elem;
    uint8_t *msg;
    size_t msg_len;

    FUNC_IN;

    if (!virtqueue_pop(vq, &elem)) {
        return;
    }

    /* Linearize the (possibly scattered) control message into one
     * contiguous buffer before handing it to the message parser. */
    msg_len = iov_size(elem.out_sg, elem.out_num);
    msg = g_malloc(msg_len);
    iov_to_buf(elem.out_sg, elem.out_num, 0, msg, msg_len);

    handle_control_message(crdev, msg, msg_len);

    /* Return the buffer to the guest; no bytes were written back. */
    virtqueue_push(vq, &elem, 0);
    virtio_notify(vdev, vq);

    g_free(msg);
    FUNC_OUT;
}
/*
 * Inflate/deflate queue handler (pre-MemoryRegion QEMU): each popped
 * element carries an array of 32-bit page frame numbers to balloon.
 * Deflate vs inflate is selected by queue identity (vq == s->dvq).
 */
static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBalloon *s = to_virtio_balloon(vdev);
    VirtQueueElement elem;

    while (virtqueue_pop(vq, &elem)) {
        size_t offset = 0;
        uint32_t pfn;

        /* Old iov_to_buf(sg, num, buf, offset, size) signature: pull one
         * 4-byte PFN per iteration until the out buffers run dry. */
        while (iov_to_buf(elem.out_sg, elem.out_num, &pfn, offset, 4) == 4) {
            ram_addr_t pa;
            ram_addr_t addr;

            pa = (ram_addr_t)ldl_p(&pfn) << VIRTIO_BALLOON_PFN_SHIFT;
            offset += 4;

            /* Skip addresses that do not resolve to plain RAM.
             * NOTE(review): relies on the legacy phys-page-desc API where
             * the low bits of the descriptor encode the memory type. */
            addr = cpu_get_physical_page_desc(pa);
            if ((addr & ~TARGET_PAGE_MASK) != IO_MEM_RAM)
                continue;

            /* Using qemu_get_ram_ptr is bending the rules a bit, but
               should be OK because we only want a single page.  */
            balloon_page(qemu_get_ram_ptr(addr), !!(vq == s->dvq));
        }

        /* Report how many PFN bytes were consumed from this element. */
        virtqueue_push(vq, &elem, offset);
        virtio_notify(vdev, vq);
    }
}
/*
 * Pop the next block request element from @vq and initialize it for
 * device @s.  Returns NULL when the queue has no pending request.
 */
static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s, VirtQueue *vq)
{
    VirtIOBlockReq *req;

    req = virtqueue_pop(vq, sizeof(VirtIOBlockReq));
    if (!req) {
        return NULL;
    }

    virtio_blk_init_request(s, vq, req);
    return req;
}
/*
 * Pop the next crypto request element from @vq and initialize it for
 * device @s.  Returns NULL when the queue has no pending request.
 */
static VirtIOCryptoReq *
virtio_crypto_get_request(VirtIOCrypto *s, VirtQueue *vq)
{
    VirtIOCryptoReq *req;

    req = virtqueue_pop(vq, sizeof(VirtIOCryptoReq));
    if (!req) {
        return NULL;
    }

    virtio_crypto_init_request(s, vq, req);
    return req;
}
/*
 * Inflate/deflate queue handler (modern QEMU): each popped element carries
 * an array of 32-bit PFNs.  The queue identity selects the operation:
 * s->ivq inflates, s->dvq deflates; any other queue is a programming error.
 */
static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
    VirtQueueElement *elem;
    MemoryRegionSection section;

    for (;;) {
        size_t offset = 0;
        uint32_t pfn;

        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            return;
        }

        /* One guest-endian 4-byte PFN per iteration. */
        while (iov_to_buf(elem->out_sg, elem->out_num, offset, &pfn, 4) == 4) {
            hwaddr pa;
            int p = virtio_ldl_p(vdev, &pfn);

            pa = (hwaddr) p << VIRTIO_BALLOON_PFN_SHIFT;
            offset += 4;

            /* Validate the address: it must resolve to writable RAM
             * (not ROM/ROMD).  memory_region_find() takes a reference
             * that must be dropped on every path. */
            section = memory_region_find(get_system_memory(), pa,
                                         BALLOON_PAGE_SIZE);
            if (!section.mr) {
                trace_virtio_balloon_bad_addr(pa);
                continue;
            }
            if (!memory_region_is_ram(section.mr) ||
                memory_region_is_rom(section.mr) ||
                memory_region_is_romd(section.mr)) {
                trace_virtio_balloon_bad_addr(pa);
                memory_region_unref(section.mr);
                continue;
            }

            trace_virtio_balloon_handle_output(memory_region_name(section.mr),
                                               pa);
            /* Ballooning can be globally inhibited (e.g. by VFIO). */
            if (!qemu_balloon_is_inhibited()) {
                if (vq == s->ivq) {
                    balloon_inflate_page(s, section.mr,
                                         section.offset_within_region);
                } else if (vq == s->dvq) {
                    balloon_deflate_page(s, section.mr,
                                         section.offset_within_region);
                } else {
                    g_assert_not_reached();
                }
            }
            memory_region_unref(section.mr);
        }

        /* Complete the element with the number of PFN bytes consumed. */
        virtqueue_push(vq, elem, offset);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}
/*
 * 9p request queue handler: for each queued element, parse the 7-byte 9p
 * message header (size[4] id[1] tag[2], little-endian) from the out sg
 * list, attach the element to a fresh PDU and submit it.  Stops when the
 * queue is drained or no PDU can be allocated.
 */
static void handle_9p_output(VirtIODevice *vdev, VirtQueue *vq)
{
    V9fsVirtioState *v = (V9fsVirtioState *)vdev;
    V9fsState *s = &v->state;
    V9fsPDU *pdu;
    ssize_t len;
    VirtQueueElement *elem;

    while ((pdu = pdu_alloc(s))) {
        /* On-the-wire 9p header layout; must pack to exactly 7 bytes. */
        struct {
            uint32_t size_le;
            uint8_t id;
            uint16_t tag_le;
        } QEMU_PACKED out;

        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            goto out_free_pdu;
        }

        /* A request with no in buffers leaves nowhere to write the
         * reply — treat as a fatal guest error. */
        if (elem->in_num == 0) {
            virtio_error(vdev,
                         "The guest sent a VirtFS request without space for "
                         "the reply");
            goto out_free_req;
        }
        QEMU_BUILD_BUG_ON(sizeof(out) != 7);

        v->elems[pdu->idx] = elem;
        len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                         &out, sizeof(out));
        if (len != sizeof(out)) {
            virtio_error(vdev, "The guest sent a malformed VirtFS request: "
                         "header size is %zd, should be 7", len);
            goto out_free_req;
        }

        pdu->size = le32_to_cpu(out.size_le);
        pdu->id = out.id;
        pdu->tag = le16_to_cpu(out.tag_le);

        qemu_co_queue_init(&pdu->complete);
        pdu_submit(pdu);
    }

    return;

out_free_req:
    /* Error path: return the element to the queue unused, then free it
     * and the PDU it was about to be bound to. */
    virtqueue_detach_element(vq, elem, 0);
    g_free(elem);
out_free_pdu:
    pdu_free(pdu);
}
/*
 * Allocate a block request and fill its element from s->vq.  Returns NULL
 * (after freeing the allocation) when the queue has no pending request.
 */
static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s)
{
    VirtIOBlockReq *req = virtio_blk_alloc_request(s);

    if (virtqueue_pop(s->vq, &req->elem)) {
        return req;
    }

    /* Queue was empty: release the speculatively allocated request. */
    virtio_blk_free_request(req);
    return NULL;
}
/*
 * Consume one element from the free-page-hint virtqueue.
 *
 * The out buffer (if any) carries a 32-bit command id that starts or stops
 * the free-page-hinting run; the in buffer (if any) carries a hinted page
 * range forwarded to migration.  Returns false when the queue is empty or
 * the command id is malformed, true otherwise.
 *
 * NOTE(review): qemu_cond_wait() implies free_page_lock is held by the
 * caller — confirm against the callers of this function.
 */
static bool get_free_page_hints(VirtIOBalloon *dev)
{
    VirtQueueElement *elem;
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtQueue *vq = dev->free_page_vq;
    bool ret = true;

    /* Block while the iothread has asked us to pause hint processing. */
    while (dev->block_iothread) {
        qemu_cond_wait(&dev->free_page_cond, &dev->free_page_lock);
    }

    elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
    if (!elem) {
        return false;
    }

    if (elem->out_num) {
        uint32_t id;
        size_t size = iov_to_buf(elem->out_sg, elem->out_num, 0,
                                 &id, sizeof(id));
        virtio_tswap32s(vdev, &id);

        if (unlikely(size != sizeof(id))) {
            virtio_error(vdev, "received an incorrect cmd id");
            ret = false;
            goto out;
        }
        if (id == dev->free_page_report_cmd_id) {
            dev->free_page_report_status = FREE_PAGE_REPORT_S_START;
        } else {
            /*
             * Stop the optimization only when it has started. This
             * avoids a stale stop sign for the previous command.
             */
            if (dev->free_page_report_status == FREE_PAGE_REPORT_S_START) {
                dev->free_page_report_status = FREE_PAGE_REPORT_S_STOP;
            }
        }
    }

    /* Only forward hints while a reporting run is active. */
    if (elem->in_num) {
        if (dev->free_page_report_status == FREE_PAGE_REPORT_S_START) {
            qemu_guest_free_page_hint(elem->in_sg[0].iov_base,
                                      elem->in_sg[0].iov_len);
        }
    }

out:
    virtqueue_push(vq, elem, 1);
    g_free(elem);
    return ret;
}
/*
 * Stats virtqueue handler: parse tag/value stat records from the popped
 * element into s->stats[], remember the element in s->stats_vq_elem (it is
 * completed later, when the next stats poll fires), record the update
 * timestamp, and re-arm the polling timer.
 */
static void virtio_balloon_receive_stats(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
    VirtQueueElement *elem;
    VirtIOBalloonStat stat;
    size_t offset = 0;
    qemu_timeval tv;

    elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
    if (!elem) {
        goto out;
    }

    if (s->stats_vq_elem != NULL) {
        /* This should never happen if the driver follows the spec;
         * complete and drop the stale element so we don't leak it. */
        virtqueue_push(vq, s->stats_vq_elem, 0);
        virtio_notify(vdev, vq);
        g_free(s->stats_vq_elem);
    }

    s->stats_vq_elem = elem;

    /* Initialize the stats to get rid of any stale values.  This is only
     * needed to handle the case where a guest supports fewer stats than it
     * used to (ie. it has booted into an old kernel). */
    reset_stats(s);

    while (iov_to_buf(elem->out_sg, elem->out_num, offset,
                      &stat, sizeof(stat)) == sizeof(stat)) {
        uint16_t tag = virtio_tswap16(vdev, stat.tag);
        uint64_t val = virtio_tswap64(vdev, stat.val);

        offset += sizeof(stat);
        /* Ignore tags newer than this QEMU knows about. */
        if (tag < VIRTIO_BALLOON_S_NR)
            s->stats[tag] = val;
    }
    s->stats_vq_offset = offset;

    if (qemu_gettimeofday(&tv) < 0) {
        fprintf(stderr, "warning: %s: failed to get time of day\n", __func__);
        goto out;
    }

    s->stats_last_update = tv.tv_sec;

out:
    /* Re-arm the poll timer whether or not we got fresh stats. */
    if (balloon_stats_enabled(s)) {
        balloon_stats_change_timer(s, s->stats_poll_interval);
    }
}
static void virtio_audio_callback(void *opaque, int avail) { VirtIOAudioStream *stream = opaque; int n; DPRINTF("Callback (%d)\n", avail); if ((!stream->in_voice)&&(!stream->out_voice)) { DPRINTF("Skipping callback as no voice is selected!\n"); } while (avail) { while (stream->data_left == 0) { if (stream->has_buffer) { virtqueue_push(stream->data_vq, &stream->elem, stream->data_offset); virtio_notify(&stream->dev->vdev, stream->data_vq); stream->has_buffer = 0; } if (!virtqueue_pop(stream->data_vq, &stream->elem)) { /* Buffer underrun. */ stream->has_buffer = 0; DPRINTF("Underrun\n"); break; } stream->data_offset = 0; stream->data_left = 0; stream->has_buffer = 1; if (stream->in_voice) { for (n = 0; n < stream->elem.in_num; n++) stream->data_left += stream->elem.in_sg[n].iov_len; } else if (stream->out_voice) { for (n = 0; n < stream->elem.out_num; n++) stream->data_left += stream->elem.out_sg[n].iov_len; } } if (stream->data_left == 0) break; n = virtio_audio_fill(stream, stream->data_offset, avail); stream->data_left -= n; stream->data_offset += n; avail -= n; if (!n) break; } if (stream->data_left == 0 && stream->has_buffer) { virtqueue_push(stream->data_vq, &stream->elem, stream->data_offset); virtio_notify(&stream->dev->vdev, stream->data_vq); stream->has_buffer = 0; } }
/*
 * Output queue handler: forward every queued element's out buffers to the
 * backing character device and complete the element with the byte count.
 * NOTE(review): qemu_chr_write()'s return value is accumulated unchecked —
 * if it can return a negative error in this tree, 'len' goes wrong; confirm
 * against the chardev API of this QEMU version.
 */
static void virtio_console_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOConsole *s = to_virtio_console(vdev);
    VirtQueueElement elem;

    while (virtqueue_pop(vq, &elem)) {
        ssize_t len = 0;
        int d;

        /* Write each out sg entry to the chardev in order. */
        for (d = 0; d < elem.out_num; d++)
            len += qemu_chr_write(s->chr, elem.out_sg[d].iov_base,
                                  elem.out_sg[d].iov_len);

        virtqueue_push(vq, &elem, len);
        virtio_notify(vdev, vq);
    }
}
/*
 * Inflate/deflate queue handler (transitional QEMU): each popped element
 * carries 32-bit PFNs; the referenced RAM pages are ballooned.  Deflate vs
 * inflate is selected by queue identity (vq == s->dvq).
 */
static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
    VirtQueueElement *elem;
    MemoryRegionSection section;

    for (;;) {
        size_t offset = 0;
        uint32_t pfn;

        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            return;
        }

        /* One guest-endian 4-byte PFN per iteration. */
        while (iov_to_buf(elem->out_sg, elem->out_num, offset, &pfn, 4) == 4) {
            ram_addr_t pa;
            ram_addr_t addr;
            int p = virtio_ldl_p(vdev, &pfn);

            pa = (ram_addr_t) p << VIRTIO_BALLOON_PFN_SHIFT;
            offset += 4;

            /* FIXME: remove get_system_memory(), but how? */
            section = memory_region_find(get_system_memory(), pa, 1);
            /* The address must resolve to plain writable RAM; the
             * reference taken by memory_region_find() is dropped on
             * both the skip and the success path. */
            if (!int128_nz(section.size) ||
                !memory_region_is_ram(section.mr) ||
                memory_region_is_rom(section.mr) ||
                memory_region_is_romd(section.mr)) {
                trace_virtio_balloon_bad_addr(pa);
                memory_region_unref(section.mr);
                continue;
            }

            trace_virtio_balloon_handle_output(memory_region_name(section.mr),
                                               pa);
            /* Using memory_region_get_ram_ptr is bending the rules a bit,
               but should be OK because we only want a single page.  */
            addr = section.offset_within_region;
            balloon_page(memory_region_get_ram_ptr(section.mr) + addr,
                         !!(vq == s->dvq));
            memory_region_unref(section.mr);
        }

        virtqueue_push(vq, elem, offset);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}
/*
 * 9p request queue handler (P9MsgHeader variant): for each queued element,
 * read the 7-byte 9p header from the out sg list, attach the element to a
 * fresh PDU and submit it with the parsed header.  Stops when the queue is
 * drained or no PDU can be allocated.
 */
static void handle_9p_output(VirtIODevice *vdev, VirtQueue *vq)
{
    V9fsVirtioState *v = (V9fsVirtioState *)vdev;
    V9fsState *s = &v->state;
    V9fsPDU *pdu;
    ssize_t len;
    VirtQueueElement *elem;

    while ((pdu = pdu_alloc(s))) {
        P9MsgHeader out;

        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            goto out_free_pdu;
        }

        /* A request with no in buffers leaves nowhere to write the
         * reply — treat as a fatal guest error. */
        if (elem->in_num == 0) {
            virtio_error(vdev,
                         "The guest sent a VirtFS request without space for "
                         "the reply");
            goto out_free_req;
        }
        QEMU_BUILD_BUG_ON(sizeof(out) != 7);

        v->elems[pdu->idx] = elem;

        len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                         &out, sizeof(out));
        if (len != sizeof(out)) {
            virtio_error(vdev, "The guest sent a malformed VirtFS request: "
                         "header size is %zd, should be 7", len);
            goto out_free_req;
        }

        pdu_submit(pdu, &out);
    }

    return;

out_free_req:
    /* Error path: return the element unused, then free it and the PDU. */
    virtqueue_detach_element(vq, elem, 0);
    g_free(elem);
out_free_pdu:
    pdu_free(pdu);
}
/* TX */
/*
 * Drain the TX virtqueue: strip (or shrink) the guest-supplied virtio-net
 * header from each element and hand the payload to the network backend.
 * NOTE(review): has_vnet_hdr is hardcoded to 0 in this snapshot, so the
 * "else if (n->mergeable_rx_bufs)" branch below is dead code here — the
 * header is always skipped; confirm whether vnet-hdr support was intended.
 */
static void virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
{
    VirtQueueElement elem;
    int has_vnet_hdr = 0;

    /* Don't touch the rings until the driver is up. */
    if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    while (virtqueue_pop(vq, &elem)) {
        ssize_t len = 0;
        unsigned int out_num = elem.out_num;
        struct iovec *out_sg = &elem.out_sg[0];
        unsigned hdr_len;

        /* hdr_len refers to the header received from the guest */
        hdr_len = n->mergeable_rx_bufs ?
            sizeof(struct virtio_net_hdr_mrg_rxbuf) :
            sizeof(struct virtio_net_hdr);

        /* The spec requires the header alone in the first descriptor. */
        if (out_num < 1 || out_sg->iov_len != hdr_len) {
            fprintf(stderr, "virtio-net header not in first element\n");
            exit(1);
        }

        /* ignore the header if GSO is not supported */
        if (!has_vnet_hdr) {
            out_num--;
            out_sg++;
            len += hdr_len;
        } else if (n->mergeable_rx_bufs) {
            /* tapfd expects a struct virtio_net_hdr */
            hdr_len -= sizeof(struct virtio_net_hdr);
            out_sg->iov_len -= hdr_len;
            len += hdr_len;
        }

        len += qemu_sendv_packet(n->vc, out_sg, out_num);

        virtqueue_push(vq, &elem, len);
        virtio_notify(&n->vdev, vq);
    }
}
static void vcon_read(void *opaque, const uint8_t *buf, int size) { VirtIOConsole *s = (VirtIOConsole *) opaque; VirtQueueElement elem; int offset = 0; /* The current kernel implementation has only one outstanding input * buffer of PAGE_SIZE. Nevertheless, this function is prepared to * handle multiple buffers with multiple sg element for input */ while (offset < size) { int i = 0; if (!virtqueue_pop(s->ivq, &elem)) break; while (offset < size && i < elem.in_num) { int len = MIN(elem.in_sg[i].iov_len, size - offset); memcpy(elem.in_sg[i].iov_base, buf + offset, len); offset += len; i++; } virtqueue_push(s->ivq, &elem, size); } virtio_notify(&s->vdev, s->ivq); }
/* Send data from a char device over to the guest */ static void chr_read(void *opaque, const void *buf, size_t size) { VirtIORNG *vrng = opaque; VirtIODevice *vdev = VIRTIO_DEVICE(vrng); VirtQueueElement *elem; size_t len; int offset; if (!is_guest_ready(vrng)) { return; } vrng->quota_remaining -= size; offset = 0; while (offset < size) { elem = virtqueue_pop(vrng->vq, sizeof(VirtQueueElement)); if (!elem) { break; } len = iov_from_buf(elem->in_sg, elem->in_num, 0, buf + offset, size - offset); offset += len; virtqueue_push(vrng->vq, elem, len); trace_virtio_rng_pushed(vrng, len); g_free(elem); } virtio_notify(vdev, vrng->vq); if (!virtio_queue_empty(vrng->vq)) { /* If we didn't drain the queue, call virtio_rng_process * to take care of asking for more data as appropriate. */ virtio_rng_process(vrng); } }
/*
 * CUDA pass-through queue handler: each element's out_sg[0] carries a
 * 'struct param' request (shared with the guest driver); dispatch it to
 * the matching CUDA driver API call, write results back into the shared
 * param / in buffers, and complete the element.
 *
 * Fixes vs the previous version:
 *  - CUMODULELOAD sized its path buffer with strlen(getenv(...) + 1) —
 *    the "+ 1" was inside strlen's argument, so the allocation was two
 *    bytes short for the concatenated path plus its NUL terminator
 *    (heap overflow in strcpy/strcat).
 *  - CUMODULELOAD's malloc-failure path pushed the element and then fell
 *    through to the common push after the switch, pushing the same
 *    element twice.
 *  - binname/name/args were leaked on several error/success paths.
 */
static void vq_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtQueueElement elem;

    while (virtqueue_pop(vq, &elem)) {
        struct param *p = elem.out_sg[0].iov_base;

        /* for all library routines: get required arguments from buffer,
         * execute, and push results back in virtqueue */
        switch (p->syscall_type) {
        case CUINIT: {
            p->result = cuInit(p->flags);
            break;
        }
        case CUDRIVERGETVERSION: {
            p->result = cuDriverGetVersion(&p->val1);
            break;
        }
        case CUDEVICEGETCOUNT: {
            p->result = cuDeviceGetCount(&p->val1);
            break;
        }
        case CUDEVICEGET: {
            p->result = cuDeviceGet(&p->device, p->val1);
            break;
        }
        case CUDEVICECOMPUTECAPABILITY: {
            p->result = cuDeviceComputeCapability(&p->val1, &p->val2,
                                                  p->device);
            break;
        }
        case CUDEVICEGETNAME: {
            p->result = cuDeviceGetName(elem.in_sg[0].iov_base, p->val1,
                                        p->device);
            break;
        }
        case CUDEVICEGETATTRIBUTE: {
            p->result = cuDeviceGetAttribute(&p->val1, p->attrib, p->device);
            break;
        }
        case CUCTXCREATE: {
            p->result = cuCtxCreate(&p->ctx, p->flags, p->device);
            break;
        }
        case CUCTXDESTROY: {
            p->result = cuCtxDestroy(p->ctx);
            break;
        }
        case CUCTXGETCURRENT: {
            p->result = cuCtxGetCurrent(&p->ctx);
            break;
        }
        case CUCTXGETDEVICE: {
            p->result = cuCtxGetDevice(&p->device);
            break;
        }
        case CUCTXPOPCURRENT: {
            p->result = cuCtxPopCurrent(&p->ctx);
            break;
        }
        case CUCTXSETCURRENT: {
            p->result = cuCtxSetCurrent(p->ctx);
            break;
        }
        case CUCTXSYNCHRONIZE: {
            p->result = cuCtxSynchronize();
            break;
        }
        case CUMODULELOAD: {
            /* hardcoded path - needs improvement.  All .cubin files should
             * be stored in $QEMU_NFS_PATH - currently $QEMU_NFS_PATH is
             * shared between host and guest with NFS.
             * Fix: "+ 1" for the NUL terminator now sits OUTSIDE strlen()
             * (the old code computed strlen(getenv(...) + 1), undersizing
             * the buffer by two bytes). */
            char *binname = malloc((strlen((char *)elem.out_sg[1].iov_base) +
                                    strlen(getenv("QEMU_NFS_PATH")) + 1) *
                                   sizeof(char));
            if (!binname) {
                p->result = 0;
                /* Fix: no virtqueue_push() here — the element is pushed
                 * once after the switch; pushing it twice corrupted the
                 * used ring. */
                break;
            }
            strcpy(binname, getenv("QEMU_NFS_PATH"));
            strcat(binname, (char *)elem.out_sg[1].iov_base);
            /* change current CUDA context: each CUDA context has its own
             * virtual memory space - isolation is ensured by switching
             * contexts */
            if (cuCtxSetCurrent(p->ctx) != 0) {
                p->result = 999;
                free(binname);  /* fix: was leaked on this path */
                break;
            }
            p->result = cuModuleLoad(&p->module, binname);
            free(binname);
            break;
        }
        case CUMODULEGETGLOBAL: {
            /* NOTE(review): the guest-supplied symbol name is copied with
             * an unbounded strcpy into a 100-byte buffer — should be
             * length-checked against p->length like CUMODULEGETFUNCTION. */
            char *name = malloc(100 * sizeof(char));
            if (!name) {
                p->result = 999;
                break;
            }
            strcpy(name, (char *)elem.out_sg[1].iov_base);
            p->result = cuModuleGetGlobal(&p->dptr, &p->size1, p->module,
                                          (const char *)name);
            free(name);  /* fix: was leaked */
            break;
        }
        case CUMODULEUNLOAD: {
            p->result = cuModuleUnload(p->module);
            break;
        }
        case CUMEMALLOC: {
            if (cuCtxSetCurrent(p->ctx) != 0) {
                p->result = 999;
                break;
            }
            p->result = cuMemAlloc(&p->dptr, p->bytesize);
            break;
        }
        case CUMEMALLOCPITCH: {
            if (cuCtxSetCurrent(p->ctx) != 0) {
                p->result = 999;
                break;
            }
            p->result = cuMemAllocPitch(&p->dptr, &p->size3, p->size1,
                                        p->size2, p->bytesize);
            break;
        }
        /* large buffers are allocated in smaller chunks in guest kernel
         * space; get each chunk separately and copy it to device memory */
        case CUMEMCPYHTOD: {
            int i;
            size_t offset;
            unsigned long s, nr_pages = p->nr_pages;

            if (cuCtxSetCurrent(p->ctx) != 0) {
                p->result = 999;
                break;
            }
            offset = 0;
            /* out_sg layout: [1+2*i] = chunk data, [1+2*i+1] = chunk size */
            for (i = 0; i < nr_pages; i++) {
                s = *(long *)elem.out_sg[1 + 2 * i + 1].iov_base;
                p->result = cuMemcpyHtoD(p->dptr + offset,
                                         elem.out_sg[1 + 2 * i].iov_base, s);
                if (p->result != 0) {
                    break;
                }
                offset += s;
            }
            break;
        }
        case CUMEMCPYHTODASYNC: {
            int i;
            size_t offset;
            unsigned long s, nr_pages = p->nr_pages;

            if (cuCtxSetCurrent(p->ctx) != 0) {
                p->result = 999;
                break;
            }
            offset = 0;
            for (i = 0; i < nr_pages; i++) {
                s = *(long *)elem.out_sg[1 + 2 * i + 1].iov_base;
                p->result = cuMemcpyHtoDAsync(p->dptr + offset,
                                              elem.out_sg[1 + 2 * i].iov_base,
                                              s, p->stream);
                if (p->result != 0) {
                    break;
                }
                offset += s;
            }
            break;
        }
        case CUMEMCPYDTODASYNC: {
            p->result = cuMemcpyDtoDAsync(p->dptr, p->dptr1, p->size1,
                                          p->stream);
            break;
        }
        case CUMEMCPYDTOH: {
            int i;
            unsigned long s, nr_pages = p->nr_pages;

            if (cuCtxSetCurrent(p->ctx) != 0) {
                p->result = 999;
                break;
            }
            size_t offset = 0;
            /* in_sg layout: [2*i] = chunk data, [2*i+1] = chunk size */
            for (i = 0; i < nr_pages; i++) {
                s = *(long *)elem.in_sg[0 + 2 * i + 1].iov_base;
                p->result = cuMemcpyDtoH(elem.in_sg[0 + 2 * i].iov_base,
                                         p->dptr + offset, s);
                if (p->result != 0) {
                    break;
                }
                offset += s;
            }
            break;
        }
        case CUMEMCPYDTOHASYNC: {
            int i;
            unsigned long s, nr_pages = p->nr_pages;

            if (cuCtxSetCurrent(p->ctx) != 0) {
                p->result = 999;
                break;
            }
            size_t offset = 0;
            for (i = 0; i < nr_pages; i++) {
                s = *(long *)elem.in_sg[0 + 2 * i + 1].iov_base;
                p->result = cuMemcpyDtoHAsync(elem.in_sg[0 + 2 * i].iov_base,
                                              p->dptr + offset, s, p->stream);
                if (p->result != 0) {
                    break;
                }
                offset += s;
            }
            break;
        }
        case CUMEMSETD32: {
            p->result = cuMemsetD32(p->dptr, p->bytecount, p->bytesize);
            break;
        }
        case CUMEMFREE: {
            p->result = cuMemFree(p->dptr);
            break;
        }
        case CUMODULEGETFUNCTION: {
            char *name = (char *)elem.out_sg[1].iov_base;

            name[p->length] = '\0';
            if (cuCtxSetCurrent(p->ctx) != 0) {
                p->result = 999;
                break;
            }
            p->result = cuModuleGetFunction(&p->function, p->module, name);
            break;
        }
        case CULAUNCHKERNEL: {
            void **args = malloc(p->val1 * sizeof(void *));
            int i;

            if (!args) {
                p->result = 9999;
                break;
            }
            for (i = 0; i < p->val1; i++) {
                args[i] = elem.out_sg[1 + i].iov_base;
            }
            if (cuCtxSetCurrent(p->ctx) != 0) {
                p->result = 999;
                free(args);  /* fix: was leaked on this path */
                break;
            }
            p->result = cuLaunchKernel(p->function,
                                       p->gridDimX, p->gridDimY, p->gridDimZ,
                                       p->blockDimX, p->blockDimY,
                                       p->blockDimZ,
                                       p->bytecount, 0, args, 0);
            free(args);
            break;
        }
        case CUEVENTCREATE: {
            p->result = cuEventCreate(&p->event1, p->flags);
            break;
        }
        case CUEVENTDESTROY: {
            p->result = cuEventDestroy(p->event1);
            break;
        }
        case CUEVENTRECORD: {
            p->result = cuEventRecord(p->event1, p->stream);
            break;
        }
        case CUEVENTSYNCHRONIZE: {
            p->result = cuEventSynchronize(p->event1);
            break;
        }
        case CUEVENTELAPSEDTIME: {
            p->result = cuEventElapsedTime(&p->pMilliseconds, p->event1,
                                           p->event2);
            break;
        }
        case CUSTREAMCREATE: {
            p->result = cuStreamCreate(&p->stream, 0);
            break;
        }
        case CUSTREAMSYNCHRONIZE: {
            p->result = cuStreamSynchronize(p->stream);
            break;
        }
        case CUSTREAMQUERY: {
            p->result = cuStreamQuery(p->stream);
            break;
        }
        case CUSTREAMDESTROY: {
            p->result = cuStreamDestroy(p->stream);
            break;
        }
        default:
            printf("Unknown syscall_type\n");
        }

        /* Results travel back through the shared param buffer, so no
         * device-written length is reported. */
        virtqueue_push(vq, &elem, 0);
    }

    /* notify frontend - trigger virtual interrupt */
    virtio_notify(vdev, vq);
    return;
}
/*
 * Control queue handler: parse one virtio_crypto_op_ctrl_req per element
 * and dispatch on its opcode (create/destroy session).  The session-input
 * or one-byte status result is written back through the in sg list.
 *
 * Error protocol: guest protocol violations call virtio_error() and
 * detach the element without pushing it; -EFAULT from session creation is
 * treated the same way (device needs reset).  The element itself is always
 * freed at the bottom of the loop.
 */
static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
    struct virtio_crypto_op_ctrl_req ctrl;
    VirtQueueElement *elem;
    struct iovec *in_iov;
    struct iovec *out_iov;
    unsigned in_num;
    unsigned out_num;
    uint32_t queue_id;
    uint32_t opcode;
    struct virtio_crypto_session_input input;
    int64_t session_id;
    uint8_t status;
    size_t s;

    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }
        /* Every control request needs a readable header and a writable
         * place for the reply. */
        if (elem->out_num < 1 || elem->in_num < 1) {
            virtio_error(vdev, "virtio-crypto ctrl missing headers");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }

        out_num = elem->out_num;
        out_iov = elem->out_sg;
        in_num = elem->in_num;
        in_iov = elem->in_sg;
        if (unlikely(iov_to_buf(out_iov, out_num, 0, &ctrl, sizeof(ctrl))
                    != sizeof(ctrl))) {
            virtio_error(vdev, "virtio-crypto request ctrl_hdr too short");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }
        /* Skip past the header so out_iov points at opcode payload. */
        iov_discard_front(&out_iov, &out_num, sizeof(ctrl));
        opcode = ldl_le_p(&ctrl.header.opcode);
        queue_id = ldl_le_p(&ctrl.header.queue_id);

        switch (opcode) {
        case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION:
            memset(&input, 0, sizeof(input));
            session_id = virtio_crypto_create_sym_session(vcrypto,
                             &ctrl.u.sym_create_session,
                             queue_id, opcode,
                             out_iov, out_num);
            /* Serious errors, need to reset virtio crypto device */
            if (session_id == -EFAULT) {
                virtqueue_detach_element(vq, elem, 0);
                break;
            } else if (session_id == -VIRTIO_CRYPTO_NOTSUPP) {
                stl_le_p(&input.status, VIRTIO_CRYPTO_NOTSUPP);
            } else if (session_id == -VIRTIO_CRYPTO_ERR) {
                stl_le_p(&input.status, VIRTIO_CRYPTO_ERR);
            } else {
                /* Set the session id */
                stq_le_p(&input.session_id, session_id);
                stl_le_p(&input.status, VIRTIO_CRYPTO_OK);
            }

            s = iov_from_buf(in_iov, in_num, 0, &input, sizeof(input));
            if (unlikely(s != sizeof(input))) {
                virtio_error(vdev, "virtio-crypto input incorrect");
                virtqueue_detach_element(vq, elem, 0);
                break;
            }
            virtqueue_push(vq, elem, sizeof(input));
            virtio_notify(vdev, vq);
            break;
        case VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION:
        case VIRTIO_CRYPTO_HASH_DESTROY_SESSION:
        case VIRTIO_CRYPTO_MAC_DESTROY_SESSION:
        case VIRTIO_CRYPTO_AEAD_DESTROY_SESSION:
            status = virtio_crypto_handle_close_session(vcrypto,
                         &ctrl.u.destroy_session, queue_id);
            /* The status only occupy one byte, we can directly use it */
            s = iov_from_buf(in_iov, in_num, 0, &status, sizeof(status));
            if (unlikely(s != sizeof(status))) {
                virtio_error(vdev, "virtio-crypto status incorrect");
                virtqueue_detach_element(vq, elem, 0);
                break;
            }
            virtqueue_push(vq, elem, sizeof(status));
            virtio_notify(vdev, vq);
            break;
        case VIRTIO_CRYPTO_HASH_CREATE_SESSION:
        case VIRTIO_CRYPTO_MAC_CREATE_SESSION:
        case VIRTIO_CRYPTO_AEAD_CREATE_SESSION:
        default:
            /* Unimplemented opcodes still get a well-formed NOTSUPP reply. */
            error_report("virtio-crypto unsupported ctrl opcode: %d", opcode);
            memset(&input, 0, sizeof(input));
            stl_le_p(&input.status, VIRTIO_CRYPTO_NOTSUPP);
            s = iov_from_buf(in_iov, in_num, 0, &input, sizeof(input));
            if (unlikely(s != sizeof(input))) {
                virtio_error(vdev, "virtio-crypto input incorrect");
                virtqueue_detach_element(vq, elem, 0);
                break;
            }
            virtqueue_push(vq, elem, sizeof(input));
            virtio_notify(vdev, vq);
            break;
        } /* end switch case */

        g_free(elem);
    } /* end for loop */
}
/*
 * RX path: copy an incoming packet into the guest's receive buffers,
 * prepending a virtio-net header.  With mergeable RX buffers the packet
 * may span several elements; num_buffers in the first header is patched
 * at the end.  Fatal guest-setup errors exit(1) (legacy behavior).
 */
static void virtio_net_receive(void *opaque, const uint8_t *buf, int size)
{
    VirtIONet *n = opaque;
    struct virtio_net_hdr_mrg_rxbuf *mhdr = NULL;
    size_t hdr_len, offset, i;

    if (!do_virtio_net_can_receive(n, size))
        return;

    /* hdr_len refers to the header we supply to the guest */
    hdr_len = n->mergeable_rx_bufs ?
        sizeof(struct virtio_net_hdr_mrg_rxbuf) :
        sizeof(struct virtio_net_hdr);

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement elem;
        int len, total;
        struct iovec sg[VIRTQUEUE_MAX_SIZE];

        len = total = 0;

        /* Without mergeable buffers a packet must fit in one element;
         * running out of elements mid-packet is fatal (can't un-fill). */
        if ((i != 0 && !n->mergeable_rx_bufs) ||
            virtqueue_pop(n->rx_vq, &elem) == 0) {
            if (i == 0)
                return;
            fprintf(stderr, "virtio-net truncating packet\n");
            exit(1);
        }

        if (elem.in_num < 1) {
            fprintf(stderr,
                    "virtio-net receive queue contains no in buffers\n");
            exit(1);
        }

        if (!n->mergeable_rx_bufs && elem.in_sg[0].iov_len != hdr_len) {
            fprintf(stderr, "virtio-net header not in first element\n");
            exit(1);
        }

        /* Work on a local copy of the sg list so receive_header() can
         * adjust it without touching the element. */
        memcpy(&sg, &elem.in_sg[0], sizeof(sg[0]) * elem.in_num);

        if (i == 0) {
            /* Remember where the header landed so num_buffers can be
             * patched after the element count is known. */
            if (n->mergeable_rx_bufs)
                mhdr = (struct virtio_net_hdr_mrg_rxbuf *)sg[0].iov_base;

            offset += receive_header(n, sg, elem.in_num,
                                     buf + offset, size - offset, hdr_len);
            total += hdr_len;
        }

        /* copy in packet.  ugh */
        len = iov_fill(sg, elem.in_num, buf + offset, size - offset);
        total += len;

        /* signal other side */
        virtqueue_fill(n->rx_vq, &elem, total, i++);

        offset += len;
    }

    if (mhdr)
        mhdr->num_buffers = i;

    /* Publish all filled elements at once and interrupt the guest. */
    virtqueue_flush(n->rx_vq, i);
    virtio_notify(&n->vdev, n->rx_vq);
}
/* Command queue. */
/*
 * Parse fixed 12-byte commands (cmd, stream-index, value — each a 32-bit
 * guest-endian word) packed back-to-back in each element's out buffers,
 * and apply them to the addressed stream: format changes, voice open/close
 * (CMD_INIT), and start/stop (CMD_RUN).  CMD_INIT replies are appended via
 * virtio_audio_cmd_result(), which accounts the bytes in out_bytes.
 */
static void virtio_audio_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOAudio *s = to_virtio_audio(vdev);
    VirtIOAudioStream *stream;
    VirtQueueElement elem;
    int out_n;
    uint32_t *p;
    int len;
    size_t out_bytes;
    uint32_t value;

    while (virtqueue_pop(s->cmd_vq, &elem)) {
        size_t bytes_transferred = 0;

        for (out_n = 0; out_n < elem.out_num; out_n++) {
            p = (uint32_t *)elem.out_sg[out_n].iov_base;
            len = elem.out_sg[out_n].iov_len;
            while (len > 0) {
                /* Commands never straddle sg entries; a short tail is a
                 * guest bug. */
                if (len < 12) {
                    BADF("Bad command length\n");
                    break;
                }
                DPRINTF("Command %d %d %d\n",
                        ldl_p(p), ldl_p(p + 1), ldl_p (p + 2));
                /* Word 1: stream index (bounds-checked). */
                value = ldl_p(p + 1);
                if (value >= NUM_STREAMS)
                    break;
                stream = &s->stream[value];
                /* Word 2: command argument.  Word 0: the command itself. */
                value = ldl_p(p + 2);
                switch (ldl_p(p)) {
                case VIRTIO_AUDIO_CMD_SET_ENDIAN:
                    stream->fmt.endianness = value;
                    break;
                case VIRTIO_AUDIO_CMD_SET_CHANNELS:
                    stream->fmt.nchannels = value;
                    break;
                case VIRTIO_AUDIO_CMD_SET_FMT:
                    stream->fmt.fmt = value;
                    break;
                case VIRTIO_AUDIO_CMD_SET_FREQ:
                    stream->fmt.freq = value;
                    break;
                case VIRTIO_AUDIO_CMD_INIT:
                    /* value == 1: open capture voice; value == 0: open
                     * playback voice; anything else closes both. */
                    out_bytes = 0;
                    if (value == 1) {
                        if (stream->out_voice) {
                            AUD_close_out(&s->card, stream->out_voice);
                            stream->out_voice = NULL;
                        }
                        stream->in_voice =
                            AUD_open_in(&s->card, stream->in_voice,
                                        "virtio-audio.in", stream,
                                        virtio_audio_callback, &stream->fmt);
                        virtio_audio_cmd_result(0, &elem, &out_bytes);
                    } else if (value == 0) {
                        if (stream->in_voice) {
                            AUD_close_in(&s->card, stream->in_voice);
                            stream->in_voice = NULL;
                        }
                        stream->out_voice =
                            AUD_open_out(&s->card, stream->out_voice,
                                         "virtio-audio.out", stream,
                                         virtio_audio_callback, &stream->fmt);
                        /* Reply with the playback buffer size. */
                        value = AUD_get_buffer_size_out(stream->out_voice);
                        virtio_audio_cmd_result(value, &elem, &out_bytes);
                    } else {
                        // let us close all down
                        if (stream->out_voice) {
                            AUD_close_out(&s->card, stream->out_voice);
                            stream->out_voice = NULL;
                        }
                        if (stream->in_voice) {
                            AUD_close_in(&s->card, stream->in_voice);
                            stream->in_voice = NULL;
                        }
                    }
                    bytes_transferred += out_bytes;
                    break;
                case VIRTIO_AUDIO_CMD_RUN:
                    /* Enable/disable whichever voice is currently open. */
                    if (stream->in_voice) {
                        AUD_set_active_in(stream->in_voice, value);
                    } else if (stream->out_voice) {
                        AUD_set_active_out(stream->out_voice, value);
                    } else {
                        DPRINTF("Cannot execute CMD_RUN as no voice is active\n");
                    }
                    break;
                }
                p += 3;
                len -= 12;
                bytes_transferred += 12;
            }
        }

        virtqueue_push(s->cmd_vq, &elem, bytes_transferred);
        virtio_notify(vdev, s->cmd_vq);
    }
}