static int s390_virtio_hcall_notify(const uint64_t *args) { uint64_t mem = args[0]; int r = 0, i; if (mem > ram_size) { VirtIOS390Device *dev = s390_virtio_bus_find_vring(s390_bus, mem, &i); if (dev) { /* * Older kernels will use the virtqueue before setting DRIVER_OK. * In this case the feature bits are not yet up to date, meaning * that several funny things can happen, e.g. the guest thinks * EVENT_IDX is on and QEMU thinks it is off. Let's force a feature * and status sync. */ if (!(dev->vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { s390_virtio_device_update_status(dev); } virtio_queue_notify(dev->vdev, i); } else { r = -EINVAL; } } else { /* Early printk */ } return r; }
/*
 * MMIO write handler for the Syborg virtio proxy.
 *
 * Writes at or above SYBORG_VIRTIO_CONFIG land in the device config space;
 * everything below is a 32-bit transport register selected by offset >> 2.
 */
static void syborg_virtio_writel(void *opaque, target_phys_addr_t offset,
                                 uint32_t value)
{
    SyborgVirtIOProxy *s = opaque;
    VirtIODevice *vdev = s->vdev;

    DPRINTF("writel 0x%x = 0x%x\n", (int)offset, value);

    if (offset >= SYBORG_VIRTIO_CONFIG) {
        virtio_config_writel(vdev, offset - SYBORG_VIRTIO_CONFIG, value);
        return;
    }

    switch (offset >> 2) {
    case SYBORG_VIRTIO_GUEST_FEATURES:
        if (vdev->set_features) {
            vdev->set_features(vdev, value);
        }
        vdev->guest_features = value;
        break;
    case SYBORG_VIRTIO_QUEUE_BASE:
        /* Writing 0 as the queue base is the legacy "reset device" signal. */
        if (value == 0) {
            virtio_reset(vdev);
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel, value);
        }
        break;
    case SYBORG_VIRTIO_QUEUE_SEL:
        if (value < VIRTIO_PCI_QUEUE_MAX) {
            vdev->queue_sel = value;
        }
        break;
    case SYBORG_VIRTIO_QUEUE_NOTIFY:
        if (value < VIRTIO_PCI_QUEUE_MAX) {
            virtio_queue_notify(vdev, value);
        }
        break;
    case SYBORG_VIRTIO_STATUS:
        virtio_set_status(vdev, value & 0xFF);
        /* Status 0 means the driver asked for a full device reset. */
        if (vdev->status == 0) {
            virtio_reset(vdev);
        }
        break;
    case SYBORG_VIRTIO_INT_ENABLE:
        s->int_enable = value;
        virtio_update_irq(vdev);
        break;
    case SYBORG_VIRTIO_INT_STATUS:
        /* Write-one-to-clear interrupt acknowledge. */
        vdev->isr &= ~value;
        virtio_update_irq(vdev);
        break;
    default:
        BADF("Bad write offset 0x%x\n", (int)offset);
        break;
    }
}
/*
 * Dispatch an s390 KVM virtio hypercall.
 *
 * @param env        CPU state of the calling vcpu (unused here, kept for ABI).
 * @param mem        guest address argument (vring address, device descriptor
 *                   address, or early-printk token depending on the call).
 * @param hypercall  KVM_S390_VIRTIO_* selector.
 * @return 0 on success, -EINVAL on unknown call or unresolvable address.
 */
int s390_virtio_hypercall(CPUS390XState *env, uint64_t mem, uint64_t hypercall)
{
    int r = 0, i;

    /* NOTE(review): %ld for a uint64_t is wrong on 32-bit hosts; should be
     * PRIu64 — left untouched as dprintf is debug-only. */
    dprintf("KVM hypercall: %ld\n", hypercall);
    switch (hypercall) {
    case KVM_S390_VIRTIO_NOTIFY:
        if (mem > ram_size) {
            VirtIOS390Device *dev = s390_virtio_bus_find_vring(s390_bus,
                                                               mem, &i);
            if (dev) {
                virtio_queue_notify(dev->vdev, i);
            } else {
                r = -EINVAL;
            }
        } else {
            /* Early printk */
        }
        break;
    case KVM_S390_VIRTIO_RESET:
    {
        VirtIOS390Device *dev;

        dev = s390_virtio_bus_find_mem(s390_bus, mem);
        /* Bug fix: a bogus guest address used to crash QEMU with a NULL
         * dereference here; fail with -EINVAL like SET_STATUS does. */
        if (!dev) {
            r = -EINVAL;
            break;
        }
        virtio_reset(dev->vdev);
        /* Clear the status byte in the guest-visible device descriptor. */
        stb_phys(dev->dev_offs + VIRTIO_DEV_OFFS_STATUS, 0);
        s390_virtio_device_sync(dev);
        s390_virtio_reset_idx(dev);
        break;
    }
    case KVM_S390_VIRTIO_SET_STATUS:
    {
        VirtIOS390Device *dev;

        dev = s390_virtio_bus_find_mem(s390_bus, mem);
        if (dev) {
            s390_virtio_device_update_status(dev);
        } else {
            r = -EINVAL;
        }
        break;
    }
    default:
        r = -EINVAL;
        break;
    }

    return r;
}
/*
 * Dispatch an s390 KVM virtio hypercall (register-based calling convention).
 *
 * regs[1] selects the KVM_S390_VIRTIO_* call, regs[2] carries the guest
 * address argument; the result code is written back into regs[2].
 * Always returns 0 to the caller (the guest sees the result in regs[2]).
 */
int s390_virtio_hypercall(CPUState *env)
{
    int r = 0, i;
    target_ulong mem = env->regs[2];

    dprintf("KVM hypercall: %ld\n", env->regs[1]);
    switch (env->regs[1]) {
    case KVM_S390_VIRTIO_NOTIFY:
        if (mem > ram_size) {
            VirtIOS390Device *dev = s390_virtio_bus_find_vring(s390_bus,
                                                               mem, &i);
            if (dev) {
                virtio_queue_notify(dev->vdev, i);
            } else {
                r = -EINVAL;
            }
        } else {
            /* Early printk */
        }
        break;
    case KVM_S390_VIRTIO_RESET:
    {
        VirtIOS390Device *dev;

        dev = s390_virtio_bus_find_mem(s390_bus, mem);
        /* Bug fix: guard against a lookup failure before dereferencing —
         * a bad guest address previously caused a NULL dereference here,
         * while the SET_STATUS case below already checked. */
        if (!dev) {
            r = -EINVAL;
            break;
        }
        virtio_reset(dev->vdev);
        s390_virtio_device_sync(dev);
        break;
    }
    case KVM_S390_VIRTIO_SET_STATUS:
    {
        VirtIOS390Device *dev;

        dev = s390_virtio_bus_find_mem(s390_bus, mem);
        if (dev) {
            s390_virtio_device_update_status(dev);
        } else {
            r = -EINVAL;
        }
        break;
    }
    default:
        r = -EINVAL;
        break;
    }

    env->regs[2] = r;
    return 0;
}
/*
 * Dispatch an s390 KVM virtio hypercall (register-based variant that
 * deliberately ignores RESET — see comment in that case).
 *
 * regs[1] selects the call, regs[2] carries the guest address; the result
 * code is stored back into regs[2] and the function itself returns 0.
 */
int s390_virtio_hypercall(CPUState *env)
{
    target_ulong mem = env->regs[2];
    int queue_idx, rc = 0;

    dprintf("KVM hypercall: %ld\n", env->regs[1]);

    switch (env->regs[1]) {
    case KVM_S390_VIRTIO_NOTIFY:
        if (mem <= ram_size) {
            /* Early printk */
            break;
        }
        {
            VirtIOS390Device *dev =
                s390_virtio_bus_find_vring(s390_bus, mem, &queue_idx);

            if (dev) {
                virtio_queue_notify(dev->vdev, queue_idx);
            } else {
                rc = -EINVAL;
            }
        }
        break;
    case KVM_S390_VIRTIO_RESET:
        /*
         * virtio_reset() would reset the internal vring addresses, which
         * we would then have to sync up again — and we don't want to
         * reallocate a vring.  So intentionally do nothing here.
         */
        break;
    case KVM_S390_VIRTIO_SET_STATUS:
    {
        VirtIOS390Device *dev = s390_virtio_bus_find_mem(s390_bus, mem);

        if (dev) {
            s390_virtio_device_update_status(dev);
        } else {
            rc = -EINVAL;
        }
        break;
    }
    default:
        rc = -EINVAL;
        break;
    }

    env->regs[2] = rc;
    return 0;
}
/*
 * NOTIFY hypercall handler: kick the virtqueue whose vring lives at args[0].
 *
 * Addresses at or below ram_size are the early-printk channel and are
 * silently accepted.  Returns -EINVAL when no device owns the vring.
 */
static int s390_virtio_hcall_notify(const uint64_t *args)
{
    uint64_t vring_addr = args[0];
    VirtIOS390Device *dev;
    int queue_idx;

    if (vring_addr <= ram_size) {
        /* Early printk */
        return 0;
    }

    dev = s390_virtio_bus_find_vring(s390_bus, vring_addr, &queue_idx);
    if (!dev) {
        return -EINVAL;
    }

    virtio_queue_notify(dev->vdev, queue_idx);
    return 0;
}
static int virtio_ccw_hcall_notify(const uint64_t *args) { uint64_t subch_id = args[0]; uint64_t queue = args[1]; SubchDev *sch; int cssid, ssid, schid, m; if (ioinst_disassemble_sch_ident(subch_id, &m, &cssid, &ssid, &schid)) { return -EINVAL; } sch = css_find_subch(m, cssid, ssid, schid); if (!sch || !css_subch_visible(sch)) { return -EINVAL; } if (queue >= VIRTIO_CCW_QUEUE_MAX) { return -EINVAL; } virtio_queue_notify(virtio_ccw_get_vdev(sch), queue); return 0; }
/**
 * virtio_9p_transact
 *
 * Perform a 9P transaction over the VIRTIO queue interface. This function is
 * registered with the p9.c library via p9_reg_transport() to provide
 * connectivity to the 9P server.
 *
 * @param tx[in] Data to send, mapped to first queue item.
 * @param tx_size[in] Size of data to send.
 * @param rx[out] Data to receive, mapped to second queue item.
 * @param rx_size[out] Size of data received.
 * @return 0 = success, -ve = error.
 */
static int virtio_9p_transact(void *opaque, uint8_t *tx, int tx_size, uint8_t *rx,
                              int *rx_size)
{
    struct virtio_device *dev = opaque;
    struct vring_desc *desc;
    int id, i;
    uint32_t vq_size;
    struct vring_desc *vq_desc;
    struct vring_avail *vq_avail;
    struct vring_used *vq_used;
    volatile uint16_t *current_used_idx;
    uint16_t last_used_idx;

    /* Virt IO queues. */
    vq_size = virtio_get_qsize(dev, 0);
    vq_desc = virtio_get_vring_desc(dev, 0);
    vq_avail = virtio_get_vring_avail(dev, 0);
    vq_used = virtio_get_vring_used(dev, 0);

    last_used_idx = vq_used->idx;
    current_used_idx = &vq_used->idx;

    /* Determine descriptor index */
    id = (vq_avail->idx * 3) % vq_size;

    /* TX in first queue item. */
    dprint_buffer("TX", tx, tx_size);

    desc = &vq_desc[id];
    desc->addr = (uint64_t)tx;
    desc->len = tx_size;
    desc->flags = VRING_DESC_F_NEXT;
    desc->next = (id + 1) % vq_size;

    /* RX in the second queue item. */
    desc = &vq_desc[(id + 1) % vq_size];
    desc->addr = (uint64_t)rx;
    desc->len = *rx_size;
    desc->flags = VRING_DESC_F_WRITE;
    desc->next = 0;

    /* Tell HV that the queue is ready */
    vq_avail->ring[vq_avail->idx % vq_size] = id;
    mb();
    vq_avail->idx += 1;
    virtio_queue_notify(dev, 0);

    /* Receive the response. */
    i = 10000000;
    while (*current_used_idx == last_used_idx && i-- > 0) {
        // do something better
        mb();
    }

    /*
     * Bug fix: on timeout the post-decrement in the loop condition leaves
     * i == -1 (the "0 > 0" test fails, then i-- still runs), so the old
     * "if (i == 0)" never fired and a timed-out transaction fell through
     * to read an unanswered rx buffer.  Test the ring state directly.
     */
    if (*current_used_idx == last_used_idx) {
        return -1;
    }

    /* The response length is in the first 4 LE bytes of the 9P reply. */
    *rx_size = MIN(*rx_size, le32_to_cpu(*(uint32_t*)(&rx[0])));

    dprint_buffer("RX", rx, *rx_size);

    return 0;
}
/** * Read blocks * @param reg pointer to "reg" property * @param buf pointer to destination buffer * @param blocknum block number of the first block that should be read * @param cnt amount of blocks that should be read * @return number of blocks that have been read successfully */ int virtioblk_read(struct virtio_device *dev, char *buf, long blocknum, long cnt) { struct vring_desc *desc; int id; static struct virtio_blk_req blkhdr; //struct virtio_blk_config *blkconf; uint64_t capacity; uint32_t vq_size, time; struct vring_desc *vq_desc; /* Descriptor vring */ struct vring_avail *vq_avail; /* "Available" vring */ struct vring_used *vq_used; /* "Used" vring */ volatile uint8_t status = -1; volatile uint16_t *current_used_idx; uint16_t last_used_idx; //printf("virtioblk_read: dev=%p buf=%p blocknum=%li count=%li\n", // dev, buf, blocknum, cnt); /* Check whether request is within disk capacity */ capacity = virtio_get_config(dev, 0, sizeof(capacity)); if (blocknum + cnt - 1 > capacity) { puts("virtioblk_read: Access beyond end of device!"); return 0; } vq_size = virtio_get_qsize(dev, 0); vq_desc = virtio_get_vring_desc(dev, 0); vq_avail = virtio_get_vring_avail(dev, 0); vq_used = virtio_get_vring_used(dev, 0); last_used_idx = vq_used->idx; current_used_idx = &vq_used->idx; /* Set up header */ blkhdr.type = VIRTIO_BLK_T_IN | VIRTIO_BLK_T_BARRIER; blkhdr.ioprio = 1; blkhdr.sector = blocknum; /* Determine descriptor index */ id = (vq_avail->idx * 3) % vq_size; /* Set up virtqueue descriptor for header */ desc = &vq_desc[id]; desc->addr = (uint64_t)&blkhdr; desc->len = sizeof(struct virtio_blk_req); desc->flags = VRING_DESC_F_NEXT; desc->next = (id + 1) % vq_size; /* Set up virtqueue descriptor for data */ desc = &vq_desc[(id + 1) % vq_size]; desc->addr = (uint64_t)buf; desc->len = cnt * 512; desc->flags = VRING_DESC_F_NEXT | VRING_DESC_F_WRITE; desc->next = (id + 2) % vq_size; /* Set up virtqueue descriptor for status */ desc = &vq_desc[(id + 2) % vq_size]; desc->addr = 
(uint64_t)&status; desc->len = 1; desc->flags = VRING_DESC_F_WRITE; desc->next = 0; vq_avail->ring[vq_avail->idx % vq_size] = id; mb(); vq_avail->idx += 1; /* Tell HV that the queue is ready */ virtio_queue_notify(dev, 0); /* Wait for host to consume the descriptor */ time = SLOF_GetTimer() + VIRTIO_TIMEOUT; while (*current_used_idx == last_used_idx) { // do something better mb(); if (time < SLOF_GetTimer()) break; } if (status == 0) return cnt; printf("virtioblk_read failed! status = %i\n", status); return 0; }
/*
 * MMIO write handler for the virtio-mmio (legacy) transport.
 *
 * Writes at or above VIRTIO_MMIO_CONFIG go to the device config space at
 * native access size; all other registers require 32-bit accesses.
 * With no backend device attached, all writes are silently ignored.
 */
static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    DPRINTF("virtio_mmio_write offset 0x%x value 0x%" PRIx64 "\n",
            (int)offset, value);

    if (!vdev) {
        /* If no backend is present, we just make all registers
         * write-ignored. This allows us to provide transports with
         * no backend plugged in.
         */
        return;
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        /* Config space accepts 1/2/4-byte accesses at their native width. */
        switch (size) {
        case 1:
            virtio_config_writeb(vdev, offset, value);
            break;
        case 2:
            virtio_config_writew(vdev, offset, value);
            break;
        case 4:
            virtio_config_writel(vdev, offset, value);
            break;
        default:
            abort();
        }
        return;
    }
    if (size != 4) {
        DPRINTF("wrong size access to register!\n");
        return;
    }
    switch (offset) {
    case VIRTIO_MMIO_HOSTFEATURESSEL:
        proxy->host_features_sel = value;
        break;
    case VIRTIO_MMIO_GUESTFEATURES:
        /* Legacy transport: only feature word 0 is meaningful. */
        if (!proxy->guest_features_sel) {
            virtio_set_features(vdev, value);
        }
        break;
    case VIRTIO_MMIO_GUESTFEATURESSEL:
        proxy->guest_features_sel = value;
        break;
    case VIRTIO_MMIO_GUESTPAGESIZE:
        /* Guest writes its page size; store it as a shift for QUEUEPFN.
         * ctz32(0) == 32, so a zero write falls back to shift 0. */
        proxy->guest_page_shift = ctz32(value);
        if (proxy->guest_page_shift > 31) {
            proxy->guest_page_shift = 0;
        }
        DPRINTF("guest page size %" PRIx64 " shift %d\n", value,
                proxy->guest_page_shift);
        break;
    case VIRTIO_MMIO_QUEUESEL:
        if (value < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = value;
        }
        break;
    case VIRTIO_MMIO_QUEUENUM:
        DPRINTF("mmio_queue write %d max %d\n", (int)value, VIRTQUEUE_MAX_SIZE);
        virtio_queue_set_num(vdev, vdev->queue_sel, value);
        /* Note: only call this function for legacy devices */
        virtio_queue_update_rings(vdev, vdev->queue_sel);
        break;
    case VIRTIO_MMIO_QUEUEALIGN:
        /* Note: this is only valid for legacy devices */
        virtio_queue_set_align(vdev, vdev->queue_sel, value);
        break;
    case VIRTIO_MMIO_QUEUEPFN:
        /* Writing PFN 0 is the legacy "reset device" signal. */
        if (value == 0) {
            virtio_reset(vdev);
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel,
                                  value << proxy->guest_page_shift);
        }
        break;
    case VIRTIO_MMIO_QUEUENOTIFY:
        if (value < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, value);
        }
        break;
    case VIRTIO_MMIO_INTERRUPTACK:
        /* Write-one-to-clear acknowledge of ISR bits. */
        atomic_and(&vdev->isr, ~value);
        virtio_update_irq(vdev);
        break;
    case VIRTIO_MMIO_STATUS:
        /* Stop ioeventfd before dropping DRIVER_OK, start it after setting
         * it, so notifications are never handled with a stale status. */
        if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_mmio_stop_ioeventfd(proxy);
        }
        virtio_set_status(vdev, value & 0xff);
        if (value & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_mmio_start_ioeventfd(proxy);
        }
        if (vdev->status == 0) {
            virtio_reset(vdev);
        }
        break;
    case VIRTIO_MMIO_MAGIC:
    case VIRTIO_MMIO_VERSION:
    case VIRTIO_MMIO_DEVICEID:
    case VIRTIO_MMIO_VENDORID:
    case VIRTIO_MMIO_HOSTFEATURES:
    case VIRTIO_MMIO_QUEUENUMMAX:
    case VIRTIO_MMIO_INTERRUPTSTATUS:
        DPRINTF("write to readonly register\n");
        break;
    default:
        DPRINTF("bad register offset\n");
    }
}