void virtio_reinit_start(struct virtio_softc *sc) { int i; virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK); virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER); for (i = 0; i < sc->sc_nvqs; i++) { int n; struct virtqueue *vq = &sc->sc_vqs[i]; bus_space_write_2(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_QUEUE_SELECT, vq->vq_index); n = bus_space_read_2(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_QUEUE_SIZE); if (n == 0) /* vq disappeared */ continue; if (n != vq->vq_num) { panic("%s: virtqueue size changed, vq index %d\n", device_xname(sc->sc_dev), vq->vq_index); } virtio_init_vq(sc, vq); bus_space_write_4(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_QUEUE_ADDRESS, (vq->vq_dmamap->dm_segs[0].ds_addr / VIRTIO_PAGE_SIZE)); } }
/** * Initialize virtio-block device. * @param dev pointer to virtio device information */ int virtioblk_init(struct virtio_device *dev) { struct vring_avail *vq_avail; /* Reset device */ // XXX That will clear the virtq base. We need to move // initializing it to here anyway // // virtio_reset_device(dev); /* Acknowledge device. */ virtio_set_status(dev, VIRTIO_STAT_ACKNOWLEDGE); /* Tell HV that we know how to drive the device. */ virtio_set_status(dev, VIRTIO_STAT_ACKNOWLEDGE|VIRTIO_STAT_DRIVER); /* Device specific setup - we do not support special features right now */ virtio_set_guest_features(dev, 0); vq_avail = virtio_get_vring_avail(dev, 0); vq_avail->flags = VRING_AVAIL_F_NO_INTERRUPT; vq_avail->idx = 0; /* Tell HV that setup succeeded */ virtio_set_status(dev, VIRTIO_STAT_ACKNOWLEDGE|VIRTIO_STAT_DRIVER |VIRTIO_STAT_DRIVER_OK); return 0; }
/** * virtio_9p_init * * Establish the VIRTIO connection for use with the 9P server. Setup queues * and negotiate capabilities. Setup the 9P (Client) library. * * @param reg[in] Pointer to device tree node for VIRTIO/9P interface. * @param tx_buf[in] TX buffer for use by 9P Client lib - 8K in size. * @param rx_buf[in] TX buffer for use by 9P Client lib - 8K in size. * @param buf_size Somewhat redundant, buffer size expected to be 8k. * @return 0 = success, -ve = error. */ int virtio_9p_init(struct virtio_device *dev, void *tx_buf, void *rx_buf, int buf_size) { struct vring_avail *vq_avail; int status = VIRTIO_STAT_ACKNOWLEDGE; /* Check for double open */ if (__buf_size) return -1; __buf_size = buf_size; dprintf("%s : device at %p\n", __func__, dev->base); dprintf("%s : type is %04x\n", __func__, dev->type); /* Keep it disabled until the driver is 1.0 capable */ dev->is_modern = false; virtio_reset_device(dev); /* Acknowledge device. */ virtio_set_status(dev, status); /* Tell HV that we know how to drive the device. */ status |= VIRTIO_STAT_DRIVER; virtio_set_status(dev, status); /* Device specific setup - we do not support special features */ virtio_set_guest_features(dev, 0); if (virtio_queue_init_vq(dev, &vq, 0)) goto dev_error; vq_avail = virtio_get_vring_avail(dev, 0); vq_avail->flags = VRING_AVAIL_F_NO_INTERRUPT; vq_avail->idx = 0; /* Tell HV that setup succeeded */ status |= VIRTIO_STAT_DRIVER_OK; virtio_set_status(dev, status); /* Setup 9P library. */ p9_reg_transport(virtio_9p_transact, dev,(uint8_t *)tx_buf, (uint8_t *)rx_buf); dprintf("%s : complete\n", __func__); return 0; dev_error: printf("%s: failed\n", __func__); status |= VIRTIO_STAT_FAILED; virtio_set_status(dev, status); return -1; }
/**
 * Shutdown the virtio-block device.
 *
 * NOTE(review): the "quiesce" step writes VIRTIO_STAT_FAILED rather than
 * clearing the status; presumably this is to stop the host from touching
 * the rings before the reset below — confirm against the virtio spec's
 * shutdown sequence.
 *
 * @param dev pointer to virtio device information
 */
void virtioblk_shutdown(struct virtio_device *dev)
{
	/* Quiesce device */
	virtio_set_status(dev, VIRTIO_STAT_FAILED);

	/* Reset device */
	virtio_reset_device(dev);
}
/** * virtio_9p_init * * Establish the VIRTIO connection for use with the 9P server. Setup queues * and negotiate capabilities. Setup the 9P (Client) library. * * @param reg[in] Pointer to device tree node for VIRTIO/9P interface. * @param tx_buf[in] TX buffer for use by 9P Client lib - 8K in size. * @param rx_buf[in] TX buffer for use by 9P Client lib - 8K in size. * @param buf_size Somewhat redundant, buffer size expected to be 8k. * @return 0 = success, -ve = error. */ int virtio_9p_init(struct virtio_device *dev, void *tx_buf, void *rx_buf, int buf_size) { struct vring_avail *vq_avail; /* Check for double open */ if (__buf_size) return -1; __buf_size = buf_size; dprintf("%s : device at %p\n", __func__, dev->base); dprintf("%s : type is %04x\n", __func__, dev->type); /* Reset device */ // XXX That will clear the virtq base. We need to move // initializing it to here anyway // // virtio_reset_device(dev); /* Acknowledge device. */ virtio_set_status(dev, VIRTIO_STAT_ACKNOWLEDGE); /* Tell HV that we know how to drive the device. */ virtio_set_status(dev, VIRTIO_STAT_ACKNOWLEDGE | VIRTIO_STAT_DRIVER); /* Device specific setup - we do not support special features */ virtio_set_guest_features(dev, 0); vq_avail = virtio_get_vring_avail(dev, 0); vq_avail->flags = VRING_AVAIL_F_NO_INTERRUPT; vq_avail->idx = 0; /* Tell HV that setup succeeded */ virtio_set_status(dev, VIRTIO_STAT_ACKNOWLEDGE | VIRTIO_STAT_DRIVER |VIRTIO_STAT_DRIVER_OK); /* Setup 9P library. */ p9_reg_transport(virtio_9p_transact, dev,(uint8_t *)tx_buf, (uint8_t *)rx_buf); dprintf("%s : complete\n", __func__); return 0; }
void virtio_reinit_start(struct virtio_softc *sc) { int i; virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK); virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER); for (i = 0; i < sc->sc_nvqs; i++) { int n; struct virtqueue *vq = &sc->sc_vqs[i]; n = virtio_read_queue_size(sc, vq->vq_index); if (n == 0) /* vq disappeared */ continue; if (n != vq->vq_num) { panic("%s: virtqueue size changed, vq index %d\n", sc->sc_dev.dv_xname, vq->vq_index); } virtio_init_vq(sc, vq, 1); virtio_setup_queue(sc, vq->vq_index, vq->vq_dmamap->dm_segs[0].ds_addr / VIRTIO_PAGE_SIZE); } }
/*
 * Probe/initialise the virtio memory balloon PCI device.
 *
 * Performs the legacy virtio-pci bring-up sequence: ACKNOWLEDGE ->
 * DRIVER, read the host feature bits, create the two balloon
 * virtqueues (inflate/deflate), then set DRIVER_OK and read the ISR
 * register to clear any pending interrupt state.
 *
 * @param pci_hdr    PCI header of the device (currently unused).
 * @param dev        virtio device state; also cached in memb_dev.
 * @param msi_vector MSI vector (currently unused).
 * @return 0 on success.
 */
int init_virtio_memballoon_pci(pci_dev_header_t *pci_hdr, virtio_dev_t *dev,
			       uint32_t *msi_vector)
{
	unsigned long addr;
	unsigned long features;

	(void)pci_hdr;		/* not used yet */
	(void)msi_vector;	/* not used yet */

	memb_dev = dev;

	/*
	 * Fix: the status field is a bit mask — OR the bits in instead of
	 * arithmetic '+', which would corrupt the field if a bit were
	 * already set.
	 */
	virtio_set_status(dev,
	    virtio_get_status(dev) | VIRTIO_CONFIG_S_ACKNOWLEDGE);
	DEBUG("Initializing VIRTIO memory balloon status :%x : \n",
	    virtio_get_status(dev));

	virtio_set_status(dev, virtio_get_status(dev) | VIRTIO_CONFIG_S_DRIVER);

	addr = dev->pci_ioaddr + VIRTIO_PCI_HOST_FEATURES;
	features = inl(addr);
	DEBUG(" driver Initialising VIRTIO memory balloon hostfeatures :%x:\n",
	    features);

	virtio_createQueue(0, dev, 2);/* both are send queues*/
	virtio_createQueue(1, dev, 2);

	virtio_set_status(dev,
	    virtio_get_status(dev) | VIRTIO_CONFIG_S_DRIVER_OK);
	/* Fix: the format string has a %x but no argument was passed. */
	DEBUG(" NEW Initialising.. VIRTIO PCI COMPLETED with driver ok :%x \n",
	    virtio_get_status(dev));

	/* Reading the ISR register acknowledges pending interrupts. */
	inb(dev->pci_ioaddr + VIRTIO_PCI_ISR);

	/* Fix: function is declared int but had no return statement. */
	return 0;
}
/*
 * 32-bit MMIO write handler for the Syborg virtio transport.
 * Offsets at or above SYBORG_VIRTIO_CONFIG fall through to the device's
 * config space; everything below is a transport register, decoded by
 * 32-bit word index (offset >> 2).
 */
static void syborg_virtio_writel(void *opaque, target_phys_addr_t offset,
                                 uint32_t value)
{
    SyborgVirtIOProxy *s = opaque;
    VirtIODevice *vdev = s->vdev;

    DPRINTF("writel 0x%x = 0x%x\n", (int)offset, value);
    if (offset >= SYBORG_VIRTIO_CONFIG) {
        return virtio_config_writel(vdev, offset - SYBORG_VIRTIO_CONFIG,
                                    value);
    }
    switch (offset >> 2) {
    case SYBORG_VIRTIO_GUEST_FEATURES:
        /* Give the backend a chance to react before recording the bits. */
        if (vdev->set_features)
            vdev->set_features(vdev, value);
        vdev->guest_features = value;
        break;
    case SYBORG_VIRTIO_QUEUE_BASE:
        /* Writing 0 as the queue base requests a full device reset. */
        if (value == 0)
            virtio_reset(vdev);
        else
            virtio_queue_set_addr(vdev, vdev->queue_sel, value);
        break;
    case SYBORG_VIRTIO_QUEUE_SEL:
        /* Out-of-range selections are silently ignored. */
        if (value < VIRTIO_PCI_QUEUE_MAX)
            vdev->queue_sel = value;
        break;
    case SYBORG_VIRTIO_QUEUE_NOTIFY:
        if (value < VIRTIO_PCI_QUEUE_MAX) {
            virtio_queue_notify(vdev, value);
        }
        break;
    case SYBORG_VIRTIO_STATUS:
        virtio_set_status(vdev, value & 0xFF);
        /* A resulting status of 0 means the guest asked for a reset. */
        if (vdev->status == 0)
            virtio_reset(vdev);
        break;
    case SYBORG_VIRTIO_INT_ENABLE:
        s->int_enable = value;
        virtio_update_irq(vdev);
        break;
    case SYBORG_VIRTIO_INT_STATUS:
        /* Write-1-to-clear interrupt acknowledge. */
        vdev->isr &= ~value;
        virtio_update_irq(vdev);
        break;
    default:
        BADF("Bad write offset 0x%x\n", (int)offset);
        break;
    }
}
/*
 * Finish the re-initialisation started by virtio_reinit_start():
 * report DRIVER_OK so the host resumes normal operation.
 */
void virtio_reinit_end(struct virtio_softc *sc)
{
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
}
/*
 * Autoconf attach for the legacy virtio PCI transport (NetBSD).
 *
 * Identifies the device from the PCI subsystem ID, maps the I/O BAR,
 * resets the device and performs the ACK/DRIVER handshake, then probes
 * for a matching child driver. Any failure after the handshake marks
 * the device FAILED; success ends with DRIVER_OK.
 */
static void
virtio_attach(device_t parent, device_t self, void *aux)
{
	struct virtio_softc *sc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision;
	pcireg_t id;
	char const *intrstr;
	pci_intr_handle_t ih;

	/* Only legacy (revision 0) virtio devices are supported. */
	revision = PCI_REVISION(pa->pa_class);
	if (revision != 0) {
		aprint_normal(": unknown revision 0x%02x; giving up\n",
		    revision);
		return;
	}
	aprint_normal("\n");
	aprint_naive("\n");

	/* subsystem ID shows what I am */
	id = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
	aprint_normal_dev(self, "%s Virtio %s Device (rev. 0x%02x)\n",
	    pci_findvendor(id),
	    (PCI_PRODUCT(id) < NDEVNAMES ?
	        virtio_device_name[PCI_PRODUCT(id)] : "Unknown"),
	    revision);

	sc->sc_dev = self;
	sc->sc_pc = pc;
	sc->sc_tag = tag;
	sc->sc_iot = pa->pa_iot;
	sc->sc_dmat = pa->pa_dmat;
	/* Config space offset for the no-MSI layout. */
	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;

	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_iosize)) {
		aprint_error_dev(self, "can't map i/o space\n");
		return;
	}

	/* Reset, then announce ourselves to the host (ACK -> DRIVER). */
	virtio_device_reset(sc);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	/* XXX: use softc as aux... */
	sc->sc_childdevid = PCI_PRODUCT(id);
	sc->sc_child = NULL;
	config_found(self, sc, NULL);
	if (sc->sc_child == NULL) {
		aprint_error_dev(self,
		    "no matching child driver; not configured\n");
		return;
	}
	if (sc->sc_child == (void*)1) { /* this shows error */
		aprint_error_dev(self, "virtio configuration failed\n");
		virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	/* sc_ipl was chosen by the child during config_found(). */
	sc->sc_ih = pci_intr_establish(pc, ih, sc->sc_ipl, virtio_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	/* Everything is set up: tell the host we are live. */
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);

	return;
}
/*
 * Probe/attach the (single) VirtIO disk instance.
 *
 * Probes the PCI function, negotiates features, builds the
 * phantom_device_t wrapper and walks the device to DRIVER ->
 * DRIVER_OK. Returns the new device, or 0 if no/duplicate device,
 * probe failure, or out of memory.
 */
phantom_device_t *driver_virtio_disk_probe( pci_cfg_t *pci, int stage )
{
    (void) stage;

    /* Only a single instance is supported: vdev is a file-scope object. */
    if(vdev.pci)
    {
        printf("Just one drv instance yet\n");
        return 0;
    }

    vdev.interrupt = driver_virtio_disk_interrupt;
    vdev.name = "VirtIODisk0";

    // Say we need it. Not sure, really, that we do. :)
    vdev.guest_features = VIRTIO_BLK_F_BARRIER;

    if( virtio_probe( &vdev, pci ) )
        return 0;

    //u_int8_t status = virtio_get_status( &vdev );
    //printf("Status is: 0x%x\n", status );

    SHOW_FLOW( 1, "Features are: %b", vdev.host_features, "\020\1BARRIER\2SIZE_MAX\3SEG_MAX\5GEOM\6RDONLY\7BLK_SIZE" );

    rodisk = vdev.host_features & (1<<VIRTIO_BLK_F_RO);
    if(rodisk)
        SHOW_FLOW0( 1, "Disk is RDONLY");

    SHOW_FLOW( 1, "Registered at IRQ %d IO 0x%X", vdev.irq, vdev.basereg );

    phantom_device_t * dev = (phantom_device_t *)malloc(sizeof(phantom_device_t));
    if( dev == 0 )
    {
        // Fix: malloc() result was dereferenced without a NULL check.
        printf("Out of memory for VirtIO disk device\n");
        return 0;
    }
    dev->name = "VirtIO Disk";
    dev->seq_number = seq_number++;
    dev->drv_private = &vdev;

    virtio_set_status( &vdev, VIRTIO_CONFIG_S_DRIVER );

    struct virtio_blk_config cfg;
    virtio_get_config_struct( &vdev, &cfg, sizeof(cfg) );

    // capacity is in 512-byte sectors; 2048 sectors per megabyte
    SHOW_FLOW( 1, "VIRTIO disk size is %d Mb", cfg.capacity/2048 );

    virtio_set_status( &vdev, VIRTIO_CONFIG_S_DRIVER|VIRTIO_CONFIG_S_DRIVER_OK );

#if 0
    printf("Will write to disk\n");
    //getchar();
    static char test[512] = "Hello virtio disk";

    physaddr_t pa;
    void *va;
    hal_pv_alloc( &pa, &va, sizeof(test) );
    strlcpy( va, test, sizeof(test) );
    driver_virtio_disk_write( &vdev, pa, sizeof(test), 0, 0 );

    printf("Write to disk requested\n");
    //getchar();
#endif

    phantom_disk_partition_t *p = phantom_create_virtio_partition_struct( cfg.capacity, &vdev );
    (void) p;

#if 0
    errno_t ret = phantom_register_disk_drive(p);
    if( ret )
        SHOW_ERROR( 0, "Can't register VirtIO drive: %d", ret );
#endif

    return dev;
}
/*
 * MMIO write handler for the (legacy) virtio-mmio transport.
 *
 * Offsets at or above VIRTIO_MMIO_CONFIG address the device-specific
 * config space and allow 1/2/4-byte accesses; every other register is
 * 32-bit only and decoded by the switch below.
 */
static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    DPRINTF("virtio_mmio_write offset 0x%x value 0x%" PRIx64 "\n",
            (int)offset, value);

    if (!vdev) {
        /* If no backend is present, we just make all registers
         * write-ignored. This allows us to provide transports with
         * no backend plugged in.
         */
        return;
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            virtio_config_writeb(vdev, offset, value);
            break;
        case 2:
            virtio_config_writew(vdev, offset, value);
            break;
        case 4:
            virtio_config_writel(vdev, offset, value);
            break;
        default:
            abort();
        }
        return;
    }
    if (size != 4) {
        DPRINTF("wrong size access to register!\n");
        return;
    }
    switch (offset) {
    case VIRTIO_MMIO_HOSTFEATURESSEL:
        proxy->host_features_sel = value;
        break;
    case VIRTIO_MMIO_GUESTFEATURES:
        /* Only feature word 0 is accepted on this legacy transport. */
        if (!proxy->guest_features_sel) {
            virtio_set_features(vdev, value);
        }
        break;
    case VIRTIO_MMIO_GUESTFEATURESSEL:
        proxy->guest_features_sel = value;
        break;
    case VIRTIO_MMIO_GUESTPAGESIZE:
        /* Guest tells us its page size; remember it as a shift count.
         * A zero or non-power-of-two-ish value degenerates to shift 0. */
        proxy->guest_page_shift = ctz32(value);
        if (proxy->guest_page_shift > 31) {
            proxy->guest_page_shift = 0;
        }
        DPRINTF("guest page size %" PRIx64 " shift %d\n", value,
                proxy->guest_page_shift);
        break;
    case VIRTIO_MMIO_QUEUESEL:
        /* Out-of-range queue selections are silently ignored. */
        if (value < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = value;
        }
        break;
    case VIRTIO_MMIO_QUEUENUM:
        DPRINTF("mmio_queue write %d max %d\n", (int)value,
                VIRTQUEUE_MAX_SIZE);
        virtio_queue_set_num(vdev, vdev->queue_sel, value);
        /* Note: only call this function for legacy devices */
        virtio_queue_update_rings(vdev, vdev->queue_sel);
        break;
    case VIRTIO_MMIO_QUEUEALIGN:
        /* Note: this is only valid for legacy devices */
        virtio_queue_set_align(vdev, vdev->queue_sel, value);
        break;
    case VIRTIO_MMIO_QUEUEPFN:
        /* Writing PFN 0 is the legacy way of requesting a device reset;
         * otherwise the PFN is scaled by the guest page size. */
        if (value == 0) {
            virtio_reset(vdev);
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel,
                                  value << proxy->guest_page_shift);
        }
        break;
    case VIRTIO_MMIO_QUEUENOTIFY:
        if (value < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, value);
        }
        break;
    case VIRTIO_MMIO_INTERRUPTACK:
        /* Write-1-to-clear interrupt status bits. */
        atomic_and(&vdev->isr, ~value);
        virtio_update_irq(vdev);
        break;
    case VIRTIO_MMIO_STATUS:
        /* Keep ioeventfd consistent with DRIVER_OK: stop it before the
         * bit is dropped, start it after the bit is set. */
        if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_mmio_stop_ioeventfd(proxy);
        }
        virtio_set_status(vdev, value & 0xff);
        if (value & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_mmio_start_ioeventfd(proxy);
        }
        /* A resulting status of 0 means the guest requested a reset. */
        if (vdev->status == 0) {
            virtio_reset(vdev);
        }
        break;
    case VIRTIO_MMIO_MAGIC:
    case VIRTIO_MMIO_VERSION:
    case VIRTIO_MMIO_DEVICEID:
    case VIRTIO_MMIO_VENDORID:
    case VIRTIO_MMIO_HOSTFEATURES:
    case VIRTIO_MMIO_QUEUENUMMAX:
    case VIRTIO_MMIO_INTERRUPTSTATUS:
        DPRINTF("write to readonly register\n");
        break;
    default:
        DPRINTF("bad register offset\n");
    }
}
/*
 * Interpret one channel command word (CCW) issued by the guest against a
 * virtio-ccw subchannel and dispatch it to the matching virtio operation.
 *
 * Returns 0 on success, or a negative errno:
 *   -EINVAL for a missing device or bad data length,
 *   -EFAULT for a missing/unmappable data address,
 *   -ENOSYS for an unknown command code.
 */
static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
{
    int ret;
    VqInfoBlock info;
    uint8_t status;
    VirtioFeatDesc features;
    void *config;
    hwaddr indicators;
    VqConfigBlock vq_config;
    VirtioCcwDevice *dev = sch->driver_data;
    bool check_len;
    int len;
    hwaddr hw_len;

    if (!dev) {
        return -EINVAL;
    }
    trace_virtio_ccw_interpret_ccw(sch->cssid, sch->ssid, sch->schid,
                                   ccw.cmd_code);
    /* Exact-length checking applies unless suppress-length-indication
     * is set without data chaining. */
    check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));

    /* Look at the command. */
    switch (ccw.cmd_code) {
    case CCW_CMD_SET_VQ:
        if (check_len) {
            if (ccw.count != sizeof(info)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(info)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            /* Fetch the fields of the VqInfoBlock, laid out back to
             * back in guest memory. */
            info.queue = ldq_phys(ccw.cda);
            info.align = ldl_phys(ccw.cda + sizeof(info.queue));
            info.index = lduw_phys(ccw.cda + sizeof(info.queue)
                                   + sizeof(info.align));
            info.num = lduw_phys(ccw.cda + sizeof(info.queue)
                                 + sizeof(info.align)
                                 + sizeof(info.index));
            ret = virtio_ccw_set_vqs(sch, info.queue, info.align, info.index,
                                     info.num);
            sch->curr_status.scsw.count = 0;
        }
        break;
    case CCW_CMD_VDEV_RESET:
        virtio_reset(dev->vdev);
        ret = 0;
        break;
    case CCW_CMD_READ_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            /* The feature-word index follows the features field. */
            features.index = ldub_phys(ccw.cda + sizeof(features.features));
            if (features.index < ARRAY_SIZE(dev->host_features)) {
                features.features = dev->host_features[features.index];
            } else {
                /* Return zeroes if the guest supports more feature bits. */
                features.features = 0;
            }
            stl_le_phys(ccw.cda, features.features);
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            features.index = ldub_phys(ccw.cda + sizeof(features.features));
            features.features = ldl_le_phys(ccw.cda);
            if (features.index < ARRAY_SIZE(dev->host_features)) {
                /* Let the backend react, then record the guest's bits. */
                if (dev->vdev->set_features) {
                    dev->vdev->set_features(dev->vdev, features.features);
                }
                dev->vdev->guest_features = features.features;
            } else {
                /*
                 * If the guest supports more feature bits, assert that it
                 * passes us zeroes for those we don't support.
                 */
                if (features.features) {
                    fprintf(stderr, "Guest bug: features[%i]=%x (expected 0)\n",
                            features.index, features.features);
                    /* XXX: do a unit check here? */
                }
            }
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_CONF:
        if (check_len) {
            if (ccw.count > dev->vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, dev->vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            /* Refresh the cached config, then copy it to the guest. */
            dev->vdev->get_config(dev->vdev, dev->vdev->config);
            /* XXX config space endianness */
            cpu_physical_memory_write(ccw.cda, dev->vdev->config, len);
            sch->curr_status.scsw.count = ccw.count - len;
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_CONF:
        if (check_len) {
            if (ccw.count > dev->vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, dev->vdev->config_len);
        hw_len = len;
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            /* Map the guest buffer, copy it into config space, then let
             * the backend react via set_config. */
            config = cpu_physical_memory_map(ccw.cda, &hw_len, 0);
            if (!config) {
                ret = -EFAULT;
            } else {
                len = hw_len;
                /* XXX config space endianness */
                memcpy(dev->vdev->config, config, len);
                cpu_physical_memory_unmap(config, hw_len, 0, hw_len);
                if (dev->vdev->set_config) {
                    dev->vdev->set_config(dev->vdev, dev->vdev->config);
                }
                sch->curr_status.scsw.count = ccw.count - len;
                ret = 0;
            }
        }
        break;
    case CCW_CMD_WRITE_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            status = ldub_phys(ccw.cda);
            virtio_set_status(dev->vdev, status);
            /* A resulting status of 0 means the guest asked for a reset. */
            if (dev->vdev->status == 0) {
                virtio_reset(dev->vdev);
            }
            sch->curr_status.scsw.count = ccw.count - sizeof(status);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        indicators = ldq_phys(ccw.cda);
        if (!indicators) {
            ret = -EFAULT;
        } else {
            dev->indicators = indicators;
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_CONF_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        indicators = ldq_phys(ccw.cda);
        if (!indicators) {
            ret = -EFAULT;
        } else {
            /* Config-change indicators live separately from vq ones. */
            dev->indicators2 = indicators;
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_VQ_CONF:
        if (check_len) {
            if (ccw.count != sizeof(vq_config)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(vq_config)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            /* Report the maximum ring size for the requested queue. */
            vq_config.index = lduw_phys(ccw.cda);
            vq_config.num_max = virtio_queue_get_num(dev->vdev,
                                                     vq_config.index);
            stw_phys(ccw.cda + sizeof(vq_config.index), vq_config.num_max);
            sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
            ret = 0;
        }
        break;
    default:
        ret = -ENOSYS;
        break;
    }
    return ret;
}
/**
 * Module init for virtio via PCI.
 * Checks whether we're responsible for the given device and set up
 * the virtqueue configuration.
 *
 * Returns 0 on success, -1 if the device is not ours, the BAR is not
 * an I/O BAR, or a ring allocation fails.
 */
int vn_module_init_pci(snk_kernel_t *snk_kernel_int, pci_config_t *conf)
{
	uint64_t bar;
	int i;

	dprintk("virtionet: doing virtionet_module_init_pci!\n");

	virtiodev.type = VIRTIO_TYPE_PCI;

	/* Check whether the driver can handle this device by verifying vendor,
	 * device id and class code. */
	if (conf->vendor_id != 0x1af4) {
		dprintk("virtionet: unsupported vendor id\n");
		return -1;
	}
	if (conf->device_id < 0x1000 || conf->device_id > 0x103f) {
		dprintk("virtionet: unsupported device id\n");
		return -1;
	}
	if (conf->class_code != 0x20000) {
		dprintk("virtionet: unsupported class code\n");
		return -1;
	}

	/* Read BAR0 from PCI config space (offset 0x10). */
	bar = snk_kernel_interface->pci_config_read(conf->puid, 4, conf->bus,
						    conf->devfn, 0x10);
	/* Bit 0 set means I/O space BAR; we require that. */
	if (!(bar & 1)) {
		printk("First BAR is not an I/O BAR!\n");
		return -1;
	}
	/* Mask off the type bits to get the raw address. */
	bar &= ~3ULL;

	dprintk("untranslated bar = %llx\n", bar);
	snk_kernel_interface->translate_addr((void *)&bar);
	dprintk("translated bar = %llx\n", bar);
	virtiodev.base = (void*)bar;

	/* Reset device */
	virtio_reset_device(&virtiodev);

	/* The queue information can be retrieved via the virtio header that
	 * can be found in the I/O BAR. First queue is the receive queue,
	 * second the transmit queue, and the forth is the control queue for
	 * networking options.
	 * We are only interested in the receive and transmit queue here. */
	for (i = VQ_RX; i <= VQ_TX; i++) {
		/* Select ring (0=RX, 1=TX): */
		vq[i].id = i - VQ_RX;
		ci_write_16(virtiodev.base + VIRTIOHDR_QUEUE_SELECT,
			    cpu_to_le16(vq[i].id));
		vq[i].size = le16_to_cpu(ci_read_16(virtiodev.base
						    + VIRTIOHDR_QUEUE_SIZE));
		/* The ring must be page-aligned; the host is given its
		 * page frame number below. */
		vq[i].desc = malloc_aligned(virtio_vring_size(vq[i].size),
					    4096);
		if (!vq[i].desc) {
			printk("malloc failed!\n");
			return -1;
		}
		memset(vq[i].desc, 0, virtio_vring_size(vq[i].size));
		ci_write_32(virtiodev.base + VIRTIOHDR_QUEUE_ADDRESS,
			    cpu_to_le32((long)vq[i].desc / 4096));
		/* avail follows the descriptor table; used is aligned up
		 * past the avail ring. */
		vq[i].avail = (void*)vq[i].desc
			      + vq[i].size * sizeof(struct vring_desc);
		vq[i].used = (void*)VQ_ALIGN((long)vq[i].avail
					     + vq[i].size
					       * sizeof(struct vring_avail));
		dprintk("%i: vq.id = %lx\nvq.size =%lx\n vq.avail =%lx\nvq.used=%lx\n",
			i, vq[i].id, vq[i].size, vq[i].avail, vq[i].used);
	}

	/* Copy MAC address */
	for (i = 0; i < 6; i++) {
		virtionet_interface.mac_addr[i] =
			ci_read_8(virtiodev.base + VIRTIOHDR_MAC_ADDRESS + i);
	}

	/* Acknowledge device. */
	virtio_set_status(&virtiodev, VIRTIO_STAT_ACKNOWLEDGE);

	return 0;
}
/*
 * Autoconf attach for the legacy virtio PCI transport (OpenBSD).
 *
 * Identifies the device from the PCI subsystem ID, maps the I/O BAR,
 * resets the device and performs the ACK/DRIVER handshake, probes for
 * a child driver, and hooks up the interrupt. Any failure after the
 * handshake unwinds via the fail_ labels and marks the device FAILED;
 * success ends with DRIVER_OK.
 */
void
virtio_pci_attach(struct device *parent, struct device *self, void *aux)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)self;
	struct virtio_softc *vsc = &sc->sc_sc;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision;
	pcireg_t id;
	char const *intrstr;
	pci_intr_handle_t ih;

	/* Only legacy (revision 0) virtio devices are supported. */
	revision = PCI_REVISION(pa->pa_class);
	if (revision != 0) {
		printf("unknown revision 0x%02x; giving up\n", revision);
		return;
	}

	/* subsystem ID shows what I am */
	id = PCI_PRODUCT(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
	printf(": Virtio %s Device", virtio_device_string(id));

#ifdef notyet
	if (pci_get_capability(pc, tag, PCI_CAP_MSIX, NULL, NULL))
		printf(", msix capable");
#endif
	printf("\n");

	vsc->sc_ops = &virtio_pci_ops;
	sc->sc_pc = pc;
	vsc->sc_dmat = pa->pa_dmat;
	/* Config space offset for the no-MSI layout. */
	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;

	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_iosize, 0)) {
		printf("%s: can't map i/o space\n", vsc->sc_dev.dv_xname);
		return;
	}

	/* Reset, then announce ourselves to the host (ACK -> DRIVER). */
	virtio_device_reset(vsc);
	virtio_pci_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_pci_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	/* XXX: use softc as aux... */
	vsc->sc_childdevid = id;
	vsc->sc_child = NULL;
	config_found(self, sc, NULL);
	if (vsc->sc_child == NULL) {
		printf("%s: no matching child driver; not configured\n",
		    vsc->sc_dev.dv_xname);
		goto fail_1;
	}
	if (vsc->sc_child == VIRTIO_CHILD_ERROR) {
		printf("%s: virtio configuration failed\n",
		    vsc->sc_dev.dv_xname);
		goto fail_1;
	}

	/* Prefer MSI; fall back to INTx. */
	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
		printf("%s: couldn't map interrupt\n", vsc->sc_dev.dv_xname);
		goto fail_2;
	}
	intrstr = pci_intr_string(pc, ih);
	/*
	 * We always set the IPL_MPSAFE flag in order to do the relatively
	 * expensive ISR read without lock, and then grab the kernel lock in
	 * the interrupt handler.
	 * For now, we don't support IPL_MPSAFE vq_done functions.
	 */
	KASSERT((vsc->sc_ipl & IPL_MPSAFE) == 0);
	sc->sc_ih = pci_intr_establish(pc, ih, vsc->sc_ipl | IPL_MPSAFE,
	    virtio_pci_intr, sc, vsc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf("%s: couldn't establish interrupt",
		    vsc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_2;
	}
	printf("%s: %s\n", vsc->sc_dev.dv_xname, intrstr);

	/* Everything is set up: tell the host we are live. */
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
	return;

fail_2:
	config_detach(vsc->sc_child, 0);
fail_1:
	/* no pci_mapreg_unmap() or pci_intr_unmap() */
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
}