static void
pci_hostbridge_setup(struct pci_devinst *pi, uint16_t vendor, uint16_t device)
{
	/* config space */
	pci_set_cfgdata16(pi, PCIR_VENDOR, vendor);
	pci_set_cfgdata16(pi, PCIR_DEVICE, device);
	pci_set_cfgdata8(pi, PCIR_HDRTYPE, PCIM_HDRTYPE_NORMAL);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_BRIDGE);
	pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_BRIDGE_HOST);

	pci_emul_add_pciecap(pi, PCIEM_TYPE_ROOT_PORT);
}
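/*
 * A minimal sketch of how a device model might call the setup helper above.
 * The function name, the signature, and the 0x1022/0x7432 (AMD) IDs are
 * illustrative assumptions, not taken from this code base.
 */
static int
pci_amd_hostbridge_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
	/* program the host-bridge config space with AMD vendor/device IDs */
	pci_hostbridge_setup(pi, 0x1022, 0x7432);
	return (0);
}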
static int
pci_hostbridge_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
	/* config space */
	pci_set_cfgdata16(pi, PCIR_VENDOR, 0x1275);	/* NetApp */
	pci_set_cfgdata16(pi, PCIR_DEVICE, 0x1275);	/* NetApp */
	pci_set_cfgdata8(pi, PCIR_HDRTYPE, PCIM_HDRTYPE_NORMAL);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_BRIDGE);
	pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_BRIDGE_HOST);

	pci_emul_add_pciecap(pi, PCIEM_TYPE_ROOT_PORT);

	return (0);
}
static int
pci_wdt_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
	/* the wdt has just one instance */
	if (wdt_state.reboot_enabled && wdt_state.timer1_val) {
		perror("wdt can't be initialized twice, please check!");
		return -1;
	}

	/* init wdt state info */
	wdt_state.timer.clockid = CLOCK_MONOTONIC;
	if (acrn_timer_init(&wdt_state.timer, wdt_expired_handler, dev) != 0) {
		return -1;
	}

	wdt_state.reboot_enabled = true;
	wdt_state.intr_enabled = false;
	wdt_state.intr_active = false;
	wdt_state.locked = false;
	wdt_state.wdt_armed = false;
	wdt_state.wdt_enabled = false;

	wdt_state.stage = 1;
	wdt_state.timer1_val = DEFAULT_MAX_TIMER_VAL;
	wdt_state.timer2_val = DEFAULT_MAX_TIMER_VAL;
	wdt_state.unlock_state = 0;

	pci_emul_alloc_bar(dev, 0, PCIBAR_MEM32, WDT_REG_BAR_SIZE);

	/* initialize config space */
	pci_set_cfgdata16(dev, PCIR_VENDOR, PCI_VENDOR_ID_INTEL);
	pci_set_cfgdata16(dev, PCIR_DEVICE, PCI_DEVICE_ID_INTEL_ESB);
	pci_set_cfgdata8(dev, PCIR_CLASS, PCIC_BASEPERIPH);
	pci_set_cfgdata8(dev, PCIR_SUBCLASS, PCIS_BASEPERIPH_OTHER);

	pci_emul_add_msicap(dev, 1);
	pci_lintr_request(dev);

#ifdef WDT_DEBUG
	dbg_file = fopen("/tmp/wdt_log", "w+");
#endif

	DPRINTF("%s: iobar=0x%lx, size=%ld\n", __func__,
	    dev->bar[0].addr, dev->bar[0].size);

	return 0;
}
static int
pci_vtcon_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
	struct pci_vtcon_softc *sc;
	char *portname = NULL;
	char *portpath = NULL;
	char *opt;
	int i;

	sc = calloc(1, sizeof(struct pci_vtcon_softc));
	sc->vsc_config = calloc(1, sizeof(struct pci_vtcon_config));
	sc->vsc_config->max_nr_ports = VTCON_MAXPORTS;
	sc->vsc_config->cols = 80;
	sc->vsc_config->rows = 25;

	vi_softc_linkup(&sc->vsc_vs, &vtcon_vi_consts, sc, pi, sc->vsc_queues);
	sc->vsc_vs.vs_mtx = &sc->vsc_mtx;

	for (i = 0; i < VTCON_MAXQ; i++) {
		sc->vsc_queues[i].vq_qsize = VTCON_RINGSZ;
		sc->vsc_queues[i].vq_notify = i % 2 == 0
		    ? pci_vtcon_notify_rx
		    : pci_vtcon_notify_tx;
	}

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_CONSOLE);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_SIMPLECOMM);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_CONSOLE);
	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);

	if (vi_intr_init(&sc->vsc_vs, 1, fbsdrun_virtio_msix()))
		return (1);
	vi_set_io_bar(&sc->vsc_vs, 0);

	/* create control port */
	sc->vsc_control_port.vsp_sc = sc;
	sc->vsc_control_port.vsp_txq = 2;
	sc->vsc_control_port.vsp_rxq = 3;
	sc->vsc_control_port.vsp_cb = pci_vtcon_control_tx;
	sc->vsc_control_port.vsp_enabled = true;

	while ((opt = strsep(&opts, ",")) != NULL) {
		portname = strsep(&opt, "=");
		portpath = strdup(opt);

		/* create port */
		if (pci_vtcon_sock_add(sc, portname, portpath) < 0) {
			fprintf(stderr, "cannot create port %s: %s\n",
			    portname, strerror(errno));
			return (1);
		}
	}

	return (0);
}
static __inline void
CFGWRITE(struct pci_devinst *pi, int coff, uint32_t val, int bytes)
{
	if (bytes == 1)
		pci_set_cfgdata8(pi, coff, val);
	else if (bytes == 2)
		pci_set_cfgdata16(pi, coff, val);
	else
		pci_set_cfgdata32(pi, coff, val);
}
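/*
 * A sketch of the matching read-side helper, assuming the usual
 * pci_get_cfgdata{8,16,32} accessors; shown here for symmetry with
 * CFGWRITE above, not copied from this code base.
 */
static __inline uint32_t
CFGREAD(struct pci_devinst *pi, int coff, int bytes)
{
	if (bytes == 1)
		return (pci_get_cfgdata8(pi, coff));
	else if (bytes == 2)
		return (pci_get_cfgdata16(pi, coff));
	else
		return (pci_get_cfgdata32(pi, coff));
}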
static int
pci_lpc_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
	/*
	 * Do not allow more than one LPC bridge to be configured.
	 */
	if (lpc_bridge != NULL)
		return (-1);

	if (lpc_init() != 0)
		return (-1);

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, LPC_DEV);
	pci_set_cfgdata16(pi, PCIR_VENDOR, LPC_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_BRIDGE);
	pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_BRIDGE_ISA);

	lpc_bridge = pi;

	return (0);
}
static int
pci_vtrnd_init(struct pci_devinst *pi, UNUSED char *opts)
{
	struct pci_vtrnd_softc *sc;
	int fd;
	int len;
	uint8_t v;

	/*
	 * Should always be able to open /dev/random.
	 */
	fd = open("/dev/random", O_RDONLY | O_NONBLOCK);
	assert(fd >= 0);

	/*
	 * Check that device is seeded and non-blocking.
	 */
	len = (int) read(fd, &v, sizeof(v));
	if (len <= 0) {
		WPRINTF(("vtrnd: /dev/random not ready, read(): %d", len));
		return (1);
	}

	sc = calloc(1, sizeof(struct pci_vtrnd_softc));

	vi_softc_linkup(&sc->vrsc_vs, &vtrnd_vi_consts, sc, pi, &sc->vrsc_vq);
	sc->vrsc_vs.vs_mtx = &sc->vrsc_mtx;

	sc->vrsc_vq.vq_qsize = VTRND_RINGSZ;

	/* keep /dev/random opened while emulating */
	sc->vrsc_fd = fd;

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_RANDOM);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_CRYPTO);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_ENTROPY);
	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);

	if (vi_intr_init(&sc->vrsc_vs, 1, fbsdrun_virtio_msix()))
		return (1);

	vi_set_io_bar(&sc->vrsc_vs, 0);

	return (0);
}
static int
passthru_add_msicap(struct pci_devinst *pi, int msgnum, int nextptr)
{
	int capoff, i;
	struct msicap msicap;
	u_char *capdata;

	pci_populate_msicap(&msicap, msgnum, nextptr);

	/*
	 * XXX
	 * Copy the msi capability structure in the last 16 bytes of the
	 * config space. This is wrong because it could shadow something
	 * useful to the device.
	 */
	capoff = 256 - roundup(sizeof(msicap), 4);
	capdata = (u_char *)&msicap;
	for (i = 0; i < sizeof(msicap); i++)
		pci_set_cfgdata8(pi, capoff + i, capdata[i]);

	return (capoff);
}
static int
pci_uart_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
	struct pci_uart_softc *sc;
	uint64_t bar;
	int ivec;

	sc = malloc(sizeof(struct pci_uart_softc));
	memset(sc, 0, sizeof(struct pci_uart_softc));

	pi->pi_arg = sc;
	sc->pi = pi;

	pthread_mutex_init(&sc->mtx, NULL);

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, COM_DEV);
	pci_set_cfgdata16(pi, PCIR_VENDOR, COM_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_SIMPLECOMM);
	if (pci_is_legacy(pi)) {
		pci_uart_legacy_res(&bar, &ivec);
		pci_emul_alloc_pbar(pi, 0, bar, PCIBAR_IO, 8);
	} else {
		ivec = -1;
		pci_emul_alloc_bar(pi, 0, PCIBAR_IO, 8);
	}
	pci_lintr_request(pi, ivec);

	if (opts != NULL && !strcmp("stdio", opts) && !pci_uart_stdio) {
		pci_uart_stdio = 1;
		sc->stdio = 1;
	}

	pci_uart_reset(sc);

	return (0);
}
static int
pci_uart_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
	struct uart_softc *sc;

	pci_emul_alloc_bar(pi, 0, PCIBAR_IO, UART_IO_BAR_SIZE);
	pci_lintr_request(pi);

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, COM_DEV);
	pci_set_cfgdata16(pi, PCIR_VENDOR, COM_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_SIMPLECOMM);

	sc = uart_init(pci_uart_intr_assert, pci_uart_intr_deassert, pi);
	pi->pi_arg = sc;

	if (uart_set_backend(sc, opts) != 0) {
		fprintf(stderr, "Unable to initialize backend '%s' for "
		    "pci uart at %d:%d\n", opts, pi->pi_slot, pi->pi_func);
		return (-1);
	}

	return (0);
}
static int
pci_vtnet_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
	MD5_CTX mdctx;
	unsigned char digest[16];
	char nstr[80];
	char tname[MAXCOMLEN + 1];
	struct pci_vtnet_softc *sc;
	char *devname;
	char *vtopts;
	int mac_provided;

	sc = calloc(1, sizeof(struct pci_vtnet_softc));

	pthread_mutex_init(&sc->vsc_mtx, NULL);

	vi_softc_linkup(&sc->vsc_vs, &vtnet_vi_consts, sc, pi, sc->vsc_queues);
	sc->vsc_vs.vs_mtx = &sc->vsc_mtx;

	sc->vsc_queues[VTNET_RXQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_RXQ].vq_notify = pci_vtnet_ping_rxq;
	sc->vsc_queues[VTNET_TXQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_TXQ].vq_notify = pci_vtnet_ping_txq;
#ifdef notyet
	sc->vsc_queues[VTNET_CTLQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_CTLQ].vq_notify = pci_vtnet_ping_ctlq;
#endif

	/*
	 * Attempt to open the tap device and read the MAC address
	 * if specified
	 */
	mac_provided = 0;
	sc->vsc_tapfd = -1;
	if (opts != NULL) {
		char tbuf[80];
		int err;

		devname = vtopts = strdup(opts);
		(void) strsep(&vtopts, ",");

		if (vtopts != NULL) {
			err = pci_vtnet_parsemac(vtopts, sc->vsc_config.mac);
			if (err != 0) {
				free(devname);
				return (err);
			}
			mac_provided = 1;
		}

		strcpy(tbuf, "/dev/");
		strlcat(tbuf, devname, sizeof(tbuf));

		free(devname);

		sc->vsc_tapfd = open(tbuf, O_RDWR);
		if (sc->vsc_tapfd == -1) {
			WPRINTF(("open of tap device %s failed\n", tbuf));
		} else {
			/*
			 * Set non-blocking and register for read
			 * notifications with the event loop
			 */
			int opt = 1;
			if (ioctl(sc->vsc_tapfd, FIONBIO, &opt) < 0) {
				WPRINTF(("tap device O_NONBLOCK failed\n"));
				close(sc->vsc_tapfd);
				sc->vsc_tapfd = -1;
			}

			sc->vsc_mevp = mevent_add(sc->vsc_tapfd, EVF_READ,
			    pci_vtnet_tap_callback, sc);
			if (sc->vsc_mevp == NULL) {
				WPRINTF(("Could not register event\n"));
				close(sc->vsc_tapfd);
				sc->vsc_tapfd = -1;
			}
		}
	}

	/*
	 * The default MAC address is the standard NetApp OUI of 00-a0-98,
	 * followed by an MD5 of the PCI slot/func number and dev name
	 */
	if (!mac_provided) {
		snprintf(nstr, sizeof(nstr), "%d-%d-%s", pi->pi_slot,
		    pi->pi_func, vmname);

		MD5Init(&mdctx);
		MD5Update(&mdctx, nstr, strlen(nstr));
		MD5Final(digest, &mdctx);

		sc->vsc_config.mac[0] = 0x00;
		sc->vsc_config.mac[1] = 0xa0;
		sc->vsc_config.mac[2] = 0x98;
		sc->vsc_config.mac[3] = digest[0];
		sc->vsc_config.mac[4] = digest[1];
		sc->vsc_config.mac[5] = digest[2];
	}

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_NET);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_NETWORK);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_NET);

	pci_lintr_request(pi);

	/* link always up */
	sc->vsc_config.status = 1;

	/* use BAR 1 to map MSI-X table and PBA, if we're using MSI-X */
	if (vi_intr_init(&sc->vsc_vs, 1, fbsdrun_virtio_msix()))
		return (1);

	/* use BAR 0 to map config regs in IO space */
	vi_set_io_bar(&sc->vsc_vs, 0);

	sc->resetting = 0;

	sc->rx_merge = 1;
	sc->rx_vhdrlen = sizeof(struct virtio_net_rxhdr);
	sc->rx_in_progress = 0;
	pthread_mutex_init(&sc->rx_mtx, NULL);

	/*
	 * Initialize tx semaphore & spawn TX processing thread.
	 * As of now, only one thread for TX desc processing is
	 * spawned.
	 */
	sc->tx_in_progress = 0;
	pthread_mutex_init(&sc->tx_mtx, NULL);
	pthread_cond_init(&sc->tx_cond, NULL);
	pthread_create(&sc->tx_tid, NULL, pci_vtnet_tx_thread, (void *)sc);
	snprintf(tname, sizeof(tname), "vtnet-%d:%d tx", pi->pi_slot,
	    pi->pi_func);
	pthread_set_name_np(sc->tx_tid, tname);

	return (0);
}
static int
pci_vtblk_init(struct pci_devinst *pi, char *opts)
{
	char bident[sizeof("XX:X:X")];
	struct blockif_ctxt *bctxt;
	MD5_CTX mdctx;
	u_char digest[16];
	struct pci_vtblk_softc *sc;
	off_t size;
	int i, sectsz, sts, sto;

	if (opts == NULL) {
		printf("virtio-block: backing device required\n");
		return (1);
	}

	/*
	 * The supplied backing file has to exist
	 */
	snprintf(bident, sizeof(bident), "%d:%d", pi->pi_slot, pi->pi_func);
	bctxt = blockif_open(opts, bident);
	if (bctxt == NULL) {
		perror("Could not open backing file");
		return (1);
	}

	size = blockif_size(bctxt);
	sectsz = blockif_sectsz(bctxt);
	blockif_psectsz(bctxt, &sts, &sto);

	sc = calloc(1, sizeof(struct pci_vtblk_softc));
	sc->bc = bctxt;
	for (i = 0; i < VTBLK_RINGSZ; i++) {
		struct pci_vtblk_ioreq *io = &sc->vbsc_ios[i];
		io->io_req.br_callback = pci_vtblk_done;
		io->io_req.br_param = io;
		io->io_sc = sc;
		io->io_idx = (uint16_t)i;
	}

	pthread_mutex_init(&sc->vsc_mtx, NULL);

	/* init virtio softc and virtqueues */
	vi_softc_linkup(&sc->vbsc_vs, &vtblk_vi_consts, sc, pi, &sc->vbsc_vq);
	sc->vbsc_vs.vs_mtx = &sc->vsc_mtx;

	sc->vbsc_vq.vq_qsize = VTBLK_RINGSZ;
	/* sc->vbsc_vq.vq_notify = we have no per-queue notify */

	/*
	 * Create an identifier for the backing file. Use parts of the
	 * md5 sum of the filename
	 */
	MD5Init(&mdctx);
	MD5Update(&mdctx, opts, (unsigned)strlen(opts));
	MD5Final(digest, &mdctx);
	snprintf(sc->vbsc_ident, VTBLK_BLK_ID_BYTES,
	    "BHYVE-%02X%02X-%02X%02X-%02X%02X",
	    digest[0], digest[1], digest[2], digest[3], digest[4], digest[5]);

	/* setup virtio block config space */
	sc->vbsc_cfg.vbc_capacity =
	    (uint64_t)(size / DEV_BSIZE);	/* 512-byte units */
	sc->vbsc_cfg.vbc_size_max = 0;		/* not negotiated */
	sc->vbsc_cfg.vbc_seg_max = BLOCKIF_IOV_MAX;
	sc->vbsc_cfg.vbc_geometry.cylinders = 0; /* no geometry */
	sc->vbsc_cfg.vbc_geometry.heads = 0;
	sc->vbsc_cfg.vbc_geometry.sectors = 0;
	sc->vbsc_cfg.vbc_blk_size = (uint32_t)sectsz;
	sc->vbsc_cfg.vbc_topology.physical_block_exp =
	    (uint8_t)((sts > sectsz) ? (ffsll(sts / sectsz) - 1) : 0);
	sc->vbsc_cfg.vbc_topology.alignment_offset =
	    (uint8_t)((sto != 0) ? ((sts - sto) / sectsz) : 0);
	sc->vbsc_cfg.vbc_topology.min_io_size = 0;
	sc->vbsc_cfg.vbc_topology.opt_io_size = 0;
	sc->vbsc_cfg.vbc_writeback = 0;

	/*
	 * Should we move some of this into virtio.c?  Could
	 * have the device, class, and subdev_0 as fields in
	 * the virtio constants structure.
	 */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_BLOCK);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_BLOCK);
	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);

	if (vi_intr_init(&sc->vbsc_vs, 1, fbsdrun_virtio_msix())) {
		blockif_close(sc->bc);
		free(sc);
		return (1);
	}
	vi_set_io_bar(&sc->vbsc_vs, 0);
	return (0);
}
static int
pci_fbuf_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
	int error, prot;
	struct pci_fbuf_softc *sc;

	if (fbuf_sc != NULL) {
		fprintf(stderr, "Only one frame buffer device is allowed.\n");
		return (-1);
	}

	sc = calloc(1, sizeof(struct pci_fbuf_softc));

	pi->pi_arg = sc;

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, 0x40FB);
	pci_set_cfgdata16(pi, PCIR_VENDOR, 0xFB5D);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_DISPLAY);
	pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_DISPLAY_VGA);

	error = pci_emul_alloc_bar(pi, 0, PCIBAR_MEM32, DMEMSZ);
	assert(error == 0);

	error = pci_emul_alloc_bar(pi, 1, PCIBAR_MEM32, FB_SIZE);
	assert(error == 0);

	error = pci_emul_add_msicap(pi, PCI_FBUF_MSI_MSGS);
	assert(error == 0);

	sc->fbaddr = pi->pi_bar[1].addr;
	sc->memregs.fbsize = FB_SIZE;
	sc->memregs.width  = COLS_DEFAULT;
	sc->memregs.height = ROWS_DEFAULT;
	sc->memregs.depth  = 32;

	sc->fsc_pi = pi;

	error = pci_fbuf_parse_opts(sc, opts);
	if (error != 0)
		goto done;

	sc->fb_base = vm_create_devmem(ctx, VM_FRAMEBUFFER, "framebuffer",
	    FB_SIZE);
	if (sc->fb_base == MAP_FAILED) {
		error = -1;
		goto done;
	}
	DPRINTF(DEBUG_INFO, ("fbuf frame buffer base: %p [sz %lu]\r\n",
	    sc->fb_base, FB_SIZE));

	/*
	 * Map the framebuffer into the guest address space.
	 * XXX This may fail if the BAR is different than a prior
	 * run. In this case flag the error. This will be fixed
	 * when a change_memseg api is available.
	 */
	prot = PROT_READ | PROT_WRITE;
	if (vm_mmap_memseg(ctx, sc->fbaddr, VM_FRAMEBUFFER, 0, FB_SIZE,
	    prot) != 0) {
		fprintf(stderr,
		    "pci_fbuf: mapseg failed - try deleting VM and restarting\n");
		error = -1;
		goto done;
	}

	console_init(sc->memregs.width, sc->memregs.height, sc->fb_base);
	console_fb_register(pci_fbuf_render, sc);

	sc->vgasc = vga_init(!sc->use_vga);
	sc->gc_image = console_get_image();

	fbuf_sc = sc;

	memset((void *)sc->fb_base, 0, FB_SIZE);

	error = rfb_init(sc->rfb_host, sc->rfb_port, sc->rfb_wait);
done:
	if (error)
		free(sc);

	return (error);
}
static int
cfginitmsi(struct passthru_softc *sc)
{
	int i, ptr, capptr, cap, sts, caplen, table_size;
	uint32_t u32;
	struct pcisel sel;
	struct pci_devinst *pi;
	struct msixcap msixcap;
	uint32_t *msixcap_ptr;

	pi = sc->psc_pi;
	sel = sc->psc_sel;

	/*
	 * Parse the capabilities and cache the location of the MSI
	 * and MSI-X capabilities.
	 */
	sts = read_config(&sel, PCIR_STATUS, 2);
	if (sts & PCIM_STATUS_CAPPRESENT) {
		ptr = read_config(&sel, PCIR_CAP_PTR, 1);
		while (ptr != 0 && ptr != 0xff) {
			cap = read_config(&sel, ptr + PCICAP_ID, 1);
			if (cap == PCIY_MSI) {
				/*
				 * Copy the MSI capability into the config
				 * space of the emulated pci device
				 */
				sc->psc_msi.capoff = ptr;
				sc->psc_msi.msgctrl = read_config(&sel,
				    ptr + 2, 2);
				sc->psc_msi.emulated = 0;
				caplen = msi_caplen(sc->psc_msi.msgctrl);
				capptr = ptr;
				while (caplen > 0) {
					u32 = read_config(&sel, capptr, 4);
					pci_set_cfgdata32(pi, capptr, u32);
					caplen -= 4;
					capptr += 4;
				}
			} else if (cap == PCIY_MSIX) {
				/*
				 * Copy the MSI-X capability
				 */
				sc->psc_msix.capoff = ptr;
				caplen = 12;
				msixcap_ptr = (uint32_t *)&msixcap;
				capptr = ptr;
				while (caplen > 0) {
					u32 = read_config(&sel, capptr, 4);
					*msixcap_ptr = u32;
					pci_set_cfgdata32(pi, capptr, u32);
					caplen -= 4;
					capptr += 4;
					msixcap_ptr++;
				}
			}
			ptr = read_config(&sel, ptr + PCICAP_NEXTPTR, 1);
		}
	}

	if (sc->psc_msix.capoff != 0) {
		pi->pi_msix.pba_bar =
		    msixcap.pba_info & PCIM_MSIX_BIR_MASK;
		pi->pi_msix.pba_offset =
		    msixcap.pba_info & ~PCIM_MSIX_BIR_MASK;
		pi->pi_msix.table_bar =
		    msixcap.table_info & PCIM_MSIX_BIR_MASK;
		pi->pi_msix.table_offset =
		    msixcap.table_info & ~PCIM_MSIX_BIR_MASK;
		pi->pi_msix.table_count = MSIX_TABLE_COUNT(msixcap.msgctrl);
		pi->pi_msix.pba_size = PBA_SIZE(pi->pi_msix.table_count);

		/* Allocate the emulated MSI-X table array */
		table_size = pi->pi_msix.table_count * MSIX_TABLE_ENTRY_SIZE;
		pi->pi_msix.table = malloc(table_size);
		bzero(pi->pi_msix.table, table_size);

		/* Mask all table entries */
		for (i = 0; i < pi->pi_msix.table_count; i++) {
			pi->pi_msix.table[i].vector_control |=
			    PCIM_MSIX_VCTRL_MASK;
		}
	}

#ifdef LEGACY_SUPPORT
	/*
	 * If the passthrough device does not support MSI then craft a
	 * MSI capability for it. We link the new MSI capability at the
	 * head of the list of capabilities.
	 */
	if ((sts & PCIM_STATUS_CAPPRESENT) != 0 && sc->psc_msi.capoff == 0) {
		int origptr, msiptr;

		origptr = read_config(&sel, PCIR_CAP_PTR, 1);
		msiptr = passthru_add_msicap(pi, 1, origptr);
		sc->psc_msi.capoff = msiptr;
		sc->psc_msi.msgctrl = pci_get_cfgdata16(pi, msiptr + 2);
		sc->psc_msi.emulated = 1;
		pci_set_cfgdata8(pi, PCIR_CAP_PTR, msiptr);
	}
#endif

	/* Make sure one of the capabilities is present */
	if (sc->psc_msi.capoff == 0 && sc->psc_msix.capoff == 0)
		return (-1);
	else
		return (0);
}
static int
pci_vtblk_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
	struct stat sbuf;
	struct pci_vtblk_softc *sc;
	off_t size;
	int fd;
	int sectsz;

	if (opts == NULL) {
		printf("virtio-block: backing device required\n");
		return (1);
	}

	/*
	 * Access to guest memory is required. Fail if
	 * memory not mapped
	 */
	if (paddr_guest2host(0) == NULL)
		return (1);

	/*
	 * The supplied backing file has to exist
	 */
	fd = open(opts, O_RDWR);
	if (fd < 0) {
		perror("Could not open backing file");
		return (1);
	}

	if (fstat(fd, &sbuf) < 0) {
		perror("Could not stat backing file");
		close(fd);
		return (1);
	}

	/*
	 * Deal with raw devices
	 */
	size = sbuf.st_size;
	sectsz = DEV_BSIZE;
	if (S_ISCHR(sbuf.st_mode)) {
		if (ioctl(fd, DIOCGMEDIASIZE, &size) < 0 ||
		    ioctl(fd, DIOCGSECTORSIZE, &sectsz)) {
			perror("Could not fetch dev blk/sector size");
			close(fd);
			return (1);
		}
		assert(size != 0);
		assert(sectsz != 0);
	}

	sc = malloc(sizeof(struct pci_vtblk_softc));
	memset(sc, 0, sizeof(struct pci_vtblk_softc));

	pi->pi_arg = sc;
	sc->vbsc_pi = pi;
	sc->vbsc_fd = fd;

	/* setup virtio block config space */
	sc->vbsc_cfg.vbc_capacity = size / sectsz;
	sc->vbsc_cfg.vbc_seg_max = VTBLK_MAXSEGS;
	sc->vbsc_cfg.vbc_blk_size = sectsz;
	sc->vbsc_cfg.vbc_size_max = 0;	/* not negotiated */
	sc->vbsc_cfg.vbc_geom_c = 0;	/* no geometry */
	sc->vbsc_cfg.vbc_geom_h = 0;
	sc->vbsc_cfg.vbc_geom_s = 0;
	sc->vbsc_cfg.vbc_sectors_max = 0;

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_BLOCK);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_BLOCK);
	pci_emul_add_msicap(pi, 1);
	pci_emul_alloc_bar(pi, 0, PCIBAR_IO, VTBLK_REGSZ);

	return (0);
}
static int
pci_vtblk_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
	struct stat sbuf;
	MD5_CTX mdctx;
	u_char digest[16];
	struct pci_vtblk_softc *sc;
	off_t size;
	int fd;
	int sectsz;

	if (opts == NULL) {
		printf("virtio-block: backing device required\n");
		return (1);
	}

	/*
	 * The supplied backing file has to exist
	 */
	fd = open(opts, O_RDWR);
	if (fd < 0) {
		perror("Could not open backing file");
		return (1);
	}

	if (fstat(fd, &sbuf) < 0) {
		perror("Could not stat backing file");
		close(fd);
		return (1);
	}

	/*
	 * Deal with raw devices
	 */
	size = sbuf.st_size;
	sectsz = DEV_BSIZE;
	if (S_ISCHR(sbuf.st_mode)) {
		if (ioctl(fd, DIOCGMEDIASIZE, &size) < 0 ||
		    ioctl(fd, DIOCGSECTORSIZE, &sectsz)) {
			perror("Could not fetch dev blk/sector size");
			close(fd);
			return (1);
		}
		assert(size != 0);
		assert(sectsz != 0);
	}

	sc = calloc(1, sizeof(struct pci_vtblk_softc));

	/* record fd of storage device/file */
	sc->vbsc_fd = fd;

	pthread_mutex_init(&sc->vsc_mtx, NULL);

	/* init virtio softc and virtqueues */
	vi_softc_linkup(&sc->vbsc_vs, &vtblk_vi_consts, sc, pi, &sc->vbsc_vq);
	sc->vbsc_vs.vs_mtx = &sc->vsc_mtx;

	sc->vbsc_vq.vq_qsize = VTBLK_RINGSZ;
	/* sc->vbsc_vq.vq_notify = we have no per-queue notify */

	/*
	 * Create an identifier for the backing file. Use parts of the
	 * md5 sum of the filename
	 */
	MD5Init(&mdctx);
	MD5Update(&mdctx, opts, strlen(opts));
	MD5Final(digest, &mdctx);
	sprintf(sc->vbsc_ident, "BHYVE-%02X%02X-%02X%02X-%02X%02X",
	    digest[0], digest[1], digest[2], digest[3], digest[4], digest[5]);

	/* setup virtio block config space */
	sc->vbsc_cfg.vbc_capacity = size / DEV_BSIZE;	/* 512-byte units */
	sc->vbsc_cfg.vbc_seg_max = VTBLK_MAXSEGS;
	sc->vbsc_cfg.vbc_blk_size = sectsz;
	sc->vbsc_cfg.vbc_size_max = 0;	/* not negotiated */
	sc->vbsc_cfg.vbc_geom_c = 0;	/* no geometry */
	sc->vbsc_cfg.vbc_geom_h = 0;
	sc->vbsc_cfg.vbc_geom_s = 0;
	sc->vbsc_cfg.vbc_sectors_max = 0;

	/*
	 * Should we move some of this into virtio.c?  Could
	 * have the device, class, and subdev_0 as fields in
	 * the virtio constants structure.
	 */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_BLOCK);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_BLOCK);

	pci_lintr_request(pi);

	if (vi_intr_init(&sc->vbsc_vs, 1, fbsdrun_virtio_msix()))
		return (1);

	vi_set_io_bar(&sc->vbsc_vs, 0);

	return (0);
}
static int
pci_vtnet_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
	MD5_CTX mdctx;
	unsigned char digest[16];
	char nstr[80];
	char tname[MAXCOMLEN + 1];
	struct pci_vtnet_softc *sc;
	char *devname;
	char *vtopts;
	int mac_provided;

	sc = calloc(1, sizeof(struct pci_vtnet_softc));

	pthread_mutex_init(&sc->vsc_mtx, NULL);

	vi_softc_linkup(&sc->vsc_vs, &vtnet_vi_consts, sc, pi, sc->vsc_queues);
	sc->vsc_vs.vs_mtx = &sc->vsc_mtx;

	sc->vsc_queues[VTNET_RXQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_RXQ].vq_notify = pci_vtnet_ping_rxq;
	sc->vsc_queues[VTNET_TXQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_TXQ].vq_notify = pci_vtnet_ping_txq;
#ifdef notyet
	sc->vsc_queues[VTNET_CTLQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_CTLQ].vq_notify = pci_vtnet_ping_ctlq;
#endif

	/*
	 * Attempt to open the tap device and read the MAC address
	 * if specified
	 */
	mac_provided = 0;
	sc->vsc_tapfd = -1;
	sc->vsc_nmd = NULL;
	if (opts != NULL) {
		int err;

		devname = vtopts = strdup(opts);
		(void) strsep(&vtopts, ",");

		if (vtopts != NULL) {
			err = pci_vtnet_parsemac(vtopts, sc->vsc_config.mac);
			if (err != 0) {
				free(devname);
				return (err);
			}
			mac_provided = 1;
		}

		if (strncmp(devname, "vale", 4) == 0)
			pci_vtnet_netmap_setup(sc, devname);
		if (strncmp(devname, "tap", 3) == 0 ||
		    strncmp(devname, "vmnet", 5) == 0)
			pci_vtnet_tap_setup(sc, devname);

		free(devname);
	}

	/*
	 * The default MAC address is the standard NetApp OUI of 00-a0-98,
	 * followed by an MD5 of the PCI slot/func number and dev name
	 */
	if (!mac_provided) {
		snprintf(nstr, sizeof(nstr), "%d-%d-%s", pi->pi_slot,
		    pi->pi_func, vmname);

		MD5Init(&mdctx);
		MD5Update(&mdctx, nstr, strlen(nstr));
		MD5Final(digest, &mdctx);

		sc->vsc_config.mac[0] = 0x00;
		sc->vsc_config.mac[1] = 0xa0;
		sc->vsc_config.mac[2] = 0x98;
		sc->vsc_config.mac[3] = digest[0];
		sc->vsc_config.mac[4] = digest[1];
		sc->vsc_config.mac[5] = digest[2];
	}

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_NET);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_NETWORK);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_NET);
	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);

	/* Link is up if we managed to open tap device or vale port. */
	sc->vsc_config.status = (opts == NULL || sc->vsc_tapfd >= 0 ||
	    sc->vsc_nmd != NULL);

	/* use BAR 1 to map MSI-X table and PBA, if we're using MSI-X */
	if (vi_intr_init(&sc->vsc_vs, 1, fbsdrun_virtio_msix()))
		return (1);

	/* use BAR 0 to map config regs in IO space */
	vi_set_io_bar(&sc->vsc_vs, 0);

	sc->resetting = 0;

	sc->rx_merge = 1;
	sc->rx_vhdrlen = sizeof(struct virtio_net_rxhdr);
	sc->rx_in_progress = 0;
	pthread_mutex_init(&sc->rx_mtx, NULL);

	/*
	 * Initialize tx semaphore & spawn TX processing thread.
	 * As of now, only one thread for TX desc processing is
	 * spawned.
	 */
	sc->tx_in_progress = 0;
	pthread_mutex_init(&sc->tx_mtx, NULL);
	pthread_cond_init(&sc->tx_cond, NULL);
	pthread_create(&sc->tx_tid, NULL, pci_vtnet_tx_thread, (void *)sc);
	snprintf(tname, sizeof(tname), "vtnet-%d:%d tx", pi->pi_slot,
	    pi->pi_func);
	pthread_set_name_np(sc->tx_tid, tname);

	return (0);
}