static int
pci_wdt_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
	/*
	 * The watchdog has exactly one instance (all state lives in the
	 * global wdt_state). Refuse a second initialization: a previously
	 * initialized wdt always has reboot_enabled set and a non-zero
	 * timer1_val.
	 */
	if (wdt_state.reboot_enabled && wdt_state.timer1_val) {
		perror("wdt can't be initialized twice, please check!");
		return -1;
	}

	/* init wdt state info */
	wdt_state.timer.clockid = CLOCK_MONOTONIC;
	if (acrn_timer_init(&wdt_state.timer, wdt_expired_handler, dev) != 0) {
		return -1;
	}

	wdt_state.reboot_enabled = true;
	wdt_state.intr_enabled = false;
	wdt_state.intr_active = false;
	wdt_state.locked = false;
	wdt_state.wdt_armed = false;
	wdt_state.wdt_enabled = false;

	wdt_state.stage = 1;
	wdt_state.timer1_val = DEFAULT_MAX_TIMER_VAL;
	wdt_state.timer2_val = DEFAULT_MAX_TIMER_VAL;
	wdt_state.unlock_state = 0;

	/*
	 * Check the BAR allocation instead of silently ignoring a failure;
	 * without the register BAR the device cannot operate.
	 */
	if (pci_emul_alloc_bar(dev, 0, PCIBAR_MEM32, WDT_REG_BAR_SIZE) != 0)
		return -1;

	/* initialize config space */
	pci_set_cfgdata16(dev, PCIR_VENDOR, PCI_VENDOR_ID_INTEL);
	pci_set_cfgdata16(dev, PCIR_DEVICE, PCI_DEVICE_ID_INTEL_ESB);
	pci_set_cfgdata8(dev, PCIR_CLASS, PCIC_BASEPERIPH);
	pci_set_cfgdata8(dev, PCIR_SUBCLASS, PCIS_BASEPERIPH_OTHER);

	pci_emul_add_msicap(dev, 1);
	pci_lintr_request(dev);

#ifdef WDT_DEBUG
	dbg_file = fopen("/tmp/wdt_log", "w+");
#endif

	DPRINTF("%s: iobar =0x%lx, size=%ld\n", __func__,
			dev->bar[0].addr, dev->bar[0].size);

	return 0;
}
Example #2
0
/*
 * Initialize MSI-X vector capabilities if we're to use MSI-X,
 * or MSI capabilities if not.
 *
 * We assume we want one MSI-X vector per queue, here, plus one
 * for the config vec.
 *
 * Returns 0 on success, 1 if a capability could not be added.
 */
int
vi_intr_init(struct virtio_softc *vs, int barnum, int use_msix)
{
	int nvec;

	if (use_msix) {
		vs->vs_flags |= VIRTIO_USE_MSIX;
		vi_reset_dev(vs); /* set all vectors to NO_VECTOR */
		nvec = vs->vs_vc->vc_nvq + 1;
		if (pci_emul_add_msixcap(vs->vs_pi, nvec, barnum))
			return (1);
	} else
		vs->vs_flags &= ~VIRTIO_USE_MSIX;
	/*
	 * Only 1 MSI vector for bhyve. Propagate the failure instead of
	 * ignoring it, matching the MSI-X path above.
	 */
	if (pci_emul_add_msicap(vs->vs_pi, 1))
		return (1);
	return (0);
}
Example #3
0
static int
pci_fbuf_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
	int error, prot;
	struct pci_fbuf_softc *sc;

	/* Only a single frame buffer instance is supported per VM. */
	if (fbuf_sc != NULL) {
		fprintf(stderr, "Only one frame buffer device is allowed.\n");
		return (-1);
	}

	sc = calloc(1, sizeof(struct pci_fbuf_softc));
	if (sc == NULL) {
		fprintf(stderr, "Could not allocate frame buffer softc.\n");
		return (-1);
	}

	pi->pi_arg = sc;

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, 0x40FB);
	pci_set_cfgdata16(pi, PCIR_VENDOR, 0xFB5D);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_DISPLAY);
	pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_DISPLAY_VGA);

	error = pci_emul_alloc_bar(pi, 0, PCIBAR_MEM32, DMEMSZ);
	assert(error == 0);

	error = pci_emul_alloc_bar(pi, 1, PCIBAR_MEM32, FB_SIZE);
	assert(error == 0);

	error = pci_emul_add_msicap(pi, PCI_FBUF_MSI_MSGS);
	assert(error == 0);

	sc->fbaddr = pi->pi_bar[1].addr;
	sc->memregs.fbsize = FB_SIZE;
	sc->memregs.width  = COLS_DEFAULT;
	sc->memregs.height = ROWS_DEFAULT;
	sc->memregs.depth  = 32;

	sc->fsc_pi = pi;

	error = pci_fbuf_parse_opts(sc, opts);
	if (error != 0)
		goto done;

	sc->fb_base = vm_create_devmem(ctx, VM_FRAMEBUFFER, "framebuffer", FB_SIZE);
	if (sc->fb_base == MAP_FAILED) {
		error = -1;
		goto done;
	}
	DPRINTF(DEBUG_INFO, ("fbuf frame buffer base: %p [sz %lu]\r\n",
	        sc->fb_base, FB_SIZE));

	/*
	 * Map the framebuffer into the guest address space.
	 * XXX This may fail if the BAR is different than a prior
	 * run. In this case flag the error. This will be fixed
	 * when a change_memseg api is available.
	 */
	prot = PROT_READ | PROT_WRITE;
	if (vm_mmap_memseg(ctx, sc->fbaddr, VM_FRAMEBUFFER, 0, FB_SIZE, prot) != 0) {
		fprintf(stderr, "pci_fbuf: mapseg failed - try deleting VM and restarting\n");
		error = -1;
		goto done;
	}

	console_init(sc->memregs.width, sc->memregs.height, sc->fb_base);
	console_fb_register(pci_fbuf_render, sc);

	sc->vgasc = vga_init(!sc->use_vga);
	sc->gc_image = console_get_image();

	fbuf_sc = sc;

	memset((void *)sc->fb_base, 0, FB_SIZE);

	error = rfb_init(sc->rfb_host, sc->rfb_port, sc->rfb_wait);
done:
	if (error) {
		/*
		 * Don't leave dangling references to the freed softc:
		 * pi_arg was set above, and fbuf_sc may have been set
		 * just before a failing rfb_init.
		 */
		pi->pi_arg = NULL;
		fbuf_sc = NULL;
		free(sc);
	}

	return (error);
}
Example #4
0
static int
pci_vtblk_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
	struct stat sbuf;
	struct pci_vtblk_softc *sc;
	off_t size;
	int fd;
	int sectsz;

	if (opts == NULL) {
		printf("virtio-block: backing device required\n");
		return (1);
	}

	/*
	 * Access to guest memory is required. Fail if
	 * memory not mapped
	 */
	if (paddr_guest2host(0) == NULL)
		return (1);

	/*
	 * The supplied backing file has to exist
	 */
	fd = open(opts, O_RDWR);
	if (fd < 0) {
		perror("Could not open backing file");
		return (1);
	}

	if (fstat(fd, &sbuf) < 0) {
		perror("Could not stat backing file");
		close(fd);
		return (1);
	}

	/*
	 * Deal with raw devices
	 */
	size = sbuf.st_size;
	sectsz = DEV_BSIZE;
	if (S_ISCHR(sbuf.st_mode)) {
		if (ioctl(fd, DIOCGMEDIASIZE, &size) < 0 ||
		    ioctl(fd, DIOCGSECTORSIZE, &sectsz)) {
			perror("Could not fetch dev blk/sector size");
			close(fd);
			return (1);
		}
		assert(size != 0);
		assert(sectsz != 0);
	}

	/*
	 * calloc zero-initializes, replacing the previous unchecked
	 * malloc + memset pair; a NULL result must not be dereferenced
	 * and must not leak the backing-file descriptor.
	 */
	sc = calloc(1, sizeof(struct pci_vtblk_softc));
	if (sc == NULL) {
		perror("Could not allocate virtio block softc");
		close(fd);
		return (1);
	}

	pi->pi_arg = sc;
	sc->vbsc_pi = pi;
	sc->vbsc_fd = fd;

	/* setup virtio block config space */
	sc->vbsc_cfg.vbc_capacity = size / sectsz;
	sc->vbsc_cfg.vbc_seg_max = VTBLK_MAXSEGS;
	sc->vbsc_cfg.vbc_blk_size = sectsz;
	sc->vbsc_cfg.vbc_size_max = 0;	/* not negotiated */
	sc->vbsc_cfg.vbc_geom_c = 0;	/* no geometry */
	sc->vbsc_cfg.vbc_geom_h = 0;
	sc->vbsc_cfg.vbc_geom_s = 0;
	sc->vbsc_cfg.vbc_sectors_max = 0;

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_BLOCK);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_BLOCK);
	pci_emul_add_msicap(pi, 1);
	pci_emul_alloc_bar(pi, 0, PCIBAR_IO, VTBLK_REGSZ);

	return (0);
}