Example #1
/**
 * Shut down the virtio-block device.
 * @param  dev  pointer to virtio device information
 */
void
virtioblk_shutdown(struct virtio_device *dev)
{
	/* Quiesce device */
	virtio_set_status(dev, VIRTIO_STAT_FAILED);

	/* Reset device */
	virtio_reset_device(dev);
}
Example #2
status_t virtio_gpu_init(struct virtio_device *dev, uint32_t host_features)
{
    LTRACEF("dev %p, host_features 0x%x\n", dev, host_features);

    /* allocate a new gpu device */
    struct virtio_gpu_dev *gdev = malloc(sizeof(struct virtio_gpu_dev));
    if (!gdev)
        return ERR_NO_MEMORY;

    mutex_init(&gdev->lock);
    event_init(&gdev->io_event, false, EVENT_FLAG_AUTOUNSIGNAL);
    event_init(&gdev->flush_event, false, EVENT_FLAG_AUTOUNSIGNAL);

    gdev->dev = dev;
    dev->priv = gdev;

    gdev->pmode_id = -1;
    gdev->next_resource_id = 1;

    /* allocate memory for a gpu request */
#if WITH_KERNEL_VM
    gdev->gpu_request = pmm_alloc_kpage();
    gdev->gpu_request_phys = vaddr_to_paddr(gdev->gpu_request);
#else
    gdev->gpu_request = malloc(sizeof(struct virtio_gpu_resp_display_info)); // XXX get size better
    gdev->gpu_request_phys = (paddr_t)gdev->gpu_request;
#endif

    /* make sure the device is reset */
    virtio_reset_device(dev);

    volatile struct virtio_gpu_config *config = (struct virtio_gpu_config *)dev->config_ptr;
    dump_gpu_config(config);

    /* ack and set the driver status bit */
    virtio_status_acknowledge_driver(dev);

    // XXX check features bits and ack/nak them

    /* allocate a virtio ring */
    virtio_alloc_ring(dev, 0, 16);

    /* set our irq handler */
    dev->irq_driver_callback = &virtio_gpu_irq_driver_callback;
    dev->config_change_callback = &virtio_gpu_config_change_callback;

    /* set DRIVER_OK */
    virtio_status_driver_ok(dev);

    /* save the main device we've found */
    the_gdev = gdev;

    printf("found virtio gpu device\n");

    return NO_ERROR;
}
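Both this function and the block-device init in Example #7 follow the same ordering against the LK-style virtio helpers: reset the device, set the ACKNOWLEDGE and DRIVER status bits, negotiate features (left as an XXX in both examples), allocate the ring and install callbacks, and only then signal DRIVER_OK. A condensed sketch of that skeleton, using only helpers that appear in the examples (the wrapper name and the ring length are placeholders, not part of either driver):
/* Sketch only: the init ordering shared by the LK examples here.
 * virtio_generic_init_sketch is a hypothetical name; queue index 0 and
 * ring length 16 are placeholders taken from the gpu example. */
static status_t virtio_generic_init_sketch(struct virtio_device *dev)
{
    /* 1. put the device into a known state */
    virtio_reset_device(dev);

    /* 2. set the ACKNOWLEDGE and DRIVER status bits */
    virtio_status_acknowledge_driver(dev);

    /* 3. feature negotiation would go here (marked XXX in the examples) */

    /* 4. allocate the virtqueue and install the irq callback */
    virtio_alloc_ring(dev, 0, 16);
    dev->irq_driver_callback = &virtio_gpu_irq_driver_callback;

    /* 5. tell the device the driver is ready for work */
    virtio_status_driver_ok(dev);

    return NO_ERROR;
}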
Example #3
static void
virtio_blk_cleanup(void)
{
	/* Just free the memory we allocated */
	virtio_blk_free_requests();
	virtio_reset_device(blk_dev);
	virtio_free_queues(blk_dev);
	virtio_free_device(blk_dev);
	blk_dev = NULL;
}
Example #4
/**
 * virtio_9p_init
 *
 * Establish the VIRTIO connection for use with the 9P server. Set up queues
 * and negotiate capabilities. Set up the 9P (Client) library.
 *
 * @param dev[in]	Pointer to the virtio device for the VIRTIO/9P interface.
 * @param tx_buf[in]	TX buffer for use by 9P Client lib - 8K in size.
 * @param rx_buf[in]	RX buffer for use by 9P Client lib - 8K in size.
 * @param buf_size	Somewhat redundant, buffer size expected to be 8K.
 * @return	0 = success, -ve = error.
 */
int virtio_9p_init(struct virtio_device *dev, void *tx_buf, void *rx_buf,
		   int buf_size)
{
	struct vring_avail *vq_avail;
	int status = VIRTIO_STAT_ACKNOWLEDGE;

	/* Check for double open */
	if (__buf_size)
		return -1;
	__buf_size = buf_size;

        dprintf("%s : device at %p\n", __func__, dev->base);
        dprintf("%s : type is %04x\n", __func__, dev->type);

	/* Keep it disabled until the driver is 1.0 capable */
	dev->is_modern = false;

	virtio_reset_device(dev);

	/* Acknowledge device. */
	virtio_set_status(dev, status);

	/* Tell HV that we know how to drive the device. */
	status |= VIRTIO_STAT_DRIVER;
	virtio_set_status(dev, status);

	/* Device specific setup - we do not support special features */
	virtio_set_guest_features(dev, 0);

	if (virtio_queue_init_vq(dev, &vq, 0))
		goto dev_error;

	vq_avail = virtio_get_vring_avail(dev, 0);
	vq_avail->flags = VRING_AVAIL_F_NO_INTERRUPT;
	vq_avail->idx = 0;

	/* Tell HV that setup succeeded */
	status |= VIRTIO_STAT_DRIVER_OK;
	virtio_set_status(dev, status);

	/* Setup 9P library. */
	p9_reg_transport(virtio_9p_transact, dev, (uint8_t *)tx_buf,
			(uint8_t *)rx_buf);

	dprintf("%s : complete\n", __func__);
	return 0;

dev_error:
	printf("%s: failed\n", __func__);
	status |= VIRTIO_STAT_FAILED;
	virtio_set_status(dev, status);
	return -1;
}
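Unlike the LK examples, the SLOF-style drivers accumulate the status bits in a local variable and rewrite the status register as each stage completes, setting VIRTIO_STAT_FAILED on the error path. A minimal sketch of just that progression, using only the constants and helpers shown above (the wrapper name is hypothetical):
/* Sketch only: the status-bit progression used by virtio_9p_init above. */
static void virtio_status_progression_sketch(struct virtio_device *dev)
{
	int status = VIRTIO_STAT_ACKNOWLEDGE;

	virtio_reset_device(dev);	/* clear any previous device state */
	virtio_set_status(dev, status);	/* ACKNOWLEDGE: device has been seen */

	status |= VIRTIO_STAT_DRIVER;	/* DRIVER: we know how to drive it */
	virtio_set_status(dev, status);

	/* feature and queue setup happen here; on failure the driver
	 * writes status | VIRTIO_STAT_FAILED instead */

	status |= VIRTIO_STAT_DRIVER_OK;	/* DRIVER_OK: ready for requests */
	virtio_set_status(dev, status);
}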
Example #5
/*
 * The driver is terminating.  Clean up.
 */
static void
virtio_net_stop(void)
{

	dput(("Terminating"));

	free_contig(data_vir, PACKET_BUF_SZ);
	free_contig(hdrs_vir, BUF_PACKETS * sizeof(hdrs_vir[0]));
	free(packets);

	virtio_reset_device(net_dev);
	virtio_free_queues(net_dev);
	virtio_free_device(net_dev);
	net_dev = NULL;
}
Example #6
static void
sef_cb_signal_handler(int signo)
{
	if (signo != SIGTERM)
		return;

	dput(("Terminating"));

	free_contig(data_vir, PACKET_BUF_SZ);
	free_contig(hdrs_vir, BUF_PACKETS * sizeof(hdrs_vir[0]));
	free(packets);

	virtio_reset_device(net_dev);
	virtio_free_queues(net_dev);
	virtio_free_device(net_dev);
	net_dev = NULL;

	exit(1);
}
Example #7
status_t virtio_block_init(struct virtio_device *dev, uint32_t host_features)
{
    LTRACEF("dev %p, host_features 0x%x\n", dev, host_features);

    /* allocate a new block device */
    struct virtio_block_dev *bdev = malloc(sizeof(struct virtio_block_dev));
    if (!bdev)
        return ERR_NO_MEMORY;

    mutex_init(&bdev->lock);
    event_init(&bdev->io_event, false, EVENT_FLAG_AUTOUNSIGNAL);

    bdev->dev = dev;
    dev->priv = bdev;

    bdev->blk_req = memalign(sizeof(struct virtio_blk_req), sizeof(struct virtio_blk_req));
#if WITH_KERNEL_VM
    arch_mmu_query((vaddr_t)bdev->blk_req, &bdev->blk_req_phys, NULL);
#else
    bdev->blk_freq_phys = (uint64_t)(uintptr_t)bdev->blk_req;
#endif
    LTRACEF("blk_req structure at %p (0x%lx phys)\n", bdev->blk_req, bdev->blk_req_phys);

#if WITH_KERNEL_VM
    arch_mmu_query((vaddr_t)&bdev->blk_response, &bdev->blk_response_phys, NULL);
#else
    bdev->blk_response_phys = (uint64_t)(uintptr_t)&bdev->blk_response;
#endif

    /* make sure the device is reset */
    virtio_reset_device(dev);

    volatile struct virtio_blk_config *config = (struct virtio_blk_config *)dev->config_ptr;

    LTRACEF("capacity 0x%llx\n", config->capacity);
    LTRACEF("size_max 0x%x\n", config->size_max);
    LTRACEF("seg_max  0x%x\n", config->seg_max);
    LTRACEF("blk_size 0x%x\n", config->blk_size);

    /* ack and set the driver status bit */
    virtio_status_acknowledge_driver(dev);

    // XXX check features bits and ack/nak them

    /* allocate a virtio ring */
    virtio_alloc_ring(dev, 0, 256);

    /* set our irq handler */
    dev->irq_driver_callback = &virtio_block_irq_driver_callback;

    /* set DRIVER_OK */
    virtio_status_driver_ok(dev);

    /* construct the block device */
    static uint8_t found_index = 0;
    char buf[16];
    snprintf(buf, sizeof(buf), "virtio%u", found_index++);
    bio_initialize_bdev(&bdev->bdev, buf,
                        config->blk_size, config->capacity,
                        0, NULL);

    /* override our block device hooks */
    bdev->bdev.read_block = &virtio_bdev_read_block;
    bdev->bdev.write_block = &virtio_bdev_write_block;

    bio_register_device(&bdev->bdev);

    printf("found virtio block device of size %lld\n", config->capacity * config->blk_size);

    return NO_ERROR;
}
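The #if WITH_KERNEL_VM blocks above translate the virtual addresses of the request and response buffers into physical addresses the device can use for DMA; without a kernel VM an identity mapping is assumed. The same pattern, factored into a small helper for clarity (the helper name is hypothetical; arch_mmu_query is the call used in the example):
/* Sketch only: virtual-to-physical translation as done in the example. */
static uint64_t virtio_vtop_sketch(void *vaddr)
{
#if WITH_KERNEL_VM
    paddr_t pa = 0;
    arch_mmu_query((vaddr_t)vaddr, &pa, NULL);  /* ask the MMU layer */
    return pa;
#else
    return (uint64_t)(uintptr_t)vaddr;          /* identity-mapped */
#endif
}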
Example #8
/**
 * Module init for virtio via PCI.
 * Checks whether we're responsible for the given device and sets up
 * the virtqueue configuration.
 */
int
vn_module_init_pci(snk_kernel_t *snk_kernel_int, pci_config_t *conf)
{
	uint64_t bar;
	int i;

	dprintk("virtionet: doing virtionet_module_init_pci!\n");

	virtiodev.type = VIRTIO_TYPE_PCI;

	/* Check whether the driver can handle this device by verifying vendor,
	 * device id and class code. */
	if (conf->vendor_id != 0x1af4) {
		dprintk("virtionet: unsupported vendor id\n");
		return -1;
	}
	if (conf->device_id < 0x1000 || conf->device_id > 0x103f) {
		dprintk("virtionet: unsupported device id\n");
		return -1;
	}
	if (conf->class_code != 0x20000) {
		dprintk("virtionet: unsupported class code\n");
		return -1;
	}

	bar = snk_kernel_interface->pci_config_read(conf->puid, 4, conf->bus,
						    conf->devfn, 0x10);

	if (!(bar & 1)) {
		printk("First BAR is not an I/O BAR!\n");
		return -1;
	}
	bar &= ~3ULL;

	dprintk("untranslated bar = %llx\n", bar);

	snk_kernel_interface->translate_addr((void *)&bar);

	dprintk("translated bar = %llx\n", bar);
	virtiodev.base = (void*)bar;

	/* Reset device */
	virtio_reset_device(&virtiodev);

	/* The queue information can be retrieved via the virtio header that
	 * can be found in the I/O BAR. The first queue is the receive queue,
	 * the second is the transmit queue, and the fourth is the control
	 * queue for networking options.
	 * We are only interested in the receive and transmit queues here. */

	for (i=VQ_RX; i<=VQ_TX; i++) {
		/* Select ring (0=RX, 1=TX): */
		vq[i].id = i-VQ_RX;
		ci_write_16(virtiodev.base+VIRTIOHDR_QUEUE_SELECT,
			    cpu_to_le16(vq[i].id));

		vq[i].size = le16_to_cpu(ci_read_16(virtiodev.base+VIRTIOHDR_QUEUE_SIZE));
		vq[i].desc = malloc_aligned(virtio_vring_size(vq[i].size), 4096);
		if (!vq[i].desc) {
			printk("malloc failed!\n");
			return -1;
		}
		memset(vq[i].desc, 0, virtio_vring_size(vq[i].size));
		ci_write_32(virtiodev.base+VIRTIOHDR_QUEUE_ADDRESS,
			    cpu_to_le32((long)vq[i].desc / 4096));
		vq[i].avail = (void*)vq[i].desc
				    + vq[i].size * sizeof(struct vring_desc);
		vq[i].used = (void*)VQ_ALIGN((long)vq[i].avail
				    + vq[i].size * sizeof(struct vring_avail));

		dprintk("%i: vq.id = %lx\nvq.size =%lx\n vq.avail =%lx\nvq.used=%lx\n",
			i, vq[i].id, vq[i].size, vq[i].avail, vq[i].used);
	}

	/* Copy MAC address */
	for (i = 0; i < 6; i++) {
		virtionet_interface.mac_addr[i]
				= ci_read_8(virtiodev.base+VIRTIOHDR_MAC_ADDRESS+i);
	}

	/* Acknowledge device. */
	virtio_set_status(&virtiodev, VIRTIO_STAT_ACKNOWLEDGE);

	return 0;
}
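The loop above relies on virtio_vring_size() and VQ_ALIGN() to lay each ring out as the legacy (pre-1.0) virtio-PCI spec requires: the descriptor table, then the available ring, then the used ring starting on the next 4096-byte boundary. That page alignment is also why only the page frame number (address / 4096) is written to VIRTIOHDR_QUEUE_ADDRESS. A sketch of that size computation following the legacy layout (an illustration only, not the SLOF implementation, and assuming the standard struct vring_used_elem alongside the vring_desc and vring_avail used above):
/* Sketch only: legacy vring footprint for a queue of 'num' entries. */
static unsigned long vring_size_sketch(unsigned int num)
{
	unsigned long align = 4096;
	unsigned long size;

	size = num * sizeof(struct vring_desc);		/* descriptor table */
	size += sizeof(uint16_t) * (3 + num);		/* avail: flags, idx, ring[], used_event */
	size = (size + align - 1) & ~(align - 1);	/* used ring starts page-aligned */
	size += sizeof(uint16_t) * 3			/* used: flags, idx, avail_event */
		+ sizeof(struct vring_used_elem) * num;	/* used ring entries */
	return size;
}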