Example #1
static int virtioblk_read(struct dev *dev, void *buffer, size_t count, blkno_t blkno, int flags) {
  struct virtioblk *vblk = (struct virtioblk *) dev->privdata;
  struct virtioblk_request req;
  struct scatterlist sg[3];
  int rc;

  // Setup read request
  virtioblk_setup_request(&req, sg, buffer, count);
  req.hdr.type = VIRTIO_BLK_T_IN;
  req.hdr.ioprio = 0;
  req.hdr.sector = blkno;
  
  // Issue request
  rc = virtio_enqueue(&vblk->vq, sg, 1, 2, &req);
  if (rc < 0) return rc;
  virtio_kick(&vblk->vq);
  
  // Wait for request to complete
  enter_wait(THREAD_WAIT_DEVIO);

  // Check status code. On success the used length reported by the
  // device (req.size) includes the trailing status byte, hence the -1.
  switch (req.status) {
    case VIRTIO_BLK_S_OK: rc = req.size - 1; break;
    case VIRTIO_BLK_S_UNSUPP: rc = -ENODEV; break;
    case VIRTIO_BLK_S_IOERR: rc = -EIO; break;
    default: rc = -EUNKNOWN; break;
  }

  return rc;
}
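The 1-out/2-in split passed to virtio_enqueue follows from the scatterlist layout: one device-readable entry (the request header) followed by two device-writable ones (the data buffer and the status byte). virtioblk_setup_request is not shown above; a plausible sketch, with field names beyond hdr/status/size being assumptions:

static void virtioblk_setup_request(struct virtioblk_request *req, struct scatterlist *sg, void *buffer, size_t count) {
  req->thread = self();          // completion callback wakes this thread (assumed field)
  req->size = 0;                 // filled in with the used length on completion
  sg[0].data = &req->hdr;        // device-readable request header
  sg[0].size = sizeof(req->hdr);
  sg[1].data = buffer;           // data buffer, device-writable for reads
  sg[1].size = count;
  sg[2].data = &req->status;     // single status byte written by the device
  sg[2].size = 1;
}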
Example #2
static int install_virtiocon(struct unit *unit) {
  struct virtiocon *vcon;
  int rc;
  int size;
  int i;

  // Setup unit information
  if (!unit) return -EINVAL;
  unit->vendorname = "VIRTIO";
  unit->productname = "VIRTIO Virtual Console Device";

  // Allocate memory for device
  vcon = kmalloc(sizeof(struct virtiocon));
  if (vcon == NULL) return -ENOMEM;
  memset(vcon, 0, sizeof(struct virtiocon));

  // Initialize virtual device
  rc = virtio_device_init(&vcon->vd, unit, VIRTIO_CON_F_SIZE);
  if (rc < 0) {
    kfree(vcon);
    return rc;
  }

  // Get console device configuration
  virtio_get_config(&vcon->vd, &vcon->config, sizeof(vcon->config));

  // Initialize queues for console
  rc = virtio_queue_init(&vcon->input_queue, &vcon->vd, 0, virtiocon_input_callback);
  if (rc < 0) {
    kfree(vcon);
    return rc;
  }
  rc = virtio_queue_init(&vcon->output_queue, &vcon->vd, 1, virtiocon_output_callback);
  if (rc < 0) {
    kfree(vcon);
    return rc;
  }

  // Fill input queue
  size = virtio_queue_size(&vcon->input_queue);
  for (i = 0; i < size; ++i) {
    struct scatterlist sg[1];
    char *data = kmalloc(PAGESIZE);
    if (!data) return -ENOMEM;  // note: vcon and buffers already queued are owned by the device and not reclaimed here
    sg[0].data = data;
    sg[0].size = PAGESIZE;
    virtio_enqueue(&vcon->input_queue, sg, 0, 1, data);
  }
  virtio_kick(&vcon->input_queue);

  // Create device
  vcon->devno = dev_make("vc#", &virtiocon_driver, unit, vcon);
  virtio_setup_complete(&vcon->vd, 1);
  kprintf(KERN_INFO "%s: virtio console, %dx%d, %d ports, feats=%#x\n", 
          device(vcon->devno)->name, 
          vcon->config.cols, vcon->config.rows, vcon->config.max_ports, vcon->vd.features);

  return 0;
}
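The loop above posts every input buffer with the buffer pointer itself as the enqueue tag, so the completion callback can recycle it. virtiocon_input_callback is not shown in the source; a hedged sketch, assuming a virtio_dequeue(vq, &len) helper that returns the tag of a completed buffer and a virtiocon_receive routine that hands bytes to the console layer (both names are assumptions):

static int virtiocon_input_callback(struct virtio_queue *vq) {
  unsigned int len;
  char *data;

  // Drain completed buffers, pass the bytes up, and re-post each page
  // so the input queue stays full
  while ((data = virtio_dequeue(vq, &len)) != NULL) {
    struct scatterlist sg[1];

    virtiocon_receive(data, len);

    sg[0].data = data;           // recycle the same page buffer
    sg[0].size = PAGESIZE;
    virtio_enqueue(vq, sg, 0, 1, data);
  }
  virtio_kick(vq);
  return 0;
}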
Example #3
static int virtiocon_write(struct dev *dev, void *buffer, size_t count, blkno_t blkno, int flags) {
  struct virtiocon *vcon = (struct virtiocon *) dev->privdata;
  struct scatterlist sg[1];
  int rc;

  // Issue request, passing the current thread as the tag so the
  // output completion callback knows whom to wake
  sg[0].data = buffer;
  sg[0].size = count;
  rc = virtio_enqueue(&vcon->output_queue, sg, 1, 0, self());
  if (rc < 0) return rc;
  virtio_kick(&vcon->output_queue);
  
  // Wait for request to complete
  enter_wait(THREAD_WAIT_DEVIO);

  return count;
}
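The enqueue tag here is self(), the writing thread, which pairs with the enter_wait above: the output callback only has to wake whoever owns each completed buffer. A hedged sketch of that callback, again assuming a virtio_dequeue helper and sanos' mark_thread_ready (charge/boost arguments assumed):

static int virtiocon_output_callback(struct virtio_queue *vq) {
  unsigned int len;
  struct thread *t;

  // Each completed buffer's tag is the thread blocked in virtiocon_write
  while ((t = virtio_dequeue(vq, &len)) != NULL) {
    mark_thread_ready(t, 1, 2);
  }
  return 0;
}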
Example #4
static status_t send_command_response(struct virtio_gpu_dev *gdev, const void *cmd, size_t cmd_len, void **_res, size_t res_len)
{
    DEBUG_ASSERT(gdev);
    DEBUG_ASSERT(cmd);
    DEBUG_ASSERT(_res);
    DEBUG_ASSERT(cmd_len + res_len < PAGE_SIZE);

    LTRACEF("gdev %p, cmd %p, cmd_len %zu, res %p, res_len %zu\n", gdev, cmd, cmd_len, _res, res_len);

    uint16_t i;
    struct vring_desc *desc = virtio_alloc_desc_chain(gdev->dev, 0, 2, &i);
    DEBUG_ASSERT(desc);

    memcpy(gdev->gpu_request, cmd, cmd_len);

    desc->addr = gdev->gpu_request_phys;
    desc->len = cmd_len;
    desc->flags |= VRING_DESC_F_NEXT;

    /* set the second descriptor to the response with the write bit set */
    desc = virtio_desc_index_to_desc(gdev->dev, 0, desc->next);
    DEBUG_ASSERT(desc);

    void *res = (void *)((uint8_t *)gdev->gpu_request + cmd_len);
    *_res = res;
    paddr_t res_phys = gdev->gpu_request_phys + cmd_len;
    memset(res, 0, res_len);

    desc->addr = res_phys;
    desc->len = res_len;
    desc->flags = VRING_DESC_F_WRITE;

    /* submit the transfer */
    virtio_submit_chain(gdev->dev, 0, i);

    /* kick it off */
    virtio_kick(gdev->dev, 0);

    /* wait for result */
    event_wait(&gdev->io_event);

    return NO_ERROR;
}
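A typical caller pairs a control header with a typed response in a single call. A sketch in the style of the driver's display-info query; the virtio_gpu_ctrl_hdr and virtio_gpu_resp_display_info structures and the command/response constants follow the virtio-gpu spec and are assumed to be in scope:

static status_t get_display_info(struct virtio_gpu_dev *gdev)
{
    struct virtio_gpu_ctrl_hdr req;
    memset(&req, 0, sizeof(req));
    req.type = VIRTIO_GPU_CMD_GET_DISPLAY_INFO;

    /* send the command; on return, info points into the shared gpu_request page */
    struct virtio_gpu_resp_display_info *info;
    status_t err = send_command_response(gdev, &req, sizeof(req), (void **)&info, sizeof(*info));
    if (err < NO_ERROR)
        return err;

    if (info->hdr.type != VIRTIO_GPU_RESP_OK_DISPLAY_INFO)
        return ERR_NOT_FOUND;

    /* info->pmodes[] now holds the scanout geometry */
    return NO_ERROR;
}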
Example #5
ssize_t virtio_block_read_write(struct virtio_device *dev, void *buf, off_t offset, size_t len, bool write)
{
    struct virtio_block_dev *bdev = (struct virtio_block_dev *)dev->priv;

    uint16_t i;
    struct vring_desc *desc;
    paddr_t pa;
    vaddr_t va = (vaddr_t)buf;
    const size_t savedlen = len;    /* len is consumed by the scatter-gather loop below */

    LTRACEF("dev %p, buf %p, offset 0x%llx, len %zu\n", dev, buf, offset, len);

    mutex_acquire(&bdev->lock);

    /* set up the request */
    bdev->blk_req->type = write ? VIRTIO_BLK_T_OUT : VIRTIO_BLK_T_IN;
    bdev->blk_req->ioprio = 0;
    bdev->blk_req->sector = offset / 512;
    LTRACEF("blk_req type %u ioprio %u sector %llu\n",
            bdev->blk_req->type, bdev->blk_req->ioprio, bdev->blk_req->sector);

    /* put together a transfer */
    desc = virtio_alloc_desc_chain(dev, 0, 3, &i);
    LTRACEF("after alloc chain desc %p, i %u\n", desc, i);
    DEBUG_ASSERT(desc);

    // XXX not cache safe.
    // At the moment only tested on arm qemu, which doesn't emulate cache.

    /* set up the descriptor pointing to the head */
    desc->addr = bdev->blk_req_phys;
    desc->len = sizeof(struct virtio_blk_req);
    desc->flags |= VRING_DESC_F_NEXT;

    /* set up the descriptor pointing to the buffer */
    desc = virtio_desc_index_to_desc(dev, 0, desc->next);
#if WITH_KERNEL_VM
    /* translate the first buffer */
    arch_mmu_query(va, &pa, NULL);
    desc->addr = (uint64_t)pa;
    /* desc->len is filled in below */
#else
    desc->addr = (uint64_t)(uintptr_t)buf;
    desc->len = len;
#endif
    desc->flags |= write ? 0 : VRING_DESC_F_WRITE; /* mark buffer as device-writable if it's a block read */
    desc->flags |= VRING_DESC_F_NEXT;

#if WITH_KERNEL_VM
    /* see if we need to add more descriptors due to scatter gather */
    paddr_t next_pa = PAGE_ALIGN(pa + 1);
    desc->len = MIN(next_pa - pa, len);
    LTRACEF("first descriptor va 0x%lx desc->addr 0x%llx desc->len %u\n", va, desc->addr, desc->len);
    len -= desc->len;
    while (len > 0) {
        /* amount of source buffer handled by this iteration of the loop */
        size_t len_tohandle = MIN(len, PAGE_SIZE);

        /* translate the next page in the buffer */
        va = PAGE_ALIGN(va + 1);
        arch_mmu_query(va, &pa, NULL);
        LTRACEF("va now 0x%lx, pa 0x%lx, next_pa 0x%lx, remaining len %zu\n", va, pa, next_pa, len);

        /* is the new translated physical address contiguous to the last one? */
        if (next_pa == pa) {
            LTRACEF("extending last one by %zu bytes\n", len_tohandle);
            desc->len += len_tohandle;
        } else {
            uint16_t next_i = virtio_alloc_desc(dev, 0);
            struct vring_desc *next_desc = virtio_desc_index_to_desc(dev, 0, next_i);
            DEBUG_ASSERT(next_desc);

            LTRACEF("doesn't extend, need new desc, allocated desc %i (%p)\n", next_i, next_desc);

            /* fill this descriptor in and put it after the last one but before the response descriptor */
            next_desc->addr = (uint64_t)pa;
            next_desc->len = len_tohandle;
            next_desc->flags = write ? 0 : VRING_DESC_F_WRITE; /* mark buffer as device-writable if it's a block read */
            next_desc->flags |= VRING_DESC_F_NEXT;
            next_desc->next = desc->next;
            desc->next = next_i;

            desc = next_desc;
        }
        len -= len_tohandle;
        next_pa += PAGE_SIZE;
    }
#endif

    /* set up the descriptor pointing to the response */
    desc = virtio_desc_index_to_desc(dev, 0, desc->next);
    desc->addr = bdev->blk_response_phys;
    desc->len = 1;
    desc->flags = VRING_DESC_F_WRITE;

    /* submit the transfer */
    virtio_submit_chain(dev, 0, i);

    /* kick it off */
    virtio_kick(dev, 0);

    /* wait for the transfer to complete */
    event_wait(&bdev->io_event);

    LTRACEF("status 0x%hhx\n", bdev->blk_response);

    mutex_release(&bdev->lock);

    return savedlen;
}
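The subtle step above is the first-chunk computation: PAGE_ALIGN(pa + 1) is the next page boundary strictly above pa, so the first descriptor covers only the bytes from pa up to that boundary (or the whole buffer, if shorter), and each later iteration handles at most one page. The same arithmetic as a standalone, hypothetical helper (not part of the driver; the macros mirror LK's):

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096u
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(uintptr_t)(PAGE_SIZE - 1))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Count how many descriptors a buffer at physical address pa of length
 * len would need, assuming no two of its pages happen to be physically
 * contiguous (the driver merges contiguous pages, so this is an upper bound). */
static size_t count_chunks(uintptr_t pa, size_t len)
{
    size_t chunks = 0;
    while (len > 0) {
        uintptr_t next = PAGE_ALIGN(pa + 1);           /* next boundary above pa */
        size_t chunk = MIN((size_t)(next - pa), len);  /* bytes up to the boundary */
        len -= chunk;
        pa = next;
        chunks++;
    }
    return chunks;
}

For example, a 4096-byte buffer starting at pa = 0x1200 spans 0x1200..0x2200 and splits into 0xe00 + 0x200... no wait, 0xe00 + 0x1200? It crosses one boundary (0x2000), giving two chunks of 0xe00 and 0x200 bytes.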
Example #6
ssize_t virtio_block_read_write(struct virtio_device *dev, void *buf, off_t offset, size_t len, bool write)
{
    struct virtio_block_dev *bdev = (struct virtio_block_dev *)dev->priv;

    uint16_t i;
    struct vring_desc *desc;
    paddr_t pa;

    LTRACEF("dev %p, buf %p, offset 0x%llx, len %zu\n", dev, buf, offset, len);

    mutex_acquire(&bdev->lock);

    /* set up the request */
    bdev->blk_req->type = write ? VIRTIO_BLK_T_OUT : VIRTIO_BLK_T_IN;
    bdev->blk_req->ioprio = 0;
    bdev->blk_req->sector = offset / 512;
    LTRACEF("blk_req type %u ioprio %u sector %llu\n",
            bdev->blk_req->type, bdev->blk_req->ioprio, bdev->blk_req->sector);

    /* put together a transfer */
    desc = virtio_alloc_desc_chain(dev, 0, 3, &i);
    LTRACEF("after alloc chain desc %p, i %u\n", desc, i);
    DEBUG_ASSERT(desc);

    // XXX not cache safe.
    // At the moment only tested on arm qemu, which doesn't emulate cache.

    /* set up the descriptor pointing to the head */
    desc->addr = bdev->blk_req_phys;
    desc->len = sizeof(struct virtio_blk_req);
    desc->flags |= VRING_DESC_F_NEXT;

    /* set up the descriptor pointing to the buffer */
    desc = virtio_desc_index_to_desc(dev, 0, desc->next);
#if WITH_KERNEL_VM
    // XXX handle bufs that cross page boundaries
    arch_mmu_query((vaddr_t)buf, &pa, NULL);
    desc->addr = (uint64_t)pa;
#else
    desc->addr = (uint64_t)(uintptr_t)buf;
#endif
    desc->len = len;
    desc->flags |= write ? 0 : VRING_DESC_F_WRITE; /* mark buffer as device-writable if it's a block read */
    desc->flags |= VRING_DESC_F_NEXT;

    /* set up the descriptor pointing to the response */
    desc = virtio_desc_index_to_desc(dev, 0, desc->next);
    desc->addr = bdev->blk_response_phys;
    desc->len = 1;
    desc->flags = VRING_DESC_F_WRITE;

    /* submit the transfer */
    virtio_submit_chain(dev, 0, i);

    /* kick it off */
    virtio_kick(dev, 0);

    /* wait for the transfer to complete */
    event_wait(&bdev->io_event);

    LTRACEF("status 0x%hhx\n", bdev->blk_response);

    mutex_release(&bdev->lock);

    return len;
}