/**
 * rpmsg_virtio_enqueue_buffer
 *
 * Places buffer on the virtqueue for consumption by the other side.
 *
 * @param rvdev  - pointer to rpmsg virtio
 * @param buffer - buffer pointer
 * @param len    - buffer length
 * @param idx    - buffer index
 *
 * @return - status of function execution
 */
static int rpmsg_virtio_enqueue_buffer(struct rpmsg_virtio_device *rvdev,
				       void *buffer, uint32_t len,
				       uint16_t idx)
{
	unsigned int dev_role = rpmsg_virtio_get_role(rvdev);

#ifndef VIRTIO_SLAVE_ONLY
	if (dev_role == RPMSG_MASTER) {
		/*
		 * Master side: hand a fresh buffer descriptor to the send
		 * virtqueue; the buffer index is not used on this path.
		 */
		struct virtqueue_buf vqbuf = {
			.buf = buffer,
			.len = len,
		};

		(void)idx;
		return virtqueue_add_buffer(rvdev->svq, &vqbuf, 1, 0, buffer);
	}
#endif /*!VIRTIO_SLAVE_ONLY*/

#ifndef VIRTIO_MASTER_ONLY
	if (dev_role == RPMSG_REMOTE) {
		/*
		 * Remote side: return the used buffer by index; the buffer
		 * pointer is not needed on this path.
		 */
		(void)buffer;
		return virtqueue_add_consumed_buffer(rvdev->svq, idx, len);
	}
#endif /*!VIRTIO_MASTER_ONLY*/

	return 0;
}
/** * rpmsg_return_buffer * * Places the used buffer back on the virtqueue. * * @param rdev - pointer to remote core * @param buffer - buffer pointer * @param len - buffer length * @param idx - buffer index * */ void rpmsg_return_buffer(struct remote_device *rdev, void *buffer, unsigned long len, unsigned short idx) { struct llist node; /* Initialize buffer node */ node.data = buffer; node.attr = len; node.next = RPMSG_NULL; node.prev = RPMSG_NULL; if (rdev->role == RPMSG_REMOTE) { virtqueue_add_buffer(rdev->rvq, &node, 0, 1, buffer); } else { virtqueue_add_consumed_buffer(rdev->rvq, idx, len); } }
/**
 * rpmsg_init_vdev
 *
 * Initializes an rpmsg virtio device: binds the rpmsg device to the given
 * virtio device, sets up the RX/TX virtqueues according to the device role,
 * (master only) pre-populates the RX virtqueue with buffers from the shared
 * memory pool, and registers the name-service endpoint if the NS feature is
 * negotiated.
 *
 * @param rvdev      - pointer to the rpmsg virtio device to initialize
 * @param vdev       - pointer to the underlying virtio device
 * @param ns_bind_cb - callback invoked on name-service announcements
 * @param shm_io     - metal I/O region covering the shared memory
 * @param shpool     - shared memory pool (required for master role;
 *                     ignored for remote role)
 *
 * @return - RPMSG_SUCCESS on success, RPMSG_ERR_* code on failure
 */
int rpmsg_init_vdev(struct rpmsg_virtio_device *rvdev,
		    struct virtio_device *vdev,
		    rpmsg_ns_bind_cb ns_bind_cb,
		    struct metal_io_region *shm_io,
		    struct rpmsg_virtio_shm_pool *shpool)
{
	struct rpmsg_device *rdev;
	const char *vq_names[RPMSG_NUM_VRINGS];
	vq_callback *callback[RPMSG_NUM_VRINGS];
	int status;
	unsigned int i, role;

	/* Reset the generic rpmsg device and wire it to this virtio device. */
	rdev = &rvdev->rdev;
	memset(rdev, 0, sizeof(*rdev));
	metal_mutex_init(&rdev->lock);
	rvdev->vdev = vdev;
	rdev->ns_bind_cb = ns_bind_cb;
	vdev->priv = rvdev;
	rdev->ops.send_offchannel_raw = rpmsg_virtio_send_offchannel_raw;
	role = rpmsg_virtio_get_role(rvdev);

#ifndef VIRTIO_MASTER_ONLY
	if (role == RPMSG_REMOTE) {
		/* wait synchro with the master */
		rpmsg_virtio_wait_remote_ready(rvdev);
	}
#endif /*!VIRTIO_MASTER_ONLY*/

	/* Negotiate features; name-service support depends on F_NS. */
	vdev->features = rpmsg_virtio_get_features(rvdev);
	rdev->support_ns = !!(vdev->features & (1 << VIRTIO_RPMSG_F_NS));

#ifndef VIRTIO_SLAVE_ONLY
	if (role == RPMSG_MASTER) {
		/*
		 * Since device is RPMSG Remote so we need to manage the
		 * shared buffers. Create shared memory pool to handle buffers.
		 */
		if (!shpool)
			return RPMSG_ERR_PARAM;
		if (!shpool->size)
			return RPMSG_ERR_NO_BUFF;
		rvdev->shpool = shpool;

		/*
		 * Master view: vring 0 is its RX ring, vring 1 its TX ring.
		 */
		vq_names[0] = "rx_vq";
		vq_names[1] = "tx_vq";
		callback[0] = rpmsg_virtio_rx_callback;
		callback[1] = rpmsg_virtio_tx_callback;
		rvdev->rvq  = vdev->vrings_info[0].vq;
		rvdev->svq  = vdev->vrings_info[1].vq;
	}
#endif /*!VIRTIO_SLAVE_ONLY*/

#ifndef VIRTIO_MASTER_ONLY
	(void)shpool;
	if (role == RPMSG_REMOTE) {
		/*
		 * Remote view is mirrored: vring 0 is the master's RX, hence
		 * the remote's TX, and vice versa.
		 */
		vq_names[0] = "tx_vq";
		vq_names[1] = "rx_vq";
		callback[0] = rpmsg_virtio_tx_callback;
		callback[1] = rpmsg_virtio_rx_callback;
		rvdev->rvq  = vdev->vrings_info[1].vq;
		rvdev->svq  = vdev->vrings_info[0].vq;
	}
#endif /*!VIRTIO_MASTER_ONLY*/

	rvdev->shbuf_io = shm_io;

	/* Create virtqueues for remote device */
	status = rpmsg_virtio_create_virtqueues(rvdev, 0, RPMSG_NUM_VRINGS,
						vq_names, callback);
	if (status != RPMSG_SUCCESS)
		return status;

	/*
	 * Suppress "tx-complete" interrupts
	 * since send method use busy loop when buffer pool exhaust
	 */
	virtqueue_disable_cb(rvdev->svq);

	/* TODO: can have a virtio function to set the shared memory I/O */
	for (i = 0; i < RPMSG_NUM_VRINGS; i++) {
		struct virtqueue *vq;

		vq = vdev->vrings_info[i].vq;
		vq->shm_io = shm_io;
	}

#ifndef VIRTIO_SLAVE_ONLY
	if (role == RPMSG_MASTER) {
		struct virtqueue_buf vqbuf;
		unsigned int idx;
		void *buffer;

		/*
		 * Pre-populate the master's RX virtqueue with zeroed buffers
		 * from the shared pool so the remote can transmit immediately.
		 */
		vqbuf.len = RPMSG_BUFFER_SIZE;
		for (idx = 0; idx < rvdev->rvq->vq_nentries; idx++) {
			/* Initialize TX virtqueue buffers for remote device */
			buffer = rpmsg_virtio_shm_pool_get_buffer(shpool,
							RPMSG_BUFFER_SIZE);

			if (!buffer) {
				return RPMSG_ERR_NO_BUFF;
			}

			vqbuf.buf = buffer;

			metal_io_block_set(shm_io,
					   metal_io_virt_to_offset(shm_io,
								   buffer),
					   0x00, RPMSG_BUFFER_SIZE);
			status = virtqueue_add_buffer(rvdev->rvq, &vqbuf, 0, 1,
						      buffer);

			if (status != RPMSG_SUCCESS) {
				return status;
			}
		}
	}
#endif /*!VIRTIO_SLAVE_ONLY*/

	/* Initialize channels and endpoints list */
	metal_list_init(&rdev->endpoints);

	/*
	 * Create name service announcement endpoint if device supports name
	 * service announcement feature.
	 */
	if (rdev->support_ns) {
		rpmsg_init_ept(&rdev->ns_ept, "NS",
			       RPMSG_NS_EPT_ADDR, RPMSG_NS_EPT_ADDR,
			       rpmsg_virtio_ns_callback, NULL);
		rpmsg_register_endpoint(rdev, &rdev->ns_ept);
	}

#ifndef VIRTIO_SLAVE_ONLY
	if (role == RPMSG_MASTER)
		/* Tell the remote the driver is fully set up. */
		rpmsg_virtio_set_status(rvdev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
#endif /*!VIRTIO_SLAVE_ONLY*/

	return status;
}
/**
 *------------------------------------------------------------------------
 * The rest of the file implements the virtio device interface as defined
 * by the virtio.h file.
 *------------------------------------------------------------------------
 */

/**
 * rpmsg_rdev_create_virtqueues
 *
 * Creates one virtqueue per vring exported by the HIL layer for the given
 * virtio device, assigns them as TX/RX queues according to the device role,
 * and (remote role only) pre-populates the RX virtqueue with zeroed buffers
 * from the shared memory pool.
 *
 * @param dev       - pointer to the virtio device (actually a remote_device)
 * @param flags     - unused
 * @param nvqs      - maximum number of virtqueues the caller supports
 * @param names     - virtqueue names, one per vring
 * @param callbacks - virtqueue callbacks, one per vring
 * @param vqs_      - unused; queues are stored in the remote_device instead
 *
 * @return - RPMSG_SUCCESS on success, RPMSG_ERR_* code on failure
 */
int rpmsg_rdev_create_virtqueues(struct virtio_device *dev, int flags, int nvqs,
				 const char *names[], vq_callback *callbacks[],
				 struct virtqueue *vqs_[])
{
	struct remote_device *rdev;
	struct vring_alloc_info ring_info;
	struct virtqueue *vqs[RPMSG_MAX_VQ_PER_RDEV];
	struct proc_vring *vring_table;
	void *buffer;
	struct llist node;
	int idx, num_vrings, status;

	(void)flags;
	(void)vqs_;

	rdev = (struct remote_device *)dev;

	/* Get the vring HW info for the given virtio device */
	vring_table = hil_get_vring_info(&rdev->proc->vdev, &num_vrings);

	/*
	 * Reject configurations exceeding either the caller's limit or the
	 * capacity of the local vqs[] array; checking only nvqs (as before)
	 * could overflow vqs[] when nvqs > RPMSG_MAX_VQ_PER_RDEV.
	 */
	if (num_vrings > nvqs || num_vrings > RPMSG_MAX_VQ_PER_RDEV) {
		return RPMSG_ERR_MAX_VQ;
	}

	/* Create virtqueue for each vring. */
	for (idx = 0; idx < num_vrings; idx++) {
		INIT_VRING_ALLOC_INFO(ring_info, vring_table[idx]);

		if (rdev->role == RPMSG_REMOTE) {
			/* Remote side clears the shared ring memory. */
			env_memset((void *)ring_info.phy_addr, 0x00,
				   vring_size(vring_table[idx].num_descs,
					      vring_table[idx].align));
		}

		status = virtqueue_create(dev, idx, (char *)names[idx],
					  &ring_info, callbacks[idx],
					  hil_vring_notify, &vqs[idx]);
		if (status != RPMSG_SUCCESS) {
			/*
			 * NOTE(review): virtqueues created in earlier
			 * iterations are not torn down here, matching the
			 * original behavior — confirm ownership/cleanup is
			 * handled by the caller.
			 */
			return status;
		}
	}

	/*
	 * FIXME - a better way to handle this: tx for master is rx for
	 * remote and vice versa.
	 */
	if (rdev->role == RPMSG_MASTER) {
		rdev->tvq = vqs[0];
		rdev->rvq = vqs[1];
	} else {
		rdev->tvq = vqs[1];
		rdev->rvq = vqs[0];
	}

	if (rdev->role == RPMSG_REMOTE) {
		/*
		 * Hand half of the shared pool's buffers (capped by the ring
		 * size) to the RX virtqueue so the master can transmit.
		 */
		for (idx = 0; ((idx < rdev->rvq->vq_nentries)
			       && (idx < rdev->mem_pool->total_buffs / 2));
		     idx++) {
			/* Initialize TX virtqueue buffers for remote device */
			buffer = sh_mem_get_buffer(rdev->mem_pool);
			if (!buffer) {
				return RPMSG_ERR_NO_BUFF;
			}

			node.data = buffer;
			node.attr = RPMSG_BUFFER_SIZE;
			node.next = RPMSG_NULL;
			/* Fully initialize the node (prev was previously
			 * left indeterminate; rpmsg_return_buffer sets it).
			 */
			node.prev = RPMSG_NULL;

			env_memset(buffer, 0x00, RPMSG_BUFFER_SIZE);
			status = virtqueue_add_buffer(rdev->rvq, &node, 0, 1,
						      buffer);
			if (status != RPMSG_SUCCESS) {
				return status;
			}
		}
	}

	return RPMSG_SUCCESS;
}