/**
 * rpmsg_rdev_deinit
 *
 * This function un-initializes the remote device: it tears down every
 * remaining channel (notifying the peer when name service is supported),
 * destroys the name-service endpoint, and finally releases the virtqueues,
 * shared-memory pool, lock, and processor handle before freeing rdev itself.
 *
 * @param rdev - pointer to remote device to deinit.
 *
 * @return - none
 *
 */
void rpmsg_rdev_deinit(struct remote_device *rdev)
{
	struct llist *rp_chnl_head, *rp_chnl_temp, *node;
	struct rpmsg_channel *rp_chnl;

	rp_chnl_head = rdev->rp_channels;

	/* Walk the channel list; save the next pointer before the current
	 * node is torn down by _rpmsg_delete_channel(). */
	while (rp_chnl_head != RPMSG_NULL) {
		rp_chnl_temp = rp_chnl_head->next;
		rp_chnl = (struct rpmsg_channel *)rp_chnl_head->data;

		/* Give the application its channel-destroyed callback first,
		 * while the channel is still fully valid. */
		if (rdev->channel_destroyed) {
			rdev->channel_destroyed(rp_chnl);
		}

		/* Only the master announces channel destruction to the peer,
		 * and only when name service is supported. This must happen
		 * before the virtqueues are freed below. */
		if ((rdev->support_ns) && (rdev->role == RPMSG_MASTER)) {
			rpmsg_send_ns_message(rdev, rp_chnl, RPMSG_NS_DESTROY);
		}

		/* Delete default endpoint for channel */
		if (rp_chnl->rp_ept) {
			rpmsg_destroy_ept(rp_chnl->rp_ept);
		}

		_rpmsg_delete_channel(rp_chnl);
		rp_chnl_head = rp_chnl_temp;
	}

	/* Delete name service endpoint */
	node = rpmsg_rdev_get_endpoint_from_addr(rdev, RPMSG_NS_EPT_ADDR);
	if (node) {
		_destroy_endpoint(rdev, (struct rpmsg_endpoint *)node->data);
	}

	/* Release transport resources: receive/transmit virtqueues, the
	 * shared-memory buffer pool, and the device lock. */
	if (rdev->rvq) {
		virtqueue_free(rdev->rvq);
	}
	if (rdev->tvq) {
		virtqueue_free(rdev->tvq);
	}
	if (rdev->mem_pool) {
		sh_mem_delete_pool(rdev->mem_pool);
	}
	if (rdev->lock) {
		env_delete_mutex(rdev->lock);
	}

	/* Drop the HIL processor handle, then free rdev itself — rdev must
	 * not be touched after env_free_memory(). */
	if (rdev->proc) {
		hil_delete_proc(rdev->proc);
		rdev->proc = 0;
	}

	env_free_memory(rdev);
}
/*
 * Release every virtqueue held in the softc's fixed-size slot table and
 * reset the active-queue count to zero. Empty slots are skipped; freed
 * slots are cleared so the teardown is safe to repeat.
 */
static void
vtpci_free_virtqueues(struct vtpci_softc *sc)
{
	struct vtpci_virtqueue *slot;
	int idx;

	sc->vtpci_nvqs = 0;

	for (idx = 0; idx < VIRTIO_MAX_VIRTQUEUES; idx++) {
		slot = &sc->vtpci_vqx[idx];
		if (slot->vq == NULL)
			continue;
		virtqueue_free(slot->vq);
		slot->vq = NULL;
	}
}
static void vtpci_free_virtqueues(struct vtpci_softc *sc) { struct vtpci_virtqueue *vqx; int idx; for (idx = 0; idx < sc->vtpci_nvqs; idx++) { vqx = &sc->vtpci_vqs[idx]; vtpci_select_virtqueue(sc, idx); vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN, 0); virtqueue_free(vqx->vtv_vq); vqx->vtv_vq = NULL; } free(sc->vtpci_vqs, M_DEVBUF); sc->vtpci_vqs = NULL; sc->vtpci_nvqs = 0; }
/*
 * Allocate and initialize a virtqueue.
 *
 * Validates the requested queue parameters, allocates the virtqueue
 * structure (with one vq_desc_extra per descriptor), optionally sets up
 * indirect descriptor support, allocates the page-aligned ring memory
 * below highaddr, and initializes the ring with interrupts disabled.
 *
 * On success *vqp receives the new queue and 0 is returned; on failure
 * *vqp is left NULL, any partial allocation is released, and an errno
 * value is returned.
 */
int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
{
	struct virtqueue *vq;
	int error;

	*vqp = NULL;

	/* Guard clauses: reject impossible queue configurations up front. */
	if (size == 0) {
		device_printf(dev,
		    "virtqueue %d (%s) does not exist (size is zero)\n",
		    queue, info->vqai_name);
		return (ENODEV);
	}
	if (!powerof2(size)) {
		device_printf(dev,
		    "virtqueue %d (%s) size is not a power of 2: %d\n",
		    queue, info->vqai_name, size);
		return (ENXIO);
	}
	if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
		device_printf(dev, "virtqueue %d (%s) requested too many "
		    "indirect descriptors: %d, max %d\n",
		    queue, info->vqai_name, info->vqai_maxindirsz,
		    VIRTIO_MAX_INDIRECT);
		return (EINVAL);
	}

	/* One vq_desc_extra slot per descriptor trails the structure. */
	vq = kmalloc(sizeof(struct virtqueue) +
	    size * sizeof(struct vq_desc_extra), M_DEVBUF, M_INTWAIT | M_ZERO);
	if (vq == NULL) {
		device_printf(dev, "cannot allocate virtqueue\n");
		return (ENOMEM);
	}

	vq->vq_dev = dev;
	strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
	vq->vq_queue_index = queue;
	vq->vq_alignment = align;
	vq->vq_nentries = size;
	vq->vq_free_cnt = size;
	vq->vq_intrhand = info->vqai_intr;
	vq->vq_intrhand_arg = info->vqai_intr_arg;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
		vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;

	if (info->vqai_maxindirsz > 1) {
		error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
		if (error != 0)
			goto fail;
	}

	/* Ring memory must be page-aligned and physically below highaddr. */
	vq->vq_ring_size = round_page(vring_size(size, align));
	vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
	    M_WAITOK | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
	if (vq->vq_ring_mem == NULL) {
		device_printf(dev,
		    "cannot allocate memory for virtqueue ring\n");
		error = ENOMEM;
		goto fail;
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	*vqp = vq;
	return (0);

fail:
	/* virtqueue_free releases the struct and any indirect lists. */
	virtqueue_free(vq);
	return (error);
}