/**
 * @brief callback for virtqueue
 */
static void vq_hwkey_callback(struct virtqueue *vq)
{
    struct EmulHwkeyEvent hwkey_event;
#if 0
    printk(KERN_INFO "vq hwkey callback\n");
#endif

    while (1) {
        memcpy(&hwkey_event, &vh->vbuf[vqidx], sizeof(hwkey_event));
        if (hwkey_event.event_type == 0) {
            break;
        }

        printk(KERN_ERR "keycode: %d, event_type: %d, vqidx: %d\n",
               hwkey_event.keycode, hwkey_event.event_type, vqidx);

        if (hwkey_event.event_type == KEY_PRESSED) {
            input_event(vh->idev, EV_KEY, hwkey_event.keycode, true);
        } else if (hwkey_event.event_type == KEY_RELEASED) {
            input_event(vh->idev, EV_KEY, hwkey_event.keycode, false);
        } else {
            printk(KERN_ERR "Unknown event type\n");
            return;
        }

        input_sync(vh->idev);

        memset(&vh->vbuf[vqidx], 0x00, sizeof(hwkey_event));
        vqidx++;
        if (vqidx == MAX_BUF_COUNT) {
            vqidx = 0;
        }
    }

    virtqueue_kick(vh->vq);
}
/*
 * On error we are losing the status update, which isn't critical as
 * this is typically used for stuff like keyboard leds.
 */
static int virtinput_send_status(struct virtio_input *vi,
                                 u16 type, u16 code, s32 value)
{
    struct virtio_input_event *stsbuf;
    struct scatterlist sg[1];
    unsigned long flags;
    int rc;

    stsbuf = kzalloc(sizeof(*stsbuf), GFP_ATOMIC);
    if (!stsbuf)
        return -ENOMEM;

    stsbuf->type  = cpu_to_le16(type);
    stsbuf->code  = cpu_to_le16(code);
    stsbuf->value = cpu_to_le32(value);
    sg_init_one(sg, stsbuf, sizeof(*stsbuf));

    spin_lock_irqsave(&vi->lock, flags);
    if (vi->ready) {
        rc = virtqueue_add_outbuf(vi->sts, sg, 1, stsbuf, GFP_ATOMIC);
        virtqueue_kick(vi->sts);
    } else {
        rc = -ENODEV;
    }
    spin_unlock_irqrestore(&vi->lock, flags);

    if (rc != 0)
        kfree(stsbuf);
    return rc;
}
/* called when an rx buffer is used, and it's time to digest a message */
static void rpmsg_recv_done(struct virtqueue *rvq)
{
    struct virtproc_info *vrp = rvq->vdev->priv;
    struct device *dev = &rvq->vdev->dev;
    struct rpmsg_hdr *msg;
    unsigned int len, msgs_received = 0;
    int err;

    msg = virtqueue_get_buf(rvq, &len);
    if (!msg) {
        dev_err(dev, "uhm, incoming signal, but no used buffer ?\n");
        return;
    }

    while (msg) {
        err = rpmsg_recv_single(vrp, dev, msg, len);
        if (err)
            break;

        msgs_received++;

        msg = virtqueue_get_buf(rvq, &len);
    }

    dev_dbg(dev, "Received %u messages\n", msgs_received);

    /* tell the remote processor we added another available rx buffer */
    if (msgs_received)
        virtqueue_kick(vrp->rvq);
}
static void rpmsg_recv_done(struct virtqueue *rvq)
{
    struct rpmsg_hdr *msg;
    unsigned int len;
    struct rpmsg_endpoint *ept;
    struct scatterlist sg;
    unsigned long offset;
    void *sim_addr;
    struct virtproc_info *vrp = rvq->vdev->priv;
    struct device *dev = &rvq->vdev->dev;
    int err;

    /* if we fail to acquire the lock, removal is in progress, so just return */
    if (!mutex_trylock(&vrp->rm_lock))
        return;

    /* make sure the descriptors are updated before reading */
    rmb();

    msg = virtqueue_get_buf(rvq, &len);
    if (!msg) {
        dev_err(dev, "uhm, incoming signal, but no used buffer ?\n");
        /* drop the lock taken above before bailing out */
        mutex_unlock(&vrp->rm_lock);
        return;
    }

    dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Unused: %d\n",
            msg->src, msg->dst, msg->len, msg->flags, msg->unused);
#if 0
    print_hex_dump(KERN_DEBUG, "rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
                   msg, sizeof(*msg) + msg->len, true);
#endif

    /* fetch the callback of the appropriate user */
    spin_lock(&vrp->endpoints_lock);
    ept = idr_find(&vrp->endpoints, msg->dst);
    spin_unlock(&vrp->endpoints_lock);

    if (ept && ept->cb)
        ept->cb(ept->rpdev, msg->data, msg->len, ept->priv, msg->src);
    else
        dev_warn(dev, "msg received with no recipient\n");

    /* add the buffer back to the remote processor's virtqueue */
    offset = ((unsigned long)msg) - ((unsigned long)vrp->rbufs);
    sim_addr = vrp->sim_base + offset;
    sg_init_one(&sg, sim_addr, sizeof(*msg) + len);

    err = virtqueue_add_buf_gfp(vrp->rvq, &sg, 0, 1, msg, GFP_KERNEL);
    if (err < 0) {
        dev_err(dev, "failed to add a virtqueue buffer: %d\n", err);
        /* drop the lock taken above before bailing out */
        mutex_unlock(&vrp->rm_lock);
        return;
    }

    /* descriptors must be written before kicking remote processor */
    wmb();

    /* tell the remote processor we added another available rx buffer */
    virtqueue_kick(vrp->rvq);

    mutex_unlock(&vrp->rm_lock);
}
/* called when an rx buffer is used, and it's time to digest a message */
static void rpmsg_recv_done(struct virtqueue *rvq)
{
    struct rpmsg_hdr *msg;
    unsigned int len;
    struct rpmsg_endpoint *ept;
    struct scatterlist sg;
    struct virtproc_info *vrp = rvq->vdev->priv;
    struct device *dev = &rvq->vdev->dev;
    int err;

    msg = virtqueue_get_buf(rvq, &len);
    if (!msg) {
        dev_err(dev, "uhm, incoming signal, but no used buffer ?\n");
        return;
    }

    dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Reserved: %d\n",
            msg->src, msg->dst, msg->len, msg->flags, msg->reserved);
#if 0
    print_hex_dump(KERN_DEBUG, "rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
                   msg, sizeof(*msg) + msg->len, true);
#endif

    /*
     * We currently use fixed-sized buffers, so trivially sanitize
     * the reported payload length.
     */
    if (len > RPMSG_BUF_SIZE ||
        msg->len > (len - sizeof(struct rpmsg_hdr))) {
        dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg->len);
        return;
    }

    /* use the dst addr to fetch the callback of the appropriate user */
    mutex_lock(&vrp->endpoints_lock);
    ept = idr_find(&vrp->endpoints, msg->dst);
    mutex_unlock(&vrp->endpoints_lock);

    if (ept && ept->cb)
        ept->cb(ept->rpdev, msg->data, msg->len, ept->priv, msg->src);
    else
        dev_warn(dev, "msg received with no recipient\n");

    /* publish the real size of the buffer */
    sg_init_one(&sg, msg, RPMSG_BUF_SIZE);

    /* add the buffer back to the remote processor's virtqueue */
    err = virtqueue_add_buf(vrp->rvq, &sg, 0, 1, msg, GFP_KERNEL);
    if (err < 0) {
        dev_err(dev, "failed to add a virtqueue buffer: %d\n", err);
        return;
    }

    /* tell the remote processor we added another available rx buffer */
    virtqueue_kick(vrp->rvq);
}
/* The host will fill any buffer we give it with sweet, sweet randomness. */
static void register_buffer(struct virtrng_info *vi, u8 *buf, size_t size)
{
    struct scatterlist sg;

    sg_init_one(&sg, buf, size);

    /* There should always be room for one buffer. */
    virtqueue_add_inbuf(vi->vq, &sg, 1, buf, GFP_KERNEL);

    virtqueue_kick(vi->vq);
}
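/*
 * For context (not part of the snippet above): once the buffer has been
 * added and the queue kicked, the host fills it and the driver is notified
 * through the virtqueue callback. A minimal sketch of that completion path,
 * modelled on the upstream virtio-rng driver and assuming the same
 * virtrng_info fields (vq, data_avail, have_data), could look like this:
 */
static void random_recv_done(struct virtqueue *vq)
{
    struct virtrng_info *vi = vq->vdev->priv;

    /*
     * Spurious callbacks are possible (e.g. shared interrupts), so only
     * complete when a used buffer is actually available.
     */
    if (!virtqueue_get_buf(vi->vq, &vi->data_avail))
        return;

    complete(&vi->have_data);
}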
static void register_buffer(void)
{
    struct scatterlist sg;

    sg_init_one(&sg, random_data + data_left, RANDOM_DATA_SIZE - data_left);

    /* There should always be room for one buffer. */
    if (virtqueue_add_buf(vq, &sg, 0, 1, random_data) < 0)
        BUG();

    virtqueue_kick(vq);
}
/* The host will fill any buffer we give it with sweet, sweet randomness. */
static void register_buffer(u8 *buf, size_t size)
{
    struct scatterlist sg;

    sg_init_one(&sg, buf, size);

    /* There should always be room for one buffer. */
    if (virtqueue_add_buf(vq, &sg, 0, 1, buf) < 0)
        BUG();

    virtqueue_kick(vq);
}
static int p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
{
    int err;
    int in, out, out_sgs, in_sgs;
    unsigned long flags;
    struct virtio_chan *chan = client->trans;
    struct scatterlist *sgs[2];

    p9_debug(P9_DEBUG_TRANS, "9p debug: virtio request\n");

    req->status = REQ_STATUS_SENT;
req_retry:
    spin_lock_irqsave(&chan->lock, flags);

    out_sgs = in_sgs = 0;

    /* Handle out VirtIO ring buffers */
    out = pack_sg_list(chan->sg, 0,
                       VIRTQUEUE_NUM, req->tc->sdata, req->tc->size);
    if (out)
        sgs[out_sgs++] = chan->sg;

    in = pack_sg_list(chan->sg, out,
                      VIRTQUEUE_NUM, req->rc->sdata, req->rc->capacity);
    if (in)
        sgs[out_sgs + in_sgs++] = chan->sg + out;

    err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req->tc,
                            GFP_ATOMIC);
    if (err < 0) {
        if (err == -ENOSPC) {
            chan->ring_bufs_avail = 0;
            spin_unlock_irqrestore(&chan->lock, flags);
            err = wait_event_interruptible(*chan->vc_wq,
                                           chan->ring_bufs_avail);
            if (err == -ERESTARTSYS)
                return err;

            p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n");
            goto req_retry;
        } else {
            spin_unlock_irqrestore(&chan->lock, flags);
            p9_debug(P9_DEBUG_TRANS,
                     "virtio rpc add_sgs returned failure\n");
            return -EIO;
        }
    }
    virtqueue_kick(chan->vq);
    spin_unlock_irqrestore(&chan->lock, flags);

    p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
    return 0;
}
NTSTATUS VioCryptInterruptEnable(IN WDFINTERRUPT Interrupt, IN WDFDEVICE wdfDevice)
{
    PDEVICE_CONTEXT context = GetDeviceContext(wdfDevice);

    UNREFERENCED_PARAMETER(Interrupt);

    virtqueue_enable_cb(context->ControlQueue);
    virtqueue_kick(context->ControlQueue);

    Trace(TRACE_LEVEL_VERBOSE, "[%s]", __FUNCTION__);

    return STATUS_SUCCESS;
}
BOOLEAN
SynchronizedFlushRoutine(
    IN PVOID DeviceExtension,
    IN PVOID Context
    )
{
    PADAPTER_EXTENSION  adaptExt = (PADAPTER_EXTENSION)DeviceExtension;
    PSCSI_REQUEST_BLOCK Srb      = (PSCSI_REQUEST_BLOCK)Context;
    PRHEL_SRB_EXTENSION srbExt   = (PRHEL_SRB_EXTENSION)Srb->SrbExtension;
    ULONG               fragLen;
    PVOID               va;
    ULONGLONG           pa;

    SET_VA_PA();
    srbExt->vbr.out_hdr.sector = 0;
    srbExt->vbr.out_hdr.ioprio = 0;
    srbExt->vbr.req            = (struct request *)Srb;
    srbExt->vbr.out_hdr.type   = VIRTIO_BLK_T_FLUSH;
    srbExt->out                = 1;
    srbExt->in                 = 1;

    srbExt->vbr.sg[0].physAddr = ScsiPortGetPhysicalAddress(DeviceExtension, NULL, &srbExt->vbr.out_hdr, &fragLen);
    srbExt->vbr.sg[0].length   = sizeof(srbExt->vbr.out_hdr);
    srbExt->vbr.sg[1].physAddr = ScsiPortGetPhysicalAddress(DeviceExtension, NULL, &srbExt->vbr.status, &fragLen);
    srbExt->vbr.sg[1].length   = sizeof(srbExt->vbr.status);

    if (virtqueue_add_buf(adaptExt->vq,
                          &srbExt->vbr.sg[0],
                          srbExt->out, srbExt->in,
                          &srbExt->vbr, va, pa) >= 0) {
        virtqueue_kick(adaptExt->vq);
        return TRUE;
    }
    virtqueue_kick(adaptExt->vq);
#ifdef USE_STORPORT
    StorPortBusy(DeviceExtension, 2);
#endif
    return FALSE;
}
BOOLEAN
SendSRB(
    IN PVOID DeviceExtension,
    IN PSRB_TYPE Srb
    )
{
    PADAPTER_EXTENSION adaptExt = (PADAPTER_EXTENSION)DeviceExtension;
    PSRB_EXTENSION     srbExt   = SRB_EXTENSION(Srb);
    PVOID              va = NULL;
    ULONGLONG          pa = 0;
    ULONG              QueueNumber = 0;
    ULONG              OldIrql = 0;
    ULONG              MessageId = 0;
    BOOLEAN            kick = FALSE;
    STOR_LOCK_HANDLE   LockHandle = { 0 };
    ULONG              status = STOR_STATUS_SUCCESS;

    ENTER_FN();
    SET_VA_PA();

    if (adaptExt->num_queues > 1) {
        QueueNumber = adaptExt->cpu_to_vq_map[srbExt->cpu] + VIRTIO_SCSI_REQUEST_QUEUE_0;
        MessageId = QueueNumber + 1;
    } else {
        QueueNumber = VIRTIO_SCSI_REQUEST_QUEUE_0;
    }

    VioScsiAcquireSpinLock(DeviceExtension, MessageId, &LockHandle);

    if (virtqueue_add_buf(adaptExt->vq[QueueNumber],
                          &srbExt->sg[0],
                          srbExt->out, srbExt->in,
                          &srbExt->cmd, va, pa) >= 0) {
        kick = TRUE;
    } else {
        RhelDbgPrint(TRACE_LEVEL_ERROR, ("%s Can not add packet to queue.\n", __FUNCTION__));
        //FIXME
    }

    VioScsiReleaseSpinLock(DeviceExtension, MessageId, &LockHandle);

    if (kick == TRUE) {
        virtqueue_kick(adaptExt->vq[QueueNumber]);
    }

    if (adaptExt->num_queues > 1) {
        if (CHECKFLAG(adaptExt->perfFlags, STOR_PERF_OPTIMIZE_FOR_COMPLETION_DURING_STARTIO)) {
//            ProcessQueue(DeviceExtension, MessageId, FALSE);
        }
    }

    EXIT_FN();
    return kick;
}
/**
 * rpmsg_virtio_rx_callback
 *
 * Rx callback function.
 *
 * @param vq - pointer to the virtqueue on which messages are received
 *
 */
static void rpmsg_virtio_rx_callback(struct virtqueue *vq)
{
    struct virtio_device *vdev = vq->vq_dev;
    struct rpmsg_virtio_device *rvdev = vdev->priv;
    struct rpmsg_device *rdev = &rvdev->rdev;
    struct rpmsg_endpoint *ept;
    struct rpmsg_hdr *rp_hdr;
    uint32_t len;
    uint16_t idx;
    int status;

    metal_mutex_acquire(&rdev->lock);

    /* Process the received data from remote node */
    rp_hdr = rpmsg_virtio_get_rx_buffer(rvdev, &len, &idx);

    metal_mutex_release(&rdev->lock);

    while (rp_hdr) {
        /* Get the channel node from the remote device channels list. */
        metal_mutex_acquire(&rdev->lock);
        ept = rpmsg_get_ept_from_addr(rdev, rp_hdr->dst);
        metal_mutex_release(&rdev->lock);

        if (ept) {
            if (ept->dest_addr == RPMSG_ADDR_ANY) {
                /*
                 * First message received from the remote side,
                 * update channel destination address
                 */
                ept->dest_addr = rp_hdr->src;
            }
            status = ept->cb(ept, RPMSG_LOCATE_DATA(rp_hdr),
                             rp_hdr->len, rp_hdr->src, ept->priv);

            RPMSG_ASSERT(status >= 0,
                         "unexpected callback status\r\n");
        }

        metal_mutex_acquire(&rdev->lock);

        /* Return used buffers. */
        rpmsg_virtio_return_buffer(rvdev, rp_hdr, len, idx);

        rp_hdr = rpmsg_virtio_get_rx_buffer(rvdev, &len, &idx);
        if (rp_hdr == NULL) {
            /* tell the peer we returned some rx buffers */
            virtqueue_kick(rvdev->rvq);
        }
        metal_mutex_release(&rdev->lock);
    }
}
size_t VIOSerialSendBuffers(IN PVIOSERIAL_PORT Port,
                            IN PVOID Buffer,
                            IN size_t Length)
{
    struct virtqueue *vq = GetOutQueue(Port);
    struct VirtIOBufferDescriptor sg[QUEUE_DESCRIPTORS];
    PVOID buffer = Buffer;
    size_t length = Length;
    int out = 0;
    int ret;

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_WRITE,
        "--> %s Buffer: %p Length: %d\n", __FUNCTION__, Buffer, Length);

    if (BYTES_TO_PAGES(Length) > QUEUE_DESCRIPTORS)
    {
        return 0;
    }

    while (length > 0)
    {
        sg[out].physAddr = MmGetPhysicalAddress(buffer);
        sg[out].length = min(length, PAGE_SIZE);

        buffer = (PVOID)((LONG_PTR)buffer + sg[out].length);
        length -= sg[out].length;
        out += 1;
    }

    WdfSpinLockAcquire(Port->OutVqLock);

    ret = virtqueue_add_buf(vq, sg, out, 0, Buffer, NULL, 0);
    virtqueue_kick(vq);

    if (ret >= 0)
    {
        Port->OutVqFull = (ret == 0);
    }
    else
    {
        Length = 0;
        TraceEvents(TRACE_LEVEL_ERROR, DBG_WRITE,
            "Error adding buffer to queue (ret = %d)\n", ret);
    }

    WdfSpinLockRelease(Port->OutVqLock);

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_WRITE, "<-- %s\n", __FUNCTION__);

    return Length;
}
static void virtinput_fill_evt(struct virtio_input *vi)
{
    unsigned long flags;
    int i, size;

    spin_lock_irqsave(&vi->lock, flags);
    size = virtqueue_get_vring_size(vi->evt);
    if (size > ARRAY_SIZE(vi->evts))
        size = ARRAY_SIZE(vi->evts);
    for (i = 0; i < size; i++)
        virtinput_queue_evtbuf(vi, &vi->evts[i]);
    virtqueue_kick(vi->evt);
    spin_unlock_irqrestore(&vi->lock, flags);
}
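/*
 * For reference (not part of the snippet above): virtinput_queue_evtbuf() is
 * the helper that posts each event-sized buffer to the "evt" virtqueue before
 * the single kick. A minimal sketch, modelled on the upstream virtio_input
 * driver and assuming the same virtio_input fields, would be:
 */
static void virtinput_queue_evtbuf(struct virtio_input *vi,
                                   struct virtio_input_event *evtbuf)
{
    struct scatterlist sg[1];

    /* Hand one event-sized buffer to the host for it to fill in. */
    sg_init_one(sg, evtbuf, sizeof(*evtbuf));
    virtqueue_add_inbuf(vi->evt, sg, 1, evtbuf, GFP_ATOMIC);
}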
static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
{
    struct scatterlist sg;
    unsigned int len;

    sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);

    /* We should always be able to add one buffer to an empty queue. */
    virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
    virtqueue_kick(vq);

    /* When host has read buffer, this completes via balloon_ack */
    wait_event(vb->acked, virtqueue_get_buf(vq, &len));
}
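/*
 * For context (not part of the snippet above): the wait_event() call relies
 * on the virtqueue callback waking the waiter once the host has consumed the
 * buffer. A minimal sketch of that callback, as in the upstream
 * virtio_balloon driver, is simply:
 */
static void balloon_ack(struct virtqueue *vq)
{
    struct virtio_balloon *vb = vq->vdev->priv;

    /* Wake whoever is sleeping in tell_host(). */
    wake_up(&vb->acked);
}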
/*
 * Send a buffer to the output virtqueue of the given crypto_device.
 * If nonblock is false, wait until the host acknowledges the data receive.
 */
ssize_t send_buf(struct crypto_device *crdev, void *in_buf, size_t in_count,
                 bool nonblock)
{
    struct scatterlist sg[1];
    struct virtqueue *out_vq;
    ssize_t ret = 0;
    unsigned int len;

    debug("Entering\n");
    out_vq = crdev->ovq;

    /* Discard any consumed buffers. */
    reclaim_consumed_buffers(crdev);

    sg_init_one(sg, in_buf, in_count);

    /* add sg list to virtqueue and notify host */
    ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf);
    if (ret < 0) {
        debug("Oops! Error adding buffer to vqueue\n");
        in_count = 0;
        goto done;
    }

    if (ret == 0) {
        printk(KERN_WARNING "ovq_full!!!!\n");
        crdev->ovq_full = true;
    }

    virtqueue_kick(out_vq);

    if (nonblock)
        goto done;

    /*
     * if nonblock is false we wait until the host acknowledges it pushed
     * out the data we sent.
     */
    while (!virtqueue_get_buf(out_vq, &len))
        cpu_relax();

    debug("Leaving\n");
done:
    /*
     * We're expected to return the amount of data we wrote -- all
     * of it
     */
    return in_count;
}
static void stats_handle_request(struct virtio_balloon *vb)
{
    struct virtqueue *vq;
    struct scatterlist sg;
    unsigned int len, num_stats;

    num_stats = update_balloon_stats(vb);

    vq = vb->stats_vq;
    if (!virtqueue_get_buf(vq, &len))
        return;
    sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
    virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
    virtqueue_kick(vq);
}
static int send_cmd_id_stop(struct virtio_balloon *vb)
{
    struct scatterlist sg;
    struct virtqueue *vq = vb->free_page_vq;
    int err, unused;

    /* Detach all the used buffers from the vq */
    while (virtqueue_get_buf(vq, &unused))
        ;

    sg_init_one(&sg, &vb->cmd_id_stop, sizeof(vb->cmd_id_stop));
    err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_stop, GFP_KERNEL);
    if (!err)
        virtqueue_kick(vq);
    return err;
}
/*
 * Add a buffer to the specified virtqueue.
 * Callers should take appropriate locks.
 */
int add_inbuf(struct virtqueue *vq, struct crypto_vq_buffer *buf)
{
    struct scatterlist sg[1];
    int ret;

    debug("Entering\n");

    /* Let's make the scatter-gather list. */
    sg_init_one(sg, buf->buf, buf->size);

    /* Ok now add buf to virtqueue and notify host. */
    ret = virtqueue_add_buf(vq, sg, 0, 1, buf);
    virtqueue_kick(vq);

    debug("Leaving\n");
    return ret;
}
NTSTATUS VirtRngEvtInterruptEnable(IN WDFINTERRUPT Interrupt,
                                   IN WDFDEVICE AssociatedDevice)
{
    PDEVICE_CONTEXT context = GetDeviceContext(
        WdfInterruptGetDevice(Interrupt));

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_INTERRUPT,
        "--> %!FUNC! Interrupt: %p Device: %p",
        Interrupt, AssociatedDevice);

    virtqueue_enable_cb(context->VirtQueue);
    virtqueue_kick(context->VirtQueue);

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_INTERRUPT, "<-- %!FUNC!");

    return STATUS_SUCCESS;
}
static int get_free_page_and_send(struct virtio_balloon *vb)
{
    struct virtqueue *vq = vb->free_page_vq;
    struct page *page;
    struct scatterlist sg;
    int err, unused;
    void *p;

    /* Detach all the used buffers from the vq */
    while (virtqueue_get_buf(vq, &unused))
        ;

    page = alloc_pages(VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG,
                       VIRTIO_BALLOON_FREE_PAGE_ORDER);
    /*
     * When the allocation returns NULL, it indicates that we have got all
     * the possible free pages, so return -EINTR to stop.
     */
    if (!page)
        return -EINTR;

    p = page_address(page);
    sg_init_one(&sg, p, VIRTIO_BALLOON_FREE_PAGE_SIZE);
    /* There is always 1 entry reserved for the cmd id to use. */
    if (vq->num_free > 1) {
        err = virtqueue_add_inbuf(vq, &sg, 1, p, GFP_KERNEL);
        if (unlikely(err)) {
            free_pages((unsigned long)p,
                       VIRTIO_BALLOON_FREE_PAGE_ORDER);
            return err;
        }
        virtqueue_kick(vq);
        spin_lock_irq(&vb->free_page_list_lock);
        balloon_page_push(&vb->free_page_list, page);
        vb->num_free_page_blocks++;
        spin_unlock_irq(&vb->free_page_list_lock);
    } else {
        /*
         * The vq has no available entry to add this page block, so
         * just free it.
         */
        free_pages((unsigned long)p, VIRTIO_BALLOON_FREE_PAGE_ORDER);
    }

    return 0;
}
VOID
VIOSerialSendCtrlMsg(
    IN WDFDEVICE Device,
    IN ULONG id,
    IN USHORT event,
    IN USHORT value
    )
{
    struct VirtIOBufferDescriptor sg;
    struct virtqueue *vq;
    UINT len;
    PPORTS_DEVICE pContext = GetPortsDevice(Device);
    VIRTIO_CONSOLE_CONTROL cpkt;

    if (!pContext->isHostMultiport)
    {
        return;
    }

    vq = pContext->c_ovq;

    TraceEvents(TRACE_LEVEL_INFORMATION, DBG_PNP,
        "--> %s vq = %p\n", __FUNCTION__, vq);

    cpkt.id = id;
    cpkt.event = event;
    cpkt.value = value;

    sg.physAddr = MmGetPhysicalAddress(&cpkt);
    sg.length = sizeof(cpkt);

    WdfWaitLockAcquire(pContext->COutVqLock, NULL);
    if (0 <= virtqueue_add_buf(vq, &sg, 1, 0, &cpkt, NULL, 0))
    {
        virtqueue_kick(vq);
        while (!virtqueue_get_buf(vq, &len))
        {
            LARGE_INTEGER interval;
            interval.QuadPart = -1;
            KeDelayExecutionThread(KernelMode, FALSE, &interval);
        }
    }
    WdfWaitLockRelease(pContext->COutVqLock);

    TraceEvents(TRACE_LEVEL_INFORMATION, DBG_PNP, "<-- %s\n", __FUNCTION__);
}
static void virtinput_recv_events(struct virtqueue *vq)
{
    struct virtio_input *vi = vq->vdev->priv;
    struct virtio_input_event *event;
    unsigned long flags;
    unsigned int len;

    spin_lock_irqsave(&vi->lock, flags);
    if (vi->ready) {
        while ((event = virtqueue_get_buf(vi->evt, &len)) != NULL) {
            spin_unlock_irqrestore(&vi->lock, flags);
            input_event(vi->idev,
                        le16_to_cpu(event->type),
                        le16_to_cpu(event->code),
                        le32_to_cpu(event->value));
            spin_lock_irqsave(&vi->lock, flags);
            virtinput_queue_evtbuf(vi, event);
        }
        virtqueue_kick(vq);
    }
    spin_unlock_irqrestore(&vi->lock, flags);
}
BOOLEAN
SynchronizedKickEventRoutine(
    IN PVOID DeviceExtension,
    IN PVOID Context
    )
{
    PADAPTER_EXTENSION   adaptExt  = (PADAPTER_EXTENSION)DeviceExtension;
    PVirtIOSCSIEventNode eventNode = (PVirtIOSCSIEventNode)Context;
    PVOID                va = NULL;
    ULONGLONG            pa = 0;

    ENTER_FN();
    if (virtqueue_add_buf(adaptExt->vq[VIRTIO_SCSI_EVENTS_QUEUE],
                          &eventNode->sg, 0, 1,
                          eventNode, va, pa) >= 0) {
        virtqueue_kick(adaptExt->vq[VIRTIO_SCSI_EVENTS_QUEUE]);
        return TRUE;
    }
    EXIT_ERR();
    return FALSE;
}
static int init_vqs(struct virtio_balloon *vb)
{
    struct virtqueue *vqs[3];
    vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
    static const char * const names[] = { "inflate", "deflate", "stats" };
    int err, nvqs;

    /*
     * We expect two virtqueues: inflate and deflate, and
     * optionally stat.
     */
    nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2;
    err = virtio_find_vqs(vb->vdev, nvqs, vqs, callbacks, names, NULL);
    if (err)
        return err;

    vb->inflate_vq = vqs[0];
    vb->deflate_vq = vqs[1];
    if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
        struct scatterlist sg;
        unsigned int num_stats;
        vb->stats_vq = vqs[2];

        /*
         * Prime this virtqueue with one buffer so the hypervisor can
         * use it to signal us later (it can't be broken yet!).
         */
        num_stats = update_balloon_stats(vb);

        sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
        if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL) < 0)
            BUG();
        virtqueue_kick(vb->stats_vq);
    }
    return 0;
}
/*
 * rpmsg_rdev_notify
 *
 * This function checks whether the remote device is up. If it is, a
 * notification is sent, based on the device role, to start IPC.
 *
 * @param rdev - pointer to remote device
 *
 * @return - status of function execution
 *
 */
int rpmsg_rdev_notify(struct remote_device *rdev)
{
    int status = RPMSG_SUCCESS;

    if (rdev->role == RPMSG_REMOTE) {
        status = hil_get_status(rdev->proc);

        /*
         * Let the remote device know that Master is ready for
         * communication.
         */
        if (!status)
            virtqueue_kick(rdev->rvq);
    } else {
        status = hil_set_status(rdev->proc);
    }

    if (status == RPMSG_SUCCESS)
        rdev->state = RPMSG_DEV_STATE_ACTIVE;

    return status;
}
/*
 * Send a control message to the Guest.
 */
ssize_t send_control_msg(struct crypto_device *crdev, unsigned int event,
                         unsigned int value)
{
    struct scatterlist sg[1];
    struct virtio_crypto_control cpkt;
    struct virtqueue *vq;
    unsigned int len;

    debug("Entering\n");
    cpkt.event = event;
    cpkt.value = value;

    vq = crdev->c_ovq;

    sg_init_one(sg, &cpkt, sizeof(cpkt));
    if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt) >= 0) {
        virtqueue_kick(vq);
        /* Wait until host gets the message. */
        while (!virtqueue_get_buf(vq, &len))
            cpu_relax();
    }

    debug("Leaving\n");
    return 0;
}
static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                           const struct blk_mq_queue_data *bd)
{
    struct virtio_blk *vblk = hctx->queue->queuedata;
    struct request *req = bd->rq;
    struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
    unsigned long flags;
    unsigned int num;
    int qid = hctx->queue_num;
    int err;
    bool notify = false;

    BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

    vbr->req = req;
    if (req->cmd_flags & REQ_FLUSH) {
        vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH);
        vbr->out_hdr.sector = 0;
        vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
    } else {
        switch (req->cmd_type) {
        case REQ_TYPE_FS:
            vbr->out_hdr.type = 0;
            vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, blk_rq_pos(vbr->req));
            vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
            break;
        case REQ_TYPE_BLOCK_PC:
            vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_SCSI_CMD);
            vbr->out_hdr.sector = 0;
            vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
            break;
        case REQ_TYPE_DRV_PRIV:
            vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
            vbr->out_hdr.sector = 0;
            vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
            break;
        default:
            /* We don't put anything else in the queue. */
            BUG();
        }
    }

    blk_mq_start_request(req);

    num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
    if (num) {
        if (rq_data_dir(vbr->req) == WRITE)
            vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
        else
            vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
    }

    spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
    err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
    if (err) {
        virtqueue_kick(vblk->vqs[qid].vq);
        blk_mq_stop_hw_queue(hctx);
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
        /* Out of mem doesn't actually happen, since we fall back
         * to direct descriptors */
        if (err == -ENOMEM || err == -ENOSPC)
            return BLK_MQ_RQ_QUEUE_BUSY;
        return BLK_MQ_RQ_QUEUE_ERROR;
    }

    if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
        notify = true;
    spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

    if (notify)
        virtqueue_notify(vblk->vqs[qid].vq);
    return BLK_MQ_RQ_QUEUE_OK;
}
/**
 * This function sends an rpmsg "message" to the remote device.
 *
 * @param rdev - pointer to rpmsg device
 * @param src  - source address of channel
 * @param dst  - destination address of channel
 * @param data - data to transmit
 * @param size - size of data
 * @param wait - boolean, wait or not for buffer to become
 *               available
 *
 * @return - size of data sent or negative value for failure.
 *
 */
static int rpmsg_virtio_send_offchannel_raw(struct rpmsg_device *rdev,
                                            uint32_t src, uint32_t dst,
                                            const void *data,
                                            int size, int wait)
{
    struct rpmsg_virtio_device *rvdev;
    struct rpmsg_hdr rp_hdr;
    void *buffer = NULL;
    uint16_t idx;
    int tick_count;
    uint32_t buff_len;
    int status;
    struct metal_io_region *io;

    /* Get the associated remote device for channel. */
    rvdev = metal_container_of(rdev, struct rpmsg_virtio_device, rdev);

    status = rpmsg_virtio_get_status(rvdev);
    /* Validate device state */
    if (!(status & VIRTIO_CONFIG_STATUS_DRIVER_OK)) {
        return RPMSG_ERR_DEV_STATE;
    }

    if (wait)
        tick_count = RPMSG_TICK_COUNT / RPMSG_TICKS_PER_INTERVAL;
    else
        tick_count = 0;

    while (1) {
        int avail_size;

        /* Lock the device to enable exclusive access to virtqueues */
        metal_mutex_acquire(&rdev->lock);
        avail_size = _rpmsg_virtio_get_buffer_size(rvdev);
        if (size <= avail_size)
            buffer = rpmsg_virtio_get_tx_buffer(rvdev, &buff_len, &idx);
        metal_mutex_release(&rdev->lock);
        if (buffer || !tick_count)
            break;
        if (avail_size != 0)
            return RPMSG_ERR_BUFF_SIZE;
        metal_sleep_usec(RPMSG_TICKS_PER_INTERVAL);
        tick_count--;
    }

    if (!buffer)
        return RPMSG_ERR_NO_BUFF;

    /* Initialize RPMSG header. */
    rp_hdr.dst = dst;
    rp_hdr.src = src;
    rp_hdr.len = size;
    rp_hdr.reserved = 0;

    /* Copy data to rpmsg buffer. */
    io = rvdev->shbuf_io;
    status = metal_io_block_write(io, metal_io_virt_to_offset(io, buffer),
                                  &rp_hdr, sizeof(rp_hdr));
    RPMSG_ASSERT(status == sizeof(rp_hdr), "failed to write header\r\n");

    status = metal_io_block_write(io,
                                  metal_io_virt_to_offset(io, RPMSG_LOCATE_DATA(buffer)),
                                  data, size);
    RPMSG_ASSERT(status == size, "failed to write buffer\r\n");

    metal_mutex_acquire(&rdev->lock);

    /* Enqueue buffer on virtqueue. */
    status = rpmsg_virtio_enqueue_buffer(rvdev, buffer, buff_len, idx);
    RPMSG_ASSERT(status == VQUEUE_SUCCESS, "failed to enqueue buffer\r\n");
    /* Let the other side know that there is a job to process. */
    virtqueue_kick(rvdev->svq);

    metal_mutex_release(&rdev->lock);

    return size;
}