/* called when an rx buffer is used, and it's time to digest a message */
static void rpmsg_recv_done(struct virtqueue *rvq)
{
    struct virtproc_info *vrp = rvq->vdev->priv;
    struct device *dev = &rvq->vdev->dev;
    struct rpmsg_hdr *msg;
    unsigned int len, msgs_received = 0;
    int err;

    msg = virtqueue_get_buf(rvq, &len);
    if (!msg) {
        dev_err(dev, "uhm, incoming signal, but no used buffer ?\n");
        return;
    }

    while (msg) {
        err = rpmsg_recv_single(vrp, dev, msg, len);
        if (err)
            break;

        msgs_received++;

        msg = virtqueue_get_buf(rvq, &len);
    }

    dev_dbg(dev, "Received %u messages\n", msgs_received);

    /* tell the remote processor we added another available rx buffer */
    if (msgs_received)
        virtqueue_kick(vrp->rvq);
}
VOID
BalloonInterruptDpc(
    IN WDFINTERRUPT WdfInterrupt,
    IN WDFOBJECT    WdfDevice
    )
{
    unsigned int    len;
    PDEVICE_CONTEXT devCtx = GetDeviceContext(WdfDevice);
    BOOLEAN         bHostAck = FALSE;

    UNREFERENCED_PARAMETER(WdfInterrupt);
    TraceEvents(TRACE_LEVEL_INFORMATION, DBG_DPC, "--> %s\n", __FUNCTION__);

    if (virtqueue_get_buf(devCtx->InfVirtQueue, &len))
    {
        bHostAck = TRUE;
    }
    if (virtqueue_get_buf(devCtx->DefVirtQueue, &len))
    {
        bHostAck = TRUE;
    }

    if (bHostAck)
    {
        KeSetEvent(&devCtx->HostAckEvent, IO_NO_INCREMENT, FALSE);
    }

    if (devCtx->StatVirtQueue &&
        virtqueue_get_buf(devCtx->StatVirtQueue, &len))
    {
        WDFREQUEST request = devCtx->PendingWriteRequest;

        devCtx->HandleWriteRequest = TRUE;

        if ((request != NULL) &&
            (WdfRequestUnmarkCancelable(request) != STATUS_CANCELLED))
        {
            NTSTATUS status;
            PVOID buffer;
            size_t length = 0;

            devCtx->PendingWriteRequest = NULL;

            status = WdfRequestRetrieveInputBuffer(request, 0, &buffer, &length);
            if (!NT_SUCCESS(status))
            {
                length = 0;
            }
            WdfRequestCompleteWithInformation(request, status, length);
        }
    }

    if (devCtx->Thread)
    {
        KeSetEvent(&devCtx->WakeUpThread, 0, FALSE);
    }
}
static void req_done(struct virtqueue *vq)
{
    struct virtio_chan *chan = vq->vdev->priv;
    struct p9_fcall *rc;
    unsigned int len;
    struct p9_req_t *req;
    unsigned long flags;

    p9_debug(P9_DEBUG_TRANS, ": request done\n");

    while (1) {
        spin_lock_irqsave(&chan->lock, flags);
        rc = virtqueue_get_buf(chan->vq, &len);
        if (rc == NULL) {
            spin_unlock_irqrestore(&chan->lock, flags);
            break;
        }
        chan->ring_bufs_avail = 1;
        spin_unlock_irqrestore(&chan->lock, flags);
        /* Wakeup if anyone waiting for VirtIO ring space. */
        wake_up(chan->vc_wq);
        p9_debug(P9_DEBUG_TRANS, ": rc %p\n", rc);
        p9_debug(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
        req = p9_tag_lookup(chan->client, rc->tag);
        req->status = REQ_STATUS_RCVD;
        p9_client_cb(chan->client, req);
    }
}
static int virtio_crypto_alg_ablkcipher_close_session(
        struct virtio_crypto_ablkcipher_ctx *ctx,
        int encrypt)
{
    struct scatterlist outhdr, status_sg, *sgs[2];
    unsigned int tmp;
    struct virtio_crypto_destroy_session_req *destroy_session;
    struct virtio_crypto *vcrypto = ctx->vcrypto;
    int err;
    unsigned int num_out = 0, num_in = 0;

    spin_lock(&vcrypto->ctrl_lock);
    vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
    /* Pad ctrl header */
    vcrypto->ctrl.header.opcode =
        cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
    /* Set the default virtqueue id to 0 */
    vcrypto->ctrl.header.queue_id = 0;

    destroy_session = &vcrypto->ctrl.u.destroy_session;

    if (encrypt)
        destroy_session->session_id =
            cpu_to_le64(ctx->enc_sess_info.session_id);
    else
        destroy_session->session_id =
            cpu_to_le64(ctx->dec_sess_info.session_id);

    sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
    sgs[num_out++] = &outhdr;

    /* Return status and session id back */
    sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
        sizeof(vcrypto->ctrl_status.status));
    sgs[num_out + num_in++] = &status_sg;

    err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
            num_in, vcrypto, GFP_ATOMIC);
    if (err < 0) {
        spin_unlock(&vcrypto->ctrl_lock);
        return err;
    }
    virtqueue_kick(vcrypto->ctrl_vq);

    while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
           !virtqueue_is_broken(vcrypto->ctrl_vq))
        cpu_relax();

    if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
        spin_unlock(&vcrypto->ctrl_lock);
        pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
            vcrypto->ctrl_status.status,
            destroy_session->session_id);

        return -EINVAL;
    }
    spin_unlock(&vcrypto->ctrl_lock);

    return 0;
}
VOID
VIOSerialCtrlWorkHandler(
    IN WDFDEVICE Device
    )
{
    struct virtqueue *vq;
    PPORT_BUFFER     buf;
    UINT             len;
    NTSTATUS         status = STATUS_SUCCESS;
    PPORTS_DEVICE    pContext = GetPortsDevice(Device);

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_PNP, "--> %s\n", __FUNCTION__);

    vq = pContext->c_ivq;
    ASSERT(vq);

    WdfSpinLockAcquire(pContext->CInVqLock);
    while ((buf = virtqueue_get_buf(vq, &len)))
    {
        WdfSpinLockRelease(pContext->CInVqLock);
        buf->len = len;
        buf->offset = 0;
        VIOSerialHandleCtrlMsg(Device, buf);

        WdfSpinLockAcquire(pContext->CInVqLock);
        status = VIOSerialAddInBuf(vq, buf);
        if (!NT_SUCCESS(status))
        {
            TraceEvents(TRACE_LEVEL_ERROR, DBG_PNP,
                "%s::%d Error adding buffer to queue\n",
                __FUNCTION__, __LINE__);
            VIOSerialFreeBuffer(buf);
        }
    }
    WdfSpinLockRelease(pContext->CInVqLock);

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_PNP, "<-- %s\n", __FUNCTION__);
}
/*
 * Host sent us a control message.
 * Called in interrupt context!
 */
static void control_in_intr(struct virtqueue *vq)
{
    struct crypto_device *crdev;
    struct crypto_vq_buffer *buf;
    unsigned int len;

    debug("Entering\n");
    crdev = vq->vdev->priv;

    while ((buf = virtqueue_get_buf(vq, &len))) {
        buf->len = len;
        buf->offset = 0;
        handle_control_message(crdev, buf);

        spin_lock_irq(&crdev->c_lock);
        if (add_inbuf(crdev->c_ivq, buf) < 0) {
            printk(KERN_WARNING "Error adding buffer to queue\n");
            free_buf(buf);
        }
        spin_unlock_irq(&crdev->c_lock);
    }
    debug("Leaving\n");
}
static void virtblk_done(struct virtqueue *vq)
{
    struct virtio_blk *vblk = vq->vdev->priv;
    bool req_done = false;
    int qid = vq->index;
    struct virtblk_req *vbr;
    unsigned long flags;
    unsigned int len;

    spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
    do {
        virtqueue_disable_cb(vq);
        while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
            blk_mq_complete_request(vbr->req, vbr->req->errors);
            req_done = true;
        }
        if (unlikely(virtqueue_is_broken(vq)))
            break;
    } while (!virtqueue_enable_cb(vq));

    /* In case queue is stopped waiting for more buffers. */
    if (req_done)
        blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
    spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}
static void rpmsg_recv_done(struct virtqueue *rvq)
{
    struct rpmsg_hdr *msg;
    unsigned int len;
    struct rpmsg_endpoint *ept;
    struct scatterlist sg;
    unsigned long offset;
    void *sim_addr;
    struct virtproc_info *vrp = rvq->vdev->priv;
    struct device *dev = &rvq->vdev->dev;
    int err;

    /* if we fail to acquire the lock, a remove is in progress, so just return */
    if (!mutex_trylock(&vrp->rm_lock))
        return;

    /* make sure the descriptors are updated before reading */
    rmb();

    msg = virtqueue_get_buf(rvq, &len);
    if (!msg) {
        dev_err(dev, "uhm, incoming signal, but no used buffer ?\n");
        goto out;
    }

    dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Unused: %d\n",
                    msg->src, msg->dst, msg->len, msg->flags, msg->unused);
#if 0
    print_hex_dump(KERN_DEBUG, "rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
                    msg, sizeof(*msg) + msg->len, true);
#endif

    /* fetch the callback of the appropriate user */
    spin_lock(&vrp->endpoints_lock);
    ept = idr_find(&vrp->endpoints, msg->dst);
    spin_unlock(&vrp->endpoints_lock);

    if (ept && ept->cb)
        ept->cb(ept->rpdev, msg->data, msg->len, ept->priv, msg->src);
    else
        dev_warn(dev, "msg received with no recipient\n");

    /* add the buffer back to the remote processor's virtqueue */
    offset = ((unsigned long) msg) - ((unsigned long) vrp->rbufs);
    sim_addr = vrp->sim_base + offset;
    sg_init_one(&sg, sim_addr, sizeof(*msg) + len);

    err = virtqueue_add_buf_gfp(vrp->rvq, &sg, 0, 1, msg, GFP_KERNEL);
    if (err < 0) {
        dev_err(dev, "failed to add a virtqueue buffer: %d\n", err);
        goto out;
    }

    /* descriptors must be written before kicking remote processor */
    wmb();

    /* tell the remote processor we added another available rx buffer */
    virtqueue_kick(vrp->rvq);

out:
    /* drop the lock taken by mutex_trylock() on every exit path */
    mutex_unlock(&vrp->rm_lock);
}
static void random_recv_done(struct virtqueue *vq)
{
    /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
    if (!virtqueue_get_buf(vq, &data_avail))
        return;

    complete(&have_data);
}
/* called when an rx buffer is used, and it's time to digest a message */
static void rpmsg_recv_done(struct virtqueue *rvq)
{
    struct rpmsg_hdr *msg;
    unsigned int len;
    struct rpmsg_endpoint *ept;
    struct scatterlist sg;
    struct virtproc_info *vrp = rvq->vdev->priv;
    struct device *dev = &rvq->vdev->dev;
    int err;

    msg = virtqueue_get_buf(rvq, &len);
    if (!msg) {
        dev_err(dev, "uhm, incoming signal, but no used buffer ?\n");
        return;
    }

    dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Reserved: %d\n",
                    msg->src, msg->dst, msg->len, msg->flags, msg->reserved);
#if 0
    print_hex_dump(KERN_DEBUG, "rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
                    msg, sizeof(*msg) + msg->len, true);
#endif

    /*
     * We currently use fixed-sized buffers, so trivially sanitize
     * the reported payload length.
     */
    if (len > RPMSG_BUF_SIZE ||
        msg->len > (len - sizeof(struct rpmsg_hdr))) {
        dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg->len);
        return;
    }

    /* use the dst addr to fetch the callback of the appropriate user */
    mutex_lock(&vrp->endpoints_lock);
    ept = idr_find(&vrp->endpoints, msg->dst);
    mutex_unlock(&vrp->endpoints_lock);

    if (ept && ept->cb)
        ept->cb(ept->rpdev, msg->data, msg->len, ept->priv, msg->src);
    else
        dev_warn(dev, "msg received with no recipient\n");

    /* publish the real size of the buffer */
    sg_init_one(&sg, msg, RPMSG_BUF_SIZE);

    /* add the buffer back to the remote processor's virtqueue */
    err = virtqueue_add_buf(vrp->rvq, &sg, 0, 1, msg, GFP_KERNEL);
    if (err < 0) {
        dev_err(dev, "failed to add a virtqueue buffer: %d\n", err);
        return;
    }

    /* tell the remote processor we added another available rx buffer */
    virtqueue_kick(vrp->rvq);
}
VOID VIOSerialReclaimConsumedBuffers(IN PVIOSERIAL_PORT Port)
{
    WDFREQUEST request;
    PSINGLE_LIST_ENTRY iter;
    PVOID buffer;
    UINT len;
    struct virtqueue *vq = GetOutQueue(Port);

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_QUEUEING, "--> %s\n", __FUNCTION__);

    if (vq)
    {
        while ((buffer = virtqueue_get_buf(vq, &len)) != NULL)
        {
            if (Port->PendingWriteRequest != NULL)
            {
                request = Port->PendingWriteRequest;
                Port->PendingWriteRequest = NULL;

                if (WdfRequestUnmarkCancelable(request) != STATUS_CANCELLED)
                {
                    WdfRequestCompleteWithInformation(request, STATUS_SUCCESS,
                        (size_t)WdfRequestGetInformation(request));
                }
                else
                {
                    TraceEvents(TRACE_LEVEL_INFORMATION, DBG_QUEUEING,
                        "Request %p was cancelled.\n", request);
                }
            }

            iter = &Port->WriteBuffersList;
            while (iter->Next != NULL)
            {
                PWRITE_BUFFER_ENTRY entry = CONTAINING_RECORD(iter->Next,
                    WRITE_BUFFER_ENTRY, ListEntry);

                if (buffer == entry->Buffer)
                {
                    iter->Next = entry->ListEntry.Next;

                    ExFreePoolWithTag(buffer, VIOSERIAL_DRIVER_MEMORY_TAG);
                    ExFreePoolWithTag(entry, VIOSERIAL_DRIVER_MEMORY_TAG);
                }
                else
                {
                    iter = iter->Next;
                }
            }

            Port->OutVqFull = FALSE;
        }
    }

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_QUEUEING,
        "<-- %s Full: %d\n", __FUNCTION__, Port->OutVqFull);
}
static void random_recv_done(struct virtqueue *vq)
{
    struct virtrng_info *vi = vq->vdev->priv;

    /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
    if (!virtqueue_get_buf(vi->vq, &vi->data_avail))
        return;

    complete(&vi->have_data);
}
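For context, the completion above only fires once a previously queued input buffer has been filled by the host. A minimal sketch of the feeding side, modelled on the mainline virtio-rng driver (the helper name register_buffer and the per-device virtrng_info layout are taken from that driver, not from the snippet above):

static void register_buffer(struct virtrng_info *vi, u8 *buf, size_t size)
{
    struct scatterlist sg;

    sg_init_one(&sg, buf, size);

    /* There should always be room for one buffer. */
    virtqueue_add_inbuf(vi->vq, &sg, 1, buf, GFP_KERNEL);

    /* notify the host; random_recv_done() will complete vi->have_data later */
    virtqueue_kick(vi->vq);
}

A reader of the RNG would then call register_buffer() and block on wait_for_completion(&vi->have_data) before consuming vi->data_avail bytes.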
/* Free consumed buffers. */
void reclaim_consumed_buffers(struct crypto_device *crdev)
{
    void *buf;
    unsigned int len;

    debug("Entering\n");

    while ((buf = virtqueue_get_buf(crdev->ovq, &len)))
        kfree(buf);

    debug("Leaving\n");
}
// this procedure must be called with port InBuf spinlock held
VOID
VIOSerialDiscardPortDataLocked(
    IN PVIOSERIAL_PORT port
    )
{
    struct virtqueue *vq;
    PPORT_BUFFER buf = NULL;
    UINT len;
    NTSTATUS status = STATUS_SUCCESS;
    UINT ret = 0;

    TraceEvents(TRACE_LEVEL_INFORMATION, DBG_PNP, "--> %s\n", __FUNCTION__);

    vq = GetInQueue(port);

    if (port->InBuf)
    {
        buf = port->InBuf;
    }
    else if (vq)
    {
        buf = (PPORT_BUFFER)virtqueue_get_buf(vq, &len);
    }

    while (buf)
    {
        status = VIOSerialAddInBuf(vq, buf);
        if (!NT_SUCCESS(status))
        {
            ++ret;
            VIOSerialFreeBuffer(buf);
        }
        buf = (PPORT_BUFFER)virtqueue_get_buf(vq, &len);
    }
    port->InBuf = NULL;
    if (ret > 0)
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_PNP,
            "%s::%d Error adding %u buffers back to queue\n",
            __FUNCTION__, __LINE__, ret);
    }
    TraceEvents(TRACE_LEVEL_INFORMATION, DBG_PNP, "<-- %s\n", __FUNCTION__);
}
static void random_recv_done(struct virtqueue *vq)
{
    unsigned int len;

    /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
    if (!virtqueue_get_buf(vq, &len))
        return;

    data_left += len;
    complete(&have_data);
}
/* This is invoked whenever the remote processor completed processing
 * a TX msg we just sent, and the buffer is put back to the used ring.
 */
static void cfv_release_used_buf(struct virtqueue *vq_tx)
{
    struct cfv_info *cfv = vq_tx->vdev->priv;
    unsigned long flags;

    BUG_ON(vq_tx != cfv->vq_tx);

    for (;;) {
        unsigned int len;
        struct buf_info *buf_info;

        /* Get used buffer from used ring to recycle used descriptors */
        spin_lock_irqsave(&cfv->tx_lock, flags);
        buf_info = virtqueue_get_buf(vq_tx, &len);
        spin_unlock_irqrestore(&cfv->tx_lock, flags);

        /* Stop looping if there are no more buffers to free */
        if (!buf_info)
            break;

        free_buf_info(cfv, buf_info);

        /* watermark_tx indicates if we previously stopped the tx
         * queues. If we have enough free slots in the virtio ring,
         * re-establish memory reserved and open up tx queues.
         */
        if (cfv->vq_tx->num_free <= cfv->watermark_tx)
            continue;

        /* Re-establish memory reserve */
        if (cfv->reserved_mem == 0 && cfv->genpool)
            cfv->reserved_mem =
                gen_pool_alloc(cfv->genpool, cfv->reserved_size);

        /* Open up the tx queues */
        if (cfv->reserved_mem) {
            cfv->watermark_tx =
                virtqueue_get_vring_size(cfv->vq_tx);
            netif_tx_wake_all_queues(cfv->ndev);
            /* Buffers are recycled in cfv_netdev_tx, so
             * disable notifications when queues are opened.
             */
            virtqueue_disable_cb(cfv->vq_tx);
            ++cfv->stats.tx_flow_on;
        } else {
            /* if no memory reserve, wait for more free slots */
            WARN_ON(cfv->watermark_tx >
                virtqueue_get_vring_size(cfv->vq_tx));
            cfv->watermark_tx +=
                virtqueue_get_vring_size(cfv->vq_tx) / 4;
        }
    }
}
static void virtinput_recv_status(struct virtqueue *vq)
{
    struct virtio_input *vi = vq->vdev->priv;
    struct virtio_input_event *stsbuf;
    unsigned long flags;
    unsigned int len;

    spin_lock_irqsave(&vi->lock, flags);
    while ((stsbuf = virtqueue_get_buf(vi->sts, &len)) != NULL)
        kfree(stsbuf);
    spin_unlock_irqrestore(&vi->lock, flags);
}
/*
 * Send a buffer to the output virtqueue of the given crypto_device.
 * If nonblock is false wait until the host acknowledges the data receive.
 */
ssize_t send_buf(struct crypto_device *crdev, void *in_buf, size_t in_count,
                 bool nonblock)
{
    struct scatterlist sg[1];
    struct virtqueue *out_vq;
    ssize_t ret = 0;
    unsigned int len;

    debug("Entering\n");
    out_vq = crdev->ovq;

    /* Discard any consumed buffers. */
    reclaim_consumed_buffers(crdev);

    sg_init_one(sg, in_buf, in_count);

    /* add sg list to virtqueue and notify host */
    ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf);
    if (ret < 0) {
        debug("Oops! Error adding buffer to vqueue\n");
        in_count = 0;
        goto done;
    }

    if (ret == 0) {
        printk(KERN_WARNING "ovq_full!!!!\n");
        crdev->ovq_full = true;
    }

    virtqueue_kick(out_vq);

    if (nonblock)
        goto done;

    /*
     * if nonblock is false we wait until the host acknowledges it pushed
     * out the data we sent.
     */
    while (!virtqueue_get_buf(out_vq, &len))
        cpu_relax();

    debug("Leaving\n");
done:
    /*
     * We're expected to return the amount of data we wrote -- all
     * of it
     */
    return in_count;
}
static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
{
    struct scatterlist sg;
    unsigned int len;

    sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);

    /* We should always be able to add one buffer to an empty queue. */
    virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
    virtqueue_kick(vq);

    /* When host has read buffer, this completes via balloon_ack */
    wait_event(vb->acked, virtqueue_get_buf(vq, &len));
}
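The balloon_ack referenced in the comment is the virtqueue callback wired up for the inflate/deflate queues; in the mainline virtio_balloon driver it is essentially just a wake-up on vb->acked, which lets the wait_event() above re-check virtqueue_get_buf(). A minimal sketch, assuming the same vb->acked wait queue as in tell_host():

static void balloon_ack(struct virtqueue *vq)
{
    struct virtio_balloon *vb = vq->vdev->priv;

    /* the host consumed our pfn buffer; let tell_host() proceed */
    wake_up(&vb->acked);
}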
static void stats_handle_request(struct virtio_balloon *vb)
{
    struct virtqueue *vq;
    struct scatterlist sg;
    unsigned int len, num_stats;

    num_stats = update_balloon_stats(vb);

    vq = vb->stats_vq;
    if (!virtqueue_get_buf(vq, &len))
        return;
    sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
    virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
    virtqueue_kick(vq);
}
static int send_cmd_id_stop(struct virtio_balloon *vb)
{
    struct scatterlist sg;
    struct virtqueue *vq = vb->free_page_vq;
    int err, unused;

    /* Detach all the used buffers from the vq */
    while (virtqueue_get_buf(vq, &unused))
        ;

    sg_init_one(&sg, &vb->cmd_id_stop, sizeof(vb->cmd_id_stop));
    err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_stop, GFP_KERNEL);
    if (!err)
        virtqueue_kick(vq);
    return err;
}
/* minimal buf "allocator" that is just enough for now */ static void *get_a_buf(struct virtproc_info *vrp) { unsigned int len; void *buf = NULL; /* make sure the descriptors are updated before reading */ rmb(); /* either pick the next unused buffer */ if (vrp->last_sbuf < vrp->num_bufs / 2) buf = vrp->sbufs + vrp->buf_size * vrp->last_sbuf++; /* or recycle a used one */ else buf = virtqueue_get_buf(vrp->svq, &len); return buf; }
/*
 * Get a buffer from the ivq.
 * If we already have one return it.
 */
struct crypto_vq_buffer *get_inbuf(struct crypto_device *crdev)
{
    struct crypto_vq_buffer *buf;
    unsigned int len;

    debug("Entering\n");

    if (crdev->inbuf)
        return crdev->inbuf;

    buf = virtqueue_get_buf(crdev->ivq, &len);
    if (buf) {
        buf->len = len;
        buf->offset = 0;
    }

    debug("Leaving\n");
    return buf;
}
// interrupt handler
// get available buffers from virtqueue and add them in list
static void vq_has_data(struct virtqueue *vq)
{
    unsigned int len;
    unsigned long flags;
    struct wait_struct *tmp;
    void *d;

    while ((d = virtqueue_get_buf(vq, &len))) {
        tmp = kmalloc(sizeof(struct wait_struct), GFP_ATOMIC);
        if (!tmp)
            return;
        tmp->data = d;

        spin_lock_irqsave(&wait_data.lock, flags);
        list_add(&tmp->list, &wait_data.list);
        spin_unlock_irqrestore(&wait_data.lock, flags);
    }
    wake_up_interruptible_all(&cudrvdata.wq);
}
static int get_free_page_and_send(struct virtio_balloon *vb)
{
    struct virtqueue *vq = vb->free_page_vq;
    struct page *page;
    struct scatterlist sg;
    int err, unused;
    void *p;

    /* Detach all the used buffers from the vq */
    while (virtqueue_get_buf(vq, &unused))
        ;

    page = alloc_pages(VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG,
               VIRTIO_BALLOON_FREE_PAGE_ORDER);
    /*
     * When the allocation returns NULL, it indicates that we have got all
     * the possible free pages, so return -EINTR to stop.
     */
    if (!page)
        return -EINTR;

    p = page_address(page);
    sg_init_one(&sg, p, VIRTIO_BALLOON_FREE_PAGE_SIZE);
    /* There is always 1 entry reserved for the cmd id to use. */
    if (vq->num_free > 1) {
        err = virtqueue_add_inbuf(vq, &sg, 1, p, GFP_KERNEL);
        if (unlikely(err)) {
            free_pages((unsigned long)p,
                   VIRTIO_BALLOON_FREE_PAGE_ORDER);
            return err;
        }
        virtqueue_kick(vq);
        spin_lock_irq(&vb->free_page_list_lock);
        balloon_page_push(&vb->free_page_list, page);
        vb->num_free_page_blocks++;
        spin_unlock_irq(&vb->free_page_list_lock);
    } else {
        /*
         * The vq has no available entry to add this page block, so
         * just free it.
         */
        free_pages((unsigned long)p, VIRTIO_BALLOON_FREE_PAGE_ORDER);
    }

    return 0;
}
VOID
VIOSerialSendCtrlMsg(
    IN WDFDEVICE Device,
    IN ULONG id,
    IN USHORT event,
    IN USHORT value
    )
{
    struct VirtIOBufferDescriptor sg;
    struct virtqueue *vq;
    UINT len;
    PPORTS_DEVICE pContext = GetPortsDevice(Device);
    VIRTIO_CONSOLE_CONTROL cpkt;

    if (!pContext->isHostMultiport)
    {
        return;
    }

    vq = pContext->c_ovq;

    TraceEvents(TRACE_LEVEL_INFORMATION, DBG_PNP,
        "--> %s vq = %p\n", __FUNCTION__, vq);

    cpkt.id = id;
    cpkt.event = event;
    cpkt.value = value;

    sg.physAddr = MmGetPhysicalAddress(&cpkt);
    sg.length = sizeof(cpkt);

    WdfWaitLockAcquire(pContext->COutVqLock, NULL);
    if (0 <= virtqueue_add_buf(vq, &sg, 1, 0, &cpkt, NULL, 0))
    {
        virtqueue_kick(vq);
        while (!virtqueue_get_buf(vq, &len))
        {
            LARGE_INTEGER interval;
            interval.QuadPart = -1;
            KeDelayExecutionThread(KernelMode, FALSE, &interval);
        }
    }
    WdfWaitLockRelease(pContext->COutVqLock);

    TraceEvents(TRACE_LEVEL_INFORMATION, DBG_PNP, "<-- %s\n", __FUNCTION__);
}
static void *get_a_tx_buf(struct virtproc_info *vrp)
{
    unsigned int len;
    void *ret;

    mutex_lock(&vrp->tx_lock);

    if (vrp->last_sbuf < RPMSG_NUM_BUFS / 2)
        ret = vrp->sbufs + RPMSG_BUF_SIZE * vrp->last_sbuf++;
    else
        ret = virtqueue_get_buf(vrp->svq, &len);

    mutex_unlock(&vrp->tx_lock);

    return ret;
}
static void virtinput_recv_events(struct virtqueue *vq)
{
    struct virtio_input *vi = vq->vdev->priv;
    struct virtio_input_event *event;
    unsigned long flags;
    unsigned int len;

    spin_lock_irqsave(&vi->lock, flags);
    if (vi->ready) {
        while ((event = virtqueue_get_buf(vi->evt, &len)) != NULL) {
            spin_unlock_irqrestore(&vi->lock, flags);
            input_event(vi->idev,
                    le16_to_cpu(event->type),
                    le16_to_cpu(event->code),
                    le32_to_cpu(event->value));
            spin_lock_irqsave(&vi->lock, flags);
            virtinput_queue_evtbuf(vi, event);
        }
        virtqueue_kick(vq);
    }
    spin_unlock_irqrestore(&vi->lock, flags);
}
/* super simple buffer "allocator" that is just enough for now */ static void *get_a_tx_buf(struct virtproc_info *vrp) { unsigned int len; void *ret; /* support multiple concurrent senders */ mutex_lock(&vrp->tx_lock); /* * either pick the next unused tx buffer * (half of our buffers are used for sending messages) */ if (vrp->last_sbuf < RPMSG_NUM_BUFS / 2) ret = vrp->sbufs + RPMSG_BUF_SIZE * vrp->last_sbuf++; /* or recycle a used one */ else ret = virtqueue_get_buf(vrp->svq, &len); mutex_unlock(&vrp->tx_lock); return ret; }
PVOID
VIOSerialGetInBuf(
    IN PVIOSERIAL_PORT port
    )
{
    PPORT_BUFFER buf = NULL;
    struct virtqueue *vq = GetInQueue(port);
    UINT len;

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_QUEUEING, "--> %s\n", __FUNCTION__);

    if (vq)
    {
        buf = virtqueue_get_buf(vq, &len);
        if (buf)
        {
            buf->len = len;
            buf->offset = 0;
        }
    }

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_QUEUEING, "<-- %s\n", __FUNCTION__);
    return buf;
}