static int
DsRequestCb(void *notifyData,    // IN: callback data
            VMCIDatagram *msg)   // IN: datagram
{
   /*
    * FIXME: An on-stack 300-byte buffer is a no-no. Besides that, it is
    * ignored anyway.
    */
   char replyBuffer[VMCI_DS_MAX_MSG_SIZE + sizeof(VMCIDatagram)];
   VMCIDatagram *replyMsg = (VMCIDatagram *)replyBuffer;
   int written, retval;
   VMCIPrivilegeFlags srcPrivFlags;

   VMCI_DEBUG_LOG((LGPFX"Got request from context: %d\n", msg->src.context));

   if (VMCIDatagram_GetPrivFlags(msg->src, &srcPrivFlags) != VMCI_SUCCESS) {
      retval = VMCI_ERROR_INVALID_ARGS;
      goto done;
   }

   replyMsg->dst = msg->src;
   replyMsg->src = dsAPI.handle;

   DsHandleMessage(VMCI_DG_PAYLOAD(msg), VMCI_DG_PAYLOAD(replyMsg),
                   VMCI_DS_MAX_MSG_SIZE, &written, msg->src.context,
                   srcPrivFlags);
   replyMsg->payloadSize = written;

   /* Send reply back to source handle. */
   retval = VMCIDatagramSendInt(replyMsg);

done:
   if (retval >= VMCI_SUCCESS) {
      VMCI_DEBUG_LOG((LGPFX"Successfully replied with %d bytes\n", written));
   } else {
      VMCILOG((LGPFX"Failed to reply to request: %d.\n", retval));
   }

   return retval;
}
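DsRequestCb() only runs once it has been registered as the receive callback for the discovery-service resource. Below is a minimal registration sketch, assuming the VMCIDatagram_CreateHnd() entry point and the VMCI_DS_RESOURCE_ID / VMCI_FLAG_ANYCID_DG_HND constants from the VMCI headers; the exact names and signature should be checked against the version in use.

/*
 * Hypothetical registration of the discovery-service handler. Assumes
 * VMCIDatagram_CreateHnd(resourceID, flags, recvCB, clientData, outHandle);
 * adjust to the actual header if the signature differs.
 */
static int
DsRegisterRequestHandler(void)
{
   int result;

   result = VMCIDatagram_CreateHnd(VMCI_DS_RESOURCE_ID,      /* assumed well-known resource ID */
                                   VMCI_FLAG_ANYCID_DG_HND,  /* accept datagrams from any context */
                                   DsRequestCb,              /* callback shown above */
                                   NULL,                     /* notifyData */
                                   &dsAPI.handle);           /* handle used as the reply source */
   if (result < VMCI_SUCCESS) {
      VMCILOG((LGPFX"Failed to register datagram handler: %d.\n", result));
   }
   return result;
}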
/*
 * Verify that the host supports the hypercalls we need. If it does not,
 * try to find fallback hypercalls and use those instead (currently there
 * are none). Returns 0 if the required hypercalls (or fallback hypercalls)
 * are supported by the host, an error otherwise.
 */
static int vmci_check_host_caps(struct pci_dev *pdev)
{
	bool result;
	struct vmci_resource_query_msg *msg;
	u32 msg_size = sizeof(struct vmci_resource_query_hdr) +
				VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
	struct vmci_datagram *check_msg;

	check_msg = kmalloc(msg_size, GFP_KERNEL);
	if (!check_msg) {
		dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
		return -ENOMEM;
	}

	check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					  VMCI_RESOURCES_QUERY);
	check_msg->src = VMCI_ANON_SRC_HANDLE;
	check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE;
	msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(check_msg);

	msg->num_resources = VMCI_UTIL_NUM_RESOURCES;
	msg->resources[0] = VMCI_GET_CONTEXT_ID;

	/* Checks that hyper calls are supported */
	result = vmci_send_datagram(check_msg) == 0x01;
	kfree(check_msg);

	dev_dbg(&pdev->dev, "%s: Host capability check: %s\n",
		__func__, result ? "PASSED" : "FAILED");

	/* We need the vector. There are no fallbacks. */
	return result ? 0 : -ENXIO;
}
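The query message built above is a plain VMCI datagram whose payload starts with a resource count followed by the resource IDs. The sketch below is a rough reconstruction of that payload, inferred only from the fields used here; the authoritative definitions live in the vmw_vmci headers and may include padding.

/*
 * Reconstructed-from-usage sketch, not the real header definition.
 * The datagram is laid out as [struct vmci_datagram][payload], which is
 * why check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE above.
 */
struct vmci_resource_query_msg_sketch {
	u32 num_resources;	/* VMCI_UTIL_NUM_RESOURCES entries follow */
	u32 resources[1];	/* resource IDs, e.g. VMCI_GET_CONTEXT_ID */
};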
static Bool
VMCIUtilCheckHostCapabilities(void)
{
   int result;
   VMCIResourcesQueryMsg *msg;
   uint32 msgSize = sizeof(VMCIResourcesQueryHdr) +
                    VMCI_UTIL_NUM_RESOURCES * sizeof(VMCI_Resource);
   VMCIDatagram *checkMsg = VMCI_AllocKernelMem(msgSize, VMCI_MEMORY_NONPAGED);

   if (checkMsg == NULL) {
      VMCI_WARNING((LGPFX"Check host: Insufficient memory.\n"));
      return FALSE;
   }

   checkMsg->dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID,
                                    VMCI_RESOURCES_QUERY);
   checkMsg->src = VMCI_ANON_SRC_HANDLE;
   checkMsg->payloadSize = msgSize - VMCI_DG_HEADERSIZE;
   msg = (VMCIResourcesQueryMsg *)VMCI_DG_PAYLOAD(checkMsg);

   msg->numResources = VMCI_UTIL_NUM_RESOURCES;
   msg->resources[0] = VMCI_GET_CONTEXT_ID;

   result = VMCI_SendDatagram(checkMsg);
   VMCI_FreeKernelMem(checkMsg, msgSize);

   /* We need the vector. There are no fallbacks. */
   return (result == 0x1);
}
static int
VPageChannelDgRecvFunc(void *clientData,   // IN
                       VMCIDatagram *dg)   // IN
{
   VPageChannel *channel = (VPageChannel *)clientData;

   ASSERT(channel);
   ASSERT(dg);

   if (dg->src.context != VMCI_HOST_CONTEXT_ID ||
       dg->src.resource != channel->peerDgHandle.resource) {
      VMCI_WARNING((LGPFX"Received a packet from an unknown source "
                    "(channel=%p) (handle=0x%x:0x%x).\n",
                    channel, dg->src.context, dg->src.resource));
      return VMCI_ERROR_NO_ACCESS;
   }

   if (dg->payloadSize < sizeof (VPageChannelPacket)) {
      VMCI_WARNING((LGPFX"Received invalid packet (channel=%p) "
                    "(size=%"FMT64"u).\n",
                    channel, dg->payloadSize));
      return VMCI_ERROR_INVALID_ARGS;
   }

   return VPageChannelRecvPacket(channel, VMCI_DG_PAYLOAD(dg));
}
static int
HgfsVmciChannelTerminateSession(HgfsTransportChannel *channel)
{
   int ret = 0;
   VMCIDatagram *dg;
   HgfsVmciTransportHeader *transportHeader;
   HgfsVmciHeaderNode *headerNode;

   dg = kmalloc(sizeof *dg + sizeof *transportHeader, GFP_KERNEL);
   if (NULL == dg) {
      LOG(4, (KERN_WARNING "%s failed to allocate\n", __func__));
      return -ENOMEM;
   }

   /* Initialize datagram */
   dg->src = *(VMCIHandle *)channel->priv;
   dg->dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_HGFS_TRANSPORT);
   dg->payloadSize = sizeof *transportHeader;

   transportHeader = VMCI_DG_PAYLOAD(dg);
   headerNode = &transportHeader->node;
   headerNode->pktType = HGFS_TH_TERMINATE_SESSION;
   headerNode->version = HGFS_VMCI_VERSION_1;
   transportHeader->iovCount = 0;

   LOG(1, (KERN_WARNING "Terminating session with host\n"));
   if ((ret = vmci_datagram_send(dg)) < VMCI_SUCCESS) {
      if (ret == HGFS_VMCI_TRANSPORT_ERROR) {
         LOG(0, (KERN_WARNING "HGFS transport error occurred; don't blame VMCI\n"));
      }
      LOG(0, (KERN_WARNING "Cannot communicate with server.\n"));
   } else {
      int i;
      for (i = 0; i < gHgfsShmemPages.totalPageCount; i++) {
         free_page(gHgfsShmemPages.list[i].va);
      }
   }

   kfree(dg);
   return ret;
}
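The HGFS channel code in this file builds the same on-the-wire structure for every datagram it sends: a small header node (version and packet type) followed by an entry count and a variable-length array of iovs. The sketch below is inferred purely from the field accesses in the functions above and below; the authoritative definitions live in the HGFS transport headers, and field order, types, and packing are assumptions.

/*
 * Sketch of the transport header layout inferred from usage in this file.
 * Not the real definition.
 */
typedef struct HgfsVmciHeaderNodeSketch {
   uint32 version;   /* HGFS_VMCI_VERSION_1 */
   uint32 pktType;   /* HGFS_TH_REQUEST, HGFS_TH_TERMINATE_SESSION, ... */
} HgfsVmciHeaderNodeSketch;

typedef struct HgfsVmciTransportHeaderSketch {
   HgfsVmciHeaderNodeSketch node;
   uint32 iovCount;              /* number of entries that follow */
   union {
      HgfsIov iov[1];            /* guest physical ranges of the request */
      HgfsAsyncIov asyncIov[1];  /* shared pages handed to the host */
   };
} HgfsVmciTransportHeaderSketch;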
static int
HgfsVmciChannelCallback(void *data,         // IN: unused
                        VMCIDatagram *dg)   // IN: datagram
{
   HgfsVmciAsyncReply *reply = (HgfsVmciAsyncReply *)VMCI_DG_PAYLOAD(dg);
   HgfsVmciHeaderNode *replyNode = &reply->node;
   HgfsTransportChannel *channel;

   LOG(10, (KERN_WARNING "Received VMCI channel callback\n"));

   if (replyNode->version != HGFS_VMCI_VERSION_1) {
      return HGFS_VMCI_VERSION_MISMATCH;
   }

   switch (replyNode->pktType) {
   case HGFS_ASYNC_IOREP:
      LOG(10, (KERN_WARNING "Received ID %"FMT64"x\n", reply->response.id));
      HgfsVmciChannelCompleteRequest(reply->response.id);
      break;
   case HGFS_ASYNC_IOREQ_SHMEM:
      HgfsRequestAsyncShmemDispatch(reply->shmem.iov, reply->shmem.count);
      break;
   case HGFS_ASYNC_IOREQ_GET_PAGES:
      channel = HgfsGetVmciChannel();
      LOG(10, (KERN_WARNING "Should send pages to the host\n"));
      HgfsVmciChannelPassGuestPages(channel);
      break;
   default:
      ASSERT(0);
      return HGFS_VMCI_TRANSPORT_ERROR;
   }

   return 0;
}
static int
HgfsVmciChannelSend(HgfsTransportChannel *channel, // IN: Channel
                    HgfsReq *req)                  // IN: request to send
{
   int ret;
   int iovCount = 0;
   VMCIDatagram *dg;
   HgfsVmciTransportHeader *transportHeader;
   HgfsVmciHeaderNode *headerNode;
   HgfsVmciTransportStatus *transportStatus;
   size_t transportHeaderSize;
   size_t bufferSize;
   size_t total;
   uint64 pa;
   uint64 len;
   uint64 id;
   int j;

   ASSERT(req);
   ASSERT(req->buffer);
   ASSERT(req->state == HGFS_REQ_STATE_UNSENT ||
          req->state == HGFS_REQ_STATE_ALLOCATED);
   ASSERT(req->payloadSize <= req->bufferSize);

   /* Note that req->bufferSize does not include the chunk used by the transport. */
   total = req->bufferSize + sizeof (HgfsVmciTransportStatus);

   /* Calculate the number of entries for the metaPacket. */
   iovCount = (total + (size_t)req->buffer % PAGE_SIZE - 1) / PAGE_SIZE + 1;
   ASSERT(total + (size_t)req->buffer % PAGE_SIZE <= PAGE_SIZE);

   transportHeaderSize = sizeof *transportHeader +
                         (iovCount + req->numEntries - 1) * sizeof (HgfsIov);
   dg = kmalloc(sizeof *dg + transportHeaderSize, GFP_KERNEL);
   if (NULL == dg) {
      LOG(4, (KERN_WARNING "%s failed to allocate\n", __func__));
      return -ENOMEM;
   }

   /* Initialize datagram */
   dg->src = *(VMCIHandle *)channel->priv;
   dg->dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_HGFS_TRANSPORT);
   dg->payloadSize = transportHeaderSize;

   transportHeader = VMCI_DG_PAYLOAD(dg);
   headerNode = &transportHeader->node;
   headerNode->version = HGFS_VMCI_VERSION_1;
   headerNode->pktType = HGFS_TH_REQUEST;

   total = req->bufferSize + sizeof (HgfsVmciTransportStatus);
   bufferSize = 0;
   for (iovCount = 0; bufferSize < req->bufferSize; iovCount++) {
      /*
       * req->buffer should have been allocated by kmalloc()/__get_free_pages().
       * Specifically, it cannot be a buffer that is mapped from high memory;
       * virt_to_phys() does not work for those.
       */
      pa = virt_to_phys(req->buffer + bufferSize);
      len = total < (PAGE_SIZE - pa % PAGE_SIZE) ?
            total : (PAGE_SIZE - pa % PAGE_SIZE);
      bufferSize += len;
      total -= len;
      transportHeader->iov[iovCount].pa = pa;
      transportHeader->iov[iovCount].len = len;
      LOG(8, ("iovCount = %u PA = %"FMT64"x len=%u\n",
              iovCount, transportHeader->iov[iovCount].pa,
              transportHeader->iov[iovCount].len));
   }

   /* Right now we do not expect a discontiguous request packet. */
   ASSERT(iovCount == 1);
   ASSERT(total == 0);
   ASSERT(bufferSize == req->bufferSize + sizeof (HgfsVmciTransportStatus));

   LOG(0, (KERN_WARNING "Size of request is %Zu\n", req->payloadSize));

   for (j = 0; j < req->numEntries; j++, iovCount++) {
      /* A page table walk will probably be needed here; not figured out yet. */
      ASSERT(req->dataPacket);
      transportHeader->iov[iovCount].pa = page_to_phys(req->dataPacket[j].page);
      transportHeader->iov[iovCount].pa += req->dataPacket[j].offset;
      transportHeader->iov[iovCount].len = req->dataPacket[j].len;
      LOG(8, ("iovCount = %u PA = %"FMT64"x len=%u\n",
              iovCount, transportHeader->iov[iovCount].pa,
              transportHeader->iov[iovCount].len));
   }

   transportHeader->iovCount = iovCount;

   /* Initialize transport status. */
   transportStatus = (HgfsVmciTransportStatus *)req->buffer;
   transportStatus->status = HGFS_TS_IO_PENDING;
   transportStatus->size = req->bufferSize + sizeof (HgfsVmciTransportStatus);

   /*
    * Don't try to set req->state after vmci_datagram_send(). It may be too
    * late then: we could have received a datagram by that point, and the
    * datagram handler expects the request's state to be submitted.
    */
   req->state = HGFS_REQ_STATE_SUBMITTED;
   id = req->id;

   if ((ret = vmci_datagram_send(dg)) < VMCI_SUCCESS) {
      if (ret == HGFS_VMCI_TRANSPORT_ERROR) {
         LOG(0, (KERN_WARNING "HGFS transport error occurred; don't blame VMCI\n"));
      } else if (ret == HGFS_VMCI_VERSION_MISMATCH) {
         LOG(0, (KERN_WARNING "Version mismatch\n"));
      }
      req->state = HGFS_REQ_STATE_UNSENT;
      kfree(dg);
      return -EIO;
   }

   LOG(0, (KERN_WARNING "Hgfs received response\n"));
   HgfsVmciChannelCompleteRequest(id);

   kfree(dg);
   return 0;
}
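The first loop in HgfsVmciChannelSend() cuts the physically contiguous request buffer into chunks that never cross a page boundary; each chunk becomes one iov entry. Below is a standalone sketch of just that arithmetic. It is a hypothetical helper, not part of the driver: PAGE_SIZE_SKETCH stands in for the kernel's PAGE_SIZE, and it advances the physical address directly rather than recomputing it with virt_to_phys(), which is equivalent for a kmalloc()ed, physically contiguous buffer.

#include <stddef.h>

#define PAGE_SIZE_SKETCH 4096u

/*
 * Split a physically contiguous range [pa, pa + total) into chunks that do
 * not cross page boundaries. Returns the number of chunks produced, i.e.
 * the value that ends up in iovCount above.
 */
static unsigned int
SplitIntoPageChunks(unsigned long long pa, size_t total,
                    unsigned long long *chunkPa, size_t *chunkLen,
                    unsigned int maxChunks)
{
   unsigned int n = 0;

   while (total > 0 && n < maxChunks) {
      size_t room = PAGE_SIZE_SKETCH - (size_t)(pa % PAGE_SIZE_SKETCH);
      size_t len = total < room ? total : room;

      chunkPa[n] = pa;     /* physical start of this chunk */
      chunkLen[n] = len;   /* bytes up to the next page boundary */
      pa += len;
      total -= len;
      n++;
   }
   return n;
}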
static Bool
HgfsVmciChannelPassGuestPages(HgfsTransportChannel *channel) // IN:
{
   Bool retVal = TRUE;
   int ret;
   int i;
   int j = 0;
   size_t transportHeaderSize;
   HgfsVmciTransportHeader *transportHeader = NULL;
   HgfsVmciHeaderNode *headerNode;
   VMCIDatagram *dg;

   if (!gHgfsShmemPages.freePageCount) {
      return TRUE;
   }

   transportHeaderSize = sizeof (HgfsVmciTransportHeader) +
                         (gHgfsShmemPages.freePageCount - 1) * sizeof (HgfsAsyncIov);

   dg = kmalloc(sizeof *dg + transportHeaderSize, GFP_ATOMIC);
   if (!dg) {
      LOG(4, (KERN_WARNING "%s failed to allocate\n", __func__));
      retVal = FALSE;
      goto exit;
   }

   transportHeader = VMCI_DG_PAYLOAD(dg);
   headerNode = &transportHeader->node;

   for (i = 0; i < gHgfsShmemPages.totalPageCount; i++) {
      if (gHgfsShmemPages.list[i].free) {
         transportHeader->asyncIov[j].index = i;
         transportHeader->asyncIov[j].va = gHgfsShmemPages.list[i].va;
         transportHeader->asyncIov[j].pa = gHgfsShmemPages.list[i].pa;
         transportHeader->asyncIov[j].len = PAGE_SIZE;
         j++;
      }
   }

   dg->src = *(VMCIHandle *)channel->priv;
   dg->dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_HGFS_TRANSPORT);
   dg->payloadSize = transportHeaderSize;

   headerNode->version = HGFS_VMCI_VERSION_1;
   headerNode->pktType = HGFS_TH_REP_GET_PAGES;

   ASSERT(gHgfsShmemPages.freePageCount == j);
   transportHeader->iovCount = j;

   LOG(10, (KERN_WARNING "Sending %d guest pages\n", j));
   if ((ret = vmci_datagram_send(dg)) < VMCI_SUCCESS) {
      if (ret == HGFS_VMCI_TRANSPORT_ERROR) {
         LOG(0, (KERN_WARNING "HGFS transport error occurred; don't blame VMCI\n"));
      }
      retVal = FALSE;
   }

exit:
   if (retVal) {
      /* We successfully sent pages to the host. Mark all pages as allocated. */
      for (i = 0; i < gHgfsShmemPages.totalPageCount; i++) {
         gHgfsShmemPages.list[i].free = FALSE;
      }
      gHgfsShmemPages.freePageCount = 0;
   }
   kfree(dg);
   return retVal;
}
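Both HgfsVmciChannelPassGuestPages() and HgfsVmciChannelTerminateSession() manipulate a global page pool, gHgfsShmemPages, whose definition is not part of this excerpt. The sketch below is reconstructed purely from the fields referenced in this file and is an assumption, not the real declaration.

/*
 * Assumed shape of the shared-memory page pool used above. Not the real
 * definition; names and types are inferred from usage only.
 */
typedef struct HgfsShmemPageSketch {
   unsigned long va;   /* kernel virtual address, freed via free_page() */
   uint64 pa;          /* guest physical address handed to the host */
   Bool free;          /* TRUE until the page has been passed to the host */
} HgfsShmemPageSketch;

typedef struct HgfsShmemPagesSketch {
   HgfsShmemPageSketch *list;   /* totalPageCount entries */
   int totalPageCount;
   int freePageCount;
} HgfsShmemPagesSketch;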
static int
VPageChannelSendControl(VPageChannel *channel,        // IN
                        VPageChannelPacketType type,  // IN
                        char *message,                // IN
                        int len,                      // IN
                        int numElems,                 // IN
                        VPageChannelElem *elems)      // IN
{
   int retval;
   VPageChannelPacket *packet;
   VMCIDatagram *dg;

   ASSERT(channel);
   ASSERT(type == VPCPacket_Data ||
          type == VPCPacket_GuestConnect ||
          type == VPCPacket_SetRecvBuffer ||
          type == VPCPacket_GuestDisconnect);

   dg = NULL;
   retval = VPageChannelAllocDatagram(channel, len, numElems, &dg);
   if (retval < VMCI_SUCCESS) {
      return retval;
   }

   packet = (VPageChannelPacket *)VMCI_DG_PAYLOAD(dg);
   packet->type = type;
   packet->msgLen = len;
   packet->numElems = numElems;

   if (len) {
      ASSERT(message);
      memcpy(VPAGECHANNEL_PACKET_MESSAGE(packet), message, len);
   }

   if (numElems) {
      ASSERT(elems);
      memcpy(VPAGECHANNEL_PACKET_ELEMS(packet), elems,
             numElems * sizeof (VPageChannelElem));
   }

   retval = vmci_datagram_send(dg);
   if (retval < VMCI_SUCCESS) {
      VMCI_WARNING((LGPFX"Failed to send packet (channel=%p) to "
                    "(handle=0x%x:0x%x) (err=%d).\n",
                    channel, dg->dst.context, dg->dst.resource, retval));
   } else {
      /*
       * We don't care about how many bytes were sent, and callers may not
       * expect > 0 to mean success, so just convert to exactly success.
       */
      retval = VMCI_SUCCESS;
   }

   VMCI_FreeKernelMem(dg, VMCI_DG_SIZE(dg));

   return retval;
}
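VPageChannelAllocDatagram() is not included in this excerpt. Based on the packet layout implied by the memcpy()s above (header, then msgLen message bytes, then numElems elements), it presumably sizes the datagram roughly as sketched below. This is a hypothetical reconstruction: field names such as channel->dgHandle are assumptions, and only channel->peerDgHandle is known from VPageChannelDgRecvFunc() above.

/*
 * Sketch of the sizing an allocator like VPageChannelAllocDatagram()
 * presumably performs. Hypothetical; the real allocator may differ in
 * memory type, alignment handling, and field names.
 */
static int
VPageChannelAllocDatagramSketch(VPageChannel *channel, // IN
                                size_t messageLen,     // IN
                                int numElems,          // IN
                                VMCIDatagram **outDg)  // OUT
{
   size_t size = sizeof(VMCIDatagram) + sizeof(VPageChannelPacket) +
                 messageLen + numElems * sizeof(VPageChannelElem);
   VMCIDatagram *dg = VMCI_AllocKernelMem(size, VMCI_MEMORY_NONPAGED);

   if (dg == NULL) {
      return VMCI_ERROR_NO_MEM;
   }

   memset(dg, 0, size);
   dg->src = channel->dgHandle;      /* assumed: the channel's own handle */
   dg->dst = channel->peerDgHandle;  /* peer handle, as referenced in
                                        VPageChannelDgRecvFunc() above */
   dg->payloadSize = size - sizeof(VMCIDatagram);
   *outDg = dg;

   return VMCI_SUCCESS;
}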