/*
 * HgfsVmciChannelTerminateSession --
 *
 *    Sends an HGFS_TH_TERMINATE_SESSION datagram to the host to tear down
 *    the VMCI transport session.  If the host acknowledges the send, all
 *    guest shared-memory pages previously handed to the host are returned
 *    to the kernel.
 *
 *    Returns 0 on success, -ENOMEM on allocation failure, or the
 *    (negative) vmci_datagram_send() error code on send failure.
 */

static int
HgfsVmciChannelTerminateSession(HgfsTransportChannel *channel) // IN: channel to tear down
{
   int ret = 0;
   VMCIDatagram *dg;
   HgfsVmciTransportHeader *transportHeader;
   HgfsVmciHeaderNode *headerNode;

   dg = kmalloc(sizeof *dg + sizeof *transportHeader, GFP_KERNEL);
   if (NULL == dg) {
      LOG(4, (KERN_WARNING "%s failed to allocate\n", __func__));
      return -ENOMEM;
   }

   /* Initialize datagram: source handle is stashed in channel->priv. */
   dg->src = *(VMCIHandle *)channel->priv;
   dg->dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID,
                              VMCI_HGFS_TRANSPORT);
   dg->payloadSize = sizeof *transportHeader;

   /* Payload is a bare header: terminate carries no iov entries. */
   transportHeader = VMCI_DG_PAYLOAD(dg);
   headerNode = &transportHeader->node;
   headerNode->pktType = HGFS_TH_TERMINATE_SESSION;
   headerNode->version = HGFS_VMCI_VERSION_1;
   transportHeader->iovCount = 0;

   LOG(1, (KERN_WARNING "Terminating session with host \n"));
   if ((ret = vmci_datagram_send(dg)) < VMCI_SUCCESS) {
      if (ret == HGFS_VMCI_TRANSPORT_ERROR) {
         /* Fixed typo: "occured" -> "occurred". */
         LOG(0, (KERN_WARNING "HGFS Transport error occurred. Don't blame VMCI\n"));
      }
      LOG(0, (KERN_WARNING "Cannot communicate with Server.\n"));
   } else {
      int i;

      /*
       * The terminate message reached the host, so the shared pages are no
       * longer referenced there and can be released.  NOTE(review): on send
       * failure the pages are deliberately NOT freed here — presumably the
       * host may still hold references to them; confirm teardown elsewhere
       * reclaims them.
       */
      for (i = 0; i < gHgfsShmemPages.totalPageCount; i++) {
         free_page(gHgfsShmemPages.list[i].va);
      }
   }

   kfree(dg);
   return ret;
}
/*
 * HgfsVmciChannelSend --
 *
 *    Sends an HGFS request to the host over the VMCI transport.  Builds a
 *    datagram whose payload is an HgfsVmciTransportHeader followed by an
 *    iov array describing (a) the physical pages backing the request's
 *    metapacket buffer and (b) any additional guest data pages attached to
 *    the request, then submits it with vmci_datagram_send().
 *
 *    Returns 0 on success, -ENOMEM on allocation failure, -EIO on send
 *    failure (request state is rolled back to UNSENT in that case).
 */

static int
HgfsVmciChannelSend(HgfsTransportChannel *channel, // IN: Channel
                    HgfsReq *req)                  // IN: request to send
{
   int ret;
   int iovCount = 0;
   VMCIDatagram *dg;
   HgfsVmciTransportHeader *transportHeader;
   HgfsVmciHeaderNode *headerNode;
   HgfsVmciTransportStatus *transportStatus;
   size_t transportHeaderSize;
   size_t bufferSize;
   size_t total;
   uint64 pa;
   uint64 len;
   uint64 id;
   int j;

   ASSERT(req);
   ASSERT(req->buffer);
   ASSERT(req->state == HGFS_REQ_STATE_UNSENT ||
          req->state == HGFS_REQ_STATE_ALLOCATED);
   ASSERT(req->payloadSize <= req->bufferSize);

   /* Note that req->bufferSize does not include chunk used by the transport. */
   total = req->bufferSize + sizeof (HgfsVmciTransportStatus);

   /* Calculate number of entries for metaPacket */
   iovCount = (total + (size_t)req->buffer % PAGE_SIZE - 1)/ PAGE_SIZE + 1;
   /* The assert below means the metapacket must fit in one page. */
   ASSERT(total + (size_t)req->buffer % PAGE_SIZE <= PAGE_SIZE);

   /*
    * Header already contains room for one HgfsIov (hence the -1); size the
    * allocation for the metapacket iovs plus one iov per data page.
    */
   transportHeaderSize = sizeof *transportHeader +
                         (iovCount + req->numEntries - 1) * sizeof (HgfsIov);
   dg = kmalloc(sizeof *dg + transportHeaderSize, GFP_KERNEL);
   if (NULL == dg) {
      LOG(4, (KERN_WARNING "%s failed to allocate\n", __func__));
      return -ENOMEM;
   }

   /* Initialize datagram */
   dg->src = *(VMCIHandle *)channel->priv;
   dg->dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID,
                              VMCI_HGFS_TRANSPORT);
   dg->payloadSize = transportHeaderSize;

   transportHeader = VMCI_DG_PAYLOAD(dg);
   headerNode = &transportHeader->node;
   headerNode->version = HGFS_VMCI_VERSION_1;
   headerNode->pktType = HGFS_TH_REQUEST;

   /*
    * Walk the metapacket buffer page by page, recording each physically
    * contiguous chunk as one iov entry.  iovCount is re-derived here; it
    * must end up <= the estimate used for the allocation above.
    */
   total = req->bufferSize + sizeof (HgfsVmciTransportStatus);
   bufferSize = 0;
   for (iovCount = 0; bufferSize < req->bufferSize; iovCount++) {
      /*
       * req->buffer should have been allocated by kmalloc()/ __get_free_pages().
       * Specifically, it cannot be a buffer that is mapped from high memory.
       * virt_to_phys() does not work for those.
       */
      pa = virt_to_phys(req->buffer + bufferSize);
      /* Chunk length: remaining bytes, capped at the end of this page. */
      len = total < (PAGE_SIZE - pa % PAGE_SIZE) ?
            total : (PAGE_SIZE - pa % PAGE_SIZE);
      bufferSize += len;
      total -= len;
      transportHeader->iov[iovCount].pa = pa;
      transportHeader->iov[iovCount].len = len;
      LOG(8, ("iovCount = %u PA = %"FMT64"x len=%u\n", iovCount,
              transportHeader->iov[iovCount].pa,
              transportHeader->iov[iovCount].len));
   }

   /* Right now we do not expect discontigous request packet */
   ASSERT(iovCount == 1);
   ASSERT(total == 0);
   ASSERT(bufferSize == req->bufferSize + sizeof (HgfsVmciTransportStatus));

   LOG(0, (KERN_WARNING "Size of request is %Zu\n", req->payloadSize));

   /* Append one iov per attached guest data page, continuing iovCount. */
   for (j = 0; j < req->numEntries; j++, iovCount++) {
      /* I will have to probably do page table walk here, haven't figured it out yet */
      ASSERT(req->dataPacket);
      transportHeader->iov[iovCount].pa = page_to_phys(req->dataPacket[j].page);
      transportHeader->iov[iovCount].pa += req->dataPacket[j].offset;
      transportHeader->iov[iovCount].len = req->dataPacket[j].len;
      LOG(8, ("iovCount = %u PA = %"FMT64"x len=%u\n", iovCount,
              transportHeader->iov[iovCount].pa,
              transportHeader->iov[iovCount].len));
   }

   transportHeader->iovCount = iovCount;

   /* Initialize transport Status */
   transportStatus = (HgfsVmciTransportStatus *)req->buffer;
   transportStatus->status = HGFS_TS_IO_PENDING;
   transportStatus->size = req->bufferSize + sizeof (HgfsVmciTransportStatus);

   /*
    * Don't try to set req->state after vmci_datagram_send().
    * It may be too late then. We could have received a datagram by then and
    * datagram handler expects request's state to be submitted.
    */
   req->state = HGFS_REQ_STATE_SUBMITTED;
   /* Snapshot the id too: req may be completed/reused once the send lands. */
   id = req->id;

   if ((ret = vmci_datagram_send(dg)) < VMCI_SUCCESS) {
      if (ret == HGFS_VMCI_TRANSPORT_ERROR) {
         LOG(0, (KERN_WARNING "HGFS Transport error occured. Don't blame VMCI\n"));
      } else if (ret == HGFS_VMCI_VERSION_MISMATCH) {
         LOG(0, (KERN_WARNING "Version mismatch\n"));
      }
      /* Roll back so the caller may retry or free the request. */
      req->state = HGFS_REQ_STATE_UNSENT;
      kfree(dg);
      return -EIO;
   }

   LOG(0, (KERN_WARNING "Hgfs Received response\n"));
   /* The send is synchronous from our side; complete by saved id. */
   HgfsVmciChannelCompleteRequest(id);

   kfree(dg);
   return 0;
}
/*
 * HgfsVmciChannelPassGuestPages --
 *
 *    Hands every currently free shared-memory page to the host via an
 *    HGFS_TH_REP_GET_PAGES datagram.  On success all pages are marked
 *    in-use (free = FALSE) and freePageCount is reset to 0.
 *
 *    Returns TRUE on success (or when there is nothing to send),
 *    FALSE on allocation or send failure.
 *
 *    Fixes vs. previous version: the "Sending ... pages" LOG printed the
 *    total page count (i) rather than the number of pages actually packed
 *    into the datagram (j); log-string/comment typos corrected.
 */

static Bool
HgfsVmciChannelPassGuestPages(HgfsTransportChannel *channel) // IN:
{
   Bool retVal = TRUE;
   int ret;
   int i;
   int j = 0;
   size_t transportHeaderSize;
   HgfsVmciTransportHeader *transportHeader = NULL;
   HgfsVmciHeaderNode *headerNode;
   VMCIDatagram *dg;

   if (!gHgfsShmemPages.freePageCount) {
      /* Nothing to hand over; treat as success. */
      return TRUE;
   }

   /* Header already contains room for one HgfsAsyncIov, hence the -1. */
   transportHeaderSize = sizeof (HgfsVmciTransportHeader) +
      (gHgfsShmemPages.freePageCount - 1) * sizeof (HgfsAsyncIov);

   dg = kmalloc(sizeof *dg + transportHeaderSize, GFP_ATOMIC);
   if (!dg) {
      LOG(4, (KERN_WARNING "%s failed to allocate\n", __func__));
      retVal = FALSE;
      goto exit;  /* kfree(NULL) below is a no-op. */
   }

   transportHeader = VMCI_DG_PAYLOAD(dg);
   headerNode = &transportHeader->node;

   /* Pack one async iov per free page; j counts packed entries. */
   for (i = 0; i < gHgfsShmemPages.totalPageCount; i++) {
      if (gHgfsShmemPages.list[i].free) {
         transportHeader->asyncIov[j].index = i;
         transportHeader->asyncIov[j].va = gHgfsShmemPages.list[i].va;
         transportHeader->asyncIov[j].pa = gHgfsShmemPages.list[i].pa;
         transportHeader->asyncIov[j].len = PAGE_SIZE;
         j++;
      }
   }

   dg->src = *(VMCIHandle *)channel->priv;
   dg->dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID,
                              VMCI_HGFS_TRANSPORT);
   dg->payloadSize = transportHeaderSize;

   headerNode->version = HGFS_VMCI_VERSION_1;
   headerNode->pktType = HGFS_TH_REP_GET_PAGES;

   ASSERT(gHgfsShmemPages.freePageCount == j);
   transportHeader->iovCount = j;

   /* Bug fix: log the number of pages packed (j), not totalPageCount (i). */
   LOG(10, (KERN_WARNING "Sending %d Guest pages \n", j));
   if ((ret = vmci_datagram_send(dg)) < VMCI_SUCCESS) {
      if (ret == HGFS_VMCI_TRANSPORT_ERROR) {
         LOG(0, (KERN_WARNING "HGFS Transport error occurred. Don't blame VMCI\n"));
      }
      retVal = FALSE;
   }

exit:
   if (retVal) {
      /* We successfully sent pages to the host. Mark all pages as allocated. */
      for (i = 0; i < gHgfsShmemPages.totalPageCount; i++) {
         gHgfsShmemPages.list[i].free = FALSE;
      }
      gHgfsShmemPages.freePageCount = 0;
   }
   kfree(dg);
   return retVal;
}
static int VPageChannelSendControl(VPageChannel *channel, // IN VPageChannelPacketType type, // IN char *message, // IN int len, // IN int numElems, // IN VPageChannelElem *elems) // IN { int retval; VPageChannelPacket *packet; VMCIDatagram *dg; ASSERT(channel); ASSERT(type == VPCPacket_Data || type == VPCPacket_GuestConnect || type == VPCPacket_SetRecvBuffer || type == VPCPacket_GuestDisconnect); dg = NULL; retval = VPageChannelAllocDatagram(channel, len, numElems, &dg); if (retval < VMCI_SUCCESS) { return retval; } packet = (VPageChannelPacket *)VMCI_DG_PAYLOAD(dg); packet->type = type; packet->msgLen = len; packet->numElems = numElems; if (len) { ASSERT(message); memcpy(VPAGECHANNEL_PACKET_MESSAGE(packet), message, len); } if (numElems) { ASSERT(elems); memcpy(VPAGECHANNEL_PACKET_ELEMS(packet), elems, numElems * sizeof (VPageChannelElem)); } retval = vmci_datagram_send(dg); if (retval < VMCI_SUCCESS) { VMCI_WARNING((LGPFX"Failed to send packet (channel=%p) to " "(handle=0x%x:0x%x) (err=%d).\n", channel, dg->dst.context, dg->dst.resource, retval)); } else { /* * We don't care about how many bytes were sent, and callers may not * expect > 0 to mean success, so just convert to exactly success. */ retval = VMCI_SUCCESS; } VMCI_FreeKernelMem(dg, VMCI_DG_SIZE(dg)); return retval; }