static Bool VMCIUtilCheckHostCapabilities(void) { int result; VMCIResourcesQueryMsg *msg; uint32 msgSize = sizeof(VMCIResourcesQueryHdr) + VMCI_UTIL_NUM_RESOURCES * sizeof(VMCI_Resource); VMCIDatagram *checkMsg = VMCI_AllocKernelMem(msgSize, VMCI_MEMORY_NONPAGED); if (checkMsg == NULL) { VMCI_WARNING((LGPFX"Check host: Insufficient memory.\n")); return FALSE; } checkMsg->dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_RESOURCES_QUERY); checkMsg->src = VMCI_ANON_SRC_HANDLE; checkMsg->payloadSize = msgSize - VMCI_DG_HEADERSIZE; msg = (VMCIResourcesQueryMsg *)VMCI_DG_PAYLOAD(checkMsg); msg->numResources = VMCI_UTIL_NUM_RESOURCES; msg->resources[0] = VMCI_GET_CONTEXT_ID; result = VMCI_SendDatagram(checkMsg); VMCI_FreeKernelMem(checkMsg, msgSize); /* We need the vector. There are no fallbacks. */ return (result == 0x1); }
/*
 * Add the given context to the discovery-service group and record the
 * group handle in the context's group array.  A context ID that cannot
 * be resolved is silently ignored.
 */

void
VMCIDs_AddContext(VMCIId contextID) // IN:
{
   VMCILockFlags flags;
   VMCIContext *context = VMCIContext_Get(contextID);

   if (context == NULL) {
      return;
   }

   VMCIGroup_AddMember(dsAPI.groupHandle,
                       VMCI_MAKE_HANDLE(contextID, VMCI_CONTEXT_RESOURCE_ID),
                       FALSE);

   /* Record the group membership under the context lock. */
   VMCI_GrabLock(&context->lock, &flags);
   VMCIHandleArray_AppendEntry(&context->groupArray, dsAPI.groupHandle);
   VMCI_ReleaseLock(&context->lock, flags);

   VMCIContext_Release(context);
}
/*
 * Notify the host that the HGFS session is being torn down and, on a
 * successful send, release the shared-memory pages previously handed
 * to the host.  Returns the vmci_datagram_send() result, or -ENOMEM
 * if the datagram could not be allocated.
 */

static int
HgfsVmciChannelTerminateSession(HgfsTransportChannel *channel)
{
   int ret = 0;
   VMCIDatagram *dg;
   HgfsVmciTransportHeader *transportHeader;
   HgfsVmciHeaderNode *headerNode;

   dg = kmalloc(sizeof *dg + sizeof *transportHeader, GFP_KERNEL);
   if (NULL == dg) {
      LOG(4, (KERN_WARNING "%s failed to allocate\n", __func__));
      return -ENOMEM;
   }

   /* Fill in the datagram endpoints and the terminate-session header. */
   dg->src = *(VMCIHandle *)channel->priv;
   dg->dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_HGFS_TRANSPORT);
   dg->payloadSize = sizeof *transportHeader;

   transportHeader = VMCI_DG_PAYLOAD(dg);
   headerNode = &transportHeader->node;
   headerNode->pktType = HGFS_TH_TERMINATE_SESSION;
   headerNode->version = HGFS_VMCI_VERSION_1;
   transportHeader->iovCount = 0;

   LOG(1, (KERN_WARNING "Terminating session with host \n"));

   ret = vmci_datagram_send(dg);
   if (ret >= VMCI_SUCCESS) {
      /* Session ended; free the pages that were shared with the host. */
      int i;
      for (i = 0; i < gHgfsShmemPages.totalPageCount; i++) {
         free_page(gHgfsShmemPages.list[i].va);
      }
   } else {
      if (ret == HGFS_VMCI_TRANSPORT_ERROR) {
         LOG(0, (KERN_WARNING "HGFS Transport error occured. Don't blame VMCI\n"));
      }
      LOG(0, (KERN_WARNING "Cannot communicate with Server.\n"));
   }

   kfree(dg);
   return ret;
}
VMCIId VMCI_GetContextID(void) { if (VMCI_GuestPersonalityActive()) { if (Atomic_Read(&vmContextID) == VMCI_INVALID_ID) { uint32 result; VMCIDatagram getCidMsg; getCidMsg.dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_GET_CONTEXT_ID); getCidMsg.src = VMCI_ANON_SRC_HANDLE; getCidMsg.payloadSize = 0; result = VMCI_SendDatagram(&getCidMsg); Atomic_Write(&vmContextID, result); } return Atomic_Read(&vmContextID); } else if (VMCI_HostPersonalityActive()) { return VMCI_HOST_CONTEXT_ID; } return VMCI_INVALID_ID; }
/*
 * Undo VMCIDs_AddContext: drop the group handle from the context's
 * group array, remove the context from the discovery-service group,
 * and discard any registrations it made.  No-op when the service is
 * uninitialized or the context cannot be resolved.
 */

void
VMCIDs_RemoveContext(VMCIId contextID) // IN:
{
   VMCILockFlags flags;
   VMCIContext *context;

   if (!dsAPI.isInitialized) {
      return;
   }

   context = VMCIContext_Get(contextID);
   if (context == NULL) {
      return;
   }

   /* Drop the group handle under the context lock. */
   VMCI_GrabLock(&context->lock, &flags);
   VMCIHandleArray_RemoveEntry(context->groupArray, dsAPI.groupHandle);
   VMCI_ReleaseLock(&context->lock, flags);
   VMCIContext_Release(context);

   VMCIGroup_RemoveMember(dsAPI.groupHandle,
                          VMCI_MAKE_HANDLE(contextID, VMCI_CONTEXT_RESOURCE_ID));
   DsRemoveRegistrationsContext(contextID);
}
static int DatagramCreateHnd(VMCIId resourceID, // IN: uint32 flags, // IN: VMCIPrivilegeFlags privFlags, // IN: VMCIDatagramRecvCB recvCB, // IN: void *clientData, // IN: VMCIHandle *outHandle) // OUT: { int result; VMCIId contextID; VMCIHandle handle; DatagramEntry *entry; ASSERT(recvCB != NULL); ASSERT(outHandle != NULL); ASSERT(!(privFlags & ~VMCI_PRIVILEGE_ALL_FLAGS)); if ((flags & VMCI_FLAG_WELLKNOWN_DG_HND) != 0) { return VMCI_ERROR_INVALID_ARGS; } else { if ((flags & VMCI_FLAG_ANYCID_DG_HND) != 0) { contextID = VMCI_INVALID_ID; } else { contextID = vmci_get_context_id(); if (contextID == VMCI_INVALID_ID) { return VMCI_ERROR_NO_RESOURCES; } } if (resourceID == VMCI_INVALID_ID) { resourceID = VMCIResource_GetID(contextID); if (resourceID == VMCI_INVALID_ID) { return VMCI_ERROR_NO_HANDLE; } } handle = VMCI_MAKE_HANDLE(contextID, resourceID); } entry = VMCI_AllocKernelMem(sizeof *entry, VMCI_MEMORY_NONPAGED); if (entry == NULL) { VMCI_WARNING((LGPFX"Failed allocating memory for datagram entry.\n")); return VMCI_ERROR_NO_MEM; } if (!VMCI_CanScheduleDelayedWork()) { if (flags & VMCI_FLAG_DG_DELAYED_CB) { VMCI_FreeKernelMem(entry, sizeof *entry); return VMCI_ERROR_INVALID_ARGS; } entry->runDelayed = FALSE; } else { entry->runDelayed = (flags & VMCI_FLAG_DG_DELAYED_CB) ? TRUE : FALSE; } entry->flags = flags; entry->recvCB = recvCB; entry->clientData = clientData; VMCI_CreateEvent(&entry->destroyEvent); entry->privFlags = privFlags; /* Make datagram resource live. */ result = VMCIResource_Add(&entry->resource, VMCI_RESOURCE_TYPE_DATAGRAM, handle, DatagramFreeCB, entry); if (result != VMCI_SUCCESS) { VMCI_WARNING((LGPFX"Failed to add new resource (handle=0x%x:0x%x).\n", handle.context, handle.resource)); VMCI_DestroyEvent(&entry->destroyEvent); VMCI_FreeKernelMem(entry, sizeof *entry); return result; } *outHandle = handle; return VMCI_SUCCESS; }
/*
 *----------------------------------------------------------------------
 * HgfsVmciChannelSend --
 *
 *    Send an HGFS request to the host over the VMCI transport.  Builds
 *    a datagram whose payload is a transport header holding a physical
 *    scatter/gather list: first the iov entries covering req->buffer
 *    (the meta packet plus its leading HgfsVmciTransportStatus), then
 *    one entry per page of the optional data packet.
 *
 *    Returns 0 on success, -ENOMEM on allocation failure, -EIO when
 *    vmci_datagram_send() fails.
 *
 *    Side effects: sets req->state to HGFS_REQ_STATE_SUBMITTED before
 *    sending (reverted to UNSENT on send failure) and writes the
 *    transport status into the head of req->buffer.
 *----------------------------------------------------------------------
 */

static int
HgfsVmciChannelSend(HgfsTransportChannel *channel, // IN: Channel
                    HgfsReq *req)                  // IN: request to send
{
   int ret;
   int iovCount = 0;
   VMCIDatagram *dg;
   HgfsVmciTransportHeader *transportHeader;
   HgfsVmciHeaderNode *headerNode;
   HgfsVmciTransportStatus *transportStatus;
   size_t transportHeaderSize;
   size_t bufferSize;
   size_t total;
   uint64 pa;
   uint64 len;
   uint64 id;
   int j;

   ASSERT(req);
   ASSERT(req->buffer);
   ASSERT(req->state == HGFS_REQ_STATE_UNSENT ||
          req->state == HGFS_REQ_STATE_ALLOCATED);
   ASSERT(req->payloadSize <= req->bufferSize);

   /* Note that req->bufferSize does not include chunk used by the transport. */
   total = req->bufferSize + sizeof (HgfsVmciTransportStatus);

   /* Calculate number of entries for metaPacket */
   iovCount = (total + (size_t)req->buffer % PAGE_SIZE - 1)/ PAGE_SIZE + 1;
   /* Asserts the meta packet fits in one page; the loop below still
      relies on this (see ASSERT(iovCount == 1) further down). */
   ASSERT(total + (size_t)req->buffer % PAGE_SIZE <= PAGE_SIZE);

   /* Header already contains room for one HgfsIov, hence the "- 1". */
   transportHeaderSize = sizeof *transportHeader +
                         (iovCount + req->numEntries - 1) * sizeof (HgfsIov);
   dg = kmalloc(sizeof *dg + transportHeaderSize, GFP_KERNEL);
   if (NULL == dg) {
      LOG(4, (KERN_WARNING "%s failed to allocate\n", __func__));
      return -ENOMEM;
   }

   /* Initialize datagram */
   dg->src = *(VMCIHandle *)channel->priv;
   dg->dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID,
                              VMCI_HGFS_TRANSPORT);
   dg->payloadSize = transportHeaderSize;

   transportHeader = VMCI_DG_PAYLOAD(dg);
   headerNode = &transportHeader->node;
   headerNode->version = HGFS_VMCI_VERSION_1;
   headerNode->pktType = HGFS_TH_REQUEST;

   /* Walk req->buffer page by page, recording physical extents. */
   total = req->bufferSize + sizeof (HgfsVmciTransportStatus);
   bufferSize = 0;
   for (iovCount = 0; bufferSize < req->bufferSize; iovCount++) {
      /*
       * req->buffer should have been allocated by kmalloc()/ __get_free_pages().
       * Specifically, it cannot be a buffer that is mapped from high memory.
       * virt_to_phys() does not work for those.
       */
      pa = virt_to_phys(req->buffer + bufferSize);
      /* Clamp the extent to the end of the current physical page. */
      len = total < (PAGE_SIZE - pa % PAGE_SIZE) ?
            total : (PAGE_SIZE - pa % PAGE_SIZE);
      bufferSize += len;
      total -= len;
      transportHeader->iov[iovCount].pa = pa;
      transportHeader->iov[iovCount].len = len;
      LOG(8, ("iovCount = %u PA = %"FMT64"x len=%u\n", iovCount,
              transportHeader->iov[iovCount].pa,
              transportHeader->iov[iovCount].len));
   }

   /* Right now we do not expect discontigous request packet */
   ASSERT(iovCount == 1);
   ASSERT(total == 0);
   ASSERT(bufferSize == req->bufferSize + sizeof (HgfsVmciTransportStatus));

   LOG(0, (KERN_WARNING "Size of request is %Zu\n", req->payloadSize));

   /* Append one iov entry per page of the data packet, if any. */
   for (j = 0; j < req->numEntries; j++, iovCount++) {
      /* I will have to probably do page table walk here, haven't figured it out yet */
      ASSERT(req->dataPacket);
      transportHeader->iov[iovCount].pa = page_to_phys(req->dataPacket[j].page);
      transportHeader->iov[iovCount].pa += req->dataPacket[j].offset;
      transportHeader->iov[iovCount].len = req->dataPacket[j].len;
      LOG(8, ("iovCount = %u PA = %"FMT64"x len=%u\n", iovCount,
              transportHeader->iov[iovCount].pa,
              transportHeader->iov[iovCount].len));
   }

   transportHeader->iovCount = iovCount;

   /* Initialize transport Status */
   transportStatus = (HgfsVmciTransportStatus *)req->buffer;
   transportStatus->status = HGFS_TS_IO_PENDING;
   transportStatus->size = req->bufferSize + sizeof (HgfsVmciTransportStatus);

   /*
    * Don't try to set req->state after vmci_datagram_send().
    * It may be too late then. We could have received a datagram by then and
    * datagram handler expects request's state to be submitted.
    */
   req->state = HGFS_REQ_STATE_SUBMITTED;
   /* Capture the id now: req may be completed/recycled once sent. */
   id = req->id;

   if ((ret = vmci_datagram_send(dg)) < VMCI_SUCCESS) {
      if (ret == HGFS_VMCI_TRANSPORT_ERROR) {
         LOG(0, (KERN_WARNING "HGFS Transport error occured. Don't blame VMCI\n"));
      } else if (ret == HGFS_VMCI_VERSION_MISMATCH) {
         LOG(0, (KERN_WARNING "Version mismatch\n"));
      }
      req->state = HGFS_REQ_STATE_UNSENT;
      kfree(dg);
      return -EIO;
   }

   LOG(0, (KERN_WARNING "Hgfs Received response\n"));
   /* NOTE(review): send success appears to imply the host already
      replied in-line — confirm against the host-side transport. */
   HgfsVmciChannelCompleteRequest(id);

   kfree(dg);
   return 0;
}
/*
 *----------------------------------------------------------------------
 * HgfsVmciChannelPassGuestPages --
 *
 *    Hand all currently free shared-memory pages over to the host by
 *    sending a HGFS_TH_REP_GET_PAGES datagram that describes each free
 *    page (index, virtual address, physical address, length).
 *
 * Results:
 *    TRUE if there were no free pages or the datagram was sent
 *    successfully; FALSE on allocation or send failure.
 *
 * Side effects:
 *    On success every page is marked as in use and
 *    gHgfsShmemPages.freePageCount drops to zero.
 *----------------------------------------------------------------------
 */

static Bool
HgfsVmciChannelPassGuestPages(HgfsTransportChannel *channel) // IN:
{
   Bool retVal = TRUE;
   int ret;
   int i;
   int j = 0;
   size_t transportHeaderSize;
   HgfsVmciTransportHeader *transportHeader = NULL;
   HgfsVmciHeaderNode *headerNode;
   VMCIDatagram *dg;

   if (!gHgfsShmemPages.freePageCount) {
      return TRUE;
   }

   /* The header already contains room for one iov entry, hence "- 1". */
   transportHeaderSize = sizeof (HgfsVmciTransportHeader) +
                         (gHgfsShmemPages.freePageCount - 1) * sizeof (HgfsAsyncIov);

   dg = kmalloc(sizeof *dg + transportHeaderSize, GFP_ATOMIC);
   if (!dg) {
      LOG(4, (KERN_WARNING "%s failed to allocate\n", __func__));
      retVal = FALSE;
      goto exit;
   }

   transportHeader = VMCI_DG_PAYLOAD(dg);
   headerNode = &transportHeader->node;

   /* Describe every free page in the iov list; j counts entries used. */
   for (i = 0; i < gHgfsShmemPages.totalPageCount; i++) {
      if (gHgfsShmemPages.list[i].free) {
         transportHeader->asyncIov[j].index = i;
         transportHeader->asyncIov[j].va = gHgfsShmemPages.list[i].va;
         transportHeader->asyncIov[j].pa = gHgfsShmemPages.list[i].pa;
         transportHeader->asyncIov[j].len = PAGE_SIZE;
         j++;
      }
   }

   dg->src = *(VMCIHandle *)channel->priv;
   dg->dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_HGFS_TRANSPORT);
   dg->payloadSize = transportHeaderSize;

   headerNode->version = HGFS_VMCI_VERSION_1;
   headerNode->pktType = HGFS_TH_REP_GET_PAGES;

   ASSERT(gHgfsShmemPages.freePageCount == j);
   transportHeader->iovCount = j;

   /* Log the number of pages actually sent (j), not the scan bound (i). */
   LOG(10, (KERN_WARNING "Sending %d Guest pages \n", j));
   if ((ret = vmci_datagram_send(dg)) < VMCI_SUCCESS) {
      if (ret == HGFS_VMCI_TRANSPORT_ERROR) {
         LOG(0, (KERN_WARNING "HGFS Transport error occured. Don't blame VMCI\n"));
      }
      retVal = FALSE;
   }

exit:
   if (retVal) {
      /* We successfully sent pages to the host. Mark all pages as allocated */
      for (i = 0; i < gHgfsShmemPages.totalPageCount; i++) {
         gHgfsShmemPages.list[i].free = FALSE;
      }
      gHgfsShmemPages.freePageCount = 0;
   }
   kfree(dg); /* kfree(NULL) is a no-op on the alloc-failure path. */
   return retVal;
}
/*
 *----------------------------------------------------------------------
 * VPageChannel_CreateInVM --
 *
 *    Create the guest end of a page channel: allocate and zero the
 *    channel structure, create a datagram handle for handshaking, a
 *    doorbell for the peer to signal packet arrival, and the
 *    queuepair for data, then (optionally) post the initial receive
 *    buffers and send the connection message to the peer.
 *
 * Results:
 *    VMCI_SUCCESS with *channel set on success; a VMCI error code
 *    otherwise.  On failure the partially constructed channel is torn
 *    down via VPageChannel_Destroy() (fields are pre-set to invalid
 *    handles so Destroy can tell what was created).
 *
 * Side effects:
 *    Allocates kernel memory and registers VMCI resources.
 *----------------------------------------------------------------------
 */

int
VPageChannel_CreateInVM(VPageChannel **channel,              // IN/OUT
                        VMCIId resourceId,                   // IN
                        VMCIId peerResourceId,               // IN
                        uint64 produceQSize,                 // IN
                        uint64 consumeQSize,                 // IN
                        uint32 channelFlags,                 // IN
                        VPageChannelRecvCB recvCB,           // IN
                        void *clientRecvData,                // IN
                        VPageChannelAllocElemFn elemAllocFn, // IN
                        void *allocClientData,               // IN
                        VPageChannelFreeElemFn elemFreeFn,   // IN
                        void *freeClientData,                // IN
                        int defaultRecvBuffers,              // IN
                        int maxRecvBuffers)                  // IN
{
   int retval;
   int flags;
   VPageChannel *pageChannel;

   ASSERT(channel);
   ASSERT(VMCI_INVALID_ID != resourceId);
   ASSERT(VMCI_INVALID_ID != peerResourceId);
   ASSERT(recvCB);

   if (channelFlags & ~(VPAGECHANNEL_FLAGS_ALL)) {
      VMCI_WARNING((LGPFX"Invalid argument (flags=0x%x).\n",
                    channelFlags));
      return VMCI_ERROR_INVALID_ARGS;
   }

   pageChannel = VMCI_AllocKernelMem(sizeof *pageChannel,
                                     VMCI_MEMORY_NONPAGED);
   if (!pageChannel) {
      VMCI_WARNING((LGPFX"Failed to allocate channel memory.\n"));
      return VMCI_ERROR_NO_MEM;
   }

   /*
    * XXX, we should support a default internal allocation function.
    */

   /* Start from a clean slate with all handles marked invalid. */
   memset(pageChannel, 0, sizeof *pageChannel);
   pageChannel->state = VPCState_Unconnected;
   pageChannel->dgHandle = VMCI_INVALID_HANDLE;
   pageChannel->attachSubId = VMCI_INVALID_ID;
   pageChannel->detachSubId = VMCI_INVALID_ID;
   pageChannel->qpHandle = VMCI_INVALID_HANDLE;
   pageChannel->qpair = NULL;
   pageChannel->doorbellHandle = VMCI_INVALID_HANDLE;
   pageChannel->peerDoorbellHandle = VMCI_INVALID_HANDLE;
   pageChannel->flags = channelFlags;
   pageChannel->recvCB = recvCB;
   pageChannel->clientRecvData = clientRecvData;
   pageChannel->elemAllocFn = elemAllocFn;
   pageChannel->allocClientData = allocClientData;
   pageChannel->elemFreeFn = elemFreeFn;
   pageChannel->freeClientData = freeClientData;
   pageChannel->resourceId = resourceId;
   pageChannel->peerDgHandle = VMCI_MAKE_HANDLE(VMCI_HOST_CONTEXT_ID,
                                                peerResourceId);
   Atomic_Write32(&pageChannel->curRecvBufs, 0);
   pageChannel->recvBufsTarget = defaultRecvBuffers;
   pageChannel->defaultRecvBufs = defaultRecvBuffers;
   pageChannel->maxRecvBufs = maxRecvBuffers + VMCI_PACKET_RECV_THRESHOLD;
   pageChannel->produceQSize = produceQSize;
   pageChannel->consumeQSize = consumeQSize;

   /*
    * Create a datagram handle over which we will send connection handshake
    * packets (once the queuepair is created we can send packets over that
    * instead).  This handle has a delayed callback regardless of the
    * channel flags, because we may have to create a queuepair inside the
    * callback.
    */

   flags = VMCI_FLAG_DG_DELAYED_CB;
   retval = vmci_datagram_create_handle(resourceId, flags,
                                        VPageChannelDgRecvFunc, pageChannel,
                                        &pageChannel->dgHandle);
   if (retval < VMCI_SUCCESS) {
      VMCI_WARNING((LGPFX"Failed to create datagram handle "
                    "(channel=%p) (err=%d).\n",
                    channel, retval));
      goto error;
   }

   VMCI_DEBUG_LOG(10,
                  (LGPFX"Created datagram (channel=%p) "
                   "(handle=0x%x:0x%x).\n",
                   channel,
                   pageChannel->dgHandle.context,
                   pageChannel->dgHandle.resource));

   /*
    * Create a doorbell handle.  This is used by the peer to signal the
    * arrival of packets in the queuepair.  This handle has a delayed
    * callback depending on the channel flags.
    */

   flags = channelFlags & VPAGECHANNEL_FLAGS_RECV_DELAYED ?
           VMCI_FLAG_DELAYED_CB : 0;
   retval = vmci_doorbell_create(&pageChannel->doorbellHandle,
                                 flags, VMCI_PRIVILEGE_FLAG_RESTRICTED,
                                 VPageChannelDoorbellCallback, pageChannel);
   if (retval < VMCI_SUCCESS) {
      VMCI_WARNING((LGPFX"Failed to create doorbell "
                    "(channel=%p) (err=%d).\n",
                    channel, retval));
      goto error;
   }

   VMCI_DEBUG_LOG(10,
                  (LGPFX"Created doorbell (channel=%p) "
                   "(handle=0x%x:0x%x).\n",
                   channel,
                   pageChannel->doorbellHandle.context,
                   pageChannel->doorbellHandle.resource));

   /*
    * Now create the queuepair, over which we can pass data packets.
    */

   retval = VPageChannelCreateQueuePair(pageChannel);
   if (retval < VMCI_SUCCESS) {
      goto error;
   }

   /*
    * Set the receiving buffers before sending the connection message to
    * avoid a race when the connection is made, but there is no receiving
    * buffer yet.
    */

   if (defaultRecvBuffers) {
      int numElems = defaultRecvBuffers + VMCI_PACKET_RECV_THRESHOLD;
      if (0 == VPageChannelAddRecvBuffers(pageChannel, numElems, TRUE)) {
         /*
          * AddRecvBuffers() returns the number of buffers actually added.  If
          * we failed to add any at all, then fail.
          */
         retval = VMCI_ERROR_NO_MEM;
         goto error;
      }
   }

   retval = VPageChannelSendConnectionMessage(pageChannel);
   if (retval < VMCI_SUCCESS) {
      goto error;
   }

   VMCI_DEBUG_LOG(10,
                  (LGPFX"Created (channel=%p) (handle=0x%x:0x%x).\n",
                   pageChannel,
                   pageChannel->dgHandle.context,
                   pageChannel->dgHandle.resource));

   *channel = pageChannel;
   return retval;

error:
   VPageChannel_Destroy(pageChannel);
   return retval;
}