Example #1
VMCIHashTable *
VMCIHashTable_Create(int size)
{
   VMCIHashTable *table = VMCI_AllocKernelMem(sizeof *table,
                                              VMCI_MEMORY_NONPAGED);
   if (table == NULL) {
      return NULL;
   }

   table->entries = VMCI_AllocKernelMem(sizeof *table->entries * size,
                                        VMCI_MEMORY_NONPAGED);
   if (table->entries == NULL) {
      VMCI_FreeKernelMem(table, sizeof *table);
      return NULL;
   }
   memset(table->entries, 0, sizeof *table->entries * size);
   table->size = size;

   if (VMCIHashTableInitLock(&table->lock, "VMCIHashTableLock") < VMCI_SUCCESS) {
      VMCI_FreeKernelMem(table->entries, sizeof *table->entries * size);
      VMCI_FreeKernelMem(table, sizeof *table);
      return NULL;
   }

   return table;
}
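A minimal caller sketch (not from the original sources): HashTableExample is a hypothetical helper, and the teardown simply mirrors the error paths inside VMCIHashTable_Create above. Releasing table->lock would need the driver's lock-cleanup routine, which is omitted here.

static void
HashTableExample(void)
{
   VMCIHashTable *ht = VMCIHashTable_Create(64);

   if (ht == NULL) {
      return;
   }
   /* ... use the table ... */

   /* Teardown mirroring VMCIHashTable_Create's error paths. */
   VMCI_FreeKernelMem(ht->entries, sizeof *ht->entries * ht->size);
   VMCI_FreeKernelMem(ht, sizeof *ht);
}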
Example #2
void
DsListDestroy(DsList *list)  // IN:
{
   if (list == NULL) {
      return;
   }
   if (list->elements != NULL) {
      VMCI_FreeKernelMem(list->elements, list->capacity * sizeof *list->elements);
      list->elements = NULL;
   }
   VMCI_FreeKernelMem(list, sizeof *list);
}
Example #3
void
VMCI_FreePPNSet(PPNSet *ppnSet) // IN:
{
    ASSERT(ppnSet);
    if (ppnSet->initialized) {
        /* Do not call these functions on NULL inputs. */
        ASSERT(ppnSet->producePPNs && ppnSet->consumePPNs);
        VMCI_FreeKernelMem(ppnSet->producePPNs,
                           ppnSet->numProducePages * sizeof *ppnSet->producePPNs);
        VMCI_FreeKernelMem(ppnSet->consumePPNs,
                           ppnSet->numConsumePages * sizeof *ppnSet->consumePPNs);
    }
    memset(ppnSet, 0, sizeof *ppnSet);
}
Example #4
static Bool
VMCIUtilCheckHostCapabilities(void)
{
   int result;
   VMCIResourcesQueryMsg *msg;
   uint32 msgSize = sizeof(VMCIResourcesQueryHdr) +
      VMCI_UTIL_NUM_RESOURCES * sizeof(VMCI_Resource);
   VMCIDatagram *checkMsg = VMCI_AllocKernelMem(msgSize, VMCI_MEMORY_NONPAGED);

   if (checkMsg == NULL) {
      VMCI_WARNING((LGPFX"Check host: Insufficient memory.\n"));
      return FALSE;
   }

   checkMsg->dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID,
                                    VMCI_RESOURCES_QUERY);
   checkMsg->src = VMCI_ANON_SRC_HANDLE;
   checkMsg->payloadSize = msgSize - VMCI_DG_HEADERSIZE;
   msg = (VMCIResourcesQueryMsg *)VMCI_DG_PAYLOAD(checkMsg);

   msg->numResources = VMCI_UTIL_NUM_RESOURCES;
   msg->resources[0] = VMCI_GET_CONTEXT_ID;

   result = VMCI_SendDatagram(checkMsg);
   VMCI_FreeKernelMem(checkMsg, msgSize);

   /* We need the vector. There are no fallbacks. */
   return (result == 0x1);
}
Example #5
int
vmci_datagram_destroy_handle(VMCIHandle handle) // IN
{
   DatagramEntry *entry;
   VMCIResource *resource = VMCIResource_Get(handle,
                                             VMCI_RESOURCE_TYPE_DATAGRAM);
   if (resource == NULL) {
      VMCI_DEBUG_LOG(4, (LGPFX"Failed to destroy datagram (handle=0x%x:0x%x).\n",
                         handle.context, handle.resource));
      return VMCI_ERROR_NOT_FOUND;
   }
   entry = RESOURCE_CONTAINER(resource, DatagramEntry, resource);

   VMCIResource_Remove(handle, VMCI_RESOURCE_TYPE_DATAGRAM);

   /*
    * We now wait on the destroyEvent and release the reference we got
    * above.
    */
   VMCI_WaitOnEvent(&entry->destroyEvent, DatagramReleaseCB, entry);

   /*
    * We know that we are now the only reference to the above entry so
    * can safely free it.
    */
   VMCI_DestroyEvent(&entry->destroyEvent);
   VMCI_FreeKernelMem(entry, sizeof *entry);

   return VMCI_SUCCESS;
}
Example #6
static Bool
DsListInit(DsList **list, // OUT:
           int capacity)  // IN:
{
   DsList *l = VMCI_AllocKernelMem(sizeof(DsList),
                                   VMCI_MEMORY_NONPAGED | VMCI_MEMORY_ATOMIC);

   ASSERT(list);
   ASSERT(capacity >= 1);
   
   if (l == NULL) {
      return FALSE;
   }
   l->size = 0;
   l->capacity = capacity;
   l->elements = VMCI_AllocKernelMem(sizeof(DsListElement) * capacity,
                                     VMCI_MEMORY_NONPAGED | VMCI_MEMORY_ATOMIC);
   if (l->elements == NULL) {
      VMCI_FreeKernelMem(l, sizeof *l);
      return FALSE;
   }
   
   *list = l;
   return TRUE;
}
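A lifecycle sketch tying DsListInit to DsListDestroy (Example #2) and to DsListInsert and DsListRemoveElement (shown later). DsListExample is hypothetical and the handle/context arguments are placeholders. Note that DsListDestroy frees only the element array and the list itself, so entries should be removed first: DsListRemoveElement is what frees each copied name.

static void
DsListExample(VMCIHandle handle,  // IN: placeholder
              VMCIId contextID)   // IN: placeholder
{
   DsList *list = NULL;

   if (!DsListInit(&list, 4)) {        /* Capacity must be >= 1. */
      return;
   }
   if (DsListInsert(list, "svc", handle, contextID) == VMCI_SUCCESS) {
      DsListRemoveElement(list, 0);    /* Also frees the copied name. */
   }
   DsListDestroy(list);                /* Frees the element array + list. */
}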
Example #7
int
VMCIDatagram_InvokeGuestHandler(VMCIDatagram *dg) // IN
{
#if defined(VMKERNEL)
   VMCI_WARNING((LGPFX"Cannot dispatch within guest in VMKERNEL.\n"));
   return VMCI_ERROR_DST_UNREACHABLE;
#else // VMKERNEL
   int retval;
   VMCIResource *resource;
   DatagramEntry *dstEntry;

   ASSERT(dg);

   resource = VMCIResource_Get(dg->dst, VMCI_RESOURCE_TYPE_DATAGRAM);
   if (NULL == resource) {
      VMCI_DEBUG_LOG(4, (LGPFX"destination (handle=0x%x:0x%x) doesn't exist.\n",
                         dg->dst.context, dg->dst.resource));
      return VMCI_ERROR_NO_HANDLE;
   }

   dstEntry = RESOURCE_CONTAINER(resource, DatagramEntry, resource);
   if (dstEntry->runDelayed) {
      VMCIDelayedDatagramInfo *dgInfo;

      dgInfo = VMCI_AllocKernelMem(sizeof *dgInfo + (size_t)dg->payloadSize,
                                   (VMCI_MEMORY_ATOMIC | VMCI_MEMORY_NONPAGED));
      if (NULL == dgInfo) {
         VMCIResource_Release(resource);
         retval = VMCI_ERROR_NO_MEM;
         goto exit;
      }

      dgInfo->inDGHostQueue = FALSE;
      dgInfo->entry = dstEntry;
      memcpy(&dgInfo->msg, dg, VMCI_DG_SIZE(dg));

      retval = VMCI_ScheduleDelayedWork(VMCIDatagramDelayedDispatchCB, dgInfo);
      if (retval < VMCI_SUCCESS) {
         VMCI_WARNING((LGPFX"Failed to schedule delayed work for datagram "
                       "(result=%d).\n", retval));
         VMCI_FreeKernelMem(dgInfo, sizeof *dgInfo + (size_t)dg->payloadSize);
         VMCIResource_Release(resource);
         dgInfo = NULL;
         goto exit;
      }
   } else {
      dstEntry->recvCB(dstEntry->clientData, dg);
      VMCIResource_Release(resource);
      retval = VMCI_SUCCESS;
   }

exit:
   return retval;
#endif // VMKERNEL
}
Example #8
static int
DsListInsert(DsList *list,       // IN:
             const char *name,   // IN:
             VMCIHandle handle,  // IN:
             VMCIId contextID)   // IN:
{
   int nameLen;
   char *nameMem;

   if (!list || !name || VMCI_HANDLE_EQUAL(handle, VMCI_INVALID_HANDLE) ||
       contextID == VMCI_INVALID_ID) {
      return VMCI_ERROR_INVALID_ARGS;
   }
   
   /* Check for duplicates */
   if (DsListLookupIndex(list, name) >= 0) {
      return VMCI_ERROR_ALREADY_EXISTS;
   }
   
   if (list->capacity == list->size) {
      /* We need to expand the list */
      int newCapacity = list->capacity * 2;
      DsListElement *elms = VMCI_AllocKernelMem(sizeof(DsListElement) * 
                                                newCapacity,
                                                VMCI_MEMORY_NONPAGED |
                                                VMCI_MEMORY_ATOMIC);
      if (elms == NULL) {
         return VMCI_ERROR_NO_MEM;
      }
      memcpy(elms, list->elements, sizeof(DsListElement) * list->capacity);
      VMCI_FreeKernelMem(list->elements,
                         sizeof *list->elements * list->capacity);
      list->elements = elms;
      list->capacity = newCapacity;
   }
   
   ASSERT(list->capacity > list->size);
   
   nameLen = strlen(name) + 1;
   nameMem = VMCI_AllocKernelMem(nameLen,
                                 VMCI_MEMORY_NONPAGED | VMCI_MEMORY_ATOMIC);
   if (nameMem == NULL) {
      return VMCI_ERROR_NO_MEM;
   }
   memcpy(nameMem, name, nameLen);
   
   list->elements[list->size].name   = nameMem;
   list->elements[list->size].handle = handle;
   list->elements[list->size].contextID = contextID;
   list->size = list->size + 1;

   return VMCI_SUCCESS;
}
Example #9
static void
VMCIDatagramDelayedDispatchCB(void *data) // IN
{
   Bool inDGHostQueue;
   VMCIDelayedDatagramInfo *dgInfo = (VMCIDelayedDatagramInfo *)data;

   ASSERT(data);

   dgInfo->entry->recvCB(dgInfo->entry->clientData, &dgInfo->msg);

   VMCIResource_Release(&dgInfo->entry->resource);

   inDGHostQueue = dgInfo->inDGHostQueue;
   VMCI_FreeKernelMem(dgInfo, sizeof *dgInfo + (size_t)dgInfo->msg.payloadSize);

   if (inDGHostQueue) {
      Atomic_Dec(&delayedDGHostQueueSize);
   }
}
Example #10
void
VPageChannel_Destroy(VPageChannel *channel) // IN/OUT
{
   ASSERT(channel);

   VPageChannelDestroyQueuePair(channel);

   if (!VMCI_HANDLE_INVALID(channel->doorbellHandle)) {
      vmci_doorbell_destroy(channel->doorbellHandle);
   }

   if (!VMCI_HANDLE_INVALID(channel->dgHandle)) {
      vmci_datagram_destroy_handle(channel->dgHandle);
   }

   channel->state = VPCState_Free;
   VMCI_FreeKernelMem(channel, sizeof *channel);

   VMCI_DEBUG_LOG(10,
                  (LGPFX"Destroyed (channel=%p).\n",
                   channel));
}
Example #11
static int
DsListRemoveElement(DsList *list, // IN:
                    int index)    // IN: index of the element to remove
{
   if (!list || index < 0) {
      return VMCI_ERROR_INVALID_ARGS;
   }
   if (index > list->size - 1) {
      return VMCI_ERROR_NOT_FOUND;
   }

   /* Free name. */
   VMCI_FreeKernelMem(list->elements[index].name,
                      strlen(list->elements[index].name) + 1);
   /* Move elements one spot up. */
   memmove(&list->elements[index],
           &list->elements[index + 1],
           (list->size - index - 1) * sizeof list->elements[index]);
   list->size--;
   /* Zero out the last element. */
   memset(&list->elements[list->size], 0, sizeof list->elements[list->size]);

   return VMCI_SUCCESS;
}
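Removal is by index; paired with the DsListLookupIndex helper that DsListInsert consults for duplicates, a remove-by-name wrapper might look like the sketch below (assuming DsListLookupIndex returns the element's index, or a negative value when absent):

static int
DsListRemoveByName(DsList *list,      // IN:
                   const char *name)  // IN:
{
   int index = DsListLookupIndex(list, name);

   if (index < 0) {
      return VMCI_ERROR_NOT_FOUND;
   }
   return DsListRemoveElement(list, index);
}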
Example #12
static void
VPageChannelDoDoorbellCallback(VPageChannel *channel) // IN/OUT
{
   Bool inUse;
   unsigned long flags;
   VPageChannelPacket packetHeader;

   ASSERT(channel);

   if (VPCState_Connected != channel->state) {
      VMCI_WARNING((LGPFX"Not connected (channel=%p).\n",
                    channel));
      return;
   }

   VPageChannelAcquireRecvLock(channel, &flags);
   inUse = channel->inPoll;
   channel->inPoll = TRUE;
   VPageChannelReleaseRecvLock(channel, flags);

   if (inUse) {
      return;
   }

retry:
   while (vmci_qpair_consume_buf_ready(channel->qpair) >= sizeof packetHeader) {
      ssize_t retSize, totalSize;
      VPageChannelPacket *packet;

      retSize = vmci_qpair_peek(channel->qpair, &packetHeader,
                                sizeof packetHeader,
                                /* XXX, UTIL_VMKERNEL_BUFFER for VMKernel. */
                                0);
      if (retSize < (ssize_t)sizeof packetHeader) {
         /*
          * XXX, deal with partial read.
          */

         VMCI_WARNING((LGPFX"Failed to peek (channel=%p) "
                       "(required=%"FMTSZ"d) (err=%"FMTSZ"d).\n",
                       channel,
                       sizeof packetHeader,
                       retSize));
         break;
      }

      totalSize = sizeof packetHeader + packetHeader.msgLen +
         packetHeader.numElems * sizeof(VPageChannelElem);

      retSize = vmci_qpair_consume_buf_ready(channel->qpair);
      if (retSize < totalSize) {
         /*
          * XXX, deal with partial read.
          */

         VMCI_WARNING((LGPFX"Received partial packet (channel=%p) "
                       "(type=%d) (len=%d) (num elems=%d) (avail=%"FMTSZ"d) "
                       "(requested=%"FMTSZ"d).\n",
                       channel,
                       packetHeader.type,
                       packetHeader.msgLen,
                       packetHeader.numElems,
                       retSize,
                       totalSize));
         break;
      }

      packet = (VPageChannelPacket *)
         VMCI_AllocKernelMem(totalSize, VMCI_MEMORY_ATOMIC);
      if (!packet) {
         VMCI_WARNING((LGPFX"Failed to allocate packet (channel=%p) "
                       "(size=%"FMTSZ"d).\n",
                       channel,
                       totalSize));
         break;
      }

      retSize = vmci_qpair_dequeue(channel->qpair, packet,
                                   totalSize,
                                   /* XXX, UTIL_VMKERNEL_BUFFER for VMKernel. */
                                   0);
      if (retSize < totalSize) {
         /*
          * XXX, deal with partial read.
          */

         VMCI_WARNING((LGPFX"Failed to dequeue (channel=%p) "
                       "(required=%"FMTSZ"d) (err=%"FMTSZ"d).\n",
                       channel,
                       totalSize,
                       retSize));
         VMCI_FreeKernelMem(packet, totalSize);
         break;
      }

      VPageChannelRecvPacket(channel, packet);
      VMCI_FreeKernelMem(packet, totalSize);
   }

   VPageChannelAcquireRecvLock(channel, &flags);

   /*
    * The doorbell may have been notified between when we finished reading
    * data and when we grabbed the lock.  If that happens, then there may be
    * data, but we bailed out of that second notification because inPoll was
    * already set.  So that we don't miss anything, do a final check here under
    * the lock for any data that might have arrived.
    */

   if (vmci_qpair_consume_buf_ready(channel->qpair) >= sizeof packetHeader) {
      VPageChannelReleaseRecvLock(channel, flags);
      goto retry;
   }

   channel->inPoll = FALSE;
   VPageChannelReleaseRecvLock(channel, flags);
}
Example #13
static int
VMCIDatagramDispatchAsHost(VMCIId contextID,  // IN:
                           VMCIDatagram *dg)  // IN:
{
   int retval;
   size_t dgSize;
   VMCIPrivilegeFlags srcPrivFlags;

   ASSERT(dg);
   ASSERT(VMCI_HostPersonalityActive());

   dgSize = VMCI_DG_SIZE(dg);

   if (contextID == VMCI_HOST_CONTEXT_ID &&
       dg->dst.context == VMCI_HYPERVISOR_CONTEXT_ID) {
      VMCI_DEBUG_LOG(4, (LGPFX"Host cannot talk to hypervisor\n"));
      return VMCI_ERROR_DST_UNREACHABLE;
   }

   ASSERT(dg->dst.context != VMCI_HYPERVISOR_CONTEXT_ID);

   /* Chatty. */
   // VMCI_DEBUG_LOG(10, (LGPFX"Sending from (handle=0x%x:0x%x) to "
   //                     "(handle=0x%x:0x%x) (size=%u bytes).\n",
   //                     dg->src.context, dg->src.resource,
   //                     dg->dst.context, dg->dst.resource, (uint32)dgSize));

   /*
    * Check that source handle matches sending context.
    */
   if (dg->src.context != contextID) {
      VMCI_DEBUG_LOG(4, (LGPFX"Sender context (ID=0x%x) is not owner of src "
                         "datagram entry (handle=0x%x:0x%x).\n",
                         contextID, dg->src.context, dg->src.resource));
      return VMCI_ERROR_NO_ACCESS;
   }

   /*
    * Get hold of privileges of sending endpoint.
    */

   retval = VMCIDatagramGetPrivFlagsInt(contextID, dg->src, &srcPrivFlags);
   if (retval != VMCI_SUCCESS) {
      VMCI_WARNING((LGPFX"Couldn't get privileges (handle=0x%x:0x%x).\n",
                    dg->src.context, dg->src.resource));
      return retval;
   }

   /* Determine if we should route to host or guest destination. */
   if (dg->dst.context == VMCI_HOST_CONTEXT_ID) {
      /* Route to host datagram entry. */
      DatagramEntry *dstEntry;
      VMCIResource *resource;

      if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
          dg->dst.resource == VMCI_EVENT_HANDLER) {
         return VMCIEvent_Dispatch(dg);
      }

      resource = VMCIResource_Get(dg->dst, VMCI_RESOURCE_TYPE_DATAGRAM);
      if (resource == NULL) {
         VMCI_DEBUG_LOG(4, (LGPFX"Sending to invalid destination "
                            "(handle=0x%x:0x%x).\n",
                            dg->dst.context, dg->dst.resource));
         return VMCI_ERROR_INVALID_RESOURCE;
      }
      dstEntry = RESOURCE_CONTAINER(resource, DatagramEntry, resource);
      if (VMCIDenyInteraction(srcPrivFlags, dstEntry->privFlags)) {
         VMCIResource_Release(resource);
         return VMCI_ERROR_NO_ACCESS;
      }
      ASSERT(dstEntry->recvCB);

      /*
       * If a VMCI datagram destined for the host is also sent by the
       * host, we always run it delayed. This ensures that no locks
       * are held when the datagram callback runs.
       */

      if (dstEntry->runDelayed ||
          (dg->src.context == VMCI_HOST_CONTEXT_ID &&
           VMCI_CanScheduleDelayedWork())) {
         VMCIDelayedDatagramInfo *dgInfo;

         if (Atomic_FetchAndAdd(&delayedDGHostQueueSize, 1) ==
             VMCI_MAX_DELAYED_DG_HOST_QUEUE_SIZE) {
            Atomic_Dec(&delayedDGHostQueueSize);
            VMCIResource_Release(resource);
            return VMCI_ERROR_NO_MEM;
         }

         dgInfo = VMCI_AllocKernelMem(sizeof *dgInfo + (size_t)dg->payloadSize,
                                      (VMCI_MEMORY_ATOMIC |
                                       VMCI_MEMORY_NONPAGED));
         if (NULL == dgInfo) {
            Atomic_Dec(&delayedDGHostQueueSize);
            VMCIResource_Release(resource);
            return VMCI_ERROR_NO_MEM;
         }

         dgInfo->inDGHostQueue = TRUE;
         dgInfo->entry = dstEntry;
         memcpy(&dgInfo->msg, dg, dgSize);

         retval = VMCI_ScheduleDelayedWork(VMCIDatagramDelayedDispatchCB, dgInfo);
         if (retval < VMCI_SUCCESS) {
            VMCI_WARNING((LGPFX"Failed to schedule delayed work for datagram "
                          "(result=%d).\n", retval));
            VMCI_FreeKernelMem(dgInfo, sizeof *dgInfo + (size_t)dg->payloadSize);
            VMCIResource_Release(resource);
            Atomic_Dec(&delayedDGHostQueueSize);
            return retval;
         }
      } else {
         retval = dstEntry->recvCB(dstEntry->clientData, dg);
         VMCIResource_Release(resource);
         if (retval < VMCI_SUCCESS) {
            return retval;
         }
      }
   } else {
      /*
       * Route to destination VM context.
       */

      VMCIDatagram *newDG;

      if (contextID != dg->dst.context) {
         if (VMCIDenyInteraction(srcPrivFlags,
                              vmci_context_get_priv_flags(dg->dst.context))) {
            VMCI_DEBUG_LOG(4, (LGPFX"Interaction denied (%X/%X - %X/%X)\n",
                           contextID, srcPrivFlags,
                           dg->dst.context,
                           vmci_context_get_priv_flags(dg->dst.context)));
            return VMCI_ERROR_NO_ACCESS;
         } else if (VMCI_CONTEXT_IS_VM(contextID)) {
            /*
             * If the sending context is a VM, it cannot reach another VM.
             */

            if (!vmkernel) {
               VMCI_DEBUG_LOG(4, (LGPFX"Datagram communication between VMs not "
                                  "supported (src=0x%x, dst=0x%x).\n",
                                  contextID, dg->dst.context));
               return VMCI_ERROR_DST_UNREACHABLE;
            }
         }
      }

      /* We make a copy to enqueue. */
      newDG = VMCI_AllocKernelMem(dgSize, VMCI_MEMORY_NORMAL);
      if (newDG == NULL) {
         VMCI_DEBUG_LOG(4, (LGPFX"No memory for datagram\n"));
         return VMCI_ERROR_NO_MEM;
      }
      memcpy(newDG, dg, dgSize);
      retval = VMCIContext_EnqueueDatagram(dg->dst.context, newDG);
      if (retval < VMCI_SUCCESS) {
         VMCI_FreeKernelMem(newDG, dgSize);
         VMCI_DEBUG_LOG(4, (LGPFX"Enqueue failed\n"));
         return retval;
      }
   }

   /* The datagram is freed when the context reads it. */

   /* Chatty. */
   // VMCI_DEBUG_LOG(10, (LGPFX"Sent datagram (size=%u bytes).\n",
   //                     (uint32)dgSize));

   /*
    * We currently truncate the size to signed 32 bits. This doesn't
    * matter for this handler as it only supports 4KB messages.
    */

   return (int)dgSize;
}
Example #14
void
VMCIHost_ReleaseUserMemory(PageStoreAttachInfo *attach,      // IN/OUT
                           VMCIQueue *produceQ,              // OUT
                           VMCIQueue *consumeQ)              // OUT
{

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
    int i;

    ASSERT(attach->producePages);
    ASSERT(attach->consumePages);

    kunmap(attach->producePages[0]);
    kunmap(attach->consumePages[0]);

    for (i = 0; i < attach->numProducePages; i++) {
        ASSERT(attach->producePages[i]);

        set_page_dirty(attach->producePages[i]);
        page_cache_release(attach->producePages[i]);
    }

    for (i = 0; i < attach->numConsumePages; i++) {
        ASSERT(attach->consumePages[i]);

        set_page_dirty(attach->consumePages[i]);
        page_cache_release(attach->consumePages[i]);
    }

    VMCI_FreeKernelMem(attach->producePages,
                       attach->numProducePages *
                       sizeof attach->producePages[0]);
    VMCI_FreeKernelMem(attach->consumePages,
                       attach->numConsumePages *
                       sizeof attach->consumePages[0]);
#else
    /*
     * Host queue pair support for earlier kernels temporarily
     * disabled. See bug 365496.
     */

    ASSERT_NOT_IMPLEMENTED(FALSE);
#if 0
    kunmap(attach->produceIoBuf->maplist[0]);
    kunmap(attach->consumeIoBuf->maplist[0]);

    mark_dirty_kiobuf(attach->produceIoBuf,
                      attach->numProducePages * PAGE_SIZE);
    unmap_kiobuf(attach->produceIoBuf);

    mark_dirty_kiobuf(attach->consumeIoBuf,
                      attach->numConsumePages * PAGE_SIZE);
    unmap_kiobuf(attach->consumeIoBuf);

    VMCI_FreeKernelMem(attach->produceIoBuf,
                       sizeof *attach->produceIoBuf);
    VMCI_FreeKernelMem(attach->consumeIoBuf,
                       sizeof *attach->consumeIoBuf);
#endif
#endif
}
Example #15
static int
DatagramCreateHnd(VMCIId resourceID,            // IN:
                  uint32 flags,                 // IN:
                  VMCIPrivilegeFlags privFlags, // IN:
                  VMCIDatagramRecvCB recvCB,    // IN:
                  void *clientData,             // IN:
                  VMCIHandle *outHandle)        // OUT:
{
   int result;
   VMCIId contextID;
   VMCIHandle handle;
   DatagramEntry *entry;

   ASSERT(recvCB != NULL);
   ASSERT(outHandle != NULL);
   ASSERT(!(privFlags & ~VMCI_PRIVILEGE_ALL_FLAGS));

   if ((flags & VMCI_FLAG_WELLKNOWN_DG_HND) != 0) {
      return VMCI_ERROR_INVALID_ARGS;
   } else {
      if ((flags & VMCI_FLAG_ANYCID_DG_HND) != 0) {
         contextID = VMCI_INVALID_ID;
      } else {
         contextID = vmci_get_context_id();
         if (contextID == VMCI_INVALID_ID) {
            return VMCI_ERROR_NO_RESOURCES;
         }
      }

      if (resourceID == VMCI_INVALID_ID) {
         resourceID = VMCIResource_GetID(contextID);
         if (resourceID == VMCI_INVALID_ID) {
            return VMCI_ERROR_NO_HANDLE;
         }
      }

      handle = VMCI_MAKE_HANDLE(contextID, resourceID);
   }

   entry = VMCI_AllocKernelMem(sizeof *entry, VMCI_MEMORY_NONPAGED);
   if (entry == NULL) {
      VMCI_WARNING((LGPFX"Failed allocating memory for datagram entry.\n"));
      return VMCI_ERROR_NO_MEM;
   }

   if (!VMCI_CanScheduleDelayedWork()) {
      if (flags & VMCI_FLAG_DG_DELAYED_CB) {
         VMCI_FreeKernelMem(entry, sizeof *entry);
         return VMCI_ERROR_INVALID_ARGS;
      }
      entry->runDelayed = FALSE;
   } else {
      entry->runDelayed = (flags & VMCI_FLAG_DG_DELAYED_CB) ? TRUE : FALSE;
   }

   entry->flags = flags;
   entry->recvCB = recvCB;
   entry->clientData = clientData;
   VMCI_CreateEvent(&entry->destroyEvent);
   entry->privFlags = privFlags;

   /* Make datagram resource live. */
   result = VMCIResource_Add(&entry->resource, VMCI_RESOURCE_TYPE_DATAGRAM,
                             handle, DatagramFreeCB, entry);
   if (result != VMCI_SUCCESS) {
      VMCI_WARNING((LGPFX"Failed to add new resource (handle=0x%x:0x%x).\n",
                    handle.context, handle.resource));
      VMCI_DestroyEvent(&entry->destroyEvent);
      VMCI_FreeKernelMem(entry, sizeof *entry);
      return result;
   }
   *outHandle = handle;

   return VMCI_SUCCESS;
}
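A create/use/destroy sketch pairing DatagramCreateHnd with vmci_datagram_destroy_handle from Example #5. MyRecvCB and DatagramHandleExample are hypothetical; the callback's shape follows the recvCB invocations in the dispatch paths above, and the zero flag/privilege arguments are illustrative.

static int
MyRecvCB(void *clientData,  // IN:
         VMCIDatagram *dg)  // IN:
{
   /* Process the incoming datagram. */
   return VMCI_SUCCESS;
}

static void
DatagramHandleExample(void)
{
   VMCIHandle handle;

   if (DatagramCreateHnd(VMCI_INVALID_ID, /* Let VMCI pick a resource ID. */
                         0,               /* No flags. */
                         0,               /* No privilege flags. */
                         MyRecvCB, NULL, &handle) == VMCI_SUCCESS) {
      /* ... datagrams now arrive via MyRecvCB ... */
      vmci_datagram_destroy_handle(handle);
   }
}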
Example #16
void
VMCI_FreeBuffer(VMCIBuffer buf, // IN:
                size_t size)    // IN: Unused on Linux
{
    VMCI_FreeKernelMem(buf, size);
}
Example #17
int
VMCI_AllocPPNSet(void *produceQ,         // IN:
                 uint64 numProducePages, // IN: for queue plus header
                 void *consumeQ,         // IN:
                 uint64 numConsumePages, // IN: for queue plus header
                 PPNSet *ppnSet)         // OUT:
{
    VMCIPpnList producePPNs;
    VMCIPpnList consumePPNs;
    uint64 i;

    if (!produceQ || !numProducePages || !consumeQ || !numConsumePages ||
            !ppnSet) {
        return VMCI_ERROR_INVALID_ARGS;
    }

    if (ppnSet->initialized) {
        return VMCI_ERROR_ALREADY_EXISTS;
    }

    producePPNs =
        VMCI_AllocKernelMem(numProducePages * sizeof *producePPNs,
                            VMCI_MEMORY_NORMAL);
    if (!producePPNs) {
        return VMCI_ERROR_NO_MEM;
    }

    consumePPNs =
        VMCI_AllocKernelMem(numConsumePages * sizeof *consumePPNs,
                            VMCI_MEMORY_NORMAL);
    if (!consumePPNs) {
        VMCI_FreeKernelMem(producePPNs, numProducePages * sizeof *producePPNs);
        return VMCI_ERROR_NO_MEM;
    }

    producePPNs[0] = VMCIKVaToMPN(produceQ);
    for (i = 1; i < numProducePages; i++) {
        unsigned long pfn;

        producePPNs[i] = pfn = page_to_pfn(((VMCIQueue *)produceQ)->page[i - 1]);

        /*
         * Fail allocation if PFN isn't supported by hypervisor.
         */

        if (sizeof pfn > sizeof *producePPNs &&
                pfn != producePPNs[i]) {
            goto ppnError;
        }
    }
    consumePPNs[0] = VMCIKVaToMPN(consumeQ);
    for (i = 1; i < numConsumePages; i++) {
        unsigned long pfn;

        consumePPNs[i] = pfn = page_to_pfn(((VMCIQueue *)consumeQ)->page[i - 1]);

        /*
         * Fail allocation if PFN isn't supported by hypervisor.
         */

        if (sizeof pfn > sizeof *consumePPNs &&
                pfn != consumePPNs[i]) {
            goto ppnError;
        }
    }

    ppnSet->numProducePages = numProducePages;
    ppnSet->numConsumePages = numConsumePages;
    ppnSet->producePPNs = producePPNs;
    ppnSet->consumePPNs = consumePPNs;
    ppnSet->initialized = TRUE;
    return VMCI_SUCCESS;

ppnError:
    VMCI_FreeKernelMem(producePPNs, numProducePages * sizeof *producePPNs);
    VMCI_FreeKernelMem(consumePPNs, numConsumePages * sizeof *consumePPNs);
    return VMCI_ERROR_INVALID_ARGS;
}
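Allocation pairs with VMCI_FreePPNSet from Example #3; the set must start zeroed so its initialized flag reads FALSE. PPNSetExample is a hypothetical wrapper:

static int
PPNSetExample(void *produceQ,          // IN:
              uint64 numProducePages,  // IN:
              void *consumeQ,          // IN:
              uint64 numConsumePages)  // IN:
{
   PPNSet ppnSet;
   int result;

   memset(&ppnSet, 0, sizeof ppnSet);  /* initialized must start FALSE. */
   result = VMCI_AllocPPNSet(produceQ, numProducePages,
                             consumeQ, numConsumePages, &ppnSet);
   if (result == VMCI_SUCCESS) {
      /* ... hand the PPN lists to the hypervisor ... */
      VMCI_FreePPNSet(&ppnSet);        /* Frees both lists, re-zeroes set. */
   }
   return result;
}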
Example #18
int
VMCIHost_GetUserMemory(PageStoreAttachInfo *attach,      // IN/OUT
                       VMCIQueue *produceQ,              // OUT
                       VMCIQueue *consumeQ)              // OUT
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
    int retval;
    int err = VMCI_SUCCESS;

    attach->producePages =
        VMCI_AllocKernelMem(attach->numProducePages * sizeof attach->producePages[0],
                            VMCI_MEMORY_NORMAL);
    if (attach->producePages == NULL) {
        return VMCI_ERROR_NO_MEM;
    }
    attach->consumePages =
        VMCI_AllocKernelMem(attach->numConsumePages * sizeof attach->consumePages[0],
                            VMCI_MEMORY_NORMAL);
    if (attach->consumePages == NULL) {
        err = VMCI_ERROR_NO_MEM;
        goto errorDealloc;
    }

    down_write(&current->mm->mmap_sem);
    retval = get_user_pages(current,
                            current->mm,
                            (VA)attach->produceBuffer,
                            attach->numProducePages,
                            1, 0,
                            attach->producePages,
                            NULL);
    if (retval < attach->numProducePages) {
        Log("get_user_pages(produce) failed: %d\n", retval);
        if (retval > 0) {
            int i;
            for (i = 0; i < retval; i++) {
                page_cache_release(attach->producePages[i]);
            }
        }
        err = VMCI_ERROR_NO_MEM;
        goto out;
    }

    retval = get_user_pages(current,
                            current->mm,
                            (VA)attach->consumeBuffer,
                            attach->numConsumePages,
                            1, 0,
                            attach->consumePages,
                            NULL);
    if (retval < attach->numConsumePages) {
        int i;
        Log("get_user_pages(consume) failed: %d\n", retval);
        if (retval > 0) {
            for (i = 0; i < retval; i++) {
                page_cache_release(attach->consumePages[i]);
            }
        }
        for (i = 0; i < attach->numProducePages; i++) {
            page_cache_release(attach->producePages[i]);
        }
        err = VMCI_ERROR_NO_MEM;
    }

    if (err == VMCI_SUCCESS) {
        produceQ->queueHeaderPtr = kmap(attach->producePages[0]);
        produceQ->page = &attach->producePages[1];
        consumeQ->queueHeaderPtr = kmap(attach->consumePages[0]);
        consumeQ->page = &attach->consumePages[1];
    }

out:
    up_write(&current->mm->mmap_sem);

errorDealloc:
    if (err < VMCI_SUCCESS) {
        if (attach->producePages != NULL) {
            VMCI_FreeKernelMem(attach->producePages,
                               attach->numProducePages *
                               sizeof attach->producePages[0]);
        }
        if (attach->consumePages != NULL) {
            VMCI_FreeKernelMem(attach->consumePages,
                               attach->numConsumePages *
                               sizeof attach->consumePages[0]);
        }
    }

    return err;

#else
    /*
     * Host queue pair support for earlier kernels temporarily
     * disabled. See bug 365496.
     */

    ASSERT_NOT_IMPLEMENTED(FALSE);
#if 0
    attach->produceIoBuf = VMCI_AllocKernelMem(sizeof *attach->produceIoBuf,
                           VMCI_MEMORY_NORMAL);
    if (attach->produceIoBuf == NULL) {
        return VMCI_ERROR_NO_MEM;
    }

    attach->consumeIoBuf = VMCI_AllocKernelMem(sizeof *attach->consumeIoBuf,
                           VMCI_MEMORY_NORMAL);
    if (attach->consumeIoBuf == NULL) {
        VMCI_FreeKernelMem(attach->produceIoBuf,
                           sizeof *attach->produceIoBuf);
        return VMCI_ERROR_NO_MEM;
    }

    retval = map_user_kiobuf(WRITE, attach->produceIoBuf,
                             (VA)attach->produceBuffer,
                             attach->numProducePages * PAGE_SIZE);
    if (retval < 0) {
        err = VMCI_ERROR_NO_ACCESS;
        goto out;
    }

    retval = map_user_kiobuf(WRITE, attach->consumeIoBuf,
                             (VA)attach->consumeBuffer,
                             attach->numConsumePages * PAGE_SIZE);
    if (retval < 0) {
        unmap_kiobuf(attach->produceIoBuf);
        err = VMCI_ERROR_NO_ACCESS;
    }

    if (err == VMCI_SUCCESS) {
        produceQ->queueHeaderPtr = kmap(attach->produceIoBuf->maplist[0]);
        produceQ->page = &attach->produceIoBuf->maplist[1];
        consumeQ->queueHeaderPtr = kmap(attach->consumeIoBuf->maplist[0]);
        consumeQ->page = &attach->consumeIoBuf->maplist[1];
    }

out:

    if (err < VMCI_SUCCESS) {
        if (attach->produceIoBuf != NULL) {
            VMCI_FreeKernelMem(attach->produceIoBuf,
                               sizeof *attach->produceIoBuf);
        }
        if (attach->consumeIoBuf != NULL) {
            VMCI_FreeKernelMem(attach->consumeIoBuf,
                               sizeof *attach->consumeIoBuf);
        }
    }

    return err;
#else // 0 -- Instead just return FALSE
    return FALSE;
#endif // 0
#endif // Linux version >= 2.6.0
}
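On success the attach's page arrays and kmap'd queue headers stay live until VMCIHost_ReleaseUserMemory (Example #14) unmaps, dirties, and releases them; a minimal caller sketch:

   if (VMCIHost_GetUserMemory(attach, produceQ, consumeQ) == VMCI_SUCCESS) {
      /* ... the queue pair is usable through produceQ and consumeQ ... */
      VMCIHost_ReleaseUserMemory(attach, produceQ, consumeQ);
   }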
Example #19
static int
VPageChannelAddRecvBuffers(VPageChannel *channel,     // IN
                           int numElems,              // IN
                           Bool onInit)               // IN
{
   int n;
   int sent;
   int maxElems;
   Bool isAtomic;
   size_t size;
   unsigned long flags;
   VPageChannelElem *elems;
   VPageChannelPacket *packet;

   ASSERT(channel);

   sent = 0;
   size = 0;
   elems = NULL;
   packet = NULL;

   if (onInit || (channel->flags & VPAGECHANNEL_FLAGS_RECV_DELAYED)) {
      /*
       * If we are initializing the channel, or we are running in a delayed
       * context (recv() in this case), then we can use blocking allocation
       * and we can allocate large packets.  Also, no need to take the
       * send lock here, we can just take it for each packet.
       */

      isAtomic = FALSE;
      maxElems = VMCI_PACKET_DGRAM_MAX_ELEMS;
      flags = 0; /* Silence compiler. */
   } else {
      /*
       * We're in an atomic context.  We must allocate page-sized packets
       * atomically and send them over the queuepair.  Since this can
       * cause a lot of signalling, we optimize by taking the send lock
       * once for all packets, and only signalling when we are done.
       */

      isAtomic = TRUE;
      maxElems = VMCI_PACKET_PAGE_MAX_ELEMS;
      VPageChannelAcquireSendLock(channel, &flags);
   }

   n = min_t(int, maxElems, numElems);
   while (n > 0) {
      int retval;
      int allocNum;

      /*
       * First packet is always big enough to cover any remaining elements,
       * so just allocate it once.
       */

      if (NULL == packet) {
         size = sizeof(VPageChannelPacket) + (n * sizeof(VPageChannelElem));
         packet = (VPageChannelPacket *)
            VMCI_AllocKernelMem(size,
                        isAtomic ? VMCI_MEMORY_ATOMIC : VMCI_MEMORY_NORMAL);
         if (packet == NULL) {
            VMCI_WARNING((LGPFX"Failed to allocate packet (channel=%p) "
                          "(size=%"FMTSZ"u).\n",
                          channel,
                          size));
            goto exit;
         }

         packet->type = VPCPacket_SetRecvBuffer;
         packet->msgLen = 0;
         elems = VPAGECHANNEL_PACKET_ELEMS(packet);
      }

      allocNum = channel->elemAllocFn(channel->allocClientData, elems, n);
      if (0 == allocNum) {
         /*
          * If the client failed to allocate any elements at all then just
          * bail out and return whatever number we managed to send so far
          * (if any).
          */

         VMCI_WARNING((LGPFX"Failed to allocate receive buffer (channel=%p) "
                       "(expected=%d).\n",
                       channel,
                       n));
         goto exit;
      }

      /*
       * We wanted "n" elements, but we might only have "allocNum" because
       * that's all the client could allocate.  Pass down whatever we got.
       */

      packet->numElems = allocNum;

      if (onInit) {
         retval = VPageChannelSendControl(channel, VPCPacket_SetRecvBuffer,
                                          NULL, 0, allocNum, elems);
      } else {
         /*
          * Do not ask for the lock here if we are atomic, we take care of
          * that ourselves.  Similarly, if we are atomic then we will do our
          * own signalling, so inform the send that there is a signal already
          * pending.
          */

         retval = VPageChannelSendPacket(channel, packet,
                                     isAtomic ? FALSE : TRUE,  // needsLock
                                     isAtomic ? TRUE : FALSE); // signalPending
         /*
          * XXX, what if this is a non-blocking queuepair and we fail to
          * send because it's full and we can't wait?  Is it even worth it
          * to loop?
          */
      }
      if (retval < VMCI_SUCCESS) {
         /*
          * Failure to send is fatal.  Release the client's elements and
          * bail out.
          */

         VMCI_WARNING((LGPFX"Failed to set receive buffers (channel=%p) "
                       "(err=%d).\n",
                       channel,
                       retval));
         channel->elemFreeFn(channel->freeClientData, elems, allocNum);
         goto exit;
      }

      Atomic_Add32(&channel->curRecvBufs, allocNum);

      sent += allocNum;
      numElems -= allocNum;
      n = min_t(int, maxElems, numElems);
   }

exit:
   if (isAtomic) {
      /*
       * We're done sending packets, so now we can signal.  Even if we only
       * sent some of the requested buffers, we must signal anyway, otherwise
       * the peer won't know about the ones we did send.
       */

      (void)VPageChannelSignal(channel);
      VPageChannelReleaseSendLock(channel, flags);
   }
   if (NULL != packet) {
      VMCI_FreeKernelMem(packet, size);
   }
   return sent;
}
Example #20
int
VPageChannel_Send(VPageChannel *channel,       // IN/OUT
                  VPageChannelPacketType type, // IN
                  char *message,               // IN
                  int len,                     // IN
                  VPageChannelBuffer *buffer)  // IN
{
   int retval;
   int numElems;
   ssize_t totalSize;
   VPageChannelPacket *packet;

   ASSERT(channel);

   if (VPCState_Connected != channel->state) {
      VMCI_WARNING((LGPFX"Not connected (channel=%p).\n",
                    channel));
      return VMCI_ERROR_DST_UNREACHABLE;
   }

   if (buffer) {
      numElems = buffer->numElems;
   } else {
      numElems = 0;
   }

   totalSize = sizeof(VPageChannelPacket) + len +
      numElems * sizeof(VPageChannelElem);
   packet = (VPageChannelPacket *)
      VMCI_AllocKernelMem(totalSize,
                        channel->flags & VPAGECHANNEL_FLAGS_SEND_WHILE_ATOMIC ?
                        VMCI_MEMORY_ATOMIC : VMCI_MEMORY_NORMAL);
   if (!packet) {
      VMCI_WARNING((LGPFX"Failed to allocate packet (channel=%p) "
                    "(size=%"FMTSZ"d).",
                    channel,
                    totalSize));
      return VMCI_ERROR_NO_MEM;
   }

   packet->type = type;
   packet->msgLen = len;
   packet->numElems = numElems;

   if (len) {
      ASSERT(message);
      memcpy(VPAGECHANNEL_PACKET_MESSAGE(packet), message, len);
   }

   if (numElems) {
      ASSERT(buffer);
      ASSERT(buffer->elems);
      memcpy(VPAGECHANNEL_PACKET_ELEMS(packet), buffer->elems,
             numElems * sizeof (VPageChannelElem));
   }

   retval = VPageChannel_SendPacket(channel, packet);

   VMCI_FreeKernelMem(packet, totalSize);

   return retval;
}
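A caller sketch for a message-only send (no buffer elements); VPCPacket_Data is one of the packet types asserted on in VPageChannelSendControl below:

   char msg[] = "ping";
   int rv = VPageChannel_Send(channel, VPCPacket_Data, msg, sizeof msg, NULL);

   if (rv < VMCI_SUCCESS) {
      /* Not connected, allocation failed, or the send itself failed. */
   }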
Example #21
static int
VPageChannelSendControl(VPageChannel *channel,       // IN
                        VPageChannelPacketType type, // IN
                        char *message,               // IN
                        int len,                     // IN
                        int numElems,                // IN
                        VPageChannelElem *elems)     // IN
{
   int retval;
   VPageChannelPacket *packet;
   VMCIDatagram *dg;

   ASSERT(channel);
   ASSERT(type == VPCPacket_Data ||
          type == VPCPacket_GuestConnect ||
          type == VPCPacket_SetRecvBuffer ||
          type == VPCPacket_GuestDisconnect);

   dg = NULL;
   retval = VPageChannelAllocDatagram(channel, len, numElems, &dg);
   if (retval < VMCI_SUCCESS) {
      return retval;
   }

   packet = (VPageChannelPacket *)VMCI_DG_PAYLOAD(dg);
   packet->type = type;
   packet->msgLen = len;
   packet->numElems = numElems;

   if (len) {
      ASSERT(message);
      memcpy(VPAGECHANNEL_PACKET_MESSAGE(packet), message, len);
   }

   if (numElems) {
      ASSERT(elems);
      memcpy(VPAGECHANNEL_PACKET_ELEMS(packet), elems,
             numElems * sizeof (VPageChannelElem));
   }

   retval = vmci_datagram_send(dg);
   if (retval < VMCI_SUCCESS) {
      VMCI_WARNING((LGPFX"Failed to send packet (channel=%p) to "
                    "(handle=0x%x:0x%x) (err=%d).\n",
                    channel,
                    dg->dst.context,
                    dg->dst.resource,
                    retval));
   } else {
      /*
       * We don't care about how many bytes were sent, and callers may not
       * expect > 0 to mean success, so just convert to exactly success.
       */

      retval = VMCI_SUCCESS;
   }

   VMCI_FreeKernelMem(dg, VMCI_DG_SIZE(dg));

   return retval;
}