Example #1
static Bool
DsListInit(DsList **list, // OUT:
           int capacity)  // IN:
{
   DsList *l = VMCI_AllocKernelMem(sizeof(DsList),
                                   VMCI_MEMORY_NONPAGED | VMCI_MEMORY_ATOMIC);

   ASSERT(list);
   ASSERT(capacity >= 1);
   
   if (l == NULL) {
      return FALSE;
   }
   l->size = 0;
   l->capacity = capacity;
   l->elements = VMCI_AllocKernelMem(sizeof(DsListElement) * capacity,
                                     VMCI_MEMORY_NONPAGED | VMCI_MEMORY_ATOMIC);
   if (l->elements == NULL) {
      VMCI_FreeKernelMem(l, sizeof *l);
      return FALSE;
   }
   
   *list = l;
   return TRUE;
}
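
A minimal usage sketch (hypothetical call site; every call and constant comes from the code above): DsListInit() reports failure through its Bool return, which a caller would typically map onto a VMCI error code.

   DsList *list;

   /* Usage sketch: treat allocation failure as VMCI_ERROR_NO_MEM. */
   if (!DsListInit(&list, 4)) {
      return VMCI_ERROR_NO_MEM;
   }
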
Example #2
VMCIHashTable *
VMCIHashTable_Create(int size)
{
   VMCIHashTable *table = VMCI_AllocKernelMem(sizeof *table,
                                              VMCI_MEMORY_NONPAGED);
   if (table == NULL) {
      return NULL;
   }

   table->entries = VMCI_AllocKernelMem(sizeof *table->entries * size,
                                        VMCI_MEMORY_NONPAGED);
   if (table->entries == NULL) {
      VMCI_FreeKernelMem(table, sizeof *table);
      return NULL;
   }
   memset(table->entries, 0, sizeof *table->entries * size);
   table->size = size;

   if (VMCIHashTableInitLock(&table->lock, "VMCIHashTableLock") < VMCI_SUCCESS) {
      VMCI_FreeKernelMem(table->entries, sizeof *table->entries * size);
      VMCI_FreeKernelMem(table, sizeof *table);
      return NULL;
   }

   return table;
}
Example #3
static int
DsListInsert(DsList *list,       // IN:
             const char *name,   // IN:
             VMCIHandle handle,  // IN:
             VMCIId contextID)   // IN:
{
   int nameLen;
   char *nameMem;

   if (!list || !name || VMCI_HANDLE_EQUAL(handle, VMCI_INVALID_HANDLE) ||
       contextID == VMCI_INVALID_ID) {
      return VMCI_ERROR_INVALID_ARGS;
   }
   
   /* Check for duplicates */
   if (DsListLookupIndex(list, name) >= 0) {
      return VMCI_ERROR_ALREADY_EXISTS;
   }
   
   if (list->capacity == list->size) {
      /* We need to expand the list */
      int newCapacity = list->capacity * 2;
      DsListElement *elms = VMCI_AllocKernelMem(sizeof(DsListElement) * 
                                                newCapacity,
                                                VMCI_MEMORY_NONPAGED |
                                                VMCI_MEMORY_ATOMIC);
      if (elms == NULL) {
         return VMCI_ERROR_NO_MEM;
      }
      memcpy(elms, list->elements, sizeof(DsListElement) * list->capacity);
      VMCI_FreeKernelMem(list->elements,
                         sizeof *list->elements * list->capacity);
      list->elements = elms;
      list->capacity = newCapacity;
   }
   
   ASSERT(list->capacity > list->size);
   
   nameLen = strlen(name) + 1;
   nameMem = VMCI_AllocKernelMem(nameLen,
                                 VMCI_MEMORY_NONPAGED | VMCI_MEMORY_ATOMIC);
   if (nameMem == NULL) {
      return VMCI_ERROR_NO_MEM;
   }
   memcpy(nameMem, name, nameLen);
   
   list->elements[list->size].name   = nameMem;
   list->elements[list->size].handle = handle;
   list->elements[list->size].contextID = contextID;
   list->size = list->size + 1;

   return VMCI_SUCCESS;
}
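
The matching teardown is not part of this set; the sketch below assumes a hypothetical DsListDestroy(). Because VMCI_FreeKernelMem() takes an explicit size, each copied name must be handed back with the same strlen(name) + 1 bytes that DsListInsert() allocated for it.

static void
DsListDestroy(DsList **list) // IN/OUT: hypothetical counterpart, sketch only
{
   int i;
   DsList *l;

   ASSERT(list);
   l = *list;
   if (l == NULL) {
      return;
   }

   /* Free each copied name with the size used at allocation time. */
   for (i = 0; i < l->size; i++) {
      VMCI_FreeKernelMem(l->elements[i].name,
                         strlen(l->elements[i].name) + 1);
   }
   VMCI_FreeKernelMem(l->elements, sizeof *l->elements * l->capacity);
   VMCI_FreeKernelMem(l, sizeof *l);
   *list = NULL;
}
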
Example #4
static Bool
VMCIUtilCheckHostCapabilities(void)
{
   int result;
   VMCIResourcesQueryMsg *msg;
   uint32 msgSize = sizeof(VMCIResourcesQueryHdr) +
      VMCI_UTIL_NUM_RESOURCES * sizeof(VMCI_Resource);
   VMCIDatagram *checkMsg = VMCI_AllocKernelMem(msgSize, VMCI_MEMORY_NONPAGED);

   if (checkMsg == NULL) {
      VMCI_WARNING((LGPFX"Check host: Insufficient memory.\n"));
      return FALSE;
   }

   checkMsg->dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID,
                                    VMCI_RESOURCES_QUERY);
   checkMsg->src = VMCI_ANON_SRC_HANDLE;
   checkMsg->payloadSize = msgSize - VMCI_DG_HEADERSIZE;
   msg = (VMCIResourcesQueryMsg *)VMCI_DG_PAYLOAD(checkMsg);

   msg->numResources = VMCI_UTIL_NUM_RESOURCES;
   msg->resources[0] = VMCI_GET_CONTEXT_ID;

   result = VMCI_SendDatagram(checkMsg);
   VMCI_FreeKernelMem(checkMsg, msgSize);

   /* We need the vector. There are no fallbacks. */
   return (result == 0x1);
}
Example #5
int
VMCIDatagram_InvokeGuestHandler(VMCIDatagram *dg) // IN
{
#if defined(VMKERNEL)
   VMCI_WARNING((LGPFX"Cannot dispatch within guest in VMKERNEL.\n"));
   return VMCI_ERROR_DST_UNREACHABLE;
#else // VMKERNEL
   int retval;
   VMCIResource *resource;
   DatagramEntry *dstEntry;

   ASSERT(dg);

   resource = VMCIResource_Get(dg->dst, VMCI_RESOURCE_TYPE_DATAGRAM);
   if (NULL == resource) {
      VMCI_DEBUG_LOG(4, (LGPFX"destination (handle=0x%x:0x%x) doesn't exist.\n",
                         dg->dst.context, dg->dst.resource));
      return VMCI_ERROR_NO_HANDLE;
   }

   dstEntry = RESOURCE_CONTAINER(resource, DatagramEntry, resource);
   if (dstEntry->runDelayed) {
      VMCIDelayedDatagramInfo *dgInfo;

      dgInfo = VMCI_AllocKernelMem(sizeof *dgInfo + (size_t)dg->payloadSize,
                                   (VMCI_MEMORY_ATOMIC | VMCI_MEMORY_NONPAGED));
      if (NULL == dgInfo) {
         VMCIResource_Release(resource);
         retval = VMCI_ERROR_NO_MEM;
         goto exit;
      }

      dgInfo->inDGHostQueue = FALSE;
      dgInfo->entry = dstEntry;
      memcpy(&dgInfo->msg, dg, VMCI_DG_SIZE(dg));

      retval = VMCI_ScheduleDelayedWork(VMCIDatagramDelayedDispatchCB, dgInfo);
      if (retval < VMCI_SUCCESS) {
         VMCI_WARNING((LGPFX"Failed to schedule delayed work for datagram "
                       "(result=%d).\n", retval));
         VMCI_FreeKernelMem(dgInfo, sizeof *dgInfo + (size_t)dg->payloadSize);
         VMCIResource_Release(resource);
         dgInfo = NULL;
         goto exit;
      }
   } else {
      dstEntry->recvCB(dstEntry->clientData, dg);
      VMCIResource_Release(resource);
      retval = VMCI_SUCCESS;
   }

exit:
   return retval;
#endif // VMKERNEL
}
Example #6
static int
VPageChannelAllocDatagram(VPageChannel *channel,       // IN
                          size_t messageLen,           // IN
                          int numElems,                // IN
                          VMCIDatagram **outDg)        // OUT
{
   size_t size;
   VMCIDatagram *dg;

   ASSERT(channel);
   ASSERT(outDg);

   *outDg = NULL;

   size = VMCI_DG_HEADERSIZE + sizeof(VPageChannelPacket) + messageLen +
      numElems * sizeof (VPageChannelElem);

   if (size > VMCI_MAX_DG_SIZE) {
      VMCI_WARNING((LGPFX"Requested datagram size too large (channel=%p) "
                   "(size=%"FMTSZ"u).",
                   channel,
                   size));
      return VMCI_ERROR_PAYLOAD_TOO_LARGE;
   }

   dg = VMCI_AllocKernelMem(size, VMCI_MEMORY_ATOMIC);
   if (!dg) {
      VMCI_WARNING((LGPFX"Failed to allocate datagram (channel=%p).",
                    channel));
      return VMCI_ERROR_NO_MEM;
   }

   memset(dg, 0, size);
   dg->dst = channel->peerDgHandle;
   dg->src = channel->dgHandle;
   dg->payloadSize = size - VMCI_DG_HEADERSIZE;

   /* Chatty. */
   // VMCI_DEBUG_LOG(10,
   //                (LGPFX"Allocated datagram (payload=%"FMT64"u).\n",
   //                 dg->payloadSize));

   *outDg = dg;

   return VMCI_SUCCESS;
}
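
The caller owns the returned datagram and must release it with the same size it was allocated with. A sketch of the expected call pattern (hypothetical call site, mirroring the send-then-free sequence of Example #4; VMCI_DG_SIZE() recovers the header-plus-payload size):

   VMCIDatagram *dg;
   int err;

   err = VPageChannelAllocDatagram(channel, messageLen, numElems, &dg);
   if (err >= VMCI_SUCCESS) {
      /* ... fill in the message and elements here ... */
      err = VMCI_SendDatagram(dg);
      VMCI_FreeKernelMem(dg, VMCI_DG_SIZE(dg));
   }
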
Example #7
static int
VMCIDatagramDispatchAsHost(VMCIId contextID,  // IN:
                           VMCIDatagram *dg)  // IN:
{
   int retval;
   size_t dgSize;
   VMCIPrivilegeFlags srcPrivFlags;

   ASSERT(dg);
   ASSERT(VMCI_HostPersonalityActive());

   dgSize = VMCI_DG_SIZE(dg);

   if (contextID == VMCI_HOST_CONTEXT_ID &&
       dg->dst.context == VMCI_HYPERVISOR_CONTEXT_ID) {
      VMCI_DEBUG_LOG(4, (LGPFX"Host cannot talk to hypervisor\n"));
      return VMCI_ERROR_DST_UNREACHABLE;
   }

   ASSERT(dg->dst.context != VMCI_HYPERVISOR_CONTEXT_ID);

   /* Chatty. */
   // VMCI_DEBUG_LOG(10, (LGPFX"Sending from (handle=0x%x:0x%x) to "
   //                     "(handle=0x%x:0x%x) (size=%u bytes).\n",
   //                     dg->src.context, dg->src.resource,
   //                     dg->dst.context, dg->dst.resource, (uint32)dgSize));

   /*
    * Check that source handle matches sending context.
    */
   if (dg->src.context != contextID) {
      VMCI_DEBUG_LOG(4, (LGPFX"Sender context (ID=0x%x) is not owner of src "
                         "datagram entry (handle=0x%x:0x%x).\n",
                         contextID, dg->src.context, dg->src.resource));
      return VMCI_ERROR_NO_ACCESS;
   }

   /*
    * Get hold of privileges of sending endpoint.
    */

   retval = VMCIDatagramGetPrivFlagsInt(contextID, dg->src, &srcPrivFlags);
   if (retval != VMCI_SUCCESS) {
      VMCI_WARNING((LGPFX"Couldn't get privileges (handle=0x%x:0x%x).\n",
                    dg->src.context, dg->src.resource));
      return retval;
   }

   /* Determine if we should route to host or guest destination. */
   if (dg->dst.context == VMCI_HOST_CONTEXT_ID) {
      /* Route to host datagram entry. */
      DatagramEntry *dstEntry;
      VMCIResource *resource;

      if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
          dg->dst.resource == VMCI_EVENT_HANDLER) {
         return VMCIEvent_Dispatch(dg);
      }

      resource = VMCIResource_Get(dg->dst, VMCI_RESOURCE_TYPE_DATAGRAM);
      if (resource == NULL) {
         VMCI_DEBUG_LOG(4, (LGPFX"Sending to invalid destination "
                            "(handle=0x%x:0x%x).\n",
                            dg->dst.context, dg->dst.resource));
         return VMCI_ERROR_INVALID_RESOURCE;
      }
      dstEntry = RESOURCE_CONTAINER(resource, DatagramEntry, resource);
      if (VMCIDenyInteraction(srcPrivFlags, dstEntry->privFlags)) {
         VMCIResource_Release(resource);
         return VMCI_ERROR_NO_ACCESS;
      }
      ASSERT(dstEntry->recvCB);

      /*
       * If a VMCI datagram destined for the host is also sent by the
       * host, we always run it delayed. This ensures that no locks
       * are held when the datagram callback runs.
       */

      if (dstEntry->runDelayed ||
          (dg->src.context == VMCI_HOST_CONTEXT_ID &&
           VMCI_CanScheduleDelayedWork())) {
         VMCIDelayedDatagramInfo *dgInfo;

         if (Atomic_FetchAndAdd(&delayedDGHostQueueSize, 1) ==
             VMCI_MAX_DELAYED_DG_HOST_QUEUE_SIZE) {
            Atomic_Dec(&delayedDGHostQueueSize);
            VMCIResource_Release(resource);
            return VMCI_ERROR_NO_MEM;
         }

         dgInfo = VMCI_AllocKernelMem(sizeof *dgInfo + (size_t)dg->payloadSize,
                                      (VMCI_MEMORY_ATOMIC |
                                       VMCI_MEMORY_NONPAGED));
         if (NULL == dgInfo) {
            Atomic_Dec(&delayedDGHostQueueSize);
            VMCIResource_Release(resource);
            return VMCI_ERROR_NO_MEM;
         }

         dgInfo->inDGHostQueue = TRUE;
         dgInfo->entry = dstEntry;
         memcpy(&dgInfo->msg, dg, dgSize);

         retval = VMCI_ScheduleDelayedWork(VMCIDatagramDelayedDispatchCB, dgInfo);
         if (retval < VMCI_SUCCESS) {
            VMCI_WARNING((LGPFX"Failed to schedule delayed work for datagram "
                          "(result=%d).\n", retval));
            VMCI_FreeKernelMem(dgInfo, sizeof *dgInfo + (size_t)dg->payloadSize);
            VMCIResource_Release(resource);
            Atomic_Dec(&delayedDGHostQueueSize);
            return retval;
         }
      } else {
         retval = dstEntry->recvCB(dstEntry->clientData, dg);
         VMCIResource_Release(resource);
         if (retval < VMCI_SUCCESS) {
            return retval;
         }
      }
   } else {
      /*
       * Route to destination VM context.
       */

      VMCIDatagram *newDG;

      if (contextID != dg->dst.context) {
         if (VMCIDenyInteraction(srcPrivFlags,
                              vmci_context_get_priv_flags(dg->dst.context))) {
            VMCI_DEBUG_LOG(4, (LGPFX"Interaction denied (%X/%X - %X/%X)\n",
                           contextID, srcPrivFlags,
                           dg->dst.context,
                           vmci_context_get_priv_flags(dg->dst.context)));
            return VMCI_ERROR_NO_ACCESS;
         } else if (VMCI_CONTEXT_IS_VM(contextID)) {
            /*
             * If the sending context is a VM, it cannot reach another VM.
             */

            if (!vmkernel) {
               VMCI_DEBUG_LOG(4, (LGPFX"Datagram communication between VMs not "
                                  "supported (src=0x%x, dst=0x%x).\n",
                                  contextID, dg->dst.context));
               return VMCI_ERROR_DST_UNREACHABLE;
            }
         }
      }

      /* We make a copy to enqueue. */
      newDG = VMCI_AllocKernelMem(dgSize, VMCI_MEMORY_NORMAL);
      if (newDG == NULL) {
         VMCI_DEBUG_LOG(4, (LGPFX"No memory for datagram\n"));
         return VMCI_ERROR_NO_MEM;
      }
      memcpy(newDG, dg, dgSize);
      retval = VMCIContext_EnqueueDatagram(dg->dst.context, newDG);
      if (retval < VMCI_SUCCESS) {
         VMCI_FreeKernelMem(newDG, dgSize);
         VMCI_DEBUG_LOG(4, (LGPFX"Enqueue failed\n"));
         return retval;
      }
   }

   /* The datagram is freed when the context reads it. */

   /* Chatty. */
   // VMCI_DEBUG_LOG(10, (LGPFX"Sent datagram (size=%u bytes).\n",
   //                     (uint32)dgSize));

   /*
    * We currently truncate the size to signed 32 bits. This doesn't
    * matter for this handler as it only supports 4KB messages.
    */

   return (int)dgSize;
}
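
The mixed return convention matters at the call site: negative values are VMCI error codes, while anything else is the accepted datagram size. A sketch (hypothetical caller):

   retval = VMCIDatagramDispatchAsHost(contextID, dg);
   if (retval < VMCI_SUCCESS) {
      return retval;   /* A VMCI error code. */
   }
   /* Otherwise retval is the (32-bit truncated) size of the datagram. */
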
Example #8
static int
DatagramCreateHnd(VMCIId resourceID,            // IN:
                  uint32 flags,                 // IN:
                  VMCIPrivilegeFlags privFlags, // IN:
                  VMCIDatagramRecvCB recvCB,    // IN:
                  void *clientData,             // IN:
                  VMCIHandle *outHandle)        // OUT:

{
   int result;
   VMCIId contextID;
   VMCIHandle handle;
   DatagramEntry *entry;

   ASSERT(recvCB != NULL);
   ASSERT(outHandle != NULL);
   ASSERT(!(privFlags & ~VMCI_PRIVILEGE_ALL_FLAGS));

   if ((flags & VMCI_FLAG_WELLKNOWN_DG_HND) != 0) {
      return VMCI_ERROR_INVALID_ARGS;
   } else {
      if ((flags & VMCI_FLAG_ANYCID_DG_HND) != 0) {
         contextID = VMCI_INVALID_ID;
      } else {
         contextID = vmci_get_context_id();
         if (contextID == VMCI_INVALID_ID) {
            return VMCI_ERROR_NO_RESOURCES;
         }
      }

      if (resourceID == VMCI_INVALID_ID) {
         resourceID = VMCIResource_GetID(contextID);
         if (resourceID == VMCI_INVALID_ID) {
            return VMCI_ERROR_NO_HANDLE;
         }
      }

      handle = VMCI_MAKE_HANDLE(contextID, resourceID);
   }

   entry = VMCI_AllocKernelMem(sizeof *entry, VMCI_MEMORY_NONPAGED);
   if (entry == NULL) {
      VMCI_WARNING((LGPFX"Failed allocating memory for datagram entry.\n"));
      return VMCI_ERROR_NO_MEM;
   }

   if (!VMCI_CanScheduleDelayedWork()) {
      if (flags & VMCI_FLAG_DG_DELAYED_CB) {
         VMCI_FreeKernelMem(entry, sizeof *entry);
         return VMCI_ERROR_INVALID_ARGS;
      }
      entry->runDelayed = FALSE;
   } else {
      entry->runDelayed = (flags & VMCI_FLAG_DG_DELAYED_CB) ? TRUE : FALSE;
   }

   entry->flags = flags;
   entry->recvCB = recvCB;
   entry->clientData = clientData;
   VMCI_CreateEvent(&entry->destroyEvent);
   entry->privFlags = privFlags;

   /* Make datagram resource live. */
   result = VMCIResource_Add(&entry->resource, VMCI_RESOURCE_TYPE_DATAGRAM,
                             handle, DatagramFreeCB, entry);
   if (result != VMCI_SUCCESS) {
      VMCI_WARNING((LGPFX"Failed to add new resource (handle=0x%x:0x%x).\n",
                    handle.context, handle.resource));
      VMCI_DestroyEvent(&entry->destroyEvent);
      VMCI_FreeKernelMem(entry, sizeof *entry);
      return result;
   }
   *outHandle = handle;

   return VMCI_SUCCESS;
}
Example #9
static void
VPageChannelDoDoorbellCallback(VPageChannel *channel) // IN/OUT
{
   Bool inUse;
   unsigned long flags;
   VPageChannelPacket packetHeader;

   ASSERT(channel);

   if (VPCState_Connected != channel->state) {
      VMCI_WARNING((LGPFX"Not connected (channel=%p).\n",
                    channel));
      return;
   }

   VPageChannelAcquireRecvLock(channel, &flags);
   inUse = channel->inPoll;
   channel->inPoll = TRUE;
   VPageChannelReleaseRecvLock(channel, flags);

   if (inUse) {
      return;
   }

retry:
   while (vmci_qpair_consume_buf_ready(channel->qpair) >= sizeof packetHeader) {
      ssize_t retSize, totalSize;
      VPageChannelPacket *packet;

      retSize = vmci_qpair_peek(channel->qpair, &packetHeader,
                                sizeof packetHeader,
                                /* XXX, UTIL_VMKERNEL_BUFFER for VMKernel. */
                                0);
      if (retSize < sizeof packetHeader) {
         /*
          * XXX, deal with partial read.
          */

         VMCI_WARNING((LGPFX"Failed to peek (channel=%p) "
                       "(required=%"FMTSZ"d) (err=%"FMTSZ"d).\n",
                       channel,
                       sizeof packetHeader,
                       retSize));
         break;
      }

      totalSize = sizeof packetHeader + packetHeader.msgLen +
         packetHeader.numElems * sizeof(VPageChannelElem);

      retSize = vmci_qpair_consume_buf_ready(channel->qpair);
      if (retSize < totalSize) {
         /*
          * XXX, deal with partial read.
          */

         VMCI_WARNING((LGPFX"Received partial packet (channel=%p) "
                       "(type=%d) (len=%d) (num elems=%d) (avail=%"FMTSZ"d) "
                       "(requested=%"FMTSZ"d).\n",
                       channel,
                       packetHeader.type,
                       packetHeader.msgLen,
                       packetHeader.numElems,
                       retSize,
                       totalSize));
         break;
      }

      packet = (VPageChannelPacket *)
         VMCI_AllocKernelMem(totalSize, VMCI_MEMORY_ATOMIC);
      if (!packet) {
         VMCI_WARNING((LGPFX"Failed to allocate packet (channel=%p) "
                       "(size=%"FMTSZ"d).\n",
                       channel,
                       totalSize));
         break;
      }

      retSize = vmci_qpair_dequeue(channel->qpair, packet,
                                   totalSize,
                                   /* XXX, UTIL_VMKERNEL_BUFFER for VMKernel. */
                                   0);
      if (retSize < totalSize) {
         /*
          * XXX, deal with partial read.
          */

         VMCI_WARNING((LGPFX"Failed to dequeue (channel=%p) "
                       "(required=%"FMTSZ"d) (err=%"FMTSZ"d).\n",
                       channel,
                       totalSize,
                       retSize));
         VMCI_FreeKernelMem(packet, totalSize);
         break;
      }

      VPageChannelRecvPacket(channel, packet);
      VMCI_FreeKernelMem(packet, totalSize);
   }

   VPageChannelAcquireRecvLock(channel, &flags);

   /*
    * The doorbell may have been notified between when we finished reading
    * data and when we grabbed the lock.  If that happens, then there may be
    * data, but we bailed out of that second notification because inPoll was
    * already set.  So that we don't miss anything, do a final check here under
    * the lock for any data that might have arrived.
    */

   if (vmci_qpair_consume_buf_ready(channel->qpair) >= sizeof packetHeader) {
      VPageChannelReleaseRecvLock(channel, flags);
      goto retry;
   }

   channel->inPoll = FALSE;
   VPageChannelReleaseRecvLock(channel, flags);
}
Example #10
static int
VPageChannelAddRecvBuffers(VPageChannel *channel,     // IN
                           int numElems,              // IN
                           Bool onInit)               // IN
{
   int n;
   int sent;
   int maxElems;
   Bool isAtomic;
   size_t size;
   unsigned long flags;
   VPageChannelElem *elems;
   VPageChannelPacket *packet;

   ASSERT(channel);

   sent = 0;
   size = 0;
   elems = NULL;
   packet = NULL;

   if (onInit || (channel->flags & VPAGECHANNEL_FLAGS_RECV_DELAYED)) {
      /*
       * If we are initializing the channel, or we are running in a delayed
       * context (recv() in this case), then we can use blocking allocation
       * and we can allocate large packets.  Also, no need to take the
       * send lock here, we can just take it for each packet.
       */

      isAtomic = FALSE;
      maxElems = VMCI_PACKET_DGRAM_MAX_ELEMS;
      flags = 0; /* Silence compiler. */
   } else {
      /*
       * We're in an atomic context.  We must allocate page-sized packets
       * atomically and send them over the queuepair.  Since this can
       * cause a lot of signalling, we optimize by taking the send lock
       * once for all packets, and only signalling when we are done.
       */

      isAtomic = TRUE;
      maxElems = VMCI_PACKET_PAGE_MAX_ELEMS;
      VPageChannelAcquireSendLock(channel, &flags);
   }

   n = min_t(int, maxElems, numElems);
   while (n > 0) {
      int retval;
      int allocNum;

      /*
       * First packet is always big enough to cover any remaining elements,
       * so just allocate it once.
       */

      if (NULL == packet) {
         size = sizeof(VPageChannelPacket) + (n * sizeof(VPageChannelElem));
         packet = (VPageChannelPacket *)
            VMCI_AllocKernelMem(size,
                        isAtomic ? VMCI_MEMORY_ATOMIC : VMCI_MEMORY_NORMAL);
         if (packet == NULL) {
            VMCI_WARNING((LGPFX"Failed to allocate packet (channel=%p) "
                          "(size=%"FMTSZ"u).\n",
                          channel,
                          size));
            goto exit;
         }

         packet->type = VPCPacket_SetRecvBuffer;
         packet->msgLen = 0;
         elems = VPAGECHANNEL_PACKET_ELEMS(packet);
      }

      allocNum = channel->elemAllocFn(channel->allocClientData, elems, n);
      if (0 == allocNum) {
         /*
          * If the client failed to allocate any elements at all then just
          * bail out and return whatever number we managed to send so far
          * (if any).
          */

         VMCI_WARNING((LGPFX"Failed to allocate receive buffer (channel=%p) "
                       "(expected=%d).\n",
                       channel,
                       n));
         goto exit;
      }

      /*
       * We wanted "n" elements, but we might only have "allocNum" because
       * that's all the client could allocate.  Pass down whatever we got.
       */

      packet->numElems = allocNum;

      if (onInit) {
         retval = VPageChannelSendControl(channel, VPCPacket_SetRecvBuffer,
                                          NULL, 0, allocNum, elems);
      } else {
         /*
          * Do not ask for the lock here if we are atomic, we take care of
          * that ourselves.  Similarly, if we are atomic then we will do our
          * own signalling, so inform the send that there is a signal already
          * pending.
          */

         retval = VPageChannelSendPacket(channel, packet,
                                     isAtomic ? FALSE : TRUE,  // needsLock
                                     isAtomic ? TRUE : FALSE); // signalPending
         /*
          * XXX, what if this is a non-blocking queuepair and we fail to
          * send because it's full and we can't wait?  Is it even worth it
          * to loop?
          */
      }
      if (retval < VMCI_SUCCESS) {
         /*
          * Failure to send is fatal.  Release the client's elements and
          * bail out.
          */

         VMCI_WARNING((LGPFX"Failed to set receive buffers (channel=%p) "
                       "(err=%d).\n",
                       channel,
                       retval));
         channel->elemFreeFn(channel->freeClientData, elems, allocNum);
         goto exit;
      }

      Atomic_Add32(&channel->curRecvBufs, allocNum);

      sent += allocNum;
      numElems -= allocNum;
      n = min_t(int, maxElems, numElems);
   }

exit:
   if (isAtomic) {
      /*
       * We're done sending packets, so now we can signal.  Even if we only
       * sent some of the requested buffers, we must signal anyway, otherwise
       * the peer won't know about the ones we did send.
       */

      (void)VPageChannelSignal(channel);
      VPageChannelReleaseSendLock(channel, flags);
   }
   if (NULL != packet) {
      VMCI_FreeKernelMem(packet, size);
   }
   return sent;
}
Example #11
int
VPageChannel_Send(VPageChannel *channel,       // IN/OUT
                  VPageChannelPacketType type, // IN
                  char *message,               // IN
                  int len,                     // IN
                  VPageChannelBuffer *buffer)  // IN
{
   int retval;
   int numElems;
   ssize_t totalSize;
   VPageChannelPacket *packet;

   ASSERT(channel);

   if (VPCState_Connected != channel->state) {
      VMCI_WARNING((LGPFX"Not connected (channel=%p).\n",
                    channel));
      return VMCI_ERROR_DST_UNREACHABLE;
   }

   if (buffer) {
      numElems = buffer->numElems;
   } else {
      numElems = 0;
   }

   totalSize = sizeof(VPageChannelPacket) + len +
      numElems * sizeof(VPageChannelElem);
   packet = (VPageChannelPacket *)
      VMCI_AllocKernelMem(totalSize,
                        channel->flags & VPAGECHANNEL_FLAGS_SEND_WHILE_ATOMIC ?
                        VMCI_MEMORY_ATOMIC : VMCI_MEMORY_NORMAL);
   if (!packet) {
      VMCI_WARNING((LGPFX"Failed to allocate packet (channel=%p) "
                    "(size=%"FMTSZ"d).",
                    channel,
                    totalSize));
      return VMCI_ERROR_NO_MEM;
   }

   packet->type = type;
   packet->msgLen = len;
   packet->numElems = numElems;

   if (len) {
      ASSERT(message);
      memcpy(VPAGECHANNEL_PACKET_MESSAGE(packet), message, len);
   }

   if (numElems) {
      ASSERT(buffer);
      ASSERT(buffer->elems);
      memcpy(VPAGECHANNEL_PACKET_ELEMS(packet), buffer->elems,
             numElems * sizeof (VPageChannelElem));
   }

   retval = VPageChannel_SendPacket(channel, packet);

   VMCI_FreeKernelMem(packet, totalSize);

   return retval;
}
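
A usage sketch for the common case of a message-only packet (hypothetical call site; VPCPacket_Data is assumed here to be a valid member of VPageChannelPacketType):

   char msg[] = "hello";
   int err;

   /* No element buffer, so pass NULL; numElems is then taken as 0. */
   err = VPageChannel_Send(channel, VPCPacket_Data, msg, sizeof msg, NULL);
   if (err < VMCI_SUCCESS) {
      VMCI_WARNING((LGPFX"Send failed (channel=%p) (err=%d).\n",
                    channel, err));
   }
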
Example #12
int
VPageChannel_CreateInVM(VPageChannel **channel,              // IN/OUT
                        VMCIId resourceId,                   // IN
                        VMCIId peerResourceId,               // IN
                        uint64 produceQSize,                 // IN
                        uint64 consumeQSize,                 // IN
                        uint32 channelFlags,                 // IN
                        VPageChannelRecvCB recvCB,           // IN
                        void *clientRecvData,                // IN
                        VPageChannelAllocElemFn elemAllocFn, // IN
                        void *allocClientData,               // IN
                        VPageChannelFreeElemFn elemFreeFn,   // IN
                        void *freeClientData,                // IN
                        int defaultRecvBuffers,              // IN
                        int maxRecvBuffers)                  // IN
{
   int retval;
   int flags;
   VPageChannel *pageChannel;

   ASSERT(channel);
   ASSERT(VMCI_INVALID_ID != resourceId);
   ASSERT(VMCI_INVALID_ID != peerResourceId);
   ASSERT(recvCB);

   if (channelFlags & ~(VPAGECHANNEL_FLAGS_ALL)) {
      VMCI_WARNING((LGPFX"Invalid argument (flags=0x%x).\n",
                    channelFlags));
      return VMCI_ERROR_INVALID_ARGS;
   }

   pageChannel =
      VMCI_AllocKernelMem(sizeof *pageChannel, VMCI_MEMORY_NONPAGED);
   if (!pageChannel) {
      VMCI_WARNING((LGPFX"Failed to allocate channel memory.\n"));
      return VMCI_ERROR_NO_MEM;
   }

   /*
    * XXX, we should support a default internal allocation function.
    */

   memset(pageChannel, 0, sizeof *pageChannel);
   pageChannel->state = VPCState_Unconnected;
   pageChannel->dgHandle = VMCI_INVALID_HANDLE;
   pageChannel->attachSubId = VMCI_INVALID_ID;
   pageChannel->detachSubId = VMCI_INVALID_ID;
   pageChannel->qpHandle = VMCI_INVALID_HANDLE;
   pageChannel->qpair = NULL;
   pageChannel->doorbellHandle = VMCI_INVALID_HANDLE;
   pageChannel->peerDoorbellHandle = VMCI_INVALID_HANDLE;
   pageChannel->flags = channelFlags;
   pageChannel->recvCB = recvCB;
   pageChannel->clientRecvData = clientRecvData;
   pageChannel->elemAllocFn = elemAllocFn;
   pageChannel->allocClientData = allocClientData;
   pageChannel->elemFreeFn = elemFreeFn;
   pageChannel->freeClientData = freeClientData;
   pageChannel->resourceId = resourceId;
   pageChannel->peerDgHandle = VMCI_MAKE_HANDLE(VMCI_HOST_CONTEXT_ID,
                                                  peerResourceId);
   Atomic_Write32(&pageChannel->curRecvBufs, 0);
   pageChannel->recvBufsTarget = defaultRecvBuffers;
   pageChannel->defaultRecvBufs = defaultRecvBuffers;
   pageChannel->maxRecvBufs = maxRecvBuffers + VMCI_PACKET_RECV_THRESHOLD;
   pageChannel->produceQSize = produceQSize;
   pageChannel->consumeQSize = consumeQSize;

   /*
    * Create a datagram handle over which we will send connection handshake
    * packets (once the queuepair is created, we can send packets over that
    * instead).
    * This handle has a delayed callback regardless of the channel flags,
    * because we may have to create a queuepair inside the callback.
    */

   flags = VMCI_FLAG_DG_DELAYED_CB;
   retval = vmci_datagram_create_handle(resourceId, flags,
                                        VPageChannelDgRecvFunc, pageChannel,
                                        &pageChannel->dgHandle);
   if (retval < VMCI_SUCCESS) {
      VMCI_WARNING((LGPFX"Failed to create datagram handle "
                    "(channel=%p) (err=%d).\n",
                    channel,
                    retval));
      goto error;
   }

   VMCI_DEBUG_LOG(10,
                  (LGPFX"Created datagram (channel=%p) "
                   "(handle=0x%x:0x%x).\n",
                   channel,
                   pageChannel->dgHandle.context,
                   pageChannel->dgHandle.resource));

   /*
    * Create a doorbell handle.  This is used by the peer to signal the
    * arrival of packets in the queuepair.  This handle has a delayed
    * callback depending on the channel flags.
    */

   flags = channelFlags & VPAGECHANNEL_FLAGS_RECV_DELAYED ?
      VMCI_FLAG_DELAYED_CB : 0;
   retval = vmci_doorbell_create(&pageChannel->doorbellHandle,
                                 flags, VMCI_PRIVILEGE_FLAG_RESTRICTED,
                                 VPageChannelDoorbellCallback, pageChannel);
   if (retval < VMCI_SUCCESS) {
      VMCI_WARNING((LGPFX"Failed to create doorbell "
                    "(channel=%p) (err=%d).\n",
                    channel,
                    retval));
      goto error;
   }

   VMCI_DEBUG_LOG(10,
                  (LGPFX"Created doorbell (channel=%p) "
                   "(handle=0x%x:0x%x).\n",
                   channel,
                   pageChannel->doorbellHandle.context,
                   pageChannel->doorbellHandle.resource));

   /*
    * Now create the queuepair, over which we can pass data packets.
    */

   retval = VPageChannelCreateQueuePair(pageChannel);
   if (retval < VMCI_SUCCESS) {
      goto error;
   }

   /*
    * Set the receiving buffers before sending the connection message to
    * avoid a race when the connection is made, but there is no receiving
    * buffer yet.
    */

   if (defaultRecvBuffers) {
      int numElems = defaultRecvBuffers + VMCI_PACKET_RECV_THRESHOLD;
      if (0 == VPageChannelAddRecvBuffers(pageChannel, numElems, TRUE)) {
         /*
          * AddRecvBuffers() returns the number of buffers actually added.  If
          * we failed to add any at all, then fail.
          */

         retval = VMCI_ERROR_NO_MEM;
         goto error;
      }
   }

   retval = VPageChannelSendConnectionMessage(pageChannel);
   if (retval < VMCI_SUCCESS) {
      goto error;
   }

   VMCI_DEBUG_LOG(10,
                  (LGPFX"Created (channel=%p) (handle=0x%x:0x%x).\n",
                   pageChannel,
                   pageChannel->dgHandle.context,
                   pageChannel->dgHandle.resource));

   *channel = pageChannel;

   return retval;

 error:
   VPageChannel_Destroy(pageChannel);
   return retval;
}
Example #13
int
VMCI_AllocPPNSet(void *produceQ,         // IN:
                 uint64 numProducePages, // IN: for queue plus header
                 void *consumeQ,         // IN:
                 uint64 numConsumePages, // IN: for queue plus header
                 PPNSet *ppnSet)         // OUT:
{
    VMCIPpnList producePPNs;
    VMCIPpnList consumePPNs;
    uint64 i;

    if (!produceQ || !numProducePages || !consumeQ || !numConsumePages ||
            !ppnSet) {
        return VMCI_ERROR_INVALID_ARGS;
    }

    if (ppnSet->initialized) {
        return VMCI_ERROR_ALREADY_EXISTS;
    }

    producePPNs =
        VMCI_AllocKernelMem(numProducePages * sizeof *producePPNs,
                            VMCI_MEMORY_NORMAL);
    if (!producePPNs) {
        return VMCI_ERROR_NO_MEM;
    }

    consumePPNs =
        VMCI_AllocKernelMem(numConsumePages * sizeof *consumePPNs,
                            VMCI_MEMORY_NORMAL);
    if (!consumePPNs) {
        VMCI_FreeKernelMem(producePPNs, numProducePages * sizeof *producePPNs);
        return VMCI_ERROR_NO_MEM;
    }

    producePPNs[0] = VMCIKVaToMPN(produceQ);
    for (i = 1; i < numProducePages; i++) {
        unsigned long pfn;

        producePPNs[i] = pfn = page_to_pfn(((VMCIQueue *)produceQ)->page[i - 1]);

        /*
         * Fail allocation if PFN isn't supported by hypervisor.
         */

        if (sizeof pfn > sizeof *producePPNs &&
                pfn != producePPNs[i]) {
            goto ppnError;
        }
    }
    consumePPNs[0] = VMCIKVaToMPN(consumeQ);
    for (i = 1; i < numConsumePages; i++) {
        unsigned long pfn;

        consumePPNs[i] = pfn = page_to_pfn(((VMCIQueue *)consumeQ)->page[i - 1]);

        /*
         * Fail allocation if PFN isn't supported by hypervisor.
         */

        if (sizeof pfn > sizeof *consumePPNs &&
                pfn != consumePPNs[i]) {
            goto ppnError;
        }
    }

    ppnSet->numProducePages = numProducePages;
    ppnSet->numConsumePages = numConsumePages;
    ppnSet->producePPNs = producePPNs;
    ppnSet->consumePPNs = consumePPNs;
    ppnSet->initialized = TRUE;
    return VMCI_SUCCESS;

ppnError:
    VMCI_FreeKernelMem(producePPNs, numProducePages * sizeof *producePPNs);
    VMCI_FreeKernelMem(consumePPNs, numConsumePages * sizeof *consumePPNs);
    return VMCI_ERROR_INVALID_ARGS;
}
Example #14
VMCIBuffer
VMCI_AllocBuffer(size_t size, int flags)
{
    return VMCI_AllocKernelMem(size, flags);
}
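
The matching release path is not shown; assuming VMCIBuffer is a plain pointer type and that frees mirror allocations as everywhere else in this code, the counterpart would be a sketch like:

void
VMCI_FreeBuffer(VMCIBuffer buf,  // IN:
                size_t size)     // IN: size passed to VMCI_AllocBuffer()
{
    VMCI_FreeKernelMem(buf, size);
}
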
Example #15
int
VMCIHost_GetUserMemory(PageStoreAttachInfo *attach,      // IN/OUT
                       VMCIQueue *produceQ,              // OUT
                       VMCIQueue *consumeQ)              // OUT
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
    int retval;
    int err = VMCI_SUCCESS;


    attach->producePages =
        VMCI_AllocKernelMem(attach->numProducePages * sizeof attach->producePages[0],
                            VMCI_MEMORY_NORMAL);
    if (attach->producePages == NULL) {
        return VMCI_ERROR_NO_MEM;
    }
    attach->consumePages =
        VMCI_AllocKernelMem(attach->numConsumePages * sizeof attach->consumePages[0],
                            VMCI_MEMORY_NORMAL);
    if (attach->consumePages == NULL) {
        err = VMCI_ERROR_NO_MEM;
        goto errorDealloc;
    }

    down_write(&current->mm->mmap_sem);
    retval = get_user_pages(current,
                            current->mm,
                            (VA)attach->produceBuffer,
                            attach->numProducePages,
                            1, 0,
                            attach->producePages,
                            NULL);
    if (retval < attach->numProducePages) {
        Log("get_user_pages(produce) failed: %d\n", retval);
        if (retval > 0) {
            int i;
            for (i = 0; i < retval; i++) {
                page_cache_release(attach->producePages[i]);
            }
        }
        err = VMCI_ERROR_NO_MEM;
        goto out;
    }

    retval = get_user_pages(current,
                            current->mm,
                            (VA)attach->consumeBuffer,
                            attach->numConsumePages,
                            1, 0,
                            attach->consumePages,
                            NULL);
    if (retval < attach->numConsumePages) {
        int i;
        Log("get_user_pages(consume) failed: %d\n", retval);
        if (retval > 0) {
            for (i = 0; i < retval; i++) {
                page_cache_release(attach->consumePages[i]);
            }
        }
        for (i = 0; i < attach->numProducePages; i++) {
            page_cache_release(attach->producePages[i]);
        }
        err = VMCI_ERROR_NO_MEM;
    }

    if (err == VMCI_SUCCESS) {
        produceQ->queueHeaderPtr = kmap(attach->producePages[0]);
        produceQ->page = &attach->producePages[1];
        consumeQ->queueHeaderPtr = kmap(attach->consumePages[0]);
        consumeQ->page = &attach->consumePages[1];
    }

out:
    up_write(&current->mm->mmap_sem);

errorDealloc:
    if (err < VMCI_SUCCESS) {
        if (attach->producePages != NULL) {
            VMCI_FreeKernelMem(attach->producePages,
                               attach->numProducePages *
                               sizeof attach->producePages[0]);
        }
        if (attach->consumePages != NULL) {
            VMCI_FreeKernelMem(attach->consumePages,
                               attach->numConsumePages *
                               sizeof attach->consumePages[0]);
        }
    }

    return err;

#else
    /*
     * Host queue pair support for earlier kernels temporarily
     * disabled. See bug 365496.
     */

    ASSERT_NOT_IMPLEMENTED(FALSE);
#if 0
    attach->produceIoBuf = VMCI_AllocKernelMem(sizeof *attach->produceIoBuf,
                           VMCI_MEMORY_NORMAL);
    if (attach->produceIoBuf == NULL) {
        return VMCI_ERROR_NO_MEM;
    }

    attach->consumeIoBuf = VMCI_AllocKernelMem(sizeof *attach->consumeIoBuf,
                           VMCI_MEMORY_NORMAL);
    if (attach->consumeIoBuf == NULL) {
        VMCI_FreeKernelMem(attach->produceIoBuf,
                           sizeof *attach->produceIoBuf);
        return VMCI_ERROR_NO_MEM;
    }

    retval = map_user_kiobuf(WRITE, attach->produceIoBuf,
                             (VA)attach->produceBuffer,
                             attach->numProducePages * PAGE_SIZE);
    if (retval < 0) {
        err = VMCI_ERROR_NO_ACCESS;
        goto out;
    }

    retval = map_user_kiobuf(WRITE, attach->consumeIoBuf,
                             (VA)attach->consumeBuffer,
                             attach->numConsumePages * PAGE_SIZE);
    if (retval < 0) {
        unmap_kiobuf(attach->produceIoBuf);
        err = VMCI_ERROR_NO_ACCESS;
    }

    if (err == VMCI_SUCCESS) {
        produceQ->queueHeaderPtr = kmap(attach->produceIoBuf->maplist[0]);
        produceQ->page = &attach->produceIoBuf->maplist[1];
        consumeQ->queueHeaderPtr = kmap(attach->consumeIoBuf->maplist[0]);
        consumeQ->page = &attach->consumeIoBuf->maplist[1];
    }

out:

    if (err < VMCI_SUCCESS) {
        if (attach->produceIoBuf != NULL) {
            VMCI_FreeKernelMem(attach->produceIoBuf,
                               sizeof *attach->produceIoBuf);
        }
        if (attach->consumeIoBuf != NULL) {
            VMCI_FreeKernelMem(attach->consumeIoBuf,
                               sizeof *attach->consumeIoBuf);
        }
    }

    return err;
#else // 0 -- Instead just return FALSE
    return FALSE;
#endif // 0
#endif // Linux version >= 2.6.0
}