Example #1
/**
  Completes the Map() operation and releases any corresponding resources.

  @param  This                  A pointer to the EDKII_UFS_HOST_CONTROLLER_PROTOCOL instance.
  @param  Mapping               The mapping value returned from Map().

  @retval EFI_SUCCESS           The range was unmapped.
  @retval EFI_DEVICE_ERROR      The data was not committed to the target system memory.

**/
EFI_STATUS
EFIAPI
UfsHcUnmap (
  IN  EDKII_UFS_HOST_CONTROLLER_PROTOCOL *This,
  IN  VOID                               *Mapping
  )
{
  UFS_HOST_CONTROLLER_PRIVATE_DATA  *Private;
  EFI_PCI_IO_PROTOCOL               *PciIo;
  EFI_STATUS                        Status;

  if ((This == NULL) || (Mapping == NULL)) {
    return EFI_INVALID_PARAMETER;
  }

  Private = UFS_HOST_CONTROLLER_PRIVATE_DATA_FROM_UFSHC (This);
  PciIo   = Private->PciIo;

  Status  = PciIo->Unmap (PciIo, Mapping);
  return Status;
}
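A minimal sketch of how a caller typically pairs this protocol's Map() with the Unmap() shown above. It is not part of the original source: the helper function, its parameters, and the operation enum value are assumptions, modeled on the EFI_PCI_IO_PROTOCOL-style DMA interface that the EDK II UFS host controller protocol mirrors.

EFI_STATUS
SketchUfsDmaRead (
  IN EDKII_UFS_HOST_CONTROLLER_PROTOCOL  *UfsHc,       // located protocol instance
  IN VOID                                *DataBuffer,  // caller-owned host buffer
  IN UINTN                               DataLength
  )
{
  UINTN                 Bytes;
  EFI_PHYSICAL_ADDRESS  DeviceAddr;
  VOID                  *Mapping;
  EFI_STATUS            Status;

  Bytes  = DataLength;
  Status = UfsHc->Map (
                    UfsHc,
                    EdkiiUfsHcOperationBusMasterRead,  // assumed enum name
                    DataBuffer,
                    &Bytes,
                    &DeviceAddr,
                    &Mapping
                    );
  if (EFI_ERROR (Status) || (Bytes != DataLength)) {
    //
    // Partial mappings are treated as failures, as the drivers above do.
    //
    return EFI_OUT_OF_RESOURCES;
  }

  //
  // ... program the transfer request with DeviceAddr and run the DMA ...
  //

  return UfsHc->Unmap (UfsHc, Mapping);  // release the mapping made by Map()
}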
Example #2
/**
  Free the memory block from the memory pool.

  @param  Pool           The memory pool to free the block from.
  @param  Block          The memory block to free.

**/
VOID
UsbHcFreeMemBlock (
  IN USBHC_MEM_POOL       *Pool,
  IN USBHC_MEM_BLOCK      *Block
  )
{
  EFI_PCI_IO_PROTOCOL     *PciIo;

  ASSERT ((Pool != NULL) && (Block != NULL));

  PciIo = Pool->PciIo;

  //
  // Unmap the common buffer then free the structures
  //
  PciIo->Unmap (PciIo, Block->Mapping);
  PciIo->FreeBuffer (PciIo, EFI_SIZE_TO_PAGES (Block->BufLen), Block->BufHost);

  gBS->FreePool (Block->Bits);
  gBS->FreePool (Block);
}
Example #3
/**
  Allocate a block of memory to be used by the buffer pool.

  @param  Pool           The buffer pool to allocate memory for.
  @param  Pages          How many pages to allocate.

  @return The allocated memory block or NULL if failed.

**/
USBHC_MEM_BLOCK *
UsbHcAllocMemBlock (
  IN  USBHC_MEM_POOL      *Pool,
  IN  UINTN               Pages
  )
{
  USBHC_MEM_BLOCK         *Block;
  EFI_PCI_IO_PROTOCOL     *PciIo;
  VOID                    *BufHost;
  VOID                    *Mapping;
  EFI_PHYSICAL_ADDRESS    MappedAddr;
  UINTN                   Bytes;
  EFI_STATUS              Status;

  PciIo = Pool->PciIo;

  Block = AllocateZeroPool (sizeof (USBHC_MEM_BLOCK));
  if (Block == NULL) {
    return NULL;
  }

  //
  // each bit in the bit array represents USBHC_MEM_UNIT
  // bytes of memory in the memory block.
  //
  ASSERT (USBHC_MEM_UNIT * 8 <= EFI_PAGE_SIZE);

  Block->BufLen   = EFI_PAGES_TO_SIZE (Pages);
  Block->BitsLen  = Block->BufLen / (USBHC_MEM_UNIT * 8);
  Block->Bits     = AllocateZeroPool (Block->BitsLen);

  if (Block->Bits == NULL) {
    gBS->FreePool (Block);
    return NULL;
  }

  //
  // Allocate the number of Pages of memory, then map it for
  // bus master read and write.
  //
  Status = PciIo->AllocateBuffer (
                    PciIo,
                    AllocateAnyPages,
                    EfiBootServicesData,
                    Pages,
                    &BufHost,
                    0
                    );

  if (EFI_ERROR (Status)) {
    goto FREE_BITARRAY;
  }

  Bytes = EFI_PAGES_TO_SIZE (Pages);
  Status = PciIo->Map (
                    PciIo,
                    EfiPciIoOperationBusMasterCommonBuffer,
                    BufHost,
                    &Bytes,
                    &MappedAddr,
                    &Mapping
                    );

  if (EFI_ERROR (Status) || (Bytes != EFI_PAGES_TO_SIZE (Pages))) {
    goto FREE_BUFFER;
  }

  //
  // Check whether the data structure used by the host controller
  // should be restricted to the same 4 GiB region.
  //
  if (Pool->Check4G && (Pool->Which4G != USB_HC_HIGH_32BIT (MappedAddr))) {
    PciIo->Unmap (PciIo, Mapping);
    goto FREE_BUFFER;
  }

  Block->BufHost  = BufHost;
  Block->Buf      = (UINT8 *) ((UINTN) MappedAddr);
  Block->Mapping  = Mapping;

  return Block;

FREE_BUFFER:
  PciIo->FreeBuffer (PciIo, Pages, BufHost);

FREE_BITARRAY:
  gBS->FreePool (Block->Bits);
  gBS->FreePool (Block);
  return NULL;
}
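To make the bit-array bookkeeping in UsbHcAllocMemBlock concrete, here is a small sketch of how a byte offset inside Block->Buf maps to one bit of Block->Bits. It is an illustration only, not part of the driver; USBHC_MEM_UNIT is assumed to be 64 bytes, its usual value in the EDK II USB host controller memory pool.

#define SKETCH_MEM_UNIT  64   // assumed value of USBHC_MEM_UNIT

//
// Each bit of Bits tracks one SKETCH_MEM_UNIT-byte unit of the block's buffer,
// which is why BitsLen above is BufLen / (USBHC_MEM_UNIT * 8).
//
BOOLEAN
SketchIsUnitUsed (
  IN UINT8  *Bits,       // Block->Bits
  IN UINTN  ByteOffset   // offset of the unit inside Block->Buf
  )
{
  UINTN  Unit;

  Unit = ByteOffset / SKETCH_MEM_UNIT;                          // allocation unit index
  return (BOOLEAN)((Bits[Unit / 8] & (1 << (Unit % 8))) != 0);  // test the matching bit
}

Example #4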
/**
  Starts a device controller or a bus controller.

  The Start() function is designed to be invoked from the EFI boot service ConnectController().
  As a result, much of the error checking on the parameters to Start() has been moved into this
  common boot service. It is legal to call Start() from other locations,
  but the following calling restrictions must be followed or the system behavior will not be deterministic.
  1. ControllerHandle must be a valid EFI_HANDLE.
  2. If RemainingDevicePath is not NULL, then it must be a pointer to a naturally aligned
     EFI_DEVICE_PATH_PROTOCOL.
  3. Prior to calling Start(), the Supported() function for the driver specified by This must
     have been called with the same calling parameters, and Supported() must have returned EFI_SUCCESS.

  @param[in]  This                 A pointer to the EFI_DRIVER_BINDING_PROTOCOL instance.
  @param[in]  ControllerHandle     The handle of the controller to start. This handle
                                   must support a protocol interface that supplies
                                   an I/O abstraction to the driver.
  @param[in]  RemainingDevicePath  A pointer to the remaining portion of a device path.  This
                                   parameter is ignored by device drivers, and is optional for bus
                                   drivers. For a bus driver, if this parameter is NULL, then handles
                                   for all the children of Controller are created by this driver.
                                   If this parameter is not NULL and the first Device Path Node is
                                   not the End of Device Path Node, then only the handle for the
                                   child device specified by the first Device Path Node of
                                   RemainingDevicePath is created by this driver.
                                   If the first Device Path Node of RemainingDevicePath is
                                   the End of Device Path Node, no child handle is created by this
                                   driver.

  @retval EFI_SUCCESS              The device was started.
  @retval EFI_DEVICE_ERROR         The device could not be started due to a device error. Currently not implemented.
  @retval EFI_OUT_OF_RESOURCES     The request could not be completed due to a lack of resources.
  @retval Others                   The driver failed to start the device.

**/
EFI_STATUS
EFIAPI
NvmExpressDriverBindingStart (
  IN EFI_DRIVER_BINDING_PROTOCOL  *This,
  IN EFI_HANDLE                   Controller,
  IN EFI_DEVICE_PATH_PROTOCOL     *RemainingDevicePath
  )
{
  EFI_STATUS                          Status;
  EFI_PCI_IO_PROTOCOL                 *PciIo;
  NVME_CONTROLLER_PRIVATE_DATA        *Private;
  EFI_DEVICE_PATH_PROTOCOL            *ParentDevicePath;
  UINT32                              NamespaceId;
  EFI_PHYSICAL_ADDRESS                MappedAddr;
  UINTN                               Bytes;
  EFI_NVM_EXPRESS_PASS_THRU_PROTOCOL  *Passthru;

  DEBUG ((EFI_D_INFO, "NvmExpressDriverBindingStart: start\n"));

  Private          = NULL;
  Passthru         = NULL;
  ParentDevicePath = NULL;

  Status = gBS->OpenProtocol (
                  Controller,
                  &gEfiDevicePathProtocolGuid,
                  (VOID **) &ParentDevicePath,
                  This->DriverBindingHandle,
                  Controller,
                  EFI_OPEN_PROTOCOL_BY_DRIVER
                  );
  if ((EFI_ERROR (Status)) && (Status != EFI_ALREADY_STARTED)) {
    return Status;
  }

  Status = gBS->OpenProtocol (
                  Controller,
                  &gEfiPciIoProtocolGuid,
                  (VOID **) &PciIo,
                  This->DriverBindingHandle,
                  Controller,
                  EFI_OPEN_PROTOCOL_BY_DRIVER
                  );

  if (EFI_ERROR (Status) && (Status != EFI_ALREADY_STARTED)) {
    return Status;
  }

  //
  // Check EFI_ALREADY_STARTED to reuse the original NVME_CONTROLLER_PRIVATE_DATA.
  //
  if (Status != EFI_ALREADY_STARTED) {
    Private = AllocateZeroPool (sizeof (NVME_CONTROLLER_PRIVATE_DATA));

    if (Private == NULL) {
      DEBUG ((EFI_D_ERROR, "NvmExpressDriverBindingStart: allocating pool for Nvme Private Data failed!\n"));
      Status = EFI_OUT_OF_RESOURCES;
      goto Exit;
    }

    //
    // 6 x 4kB aligned buffers will be carved out of this buffer.
    // 1st 4kB boundary is the start of the admin submission queue.
    // 2nd 4kB boundary is the start of the admin completion queue.
    // 3rd 4kB boundary is the start of I/O submission queue #1.
    // 4th 4kB boundary is the start of I/O completion queue #1.
    // 5th 4kB boundary is the start of I/O submission queue #2.
    // 6th 4kB boundary is the start of I/O completion queue #2.
    //
    // Allocate 6 pages of memory, then map it for bus master read and write.
    //
    Status = PciIo->AllocateBuffer (
                      PciIo,
                      AllocateAnyPages,
                      EfiBootServicesData,
                      6,
                      (VOID**)&Private->Buffer,
                      0
                      );
    if (EFI_ERROR (Status)) {
      goto Exit;
    }

    Bytes = EFI_PAGES_TO_SIZE (6);
    Status = PciIo->Map (
                      PciIo,
                      EfiPciIoOperationBusMasterCommonBuffer,
                      Private->Buffer,
                      &Bytes,
                      &MappedAddr,
                      &Private->Mapping
                      );

    if (EFI_ERROR (Status) || (Bytes != EFI_PAGES_TO_SIZE (6))) {
      goto Exit;
    }

    Private->BufferPciAddr = (UINT8 *)(UINTN)MappedAddr;

    Private->Signature = NVME_CONTROLLER_PRIVATE_DATA_SIGNATURE;
    Private->ControllerHandle          = Controller;
    Private->ImageHandle               = This->DriverBindingHandle;
    Private->DriverBindingHandle       = This->DriverBindingHandle;
    Private->PciIo                     = PciIo;
    Private->ParentDevicePath          = ParentDevicePath;
    Private->Passthru.Mode             = &Private->PassThruMode;
    Private->Passthru.PassThru         = NvmExpressPassThru;
    Private->Passthru.GetNextNamespace = NvmExpressGetNextNamespace;
    Private->Passthru.BuildDevicePath  = NvmExpressBuildDevicePath;
    Private->Passthru.GetNamespace     = NvmExpressGetNamespace;
    CopyMem (&Private->PassThruMode, &gEfiNvmExpressPassThruMode, sizeof (EFI_NVM_EXPRESS_PASS_THRU_MODE));
    InitializeListHead (&Private->AsyncPassThruQueue);
    InitializeListHead (&Private->UnsubmittedSubtasks);

    Status = NvmeControllerInit (Private);
    if (EFI_ERROR(Status)) {
      goto Exit;
    }

    //
    // Start the asynchronous I/O completion monitor
    //
    Status = gBS->CreateEvent (
                    EVT_TIMER | EVT_NOTIFY_SIGNAL,
                    TPL_NOTIFY,
                    ProcessAsyncTaskList,
                    Private,
                    &Private->TimerEvent
                    );
    if (EFI_ERROR (Status)) {
      goto Exit;
    }

    Status = gBS->SetTimer (
                    Private->TimerEvent,
                    TimerPeriodic,
                    NVME_HC_ASYNC_TIMER
                    );
    if (EFI_ERROR (Status)) {
      goto Exit;
    }

    Status = gBS->InstallMultipleProtocolInterfaces (
                    &Controller,
                    &gEfiNvmExpressPassThruProtocolGuid,
                    &Private->Passthru,
                    NULL
                    );
    if (EFI_ERROR (Status)) {
      goto Exit;
    }

    NvmeRegisterShutdownNotification ();
  } else {
    Status = gBS->OpenProtocol (
                    Controller,
                    &gEfiNvmExpressPassThruProtocolGuid,
                    (VOID **) &Passthru,
                    This->DriverBindingHandle,
                    Controller,
                    EFI_OPEN_PROTOCOL_GET_PROTOCOL
                    );
    if (EFI_ERROR (Status)) {
      goto Exit;
    }

    Private = NVME_CONTROLLER_PRIVATE_DATA_FROM_PASS_THRU (Passthru);
  }

  if (RemainingDevicePath == NULL) {
    //
    // Enumerate all NVME namespaces in the controller
    //
    Status = DiscoverAllNamespaces (
               Private
               );

  } else if (!IsDevicePathEnd (RemainingDevicePath)) {
    //
    // Enumerate the specified NVME namespace
    //
    Status = Private->Passthru.GetNamespace (
                                 &Private->Passthru,
                                 RemainingDevicePath,
                                 &NamespaceId
                                 );

    if (!EFI_ERROR (Status)) {
      Status = EnumerateNvmeDevNamespace (
                 Private,
                 NamespaceId
                 );
    }
  }

  DEBUG ((EFI_D_INFO, "NvmExpressDriverBindingStart: end successfully\n"));
  return EFI_SUCCESS;

Exit:
  if ((Private != NULL) && (Private->Mapping != NULL)) {
    PciIo->Unmap (PciIo, Private->Mapping);
  }

  if ((Private != NULL) && (Private->Buffer != NULL)) {
    PciIo->FreeBuffer (PciIo, 6, Private->Buffer);
  }

  if ((Private != NULL) && (Private->ControllerData != NULL)) {
    FreePool (Private->ControllerData);
  }

  if (Private != NULL) {
    if (Private->TimerEvent != NULL) {
      gBS->CloseEvent (Private->TimerEvent);
    }

    FreePool (Private);
  }

  gBS->CloseProtocol (
         Controller,
         &gEfiPciIoProtocolGuid,
         This->DriverBindingHandle,
         Controller
         );

  gBS->CloseProtocol (
         Controller,
         &gEfiDevicePathProtocolGuid,
         This->DriverBindingHandle,
         Controller
         );

  DEBUG ((EFI_D_INFO, "NvmExpressDriverBindingStart: end with %r\n", Status));

  return Status;
}
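To illustrate the six-page carve-out described in the comment inside the function above, here is a hedged sketch of how the queue regions fall on consecutive 4 KiB boundaries of the allocated buffer. The helper and its names are hypothetical; in the real driver the queue base addresses are assigned later, in NvmeControllerInit().

VOID
SketchCarveQueueRegions (
  IN  UINT8  *Buffer,    // host buffer returned by PciIo->AllocateBuffer()
  OUT UINT8  *Region[6]  // receives the six 4 KiB-aligned sub-buffers
  )
{
  UINTN  Index;

  for (Index = 0; Index < 6; Index++) {
    //
    // Index 0: admin SQ, 1: admin CQ, 2: I/O SQ #1, 3: I/O CQ #1,
    // 4: I/O SQ #2, 5: I/O CQ #2 -- matching the carve-out comment above.
    // The same offsets apply to the bus-master address Private->BufferPciAddr.
    //
    Region[Index] = Buffer + Index * EFI_PAGE_SIZE;
  }
}

Example #5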
/**
  Callback function invoked when the timer event is signaled.

  @param[in]  Event     The Event this notify function registered to.
  @param[in]  Context   Pointer to the context data registered to the
                        Event.

**/
VOID
EFIAPI
ProcessAsyncTaskList (
  IN EFI_EVENT                    Event,
  IN VOID*                        Context
  )
{
  NVME_CONTROLLER_PRIVATE_DATA         *Private;
  EFI_PCI_IO_PROTOCOL                  *PciIo;
  NVME_CQ                              *Cq;
  UINT16                               QueueId;
  UINT32                               Data;
  LIST_ENTRY                           *Link;
  LIST_ENTRY                           *NextLink;
  NVME_PASS_THRU_ASYNC_REQ             *AsyncRequest;
  NVME_BLKIO2_SUBTASK                  *Subtask;
  NVME_BLKIO2_REQUEST                  *BlkIo2Request;
  EFI_BLOCK_IO2_TOKEN                  *Token;
  BOOLEAN                              HasNewItem;
  EFI_STATUS                           Status;

  Private    = (NVME_CONTROLLER_PRIVATE_DATA*)Context;
  QueueId    = 2;
  Cq         = Private->CqBuffer[QueueId] + Private->CqHdbl[QueueId].Cqh;
  HasNewItem = FALSE;
  PciIo      = Private->PciIo;

  //
  // Submit asynchronous subtasks to the NVMe Submission Queue
  //
  for (Link = GetFirstNode (&Private->UnsubmittedSubtasks);
       !IsNull (&Private->UnsubmittedSubtasks, Link);
       Link = NextLink) {
    NextLink      = GetNextNode (&Private->UnsubmittedSubtasks, Link);
    Subtask       = NVME_BLKIO2_SUBTASK_FROM_LINK (Link);
    BlkIo2Request = Subtask->BlockIo2Request;
    Token         = BlkIo2Request->Token;
    RemoveEntryList (Link);
    BlkIo2Request->UnsubmittedSubtaskNum--;

    //
    // If any previous subtask fails, do not process subsequent ones.
    //
    if (Token->TransactionStatus != EFI_SUCCESS) {
      if (IsListEmpty (&BlkIo2Request->SubtasksQueue) &&
          BlkIo2Request->LastSubtaskSubmitted &&
          (BlkIo2Request->UnsubmittedSubtaskNum == 0)) {
        //
        // Remove the BlockIo2 request from the device asynchronous queue.
        //
        RemoveEntryList (&BlkIo2Request->Link);
        FreePool (BlkIo2Request);
        gBS->SignalEvent (Token->Event);
      }

      FreePool (Subtask->CommandPacket->NvmeCmd);
      FreePool (Subtask->CommandPacket->NvmeCompletion);
      FreePool (Subtask->CommandPacket);
      FreePool (Subtask);

      continue;
    }

    Status = Private->Passthru.PassThru (
                                 &Private->Passthru,
                                 Subtask->NamespaceId,
                                 Subtask->CommandPacket,
                                 Subtask->Event
                                 );
    if (Status == EFI_NOT_READY) {
      InsertHeadList (&Private->UnsubmittedSubtasks, Link);
      BlkIo2Request->UnsubmittedSubtaskNum++;
      break;
    } else if (EFI_ERROR (Status)) {
      Token->TransactionStatus = EFI_DEVICE_ERROR;

      if (IsListEmpty (&BlkIo2Request->SubtasksQueue) &&
          Subtask->IsLast) {
        //
        // Remove the BlockIo2 request from the device asynchronous queue.
        //
        RemoveEntryList (&BlkIo2Request->Link);
        FreePool (BlkIo2Request);
        gBS->SignalEvent (Token->Event);
      }

      FreePool (Subtask->CommandPacket->NvmeCmd);
      FreePool (Subtask->CommandPacket->NvmeCompletion);
      FreePool (Subtask->CommandPacket);
      FreePool (Subtask);
    } else {
      InsertTailList (&BlkIo2Request->SubtasksQueue, Link);
      if (Subtask->IsLast) {
        BlkIo2Request->LastSubtaskSubmitted = TRUE;
      }
    }
  }

  while (Cq->Pt != Private->Pt[QueueId]) {
    ASSERT (Cq->Sqid == QueueId);

    HasNewItem = TRUE;

    //
    // Find the command with the given Command Id.
    //
    for (Link = GetFirstNode (&Private->AsyncPassThruQueue);
         !IsNull (&Private->AsyncPassThruQueue, Link);
         Link = NextLink) {
      NextLink = GetNextNode (&Private->AsyncPassThruQueue, Link);
      AsyncRequest = NVME_PASS_THRU_ASYNC_REQ_FROM_THIS (Link);
      if (AsyncRequest->CommandId == Cq->Cid) {
        //
        // Copy the Response Queue entry for this command to the caller's
        // response buffer.
        //
        CopyMem (
          AsyncRequest->Packet->NvmeCompletion,
          Cq,
          sizeof(EFI_NVM_EXPRESS_COMPLETION)
          );

        //
        // Free the resources allocated before cmd submission
        //
        if (AsyncRequest->MapData != NULL) {
          PciIo->Unmap (PciIo, AsyncRequest->MapData);
        }
        if (AsyncRequest->MapMeta != NULL) {
          PciIo->Unmap (PciIo, AsyncRequest->MapMeta);
        }
        if (AsyncRequest->MapPrpList != NULL) {
          PciIo->Unmap (PciIo, AsyncRequest->MapPrpList);
        }
        if (AsyncRequest->PrpListHost != NULL) {
          PciIo->FreeBuffer (
                   PciIo,
                   AsyncRequest->PrpListNo,
                   AsyncRequest->PrpListHost
                   );
        }

        RemoveEntryList (Link);
        gBS->SignalEvent (AsyncRequest->CallerEvent);
        FreePool (AsyncRequest);

        //
        // Update submission queue head.
        //
        Private->AsyncSqHead = Cq->Sqhd;
        break;
      }
    }

    Private->CqHdbl[QueueId].Cqh++;
    if (Private->CqHdbl[QueueId].Cqh > NVME_ASYNC_CCQ_SIZE) {
      Private->CqHdbl[QueueId].Cqh = 0;
      Private->Pt[QueueId] ^= 1;
    }

    Cq = Private->CqBuffer[QueueId] + Private->CqHdbl[QueueId].Cqh;
  }

  if (HasNewItem) {
    Data  = ReadUnaligned32 ((UINT32*)&Private->CqHdbl[QueueId]);
    PciIo->Mem.Write (
                 PciIo,
                 EfiPciIoWidthUint32,
                 NVME_BAR,
                 NVME_CQHDBL_OFFSET(QueueId, Private->Cap.Dstrd),
                 1,
                 &Data
                 );
  }
}
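The completion-queue loop above relies on the NVMe phase tag: the controller inverts the Pt bit of the entries it writes on each pass through the circular queue, so an entry whose Pt differs from the value the driver last recorded is new. Below is a minimal sketch of that head/phase bookkeeping with hypothetical names; MaxIndex corresponds to NVME_ASYNC_CCQ_SIZE, while Cqh and Phase stand in for Private->CqHdbl[QueueId].Cqh and Private->Pt[QueueId].

VOID
SketchAdvanceCqHead (
  IN OUT UINT16  *Cqh,      // completion queue head index
  IN OUT UINT8   *Phase,    // phase value of already-consumed entries
  IN     UINT16  MaxIndex   // last valid index of the circular queue
  )
{
  (*Cqh)++;
  if (*Cqh > MaxIndex) {
    //
    // Wrapped to the start of the queue: from now on the controller writes
    // entries with the opposite phase tag, so flip the recorded value.
    //
    *Cqh    = 0;
    *Phase ^= 1;
  }
}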
Example #6
/**
  Sends an NVM Express Command Packet to an NVM Express controller or namespace. This function supports
  both blocking I/O and non-blocking I/O. The blocking I/O functionality is required, and the non-blocking
  I/O functionality is optional.


  @param[in]     This                A pointer to the EFI_NVM_EXPRESS_PASS_THRU_PROTOCOL instance.
  @param[in]     NamespaceId         A 32 bit namespace ID as defined in the NVMe specification to which the NVM Express Command
                                     Packet will be sent.  A value of 0 denotes the NVM Express controller, a value of all 0xFF's
                                     (all bytes are 0xFF) in the namespace ID specifies that the command packet should be sent to
                                     all valid namespaces.
  @param[in,out] Packet              A pointer to the NVM Express Command Packet.
  @param[in]     Event               If non-blocking I/O is not supported then Event is ignored, and blocking I/O is performed.
                                     If Event is NULL, then blocking I/O is performed. If Event is not NULL and non-blocking I/O
                                     is supported, then non-blocking I/O is performed, and Event will be signaled when the NVM
                                     Express Command Packet completes.

  @retval EFI_SUCCESS                The NVM Express Command Packet was sent by the host. TransferLength bytes were transferred
                                     to, or from DataBuffer.
  @retval EFI_BAD_BUFFER_SIZE        The NVM Express Command Packet was not executed. The number of bytes that could be transferred
                                     is returned in TransferLength.
  @retval EFI_NOT_READY              The NVM Express Command Packet could not be sent because the controller is not ready. The caller
                                     may retry again later.
  @retval EFI_DEVICE_ERROR           A device error occurred while attempting to send the NVM Express Command Packet.
  @retval EFI_INVALID_PARAMETER      NamespaceId or the contents of EFI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET are invalid. The NVM
                                     Express Command Packet was not sent, so no additional status information is available.
  @retval EFI_UNSUPPORTED            The command described by the NVM Express Command Packet is not supported by the NVM Express
                                     controller. The NVM Express Command Packet was not sent so no additional status information
                                     is available.
  @retval EFI_TIMEOUT                A timeout occurred while waiting for the NVM Express Command Packet to execute.

**/
EFI_STATUS
EFIAPI
NvmExpressPassThru (
  IN     EFI_NVM_EXPRESS_PASS_THRU_PROTOCOL          *This,
  IN     UINT32                                      NamespaceId,
  IN OUT EFI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET    *Packet,
  IN     EFI_EVENT                                   Event OPTIONAL
  )
{
  NVME_CONTROLLER_PRIVATE_DATA   *Private;
  EFI_STATUS                     Status;
  EFI_PCI_IO_PROTOCOL            *PciIo;
  NVME_SQ                        *Sq;
  NVME_CQ                        *Cq;
  UINT16                         QueueId;
  UINT32                         Bytes;
  UINT16                         Offset;
  EFI_EVENT                      TimerEvent;
  EFI_PCI_IO_PROTOCOL_OPERATION  Flag;
  EFI_PHYSICAL_ADDRESS           PhyAddr;
  VOID                           *MapData;
  VOID                           *MapMeta;
  VOID                           *MapPrpList;
  UINTN                          MapLength;
  UINT64                         *Prp;
  VOID                           *PrpListHost;
  UINTN                          PrpListNo;
  UINT32                         Attributes;
  UINT32                         IoAlign;
  UINT32                         MaxTransLen;
  UINT32                         Data;
  NVME_PASS_THRU_ASYNC_REQ       *AsyncRequest;
  EFI_TPL                        OldTpl;

  //
  // Check the data fields in the Packet parameter.
  //
  if ((This == NULL) || (Packet == NULL)) {
    return EFI_INVALID_PARAMETER;
  }

  if ((Packet->NvmeCmd == NULL) || (Packet->NvmeCompletion == NULL)) {
    return EFI_INVALID_PARAMETER;
  }

  if (Packet->QueueType != NVME_ADMIN_QUEUE && Packet->QueueType != NVME_IO_QUEUE) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // 'Attributes' with neither EFI_NVM_EXPRESS_PASS_THRU_ATTRIBUTES_LOGICAL nor
  // EFI_NVM_EXPRESS_PASS_THRU_ATTRIBUTES_PHYSICAL set is an illegal
  // configuration.
  //
  Attributes  = This->Mode->Attributes;
  if ((Attributes & (EFI_NVM_EXPRESS_PASS_THRU_ATTRIBUTES_PHYSICAL |
    EFI_NVM_EXPRESS_PASS_THRU_ATTRIBUTES_LOGICAL)) == 0) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Buffer alignment check for TransferBuffer & MetadataBuffer.
  //
  IoAlign     = This->Mode->IoAlign;
  if (IoAlign > 0 && (((UINTN) Packet->TransferBuffer & (IoAlign - 1)) != 0)) {
    return EFI_INVALID_PARAMETER;
  }

  if (IoAlign > 0 && (((UINTN) Packet->MetadataBuffer & (IoAlign - 1)) != 0)) {
    return EFI_INVALID_PARAMETER;
  }

  Private     = NVME_CONTROLLER_PRIVATE_DATA_FROM_PASS_THRU (This);

  //
  // Check whether NamespaceId is valid.
  //
  if ((NamespaceId > Private->ControllerData->Nn) &&
      (NamespaceId != (UINT32) -1)) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Check whether TransferLength exceeds the maximum data transfer size.
  //
  if (Private->ControllerData->Mdts != 0) {
    MaxTransLen = (1 << (Private->ControllerData->Mdts)) *
                  (1 << (Private->Cap.Mpsmin + 12));
    if (Packet->TransferLength > MaxTransLen) {
      Packet->TransferLength = MaxTransLen;
      return EFI_BAD_BUFFER_SIZE;
    }
  }

  PciIo       = Private->PciIo;
  MapData     = NULL;
  MapMeta     = NULL;
  MapPrpList  = NULL;
  PrpListHost = NULL;
  PrpListNo   = 0;
  Prp         = NULL;
  TimerEvent  = NULL;
  Status      = EFI_SUCCESS;

  if (Packet->QueueType == NVME_ADMIN_QUEUE) {
    QueueId = 0;
  } else {
    if (Event == NULL) {
      QueueId = 1;
    } else {
      QueueId = 2;

      //
      // Submission queue full check.
      //
      if ((Private->SqTdbl[QueueId].Sqt + 1) % (NVME_ASYNC_CSQ_SIZE + 1) ==
          Private->AsyncSqHead) {
        return EFI_NOT_READY;
      }
    }
  }
  Sq  = Private->SqBuffer[QueueId] + Private->SqTdbl[QueueId].Sqt;
  Cq  = Private->CqBuffer[QueueId] + Private->CqHdbl[QueueId].Cqh;

  if (Packet->NvmeCmd->Nsid != NamespaceId) {
    return EFI_INVALID_PARAMETER;
  }

  ZeroMem (Sq, sizeof (NVME_SQ));
  Sq->Opc  = (UINT8)Packet->NvmeCmd->Cdw0.Opcode;
  Sq->Fuse = (UINT8)Packet->NvmeCmd->Cdw0.FusedOperation;
  Sq->Cid  = Private->Cid[QueueId]++;
  Sq->Nsid = Packet->NvmeCmd->Nsid;

  //
  // Currently we only support PRP for data transfer, SGL is NOT supported.
  //
  ASSERT (Sq->Psdt == 0);
  if (Sq->Psdt != 0) {
    DEBUG ((EFI_D_ERROR, "NvmExpressPassThru: doesn't support SGL mechanism\n"));
    return EFI_UNSUPPORTED;
  }

  Sq->Prp[0] = (UINT64)(UINTN)Packet->TransferBuffer;
  //
  // If the NVMe cmd has data in or out, then map the user buffer to the PCI controller specific addresses.
  // Note that we don't handle the data buffers of the CreateIOSubmissionQueue and CreateIOCompletionQueue cmds
  // because these two cmds are special: their data buffers must support simultaneous access by both the
  // processor and a PCI Bus Master. It is the caller's responsibility to ensure this.
  //
  if (((Sq->Opc & (BIT0 | BIT1)) != 0) && (Sq->Opc != NVME_ADMIN_CRIOCQ_CMD) && (Sq->Opc != NVME_ADMIN_CRIOSQ_CMD)) {
    if ((Packet->TransferLength == 0) || (Packet->TransferBuffer == NULL)) {
      return EFI_INVALID_PARAMETER;
    }

    if ((Sq->Opc & BIT0) != 0) {
      Flag = EfiPciIoOperationBusMasterRead;
    } else {
      Flag = EfiPciIoOperationBusMasterWrite;
    }

    MapLength = Packet->TransferLength;
    Status = PciIo->Map (
                      PciIo,
                      Flag,
                      Packet->TransferBuffer,
                      &MapLength,
                      &PhyAddr,
                      &MapData
                      );
    if (EFI_ERROR (Status) || (Packet->TransferLength != MapLength)) {
      return EFI_OUT_OF_RESOURCES;
    }

    Sq->Prp[0] = PhyAddr;
    Sq->Prp[1] = 0;

    if((Packet->MetadataLength != 0) && (Packet->MetadataBuffer != NULL)) {
      MapLength = Packet->MetadataLength;
      Status = PciIo->Map (
                        PciIo,
                        Flag,
                        Packet->MetadataBuffer,
                        &MapLength,
                        &PhyAddr,
                        &MapMeta
                        );
      if (EFI_ERROR (Status) || (Packet->MetadataLength != MapLength)) {
        PciIo->Unmap (
                 PciIo,
                 MapData
                 );

        return EFI_OUT_OF_RESOURCES;
      }
      Sq->Mptr = PhyAddr;
    }
  }
  //
  // If the buffer size spans more than two memory pages (page size as defined in CC.Mps),
  // then build a PRP list in the second PRP submission queue entry.
  //
  Offset = ((UINT16)Sq->Prp[0]) & (EFI_PAGE_SIZE - 1);
  Bytes  = Packet->TransferLength;

  if ((Offset + Bytes) > (EFI_PAGE_SIZE * 2)) {
    //
    // Create PrpList for remaining data buffer.
    //
    PhyAddr = (Sq->Prp[0] + EFI_PAGE_SIZE) & ~(EFI_PAGE_SIZE - 1);
    Prp = NvmeCreatePrpList (PciIo, PhyAddr, EFI_SIZE_TO_PAGES(Offset + Bytes) - 1, &PrpListHost, &PrpListNo, &MapPrpList);
    if (Prp == NULL) {
      goto EXIT;
    }

    Sq->Prp[1] = (UINT64)(UINTN)Prp;
  } else if ((Offset + Bytes) > EFI_PAGE_SIZE) {
    Sq->Prp[1] = (Sq->Prp[0] + EFI_PAGE_SIZE) & ~(EFI_PAGE_SIZE - 1);
  }

  if(Packet->NvmeCmd->Flags & CDW2_VALID) {
    Sq->Rsvd2 = (UINT64)Packet->NvmeCmd->Cdw2;
  }
  if(Packet->NvmeCmd->Flags & CDW3_VALID) {
    Sq->Rsvd2 |= LShiftU64 ((UINT64)Packet->NvmeCmd->Cdw3, 32);
  }
  if(Packet->NvmeCmd->Flags & CDW10_VALID) {
    Sq->Payload.Raw.Cdw10 = Packet->NvmeCmd->Cdw10;
  }
  if(Packet->NvmeCmd->Flags & CDW11_VALID) {
    Sq->Payload.Raw.Cdw11 = Packet->NvmeCmd->Cdw11;
  }
  if(Packet->NvmeCmd->Flags & CDW12_VALID) {
    Sq->Payload.Raw.Cdw12 = Packet->NvmeCmd->Cdw12;
  }
  if(Packet->NvmeCmd->Flags & CDW13_VALID) {
    Sq->Payload.Raw.Cdw13 = Packet->NvmeCmd->Cdw13;
  }
  if(Packet->NvmeCmd->Flags & CDW14_VALID) {
    Sq->Payload.Raw.Cdw14 = Packet->NvmeCmd->Cdw14;
  }
  if(Packet->NvmeCmd->Flags & CDW15_VALID) {
    Sq->Payload.Raw.Cdw15 = Packet->NvmeCmd->Cdw15;
  }

  //
  // Ring the submission queue doorbell.
  //
  if ((Event != NULL) && (QueueId != 0)) {
    Private->SqTdbl[QueueId].Sqt =
      (Private->SqTdbl[QueueId].Sqt + 1) % (NVME_ASYNC_CSQ_SIZE + 1);
  } else {
    Private->SqTdbl[QueueId].Sqt ^= 1;
  }
  Data = ReadUnaligned32 ((UINT32*)&Private->SqTdbl[QueueId]);
  Status = PciIo->Mem.Write (
               PciIo,
               EfiPciIoWidthUint32,
               NVME_BAR,
               NVME_SQTDBL_OFFSET(QueueId, Private->Cap.Dstrd),
               1,
               &Data
               );

  if (EFI_ERROR (Status)) {
    goto EXIT;
  }

  //
  // For non-blocking requests, return directly if the command is placed
  // in the submission queue.
  //
  if ((Event != NULL) && (QueueId != 0)) {
    AsyncRequest = AllocateZeroPool (sizeof (NVME_PASS_THRU_ASYNC_REQ));
    if (AsyncRequest == NULL) {
      Status = EFI_DEVICE_ERROR;
      goto EXIT;
    }

    AsyncRequest->Signature     = NVME_PASS_THRU_ASYNC_REQ_SIG;
    AsyncRequest->Packet        = Packet;
    AsyncRequest->CommandId     = Sq->Cid;
    AsyncRequest->CallerEvent   = Event;
    AsyncRequest->MapData       = MapData;
    AsyncRequest->MapMeta       = MapMeta;
    AsyncRequest->MapPrpList    = MapPrpList;
    AsyncRequest->PrpListNo     = PrpListNo;
    AsyncRequest->PrpListHost   = PrpListHost;

    OldTpl = gBS->RaiseTPL (TPL_NOTIFY);
    InsertTailList (&Private->AsyncPassThruQueue, &AsyncRequest->Link);
    gBS->RestoreTPL (OldTpl);

    return EFI_SUCCESS;
  }

  Status = gBS->CreateEvent (
                  EVT_TIMER,
                  TPL_CALLBACK,
                  NULL,
                  NULL,
                  &TimerEvent
                  );
  if (EFI_ERROR (Status)) {
    goto EXIT;
  }

  Status = gBS->SetTimer(TimerEvent, TimerRelative, Packet->CommandTimeout);

  if (EFI_ERROR(Status)) {
    goto EXIT;
  }

  //
  // Wait for completion queue to get filled in.
  //
  Status = EFI_TIMEOUT;
  while (EFI_ERROR (gBS->CheckEvent (TimerEvent))) {
    if (Cq->Pt != Private->Pt[QueueId]) {
      Status = EFI_SUCCESS;
      break;
    }
  }

  //
  // Check the NVMe cmd execution result
  //
  if (Status != EFI_TIMEOUT) {
    if ((Cq->Sct == 0) && (Cq->Sc == 0)) {
      Status = EFI_SUCCESS;
    } else {
      Status = EFI_DEVICE_ERROR;
      //
      // Copy the Response Queue entry for this command to the caller's response buffer
      //
      CopyMem(Packet->NvmeCompletion, Cq, sizeof(EFI_NVM_EXPRESS_COMPLETION));
    
      //
      // Dump every completion entry status for debugging.
      //
      DEBUG_CODE_BEGIN();
        NvmeDumpStatus(Cq);
      DEBUG_CODE_END();
    }
  } else {
    //
    // Timeout occurs for an NVMe command. Reset the controller to abort the
    // outstanding commands.
    //
    DEBUG ((DEBUG_ERROR, "NvmExpressPassThru: Timeout occurs for an NVMe command.\n"));

    //
    // Temporarily disable the timer that triggers the processing of async transfers.
    //
    Status = gBS->SetTimer (Private->TimerEvent, TimerCancel, 0);
    if (EFI_ERROR (Status)) {
      goto EXIT;
    }

    //
    // Reset the NVMe controller.
    //
    Status = NvmeControllerInit (Private);
    if (!EFI_ERROR (Status)) {
      Status = AbortAsyncPassThruTasks (Private);
      if (!EFI_ERROR (Status)) {
        //
        // Re-enable the timer that triggers the processing of async transfers.
        //
        Status = gBS->SetTimer (Private->TimerEvent, TimerPeriodic, NVME_HC_ASYNC_TIMER);
        if (!EFI_ERROR (Status)) {
          //
          // Return EFI_TIMEOUT to indicate that a timeout occurred for the NVMe PassThru command.
          //
          Status = EFI_TIMEOUT;
        }
      }
    } else {
      Status = EFI_DEVICE_ERROR;
    }

    goto EXIT;
  }

  if ((Private->CqHdbl[QueueId].Cqh ^= 1) == 0) {
    Private->Pt[QueueId] ^= 1;
  }

  Data = ReadUnaligned32 ((UINT32*)&Private->CqHdbl[QueueId]);
  Status = PciIo->Mem.Write (
               PciIo,
               EfiPciIoWidthUint32,
               NVME_BAR,
               NVME_CQHDBL_OFFSET(QueueId, Private->Cap.Dstrd),
               1,
               &Data
               );

  //
  // For now, the code does not support the non-blocking feature for admin queue.
  // If Event is not NULL for admin queue, signal the caller's event here.
  //
  if (Event != NULL) {
    ASSERT (QueueId == 0);
    gBS->SignalEvent (Event);
  }

EXIT:
  if (MapData != NULL) {
    PciIo->Unmap (
             PciIo,
             MapData
             );
  }

  if (MapMeta != NULL) {
    PciIo->Unmap (
             PciIo,
             MapMeta
             );
  }

  if (MapPrpList != NULL) {
    PciIo->Unmap (
             PciIo,
             MapPrpList
             );
  }

  if (Prp != NULL) {
    PciIo->FreeBuffer (PciIo, PrpListNo, PrpListHost);
  }

  if (TimerEvent != NULL) {
    gBS->CloseEvent (TimerEvent);
  }
  return Status;
}
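Finally, a hedged sketch of a typical blocking caller of NvmExpressPassThru(): an admin Identify Controller command, closely modeled on how the NVMe bus driver itself issues it. The helper name is hypothetical, and NVME_ADMIN_IDENTIFY_CMD is the usual EDK II IndustryStandard definition, assumed here; the CNS value of 1 in CDW10 selects the Identify Controller data structure per the NVMe specification.

EFI_STATUS
SketchIdentifyController (
  IN EFI_NVM_EXPRESS_PASS_THRU_PROTOCOL  *Passthru,  // located pass-thru instance
  IN VOID                                *Buffer     // 4096-byte, IoAlign-aligned buffer
  )
{
  EFI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET  Packet;
  EFI_NVM_EXPRESS_COMMAND                   Command;
  EFI_NVM_EXPRESS_COMPLETION                Completion;

  ZeroMem (&Packet, sizeof (Packet));
  ZeroMem (&Command, sizeof (Command));
  ZeroMem (&Completion, sizeof (Completion));

  Command.Cdw0.Opcode   = NVME_ADMIN_IDENTIFY_CMD;  // assumed IndustryStandard name (0x06)
  Command.Nsid          = 0;                        // controller-scoped command
  Command.Cdw10         = 1;                        // CNS = 1: Identify Controller
  Command.Flags         = CDW10_VALID;

  Packet.NvmeCmd        = &Command;
  Packet.NvmeCompletion = &Completion;
  Packet.TransferBuffer = Buffer;
  Packet.TransferLength = 4096;                     // size of the Identify data structure
  Packet.QueueType      = NVME_ADMIN_QUEUE;
  Packet.CommandTimeout = 50000000;                 // 5 seconds, in 100 ns units

  //
  // Event == NULL selects the blocking path of the function above.  NamespaceId
  // is 0 so that it matches Command.Nsid, as the parameter check requires.
  //
  return Passthru->PassThru (Passthru, 0, &Packet, NULL);
}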
Example #7
/**
  Aborts the asynchronous PassThru requests.

  @param[in] Private        The pointer to the NVME_CONTROLLER_PRIVATE_DATA
                            data structure.

  @retval EFI_SUCCESS       The asynchronous PassThru requests have been aborted.
  @return EFI_DEVICE_ERROR  Failed to abort all of the asynchronous PassThru requests.

**/
EFI_STATUS
AbortAsyncPassThruTasks (
  IN NVME_CONTROLLER_PRIVATE_DATA    *Private
  )
{
  EFI_PCI_IO_PROTOCOL                *PciIo;
  LIST_ENTRY                         *Link;
  LIST_ENTRY                         *NextLink;
  NVME_BLKIO2_SUBTASK                *Subtask;
  NVME_BLKIO2_REQUEST                *BlkIo2Request;
  NVME_PASS_THRU_ASYNC_REQ           *AsyncRequest;
  EFI_BLOCK_IO2_TOKEN                *Token;
  EFI_TPL                            OldTpl;
  EFI_STATUS                         Status;

  PciIo  = Private->PciIo;
  OldTpl = gBS->RaiseTPL (TPL_NOTIFY);

  //
  // Cancel the unsubmitted subtasks.
  //
  for (Link = GetFirstNode (&Private->UnsubmittedSubtasks);
       !IsNull (&Private->UnsubmittedSubtasks, Link);
       Link = NextLink) {
    NextLink      = GetNextNode (&Private->UnsubmittedSubtasks, Link);
    Subtask       = NVME_BLKIO2_SUBTASK_FROM_LINK (Link);
    BlkIo2Request = Subtask->BlockIo2Request;
    Token         = BlkIo2Request->Token;

    BlkIo2Request->UnsubmittedSubtaskNum--;
    if (Subtask->IsLast) {
      BlkIo2Request->LastSubtaskSubmitted = TRUE;
    }
    Token->TransactionStatus = EFI_ABORTED;

    RemoveEntryList (Link);
    InsertTailList (&BlkIo2Request->SubtasksQueue, Link);
    gBS->SignalEvent (Subtask->Event);
  }

  //
  // Clean up the resources for the asynchronous PassThru requests.
  //
  for (Link = GetFirstNode (&Private->AsyncPassThruQueue);
       !IsNull (&Private->AsyncPassThruQueue, Link);
       Link = NextLink) {
    NextLink = GetNextNode (&Private->AsyncPassThruQueue, Link);
    AsyncRequest = NVME_PASS_THRU_ASYNC_REQ_FROM_THIS (Link);

    if (AsyncRequest->MapData != NULL) {
      PciIo->Unmap (PciIo, AsyncRequest->MapData);
    }
    if (AsyncRequest->MapMeta != NULL) {
      PciIo->Unmap (PciIo, AsyncRequest->MapMeta);
    }
    if (AsyncRequest->MapPrpList != NULL) {
      PciIo->Unmap (PciIo, AsyncRequest->MapPrpList);
    }
    if (AsyncRequest->PrpListHost != NULL) {
      PciIo->FreeBuffer (
               PciIo,
               AsyncRequest->PrpListNo,
               AsyncRequest->PrpListHost
               );
    }

    RemoveEntryList (Link);
    gBS->SignalEvent (AsyncRequest->CallerEvent);
    FreePool (AsyncRequest);
  }

  if (IsListEmpty (&Private->AsyncPassThruQueue) &&
      IsListEmpty (&Private->UnsubmittedSubtasks)) {
    Status = EFI_SUCCESS;
  } else {
    Status = EFI_DEVICE_ERROR;
  }

  gBS->RestoreTPL (OldTpl);

  return Status;
}