/**
  Sends an NVM Express Command Packet to an NVM Express controller or namespace.

  This function only supports blocking execution of the command.

  @param[in]     Private        The pointer to the NVME_CONTEXT Data structure.
  @param[in]     NamespaceId    Is a 32 bit Namespace ID to which the Express HCI command
                                packet will be sent.
                                A Value of 0 denotes the NVM Express controller, a Value of
                                all 0FFh in the namespace ID specifies that the command packet
                                should be sent to all valid namespaces.
  @param[in,out] Packet         A pointer to the EDKII PEI NVM Express PassThru Command Packet
                                to send to the NVMe namespace specified by NamespaceId.

  @retval EFI_SUCCESS              The EDKII PEI NVM Express Command Packet was sent by the host.
                                   TransferLength bytes were transferred to, or from DataBuffer.
  @retval EFI_NOT_READY            The EDKII PEI NVM Express Command Packet could not be sent because
                                   the controller is not ready. The caller may retry again later.
  @retval EFI_DEVICE_ERROR         A device error occurred while attempting to send the EDKII PEI NVM
                                   Express Command Packet.
  @retval EFI_INVALID_PARAMETER    Namespace, or the contents of
                                   EDKII_PEI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET are invalid.
                                   The EDKII PEI NVM Express Command Packet was not sent, so no
                                   additional status information is available.
  @retval EFI_UNSUPPORTED          The command described by the EDKII PEI NVM Express Command Packet
                                   is not supported by the host adapter.
                                   The EDKII PEI NVM Express Command Packet was not sent, so no
                                   additional status information is available.
  @retval EFI_TIMEOUT              A timeout occurred while waiting for the EDKII PEI NVM Express
                                   Command Packet to execute.

**/
EFI_STATUS
NvmePassThru (
  IN     PEI_NVME_CONTROLLER_PRIVATE_DATA                  *Private,
  IN     UINT32                                            NamespaceId,
  IN OUT EDKII_PEI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET    *Packet
  )
{
  EFI_STATUS               Status;
  NVME_SQ                  *Sq;
  NVME_CQ                  *Cq;
  UINT8                    QueueId;
  UINTN                    SqSize;
  UINTN                    CqSize;
  EDKII_IOMMU_OPERATION    MapOp;
  UINTN                    MapLength;
  EFI_PHYSICAL_ADDRESS     PhyAddr;
  VOID                     *MapData;
  VOID                     *MapMeta;
  UINT32                   Bytes;
  UINT32                   Offset;
  UINT32                   Data32;
  UINT64                   Timer;

  //
  // Check the data fields in Packet parameter
  //
  if (Packet == NULL) {
    DEBUG ((
      DEBUG_ERROR,
      "%a, Invalid parameter: Packet(%lx)\n",
      __FUNCTION__,
      (UINTN)Packet
      ));
    return EFI_INVALID_PARAMETER;
  }

  if ((Packet->NvmeCmd == NULL) || (Packet->NvmeCompletion == NULL)) {
    DEBUG ((
      DEBUG_ERROR,
      "%a, Invalid parameter: NvmeCmd (%lx)/NvmeCompletion(%lx)\n",
      __FUNCTION__,
      (UINTN)Packet->NvmeCmd,
      (UINTN)Packet->NvmeCompletion
      ));
    return EFI_INVALID_PARAMETER;
  }

  if (Packet->QueueType != NVME_ADMIN_QUEUE && Packet->QueueType != NVME_IO_QUEUE) {
    DEBUG ((
      DEBUG_ERROR,
      "%a, Invalid parameter: QueueId(%lx)\n",
      __FUNCTION__,
      (UINTN)Packet->QueueType
      ));
    return EFI_INVALID_PARAMETER;
  }

  //
  // Locate the next free submission queue entry and the current completion
  // queue entry for the selected (admin or IO) queue pair.
  //
  QueueId = Packet->QueueType;
  Sq      = Private->SqBuffer[QueueId] + Private->SqTdbl[QueueId].Sqt;
  Cq      = Private->CqBuffer[QueueId] + Private->CqHdbl[QueueId].Cqh;
  if (QueueId == NVME_ADMIN_QUEUE) {
    SqSize = NVME_ASQ_SIZE + 1;
    CqSize = NVME_ACQ_SIZE + 1;
  } else {
    SqSize = NVME_CSQ_SIZE + 1;
    CqSize = NVME_CCQ_SIZE + 1;
  }

  if (Packet->NvmeCmd->Nsid != NamespaceId) {
    DEBUG ((
      DEBUG_ERROR,
      "%a: Nsid mismatch (%x, %x)\n",
      __FUNCTION__,
      Packet->NvmeCmd->Nsid,
      NamespaceId
      ));
    return EFI_INVALID_PARAMETER;
  }

  ZeroMem (Sq, sizeof (NVME_SQ));
  Sq->Opc  = Packet->NvmeCmd->Cdw0.Opcode;
  Sq->Fuse = Packet->NvmeCmd->Cdw0.FusedOperation;
  Sq->Cid  = Packet->NvmeCmd->Cdw0.Cid;
  Sq->Nsid = Packet->NvmeCmd->Nsid;

  //
  // Currently we only support PRP for data transfer, SGL is NOT supported
  //
  ASSERT (Sq->Psdt == 0);
  if (Sq->Psdt != 0) {
    DEBUG ((DEBUG_ERROR, "%a: Does not support SGL mechanism.\n", __FUNCTION__));
    return EFI_UNSUPPORTED;
  }

  Sq->Prp[0] = (UINT64)(UINTN)Packet->TransferBuffer;
  Sq->Prp[1] = 0;
  MapData    = NULL;
  MapMeta    = NULL;
  Status     = EFI_SUCCESS;
  //
  // If the NVMe cmd has data in or out, then mapping the user buffer to the PCI controller
  // specific addresses.
  // Opcode bits 0-1 encode the data transfer direction (NVMe spec): BIT0 =
  // host-to-controller, BIT1 = controller-to-host.
  //
  if ((Sq->Opc & (BIT0 | BIT1)) != 0) {
    if (((Packet->TransferLength != 0) && (Packet->TransferBuffer == NULL)) ||
        ((Packet->TransferLength == 0) && (Packet->TransferBuffer != NULL))) {
      return EFI_INVALID_PARAMETER;
    }

    //
    // Currently, we only support creating IO submission/completion queues that are
    // allocated internally by the driver.
    //
    if ((Packet->QueueType == NVME_ADMIN_QUEUE) &&
        ((Sq->Opc == NVME_ADMIN_CRIOCQ_CMD) || (Sq->Opc == NVME_ADMIN_CRIOSQ_CMD))) {
      if ((Packet->TransferBuffer != Private->SqBuffer[NVME_IO_QUEUE]) &&
          (Packet->TransferBuffer != Private->CqBuffer[NVME_IO_QUEUE])) {
        DEBUG ((
          DEBUG_ERROR,
          "%a: Does not support external IO queues creation request.\n",
          __FUNCTION__
          ));
        return EFI_UNSUPPORTED;
      }
    } else {
      if ((Sq->Opc & BIT0) != 0) {
        MapOp = EdkiiIoMmuOperationBusMasterRead;
      } else {
        MapOp = EdkiiIoMmuOperationBusMasterWrite;
      }

      if ((Packet->TransferLength != 0) && (Packet->TransferBuffer != NULL)) {
        MapLength = Packet->TransferLength;
        Status    = IoMmuMap (
                      MapOp,
                      Packet->TransferBuffer,
                      &MapLength,
                      &PhyAddr,
                      &MapData
                      );
        if (EFI_ERROR (Status) || (MapLength != Packet->TransferLength)) {
          Status = EFI_OUT_OF_RESOURCES;
          DEBUG ((DEBUG_ERROR, "%a: Fail to map data buffer.\n", __FUNCTION__));
          goto Exit;
        }

        // Replace the host address with the device-visible (IOMMU-mapped) address.
        Sq->Prp[0] = PhyAddr;
      }

      if ((Packet->MetadataLength != 0) && (Packet->MetadataBuffer != NULL)) {
        MapLength = Packet->MetadataLength;
        Status    = IoMmuMap (
                      MapOp,
                      Packet->MetadataBuffer,
                      &MapLength,
                      &PhyAddr,
                      &MapMeta
                      );
        if (EFI_ERROR (Status) || (MapLength != Packet->MetadataLength)) {
          Status = EFI_OUT_OF_RESOURCES;
          DEBUG ((DEBUG_ERROR, "%a: Fail to map meta data buffer.\n", __FUNCTION__));
          goto Exit;
        }

        Sq->Mptr = PhyAddr;
      }
    }
  }

  //
  // If the Buffer Size spans more than two memory pages (page Size as defined in CC.Mps),
  // then build a PRP list in the second PRP submission queue entry.
  //
  Offset = ((UINT32)Sq->Prp[0]) & (EFI_PAGE_SIZE - 1);
  Bytes  = Packet->TransferLength;

  if ((Offset + Bytes) > (EFI_PAGE_SIZE * 2)) {
    //
    // Create PrpList for remaining Data Buffer.
    //
    PhyAddr    = (Sq->Prp[0] + EFI_PAGE_SIZE) & ~(EFI_PAGE_SIZE - 1);
    Sq->Prp[1] = NvmeCreatePrpList (
                   Private,
                   PhyAddr,
                   EFI_SIZE_TO_PAGES (Offset + Bytes) - 1
                   );
    if (Sq->Prp[1] == 0) {
      Status = EFI_OUT_OF_RESOURCES;
      DEBUG ((DEBUG_ERROR, "%a: Create PRP list fail, Status - %r\n", __FUNCTION__, Status));
      goto Exit;
    }
  } else if ((Offset + Bytes) > EFI_PAGE_SIZE) {
    //
    // Two-page transfer: PRP2 points directly at the second page, no list needed.
    //
    Sq->Prp[1] = (Sq->Prp[0] + EFI_PAGE_SIZE) & ~(EFI_PAGE_SIZE - 1);
  }

  //
  // Copy only the command dwords the caller marked as valid.
  //
  if (Packet->NvmeCmd->Flags & CDW10_VALID) {
    Sq->Payload.Raw.Cdw10 = Packet->NvmeCmd->Cdw10;
  }

  if (Packet->NvmeCmd->Flags & CDW11_VALID) {
    Sq->Payload.Raw.Cdw11 = Packet->NvmeCmd->Cdw11;
  }

  if (Packet->NvmeCmd->Flags & CDW12_VALID) {
    Sq->Payload.Raw.Cdw12 = Packet->NvmeCmd->Cdw12;
  }

  if (Packet->NvmeCmd->Flags & CDW13_VALID) {
    Sq->Payload.Raw.Cdw13 = Packet->NvmeCmd->Cdw13;
  }

  if (Packet->NvmeCmd->Flags & CDW14_VALID) {
    Sq->Payload.Raw.Cdw14 = Packet->NvmeCmd->Cdw14;
  }

  if (Packet->NvmeCmd->Flags & CDW15_VALID) {
    Sq->Payload.Raw.Cdw15 = Packet->NvmeCmd->Cdw15;
  }

  //
  // Ring the submission queue doorbell.
  //
  Private->SqTdbl[QueueId].Sqt++;
  if (Private->SqTdbl[QueueId].Sqt == SqSize) {
    Private->SqTdbl[QueueId].Sqt = 0;
  }

  Data32 = ReadUnaligned32 ((UINT32 *)&Private->SqTdbl[QueueId]);
  Status = NVME_SET_SQTDBL (Private, QueueId, &Data32);
  if (EFI_ERROR (Status)) {
    DEBUG ((DEBUG_ERROR, "%a: NVME_SET_SQTDBL fail, Status - %r\n", __FUNCTION__, Status));
    goto Exit;
  }

  //
  // Wait for completion queue to get filled in.
  // Completion is detected by the entry's phase tag flipping relative to the
  // driver's expected phase (Private->Pt[QueueId]).
  // NOTE(review): assumes Packet->CommandTimeout is expressed in the same unit
  // as NVME_POLL_INTERVAL (microseconds) -- confirm against the PPI definition.
  //
  Status = EFI_TIMEOUT;
  Timer  = 0;
  while (Timer < Packet->CommandTimeout) {
    if (Cq->Pt != Private->Pt[QueueId]) {
      Status = EFI_SUCCESS;
      break;
    }

    MicroSecondDelay (NVME_POLL_INTERVAL);
    Timer += NVME_POLL_INTERVAL;
  }

  if (Status == EFI_TIMEOUT) {
    //
    // Timeout occurs for an NVMe command, reset the controller to abort the outstanding command
    //
    DEBUG ((DEBUG_ERROR, "%a: Timeout occurs for the PassThru command.\n", __FUNCTION__));
    Status = NvmeControllerInit (Private);
    if (EFI_ERROR (Status)) {
      Status = EFI_DEVICE_ERROR;
    } else {
      //
      // Return EFI_TIMEOUT to indicate a timeout occurs for PassThru command
      //
      Status = EFI_TIMEOUT;
    }

    goto Exit;
  }

  //
  // Move forward the Completion Queue head.
  // When the head wraps, the expected phase tag is inverted per the NVMe
  // phase-tag protocol.
  //
  Private->CqHdbl[QueueId].Cqh++;
  if (Private->CqHdbl[QueueId].Cqh == CqSize) {
    Private->CqHdbl[QueueId].Cqh = 0;
    Private->Pt[QueueId] ^= 1;
  }

  //
  // Copy the Response Queue entry for this command to the caller's response buffer
  //
  CopyMem (Packet->NvmeCompletion, Cq, sizeof (EDKII_PEI_NVM_EXPRESS_COMPLETION));

  //
  // Check the NVMe cmd execution result
  //
  Status = NvmeCheckCqStatus (Cq);
  NVME_SET_CQHDBL (Private, QueueId, &Private->CqHdbl[QueueId]);

Exit:
  if (MapMeta != NULL) {
    IoMmuUnmap (MapMeta);
  }

  if (MapData != NULL) {
    IoMmuUnmap (MapData);
  }

  return Status;
}
/**
  Sends an NVM Express Command Packet to an NVM Express controller or namespace. This function supports
  both blocking I/O and non-blocking I/O. The blocking I/O functionality is required, and the non-blocking
  I/O functionality is optional.

  @param[in]     This                A pointer to the EFI_NVM_EXPRESS_PASS_THRU_PROTOCOL instance.
  @param[in]     NamespaceId         A 32 bit namespace ID as defined in the NVMe specification to which the NVM Express
                                     Command Packet will be sent. A value of 0 denotes the NVM Express controller, a value
                                     of all 0xFF's (all bytes are 0xFF) in the namespace ID specifies that the command
                                     packet should be sent to all valid namespaces.
  @param[in,out] Packet              A pointer to the NVM Express Command Packet.
  @param[in]     Event               If non-blocking I/O is not supported then Event is ignored, and blocking I/O is performed.
                                     If Event is NULL, then blocking I/O is performed. If Event is not NULL and non-blocking
                                     I/O is supported, then non-blocking I/O is performed, and Event will be signaled when
                                     the NVM Express Command Packet completes.

  @retval EFI_SUCCESS                The NVM Express Command Packet was sent by the host. TransferLength bytes were
                                     transferred to, or from DataBuffer.
  @retval EFI_BAD_BUFFER_SIZE        The NVM Express Command Packet was not executed. The number of bytes that could be
                                     transferred is returned in TransferLength.
  @retval EFI_NOT_READY              The NVM Express Command Packet could not be sent because the controller is not ready.
                                     The caller may retry again later.
  @retval EFI_DEVICE_ERROR           A device error occurred while attempting to send the NVM Express Command Packet.
  @retval EFI_INVALID_PARAMETER      NamespaceId or the contents of EFI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET are invalid.
                                     The NVM Express Command Packet was not sent, so no additional status information is
                                     available.
  @retval EFI_UNSUPPORTED            The command described by the NVM Express Command Packet is not supported by the NVM
                                     Express controller. The NVM Express Command Packet was not sent so no additional status
                                     information is available.
  @retval EFI_TIMEOUT                A timeout occurred while waiting for the NVM Express Command Packet to execute.

**/
EFI_STATUS
EFIAPI
NvmExpressPassThru (
  IN     EFI_NVM_EXPRESS_PASS_THRU_PROTOCOL          *This,
  IN     UINT32                                      NamespaceId,
  IN OUT EFI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET    *Packet,
  IN     EFI_EVENT                                   Event OPTIONAL
  )
{
  NVME_CONTROLLER_PRIVATE_DATA     *Private;
  EFI_STATUS                       Status;
  EFI_STATUS                       PreviousStatus;
  EFI_PCI_IO_PROTOCOL              *PciIo;
  NVME_SQ                          *Sq;
  NVME_CQ                          *Cq;
  UINT16                           QueueId;
  UINT32                           Bytes;
  UINT16                           Offset;
  EFI_EVENT                        TimerEvent;
  EFI_PCI_IO_PROTOCOL_OPERATION    Flag;
  EFI_PHYSICAL_ADDRESS             PhyAddr;
  VOID                             *MapData;
  VOID                             *MapMeta;
  VOID                             *MapPrpList;
  UINTN                            MapLength;
  UINT64                           *Prp;
  VOID                             *PrpListHost;
  UINTN                            PrpListNo;
  UINT32                           Attributes;
  UINT32                           IoAlign;
  UINT32                           MaxTransLen;
  UINT32                           Data;
  NVME_PASS_THRU_ASYNC_REQ         *AsyncRequest;
  EFI_TPL                          OldTpl;

  //
  // check the data fields in Packet parameter.
  //
  if ((This == NULL) || (Packet == NULL)) {
    return EFI_INVALID_PARAMETER;
  }

  if ((Packet->NvmeCmd == NULL) || (Packet->NvmeCompletion == NULL)) {
    return EFI_INVALID_PARAMETER;
  }

  if (Packet->QueueType != NVME_ADMIN_QUEUE && Packet->QueueType != NVME_IO_QUEUE) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // 'Attributes' with neither EFI_NVM_EXPRESS_PASS_THRU_ATTRIBUTES_LOGICAL nor
  // EFI_NVM_EXPRESS_PASS_THRU_ATTRIBUTES_PHYSICAL set is an illegal
  // configuration.
  //
  Attributes = This->Mode->Attributes;
  if ((Attributes & (EFI_NVM_EXPRESS_PASS_THRU_ATTRIBUTES_PHYSICAL |
                     EFI_NVM_EXPRESS_PASS_THRU_ATTRIBUTES_LOGICAL)) == 0) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Buffer alignment check for TransferBuffer & MetadataBuffer.
  //
  IoAlign = This->Mode->IoAlign;
  if (IoAlign > 0 && (((UINTN) Packet->TransferBuffer & (IoAlign - 1)) != 0)) {
    return EFI_INVALID_PARAMETER;
  }

  if (IoAlign > 0 && (((UINTN) Packet->MetadataBuffer & (IoAlign - 1)) != 0)) {
    return EFI_INVALID_PARAMETER;
  }

  Private = NVME_CONTROLLER_PRIVATE_DATA_FROM_PASS_THRU (This);

  //
  // Check NamespaceId is valid or not.
  //
  if ((NamespaceId > Private->ControllerData->Nn) &&
      (NamespaceId != (UINT32) -1)) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Check whether TransferLength exceeds the maximum data transfer size.
  // MDTS is in units of the minimum memory page size (2^(12 + MPSMIN)).
  //
  if (Private->ControllerData->Mdts != 0) {
    MaxTransLen = (1 << (Private->ControllerData->Mdts)) *
                  (1 << (Private->Cap.Mpsmin + 12));
    if (Packet->TransferLength > MaxTransLen) {
      Packet->TransferLength = MaxTransLen;
      return EFI_BAD_BUFFER_SIZE;
    }
  }

  PciIo       = Private->PciIo;
  MapData     = NULL;
  MapMeta     = NULL;
  MapPrpList  = NULL;
  PrpListHost = NULL;
  PrpListNo   = 0;
  Prp         = NULL;
  TimerEvent  = NULL;
  Status      = EFI_SUCCESS;

  //
  // Queue 0 is the admin queue; queue 1 serves blocking IO and queue 2 serves
  // non-blocking (async) IO requests.
  //
  if (Packet->QueueType == NVME_ADMIN_QUEUE) {
    QueueId = 0;
  } else {
    if (Event == NULL) {
      QueueId = 1;
    } else {
      QueueId = 2;

      //
      // Submission queue full check.
      //
      if ((Private->SqTdbl[QueueId].Sqt + 1) % (NVME_ASYNC_CSQ_SIZE + 1) ==
          Private->AsyncSqHead) {
        return EFI_NOT_READY;
      }
    }
  }

  Sq = Private->SqBuffer[QueueId] + Private->SqTdbl[QueueId].Sqt;
  Cq = Private->CqBuffer[QueueId] + Private->CqHdbl[QueueId].Cqh;

  if (Packet->NvmeCmd->Nsid != NamespaceId) {
    return EFI_INVALID_PARAMETER;
  }

  ZeroMem (Sq, sizeof (NVME_SQ));
  Sq->Opc  = (UINT8)Packet->NvmeCmd->Cdw0.Opcode;
  Sq->Fuse = (UINT8)Packet->NvmeCmd->Cdw0.FusedOperation;
  Sq->Cid  = Private->Cid[QueueId]++;
  Sq->Nsid = Packet->NvmeCmd->Nsid;

  //
  // Currently we only support PRP for data transfer, SGL is NOT supported.
  //
  ASSERT (Sq->Psdt == 0);
  if (Sq->Psdt != 0) {
    DEBUG ((EFI_D_ERROR, "NvmExpressPassThru: doesn't support SGL mechanism\n"));
    return EFI_UNSUPPORTED;
  }

  Sq->Prp[0] = (UINT64)(UINTN)Packet->TransferBuffer;
  //
  // If the NVMe cmd has data in or out, then mapping the user buffer to the PCI controller specific addresses.
  // Note here we don't handle data buffer for CreateIOSubmitionQueue and CreateIOCompletionQueue cmds because
  // these two cmds are special which requires their data buffer must support simultaneous access by both the
  // processor and a PCI Bus Master. It's caller's responsbility to ensure this.
  //
  if (((Sq->Opc & (BIT0 | BIT1)) != 0) &&
      (Sq->Opc != NVME_ADMIN_CRIOCQ_CMD) && (Sq->Opc != NVME_ADMIN_CRIOSQ_CMD)) {
    if ((Packet->TransferLength == 0) || (Packet->TransferBuffer == NULL)) {
      return EFI_INVALID_PARAMETER;
    }

    if ((Sq->Opc & BIT0) != 0) {
      Flag = EfiPciIoOperationBusMasterRead;
    } else {
      Flag = EfiPciIoOperationBusMasterWrite;
    }

    MapLength = Packet->TransferLength;
    Status    = PciIo->Map (
                         PciIo,
                         Flag,
                         Packet->TransferBuffer,
                         &MapLength,
                         &PhyAddr,
                         &MapData
                         );
    if (EFI_ERROR (Status) || (Packet->TransferLength != MapLength)) {
      return EFI_OUT_OF_RESOURCES;
    }

    Sq->Prp[0] = PhyAddr;
    Sq->Prp[1] = 0;

    if ((Packet->MetadataLength != 0) && (Packet->MetadataBuffer != NULL)) {
      MapLength = Packet->MetadataLength;
      Status    = PciIo->Map (
                           PciIo,
                           Flag,
                           Packet->MetadataBuffer,
                           &MapLength,
                           &PhyAddr,
                           &MapMeta
                           );
      if (EFI_ERROR (Status) || (Packet->MetadataLength != MapLength)) {
        PciIo->Unmap (
                 PciIo,
                 MapData
                 );
        return EFI_OUT_OF_RESOURCES;
      }

      Sq->Mptr = PhyAddr;
    }
  }

  //
  // If the buffer size spans more than two memory pages (page size as defined in CC.Mps),
  // then build a PRP list in the second PRP submission queue entry.
  //
  Offset = ((UINT16)Sq->Prp[0]) & (EFI_PAGE_SIZE - 1);
  Bytes  = Packet->TransferLength;

  if ((Offset + Bytes) > (EFI_PAGE_SIZE * 2)) {
    //
    // Create PrpList for remaining data buffer.
    //
    PhyAddr = (Sq->Prp[0] + EFI_PAGE_SIZE) & ~(EFI_PAGE_SIZE - 1);
    Prp     = NvmeCreatePrpList (
                PciIo,
                PhyAddr,
                EFI_SIZE_TO_PAGES (Offset + Bytes) - 1,
                &PrpListHost,
                &PrpListNo,
                &MapPrpList
                );
    if (Prp == NULL) {
      //
      // Fix: the previous code jumped to EXIT while Status still held
      // EFI_SUCCESS, reporting success for a command that was never issued.
      //
      Status = EFI_OUT_OF_RESOURCES;
      goto EXIT;
    }

    Sq->Prp[1] = (UINT64)(UINTN)Prp;
  } else if ((Offset + Bytes) > EFI_PAGE_SIZE) {
    //
    // Two-page transfer: PRP2 points directly at the second page, no list needed.
    //
    Sq->Prp[1] = (Sq->Prp[0] + EFI_PAGE_SIZE) & ~(EFI_PAGE_SIZE - 1);
  }

  //
  // Copy only the command dwords the caller marked as valid.
  //
  if (Packet->NvmeCmd->Flags & CDW2_VALID) {
    Sq->Rsvd2 = (UINT64)Packet->NvmeCmd->Cdw2;
  }

  if (Packet->NvmeCmd->Flags & CDW3_VALID) {
    Sq->Rsvd2 |= LShiftU64 ((UINT64)Packet->NvmeCmd->Cdw3, 32);
  }

  if (Packet->NvmeCmd->Flags & CDW10_VALID) {
    Sq->Payload.Raw.Cdw10 = Packet->NvmeCmd->Cdw10;
  }

  if (Packet->NvmeCmd->Flags & CDW11_VALID) {
    Sq->Payload.Raw.Cdw11 = Packet->NvmeCmd->Cdw11;
  }

  if (Packet->NvmeCmd->Flags & CDW12_VALID) {
    Sq->Payload.Raw.Cdw12 = Packet->NvmeCmd->Cdw12;
  }

  if (Packet->NvmeCmd->Flags & CDW13_VALID) {
    Sq->Payload.Raw.Cdw13 = Packet->NvmeCmd->Cdw13;
  }

  if (Packet->NvmeCmd->Flags & CDW14_VALID) {
    Sq->Payload.Raw.Cdw14 = Packet->NvmeCmd->Cdw14;
  }

  if (Packet->NvmeCmd->Flags & CDW15_VALID) {
    Sq->Payload.Raw.Cdw15 = Packet->NvmeCmd->Cdw15;
  }

  //
  // Ring the submission queue doorbell.
  //
  if ((Event != NULL) && (QueueId != 0)) {
    Private->SqTdbl[QueueId].Sqt =
      (Private->SqTdbl[QueueId].Sqt + 1) % (NVME_ASYNC_CSQ_SIZE + 1);
  } else {
    //
    // Sync queues have a depth of 2, so the tail simply toggles between 0 and 1.
    //
    Private->SqTdbl[QueueId].Sqt ^= 1;
  }

  Data   = ReadUnaligned32 ((UINT32*)&Private->SqTdbl[QueueId]);
  Status = PciIo->Mem.Write (
                        PciIo,
                        EfiPciIoWidthUint32,
                        NVME_BAR,
                        NVME_SQTDBL_OFFSET (QueueId, Private->Cap.Dstrd),
                        1,
                        &Data
                        );
  if (EFI_ERROR (Status)) {
    goto EXIT;
  }

  //
  // For non-blocking requests, return directly if the command is placed
  // in the submission queue.
  //
  if ((Event != NULL) && (QueueId != 0)) {
    AsyncRequest = AllocateZeroPool (sizeof (NVME_PASS_THRU_ASYNC_REQ));
    if (AsyncRequest == NULL) {
      Status = EFI_DEVICE_ERROR;
      goto EXIT;
    }

    AsyncRequest->Signature   = NVME_PASS_THRU_ASYNC_REQ_SIG;
    AsyncRequest->Packet      = Packet;
    AsyncRequest->CommandId   = Sq->Cid;
    AsyncRequest->CallerEvent = Event;
    AsyncRequest->MapData     = MapData;
    AsyncRequest->MapMeta     = MapMeta;
    AsyncRequest->MapPrpList  = MapPrpList;
    AsyncRequest->PrpListNo   = PrpListNo;
    AsyncRequest->PrpListHost = PrpListHost;

    OldTpl = gBS->RaiseTPL (TPL_NOTIFY);
    InsertTailList (&Private->AsyncPassThruQueue, &AsyncRequest->Link);
    gBS->RestoreTPL (OldTpl);

    return EFI_SUCCESS;
  }

  Status = gBS->CreateEvent (
                  EVT_TIMER,
                  TPL_CALLBACK,
                  NULL,
                  NULL,
                  &TimerEvent
                  );
  if (EFI_ERROR (Status)) {
    goto EXIT;
  }

  Status = gBS->SetTimer (TimerEvent, TimerRelative, Packet->CommandTimeout);
  if (EFI_ERROR (Status)) {
    goto EXIT;
  }

  //
  // Wait for completion queue to get filled in.
  // Completion is detected by the entry's phase tag flipping relative to the
  // driver's expected phase (Private->Pt[QueueId]).
  //
  Status = EFI_TIMEOUT;
  while (EFI_ERROR (gBS->CheckEvent (TimerEvent))) {
    if (Cq->Pt != Private->Pt[QueueId]) {
      Status = EFI_SUCCESS;
      break;
    }
  }

  //
  // Check the NVMe cmd execution result
  //
  if (Status != EFI_TIMEOUT) {
    //
    // Copy the Response Queue entry for this command to the caller's response
    // buffer. Fix: the previous code only copied the completion entry on the
    // error path, so callers of a successful command never received it; the
    // protocol contract (and the PEI implementation) fills it unconditionally.
    //
    CopyMem (Packet->NvmeCompletion, Cq, sizeof (EFI_NVM_EXPRESS_COMPLETION));
    if ((Cq->Sct == 0) && (Cq->Sc == 0)) {
      Status = EFI_SUCCESS;
    } else {
      Status = EFI_DEVICE_ERROR;
      //
      // Dump every completion entry status for debugging.
      //
      DEBUG_CODE_BEGIN ();
      NvmeDumpStatus (Cq);
      DEBUG_CODE_END ();
    }
  } else {
    //
    // Timeout occurs for an NVMe command. Reset the controller to abort the
    // outstanding commands.
    //
    DEBUG ((DEBUG_ERROR, "NvmExpressPassThru: Timeout occurs for an NVMe command.\n"));

    //
    // Disable the timer to trigger the process of async transfers temporarily.
    //
    Status = gBS->SetTimer (Private->TimerEvent, TimerCancel, 0);
    if (EFI_ERROR (Status)) {
      goto EXIT;
    }

    //
    // Reset the NVMe controller.
    //
    Status = NvmeControllerInit (Private);
    if (!EFI_ERROR (Status)) {
      Status = AbortAsyncPassThruTasks (Private);
      if (!EFI_ERROR (Status)) {
        //
        // Re-enable the timer to trigger the process of async transfers.
        //
        Status = gBS->SetTimer (Private->TimerEvent, TimerPeriodic, NVME_HC_ASYNC_TIMER);
        if (!EFI_ERROR (Status)) {
          //
          // Return EFI_TIMEOUT to indicate a timeout occurs for NVMe PassThru command.
          //
          Status = EFI_TIMEOUT;
        }
      }
    } else {
      Status = EFI_DEVICE_ERROR;
    }

    goto EXIT;
  }

  //
  // Move forward the Completion Queue head; the sync queues have a depth of 2,
  // so the head toggles and the expected phase tag inverts on each wrap.
  //
  if ((Private->CqHdbl[QueueId].Cqh ^= 1) == 0) {
    Private->Pt[QueueId] ^= 1;
  }

  //
  // Fix: preserve the command execution result across the completion doorbell
  // update; the previous code let the Mem.Write status overwrite it, silently
  // turning an EFI_DEVICE_ERROR into EFI_SUCCESS.
  //
  Data           = ReadUnaligned32 ((UINT32*)&Private->CqHdbl[QueueId]);
  PreviousStatus = Status;
  Status         = PciIo->Mem.Write (
                                PciIo,
                                EfiPciIoWidthUint32,
                                NVME_BAR,
                                NVME_CQHDBL_OFFSET (QueueId, Private->Cap.Dstrd),
                                1,
                                &Data
                                );
  Status = EFI_ERROR (PreviousStatus) ? PreviousStatus : Status;

  //
  // For now, the code does not support the non-blocking feature for admin queue.
  // If Event is not NULL for admin queue, signal the caller's event here.
  //
  if (Event != NULL) {
    ASSERT (QueueId == 0);
    gBS->SignalEvent (Event);
  }

EXIT:
  if (MapData != NULL) {
    PciIo->Unmap (
             PciIo,
             MapData
             );
  }

  if (MapMeta != NULL) {
    PciIo->Unmap (
             PciIo,
             MapMeta
             );
  }

  if (MapPrpList != NULL) {
    PciIo->Unmap (
             PciIo,
             MapPrpList
             );
  }

  if (Prp != NULL) {
    PciIo->FreeBuffer (PciIo, PrpListNo, PrpListHost);
  }

  if (TimerEvent != NULL) {
    gBS->CloseEvent (TimerEvent);
  }

  return Status;
}