/**

  Initialize the VirtIo MMIO Device

  @param[in]      BaseAddress  Base Address of the VirtIo MMIO Device

  @param[in, out] Device       The driver instance to configure.

  @retval EFI_SUCCESS          Setup complete.

  @retval EFI_UNSUPPORTED      The driver is not a VirtIo MMIO device.

**/
STATIC
EFI_STATUS
EFIAPI
VirtioMmioInit (
  IN     PHYSICAL_ADDRESS    BaseAddress,
  IN OUT VIRTIO_MMIO_DEVICE  *Device
  )
{
  UINT32  MagicValue;
  UINT32  VendorId;
  UINT32  Version;

  //
  // Initialize VirtIo Mmio Device
  //
  CopyMem (
    &Device->VirtioDevice,
    &mMmioDeviceProtocolTemplate,
    sizeof (VIRTIO_DEVICE_PROTOCOL)
    );
  Device->BaseAddress                    = BaseAddress;
  Device->VirtioDevice.Revision          = VIRTIO_SPEC_REVISION (0, 9, 5);
  Device->VirtioDevice.SubSystemDeviceId =
    MmioRead32 (BaseAddress + VIRTIO_MMIO_OFFSET_DEVICE_ID);

  //
  // Double-check MMIO-specific values
  //
  MagicValue = VIRTIO_CFG_READ (Device, VIRTIO_MMIO_OFFSET_MAGIC);
  if (MagicValue != VIRTIO_MMIO_MAGIC) {
    return EFI_UNSUPPORTED;
  }

  Version = VIRTIO_CFG_READ (Device, VIRTIO_MMIO_OFFSET_VERSION);
  if (Version != 1) {
    return EFI_UNSUPPORTED;
  }

  VendorId = VIRTIO_CFG_READ (Device, VIRTIO_MMIO_OFFSET_VENDOR_ID);
  if (VendorId != VIRTIO_VENDOR_ID) {
    //
    // The ARM Base and Foundation Models do not report a valid VirtIo
    // VendorId. They return a value of 0x0 for the VendorId.
    //
    DEBUG ((EFI_D_WARN,
      "VirtioMmioInit: Warning: The VendorId (0x%X) does not "
      "match the VirtIo VendorId (0x%X).\n",
      VendorId, VIRTIO_VENDOR_ID));
  }

  return EFI_SUCCESS;
}
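The VIRTIO_CFG_READ accesses above are plain 32-bit MMIO reads relative to the device's base address. A minimal sketch of such an accessor, under the assumption that the transport only needs BaseAddress from the device structure (the library's actual private header may define it differently):

//
// Illustrative sketch only: a VIRTIO_CFG_READ-style accessor for the MMIO
// transport can be a thin wrapper around MmioRead32 (), offset from the
// device's base address.
//
#define VIRTIO_CFG_READ(Device, Offset)  \
  (MmioRead32 ((Device)->BaseAddress + (Offset)))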
/**

  Initialize the VirtIo PCI Device

  @param[in, out] Device    The driver instance to configure. The caller is
                            responsible for Device->PciIo's validity (ie.
                            working IO access to the underlying virtio-pci
                            device).

  @retval EFI_SUCCESS       Setup complete.

  @retval EFI_UNSUPPORTED   The underlying IO device doesn't support the
                            provided address offset and read size.

  @return                   Error codes from PciIo->Pci.Read().

**/
STATIC
EFI_STATUS
EFIAPI
VirtioPciInit (
  IN OUT VIRTIO_PCI_DEVICE  *Device
  )
{
  EFI_STATUS           Status;
  EFI_PCI_IO_PROTOCOL  *PciIo;
  PCI_TYPE00           Pci;

  ASSERT (Device != NULL);
  PciIo = Device->PciIo;
  ASSERT (PciIo != NULL);
  ASSERT (PciIo->Pci.Read != NULL);

  Status = PciIo->Pci.Read (
                        PciIo,                            // (protocol, device)
                                                          // handle
                        EfiPciIoWidthUint32,              // access width & copy
                                                          // mode
                        0,                                // Offset
                        sizeof (Pci) / sizeof (UINT32),   // Count
                        &Pci                              // target buffer
                        );
  if (EFI_ERROR (Status)) {
    return Status;
  }

  //
  // Copy protocol template
  //
  CopyMem (
    &Device->VirtioDevice,
    &mDeviceProtocolTemplate,
    sizeof (VIRTIO_DEVICE_PROTOCOL)
    );

  //
  // Initialize the protocol interface attributes
  //
  Device->VirtioDevice.Revision          = VIRTIO_SPEC_REVISION (0, 9, 5);
  Device->VirtioDevice.SubSystemDeviceId = Pci.Device.SubsystemID;

  //
  // Note: We don't support the MSI-X capability. If we did,
  // the offset would become 24 after enabling MSI-X.
  //
  Device->DeviceSpecificConfigurationOffset =
    VIRTIO_DEVICE_SPECIFIC_CONFIGURATION_OFFSET_PCI;

  return EFI_SUCCESS;
}
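The remark about the offset becoming 24 follows from the legacy virtio-pci register layout of virtio 0.9.5: the common header occupies the first 20 bytes of the I/O region, and enabling MSI-X inserts two extra 16-bit vector fields before the device-specific area. The layout is summarized below for illustration; consult the 0.9.5 specification for the authoritative table.

//
// Legacy virtio-pci I/O layout (virtio 0.9.5), summarized for illustration:
//
//   Offset  Size  Field
//   0x00    4     Device Features (RO)
//   0x04    4     Guest Features  (RW)
//   0x08    4     Queue Address   (RW)
//   0x0C    2     Queue Size      (RO)
//   0x0E    2     Queue Select    (RW)
//   0x10    2     Queue Notify    (RW)
//   0x12    1     Device Status   (RW)
//   0x13    1     ISR Status      (RO)
//   0x14    ...   Device-specific configuration (without MSI-X)
//
// With MSI-X enabled, Configuration Vector and Queue Vector (2 bytes each)
// precede the device-specific area, moving it to offset 0x18 (24).
//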
  EFI_STATUS  Status;

  if (FieldSize != BufferSize) {
    return EFI_INVALID_PARAMETER;
  }

  Dev    = VIRTIO_1_0_FROM_VIRTIO_DEVICE (This);
  Status = Virtio10Transfer (
             Dev->PciIo,
             &Dev->SpecificConfig,
             FALSE,
             FieldOffset,
             FieldSize,
             Buffer
             );
  return Status;
}

STATIC CONST VIRTIO_DEVICE_PROTOCOL  mVirtIoTemplate = {
  VIRTIO_SPEC_REVISION (1, 0, 0),
  0,                              // SubSystemDeviceId, filled in dynamically
  Virtio10GetDeviceFeatures,
  Virtio10SetGuestFeatures,
  Virtio10SetQueueAddress,
  Virtio10SetQueueSel,
  Virtio10SetQueueNotify,
  Virtio10SetQueueAlign,
  Virtio10SetPageSize,
  Virtio10GetQueueNumMax,
  Virtio10SetQueueNum,
  Virtio10GetDeviceStatus,
  Virtio10SetDeviceStatus,
  Virtio10WriteDevice,
  Virtio10ReadDevice
};
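Consumer drivers never see which transport backs the protocol template; they call through the protocol members only. A hedged usage sketch follows, assuming a caller that already holds a VIRTIO_DEVICE_PROTOCOL pointer; the VIRTIO_EXAMPLE_CONFIG type, its Threshold field, and the helper name are made up for this illustration and do not come from the code above.

//
// Illustrative only: read one 32-bit device-specific configuration field
// through the protocol. For a virtio-1.0 device this lands in
// Virtio10ReadDevice above; other transports supply their own member.
//
typedef struct {
  UINT32    Threshold;    // hypothetical device-specific config field
} VIRTIO_EXAMPLE_CONFIG;

STATIC
EFI_STATUS
ReadExampleThreshold (
  IN  VIRTIO_DEVICE_PROTOCOL  *VirtIo,
  OUT UINT32                  *Threshold
  )
{
  return VirtIo->ReadDevice (
                   VirtIo,                                       // This
                   OFFSET_OF (VIRTIO_EXAMPLE_CONFIG, Threshold), // FieldOffset
                   sizeof *Threshold,                            // FieldSize
                   sizeof *Threshold,                            // BufferSize
                   Threshold                                     // Buffer
                   );
}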
STATIC
EFI_STATUS
EFIAPI
VirtioRngInit (
  IN OUT VIRTIO_RNG_DEV  *Dev
  )
{
  UINT8       NextDevStat;
  EFI_STATUS  Status;
  UINT16      QueueSize;
  UINT64      Features;
  UINT64      RingBaseShift;

  //
  // Execute virtio-0.9.5, 2.2.1 Device Initialization Sequence.
  //
  NextDevStat = 0;             // step 1 -- reset device
  Status      = Dev->VirtIo->SetDeviceStatus (Dev->VirtIo, NextDevStat);
  if (EFI_ERROR (Status)) {
    goto Failed;
  }

  NextDevStat |= VSTAT_ACK;    // step 2 -- acknowledge device presence
  Status       = Dev->VirtIo->SetDeviceStatus (Dev->VirtIo, NextDevStat);
  if (EFI_ERROR (Status)) {
    goto Failed;
  }

  NextDevStat |= VSTAT_DRIVER; // step 3 -- we know how to drive it
  Status       = Dev->VirtIo->SetDeviceStatus (Dev->VirtIo, NextDevStat);
  if (EFI_ERROR (Status)) {
    goto Failed;
  }

  //
  // Set Page Size - MMIO VirtIo Specific
  //
  Status = Dev->VirtIo->SetPageSize (Dev->VirtIo, EFI_PAGE_SIZE);
  if (EFI_ERROR (Status)) {
    goto Failed;
  }

  //
  // step 4a -- retrieve and validate features
  //
  Status = Dev->VirtIo->GetDeviceFeatures (Dev->VirtIo, &Features);
  if (EFI_ERROR (Status)) {
    goto Failed;
  }

  Features &= VIRTIO_F_VERSION_1 | VIRTIO_F_IOMMU_PLATFORM;

  //
  // In virtio-1.0, feature negotiation is expected to complete before queue
  // discovery, and the device can also reject the selected set of features.
  //
  if (Dev->VirtIo->Revision >= VIRTIO_SPEC_REVISION (1, 0, 0)) {
    Status = Virtio10WriteFeatures (Dev->VirtIo, Features, &NextDevStat);
    if (EFI_ERROR (Status)) {
      goto Failed;
    }
  }

  //
  // step 4b -- allocate request virtqueue, just use #0
  //
  Status = Dev->VirtIo->SetQueueSel (Dev->VirtIo, 0);
  if (EFI_ERROR (Status)) {
    goto Failed;
  }

  Status = Dev->VirtIo->GetQueueNumMax (Dev->VirtIo, &QueueSize);
  if (EFI_ERROR (Status)) {
    goto Failed;
  }

  //
  // VirtioRngGetRNG() uses one descriptor
  //
  if (QueueSize < 1) {
    Status = EFI_UNSUPPORTED;
    goto Failed;
  }

  Status = VirtioRingInit (Dev->VirtIo, QueueSize, &Dev->Ring);
  if (EFI_ERROR (Status)) {
    goto Failed;
  }

  //
  // If anything fails from here on, we must release the ring resources.
  //
  Status = VirtioRingMap (
             Dev->VirtIo,
             &Dev->Ring,
             &RingBaseShift,
             &Dev->RingMap
             );
  if (EFI_ERROR (Status)) {
    goto ReleaseQueue;
  }

  //
  // Additional steps for MMIO: align the queue appropriately, and set the
  // size. If anything fails from here on, we must unmap the ring resources.
  //
  Status = Dev->VirtIo->SetQueueNum (Dev->VirtIo, QueueSize);
  if (EFI_ERROR (Status)) {
    goto UnmapQueue;
  }

  Status = Dev->VirtIo->SetQueueAlign (Dev->VirtIo, EFI_PAGE_SIZE);
  if (EFI_ERROR (Status)) {
    goto UnmapQueue;
  }

  //
  // step 4c -- Report GPFN (guest-physical frame number) of queue.
  //
  Status = Dev->VirtIo->SetQueueAddress (
                          Dev->VirtIo,
                          &Dev->Ring,
                          RingBaseShift
                          );
  if (EFI_ERROR (Status)) {
    goto UnmapQueue;
  }

  //
  // step 5 -- Report understood features and guest-tuneables.
  //
  if (Dev->VirtIo->Revision < VIRTIO_SPEC_REVISION (1, 0, 0)) {
    Features &= ~(UINT64)(VIRTIO_F_VERSION_1 | VIRTIO_F_IOMMU_PLATFORM);
    Status    = Dev->VirtIo->SetGuestFeatures (Dev->VirtIo, Features);
    if (EFI_ERROR (Status)) {
      goto UnmapQueue;
    }
  }

  //
  // step 6 -- initialization complete
  //
  NextDevStat |= VSTAT_DRIVER_OK;
  Status       = Dev->VirtIo->SetDeviceStatus (Dev->VirtIo, NextDevStat);
  if (EFI_ERROR (Status)) {
    goto UnmapQueue;
  }

  //
  // populate the exported interface's attributes
  //
  Dev->Rng.GetInfo = VirtioRngGetInfo;
  Dev->Rng.GetRNG  = VirtioRngGetRNG;

  return EFI_SUCCESS;

UnmapQueue:
  Dev->VirtIo->UnmapSharedBuffer (Dev->VirtIo, Dev->RingMap);

ReleaseQueue:
  VirtioRingUninit (Dev->VirtIo, &Dev->Ring);

Failed:
  //
  // Notify the host about our failure to setup: virtio-0.9.5, 2.2.2.1 Device
  // Status. VirtIo access failure here should not mask the original error.
  //
  NextDevStat |= VSTAT_FAILED;
  Dev->VirtIo->SetDeviceStatus (Dev->VirtIo, NextDevStat);

  return Status; // reached only via Failed above
}
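On the success path, the setup above must eventually be undone in reverse order when the driver is unbound. A minimal teardown sketch under the same device structure follows; the function name is illustrative, and the driver's actual uninit routine may differ in detail.

//
// Illustrative sketch only: reverse the setup in the opposite order.
// Resetting the device (status 0) makes it forget the queue address, so the
// ring can then be unmapped and released safely.
//
STATIC
VOID
VirtioRngTeardownSketch (
  IN OUT VIRTIO_RNG_DEV  *Dev
  )
{
  Dev->VirtIo->SetDeviceStatus (Dev->VirtIo, 0);
  Dev->VirtIo->UnmapSharedBuffer (Dev->VirtIo, Dev->RingMap);
  VirtioRingUninit (Dev->VirtIo, &Dev->Ring);
}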
STATIC
EFI_STATUS
EFIAPI
VirtioBlkInit (
  IN OUT VBLK_DEV  *Dev
  )
{
  UINT8       NextDevStat;
  EFI_STATUS  Status;

  UINT64  Features;
  UINT64  NumSectors;
  UINT32  BlockSize;
  UINT8   PhysicalBlockExp;
  UINT8   AlignmentOffset;
  UINT32  OptIoSize;
  UINT16  QueueSize;
  UINT64  RingBaseShift;

  PhysicalBlockExp = 0;
  AlignmentOffset  = 0;
  OptIoSize        = 0;

  //
  // Execute virtio-0.9.5, 2.2.1 Device Initialization Sequence.
  //
  NextDevStat = 0;             // step 1 -- reset device
  Status      = Dev->VirtIo->SetDeviceStatus (Dev->VirtIo, NextDevStat);
  if (EFI_ERROR (Status)) {
    goto Failed;
  }

  NextDevStat |= VSTAT_ACK;    // step 2 -- acknowledge device presence
  Status       = Dev->VirtIo->SetDeviceStatus (Dev->VirtIo, NextDevStat);
  if (EFI_ERROR (Status)) {
    goto Failed;
  }

  NextDevStat |= VSTAT_DRIVER; // step 3 -- we know how to drive it
  Status       = Dev->VirtIo->SetDeviceStatus (Dev->VirtIo, NextDevStat);
  if (EFI_ERROR (Status)) {
    goto Failed;
  }

  //
  // Set Page Size - MMIO VirtIo Specific
  //
  Status = Dev->VirtIo->SetPageSize (Dev->VirtIo, EFI_PAGE_SIZE);
  if (EFI_ERROR (Status)) {
    goto Failed;
  }

  //
  // step 4a -- retrieve and validate features
  //
  Status = Dev->VirtIo->GetDeviceFeatures (Dev->VirtIo, &Features);
  if (EFI_ERROR (Status)) {
    goto Failed;
  }

  Status = VIRTIO_CFG_READ (Dev, Capacity, &NumSectors);
  if (EFI_ERROR (Status)) {
    goto Failed;
  }

  if (NumSectors == 0) {
    Status = EFI_UNSUPPORTED;
    goto Failed;
  }

  if (Features & VIRTIO_BLK_F_BLK_SIZE) {
    Status = VIRTIO_CFG_READ (Dev, BlkSize, &BlockSize);
    if (EFI_ERROR (Status)) {
      goto Failed;
    }

    if ((BlockSize == 0) || (BlockSize % 512 != 0) ||
        (ModU64x32 (NumSectors, BlockSize / 512) != 0))
    {
      //
      // We can only handle a logical block consisting of whole sectors,
      // and only a disk composed of whole logical blocks.
      //
      Status = EFI_UNSUPPORTED;
      goto Failed;
    }
  } else {
    BlockSize = 512;
  }

  if (Features & VIRTIO_BLK_F_TOPOLOGY) {
    Status = VIRTIO_CFG_READ (Dev, Topology.PhysicalBlockExp,
               &PhysicalBlockExp);
    if (EFI_ERROR (Status)) {
      goto Failed;
    }

    if (PhysicalBlockExp >= 32) {
      Status = EFI_UNSUPPORTED;
      goto Failed;
    }

    Status = VIRTIO_CFG_READ (Dev, Topology.AlignmentOffset, &AlignmentOffset);
    if (EFI_ERROR (Status)) {
      goto Failed;
    }

    Status = VIRTIO_CFG_READ (Dev, Topology.OptIoSize, &OptIoSize);
    if (EFI_ERROR (Status)) {
      goto Failed;
    }
  }

  Features &= VIRTIO_BLK_F_BLK_SIZE | VIRTIO_BLK_F_TOPOLOGY | VIRTIO_BLK_F_RO |
              VIRTIO_BLK_F_FLUSH | VIRTIO_F_VERSION_1 |
              VIRTIO_F_IOMMU_PLATFORM;

  //
  // In virtio-1.0, feature negotiation is expected to complete before queue
  // discovery, and the device can also reject the selected set of features.
  //
  if (Dev->VirtIo->Revision >= VIRTIO_SPEC_REVISION (1, 0, 0)) {
    Status = Virtio10WriteFeatures (Dev->VirtIo, Features, &NextDevStat);
    if (EFI_ERROR (Status)) {
      goto Failed;
    }
  }

  //
  // step 4b -- allocate virtqueue
  //
  Status = Dev->VirtIo->SetQueueSel (Dev->VirtIo, 0);
  if (EFI_ERROR (Status)) {
    goto Failed;
  }

  Status = Dev->VirtIo->GetQueueNumMax (Dev->VirtIo, &QueueSize);
  if (EFI_ERROR (Status)) {
    goto Failed;
  }

  if (QueueSize < 3) {
    // SynchronousRequest() uses at most three descriptors
    Status = EFI_UNSUPPORTED;
    goto Failed;
  }

  Status = VirtioRingInit (Dev->VirtIo, QueueSize, &Dev->Ring);
  if (EFI_ERROR (Status)) {
    goto Failed;
  }

  //
  // If anything fails from here on, we must release the ring resources
  //
  Status = VirtioRingMap (
             Dev->VirtIo,
             &Dev->Ring,
             &RingBaseShift,
             &Dev->RingMap
             );
  if (EFI_ERROR (Status)) {
    goto ReleaseQueue;
  }

  //
  // Additional steps for MMIO: align the queue appropriately, and set the
  // size. If anything fails from here on, we must unmap the ring resources.
  //
  Status = Dev->VirtIo->SetQueueNum (Dev->VirtIo, QueueSize);
  if (EFI_ERROR (Status)) {
    goto UnmapQueue;
  }

  Status = Dev->VirtIo->SetQueueAlign (Dev->VirtIo, EFI_PAGE_SIZE);
  if (EFI_ERROR (Status)) {
    goto UnmapQueue;
  }

  //
  // step 4c -- Report GPFN (guest-physical frame number) of queue.
  //
  Status = Dev->VirtIo->SetQueueAddress (
                          Dev->VirtIo,
                          &Dev->Ring,
                          RingBaseShift
                          );
  if (EFI_ERROR (Status)) {
    goto UnmapQueue;
  }

  //
  // step 5 -- Report understood features.
  //
  if (Dev->VirtIo->Revision < VIRTIO_SPEC_REVISION (1, 0, 0)) {
    Features &= ~(UINT64)(VIRTIO_F_VERSION_1 | VIRTIO_F_IOMMU_PLATFORM);
    Status    = Dev->VirtIo->SetGuestFeatures (Dev->VirtIo, Features);
    if (EFI_ERROR (Status)) {
      goto UnmapQueue;
    }
  }

  //
  // step 6 -- initialization complete
  //
  NextDevStat |= VSTAT_DRIVER_OK;
  Status       = Dev->VirtIo->SetDeviceStatus (Dev->VirtIo, NextDevStat);
  if (EFI_ERROR (Status)) {
    goto UnmapQueue;
  }

  //
  // Populate the exported interface's attributes; see UEFI spec v2.4, 12.9 EFI
  // Block I/O Protocol.
  //
  Dev->BlockIo.Revision    = 0;
  Dev->BlockIo.Media       = &Dev->BlockIoMedia;
  Dev->BlockIo.Reset       = &VirtioBlkReset;
  Dev->BlockIo.ReadBlocks  = &VirtioBlkReadBlocks;
  Dev->BlockIo.WriteBlocks = &VirtioBlkWriteBlocks;
  Dev->BlockIo.FlushBlocks = &VirtioBlkFlushBlocks;

  Dev->BlockIoMedia.MediaId          = 0;
  Dev->BlockIoMedia.RemovableMedia   = FALSE;
  Dev->BlockIoMedia.MediaPresent     = TRUE;
  Dev->BlockIoMedia.LogicalPartition = FALSE;
  Dev->BlockIoMedia.ReadOnly         =
    (BOOLEAN) ((Features & VIRTIO_BLK_F_RO) != 0);
  Dev->BlockIoMedia.WriteCaching     =
    (BOOLEAN) ((Features & VIRTIO_BLK_F_FLUSH) != 0);
  Dev->BlockIoMedia.BlockSize        = BlockSize;
  Dev->BlockIoMedia.IoAlign          = 0;
  Dev->BlockIoMedia.LastBlock        =
    DivU64x32 (NumSectors, BlockSize / 512) - 1;

  DEBUG ((DEBUG_INFO, "%a: LbaSize=0x%x[B] NumBlocks=0x%Lx[Lba]\n",
    __FUNCTION__, Dev->BlockIoMedia.BlockSize,
    Dev->BlockIoMedia.LastBlock + 1));

  if (Features & VIRTIO_BLK_F_TOPOLOGY) {
    Dev->BlockIo.Revision = EFI_BLOCK_IO_PROTOCOL_REVISION3;

    Dev->BlockIoMedia.LowestAlignedLba                 = AlignmentOffset;
    Dev->BlockIoMedia.LogicalBlocksPerPhysicalBlock    = 1u << PhysicalBlockExp;
    Dev->BlockIoMedia.OptimalTransferLengthGranularity = OptIoSize;

    DEBUG ((DEBUG_INFO, "%a: FirstAligned=0x%Lx[Lba] PhysBlkSize=0x%x[Lba]\n",
      __FUNCTION__, Dev->BlockIoMedia.LowestAlignedLba,
      Dev->BlockIoMedia.LogicalBlocksPerPhysicalBlock));
    DEBUG ((DEBUG_INFO, "%a: OptimalTransferLengthGranularity=0x%x[Lba]\n",
      __FUNCTION__, Dev->BlockIoMedia.OptimalTransferLengthGranularity));
  }

  return EFI_SUCCESS;

UnmapQueue:
  Dev->VirtIo->UnmapSharedBuffer (Dev->VirtIo, Dev->RingMap);

ReleaseQueue:
  VirtioRingUninit (Dev->VirtIo, &Dev->Ring);

Failed:
  //
  // Notify the host about our failure to setup: virtio-0.9.5, 2.2.2.1 Device
  // Status. VirtIo access failure here should not mask the original error.
  //
  NextDevStat |= VSTAT_FAILED;
  Dev->VirtIo->SetDeviceStatus (Dev->VirtIo, NextDevStat);

  return Status; // reached only via Failed above
}
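As a worked example of the media geometry computed above, with illustrative numbers rather than values from any particular device:

//
// Worked example (illustrative numbers): if VIRTIO_BLK_F_BLK_SIZE is offered
// with BlkSize == 4096 and Capacity (NumSectors) == 0x10000 512-byte
// sectors, the validation passes (4096 % 512 == 0 and 0x10000 % 8 == 0),
// and the exported media reports:
//
//   BlockSize = 4096
//   LastBlock = DivU64x32 (0x10000, 4096 / 512) - 1
//             = 0x10000 / 8 - 1
//             = 0x1FFF
//
// i.e. 0x2000 logical blocks of 4 KB, covering the same 32 MB of capacity.
//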