static void vi_detect_hw_virtualization(struct amdgpu_device *adev) { uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER); /* bit0: 0 means pf and 1 means vf */ /* bit31: 0 means disable IOV and 1 means enable */ if (reg & 1) adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF; if (reg & 0x80000000) adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; if (reg == 0) { if (is_virtual_machine()) /* passthrough mode exclus sr-iov mode */ adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE; } }
/*
 * Read RCC_IOV_FUNC_IDENTIFIER on NBIO 7.4 and record the detected
 * virtualization capabilities in adev->virt.caps.
 */
static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
{
	uint32_t ident = RREG32_SOC15(NBIO, 0, mmRCC_IOV_FUNC_IDENTIFIER);

	/* bit0 set: this function is a VF, not the PF */
	if (ident & 0x1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	/* bit31 set: IOV is enabled on this device */
	if (ident & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	/*
	 * Register reads as zero: SR-IOV is absent, so a hypervisor
	 * environment means passthrough mode (passthrough excludes
	 * SR-IOV mode).
	 */
	if (ident == 0 && is_virtual_machine())
		adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}
static void vi_detect_hw_virtualization(struct amdgpu_device *adev) { uint32_t reg = 0; if (adev->asic_type == CHIP_TONGA || adev->asic_type == CHIP_FIJI) { reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER); /* bit0: 0 means pf and 1 means vf */ if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER)) adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF; /* bit31: 0 means disable IOV and 1 means enable */ if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE)) adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; } if (reg == 0) { if (is_virtual_machine()) /* passthrough mode exclus sr-iov mode */ adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; } }