/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r, acpi_status;

	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
	if (adev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)adev;

	/* Enable runtime PM (PX) only for ATPX-capable discrete GPUs */
	if ((amdgpu_runtime_pm != 0) &&
	    amdgpu_has_atpx() &&
	    ((flags & AMD_IS_APU) == 0))
		flags |= AMD_IS_PX;

	/* amdgpu_device_init should report only fatal errors
	 * like memory allocation failure, iomapping failure,
	 * or memory manager initialization failure. It must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation.
	 */
	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */
	if (!r) {
		acpi_status = amdgpu_acpi_init(adev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

	amdgpu_amdkfd_load_interface(adev);
	amdgpu_amdkfd_device_probe(adev);
	amdgpu_amdkfd_device_init(adev);

	if (amdgpu_device_is_px(dev)) {
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	if (r)
		amdgpu_driver_unload_kms(dev);

	return r;
}
/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r, acpi_status;

#ifdef CONFIG_DRM_AMDGPU_SI
	/* SI ASICs are handled by radeon by default; bail out unless
	 * amdgpu.si_support=1 was requested.
	 */
	if (!amdgpu_si_support) {
		switch (flags & AMD_ASIC_MASK) {
		case CHIP_TAHITI:
		case CHIP_PITCAIRN:
		case CHIP_VERDE:
		case CHIP_OLAND:
		case CHIP_HAINAN:
			dev_info(dev->dev,
				 "SI support provided by radeon.\n");
			dev_info(dev->dev,
				 "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n");
			return -ENODEV;
		}
	}
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	/* Likewise, CIK ASICs default to radeon unless
	 * amdgpu.cik_support=1 was requested.
	 */
	if (!amdgpu_cik_support) {
		switch (flags & AMD_ASIC_MASK) {
		case CHIP_KAVERI:
		case CHIP_BONAIRE:
		case CHIP_HAWAII:
		case CHIP_KABINI:
		case CHIP_MULLINS:
			dev_info(dev->dev,
				 "CIK support provided by radeon.\n");
			dev_info(dev->dev,
				 "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n");
			return -ENODEV;
		}
	}
#endif

	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
	if (adev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)adev;

	/* Enable runtime PM (PX) only for ATPX-capable discrete GPUs
	 * that are not Thunderbolt-attached.
	 */
	if ((amdgpu_runtime_pm != 0) &&
	    amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((flags & AMD_IS_APU) == 0) &&
	    !pci_is_thunderbolt_attached(dev->pdev))
		flags |= AMD_IS_PX;

	/* amdgpu_device_init should report only fatal errors
	 * like memory allocation failure, iomapping failure,
	 * or memory manager initialization failure. It must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation.
	 */
	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */
	if (!r) {
		acpi_status = amdgpu_acpi_init(adev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

	amdgpu_amdkfd_device_probe(adev);
	amdgpu_amdkfd_device_init(adev);

	if (amdgpu_device_is_px(dev)) {
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, true);

out:
	if (r) {
		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
		if (adev->rmmio && amdgpu_device_is_px(dev))
			pm_runtime_put_noidle(dev->dev);
		amdgpu_driver_unload_kms(dev);
	}

	return r;
}