Example #1
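gmc_v9_0_process_interrupt() handles GMC v9 VM-fault interrupts; it only reads and clears the L2 protection-fault status registers when the device is not an SR-IOV virtual function.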
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		dev_err(adev->dev,
			"[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid);
		dev_err(adev->dev, "  at page 0x%016llx from %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
	}

	return 0;
}
Example #2
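psp_ras_load() uploads the RAS trusted application through the PSP; the load is bypassed entirely under SR-IOV.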
static int psp_ras_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);

	psp_prep_ras_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
			psp->ras.ras_shared_mc_addr,
			psp->ta_ras_ucode_size, PSP_RAS_SHARED_MEM_SIZE);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
			psp->fence_buf_mc_addr);

	if (!ret) {
		psp->ras.ras_initialized = 1;
		psp->ras.session_id = cmd->resp.session_id;
	}

	kfree(cmd);

	return ret;
}
Example #3
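amdgpu_amdkfd_get_local_mem_info() reports local VRAM sizes to KFD; under SR-IOV the maximum memory clock comes from the default mclk rather than a DPM query.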
void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
				      struct kfd_local_mem_info *mem_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
					     ~((1ULL << 32) - 1);
	resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;

	memset(mem_info, 0, sizeof(*mem_info));
	if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) {
		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
				adev->gmc.visible_vram_size;
	} else {
		mem_info->local_mem_size_public = 0;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size;
	}
	mem_info->vram_width = adev->gmc.vram_width;

	pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
			&adev->gmc.aper_base, &aper_limit,
			mem_info->local_mem_size_public,
			mem_info->local_mem_size_private);

	if (amdgpu_sriov_vf(adev))
		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
	else if (adev->powerplay.pp_funcs)
		mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
	else
		mem_info->mem_clk_max = 100;
}
Example #4
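psp_asd_load() uploads the ASD firmware via the PSP; it is bypassed on SR-IOV VFs to work around PSP/ASD version mismatches.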
static int psp_asd_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/* If the PSP version doesn't match the ASD version, ASD loading will fail.
	 * Add a workaround to bypass it for SR-IOV for now.
	 * TODO: add a version check to make this common.
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size);

	psp_prep_asd_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->asd_shared_mc_addr,
			     psp->asd_ucode_size, PSP_ASD_SHARED_MEM_SIZE);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr, 2);

	kfree(cmd);

	return ret;
}
Example #5
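psp_hw_start() brings up the PSP; the sysdrv and sos bootloader stages are skipped when an SR-IOV VF is going through GPU reset.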
static int psp_hw_start(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;
	int ret;

	if (!amdgpu_sriov_vf(adev) || !adev->in_gpu_reset) {
		ret = psp_bootloader_load_sysdrv(psp);
		if (ret)
			return ret;

		ret = psp_bootloader_load_sos(psp);
		if (ret)
			return ret;
	}

	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
	if (ret)
		return ret;

	ret = psp_tmr_load(psp);
	if (ret)
		return ret;

	ret = psp_asd_load(psp);
	if (ret)
		return ret;

	return 0;
}
Example #6
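dce_virtual_crtc_dpms() implements DPMS for the virtual display; it returns immediately under SR-IOV.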
static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	unsigned type;

	if (amdgpu_sriov_vf(adev))
		return;

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		amdgpu_crtc->enabled = true;
		/* Make sure VBLANK interrupts are still enabled */
		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
		amdgpu_irq_update(adev, &adev->crtc_irq, type);
		drm_crtc_vblank_on(crtc);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		drm_crtc_vblank_off(crtc);
		amdgpu_crtc->enabled = false;
		break;
	}
}
Example #7
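psp_ras_invoke() submits a command to the RAS trusted application; skipped under SR-IOV.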
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the TA invocation in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ras_ta_invoke_cmd_buf(cmd, ta_cmd_id,
			psp->ras.session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
			psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}
Example #8
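amdgpu_driver_unload_kms() tears down the KMS driver; an SR-IOV VF first releases full GPU access back to the host.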
/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev == NULL)
		return;

	if (adev->rmmio == NULL)
		goto done_free;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	if (amdgpu_device_is_px(dev)) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	amdgpu_amdkfd_device_fini(adev);

	amdgpu_acpi_fini(adev);

	amdgpu_device_fini(adev);

done_free:
	kfree(adev);
	dev->dev_private = NULL;
}
Example #9
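soc15_common_get_clockgating_state() collects the SOC15 clockgating flags from hardware; on SR-IOV VFs the flags are first cleared.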
static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio_funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_DRM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
	if (!(data & 0x01000000))
		*flags |= AMD_CG_SUPPORT_DRM_MGCG;

	/* AMD_CG_SUPPORT_DRM_LS */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
	if (data & 0x1)
		*flags |= AMD_CG_SUPPORT_DRM_LS;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;

	/* AMD_CG_SUPPORT_DF_MGCG */
	data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
	if (data & DF_MGCG_ENABLE_15_CYCLE_DELAY)
		*flags |= AMD_CG_SUPPORT_DF_MGCG;
}
Example #10
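amdgpu_irq_reset_work_func() runs a GPU reset scheduled by the IRQ handler; the reset is suppressed on SR-IOV VFs.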
/**
 * amdgpu_irq_reset_work_func - execute gpu reset
 *
 * @work: work struct
 *
 * Execute scheduled gpu reset (cayman+).
 * This function is called when the irq handler
 * thinks we need a gpu reset.
 */
static void amdgpu_irq_reset_work_func(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  reset_work);

	if (!amdgpu_sriov_vf(adev))
		amdgpu_gpu_reset(adev);
}
Example #11
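Another revision of psp_hw_start() (compare Example #5) with error logging and optional XGMI/RAS initialization; the SR-IOV bootloader bypass is unchanged.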
static int psp_hw_start(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;
	int ret;

	if (!amdgpu_sriov_vf(adev) || !adev->in_gpu_reset) {
		ret = psp_bootloader_load_sysdrv(psp);
		if (ret) {
			DRM_ERROR("PSP load sysdrv failed!\n");
			return ret;
		}

		ret = psp_bootloader_load_sos(psp);
		if (ret) {
			DRM_ERROR("PSP load sos failed!\n");
			return ret;
		}
	}

	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
	if (ret) {
		DRM_ERROR("PSP create ring failed!\n");
		return ret;
	}

	ret = psp_tmr_load(psp);
	if (ret) {
		DRM_ERROR("PSP load tmr failed!\n");
		return ret;
	}

	ret = psp_asd_load(psp);
	if (ret) {
		DRM_ERROR("PSP load asd failed!\n");
		return ret;
	}

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		ret = psp_xgmi_initialize(psp);
		/* Warn on XGMI session initialization failure
		 * instead of stopping driver initialization.
		 */
		if (ret)
			dev_err(psp->adev->dev,
				"XGMI: Failed to initialize XGMI session\n");
	}

	if (psp->adev->psp.ta_fw) {
		ret = psp_ras_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
					"RAS: Failed to initialize RAS\n");
	}

	return 0;
}
Example #12
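soc15_common_sw_init() registers the SR-IOV mailbox interrupt on virtual functions.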
static int soc15_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_add_irq_id(adev);

	return 0;
}
Example #13
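amdgpu_debugfs_fence_init() installs an SR-IOV-specific debugfs fence file list on VFs, and the full list otherwise.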
int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	if (amdgpu_sriov_vf(adev))
		return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list_sriov, 1);
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
#else
	return 0;
#endif
}
Example #14
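amdgpu_virt_free_mm_table() frees the UVD/VCE MM table; it is a no-op unless the device is an SR-IOV VF with a table allocated.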
/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev: amdgpu device.
 * Free MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}
Example #15
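amdgpu_driver_open_kms() sets up the per-file VM state; SR-IOV VFs additionally map the static CSA into the new VM.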
/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv;
	int r;

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return r;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}

	r = amdgpu_vm_init(adev, &fpriv->vm,
			   AMDGPU_VM_CONTEXT_GFX, 0);
	if (r) {
		kfree(fpriv);
		goto out_suspend;
	}

	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
	if (!fpriv->prt_va) {
		r = -ENOMEM;
		amdgpu_vm_fini(adev, &fpriv->vm);
		kfree(fpriv);
		goto out_suspend;
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
		if (r) {
			amdgpu_vm_fini(adev, &fpriv->vm);
			kfree(fpriv);
			goto out_suspend;
		}
	}

	mutex_init(&fpriv->bo_list_lock);
	idr_init(&fpriv->bo_list_handles);

	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);

	file_priv->driver_priv = fpriv;

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}
Example #16
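soc15_common_hw_fini() disables the doorbell aperture; VFs also release the SR-IOV mailbox interrupt.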
static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, false);
	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_put_irq(adev);

	return 0;
}
Example #17
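gmc_v9_0_vram_gtt_location() places VRAM and GART; the framebuffer base is read from MMHUB only on bare metal and left at zero for VFs.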
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);
	amdgpu_device_vram_location(adev, &adev->gmc, base);
	amdgpu_device_gart_location(adev, mc);
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
}
Example #18
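amdgpu_amdkfd_get_max_engine_clock_in_mhz() returns the maximum engine clock; SR-IOV VFs report the default sclk instead of querying DPM.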
uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	/* the sclk is in quanta of 10 kHz */
	if (amdgpu_sriov_vf(adev))
		return adev->clock.default_sclk / 100;
	else if (adev->powerplay.pp_funcs)
		return amdgpu_dpm_get_sclk(adev, false) / 100;
	else
		return 100;
}
Example #19
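amdgpu_driver_postclose_kms() tears down per-file state; SR-IOV VFs unmap the static CSA before the VM is destroyed.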
/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	unsigned int pasid;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
		amdgpu_uvd_free_handles(adev, file_priv);
	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
		amdgpu_vce_free_handles(adev, file_priv);

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
	amdgpu_vm_fini(adev, &fpriv->vm);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_put(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}
Example #20
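gmc_v9_0_late_init() assigns VM invalidation engines to rings; the Vega10 ECC probe is skipped under SR-IOV.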
static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/*
	 * The latest engine allocation on gfx9 is:
	 * Engine 0, 1: idle
	 * Engine 2, 3: firmware
	 * Engine 4~13: amdgpu ring, subject to change when ring number changes
	 * Engine 14~15: idle
	 * Engine 16: kfd tlb invalidation
	 * Engine 17: Gart flushes
	 */
	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
	unsigned i;
	int r;

	/*
	 * TODO - Uncomment once GART corruption issue is fixed.
	 */
	/* amdgpu_bo_late_init(adev); */

	for (i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		unsigned vmhub = ring->funcs->vmhub;

		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
		dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
			 ring->idx, ring->name, ring->vm_inv_eng,
			 ring->funcs->vmhub);
	}

	/* Engine 16 is used for KFD and 17 for GART flushes */
	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
		BUG_ON(vm_inv_eng[i] > 16);

	if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
		r = gmc_v9_0_ecc_available(adev);
		if (r == 1) {
			DRM_INFO("ECC is active.\n");
		} else if (r == 0) {
			DRM_INFO("ECC is not present.\n");
			adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
		} else {
			DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
			return r;
		}
	}

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
Example #21
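gmc_v9_0_hw_fini() returns early on SR-IOV VFs so no GMC register is touched; otherwise it disables the VM-fault interrupt and GART.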
static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}
Example #22
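amdgpu_create_pp_handle() creates the powerplay handle; power management (pm_en) is disabled under SR-IOV.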
static int amdgpu_create_pp_handle(struct amdgpu_device *adev)
{
	struct amd_pp_init pp_init;
	struct amd_powerplay *amd_pp;
	int ret;

	amd_pp = &(adev->powerplay);
	pp_init.chip_family = adev->family;
	pp_init.chip_id = adev->asic_type;
	pp_init.pm_en = amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev);
	pp_init.feature_mask = amdgpu_pp_feature_mask;
	pp_init.device = amdgpu_cgs_create_device(adev);
	ret = amd_powerplay_create(&pp_init, &(amd_pp->pp_handle));
	if (ret)
		return -EINVAL;
	return 0;
}
Example #23
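soc15_common_set_clockgating_state() programs clockgating for SOC15 parts; it is a no-op on SR-IOV VFs.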
static int soc15_common_set_clockgating_state(void *handle,
					    enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->df_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_RAVEN:
		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}
Example #24
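vi_init_golden_registers() programs per-ASIC golden register settings; SR-IOV VFs take the dedicated xgpu_vi_init_golden_registers() path instead.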
static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}
Example #25
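Another revision of amdgpu_driver_postclose_kms() (compare Example #19) with the same SR-IOV CSA unmap before VM teardown.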
/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);

	if (adev->asic_type != CHIP_RAVEN) {
		amdgpu_uvd_free_handles(adev, file_priv);
		amdgpu_vce_free_handles(adev, file_priv);
	}

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

	amdgpu_vm_fini(adev, &fpriv->vm);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_free(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}
Example #26
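psp_np_fw_load() loads the non-PSP firmwares through the PSP; SDMA0, SDMA1, and RLC_G ucode loading is skipped on SR-IOV VFs.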
static int psp_np_fw_load(struct psp_context *psp)
{
	int i, ret;
	struct amdgpu_firmware_info *ucode;
	struct amdgpu_device *adev = psp->adev;

	for (i = 0; i < adev->firmware.max_ucodes; i++) {
		ucode = &adev->firmware.ucode[i];
		if (!ucode->fw)
			continue;

		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
		    psp_smu_reload_quirk(psp))
			continue;
		if (amdgpu_sriov_vf(adev) &&
		   (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
		    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
		    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G))
			/* skip ucode loading in SRIOV VF */
			continue;

		ret = psp_prep_cmd_buf(ucode, psp->cmd);
		if (ret)
			return ret;

		ret = psp_cmd_submit_buf(psp, ucode, psp->cmd,
					 psp->fence_buf_mc_addr, i + 3);
		if (ret)
			return ret;

#if 0
		/* check if firmware loaded successfully */
		if (!amdgpu_psp_check_fw_loading_status(adev, i))
			return -EINVAL;
#endif
	}

	return 0;
}
Example #27
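amdgpu_pp_hw_init() initializes the powerplay hardware; dpm_enabled is only set when DPM is requested and the device is not an SR-IOV VF.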
static int amdgpu_pp_hw_init(void *handle)
{
	int ret = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pp_enabled && adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
		amdgpu_ucode_init_bo(adev);

	if (adev->powerplay.ip_funcs->hw_init)
		ret = adev->powerplay.ip_funcs->hw_init(
					adev->powerplay.pp_handle);

	if (ret == PP_DPM_DISABLED) {
		adev->pm.dpm_enabled = false;
		return 0;
	}

	if ((amdgpu_dpm != 0) && !amdgpu_sriov_vf(adev))
		adev->pm.dpm_enabled = true;

	return ret;
}
Example #28
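amdgpu_virt_alloc_mm_table() allocates the UVD/VCE MM table, which is only needed on SR-IOV VFs.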
/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev: amdgpu device.
 * MM table is used by UVD and VCE for their initialization.
 * Return: Zero on success.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}
Example #29
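psp_xgmi_unload() unloads the XGMI trusted application; bypassed under SR-IOV.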
static int psp_xgmi_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the unloading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_xgmi_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}
Example #30
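psp_load_fw() allocates the PSP command, fence, and firmware buffers and starts loading; an SR-IOV VF in GPU reset skips the allocations and reuses the existing buffers.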
static int psp_load_fw(struct amdgpu_device *adev)
{
	int ret;
	struct psp_context *psp = &adev->psp;

	if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset != 0)
		goto skip_memalloc;

	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!psp->cmd)
		return -ENOMEM;

	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
					AMDGPU_GEM_DOMAIN_GTT,
					&psp->fw_pri_bo,
					&psp->fw_pri_mc_addr,
					&psp->fw_pri_buf);
	if (ret)
		goto failed;

	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
					AMDGPU_GEM_DOMAIN_VRAM,
					&psp->fence_buf_bo,
					&psp->fence_buf_mc_addr,
					&psp->fence_buf);
	if (ret)
		goto failed_mem2;

	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
				      (void **)&psp->cmd_buf_mem);
	if (ret)
		goto failed_mem1;

	memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);

	ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
	if (ret)
		goto failed_mem;

	ret = psp_tmr_init(psp);
	if (ret)
		goto failed_mem;

	ret = psp_asd_init(psp);
	if (ret)
		goto failed_mem;

skip_memalloc:
	ret = psp_hw_start(psp);
	if (ret)
		goto failed_mem;

	ret = psp_np_fw_load(psp);
	if (ret)
		goto failed_mem;

	return 0;

failed_mem:
	amdgpu_bo_free_kernel(&psp->cmd_buf_bo,
			      &psp->cmd_buf_mc_addr,
			      (void **)&psp->cmd_buf_mem);
failed_mem1:
	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
			      &psp->fence_buf_mc_addr, &psp->fence_buf);
failed_mem2:
	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
failed:
	kfree(psp->cmd);
	psp->cmd = NULL;
	return ret;
}