/**
 * cik_sdma_get_rptr - get the current read pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current rptr from the hardware (CIK+).
 */
uint32_t cik_sdma_get_rptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	u32 rptr, reg;

	if (rdev->wb.enabled) {
		rptr = rdev->wb.wb[ring->rptr_offs/4];
	} else {
		if (ring->idx == R600_RING_TYPE_DMA_INDEX)
			reg = SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET;
		else
			reg = SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET;

		rptr = RREG32(reg);
	}

	return (rptr & 0x3fffc) >> 2;
}
static void gmc_v7_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v7_0_wait_for_idle((void *)adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);
}
static void si_dma_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl;
	unsigned i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		/* disable the ring buffer for this DMA instance */
		rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
		rb_cntl &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
		ring->ready = false;
	}
}
static int mgag200_device_init(struct drm_device *dev, uint32_t flags)
{
	struct mga_device *mdev = dev->dev_private;
	int ret, option;

	mdev->type = flags;

	/* Hardcode the number of CRTCs to 1 */
	mdev->num_crtc = 1;

	pci_read_config_dword(dev->pdev, PCI_MGA_OPTION, &option);
	mdev->has_sdram = !(option & (1 << 14));

	/* BAR 0 is the framebuffer, BAR 1 contains registers */
	mdev->rmmio_base = pci_resource_start(mdev->dev->pdev, 1);
	mdev->rmmio_size = pci_resource_len(mdev->dev->pdev, 1);

	if (!request_mem_region(mdev->rmmio_base, mdev->rmmio_size,
				"mgadrmfb_mmio")) {
		DRM_ERROR("can't reserve mmio registers\n");
		return -ENOMEM;
	}

	mdev->rmmio = pci_iomap(dev->pdev, 1, 0);
	if (mdev->rmmio == NULL) {
		release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
		return -ENOMEM;
	}

	/* stash G200 SE model number for later use */
	if (IS_G200_SE(mdev))
		mdev->reg_1e24 = RREG32(0x1e24);

	ret = mga_vram_init(mdev);
	if (ret) {
		release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
		return ret;
	}

	mdev->bpp_shifts[0] = 0;
	mdev->bpp_shifts[1] = 1;
	mdev->bpp_shifts[2] = 0;
	mdev->bpp_shifts[3] = 2;
	return 0;
}
/**
 * cik_sdma_enable - enable/disable the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (CIK).
 */
static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
{
	u32 me_cntl;
	int i;

	if (!enable) {
		cik_sdma_gfx_stop(adev);
		cik_sdma_rlc_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
		if (enable)
			me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK;
		else
			me_cntl |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], me_cntl);
	}
}
void rs690_line_buffer_adjust(struct radeon_device *rdev,
			      struct drm_display_mode *mode1,
			      struct drm_display_mode *mode2)
{
	u32 tmp;

	/*
	 * Line Buffer Setup
	 * There is a single line buffer shared by both display controllers.
	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The partitioning can either be done
	 * manually or via one of four preset allocations specified in bits 1:0:
	 * 0 - line buffer is divided in half and shared between crtc
	 * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
	 * 2 - D1 gets the whole buffer
	 * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
	 * Setting bit 2 of DC_LB_MEMORY_SPLIT switches to manual
	 * allocation mode.  In manual allocation mode, D1 always starts at 0,
	 * D1 end/2 is specified in bits 14:4; D2 allocation follows D1.
	 */
	tmp = RREG32(DC_LB_MEMORY_SPLIT) & ~DC_LB_MEMORY_SPLIT_MASK;
	tmp &= ~DC_LB_MEMORY_SPLIT_SHIFT_MODE;
	/* auto */
	if (mode1 && mode2) {
		if (mode1->hdisplay > mode2->hdisplay) {
			if (mode1->hdisplay > 2560)
				tmp |= DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
			else
				tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
		} else if (mode2->hdisplay > mode1->hdisplay) {
			if (mode2->hdisplay > 2560)
				tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
			else
				tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
		} else
			tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
	} else if (mode1) {
		tmp |= DC_LB_MEMORY_SPLIT_D1_ONLY;
	} else if (mode2) {
		tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
	}
	WREG32(DC_LB_MEMORY_SPLIT, tmp);
}
void dce4_hdmi_audio_set_dto(struct radeon_device *rdev,
			     struct radeon_crtc *crtc, unsigned int clock)
{
	unsigned int max_ratio = clock / 24000;
	u32 dto_phase;
	u32 wallclock_ratio;
	u32 value;

	if (max_ratio >= 8) {
		dto_phase = 192 * 1000;
		wallclock_ratio = 3;
	} else if (max_ratio >= 4) {
		dto_phase = 96 * 1000;
		wallclock_ratio = 2;
	} else if (max_ratio >= 2) {
		dto_phase = 48 * 1000;
		wallclock_ratio = 1;
	} else {
		dto_phase = 24 * 1000;
		wallclock_ratio = 0;
	}

	value = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
	value |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
	value &= ~DCCG_AUDIO_DTO1_USE_512FBR_DTO;
	WREG32(DCCG_AUDIO_DTO0_CNTL, value);

	/* Two dtos; generally use dto0 for HDMI */
	value = 0;

	if (crtc)
		value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);

	WREG32(DCCG_AUDIO_DTO_SOURCE, value);

	/* Express [24MHz / target pixel clock] as an exact rational
	 * number (coefficient of two integer numbers).  DCCG_AUDIO_DTOx_PHASE
	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
	 */
	WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase);
	WREG32(DCCG_AUDIO_DTO0_MODULE, clock);
}
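The DTO programming above reduces to picking a wallclock divider and a PHASE/MODULE pair from the target pixel clock. Below is a minimal userspace sketch of that selection, assuming the clock is given in kHz as in the caller; the struct and function names are illustrative and nothing here touches hardware.

/* Sketch: compute the DCCG audio DTO parameters for a pixel clock in kHz. */
#include <stdio.h>

struct audio_dto {
	unsigned phase;			/* DCCG_AUDIO_DTO0_PHASE */
	unsigned module;		/* DCCG_AUDIO_DTO0_MODULE */
	unsigned wallclock_ratio;	/* divider select, 0..3 */
};

static struct audio_dto dce4_audio_dto(unsigned int clock_khz)
{
	unsigned int max_ratio = clock_khz / 24000;
	struct audio_dto dto = { .module = clock_khz };

	if (max_ratio >= 8) {
		dto.phase = 192 * 1000;
		dto.wallclock_ratio = 3;
	} else if (max_ratio >= 4) {
		dto.phase = 96 * 1000;
		dto.wallclock_ratio = 2;
	} else if (max_ratio >= 2) {
		dto.phase = 48 * 1000;
		dto.wallclock_ratio = 1;
	} else {
		dto.phase = 24 * 1000;
		dto.wallclock_ratio = 0;
	}
	return dto;
}

int main(void)
{
	/* 1080p60: 148.5 MHz pixel clock -> phase=96000, module=148500, ratio=2 */
	struct audio_dto dto = dce4_audio_dto(148500);

	printf("phase=%u module=%u wallclock_ratio=%u\n",
	       dto.phase, dto.module, dto.wallclock_ratio);
	return 0;
}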
/**
 * cik_sdma_gfx_stop - stop the gfx async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the gfx async dma ring buffers (CIK).
 */
static void cik_sdma_gfx_stop(struct radeon_device *rdev)
{
	u32 rb_cntl, reg_offset;
	int i;

	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	for (i = 0; i < 2; i++) {
		if (i == 0)
			reg_offset = SDMA0_REGISTER_OFFSET;
		else
			reg_offset = SDMA1_REGISTER_OFFSET;
		rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
		rb_cntl &= ~SDMA_RB_ENABLE;
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
		WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
	}
	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
}
/**
 * cik_sdma_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (CIK).
 */
static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
	u32 rb_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
	}
	sdma0->ready = false;
	sdma1->ready = false;
}
/**
 * gmc_v7_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}
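The REG_SET_FIELD calls above are read-modify-write updates of individual bit fields in a register. Here is a standalone sketch of that pattern; SET_FIELD is a hypothetical stand-in for the amdgpu macro, and fake_read/fake_write stand in for RREG32/WREG32.

/* Sketch: update one bit field without disturbing the rest of the register. */
#include <stdint.h>
#include <stdio.h>

#define FAULT_ENABLE_MASK	0x00000002u	/* illustrative field: bit 1 */
#define FAULT_ENABLE_SHIFT	1

#define SET_FIELD(reg, mask, shift, val) \
	(((reg) & ~(mask)) | (((uint32_t)(val) << (shift)) & (mask)))

static uint32_t fake_reg = 0xdeadbee0;			/* pretend MMIO register */
static uint32_t fake_read(void)        { return fake_reg; }
static void     fake_write(uint32_t v) { fake_reg = v; }

int main(void)
{
	uint32_t tmp = fake_read();

	/* read-modify-write: only the FAULT_ENABLE field changes */
	tmp = SET_FIELD(tmp, FAULT_ENABLE_MASK, FAULT_ENABLE_SHIFT, 1);
	fake_write(tmp);

	printf("reg = 0x%08x\n", (unsigned)fake_reg);	/* 0xdeadbee2 */
	return 0;
}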
static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = 0;

	if (adev->asic_type == CHIP_TONGA ||
	    adev->asic_type == CHIP_FIJI) {
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		/* bit0: 0 means pf and 1 means vf */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
		/* bit31: 0 means disable IOV and 1 means enable */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
	}

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}
int rs600_mc_init(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}

	rs600_gpu_init(rdev);
	rs600_gart_disable(rdev);

	/* Setup GPU memory space */
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	rdev->mc.gtt_location = 0xFFFFFFFFUL;
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}

	/* Program GPU memory space */
	/* Enable bus master */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);
	/* FIXME: What does AGP mean for such a chipset? */
	WREG32_MC(RS600_MC_AGP_LOCATION, 0x0FFFFFFF);
	/* FIXME: are these AGP regs in the indirect MC range? */
	WREG32_MC(RS600_MC_AGP_BASE, 0);
	WREG32_MC(RS600_MC_AGP_BASE_2, 0);
	rs600_mc_disable_clients(rdev);
	if (rs600_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
	tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16);
	tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16);
	WREG32_MC(RS600_MC_FB_LOCATION, tmp);
	WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
	return 0;
}
static void uvd_v5_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (RREG32_SMC(ixCURRENT_PG_STATUS) &
	    CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}
int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
		 uint32_t reg_val, uint32_t mask, bool check_changed)
{
	uint32_t val;
	int i;
	struct amdgpu_device *adev = psp->adev;

	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32(reg_index);
		if (check_changed) {
			if (val != reg_val)
				return 0;
		} else {
			if ((val & mask) == reg_val)
				return 0;
		}
		udelay(1);
	}

	return -ETIME;
}
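psp_wait_for() is a bounded register poll with two termination modes: wait for the value to change away from a reference, or wait for the masked value to match it. Below is a standalone sketch of the same pattern with a stubbed register read; the helper name and timeout are illustrative, not the real PSP interface.

/* Sketch: poll a register either for a change or for a masked match. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;				/* pretend MMIO register */
static uint32_t fake_rreg32(void) { return fake_reg; }

static int wait_for_reg(uint32_t ref, uint32_t mask, bool check_changed,
			unsigned timeout_iters)
{
	for (unsigned i = 0; i < timeout_iters; i++) {
		uint32_t val = fake_rreg32();

		if (check_changed ? (val != ref) : ((val & mask) == ref))
			return 0;
		/* a real caller would udelay(1) between reads */
	}
	return -1;	/* timed out */
}

int main(void)
{
	fake_reg = 0x80000000;
	/* masked-match mode succeeds immediately: prints 0 */
	printf("%d\n", wait_for_reg(0x80000000, 0x80000000, false, 10));
	/* change mode never sees a change here: prints -1 */
	printf("%d\n", wait_for_reg(0x80000000, 0, true, 10));
	return 0;
}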
/**
 * cik_ih_get_wptr - get the IH ring buffer wptr
 *
 * @adev: amdgpu_device pointer
 *
 * Get the IH ring buffer wptr from either the register
 * or the writeback memory buffer (CIK).  Also check for
 * ring buffer overflow and deal with it.
 * Used by cik_irq_process().
 * Returns the value of the wptr.
 */
static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
{
	u32 wptr, tmp;

	wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);

	if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
		wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
		/* When a ring buffer overflow happens, start parsing interrupts
		 * from the last not-overwritten vector (wptr + 16).  Hopefully
		 * this should allow us to catch up.
		 */
		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			 wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
		adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
		tmp = RREG32(mmIH_RB_CNTL);
		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
		WREG32(mmIH_RB_CNTL, tmp);
	}
	return (wptr & adev->irq.ih.ptr_mask);
}
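The overflow branch above resynchronizes the read pointer by skipping 16 entries past the reported write pointer, modulo the ring size. A small sketch of that arithmetic, with an assumed ring size for illustration:

/* Sketch: recover the IH read pointer after an overflow. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t ring_entries = 4096;		/* assumed power-of-two size */
	const uint32_t ptr_mask = ring_entries - 1;
	uint32_t wptr = 100;				/* index reported with overflow */

	/* skip 16 entries past wptr so parsing resumes on data that was not
	 * overwritten, wrapping around the ring if needed */
	uint32_t rptr = (wptr + 16) & ptr_mask;

	printf("resume at %u\n", (unsigned)rptr);	/* 116 */
	return 0;
}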
static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
			break;
		udelay(1);
	}
}
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
/**
 * gmc_v7_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	uint32_t tmp;

	if (enable && !adev->mc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->mc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    MASK_PDE0_FAULT, enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn;

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		/* disable the apertures: low > high means no address matches */
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}
/**
 * cik_sdma_enable - enable/disable the async dma engines
 *
 * @rdev: radeon_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (CIK).
 */
void cik_sdma_enable(struct radeon_device *rdev, bool enable)
{
	u32 me_cntl, reg_offset;
	int i;

	if (!enable) {
		cik_sdma_gfx_stop(rdev);
		cik_sdma_rlc_stop(rdev);
	}

	for (i = 0; i < 2; i++) {
		if (i == 0)
			reg_offset = SDMA0_REGISTER_OFFSET;
		else
			reg_offset = SDMA1_REGISTER_OFFSET;
		me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
		if (enable)
			me_cntl &= ~SDMA_HALT;
		else
			me_cntl |= SDMA_HALT;
		WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
	}
}
static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (enable) {
		if (adev->flags & AMD_IS_APU)
			cz_set_uvd_clock_gating_branches(adev, enable);
		else
			tonga_set_uvd_clock_gating_branches(adev, enable);
		uvd_v6_0_set_uvd_dynamic_clock_mode(adev, true);
	} else {
		uint32_t data = RREG32(mmUVD_CGC_CTRL);

		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		WREG32(mmUVD_CGC_CTRL, data);
	}

	return 0;
}
static void legacy_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i;
	uint32_t dac2_cntl;

	dac2_cntl = RREG32(RADEON_DAC_CNTL2);
	if (radeon_crtc->crtc_id == 0)
		dac2_cntl &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL;
	else
		dac2_cntl |= RADEON_DAC2_PALETTE_ACC_CTL;
	WREG32(RADEON_DAC_CNTL2, dac2_cntl);

	WREG8(RADEON_PALETTE_INDEX, 0);
	for (i = 0; i < 256; i++) {
		WREG32(RADEON_PALETTE_30_DATA,
		       (radeon_crtc->lut_r[i] << 20) |
		       (radeon_crtc->lut_g[i] << 10) |
		       (radeon_crtc->lut_b[i] << 0));
	}
}
/**
 * vega10_ih_irq_init - init and enable the interrupt ring
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate a ring buffer for the interrupt controller,
 * enable the RLC, disable interrupts, enable the IH
 * ring buffer and enable it (VEGA10).
 * Called at device load and resume.
 * Returns 0 for success, errors for failure.
 */
static int vega10_ih_irq_init(struct amdgpu_device *adev)
{
	int ret = 0;
	int rb_bufsz;
	u32 ih_rb_cntl, ih_doorbell_rtpr;
	u32 tmp;
	u64 wptr_off;

	/* disable irqs */
	vega10_ih_disable_interrupts(adev);

	if (adev->flags & AMD_IS_APU)
		nbio_v7_0_ih_control(adev);
	else
		nbio_v6_1_ih_control(adev);

	ih_rb_cntl = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
	if (adev->irq.ih.use_bus_addr) {
		WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE),
		       adev->irq.ih.rb_dma_addr >> 8);
		WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI),
		       ((u64)adev->irq.ih.rb_dma_addr >> 40) & 0xff);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SPACE, 1);
	} else {
void soc15_program_register_sequence(struct amdgpu_device *adev,
				     const struct soc15_reg_golden *regs,
				     const u32 array_size)
{
	const struct soc15_reg_golden *entry;
	u32 tmp, reg;
	int i;

	for (i = 0; i < array_size; ++i) {
		entry = &regs[i];
		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment]
			+ entry->reg;

		if (entry->and_mask == 0xffffffff) {
			tmp = entry->or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~(entry->and_mask);
			tmp |= entry->or_mask;
		}
		WREG32(reg, tmp);
	}
}
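soc15_program_register_sequence() applies a table of "golden" settings where a full-ones AND mask means a straight write and anything else means a read-modify-write. Below is a standalone sketch of that pattern, with a made-up register array and table in place of real MMIO; the struct and function names are illustrative only.

/* Sketch: apply a golden-register table to an in-memory register array. */
#include <stdint.h>
#include <stdio.h>

struct golden_reg {
	uint32_t offset;	/* index into the fake register file */
	uint32_t and_mask;	/* bits to clear (0xffffffff = replace whole reg) */
	uint32_t or_mask;	/* bits to set */
};

static uint32_t regs[4] = { 0x11111111, 0x22222222, 0x33333333, 0x44444444 };

static void program_golden(const struct golden_reg *tbl, unsigned n)
{
	for (unsigned i = 0; i < n; i++) {
		uint32_t tmp;

		if (tbl[i].and_mask == 0xffffffff) {
			tmp = tbl[i].or_mask;		/* straight write */
		} else {
			tmp = regs[tbl[i].offset];	/* read-modify-write */
			tmp &= ~tbl[i].and_mask;
			tmp |= tbl[i].or_mask;
		}
		regs[tbl[i].offset] = tmp;
	}
}

int main(void)
{
	const struct golden_reg tbl[] = {
		{ 0, 0xffffffff, 0xdeadbeef },	/* replace the entire register */
		{ 1, 0x0000ff00, 0x00005600 },	/* patch one byte-sized field */
	};

	program_golden(tbl, 2);
	/* prints: 0xdeadbeef 0x22225622 */
	printf("0x%08x 0x%08x\n", (unsigned)regs[0], (unsigned)regs[1]);
	return 0;
}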
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info = { 0 };

		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid, entry->pasid,
			task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, " at address 0x%016llx from %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
	}

	return 0;
}
static int cik_sdma_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* keep the busy status in its own variable so the SDMA1 check below is
	 * not confused by the F32_CNTL value read for SDMA0 */
	u32 srbm_status2 = RREG32(mmSRBM_STATUS2);
	u32 tmp;

	if (srbm_status2 & SRBM_STATUS2__SDMA_BUSY_MASK) {
		/* sdma0 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
		tmp |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
	}
	if (srbm_status2 & SRBM_STATUS2__SDMA1_BUSY_MASK) {
		/* sdma1 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
		tmp |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
	}

	if (srbm_soft_reset) {
		cik_sdma_print_status((void *)adev);

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		cik_sdma_print_status((void *)adev);
	}

	return 0;
}
static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ridx = *(int *)node->info_ent->data;
	struct radeon_ring *ring = &rdev->ring[ridx];
	unsigned count, i, j;
	u32 tmp;

	radeon_ring_free_size(rdev, ring);
	count = (ring->ring_size / 4) - ring->ring_free_dw;
	tmp = radeon_ring_get_wptr(rdev, ring);
	seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n",
		   ring->wptr_reg, tmp, tmp);
	tmp = radeon_ring_get_rptr(rdev, ring);
	seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n",
		   ring->rptr_reg, tmp, tmp);
	if (ring->rptr_save_reg) {
		seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
			   RREG32(ring->rptr_save_reg));
	}
	seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
		   ring->wptr, ring->wptr);
	seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n",
		   ring->rptr, ring->rptr);
	seq_printf(m, "last semaphore signal addr : 0x%016llx\n",
		   ring->last_semaphore_signal_addr);
	seq_printf(m, "last semaphore wait addr : 0x%016llx\n",
		   ring->last_semaphore_wait_addr);
	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	/* print 32 dw before current rptr as often it's the last executed
	 * packet that is the root issue
	 */
	i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
	for (j = 0; j <= (count + 32); j++) {
		seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}
void dce4_hdmi_set_color_depth(struct drm_encoder *encoder, u32 offset, int bpc)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
	uint32_t val;

	val = RREG32(HDMI_CONTROL + offset);
	val &= ~HDMI_DEEP_COLOR_ENABLE;
	val &= ~HDMI_DEEP_COLOR_DEPTH_MASK;

	switch (bpc) {
	case 0:
	case 6:
	case 8:
	case 16:
	default:
		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
			  connector->name, bpc);
		break;
	case 10:
		val |= HDMI_DEEP_COLOR_ENABLE;
		val |= HDMI_DEEP_COLOR_DEPTH(HDMI_30BIT_DEEP_COLOR);
		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
			  connector->name);
		break;
	case 12:
		val |= HDMI_DEEP_COLOR_ENABLE;
		val |= HDMI_DEEP_COLOR_DEPTH(HDMI_36BIT_DEEP_COLOR);
		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
			  connector->name);
		break;
	}

	WREG32(HDMI_CONTROL + offset, val);
}
static bool uvd_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}
/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v6_0_mc_resume(adev);

	/* disable clock gating */
	WREG32_FIELD(UVD_CGC_CTRL, DYN_CLOCK_MODE, 0);

	/* disable interrupt */
	WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET,
	       UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
	       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
	       UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
	       UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
	       UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
	       UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
	       UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
	       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL,
	       (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
	       UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
	       UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
	       UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
	       UVD_LMI_CTRL__REQ_MODE_MASK |
	       UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN,
		 (UVD_MASTINT_EN__VCPU_EN_MASK | UVD_MASTINT_EN__SYS_EN_MASK),
		 ~(UVD_MASTINT_EN__VCPU_EN_MASK | UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

	WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

	return 0;
}