/*
 * Original VI version: the pipe count is the hard-coded VI_PIPE_PER_MEC
 * constant, and the pre-increment biases the KFD pipe index by one, so
 * KFD pipe 0 lands on hardware pipe 1 of MEC1.
 */
static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
			uint32_t queue_id)
{
	uint32_t mec = (++pipe_id / VI_PIPE_PER_MEC) + 1;
	uint32_t pipe = (pipe_id % VI_PIPE_PER_MEC);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}
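/*
 * For reference (not part of the excerpt above): the handle-conversion
 * helper used below. A minimal sketch, assuming the kgd_dev handle is
 * simply the amdgpu device, as in the VI-era kernel code:
 */
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}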
static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid)
{
	/* adev is consumed by the WREG32() macro below. */
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);
	WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
	unlock_srbm(kgd);
}
/*
 * Updated version: reads the pipe count from the device instead of the
 * hard-coded constant and drops the one-pipe bias.
 */
static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
			uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}
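/*
 * For context (not part of the excerpt): a sketch of the SRBM helpers
 * the queue/interrupt functions here rely on, following the VI-era
 * kernel code. PIPEID()/MEID()/VMID()/QUEUEID() are the SRBM_GFX_CNTL
 * field macros from vid.h; srbm_mutex serializes SRBM bank selection.
 */
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

	mutex_lock(&adev->srbm_mutex);
	WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/* Restore the default bank selection before dropping the lock. */
	WREG32(mmSRBM_GFX_CNTL, 0);
	mutex_unlock(&adev->srbm_mutex);
}

/* Counterpart to acquire_queue(): just releases the SRBM selection. */
static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}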
static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
	WREG32(mmSH_MEM_BASES, sh_mem_bases);

	unlock_srbm(kgd);
}
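/*
 * Illustrative usage (hypothetical, not from the original file): a
 * caller turning the APE1 aperture off for a VMID. One convention for
 * disabling it is a limit below the base; the field layouts of the
 * config and bases words are in the SH_MEM_* register definitions.
 */
static void example_clear_ape1(struct kgd_dev *kgd, uint32_t vmid,
				uint32_t sh_mem_config,
				uint32_t sh_mem_bases)
{
	/* APE1 limit (0) below APE1 base (1): aperture disabled. */
	kgd_program_sh_mem_settings(kgd, vmid, sh_mem_config,
					1, 0, sh_mem_bases);
}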
/*
 * Original VI version: hard-coded pipe count with the same one-pipe
 * bias as the original acquire_queue(); enables only the timestamp
 * interrupt on the pipe.
 */
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (++pipe_id / VI_PIPE_PER_MEC) + 1;
	pipe = (pipe_id % VI_PIPE_PER_MEC);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}
/*
 * Updated version: takes the pipe count from the device, drops the
 * bias, and additionally enables opcode-error interrupts on the pipe.
 */
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
			CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}
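/*
 * Illustrative usage (hypothetical, not from the original file): how a
 * caller such as an HQD-load routine brackets per-queue register writes
 * with acquire_queue()/release_queue(). "mqd_regs" and "n_regs" are
 * made-up names for this sketch.
 */
static void example_write_hqd(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t queue_id,
				const uint32_t *mqd_regs, uint32_t n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t i;

	/* Select this queue's register bank... */
	acquire_queue(kgd, pipe_id, queue_id);

	/* ...program its registers while the bank is selected... */
	for (i = 0; i < n_regs; i++)
		WREG32(mmCP_MQD_BASE_ADDR + i, mqd_regs[i]);

	/* ...and restore the default bank. */
	release_queue(kgd);
}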