static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
	u32 reg;
	int timeout = AI_MAILBOX_TIMEDOUT;
	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL,
			    RCV_MSG_ACK, 1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				       mmBIF_BX_PF0_MAILBOX_CONTROL), reg);

	/* Wait for RCV_MSG_VALID to be 0 */
	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	while (reg & mask) {
		if (timeout <= 0) {
			pr_err("RCV_MSG_VALID is not cleared\n");
			break;
		}
		mdelay(1);
		timeout -= 1;

		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						     mmBIF_BX_PF0_MAILBOX_CONTROL));
	}
}
static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req,
				      u32 data1, u32 data2, u32 data3)
{
	u32 reg;
	int r;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				       mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				       mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
		      data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				       mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
		      data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				       mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
		      data3);

	xgpu_ai_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		pr_err("Doesn't get ack from pf, continue\n");

	xgpu_ai_mailbox_set_valid(adev, false);
}
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/* By now all MMIO pages except the mailbox are blocked if blocking
	 * is enabled in the hypervisor.  Choose SCRATCH_REG0 to test.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}
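/* Illustrative sketch (not the exact upstream code): a blocked MMIO
 * space reads back as all ones, so an early-init path can use the check
 * above to bail out when the hypervisor has fenced off register access.
 * The amdgpu_virt_wait_reset() recovery step and the error handling
 * shown here are assumptions for illustration.
 */
static int amdgpu_example_check_mmio(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev) &&
	    amdgpu_virt_mmio_blocked(adev) &&
	    !amdgpu_virt_wait_reset(adev)) {
		dev_err(adev->dev, "VF exclusive mode timeout\n");
		return -EAGAIN;
	}

	return 0;
}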
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	/* start to check msg if request is idh_req_gpu_init_access */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
	    req == IDH_REQ_GPU_FINI_ACCESS ||
	    req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
			return r;
		}
		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS ||
		    req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
		}
	}

	return 0;
}
static void xgpu_vi_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	u32 reg;

	reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
	reg = REG_SET_FIELD(reg, MAILBOX_CONTROL,
			    TRN_MSG_VALID, val ? 1 : 0);
	WREG32_NO_KIQ(mmMAILBOX_CONTROL, reg);
}
/**
 * gmc_v9_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
				   uint32_t vmid)
{
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned i, j;

	spin_lock(&adev->gmc.invalidate_lock);

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid);

		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

		/* Busy wait for ACK. */
		for (j = 0; j < 100; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			cpu_relax();
		}
		if (j < 100)
			continue;

		/* Wait for ACK with a delay. */
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			udelay(1);
		}
		if (j < adev->usec_timeout)
			continue;

		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}

	spin_unlock(&adev->gmc.invalidate_lock);
}
static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL,
			    TRN_MSG_VALID, val ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				       mmBIF_BX_PF0_MAILBOX_CONTROL), reg);
}
static int xgpu_vi_poll_ack(struct amdgpu_device *adev)
{
	int r = 0, timeout = VI_MAILBOX_TIMEDOUT;
	u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, TRN_MSG_ACK);
	u32 reg;

	reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
	while (!(reg & mask)) {
		if (timeout <= 0) {
			pr_err("Doesn't get ack from pf.\n");
			r = -ETIME;
			break;
		}
		mdelay(5);
		timeout -= 5;

		reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
	}

	return r;
}
static void xgpu_vi_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req)
{
	u32 reg;

	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0);
	reg = REG_SET_FIELD(reg, MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, reg);

	xgpu_vi_mailbox_set_valid(adev, true);
}
static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						 mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				       mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}
static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}
static int xgpu_vi_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;
	u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, RCV_MSG_VALID);

	/* workaround: host driver doesn't set VALID for CMPL now */
	if (event != IDH_FLR_NOTIFICATION_CMPL) {
		reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
		if (!(reg & mask))
			return -ENOENT;
	}

	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
	if (reg != event)
		return -ENOENT;

	/* send ack to PF */
	xgpu_vi_mailbox_send_ack(adev);

	return 0;
}
static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;
	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);

	/* workaround: host driver doesn't set VALID for CMPL now */
	if (event != IDH_FLR_NOTIFICATION_CMPL) {
		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						     mmBIF_BX_PF0_MAILBOX_CONTROL));
		if (!(reg & mask))
			return -ENOENT;
	}

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	/* send ack to PF */
	xgpu_ai_mailbox_send_ack(adev);

	return 0;
}
static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				       mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);

	xgpu_ai_mailbox_set_valid(adev, true);
}
static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_ai_mailbox_send_ack(adev);

	return 0;
}
static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
	int r = 0, timeout = AI_MAILBOX_TIMEDOUT;
	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, TRN_MSG_ACK);
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	while (!(reg & mask)) {
		if (timeout <= 0) {
			pr_err("Doesn't get ack from pf.\n");
			r = -ETIME;
			break;
		}
		msleep(1);
		timeout -= 1;

		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						     mmBIF_BX_PF0_MAILBOX_CONTROL));
	}

	return r;
}
static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf)
{
	int r = 0;
	u32 req, val, size;

	if (!amdgim_is_hwperf(adev) || buf == NULL)
		return -EBADRQC;

	switch (type) {
	case PP_SCLK:
		req = IDH_IRQ_GET_PP_SCLK;
		break;
	case PP_MCLK:
		req = IDH_IRQ_GET_PP_MCLK;
		break;
	default:
		return -EBADRQC;
	}

	mutex_lock(&adev->virt.dpm_mutex);

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
	if (!r && adev->fw_vram_usage.va != NULL) {
		val = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
		size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) + val),
			       PAGE_SIZE);

		if (size < PAGE_SIZE)
			strcpy(buf, ((char *)adev->virt.fw_reserve.p_pf2vf + val));
		else
			size = 0;

		r = size;
		goto out;
	}

	r = xgpu_ai_poll_msg(adev, IDH_FAIL);
	if (r)
		pr_info("%s DPM request failed",
			(type == PP_SCLK) ? "SCLK" : "MCLK");

out:
	mutex_unlock(&adev->virt.dpm_mutex);
	return r;
}
static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req,
				      u32 data1, u32 data2, u32 data3)
{
	u32 reg;
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID so the host's RCV_MSG_VALID is deasserted;
	 * once it is, the hardware automatically clears the host's
	 * RCV_MSG_ACK, which in turn clears the VF's TRN_MSG_ACK.
	 * Otherwise a stale ack would make xgpu_ai_poll_ack() below
	 * return immediately.
	 */
	do {
		xgpu_ai_mailbox_set_valid(adev, false);
		trn = xgpu_ai_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not assert! wait again!\n", trn);
			msleep(1);
		}
	} while (trn);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				       mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				       mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
		      data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				       mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
		      data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				       mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
		      data3);

	xgpu_ai_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		pr_err("Doesn't get ack from pf, continue\n");

	xgpu_ai_mailbox_set_valid(adev, false);
}
/*
 * This peek_msg may *only* be called from the IRQ routine, because in the
 * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF0_MAILBOX_CONTROL is
 * guaranteed to already have been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg is not guaranteed to return
 * the correct value, since it reads RCV_DW0 without checking whether
 * RCV_MSG_VALID has been set by the host.
 */
static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					      mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
}
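/* Illustrative sketch (not the exact upstream handler): since peek_msg
 * is only safe once the mailbox interrupt has fired, its natural consumer
 * is the receive IRQ handler, where RCV_MSG_VALID is known to be set.
 * The IDH_FLR_NOTIFICATION case and the virt.flr_work item are
 * assumptions for illustration.
 */
static int xgpu_ai_example_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		/* Defer function-level-reset handling to process context. */
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.flr_work);
		break;
	default:
		break;
	}

	return 0;
}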