static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_device = accel_dev->hw_device; struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR]; void __iomem *csr = misc_bar->virt_addr; unsigned int val, i; /* Enable Accel Engine error detection & correction */ for (i = 0; i < hw_device->get_num_aes(hw_device); i++) { val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_CTX_ENABLES(i)); val |= ADF_DH895XCC_ENABLE_AE_ECC_ERR; ADF_CSR_WR(csr, ADF_DH895XCC_AE_CTX_ENABLES(i), val); val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_MISC_CONTROL(i)); val |= ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR; ADF_CSR_WR(csr, ADF_DH895XCC_AE_MISC_CONTROL(i), val); } /* Enable shared memory error detection & correction */ for (i = 0; i < hw_device->get_num_accels(hw_device); i++) { val = ADF_CSR_RD(csr, ADF_DH895XCC_UERRSSMSH(i)); val |= ADF_DH895XCC_ERRSSMSH_EN; ADF_CSR_WR(csr, ADF_DH895XCC_UERRSSMSH(i), val); val = ADF_CSR_RD(csr, ADF_DH895XCC_CERRSSMSH(i)); val |= ADF_DH895XCC_ERRSSMSH_EN; ADF_CSR_WR(csr, ADF_DH895XCC_CERRSSMSH(i), val); } }
/*
 * adf_queue_arb_disable() - remove a TX queue from arbiter service.
 * @txq: queue whose arbitration enable bit is to be cleared.
 * @base_addr: base address of the arbiter CSR region.
 *
 * Clears this queue's bit in the ring-service arbitration enable register
 * of its bundle. The bit is cleared with AND-NOT, not XOR: XOR *toggles*
 * the bit, so calling "disable" on an already-disabled queue would
 * re-enable arbitration for it. AND-NOT makes the operation idempotent.
 */
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
{
	uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
				  (ADF_ARB_REG_SLOT * txq->hw_bundle_number);
	uint32_t value;

	PMD_INIT_FUNC_TRACE();
	value = ADF_CSR_RD(base_addr, arb_csr_offset);
	/* Clear (not toggle) the queue's enable bit */
	value &= ~(0x01 << txq->hw_queue_number);
	ADF_CSR_WR(base_addr, arb_csr_offset, value);
}
static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle) { void __iomem *csr_addr = handle->hal_ep_csr_addr_v + ESRAM_AUTO_INIT_CSR_OFFSET; unsigned int csr_val, times = 30; csr_val = ADF_CSR_RD(csr_addr, 0); if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE)) return 0; csr_val = ADF_CSR_RD(csr_addr, 0); csr_val |= ESRAM_AUTO_TINIT; ADF_CSR_WR(csr_addr, 0, csr_val); do { qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0); csr_val = ADF_CSR_RD(csr_addr, 0); } while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--); if ((!times)) { pr_err("QAT: Fail to init eSram!\n"); return -EFAULT; } return 0; }
/*
 * adf_put_admin_msg_sync() - send one admin message to AE @ae and wait
 * synchronously for the firmware's reply.
 * @accel_dev: device owning the admin comms region and mailbox CSRs.
 * @ae: index of the target Accel Engine; selects this AE's message slot
 *     pair and mailbox register.
 * @in: request of ADF_ADMINMSG_LEN bytes, copied into shared memory.
 * @out: buffer of ADF_ADMINMSG_LEN bytes receiving the reply on success.
 *
 * Mailbox protocol (as implemented here): writing 1 to the mailbox hands
 * ownership to firmware; firmware writes 0 back when the reply has been
 * placed in the second half of the AE's slot pair. The whole exchange is
 * serialized by admin->lock. Sleeps (msleep), so must not be called from
 * atomic context.
 *
 * Return: 0 on success, -EAGAIN if a message is already in flight,
 * -EFAULT if no reply arrived within the polling window.
 */
static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae,
				  void *in, void *out)
{
	struct adf_admin_comms *admin = accel_dev->admin;
	/* Two ADF_ADMINMSG_LEN slots per AE: request, then reply */
	int offset = ae * ADF_ADMINMSG_LEN * 2;
	void __iomem *mailbox = admin->mailbox_addr;
	int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE;
	int times, received;

	mutex_lock(&admin->lock);
	/* Mailbox still reads 1: previous message not yet consumed */
	if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
		mutex_unlock(&admin->lock);
		return -EAGAIN;
	}
	/* Stage the request in shared memory, then ring the doorbell */
	memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
	ADF_CSR_WR(mailbox, mb_offset, 1);
	received = 0;
	/* Poll up to 50 * 20 ms = 1 s for firmware to clear the mailbox */
	for (times = 0; times < 50; times++) {
		msleep(20);
		if (ADF_CSR_RD(mailbox, mb_offset) == 0) {
			received = 1;
			break;
		}
	}
	if (received)
		/* Reply occupies the second slot of the pair */
		memcpy(out, admin->virt_addr + offset + ADF_ADMINMSG_LEN,
		       ADF_ADMINMSG_LEN);
	else
		dev_err(&GET_DEV(accel_dev),
			"Failed to send admin msg to accelerator\n");
	mutex_unlock(&admin->lock);
	return received ? 0 : -EFAULT;
}
/*
 * adf_isr() - top-half interrupt handler for the VF.
 * @irq: interrupt line (unused beyond the standard handler signature).
 * @privdata: the struct adf_accel_dev registered with request_irq.
 *
 * Reads the VF interrupt source CSR and defers all real work to tasklets:
 * PF-to-VF messages go to the pf2vf bottom half, ring/bundle interrupts
 * to the bank's response handler. In both cases the corresponding
 * interrupt source is masked *before* the tasklet is scheduled so it
 * cannot re-fire until the bottom half re-enables it.
 *
 * Return: IRQ_HANDLED if a known source was serviced, IRQ_NONE otherwise
 * (lets the kernel detect spurious/shared-line interrupts).
 */
static irqreturn_t adf_isr(int irq, void *privdata)
{
	struct adf_accel_dev *accel_dev = privdata;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *pmisc =
		&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *pmisc_bar_addr = pmisc->virt_addr;
	u32 v_int;

	/* Read VF INT source CSR to determine the source of VF interrupt */
	v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTSOU_OFFSET);

	/* Check for PF2VF interrupt */
	if (v_int & ADF_VINTSOU_PF2VF) {
		/* Disable PF to VF interrupt */
		adf_disable_pf2vf_interrupts(accel_dev);

		/* Schedule tasklet to handle interrupt BH */
		tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
		return IRQ_HANDLED;
	}

	/* Check bundle interrupt */
	if (v_int & ADF_VINTSOU_BUN) {
		struct adf_etr_data *etr_data = accel_dev->transport;
		/* VF owns exactly one bank; see banks[0] */
		struct adf_etr_bank_data *bank = &etr_data->banks[0];

		/* Disable Flag and Coalesce Ring Interrupts */
		WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
					   0);
		tasklet_hi_schedule(&bank->resp_handler);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
/*
 * adf_pf2vf_bh_handler() - bottom half for PF-to-VF messages.
 * @data: the struct adf_accel_dev passed when the tasklet was set up.
 *
 * Reads the PF2VF message register and dispatches on the message type:
 *
 *  - RESTARTING: the PF is going down; queue asynchronous VF stop work,
 *    ack the message, and return WITHOUT re-enabling PF2VF interrupts
 *    (the device is stopping, so no further messages are expected).
 *  - VERSION_RESP: record the PF's version/compatibility result and
 *    complete the waiter blocked on iov_msg_completion.
 *
 * Acking is done by clearing bit 0 (the PF2VFINT bit) and writing the
 * message back. On the normal path the PF2VF interrupt - masked by the
 * top-half ISR - is re-enabled afterwards. Unknown or legacy
 * (non-system-origin) messages leave interrupts disabled and log an
 * error, deliberately stopping further PF2VF traffic.
 */
static void adf_pf2vf_bh_handler(void *data)
{
	struct adf_accel_dev *accel_dev = data;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *pmisc =
		&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *pmisc_bar_addr = pmisc->virt_addr;
	u32 msg;

	/* Read the message from PF */
	msg = ADF_CSR_RD(pmisc_bar_addr, hw_data->get_pf2vf_offset(0));

	if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM))
		/* Ignore legacy non-system (non-kernel) PF2VF messages */
		goto err;

	switch ((msg & ADF_PF2VF_MSGTYPE_MASK) >> ADF_PF2VF_MSGTYPE_SHIFT) {
	case ADF_PF2VF_MSGTYPE_RESTARTING: {
		struct adf_vf_stop_data *stop_data;

		dev_dbg(&GET_DEV(accel_dev),
			"Restarting msg received from PF 0x%x\n", msg);

		clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);

		/* GFP_ATOMIC: tasklet context, must not sleep */
		stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
		if (!stop_data) {
			dev_err(&GET_DEV(accel_dev),
				"Couldn't schedule stop for vf_%d\n",
				accel_dev->accel_id);
			return;
		}
		stop_data->accel_dev = accel_dev;
		INIT_WORK(&stop_data->work, adf_dev_stop_async);
		queue_work(adf_vf_stop_wq, &stop_data->work);
		/* To ack, clear the PF2VFINT bit */
		msg &= ~BIT(0);
		ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);
		/* Device is stopping: PF2VF interrupts stay disabled */
		return;
	}
	case ADF_PF2VF_MSGTYPE_VERSION_RESP:
		dev_dbg(&GET_DEV(accel_dev),
			"Version resp received from PF 0x%x\n", msg);
		accel_dev->vf.pf_version =
			(msg & ADF_PF2VF_VERSION_RESP_VERS_MASK) >>
			ADF_PF2VF_VERSION_RESP_VERS_SHIFT;
		accel_dev->vf.compatible =
			(msg & ADF_PF2VF_VERSION_RESP_RESULT_MASK) >>
			ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		/* Wake the thread waiting in the version-request path */
		complete(&accel_dev->vf.iov_msg_completion);
		break;
	default:
		goto err;
	}

	/* To ack, clear the PF2VFINT bit */
	msg &= ~BIT(0);
	ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);

	/* Re-enable PF2VF interrupts */
	adf_enable_pf2vf_interrupts(accel_dev);
	return;
err:
	dev_err(&GET_DEV(accel_dev),
		"Unknown message from PF (0x%x); leaving PF2VF ints disabled\n",
		msg);
}