static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
	void __iomem *csr = misc_bar->virt_addr;
	unsigned int val, i;

	/* Enable Accel Engine error detection & correction */
	for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
		val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_CTX_ENABLES(i));
		val |= ADF_DH895XCC_ENABLE_AE_ECC_ERR;
		ADF_CSR_WR(csr, ADF_DH895XCC_AE_CTX_ENABLES(i), val);
		val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_MISC_CONTROL(i));
		val |= ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR;
		ADF_CSR_WR(csr, ADF_DH895XCC_AE_MISC_CONTROL(i), val);
	}

	/* Enable shared memory error detection & correction */
	for (i = 0; i < hw_device->get_num_accels(hw_device); i++) {
		val = ADF_CSR_RD(csr, ADF_DH895XCC_UERRSSMSH(i));
		val |= ADF_DH895XCC_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_DH895XCC_UERRSSMSH(i), val);
		val = ADF_CSR_RD(csr, ADF_DH895XCC_CERRSSMSH(i));
		val |= ADF_DH895XCC_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_DH895XCC_CERRSSMSH(i), val);
	}
}
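/*
 * A minimal sketch of how the hook above is typically installed: the
 * dh895xcc init code fills the per-device ops table with function
 * pointers, and the core driver calls them through hw_data. The other
 * callback names (get_num_accels, get_num_aes) are assumed per-device
 * helpers, shown for orientation only.
 */
void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
{
	hw_data->enable_error_correction = adf_enable_error_correction;
	hw_data->get_num_accels = get_num_accels;	/* assumed helper */
	hw_data->get_num_aes = get_num_aes;		/* assumed helper */
	/* ... remaining callbacks elided ... */
}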
int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
{
	struct adf_admin_comms *admin;
	struct adf_bar *pmisc = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
	void __iomem *csr = pmisc->virt_addr;
	void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET;
	uint64_t reg_val;

	admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
			     dev_to_node(&GET_DEV(accel_dev)));
	if (!admin)
		return -ENOMEM;
	admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
					       &admin->phy_addr, GFP_KERNEL);
	if (!admin->virt_addr) {
		dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
		kfree(admin);
		return -ENOMEM;
	}
	reg_val = (uint64_t)admin->phy_addr;
	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
	mutex_init(&admin->lock);
	admin->mailbox_addr = mailbox;
	accel_dev->admin = admin;
	return 0;
}
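/*
 * A hedged sketch of how the resources set up above are consumed: a
 * caller takes admin->lock, copies a request into the shared DMA page,
 * rings the per-AE mailbox, and polls for the firmware to clear it.
 * ADF_ADMINMSG_LEN, ADF_DH895XCC_MAILBOX_STRIDE, and the retry counts
 * are assumptions, not values confirmed by this file; the msleep()
 * assumes process context.
 */
#define ADF_ADMINMSG_LEN		32	/* assumed request/response size */
#define ADF_DH895XCC_MAILBOX_STRIDE	0x1000	/* assumed per-AE stride */

static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae,
				  void *in, void *out)
{
	struct adf_admin_comms *admin = accel_dev->admin;
	int offset = ae * ADF_ADMINMSG_LEN * 2;
	void __iomem *mailbox = admin->mailbox_addr;
	int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE;
	int times, received = 0;

	mutex_lock(&admin->lock);

	/* Mailbox still set: firmware has not consumed the last message */
	if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
		mutex_unlock(&admin->lock);
		return -EAGAIN;
	}

	/* Request goes into the shared DMA page, then ring the mailbox */
	memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
	ADF_CSR_WR(mailbox, mb_offset, 1);

	/* Firmware clears the mailbox when the response is ready */
	for (times = 0; times < 50; times++) {
		msleep(20);
		if (ADF_CSR_RD(mailbox, mb_offset) == 0) {
			received = 1;
			break;
		}
	}
	if (received)
		memcpy(out, admin->virt_addr + offset + ADF_ADMINMSG_LEN,
		       ADF_ADMINMSG_LEN);

	mutex_unlock(&admin->lock);
	return received ? 0 : -EFAULT;
}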
int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
{
	struct adf_admin_comms *admin;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *pmisc =
		&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *csr = pmisc->virt_addr;
	void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET;
	u64 reg_val;

	admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
			     dev_to_node(&GET_DEV(accel_dev)));
	if (!admin)
		return -ENOMEM;
	admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
					       &admin->phy_addr, GFP_KERNEL);
	if (!admin->virt_addr) {
		dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
		kfree(admin);
		return -ENOMEM;
	}

	admin->const_tbl_addr = dma_map_single(&GET_DEV(accel_dev),
					       (void *) const_tab, 1024,
					       DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&GET_DEV(accel_dev),
				       admin->const_tbl_addr))) {
		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
				  admin->virt_addr, admin->phy_addr);
		kfree(admin);
		return -ENOMEM;
	}
	reg_val = (u64)admin->phy_addr;
	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
	mutex_init(&admin->lock);
	admin->mailbox_addr = mailbox;
	accel_dev->admin = admin;
	return 0;
}
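/*
 * A hedged sketch of the symmetric teardown for the variant above:
 * unmap the constant table, free the DMA page, destroy the mutex, and
 * clear the pointer. The 1024-byte size and DMA_TO_DEVICE direction
 * mirror the dma_map_single() call in init; the function name and exact
 * ordering are assumptions.
 */
void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
{
	struct adf_admin_comms *admin = accel_dev->admin;

	if (!admin)
		return;

	if (admin->virt_addr)
		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
				  admin->virt_addr, admin->phy_addr);

	/* Size and direction mirror the dma_map_single() in init above */
	dma_unmap_single(&GET_DEV(accel_dev), admin->const_tbl_addr, 1024,
			 DMA_TO_DEVICE);
	mutex_destroy(&admin->lock);
	kfree(admin);
	accel_dev->admin = NULL;
}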
static irqreturn_t adf_isr(int irq, void *privdata)
{
	struct adf_accel_dev *accel_dev = privdata;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *pmisc =
		&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *pmisc_bar_addr = pmisc->virt_addr;
	u32 v_int;

	/* Read VF INT source CSR to determine the source of VF interrupt */
	v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTSOU_OFFSET);

	/* Check for PF2VF interrupt */
	if (v_int & ADF_VINTSOU_PF2VF) {
		/* Disable PF to VF interrupt */
		adf_disable_pf2vf_interrupts(accel_dev);

		/* Schedule tasklet to handle interrupt BH */
		tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
		return IRQ_HANDLED;
	}

	/* Check bundle interrupt */
	if (v_int & ADF_VINTSOU_BUN) {
		struct adf_etr_data *etr_data = accel_dev->transport;
		struct adf_etr_bank_data *bank = &etr_data->banks[0];

		/* Disable Flag and Coalesce Ring Interrupts */
		WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
					   0);
		tasklet_hi_schedule(&bank->resp_handler);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
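/*
 * A hedged sketch of how adf_isr() is typically registered: the VF
 * exposes a single MSI vector, so one request_irq() on pdev->irq covers
 * both interrupt sources dispatched above. accel_to_pci_dev(), the
 * irq_name buffer, and ADF_MAX_MSIX_VECTOR_NAME are assumed accessors
 * and fields, not confirmed by this file.
 */
static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);	/* assumed */
	int ret;

	snprintf(accel_dev->vf.irq_name, ADF_MAX_MSIX_VECTOR_NAME,
		 "qat_%02x:%02d.%02d", pdev->bus->number,
		 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
	/* Single MSI vector on the VF: one shared top half for both the
	 * PF2VF and bundle interrupt sources */
	ret = request_irq(pdev->irq, adf_isr, 0, accel_dev->vf.irq_name,
			  (void *)accel_dev);
	if (ret)
		dev_err(&GET_DEV(accel_dev), "failed to enable irq for %s\n",
			accel_dev->vf.irq_name);
	return ret;
}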
static void adf_pf2vf_bh_handler(void *data)
{
	struct adf_accel_dev *accel_dev = data;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *pmisc =
		&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *pmisc_bar_addr = pmisc->virt_addr;
	u32 msg;

	/* Read the message from PF */
	msg = ADF_CSR_RD(pmisc_bar_addr, hw_data->get_pf2vf_offset(0));

	if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM))
		/* Ignore legacy non-system (non-kernel) PF2VF messages */
		goto err;

	switch ((msg & ADF_PF2VF_MSGTYPE_MASK) >> ADF_PF2VF_MSGTYPE_SHIFT) {
	case ADF_PF2VF_MSGTYPE_RESTARTING: {
		struct adf_vf_stop_data *stop_data;

		dev_dbg(&GET_DEV(accel_dev),
			"Restarting msg received from PF 0x%x\n", msg);

		clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);

		stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
		if (!stop_data) {
			dev_err(&GET_DEV(accel_dev),
				"Couldn't schedule stop for vf_%d\n",
				accel_dev->accel_id);
			return;
		}
		stop_data->accel_dev = accel_dev;
		INIT_WORK(&stop_data->work, adf_dev_stop_async);
		queue_work(adf_vf_stop_wq, &stop_data->work);
		/* To ack, clear the PF2VFINT bit */
		msg &= ~BIT(0);
		ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);
		return;
	}
	case ADF_PF2VF_MSGTYPE_VERSION_RESP:
		dev_dbg(&GET_DEV(accel_dev),
			"Version resp received from PF 0x%x\n", msg);
		accel_dev->vf.pf_version =
			(msg & ADF_PF2VF_VERSION_RESP_VERS_MASK) >>
			ADF_PF2VF_VERSION_RESP_VERS_SHIFT;
		accel_dev->vf.compatible =
			(msg & ADF_PF2VF_VERSION_RESP_RESULT_MASK) >>
			ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		complete(&accel_dev->vf.iov_msg_completion);
		break;
	default:
		goto err;
	}

	/* To ack, clear the PF2VFINT bit */
	msg &= ~BIT(0);
	ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);

	/* Re-enable PF2VF interrupts */
	adf_enable_pf2vf_interrupts(accel_dev);
	return;
err:
	dev_err(&GET_DEV(accel_dev),
		"Unknown message from PF (0x%x); leaving PF2VF ints disabled\n",
		msg);
}
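/*
 * A minimal sketch of the bottom-half wiring the handler above relies
 * on: the tasklet that adf_isr() schedules is bound to
 * adf_pf2vf_bh_handler() with the device as its argument. The cast
 * matches the classic tasklet_init() signature taking an unsigned long;
 * the vf2pf_lock init is an assumed companion step.
 */
static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev)
{
	/* adf_isr() schedules this tasklet on a PF2VF interrupt */
	tasklet_init(&accel_dev->vf.pf2vf_bh_tasklet,
		     (void *)adf_pf2vf_bh_handler,
		     (unsigned long)accel_dev);

	mutex_init(&accel_dev->vf.vf2pf_lock);	/* assumed: serializes VF2PF sends */
	return 0;
}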