Example #1
static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
	void __iomem *csr = misc_bar->virt_addr;
	unsigned int val, i;

	/* Enable Accel Engine error detection & correction */
	for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
		val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_CTX_ENABLES(i));
		val |= ADF_DH895XCC_ENABLE_AE_ECC_ERR;
		ADF_CSR_WR(csr, ADF_DH895XCC_AE_CTX_ENABLES(i), val);
		val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_MISC_CONTROL(i));
		val |= ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR;
		ADF_CSR_WR(csr, ADF_DH895XCC_AE_MISC_CONTROL(i), val);
	}

	/* Enable shared memory error detection & correction */
	for (i = 0; i < hw_device->get_num_accels(hw_device); i++) {
		val = ADF_CSR_RD(csr, ADF_DH895XCC_UERRSSMSH(i));
		val |= ADF_DH895XCC_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_DH895XCC_UERRSSMSH(i), val);
		val = ADF_CSR_RD(csr, ADF_DH895XCC_CERRSSMSH(i));
		val |= ADF_DH895XCC_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_DH895XCC_CERRSSMSH(i), val);
	}
}
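
All of these examples share one idiom: take the BAR's virt_addr and read-modify-write 32-bit CSRs through ADF_CSR_RD/ADF_CSR_WR. As a hedged, self-contained sketch (the CSR_RD/CSR_WR macros, register stride and ECC bit below are hypothetical userspace stand-ins, not the driver's MMIO accessors), the ECC-enable loop above boils down to:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for ADF_CSR_RD/ADF_CSR_WR: a 32-bit load/store at
 * (base + byte offset).  In the driver these are MMIO accessors on the
 * ioremap'd misc BAR; here "base" is an ordinary buffer for illustration. */
#define CSR_RD(base, off)      (*(volatile uint32_t *)((uint8_t *)(base) + (off)))
#define CSR_WR(base, off, val) (*(volatile uint32_t *)((uint8_t *)(base) + (off)) = (val))

#define AE_CTX_ENABLES(i)  ((i) * 0x1000)  /* hypothetical per-AE stride */
#define ENABLE_AE_ECC_ERR  (1u << 28)      /* hypothetical ECC-enable bit */

int main(void)
{
	static uint32_t fake_bar[8 * 0x1000 / 4]; /* pretend misc BAR, 4-byte aligned */
	unsigned int i, val;

	/* Same read-modify-write loop shape as adf_enable_error_correction() */
	for (i = 0; i < 8; i++) {
		val = CSR_RD(fake_bar, AE_CTX_ENABLES(i));
		val |= ENABLE_AE_ECC_ERR;
		CSR_WR(fake_bar, AE_CTX_ENABLES(i), val);
	}
	printf("AE0 CTX_ENABLES = 0x%08x\n", CSR_RD(fake_bar, AE_CTX_ENABLES(0)));
	return 0;
}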
Example #2
int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
{
	struct adf_admin_comms *admin;
	struct adf_bar *pmisc = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
	void __iomem *csr = pmisc->virt_addr;
	void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET;
	uint64_t reg_val;

	admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
			     dev_to_node(&GET_DEV(accel_dev)));
	if (!admin)
		return -ENOMEM;
	admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
					       &admin->phy_addr, GFP_KERNEL);
	if (!admin->virt_addr) {
		dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
		kfree(admin);
		return -ENOMEM;
	}
	reg_val = (uint64_t)admin->phy_addr;
	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
	mutex_init(&admin->lock);
	admin->mailbox_addr = mailbox;
	accel_dev->admin = admin;
	return 0;
}
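
The one 64-bit value here, the admin region's DMA address, is programmed as two 32-bit CSR writes: the upper half to ADMINMSGUR and the lower half to ADMINMSGLR (the second ADF_CSR_WR simply truncates reg_val to its low 32 bits). A minimal, hedged sketch of that split and recombination, with hypothetical names and an example address:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

int main(void)
{
	uint64_t phy_addr = 0x0000001234567000ULL;    /* example DMA address */

	uint32_t msg_ur = (uint32_t)(phy_addr >> 32); /* upper register value */
	uint32_t msg_lr = (uint32_t)phy_addr;         /* lower register value */

	/* The device reassembles the same 64-bit address from the two halves. */
	uint64_t reassembled = ((uint64_t)msg_ur << 32) | msg_lr;
	assert(reassembled == phy_addr);

	printf("UR=0x%08x LR=0x%08x -> 0x%016llx\n",
	       msg_ur, msg_lr, (unsigned long long)reassembled);
	return 0;
}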
Example #3
File: qat_qp.c Project: 0day-ci/dpdk
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
{
	uint32_t arb_csr_offset =  ADF_ARB_RINGSRVARBEN_OFFSET +
					(ADF_ARB_REG_SLOT *
							txq->hw_bundle_number);
	uint32_t value;

	PMD_INIT_FUNC_TRACE();
	value = ADF_CSR_RD(base_addr, arb_csr_offset);
	value ^= (0x01 << txq->hw_queue_number);
	ADF_CSR_WR(base_addr, arb_csr_offset, value);
}
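
adf_queue_arb_disable() flips the queue's bit in the per-bundle ring-arbiter enable register with XOR rather than clearing it explicitly, so it assumes the bit is currently set. A hedged, self-contained sketch of that bit manipulation (the register value and queue number are hypothetical), contrasting the toggle with an idempotent clear:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t arben = 0x0000000Fu;       /* hypothetical: queues 0-3 enabled */
	unsigned int hw_queue_number = 2;

	/* XOR toggle, as in adf_queue_arb_disable(): clears the bit only if it
	 * was set, and would re-enable the queue if called a second time. */
	arben ^= (0x01u << hw_queue_number);
	printf("after XOR toggle:     0x%08x\n", arben);  /* 0x0000000B */

	/* An explicit clear is idempotent regardless of the current state. */
	arben &= ~(0x01u << hw_queue_number);
	printf("after explicit clear: 0x%08x\n", arben);  /* 0x0000000B */
	return 0;
}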
Example #4
int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
{
	struct adf_admin_comms *admin;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *pmisc =
		&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *csr = pmisc->virt_addr;
	void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET;
	u64 reg_val;

	admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
			     dev_to_node(&GET_DEV(accel_dev)));
	if (!admin)
		return -ENOMEM;
	admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
					       &admin->phy_addr, GFP_KERNEL);
	if (!admin->virt_addr) {
		dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
		kfree(admin);
		return -ENOMEM;
	}

	admin->const_tbl_addr = dma_map_single(&GET_DEV(accel_dev),
					       (void *) const_tab, 1024,
					       DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&GET_DEV(accel_dev),
				       admin->const_tbl_addr))) {
		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
				  admin->virt_addr, admin->phy_addr);
		kfree(admin);
		return -ENOMEM;
	}
	reg_val = (u64)admin->phy_addr;
	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
	mutex_init(&admin->lock);
	admin->mailbox_addr = mailbox;
	accel_dev->admin = admin;
	return 0;
}
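
This variant additionally DMA-maps a constant table, checks the mapping with dma_mapping_error(), and on failure unwinds what it already allocated in reverse order (free the coherent buffer, then the admin struct) before returning -ENOMEM. A hedged userspace sketch of that unwind-in-reverse idiom, written with the goto-based form common in kernel code; the struct, buffer sizes and helper name below are hypothetical:

#include <stdlib.h>
#include <stdio.h>

/* Hypothetical resources standing in for the admin struct, the coherent DMA
 * buffer and the mapped constant table used by adf_init_admin_comms(). */
struct admin_ctx {
	void *dma_buf;
	void *const_tbl;
};

static int init_admin(struct admin_ctx **out)
{
	struct admin_ctx *admin = calloc(1, sizeof(*admin));
	if (!admin)
		return -1;

	admin->dma_buf = calloc(1, 4096);
	if (!admin->dma_buf)
		goto err_free_admin;

	admin->const_tbl = calloc(1, 1024);
	if (!admin->const_tbl)
		goto err_free_dma;       /* unwind in reverse allocation order */

	*out = admin;
	return 0;

err_free_dma:
	free(admin->dma_buf);
err_free_admin:
	free(admin);
	return -1;
}

int main(void)
{
	struct admin_ctx *admin = NULL;

	printf("init_admin: %d\n", init_admin(&admin));
	return 0;
}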
Example #5
File: qat_hal.c Project: GavinHwa/linux
static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
{
	void __iomem *csr_addr = handle->hal_ep_csr_addr_v +
				 ESRAM_AUTO_INIT_CSR_OFFSET;
	unsigned int csr_val, times = 30;

	csr_val = ADF_CSR_RD(csr_addr, 0);
	if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE))
		return 0;

	csr_val = ADF_CSR_RD(csr_addr, 0);
	csr_val |= ESRAM_AUTO_TINIT;
	ADF_CSR_WR(csr_addr, 0, csr_val);

	do {
		qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0);
		csr_val = ADF_CSR_RD(csr_addr, 0);
	} while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && --times);
	if (!times) {
		pr_err("QAT: Fail to init eSram!\n");
		return -EFAULT;
	}
	return 0;
}
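
The eSRAM bring-up follows a common MMIO pattern: set a kick-off bit, then poll a done bit with a bounded retry budget so a hung device cannot stall the caller forever. A hedged, self-contained sketch of that pattern (the register bits, csr_read() helper and the simulated "hardware" below are hypothetical):

#include <stdint.h>
#include <stdio.h>

#define AUTO_TINIT       (1u << 2)   /* hypothetical kick-off bit */
#define AUTO_TINIT_DONE  (1u << 3)   /* hypothetical done bit */

/* Hypothetical device register; a real driver would read it over MMIO. */
static uint32_t esram_csr;

static uint32_t csr_read(void)
{
	/* Pretend the hardware completes once the kick-off bit is set. */
	if (esram_csr & AUTO_TINIT)
		esram_csr |= AUTO_TINIT_DONE;
	return esram_csr;
}

int main(void)
{
	int times = 30;                  /* bounded retry budget */

	esram_csr |= AUTO_TINIT;         /* start auto-init */
	while (!(csr_read() & AUTO_TINIT_DONE) && --times)
		;                        /* a real driver would delay between polls */

	if (!times) {
		fprintf(stderr, "eSRAM init timed out\n");
		return 1;
	}
	printf("eSRAM ready, %d polls left in the budget\n", times);
	return 0;
}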
Example #6
static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae,
				  void *in, void *out)
{
	struct adf_admin_comms *admin = accel_dev->admin;
	int offset = ae * ADF_ADMINMSG_LEN * 2;
	void __iomem *mailbox = admin->mailbox_addr;
	int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE;
	int times, received;

	mutex_lock(&admin->lock);

	if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
		mutex_unlock(&admin->lock);
		return -EAGAIN;
	}

	memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
	ADF_CSR_WR(mailbox, mb_offset, 1);
	received = 0;
	for (times = 0; times < 50; times++) {
		msleep(20);
		if (ADF_CSR_RD(mailbox, mb_offset) == 0) {
			received = 1;
			break;
		}
	}
	if (received)
		memcpy(out, admin->virt_addr + offset +
		       ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
	else
		dev_err(&GET_DEV(accel_dev),
			"Failed to send admin msg to accelerator\n");

	mutex_unlock(&admin->lock);
	return received ? 0 : -EFAULT;
}
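
adf_put_admin_msg_sync() is a simple doorbell handshake: copy the request into the AE's slot in shared memory, write 1 to that AE's mailbox CSR, poll until the firmware clears it back to 0, then read the response from the slot immediately after the request (offset + ADF_ADMINMSG_LEN). A hedged userspace sketch of that slot layout and handshake; the message length, fake_firmware() step and all names below are hypothetical, not the driver's protocol constants:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MSG_LEN  32                               /* hypothetical message length */
#define NUM_AE   4

static uint8_t shared_mem[NUM_AE * MSG_LEN * 2];  /* request + response per AE */
static uint32_t mailbox[NUM_AE];                  /* 1 = request pending */

/* Hypothetical firmware side: consume the request, write a response, clear
 * the doorbell.  In the driver this happens on the device. */
static void fake_firmware(uint32_t ae)
{
	int off = ae * MSG_LEN * 2;

	memcpy(shared_mem + off + MSG_LEN, shared_mem + off, MSG_LEN);
	mailbox[ae] = 0;
}

static int put_msg_sync(uint32_t ae, const void *in, void *out)
{
	int off = ae * MSG_LEN * 2;
	int times;

	if (mailbox[ae] == 1)
		return -1;                        /* previous request still pending */

	memcpy(shared_mem + off, in, MSG_LEN);    /* place the request */
	mailbox[ae] = 1;                          /* ring the doorbell */

	for (times = 0; times < 50; times++) {
		fake_firmware(ae);                /* stand-in for msleep() + device */
		if (mailbox[ae] == 0) {
			memcpy(out, shared_mem + off + MSG_LEN, MSG_LEN);
			return 0;
		}
	}
	return -1;                                /* timed out */
}

int main(void)
{
	uint8_t req[MSG_LEN] = "init-me", resp[MSG_LEN] = { 0 };
	int ret = put_msg_sync(1, req, resp);

	printf("status=%d resp=\"%s\"\n", ret, (char *)resp);
	return 0;
}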
Example #7
File: adf_vf_isr.c Project: AK101111/linux
static void adf_pf2vf_bh_handler(void *data)
{
	struct adf_accel_dev *accel_dev = data;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *pmisc =
			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *pmisc_bar_addr = pmisc->virt_addr;
	u32 msg;

	/* Read the message from PF */
	msg = ADF_CSR_RD(pmisc_bar_addr, hw_data->get_pf2vf_offset(0));

	if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM))
		/* Ignore legacy non-system (non-kernel) PF2VF messages */
		goto err;

	switch ((msg & ADF_PF2VF_MSGTYPE_MASK) >> ADF_PF2VF_MSGTYPE_SHIFT) {
	case ADF_PF2VF_MSGTYPE_RESTARTING: {
		struct adf_vf_stop_data *stop_data;

		dev_dbg(&GET_DEV(accel_dev),
			"Restarting msg received from PF 0x%x\n", msg);

		clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);

		stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
		if (!stop_data) {
			dev_err(&GET_DEV(accel_dev),
				"Couldn't schedule stop for vf_%d\n",
				accel_dev->accel_id);
			return;
		}
		stop_data->accel_dev = accel_dev;
		INIT_WORK(&stop_data->work, adf_dev_stop_async);
		queue_work(adf_vf_stop_wq, &stop_data->work);
		/* To ack, clear the PF2VFINT bit */
		msg &= ~BIT(0);
		ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);
		return;
	}
	case ADF_PF2VF_MSGTYPE_VERSION_RESP:
		dev_dbg(&GET_DEV(accel_dev),
			"Version resp received from PF 0x%x\n", msg);
		accel_dev->vf.pf_version =
			(msg & ADF_PF2VF_VERSION_RESP_VERS_MASK) >>
			ADF_PF2VF_VERSION_RESP_VERS_SHIFT;
		accel_dev->vf.compatible =
			(msg & ADF_PF2VF_VERSION_RESP_RESULT_MASK) >>
			ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		complete(&accel_dev->vf.iov_msg_completion);
		break;
	default:
		goto err;
	}

	/* To ack, clear the PF2VFINT bit */
	msg &= ~BIT(0);
	ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);

	/* Re-enable PF2VF interrupts */
	adf_enable_pf2vf_interrupts(accel_dev);
	return;
err:
	dev_err(&GET_DEV(accel_dev),
		"Unknown message from PF (0x%x); leaving PF2VF ints disabled\n",
		msg);
}
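
The PF-to-VF message is a single packed 32-bit CSR: fields are unpacked with mask/shift pairs (ADF_PF2VF_MSGTYPE_MASK >> ADF_PF2VF_MSGTYPE_SHIFT and friends) and the interrupt is acked by writing the value back with bit 0 cleared. A hedged sketch of that decode-and-ack step; the field layout below is hypothetical, the real masks and shifts are defined by the driver and may differ:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field layout for a packed 32-bit PF-to-VF message. */
#define MSGORIGIN_SYSTEM   (1u << 1)
#define MSGTYPE_SHIFT      2
#define MSGTYPE_MASK       (0xFu << MSGTYPE_SHIFT)
#define MSGTYPE_RESTARTING 0x1u
#define INT_BIT            (1u << 0)   /* interrupt bit, cleared to ack */

int main(void)
{
	/* A message as it might be read from the PF2VF CSR. */
	uint32_t msg = INT_BIT | MSGORIGIN_SYSTEM |
		       (MSGTYPE_RESTARTING << MSGTYPE_SHIFT);

	if (!(msg & MSGORIGIN_SYSTEM)) {
		fprintf(stderr, "ignoring non-system message\n");
		return 1;
	}

	uint32_t type = (msg & MSGTYPE_MASK) >> MSGTYPE_SHIFT;
	printf("msg=0x%08x type=0x%x\n", msg, type);

	/* Ack by writing the message back with the interrupt bit cleared. */
	msg &= ~INT_BIT;
	printf("ack value written back: 0x%08x\n", msg);
	return 0;
}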