Example #1
static int
lio_cn23xx_pf_soft_reset(struct octeon_device *oct)
{

	/* Enable all byte lanes of the window write mask */
	lio_write_csr64(oct, LIO_CN23XX_SLI_WIN_WR_MASK_REG, 0xFF);

	lio_dev_dbg(oct, "BIST enabled for CN23XX soft reset\n");

	/* Write a test pattern to SCRATCH1; a successful reset clears it */
	lio_write_csr64(oct, LIO_CN23XX_SLI_SCRATCH1, 0x1234ULL);

	/* Initiate chip-wide soft reset */
	lio_pci_readq(oct, LIO_CN23XX_RST_SOFT_RST);
	lio_pci_writeq(oct, 1, LIO_CN23XX_RST_SOFT_RST);

	/* Wait for 100ms as Octeon resets. */
	lio_mdelay(100);

	if (lio_read_csr64(oct, LIO_CN23XX_SLI_SCRATCH1)) {
		lio_dev_err(oct, "Soft reset failed\n");
		return (1);
	}

	lio_dev_dbg(oct, "Reset completed\n");

	/* Restore the window write mask to its reset value */
	lio_write_csr64(oct, LIO_CN23XX_SLI_WIN_WR_MASK_REG, 0xFF);

	return (0);
}
Example #2
static void
lio_cn23xx_pf_setup_global_mac_regs(struct octeon_device *oct)
{
	uint64_t	reg_val;
	uint16_t	mac_no = oct->pcie_port;
	uint16_t	pf_num = oct->pf_num;
	/* Program SRN and TRS for each MAC (0..3) */

	lio_dev_dbg(oct, "%s: Using pcie port %d\n", __func__, mac_no);
	/* By default, map all 64 IOQs to a single MAC */

	reg_val =
	    lio_read_csr64(oct, LIO_CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num));

	/* Set SRN <6:0>: the starting ring number for this PF */
	reg_val = pf_num * LIO_CN23XX_PF_MAX_RINGS;

	/* Set TRS <23:16>: the total number of rings for this PF */
	reg_val = reg_val |
	    (oct->sriov_info.trs << LIO_CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS);

	/* write these settings to MAC register */
	lio_write_csr64(oct, LIO_CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num),
			reg_val);

	lio_dev_dbg(oct, "SLI_PKT_MAC(%d)_PF(%d)_RINFO : 0x%016llx\n", mac_no,
		    pf_num,
		    LIO_CAST64(lio_read_csr64(oct,
				   LIO_CN23XX_SLI_PKT_MAC_RINFO64(mac_no,
								  pf_num))));
}
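For concreteness, a hedged worked example of the value written, assuming pf_num = 1, LIO_CN23XX_SLI_PKT_MAC_RINFO64 ring count LIO_CN23XX_PF_MAX_RINGS = 64, oct->sriov_info.trs = 64, and a TRS bit position of 16 (these values are assumptions, not given in this listing):

/* SRN <6:0>   : 1 * 64   = 0x40
 * TRS <23:16> : 64 << 16 = 0x400000
 * reg_val     : 0x400000 | 0x40 = 0x00400040
 */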
Example #3
static void
cn23xx_vf_setup_iq_regs(struct lio_device *lio_dev, uint32_t iq_no)
{
	struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
	uint64_t pkt_in_done = 0;

	PMD_INIT_FUNC_TRACE();

	/* Write the start of the input queue's ring and its size */
	lio_write_csr64(lio_dev, CN23XX_SLI_IQ_BASE_ADDR64(iq_no),
			iq->base_addr_dma);
	lio_write_csr(lio_dev, CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count);

	/* Remember the doorbell & instruction count register addr
	 * for this queue
	 */
	iq->doorbell_reg = (uint8_t *)lio_dev->hw_addr +
				CN23XX_SLI_IQ_DOORBELL(iq_no);
	iq->inst_cnt_reg = (uint8_t *)lio_dev->hw_addr +
				CN23XX_SLI_IQ_INSTR_COUNT64(iq_no);
	lio_dev_dbg(lio_dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
		    iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

	/* Store the current instruction counter (used in flush_iq
	 * calculation)
	 */
	pkt_in_done = rte_read64(iq->inst_cnt_reg);

	/* Clear the count by writing back what we read, but don't
	 * enable data traffic here
	 */
	rte_write64(pkt_in_done, iq->inst_cnt_reg);
}
Example #4
static void
lio_cn23xx_pf_enable_error_reporting(struct octeon_device *oct)
{
	uint32_t	corrtable_err_status, uncorrectable_err_mask, regval;

	regval = lio_read_pci_cfg(oct, LIO_CN23XX_CFG_PCIE_DEVCTL);
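	/*
	 * A nonzero masked value means a PCI-E error has already been
	 * detected; log the AER mask/status registers.
	 */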
	if (regval & LIO_CN23XX_CFG_PCIE_DEVCTL_MASK) {
		uncorrectable_err_mask =
		    lio_read_pci_cfg(oct,
				     LIO_CN23XX_CFG_PCIE_UNCORRECT_ERR_MASK);
		corrtable_err_status =
		    lio_read_pci_cfg(oct,
				     LIO_CN23XX_CFG_PCIE_CORRECT_ERR_STATUS);
		lio_dev_err(oct, "PCI-E Fatal error detected;\n"
			    "\tdev_ctl_status_reg = 0x%08x\n"
			    "\tuncorrectable_error_mask_reg = 0x%08x\n"
			    "\tcorrectable_error_status_reg = 0x%08x\n",
			    regval, uncorrectable_err_mask,
			    corrtable_err_status);
	}

	regval |= 0xf;	/* Enable correctable/non-fatal/fatal/UR reporting */

	lio_dev_dbg(oct, "Enabling PCI-E error reporting..\n");
	lio_write_pci_cfg(oct, LIO_CN23XX_CFG_PCIE_DEVCTL, regval);
}
Example #5
static void
lio_cn23xx_pf_interrupt_handler(void *dev)
{
	struct octeon_device	*oct = (struct octeon_device *)dev;
	struct lio_cn23xx_pf	*cn23xx = (struct lio_cn23xx_pf *)oct->chip;
	uint64_t		intr64;

	lio_dev_dbg(oct, "In %s octeon_dev @ %p\n", __func__, oct);
	intr64 = lio_read_csr64(oct, cn23xx->intr_sum_reg64);

	oct->int_status = 0;

	if (intr64 & LIO_CN23XX_INTR_ERR)
		lio_dev_err(oct, "Error Intr: 0x%016llx\n",
			    LIO_CAST64(intr64));

	/* Packet-data work is signaled here only when MSI-X is not in use */
	if (oct->msix_on != LIO_FLAG_MSIX_ENABLED) {
		if (intr64 & LIO_CN23XX_INTR_PKT_DATA)
			oct->int_status |= LIO_DEV_INTR_PKT_DATA;
	}

	if (intr64 & LIO_CN23XX_INTR_DMA0_FORCE)
		oct->int_status |= LIO_DEV_INTR_DMA0_FORCE;

	if (intr64 & LIO_CN23XX_INTR_DMA1_FORCE)
		oct->int_status |= LIO_DEV_INTR_DMA1_FORCE;

	/* Clear the current interrupts */
	lio_write_csr64(oct, cn23xx->intr_sum_reg64, intr64);
}
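For context, a minimal sketch of how a handler like this might be registered on FreeBSD; oct->device, oct->irq_res, and oct->intr_cookie are assumed fields for illustration, not from the original source:

	/* Hypothetical registration sketch; the oct fields used here are
	 * assumptions for illustration, not from the original source.
	 */
	if (bus_setup_intr(oct->device, oct->irq_res,
			   INTR_TYPE_NET | INTR_MPSAFE, NULL,
			   lio_cn23xx_pf_interrupt_handler, oct,
			   &oct->intr_cookie) != 0)
		device_printf(oct->device,
			      "Failed to set up interrupt handler\n");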
Example #6
static void
lio_cn23xx_pf_get_pcie_qlmport(struct octeon_device *oct)
{
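	/* SLI_MAC_NUMBER reports which PCIe MAC/port this function is on */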
	oct->pcie_port = (lio_read_csr32(oct,
					 LIO_CN23XX_SLI_MAC_NUMBER)) & 0xff;

	lio_dev_dbg(oct, "CN23xx uses PCIE Port %d\n",
		    oct->pcie_port);
}
Example #7
static void
lio_cn23xx_pf_setup_iq_regs(struct octeon_device *oct, uint32_t iq_no)
{
	struct lio_instr_queue	*iq = oct->instr_queue[iq_no];
	uint64_t		pkt_in_done;

	/* Convert the PF-relative queue index to the absolute ring number */
	iq_no += oct->sriov_info.pf_srn;

	/* Write the start of the input queue's ring and its size  */
	lio_write_csr64(oct, LIO_CN23XX_SLI_IQ_BASE_ADDR64(iq_no),
			iq->base_addr_dma);
	lio_write_csr32(oct, LIO_CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count);

	/*
	 * Remember the doorbell & instruction count register addr
	 * for this queue
	 */
	iq->doorbell_reg = LIO_CN23XX_SLI_IQ_DOORBELL(iq_no);
	iq->inst_cnt_reg = LIO_CN23XX_SLI_IQ_INSTR_COUNT64(iq_no);
	lio_dev_dbg(oct, "InstQ[%d]:dbell reg @ 0x%x instcnt_reg @ 0x%x\n",
		    iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

	/*
	 * Store the current instruction counter (used in flush_iq
	 * calculation)
	 */
	pkt_in_done = lio_read_csr64(oct, iq->inst_cnt_reg);

	if (oct->msix_on) {
		/* Set CINT_ENB to enable IQ interrupt   */
		lio_write_csr64(oct, iq->inst_cnt_reg,
				(pkt_in_done | LIO_CN23XX_INTR_CINT_ENB));
	} else {
		/*
		 * Clear the count by writing back what we read, but don't
		 * enable interrupts
		 */
		lio_write_csr64(oct, iq->inst_cnt_reg, pkt_in_done);
	}

	iq->reset_instr_cnt = 0;
}
Example #8
static int
lio_cn23xx_pf_sriov_config(struct octeon_device *oct)
{
	struct lio_cn23xx_pf	*cn23xx = (struct lio_cn23xx_pf *)oct->chip;
	uint32_t		num_pf_rings, total_rings, max_rings;

	cn23xx->conf = (struct lio_config *)lio_get_config_info(oct, LIO_23XX);

	max_rings = LIO_CN23XX_PF_MAX_RINGS;

	if (oct->sriov_info.num_pf_rings) {
		num_pf_rings = oct->sriov_info.num_pf_rings;
		if (num_pf_rings > max_rings) {
			num_pf_rings = min(mp_ncpus, max_rings);
			lio_dev_warn(oct, "num_queues_per_pf requested %u is more than available rings (%u). Reducing to %u\n",
				     oct->sriov_info.num_pf_rings,
				     max_rings, num_pf_rings);
		}
	} else {
#ifdef RSS
		num_pf_rings = min(rss_getnumbuckets(), mp_ncpus);
#else
		num_pf_rings = min(mp_ncpus, max_rings);
#endif
	}

	total_rings = num_pf_rings;
	oct->sriov_info.trs = total_rings;
	oct->sriov_info.pf_srn = total_rings - num_pf_rings;
	oct->sriov_info.num_pf_rings = num_pf_rings;

	lio_dev_dbg(oct, "trs:%d pf_srn:%d num_pf_rings:%d\n",
		    oct->sriov_info.trs, oct->sriov_info.pf_srn,
		    oct->sriov_info.num_pf_rings);

	return (0);
}
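A hedged worked example of the ring accounting, assuming an 8-CPU host (mp_ncpus = 8), no RSS, and num_pf_rings left unset:

/* num_pf_rings = min(8, 64) = 8
 * total_rings  = 8, so trs = 8
 * pf_srn       = 8 - 8 = 0 (the PF's rings start at ring 0)
 */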
Example #9
int
cn23xx_pfvf_handshake(struct lio_device *lio_dev)
{
	struct lio_mbox_cmd mbox_cmd;
	struct lio_version *lio_ver = (struct lio_version *)&mbox_cmd.data[0];
	uint32_t q_no, count = 0;
	rte_atomic64_t status;
	uint32_t pfmajor;
	uint32_t vfmajor;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Sending VF_ACTIVE indication to the PF driver */
	lio_dev_dbg(lio_dev, "requesting info from PF\n");

	/* Build the VF_ACTIVE request carrying the VF's driver version */
	mbox_cmd.msg.mbox_msg64 = 0;
	mbox_cmd.msg.s.type = LIO_MBOX_REQUEST;
	mbox_cmd.msg.s.resp_needed = 1;
	mbox_cmd.msg.s.cmd = LIO_VF_ACTIVE;
	mbox_cmd.msg.s.len = 2;
	mbox_cmd.data[0] = 0;
	lio_ver->major = LIO_BASE_MAJOR_VERSION;
	lio_ver->minor = LIO_BASE_MINOR_VERSION;
	lio_ver->micro = LIO_BASE_MICRO_VERSION;
	mbox_cmd.q_no = 0;
	mbox_cmd.recv_len = 0;
	mbox_cmd.recv_status = 0;
	mbox_cmd.fn = (lio_mbox_callback)cn23xx_pfvf_hs_callback;
	mbox_cmd.fn_arg = (void *)&status;

	/* Clear the completion word before the PF's response can land */
	rte_atomic64_set(&status, 0);

	if (lio_mbox_write(lio_dev, &mbox_cmd)) {
		lio_dev_err(lio_dev, "Write to mailbox failed\n");
		return -1;
	}

	/* Poll up to ~10s for the callback to post the PF's response */
	do {
		rte_delay_ms(1);
	} while ((rte_atomic64_read(&status) == 0) && (count++ < 10000));

	ret = rte_atomic64_read(&status);
	if (ret == 0) {
		lio_dev_err(lio_dev, "cn23xx_pfvf_handshake timeout\n");
		return -1;
	}

	/* Propagate the PF-assigned pkind to every instruction queue */
	for (q_no = 0; q_no < lio_dev->num_iqs; q_no++)
		lio_dev->instr_queue[q_no]->txpciq.s.pkind =
						lio_dev->pfvf_hsword.pkind;

	/* The callback stores the PF's major version in the upper 16 bits */
	vfmajor = LIO_BASE_MAJOR_VERSION;
	pfmajor = ret >> 16;
	if (pfmajor != vfmajor) {
		lio_dev_err(lio_dev,
			    "VF LiquidIO driver (major version %d) is not compatible with LiquidIO PF driver (major version %d)\n",
			    vfmajor, pfmajor);
		ret = -EPERM;
	} else {
		lio_dev_dbg(lio_dev,
			    "VF LiquidIO driver (major version %d), LiquidIO PF driver (major version %d)\n",
			    vfmajor, pfmajor);
		ret = 0;
	}

	lio_dev_dbg(lio_dev, "got data from PF pkind is %d\n",
		    lio_dev->pfvf_hsword.pkind);

	return ret;
}
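A minimal sketch of where this handshake might sit in VF bring-up; lio_vf_setup and its flow are assumptions for illustration, not part of the original source:

static int
lio_vf_setup(struct lio_device *lio_dev)
{
	/* Hypothetical caller: the mailbox must be operational before
	 * the handshake can run
	 */
	if (cn23xx_pfvf_handshake(lio_dev))
		return -1;	/* timeout or PF/VF major-version mismatch */

	/* Queue setup after this point uses the negotiated pkind */
	return 0;
}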