Example #1
static void
lio_cn23xx_pf_setup_global_mac_regs(struct octeon_device *oct)
{
	uint64_t	reg_val;
	uint16_t	mac_no = oct->pcie_port;
	uint16_t	pf_num = oct->pf_num;
	/* programming SRN and TRS for each MAC(0..3)  */

	lio_dev_dbg(oct, "%s: Using pcie port %d\n", __func__, mac_no);
	/* By default, all 64 IOQs are mapped to a single MAC */

	reg_val =
	    lio_read_csr64(oct, LIO_CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num));
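	/*
	 * The value read above is discarded; reg_val is rebuilt below with
	 * only the SRN and TRS fields set.
	 */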

	/* setting SRN <6:0>  */
	reg_val = pf_num * LIO_CN23XX_PF_MAX_RINGS;

	/* setting TRS <23:16> */
	reg_val = reg_val |
	    (oct->sriov_info.trs << LIO_CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS);

	/* write these settings to MAC register */
	lio_write_csr64(oct, LIO_CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num),
			reg_val);

	lio_dev_dbg(oct, "SLI_PKT_MAC(%d)_PF(%d)_RINFO : 0x%016llx\n", mac_no,
		    pf_num,
		    LIO_CAST64(lio_read_csr64(oct,
				   LIO_CN23XX_SLI_PKT_MAC_RINFO64(mac_no,
								  pf_num))));
}
Example #2
static int
lio_cn23xx_pf_reset_io_queues(struct octeon_device *oct)
{
	uint64_t	d64;
	uint32_t	ern, loop = BUSY_READING_REG_PF_LOOP_COUNT;
	uint32_t	q_no, srn;
	int		ret_val = 0;

	srn = oct->sriov_info.pf_srn;
	ern = srn + oct->sriov_info.num_pf_rings;

	/* As per the HRM register description, s/w can't write 0 to ENB. */
	/* To turn a queue off, the RST bit must be set instead. */

	/* Reset the Enable bit for all the 64 IQs.  */
	for (q_no = srn; q_no < ern; q_no++) {
		/* set RST bit to 1. This bit applies to both IQ and OQ */
		d64 = lio_read_csr64(oct,
				     LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		d64 = d64 | LIO_CN23XX_PKT_INPUT_CTL_RST;
		lio_write_csr64(oct,
				LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64);
	}

	/* wait until the RST bit is clear or the RST and quiet bits are set */
	for (q_no = srn; q_no < ern; q_no++) {
		volatile uint64_t reg_val =
			lio_read_csr64(oct,
				       LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		while ((reg_val & LIO_CN23XX_PKT_INPUT_CTL_RST) &&
		       !(reg_val & LIO_CN23XX_PKT_INPUT_CTL_QUIET) &&
		       loop) {
			reg_val = lio_read_csr64(oct,
				       LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			loop--;
		}

		if (!loop) {
			lio_dev_err(oct,
				    "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
				    q_no);
			return (-1);
		}

		reg_val &= ~LIO_CN23XX_PKT_INPUT_CTL_RST;
		lio_write_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				reg_val);

		reg_val = lio_read_csr64(oct,
					 LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		if (reg_val & LIO_CN23XX_PKT_INPUT_CTL_RST) {
			lio_dev_err(oct, "clearing the reset failed for qno: %u\n",
				    q_no);
			ret_val = -1;
		}
	}

	return (ret_val);
}
Example #3
static int
cn23xx_vf_reset_io_queues(struct lio_device *lio_dev, uint32_t num_queues)
{
	uint32_t loop = CN23XX_VF_BUSY_READING_REG_LOOP_COUNT;
	uint64_t d64, q_no;
	int ret_val = 0;

	PMD_INIT_FUNC_TRACE();

	for (q_no = 0; q_no < num_queues; q_no++) {
		/* set RST bit to 1. This bit applies to both IQ and OQ */
		d64 = lio_read_csr64(lio_dev,
				     CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		d64 = d64 | CN23XX_PKT_INPUT_CTL_RST;
		lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				d64);
	}

	/* wait until the RST bit is clear or the RST and QUIET bits are set */
	for (q_no = 0; q_no < num_queues; q_no++) {
		volatile uint64_t reg_val;

		reg_val	= lio_read_csr64(lio_dev,
					 CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
				!(reg_val & CN23XX_PKT_INPUT_CTL_QUIET) &&
				loop) {
			reg_val = lio_read_csr64(
					lio_dev,
					CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			loop = loop - 1;
		}

		if (loop == 0) {
			lio_dev_err(lio_dev,
				    "clearing the reset reg failed or setting the quiet reg failed for qno: %lu\n",
				    (unsigned long)q_no);
			return -1;
		}

		reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
		lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				reg_val);

		reg_val = lio_read_csr64(
		    lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
			lio_dev_err(lio_dev,
				    "clearing the reset failed for qno: %lu\n",
				    (unsigned long)q_no);
			ret_val = -1;
		}
	}

	return ret_val;
}
Example #4
static int
cn23xx_vf_enable_io_queues(struct lio_device *lio_dev)
{
	uint32_t q_no;

	PMD_INIT_FUNC_TRACE();

	for (q_no = 0; q_no < lio_dev->num_iqs; q_no++) {
		uint64_t reg_val;

		/* set the corresponding IQ IS_64B bit */
		if (lio_dev->io_qmask.iq64B & (1ULL << q_no)) {
			reg_val = lio_read_csr64(
					lio_dev,
					CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val = reg_val | CN23XX_PKT_INPUT_CTL_IS_64B;
			lio_write_csr64(lio_dev,
					CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
					reg_val);
		}

		/* set the corresponding IQ ENB bit */
		if (lio_dev->io_qmask.iq & (1ULL << q_no)) {
			reg_val = lio_read_csr64(
					lio_dev,
					CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val = reg_val | CN23XX_PKT_INPUT_CTL_RING_ENB;
			lio_write_csr64(lio_dev,
					CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
					reg_val);
		}
	}
	for (q_no = 0; q_no < lio_dev->num_oqs; q_no++) {
		uint32_t reg_val;

		/* set the corresponding OQ ENB bit */
		if (lio_dev->io_qmask.oq & (1ULL << q_no)) {
			reg_val = lio_read_csr(
					lio_dev,
					CN23XX_SLI_OQ_PKT_CONTROL(q_no));
			reg_val = reg_val | CN23XX_PKT_OUTPUT_CTL_RING_ENB;
			lio_write_csr(lio_dev,
				      CN23XX_SLI_OQ_PKT_CONTROL(q_no),
				      reg_val);
		}
	}

	return 0;
}
Example #5
int
cn23xx_vf_set_io_queues_off(struct lio_device *lio_dev)
{
	uint32_t loop = CN23XX_VF_BUSY_READING_REG_LOOP_COUNT;
	uint64_t q_no;

	/* Disable the i/p and o/p queues for this Octeon.
	 * IOQs will already be in reset.
	 * If RST bit is set, wait for Quiet bit to be set
	 * Once Quiet bit is set, clear the RST bit
	 */
	PMD_INIT_FUNC_TRACE();

	for (q_no = 0; q_no < lio_dev->sriov_info.rings_per_vf; q_no++) {
		volatile uint64_t reg_val;

		reg_val = lio_read_csr64(lio_dev,
					 CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) && !(reg_val &
					 CN23XX_PKT_INPUT_CTL_QUIET) && loop) {
			reg_val = lio_read_csr64(
					lio_dev,
					CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			loop = loop - 1;
		}

		if (loop == 0) {
			lio_dev_err(lio_dev,
				    "clearing the reset reg failed or setting the quiet reg failed for qno %lu\n",
				    (unsigned long)q_no);
			return -1;
		}

		reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
		lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				reg_val);

		reg_val = lio_read_csr64(lio_dev,
					 CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
			lio_dev_err(lio_dev, "unable to reset qno %lu\n",
				    (unsigned long)q_no);
			return -1;
		}
	}

	return 0;
}
Example #6
static void
lio_cn23xx_pf_interrupt_handler(void *dev)
{
	struct octeon_device	*oct = (struct octeon_device *)dev;
	struct lio_cn23xx_pf	*cn23xx = (struct lio_cn23xx_pf *)oct->chip;
	uint64_t		intr64;

	lio_dev_dbg(oct, "In %s octeon_dev @ %p\n", __func__, oct);
	intr64 = lio_read_csr64(oct, cn23xx->intr_sum_reg64);

	oct->int_status = 0;

	if (intr64 & LIO_CN23XX_INTR_ERR)
		lio_dev_err(oct, "Error Intr: 0x%016llx\n",
			    LIO_CAST64(intr64));

	if (oct->msix_on != LIO_FLAG_MSIX_ENABLED) {
		if (intr64 & LIO_CN23XX_INTR_PKT_DATA)
			oct->int_status |= LIO_DEV_INTR_PKT_DATA;
	}

	if (intr64 & (LIO_CN23XX_INTR_DMA0_FORCE))
		oct->int_status |= LIO_DEV_INTR_DMA0_FORCE;

	if (intr64 & (LIO_CN23XX_INTR_DMA1_FORCE))
		oct->int_status |= LIO_DEV_INTR_DMA1_FORCE;

	/* Clear the current interrupts */
	lio_write_csr64(oct, cn23xx->intr_sum_reg64, intr64);
}
Example #7
static int
cn23xx_vf_setup_global_input_regs(struct lio_device *lio_dev)
{
	uint64_t q_no;
	uint64_t d64;

	PMD_INIT_FUNC_TRACE();

	if (cn23xx_vf_reset_io_queues(lio_dev,
				      lio_dev->sriov_info.rings_per_vf))
		return -1;

	for (q_no = 0; q_no < (lio_dev->sriov_info.rings_per_vf); q_no++) {
		lio_write_csr64(lio_dev, CN23XX_SLI_IQ_DOORBELL(q_no),
				0xFFFFFFFF);

		d64 = lio_read_csr64(lio_dev,
				     CN23XX_SLI_IQ_INSTR_COUNT64(q_no));

		/* Clear bit 60 of the instruction count register */
		d64 &= 0xEFFFFFFFFFFFFFFFL;

		lio_write_csr64(lio_dev, CN23XX_SLI_IQ_INSTR_COUNT64(q_no),
				d64);

		/* Select ES, RO, NS, RDSIZE, DPTR Format #0 for
		 * the Input Queues
		 */
		lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				CN23XX_PKT_INPUT_CTL_MASK);
	}

	return 0;
}
Example #8
static int
lio_cn23xx_pf_soft_reset(struct octeon_device *oct)
{

	lio_write_csr64(oct, LIO_CN23XX_SLI_WIN_WR_MASK_REG, 0xFF);

	lio_dev_dbg(oct, "BIST enabled for CN23XX soft reset\n");

	lio_write_csr64(oct, LIO_CN23XX_SLI_SCRATCH1, 0x1234ULL);
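	/*
	 * A successful soft reset clears SCRATCH1; the check below treats a
	 * non-zero readback as a failed reset.
	 */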

	/* Initiate chip-wide soft reset */
	lio_pci_readq(oct, LIO_CN23XX_RST_SOFT_RST);
	lio_pci_writeq(oct, 1, LIO_CN23XX_RST_SOFT_RST);

	/* Wait for 100ms as Octeon resets. */
	lio_mdelay(100);

	if (lio_read_csr64(oct, LIO_CN23XX_SLI_SCRATCH1)) {
		lio_dev_err(oct, "Soft reset failed\n");
		return (1);
	}

	lio_dev_dbg(oct, "Reset completed\n");

	/* restore the  reset value */
	lio_write_csr64(oct, LIO_CN23XX_SLI_WIN_WR_MASK_REG, 0xFF);

	return (0);
}
Example #9
int
lio_cn23xx_pf_fw_loaded(struct octeon_device *oct)
{
	uint64_t	val;

	val = lio_read_csr64(oct, LIO_CN23XX_SLI_SCRATCH2);
	return ((val >> SCR2_BIT_FW_LOADED) & 1ULL);
}
Example #10
static void
lio_cn23xx_pf_disable_interrupt(struct octeon_device *oct, uint8_t intr_flag)
{
	struct lio_cn23xx_pf	*cn23xx = (struct lio_cn23xx_pf *)oct->chip;
	uint64_t		intr_val = 0;

	/* Disable Interrupts */
	if (intr_flag == OCTEON_ALL_INTR) {
		lio_write_csr64(oct, cn23xx->intr_enb_reg64, 0);
	} else if (intr_flag & OCTEON_OUTPUT_INTR) {
		intr_val = lio_read_csr64(oct, cn23xx->intr_enb_reg64);
		intr_val &= ~LIO_CN23XX_INTR_PKT_DATA;
		lio_write_csr64(oct, cn23xx->intr_enb_reg64, intr_val);
	}
}
Example #11
static void
lio_cn23xx_pf_enable_interrupt(struct octeon_device *oct, uint8_t intr_flag)
{
	struct lio_cn23xx_pf	*cn23xx = (struct lio_cn23xx_pf *)oct->chip;
	uint64_t		intr_val = 0;

	/* Divide the single write into multiple writes based on the flag. */
	/* Enable Interrupt */
	if (intr_flag == OCTEON_ALL_INTR) {
		lio_write_csr64(oct, cn23xx->intr_enb_reg64,
				cn23xx->intr_mask64);
	} else if (intr_flag & OCTEON_OUTPUT_INTR) {
		intr_val = lio_read_csr64(oct, cn23xx->intr_enb_reg64);
		intr_val |= LIO_CN23XX_INTR_PKT_DATA;
		lio_write_csr64(oct, cn23xx->intr_enb_reg64, intr_val);
	}
}
Example #12
static void
lio_cn23xx_pf_setup_iq_regs(struct octeon_device *oct, uint32_t iq_no)
{
	struct lio_instr_queue	*iq = oct->instr_queue[iq_no];
	uint64_t		pkt_in_done;

	iq_no += oct->sriov_info.pf_srn;

	/* Write the start of the input queue's ring and its size  */
	lio_write_csr64(oct, LIO_CN23XX_SLI_IQ_BASE_ADDR64(iq_no),
			iq->base_addr_dma);
	lio_write_csr32(oct, LIO_CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count);

	/*
	 * Remember the doorbell & instruction count register addr
	 * for this queue
	 */
	iq->doorbell_reg = LIO_CN23XX_SLI_IQ_DOORBELL(iq_no);
	iq->inst_cnt_reg = LIO_CN23XX_SLI_IQ_INSTR_COUNT64(iq_no);
	lio_dev_dbg(oct, "InstQ[%d]:dbell reg @ 0x%x instcnt_reg @ 0x%x\n",
		    iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

	/*
	 * Store the current instruction counter (used in flush_iq
	 * calculation)
	 */
	pkt_in_done = lio_read_csr64(oct, iq->inst_cnt_reg);

	if (oct->msix_on) {
		/* Set CINT_ENB to enable IQ interrupt   */
		lio_write_csr64(oct, iq->inst_cnt_reg,
				(pkt_in_done | LIO_CN23XX_INTR_CINT_ENB));
	} else {
		/*
		 * Clear the count by writing back what we read, but don't
		 * enable interrupts
		 */
		lio_write_csr64(oct, iq->inst_cnt_reg, pkt_in_done);
	}

	iq->reset_instr_cnt = 0;
}
Example #13
static uint64_t
lio_cn23xx_pf_msix_interrupt_handler(void *dev)
{
	struct lio_ioq_vector	*ioq_vector = (struct lio_ioq_vector *)dev;
	struct octeon_device	*oct = ioq_vector->oct_dev;
	struct lio_droq		*droq = oct->droq[ioq_vector->droq_index];
	uint64_t		pkts_sent;
	uint64_t		ret = 0;

	if (droq == NULL) {
		lio_dev_err(oct, "23XX bringup FIXME: oct pfnum:%d ioq_vector->ioq_num :%d droq is NULL\n",
			    oct->pf_num, ioq_vector->ioq_num);
		return (0);
	}
	pkts_sent = lio_read_csr64(oct, droq->pkts_sent_reg);

	/*
	 * If our device has interrupted, then proceed. Also check
	 * for all f's, which is what a failed PCI read returns when
	 * the interrupt was triggered on an error.
	 */
	if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
		return (ret);

	/* Write count reg in sli_pkt_cnts to clear these int. */
	if (pkts_sent & LIO_CN23XX_INTR_PO_INT)
		ret |= LIO_MSIX_PO_INT;

	if (pkts_sent & LIO_CN23XX_INTR_PI_INT)
		/* We will clear the count when we update the read_index. */
		ret |= LIO_MSIX_PI_INT;

	/*
	 * The PF never needs to handle the MSI-X mailbox interrupt here; it
	 * arrives on the last MSI-X vector.
	 */
	return (ret);
}
Example #14
int
cn23xx_vf_setup_device(struct lio_device *lio_dev)
{
	uint64_t reg_val;

	PMD_INIT_FUNC_TRACE();

	/* INPUT_CONTROL[RPVF] gives the VF IOq count */
	reg_val = lio_read_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(0));

	lio_dev->pf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
				CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
	lio_dev->vf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_VF_NUM_POS) &
				CN23XX_PKT_INPUT_CTL_VF_NUM_MASK;

	reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;

	lio_dev->sriov_info.rings_per_vf =
				reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;

	lio_dev->default_config = lio_get_conf(lio_dev);
	if (lio_dev->default_config == NULL)
		return -1;

	lio_dev->fn_list.setup_iq_regs		= cn23xx_vf_setup_iq_regs;
	lio_dev->fn_list.setup_oq_regs		= cn23xx_vf_setup_oq_regs;
	lio_dev->fn_list.setup_mbox		= cn23xx_vf_setup_mbox;
	lio_dev->fn_list.free_mbox		= cn23xx_vf_free_mbox;

	lio_dev->fn_list.setup_device_regs	= cn23xx_vf_setup_device_regs;

	lio_dev->fn_list.enable_io_queues	= cn23xx_vf_enable_io_queues;
	lio_dev->fn_list.disable_io_queues	= cn23xx_vf_disable_io_queues;

	return 0;
}
Example #15
static int
lio_cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
{
	struct lio_cn23xx_pf	*cn23xx = (struct lio_cn23xx_pf *)oct->chip;
	struct lio_instr_queue	*iq;
	uint64_t		intr_threshold;
	uint64_t		pf_num, reg_val;
	uint32_t		q_no, ern, srn;

	pf_num = oct->pf_num;

	srn = oct->sriov_info.pf_srn;
	ern = srn + oct->sriov_info.num_pf_rings;

	if (lio_cn23xx_pf_reset_io_queues(oct))
		return (-1);

	/*
	 * Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg
	 * for all queues. Only the PF can set these bits.
	 * bits 29:30 indicate the MAC num.
	 * bits 32:47 indicate the PVF num.
	 */
	for (q_no = 0; q_no < ern; q_no++) {
		reg_val = oct->pcie_port <<
			LIO_CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;

		reg_val |= pf_num << LIO_CN23XX_PKT_INPUT_CTL_PF_NUM_POS;

		lio_write_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				reg_val);
	}

	/*
	 * Select ES, RO, NS, RDSIZE, DPTR Format #0 for
	 * PF queues
	 */
	for (q_no = srn; q_no < ern; q_no++) {
		uint32_t	inst_cnt_reg;

		iq = oct->instr_queue[q_no];
		if (iq != NULL)
			inst_cnt_reg = iq->inst_cnt_reg;
		else
			inst_cnt_reg = LIO_CN23XX_SLI_IQ_INSTR_COUNT64(q_no);

		reg_val =
		    lio_read_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));

		reg_val |= LIO_CN23XX_PKT_INPUT_CTL_MASK;

		lio_write_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				reg_val);

		/* Set WMARK level for triggering PI_INT */
		/* intr_threshold = LIO_CN23XX_DEF_IQ_INTR_THRESHOLD & */
		intr_threshold = LIO_GET_IQ_INTR_PKT_CFG(cn23xx->conf) &
		    LIO_CN23XX_PKT_IN_DONE_WMARK_MASK;

		lio_write_csr64(oct, inst_cnt_reg,
				(lio_read_csr64(oct, inst_cnt_reg) &
				 ~(LIO_CN23XX_PKT_IN_DONE_WMARK_MASK <<
				   LIO_CN23XX_PKT_IN_DONE_WMARK_BIT_POS)) |
				(intr_threshold <<
				 LIO_CN23XX_PKT_IN_DONE_WMARK_BIT_POS));
	}
	return (0);
}
Example #16
static void
lio_cn23xx_pf_disable_io_queues(struct octeon_device *oct)
{
	volatile uint64_t	d64;
	volatile uint32_t	d32;
	int			loop;
	unsigned int		q_no;
	uint32_t		ern, srn;

	srn = oct->sriov_info.pf_srn;
	ern = srn + oct->num_iqs;

	/* Disable Input Queues. */
	for (q_no = srn; q_no < ern; q_no++) {
		loop = lio_ms_to_ticks(1000);

		/* start the Reset for a particular ring */
		d64 = lio_read_csr64(oct,
				     LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		d64 &= ~LIO_CN23XX_PKT_INPUT_CTL_RING_ENB;
		d64 |= LIO_CN23XX_PKT_INPUT_CTL_RST;
		lio_write_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				d64);

		/*
		 * Wait until hardware indicates that the particular IQ
		 * is out of reset.
		 */
		d64 = lio_read_csr64(oct, LIO_CN23XX_SLI_PKT_IOQ_RING_RST);
		while (!(d64 & BIT_ULL(q_no)) && loop--) {
			d64 = lio_read_csr64(oct,
					     LIO_CN23XX_SLI_PKT_IOQ_RING_RST);
			lio_sleep_timeout(1);
			loop--;
		}

		/* Reset the doorbell register for this Input Queue. */
		lio_write_csr32(oct, LIO_CN23XX_SLI_IQ_DOORBELL(q_no),
				0xFFFFFFFF);
		while (((lio_read_csr64(oct,
					LIO_CN23XX_SLI_IQ_DOORBELL(q_no))) !=
			0ULL) && loop--) {
			lio_sleep_timeout(1);
		}
	}

	/* Disable Output Queues. */
	for (q_no = srn; q_no < ern; q_no++) {
		loop = lio_ms_to_ticks(1000);

		/*
		 * Wait until hardware indicates that the particular IQ
		 * is out of reset. Note that SLI_PKT_RING_RST is
		 * common to both IQs and OQs.
		 */
		d64 = lio_read_csr64(oct, LIO_CN23XX_SLI_PKT_IOQ_RING_RST);
		while (!(d64 & BIT_ULL(q_no)) && loop--) {
			d64 = lio_read_csr64(oct,
					     LIO_CN23XX_SLI_PKT_IOQ_RING_RST);
			lio_sleep_timeout(1);
			loop--;
		}

		/* Reset the doorbell register for this Output Queue. */
		lio_write_csr32(oct, LIO_CN23XX_SLI_OQ_PKTS_CREDIT(q_no),
				0xFFFFFFFF);
		while ((lio_read_csr64(oct,
				       LIO_CN23XX_SLI_OQ_PKTS_CREDIT(q_no)) !=
			0ULL) && loop--) {
			lio_sleep_timeout(1);
		}

		/* clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
		d32 = lio_read_csr32(oct, LIO_CN23XX_SLI_OQ_PKTS_SENT(q_no));
		lio_write_csr32(oct, LIO_CN23XX_SLI_OQ_PKTS_SENT(q_no),	d32);
	}
}
Example #17
static int
lio_cn23xx_pf_enable_io_queues(struct octeon_device *oct)
{
	uint64_t	reg_val;
	uint32_t	ern, loop = BUSY_READING_REG_PF_LOOP_COUNT;
	uint32_t	q_no, srn;

	srn = oct->sriov_info.pf_srn;
	ern = srn + oct->num_iqs;

	for (q_no = srn; q_no < ern; q_no++) {
		/* set the corresponding IQ IS_64B bit */
		if (oct->io_qmask.iq64B & BIT_ULL(q_no - srn)) {
			reg_val = lio_read_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val = reg_val | LIO_CN23XX_PKT_INPUT_CTL_IS_64B;
			lio_write_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
					reg_val);
		}
		/* set the corresponding IQ ENB bit */
		if (oct->io_qmask.iq & BIT_ULL(q_no - srn)) {
			/*
			 * IOQs are in reset by default in PEM2 mode,
			 * clearing reset bit
			 */
			reg_val = lio_read_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));

			if (reg_val & LIO_CN23XX_PKT_INPUT_CTL_RST) {
				while ((reg_val &
					LIO_CN23XX_PKT_INPUT_CTL_RST) &&
				       !(reg_val &
					 LIO_CN23XX_PKT_INPUT_CTL_QUIET) &&
				       loop) {
					reg_val = lio_read_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
					loop--;
				}
				if (!loop) {
					lio_dev_err(oct, "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
						    q_no);
					return (-1);
				}
				reg_val = reg_val &
					~LIO_CN23XX_PKT_INPUT_CTL_RST;
				lio_write_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
					reg_val);

				reg_val = lio_read_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
				if (reg_val & LIO_CN23XX_PKT_INPUT_CTL_RST) {
					lio_dev_err(oct, "clearing the reset failed for qno: %u\n",
						    q_no);
					return (-1);
				}
			}
			reg_val = lio_read_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val = reg_val | LIO_CN23XX_PKT_INPUT_CTL_RING_ENB;
			lio_write_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
					reg_val);
		}
	}
	for (q_no = srn; q_no < ern; q_no++) {
		uint32_t	reg_val;
		/* set the corresponding OQ ENB bit */
		if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) {
			reg_val = lio_read_csr32(oct,
					LIO_CN23XX_SLI_OQ_PKT_CONTROL(q_no));
			reg_val = reg_val | LIO_CN23XX_PKT_OUTPUT_CTL_RING_ENB;
			lio_write_csr32(oct,
					LIO_CN23XX_SLI_OQ_PKT_CONTROL(q_no),
					reg_val);
		}
	}
	return (0);
}