/*
 * Program the global output-queue (DROQ) registers for every ring
 * assigned to this VF: write the credit doorbell, read-modify-write
 * the PKTS_SENT register, and configure PKT_CONTROL (pointer mode,
 * byte-swap and PCIe ordering/snoop attributes).
 */
static void cn23xx_vf_setup_global_output_regs(struct lio_device *lio_dev)
{
	uint32_t reg_val;
	uint32_t q_no;

	PMD_INIT_FUNC_TRACE();

	for (q_no = 0; q_no < lio_dev->sriov_info.rings_per_vf; q_no++) {
		/* Write all-ones to the per-queue credit doorbell as part
		 * of the (re)initialization sequence.
		 */
		lio_write_csr(lio_dev, CN23XX_SLI_OQ_PKTS_CREDIT(q_no),
			      0xFFFFFFFF);

		reg_val = lio_read_csr(lio_dev,
				       CN23XX_SLI_OQ_PKTS_SENT(q_no));

		/* NOTE(review): reg_val is uint32_t, so this 64-bit mask
		 * (all bits set except bit 60 — presumably meant to clear
		 * bit 60 of the 64-bit PKTS_SENT register) truncates to
		 * 0xFFFFFFFF and is effectively a no-op here.  The Linux
		 * liquidio VF driver carries the same pattern; confirm the
		 * intended access width against the CN23xx HRM before
		 * changing it.
		 */
		reg_val &= 0xEFFFFFFFFFFFFFFFL;

		lio_write_csr(lio_dev, CN23XX_SLI_OQ_PKTS_SENT(q_no),
			      reg_val);

		reg_val = lio_read_csr(lio_dev,
				       CN23XX_SLI_OQ_PKT_CONTROL(q_no));

		/* set IPTR & DPTR */
		reg_val |= (CN23XX_PKT_OUTPUT_CTL_IPTR |
			    CN23XX_PKT_OUTPUT_CTL_DPTR);

		/* reset BMODE */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);

		/* No Relaxed Ordering, No Snoop, 64-bit Byte swap
		 * for Output Queue Scatter List
		 * reset ROR_P, NSR_P
		 */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P);
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P);

		/* Scatter-list byte-swap (ES_P) depends on host endianness. */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P);
#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P);
#endif

		/* No Relaxed Ordering, No Snoop, 64-bit Byte swap
		 * for Output Queue Data
		 * reset ROR, NSR
		 */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR);
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR);
		/* set the ES bit */
		reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);

		/* write all the selected settings */
		lio_write_csr(lio_dev, CN23XX_SLI_OQ_PKT_CONTROL(q_no),
			      reg_val);
	}
}
/*
 * Program the hardware registers for a single output queue (DROQ) and
 * cache the mapped addresses of its PKTS_SENT / PKTS_CREDIT registers
 * in the droq structure for later fast access.
 */
static void cn23xx_vf_setup_oq_regs(struct lio_device *lio_dev,
				    uint32_t oq_no)
{
	struct lio_droq *droq = lio_dev->droq[oq_no];
	uint8_t *hw_base = (uint8_t *)lio_dev->hw_addr;

	PMD_INIT_FUNC_TRACE();

	/* Descriptor ring base (DMA address) and ring length. */
	lio_write_csr64(lio_dev, CN23XX_SLI_OQ_BASE_ADDR64(oq_no),
			droq->desc_ring_dma);
	lio_write_csr(lio_dev, CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count);

	/* Buffer size in the low 16 bits, response-header size above. */
	lio_write_csr(lio_dev, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
		      droq->buffer_size | (OCTEON_RH_SIZE << 16));

	/* Remember the mapped pkts_sent and pkts_credit registers. */
	droq->pkts_sent_reg = hw_base + CN23XX_SLI_OQ_PKTS_SENT(oq_no);
	droq->pkts_credit_reg = hw_base + CN23XX_SLI_OQ_PKTS_CREDIT(oq_no);
}