/**
 * Configure a port for internal and/or external loopback. Internal loopback
 * causes packets sent by the port to be received by Octeon. External loopback
 * causes packets received from the wire to be sent back out again.
 *
 * @ipd_port: IPD/PKO port to loopback.
 * @enable_internal: Non-zero to enable internal loopback
 * @enable_external: Non-zero to enable external loopback
 *
 * Returns zero on success, negative on failure.
 */
int __cvmx_helper_rgmii_configure_loopback(int ipd_port, int enable_internal,
					   int enable_external)
{
	int interface = cvmx_helper_get_interface_num(ipd_port);
	int index = cvmx_helper_get_interface_index_num(ipd_port);
	int original_enable;
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	union cvmx_asxx_prt_loop asxx_prt_loop;

	/* Read the current enable state and save it */
	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	original_enable = gmx_cfg.s.en;
	/* Force port to be disabled */
	gmx_cfg.s.en = 0;
	if (enable_internal) {
		/* Force speed if we're doing internal loopback */
		gmx_cfg.s.duplex = 1;
		gmx_cfg.s.slottime = 1;
		gmx_cfg.s.speed = 1;
		cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
	}
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

	/* Set the loopback bits */
	asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
	if (enable_internal)
		asxx_prt_loop.s.int_loop |= 1 << index;
	else
		asxx_prt_loop.s.int_loop &= ~(1 << index);
	if (enable_external)
		asxx_prt_loop.s.ext_loop |= 1 << index;
	else
		asxx_prt_loop.s.ext_loop &= ~(1 << index);
	cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), asxx_prt_loop.u64);

	/* Force enables in internal loopback */
	if (enable_internal) {
		uint64_t tmp;
		tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
		cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface),
			       (1 << index) | tmp);
		tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
		cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
			       (1 << index) | tmp);
		original_enable = 1;
	}

	/* Restore the enable state */
	gmx_cfg.s.en = original_enable;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
	return 0;
}
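
/*
 * A minimal usage sketch, not part of the original source: enable internal
 * loopback on one RGMII port, run traffic, then return the port to normal
 * operation. The wrapper name and the IPD port value 16 (interface 1,
 * index 0) are illustrative assumptions only.
 */
static void example_rgmii_loopback_test(void)
{
	int ipd_port = 16;	/* hypothetical IPD/PKO port */

	/* Transmitted packets are looped back into Octeon's RX path */
	if (__cvmx_helper_rgmii_configure_loopback(ipd_port, 1, 0) < 0)
		cvmx_dprintf("Port %d: failed to enable internal loopback\n",
			     ipd_port);

	/* ... send and verify test traffic here ... */

	/* Disable both loopback modes again */
	__cvmx_helper_rgmii_configure_loopback(ipd_port, 0, 0);
}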
/**
 * Put an RGMII interface in loopback mode. Packets sent out by the
 * port will be received back on the same port, and packets received
 * from the wire will be echoed back out.
 *
 * @param port   IPD port number to loop.
 */
void cvmx_helper_rgmii_internal_loopback(int port)
{
    int interface = (port >> 4) & 1;
    int index = port & 0xf;
    uint64_t tmp;

    cvmx_gmxx_prtx_cfg_t gmx_cfg;
    gmx_cfg.u64 = 0;
    /* Force 1Gbps full duplex timing since there is no link partner */
    gmx_cfg.s.duplex = 1;
    gmx_cfg.s.slottime = 1;
    gmx_cfg.s.speed = 1;
    cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
    cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
    cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
    cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
    /* Turn on the ASX loopback bit for this port */
    tmp = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
    cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), (1 << index) | tmp);
    /* Force the lowest level TX and RX enables */
    tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
    cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), (1 << index) | tmp);
    tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
    cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), (1 << index) | tmp);
    /* Enable the port with the forced settings */
    gmx_cfg.s.en = 1;
    cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
}
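
/*
 * Usage sketch (assumption, not from the original source): put IPD port 0,
 * which decodes to interface 0, index 0 above, into loopback before running
 * a self test. The wrapper name is hypothetical.
 */
static void example_rgmii_self_test_setup(void)
{
    /* After this call, packets sent on port 0 are received back on port 0 */
    cvmx_helper_rgmii_internal_loopback(0);
}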
/**
 * @INTERNAL
 * Configure an IPD/PKO port for the specified link state. This
 * function does not influence auto negotiation at the PHY level.
 * The passed link state must always match the link state returned
 * by cvmx_helper_link_get(). It is normally best to use
 * cvmx_helper_link_autoconf() instead.
 *
 * @param ipd_port  IPD/PKO port to configure
 * @param link_info The new link state
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_rgmii_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
{
    int result = 0;
    int interface = cvmx_helper_get_interface_num(ipd_port);
    int index = cvmx_helper_get_interface_index_num(ipd_port);
    cvmx_gmxx_prtx_cfg_t original_gmx_cfg;
    cvmx_gmxx_prtx_cfg_t new_gmx_cfg;
    cvmx_pko_mem_queue_qos_t pko_mem_queue_qos;
    cvmx_pko_mem_queue_qos_t pko_mem_queue_qos_save[16];
    cvmx_gmxx_tx_ovr_bp_t gmx_tx_ovr_bp;
    cvmx_gmxx_tx_ovr_bp_t gmx_tx_ovr_bp_save;
    int i;

    /* Ignore speed sets in the simulator */
    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
        return 0;

    /* Read the current settings so we know the current enable state */
    original_gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
    new_gmx_cfg = original_gmx_cfg;

    /* Disable the lowest level RX */
    cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
                   cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) & ~(1<<index));

    memset(pko_mem_queue_qos_save, 0, sizeof(pko_mem_queue_qos_save));
    /* Disable all queues so that TX should become idle */
    for (i=0; i<cvmx_pko_get_num_queues(ipd_port); i++)
    {
        int queue = cvmx_pko_get_base_queue(ipd_port) + i;
        cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
        pko_mem_queue_qos.u64 = cvmx_read_csr(CVMX_PKO_MEM_QUEUE_QOS);
        pko_mem_queue_qos.s.pid = ipd_port;
        pko_mem_queue_qos.s.qid = queue;
        pko_mem_queue_qos_save[i] = pko_mem_queue_qos;
        pko_mem_queue_qos.s.qos_mask = 0;
        cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos.u64);
    }

    /* Disable backpressure */
    gmx_tx_ovr_bp.u64 = cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
    gmx_tx_ovr_bp_save = gmx_tx_ovr_bp;
    gmx_tx_ovr_bp.s.bp &= ~(1<<index);
    gmx_tx_ovr_bp.s.en |= 1<<index;
    cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp.u64);
    cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));

    /* Poll the GMX state machine, waiting for it to become idle. Preferably we
       should only change speed when it is idle. If it doesn't become idle we
       will still do the speed change, but there is a slight chance that GMX
       will lock up. */
    cvmx_write_csr(CVMX_NPI_DBG_SELECT, interface*0x800 + index*0x100 + 0x880);
    CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, cvmx_dbg_data_t, data&7, ==, 0, 10000);
    CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, cvmx_dbg_data_t, data&0xf, ==, 0, 10000);

    /* Disable the port before we make any changes */
    new_gmx_cfg.s.en = 0;
    cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));

    /* Set full/half duplex */
    if (!link_info.s.link_up)
        new_gmx_cfg.s.duplex = 1;   /* Force full duplex on down links */
    else
        new_gmx_cfg.s.duplex = link_info.s.full_duplex;

    /* Set the link speed. Anything unknown is set to 1Gbps */
    if (link_info.s.speed == 10)
    {
        new_gmx_cfg.s.slottime = 0;
        new_gmx_cfg.s.speed = 0;
    }
    else if (link_info.s.speed == 100)
    {
        new_gmx_cfg.s.slottime = 0;
        new_gmx_cfg.s.speed = 0;
    }
    else
    {
        new_gmx_cfg.s.slottime = 1;
        new_gmx_cfg.s.speed = 1;
    }

    /* Adjust the clocks */
    if (link_info.s.speed == 10)
    {
        cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 50);
        cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
        cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
    }
    else if (link_info.s.speed == 100)
    {
        cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 5);
        cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
        cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
    }
    else
    {
        cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
        cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
        cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
    }

    if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
    {
        if ((link_info.s.speed == 10) || (link_info.s.speed == 100))
        {
            cvmx_gmxx_inf_mode_t mode;
            mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));

            /*
            ** Port  .en  .type  .p0mii  Configuration
            ** ----  ---  -----  ------  -----------------------------------------
            **  X      0     X      X    All links are disabled.
            **  0      1     X      0    Port 0 is RGMII
            **  0      1     X      1    Port 0 is MII
            **  1      1     0      X    Ports 1 and 2 are configured as RGMII ports.
            **  1      1     1      X    Port 1: GMII/MII; Port 2: disabled. GMII or
            **                           MII port is selected by GMX_PRT1_CFG[SPEED].
            */

            /* In MII mode, CLK_CNT = 1. */
            if (((index == 0) && (mode.s.p0mii == 1)) || ((index != 0) && (mode.s.type == 1)))
            {
                cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
            }
        }
    }

    /* Do a read to make sure all setup stuff is complete */
    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));

    /* Save the new GMX setting without enabling the port */
    cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);

    /* Enable the lowest level RX */
    cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
                   cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) | (1<<index));

    /* Re-enable the TX path */
    for (i=0; i<cvmx_pko_get_num_queues(ipd_port); i++)
    {
        int queue = cvmx_pko_get_base_queue(ipd_port) + i;
        cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
        cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos_save[i].u64);
    }

    /* Restore backpressure */
    cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp_save.u64);

    /* Restore the GMX enable state. Port config is complete */
    new_gmx_cfg.s.en = original_gmx_cfg.s.en;
    cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);

    return result;
}
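
/*
 * Usage sketch (assumption, not part of the original source): re-sync the GMX
 * port configuration with the state the PHY actually negotiated, since the
 * comment above requires the passed state to match cvmx_helper_link_get().
 * The wrapper name is hypothetical; most callers should simply use
 * cvmx_helper_link_autoconf() as recommended.
 */
static int example_rgmii_sync_link(int ipd_port)
{
    cvmx_helper_link_info_t link_info = cvmx_helper_link_get(ipd_port);

    return __cvmx_helper_rgmii_link_set(ipd_port, link_info);
}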