int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode)
{
    union cvmx_spxx_dbg_deskew_ctl spxx_dbg_deskew_ctl;
    union cvmx_spxx_clk_ctl spxx_clk_ctl;
    union cvmx_spxx_bist_stat spxx_bist_stat;
    union cvmx_spxx_int_msk spxx_int_msk;
    union cvmx_stxx_int_msk stxx_int_msk;
    union cvmx_spxx_trn4_ctl spxx_trn4_ctl;
    int index;
    uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;

    /* Disable SPI error events while BIST runs */
    spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface));
    cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
    stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface));
    cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);

    /* Run BIST on the SPI interface */
    cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), 0);
    cvmx_write_csr(CVMX_STXX_COM_CTL(interface), 0);
    spxx_clk_ctl.u64 = 0;
    spxx_clk_ctl.s.runbist = 1;
    cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
    cvmx_wait(10 * MS);
    spxx_bist_stat.u64 = cvmx_read_csr(CVMX_SPXX_BIST_STAT(interface));
    if (spxx_bist_stat.s.stat0)
        cvmx_dprintf("ERROR SPI%d: BIST failed on receive datapath FIFO\n",
                     interface);
    if (spxx_bist_stat.s.stat1)
        cvmx_dprintf("ERROR SPI%d: BIST failed on RX calendar table\n",
                     interface);
    if (spxx_bist_stat.s.stat2)
        cvmx_dprintf("ERROR SPI%d: BIST failed on TX calendar table\n",
                     interface);

    /* Clear the calendar table after BIST to fix parity errors */
    for (index = 0; index < 32; index++) {
        union cvmx_srxx_spi4_calx srxx_spi4_calx;
        union cvmx_stxx_spi4_calx stxx_spi4_calx;

        srxx_spi4_calx.u64 = 0;
        srxx_spi4_calx.s.oddpar = 1;
        cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface),
                       srxx_spi4_calx.u64);

        stxx_spi4_calx.u64 = 0;
        stxx_spi4_calx.s.oddpar = 1;
        cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface),
                       stxx_spi4_calx.u64);
    }

    /* Clear any interrupts BIST generated and restore the interrupt masks */
    cvmx_write_csr(CVMX_SPXX_INT_REG(interface),
                   cvmx_read_csr(CVMX_SPXX_INT_REG(interface)));
    cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64);
    cvmx_write_csr(CVMX_STXX_INT_REG(interface),
                   cvmx_read_csr(CVMX_STXX_INT_REG(interface)));
    cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64);

    /* Setup the CLKDLY right in the middle */
    spxx_clk_ctl.u64 = 0;
    spxx_clk_ctl.s.seetrn = 0;
    spxx_clk_ctl.s.clkdly = 0x10;
    spxx_clk_ctl.s.runbist = 0;
    spxx_clk_ctl.s.statdrv = 0;

    spxx_clk_ctl.s.statrcv = 1;
    spxx_clk_ctl.s.sndtrn = 0;
    spxx_clk_ctl.s.drptrn = 0;
    spxx_clk_ctl.s.rcvtrn = 0;
    spxx_clk_ctl.s.srxdlck = 0;
    cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
    cvmx_wait(100 * MS);

    /* Reset the SRX DLL */
    spxx_clk_ctl.s.srxdlck = 1;
    cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);

    /* Wait for the SPI4 RX DLL to lock */
    cvmx_wait(100 * MS);

    /* Enable dynamic alignment; zero the union first so reserved fields
       are not written with uninitialized stack data */
    spxx_trn4_ctl.u64 = 0;
    spxx_trn4_ctl.s.trntest = 0;
    spxx_trn4_ctl.s.jitter = 1;
    spxx_trn4_ctl.s.clr_boot = 1;
    spxx_trn4_ctl.s.set_boot = 0;
    if (OCTEON_IS_MODEL(OCTEON_CN58XX))
        spxx_trn4_ctl.s.maxdist = 3;
    else
        spxx_trn4_ctl.s.maxdist = 8;
    spxx_trn4_ctl.s.macro_en = 1;
    spxx_trn4_ctl.s.mux_en = 1;
    cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);

    spxx_dbg_deskew_ctl.u64 = 0;
    cvmx_write_csr(CVMX_SPXX_DBG_DESKEW_CTL(interface),
                   spxx_dbg_deskew_ctl.u64);

    return 0;
}
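For context: cvmx_spi_reset_cb() is normally not called directly; the cvmx-spi
helper layer invokes it through its callback table when an interface is started
or restarted. A minimal sketch of installing it, assuming the
cvmx_spi_callbacks_t table and its get/set accessors from cvmx-spi.h:

/* Sketch: install the reset step above as the SPI reset callback.
 * Assumes cvmx_spi_get_callbacks()/cvmx_spi_set_callbacks() as declared
 * in cvmx-spi.h. */
cvmx_spi_callbacks_t callbacks;

cvmx_spi_get_callbacks(&callbacks);
callbacks.reset_cb = cvmx_spi_reset_cb;
cvmx_spi_set_callbacks(&callbacks);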
Example #2
static void cvm_oct_rgmii_poll(struct ifnet *ifp)
{
	cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
	cvmx_helper_link_info_t link_info;

	/* Take the global register lock since we are going to touch
	   registers that affect more than one port */
	mtx_lock_spin(&global_register_lock);

	link_info = cvmx_helper_link_get(priv->port);
	if (link_info.u64 == priv->link_info) {

		/* If the 10Mbps preamble workaround is supported and we're
		   running at 10Mbps, we may need to do some special checking */
		if (USE_10MBPS_PREAMBLE_WORKAROUND && (link_info.s.speed == 10)) {

			/* Read the GMXX_RXX_INT_REG[PCTERR] bit and
			   see if we are getting preamble errors */
			int interface = INTERFACE(priv->port);
			int index = INDEX(priv->port);
			cvmx_gmxx_rxx_int_reg_t gmxx_rxx_int_reg;
			gmxx_rxx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, interface));
			if (gmxx_rxx_int_reg.s.pcterr) {

				/* We are getting preamble errors at 10Mbps.
				   Most likely the PHY is giving us packets
				   with misaligned preambles. In order to get
				   these packets we need to disable preamble
				   checking and do it in software */
				cvmx_gmxx_rxx_frm_ctl_t gmxx_rxx_frm_ctl;
				cvmx_ipd_sub_port_fcs_t ipd_sub_port_fcs;

				/* Disable preamble checking */
				gmxx_rxx_frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
				gmxx_rxx_frm_ctl.s.pre_chk = 0;
				cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface), gmxx_rxx_frm_ctl.u64);

				/* Disable FCS stripping */
				ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
				ipd_sub_port_fcs.s.port_bit &= 0xffffffffull ^ (1ull<<priv->port);
				cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);

				/* Clear any error bits */
				cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface), gmxx_rxx_int_reg.u64);
				DEBUGPRINT("%s: Using 10Mbps with software preamble removal\n", if_name(ifp));
			}
		}
		mtx_unlock_spin(&global_register_lock);
		return;
	}

	/* If the 10Mbps preamble workaround is allowed we need to turn on
	   preamble checking, FCS stripping, and clear the error bits on
	   every speed change. If errors occur during 10Mbps operation
	   the code above will change this configuration */
	if (USE_10MBPS_PREAMBLE_WORKAROUND) {

		cvmx_gmxx_rxx_frm_ctl_t gmxx_rxx_frm_ctl;
		cvmx_ipd_sub_port_fcs_t ipd_sub_port_fcs;
		cvmx_gmxx_rxx_int_reg_t gmxx_rxx_int_reg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		/* Enable preamble checking */
		gmxx_rxx_frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
		gmxx_rxx_frm_ctl.s.pre_chk = 1;
		cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface), gmxx_rxx_frm_ctl.u64);
		/* Enable FCS stripping */
		ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
		ipd_sub_port_fcs.s.port_bit |= 1ull<<priv->port;
		cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);
		/* Clear any error bits */
		gmxx_rxx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, interface));
		cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface), gmxx_rxx_int_reg.u64);
	}

	if (priv->miibus == NULL) {
		link_info = cvmx_helper_link_autoconf(priv->port);
		priv->link_info = link_info.u64;
		priv->need_link_update = 1;
	}
	mtx_unlock_spin(&global_register_lock);
}
Example #3
static int cvm_oct_spi_rml_interrupt(void *dev_id)
{
	int return_status = FILTER_STRAY;
	cvmx_npi_rsl_int_blocks_t rsl_int_blocks;

	/* Check and see if this interrupt was caused by one of the SPI blocks */
	rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS);
	if (rsl_int_blocks.s.spx1) { /* 19 - SPX1_INT_REG & STX1_INT_REG */

		cvmx_spxx_int_reg_t spx_int_reg;
		cvmx_stxx_int_reg_t stx_int_reg;

		spx_int_reg.u64 = cvmx_read_csr(CVMX_SPXX_INT_REG(1));
		cvmx_write_csr(CVMX_SPXX_INT_REG(1), spx_int_reg.u64);
		if (!need_retrain[1]) {

			spx_int_reg.u64 &= cvmx_read_csr(CVMX_SPXX_INT_MSK(1));
			if (spx_int_reg.s.spf)
				printf("SPI1: SRX Spi4 interface down\n");
			if (spx_int_reg.s.calerr)
				printf("SPI1: SRX Spi4 Calendar table parity error\n");
			if (spx_int_reg.s.syncerr)
				printf("SPI1: SRX Consecutive Spi4 DIP4 errors have exceeded SPX_ERR_CTL[ERRCNT]\n");
			if (spx_int_reg.s.diperr)
				printf("SPI1: SRX Spi4 DIP4 error\n");
			if (spx_int_reg.s.tpaovr)
				printf("SPI1: SRX Selected port has hit TPA overflow\n");
			if (spx_int_reg.s.rsverr)
				printf("SPI1: SRX Spi4 reserved control word detected\n");
			if (spx_int_reg.s.drwnng)
				printf("SPI1: SRX Spi4 receive FIFO drowning/overflow\n");
			if (spx_int_reg.s.clserr)
				printf("SPI1: SRX Spi4 packet closed on non-16B alignment without EOP\n");
			if (spx_int_reg.s.spiovr)
				printf("SPI1: SRX Spi4 async FIFO overflow\n");
			if (spx_int_reg.s.abnorm)
				printf("SPI1: SRX Abnormal packet termination (ERR bit)\n");
			if (spx_int_reg.s.prtnxa)
				printf("SPI1: SRX Port out of range\n");
		}

		stx_int_reg.u64 = cvmx_read_csr(CVMX_STXX_INT_REG(1));
		cvmx_write_csr(CVMX_STXX_INT_REG(1), stx_int_reg.u64);
		if (!need_retrain[1]) {

			stx_int_reg.u64 &= cvmx_read_csr(CVMX_STXX_INT_MSK(1));
			if (stx_int_reg.s.syncerr)
				printf("SPI1: STX Interface encountered a fatal error\n");
			if (stx_int_reg.s.frmerr)
				printf("SPI1: STX FRMCNT has exceeded STX_DIP_CNT[MAXFRM]\n");
			if (stx_int_reg.s.unxfrm)
				printf("SPI1: STX Unexpected framing sequence\n");
			if (stx_int_reg.s.nosync)
				printf("SPI1: STX ERRCNT has exceeded STX_DIP_CNT[MAXDIP]\n");
			if (stx_int_reg.s.diperr)
				printf("SPI1: STX DIP2 error on the Spi4 Status channel\n");
			if (stx_int_reg.s.datovr)
				printf("SPI1: STX Spi4 FIFO overflow error\n");
			if (stx_int_reg.s.ovrbst)
				printf("SPI1: STX Transmit packet burst too big\n");
			if (stx_int_reg.s.calpar1)
				printf("SPI1: STX Calendar Table Parity Error Bank1\n");
			if (stx_int_reg.s.calpar0)
				printf("SPI1: STX Calendar Table Parity Error Bank0\n");
		}

		cvmx_write_csr(CVMX_SPXX_INT_MSK(1), 0);
		cvmx_write_csr(CVMX_STXX_INT_MSK(1), 0);
		need_retrain[1] = 1;
		return_status = FILTER_HANDLED;
	}

	if (rsl_int_blocks.s.spx0) { /* 18 - SPX0_INT_REG & STX0_INT_REG */
		cvmx_spxx_int_reg_t spx_int_reg;
		cvmx_stxx_int_reg_t stx_int_reg;

		spx_int_reg.u64 = cvmx_read_csr(CVMX_SPXX_INT_REG(0));
		cvmx_write_csr(CVMX_SPXX_INT_REG(0), spx_int_reg.u64);
		if (!need_retrain[0]) {

			spx_int_reg.u64 &= cvmx_read_csr(CVMX_SPXX_INT_MSK(0));
			if (spx_int_reg.s.spf)
				printf("SPI0: SRX Spi4 interface down\n");
			if (spx_int_reg.s.calerr)
				printf("SPI0: SRX Spi4 Calendar table parity error\n");
			if (spx_int_reg.s.syncerr)
				printf("SPI0: SRX Consecutive Spi4 DIP4 errors have exceeded SPX_ERR_CTL[ERRCNT]\n");
			if (spx_int_reg.s.diperr)
				printf("SPI0: SRX Spi4 DIP4 error\n");
			if (spx_int_reg.s.tpaovr)
				printf("SPI0: SRX Selected port has hit TPA overflow\n");
			if (spx_int_reg.s.rsverr)
				printf("SPI0: SRX Spi4 reserved control word detected\n");
			if (spx_int_reg.s.drwnng)
				printf("SPI0: SRX Spi4 receive FIFO drowning/overflow\n");
			if (spx_int_reg.s.clserr)
				printf("SPI0: SRX Spi4 packet closed on non-16B alignment without EOP\n");
			if (spx_int_reg.s.spiovr)
				printf("SPI0: SRX Spi4 async FIFO overflow\n");
			if (spx_int_reg.s.abnorm)
				printf("SPI0: SRX Abnormal packet termination (ERR bit)\n");
			if (spx_int_reg.s.prtnxa)
				printf("SPI0: SRX Port out of range\n");
		}

		stx_int_reg.u64 = cvmx_read_csr(CVMX_STXX_INT_REG(0));
		cvmx_write_csr(CVMX_STXX_INT_REG(0), stx_int_reg.u64);
		if (!need_retrain[0]) {

			stx_int_reg.u64 &= cvmx_read_csr(CVMX_STXX_INT_MSK(0));
			if (stx_int_reg.s.syncerr)
				printf("SPI0: STX Interface encountered a fatal error\n");
			if (stx_int_reg.s.frmerr)
				printf("SPI0: STX FRMCNT has exceeded STX_DIP_CNT[MAXFRM]\n");
			if (stx_int_reg.s.unxfrm)
				printf("SPI0: STX Unexpected framing sequence\n");
			if (stx_int_reg.s.nosync)
				printf("SPI0: STX ERRCNT has exceeded STX_DIP_CNT[MAXDIP]\n");
			if (stx_int_reg.s.diperr)
				printf("SPI0: STX DIP2 error on the Spi4 Status channel\n");
			if (stx_int_reg.s.datovr)
				printf("SPI0: STX Spi4 FIFO overflow error\n");
			if (stx_int_reg.s.ovrbst)
				printf("SPI0: STX Transmit packet burst too big\n");
			if (stx_int_reg.s.calpar1)
				printf("SPI0: STX Calendar Table Parity Error Bank1\n");
			if (stx_int_reg.s.calpar0)
				printf("SPI0: STX Calendar Table Parity Error Bank0\n");
		}

		cvmx_write_csr(CVMX_SPXX_INT_MSK(0), 0);
		cvmx_write_csr(CVMX_STXX_INT_MSK(0), 0);
		need_retrain[0] = 1;
		return_status = FILTER_HANDLED;
	}

	return return_status;
}
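After this handler masks the SPX/STX interrupts and sets need_retrain, some
periodic context has to perform the actual retraining. A hypothetical sketch of
that poll step (the loop, function name, and 10-second timeout are assumptions,
not shown in this source):

/* Hypothetical poll step that consumes need_retrain[] set above. */
static void cvm_oct_spi_poll_sketch(void)
{
	int interface;

	for (interface = 0; interface < 2; interface++) {
		if (!need_retrain[interface])
			continue;
		/* Retrain the link; the interrupt masks cleared above would
		 * also need to be restored after a successful restart. */
		if (cvmx_spi_restart_interface(interface,
		    CVMX_SPI_MODE_DUPLEX, 10) == 0)
			need_retrain[interface] = 0;
	}
}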
Example #4
static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
{
	struct octeon_cf_data *ocd = dev->link->ap->dev->platform_data;
	union cvmx_mio_boot_dma_timx dma_tim;
	unsigned int oe_a;
	unsigned int oe_n;
	unsigned int dma_ackh;
	unsigned int dma_arq;
	unsigned int pause;
	unsigned int T0, Tkr, Td;
	unsigned int tim_mult;

	const struct ata_timing *timing;

	timing = ata_timing_find_mode(dev->dma_mode);
	T0	= timing->cycle;
	Td	= timing->active;
	Tkr	= timing->recover;
	dma_ackh = timing->dmack_hold;

	dma_tim.u64 = 0;
	/* dma_tim.s.tim_mult = 0 --> 4x */
	tim_mult = 4;

	/* not spec'ed, value in eclocks, not affected by tim_mult */
	dma_arq = 8;
	pause = 25 - dma_arq * 1000 /
		(octeon_get_clock_rate() / 1000000); /* Tz */

	oe_a = Td;
	/* Tkr from cf spec, lengthened to meet T0 */
	oe_n = max(T0 - oe_a, Tkr);

	dma_tim.s.dmack_pi = 1;

	dma_tim.s.oe_n = ns_to_tim_reg(tim_mult, oe_n);
	dma_tim.s.oe_a = ns_to_tim_reg(tim_mult, oe_a);

	/*
	 * This is tI; the CF spec says 0, but Sony CF cards require
	 * more, so we use 20 ns.
	 */
	dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, 20);
	dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh);

	dma_tim.s.dmarq = dma_arq;
	dma_tim.s.pause = ns_to_tim_reg(tim_mult, pause);

	dma_tim.s.rd_dly = 0;	/* Sample right on edge */

	/*  writes only */
	dma_tim.s.we_n = ns_to_tim_reg(tim_mult, oe_n);
	dma_tim.s.we_a = ns_to_tim_reg(tim_mult, oe_a);

	pr_debug("ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60,
		 ns_to_tim_reg(tim_mult, 60));
	pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: "
		 "%d, dmarq: %d, pause: %d\n",
		 dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s,
		 dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause);

	cvmx_write_csr(CVMX_MIO_BOOT_DMA_TIMX(ocd->dma_engine),
		       dma_tim.u64);

}
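ns_to_tim_reg() is not part of this snippet; it converts a nanosecond value
into boot-bus eclock ticks at the given timing multiplier. A sketch of the
expected arithmetic, rounding up so a timing is never shortened:

/* Sketch of the helper used above: nanoseconds -> eclock ticks / tim_mult,
 * rounded up. Assumes the kernel's DIV_ROUND_UP() macro. */
static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs)
{
	/* eclocks per microsecond, times nanoseconds, over 1000 * tim_mult */
	return DIV_ROUND_UP(nsecs * (octeon_get_clock_rate() / 1000000),
			    1000 * tim_mult);
}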
Example #5
static int cvm_oct_rgmii_rml_interrupt(void *dev_id)
{
	cvmx_npi_rsl_int_blocks_t rsl_int_blocks;
	int index;
	int return_status = FILTER_STRAY;

	rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS);

	/* Check and see if this interrupt was caused by the GMX0 block */
	if (rsl_int_blocks.s.gmx0) {

		int interface = 0;
		/* Loop through every port of this interface */
		for (index = 0; index < cvmx_helper_ports_on_interface(interface); index++) {

			/* Read the GMX interrupt status bits */
			cvmx_gmxx_rxx_int_reg_t gmx_rx_int_reg;
			gmx_rx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, interface));
			gmx_rx_int_reg.u64 &= cvmx_read_csr(CVMX_GMXX_RXX_INT_EN(index, interface));
			/* Poll the port if inband status changed */
			if (gmx_rx_int_reg.s.phy_dupx || gmx_rx_int_reg.s.phy_link || gmx_rx_int_reg.s.phy_spd) {

				struct ifnet *ifp = cvm_oct_device[cvmx_helper_get_ipd_port(interface, index)];
				if (ifp)
					cvm_oct_rgmii_poll(ifp);
				gmx_rx_int_reg.u64 = 0;
				gmx_rx_int_reg.s.phy_dupx = 1;
				gmx_rx_int_reg.s.phy_link = 1;
				gmx_rx_int_reg.s.phy_spd = 1;
				cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface), gmx_rx_int_reg.u64);
				return_status = FILTER_HANDLED;
			}
		}
	}

	/* Check and see if this interrupt was caused by the GMX1 block */
	if (rsl_int_blocks.s.gmx1) {

		int interface = 1;
		/* Loop through every port of this interface */
		for (index = 0; index < cvmx_helper_ports_on_interface(interface); index++) {

			/* Read the GMX interrupt status bits */
			cvmx_gmxx_rxx_int_reg_t gmx_rx_int_reg;
			gmx_rx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, interface));
			gmx_rx_int_reg.u64 &= cvmx_read_csr(CVMX_GMXX_RXX_INT_EN(index, interface));
			/* Poll the port if inband status changed */
			if (gmx_rx_int_reg.s.phy_dupx || gmx_rx_int_reg.s.phy_link || gmx_rx_int_reg.s.phy_spd) {

				struct ifnet *ifp = cvm_oct_device[cvmx_helper_get_ipd_port(interface, index)];
				if (ifp)
					cvm_oct_rgmii_poll(ifp);
				gmx_rx_int_reg.u64 = 0;
				gmx_rx_int_reg.s.phy_dupx = 1;
				gmx_rx_int_reg.s.phy_link = 1;
				gmx_rx_int_reg.s.phy_spd = 1;
				cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface), gmx_rx_int_reg.u64);
				return_status = FILTER_HANDLED;
			}
		}
	}
	return return_status;
}
Example #6
void oct_directfw_set(void)
{
    uint32_t port;
    oct_directfw = srv_dp_sync->dp_directfw_able;
    oct_directfw_sleeptime = srv_dp_sync->dp_directfw_sleep_time;
    if(oct_directfw)
    {
        for(port = OCT_PHY_PORT_FIRST; port < OCT_PHY_PORT_MAX; port++)
        {
            cvmx_pip_port_tag_cfg_t tag_config;
            /*config group*/
            tag_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
            tag_config.s.grp = FROM_INPUT_PORT_GROUP;
            if( running_core_num == 4)
            {
                /*config tuple of hash value*/
                tag_config.cn70xx.ip4_src_flag = 0;
                tag_config.cn70xx.ip4_dst_flag = 0;
                tag_config.cn70xx.ip4_sprt_flag = 0;
                tag_config.cn70xx.ip4_dprt_flag = 0;
                tag_config.cn70xx.ip4_pctl_flag = 0;

                tag_config.cn70xx.grptag = 0;
                tag_config.cn70xx.grptagmask = 0;
                tag_config.cn70xx.grptagbase = 0;
            }

            cvmx_write_csr(CVMX_PIP_PRT_TAGX(port), tag_config.u64);

            cvmx_wait_usec(1000);
        }
    }
    else
    {
        for(port = OCT_PHY_PORT_FIRST; port < OCT_PHY_PORT_MAX; port++)
        {
            cvmx_pip_port_tag_cfg_t tag_config;
            /*config group*/
            tag_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
            tag_config.s.grp = FROM_INPUT_PORT_GROUP;
            if( running_core_num == 4)
            {
                /*config tuple of hash value*/
                tag_config.cn70xx.ip4_src_flag = 1;
                tag_config.cn70xx.ip4_dst_flag = 1;
                tag_config.cn70xx.ip4_sprt_flag = 1;
                tag_config.cn70xx.ip4_dprt_flag = 1;
                tag_config.cn70xx.ip4_pctl_flag = 1;

                tag_config.cn70xx.grptag = 1;
                tag_config.cn70xx.grptagmask = 0xc;
                tag_config.cn70xx.grptagbase = 1;
            }

            cvmx_write_csr(CVMX_PIP_PRT_TAGX(port), tag_config.u64);

            cvmx_wait_usec(1000);
        }
    }

}
Example #7
/**
 * Initialize a USB port for use. This must be called before any
 * other access to the Octeon USB port is made. The port starts
 * off in the disabled state.
 *
 * @param usb    Pointer to an empty cvmx_usbd_state_t structure
 *               that will be populated by the initialize call.
 *               This structure is then passed to all other USB
 *               functions.
 * @param usb_port_number
 *               Which Octeon USB port to initialize.
 * @param flags  Flags to control hardware initialization. See
 *               cvmx_usbd_initialize_flags_t for the flag
 *               definitions. Some flags are mandatory.
 *
 * @return Zero on success or a negative value on error.
 */
int cvmx_usbd_initialize(cvmx_usbd_state_t *usb,
                         int usb_port_number,
                         cvmx_usbd_initialize_flags_t flags)
{
    cvmx_usbnx_clk_ctl_t usbn_clk_ctl;
    cvmx_usbnx_usbp_ctl_status_t usbn_usbp_ctl_status;

    if (cvmx_unlikely(flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
        cvmx_dprintf("%s: Called\n", __FUNCTION__);

    memset(usb, 0, sizeof(*usb));
    usb->init_flags = flags;
    usb->index = usb_port_number;

    /* Try to determine clock type automatically */
    if ((usb->init_flags & (CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_XI |
                  CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND)) == 0)
    {
        if (__cvmx_helper_board_usb_get_clock_type() == USB_CLOCK_TYPE_CRYSTAL_12)
            usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_XI;  /* Only 12 MHZ crystals are supported */
        else
            usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND;
    }

    if (usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND)
    {
        /* Check for auto ref clock frequency */
        if (!(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_CLOCK_MHZ_MASK))
            switch (__cvmx_helper_board_usb_get_clock_type())
            {
                case USB_CLOCK_TYPE_REF_12:
                    usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_12MHZ;
                    break;
                case USB_CLOCK_TYPE_REF_24:
                    usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_24MHZ;
                    break;
                case USB_CLOCK_TYPE_REF_48:
                default:
                    usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_48MHZ;
                    break;
            }
    }

    /* Power On Reset and PHY Initialization */

    /* 1. Wait for DCOK to assert (nothing to do) */
    /* 2a. Write USBN0/1_CLK_CTL[POR] = 1 and
        USBN0/1_CLK_CTL[HRST,PRST,HCLK_RST] = 0 */
    usbn_clk_ctl.u64 = cvmx_read_csr(CVMX_USBNX_CLK_CTL(usb->index));
    usbn_clk_ctl.s.por = 1;
    usbn_clk_ctl.s.hrst = 0;
    usbn_clk_ctl.s.prst = 0;
    usbn_clk_ctl.s.hclk_rst = 0;
    usbn_clk_ctl.s.enable = 0;
    /* 2b. Select the USB reference clock/crystal parameters by writing
        appropriate values to USBN0/1_CLK_CTL[P_C_SEL, P_RTYPE, P_COM_ON] */
    if (usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND)
    {
        /* The USB port uses 12/24/48MHz 2.5V board clock
            source at USB_XO. USB_XI should be tied to GND.
            Most Octeon evaluation boards require this setting */
        if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
        {
            usbn_clk_ctl.cn31xx.p_rclk  = 1; /* From CN31XX,CN30XX manual */
            usbn_clk_ctl.cn31xx.p_xenbn = 0;
        }
        else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
            usbn_clk_ctl.cn56xx.p_rtype = 2; /* From CN56XX,CN50XX manual */
        else
            usbn_clk_ctl.cn52xx.p_rtype = 1; /* From CN52XX manual */

        switch (usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_CLOCK_MHZ_MASK)
        {
            case CVMX_USBD_INITIALIZE_FLAGS_CLOCK_12MHZ:
                usbn_clk_ctl.s.p_c_sel = 0;
                break;
            case CVMX_USBD_INITIALIZE_FLAGS_CLOCK_24MHZ:
                usbn_clk_ctl.s.p_c_sel = 1;
                break;
            case CVMX_USBD_INITIALIZE_FLAGS_CLOCK_48MHZ:
                usbn_clk_ctl.s.p_c_sel = 2;
                break;
        }
    }
    else
    {
        /* The USB port uses a 12MHz crystal as clock source
            at USB_XO and USB_XI */
        if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
        {
            usbn_clk_ctl.cn31xx.p_rclk  = 1; /* From CN31XX,CN30XX manual */
            usbn_clk_ctl.cn31xx.p_xenbn = 1;
        }
        else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
            usbn_clk_ctl.cn56xx.p_rtype = 0; /* From CN56XX,CN50XX manual */
        else
            usbn_clk_ctl.cn52xx.p_rtype = 0; /* From CN52XX manual */

        usbn_clk_ctl.s.p_c_sel = 0;
    }
    /* 2c. Select the HCLK via writing USBN0/1_CLK_CTL[DIVIDE, DIVIDE2] and
        setting USBN0/1_CLK_CTL[ENABLE] = 1.  Divide the core clock down such
        that USB is as close as possible to 125Mhz */
    {
        int divisor = (cvmx_clock_get_rate(CVMX_CLOCK_CORE)+125000000-1)/125000000;
        if (divisor < 4)  /* Lower than 4 doesn't seem to work properly */
            divisor = 4;
        usbn_clk_ctl.s.divide = divisor;
        usbn_clk_ctl.s.divide2 = 0;
    }
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    /* 2d. Write USBN0/1_CLK_CTL[HCLK_RST] = 1 */
    usbn_clk_ctl.s.hclk_rst = 1;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    /* 2e.  Wait 64 core-clock cycles for HCLK to stabilize */
    cvmx_wait(64);
    /* 3. Program the power-on reset field in the USBN clock-control register:
        USBN_CLK_CTL[POR] = 0 */
    usbn_clk_ctl.s.por = 0;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    /* 4. Wait 1 ms for PHY clock to start */
    cvmx_wait_usec(1000);
    /* 5. Program the Reset input from automatic test equipment field in the
        USBP control and status register: USBN_USBP_CTL_STATUS[ATE_RESET] = 1 */
    usbn_usbp_ctl_status.u64 = cvmx_read_csr(CVMX_USBNX_USBP_CTL_STATUS(usb->index));
    usbn_usbp_ctl_status.s.ate_reset = 1;
    cvmx_write_csr(CVMX_USBNX_USBP_CTL_STATUS(usb->index), usbn_usbp_ctl_status.u64);
    /* 6. Wait 10 cycles */
    cvmx_wait(10);
    /* 7. Clear ATE_RESET field in the USBN clock-control register:
        USBN_USBP_CTL_STATUS[ATE_RESET] = 0 */
    usbn_usbp_ctl_status.s.ate_reset = 0;
    cvmx_write_csr(CVMX_USBNX_USBP_CTL_STATUS(usb->index), usbn_usbp_ctl_status.u64);
    /* 8. Program the PHY reset field in the USBN clock-control register:
        USBN_CLK_CTL[PRST] = 1 */
    usbn_clk_ctl.s.prst = 1;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    /* 9. Program the USBP control and status register to select host or
        device mode. USBN_USBP_CTL_STATUS[HST_MODE] = 0 for host, = 1 for
        device */
    usbn_usbp_ctl_status.s.hst_mode = 1;
    usbn_usbp_ctl_status.s.dm_pulld = 0;
    usbn_usbp_ctl_status.s.dp_pulld = 0;
    cvmx_write_csr(CVMX_USBNX_USBP_CTL_STATUS(usb->index), usbn_usbp_ctl_status.u64);
    /* 10. Wait 1 µs */
    cvmx_wait_usec(1);
    /* 11. Program the hreset_n field in the USBN clock-control register:
        USBN_CLK_CTL[HRST] = 1 */
    usbn_clk_ctl.s.hrst = 1;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    /* 12. Proceed to USB core initialization */
    usbn_clk_ctl.s.enable = 1;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    cvmx_wait_usec(1);

    /* Program the following fields in the global AHB configuration
        register (USBC_GAHBCFG)
        DMA mode, USBC_GAHBCFG[DMAEn]: 1 = DMA mode, 0 = slave mode
        Burst length, USBC_GAHBCFG[HBSTLEN] = 0
        Nonperiodic TxFIFO empty level (slave mode only),
        USBC_GAHBCFG[NPTXFEMPLVL]
        Periodic TxFIFO empty level (slave mode only),
        USBC_GAHBCFG[PTXFEMPLVL]
        Global interrupt mask, USBC_GAHBCFG[GLBLINTRMSK] = 1 */
    {
        cvmx_usbcx_gahbcfg_t usbcx_gahbcfg;
        usbcx_gahbcfg.u32 = 0;
        usbcx_gahbcfg.s.dmaen = 1;
        usbcx_gahbcfg.s.hbstlen = 0;
        usbcx_gahbcfg.s.nptxfemplvl = 1;
        usbcx_gahbcfg.s.ptxfemplvl = 1;
        usbcx_gahbcfg.s.glblintrmsk = 1;
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GAHBCFG(usb->index), usbcx_gahbcfg.u32);
    }

    /* Program the following fields in USBC_GUSBCFG register.
        HS/FS timeout calibration, USBC_GUSBCFG[TOUTCAL] = 0
        ULPI DDR select, USBC_GUSBCFG[DDRSEL] = 0
        USB turnaround time, USBC_GUSBCFG[USBTRDTIM] = 0x5
        PHY low-power clock select, USBC_GUSBCFG[PHYLPWRCLKSEL] = 0 */
    {
        cvmx_usbcx_gusbcfg_t usbcx_gusbcfg;
        usbcx_gusbcfg.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index));
        usbcx_gusbcfg.s.toutcal = 0;
        usbcx_gusbcfg.s.ddrsel = 0;
        usbcx_gusbcfg.s.usbtrdtim = 0x5;
        usbcx_gusbcfg.s.phylpwrclksel = 0;
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index), usbcx_gusbcfg.u32);
    }

    /* Program the following fields in the USBC0/1_DCFG register:
        Device speed, USBC0/1_DCFG[DEVSPD] = 0 (high speed)
        Non-zero-length status OUT handshake, USBC0/1_DCFG[NZSTSOUTHSHK]=0
        Periodic frame interval (if periodic endpoints are supported),
        USBC0/1_DCFG[PERFRINT] = 1 */
    {
        cvmx_usbcx_dcfg_t usbcx_dcfg;
        usbcx_dcfg.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DCFG(usb->index));
        usbcx_dcfg.s.devspd = 0;
        usbcx_dcfg.s.nzstsouthshk = 0;
        usbcx_dcfg.s.perfrint = 1;
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DCFG(usb->index), usbcx_dcfg.u32);
    }

    /* Program the USBC0/1_GINTMSK register */
    {
        cvmx_usbcx_gintmsk_t usbcx_gintmsk;
        usbcx_gintmsk.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GINTMSK(usb->index));
        usbcx_gintmsk.s.oepintmsk = 1;
        usbcx_gintmsk.s.inepintmsk = 1;
        usbcx_gintmsk.s.enumdonemsk = 1;
        usbcx_gintmsk.s.usbrstmsk = 1;
        usbcx_gintmsk.s.usbsuspmsk = 1;
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GINTMSK(usb->index), usbcx_gintmsk.u32);
    }

    cvmx_usbd_disable(usb);
    return 0;
}
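A hypothetical caller of the routine above; passing zero flags exercises the
automatic clock-type detection path shown at the top of the function:

/* Hypothetical usage: bring up USB port 0 in device mode with automatic
 * clock detection (flags = 0 lets the probe above pick the clock type). */
cvmx_usbd_state_t usb_state;

if (cvmx_usbd_initialize(&usb_state, 0, 0) != 0)
    cvmx_dprintf("USB0: initialization failed\n");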
Example #8
static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;

	octeon_mdiobus_force_mod_depencency();
	pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}

	cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
	if (cvm_oct_poll_queue == NULL) {
		pr_err("octeon-ethernet: Cannot create workqueue");
		return -ENOMEM;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;
			pip_prt_tagx.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if (pow_send_group != -1) {
		struct net_device *dev;
		pr_info("\tConfiguring device for POW only access\n");
		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
		    cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0, port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
			    alloc_etherdev(sizeof(struct octeon_ethernet));
			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n", port);
				continue;
			}

			/* Initialize the device private structure. */
			priv = netdev_priv(dev);
			priv->of_node = cvm_oct_node_for_port(pip, interface, port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

			switch (priv->imode) {

			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device "
					 "for interface %d, port %d\n",
					 interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -=
				    cvmx_pko_get_num_queues(priv->port) *
				    sizeof(uint32_t);
				queue_delayed_work(cvm_oct_poll_queue,
						   &priv->port_periodic_work, HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 us: about ten 1500-byte packets at 1GE.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);

	return 0;
}
Example #9
File: if_octm.c  Project: 2asoft/freebsd
static int
octm_attach(device_t dev)
{
	struct ifnet *ifp;
	struct octm_softc *sc;
	cvmx_mixx_irhwm_t mixx_irhwm;
	cvmx_mixx_intena_t mixx_intena;
	uint64_t mac;
	int error;
	int irq;
	int rid;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	sc->sc_port = device_get_unit(dev);

	switch (sc->sc_port) {
	case 0:
		irq = OCTEON_IRQ_MII;
		break;
	case 1:
		irq = OCTEON_IRQ_MII1;
		break;
	default:
		device_printf(dev, "unsupported management port %u.\n", sc->sc_port);
		return (ENXIO);
	}

	/*
	 * Set MAC address for this management port.
	 */
	mac = 0;
	memcpy((u_int8_t *)&mac + 2, cvmx_sysinfo_get()->mac_addr_base, 6);
	mac += sc->sc_port;

	cvmx_mgmt_port_set_mac(sc->sc_port, mac);

	/* No watermark for input ring.  */
	mixx_irhwm.u64 = 0;
	cvmx_write_csr(CVMX_MIXX_IRHWM(sc->sc_port), mixx_irhwm.u64);

	/* Enable input ring interrupts.  */
	mixx_intena.u64 = 0;
	mixx_intena.s.ithena = 1;
	cvmx_write_csr(CVMX_MIXX_INTENA(sc->sc_port), mixx_intena.u64);

	/* Allocate and establish interrupt.  */
	rid = 0;
	sc->sc_intr = bus_alloc_resource(sc->sc_dev, SYS_RES_IRQ, &rid,
	    irq, irq, 1, RF_ACTIVE);
	if (sc->sc_intr == NULL) {
		device_printf(dev, "unable to allocate IRQ.\n");
		return (ENXIO);
	}

	error = bus_setup_intr(sc->sc_dev, sc->sc_intr, INTR_TYPE_NET, NULL,
	    octm_rx_intr, sc, &sc->sc_intr_cookie);
	if (error != 0) {
		device_printf(dev, "unable to setup interrupt.\n");
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_intr);
		return (ENXIO);
	}

	bus_describe_intr(sc->sc_dev, sc->sc_intr, sc->sc_intr_cookie, "rx");

	/* XXX Possibly should enable TX interrupts.  */

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet.\n");
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_intr);
		return (ENOMEM);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = octm_init;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_ALLMULTI;
	ifp->if_ioctl = octm_ioctl;

	sc->sc_ifp = ifp;
	sc->sc_flags = ifp->if_flags;

	ifmedia_init(&sc->sc_ifmedia, 0, octm_medchange, octm_medstat);

	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO);

	ether_ifattach(ifp, (const u_int8_t *)&mac + 2);

	ifp->if_transmit = octm_transmit;

	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;

	IFQ_SET_MAXLEN(&ifp->if_snd, CVMX_MGMT_PORT_NUM_TX_BUFFERS);
	ifp->if_snd.ifq_drv_maxlen = CVMX_MGMT_PORT_NUM_TX_BUFFERS;
	IFQ_SET_READY(&ifp->if_snd);

	return (bus_generic_attach(dev));
}
Example #10
void
platform_ipi_send(int cpuid)
{
	cvmx_write_csr(CVMX_CIU_MBOX_SETX(cpuid), 1);
	mips_wbflush();
}
Example #11
File: cvmx-qlm.c  Project: xtra72/s805
/**
 * Measure the reference clock of a QLM
 *
 * @param qlm    QLM to measure
 *
 * @return Clock rate in Hz
 *       */
int cvmx_qlm_measure_clock(int qlm)
{
	cvmx_mio_ptp_clock_cfg_t ptp_clock;
	uint64_t count;
	uint64_t start_cycle, stop_cycle;
#ifdef CVMX_BUILD_FOR_UBOOT
	int ref_clock[16] = {0};
#else
	static int ref_clock[16] = {0};
#endif

	if (ref_clock[qlm])
		return ref_clock[qlm];

	if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
		return -1;

	/* Force the reference clock to 156.25 MHz when running in simulation,
	   since it supports the widest range of speeds */
#ifdef CVMX_BUILD_FOR_UBOOT
	if (gd->arch.board_desc.board_type == CVMX_BOARD_TYPE_SIM)
		return 156250000;
#elif !defined(CVMX_BUILD_FOR_LINUX_HOST)
	if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
		return 156250000;
#endif
	/* Disable the PTP event counter while we configure it */
	ptp_clock.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);	/* For CN63XXp1 errata */
	ptp_clock.s.evcnt_en = 0;
	cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp_clock.u64);
	/* Count on rising edge, Choose which QLM to count */
	ptp_clock.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);	/* For CN63XXp1 errata */
	ptp_clock.s.evcnt_edge = 0;
	ptp_clock.s.evcnt_in = 0x10 + qlm;
	cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp_clock.u64);
	/* Clear MIO_PTP_EVT_CNT */
	cvmx_read_csr(CVMX_MIO_PTP_EVT_CNT);	/* For CN63XXp1 errata */
	count = cvmx_read_csr(CVMX_MIO_PTP_EVT_CNT);
	cvmx_write_csr(CVMX_MIO_PTP_EVT_CNT, -count);
	/* Set MIO_PTP_EVT_CNT to 1 billion */
	cvmx_write_csr(CVMX_MIO_PTP_EVT_CNT, 1000000000);
	/* Enable the PTP event counter */
	ptp_clock.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);	/* For CN63XXp1 errata */
	ptp_clock.s.evcnt_en = 1;
	cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp_clock.u64);
	start_cycle = cvmx_clock_get_count(CVMX_CLOCK_CORE);
	/* Wait for 50ms */
	cvmx_wait_usec(50000);
	/* Read the counter */
	cvmx_read_csr(CVMX_MIO_PTP_EVT_CNT);	/* For CN63XXp1 errata */
	count = cvmx_read_csr(CVMX_MIO_PTP_EVT_CNT);
	stop_cycle = cvmx_clock_get_count(CVMX_CLOCK_CORE);
	/* Disable the PTP event counter */
	ptp_clock.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);	/* For CN63XXp1 errata */
	ptp_clock.s.evcnt_en = 0;
	cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp_clock.u64);
	/* Clock counted down, so reverse it */
	count = 1000000000 - count;
	/* Return the rate */
	ref_clock[qlm] = count * cvmx_clock_get_rate(CVMX_CLOCK_CORE) / (stop_cycle - start_cycle);
	return ref_clock[qlm];
}
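A hypothetical usage of the measurement routine above; the ~50 ms cost is paid
only once per QLM because the result is cached in the static ref_clock[] table:

/* Hypothetical usage: report QLM 0's reference clock. */
int hz = cvmx_qlm_measure_clock(0);
if (hz > 0)
	cvmx_dprintf("QLM0 reference clock: %d Hz\n", hz);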
/**
 * Initialize and start the SPI interface.
 *
 * @param interface The identifier of the packet interface to configure and
 *                  use as a SPI interface.
 * @param mode      The operating mode for the SPI interface. The interface
 *                  can operate as a full duplex (both Tx and Rx data paths
 *                  active) or as a halfplex (either the Tx data path is
 *                  active or the Rx data path is active, but not both).
 * @param timeout   Timeout to wait for clock synchronization in seconds
 * @return Zero on success, negative on failure.
 */
int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode, int timeout)
{
    uint64_t                     timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
    cvmx_spxx_trn4_ctl_t         spxx_trn4_ctl;
    cvmx_spxx_clk_stat_t         stat;
    cvmx_stxx_com_ctl_t          stxx_com_ctl;
    cvmx_srxx_com_ctl_t          srxx_com_ctl;
    cvmx_stxx_spi4_dat_t         stxx_spi4_dat;
    uint64_t                     count;
    cvmx_pko_reg_gmx_port_mode_t pko_mode;


    if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
        return -1;

    cvmx_dprintf ("SPI%d: mode %s, cal_len: %d, cal_rep: %d\n",
            interface, modes[mode], CAL_LEN, CAL_REP);

      // Configure for 16 ports (PKO -> GMX FIFO partition setting)
      // ----------------------------------------------------------
    pko_mode.u64 = cvmx_read_csr(CVMX_PKO_REG_GMX_PORT_MODE);
    if (interface == 0) 
    {
        pko_mode.s.mode0 = 0;
    }
    else 
    {
        pko_mode.s.mode1 = 0;
    }
    cvmx_write_csr(CVMX_PKO_REG_GMX_PORT_MODE, pko_mode.u64);

      // Configure GMX
      // -------------------------------------------------
    cvmx_write_csr (CVMX_GMXX_TX_PRTS(interface), 0xA);
      // PRTS           [ 4: 0] ( 5b) = 10


      // Bringing up Spi4 Interface
      // -------------------------------------------------

      // Reset the Spi4 deskew logic
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_DBG_DESKEW_CTL(interface), 0x00200000);
      // DLLDIS         [ 0: 0] ( 1b) = 0
      // DLLFRC         [ 1: 1] ( 1b) = 0
      // OFFDLY         [ 7: 2] ( 6b) = 0
      // BITSEL         [12: 8] ( 5b) = 0
      // OFFSET         [17:13] ( 5b) = 0
      // MUX            [18:18] ( 1b) = 0
      // INC            [19:19] ( 1b) = 0
      // DEC            [20:20] ( 1b) = 0
      // CLRDLY         [21:21] ( 1b) = 1    // Forces a reset
    cvmx_wait (100 * MS);

      // Setup the CLKDLY right in the middle
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x00000830);
      // SRXDLCK        [ 0: 0] ( 1b) = 0
      // RCVTRN         [ 1: 1] ( 1b) = 0
      // DRPTRN         [ 2: 2] ( 1b) = 0
      // SNDTRN         [ 3: 3] ( 1b) = 0
      // STATRCV        [ 4: 4] ( 1b) = 1    // Enable status channel Rx
      // STATDRV        [ 5: 5] ( 1b) = 1    // Enable status channel Tx
      // RUNBIST        [ 6: 6] ( 1b) = 0
      // CLKDLY         [11: 7] ( 5b) = 10   // 16 is the middle of the range
      // SRXLCK         [12:12] ( 1b) = 0
      // STXLCK         [13:13] ( 1b) = 0
      // SEETRN         [14:14] ( 1b) = 0
    cvmx_wait (100 * MS);

      // Reset SRX0 DLL
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x00000831);
      // SRXDLCK        [ 0: 0] ( 1b) = 1    // Restart the DLL
      // RCVTRN         [ 1: 1] ( 1b) = 0
      // DRPTRN         [ 2: 2] ( 1b) = 0
      // SNDTRN         [ 3: 3] ( 1b) = 0
      // STATRCV        [ 4: 4] ( 1b) = 1
      // STATDRV        [ 5: 5] ( 1b) = 1
      // RUNBIST        [ 6: 6] ( 1b) = 0
      // CLKDLY         [11: 7] ( 5b) = 10
      // SRXLCK         [12:12] ( 1b) = 0
      // STXLCK         [13:13] ( 1b) = 0
      // SEETRN         [14:14] ( 1b) = 0

      // Waiting for Inf0 Spi4 RX DLL to lock
      // -------------------------------------------------
    cvmx_wait (100 * MS);

      // Enable dynamic alignment
      // -------------------------------------------------
    spxx_trn4_ctl.u64 = 0;
    spxx_trn4_ctl.s.mux_en   = 1;
    spxx_trn4_ctl.s.macro_en = 1;
    spxx_trn4_ctl.s.maxdist  = 16;
    spxx_trn4_ctl.s.jitter   = 1;
    cvmx_write_csr (CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
      // MUX_EN         [ 0: 0] ( 1b) = 1
      // MACRO_EN       [ 1: 1] ( 1b) = 1
      // MAXDIST        [ 6: 2] ( 5b) = 16
      // SET_BOOT       [ 7: 7] ( 1b) = 0
      // CLR_BOOT       [ 8: 8] ( 1b) = 1
      // JITTER         [11: 9] ( 3b) = 1
      // TRNTEST        [12:12] ( 1b) = 0
    cvmx_write_csr (CVMX_SPXX_DBG_DESKEW_CTL(interface), 0x0);
      // DLLDIS         [ 0: 0] ( 1b) = 0
      // DLLFRC         [ 1: 1] ( 1b) = 0
      // OFFDLY         [ 7: 2] ( 6b) = 0
      // BITSEL         [12: 8] ( 5b) = 0
      // OFFSET         [17:13] ( 5b) = 0
      // MUX            [18:18] ( 1b) = 0
      // INC            [19:19] ( 1b) = 0
      // DEC            [20:20] ( 1b) = 0
      // CLRDLY         [21:21] ( 1b) = 0

    if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
          // SRX0 Ports
          // -------------------------------------------------
        cvmx_write_csr (CVMX_SRXX_COM_CTL(interface), 0x00000090);
          // INF_EN         [ 0: 0] ( 1b) = 0
          // ST_EN          [ 3: 3] ( 1b) = 0
          // PRTS           [ 9: 4] ( 6b) = 9

          // SRX0 Calendar Table
          // -------------------------------------------------
        cvmx_write_csr (CVMX_SRXX_SPI4_CALX(0, interface), 0x00013210);
          // PRT0           [ 4: 0] ( 5b) = 0
          // PRT1           [ 9: 5] ( 5b) = 1
          // PRT2           [14:10] ( 5b) = 2
          // PRT3           [19:15] ( 5b) = 3
          // ODDPAR         [20:20] ( 1b) = 1
        cvmx_write_csr (CVMX_SRXX_SPI4_CALX(1, interface), 0x00017654);
          // PRT0           [ 4: 0] ( 5b) = 4
          // PRT1           [ 9: 5] ( 5b) = 5
          // PRT2           [14:10] ( 5b) = 6
          // PRT3           [19:15] ( 5b) = 7
          // ODDPAR         [20:20] ( 1b) = 1
        cvmx_write_csr (CVMX_SRXX_SPI4_CALX(2, interface), 0x00000098);
          // PRT0           [ 4: 0] ( 5b) = 8
          // PRT1           [ 9: 5] ( 5b) = 9
          // PRT2           [14:10] ( 5b) = 0
          // PRT3           [19:15] ( 5b) = 0
          // ODDPAR         [20:20] ( 1b) = 0
        cvmx_write_csr (CVMX_SRXX_SPI4_STAT(interface), (CAL_REP << 8) | CAL_LEN);
          // LEN            [ 7: 0] ( 8b) = a
          // M              [15: 8] ( 8b) = 1
    }

    if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
          // STX0 Config
          // -------------------------------------------------
        cvmx_write_csr (CVMX_STXX_ARB_CTL(interface), 0x0);
          // IGNTPA         [ 3: 3] ( 1b) = 0
          // MINTRN         [ 5: 5] ( 1b) = 0
        cvmx_write_csr (CVMX_GMXX_TX_SPI_MAX(interface), 0x0408);
          // MAX2           [15: 8] ( 8b) = 4
          // MAX1           [ 7: 0] ( 8b) = 8
        cvmx_write_csr (CVMX_GMXX_TX_SPI_THRESH(interface), 0x4);
          // THRESH         [ 5: 0] ( 6b) = 4
        cvmx_write_csr (CVMX_GMXX_TX_SPI_CTL(interface), 0x0);
          // ENFORCE        [ 2: 2] ( 1b) = 0
          // TPA_CLR        [ 1: 1] ( 1b) = 0
          // CONT_PKT       [ 0: 0] ( 1b) = 0

          // STX0 Training Control
          // -------------------------------------------------
        stxx_spi4_dat.u64 = 0;
        stxx_spi4_dat.s.alpha = 32;     /* Minimum needed by dynamic alignment */
        stxx_spi4_dat.s.max_t = 0xFFFF; /* Minimum interval is 0x20 */
        cvmx_write_csr (CVMX_STXX_SPI4_DAT(interface), stxx_spi4_dat.u64);
          // MAX_T          [15: 0] (16b) = 0
          // ALPHA          [31:16] (16b) = 0

          // STX0 Calendar Table
          // -------------------------------------------------
        cvmx_write_csr (CVMX_STXX_SPI4_CALX(0, interface), 0x00013210);
          // PRT0           [ 4: 0] ( 5b) = 0
          // PRT1           [ 9: 5] ( 5b) = 1
          // PRT2           [14:10] ( 5b) = 2
          // PRT3           [19:15] ( 5b) = 3
          // ODDPAR         [20:20] ( 1b) = 1
        cvmx_write_csr (CVMX_STXX_SPI4_CALX(1, interface), 0x00017654);
          // PRT0           [ 4: 0] ( 5b) = 4
          // PRT1           [ 9: 5] ( 5b) = 5
          // PRT2           [14:10] ( 5b) = 6
          // PRT3           [19:15] ( 5b) = 7
          // ODDPAR         [20:20] ( 1b) = 1
        cvmx_write_csr (CVMX_STXX_SPI4_CALX(2, interface), 0x00000098);
          // PRT0           [ 4: 0] ( 5b) = 8
          // PRT1           [ 9: 5] ( 5b) = 9
          // PRT2           [14:10] ( 5b) = 0
          // PRT3           [19:15] ( 5b) = 0
          // ODDPAR         [20:20] ( 1b) = 0
        cvmx_write_csr (CVMX_STXX_SPI4_STAT(interface), (CAL_REP << 8) | CAL_LEN);
          // LEN            [ 7: 0] ( 8b) = a
          // M              [15: 8] ( 8b) = 1
    }

      /* Regardless of operating mode, both Tx and Rx clocks must be present
       * for the SPI interface to operate.
       */
    cvmx_dprintf ("SPI%d: Waiting to see TsClk...\n", interface);
    count = 0;
    timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
    do {  /* Do we see the TsClk transitioning? */
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        count = count + 1;
#ifdef DEBUG
        if ((count % 5000000) == 10) {
	    cvmx_dprintf ("SPI%d: CLK_STAT 0x%016llX\n"
                    "  s4 (%d,%d) d4 (%d,%d)\n",
                    interface, (unsigned long long)stat.u64,
                    stat.s.s4clk0, stat.s.s4clk1,
                    stat.s.d4clk0, stat.s.d4clk1);
        }
#endif
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.s4clk0 == 0 || stat.s.s4clk1 == 0);

    cvmx_dprintf ("SPI%d: Waiting to see RsClk...\n", interface);
    timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
    do {  /* Do we see the RsClk transitioning? */
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.d4clk0 == 0 || stat.s.d4clk1 == 0);


      // SRX0 & STX0 Inf0 Links are configured - begin training
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x0000083f);
      // SRXDLCK        [ 0: 0] ( 1b) = 1
      // RCVTRN         [ 1: 1] ( 1b) = 1
      // DRPTRN         [ 2: 2] ( 1b) = 1    ...was 0
      // SNDTRN         [ 3: 3] ( 1b) = 1
      // STATRCV        [ 4: 4] ( 1b) = 1
      // STATDRV        [ 5: 5] ( 1b) = 1
      // RUNBIST        [ 6: 6] ( 1b) = 0
      // CLKDLY         [11: 7] ( 5b) = 10
      // SRXLCK         [12:12] ( 1b) = 0
      // STXLCK         [13:13] ( 1b) = 0
      // SEETRN         [14:14] ( 1b) = 0
    cvmx_wait (1000 * MS);

      // SRX0 clear the boot bit
      // -------------------------------------------------
    spxx_trn4_ctl.s.clr_boot = 1;
    cvmx_write_csr (CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);

      // Wait for the training sequence to complete
      // -------------------------------------------------
      // SPX0_CLK_STAT - SPX0_CLK_STAT[SRXTRN] should be 1 (bit8)
    cvmx_dprintf ("SPI%d: Waiting for training\n", interface);
    cvmx_wait (1000 * MS);
    timeout_time = cvmx_get_cycle() + 1000ull * MS * 600;  /* Wait a really long time here */
    do {
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.srxtrn == 0);

    if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
          // SRX0 interface should be good, send calendar data
          // -------------------------------------------------
        cvmx_dprintf ("SPI%d: Rx is synchronized, start sending calendar data\n", interface);
        srxx_com_ctl.u64 = 0;
        srxx_com_ctl.s.prts   = 9;
        srxx_com_ctl.s.inf_en = 1;
        srxx_com_ctl.s.st_en  = 1;
        cvmx_write_csr (CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
    }

    if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
          // STX0 has achieved sync
          // The correspondent board should be sending calendar data
          // Enable the STX0 STAT receiver.
          // -------------------------------------------------
        stxx_com_ctl.u64 = 0;
        stxx_com_ctl.s.inf_en = 1;
        stxx_com_ctl.s.st_en = 1;
        cvmx_write_csr (CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);

          // Waiting for calendar sync on STX0 STAT
          // -------------------------------------------------
          // SPX0_CLK_STAT - SPX0_CLK_STAT[STXCAL] should be 1 (bit10)
        cvmx_dprintf ("SPI%d: Waiting to sync on STX[%d] STAT\n", interface, interface);
        timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
        do {
            stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT (interface));
            if (cvmx_get_cycle() > timeout_time)
            {
                cvmx_dprintf ("SPI%d: Timeout\n", interface);
                return -1;
            }
        } while (stat.s.stxcal == 0);
    }

      // Inf0 is synched
      // -------------------------------------------------
      // SPX0 is up
    if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
        srxx_com_ctl.s.inf_en = 1;
        cvmx_write_csr (CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
        cvmx_dprintf ("SPI%d: Rx is now up\n", interface);
    }

    if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
        stxx_com_ctl.s.inf_en = 1;
        cvmx_write_csr (CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
        cvmx_dprintf ("SPI%d: Tx is now up\n", interface);
    }

    cvmx_write_csr (CVMX_GMXX_RXX_FRM_MIN (0,interface), 40);
    cvmx_write_csr (CVMX_GMXX_RXX_FRM_MAX (0,interface), 64*1024 - 4);
    cvmx_write_csr (CVMX_GMXX_RXX_JABBER  (0,interface), 64*1024 - 4);

    return 0;
}
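Hypothetical usage of the start routine; CVMX_SPI_MODE_DUPLEX (both halfplex
flags set) is assumed from the cvmx_spi_mode_t values tested above:

/* Hypothetical usage: bring up SPI interface 0 in full duplex, allowing
 * 10 seconds for clock sync and training. */
if (cvmx_spi_start_interface(0, CVMX_SPI_MODE_DUPLEX, 10) != 0)
	cvmx_dprintf("SPI0: interface failed to start\n");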
/**
 * This routine restarts the SPI interface after it has lost synchronization
 * with its correspondent system.
 *
 * @param interface The identifier of the packet interface to configure and
 *                  use as a SPI interface.
 * @param mode      The operating mode for the SPI interface. The interface
 *                  can operate as a full duplex (both Tx and Rx data paths
 *                  active) or as a halfplex (either the Tx data path is
 *                  active or the Rx data path is active, but not both).
 * @param timeout   Timeout to wait for clock synchronization in seconds
 * @return Zero on success, negative on failure.
 */
int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode, int timeout)
{
    uint64_t                     timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
    cvmx_spxx_trn4_ctl_t         spxx_trn4_ctl;
    cvmx_spxx_clk_stat_t         stat;
    cvmx_stxx_com_ctl_t          stxx_com_ctl;
    cvmx_srxx_com_ctl_t          srxx_com_ctl;
    //cvmx_stxx_spi4_dat_t         stxx_spi4_dat;

    cvmx_dprintf ("SPI%d: Restart %s\n", interface, modes[mode]);

      // Reset the Spi4 deskew logic
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_DBG_DESKEW_CTL(interface), 0x00200000);
    cvmx_wait (100 * MS);
      // Setup the CLKDLY right in the middle
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x00000830);
    cvmx_wait (100 * MS);
      // Reset SRX0 DLL
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x00000831);
    cvmx_wait (100 * MS);
      // Enable dynamic alignment
      // -------------------------------------------------
    spxx_trn4_ctl.u64 = 0;
    spxx_trn4_ctl.s.mux_en   = 1;
    spxx_trn4_ctl.s.macro_en = 1;
    spxx_trn4_ctl.s.maxdist  = 16;
    spxx_trn4_ctl.s.jitter   = 1;
    cvmx_write_csr (CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
    cvmx_write_csr (CVMX_SPXX_DBG_DESKEW_CTL(interface), 0x0);

      /* Regardless of operating mode, both Tx and Rx clocks must be present
       * for the SPI interface to operate.
       */
    cvmx_dprintf ("SPI%d: Waiting to see TsClk...\n", interface);
    do {  /* Do we see the TsClk transitioning? */
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.s4clk0 == 0 || stat.s.s4clk1 == 0);

    cvmx_dprintf ("SPI%d: Waiting to see RsClk...\n", interface);
    do {  /* Do we see the RsClk transitioning? */
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.d4clk0 == 0 || stat.s.d4clk1 == 0);

      // SRX0 & STX0 Inf0 Links are configured - begin training
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x0000083f);
    cvmx_wait (1000 * MS);
      // SRX0 clear the boot bit
      // -------------------------------------------------
    spxx_trn4_ctl.s.clr_boot = 1;
    cvmx_write_csr (CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);

      // Wait for the training sequence to complete
      // -------------------------------------------------
      // SPX0_CLK_STAT - SPX0_CLK_STAT[SRXTRN] should be 1 (bit8)
    cvmx_dprintf ("SPI%d: Waiting for training\n", interface);
    cvmx_wait (1000 * MS);
    do {
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.srxtrn == 0);

    if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
          // SRX0 interface should be good, send calendar data
          // -------------------------------------------------
        cvmx_dprintf ("SPI%d: Rx is synchronized, start sending calendar data\n", interface);
        srxx_com_ctl.u64 = 0;
        srxx_com_ctl.s.prts   = 9;
        srxx_com_ctl.s.inf_en = 1;
        srxx_com_ctl.s.st_en  = 1;
        cvmx_write_csr (CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
    }

    if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
          // STX0 has achieved sync
          // The correspondent board should be sending calendar data
          // Enable the STX0 STAT receiver.
          // -------------------------------------------------
        stxx_com_ctl.u64 = 0;
        stxx_com_ctl.s.inf_en = 1;
        stxx_com_ctl.s.st_en = 1;
        cvmx_write_csr (CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);

          // Waiting for calendar sync on STX0 STAT
          // -------------------------------------------------
          // SPX0_CLK_STAT - SPX0_CLK_STAT[STXCAL] should be 1 (bit10)
        cvmx_dprintf ("SPI%d: Waiting to sync on STX[%d] STAT\n", interface, interface);
        do {
            stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT (interface));
            if (cvmx_get_cycle() > timeout_time)
            {
                cvmx_dprintf ("SPI%d: Timeout\n", interface);
                return -1;
            }
        } while (stat.s.stxcal == 0);
    }

      // Inf0 is synched
      // -------------------------------------------------
      // SPX0 is up
    if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
        srxx_com_ctl.s.inf_en = 1;
        cvmx_write_csr (CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
        cvmx_dprintf ("SPI%d: Rx is now up\n", interface);
    }

    if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
        stxx_com_ctl.s.inf_en = 1;
        cvmx_write_csr (CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
        cvmx_dprintf ("SPI%d: Tx is now up\n", interface);
    }

    return 0;
}
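A minimal call sketch for the routine above (illustrative, not taken from this file; CVMX_SPI_MODE_DUPLEX is assumed to be the full-duplex enumerator of cvmx_spi_mode_t):

/* Resynchronize SPI interface 0 in full-duplex mode, allowing up to
 * 10 seconds for clock synchronization. */
if (cvmx_spi_restart_interface(0, CVMX_SPI_MODE_DUPLEX, 10) != 0)
    cvmx_dprintf("SPI0: restart failed or timed out\n");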
Example #14
int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode,
                               int num_ports)
{
    int port;
    int index;
    if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
        union cvmx_srxx_com_ctl srxx_com_ctl;
        union cvmx_srxx_spi4_stat srxx_spi4_stat;


        srxx_com_ctl.u64 = 0;
        srxx_com_ctl.s.prts = num_ports - 1;
        srxx_com_ctl.s.st_en = 0;
        srxx_com_ctl.s.inf_en = 0;
        cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);


        port = 0;
        index = 0;
        while (port < num_ports) {
            union cvmx_srxx_spi4_calx srxx_spi4_calx;
            srxx_spi4_calx.u64 = 0;
            srxx_spi4_calx.s.prt0 = port++;
            srxx_spi4_calx.s.prt1 = port++;
            srxx_spi4_calx.s.prt2 = port++;
            srxx_spi4_calx.s.prt3 = port++;
            srxx_spi4_calx.s.oddpar =
                ~(cvmx_dpop(srxx_spi4_calx.u64) & 1);
            cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface),
                           srxx_spi4_calx.u64);
            index++;
        }
        srxx_spi4_stat.u64 = 0;
        srxx_spi4_stat.s.len = num_ports;
        srxx_spi4_stat.s.m = 1;
        cvmx_write_csr(CVMX_SRXX_SPI4_STAT(interface),
                       srxx_spi4_stat.u64);
    }

    if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
        union cvmx_stxx_arb_ctl stxx_arb_ctl;
        union cvmx_gmxx_tx_spi_max gmxx_tx_spi_max;
        union cvmx_gmxx_tx_spi_thresh gmxx_tx_spi_thresh;
        union cvmx_gmxx_tx_spi_ctl gmxx_tx_spi_ctl;
        union cvmx_stxx_spi4_stat stxx_spi4_stat;
        union cvmx_stxx_spi4_dat stxx_spi4_dat;


        stxx_arb_ctl.u64 = 0;
        stxx_arb_ctl.s.igntpa = 0;
        stxx_arb_ctl.s.mintrn = 0;
        cvmx_write_csr(CVMX_STXX_ARB_CTL(interface), stxx_arb_ctl.u64);

        gmxx_tx_spi_max.u64 = 0;
        gmxx_tx_spi_max.s.max1 = 8;
        gmxx_tx_spi_max.s.max2 = 4;
        gmxx_tx_spi_max.s.slice = 0;
        cvmx_write_csr(CVMX_GMXX_TX_SPI_MAX(interface),
                       gmxx_tx_spi_max.u64);

        gmxx_tx_spi_thresh.u64 = 0;
        gmxx_tx_spi_thresh.s.thresh = 4;
        cvmx_write_csr(CVMX_GMXX_TX_SPI_THRESH(interface),
                       gmxx_tx_spi_thresh.u64);

        gmxx_tx_spi_ctl.u64 = 0;
        gmxx_tx_spi_ctl.s.tpa_clr = 0;
        gmxx_tx_spi_ctl.s.cont_pkt = 0;
        cvmx_write_csr(CVMX_GMXX_TX_SPI_CTL(interface),
                       gmxx_tx_spi_ctl.u64);


        stxx_spi4_dat.u64 = 0;

        stxx_spi4_dat.s.alpha = 32;
        stxx_spi4_dat.s.max_t = 0xFFFF;
        cvmx_write_csr(CVMX_STXX_SPI4_DAT(interface),
                       stxx_spi4_dat.u64);


        port = 0;
        index = 0;
        while (port < num_ports) {
            union cvmx_stxx_spi4_calx stxx_spi4_calx;
            stxx_spi4_calx.u64 = 0;
            stxx_spi4_calx.s.prt0 = port++;
            stxx_spi4_calx.s.prt1 = port++;
            stxx_spi4_calx.s.prt2 = port++;
            stxx_spi4_calx.s.prt3 = port++;
            stxx_spi4_calx.s.oddpar =
                ~(cvmx_dpop(stxx_spi4_calx.u64) & 1);
            cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface),
                           stxx_spi4_calx.u64);
            index++;
        }
        stxx_spi4_stat.u64 = 0;
        stxx_spi4_stat.s.len = num_ports;
        stxx_spi4_stat.s.m = 1;
        cvmx_write_csr(CVMX_STXX_SPI4_STAT(interface),
                       stxx_spi4_stat.u64);
    }

    return 0;
}
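The ODDPAR computation above packs four port numbers into one calendar word and then sets the parity bit so the whole 64-bit word has an odd population count. A standalone sketch of that rule, assuming cvmx_dpop() is a plain population count (replaced here by the GCC builtin):

#include <stdint.h>

/* Returns the ODDPAR value for a calendar word whose parity field is
 * still zero: 1 when the word has an even number of set bits, so that
 * setting the bit makes the total odd. */
static inline uint64_t calendar_oddpar(uint64_t word)
{
    return (uint64_t)~__builtin_popcountll(word) & 1;
}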
Example #15
File: cvmx-ilk.c Project: 2asoft/freebsd
/**
 * Configure the calendar for Tx.
 *
 * @param interface The identifier of the packet interface to configure and
 *                  use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
 *                  ilk1.
 *
 * @param cal_depth the number of calendar entries
 * @param pent      pointer to calendar entries
 *
 * @return Zero on success, negative on failure.
 */
static int cvmx_ilk_tx_cal_conf (int interface, int cal_depth, 
                          cvmx_ilk_cal_entry_t *pent)
{
    int res = -1, num_grp, num_rest, i, j;
    cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
    cvmx_ilk_txx_idx_cal_t ilk_txx_idx_cal;
    cvmx_ilk_txx_mem_cal0_t ilk_txx_mem_cal0;
    cvmx_ilk_txx_mem_cal1_t ilk_txx_mem_cal1;
    unsigned long int tmp;
    cvmx_ilk_cal_entry_t *ent_tmp;

    if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
        return res;

    if (interface >= CVMX_NUM_ILK_INTF)
        return res;

    if (cal_depth < CVMX_ILK_TX_MIN_CAL || cal_depth > CVMX_ILK_MAX_CAL
        || pent == NULL)
        return res;

    /* mandatory link-level fc as workarounds for ILK-15397 and ILK-15479 */
    /* TODO: test effectiveness */
#if 0
    if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1_0) && pent->ent_ctrl == PIPE_BPID)
        for (i = 0; i < cal_depth; i++)
            pent->ent_ctrl = LINK;
#endif

    /* tx calendar depth must be a multiple of 8 */
    num_grp = (cal_depth - 1) / CVMX_ILK_CAL_GRP_SZ + 1;
    num_rest = cal_depth % CVMX_ILK_CAL_GRP_SZ;
    if (num_rest != 0)
    {
        ent_tmp = pent + cal_depth;
        for (i = num_rest; i < 8; i++, ent_tmp++)
        {
            ent_tmp->pipe_bpid = 0;
            ent_tmp->ent_ctrl = XOFF;
        }
    }
    cal_depth = num_grp * 8;

    /* set the depth */
    ilk_txx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG0(interface));
    ilk_txx_cfg0.s.cal_depth = cal_depth;
    cvmx_write_csr (CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64); 

    /* set the calendar index */
    ilk_txx_idx_cal.u64 = 0;
    ilk_txx_idx_cal.s.inc = 1;
    cvmx_write_csr (CVMX_ILK_TXX_IDX_CAL(interface), ilk_txx_idx_cal.u64); 

    /* set the calendar entries. each group has both cal0 and cal1 registers */
    for (i = 0; i < num_grp; i++)
    {
        ilk_txx_mem_cal0.u64 = 0;
        for (j = 0; j < CVMX_ILK_CAL_GRP_SZ/2; j++)
        {
            tmp = 0;
            tmp = pent->pipe_bpid & ~(~tmp << CVMX_ILK_PIPE_BPID_SZ);
            tmp <<= (CVMX_ILK_PIPE_BPID_SZ + CVMX_ILK_ENT_CTRL_SZ) * j;
            ilk_txx_mem_cal0.u64 |= tmp;

            tmp = 0;
            tmp = pent->ent_ctrl & ~(~tmp << CVMX_ILK_ENT_CTRL_SZ);
            tmp <<= (CVMX_ILK_PIPE_BPID_SZ + CVMX_ILK_ENT_CTRL_SZ) * j +
                    CVMX_ILK_PIPE_BPID_SZ;
            ilk_txx_mem_cal0.u64 |= tmp;
            pent++;
        }
        cvmx_write_csr(CVMX_ILK_TXX_MEM_CAL0(interface), ilk_txx_mem_cal0.u64);

        ilk_txx_mem_cal1.u64 = 0;
        for (j = 0; j < CVMX_ILK_CAL_GRP_SZ/2; j++)
        {
            tmp = 0;
            tmp = pent->pipe_bpid & ~(~tmp << CVMX_ILK_PIPE_BPID_SZ);
            tmp <<= (CVMX_ILK_PIPE_BPID_SZ + CVMX_ILK_ENT_CTRL_SZ) * j;
            ilk_txx_mem_cal1.u64 |= tmp;

            tmp = 0;
            tmp = pent->ent_ctrl & ~(~tmp << CVMX_ILK_ENT_CTRL_SZ);
            tmp <<= (CVMX_ILK_PIPE_BPID_SZ + CVMX_ILK_ENT_CTRL_SZ) * j +
                    CVMX_ILK_PIPE_BPID_SZ;
            ilk_txx_mem_cal1.u64 |= tmp;
            pent++;
        }
        cvmx_write_csr(CVMX_ILK_TXX_MEM_CAL1(interface), ilk_txx_mem_cal1.u64);
    }
    cvmx_read_csr (CVMX_ILK_TXX_MEM_CAL1(interface));

    return 0;
}
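Each MEM_CAL0/MEM_CAL1 write above packs four calendar entries, with the bpid field in the low bits of each slot and the control field above it. A compact sketch of the per-slot packing, treating the field widths as assumptions (the real values come from the CVMX_ILK_PIPE_BPID_SZ and CVMX_ILK_ENT_CTRL_SZ constants in cvmx-ilk.h):

#include <stdint.h>

#define BPID_SZ 7   /* assumed width of the pipe/bpid field */
#define CTRL_SZ 2   /* assumed width of the entry-control field */

/* Merge entry j (0..3) into a calendar register word. */
static uint64_t pack_cal_entry(uint64_t word, int j,
                               unsigned bpid, unsigned ctrl)
{
    int shift = (BPID_SZ + CTRL_SZ) * j;
    word |= ((uint64_t)bpid & ((1ull << BPID_SZ) - 1)) << shift;
    word |= ((uint64_t)ctrl & ((1ull << CTRL_SZ) - 1)) << (shift + BPID_SZ);
    return word;
}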
Example #16
static int octeon_lmc_edac_probe(struct platform_device *pdev)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[1];
	int mc = pdev->id;

	opstate_init();

	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = 1;
	layers[0].is_virt_csrow = false;

	if (OCTEON_IS_OCTEON1PLUS()) {
		union cvmx_lmcx_mem_cfg0 cfg0;

		cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(0));
		if (!cfg0.s.ecc_ena) {
			dev_info(&pdev->dev, "Disabled (ECC not enabled)\n");
			return 0;
		}

		mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, sizeof(struct octeon_lmc_pvt));
		if (!mci)
			return -ENXIO;

		mci->pdev = &pdev->dev;
		mci->dev_name = dev_name(&pdev->dev);

		mci->mod_name = "octeon-lmc";
		mci->ctl_name = "octeon-lmc-err";
		mci->edac_check = octeon_lmc_edac_poll;

		if (edac_mc_add_mc_with_groups(mci, octeon_dev_groups)) {
			dev_err(&pdev->dev, "edac_mc_add_mc() failed\n");
			edac_mc_free(mci);
			return -ENXIO;
		}

		cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mc));
		cfg0.s.intr_ded_ena = 0;	/* We poll */
		cfg0.s.intr_sec_ena = 0;
		cvmx_write_csr(CVMX_LMCX_MEM_CFG0(mc), cfg0.u64);
	} else {
		/* OCTEON II */
		union cvmx_lmcx_int_en en;
		union cvmx_lmcx_config config;

		config.u64 = cvmx_read_csr(CVMX_LMCX_CONFIG(0));
		if (!config.s.ecc_ena) {
			dev_info(&pdev->dev, "Disabled (ECC not enabled)\n");
			return 0;
		}

		mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, sizeof(struct octeon_lmc_pvt));
		if (!mci)
			return -ENXIO;

		mci->pdev = &pdev->dev;
		mci->dev_name = dev_name(&pdev->dev);

		mci->mod_name = "octeon-lmc";
		mci->ctl_name = "co_lmc_err";
		mci->edac_check = octeon_lmc_edac_poll_o2;

		if (edac_mc_add_mc_with_groups(mci, octeon_dev_groups)) {
			dev_err(&pdev->dev, "edac_mc_add_mc() failed\n");
			edac_mc_free(mci);
			return -ENXIO;
		}

		/* On OCTEON II the interrupt enables live in LMC*_INT_EN,
		 * matching the cvmx_lmcx_int_en union used here. */
		en.u64 = cvmx_read_csr(CVMX_LMCX_INT_EN(mc));
		en.s.intr_ded_ena = 0;	/* We poll */
		en.s.intr_sec_ena = 0;
		cvmx_write_csr(CVMX_LMCX_INT_EN(mc), en.u64);
	}
	platform_set_drvdata(pdev, mci);

	return 0;
}
Example #17
File: cvmx-ilk.c Project: 2asoft/freebsd
static void cvmx_ilk_reg_dump_rx (int interface)
{
    int i;
    cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
    cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
    cvmx_ilk_rxx_int_t ilk_rxx_int;
    cvmx_ilk_rxx_jabber_t ilk_rxx_jabber;
    cvmx_ilk_rx_lnex_cfg_t ilk_rx_lnex_cfg;
    cvmx_ilk_rx_lnex_int_t ilk_rx_lnex_int;
    cvmx_ilk_gbl_cfg_t ilk_gbl_cfg;
    cvmx_ilk_ser_cfg_t ilk_ser_cfg;
    cvmx_ilk_rxf_idx_pmap_t ilk_rxf_idx_pmap;
    cvmx_ilk_rxf_mem_pmap_t ilk_rxf_mem_pmap;
    cvmx_ilk_rxx_idx_cal_t ilk_rxx_idx_cal;
    cvmx_ilk_rxx_mem_cal0_t ilk_rxx_mem_cal0;
    cvmx_ilk_rxx_mem_cal1_t ilk_rxx_mem_cal1;

    ilk_rxx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG0(interface));
    cvmx_dprintf ("ilk rxx cfg0: 0x%16lx\n", ilk_rxx_cfg0.u64);
    
    ilk_rxx_cfg1.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG1(interface));
    cvmx_dprintf ("ilk rxx cfg1: 0x%16lx\n", ilk_rxx_cfg1.u64);
    
    ilk_rxx_int.u64 = cvmx_read_csr (CVMX_ILK_RXX_INT(interface));
    cvmx_dprintf ("ilk rxx int: 0x%16lx\n", ilk_rxx_int.u64);
    cvmx_write_csr (CVMX_ILK_RXX_INT(interface), ilk_rxx_int.u64);

    ilk_rxx_jabber.u64 = cvmx_read_csr (CVMX_ILK_RXX_JABBER(interface));
    cvmx_dprintf ("ilk rxx jabber: 0x%16lx\n", ilk_rxx_jabber.u64);

#define LNE_NUM_DBG 4
    for (i = 0; i < LNE_NUM_DBG; i++)
    {
        ilk_rx_lnex_cfg.u64 = cvmx_read_csr (CVMX_ILK_RX_LNEX_CFG(i));
        cvmx_dprintf ("ilk rx lnex cfg lane: %d  0x%16lx\n", i,
                      ilk_rx_lnex_cfg.u64);
    }

    for (i = 0; i < LNE_NUM_DBG; i++)
    {
        ilk_rx_lnex_int.u64 = cvmx_read_csr (CVMX_ILK_RX_LNEX_INT(i));
        cvmx_dprintf ("ilk rx lnex int lane: %d  0x%16lx\n", i,
                      ilk_rx_lnex_int.u64);
        cvmx_write_csr (CVMX_ILK_RX_LNEX_INT(i), ilk_rx_lnex_int.u64);
    }

    ilk_gbl_cfg.u64 = cvmx_read_csr (CVMX_ILK_GBL_CFG);
    cvmx_dprintf ("ilk gbl cfg: 0x%16lx\n", ilk_gbl_cfg.u64);

    ilk_ser_cfg.u64 = cvmx_read_csr (CVMX_ILK_SER_CFG);
    cvmx_dprintf ("ilk ser cfg: 0x%16lx\n", ilk_ser_cfg.u64);

#define CHAN_NUM_DBG 8
    ilk_rxf_idx_pmap.u64 = 0;
    ilk_rxf_idx_pmap.s.index = interface * 256;
    ilk_rxf_idx_pmap.s.inc = 1;
    cvmx_write_csr (CVMX_ILK_RXF_IDX_PMAP, ilk_rxf_idx_pmap.u64);
    for (i = 0; i < CHAN_NUM_DBG; i++)
    {
        ilk_rxf_mem_pmap.u64 = cvmx_read_csr (CVMX_ILK_RXF_MEM_PMAP);
        cvmx_dprintf ("ilk rxf mem pmap chan: %3d  0x%16lx\n", i,
                      ilk_rxf_mem_pmap.u64);
    }

#define CAL_NUM_DBG 2
    ilk_rxx_idx_cal.u64 = 0;
    ilk_rxx_idx_cal.s.inc = 1;
    cvmx_write_csr (CVMX_ILK_RXX_IDX_CAL(interface), ilk_rxx_idx_cal.u64); 
    for (i = 0; i < CAL_NUM_DBG; i++)
    {
        ilk_rxx_idx_cal.u64 = cvmx_read_csr(CVMX_ILK_RXX_IDX_CAL(interface));
        cvmx_dprintf ("ilk rxx idx cal: 0x%16lx\n", ilk_rxx_idx_cal.u64);

        ilk_rxx_mem_cal0.u64 = cvmx_read_csr(CVMX_ILK_RXX_MEM_CAL0(interface));
        cvmx_dprintf ("ilk rxx mem cal0: 0x%16lx\n", ilk_rxx_mem_cal0.u64);
        ilk_rxx_mem_cal1.u64 = cvmx_read_csr(CVMX_ILK_RXX_MEM_CAL1(interface));
        cvmx_dprintf ("ilk rxx mem cal1: 0x%16lx\n", ilk_rxx_mem_cal1.u64);
    }
}
Example #18
static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
{
	int port = p->port;
	struct net_device *netdev = p->netdev;
	union cvmx_mixx_ircnt mix_ircnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	struct sk_buff *skb2;
	struct sk_buff *skb_new;
	union mgmt_port_ring_entry re2;
	int rc = 1;

	re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
	if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
		/* A good packet, send it up. */
		skb_put(skb, re.s.len);
good:
		skb->protocol = eth_type_trans(skb, netdev);
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);
		rc = 0;
	} else if (re.s.code == RING_ENTRY_CODE_MORE) {
		/*
		 * Packet split across skbs.  This can happen if we
		 * increase the MTU.  Buffers that are already in the
		 * rx ring can then end up being too small.  As the rx
		 * ring is refilled, buffers sized for the new MTU
		 * will be used and we should go back to the normal
		 * non-split case.
		 */
		skb_put(skb, re.s.len);
		do {
			re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
			if (re2.s.code != RING_ENTRY_CODE_MORE
				&& re2.s.code != RING_ENTRY_CODE_DONE)
				goto split_error;
			skb_put(skb2,  re2.s.len);
			skb_new = skb_copy_expand(skb, 0, skb2->len,
						  GFP_ATOMIC);
			if (!skb_new)
				goto split_error;
			if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
					  skb2->len))
				goto split_error;
			skb_put(skb_new, skb2->len);
			dev_kfree_skb_any(skb);
			dev_kfree_skb_any(skb2);
			skb = skb_new;
		} while (re2.s.code == RING_ENTRY_CODE_MORE);
		goto good;
	} else {
		/* Some other error, discard it. */
		dev_kfree_skb_any(skb);
		/*
		 * Error statistics are accumulated in
		 * octeon_mgmt_update_rx_stats.
		 */
	}
	goto done;
split_error:
	/* Discard the whole mess. */
	dev_kfree_skb_any(skb);
	dev_kfree_skb_any(skb2);
	while (re2.s.code == RING_ENTRY_CODE_MORE) {
		re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
		dev_kfree_skb_any(skb2);
	}
	netdev->stats.rx_errors++;

done:
	/* Tell the hardware we processed a packet.  */
	mix_ircnt.u64 = 0;
	mix_ircnt.s.ircnt = 1;
	cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64);
	return rc;
}
Example #19
static int
octeon_gpio_attach(device_t dev)
{
    struct octeon_gpio_softc *sc = device_get_softc(dev);
    struct octeon_gpio_pin *pinp;
    cvmx_gpio_bit_cfgx_t gpio_cfgx;

    int i;

    KASSERT((device_get_unit(dev) == 0),
            ("octeon_gpio: Only one gpio module supported"));

    mtx_init(&sc->gpio_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

    for ( i = 0; i < OCTEON_GPIO_IRQS; i++) {
        if ((sc->gpio_irq_res[i] = bus_alloc_resource(dev,
                                   SYS_RES_IRQ, &sc->gpio_irq_rid[i],
                                   OCTEON_IRQ_GPIO0 + i, OCTEON_IRQ_GPIO0 + i, 1,
                                   RF_SHAREABLE | RF_ACTIVE)) == NULL) {
            device_printf(dev, "unable to allocate IRQ resource\n");
            octeon_gpio_detach(dev);
            return (ENXIO);
        }

        sc->gpio_intr_cookies[i] = sc;
        if ((bus_setup_intr(dev, sc->gpio_irq_res[i], INTR_TYPE_MISC,
                            octeon_gpio_filter, octeon_gpio_intr,
                            &(sc->gpio_intr_cookies[i]), &sc->gpio_ih[i]))) {
            device_printf(dev,
                          "WARNING: unable to register interrupt handler\n");
            octeon_gpio_detach(dev);
            return (ENXIO);
        }
    }

    sc->dev = dev;
    /* Configure all pins as input */
    /* disable interrupts for all pins */
    pinp = octeon_gpio_pins;
    i = 0;
    while (pinp->name) {
        strncpy(sc->gpio_pins[i].gp_name, pinp->name, GPIOMAXNAME);
        sc->gpio_pins[i].gp_pin = pinp->pin;
        sc->gpio_pins[i].gp_caps = DEFAULT_CAPS;
        sc->gpio_pins[i].gp_flags = 0;
        octeon_gpio_pin_configure(sc, &sc->gpio_pins[i], pinp->flags);
        pinp++;
        i++;
    }

    sc->gpio_npins = i;

#if 0
    /*
     * Sample: how to enable edge-triggered interrupt
     * for GPIO pin
     */
    gpio_cfgx.u64 = cvmx_read_csr(CVMX_GPIO_BIT_CFGX(7));
    gpio_cfgx.s.int_en = 1;
    gpio_cfgx.s.int_type = OCTEON_GPIO_IRQ_EDGE;
    cvmx_write_csr(CVMX_GPIO_BIT_CFGX(7), gpio_cfgx.u64);
#endif

    if (bootverbose) {
        for (i = 0; i < 16; i++) {
            gpio_cfgx.u64 = cvmx_read_csr(CVMX_GPIO_BIT_CFGX(i));
            device_printf(dev, "[pin%d] output=%d, invinput=%d, intr=%d, intr_type=%s\n",
                          i, gpio_cfgx.s.tx_oe, gpio_cfgx.s.rx_xor,
                          gpio_cfgx.s.int_en, gpio_cfgx.s.int_type ? "rising edge" : "level");
        }
    }
    sc->busdev = gpiobus_attach_bus(dev);
    if (sc->busdev == NULL) {
        octeon_gpio_detach(dev);
        return (ENXIO);
    }

    return (0);
}
Example #20
static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
	union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
	unsigned long flags;
	unsigned int prev_packet_enable;
	unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
	unsigned int multicast_mode = 1; /* 1 - Reject all multicast.  */
	struct octeon_mgmt_cam_state cam_state;
	struct netdev_hw_addr *ha;
	int available_cam_entries;

	memset(&cam_state, 0, sizeof(cam_state));

	if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
		cam_mode = 0;
		available_cam_entries = 8;
	} else {
		/*
		 * One CAM entry for the primary address, leaves seven
		 * for the secondary addresses.
		 */
		available_cam_entries = 7 - netdev->uc.count;
	}

	if (netdev->flags & IFF_MULTICAST) {
		if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
		    netdev_mc_count(netdev) > available_cam_entries)
			multicast_mode = 2; /* 2 - Accept all multicast.  */
		else
			multicast_mode = 0; /* 0 - Use CAM.  */
	}

	if (cam_mode == 1) {
		/* Add primary address. */
		octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
		netdev_for_each_uc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}
	if (multicast_mode == 0) {
		netdev_for_each_mc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}

	spin_lock_irqsave(&p->lock, flags);

	/* Disable packet I/O. */
	agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	prev_packet_enable = agl_gmx_prtx.s.en;
	agl_gmx_prtx.s.en = 0;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);

	adr_ctl.u64 = 0;
	adr_ctl.s.cam_mode = cam_mode;
	adr_ctl.s.mcst = multicast_mode;
	adr_ctl.s.bcst = 1;     /* Allow broadcast */

	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), adr_ctl.u64);

	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), cam_state.cam[0]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), cam_state.cam[1]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), cam_state.cam[2]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), cam_state.cam[3]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), cam_state.cam[4]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), cam_state.cam[5]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), cam_state.cam_mask);

	/* Restore packet I/O. */
	agl_gmx_prtx.s.en = prev_packet_enable;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);

	spin_unlock_irqrestore(&p->lock, flags);
}
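A condensed restatement of the filter-mode selection above, as a standalone sketch (the helper name and parameters are hypothetical, the IFF_* bits are the usual flags from <linux/if.h>, and the logic mirrors the function):

static void pick_filter_modes(unsigned int flags, int uc_count, int mc_count,
			      unsigned int *cam_mode, unsigned int *mcast_mode)
{
	/* CAM matching only works when all unicast addresses fit. */
	*cam_mode = ((flags & IFF_PROMISC) || uc_count > 7) ? 0 : 1;
	*mcast_mode = 1;			/* default: reject all multicast */
	if (flags & IFF_MULTICAST) {
		int avail = (*cam_mode == 0) ? 8 : 7 - uc_count;
		if (*cam_mode == 0 || (flags & IFF_ALLMULTI) || mc_count > avail)
			*mcast_mode = 2;	/* accept all multicast */
		else
			*mcast_mode = 0;	/* filter multicast via CAM */
	}
}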
Example #21
/**
 * Enable an endpoint to respond to an IN transaction
 *
 * @param usb    USB device state populated by
 *               cvmx_usbd_initialize().
 * @param endpoint_num
 *               Endpoint number to enable
 * @param transfer_type
 *               Transfer type for the endpoint
 * @param max_packet_size
 *               Maximum packet size for the endpoint
 * @param buffer Buffer to send
 * @param buffer_length
 *               Length of the buffer in bytes
 *
 * @return Zero on success, negative on failure
 */
int cvmx_usbd_in_endpoint_enable(cvmx_usbd_state_t *usb,
    int endpoint_num, cvmx_usbd_transfer_t transfer_type,
    int max_packet_size, uint64_t buffer, int buffer_length)
{
    cvmx_usbcx_diepctlx_t usbc_diepctl;
    cvmx_usbcx_dieptsizx_t usbc_dieptsiz;

    if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
        cvmx_dprintf("%s: endpoint=%d buffer=0x%llx length=%d\n",
            __FUNCTION__, endpoint_num, (ULL)buffer, buffer_length);

    usb->endpoint[endpoint_num].buffer_length = buffer_length;

    CVMX_SYNCW; /* Flush out pending writes before enable */

    /* Clear any pending interrupts */
    __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DIEPINTX(endpoint_num, usb->index),
        __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DIEPINTX(endpoint_num, usb->index)));

    usbc_dieptsiz.u32 = 0;
    usbc_dieptsiz.s.mc = 1;
    if (buffer)
    {
        cvmx_write_csr(CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + endpoint_num*8, buffer);
        usbc_dieptsiz.s.pktcnt = (buffer_length + max_packet_size - 1) / max_packet_size;
        if (usbc_dieptsiz.s.pktcnt == 0)
            usbc_dieptsiz.s.pktcnt = 1;
        usbc_dieptsiz.s.xfersize = buffer_length;
    }
    else
    {
        usbc_dieptsiz.s.pktcnt = 0;
        usbc_dieptsiz.s.xfersize = 0;
    }
    __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DIEPTSIZX(endpoint_num, usb->index), usbc_dieptsiz.u32);

    usbc_diepctl.u32 = 0;
    usbc_diepctl.s.epena = (buffer != 0);
    usbc_diepctl.s.setd1pid = 0;
    usbc_diepctl.s.setd0pid = (buffer == 0);
    usbc_diepctl.s.cnak = 1;
    usbc_diepctl.s.txfnum = endpoint_num;
    usbc_diepctl.s.eptype = transfer_type;
    usbc_diepctl.s.usbactep = 1;
    usbc_diepctl.s.nextep = endpoint_num;
    if (endpoint_num == 0)
    {
        switch (max_packet_size)
        {
            case 8:
                usbc_diepctl.s.mps = 3;
                break;
            case 16:
                usbc_diepctl.s.mps = 2;
                break;
            case 32:
                usbc_diepctl.s.mps = 1;
                break;
            default:
                usbc_diepctl.s.mps = 0;
                break;
        }
    }
    else
        usbc_diepctl.s.mps = max_packet_size;
    __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DIEPCTLX(endpoint_num, usb->index), usbc_diepctl.u32);

    return 0;
}
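A hedged usage sketch for the routine above (the transfer-type enumerator and cvmx_ptr_to_phys() are assumptions about the cvmx APIs; 'usb' is a cvmx_usbd_state_t already set up by cvmx_usbd_initialize(), and tx_buf is a cacheline-aligned buffer):

/* Queue a 512-byte bulk IN transfer on endpoint 1. */
cvmx_usbd_in_endpoint_enable(&usb, 1, CVMX_USBD_TRANSFER_BULK,
                             512, cvmx_ptr_to_phys(tx_buf), 512);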
Example #22
static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Allocate ring buffers.  */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->tx_ring)
		return -ENOMEM;
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;

	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->rx_ring)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);

	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));

	/* Bring it out of reset if needed. */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
		do {
			mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
		} while (mix_ctl.s.reset);
	}

	agl_gmx_inf_mode.u64 = 0;
	agl_gmx_inf_mode.s.en = 1;
	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

	oring1.u64 = 0;
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);

	/* Disable packet I/O. */
	prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	prtx_cfg.s.en = 0;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);

	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/*
	 * Enable the port HW. Packets are not allowed until
	 * cvmx_mgmt_port_enable() is called.
	 */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1;    /* Strip the ending CRC */
	mix_ctl.s.en = 1;           /* Enable the port */
	mix_ctl.s.nbtarb = 0;       /* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
	cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);

	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/*
		 * Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
		if (port) {
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
		} else {
			drv_ctl.s.byp_en = 1;
			drv_ctl.s.nctl = 6;
			drv_ctl.s.pctl = 6;
		}
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
	}

	octeon_mgmt_rx_fill_ring(netdev);

	/* Clear statistics. */
	/* Clear on read. */
	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), 1);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port), 0);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port), 0);

	cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), 1);
	cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0);
	cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT1(port), 0);

	/* Clear any pending interrupts */
	cvmx_write_csr(CVMX_MIXX_ISR(port), cvmx_read_csr(CVMX_MIXX_ISR(port)));

	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
			netdev)) {
		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
		goto err_noirq;
	}

	/* Interrupt every single RX packet */
	mix_irhwm.u64 = 0;
	mix_irhwm.s.irhwm = 0;
	cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);

	/* Interrupt when we have 1 or more packets to clean.  */
	mix_orhwm.u64 = 0;
	mix_orhwm.s.orhwm = 1;
	cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);

	/* Enable receive and transmit interrupts */
	mix_intena.u64 = 0;
	mix_intena.s.ithena = 1;
	mix_intena.s.othena = 1;
	cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);

	/* Enable packet I/O. */

	rxx_frm_ctl.u64 = 0;
	rxx_frm_ctl.s.pre_align = 1;
	/*
	 * When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
	rxx_frm_ctl.s.pad_len = 1;
	/* When set, disables the length check for VLAN pkts */
	rxx_frm_ctl.s.vlan_len = 1;
	/* When set, PREAMBLE checking is  less strict */
	rxx_frm_ctl.s.pre_free = 1;
	/* Control Pause Frames can match station SMAC */
	rxx_frm_ctl.s.ctl_smac = 0;
	/* Control Pause Frames can match globally assign Multicast address */
	rxx_frm_ctl.s.ctl_mcst = 1;
	/* Forward pause information to TX block */
	rxx_frm_ctl.s.ctl_bck = 1;
	/* Drop Control Pause Frames */
	rxx_frm_ctl.s.ctl_drp = 1;
	/* Strip off the preamble */
	rxx_frm_ctl.s.pre_strp = 1;
	/*
	 * This port is configured to send PREAMBLE+SFD to begin every
	 * frame.  GMX checks that the PREAMBLE is sent correctly.
	 */
	rxx_frm_ctl.s.pre_chk = 1;
	cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);

	/* Enable the AGL block */
	agl_gmx_inf_mode.u64 = 0;
	agl_gmx_inf_mode.s.en = 1;
	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

	/* Configure the port duplex and enables */
	prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	prtx_cfg.s.tx_en = 1;
	prtx_cfg.s.rx_en = 1;
	prtx_cfg.s.en = 1;
	p->last_duplex = 1;
	prtx_cfg.s.duplex = p->last_duplex;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);

	p->last_link = 0;
	netif_carrier_off(netdev);

	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY.\n");
		goto err_noirq;
	}

	netif_wake_queue(netdev);
	napi_enable(&p->napi);

	return 0;
err_noirq:
	octeon_mgmt_reset_hw(p);
	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);
err_nomem:
	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);
	return -ENOMEM;
}
Example #23
/**
 * Called after libata determines the needed PIO mode. This
 * function programs the Octeon bootbus regions to support the
 * timing requirements of the PIO mode.
 *
 * @ap:     ATA port information
 * @dev:    ATA device
 */
static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
{
	struct octeon_cf_data *ocd = ap->dev->platform_data;
	union cvmx_mio_boot_reg_timx reg_tim;
	int cs = ocd->base_region;
	int T;
	struct ata_timing timing;

	int use_iordy;
	int trh;
	int pause;
	/* These names are timing parameters from the ATA spec */
	int t1;
	int t2;
	int t2i;

	T = (int)(2000000000000LL / octeon_get_clock_rate());

	if (ata_timing_compute(dev, dev->pio_mode, &timing, T, T))
		BUG();

	t1 = timing.setup;
	if (t1)
		t1--;
	t2 = timing.active;
	if (t2)
		t2--;
	t2i = timing.act8b;
	if (t2i)
		t2i--;

	trh = ns_to_tim_reg(2, 20);
	if (trh)
		trh--;

	pause = timing.cycle - timing.active - timing.setup - trh;
	if (pause)
		pause--;

	octeon_cf_set_boot_reg_cfg(cs);
	if (ocd->dma_engine >= 0)
		/* True IDE mode, program both chip selects.  */
		octeon_cf_set_boot_reg_cfg(cs + 1);


	use_iordy = ata_pio_need_iordy(dev);

	reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cs));
	/* Disable page mode */
	reg_tim.s.pagem = 0;
	/* Enable dynamic timing */
	reg_tim.s.waitm = use_iordy;
	/* Pages are disabled */
	reg_tim.s.pages = 0;
	/* We don't use multiplexed address mode */
	reg_tim.s.ale = 0;
	/* Not used */
	reg_tim.s.page = 0;
	/* Time after IORDY to continue to assert the data */
	reg_tim.s.wait = 0;
	/* Time to wait to complete the cycle. */
	reg_tim.s.pause = pause;
	/* How long to hold after a write to de-assert CE. */
	reg_tim.s.wr_hld = trh;
	/* How long to wait after a read to de-assert CE. */
	reg_tim.s.rd_hld = trh;
	/* How long write enable is asserted */
	reg_tim.s.we = t2;
	/* How long read enable is asserted */
	reg_tim.s.oe = t2;
	/* Time after CE that read/write starts */
	reg_tim.s.ce = ns_to_tim_reg(2, 5);
	/* Time before CE that address is valid */
	reg_tim.s.adr = 0;

	/* Program the bootbus region timing for the data port chip select. */
	cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs), reg_tim.u64);
	if (ocd->dma_engine >= 0)
		/* True IDE mode, program both chip selects.  */
		cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs + 1), reg_tim.u64);
}
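Worked example of the period computation above (illustrative): with octeon_get_clock_rate() returning 800 MHz, T = 2000000000000 / 800000000 = 2500, i.e. 2500 ps or two 1.25 ns clock periods per timing unit, which appears to match the divide-by-two first argument passed to ns_to_tim_reg(2, ...).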
Example #24
File: cvmx-ilk.c Project: 2asoft/freebsd
//#define CVMX_ILK_STATS_ENA 1
int cvmx_ilk_enable (int interface)
{
    int res = -1;
    int retry_count = 0;
    cvmx_helper_link_info_t result;
    cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
    cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
#ifdef CVMX_ILK_STATS_ENA
    cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
    cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
#endif

    if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
        return res;

    if (interface >= CVMX_NUM_ILK_INTF)
        return res;

    result.u64 = 0;
    
#ifdef CVMX_ILK_STATS_ENA
    cvmx_dprintf ("\n");
    cvmx_dprintf ("<<<< ILK%d: Before enabling ilk\n", interface);
    cvmx_ilk_reg_dump_rx (interface);
    cvmx_ilk_reg_dump_tx (interface);
#endif

    /* RX packet will be enabled only if link is up */

    /* TX side */
    ilk_txx_cfg1.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG1(interface));
    ilk_txx_cfg1.s.pkt_ena = 1;
    ilk_txx_cfg1.s.rx_link_fc_ign = 1; /* cannot use link fc workaround */
    cvmx_write_csr (CVMX_ILK_TXX_CFG1(interface), ilk_txx_cfg1.u64); 
    cvmx_read_csr (CVMX_ILK_TXX_CFG1(interface));

#ifdef CVMX_ILK_STATS_ENA
    /* RX side stats */
    ilk_rxx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG0(interface));
    ilk_rxx_cfg0.s.lnk_stats_ena = 1;
    cvmx_write_csr (CVMX_ILK_RXX_CFG0(interface), ilk_rxx_cfg0.u64); 

    /* TX side stats */
    ilk_txx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG0(interface));
    ilk_txx_cfg0.s.lnk_stats_ena = 1;
    cvmx_write_csr (CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64); 
#endif

retry:
    retry_count++;
    if (retry_count > 10)
       goto out;

    /* Make sure the link is up, so that packets can be sent. */
    result = __cvmx_helper_ilk_link_get(cvmx_helper_get_ipd_port(interface + CVMX_ILK_GBL_BASE, 0));

    /* Small delay before another retry. */
    cvmx_wait_usec(100);

    ilk_rxx_cfg1.u64 = cvmx_read_csr(CVMX_ILK_RXX_CFG1(interface));
    if (ilk_rxx_cfg1.s.pkt_ena == 0)
       goto retry; 

out:
        
#ifdef CVMX_ILK_STATS_ENA
    cvmx_dprintf (">>>> ILK%d: After ILK is enabled\n", interface);
    cvmx_ilk_reg_dump_rx (interface);
    cvmx_ilk_reg_dump_tx (interface);
#endif

    if (result.s.link_up)
        return 0;

    return -1;
}
Example #25
int cvm_oct_rgmii_init(struct ifnet *ifp)
{
	struct octebus_softc *sc;
	cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
	int error;
	int rid;

	if (cvm_oct_common_init(ifp) != 0)
	    return ENXIO;

	priv->open = cvm_oct_common_open;
	priv->stop = cvm_oct_common_stop;
	priv->stop(ifp);

	/* Due to GMX errata in CN3XXX series chips, it is necessary to take the
	   link down immediately when the PHY changes state. In order to do this
	   we call the poll function every time the RGMII inband status changes.
	   This may cause problems if the PHY doesn't implement inband status
	   properly. */
	if (number_rgmii_ports == 0) {
		sc = device_get_softc(device_get_parent(priv->dev));

		rid = 0;
		sc->sc_rgmii_irq = bus_alloc_resource(sc->sc_dev, SYS_RES_IRQ,
						      &rid, OCTEON_IRQ_RML,
						      OCTEON_IRQ_RML, 1,
						      RF_ACTIVE);
		if (sc->sc_rgmii_irq == NULL) {
			device_printf(sc->sc_dev, "could not allocate RGMII irq");
			return ENXIO;
		}

		error = bus_setup_intr(sc->sc_dev, sc->sc_rgmii_irq,
				       INTR_TYPE_NET | INTR_MPSAFE,
				       cvm_oct_rgmii_rml_interrupt, NULL,
				       &number_rgmii_ports, NULL);
		if (error != 0) {
			device_printf(sc->sc_dev, "could not setup RGMII irq");
			return error;
		}
	}
	number_rgmii_ports++;

	/* Only true RGMII ports need to be polled. In GMII mode, port 0 is
	   really an RGMII port. */
	if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII) && (priv->port == 0)) ||
	    (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {

		if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) {

			cvmx_gmxx_rxx_int_en_t gmx_rx_int_en;
			int interface = INTERFACE(priv->port);
			int index = INDEX(priv->port);

			/* Enable interrupts on inband status changes for this port */
			gmx_rx_int_en.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_EN(index, interface));
			gmx_rx_int_en.s.phy_dupx = 1;
			gmx_rx_int_en.s.phy_link = 1;
			gmx_rx_int_en.s.phy_spd = 1;
			cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface), gmx_rx_int_en.u64);
			priv->poll = cvm_oct_rgmii_poll;
		}
	}

	return 0;
}
Example #26
File: cvmx-ilk.c Project: 2asoft/freebsd
/**
 * Show channel statistics
 *
 * @param interface The identifier of the packet interface to show statistics
 *                  for. cn68xx has 2 interfaces: ilk0 and ilk1.
 * @param pstats A pointer to cvmx_ilk_stats_ctrl_t that specifies which
 *               logical channels to access
 *
 * @return nothing
 */
void cvmx_ilk_show_stats (int interface, cvmx_ilk_stats_ctrl_t *pstats)
{
    unsigned int i;
    cvmx_ilk_rxx_idx_stat0_t ilk_rxx_idx_stat0;
    cvmx_ilk_rxx_idx_stat1_t ilk_rxx_idx_stat1;
    cvmx_ilk_rxx_mem_stat0_t ilk_rxx_mem_stat0;
    cvmx_ilk_rxx_mem_stat1_t ilk_rxx_mem_stat1;

    cvmx_ilk_txx_idx_stat0_t ilk_txx_idx_stat0;
    cvmx_ilk_txx_idx_stat1_t ilk_txx_idx_stat1;
    cvmx_ilk_txx_mem_stat0_t ilk_txx_mem_stat0;
    cvmx_ilk_txx_mem_stat1_t ilk_txx_mem_stat1;

    if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
        return;

    if (interface >= CVMX_NUM_ILK_INTF)
        return;

    if (pstats == NULL)
        return;

    /* discrete channels */
    if (pstats->chan_list != NULL)
    {
        for (i = 0; i < pstats->num_chans; i++)
        {

            /* get the number of rx packets */
            ilk_rxx_idx_stat0.u64 = 0;
            ilk_rxx_idx_stat0.s.index = *pstats->chan_list;
            ilk_rxx_idx_stat0.s.clr = pstats->clr_on_rd;
            cvmx_write_csr (CVMX_ILK_RXX_IDX_STAT0(interface),
                            ilk_rxx_idx_stat0.u64);
            ilk_rxx_mem_stat0.u64 = cvmx_read_csr
                                    (CVMX_ILK_RXX_MEM_STAT0(interface));

            /* get the number of rx bytes */
            ilk_rxx_idx_stat1.u64 = 0;
            ilk_rxx_idx_stat1.s.index = *pstats->chan_list;
            ilk_rxx_idx_stat1.s.clr = pstats->clr_on_rd;
            cvmx_write_csr (CVMX_ILK_RXX_IDX_STAT1(interface),
                            ilk_rxx_idx_stat1.u64);
            ilk_rxx_mem_stat1.u64 = cvmx_read_csr
                                    (CVMX_ILK_RXX_MEM_STAT1(interface));

            cvmx_dprintf ("ILK%d Channel%d Rx: %d packets %d bytes\n", interface,
                    *pstats->chan_list, ilk_rxx_mem_stat0.s.rx_pkt,
                    (unsigned int) ilk_rxx_mem_stat1.s.rx_bytes);

            /* get the number of tx packets */
            ilk_txx_idx_stat0.u64 = 0;
            ilk_txx_idx_stat0.s.index = *pstats->chan_list;
            ilk_txx_idx_stat0.s.clr = pstats->clr_on_rd;
            cvmx_write_csr (CVMX_ILK_TXX_IDX_STAT0(interface),
                            ilk_txx_idx_stat0.u64);
            ilk_txx_mem_stat0.u64 = cvmx_read_csr
                                    (CVMX_ILK_TXX_MEM_STAT0(interface));

            /* get the number of tx bytes */
            ilk_txx_idx_stat1.u64 = 0;
            ilk_txx_idx_stat1.s.index = *pstats->chan_list;
            ilk_txx_idx_stat1.s.clr = pstats->clr_on_rd;
            cvmx_write_csr (CVMX_ILK_TXX_IDX_STAT1(interface),
                            ilk_txx_idx_stat1.u64);
            ilk_txx_mem_stat1.u64 = cvmx_read_csr
                                    (CVMX_ILK_TXX_MEM_STAT1(interface));

            cvmx_dprintf ("ILK%d Channel%d Tx: %d packets %d bytes\n", interface,
                    *pstats->chan_list, ilk_txx_mem_stat0.s.tx_pkt,
                    (unsigned int) ilk_txx_mem_stat1.s.tx_bytes);

            pstats++;
        }
        return;
    }

    /* continuous channels */
    ilk_rxx_idx_stat0.u64 = 0;
    ilk_rxx_idx_stat0.s.index = pstats->chan_start;
    ilk_rxx_idx_stat0.s.inc = pstats->chan_step;
    ilk_rxx_idx_stat0.s.clr = pstats->clr_on_rd;
    cvmx_write_csr (CVMX_ILK_RXX_IDX_STAT0(interface), ilk_rxx_idx_stat0.u64);

    ilk_rxx_idx_stat1.u64 = 0;
    ilk_rxx_idx_stat1.s.index = pstats->chan_start;
    ilk_rxx_idx_stat1.s.inc = pstats->chan_step;
    ilk_rxx_idx_stat1.s.clr = pstats->clr_on_rd;
    cvmx_write_csr (CVMX_ILK_RXX_IDX_STAT1(interface), ilk_rxx_idx_stat1.u64);

    ilk_txx_idx_stat0.u64 = 0;
    ilk_txx_idx_stat0.s.index = pstats->chan_start;
    ilk_txx_idx_stat0.s.inc = pstats->chan_step;
    ilk_txx_idx_stat0.s.clr = pstats->clr_on_rd;
    cvmx_write_csr (CVMX_ILK_TXX_IDX_STAT0(interface), ilk_txx_idx_stat0.u64);

    ilk_txx_idx_stat1.u64 = 0;
    ilk_txx_idx_stat1.s.index = pstats->chan_start;
    ilk_txx_idx_stat1.s.inc = pstats->chan_step;
    ilk_txx_idx_stat1.s.clr = pstats->clr_on_rd;
    cvmx_write_csr (CVMX_ILK_TXX_IDX_STAT1(interface), ilk_txx_idx_stat1.u64);

    for (i = pstats->chan_start; i <= pstats->chan_end; i += pstats->chan_step)
    {
        ilk_rxx_mem_stat0.u64 = cvmx_read_csr
                                (CVMX_ILK_RXX_MEM_STAT0(interface));
        ilk_rxx_mem_stat1.u64 = cvmx_read_csr
                                (CVMX_ILK_RXX_MEM_STAT1(interface));
        cvmx_dprintf ("ILK%d Channel%d Rx: %d packets %d bytes\n", interface, i,
                ilk_rxx_mem_stat0.s.rx_pkt,
                (unsigned int) ilk_rxx_mem_stat1.s.rx_bytes);

        ilk_txx_mem_stat0.u64 = cvmx_read_csr
                                (CVMX_ILK_TXX_MEM_STAT0(interface));
        ilk_txx_mem_stat1.u64 = cvmx_read_csr
                                (CVMX_ILK_TXX_MEM_STAT1(interface));
        cvmx_dprintf ("ILK%d Channel%d Tx: %d packets %d bytes\n", interface, i,
                ilk_txx_mem_stat0.s.tx_pkt,
                (unsigned int) ilk_txx_mem_stat1.s.tx_bytes);
    }

    return;
}
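A usage sketch for the continuous-channel path above (illustrative; the field names are taken from the accesses in the function, and the channel range is an assumption):

/* Dump Rx/Tx counters for channels 0-7 of ILK0 without clear-on-read. */
cvmx_ilk_stats_ctrl_t stats = {
    .chan_list = NULL,      /* NULL selects the continuous-range path */
    .chan_start = 0,
    .chan_end = 7,
    .chan_step = 1,
    .clr_on_rd = 0,
};
cvmx_ilk_show_stats(0, &stats);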
Example #27
static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;
	int mtu_overhead = ETH_HLEN + ETH_FCS_LEN;

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	mtu_overhead += VLAN_HLEN;
#endif

	octeon_mdiobus_force_mod_depencency();

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	if (receive_group_order) {
		if (receive_group_order > 4)
			receive_group_order = 4;
		pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
	} else {
		pow_receive_groups = BIT(pow_receive_group);
	}
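	/* Worked example (illustrative): receive_group_order = 2 yields
	 * pow_receive_groups = (1 << (1 << 2)) - 1 = 0xf, i.e. groups 0-3,
	 * and the per-port tag_mask below becomes ~((1 << 2) - 1), masking
	 * the two low group-tag bits so hashed tags land in that range.
	 */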

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));

			if (receive_group_order) {
				int tag_mask;

				/* We support only 16 groups at the moment, so
				 * always disable the two additional "hidden"
				 * tag_mask bits on CN68XX.
				 */
				if (OCTEON_IS_MODEL(OCTEON_CN68XX))
					pip_prt_tagx.u64 |= 0x3ull << 44;

				tag_mask = ~((1 << receive_group_order) - 1);
				pip_prt_tagx.s.grptagbase	= 0;
				pip_prt_tagx.s.grptagmask	= tag_mask;
				pip_prt_tagx.s.grptag		= 1;
				pip_prt_tagx.s.tag_mode		= 0;
				pip_prt_tagx.s.inc_prt_flag	= 1;
				pip_prt_tagx.s.ip6_dprt_flag	= 1;
				pip_prt_tagx.s.ip4_dprt_flag	= 1;
				pip_prt_tagx.s.ip6_sprt_flag	= 1;
				pip_prt_tagx.s.ip4_sprt_flag	= 1;
				pip_prt_tagx.s.ip6_dst_flag	= 1;
				pip_prt_tagx.s.ip4_dst_flag	= 1;
				pip_prt_tagx.s.ip6_src_flag	= 1;
				pip_prt_tagx.s.ip4_src_flag	= 1;
				pip_prt_tagx.s.grp		= 0;
			} else {
				pip_prt_tagx.s.grptag	= 0;
				pip_prt_tagx.s.grp	= pow_receive_group;
			}

			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if (pow_send_group != -1) {
		struct net_device *dev;

		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			SET_NETDEV_DEV(dev, &pdev->dev);
			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
			dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
		    cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
			    alloc_etherdev(sizeof(struct octeon_ethernet));
			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			SET_NETDEV_DEV(dev, &pdev->dev);
			priv = netdev_priv(dev);
			priv->netdev = dev;
			priv->of_node = cvm_oct_node_for_port(pip, interface,
							      port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
			dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
			dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				cvm_set_rgmii_delay(priv->of_node, interface,
						    port_index);
				break;
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -=
				    cvmx_pko_get_num_queues(priv->port) *
				    sizeof(u32);
				schedule_delayed_work(&priv->port_periodic_work,
						      HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 uS: about 10 1500-byte packets at 1GE.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);

	return 0;
}
Example #28
File: cvmx-ilk.c Project: 2asoft/freebsd
/**
 * Initialize and start the ILK interface.
 *
 * @param interface The identifier of the packet interface to configure and
 *                  use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
 *                  ilk1.
 *
 * @param lane_mask the lane group for this interface
 *
 * @return Zero on success, negative on failure.
 */
int cvmx_ilk_start_interface (int interface, unsigned char lane_mask)
{
    int res = -1;
    int other_intf, this_qlm, other_qlm;
    unsigned char uni_mask;
    cvmx_mio_qlmx_cfg_t mio_qlmx_cfg, other_mio_qlmx_cfg;
    cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
    cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
    cvmx_ilk_ser_cfg_t ilk_ser_cfg;

    if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
        return res;

    if (interface >= CVMX_NUM_ILK_INTF)
        return res;

    if (lane_mask == 0)
        return res;

    /* check conflicts between 2 ilk interfaces. 1 lane can be assigned to 1
     * interface only */
    other_intf = !interface;
    this_qlm = interface + CVMX_ILK_QLM_BASE;
    other_qlm = other_intf + CVMX_ILK_QLM_BASE;
    if (cvmx_ilk_intf_cfg[other_intf].lane_en_mask & lane_mask)
    {
        cvmx_dprintf ("ILK%d: %s: lane assignment conflict\n", interface,
                      __FUNCTION__);
        return res;
    }

    /* check the legality of the lane mask. interface 0 can have 8 lanes,
     * while interface 1 can have 4 lanes at most */
    uni_mask = lane_mask >> (interface * 4);
    if ((uni_mask != 0x1 && uni_mask != 0x3 && uni_mask != 0xf &&
         uni_mask != 0xff) || (interface == 1 && lane_mask > 0xf0))
    {
#if CVMX_ENABLE_DEBUG_PRINTS
        cvmx_dprintf ("ILK%d: %s: incorrect lane mask: 0x%x \n", interface,
                      __FUNCTION__, uni_mask);
#endif
        return res;
    }

    /* check the availability of qlms. qlm_cfg = 001 means the chip is fused
     * to give this qlm to ilk */
    mio_qlmx_cfg.u64 = cvmx_read_csr (CVMX_MIO_QLMX_CFG(this_qlm));
    other_mio_qlmx_cfg.u64 = cvmx_read_csr (CVMX_MIO_QLMX_CFG(other_qlm));
    if (mio_qlmx_cfg.s.qlm_cfg != 1 ||
        (uni_mask == 0xff && other_mio_qlmx_cfg.s.qlm_cfg != 1))
    {
#if CVMX_ENABLE_DEBUG_PRINTS
        cvmx_dprintf ("ILK%d: %s: qlm unavailable\n", interface, __FUNCTION__);
#endif
        return res;
    }

    /* power up the serdes */
    ilk_ser_cfg.u64 = cvmx_read_csr (CVMX_ILK_SER_CFG);
    if (ilk_ser_cfg.s.ser_pwrup == 0)
    {
        ilk_ser_cfg.s.ser_rxpol_auto = 1;
        ilk_ser_cfg.s.ser_rxpol = 0;
        ilk_ser_cfg.s.ser_txpol = 0;
        ilk_ser_cfg.s.ser_reset_n = 0xff;
        ilk_ser_cfg.s.ser_haul = 0;
    }
    ilk_ser_cfg.s.ser_pwrup |= ((interface == 0) && (lane_mask > 0xf)) ?
                               0x3 : (1 << interface);
    cvmx_write_csr (CVMX_ILK_SER_CFG, ilk_ser_cfg.u64);

    /* configure the lane enable of the interface */
    ilk_txx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG0(interface));
    ilk_rxx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG0(interface));
    ilk_txx_cfg0.s.lane_ena = ilk_rxx_cfg0.s.lane_ena = lane_mask;
    cvmx_write_csr (CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64);
    cvmx_write_csr (CVMX_ILK_RXX_CFG0(interface), ilk_rxx_cfg0.u64);

    /* write to local cache. for lane speed, if interface 0 has 8 lanes,
     * assume both qlms have the same speed */
    cvmx_ilk_intf_cfg[interface].intf_en = 1;
    cvmx_ilk_intf_cfg[interface].lane_en_mask = lane_mask;
    res = 0;

    return res;
}
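A hedged bring-up sketch combining this routine with cvmx_ilk_enable() from the earlier example (the lane mask and interface number are illustrative):

/* Bring up ILK0 on all eight lanes, then enable packet transmission
 * once the link trains; both calls return zero on success. */
if (cvmx_ilk_start_interface(0, 0xff) == 0 && cvmx_ilk_enable(0) == 0)
    cvmx_dprintf("ILK0: up\n");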
Example #29
static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
{
	union cvmx_npi_rsl_int_blocks rsl_int_blocks;
	int index;
	irqreturn_t return_status = IRQ_NONE;

	rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS);

	
	if (rsl_int_blocks.s.gmx0) {

		int interface = 0;
		
		for (index = 0;
		     index < cvmx_helper_ports_on_interface(interface);
		     index++) {

			
			union cvmx_gmxx_rxx_int_reg gmx_rx_int_reg;
			gmx_rx_int_reg.u64 =
			    cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
					  (index, interface));
			gmx_rx_int_reg.u64 &=
			    cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
					  (index, interface));
			
			if (gmx_rx_int_reg.s.phy_dupx
			    || gmx_rx_int_reg.s.phy_link
			    || gmx_rx_int_reg.s.phy_spd) {

				struct net_device *dev =
				    cvm_oct_device[cvmx_helper_get_ipd_port
						   (interface, index)];
				if (dev)
					cvm_oct_rgmii_poll(dev);
				gmx_rx_int_reg.u64 = 0;
				gmx_rx_int_reg.s.phy_dupx = 1;
				gmx_rx_int_reg.s.phy_link = 1;
				gmx_rx_int_reg.s.phy_spd = 1;
				cvmx_write_csr(CVMX_GMXX_RXX_INT_REG
					       (index, interface),
					       gmx_rx_int_reg.u64);
				return_status = IRQ_HANDLED;
			}
		}
	}

	
	if (rsl_int_blocks.s.gmx1) {

		int interface = 1;
		
		for (index = 0;
		     index < cvmx_helper_ports_on_interface(interface);
		     index++) {

			
			union cvmx_gmxx_rxx_int_reg gmx_rx_int_reg;
			gmx_rx_int_reg.u64 =
			    cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
					  (index, interface));
			gmx_rx_int_reg.u64 &=
			    cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
					  (index, interface));
			
			if (gmx_rx_int_reg.s.phy_dupx
			    || gmx_rx_int_reg.s.phy_link
			    || gmx_rx_int_reg.s.phy_spd) {

				struct net_device *dev =
				    cvm_oct_device[cvmx_helper_get_ipd_port
						   (interface, index)];
				if (dev)
					cvm_oct_rgmii_poll(dev);
				gmx_rx_int_reg.u64 = 0;
				gmx_rx_int_reg.s.phy_dupx = 1;
				gmx_rx_int_reg.s.phy_link = 1;
				gmx_rx_int_reg.s.phy_spd = 1;
				cvmx_write_csr(CVMX_GMXX_RXX_INT_REG
					       (index, interface),
					       gmx_rx_int_reg.u64);
				return_status = IRQ_HANDLED;
			}
		}
	}
	return return_status;
}
Example #30
static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Allocate ring buffers.  */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->tx_ring)
		return -ENOMEM;
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;


	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->rx_ring)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);

	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);

	/* Bring it out of reset if needed. */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
		do {
			mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
		} while (mix_ctl.s.reset);
	}

	if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
		agl_gmx_inf_mode.u64 = 0;
		agl_gmx_inf_mode.s.en = 1;
		cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
	}
	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
		|| OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/* Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
		if (p->port) {
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
		} else {
			drv_ctl.s.byp_en = 1;
			drv_ctl.s.nctl = 6;
			drv_ctl.s.pctl = 6;
		}
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
	}

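	/* Hand the ring bases to the MIX block: the hardware takes each
	 * DMA base address in 8-byte units, hence the >> 3 applied to the
	 * handles returned by dma_map_single() above.
	 */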
	oring1.u64 = 0;
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);

	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/* Enable the port HW. Packets are not allowed until
	 * cvmx_mgmt_port_enable() is called.
	 */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1;    /* Strip the ending CRC */
	mix_ctl.s.en = 1;           /* Enable the port */
	mix_ctl.s.nbtarb = 0;       /* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
#ifdef __LITTLE_ENDIAN
	mix_ctl.s.lendian = 1;
#endif
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);

	/* Read the PHY to find the mode of the interface. */
	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
		goto err_noirq;
	}

	/* Set the mode of the interface, RGMII/MII. */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) {
		union cvmx_agl_prtx_ctl agl_prtx_ctl;
		int rgmii_mode = (netdev->phydev->supported &
				  (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;

		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

		/* MII clock counts are based on the 125 MHz reference,
		 * which has an 8 ns period, so our delays need to be
		 * multiplied by this factor.
		 */
#define NS_PER_PHY_CLK 8

		/* Take the DLL and clock tree out of reset */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.clkrst = 0;
		if (rgmii_mode) {
			agl_prtx_ctl.s.dllrst = 0;
			agl_prtx_ctl.s.clktx_byp = 0;
		}
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
		cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */

		/* Wait for the DLL to lock. External 125 MHz
		 * reference clock must be stable at this point.
		 */
		ndelay(256 * NS_PER_PHY_CLK);

		/* Enable the interface */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.enable = 1;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

		/* Read the value back to force the previous write */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);

		/* Enable the compensation controller */
		agl_prtx_ctl.s.comp = 1;
		agl_prtx_ctl.s.drv_byp = 0;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
		/* Force write out before wait. */
		cvmx_read_csr(p->agl_prt_ctl);

		/* Wait for the compensation state to lock. */
		ndelay(1040 * NS_PER_PHY_CLK);

		/* The default interframe gaps are too small. The
		 * recommended workaround is:
		 *
		 * AGL_GMX_TX_IFG[IFG1] = 14
		 * AGL_GMX_TX_IFG[IFG2] = 10
		 */
		cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae); /* IFG2=0xa, IFG1=0xe */
	}

	octeon_mgmt_rx_fill_ring(netdev);

	/* Clear statistics; also put the counters in clear-on-read mode. */
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);

	cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));

	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
			netdev)) {
		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
		goto err_noirq;
	}

	/* Interrupt every single RX packet */
	mix_irhwm.u64 = 0;
	mix_irhwm.s.irhwm = 0;
	cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);

	/* Interrupt when we have 1 or more packets to clean.  */
	mix_orhwm.u64 = 0;
	mix_orhwm.s.orhwm = 0;
	cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);

	/* Enable receive and transmit interrupts */
	mix_intena.u64 = 0;
	mix_intena.s.ithena = 1;
	mix_intena.s.othena = 1;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);

	/* Enable packet I/O. */

	rxx_frm_ctl.u64 = 0;
	rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
	rxx_frm_ctl.s.pre_align = 1;
	/* When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
	rxx_frm_ctl.s.pad_len = 1;
	/* When set, disables the length check for VLAN pkts */
	rxx_frm_ctl.s.vlan_len = 1;
	/* When set, PREAMBLE checking is less strict */
	rxx_frm_ctl.s.pre_free = 1;
	/* Control Pause Frames can match station SMAC */
	rxx_frm_ctl.s.ctl_smac = 0;
	/* Control Pause Frames can match a globally assigned multicast address */
	rxx_frm_ctl.s.ctl_mcst = 1;
	/* Forward pause information to TX block */
	rxx_frm_ctl.s.ctl_bck = 1;
	/* Drop Control Pause Frames */
	rxx_frm_ctl.s.ctl_drp = 1;
	/* Strip off the preamble */
	rxx_frm_ctl.s.pre_strp = 1;
	/* This port is configured to send PREAMBLE+SFD to begin every
	 * frame.  GMX checks that the PREAMBLE is sent correctly.
	 */
	rxx_frm_ctl.s.pre_chk = 1;
	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);

	/* Configure the port duplex, speed and enables */
	octeon_mgmt_disable_link(p);
	if (netdev->phydev)
		octeon_mgmt_update_link(p);
	octeon_mgmt_enable_link(p);

	p->last_link = 0;
	p->last_speed = 0;
	/* The PHY is not present in the simulator. The carrier is
	 * enabled while initializing the PHY for the simulator, so
	 * leave it enabled in that case.
	 */
	if (netdev->phydev) {
		netif_carrier_off(netdev);
		phy_start_aneg(netdev->phydev);
	}

	netif_wake_queue(netdev);
	napi_enable(&p->napi);

	return 0;
err_noirq:
	octeon_mgmt_reset_hw(p);
	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);
err_nomem:
	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);
	return -ENOMEM;
}
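The ring_size_to_bytes() helper used throughout this example is not shown above. A plausible minimal definition, assuming each MIX ring slot is one 64-bit descriptor (the union name here is an assumption, not taken from the example):

/* Sketch: a MIX ring occupies one u64 descriptor per slot, so the
 * ring's byte footprint is simply size * sizeof(descriptor).
 */
union mgmt_port_ring_entry {
	u64 d64;
};

static inline int ring_size_to_bytes(int ring_size)
{
	return ring_size * sizeof(union mgmt_port_ring_entry);
}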