/**
 * Callback to perform link training
 *
 * @interface: The identifier of the packet interface to configure and
 *                  use as a SPI interface.
 * @mode:      The operating mode for the SPI interface. The interface
 *                  can operate as a full duplex (both Tx and Rx data paths
 *                  active) or as a halfplex (either the Tx data path is
 *                  active or the Rx data path is active, but not both).
 * @timeout:   Timeout to wait for link to be trained (in seconds)
 *
 * Returns Zero on success, non-zero error code on failure (will cause
 * SPI initialization to abort)
 */
int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode, int timeout)
{
	union cvmx_spxx_trn4_ctl spxx_trn4_ctl;
	union cvmx_spxx_clk_stat stat;
	uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
	uint64_t timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
	int rx_training_needed;

	/* SRX0 & STX0 Inf0 Links are configured - begin training */
	union cvmx_spxx_clk_ctl spxx_clk_ctl;
	spxx_clk_ctl.u64 = 0;
	spxx_clk_ctl.s.seetrn = 0;
	spxx_clk_ctl.s.clkdly = 0x10;
	spxx_clk_ctl.s.runbist = 0;
	spxx_clk_ctl.s.statdrv = 0;
	/* This should always be on the opposite edge as statdrv */
	spxx_clk_ctl.s.statrcv = 1;
	spxx_clk_ctl.s.sndtrn = 1;
	spxx_clk_ctl.s.drptrn = 1;
	spxx_clk_ctl.s.rcvtrn = 1;
	spxx_clk_ctl.s.srxdlck = 1;
	cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
	cvmx_wait(1000 * MS);

	/* SRX0 clear the boot bit */
	spxx_trn4_ctl.u64 = cvmx_read_csr(CVMX_SPXX_TRN4_CTL(interface));
	spxx_trn4_ctl.s.clr_boot = 1;
	cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);

	/* Wait for the training sequence to complete */
	cvmx_dprintf("SPI%d: Waiting for training\n", interface);
	cvmx_wait(1000 * MS);
	/* Wait a really long time here */
	timeout_time = cvmx_get_cycle() + 1000ull * MS * 600;
	/*
	 * The HRM says we must wait for 34 + 16 * MAXDIST training sequences.
	 * We'll be pessimistic and wait for a lot more.
	 */
	rx_training_needed = 500;
	do {
		stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
		if (stat.s.srxtrn && rx_training_needed) {
			rx_training_needed--;
			cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
			stat.s.srxtrn = 0;
		}
		if (cvmx_get_cycle() > timeout_time) {
			cvmx_dprintf("SPI%d: Timeout\n", interface);
			return -1;
		}
	} while (stat.s.srxtrn == 0);

	return 0;
}
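The callbacks above are not invoked directly; the SPI bring-up sequence calls them through a table of function pointers so that board code can substitute its own implementations. A minimal sketch of overriding the training step, assuming the cvmx_spi_callbacks_t table and the cvmx_spi_get_callbacks()/cvmx_spi_set_callbacks() accessors declared in cvmx-spi.h:

/*
 * Sketch: install a custom training callback. Assumes the callback
 * table and accessors from cvmx-spi.h.
 */
static int my_training_cb(int interface, cvmx_spi_mode_t mode, int timeout)
{
	/* Delegate to the stock implementation, then add a hook */
	int rc = cvmx_spi_training_cb(interface, mode, timeout);
	if (rc)
		cvmx_dprintf("SPI%d: training failed in custom hook\n", interface);
	return rc;
}

static void install_training_hook(void)
{
	cvmx_spi_callbacks_t callbacks;

	cvmx_spi_get_callbacks(&callbacks);	/* copy the active table */
	callbacks.training_cb = my_training_cb;
	cvmx_spi_set_callbacks(&callbacks);	/* publish the override */
}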
/**
 * Callback to perform calendar data synchronization
 *
 * @interface: The identifier of the packet interface to configure and
 *                  use as a SPI interface.
 * @mode:      The operating mode for the SPI interface. The interface
 *                  can operate as a full duplex (both Tx and Rx data paths
 *                  active) or as a halfplex (either the Tx data path is
 *                  active or the Rx data path is active, but not both).
 * @timeout:   Timeout to wait for calendar data in seconds
 *
 * Returns Zero on success, non-zero error code on failure (will cause
 * SPI initialization to abort)
 */
int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode, int timeout)
{
	uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
	if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
		/* SRX0 interface should be good, send calendar data */
		union cvmx_srxx_com_ctl srxx_com_ctl;
		cvmx_dprintf
		    ("SPI%d: Rx is synchronized, start sending calendar data\n",
		     interface);
		srxx_com_ctl.u64 = cvmx_read_csr(CVMX_SRXX_COM_CTL(interface));
		srxx_com_ctl.s.inf_en = 1;
		srxx_com_ctl.s.st_en = 1;
		cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
	}

	if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
		/* STX0 has achieved sync */
	/* The corresponding board should be sending calendar data */
		/* Enable the STX0 STAT receiver. */
		union cvmx_spxx_clk_stat stat;
		uint64_t timeout_time;
		union cvmx_stxx_com_ctl stxx_com_ctl;
		stxx_com_ctl.u64 = 0;
		stxx_com_ctl.s.st_en = 1;
		cvmx_write_csr(CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);

		/* Waiting for calendar sync on STX0 STAT */
		cvmx_dprintf("SPI%d: Waiting to sync on STX[%d] STAT\n",
			     interface, interface);
		timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
		/* SPX0_CLK_STAT - SPX0_CLK_STAT[STXCAL] should be 1 (bit10) */
		do {
			stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
			if (cvmx_get_cycle() > timeout_time) {
				cvmx_dprintf("SPI%d: Timeout\n", interface);
				return -1;
			}
		} while (stat.s.stxcal == 0);
	}

	return 0;
}
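Both branches above can run in a single call because the mode argument is a bit mask: full duplex is the OR of the Rx and Tx halfplex bits. A sketch of the enumeration as commonly declared in cvmx-spi.h (the exact numeric values are an assumption here):

/*
 * Assumed cvmx_spi_mode_t layout; whatever the real values are,
 * they must satisfy the bit tests used by the callbacks above.
 */
typedef enum {
	CVMX_SPI_MODE_UNKNOWN = 0,
	CVMX_SPI_MODE_TX_HALFPLEX = 1,	/* only the Tx data path */
	CVMX_SPI_MODE_RX_HALFPLEX = 2,	/* only the Rx data path */
	CVMX_SPI_MODE_DUPLEX = 3	/* TX_HALFPLEX | RX_HALFPLEX */
} cvmx_spi_mode_t;

/*
 * With CVMX_SPI_MODE_DUPLEX, both (mode & CVMX_SPI_MODE_RX_HALFPLEX)
 * and (mode & CVMX_SPI_MODE_TX_HALFPLEX) are nonzero, so calendar
 * data is sent and the STAT receiver is enabled in the same call.
 */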
Example #6
inline int inic_do_per_second_duty_cycle_processing()
{
		if ( cvmx_coremask_first_core(coremask_data) )
		{
				CVM_COMMON_SIMPRINTF("cycles: %lld (%lld), idle count=%lld\n", (ll64_t)(end_cycle - start_cycle), (ll64_t)(process_count), (ll64_t)(idle_counter));
		}

		process_count = 0;
		idle_counter = 0;
		start_cycle = cvmx_get_cycle();

		if ( cvmx_coremask_first_core(coremask_data) )
		{
				int i=0;
				uint64_t fpa_hw_counters[8];
#ifndef REAL_HW
				uint64_t fpa_counters[8];

#endif


				for (i=0; i<8; i++)
				{
#ifndef REAL_HW
						fpa_counters[i] = (uint64_t)(CVM_COMMON_GET_FPA_USE_COUNT(i));
#endif
						fpa_hw_counters[i] =  CVM_COMMON_FPA_AVAIL_COUNT(i);
				}



				//CVM_COMMON_SIMPRINTF("Connection count = %lld (%lld)\n",  (ll64_t)(total_conn_count), (ll64_t)(conn_count));
				CVM_COMMON_SIMPRINTF("%6lld : %6lld : %6lld : %6lld\n", (ll64_t)(fpa_hw_counters[0]), (ll64_t)(fpa_hw_counters[1]), (ll64_t)(fpa_hw_counters[2]), (ll64_t)(fpa_hw_counters[3]));
				CVM_COMMON_SIMPRINTF("%6lld : %6lld : %6lld : %6lld\n", (ll64_t)(fpa_hw_counters[4]), (ll64_t)(fpa_hw_counters[5]), (ll64_t)(fpa_hw_counters[6]), (ll64_t)(fpa_hw_counters[7]));

#ifdef TCP_TPS_SIM
				{
						uint64_t total_conn_count = ((uint64_t)(cvmx_fau_fetch_and_add32(CVMX_FAU_REG_TCP_CONNECTION_COUNT, 0)));
						cvmx_fau_atomic_write32(CVMX_FAU_REG_TCP_CONNECTION_COUNT, 0);
						CVM_COMMON_SIMPRINTF("Total TPS count = %lld\n", (ll64_t)total_conn_count);
				}
#endif

		}

		return (0);
}
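The TCP_TPS_SIM block above reads the connection counter and then writes zero back; increments from other cores that land between the read and the write are lost. A hedged sketch of a race-free read-and-reset, assuming the standard cvmx-fau.h fetch-and-add primitives (the helper name is hypothetical):

/*
 * Sketch: atomically drain a 32-bit FAU counter. Subtracting exactly
 * the value that was read preserves increments that arrive between
 * the read and the reset, unlike a plain write of zero.
 */
static inline int32_t fau_read_and_reset32(cvmx_fau_reg_32_t reg)
{
	int32_t v = cvmx_fau_fetch_and_add32(reg, 0);	/* atomic read */
	cvmx_fau_atomic_add32(reg, -v);			/* atomic subtract */
	return v;
}

/* The per-second TPS report above could then use:
 *   uint64_t tps = (uint64_t)fau_read_and_reset32(CVMX_FAU_REG_TCP_CONNECTION_COUNT);
 */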
Example #7
/**
 * @INTERNAL
 * Initialize a host mode PCIe link. This function takes a PCIe
 * port from reset to a link up state. Software can then begin
 * configuring the rest of the link.
 *
 * @param pcie_port PCIe port to initialize
 *
 * @return Zero on success
 */
static int __cvmx_pcie_rc_initialize_link(int pcie_port)
{
    uint64_t start_cycle;
    cvmx_pescx_ctl_status_t pescx_ctl_status;
    cvmx_pciercx_cfg452_t pciercx_cfg452;
    cvmx_pciercx_cfg032_t pciercx_cfg032;
    cvmx_pciercx_cfg448_t pciercx_cfg448;

    /* Set the lane width */
    pciercx_cfg452.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port));
    pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
    if (pescx_ctl_status.s.qlm_cfg == 0)
    {
        /* We're in 8 lane (56XX) or 4 lane (54XX) mode */
        pciercx_cfg452.s.lme = 0xf;
    }
    else
    {
        /* We're in 4 lane (56XX) or 2 lane (52XX) mode */
        pciercx_cfg452.s.lme = 0x7;
    }
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port), pciercx_cfg452.u32);

    /* CN52XX pass 1.x has an erratum where length mismatches on UR responses
        can cause bus errors on 64bit memory reads. Turning off length error
        checking fixes this */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
    {
        cvmx_pciercx_cfg455_t pciercx_cfg455;
        pciercx_cfg455.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG455(pcie_port));
        pciercx_cfg455.s.m_cpl_len_err = 1;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port), pciercx_cfg455.u32);
    }

    /* Lane swap needs to be manually enabled for CN52XX */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1))
    {
      pescx_ctl_status.s.lane_swp = 1;
      cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port),pescx_ctl_status.u64);
    }

    /* Bring up the link */
    pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
    pescx_ctl_status.s.lnk_enb = 1;
    cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);

    /* CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to be disabled */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
        __cvmx_helper_errata_qlm_disable_2nd_order_cdr(0);

    /* Wait for the link to come up */
    start_cycle = cvmx_get_cycle();
    do
    {
        if (cvmx_get_cycle() - start_cycle > 2*cvmx_sysinfo_get()->cpu_clock_hz)
        {
            cvmx_dprintf("PCIe: Port %d link timeout\n", pcie_port);
            return -1;
        }
        cvmx_wait(10000);
        pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
    } while (pciercx_cfg032.s.dlla == 0);

    /* Update the Replay Time Limit. Empirically, some PCIe devices take a
        little longer to respond than expected under load. As a workaround for
        this we configure the Replay Time Limit to the value expected for a 512
        byte MPS instead of our actual 256 byte MPS. The numbers below are
        directly from the PCIe spec table 3-4 */
    pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
    switch (pciercx_cfg032.s.nlw)
    {
        case 1: /* 1 lane */
            pciercx_cfg448.s.rtl = 1677;
            break;
        case 2: /* 2 lanes */
            pciercx_cfg448.s.rtl = 867;
            break;
        case 4: /* 4 lanes */
            pciercx_cfg448.s.rtl = 462;
            break;
        case 8: /* 8 lanes */
            pciercx_cfg448.s.rtl = 258;
            break;
    }
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);

    return 0;
}
Example #8
/**
 * Initialize a host mode PCIe link. This function takes a PCIe
 * port from reset to a link up state. Software can then begin
 * configuring the rest of the link.
 *
 * @pcie_port: PCIe port to initialize
 *
 * Returns Zero on success
 */
static int __cvmx_pcie_rc_initialize_link(int pcie_port)
{
	uint64_t start_cycle;
	union cvmx_pescx_ctl_status pescx_ctl_status;
	union cvmx_pciercx_cfg452 pciercx_cfg452;
	union cvmx_pciercx_cfg032 pciercx_cfg032;
	union cvmx_pciercx_cfg448 pciercx_cfg448;

	/* Set the lane width */
	pciercx_cfg452.u32 =
	    cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port));
	pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
	if (pescx_ctl_status.s.qlm_cfg == 0) {
		/* We're in 8 lane (56XX) or 4 lane (54XX) mode */
		pciercx_cfg452.s.lme = 0xf;
	} else {
		/* We're in 4 lane (56XX) or 2 lane (52XX) mode */
		pciercx_cfg452.s.lme = 0x7;
	}
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port),
			     pciercx_cfg452.u32);

	/*
	 * CN52XX pass 1.x has an erratum where length mismatches on UR
	 * responses can cause bus errors on 64bit memory
	 * reads. Turning off length error checking fixes this.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		union cvmx_pciercx_cfg455 pciercx_cfg455;
		pciercx_cfg455.u32 =
		    cvmx_pcie_cfgx_read(pcie_port,
					CVMX_PCIERCX_CFG455(pcie_port));
		pciercx_cfg455.s.m_cpl_len_err = 1;
		cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port),
				     pciercx_cfg455.u32);
	}

	/* Lane swap needs to be manually enabled for CN52XX */
	if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1)) {
		pescx_ctl_status.s.lane_swp = 1;
		cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port),
			       pescx_ctl_status.u64);
	}

	/* Bring up the link */
	pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
	pescx_ctl_status.s.lnk_enb = 1;
	cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);

	/*
	 * CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to
	 * be disabled.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
		__cvmx_helper_errata_qlm_disable_2nd_order_cdr(0);

	/* Wait for the link to come up */
	cvmx_dprintf("PCIe: Waiting for port %d link\n", pcie_port);
	start_cycle = cvmx_get_cycle();
	do {
		if (cvmx_get_cycle() - start_cycle >
		    2 * cvmx_sysinfo_get()->cpu_clock_hz) {
			cvmx_dprintf("PCIe: Port %d link timeout\n",
				     pcie_port);
			return -1;
		}
		cvmx_wait(10000);
		pciercx_cfg032.u32 =
		    cvmx_pcie_cfgx_read(pcie_port,
					CVMX_PCIERCX_CFG032(pcie_port));
	} while (pciercx_cfg032.s.dlla == 0);

	/* Display the link status */
	cvmx_dprintf("PCIe: Port %d link active, %d lanes\n", pcie_port,
		     pciercx_cfg032.s.nlw);

	pciercx_cfg448.u32 =
	    cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
	switch (pciercx_cfg032.s.nlw) {
	case 1:		/* 1 lane */
		pciercx_cfg448.s.rtl = 1677;
		break;
	case 2:		/* 2 lanes */
		pciercx_cfg448.s.rtl = 867;
		break;
	case 4:		/* 4 lanes */
		pciercx_cfg448.s.rtl = 462;
		break;
	case 8:		/* 8 lanes */
		pciercx_cfg448.s.rtl = 258;
		break;
	}
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port),
			     pciercx_cfg448.u32);

	return 0;
}
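All of the polling loops in this section, PCIe and SPI alike, share one deadline idiom: convert a wall-clock interval to cycles via cvmx_sysinfo_get()->cpu_clock_hz and compare against cvmx_get_cycle(). A small sketch factoring that out of the code above (the helper names are hypothetical):

/*
 * Sketch: cycle-count deadlines, as used by the wait loops above.
 * One CPU millisecond is cpu_clock_hz / 1000 cycles.
 */
static inline uint64_t deadline_after_ms(uint64_t ms)
{
	uint64_t cycles_per_ms = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
	return cvmx_get_cycle() + ms * cycles_per_ms;
}

static inline int deadline_expired(uint64_t deadline)
{
	return cvmx_get_cycle() > deadline;
}

/* The 2 second PCIe link wait above is equivalent to:
 *   uint64_t deadline = deadline_after_ms(2000);
 *   ...
 *   if (deadline_expired(deadline))
 *           return -1;
 */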
/**
 * Callback to perform clock detection
 *
 * @interface: The identifier of the packet interface to configure and
 *                  use as a SPI interface.
 * @mode:      The operating mode for the SPI interface. The interface
 *                  can operate as a full duplex (both Tx and Rx data paths
 *                  active) or as a halfplex (either the Tx data path is
 *                  active or the Rx data path is active, but not both).
 * @timeout:   Timeout to wait for clock synchronization in seconds
 *
 * Returns Zero on success, non-zero error code on failure (will cause
 * SPI initialization to abort)
 */
int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode, int timeout)
{
	int clock_transitions;
	union cvmx_spxx_clk_stat stat;
	uint64_t timeout_time;
	uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;

	/*
	 * Regardless of operating mode, both Tx and Rx clocks must be
	 * present for the SPI interface to operate.
	 */
	cvmx_dprintf("SPI%d: Waiting to see TsClk...\n", interface);
	timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
	/*
	 * Require 100 clock transitions in order to avoid any noise
	 * in the beginning.
	 */
	clock_transitions = 100;
	do {
		stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
		if (stat.s.s4clk0 && stat.s.s4clk1 && clock_transitions) {
			/*
			 * We've seen a clock transition, so decrement
			 * the number we still need.
			 */
			clock_transitions--;
			cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
			stat.s.s4clk0 = 0;
			stat.s.s4clk1 = 0;
		}
		if (cvmx_get_cycle() > timeout_time) {
			cvmx_dprintf("SPI%d: Timeout\n", interface);
			return -1;
		}
	} while (stat.s.s4clk0 == 0 || stat.s.s4clk1 == 0);

	cvmx_dprintf("SPI%d: Waiting to see RsClk...\n", interface);
	timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
	/*
	 * Require 100 clock transitions in order to avoid any noise in the
	 * beginning.
	 */
	clock_transitions = 100;
	do {
		stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
		if (stat.s.d4clk0 && stat.s.d4clk1 && clock_transitions) {
			/*
			 * We've seen a clock transition, so decrement
			 * the number we still need
			 */
			clock_transitions--;
			cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
			stat.s.d4clk0 = 0;
			stat.s.d4clk1 = 0;
		}
		if (cvmx_get_cycle() > timeout_time) {
			cvmx_dprintf("SPI%d: Timeout\n", interface);
			return -1;
		}
	} while (stat.s.d4clk0 == 0 || stat.s.d4clk1 == 0);

	return 0;
}
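The transition-counting loops above rely on the SPXX_CLK_STAT bits being write-one-to-clear: writing back the value just read clears exactly the bits that were observed, re-arming them for the next transition, and the local copy is then zeroed so the loop keeps polling until 100 distinct transitions have been seen. A condensed sketch of the idiom (hypothetical helper, shown for the TsClk pair):

/*
 * Sketch: poll until `needed` separate W1C status transitions have
 * been observed or the deadline passes. Mirrors the TsClk loop
 * above; the RsClk loop is identical with d4clk0/d4clk1.
 */
static int spi_count_transitions(int interface, uint64_t deadline, int needed)
{
	union cvmx_spxx_clk_stat stat;

	do {
		stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
		if (stat.s.s4clk0 && stat.s.s4clk1 && needed) {
			needed--;
			/* W1C: writing the set bits back clears them */
			cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
			stat.s.s4clk0 = 0;
			stat.s.s4clk1 = 0;
		}
		if (cvmx_get_cycle() > deadline)
			return -1;
	} while (stat.s.s4clk0 == 0 || stat.s.s4clk1 == 0);

	return 0;
}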
Example #10
/**
 * Process incoming packets. 
 */
int inic_data_loop(void)
{
		cvm_common_wqe_t *swp = NULL;
		cvm_tcp_in_endpoints_t conn;
		cvm_tcp_tcphdr_t *th = NULL;
		cvm_ip_ip_t *ih = NULL;
		cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
		uint64_t cpu_clock_hz = sys_info_ptr->cpu_clock_hz;
		uint64_t tick_cycle = cvmx_get_cycle();
		uint64_t tick_step;
		uint32_t idle_processing_interval_ticks = (CVM_COMMON_IDLE_PROCESSING_INTERVAL)*(1000*1000)/(CVM_COMMON_TICK_LEN_US);
		uint32_t idle_processing_last_ticks = 0;
#ifdef INET6
		struct cvm_ip6_ip6_hdr *ip6 = NULL;
#ifdef CVM_ENET_TUNNEL
		struct cvm_ip6_ip6_hdr *i6h = NULL;
#endif
#endif


#ifdef CVM_CLI_APP
		uint64_t idle_cycle_start_value;
#endif

		/* for the simulator */
		if (cpu_clock_hz == 0)
		{
				cpu_clock_hz = 333000000;
		}

		tick_step = (CVM_COMMON_TICK_LEN_US * cpu_clock_hz) / 1000000;
		cvm_debug_print_interval = cpu_clock_hz;

#ifndef REAL_HW
		/* for the simulator, set the debug interval to be 3M cycles */
		cvm_debug_print_interval = 3000000;
#endif

#ifdef DUTY_CYCLE
		start_cycle = cvmx_get_cycle();
		process_count = 0;
#endif

		if (cvmx_coremask_first_core(coremask_data)) 
		{
				/* Initiate a timer transaction for arp entry timeouts */
				//if(cvm_enet_arp_timeout_init() != CVMX_TIM_STATUS_SUCCESS)
				//{
				//		printf("Failed init of cvm_ip_arp_timeout_init\n");
				//}
		}

#if defined(CVM_COMBINED_APP_STACK)
		/* Flush the packets sent by main_global and main_local */
		/*
		printf("before cvm_send_packet () \n ");
		if (out_swp)
		{
				cvm_send_packet ();
		}
		printf("after cvm_send_packet () \n ");
		*/
		uint64_t app_timeout = cvmx_get_cycle ();
#endif

		/* start the main loop */
		while (1)
		{


#ifdef DUTY_CYCLE
				end_cycle = cvmx_get_cycle();

				/* check the wrap around case */
				if (end_cycle < start_cycle) end_cycle += cpu_clock_hz;

				if ((end_cycle - start_cycle) > cvm_debug_print_interval)
				{
						inic_do_per_second_duty_cycle_processing();
				}
#endif /* DUTY_CYCLE */

				cvmx_pow_work_request_async_nocheck(CVMX_SCR_WORK, 1);

				/* update the ticks variable */
				while (cvmx_get_cycle() - tick_cycle > tick_step)
				{
						tick_cycle += tick_step;
						cvm_tcp_ticks++;
						if (!(cvm_tcp_ticks & 0x1f)) CVM_COMMON_HISTORY_SET_CYCLE();
				}


				/* do common idle processing */
				if ( (cvm_tcp_ticks - idle_processing_last_ticks) > idle_processing_interval_ticks)
				{
						if (cvmx_coremask_first_core(coremask_data)) 
						{
								cvm_common_do_idle_processing();
						}

						idle_processing_last_ticks = cvm_tcp_ticks;
				}


#ifdef CVM_CLI_APP
				idle_cycle_start_value = cvmx_get_cycle();
#endif

				/* get work entry */
				swp = (cvm_common_wqe_t *)cvmx_pow_work_response_async(CVMX_SCR_WORK);
				if (swp == NULL)
				{
						idle_counter++;

						if(core_id == highest_core_id)
						{
								cvm_enet_check_link_status();
						}

#ifdef CVM_CLI_APP
						cvmx_fau_atomic_add64(core_idle_cycles[core_id], (cvmx_get_cycle()-idle_cycle_start_value) );
#endif
						continue;
				}

				CVM_COMMON_EXTRA_STATS_ADD64 (CVM_FAU_REG_WQE_RCVD, 1);

#ifdef WORK_QUEUE_ENTRY_SIZE_128 // {
				CVMX_PREFETCH0(swp);
#else
				/* Prefetch work-queue entry */
				CVMX_PREFETCH0(swp);
				CVMX_PREFETCH128(swp);
#endif // WORK_QUEUE_ENTRY_SIZE_128 }

				out_swp = 0;
				out_swp_tail = 0;


#ifdef DUTY_CYCLE
				/* we are about to start processing the packet - remember the cycle count */
				process_start_cycle = cvmx_get_cycle();
#endif


				/* Short cut the common case */
				if (cvmx_likely(swp->hw_wqe.unused == 0))
				{
						goto packet_from_the_wire;
				}
				printf("Get work with unused is %X\n", swp->hw_wqe.unused);

				{
						{

packet_from_the_wire:

#if CVM_PKO_DONTFREE
								swp->hw_wqe.packet_ptr.s.i = 0;
#endif

#ifdef SANITY_CHECKS
								/* we have a work queue entry - do input sanity checks */
								ret = cvm_common_input_sanity_and_buffer_count_update(swp);
#endif

								if (cvmx_unlikely(swp->hw_wqe.word2.s.rcv_error))
								{
										goto discard_swp; /* Receive error */
								}

#ifndef WORK_QUEUE_ENTRY_SIZE_128 // {
								{
										/* Make sure pre-fetch completed */
										uint64_t dp = *(volatile uint64_t*)&swp->next;
								}
#endif // WORK_QUEUE_ENTRY_SIZE_128 }

								{
										/* Initialize SW portion of the work-queue entry */
										uint64_t *dptr = (uint64_t*)(&swp->next);
										dptr[0] = 0;
										dptr[1] = 0;
										dptr[2] = 0;
										dptr[3] = 0;
								}

								if(cvmx_unlikely(swp->hw_wqe.word2.s.not_IP))
								{
										goto output;
								}

								/* Shortcut classification to avoid multiple lookups */
								if(
#ifndef INET6
												swp->hw_wqe.word2.s.is_v6 || 
#endif
												swp->hw_wqe.word2.s.is_bcast 
#ifndef INET6
												|| swp->hw_wqe.word2.s.is_mcast
#endif
								  )
								{
										goto discard_swp; /* Receive error */
								}


								/* Packet is unicast IPv4, without L2 errors */
								/* (All IP exceptions are dropped.  This currently includes
								 *  IPv4 options and IPv6 extension headers.)
								 */
								if(cvmx_unlikely(swp->hw_wqe.word2.s.IP_exc))
								{
										goto discard_swp;
								}

								/* Packet is Ipv4 (and no IP exceptions) */
								if (cvmx_unlikely(swp->hw_wqe.word2.s.is_frag || !swp->hw_wqe.word2.s.tcp_or_udp))
								{
										goto output;
								}

#ifdef ANVL_RFC_793_COMPLIANCE
								/* RFC 793 says that:
								   - We should send a RST out when we get a packet with FIN set
								   without the ACK bit set in the flags field.
								   - We should send a RST out when we get a packet with no flag set.
								   Hence, let the TCP stack handle these conditions.
								 */
								if (cvmx_unlikely(swp->hw_wqe.word2.s.L4_error &&
														((cvmx_pip_l4_err_t)swp->hw_wqe.word2.s.err_code != CVMX_PIP_TCP_FLG8_ERR) &&
														((cvmx_pip_l4_err_t)swp->hw_wqe.word2.s.err_code != CVMX_PIP_TCP_FLG9_ERR)))
#else
										if (cvmx_unlikely(swp->hw_wqe.word2.s.L4_error))
#endif
										{
												cvm_tcp_handle_error(swp);
												goto discard_swp;
										}

								/* Packet is not fragmented, TCP/UDP, no IP exceptions/L4 errors */
								/* We can try an L4 lookup now, but we need all the information */
								ih = ((cvm_ip_ip_t *)&(swp->hw_wqe.packet_data[CVM_COMMON_PD_ALIGN]));

								if (!swp->hw_wqe.word2.s.is_v6)
								{
										/* for IPv4, we must subtract CVM_COMMON_PD_ALIGN from tcp_offset to get the offset in the mbuf */
										swp->l4_offset = ((uint16_t)(ih->ip_hl) << 2) + CVM_COMMON_PD_ALIGN;
										swp->l4_prot = ih->ip_p;
								}
#ifdef INET6
								else
								{
										ip6 = (struct cvm_ip6_ip6_hdr *) &swp->hw_wqe.packet_data[CVM_COMMON_IP6_PD_ALIGN];

										/* for IPv6, the L4 header immediately follows the fixed IPv6 header */
										swp->l4_offset = CVM_IP6_IP6_HDRLEN;
										swp->l4_prot = ip6->ip6_ctlun.ip6_un1.ip6_un1_nxt;

										/* trace after l4_prot is set; the ports are not parsed yet */
										CVM_COMMON_DBG_MSG (CVM_COMMON_DBG_LVL_5,
														"%s: %d Packet trace Src: %s Dest: %s prot: %d len: %d\n",
														__FUNCTION__, __LINE__,
														cvm_ip6_ip6_sprintf (&ip6->ip6_src),
														cvm_ip6_ip6_sprintf (&ip6->ip6_dst),
														swp->l4_prot, swp->hw_wqe.len);
								}
#endif

								th = ((cvm_tcp_tcphdr_t *)&(swp->hw_wqe.packet_data[swp->l4_offset]));

								/* check if it is a TCP packet */
								if (swp->l4_prot == CVM_IP_IPPROTO_TCP)
								{
										process_handle(swp);
#ifdef INET6
										if (!swp->hw_wqe.word2.s.is_v6)
#endif
										{
												CVM_TCP_TCP_DUMP ((void*)ih);

												/* assume IPv4 for now */
												conn.ie_laddr = ih->ip_dst.s_addr;
												conn.ie_faddr = ih->ip_src.s_addr;
												conn.ie_lport = th->th_dport;
												conn.ie_fport = th->th_sport;

										}
#ifdef INET6
										else
										{
												/* IPv6: copy the source and destination addresses */
												memcpy (&conn.ie6_laddr, &ip6->ip6_dst, sizeof (struct cvm_ip6_in6_addr));
												memcpy (&conn.ie6_faddr, &ip6->ip6_src, sizeof (struct cvm_ip6_in6_addr));
												conn.ie_lport = th->th_dport;
												conn.ie_fport = th->th_sport;

												/* do a TCP lookup */
												swp->tcb = cvm_tcp6_lookup (swp);

												CVM_COMMON_DBG_MSG (CVM_COMMON_DBG_LVL_5, "%s: %d TCPv6 lookup Src: %s/%d Dest: %s/%d ret_tcb: 0x%llx\n", 
																__FUNCTION__, __LINE__, 
																cvm_ip6_ip6_sprintf ((cvm_ip6_in6_addr_t *) &conn.ie6_faddr), conn.ie_fport, 
																cvm_ip6_ip6_sprintf ((cvm_ip6_in6_addr_t *) &conn.ie6_laddr), conn.ie_lport, 
																CAST64(swp->tcb));
										}
#endif // INET6
								}


								goto output;
						} /* packet from wire */
				} /* switch */


output:
				CVMX_SYNCWS;

				/* Send packet out */
				if (out_swp)
				{
						cvm_send_packet();
				}

				if(swp != NULL)
				{
						S3_send_packet((cvmx_wqe_t *)swp);
						swp = NULL;
				}
#ifdef DUTY_CYCLE
				process_end_cycle = cvmx_get_cycle();
				process_count += (process_end_cycle - process_start_cycle);
#endif
		}

		return (0);


discard_swp:
		/* Free the chained buffers */
		cvm_common_packet_free(swp);

		/* Free the work queue entry */
		cvm_common_free_fpa_buffer(swp, CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE / CVMX_CACHE_LINE_SIZE);
		swp = NULL;
		goto output;

} /* inic_data_loop */
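One detail worth calling out in the loop above is the split work fetch: cvmx_pow_work_request_async_nocheck() posts a GET_WORK request to a scratchpad slot early in the iteration, and cvmx_pow_work_response_async() collects the result only after the tick and idle housekeeping has run, hiding the POW round-trip latency. A minimal skeleton of the pattern, assuming the cvmx-pow.h primitives and the application-defined CVMX_SCR_WORK scratch offset used above:

/*
 * Sketch: overlap the work fetch with other processing, as
 * inic_data_loop() does.
 */
void worker_loop_skeleton(void)
{
	while (1) {
		/* Post the request first... */
		cvmx_pow_work_request_async_nocheck(CVMX_SCR_WORK, CVMX_POW_WAIT);

		/* ...do per-iteration housekeeping while it is in flight... */

		/* ...then collect the (possibly NULL) work entry */
		cvmx_wqe_t *work = cvmx_pow_work_response_async(CVMX_SCR_WORK);
		if (work == NULL)
			continue;	/* nothing arrived: account idle time */

		/* process the packet, then free or forward it */
	}
}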
/**
 * Initialize and start the SPI interface.
 *
 * @param interface The identifier of the packet interface to configure and
 *                  use as a SPI interface.
 * @param mode      The operating mode for the SPI interface. The interface
 *                  can operate as a full duplex (both Tx and Rx data paths
 *                  active) or as a halfplex (either the Tx data path is
 *                  active or the Rx data path is active, but not both).
 * @param timeout   Timeout to wait for clock synchronization in seconds
 * @return Zero on success, negative on failure.
 */
int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode, int timeout)
{
    uint64_t                     timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
    cvmx_spxx_trn4_ctl_t         spxx_trn4_ctl;
    cvmx_spxx_clk_stat_t         stat;
    cvmx_stxx_com_ctl_t          stxx_com_ctl;
    cvmx_srxx_com_ctl_t          srxx_com_ctl;
    cvmx_stxx_spi4_dat_t         stxx_spi4_dat;
    uint64_t                     count;
    cvmx_pko_reg_gmx_port_mode_t pko_mode;


    if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
        return -1;

    cvmx_dprintf ("SPI%d: mode %s, cal_len: %d, cal_rep: %d\n",
            interface, modes[mode], CAL_LEN, CAL_REP);

      // Configure for 16 ports (PKO -> GMX FIFO partition setting)
      // ----------------------------------------------------------
    pko_mode.u64 = cvmx_read_csr(CVMX_PKO_REG_GMX_PORT_MODE);
    if (interface == 0) 
    {
        pko_mode.s.mode0 = 0;
    }
    else 
    {
        pko_mode.s.mode1 = 0;
    }
    cvmx_write_csr(CVMX_PKO_REG_GMX_PORT_MODE, pko_mode.u64);

      // Configure GMX
      // -------------------------------------------------
    cvmx_write_csr (CVMX_GMXX_TX_PRTS(interface), 0xA);
      // PRTS           [ 4: 0] ( 5b) = 10


      // Bringing up Spi4 Interface
      // -------------------------------------------------

      // Reset the Spi4 deskew logic
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_DBG_DESKEW_CTL(interface), 0x00200000);
      // DLLDIS         [ 0: 0] ( 1b) = 0
      // DLLFRC         [ 1: 1] ( 1b) = 0
      // OFFDLY         [ 7: 2] ( 6b) = 0
      // BITSEL         [12: 8] ( 5b) = 0
      // OFFSET         [17:13] ( 5b) = 0
      // MUX            [18:18] ( 1b) = 0
      // INC            [19:19] ( 1b) = 0
      // DEC            [20:20] ( 1b) = 0
      // CLRDLY         [21:21] ( 1b) = 1    // Forces a reset
    cvmx_wait (100 * MS);

      // Setup the CLKDLY right in the middle
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x00000830);
      // SRXDLCK        [ 0: 0] ( 1b) = 0
      // RCVTRN         [ 1: 1] ( 1b) = 0
      // DRPTRN         [ 2: 2] ( 1b) = 0
      // SNDTRN         [ 3: 3] ( 1b) = 0
      // STATRCV        [ 4: 4] ( 1b) = 1    // Enable status channel Rx
      // STATDRV        [ 5: 5] ( 1b) = 1    // Enable status channel Tx
      // RUNBIST        [ 6: 6] ( 1b) = 0
      // CLKDLY         [11: 7] ( 5b) = 10   // 16 is the middle of the range
      // SRXLCK         [12:12] ( 1b) = 0
      // STXLCK         [13:13] ( 1b) = 0
      // SEETRN         [14:14] ( 1b) = 0
    cvmx_wait (100 * MS);

      // Reset SRX0 DLL
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x00000831);
      // SRXDLCK        [ 0: 0] ( 1b) = 1    // Restart the DLL
      // RCVTRN         [ 1: 1] ( 1b) = 0
      // DRPTRN         [ 2: 2] ( 1b) = 0
      // SNDTRN         [ 3: 3] ( 1b) = 0
      // STATRCV        [ 4: 4] ( 1b) = 1
      // STATDRV        [ 5: 5] ( 1b) = 1
      // RUNBIST        [ 6: 6] ( 1b) = 0
      // CLKDLY         [11: 7] ( 5b) = 10
      // SRXLCK         [12:12] ( 1b) = 0
      // STXLCK         [13:13] ( 1b) = 0
      // SEETRN         [14:14] ( 1b) = 0

      // Waiting for Inf0 Spi4 RX DLL to lock
      // -------------------------------------------------
    cvmx_wait (100 * MS);

      // Enable dynamic alignment
      // -------------------------------------------------
    spxx_trn4_ctl.u64 = 0;
    spxx_trn4_ctl.s.mux_en   = 1;
    spxx_trn4_ctl.s.macro_en = 1;
    spxx_trn4_ctl.s.maxdist  = 16;
    spxx_trn4_ctl.s.jitter   = 1;
    cvmx_write_csr (CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
      // MUX_EN         [ 0: 0] ( 1b) = 1
      // MACRO_EN       [ 1: 1] ( 1b) = 1
      // MAXDIST        [ 6: 2] ( 5b) = 16
      // SET_BOOT       [ 7: 7] ( 1b) = 0
      // CLR_BOOT       [ 8: 8] ( 1b) = 1
      // JITTER         [11: 9] ( 3b) = 1
      // TRNTEST        [12:12] ( 1b) = 0
    cvmx_write_csr (CVMX_SPXX_DBG_DESKEW_CTL(interface), 0x0);
      // DLLDIS         [ 0: 0] ( 1b) = 0
      // DLLFRC         [ 1: 1] ( 1b) = 0
      // OFFDLY         [ 7: 2] ( 6b) = 0
      // BITSEL         [12: 8] ( 5b) = 0
      // OFFSET         [17:13] ( 5b) = 0
      // MUX            [18:18] ( 1b) = 0
      // INC            [19:19] ( 1b) = 0
      // DEC            [20:20] ( 1b) = 0
      // CLRDLY         [21:21] ( 1b) = 0

    if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
          // SRX0 Ports
          // -------------------------------------------------
        cvmx_write_csr (CVMX_SRXX_COM_CTL(interface), 0x00000090);
          // INF_EN         [ 0: 0] ( 1b) = 0
          // ST_EN          [ 3: 3] ( 1b) = 0
          // PRTS           [ 9: 4] ( 6b) = 9

          // SRX0 Calendar Table
          // -------------------------------------------------
        cvmx_write_csr (CVMX_SRXX_SPI4_CALX(0, interface), 0x00013210);
          // PRT0           [ 4: 0] ( 5b) = 0
          // PRT1           [ 9: 5] ( 5b) = 1
          // PRT2           [14:10] ( 5b) = 2
          // PRT3           [19:15] ( 5b) = 3
          // ODDPAR         [20:20] ( 1b) = 1
        cvmx_write_csr (CVMX_SRXX_SPI4_CALX(1, interface), 0x00017654);
          // PRT0           [ 4: 0] ( 5b) = 4
          // PRT1           [ 9: 5] ( 5b) = 5
          // PRT2           [14:10] ( 5b) = 6
          // PRT3           [19:15] ( 5b) = 7
          // ODDPAR         [20:20] ( 1b) = 1
        cvmx_write_csr (CVMX_SRXX_SPI4_CALX(2, interface), 0x00000098);
          // PRT0           [ 4: 0] ( 5b) = 8
          // PRT1           [ 9: 5] ( 5b) = 9
          // PRT2           [14:10] ( 5b) = 0
          // PRT3           [19:15] ( 5b) = 0
          // ODDPAR         [20:20] ( 1b) = 0
        cvmx_write_csr (CVMX_SRXX_SPI4_STAT(interface), (CAL_REP << 8) | CAL_LEN);
          // LEN            [ 7: 0] ( 8b) = a
          // M              [15: 8] ( 8b) = 1
    }

    if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
          // STX0 Config
          // -------------------------------------------------
        cvmx_write_csr (CVMX_STXX_ARB_CTL(interface), 0x0);
          // IGNTPA         [ 3: 3] ( 1b) = 0
          // MINTRN         [ 5: 5] ( 1b) = 0
        cvmx_write_csr (CVMX_GMXX_TX_SPI_MAX(interface), 0x0408);
          // MAX2           [15: 8] ( 8b) = 4
          // MAX1           [ 7: 0] ( 8b) = 8
        cvmx_write_csr (CVMX_GMXX_TX_SPI_THRESH(interface), 0x4);
          // THRESH         [ 5: 0] ( 6b) = 4
        cvmx_write_csr (CVMX_GMXX_TX_SPI_CTL(interface), 0x0);
          // ENFORCE        [ 2: 2] ( 1b) = 0
          // TPA_CLR        [ 1: 1] ( 1b) = 0
          // CONT_PKT       [ 0: 0] ( 1b) = 0

          // STX0 Training Control
          // -------------------------------------------------
        stxx_spi4_dat.u64 = 0;
        stxx_spi4_dat.s.alpha = 32;     /* Minimum needed by dynamic alignment */
        stxx_spi4_dat.s.max_t = 0xFFFF; /* Minimum interval is 0x20 */
        cvmx_write_csr (CVMX_STXX_SPI4_DAT(interface), stxx_spi4_dat.u64);
          // MAX_T          [15: 0] (16b) = 0xFFFF
          // ALPHA          [31:16] (16b) = 32

          // STX0 Calendar Table
          // -------------------------------------------------
        cvmx_write_csr (CVMX_STXX_SPI4_CALX(0, interface), 0x00013210);
          // PRT0           [ 4: 0] ( 5b) = 0
          // PRT1           [ 9: 5] ( 5b) = 1
          // PRT2           [14:10] ( 5b) = 2
          // PRT3           [19:15] ( 5b) = 3
          // ODDPAR         [20:20] ( 1b) = 1
        cvmx_write_csr (CVMX_STXX_SPI4_CALX(1, interface), 0x00017654);
          // PRT0           [ 4: 0] ( 5b) = 4
          // PRT1           [ 9: 5] ( 5b) = 5
          // PRT2           [14:10] ( 5b) = 6
          // PRT3           [19:15] ( 5b) = 7
          // ODDPAR         [20:20] ( 1b) = 1
        cvmx_write_csr (CVMX_STXX_SPI4_CALX(2, interface), 0x00000098);
          // PRT0           [ 4: 0] ( 5b) = 8
          // PRT1           [ 9: 5] ( 5b) = 9
          // PRT2           [14:10] ( 5b) = 0
          // PRT3           [19:15] ( 5b) = 0
          // ODDPAR         [20:20] ( 1b) = 0
        cvmx_write_csr (CVMX_STXX_SPI4_STAT(interface), (CAL_REP << 8) | CAL_LEN);
          // LEN            [ 7: 0] ( 8b) = a
          // M              [15: 8] ( 8b) = 1
    }

      /* Regardless of operating mode, both Tx and Rx clocks must be present
       * for the SPI interface to operate.
       */
    cvmx_dprintf ("SPI%d: Waiting to see TsClk...\n", interface);
    count = 0;
    timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
    do {  /* Do we see the TsClk transitioning? */
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        count = count + 1;
#ifdef DEBUG
        if ((count % 5000000) == 10) {
	    cvmx_dprintf ("SPI%d: CLK_STAT 0x%016llX\n"
                    "  s4 (%d,%d) d4 (%d,%d)\n",
                    interface, (unsigned long long)stat.u64,
                    stat.s.s4clk0, stat.s.s4clk1,
                    stat.s.d4clk0, stat.s.d4clk1);
        }
#endif
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.s4clk0 == 0 || stat.s.s4clk1 == 0);

    cvmx_dprintf ("SPI%d: Waiting to see RsClk...\n", interface);
    timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
    do {  /* Do we see the RsClk transitioning? */
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.d4clk0 == 0 || stat.s.d4clk1 == 0);


      // SRX0 & STX0 Inf0 Links are configured - begin training
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x0000083f);
      // SRXDLCK        [ 0: 0] ( 1b) = 1
      // RCVTRN         [ 1: 1] ( 1b) = 1
      // DRPTRN         [ 2: 2] ( 1b) = 1    ...was 0
      // SNDTRN         [ 3: 3] ( 1b) = 1
      // STATRCV        [ 4: 4] ( 1b) = 1
      // STATDRV        [ 5: 5] ( 1b) = 1
      // RUNBIST        [ 6: 6] ( 1b) = 0
      // CLKDLY         [11: 7] ( 5b) = 10
      // SRXLCK         [12:12] ( 1b) = 0
      // STXLCK         [13:13] ( 1b) = 0
      // SEETRN         [14:14] ( 1b) = 0
    cvmx_wait (1000 * MS);

      // SRX0 clear the boot bit
      // -------------------------------------------------
    spxx_trn4_ctl.s.clr_boot = 1;
    cvmx_write_csr (CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);

      // Wait for the training sequence to complete
      // -------------------------------------------------
      // SPX0_CLK_STAT - SPX0_CLK_STAT[SRXTRN] should be 1 (bit8)
    cvmx_dprintf ("SPI%d: Waiting for training\n", interface);
    cvmx_wait (1000 * MS);
    timeout_time = cvmx_get_cycle() + 1000ull * MS * 600;  /* Wait a really long time here */
    do {
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.srxtrn == 0);

    if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
          // SRX0 interface should be good, send calendar data
          // -------------------------------------------------
        cvmx_dprintf ("SPI%d: Rx is synchronized, start sending calendar data\n", interface);
        srxx_com_ctl.u64 = 0;
        srxx_com_ctl.s.prts   = 9;
        srxx_com_ctl.s.inf_en = 1;
        srxx_com_ctl.s.st_en  = 1;
        cvmx_write_csr (CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
    }

    if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
          // STX0 has achieved sync
          // The corresponding board should be sending calendar data
          // Enable the STX0 STAT receiver.
          // -------------------------------------------------
        stxx_com_ctl.u64 = 0;
        stxx_com_ctl.s.inf_en = 1;
        stxx_com_ctl.s.st_en = 1;
        cvmx_write_csr (CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);

          // Waiting for calendar sync on STX0 STAT
          // -------------------------------------------------
          // SPX0_CLK_STAT - SPX0_CLK_STAT[STXCAL] should be 1 (bit10)
        cvmx_dprintf ("SPI%d: Waiting to sync on STX[%d] STAT\n", interface, interface);
        timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
        do {
            stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT (interface));
            if (cvmx_get_cycle() > timeout_time)
            {
                cvmx_dprintf ("SPI%d: Timeout\n", interface);
                return -1;
            }
        } while (stat.s.stxcal == 0);
    }

      // Inf0 is synched
      // -------------------------------------------------
      // SPX0 is up
    if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
        srxx_com_ctl.s.inf_en = 1;
        cvmx_write_csr (CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
        cvmx_dprintf ("SPI%d: Rx is now up\n", interface);
    }

    if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
        stxx_com_ctl.s.inf_en = 1;
        cvmx_write_csr (CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
        cvmx_dprintf ("SPI%d: Tx is now up\n", interface);
    }

    cvmx_write_csr (CVMX_GMXX_RXX_FRM_MIN (0,interface), 40);
    cvmx_write_csr (CVMX_GMXX_RXX_FRM_MAX (0,interface), 64*1024 - 4);
    cvmx_write_csr (CVMX_GMXX_RXX_JABBER  (0,interface), 64*1024 - 4);

    return 0;
}
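A typical call, assuming full duplex on packet interface 0 with a 10 second synchronization budget (the call site is illustrative, not from the source):

/* Sketch: bring up SPI on interface 0 in full duplex. */
if (cvmx_spi_start_interface(0, CVMX_SPI_MODE_DUPLEX, 10) != 0)
	cvmx_dprintf("SPI0: bring-up failed, leaving interface down\n");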
/**
 * This routine restarts the SPI interface after it has lost synchronization
 * with its corresponding system.
 *
 * @param interface The identifier of the packet interface to configure and
 *                  use as a SPI interface.
 * @param mode      The operating mode for the SPI interface. The interface
 *                  can operate as a full duplex (both Tx and Rx data paths
 *                  active) or as a halfplex (either the Tx data path is
 *                  active or the Rx data path is active, but not both).
 * @param timeout   Timeout to wait for clock synchronization in seconds
 * @return Zero on success, negative on failure.
 */
int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode, int timeout)
{
    uint64_t                     timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
    cvmx_spxx_trn4_ctl_t         spxx_trn4_ctl;
    cvmx_spxx_clk_stat_t         stat;
    cvmx_stxx_com_ctl_t          stxx_com_ctl;
    cvmx_srxx_com_ctl_t          srxx_com_ctl;
    //cvmx_stxx_spi4_dat_t         stxx_spi4_dat;

    cvmx_dprintf ("SPI%d: Restart %s\n", interface, modes[mode]);

      // Reset the Spi4 deskew logic
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_DBG_DESKEW_CTL(interface), 0x00200000);
    cvmx_wait (100 * MS);
      // Setup the CLKDLY right in the middle
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x00000830);
    cvmx_wait (100 * MS);
      // Reset SRX0 DLL
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x00000831);
    cvmx_wait (100 * MS);
      // Enable dynamic alignment
      // -------------------------------------------------
    spxx_trn4_ctl.u64 = 0;
    spxx_trn4_ctl.s.mux_en   = 1;
    spxx_trn4_ctl.s.macro_en = 1;
    spxx_trn4_ctl.s.maxdist  = 16;
    spxx_trn4_ctl.s.jitter   = 1;
    cvmx_write_csr (CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
    cvmx_write_csr (CVMX_SPXX_DBG_DESKEW_CTL(interface), 0x0);

      /* Regardless of operating mode, both Tx and Rx clocks must be present
       * for the SPI interface to operate.
       */
    cvmx_dprintf ("SPI%d: Waiting to see TsClk...\n", interface);
    do {  /* Do we see the TsClk transitioning? */
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.s4clk0 == 0 || stat.s.s4clk1 == 0);

    cvmx_dprintf ("SPI%d: Waiting to see RsClk...\n", interface);
    do {  /* Do we see the RsClk transitioning? */
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.d4clk0 == 0 || stat.s.d4clk1 == 0);

      // SRX0 & STX0 Inf0 Links are configured - begin training
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x0000083f);
    cvmx_wait (1000 * MS);
      // SRX0 clear the boot bit
      // -------------------------------------------------
    spxx_trn4_ctl.s.clr_boot = 1;
    cvmx_write_csr (CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);

      // Wait for the training sequence to complete
      // -------------------------------------------------
      // SPX0_CLK_STAT - SPX0_CLK_STAT[SRXTRN] should be 1 (bit8)
    cvmx_dprintf ("SPI%d: Waiting for training\n", interface);
    cvmx_wait (1000 * MS);
    do {
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.srxtrn == 0);

    if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
          // SRX0 interface should be good, send calendar data
          // -------------------------------------------------
        cvmx_dprintf ("SPI%d: Rx is synchronized, start sending calendar data\n", interface);
        srxx_com_ctl.u64 = 0;
        srxx_com_ctl.s.prts   = 9;
        srxx_com_ctl.s.inf_en = 1;
        srxx_com_ctl.s.st_en  = 1;
        cvmx_write_csr (CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
    }

    if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
          // STX0 has achieved sync
          // The corresponding board should be sending calendar data
          // Enable the STX0 STAT receiver.
          // -------------------------------------------------
        stxx_com_ctl.u64 = 0;
        stxx_com_ctl.s.inf_en = 1;
        stxx_com_ctl.s.st_en = 1;
        cvmx_write_csr (CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);

          // Waiting for calendar sync on STX0 STAT
          // -------------------------------------------------
          // SPX0_CLK_STAT - SPX0_CLK_STAT[STXCAL] should be 1 (bit10)
        cvmx_dprintf ("SPI%d: Waiting to sync on STX[%d] STAT\n", interface, interface);
        do {
            stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT (interface));
            if (cvmx_get_cycle() > timeout_time)
            {
                cvmx_dprintf ("SPI%d: Timeout\n", interface);
                return -1;
            }
        } while (stat.s.stxcal == 0);
    }

      // Inf0 is synched
      // -------------------------------------------------
      // SPX0 is up
    if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
        srxx_com_ctl.s.inf_en = 1;
        cvmx_write_csr (CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
        cvmx_dprintf ("SPI%d: Rx is now up\n", interface);
    }

    if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
        stxx_com_ctl.s.inf_en = 1;
        cvmx_write_csr (CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
        cvmx_dprintf ("SPI%d: Tx is now up\n", interface);
    }

    return 0;
}
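The restart path is meant to be driven by whatever mechanism detects loss of synchronization (an SPX error interrupt or a periodic poll; both are assumptions here). The call itself matches the signature above:

/*
 * Sketch: re-synchronize after the link partner drops sync. How the
 * loss is detected is system specific and assumed.
 */
static void spi_handle_loss_of_sync(int interface, cvmx_spi_mode_t mode)
{
	if (cvmx_spi_restart_interface(interface, mode, 10) != 0)
		cvmx_dprintf("SPI%d: restart failed\n", interface);
}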