Example #1
void octeon_set_baud(uint32_t uart, uint32_t baud)
{
    uint16_t divisor;
    uint8_t uart_index = GET_UART_INDEX(uart);
    uint8_t node = GET_UART_NODE(uart);
    cvmx_uart_lcr_t lcrval;

    divisor = compute_divisor(octeon_get_ioclk_hz(), baud);

    lcrval.u64 = cvmx_read_csr_node(node, CVMX_MIO_UARTX_LCR(uart_index));

    cvmx_wait((2 * divisor * 16) + 10000);

    lcrval.s.dlab = 1;	/* temporary to program the divisor */
    cvmx_write_csr_node(node, CVMX_MIO_UARTX_LCR(uart_index), lcrval.u64);

    cvmx_write_csr_node(node, CVMX_MIO_UARTX_DLL(uart_index), divisor & 0xff);
    cvmx_write_csr_node(node, CVMX_MIO_UARTX_DLH(uart_index), (divisor >> 8) & 0xff);
    /* divisor is programmed now, set this back to normal */
    lcrval.s.dlab = 0;
    cvmx_write_csr_node(node, CVMX_MIO_UARTX_LCR(uart_index), lcrval.u64);

#ifndef CONFIG_OCTEON_SIM_SPEED
    /* spec says need to wait after you program the divisor */
    cvmx_wait((2 * divisor * 16) + 10000);
    /* Wait a little longer..... */
    mdelay(5);
#endif
}
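The helper compute_divisor() used above is not part of this listing. For a 16550-compatible UART the divisor latch value is the input clock divided by sixteen times the requested baud rate; a minimal sketch of such a helper, written here as an assumption rather than the SDK's actual implementation, could be:

#include <stdint.h>

/*
 * Hypothetical helper: derive the 16550 divisor latch value from the
 * I/O clock and the requested baud rate, divisor = clock / (16 * baud),
 * rounded to the nearest integer and clamped to the 16-bit DLL/DLH range.
 */
static uint16_t compute_divisor(uint64_t ioclk_hz, uint32_t baud)
{
    uint64_t divisor = (ioclk_hz + 8ull * baud) / (16ull * baud);

    if (divisor > 0xffff)
        divisor = 0xffff;
    return (uint16_t)divisor;
}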
Example #2
/**
 * Callback to perform link training
 *
 * @interface: The identifier of the packet interface to configure and
 *                  use as a SPI interface.
 * @mode:      The operating mode for the SPI interface. The interface
 *                  can operate as a full duplex (both Tx and Rx data paths
 *                  active) or as a halfplex (either the Tx data path is
 *                  active or the Rx data path is active, but not both).
 * @timeout:   Timeout to wait for link to be trained (in seconds)
 *
 * Returns Zero on success, non-zero error code on failure (will cause
 * SPI initialization to abort)
 */
int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode, int timeout)
{
	union cvmx_spxx_trn4_ctl spxx_trn4_ctl;
	union cvmx_spxx_clk_stat stat;
	uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
	uint64_t timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
	int rx_training_needed;

	/* SRX0 & STX0 Inf0 Links are configured - begin training */
	union cvmx_spxx_clk_ctl spxx_clk_ctl;
	spxx_clk_ctl.u64 = 0;
	spxx_clk_ctl.s.seetrn = 0;
	spxx_clk_ctl.s.clkdly = 0x10;
	spxx_clk_ctl.s.runbist = 0;
	spxx_clk_ctl.s.statdrv = 0;
	/* This should always be on the opposite edge as statdrv */
	spxx_clk_ctl.s.statrcv = 1;
	spxx_clk_ctl.s.sndtrn = 1;
	spxx_clk_ctl.s.drptrn = 1;
	spxx_clk_ctl.s.rcvtrn = 1;
	spxx_clk_ctl.s.srxdlck = 1;
	cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
	cvmx_wait(1000 * MS);

	/* SRX0 clear the boot bit */
	spxx_trn4_ctl.u64 = cvmx_read_csr(CVMX_SPXX_TRN4_CTL(interface));
	spxx_trn4_ctl.s.clr_boot = 1;
	cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);

	/* Wait for the training sequence to complete */
	cvmx_dprintf("SPI%d: Waiting for training\n", interface);
	cvmx_wait(1000 * MS);
	/* Wait a really long time here */
	timeout_time = cvmx_get_cycle() + 1000ull * MS * 600;
	/*
	 * The HRM says we must wait for 34 + 16 * MAXDIST training sequences.
	 * We'll be pessimistic and wait for a lot more.
	 */
	rx_training_needed = 500;
	do {
		stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
		if (stat.s.srxtrn && rx_training_needed) {
			rx_training_needed--;
			cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
			stat.s.srxtrn = 0;
		}
		if (cvmx_get_cycle() > timeout_time) {
			cvmx_dprintf("SPI%d: Timeout\n", interface);
			return -1;
		}
	} while (stat.s.srxtrn == 0);

	return 0;
}
Example #4
void cvmx_ixf18201_write16(uint16_t reg_addr, uint16_t data)
{
    cvmx_write64_uint16(IXF_ADDR_16, reg_addr);
    cvmx_write64_uint16(IXF_WR_DATA_16, data);
    cvmx_write64_uint8(IXF_TRANS_TYPE, 0);
    cvmx_wait(800000);
}
Example #5
/**
 * Version of printf that works better in exception context.
 *
 * @param format Format string in the usual printf style
 */
static void safe_printf(const char *format, ...)
{
    static char buffer[256];
    va_list args;
    va_start(args, format);
    int count = vsnprintf(buffer, sizeof(buffer), format, args);
    va_end(args);

    char *ptr = buffer;
    while (count-- > 0)
    {
        cvmx_uart_lsr_t lsrval;

        /* Spin until there is room */
        do
        {
            lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(0));
            if (lsrval.s.temt == 0)
                cvmx_wait(10000);   /* Just to reduce the load on the system */
        }
        while (lsrval.s.temt == 0);

        if (*ptr == '\n')
            cvmx_write_csr(CVMX_MIO_UARTX_THR(0), '\r');
        cvmx_write_csr(CVMX_MIO_UARTX_THR(0), *ptr++);
    }
}
Example #6
int inic_data_local_init(void)
{
		core_id = 0;

		CVM_COMMON_DBG_MSG(CVM_COMMON_DBG_LVL_INFO, "inic_data_local_init\n");

		if ( (cvmx_helper_initialize_packet_io_local()) == -1)
		{
				printf("inic_data_local_init : Failed to initialize/setup input ports\n");
				return (-1);
		}

		core_id = cvmx_get_core_num();
		cvmx_wait(core_id * 500);  /* this is only for pass 1 */
		/*cvm_common_rand8_init();*/

		cvm_ip_local_init();
#ifdef INET6
		cvm_ip6_local_init();
#endif
		cvm_tcp_local_init();
		cvm_so_stack_socket_local_init();
		cvm_udp_local_init();
		cvm_raw_local_init();

		return 0;
}
Example #7
static int octeon_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum)
{
	struct octeon_mdiobus *p = bus->priv;
	union cvmx_smix_cmd smi_cmd;
	union cvmx_smix_rd_dat smi_rd;
	int timeout = 1000;

	smi_cmd.u64 = 0;
	smi_cmd.s.phy_op = 1; /* MDIO_CLAUSE_22_READ */
	smi_cmd.s.phy_adr = phy_id;
	smi_cmd.s.reg_adr = regnum;
	cvmx_write_csr(CVMX_SMIX_CMD(p->unit), smi_cmd.u64);

	do {
		/*
		 * Wait 1000 clocks so we don't saturate the RSL bus
		 * doing reads.
		 */
		cvmx_wait(1000);
		smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(p->unit));
	} while (smi_rd.s.pending && --timeout);

	if (smi_rd.s.val)
		return smi_rd.s.dat;
	else
		return -EIO;
}
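In the kernel this routine is normally installed as the mii_bus read callback and reached through the generic MDIO layer rather than called directly. A minimal sketch of such a caller, assuming bus points at an already-registered struct mii_bus, might read the PHY identifier registers like this:

#include <linux/mii.h>
#include <linux/phy.h>

/*
 * Hypothetical caller: read the PHY ID registers (MII_PHYSID1/2) through
 * the generic MDIO layer, which dispatches to octeon_mdiobus_read().
 */
static u32 read_phy_id(struct mii_bus *bus, int phy_addr)
{
	int id1 = mdiobus_read(bus, phy_addr, MII_PHYSID1);
	int id2 = mdiobus_read(bus, phy_addr, MII_PHYSID2);

	if (id1 < 0 || id2 < 0)
		return 0;	/* read failed, e.g. -EIO from the driver */
	return ((u32)id1 << 16) | (u32)id2;
}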
Example #8
static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id,
				int regnum, u16 val)
{
	struct octeon_mdiobus *p = bus->priv;
	union cvmx_smix_cmd smi_cmd;
	union cvmx_smix_wr_dat smi_wr;
	int timeout = 1000;

	smi_wr.u64 = 0;
	smi_wr.s.dat = val;
	cvmx_write_csr(CVMX_SMIX_WR_DAT(p->unit), smi_wr.u64);

	smi_cmd.u64 = 0;
	smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_22_WRITE */
	smi_cmd.s.phy_adr = phy_id;
	smi_cmd.s.reg_adr = regnum;
	cvmx_write_csr(CVMX_SMIX_CMD(p->unit), smi_cmd.u64);

	do {
		/*
		 * Wait 1000 clocks so we don't saturate the RSL bus
		 * doing reads.
		 */
		cvmx_wait(1000);
		smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(p->unit));
	} while (smi_wr.s.pending && --timeout);

	if (timeout <= 0)
		return -EIO;

	return 0;
}
Example #9
/* Reset the hardware to clean state.  */
static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
{
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_mixx_bist mix_bist;
	union cvmx_agl_gmx_bist agl_gmx_bist;

	mix_ctl.u64 = 0;
	cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
	do {
		mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(p->port));
	} while (mix_ctl.s.busy);
	mix_ctl.s.reset = 1;
	cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
	cvmx_read_csr(CVMX_MIXX_CTL(p->port));
	cvmx_wait(64);

	mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(p->port));
	if (mix_bist.u64)
		dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
			(unsigned long long)mix_bist.u64);

	agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
	if (agl_gmx_bist.u64)
		dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
			 (unsigned long long)agl_gmx_bist.u64);
}
Example #10
/**
 * Version of printf that works better in exception context.
 *
 * @param format Format string in the usual printf style
 *
 * XXX If this function weren't in cvmx-interrupt.c, we'd use the SDK version.
 */
void cvmx_safe_printf(const char *format, ...)
{
    char buffer[256];
    char *ptr = buffer;
    int count;
    va_list args;

    va_start(args, format);
#ifndef __U_BOOT__
    count = vsnprintf(buffer, sizeof(buffer), format, args);
#else
    count = vsprintf(buffer, format, args);
#endif
    va_end(args);

    while (count-- > 0)
    {
        cvmx_uart_lsr_t lsrval;

        /* Spin until there is room */
        do
        {
            lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(0));
#if !defined(CONFIG_OCTEON_SIM_SPEED)
            if (lsrval.s.temt == 0)
                cvmx_wait(10000);   /* Just to reduce the load on the system */
#endif
        }
        while (lsrval.s.temt == 0);

        if (*ptr == '\n')
            cvmx_write_csr(CVMX_MIO_UARTX_THR(0), '\r');
        cvmx_write_csr(CVMX_MIO_UARTX_THR(0), *ptr++);
    }
}
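Because the routine above busy-polls the UART instead of going through the normal console path, it is typically used from exception or early-boot context where interrupts cannot be trusted. A minimal usage sketch (the surrounding handler and its arguments are assumptions, not part of the SDK) could be:

/*
 * Hypothetical exception-handler fragment: report the faulting state on
 * UART0 without relying on the regular, interrupt-driven console.
 */
static void report_exception(uint64_t cause, uint64_t epc)
{
    cvmx_safe_printf("Unhandled exception: cause=0x%016llx epc=0x%016llx\n",
                     (unsigned long long)cause,
                     (unsigned long long)epc);
}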
Example #11
uint16_t cvmx_ixf18201_read16(uint16_t reg_addr)
{
    cvmx_write64_uint16(IXF_ADDR_16, reg_addr);
    cvmx_write64_uint8(IXF_TRANS_TYPE, 1);  // Do read
    cvmx_wait(800000);

    /* Read result */
    return(cvmx_read64_uint16(IXF_RD_DATA_16));
}
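Together with the write accessor in Example #4 this gives a complete 16-bit register access pair for the IXF18201. A small read-modify-write helper built only from those two calls (the helper itself is ours, not an SDK function) might look like:

/*
 * Hypothetical helper: update selected bits of an IXF18201 register using
 * the accessors shown in Examples #4 and #11.
 */
static void cvmx_ixf18201_modify16(uint16_t reg_addr, uint16_t clear_mask,
                                   uint16_t set_mask)
{
    uint16_t val = cvmx_ixf18201_read16(reg_addr);

    val = (val & ~clear_mask) | set_mask;
    cvmx_ixf18201_write16(reg_addr, val);
}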
Example #12
/**
 * Wait for the TX buffer to be empty
 *
 * @param uart_index Uart to check
 */
void uart_wait_idle( int uart_index )
{
	cvmx_uart_lsr_t lsrval;

	// Spin until the transmitter is completely empty
	do
	{
		lsrval.u64 = cvmx_read_csr( CVMX_MIO_UARTX_LSR( uart_index ) );
		if ( lsrval.s.temt == 0 )
		{
			cvmx_wait( 10000 );   // Just to reduce the load on the system
		}
	}
	while ( lsrval.s.temt == 0 );
}
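A common reason to wait for the transmitter to drain is to make sure a final console message actually leaves the chip before whatever comes next (a reset, a power-down, a jump to a new image). A minimal sketch of such a caller, assumed rather than taken from the SDK, could be:

#include <stdio.h>

/*
 * Hypothetical helper: print a last message and block until UART0 has
 * completely shifted it out.
 */
static void flush_final_message(const char *msg)
{
	printf("%s", msg);
	uart_wait_idle(0);	/* wait for the FIFO and shift register to empty */
}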
Example #13
/**
 * Shutdown the RAID block. RAID must be idle when
 * this function is called.
 *
 * @return Zero on success, negative on failure
 */
int cvmx_raid_shutdown(void)
{
    cvmx_rad_reg_ctl_t rad_reg_ctl;

    if (cvmx_cmd_queue_length(CVMX_CMD_QUEUE_RAID)) {
        cvmx_dprintf("ERROR: cvmx_raid_shutdown: RAID not idle.\n");
        return -1;
    }

    rad_reg_ctl.u64 = cvmx_read_csr(CVMX_RAD_REG_CTL);
    rad_reg_ctl.s.reset = 1;
    cvmx_write_csr(CVMX_RAD_REG_CTL, rad_reg_ctl.u64);
    cvmx_wait(100);

    cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_RAID);
    cvmx_write_csr(CVMX_RAD_REG_CMD_BUF, 0);
    return 0;
}
Example #14
/**
 * Shutdown the ZIP block for a queue. ZIP must be idle when
 * this function is called.
 *
 * @param queue   Zip instruction queue of the command
 *
 * @return Zero on success, negative on failure
 */
int cvmx_zip_queue_shutdown(int queue)
{
    cvmx_zip_cmd_ctl_t zip_cmd_ctl;

    if (cvmx_cmd_queue_length(CVMX_CMD_QUEUE_ZIP_QUE(queue)))
    {
        cvmx_dprintf("ERROR: cvmx_zip_shutdown: ZIP not idle.\n");
        return -1;
    }

    zip_cmd_ctl.u64 = cvmx_read_csr(CVMX_ZIP_CMD_CTL);
    zip_cmd_ctl.s.reset = 1;
    cvmx_write_csr(CVMX_ZIP_CMD_CTL, zip_cmd_ctl.u64);
    cvmx_wait(100);

    cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_ZIP_QUE(queue));
    return 0;
}
Example #15
/**
 * Perform an MII write. Called by the generic MII routines
 *
 * @param ifp      Interface to perform the write on
 * @param phy_id   The MII phy id
 * @param location Register location to write
 * @param val      Value to write
 */
void cvm_oct_mdio_write(struct ifnet *ifp, int phy_id, int location, int val)
{
	cvmx_smi_cmd_t          smi_cmd;
	cvmx_smi_wr_dat_t       smi_wr;

	MDIO_LOCK();
	smi_wr.u64 = 0;
	smi_wr.s.dat = val;
	cvmx_write_csr(CVMX_SMI_WR_DAT, smi_wr.u64);

	smi_cmd.u64 = 0;
	smi_cmd.s.phy_op = 0;
	smi_cmd.s.phy_adr = phy_id;
	smi_cmd.s.reg_adr = location;
	cvmx_write_csr(CVMX_SMI_CMD, smi_cmd.u64);

	do {
		cvmx_wait(1000);
		smi_wr.u64 = cvmx_read_csr(CVMX_SMI_WR_DAT);
	} while (smi_wr.s.pending);
	MDIO_UNLOCK();
}
Example #16
/**
 * Perform an MII read. Called by the generic MII routines
 *
 * @param ifp      Interface to perform the read on
 * @param phy_id   The MII phy id
 * @param location Register location to read
 * @return Result from the read or zero on failure
 */
int cvm_oct_mdio_read(struct ifnet *ifp, int phy_id, int location)
{
	cvmx_smi_cmd_t          smi_cmd;
	cvmx_smi_rd_dat_t       smi_rd;

	MDIO_LOCK();
	smi_cmd.u64 = 0;
	smi_cmd.s.phy_op = 1;
	smi_cmd.s.phy_adr = phy_id;
	smi_cmd.s.reg_adr = location;
	cvmx_write_csr(CVMX_SMI_CMD, smi_cmd.u64);

	do {
		cvmx_wait(1000);
		smi_rd.u64 = cvmx_read_csr(CVMX_SMI_RD_DAT);
	} while (smi_rd.s.pending);

	MDIO_UNLOCK();

	if (smi_rd.s.val)
		return smi_rd.s.dat;
	else
		return 0;
}
Example #17
/**
 * Put a single byte to uart port.
 *
 * @param uart_index Uart to write to (0 or 1)
 * @param ch         Byte to write
 */
inline void uart_write_byte( int uart_index, uint8_t ch )
{
	if ( !pci_console )
	{
		cvmx_uart_lsr_t lsrval;

		// Spin until there is room
		do
		{
			lsrval.u64 = cvmx_read_csr( CVMX_MIO_UARTX_LSR( uart_index ) );
			if ( lsrval.s.thre == 0 )
			{
				cvmx_wait( 10000 );   /* Just to reduce the load on the system */
			}
		}
		while ( lsrval.s.thre == 0 );

		// Write the byte
		cvmx_write_csr( CVMX_MIO_UARTX_THR( uart_index ), ch );
		return;
	}
	else
	{
		char r = '\r';
		if ( pci_cons_desc_addr )
		{
			if (ch == '\n')
				octeon_pci_console_write(pci_cons_desc_addr, 0,  &r, 1, OCT_PCI_CON_FLAG_NONBLOCK);
			octeon_pci_console_write(pci_cons_desc_addr, 0,  (char *)&ch, 1, OCT_PCI_CON_FLAG_NONBLOCK);
		}
		else
		{
			printf( "pci_console write error\n" );
		}
	}
}
Example #18
/**
 * Initialize a USB port for use. This must be called before any
 * other access to the Octeon USB port is made. The port starts
 * off in the disabled state.
 *
 * @param usb    Pointer to an empty cvmx_usbd_state_t structure
 *               that will be populated by the initialize call.
 *               This structure is then passed to all other USB
 *               functions.
 * @param usb_port_number
 *               Which Octeon USB port to initialize.
 * @param flags  Flags to control hardware initialization. See
 *               cvmx_usbd_initialize_flags_t for the flag
 *               definitions. Some flags are mandatory.
 *
 * @return Zero or a negative on error.
 */
int cvmx_usbd_initialize(cvmx_usbd_state_t *usb,
                                      int usb_port_number,
                                      cvmx_usbd_initialize_flags_t flags)
{
    cvmx_usbnx_clk_ctl_t usbn_clk_ctl;
    cvmx_usbnx_usbp_ctl_status_t usbn_usbp_ctl_status;

    if (cvmx_unlikely(flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
        cvmx_dprintf("%s: Called\n", __FUNCTION__);

    memset(usb, 0, sizeof(*usb));
    usb->init_flags = flags;
    usb->index = usb_port_number;

    /* Try to determine clock type automatically */
    if ((usb->init_flags & (CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_XI |
                  CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND)) == 0)
    {
        if (__cvmx_helper_board_usb_get_clock_type() == USB_CLOCK_TYPE_CRYSTAL_12)
            usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_XI;  /* Only 12 MHZ crystals are supported */
        else
            usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND;
    }

    if (usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND)
    {
        /* Check for auto ref clock frequency */
        if (!(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_CLOCK_MHZ_MASK))
            switch (__cvmx_helper_board_usb_get_clock_type())
            {
                case USB_CLOCK_TYPE_REF_12:
                    usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_12MHZ;
                    break;
                case USB_CLOCK_TYPE_REF_24:
                    usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_24MHZ;
                    break;
                case USB_CLOCK_TYPE_REF_48:
                default:
                    usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_48MHZ;
                    break;
            }
    }

    /* Power On Reset and PHY Initialization */

    /* 1. Wait for DCOK to assert (nothing to do) */
    /* 2a. Write USBN0/1_CLK_CTL[POR] = 1 and
        USBN0/1_CLK_CTL[HRST,PRST,HCLK_RST] = 0 */
    usbn_clk_ctl.u64 = cvmx_read_csr(CVMX_USBNX_CLK_CTL(usb->index));
    usbn_clk_ctl.s.por = 1;
    usbn_clk_ctl.s.hrst = 0;
    usbn_clk_ctl.s.prst = 0;
    usbn_clk_ctl.s.hclk_rst = 0;
    usbn_clk_ctl.s.enable = 0;
    /* 2b. Select the USB reference clock/crystal parameters by writing
        appropriate values to USBN0/1_CLK_CTL[P_C_SEL, P_RTYPE, P_COM_ON] */
    if (usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND)
    {
        /* The USB port uses 12/24/48MHz 2.5V board clock
            source at USB_XO. USB_XI should be tied to GND.
            Most Octeon evaluation boards require this setting */
        if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
        {
            usbn_clk_ctl.cn31xx.p_rclk  = 1; /* From CN31XX,CN30XX manual */
            usbn_clk_ctl.cn31xx.p_xenbn = 0;
        }
        else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
            usbn_clk_ctl.cn56xx.p_rtype = 2; /* From CN56XX,CN50XX manual */
        else
            usbn_clk_ctl.cn52xx.p_rtype = 1; /* From CN52XX manual */

        switch (usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_CLOCK_MHZ_MASK)
        {
            case CVMX_USBD_INITIALIZE_FLAGS_CLOCK_12MHZ:
                usbn_clk_ctl.s.p_c_sel = 0;
                break;
            case CVMX_USBD_INITIALIZE_FLAGS_CLOCK_24MHZ:
                usbn_clk_ctl.s.p_c_sel = 1;
                break;
            case CVMX_USBD_INITIALIZE_FLAGS_CLOCK_48MHZ:
                usbn_clk_ctl.s.p_c_sel = 2;
                break;
        }
    }
    else
    {
        /* The USB port uses a 12MHz crystal as clock source
            at USB_XO and USB_XI */
        if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
        {
            usbn_clk_ctl.cn31xx.p_rclk  = 1; /* From CN31XX,CN30XX manual */
            usbn_clk_ctl.cn31xx.p_xenbn = 1;
        }
        else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
            usbn_clk_ctl.cn56xx.p_rtype = 0; /* From CN56XX,CN50XX manual */
        else
            usbn_clk_ctl.cn52xx.p_rtype = 0; /* From CN52XX manual */

        usbn_clk_ctl.s.p_c_sel = 0;
    }
    /* 2c. Select the HCLK via writing USBN0/1_CLK_CTL[DIVIDE, DIVIDE2] and
        setting USBN0/1_CLK_CTL[ENABLE] = 1.  Divide the core clock down such
        that USB is as close as possible to 125Mhz */
    {
        int divisor = (cvmx_clock_get_rate(CVMX_CLOCK_CORE)+125000000-1)/125000000;
        if (divisor < 4)  /* Lower than 4 doesn't seem to work properly */
            divisor = 4;
        usbn_clk_ctl.s.divide = divisor;
        usbn_clk_ctl.s.divide2 = 0;
    }
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    /* 2d. Write USBN0/1_CLK_CTL[HCLK_RST] = 1 */
    usbn_clk_ctl.s.hclk_rst = 1;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    /* 2e.  Wait 64 core-clock cycles for HCLK to stabilize */
    cvmx_wait(64);
    /* 3. Program the power-on reset field in the USBN clock-control register:
        USBN_CLK_CTL[POR] = 0 */
    usbn_clk_ctl.s.por = 0;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    /* 4. Wait 1 ms for PHY clock to start */
    cvmx_wait_usec(1000);
    /* 5. Program the Reset input from automatic test equipment field in the
        USBP control and status register: USBN_USBP_CTL_STATUS[ATE_RESET] = 1 */
    usbn_usbp_ctl_status.u64 = cvmx_read_csr(CVMX_USBNX_USBP_CTL_STATUS(usb->index));
    usbn_usbp_ctl_status.s.ate_reset = 1;
    cvmx_write_csr(CVMX_USBNX_USBP_CTL_STATUS(usb->index), usbn_usbp_ctl_status.u64);
    /* 6. Wait 10 cycles */
    cvmx_wait(10);
    /* 7. Clear ATE_RESET field in the USBN clock-control register:
        USBN_USBP_CTL_STATUS[ATE_RESET] = 0 */
    usbn_usbp_ctl_status.s.ate_reset = 0;
    cvmx_write_csr(CVMX_USBNX_USBP_CTL_STATUS(usb->index), usbn_usbp_ctl_status.u64);
    /* 8. Program the PHY reset field in the USBN clock-control register:
        USBN_CLK_CTL[PRST] = 1 */
    usbn_clk_ctl.s.prst = 1;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    /* 9. Program the USBP control and status register to select host or
        device mode. USBN_USBP_CTL_STATUS[HST_MODE] = 0 for host, = 1 for
        device */
    usbn_usbp_ctl_status.s.hst_mode = 1;
    usbn_usbp_ctl_status.s.dm_pulld = 0;
    usbn_usbp_ctl_status.s.dp_pulld = 0;
    cvmx_write_csr(CVMX_USBNX_USBP_CTL_STATUS(usb->index), usbn_usbp_ctl_status.u64);
    /* 10. Wait 1 µs */
    cvmx_wait_usec(1);
    /* 11. Program the hreset_n field in the USBN clock-control register:
        USBN_CLK_CTL[HRST] = 1 */
    usbn_clk_ctl.s.hrst = 1;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    /* 12. Proceed to USB core initialization */
    usbn_clk_ctl.s.enable = 1;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    cvmx_wait_usec(1);

    /* Program the following fields in the global AHB configuration
        register (USBC_GAHBCFG)
        DMA mode, USBC_GAHBCFG[DMAEn]: 1 = DMA mode, 0 = slave mode
        Burst length, USBC_GAHBCFG[HBSTLEN] = 0
        Nonperiodic TxFIFO empty level (slave mode only),
        USBC_GAHBCFG[NPTXFEMPLVL]
        Periodic TxFIFO empty level (slave mode only),
        USBC_GAHBCFG[PTXFEMPLVL]
        Global interrupt mask, USBC_GAHBCFG[GLBLINTRMSK] = 1 */
    {
        cvmx_usbcx_gahbcfg_t usbcx_gahbcfg;
        usbcx_gahbcfg.u32 = 0;
        usbcx_gahbcfg.s.dmaen = 1;
        usbcx_gahbcfg.s.hbstlen = 0;
        usbcx_gahbcfg.s.nptxfemplvl = 1;
        usbcx_gahbcfg.s.ptxfemplvl = 1;
        usbcx_gahbcfg.s.glblintrmsk = 1;
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GAHBCFG(usb->index), usbcx_gahbcfg.u32);
    }

    /* Program the following fields in USBC_GUSBCFG register.
        HS/FS timeout calibration, USBC_GUSBCFG[TOUTCAL] = 0
        ULPI DDR select, USBC_GUSBCFG[DDRSEL] = 0
        USB turnaround time, USBC_GUSBCFG[USBTRDTIM] = 0x5
        PHY low-power clock select, USBC_GUSBCFG[PHYLPWRCLKSEL] = 0 */
    {
        cvmx_usbcx_gusbcfg_t usbcx_gusbcfg;
        usbcx_gusbcfg.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index));
        usbcx_gusbcfg.s.toutcal = 0;
        usbcx_gusbcfg.s.ddrsel = 0;
        usbcx_gusbcfg.s.usbtrdtim = 0x5;
        usbcx_gusbcfg.s.phylpwrclksel = 0;
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index), usbcx_gusbcfg.u32);
    }

    /* Program the following fields in the USBC0/1_DCFG register:
        Device speed, USBC0/1_DCFG[DEVSPD] = 0 (high speed)
        Non-zero-length status OUT handshake, USBC0/1_DCFG[NZSTSOUTHSHK]=0
        Periodic frame interval (if periodic endpoints are supported),
        USBC0/1_DCFG[PERFRINT] = 1 */
    {
        cvmx_usbcx_dcfg_t usbcx_dcfg;
        usbcx_dcfg.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DCFG(usb->index));
        usbcx_dcfg.s.devspd = 0;
        usbcx_dcfg.s.nzstsouthshk = 0;
        usbcx_dcfg.s.perfrint = 1;
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DCFG(usb->index), usbcx_dcfg.u32);
    }

    /* Program the USBC0/1_GINTMSK register */
    {
        cvmx_usbcx_gintmsk_t usbcx_gintmsk;
        usbcx_gintmsk.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GINTMSK(usb->index));
        usbcx_gintmsk.s.oepintmsk = 1;
        usbcx_gintmsk.s.inepintmsk = 1;
        usbcx_gintmsk.s.enumdonemsk = 1;
        usbcx_gintmsk.s.usbrstmsk = 1;
        usbcx_gintmsk.s.usbsuspmsk = 1;
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GINTMSK(usb->index), usbcx_gintmsk.u32);
    }

    cvmx_usbd_disable(usb);
    return 0;
}
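A caller supplies an empty state structure plus clock flags; which flag combination is right depends on how the board wires the USB reference clock. A minimal device-mode bring-up sketch, assuming a board that drives USB_XO externally with USB_XI grounded (one of the cases handled above), might be:

/*
 * Hypothetical bring-up of USB port 0 in device mode. The clock flag is a
 * board-specific assumption, not a general recommendation.
 */
static cvmx_usbd_state_t usb_state;

static int bring_up_usb_device(void)
{
    int rc = cvmx_usbd_initialize(&usb_state, 0,
                                  CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND);

    if (rc)
        cvmx_dprintf("USB0: cvmx_usbd_initialize failed (%d)\n", rc);
    return rc;
}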
Example #19
/**
 * Callback to perform SPI4 reset
 *
 * @interface: The identifier of the packet interface to configure and
 *                  use as a SPI interface.
 * @mode:      The operating mode for the SPI interface. The interface
 *                  can operate as a full duplex (both Tx and Rx data paths
 *                  active) or as a halfplex (either the Tx data path is
 *                  active or the Rx data path is active, but not both).
 *
 * Returns Zero on success, non-zero error code on failure (will cause
 * SPI initialization to abort)
 */
int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode)
{
	union cvmx_spxx_dbg_deskew_ctl spxx_dbg_deskew_ctl;
	union cvmx_spxx_clk_ctl spxx_clk_ctl;
	union cvmx_spxx_bist_stat spxx_bist_stat;
	union cvmx_spxx_int_msk spxx_int_msk;
	union cvmx_stxx_int_msk stxx_int_msk;
	union cvmx_spxx_trn4_ctl spxx_trn4_ctl;
	int index;
	uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;

	/* Disable SPI error events while we run BIST */
	spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface));
	cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
	stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface));
	cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);

	/* Run BIST in the SPI interface */
	cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), 0);
	cvmx_write_csr(CVMX_STXX_COM_CTL(interface), 0);
	spxx_clk_ctl.u64 = 0;
	spxx_clk_ctl.s.runbist = 1;
	cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
	cvmx_wait(10 * MS);
	spxx_bist_stat.u64 = cvmx_read_csr(CVMX_SPXX_BIST_STAT(interface));
	if (spxx_bist_stat.s.stat0)
		cvmx_dprintf
		    ("ERROR SPI%d: BIST failed on receive datapath FIFO\n",
		     interface);
	if (spxx_bist_stat.s.stat1)
		cvmx_dprintf("ERROR SPI%d: BIST failed on RX calendar table\n",
			     interface);
	if (spxx_bist_stat.s.stat2)
		cvmx_dprintf("ERROR SPI%d: BIST failed on TX calendar table\n",
			     interface);

	/* Clear the calendar table after BIST to fix parity errors */
	for (index = 0; index < 32; index++) {
		union cvmx_srxx_spi4_calx srxx_spi4_calx;
		union cvmx_stxx_spi4_calx stxx_spi4_calx;

		srxx_spi4_calx.u64 = 0;
		srxx_spi4_calx.s.oddpar = 1;
		cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface),
			       srxx_spi4_calx.u64);

		stxx_spi4_calx.u64 = 0;
		stxx_spi4_calx.s.oddpar = 1;
		cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface),
			       stxx_spi4_calx.u64);
	}

	/* Re-enable reporting of error interrupts */
	cvmx_write_csr(CVMX_SPXX_INT_REG(interface),
		       cvmx_read_csr(CVMX_SPXX_INT_REG(interface)));
	cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64);
	cvmx_write_csr(CVMX_STXX_INT_REG(interface),
		       cvmx_read_csr(CVMX_STXX_INT_REG(interface)));
	cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64);

	/* Setup the CLKDLY right in the middle */
	spxx_clk_ctl.u64 = 0;
	spxx_clk_ctl.s.seetrn = 0;
	spxx_clk_ctl.s.clkdly = 0x10;
	spxx_clk_ctl.s.runbist = 0;
	spxx_clk_ctl.s.statdrv = 0;
	/* This should always be on the opposite edge as statdrv */
	spxx_clk_ctl.s.statrcv = 1;
	spxx_clk_ctl.s.sndtrn = 0;
	spxx_clk_ctl.s.drptrn = 0;
	spxx_clk_ctl.s.rcvtrn = 0;
	spxx_clk_ctl.s.srxdlck = 0;
	cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
	cvmx_wait(100 * MS);

	/* Reset SRX0 DLL */
	spxx_clk_ctl.s.srxdlck = 1;
	cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);

	/* Waiting for Inf0 Spi4 RX DLL to lock */
	cvmx_wait(100 * MS);

	/* Enable dynamic alignment */
	spxx_trn4_ctl.s.trntest = 0;
	spxx_trn4_ctl.s.jitter = 1;
	spxx_trn4_ctl.s.clr_boot = 1;
	spxx_trn4_ctl.s.set_boot = 0;
	if (OCTEON_IS_MODEL(OCTEON_CN58XX))
		spxx_trn4_ctl.s.maxdist = 3;
	else
		spxx_trn4_ctl.s.maxdist = 8;
	spxx_trn4_ctl.s.macro_en = 1;
	spxx_trn4_ctl.s.mux_en = 1;
	cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);

	spxx_dbg_deskew_ctl.u64 = 0;
	cvmx_write_csr(CVMX_SPXX_DBG_DESKEW_CTL(interface),
		       spxx_dbg_deskew_ctl.u64);

	return 0;
}
/**
 * Initialize and start the SPI interface.
 *
 * @param interface The identifier of the packet interface to configure and
 *                  use as a SPI interface.
 * @param mode      The operating mode for the SPI interface. The interface
 *                  can operate as a full duplex (both Tx and Rx data paths
 *                  active) or as a halfplex (either the Tx data path is
 *                  active or the Rx data path is active, but not both).
 * @param timeout   Timeout to wait for clock synchronization in seconds
 * @return Zero on success, negative on failure.
 */
int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode, int timeout)
{
    uint64_t                     timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
    cvmx_spxx_trn4_ctl_t         spxx_trn4_ctl;
    cvmx_spxx_clk_stat_t         stat;
    cvmx_stxx_com_ctl_t          stxx_com_ctl;
    cvmx_srxx_com_ctl_t          srxx_com_ctl;
    cvmx_stxx_spi4_dat_t         stxx_spi4_dat;
    uint64_t                     count;
    cvmx_pko_reg_gmx_port_mode_t pko_mode;


    if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
        return -1;

    cvmx_dprintf ("SPI%d: mode %s, cal_len: %d, cal_rep: %d\n",
            interface, modes[mode], CAL_LEN, CAL_REP);

      // Configure for 16 ports (PKO -> GMX FIFO partition setting)
      // ----------------------------------------------------------
    pko_mode.u64 = cvmx_read_csr(CVMX_PKO_REG_GMX_PORT_MODE);
    if (interface == 0) 
    {
        pko_mode.s.mode0 = 0;
    }
    else 
    {
        pko_mode.s.mode1 = 0;
    }
    cvmx_write_csr(CVMX_PKO_REG_GMX_PORT_MODE, pko_mode.u64);

      // Configure GMX
      // -------------------------------------------------
    cvmx_write_csr (CVMX_GMXX_TX_PRTS(interface), 0xA);
      // PRTS           [ 4: 0] ( 5b) = 10


      // Bringing up Spi4 Interface
      // -------------------------------------------------

      // Reset the Spi4 deskew logic
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_DBG_DESKEW_CTL(interface), 0x00200000);
      // DLLDIS         [ 0: 0] ( 1b) = 0
      // DLLFRC         [ 1: 1] ( 1b) = 0
      // OFFDLY         [ 7: 2] ( 6b) = 0
      // BITSEL         [12: 8] ( 5b) = 0
      // OFFSET         [17:13] ( 5b) = 0
      // MUX            [18:18] ( 1b) = 0
      // INC            [19:19] ( 1b) = 0
      // DEC            [20:20] ( 1b) = 0
      // CLRDLY         [21:21] ( 1b) = 1    // Forces a reset
    cvmx_wait (100 * MS);

      // Setup the CLKDLY right in the middle
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x00000830);
      // SRXDLCK        [ 0: 0] ( 1b) = 0
      // RCVTRN         [ 1: 1] ( 1b) = 0
      // DRPTRN         [ 2: 2] ( 1b) = 0
      // SNDTRN         [ 3: 3] ( 1b) = 0
      // STATRCV        [ 4: 4] ( 1b) = 1    // Enable status channel Rx
      // STATDRV        [ 5: 5] ( 1b) = 1    // Enable status channel Tx
      // RUNBIST        [ 6: 6] ( 1b) = 0
      // CLKDLY         [11: 7] ( 5b) = 10   // 16 is the middle of the range
      // SRXLCK         [12:12] ( 1b) = 0
      // STXLCK         [13:13] ( 1b) = 0
      // SEETRN         [14:14] ( 1b) = 0
    cvmx_wait (100 * MS);

      // Reset SRX0 DLL
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x00000831);
      // SRXDLCK        [ 0: 0] ( 1b) = 1    // Restart the DLL
      // RCVTRN         [ 1: 1] ( 1b) = 0
      // DRPTRN         [ 2: 2] ( 1b) = 0
      // SNDTRN         [ 3: 3] ( 1b) = 0
      // STATRCV        [ 4: 4] ( 1b) = 1
      // STATDRV        [ 5: 5] ( 1b) = 1
      // RUNBIST        [ 6: 6] ( 1b) = 0
      // CLKDLY         [11: 7] ( 5b) = 10
      // SRXLCK         [12:12] ( 1b) = 0
      // STXLCK         [13:13] ( 1b) = 0
      // SEETRN         [14:14] ( 1b) = 0

      // Waiting for Inf0 Spi4 RX DLL to lock
      // -------------------------------------------------
    cvmx_wait (100 * MS);

      // Enable dynamic alignment
      // -------------------------------------------------
    spxx_trn4_ctl.u64 = 0;
    spxx_trn4_ctl.s.mux_en   = 1;
    spxx_trn4_ctl.s.macro_en = 1;
    spxx_trn4_ctl.s.maxdist  = 16;
    spxx_trn4_ctl.s.jitter   = 1;
    cvmx_write_csr (CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
      // MUX_EN         [ 0: 0] ( 1b) = 1
      // MACRO_EN       [ 1: 1] ( 1b) = 1
      // MAXDIST        [ 6: 2] ( 5b) = 16
      // SET_BOOT       [ 7: 7] ( 1b) = 0
      // CLR_BOOT       [ 8: 8] ( 1b) = 1
      // JITTER         [11: 9] ( 3b) = 1
      // TRNTEST        [12:12] ( 1b) = 0
    cvmx_write_csr (CVMX_SPXX_DBG_DESKEW_CTL(interface), 0x0);
      // DLLDIS         [ 0: 0] ( 1b) = 0
      // DLLFRC         [ 1: 1] ( 1b) = 0
      // OFFDLY         [ 7: 2] ( 6b) = 0
      // BITSEL         [12: 8] ( 5b) = 0
      // OFFSET         [17:13] ( 5b) = 0
      // MUX            [18:18] ( 1b) = 0
      // INC            [19:19] ( 1b) = 0
      // DEC            [20:20] ( 1b) = 0
      // CLRDLY         [21:21] ( 1b) = 0

    if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
          // SRX0 Ports
          // -------------------------------------------------
        cvmx_write_csr (CVMX_SRXX_COM_CTL(interface), 0x00000090);
          // INF_EN         [ 0: 0] ( 1b) = 0
          // ST_EN          [ 3: 3] ( 1b) = 0
          // PRTS           [ 9: 4] ( 6b) = 9

          // SRX0 Calendar Table
          // -------------------------------------------------
        cvmx_write_csr (CVMX_SRXX_SPI4_CALX(0, interface), 0x00013210);
          // PRT0           [ 4: 0] ( 5b) = 0
          // PRT1           [ 9: 5] ( 5b) = 1
          // PRT2           [14:10] ( 5b) = 2
          // PRT3           [19:15] ( 5b) = 3
          // ODDPAR         [20:20] ( 1b) = 1
        cvmx_write_csr (CVMX_SRXX_SPI4_CALX(1, interface), 0x00017654);
          // PRT0           [ 4: 0] ( 5b) = 4
          // PRT1           [ 9: 5] ( 5b) = 5
          // PRT2           [14:10] ( 5b) = 6
          // PRT3           [19:15] ( 5b) = 7
          // ODDPAR         [20:20] ( 1b) = 1
        cvmx_write_csr (CVMX_SRXX_SPI4_CALX(2, interface), 0x00000098);
          // PRT0           [ 4: 0] ( 5b) = 8
          // PRT1           [ 9: 5] ( 5b) = 9
          // PRT2           [14:10] ( 5b) = 0
          // PRT3           [19:15] ( 5b) = 0
          // ODDPAR         [20:20] ( 1b) = 0
        cvmx_write_csr (CVMX_SRXX_SPI4_STAT(interface), (CAL_REP << 8) | CAL_LEN);
          // LEN            [ 7: 0] ( 8b) = a
          // M              [15: 8] ( 8b) = 1
    }

    if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
          // STX0 Config
          // -------------------------------------------------
        cvmx_write_csr (CVMX_STXX_ARB_CTL(interface), 0x0);
          // IGNTPA         [ 3: 3] ( 1b) = 0
          // MINTRN         [ 5: 5] ( 1b) = 0
        cvmx_write_csr (CVMX_GMXX_TX_SPI_MAX(interface), 0x0408);
          // MAX2           [15: 8] ( 8b) = 4
          // MAX1           [ 7: 0] ( 8b) = 8
        cvmx_write_csr (CVMX_GMXX_TX_SPI_THRESH(interface), 0x4);
          // THRESH         [ 5: 0] ( 6b) = 4
        cvmx_write_csr (CVMX_GMXX_TX_SPI_CTL(interface), 0x0);
          // ENFORCE        [ 2: 2] ( 1b) = 0
          // TPA_CLR        [ 1: 1] ( 1b) = 0
          // CONT_PKT       [ 0: 0] ( 1b) = 0

          // STX0 Training Control
          // -------------------------------------------------
        stxx_spi4_dat.u64 = 0;
        stxx_spi4_dat.s.alpha = 32;      /* Minimum needed by dynamic alignment */
        stxx_spi4_dat.s.max_t = 0xFFFF;  /* Minimum interval is 0x20 */
        cvmx_write_csr (CVMX_STXX_SPI4_DAT(interface), stxx_spi4_dat.u64);
          // MAX_T          [15: 0] (16b) = 0xFFFF
          // ALPHA          [31:16] (16b) = 32

          // STX0 Calendar Table
          // -------------------------------------------------
        cvmx_write_csr (CVMX_STXX_SPI4_CALX(0, interface), 0x00013210);
          // PRT0           [ 4: 0] ( 5b) = 0
          // PRT1           [ 9: 5] ( 5b) = 1
          // PRT2           [14:10] ( 5b) = 2
          // PRT3           [19:15] ( 5b) = 3
          // ODDPAR         [20:20] ( 1b) = 1
        cvmx_write_csr (CVMX_STXX_SPI4_CALX(1, interface), 0x00017654);
          // PRT0           [ 4: 0] ( 5b) = 4
          // PRT1           [ 9: 5] ( 5b) = 5
          // PRT2           [14:10] ( 5b) = 6
          // PRT3           [19:15] ( 5b) = 7
          // ODDPAR         [20:20] ( 1b) = 1
        cvmx_write_csr (CVMX_STXX_SPI4_CALX(2, interface), 0x00000098);
          // PRT0           [ 4: 0] ( 5b) = 8
          // PRT1           [ 9: 5] ( 5b) = 9
          // PRT2           [14:10] ( 5b) = 0
          // PRT3           [19:15] ( 5b) = 0
          // ODDPAR         [20:20] ( 1b) = 0
        cvmx_write_csr (CVMX_STXX_SPI4_STAT(interface), (CAL_REP << 8) | CAL_LEN);
          // LEN            [ 7: 0] ( 8b) = a
          // M              [15: 8] ( 8b) = 1
    }

      /* Regardless of operating mode, both Tx and Rx clocks must be present
       * for the SPI interface to operate.
       */
    cvmx_dprintf ("SPI%d: Waiting to see TsClk...\n", interface);
    count = 0;
    timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
    do {  /* Do we see the TsClk transitioning? */
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        count = count + 1;
#ifdef DEBUG
        if ((count % 5000000) == 10) {
            cvmx_dprintf ("SPI%d: CLK_STAT 0x%016llX\n"
                    "  s4 (%d,%d) d4 (%d,%d)\n",
                    interface, (unsigned long long)stat.u64,
                    stat.s.s4clk0, stat.s.s4clk1,
                    stat.s.d4clk0, stat.s.d4clk1);
        }
#endif
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.s4clk0 == 0 || stat.s.s4clk1 == 0);

    cvmx_dprintf ("SPI%d: Waiting to see RsClk...\n", interface);
    timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
    do {  /* Do we see the RsClk transitioning? */
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.d4clk0 == 0 || stat.s.d4clk1 == 0);


      // SRX0 & STX0 Inf0 Links are configured - begin training
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x0000083f);
      // SRXDLCK        [ 0: 0] ( 1b) = 1
      // RCVTRN         [ 1: 1] ( 1b) = 1
      // DRPTRN         [ 2: 2] ( 1b) = 1    ...was 0
      // SNDTRN         [ 3: 3] ( 1b) = 1
      // STATRCV        [ 4: 4] ( 1b) = 1
      // STATDRV        [ 5: 5] ( 1b) = 1
      // RUNBIST        [ 6: 6] ( 1b) = 0
      // CLKDLY         [11: 7] ( 5b) = 10
      // SRXLCK         [12:12] ( 1b) = 0
      // STXLCK         [13:13] ( 1b) = 0
      // SEETRN         [14:14] ( 1b) = 0
    cvmx_wait (1000 * MS);

      // SRX0 clear the boot bit
      // -------------------------------------------------
    spxx_trn4_ctl.s.clr_boot = 1;
    cvmx_write_csr (CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);

      // Wait for the training sequence to complete
      // -------------------------------------------------
      // SPX0_CLK_STAT - SPX0_CLK_STAT[SRXTRN] should be 1 (bit8)
    cvmx_dprintf ("SPI%d: Waiting for training\n", interface);
    cvmx_wait (1000 * MS);
    timeout_time = cvmx_get_cycle() + 1000ull * MS * 600;  /* Wait a really long time here */
    do {
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.srxtrn == 0);

    if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
          // SRX0 interface should be good, send calendar data
          // -------------------------------------------------
        cvmx_dprintf ("SPI%d: Rx is synchronized, start sending calendar data\n", interface);
        srxx_com_ctl.u64 = 0;
        srxx_com_ctl.s.prts   = 9;
        srxx_com_ctl.s.inf_en = 1;
        srxx_com_ctl.s.st_en  = 1;
        cvmx_write_csr (CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
    }

    if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
          // STX0 has achieved sync
          // The remote board should be sending calendar data
          // Enable the STX0 STAT receiver.
          // -------------------------------------------------
        stxx_com_ctl.u64 = 0;
        stxx_com_ctl.s.inf_en = 1;
        stxx_com_ctl.s.st_en = 1;
        cvmx_write_csr (CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);

          // Waiting for calendar sync on STX0 STAT
          // -------------------------------------------------
          // SPX0_CLK_STAT - SPX0_CLK_STAT[STXCAL] should be 1 (bit10)
        cvmx_dprintf ("SPI%d: Waiting to sync on STX[%d] STAT\n", interface, interface);
        timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
        do {
            stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT (interface));
            if (cvmx_get_cycle() > timeout_time)
            {
                cvmx_dprintf ("SPI%d: Timeout\n", interface);
                return -1;
            }
        } while (stat.s.stxcal == 0);
    }

      // Inf0 is synched
      // -------------------------------------------------
      // SPX0 is up
    if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
        srxx_com_ctl.s.inf_en = 1;
        cvmx_write_csr (CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
        cvmx_dprintf ("SPI%d: Rx is now up\n", interface);
    }

    if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
        stxx_com_ctl.s.inf_en = 1;
        cvmx_write_csr (CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
        cvmx_dprintf ("SPI%d: Tx is now up\n", interface);
    }

    cvmx_write_csr (CVMX_GMXX_RXX_FRM_MIN (0,interface), 40);
    cvmx_write_csr (CVMX_GMXX_RXX_FRM_MAX (0,interface), 64*1024 - 4);
    cvmx_write_csr (CVMX_GMXX_RXX_JABBER  (0,interface), 64*1024 - 4);

    return 0;
}
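The mode argument is a bit mask, so a full-duplex bring-up passes both of the half-duplex flags tested in the body above. A hedged usage sketch, assuming interface 0 and an arbitrary 10-second synchronization timeout, could be:

/*
 * Hypothetical full-duplex bring-up of SPI interface 0: both the Rx and Tx
 * paths checked by cvmx_spi_start_interface() are enabled.
 */
static int bring_up_spi0(void)
{
    cvmx_spi_mode_t mode =
        (cvmx_spi_mode_t)(CVMX_SPI_MODE_RX_HALFPLEX | CVMX_SPI_MODE_TX_HALFPLEX);

    if (cvmx_spi_start_interface(0, mode, 10) != 0) {
        cvmx_dprintf("SPI0: failed to start interface\n");
        return -1;
    }
    return 0;
}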
/**
 * This routine restarts the SPI interface after it has lost synchronization
 * with its remote system.
 *
 * @param interface The identifier of the packet interface to configure and
 *                  use as a SPI interface.
 * @param mode      The operating mode for the SPI interface. The interface
 *                  can operate as a full duplex (both Tx and Rx data paths
 *                  active) or as a halfplex (either the Tx data path is
 *                  active or the Rx data path is active, but not both).
 * @param timeout   Timeout to wait for clock synchronization in seconds
 * @return Zero on success, negative on failure.
 */
int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode, int timeout)
{
    uint64_t                     timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
    cvmx_spxx_trn4_ctl_t         spxx_trn4_ctl;
    cvmx_spxx_clk_stat_t         stat;
    cvmx_stxx_com_ctl_t          stxx_com_ctl;
    cvmx_srxx_com_ctl_t          srxx_com_ctl;
    //cvmx_stxx_spi4_dat_t         stxx_spi4_dat;

    cvmx_dprintf ("SPI%d: Restart %s\n", interface, modes[mode]);

      // Reset the Spi4 deskew logic
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_DBG_DESKEW_CTL(interface), 0x00200000);
    cvmx_wait (100 * MS);
      // Setup the CLKDLY right in the middle
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x00000830);
    cvmx_wait (100 * MS);
      // Reset SRX0 DLL
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x00000831);
    cvmx_wait (100 * MS);
      // Enable dynamic alignment
      // -------------------------------------------------
    spxx_trn4_ctl.u64 = 0;
    spxx_trn4_ctl.s.mux_en   = 1;
    spxx_trn4_ctl.s.macro_en = 1;
    spxx_trn4_ctl.s.maxdist  = 16;
    spxx_trn4_ctl.s.jitter   = 1;
    cvmx_write_csr (CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
    cvmx_write_csr (CVMX_SPXX_DBG_DESKEW_CTL(interface), 0x0);

      /* Regardless of operating mode, both Tx and Rx clocks must be present
       * for the SPI interface to operate.
       */
    cvmx_dprintf ("SPI%d: Waiting to see TsClk...\n", interface);
    do {  /* Do we see the TsClk transitioning? */
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.s4clk0 == 0 || stat.s.s4clk1 == 0);

    cvmx_dprintf ("SPI%d: Waiting to see RsClk...\n", interface);
    do {  /* Do we see the RsClk transitioning? */
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.d4clk0 == 0 || stat.s.d4clk1 == 0);

      // SRX0 & STX0 Inf0 Links are configured - begin training
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x0000083f);
    cvmx_wait (1000 * MS);
      // SRX0 clear the boot bit
      // -------------------------------------------------
    spxx_trn4_ctl.s.clr_boot = 1;
    cvmx_write_csr (CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);

      // Wait for the training sequence to complete
      // -------------------------------------------------
      // SPX0_CLK_STAT - SPX0_CLK_STAT[SRXTRN] should be 1 (bit8)
    cvmx_dprintf ("SPI%d: Waiting for training\n", interface);
    cvmx_wait (1000 * MS);
    do {
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.srxtrn == 0);

    if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
          // SRX0 interface should be good, send calendar data
          // -------------------------------------------------
        cvmx_dprintf ("SPI%d: Rx is synchronized, start sending calendar data\n", interface);
        srxx_com_ctl.u64 = 0;
        srxx_com_ctl.s.prts   = 9;
        srxx_com_ctl.s.inf_en = 1;
        srxx_com_ctl.s.st_en  = 1;
        cvmx_write_csr (CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
    }

    if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
          // STX0 has achieved sync
          // The remote board should be sending calendar data
          // Enable the STX0 STAT receiver.
          // -------------------------------------------------
        stxx_com_ctl.u64 = 0;
        stxx_com_ctl.s.inf_en = 1;
        stxx_com_ctl.s.st_en = 1;
        cvmx_write_csr (CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);

          // Waiting for calendar sync on STX0 STAT
          // -------------------------------------------------
          // SPX0_CLK_STAT - SPX0_CLK_STAT[STXCAL] should be 1 (bit10)
        cvmx_dprintf ("SPI%d: Waiting to sync on STX[%d] STAT\n", interface, interface);
        do {
            stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT (interface));
            if (cvmx_get_cycle() > timeout_time)
            {
                cvmx_dprintf ("SPI%d: Timeout\n", interface);
                return -1;
            }
        } while (stat.s.stxcal == 0);
    }

      // Inf0 is synched
      // -------------------------------------------------
      // SPX0 is up
    if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
        srxx_com_ctl.s.inf_en = 1;
        cvmx_write_csr (CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
        cvmx_dprintf ("SPI%d: Rx is now up\n", interface);
    }

    if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
        stxx_com_ctl.s.inf_en = 1;
        cvmx_write_csr (CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
        cvmx_dprintf ("SPI%d: Tx is now up\n", interface);
    }

    return 0;
}
Example #22
/**
 * Initialize a host mode PCIe link. This function takes a PCIe
 * port from reset to a link up state. Software can then begin
 * configuring the rest of the link.
 *
 * @pcie_port: PCIe port to initialize
 *
 * Returns Zero on success
 */
static int __cvmx_pcie_rc_initialize_link(int pcie_port)
{
	uint64_t start_cycle;
	union cvmx_pescx_ctl_status pescx_ctl_status;
	union cvmx_pciercx_cfg452 pciercx_cfg452;
	union cvmx_pciercx_cfg032 pciercx_cfg032;
	union cvmx_pciercx_cfg448 pciercx_cfg448;

	/* Set the lane width */
	pciercx_cfg452.u32 =
	    cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port));
	pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
	if (pescx_ctl_status.s.qlm_cfg == 0) {
		/* We're in 8 lane (56XX) or 4 lane (54XX) mode */
		pciercx_cfg452.s.lme = 0xf;
	} else {
		/* We're in 4 lane (56XX) or 2 lane (52XX) mode */
		pciercx_cfg452.s.lme = 0x7;
	}
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port),
			     pciercx_cfg452.u32);

	/*
	 * CN52XX pass 1.x has an errata where length mismatches on UR
	 * responses can cause bus errors on 64bit memory
	 * reads. Turning off length error checking fixes this.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		union cvmx_pciercx_cfg455 pciercx_cfg455;
		pciercx_cfg455.u32 =
		    cvmx_pcie_cfgx_read(pcie_port,
					CVMX_PCIERCX_CFG455(pcie_port));
		pciercx_cfg455.s.m_cpl_len_err = 1;
		cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port),
				     pciercx_cfg455.u32);
	}

	/* Lane swap needs to be manually enabled for CN52XX */
	if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1)) {
		pescx_ctl_status.s.lane_swp = 1;
		cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port),
			       pescx_ctl_status.u64);
	}

	/* Bring up the link */
	pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
	pescx_ctl_status.s.lnk_enb = 1;
	cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);

	/*
	 * CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to
	 * be disabled.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
		__cvmx_helper_errata_qlm_disable_2nd_order_cdr(0);

	/* Wait for the link to come up */
	cvmx_dprintf("PCIe: Waiting for port %d link\n", pcie_port);
	start_cycle = cvmx_get_cycle();
	do {
		if (cvmx_get_cycle() - start_cycle >
		    2 * cvmx_sysinfo_get()->cpu_clock_hz) {
			cvmx_dprintf("PCIe: Port %d link timeout\n",
				     pcie_port);
			return -1;
		}
		cvmx_wait(10000);
		pciercx_cfg032.u32 =
		    cvmx_pcie_cfgx_read(pcie_port,
					CVMX_PCIERCX_CFG032(pcie_port));
	} while (pciercx_cfg032.s.dlla == 0);

	/* Display the link status */
	cvmx_dprintf("PCIe: Port %d link active, %d lanes\n", pcie_port,
		     pciercx_cfg032.s.nlw);

	pciercx_cfg448.u32 =
	    cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
	switch (pciercx_cfg032.s.nlw) {
	case 1:		/* 1 lane */
		pciercx_cfg448.s.rtl = 1677;
		break;
	case 2:		/* 2 lanes */
		pciercx_cfg448.s.rtl = 867;
		break;
	case 4:		/* 4 lanes */
		pciercx_cfg448.s.rtl = 462;
		break;
	case 8:		/* 8 lanes */
		pciercx_cfg448.s.rtl = 258;
		break;
	}
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port),
			     pciercx_cfg448.u32);

	return 0;
}
Example #23
void write_rld_cfg(rldram_csr_config_t *cfg_ptr)
{
    cvmx_dfa_memcfg0_t    memcfg0;
    cvmx_dfa_memcfg2_t    memcfg2;

    memcfg0.u64 = cfg_ptr->dfa_memcfg0_base;

    if ((OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
    {
        uint32_t dfa_memcfg0;

        if (OCTEON_IS_MODEL (OCTEON_CN58XX)) {
	      // Set RLDQCK90_RST and RLDCK_RST to reset all three DLLs.
	    memcfg0.s.rldck_rst    = 1;
	    memcfg0.s.rldqck90_rst = 1;
            cvmx_write_csr(CVMX_DFA_MEMCFG0, memcfg0.u64);
            ll_printf("CVMX_DFA_MEMCFG0: 0x%08x  clk/qk90 reset\n", (uint32_t) memcfg0.u64);
            cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG0 & ~(1ull<<63), memcfg0.u64);

	      // Clear RLDCK_RST while keeping RLDQCK90_RST asserted to bring the RLDCK DLL out of reset.
	    memcfg0.s.rldck_rst    = 0;
	    memcfg0.s.rldqck90_rst = 1;
            cvmx_write_csr(CVMX_DFA_MEMCFG0, memcfg0.u64);
            cvmx_wait(4000000);  /* Wait  */
            ll_printf("CVMX_DFA_MEMCFG0: 0x%08x  qk90 reset\n", (uint32_t) memcfg0.u64);
            cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG0 & ~(1ull<<63), memcfg0.u64);

	      // Clear both RLDCK_RST and RLDQCK90_RST to bring the RLDQCK90 DLL out of reset.
	    memcfg0.s.rldck_rst    = 0;
	    memcfg0.s.rldqck90_rst = 0;
	    cvmx_write_csr(CVMX_DFA_MEMCFG0, memcfg0.u64);
            cvmx_wait(4000000);  /* Wait  */
            ll_printf("CVMX_DFA_MEMCFG0: 0x%08x  DLL out of reset\n", (uint32_t) memcfg0.u64);
            cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG0 & ~(1ull<<63), memcfg0.u64);
	}

        //=======================================================================
        // Now print out the sequence of events:
        cvmx_write_csr(CVMX_DFA_MEMCFG0, cfg_ptr->dfa_memcfg0_base);
        ll_printf("CVMX_DFA_MEMCFG0: 0x%08x  port enables\n", cfg_ptr->dfa_memcfg0_base);
        cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG0 & ~(1ull<<63), cfg_ptr->dfa_memcfg0_base);
        cvmx_wait(4000000);  /* Wait  */

        cvmx_write_csr(CVMX_DFA_MEMCFG1, cfg_ptr->dfa_memcfg1_base);
        ll_printf("CVMX_DFA_MEMCFG1: 0x%08x\n", cfg_ptr->dfa_memcfg1_base);
        cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG1 & ~(1ull<<63), cfg_ptr->dfa_memcfg1_base);

        if (cfg_ptr->p0_ena ==1)
        {
            cvmx_write_csr(CVMX_DFA_MEMRLD,  cfg_ptr->mrs_dat_p0bunk0);
            ll_printf("CVMX_DFA_MEMRLD : 0x%08x  p0_ena memrld\n", cfg_ptr->mrs_dat_p0bunk0);
            cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMRLD & ~(1ull<<63), cfg_ptr->mrs_dat_p0bunk0);

            dfa_memcfg0 = ( cfg_ptr->dfa_memcfg0_base |
                            (1 << 23) |   // P0_INIT
                            (1 << 25)     // BUNK_INIT[1:0]=Bunk#0
                          );

            cvmx_write_csr(CVMX_DFA_MEMCFG0, dfa_memcfg0);
            ll_printf("CVMX_DFA_MEMCFG0: 0x%08x  p0_init/bunk_init\n", dfa_memcfg0);
            cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG0 & ~(1ull<<63), dfa_memcfg0);
            cvmx_wait(RLD_INIT_DELAY);
            ll_printf("Delay.....\n");
            cvmx_write_csr(CVMX_DFA_MEMCFG0, cfg_ptr->dfa_memcfg0_base);
            ll_printf("CVMX_DFA_MEMCFG0: 0x%08x  back to base\n", cfg_ptr->dfa_memcfg0_base);
            cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG0 & ~(1ull<<63), cfg_ptr->dfa_memcfg0_base);
        }

        if (cfg_ptr->p1_ena ==1)
        {
            cvmx_write_csr(CVMX_DFA_MEMRLD,  cfg_ptr->mrs_dat_p1bunk0);
            ll_printf("CVMX_DFA_MEMRLD : 0x%08x  p1_ena memrld\n", cfg_ptr->mrs_dat_p1bunk0);
            cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMRLD & ~(1ull<<63), cfg_ptr->mrs_dat_p1bunk0);

            dfa_memcfg0 = ( cfg_ptr->dfa_memcfg0_base |
                            (1 << 24) |   // P1_INIT
                            (1 << 25)     // BUNK_INIT[1:0]=Bunk#0
                          );
            cvmx_write_csr(CVMX_DFA_MEMCFG0, dfa_memcfg0);
            ll_printf("CVMX_DFA_MEMCFG0: 0x%08x  p1_init/bunk_init\n", dfa_memcfg0);
            cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG0 & ~(1ull<<63), dfa_memcfg0);
            cvmx_wait(RLD_INIT_DELAY);
            ll_printf("Delay.....\n");
            cvmx_write_csr(CVMX_DFA_MEMCFG0, cfg_ptr->dfa_memcfg0_base);
            ll_printf("CVMX_DFA_MEMCFG0: 0x%08x  back to base\n", cfg_ptr->dfa_memcfg0_base);
            cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG0 & ~(1ull<<63), cfg_ptr->dfa_memcfg0_base);
	}

        // P0 Bunk#1
        if ((cfg_ptr->p0_ena ==1) && (cfg_ptr->bunkport == 2))
        {
            cvmx_write_csr(CVMX_DFA_MEMRLD,  cfg_ptr->mrs_dat_p0bunk1);
            ll_printf("CVMX_DFA_MEMRLD : 0x%08x  p0_ena memrld\n", cfg_ptr->mrs_dat_p0bunk1);
            cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMRLD & ~(1ull<<63), cfg_ptr->mrs_dat_p0bunk1);

            dfa_memcfg0 = ( cfg_ptr->dfa_memcfg0_base |
                            (1 << 23) |   // P0_INIT
                            (2 << 25)     // BUNK_INIT[1:0]=Bunk#1
                          );
            cvmx_write_csr(CVMX_DFA_MEMCFG0, dfa_memcfg0);
            ll_printf("CVMX_DFA_MEMCFG0: 0x%08x  p0_init/bunk_init\n", dfa_memcfg0);
            cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG0 & ~(1ull<<63), dfa_memcfg0);
            cvmx_wait(RLD_INIT_DELAY);
            ll_printf("Delay.....\n");

            if (cfg_ptr->p1_ena == 1)
            { // Re-arm Px_INIT if P1-B1 init is required
                cvmx_write_csr(CVMX_DFA_MEMCFG0, cfg_ptr->dfa_memcfg0_base);
                ll_printf("CVMX_DFA_MEMCFG0: 0x%08x  px_init rearm\n", cfg_ptr->dfa_memcfg0_base);
                cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG0 & ~(1ull<<63), cfg_ptr->dfa_memcfg0_base);
            }
        }

        if ((cfg_ptr->p1_ena == 1) && (cfg_ptr->bunkport == 2))
        {
            cvmx_write_csr(CVMX_DFA_MEMRLD,  cfg_ptr->mrs_dat_p1bunk1);
            ll_printf("CVMX_DFA_MEMRLD : 0x%08x  p1_ena memrld\n", cfg_ptr->mrs_dat_p1bunk1);
            cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMRLD & ~(1ull<<63), cfg_ptr->mrs_dat_p1bunk1);

            dfa_memcfg0 = ( cfg_ptr->dfa_memcfg0_base |
                            (1 << 24) |   // P1_INIT
                            (2 << 25)     // BUNK_INIT[1:0]=Bunk#1
                          );
            cvmx_write_csr(CVMX_DFA_MEMCFG0, dfa_memcfg0);
            ll_printf("CVMX_DFA_MEMCFG0: 0x%08x  p1_init/bunk_init\n", dfa_memcfg0);
            cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG0 & ~(1ull<<63), dfa_memcfg0);
        }
        cvmx_wait(4000000);  // 4M core cycles (~10 ms at a 400 MHz core clock)
        ll_printf("Delay.....\n");

          /* Enable bunks */
        dfa_memcfg0 = cfg_ptr->dfa_memcfg0_base | ((cfg_ptr->bunkport >= 1) << 25) | ((cfg_ptr->bunkport == 2) << 26);
        cvmx_write_csr(CVMX_DFA_MEMCFG0, dfa_memcfg0);
        ll_printf("CVMX_DFA_MEMCFG0: 0x%08x  enable bunks\n", dfa_memcfg0);
        cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_DFA_MEMCFG0 & ~(1ull<<63), dfa_memcfg0);
        cvmx_wait(RLD_INIT_DELAY);
        ll_printf("Delay.....\n");

          /* Issue a Silo reset by toggling SILRST in memcfg2. */
        memcfg2.u64 = cvmx_read_csr (CVMX_DFA_MEMCFG2);
        memcfg2.s.silrst = 1;
        cvmx_write_csr (CVMX_DFA_MEMCFG2, memcfg2.u64);
        ll_printf("CVMX_DFA_MEMCFG2: 0x%08x  silo reset start\n", (uint32_t) memcfg2.u64);
        memcfg2.s.silrst = 0;
        cvmx_write_csr (CVMX_DFA_MEMCFG2, memcfg2.u64);
        ll_printf("CVMX_DFA_MEMCFG2: 0x%08x  silo reset done\n", (uint32_t) memcfg2.u64);
    }
}
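A note on the MEMCFG0 writes in the sequence above: each init step ORs P0_INIT (bit 23) or P1_INIT (bit 24) together with the BUNK_INIT[1:0] field at bits 25:26 onto the base configuration. Below is a minimal sketch of that composition, with bit positions taken from the comments in the code; the helper name and macros are illustrative, not part of the SDK.

#include <stdint.h>

/* Bit positions as documented in the comments of the sequence above. */
#define DFA_MEMCFG0_P0_INIT     (1ull << 23)
#define DFA_MEMCFG0_P1_INIT     (1ull << 24)
#define DFA_MEMCFG0_BUNK_SHIFT  25           /* BUNK_INIT[1:0] */

/* Compose one MEMCFG0 init word: port is 0 or 1, bunk_field is the raw
 * BUNK_INIT[1:0] value (1 = bunk 0, 2 = bunk 1, 3 = both bunks). */
static inline uint64_t dfa_memcfg0_init_word(uint64_t base, int port,
                                             unsigned bunk_field)
{
    uint64_t word = base | ((uint64_t)(bunk_field & 0x3) << DFA_MEMCFG0_BUNK_SHIFT);
    return word | (port ? DFA_MEMCFG0_P1_INIT : DFA_MEMCFG0_P0_INIT);
}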
Exemplo n.º 24
0
static void cn31xx_dfa_memory_init(void)
{
    if (OCTEON_IS_MODEL(OCTEON_CN31XX))
    {
        cvmx_dfa_ddr2_cfg_t  dfaCfg;
        cvmx_dfa_eclkcfg_t   dfaEcklCfg;
        cvmx_dfa_ddr2_addr_t dfaAddr;
        cvmx_dfa_ddr2_tmg_t  dfaTmg;
        cvmx_dfa_ddr2_pll_t  dfaPll;
        int mem_freq_hz = 533*1000000;
        int ref_freq_hz = cvmx_sysinfo_get()->dfa_ref_clock_hz;
        if (!ref_freq_hz)
            ref_freq_hz = 33*1000000;

        cvmx_dprintf ("Configuring DFA memory for %d MHz operation.\n",mem_freq_hz/1000000);

          /* Turn on the DFA memory port. */
        dfaCfg.u64 = cvmx_read_csr (CVMX_DFA_DDR2_CFG);
        dfaCfg.s.prtena = 1;
        cvmx_write_csr (CVMX_DFA_DDR2_CFG, dfaCfg.u64);

          /* Start the PLL alignment sequence */
        dfaPll.u64 = 0;
        dfaPll.s.pll_ratio  = mem_freq_hz/ref_freq_hz         /* 533 MHz / 33 MHz = 16 */;
        dfaPll.s.pll_div2   = 1              /*400 - 1 */;
        dfaPll.s.pll_bypass = 0;
        cvmx_write_csr (CVMX_DFA_DDR2_PLL, dfaPll.u64);

        dfaPll.s.pll_init = 1;
        cvmx_write_csr (CVMX_DFA_DDR2_PLL, dfaPll.u64);

        cvmx_wait (RLD_INIT_DELAY); //want 150uS
        dfaPll.s.qdll_ena = 1;
        cvmx_write_csr (CVMX_DFA_DDR2_PLL, dfaPll.u64);

        cvmx_wait (RLD_INIT_DELAY); //want 10us
        dfaEcklCfg.u64 = 0;
        dfaEcklCfg.s.dfa_frstn = 1;
        cvmx_write_csr (CVMX_DFA_ECLKCFG, dfaEcklCfg.u64);

          /* Configure the DFA Memory */
        dfaCfg.s.silo_hc = 1 /*400 - 1 */;
        dfaCfg.s.silo_qc = 0 /*400 - 0 */;
        dfaCfg.s.tskw    = 1 /*400 - 1 */;
        dfaCfg.s.ref_int = 0x820 /*533 - 0x820  400 - 0x618*/;
        dfaCfg.s.trfc    = 0x1A  /*533 - 0x23   400 - 0x1A*/;
        dfaCfg.s.fprch   = 0; /* 1 would be more conservative */
        dfaCfg.s.bprch   = 0; /* likewise, 1 would be more conservative */
        cvmx_write_csr (CVMX_DFA_DDR2_CFG, dfaCfg.u64);

        dfaEcklCfg.u64 = cvmx_read_csr (CVMX_DFA_ECLKCFG);
        dfaEcklCfg.s.maxbnk = 1;
        cvmx_write_csr (CVMX_DFA_ECLKCFG, dfaEcklCfg.u64);

        dfaAddr.u64 = cvmx_read_csr (CVMX_DFA_DDR2_ADDR);
        dfaAddr.s.num_cols    = 0x1;
        dfaAddr.s.num_colrows = 0x2;
        dfaAddr.s.num_rnks    = 0x1;
        cvmx_write_csr (CVMX_DFA_DDR2_ADDR, dfaAddr.u64);

        dfaTmg.u64 =  cvmx_read_csr (CVMX_DFA_DDR2_TMG);
        dfaTmg.s.ddr2t    = 0;
        dfaTmg.s.tmrd     = 0x2;
        dfaTmg.s.caslat   = 0x4 /*400 - 0x3, 500 - 0x4*/;
        dfaTmg.s.pocas    = 0;
        dfaTmg.s.addlat   = 0;
        dfaTmg.s.trcd     = 4   /*400 - 3, 500 - 4*/;
        dfaTmg.s.trrd     = 2;
        dfaTmg.s.tras     = 0xB /*400 - 8, 500 - 0xB*/;
        dfaTmg.s.trp      = 4   /*400 - 3, 500 - 4*/;
        dfaTmg.s.twr      = 4   /*400 - 3, 500 - 4*/;
        dfaTmg.s.twtr     = 2   /*400 - 2 */;
        dfaTmg.s.tfaw     = 0xE /*400 - 0xA, 500 - 0xE*/;
        dfaTmg.s.r2r_slot = 0;
        dfaTmg.s.dic      = 0;  /*400 - 0 */
        dfaTmg.s.dqsn_ena = 0;
        dfaTmg.s.odt_rtt  = 0;
        cvmx_write_csr (CVMX_DFA_DDR2_TMG, dfaTmg.u64);

          /* Turn on the DDR2 interface and wait a bit for the hardware to setup. */
        dfaCfg.s.init = 1;
        cvmx_write_csr (CVMX_DFA_DDR2_CFG, dfaCfg.u64);
        cvmx_wait(RLD_INIT_DELAY); // want at least 64K cycles
    }
}
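The PLL ratio programmed above is simply the target memory clock divided by the DFA reference clock; with the defaults in this example, 533 MHz / 33 MHz truncates to 16. A small sketch of that arithmetic, assuming both frequencies are given in Hz (the helper name is illustrative):

#include <stdint.h>

/* Truncating ratio of memory clock to reference clock, mirroring the
 * mem_freq_hz / ref_freq_hz division above.
 * Example: 533000000 / 33000000 == 16. */
static inline unsigned dfa_pll_ratio(uint64_t mem_freq_hz, uint64_t ref_freq_hz)
{
    return (unsigned)(mem_freq_hz / ref_freq_hz);
}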
Exemplo n.º 25
0
int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode)
{
    union cvmx_spxx_dbg_deskew_ctl spxx_dbg_deskew_ctl;
    union cvmx_spxx_clk_ctl spxx_clk_ctl;
    union cvmx_spxx_bist_stat spxx_bist_stat;
    union cvmx_spxx_int_msk spxx_int_msk;
    union cvmx_stxx_int_msk stxx_int_msk;
    union cvmx_spxx_trn4_ctl spxx_trn4_ctl;
    int index;
    uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;


    spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface));
    cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
    stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface));
    cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);


    cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), 0);
    cvmx_write_csr(CVMX_STXX_COM_CTL(interface), 0);
    spxx_clk_ctl.u64 = 0;
    spxx_clk_ctl.s.runbist = 1;
    cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
    cvmx_wait(10 * MS);
    spxx_bist_stat.u64 = cvmx_read_csr(CVMX_SPXX_BIST_STAT(interface));
    if (spxx_bist_stat.s.stat0)
        cvmx_dprintf("ERROR SPI%d: BIST failed on receive datapath FIFO\n",
                     interface);
    if (spxx_bist_stat.s.stat1)
        cvmx_dprintf("ERROR SPI%d: BIST failed on RX calendar table\n",
                     interface);
    if (spxx_bist_stat.s.stat2)
        cvmx_dprintf("ERROR SPI%d: BIST failed on TX calendar table\n",
                     interface);


    for (index = 0; index < 32; index++) {
        union cvmx_srxx_spi4_calx srxx_spi4_calx;
        union cvmx_stxx_spi4_calx stxx_spi4_calx;

        srxx_spi4_calx.u64 = 0;
        srxx_spi4_calx.s.oddpar = 1;
        cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface),
                       srxx_spi4_calx.u64);

        stxx_spi4_calx.u64 = 0;
        stxx_spi4_calx.s.oddpar = 1;
        cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface),
                       stxx_spi4_calx.u64);
    }


    cvmx_write_csr(CVMX_SPXX_INT_REG(interface),
                   cvmx_read_csr(CVMX_SPXX_INT_REG(interface)));
    cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64);
    cvmx_write_csr(CVMX_STXX_INT_REG(interface),
                   cvmx_read_csr(CVMX_STXX_INT_REG(interface)));
    cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64);


    spxx_clk_ctl.u64 = 0;
    spxx_clk_ctl.s.seetrn = 0;
    spxx_clk_ctl.s.clkdly = 0x10;
    spxx_clk_ctl.s.runbist = 0;
    spxx_clk_ctl.s.statdrv = 0;

    spxx_clk_ctl.s.statrcv = 1;
    spxx_clk_ctl.s.sndtrn = 0;
    spxx_clk_ctl.s.drptrn = 0;
    spxx_clk_ctl.s.rcvtrn = 0;
    spxx_clk_ctl.s.srxdlck = 0;
    cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
    cvmx_wait(100 * MS);


    spxx_clk_ctl.s.srxdlck = 1;
    cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);


    cvmx_wait(100 * MS);


    spxx_trn4_ctl.s.trntest = 0;
    spxx_trn4_ctl.s.jitter = 1;
    spxx_trn4_ctl.s.clr_boot = 1;
    spxx_trn4_ctl.s.set_boot = 0;
    if (OCTEON_IS_MODEL(OCTEON_CN58XX))
        spxx_trn4_ctl.s.maxdist = 3;
    else
        spxx_trn4_ctl.s.maxdist = 8;
    spxx_trn4_ctl.s.macro_en = 1;
    spxx_trn4_ctl.s.mux_en = 1;
    cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);

    spxx_dbg_deskew_ctl.u64 = 0;
    cvmx_write_csr(CVMX_SPXX_DBG_DESKEW_CTL(interface),
                   spxx_dbg_deskew_ctl.u64);

    return 0;
}
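All of the delays in the reset callback above are expressed in core-clock cycles, derived from the MS = cpu_clock_hz / 1000 idiom. Here is a minimal sketch of that conversion, assuming the clock rate comes from cvmx_sysinfo_get() as in the code above; the helper name is illustrative.

#include <stdint.h>

/* Milliseconds to core-clock cycles, as used for the cvmx_wait() calls
 * above (10 * MS for the BIST run, 100 * MS around the DLL reset). */
static inline uint64_t ms_to_cycles(uint64_t cpu_clock_hz, uint64_t ms)
{
    return (cpu_clock_hz / 1000) * ms;
}

/* Example: at an 800 MHz core clock, ms_to_cycles(800000000, 10) is
 * 8000000 cycles, i.e. the 10 ms BIST delay. */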
Exemplo n.º 26
0
/**
 * @INTERNAL
 * Initialize a host mode PCIe link. This function takes a PCIe
 * port from reset to a link up state. Software can then begin
 * configuring the rest of the link.
 *
 * @param pcie_port PCIe port to initialize
 *
 * @return Zero on success
 */
static int __cvmx_pcie_rc_initialize_link(int pcie_port)
{
    uint64_t start_cycle;
    cvmx_pescx_ctl_status_t pescx_ctl_status;
    cvmx_pciercx_cfg452_t pciercx_cfg452;
    cvmx_pciercx_cfg032_t pciercx_cfg032;
    cvmx_pciercx_cfg448_t pciercx_cfg448;

    /* Set the lane width */
    pciercx_cfg452.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port));
    pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
    if (pescx_ctl_status.s.qlm_cfg == 0)
    {
        /* We're in 8 lane (56XX) or 4 lane (54XX) mode */
        pciercx_cfg452.s.lme = 0xf;
    }
    else
    {
        /* We're in 4 lane (56XX) or 2 lane (52XX) mode */
        pciercx_cfg452.s.lme = 0x7;
    }
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port), pciercx_cfg452.u32);

    /* CN52XX pass 1.x has an errata where length mismatches on UR responses can
        cause bus errors on 64bit memory reads. Turning off length error
        checking fixes this */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
    {
        cvmx_pciercx_cfg455_t pciercx_cfg455;
        pciercx_cfg455.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG455(pcie_port));
        pciercx_cfg455.s.m_cpl_len_err = 1;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port), pciercx_cfg455.u32);
    }

    /* Lane swap needs to be manually enabled for CN52XX */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1))
    {
      pescx_ctl_status.s.lane_swp = 1;
      cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port),pescx_ctl_status.u64);
    }

    /* Bring up the link */
    pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
    pescx_ctl_status.s.lnk_enb = 1;
    cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);

    /* CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to be disabled */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
        __cvmx_helper_errata_qlm_disable_2nd_order_cdr(0);

    /* Wait for the link to come up */
    start_cycle = cvmx_get_cycle();
    do
    {
        if (cvmx_get_cycle() - start_cycle > 2*cvmx_sysinfo_get()->cpu_clock_hz)
        {
            cvmx_dprintf("PCIe: Port %d link timeout\n", pcie_port);
            return -1;
        }
        cvmx_wait(10000);
        pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
    } while (pciercx_cfg032.s.dlla == 0);

    /* Update the Replay Time Limit. Empirically, some PCIe devices take a
        little longer to respond than expected under load. As a workaround for
        this we configure the Replay Time Limit to the value expected for a 512
        byte MPS instead of our actual 256 byte MPS. The numbers below are
        directly from the PCIe spec table 3-4 */
    pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
    switch (pciercx_cfg032.s.nlw)
    {
        case 1: /* 1 lane */
            pciercx_cfg448.s.rtl = 1677;
            break;
        case 2: /* 2 lanes */
            pciercx_cfg448.s.rtl = 867;
            break;
        case 4: /* 4 lanes */
            pciercx_cfg448.s.rtl = 462;
            break;
        case 8: /* 8 lanes */
            pciercx_cfg448.s.rtl = 258;
            break;
    }
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);

    return 0;
}
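The Replay Time Limit switch above is effectively a lookup from the negotiated link width to a constant from PCIe spec table 3-4, padded for a 512-byte MPS. A hedged sketch of that lookup as a standalone helper; the function name and the 0 fallback for unlisted widths are assumptions, while the constants are copied from the code above.

#include <stdint.h>

/* Replay Time Limit for a 512-byte MPS, by negotiated link width.
 * Returns 0 for widths the code above does not handle, so a caller can
 * leave CFG448 at its reset value in that case. */
static inline uint16_t pcie_replay_time_limit(int negotiated_lanes)
{
    switch (negotiated_lanes) {
    case 1:  return 1677;
    case 2:  return 867;
    case 4:  return 462;
    case 8:  return 258;
    default: return 0;
    }
}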
Exemplo n.º 27
0
/**
 * @INTERNAL
 * Probe a RGMII interface and determine the number of ports
 * connected to it. The RGMII interface should still be down
 * after this call.
 *
 * @param interface Interface to probe
 *
 * @return Number of ports on the interface. Zero to disable.
 */
int __cvmx_helper_agl_probe(int interface)
{
	int port = cvmx_helper_agl_get_port(interface);
	union cvmx_agl_gmx_bist gmx_bist;
	union cvmx_agl_gmx_prtx_cfg gmx_prtx_cfg;
	union cvmx_agl_prtx_ctl agl_prtx_ctl;
	uint64_t clock_scale;
	int result;

	result = __cvmx_helper_agl_enumerate(interface);
	if (result == 0)
		return 0;

	/* Check BIST status */
	gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
	if (gmx_bist.u64)
		cvmx_warn("Managment port AGL failed BIST (0x%016llx) on AGL%d\n",
			  CAST64(gmx_bist.u64), port);

	/* Disable the external input/output */
	gmx_prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	gmx_prtx_cfg.s.en = 0;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), gmx_prtx_cfg.u64);

	/* Set the rgx_ref_clk MUX with AGL_PRTx_CTL[REFCLK_SEL]. Default value
	   is 0 (RGMII REFCLK). Recommended to use RGMII RXC(1) or sclk/4 (2)
	   to save cost.
	 */

	/* MII clock counts are based on the 125 MHz reference, so our
	 * delays need to be scaled to match the core clock rate. The
	 * "+1" is to make sure rounding always waits a little too
	 * long. FIXME.
	 */
	clock_scale = cvmx_clock_get_rate(CVMX_CLOCK_CORE) / 125000000 + 1;

	agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
	agl_prtx_ctl.s.clkrst = 0;
	agl_prtx_ctl.s.dllrst = 0;
	agl_prtx_ctl.s.clktx_byp = 0;


	if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
		agl_prtx_ctl.s.refclk_sel = 0;
		agl_prtx_ctl.s.clkrx_set =
			cvmx_helper_get_agl_rx_clock_skew(interface, port);
		agl_prtx_ctl.s.clkrx_byp =
			cvmx_helper_get_agl_rx_clock_delay_bypass(interface,
								  port);
	}
	cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
	/* Force write out before wait */
	cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
	/*
	 * Wait for the DLL to lock. External 125 MHz reference clock must be
	 * stable at this point.
	 */
	cvmx_wait(256 * clock_scale);

	/* Stop bypassing the compensation controller */
	agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
	agl_prtx_ctl.s.drv_byp = 0;
	cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
	/* Force write out before wait */
	cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));

	if (!OCTEON_IS_OCTEON3()) {
		/* Enable the interface */
		agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
		agl_prtx_ctl.s.enable = 1;
		cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
		/* Read the value back to force the previous write */
		agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
	}

	/* Enable the compensation controller */
	agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
	agl_prtx_ctl.s.comp = 1;
	cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
	/* Force write out before wait */
	cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));

	/* Wait for the compensation state to lock. */
	cvmx_wait(1024 * clock_scale);

	return result;
}
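The AGL waits above are written in 125 MHz reference-clock ticks and scaled to core-clock cycles through clock_scale, where the "+1" rounds the scale factor up so the delay is never too short. A worked sketch of that scaling (the helper name is illustrative):

#include <stdint.h>

/* Scale a delay given in 125 MHz reference-clock ticks to core-clock
 * cycles, reproducing clock_scale = core_hz / 125000000 + 1 above. */
static inline uint64_t agl_ref_ticks_to_core_cycles(uint64_t core_hz,
                                                    uint64_t ref_ticks)
{
    uint64_t clock_scale = core_hz / 125000000ull + 1;
    return ref_ticks * clock_scale;
}

/* Example: at a 1 GHz core clock, 256 reference ticks scale to
 * 256 * (8 + 1) = 2304 core cycles, the first cvmx_wait() above. */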
Exemplo n.º 28
0
void inic_top(void)
{
		int i = 0;
		int c = 0;

		static uint64_t last_core_idle_value[CVMX_MAX_CORES];
		uint64_t idle_delta[CVMX_MAX_CORES];
		int first_loop = 1;
		cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();

		uprint(CLI_CURSOR_OFF);

		while(c==0x0)
		{
				uprint(CLI_GOTO_TOP);
				uprint(CLI_ERASE_WIN);
				uprint("\n");

				if (first_loop)
				{
						for (i=0; i<CVMX_MAX_CORES; i++) last_core_idle_value[i] = cvmx_fau_fetch_and_add64(core_idle_cycles[i], 0x0);
						cvmx_wait(sys_info_ptr->cpu_clock_hz);
						first_loop = 0;
						c = ugetchar();
						continue;
				}

				for (i=0; i<CVMX_MAX_CORES; i++)
				{
						idle_delta[i] = cvmx_fau_fetch_and_add64(core_idle_cycles[i], 0x0) - last_core_idle_value[i];
						last_core_idle_value[i] = cvmx_fau_fetch_and_add64(core_idle_cycles[i], 0x0);

						if (idle_delta[i] > sys_info_ptr->cpu_clock_hz) idle_delta[i] = sys_info_ptr->cpu_clock_hz;
				}

				uprint(CLI_REVERSE);
				uprint(" Stack Cores Utilization \n");
				uprint(CLI_NORMAL);
				uprint("\n\n");

				for (i=0; i<CVMX_MAX_CORES; i++)
				{
						if ((cvmx_coremask_core(i) & coremask_data) != 0)
						{
								if (i != cvm_common_get_last_core(coremask_data))
								{
										float val = (float)((float)idle_delta[i]/(float)sys_info_ptr->cpu_clock_hz);

										val = (1.0 - val);

										uprint("    Core %2d : ", i);
										uprint(CLI_BOLD);
										uprint("%-3.1f %%\n", ((float)val*100.0));
										uprint(CLI_NORMAL);
								}
						}
				}

				uprint("\n\nPress any key to exit...\n");

				cvmx_wait(sys_info_ptr->cpu_clock_hz);
				c = ugetchar();
		}

		uprint(CLI_CURSOR_ON);
}
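The percentage printed per core above is one minus the fraction of the one-second sample window (cvmx_wait(cpu_clock_hz)) that the core spent idle. A small sketch of that calculation, with the same clamp the display loop applies (the helper name is illustrative):

#include <stdint.h>

/* Core utilization in percent over a one-second sample window, from the
 * idle-cycle delta accumulated by the FAU counters above. */
static inline float core_utilization_pct(uint64_t idle_delta,
                                         uint64_t cpu_clock_hz)
{
    if (idle_delta > cpu_clock_hz)   /* clamp, as the display loop does */
        idle_delta = cpu_clock_hz;
    return (1.0f - (float)idle_delta / (float)cpu_clock_hz) * 100.0f;
}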
Exemplo n.º 29
0
/**
 * Initialize a PCIe port for use in host (RC) mode. This function does not enumerate the bus.
 *
 * @param pcie_port PCIe port to initialize
 *
 * @return Zero on success
 */
int cvmx_pcie_rc_initialize(int pcie_port)
{
    int i;
    cvmx_ciu_soft_prst_t ciu_soft_prst;
    cvmx_pescx_bist_status_t pescx_bist_status;
    cvmx_pescx_bist_status2_t pescx_bist_status2;
    cvmx_npei_ctl_status_t npei_ctl_status;
    cvmx_npei_mem_access_ctl_t npei_mem_access_ctl;
    cvmx_npei_mem_access_subidx_t mem_access_subid;
    cvmx_npei_dbg_data_t npei_dbg_data;
    cvmx_pescx_ctl_status2_t pescx_ctl_status2;
    cvmx_pciercx_cfg032_t pciercx_cfg032;

retry:
    /* Make sure we aren't trying to setup a target mode interface in host mode */
    npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
    if ((pcie_port==0) && !npei_ctl_status.s.host_mode)
    {
        cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called on port0, but port0 is not in host mode\n");
        return -1;
    }

    /* Make sure a CN52XX isn't trying to bring up port 1 when it is disabled */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX))
    {
        npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
        if ((pcie_port==1) && npei_dbg_data.cn52xx.qlm0_link_width)
        {
            cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called on port1, but port1 is disabled\n");
            return -1;
        }
    }

    /* PCIe switch arbitration mode. '0' == fixed priority NPEI, PCIe0, then PCIe1. '1' == round robin. */
    npei_ctl_status.s.arb = 1;
    /* Allow up to 0x20 config retries */
    npei_ctl_status.s.cfg_rtry = 0x20;
    /* CN52XX pass1.x has an errata where P0_NTAGS and P1_NTAGS don't reset */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
    {
        npei_ctl_status.s.p0_ntags = 0x20;
        npei_ctl_status.s.p1_ntags = 0x20;
    }
    cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS, npei_ctl_status.u64);

    /* Bring the PCIe out of reset */
    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200)
    {
        /* The EBH5200 board swapped the PCIe reset lines on the board. As a
            workaround for this bug, we bring both PCIe ports out of reset at
            the same time instead of on separate calls. So for port 0, we bring
            both out of reset and do nothing on port 1 */
        if (pcie_port == 0)
        {
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
            /* After a chip reset the PCIe will also be in reset. If it isn't,
                most likely someone is trying to init it again without a proper
                PCIe reset */
            if (ciu_soft_prst.s.soft_prst == 0)
            {
                /* Reset the ports */
                ciu_soft_prst.s.soft_prst = 1;
                cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
                ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
                ciu_soft_prst.s.soft_prst = 1;
                cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
                /* Wait until pcie resets the ports. */
                cvmx_wait_usec(2000);
            }
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
            ciu_soft_prst.s.soft_prst = 0;
            cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
            ciu_soft_prst.s.soft_prst = 0;
            cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
        }
    }
    else
    {
        /* The normal case: The PCIe ports are completely separate and can be
            brought out of reset independently */
        if (pcie_port)
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
        else
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
        /* After a chip reset the PCIe will also be in reset. If it isn't,
            most likely someone is trying to init it again without a proper
            PCIe reset */
        if (ciu_soft_prst.s.soft_prst == 0)
        {
            /* Reset the port */
            ciu_soft_prst.s.soft_prst = 1;
            if (pcie_port)
                cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
            else
                cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
            /* Wait until pcie resets the ports. */
            cvmx_wait_usec(2000);
        }
        if (pcie_port)
        {
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
            ciu_soft_prst.s.soft_prst = 0;
            cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
        }
        else
        {
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
            ciu_soft_prst.s.soft_prst = 0;
            cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
        }
    }

    /* Wait for PCIe reset to complete. Due to errata PCIE-700, we don't poll
       PESCX_CTL_STATUS2[PCIERST], but simply wait a fixed number of cycles */
    cvmx_wait(400000);

    /* PESCX_BIST_STATUS2[PCLK_RUN] was missing on pass 1 of CN56XX and
        CN52XX, so we only probe it on newer chips */
    if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
    {
        /* Clear PCLK_RUN so we can check if the clock is running */
        pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
        pescx_ctl_status2.s.pclk_run = 1;
        cvmx_write_csr(CVMX_PESCX_CTL_STATUS2(pcie_port), pescx_ctl_status2.u64);
        /* Now that we cleared PCLK_RUN, wait for it to be set again telling
            us the clock is running */
        if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CTL_STATUS2(pcie_port),
            cvmx_pescx_ctl_status2_t, pclk_run, ==, 1, 10000))
        {
            cvmx_dprintf("PCIe: Port %d isn't clocked, skipping.\n", pcie_port);
            return -1;
        }
    }
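The snippet above is cut off after the PCLK_RUN probe, but the probe itself is a common pattern: clear a status bit, then poll until the hardware sets it again or a deadline expires. Below is a generic, hedged sketch of that poll-with-deadline loop; the callback-based shape and names are assumptions, and on Octeon the callbacks would wrap cvmx_get_cycle() and a CSR read.

#include <stdint.h>

/* Poll check(arg) until it returns nonzero or timeout_cycles elapse on
 * the cycle counter returned by get_cycles(). Returns 0 on success and
 * -1 on timeout, in the spirit of CVMX_WAIT_FOR_FIELD64 above. */
static int poll_until(uint64_t (*get_cycles)(void),
                      int (*check)(void *arg), void *arg,
                      uint64_t timeout_cycles)
{
    uint64_t deadline = get_cycles() + timeout_cycles;

    while (!check(arg)) {
        if (get_cycles() > deadline)
            return -1;
    }
    return 0;
}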
Exemplo n.º 30
0
int cvmx_ixf18201_init(void)
{
    int index;  /* For indexing the two 'ports' on ixf */
    int offset;

    /* Reset IXF, and take all blocks out of reset */

/*
Initializing...
PP0:~CONSOLE-> Changing register value, addr 0x0003, old: 0x0000, new: 0x0001
PP0:~CONSOLE-> Changing register value, addr 0x0003, old: 0x0001, new: 0x0000
PP0:~CONSOLE->  **** LLM201(Lochlomond) Driver loaded ****
PP0:~CONSOLE->  LLM201 Driver - Released on Tue Aug 28 09:51:30 2007.
PP0:~CONSOLE-> retval is: 0
PP0:~CONSOLE-> Changing register value, addr 0x0003, old: 0x0000, new: 0x0001
PP0:~CONSOLE-> Changing register value, addr 0x0003, old: 0x0001, new: 0x0000
PP0:~CONSOLE-> Brought all blocks out of reset
PP0:~CONSOLE-> Getting default config.
*/


    cvmx_ixf18201_write16(0x0003, 0x0001);
    cvmx_ixf18201_write16(0x0003, 0);

    /*
PP0:~CONSOLE-> Changing register value, addr 0x0000, old: 0x4014, new: 0x4010
PP0:~CONSOLE-> Changing register value, addr 0x0000, old: 0x4010, new: 0x4014
PP0:~CONSOLE-> Changing register value, addr 0x0004, old: 0x01ff, new: 0x0140
PP0:~CONSOLE-> Changing register value, addr 0x0009, old: 0x007f, new: 0x0000
    */
    cvmx_ixf18201_write16(0x0000, 0x4010);
    cvmx_ixf18201_write16(0x0000, 0x4014);
    cvmx_ixf18201_write16(0x0004, 0x0140);
    cvmx_ixf18201_write16(0x0009, 0);


    /*
PP0:~CONSOLE-> Changing register value, addr 0x000e, old: 0x0000, new: 0x000f
PP0:~CONSOLE-> Changing register value, addr 0x000f, old: 0x0000, new: 0x0004
PP0:~CONSOLE-> Changing register value, addr 0x000f, old: 0x0004, new: 0x0006
PP0:~CONSOLE-> Changing register value, addr 0x000e, old: 0x000f, new: 0x00f0
PP0:~CONSOLE-> Changing register value, addr 0x000f, old: 0x0006, new: 0x0040
PP0:~CONSOLE-> Changing register value, addr 0x000f, old: 0x0040, new: 0x0060
    */
    // skip GPIO, 0xe/0xf


    /*
PP0:~CONSOLE-> Changing register value, addr 0x3100, old: 0x57fb, new: 0x7f7b
PP0:~CONSOLE-> Changing register value, addr 0x3600, old: 0x57fb, new: 0x7f7b
PP0:~CONSOLE-> Changing register value, addr 0x3005, old: 0x8010, new: 0x0040
PP0:~CONSOLE-> Changing register value, addr 0x3006, old: 0x061a, new: 0x0000
PP0:~CONSOLE-> Changing register value, addr 0x3505, old: 0x8010, new: 0x0040
PP0:~CONSOLE-> Changing register value, addr 0x3506, old: 0x061a, new: 0x0000
    */
    for (index = 0; index < 2; index++)
    {
        offset = 0x500 * index;
        cvmx_ixf18201_write32(0x3100 + offset, 0x47f7b);
        cvmx_ixf18201_write16(0x3005 + offset, 0x0040);
        cvmx_ixf18201_write16(0x3006 + offset, 0);
    }

    /*PP0:~CONSOLE->   *** SPI soft reset ***, block id: 0
PP0:~CONSOLE-> Changing register value, addr 0x3007, old: 0xf980, new: 0xf9c0
PP0:~CONSOLE-> Changing register value, addr 0x3008, old: 0xa6f0, new: 0x36f0
PP0:~CONSOLE-> Changing register value, addr 0x3000, old: 0x0080, new: 0x0060
PP0:~CONSOLE-> Changing register value, addr 0x3002, old: 0x0200, new: 0x0040
PP0:~CONSOLE-> Changing register value, addr 0x3003, old: 0x0100, new: 0x0000
PP0:~CONSOLE-> Changing register value, addr 0x30c2, old: 0x0080, new: 0x0060
PP0:~CONSOLE-> Changing register value, addr 0x300a, old: 0x0800, new: 0x0000
PP0:~CONSOLE-> Changing register value, addr 0x3007, old: 0xf9c0, new: 0x89c0
PP0:~CONSOLE-> Changing register value, addr 0x3016, old: 0x0000, new: 0x0010
PP0:~CONSOLE-> Changing register value, addr 0x3008, old: 0x36f0, new: 0x3610
PP0:~CONSOLE-> Changing register value, addr 0x3012, old: 0x0000, new: 0x0010
PP0:~CONSOLE-> Changing register value, addr 0x3007, old: 0x89c0, new: 0x8980
PP0:~CONSOLE-> Changing register value, addr 0x3008, old: 0x3610, new: 0xa210
PP0:~CONSOLE->

    */


    for (index = 0; index < 2; index++)
    {
        offset = 0x500 * index;
        int cal_len_min_1 = 0;  /* Calendar length -1.  Must match number
                                ** of ports configured for interface.*/
        cvmx_ixf18201_write16(0x3007 + offset, 0x81c0 | (cal_len_min_1 << 11));
        cvmx_ixf18201_write16(0x3008 + offset, 0x3600 | (cal_len_min_1 << 4));
        cvmx_ixf18201_write16(0x3000 + offset, 0x0060);
        cvmx_ixf18201_write16(0x3002 + offset, 0x0040);
        cvmx_ixf18201_write16(0x3003 + offset, 0x0000);
        cvmx_ixf18201_write16(0x30c2 + offset, 0x0060);
        cvmx_ixf18201_write16(0x300a + offset, 0x0000);
        cvmx_ixf18201_write16(0x3007 + offset, 0x81c0 | (cal_len_min_1 << 11));
        cvmx_ixf18201_write16(0x3016 + offset, 0x0010);
        cvmx_ixf18201_write16(0x3008 + offset, 0x3600 | (cal_len_min_1 << 4));
        cvmx_ixf18201_write16(0x3012 + offset, 0x0010);
        cvmx_ixf18201_write16(0x3007 + offset, 0x8180 | (cal_len_min_1 << 11));
        cvmx_ixf18201_write16(0x3008 + offset, 0xa200 | (cal_len_min_1 << 4));

        cvmx_ixf18201_write16(0x3090 + offset, 0x0301);  /* Enable hairpin loopback */
    }



    /*
PP0:~CONSOLE-> Changing register value, addr 0x0004, old: 0x0140, new: 0x1fff
PP0:~CONSOLE-> Changing register value, addr 0x0009, old: 0x0000, new: 0x007f
PP0:~CONSOLE-> Changing register value, addr 0x310b, old: 0x0004, new: 0xffff
PP0:~CONSOLE-> Changing register value, addr 0x310a, old: 0x7f7b, new: 0xffff

    */

    cvmx_ixf18201_write16(0x0004, 0x1fff);
    cvmx_ixf18201_write16(0x0009, 0x007f);
#if 0
    /* MDI autoscan */
    cvmx_ixf18201_write16(0x310b, 0xffff);
    cvmx_ixf18201_write16(0x310a, 0xffff);
#endif


    /*
    *** 32 bit register, trace only captures part of it...
PP0:~CONSOLE-> Changing register value, addr 0x3100, old: 0x7f7b, new: 0x7f78
PP0:~CONSOLE-> Changing register value, addr 0x3600, old: 0x7f7b, new: 0x7f78
    */

    for (index = 0; index < 2; index++)
    {
        offset = 0x500 * index;
        cvmx_ixf18201_write32(0x3100 + offset, 0x47f7c); /* Also enable jumbo frames */
        /* Set max packet size to 9600 bytes, max supported by IXF18201 */
        cvmx_ixf18201_write32(0x3114 + offset, 0x25800000);
    }


    cvmx_wait(100000000);

    /* Now reset the PCS blocks in the phy.  This seems to be required after
    ** bringing up the Cortina. */
    cvmx_ixf18201_mii_write(1, 3, 0, 0x8000);
    cvmx_ixf18201_mii_write(5, 3, 0, 0x8000);


    return 1;

}
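One detail worth calling out in the loops above: the IXF18201 exposes its two port blocks at register offsets 0x500 apart (0x3005 vs 0x3505, 0x3100 vs 0x3600), which is why every per-port write adds 0x500 * index to the register address. A tiny sketch of that addressing (the helper name is illustrative):

#include <stdint.h>

/* Address of a per-port IXF18201 register: the second port block sits
 * 0x500 above the first, as in the "offset = 0x500 * index" loops above. */
static inline uint32_t ixf_port_reg(uint32_t reg, int port_index)
{
    return reg + 0x500u * (uint32_t)port_index;
}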