Example #1
/**
 * Enable an endpoint to respond to an IN transaction
 *
 * @param usb    USB device state populated by
 *               cvmx_usbd_initialize().
 * @param endpoint_num
 *               Endpoint number to enable
 * @param transfer_type
 *               Transfer type for the endpoint
 * @param max_packet_size
 *               Maximum packet size for the endpoint
 * @param buffer Buffer to send
 * @param buffer_length
 *               Length of the buffer in bytes
 *
 * @return Zero on success, negative on failure
 */
int cvmx_usbd_in_endpoint_enable(cvmx_usbd_state_t *usb,
    int endpoint_num, cvmx_usbd_transfer_t transfer_type,
    int max_packet_size, uint64_t buffer, int buffer_length)
{
    cvmx_usbcx_diepctlx_t usbc_diepctl;
    cvmx_usbcx_dieptsizx_t usbc_dieptsiz;

    if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
        cvmx_dprintf("%s: endpoint=%d buffer=0x%llx length=%d\n",
            __FUNCTION__, endpoint_num, (ULL)buffer, buffer_length);

    usb->endpoint[endpoint_num].buffer_length = buffer_length;

    CVMX_SYNCW; /* Flush out pending writes before enable */

    /* Clear any pending interrupts */
    __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DIEPINTX(endpoint_num, usb->index),
        __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DIEPINTX(endpoint_num, usb->index)));

    usbc_dieptsiz.u32 = 0;
    usbc_dieptsiz.s.mc = 1;
    if (buffer)
    {
        cvmx_write_csr(CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + endpoint_num*8, buffer);
        usbc_dieptsiz.s.pktcnt = (buffer_length + max_packet_size - 1) / max_packet_size;
        if (usbc_dieptsiz.s.pktcnt == 0)
            usbc_dieptsiz.s.pktcnt = 1;
        usbc_dieptsiz.s.xfersize = buffer_length;
    }
    else
    {
        usbc_dieptsiz.s.pktcnt = 0;
        usbc_dieptsiz.s.xfersize = 0;
    }
    __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DIEPTSIZX(endpoint_num, usb->index), usbc_dieptsiz.u32);

    usbc_diepctl.u32 = 0;
    usbc_diepctl.s.epena = (buffer != 0);
    usbc_diepctl.s.setd1pid = 0;
    usbc_diepctl.s.setd0pid = (buffer == 0);
    usbc_diepctl.s.cnak = 1;
    usbc_diepctl.s.txfnum = endpoint_num;
    usbc_diepctl.s.eptype = transfer_type;
    usbc_diepctl.s.usbactep = 1;
    usbc_diepctl.s.nextep = endpoint_num;
    if (endpoint_num == 0)
    {
        switch (max_packet_size)
        {
            case 8:
                usbc_diepctl.s.mps = 3;
                break;
            case 16:
                usbc_diepctl.s.mps = 2;
                break;
            case 32:
                usbc_diepctl.s.mps = 1;
                break;
            default:
                usbc_diepctl.s.mps = 0;
                break;
        }
    }
    else
        usbc_diepctl.s.mps = max_packet_size;
    __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DIEPCTLX(endpoint_num, usb->index), usbc_diepctl.u32);

    return 0;
}
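
A minimal usage sketch (not part of the original file) showing how a caller might queue a bulk IN response with this function. The DMA register write above takes a physical address, so the pointer is translated first; the enumerator CVMX_USBD_TRANSFER_BULK and the cvmx_ptr_to_phys() helper are assumed to come from the cvmx-usbd.h/cvmx.h headers.

/* Hypothetical caller: answer an IN token on endpoint 1 with "len" bytes.
 * CVMX_USBD_TRANSFER_BULK and cvmx_ptr_to_phys() are assumed names. */
static int example_queue_bulk_in(cvmx_usbd_state_t *usb, void *data, int len)
{
    /* The endpoint DMA channel is programmed with a physical address */
    uint64_t phys = cvmx_ptr_to_phys(data);

    return cvmx_usbd_in_endpoint_enable(usb, 1 /* endpoint */,
                                        CVMX_USBD_TRANSFER_BULK,
                                        512 /* max packet size */,
                                        phys, len);
}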
Example #2
File: cvmx-pow.c Project: 2asoft/freebsd
static int __cvmx_pow_capture_v2(void *buffer, int buffer_size)
{
    __cvmx_pow_dump_t *dump = (__cvmx_pow_dump_t*)buffer;
    int num_cores;
    int num_pow_entries = cvmx_pow_get_num_entries();
    int core;
    int index;
    int bits;

    if (buffer_size < (int)sizeof(__cvmx_pow_dump_t))
    {
        cvmx_dprintf("cvmx_pow_capture: Buffer too small\n");
        return -1;
    }

    num_cores = cvmx_octeon_num_cores();

    /* Read all core related state */
    for (core=0; core<num_cores; core++)
    {
        cvmx_pow_load_addr_t load_addr;
        load_addr.u64 = 0;
        load_addr.sstatus_cn68xx.mem_region = CVMX_IO_SEG;
        load_addr.sstatus_cn68xx.is_io = 1;
        load_addr.sstatus_cn68xx.did = CVMX_OCT_DID_TAG_TAG5;
        load_addr.sstatus_cn68xx.coreid = core;
        for (bits=1; bits<6; bits++)
        {
            load_addr.sstatus_cn68xx.opcode = bits;
            dump->sstatus[core][bits].u64 = cvmx_read_csr(load_addr.u64);
        }
    }
    /* Read all internal POW entries */
    for (index=0; index<num_pow_entries; index++)
    {
        cvmx_pow_load_addr_t load_addr;
        load_addr.u64 = 0;
        load_addr.smemload_cn68xx.mem_region = CVMX_IO_SEG;
        load_addr.smemload_cn68xx.is_io = 1;
        load_addr.smemload_cn68xx.did = CVMX_OCT_DID_TAG_TAG2;
        load_addr.smemload_cn68xx.index = index;
        for (bits=1; bits<5; bits++)
        {
            load_addr.smemload_cn68xx.opcode = bits;
            dump->smemload[index][bits].u64 = cvmx_read_csr(load_addr.u64);
        }
    }

    /* Read all group and queue pointers */
    for (index=0; index<64; index++)
    {
        cvmx_pow_load_addr_t load_addr;
        load_addr.u64 = 0;
        load_addr.sindexload_cn68xx.mem_region = CVMX_IO_SEG;
        load_addr.sindexload_cn68xx.is_io = 1;
        load_addr.sindexload_cn68xx.did = CVMX_OCT_DID_TAG_TAG3;
        load_addr.sindexload_cn68xx.qos_grp = index;
        for (bits=1; bits<7; bits++)
        {
            load_addr.sindexload_cn68xx.opcode = bits;
            dump->sindexload[index][bits].u64 = cvmx_read_csr(load_addr.u64);
        }
    }
    return 0;
}
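
A hedged caller sketch: this capture routine is normally reached through the public cvmx_pow_capture()/cvmx_pow_display() pair, which dispatch to the _v1/_v2 variants by chip model (the wrapper names and the internal __cvmx_pow_dump_t type are taken as assumptions here).

/* Hypothetical helper: snapshot the POW state and print it */
static void example_dump_pow_state(void)
{
    static __cvmx_pow_dump_t dump;  /* large structure; keep it off the stack */

    if (cvmx_pow_capture(&dump, sizeof(dump)) == 0)
        cvmx_pow_display(&dump, sizeof(dump));
}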
Example #3
File: cvmx-ilk.c Project: 2asoft/freebsd
void cvmx_ilk_runtime_status (int interface)
{
    cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
    cvmx_ilk_txx_flow_ctl0_t ilk_txx_flow_ctl0; 
    cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
    cvmx_ilk_rxx_int_t ilk_rxx_int;
    cvmx_ilk_rxx_flow_ctl0_t ilk_rxx_flow_ctl0; 
    cvmx_ilk_rxx_flow_ctl1_t ilk_rxx_flow_ctl1; 
    cvmx_ilk_gbl_int_t ilk_gbl_int;

    cvmx_dprintf ("\nilk run-time status: interface: %d\n", interface);

    ilk_txx_cfg1.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG1(interface));
    cvmx_dprintf ("\nilk txx cfg1: 0x%16lx\n", ilk_txx_cfg1.u64);
    if (ilk_txx_cfg1.s.rx_link_fc)
        cvmx_dprintf ("link flow control received\n");
    if (ilk_txx_cfg1.s.tx_link_fc)
        cvmx_dprintf ("link flow control sent\n");

    ilk_txx_flow_ctl0.u64 = cvmx_read_csr (CVMX_ILK_TXX_FLOW_CTL0(interface));
    cvmx_dprintf ("\nilk txx flow ctl0: 0x%16lx\n", ilk_txx_flow_ctl0.u64);
        
    ilk_rxx_cfg1.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG1(interface));
    cvmx_dprintf ("\nilk rxx cfg1: 0x%16lx\n", ilk_rxx_cfg1.u64);
    cvmx_dprintf ("rx fifo count: %d\n", ilk_rxx_cfg1.s.rx_fifo_cnt);

    ilk_rxx_int.u64 = cvmx_read_csr (CVMX_ILK_RXX_INT(interface));
    cvmx_dprintf ("\nilk rxx int: 0x%16lx\n", ilk_rxx_int.u64);
    if (ilk_rxx_int.s.pkt_drop_rxf)
        cvmx_dprintf ("rx fifo packet drop\n");
    if (ilk_rxx_int.u64)
        cvmx_write_csr (CVMX_ILK_RXX_INT(interface), ilk_rxx_int.u64);
        
    ilk_rxx_flow_ctl0.u64 = cvmx_read_csr (CVMX_ILK_RXX_FLOW_CTL0(interface));
    cvmx_dprintf ("\nilk rxx flow ctl0: 0x%16lx\n", ilk_rxx_flow_ctl0.u64);

    ilk_rxx_flow_ctl1.u64 = cvmx_read_csr (CVMX_ILK_RXX_FLOW_CTL1(interface));
    cvmx_dprintf ("\nilk rxx flow ctl1: 0x%16lx\n", ilk_rxx_flow_ctl1.u64);

    ilk_gbl_int.u64 = cvmx_read_csr (CVMX_ILK_GBL_INT);
    cvmx_dprintf ("\nilk gbl int: 0x%16lx\n", ilk_gbl_int.u64);
    if (ilk_gbl_int.s.rxf_push_full)
        cvmx_dprintf ("rx fifo overflow\n");
    if (ilk_gbl_int.u64)
        cvmx_write_csr (CVMX_ILK_GBL_INT, ilk_gbl_int.u64);
}
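
A trivial usage sketch, assuming CVMX_NUM_ILK_INTF (used in the later ILK examples) counts the ILK interfaces on cn68xx.

/* Hypothetical: print the run-time status of every ILK interface */
static void example_ilk_status_all(void)
{
    int interface;

    for (interface = 0; interface < CVMX_NUM_ILK_INTF; interface++)
        cvmx_ilk_runtime_status(interface);
}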
Example #4
/**
 * Initialize a USB port for use. This must be called before any
 * other access to the Octeon USB port is made. The port starts
 * off in the disabled state.
 *
 * @param usb    Pointer to an empty cvmx_usbd_state_t structure
 *               that will be populated by the initialize call.
 *               This structure is then passed to all other USB
 *               functions.
 * @param usb_port_number
 *               Which Octeon USB port to initialize.
 * @param flags  Flags to control hardware initialization. See
 *               cvmx_usbd_initialize_flags_t for the flag
 *               definitions. Some flags are mandatory.
 *
 * @return Zero or a negative on error.
 */
int cvmx_usbd_initialize(cvmx_usbd_state_t *usb,
                                      int usb_port_number,
                                      cvmx_usbd_initialize_flags_t flags)
{
    cvmx_usbnx_clk_ctl_t usbn_clk_ctl;
    cvmx_usbnx_usbp_ctl_status_t usbn_usbp_ctl_status;

    if (cvmx_unlikely(flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
        cvmx_dprintf("%s: Called\n", __FUNCTION__);

    memset(usb, 0, sizeof(*usb)); /* clear the entire state structure, not just the pointer */
    usb->init_flags = flags;
    usb->index = usb_port_number;

    /* Try to determine clock type automatically */
    if ((usb->init_flags & (CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_XI |
                  CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND)) == 0)
    {
        if (__cvmx_helper_board_usb_get_clock_type() == USB_CLOCK_TYPE_CRYSTAL_12)
            usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_XI;  /* Only 12 MHZ crystals are supported */
        else
            usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND;
    }

    if (usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND)
    {
        /* Check for auto ref clock frequency */
        if (!(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_CLOCK_MHZ_MASK))
            switch (__cvmx_helper_board_usb_get_clock_type())
            {
                case USB_CLOCK_TYPE_REF_12:
                    usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_12MHZ;
                    break;
                case USB_CLOCK_TYPE_REF_24:
                    usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_24MHZ;
                    break;
                case USB_CLOCK_TYPE_REF_48:
                default:
                    usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_48MHZ;
                    break;
            }
    }

    /* Power On Reset and PHY Initialization */

    /* 1. Wait for DCOK to assert (nothing to do) */
    /* 2a. Write USBN0/1_CLK_CTL[POR] = 1 and
        USBN0/1_CLK_CTL[HRST,PRST,HCLK_RST] = 0 */
    usbn_clk_ctl.u64 = cvmx_read_csr(CVMX_USBNX_CLK_CTL(usb->index));
    usbn_clk_ctl.s.por = 1;
    usbn_clk_ctl.s.hrst = 0;
    usbn_clk_ctl.s.prst = 0;
    usbn_clk_ctl.s.hclk_rst = 0;
    usbn_clk_ctl.s.enable = 0;
    /* 2b. Select the USB reference clock/crystal parameters by writing
        appropriate values to USBN0/1_CLK_CTL[P_C_SEL, P_RTYPE, P_COM_ON] */
    if (usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND)
    {
        /* The USB port uses 12/24/48MHz 2.5V board clock
            source at USB_XO. USB_XI should be tied to GND.
            Most Octeon evaluation boards require this setting */
        if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
        {
            usbn_clk_ctl.cn31xx.p_rclk  = 1; /* From CN31XX,CN30XX manual */
            usbn_clk_ctl.cn31xx.p_xenbn = 0;
        }
        else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
            usbn_clk_ctl.cn56xx.p_rtype = 2; /* From CN56XX,CN50XX manual */
        else
            usbn_clk_ctl.cn52xx.p_rtype = 1; /* From CN52XX manual */

        switch (usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_CLOCK_MHZ_MASK)
        {
            case CVMX_USBD_INITIALIZE_FLAGS_CLOCK_12MHZ:
                usbn_clk_ctl.s.p_c_sel = 0;
                break;
            case CVMX_USBD_INITIALIZE_FLAGS_CLOCK_24MHZ:
                usbn_clk_ctl.s.p_c_sel = 1;
                break;
            case CVMX_USBD_INITIALIZE_FLAGS_CLOCK_48MHZ:
                usbn_clk_ctl.s.p_c_sel = 2;
                break;
        }
    }
    else
    {
        /* The USB port uses a 12MHz crystal as clock source
            at USB_XO and USB_XI */
        if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
        {
            usbn_clk_ctl.cn31xx.p_rclk  = 1; /* From CN31XX,CN30XX manual */
            usbn_clk_ctl.cn31xx.p_xenbn = 1;
        }
        else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
            usbn_clk_ctl.cn56xx.p_rtype = 0; /* From CN56XX,CN50XX manual */
        else
            usbn_clk_ctl.cn52xx.p_rtype = 0; /* From CN52XX manual */

        usbn_clk_ctl.s.p_c_sel = 0;
    }
    /* 2c. Select the HCLK via writing USBN0/1_CLK_CTL[DIVIDE, DIVIDE2] and
        setting USBN0/1_CLK_CTL[ENABLE] = 1.  Divide the core clock down such
        that USB is as close as possible to 125Mhz */
    {
        int divisor = (cvmx_clock_get_rate(CVMX_CLOCK_CORE)+125000000-1)/125000000;
        if (divisor < 4)  /* Lower than 4 doesn't seem to work properly */
            divisor = 4;
        usbn_clk_ctl.s.divide = divisor;
        usbn_clk_ctl.s.divide2 = 0;
    }
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    /* 2d. Write USBN0/1_CLK_CTL[HCLK_RST] = 1 */
    usbn_clk_ctl.s.hclk_rst = 1;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    /* 2e.  Wait 64 core-clock cycles for HCLK to stabilize */
    cvmx_wait(64);
    /* 3. Program the power-on reset field in the USBN clock-control register:
        USBN_CLK_CTL[POR] = 0 */
    usbn_clk_ctl.s.por = 0;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    /* 4. Wait 1 ms for PHY clock to start */
    cvmx_wait_usec(1000);
    /* 5. Program the Reset input from automatic test equipment field in the
        USBP control and status register: USBN_USBP_CTL_STATUS[ATE_RESET] = 1 */
    usbn_usbp_ctl_status.u64 = cvmx_read_csr(CVMX_USBNX_USBP_CTL_STATUS(usb->index));
    usbn_usbp_ctl_status.s.ate_reset = 1;
    cvmx_write_csr(CVMX_USBNX_USBP_CTL_STATUS(usb->index), usbn_usbp_ctl_status.u64);
    /* 6. Wait 10 cycles */
    cvmx_wait(10);
    /* 7. Clear ATE_RESET field in the USBN clock-control register:
        USBN_USBP_CTL_STATUS[ATE_RESET] = 0 */
    usbn_usbp_ctl_status.s.ate_reset = 0;
    cvmx_write_csr(CVMX_USBNX_USBP_CTL_STATUS(usb->index), usbn_usbp_ctl_status.u64);
    /* 8. Program the PHY reset field in the USBN clock-control register:
        USBN_CLK_CTL[PRST] = 1 */
    usbn_clk_ctl.s.prst = 1;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    /* 9. Program the USBP control and status register to select host or
        device mode. USBN_USBP_CTL_STATUS[HST_MODE] = 0 for host, = 1 for
        device */
    usbn_usbp_ctl_status.s.hst_mode = 1;
    usbn_usbp_ctl_status.s.dm_pulld = 0;
    usbn_usbp_ctl_status.s.dp_pulld = 0;
    cvmx_write_csr(CVMX_USBNX_USBP_CTL_STATUS(usb->index), usbn_usbp_ctl_status.u64);
    /* 10. Wait 1 µs */
    cvmx_wait_usec(1);
    /* 11. Program the hreset_n field in the USBN clock-control register:
        USBN_CLK_CTL[HRST] = 1 */
    usbn_clk_ctl.s.hrst = 1;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    /* 12. Proceed to USB core initialization */
    usbn_clk_ctl.s.enable = 1;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    cvmx_wait_usec(1);

    /* Program the following fields in the global AHB configuration
        register (USBC_GAHBCFG)
        DMA mode, USBC_GAHBCFG[DMAEn]: 1 = DMA mode, 0 = slave mode
        Burst length, USBC_GAHBCFG[HBSTLEN] = 0
        Nonperiodic TxFIFO empty level (slave mode only),
        USBC_GAHBCFG[NPTXFEMPLVL]
        Periodic TxFIFO empty level (slave mode only),
        USBC_GAHBCFG[PTXFEMPLVL]
        Global interrupt mask, USBC_GAHBCFG[GLBLINTRMSK] = 1 */
    {
        cvmx_usbcx_gahbcfg_t usbcx_gahbcfg;
        usbcx_gahbcfg.u32 = 0;
        usbcx_gahbcfg.s.dmaen = 1;
        usbcx_gahbcfg.s.hbstlen = 0;
        usbcx_gahbcfg.s.nptxfemplvl = 1;
        usbcx_gahbcfg.s.ptxfemplvl = 1;
        usbcx_gahbcfg.s.glblintrmsk = 1;
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GAHBCFG(usb->index), usbcx_gahbcfg.u32);
    }

    /* Program the following fields in USBC_GUSBCFG register.
        HS/FS timeout calibration, USBC_GUSBCFG[TOUTCAL] = 0
        ULPI DDR select, USBC_GUSBCFG[DDRSEL] = 0
        USB turnaround time, USBC_GUSBCFG[USBTRDTIM] = 0x5
        PHY low-power clock select, USBC_GUSBCFG[PHYLPWRCLKSEL] = 0 */
    {
        cvmx_usbcx_gusbcfg_t usbcx_gusbcfg;
        usbcx_gusbcfg.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index));
        usbcx_gusbcfg.s.toutcal = 0;
        usbcx_gusbcfg.s.ddrsel = 0;
        usbcx_gusbcfg.s.usbtrdtim = 0x5;
        usbcx_gusbcfg.s.phylpwrclksel = 0;
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index), usbcx_gusbcfg.u32);
    }

    /* Program the following fields in the USBC0/1_DCFG register:
        Device speed, USBC0/1_DCFG[DEVSPD] = 0 (high speed)
        Non-zero-length status OUT handshake, USBC0/1_DCFG[NZSTSOUTHSHK]=0
        Periodic frame interval (if periodic endpoints are supported),
        USBC0/1_DCFG[PERFRINT] = 1 */
    {
        cvmx_usbcx_dcfg_t usbcx_dcfg;
        usbcx_dcfg.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DCFG(usb->index));
        usbcx_dcfg.s.devspd = 0;
        usbcx_dcfg.s.nzstsouthshk = 0;
        usbcx_dcfg.s.perfrint = 1;
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DCFG(usb->index), usbcx_dcfg.u32);
    }

    /* Program the USBC0/1_GINTMSK register */
    {
        cvmx_usbcx_gintmsk_t usbcx_gintmsk;
        usbcx_gintmsk.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GINTMSK(usb->index));
        usbcx_gintmsk.s.oepintmsk = 1;
        usbcx_gintmsk.s.inepintmsk = 1;
        usbcx_gintmsk.s.enumdonemsk = 1;
        usbcx_gintmsk.s.usbrstmsk = 1;
        usbcx_gintmsk.s.usbsuspmsk = 1;
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GINTMSK(usb->index), usbcx_gintmsk.u32);
    }

    cvmx_usbd_disable(usb);
    return 0;
}
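
A minimal bring-up sketch (not from the original file): passing flags = 0 takes the clock auto-detection path shown above, and cvmx_usbd_enable() is assumed to be the counterpart of the cvmx_usbd_disable() call at the end of the function.

/* Hypothetical device-mode bring-up of USB port 0 */
static cvmx_usbd_state_t example_usb_state;

static int example_usb_bringup(void)
{
    /* flags = 0: let the helper pick the clock type from the board info */
    if (cvmx_usbd_initialize(&example_usb_state, 0,
                             (cvmx_usbd_initialize_flags_t)0) != 0)
        return -1;

    /* The port starts disabled; cvmx_usbd_enable() is assumed to exist as
       the counterpart of cvmx_usbd_disable() */
    return cvmx_usbd_enable(&example_usb_state);
}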
Example #5
File: cvmx-ilk.c Project: 2asoft/freebsd
//#define CVMX_ILK_STATS_ENA 1
int cvmx_ilk_enable (int interface)
{
    int res = -1;
    int retry_count = 0;
    cvmx_helper_link_info_t result;
    cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
    cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
#ifdef CVMX_ILK_STATS_ENA
    cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
    cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
#endif

    if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
        return res;

    if (interface >= CVMX_NUM_ILK_INTF)
        return res;

    result.u64 = 0;
    
#ifdef CVMX_ILK_STATS_ENA
    cvmx_dprintf ("\n");
    cvmx_dprintf ("<<<< ILK%d: Before enabling ilk\n", interface);
    cvmx_ilk_reg_dump_rx (interface);
    cvmx_ilk_reg_dump_tx (interface);
#endif

    /* RX packet will be enabled only if link is up */

    /* TX side */
    ilk_txx_cfg1.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG1(interface));
    ilk_txx_cfg1.s.pkt_ena = 1;
    ilk_txx_cfg1.s.rx_link_fc_ign = 1; /* cannot use link fc workaround */
    cvmx_write_csr (CVMX_ILK_TXX_CFG1(interface), ilk_txx_cfg1.u64); 
    cvmx_read_csr (CVMX_ILK_TXX_CFG1(interface));

#ifdef CVMX_ILK_STATS_ENA
    /* RX side stats */
    ilk_rxx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG0(interface));
    ilk_rxx_cfg0.s.lnk_stats_ena = 1;
    cvmx_write_csr (CVMX_ILK_RXX_CFG0(interface), ilk_rxx_cfg0.u64); 

    /* TX side stats */
    ilk_txx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG0(interface));
    ilk_txx_cfg0.s.lnk_stats_ena = 1;
    cvmx_write_csr (CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64); 
#endif

retry:
    retry_count++;
    if (retry_count > 10)
       goto out;

    /* Make sure the link is up, so that packets can be sent. */
    result = __cvmx_helper_ilk_link_get(cvmx_helper_get_ipd_port(interface + CVMX_ILK_GBL_BASE, 0));

    /* Small delay before another retry. */
    cvmx_wait_usec(100);

    ilk_rxx_cfg1.u64 = cvmx_read_csr(CVMX_ILK_RXX_CFG1(interface));
    if (ilk_rxx_cfg1.s.pkt_ena == 0)
       goto retry; 

out:
        
#ifdef CVMX_ILK_STATS_ENA
    cvmx_dprintf (">>>> ILK%d: After ILK is enabled\n", interface);
    cvmx_ilk_reg_dump_rx (interface);
    cvmx_ilk_reg_dump_tx (interface);
#endif

    if (result.s.link_up)
        return 0;

    return -1;
}
Example #6
File: cvmx-ilk.c Project: 2asoft/freebsd
/**
 * Initialize and start the ILK interface.
 *
 * @param interface The identifier of the packet interface to configure and
 *                  use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
 *                  ilk1.
 *
 * @param lane_mask the lane group for this interface
 *
 * @return Zero on success, negative on failure.
 */
int cvmx_ilk_start_interface (int interface, unsigned char lane_mask)
{
    int res = -1;
    int other_intf, this_qlm, other_qlm;
    unsigned char uni_mask;
    cvmx_mio_qlmx_cfg_t mio_qlmx_cfg, other_mio_qlmx_cfg;
    cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
    cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
    cvmx_ilk_ser_cfg_t ilk_ser_cfg;

    if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
        return res;

    if (interface >= CVMX_NUM_ILK_INTF)
        return res;

    if (lane_mask == 0)
        return res;

    /* check conflicts between 2 ilk interfaces. 1 lane can be assigned to 1
     * interface only */
    other_intf = !interface;
    this_qlm = interface + CVMX_ILK_QLM_BASE;
    other_qlm = other_intf + CVMX_ILK_QLM_BASE;
    if (cvmx_ilk_intf_cfg[other_intf].lane_en_mask & lane_mask)
    {
        cvmx_dprintf ("ILK%d: %s: lane assignment conflict\n", interface,
                      __FUNCTION__);
        return res;
    }

    /* check the legality of the lane mask. interface 0 can have 8 lanes,
     * while interface 1 can have 4 lanes at most */
    uni_mask = lane_mask >> (interface * 4);
    if ((uni_mask != 0x1 && uni_mask != 0x3 && uni_mask != 0xf &&
         uni_mask != 0xff) || (interface == 1 && lane_mask > 0xf0))
    {
#if CVMX_ENABLE_DEBUG_PRINTS
        cvmx_dprintf ("ILK%d: %s: incorrect lane mask: 0x%x \n", interface,
                      __FUNCTION__, uni_mask);
#endif
        return res;
    }

    /* check the availability of qlms. qlm_cfg = 001 means the chip is fused
     * to give this qlm to ilk */
    mio_qlmx_cfg.u64 = cvmx_read_csr (CVMX_MIO_QLMX_CFG(this_qlm));
    other_mio_qlmx_cfg.u64 = cvmx_read_csr (CVMX_MIO_QLMX_CFG(other_qlm));
    if (mio_qlmx_cfg.s.qlm_cfg != 1 ||
        (uni_mask == 0xff && other_mio_qlmx_cfg.s.qlm_cfg != 1))
    {
#if CVMX_ENABLE_DEBUG_PRINTS
        cvmx_dprintf ("ILK%d: %s: qlm unavailable\n", interface, __FUNCTION__);
#endif
        return res;
    }

    /* power up the serdes */
    ilk_ser_cfg.u64 = cvmx_read_csr (CVMX_ILK_SER_CFG);
    if (ilk_ser_cfg.s.ser_pwrup == 0)
    {
        ilk_ser_cfg.s.ser_rxpol_auto = 1;
        ilk_ser_cfg.s.ser_rxpol = 0;
        ilk_ser_cfg.s.ser_txpol = 0;
        ilk_ser_cfg.s.ser_reset_n = 0xff;
        ilk_ser_cfg.s.ser_haul = 0;
    }
    ilk_ser_cfg.s.ser_pwrup |= ((interface == 0) && (lane_mask > 0xf)) ?
                               0x3 : (1 << interface);
    cvmx_write_csr (CVMX_ILK_SER_CFG, ilk_ser_cfg.u64);

    /* configure the lane enable of the interface */
    ilk_txx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG0(interface));
    ilk_rxx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG0(interface));
    ilk_txx_cfg0.s.lane_ena = ilk_rxx_cfg0.s.lane_ena = lane_mask;
    cvmx_write_csr (CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64);
    cvmx_write_csr (CVMX_ILK_RXX_CFG0(interface), ilk_rxx_cfg0.u64);

    /* write to local cache. for lane speed, if interface 0 has 8 lanes,
     * assume both qlms have the same speed */
    cvmx_ilk_intf_cfg[interface].intf_en = 1;
    cvmx_ilk_intf_cfg[interface].lane_en_mask = lane_mask;
    res = 0;

    return res;
}
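
A combined bring-up sketch using the two ILK calls shown in Examples #5 and #6: give interface 0 its lower four lanes (0x0f, one of the masks the legality check accepts) and then enable packet transfer once the link trains.

/* Hypothetical ILK bring-up for interface 0 */
static int example_ilk_bringup(void)
{
    const int interface = 0;

    /* 0x0f = lanes 0-3; the check above only accepts 0x1, 0x3, 0xf or 0xff */
    if (cvmx_ilk_start_interface(interface, 0x0f) != 0)
        return -1;

    /* cvmx_ilk_enable() (Example #5) retries until RX packet reception is
       enabled and fails if the link never comes up */
    return cvmx_ilk_enable(interface);
}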
Example #7
int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
			       uint64_t address_max, uint64_t alignment,
			       uint32_t flags)
{

	uint64_t head_addr;
	uint64_t ent_addr;
	/* points to previous list entry; NULL when the current entry is the head of the list */
	uint64_t prev_addr = 0;
	uint64_t new_ent_addr = 0;
	uint64_t desired_min_addr;

#ifdef DEBUG
	cvmx_dprintf("cvmx_bootmem_phy_alloc: req_size: 0x%llx, "
		     "min_addr: 0x%llx, max_addr: 0x%llx, align: 0x%llx\n",
		     (unsigned long long)req_size,
		     (unsigned long long)address_min,
		     (unsigned long long)address_max,
		     (unsigned long long)alignment);
#endif

	if (cvmx_bootmem_desc->major_version > 3) {
		cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
			     "version: %d.%d at addr: %p\n",
			     (int)cvmx_bootmem_desc->major_version,
			     (int)cvmx_bootmem_desc->minor_version,
			     cvmx_bootmem_desc);
		goto error_out;
	}

	/*
	 * Do a variety of checks to validate the arguments.  The
	 * allocator code will later assume that these checks have
	 * been made.  We validate that the requested constraints are
	 * not self-contradictory before we look through the list of
	 * available memory.
	 */

	/* 0 is not a valid req_size for this allocator */
	if (!req_size)
		goto error_out;

	/* Round req_size up to mult of minimum alignment bytes */
	req_size = (req_size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) &
		~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);

	/*
	 * Convert !0 address_min and 0 address_max to special case of
	 * range that specifies an exact memory block to allocate.  Do
	 * this before other checks and adjustments so that this
	 * transformation will be validated.
	 */
	if (address_min && !address_max)
		address_max = address_min + req_size;
	else if (!address_min && !address_max)
		address_max = ~0ull;  /* If no limits given, use max limits */


	/*
	 * Enforce minimum alignment (this also keeps the minimum free block
	 * size the same as the alignment size).
	 */
	if (alignment < CVMX_BOOTMEM_ALIGNMENT_SIZE)
		alignment = CVMX_BOOTMEM_ALIGNMENT_SIZE;

	/*
	 * Adjust address minimum based on requested alignment (round
	 * up to meet alignment).  Do this here so we can reject
	 * impossible requests up front. (NOP for address_min == 0)
	 */
	if (alignment)
		address_min = ALIGN(address_min, alignment);

	/*
	 * Reject inconsistent args.  We have adjusted these, so this
	 * may fail due to our internal changes even if this check
	 * would pass for the values the user supplied.
	 */
	if (req_size > address_max - address_min)
		goto error_out;

	/* Walk through the list entries - first fit found is returned */

	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_bootmem_lock();
	head_addr = cvmx_bootmem_desc->head_addr;
	ent_addr = head_addr;
	for (; ent_addr;
	     prev_addr = ent_addr,
	     ent_addr = cvmx_bootmem_phy_get_next(ent_addr)) {
		uint64_t usable_base, usable_max;
		uint64_t ent_size = cvmx_bootmem_phy_get_size(ent_addr);

		if (cvmx_bootmem_phy_get_next(ent_addr)
		    && ent_addr > cvmx_bootmem_phy_get_next(ent_addr)) {
			cvmx_dprintf("Internal bootmem_alloc() error: ent: "
				"0x%llx, next: 0x%llx\n",
				(unsigned long long)ent_addr,
				(unsigned long long)
				cvmx_bootmem_phy_get_next(ent_addr));
			goto error_out;
		}

		/*
		 * Determine if this is an entry that can satisfy the
		 * request.  Check to make sure the entry is large enough
		 * to satisfy the request.
		 */
		usable_base =
		    ALIGN(max(address_min, ent_addr), alignment);
		usable_max = min(address_max, ent_addr + ent_size);
		/*
		 * We should be able to allocate block at address
		 * usable_base.
		 */

		desired_min_addr = usable_base;
		/*
		 * Determine if request can be satisfied from the
		 * current entry.
		 */
		if (!((ent_addr + ent_size) > usable_base
				&& ent_addr < address_max
				&& req_size <= usable_max - usable_base))
			continue;
		/*
		 * We have found an entry that has room to satisfy the
		 * request, so allocate it from this entry.  If
		 * CVMX_BOOTMEM_FLAG_END_ALLOC is set, then allocate from
		 * the end of this block rather than the beginning.
		 */
		if (flags & CVMX_BOOTMEM_FLAG_END_ALLOC) {
			desired_min_addr = usable_max - req_size;
			/*
			 * Align desired address down to required
			 * alignment.
			 */
			desired_min_addr &= ~(alignment - 1);
		}

		/* Match at start of entry */
		if (desired_min_addr == ent_addr) {
			if (req_size < ent_size) {
				/*
				 * big enough to create a new block
				 * from top portion of block.
				 */
				new_ent_addr = ent_addr + req_size;
				cvmx_bootmem_phy_set_next(new_ent_addr,
					cvmx_bootmem_phy_get_next(ent_addr));
				cvmx_bootmem_phy_set_size(new_ent_addr,
							ent_size -
							req_size);

				/*
				 * Adjust next pointer as following
				 * code uses this.
				 */
				cvmx_bootmem_phy_set_next(ent_addr,
							new_ent_addr);
			}

			/*
			 * adjust prev ptr or head to remove this
			 * entry from list.
			 */
			if (prev_addr)
				cvmx_bootmem_phy_set_next(prev_addr,
					cvmx_bootmem_phy_get_next(ent_addr));
			else
				/*
				 * head of list being returned, so
				 * update head ptr.
				 */
				cvmx_bootmem_desc->head_addr =
					cvmx_bootmem_phy_get_next(ent_addr);

			if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
				cvmx_bootmem_unlock();
			return desired_min_addr;
		}
		/*
		 * block returned doesn't start at beginning of entry,
		 * so we know that we will be splitting a block off
		 * the front of this one.  Create a new block from the
		 * beginning, add to list, and go to top of loop
		 * again.
		 *
		 * create new block from high portion of
		 * block, so that top block starts at desired
		 * addr.
		 */
		new_ent_addr = desired_min_addr;
		cvmx_bootmem_phy_set_next(new_ent_addr,
					cvmx_bootmem_phy_get_next
					(ent_addr));
		cvmx_bootmem_phy_set_size(new_ent_addr,
					cvmx_bootmem_phy_get_size
					(ent_addr) -
					(desired_min_addr -
						ent_addr));
		cvmx_bootmem_phy_set_size(ent_addr,
					desired_min_addr - ent_addr);
		cvmx_bootmem_phy_set_next(ent_addr, new_ent_addr);
		/* Loop again to handle actual alloc from new block */
	}
error_out:
	/* We didn't find anything, so return error */
	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_bootmem_unlock();
	return -1;
}
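
An illustrative sketch (not part of the original file) of the rounding used above: both req_size and address_min are rounded up to a power-of-two boundary before the first-fit walk, which is what the ALIGN() adjustment does.

#include <stdint.h>

/* Round "value" up to the next multiple of a power-of-two "alignment";
 * this mirrors the req_size rounding and ALIGN(address_min, alignment)
 * adjustments performed before the list walk above. */
static inline uint64_t example_align_up(uint64_t value, uint64_t alignment)
{
	/* valid only for power-of-two alignments */
	return (value + alignment - 1) & ~(alignment - 1);
}

/* e.g. example_align_up(0x1001, 0x80) == 0x1080 */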
Example #8
File: cvmx-qlm.c Project: xtra72/s805
/**
 * Errata G-16094: QLM Gen2 Equalizer Default Setting Change.
 * CN68XX pass 1.x and CN66XX pass 1.x QLM tweak. This function tweaks the
 * JTAG settings for the QLMs so they run better at 5 and 6.25 GHz.
 */
void __cvmx_qlm_speed_tweak(void)
{
	cvmx_mio_qlmx_cfg_t qlm_cfg;
	int num_qlms = cvmx_qlm_get_num();
	int qlm;

	/* Workaround for Errata (G-16467) */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_X)) {
		for (qlm = 0; qlm < num_qlms; qlm++) {
			int ir50dac;
			/* This workaround only applies to QLMs running at 6.25Ghz */
			if (cvmx_qlm_get_gbaud_mhz(qlm) == 6250) {
#ifdef CVMX_QLM_DUMP_STATE
				cvmx_dprintf("%s:%d: QLM%d: Applying workaround for Errata G-16467\n", __func__, __LINE__, qlm);
				cvmx_qlm_display_registers(qlm);
				cvmx_dprintf("\n");
#endif
				cvmx_qlm_jtag_set(qlm, -1, "cfg_cdr_trunc", 0);
				/* Hold the QLM in reset */
				cvmx_qlm_jtag_set(qlm, -1, "cfg_rst_n_set", 0);
				cvmx_qlm_jtag_set(qlm, -1, "cfg_rst_n_clr", 1);
				/* Force TX to be idle */
				cvmx_qlm_jtag_set(qlm, -1, "cfg_tx_idle_clr", 0);
				cvmx_qlm_jtag_set(qlm, -1, "cfg_tx_idle_set", 1);
				if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_0)) {
					ir50dac = cvmx_qlm_jtag_get(qlm, 0, "ir50dac");
					while (++ir50dac <= 31)
						cvmx_qlm_jtag_set(qlm, -1, "ir50dac", ir50dac);
				}
				cvmx_qlm_jtag_set(qlm, -1, "div4_byp", 0);
				cvmx_qlm_jtag_set(qlm, -1, "clkf_byp", 16);
				cvmx_qlm_jtag_set(qlm, -1, "serdes_pll_byp", 1);
				cvmx_qlm_jtag_set(qlm, -1, "spdsel_byp", 1);
#ifdef CVMX_QLM_DUMP_STATE
				cvmx_dprintf("%s:%d: QLM%d: Done applying workaround for Errata G-16467\n", __func__, __LINE__, qlm);
				cvmx_qlm_display_registers(qlm);
				cvmx_dprintf("\n\n");
#endif
				/* The QLM will be taken out of reset later when ILK/XAUI are initialized. */
			}
		}

#ifndef CVMX_BUILD_FOR_LINUX_HOST
		/* These QLM tuning parameters are specific to EBB6800
		   eval boards using Cavium QLM cables. These should be
		   removed or tuned based on customer boards. */
		if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBB6800) {
			for (qlm = 0; qlm < num_qlms; qlm++) {
#ifdef CVMX_QLM_DUMP_STATE
				cvmx_dprintf("Setting tunning parameters for QLM%d\n", qlm);
#endif
				cvmx_qlm_jtag_set(qlm, -1, "biasdrv_hs_ls_byp", 12);
				cvmx_qlm_jtag_set(qlm, -1, "biasdrv_hf_byp", 12);
				cvmx_qlm_jtag_set(qlm, -1, "biasdrv_lf_ls_byp", 12);
				cvmx_qlm_jtag_set(qlm, -1, "biasdrv_lf_byp", 12);
				cvmx_qlm_jtag_set(qlm, -1, "tcoeff_hf_byp", 15);
				cvmx_qlm_jtag_set(qlm, -1, "tcoeff_hf_ls_byp", 15);
				cvmx_qlm_jtag_set(qlm, -1, "tcoeff_lf_ls_byp", 15);
				cvmx_qlm_jtag_set(qlm, -1, "tcoeff_lf_byp", 15);
				cvmx_qlm_jtag_set(qlm, -1, "rx_cap_gen2", 0);
				cvmx_qlm_jtag_set(qlm, -1, "rx_eq_gen2", 11);
				cvmx_qlm_jtag_set(qlm, -1, "serdes_tx_byp", 1);
			}
		}
		else if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_NIC68_4) {
			for (qlm = 0; qlm < num_qlms; qlm++) {
#ifdef CVMX_QLM_DUMP_STATE
				cvmx_dprintf("Setting tunning parameters for QLM%d\n", qlm);
#endif
				cvmx_qlm_jtag_set(qlm, -1, "biasdrv_hs_ls_byp", 30);
				cvmx_qlm_jtag_set(qlm, -1, "biasdrv_hf_byp", 30);
				cvmx_qlm_jtag_set(qlm, -1, "biasdrv_lf_ls_byp", 30);
				cvmx_qlm_jtag_set(qlm, -1, "biasdrv_lf_byp", 30);
				cvmx_qlm_jtag_set(qlm, -1, "tcoeff_hf_byp", 0);
				cvmx_qlm_jtag_set(qlm, -1, "tcoeff_hf_ls_byp", 0);
				cvmx_qlm_jtag_set(qlm, -1, "tcoeff_lf_ls_byp", 0);
				cvmx_qlm_jtag_set(qlm, -1, "tcoeff_lf_byp", 0);
				cvmx_qlm_jtag_set(qlm, -1, "rx_cap_gen2", 1);
				cvmx_qlm_jtag_set(qlm, -1, "rx_eq_gen2", 8);
				cvmx_qlm_jtag_set(qlm, -1, "serdes_tx_byp", 1);
			}
		}
#endif
	}

	/* G-16094 QLM Gen2 Equalizer Default Setting Change */
	else if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1_X)
		 || OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_X)) {
		/* Loop through the QLMs */
		for (qlm = 0; qlm < num_qlms; qlm++) {
			/* Read the QLM speed */
			qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(qlm));

			/* If the QLM is at 6.25Ghz or 5Ghz then program JTAG */
			if ((qlm_cfg.s.qlm_spd == 5) || (qlm_cfg.s.qlm_spd == 12) || (qlm_cfg.s.qlm_spd == 0) || (qlm_cfg.s.qlm_spd == 6) || (qlm_cfg.s.qlm_spd == 11)) {
				cvmx_qlm_jtag_set(qlm, -1, "rx_cap_gen2", 0x1);
				cvmx_qlm_jtag_set(qlm, -1, "rx_eq_gen2", 0x8);
			}
		}
	}
}
Example #9
/**
 * Callback to perform SPI4 reset
 *
 * @interface: The identifier of the packet interface to configure and
 *		    use as a SPI interface.
 * @mode:      The operating mode for the SPI interface. The interface
 *		    can operate as a full duplex (both Tx and Rx data paths
 *		    active) or as a halfplex (either the Tx data path is
 *		    active or the Rx data path is active, but not both).
 *
 * Returns Zero on success, non-zero error code on failure (will cause
 * SPI initialization to abort)
 */
int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode)
{
	union cvmx_spxx_dbg_deskew_ctl spxx_dbg_deskew_ctl;
	union cvmx_spxx_clk_ctl spxx_clk_ctl;
	union cvmx_spxx_bist_stat spxx_bist_stat;
	union cvmx_spxx_int_msk spxx_int_msk;
	union cvmx_stxx_int_msk stxx_int_msk;
	union cvmx_spxx_trn4_ctl spxx_trn4_ctl;
	int index;
	uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;

	/* Disable SPI error events while we run BIST */
	spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface));
	cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
	stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface));
	cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);

	/* Run BIST in the SPI interface */
	cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), 0);
	cvmx_write_csr(CVMX_STXX_COM_CTL(interface), 0);
	spxx_clk_ctl.u64 = 0;
	spxx_clk_ctl.s.runbist = 1;
	cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
	cvmx_wait(10 * MS);
	spxx_bist_stat.u64 = cvmx_read_csr(CVMX_SPXX_BIST_STAT(interface));
	if (spxx_bist_stat.s.stat0)
		cvmx_dprintf
		    ("ERROR SPI%d: BIST failed on receive datapath FIFO\n",
		     interface);
	if (spxx_bist_stat.s.stat1)
		cvmx_dprintf("ERROR SPI%d: BIST failed on RX calendar table\n",
			     interface);
	if (spxx_bist_stat.s.stat2)
		cvmx_dprintf("ERROR SPI%d: BIST failed on TX calendar table\n",
			     interface);

	/* Clear the calendar table after BIST to fix parity errors */
	for (index = 0; index < 32; index++) {
		union cvmx_srxx_spi4_calx srxx_spi4_calx;
		union cvmx_stxx_spi4_calx stxx_spi4_calx;

		srxx_spi4_calx.u64 = 0;
		srxx_spi4_calx.s.oddpar = 1;
		cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface),
			       srxx_spi4_calx.u64);

		stxx_spi4_calx.u64 = 0;
		stxx_spi4_calx.s.oddpar = 1;
		cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface),
			       stxx_spi4_calx.u64);
	}

	/* Re enable reporting of error interrupts */
	cvmx_write_csr(CVMX_SPXX_INT_REG(interface),
		       cvmx_read_csr(CVMX_SPXX_INT_REG(interface)));
	cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64);
	cvmx_write_csr(CVMX_STXX_INT_REG(interface),
		       cvmx_read_csr(CVMX_STXX_INT_REG(interface)));
	cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64);

	/* Setup the CLKDLY right in the middle */
	spxx_clk_ctl.u64 = 0;
	spxx_clk_ctl.s.seetrn = 0;
	spxx_clk_ctl.s.clkdly = 0x10;
	spxx_clk_ctl.s.runbist = 0;
	spxx_clk_ctl.s.statdrv = 0;
	/* This should always be on the opposite edge as statdrv */
	spxx_clk_ctl.s.statrcv = 1;
	spxx_clk_ctl.s.sndtrn = 0;
	spxx_clk_ctl.s.drptrn = 0;
	spxx_clk_ctl.s.rcvtrn = 0;
	spxx_clk_ctl.s.srxdlck = 0;
	cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
	cvmx_wait(100 * MS);

	/* Reset SRX0 DLL */
	spxx_clk_ctl.s.srxdlck = 1;
	cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);

	/* Waiting for Inf0 Spi4 RX DLL to lock */
	cvmx_wait(100 * MS);

	/* Enable dynamic alignment */
	spxx_trn4_ctl.s.trntest = 0;
	spxx_trn4_ctl.s.jitter = 1;
	spxx_trn4_ctl.s.clr_boot = 1;
	spxx_trn4_ctl.s.set_boot = 0;
	if (OCTEON_IS_MODEL(OCTEON_CN58XX))
		spxx_trn4_ctl.s.maxdist = 3;
	else
		spxx_trn4_ctl.s.maxdist = 8;
	spxx_trn4_ctl.s.macro_en = 1;
	spxx_trn4_ctl.s.mux_en = 1;
	cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);

	spxx_dbg_deskew_ctl.u64 = 0;
	cvmx_write_csr(CVMX_SPXX_DBG_DESKEW_CTL(interface),
		       spxx_dbg_deskew_ctl.u64);

	return 0;
}
Example #10
/**
 * Callback to perform clock detection
 *
 * @interface: The identifier of the packet interface to configure and
 *		    use as a SPI interface.
 * @mode:      The operating mode for the SPI interface. The interface
 *		    can operate as a full duplex (both Tx and Rx data paths
 *		    active) or as a halfplex (either the Tx data path is
 *		    active or the Rx data path is active, but not both).
 * @timeout:   Timeout to wait for clock synchronization in seconds
 *
 * Returns Zero on success, non-zero error code on failure (will cause
 * SPI initialization to abort)
 */
int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode, int timeout)
{
	int clock_transitions;
	union cvmx_spxx_clk_stat stat;
	uint64_t timeout_time;
	uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;

	/*
	 * Regardless of operating mode, both Tx and Rx clocks must be
	 * present for the SPI interface to operate.
	 */
	cvmx_dprintf("SPI%d: Waiting to see TsClk...\n", interface);
	timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
	/*
	 * Require 100 clock transitions in order to avoid any noise
	 * in the beginning.
	 */
	clock_transitions = 100;
	do {
		stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
		if (stat.s.s4clk0 && stat.s.s4clk1 && clock_transitions) {
			/*
			 * We've seen a clock transition, so decrement
			 * the number we still need.
			 */
			clock_transitions--;
			cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
			stat.s.s4clk0 = 0;
			stat.s.s4clk1 = 0;
		}
		if (cvmx_get_cycle() > timeout_time) {
			cvmx_dprintf("SPI%d: Timeout\n", interface);
			return -1;
		}
	} while (stat.s.s4clk0 == 0 || stat.s.s4clk1 == 0);

	cvmx_dprintf("SPI%d: Waiting to see RsClk...\n", interface);
	timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
	/*
	 * Require 100 clock transitions in order to avoid any noise in the
	 * beginning.
	 */
	clock_transitions = 100;
	do {
		stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
		if (stat.s.d4clk0 && stat.s.d4clk1 && clock_transitions) {
			/*
			 * We've seen a clock transition, so decrement
			 * the number we still need
			 */
			clock_transitions--;
			cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
			stat.s.d4clk0 = 0;
			stat.s.d4clk1 = 0;
		}
		if (cvmx_get_cycle() > timeout_time) {
			cvmx_dprintf("SPI%d: Timeout\n", interface);
			return -1;
		}
	} while (stat.s.d4clk0 == 0 || stat.s.d4clk1 == 0);

	return 0;
}
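
A minimal sketch tying the two SPI callbacks of Examples #9 and #10 together in the order the bring-up sequence would use them: reset first, then wait up to 10 seconds for both clocks. CVMX_SPI_MODE_DUPLEX is assumed to be one of the cvmx_spi_mode_t values.

/* Hypothetical: run the default reset and clock-detect callbacks by hand on
 * interface 0. CVMX_SPI_MODE_DUPLEX is an assumed cvmx_spi_mode_t value. */
static int example_spi_clock_check(void)
{
	if (cvmx_spi_reset_cb(0, CVMX_SPI_MODE_DUPLEX) != 0)
		return -1;

	/* 10-second timeout for both Tx and Rx clocks to appear */
	return cvmx_spi_clock_detect_cb(0, CVMX_SPI_MODE_DUPLEX, 10);
}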
Example #11
void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
			  uint32_t clear_on_read)
{
	if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		union cvmx_l2c_pfctl pfctl;

		pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL);

		switch (counter) {
		case 0:
			pfctl.s.cnt0sel = event;
			pfctl.s.cnt0ena = 1;
			pfctl.s.cnt0rdclr = clear_on_read;
			break;
		case 1:
			pfctl.s.cnt1sel = event;
			pfctl.s.cnt1ena = 1;
			pfctl.s.cnt1rdclr = clear_on_read;
			break;
		case 2:
			pfctl.s.cnt2sel = event;
			pfctl.s.cnt2ena = 1;
			pfctl.s.cnt2rdclr = clear_on_read;
			break;
		case 3:
		default:
			pfctl.s.cnt3sel = event;
			pfctl.s.cnt3ena = 1;
			pfctl.s.cnt3rdclr = clear_on_read;
			break;
		}

		cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
	} else {
		union cvmx_l2c_tadx_prf l2c_tadx_prf;
		int tad;

		cvmx_dprintf("L2C performance counter events are different for this chip, mapping 'event' to cvmx_l2c_tad_event_t\n");
		if (clear_on_read)
			cvmx_dprintf("L2C counters don't support clear on read for this chip\n");

		l2c_tadx_prf.u64 = cvmx_read_csr(CVMX_L2C_TADX_PRF(0));

		switch (counter) {
		case 0:
			l2c_tadx_prf.s.cnt0sel = event;
			break;
		case 1:
			l2c_tadx_prf.s.cnt1sel = event;
			break;
		case 2:
			l2c_tadx_prf.s.cnt2sel = event;
			break;
		default:
		case 3:
			l2c_tadx_prf.s.cnt3sel = event;
			break;
		}
		for (tad = 0; tad < CVMX_L2C_TADS; tad++)
			cvmx_write_csr(CVMX_L2C_TADX_PRF(tad),
				       l2c_tadx_prf.u64);
	}
}
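
A hedged usage sketch: program counter 0 without clear-on-read and read it back after the workload. The CVMX_L2C_EVENT_CYCLES enumerator and cvmx_l2c_read_perf() are assumed from the companion L2C API and are not shown in this example.

/* Hypothetical: count L2C cycles around a workload (names assumed) */
static void example_l2c_measure(void)
{
	cvmx_l2c_config_perf(0, CVMX_L2C_EVENT_CYCLES, 0 /* no clear-on-read */);

	/* ... run the code being measured ... */

	cvmx_dprintf("L2C counter 0: %llu\n",
		     (unsigned long long)cvmx_l2c_read_perf(0));
}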
Example #12
File: cvmx-pow.c Project: 2asoft/freebsd
void __cvmx_pow_display_v2(void *buffer, int buffer_size)
{
    __cvmx_pow_dump_t *dump = (__cvmx_pow_dump_t*)buffer;
    int num_pow_entries = cvmx_pow_get_num_entries();
    int num_cores;
    int core;
    int index;
    uint8_t entry_list[2048];

    if (buffer_size < (int)sizeof(__cvmx_pow_dump_t))
    {
        cvmx_dprintf("cvmx_pow_dump: Buffer too small, pow_dump_t = 0x%x, buffer_size = 0x%x\n", (int)sizeof(__cvmx_pow_dump_t), buffer_size);
        return;
    }

    memset(entry_list, 0, sizeof(entry_list));
    num_cores = cvmx_octeon_num_cores();

    /* Print the free list info */
    {
        int valid[3], has_one[3], head[3], tail[3], qnum_head, qnum_tail;
        int idx;

        valid[0] = dump->sindexload[0][4].sindexload1_cn68xx.queue_val;
        valid[1] = dump->sindexload[0][5].sindexload1_cn68xx.queue_val;
        valid[2] = dump->sindexload[0][6].sindexload1_cn68xx.queue_val;
        has_one[0] = dump->sindexload[0][4].sindexload1_cn68xx.queue_one;
        has_one[1] = dump->sindexload[0][5].sindexload1_cn68xx.queue_one;
        has_one[2] = dump->sindexload[0][6].sindexload1_cn68xx.queue_one;
        head[0] = dump->sindexload[0][4].sindexload1_cn68xx.queue_head;
        head[1] = dump->sindexload[0][5].sindexload1_cn68xx.queue_head;
        head[2] = dump->sindexload[0][6].sindexload1_cn68xx.queue_head;
        tail[0] = dump->sindexload[0][4].sindexload1_cn68xx.queue_tail;
        tail[1] = dump->sindexload[0][5].sindexload1_cn68xx.queue_tail;
        tail[2] = dump->sindexload[0][6].sindexload1_cn68xx.queue_tail;
        qnum_head = dump->sindexload[0][4].sindexload1_cn68xx.qnum_head;
        qnum_tail = dump->sindexload[0][4].sindexload1_cn68xx.qnum_tail;

        printf("Free List: qnum_head=%d, qnum_tail=%d\n", qnum_head, qnum_tail);
        printf("Free0: valid=%d, one=%d, head=%llu, tail=%llu\n", valid[0], has_one[0], CAST64(head[0]), CAST64(tail[0]));
        printf("Free1: valid=%d, one=%d, head=%llu, tail=%llu\n", valid[1], has_one[1], CAST64(head[1]), CAST64(tail[1]));
        printf("Free2: valid=%d, one=%d, head=%llu, tail=%llu\n", valid[2], has_one[2], CAST64(head[2]), CAST64(tail[2]));
        
        idx=qnum_head;
        while (valid[0] || valid[1] || valid[2])
        {
            int qidx = idx % 3;

            if (head[qidx] == tail[qidx])
                valid[qidx] = 0;

            if (__cvmx_pow_entry_mark_list(head[qidx], CVMX_POW_LIST_FREE, entry_list))   
                break;
            head[qidx] = dump->smemload[head[qidx]][4].s_smemload3_cn68xx.fwd_index;
            //printf("qidx = %d, idx = %d, head[qidx] = %d\n", qidx, idx, head[qidx]);
            idx++;
        }
    }
            
    /* Print the core state */
    for (core = 0; core < num_cores; core++)
    {
        int pendtag = 1;
        int pendwqp = 2;
        int tag = 3;
        int wqp = 4;
        int links = 5;

        printf("Core %d State: tag=%s,0x%08x", core, 
               OCT_TAG_TYPE_STRING(dump->sstatus[core][tag].s_sstatus2_cn68xx.tag_type),
               dump->sstatus[core][tag].s_sstatus2_cn68xx.tag);
        if (dump->sstatus[core][tag].s_sstatus2_cn68xx.tag_type != CVMX_POW_TAG_TYPE_NULL_NULL)
        {
            __cvmx_pow_entry_mark_list(dump->sstatus[core][tag].s_sstatus2_cn68xx.index, CVMX_POW_LIST_CORE + core, entry_list);
            printf(" grp=%d",                   dump->sstatus[core][tag].s_sstatus2_cn68xx.grp);
            printf(" wqp=0x%016llx",            CAST64(dump->sstatus[core][wqp].s_sstatus3_cn68xx.wqp));
            printf(" index=%d",                 dump->sstatus[core][tag].s_sstatus2_cn68xx.index);
            if (dump->sstatus[core][links].s_sstatus4_cn68xx.head)
                printf(" head");
            else
                printf(" prev=%d", dump->sstatus[core][links].s_sstatus4_cn68xx.revlink_index);
            if (dump->sstatus[core][links].s_sstatus4_cn68xx.tail)
                printf(" tail");
            else
                printf(" next=%d", dump->sstatus[core][links].s_sstatus4_cn68xx.link_index);
        }
        if (dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_switch)
        {
            printf(" pend_switch=%d",           dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_switch);
        }
                                                                                
        if (dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_desched)
        {
            printf(" pend_desched=%d",          dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_desched);
            printf(" pend_nosched=%d",          dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_nosched);
        }
        if (dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_get_work)
        {
            if (dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_get_work_wait)
                printf(" (Waiting for work)");
            else
                printf(" (Getting work)");
        }
        if (dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_alloc_we)
            printf(" pend_alloc_we=%d",          dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_alloc_we);
        if (dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_nosched_clr)
        {
            printf(" pend_nosched_clr=%d",      dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_nosched_clr);
            printf(" pend_index=%d",            dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_index);
        }
        if (dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_switch)
        {
            printf(" pending tag=%s,0x%08x",
                   OCT_TAG_TYPE_STRING(dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_type),
                   dump->sstatus[core][pendtag].s_sstatus0_cn68xx.pend_tag);
        }
        if (dump->sstatus[core][pendwqp].s_sstatus1_cn68xx.pend_nosched_clr)
            printf(" pend_wqp=0x%016llx\n",     CAST64(dump->sstatus[core][pendwqp].s_sstatus1_cn68xx.pend_wqp));
        printf("\n");
    }

    /* Print out the state of the nosched list and the 16 deschedule lists. */
    __cvmx_pow_display_list_and_walk(CVMX_POW_LIST_NOSCHED, dump, entry_list,
                            dump->sindexload[0][3].sindexload0_cn68xx.queue_val,
                            dump->sindexload[0][3].sindexload0_cn68xx.queue_one,
                            dump->sindexload[0][3].sindexload0_cn68xx.queue_head,
                            dump->sindexload[0][3].sindexload0_cn68xx.queue_tail);
    for (index=0; index<64; index++)
    {
        __cvmx_pow_display_list_and_walk(CVMX_POW_LIST_DESCHED + index, dump, entry_list,
                                dump->sindexload[index][2].sindexload0_cn68xx.queue_val,
                                dump->sindexload[index][2].sindexload0_cn68xx.queue_one,
                                dump->sindexload[index][2].sindexload0_cn68xx.queue_head,
                                dump->sindexload[index][2].sindexload0_cn68xx.queue_tail);
    }

    /* Print out the state of the 8 internal input queues */
    for (index=0; index<8; index++)
    {
        __cvmx_pow_display_list_and_walk(CVMX_POW_LIST_INPUT + index, dump, entry_list,
                                dump->sindexload[index][1].sindexload0_cn68xx.queue_val,
                                dump->sindexload[index][1].sindexload0_cn68xx.queue_one,
                                dump->sindexload[index][1].sindexload0_cn68xx.queue_head,
                                dump->sindexload[index][1].sindexload0_cn68xx.queue_tail);
    }

    /* Print out the state of the 16 memory queues */
    for (index=0; index<8; index++)
    {
        const char *name;
        if (dump->sindexload[index][1].sindexload0_cn68xx.queue_head)
            name = "Queue %da Memory (is head)";
        else
            name = "Queue %da Memory";
        __cvmx_pow_display_list(name, index,
                                dump->sindexload[index][1].sindexload0_cn68xx.queue_val,
                                dump->sindexload[index][1].sindexload0_cn68xx.queue_one,
                                dump->sindexload[index][1].sindexload0_cn68xx.queue_head,
                                dump->sindexload[index][1].sindexload0_cn68xx.queue_tail);
        if (dump->sindexload[index+8][1].sindexload0_cn68xx.queue_head)
            name = "Queue %db Memory (is head)";
        else
            name = "Queue %db Memory";
        __cvmx_pow_display_list(name, index,
                                dump->sindexload[index+8][1].sindexload0_cn68xx.queue_val,
                                dump->sindexload[index+8][1].sindexload0_cn68xx.queue_one,
                                dump->sindexload[index+8][1].sindexload0_cn68xx.queue_head,
                                dump->sindexload[index+8][1].sindexload0_cn68xx.queue_tail);
    }

    /* Print out each of the internal POW entries. Each entry has a tag, group,
       wqe, and possibly a next pointer. The next pointer is only valid if this
       entry isn't marked as a tail */
    for (index=0; index<num_pow_entries; index++)
    {
        printf("Entry %d(%-10s): tag=%s,0x%08x grp=%d wqp=0x%016llx", index,
               __cvmx_pow_list_names[entry_list[index]],
               OCT_TAG_TYPE_STRING(dump->smemload[index][1].s_smemload0_cn68xx.tag_type),
               dump->smemload[index][1].s_smemload0_cn68xx.tag,
               dump->smemload[index][2].s_smemload1_cn68xx.grp,
               CAST64(dump->smemload[index][2].s_smemload1_cn68xx.wqp));
        if (dump->smemload[index][1].s_smemload0_cn68xx.tail)
            printf(" tail");
        else
            printf(" next=%d", dump->smemload[index][4].s_smemload3_cn68xx.fwd_index);
        if (entry_list[index] >= CVMX_POW_LIST_DESCHED)
        {
            printf(" prev=%d", dump->smemload[index][4].s_smemload3_cn68xx.fwd_index);
            printf(" nosched=%d", dump->smemload[index][1].s_smemload1_cn68xx.nosched);
            if (dump->smemload[index][3].s_smemload2_cn68xx.pend_switch)
            {
                printf(" pending tag=%s,0x%08x",
                       OCT_TAG_TYPE_STRING(dump->smemload[index][3].s_smemload2_cn68xx.pend_type),
                       dump->smemload[index][3].s_smemload2_cn68xx.pend_tag);
            }
        }
        printf("\n");
    }
}
Example #13
File: cvmx-pow.c Project: 2asoft/freebsd
void __cvmx_pow_display_v1(void *buffer, int buffer_size)
{
    __cvmx_pow_dump_t *dump = (__cvmx_pow_dump_t*)buffer;
    int num_pow_entries = cvmx_pow_get_num_entries();
    int num_cores;
    int core;
    int index;
    uint8_t entry_list[2048];

    if (buffer_size < (int)sizeof(__cvmx_pow_dump_t))
    {
        cvmx_dprintf("cvmx_pow_dump: Buffer too small\n");
        return;
    }

    memset(entry_list, 0, sizeof(entry_list));
    num_cores = cvmx_octeon_num_cores();

    /* Print the free list info */
    __cvmx_pow_display_list_and_walk(CVMX_POW_LIST_FREE, dump, entry_list,
                                     dump->sindexload[0][0].sindexload0.free_val,
                                     dump->sindexload[0][0].sindexload0.free_one,
                                     dump->sindexload[0][0].sindexload0.free_head,
                                     dump->sindexload[0][0].sindexload0.free_tail);

    /* Print the core state */
    for (core=0; core<num_cores; core++)
    {
        const int bit_rev = 1;
        const int bit_cur = 2;
        const int bit_wqp = 4;
        printf("Core %d State:  tag=%s,0x%08x", core,
               OCT_TAG_TYPE_STRING(dump->sstatus[core][bit_cur].s_sstatus2.tag_type),
               dump->sstatus[core][bit_cur].s_sstatus2.tag);
        if (dump->sstatus[core][bit_cur].s_sstatus2.tag_type != CVMX_POW_TAG_TYPE_NULL_NULL)
        {
            __cvmx_pow_entry_mark_list(dump->sstatus[core][bit_cur].s_sstatus2.index, CVMX_POW_LIST_CORE + core, entry_list);
            printf(" grp=%d",                   dump->sstatus[core][bit_cur].s_sstatus2.grp);
            printf(" wqp=0x%016llx",            CAST64(dump->sstatus[core][bit_cur|bit_wqp].s_sstatus4.wqp));
            printf(" index=%d",                 dump->sstatus[core][bit_cur].s_sstatus2.index);
            if (dump->sstatus[core][bit_cur].s_sstatus2.head)
                printf(" head");
            else
                printf(" prev=%d", dump->sstatus[core][bit_cur|bit_rev].s_sstatus3.revlink_index);
            if (dump->sstatus[core][bit_cur].s_sstatus2.tail)
                printf(" tail");
            else
                printf(" next=%d", dump->sstatus[core][bit_cur].s_sstatus2.link_index);
        }

        if (dump->sstatus[core][0].s_sstatus0.pend_switch)
        {
            printf(" pend_switch=%d",           dump->sstatus[core][0].s_sstatus0.pend_switch);
            printf(" pend_switch_full=%d",      dump->sstatus[core][0].s_sstatus0.pend_switch_full);
            printf(" pend_switch_null=%d",      dump->sstatus[core][0].s_sstatus0.pend_switch_null);
        }

        if (dump->sstatus[core][0].s_sstatus0.pend_desched)
        {
            printf(" pend_desched=%d",          dump->sstatus[core][0].s_sstatus0.pend_desched);
            printf(" pend_desched_switch=%d",   dump->sstatus[core][0].s_sstatus0.pend_desched_switch);
            printf(" pend_nosched=%d",          dump->sstatus[core][0].s_sstatus0.pend_nosched);
            if (dump->sstatus[core][0].s_sstatus0.pend_desched_switch)
                printf(" pend_grp=%d",              dump->sstatus[core][0].s_sstatus0.pend_grp);
        }

        if (dump->sstatus[core][0].s_sstatus0.pend_new_work)
        {
            if (dump->sstatus[core][0].s_sstatus0.pend_new_work_wait)
                printf(" (Waiting for work)");
            else
                printf(" (Getting work)");
        }
        if (dump->sstatus[core][0].s_sstatus0.pend_null_rd)
            printf(" pend_null_rd=%d",          dump->sstatus[core][0].s_sstatus0.pend_null_rd);
        if (dump->sstatus[core][0].s_sstatus0.pend_nosched_clr)
        {
            printf(" pend_nosched_clr=%d",      dump->sstatus[core][0].s_sstatus0.pend_nosched_clr);
            printf(" pend_index=%d",            dump->sstatus[core][0].s_sstatus0.pend_index);
        }
        if (dump->sstatus[core][0].s_sstatus0.pend_switch ||
            (dump->sstatus[core][0].s_sstatus0.pend_desched &&
            dump->sstatus[core][0].s_sstatus0.pend_desched_switch))
        {
            printf(" pending tag=%s,0x%08x",
                   OCT_TAG_TYPE_STRING(dump->sstatus[core][0].s_sstatus0.pend_type),
                   dump->sstatus[core][0].s_sstatus0.pend_tag);
        }
        if (dump->sstatus[core][0].s_sstatus0.pend_nosched_clr)
            printf(" pend_wqp=0x%016llx\n",     CAST64(dump->sstatus[core][bit_wqp].s_sstatus1.pend_wqp));
        printf("\n");
    }

    /* Print out the state of the nosched list and the 16 deschedule lists. */
    __cvmx_pow_display_list_and_walk(CVMX_POW_LIST_NOSCHED, dump, entry_list,
                            dump->sindexload[0][2].sindexload1.nosched_val,
                            dump->sindexload[0][2].sindexload1.nosched_one,
                            dump->sindexload[0][2].sindexload1.nosched_head,
                            dump->sindexload[0][2].sindexload1.nosched_tail);
    for (index=0; index<16; index++)
    {
        __cvmx_pow_display_list_and_walk(CVMX_POW_LIST_DESCHED + index, dump, entry_list,
                                dump->sindexload[index][2].sindexload1.des_val,
                                dump->sindexload[index][2].sindexload1.des_one,
                                dump->sindexload[index][2].sindexload1.des_head,
                                dump->sindexload[index][2].sindexload1.des_tail);
    }

    /* Print out the state of the 8 internal input queues */
    for (index=0; index<8; index++)
    {
        __cvmx_pow_display_list_and_walk(CVMX_POW_LIST_INPUT + index, dump, entry_list,
                                dump->sindexload[index][0].sindexload0.loc_val,
                                dump->sindexload[index][0].sindexload0.loc_one,
                                dump->sindexload[index][0].sindexload0.loc_head,
                                dump->sindexload[index][0].sindexload0.loc_tail);
    }

    /* Print out the state of the 16 memory queues */
    for (index=0; index<8; index++)
    {
        const char *name;
        if (dump->sindexload[index][1].sindexload2.rmt_is_head)
            name = "Queue %da Memory (is head)";
        else
            name = "Queue %da Memory";
        __cvmx_pow_display_list(name, index,
                                dump->sindexload[index][1].sindexload2.rmt_val,
                                dump->sindexload[index][1].sindexload2.rmt_one,
                                dump->sindexload[index][1].sindexload2.rmt_head,
                                dump->sindexload[index][3].sindexload3.rmt_tail);
        if (dump->sindexload[index+8][1].sindexload2.rmt_is_head)
            name = "Queue %db Memory (is head)";
        else
            name = "Queue %db Memory";
        __cvmx_pow_display_list(name, index,
                                dump->sindexload[index+8][1].sindexload2.rmt_val,
                                dump->sindexload[index+8][1].sindexload2.rmt_one,
                                dump->sindexload[index+8][1].sindexload2.rmt_head,
                                dump->sindexload[index+8][3].sindexload3.rmt_tail);
    }

    /* Print out each of the internal POW entries. Each entry has a tag, group,
        wqe, and possibly a next pointer. The next pointer is only valid if this
        entry isn't marked as a tail */
    for (index=0; index<num_pow_entries; index++)
    {
        printf("Entry %d(%-10s): tag=%s,0x%08x grp=%d wqp=0x%016llx", index,
               __cvmx_pow_list_names[entry_list[index]],
               OCT_TAG_TYPE_STRING(dump->smemload[index][0].s_smemload0.tag_type),
               dump->smemload[index][0].s_smemload0.tag,
               dump->smemload[index][0].s_smemload0.grp,
               CAST64(dump->smemload[index][2].s_smemload1.wqp));
        if (dump->smemload[index][0].s_smemload0.tail)
            printf(" tail");
        else
            printf(" next=%d", dump->smemload[index][0].s_smemload0.next_index);
        if (entry_list[index] >= CVMX_POW_LIST_DESCHED)
        {
            printf(" nosched=%d", dump->smemload[index][1].s_smemload2.nosched);
            if (dump->smemload[index][1].s_smemload2.pend_switch)
            {
                printf(" pending tag=%s,0x%08x",
                       OCT_TAG_TYPE_STRING(dump->smemload[index][1].s_smemload2.pend_type),
                       dump->smemload[index][1].s_smemload2.pend_tag);
            }
        }
        printf("\n");
    }
}
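The loop above follows next_index only when an entry is not flagged as a tail; below is a minimal, standalone C sketch of that walk (the entry layout and field names are illustrative, not the SDK's).
/* Illustrative sketch only, not SDK code. */
#include <stdio.h>

struct entry {
    int tail;       /* non-zero: end of its list, next_index is invalid */
    int next_index; /* valid only when tail == 0 */
};

static void walk_list(const struct entry *tbl, int head)
{
    int index = head;
    for (;;) {
        printf("Entry %d", index);
        if (tbl[index].tail) {
            printf(" tail\n");
            break;
        }
        printf(" next=%d\n", tbl[index].next_index);
        index = tbl[index].next_index;
    }
}

int main(void)
{
    struct entry tbl[3] = { {0, 2}, {1, 0}, {0, 1} }; /* list is 0 -> 2 -> 1 (tail) */
    walk_list(tbl, 0);
    return 0;
}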
/**
 * Initialize and start the SPI interface.
 *
 * @param interface The identifier of the packet interface to configure and
 *                  use as a SPI interface.
 * @param mode      The operating mode for the SPI interface. The interface
 *                  can operate as a full duplex (both Tx and Rx data paths
 *                  active) or as a halfplex (either the Tx data path is
 *                  active or the Rx data path is active, but not both).
 * @param timeout   Timeout to wait for clock synchronization in seconds
 * @return Zero on success, negative on failure.
 */
int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode, int timeout)
{
    uint64_t                     timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
    cvmx_spxx_trn4_ctl_t         spxx_trn4_ctl;
    cvmx_spxx_clk_stat_t         stat;
    cvmx_stxx_com_ctl_t          stxx_com_ctl;
    cvmx_srxx_com_ctl_t          srxx_com_ctl;
    cvmx_stxx_spi4_dat_t         stxx_spi4_dat;
    uint64_t                     count;
    cvmx_pko_reg_gmx_port_mode_t pko_mode;


    if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
        return -1;

    cvmx_dprintf ("SPI%d: mode %s, cal_len: %d, cal_rep: %d\n",
            interface, modes[mode], CAL_LEN, CAL_REP);

      // Configure for 16 ports (PKO -> GMX FIFO partition setting)
      // ----------------------------------------------------------
    pko_mode.u64 = cvmx_read_csr(CVMX_PKO_REG_GMX_PORT_MODE);
    if (interface == 0) 
    {
        pko_mode.s.mode0 = 0;
    }
    else 
    {
        pko_mode.s.mode1 = 0;
    }
    cvmx_write_csr(CVMX_PKO_REG_GMX_PORT_MODE, pko_mode.u64);

      // Configure GMX
      // -------------------------------------------------
    cvmx_write_csr (CVMX_GMXX_TX_PRTS(interface), 0xA);
      // PRTS           [ 4: 0] ( 5b) = 10


      // Bringing up Spi4 Interface
      // -------------------------------------------------

      // Reset the Spi4 deskew logic
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_DBG_DESKEW_CTL(interface), 0x00200000);
      // DLLDIS         [ 0: 0] ( 1b) = 0
      // DLLFRC         [ 1: 1] ( 1b) = 0
      // OFFDLY         [ 7: 2] ( 6b) = 0
      // BITSEL         [12: 8] ( 5b) = 0
      // OFFSET         [17:13] ( 5b) = 0
      // MUX            [18:18] ( 1b) = 0
      // INC            [19:19] ( 1b) = 0
      // DEC            [20:20] ( 1b) = 0
      // CLRDLY         [21:21] ( 1b) = 1    // Forces a reset
    cvmx_wait (100 * MS);

      // Setup the CLKDLY right in the middle
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x00000830);
      // SRXDLCK        [ 0: 0] ( 1b) = 0
      // RCVTRN         [ 1: 1] ( 1b) = 0
      // DRPTRN         [ 2: 2] ( 1b) = 0
      // SNDTRN         [ 3: 3] ( 1b) = 0
      // STATRCV        [ 4: 4] ( 1b) = 1    // Enable status channel Rx
      // STATDRV        [ 5: 5] ( 1b) = 1    // Enable status channel Tx
      // RUNBIST        [ 6: 6] ( 1b) = 0
      // CLKDLY         [11: 7] ( 5b) = 10   // 16 is the middle of the range
      // SRXLCK         [12:12] ( 1b) = 0
      // STXLCK         [13:13] ( 1b) = 0
      // SEETRN         [14:14] ( 1b) = 0
    cvmx_wait (100 * MS);

      // Reset SRX0 DLL
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x00000831);
      // SRXDLCK        [ 0: 0] ( 1b) = 1    // Restart the DLL
      // RCVTRN         [ 1: 1] ( 1b) = 0
      // DRPTRN         [ 2: 2] ( 1b) = 0
      // SNDTRN         [ 3: 3] ( 1b) = 0
      // STATRCV        [ 4: 4] ( 1b) = 1
      // STATDRV        [ 5: 5] ( 1b) = 1
      // RUNBIST        [ 6: 6] ( 1b) = 0
      // CLKDLY         [11: 7] ( 5b) = 10
      // SRXLCK         [12:12] ( 1b) = 0
      // STXLCK         [13:13] ( 1b) = 0
      // SEETRN         [14:14] ( 1b) = 0

      // Waiting for Inf0 Spi4 RX DLL to lock
      // -------------------------------------------------
    cvmx_wait (100 * MS);

      // Enable dynamic alignment
      // -------------------------------------------------
    spxx_trn4_ctl.u64 = 0;
    spxx_trn4_ctl.s.mux_en   = 1;
    spxx_trn4_ctl.s.macro_en = 1;
    spxx_trn4_ctl.s.maxdist  = 16;
    spxx_trn4_ctl.s.jitter   = 1;
    cvmx_write_csr (CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
      // MUX_EN         [ 0: 0] ( 1b) = 1
      // MACRO_EN       [ 1: 1] ( 1b) = 1
      // MAXDIST        [ 6: 2] ( 5b) = 16
      // SET_BOOT       [ 7: 7] ( 1b) = 0
      // CLR_BOOT       [ 8: 8] ( 1b) = 1
      // JITTER         [11: 9] ( 3b) = 1
      // TRNTEST        [12:12] ( 1b) = 0
    cvmx_write_csr (CVMX_SPXX_DBG_DESKEW_CTL(interface), 0x0);
      // DLLDIS         [ 0: 0] ( 1b) = 0
      // DLLFRC         [ 1: 1] ( 1b) = 0
      // OFFDLY         [ 7: 2] ( 6b) = 0
      // BITSEL         [12: 8] ( 5b) = 0
      // OFFSET         [17:13] ( 5b) = 0
      // MUX            [18:18] ( 1b) = 0
      // INC            [19:19] ( 1b) = 0
      // DEC            [20:20] ( 1b) = 0
      // CLRDLY         [21:21] ( 1b) = 0

    if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
          // SRX0 Ports
          // -------------------------------------------------
        cvmx_write_csr (CVMX_SRXX_COM_CTL(interface), 0x00000090);
          // INF_EN         [ 0: 0] ( 1b) = 0
          // ST_EN          [ 3: 3] ( 1b) = 0
          // PRTS           [ 9: 4] ( 6b) = 9

          // SRX0 Calendar Table
          // -------------------------------------------------
        cvmx_write_csr (CVMX_SRXX_SPI4_CALX(0, interface), 0x00013210);
          // PRT0           [ 4: 0] ( 5b) = 0
          // PRT1           [ 9: 5] ( 5b) = 1
          // PRT2           [14:10] ( 5b) = 2
          // PRT3           [19:15] ( 5b) = 3
          // ODDPAR         [20:20] ( 1b) = 1
        cvmx_write_csr (CVMX_SRXX_SPI4_CALX(1, interface), 0x00017654);
          // PRT0           [ 4: 0] ( 5b) = 4
          // PRT1           [ 9: 5] ( 5b) = 5
          // PRT2           [14:10] ( 5b) = 6
          // PRT3           [19:15] ( 5b) = 7
          // ODDPAR         [20:20] ( 1b) = 1
        cvmx_write_csr (CVMX_SRXX_SPI4_CALX(2, interface), 0x00000098);
          // PRT0           [ 4: 0] ( 5b) = 8
          // PRT1           [ 9: 5] ( 5b) = 9
          // PRT2           [14:10] ( 5b) = 0
          // PRT3           [19:15] ( 5b) = 0
          // ODDPAR         [20:20] ( 1b) = 0
        cvmx_write_csr (CVMX_SRXX_SPI4_STAT(interface), (CAL_REP << 8) | CAL_LEN);
          // LEN            [ 7: 0] ( 8b) = a
          // M              [15: 8] ( 8b) = 1
    }

    if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
          // STX0 Config
          // -------------------------------------------------
        cvmx_write_csr (CVMX_STXX_ARB_CTL(interface), 0x0);
          // IGNTPA         [ 3: 3] ( 1b) = 0
          // MINTRN         [ 5: 5] ( 1b) = 0
        cvmx_write_csr (CVMX_GMXX_TX_SPI_MAX(interface), 0x0408);
          // MAX2           [15: 8] ( 8b) = 4
          // MAX1           [ 7: 0] ( 8b) = 8
        cvmx_write_csr (CVMX_GMXX_TX_SPI_THRESH(interface), 0x4);
          // THRESH         [ 5: 0] ( 6b) = 4
        cvmx_write_csr (CVMX_GMXX_TX_SPI_CTL(interface), 0x0);
          // ENFORCE        [ 2: 2] ( 1b) = 0
          // TPA_CLR        [ 1: 1] ( 1b) = 0
          // CONT_PKT       [ 0: 0] ( 1b) = 0

          // STX0 Training Control
          // -------------------------------------------------
        stxx_spi4_dat.u64 = 0;
        stxx_spi4_dat.s.alpha = 32;      /* Minimum needed by dynamic alignment */
        stxx_spi4_dat.s.max_t = 0xFFFF;  /* Minimum interval is 0x20 */
        cvmx_write_csr (CVMX_STXX_SPI4_DAT(interface), stxx_spi4_dat.u64);
          // MAX_T          [15: 0] (16b) = 0
          // ALPHA          [31:16] (16b) = 0

          // STX0 Calendar Table
          // -------------------------------------------------
        cvmx_write_csr (CVMX_STXX_SPI4_CALX(0, interface), 0x00013210);
          // PRT0           [ 4: 0] ( 5b) = 0
          // PRT1           [ 9: 5] ( 5b) = 1
          // PRT2           [14:10] ( 5b) = 2
          // PRT3           [19:15] ( 5b) = 3
          // ODDPAR         [20:20] ( 1b) = 1
        cvmx_write_csr (CVMX_STXX_SPI4_CALX(1, interface), 0x00017654);
          // PRT0           [ 4: 0] ( 5b) = 4
          // PRT1           [ 9: 5] ( 5b) = 5
          // PRT2           [14:10] ( 5b) = 6
          // PRT3           [19:15] ( 5b) = 7
          // ODDPAR         [20:20] ( 1b) = 1
        cvmx_write_csr (CVMX_STXX_SPI4_CALX(2, interface), 0x00000098);
          // PRT0           [ 4: 0] ( 5b) = 8
          // PRT1           [ 9: 5] ( 5b) = 9
          // PRT2           [14:10] ( 5b) = 0
          // PRT3           [19:15] ( 5b) = 0
          // ODDPAR         [20:20] ( 1b) = 0
        cvmx_write_csr (CVMX_STXX_SPI4_STAT(interface), (CAL_REP << 8) | CAL_LEN);
          // LEN            [ 7: 0] ( 8b) = a
          // M              [15: 8] ( 8b) = 1
    }

      /* Regardless of operating mode, both Tx and Rx clocks must be present
       * for the SPI interface to operate.
       */
    cvmx_dprintf ("SPI%d: Waiting to see TsClk...\n", interface);
    count = 0;
    timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
    do {  /* Do we see the TsClk transitioning? */
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        count = count + 1;
#ifdef DEBUG
        if ((count % 5000000) == 10) {
	    cvmx_dprintf ("SPI%d: CLK_STAT 0x%016llX\n"
                    "  s4 (%d,%d) d4 (%d,%d)\n",
                    interface, (unsigned long long)stat.u64,
                    stat.s.s4clk0, stat.s.s4clk1,
                    stat.s.d4clk0, stat.s.d4clk1);
        }
#endif
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.s4clk0 == 0 || stat.s.s4clk1 == 0);

    cvmx_dprintf ("SPI%d: Waiting to see RsClk...\n", interface);
    timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
    do {  /* Do we see the RsClk transitioning? */
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.d4clk0 == 0 || stat.s.d4clk1 == 0);


      // SRX0 & STX0 Inf0 Links are configured - begin training
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x0000083f);
      // SRXDLCK        [ 0: 0] ( 1b) = 1
      // RCVTRN         [ 1: 1] ( 1b) = 1
      // DRPTRN         [ 2: 2] ( 1b) = 1    ...was 0
      // SNDTRN         [ 3: 3] ( 1b) = 1
      // STATRCV        [ 4: 4] ( 1b) = 1
      // STATDRV        [ 5: 5] ( 1b) = 1
      // RUNBIST        [ 6: 6] ( 1b) = 0
      // CLKDLY         [11: 7] ( 5b) = 10
      // SRXLCK         [12:12] ( 1b) = 0
      // STXLCK         [13:13] ( 1b) = 0
      // SEETRN         [14:14] ( 1b) = 0
    cvmx_wait (1000 * MS);

      // SRX0 clear the boot bit
      // -------------------------------------------------
    spxx_trn4_ctl.s.clr_boot = 1;
    cvmx_write_csr (CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);

      // Wait for the training sequence to complete
      // -------------------------------------------------
      // SPX0_CLK_STAT - SPX0_CLK_STAT[SRXTRN] should be 1 (bit8)
    cvmx_dprintf ("SPI%d: Waiting for training\n", interface);
    cvmx_wait (1000 * MS);
    timeout_time = cvmx_get_cycle() + 1000ull * MS * 600;  /* Wait a really long time here */
    do {
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.srxtrn == 0);

    if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
          // SRX0 interface should be good, send calendar data
          // -------------------------------------------------
        cvmx_dprintf ("SPI%d: Rx is synchronized, start sending calendar data\n", interface);
        srxx_com_ctl.u64 = 0;
        srxx_com_ctl.s.prts   = 9;
        srxx_com_ctl.s.inf_en = 1;
        srxx_com_ctl.s.st_en  = 1;
        cvmx_write_csr (CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
    }

    if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
          // STX0 has achieved sync
          // The corresponding board should be sending calendar data
          // Enable the STX0 STAT receiver.
          // -------------------------------------------------
        stxx_com_ctl.u64 = 0;
        stxx_com_ctl.s.inf_en = 1;
        stxx_com_ctl.s.st_en = 1;
        cvmx_write_csr (CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);

          // Waiting for calendar sync on STX0 STAT
          // -------------------------------------------------
          // SPX0_CLK_STAT - SPX0_CLK_STAT[STXCAL] should be 1 (bit10)
        cvmx_dprintf ("SPI%d: Waiting to sync on STX[%d] STAT\n", interface, interface);
        timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
        do {
            stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT (interface));
            if (cvmx_get_cycle() > timeout_time)
            {
                cvmx_dprintf ("SPI%d: Timeout\n", interface);
                return -1;
            }
        } while (stat.s.stxcal == 0);
    }

      // Inf0 is synched
      // -------------------------------------------------
      // SPX0 is up
    if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
        srxx_com_ctl.s.inf_en = 1;
        cvmx_write_csr (CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
        cvmx_dprintf ("SPI%d: Rx is now up\n", interface);
    }

    if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
        stxx_com_ctl.s.inf_en = 1;
        cvmx_write_csr (CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
        cvmx_dprintf ("SPI%d: Tx is now up\n", interface);
    }

    cvmx_write_csr (CVMX_GMXX_RXX_FRM_MIN (0,interface), 40);
    cvmx_write_csr (CVMX_GMXX_RXX_FRM_MAX (0,interface), 64*1024 - 4);
    cvmx_write_csr (CVMX_GMXX_RXX_JABBER  (0,interface), 64*1024 - 4);

    return 0;
}
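A hedged usage sketch for the routine above; the header names are assumptions, and full-duplex operation is expressed by combining the two halfplex flags the function tests.
/* Usage sketch, not part of the SDK sources: bring up SPI interface 0 in
 * full duplex with a 10 second clock-sync timeout. Header names and the
 * "OR of the halfplex flags" convention are assumptions. */
#include "cvmx.h"
#include "cvmx-spi.h"

static int bring_up_spi0(void)
{
    cvmx_spi_mode_t mode =
        (cvmx_spi_mode_t)(CVMX_SPI_MODE_RX_HALFPLEX | CVMX_SPI_MODE_TX_HALFPLEX);

    if (cvmx_spi_start_interface(0, mode, 10) != 0) {
        cvmx_dprintf("SPI0: bring-up failed\n");
        return -1;
    }
    return 0;
}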
Example #15
0
int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
{
	uint64_t cur_addr;
	uint64_t prev_addr = 0;	/* zero is invalid */
	int retval = 0;

#ifdef DEBUG
	cvmx_dprintf("__cvmx_bootmem_phy_free addr: 0x%llx, size: 0x%llx\n",
		     (unsigned long long)phy_addr, (unsigned long long)size);
#endif
	if (cvmx_bootmem_desc->major_version > 3) {
		cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
			     "version: %d.%d at addr: %p\n",
			     (int)cvmx_bootmem_desc->major_version,
			     (int)cvmx_bootmem_desc->minor_version,
			     cvmx_bootmem_desc);
		return 0;
	}

	/* 0 is not a valid size for this allocator */
	if (!size)
		return 0;

	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_bootmem_lock();
	cur_addr = cvmx_bootmem_desc->head_addr;
	if (cur_addr == 0 || phy_addr < cur_addr) {
		/* add at front of list - special case with changing head ptr */
		if (cur_addr && phy_addr + size > cur_addr)
			goto bootmem_free_done;	/* error, overlapping section */
		else if (phy_addr + size == cur_addr) {
			/* Add to front of existing first block */
			cvmx_bootmem_phy_set_next(phy_addr,
						  cvmx_bootmem_phy_get_next
						  (cur_addr));
			cvmx_bootmem_phy_set_size(phy_addr,
						  cvmx_bootmem_phy_get_size
						  (cur_addr) + size);
			cvmx_bootmem_desc->head_addr = phy_addr;

		} else {
			/* New block before first block.  OK if cur_addr is 0 */
			cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
			cvmx_bootmem_phy_set_size(phy_addr, size);
			cvmx_bootmem_desc->head_addr = phy_addr;
		}
		retval = 1;
		goto bootmem_free_done;
	}

	/* Find place in list to add block */
	while (cur_addr && phy_addr > cur_addr) {
		prev_addr = cur_addr;
		cur_addr = cvmx_bootmem_phy_get_next(cur_addr);
	}

	if (!cur_addr) {
		/*
		 * We have reached the end of the list, add on to end,
		 * checking to see if we need to combine with last
		 * block
		 */
		if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) ==
		    phy_addr) {
			cvmx_bootmem_phy_set_size(prev_addr,
						  cvmx_bootmem_phy_get_size
						  (prev_addr) + size);
		} else {
			cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
			cvmx_bootmem_phy_set_size(phy_addr, size);
			cvmx_bootmem_phy_set_next(phy_addr, 0);
		}
		retval = 1;
		goto bootmem_free_done;
	} else {
		/*
		 * insert between prev and cur nodes, checking for
		 * merge with either/both.
		 */
		if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) ==
		    phy_addr) {
			/* Merge with previous */
			cvmx_bootmem_phy_set_size(prev_addr,
						  cvmx_bootmem_phy_get_size
						  (prev_addr) + size);
			if (phy_addr + size == cur_addr) {
				/* Also merge with current */
				cvmx_bootmem_phy_set_size(prev_addr,
					cvmx_bootmem_phy_get_size(cur_addr) +
					cvmx_bootmem_phy_get_size(prev_addr));
				cvmx_bootmem_phy_set_next(prev_addr,
					cvmx_bootmem_phy_get_next(cur_addr));
			}
			retval = 1;
			goto bootmem_free_done;
		} else if (phy_addr + size == cur_addr) {
			/* Merge with current */
			cvmx_bootmem_phy_set_size(phy_addr,
						  cvmx_bootmem_phy_get_size
						  (cur_addr) + size);
			cvmx_bootmem_phy_set_next(phy_addr,
						  cvmx_bootmem_phy_get_next
						  (cur_addr));
			cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
			retval = 1;
			goto bootmem_free_done;
		}

		/* It is a standalone block, add in between prev and cur */
		cvmx_bootmem_phy_set_size(phy_addr, size);
		cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
		cvmx_bootmem_phy_set_next(prev_addr, phy_addr);

	}
	retval = 1;

bootmem_free_done:
	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_bootmem_unlock();
	return retval;

}
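A standalone sketch (not SDK code) of the coalescing decision this free routine makes; addresses and sizes below are made up for the demonstration.
/* Standalone sketch of the merge cases in __cvmx_bootmem_phy_free(). */
#include <stdio.h>
#include <stdint.h>

static const char *coalesce_case(uint64_t prev_addr, uint64_t prev_size,
                                 uint64_t addr, uint64_t size,
                                 uint64_t next_addr)
{
    int merge_prev = (prev_addr + prev_size == addr);  /* touches block before */
    int merge_next = (addr + size == next_addr);       /* touches block after  */

    if (merge_prev && merge_next) return "merge with previous and next";
    if (merge_prev)               return "merge with previous";
    if (merge_next)               return "merge with next";
    return "standalone block";
}

int main(void)
{
    /* Freeing [0x2000, 0x3000) between [0x1000, 0x2000) and [0x3000, ...) */
    printf("%s\n", coalesce_case(0x1000, 0x1000, 0x2000, 0x1000, 0x3000));
    return 0;
}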
Example #16
0
File: cvmx-qlm.c Project: xtra72/s805
/**
 * Initialize the QLM layer
 */
void cvmx_qlm_init(void)
{
	int qlm;
	int qlm_jtag_length;
	char *qlm_jtag_name = "cvmx_qlm_jtag";
	int qlm_jtag_size = CVMX_QLM_JTAG_UINT32 * 8 * sizeof(uint32_t);
	static uint64_t qlm_base = 0;
	const cvmx_bootmem_named_block_desc_t *desc;

	if (OCTEON_IS_OCTEON3())
		return;

#ifndef CVMX_BUILD_FOR_LINUX_HOST
	/* Skip actual JTAG accesses on simulator */
	if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
		return;
#endif

	qlm_jtag_length = cvmx_qlm_jtag_get_length();

	if (sizeof(uint32_t) * qlm_jtag_length > (int)sizeof(__cvmx_qlm_jtag_xor_ref[0]) * 8) {
		cvmx_dprintf("ERROR: cvmx_qlm_init: JTAG chain larger than XOR ref size\n");
		return;
	}

	/* No need to initialize the initial JTAG state if cvmx_qlm_jtag
	   named block is already created. */
	if ((desc = cvmx_bootmem_find_named_block(qlm_jtag_name)) != NULL) {
#ifdef CVMX_BUILD_FOR_LINUX_HOST
		char buffer[qlm_jtag_size];

		octeon_remote_read_mem(buffer, desc->base_addr, qlm_jtag_size);
		memcpy(__cvmx_qlm_jtag_xor_ref, buffer, qlm_jtag_size);
#else
		__cvmx_qlm_jtag_xor_ref = cvmx_phys_to_ptr(desc->base_addr);
#endif
		/* Initialize the internal JTAG */
		cvmx_helper_qlm_jtag_init();
		return;
	}

	/* Create named block to store the initial JTAG state. */
	qlm_base = cvmx_bootmem_phy_named_block_alloc(qlm_jtag_size, 0, 0, 128, qlm_jtag_name, CVMX_BOOTMEM_FLAG_END_ALLOC);

	if (qlm_base == -1ull) {
		cvmx_dprintf("ERROR: cvmx_qlm_init: Error in creating %s named block\n", qlm_jtag_name);
		return;
	}
#ifndef CVMX_BUILD_FOR_LINUX_HOST
	__cvmx_qlm_jtag_xor_ref = cvmx_phys_to_ptr(qlm_base);
#endif
	memset(__cvmx_qlm_jtag_xor_ref, 0, qlm_jtag_size);

	/* Initialize the internal JTAG */
	cvmx_helper_qlm_jtag_init();

	/* Read the XOR defaults for the JTAG chain */
	for (qlm = 0; qlm < cvmx_qlm_get_num(); qlm++) {
		int i;
		int num_lanes = cvmx_qlm_get_lanes(qlm);
		/* Shift all zeros in the chain to make sure all fields are at
		   reset defaults */
		cvmx_helper_qlm_jtag_shift_zeros(qlm, qlm_jtag_length * num_lanes);
		cvmx_helper_qlm_jtag_update(qlm);

		/* Capture the reset defaults */
		cvmx_helper_qlm_jtag_capture(qlm);
		/* Save the reset defaults. This will shift out too much data, but
		   the extra zeros don't hurt anything */
		for (i = 0; i < CVMX_QLM_JTAG_UINT32; i++)
			__cvmx_qlm_jtag_xor_ref[qlm][i] = cvmx_helper_qlm_jtag_shift(qlm, 32, 0);
	}

#ifdef CVMX_BUILD_FOR_LINUX_HOST
	/* Update the initial state for oct-remote utils. */
	{
		char buffer[qlm_jtag_size];

		memcpy(buffer, &__cvmx_qlm_jtag_xor_ref, qlm_jtag_size);
		octeon_remote_write_mem(qlm_base, buffer, qlm_jtag_size);
	}
#endif

	/* Apply all QLM errata workarounds. */
	__cvmx_qlm_speed_tweak();
	__cvmx_qlm_pcie_idle_dac_tweak();
}
Example #17
0
int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
					   uint64_t max_addr,
					   uint64_t alignment,
					   char *name,
					   uint32_t flags)
{
	int64_t addr_allocated;
	struct cvmx_bootmem_named_block_desc *named_block_desc_ptr;

#ifdef DEBUG
	cvmx_dprintf("cvmx_bootmem_phy_named_block_alloc: size: 0x%llx, min: "
		     "0x%llx, max: 0x%llx, align: 0x%llx, name: %s\n",
		     (unsigned long long)size,
		     (unsigned long long)min_addr,
		     (unsigned long long)max_addr,
		     (unsigned long long)alignment,
		     name);
#endif
	if (cvmx_bootmem_desc->major_version != 3) {
		cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: "
			     "%d.%d at addr: %p\n",
			     (int)cvmx_bootmem_desc->major_version,
			     (int)cvmx_bootmem_desc->minor_version,
			     cvmx_bootmem_desc);
		return -1;
	}

	/*
	 * Take lock here, as name lookup/block alloc/name add need to
	 * be atomic.
	 */
	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));

	/* Get pointer to first available named block descriptor */
	named_block_desc_ptr =
		cvmx_bootmem_phy_named_block_find(NULL,
						  flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);

	/*
	 * Check to see if name already in use, return error if name
	 * not available or no more room for blocks.
	 */
	if (cvmx_bootmem_phy_named_block_find(name,
					      flags | CVMX_BOOTMEM_FLAG_NO_LOCKING) || !named_block_desc_ptr) {
		if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
			cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
		return -1;
	}


	/*
	 * Round size up to a multiple of the minimum alignment.  We need
	 * the actual size allocated to allow blocks to be coalesced when
	 * they are freed.  The alloc routine does the same rounding up on
	 * all allocations.
	 */
	size = ALIGN(size, CVMX_BOOTMEM_ALIGNMENT_SIZE);

	addr_allocated = cvmx_bootmem_phy_alloc(size, min_addr, max_addr,
						alignment,
						flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
	if (addr_allocated >= 0) {
		named_block_desc_ptr->base_addr = addr_allocated;
		named_block_desc_ptr->size = size;
		strncpy(named_block_desc_ptr->name, name,
			cvmx_bootmem_desc->named_block_name_len);
		named_block_desc_ptr->name[cvmx_bootmem_desc->named_block_name_len - 1] = 0;
	}

	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
	return addr_allocated;
}
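A hedged usage sketch mirroring the call made from cvmx_qlm_init(); the block name and header name are illustrative only.
/* Usage sketch, not SDK code: allocate a 4 KB named block anywhere in
 * memory with 128-byte alignment and check for failure. The header name
 * and the "my_scratch" block name are assumptions. */
#include "cvmx-bootmem.h"

static int64_t alloc_scratch_block(void)
{
    int64_t addr = cvmx_bootmem_phy_named_block_alloc(4096, 0, 0, 128,
                                                      (char *)"my_scratch", 0);
    if (addr < 0)
        cvmx_dprintf("ERROR: could not allocate named block\n");
    return addr;
}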
/**
 * Return the MII PHY address associated with the given IPD
 * port. A result of -1 means there isn't a MII capable PHY
 * connected to this port. On chips supporting multiple MII
 * busses the bus number is encoded in bits <15:8>.
 *
 * This function must be modified for every new Octeon board.
 * Internally it uses switch statements based on the cvmx_sysinfo
 * data to determine board types and revisions. It relies on the
 * fact that every Octeon board receives a unique board type
 * enumeration from the bootloader.
 *
 * @ipd_port: Octeon IPD port to get the MII address for.
 *
 * Returns MII PHY address and bus number or -1.
 */
int cvmx_helper_board_get_mii_address(int ipd_port)
{
	switch (cvmx_sysinfo_get()->board_type) {
	case CVMX_BOARD_TYPE_SIM:
		/* Simulator doesn't have MII */
		return -1;
	case CVMX_BOARD_TYPE_EBT3000:
	case CVMX_BOARD_TYPE_EBT5800:
	case CVMX_BOARD_TYPE_THUNDER:
	case CVMX_BOARD_TYPE_NICPRO2:
		/* Interface 0 is SPI4, interface 1 is RGMII */
		if ((ipd_port >= 16) && (ipd_port < 20))
			return ipd_port - 16;
		else
			return -1;
	case CVMX_BOARD_TYPE_KODAMA:
	case CVMX_BOARD_TYPE_EBH3100:
	case CVMX_BOARD_TYPE_HIKARI:
	case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
	case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
	case CVMX_BOARD_TYPE_CN3020_EVB_HS5:
		/*
		 * Port 0 is WAN connected to a PHY, Port 1 is GMII
		 * connected to a switch
		 */
		if (ipd_port == 0)
			return 4;
		else if (ipd_port == 1)
			return 9;
		else
			return -1;
	case CVMX_BOARD_TYPE_NAC38:
		/* Board has 8 RGMII ports PHYs are 0-7 */
		if ((ipd_port >= 0) && (ipd_port < 4))
			return ipd_port;
		else if ((ipd_port >= 16) && (ipd_port < 20))
			return ipd_port - 16 + 4;
		else
			return -1;
	case CVMX_BOARD_TYPE_EBH3000:
		/* Board has dual SPI4 and no PHYs */
		return -1;
	case CVMX_BOARD_TYPE_EBH5200:
	case CVMX_BOARD_TYPE_EBH5201:
	case CVMX_BOARD_TYPE_EBT5200:
		/*
		 * Board has 4 SGMII ports. The PHYs start right after the MII
		 * ports MII0 = 0, MII1 = 1, SGMII = 2-5.
		 */
		if ((ipd_port >= 0) && (ipd_port < 4))
			return ipd_port + 2;
		else
			return -1;
	case CVMX_BOARD_TYPE_EBH5600:
	case CVMX_BOARD_TYPE_EBH5601:
	case CVMX_BOARD_TYPE_EBH5610:
		/*
		 * Board has 8 SGMII ports. 4 connect out, two connect
		 * to a switch, and 2 loop to each other
		 */
		if ((ipd_port >= 0) && (ipd_port < 4))
			return ipd_port + 1;
		else
			return -1;
	case CVMX_BOARD_TYPE_CUST_NB5:
		if (ipd_port == 2)
			return 4;
		else
			return -1;
	case CVMX_BOARD_TYPE_NIC_XLE_4G:
		/* Board has 4 SGMII ports connected to QLM3 (interface 1) */
		if ((ipd_port >= 16) && (ipd_port < 20))
			return ipd_port - 16 + 1;
		else
			return -1;
	case CVMX_BOARD_TYPE_BBGW_REF:
		/*
		 * No PHYs are connected to Octeon, everything is
		 * through switch.
		 */
		return -1;

	case CVMX_BOARD_TYPE_CUST_WSX16:
		if (ipd_port >= 0 && ipd_port <= 3)
			return ipd_port;
		else if (ipd_port >= 16 && ipd_port <= 19)
			return ipd_port - 16 + 4;
		else
			return -1;
	}

	/* Some unknown board. Somebody forgot to update this function... */
	cvmx_dprintf
	    ("cvmx_helper_board_get_mii_address: Unknown board type %d\n",
	     cvmx_sysinfo_get()->board_type);
	return -1;
}
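A hedged usage sketch that decodes the return value as documented above (bus number in bits <15:8>, PHY address in the low byte).
/* Usage sketch, not SDK code. */
static void show_phy_for_port(int ipd_port)
{
    int addr = cvmx_helper_board_get_mii_address(ipd_port);

    if (addr < 0) {
        cvmx_dprintf("Port %d: no MII PHY\n", ipd_port);
        return;
    }
    cvmx_dprintf("Port %d: MII bus %d, PHY address %d\n",
                 ipd_port, (addr >> 8) & 0xff, addr & 0xff);
}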
Example #19
0
/**
 * @INTERNAL
 * Return the link state of an IPD/PKO port as returned by ILK link status.
 *
 * @param ipd_port IPD/PKO port to query
 *
 * @return Link state
 */
cvmx_helper_link_info_t __cvmx_helper_ilk_link_get(int ipd_port)
{
    cvmx_helper_link_info_t result;
    int interface = cvmx_helper_get_interface_num(ipd_port);
    int retry_count = 0;
    cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
    cvmx_ilk_rxx_int_t ilk_rxx_int;
    int lanes = 0;

    result.u64 = 0;
    interface -= CVMX_ILK_GBL_BASE;

retry:
    retry_count++;
    if (retry_count > 10)
        goto out;

    ilk_rxx_cfg1.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG1(interface));
    ilk_rxx_int.u64 = cvmx_read_csr (CVMX_ILK_RXX_INT(interface));

    /* Clear all RX status bits */
    if (ilk_rxx_int.u64)
        cvmx_write_csr(CVMX_ILK_RXX_INT(interface), ilk_rxx_int.u64);

    if (ilk_rxx_cfg1.s.rx_bdry_lock_ena == 0)
    {
        /* We need to start looking for word boundary lock */
        ilk_rxx_cfg1.s.rx_bdry_lock_ena = cvmx_ilk_get_intf_ln_msk(interface);
        ilk_rxx_cfg1.s.rx_align_ena = 0;
        cvmx_write_csr(CVMX_ILK_RXX_CFG1(interface), ilk_rxx_cfg1.u64);
        //cvmx_dprintf("ILK%d: Looking for word boundary lock\n", interface);
        goto retry;
    }

    if (ilk_rxx_cfg1.s.rx_align_ena == 0)
    {
        if (ilk_rxx_int.s.word_sync_done)
        {
            ilk_rxx_cfg1.s.rx_align_ena = 1;
            cvmx_write_csr(CVMX_ILK_RXX_CFG1(interface), ilk_rxx_cfg1.u64);
            //printf("ILK%d: Looking for lane alignment\n", interface);
            goto retry;
        }
        goto out;
    }

    if (ilk_rxx_int.s.lane_align_fail)
    {
        ilk_rxx_cfg1.s.rx_bdry_lock_ena = 0;
        ilk_rxx_cfg1.s.rx_align_ena = 0;
        cvmx_write_csr(CVMX_ILK_RXX_CFG1(interface), ilk_rxx_cfg1.u64);
        cvmx_dprintf("ILK%d: Lane alignment failed\n", interface);
        goto out;
    }

    if (ilk_rxx_int.s.lane_align_done)
    {
        //cvmx_dprintf("ILK%d: Lane alignment complete\n", interface);
    }

    lanes = cvmx_pop(ilk_rxx_cfg1.s.rx_bdry_lock_ena);

    result.s.link_up = 1;
    result.s.full_duplex = 1;
    result.s.speed = cvmx_qlm_get_gbaud_mhz(1+interface) * 64 / 67;
    result.s.speed *= lanes;

out:
    /* If the link is down we will force disable the RX path. If it is up, we'll
        set it to match the TX state set by the if_enable call */
    if (result.s.link_up)
    {
        cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
        ilk_txx_cfg1.u64 = cvmx_read_csr(CVMX_ILK_TXX_CFG1(interface));
        ilk_rxx_cfg1.s.pkt_ena = ilk_txx_cfg1.s.pkt_ena;
        cvmx_write_csr(CVMX_ILK_RXX_CFG1(interface), ilk_rxx_cfg1.u64);
        //cvmx_dprintf("ILK%d: link up, %d Mbps, Full duplex mode, %d lanes\n", interface, result.s.speed, lanes);  
    }
    else
    {
        ilk_rxx_cfg1.s.pkt_ena = 0;
        cvmx_write_csr(CVMX_ILK_RXX_CFG1(interface), ilk_rxx_cfg1.u64);
        //cvmx_dprintf("ILK link down\n");
    }
    return result;
}
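A hedged usage sketch that reports the link state using the result fields filled in above.
/* Usage sketch, not SDK code. */
static void report_ilk_link(int ipd_port)
{
    cvmx_helper_link_info_t link = __cvmx_helper_ilk_link_get(ipd_port);

    if (link.s.link_up)
        cvmx_dprintf("IPD port %d: up, %d Mbps, %s duplex\n", ipd_port,
                     link.s.speed, link.s.full_duplex ? "full" : "half");
    else
        cvmx_dprintf("IPD port %d: link down\n", ipd_port);
}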
Example #20
0
File: cvmx-ilk.c Project: 2asoft/freebsd
/**
 * Show channel statistics
 *
 * @param interface The identifier of the packet interface whose statistics
 *                  are shown. cn68xx has 2 interfaces: ilk0 and ilk1.
 * @param pstats A pointer to cvmx_ilk_stats_ctrl_t that specifies which
 *               logical channels to access
 *
 * @return nothing
 */
void cvmx_ilk_show_stats (int interface, cvmx_ilk_stats_ctrl_t *pstats)
{
    unsigned int i;
    cvmx_ilk_rxx_idx_stat0_t ilk_rxx_idx_stat0;
    cvmx_ilk_rxx_idx_stat1_t ilk_rxx_idx_stat1;
    cvmx_ilk_rxx_mem_stat0_t ilk_rxx_mem_stat0;
    cvmx_ilk_rxx_mem_stat1_t ilk_rxx_mem_stat1;

    cvmx_ilk_txx_idx_stat0_t ilk_txx_idx_stat0;
    cvmx_ilk_txx_idx_stat1_t ilk_txx_idx_stat1;
    cvmx_ilk_txx_mem_stat0_t ilk_txx_mem_stat0;
    cvmx_ilk_txx_mem_stat1_t ilk_txx_mem_stat1;

    if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
        return;

    if (interface >= CVMX_NUM_ILK_INTF)
        return;

    if (pstats == NULL)
        return;

    /* discrete channels */
    if (pstats->chan_list != NULL)
    {
        for (i = 0; i < pstats->num_chans; i++)
        {

            /* get the number of rx packets */
            ilk_rxx_idx_stat0.u64 = 0;
            ilk_rxx_idx_stat0.s.index = *pstats->chan_list;
            ilk_rxx_idx_stat0.s.clr = pstats->clr_on_rd;
            cvmx_write_csr (CVMX_ILK_RXX_IDX_STAT0(interface),
                            ilk_rxx_idx_stat0.u64);
            ilk_rxx_mem_stat0.u64 = cvmx_read_csr
                                    (CVMX_ILK_RXX_MEM_STAT0(interface));

            /* get the number of rx bytes */
            ilk_rxx_idx_stat1.u64 = 0;
            ilk_rxx_idx_stat1.s.index = *pstats->chan_list;
            ilk_rxx_idx_stat1.s.clr = pstats->clr_on_rd;
            cvmx_write_csr (CVMX_ILK_RXX_IDX_STAT1(interface),
                            ilk_rxx_idx_stat1.u64);
            ilk_rxx_mem_stat1.u64 = cvmx_read_csr
                                    (CVMX_ILK_RXX_MEM_STAT1(interface));

            cvmx_dprintf ("ILK%d Channel%d Rx: %d packets %d bytes\n", interface,
                    *pstats->chan_list, ilk_rxx_mem_stat0.s.rx_pkt,
                    (unsigned int) ilk_rxx_mem_stat1.s.rx_bytes);

            /* get the number of tx packets */
            ilk_txx_idx_stat0.u64 = 0;
            ilk_txx_idx_stat0.s.index = *pstats->chan_list;
            ilk_txx_idx_stat0.s.clr = pstats->clr_on_rd;
            cvmx_write_csr (CVMX_ILK_TXX_IDX_STAT0(interface),
                            ilk_txx_idx_stat0.u64);
            ilk_txx_mem_stat0.u64 = cvmx_read_csr
                                    (CVMX_ILK_TXX_MEM_STAT0(interface));

            /* get the number of tx bytes */
            ilk_txx_idx_stat1.u64 = 0;
            ilk_txx_idx_stat1.s.index = *pstats->chan_list;
            ilk_txx_idx_stat1.s.clr = pstats->clr_on_rd;
            cvmx_write_csr (CVMX_ILK_TXX_IDX_STAT1(interface),
                            ilk_txx_idx_stat1.u64);
            ilk_txx_mem_stat1.u64 = cvmx_read_csr
                                    (CVMX_ILK_TXX_MEM_STAT1(interface));

            cvmx_dprintf ("ILK%d Channel%d Tx: %d packets %d bytes\n", interface,
                    *pstats->chan_list, ilk_txx_mem_stat0.s.tx_pkt,
                    (unsigned int) ilk_txx_mem_stat1.s.tx_bytes);

            pstats->chan_list++;
        }
        return;
    }

    /* continuous channels */
    ilk_rxx_idx_stat0.u64 = 0;
    ilk_rxx_idx_stat0.s.index = pstats->chan_start;
    ilk_rxx_idx_stat0.s.inc = pstats->chan_step;
    ilk_rxx_idx_stat0.s.clr = pstats->clr_on_rd;
    cvmx_write_csr (CVMX_ILK_RXX_IDX_STAT0(interface), ilk_rxx_idx_stat0.u64);

    ilk_rxx_idx_stat1.u64 = 0;
    ilk_rxx_idx_stat1.s.index = pstats->chan_start;
    ilk_rxx_idx_stat1.s.inc = pstats->chan_step;
    ilk_rxx_idx_stat1.s.clr = pstats->clr_on_rd;
    cvmx_write_csr (CVMX_ILK_RXX_IDX_STAT1(interface), ilk_rxx_idx_stat1.u64);

    ilk_txx_idx_stat0.u64 = 0;
    ilk_txx_idx_stat0.s.index = pstats->chan_start;
    ilk_txx_idx_stat0.s.inc = pstats->chan_step;
    ilk_txx_idx_stat0.s.clr = pstats->clr_on_rd;
    cvmx_write_csr (CVMX_ILK_TXX_IDX_STAT0(interface), ilk_txx_idx_stat0.u64);

    ilk_txx_idx_stat1.u64 = 0;
    ilk_txx_idx_stat1.s.index = pstats->chan_start;
    ilk_txx_idx_stat1.s.inc = pstats->chan_step;
    ilk_txx_idx_stat1.s.clr = pstats->clr_on_rd;
    cvmx_write_csr (CVMX_ILK_TXX_IDX_STAT1(interface), ilk_txx_idx_stat1.u64);

    for (i = pstats->chan_start; i <= pstats->chan_end; i += pstats->chan_step)
    {
        ilk_rxx_mem_stat0.u64 = cvmx_read_csr
                                (CVMX_ILK_RXX_MEM_STAT0(interface));
        ilk_rxx_mem_stat1.u64 = cvmx_read_csr
                                (CVMX_ILK_RXX_MEM_STAT1(interface));
        cvmx_dprintf ("ILK%d Channel%d Rx: %d packets %d bytes\n", interface, i,
                ilk_rxx_mem_stat0.s.rx_pkt,
                (unsigned int) ilk_rxx_mem_stat1.s.rx_bytes);

        ilk_txx_mem_stat0.u64 = cvmx_read_csr
                                (CVMX_ILK_TXX_MEM_STAT0(interface));
        ilk_txx_mem_stat1.u64 = cvmx_read_csr
                                (CVMX_ILK_TXX_MEM_STAT1(interface));
        cvmx_dprintf ("ILK%d Channel%d Tx: %d packets %d bytes\n", interface, i,
                ilk_rxx_mem_stat0.s.rx_pkt,
                (unsigned int) ilk_rxx_mem_stat1.s.rx_bytes);
    }

    return;
}
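A hedged usage sketch using the continuous-channel path (chan_list == NULL); the field names come from the accesses in this function, but the full structure layout is not shown here, so treat them as assumptions.
/* Usage sketch, not SDK code: dump statistics for channels 0-7 of ilk0. */
static void dump_ilk0_stats(void)
{
    cvmx_ilk_stats_ctrl_t stats;

    stats.chan_list  = NULL;  /* continuous range, not a discrete list */
    stats.num_chans  = 0;
    stats.chan_start = 0;
    stats.chan_end   = 7;
    stats.chan_step  = 1;
    stats.clr_on_rd  = 0;     /* do not clear counters on read */
    cvmx_ilk_show_stats(0, &stats);
}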
int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode)
{
    union cvmx_spxx_dbg_deskew_ctl spxx_dbg_deskew_ctl;
    union cvmx_spxx_clk_ctl spxx_clk_ctl;
    union cvmx_spxx_bist_stat spxx_bist_stat;
    union cvmx_spxx_int_msk spxx_int_msk;
    union cvmx_stxx_int_msk stxx_int_msk;
    union cvmx_spxx_trn4_ctl spxx_trn4_ctl;
    int index;
    uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;


    spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface));
    cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
    stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface));
    cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);


    cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), 0);
    cvmx_write_csr(CVMX_STXX_COM_CTL(interface), 0);
    spxx_clk_ctl.u64 = 0;
    spxx_clk_ctl.s.runbist = 1;
    cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
    cvmx_wait(10 * MS);
    spxx_bist_stat.u64 = cvmx_read_csr(CVMX_SPXX_BIST_STAT(interface));
    if (spxx_bist_stat.s.stat0)
        cvmx_dprintf
        ("ERROR SPI%d: BIST failed on receive datapath FIFO\n",
         interface);
    if (spxx_bist_stat.s.stat1)
        cvmx_dprintf("ERROR SPI%d: BIST failed on RX calendar table\n",
                     interface);
    if (spxx_bist_stat.s.stat2)
        cvmx_dprintf("ERROR SPI%d: BIST failed on TX calendar table\n",
                     interface);


    for (index = 0; index < 32; index++) {
        union cvmx_srxx_spi4_calx srxx_spi4_calx;
        union cvmx_stxx_spi4_calx stxx_spi4_calx;

        srxx_spi4_calx.u64 = 0;
        srxx_spi4_calx.s.oddpar = 1;
        cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface),
                       srxx_spi4_calx.u64);

        stxx_spi4_calx.u64 = 0;
        stxx_spi4_calx.s.oddpar = 1;
        cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface),
                       stxx_spi4_calx.u64);
    }


    cvmx_write_csr(CVMX_SPXX_INT_REG(interface),
                   cvmx_read_csr(CVMX_SPXX_INT_REG(interface)));
    cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64);
    cvmx_write_csr(CVMX_STXX_INT_REG(interface),
                   cvmx_read_csr(CVMX_STXX_INT_REG(interface)));
    cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64);


    spxx_clk_ctl.u64 = 0;
    spxx_clk_ctl.s.seetrn = 0;
    spxx_clk_ctl.s.clkdly = 0x10;
    spxx_clk_ctl.s.runbist = 0;
    spxx_clk_ctl.s.statdrv = 0;

    spxx_clk_ctl.s.statrcv = 1;
    spxx_clk_ctl.s.sndtrn = 0;
    spxx_clk_ctl.s.drptrn = 0;
    spxx_clk_ctl.s.rcvtrn = 0;
    spxx_clk_ctl.s.srxdlck = 0;
    cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
    cvmx_wait(100 * MS);


    spxx_clk_ctl.s.srxdlck = 1;
    cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);


    cvmx_wait(100 * MS);


    spxx_trn4_ctl.s.trntest = 0;
    spxx_trn4_ctl.s.jitter = 1;
    spxx_trn4_ctl.s.clr_boot = 1;
    spxx_trn4_ctl.s.set_boot = 0;
    if (OCTEON_IS_MODEL(OCTEON_CN58XX))
        spxx_trn4_ctl.s.maxdist = 3;
    else
        spxx_trn4_ctl.s.maxdist = 8;
    spxx_trn4_ctl.s.macro_en = 1;
    spxx_trn4_ctl.s.mux_en = 1;
    cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);

    spxx_dbg_deskew_ctl.u64 = 0;
    cvmx_write_csr(CVMX_SPXX_DBG_DESKEW_CTL(interface),
                   spxx_dbg_deskew_ctl.u64);

    return 0;
}
Example #22
0
File: cvmx-ilk.c Project: 2asoft/freebsd
static void cvmx_ilk_reg_dump_rx (int interface)
{
    int i;
    cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
    cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
    cvmx_ilk_rxx_int_t ilk_rxx_int;
    cvmx_ilk_rxx_jabber_t ilk_rxx_jabber;
    cvmx_ilk_rx_lnex_cfg_t ilk_rx_lnex_cfg;
    cvmx_ilk_rx_lnex_int_t ilk_rx_lnex_int;
    cvmx_ilk_gbl_cfg_t ilk_gbl_cfg;
    cvmx_ilk_ser_cfg_t ilk_ser_cfg;
    cvmx_ilk_rxf_idx_pmap_t ilk_rxf_idx_pmap;
    cvmx_ilk_rxf_mem_pmap_t ilk_rxf_mem_pmap;
    cvmx_ilk_rxx_idx_cal_t ilk_rxx_idx_cal;
    cvmx_ilk_rxx_mem_cal0_t ilk_rxx_mem_cal0;
    cvmx_ilk_rxx_mem_cal1_t ilk_rxx_mem_cal1;

    ilk_rxx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG0(interface));
    cvmx_dprintf ("ilk rxx cfg0: 0x%16lx\n", ilk_rxx_cfg0.u64);
    
    ilk_rxx_cfg1.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG1(interface));
    cvmx_dprintf ("ilk rxx cfg1: 0x%16lx\n", ilk_rxx_cfg1.u64);
    
    ilk_rxx_int.u64 = cvmx_read_csr (CVMX_ILK_RXX_INT(interface));
    cvmx_dprintf ("ilk rxx int: 0x%16lx\n", ilk_rxx_int.u64);
    cvmx_write_csr (CVMX_ILK_RXX_INT(interface), ilk_rxx_int.u64);

    ilk_rxx_jabber.u64 = cvmx_read_csr (CVMX_ILK_RXX_JABBER(interface));
    cvmx_dprintf ("ilk rxx jabber: 0x%16lx\n", ilk_rxx_jabber.u64);

#define LNE_NUM_DBG 4
    for (i = 0; i < LNE_NUM_DBG; i++)
    {
        ilk_rx_lnex_cfg.u64 = cvmx_read_csr (CVMX_ILK_RX_LNEX_CFG(i));
        cvmx_dprintf ("ilk rx lnex cfg lane: %d  0x%16lx\n", i,
                      ilk_rx_lnex_cfg.u64);
    }

    for (i = 0; i < LNE_NUM_DBG; i++)
    {
        ilk_rx_lnex_int.u64 = cvmx_read_csr (CVMX_ILK_RX_LNEX_INT(i));
        cvmx_dprintf ("ilk rx lnex int lane: %d  0x%16lx\n", i,
                      ilk_rx_lnex_int.u64);
        cvmx_write_csr (CVMX_ILK_RX_LNEX_INT(i), ilk_rx_lnex_int.u64);
    }

    ilk_gbl_cfg.u64 = cvmx_read_csr (CVMX_ILK_GBL_CFG);
    cvmx_dprintf ("ilk gbl cfg: 0x%16lx\n", ilk_gbl_cfg.u64);

    ilk_ser_cfg.u64 = cvmx_read_csr (CVMX_ILK_SER_CFG);
    cvmx_dprintf ("ilk ser cfg: 0x%16lx\n", ilk_ser_cfg.u64);

#define CHAN_NUM_DBG 8
    ilk_rxf_idx_pmap.u64 = 0;
    ilk_rxf_idx_pmap.s.index = interface * 256;
    ilk_rxf_idx_pmap.s.inc = 1;
    cvmx_write_csr (CVMX_ILK_RXF_IDX_PMAP, ilk_rxf_idx_pmap.u64);
    for (i = 0; i < CHAN_NUM_DBG; i++)
    {
        ilk_rxf_mem_pmap.u64 = cvmx_read_csr (CVMX_ILK_RXF_MEM_PMAP);
        cvmx_dprintf ("ilk rxf mem pmap chan: %3d  0x%16lx\n", i,
                      ilk_rxf_mem_pmap.u64);
    }

#define CAL_NUM_DBG 2
    ilk_rxx_idx_cal.u64 = 0;
    ilk_rxx_idx_cal.s.inc = 1;
    cvmx_write_csr (CVMX_ILK_RXX_IDX_CAL(interface), ilk_rxx_idx_cal.u64); 
    for (i = 0; i < CAL_NUM_DBG; i++)
    {
        ilk_rxx_idx_cal.u64 = cvmx_read_csr(CVMX_ILK_RXX_IDX_CAL(interface));
        cvmx_dprintf ("ilk rxx idx cal: 0x%16lx\n", ilk_rxx_idx_cal.u64);

        ilk_rxx_mem_cal0.u64 = cvmx_read_csr(CVMX_ILK_RXX_MEM_CAL0(interface));
        cvmx_dprintf ("ilk rxx mem cal0: 0x%16lx\n", ilk_rxx_mem_cal0.u64);
        ilk_rxx_mem_cal1.u64 = cvmx_read_csr(CVMX_ILK_RXX_MEM_CAL1(interface));
        cvmx_dprintf ("ilk rxx mem cal1: 0x%16lx\n", ilk_rxx_mem_cal1.u64);
    }
}
int cvmx_user_static_config(void)
{

#ifdef CVMX_ENABLE_HELPER_FUNCTIONS
	if (cvmx_pko_queue_static_config() != 0) {
		cvmx_dprintf("ERROR: cvmx_pko_queue_static_config() failed\n");
	}

	cvmx_ipd_config_init_from_cvmx_config();
	cvmx_rgmii_config_init_from_cvmx_config();

	cvmx_enable_helper_flag = 1;
	cvmx_pko_set_cmd_que_pool_config(CVMX_FPA_OUTPUT_BUFFER_POOL, CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 0);
	//cvmx_pko_queue_show();
#endif

#if !defined (CVMX_BUILD_FOR_LINUX_KERNEL) && !defined(CVMX_BUILD_FOR_UBOOT)
        cvmx_llm_num_ports = CVMX_LLM_NUM_PORTS;
#endif

#ifdef CVMX_HELPER_SPI_TIMEOUT
        cvmx_spi_config_set_timeout(CVMX_HELPER_SPI_TIMEOUT);
#endif

#ifdef CVMX_HELPER_ILK_LA_MODE_INTERFACE0
        cvmx_ilk_LA_mode[0].ilk_LA_mode = CVMX_HELPER_ILK_LA_MODE_INTERFACE0;
#endif

#ifdef CVMX_HELPER_ILK_LA_MODE_INTERFACE1
        cvmx_ilk_LA_mode[1].ilk_LA_mode = CVMX_HELPER_ILK_LA_MODE_INTERFACE1;
#endif

#ifdef CVMX_HELPER_ILK_LA_MODE_CAL_ENABLE_INTERFACE0
	cvmx_ilk_LA_mode[0].ilk_LA_mode_cal_ena = CVMX_HELPER_ILK_LA_MODE_CAL_ENABLE_INTERFACE0;
#endif

#ifdef CVMX_HELPER_ILK_LA_MODE_CAL_ENABLE_INTERFACE1
	cvmx_ilk_LA_mode[1].ilk_LA_mode_cal_ena = CVMX_HELPER_ILK_LA_MODE_CAL_ENABLE_INTERFACE1;
#endif

#ifdef CVMX_HELPER_ILK_MAX_CHAN_INTERFACE0
	cvmx_ilk_chans[0] = CVMX_HELPER_ILK_MAX_CHAN_INTERFACE0;
#else
 	cvmx_ilk_chans[0] = 8;
#endif

#ifdef CVMX_HELPER_ILK_MAX_CHAN_INTERFACE1
	cvmx_ilk_chans[1] = CVMX_HELPER_ILK_MAX_CHAN_INTERFACE1;
#else
 	cvmx_ilk_chans[1] = 8;
#endif

/* Lane mask configured for ILK0 */
#ifdef CVMX_HELPER_ILK_INTF_MASK_INTERFACE0
	cvmx_ilk_lane_mask[0] =  CVMX_HELPER_ILK_INTF_MASK_INTERFACE0;
#else
	cvmx_ilk_lane_mask[0] =  0xf;
#endif

/* Lane mask configured for ILK1 */
#ifdef CVMX_HELPER_ILK_INTF_MASK_INTERFACE1
	cvmx_ilk_lane_mask[1] =  CVMX_HELPER_ILK_INTF_MASK_INTERFACE1 << 4;
#else
	cvmx_ilk_lane_mask[1] =  0xf << 4;
#endif

#ifdef CVMX_ENABLE_ZIP_FUNCTIONS
	cvmx_zip_config_init_from_cvmx_config();
#endif

#ifdef CVMX_ENABLE_DFA_FUNCTIONS
	cvmx_dfa_config_init_from_cvmx_config();
#endif

#ifdef  CVMX_ENABLE_TIMER_FUNCTIONS
	cvmx_timer_config_init_from_cvmx_config();
#endif

#ifdef CVMX_ENABLE_BCH_FUNCTIONS
	cvmx_bch_config_init_from_cvmx_config();
#endif

	cvmx_outputbuffer_config_init_from_cvmx_config();
	return 0;
}
Example #24
0
/**
 * @INTERNAL
 * Perform USB device mode initialization after a reset completes.
 * This should be called after USBC0/1_GINTSTS[USBRESET] and
 * corresponds to section 22.6.1.1, "Initialization on USB Reset",
 * in the manual.
 *
 * @param usb    USB device state populated by
 *               cvmx_usbd_initialize().
 *
 * @return Zero or negative on error.
 */
static int __cvmx_usbd_device_reset_complete(cvmx_usbd_state_t *usb)
{
    cvmx_usbcx_ghwcfg2_t usbcx_ghwcfg2;
    cvmx_usbcx_ghwcfg3_t usbcx_ghwcfg3;
    cvmx_usbcx_doepmsk_t usbcx_doepmsk;
    cvmx_usbcx_diepmsk_t usbcx_diepmsk;
    cvmx_usbcx_daintmsk_t usbc_daintmsk;
    cvmx_usbcx_gnptxfsiz_t gnptxfsiz;
    int fifo_space;
    int i;

    if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
        cvmx_dprintf("%s: Processing reset\n", __FUNCTION__);

    usbcx_ghwcfg2.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GHWCFG2(usb->index));
    usbcx_ghwcfg3.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GHWCFG3(usb->index));

    /* Set up the data FIFO RAM for each of the FIFOs */
    fifo_space = usbcx_ghwcfg3.s.dfifodepth;

    /* Start at the top of the FIFO and assign space for each periodic fifo */
    for (i=usbcx_ghwcfg2.s.numdeveps; i>0; i--)
    {
        cvmx_usbcx_dptxfsizx_t siz;
        siz.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DPTXFSIZX(i, usb->index));
        fifo_space -= siz.s.dptxfsize;
        siz.s.dptxfstaddr = fifo_space;
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DPTXFSIZX(i, usb->index), siz.u32);
    }

    /* Assign half the leftover space to the non periodic tx fifo */
    gnptxfsiz.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index));
    gnptxfsiz.s.nptxfdep = fifo_space / 2;
    fifo_space -= gnptxfsiz.s.nptxfdep;
    gnptxfsiz.s.nptxfstaddr = fifo_space;
    __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index), gnptxfsiz.u32);

    /* Assign the remaining space to the RX fifo */
    __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GRXFSIZ(usb->index), fifo_space);

    /* Unmask the common endpoint interrupts */
    usbcx_doepmsk.u32 = 0;
    usbcx_doepmsk.s.setupmsk = 1;
    usbcx_doepmsk.s.epdisbldmsk = 1;
    usbcx_doepmsk.s.xfercomplmsk = 1;
    __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DOEPMSK(usb->index), usbcx_doepmsk.u32);
    usbcx_diepmsk.u32 = 0;
    usbcx_diepmsk.s.epdisbldmsk = 1;
    usbcx_diepmsk.s.xfercomplmsk = 1;
    __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DIEPMSK(usb->index), usbcx_diepmsk.u32);

    usbc_daintmsk.u32 = 0;
    usbc_daintmsk.s.inepmsk = -1;
    usbc_daintmsk.s.outepmsk = -1;
    __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DAINTMSK(usb->index), usbc_daintmsk.u32);

    /* Set all endpoints to NAK */
    for (i=0; i<usbcx_ghwcfg2.s.numdeveps+1; i++)
    {
        cvmx_usbcx_doepctlx_t usbc_doepctl;
        usbc_doepctl.u32 = 0;
        usbc_doepctl.s.snak = 1;
        usbc_doepctl.s.usbactep = 1;
        usbc_doepctl.s.mps = (i==0) ? 0 : 64;
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DOEPCTLX(i, usb->index), usbc_doepctl.u32);
    }

    return 0;
}
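A standalone arithmetic sketch (not SDK code) of the FIFO partitioning performed above: periodic TX FIFOs are carved from the top of the RAM, half of what remains goes to the non-periodic TX FIFO, and the rest becomes the RX FIFO. All sizes below are made up.
/* Standalone sketch of the top-down FIFO RAM split. */
#include <stdio.h>

int main(void)
{
    int fifo_space = 1024;              /* total data FIFO RAM (words) */
    int periodic_sizes[2] = {128, 128}; /* per-endpoint periodic FIFO sizes */
    int nptx;
    int i;

    /* Carve each periodic TX FIFO from the top of the RAM */
    for (i = 0; i < 2; i++) {
        fifo_space -= periodic_sizes[i];
        printf("periodic fifo %d: start %d size %d\n",
               i + 1, fifo_space, periodic_sizes[i]);
    }

    /* Half of what is left becomes the non-periodic TX FIFO */
    nptx = fifo_space / 2;
    fifo_space -= nptx;
    printf("non-periodic tx: start %d size %d\n", fifo_space, nptx);

    /* Whatever remains (addresses 0..fifo_space) is the RX FIFO */
    printf("rx fifo size: %d\n", fifo_space);
    return 0;
}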
int cvmx_coremask_str2bmp(cvmx_coremask_t *pcm, char *hexstr)
{
	int i, j;
	int l;		/* length of the hexstr in characters */
	int lb;		/* number of bits taken by hexstr */
	int hldr_offset;/* holder's offset within the coremask */
	int hldr_xsz;	/* holder's size in the number of hex digits */
	cvmx_coremask_holder_t h;
	char c;

#define MINUS_ONE (hexstr[0] == '-' && hexstr[1] == '1' && hexstr[2] == 0)
	if (MINUS_ONE) {
		cvmx_coremask_set_all(pcm);
		return 0;
	}

	/* Skip '0x' from hexstr */
	if (hexstr[0] == '0' && (hexstr[1] == 'x' || hexstr[1] == 'X'))
		hexstr += 2;

	if (hexstr[0] == '0') {
		cvmx_dprintf("%s: hexstr starts with zero\n", __func__);
		return -3;
	}

	l = strlen(hexstr);
	for (i = 0; i < l; i++) {
		if (isxdigit((int)hexstr[i]) == 0) {
			cvmx_dprintf("%s: Non-hex digit within hexstr\n",
			    __func__);
			return -2;
		}
	}

	lb = (l - 1) * 4;
	if (hexstr[0] > '7')
		lb += 4;
	else if (hexstr[0] > '3')
		lb += 3;
	else if (hexstr[0] > '1')
		lb += 2;
	else
		lb += 1;
	if (lb > CVMX_MIPS_MAX_CORES) {
		cvmx_dprintf("%s: hexstr (%s) is too long\n", __func__,
		    hexstr);
		return -1;
	}
	cvmx_coremask_clear_all(pcm);
	hldr_offset = 0;
	hldr_xsz = 2 * sizeof(cvmx_coremask_holder_t);
	for (i = l; i > 0; i -= hldr_xsz) {
		c = hexstr[i];
		hexstr[i] = 0;
		j = i - hldr_xsz;
		if (j < 0)
			j = 0;
		h = strtoull(&hexstr[j], NULL, 16);
#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
		if (errno == EINVAL) {
			cvmx_dprintf("%s: strtou returns w/ EINVAL\n",
			    __func__);
			return -2;
		}
#endif
		pcm->coremask_bitmap[hldr_offset] = h;
		hexstr[i] = c;
		hldr_offset++;
	}

	return 0;
}
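A standalone sketch (not SDK code) of the chunked parsing technique used here: split the hex string into fixed-size pieces from the least significant end and convert each piece with strtoull(); the input string is made up.
/* Standalone sketch of least-significant-first chunked hex parsing. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
    char hexstr[] = "12345678deadbeef0"; /* 17 hex digits -> two 64-bit words */
    const int chunk = 16;                /* hex digits per 64-bit word */
    int i = (int)strlen(hexstr);
    int j;
    int word = 0;
    char saved;
    uint64_t h;

    while (i > 0) {
        j = i - chunk;
        if (j < 0)
            j = 0;
        saved = hexstr[i];               /* temporarily terminate the chunk */
        hexstr[i] = 0;
        h = strtoull(&hexstr[j], NULL, 16);
        hexstr[i] = saved;               /* restore the original string */
        printf("word %d = 0x%016llx\n", word++, (unsigned long long)h);
        i -= chunk;
    }
    return 0;
}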
Example #26
0
/**
 * Poll the USB block for status and call all needed callback
 * handlers. This function is meant to be called in the interrupt
 * handler for the USB controller. It can also be called
 * periodically in a loop for non-interrupt based operation.
 *
 * @param usb    USB device state populated by
 *               cvmx_usbd_initialize().
 *
 * @return Zero or negative on error.
 */
int cvmx_usbd_poll(cvmx_usbd_state_t *usb)
{
    cvmx_usbcx_gintsts_t usbc_gintsts;

    if (cvmx_unlikely(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
        cvmx_dprintf("%s: Called\n", __FUNCTION__);

    /* Read the pending interrupts */
    usbc_gintsts.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GINTSTS(usb->index));
    usbc_gintsts.u32 &= __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GINTMSK(usb->index));

    /* Clear the interrupts now that we know about them */
    __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index), usbc_gintsts.u32);

    if (usbc_gintsts.s.usbsusp)
        __cvmx_usbd_callback(usb, CVMX_USBD_CALLBACK_SUSPEND, 0, 0);

    if (usbc_gintsts.s.enumdone)
        __cvmx_usbd_callback(usb, CVMX_USBD_CALLBACK_ENUM_COMPLETE, 0, 0);

    if (usbc_gintsts.s.usbrst)
    {
        /* USB Reset (USBRst)
            The core sets this bit to indicate that a reset is
            detected on the USB. */
        __cvmx_usbd_device_reset_complete(usb);
        __cvmx_usbd_callback(usb, CVMX_USBD_CALLBACK_RESET, 0, 0);
    }

    if (usbc_gintsts.s.oepint || usbc_gintsts.s.iepint)
    {
        cvmx_usbcx_daint_t usbc_daint;
        usbc_daint.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DAINT(usb->index));
        if (usbc_daint.s.inepint)
        {
            int active_endpoints = usbc_daint.s.inepint;

            while (active_endpoints)
            {
                int endpoint;
                CVMX_CLZ(endpoint, active_endpoints);
                endpoint = 31 - endpoint;
                __cvmx_usbd_poll_in_endpoint(usb, endpoint);
                active_endpoints ^= 1<<endpoint;
            }
        }
        if (usbc_daint.s.outepint)
        {
            int active_endpoints = usbc_daint.s.outepint;

            while (active_endpoints)
            {
                int endpoint;
                CVMX_CLZ(endpoint, active_endpoints);
                endpoint = 31 - endpoint;
                __cvmx_usbd_poll_out_endpoint(usb, endpoint);
                active_endpoints ^= 1<<endpoint;
            }
        }
    }

    return 0;
}
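A standalone sketch (not SDK code) of the bit-walk used above to service every pending endpoint; the compiler builtin __builtin_clz stands in for the CVMX_CLZ macro.
/* Standalone sketch: walk set bits from highest to lowest. */
#include <stdio.h>

int main(void)
{
    unsigned int active_endpoints = 0x45;   /* endpoints 0, 2 and 6 pending */

    while (active_endpoints) {
        int endpoint = 31 - __builtin_clz(active_endpoints);
        printf("servicing endpoint %d\n", endpoint);
        active_endpoints ^= 1u << endpoint;  /* clear the bit just handled */
    }
    return 0;
}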
/**
 * This routine restarts the SPI interface after it has lost synchronization
 * with its corresponding system.
 *
 * @param interface The identifier of the packet interface to configure and
 *                  use as a SPI interface.
 * @param mode      The operating mode for the SPI interface. The interface
 *                  can operate as a full duplex (both Tx and Rx data paths
 *                  active) or as a halfplex (either the Tx data path is
 *                  active or the Rx data path is active, but not both).
 * @param timeout   Timeout to wait for clock synchronization in seconds
 * @return Zero on success, negative on failure.
 */
int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode, int timeout)
{
    uint64_t                     timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
    cvmx_spxx_trn4_ctl_t         spxx_trn4_ctl;
    cvmx_spxx_clk_stat_t         stat;
    cvmx_stxx_com_ctl_t          stxx_com_ctl;
    cvmx_srxx_com_ctl_t          srxx_com_ctl;
    //cvmx_stxx_spi4_dat_t         stxx_spi4_dat;

    cvmx_dprintf ("SPI%d: Restart %s\n", interface, modes[mode]);

      // Reset the Spi4 deskew logic
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_DBG_DESKEW_CTL(interface), 0x00200000);
    cvmx_wait (100 * MS);
      // Setup the CLKDLY right in the middle
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x00000830);
    cvmx_wait (100 * MS);
      // Reset SRX0 DLL
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x00000831);
    cvmx_wait (100 * MS);
      // Enable dynamic alignment
      // -------------------------------------------------
    spxx_trn4_ctl.u64 = 0;
    spxx_trn4_ctl.s.mux_en   = 1;
    spxx_trn4_ctl.s.macro_en = 1;
    spxx_trn4_ctl.s.maxdist  = 16;
    spxx_trn4_ctl.s.jitter   = 1;
    cvmx_write_csr (CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
    cvmx_write_csr (CVMX_SPXX_DBG_DESKEW_CTL(interface), 0x0);

      /* Regardless of operating mode, both Tx and Rx clocks must be present
       * for the SPI interface to operate.
       */
    cvmx_dprintf ("SPI%d: Waiting to see TsClk...\n", interface);
    do {  /* Do we see the TsClk transitioning? */
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.s4clk0 == 0 || stat.s.s4clk1 == 0);

    cvmx_dprintf ("SPI%d: Waiting to see RsClk...\n", interface);
    do {  /* Do we see the RsClk transitioning? */
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.d4clk0 == 0 || stat.s.d4clk1 == 0);

      // SRX0 & STX0 Inf0 Links are configured - begin training
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x0000083f);
    cvmx_wait (1000 * MS);
      // SRX0 clear the boot bit
      // -------------------------------------------------
    spxx_trn4_ctl.s.clr_boot = 1;
    cvmx_write_csr (CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);

      // Wait for the training sequence to complete
      // -------------------------------------------------
      // SPX0_CLK_STAT - SPX0_CLK_STAT[SRXTRN] should be 1 (bit8)
    cvmx_dprintf ("SPI%d: Waiting for training\n", interface);
    cvmx_wait (1000 * MS);
    do {
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.srxtrn == 0);

    if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
          // SRX0 interface should be good, send calendar data
          // -------------------------------------------------
        cvmx_dprintf ("SPI%d: Rx is synchronized, start sending calendar data\n", interface);
        srxx_com_ctl.u64 = 0;
        srxx_com_ctl.s.prts   = 9;
        srxx_com_ctl.s.inf_en = 1;
        srxx_com_ctl.s.st_en  = 1;
        cvmx_write_csr (CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
    }

    if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
          // STX0 has achieved sync
          // The correspondent board should be sending calendar data
          // Enable the STX0 STAT receiver.
          // -------------------------------------------------
        stxx_com_ctl.u64 = 0;
        stxx_com_ctl.s.inf_en = 1;
        stxx_com_ctl.s.st_en = 1;
        cvmx_write_csr (CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);

          // Waiting for calendar sync on STX0 STAT
          // -------------------------------------------------
          // SPX0_CLK_STAT - SPX0_CLK_STAT[STXCAL] should be 1 (bit10)
        cvmx_dprintf ("SPI%d: Waiting to sync on STX[%d] STAT\n", interface, interface);
        do {
            stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT (interface));
            if (cvmx_get_cycle() > timeout_time)
            {
                cvmx_dprintf ("SPI%d: Timeout\n", interface);
                return -1;
            }
        } while (stat.s.stxcal == 0);
    }

      // Inf0 is synched
      // -------------------------------------------------
      // SPX0 is up
    if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
        srxx_com_ctl.s.inf_en = 1;
        cvmx_write_csr (CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
        cvmx_dprintf ("SPI%d: Rx is now up\n", interface);
    }

    if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
        stxx_com_ctl.s.inf_en = 1;
        cvmx_write_csr (CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
        cvmx_dprintf ("SPI%d: Tx is now up\n", interface);
    }

    return 0;
}
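
The restart sequence above only returns once clock synchronization (and, where enabled, calendar synchronization) is observed or the timeout expires. A minimal caller sketch, not part of the original file: it restarts interface 0 in full-duplex mode with a 10 second synchronization timeout. CVMX_SPI_MODE_DUPLEX is assumed to be the full-duplex member of cvmx_spi_mode_t, and the timeout value is arbitrary.

/* Minimal usage sketch (assumptions noted in the text above). */
#include "cvmx.h"
#include "cvmx-spi.h"

static int spi0_recover(void)
{
    /* Restart SPI interface 0, full duplex, waiting up to 10 seconds. */
    int status = cvmx_spi_restart_interface(0, CVMX_SPI_MODE_DUPLEX, 10);
    if (status)
        cvmx_dprintf("SPI0: restart failed (%d)\n", status);
    return status;
}
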
/**
 * Configure an output port and the associated queues for use.
 *
 * @port:       Port to configure.
 * @base_queue: First queue number to associate with this port.
 * @num_queues: Number of queues to associate with this port
 * @priority:   Array of priority levels for each queue. Values are
 *                   allowed to be 0-8. A value of 8 gets 8 times the traffic
 *                   of a value of 1. A value of 0 indicates that the queue
 *                   does not participate in any rounds. These priorities can
 *                   be changed on the fly while the PKO is enabled. A priority
 *                   of 9 indicates that static priority should be used. If static
 *                   priority is used all queues with static priority must be
 *                   contiguous starting at the base_queue, and lower numbered
 *                   queues have higher priority than higher numbered queues.
 *                   There must be num_queues elements in the array.
 */
cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue,
                                       uint64_t num_queues,
                                       const uint64_t priority[])
{
    cvmx_pko_status_t result_code;
    uint64_t queue;
    union cvmx_pko_mem_queue_ptrs config;
    union cvmx_pko_reg_queue_ptrs1 config1;
    int static_priority_base = -1;
    int static_priority_end = -1;

    if ((port >= CVMX_PKO_NUM_OUTPUT_PORTS)
            && (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID)) {
        cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid port %llu\n",
                     (unsigned long long)port);
        return CVMX_PKO_INVALID_PORT;
    }

    if (base_queue + num_queues > CVMX_PKO_MAX_OUTPUT_QUEUES) {
        cvmx_dprintf
        ("ERROR: cvmx_pko_config_port: Invalid queue range %llu\n",
         (unsigned long long)(base_queue + num_queues));
        return CVMX_PKO_INVALID_QUEUE;
    }

    if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
        /*
         * Validate the static queue priority setup and set
         * static_priority_base and static_priority_end
         * accordingly.
         */
        for (queue = 0; queue < num_queues; queue++) {
            /* Find first queue of static priority */
            if (static_priority_base == -1
                    && priority[queue] ==
                    CVMX_PKO_QUEUE_STATIC_PRIORITY)
                static_priority_base = queue;
            /* Find last queue of static priority */
            if (static_priority_base != -1
                    && static_priority_end == -1
                    && priority[queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY
                    && queue)
                static_priority_end = queue - 1;
            else if (static_priority_base != -1
                     && static_priority_end == -1
                     && queue == num_queues - 1)
                /* all queues are static priority */
                static_priority_end = queue;
            /*
             * Check to make sure all static priority
             * queues are contiguous.  Also catches some
             * cases of static priorities not starting at
             * queue 0.
             */
            if (static_priority_end != -1
                    && (int)queue > static_priority_end
                    && priority[queue] ==
                    CVMX_PKO_QUEUE_STATIC_PRIORITY) {
                cvmx_dprintf("ERROR: cvmx_pko_config_port: "
                             "Static priority queues aren't "
                             "contiguous or don't start at "
                             "base queue. q: %d, eq: %d\n",
                             (int)queue, static_priority_end);
                return CVMX_PKO_INVALID_PRIORITY;
            }
        }
        if (static_priority_base > 0) {
            cvmx_dprintf("ERROR: cvmx_pko_config_port: Static "
                         "priority queues don't start at base "
                         "queue. sq: %d\n",
                         static_priority_base);
            return CVMX_PKO_INVALID_PRIORITY;
        }
#if 0
        cvmx_dprintf("Port %d: Static priority queue base: %d, "
                     "end: %d\n", port,
                     static_priority_base, static_priority_end);
#endif
    }
    /*
     * At this point, static_priority_base and static_priority_end
     * are either both -1, or are valid start/end queue
     * numbers.
     */

    result_code = CVMX_PKO_SUCCESS;

#ifdef PKO_DEBUG
    cvmx_dprintf("num queues: %llu (%lld,%lld)\n",
                 (unsigned long long)num_queues,
                 CVMX_PKO_QUEUES_PER_PORT_INTERFACE0,
                 CVMX_PKO_QUEUES_PER_PORT_INTERFACE1);
#endif

    for (queue = 0; queue < num_queues; queue++) {
        uint64_t *buf_ptr = NULL;

        config1.u64 = 0;
        config1.s.idx3 = queue >> 3;
        config1.s.qid7 = (base_queue + queue) >> 7;

        config.u64 = 0;
        config.s.tail = queue == (num_queues - 1);
        config.s.index = queue;
        config.s.port = port;
        config.s.queue = base_queue + queue;

        if (!cvmx_octeon_is_pass1()) {
            config.s.static_p = static_priority_base >= 0;
            config.s.static_q = (int)queue <= static_priority_end;
            config.s.s_tail = (int)queue == static_priority_end;
        }
        /*
         * Convert the priority into an enable bit field. Try
         * to space the bits out evenly so the packets don't
         * get grouped up.
         */
        switch ((int)priority[queue]) {
        case 0:
            config.s.qos_mask = 0x00;
            break;
        case 1:
            config.s.qos_mask = 0x01;
            break;
        case 2:
            config.s.qos_mask = 0x11;
            break;
        case 3:
            config.s.qos_mask = 0x49;
            break;
        case 4:
            config.s.qos_mask = 0x55;
            break;
        case 5:
            config.s.qos_mask = 0x57;
            break;
        case 6:
            config.s.qos_mask = 0x77;
            break;
        case 7:
            config.s.qos_mask = 0x7f;
            break;
        case 8:
            config.s.qos_mask = 0xff;
            break;
        case CVMX_PKO_QUEUE_STATIC_PRIORITY:
            /* Pass 1 will fall through to the error case */
            if (!cvmx_octeon_is_pass1()) {
                config.s.qos_mask = 0xff;
                break;
            }
        default:
            cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid "
                         "priority %llu\n",
                         (unsigned long long)priority[queue]);
            config.s.qos_mask = 0xff;
            result_code = CVMX_PKO_INVALID_PRIORITY;
            break;
        }

        if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
            cvmx_cmd_queue_result_t cmd_res =
                cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO
                                          (base_queue + queue),
                                          CVMX_PKO_MAX_QUEUE_DEPTH,
                                          CVMX_FPA_OUTPUT_BUFFER_POOL,
                                          CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
                                          -
                                          CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST
                                          * 8);
            if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) {
                switch (cmd_res) {
                case CVMX_CMD_QUEUE_NO_MEMORY:
                    cvmx_dprintf("ERROR: "
                                 "cvmx_pko_config_port: "
                                 "Unable to allocate "
                                 "output buffer.\n");
                    return CVMX_PKO_NO_MEMORY;
                case CVMX_CMD_QUEUE_ALREADY_SETUP:
                    cvmx_dprintf
                    ("ERROR: cvmx_pko_config_port: Port already setup.\n");
                    return CVMX_PKO_PORT_ALREADY_SETUP;
                case CVMX_CMD_QUEUE_INVALID_PARAM:
                default:
                    cvmx_dprintf
                    ("ERROR: cvmx_pko_config_port: Command queue initialization failed.\n");
                    return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
                }
            }

            buf_ptr =
                (uint64_t *)
                cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_PKO
                                      (base_queue + queue));
            config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr);
        } else
            config.s.buf_ptr = 0;

        CVMX_SYNCWS;

        if (!OCTEON_IS_MODEL(OCTEON_CN3XXX))
            cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
        cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
    }

    return result_code;
}
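
A minimal caller sketch, not part of the original file, that follows the static-priority rules documented above: queue 0 of a hypothetical port 0 is placed at static priority (always drained first) and the remaining queues get weighted service. The port, base queue, and weights are placeholders chosen for illustration.

/* Minimal usage sketch (port/queue numbers and weights are placeholders). */
#include "cvmx.h"
#include "cvmx-pko.h"

static cvmx_pko_status_t pko_port0_setup(void)
{
    const uint64_t priority[8] = {
        CVMX_PKO_QUEUE_STATIC_PRIORITY, /* queue 0: static priority */
        8, 7, 6, 5, 3, 2, 1             /* queues 1-7: weighted rounds */
    };

    /* Port 0, eight queues starting at queue 0. */
    return cvmx_pko_config_port(0 /* port */, 0 /* base_queue */,
                                8 /* num_queues */, priority);
}
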
Example #29
0
File: cvmx-pow.c Project: 2asoft/freebsd
static int __cvmx_pow_capture_v1(void *buffer, int buffer_size)
{
    __cvmx_pow_dump_t *dump = (__cvmx_pow_dump_t*)buffer;
    int num_cores;
    int num_pow_entries = cvmx_pow_get_num_entries();
    int core;
    int index;
    int bits;

    if (buffer_size < (int)sizeof(__cvmx_pow_dump_t))
    {
        cvmx_dprintf("cvmx_pow_capture: Buffer too small\n");
        return -1;
    }

    num_cores = cvmx_octeon_num_cores();

    /* Read all core related state */
    for (core=0; core<num_cores; core++)
    {
        cvmx_pow_load_addr_t load_addr;
        load_addr.u64 = 0;
        load_addr.sstatus.mem_region = CVMX_IO_SEG;
        load_addr.sstatus.is_io = 1;
        load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
        load_addr.sstatus.coreid = core;
        for (bits=0; bits<8; bits++)
        {
            load_addr.sstatus.get_rev = (bits & 1) != 0;
            load_addr.sstatus.get_cur = (bits & 2) != 0;
            load_addr.sstatus.get_wqp = (bits & 4) != 0;
            if ((load_addr.sstatus.get_cur == 0) && load_addr.sstatus.get_rev)
                dump->sstatus[core][bits].u64 = -1;
            else
                dump->sstatus[core][bits].u64 = cvmx_read_csr(load_addr.u64);
        }
    }

    /* Read all internal POW entries */
    for (index=0; index<num_pow_entries; index++)
    {
        cvmx_pow_load_addr_t load_addr;
        load_addr.u64 = 0;
        load_addr.smemload.mem_region = CVMX_IO_SEG;
        load_addr.smemload.is_io = 1;
        load_addr.smemload.did = CVMX_OCT_DID_TAG_TAG2;
        load_addr.smemload.index = index;
        for (bits=0; bits<3; bits++)
        {
            load_addr.smemload.get_des = (bits & 1) != 0;
            load_addr.smemload.get_wqp = (bits & 2) != 0;
            dump->smemload[index][bits].u64 = cvmx_read_csr(load_addr.u64);
        }
    }

    /* Read all group and queue pointers */
    for (index=0; index<16; index++)
    {
        cvmx_pow_load_addr_t load_addr;
        load_addr.u64 = 0;
        load_addr.sindexload.mem_region = CVMX_IO_SEG;
        load_addr.sindexload.is_io = 1;
        load_addr.sindexload.did = CVMX_OCT_DID_TAG_TAG3;
        load_addr.sindexload.qosgrp = index;
        for (bits=0; bits<4; bits++)
        {
            load_addr.sindexload.get_rmt =  (bits & 1) != 0;
            load_addr.sindexload.get_des_get_tail =  (bits & 2) != 0;
            /* The first pass only has 8 valid index values */
            if ((load_addr.sindexload.get_rmt == 0) &&
                (load_addr.sindexload.get_des_get_tail == 0) &&
                (index >= 8))
                dump->sindexload[index][bits].u64 = -1;
            else
                dump->sindexload[index][bits].u64 = cvmx_read_csr(load_addr.u64);
        }
    }
    return 0;
}