int __cvmx_helper_spi_enable(int interface) { /* * Normally the ethernet L2 CRC is checked and stripped in the * GMX block. When you are using SPI, this isn' the case and * IPD needs to check the L2 CRC. */ int num_ports = cvmx_helper_ports_on_interface(interface); int ipd_port; for (ipd_port = interface * 16; ipd_port < interface * 16 + num_ports; ipd_port++) { union cvmx_pip_prt_cfgx port_config; port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port)); port_config.s.crc_en = 1; cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_config.u64); } if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) { cvmx_spi_start_interface(interface, CVMX_SPI_MODE_DUPLEX, CVMX_HELPER_SPI_TIMEOUT, num_ports); if (cvmx_spi4000_is_present(interface)) cvmx_spi4000_initialize(interface); } __cvmx_interrupt_spxx_int_msk_enable(interface); __cvmx_interrupt_stxx_int_msk_enable(interface); __cvmx_interrupt_gmxx_enable(interface); return 0; }
/** * @INTERNAL * Bringup and enable a SPI interface. After this call packet I/O * should be fully functional. This is called with IPD enabled but * PKO disabled. * * @param interface Interface to bring up * * @return Zero on success, negative on failure */ int __cvmx_helper_spi_enable(int interface) { /* Normally the ethernet L2 CRC is checked and stripped in the GMX block. When you are using SPI, this isn' the case and IPD needs to check the L2 CRC */ int num_ports = cvmx_helper_ports_on_interface(interface); int ipd_port; for (ipd_port=interface*16; ipd_port<interface*16+num_ports; ipd_port++) { cvmx_pip_prt_cfgx_t port_config; port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port)); port_config.s.crc_en = 1; #ifdef OCTEON_VENDOR_RADISYS /* * Incoming packets on the RSYS4GBE have the FCS stripped. */ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE) port_config.s.crc_en = 0; #endif cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_config.u64); } if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) { cvmx_spi_start_interface(interface, CVMX_SPI_MODE_DUPLEX, CVMX_HELPER_SPI_TIMEOUT, num_ports); if (cvmx_spi4000_is_present(interface)) cvmx_spi4000_initialize(interface); } return 0; }
/** * @INTERNAL * Bringup and enable a LOOP interface. After this call packet * I/O should be fully functional. This is called with IPD * enabled but PKO disabled. * * @param interface Interface to bring up * * @return Zero on success, negative on failure */ int __cvmx_helper_loop_enable(int interface) { cvmx_pip_prt_cfgx_t port_cfg; int num_ports, index; unsigned long offset; num_ports = __cvmx_helper_get_num_ipd_ports(interface); /* * We need to disable length checking so packet < 64 bytes and jumbo * frames don't get errors */ for (index = 0; index < num_ports; index++) { offset = ((octeon_has_feature(OCTEON_FEATURE_PKND)) ? cvmx_helper_get_pknd(interface, index) : cvmx_helper_get_ipd_port(interface, index)); port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(offset)); port_cfg.s.maxerr_en = 0; port_cfg.s.minerr_en = 0; cvmx_write_csr(CVMX_PIP_PRT_CFGX(offset), port_cfg.u64); } /* * Disable FCS stripping for loopback ports */ if (!octeon_has_feature(OCTEON_FEATURE_PKND)) { cvmx_ipd_sub_port_fcs_t ipd_sub_port_fcs; ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS); ipd_sub_port_fcs.s.port_bit2 = 0; cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64); } return 0; }
/**
 * @INTERNAL
 * Bringup and enable a SPI interface. After this call packet I/O
 * should be fully functional. This is called with IPD enabled but
 * PKO disabled.
 *
 * @param interface Interface to bring up
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_spi_enable(int interface)
{
	/*
	 * Normally the ethernet L2 CRC is checked and stripped in the
	 * GMX block. When using SPI that isn't the case and IPD needs
	 * to check the L2 CRC, so enable CRC checking on every port.
	 */
	int num_ports = cvmx_helper_ports_on_interface(interface);
	int ipd_port;

	for (ipd_port = interface * 16;
	     ipd_port < interface * 16 + num_ports; ipd_port++) {
		union cvmx_pip_prt_cfgx port_config;
		port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
		port_config.s.crc_en = 1;
		cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_config.u64);
	}

	/* Start the SPI link on real hardware only, not on the simulator */
	if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) {
		cvmx_spi_start_interface(interface, CVMX_SPI_MODE_DUPLEX,
					 CVMX_HELPER_SPI_TIMEOUT, num_ports);
		if (cvmx_spi4000_is_present(interface))
			cvmx_spi4000_initialize(interface);
	}

	/* Unmask the SPX/STX/GMX interrupt sources for this interface */
	__cvmx_interrupt_spxx_int_msk_enable(interface);
	__cvmx_interrupt_stxx_int_msk_enable(interface);
	__cvmx_interrupt_gmxx_enable(interface);
	return 0;
}
int __cvmx_helper_npi_enable(int interface) { /* * On CN50XX, CN52XX, and CN56XX we need to disable length * checking so packet < 64 bytes and jumbo frames don't get * errors. */ if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) && !OCTEON_IS_MODEL(OCTEON_CN58XX)) { int num_ports = cvmx_helper_ports_on_interface(interface); int port; for (port = 0; port < num_ports; port++) { union cvmx_pip_prt_cfgx port_cfg; int ipd_port = cvmx_helper_get_ipd_port(interface, port); port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port)); port_cfg.s.maxerr_en = 0; port_cfg.s.minerr_en = 0; cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_cfg.u64); } } /* Enables are controlled by the remote host, so nothing to do here */ return 0; }
/**
 * @INTERNAL
 * Bringup and enable a NPI interface. After this call packet
 * I/O should be fully functional. This is called with IPD
 * enabled but PKO disabled.
 *
 * @param interface Interface to bring up
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_npi_enable(int interface)
{
	int port;
	int num_ports = cvmx_helper_ports_on_interface(interface);

	if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
		/*
		 * Enables are controlled by the remote host, so
		 * nothing to do here.
		 */
		return 0;

	/*
	 * On CN50XX, CN52XX, and CN56XX we need to disable length
	 * checking so packet < 64 bytes and jumbo frames don't get
	 * errors.
	 */
	for (port = 0; port < num_ports; port++) {
		union cvmx_pip_prt_cfgx port_cfg;
		/* CN68XX ports are addressed by pknd rather than IPD port */
		int ipd_port = (OCTEON_IS_MODEL(OCTEON_CN68XX)) ?
			cvmx_helper_get_pknd(interface, port) :
			cvmx_helper_get_ipd_port(interface, port);

		port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
		port_cfg.s.lenerr_en = 0;
		port_cfg.s.maxerr_en = 0;
		port_cfg.s.minerr_en = 0;
		cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_cfg.u64);

		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
			/* Set up pknd and bpid */
			union cvmx_sli_portx_pkind config;

			config.u64 = cvmx_read_csr(CVMX_PEXP_SLI_PORTX_PKIND(port));
			config.s.bpkind = cvmx_helper_get_bpid(interface, port);
			config.s.pkind = cvmx_helper_get_pknd(interface, port);
			cvmx_write_csr(CVMX_PEXP_SLI_PORTX_PKIND(port), config.u64);
		}
	}

	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		/*
		 * Set up pko pipes.
		 */
		union cvmx_sli_tx_pipe config;

		config.u64 = cvmx_read_csr(CVMX_PEXP_SLI_TX_PIPE);
		config.s.base = __cvmx_pko_get_pipe(interface, 0);
		/* A negative cvmx_npi_num_pipes means "one pipe per port" */
		config.s.nump =
			cvmx_npi_num_pipes < 0 ? num_ports : cvmx_npi_num_pipes;
		cvmx_write_csr(CVMX_PEXP_SLI_TX_PIPE, config.u64);
	}

	/* Enables are controlled by the remote host, so nothing to do here */
	return 0;
}
int __cvmx_helper_loop_probe(int interface) { union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs; int num_ports = 4; int port; for (port = 0; port < num_ports; port++) { union cvmx_pip_prt_cfgx port_cfg; int ipd_port = cvmx_helper_get_ipd_port(interface, port); port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port)); port_cfg.s.maxerr_en = 0; port_cfg.s.minerr_en = 0; cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_cfg.u64); } ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS); ipd_sub_port_fcs.s.port_bit2 = 0; cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64); return num_ports; }
/** * Probe a LOOP interface and determine the number of ports * connected to it. The LOOP interface should still be down * after this call. * * @interface: Interface to probe * * Returns Number of ports on the interface. Zero to disable. */ int __cvmx_helper_loop_probe(int interface) { union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs; int num_ports = 4; int port; /* We need to disable length checking so packet < 64 bytes and jumbo frames don't get errors */ for (port = 0; port < num_ports; port++) { union cvmx_pip_prt_cfgx port_cfg; int ipd_port = cvmx_helper_get_ipd_port(interface, port); port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port)); port_cfg.s.maxerr_en = 0; port_cfg.s.minerr_en = 0; cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_cfg.u64); } /* Disable FCS stripping for loopback ports */ ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS); ipd_sub_port_fcs.s.port_bit2 = 0; cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64); return num_ports; }
int __cvmx_helper_npi_enable(int interface) { if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) && !OCTEON_IS_MODEL(OCTEON_CN58XX)) { int num_ports = cvmx_helper_ports_on_interface(interface); int port; for (port = 0; port < num_ports; port++) { union cvmx_pip_prt_cfgx port_cfg; int ipd_port = cvmx_helper_get_ipd_port(interface, port); port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port)); port_cfg.s.maxerr_en = 0; port_cfg.s.minerr_en = 0; cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_cfg.u64); } } return 0; }
/**
 * @INTERNAL
 * Bringup and enable SRIO interface. After this call packet
 * I/O should be fully functional. This is called with IPD
 * enabled but PKO disabled.
 *
 * @param interface Interface to bring up
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_srio_enable(int interface)
{
	int num_ports = cvmx_helper_ports_on_interface(interface);
	int index;
	cvmx_sriomaintx_core_enables_t sriomaintx_core_enables;
	cvmx_sriox_imsg_ctrl_t sriox_imsg_ctrl;
	cvmx_sriox_status_reg_t srio_status_reg;
	cvmx_dpi_ctl_t dpi_ctl;
	/* SRIO interfaces are numbered starting at 4 on this chip family;
	   presumably that offset is fixed by the SoC layout — verify */
	int srio_port = interface - 4;

	/* All SRIO ports have a cvmx_srio_rx_message_header_t header
	   on them that must be skipped by IPD */
	for (index = 0; index < num_ports; index++) {
		cvmx_pip_prt_cfgx_t port_config;
		cvmx_sriox_omsg_portx_t sriox_omsg_portx;
		cvmx_sriox_omsg_sp_mrx_t sriox_omsg_sp_mrx;
		cvmx_sriox_omsg_fmp_mrx_t sriox_omsg_fmp_mrx;
		cvmx_sriox_omsg_nmp_mrx_t sriox_omsg_nmp_mrx;
		int ipd_port = cvmx_helper_get_ipd_port(interface, index);

		port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
		/* Only change the skip if the user hasn't already set it */
		if (!port_config.s.skip) {
			port_config.s.skip = sizeof(cvmx_srio_rx_message_header_t);
			cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_config.u64);
		}

		/* Enable TX with PKO */
		sriox_omsg_portx.u64 = cvmx_read_csr(CVMX_SRIOX_OMSG_PORTX(index, srio_port));
		sriox_omsg_portx.s.port = (srio_port) * 2 + index;
		sriox_omsg_portx.s.enable = 1;
		cvmx_write_csr(CVMX_SRIOX_OMSG_PORTX(index, srio_port), sriox_omsg_portx.u64);

		/* Allow OMSG controller to send regardless of the state of
		   any other controller. Allow messages to different IDs
		   and MBOXes to go in parallel */
		sriox_omsg_sp_mrx.u64 = 0;
		sriox_omsg_sp_mrx.s.xmbox_sp = 1;
		sriox_omsg_sp_mrx.s.ctlr_sp = 1;
		sriox_omsg_sp_mrx.s.ctlr_fmp = 1;
		sriox_omsg_sp_mrx.s.ctlr_nmp = 1;
		sriox_omsg_sp_mrx.s.id_sp = 1;
		sriox_omsg_sp_mrx.s.id_fmp = 1;
		sriox_omsg_sp_mrx.s.id_nmp = 1;
		sriox_omsg_sp_mrx.s.mbox_sp = 1;
		sriox_omsg_sp_mrx.s.mbox_fmp = 1;
		sriox_omsg_sp_mrx.s.mbox_nmp = 1;
		sriox_omsg_sp_mrx.s.all_psd = 1;
		cvmx_write_csr(CVMX_SRIOX_OMSG_SP_MRX(index, srio_port), sriox_omsg_sp_mrx.u64);

		/* Allow OMSG controller to send regardless of the state of
		   any other controller. Allow messages to different IDs
		   and MBOXes to go in parallel */
		sriox_omsg_fmp_mrx.u64 = 0;
		sriox_omsg_fmp_mrx.s.ctlr_sp = 1;
		sriox_omsg_fmp_mrx.s.ctlr_fmp = 1;
		sriox_omsg_fmp_mrx.s.ctlr_nmp = 1;
		sriox_omsg_fmp_mrx.s.id_sp = 1;
		sriox_omsg_fmp_mrx.s.id_fmp = 1;
		sriox_omsg_fmp_mrx.s.id_nmp = 1;
		sriox_omsg_fmp_mrx.s.mbox_sp = 1;
		sriox_omsg_fmp_mrx.s.mbox_fmp = 1;
		sriox_omsg_fmp_mrx.s.mbox_nmp = 1;
		sriox_omsg_fmp_mrx.s.all_psd = 1;
		cvmx_write_csr(CVMX_SRIOX_OMSG_FMP_MRX(index, srio_port), sriox_omsg_fmp_mrx.u64);

		/* Once the first part of a message is accepted, always
		   accept the rest of the message */
		sriox_omsg_nmp_mrx.u64 = 0;
		sriox_omsg_nmp_mrx.s.all_sp = 1;
		sriox_omsg_nmp_mrx.s.all_fmp = 1;
		sriox_omsg_nmp_mrx.s.all_nmp = 1;
		cvmx_write_csr(CVMX_SRIOX_OMSG_NMP_MRX(index, srio_port), sriox_omsg_nmp_mrx.u64);
	}

	/* Choose the receive controller based on the mailbox */
	sriox_imsg_ctrl.u64 = cvmx_read_csr(CVMX_SRIOX_IMSG_CTRL(srio_port));
	sriox_imsg_ctrl.s.prt_sel = 0;
	sriox_imsg_ctrl.s.mbox = 0xa;
	cvmx_write_csr(CVMX_SRIOX_IMSG_CTRL(srio_port), sriox_imsg_ctrl.u64);

	/* DPI must be enabled for us to RX messages */
	dpi_ctl.u64 = cvmx_read_csr(CVMX_DPI_CTL);
	dpi_ctl.s.clk = 1;
	dpi_ctl.s.en = 1;
	cvmx_write_csr(CVMX_DPI_CTL, dpi_ctl.u64);

	/* Make sure register access is allowed */
	srio_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(srio_port));
	if (!srio_status_reg.s.access)
		return 0;

	/* Enable RX */
	if (!cvmx_srio_config_read32(srio_port, 0, -1, 0, 0,
				     CVMX_SRIOMAINTX_CORE_ENABLES(srio_port),
				     &sriomaintx_core_enables.u32)) {
		sriomaintx_core_enables.s.imsg0 = 1;
		sriomaintx_core_enables.s.imsg1 = 1;
		cvmx_srio_config_write32(srio_port, 0, -1, 0, 0,
					 CVMX_SRIOMAINTX_CORE_ENABLES(srio_port),
					 sriomaintx_core_enables.u32);
	}
	return 0;
}
/**
 * cvm_oct_ioctl_hwtstamp - IOCTL support for timestamping
 * @dev: Device to change
 * @rq: the request
 * @cmd: the command
 *
 * Returns Zero on success
 */
static int cvm_oct_ioctl_hwtstamp(struct net_device *dev,
				  struct ifreq *rq, int cmd)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	union cvmx_mio_ptp_clock_cfg ptp;
	int have_hw_timestamps = 0;

	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags) /* reserved for future extensions */
		return -EINVAL;

	/* Check the status of hardware for timestamps */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		/* Write TX timestamp into word 4 */
		cvmx_write_csr(CVMX_PKO_REG_TIMESTAMP, 4);
		/* Get the current state of the PTP clock */
		ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
		if (!ptp.s.ext_clk_en) {
			/*
			 * The clock has not been configured to use an
			 * external source. Program it to use the main clock
			 * reference.
			 */
			unsigned long long clock_comp = (NSEC_PER_SEC << 32) /
				octeon_get_io_clock_rate();
			cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
			pr_info("PTP Clock: Using sclk reference at %lld Hz\n",
				(NSEC_PER_SEC << 32) / clock_comp);
		} else {
			/* The clock is already programmed to use a GPIO */
			unsigned long long clock_comp = cvmx_read_csr(
				CVMX_MIO_PTP_CLOCK_COMP);
			pr_info("PTP Clock: Using GPIO %d at %lld Hz\n",
				ptp.s.ext_clk_in,
				(NSEC_PER_SEC << 32) / clock_comp);
		}
		/* Enable the clock if it wasn't done already */
		if (!ptp.s.ptp_en) {
			ptp.s.ptp_en = 1;
			cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
		}
		have_hw_timestamps = 1;
		/* Only the first two interfaces support hardware timestamps */
		if (priv->port >= 32)
			have_hw_timestamps = 0;
	}

	/* Require hardware if ALLOW_TIMESTAMPS_WITHOUT_HARDWARE=0 */
	if (!ALLOW_TIMESTAMPS_WITHOUT_HARDWARE && !have_hw_timestamps)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->flags &= ~(OCTEON_ETHERNET_FLAG_TX_TIMESTAMP_SW |
				 OCTEON_ETHERNET_FLAG_TX_TIMESTAMP_HW);
		break;
	case HWTSTAMP_TX_ON:
		/* Prefer hardware stamping; fall back to software */
		priv->flags |= (have_hw_timestamps) ?
			OCTEON_ETHERNET_FLAG_TX_TIMESTAMP_HW :
			OCTEON_ETHERNET_FLAG_TX_TIMESTAMP_SW;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->flags &= ~(OCTEON_ETHERNET_FLAG_RX_TIMESTAMP_HW |
				 OCTEON_ETHERNET_FLAG_RX_TIMESTAMP_SW);
		if (have_hw_timestamps) {
			int interface = INTERFACE(priv->port);
			int index = INDEX(priv->port);
			union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
			union cvmx_pip_prt_cfgx pip_prt_cfgx;

			/* Turn PTP mode off and stop skipping the timestamp */
			gmxx_rxx_frm_ctl.u64 =
				cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
			gmxx_rxx_frm_ctl.s.ptp_mode = 0;
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface),
				       gmxx_rxx_frm_ctl.u64);
			pip_prt_cfgx.u64 =
				cvmx_read_csr(CVMX_PIP_PRT_CFGX(priv->port));
			pip_prt_cfgx.s.skip = 0;
			cvmx_write_csr(CVMX_PIP_PRT_CFGX(priv->port),
				       pip_prt_cfgx.u64);
		}
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		priv->flags |= (have_hw_timestamps) ?
			OCTEON_ETHERNET_FLAG_RX_TIMESTAMP_HW :
			OCTEON_ETHERNET_FLAG_RX_TIMESTAMP_SW;
		/* Per-filter stamping isn't selectable; report stamping all */
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		if (have_hw_timestamps) {
			int interface = INTERFACE(priv->port);
			int index = INDEX(priv->port);
			union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
			union cvmx_pip_prt_cfgx pip_prt_cfgx;

			/* Turn PTP mode on and skip the 8-byte timestamp */
			gmxx_rxx_frm_ctl.u64 =
				cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
			gmxx_rxx_frm_ctl.s.ptp_mode = 1;
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface),
				       gmxx_rxx_frm_ctl.u64);
			pip_prt_cfgx.u64 =
				cvmx_read_csr(CVMX_PIP_PRT_CFGX(priv->port));
			pip_prt_cfgx.s.skip = 8;
			cvmx_write_csr(CVMX_PIP_PRT_CFGX(priv->port),
				       pip_prt_cfgx.u64);
		}
		break;
	default:
		return -ERANGE;
	}

	if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
		return -EFAULT;
	return 0;
}