/**
 * Assign a MAC address from the pool of available MAC addresses.
 * Can return as either a 64-bit value and/or 6 octets.
 *
 * @param macp Filled in with the assigned address if non-NULL
 * @param octets Filled in with the assigned address if non-NULL
 * @return Zero on success, ENXIO if no address is available
 */
int cvm_assign_mac_address(uint64_t *macp, uint8_t *octets)
{
	/* Initialize from global MAC address base; fail if not set */
	if (cvm_oct_mac_addr == 0) {
		/*
		 * Copy the 6-octet base into the low 6 bytes of the 64-bit
		 * counter.  NOTE(review): this byte overlay assumes
		 * big-endian storage (MIPS) so that incrementing the
		 * uint64_t increments the last address octet — confirm if
		 * ever ported.
		 */
		memcpy((uint8_t *)&cvm_oct_mac_addr + 2,
		       cvmx_sysinfo_get()->mac_addr_base, 6);

		if (cvm_oct_mac_addr == 0)
			return ENXIO;

		/*
		 * By convention, any management ports get the first
		 * addresses from the pool, so skip over them.
		 */
		cvm_oct_mac_addr_offset = cvmx_mgmt_port_num_ports();
		cvm_oct_mac_addr += cvm_oct_mac_addr_offset;
	}

	if (cvm_oct_mac_addr_offset >= cvmx_sysinfo_get()->mac_addr_count)
		return ENXIO;	/* Out of addresses to assign */

	if (macp)
		*macp = cvm_oct_mac_addr;
	if (octets)
		memcpy(octets, (u_int8_t *)&cvm_oct_mac_addr + 2, 6);

	/* Advance the pool state for the next caller. */
	cvm_oct_mac_addr++;
	cvm_oct_mac_addr_offset++;

	return 0;
}
/*
 * Enumerate the number of ports available on a SPI interface,
 * with board-specific overrides for known vendor boards.
 */
int __cvmx_helper_spi_enumerate(int interface)
{
#if defined(OCTEON_VENDOR_LANNER)
	if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_LANNER_MR955) {
		cvmx_pko_reg_crc_enable_t enable;

		/*
		 * NOTE(review): this mask appears intended to clear PKO CRC
		 * generation for this interface's ports while keeping the
		 * other interface's bits (shift is 16 for interface 0 and 0
		 * for interface 1) — confirm against the MR955 bring-up code.
		 */
		enable.u64 = cvmx_read_csr(CVMX_PKO_REG_CRC_ENABLE);
		enable.s.enable &= 0xffff << (16 - (interface*16));
		cvmx_write_csr(CVMX_PKO_REG_CRC_ENABLE, enable.u64);

		if (interface == 1)
			return 12;
		/* XXX This is not entirely true. */
		return 0;
	}
#endif
#if defined(OCTEON_VENDOR_RADISYS)
	if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE) {
		/* Fixed port counts for the RSYS4GBE backplane. */
		if (interface == 0)
			return 13;
		if (interface == 1)
			return 8;
		return 0;
	}
#endif
	/* A detected SPI4000 has 10 ports; generic SPI devices get 16. */
	if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
	    cvmx_spi4000_is_present(interface))
		return 10;
	else
		return 16;
}
static int sysctl_machdep_led_display(SYSCTL_HANDLER_ARGS) { size_t buflen; char buf[9]; int error; if (req->newptr == NULL) return (EINVAL); if (cvmx_sysinfo_get()->led_display_base_addr == 0) return (ENODEV); /* * Revision 1.x of the EBT3000 only supports 4 characters, but * other devices support 8. */ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBT3000 && cvmx_sysinfo_get()->board_rev_major == 1) buflen = 4; else buflen = 8; if (req->newlen > buflen) return (E2BIG); error = SYSCTL_IN(req, buf, req->newlen); if (error != 0) return (error); buf[req->newlen] = '\0'; ebt3000_str_write(buf); return (0); }
/**
 * @INTERNAL
 * Bringup and enable a SPI interface. After this call packet I/O
 * should be fully functional. This is called with IPD enabled but
 * PKO disabled.
 *
 * @param interface Interface to bring up
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_spi_enable(int interface)
{
	/*
	 * Normally the ethernet L2 CRC is checked and stripped in the GMX
	 * block. When you are using SPI, this isn't the case and IPD needs
	 * to check the L2 CRC.
	 */
	int num_ports = cvmx_helper_ports_on_interface(interface);
	int ipd_port;

	for (ipd_port=interface*16; ipd_port<interface*16+num_ports; ipd_port++) {
		cvmx_pip_prt_cfgx_t port_config;
		port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
		port_config.s.crc_en = 1;
#ifdef OCTEON_VENDOR_RADISYS
		/*
		 * Incoming packets on the RSYS4GBE have the FCS stripped,
		 * so IPD must not check the CRC there.
		 */
		if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE)
			port_config.s.crc_en = 0;
#endif
		cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_config.u64);
	}

	/* The simulator has no real SPI link to bring up. */
	if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) {
		cvmx_spi_start_interface(interface, CVMX_SPI_MODE_DUPLEX,
					 CVMX_HELPER_SPI_TIMEOUT, num_ports);
		if (cvmx_spi4000_is_present(interface))
			cvmx_spi4000_initialize(interface);
	}
	return 0;
}
/**
 * Request shutdown of the currently running core. Should be
 * called by the application when it has been registered with
 * app_shutdown option set to 1.
 */
void cvmx_app_hotplug_core_shutdown(void)
{
	uint32_t flags;

	if (cvmx_app_hotplug_info_ptr->shutdown_cores) {
		cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();

		/* Wait for every core to reach this point before teardown. */
		__cvmx_app_hotplug_sync();
		if (cvmx_coremask_first_core(sys_info_ptr->core_mask)) {
			/*
			 * Exactly one core (the first in the mask) clears the
			 * hotplug info block and flags shutdown completion.
			 */
			bzero(cvmx_app_hotplug_info_ptr, sizeof(*cvmx_app_hotplug_info_ptr));
#ifdef DEBUG
			printf("__cvmx_app_hotplug_shutdown(): setting shutdown done! \n");
#endif
			cvmx_app_hotplug_info_ptr->shutdown_done = 1;
		}
		/* Tell the debugger that this application is finishing. */
		cvmx_debug_finish ();
		/*
		 * Interrupts stay disabled from here on; the saved flags
		 * are never restored because the core resets below.
		 */
		flags = cvmx_interrupt_disable_save();
		__cvmx_app_hotplug_sync();
		/* Reset the core */
		__cvmx_app_hotplug_reset();
	} else {
		/* Only this core leaves: remove it from the core masks. */
		cvmx_sysinfo_remove_self_from_core_mask();
		cvmx_app_hotplug_remove_self_from_core_mask();
		flags = cvmx_interrupt_disable_save();
		__cvmx_app_hotplug_reset();
	}
}
/**
 * This routine deprecates the cvmx_app_hotplug_register method. It
 * registers the application for hotplug; the application will receive
 * CPU hotplug callbacks. The individual callbacks are specified in cb;
 * cvmx_app_hotplug_callbacks_t documents them.
 *
 * This routine only needs to be called once per application.
 *
 * @param cb            Callback routines from the application.
 * @param arg           Argument passed to the application callbacks.
 * @param app_shutdown  When set to 1 the application will invoke
 *                      core_shutdown on each core. When set to 0 core
 *                      shutdown will be invoked automatically after
 *                      invoking the application callback.
 *
 * @return Zero on success, -1 on failure.
 */
int cvmx_app_hotplug_register_cb(cvmx_app_hotplug_callbacks_t *cb, void* arg,
                                 int app_shutdown)
{
	cvmx_app_hotplug_info_t *app_info;

	/* Find the list of applications launched by bootoct utility. */
	app_info = cvmx_app_hotplug_get_info(cvmx_sysinfo_get()->core_mask);
	cvmx_app_hotplug_info_ptr = app_info;
	if (!app_info) {
		/* Application not launched by bootoct? */
		printf("ERROR: cvmx_app_hotplug_register_cb() failed\n");
		return -1;
	}

	/* Register the callbacks (stored as 64-bit casts in shared memory). */
	app_info->data = CAST64(arg);
	app_info->shutdown_callback = CAST64(cb->shutdown_callback);
	app_info->cores_added_callback = CAST64(cb->cores_added_callback);
	app_info->cores_removed_callback = CAST64(cb->cores_removed_callback);
	app_info->unplug_callback = CAST64(cb->unplug_core_callback);
	app_info->hotplug_start = CAST64(cb->hotplug_start);
	app_info->app_shutdown = app_shutdown;

#ifdef DEBUG
	printf("cvmx_app_hotplug_register(): coremask 0x%x valid %d\n",
	       app_info->coremask, app_info->valid);
#endif

	/* Shutdown requests arrive as mailbox-0 interrupts. */
	cvmx_interrupt_register(CVMX_IRQ_MBOX0, __cvmx_app_hotplug_shutdown, NULL);
	return 0;
}
/** * Rate limit a PKO port to a max bits/sec. This function is only * supported on CN51XX and higher, excluding CN58XX. * * @port: Port to rate limit * @bits_s: PKO rate limit in bits/sec * @burst: Maximum number of bits to burst before rate * limiting cuts in. * * Returns Zero on success, negative on failure */ int cvmx_pko_rate_limit_bits(int port, uint64_t bits_s, int burst) { union cvmx_pko_mem_port_rate0 pko_mem_port_rate0; union cvmx_pko_mem_port_rate1 pko_mem_port_rate1; uint64_t clock_rate = cvmx_sysinfo_get()->cpu_clock_hz; uint64_t tokens_per_bit = clock_rate * 16 / bits_s; pko_mem_port_rate0.u64 = 0; pko_mem_port_rate0.s.pid = port; /* * Each packet has a 12 bytes of interframe gap, an 8 byte * preamble, and a 4 byte CRC. These are not included in the * per word count. Multiply by 8 to covert to bits and divide * by 256 for limit granularity. */ pko_mem_port_rate0.s.rate_pkt = (12 + 8 + 4) * 8 * tokens_per_bit / 256; /* Each 8 byte word has 64bits */ pko_mem_port_rate0.s.rate_word = 64 * tokens_per_bit; pko_mem_port_rate1.u64 = 0; pko_mem_port_rate1.s.pid = port; pko_mem_port_rate1.s.rate_lim = tokens_per_bit * burst / 256; cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64); cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64); return 0; }
/**
 * Per network device initialization
 *
 * @param ifp Interface to initialize
 * @return Zero on success, ENXIO if no MAC address could be assigned
 */
int cvm_oct_common_init(struct ifnet *ifp)
{
	uint8_t mac[6];
	cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;

	/* Pull the next address from the global MAC pool. */
	if (cvm_assign_mac_address(NULL, mac) != 0)
		return ENXIO;

	ifp->if_mtu = ETHERMTU;

	cvm_oct_mdio_setup_device(ifp);

	cvm_oct_common_set_mac_address(ifp, mac);
	cvm_oct_common_change_mtu(ifp, ifp->if_mtu);

	/*
	 * Do any last-minute board-specific initialization.
	 */
	switch (cvmx_sysinfo_get()->board_type) {
#if defined(OCTEON_VENDOR_LANNER)
	case CVMX_BOARD_TYPE_CUST_LANNER_MR320:
	case CVMX_BOARD_TYPE_CUST_LANNER_MR321X:
		/* PHY id 16 selects the Marvell 88E61xx switch setup path. */
		if (priv->phy_id == 16)
			cvm_oct_mv88e61xx_setup_device(ifp);
		break;
#endif
	default:
		break;
	}

	device_attach(priv->dev);

	return 0;
}
/*
 * Return the link state of an AGL (RGMII management) port.  The
 * simulator and force-link-up ports report fixed link parameters;
 * everything else defers to the board-specific link query.
 */
cvmx_helper_link_info_t cvmx_agl_link_get(int port)
{
	cvmx_helper_link_info_t info;
	int iface, idx;

	/* The simulator always reports a 100 Mbps full-duplex link. */
	if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM) {
		info.u64 = 0;
		info.s.full_duplex = 1;
		info.s.link_up = 1;
		info.s.speed = 100;
		return info;
	}

	/* Fake IPD port is used on some older models. */
	if (port < 0)
		return __cvmx_helper_board_link_get(port);

	iface = cvmx_helper_get_interface_num(port);
	idx = cvmx_helper_get_interface_index_num(port);

	/* Ports forced up report a gigabit full-duplex link. */
	if (cvmx_helper_get_port_force_link_up(iface, idx)) {
		info.u64 = 0;
		info.s.full_duplex = 1;
		info.s.link_up = 1;
		info.s.speed = 1000;
		return info;
	}

	return __cvmx_helper_board_link_get(port);
}
/**
 * Initialize the internal QLM JTAG logic to allow programming
 * of the JTAG chain by the cvmx_helper_qlm_jtag_*() functions.
 * These functions should only be used at the direction of Cavium
 * Networks. Programming incorrect values into the JTAG chain
 * can cause chip damage.
 */
void cvmx_helper_qlm_jtag_init(void)
{
	union cvmx_ciu_qlm_jtgc jtgc;
	uint32_t clock_div = 0;
	/* Target roughly a 25 MHz JTAG clock derived from the core clock. */
	uint32_t divisor = cvmx_sysinfo_get()->cpu_clock_hz / (25 * 1000000);

	/* Account for the hardware's implicit divide by 4 (2^(CLK_DIV+2)). */
	divisor = (divisor - 1) >> 2;

	/* Convert the divisor into a power of 2 shift */
	while (divisor) {
		clock_div++;
		divisor = divisor >> 1;
	}

	/*
	 * Clock divider for QLM JTAG operations. eclk is divided by
	 * 2^(CLK_DIV + 2)
	 */
	jtgc.u64 = 0;
	jtgc.s.clk_div = clock_div;
	jtgc.s.mux_sel = 0;
	/*
	 * Bypass mask: 0x3 on CN52XX, 0xf elsewhere — presumably one bit
	 * per QLM; confirm against the hardware reference manual.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN52XX))
		jtgc.s.bypass = 0x3;
	else
		jtgc.s.bypass = 0xf;
	cvmx_write_csr(CVMX_CIU_QLM_JTGC, jtgc.u64);
	/* Read back to make sure the write has taken effect. */
	cvmx_read_csr(CVMX_CIU_QLM_JTGC);
}
/*
 * Bring up and enable a SPI interface; packet I/O should be fully
 * functional afterwards.  Returns zero on success.
 */
int __cvmx_helper_spi_enable(int interface)
{
	/*
	 * Normally the ethernet L2 CRC is checked and stripped in the
	 * GMX block. When you are using SPI, this isn't the case and
	 * IPD needs to check the L2 CRC.
	 */
	int num_ports = cvmx_helper_ports_on_interface(interface);
	int ipd_port;

	for (ipd_port = interface * 16;
	     ipd_port < interface * 16 + num_ports; ipd_port++) {
		union cvmx_pip_prt_cfgx port_config;
		port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
		port_config.s.crc_en = 1;
		cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_config.u64);
	}

	/* The simulator has no real SPI link to start. */
	if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) {
		cvmx_spi_start_interface(interface, CVMX_SPI_MODE_DUPLEX,
					 CVMX_HELPER_SPI_TIMEOUT, num_ports);
		if (cvmx_spi4000_is_present(interface))
			cvmx_spi4000_initialize(interface);
	}

	/* Enable SPX/STX/GMX interrupt reporting for this interface. */
	__cvmx_interrupt_spxx_int_msk_enable(interface);
	__cvmx_interrupt_stxx_int_msk_enable(interface);
	__cvmx_interrupt_gmxx_enable(interface);

	return 0;
}
/*
 * Tear down an RGMII port: common uninit, then stop polling the
 * inband link status for true RGMII ports.
 */
void cvm_oct_rgmii_uninit(struct ifnet *ifp)
{
	cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
	cvm_oct_common_uninit(ifp);

	/*
	 * Only true RGMII ports need to be polled. In GMII mode, port 0
	 * is really a RGMII port.
	 */
	if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII) &&
	     (priv->port == 0)) ||
	    (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {

		if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) {
			cvmx_gmxx_rxx_int_en_t gmx_rx_int_en;
			int interface = INTERFACE(priv->port);
			int index = INDEX(priv->port);

			/* Disable interrupts on inband status changes for this port */
			gmx_rx_int_en.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_EN(index, interface));
			gmx_rx_int_en.s.phy_dupx = 0;
			gmx_rx_int_en.s.phy_link = 0;
			gmx_rx_int_en.s.phy_spd = 0;
			cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface), gmx_rx_int_en.u64);
		}
	}

	/* Remove the interrupt handler when the last port is removed */
	number_rgmii_ports--;
	if (number_rgmii_ports == 0)
		panic("%s: need to implement IRQ release.", __func__);
}
/**
 * Perform initialization required only once for an SGMII port.
 *
 * @interface: Interface to init
 * @index:     Index of port on the interface
 *
 * Returns Zero on success, negative on failure
 */
static int __cvmx_helper_sgmii_hardware_init_one_time(int interface, int index)
{
	const uint64_t clock_mhz = cvmx_sysinfo_get()->cpu_clock_hz / 1000000;
	union cvmx_pcsx_miscx_ctl_reg pcs_misc_ctl_reg;
	union cvmx_pcsx_linkx_timer_count_reg pcsx_linkx_timer_count_reg;
	union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;

	/* Disable GMX */
	gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	gmxx_prtx_cfg.s.en = 0;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);

	/*
	 * Write PCS*_LINK*_TIMER_COUNT_REG[COUNT] with the
	 * appropriate value. 1000BASE-X specifies a 10ms
	 * interval. SGMII specifies a 1.6ms interval.
	 */
	pcs_misc_ctl_reg.u64 =
	    cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
	pcsx_linkx_timer_count_reg.u64 =
	    cvmx_read_csr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface));
	if (pcs_misc_ctl_reg.s.mode) {
		/* 1000BASE-X: timer count for a 10ms interval. */
		pcsx_linkx_timer_count_reg.s.count = (10000ull * clock_mhz) >> 10;
	} else {
		/* NOTE(review): function continues beyond this chunk (SGMII branch). */
/** * Enable port. */ int cvm_oct_common_open(struct ifnet *ifp) { cvmx_gmxx_prtx_cfg_t gmx_cfg; cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc; int interface = INTERFACE(priv->port); int index = INDEX(priv->port); cvmx_helper_link_info_t link_info; gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); gmx_cfg.s.en = 1; cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); /* * Set the link state unless we are using MII. */ if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM && priv->miibus == NULL) { link_info = cvmx_helper_link_get(priv->port); if (!link_info.s.link_up) if_link_state_change(ifp, LINK_STATE_DOWN); else if_link_state_change(ifp, LINK_STATE_UP); } return 0; }
/*
 * Bring up and enable a SPI interface; packet I/O should be fully
 * functional afterwards.  Returns zero on success.
 */
int __cvmx_helper_spi_enable(int interface)
{
	/*
	 * IPD checks the L2 CRC on SPI ports because the GMX block,
	 * which normally checks and strips it, is not in the SPI data
	 * path.
	 */
	int num_ports = cvmx_helper_ports_on_interface(interface);
	int ipd_port;

	for (ipd_port = interface * 16;
	     ipd_port < interface * 16 + num_ports; ipd_port++) {
		union cvmx_pip_prt_cfgx port_config;
		port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
		port_config.s.crc_en = 1;
		cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_config.u64);
	}

	/* The simulator has no real SPI link to start. */
	if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) {
		cvmx_spi_start_interface(interface, CVMX_SPI_MODE_DUPLEX,
					 CVMX_HELPER_SPI_TIMEOUT, num_ports);
		if (cvmx_spi4000_is_present(interface))
			cvmx_spi4000_initialize(interface);
	}

	/* Enable SPX/STX/GMX interrupt reporting for this interface. */
	__cvmx_interrupt_spxx_int_msk_enable(interface);
	__cvmx_interrupt_stxx_int_msk_enable(interface);
	__cvmx_interrupt_gmxx_enable(interface);

	return 0;
}
/**
 * Get clock rate based on the clock type.
 *
 * @param node - CPU node number
 * @param clock - Enumeration of the clock type.
 * @return - return the clock rate, or 0 for an unknown clock type.
 */
uint64_t cvmx_clock_get_rate_node(int node, cvmx_clock_t clock)
{
	/* All rates are derived from the 50 MHz reference oscillator. */
	const uint64_t REF_CLOCK = 50000000;

#ifdef CVMX_BUILD_FOR_UBOOT
	/*
	 * NOTE(review): outside of U-Boot builds, rate_eclk/rate_sclk/
	 * rate_dclk are presumably file-scope statics declared elsewhere
	 * (not visible in this chunk), so the computed rates are cached
	 * across calls — confirm against the full source file.
	 */
	uint64_t rate_eclk = 0;
	uint64_t rate_sclk = 0;
	uint64_t rate_dclk = 0;
#endif

	if (cvmx_unlikely(!rate_eclk)) {
		/* Note: The order of these checks is important.
		** octeon_has_feature(OCTEON_FEATURE_PCIE) is true for both 6XXX
		** and 52XX/56XX, so OCTEON_FEATURE_NPEI _must_ be checked first */
		if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
			cvmx_npei_dbg_data_t npei_dbg_data;
			npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
			rate_eclk = REF_CLOCK * npei_dbg_data.s.c_mul;
			rate_sclk = rate_eclk;
		} else if (OCTEON_IS_OCTEON3()) {
			cvmx_rst_boot_t rst_boot;
			rst_boot.u64 = cvmx_read_csr_node(node, CVMX_RST_BOOT);
			rate_eclk = REF_CLOCK * rst_boot.s.c_mul;
			rate_sclk = REF_CLOCK * rst_boot.s.pnr_mul;
		} else if (octeon_has_feature(OCTEON_FEATURE_PCIE)) {
			cvmx_mio_rst_boot_t mio_rst_boot;
			mio_rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
			rate_eclk = REF_CLOCK * mio_rst_boot.s.c_mul;
			rate_sclk = REF_CLOCK * mio_rst_boot.s.pnr_mul;
		} else {
			cvmx_dbg_data_t dbg_data;
			dbg_data.u64 = cvmx_read_csr(CVMX_DBG_DATA);
			rate_eclk = REF_CLOCK * dbg_data.s.c_mul;
			rate_sclk = rate_eclk;
		}
	}

	switch (clock) {
	case CVMX_CLOCK_SCLK:
	case CVMX_CLOCK_TIM:
	case CVMX_CLOCK_IPD:
		return rate_sclk;

	case CVMX_CLOCK_RCLK:
	case CVMX_CLOCK_CORE:
		return rate_eclk;

	case CVMX_CLOCK_DDR:
#if !defined(CVMX_BUILD_FOR_LINUX_HOST) && !defined(CVMX_BUILD_FOR_TOOLCHAIN)
		/* DDR data rate comes from the boot-time sysinfo block. */
		if (cvmx_unlikely(!rate_dclk))
			rate_dclk = cvmx_sysinfo_get()->dram_data_rate_hz;
#endif
		return rate_dclk;
	}

	cvmx_dprintf("cvmx_clock_get_rate: Unknown clock type\n");
	return 0;
}
/*
 * Validate the boot descriptor handed over by the bootloader,
 * populate sysinfo/bootmem from it, and greet the user on the LED
 * display.  Any inconsistency resets the platform.
 */
static void octeon_boot_params_init(register_t ptr)
{
	octeon_boot_descriptor_t *app_desc_ptr;
	cvmx_bootinfo_t *octeon_bootinfo;

	if (ptr == 0 || ptr >= MAX_APP_DESC_ADDR) {
		cvmx_safe_printf("app descriptor passed at invalid address %#jx\n",
		    (uintmax_t)ptr);
		platform_reset();
	}

	app_desc_ptr = (octeon_boot_descriptor_t *)(intptr_t)ptr;
	/* Only descriptor version 6 and later is understood. */
	if (app_desc_ptr->desc_version < 6) {
		cvmx_safe_printf("Your boot code is too old to be supported.\n");
		platform_reset();
	}
	octeon_bootinfo = octeon_process_app_desc_ver_6(app_desc_ptr);
	if (octeon_bootinfo == NULL) {
		cvmx_safe_printf("Could not parse boot descriptor.\n");
		platform_reset();
	}

	if (cvmx_sysinfo_get()->led_display_base_addr != 0) {
		/*
		 * Revision 1.x of the EBT3000 only supports 4 characters, but
		 * other devices support 8.
		 */
		if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBT3000 &&
		    cvmx_sysinfo_get()->board_rev_major == 1)
			ebt3000_str_write("FBSD");
		else
			ebt3000_str_write("FreeBSD!");
	}

	/* Bootmem cannot be set up without a memory descriptor. */
	if (cvmx_sysinfo_get()->phy_mem_desc_addr == (uint64_t)0) {
		cvmx_safe_printf("Your boot loader did not supply a memory descriptor.\n");
		platform_reset();
	}
	cvmx_bootmem_init(cvmx_sysinfo_get()->phy_mem_desc_addr);

	octeon_feature_init();

	__cvmx_helper_cfg_init();
}
/*
 * Count the ports on a SPI interface: a detected SPI4000 provides
 * 10 ports; anything else (including the simulator) is treated as
 * a generic 16-port SPI device.
 */
int __cvmx_helper_spi_enumerate(int interface)
{
	int has_spi4000;

	has_spi4000 = cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM &&
	    cvmx_spi4000_is_present(interface);
	return has_spi4000 ? 10 : 16;
}
/**
 * Assign a MAC address from the pool of available MAC addresses.
 * Can return as either a 64-bit value and/or 6 octets.
 *
 * @param macp Filled in with the assigned address if non-NULL
 * @param octets Filled in with the assigned address if non-NULL
 * @return Zero on success, ENXIO if no address is available
 */
int cvm_assign_mac_address(uint64_t *macp, uint8_t *octets)
{
	/* Initialize from global MAC address base; fail if not set */
	if (cvm_oct_mac_addr == 0) {
		/*
		 * Copy the 6-octet base into the low 6 bytes of the 64-bit
		 * counter.  NOTE(review): this byte overlay assumes
		 * big-endian storage (MIPS) — confirm if ever ported.
		 */
		memcpy((uint8_t *)&cvm_oct_mac_addr + 2,
		       cvmx_sysinfo_get()->mac_addr_base, 6);

		if (cvm_oct_mac_addr == 0)
			return ENXIO;

		/*
		 * The offset from mac_addr_base that should be used for the next port
		 * that is configured. By convention, if any mgmt ports exist on the
		 * chip, they get the first mac addresses. The ports controlled by
		 * driver that use this function are numbered sequencially following
		 * any mgmt addresses that may exist.
		 *
		 * XXX Would be nice if __cvmx_mgmt_port_num_ports() were
		 * not static to cvmx-mgmt-port.c.
		 */
		if (OCTEON_IS_MODEL(OCTEON_CN56XX))
			cvm_oct_mac_addr_offset = 1;	/* one mgmt port */
		else if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))
			cvm_oct_mac_addr_offset = 2;	/* two mgmt ports */
		else
			cvm_oct_mac_addr_offset = 0;	/* no mgmt ports */

		cvm_oct_mac_addr += cvm_oct_mac_addr_offset;
	}

	if (cvm_oct_mac_addr_offset >= cvmx_sysinfo_get()->mac_addr_count)
		return ENXIO;	/* Out of addresses to assign */

	if (macp)
		*macp = cvm_oct_mac_addr;
	if (octets)
		memcpy(octets, (u_int8_t *)&cvm_oct_mac_addr + 2, 6);

	/* Advance the pool state for the next caller. */
	cvm_oct_mac_addr++;
	cvm_oct_mac_addr_offset++;

	return 0;
}
/*
 * Fill in the set of CPUs on this platform from the boot-time core
 * mask: bit N set in the core mask means CPU N is present.
 */
void platform_cpu_mask(cpuset_t *mask)
{
	uint64_t cores = cvmx_sysinfo_get()->core_mask;
	uint64_t cpu, bit;

	CPU_ZERO(mask);
	for (cpu = 0, bit = 1; cpu < MAXCPU; cpu++, bit <<= 1) {
		if (cores & bit)
			CPU_SET(cpu, mask);
	}
}
/**
 * Callback to perform link training
 *
 * @interface: The identifier of the packet interface to configure and
 *             use as a SPI interface.
 * @mode:      The operating mode for the SPI interface. The interface
 *             can operate as a full duplex (both Tx and Rx data paths
 *             active) or as a halfplex (either the Tx data path is
 *             active or the Rx data path is active, but not both).
 * @timeout:   Timeout to wait for link to be trained (in seconds)
 *
 * Returns Zero on success, non-zero error code on failure (will cause
 * SPI initialization to abort)
 */
int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode, int timeout)
{
	union cvmx_spxx_trn4_ctl spxx_trn4_ctl;
	union cvmx_spxx_clk_stat stat;
	/* Number of core clock cycles per millisecond. */
	uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
	uint64_t timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
	int rx_training_needed;

	/* SRX0 & STX0 Inf0 Links are configured - begin training */
	union cvmx_spxx_clk_ctl spxx_clk_ctl;
	spxx_clk_ctl.u64 = 0;
	spxx_clk_ctl.s.seetrn = 0;
	spxx_clk_ctl.s.clkdly = 0x10;
	spxx_clk_ctl.s.runbist = 0;
	spxx_clk_ctl.s.statdrv = 0;
	/* This should always be on the opposite edge as statdrv */
	spxx_clk_ctl.s.statrcv = 1;
	spxx_clk_ctl.s.sndtrn = 1;
	spxx_clk_ctl.s.drptrn = 1;
	spxx_clk_ctl.s.rcvtrn = 1;
	spxx_clk_ctl.s.srxdlck = 1;
	cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
	/* Give the link one second to settle. */
	cvmx_wait(1000 * MS);

	/* SRX0 clear the boot bit */
	spxx_trn4_ctl.u64 = cvmx_read_csr(CVMX_SPXX_TRN4_CTL(interface));
	spxx_trn4_ctl.s.clr_boot = 1;
	cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);

	/* Wait for the training sequence to complete */
	cvmx_dprintf("SPI%d: Waiting for training\n", interface);
	cvmx_wait(1000 * MS);
	/* Wait a really long time here */
	timeout_time = cvmx_get_cycle() + 1000ull * MS * 600;
	/*
	 * The HRM says we must wait for 34 + 16 * MAXDIST training sequences.
	 * We'll be pessimistic and wait for a lot more.
	 */
	rx_training_needed = 500;
	do {
		stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
		if (stat.s.srxtrn && rx_training_needed) {
			rx_training_needed--;
			/*
			 * Writing the register back acknowledges the latched
			 * srxtrn bit (presumably write-1-to-clear — confirm
			 * against the HRM), then clear our local copy so the
			 * loop keeps counting fresh training sequences.
			 */
			cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
			stat.s.srxtrn = 0;
		}
		if (cvmx_get_cycle() > timeout_time) {
			cvmx_dprintf("SPI%d: Timeout\n", interface);
			return -1;
		}
	} while (stat.s.srxtrn == 0);

	return 0;
}
/*
 * Build the possible/present CPU maps and the core-id <-> logical-CPU
 * translation tables.  The boot core always becomes CPU 0; remaining
 * present cores get the lowest CPU numbers, and (with CPU hotplug)
 * the rest of the chip's cores are marked possible.
 */
static void __init octeon_smp_setup(void)
{
	const int coreid = cvmx_get_core_num();
	int cpus;
	int id;
	struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();

#ifdef CONFIG_HOTPLUG_CPU
	int core_mask = octeon_get_boot_coremask();
	unsigned int num_cores = cvmx_octeon_num_cores();
#endif

	/* The present CPUs are initially just the boot cpu (CPU 0). */
	for (id = 0; id < NR_CPUS; id++) {
		set_cpu_possible(id, id == 0);
		set_cpu_present(id, id == 0);
	}

	__cpu_number_map[coreid] = 0;
	__cpu_logical_map[0] = coreid;

	/* The present CPUs get the lowest CPU numbers. */
	cpus = 1;
	for (id = 0; id < NR_CPUS; id++) {
		if ((id != coreid) &&
		    cvmx_coremask_is_core_set(&sysinfo->core_mask, id)) {
			set_cpu_possible(cpus, true);
			set_cpu_present(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * The possible CPUs are all those present on the chip.  We
	 * will assign CPU numbers for possible cores as well.  Cores
	 * are always consecutively numbered from 0.
	 */
	for (id = 0; setup_max_cpus && octeon_bootloader_entry_addr &&
		     id < num_cores && id < NR_CPUS; id++) {
		/* Cores not in the boot coremask are possible but not present. */
		if (!(core_mask & (1 << id))) {
			set_cpu_possible(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}
#endif

	octeon_smp_hotplug_setup();
}
/**
 * Convert a nanosecond-based time to the setting used in the boot
 * bus timing register, scaled by the timing multiple in use.
 *
 * @param tim_mult Timing multiple the register is programmed with
 * @param nsecs    Desired duration in nanoseconds
 * @return Register value (clock periods, rounded up)
 */
static uint32_t ns_to_tim_reg(int tim_mult, uint32_t nsecs)
{
	/* 64-bit on purpose: nsecs * MHz can exceed 32 bits. */
	uint64_t clocks_per_us = cvmx_sysinfo_get()->cpu_clock_hz / 1000000;
	uint32_t periods;

	/* Number of eclock periods needed for the requested duration. */
	periods = FLASH_RoundUP(nsecs * clocks_per_us, 1000);

	/* Factor in the timing multiple, if not 1. */
	if (tim_mult != 1)
		periods = FLASH_RoundUP(periods, tim_mult);

	return (periods);
}
/**
 * @INTERNAL
 * Probe a SPI interface and determine the number of ports
 * connected to it. The SPI interface should still be down after
 * this call.
 *
 * @param interface Interface to probe
 *
 * @return Number of ports on the interface. Zero to disable.
 */
int __cvmx_helper_spi_probe(int interface)
{
	int num_ports = 0;

	if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
	    cvmx_spi4000_is_present(interface)) {
		/* The SPI4000 appends the L2 CRC itself. */
		num_ports = 10;
	}
#if defined(OCTEON_VENDOR_LANNER)
	else if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_LANNER_MR955) {
		cvmx_pko_reg_crc_enable_t enable;

		if (interface == 1) {
			num_ports = 12;
		} else {
			/* XXX This is not entirely true. */
			num_ports = 0;
		}

		/*
		 * NOTE(review): this mask appears intended to clear PKO CRC
		 * generation for this interface while preserving the other
		 * interface's bits — confirm against MR955 bring-up code.
		 */
		enable.u64 = cvmx_read_csr(CVMX_PKO_REG_CRC_ENABLE);
		enable.s.enable &= 0xffff << (16 - (interface*16));
		cvmx_write_csr(CVMX_PKO_REG_CRC_ENABLE, enable.u64);
	}
#endif
	else {
		cvmx_pko_reg_crc_enable_t enable;

		num_ports = 16;
		/*
		 * Unlike the SPI4000, most SPI devices don't automatically
		 * put on the L2 CRC. For everything except for the SPI4000
		 * have PKO append the L2 CRC to the packet.
		 */
		enable.u64 = cvmx_read_csr(CVMX_PKO_REG_CRC_ENABLE);
		enable.s.enable |= 0xffff << (interface*16);
		cvmx_write_csr(CVMX_PKO_REG_CRC_ENABLE, enable.u64);
	}
	__cvmx_helper_setup_gmx(interface, num_ports);
	return num_ports;
}
/*
 * Callback to perform SPI link training.  Configures the SPI clock
 * control for training, clears the boot bit, then waits for enough
 * training sequences to be observed.  Returns zero on success, -1 on
 * timeout (aborts SPI initialization).
 */
int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode, int timeout)
{
	union cvmx_spxx_trn4_ctl spxx_trn4_ctl;
	union cvmx_spxx_clk_stat stat;
	/* Number of core clock cycles per millisecond. */
	uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
	uint64_t timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
	int rx_training_needed;

	/* Links are configured - begin training. */
	union cvmx_spxx_clk_ctl spxx_clk_ctl;
	spxx_clk_ctl.u64 = 0;
	spxx_clk_ctl.s.seetrn = 0;
	spxx_clk_ctl.s.clkdly = 0x10;
	spxx_clk_ctl.s.runbist = 0;
	spxx_clk_ctl.s.statdrv = 0;
	/* statrcv should always be on the opposite edge as statdrv. */
	spxx_clk_ctl.s.statrcv = 1;
	spxx_clk_ctl.s.sndtrn = 1;
	spxx_clk_ctl.s.drptrn = 1;
	spxx_clk_ctl.s.rcvtrn = 1;
	spxx_clk_ctl.s.srxdlck = 1;
	cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
	/* Give the link one second to settle. */
	cvmx_wait(1000 * MS);

	/* Clear the boot bit. */
	spxx_trn4_ctl.u64 = cvmx_read_csr(CVMX_SPXX_TRN4_CTL(interface));
	spxx_trn4_ctl.s.clr_boot = 1;
	cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);

	/* Wait for the training sequence to complete. */
	cvmx_dprintf("SPI%d: Waiting for training\n", interface);
	cvmx_wait(1000 * MS);
	/* Allow up to ten minutes for training to finish. */
	timeout_time = cvmx_get_cycle() + 1000ull * MS * 600;
	/*
	 * The HRM says we must wait for 34 + 16 * MAXDIST training
	 * sequences; be pessimistic and wait for a lot more.
	 */
	rx_training_needed = 500;
	do {
		stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
		if (stat.s.srxtrn && rx_training_needed) {
			rx_training_needed--;
			/*
			 * Write the register back to acknowledge the latched
			 * srxtrn bit (presumably write-1-to-clear — confirm
			 * against the HRM), then clear the local copy so the
			 * loop keeps counting fresh training sequences.
			 */
			cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
			stat.s.srxtrn = 0;
		}
		if (cvmx_get_cycle() > timeout_time) {
			cvmx_dprintf("SPI%d: Timeout\n", interface);
			return -1;
		}
	} while (stat.s.srxtrn == 0);

	return 0;
}
/** * @INTERNAL * Return the link state of an IPD/PKO port as returned by * auto negotiation. The result of this function may not match * Octeon's link config if auto negotiation has changed since * the last call to cvmx_helper_link_set(). * * @param ipd_port IPD/PKO port to query * * @return Link state */ cvmx_helper_link_info_t __cvmx_helper_spi_link_get(int ipd_port) { cvmx_helper_link_info_t result; int interface = cvmx_helper_get_interface_num(ipd_port); int index = cvmx_helper_get_interface_index_num(ipd_port); result.u64 = 0; if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM) { /* The simulator gives you a simulated full duplex link */ result.s.link_up = 1; result.s.full_duplex = 1; result.s.speed = 10000; } else if (cvmx_spi4000_is_present(interface)) { cvmx_gmxx_rxx_rx_inbnd_t inband = cvmx_spi4000_check_speed(interface, index); result.s.link_up = inband.s.status; result.s.full_duplex = inband.s.duplex; switch (inband.s.speed) { case 0: /* 10 Mbps */ result.s.speed = 10; break; case 1: /* 100 Mbps */ result.s.speed = 100; break; case 2: /* 1 Gbps */ result.s.speed = 1000; break; case 3: /* Illegal */ result.s.speed = 0; result.s.link_up = 0; break; } } else { /* For generic SPI we can't determine the link, just return some sane results */ result.s.link_up = 1; result.s.full_duplex = 1; result.s.speed = 10000; } return result; }
/*
 * ISR for the incoming shutdown request interrupt.
 */
static void __cvmx_app_hotplug_shutdown(int irq_number, uint64_t registers[32],
                                        void *user_arg)
{
	cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
	uint64_t mbox;
	cvmx_app_hotplug_info_t *ai = cvmx_app_hotplug_info_ptr;
	int dbg = 0;

#ifdef DEBUG
	dbg = 1;
#endif
	/* Mask the mailbox IRQ while this request is handled. */
	cvmx_interrupt_mask_irq(CVMX_IRQ_MBOX0);

	mbox = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()));
	/* Clear the interrupt */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), mbox);
	/* Make sure the write above completes */
	cvmx_read_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()));

	if (!cvmx_app_hotplug_info_ptr) {
		printf("ERROR: Application is not registered for hotplug!\n");
		return;
	}

	/* Every application core must have activated hotplug first. */
	if (ai->hotplug_activated_coremask != sys_info_ptr->core_mask) {
		printf("ERROR: Shutdown requested when not all app cores have "
		       "activated hotplug\n" "Application coremask: 0x%x Hotplug "
		       "coremask: 0x%x\n", (unsigned int)sys_info_ptr->core_mask,
		       (unsigned int)ai->hotplug_activated_coremask);
		return;
	}

	/* Mailbox bit 0 set — presumably the shutdown request; confirm. */
	if (mbox & 1ull) {
		int core = cvmx_get_core_num();

		if (dbg)
			printf("Shutting down application .\n");

		/* Call the application's own callback function */
		if (ai->shutdown_callback) {
			((void(*)(void*))(long)ai->shutdown_callback)(CASTPTR(void *, ai->data));
		}
		/* NOTE(review): function continues beyond this chunk. */
/**
 * Activate the current application core for receiving hotplug shutdown requests.
 *
 * This routine makes sure that each core belonging to the application is enabled
 * to receive the shutdown notification and also provides a barrier sync to make
 * sure that all cores are ready.
 */
int cvmx_app_hotplug_activate(void)
{
	uint64_t cnt = 0;
	uint64_t cnt_interval = 10000000;

	/*
	 * Busy-wait until the hotplug info pointer has been published
	 * (by registration on another core); print progress occasionally.
	 */
	while (!cvmx_app_hotplug_info_ptr) {
		cnt++;
		if ((cnt % cnt_interval) == 0)
			printf("waiting for cnt=%lld\n", (unsigned long long)cnt);
	}

	if (cvmx_app_hotplug_info_ptr->hplugged_cores & (1ull << cvmx_get_core_num())) {
#ifdef DEBUG
		printf("core=%d : is being hotplugged \n", cvmx_get_core_num());
#endif
		/* A freshly hotplugged core adds itself to the app core mask. */
		cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
		sys_info_ptr->core_mask |= 1ull << cvmx_get_core_num();
	} else {
		/* Original cores rendezvous with each other here. */
		__cvmx_app_hotplug_sync();
	}

	cvmx_spinlock_lock(&cvmx_app_hotplug_lock);
	if (!cvmx_app_hotplug_info_ptr) {
		cvmx_spinlock_unlock(&cvmx_app_hotplug_lock);
		printf("ERROR: This application is not registered for hotplug\n");
		return -1;
	}
	/* Enable the interrupt before we mark the core as activated */
	cvmx_interrupt_unmask_irq(CVMX_IRQ_MBOX0);
	cvmx_app_hotplug_info_ptr->hotplug_activated_coremask |= (1ull<<cvmx_get_core_num());

#ifdef DEBUG
	printf("cvmx_app_hotplug_activate(): coremask 0x%x valid %d sizeof %d\n",
	       cvmx_app_hotplug_info_ptr->coremask, cvmx_app_hotplug_info_ptr->valid,
	       sizeof(*cvmx_app_hotplug_info_ptr));
#endif

	cvmx_spinlock_unlock(&cvmx_app_hotplug_lock);

	return 0;
}
int __cvmx_helper_spi_probe(int interface) { int num_ports = 0; if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) && cvmx_spi4000_is_present(interface)) { num_ports = 10; } else { union cvmx_pko_reg_crc_enable enable; num_ports = 16; enable.u64 = cvmx_read_csr(CVMX_PKO_REG_CRC_ENABLE); enable.s.enable |= 0xffff << (interface * 16); cvmx_write_csr(CVMX_PKO_REG_CRC_ENABLE, enable.u64); } __cvmx_helper_setup_gmx(interface, num_ports); return num_ports; }
/*
 * Parse a version-6 boot descriptor and populate the cvmx sysinfo
 * from the CVMX bootinfo block it points at.  Returns the bootinfo
 * pointer on success, NULL if the descriptor is invalid.
 */
static cvmx_bootinfo_t *
octeon_process_app_desc_ver_6(octeon_boot_descriptor_t *app_desc_ptr)
{
	cvmx_bootinfo_t *octeon_bootinfo;

	/* XXX Why is 0x00000000ffffffffULL a bad value? */
	if (app_desc_ptr->cvmx_desc_vaddr == 0 ||
	    app_desc_ptr->cvmx_desc_vaddr == 0xfffffffful) {
		cvmx_safe_printf("Bad octeon_bootinfo %#jx\n",
		    (uintmax_t)app_desc_ptr->cvmx_desc_vaddr);
		return (NULL);
	}

	octeon_bootinfo = cvmx_phys_to_ptr(app_desc_ptr->cvmx_desc_vaddr);
	/* Only major version 1 of the CVMX descriptor is understood. */
	if (octeon_bootinfo->major_version != 1) {
		cvmx_safe_printf("Incompatible CVMX descriptor from bootloader: %d.%d %p\n",
		    (int) octeon_bootinfo->major_version,
		    (int) octeon_bootinfo->minor_version, octeon_bootinfo);
		return (NULL);
	}

	/* Seed sysinfo with the essentials, then copy the rest by hand. */
	cvmx_sysinfo_minimal_initialize(octeon_bootinfo->phy_mem_desc_addr,
	    octeon_bootinfo->board_type, octeon_bootinfo->board_rev_major,
	    octeon_bootinfo->board_rev_minor, octeon_bootinfo->eclock_hz);
	memcpy(cvmx_sysinfo_get()->mac_addr_base,
	    octeon_bootinfo->mac_addr_base, 6);
	cvmx_sysinfo_get()->mac_addr_count = octeon_bootinfo->mac_addr_count;
	cvmx_sysinfo_get()->compact_flash_common_base_addr =
	    octeon_bootinfo->compact_flash_common_base_addr;
	cvmx_sysinfo_get()->compact_flash_attribute_base_addr =
	    octeon_bootinfo->compact_flash_attribute_base_addr;
	cvmx_sysinfo_get()->core_mask = octeon_bootinfo->core_mask;
	cvmx_sysinfo_get()->led_display_base_addr =
	    octeon_bootinfo->led_display_base_addr;
	memcpy(cvmx_sysinfo_get()->board_serial_number,
	    octeon_bootinfo->board_serial_number,
	    sizeof cvmx_sysinfo_get()->board_serial_number);
	return (octeon_bootinfo);
}