/**
 * @INTERNAL
 * Bringup and enable a LOOP interface. After this call packet
 * I/O should be fully functional. This is called with IPD
 * enabled but PKO disabled.
 *
 * @param interface Interface to bring up
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_loop_enable(int interface)
{
	cvmx_pip_prt_cfgx_t port_cfg;
	int num_ports, index;
	unsigned long offset;

	num_ports = __cvmx_helper_get_num_ipd_ports(interface);
	/*
	 * We need to disable length checking so packet < 64 bytes and jumbo
	 * frames don't get errors
	 */
	for (index = 0; index < num_ports; index++) {
		/* PKND chips index PIP_PRT_CFG by port kind, others by
		 * IPD port number. */
		offset = ((octeon_has_feature(OCTEON_FEATURE_PKND)) ?
			  cvmx_helper_get_pknd(interface, index) :
			  cvmx_helper_get_ipd_port(interface, index));
		/* Read-modify-write: clear only the two length error
		 * enables, preserving the rest of the port config. */
		port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(offset));
		port_cfg.s.maxerr_en = 0;
		port_cfg.s.minerr_en = 0;
		cvmx_write_csr(CVMX_PIP_PRT_CFGX(offset), port_cfg.u64);
	}

	/*
	 * Disable FCS stripping for loopback ports
	 */
	if (!octeon_has_feature(OCTEON_FEATURE_PKND)) {
		cvmx_ipd_sub_port_fcs_t ipd_sub_port_fcs;
		ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
		ipd_sub_port_fcs.s.port_bit2 = 0;
		cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);
	}
	return 0;
}
/**
 * U-Boot command handler: raise an NMI on a set of cores.
 *
 * @param cmdtp Command table entry (unused here)
 * @param flag  Command flags (unused here)
 * @param argc  Argument count; argv[1], if present, is a coremask string
 * @param argv  Argument vector
 *
 * @return 0 on success, -1 if the coremask string cannot be parsed
 */
int do_nmi(cmd_tbl_t * cmdtp, int flag, int argc, char *const argv[])
{
	cvmx_coremask_t coremask = CVMX_COREMASK_EMPTY;
	uint64_t cores;
	int node;

	if (argc > 1) {
		if (cvmx_coremask_str2bmp(&coremask, argv[1])) {
			puts("Error: could not parse coremask string\n");
			return -1;
		}
	} else {
		/* No argument given: target only the calling core. */
		cvmx_coremask_set_self(&coremask);
	}

	if (octeon_has_feature(OCTEON_FEATURE_MULTINODE)) {
		/* Multi-node: write each node's 64-bit slice of the mask
		 * to that node's CIU3 NMI register. */
		for (node = CVMX_MAX_NODES - 1; node >= 0; node--) {
			cores = cvmx_coremask_get64_node(&coremask, node);
			cvmx_write_csr_node(node, CVMX_CIU3_NMI, cores);
		}
	} else {
		/* Single node: CIU3 and older CIU chips use different
		 * NMI register addresses. */
		cores = cvmx_coremask_get64(&coremask);
		if (octeon_has_feature(OCTEON_FEATURE_CIU3))
			cvmx_write_csr(CVMX_CIU3_NMI, cores);
		else
			cvmx_write_csr(CVMX_CIU_NMI, cores);
	}
	return 0;
}
/**
 * Get clock rate based on the clock type.
 *
 * @param node  - CPU node number
 * @param clock - Enumeration of the clock type.
 * @return      - return the clock rate.
 */
uint64_t cvmx_clock_get_rate_node(int node, cvmx_clock_t clock)
{
	const uint64_t REF_CLOCK = 50000000;

#ifdef CVMX_BUILD_FOR_UBOOT
	uint64_t rate_eclk = 0;
	uint64_t rate_sclk = 0;
	uint64_t rate_dclk = 0;
#else
	/*
	 * BUG FIX: these were only declared inside the U-Boot #ifdef, so
	 * every non-U-Boot build referenced undeclared identifiers below.
	 * Outside U-Boot they are static so the PLL multiplier CSRs are
	 * read once and the rates cached across calls.
	 */
	static uint64_t rate_eclk = 0;
	static uint64_t rate_sclk = 0;
	static uint64_t rate_dclk = 0;
#endif

	if (cvmx_unlikely(!rate_eclk)) {
		/* Note: The order of these checks is important.
		 ** octeon_has_feature(OCTEON_FEATURE_PCIE) is true for both 6XXX
		 ** and 52XX/56XX, so OCTEON_FEATURE_NPEI _must_ be checked first */
		if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
			cvmx_npei_dbg_data_t npei_dbg_data;
			npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
			rate_eclk = REF_CLOCK * npei_dbg_data.s.c_mul;
			rate_sclk = rate_eclk;
		} else if (OCTEON_IS_OCTEON3()) {
			cvmx_rst_boot_t rst_boot;
			rst_boot.u64 = cvmx_read_csr_node(node, CVMX_RST_BOOT);
			rate_eclk = REF_CLOCK * rst_boot.s.c_mul;
			rate_sclk = REF_CLOCK * rst_boot.s.pnr_mul;
		} else if (octeon_has_feature(OCTEON_FEATURE_PCIE)) {
			cvmx_mio_rst_boot_t mio_rst_boot;
			mio_rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
			rate_eclk = REF_CLOCK * mio_rst_boot.s.c_mul;
			rate_sclk = REF_CLOCK * mio_rst_boot.s.pnr_mul;
		} else {
			cvmx_dbg_data_t dbg_data;
			dbg_data.u64 = cvmx_read_csr(CVMX_DBG_DATA);
			rate_eclk = REF_CLOCK * dbg_data.s.c_mul;
			rate_sclk = rate_eclk;
		}
	}

	switch (clock) {
	case CVMX_CLOCK_SCLK:
	case CVMX_CLOCK_TIM:
	case CVMX_CLOCK_IPD:
		return rate_sclk;

	case CVMX_CLOCK_RCLK:
	case CVMX_CLOCK_CORE:
		return rate_eclk;

	case CVMX_CLOCK_DDR:
#if !defined(CVMX_BUILD_FOR_LINUX_HOST) && !defined(CVMX_BUILD_FOR_TOOLCHAIN)
		/* DDR rate comes from sysinfo rather than a multiplier CSR;
		 * lazily cached the same way as the others. */
		if (cvmx_unlikely(!rate_dclk))
			rate_dclk = cvmx_sysinfo_get()->dram_data_rate_hz;
#endif
		return rate_dclk;
	}

	cvmx_dprintf("cvmx_clock_get_rate: Unknown clock type\n");
	return 0;
}
/**
 * Enable dormant crypto with the supplied unlock key.
 *
 * @param key  Key value written to the RNM_EER_KEY register on chips
 *             with dormant crypto support.
 *
 * @return 1 if crypto was already enabled; otherwise the post-write
 *         state of the crypto feature (nonzero on success).
 */
int cvmx_crypto_dormant_enable(uint64_t key)
{
	if (octeon_has_feature(OCTEON_FEATURE_CRYPTO))
		return 1;

	if (octeon_has_feature(OCTEON_FEATURE_DORM_CRYPTO)) {
		cvmx_rnm_eer_key_t v;
		/*
		 * BUG FIX: zero the whole CSR image first.  Only the key
		 * field is assigned below; without this, uninitialized
		 * stack bits were written to the register (and reading an
		 * uninitialized object is undefined behavior).
		 */
		v.u64 = 0;
		v.s.key = key;
		cvmx_write_csr(CVMX_RNM_EER_KEY, v.u64);
	}
	return octeon_has_feature(OCTEON_FEATURE_CRYPTO);
}
/**
 * Return the number of DMA engines supported by this chip
 *
 * @return Number of DMA engines
 */
int cvmx_dma_engine_get_num(void)
{
	if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
		/* Early CN52XX passes expose one fewer engine. */
		if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
			return 4;
		else
			return 5;
	} else if (octeon_has_feature(OCTEON_FEATURE_PCIE))
		return 8;
	else
		/* Legacy (PCI/NPI) chips. */
		return 2;
}
int cvmx_apply_pko_config(void) { int i,j,n; #define PKO_CFG __cvmx_pko_config //show_pko_queue_config(); //show_pko_port_config(); for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++) { n = __cvmx_helper_early_ports_on_interface(i); for (j = 0; j < n; j++) { int port_cnt = PKO_CFG[i][j].internal_port_cnt ; int rv; int queues_cnt = PKO_CFG[i][j].queues_per_internal_port; if (queues_cnt == CVMX_HELPER_CFG_INVALID_VALUE) queues_cnt = 1; if (port_cnt == CVMX_HELPER_CFG_INVALID_VALUE) port_cnt = 1; if (!octeon_has_feature(OCTEON_FEATURE_PKND)) port_cnt = 1; if(dbg_parse) cvmx_dprintf("alloc internal ports for interface=%d" "port=%d cnt=%d\n", i, j, port_cnt); rv = cvmx_pko_alloc_iport_and_queues(i, j, port_cnt, queues_cnt); if (rv < 0) return -1; } } return 0; #undef PKO_CFG }
/**
 * Store the current POW internal state into the supplied
 * buffer. It is recommended that you pass a buffer of at least
 * 128KB. The format of the capture may change based on SDK
 * version and Octeon chip.
 *
 * @param buffer Buffer to store capture into
 * @param buffer_size
 *               The size of the supplied buffer
 *
 * @return Zero on success, negative on failure
 */
int cvmx_pow_capture(void *buffer, int buffer_size)
{
	/* PKND-capable chips use the v2 capture format. */
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		return __cvmx_pow_capture_v2(buffer, buffer_size);
	else
		return __cvmx_pow_capture_v1(buffer, buffer_size);
}
/**
 * Module / driver shutdown: disable the trace unit and release its IRQ.
 * Does nothing on chips without the TRA feature.
 */
static void __exit octeon_tra_cleanup(void)
{
	if (!octeon_has_feature(OCTEON_FEATURE_TRA))
		return;
	cvmx_tra_enable(0);
	free_irq(OCTEON_IRQ_TRACE, octeon_tra_interrupt);
}
/*
 * Number of PKO ports backing the given interface/index.  Chips without
 * PKND have a fixed one-to-one mapping, so the answer there is always 1.
 */
int cvmx_pko_get_num_pko_ports(int interface, int index)
{
	return octeon_has_feature(OCTEON_FEATURE_PKND)
		? __cvmx_helper_cfg_pko_port_num(interface, index)
		: 1;
}
/**
 * Enable one MSI interrupt.
 *
 * @param irq  Kernel IRQ number; (irq - OCTEON_IRQ_MSI_BIT0) selects the
 *             bit in the MSI enable register.
 */
static void octeon_irq_msi_enable(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/*
		 * Octeon PCI doesn't have the ability to mask/unmask
		 * MSI interrupts individually. Instead of
		 * masking/unmasking them in groups of 16, we simply
		 * assume MSI devices are well behaved. MSI
		 * interrupts are always enabled and the ACK is assumed
		 * to be enough.
		 */
	} else {
		/* These chips have PCIe. Note that we only support
		 * the first 64 MSI interrupts. Unfortunately all the
		 * MSI enables are in the same register. We use
		 * MSI0's lock to control access to them all.
		 */
		uint64_t en;
		unsigned long flags;
		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
		/* Read back — presumably to flush the posted write before
		 * dropping the lock; confirm against chip documentation. */
		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
	}
}
/**
 * Display a list and mark all elements on the list as belonging to
 * the list.
 *
 * @param entry_type Type of the list to display and mark
 * @param dump       POW capture data
 * @param entry_list Array to store marks in
 * @param valid      Set if the queue contains any elements
 * @param has_one    Set if the queue contains exactly one element
 * @param head       The head pointer
 * @param tail       The tail pointer
 */
static void __cvmx_pow_display_list_and_walk(__cvmx_pow_list_types_t entry_type,
					     __cvmx_pow_dump_t *dump,
					     uint8_t entry_list[], int valid,
					     int has_one, uint64_t head,
					     uint64_t tail)
{
	__cvmx_pow_display_list(__cvmx_pow_list_names[entry_type], 0,
				valid, has_one, head, tail);
	if (valid) {
		if (has_one)
			/* Exactly one element: mark it and stop. */
			__cvmx_pow_entry_mark_list(head, entry_type, entry_list);
		else {
			/* Walk the chain from head up to (not including)
			 * tail, marking each entry; bail out early if
			 * mark_list reports nonzero. */
			while (head != tail) {
				if (__cvmx_pow_entry_mark_list(head, entry_type, entry_list))
					break;
				if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
					/* CN68XX-format capture: input-range
					 * lists link via next_index, all
					 * others via fwd_index. */
					if (entry_type >= CVMX_POW_LIST_INPUT && entry_type < CVMX_POW_LIST_CORE)
						head = dump->smemload[head][4].s_smemload3_cn68xx.next_index;
					else
						head = dump->smemload[head][4].s_smemload3_cn68xx.fwd_index;
				} else
					head = dump->smemload[head][0].s_smemload0.next_index;
			}
			/* Tail was excluded by the loop condition above. */
			__cvmx_pow_entry_mark_list(tail, entry_type, entry_list);
		}
	}
}
/*
 * First PKO port for an interface/index pair.  Without PKND the PKO
 * port number is simply the IPD port number.
 */
int cvmx_pko_get_base_pko_port(int interface, int index)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PKND))
		return cvmx_helper_get_ipd_port(interface, index);
	return __cvmx_helper_cfg_pko_port_base(interface, index);
}
/**
 * @INTERNAL
 * This function is called by cvmx_helper_shutdown() to extract
 * all FPA buffers out of the IPD and PIP. After this function
 * completes, all FPA buffers that were prefetched by IPD and PIP
 * will be in the appropriate FPA pool. This function does not reset
 * PIP or IPD as FPA pool zero must be empty before the reset can
 * be performed. WARNING: It is very important that IPD and PIP be
 * reset soon after a call to this function.
 */
void __cvmx_ipd_free_ptr(void)
{
	/* PKND-capable chips use the v2 implementation. */
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		__cvmx_ipd_free_ptr_v2();
	else
		__cvmx_ipd_free_ptr_v1();
}
/*
 * Apply the static PKO queue configuration, dispatching to the
 * PKND or non-PKND variant based on the chip's feature set.
 */
int cvmx_pko_queue_static_config(void)
{
	return octeon_has_feature(OCTEON_FEATURE_PKND)
		? cvmx_pko_queue_static_config_pknd()
		: cvmx_pko_queue_static_config_non_pknd();
}
/*
 * First PKO queue for an IPD port.  PKND chips translate the IPD port
 * to its PKO base port before looking up the queue base; older chips
 * use the per-core queue layout.
 */
int cvmx_pko_get_base_queue(int port)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PKND))
		return cvmx_pko_get_base_queue_per_core(port, 0);

	return __cvmx_helper_cfg_pko_queue_base(
		cvmx_helper_cfg_ipd2pko_port_base(port));
}
/*
 * Read the dormant-crypto debug data field from RNM_EER_DBG.
 * Returns 0 on chips without dormant crypto support.
 */
uint64_t cvmx_crypto_dormant_dbg(void)
{
	cvmx_rnm_eer_dbg_t dbg;

	if (octeon_has_feature(OCTEON_FEATURE_DORM_CRYPTO)) {
		dbg.u64 = cvmx_read_csr(CVMX_RNM_EER_DBG);
		return dbg.s.dat;
	}
	return 0;
}
/*
 * Register the SMP operations appropriate for this chip: CIU3-based
 * chips (78xx family) use their own ops table.
 */
void __init octeon_setup_smp(void)
{
	register_smp_ops(octeon_has_feature(OCTEON_FEATURE_CIU3)
			 ? &octeon_78xx_smp_ops
			 : &octeon_smp_ops);
}
/*
 * Bus identify hook: add one "octm" child per management port on chips
 * that have the management port feature.
 */
static void octm_identify(driver_t *drv, device_t parent)
{
	unsigned port;

	if (!octeon_has_feature(OCTEON_FEATURE_MGMT_PORT))
		return;

	for (port = 0; port < CVMX_MGMT_PORT_NUM_PORTS; port++)
		BUS_ADD_CHILD(parent, 0, "octm", port);
}
/**
 * Dump a POW capture to the console in a human readable format.
 *
 * @param buffer POW capture from cvmx_pow_capture()
 * @param buffer_size
 *               Size of the buffer
 */
void cvmx_pow_display(void *buffer, int buffer_size)
{
	printf("POW Display Start\n");

	/* Capture format differs between PKND and legacy chips. */
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		__cvmx_pow_display_v2(buffer, buffer_size);
	else
		__cvmx_pow_display_v1(buffer, buffer_size);

	printf("POW Display End\n");
}
/**
 * Print a coremask in human-readable form, one node per line on
 * multi-node systems.  Words are printed MSB-first, leading zero words
 * are suppressed, and an all-zero mask prints "<EMPTY>".
 *
 * @param pcm  Coremask to print
 */
void cvmx_coremask_print(const cvmx_coremask_t *pcm)
{
	int i, j;
	int start;
	int found = 0;

	/* Print one node per line. Since the bitmap is stored LSB to MSB
	 * we reverse the order when printing.
	 */
	if (!octeon_has_feature(OCTEON_FEATURE_MULTINODE)) {
		/* Single-node: print node 0's words, highest word first. */
		start = 0;
		for (j = CVMX_COREMASK_MAX_CORES_PER_NODE - CVMX_COREMASK_HLDRSZ;
		     j >= 0;
		     j -= CVMX_COREMASK_HLDRSZ) {
			/* Suppress leading zero words; once a non-zero word
			 * is seen, print everything after it. */
			if (pcm->coremask_bitmap[j / CVMX_COREMASK_HLDRSZ] != 0)
				start = 1;
			if (start)
				cvmx_dprintf(" 0x%llx",
					     (unsigned long long)pcm->coremask_bitmap[j / CVMX_COREMASK_HLDRSZ]);
		}
		if (start)
			found = 1;
		/* If the coremask is empty print <EMPTY> so it is not
		 * confusing. */
		if (!found)
			cvmx_dprintf("<EMPTY>");
		cvmx_dprintf("\n");
		return;
	}

	/* Multi-node: one "node N:" line per node's slice of the bitmap. */
	for (i = 0; i < CVMX_MAX_USED_CORES_BMP;
	     i += CVMX_COREMASK_MAX_CORES_PER_NODE) {
		cvmx_dprintf("%s node %d:", i > 0 ? "\n" : "",
			     cvmx_coremask_core_to_node(i));
		start = 0;
		for (j = i + CVMX_COREMASK_MAX_CORES_PER_NODE - CVMX_COREMASK_HLDRSZ;
		     j >= i;
		     j -= CVMX_COREMASK_HLDRSZ) {
			/* Don't start printing until we get a non-zero
			 * word. */
			if (pcm->coremask_bitmap[j / CVMX_COREMASK_HLDRSZ] != 0)
				start = 1;
			if (start)
				cvmx_dprintf(" 0x%llx",
					     (unsigned long long)pcm->coremask_bitmap[j / CVMX_COREMASK_HLDRSZ]);
		}
		if (start)
			found = 1;
	}
	/* If the coremask is empty print <EMPTY> so it is not confusing. */
	if (!found)
		cvmx_dprintf("<EMPTY>");
	cvmx_dprintf("\n");
}
static int cvmx_get_interface_number(int type, int index) { if (type > CVMX_PARSE_INF_TYPE_CNT) { cvmx_dprintf("ERROR: interface type=%d \n",type); return -1; } if (index > CVMX_PARSE_INF_INDEX_MAX) { cvmx_dprintf("ERROR: interface index=%d \n",index); return -1; } if (octeon_has_feature(OCTEON_FEATURE_PKND)) return interface_index_pknd[type][index]; return interface_index[type][index]; }
/*
 * Acknowledge one MSI interrupt by writing its bit to the receive
 * register.  PCI and PCIe chips use different registers; neither path
 * needs locking.
 */
static void octeon_irq_msi_ack(unsigned int irq)
{
	uint64_t bit = 1ull << (irq - OCTEON_IRQ_MSI_BIT0);

	if (octeon_has_feature(OCTEON_FEATURE_PCIE))
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0, bit);
	else
		cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV, bit);
}
/**
 * Write a PCIe config space register indirectly. This is used for
 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
 *
 * @pcie_port: PCIe port to write to
 * @cfg_offset: Address to write
 * @val: Value to write
 */
static void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset, uint32_t val)
{
	/* NPEI chips (CN5XXX) use PESCX registers; newer chips use PEMX.
	 * In both cases the address and data are packed into a single
	 * indirect-write CSR. */
	if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
		union cvmx_pescx_cfg_wr pescx_cfg_wr;
		pescx_cfg_wr.u64 = 0;
		pescx_cfg_wr.s.addr = cfg_offset;
		pescx_cfg_wr.s.data = val;
		cvmx_write_csr(CVMX_PESCX_CFG_WR(pcie_port), pescx_cfg_wr.u64);
	} else {
		union cvmx_pemx_cfg_wr pemx_cfg_wr;
		pemx_cfg_wr.u64 = 0;
		pemx_cfg_wr.s.addr = cfg_offset;
		pemx_cfg_wr.s.data = val;
		cvmx_write_csr(CVMX_PEMX_CFG_WR(pcie_port), pemx_cfg_wr.u64);
	}
}
/**
 * Read a PCIe config space register indirectly. This is used for
 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
 *
 * @pcie_port: PCIe port to read from
 * @cfg_offset: Address to read
 *
 * Returns Value read
 */
static uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset)
{
	/* Indirect read protocol on both register families: write the
	 * config offset to the CSR, then read the same CSR back to fetch
	 * the data field. */
	if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
		union cvmx_pescx_cfg_rd pescx_cfg_rd;
		pescx_cfg_rd.u64 = 0;
		pescx_cfg_rd.s.addr = cfg_offset;
		cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), pescx_cfg_rd.u64);
		pescx_cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port));
		return pescx_cfg_rd.s.data;
	} else {
		union cvmx_pemx_cfg_rd pemx_cfg_rd;
		pemx_cfg_rd.u64 = 0;
		pemx_cfg_rd.s.addr = cfg_offset;
		cvmx_write_csr(CVMX_PEMX_CFG_RD(pcie_port), pemx_cfg_rd.u64);
		pemx_cfg_rd.u64 = cvmx_read_csr(CVMX_PEMX_CFG_RD(pcie_port));
		return pemx_cfg_rd.s.data;
	}
}
/*
 * Board-level SPI initialization: verify the SoC has a SPI controller,
 * locate and validate the /soc/spi device-tree node, then scan its
 * children for "spi-flash" compatible nodes.
 *
 * Returns 0 on success, -1 if SPI is absent or the device tree does not
 * describe a compatible controller.
 */
int __octeon_spi_board_initialize(void)
{
	int nodeoffset, next_nodeoffset;
	int level = 0;

	debug("%s: Entry\n", __func__);
	if (!octeon_has_feature(OCTEON_FEATURE_SPI))
		return -1;

	nodeoffset = fdt_path_offset(gd->fdt_blob, "/soc/spi");
	if (nodeoffset < 0) {
		debug("SPI interface not found in device tree\n");
		return -1;
	}
	if (fdt_node_check_compatible(gd->fdt_blob, nodeoffset,
				      "cavium,octeon-3010-spi")) {
		puts("Incompatible SPI interface type\n");
		return -1;
	}

	/* Walk direct children (level 1) of the SPI node. */
	while ((next_nodeoffset = fdt_next_node(gd->fdt_blob, nodeoffset,
						&level)) > 0) {
		if (level < 0)
			break;
		if (level > 1)
			continue;
		if (fdt_node_check_compatible(gd->fdt_blob, next_nodeoffset,
					      "spi-flash")) {
			/* NOTE(review): this branch was empty in the
			 * original — per-flash setup appears never to have
			 * been implemented; confirm before relying on it. */
		}
	}

	/* BUG FIX: the original fell off the end of this non-void
	 * function, which is undefined behavior when the caller uses the
	 * result.  (Seven unused locals were also removed.) */
	return 0;
}
/**
 * Disable one MSI interrupt.
 *
 * @param irq  Kernel IRQ number; (irq - OCTEON_IRQ_MSI_BIT0) selects the
 *             bit in the MSI enable register.
 */
static void octeon_irq_msi_disable(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* See comment in enable */
	} else {
		/*
		 * These chips have PCIe. Note that we only support
		 * the first 64 MSI interrupts. Unfortunately all the
		 * MSI enables are in the same register. We use
		 * MSI0's lock to control access to them all.
		 */
		uint64_t en;
		unsigned long flags;
		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
		/* Read back — presumably to flush the posted write before
		 * dropping the lock; confirm against chip documentation. */
		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
	}
}
/**
 * @INTERNAL
 * Probe a SRIO interface and determine the number of ports
 * connected to it. The SRIO interface should still be down
 * after this call.
 *
 * @param interface Interface to probe
 *
 * @return Number of ports on the interface. Zero to disable.
 */
int __cvmx_helper_srio_probe(int interface)
{
	cvmx_sriox_status_reg_t srio0_status_reg;
	cvmx_sriox_status_reg_t srio1_status_reg;

	if (!octeon_has_feature(OCTEON_FEATURE_SRIO))
		return 0;

	/* Read MIO_QLMX_CFG CSRs to find SRIO status. */
	if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
		/* CN66XX: SRIO interfaces start at interface 4; the QLM
		 * status value encodes the lane configuration (4/5/6 map
		 * to the lane modes commented below — confirm against the
		 * QLM documentation). */
		int status = cvmx_qlm_get_status(0);
		int srio_port = interface - 4;
		switch(srio_port) {
		case 0:  /* 1x4 lane */
			if (status == 4)
				return 2;
			break;
		case 2:  /* 2x2 lane */
			if (status == 5)
				return 2;
			break;
		case 1:  /* 4x1 long/short */
		case 3:  /* 4x1 long/short */
			if (status == 6)
				return 2;
			break;
		}
		return 0;
	}

	/* Other chips: interface is enabled if either SRIO status
	 * register reports SRIO mode. */
	srio0_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(0));
	srio1_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(1));
	if (srio0_status_reg.s.srio || srio1_status_reg.s.srio)
		return 2;
	else
		return 0;
}
/**
 * Common open handler for Octeon Ethernet devices: set up the PHY,
 * enable the GMX port (programming the port kind on PKND chips), and
 * establish initial carrier state from either the attached PHY or the
 * helper link status.
 *
 * @param dev        Network device being opened
 * @param link_poll  Poll callback installed when no PHY device exists
 *
 * @return 0 on success, or the error from cvm_oct_phy_setup_device()
 */
int cvm_oct_common_open(struct net_device *dev,
			void (*link_poll)(struct net_device *))
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
	cvmx_helper_link_info_t link_info;
	int rv;

	rv = cvm_oct_phy_setup_device(dev);
	if (rv)
		return rv;

	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	gmx_cfg.s.en = 1;
	/* PKND chips need the port kind programmed into the GMX config. */
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		gmx_cfg.s.pknd = priv->port;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

	/* The simulator has no real link state to track. */
	if (octeon_is_simulation())
		return 0;

	if (dev->phydev) {
		/* A PHY driver is attached: read its state once and let
		 * the PHY layer drive future link changes. */
		int r = phy_read_status(dev->phydev);

		if (r == 0 && dev->phydev->link == 0)
			netif_carrier_off(dev);
		cvm_oct_adjust_link(dev);
	} else {
		/* No PHY: seed carrier state from the helper and install
		 * the caller-supplied poll routine. */
		link_info = cvmx_helper_link_get(priv->port);
		if (!link_info.s.link_up)
			netif_carrier_off(dev);
		priv->poll = link_poll;
		link_poll(dev);
	}

	return 0;
}
/*
 * Bus identify hook: add the "octusb" child on chips that have the
 * on-chip USB feature.
 */
static void octusb_octeon_identify(driver_t *drv, device_t parent)
{
	if (!octeon_has_feature(OCTEON_FEATURE_USB))
		return;

	BUS_ADD_CHILD(parent, 0, "octusb", 0);
}
/*
 * Early machine-dependent startup for FreeBSD/Octeon.  Parses the
 * U-Boot boot descriptor (a3), brings up the console, prints CPU and
 * board details, then runs the standard MIPS early-init sequence
 * (memory, pmap, proc0, timers) and reports the detected Octeon
 * feature set.
 */
void platform_start(__register_t a0, __register_t a1,
		    __register_t a2 __unused, __register_t a3)
{
	const struct octeon_feature_description *ofd;
	uint64_t platform_counter_freq;
	int rv;

	mips_postboot_fixup();

	/*
	 * Initialize boot parameters so that we can determine things like
	 * which console we should use, etc.
	 */
	octeon_boot_params_init(a3);

	/* Initialize pcpu stuff */
	mips_pcpu0_init();
	mips_timer_early_init(cvmx_sysinfo_get()->cpu_clock_hz);

	/* Initialize console. */
	cninit();

	/*
	 * Display information about the CPU.
	 */
#if !defined(OCTEON_MODEL)
	printf("Using runtime CPU model checks.\n");
#else
	printf("Compiled for CPU model: " __XSTRING(OCTEON_MODEL) "\n");
#endif
	strcpy(cpu_model, octeon_model_get_string(cvmx_get_proc_id()));
	printf("CPU Model: %s\n", cpu_model);
	printf("CPU clock: %uMHz Core Mask: %#x\n",
	       cvmx_sysinfo_get()->cpu_clock_hz / 1000000,
	       cvmx_sysinfo_get()->core_mask);
	rv = octeon_model_version_check(cvmx_get_proc_id());
	if (rv == -1)
		panic("%s: kernel not compatible with this processor.",
		      __func__);

	/*
	 * Display information about the board.
	 */
#if defined(OCTEON_BOARD_CAPK_0100ND)
	strcpy(cpu_board, "CAPK-0100ND");
	if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_CN3010_EVB_HS5) {
		panic("Compiled for %s, but board type is %s.", cpu_board,
		      cvmx_board_type_to_string(cvmx_sysinfo_get()->board_type));
	}
#else
	strcpy(cpu_board,
	       cvmx_board_type_to_string(cvmx_sysinfo_get()->board_type));
#endif
	printf("Board: %s\n", cpu_board);
	printf("Board Type: %u Revision: %u/%u\n",
	       cvmx_sysinfo_get()->board_type,
	       cvmx_sysinfo_get()->board_rev_major,
	       cvmx_sysinfo_get()->board_rev_minor);
	printf("Serial number: %s\n",
	       cvmx_sysinfo_get()->board_serial_number);

	/*
	 * Additional on-chip hardware/settings.
	 *
	 * XXX Display PCI host/target? What else?
	 */
	printf("MAC address base: %6D (%u configured)\n",
	       cvmx_sysinfo_get()->mac_addr_base, ":",
	       cvmx_sysinfo_get()->mac_addr_count);

	octeon_ciu_reset();
	/*
	 * Convert U-Boot 'bootoctlinux' loader command line arguments into
	 * boot flags and kernel environment variables.
	 */
	bootverbose = 1;
	octeon_init_kenv(a3);

	/*
	 * For some reason on the cn38xx simulator ebase register is set to
	 * 0x80001000 at bootup time. Move it back to the default, but
	 * when we move to having support for multiple executives, we need
	 * to rethink this.
	 */
	mips_wr_ebase(0x80000000);

	octeon_memory_init();
	init_param1();
	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
	cpu_clock = cvmx_sysinfo_get()->cpu_clock_hz;
	platform_counter_freq = cpu_clock;
	octeon_timecounter.tc_frequency = cpu_clock;
	platform_timecounter = &octeon_timecounter;
	mips_timer_init_params(platform_counter_freq, 0);
	set_cputicker(octeon_get_ticks, cpu_clock, 0);

#ifdef SMP
	/*
	 * Clear any pending IPIs.
	 */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(0), 0xffffffff);
#endif

	printf("Octeon SDK: %s\n", OCTEON_SDK_VERSION_STRING);
	printf("Available Octeon features:");
	for (ofd = octeon_feature_descriptions; ofd->ofd_string != NULL; ofd++)
		if (octeon_has_feature(ofd->ofd_feature))
			printf(" %s", ofd->ofd_string);
	printf("\n");
}