/**
 * Shutdown the ZIP block for a queue. ZIP must be idle when
 * this function is called.
 *
 * @param queue ZIP instruction queue to shut down
 *
 * @return Zero on success, negative on failure
 */
int cvmx_zip_queue_shutdown(int queue)
{
    cvmx_zip_cmd_ctl_t zip_cmd_ctl;

    if (cvmx_cmd_queue_length(CVMX_CMD_QUEUE_ZIP_QUE(queue))) {
        cvmx_dprintf("ERROR: cvmx_zip_queue_shutdown: ZIP not idle.\n");
        return -1;
    }

    zip_cmd_ctl.u64 = cvmx_read_csr(CVMX_ZIP_CMD_CTL);
    zip_cmd_ctl.s.reset = 1;
    cvmx_write_csr(CVMX_ZIP_CMD_CTL, zip_cmd_ctl.u64);
    cvmx_wait(100);

    cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_ZIP_QUE(queue));
    return 0;
}
/**
 * Shutdown the RAID block. RAID must be idle when
 * this function is called.
 *
 * @return Zero on success, negative on failure
 */
int cvmx_raid_shutdown(void)
{
    cvmx_rad_reg_ctl_t rad_reg_ctl;

    if (cvmx_cmd_queue_length(CVMX_CMD_QUEUE_RAID)) {
        cvmx_dprintf("ERROR: cvmx_raid_shutdown: RAID not idle.\n");
        return -1;
    }

    rad_reg_ctl.u64 = cvmx_read_csr(CVMX_RAD_REG_CTL);
    rad_reg_ctl.s.reset = 1;
    cvmx_write_csr(CVMX_RAD_REG_CTL, rad_reg_ctl.u64);
    cvmx_wait(100);

    cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_RAID);
    cvmx_write_csr(CVMX_RAD_REG_CMD_BUF, 0);
    return 0;
}
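/*
 * Usage sketch (not from the original source): an application-level
 * teardown that stops both co-processor blocks before exit. It assumes
 * ZIP queue 0 is the only queue in use; app_coprocessor_shutdown is a
 * hypothetical name.
 */
static int app_coprocessor_shutdown(void)
{
    /* Both shutdown calls return -1 if the block still has queued work. */
    if (cvmx_zip_queue_shutdown(0))
        return -1;
    if (cvmx_raid_shutdown())
        return -1;
    return 0;
}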
int cvmx_l2c_set_hw_way_partition(uint32_t mask)
{
    uint32_t valid_mask;

    valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
    mask &= valid_mask;

    /* A UMSK setting which blocks all L2C ways is an error on some chips */
    if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX))
        return -1;

    if (OCTEON_IS_MODEL(OCTEON_CN63XX))
        cvmx_write_csr(CVMX_L2C_WPAR_IOBX(0), mask);
    else
        cvmx_write_csr(CVMX_L2C_SPAR4,
                       (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask);
    return 0;
}
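/*
 * Illustrative caller (a sketch, not part of the source): set UMSK bit 0
 * so hardware agents stop allocating into L2 way 0, e.g. ahead of locking
 * data into that way. reserve_l2c_way0 is a hypothetical helper, and the
 * bit-N-blocks-way-N reading of the mask is an assumption drawn from the
 * comment in the function above.
 */
static int reserve_l2c_way0(void)
{
    /* An all-ones mask (all ways blocked) is rejected above. */
    return cvmx_l2c_set_hw_way_partition(0x1);
}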
int __cvmx_helper_spi_probe(int interface)
{
    int num_ports = 0;

    if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
        cvmx_spi4000_is_present(interface)) {
        num_ports = 10;
    } else {
        union cvmx_pko_reg_crc_enable enable;

        num_ports = 16;
        enable.u64 = cvmx_read_csr(CVMX_PKO_REG_CRC_ENABLE);
        enable.s.enable |= 0xffff << (interface * 16);
        cvmx_write_csr(CVMX_PKO_REG_CRC_ENABLE, enable.u64);
    }
    __cvmx_helper_setup_gmx(interface, num_ports);
    return num_ports;
}
/**
 * Enables the packet output hardware. It must already be
 * configured.
 */
void cvmx_pko_enable(void)
{
    union cvmx_pko_reg_flags flags;

    flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
    if (flags.s.ena_pko)
        cvmx_dprintf("Warning: Enabling PKO when PKO already enabled.\n");

    flags.s.ena_dwb = 1;
    flags.s.ena_pko = 1;
    /*
     * Always enable big endian for 3-word commands. Does nothing
     * for 2-word commands.
     */
    flags.s.store_be = 1;
    cvmx_write_csr(CVMX_PKO_REG_FLAGS, flags.u64);
}
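/*
 * Hedged bring-up sketch: PKO has to be configured before it is turned
 * on. The ordering below follows the usual cvmx helper flow; the
 * surrounding function name is hypothetical.
 */
static void app_pko_bringup(void)
{
    cvmx_pko_initialize_global(); /* configure queues/ports first */
    cvmx_pko_enable();            /* then set ena_pko as shown above */
}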
/**
 * Return the link state of an IPD/PKO port as returned by
 * auto negotiation. The result of this function may not match
 * Octeon's link config if auto negotiation has changed since
 * the last call to cvmx_helper_link_set().
 *
 * @ipd_port: IPD/PKO port to query
 *
 * Returns Link state
 */
cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port)
{
    int interface = cvmx_helper_get_interface_num(ipd_port);
    int index = cvmx_helper_get_interface_index_num(ipd_port);
    union cvmx_asxx_prt_loop asxx_prt_loop;

    asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
    if (asxx_prt_loop.s.int_loop & (1 << index)) {
        /* Force 1Gbps full duplex on internal loopback */
        cvmx_helper_link_info_t result;

        result.u64 = 0;
        result.s.full_duplex = 1;
        result.s.link_up = 1;
        result.s.speed = 1000;
        return result;
    } else {
        return __cvmx_helper_board_link_get(ipd_port);
    }
}
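/*
 * Minimal polling sketch (hypothetical helper): print the autonegotiated
 * state of one IPD/PKO port using the routine above.
 */
static void print_rgmii_link(int ipd_port)
{
    cvmx_helper_link_info_t info = __cvmx_helper_rgmii_link_get(ipd_port);

    cvmx_dprintf("port %d: link %s, %d Mbps, %s duplex\n", ipd_port,
                 info.s.link_up ? "up" : "down", info.s.speed,
                 info.s.full_duplex ? "full" : "half");
}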
void cvmx_ilk_runtime_status(int interface)
{
    cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
    cvmx_ilk_txx_flow_ctl0_t ilk_txx_flow_ctl0;
    cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
    cvmx_ilk_rxx_int_t ilk_rxx_int;
    cvmx_ilk_rxx_flow_ctl0_t ilk_rxx_flow_ctl0;
    cvmx_ilk_rxx_flow_ctl1_t ilk_rxx_flow_ctl1;
    cvmx_ilk_gbl_int_t ilk_gbl_int;

    cvmx_dprintf("\nilk run-time status: interface: %d\n", interface);

    ilk_txx_cfg1.u64 = cvmx_read_csr(CVMX_ILK_TXX_CFG1(interface));
    cvmx_dprintf("\nilk txx cfg1: 0x%16lx\n", ilk_txx_cfg1.u64);
    if (ilk_txx_cfg1.s.rx_link_fc)
        cvmx_dprintf("link flow control received\n");
    if (ilk_txx_cfg1.s.tx_link_fc)
        cvmx_dprintf("link flow control sent\n");

    ilk_txx_flow_ctl0.u64 = cvmx_read_csr(CVMX_ILK_TXX_FLOW_CTL0(interface));
    cvmx_dprintf("\nilk txx flow ctl0: 0x%16lx\n", ilk_txx_flow_ctl0.u64);

    ilk_rxx_cfg1.u64 = cvmx_read_csr(CVMX_ILK_RXX_CFG1(interface));
    cvmx_dprintf("\nilk rxx cfg1: 0x%16lx\n", ilk_rxx_cfg1.u64);
    cvmx_dprintf("rx fifo count: %d\n", ilk_rxx_cfg1.s.rx_fifo_cnt);

    ilk_rxx_int.u64 = cvmx_read_csr(CVMX_ILK_RXX_INT(interface));
    cvmx_dprintf("\nilk rxx int: 0x%16lx\n", ilk_rxx_int.u64);
    if (ilk_rxx_int.s.pkt_drop_rxf)
        cvmx_dprintf("rx fifo packet drop\n");
    if (ilk_rxx_int.u64)
        cvmx_write_csr(CVMX_ILK_RXX_INT(interface), ilk_rxx_int.u64);

    ilk_rxx_flow_ctl0.u64 = cvmx_read_csr(CVMX_ILK_RXX_FLOW_CTL0(interface));
    cvmx_dprintf("\nilk rxx flow ctl0: 0x%16lx\n", ilk_rxx_flow_ctl0.u64);

    ilk_rxx_flow_ctl1.u64 = cvmx_read_csr(CVMX_ILK_RXX_FLOW_CTL1(interface));
    cvmx_dprintf("\nilk rxx flow ctl1: 0x%16lx\n", ilk_rxx_flow_ctl1.u64);

    ilk_gbl_int.u64 = cvmx_read_csr(CVMX_ILK_GBL_INT);
    cvmx_dprintf("\nilk gbl int: 0x%16lx\n", ilk_gbl_int.u64);
    if (ilk_gbl_int.s.rxf_push_full)
        cvmx_dprintf("rx fifo overflow\n");
    if (ilk_gbl_int.u64)
        cvmx_write_csr(CVMX_ILK_GBL_INT, ilk_gbl_int.u64);
}
int cvm_oct_xaui_open(struct net_device *dev)
{
    union cvmx_gmxx_prtx_cfg gmx_cfg;
    struct octeon_ethernet *priv = netdev_priv(dev);
    int interface = INTERFACE(priv->port);
    int index = INDEX(priv->port);
    cvmx_helper_link_info_t link_info;

    gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
    gmx_cfg.s.en = 1;
    cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

    if (!octeon_is_simulation()) {
        link_info = cvmx_helper_link_get(priv->port);
        if (!link_info.s.link_up)
            netif_carrier_off(dev);
    }
    return 0;
}
static int ciu_en1_intr_bind(void *arg, u_char target)
{
    uint64_t mask;
    int core;
    int irq;

    irq = (uintptr_t)arg;
    CPU_FOREACH(core) {
        /*
         * Each core has a pair of EN1 registers; index core*2
         * selects the copy feeding the core's IP2 interrupt.
         */
        mask = cvmx_read_csr(CVMX_CIU_INTX_EN1(core * 2));
        if (core == target)
            mask |= 1ull << (irq - CIU_IRQ_EN1_BEGIN);
        else
            mask &= ~(1ull << (irq - CIU_IRQ_EN1_BEGIN));
        cvmx_write_csr(CVMX_CIU_INTX_EN1(core * 2), mask);
    }
    return (0);
}
/**
 * @INTERNAL
 * Probe a SPI interface and determine the number of ports
 * connected to it. The SPI interface should still be down after
 * this call.
 *
 * @param interface Interface to probe
 *
 * @return Number of ports on the interface. Zero to disable.
 */
int __cvmx_helper_spi_probe(int interface)
{
    int num_ports = __cvmx_helper_spi_enumerate(interface);

    if (num_ports == 16) {
        cvmx_pko_reg_crc_enable_t enable;

        /*
         * Unlike the SPI4000, most SPI devices don't
         * automatically put on the L2 CRC. For everything
         * except for the SPI4000 have PKO append the L2 CRC
         * to the packet.
         */
        enable.u64 = cvmx_read_csr(CVMX_PKO_REG_CRC_ENABLE);
        enable.s.enable |= 0xffff << (interface * 16);
        cvmx_write_csr(CVMX_PKO_REG_CRC_ENABLE, enable.u64);
    }
    __cvmx_helper_setup_gmx(interface, num_ports);
    return num_ports;
}
/**
 * @INTERNAL
 * Decrement the MPLL multiplier for the DLM as per Errata G-20669.
 *
 * @param qlm            DLM to configure
 * @param baud_mhz       Speed the DLM is configured at
 * @param old_multiplier MPLL_MULTIPLIER value to decrement from
 */
void __cvmx_qlm_set_mult(int qlm, int baud_mhz, int old_multiplier)
{
    cvmx_gserx_dlmx_mpll_multiplier_t mpll_multiplier;
    uint64_t meas_refclock, mult;

    if (!OCTEON_IS_MODEL(OCTEON_CN70XX))
        return;

    if (qlm == -1)
        return;

    meas_refclock = cvmx_qlm_measure_clock(qlm);
    if (meas_refclock == 0) {
        cvmx_warn("DLM%d: Reference clock not running\n", qlm);
        return;
    }

    /* Round the target multiplier to the nearest integer. */
    mult = (uint64_t)baud_mhz * 1000000 + (meas_refclock / 2);
    mult /= meas_refclock;

#ifdef CVMX_BUILD_FOR_UBOOT
    /*
     * For the simulator just write the multiplier directly, to make
     * it faster to boot.
     */
    if (gd->arch.board_desc.board_type == CVMX_BOARD_TYPE_SIM) {
        cvmx_write_csr(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0), mult);
        return;
    }
#endif

    /*
     * 6. Decrease MPLL_MULTIPLIER by one continually until it reaches
     * the desired long-term setting, ensuring that each
     * MPLL_MULTIPLIER value is constant for at least 1 msec before
     * changing to the next value. The desired long-term setting is as
     * indicated in HRM tables 21-1, 21-2, and 21-3. This is not
     * required with the HRM sequence.
     */
    do {
        mpll_multiplier.u64 =
            cvmx_read_csr(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0));
        mpll_multiplier.s.mpll_multiplier = --old_multiplier;
        cvmx_write_csr(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0),
                       mpll_multiplier.u64);
        /* Wait for 1 ms */
        cvmx_wait_usec(1000);
    } while (old_multiplier > (int)mult);
}
/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev: The device in question.
 * @addr: Address structure to change it to.
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
    struct octeon_ethernet *priv = netdev_priv(dev);
    union cvmx_gmxx_prtx_cfg gmx_cfg;
    int interface = INTERFACE(priv->port);
    int index = INDEX(priv->port);

    /* The MAC bytes start at offset 2 of the sockaddr, after sa_family. */
    memcpy(dev->dev_addr, addr + 2, 6);

    if ((interface < 2) &&
        (cvmx_helper_interface_get_mode(interface) !=
         CVMX_HELPER_INTERFACE_MODE_SPI)) {
        int i;
        uint8_t *ptr = addr;
        uint64_t mac = 0;

        for (i = 0; i < 6; i++)
            mac = (mac << 8) | (uint64_t)(ptr[i + 2]);

        gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
        /* Disable the port while the address filter is updated. */
        cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                       gmx_cfg.u64 & ~1ull);

        cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
        cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface), ptr[2]);
        cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface), ptr[3]);
        cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface), ptr[4]);
        cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface), ptr[5]);
        cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface), ptr[6]);
        cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface), ptr[7]);
        cvm_oct_common_set_multicast_list(dev);
        cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
    }
    return 0;
}
static void ehci_octeon_start(struct device *dev)
{
    union cvmx_uctlx_ehci_ctl ehci_ctl;

    octeon2_usb_clocks_start(dev);

    ehci_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_EHCI_CTL(0));
    /* Use 64-bit addressing. */
    ehci_ctl.s.ehci_64b_addr_en = 1;
    ehci_ctl.s.l2c_addr_msb = 0;
#ifdef __BIG_ENDIAN
    ehci_ctl.s.l2c_buff_emod = 1; /* Byte swapped. */
    ehci_ctl.s.l2c_desc_emod = 1; /* Byte swapped. */
#else
    ehci_ctl.s.l2c_buff_emod = 0; /* Not swapped. */
    ehci_ctl.s.l2c_desc_emod = 0; /* Not swapped. */
    ehci_ctl.s.inv_reg_a2 = 1;
#endif
    cvmx_write_csr(CVMX_UCTLX_EHCI_CTL(0), ehci_ctl.u64);
}
/**
 * Initialize the ZIP block
 *
 * @return Zero on success, negative on failure
 */
int cvmx_zip_initialize(void)
{
    cvmx_zip_cmd_buf_t zip_cmd_buf;
    cvmx_cmd_queue_result_t result;

    result = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_ZIP, 0,
                                       CVMX_FPA_OUTPUT_BUFFER_POOL,
                                       CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE);
    if (result != CVMX_CMD_QUEUE_SUCCESS)
        return -1;

    zip_cmd_buf.u64 = 0;
    zip_cmd_buf.s.dwb = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE / 128;
    zip_cmd_buf.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
    zip_cmd_buf.s.size = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE / 8;
    zip_cmd_buf.s.ptr =
        cvmx_ptr_to_phys(cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_ZIP)) >> 7;
    cvmx_write_csr(CVMX_ZIP_CMD_BUF, zip_cmd_buf.u64);
    cvmx_write_csr(CVMX_ZIP_ERROR, 1);
    /* Read back to make sure the setup is complete. */
    cvmx_read_csr(CVMX_ZIP_CMD_BUF);
    return 0;
}
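/*
 * Startup sketch (hypothetical caller): initialize ZIP once, before any
 * core submits compression commands; pair it with cvmx_zip_queue_shutdown()
 * at teardown as shown earlier.
 */
static int app_zip_startup(void)
{
    if (cvmx_zip_initialize()) {
        cvmx_dprintf("ERROR: ZIP initialization failed\n");
        return -1;
    }
    return 0;
}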
/**
 * @INTERNAL
 * Bring up and enable an AGL (RGMII) interface. After this call packet
 * I/O should be fully functional. This is called with IPD
 * enabled but PKO disabled.
 *
 * @param interface Interface to bring up
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_agl_enable(int interface)
{
    int port = cvmx_helper_agl_get_port(interface);
    int ipd_port = cvmx_helper_get_ipd_port(interface, port);
    union cvmx_pko_mem_port_ptrs pko_mem_port_ptrs;
    union cvmx_pko_reg_read_idx read_idx;
    int do_link_set = 1;
    int i;

    /* Set up PKO for the AGL interface. Back pressure is not supported. */
    pko_mem_port_ptrs.u64 = 0;
    read_idx.u64 = 0;
    read_idx.s.inc = 1;
    cvmx_write_csr(CVMX_PKO_REG_READ_IDX, read_idx.u64);

    for (i = 0; i < 40; i++) {
        pko_mem_port_ptrs.u64 = cvmx_read_csr(CVMX_PKO_MEM_PORT_PTRS);
        if (pko_mem_port_ptrs.s.pid == 24) {
            pko_mem_port_ptrs.s.eid = 10;
            pko_mem_port_ptrs.s.bp_port = 40;
            cvmx_write_csr(CVMX_PKO_MEM_PORT_PTRS, pko_mem_port_ptrs.u64);
            break;
        }
    }

    cvmx_agl_enable(port);

#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
    /*
     * The Linux kernel driver will call ....link_set with the proper
     * link state. In the simulator there is no link state polling and
     * hence it is set from here.
     */
    if (!(cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM))
        do_link_set = 0;
#endif

    if (do_link_set)
        cvmx_agl_link_set(port, cvmx_agl_link_get(ipd_port), 1);

    return 0;
}
int cvm_oct_rgmii_init(struct net_device *dev)
{
    struct octeon_ethernet *priv = netdev_priv(dev);
    int r;

    cvm_oct_common_init(dev);
    dev->netdev_ops->ndo_stop(dev);

    /* Install the shared RML handler when the first port is added. */
    if (number_rgmii_ports == 0) {
        r = request_irq(OCTEON_IRQ_RML, cvm_oct_rgmii_rml_interrupt,
                        IRQF_SHARED, "RGMII", &number_rgmii_ports);
        if (r)
            return r;
    }
    number_rgmii_ports++;

    /*
     * Only true RGMII ports need to be polled. In GMII mode, port 0
     * is really an RGMII port.
     */
    if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII) &&
         (priv->port == 0)) ||
        (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {
        if (!octeon_is_simulation()) {
            union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
            int interface = INTERFACE(priv->port);
            int index = INDEX(priv->port);

            /* Enable interrupts on inband status changes for this port. */
            gmx_rx_int_en.u64 =
                cvmx_read_csr(CVMX_GMXX_RXX_INT_EN(index, interface));
            gmx_rx_int_en.s.phy_dupx = 1;
            gmx_rx_int_en.s.phy_link = 1;
            gmx_rx_int_en.s.phy_spd = 1;
            cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface),
                           gmx_rx_int_en.u64);
            priv->poll = cvm_oct_rgmii_poll;
        }
    }
    return 0;
}
static int octeon_gpio_filter(void *arg)
{
    cvmx_gpio_bit_cfgx_t gpio_cfgx;
    void **cookie = arg;
    struct octeon_gpio_softc *sc = *cookie;
    long int irq = (cookie - sc->gpio_intr_cookies);

    if ((irq < 0) || (irq >= OCTEON_GPIO_IRQS))
        return (FILTER_STRAY);

    gpio_cfgx.u64 = cvmx_read_csr(CVMX_GPIO_BIT_CFGX(irq));
    /* Clear rising edge detector */
    if (gpio_cfgx.s.int_type == OCTEON_GPIO_IRQ_EDGE)
        cvmx_gpio_interrupt_clear(1 << irq);
    /* Disable interrupt */
    gpio_cfgx.s.int_en = 0;
    cvmx_write_csr(CVMX_GPIO_BIT_CFGX(irq), gpio_cfgx.u64);

    return (FILTER_SCHEDULE_THREAD);
}
static ktime_t ptp_to_ktime(u64 ptptime)
{
    ktime_t ktimebase;
    u64 ptpbase;
    unsigned long flags;

    local_irq_save(flags);
    /* Fill the icache with the code */
    ktime_get_real();
    /* Flush all pending operations */
    mb();
    /*
     * Read the time and PTP clock as close together as possible. It
     * is important that this sequence take the same amount of time
     * to reduce jitter.
     */
    ktimebase = ktime_get_real();
    ptpbase = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_HI);
    local_irq_restore(flags);

    return ktime_sub_ns(ktimebase, ptpbase - ptptime);
}
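/*
 * Usage sketch (hypothetical caller): convert a hardware timestamp, e.g.
 * one delivered with a received packet, into a ktime_t for
 * skb_hwtstamps(). It assumes hw_ts is in the same nanosecond timebase
 * as CVMX_MIO_PTP_CLOCK_HI.
 */
static void octeon_stamp_skb(struct sk_buff *skb, u64 hw_ts)
{
    struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);

    memset(hwts, 0, sizeof(*hwts));
    hwts->hwtstamp = ptp_to_ktime(hw_ts);
}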
/**
 * Perform an MII write. Called by the generic MII routines.
 *
 * @param ifp      Interface to perform the write for
 * @param phy_id   The MII phy id
 * @param location Register location to write
 * @param val      Value to write
 */
void cvm_oct_mdio_write(struct ifnet *ifp, int phy_id, int location, int val)
{
    cvmx_smi_cmd_t smi_cmd;
    cvmx_smi_wr_dat_t smi_wr;

    MDIO_LOCK();

    smi_wr.u64 = 0;
    smi_wr.s.dat = val;
    cvmx_write_csr(CVMX_SMI_WR_DAT, smi_wr.u64);

    smi_cmd.u64 = 0;
    smi_cmd.s.phy_op = 0;
    smi_cmd.s.phy_adr = phy_id;
    smi_cmd.s.reg_adr = location;
    cvmx_write_csr(CVMX_SMI_CMD, smi_cmd.u64);

    do {
        smi_wr.u64 = cvmx_read_csr(CVMX_SMI_WR_DAT);
    } while (smi_wr.s.pending);

    MDIO_UNLOCK();
}
/**
 * Enable ASX error interrupts that exist on CN3XXX, CN50XX, and
 * CN58XX.
 *
 * @block: Interface to enable 0-1
 */
void __cvmx_interrupt_asxx_enable(int block)
{
    int mask;
    union cvmx_asxx_int_en csr;

    /*
     * CN38XX and CN58XX have two interfaces with 4 ports per
     * interface. All other chips have a max of 3 ports on
     * interface 0.
     */
    if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
        mask = 0xf; /* Set enables for 4 ports */
    else
        mask = 0x7; /* Set enables for 3 ports */

    /* Enable interface interrupts */
    csr.u64 = cvmx_read_csr(CVMX_ASXX_INT_EN(block));
    csr.s.txpsh = mask;
    csr.s.txpop = mask;
    csr.s.ovrflw = mask;
    cvmx_write_csr(CVMX_ASXX_INT_EN(block), csr.u64);
}
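/*
 * Hedged example: enable ASX error reporting on every interface the chip
 * has, deriving the interface count from the same model check used for
 * the port mask above. enable_all_asx_interrupts is a hypothetical name.
 */
static void enable_all_asx_interrupts(void)
{
    int nblocks = (OCTEON_IS_MODEL(OCTEON_CN38XX) ||
                   OCTEON_IS_MODEL(OCTEON_CN58XX)) ? 2 : 1;
    int block;

    for (block = 0; block < nblocks; block++)
        __cvmx_interrupt_asxx_enable(block);
}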
static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
{
    const int coreid = cvmx_get_core_num();
    uint64_t action;

    /* Load the mailbox register to figure out what we're supposed to do */
    action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)) & 0xffff;

    /* Clear the mailbox to clear the interrupt */
    cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);

    if (action & SMP_CALL_FUNCTION)
        smp_call_function_interrupt();
    if (action & SMP_RESCHEDULE_YOURSELF)
        scheduler_ipi();

    /* Check if we've been told to flush the icache */
    if (action & SMP_ICACHE_FLUSH)
        asm volatile ("synci 0($0)\n");

    return IRQ_HANDLED;
}
/**
 * Module/driver initialization.
 *
 * Returns Zero on success
 */
static int __init flash_init(void)
{
    /*
     * Read the bootbus region 0 setup to determine the base
     * address of the flash.
     */
    union cvmx_mio_boot_reg_cfgx region_cfg;

    region_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(0));
    if (region_cfg.s.en) {
        /*
         * The bootloader always takes the flash and sets its
         * address so the entire flash fits below 0x1fc00000.
         * This way the flash aliases to 0x1fc00000 for booting.
         * Software can access the full flash at the true
         * address, while core boot can access 4MB.
         */
        /* Use this name so old part lines work */
        flash_map.name = "phys_mapped_flash";
        flash_map.phys = region_cfg.s.base << 16;
        flash_map.size = 0x1fc00000 - flash_map.phys;
        /* 8-bit bus (0 + 1) or 16-bit bus (1 + 1) */
        flash_map.bankwidth = region_cfg.s.width + 1;
        flash_map.virt = ioremap(flash_map.phys, flash_map.size);
        pr_notice("Bootbus flash: Setting flash for %luMB flash at 0x%08llx\n",
                  flash_map.size >> 20, flash_map.phys);
        /* simple_map_init(&flash_map); */
        flash_map.read = octeon_flash_map_read;
        flash_map.write = octeon_flash_map_write;
        flash_map.copy_from = octeon_flash_map_copy_from;
        flash_map.copy_to = octeon_flash_map_copy_to;
        mymtd = do_map_probe("cfi_probe", &flash_map);
        if (mymtd) {
            mymtd->owner = THIS_MODULE;
            mtd_device_parse_register(mymtd, part_probe_types,
                                      NULL, NULL, 0);
        } else {
            pr_err("Failed to register MTD device for flash\n");
        }
    }
    return 0;
}
static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
    struct octeon_mgmt *p = netdev_priv(netdev);
    int port = p->port;
    union cvmx_agl_gmx_prtx_cfg prtx_cfg;
    unsigned long flags;
    int link_changed = 0;

    spin_lock_irqsave(&p->lock, flags);
    if (p->phydev->link) {
        if (!p->last_link)
            link_changed = 1;
        if (p->last_duplex != p->phydev->duplex) {
            p->last_duplex = p->phydev->duplex;
            prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
            prtx_cfg.s.duplex = p->phydev->duplex;
            cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);
        }
    } else {
        if (p->last_link)
            link_changed = -1;
    }
    p->last_link = p->phydev->link;
    spin_unlock_irqrestore(&p->lock, flags);

    if (link_changed != 0) {
        if (link_changed > 0) {
            netif_carrier_on(netdev);
            pr_info("%s: Link is up - %d/%s\n", netdev->name,
                    p->phydev->speed,
                    DUPLEX_FULL == p->phydev->duplex ? "Full" : "Half");
        } else {
            netif_carrier_off(netdev);
            pr_info("%s: Link is down\n", netdev->name);
        }
    }
}
void cvm_oct_rgmii_uninit(struct net_device *dev)
{
    struct octeon_ethernet *priv = netdev_priv(dev);

    cvm_oct_common_uninit(dev);

    /*
     * Only true RGMII ports need to be polled. In GMII mode, port 0
     * is really an RGMII port.
     */
    if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII) &&
         (priv->port == 0)) ||
        (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {
        if (!octeon_is_simulation()) {
            union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
            int interface = INTERFACE(priv->port);
            int index = INDEX(priv->port);

            /*
             * Disable interrupts on inband status changes
             * for this port.
             */
            gmx_rx_int_en.u64 =
                cvmx_read_csr(CVMX_GMXX_RXX_INT_EN(index, interface));
            gmx_rx_int_en.s.phy_dupx = 0;
            gmx_rx_int_en.s.phy_link = 0;
            gmx_rx_int_en.s.phy_spd = 0;
            cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface),
                           gmx_rx_int_en.u64);
        }
    }

    /* Remove the interrupt handler when the last port is removed. */
    number_rgmii_ports--;
    if (number_rgmii_ports == 0)
        free_irq(OCTEON_IRQ_RML, &number_rgmii_ports);

    cancel_work_sync(&priv->port_work);
}
static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
{
    u64 mbox_clrx = CVMX_CIU_MBOX_CLRX(cvmx_get_core_num());
    u64 action;
    int i;

    /*
     * Make sure the function array initialization remains
     * correct.
     */
    BUILD_BUG_ON(SMP_RESCHEDULE_YOURSELF != (1 << 0));
    BUILD_BUG_ON(SMP_CALL_FUNCTION != (1 << 1));
    BUILD_BUG_ON(SMP_ICACHE_FLUSH != (1 << 2));

    /*
     * Load the mailbox register to figure out what we're supposed
     * to do.
     */
    action = cvmx_read_csr(mbox_clrx);

    if (OCTEON_IS_MODEL(OCTEON_CN68XX))
        action &= 0xff;
    else
        action &= 0xffff;

    /* Clear the mailbox to clear the interrupt */
    cvmx_write_csr(mbox_clrx, action);

    for (i = 0; i < ARRAY_SIZE(octeon_message_functions) && action;) {
        if (action & 1) {
            void (*fn)(void) = octeon_message_functions[i];

            if (fn)
                fn();
        }
        action >>= 1;
        i++;
    }
    return IRQ_HANDLED;
}
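/*
 * Sending-side sketch (an assumption; the kernel's real path is its IPI
 * send routine): writing a bit to CIU_MBOX_SETX for a core raises the
 * mailbox interrupt serviced by the handler above on that core.
 */
static void raise_mailbox_action(int coreid, u64 action_bit)
{
    cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), action_bit);
}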
/**
 * @INTERNAL
 * Probe RGMII ports and determine the number present
 *
 * @param interface Interface to probe
 *
 * @return Number of RGMII/GMII/MII ports (0-4).
 */
int __cvmx_helper_rgmii_probe(int interface)
{
    int num_ports = 0;
    cvmx_gmxx_inf_mode_t mode;

    mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));

    if (mode.s.type) {
        if (OCTEON_IS_MODEL(OCTEON_CN38XX) ||
            OCTEON_IS_MODEL(OCTEON_CN58XX)) {
            cvmx_dprintf("ERROR: RGMII initialize called in SPI interface\n");
        } else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
                   OCTEON_IS_MODEL(OCTEON_CN30XX) ||
                   OCTEON_IS_MODEL(OCTEON_CN50XX)) {
            /*
             * On these chips "type" says we're in GMII/MII mode.
             * This limits us to 2 ports.
             */
            num_ports = 2;
        } else {
            cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n",
                         __FUNCTION__);
        }
    } else {
        if (OCTEON_IS_MODEL(OCTEON_CN38XX) ||
            OCTEON_IS_MODEL(OCTEON_CN58XX)) {
            num_ports = 4;
        } else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
                   OCTEON_IS_MODEL(OCTEON_CN30XX) ||
                   OCTEON_IS_MODEL(OCTEON_CN50XX)) {
            num_ports = 3;
        } else {
            cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n",
                         __FUNCTION__);
        }
    }
    return num_ports;
}
static int cvm_oct_mdio_read(struct net_device *dev, int phy_id, int location)
{
    union cvmx_smix_cmd smi_cmd;
    union cvmx_smix_rd_dat smi_rd;

    smi_cmd.u64 = 0;
    smi_cmd.s.phy_op = 1;
    smi_cmd.s.phy_adr = phy_id;
    smi_cmd.s.reg_adr = location;
    cvmx_write_csr(CVMX_SMIX_CMD(0), smi_cmd.u64);

    do {
        if (!in_interrupt())
            yield();
        smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(0));
    } while (smi_rd.s.pending);

    if (smi_rd.s.val)
        return smi_rd.s.dat;
    else
        return 0;
}
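/*
 * Usage sketch: read the two standard MII PHY identifier registers
 * through the routine above. MII_PHYSID1/MII_PHYSID2 come from
 * <linux/mii.h>; read_phy_id is a hypothetical helper.
 */
static u32 read_phy_id(struct net_device *dev, int phy_id)
{
    u32 id = cvm_oct_mdio_read(dev, phy_id, MII_PHYSID1) << 16;

    return id | cvm_oct_mdio_read(dev, phy_id, MII_PHYSID2);
}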
/**
 * This function checks to see if the software is compatible with the
 * chip it is running on. This is called in the application startup code
 * and does not need to be called directly by the application.
 * Does not return if software is incompatible, unless compiled for the
 * FreeBSD kernel, in which case it returns -1.
 *
 * @param chip_id chip id that the software is being run on.
 *
 * @return 0: runtime checking or exact version match
 *         1: chip is newer revision than compiled for, but software
 *            will run properly.
 *        -1: software is incompatible
 */
int octeon_model_version_check(uint32_t chip_id __attribute__ ((unused)))
{
#if !OCTEON_IS_COMMON_BINARY()
    /* Check for special case of mismarked 3005 samples, and adjust cpuid */
    if (chip_id == OCTEON_CN3010_PASS1 &&
        (cvmx_read_csr(0x80011800800007B8ull) & (1ull << 34)))
        chip_id |= 0x10;

    if ((OCTEON_MODEL & 0xffffff) != chip_id) {
        if (!OCTEON_IS_MODEL((OM_IGNORE_REVISION | chip_id)) ||
            (OCTEON_MODEL & 0xffffff) > chip_id ||
            (((OCTEON_MODEL & 0xffffff) ^ chip_id) & 0x10)) {
            printf("ERROR: Software not configured for this chip\n"
                   "       Expecting ID=0x%08x, Chip is 0x%08x\n",
                   (OCTEON_MODEL & 0xffffff), (unsigned int)chip_id);
            if ((OCTEON_MODEL & 0xffffff) > chip_id)
                printf("Refusing to run on older revision than program was compiled for.\n");
#if !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
            exit(-1);
#else
            return -1;
#endif
        } else {
            printf("\n###################################################\n");
            printf("WARNING: Software configured for older revision than running on.\n"
                   "         Compiled for ID=0x%08x, Chip is 0x%08x\n",
                   (OCTEON_MODEL & 0xffffff), (unsigned int)chip_id);
            printf("###################################################\n\n");
            return 1;
        }
    }
#endif

    cvmx_warn_if(CVMX_ENABLE_PARAMETER_CHECKING,
                 "Parameter checks are enabled. Expect some performance loss due to the extra checking\n");
    cvmx_warn_if(CVMX_ENABLE_CSR_ADDRESS_CHECKING,
                 "CSR address checks are enabled. Expect some performance loss due to the extra checking\n");
    cvmx_warn_if(CVMX_ENABLE_POW_CHECKS,
                 "POW state checks are enabled. Expect some performance loss due to the extra checking\n");
    return 0;
}
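/*
 * Startup sketch: applications typically pass the running chip's ID,
 * e.g. the value from cvmx_get_proc_id() (assumed available in the SDK),
 * so the check above can compare it against OCTEON_MODEL.
 */
static void app_model_check(void)
{
    octeon_model_version_check(cvmx_get_proc_id());
}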
/**
 * This function is called when the trace buffer hits a trigger
 * or fills. We don't enable the fill interrupt, so it should
 * only be on triggers.
 *
 * @param cpl    Interrupt number
 * @param dev_id unused
 *
 * @return IRQ status, should always be IRQ_HANDLED
 */
static irqreturn_t octeon_tra_interrupt(int cpl, void *dev_id)
{
    /*
     * Stop the trace buffer in case it is still running. A trigger
     * should have already stopped it.
     */
    cvmx_tra_enable(0);
    /* Clear the trace buffer interrupt status */
    cvmx_write_csr(CVMX_TRA_INT_STATUS, cvmx_read_csr(CVMX_TRA_INT_STATUS));

    /* We can optionally stop the other cores */
    if (OCTEON_TRA_DUMP_CORES_ON_INTERRUPT) {
        pr_info("Octeon Trace Buffer Dumping Core state\n");
        on_each_cpu(octeon_tra_dump_regs, NULL, 1);
    }

    pr_info("Octeon Trace Buffer Start\n");
    cvmx_tra_display();
    pr_info("Octeon Trace Buffer End\n");

    /* Restart the trace buffer */
    cvmx_tra_enable(1);
    return IRQ_HANDLED;
}
static void cvm_oct_mdio_write(struct net_device *dev, int phy_id,
                               int location, int val)
{
    union cvmx_smix_cmd smi_cmd;
    union cvmx_smix_wr_dat smi_wr;

    smi_wr.u64 = 0;
    smi_wr.s.dat = val;
    cvmx_write_csr(CVMX_SMIX_WR_DAT(0), smi_wr.u64);

    smi_cmd.u64 = 0;
    smi_cmd.s.phy_op = 0;
    smi_cmd.s.phy_adr = phy_id;
    smi_cmd.s.reg_adr = location;
    cvmx_write_csr(CVMX_SMIX_CMD(0), smi_cmd.u64);

    do {
        if (!in_interrupt())
            yield();
        smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(0));
    } while (smi_wr.s.pending);
}
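/*
 * Usage sketch (hypothetical helper): write a PHY register with the
 * routine above, then read it back through cvm_oct_mdio_read() to
 * confirm the value took. MII_BMCR is the standard control register
 * from <linux/mii.h>.
 */
static int mdio_write_verified(struct net_device *dev, int phy_id, int val)
{
    cvm_oct_mdio_write(dev, phy_id, MII_BMCR, val);
    return (cvm_oct_mdio_read(dev, phy_id, MII_BMCR) == val) ? 0 : -1;
}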