/*
 * Convert a nanosecond-based duration to the value used in the
 * boot bus timing register, scaled by the register's timing multiple.
 */
static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs)
{
	/* eclock ticks per microsecond */
	unsigned int clocks_per_us = octeon_get_clock_rate() / 1000000;

	/*
	 * Round the eclock-period count up so the programmed timing is
	 * never shorter than the requested duration, then scale down by
	 * the timing multiple.
	 */
	return DIV_ROUND_UP(nsecs * clocks_per_us, 1000 * tim_mult);
}
/*
 * seq_file show handler: print interrupt latency statistics.
 *
 * The raw latencies in the global 'li' are accumulated in CPU clock
 * ticks; convert them to nanoseconds using the CPU clock rate.
 *
 * @m: seq_file to print into
 * @v: unused seq_file iterator cookie
 *
 * Return: 0 (seq_file success).
 */
static int show_latency(struct seq_file *m, void *v)
{
	u64 cpuclk, avg, max, min;
	/* Snapshot the global stats so a consistent set is printed. */
	struct latency_info curr_li = li;

	cpuclk = octeon_get_clock_rate();

	if (curr_li.interrupt_cnt) {
		max = (curr_li.max_latency * 1000000000) / cpuclk;
		min = (curr_li.min_latency * 1000000000) / cpuclk;
		avg = (curr_li.latency_sum * 1000000000) /
			(cpuclk * curr_li.interrupt_cnt);
	} else {
		/*
		 * No interrupts counted yet (counters were just reset by
		 * init_latency_info): avoid dividing by zero in the avg
		 * computation and don't report the (u64)-1 min sentinel.
		 */
		max = 0;
		min = 0;
		avg = 0;
	}

	seq_printf(m, "cnt: %10lld, avg: %7lld ns, max: %7lld ns, min: %7lld ns\n",
		   curr_li.interrupt_cnt, avg, max, min);
	return 0;
}
static void init_latency_info(struct latency_info *li, int startup) { /* interval in milli seconds after which the interrupt will * be triggered */ int interval = 1; if (startup) { /* Calculating by the amounts io clock and cpu clock would * increment in interval amount of ms */ li->io_interval = (octeon_get_io_clock_rate() * interval) / 1000; li->cpu_interval = (octeon_get_clock_rate() * interval) / 1000; } li->timer_start1 = 0; li->timer_start2 = 0; li->max_latency = 0; li->min_latency = (u64)-1; li->latency_sum = 0; li->interrupt_cnt = 0; }
/* * Version of octeon_model_get_string() that takes buffer as argument, * as running early in u-boot static/global variables don't work when * running from flash. */ const char *octeon_model_get_string_buffer(uint32_t chip_id, char *buffer) { const char *family; const char *core_model; char pass[4]; int clock_mhz; const char *suffix; union cvmx_l2d_fus3 fus3; int num_cores; union cvmx_mio_fus_dat2 fus_dat2; union cvmx_mio_fus_dat3 fus_dat3; char fuse_model[10]; uint32_t fuse_data = 0; fus3.u64 = cvmx_read_csr(CVMX_L2D_FUS3); fus_dat2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2); fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3); num_cores = cvmx_octeon_num_cores(); /* Make sure the non existant devices look disabled */ switch ((chip_id >> 8) & 0xff) { case 6: /* CN50XX */ case 2: /* CN30XX */ fus_dat3.s.nodfa_dte = 1; fus_dat3.s.nozip = 1; break; case 4: /* CN57XX or CN56XX */ fus_dat3.s.nodfa_dte = 1; break; default: break; } /* Make a guess at the suffix */ /* NSP = everything */ /* EXP = No crypto */ /* SCP = No DFA, No zip */ /* CP = No DFA, No crypto, No zip */ if (fus_dat3.s.nodfa_dte) { if (fus_dat2.s.nocrypto) suffix = "CP"; else suffix = "SCP"; } else if (fus_dat2.s.nocrypto) suffix = "EXP"; else suffix = "NSP"; /* * Assume pass number is encoded using <5:3><2:0>. Exceptions * will be fixed later. */ sprintf(pass, "%u.%u", ((chip_id >> 3) & 7) + 1, chip_id & 7); /* * Use the number of cores to determine the last 2 digits of * the model number. There are some exceptions that are fixed * later. 
*/ switch (num_cores) { case 16: core_model = "60"; break; case 15: core_model = "58"; break; case 14: core_model = "55"; break; case 13: core_model = "52"; break; case 12: core_model = "50"; break; case 11: core_model = "48"; break; case 10: core_model = "45"; break; case 9: core_model = "42"; break; case 8: core_model = "40"; break; case 7: core_model = "38"; break; case 6: core_model = "34"; break; case 5: core_model = "32"; break; case 4: core_model = "30"; break; case 3: core_model = "25"; break; case 2: core_model = "20"; break; case 1: core_model = "10"; break; default: core_model = "XX"; break; } /* Now figure out the family, the first two digits */ switch ((chip_id >> 8) & 0xff) { case 0: /* CN38XX, CN37XX or CN36XX */ if (fus3.cn38xx.crip_512k) { /* * For some unknown reason, the 16 core one is * called 37 instead of 36. */ if (num_cores >= 16) family = "37"; else family = "36"; } else family = "38"; /* * This series of chips didn't follow the standard * pass numbering. */ switch (chip_id & 0xf) { case 0: strcpy(pass, "1.X"); break; case 1: strcpy(pass, "2.X"); break; case 3: strcpy(pass, "3.X"); break; default: strcpy(pass, "X.X"); break; } break; case 1: /* CN31XX or CN3020 */ if ((chip_id & 0x10) || fus3.cn31xx.crip_128k) family = "30"; else family = "31"; /* * This series of chips didn't follow the standard * pass numbering. */ switch (chip_id & 0xf) { case 0: strcpy(pass, "1.0"); break; case 2: strcpy(pass, "1.1"); break; default: strcpy(pass, "X.X"); break; } break; case 2: /* CN3010 or CN3005 */ family = "30"; /* A chip with half cache is an 05 */ if (fus3.cn30xx.crip_64k) core_model = "05"; /* * This series of chips didn't follow the standard * pass numbering. */ switch (chip_id & 0xf) { case 0: strcpy(pass, "1.0"); break; case 2: strcpy(pass, "1.1"); break; default: strcpy(pass, "X.X"); break; } break; case 3: /* CN58XX */ family = "58"; /* Special case. 
4 core, no crypto */ if ((num_cores == 4) && fus_dat2.cn38xx.nocrypto) core_model = "29"; /* Pass 1 uses different encodings for pass numbers */ if ((chip_id & 0xFF) < 0x8) { switch (chip_id & 0x3) { case 0: strcpy(pass, "1.0"); break; case 1: strcpy(pass, "1.1"); break; case 3: strcpy(pass, "1.2"); break; default: strcpy(pass, "1.X"); break; } } break; case 4: /* CN57XX, CN56XX, CN55XX, CN54XX */ if (fus_dat2.cn56xx.raid_en) { if (fus3.cn56xx.crip_1024k) family = "55"; else family = "57"; if (fus_dat2.cn56xx.nocrypto) suffix = "SP"; else suffix = "SSP"; } else { if (fus_dat2.cn56xx.nocrypto) suffix = "CP"; else { suffix = "NSP"; if (fus_dat3.s.nozip) suffix = "SCP"; } if (fus3.cn56xx.crip_1024k) family = "54"; else family = "56"; } break; case 6: /* CN50XX */ family = "50"; break; case 7: /* CN52XX */ if (fus3.cn52xx.crip_256k) family = "51"; else family = "52"; break; default: family = "XX"; core_model = "XX"; strcpy(pass, "X.X"); suffix = "XXX"; break; } clock_mhz = octeon_get_clock_rate() / 1000000; if (family[0] != '3') { /* Check for model in fuses, overrides normal decode */ /* This is _not_ valid for Octeon CN3XXX models */ fuse_data |= cvmx_fuse_read_byte(51); fuse_data = fuse_data << 8; fuse_data |= cvmx_fuse_read_byte(50); fuse_data = fuse_data << 8; fuse_data |= cvmx_fuse_read_byte(49); fuse_data = fuse_data << 8; fuse_data |= cvmx_fuse_read_byte(48); if (fuse_data & 0x7ffff) { int model = fuse_data & 0x3fff; int suffix = (fuse_data >> 14) & 0x1f; if (suffix && model) { /* * Have both number and suffix in * fuses, so both */ sprintf(fuse_model, "%d%c", model, 'A' + suffix - 1); core_model = ""; family = fuse_model; } else if (suffix && !model) { /* * Only have suffix, so add suffix to * 'normal' model number. */ sprintf(fuse_model, "%s%c", core_model, 'A' + suffix - 1); core_model = fuse_model; } else { /* * Don't have suffix, so just use * model from fuses. */ sprintf(fuse_model, "%d", model); core_model = ""; family = fuse_model; } }
/**
 * cvm_oct_probe - platform-driver probe: bring up Octeon packet I/O and
 * register one netdev per physical port, plus an optional POW-only device.
 * @pdev: platform device; its device-tree node is the 'pip' node.
 *
 * Return: 0 on success, -EINVAL when no device-tree node is attached.
 * NOTE(review): ports whose netdev allocation or registration fails are
 * skipped with an error message, but probe still returns 0.
 */
static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	/* FAU registers are allocated downward from this starting address. */
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;

	octeon_mdiobus_force_mod_depencency();

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			/* Read-modify-write: only the group field changes. */
			pip_prt_tagx.u64 =
				cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	/* Only enable packet input after every port's group is set. */
	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if ((pow_send_group != -1)) {
		struct net_device *dev;

		pr_info("\tConfiguring device for POW only access\n");
		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			/* The POW pseudo-port sits one past the real ports. */
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");

			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
			cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
				alloc_etherdev(sizeof(struct octeon_ethernet));

			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			priv = netdev_priv(dev);
			priv->netdev = dev;
			priv->of_node = cvm_oct_node_for_port(pip, interface,
							      port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			/* One 4-byte FAU counter per PKO queue for this port. */
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;
			}

			/* netdev_ops left NULL means the mode is unsupported. */
			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				/* Commit the FAU range this port consumed. */
				fau -=
					cvmx_pko_get_num_queues(priv->port) *
					sizeof(u32);
				schedule_delayed_work(&priv->port_periodic_work, HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 uS: about 10 1500-byte packets at 1GE.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);

	return 0;
}
/**
 * Called after libata determines the needed PIO mode. This
 * function programs the Octeon bootbus regions to support the
 * timing requirements of the PIO mode.
 *
 * @ap:  ATA port information
 * @dev: ATA device
 */
static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
{
	struct octeon_cf_data *ocd = ap->dev->platform_data;
	union cvmx_mio_boot_reg_timx reg_tim;
	int cs = ocd->base_region;
	int T;
	struct ata_timing timing;

	int use_iordy;
	int trh;
	int pause;
	/* These names are timing parameters from the ATA spec */
	int t1;
	int t2;
	int t2i;

	/*
	 * Timing quantum passed to ata_timing_compute:
	 * 2000000000000 / rate_hz.  NOTE(review): this is twice the clock
	 * period in picoseconds — confirm against ata_timing_compute's
	 * expected units.
	 */
	T = (int)(2000000000000LL / octeon_get_clock_rate());

	if (ata_timing_compute(dev, dev->pio_mode, &timing, T, T))
		BUG();

	/*
	 * Register fields encode "value + 1" cycles, so each computed
	 * timing is decremented by one (guarding against underflow at 0).
	 */
	t1 = timing.setup;
	if (t1)
		t1--;
	t2 = timing.active;
	if (t2)
		t2--;
	t2i = timing.act8b;
	if (t2i)
		t2i--;

	/* 20 ns hold time, converted at timing multiple 2. */
	trh = ns_to_tim_reg(2, 20);
	if (trh)
		trh--;

	/* Remainder of the cycle not spent in setup/active/hold. */
	pause = timing.cycle - timing.active - timing.setup - trh;
	if (pause)
		pause--;

	octeon_cf_set_boot_reg_cfg(cs);
	if (ocd->dma_engine >= 0)
		/* True IDE mode, program both chip selects. */
		octeon_cf_set_boot_reg_cfg(cs + 1);

	use_iordy = ata_pio_need_iordy(dev);

	reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cs));
	/* Disable page mode */
	reg_tim.s.pagem = 0;
	/* Enable dynamic timing */
	reg_tim.s.waitm = use_iordy;
	/* Pages are disabled */
	reg_tim.s.pages = 0;
	/* We don't use multiplexed address mode */
	reg_tim.s.ale = 0;
	/* Not used */
	reg_tim.s.page = 0;
	/* Time after IORDY to continue to assert the data */
	reg_tim.s.wait = 0;
	/* Time to wait to complete the cycle. */
	reg_tim.s.pause = pause;
	/* How long to hold after a write to de-assert CE. */
	reg_tim.s.wr_hld = trh;
	/* How long to wait after a read to de-assert CE. */
	reg_tim.s.rd_hld = trh;
	/* How long write enable is asserted */
	reg_tim.s.we = t2;
	/* How long read enable is asserted */
	reg_tim.s.oe = t2;
	/* Time after CE that read/write starts */
	reg_tim.s.ce = ns_to_tim_reg(2, 5);
	/* Time before CE that address is valid */
	reg_tim.s.adr = 0;

	/* Program the bootbus region timing for the data port chip select. */
	cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs), reg_tim.u64);
	if (ocd->dma_engine >= 0)
		/* True IDE mode, program both chip selects. */
		cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs + 1), reg_tim.u64);
}
/*
 * Called after libata determines the needed DMA mode: program the
 * bootbus DMA timing register for this port's DMA engine from the
 * CompactFlash timing parameters (T0 cycle, Td active, Tkr recovery).
 *
 * @ap:  ATA port information (unused; ocd comes via dev->link)
 * @dev: ATA device whose dma_mode selects the timing set
 */
static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
{
	struct octeon_cf_data *ocd = dev->link->ap->dev->platform_data;
	union cvmx_mio_boot_dma_timx dma_tim;
	unsigned int oe_a;
	unsigned int oe_n;
	unsigned int dma_ackh;
	unsigned int dma_arq;
	unsigned int pause;
	unsigned int T0, Tkr, Td;
	unsigned int tim_mult;

	const struct ata_timing *timing;

	timing = ata_timing_find_mode(dev->dma_mode);
	T0 = timing->cycle;
	Td = timing->active;
	Tkr = timing->recover;
	dma_ackh = timing->dmack_hold;

	dma_tim.u64 = 0;
	/* dma_tim.s.tim_mult = 0 --> 4x */
	tim_mult = 4;

	/* not spec'ed, value in eclocks, not affected by tim_mult */
	dma_arq = 8;
	/* Tz (25 ns) minus the time the arq eclocks take, in ns. */
	pause = 25 - dma_arq * 1000 /
		(octeon_get_clock_rate() / 1000000); /* Tz */

	oe_a = Td;
	/* Tkr from cf spec, lengthened to meet T0 */
	oe_n = max(T0 - oe_a, Tkr);

	dma_tim.s.dmack_pi = 1;

	dma_tim.s.oe_n = ns_to_tim_reg(tim_mult, oe_n);
	dma_tim.s.oe_a = ns_to_tim_reg(tim_mult, oe_a);

	/*
	 * This is tI, C.F. spec. says 0, but Sony CF card requires
	 * more, we use 20 nS.
	 */
	dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, 20);
	dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh);

	dma_tim.s.dmarq = dma_arq;
	dma_tim.s.pause = ns_to_tim_reg(tim_mult, pause);

	dma_tim.s.rd_dly = 0;	/* Sample right on edge */

	/* writes only: reuse the read-enable timings for write enable. */
	dma_tim.s.we_n = ns_to_tim_reg(tim_mult, oe_n);
	dma_tim.s.we_a = ns_to_tim_reg(tim_mult, oe_a);

	pr_debug("ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60,
		 ns_to_tim_reg(tim_mult, 60));
	pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: "
		 "%d, dmarq: %d, pause: %d\n",
		 dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s,
		 dma_tim.s.dmack_h,
		 dma_tim.s.dmarq, dma_tim.s.pause);

	cvmx_write_csr(CVMX_MIO_BOOT_DMA_TIMX(ocd->dma_engine),
		       dma_tim.u64);
}
/*
 * cvm_oct_driver_probe - platform-driver probe (workqueue variant): bring
 * up Octeon packet I/O and register one netdev per physical port, plus an
 * optional POW-only device.  Board-specific (CONFIG_SG*) GMX setup is
 * applied before common hardware configuration.
 *
 * @dev: platform device (shadowed by per-port 'dev' locals below)
 *
 * Return: 0 on success, -ENOMEM if the poll workqueue cannot be created.
 * NOTE(review): ports whose netdev allocation or registration fails are
 * skipped with an error message, but probe still returns 0.
 */
static int cvm_oct_driver_probe(struct platform_device *dev)
{
	int num_interfaces;
	int interface;
	/* FAU registers are allocated downward from this starting address. */
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;

	octeon_mdiobus_force_mod_depencency();
	pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);

	/* Skip over management-port MACs when assigning addresses. */
	if (OCTEON_IS_MODEL(OCTEON_CN52XX))
		cvm_oct_mac_addr_offset = 2; /* First two are the mgmt ports. */
	else if (OCTEON_IS_MODEL(OCTEON_CN56XX))
		cvm_oct_mac_addr_offset = 1; /* First one is the mgmt port. */
	else
		cvm_oct_mac_addr_offset = 0;

#if defined(CONFIG_SG590) || defined(CONFIG_SG770)
	{
		union cvmx_gmxx_inf_mode mode;

		mode.u64 = 0;
		mode.s.en = 1;
		cvmx_write_csr(CVMX_GMXX_INF_MODE(0), mode.u64);
	}
#endif
#ifdef CONFIG_SG8200
	/*
	 * SG8200 directs the 2 internal ports out MII to 2 single 100Mbit
	 * interfaces. (The switch is on the 8139C+).
	 */
	{
		union cvmx_gmxx_inf_mode mode;

		mode.u64 = 0;
		mode.s.en = 1;
		mode.s.p0mii = 1;
		mode.s.type = 1;
		cvmx_write_csr(CVMX_GMXX_INF_MODE(0), mode.u64);
	}
#endif

	cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
	if (cvm_oct_poll_queue == NULL) {
		pr_err("octeon-ethernet: Cannot create workqueue");
		return -ENOMEM;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			/* Read-modify-write: only the group field changes. */
			pip_prt_tagx.u64 =
				cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	/* Only enable packet input after every port's group is set. */
	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if ((pow_send_group != -1)) {
		struct net_device *dev;

		pr_info("\tConfiguring device for POW only access\n");
		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			/* The POW pseudo-port sits one past the real ports. */
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");

			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
			cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
				alloc_etherdev(sizeof(struct octeon_ethernet));

			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			priv = netdev_priv(dev);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			/* One 4-byte FAU counter per PKO queue for this port. */
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;
			}

			/* netdev_ops left NULL means the mode is unsupported. */
			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device "
				       "for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				/* Commit the FAU range this port consumed. */
				fau -=
					cvmx_pko_get_num_queues(priv->port) *
					sizeof(uint32_t);
				queue_delayed_work(cvm_oct_poll_queue,
						   &priv->port_periodic_work,
						   HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 uS: about 10 1500-byte packets at 1GE.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);

#ifdef CONFIG_SG590
	{
		void cvm_oct_marvel_switch_init(void);
		printk("cavium-ethernet: enabling Marvell 88e6046 switch: intra-switch forwarding disabled\n");
		cvm_oct_marvel_switch_init();
	}
#endif

	return 0;
}
const char *octeon_model_get_string_buffer(uint32_t chip_id, char *buffer) { const char *family; const char *core_model; char pass[4]; int clock_mhz; const char *suffix; union cvmx_l2d_fus3 fus3; int num_cores; union cvmx_mio_fus_dat2 fus_dat2; union cvmx_mio_fus_dat3 fus_dat3; char fuse_model[10]; uint32_t fuse_data = 0; fus3.u64 = 0; if (!OCTEON_IS_MODEL(OCTEON_CN6XXX)) fus3.u64 = cvmx_read_csr(CVMX_L2D_FUS3); fus_dat2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2); fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3); num_cores = cvmx_pop(cvmx_read_csr(CVMX_CIU_FUSE)); /* */ switch ((chip_id >> 8) & 0xff) { case 6: /* */ case 2: /* */ fus_dat3.s.nodfa_dte = 1; fus_dat3.s.nozip = 1; break; case 4: /* */ fus_dat3.s.nodfa_dte = 1; break; default: break; } /* */ /* */ /* */ /* */ /* */ if (fus_dat3.s.nodfa_dte) { if (fus_dat2.s.nocrypto) suffix = "CP"; else suffix = "SCP"; } else if (fus_dat2.s.nocrypto) suffix = "EXP"; else suffix = "NSP"; /* */ sprintf(pass, "%d.%d", (int)((chip_id >> 3) & 7) + 1, (int)chip_id & 7); /* */ switch (num_cores) { case 32: core_model = "80"; break; case 24: core_model = "70"; break; case 16: core_model = "60"; break; case 15: core_model = "58"; break; case 14: core_model = "55"; break; case 13: core_model = "52"; break; case 12: core_model = "50"; break; case 11: core_model = "48"; break; case 10: core_model = "45"; break; case 9: core_model = "42"; break; case 8: core_model = "40"; break; case 7: core_model = "38"; break; case 6: core_model = "34"; break; case 5: core_model = "32"; break; case 4: core_model = "30"; break; case 3: core_model = "25"; break; case 2: core_model = "20"; break; case 1: core_model = "10"; break; default: core_model = "XX"; break; } /* */ switch ((chip_id >> 8) & 0xff) { case 0: /* */ if (fus3.cn38xx.crip_512k) { /* */ if (num_cores >= 16) family = "37"; else family = "36"; } else family = "38"; /* */ switch (chip_id & 0xf) { case 0: strcpy(pass, "1.X"); break; case 1: strcpy(pass, "2.X"); break; case 3: strcpy(pass, 
"3.X"); break; default: strcpy(pass, "X.X"); break; } break; case 1: /* */ if ((chip_id & 0x10) || fus3.cn31xx.crip_128k) family = "30"; else family = "31"; /* */ switch (chip_id & 0xf) { case 0: strcpy(pass, "1.0"); break; case 2: strcpy(pass, "1.1"); break; default: strcpy(pass, "X.X"); break; } break; case 2: /* */ family = "30"; /* */ if (fus3.cn30xx.crip_64k) core_model = "05"; /* */ switch (chip_id & 0xf) { case 0: strcpy(pass, "1.0"); break; case 2: strcpy(pass, "1.1"); break; default: strcpy(pass, "X.X"); break; } break; case 3: /* */ family = "58"; /* */ if ((num_cores == 4) && fus3.cn58xx.crip_1024k && !strncmp(suffix, "CP", 2)) core_model = "29"; /* */ if ((chip_id & 0xFF) < 0x8) { switch (chip_id & 0x3) { case 0: strcpy(pass, "1.0"); break; case 1: strcpy(pass, "1.1"); break; case 3: strcpy(pass, "1.2"); break; default: strcpy(pass, "1.X"); break; } } break; case 4: /* */ if (fus_dat2.cn56xx.raid_en) { if (fus3.cn56xx.crip_1024k) family = "55"; else family = "57"; if (fus_dat2.cn56xx.nocrypto) suffix = "SP"; else suffix = "SSP"; } else { if (fus_dat2.cn56xx.nocrypto) suffix = "CP"; else { suffix = "NSP"; if (fus_dat3.s.nozip) suffix = "SCP"; if (fus_dat3.s.bar2_en) suffix = "NSPB2"; } if (fus3.cn56xx.crip_1024k) family = "54"; else family = "56"; } break; case 6: /* */ family = "50"; break; case 7: /* */ if (fus3.cn52xx.crip_256k) family = "51"; else family = "52"; break; case 0x93: /* */ family = "61"; if (fus_dat2.cn61xx.nocrypto && fus_dat2.cn61xx.dorm_crypto) suffix = "AP"; if (fus_dat2.cn61xx.nocrypto) suffix = "CP"; else if (fus_dat2.cn61xx.dorm_crypto) suffix = "DAP"; else if (fus_dat3.cn61xx.nozip) suffix = "SCP"; break; case 0x90: /* */ family = "63"; if (fus_dat3.s.l2c_crip == 2) family = "62"; if (num_cores == 6) /* */ core_model = "35"; if (fus_dat2.cn63xx.nocrypto) suffix = "CP"; else if (fus_dat2.cn63xx.dorm_crypto) suffix = "DAP"; else if (fus_dat3.cn63xx.nozip) suffix = "SCP"; else suffix = "AAP"; break; case 0x92: /* */ family = "66"; 
if (num_cores == 6) /* */ core_model = "35"; if (fus_dat2.cn66xx.nocrypto && fus_dat2.cn66xx.dorm_crypto) suffix = "AP"; if (fus_dat2.cn66xx.nocrypto) suffix = "CP"; else if (fus_dat2.cn66xx.dorm_crypto) suffix = "DAP"; else if (fus_dat3.cn66xx.nozip) suffix = "SCP"; else suffix = "AAP"; break; case 0x91: /* */ family = "68"; if (fus_dat2.cn68xx.nocrypto && fus_dat3.cn68xx.nozip) suffix = "CP"; else if (fus_dat2.cn68xx.dorm_crypto) suffix = "DAP"; else if (fus_dat3.cn68xx.nozip) suffix = "SCP"; else if (fus_dat2.cn68xx.nocrypto) suffix = "SP"; else suffix = "AAP"; break; default: family = "XX"; core_model = "XX"; strcpy(pass, "X.X"); suffix = "XXX"; break; } clock_mhz = octeon_get_clock_rate() / 1000000; if (family[0] != '3') { int fuse_base = 384 / 8; if (family[0] == '6') fuse_base = 832 / 8; /* */ /* */ fuse_data |= cvmx_fuse_read_byte(fuse_base + 3); fuse_data = fuse_data << 8; fuse_data |= cvmx_fuse_read_byte(fuse_base + 2); fuse_data = fuse_data << 8; fuse_data |= cvmx_fuse_read_byte(fuse_base + 1); fuse_data = fuse_data << 8; fuse_data |= cvmx_fuse_read_byte(fuse_base); if (fuse_data & 0x7ffff) { int model = fuse_data & 0x3fff; int suffix = (fuse_data >> 14) & 0x1f; if (suffix && model) { /* */ sprintf(fuse_model, "%d%c", model, 'A' + suffix - 1); core_model = ""; family = fuse_model; } else if (suffix && !model) { /* */ sprintf(fuse_model, "%s%c", core_model, 'A' + suffix - 1); core_model = fuse_model; } else { /* */ sprintf(fuse_model, "%d", model); core_model = ""; family = fuse_model; } }
/**
 * cvm_oct_probe - platform-driver probe (receive-group variant): bring up
 * Octeon packet I/O, configure receive tag groups, and register one netdev
 * per physical port, plus an optional POW-only device.
 * @pdev: platform device; its device-tree node is the 'pip' node.
 *
 * Return: 0 on success, -EINVAL when no device-tree node is attached.
 * NOTE(review): ports whose netdev allocation or registration fails are
 * skipped with an error message, but probe still returns 0.
 */
static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	/* FAU registers are allocated downward from this starting address. */
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;
	/* Frame overhead subtracted from wire sizes to get MTU limits. */
	int mtu_overhead = ETH_HLEN + ETH_FCS_LEN;

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	mtu_overhead += VLAN_HLEN;
#endif

	octeon_mdiobus_force_mod_depencency();

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	/*
	 * receive_group_order selects 2^order receive groups (capped at
	 * 16); otherwise use the single pow_receive_group.
	 */
	if (receive_group_order) {
		if (receive_group_order > 4)
			receive_group_order = 4;
		pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
	} else {
		pow_receive_groups = BIT(pow_receive_group);
	}

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
				cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));

			if (receive_group_order) {
				int tag_mask;

				/* We support only 16 groups at the moment, so
				 * always disable the two additional "hidden"
				 * tag_mask bits on CN68XX.
				 */
				if (OCTEON_IS_MODEL(OCTEON_CN68XX))
					pip_prt_tagx.u64 |= 0x3ull << 44;

				tag_mask = ~((1 << receive_group_order) - 1);
				pip_prt_tagx.s.grptagbase = 0;
				pip_prt_tagx.s.grptagmask = tag_mask;
				pip_prt_tagx.s.grptag = 1;
				pip_prt_tagx.s.tag_mode = 0;
				/* Hash on ports and IPv4/IPv6 addresses. */
				pip_prt_tagx.s.inc_prt_flag = 1;
				pip_prt_tagx.s.ip6_dprt_flag = 1;
				pip_prt_tagx.s.ip4_dprt_flag = 1;
				pip_prt_tagx.s.ip6_sprt_flag = 1;
				pip_prt_tagx.s.ip4_sprt_flag = 1;
				pip_prt_tagx.s.ip6_dst_flag = 1;
				pip_prt_tagx.s.ip4_dst_flag = 1;
				pip_prt_tagx.s.ip6_src_flag = 1;
				pip_prt_tagx.s.ip4_src_flag = 1;
				pip_prt_tagx.s.grp = 0;
			} else {
				pip_prt_tagx.s.grptag = 0;
				pip_prt_tagx.s.grp = pow_receive_group;
			}

			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	/* Only enable packet input after every port's group is set. */
	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if ((pow_send_group != -1)) {
		struct net_device *dev;

		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			SET_NETDEV_DEV(dev, &pdev->dev);
			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			/* The POW pseudo-port sits one past the real ports. */
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");

			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
			dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
			cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
				alloc_etherdev(sizeof(struct octeon_ethernet));

			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			SET_NETDEV_DEV(dev, &pdev->dev);
			priv = netdev_priv(dev);
			priv->netdev = dev;
			priv->of_node = cvm_oct_node_for_port(pip, interface,
							      port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			/* One 4-byte FAU counter per PKO queue for this port. */
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
			dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
			dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				cvm_set_rgmii_delay(priv->of_node, interface,
						    port_index);
				break;
			}

			/* netdev_ops left NULL means the mode is unsupported. */
			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				/* Commit the FAU range this port consumed. */
				fau -=
					cvmx_pko_get_num_queues(priv->port) *
					sizeof(u32);
				schedule_delayed_work(&priv->port_periodic_work, HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 uS: about 10 1500-byte packets at 1GE.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);

	return 0;
}