/**
 * Configure an output port and the associated queues for use.
 *
 * @port:       Port to configure.
 * @base_queue: First queue number to associate with this port.
 * @num_queues: Number of queues to associate with this port
 * @priority:   Array of priority levels for each queue. Values are
 *              allowed to be 0-8. A value of 8 gets 8 times the traffic
 *              of a value of 1. A value of 0 indicates that no rounds
 *              will be participated in. These priorities can be changed
 *              on the fly while the pko is enabled. A priority of 9
 *              indicates that static priority should be used. If static
 *              priority is used all queues with static priority must be
 *              contiguous starting at the base_queue, and lower numbered
 *              queues have higher priority than higher numbered queues.
 *              There must be num_queues elements in the array.
 */
cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue,
				       uint64_t num_queues,
				       const uint64_t priority[])
{
	cvmx_pko_status_t result_code;
	uint64_t queue;
	union cvmx_pko_mem_queue_ptrs config;
	union cvmx_pko_reg_queue_ptrs1 config1;
	int static_priority_base = -1;
	int static_priority_end = -1;

	if ((port >= CVMX_PKO_NUM_OUTPUT_PORTS)
	    && (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID)) {
		cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid port %llu\n",
			     (unsigned long long)port);
		return CVMX_PKO_INVALID_PORT;
	}

	if (base_queue + num_queues > CVMX_PKO_MAX_OUTPUT_QUEUES) {
		cvmx_dprintf
		    ("ERROR: cvmx_pko_config_port: Invalid queue range %llu\n",
		     (unsigned long long)(base_queue + num_queues));
		return CVMX_PKO_INVALID_QUEUE;
	}

	if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
		/*
		 * Validate the static queue priority setup and set
		 * static_priority_base and static_priority_end
		 * accordingly.
		 */
		for (queue = 0; queue < num_queues; queue++) {
			/* Find first queue of static priority */
			if (static_priority_base == -1
			    && priority[queue] ==
			    CVMX_PKO_QUEUE_STATIC_PRIORITY)
				static_priority_base = queue;
			/* Find last queue of static priority */
			if (static_priority_base != -1
			    && static_priority_end == -1
			    && priority[queue] !=
			    CVMX_PKO_QUEUE_STATIC_PRIORITY && queue)
				static_priority_end = queue - 1;
			else if (static_priority_base != -1
				 && static_priority_end == -1
				 && queue == num_queues - 1)
				/* all queues are static priority */
				static_priority_end = queue;
			/*
			 * Check to make sure all static priority
			 * queues are contiguous. Also catches some
			 * cases of static priorities not starting at
			 * queue 0.
			 */
			if (static_priority_end != -1
			    && (int)queue > static_priority_end
			    && priority[queue] ==
			    CVMX_PKO_QUEUE_STATIC_PRIORITY) {
				cvmx_dprintf("ERROR: cvmx_pko_config_port: "
					     "Static priority queues aren't "
					     "contiguous or don't start at "
					     "base queue. q: %d, eq: %d\n",
					     (int)queue, static_priority_end);
				return CVMX_PKO_INVALID_PRIORITY;
			}
		}
		if (static_priority_base > 0) {
			cvmx_dprintf("ERROR: cvmx_pko_config_port: Static "
				     "priority queues don't start at base "
				     "queue. sq: %d\n", static_priority_base);
			return CVMX_PKO_INVALID_PRIORITY;
		}
	}

	/*
	 * At this point, static_priority_base and static_priority_end
	 * are either both -1, or are valid start/end queue numbers.
	 */

	result_code = CVMX_PKO_SUCCESS;

#ifdef PKO_DEBUG
	cvmx_dprintf("num queues: %d (%lld,%lld)\n", num_queues,
		     CVMX_PKO_QUEUES_PER_PORT_INTERFACE0,
		     CVMX_PKO_QUEUES_PER_PORT_INTERFACE1);
#endif

	for (queue = 0; queue < num_queues; queue++) {
		uint64_t *buf_ptr = NULL;
		config1.u64 = 0;
		config1.s.idx3 = queue >> 3;
		config1.s.qid7 = (base_queue + queue) >> 7;
		config.u64 = 0;
		config.s.tail = queue == (num_queues - 1);
		config.s.index = queue;
		config.s.port = port;
		config.s.queue = base_queue + queue;

		if (!cvmx_octeon_is_pass1()) {
			config.s.static_p = static_priority_base >= 0;
			config.s.static_q = (int)queue <= static_priority_end;
			config.s.s_tail = (int)queue == static_priority_end;
		}
		/*
		 * Convert the priority into an enable bit field. Try
		 * to space the bits out evenly so the packets don't
		 * get grouped up.
		 */
		switch ((int)priority[queue]) {
		case 0:
			config.s.qos_mask = 0x00;
			break;
		case 1:
			config.s.qos_mask = 0x01;
			break;
		case 2:
			config.s.qos_mask = 0x11;
			break;
		case 3:
			config.s.qos_mask = 0x49;
			break;
		case 4:
			config.s.qos_mask = 0x55;
			break;
		case 5:
			config.s.qos_mask = 0x57;
			break;
		case 6:
			config.s.qos_mask = 0x77;
			break;
		case 7:
			config.s.qos_mask = 0x7f;
			break;
		case 8:
			config.s.qos_mask = 0xff;
			break;
		case CVMX_PKO_QUEUE_STATIC_PRIORITY:
			/* Pass 1 will fall through to the error case */
			if (!cvmx_octeon_is_pass1()) {
				config.s.qos_mask = 0xff;
				break;
			}
		default:
			cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid "
				     "priority %llu\n",
				     (unsigned long long)priority[queue]);
			config.s.qos_mask = 0xff;
			result_code = CVMX_PKO_INVALID_PRIORITY;
			break;
		}

		if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
			cvmx_cmd_queue_result_t cmd_res =
			    cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO
						      (base_queue + queue),
						      CVMX_PKO_MAX_QUEUE_DEPTH,
						      CVMX_FPA_OUTPUT_BUFFER_POOL,
						      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
						      -
						      CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST
						      * 8);
			if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) {
				switch (cmd_res) {
				case CVMX_CMD_QUEUE_NO_MEMORY:
					cvmx_dprintf("ERROR: "
						     "cvmx_pko_config_port: "
						     "Unable to allocate "
						     "output buffer.\n");
					return CVMX_PKO_NO_MEMORY;
				case CVMX_CMD_QUEUE_ALREADY_SETUP:
					cvmx_dprintf
					    ("ERROR: cvmx_pko_config_port: Port already setup.\n");
					return CVMX_PKO_PORT_ALREADY_SETUP;
				case CVMX_CMD_QUEUE_INVALID_PARAM:
				default:
					cvmx_dprintf
					    ("ERROR: cvmx_pko_config_port: Command queue initialization failed.\n");
					return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
				}
			}

			buf_ptr = (uint64_t *)
			    cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_PKO
						  (base_queue + queue));
			config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr);
		} else
			config.s.buf_ptr = 0;

		CVMX_SYNCWS;

		if (!OCTEON_IS_MODEL(OCTEON_CN3XXX))
			cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
	}
	return result_code;
}
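/*
 * Illustrative sketch (not part of the original source): one way a caller
 * might use cvmx_pko_config_port(). Queue 0 gets static priority and queues
 * 1-3 share the remaining bandwidth roughly 4:2:1. The port and queue
 * numbers here are hypothetical; real values come from the board's PKO
 * queue allocation.
 */
#if 0
static cvmx_pko_status_t example_pko_port_setup(void)
{
	/* One entry per queue: queue 0 static, then weights 4, 2, 1 */
	const uint64_t priority[4] = {
		CVMX_PKO_QUEUE_STATIC_PRIORITY, 4, 2, 1
	};

	/* Port 0 owns queues 0-3 (base_queue 0, num_queues 4) */
	return cvmx_pko_config_port(0, 0, 4, priority);
}
#endif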
/**
 * Configure an IPD/PKO port for the specified link state. This
 * function does not influence auto negotiation at the PHY level.
 * The passed link state must always match the link state returned
 * by cvmx_helper_link_get(). It is normally best to use
 * cvmx_helper_link_autoconf() instead.
 *
 * @ipd_port:  IPD/PKO port to configure
 * @link_info: The new link state
 *
 * Returns Zero on success, negative on failure
 */
int __cvmx_helper_rgmii_link_set(int ipd_port,
				 cvmx_helper_link_info_t link_info)
{
	int result = 0;
	int interface = cvmx_helper_get_interface_num(ipd_port);
	int index = cvmx_helper_get_interface_index_num(ipd_port);
	union cvmx_gmxx_prtx_cfg original_gmx_cfg;
	union cvmx_gmxx_prtx_cfg new_gmx_cfg;
	union cvmx_pko_mem_queue_qos pko_mem_queue_qos;
	union cvmx_pko_mem_queue_qos pko_mem_queue_qos_save[16];
	union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp;
	union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp_save;
	int i;

	/* Ignore speed sets in the simulator */
	if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
		return 0;

	/* Read the current settings so we know the current enable state */
	original_gmx_cfg.u64 =
	    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	new_gmx_cfg = original_gmx_cfg;

	/* Disable the lowest level RX */
	cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
		       cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) &
		       ~(1 << index));

	memset(pko_mem_queue_qos_save, 0, sizeof(pko_mem_queue_qos_save));
	/* Disable all queues so that TX should become idle */
	for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
		int queue = cvmx_pko_get_base_queue(ipd_port) + i;
		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
		pko_mem_queue_qos.u64 = cvmx_read_csr(CVMX_PKO_MEM_QUEUE_QOS);
		pko_mem_queue_qos.s.pid = ipd_port;
		pko_mem_queue_qos.s.qid = queue;
		pko_mem_queue_qos_save[i] = pko_mem_queue_qos;
		pko_mem_queue_qos.s.qos_mask = 0;
		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos.u64);
	}

	/* Disable backpressure */
	gmx_tx_ovr_bp.u64 = cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
	gmx_tx_ovr_bp_save = gmx_tx_ovr_bp;
	gmx_tx_ovr_bp.s.bp &= ~(1 << index);
	gmx_tx_ovr_bp.s.en |= 1 << index;
	cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp.u64);
	cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));

	/*
	 * Poll the GMX state machine waiting for it to become
	 * idle. Preferably we should only change speed when it is
	 * idle. If it doesn't become idle we will still do the speed
	 * change, but there is a slight chance that GMX will
	 * lockup.
	 */
	cvmx_write_csr(CVMX_NPI_DBG_SELECT,
		       interface * 0x800 + index * 0x100 + 0x880);
	CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data, data & 7,
			      ==, 0, 10000);
	CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data, data & 0xf,
			      ==, 0, 10000);

	/* Disable the port before we make any changes */
	new_gmx_cfg.s.en = 0;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
	cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));

	/* Set full/half duplex */
	if (cvmx_octeon_is_pass1())
		/* Half duplex is broken for 38XX Pass 1 */
		new_gmx_cfg.s.duplex = 1;
	else if (!link_info.s.link_up)
		/* Force full duplex on down links */
		new_gmx_cfg.s.duplex = 1;
	else
		new_gmx_cfg.s.duplex = link_info.s.full_duplex;

	/* Set the link speed. Anything unknown is set to 1Gbps */
	if (link_info.s.speed == 10) {
		new_gmx_cfg.s.slottime = 0;
		new_gmx_cfg.s.speed = 0;
	} else if (link_info.s.speed == 100) {
		new_gmx_cfg.s.slottime = 0;
		new_gmx_cfg.s.speed = 0;
	} else {
		new_gmx_cfg.s.slottime = 1;
		new_gmx_cfg.s.speed = 1;
	}

	/* Adjust the clocks */
	if (link_info.s.speed == 10) {
		cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 50);
		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
	} else if (link_info.s.speed == 100) {
		cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 5);
		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
	} else {
		cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
	}

	if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
		if ((link_info.s.speed == 10) || (link_info.s.speed == 100)) {
			union cvmx_gmxx_inf_mode mode;
			mode.u64 =
			    cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));

			/*
			 * Port  .en  .type  .p0mii  Configuration
			 * ----  ---  -----  ------  -----------------------------------------
			 *  X     0     X      X     All links are disabled.
			 *  0     1     X      0     Port 0 is RGMII
			 *  0     1     X      1     Port 0 is MII
			 *  1     1     0      X     Ports 1 and 2 are configured as RGMII ports.
			 *  1     1     1      X     Port 1: GMII/MII; Port 2: disabled. GMII or
			 *                           MII port is selected by GMX_PRT1_CFG[SPEED].
			 */

			/* In MII mode, CLK_CNT = 1. */
			if (((index == 0) && (mode.s.p0mii == 1))
			    || ((index != 0) && (mode.s.type == 1))) {
				cvmx_write_csr(CVMX_GMXX_TXX_CLK
					       (index, interface), 1);
			}
		}
	}

	/* Do a read to make sure all setup stuff is complete */
	cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));

	/* Save the new GMX setting without enabling the port */
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);

	/* Enable the lowest level RX */
	cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
		       cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) |
		       (1 << index));

	/* Re-enable the TX path */
	for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
		int queue = cvmx_pko_get_base_queue(ipd_port) + i;
		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS,
			       pko_mem_queue_qos_save[i].u64);
	}

	/* Restore backpressure */
	cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp_save.u64);

	/* Restore the GMX enable state. Port config is complete */
	new_gmx_cfg.s.en = original_gmx_cfg.s.en;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);

	return result;
}
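/*
 * Illustrative sketch (not part of the original source): forcing a fixed
 * 1Gbps full-duplex state through __cvmx_helper_rgmii_link_set() instead of
 * letting cvmx_helper_link_autoconf() read the PHY. The .s field names
 * follow the use of cvmx_helper_link_info_t above; the .u64 clear is an
 * assumption about that union, and the port number is hypothetical.
 */
#if 0
static int example_force_gigabit(int ipd_port)
{
	cvmx_helper_link_info_t link_info;

	link_info.u64 = 0;
	link_info.s.link_up = 1;
	link_info.s.full_duplex = 1;
	link_info.s.speed = 1000;	/* Mbps; unknown speeds are treated as 1Gbps */

	return __cvmx_helper_rgmii_link_set(ipd_port, link_info);
}
#endif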
/**
 * Configure all of the ASX, GMX, and PKO registers required
 * to get RGMII to function on the supplied interface.
 *
 * @interface: PKO Interface to configure (0 or 1)
 *
 * Returns Zero on success
 */
int __cvmx_helper_rgmii_enable(int interface)
{
	int num_ports = cvmx_helper_ports_on_interface(interface);
	int port;
	struct cvmx_sysinfo *sys_info_ptr = cvmx_sysinfo_get();
	union cvmx_gmxx_inf_mode mode;
	union cvmx_asxx_tx_prt_en asx_tx;
	union cvmx_asxx_rx_prt_en asx_rx;

	mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));

	if (mode.s.en == 0)
		return -1;
	if ((OCTEON_IS_MODEL(OCTEON_CN38XX) ||
	     OCTEON_IS_MODEL(OCTEON_CN58XX)) && mode.s.type == 1)
		/* Ignore SPI interfaces */
		return -1;

	/* Configure the ASX registers needed to use the RGMII ports */
	asx_tx.u64 = 0;
	asx_tx.s.prt_en = cvmx_build_mask(num_ports);
	cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), asx_tx.u64);

	asx_rx.u64 = 0;
	asx_rx.s.prt_en = cvmx_build_mask(num_ports);
	cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), asx_rx.u64);

	/* Configure the GMX registers needed to use the RGMII ports */
	for (port = 0; port < num_ports; port++) {
		/*
		 * Setting of CVMX_GMXX_TXX_THRESH has been moved to
		 * __cvmx_helper_setup_gmx()
		 */

		if (cvmx_octeon_is_pass1())
			__cvmx_helper_errata_asx_pass1(interface, port,
						       sys_info_ptr->
						       cpu_clock_hz);
		else {
			/*
			 * Configure more flexible RGMII preamble
			 * checking. Pass 1 doesn't support this
			 * feature.
			 */
			union cvmx_gmxx_rxx_frm_ctl frm_ctl;
			frm_ctl.u64 =
			    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL
					  (port, interface));
			/* New field, so must be compile time */
			frm_ctl.s.pre_free = 1;
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(port, interface),
				       frm_ctl.u64);
		}

		/*
		 * Each pause frame transmitted will ask for about 10M
		 * bit times before resume. If buffer space comes
		 * available before that time has expired, an XON
		 * pause frame (0 time) will be transmitted to restart
		 * the flow.
		 */
		cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_TIME(port, interface),
			       20000);
		cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL
			       (port, interface), 19000);

		if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
			cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface),
				       16);
			cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface),
				       16);
		} else {
			cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface),
				       24);
			cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface),
				       24);
		}
	}

	__cvmx_helper_setup_gmx(interface, num_ports);

	/* enable the ports now */
	for (port = 0; port < num_ports; port++) {
		union cvmx_gmxx_prtx_cfg gmx_cfg;
		cvmx_helper_link_autoconf(cvmx_helper_get_ipd_port
					  (interface, port));
		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(port, interface));
		gmx_cfg.s.en = 1;
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(port, interface),
			       gmx_cfg.u64);
	}
	__cvmx_interrupt_asxx_enable(interface);
	__cvmx_interrupt_gmxx_enable(interface);

	return 0;
}
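/*
 * Illustrative sketch (not part of the original source): bringing up the two
 * RGMII-capable interfaces with __cvmx_helper_rgmii_enable(). The function
 * itself returns -1 for disabled or SPI interfaces, so the loop simply
 * reports those and moves on.
 */
#if 0
static void example_enable_rgmii(void)
{
	int interface;

	for (interface = 0; interface < 2; interface++) {
		if (__cvmx_helper_rgmii_enable(interface) != 0)
			cvmx_dprintf("RGMII: interface %d not enabled\n",
				     interface);
	}
}
#endif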