Example #1
static int __init cvm_oct_init_module(void)
{
    int num_interfaces;
    int interface;
    int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
    int qos;

    octeon_mdiobus_force_mod_depencency();
    pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);

    if (OCTEON_IS_MODEL(OCTEON_CN52XX))
        cvm_oct_mac_addr_offset = 2; /* First two are the mgmt ports. */
    else if (OCTEON_IS_MODEL(OCTEON_CN56XX))
        cvm_oct_mac_addr_offset = 1; /* First one is the mgmt port. */
    else
        cvm_oct_mac_addr_offset = 0;

    cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
    if (cvm_oct_poll_queue == NULL) {
        pr_err("octeon-ethernet: Cannot create workqueue");
        return -ENOMEM;
    }

    cvm_oct_configure_common_hw();

    cvmx_helper_initialize_packet_io_global();

    /* Change the input group for all ports before input is enabled */
    num_interfaces = cvmx_helper_get_number_of_interfaces();
    for (interface = 0; interface < num_interfaces; interface++) {
        int num_ports = cvmx_helper_ports_on_interface(interface);
        int port;

        for (port = cvmx_helper_get_ipd_port(interface, 0);
                port < cvmx_helper_get_ipd_port(interface, num_ports);
                port++) {
            union cvmx_pip_prt_tagx pip_prt_tagx;
            pip_prt_tagx.u64 =
                cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
            pip_prt_tagx.s.grp = pow_receive_group;
            cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
                           pip_prt_tagx.u64);
        }
    }

    cvmx_helper_ipd_and_packet_input_enable();

    memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

    /*
     * Initialize the FAU used for counting packet buffers that
     * need to be freed.
     */
    cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

    /* Initialize the FAU used for counting tx SKBs that need to be freed */
    cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

    if (pow_send_group != -1) {
        struct net_device *dev;
        pr_info("\tConfiguring device for POW only access\n");
        dev = alloc_etherdev(sizeof(struct octeon_ethernet));
        if (dev) {
            /* Initialize the device private structure. */
            struct octeon_ethernet *priv = netdev_priv(dev);

            dev->netdev_ops = &cvm_oct_pow_netdev_ops;
            priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
            priv->port = CVMX_PIP_NUM_INPUT_PORTS;
            priv->queue = -1;
            strcpy(dev->name, "pow%d");
            for (qos = 0; qos < 16; qos++)
                skb_queue_head_init(&priv->tx_free_list[qos]);

            if (register_netdev(dev) < 0) {
                pr_err("Failed to register ethernet device for POW\n");
                free_netdev(dev);
            } else {
                cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
                pr_info("%s: POW send group %d, receive group %d\n",
                        dev->name, pow_send_group,
                        pow_receive_group);
            }
        } else {
            pr_err("Failed to allocate ethernet device for POW\n");
        }
    }

    num_interfaces = cvmx_helper_get_number_of_interfaces();
    for (interface = 0; interface < num_interfaces; interface++) {
        cvmx_helper_interface_mode_t imode =
            cvmx_helper_interface_get_mode(interface);
        int num_ports = cvmx_helper_ports_on_interface(interface);
        int port;

        for (port = cvmx_helper_get_ipd_port(interface, 0);
                port < cvmx_helper_get_ipd_port(interface, num_ports);
                port++) {
            struct octeon_ethernet *priv;
            struct net_device *dev =
                alloc_etherdev(sizeof(struct octeon_ethernet));
            if (!dev) {
                pr_err("Failed to allocate ethernet device for port %d\n", port);
                continue;
            }

            /* Initialize the device private structure. */
            priv = netdev_priv(dev);

            INIT_DELAYED_WORK(&priv->port_periodic_work,
                              cvm_oct_periodic_worker);
            priv->imode = imode;
            priv->port = port;
            priv->queue = cvmx_pko_get_base_queue(priv->port);
            priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
            for (qos = 0; qos < 16; qos++)
                skb_queue_head_init(&priv->tx_free_list[qos]);
            for (qos = 0; qos < cvmx_pko_get_num_queues(port);
                    qos++)
                cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

            switch (priv->imode) {

            /* These types don't support ports to IPD/PKO */
            case CVMX_HELPER_INTERFACE_MODE_DISABLED:
            case CVMX_HELPER_INTERFACE_MODE_PCIE:
            case CVMX_HELPER_INTERFACE_MODE_PICMG:
                break;

            case CVMX_HELPER_INTERFACE_MODE_NPI:
                dev->netdev_ops = &cvm_oct_npi_netdev_ops;
                strcpy(dev->name, "npi%d");
                break;

            case CVMX_HELPER_INTERFACE_MODE_XAUI:
                dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
                strcpy(dev->name, "xaui%d");
                break;

            case CVMX_HELPER_INTERFACE_MODE_LOOP:
                dev->netdev_ops = &cvm_oct_npi_netdev_ops;
                strcpy(dev->name, "loop%d");
                break;

            case CVMX_HELPER_INTERFACE_MODE_SGMII:
                dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
                strcpy(dev->name, "eth%d");
                break;

            case CVMX_HELPER_INTERFACE_MODE_SPI:
                dev->netdev_ops = &cvm_oct_spi_netdev_ops;
                strcpy(dev->name, "spi%d");
                break;

            case CVMX_HELPER_INTERFACE_MODE_RGMII:
            case CVMX_HELPER_INTERFACE_MODE_GMII:
                dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
                strcpy(dev->name, "eth%d");
                break;
            }

            if (!dev->netdev_ops) {
                free_netdev(dev);
            } else if (register_netdev(dev) < 0) {
                pr_err("Failed to register ethernet device "
                       "for interface %d, port %d\n",
                       interface, priv->port);
                free_netdev(dev);
            } else {
                cvm_oct_device[priv->port] = dev;
                fau -=
                    cvmx_pko_get_num_queues(priv->port) *
                    sizeof(uint32_t);
                queue_delayed_work(cvm_oct_poll_queue,
                                   &priv->port_periodic_work, HZ);
            }
        }
    }

    cvm_oct_tx_initialize();
    cvm_oct_rx_initialize();

    /*
     * 150 us: about 10 1500-byte packets at 1GE.
     */
    cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

    queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);

    return 0;
}
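
A note on the FAU bookkeeping above: 'fau' starts at FAU_NUM_PACKET_BUFFERS_TO_FREE and is decremented by num_queues * sizeof(uint32_t) for each registered port, so every PKO queue ends up with its own 32-bit fetch-and-add counter. A minimal sketch of the layout implied by the code, for one port with two queues:

/*
 * With the running cursor at F and cvmx_pko_get_num_queues(port) == 2:
 *
 *   priv->fau     == F - 8   counter for queue 0 (qos 0)
 *   priv->fau + 4 == F - 4   counter for queue 1 (qos 1)
 *
 * Each counter is cleared with cvmx_fau_atomic_write32(), and the
 * cursor then moves down to F - 8 for the next port.
 */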
Example #2
/* Description of the parameters passed to the QLM configuration:
 * 	param0 : The QLM to configure
 * 	param1 : Speed to configure the QLM at
 * 	param2 : Mode to configure the QLM in
 * 	param3 : 1 = RC, 0 = EP
 * 	param4 : 0 = GEN1, 1 = GEN2, 2 = GEN3
 * 	param5 : ref clock select, 0 = 100MHz, 1 = 125MHz, 2 = 156MHz
 * 	param6 : ref clock input to use:
 * 		 0 = external reference (QLMx_REF_CLK)
 * 		 1 = common clock 0 (QLMC_REF_CLK0)
 * 		 2 = common clock 1 (QLMC_REF_CLK1)
 */
int checkboard(void)
{
	int qlm;
	char env_var[16];
	int node = 0;

	/* Since i2c is broken on pass 1.0 we use an environment variable */
	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0)) {
		ulong board_version;
		board_version = getenv_ulong("board_version", 10, 1);

		gd->arch.board_desc.rev_major = board_version;
	}

	octeon_init_qlm(0);

	for (node = 0; node < CVMX_MAX_NODES; node++) {
		int speed[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
		int mode[8] = { -1, -1, -1, -1, -1, -1, -1, -1 };
		int pcie_rc[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
		int pcie_gen[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
		int ref_clock_sel[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
		int ref_clock_input[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };

		if (!(gd->arch.node_mask & (1 << node)))
			continue;

		for (qlm = 0; qlm < 8; qlm++) {
			const char *mode_str;

			mode[qlm] = CVMX_QLM_MODE_DISABLED;
			sprintf(env_var, "qlm%d:%d_mode", qlm, node);
			mode_str = getenv(env_var);
			if (!mode_str)
				continue;
			if (!strncmp(mode_str, "sgmii", 5)
			    || !strncmp(mode_str, "xaui", 4)
			    || !strncmp(mode_str, "dxaui", 5)
			    || !strncmp(mode_str, "rxaui", 5)) {
				/* BGX0 is QLM0 or QLM2 */
				if (qlm == 2
				    && mode[0] != -1
				    && mode[0] != CVMX_QLM_MODE_PCIE
				    && mode[0] != CVMX_QLM_MODE_PCIE_1X8) {
					printf("NODE %d: Not configuring QLM2,  as QLM0 is already set for BGX0\n", node);
					continue;
				}
				/* BGX1 is QLM1 or QLM3 */
				if (qlm == 3
				    && mode[1] != -1
				    && mode[1] != CVMX_QLM_MODE_PCIE
				    && mode[1] != CVMX_QLM_MODE_PCIE_1X8) {
					printf("NODE %d: Not configuring QLM2,  as QLM1 is already set for BGX1\n", node);
					continue;
				}
			}
			if (!strncmp(mode_str, "sgmii", 5)) {
				speed[qlm] = 1250;
				mode[qlm] = CVMX_QLM_MODE_SGMII;
				ref_clock_sel[qlm] = 2;
				printf("NODE %d:QLM %d: SGMII\n", node, qlm);
			} else if (!strncmp(mode_str, "xaui", 4)) {
				speed[qlm] = 3125;
				mode[qlm] = CVMX_QLM_MODE_XAUI;
				ref_clock_sel[qlm] = 2;
				printf("NODE %d:QLM %d: XAUI\n", node, qlm);
			} else if (!strncmp(mode_str, "dxaui", 5)) {
				speed[qlm] = 6250;
				mode[qlm] = CVMX_QLM_MODE_XAUI;
				ref_clock_sel[qlm] = 2;
				printf("NODE %d:QLM %d: DXAUI\n", node, qlm);
			} else if (!strncmp(mode_str, "rxaui", 5)) {
				speed[qlm] = 6250;
				mode[qlm] = CVMX_QLM_MODE_RXAUI;
				ref_clock_sel[qlm] = 2;
				printf("NODE %d:QLM %d: RXAUI\n", node, qlm);
			} else if (!strcmp(mode_str, "ila")) {
				if (qlm != 2 && qlm != 3) {
					printf("Error: ILA not supported on NODE%d:QLM %d\n",
						node, qlm);
				} else {
					speed[qlm] = 6250;
					mode[qlm] = CVMX_QLM_MODE_ILK;
					ref_clock_sel[qlm] = 2;
					printf("NODE %d:QLM %d: ILA\n", node, qlm);
				}
			} else if (!strcmp(mode_str, "ilk")) {
				int lanes = 0;
				char ilk_env[16];

				if (qlm < 4) {
					printf("Error: ILK not supported on NODE%d:QLM %d\n",
				       node, qlm);
				} else {
					speed[qlm] = 6250;
					mode[qlm] = CVMX_QLM_MODE_ILK;
					ref_clock_sel[qlm] = 2;
					printf("NODE %d:QLM %d: ILK\n", node, qlm);
				}
				sprintf(ilk_env, "ilk%d:%d_lanes", qlm, node);
				if (getenv(ilk_env))
					lanes = getenv_ulong(ilk_env, 0, 16);
				if (lanes > 4)
					ref_clock_input[qlm] = 2;
			} else if (!strncmp(mode_str, "xlaui", 5)) {
				if (qlm < 4) {
					printf("Error: XLAUI not supported on NODE%d:QLM %d\n",
				       	node, qlm);
				} else {
					speed[qlm] = 103125;
					mode[qlm] = CVMX_QLM_MODE_XLAUI;
					ref_clock_sel[qlm] = 2;
					printf("NODE %d:QLM %d: XLAUI\n", node, qlm);
				}
			} else if (!strncmp(mode_str, "xfi", 3)) {
				if (qlm < 4) {
					printf("Error: XFI not supported on NODE%d:QLM %d\n",
				       node, qlm);
				} else {
					speed[qlm] = 103125;
					mode[qlm] = CVMX_QLM_MODE_XFI;
					ref_clock_sel[qlm] = 2;
					printf("NODE %d:QLM %d: XFI\n", node, qlm);
				}
			} else if (!strncmp(mode_str, "10G_KR", 6)) {
				if (qlm < 4) {
					printf("Error: 10G_KR not supported on NODE%d:QLM %d\n",
				       node, qlm);
				} else {
					speed[qlm] = 103125;
					mode[qlm] = CVMX_QLM_MODE_10G_KR;
					ref_clock_sel[qlm] = 2;
					printf("NODE %d:QLM %d: 10G_KR\n", node, qlm);
				}
			} else if (!strncmp(mode_str, "40G_KR4", 7)) {
				if (qlm < 4) {
					printf("Error: 40G_KR4 not supported on NODE%d:QLM %d\n",
				       node, qlm);
				} else {
					speed[qlm] = 103125;
					mode[qlm] = CVMX_QLM_MODE_40G_KR4;
					ref_clock_sel[qlm] = 2;
					printf("NODE %d:QLM %d: 40G_KR4\n", node, qlm);
				}
			} else if (!strcmp(mode_str, "pcie")) {
				char *pmode;
				int lanes = 0;

				sprintf(env_var, "pcie%d:%d_mode", qlm, node);
				pmode = getenv(env_var);
				if (pmode && !strcmp(pmode, "ep"))
					pcie_rc[qlm] = 0;
				else
					pcie_rc[qlm] = 1;
				sprintf(env_var, "pcie%d:%d_gen", qlm, node);
				pcie_gen[qlm] = getenv_ulong(env_var, 0, 3);
				sprintf(env_var, "pcie%d:%d_lanes", qlm, node);
				lanes = getenv_ulong(env_var, 0, 8);
				if (lanes == 8)
					mode[qlm] = CVMX_QLM_MODE_PCIE_1X8;
				else
					mode[qlm] = CVMX_QLM_MODE_PCIE;
				ref_clock_sel[qlm] = 0;
				printf("NODE %d:QLM %d: PCIe gen%d %s\n", node, qlm, pcie_gen[qlm] + 1,
					pcie_rc[qlm] ? "root complex" : "endpoint");
			} else {
				printf("NODE %d:QLM %d: disabled\n", node, qlm);
			}
		}

		for (qlm = 0; qlm < 8; qlm++) {
			if (mode[qlm] == -1)
				continue;
			debug("Configuring node%d qlm%d with speed(%d), mode(%d), RC(%d), Gen(%d), REF_CLK(%d), CLK_SOURCE(%d)\n",
			 	node, qlm, speed[qlm], mode[qlm], pcie_rc[qlm],
			 	pcie_gen[qlm] + 1, ref_clock_sel[qlm], ref_clock_input[qlm]);
			octeon_configure_qlm_cn78xx(node, qlm, speed[qlm], mode[qlm], pcie_rc[qlm],
					    pcie_gen[qlm], ref_clock_sel[qlm],
					    ref_clock_input[qlm]);
		}
	}

	checkboardinfo();

	return 0;
}
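
For illustration, a hypothetical U-Boot environment that checkboard() above would parse for node 0. The variable names follow the sprintf() formats in the code; the values are examples, not board defaults:

/*
 *   qlm0:0_mode = sgmii    -> QLM0: SGMII, 1250 MBd, ref clock sel 2
 *   qlm4:0_mode = xfi      -> QLM4: XFI, 10.3125 GBd
 *   qlm2:0_mode = pcie     -> QLM2: PCIe ...
 *   pcie2:0_mode = ep      ->   ... as endpoint (pcie_rc = 0)
 *   pcie2:0_gen = 2        ->   ... gen3 (the env value is gen - 1)
 *   pcie2:0_lanes = 8      ->   ... in 1x8 mode instead of 1x4
 */
Example #3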
void __init arch_init_irq(void)
{
	int irq;
	struct irq_chip *chip0;
	struct irq_chip *chip0_timer;
	struct irq_chip *chip1;

#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

	if (NR_IRQS < OCTEON_IRQ_LAST)
		pr_err("octeon_irq_init: NR_IRQS is set too low\n");

	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
		chip0 = &octeon_irq_chip_ciu0_v2;
		chip0_timer = &octeon_irq_chip_ciu0_timer_v2;
		chip1 = &octeon_irq_chip_ciu1_v2;
	} else {
		chip0 = &octeon_irq_chip_ciu0;
		chip0_timer = &octeon_irq_chip_ciu0_timer;
		chip1 = &octeon_irq_chip_ciu1;
	}

	/* 0 - 15 reserved for i8259 master and slave controller. */

	/* 17 - 23 Mips internal */
	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}

	/* 24 - 87 CIU_INT_SUM0 */
	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		switch (irq) {
		case OCTEON_IRQ_GMX_DRP0:
		case OCTEON_IRQ_GMX_DRP1:
		case OCTEON_IRQ_IPD_DRP:
		case OCTEON_IRQ_KEY_ZERO:
		case OCTEON_IRQ_TIMER0:
		case OCTEON_IRQ_TIMER1:
		case OCTEON_IRQ_TIMER2:
		case OCTEON_IRQ_TIMER3:
			set_irq_chip_and_handler(irq, chip0_timer, handle_percpu_irq);
			break;
		default:
			set_irq_chip_and_handler(irq, chip0, handle_percpu_irq);
			break;
		}
	}

	/* 88 - 151 CIU_INT_SUM1 */
	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
		set_irq_chip_and_handler(irq, chip1, handle_percpu_irq);
	}

#ifdef CONFIG_PCI_MSI
	/* 152 - 215 PCI/PCIe MSI interrupts */
	for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
					 handle_percpu_irq);
	}
#endif
	/* Unmask IP2 and IP3 in CP0 Status (the CIU0/CIU1 summary lines) */
	set_c0_status(0x300 << 2);
}
Example #4
/**
 * Initialize and start the ILK interface.
 *
 * @param interface The identifier of the packet interface to configure and
 *                  use as a ILK interface. cn68xx has 2 interfaces: ilk0 and
 *                  ilk1.
 *
 * @param lane_mask the lane group for this interface
 *
 * @return Zero on success, negative on failure.
 */
int cvmx_ilk_start_interface (int interface, unsigned char lane_mask)
{
    int res = -1;
    int other_intf, this_qlm, other_qlm;
    unsigned char uni_mask;
    cvmx_mio_qlmx_cfg_t mio_qlmx_cfg, other_mio_qlmx_cfg;
    cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
    cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
    cvmx_ilk_ser_cfg_t ilk_ser_cfg;

    if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
        return res;

    if (interface >= CVMX_NUM_ILK_INTF)
        return res;

    if (lane_mask == 0)
        return res;

    /* check conflicts between 2 ilk interfaces. 1 lane can be assigned to 1
     * interface only */
    other_intf = !interface;
    this_qlm = interface + CVMX_ILK_QLM_BASE;
    other_qlm = other_intf + CVMX_ILK_QLM_BASE;
    if (cvmx_ilk_intf_cfg[other_intf].lane_en_mask & lane_mask)
    {
        cvmx_dprintf ("ILK%d: %s: lane assignment conflict\n", interface,
                      __FUNCTION__);
        return res;
    }

    /* check the legality of the lane mask. interface 0 can have 8 lanes,
     * while interface 1 can have 4 lanes at most */
    uni_mask = lane_mask >> (interface * 4);
    if ((uni_mask != 0x1 && uni_mask != 0x3 && uni_mask != 0xf &&
         uni_mask != 0xff) || (interface == 1 && lane_mask > 0xf0))
    {
#if CVMX_ENABLE_DEBUG_PRINTS
        cvmx_dprintf ("ILK%d: %s: incorrect lane mask: 0x%x \n", interface,
                      __FUNCTION__, uni_mask);
#endif
        return res;
    }

    /* check the availability of qlms. qlm_cfg = 001 means the chip is fused
     * to give this qlm to ilk */
    mio_qlmx_cfg.u64 = cvmx_read_csr (CVMX_MIO_QLMX_CFG(this_qlm));
    other_mio_qlmx_cfg.u64 = cvmx_read_csr (CVMX_MIO_QLMX_CFG(other_qlm));
    if (mio_qlmx_cfg.s.qlm_cfg != 1 ||
        (uni_mask == 0xff && other_mio_qlmx_cfg.s.qlm_cfg != 1))
    {
#if CVMX_ENABLE_DEBUG_PRINTS
        cvmx_dprintf ("ILK%d: %s: qlm unavailable\n", interface, __FUNCTION__);
#endif
        return res;
    }

    /* power up the serdes */
    ilk_ser_cfg.u64 = cvmx_read_csr (CVMX_ILK_SER_CFG);
    if (ilk_ser_cfg.s.ser_pwrup == 0)
    {
        ilk_ser_cfg.s.ser_rxpol_auto = 1;
        ilk_ser_cfg.s.ser_rxpol = 0;
        ilk_ser_cfg.s.ser_txpol = 0;
        ilk_ser_cfg.s.ser_reset_n = 0xff;
        ilk_ser_cfg.s.ser_haul = 0;
    }
    ilk_ser_cfg.s.ser_pwrup |= ((interface == 0) && (lane_mask > 0xf)) ?
                               0x3 : (1 << interface);
    cvmx_write_csr (CVMX_ILK_SER_CFG, ilk_ser_cfg.u64);

    /* configure the lane enable of the interface */
    ilk_txx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG0(interface));
    ilk_rxx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG0(interface));
    ilk_txx_cfg0.s.lane_ena = ilk_rxx_cfg0.s.lane_ena = lane_mask;
    cvmx_write_csr (CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64);
    cvmx_write_csr (CVMX_ILK_RXX_CFG0(interface), ilk_rxx_cfg0.u64);

    /* write to local cache. for lane speed, if interface 0 has 8 lanes,
     * assume both qlms have the same speed */
    cvmx_ilk_intf_cfg[interface].intf_en = 1;
    cvmx_ilk_intf_cfg[interface].lane_en_mask = lane_mask;
    res = 0;

    return res;
}
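
A minimal usage sketch (lane masks chosen per the legality check above; assumes the CN68XX is fused to give the corresponding QLMs to ILK):

/* Bring up ILK0 on the four lanes of its base QLM and ILK1 on the
 * upper four lanes. */
if (cvmx_ilk_start_interface(0, 0x0f) != 0)
    cvmx_dprintf("ILK0: start failed\n");
if (cvmx_ilk_start_interface(1, 0xf0) != 0)
    cvmx_dprintf("ILK1: start failed\n");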
Example #5
/**
 * Initialize a USB port for use. This must be called before any
 * other access to the Octeon USB port is made. The port starts
 * off in the disabled state.
 *
 * @param usb    Pointer to an empty cvmx_usbd_state_t structure
 *               that will be populated by the initialize call.
 *               This structure is then passed to all other USB
 *               functions.
 * @param usb_port_number
 *               Which Octeon USB port to initialize.
 * @param flags  Flags to control hardware initialization. See
 *               cvmx_usbd_initialize_flags_t for the flag
 *               definitions. Some flags are mandatory.
 *
 * @return Zero on success or a negative value on error.
 */
int cvmx_usbd_initialize(cvmx_usbd_state_t *usb,
                         int usb_port_number,
                         cvmx_usbd_initialize_flags_t flags)
{
    cvmx_usbnx_clk_ctl_t usbn_clk_ctl;
    cvmx_usbnx_usbp_ctl_status_t usbn_usbp_ctl_status;

    if (cvmx_unlikely(flags & CVMX_USBD_INITIALIZE_FLAGS_DEBUG))
        cvmx_dprintf("%s: Called\n", __FUNCTION__);

    memset(usb, 0, sizeof(*usb)); /* clear the structure, not the pointer */
    usb->init_flags = flags;
    usb->index = usb_port_number;

    /* Try to determine clock type automatically */
    if ((usb->init_flags & (CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_XI |
                  CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND)) == 0)
    {
        if (__cvmx_helper_board_usb_get_clock_type() == USB_CLOCK_TYPE_CRYSTAL_12)
            usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_XI;  /* Only 12 MHz crystals are supported */
        else
            usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND;
    }

    if (usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND)
    {
        /* Check for auto ref clock frequency */
        if (!(usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_CLOCK_MHZ_MASK))
            switch (__cvmx_helper_board_usb_get_clock_type())
            {
                case USB_CLOCK_TYPE_REF_12:
                    usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_12MHZ;
                    break;
                case USB_CLOCK_TYPE_REF_24:
                    usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_24MHZ;
                    break;
                case USB_CLOCK_TYPE_REF_48:
                default:
                    usb->init_flags |= CVMX_USBD_INITIALIZE_FLAGS_CLOCK_48MHZ;
                    break;
            }
    }

    /* Power On Reset and PHY Initialization */

    /* 1. Wait for DCOK to assert (nothing to do) */
    /* 2a. Write USBN0/1_CLK_CTL[POR] = 1 and
        USBN0/1_CLK_CTL[HRST,PRST,HCLK_RST] = 0 */
    usbn_clk_ctl.u64 = cvmx_read_csr(CVMX_USBNX_CLK_CTL(usb->index));
    usbn_clk_ctl.s.por = 1;
    usbn_clk_ctl.s.hrst = 0;
    usbn_clk_ctl.s.prst = 0;
    usbn_clk_ctl.s.hclk_rst = 0;
    usbn_clk_ctl.s.enable = 0;
    /* 2b. Select the USB reference clock/crystal parameters by writing
        appropriate values to USBN0/1_CLK_CTL[P_C_SEL, P_RTYPE, P_COM_ON] */
    if (usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_CLOCK_XO_GND)
    {
        /* The USB port uses 12/24/48MHz 2.5V board clock
            source at USB_XO. USB_XI should be tied to GND.
            Most Octeon evaluation boards require this setting */
        if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
        {
            usbn_clk_ctl.cn31xx.p_rclk  = 1; /* From CN31XX,CN30XX manual */
            usbn_clk_ctl.cn31xx.p_xenbn = 0;
        }
        else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
            usbn_clk_ctl.cn56xx.p_rtype = 2; /* From CN56XX,CN50XX manual */
        else
            usbn_clk_ctl.cn52xx.p_rtype = 1; /* From CN52XX manual */

        switch (usb->init_flags & CVMX_USBD_INITIALIZE_FLAGS_CLOCK_MHZ_MASK)
        {
            case CVMX_USBD_INITIALIZE_FLAGS_CLOCK_12MHZ:
                usbn_clk_ctl.s.p_c_sel = 0;
                break;
            case CVMX_USBD_INITIALIZE_FLAGS_CLOCK_24MHZ:
                usbn_clk_ctl.s.p_c_sel = 1;
                break;
            case CVMX_USBD_INITIALIZE_FLAGS_CLOCK_48MHZ:
                usbn_clk_ctl.s.p_c_sel = 2;
                break;
        }
    }
    else
    {
        /* The USB port uses a 12MHz crystal as clock source
            at USB_XO and USB_XI */
        if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
        {
            usbn_clk_ctl.cn31xx.p_rclk  = 1; /* From CN31XX,CN30XX manual */
            usbn_clk_ctl.cn31xx.p_xenbn = 1;
        }
        else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
            usbn_clk_ctl.cn56xx.p_rtype = 0; /* From CN56XX,CN50XX manual */
        else
            usbn_clk_ctl.cn52xx.p_rtype = 0; /* From CN52XX manual */

        usbn_clk_ctl.s.p_c_sel = 0;
    }
    /* 2c. Select the HCLK via writing USBN0/1_CLK_CTL[DIVIDE, DIVIDE2] and
        setting USBN0/1_CLK_CTL[ENABLE] = 1.  Divide the core clock down such
        that USB is as close as possible to 125MHz */
    {
        int divisor = (cvmx_clock_get_rate(CVMX_CLOCK_CORE)+125000000-1)/125000000;
        if (divisor < 4)  /* Lower than 4 doesn't seem to work properly */
            divisor = 4;
        usbn_clk_ctl.s.divide = divisor;
        usbn_clk_ctl.s.divide2 = 0;
    }
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    /* 2d. Write USBN0/1_CLK_CTL[HCLK_RST] = 1 */
    usbn_clk_ctl.s.hclk_rst = 1;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    /* 2e.  Wait 64 core-clock cycles for HCLK to stabilize */
    cvmx_wait(64);
    /* 3. Program the power-on reset field in the USBN clock-control register:
        USBN_CLK_CTL[POR] = 0 */
    usbn_clk_ctl.s.por = 0;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    /* 4. Wait 1 ms for PHY clock to start */
    cvmx_wait_usec(1000);
    /* 5. Program the Reset input from automatic test equipment field in the
        USBP control and status register: USBN_USBP_CTL_STATUS[ATE_RESET] = 1 */
    usbn_usbp_ctl_status.u64 = cvmx_read_csr(CVMX_USBNX_USBP_CTL_STATUS(usb->index));
    usbn_usbp_ctl_status.s.ate_reset = 1;
    cvmx_write_csr(CVMX_USBNX_USBP_CTL_STATUS(usb->index), usbn_usbp_ctl_status.u64);
    /* 6. Wait 10 cycles */
    cvmx_wait(10);
    /* 7. Clear ATE_RESET field in the USBN clock-control register:
        USBN_USBP_CTL_STATUS[ATE_RESET] = 0 */
    usbn_usbp_ctl_status.s.ate_reset = 0;
    cvmx_write_csr(CVMX_USBNX_USBP_CTL_STATUS(usb->index), usbn_usbp_ctl_status.u64);
    /* 8. Program the PHY reset field in the USBN clock-control register:
        USBN_CLK_CTL[PRST] = 1 */
    usbn_clk_ctl.s.prst = 1;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    /* 9. Program the USBP control and status register to select host or
        device mode. USBN_USBP_CTL_STATUS[HST_MODE] = 0 for host, = 1 for
        device */
    usbn_usbp_ctl_status.s.hst_mode = 1;
    usbn_usbp_ctl_status.s.dm_pulld = 0;
    usbn_usbp_ctl_status.s.dp_pulld = 0;
    cvmx_write_csr(CVMX_USBNX_USBP_CTL_STATUS(usb->index), usbn_usbp_ctl_status.u64);
    /* 10. Wait 1 µs */
    cvmx_wait_usec(1);
    /* 11. Program the hreset_n field in the USBN clock-control register:
        USBN_CLK_CTL[HRST] = 1 */
    usbn_clk_ctl.s.hrst = 1;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    /* 12. Proceed to USB core initialization */
    usbn_clk_ctl.s.enable = 1;
    cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
    cvmx_wait_usec(1);

    /* Program the following fields in the global AHB configuration
        register (USBC_GAHBCFG)
        DMA mode, USBC_GAHBCFG[DMAEn]: 1 = DMA mode, 0 = slave mode
        Burst length, USBC_GAHBCFG[HBSTLEN] = 0
        Nonperiodic TxFIFO empty level (slave mode only),
        USBC_GAHBCFG[NPTXFEMPLVL]
        Periodic TxFIFO empty level (slave mode only),
        USBC_GAHBCFG[PTXFEMPLVL]
        Global interrupt mask, USBC_GAHBCFG[GLBLINTRMSK] = 1 */
    {
        cvmx_usbcx_gahbcfg_t usbcx_gahbcfg;
        usbcx_gahbcfg.u32 = 0;
        usbcx_gahbcfg.s.dmaen = 1;
        usbcx_gahbcfg.s.hbstlen = 0;
        usbcx_gahbcfg.s.nptxfemplvl = 1;
        usbcx_gahbcfg.s.ptxfemplvl = 1;
        usbcx_gahbcfg.s.glblintrmsk = 1;
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GAHBCFG(usb->index), usbcx_gahbcfg.u32);
    }

    /* Program the following fields in USBC_GUSBCFG register.
        HS/FS timeout calibration, USBC_GUSBCFG[TOUTCAL] = 0
        ULPI DDR select, USBC_GUSBCFG[DDRSEL] = 0
        USB turnaround time, USBC_GUSBCFG[USBTRDTIM] = 0x5
        PHY low-power clock select, USBC_GUSBCFG[PHYLPWRCLKSEL] = 0 */
    {
        cvmx_usbcx_gusbcfg_t usbcx_gusbcfg;
        usbcx_gusbcfg.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index));
        usbcx_gusbcfg.s.toutcal = 0;
        usbcx_gusbcfg.s.ddrsel = 0;
        usbcx_gusbcfg.s.usbtrdtim = 0x5;
        usbcx_gusbcfg.s.phylpwrclksel = 0;
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index), usbcx_gusbcfg.u32);
    }

    /* Program the following fields in the USBC0/1_DCFG register:
        Device speed, USBC0/1_DCFG[DEVSPD] = 0 (high speed)
        Non-zero-length status OUT handshake, USBC0/1_DCFG[NZSTSOUTHSHK]=0
        Periodic frame interval (if periodic endpoints are supported),
        USBC0/1_DCFG[PERFRINT] = 1 */
    {
        cvmx_usbcx_dcfg_t usbcx_dcfg;
        usbcx_dcfg.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_DCFG(usb->index));
        usbcx_dcfg.s.devspd = 0;
        usbcx_dcfg.s.nzstsouthshk = 0;
        usbcx_dcfg.s.perfrint = 1;
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_DCFG(usb->index), usbcx_dcfg.u32);
    }

    /* Program the USBC0/1_GINTMSK register */
    {
        cvmx_usbcx_gintmsk_t usbcx_gintmsk;
        usbcx_gintmsk.u32 = __cvmx_usbd_read_csr32(usb, CVMX_USBCX_GINTMSK(usb->index));
        usbcx_gintmsk.s.oepintmsk = 1;
        usbcx_gintmsk.s.inepintmsk = 1;
        usbcx_gintmsk.s.enumdonemsk = 1;
        usbcx_gintmsk.s.usbrstmsk = 1;
        usbcx_gintmsk.s.usbsuspmsk = 1;
        __cvmx_usbd_write_csr32(usb, CVMX_USBCX_GINTMSK(usb->index), usbcx_gintmsk.u32);
    }

    cvmx_usbd_disable(usb);
    return 0;
}
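
A usage sketch, assuming a flags value of zero so that the auto-detection branch at the top of the function picks the clock type from the board:

static cvmx_usbd_state_t usb_state;

/* Initialize USB port 0 for device mode; with no clock flags set the
 * routine probes the board for the reference clock type itself. */
if (cvmx_usbd_initialize(&usb_state, 0, 0) < 0)
    cvmx_dprintf("USB0: initialization failed\n");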
Example #6
static enum cvmx_qlm_mode __cvmx_qlm_get_mode_cn6xxx(int qlm)
{
	cvmx_mio_qlmx_cfg_t qlmx_cfg;

	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		qlmx_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(qlm));
		/* QLM is disabled when QLM SPD is 15. */
		if (qlmx_cfg.s.qlm_spd == 15)
			return CVMX_QLM_MODE_DISABLED;

		switch (qlmx_cfg.s.qlm_cfg) {
		case 0:	/* PCIE */
			return CVMX_QLM_MODE_PCIE;
		case 1:	/* ILK */
			return CVMX_QLM_MODE_ILK;
		case 2:	/* SGMII */
			return CVMX_QLM_MODE_SGMII;
		case 3:	/* XAUI */
			return CVMX_QLM_MODE_XAUI;
		case 7:	/* RXAUI */
			return CVMX_QLM_MODE_RXAUI;
		default:
			return CVMX_QLM_MODE_DISABLED;
		}
	} else if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
		qlmx_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(qlm));
		/* QLM is disabled when QLM SPD is 15. */
		if (qlmx_cfg.s.qlm_spd == 15)
			return CVMX_QLM_MODE_DISABLED;

		switch (qlmx_cfg.s.qlm_cfg) {
		case 0x9:	/* SGMII */
			return CVMX_QLM_MODE_SGMII;
		case 0xb:	/* XAUI */
			return CVMX_QLM_MODE_XAUI;
		case 0x0:	/* PCIE gen2 */
		case 0x8:	/* PCIE gen2 (alias) */
		case 0x2:	/* PCIE gen1 */
		case 0xa:	/* PCIE gen1 (alias) */
			return CVMX_QLM_MODE_PCIE;
		case 0x1:	/* SRIO 1x4 short */
		case 0x3:	/* SRIO 1x4 long */
			return CVMX_QLM_MODE_SRIO_1X4;
		case 0x4:	/* SRIO 2x2 short */
		case 0x6:	/* SRIO 2x2 long */
			return CVMX_QLM_MODE_SRIO_2X2;
		case 0x5:	/* SRIO 4x1 short */
		case 0x7:	/* SRIO 4x1 long */
			if (!OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_0))
				return CVMX_QLM_MODE_SRIO_4X1;
		/* fallthrough */
		default:
			return CVMX_QLM_MODE_DISABLED;
		}
	} else if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		cvmx_sriox_status_reg_t status_reg;
		/* For now skip qlm2 */
		if (qlm == 2) {
			cvmx_gmxx_inf_mode_t inf_mode;
			inf_mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(0));
			if (inf_mode.s.speed == 15)
				return CVMX_QLM_MODE_DISABLED;
			else if (inf_mode.s.mode == 0)
				return CVMX_QLM_MODE_SGMII;
			else
				return CVMX_QLM_MODE_XAUI;
		}
		status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(qlm));
		if (status_reg.s.srio)
			return CVMX_QLM_MODE_SRIO_1X4;
		else
			return CVMX_QLM_MODE_PCIE;
	} else if (OCTEON_IS_MODEL(OCTEON_CN61XX)) {
		qlmx_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(qlm));
		/* QLM is disabled when QLM SPD is 15. */
		if (qlmx_cfg.s.qlm_spd == 15)
			return CVMX_QLM_MODE_DISABLED;

		switch (qlm) {
		case 0:
			switch (qlmx_cfg.s.qlm_cfg) {
			case 0:	/* PCIe 1x4 gen2 / gen1 */
				return CVMX_QLM_MODE_PCIE;
			case 2:	/* SGMII */
				return CVMX_QLM_MODE_SGMII;
			case 3:	/* XAUI */
				return CVMX_QLM_MODE_XAUI;
			default:
				return CVMX_QLM_MODE_DISABLED;
			}
			break;
		case 1:
			switch (qlmx_cfg.s.qlm_cfg) {
			case 0:	/* PCIe 1x2 gen2 / gen1 */
				return CVMX_QLM_MODE_PCIE_1X2;
			case 1:	/* PCIe 2x1 gen2 / gen1 */
				return CVMX_QLM_MODE_PCIE_2X1;
			default:
				return CVMX_QLM_MODE_DISABLED;
			}
			break;
		case 2:
			switch (qlmx_cfg.s.qlm_cfg) {
			case 2:	/* SGMII */
				return CVMX_QLM_MODE_SGMII;
			case 3:	/* XAUI */
				return CVMX_QLM_MODE_XAUI;
			default:
				return CVMX_QLM_MODE_DISABLED;
			}
			break;
		}
	} else if (OCTEON_IS_MODEL(OCTEON_CNF71XX)) {
		qlmx_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(qlm));
		/* QLM is disabled when QLM SPD is 15. */
		if (qlmx_cfg.s.qlm_spd == 15)
			return CVMX_QLM_MODE_DISABLED;

		switch (qlm) {
		case 0:
			if (qlmx_cfg.s.qlm_cfg == 2)	/* SGMII */
				return CVMX_QLM_MODE_SGMII;
			break;
		case 1:
			switch (qlmx_cfg.s.qlm_cfg) {
			case 0:	/* PCIe 1x2 gen2 / gen1 */
				return CVMX_QLM_MODE_PCIE_1X2;
			case 1:	/* PCIe 2x1 gen2 / gen1 */
				return CVMX_QLM_MODE_PCIE_2X1;
			default:
				return CVMX_QLM_MODE_DISABLED;
			}
			break;
		}
	}
	return CVMX_QLM_MODE_DISABLED;
}
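
For example, a probe loop over the QLMs of a CN68XX (the count of five QLMs is an assumption about that model; the decoder itself checks the model internally):

int qlm;
for (qlm = 0; qlm < 5; qlm++)
	cvmx_dprintf("QLM%d: mode %d\n", qlm,
		     __cvmx_qlm_get_mode_cn6xxx(qlm));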
Example #7
//#define CVMX_ILK_STATS_ENA 1
int cvmx_ilk_enable (int interface)
{
    int res = -1;
    int retry_count = 0;
    cvmx_helper_link_info_t result;
    cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
    cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
#ifdef CVMX_ILK_STATS_ENA
    cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
    cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
#endif

    if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
        return res;

    if (interface >= CVMX_NUM_ILK_INTF)
        return res;

    result.u64 = 0;
    
#ifdef CVMX_ILK_STATS_ENA
    cvmx_dprintf ("\n");
    cvmx_dprintf ("<<<< ILK%d: Before enabling ilk\n", interface);
    cvmx_ilk_reg_dump_rx (interface);
    cvmx_ilk_reg_dump_tx (interface);
#endif

    /* RX packet will be enabled only if link is up */

    /* TX side */
    ilk_txx_cfg1.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG1(interface));
    ilk_txx_cfg1.s.pkt_ena = 1;
    ilk_txx_cfg1.s.rx_link_fc_ign = 1; /* cannot use link fc workaround */
    cvmx_write_csr (CVMX_ILK_TXX_CFG1(interface), ilk_txx_cfg1.u64); 
    cvmx_read_csr (CVMX_ILK_TXX_CFG1(interface));

#ifdef CVMX_ILK_STATS_ENA
    /* RX side stats */
    ilk_rxx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG0(interface));
    ilk_rxx_cfg0.s.lnk_stats_ena = 1;
    cvmx_write_csr (CVMX_ILK_RXX_CFG0(interface), ilk_rxx_cfg0.u64); 

    /* TX side stats */
    ilk_txx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG0(interface));
    ilk_txx_cfg0.s.lnk_stats_ena = 1;
    cvmx_write_csr (CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64); 
#endif

retry:
    retry_count++;
    if (retry_count > 10)
       goto out;

    /* Make sure the link is up, so that packets can be sent. */
    result = __cvmx_helper_ilk_link_get(cvmx_helper_get_ipd_port(interface + CVMX_ILK_GBL_BASE, 0));

    /* Small delay before another retry. */
    cvmx_wait_usec(100);

    ilk_rxx_cfg1.u64 = cvmx_read_csr(CVMX_ILK_RXX_CFG1(interface));
    if (ilk_rxx_cfg1.s.pkt_ena == 0)
       goto retry; 

out:
        
#ifdef CVMX_ILK_STATS_ENA
    cvmx_dprintf (">>>> ILK%d: After ILK is enabled\n", interface);
    cvmx_ilk_reg_dump_rx (interface);
    cvmx_ilk_reg_dump_tx (interface);
#endif

    if (result.s.link_up)
        return 0;

    return -1;
}
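
Taken together with cvmx_ilk_start_interface() from Example #4, bringing an interface up is a two-step sequence (sketch; assumes ILK0 is fused and cabled):

/* Start ILK0 on four lanes, then enable packet I/O.
 * cvmx_ilk_enable() retries up to 10 times waiting for RX pkt_ena
 * and returns 0 only once the link reports up. */
if (cvmx_ilk_start_interface(0, 0x0f) == 0 && cvmx_ilk_enable(0) == 0)
    cvmx_dprintf("ILK0: link up\n");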
Example #8
static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Allocate ring buffers.  */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->tx_ring)
		return -ENOMEM;
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;

	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->rx_ring)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);

	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);

	/* Bring it out of reset if needed. */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
		do {
			mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
		} while (mix_ctl.s.reset);
	}

	if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
		agl_gmx_inf_mode.u64 = 0;
		agl_gmx_inf_mode.s.en = 1;
		cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
	}
	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
		|| OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/* Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
		if (p->port) {
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
		} else {
			drv_ctl.s.byp_en = 1;
			drv_ctl.s.nctl = 6;
			drv_ctl.s.pctl = 6;
		}
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
	}

	oring1.u64 = 0;
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);

	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/* Enable the port HW. Packets are not allowed until
	 * cvmx_mgmt_port_enable() is called.
	 */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1;    /* Strip the ending CRC */
	mix_ctl.s.en = 1;           /* Enable the port */
	mix_ctl.s.nbtarb = 0;       /* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
#ifdef __LITTLE_ENDIAN
	mix_ctl.s.lendian = 1;
#endif
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);

	/* Read the PHY to find the mode of the interface. */
	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
		goto err_noirq;
	}

	/* Set the mode of the interface, RGMII/MII. */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) {
		union cvmx_agl_prtx_ctl agl_prtx_ctl;
		int rgmii_mode = (netdev->phydev->supported &
				  (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;

		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
		cvmx_write_csr(p->agl_prt_ctl,	agl_prtx_ctl.u64);

		/* MII clock counts are based on the 125 MHz
		 * reference, which has an 8 ns period, so our delays
		 * need to be multiplied by this factor.
		 */
#define NS_PER_PHY_CLK 8

		/* Take the DLL and clock tree out of reset */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.clkrst = 0;
		if (rgmii_mode) {
			agl_prtx_ctl.s.dllrst = 0;
			agl_prtx_ctl.s.clktx_byp = 0;
		}
		cvmx_write_csr(p->agl_prt_ctl,	agl_prtx_ctl.u64);
		cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */

		/* Wait for the DLL to lock. External 125 MHz
		 * reference clock must be stable at this point.
		 */
		ndelay(256 * NS_PER_PHY_CLK);

		/* Enable the interface */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.enable = 1;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

		/* Read the value back to force the previous write */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);

		/* Enable the compensation controller */
		agl_prtx_ctl.s.comp = 1;
		agl_prtx_ctl.s.drv_byp = 0;
		cvmx_write_csr(p->agl_prt_ctl,	agl_prtx_ctl.u64);
		/* Force write out before wait. */
		cvmx_read_csr(p->agl_prt_ctl);

		/* Wait for the compensation state to lock. */
		ndelay(1040 * NS_PER_PHY_CLK);

		/* Default Interframe Gaps are too small.  Recommended
		 * workaround is:
		 *
		 * AGL_GMX_TX_IFG[IFG1]=14
		 * AGL_GMX_TX_IFG[IFG2]=10
		 *
		 * IFG1 occupies the low nibble and IFG2 the high one,
		 * so the combined value is (10 << 4) | 14 == 0xae.
		 */
		cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
	}

	octeon_mgmt_rx_fill_ring(netdev);

	/* Clear statistics. */
	/* Clear on read. */
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);

	cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));

	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
			netdev)) {
		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
		goto err_noirq;
	}

	/* Interrupt every single RX packet */
	mix_irhwm.u64 = 0;
	mix_irhwm.s.irhwm = 0;
	cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);

	/* Interrupt when we have 1 or more packets to clean.  */
	mix_orhwm.u64 = 0;
	mix_orhwm.s.orhwm = 0;
	cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);

	/* Enable receive and transmit interrupts */
	mix_intena.u64 = 0;
	mix_intena.s.ithena = 1;
	mix_intena.s.othena = 1;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);

	/* Enable packet I/O. */

	rxx_frm_ctl.u64 = 0;
	rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
	rxx_frm_ctl.s.pre_align = 1;
	/* When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
	rxx_frm_ctl.s.pad_len = 1;
	/* When set, disables the length check for VLAN pkts */
	rxx_frm_ctl.s.vlan_len = 1;
	/* When set, PREAMBLE checking is less strict */
	rxx_frm_ctl.s.pre_free = 1;
	/* Control Pause Frames can match station SMAC */
	rxx_frm_ctl.s.ctl_smac = 0;
	/* Control Pause Frames can match globally assigned multicast address */
	rxx_frm_ctl.s.ctl_mcst = 1;
	/* Forward pause information to TX block */
	rxx_frm_ctl.s.ctl_bck = 1;
	/* Drop Control Pause Frames */
	rxx_frm_ctl.s.ctl_drp = 1;
	/* Strip off the preamble */
	rxx_frm_ctl.s.pre_strp = 1;
	/* This port is configured to send PREAMBLE+SFD to begin every
	 * frame.  GMX checks that the PREAMBLE is sent correctly.
	 */
	rxx_frm_ctl.s.pre_chk = 1;
	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);

	/* Configure the port duplex, speed and enables */
	octeon_mgmt_disable_link(p);
	if (netdev->phydev)
		octeon_mgmt_update_link(p);
	octeon_mgmt_enable_link(p);

	p->last_link = 0;
	p->last_speed = 0;
	/* The PHY is not present in the simulator. The carrier is
	 * enabled while the PHY is initialized for the simulator, so
	 * leave it enabled there.
	 */
	if (netdev->phydev) {
		netif_carrier_off(netdev);
		phy_start_aneg(netdev->phydev);
	}

	netif_wake_queue(netdev);
	napi_enable(&p->napi);

	return 0;
err_noirq:
	octeon_mgmt_reset_hw(p);
	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);
err_nomem:
	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);
	return -ENOMEM;
}
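Example #9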
int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode)
{
    union cvmx_spxx_dbg_deskew_ctl spxx_dbg_deskew_ctl;
    union cvmx_spxx_clk_ctl spxx_clk_ctl;
    union cvmx_spxx_bist_stat spxx_bist_stat;
    union cvmx_spxx_int_msk spxx_int_msk;
    union cvmx_stxx_int_msk stxx_int_msk;
    union cvmx_spxx_trn4_ctl spxx_trn4_ctl;
    int index;
    uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;

    /* Save the SPX/STX interrupt masks and disable them during reset */
    spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface));
    cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
    stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface));
    cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);

    /* Disable the SPI interface and run BIST */
    cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), 0);
    cvmx_write_csr(CVMX_STXX_COM_CTL(interface), 0);
    spxx_clk_ctl.u64 = 0;
    spxx_clk_ctl.s.runbist = 1;
    cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
    cvmx_wait(10 * MS);
    spxx_bist_stat.u64 = cvmx_read_csr(CVMX_SPXX_BIST_STAT(interface));
    if (spxx_bist_stat.s.stat0)
        cvmx_dprintf
        ("ERROR SPI%d: BIST failed on receive datapath FIFO\n",
         interface);
    if (spxx_bist_stat.s.stat1)
        cvmx_dprintf("ERROR SPI%d: BIST failed on RX calendar table\n",
                     interface);
    if (spxx_bist_stat.s.stat2)
        cvmx_dprintf("ERROR SPI%d: BIST failed on TX calendar table\n",
                     interface);

    /* Initialize the calendar tables with odd parity */
    for (index = 0; index < 32; index++) {
        union cvmx_srxx_spi4_calx srxx_spi4_calx;
        union cvmx_stxx_spi4_calx stxx_spi4_calx;

        srxx_spi4_calx.u64 = 0;
        srxx_spi4_calx.s.oddpar = 1;
        cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface),
                       srxx_spi4_calx.u64);

        stxx_spi4_calx.u64 = 0;
        stxx_spi4_calx.s.oddpar = 1;
        cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface),
                       stxx_spi4_calx.u64);
    }

    /* Clear any pending interrupts and restore the saved masks */
    cvmx_write_csr(CVMX_SPXX_INT_REG(interface),
                   cvmx_read_csr(CVMX_SPXX_INT_REG(interface)));
    cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64);
    cvmx_write_csr(CVMX_STXX_INT_REG(interface),
                   cvmx_read_csr(CVMX_STXX_INT_REG(interface)));
    cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64);

    /* Set the clock delay in the middle of its range */
    spxx_clk_ctl.u64 = 0;
    spxx_clk_ctl.s.seetrn = 0;
    spxx_clk_ctl.s.clkdly = 0x10;
    spxx_clk_ctl.s.runbist = 0;
    spxx_clk_ctl.s.statdrv = 0;
    /* STATRCV should be on the opposite edge from STATDRV */
    spxx_clk_ctl.s.statrcv = 1;
    spxx_clk_ctl.s.sndtrn = 0;
    spxx_clk_ctl.s.drptrn = 0;
    spxx_clk_ctl.s.rcvtrn = 0;
    spxx_clk_ctl.s.srxdlck = 0;
    cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
    cvmx_wait(100 * MS);

    /* Reset the SRX DLL */
    spxx_clk_ctl.s.srxdlck = 1;
    cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);

    /* Wait for the SPI4 RX DLL to lock */
    cvmx_wait(100 * MS);

    /* Enable dynamic alignment; zero the register image first so the
     * unset fields don't inherit stack garbage */
    spxx_trn4_ctl.u64 = 0;
    spxx_trn4_ctl.s.trntest = 0;
    spxx_trn4_ctl.s.jitter = 1;
    spxx_trn4_ctl.s.clr_boot = 1;
    spxx_trn4_ctl.s.set_boot = 0;
    if (OCTEON_IS_MODEL(OCTEON_CN58XX))
        spxx_trn4_ctl.s.maxdist = 3;
    else
        spxx_trn4_ctl.s.maxdist = 8;
    spxx_trn4_ctl.s.macro_en = 1;
    spxx_trn4_ctl.s.mux_en = 1;
    cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);

    spxx_dbg_deskew_ctl.u64 = 0;
    cvmx_write_csr(CVMX_SPXX_DBG_DESKEW_CTL(interface),
                   spxx_dbg_deskew_ctl.u64);

    return 0;
}
Example #10
static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
				      struct ifreq *rq, int cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	struct hwtstamp_config config;
	union cvmx_mio_ptp_clock_cfg ptp;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	bool have_hw_timestamps = false;

	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags) /* reserved for future extensions */
		return -EINVAL;

	/* Check whether the hardware supports timestamps */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		/* Get the current state of the PTP clock */
		ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
		if (!ptp.s.ext_clk_en) {
			/* The clock has not been configured to use an
			 * external source.  Program it to use the main clock
			 * reference.
			 */
			/* CLOCK_COMP holds nanoseconds per io-clock
			 * tick in 32.32 fixed point, hence the << 32.
			 */
			u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate();
			if (!ptp.s.ptp_en)
				cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
			netdev_info(netdev,
				    "PTP Clock using sclk reference @ %lldHz\n",
				    (NSEC_PER_SEC << 32) / clock_comp);
		} else {
			/* The clock is already programmed to use a GPIO */
			u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP);
			netdev_info(netdev,
				    "PTP Clock using GPIO%d @ %lld Hz\n",
				    ptp.s.ext_clk_in, (NSEC_PER_SEC << 32) / clock_comp);
		}

		/* Enable the clock if it wasn't done already */
		if (!ptp.s.ptp_en) {
			ptp.s.ptp_en = 1;
			cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
		}
		have_hw_timestamps = true;
	}

	if (!have_hw_timestamps)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		p->has_rx_tstamp = false;
		rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
		rxx_frm_ctl.s.ptp_mode = 0;
		cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		p->has_rx_tstamp = have_hw_timestamps;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		if (p->has_rx_tstamp) {
			rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
			rxx_frm_ctl.s.ptp_mode = 1;
			cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
		}
		break;
	default:
		return -ERANGE;
	}

	if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}
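
For reference, the userspace counterpart of this handler is the standard SIOCSHWTSTAMP ioctl. A sketch (the interface name is an example):

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

/* Ask the driver to timestamp all received packets on "mgmt0". */
static int enable_rx_timestamps(int sock_fd)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	memset(&ifr, 0, sizeof(ifr));
	cfg.tx_type = HWTSTAMP_TX_OFF;
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;
	strncpy(ifr.ifr_name, "mgmt0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;
	return ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
}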
Example #11
static void octeon_mgmt_update_link(struct octeon_mgmt *p)
{
	struct net_device *ndev = p->netdev;
	struct phy_device *phydev = ndev->phydev;
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

	if (!phydev->link)
		prtx_cfg.s.duplex = 1;
	else
		prtx_cfg.s.duplex = phydev->duplex;

	switch (phydev->speed) {
	case 10:
		prtx_cfg.s.speed = 0;
		prtx_cfg.s.slottime = 0;

		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.burst = 1;
			prtx_cfg.s.speed_msb = 1;
		}
		break;
	case 100:
		prtx_cfg.s.speed = 0;
		prtx_cfg.s.slottime = 0;

		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.burst = 1;
			prtx_cfg.s.speed_msb = 0;
		}
		break;
	case 1000:
		/* 1000 Mbit/s is only supported on 6XXX chips */
		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.speed = 1;
			prtx_cfg.s.speed_msb = 0;
			/* Only matters for half-duplex */
			prtx_cfg.s.slottime = 1;
			prtx_cfg.s.burst = phydev->duplex;
		}
		break;
	case 0:  /* No link */
	default:
		break;
	}

	/* Write the new GMX setting with the port still disabled. */
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	/* Read GMX CFG again to make sure the config is completed. */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		union cvmx_agl_gmx_txx_clk agl_clk;
		union cvmx_agl_prtx_ctl prtx_ctl;

		prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
		/* MII (both speeds) and RGMII 1000 speed. */
		agl_clk.s.clk_cnt = 1;
		if (prtx_ctl.s.mode == 0) { /* RGMII mode */
			if (phydev->speed == 10)
				agl_clk.s.clk_cnt = 50;
			else if (phydev->speed == 100)
				agl_clk.s.clk_cnt = 5;
		}
		cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
	}
}
Example #12
/**
 * Configure an output port and the associated queues for use.
 *
 * @port:       Port to configure.
 * @base_queue: First queue number to associate with this port.
 * @num_queues: Number of queues to associate with this port.
 * @priority:   Array of priority levels for each queue. Values are
 *                   allowed to be 0-8. A value of 8 gets 8 times the traffic
 *                   of a value of 1. A value of 0 means the queue does not
 *                   participate in any rounds. These priorities can be changed
 *                   on the fly while the pko is enabled. A priority of 9
 *                   indicates that static priority should be used.  If static
 *                   priority is used all queues with static priority must be
 *                   contiguous starting at the base_queue, and lower numbered
 *                   queues have higher priority than higher numbered queues.
 *                   There must be num_queues elements in the array.
 */
cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue,
				       uint64_t num_queues,
				       const uint64_t priority[])
{
	cvmx_pko_status_t result_code;
	uint64_t queue;
	union cvmx_pko_mem_queue_ptrs config;
	union cvmx_pko_reg_queue_ptrs1 config1;
	int static_priority_base = -1;
	int static_priority_end = -1;

	if ((port >= CVMX_PKO_NUM_OUTPUT_PORTS)
	    && (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID)) {
		cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid port %llu\n",
			     (unsigned long long)port);
		return CVMX_PKO_INVALID_PORT;
	}

	if (base_queue + num_queues > CVMX_PKO_MAX_OUTPUT_QUEUES) {
		cvmx_dprintf
		    ("ERROR: cvmx_pko_config_port: Invalid queue range %llu\n",
		     (unsigned long long)(base_queue + num_queues));
		return CVMX_PKO_INVALID_QUEUE;
	}

	if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
		/*
		 * Validate the static queue priority setup and set
		 * static_priority_base and static_priority_end
		 * accordingly.
		 */
		for (queue = 0; queue < num_queues; queue++) {
			/* Find first queue of static priority */
			if (static_priority_base == -1
			    && priority[queue] ==
			    CVMX_PKO_QUEUE_STATIC_PRIORITY)
				static_priority_base = queue;
			/* Find last queue of static priority */
			if (static_priority_base != -1
			    && static_priority_end == -1
			    && priority[queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY
			    && queue)
				static_priority_end = queue - 1;
			else if (static_priority_base != -1
				 && static_priority_end == -1
				 && queue == num_queues - 1)
				/* all queues are static priority */
				static_priority_end = queue;
			/*
			 * Check to make sure all static priority
			 * queues are contiguous.  Also catches some
			 * cases of static priorities not starting at
			 * queue 0.
			 */
			if (static_priority_end != -1
			    && (int)queue > static_priority_end
			    && priority[queue] ==
			    CVMX_PKO_QUEUE_STATIC_PRIORITY) {
				cvmx_dprintf("ERROR: cvmx_pko_config_port: "
					     "Static priority queues aren't "
					     "contiguous or don't start at "
					     "base queue. q: %d, eq: %d\n",
					(int)queue, static_priority_end);
				return CVMX_PKO_INVALID_PRIORITY;
			}
		}
		if (static_priority_base > 0) {
			cvmx_dprintf("ERROR: cvmx_pko_config_port: Static "
				     "priority queues don't start at base "
				     "queue. sq: %d\n",
				static_priority_base);
			return CVMX_PKO_INVALID_PRIORITY;
		}
#if 0
		cvmx_dprintf("Port %d: Static priority queue base: %d, "
			     "end: %d\n", port,
			static_priority_base, static_priority_end);
#endif
	}
	/*
	 * At this point, static_priority_base and static_priority_end
	 * are either both -1, or are valid start/end queue
	 * numbers.
	 */

	result_code = CVMX_PKO_SUCCESS;

#ifdef PKO_DEBUG
	cvmx_dprintf("num queues: %d (%lld,%lld)\n", num_queues,
		     CVMX_PKO_QUEUES_PER_PORT_INTERFACE0,
		     CVMX_PKO_QUEUES_PER_PORT_INTERFACE1);
#endif

	for (queue = 0; queue < num_queues; queue++) {
		uint64_t *buf_ptr = NULL;

		config1.u64 = 0;
		config1.s.idx3 = queue >> 3;
		config1.s.qid7 = (base_queue + queue) >> 7;

		config.u64 = 0;
		config.s.tail = queue == (num_queues - 1);
		config.s.index = queue;
		config.s.port = port;
		config.s.queue = base_queue + queue;

		if (!cvmx_octeon_is_pass1()) {
			config.s.static_p = static_priority_base >= 0;
			config.s.static_q = (int)queue <= static_priority_end;
			config.s.s_tail = (int)queue == static_priority_end;
		}
		/*
		 * Convert the priority into an enable bit field. Try
		 * to space the bits out evenly so the packets don't
		 * get grouped up.
		 */
		switch ((int)priority[queue]) {
		case 0:
			config.s.qos_mask = 0x00;
			break;
		case 1:
			config.s.qos_mask = 0x01;
			break;
		case 2:
			config.s.qos_mask = 0x11;
			break;
		case 3:
			config.s.qos_mask = 0x49;
			break;
		case 4:
			config.s.qos_mask = 0x55;
			break;
		case 5:
			config.s.qos_mask = 0x57;
			break;
		case 6:
			config.s.qos_mask = 0x77;
			break;
		case 7:
			config.s.qos_mask = 0x7f;
			break;
		case 8:
			config.s.qos_mask = 0xff;
			break;
		case CVMX_PKO_QUEUE_STATIC_PRIORITY:
			/* Pass 1 will fall through to the error case */
			if (!cvmx_octeon_is_pass1()) {
				config.s.qos_mask = 0xff;
				break;
			}
			/* fall through */
		default:
			cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid "
				     "priority %llu\n",
				(unsigned long long)priority[queue]);
			config.s.qos_mask = 0xff;
			result_code = CVMX_PKO_INVALID_PRIORITY;
			break;
		}

		if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
			cvmx_cmd_queue_result_t cmd_res =
			    cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO
						      (base_queue + queue),
						      CVMX_PKO_MAX_QUEUE_DEPTH,
						      CVMX_FPA_OUTPUT_BUFFER_POOL,
						      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
						      -
						      CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST
						      * 8);
			if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) {
				switch (cmd_res) {
				case CVMX_CMD_QUEUE_NO_MEMORY:
					cvmx_dprintf("ERROR: "
						     "cvmx_pko_config_port: "
						     "Unable to allocate "
						     "output buffer.\n");
					return CVMX_PKO_NO_MEMORY;
				case CVMX_CMD_QUEUE_ALREADY_SETUP:
					cvmx_dprintf("ERROR: cvmx_pko_config_port: "
						     "Port already setup.\n");
					return CVMX_PKO_PORT_ALREADY_SETUP;
				case CVMX_CMD_QUEUE_INVALID_PARAM:
				default:
					cvmx_dprintf("ERROR: cvmx_pko_config_port: "
						     "Command queue initialization failed.\n");
					return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
				}
			}

			buf_ptr = (uint64_t *)cvmx_cmd_queue_buffer(
				CVMX_CMD_QUEUE_PKO(base_queue + queue));
			config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr);
		} else
			config.s.buf_ptr = 0;

		CVMX_SYNCWS;

		if (!OCTEON_IS_MODEL(OCTEON_CN3XXX))
			cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
	}

	return result_code;
}
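
/*
 * Usage sketch (hypothetical port/queue numbers; the prototype is assumed
 * from the SDK context above): give a port four queues, the first at
 * static priority and the rest in weighted round-robin.
 */
static void example_pko_port_setup(void)
{
	uint64_t priority[4] = {
		CVMX_PKO_QUEUE_STATIC_PRIORITY,	/* must occupy queue 0 */
		4, 2, 1				/* round-robin weights */
	};

	if (cvmx_pko_config_port(2 /* port */, 8 /* base queue */,
				 4 /* num queues */, priority) !=
	    CVMX_PKO_SUCCESS)
		cvmx_dprintf("example: PKO port config failed\n");
}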
Example #13
/**
 * cvm_oct_ioctl_hwtstamp - IOCTL support for timestamping
 * @dev:    Device to change
 * @rq:     the request
 * @cmd:    the command
 *
 * Returns Zero on success
 */
static int cvm_oct_ioctl_hwtstamp(struct net_device *dev,
	struct ifreq *rq, int cmd)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	union cvmx_mio_ptp_clock_cfg ptp;
	int have_hw_timestamps = 0;

	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags) /* reserved for future extensions */
		return -EINVAL;

	/* Check the hardware status for timestamps */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		/* Write TX timestamp into word 4 */
		cvmx_write_csr(CVMX_PKO_REG_TIMESTAMP, 4);
		/* Get the current state of the PTP clock */
		ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
		if (!ptp.s.ext_clk_en) {
			/*
			 * The clock has not been configured to use an
			 * external source.  Program it to use the main clock
			 * reference.
			 */
			unsigned long long clock_comp = (NSEC_PER_SEC << 32) /
				octeon_get_io_clock_rate();
			cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
			pr_info("PTP Clock: Using sclk reference at %lld Hz\n",
				(NSEC_PER_SEC << 32) / clock_comp);
		} else {
			/* The clock is already programmed to use a GPIO */
			unsigned long long clock_comp = cvmx_read_csr(
				CVMX_MIO_PTP_CLOCK_COMP);
			pr_info("PTP Clock: Using GPIO %d at %lld Hz\n",
				ptp.s.ext_clk_in,
				(NSEC_PER_SEC << 32) / clock_comp);
		}

		/* Enable the clock if it wasn't done already */
		if (!ptp.s.ptp_en) {
			ptp.s.ptp_en = 1;
			cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
		}
		have_hw_timestamps = 1;
		/* Only the first two interfaces support hardware timestamps */
		if (priv->port >= 32)
			have_hw_timestamps = 0;
	}

	/* Require hardware if ALLOW_TIMESTAMPS_WITHOUT_HARDWARE=0 */
	if (!ALLOW_TIMESTAMPS_WITHOUT_HARDWARE && !have_hw_timestamps)
		return -EINVAL;

	switch (config.tx_type) {
		case HWTSTAMP_TX_OFF:
			priv->flags &= ~(OCTEON_ETHERNET_FLAG_TX_TIMESTAMP_SW |
					 OCTEON_ETHERNET_FLAG_TX_TIMESTAMP_HW);
			break;
		case HWTSTAMP_TX_ON:
			priv->flags |= (have_hw_timestamps) ?
				OCTEON_ETHERNET_FLAG_TX_TIMESTAMP_HW :
				OCTEON_ETHERNET_FLAG_TX_TIMESTAMP_SW;
			break;
		default:
			return -ERANGE;
	}

	switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			priv->flags &= ~(OCTEON_ETHERNET_FLAG_RX_TIMESTAMP_HW |
					 OCTEON_ETHERNET_FLAG_RX_TIMESTAMP_SW);
			if (have_hw_timestamps) {
				int interface = INTERFACE(priv->port);
				int index = INDEX(priv->port);
				union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
				union cvmx_pip_prt_cfgx pip_prt_cfgx;

				gmxx_rxx_frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
				gmxx_rxx_frm_ctl.s.ptp_mode = 0;
				cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface), gmxx_rxx_frm_ctl.u64);

				pip_prt_cfgx.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(priv->port));
				pip_prt_cfgx.s.skip = 0;
				cvmx_write_csr(CVMX_PIP_PRT_CFGX(priv->port), pip_prt_cfgx.u64);
			}
			break;
		case HWTSTAMP_FILTER_ALL:
		case HWTSTAMP_FILTER_SOME:
		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		case HWTSTAMP_FILTER_PTP_V2_EVENT:
		case HWTSTAMP_FILTER_PTP_V2_SYNC:
		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			priv->flags |= (have_hw_timestamps) ?
				OCTEON_ETHERNET_FLAG_RX_TIMESTAMP_HW :
				OCTEON_ETHERNET_FLAG_RX_TIMESTAMP_SW;
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			if (have_hw_timestamps) {
				int interface = INTERFACE(priv->port);
				int index = INDEX(priv->port);
				union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
				union cvmx_pip_prt_cfgx pip_prt_cfgx;

				gmxx_rxx_frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
				gmxx_rxx_frm_ctl.s.ptp_mode = 1;
				cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface), gmxx_rxx_frm_ctl.u64);

				pip_prt_cfgx.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(priv->port));
				pip_prt_cfgx.s.skip = 8;
				cvmx_write_csr(CVMX_PIP_PRT_CFGX(priv->port), pip_prt_cfgx.u64);
			}
			break;
		default:
			return -ERANGE;
	}

	if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}
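
/*
 * Userspace sketch showing how the handler above is reached: the standard
 * SIOCSHWTSTAMP ioctl carries a struct hwtstamp_config through ifr_data.
 * The socket and interface name are caller-supplied.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

static int example_enable_hw_timestamps(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.flags = 0,			/* must be zero, see the check above */
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL, /* the driver coerces all filters to ALL */
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;
	return ioctl(sock, SIOCSHWTSTAMP, &ifr);	/* 0 on success */
}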
Example #14
/**
 * @INTERNAL
 * Probe a ILK interface and determine the number of ports
 * connected to it. The ILK interface should still be down
 * after this call.
 *
 * @param interface Interface to probe
 *
 * @return Number of ports on the interface. Zero to disable.
 */
int __cvmx_helper_ilk_probe(int interface)
{
    int i, j, res = -1;
    static int pipe_base = 0, pknd_base = 0;
    static cvmx_ilk_pipe_chan_t *pch = NULL, *tmp;
    static cvmx_ilk_chan_pknd_t *chpknd = NULL, *tmp1;
    static cvmx_ilk_cal_entry_t *calent = NULL, *tmp2;

    if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
        return 0;

    interface -= CVMX_ILK_GBL_BASE;
    if (interface >= CVMX_NUM_ILK_INTF)
        return 0;

    /* the configuration should be done only once */
    if (cvmx_ilk_get_intf_ena (interface))
        return cvmx_ilk_chans[interface];

    /* configure lanes and enable the link */
    res = cvmx_ilk_start_interface (interface, cvmx_ilk_lane_mask[interface]);
    if (res < 0)
        return 0;

    /* set up the group of pipes available to ilk */
    if (pipe_base == 0)
        pipe_base = __cvmx_pko_get_pipe (interface + CVMX_ILK_GBL_BASE, 0);

    if (pipe_base == -1)
    {
        pipe_base = 0;
        return 0;
    }

    res = cvmx_ilk_set_pipe (interface, pipe_base, cvmx_ilk_chans[interface]);
    if (res < 0)
        return 0;

    /* set up pipe to channel mapping */
    i = pipe_base;
    if (pch == NULL)
    {
        pch = (cvmx_ilk_pipe_chan_t *)
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
        kmalloc(CVMX_MAX_ILK_CHANS * sizeof(cvmx_ilk_pipe_chan_t), GFP_KERNEL);
#else
        cvmx_bootmem_alloc (CVMX_MAX_ILK_CHANS * sizeof(cvmx_ilk_pipe_chan_t),
                            sizeof(cvmx_ilk_pipe_chan_t));
#endif
        if (pch == NULL)
            return 0;
    }

    memset (pch, 0, CVMX_MAX_ILK_CHANS * sizeof(cvmx_ilk_pipe_chan_t));
    tmp = pch;
    for (j = 0; j < cvmx_ilk_chans[interface]; j++)
    {
        tmp->pipe = i++;
        tmp->chan = cvmx_ilk_chan_map[interface][j];
        tmp++;
    }
    res = cvmx_ilk_tx_set_channel (interface, pch, cvmx_ilk_chans[interface]);
    if (res < 0)
    {
        res = 0;
        goto err_free_pch;
    }
    pipe_base += cvmx_ilk_chans[interface];

    /* set up channel to pkind mapping */
    if (pknd_base == 0)
        pknd_base = cvmx_helper_get_pknd (interface + CVMX_ILK_GBL_BASE, 0);

    i = pknd_base;
    if (chpknd == NULL)
    {
        chpknd = (cvmx_ilk_chan_pknd_t *)
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
        kmalloc(CVMX_MAX_ILK_PKNDS * sizeof(cvmx_ilk_chan_pknd_t), GFP_KERNEL);
#else
        cvmx_bootmem_alloc (CVMX_MAX_ILK_PKNDS * sizeof(cvmx_ilk_chan_pknd_t),
                            sizeof(cvmx_ilk_chan_pknd_t));
#endif
        if (chpknd == NULL)
        {
            pipe_base -= cvmx_ilk_chans[interface];
            res = 0;
            goto err_free_pch;
        }
    }

    memset (chpknd, 0, CVMX_MAX_ILK_PKNDS * sizeof(cvmx_ilk_chan_pknd_t));
    tmp1 = chpknd;
    for (j = 0; j < cvmx_ilk_chans[interface]; j++)
    {
        tmp1->chan = cvmx_ilk_chan_map[interface][j];
        tmp1->pknd = i++;
        tmp1++;
    }
    res = cvmx_ilk_rx_set_pknd (interface, chpknd, cvmx_ilk_chans[interface]);
    if (res < 0)
    {
        pipe_base -= cvmx_ilk_chans[interface];
        res = 0;
        goto err_free_chpknd;
    }
    pknd_base += cvmx_ilk_chans[interface];

    /* Set up tx calendar */
    if (calent == NULL)
    {
        calent = (cvmx_ilk_cal_entry_t *)
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
        kmalloc(CVMX_MAX_ILK_PIPES * sizeof(cvmx_ilk_cal_entry_t), GFP_KERNEL);
#else
        cvmx_bootmem_alloc (CVMX_MAX_ILK_PIPES * sizeof(cvmx_ilk_cal_entry_t),
                            sizeof(cvmx_ilk_cal_entry_t));
#endif
        if (calent == NULL)
        {
            pipe_base -= cvmx_ilk_chans[interface];
            pknd_base -= cvmx_ilk_chans[interface];
            res = 0;
            goto err_free_chpknd;
        }
    }

    memset (calent, 0, CVMX_MAX_ILK_PIPES * sizeof(cvmx_ilk_cal_entry_t));
    tmp1 = chpknd;
    tmp2 = calent;
    for (j = 0; j < cvmx_ilk_chans[interface]; j++)
    {
        tmp2->pipe_bpid = tmp1->pknd;
        tmp2->ent_ctrl = PIPE_BPID;
        tmp1++;
        tmp2++;
    }
    res = cvmx_ilk_cal_setup_tx (interface, cvmx_ilk_chans[interface],
                                 calent, 1);
    if (res < 0)
    {
        pipe_base -= cvmx_ilk_chans[interface];
        pknd_base -= cvmx_ilk_chans[interface];
        res = 0;
        goto err_free_calent;
    }

    /* set up rx calendar. the memory allocated above can be reused
     * because the max pkind is always less than the max pipe */
    memset (calent, 0, CVMX_MAX_ILK_PIPES * sizeof(cvmx_ilk_cal_entry_t));
    tmp = pch;
    tmp2 = calent;
    for (j = 0; j < cvmx_ilk_chans[interface]; j++)
    {
        tmp2->pipe_bpid = tmp->pipe;
        tmp2->ent_ctrl = PIPE_BPID;
        tmp++;
        tmp2++;
    }
    res = cvmx_ilk_cal_setup_rx (interface, cvmx_ilk_chans[interface],
                                 calent, CVMX_ILK_RX_FIFO_WM, 1);
    if (res < 0)
    {
        pipe_base -= cvmx_ilk_chans[interface];
        pknd_base -= cvmx_ilk_chans[interface];
        res = 0;
        goto err_free_calent;
    }
    res = __cvmx_helper_ilk_enumerate(interface + CVMX_ILK_GBL_BASE);

    goto out;

err_free_calent:
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
    kfree (calent);
    calent = NULL;  /* the pointers are static; clear so a retry reallocates */
#else
    /* no free() for cvmx_bootmem_alloc() */
#endif

err_free_chpknd:
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
    kfree (chpknd);
    chpknd = NULL;
#else
    /* no free() for cvmx_bootmem_alloc() */
#endif

err_free_pch:
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
    kfree (pch);
    pch = NULL;
#else
    /* no free() for cvmx_bootmem_alloc() */
#endif
out:
    return res;
}
Example #15
void __cvmx_qlm_pcie_cfg_rxd_set_tweak(int qlm, int lane)
{
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)) {
		cvmx_qlm_jtag_set(qlm, lane, "cfg_rxd_set", 0x1);
	}
}
/* Version of octeon_model_get_string() that takes the output buffer as an
** argument, because static/global variables don't work when running early
** in u-boot from flash.
*/
const char *octeon_model_get_string_buffer(uint32_t chip_id, char *buffer)
{
	const char *family;
	const char *core_model;
	char pass[4];
#ifndef CVMX_BUILD_FOR_UBOOT
	int clock_mhz;
#endif
	const char *suffix;
	cvmx_l2d_fus3_t fus3;
	int num_cores;
	cvmx_mio_fus_dat2_t fus_dat2;
	cvmx_mio_fus_dat3_t fus_dat3;
	char fuse_model[10];
	char fuse_suffix[4] = {0};
	uint64_t fuse_data = 0;

	fus3.u64 = 0;
	if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
		fus3.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
	fus_dat2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
	fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		num_cores = cvmx_pop(cvmx_read_csr(CVMX_CIU3_FUSE));
	else
		num_cores = cvmx_pop(cvmx_read_csr(CVMX_CIU_FUSE));

	/* Make sure the non existent devices look disabled */
	switch ((chip_id >> 8) & 0xff) {
	case 6:		/* CN50XX */
	case 2:		/* CN30XX */
		fus_dat3.s.nodfa_dte = 1;
		fus_dat3.s.nozip = 1;
		break;
	case 4:		/* CN57XX or CN56XX */
		fus_dat3.s.nodfa_dte = 1;
		break;
	default:
		break;
	}

	/* Make a guess at the suffix */
	/* NSP = everything */
	/* EXP = No crypto */
	/* SCP = No DFA, No zip */
	/* CP = No DFA, No crypto, No zip */
	if (fus_dat3.s.nodfa_dte) {
		if (fus_dat2.s.nocrypto)
			suffix = "CP";
		else
			suffix = "SCP";
	} else if (fus_dat2.s.nocrypto)
		suffix = "EXP";
	else
		suffix = "NSP";

	/* Assume pass number is encoded using <5:3><2:0>. Exceptions will be
	   fixed later */
	sprintf(pass, "%d.%d", (int)((chip_id >> 3) & 7) + 1, (int)chip_id & 7);
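	/* e.g. a chip_id low byte of 0x0a gives ((0x0a >> 3) & 7) + 1 = 2
	   and 0x0a & 7 = 2, i.e. pass "2.2" */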

	/* Use the number of cores to determine the last 2 digits of the model
	   number. There are some exceptions that are fixed later */
	switch (num_cores) {
	case 32:
		core_model = "80";
		break;
	case 24:
		core_model = "70";
		break;
	case 16:
		core_model = "60";
		break;
	case 15:
		core_model = "58";
		break;
	case 14:
		core_model = "55";
		break;
	case 13:
		core_model = "52";
		break;
	case 12:
		core_model = "50";
		break;
	case 11:
		core_model = "48";
		break;
	case 10:
		core_model = "45";
		break;
	case 9:
		core_model = "42";
		break;
	case 8:
		core_model = "40";
		break;
	case 7:
		core_model = "38";
		break;
	case 6:
		core_model = "34";
		break;
	case 5:
		core_model = "32";
		break;
	case 4:
		core_model = "30";
		break;
	case 3:
		core_model = "25";
		break;
	case 2:
		core_model = "20";
		break;
	case 1:
		core_model = "10";
		break;
	default:
		core_model = "XX";
		break;
	}

	/* Now figure out the family, the first two digits */
	switch ((chip_id >> 8) & 0xff) {
	case 0:		/* CN38XX, CN37XX or CN36XX */
		if (fus3.cn38xx.crip_512k) {
			/* For some unknown reason, the 16 core one is called 37 instead of 36 */
			if (num_cores >= 16)
				family = "37";
			else
				family = "36";
		} else
			family = "38";
		/* This series of chips didn't follow the standard pass numbering */
		switch (chip_id & 0xf) {
		case 0:
			strcpy(pass, "1.X");
			break;
		case 1:
			strcpy(pass, "2.X");
			break;
		case 3:
			strcpy(pass, "3.X");
			break;
		default:
			strcpy(pass, "X.X");
			break;
		}
		break;
	case 1:		/* CN31XX or CN3020 */
		if ((chip_id & 0x10) || fus3.cn31xx.crip_128k)
			family = "30";
		else
			family = "31";
		/* This series of chips didn't follow the standard pass numbering */
		switch (chip_id & 0xf) {
		case 0:
			strcpy(pass, "1.0");
			break;
		case 2:
			strcpy(pass, "1.1");
			break;
		default:
			strcpy(pass, "X.X");
			break;
		}
		break;
	case 2:		/* CN3010 or CN3005 */
		family = "30";
		/* A chip with half cache is an 05 */
		if (fus3.cn30xx.crip_64k)
			core_model = "05";
		/* This series of chips didn't follow the standard pass numbering */
		switch (chip_id & 0xf) {
		case 0:
			strcpy(pass, "1.0");
			break;
		case 2:
			strcpy(pass, "1.1");
			break;
		default:
			strcpy(pass, "X.X");
			break;
		}
		break;
	case 3:		/* CN58XX */
		family = "58";
		/* Special case. 4 core, half cache (CP with half cache) */
		if ((num_cores == 4) && fus3.cn58xx.crip_1024k && !strncmp(suffix, "CP", 2))
			core_model = "29";

		/* Pass 1 uses different encodings for pass numbers */
		if ((chip_id & 0xFF) < 0x8) {
			switch (chip_id & 0x3) {
			case 0:
				strcpy(pass, "1.0");
				break;
			case 1:
				strcpy(pass, "1.1");
				break;
			case 3:
				strcpy(pass, "1.2");
				break;
			default:
				strcpy(pass, "1.X");
				break;
			}
		}
		break;
	case 4:		/* CN57XX, CN56XX, CN55XX, CN54XX */
		if (fus_dat2.cn56xx.raid_en) {
			if (fus3.cn56xx.crip_1024k)
				family = "55";
			else
				family = "57";
			if (fus_dat2.cn56xx.nocrypto)
				suffix = "SP";
			else
				suffix = "SSP";
		} else {
			if (fus_dat2.cn56xx.nocrypto)
				suffix = "CP";
			else {
				suffix = "NSP";
				if (fus_dat3.s.nozip)
					suffix = "SCP";

				if (fus_dat3.cn56xx.bar2_en)
					suffix = "NSPB2";
			}
			if (fus3.cn56xx.crip_1024k)
				family = "54";
			else
				family = "56";
		}
		break;
	case 6:		/* CN50XX */
		family = "50";
		break;
	case 7:		/* CN52XX */
		if (fus3.cn52xx.crip_256k)
			family = "51";
		else
			family = "52";
		break;
	case 0x93:		/* CN61XX/CN60XX */
		family = "61";
		if (fus_dat3.cn63xx.l2c_crip == 2)
			family = "60";
		if (fus_dat3.cn61xx.nozip)
			suffix = "SCP";
		else
			suffix = "AAP";
		break;
	case 0x90:		/* CN63XX/CN62XX */
		family = "63";
		if (fus_dat3.s.l2c_crip == 2)
			family = "62";
		if (num_cores == 6)	/* Other core counts match generic */
			core_model = "35";
		if (fus_dat2.cn63xx.nocrypto)
			suffix = "CP";
		else if (fus_dat2.cn63xx.dorm_crypto)
			suffix = "DAP";
		else if (fus_dat3.cn63xx.nozip)
			suffix = "SCP";
		else
			suffix = "AAP";
		break;
	case 0x92:		/* CN66XX */
		family = "66";
		if (num_cores == 6)	/* Other core counts match generic */
			core_model = "35";
		if (fus_dat2.cn66xx.nocrypto && fus_dat2.cn66xx.dorm_crypto)
			suffix = "AP";
		if (fus_dat2.cn66xx.nocrypto)
			suffix = "CP";
		else if (fus_dat2.cn66xx.dorm_crypto)
			suffix = "DAP";
		else if (fus_dat3.cn66xx.nozip && fus_dat2.cn66xx.raid_en)
			suffix = "SCP";
		else if (!fus_dat2.cn66xx.raid_en)
			suffix = "HAP";
		else
			suffix = "AAP";
		break;
	case 0x91:		/* CN68XX */
		family = "68";
		if (fus_dat2.cn68xx.nocrypto && fus_dat3.cn68xx.nozip)
			suffix = "CP";
		else if (fus_dat2.cn68xx.dorm_crypto)
			suffix = "DAP";
		else if (fus_dat3.cn68xx.nozip)
			suffix = "SCP";
		else if (fus_dat2.cn68xx.nocrypto)
			suffix = "SP";
		else if (!fus_dat2.cn68xx.raid_en)
			suffix = "HAP";
		else
			suffix = "AAP";
		break;
	case 0x94:		/* CNF71XX */
		family = "F71";
		if (fus_dat3.cnf71xx.nozip)
			suffix = "SCP";
		else
			suffix = "AAP";
		break;
	case 0x95:		/* CN78XX */
		family = "78";
		if (fus_dat3.cn70xx.nozip)
			suffix = "SCP";
		else
			suffix = "AAP";
		break;
	case 0x96:		/* CN70XX */
		family = "70";
		if (cvmx_read_csr(CVMX_MIO_FUS_PDF) & (0x1ULL << 32))
			family = "71";
		if (fus_dat2.cn70xx.nocrypto)
			suffix = "CP";
		else if (fus_dat3.cn70xx.nodfa_dte)
			suffix = "SCP";
		else
			suffix = "AAP";
		break;
	default:
		family = "XX";
		core_model = "XX";
		strcpy(pass, "X.X");
		suffix = "XXX";
		break;
	}

#ifndef CVMX_BUILD_FOR_UBOOT
	clock_mhz = cvmx_clock_get_rate(CVMX_CLOCK_RCLK) / 1000000;
#endif

	if (family[0] != '3') {
		if (OCTEON_IS_OCTEON1PLUS() || OCTEON_IS_OCTEON2()) {
			int fuse_base = 384 / 8;
			if (family[0] == '6' || OCTEON_IS_OCTEON3())
				fuse_base = 832 / 8;
			/* Check for model in fuses, overrides normal decode */
			/* This is _not_ valid for Octeon CN3XXX models */
			fuse_data |= cvmx_fuse_read_byte(fuse_base + 5);
			fuse_data = fuse_data << 8;
			fuse_data |= cvmx_fuse_read_byte(fuse_base + 4);
			fuse_data = fuse_data << 8;
			fuse_data |= cvmx_fuse_read_byte(fuse_base + 3);
			fuse_data = fuse_data << 8;
			fuse_data |= cvmx_fuse_read_byte(fuse_base + 2);
			fuse_data = fuse_data << 8;
			fuse_data |= cvmx_fuse_read_byte(fuse_base + 1);
			fuse_data = fuse_data << 8;
			fuse_data |= cvmx_fuse_read_byte(fuse_base);
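			/*
			 * fuse_data now holds the six fuse bytes with
			 * fuse_base+5 most significant: the model number is
			 * in bits [13:0], the suffix letter in bits [18:14].
			 */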
			if (fuse_data & 0x7ffff) {
				int model = fuse_data & 0x3fff;
				int suffix = (fuse_data >> 14) & 0x1f;
				if (suffix && model) {      /* Have both number and suffix in fuses, so both */
					sprintf(fuse_model, "%d%c", model, 'A' + suffix - 1);
					core_model = "";
					family = fuse_model;
				} else if (suffix && !model) {      /* Only have suffix, so add suffix to 'normal' model number */
					sprintf(fuse_model, "%s%c", core_model, 'A' + suffix - 1);
					core_model = fuse_model;
				} else {    /* Don't have suffix, so just use model from fuses */

					sprintf(fuse_model, "%d", model);
					core_model = "";
					family = fuse_model;
				}
			}
		} else {
Example #17
/**
 * Get the speed of the QLM, expressed in MHz (e.g. 3.125 Gbaud is
 * returned as 3125).
 *
 * @param qlm    QLM to examine
 *
 * @return Speed in MHz
 */
int cvmx_qlm_get_gbaud_mhz(int qlm)
{
	if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		if (qlm == 2) {
			cvmx_gmxx_inf_mode_t inf_mode;
			inf_mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(0));
			switch (inf_mode.s.speed) {
			case 0:
				return 5000;	/* 5     Gbaud */
			case 1:
				return 2500;	/* 2.5   Gbaud */
			case 2:
				return 2500;	/* 2.5   Gbaud */
			case 3:
				return 1250;	/* 1.25  Gbaud */
			case 4:
				return 1250;	/* 1.25  Gbaud */
			case 5:
				return 6250;	/* 6.25  Gbaud */
			case 6:
				return 5000;	/* 5     Gbaud */
			case 7:
				return 2500;	/* 2.5   Gbaud */
			case 8:
				return 3125;	/* 3.125 Gbaud */
			case 9:
				return 2500;	/* 2.5   Gbaud */
			case 10:
				return 1250;	/* 1.25  Gbaud */
			case 11:
				return 5000;	/* 5     Gbaud */
			case 12:
				return 6250;	/* 6.25  Gbaud */
			case 13:
				return 3750;	/* 3.75  Gbaud */
			case 14:
				return 3125;	/* 3.125 Gbaud */
			default:
				return 0;	/* Disabled */
			}
		} else {
			cvmx_sriox_status_reg_t status_reg;
			status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(qlm));
			if (status_reg.s.srio) {
				cvmx_sriomaintx_port_0_ctl2_t sriomaintx_port_0_ctl2;
				sriomaintx_port_0_ctl2.u32 = cvmx_read_csr(CVMX_SRIOMAINTX_PORT_0_CTL2(qlm));
				switch (sriomaintx_port_0_ctl2.s.sel_baud) {
				case 1:
					return 1250;	/* 1.25  Gbaud */
				case 2:
					return 2500;	/* 2.5   Gbaud */
				case 3:
					return 3125;	/* 3.125 Gbaud */
				case 4:
					return 5000;	/* 5     Gbaud */
				case 5:
					return 6250;	/* 6.250 Gbaud */
				default:
					return 0;	/* Disabled */
				}
			} else {
				cvmx_pciercx_cfg032_t pciercx_cfg032;
				pciercx_cfg032.u32 = cvmx_read_csr(CVMX_PCIERCX_CFG032(qlm));
				switch (pciercx_cfg032.s.ls) {
				case 1:
					return 2500;
				case 2:
					return 5000;
				case 4:
					return 8000;
				default:
					{
						cvmx_mio_rst_boot_t mio_rst_boot;
						mio_rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
						if ((qlm == 0) && mio_rst_boot.s.qlm0_spd == 0xf)
							return 0;
						if ((qlm == 1) && mio_rst_boot.s.qlm1_spd == 0xf)
							return 0;
						return 5000;	/* Best guess I can make */
					}
				}
			}
		}
	} else if (OCTEON_IS_OCTEON2()) {
		cvmx_mio_qlmx_cfg_t qlm_cfg;

		qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(qlm));
		switch (qlm_cfg.s.qlm_spd) {
		case 0:
			return 5000;	/* 5     Gbaud */
		case 1:
			return 2500;	/* 2.5   Gbaud */
		case 2:
			return 2500;	/* 2.5   Gbaud */
		case 3:
			return 1250;	/* 1.25  Gbaud */
		case 4:
			return 1250;	/* 1.25  Gbaud */
		case 5:
			return 6250;	/* 6.25  Gbaud */
		case 6:
			return 5000;	/* 5     Gbaud */
		case 7:
			return 2500;	/* 2.5   Gbaud */
		case 8:
			return 3125;	/* 3.125 Gbaud */
		case 9:
			return 2500;	/* 2.5   Gbaud */
		case 10:
			return 1250;	/* 1.25  Gbaud */
		case 11:
			return 5000;	/* 5     Gbaud */
		case 12:
			return 6250;	/* 6.25  Gbaud */
		case 13:
			return 3750;	/* 3.75  Gbaud */
		case 14:
			return 3125;	/* 3.125 Gbaud */
		default:
			return 0;	/* Disabled */
		}
	} else if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
		cvmx_gserx_dlmx_mpll_multiplier_t mpll_multiplier;
		uint64_t meas_refclock;
		uint64_t freq;

		/* Measure the reference clock */
		meas_refclock = cvmx_qlm_measure_clock(qlm);
		/* Multiply to get the final frequency */
		mpll_multiplier.u64 = cvmx_read_csr(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0));
		freq = meas_refclock * mpll_multiplier.s.mpll_multiplier;
		freq = (freq + 500000) / 1000000;
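		/* e.g. a measured 100 MHz reference with a multiplier of
		   35 gives (3500000000 + 500000) / 1000000 = 3500 MHz */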
		return freq;
	}
	return 0;
}
Example #18
/**
 * Packet transmit
 *
 * @param m      Packet to send
 * @param ifp    Interface to transmit on
 * @return Zero on success, 1 if the packet was dropped or could not be sent
 */
int cvm_oct_xmit(struct mbuf *m, struct ifnet *ifp)
{
	cvmx_pko_command_word0_t    pko_command;
	cvmx_buf_ptr_t              hw_buffer;
	int                         dropped;
	int                         qos;
	cvm_oct_private_t          *priv = (cvm_oct_private_t *)ifp->if_softc;
	int32_t in_use;
	int32_t buffers_to_free;
	cvmx_wqe_t *work;

	/* Prefetch the private data structure.
	   It is larger than one cache line. */
	CVMX_PREFETCH(priv, 0);

	/* Start off assuming no drop */
	dropped = 0;

	/* The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to completely
	   remove "qos" in the event neither interface supports multiple queues
	   per port */
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		qos = GET_MBUF_QOS(m);
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else
		qos = 0;

	/* The CN3XXX series of parts has an errata (GMX-401) which causes the
	   GMX block to hang if a collision occurs towards the end of a
	   <68 byte packet. As a workaround for this, we pad packets to be
	   68 bytes whenever we are in half duplex mode. We don't handle
	   the case of having a small packet but no room to add the padding.
	   The kernel should always give us at least a cache line */
	if (__predict_false(m->m_pkthdr.len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		cvmx_gmxx_prtx_cfg_t gmx_prt_cfg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		if (interface < 2) {
			/* We only need to pad packet in half duplex mode */
			gmx_prt_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
			if (gmx_prt_cfg.s.duplex == 0) {
				static uint8_t pad[64];

				if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad))
					printf("%s: unable to padd small packet.", __func__);
			}
		}
	}

#ifdef OCTEON_VENDOR_RADISYS
	/*
	 * The RSYS4GBE will hang if asked to transmit a packet less than 60 bytes.
	 */
	if (__predict_false(m->m_pkthdr.len < 60) &&
	    cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE) {
		static uint8_t pad[60];

		if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad))
			printf("%s: unable to pad small packet.", __func__);
	}
#endif

	/*
	 * If the packet is not fragmented.
	 */
	if (m->m_pkthdr.len == m->m_len) {
		/* Build the PKO buffer pointer */
		hw_buffer.u64 = 0;
		hw_buffer.s.addr = cvmx_ptr_to_phys(m->m_data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = m->m_len;

		/* Build the PKO command */
		pko_command.u64 = 0;
		pko_command.s.segs = 1;
		pko_command.s.dontfree = 1; /* Do not put this buffer into the FPA.  */

		work = NULL;
	} else {
		struct mbuf *n;
		unsigned segs;
		uint64_t *gp;

		/*
		 * The packet is fragmented, we need to send a list of segments
		 * in memory we borrow from the WQE pool.
		 */
		work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
		if (work == NULL) {
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			return 1;
		}

		segs = 0;
		gp = (uint64_t *)work;
		for (n = m; n != NULL; n = n->m_next) {
			if (segs == CVMX_FPA_WQE_POOL_SIZE / sizeof (uint64_t))
				panic("%s: too many segments in packet; call m_collapse().", __func__);

			/* Build the PKO buffer pointer */
			hw_buffer.u64 = 0;
			hw_buffer.s.i = 1; /* Do not put this buffer into the FPA.  */
			hw_buffer.s.addr = cvmx_ptr_to_phys(n->m_data);
			hw_buffer.s.pool = 0;
			hw_buffer.s.size = n->m_len;

			*gp++ = hw_buffer.u64;
			segs++;
		}

		/* Build the PKO buffer gather list pointer */
		hw_buffer.u64 = 0;
		hw_buffer.s.addr = cvmx_ptr_to_phys(work);
		hw_buffer.s.pool = CVMX_FPA_WQE_POOL;
		hw_buffer.s.size = segs;

		/* Build the PKO command */
		pko_command.u64 = 0;
		pko_command.s.segs = segs;
		pko_command.s.gather = 1;
		pko_command.s.dontfree = 0; /* Put the WQE above back into the FPA.  */
	}

	/* Finish building the PKO command */
	pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
	pko_command.s.reg0 = priv->fau+qos*4;
	pko_command.s.total_bytes = m->m_pkthdr.len;
	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
	pko_command.s.subone0 = 1;
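	/* With subone0 set, PKO subtracts one from the 32-bit FAU counter at
	   reg0 once the packet has been sent, balancing the fetch-and-add
	   performed below */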

	/* Check if we can use the hardware checksumming */
	if ((m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) != 0) {
		/* Use hardware checksum calc */
		pko_command.s.ipoffp1 = ETHER_HDR_LEN + 1;
	}

	/*
	 * XXX
	 * Could use a different free queue (and different FAU address) per
	 * core instead of per QoS, to reduce contention here.
	 */
	IF_LOCK(&priv->tx_free_queue[qos]);
	/* Get the number of mbufs in use by the hardware */
	in_use = cvmx_fau_fetch_and_add32(priv->fau+qos*4, 1);
	buffers_to_free = cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, CVMX_PKO_LOCK_CMD_QUEUE);

	/* Drop this packet if we have too many already queued to the HW */
	if (_IF_QFULL(&priv->tx_free_queue[qos])) {
		dropped = 1;
	}
	/* Send the packet to the output queue */
	else
	if (__predict_false(cvmx_pko_send_packet_finish(priv->port, priv->queue + qos, pko_command, hw_buffer, CVMX_PKO_LOCK_CMD_QUEUE))) {
		DEBUGPRINT("%s: Failed to send the packet\n", if_name(ifp));
		dropped = 1;
	}

	if (__predict_false(dropped)) {
		m_freem(m);
		cvmx_fau_atomic_add32(priv->fau+qos*4, -1);
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	} else {
		/* Put this packet on the queue to be freed later */
		_IF_ENQUEUE(&priv->tx_free_queue[qos], m);

		/* Pass it to any BPF listeners.  */
		ETHER_BPF_MTAP(ifp, m);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
	}

	/* Free mbufs not in use by the hardware */
	if (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) {
		while (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) {
			_IF_DEQUEUE(&priv->tx_free_queue[qos], m);
			m_freem(m);
		}
	}
	IF_UNLOCK(&priv->tx_free_queue[qos]);

	return dropped;
}
Example #19
static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Allocate ring buffers.  */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->tx_ring)
		return -ENOMEM;
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;

	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->rx_ring)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);

	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));

	/* Bring it out of reset if needed. */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
		do {
			mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
		} while (mix_ctl.s.reset);
	}

	agl_gmx_inf_mode.u64 = 0;
	agl_gmx_inf_mode.s.en = 1;
	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

	oring1.u64 = 0;
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);
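	/* Both ring base addresses above are programmed as 64-bit-word
	   addresses (handle >> 3), since the MIX rings hold 8-byte entries */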

	/* Disable packet I/O. */
	prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	prtx_cfg.s.en = 0;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);

	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/*
	 * Enable the port HW. Packets are not allowed until
	 * cvmx_mgmt_port_enable() is called.
	 */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1;    /* Strip the ending CRC */
	mix_ctl.s.en = 1;           /* Enable the port */
	mix_ctl.s.nbtarb = 0;       /* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
	cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);

	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/*
		 * Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
		if (port) {
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
		} else {
			drv_ctl.s.byp_en = 1;
			drv_ctl.s.nctl = 6;
			drv_ctl.s.pctl = 6;
		}
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
	}

	octeon_mgmt_rx_fill_ring(netdev);

	/* Clear statistics. */
	/* Clear on read. */
	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), 1);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port), 0);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port), 0);

	cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), 1);
	cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0);
	cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT1(port), 0);

	/* Clear any pending interrupts */
	cvmx_write_csr(CVMX_MIXX_ISR(port), cvmx_read_csr(CVMX_MIXX_ISR(port)));

	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
			netdev)) {
		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
		goto err_noirq;
	}

	/* Interrupt every single RX packet */
	mix_irhwm.u64 = 0;
	mix_irhwm.s.irhwm = 0;
	cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);

	/* Interrupt when we have 1 or more packets to clean.  */
	mix_orhwm.u64 = 0;
	mix_orhwm.s.orhwm = 1;
	cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);

	/* Enable receive and transmit interrupts */
	mix_intena.u64 = 0;
	mix_intena.s.ithena = 1;
	mix_intena.s.othena = 1;
	cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);

	/* Enable packet I/O. */

	rxx_frm_ctl.u64 = 0;
	rxx_frm_ctl.s.pre_align = 1;
	/*
	 * When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
	rxx_frm_ctl.s.pad_len = 1;
	/* When set, disables the length check for VLAN pkts */
	rxx_frm_ctl.s.vlan_len = 1;
	/* When set, PREAMBLE checking is  less strict */
	rxx_frm_ctl.s.pre_free = 1;
	/* Control Pause Frames can match station SMAC */
	rxx_frm_ctl.s.ctl_smac = 0;
	/* Control Pause Frames can match globally assign Multicast address */
	rxx_frm_ctl.s.ctl_mcst = 1;
	/* Forward pause information to TX block */
	rxx_frm_ctl.s.ctl_bck = 1;
	/* Drop Control Pause Frames */
	rxx_frm_ctl.s.ctl_drp = 1;
	/* Strip off the preamble */
	rxx_frm_ctl.s.pre_strp = 1;
	/*
	 * This port is configured to send PREAMBLE+SFD to begin every
	 * frame.  GMX checks that the PREAMBLE is sent correctly.
	 */
	rxx_frm_ctl.s.pre_chk = 1;
	cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);

	/* Enable the AGL block */
	agl_gmx_inf_mode.u64 = 0;
	agl_gmx_inf_mode.s.en = 1;
	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

	/* Configure the port duplex and enables */
	prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	prtx_cfg.s.tx_en = 1;
	prtx_cfg.s.rx_en = 1;
	prtx_cfg.s.en = 1;
	p->last_duplex = 1;
	prtx_cfg.s.duplex = p->last_duplex;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);

	p->last_link = 0;
	netif_carrier_off(netdev);

	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY.\n");
		goto err_noirq;
	}

	netif_wake_queue(netdev);
	napi_enable(&p->napi);

	return 0;
err_noirq:
	octeon_mgmt_reset_hw(p);
	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);
err_nomem:
	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);
	return -ENOMEM;
}
Example #20
static int __init octeon_serial_init(void)
{
	int enable_uart0;
	int enable_uart1;
	int enable_uart2;
	struct plat_serial8250_port *p;

#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
	/*
	 * If we are configured to run as the second of two kernels,
	 * disable uart0 and enable uart1. Uart0 is owned by the first
	 * kernel
	 */
	enable_uart0 = 0;
	enable_uart1 = 1;
#else
	/*
	 * We are configured for the first kernel. We'll enable uart0
	 * if the bootloader told us to use 0, otherwise we'll enable
	 * uart 1.
	 */
	enable_uart0 = (octeon_get_boot_uart() == 0);
	enable_uart1 = (octeon_get_boot_uart() == 1);
#ifdef CONFIG_KGDB
	enable_uart1 = 1;
#endif
#endif

#ifdef CONFIG_SG8200
	enable_uart1 = 1;
#endif

	/* Right now CN52XX is the only chip with a third uart */
	enable_uart2 = OCTEON_IS_MODEL(OCTEON_CN52XX);

	p = octeon_uart8250_data;
	if (enable_uart0) {
		/* Add a ttyS device for hardware uart 0 */
		octeon_uart_set_common(p);
		p->membase = (void *) CVMX_MIO_UARTX_RBR(0);
		p->mapbase = CVMX_MIO_UARTX_RBR(0) & ((1ull << 49) - 1);
		p->irq = OCTEON_IRQ_UART0;
		p++;
	}

	if (enable_uart1) {
		/* Add a ttyS device for hardware uart 1 */
		octeon_uart_set_common(p);
		p->membase = (void *) CVMX_MIO_UARTX_RBR(1);
		p->mapbase = CVMX_MIO_UARTX_RBR(1) & ((1ull << 49) - 1);
		p->irq = OCTEON_IRQ_UART1;
		p++;
	}
	if (enable_uart2) {
		/* Add a ttyS device for hardware uart 2 */
		octeon_uart_set_common(p);
		p->membase = (void *) CVMX_MIO_UART2_RBR;
		p->mapbase = CVMX_MIO_UART2_RBR & ((1ull << 49) - 1);
		p->irq = OCTEON_IRQ_UART2;
		p++;
	}

	BUG_ON(p > &octeon_uart8250_data[OCTEON_MAX_UARTS]);

	return platform_device_register(&octeon_uart8250_device);
}
Example #21
/**
 * Show channel statistics
 *
 * @param interface The identifier of the packet interface to show
 *                  statistics for. cn68xx has 2 interfaces: ilk0 and ilk1.
 * @param pstats A pointer to cvmx_ilk_stats_ctrl_t that specifies which
 *               logical channels to access
 *
 * @return nothing
 */
void cvmx_ilk_show_stats (int interface, cvmx_ilk_stats_ctrl_t *pstats)
{
    unsigned int i;
    cvmx_ilk_rxx_idx_stat0_t ilk_rxx_idx_stat0;
    cvmx_ilk_rxx_idx_stat1_t ilk_rxx_idx_stat1;
    cvmx_ilk_rxx_mem_stat0_t ilk_rxx_mem_stat0;
    cvmx_ilk_rxx_mem_stat1_t ilk_rxx_mem_stat1;

    cvmx_ilk_txx_idx_stat0_t ilk_txx_idx_stat0;
    cvmx_ilk_txx_idx_stat1_t ilk_txx_idx_stat1;
    cvmx_ilk_txx_mem_stat0_t ilk_txx_mem_stat0;
    cvmx_ilk_txx_mem_stat1_t ilk_txx_mem_stat1;

    if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
        return;

    if (interface >= CVMX_NUM_ILK_INTF)
        return;

    if (pstats == NULL)
        return;

    /* discrete channels */
    if (pstats->chan_list != NULL)
    {
        /* index chan_list directly; pstats describes the whole request and
           must not itself be advanced */
        for (i = 0; i < pstats->num_chans; i++)
        {
            /* get the number of rx packets */
            ilk_rxx_idx_stat0.u64 = 0;
            ilk_rxx_idx_stat0.s.index = pstats->chan_list[i];
            ilk_rxx_idx_stat0.s.clr = pstats->clr_on_rd;
            cvmx_write_csr (CVMX_ILK_RXX_IDX_STAT0(interface),
                            ilk_rxx_idx_stat0.u64);
            ilk_rxx_mem_stat0.u64 = cvmx_read_csr
                                    (CVMX_ILK_RXX_MEM_STAT0(interface));

            /* get the number of rx bytes */
            ilk_rxx_idx_stat1.u64 = 0;
            ilk_rxx_idx_stat1.s.index = pstats->chan_list[i];
            ilk_rxx_idx_stat1.s.clr = pstats->clr_on_rd;
            cvmx_write_csr (CVMX_ILK_RXX_IDX_STAT1(interface),
                            ilk_rxx_idx_stat1.u64);
            ilk_rxx_mem_stat1.u64 = cvmx_read_csr
                                    (CVMX_ILK_RXX_MEM_STAT1(interface));

            cvmx_dprintf ("ILK%d Channel%d Rx: %d packets %d bytes\n", interface,
                    pstats->chan_list[i], ilk_rxx_mem_stat0.s.rx_pkt,
                    (unsigned int) ilk_rxx_mem_stat1.s.rx_bytes);

            /* get the number of tx packets */
            ilk_txx_idx_stat0.u64 = 0;
            ilk_txx_idx_stat0.s.index = pstats->chan_list[i];
            ilk_txx_idx_stat0.s.clr = pstats->clr_on_rd;
            cvmx_write_csr (CVMX_ILK_TXX_IDX_STAT0(interface),
                            ilk_txx_idx_stat0.u64);
            ilk_txx_mem_stat0.u64 = cvmx_read_csr
                                    (CVMX_ILK_TXX_MEM_STAT0(interface));

            /* get the number of tx bytes */
            ilk_txx_idx_stat1.u64 = 0;
            ilk_txx_idx_stat1.s.index = pstats->chan_list[i];
            ilk_txx_idx_stat1.s.clr = pstats->clr_on_rd;
            cvmx_write_csr (CVMX_ILK_TXX_IDX_STAT1(interface),
                            ilk_txx_idx_stat1.u64);
            ilk_txx_mem_stat1.u64 = cvmx_read_csr
                                    (CVMX_ILK_TXX_MEM_STAT1(interface));

            cvmx_dprintf ("ILK%d Channel%d Tx: %d packets %d bytes\n", interface,
                    pstats->chan_list[i], ilk_txx_mem_stat0.s.tx_pkt,
                    (unsigned int) ilk_txx_mem_stat1.s.tx_bytes);
        }
        return;
    }

    /* continuous channels */
    ilk_rxx_idx_stat0.u64 = 0;
    ilk_rxx_idx_stat0.s.index = pstats->chan_start;
    ilk_rxx_idx_stat0.s.inc = pstats->chan_step;
    ilk_rxx_idx_stat0.s.clr = pstats->clr_on_rd;
    cvmx_write_csr (CVMX_ILK_RXX_IDX_STAT0(interface), ilk_rxx_idx_stat0.u64);

    ilk_rxx_idx_stat1.u64 = 0;
    ilk_rxx_idx_stat1.s.index = pstats->chan_start;
    ilk_rxx_idx_stat1.s.inc = pstats->chan_step;
    ilk_rxx_idx_stat1.s.clr = pstats->clr_on_rd;
    cvmx_write_csr (CVMX_ILK_RXX_IDX_STAT1(interface), ilk_rxx_idx_stat1.u64);

    ilk_txx_idx_stat0.u64 = 0;
    ilk_txx_idx_stat0.s.index = pstats->chan_start;
    ilk_txx_idx_stat0.s.inc = pstats->chan_step;
    ilk_txx_idx_stat0.s.clr = pstats->clr_on_rd;
    cvmx_write_csr (CVMX_ILK_TXX_IDX_STAT0(interface), ilk_txx_idx_stat0.u64);

    ilk_txx_idx_stat1.u64 = 0;
    ilk_txx_idx_stat1.s.index = pstats->chan_start;
    ilk_txx_idx_stat1.s.inc = pstats->chan_step;
    ilk_txx_idx_stat1.s.clr = pstats->clr_on_rd;
    cvmx_write_csr (CVMX_ILK_TXX_IDX_STAT1(interface), ilk_txx_idx_stat1.u64);

    for (i = pstats->chan_start; i <= pstats->chan_end; i += pstats->chan_step)
    {
        ilk_rxx_mem_stat0.u64 = cvmx_read_csr
                                (CVMX_ILK_RXX_MEM_STAT0(interface));
        ilk_rxx_mem_stat1.u64 = cvmx_read_csr
                                (CVMX_ILK_RXX_MEM_STAT1(interface));
        cvmx_dprintf ("ILK%d Channel%d Rx: %d packets %d bytes\n", interface, i,
                ilk_rxx_mem_stat0.s.rx_pkt,
                (unsigned int) ilk_rxx_mem_stat1.s.rx_bytes);

        ilk_txx_mem_stat0.u64 = cvmx_read_csr
                                (CVMX_ILK_TXX_MEM_STAT0(interface));
        ilk_txx_mem_stat1.u64 = cvmx_read_csr
                                (CVMX_ILK_TXX_MEM_STAT1(interface));
        cvmx_dprintf ("ILK%d Channel%d Tx: %d packets %d bytes\n", interface, i,
                ilk_rxx_mem_stat0.s.rx_pkt,
                (unsigned int) ilk_rxx_mem_stat1.s.rx_bytes);
    }

    return;
}
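
/*
 * Usage sketch (the cvmx_ilk_stats_ctrl_t field types are assumed from the
 * accesses above; the chan_list element type in the SDK may differ): dump
 * and clear the counters for two discrete channels on ilk0.
 */
static void example_ilk_dump_stats(void)
{
    unsigned int chans[2] = {0, 1};    /* logical channel numbers */
    cvmx_ilk_stats_ctrl_t stats;

    memset (&stats, 0, sizeof(stats));
    stats.chan_list = chans;
    stats.num_chans = 2;
    stats.clr_on_rd = 1;               /* clear-on-read */
    cvmx_ilk_show_stats (0, &stats);
}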
/**
 * Initialize and start the SPI interface.
 *
 * @param interface The identifier of the packet interface to configure and
 *                  use as a SPI interface.
 * @param mode      The operating mode for the SPI interface. The interface
 *                  can operate as a full duplex (both Tx and Rx data paths
 *                  active) or as a halfplex (either the Tx data path is
 *                  active or the Rx data path is active, but not both).
 * @param timeout   Timeout to wait for clock synchronization in seconds
 * @return Zero on success, negative on failure.
 */
int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode, int timeout)
{
    uint64_t                     timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
    cvmx_spxx_trn4_ctl_t         spxx_trn4_ctl;
    cvmx_spxx_clk_stat_t         stat;
    cvmx_stxx_com_ctl_t          stxx_com_ctl;
    cvmx_srxx_com_ctl_t          srxx_com_ctl;
    cvmx_stxx_spi4_dat_t         stxx_spi4_dat;
    uint64_t                     count;
    cvmx_pko_reg_gmx_port_mode_t pko_mode;


    if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
        return -1;

    cvmx_dprintf ("SPI%d: mode %s, cal_len: %d, cal_rep: %d\n",
            interface, modes[mode], CAL_LEN, CAL_REP);

      // Configure for 16 ports (PKO -> GMX FIFO partition setting)
      // ----------------------------------------------------------
    pko_mode.u64 = cvmx_read_csr(CVMX_PKO_REG_GMX_PORT_MODE);
    if (interface == 0) 
    {
        pko_mode.s.mode0 = 0;
    }
    else 
    {
        pko_mode.s.mode1 = 0;
    }
    cvmx_write_csr(CVMX_PKO_REG_GMX_PORT_MODE, pko_mode.u64);

      // Configure GMX
      // -------------------------------------------------
    cvmx_write_csr (CVMX_GMXX_TX_PRTS(interface), 0xA);
      // PRTS           [ 4: 0] ( 5b) = 10


      // Bringing up Spi4 Interface
      // -------------------------------------------------

      // Reset the Spi4 deskew logic
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_DBG_DESKEW_CTL(interface), 0x00200000);
      // DLLDIS         [ 0: 0] ( 1b) = 0
      // DLLFRC         [ 1: 1] ( 1b) = 0
      // OFFDLY         [ 7: 2] ( 6b) = 0
      // BITSEL         [12: 8] ( 5b) = 0
      // OFFSET         [17:13] ( 5b) = 0
      // MUX            [18:18] ( 1b) = 0
      // INC            [19:19] ( 1b) = 0
      // DEC            [20:20] ( 1b) = 0
      // CLRDLY         [21:21] ( 1b) = 1    // Forces a reset
    cvmx_wait (100 * MS);

      // Setup the CLKDLY right in the middle
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x00000830);
      // SRXDLCK        [ 0: 0] ( 1b) = 0
      // RCVTRN         [ 1: 1] ( 1b) = 0
      // DRPTRN         [ 2: 2] ( 1b) = 0
      // SNDTRN         [ 3: 3] ( 1b) = 0
      // STATRCV        [ 4: 4] ( 1b) = 1    // Enable status channel Rx
      // STATDRV        [ 5: 5] ( 1b) = 1    // Enable status channel Tx
      // RUNBIST        [ 6: 6] ( 1b) = 0
      // CLKDLY         [11: 7] ( 5b) = 10   // 16 is the middle of the range
      // SRXLCK         [12:12] ( 1b) = 0
      // STXLCK         [13:13] ( 1b) = 0
      // SEETRN         [14:14] ( 1b) = 0
    cvmx_wait (100 * MS);

      // Reset SRX0 DLL
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x00000831);
      // SRXDLCK        [ 0: 0] ( 1b) = 1    // Restart the DLL
      // RCVTRN         [ 1: 1] ( 1b) = 0
      // DRPTRN         [ 2: 2] ( 1b) = 0
      // SNDTRN         [ 3: 3] ( 1b) = 0
      // STATRCV        [ 4: 4] ( 1b) = 1
      // STATDRV        [ 5: 5] ( 1b) = 1
      // RUNBIST        [ 6: 6] ( 1b) = 0
      // CLKDLY         [11: 7] ( 5b) = 10
      // SRXLCK         [12:12] ( 1b) = 0
      // STXLCK         [13:13] ( 1b) = 0
      // SEETRN         [14:14] ( 1b) = 0

      // Waiting for Inf0 Spi4 RX DLL to lock
      // -------------------------------------------------
    cvmx_wait (100 * MS);

      // Enable dynamic alignment
      // -------------------------------------------------
    spxx_trn4_ctl.u64 = 0;
    spxx_trn4_ctl.s.mux_en   = 1;
    spxx_trn4_ctl.s.macro_en = 1;
    spxx_trn4_ctl.s.maxdist  = 16;
    spxx_trn4_ctl.s.jitter   = 1;
    cvmx_write_csr (CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
      // MUX_EN         [ 0: 0] ( 1b) = 1
      // MACRO_EN       [ 1: 1] ( 1b) = 1
      // MAXDIST        [ 6: 2] ( 5b) = 16
      // SET_BOOT       [ 7: 7] ( 1b) = 0
      // CLR_BOOT       [ 8: 8] ( 1b) = 1
      // JITTER         [11: 9] ( 3b) = 1
      // TRNTEST        [12:12] ( 1b) = 0
    cvmx_write_csr (CVMX_SPXX_DBG_DESKEW_CTL(interface), 0x0);
      // DLLDIS         [ 0: 0] ( 1b) = 0
      // DLLFRC         [ 1: 1] ( 1b) = 0
      // OFFDLY         [ 7: 2] ( 6b) = 0
      // BITSEL         [12: 8] ( 5b) = 0
      // OFFSET         [17:13] ( 5b) = 0
      // MUX            [18:18] ( 1b) = 0
      // INC            [19:19] ( 1b) = 0
      // DEC            [20:20] ( 1b) = 0
      // CLRDLY         [21:21] ( 1b) = 0

    if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
          // SRX0 Ports
          // -------------------------------------------------
        cvmx_write_csr (CVMX_SRXX_COM_CTL(interface), 0x00000090);
          // INF_EN         [ 0: 0] ( 1b) = 0
          // ST_EN          [ 3: 3] ( 1b) = 0
          // PRTS           [ 9: 4] ( 6b) = 9

          // SRX0 Calendar Table
          // -------------------------------------------------
        cvmx_write_csr (CVMX_SRXX_SPI4_CALX(0, interface), 0x00013210);
          // PRT0           [ 4: 0] ( 5b) = 0
          // PRT1           [ 9: 5] ( 5b) = 1
          // PRT2           [14:10] ( 5b) = 2
          // PRT3           [19:15] ( 5b) = 3
          // ODDPAR         [20:20] ( 1b) = 1
        cvmx_write_csr (CVMX_SRXX_SPI4_CALX(1, interface), 0x00017654);
          // PRT0           [ 4: 0] ( 5b) = 4
          // PRT1           [ 9: 5] ( 5b) = 5
          // PRT2           [14:10] ( 5b) = 6
          // PRT3           [19:15] ( 5b) = 7
          // ODDPAR         [20:20] ( 1b) = 1
        cvmx_write_csr (CVMX_SRXX_SPI4_CALX(2, interface), 0x00000098);
          // PRT0           [ 4: 0] ( 5b) = 8
          // PRT1           [ 9: 5] ( 5b) = 9
          // PRT2           [14:10] ( 5b) = 0
          // PRT3           [19:15] ( 5b) = 0
          // ODDPAR         [20:20] ( 1b) = 0
        cvmx_write_csr (CVMX_SRXX_SPI4_STAT(interface), (CAL_REP << 8) | CAL_LEN);
          // LEN            [ 7: 0] ( 8b) = a
          // M              [15: 8] ( 8b) = 1
    }

    if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
          // STX0 Config
          // -------------------------------------------------
        cvmx_write_csr (CVMX_STXX_ARB_CTL(interface), 0x0);
          // IGNTPA         [ 3: 3] ( 1b) = 0
          // MINTRN         [ 5: 5] ( 1b) = 0
        cvmx_write_csr (CVMX_GMXX_TX_SPI_MAX(interface), 0x0408);
          // MAX2           [15: 8] ( 8b) = 4
          // MAX1           [ 7: 0] ( 8b) = 8
        cvmx_write_csr (CVMX_GMXX_TX_SPI_THRESH(interface), 0x4);
          // THRESH         [ 5: 0] ( 6b) = 4
        cvmx_write_csr (CVMX_GMXX_TX_SPI_CTL(interface), 0x0);
          // ENFORCE        [ 2: 2] ( 1b) = 0
          // TPA_CLR        [ 1: 1] ( 1b) = 0
          // CONT_PKT       [ 0: 0] ( 1b) = 0

          // STX0 Training Control
          // -------------------------------------------------
        stxx_spi4_dat.u64 = 0;
	stxx_spi4_dat.s.alpha = 32;    /*Minimum needed by dynamic alignment*/
	stxx_spi4_dat.s.max_t = 0xFFFF;  /*Minimum interval is 0x20*/
        cvmx_write_csr (CVMX_STXX_SPI4_DAT(interface), stxx_spi4_dat.u64);
          // MAX_T          [15: 0] (16b) = 0
          // ALPHA          [31:16] (16b) = 0

          // STX0 Calendar Table
          // -------------------------------------------------
        cvmx_write_csr (CVMX_STXX_SPI4_CALX(0, interface), 0x00013210);
          // PRT0           [ 4: 0] ( 5b) = 0
          // PRT1           [ 9: 5] ( 5b) = 1
          // PRT2           [14:10] ( 5b) = 2
          // PRT3           [19:15] ( 5b) = 3
          // ODDPAR         [20:20] ( 1b) = 1
        cvmx_write_csr (CVMX_STXX_SPI4_CALX(1, interface), 0x00017654);
          // PRT0           [ 4: 0] ( 5b) = 4
          // PRT1           [ 9: 5] ( 5b) = 5
          // PRT2           [14:10] ( 5b) = 6
          // PRT3           [19:15] ( 5b) = 7
          // ODDPAR         [20:20] ( 1b) = 1
        cvmx_write_csr (CVMX_STXX_SPI4_CALX(2, interface), 0x00000098);
          // PRT0           [ 4: 0] ( 5b) = 8
          // PRT1           [ 9: 5] ( 5b) = 9
          // PRT2           [14:10] ( 5b) = 0
          // PRT3           [19:15] ( 5b) = 0
          // ODDPAR         [20:20] ( 1b) = 0
        cvmx_write_csr (CVMX_STXX_SPI4_STAT(interface), (CAL_REP << 8) | CAL_LEN);
          // LEN            [ 7: 0] ( 8b) = a
          // M              [15: 8] ( 8b) = 1
    }

      /* Regardless of operating mode, both Tx and Rx clocks must be present
       * for the SPI interface to operate.
       */
    cvmx_dprintf ("SPI%d: Waiting to see TsClk...\n", interface);
    count = 0;
    timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
    do {  /* Do we see the TsClk transitioning? */
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        count = count + 1;
#ifdef DEBUG
        if ((count % 5000000) == 10) {
	    cvmx_dprintf ("SPI%d: CLK_STAT 0x%016llX\n"
                    "  s4 (%d,%d) d4 (%d,%d)\n",
                    interface, (unsigned long long)stat.u64,
                    stat.s.s4clk0, stat.s.s4clk1,
                    stat.s.d4clk0, stat.s.d4clk1);
        }
#endif
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.s4clk0 == 0 || stat.s.s4clk1 == 0);

    cvmx_dprintf ("SPI%d: Waiting to see RsClk...\n", interface);
    timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
    do {  /* Do we see the RsClk transitioning? */
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.d4clk0 == 0 || stat.s.d4clk1 == 0);


      // SRX0 & STX0 Inf0 Links are configured - begin training
      // -------------------------------------------------
    cvmx_write_csr (CVMX_SPXX_CLK_CTL(interface), 0x0000083f);
      // SRXDLCK        [ 0: 0] ( 1b) = 1
      // RCVTRN         [ 1: 1] ( 1b) = 1
      // DRPTRN         [ 2: 2] ( 1b) = 1    ...was 0
      // SNDTRN         [ 3: 3] ( 1b) = 1
      // STATRCV        [ 4: 4] ( 1b) = 1
      // STATDRV        [ 5: 5] ( 1b) = 1
      // RUNBIST        [ 6: 6] ( 1b) = 0
      // CLKDLY         [11: 7] ( 5b) = 10
      // SRXLCK         [12:12] ( 1b) = 0
      // STXLCK         [13:13] ( 1b) = 0
      // SEETRN         [14:14] ( 1b) = 0
    cvmx_wait (1000 * MS);

      // SRX0 clear the boot bit
      // -------------------------------------------------
    spxx_trn4_ctl.s.clr_boot = 1;
    cvmx_write_csr (CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);

      // Wait for the training sequence to complete
      // -------------------------------------------------
      // SPX0_CLK_STAT - SPX0_CLK_STAT[SRXTRN] should be 1 (bit8)
    cvmx_dprintf ("SPI%d: Waiting for training\n", interface);
    cvmx_wait (1000 * MS);
    timeout_time = cvmx_get_cycle() + 1000ull * MS * 600;  /* Wait a really long time here */
    do {
        stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT(interface));
        if (cvmx_get_cycle() > timeout_time)
        {
            cvmx_dprintf ("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.srxtrn == 0);

    if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
          // SRX0 interface should be good, send calendar data
          // -------------------------------------------------
        cvmx_dprintf ("SPI%d: Rx is synchronized, start sending calendar data\n", interface);
        srxx_com_ctl.u64 = 0;
        srxx_com_ctl.s.prts   = 9;
        srxx_com_ctl.s.inf_en = 1;
        srxx_com_ctl.s.st_en  = 1;
        cvmx_write_csr (CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
    }

    if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
          // STX0 has achieved sync
      // The corresponding board should be sending calendar data
          // Enable the STX0 STAT receiver.
          // -------------------------------------------------
        stxx_com_ctl.u64 = 0;
        stxx_com_ctl.s.inf_en = 1;
        stxx_com_ctl.s.st_en = 1;
        cvmx_write_csr (CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);

          // Waiting for calendar sync on STX0 STAT
          // -------------------------------------------------
          // SPX0_CLK_STAT - SPX0_CLK_STAT[STXCAL] should be 1 (bit10)
        cvmx_dprintf ("SPI%d: Waiting to sync on STX[%d] STAT\n", interface, interface);
        timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
        do {
            stat.u64 = cvmx_read_csr (CVMX_SPXX_CLK_STAT (interface));
            if (cvmx_get_cycle() > timeout_time)
            {
                cvmx_dprintf ("SPI%d: Timeout\n", interface);
                return -1;
            }
        } while (stat.s.stxcal == 0);
    }

      // Inf0 is synched
      // -------------------------------------------------
      // SPX0 is up
    if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
        srxx_com_ctl.s.inf_en = 1;
        cvmx_write_csr (CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
        cvmx_dprintf ("SPI%d: Rx is now up\n", interface);
    }

    if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
        stxx_com_ctl.s.inf_en = 1;
        cvmx_write_csr (CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
        cvmx_dprintf ("SPI%d: Tx is now up\n", interface);
    }

    cvmx_write_csr (CVMX_GMXX_RXX_FRM_MIN (0,interface), 40);
    cvmx_write_csr (CVMX_GMXX_RXX_FRM_MAX (0,interface), 64*1024 - 4);
    cvmx_write_csr (CVMX_GMXX_RXX_JABBER  (0,interface), 64*1024 - 4);

    return 0;
}
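/*
 * Editor's sketch (not SDK code): the function above repeats the same
 * poll-until-set-with-timeout loop for D4CLK, SRXTRN and STXCAL. Assuming
 * only cvmx_get_cycle() and cvmx_read_csr(), the idiom could be factored
 * as below; "done" is a caller-supplied predicate over the raw CSR value.
 */
static int spi_wait_for(uint64_t csr_addr, int (*done)(uint64_t),
                        uint64_t timeout_cycles)
{
    uint64_t timeout_time = cvmx_get_cycle() + timeout_cycles;

    while (!done(cvmx_read_csr(csr_addr))) {
        if (cvmx_get_cycle() > timeout_time)
            return -1;  /* caller logs the "SPI%d: Timeout" message */
    }
    return 0;
}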
Example #23
0
/**
 * configure calendar for tx
 *
 * @param interface The identifier of the packet interface to configure and
 *                  use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
 *                  ilk1.
 *
 * @param cal_depth the number of calendar entries
 * @param pent      pointer to calendar entries
 *
 * @return Zero on success, negative on failure.
 */
static int cvmx_ilk_tx_cal_conf (int interface, int cal_depth, 
                          cvmx_ilk_cal_entry_t *pent)
{
    int res = -1, num_grp, num_rest, i, j;
    cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
    cvmx_ilk_txx_idx_cal_t ilk_txx_idx_cal;
    cvmx_ilk_txx_mem_cal0_t ilk_txx_mem_cal0;
    cvmx_ilk_txx_mem_cal1_t ilk_txx_mem_cal1;
    unsigned long int tmp;
    cvmx_ilk_cal_entry_t *ent_tmp;

    if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
        return res;

    if (interface >= CVMX_NUM_ILK_INTF)
        return res;

    if (cal_depth < CVMX_ILK_TX_MIN_CAL || cal_depth > CVMX_ILK_MAX_CAL
        || pent == NULL)
        return res;

    /* mandatory link-level flow control as a workaround for ILK-15397 and ILK-15479 */
    /* TODO: test effectiveness */
#if 0
    if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1_0) && pent->ent_ctrl == PIPE_BPID)
        for (i = 0; i < cal_depth; i++)
            pent->ent_ctrl = LINK;
#endif

    /* tx calendar depth must be a multiple of 8 */
    num_grp = (cal_depth - 1) / CVMX_ILK_CAL_GRP_SZ + 1;
    num_rest = cal_depth % CVMX_ILK_CAL_GRP_SZ;
    if (num_rest != 0)
    {
        ent_tmp = pent + cal_depth;
        for (i = num_rest; i < 8; i++, ent_tmp++)
        {
            ent_tmp->pipe_bpid = 0;
            ent_tmp->ent_ctrl = XOFF;
        }
    }
    cal_depth = num_grp * 8;

    /* set the depth */
    ilk_txx_cfg0.u64 = cvmx_read_csr (CVMX_ILK_TXX_CFG0(interface));
    ilk_txx_cfg0.s.cal_depth = cal_depth;
    cvmx_write_csr (CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64); 

    /* set the calendar index */
    ilk_txx_idx_cal.u64 = 0;
    ilk_txx_idx_cal.s.inc = 1;
    cvmx_write_csr (CVMX_ILK_TXX_IDX_CAL(interface), ilk_txx_idx_cal.u64); 

    /* set the calendar entries. each group has both cal0 and cal1 registers */
    for (i = 0; i < num_grp; i++)
    {
        ilk_txx_mem_cal0.u64 = 0;
        for (j = 0; j < CVMX_ILK_CAL_GRP_SZ/2; j++)
        {
            tmp = 0;
            tmp = pent->pipe_bpid & ~(~tmp << CVMX_ILK_PIPE_BPID_SZ);
            tmp <<= (CVMX_ILK_PIPE_BPID_SZ + CVMX_ILK_ENT_CTRL_SZ) * j;
            ilk_txx_mem_cal0.u64 |= tmp;

            tmp = 0;
            tmp = pent->ent_ctrl & ~(~tmp << CVMX_ILK_ENT_CTRL_SZ);
            tmp <<= (CVMX_ILK_PIPE_BPID_SZ + CVMX_ILK_ENT_CTRL_SZ) * j +
                    CVMX_ILK_PIPE_BPID_SZ;
            ilk_txx_mem_cal0.u64 |= tmp;
            pent++;
        }
        cvmx_write_csr(CVMX_ILK_TXX_MEM_CAL0(interface), ilk_txx_mem_cal0.u64);

        ilk_txx_mem_cal1.u64 = 0;
        for (j = 0; j < CVMX_ILK_CAL_GRP_SZ/2; j++)
        {
            tmp = 0;
            tmp = pent->pipe_bpid & ~(~tmp << CVMX_ILK_PIPE_BPID_SZ);
            tmp <<= (CVMX_ILK_PIPE_BPID_SZ + CVMX_ILK_ENT_CTRL_SZ) * j;
            ilk_txx_mem_cal1.u64 |= tmp;

            tmp = 0;
            tmp = pent->ent_ctrl & ~(~tmp << CVMX_ILK_ENT_CTRL_SZ);
            tmp <<= (CVMX_ILK_PIPE_BPID_SZ + CVMX_ILK_ENT_CTRL_SZ) * j +
                    CVMX_ILK_PIPE_BPID_SZ;
            ilk_txx_mem_cal1.u64 |= tmp;
            pent++;
        }
        cvmx_write_csr(CVMX_ILK_TXX_MEM_CAL1(interface), ilk_txx_mem_cal1.u64);
    }
    cvmx_read_csr (CVMX_ILK_TXX_MEM_CAL1(interface));

    return 0;
}
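/*
 * Editor's usage sketch for cvmx_ilk_tx_cal_conf() (values are
 * illustrative, not from the SDK): program a 9-entry calendar on ilk0.
 * The array leaves room for the XOFF padding the function writes when
 * rounding the depth up to the next group of 8.
 */
static int example_ilk_tx_cal(void)
{
    cvmx_ilk_cal_entry_t cal[16];  /* 9 used + up to 7 padded by callee */
    int i;

    for (i = 0; i < 9; i++) {
        cal[i].pipe_bpid = i;        /* hypothetical pipe numbering */
        cal[i].ent_ctrl = PIPE_BPID; /* enum value referenced above */
    }
    return cvmx_ilk_tx_cal_conf(0, 9, cal);
}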
Example #24
0
int early_board_init(void)
{
	const int cpu_ref = 33;

	/* NOTE: this is early in the init process, so the serial port is not
	 * yet configured
	 */

	/* Populate global data from eeprom */
	uint8_t ee_buf[OCTEON_EEPROM_MAX_TUPLE_LENGTH];
	int addr;

	addr = octeon_tlv_get_tuple_addr(CONFIG_SYS_DEF_EEPROM_ADDR,
					 EEPROM_CLOCK_DESC_TYPE, 0, ee_buf,
					 OCTEON_EEPROM_MAX_TUPLE_LENGTH);
	if (addr >= 0) {
		memcpy((void *)&(gd->arch.clock_desc), ee_buf,
		       sizeof(octeon_eeprom_clock_desc_t));
	}

	octeon_board_get_descriptor(CVMX_BOARD_TYPE_CUST_WSX16, 1, 0);

	if (OCTEON_IS_MODEL(OCTEON_CN58XX))
		gd->mem_clk = 333;
	else
		gd->mem_clk = 266;

	/*
	 * For the cn58xx the DDR reference clock frequency is used to
	 * configure the appropriate internal DDR_CK/DDR2_REF_CLK ratio in
	 * order to generate the ddr clock frequency specified by
	 * ddr_clock_mhz.  For cn38xx this setting should match the state
	 * of the DDR2 output clock divide by 2 pin DDR2_PLL_DIV2.
	 *
	 * For DDR2_PLL_DIV2 = 0 the DDR_CK/DDR2_REF_CLK ratio is 4
	 * For DDR2_PLL_DIV2 = 1 the DDR_CK/DDR2_REF_CLK ratio is 2
	 */
#define FIXED_DDR_REF_CLOCK_RATIO	4

	/*
	 * More details about DDR clock configuration used for LMC
	 * configuration for the CN58XX.  Not used for CN38XX.  Since the
	 * reference clock frequency is not known it is inferred from the
	 * specified DCLK frequency divided by the DDR_CK/DDR2_REF_CLK
	 * ratio.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN38XX))
		gd->arch.ddr_ref_hertz =
			gd->mem_clk * 1000000 / FIXED_DDR_REF_CLOCK_RATIO;
	else
		gd->arch.ddr_ref_hertz = gd->mem_clk * 1000000 / 2;

	octeon_board_get_mac_addr();

	/* Read CPU clock multiplier */
	uint64_t data = cvmx_read_csr(CVMX_DBG_DATA);
	data = data >> 18;
	data &= 0x1f;

	gd->cpu_clk = data * cpu_ref;

	/* adjust for the 33.33 MHz reference clock */
	if (cpu_ref == 33)
		gd->cpu_clk += data / 4 + data / 8;

	if (gd->cpu_clk < 100 || gd->cpu_clk > 600)
		gd->cpu_clk = DEFAULT_ECLK_FREQ_MHZ;
	gd->cpu_clk *= 1000000;

	return 0;
}
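/*
 * Editor's note on the 33.33 MHz correction above (a sketch, not board
 * code): multiplying by the integer cpu_ref of 33 under-counts by a third
 * of a MHz per multiplier step; data/4 + data/8 adds 0.375 * data back,
 * which slightly over-corrects (33.375 vs. 33.333 MHz per step).
 */
static unsigned int example_cpu_clk_mhz(unsigned int mult)
{
	unsigned int clk = mult * 33;	/* e.g. mult = 16 -> 528 */

	clk += mult / 4 + mult / 8;	/* 16/4 + 16/8 = 6 -> 534 MHz */
	return clk;			/* exact value would be 533.3 */
}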
Example #25
0
static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;
	int mtu_overhead = ETH_HLEN + ETH_FCS_LEN;

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	mtu_overhead += VLAN_HLEN;
#endif

	octeon_mdiobus_force_mod_depencency();

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	if (receive_group_order) {
		if (receive_group_order > 4)
			receive_group_order = 4;
		pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
	} else {
		pow_receive_groups = BIT(pow_receive_group);
	}

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));

			if (receive_group_order) {
				int tag_mask;

				/* We support only 16 groups at the moment, so
				 * always disable the two additional "hidden"
				 * tag_mask bits on CN68XX.
				 */
				if (OCTEON_IS_MODEL(OCTEON_CN68XX))
					pip_prt_tagx.u64 |= 0x3ull << 44;

				tag_mask = ~((1 << receive_group_order) - 1);
				pip_prt_tagx.s.grptagbase	= 0;
				pip_prt_tagx.s.grptagmask	= tag_mask;
				pip_prt_tagx.s.grptag		= 1;
				pip_prt_tagx.s.tag_mode		= 0;
				pip_prt_tagx.s.inc_prt_flag	= 1;
				pip_prt_tagx.s.ip6_dprt_flag	= 1;
				pip_prt_tagx.s.ip4_dprt_flag	= 1;
				pip_prt_tagx.s.ip6_sprt_flag	= 1;
				pip_prt_tagx.s.ip4_sprt_flag	= 1;
				pip_prt_tagx.s.ip6_dst_flag	= 1;
				pip_prt_tagx.s.ip4_dst_flag	= 1;
				pip_prt_tagx.s.ip6_src_flag	= 1;
				pip_prt_tagx.s.ip4_src_flag	= 1;
				pip_prt_tagx.s.grp		= 0;
			} else {
				pip_prt_tagx.s.grptag	= 0;
				pip_prt_tagx.s.grp	= pow_receive_group;
			}

			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if ((pow_send_group != -1)) {
		struct net_device *dev;

		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			SET_NETDEV_DEV(dev, &pdev->dev);
			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
			dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
		    cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
			    alloc_etherdev(sizeof(struct octeon_ethernet));
			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			SET_NETDEV_DEV(dev, &pdev->dev);
			priv = netdev_priv(dev);
			priv->netdev = dev;
			priv->of_node = cvm_oct_node_for_port(pip, interface,
							      port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
			dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
			dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				cvm_set_rgmii_delay(priv->of_node, interface,
						    port_index);
				break;
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -=
				    cvmx_pko_get_num_queues(priv->port) *
				    sizeof(u32);
				schedule_delayed_work(&priv->port_periodic_work,
						      HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 uS: about 10 1500-byte packets at 1GE.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);

	return 0;
}
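/*
 * Editor's sketch of the receive_group_order math above (illustrative
 * only): order n enables 2^n POW groups, and the grptag fields let the
 * low n bits of each flow's tag select the group.
 */
#include <stdio.h>

int main(void)
{
	int order = 2;				/* 4 receive groups */
	int groups = (1 << (1 << order)) - 1;	/* 0x000f: groups 0-3 */
	int tag_mask = ~((1 << order) - 1);	/* keep low 2 tag bits */

	printf("groups=0x%x tag_mask=0x%x\n", groups, tag_mask);
	return 0;
}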
Example #26
0
/**
 * Measure the reference clock of a QLM
 *
 * @param qlm    QLM to measure
 *
 * @return Clock rate in Hz
 */
int cvmx_qlm_measure_clock(int qlm)
{
	cvmx_mio_ptp_clock_cfg_t ptp_clock;
	uint64_t count;
	uint64_t start_cycle, stop_cycle;
#ifdef CVMX_BUILD_FOR_UBOOT
	int ref_clock[16] = {0};
#else
	static int ref_clock[16] = {0};
#endif

	if (ref_clock[qlm])
		return ref_clock[qlm];

	if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
		return -1;

	/* Force the reference clock to 156.25 MHz when running in simulation,
	   since that supports the widest range of speeds */
#ifdef CVMX_BUILD_FOR_UBOOT
	if (gd->arch.board_desc.board_type == CVMX_BOARD_TYPE_SIM)
		return 156250000;
#elif !defined(CVMX_BUILD_FOR_LINUX_HOST)
	if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
		return 156250000;
#endif
	/* Disable the PTP event counter while we configure it */
	ptp_clock.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);	/* For CN63XXp1 errata */
	ptp_clock.s.evcnt_en = 0;
	cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp_clock.u64);
	/* Count on rising edge, Choose which QLM to count */
	ptp_clock.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);	/* For CN63XXp1 errata */
	ptp_clock.s.evcnt_edge = 0;
	ptp_clock.s.evcnt_in = 0x10 + qlm;
	cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp_clock.u64);
	/* Clear MIO_PTP_EVT_CNT */
	cvmx_read_csr(CVMX_MIO_PTP_EVT_CNT);	/* For CN63XXp1 errata */
	count = cvmx_read_csr(CVMX_MIO_PTP_EVT_CNT);
	cvmx_write_csr(CVMX_MIO_PTP_EVT_CNT, -count);
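	/* Writes appear to add to MIO_PTP_EVT_CNT, so writing -count zeroes
	 * the counter before the 1 billion preset below (an inference from
	 * this code, not a documented claim).
	 */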
	/* Set MIO_PTP_EVT_CNT to 1 billion */
	cvmx_write_csr(CVMX_MIO_PTP_EVT_CNT, 1000000000);
	/* Enable the PTP event counter */
	ptp_clock.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);	/* For CN63XXp1 errata */
	ptp_clock.s.evcnt_en = 1;
	cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp_clock.u64);
	start_cycle = cvmx_clock_get_count(CVMX_CLOCK_CORE);
	/* Wait for 50ms */
	cvmx_wait_usec(50000);
	/* Read the counter */
	cvmx_read_csr(CVMX_MIO_PTP_EVT_CNT);	/* For CN63XXp1 errata */
	count = cvmx_read_csr(CVMX_MIO_PTP_EVT_CNT);
	stop_cycle = cvmx_clock_get_count(CVMX_CLOCK_CORE);
	/* Disable the PTP event counter */
	ptp_clock.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);	/* For CN63XXp1 errata */
	ptp_clock.s.evcnt_en = 0;
	cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp_clock.u64);
	/* Clock counted down, so reverse it */
	count = 1000000000 - count;
	/* Return the rate */
	ref_clock[qlm] = count * cvmx_clock_get_rate(CVMX_CLOCK_CORE) / (stop_cycle - start_cycle);
	return ref_clock[qlm];
}
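/*
 * Editor's worked instance of the rate computation above (numbers are
 * illustrative): the counter is preset to 1e9 and counts down on the QLM
 * reference, so events = 1e9 - count, scaled by the core cycles that
 * actually elapsed rather than the nominal 50 ms.
 */
static long long example_qlm_rate(void)
{
	long long events  = 7812500;	/* 1e9 - count after ~50 ms */
	long long core_hz = 1000000000;	/* 1 GHz core clock */
	long long cycles  = 50000000;	/* stop_cycle - start_cycle */

	return events * core_hz / cycles;	/* 156250000 = 156.25 MHz */
}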
Example #27
0
int board_fixup_fdt(void)
{
	char fdt_key[16];
	int qlm;
	char env_var[16];
	const char *mode;
	int node;

	/* First patch the PHY configuration based on board revision */
	octeon_fdt_patch_rename((void *)(gd->fdt_blob),
				gd->arch.board_desc.rev_major == 1
				? "0,rev1" : "0,notrev1",
				"cavium,board-trim", false, NULL, NULL);

	/* Next, patch CPU revision changes */
	for (node = 0; node < CVMX_MAX_NODES; node++) {
		if (!(gd->arch.node_mask & (1 << node)))
			continue;
		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0))
			sprintf(fdt_key, "%d,rev1.0", node);
		else
			sprintf(fdt_key, "%d,notrev1.0", node);
		octeon_fdt_patch((void *)(gd->fdt_blob), fdt_key, "cavium,cpu-trim");
	}

	for (qlm = 0; qlm < 8; qlm++) {
		sprintf(env_var, "qlm%d:%d_mode", qlm, 0);
		mode = getenv(env_var);
		sprintf(fdt_key, "%d,none", qlm);
		if (!mode) {
			sprintf(fdt_key, "%d,none", qlm);
		} else if (!strncmp(mode, "sgmii", 5)) {
			sprintf(fdt_key, "%d,sgmii", qlm);
		} else if (!strncmp(mode, "xaui", 4)) {
			sprintf(fdt_key, "%d,xaui", qlm);
		} else if (!strncmp(mode, "dxaui", 5)) {
			sprintf(fdt_key, "%d,dxaui", qlm);
		} else if (!strcmp(mode, "ilk")) {
			if (qlm < 4)
				printf("Error: ILK not supported on QLM %d\n",
				       qlm);
			else
				sprintf(fdt_key, "%d,ilk", qlm);
		} else if (!strncmp(mode, "rxaui", 5)) {
			sprintf(fdt_key, "%d,rxaui", qlm);
		} else if (!strncmp(mode, "xlaui", 5)) {
			if (qlm < 4)
				printf("Error: XLAUI not supported on QLM %d\n",
				       qlm);
			else
				sprintf(fdt_key, "%d,xlaui", qlm);
		} else if (!strncmp(mode, "xfi", 3)) {
			if (qlm < 4)
				printf("Error: XFI not supported on QLM %d\n",
				       qlm);
			else
				sprintf(fdt_key, "%d,xfi", qlm);
		} else if (!strncmp(mode, "10G_KR", 6)) {
			if (qlm < 4)
				printf("Error: 10G_KR not supported on QLM %d\n",
				       qlm);
			else
				sprintf(fdt_key, "%d,10G_KR", qlm);
		} else if (!strncmp(mode, "40G_KR4", 7)) {
			if (qlm < 4)
				printf("Error: 40G_KR4 not supported on QLM %d\n",
				       qlm);
			else
				sprintf(fdt_key, "%d,40G_KR4", qlm);
		}
		debug("Patching qlm %d for %s\n", qlm, fdt_key);
		octeon_fdt_patch_rename((void *)gd->fdt_blob, fdt_key, NULL, true,
					mode && strstr(mode, ",no_phy")
					? kill_fdt_phy : NULL, NULL);
	}

	/* Test for node 1 */
	if (!(gd->arch.node_mask & (1 << 1))) {
		octeon_fdt_patch((void *)(gd->fdt_blob), "1,none",
				 "cavium,node-trim");
		goto final_cleanup;
	}

	/* This won't do anything if node 1 is missing */
	for (qlm = 10; qlm < 18; qlm++) {
		sprintf(env_var, "qlm%d:%d_mode", qlm - 10, 1);
		mode = getenv(env_var);

		sprintf(fdt_key, "%d,none", qlm);
		if (!mode) {
			sprintf(fdt_key, "%d,none", qlm);
		} else if (!strncmp(mode, "sgmii", 5)) {
			sprintf(fdt_key, "%d,sgmii", qlm);
		} else if (!strncmp(mode, "xaui", 4)) {
			sprintf(fdt_key, "%d,xaui", qlm);
		} else if (!strncmp(mode, "dxaui", 5)) {
			sprintf(fdt_key, "%d,dxaui", qlm);
		} else if (!strcmp(mode, "ilk")) {
			if (qlm < 4)
				printf("Error: ILK not supported on QLM %d\n",
				       qlm);
			else
				sprintf(fdt_key, "%d,ilk", qlm);
		} else if (!strncmp(mode, "rxaui", 5)) {
			sprintf(fdt_key, "%d,rxaui", qlm);
		} else if (!strncmp(mode, "xlaui", 5)) {
			if (qlm < 4)
				printf("Error: XLAUI not supported on QLM %d\n",
				       qlm);
			else
				sprintf(fdt_key, "%d,xlaui", qlm);
		} else if (!strncmp(mode, "xfi", 3)) {
			if (qlm < 4)
				printf("Error: XFI not supported on QLM %d\n",
				       qlm);
			else
				sprintf(fdt_key, "%d,xfi", qlm);
		} else if (!strncmp(mode, "10G_KR", 6)) {
			if (qlm < 4)
				printf("Error: 10G_KR not supported on QLM %d\n",
				       qlm);
			else
				sprintf(fdt_key, "%d,10G_KR", qlm);
		} else if (!strncmp(mode, "40G_KR4", 7)) {
			if (qlm < 4)
				printf("Error: 40G_KR4 not supported on QLM %d\n",
				       qlm);
			else
				sprintf(fdt_key, "%d,40G_KR4", qlm);
		}
		debug("Patching qlm %d for %s\n", qlm, fdt_key);
		octeon_fdt_patch_rename((void *)gd->fdt_blob, fdt_key, NULL, true,
					mode && strstr(mode, ",no_phy")
					? kill_fdt_phy : NULL, NULL);
	}

final_cleanup:
	node = fdt_node_offset_by_compatible((void *)gd->fdt_blob, -1, "cavium,octeon-7890-bgx");
	while (node != -FDT_ERR_NOTFOUND) {
		int depth = 0;
		fdt_next_node((void *)gd->fdt_blob, node, &depth);
		if (depth != 1)
			fdt_nop_node((void *)gd->fdt_blob, node);
		node = fdt_node_offset_by_compatible((void *)gd->fdt_blob, node, "cavium,octeon-7890-bgx");
	}

	return 0;
}
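/*
 * Editor's refactor sketch (not board code): the two QLM loops above
 * duplicate the same mode-string-to-fdt-key ladder, including the
 * qlm >= 4 restriction for some modes. A table would keep the copies
 * from drifting apart; the names below are hypothetical.
 */
struct qlm_mode_map {
	const char *prefix;	/* matched with strncmp() as above */
	const char *key;	/* suffix for the "%d,<key>" fdt key */
	int needs_qlm4;		/* only valid on QLM 4-7 */
};

static const struct qlm_mode_map qlm_modes[] = {
	{ "sgmii", "sgmii", 0 }, { "dxaui",   "dxaui",   0 },
	{ "rxaui", "rxaui", 0 }, { "xaui",    "xaui",    0 },
	{ "ilk",   "ilk",   1 }, { "xlaui",   "xlaui",   1 },
	{ "xfi",   "xfi",   1 }, { "10G_KR",  "10G_KR",  1 },
	{ "40G_KR4", "40G_KR4", 1 },
};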
Example #28
0
/**
 * Errata G-16094: QLM Gen2 Equalizer Default Setting Change.
 * CN68XX pass 1.x and CN66XX pass 1.x QLM tweak. This function tweaks the
 * JTAG settings for the QLMs so they run better at 5 and 6.25 GHz.
 */
void __cvmx_qlm_speed_tweak(void)
{
	cvmx_mio_qlmx_cfg_t qlm_cfg;
	int num_qlms = cvmx_qlm_get_num();
	int qlm;

	/* Workaround for Errata (G-16467) */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_X)) {
		for (qlm = 0; qlm < num_qlms; qlm++) {
			int ir50dac;
			/* This workaround only applies to QLMs running at 6.25 GHz */
			if (cvmx_qlm_get_gbaud_mhz(qlm) == 6250) {
#ifdef CVMX_QLM_DUMP_STATE
				cvmx_dprintf("%s:%d: QLM%d: Applying workaround for Errata G-16467\n", __func__, __LINE__, qlm);
				cvmx_qlm_display_registers(qlm);
				cvmx_dprintf("\n");
#endif
				cvmx_qlm_jtag_set(qlm, -1, "cfg_cdr_trunc", 0);
				/* Hold the QLM in reset */
				cvmx_qlm_jtag_set(qlm, -1, "cfg_rst_n_set", 0);
				cvmx_qlm_jtag_set(qlm, -1, "cfg_rst_n_clr", 1);
				/* Force TX to be idle */
				cvmx_qlm_jtag_set(qlm, -1, "cfg_tx_idle_clr", 0);
				cvmx_qlm_jtag_set(qlm, -1, "cfg_tx_idle_set", 1);
				if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_0)) {
					ir50dac = cvmx_qlm_jtag_get(qlm, 0, "ir50dac");
					while (++ir50dac <= 31)
						cvmx_qlm_jtag_set(qlm, -1, "ir50dac", ir50dac);
				}
				cvmx_qlm_jtag_set(qlm, -1, "div4_byp", 0);
				cvmx_qlm_jtag_set(qlm, -1, "clkf_byp", 16);
				cvmx_qlm_jtag_set(qlm, -1, "serdes_pll_byp", 1);
				cvmx_qlm_jtag_set(qlm, -1, "spdsel_byp", 1);
#ifdef CVMX_QLM_DUMP_STATE
				cvmx_dprintf("%s:%d: QLM%d: Done applying workaround for Errata G-16467\n", __func__, __LINE__, qlm);
				cvmx_qlm_display_registers(qlm);
				cvmx_dprintf("\n\n");
#endif
				/* The QLM will be taken out of reset later when ILK/XAUI are initialized. */
			}
		}

#ifndef CVMX_BUILD_FOR_LINUX_HOST
		/* These QLM tuning parameters are specific to EBB6800
		   eval boards using Cavium QLM cables. They should be
		   removed or tuned based on customer boards. */
		if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBB6800) {
			for (qlm = 0; qlm < num_qlms; qlm++) {
#ifdef CVMX_QLM_DUMP_STATE
				cvmx_dprintf("Setting tunning parameters for QLM%d\n", qlm);
#endif
				cvmx_qlm_jtag_set(qlm, -1, "biasdrv_hs_ls_byp", 12);
				cvmx_qlm_jtag_set(qlm, -1, "biasdrv_hf_byp", 12);
				cvmx_qlm_jtag_set(qlm, -1, "biasdrv_lf_ls_byp", 12);
				cvmx_qlm_jtag_set(qlm, -1, "biasdrv_lf_byp", 12);
				cvmx_qlm_jtag_set(qlm, -1, "tcoeff_hf_byp", 15);
				cvmx_qlm_jtag_set(qlm, -1, "tcoeff_hf_ls_byp", 15);
				cvmx_qlm_jtag_set(qlm, -1, "tcoeff_lf_ls_byp", 15);
				cvmx_qlm_jtag_set(qlm, -1, "tcoeff_lf_byp", 15);
				cvmx_qlm_jtag_set(qlm, -1, "rx_cap_gen2", 0);
				cvmx_qlm_jtag_set(qlm, -1, "rx_eq_gen2", 11);
				cvmx_qlm_jtag_set(qlm, -1, "serdes_tx_byp", 1);
			}
		}
		else if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_NIC68_4) {
			for (qlm = 0; qlm < num_qlms; qlm++) {
#ifdef CVMX_QLM_DUMP_STATE
				cvmx_dprintf("Setting tunning parameters for QLM%d\n", qlm);
#endif
				cvmx_qlm_jtag_set(qlm, -1, "biasdrv_hs_ls_byp", 30);
				cvmx_qlm_jtag_set(qlm, -1, "biasdrv_hf_byp", 30);
				cvmx_qlm_jtag_set(qlm, -1, "biasdrv_lf_ls_byp", 30);
				cvmx_qlm_jtag_set(qlm, -1, "biasdrv_lf_byp", 30);
				cvmx_qlm_jtag_set(qlm, -1, "tcoeff_hf_byp", 0);
				cvmx_qlm_jtag_set(qlm, -1, "tcoeff_hf_ls_byp", 0);
				cvmx_qlm_jtag_set(qlm, -1, "tcoeff_lf_ls_byp", 0);
				cvmx_qlm_jtag_set(qlm, -1, "tcoeff_lf_byp", 0);
				cvmx_qlm_jtag_set(qlm, -1, "rx_cap_gen2", 1);
				cvmx_qlm_jtag_set(qlm, -1, "rx_eq_gen2", 8);
				cvmx_qlm_jtag_set(qlm, -1, "serdes_tx_byp", 1);
			}
		}
#endif
	}

	/* G-16094 QLM Gen2 Equalizer Default Setting Change */
	else if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1_X)
		 || OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_X)) {
		/* Loop through the QLMs */
		for (qlm = 0; qlm < num_qlms; qlm++) {
			/* Read the QLM speed */
			qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(qlm));

			/* If the QLM is at 6.25 GHz or 5 GHz then program JTAG */
			if (qlm_cfg.s.qlm_spd == 5 || qlm_cfg.s.qlm_spd == 12 ||
			    qlm_cfg.s.qlm_spd == 0 || qlm_cfg.s.qlm_spd == 6 ||
			    qlm_cfg.s.qlm_spd == 11) {
				cvmx_qlm_jtag_set(qlm, -1, "rx_cap_gen2", 0x1);
				cvmx_qlm_jtag_set(qlm, -1, "rx_eq_gen2", 0x8);
			}
		}
	}
}
Example #29
0
/**
 * Configure common hardware for all interfaces
 */
static void cvm_oct_configure_common_hw(device_t bus)
{
	struct octebus_softc *sc;
	int pko_queues;
	int error;
	int rid;

	sc = device_get_softc(bus);

	/* Setup the FPA */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL) {
		/*
		 * If the FPA uses different pools for output buffers and
		 * packets, size the output buffer pool based on the number
		 * of PKO queues.
		 */
		if (OCTEON_IS_MODEL(OCTEON_CN38XX))
			pko_queues = 128;
		else if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
			pko_queues = 32;
		else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
			pko_queues = 32;
		else
			pko_queues = 256;

		cvm_oct_num_output_buffers = 4 * pko_queues;
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE,
				     cvm_oct_num_output_buffers);
	}

	if (USE_RED)
		cvmx_helper_setup_red(num_packet_buffers/4,
				      num_packet_buffers/8);

	/* Enable the MII interface */
	if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM)
		cvmx_write_csr(CVMX_SMI_EN, 1);

	/* Register an IRQ handler to receive POW interrupts */
	rid = 0;
	sc->sc_rx_irq = bus_alloc_resource(bus, SYS_RES_IRQ, &rid,
					   OCTEON_IRQ_WORKQ0 + pow_receive_group,
					   OCTEON_IRQ_WORKQ0 + pow_receive_group,
					   1, RF_ACTIVE);
	if (sc->sc_rx_irq == NULL) {
		device_printf(bus, "could not allocate workq irq");
		return;
	}

	error = bus_setup_intr(bus, sc->sc_rx_irq, INTR_TYPE_NET | INTR_MPSAFE,
			       cvm_oct_do_interrupt, NULL, cvm_oct_device,
			       &sc->sc_rx_intr_cookie);
	if (error != 0) {
		device_printf(bus, "could not setup workq irq");
		return;
	}

#ifdef SMP
	{
		cvmx_ciu_intx0_t en;
		int core;

		CPU_FOREACH(core) {
			if (core == PCPU_GET(cpuid))
				continue;

			en.u64 = cvmx_read_csr(CVMX_CIU_INTX_EN0(core*2));
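			/* EN0 index core*2 is assumed to be that core's IP2
			 * enable (core*2 + 1 would be IP3).
			 */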
			en.s.workq |= (1<<pow_receive_group);
			cvmx_write_csr(CVMX_CIU_INTX_EN0(core*2), en.u64);
		}
	}
#endif
}

static void __cvmx_ipd_free_ptr_v1(void)
{
	unsigned wqe_pool = cvmx_fpa_get_wqe_pool();
	int i;
	union cvmx_ipd_ptr_count ptr_count;
	union cvmx_ipd_prc_port_ptr_fifo_ctl prc_port_fifo;
	int packet_pool = (int)cvmx_fpa_get_packet_pool();

	/* Only CN38XXp{1,2} cannot read pointers back out of the IPD */
	if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
		return;

	ptr_count.u64 = cvmx_read_csr(CVMX_IPD_PTR_COUNT);

	/* Handle Work Queue Entry in cn56xx and cn52xx */
	if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
		union cvmx_ipd_ctl_status ctl_status;
		ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
		if (ctl_status.s.no_wptr)
			wqe_pool = packet_pool;
	}

	/* Free the prefetched WQE */
	if (ptr_count.s.wqev_cnt) {
		union cvmx_ipd_wqe_ptr_valid wqe_ptr_valid;
		wqe_ptr_valid.u64 = cvmx_read_csr(CVMX_IPD_WQE_PTR_VALID);
		cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)wqe_ptr_valid.s.ptr << 7),
			      wqe_pool, 0);
	}

	/* Free all WQE in the fifo */
	if (ptr_count.s.wqe_pcnt) {
		union cvmx_ipd_pwp_ptr_fifo_ctl pwp_fifo;
		pwp_fifo.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
		for (i = 0; i < ptr_count.s.wqe_pcnt; i++) {
			pwp_fifo.s.cena = 0;
			pwp_fifo.s.raddr = pwp_fifo.s.max_cnts + (pwp_fifo.s.wraddr + i) % pwp_fifo.s.max_cnts;
			cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, pwp_fifo.u64);
			pwp_fifo.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
			cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)pwp_fifo.s.ptr << 7),
				      wqe_pool, 0);
		}
		pwp_fifo.s.cena = 1;
		cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, pwp_fifo.u64);
	}

	/* Free the prefetched packet */
	if (ptr_count.s.pktv_cnt) {
		union cvmx_ipd_pkt_ptr_valid pkt_ptr_valid;
		pkt_ptr_valid.u64 = cvmx_read_csr(CVMX_IPD_PKT_PTR_VALID);
		cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)pkt_ptr_valid.s.ptr << 7),
			      packet_pool, 0);
	}

	/* Free the per port prefetched packets */
	prc_port_fifo.u64 = cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);

	for (i = 0; i < prc_port_fifo.s.max_pkt; i++) {
		prc_port_fifo.s.cena = 0;
		prc_port_fifo.s.raddr = i % prc_port_fifo.s.max_pkt;
		cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL,
			       prc_port_fifo.u64);
		prc_port_fifo.u64 = cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);
		cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)prc_port_fifo.s.ptr << 7),
			      packet_pool, 0);
	}
	prc_port_fifo.s.cena = 1;
	cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL, prc_port_fifo.u64);

	/* Free all packets in the holding fifo */
	if (ptr_count.s.pfif_cnt) {
		union cvmx_ipd_prc_hold_ptr_fifo_ctl prc_hold_fifo;

		prc_hold_fifo.u64 = cvmx_read_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);

		for (i = 0; i < ptr_count.s.pfif_cnt; i++) {
			prc_hold_fifo.s.cena = 0;
			prc_hold_fifo.s.raddr = (prc_hold_fifo.s.praddr + i) % prc_hold_fifo.s.max_pkt;
			cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL,
				       prc_hold_fifo.u64);
			prc_hold_fifo.u64 = cvmx_read_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);
			cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)prc_hold_fifo.s.ptr << 7),
				      packet_pool, 0);
		}
		prc_hold_fifo.s.cena = 1;
		cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL,
			       prc_hold_fifo.u64);
	}

	/* Free all packets in the fifo */
	if (ptr_count.s.pkt_pcnt) {
		union cvmx_ipd_pwp_ptr_fifo_ctl pwp_fifo;
		pwp_fifo.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);

		for (i = 0; i < ptr_count.s.pkt_pcnt; i++) {
			pwp_fifo.s.cena = 0;
			pwp_fifo.s.raddr = (pwp_fifo.s.praddr + i) % pwp_fifo.s.max_cnts;
			cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, pwp_fifo.u64);
			pwp_fifo.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
			cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)pwp_fifo.s.ptr << 7),
				      packet_pool, 0);
		}
		pwp_fifo.s.cena = 1;
		cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, pwp_fifo.u64);
	}
}
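/*
 * Editor's sketch (not SDK code): the four drain loops above share one
 * idiom -- clear the FIFO's CSR-enable bit (cena), point raddr at an
 * entry, read the pointer back, and free it to an FPA pool. For the PWP
 * FIFO it could be factored as below; the PRC FIFOs would need their own
 * variants because the union layouts differ.
 */
static void example_ipd_drain_pwp(int start, int count, int wrap,
				  unsigned int pool)
{
	union cvmx_ipd_pwp_ptr_fifo_ctl fifo;
	int i;

	for (i = 0; i < count; i++) {
		fifo.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
		fifo.s.cena = 0;
		fifo.s.raddr = (start + i) % wrap;
		cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, fifo.u64);
		fifo.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
		cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)fifo.s.ptr << 7),
			      pool, 0);
	}
	fifo.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
	fifo.s.cena = 1;
	cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, fifo.u64);
}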