Example #1
/**
 * @INTERNAL
 * Bring up and enable an SPI interface. After this call packet I/O
 * should be fully functional. This is called with IPD enabled but
 * PKO disabled.
 *
 * @param interface Interface to bring up
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_spi_enable(int interface)
{
    /* Normally the ethernet L2 CRC is checked and stripped in the GMX block.
        When you are using SPI, this isn't the case and IPD needs to check
        the L2 CRC */
    int num_ports = cvmx_helper_ports_on_interface(interface);
    int ipd_port;
    for (ipd_port=interface*16; ipd_port<interface*16+num_ports; ipd_port++)
    {
        cvmx_pip_prt_cfgx_t port_config;
        port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
        port_config.s.crc_en = 1;
#ifdef OCTEON_VENDOR_RADISYS
	/*
	 * Incoming packets on the RSYS4GBE have the FCS stripped.
	 */
	if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE)
		port_config.s.crc_en = 0;
#endif
        cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_config.u64);
    }

    if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM)
    {
        cvmx_spi_start_interface(interface, CVMX_SPI_MODE_DUPLEX, CVMX_HELPER_SPI_TIMEOUT, num_ports);
        if (cvmx_spi4000_is_present(interface))
            cvmx_spi4000_initialize(interface);
    }
    return 0;
}
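The doc comment above fixes the contract: zero on success, negative on failure, called with IPD enabled but PKO disabled. As a minimal usage sketch (the wrapper name is hypothetical, and the @INTERNAL marker suggests the SDK normally invokes this helper itself during packet I/O initialization), a caller could enable every SPI-mode interface using only helper functions that appear elsewhere on this page:

/* Hypothetical wrapper: enable each interface currently in SPI mode. */
static int bring_up_all_spi(void)
{
    int num_interfaces = cvmx_helper_get_number_of_interfaces();
    int interface;

    for (interface = 0; interface < num_interfaces; interface++) {
        if (cvmx_helper_interface_get_mode(interface) !=
            CVMX_HELPER_INTERFACE_MODE_SPI)
            continue;
        if (__cvmx_helper_spi_enable(interface) != 0)
            return -1;  /* negative on failure, per the contract above */
    }
    return 0;
}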
Example #2
int __cvmx_helper_spi_enable(int interface)
{
	/*
	 * Normally the ethernet L2 CRC is checked and stripped in the
	 * GMX block.  When you are using SPI, this isn't the case and
	 * IPD needs to check the L2 CRC.
	 */
	int num_ports = cvmx_helper_ports_on_interface(interface);
	int ipd_port;
	for (ipd_port = interface * 16; ipd_port < interface * 16 + num_ports;
	     ipd_port++) {
		union cvmx_pip_prt_cfgx port_config;
		port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
		port_config.s.crc_en = 1;
		cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_config.u64);
	}

	if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) {
		cvmx_spi_start_interface(interface, CVMX_SPI_MODE_DUPLEX,
					 CVMX_HELPER_SPI_TIMEOUT, num_ports);
		if (cvmx_spi4000_is_present(interface))
			cvmx_spi4000_initialize(interface);
	}
	__cvmx_interrupt_spxx_int_msk_enable(interface);
	__cvmx_interrupt_stxx_int_msk_enable(interface);
	__cvmx_interrupt_gmxx_enable(interface);
	return 0;
}
Example #3
int __cvmx_helper_spi_enable(int interface)
{
	/*
	 * Normally the ethernet L2 CRC is checked and stripped in the
	 * GMX block.  When you are using SPI, this isn't the case and
	 * IPD needs to check the L2 CRC.
	 */
	int num_ports = cvmx_helper_ports_on_interface(interface);
	int ipd_port;
	for (ipd_port = interface * 16; ipd_port < interface * 16 + num_ports;
	     ipd_port++) {
		union cvmx_pip_prt_cfgx port_config;
		port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
		port_config.s.crc_en = 1;
		cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_config.u64);
	}

	if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) {
		cvmx_spi_start_interface(interface, CVMX_SPI_MODE_DUPLEX,
					 CVMX_HELPER_SPI_TIMEOUT, num_ports);
		if (cvmx_spi4000_is_present(interface))
			cvmx_spi4000_initialize(interface);
	}
	__cvmx_interrupt_spxx_int_msk_enable(interface);
	__cvmx_interrupt_stxx_int_msk_enable(interface);
	__cvmx_interrupt_gmxx_enable(interface);
	return 0;
}
Example #4
int __cvmx_helper_npi_enable(int interface)
{
	/*
	 * On CN50XX, CN52XX, and CN56XX we need to disable length
	 * checking so packets < 64 bytes and jumbo frames don't get
	 * errors.
	 */
	if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) &&
	    !OCTEON_IS_MODEL(OCTEON_CN58XX)) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		for (port = 0; port < num_ports; port++) {
			union cvmx_pip_prt_cfgx port_cfg;
			int ipd_port =
			    cvmx_helper_get_ipd_port(interface, port);
			port_cfg.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
			port_cfg.s.maxerr_en = 0;
			port_cfg.s.minerr_en = 0;
			cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port),
				       port_cfg.u64);
		}
	}

	/* Enables are controlled by the remote host, so nothing to do here */
	return 0;
}
Example #5
/**
 * @INTERNAL
 * Bring up and enable an NPI interface. After this call packet
 * I/O should be fully functional. This is called with IPD
 * enabled but PKO disabled.
 *
 * @param interface Interface to bring up
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_npi_enable(int interface)
{
	int port;
	int num_ports = cvmx_helper_ports_on_interface(interface);

	if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
		/*
		 * Enables are controlled by the remote host, so
		 * nothing to do here.
		 */
		return 0;

	/*
	 * On CN50XX, CN52XX, and CN56XX we need to disable length
	 * checking so packets < 64 bytes and jumbo frames don't get
	 * errors.
	 */
	for (port = 0; port < num_ports; port++) {
		union cvmx_pip_prt_cfgx port_cfg;
		int ipd_port = (OCTEON_IS_MODEL(OCTEON_CN68XX)) ? cvmx_helper_get_pknd(interface, port) : cvmx_helper_get_ipd_port(interface, port);
		port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
		port_cfg.s.lenerr_en = 0;
		port_cfg.s.maxerr_en = 0;
		port_cfg.s.minerr_en = 0;
		cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_cfg.u64);

		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
			/* Set up pknd and bpid */
			union cvmx_sli_portx_pkind config;
			config.u64 = cvmx_read_csr(CVMX_PEXP_SLI_PORTX_PKIND(port));
			config.s.bpkind = cvmx_helper_get_bpid(interface, port);
			config.s.pkind = cvmx_helper_get_pknd(interface, port);
			cvmx_write_csr(CVMX_PEXP_SLI_PORTX_PKIND(port), config.u64);
		}
	}

	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		/*
		 * Set up pko pipes.
		 */
		union cvmx_sli_tx_pipe config;
		config.u64 = cvmx_read_csr(CVMX_PEXP_SLI_TX_PIPE);
		config.s.base = __cvmx_pko_get_pipe(interface, 0);
		config.s.nump = cvmx_npi_num_pipes < 0 ? num_ports : cvmx_npi_num_pipes;
		cvmx_write_csr(CVMX_PEXP_SLI_TX_PIPE, config.u64);
	}

	/* Enables are controlled by the remote host, so nothing to do here */
	return 0;
}
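Both versions of __cvmx_helper_npi_enable lean on the same CSR idiom used throughout these examples: each register is modeled as a union of a raw .u64 word and a .s bitfield view, read in full, modified field by field, and written back. A self-contained sketch of the pattern, with a hypothetical example_csr_t layout standing in for the SDK-generated types (cvmx_read_csr/cvmx_write_csr come from the SDK headers):

#include <stdint.h>

/* Hypothetical field layout; the real unions are generated from the
 * hardware manual, so the bit positions here are illustrative only. */
typedef union {
	uint64_t u64;
	struct {
		uint64_t reserved : 62;
		uint64_t maxerr_en : 1;	/* max-length error check enable */
		uint64_t minerr_en : 1;	/* min-length error check enable */
	} s;
} example_csr_t;

static void disable_length_checks(int ipd_port)
{
	example_csr_t cfg;

	cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port)); /* read whole CSR */
	cfg.s.maxerr_en = 0;	/* touch only the fields of interest */
	cfg.s.minerr_en = 0;
	cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), cfg.u64); /* write it back */
}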
Example #6
int __cvmx_helper_npi_enable(int interface)
{
	/*
	 * On CN50XX, CN52XX, and CN56XX we need to disable length
	 * checking so packets < 64 bytes and jumbo frames don't get
	 * errors.
	 */
	if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) &&
	    !OCTEON_IS_MODEL(OCTEON_CN58XX)) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		for (port = 0; port < num_ports; port++) {
			union cvmx_pip_prt_cfgx port_cfg;
			int ipd_port =
			    cvmx_helper_get_ipd_port(interface, port);
			port_cfg.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
			port_cfg.s.maxerr_en = 0;
			port_cfg.s.minerr_en = 0;
			cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port),
				       port_cfg.u64);
		}
	}

	/* Enables are controlled by the remote host, so nothing to do here */
	return 0;
}
Example #7
static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;

	octeon_mdiobus_force_mod_depencency();

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}


	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if ((pow_send_group != -1)) {
		struct net_device *dev;

		pr_info("\tConfiguring device for POW only access\n");
		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
		    cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
			    alloc_etherdev(sizeof(struct octeon_ethernet));
			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			priv = netdev_priv(dev);
			priv->netdev = dev;
			priv->of_node = cvm_oct_node_for_port(pip, interface,
								port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
					 interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -=
				    cvmx_pko_get_num_queues(priv->port) *
				    sizeof(u32);
				schedule_delayed_work(&priv->port_periodic_work, HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 uS: about 10 1500-byte packets at 1GE.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);

	return 0;
}
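The 150 uS figure in the last comment checks out: at 1 Gbit/s a 1500-byte frame occupies 1500 * 8 / 10^9 s = 12 uS on the wire, so 150 uS covers roughly ten full-size frames. A sketch of the tick conversion the probe performs (the helper name is hypothetical):

/* octeon_get_clock_rate() returns core-clock ticks per second, so
 * dividing by 1000000 yields ticks per microsecond. */
static u64 tx_poll_interval_ticks(void)
{
	u64 ticks_per_us = octeon_get_clock_rate() / 1000000;

	return 150 * ticks_per_us;	/* the value stored above */
}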
Example #8
static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
{
	union cvmx_npi_rsl_int_blocks rsl_int_blocks;
	int index;
	irqreturn_t return_status = IRQ_NONE;

	rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS);

	/* Check and see if this interrupt was caused by the GMX0 block */
	if (rsl_int_blocks.s.gmx0) {

		int interface = 0;
		/* Loop through every port of this interface */
		for (index = 0;
		     index < cvmx_helper_ports_on_interface(interface);
		     index++) {

			/* Read the GMX interrupt status bits */
			union cvmx_gmxx_rxx_int_reg gmx_rx_int_reg;
			gmx_rx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, interface));
			gmx_rx_int_reg.u64 &= cvmx_read_csr(CVMX_GMXX_RXX_INT_EN(index, interface));
			/* Poll the port if inband status changed */
			if (gmx_rx_int_reg.s.phy_dupx
			    || gmx_rx_int_reg.s.phy_link
			    || gmx_rx_int_reg.s.phy_spd) {
				struct octeon_ethernet *priv = cvm_oct_by_port[cvmx_helper_get_ipd_port(interface, index)];

				if (priv && !atomic_read(&cvm_oct_poll_queue_stopping))
					queue_work(cvm_oct_poll_queue, &priv->port_work);

				gmx_rx_int_reg.u64 = 0;
				gmx_rx_int_reg.s.phy_dupx = 1;
				gmx_rx_int_reg.s.phy_link = 1;
				gmx_rx_int_reg.s.phy_spd = 1;
				cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface), gmx_rx_int_reg.u64);
				return_status = IRQ_HANDLED;
			}
		}
	}

	/* Check and see if this interrupt was caused by the GMX1 block */
	if (rsl_int_blocks.s.gmx1) {

		int interface = 1;
		/* Loop through every port of this interface */
		for (index = 0;
		     index < cvmx_helper_ports_on_interface(interface);
		     index++) {

			/* Read the GMX interrupt status bits */
			union cvmx_gmxx_rxx_int_reg gmx_rx_int_reg;
			gmx_rx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, interface));
			gmx_rx_int_reg.u64 &= cvmx_read_csr(CVMX_GMXX_RXX_INT_EN(index, interface));
			/* Poll the port if inband status changed */
			if (gmx_rx_int_reg.s.phy_dupx
			    || gmx_rx_int_reg.s.phy_link
			    || gmx_rx_int_reg.s.phy_spd) {
				struct octeon_ethernet *priv = cvm_oct_by_port[cvmx_helper_get_ipd_port(interface, index)];

				if (priv && !atomic_read(&cvm_oct_poll_queue_stopping))
					queue_work(cvm_oct_poll_queue, &priv->port_work);

				gmx_rx_int_reg.u64 = 0;
				gmx_rx_int_reg.s.phy_dupx = 1;
				gmx_rx_int_reg.s.phy_link = 1;
				gmx_rx_int_reg.s.phy_spd = 1;
				cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface), gmx_rx_int_reg.u64);
				return_status = IRQ_HANDLED;
			}
		}
	}
	return return_status;
}
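Note how the handler acknowledges the interrupt: it does not write the read status word back, but builds a fresh value with 1s only in the bits it serviced. That is the usual idiom for write-1-to-clear status registers, and it avoids wiping events that latch between the read and the write. The acknowledge step in isolation (a sketch of the lines above, factored into a hypothetical helper):

static void ack_inband_status_change(int index, int interface)
{
	union cvmx_gmxx_rxx_int_reg ack;

	ack.u64 = 0;		/* start from zero, not from the value read */
	ack.s.phy_dupx = 1;	/* set only the latches that were serviced */
	ack.s.phy_link = 1;
	ack.s.phy_spd = 1;
	cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface), ack.u64);
}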
Example #9
int main(int argc, char *argv[])
{
  /* mandatory function to initialize simple executive application */
  cvmx_user_app_init();

  sysinfo = cvmx_sysinfo_get();

  if (cvmx_is_init_core()) {
    /* may need to specify this manually for simulator */
    cpu_clock_hz = sysinfo->cpu_clock_hz;

    if(init_tasks(NUM_PACKET_BUFFERS) != 0) {
      printf("Initialization failed!\n");
      exit(-1);
    }

    /* get the FPA pool number of packet and WQE pools */
    packet_pool = cvmx_fpa_get_packet_pool();
    wqe_pool = cvmx_fpa_get_wqe_pool();

    print_debug_info();

    int num_interfaces = cvmx_helper_get_number_of_interfaces();
    int interface;
    bool found_valid_xaui_port = false;

    for (interface=0; interface < num_interfaces && !found_valid_xaui_port; interface++) {
      uint32_t num_ports = cvmx_helper_ports_on_interface(interface);
      cvmx_helper_interface_mode_t imode = cvmx_helper_interface_get_mode(interface);

      if (imode == CVMX_HELPER_INTERFACE_MODE_XAUI) {
        printf("\nIdentified XAUI interface with %" PRIu32 " port(s)\n", num_ports);
        printf("interface number: %d\n", interface);

        uint32_t port;
        for (port = 0; port < num_ports; port++) {
          if (cvmx_helper_is_port_valid(interface, port)) {
            xaui_ipd_port = cvmx_helper_get_ipd_port(interface, port);
            printf("xaui_ipd_port: %d\n", xaui_ipd_port);
            found_valid_xaui_port = true;
            break;
          }
        }
      }

      printf("\n");
    }
  }

  /* Wait (stall) until all cores in the given coremask have reached this
   * point in the program execution before proceeding. */
  CORE_MASK_BARRIER_SYNC; 

  if (cvmx_is_init_core()) {
    receive_packet();  
  } else if (cvmx_get_core_num() == 1) {
    send_packet();
  } else { /* for this program, all cores besides the first two are superfluous */
    printf("Superfluous core #%02d\n", cvmx_get_core_num());
    return 0;
  }

  printf("Execution complete for core #%02d\n", cvmx_get_core_num());
  return 0;
}
Example #10
static int cvm_oct_driver_probe(struct platform_device *dev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;

	octeon_mdiobus_force_mod_depencency();
	pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);

	if (OCTEON_IS_MODEL(OCTEON_CN52XX))
		cvm_oct_mac_addr_offset = 2; /* First two are the mgmt ports. */
	else if (OCTEON_IS_MODEL(OCTEON_CN56XX))
		cvm_oct_mac_addr_offset = 1; /* First one is the mgmt port. */
	else
		cvm_oct_mac_addr_offset = 0;

#if defined(CONFIG_SG590) || defined(CONFIG_SG770)
{
	union cvmx_gmxx_inf_mode mode;
	mode.u64 = 0;
	mode.s.en = 1;
	cvmx_write_csr(CVMX_GMXX_INF_MODE(0), mode.u64);
}
#endif
#ifdef CONFIG_SG8200
/*
 *	SG8200 directs the 2 internal ports out MII to 2 single 100Mbit
 *	interfaces. (The switch is on the 8139C+).
 */
{
	union cvmx_gmxx_inf_mode mode;
	mode.u64 = 0;
	mode.s.en = 1;
	mode.s.p0mii = 1;
	mode.s.type = 1;
	cvmx_write_csr(CVMX_GMXX_INF_MODE(0), mode.u64);
}
#endif

	cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
	if (cvm_oct_poll_queue == NULL) {
		pr_err("octeon-ethernet: Cannot create workqueue");
		return -ENOMEM;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;
			pip_prt_tagx.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if ((pow_send_group != -1)) {
		struct net_device *dev;
		pr_info("\tConfiguring device for POW only access\n");
		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
		    cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
			    alloc_etherdev(sizeof(struct octeon_ethernet));
			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n", port);
				continue;
			}

			/* Initialize the device private structure. */
			priv = netdev_priv(dev);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

			switch (priv->imode) {

			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device "
					 "for interface %d, port %d\n",
					 interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -=
				    cvmx_pko_get_num_queues(priv->port) *
				    sizeof(uint32_t);
				queue_delayed_work(cvm_oct_poll_queue,
						   &priv->port_periodic_work, HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 uS: about 10 1500-byte packets at 1GE.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);

#ifdef CONFIG_SG590
{
	void cvm_oct_marvel_switch_init(void);
	printk("cavium-ethernet: enabling Marvell 88e6046 switch: intra-switch forwarding disabled\n");
	cvm_oct_marvel_switch_init();
}
#endif

	return 0;
}
Example #11
/**
 * Module/ driver initialization. Creates the linux network
 * devices.
 *
 * @return Zero on success
 */
int cvm_oct_init_module(device_t bus)
{
	device_t dev;
	int ifnum;
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;

	cvm_oct_rx_initialize();
	cvm_oct_configure_common_hw(bus);

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = 0; port < num_ports; port++) {
			cvmx_pip_prt_tagx_t pip_prt_tagx;
			int pkind = cvmx_helper_get_ipd_port(interface, port);

			pip_prt_tagx.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(pkind));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(pkind), pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	cvm_oct_link_taskq = taskqueue_create("octe link", M_NOWAIT,
	    taskqueue_thread_enqueue, &cvm_oct_link_taskq);
	taskqueue_start_threads(&cvm_oct_link_taskq, 1, PI_NET,
	    "octe link taskq");

	/* Initialize the FAU used for counting packet buffers that need to be freed */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	ifnum = 0;
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode = cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     ifnum++, port++) {
			cvm_oct_private_t *priv;
			struct ifnet *ifp;
			
			dev = BUS_ADD_CHILD(bus, 0, "octe", ifnum);
			if (dev != NULL)
				ifp = if_alloc(IFT_ETHER);
			if (dev == NULL || ifp == NULL) {
				printf("Failed to allocate ethernet device for interface %d port %d\n", interface, port);
				continue;
			}

			/* Initialize the device private structure. */
			device_probe(dev);
			priv = device_get_softc(dev);
			priv->dev = dev;
			priv->ifp = ifp;
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < cvmx_pko_get_num_queues(port); qos++)
				cvmx_fau_atomic_write32(priv->fau+qos*4, 0);
			TASK_INIT(&priv->link_task, 0, cvm_oct_update_link, priv);

			switch (priv->imode) {

			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				priv->init = cvm_oct_common_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon NPI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				priv->init = cvm_oct_xaui_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon XAUI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				priv->init = cvm_oct_common_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon LOOP Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				priv->init = cvm_oct_sgmii_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon SGMII Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				priv->init = cvm_oct_spi_init;
				priv->uninit = cvm_oct_spi_uninit;
				device_set_desc(dev, "Cavium Octeon SPI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
				priv->init = cvm_oct_rgmii_init;
				priv->uninit = cvm_oct_rgmii_uninit;
				device_set_desc(dev, "Cavium Octeon RGMII Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_GMII:
				priv->init = cvm_oct_rgmii_init;
				priv->uninit = cvm_oct_rgmii_uninit;
				device_set_desc(dev, "Cavium Octeon GMII Ethernet");
				break;
			}

			ifp->if_softc = priv;

			if (!priv->init) {
				printf("octe%d: unsupported device type interface %d, port %d\n",
				       ifnum, interface, priv->port);
				if_free(ifp);
			} else if (priv->init(ifp) != 0) {
				printf("octe%d: failed to register device for interface %d, port %d\n",
				       ifnum, interface, priv->port);
				if_free(ifp);
			} else {
				cvm_oct_device[priv->port] = ifp;
				fau -= cvmx_pko_get_num_queues(priv->port) * sizeof(uint32_t);
			}
		}
	}

	if (INTERRUPT_LIMIT) {
		/* Set the POW timer rate to give an interrupt at most INTERRUPT_LIMIT times per second */
		cvmx_write_csr(CVMX_POW_WQ_INT_PC, cvmx_clock_get_rate(CVMX_CLOCK_CORE)/(INTERRUPT_LIMIT*16*256)<<8);

		/* Enable POW timer interrupt. It will count when there are packets available */
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1ful<<24);
	} else {
		/* Enable POW interrupt when our port has at least one packet */
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1001);
	}

	callout_init(&cvm_oct_poll_timer, 1);
	callout_reset(&cvm_oct_poll_timer, hz, cvm_do_timer, NULL);

	return 0;
}
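The CVMX_POW_WQ_INT_PC write above encodes a periodic-counter value. A worked example with assumed numbers, as a sketch: the 600 MHz core clock and 10000 interrupts/s limit are illustrative, and the << 8 field position is inferred from the code, not from the hardware manual:

static uint64_t pow_wq_int_pc_value(uint64_t core_hz, uint64_t irq_limit)
{
	/* e.g. 600000000 / (10000 * 16 * 256) = 14, stored as 14 << 8 */
	return (core_hz / (irq_limit * 16 * 256)) << 8;
}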
Example #12
/**
 * @INTERNAL
 * Bring up and enable an SRIO interface. After this call packet
 * I/O should be fully functional. This is called with IPD
 * enabled but PKO disabled.
 *
 * @param interface Interface to bring up
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_srio_enable(int interface)
{
    int num_ports = cvmx_helper_ports_on_interface(interface);
    int index;
    cvmx_sriomaintx_core_enables_t sriomaintx_core_enables;
    cvmx_sriox_imsg_ctrl_t sriox_imsg_ctrl;
    cvmx_sriox_status_reg_t srio_status_reg;
    cvmx_dpi_ctl_t dpi_ctl;
    int srio_port = interface - 4;

    /* All SRIO ports have a cvmx_srio_rx_message_header_t header
        on them that must be skipped by IPD */
    for (index=0; index<num_ports; index++)
    {
        cvmx_pip_prt_cfgx_t port_config;
        cvmx_sriox_omsg_portx_t sriox_omsg_portx;
        cvmx_sriox_omsg_sp_mrx_t sriox_omsg_sp_mrx;
        cvmx_sriox_omsg_fmp_mrx_t sriox_omsg_fmp_mrx;
        cvmx_sriox_omsg_nmp_mrx_t sriox_omsg_nmp_mrx;
        int ipd_port = cvmx_helper_get_ipd_port(interface, index);
        port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
        /* Only change the skip if the user hasn't already set it */
        if (!port_config.s.skip)
        {
            port_config.s.skip = sizeof(cvmx_srio_rx_message_header_t);
            cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_config.u64);
        }

        /* Enable TX with PKO */
        sriox_omsg_portx.u64 = cvmx_read_csr(CVMX_SRIOX_OMSG_PORTX(index, srio_port));
        sriox_omsg_portx.s.port = (srio_port) * 2 + index;
        sriox_omsg_portx.s.enable = 1;
        cvmx_write_csr(CVMX_SRIOX_OMSG_PORTX(index, srio_port), sriox_omsg_portx.u64);

        /* Allow OMSG controller to send regardless of the state of any other
            controller. Allow messages to different IDs and MBOXes to go in
            parallel */
        sriox_omsg_sp_mrx.u64 = 0;
        sriox_omsg_sp_mrx.s.xmbox_sp = 1;
        sriox_omsg_sp_mrx.s.ctlr_sp = 1;
        sriox_omsg_sp_mrx.s.ctlr_fmp = 1;
        sriox_omsg_sp_mrx.s.ctlr_nmp = 1;
        sriox_omsg_sp_mrx.s.id_sp = 1;
        sriox_omsg_sp_mrx.s.id_fmp = 1;
        sriox_omsg_sp_mrx.s.id_nmp = 1;
        sriox_omsg_sp_mrx.s.mbox_sp = 1;
        sriox_omsg_sp_mrx.s.mbox_fmp = 1;
        sriox_omsg_sp_mrx.s.mbox_nmp = 1;
        sriox_omsg_sp_mrx.s.all_psd = 1;
        cvmx_write_csr(CVMX_SRIOX_OMSG_SP_MRX(index, srio_port), sriox_omsg_sp_mrx.u64);

        /* Allow OMSG controller to send regardless of the state of any other
            controller. Allow messages to different IDs and MBOXes to go in
            parallel */
        sriox_omsg_fmp_mrx.u64 = 0;
        sriox_omsg_fmp_mrx.s.ctlr_sp = 1;
        sriox_omsg_fmp_mrx.s.ctlr_fmp = 1;
        sriox_omsg_fmp_mrx.s.ctlr_nmp = 1;
        sriox_omsg_fmp_mrx.s.id_sp = 1;
        sriox_omsg_fmp_mrx.s.id_fmp = 1;
        sriox_omsg_fmp_mrx.s.id_nmp = 1;
        sriox_omsg_fmp_mrx.s.mbox_sp = 1;
        sriox_omsg_fmp_mrx.s.mbox_fmp = 1;
        sriox_omsg_fmp_mrx.s.mbox_nmp = 1;
        sriox_omsg_fmp_mrx.s.all_psd = 1;
        cvmx_write_csr(CVMX_SRIOX_OMSG_FMP_MRX(index, srio_port), sriox_omsg_fmp_mrx.u64);

        /* Once the first part of a message is accepted, always accept the
            rest of the message */
        sriox_omsg_nmp_mrx.u64 = 0;
        sriox_omsg_nmp_mrx.s.all_sp = 1;
        sriox_omsg_nmp_mrx.s.all_fmp = 1;
        sriox_omsg_nmp_mrx.s.all_nmp = 1;
        cvmx_write_csr(CVMX_SRIOX_OMSG_NMP_MRX(index, srio_port), sriox_omsg_nmp_mrx.u64);

    }

    /* Choose the receive controller based on the mailbox */
    sriox_imsg_ctrl.u64 = cvmx_read_csr(CVMX_SRIOX_IMSG_CTRL(srio_port));
    sriox_imsg_ctrl.s.prt_sel = 0;
    sriox_imsg_ctrl.s.mbox = 0xa;
    cvmx_write_csr(CVMX_SRIOX_IMSG_CTRL(srio_port), sriox_imsg_ctrl.u64);

    /* DPI must be enabled for us to RX messages */
    dpi_ctl.u64 = cvmx_read_csr(CVMX_DPI_CTL);
    dpi_ctl.s.clk = 1;
    dpi_ctl.s.en = 1;
    cvmx_write_csr(CVMX_DPI_CTL, dpi_ctl.u64);

    /* Make sure register access is allowed */
    srio_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(srio_port));
    if (!srio_status_reg.s.access)
        return 0;

    /* Enable RX */
    if (!cvmx_srio_config_read32(srio_port, 0, -1, 0, 0,
        CVMX_SRIOMAINTX_CORE_ENABLES(srio_port), &sriomaintx_core_enables.u32))
    {
        sriomaintx_core_enables.s.imsg0 = 1;
        sriomaintx_core_enables.s.imsg1 = 1;
        cvmx_srio_config_write32(srio_port, 0, -1, 0, 0,
            CVMX_SRIOMAINTX_CORE_ENABLES(srio_port), sriomaintx_core_enables.u32);
    }

    return 0;
}
Example #13
/**
 * @INTERNAL
 * Configure all of the ASX, GMX, and PKO registers required
 * to get RGMII to function on the supplied interface.
 *
 * @param interface PKO Interface to configure (0 or 1)
 *
 * @return Zero on success
 */
int __cvmx_helper_rgmii_enable(int interface)
{
    int num_ports = cvmx_helper_ports_on_interface(interface);
    int port;
    cvmx_gmxx_inf_mode_t mode;
    cvmx_asxx_tx_prt_en_t asx_tx;
    cvmx_asxx_rx_prt_en_t asx_rx;

    mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));

    if (mode.s.en == 0)
        return -1;
    if ((OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)) && mode.s.type == 1)   /* Ignore SPI interfaces */
        return -1;

    /* Configure the ASX registers needed to use the RGMII ports */
    asx_tx.u64 = 0;
    asx_tx.s.prt_en = cvmx_build_mask(num_ports);
    cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), asx_tx.u64);

    asx_rx.u64 = 0;
    asx_rx.s.prt_en = cvmx_build_mask(num_ports);
    cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), asx_rx.u64);

    /* Configure the GMX registers needed to use the RGMII ports */
    for (port=0; port<num_ports; port++)
    {
        /* Setting of CVMX_GMXX_TXX_THRESH has been moved to
            __cvmx_helper_setup_gmx() */

        /* Configure more flexible RGMII preamble checking. Pass 1 doesn't
           support this feature. */
        cvmx_gmxx_rxx_frm_ctl_t frm_ctl;
        frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(port, interface));
        frm_ctl.s.pre_free = 1;  /* New field, so must be compile time */
        cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(port, interface), frm_ctl.u64);

        /* Each pause frame transmitted will ask for about 10M bit times
            before resume.  If buffer space comes available before that time
            has expired, an XON pause frame (0 time) will be transmitted to
            restart the flow. */
        cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_TIME(port, interface), 20000);
        cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(port, interface), 19000);

        /*
         * Board types we have to know at compile-time.
         */
#if defined(OCTEON_BOARD_CAPK_0100ND)
        cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface), 26);
        cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface), 26);
#else
	/*
	 * Vendor-defined board types.
	 */
#if defined(OCTEON_VENDOR_LANNER)
	switch (cvmx_sysinfo_get()->board_type) {
	case CVMX_BOARD_TYPE_CUST_LANNER_MR320:
	case CVMX_BOARD_TYPE_CUST_LANNER_MR321X:
            if (port == 0) {
                cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface), 4);
	    } else {
                cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface), 7);
            }
            cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface), 0);
	    break;
	}
#else
        /*
         * For board types we can determine at runtime.
         */
        if (OCTEON_IS_MODEL(OCTEON_CN50XX))
        {
            cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface), 16);
            cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface), 16);
        }
        else
        {
            cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface), 24);
            cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface), 24);
        }
#endif
#endif
    }

    __cvmx_helper_setup_gmx(interface, num_ports);

    /* enable the ports now */
    for (port=0; port<num_ports; port++)
    {
        cvmx_gmxx_prtx_cfg_t gmx_cfg;
        cvmx_helper_link_autoconf(cvmx_helper_get_ipd_port(interface, port));
        gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(port, interface));
        gmx_cfg.s.en = 1;
        cvmx_write_csr(CVMX_GMXX_PRTX_CFG(port, interface), gmx_cfg.u64);
    }
    return 0;
}
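The "10M bit times" comment is consistent with the IEEE 802.3 pause quantum of 512 bit times; that unit is not stated in this file, so treat it as an assumption. With it, the arithmetic works out as below, and the 19000-quanta refresh interval re-sends the pause shortly before the previous request expires so back-pressure stays continuous:

/* Assumption: one pause quantum = 512 bit times (IEEE 802.3). */
enum { PAUSE_QUANTUM_BIT_TIMES = 512 };

/* 20000 quanta * 512 = 10,240,000 bit times: the "about 10M" above.
 * At 1 Gbit/s that is roughly 10.24 ms of requested pause. */
static const unsigned long pause_bit_times = 20000UL * PAUSE_QUANTUM_BIT_TIMES;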
Example #14
static int cvm_oct_rgmii_rml_interrupt(void *dev_id)
{
	cvmx_npi_rsl_int_blocks_t rsl_int_blocks;
	int index;
	int return_status = FILTER_STRAY;

	rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS);

	/* Check and see if this interrupt was caused by the GMX0 block */
	if (rsl_int_blocks.s.gmx0) {

		int interface = 0;
		/* Loop through every port of this interface */
		for (index = 0; index < cvmx_helper_ports_on_interface(interface); index++) {

			/* Read the GMX interrupt status bits */
			cvmx_gmxx_rxx_int_reg_t gmx_rx_int_reg;
			gmx_rx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, interface));
			gmx_rx_int_reg.u64 &= cvmx_read_csr(CVMX_GMXX_RXX_INT_EN(index, interface));
			/* Poll the port if inband status changed */
			if (gmx_rx_int_reg.s.phy_dupx || gmx_rx_int_reg.s.phy_link || gmx_rx_int_reg.s.phy_spd) {

				struct ifnet *ifp = cvm_oct_device[cvmx_helper_get_ipd_port(interface, index)];
				if (ifp)
					cvm_oct_rgmii_poll(ifp);
				gmx_rx_int_reg.u64 = 0;
				gmx_rx_int_reg.s.phy_dupx = 1;
				gmx_rx_int_reg.s.phy_link = 1;
				gmx_rx_int_reg.s.phy_spd = 1;
				cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface), gmx_rx_int_reg.u64);
				return_status = FILTER_HANDLED;
			}
		}
	}

	/* Check and see if this interrupt was caused by the GMX1 block */
	if (rsl_int_blocks.s.gmx1) {

		int interface = 1;
		/* Loop through every port of this interface */
		for (index = 0; index < cvmx_helper_ports_on_interface(interface); index++) {

			/* Read the GMX interrupt status bits */
			cvmx_gmxx_rxx_int_reg_t gmx_rx_int_reg;
			gmx_rx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, interface));
			gmx_rx_int_reg.u64 &= cvmx_read_csr(CVMX_GMXX_RXX_INT_EN(index, interface));
			/* Poll the port if inband status changed */
			if (gmx_rx_int_reg.s.phy_dupx || gmx_rx_int_reg.s.phy_link || gmx_rx_int_reg.s.phy_spd) {

				struct ifnet *ifp = cvm_oct_device[cvmx_helper_get_ipd_port(interface, index)];
				if (ifp)
					cvm_oct_rgmii_poll(ifp);
				gmx_rx_int_reg.u64 = 0;
				gmx_rx_int_reg.s.phy_dupx = 1;
				gmx_rx_int_reg.s.phy_link = 1;
				gmx_rx_int_reg.s.phy_spd = 1;
				cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface), gmx_rx_int_reg.u64);
				return_status = FILTER_HANDLED;
			}
		}
	}
	return return_status;
}
Example #15
static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;
	int mtu_overhead = ETH_HLEN + ETH_FCS_LEN;

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	mtu_overhead += VLAN_HLEN;
#endif

	octeon_mdiobus_force_mod_depencency();

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	if (receive_group_order) {
		if (receive_group_order > 4)
			receive_group_order = 4;
		pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
	} else {
		pow_receive_groups = BIT(pow_receive_group);
	}

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));

			if (receive_group_order) {
				int tag_mask;

				/* We support only 16 groups at the moment, so
				 * always disable the two additional "hidden"
				 * tag_mask bits on CN68XX.
				 */
				if (OCTEON_IS_MODEL(OCTEON_CN68XX))
					pip_prt_tagx.u64 |= 0x3ull << 44;

				tag_mask = ~((1 << receive_group_order) - 1);
				pip_prt_tagx.s.grptagbase	= 0;
				pip_prt_tagx.s.grptagmask	= tag_mask;
				pip_prt_tagx.s.grptag		= 1;
				pip_prt_tagx.s.tag_mode		= 0;
				pip_prt_tagx.s.inc_prt_flag	= 1;
				pip_prt_tagx.s.ip6_dprt_flag	= 1;
				pip_prt_tagx.s.ip4_dprt_flag	= 1;
				pip_prt_tagx.s.ip6_sprt_flag	= 1;
				pip_prt_tagx.s.ip4_sprt_flag	= 1;
				pip_prt_tagx.s.ip6_dst_flag	= 1;
				pip_prt_tagx.s.ip4_dst_flag	= 1;
				pip_prt_tagx.s.ip6_src_flag	= 1;
				pip_prt_tagx.s.ip4_src_flag	= 1;
				pip_prt_tagx.s.grp		= 0;
			} else {
				pip_prt_tagx.s.grptag	= 0;
				pip_prt_tagx.s.grp	= pow_receive_group;
			}

			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if ((pow_send_group != -1)) {
		struct net_device *dev;

		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			SET_NETDEV_DEV(dev, &pdev->dev);
			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
			dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
		    cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
			    alloc_etherdev(sizeof(struct octeon_ethernet));
			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			SET_NETDEV_DEV(dev, &pdev->dev);
			priv = netdev_priv(dev);
			priv->netdev = dev;
			priv->of_node = cvm_oct_node_for_port(pip, interface,
							      port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
			dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
			dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				cvm_set_rgmii_delay(priv->of_node, interface,
						    port_index);
				break;
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -=
				    cvmx_pko_get_num_queues(priv->port) *
				    sizeof(u32);
				schedule_delayed_work(&priv->port_periodic_work,
						      HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 uS: about 10 1500-byte packets at 1GE.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);

	return 0;
}
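The receive_group_order block near the top of this probe is easiest to follow with concrete numbers; a sketch, where receive_group_order = 2 is an assumed example value:

static void group_math_example(void)
{
	int receive_group_order = 2;			  /* assumed value */
	int num_groups = 1 << receive_group_order;	  /* 1 << 2 = 4 groups */
	int pow_receive_groups = (1 << num_groups) - 1;	  /* 0xf: groups 0-3 */
	int tag_mask = ~((1 << receive_group_order) - 1); /* ~0x3: low tag bits
							     cleared so grptag
							     spreads flows over
							     the 4 groups */
	(void)pow_receive_groups;
	(void)tag_mask;
}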
Example #16
/**
 * Configure all of the ASX, GMX, and PKO registers required
 * to get RGMII to function on the supplied interface.
 *
 * @interface: PKO Interface to configure (0 or 1)
 *
 * Returns Zero on success
 */
int __cvmx_helper_rgmii_enable(int interface)
{
	int num_ports = cvmx_helper_ports_on_interface(interface);
	int port;
	struct cvmx_sysinfo *sys_info_ptr = cvmx_sysinfo_get();
	union cvmx_gmxx_inf_mode mode;
	union cvmx_asxx_tx_prt_en asx_tx;
	union cvmx_asxx_rx_prt_en asx_rx;

	mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));

	if (mode.s.en == 0)
		return -1;
	if ((OCTEON_IS_MODEL(OCTEON_CN38XX) ||
	     OCTEON_IS_MODEL(OCTEON_CN58XX)) && mode.s.type == 1)
		/* Ignore SPI interfaces */
		return -1;

	/* Configure the ASX registers needed to use the RGMII ports */
	asx_tx.u64 = 0;
	asx_tx.s.prt_en = cvmx_build_mask(num_ports);
	cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), asx_tx.u64);

	asx_rx.u64 = 0;
	asx_rx.s.prt_en = cvmx_build_mask(num_ports);
	cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), asx_rx.u64);

	/* Configure the GMX registers needed to use the RGMII ports */
	for (port = 0; port < num_ports; port++) {
		/* Setting of CVMX_GMXX_TXX_THRESH has been moved to
		   __cvmx_helper_setup_gmx() */

		if (cvmx_octeon_is_pass1())
			__cvmx_helper_errata_asx_pass1(interface, port,
						       sys_info_ptr->
						       cpu_clock_hz);
		else {
			/*
			 * Configure more flexible RGMII preamble
			 * checking. Pass 1 doesn't support this
			 * feature.
			 */
			union cvmx_gmxx_rxx_frm_ctl frm_ctl;
			frm_ctl.u64 =
			    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL
					  (port, interface));
			/* New field, so must be compile time */
			frm_ctl.s.pre_free = 1;
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(port, interface),
				       frm_ctl.u64);
		}

		/*
		 * Each pause frame transmitted will ask for about 10M
		 * bit times before resume.  If buffer space comes
		 * available before that time has expired, an XON
		 * pause frame (0 time) will be transmitted to restart
		 * the flow.
		 */
		cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_TIME(port, interface),
			       20000);
		cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL
			       (port, interface), 19000);

		if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
			cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface),
				       16);
			cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface),
				       16);
		} else {
			cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface),
				       24);
			cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface),
				       24);
		}
	}

	__cvmx_helper_setup_gmx(interface, num_ports);

	/* enable the ports now */
	for (port = 0; port < num_ports; port++) {
		union cvmx_gmxx_prtx_cfg gmx_cfg;
		cvmx_helper_link_autoconf(cvmx_helper_get_ipd_port
					  (interface, port));
		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(port, interface));
		gmx_cfg.s.en = 1;
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(port, interface),
			       gmx_cfg.u64);
	}
	__cvmx_interrupt_asxx_enable(interface);
	__cvmx_interrupt_gmxx_enable(interface);

	return 0;
}
Example #17
static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
{
	union cvmx_npi_rsl_int_blocks rsl_int_blocks;
	int index;
	irqreturn_t return_status = IRQ_NONE;

	rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS);

	/* Check and see if this interrupt was caused by the GMX0 block */
	if (rsl_int_blocks.s.gmx0) {

		int interface = 0;
		/* Loop through every port of this interface */
		for (index = 0;
		     index < cvmx_helper_ports_on_interface(interface);
		     index++) {

			/* Read the GMX interrupt status bits */
			union cvmx_gmxx_rxx_int_reg gmx_rx_int_reg;
			gmx_rx_int_reg.u64 =
			    cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
					  (index, interface));
			gmx_rx_int_reg.u64 &=
			    cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
					  (index, interface));
			/* Poll the port if inband status changed */
			if (gmx_rx_int_reg.s.phy_dupx
			    || gmx_rx_int_reg.s.phy_link
			    || gmx_rx_int_reg.s.phy_spd) {

				struct net_device *dev =
				    cvm_oct_device[cvmx_helper_get_ipd_port
						   (interface, index)];
				if (dev)
					cvm_oct_rgmii_poll(dev);
				gmx_rx_int_reg.u64 = 0;
				gmx_rx_int_reg.s.phy_dupx = 1;
				gmx_rx_int_reg.s.phy_link = 1;
				gmx_rx_int_reg.s.phy_spd = 1;
				cvmx_write_csr(CVMX_GMXX_RXX_INT_REG
					       (index, interface),
					       gmx_rx_int_reg.u64);
				return_status = IRQ_HANDLED;
			}
		}
	}

	/* Check and see if this interrupt was caused by the GMX1 block */
	if (rsl_int_blocks.s.gmx1) {

		int interface = 1;
		/* Loop through every port of this interface */
		for (index = 0;
		     index < cvmx_helper_ports_on_interface(interface);
		     index++) {

			/* Read the GMX interrupt status bits */
			union cvmx_gmxx_rxx_int_reg gmx_rx_int_reg;
			gmx_rx_int_reg.u64 =
			    cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
					  (index, interface));
			gmx_rx_int_reg.u64 &=
			    cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
					  (index, interface));
			/* Poll the port if inband status changed */
			if (gmx_rx_int_reg.s.phy_dupx
			    || gmx_rx_int_reg.s.phy_link
			    || gmx_rx_int_reg.s.phy_spd) {

				struct net_device *dev =
				    cvm_oct_device[cvmx_helper_get_ipd_port
						   (interface, index)];
				if (dev)
					cvm_oct_rgmii_poll(dev);
				gmx_rx_int_reg.u64 = 0;
				gmx_rx_int_reg.s.phy_dupx = 1;
				gmx_rx_int_reg.s.phy_link = 1;
				gmx_rx_int_reg.s.phy_spd = 1;
				cvmx_write_csr(CVMX_GMXX_RXX_INT_REG
					       (index, interface),
					       gmx_rx_int_reg.u64);
				return_status = IRQ_HANDLED;
			}
		}
	}
	return return_status;
}