void oct_tx_process_hw_work(cvmx_wqe_t *work, uint32_t outport)
{
	uint64_t queue = cvmx_pko_get_base_queue(outport);

	cvmx_pko_send_packet_prepare(outport, queue, CVMX_PKO_LOCK_CMD_QUEUE);

    /* Build a PKO pointer to this packet */
    cvmx_pko_command_word0_t pko_command;
    pko_command.u64 = 0;
    pko_command.s.segs = work->word2.s.bufs;
    pko_command.s.total_bytes = cvmx_wqe_get_len(work);

    /* Send the packet */
    cvmx_pko_return_value_t send_status = cvmx_pko_send_packet_finish(outport, queue, pko_command, work->packet_ptr, CVMX_PKO_LOCK_CMD_QUEUE);
    if (send_status != CVMX_PKO_SUCCESS)
    {
        printf("Failed to send packet using cvmx_pko_send_packet2\n");
        cvmx_helper_free_packet_data(work);
		STAT_TX_HW_SEND_ERR;
    }
	else
	{
		STAT_TX_SEND_OVER;
	}

    cvmx_fpa_free(work, wqe_pool, 0);
}
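For context, here is a minimal sketch of how a WQE-based transmit routine like the one above is typically driven from the POW/SSO work loop. The cvmx_* calls are standard SDK API; the loop itself and the reuse of wqe_pool are assumptions about the surrounding application, not taken from the example.

/* Hypothetical forwarding loop: receive work and send it back out the
 * port it arrived on, using the transmit routine above. */
void oct_forward_loop(void)
{
    while (1) {
        cvmx_wqe_t *work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
        if (work == NULL)
            continue;                        /* nothing pending */

        if (work->word2.s.rcv_error) {       /* drop packets PIP/IPD flagged as bad */
            cvmx_helper_free_packet_data(work);
            cvmx_fpa_free(work, wqe_pool, 0);
            continue;
        }

        oct_tx_process_hw_work(work, cvmx_wqe_get_port(work));
    }
}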
void oct_tx_process_hw(mbuf_t *mbuf, uint32_t outport)
{
    uint64_t queue;

    /* Build a PKO pointer to this packet */
    cvmx_pko_return_value_t send_status;
    cvmx_pko_command_word0_t pko_command;

    queue = cvmx_pko_get_base_queue(outport);

    cvmx_pko_send_packet_prepare(outport, queue, CVMX_PKO_LOCK_CMD_QUEUE);

    pko_command.u64 = 0;
    pko_command.s.segs = 1;
    pko_command.s.total_bytes = mbuf->pkt_totallen;

    /* Send the packet */
    send_status = cvmx_pko_send_packet_finish(outport, queue, pko_command, mbuf->packet_ptr, CVMX_PKO_LOCK_CMD_QUEUE);
    if (send_status != CVMX_PKO_SUCCESS)
    {
        STAT_TX_HW_SEND_ERR;
        PACKET_DESTROY_DATA(mbuf);
    }
    else
    {
        STAT_TX_SEND_OVER;
    }
    MBUF_FREE(mbuf);
}
void cvmx_pko_initialize_global(void)
{
    int i;
    uint64_t priority = 8;
    union cvmx_pko_reg_cmd_buf config;

    /*
     * Set the size of the PKO command buffers to an odd number of
     * 64bit words. This allows the normal two word send to stay
     * aligned and never span a command word buffer.
     */
    config.u64 = 0;
    config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
    config.s.size = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE / 8 - 1;

    cvmx_write_csr(CVMX_PKO_REG_CMD_BUF, config.u64);

    for (i = 0; i < CVMX_PKO_MAX_OUTPUT_QUEUES; i++)
        cvmx_pko_config_port(CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID, i, 1,
                             &priority);

    /*
     * If we aren't using all of the queues optimize PKO's
     * internal memory.
     */
    if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)
            || OCTEON_IS_MODEL(OCTEON_CN56XX)
            || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
        int num_interfaces = cvmx_helper_get_number_of_interfaces();
        int last_port =
            cvmx_helper_get_last_ipd_port(num_interfaces - 1);
        int max_queues =
            cvmx_pko_get_base_queue(last_port) +
            cvmx_pko_get_num_queues(last_port);
        if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
            if (max_queues <= 32)
                cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
            else if (max_queues <= 64)
                cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
        } else {
            if (max_queues <= 64)
                cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
            else if (max_queues <= 128)
                cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
        }
    }
}
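Two notes on the function above. First, the odd command-buffer size the comment asks for falls out of the arithmetic: with a 1024-byte CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, config.s.size becomes 1024/8 - 1 = 127, an odd number of 64-bit words. Second, applications rarely call cvmx_pko_initialize_global() directly; the helper layer normally runs it as part of packet-I/O bring-up. A rough sketch of that order, assuming the standard cvmx helper API (details vary between SDK releases):

/* Typical simple-executive bring-up order (sketch, not a complete program). */
cvmx_helper_initialize_packet_io_global();   /* once per system: runs
                                                cvmx_pko_initialize_global() and the
                                                IPD/PIP setup, and on most SDK versions
                                                enables PKO as well */

/* ...then on every core that will receive or transmit packets... */
cvmx_helper_initialize_packet_io_local();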
Example #4
/* IN PROGRESS */
void send_packet()
{
  uint8_t *buf, *pbuf; 
  uint64_t queue, length, buf_phys_addr;
  cvmx_pko_command_word0_t pko_command;
  cvmx_pko_return_value_t status;
  cvmx_buf_ptr_t hw_buffer;

  buf = (uint8_t *) cvmx_fpa_alloc(packet_pool);

  if (buf == NULL) {
    printf("ERROR: allocation from pool %" PRIu64 " failed!\n", packet_pool);
    return;
  } else {
    printf("Packet allocation successful!\n");
  }

  pbuf = build_packet(buf, PAYLOAD_SIZE);
  length = (uint64_t) (pbuf - buf);

  printf("buf : %p\n", buf);
  printf("pbuf: %p\n", pbuf);
  printf("diff: %" PRIu64 "\n", length);

  pko_command.u64 = 0;
  pko_command.s.segs = 1;
  pko_command.s.total_bytes = length;
  pko_command.s.dontfree = 1;

  buf_phys_addr = cvmx_ptr_to_phys(buf); 
  printf("buf_phys_addr: %" PRIu64 "\n", buf_phys_addr);

  hw_buffer.s.i = 0;
  hw_buffer.s.back = 0;
  hw_buffer.s.pool = packet_pool; // the pool that the buffer came from
  hw_buffer.s.size = length; // the size of the segment pointed to by addr (in bytes)
  hw_buffer.s.addr = cvmx_ptr_to_phys(buf); // pointer to the first byte of the data

  queue = cvmx_pko_get_base_queue(xaui_ipd_port);
  printf("queue: %" PRIu64 "\n", queue);

  cvmx_pko_send_packet_prepare(xaui_ipd_port, queue, CVMX_PKO_LOCK_NONE);

  // THROWS EXCEPTION HERE
  status = cvmx_pko_send_packet_finish(xaui_ipd_port, queue, pko_command, hw_buffer, CVMX_PKO_LOCK_NONE);

  if (status == CVMX_PKO_SUCCESS) {
    printf("Succesfully sent packet!\n");
    cvmx_fpa_free(buf, packet_pool, 0);
  }
}
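Two things are worth checking in the snippet above. A crash inside cvmx_pko_send_packet_finish() usually points at the PKO command queue for that port/queue not having been set up (for example, cvmx_helper_initialize_packet_io_global() never ran on this configuration) rather than at the command words themselves; that is a guess from the symptom, not something the snippet proves. Independently of the crash, hw_buffer.u64 is never cleared, so stack garbage leaks into the reserved bits of the buffer-pointer word, and freeing buf right after the send is queued races with the hardware, which reads the data asynchronously. A corrected word-1 setup would look like:

  hw_buffer.u64 = 0;                        /* clear reserved bits first */
  hw_buffer.s.pool = packet_pool;           /* the pool that the buffer came from */
  hw_buffer.s.size = length;                /* bytes in this segment */
  hw_buffer.s.addr = cvmx_ptr_to_phys(buf); /* physical address of the data */

...with the cvmx_fpa_free() deferred until the packet is known to have been transmitted, or with dontfree cleared so PKO returns the buffer to the pool itself.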
Example #5
void S3_send_packet(cvmx_wqe_t * work)
{
		uint64_t        port;
		cvmx_buf_ptr_t  packet_ptr;
		cvmx_pko_command_word0_t pko_command;
		/* Build a PKO pointer to this packet */
		pko_command.u64 = 0;


		/* Errata PKI-100 fix. We need to fix chain pointers on segmented
		   packets. Although the size is also wrong on a single buffer packet,
		   PKO doesn't care so we ignore it */
		if (cvmx_unlikely(work->word2.s.bufs > 1))
				cvmx_helper_fix_ipd_packet_chain(work);

		port = work->ipprt;
		if( port >= portbase + portnum)
				port = work->ipprt - portnum;
		else
				port = work->ipprt + portnum;

		int queue = cvmx_pko_get_base_queue(port);
		cvmx_pko_send_packet_prepare(port, queue, CVMX_PKO_LOCK_ATOMIC_TAG);

		pko_command.s.total_bytes = work->len;
		pko_command.s.segs = work->word2.s.bufs;
		pko_command.s.ipoffp1 = 14 + 1;
		packet_ptr = work->packet_ptr;
		//cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 0);
		cvm_common_free_fpa_buffer(work, CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE / CVMX_CACHE_LINE_SIZE);
		work = NULL;

		/*
		 * Send the packet and wait for the tag switch to complete before
		 * accessing the output queue. This ensures the locking required
		 * for the queue.
		 *
		 */
		if (cvmx_pko_send_packet_finish(port, queue, pko_command, packet_ptr, CVMX_PKO_LOCK_ATOMIC_TAG))
		{
				printf("Failed to send packet using cvmx_pko_send_packet_finish\
								n");
		}
}
Example #6
void cvmx_pko_initialize_global(void)
{
	int i;
	uint64_t priority = 8;
	union cvmx_pko_reg_cmd_buf config;

	
	config.u64 = 0;
	config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
	config.s.size = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE / 8 - 1;

	cvmx_write_csr(CVMX_PKO_REG_CMD_BUF, config.u64);

	for (i = 0; i < CVMX_PKO_MAX_OUTPUT_QUEUES; i++)
		cvmx_pko_config_port(CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID, i, 1,
				     &priority);

	
	if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)
	    || OCTEON_IS_MODEL(OCTEON_CN56XX)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
		int num_interfaces = cvmx_helper_get_number_of_interfaces();
		int last_port =
		    cvmx_helper_get_last_ipd_port(num_interfaces - 1);
		int max_queues =
		    cvmx_pko_get_base_queue(last_port) +
		    cvmx_pko_get_num_queues(last_port);
		if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
			if (max_queues <= 32)
				cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
			else if (max_queues <= 64)
				cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
		} else {
			if (max_queues <= 64)
				cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
			else if (max_queues <= 128)
				cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
		}
	}
}
Example #7
static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;

	octeon_mdiobus_force_mod_depencency();

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}


	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if ((pow_send_group != -1)) {
		struct net_device *dev;

		pr_info("\tConfiguring device for POW only access\n");
		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
		    cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
			    alloc_etherdev(sizeof(struct octeon_ethernet));
			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			priv = netdev_priv(dev);
			priv->netdev = dev;
			priv->of_node = cvm_oct_node_for_port(pip, interface,
								port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
					 interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -=
				    cvmx_pko_get_num_queues(priv->port) *
				    sizeof(u32);
				schedule_delayed_work(&priv->port_periodic_work, HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 uS: about 10 1500-byte packets at 1GE.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);

	return 0;
}
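For completeness, a probe function like this is wired into the kernel through ordinary platform-driver boilerplate, roughly as sketched below. The cvm_oct_remove callback and the exact compatible string are assumptions for illustration, not taken from the excerpt above.

static const struct of_device_id cvm_oct_match[] = {
	{ .compatible = "cavium,octeon-3860-pip" },	/* assumed binding */
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cvm_oct_match);

static struct platform_driver cvm_oct_driver = {
	.probe	= cvm_oct_probe,
	.remove	= cvm_oct_remove,	/* assumed tear-down counterpart */
	.driver	= {
		.name		= KBUILD_MODNAME,
		.of_match_table	= cvm_oct_match,
	},
};

module_platform_driver(cvm_oct_driver);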
Example #8
static int cvm_oct_driver_probe(struct platform_device *dev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;

	octeon_mdiobus_force_mod_depencency();
	pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);

	if (OCTEON_IS_MODEL(OCTEON_CN52XX))
		cvm_oct_mac_addr_offset = 2; /* First two are the mgmt ports. */
	else if (OCTEON_IS_MODEL(OCTEON_CN56XX))
		cvm_oct_mac_addr_offset = 1; /* First one is the mgmt port. */
	else
		cvm_oct_mac_addr_offset = 0;

#if defined(CONFIG_SG590) || defined(CONFIG_SG770)
{
	union cvmx_gmxx_inf_mode mode;
	mode.u64 = 0;
	mode.s.en = 1;
	cvmx_write_csr(CVMX_GMXX_INF_MODE(0), mode.u64);
}
#endif
#ifdef CONFIG_SG8200
/*
 *	SG8200 directs the 2 internal ports out MII to 2 single 100Mbit
 *	interfaces. (The switch is on the 8139C+).
 */
{
	union cvmx_gmxx_inf_mode mode;
	mode.u64 = 0;
	mode.s.en = 1;
	mode.s.p0mii = 1;
	mode.s.type = 1;
	cvmx_write_csr(CVMX_GMXX_INF_MODE(0), mode.u64);
}
#endif

	cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
	if (cvm_oct_poll_queue == NULL) {
		pr_err("octeon-ethernet: Cannot create workqueue");
		return -ENOMEM;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;
			pip_prt_tagx.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if ((pow_send_group != -1)) {
		struct net_device *dev;
		pr_info("\tConfiguring device for POW only access\n");
		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
		    cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
			    alloc_etherdev(sizeof(struct octeon_ethernet));
			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n", port);
				continue;
			}

			/* Initialize the device private structure. */
			priv = netdev_priv(dev);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

			switch (priv->imode) {

			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device "
					 "for interface %d, port %d\n",
					 interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -=
				    cvmx_pko_get_num_queues(priv->port) *
				    sizeof(uint32_t);
				queue_delayed_work(cvm_oct_poll_queue,
						   &priv->port_periodic_work, HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 uS: about 10 1500-byte packets at 1GE.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);

#ifdef CONFIG_SG590
{
	void cvm_oct_marvel_switch_init(void);
	printk("cavium-ethernet: enabling Marvell 88e6046 switch: intra-switch forwarding disabled\n");
	cvm_oct_marvel_switch_init();
}
#endif

	return 0;
}
Example #9
/**
 * Module/ driver initialization. Creates the linux network
 * devices.
 *
 * @return Zero on success
 */
int cvm_oct_init_module(device_t bus)
{
	device_t dev;
	int ifnum;
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;

	cvm_oct_rx_initialize();
	cvm_oct_configure_common_hw(bus);

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = 0; port < num_ports; port++) {
			cvmx_pip_prt_tagx_t pip_prt_tagx;
			int pkind = cvmx_helper_get_ipd_port(interface, port);

			pip_prt_tagx.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(pkind));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(pkind), pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	cvm_oct_link_taskq = taskqueue_create("octe link", M_NOWAIT,
	    taskqueue_thread_enqueue, &cvm_oct_link_taskq);
	taskqueue_start_threads(&cvm_oct_link_taskq, 1, PI_NET,
	    "octe link taskq");

	/* Initialize the FAU used for counting packet buffers that need to be freed */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	ifnum = 0;
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode = cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     ifnum++, port++) {
			cvm_oct_private_t *priv;
			struct ifnet *ifp;
			
			dev = BUS_ADD_CHILD(bus, 0, "octe", ifnum);
			if (dev != NULL)
				ifp = if_alloc(IFT_ETHER);
			if (dev == NULL || ifp == NULL) {
				printf("Failed to allocate ethernet device for interface %d port %d\n", interface, port);
				continue;
			}

			/* Initialize the device private structure. */
			device_probe(dev);
			priv = device_get_softc(dev);
			priv->dev = dev;
			priv->ifp = ifp;
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < cvmx_pko_get_num_queues(port); qos++)
				cvmx_fau_atomic_write32(priv->fau+qos*4, 0);
			TASK_INIT(&priv->link_task, 0, cvm_oct_update_link, priv);

			switch (priv->imode) {

			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				priv->init = cvm_oct_common_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon NPI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				priv->init = cvm_oct_xaui_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon XAUI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				priv->init = cvm_oct_common_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon LOOP Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				priv->init = cvm_oct_sgmii_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon SGMII Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				priv->init = cvm_oct_spi_init;
				priv->uninit = cvm_oct_spi_uninit;
				device_set_desc(dev, "Cavium Octeon SPI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
				priv->init = cvm_oct_rgmii_init;
				priv->uninit = cvm_oct_rgmii_uninit;
				device_set_desc(dev, "Cavium Octeon RGMII Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_GMII:
				priv->init = cvm_oct_rgmii_init;
				priv->uninit = cvm_oct_rgmii_uninit;
				device_set_desc(dev, "Cavium Octeon GMII Ethernet");
				break;
			}

			ifp->if_softc = priv;

			if (!priv->init) {
				printf("octe%d: unsupported device type interface %d, port %d\n",
				       ifnum, interface, priv->port);
				if_free(ifp);
			} else if (priv->init(ifp) != 0) {
				printf("octe%d: failed to register device for interface %d, port %d\n",
				       ifnum, interface, priv->port);
				if_free(ifp);
			} else {
				cvm_oct_device[priv->port] = ifp;
				fau -= cvmx_pko_get_num_queues(priv->port) * sizeof(uint32_t);
			}
		}
	}

	if (INTERRUPT_LIMIT) {
		/* Set the POW timer rate to give an interrupt at most INTERRUPT_LIMIT times per second */
		cvmx_write_csr(CVMX_POW_WQ_INT_PC, cvmx_clock_get_rate(CVMX_CLOCK_CORE)/(INTERRUPT_LIMIT*16*256)<<8);

		/* Enable POW timer interrupt. It will count when there are packets available */
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1ful<<24);
	} else {
		/* Enable POW interrupt when our port has at least one packet */
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1001);
	}

	callout_init(&cvm_oct_poll_timer, 1);
	callout_reset(&cvm_oct_poll_timer, hz, cvm_do_timer, NULL);

	return 0;
}
Example #10
/**
 * @INTERNAL
 * Configure an IPD/PKO port for the specified link state. This
 * function does not influence auto negotiation at the PHY level.
 * The passed link state must always match the link state returned
 * by cvmx_helper_link_get(). It is normally best to use
 * cvmx_helper_link_autoconf() instead.
 *
 * @param ipd_port  IPD/PKO port to configure
 * @param link_info The new link state
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_rgmii_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
{
    int result = 0;
    int interface = cvmx_helper_get_interface_num(ipd_port);
    int index = cvmx_helper_get_interface_index_num(ipd_port);
    cvmx_gmxx_prtx_cfg_t original_gmx_cfg;
    cvmx_gmxx_prtx_cfg_t new_gmx_cfg;
    cvmx_pko_mem_queue_qos_t pko_mem_queue_qos;
    cvmx_pko_mem_queue_qos_t pko_mem_queue_qos_save[16];
    cvmx_gmxx_tx_ovr_bp_t gmx_tx_ovr_bp;
    cvmx_gmxx_tx_ovr_bp_t gmx_tx_ovr_bp_save;
    int i;

    /* Ignore speed sets in the simulator */
    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
        return 0;

    /* Read the current settings so we know the current enable state */
    original_gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
    new_gmx_cfg = original_gmx_cfg;

    /* Disable the lowest level RX */
    cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
                   cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) & ~(1<<index));

    memset(pko_mem_queue_qos_save, 0, sizeof(pko_mem_queue_qos_save));
    /* Disable all queues so that TX should become idle */
    for (i=0; i<cvmx_pko_get_num_queues(ipd_port); i++)
    {
        int queue = cvmx_pko_get_base_queue(ipd_port) + i;
        cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
        pko_mem_queue_qos.u64 = cvmx_read_csr(CVMX_PKO_MEM_QUEUE_QOS);
        pko_mem_queue_qos.s.pid = ipd_port;
        pko_mem_queue_qos.s.qid = queue;
        pko_mem_queue_qos_save[i] = pko_mem_queue_qos;
        pko_mem_queue_qos.s.qos_mask = 0;
        cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos.u64);
    }

    /* Disable backpressure */
    gmx_tx_ovr_bp.u64 = cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
    gmx_tx_ovr_bp_save = gmx_tx_ovr_bp;
    gmx_tx_ovr_bp.s.bp &= ~(1<<index);
    gmx_tx_ovr_bp.s.en |= 1<<index;
    cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp.u64);
    cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));

    /* Poll the GMX state machine waiting for it to become idle. Preferably we
        should only change speed when it is idle. If it doesn't become idle we
        will still do the speed change, but there is a slight chance that GMX
        will lockup */
    cvmx_write_csr(CVMX_NPI_DBG_SELECT, interface*0x800 + index*0x100 + 0x880);
    CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, cvmx_dbg_data_t, data&7, ==, 0, 10000);
    CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, cvmx_dbg_data_t, data&0xf, ==, 0, 10000);

    /* Disable the port before we make any changes */
    new_gmx_cfg.s.en = 0;
    cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));

    /* Set full/half duplex */
    if (!link_info.s.link_up)
        new_gmx_cfg.s.duplex = 1;   /* Force full duplex on down links */
    else
        new_gmx_cfg.s.duplex = link_info.s.full_duplex;

    /* Set the link speed. Anything unknown is set to 1Gbps */
    if (link_info.s.speed == 10)
    {
        new_gmx_cfg.s.slottime = 0;
        new_gmx_cfg.s.speed = 0;
    }
    else if (link_info.s.speed == 100)
    {
        new_gmx_cfg.s.slottime = 0;
        new_gmx_cfg.s.speed = 0;
    }
    else
    {
        new_gmx_cfg.s.slottime = 1;
        new_gmx_cfg.s.speed = 1;
    }

    /* Adjust the clocks */
    if (link_info.s.speed == 10)
    {
        cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 50);
        cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
        cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
    }
    else if (link_info.s.speed == 100)
    {
        cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 5);
        cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
        cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
    }
    else
    {
        cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
        cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
        cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
    }

    if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
    {
        if ((link_info.s.speed == 10) || (link_info.s.speed == 100))
        {
            cvmx_gmxx_inf_mode_t mode;
            mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));

            /*
            ** Port  .en  .type  .p0mii  Configuration
            ** ----  ---  -----  ------  -----------------------------------------
            **  X      0     X      X    All links are disabled.
            **  0      1     X      0    Port 0 is RGMII
            **  0      1     X      1    Port 0 is MII
            **  1      1     0      X    Ports 1 and 2 are configured as RGMII ports.
            **  1      1     1      X    Port 1: GMII/MII; Port 2: disabled. GMII or
            **                           MII port is selected by GMX_PRT1_CFG[SPEED].
            */

            /* In MII mode, CLK_CNT = 1. */
            if (((index == 0) && (mode.s.p0mii == 1)) || ((index != 0) && (mode.s.type == 1)))
            {
                cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
            }
        }
    }

    /* Do a read to make sure all setup stuff is complete */
    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));

    /* Save the new GMX setting without enabling the port */
    cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);

    /* Enable the lowest level RX */
    cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
                   cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) | (1<<index));

    /* Re-enable the TX path */
    for (i=0; i<cvmx_pko_get_num_queues(ipd_port); i++)
    {
        int queue = cvmx_pko_get_base_queue(ipd_port) + i;
        cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
        cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos_save[i].u64);
    }

    /* Restore backpressure */
    cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp_save.u64);

    /* Restore the GMX enable state. Port config is complete */
    new_gmx_cfg.s.en = original_gmx_cfg.s.en;
    cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);

    return result;
}
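As the header comment says, callers normally rely on cvmx_helper_link_autoconf() instead of invoking this routine directly; it reads the PHY, reprograms the port, and returns the resulting state. A minimal periodic poll for one IPD port, as a sketch on top of the standard helper API:

/* Hypothetical link poll for a single port. */
void poll_link(int ipd_port)
{
    cvmx_helper_link_info_t link = cvmx_helper_link_autoconf(ipd_port);

    if (link.s.link_up)
        printf("Port %d up: %u Mbps, %s duplex\n", ipd_port,
               (unsigned)link.s.speed, link.s.full_duplex ? "full" : "half");
    else
        printf("Port %d is down\n", ipd_port);
}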
Example #11
static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;
	int mtu_overhead = ETH_HLEN + ETH_FCS_LEN;

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	mtu_overhead += VLAN_HLEN;
#endif

	octeon_mdiobus_force_mod_depencency();

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	if (receive_group_order) {
		if (receive_group_order > 4)
			receive_group_order = 4;
		pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
	} else {
		pow_receive_groups = BIT(pow_receive_group);
	}

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));

			if (receive_group_order) {
				int tag_mask;

				/* We support only 16 groups at the moment, so
				 * always disable the two additional "hidden"
				 * tag_mask bits on CN68XX.
				 */
				if (OCTEON_IS_MODEL(OCTEON_CN68XX))
					pip_prt_tagx.u64 |= 0x3ull << 44;

				tag_mask = ~((1 << receive_group_order) - 1);
				pip_prt_tagx.s.grptagbase	= 0;
				pip_prt_tagx.s.grptagmask	= tag_mask;
				pip_prt_tagx.s.grptag		= 1;
				pip_prt_tagx.s.tag_mode		= 0;
				pip_prt_tagx.s.inc_prt_flag	= 1;
				pip_prt_tagx.s.ip6_dprt_flag	= 1;
				pip_prt_tagx.s.ip4_dprt_flag	= 1;
				pip_prt_tagx.s.ip6_sprt_flag	= 1;
				pip_prt_tagx.s.ip4_sprt_flag	= 1;
				pip_prt_tagx.s.ip6_dst_flag	= 1;
				pip_prt_tagx.s.ip4_dst_flag	= 1;
				pip_prt_tagx.s.ip6_src_flag	= 1;
				pip_prt_tagx.s.ip4_src_flag	= 1;
				pip_prt_tagx.s.grp		= 0;
			} else {
				pip_prt_tagx.s.grptag	= 0;
				pip_prt_tagx.s.grp	= pow_receive_group;
			}

			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if ((pow_send_group != -1)) {
		struct net_device *dev;

		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			SET_NETDEV_DEV(dev, &pdev->dev);
			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
			dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
		    cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
			    alloc_etherdev(sizeof(struct octeon_ethernet));
			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			SET_NETDEV_DEV(dev, &pdev->dev);
			priv = netdev_priv(dev);
			priv->netdev = dev;
			priv->of_node = cvm_oct_node_for_port(pip, interface,
							      port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
			dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
			dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				cvm_set_rgmii_delay(priv->of_node, interface,
						    port_index);
				break;
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -=
				    cvmx_pko_get_num_queues(priv->port) *
				    sizeof(u32);
				schedule_delayed_work(&priv->port_periodic_work,
						      HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 uS: about 10 1500-byte packets at 1GE.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);

	return 0;
}
Example #12
void oct_tx_process_sw(mbuf_t *mbuf, uint8_t outport)
{
    uint64_t queue;
    cvmx_pko_return_value_t send_status;

    uint8_t *dont_free_cookie = NULL;

    queue = cvmx_pko_get_base_queue(outport);

    cvmx_pko_send_packet_prepare(outport, queue, CVMX_PKO_LOCK_CMD_QUEUE);

    tx_done_t *tx_done = &(oct_stx[LOCAL_CPU_ID]->tx_done[outport]);
    if(tx_done->tx_entries < (OCT_PKO_TX_DESC_NUM - 1))
    {
        dont_free_cookie = oct_pend_tx_done_add(tx_done, (void *)mbuf);
    }
    else
    {
        PACKET_DESTROY_ALL(mbuf);
        STAT_TX_SW_DESC_ERR;
        return;
    }
    /*command word0*/
    cvmx_pko_command_word0_t pko_command;
    pko_command.u64 = 0;

    pko_command.s.segs = 1;
    pko_command.s.total_bytes = mbuf->pkt_totallen;

    pko_command.s.rsp = 1;
    pko_command.s.dontfree = 1;

    /*command word1*/
    cvmx_buf_ptr_t packet;
    packet.u64 = 0;
    packet.s.size = mbuf->pkt_totallen;
    packet.s.addr = (uint64_t)mbuf->pkt_ptr;

    /*command word2*/
    cvmx_pko_command_word2_t tx_ptr_word;
    tx_ptr_word.u64 = 0;
    tx_ptr_word.s.ptr = (uint64_t)cvmx_ptr_to_phys(dont_free_cookie);

    /* Send the packet */
    send_status = cvmx_pko_send_packet_finish3(outport, queue, pko_command, packet, tx_ptr_word.u64, CVMX_PKO_LOCK_CMD_QUEUE);
    if(send_status != CVMX_PKO_SUCCESS)
    {
        if(dont_free_cookie)
        {
            oct_pend_tx_done_remove(tx_done);
        }

        PACKET_DESTROY_ALL(mbuf);
        STAT_TX_SW_SEND_ERR;
        return;
    }
    else
    {
        STAT_TX_SEND_OVER;
    }

}
Example #13
void oct_tx_process_mbuf(mbuf_t *mbuf, uint8_t port)
{
	uint64_t queue;
	cvmx_pko_return_value_t send_status;
	
	if(port > OCT_PHY_PORT_MAX)
	{
		printf("Send port is invalid");
		PACKET_DESTROY_ALL(mbuf);
		STAT_TX_SEND_PORT_ERR;
		return;
	}

	queue = cvmx_pko_get_base_queue(port);

	cvmx_pko_send_packet_prepare(port, queue, CVMX_PKO_LOCK_CMD_QUEUE);


	if(PKTBUF_IS_HW(mbuf))
	{
		/* Build a PKO pointer to this packet */
		cvmx_pko_command_word0_t pko_command;
		pko_command.u64 = 0;
		pko_command.s.segs = 1;
		pko_command.s.total_bytes = mbuf->pkt_totallen;

		/* Send the packet */
		send_status = cvmx_pko_send_packet_finish(port, queue, pko_command, mbuf->packet_ptr, CVMX_PKO_LOCK_CMD_QUEUE);
		if (send_status != CVMX_PKO_SUCCESS)
	    {
	        printf("Failed to send packet using cvmx_pko_send_packet2\n");
			STAT_TX_HW_SEND_ERR;
	        PACKET_DESTROY_DATA(mbuf);
	    }

		MBUF_FREE(mbuf);
	}
	else if(PKTBUF_IS_SW(mbuf))
	{
		uint8_t *dont_free_cookie = NULL;
		tx_done_t *tx_done = &(oct_stx[local_cpu_id]->tx_done[port]);
		if(tx_done->tx_entries < (OCT_PKO_TX_DESC_NUM - 1))
		{
			dont_free_cookie = oct_pend_tx_done_add(tx_done, (void *)mbuf);			
		}
		else
		{
			PACKET_DESTROY_ALL(mbuf);
			STAT_TX_SW_DESC_ERR;
			return;
		}
		/*command word0*/
		cvmx_pko_command_word0_t pko_command;
		pko_command.u64 = 0;
		
		pko_command.s.segs = 1;
		pko_command.s.total_bytes = mbuf->pkt_totallen;

		pko_command.s.rsp = 1;
		pko_command.s.dontfree = 1;

		/*command word1*/
		cvmx_buf_ptr_t packet;
		packet.u64 = 0;
		packet.s.size = mbuf->pkt_totallen;
		packet.s.addr = (uint64_t)mbuf->pkt_ptr;

		/*command word2*/
		cvmx_pko_command_word2_t tx_ptr_word;
		tx_ptr_word.u64 = 0;
		tx_ptr_word.s.ptr = (uint64_t)cvmx_ptr_to_phys(dont_free_cookie);

		/* Send the packet */
		send_status = cvmx_pko_send_packet_finish3(port, queue, pko_command, packet, tx_ptr_word.u64, CVMX_PKO_LOCK_CMD_QUEUE);
		if(send_status != CVMX_PKO_SUCCESS)
		{
			if(dont_free_cookie)
			{
				oct_pend_tx_done_remove(tx_done);
			}

			printf("Failed to send packet using cvmx_pko_send_packet3\n");
			
	        PACKET_DESTROY_ALL(mbuf);
			STAT_TX_SW_SEND_ERR;
			return;
		}
	}
	else
	{
		printf("pkt space %d is wrong, please check it\n", PKTBUF_SPACE_GET(mbuf));
	}

	STAT_TX_SEND_OVER;
	
}