Code Example #1
void oct_tx_process_hw_work(cvmx_wqe_t *work, uint32_t outport)
{
    uint64_t queue = cvmx_pko_get_base_queue(outport);

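    /* prepare/finish must be called as a pair with the same port, queue and
       lock mode: with CVMX_PKO_LOCK_ATOMIC_TAG prepare performs the tag switch,
       while with CVMX_PKO_LOCK_CMD_QUEUE the queue locking happens in finish. */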
    cvmx_pko_send_packet_prepare(outport, queue, CVMX_PKO_LOCK_CMD_QUEUE);

    /* Build a PKO pointer to this packet */
    cvmx_pko_command_word0_t pko_command;
    pko_command.u64 = 0;
    pko_command.s.segs = work->word2.s.bufs;
    pko_command.s.total_bytes = cvmx_wqe_get_len(work);

    /* Send the packet */
    cvmx_pko_return_value_t send_status = cvmx_pko_send_packet_finish(outport, queue, pko_command, work->packet_ptr, CVMX_PKO_LOCK_CMD_QUEUE);
    if (send_status != CVMX_PKO_SUCCESS)
    {
        printf("Failed to send packet using cvmx_pko_send_packet_finish\n");
        cvmx_helper_free_packet_data(work);
        STAT_TX_HW_SEND_ERR;
    }
    else
    {
        STAT_TX_SEND_OVER;
    }

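    /* Return the WQE itself to its FPA pool; the packet data buffers are
       freed by PKO after transmit (or by the error path above). */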
    cvmx_fpa_free(work, wqe_pool, 0);
}
Code Example #2
void oct_tx_process_hw(mbuf_t *mbuf, uint32_t outport)
{
    uint64_t queue;

    /* Build a PKO pointer to this packet */
    cvmx_pko_return_value_t send_status;
    cvmx_pko_command_word0_t pko_command;

    queue = cvmx_pko_get_base_queue(outport);

    cvmx_pko_send_packet_prepare(outport, queue, CVMX_PKO_LOCK_CMD_QUEUE);

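    /* Single hardware segment; with dontfree left clear, PKO returns the
       packet buffer to its FPA pool once transmission completes. */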
    pko_command.u64 = 0;
    pko_command.s.segs = 1;
    pko_command.s.total_bytes = mbuf->pkt_totallen;

    /* Send the packet */
    send_status = cvmx_pko_send_packet_finish(outport, queue, pko_command, mbuf->packet_ptr, CVMX_PKO_LOCK_CMD_QUEUE);
    if (send_status != CVMX_PKO_SUCCESS)
    {
        STAT_TX_HW_SEND_ERR;
        PACKET_DESTROY_DATA(mbuf);
    }
    else
    {
        STAT_TX_SEND_OVER;
    }
    MBUF_FREE(mbuf);
}
Code Example #3
/* IN PROGRESS */
void send_packet()
{
  uint8_t *buf, *pbuf; 
  uint64_t queue, length, buf_phys_addr;
  cvmx_pko_command_word0_t pko_command;
  cvmx_pko_return_value_t status;
  cvmx_buf_ptr_t hw_buffer;

  buf = (uint8_t *) cvmx_fpa_alloc(packet_pool);

  if (buf == NULL) {
    printf("ERROR: allocation from pool %" PRIu64 " failed!\n", packet_pool);
    return;
  } else {
    printf("Packet allocation successful!\n");
  }

  pbuf = build_packet(buf, PAYLOAD_SIZE);
  length = (uint64_t) (pbuf - buf);

  printf("buf : %p\n", buf);
  printf("pbuf: %p\n", pbuf);
  printf("diff: %" PRIu64 "\n", length);

  pko_command.u64 = 0;
  pko_command.s.segs = 1;
  pko_command.s.total_bytes = length;
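  /* dontfree tells PKO not to return this buffer to the FPA after transmit,
     so the buffer stays owned by (and must be freed by) this code. */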
  pko_command.s.dontfree = 1;

  buf_phys_addr = cvmx_ptr_to_phys(buf); 
  printf("buf_phys_addr: %" PRIu64 "\n", buf_phys_addr);

  hw_buffer.u64 = 0;                /* clear all bits before filling in individual fields */
  hw_buffer.s.i = 0;
  hw_buffer.s.back = 0;
  hw_buffer.s.pool = packet_pool;   // the pool that the buffer came from
  hw_buffer.s.size = length;        // the size of the segment pointed to by addr (in bytes)
  hw_buffer.s.addr = buf_phys_addr; // physical address of the first byte of the data

  queue = cvmx_pko_get_base_queue(xaui_ipd_port);
  printf("queue: %" PRIu64 "\n", queue);

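  /* CVMX_PKO_LOCK_NONE performs no locking, so the caller must guarantee that
     no other core writes to this output queue concurrently. */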
  cvmx_pko_send_packet_prepare(xaui_ipd_port, queue, CVMX_PKO_LOCK_NONE);

  // THROWS EXCEPTION HERE
  status = cvmx_pko_send_packet_finish(xaui_ipd_port, queue, pko_command, hw_buffer, CVMX_PKO_LOCK_NONE);

  if (status == CVMX_PKO_SUCCESS) {
    printf("Successfully sent packet!\n");
    cvmx_fpa_free(buf, packet_pool, 0);
  } else {
    printf("ERROR: cvmx_pko_send_packet_finish returned %d\n", (int) status);
    cvmx_fpa_free(buf, packet_pool, 0); /* don't leak the buffer on a failed send */
  }
}
Code Example #4
File: inicdata.test.c  Project: Silverglass/S3-swift
void S3_send_packet(cvmx_wqe_t * work)
{
		uint64_t        port;
		cvmx_buf_ptr_t  packet_ptr;
		cvmx_pko_command_word0_t pko_command;
		/* Build a PKO pointer to this packet */
		pko_command.u64 = 0;


		/* Errata PKI-100 fix. We need to fix chain pointers on segmented
		   packets. Although the size is also wrong on a single buffer packet,
		   PKO doesn't care so we ignore it. */
		if (cvmx_unlikely(work->word2.s.bufs > 1))
				cvmx_helper_fix_ipd_packet_chain(work);

		port = work->ipprt;
		if( port >= portbase + portnum)
				port = work->ipprt - portnum;
		else
				port = work->ipprt + portnum;

		int queue = cvmx_pko_get_base_queue(port);
		cvmx_pko_send_packet_prepare(port, queue, CVMX_PKO_LOCK_ATOMIC_TAG);

		pko_command.s.total_bytes = work->len;
		pko_command.s.segs = work->word2.s.bufs;
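		/* ipoffp1 is the IP header offset plus one (14-byte Ethernet header here);
		   a nonzero value asks PKO to insert the IP/TCP/UDP checksums in hardware. */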
		pko_command.s.ipoffp1 = 14 + 1;
		packet_ptr = work->packet_ptr;
		//cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 0);
		cvm_common_free_fpa_buffer(work, CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE / CVMX_CACHE_LINE_SIZE);
		work = NULL;

		/*
		 * Send the packet and wait for the tag switch to complete before
		 * accessing the output queue. This ensures the locking required
		 * for the queue.
		 *
		 */
		if (cvmx_pko_send_packet_finish(port, queue, pko_command, packet_ptr, CVMX_PKO_LOCK_ATOMIC_TAG))
		{
				printf("Failed to send packet using cvmx_pko_send_packet_finish\n");
		}
}
Code Example #5
void oct_tx_process_sw(mbuf_t *mbuf, uint8_t outport)
{
    uint64_t queue;
    cvmx_pko_return_value_t send_status;

    uint8_t *dont_free_cookie = NULL;

    queue = cvmx_pko_get_base_queue(outport);

    cvmx_pko_send_packet_prepare(outport, queue, CVMX_PKO_LOCK_CMD_QUEUE);

    tx_done_t *tx_done = &(oct_stx[LOCAL_CPU_ID]->tx_done[outport]);
    if(tx_done->tx_entries < (OCT_PKO_TX_DESC_NUM - 1))
    {
        dont_free_cookie = oct_pend_tx_done_add(tx_done, (void *)mbuf);
    }
    else
    {
        PACKET_DESTROY_ALL(mbuf);
        STAT_TX_SW_DESC_ERR;
        return;
    }
    /*command word0*/
    cvmx_pko_command_word0_t pko_command;
    pko_command.u64 = 0;

    pko_command.s.segs = 1;
    pko_command.s.total_bytes = mbuf->pkt_totallen;

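    /* rsp=1 asks PKO for a completion report and dontfree=1 keeps the buffer
       out of the FPA; the cookie address passed in command word 2 below is
       what software later checks to know the hardware is done with this mbuf. */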
    pko_command.s.rsp = 1;
    pko_command.s.dontfree = 1;

    /*command word1*/
    cvmx_buf_ptr_t packet;
    packet.u64 = 0;
    packet.s.size = mbuf->pkt_totallen;
    packet.s.addr = (uint64_t)mbuf->pkt_ptr;

    /*command word2*/
    cvmx_pko_command_word2_t tx_ptr_word;
    tx_ptr_word.u64 = 0;
    tx_ptr_word.s.ptr = (uint64_t)cvmx_ptr_to_phys(dont_free_cookie);

    /* Send the packet */
    send_status = cvmx_pko_send_packet_finish3(outport, queue, pko_command, packet, tx_ptr_word.u64, CVMX_PKO_LOCK_CMD_QUEUE);
    if(send_status != CVMX_PKO_SUCCESS)
    {
        if(dont_free_cookie)
        {
            oct_pend_tx_done_remove(tx_done);
        }

        PACKET_DESTROY_ALL(mbuf);
        STAT_TX_SW_SEND_ERR;
        return;
    }
    else
    {
        STAT_TX_SEND_OVER;
    }

}
Code Example #6
File: ethernet-tx.c  Project: 2trill2spill/freebsd
/**
 * Packet transmit
 *
 * @param m    Packet to send
 * @param ifp  Interface to send the packet on
 * @return 0 on success, nonzero if the packet was dropped
 */
int cvm_oct_xmit(struct mbuf *m, struct ifnet *ifp)
{
	cvmx_pko_command_word0_t    pko_command;
	cvmx_buf_ptr_t              hw_buffer;
	int                         dropped;
	int                         qos;
	cvm_oct_private_t          *priv = (cvm_oct_private_t *)ifp->if_softc;
	int32_t in_use;
	int32_t buffers_to_free;
	cvmx_wqe_t *work;

	/* Prefetch the private data structure.
	   It is larger than one cache line */
	CVMX_PREFETCH(priv, 0);

	/* Start off assuming no drop */
	dropped = 0;

	/* The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to completely
	   remove "qos" in the event neither interface supports multiple queues
	   per port */
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		qos = GET_MBUF_QOS(m);
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else
		qos = 0;

	/* The CN3XXX series of parts has an errata (GMX-401) which causes the
	   GMX block to hang if a collision occurs towards the end of a
	   <68 byte packet. As a workaround for this, we pad packets to be
	   68 bytes whenever we are in half duplex mode. We don't handle
	   the case of having a small packet but no room to add the padding.
	   The kernel should always give us at least a cache line */
	if (__predict_false(m->m_pkthdr.len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		cvmx_gmxx_prtx_cfg_t gmx_prt_cfg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		if (interface < 2) {
			/* We only need to pad packet in half duplex mode */
			gmx_prt_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
			if (gmx_prt_cfg.s.duplex == 0) {
				static uint8_t pad[64];

				if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad))
					printf("%s: unable to padd small packet.", __func__);
			}
		}
	}

#ifdef OCTEON_VENDOR_RADISYS
	/*
	 * The RSYS4GBE will hang if asked to transmit a packet less than 60 bytes.
	 */
	if (__predict_false(m->m_pkthdr.len < 60) &&
	    cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE) {
		static uint8_t pad[60];

		if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad))
			printf("%s: unable to pad small packet.", __func__);
	}
#endif

	/*
	 * If the packet is not fragmented.
	 */
	if (m->m_pkthdr.len == m->m_len) {
		/* Build the PKO buffer pointer */
		hw_buffer.u64 = 0;
		hw_buffer.s.addr = cvmx_ptr_to_phys(m->m_data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = m->m_len;

		/* Build the PKO command */
		pko_command.u64 = 0;
		pko_command.s.segs = 1;
		pko_command.s.dontfree = 1; /* Do not put this buffer into the FPA.  */

		work = NULL;
	} else {
		struct mbuf *n;
		unsigned segs;
		uint64_t *gp;

		/*
		 * The packet is fragmented, we need to send a list of segments
		 * in memory we borrow from the WQE pool.
		 */
		work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
		if (work == NULL) {
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			return 1;
		}

		segs = 0;
		gp = (uint64_t *)work;
		for (n = m; n != NULL; n = n->m_next) {
			if (segs == CVMX_FPA_WQE_POOL_SIZE / sizeof (uint64_t))
				panic("%s: too many segments in packet; call m_collapse().", __func__);

			/* Build the PKO buffer pointer */
			hw_buffer.u64 = 0;
			hw_buffer.s.i = 1; /* Do not put this buffer into the FPA.  */
			hw_buffer.s.addr = cvmx_ptr_to_phys(n->m_data);
			hw_buffer.s.pool = 0;
			hw_buffer.s.size = n->m_len;

			*gp++ = hw_buffer.u64;
			segs++;
		}

		/* Build the PKO buffer gather list pointer */
		hw_buffer.u64 = 0;
		hw_buffer.s.addr = cvmx_ptr_to_phys(work);
		hw_buffer.s.pool = CVMX_FPA_WQE_POOL;
		hw_buffer.s.size = segs;

		/* Build the PKO command */
		pko_command.u64 = 0;
		pko_command.s.segs = segs;
		pko_command.s.gather = 1;
		pko_command.s.dontfree = 0; /* Put the WQE above back into the FPA.  */
	}

	/* Finish building the PKO command */
	pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
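	/* reg0/size0/subone0 make PKO atomically subtract 1 from the FAU counter
	   at priv->fau + qos*4 when the command completes, tracking mbufs in use. */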
	pko_command.s.reg0 = priv->fau+qos*4;
	pko_command.s.total_bytes = m->m_pkthdr.len;
	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
	pko_command.s.subone0 = 1;

	/* Check if we can use the hardware checksumming */
	if ((m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) != 0) {
		/* Use hardware checksum calc */
		pko_command.s.ipoffp1 = ETHER_HDR_LEN + 1;
	}

	/*
	 * XXX
	 * Could use a different free queue (and different FAU address) per
	 * core instead of per QoS, to reduce contention here.
	 */
	IF_LOCK(&priv->tx_free_queue[qos]);
	/* Get the number of mbufs in use by the hardware */
	in_use = cvmx_fau_fetch_and_add32(priv->fau+qos*4, 1);
	buffers_to_free = cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, CVMX_PKO_LOCK_CMD_QUEUE);

	/* Drop this packet if we have too many already queued to the HW */
	if (_IF_QFULL(&priv->tx_free_queue[qos])) {
		dropped = 1;
	}
	/* Send the packet to the output queue */
	else
	if (__predict_false(cvmx_pko_send_packet_finish(priv->port, priv->queue + qos, pko_command, hw_buffer, CVMX_PKO_LOCK_CMD_QUEUE))) {
		DEBUGPRINT("%s: Failed to send the packet\n", if_name(ifp));
		dropped = 1;
	}

	if (__predict_false(dropped)) {
		m_freem(m);
		cvmx_fau_atomic_add32(priv->fau+qos*4, -1);
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	} else {
		/* Put this packet on the queue to be freed later */
		_IF_ENQUEUE(&priv->tx_free_queue[qos], m);

		/* Pass it to any BPF listeners.  */
		ETHER_BPF_MTAP(ifp, m);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
	}

	/* Free mbufs not in use by the hardware */
	if (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) {
		while (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) {
			_IF_DEQUEUE(&priv->tx_free_queue[qos], m);
			m_freem(m);
		}
	}
	IF_UNLOCK(&priv->tx_free_queue[qos]);

	return dropped;
}
Code Example #7
File: oct-rxtx.c  Project: lanxbrad/sec-fw
void oct_tx_process_mbuf(mbuf_t *mbuf, uint8_t port)
{
	uint64_t queue;
	cvmx_pko_return_value_t send_status;
	
	if(port > OCT_PHY_PORT_MAX)
	{
		printf("Send port is invalid");
		PACKET_DESTROY_ALL(mbuf);
		STAT_TX_SEND_PORT_ERR;
		return;
	}

	queue = cvmx_pko_get_base_queue(port);

	cvmx_pko_send_packet_prepare(port, queue, CVMX_PKO_LOCK_CMD_QUEUE);


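	/* Hardware-owned packet buffers are handed straight to PKO, which frees
	   them back to the FPA after transmit, so only the mbuf wrapper is released
	   here; software-owned buffers use the dontfree/tx_done cookie path below. */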
	if(PKTBUF_IS_HW(mbuf))
	{
		/* Build a PKO pointer to this packet */
		cvmx_pko_command_word0_t pko_command;
		pko_command.u64 = 0;
		pko_command.s.segs = 1;
		pko_command.s.total_bytes = mbuf->pkt_totallen;

		/* Send the packet */
		send_status = cvmx_pko_send_packet_finish(port, queue, pko_command, mbuf->packet_ptr, CVMX_PKO_LOCK_CMD_QUEUE);
		if (send_status != CVMX_PKO_SUCCESS)
		{
			printf("Failed to send packet using cvmx_pko_send_packet_finish\n");
			STAT_TX_HW_SEND_ERR;
			PACKET_DESTROY_DATA(mbuf);
		}

		MBUF_FREE(mbuf);
	}
	else if(PKTBUF_IS_SW(mbuf))
	{
		uint8_t *dont_free_cookie = NULL;
		tx_done_t *tx_done = &(oct_stx[local_cpu_id]->tx_done[port]);
		if(tx_done->tx_entries < (OCT_PKO_TX_DESC_NUM - 1))
		{
			dont_free_cookie = oct_pend_tx_done_add(tx_done, (void *)mbuf);			
		}
		else
		{
			PACKET_DESTROY_ALL(mbuf);
			STAT_TX_SW_DESC_ERR;
			return;
		}
		/*command word0*/
		cvmx_pko_command_word0_t pko_command;
		pko_command.u64 = 0;
		
		pko_command.s.segs = 1;
		pko_command.s.total_bytes = mbuf->pkt_totallen;

		pko_command.s.rsp = 1;
		pko_command.s.dontfree = 1;

		/*command word1*/
		cvmx_buf_ptr_t packet;
		packet.u64 = 0;
		packet.s.size = mbuf->pkt_totallen;
		packet.s.addr = (uint64_t)mbuf->pkt_ptr;

		/*command word2*/
		cvmx_pko_command_word2_t tx_ptr_word;
		tx_ptr_word.u64 = 0;
		tx_ptr_word.s.ptr = (uint64_t)cvmx_ptr_to_phys(dont_free_cookie);

		/* Send the packet */
		send_status = cvmx_pko_send_packet_finish3(port, queue, pko_command, packet, tx_ptr_word.u64, CVMX_PKO_LOCK_CMD_QUEUE);
		if(send_status != CVMX_PKO_SUCCESS)
		{
			if(dont_free_cookie)
			{
				oct_pend_tx_done_remove(tx_done);
			}

			printf("Failed to send packet using cvmx_pko_send_packet3\n");
			
	        PACKET_DESTROY_ALL(mbuf);
			STAT_TX_SW_SEND_ERR;
			return;
		}
	}
	else
	{
		printf("pkt space %d is wrong, please check it\n", PKTBUF_SPACE_GET(mbuf));
	}

	STAT_TX_SEND_OVER;
	
}