Example #1
uint32_t oct_pow_se2linux(mbuf_t *m)
{
    cvmx_wqe_t *work = NULL;
    uint8_t input = 0;
    uint8_t linux_group = 0;

    /* Get a work queue entry */
    work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
    if(NULL == work)
    {
        return SEC_NO;
    }

    memset(work, 0, sizeof(cvmx_wqe_t));

    work->packet_ptr.u64 = m->packet_ptr.u64;
    work->word2.s.bufs = 1;

    input = m->input_port;

    if (input == 0)
    {
        linux_group = POW0_LINUX_GROUP;
    }
    else if (input == 1)
    {
        linux_group = POW1_LINUX_GROUP;
    }
    else if (input == 2)
    {
        linux_group = POW2_LINUX_GROUP;
    }
    else if (input == 3)
    {
        linux_group = POW3_LINUX_GROUP;
    }
    else
    {
        /* Unknown input port: return the WQE to its pool before bailing out */
        cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 0);
        return SEC_NO;
    }

    cvmx_wqe_set_len(work, m->pkt_totallen);
    cvmx_wqe_set_port(work, m->input_port);
    cvmx_wqe_set_grp(work, linux_group);

    cvmx_pow_work_submit(work, 0, 0, 0, linux_group);

    MBUF_FREE(m);

    return SEC_OK;
}
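For context, here is a minimal, hedged sketch of how the Linux-side consumer of the POWx_LINUX_GROUP groups might pick up the work submitted above, using the standard cvmx-pow API; process_packet() is a placeholder, not part of the original example.

/* Hedged sketch: receive loop on the Linux side, assuming this core's
 * group mask includes the POWx_LINUX_GROUP groups used above. */
extern void process_packet(cvmx_wqe_t *work);   /* placeholder consumer */

void linux_side_rx_loop(void)
{
    cvmx_wqe_t *work;

    for (;;) {
        /* Ask the POW/SSO for the next work queue entry for this core */
        work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
        if (work == NULL)
            continue;

        process_packet(work);

        /* Return the WQE to the FPA once the packet has been consumed */
        cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 0);
    }
}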
Example #2
/* IN PROGRESS */
void send_packet()
{
  uint8_t *buf, *pbuf; 
  uint64_t queue, length, buf_phys_addr;
  cvmx_pko_command_word0_t pko_command;
  cvmx_pko_return_value_t status;
  cvmx_buf_ptr_t hw_buffer;

  buf = (uint8_t *) cvmx_fpa_alloc(packet_pool);

  if (buf == NULL) {
    printf("ERROR: allocation from pool %" PRIu64 " failed!\n", packet_pool);
    return;
  } else {
    printf("Packet allocation successful!\n");
  }

  pbuf = build_packet(buf, PAYLOAD_SIZE);
  length = (uint64_t) (pbuf - buf);

  printf("buf : %p\n", buf);
  printf("pbuf: %p\n", pbuf);
  printf("diff: %" PRIu64 "\n", length);

  pko_command.u64 = 0;
  pko_command.s.segs = 1;
  pko_command.s.total_bytes = length;
  pko_command.s.dontfree = 1;

  buf_phys_addr = cvmx_ptr_to_phys(buf); 
  printf("buf_phys_addr: %" PRIu64 "\n", buf_phys_addr);

  hw_buffer.s.i = 0;
  hw_buffer.s.back = 0;
  hw_buffer.s.pool = packet_pool; // the pool that the buffer came from
  hw_buffer.s.size = length; // the size of the segment pointed to by addr (in bytes)
  hw_buffer.s.addr = cvmx_ptr_to_phys(buf); // pointer to the first byte of the data

  queue = cvmx_pko_get_base_queue(xaui_ipd_port);
  printf("queue: %" PRIu64 "\n", queue);

  cvmx_pko_send_packet_prepare(xaui_ipd_port, queue, CVMX_PKO_LOCK_NONE);

  // THROWS EXCEPTION HERE
  status = cvmx_pko_send_packet_finish(xaui_ipd_port, queue, pko_command, hw_buffer, CVMX_PKO_LOCK_NONE);

  if (status == CVMX_PKO_SUCCESS) {
    printf("Successfully sent packet!\n");
    cvmx_fpa_free(buf, packet_pool, 0);
  } else {
    /* The command was not queued, so the buffer can be reclaimed here */
    printf("ERROR: cvmx_pko_send_packet_finish failed (status %d)\n", (int)status);
    cvmx_fpa_free(buf, packet_pool, 0);
  }
}
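One way to avoid both the manual cvmx_fpa_free() and the question of whether PKO is still reading the buffer is to let the hardware free the segment itself. Below is a hedged sketch of that variant, reusing packet_pool, length and buf from the snippet above; the helper name is an assumption, not part of the original code.

/* Hedged sketch: build a single-segment PKO command where the PKO unit,
 * not software, returns the buffer to the FPA after transmission.  With
 * dontfree = 0 the hardware frees each segment back to the pool named in
 * its buffer pointer, so no cvmx_fpa_free() call is needed afterwards. */
static void build_hw_freed_command(uint64_t packet_pool, uint64_t length, void *buf,
                                   cvmx_pko_command_word0_t *pko_command,
                                   cvmx_buf_ptr_t *hw_buffer)
{
  pko_command->u64 = 0;
  pko_command->s.segs = 1;
  pko_command->s.total_bytes = length;
  pko_command->s.dontfree = 0;        /* hardware frees the segment */

  hw_buffer->u64 = 0;                 /* clear every field first */
  hw_buffer->s.pool = packet_pool;    /* pool the buffer is returned to */
  hw_buffer->s.size = length;
  hw_buffer->s.addr = cvmx_ptr_to_phys(buf);
}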
Example #3
/**
 * Shut down a memory pool and validate that it still holds all of
 * the buffers originally placed in it.
 *
 * @pool:   Pool to shut down
 * Returns Zero on success
 *         - Positive is count of missing buffers
 *         - Negative is too many buffers or corrupted pointers
 */
uint64_t cvmx_fpa_shutdown_pool(uint64_t pool)
{
	uint64_t errors = 0;
	uint64_t count = 0;
	uint64_t base = cvmx_ptr_to_phys(cvmx_fpa_pool_info[pool].base);
	uint64_t finish =
	    base +
	    cvmx_fpa_pool_info[pool].size *
	    cvmx_fpa_pool_info[pool].starting_element_count;
	void *ptr;
	uint64_t address;

	count = 0;
	do {
		ptr = cvmx_fpa_alloc(pool);
		if (ptr)
			address = cvmx_ptr_to_phys(ptr);
		else
			address = 0;
		if (address) {
			if ((address >= base) && (address < finish) &&
			    (((address -
			       base) % cvmx_fpa_pool_info[pool].size) == 0)) {
				count++;
			} else {
				cvmx_dprintf
				    ("ERROR: cvmx_fpa_shutdown_pool: Illegal address 0x%llx in pool %s(%d)\n",
				     (unsigned long long)address,
				     cvmx_fpa_pool_info[pool].name, (int)pool);
				errors++;
			}
		}
	} while (address);

#ifdef CVMX_ENABLE_PKO_FUNCTIONS
	if (pool == 0)
		cvmx_ipd_free_ptr();
#endif

	if (errors) {
		cvmx_dprintf
		    ("ERROR: cvmx_fpa_shutdown_pool: Pool %s(%d) started at 0x%llx, ended at 0x%llx, with a step of 0x%llx\n",
		     cvmx_fpa_pool_info[pool].name, (int)pool,
		     (unsigned long long)base, (unsigned long long)finish,
		     (unsigned long long)cvmx_fpa_pool_info[pool].size);
		return -errors;
	} else
		return 0;
}
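A minimal usage sketch of the return convention documented above; CVMX_FPA_PACKET_POOL is the SDK's define for pool 0, and the teardown function name is illustrative.

/* Hedged usage sketch: drain the packet pool at teardown and report how
 * many bad pointers were seen (a negative return means errors). */
void teardown_packet_pool(void)
{
	int64_t result = (int64_t)cvmx_fpa_shutdown_pool(CVMX_FPA_PACKET_POOL);

	if (result < 0)
		cvmx_dprintf("FPA packet pool shutdown found %lld bad buffer(s)\n",
			     (long long)-result);
}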
Example #4
/**
 * Free the supplied hardware pool of mbufs
 *
 * @param pool     Pool to free
 * @param size     Size of each buffer in the pool
 * @param elements Number of buffers that should be in the pool
 */
void cvm_oct_mem_empty_fpa(int pool, int size, int elements)
{
	char *memory;

	do {
		memory = cvmx_fpa_alloc(pool);
		if (memory) {
			struct mbuf *m = *(struct mbuf **)(memory - sizeof(void *));
			elements--;
			m_freem(m);
		}
	} while (memory);

	if (elements < 0)
		printf("Warning: Freeing of pool %u had too many mbufs (%d)\n", pool, elements);
	else if (elements > 0)
		printf("Warning: Freeing of pool %u is missing %d mbufs\n", pool, elements);
}
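The memory - sizeof(void *) arithmetic above only makes sense next to the fill side, which records the owning mbuf pointer just in front of the region handed to the FPA. Here is a hedged sketch of such a fill routine; the function name, the m_getcl() allocation and the 128-byte alignment are assumptions for illustration, not the driver's actual code.

/* Hedged sketch of the fill counterpart: allocate mbuf clusters, stash the
 * owning mbuf pointer immediately before the buffer given to the FPA, so
 * the drain loop above can recover it with (memory - sizeof(void *)). */
static void cvm_oct_mem_fill_fpa_sketch(int pool, int size, int elements)
{
	struct mbuf *m;
	char *memory;

	while (elements-- > 0) {
		/* Placeholder allocation: any scheme works as long as the
		   cluster holds size bytes plus the back pointer and the
		   cache-line alignment the FPA expects. */
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			break;

		/* Align the FPA buffer, leaving room for the back pointer */
		memory = (char *)(((uintptr_t)m->m_data + sizeof(void *) + 127) & ~(uintptr_t)127);
		*(struct mbuf **)(memory - sizeof(void *)) = m;
		cvmx_fpa_free(memory, pool, 0);
	}
}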
Example #5
/**
 * Free the supplied hardware pool of skbuffs
 *
 * @pool:     FPA pool to free
 * @size:     Size of each buffer in the pool
 * @elements: Number of buffers that should be in the pool
 */
static void cvm_oct_free_hw_skbuff(int pool, int size, int elements)
{
	char *memory;

	do {
		memory = cvmx_fpa_alloc(pool);
		if (memory) {
			struct sk_buff *skb =
			    *(struct sk_buff **)(memory - sizeof(void *));
			elements--;
			dev_kfree_skb(skb);
		}
	} while (memory);

	if (elements < 0)
		pr_warning("Freeing of pool %u had too many skbuffs (%d)\n",
		     pool, elements);
	else if (elements > 0)
		pr_warning("Freeing of pool %u is missing %d skbuffs\n",
		       pool, elements);
}
static void cvm_oct_free_hw_memory(int pool, int size, int elements)
{
	char *memory;
	char *fpa;
	do {
		fpa = cvmx_fpa_alloc(pool);
		if (fpa) {
			elements--;
			fpa = (char *)phys_to_virt(cvmx_ptr_to_phys(fpa));
			memory = *((char **)fpa - 1);
			kfree(memory);
		}
	} while (fpa);

	if (elements < 0)
		pr_warning("Freeing of pool %u had too many buffers (%d)\n",
			pool, elements);
	else if (elements > 0)
		pr_warning("Freeing of pool %u is missing %d buffers\n",
			pool, elements);
}
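For the same reason, the *((char **)fpa - 1) read above relies on a fill routine that kmalloc()s an oversized block, aligns the part handed to the FPA, and saves the original kmalloc pointer just before it. A hedged sketch along those lines follows; the extra 256 bytes of slack and the function name are illustrative assumptions.

/* Hedged sketch of the matching fill routine: the FPA wants 128-byte
 * aligned buffers, so allocate extra room, align, and keep the original
 * kmalloc() pointer in the eight bytes just before the aligned buffer. */
static int cvm_oct_fill_hw_memory_sketch(int pool, int size, int elements)
{
	char *memory;
	char *fpa;
	int freed = elements;

	while (freed) {
		memory = kmalloc(size + 256, GFP_ATOMIC);
		if (unlikely(!memory))
			break;
		/* Align the FPA buffer and remember where kmalloc's block starts */
		fpa = (char *)(((unsigned long)memory + 256) & ~0x7fUL);
		*((char **)fpa - 1) = memory;
		cvmx_fpa_free(fpa, pool, 0);
		freed--;
	}
	return elements - freed;
}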
Example #7
/**
 * Free memory previously allocated with cvm_oct_fill_hw_memory
 *
 * @pool:     FPA pool to free
 * @size:     Size of each buffer in the pool
 * @elements: Number of buffers that should be in the pool
 */
static void cvm_oct_free_hw_memory(int pool, int size, int elements)
{
	if (USE_32BIT_SHARED) {
		pr_warning("32-bit shared memory is not freeable\n");
	} else {
		char *memory;
		do {
			memory = cvmx_fpa_alloc(pool);
			if (memory) {
				elements--;
				kfree(phys_to_virt(cvmx_ptr_to_phys(memory)));
			}
		} while (memory);

		if (elements < 0)
			pr_warning("Freeing of pool %u had too many "
				   "buffers (%d)\n",
			       pool, elements);
		else if (elements > 0)
			pr_warning("Freeing of pool %u is "
				"missing %d buffers\n",
			     pool, elements);
	}
}
static uint32_t 
octeon_se_fastpath_fragc_helper_alloc(SeFastpathFragmentContext fragc,
				      SeFastpathPacketContext orig_pc,
				      SeFastpathPacketContext frag_pc,
				      uint16_t data_len)
{
  cvmx_wqe_t *wqe;
  cvmx_buf_ptr_t fragment;
  uint64_t num_segments = 0;
  uint32_t len;
  size_t alignment;

  wqe = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
  if (cvmx_unlikely(wqe == NULL))
    {
      OCTEON_SE_DEBUG(3, "Out of memory while allocating wqe for fragment.\n");
      return 1;
    }
  
  len = data_len + fragc->frag_hlen;
  if (cvmx_unlikely(orig_pc->s->ip_version_6))
    alignment = (OCTEON_SE_ALIGN_64(orig_pc->s->ip_offset
				    + OCTEON_SE_FASTPATH_IP6_HDRLEN
				    + OCTEON_SE_FASTPATH_IP6_EXT_FRAG_HDRLEN)
		 - (orig_pc->s->ip_offset
		    + OCTEON_SE_FASTPATH_IP6_HDRLEN
		    + OCTEON_SE_FASTPATH_IP6_EXT_FRAG_HDRLEN));
  else
    alignment = (OCTEON_SE_ALIGN_64(orig_pc->s->ip_offset
				    + OCTEON_SE_FASTPATH_IP4_HDRLEN)
		 - (orig_pc->s->ip_offset
		    + OCTEON_SE_FASTPATH_IP4_HDRLEN));

  fragment.u64 = 
    octeon_se_fastpath_alloc_packet_chain(len + orig_pc->s->ip_offset,
					  alignment, &num_segments);
  if (cvmx_unlikely(fragment.u64 == 0))
    {
      OCTEON_SE_DEBUG(3, "Out of memory while allocating fragments.\n");
      cvmx_fpa_free(wqe, CVMX_FPA_WQE_POOL, 0);
      return 1;
    }
  wqe->packet_ptr.u64 = fragment.u64;
  wqe->len = len + orig_pc->s->ip_offset;
  wqe->word2.s.bufs = num_segments;

  frag_pc->wqe = wqe;
  frag_pc->s->ip_offset = orig_pc->s->ip_offset;
  frag_pc->s->ip_len = len;
  frag_pc->s->ip_version_6 = orig_pc->s->ip_version_6;

  frag_pc->mtu = orig_pc->mtu;
  frag_pc->oport = orig_pc->oport;
  frag_pc->nh_index = orig_pc->nh_index;
  frag_pc->media_hdrlen = orig_pc->media_hdrlen;
  memcpy(frag_pc->media_hdr.data, 
	 orig_pc->media_hdr.data, frag_pc->media_hdrlen);

  return 0;
}
Example #9
/**
 * Packet transmit
 *
 * @param m      Packet to send
 * @param ifp    Interface (ifnet) to send the packet on
 * @return Zero on success, 1 if the packet was dropped
 */
int cvm_oct_xmit(struct mbuf *m, struct ifnet *ifp)
{
	cvmx_pko_command_word0_t    pko_command;
	cvmx_buf_ptr_t              hw_buffer;
	int                         dropped;
	int                         qos;
	cvm_oct_private_t          *priv = (cvm_oct_private_t *)ifp->if_softc;
	int32_t in_use;
	int32_t buffers_to_free;
	cvmx_wqe_t *work;

	/* Prefetch the private data structure.
	   It is larger than one cache line */
	CVMX_PREFETCH(priv, 0);

	/* Start off assuming no drop */
	dropped = 0;

	/* The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to completely
	   remove "qos" in the event neither interface supports multiple queues
	   per port */
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		qos = GET_MBUF_QOS(m);
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else
		qos = 0;

	/* The CN3XXX series of parts has an errata (GMX-401) which causes the
	   GMX block to hang if a collision occurs towards the end of a
	   <68 byte packet. As a workaround for this, we pad packets to be
	   68 bytes whenever we are in half duplex mode. We don't handle
	   the case of having a small packet but no room to add the padding.
	   The kernel should always give us at least a cache line */
	if (__predict_false(m->m_pkthdr.len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		cvmx_gmxx_prtx_cfg_t gmx_prt_cfg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		if (interface < 2) {
			/* We only need to pad packet in half duplex mode */
			gmx_prt_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
			if (gmx_prt_cfg.s.duplex == 0) {
				static uint8_t pad[64];

				if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad))
					printf("%s: unable to pad small packet.", __func__);
			}
		}
	}

#ifdef OCTEON_VENDOR_RADISYS
	/*
	 * The RSYS4GBE will hang if asked to transmit a packet less than 60 bytes.
	 */
	if (__predict_false(m->m_pkthdr.len < 60) &&
	    cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE) {
		static uint8_t pad[60];

		if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad))
			printf("%s: unable to pad small packet.", __func__);
	}
#endif

	/*
	 * If the packet is not fragmented.
	 */
	if (m->m_pkthdr.len == m->m_len) {
		/* Build the PKO buffer pointer */
		hw_buffer.u64 = 0;
		hw_buffer.s.addr = cvmx_ptr_to_phys(m->m_data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = m->m_len;

		/* Build the PKO command */
		pko_command.u64 = 0;
		pko_command.s.segs = 1;
		pko_command.s.dontfree = 1; /* Do not put this buffer into the FPA.  */

		work = NULL;
	} else {
		struct mbuf *n;
		unsigned segs;
		uint64_t *gp;

		/*
		 * The packet is fragmented, we need to send a list of segments
		 * in memory we borrow from the WQE pool.
		 */
		work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
		if (work == NULL) {
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			return 1;
		}

		segs = 0;
		gp = (uint64_t *)work;
		for (n = m; n != NULL; n = n->m_next) {
			if (segs == CVMX_FPA_WQE_POOL_SIZE / sizeof (uint64_t))
				panic("%s: too many segments in packet; call m_collapse().", __func__);

			/* Build the PKO buffer pointer */
			hw_buffer.u64 = 0;
			hw_buffer.s.i = 1; /* Do not put this buffer into the FPA.  */
			hw_buffer.s.addr = cvmx_ptr_to_phys(n->m_data);
			hw_buffer.s.pool = 0;
			hw_buffer.s.size = n->m_len;

			*gp++ = hw_buffer.u64;
			segs++;
		}

		/* Build the PKO buffer gather list pointer */
		hw_buffer.u64 = 0;
		hw_buffer.s.addr = cvmx_ptr_to_phys(work);
		hw_buffer.s.pool = CVMX_FPA_WQE_POOL;
		hw_buffer.s.size = segs;

		/* Build the PKO command */
		pko_command.u64 = 0;
		pko_command.s.segs = segs;
		pko_command.s.gather = 1;
		pko_command.s.dontfree = 0; /* Put the WQE above back into the FPA.  */
	}

	/* Finish building the PKO command */
	pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
	pko_command.s.reg0 = priv->fau+qos*4;
	pko_command.s.total_bytes = m->m_pkthdr.len;
	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
	pko_command.s.subone0 = 1;

	/* Check if we can use the hardware checksumming */
	if ((m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) != 0) {
		/* Use hardware checksum calc */
		pko_command.s.ipoffp1 = ETHER_HDR_LEN + 1;
	}

	/*
	 * XXX
	 * Could use a different free queue (and different FAU address) per
	 * core instead of per QoS, to reduce contention here.
	 */
	IF_LOCK(&priv->tx_free_queue[qos]);
	/* Get the number of mbufs in use by the hardware */
	in_use = cvmx_fau_fetch_and_add32(priv->fau+qos*4, 1);
	buffers_to_free = cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, CVMX_PKO_LOCK_CMD_QUEUE);

	/* Drop this packet if we have too many already queued to the HW */
	if (_IF_QFULL(&priv->tx_free_queue[qos])) {
		dropped = 1;
	}
	/* Send the packet to the output queue */
	else
	if (__predict_false(cvmx_pko_send_packet_finish(priv->port, priv->queue + qos, pko_command, hw_buffer, CVMX_PKO_LOCK_CMD_QUEUE))) {
		DEBUGPRINT("%s: Failed to send the packet\n", if_name(ifp));
		dropped = 1;
	}

	if (__predict_false(dropped)) {
		m_freem(m);
		cvmx_fau_atomic_add32(priv->fau+qos*4, -1);
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	} else {
		/* Put this packet on the queue to be freed later */
		_IF_ENQUEUE(&priv->tx_free_queue[qos], m);

		/* Pass it to any BPF listeners.  */
		ETHER_BPF_MTAP(ifp, m);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
	}

	/* Free mbufs not in use by the hardware */
	if (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) {
		while (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) {
			_IF_DEQUEUE(&priv->tx_free_queue[qos], m);
			m_freem(m);
		}
	}
	IF_UNLOCK(&priv->tx_free_queue[qos]);

	return dropped;
}