Example #1
0
/*
 * Allocate a lagopus packet together with its backing rte_mbuf.
 *
 * In normal (DPDK) mode the mbuf comes from the first initialized
 * per-socket mempool; in raw-socket-only mode the mempools are never
 * initialized, so a plain calloc'd buffer is dressed up as an mbuf
 * instead.  The lagopus_packet header lives inside the mbuf's data
 * area at APP_DEFAULT_MBUF_LOCALDATA_OFFSET.
 *
 * Returns the packet, or NULL on allocation failure (logged).
 * Ownership of the mbuf transfers to the caller via pkt->mbuf.
 */
struct lagopus_packet *
alloc_lagopus_packet(void) {
  struct lagopus_packet *pkt;
  struct rte_mbuf *mbuf;
  unsigned sock;

  mbuf = NULL;
  if (rawsocket_only_mode != true) {
    /* Use the first per-socket pool that was actually created. */
    for (sock = 0; sock < APP_MAX_SOCKETS; sock++) {
      if (app.pools[sock] != NULL) {
        mbuf = rte_pktmbuf_alloc(app.pools[sock]);
        break;
      }
    }
    if (mbuf == NULL) {
      lagopus_msg_error("rte_pktmbuf_alloc failed\n");
      return NULL;
    }
  } else {
    /* do not use rte_mempool because it is not initialized. */
    mbuf = calloc(1, sizeof(struct rte_mbuf) + APP_DEFAULT_MBUF_SIZE);
    if (mbuf == NULL) {
      lagopus_msg_error("memory exhausted\n");
      return NULL;
    }
    mbuf->buf_addr = (void *)&mbuf[1];
    mbuf->buf_len = APP_DEFAULT_MBUF_SIZE;
    rte_pktmbuf_reset(mbuf);
    rte_mbuf_refcnt_set(mbuf, 1);
  }
  /*
   * buf_addr is void *; arithmetic on void * is a GNU extension, so
   * cast to char * for portable byte-offset arithmetic.
   */
  pkt = (struct lagopus_packet *)
        ((char *)mbuf->buf_addr + APP_DEFAULT_MBUF_LOCALDATA_OFFSET);
  pkt->mbuf = mbuf;
  return pkt;
}
Example #2
0
/**
 * Copy the payload of slot `index` of the Netmap ring `r` into the
 * given dpdk mbuf. Allocation and release of the mbuf are the
 * responsibility of the caller; mbuf chains are not supported.
 * If the mbuf lacks room for the slot's payload, the mbuf is left
 * reset and empty.
 */
static void
slot_to_mbuf(struct netmap_ring *r, uint32_t index, struct rte_mbuf *mbuf)
{
	uint16_t payload_len = r->slot[index].len;
	char *dst;

	rte_pktmbuf_reset(mbuf);
	dst = rte_pktmbuf_append(mbuf, payload_len);
	if (dst == NULL)
		return;

	rte_memcpy(dst, NETMAP_BUF(r, r->slot[index].buf_idx), payload_len);
}
Example #3
0
/**
 * Bulk-allocate `len` mbufs from mempool `mp` into `bufs` and initialize
 * each with refcount 1 and pkt_len/data_len set to `pkt_len`.
 *
 * This is essentially rte_pktmbuf_alloc_bulk(), but the loop is a
 * Duff's-device unroll that also sets the length fields directly, since
 * most callers (packet generators) do that immediately anyway.
 *
 * On mempool exhaustion no mbufs are returned and bufs[] is untouched.
 */
void alloc_mbufs(struct rte_mempool* mp, struct rte_mbuf* bufs[], uint32_t len, uint16_t pkt_len) {
	// len == 0 must be rejected up front: switch(0) would jump to
	// "case 0" inside the loop and run four full iterations before the
	// first `i != len` check, writing past the end of bufs[].
	if (len == 0) {
		return;
	}
	// rte_mempool_get_bulk() returns 0 on success; on failure nothing
	// was taken from the pool and bufs[] is uninitialized, so touching
	// bufs[i] below would dereference garbage.
	if (rte_mempool_get_bulk(mp, (void **)bufs, len) != 0) {
		return;
	}
	uint32_t i = 0;
	switch (len % 4) {
		while (i != len) {
			case 0:
				rte_mbuf_refcnt_set(bufs[i], 1);
				rte_pktmbuf_reset(bufs[i]);
				bufs[i]->pkt_len = pkt_len;
				bufs[i]->data_len = pkt_len;
				i++;
				// fall through
			case 3:
				rte_mbuf_refcnt_set(bufs[i], 1);
				rte_pktmbuf_reset(bufs[i]);
				bufs[i]->pkt_len = pkt_len;
				bufs[i]->data_len = pkt_len;
				i++;
				// fall through
			case 2:
				rte_mbuf_refcnt_set(bufs[i], 1);
				rte_pktmbuf_reset(bufs[i]);
				bufs[i]->pkt_len = pkt_len;
				bufs[i]->data_len = pkt_len;
				i++;
				// fall through
			case 1:
				rte_mbuf_refcnt_set(bufs[i], 1);
				rte_pktmbuf_reset(bufs[i]);
				bufs[i]->pkt_len = pkt_len;
				bufs[i]->data_len = pkt_len;
				i++;
		}
	}
}
/*
 * Allocate a standalone mbuf sized to the pool's element size from the
 * heap and initialize its bookkeeping fields. The data area begins
 * RTE_PKTMBUF_HEADROOM bytes past the mbuf header. Aborts via assert
 * on allocation failure.
 */
rte_mbuf_t *rte_pktmbuf_alloc(rte_mempool_t *mp){

    rte_mbuf_t *m;
    uint16_t total_len;

    utl_rte_check(mp);

    total_len = mp->elt_size;

    m = (rte_mbuf_t *)malloc(total_len);
    assert(m);

    /* Guard values used to detect corruption / double free. */
    m->magic  = MAGIC0;
    m->magic2 = MAGIC2;
    m->pool   = mp;
    m->refcnt_reserved = 0;

    m->buf_len  = total_len;
    /* Data region: just past the header, skipping the standard headroom. */
    m->buf_addr = (char *)m + sizeof(rte_mbuf_t) + RTE_PKTMBUF_HEADROOM;

    rte_pktmbuf_reset(m);
    return m;
}