void test_pkt_mbuf(void)
{
    rte_mempool_t *mp1 = utl_rte_mempool_create("big-const", CONST_NB_MBUF,
                                                CONST_MBUF_SIZE, 32);
    rte_mbuf_t *m1 = rte_pktmbuf_alloc(mp1);
    rte_mbuf_t *m2 = rte_pktmbuf_alloc(mp1);
    char *p;
    int i;

    /* Fill each mbuf with a distinct byte pattern. */
    p = rte_pktmbuf_append(m1, 10);
    for (i = 0; i < 10; i++)
        p[i] = i;

    p = rte_pktmbuf_append(m2, 10);
    for (i = 0; i < 10; i++)
        p[i] = 0x55 + i;

    rte_pktmbuf_dump(m1, m1->pkt_len);
    rte_pktmbuf_dump(m2, m2->pkt_len);

    rte_pktmbuf_free(m1);
    rte_pktmbuf_free(m2);
}
static void
send_paxos_message(paxos_message *pm)
{
    uint8_t port_id = 0;
    struct rte_mbuf *created_pkt = rte_pktmbuf_alloc(mbuf_pool);

    created_pkt->l2_len = sizeof(struct ether_hdr);
    created_pkt->l3_len = sizeof(struct ipv4_hdr);
    created_pkt->l4_len = sizeof(struct udp_hdr) + sizeof(paxos_message);

    craft_new_packet(&created_pkt, IPv4(192, 168, 4, 99), ACCEPTOR_ADDR,
                     PROPOSER_PORT, ACCEPTOR_PORT, sizeof(paxos_message),
                     port_id);

    size_t udp_offset = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr);
    size_t paxos_offset = udp_offset + sizeof(struct udp_hdr);
    struct paxos_hdr *px = rte_pktmbuf_mtod_offset(created_pkt,
                                                   struct paxos_hdr *,
                                                   paxos_offset);

    px->msgtype = rte_cpu_to_be_16(pm->type);
    px->inst = rte_cpu_to_be_32(pm->u.accept.iid);
    px->rnd = rte_cpu_to_be_16(pm->u.accept.ballot);
    px->vrnd = rte_cpu_to_be_16(pm->u.accept.value_ballot);
    px->acptid = 0;
    rte_memcpy(px->paxosval, pm->u.accept.value.paxos_value_val,
               pm->u.accept.value.paxos_value_len);

    created_pkt->ol_flags = PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM;

    const uint16_t nb_tx = rte_eth_tx_burst(port_id, 0, &created_pkt, 1);
    /* tx_burst takes ownership of transmitted mbufs; free only on failure. */
    if (nb_tx == 0)
        rte_pktmbuf_free(created_pkt);
    rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8, "Sent %d messages\n", nb_tx);
}
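A minimal sketch of the DPDK transmit idiom the fix above relies on (the helper name is an assumption, not from the original code): rte_eth_tx_burst() takes ownership of the mbufs it accepts, so the caller must free only the unsent tail.

static inline void
tx_burst_and_free(uint8_t port_id, uint16_t queue_id,
                  struct rte_mbuf **pkts, uint16_t n)
{
    /* tx_burst returns how many mbufs the driver accepted. */
    uint16_t nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, n);

    /* The driver now owns pkts[0..nb_tx-1]; free the rest ourselves. */
    while (nb_tx < n)
        rte_pktmbuf_free(pkts[nb_tx++]);
}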
static int
test_pktmbuf_free_segment(void)
{
    unsigned i;
    struct rte_mbuf *m[NB_MBUF];
    int ret = 0;

    for (i = 0; i < NB_MBUF; i++)
        m[i] = NULL;

    /* alloc NB_MBUF mbufs */
    for (i = 0; i < NB_MBUF; i++) {
        m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
        if (m[i] == NULL) {
            printf("rte_pktmbuf_alloc() failed (%u)\n", i);
            ret = -1;
        }
    }

    /* free them, one segment at a time */
    for (i = 0; i < NB_MBUF; i++) {
        if (m[i] != NULL) {
            struct rte_mbuf *mb, *mt;

            mb = m[i];
            while (mb != NULL) {
                mt = mb;
                mb = mb->pkt.next;
                rte_pktmbuf_free_seg(mt);
            }
        }
    }

    return ret;
}
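Worth noting alongside this test: calling rte_pktmbuf_free() on the head mbuf frees the entire segment chain in one call, so the manual walk above is only needed when exercising rte_pktmbuf_free_seg() itself.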
/**
 * Send a CPL_SET_TCB_FIELD message.
 */
static void
set_tcb_field(struct adapter *adapter, unsigned int ftid, u16 word,
              u64 mask, u64 val, int no_reply)
{
    struct rte_mbuf *mbuf;
    struct cpl_set_tcb_field *req;
    struct sge_ctrl_txq *ctrlq;

    ctrlq = &adapter->sge.ctrlq[0];
    mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
    WARN_ON(!mbuf);
    if (!mbuf)
        return;

    mbuf->data_len = sizeof(*req);
    mbuf->pkt_len = mbuf->data_len;

    req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
    memset(req, 0, sizeof(*req));
    INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid);
    req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) |
                                  V_QUEUENO(adapter->sge.fw_evtq.abs_id) |
                                  V_NO_REPLY(no_reply));
    req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid));
    req->mask = cpu_to_be64(mask);
    req->val = cpu_to_be64(val);

    t4_mgmt_tx(ctrlq, mbuf);
}
void
pktgen_send_ping4(uint32_t pid, uint8_t seq_idx)
{
    port_info_t *info = &pktgen.info[pid];
    pkt_seq_t *ppkt = &info->seq_pkt[PING_PKT];
    pkt_seq_t *spkt = &info->seq_pkt[seq_idx];
    struct rte_mbuf *m;
    uint8_t qid = 0;

    m = rte_pktmbuf_alloc(info->q[qid].special_mp);
    if (unlikely(m == NULL)) {
        pktgen_log_warning("No packet buffers found");
        return;
    }

    *ppkt = *spkt;      /* Copy the sequence setup to the ping setup. */
    pktgen_packet_ctor(info, PING_PKT, ICMP4_ECHO);
    rte_memcpy((uint8_t *)m->buf_addr + m->data_off,
               (uint8_t *)&ppkt->hdr, ppkt->pktSize);

    m->pkt_len = ppkt->pktSize;
    m->data_len = ppkt->pktSize;

    pktgen_send_mbuf(m, pid, qid);
    pktgen_set_q_flags(info, qid, DO_TX_FLUSH);
}
static void
kni_allocate_mbufs(struct rte_kni *kni)
{
    int i, ret;
    struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];

    /* Check if pktmbuf pool has been configured */
    if (kni->pktmbuf_pool == NULL) {
        RTE_LOG(ERR, KNI, "No valid mempool for allocating mbufs\n");
        return;
    }

    for (i = 0; i < MAX_MBUF_BURST_NUM; i++) {
        pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool);
        if (unlikely(pkts[i] == NULL)) {
            /* Out of memory */
            RTE_LOG(ERR, KNI, "Out of memory\n");
            break;
        }
    }

    /* No pkt mbufs allocated */
    if (i <= 0)
        return;

    ret = kni_fifo_put(kni->alloc_q, (void **)pkts, i);

    /* Free any mbufs that were not put into alloc_q */
    if (ret >= 0 && ret < i && ret < MAX_MBUF_BURST_NUM) {
        int j;

        for (j = ret; j < i; j++)
            rte_pktmbuf_free(pkts[j]);
    }
}
/*
 * Test data manipulation in an mbuf with non-ASCII data.
 */
static int
test_pktmbuf_with_non_ascii_data(void)
{
    struct rte_mbuf *m = NULL;
    char *data;

    m = rte_pktmbuf_alloc(pktmbuf_pool);
    if (m == NULL)
        GOTO_FAIL("Cannot allocate mbuf");
    if (rte_pktmbuf_pkt_len(m) != 0)
        GOTO_FAIL("Bad length");

    data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN);
    if (data == NULL)
        GOTO_FAIL("Cannot append data");
    if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
        GOTO_FAIL("Bad pkt length");
    if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
        GOTO_FAIL("Bad data length");

    memset(data, 0xff, rte_pktmbuf_pkt_len(m));
    if (!rte_pktmbuf_is_contiguous(m))
        GOTO_FAIL("Buffer should be contiguous");
    rte_pktmbuf_dump(m, MBUF_TEST_DATA_LEN);

    rte_pktmbuf_free(m);

    return 0;

fail:
    if (m)
        rte_pktmbuf_free(m);
    return -1;
}
struct lagopus_packet *
alloc_lagopus_packet(void)
{
    struct lagopus_packet *pkt;
    struct rte_mbuf *mbuf;
    unsigned sock;

    mbuf = NULL;
    if (rawsocket_only_mode != true) {
        /* Allocate from the first initialized per-socket pool. */
        for (sock = 0; sock < APP_MAX_SOCKETS; sock++) {
            if (app.pools[sock] != NULL) {
                mbuf = rte_pktmbuf_alloc(app.pools[sock]);
                break;
            }
        }
        if (mbuf == NULL) {
            lagopus_msg_error("rte_pktmbuf_alloc failed\n");
            return NULL;
        }
    } else {
        /* Do not use rte_mempool because it is not initialized. */
        mbuf = calloc(1, sizeof(struct rte_mbuf) + APP_DEFAULT_MBUF_SIZE);
        if (mbuf == NULL) {
            lagopus_msg_error("memory exhausted\n");
            return NULL;
        }
        mbuf->buf_addr = (void *)&mbuf[1];
        mbuf->buf_len = APP_DEFAULT_MBUF_SIZE;
        rte_pktmbuf_reset(mbuf);
        rte_mbuf_refcnt_set(mbuf, 1);
    }
    pkt = (struct lagopus_packet *)
          ((char *)mbuf->buf_addr + APP_DEFAULT_MBUF_LOCALDATA_OFFSET);
    pkt->mbuf = mbuf;
    return pkt;
}
static void
set_mempool(struct rte_mempool *mempool)
{
#if (!PER_CORE)
    int initialized[RTE_MAX_NUMA_NODES];

    for (int i = 0; i < RTE_MAX_NUMA_NODES; i++) {
        initialized[i] = 0;
    }
#endif
    if (mempool == NULL) {
        rte_panic("Got a NULL mempool");
    }

    /* Loop through all cores, to see if any of them belong to this
     * socket. */
    for (int i = 0; i < RTE_MAX_LCORE; i++) {
        int sid = rte_lcore_to_socket_id(i);
#if (!PER_CORE)
        if (!initialized[sid]) {
#endif
            struct rte_mbuf *mbuf = NULL;
#if (PER_CORE)
            pframe_pool[i] = mempool;
#else
            pframe_pool[sid] = mempool;
#endif
            /* Initialize mbuf template */
#if PER_CORE
            mbuf = rte_pktmbuf_alloc(pframe_pool[i]);
            if (mbuf == NULL) {
                rte_panic("Bad mbuf");
            }
            mbuf_template[i] = *mbuf;
            rte_pktmbuf_free(mbuf);
#else
            mbuf = rte_pktmbuf_alloc(pframe_pool[sid]);
            if (mbuf == NULL || mbuf->next != NULL || mbuf->pool == NULL) {
                rte_panic("Bad mbuf");
            }
            mbuf_template[sid] = *mbuf;
            rte_pktmbuf_free(mbuf);
#endif
#if (!PER_CORE)
            initialized[sid] = 1;
        }
#endif
    }
}
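A note on the template trick above: the copy captures a snapshot of a known-good mbuf header so fast-path code can reinitialize fresh mbufs with a single struct copy. Per-buffer fields such as buf_addr and the pool pointer still differ between mbufs, so code that applies the template must preserve those fields rather than overwrite them wholesale.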
static int
testclone_testupdate_testdetach(void)
{
#ifndef RTE_MBUF_SCATTER_GATHER
    return 0;
#else
    struct rte_mbuf *mc = NULL;
    struct rte_mbuf *clone = NULL;

    /* alloc a mbuf */
    mc = rte_pktmbuf_alloc(pktmbuf_pool);
    if (mc == NULL)
        GOTO_FAIL("Cannot allocate mbuf");
    if (rte_pktmbuf_pkt_len(mc) != 0)
        GOTO_FAIL("Bad length");

    /* clone the allocated mbuf */
    clone = rte_pktmbuf_clone(mc, pktmbuf_pool);
    if (clone == NULL)
        GOTO_FAIL("cannot clone data\n");
    rte_pktmbuf_free(clone);

    /* attach a second segment, then clone the chain */
    mc->pkt.next = rte_pktmbuf_alloc(pktmbuf_pool);
    if (mc->pkt.next == NULL)
        GOTO_FAIL("Next Pkt Null\n");

    clone = rte_pktmbuf_clone(mc, pktmbuf_pool);
    if (clone == NULL)
        GOTO_FAIL("cannot clone data\n");

    /* free mbuf */
    rte_pktmbuf_free(mc);
    rte_pktmbuf_free(clone);
    mc = NULL;
    clone = NULL;
    return 0;

fail:
    if (mc)
        rte_pktmbuf_free(mc);
    return -1;
#endif /* RTE_MBUF_SCATTER_GATHER */
}
/* Sends 'num_pkts' 'packets' and 'request' data to datapath. */
int
dpdk_link_send_bulk(struct dpif_dpdk_message *request,
                    const struct ofpbuf *const *packets, size_t num_pkts)
{
    struct rte_mbuf *mbufs[PKT_BURST_SIZE] = {NULL};
    uint8_t *mbuf_data = NULL;
    int i = 0;
    int ret = 0;

    if (num_pkts > PKT_BURST_SIZE) {
        return EINVAL;
    }

    DPDK_DEBUG()

    for (i = 0; i < num_pkts; i++) {
        mbufs[i] = rte_pktmbuf_alloc(mp);
        if (!mbufs[i]) {
            /* Free any mbufs already allocated for this burst. */
            int j;

            for (j = 0; j < i; j++) {
                rte_pktmbuf_free(mbufs[j]);
            }
            return ENOBUFS;
        }

        mbuf_data = rte_pktmbuf_mtod(mbufs[i], uint8_t *);
        rte_memcpy(mbuf_data, &request[i], sizeof(request[i]));

        if (request->type == DPIF_DPDK_PACKET_FAMILY) {
            mbuf_data = mbuf_data + sizeof(request[i]);
            if (likely(packets[i]->size <=
                       (mbufs[i]->buf_len - sizeof(request[i])))) {
                rte_memcpy(mbuf_data, packets[i]->data, packets[i]->size);
                rte_pktmbuf_data_len(mbufs[i]) =
                    sizeof(request[i]) + packets[i]->size;
                rte_pktmbuf_pkt_len(mbufs[i]) =
                    rte_pktmbuf_data_len(mbufs[i]);
            } else {
                RTE_LOG(ERR, APP, "%s, %d: %s", __FUNCTION__, __LINE__,
                        "memcpy prevented: packet size exceeds available"
                        " mbuf space");
                for (i = 0; i < num_pkts; i++) {
                    rte_pktmbuf_free(mbufs[i]);
                }
                return ENOMEM;
            }
        } else {
            rte_pktmbuf_data_len(mbufs[i]) = sizeof(request[i]);
            rte_pktmbuf_pkt_len(mbufs[i]) = rte_pktmbuf_data_len(mbufs[i]);
        }
    }

    ret = rte_ring_sp_enqueue_bulk(message_ring, (void * const *)mbufs,
                                   num_pkts);
    if (ret == -ENOBUFS) {
        for (i = 0; i < num_pkts; i++) {
            rte_pktmbuf_free(mbufs[i]);
        }
        ret = ENOBUFS;
    } else if (unlikely(ret == -EDQUOT)) {
        ret = EDQUOT;
    }

    return ret;
}
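For context on the return handling: with the older rte_ring API used here, rte_ring_sp_enqueue_bulk() returns 0 on success, -ENOBUFS when the ring lacks room (nothing is enqueued), and -EDQUOT when the objects were enqueued but the ring crossed its high-water mark — which is why -EDQUOT is remapped rather than treated as a drop.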
int
dpdpcap_transmit_in_loop(pcap_t *p, const u_char *buf, int size, int number)
{
    int transmitLcoreId = 0;
    int i = 0;

    if (p == NULL || buf == NULL ||
        p->deviceId < 0 || p->deviceId > RTE_MAX_ETHPORTS) {
        snprintf(errbuf_g, PCAP_ERRBUF_SIZE, "Invalid parameter");
        return DPDKPCAP_FAILURE;
    }

    for (i = 0; i < DEF_PKT_BURST; i++) {
        mbuf_g[i] = rte_pktmbuf_alloc(txPool);
        if (mbuf_g[i] == NULL) {
            snprintf(errbuf_g, PCAP_ERRBUF_SIZE,
                     "Could not allocate buffer on port %d\n", p->deviceId);
            return DPDKPCAP_FAILURE;
        }

        struct rte_mbuf *mbuf = mbuf_g[i];
        if (mbuf->buf_len < size) {
            snprintf(errbuf_g, PCAP_ERRBUF_SIZE,
                     "Cannot copy packet data: packet size %d, "
                     "mbuf length %d, port %d\n",
                     size, mbuf->buf_len, p->deviceId);
            return DPDKPCAP_FAILURE;
        }

        rte_memcpy(mbuf->pkt.data, buf, size);
        mbuf->pkt.data_len = size;
        mbuf->pkt.pkt_len = size;
        mbuf->pkt.nb_segs = 1;
        /* Bump the refcount so the mbuf survives repeated transmission. */
        rte_pktmbuf_refcnt_update(mbuf, 1);
    }

    dpdkpcap_tx_args_t args;
    args.number = number;
    args.portId = p->deviceId;

    transmitLcoreId = p->deviceId + 1;
    debug("Transferring TX loop to the core %u\n", transmitLcoreId);

    if (rte_eal_remote_launch(txLoop, &args, transmitLcoreId) < 0) {
        snprintf(errbuf_g, PCAP_ERRBUF_SIZE,
                 "Cannot run TX on a slave core: transmitLcoreId %d\n",
                 transmitLcoreId);
        return DPDKPCAP_FAILURE;
    }

    rte_eal_wait_lcore(transmitLcoreId);
    return DPDKPCAP_OK;
}
/**
 * @brief Allocate a TX mbuf from the pool belonging to a DPDK device
 *
 * @param devId uint8_t, ID of DPDK device
 *
 * @return MBuf_t*, pointer to allocated MBuf or NULL if failed
 */
MBuf_t *DPDKAdapter::txMbufAlloc(uint8_t devId)
{
    if (devId >= RTE_MAX_ETHPORTS) {
        qCritical("Device ID is out of range");
        return NULL;
    }

    return rte_pktmbuf_alloc(findMPool(devId, TX_));
}
void
xmit_arp_req(struct gatekeeper_if *iface, const struct ipaddr *addr,
             const struct ether_addr *ha, uint16_t tx_queue)
{
    struct rte_mbuf *created_pkt;
    struct ether_hdr *eth_hdr;
    struct arp_hdr *arp_hdr;
    size_t pkt_size;
    struct lls_config *lls_conf = get_lls_conf();
    int ret;

    struct rte_mempool *mp = lls_conf->net->gatekeeper_pktmbuf_pool[
        rte_lcore_to_socket_id(lls_conf->lcore_id)];
    created_pkt = rte_pktmbuf_alloc(mp);
    if (created_pkt == NULL) {
        LLS_LOG(ERR, "Could not alloc a packet for an ARP request\n");
        return;
    }

    pkt_size = iface->l2_len_out + sizeof(struct arp_hdr);
    created_pkt->data_len = pkt_size;
    created_pkt->pkt_len = pkt_size;

    /* Set up Ethernet header. */
    eth_hdr = rte_pktmbuf_mtod(created_pkt, struct ether_hdr *);
    ether_addr_copy(&iface->eth_addr, &eth_hdr->s_addr);
    if (ha == NULL)
        memset(&eth_hdr->d_addr, 0xFF, ETHER_ADDR_LEN);
    else
        ether_addr_copy(ha, &eth_hdr->d_addr);

    /* Set up VLAN header. */
    if (iface->vlan_insert)
        fill_vlan_hdr(eth_hdr, iface->vlan_tag_be, ETHER_TYPE_ARP);
    else
        eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_ARP);

    /* Set up ARP header. */
    arp_hdr = pkt_out_skip_l2(iface, eth_hdr);
    arp_hdr->arp_hrd = rte_cpu_to_be_16(ARP_HRD_ETHER);
    arp_hdr->arp_pro = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
    arp_hdr->arp_hln = ETHER_ADDR_LEN;
    arp_hdr->arp_pln = sizeof(struct in_addr);
    arp_hdr->arp_op = rte_cpu_to_be_16(ARP_OP_REQUEST);
    ether_addr_copy(&iface->eth_addr, &arp_hdr->arp_data.arp_sha);
    arp_hdr->arp_data.arp_sip = iface->ip4_addr.s_addr;
    memset(&arp_hdr->arp_data.arp_tha, 0, ETHER_ADDR_LEN);
    arp_hdr->arp_data.arp_tip = addr->ip.v4.s_addr;

    ret = rte_eth_tx_burst(iface->id, tx_queue, &created_pkt, 1);
    if (ret <= 0) {
        rte_pktmbuf_free(created_pkt);
        LLS_LOG(ERR, "Could not transmit an ARP request\n");
    }
}
void
pktgen_send_arp(uint32_t pid, uint32_t type, uint8_t seq_idx)
{
    port_info_t *info = &pktgen.info[pid];
    pkt_seq_t *pkt;
    struct rte_mbuf *m;
    struct ether_hdr *eth;
    arpPkt_t *arp;
    uint32_t addr;
    uint8_t qid = 0;

    pkt = &info->seq_pkt[seq_idx];
    m = rte_pktmbuf_alloc(info->q[qid].special_mp);
    if (unlikely(m == NULL)) {
        pktgen_log_warning("No packet buffers found");
        return;
    }
    eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
    arp = (arpPkt_t *)&eth[1];

    /* src and dest addr */
    memset(&eth->d_addr, 0xFF, 6);
    ether_addr_copy(&pkt->eth_src_addr, &eth->s_addr);
    eth->ether_type = htons(ETHER_TYPE_ARP);

    memset(arp, 0, sizeof(arpPkt_t));
    rte_memcpy(&arp->sha, &pkt->eth_src_addr, 6);
    addr = htonl(pkt->ip_src_addr);
    inetAddrCopy(&arp->spa, &addr);

    if (likely(type == GRATUITOUS_ARP)) {
        rte_memcpy(&arp->tha, &pkt->eth_src_addr, 6);
        addr = htonl(pkt->ip_src_addr);
        inetAddrCopy(&arp->tpa, &addr);
    } else {
        memset(&arp->tha, 0, 6);
        addr = htonl(pkt->ip_dst_addr);
        inetAddrCopy(&arp->tpa, &addr);
    }

    /* Fill in the rest of the ARP packet header */
    arp->hrd = htons(ETH_HW_TYPE);
    arp->pro = htons(ETHER_TYPE_IPv4);
    arp->hln = 6;
    arp->pln = 4;
    arp->op = htons(ARP_REQUEST);

    m->pkt.pkt_len = 60;
    m->pkt.data_len = 60;

    pktgen_send_mbuf(m, pid, qid);
    pktgen_set_q_flags(info, qid, DO_TX_FLUSH);
}
int
init_mempool(void)
{
#if (!PER_CORE)
    int initialized[RTE_MAX_NUMA_NODES];

    for (int i = 0; i < RTE_MAX_NUMA_NODES; i++) {
        initialized[i] = 0;
    }
#endif
    /* Loop through all cores, to see if any of them belong to this
     * socket. */
    for (int i = 0; i < RTE_MAX_LCORE; i++) {
        int sid = rte_lcore_to_socket_id(i);
#if (!PER_CORE)
        if (!initialized[sid]) {
#endif
            struct rte_mbuf *mbuf;

            if (!init_mempool_socket(i, sid)) {
                goto fail;
            }

            /* Initialize mbuf template */
#if PER_CORE
            mbuf = rte_pktmbuf_alloc(pframe_pool[i]);
            if (mbuf == NULL) {
                goto fail;
            }
            mbuf_template[i] = *mbuf;
            rte_pktmbuf_free(mbuf);
#else
            mbuf = rte_pktmbuf_alloc(pframe_pool[sid]);
            if (mbuf == NULL) {
                goto fail;
            }
            mbuf_template[sid] = *mbuf;
            rte_pktmbuf_free(mbuf);
#endif
#if (!PER_CORE)
            initialized[sid] = 1;
        }
#endif
    }
    return 0;

fail:
    /* FIXME: Should ideally free up the pools here, but have no way of
     * doing so currently */
    return -ENOMEM;
}
static void
kni_allocate_mbufs(struct rte_kni *kni)
{
    int i, ret;
    struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
    void *phys[MAX_MBUF_BURST_NUM];

    /* The KNI kernel module reads these fields through struct
     * rte_kni_mbuf, so the offsets must match. */
    RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pool) !=
                     offsetof(struct rte_kni_mbuf, pool));
    RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_addr) !=
                     offsetof(struct rte_kni_mbuf, buf_addr));
    RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, next) !=
                     offsetof(struct rte_kni_mbuf, next));
    RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
                     offsetof(struct rte_kni_mbuf, data_off));
    RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
                     offsetof(struct rte_kni_mbuf, data_len));
    RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
                     offsetof(struct rte_kni_mbuf, pkt_len));
    RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
                     offsetof(struct rte_kni_mbuf, ol_flags));

    /* Check if pktmbuf pool has been configured */
    if (kni->pktmbuf_pool == NULL) {
        RTE_LOG(ERR, KNI, "No valid mempool for allocating mbufs\n");
        return;
    }

    for (i = 0; i < MAX_MBUF_BURST_NUM; i++) {
        pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool);
        if (unlikely(pkts[i] == NULL)) {
            /* Out of memory */
            RTE_LOG(ERR, KNI, "Out of memory\n");
            break;
        }
        phys[i] = va2pa(pkts[i]);
    }

    /* No pkt mbufs allocated */
    if (i <= 0)
        return;

    ret = kni_fifo_put(kni->alloc_q, phys, i);

    /* Free any mbufs that were not put into alloc_q */
    if (ret >= 0 && ret < i && ret < MAX_MBUF_BURST_NUM) {
        int j;

        for (j = ret; j < i; j++)
            rte_pktmbuf_free(pkts[j]);
    }
}
/*
 * Test allocation and free of mbufs.
 */
static int
test_pktmbuf_pool(void)
{
    unsigned i;
    struct rte_mbuf *m[NB_MBUF];
    int ret = 0;

    for (i = 0; i < NB_MBUF; i++)
        m[i] = NULL;

    /* alloc NB_MBUF mbufs */
    for (i = 0; i < NB_MBUF; i++) {
        m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
        if (m[i] == NULL) {
            printf("rte_pktmbuf_alloc() failed (%u)\n", i);
            ret = -1;
        }
    }

    /* The pool should now be empty: one more alloc must fail. */
    struct rte_mbuf *extra = NULL;
    extra = rte_pktmbuf_alloc(pktmbuf_pool);
    if (extra != NULL) {
        printf("Error: expected the pool to be empty\n");
        ret = -1;
    }
#ifdef RTE_MBUF_SCATTER_GATHER
    extra = rte_pktmbuf_clone(m[0], pktmbuf_pool);
    if (extra != NULL) {
        printf("Error: expected the pool to be empty\n");
        ret = -1;
    }
#endif

    /* free them */
    for (i = 0; i < NB_MBUF; i++) {
        if (m[i] != NULL)
            rte_pktmbuf_free(m[i]);
    }

    return ret;
}
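The clone check above works because rte_pktmbuf_clone() allocates its indirect mbuf from the pool passed in; once that pool is exhausted, the clone must fail as well.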
/**
 * A call to tx_sync_ring will try to empty a Netmap TX ring by converting
 * its buffers into rte_mbufs and sending them out on the ring's DPDK port.
 */
static int
tx_sync_ring(struct netmap_ring *ring, uint8_t port, uint16_t ring_number,
             struct rte_mempool *pool, uint16_t max_burst)
{
    uint32_t i, n_tx;
    uint16_t burst_size;
    uint32_t cur_slot, n_used_slots;
    struct rte_mbuf *tx_mbufs[COMPAT_NETMAP_MAX_BURST];

    n_used_slots = ring->num_slots - ring->avail;
    n_used_slots = RTE_MIN(n_used_slots, max_burst);
    cur_slot = (ring->cur + ring->avail) & (ring->num_slots - 1);

    while (n_used_slots) {
        burst_size = (uint16_t)RTE_MIN(n_used_slots, RTE_DIM(tx_mbufs));

        for (i = 0; i < burst_size; i++) {
            tx_mbufs[i] = rte_pktmbuf_alloc(pool);
            if (tx_mbufs[i] == NULL)
                goto err;

            slot_to_mbuf(ring, cur_slot, tx_mbufs[i]);
            cur_slot = NETMAP_RING_NEXT(ring, cur_slot);
        }

        n_tx = rte_eth_tx_burst(port, ring_number, tx_mbufs, burst_size);

        /* Update the Netmap ring structure to reflect the change */
        ring->avail += n_tx;
        n_used_slots -= n_tx;

        /* Return the mbufs that failed to transmit to their pool */
        if (unlikely(n_tx != burst_size)) {
            for (i = n_tx; i < burst_size; i++)
                rte_pktmbuf_free(tx_mbufs[i]);
            break;
        }
    }

    return 0;

err:
    /* Free the mbufs allocated so far in this burst. */
    while (i != 0)
        rte_pktmbuf_free(tx_mbufs[--i]);

    RTE_LOG(ERR, USER1,
            "Couldn't get mbuf from mempool; is the mempool too small?\n");
    return -1;
}
static struct rte_mbuf *
build_packet(const unsigned char *data, size_t len)
{
    struct rte_mempool *mp = pg_get_mempool();
    struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);
    void *packet;

    if (pkt == NULL)
        return NULL;

    pkt->pkt_len = len;
    pkt->data_len = len;
    pkt->nb_segs = 1;
    pkt->next = NULL;

    packet = rte_pktmbuf_mtod(pkt, void *);
    memcpy(packet, data, len);

    return pkt;
}
static struct rte_mbuf *
dpdk_replicate_packet_mb (vlib_buffer_t * b)
{
  vlib_main_t * vm = vlib_get_main();
  vlib_buffer_main_t * bm = vm->buffer_main;
  struct rte_mbuf * first_mb = 0, * new_mb, * pkt_mb, ** prev_mb_next = 0;
  u8 nb_segs, nb_segs_left;
  u32 copy_bytes;
  unsigned socket_id = rte_socket_id();

  ASSERT (bm->pktmbuf_pools[socket_id]);
  pkt_mb = ((struct rte_mbuf *)b) - 1;
  nb_segs = pkt_mb->nb_segs;
  for (nb_segs_left = nb_segs; nb_segs_left; nb_segs_left--)
    {
      if (PREDICT_FALSE(pkt_mb == 0))
        {
          clib_warning ("Missing %d mbuf chain segment(s): "
                        "(nb_segs = %d, nb_segs_left = %d)!",
                        nb_segs - nb_segs_left, nb_segs, nb_segs_left);
          if (first_mb)
            rte_pktmbuf_free(first_mb);
          return NULL;
        }

      new_mb = rte_pktmbuf_alloc (bm->pktmbuf_pools[socket_id]);
      if (PREDICT_FALSE(new_mb == 0))
        {
          if (first_mb)
            rte_pktmbuf_free(first_mb);
          return NULL;
        }

      /*
       * Copy packet info into 1st segment.
       */
      if (first_mb == 0)
        {
          first_mb = new_mb;
          rte_pktmbuf_pkt_len (first_mb) = pkt_mb->pkt_len;
          first_mb->nb_segs = pkt_mb->nb_segs;
          first_mb->port = pkt_mb->port;
#ifdef DAW_FIXME // TX Offload support TBD
          first_mb->vlan_macip = pkt_mb->vlan_macip;
          first_mb->hash = pkt_mb->hash;
          first_mb->ol_flags = pkt_mb->ol_flags;
#endif
        }
      else
        {
/* Modify packet ethernet header */
static void
test_action_execute_set_ethernet(int argc, char *argv[])
{
    struct rte_mempool *pktmbuf_pool;
    struct action action_multiple[MAX_ACTIONS] = {0};

    pktmbuf_pool = rte_mempool_create(
        "MProc_pktmbuf_pool",
        20,                                      /* num mbufs */
        2048 + sizeof(struct rte_mbuf) + 128,    /* pktmbuf size */
        32,                                      /* cache size */
        sizeof(struct rte_pktmbuf_pool_private),
        rte_pktmbuf_pool_init, NULL,
        rte_pktmbuf_init, NULL,
        0, 0);

    struct rte_mbuf *ethernet_buf = rte_pktmbuf_alloc(pktmbuf_pool);

    struct ovs_key_ethernet set_ethernet;
    __u8 eth_src_set[6] = {0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE};
    __u8 eth_dst_set[6] = {0xCA, 0xFE, 0xDE, 0xAD, 0xBE, 0xEF};
    memcpy(&set_ethernet.eth_src, &eth_src_set, sizeof(eth_src_set));
    memcpy(&set_ethernet.eth_dst, &eth_dst_set, sizeof(eth_dst_set));

    struct ovs_key_ethernet ethernet_orig;
    __u8 eth_src_orig[6] = {0xFF, 0xFF, 0xFF, 0xCC, 0xCC, 0xCC};
    __u8 eth_dst_orig[6] = {0xAA, 0xAA, 0xAA, 0xEE, 0xEE, 0xEE};
    memcpy(&ethernet_orig.eth_src, &eth_src_orig, sizeof(eth_src_orig));
    memcpy(&ethernet_orig.eth_dst, &eth_dst_orig, sizeof(eth_dst_orig));

    vport_init();

    action_multiple[0].type = ACTION_SET_ETHERNET;
    action_multiple[0].data.ethernet = set_ethernet;
    action_null_build(&action_multiple[1]);

    struct ovs_key_ethernet *pktmbuf_data =
        rte_pktmbuf_mtod(ethernet_buf, struct ovs_key_ethernet *);
    memcpy(pktmbuf_data, &ethernet_orig, sizeof(ethernet_orig));

    action_execute(action_multiple, ethernet_buf);

    pktmbuf_data = rte_pktmbuf_mtod(ethernet_buf, struct ovs_key_ethernet *);

    /* Can't compare struct directly as ovs_key_ethernet has src first then
     * dst whereas the real ethernet header has dst first then source */
    assert(memcmp(pktmbuf_data, &set_ethernet.eth_dst,
                  sizeof(eth_dst_set)) == 0);
    assert(memcmp((uint8_t *)pktmbuf_data + sizeof(eth_dst_set),
                  &set_ethernet.eth_src, sizeof(eth_src_set)) == 0);

    rte_pktmbuf_free(ethernet_buf);
}
/* Try to execute action with a pop vlan and output action, which should
 * succeed */
static void
test_action_execute_multiple_actions__pop_vlan_and_output(int argc,
                                                          char *argv[])
{
    /* We write some data into the place where a VLAN tag would be and the
     * 4 bytes after. We then call action execute and make sure the fake
     * VLAN tag is gone and has been replaced by the data in the 4 bytes
     * after.
     *
     * We then output the packet to a port and make the same checks */
    struct rte_mempool *pktmbuf_pool;
    struct action action_multiple[MAX_ACTIONS] = {0};
    int count = 0;

    pktmbuf_pool = rte_mempool_create(
        "MProc_pktmbuf_pool",
        20,                                      /* num mbufs */
        2048 + sizeof(struct rte_mbuf) + 128,    /* pktmbuf size */
        32,                                      /* cache size */
        sizeof(struct rte_pktmbuf_pool_private),
        rte_pktmbuf_pool_init, NULL,
        rte_pktmbuf_init, NULL,
        0, 0);

    struct rte_mbuf *vlan_output_buf = rte_pktmbuf_alloc(pktmbuf_pool);

    vport_init();

    /* We have no real packet, but the function which pops the VLAN does
     * some checks of pkt len, so we define a fake one here */
    vlan_output_buf->pkt.pkt_len = 20;

    action_pop_vlan_build(&action_multiple[0]);
    action_output_build(&action_multiple[1], 17);
    action_null_build(&action_multiple[2]);

    int *pktmbuf_data = rte_pktmbuf_mtod(vlan_output_buf, int *);
    *(pktmbuf_data + 2) = 0xCAFED00D;
    /* Note last 2 bytes must be 0081, i.e. 8100 in network format */
    *(pktmbuf_data + 3) = 0x00000081;
    /* 12 bytes after src/dst MAC is vlan */
    *(pktmbuf_data + 4) = 0xBABEFACE;

    action_execute(action_multiple, vlan_output_buf);

    pktmbuf_data = rte_pktmbuf_mtod(vlan_output_buf, int *);
    assert(*(pktmbuf_data + 3) != 0x00000081);
    assert(*(pktmbuf_data + 3) == 0xBABEFACE);

    count = receive_from_vport(17, &vlan_output_buf);
    pktmbuf_data = rte_pktmbuf_mtod(vlan_output_buf, int *);
    assert(count == 1);
    assert(*(pktmbuf_data + 3) != 0x00000081);
    assert(*(pktmbuf_data + 3) == 0xBABEFACE);
}
/* Try to execute action with the push vlan (PCP) action, which should
 * succeed */
static void
test_action_execute_push_vlan__pcp(int argc, char *argv[])
{
    /* Write Ethertype value of 0x0800 to byte 11 of the packet,
     * where it is expected, and assign a length to the packet.
     * After call to action_execute:
     * - the length of the packet should have increased by 4 bytes
     * - the value of byte 11 should be 0x8100 (0081 in network format)
     * - the value of byte 15 should be 0x0800 (0008 in network format)
     * - the value of the TCI field should be equal to the assigned
     *   value
     */
    struct rte_mempool *pktmbuf_pool;
    struct action action_multiple[MAX_ACTIONS] = {0};

    pktmbuf_pool = rte_mempool_create(
        "MProc_pktmbuf_pool",
        20,                                      /* num mbufs */
        2048 + sizeof(struct rte_mbuf) + 128,    /* pktmbuf size */
        32,                                      /* cache size */
        sizeof(struct rte_pktmbuf_pool_private),
        rte_pktmbuf_pool_init, NULL,
        rte_pktmbuf_init, NULL,
        0, 0);

    struct rte_mbuf *pcp_buf = rte_pktmbuf_alloc(pktmbuf_pool);
    uint16_t pcp_tci = htons(0x2000); /* PCP is the upper 3 bits of the TCI */

    vport_init();

    /* Set the packet length - after the VLAN tag has been inserted,
     * the value should increase by 4 bytes (i.e. the length of the tag) */
    pcp_buf->pkt.pkt_len = 64;

    action_push_vlan_build(&action_multiple[0], pcp_tci);
    action_null_build(&action_multiple[1]);

    short *pkt_data = rte_pktmbuf_mtod(pcp_buf, short *);
    *(pkt_data + 6) = 0x0008; /* Set Ethertype to 0008, i.e. 0800 in
                               * network format */

    action_execute(action_multiple, pcp_buf);

    pkt_data = rte_pktmbuf_mtod(pcp_buf, short *);
    assert(*(pkt_data + 6) == 0x0081);  /* 802.1Q Ethertype inserted */
    assert(*(pkt_data + 7) == 0x0020);  /* TCI value inserted */
    assert(*(pkt_data + 8) == 0x0008);  /* Ethertype shifted by 4 bytes */
    assert(pcp_buf->pkt.pkt_len == 68); /* Length increased by 4 bytes */

    rte_pktmbuf_free(pcp_buf);
}
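As a reminder of the arithmetic the asserts rely on: the 16-bit TCI is laid out as PCP(3) | CFI(1) | VLAN ID(12), so 0x2000 encodes PCP = 1 with VLAN ID 0, and htons() converts it to the on-wire byte order that the little-endian reads above see as 0x0020.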
int
pcap_sendpacket(pcap_t *p, const u_char *buf, int size)
{
    int ret = 0;
    struct rte_mbuf *mbuf = NULL;

    if (p == NULL || buf == NULL ||
        p->deviceId < 0 || p->deviceId > RTE_MAX_ETHPORTS) {
        snprintf(errbuf_g, PCAP_ERRBUF_SIZE, "Invalid parameter");
        return DPDKPCAP_FAILURE;
    }

    mbuf = rte_pktmbuf_alloc(txPool);
    if (mbuf == NULL) {
        snprintf(errbuf_g, PCAP_ERRBUF_SIZE,
                 "Could not allocate buffer on port %d\n", p->deviceId);
        return DPDKPCAP_FAILURE;
    }

    if (mbuf->buf_len < size) {
        snprintf(errbuf_g, PCAP_ERRBUF_SIZE,
                 "Cannot copy packet data: packet size %d, "
                 "mbuf length %d, port %d\n",
                 size, mbuf->buf_len, p->deviceId);
        rte_pktmbuf_free(mbuf);
        return DPDKPCAP_FAILURE;
    }

    rte_memcpy(mbuf->pkt.data, buf, size);
    mbuf->pkt.data_len = size;
    mbuf->pkt.pkt_len = size;
    mbuf->pkt.nb_segs = 1;

    /* Retry until the NIC accepts the packet. */
    while (1) {
        ret = rte_eth_tx_burst(p->deviceId, 0, &mbuf, 1);
        if (ret == 1) {
            break;
        }
    }

    debug("Sent a packet to port %d\n", p->deviceId);
    return DPDKPCAP_OK;
}
static int
usock_read_init(struct vr_usocket *usockp)
{
    usockp->usock_read_offset = 0;

    switch (usockp->usock_proto) {
    case NETLINK:
        if (usockp->usock_parent) {
            usockp->usock_read_len = NLMSG_HDRLEN;
            usockp->usock_state = READING_HEADER;
        }
        break;

    case EVENT:
        usockp->usock_read_len = USOCK_EVENT_BUF_LEN;
        usockp->usock_state = READING_DATA;
        break;

    case PACKET:
        if (usockp->usock_mbuf) {
            RTE_LOG(ERR, VROUTER,
                    "Error initializing usock read: mbuf already exists\n");
            return -EINVAL;
        }
        usockp->usock_mbuf = rte_pktmbuf_alloc(usockp->usock_mbuf_pool);
        if (!usockp->usock_mbuf) {
            RTE_LOG(ERR, VROUTER,
                    "Error initializing usock read: cannot allocate mbuf\n");
            return -ENOMEM;
        }
        usockp->usock_rx_buf = rte_pktmbuf_mtod(usockp->usock_mbuf, char *);
        usockp->usock_buf_len = usockp->usock_mbuf->buf_len -
                                rte_pktmbuf_headroom(usockp->usock_mbuf);
        usockp->usock_read_len = usockp->usock_buf_len;
        usockp->usock_state = READING_DATA;
        break;

    default:
        break;
    }

    return 0;
}
/*
 * To send, we copy the data from the TCP/IP stack memory into DPDK
 * memory. TODO: share TCP/IP stack mbufs with DPDK mbufs to avoid
 * data copy.
 */
void
VIFHYPER_SEND(struct virtif_user *viu, struct iovec *iov, size_t iovlen)
{
    struct rte_mbuf *m;
    void *dptr;
    unsigned i;

    m = rte_pktmbuf_alloc(mbpool);
    if (m == NULL) {
        /* log error somehow? */
        return;
    }

    for (i = 0; i < iovlen; i++) {
        dptr = rte_pktmbuf_append(m, iov[i].iov_len);
        if (dptr == NULL) {
            /* log error somehow? */
            rte_pktmbuf_free(m);
            return;
        }
        memcpy(dptr, iov[i].iov_base, iov[i].iov_len);
    }

    /* tx_burst owns accepted mbufs; free ours if nothing was sent. */
    if (rte_eth_tx_burst(IF_PORTID, 0, &m, 1) == 0)
        rte_pktmbuf_free(m);
}
static struct rte_mbuf *
get_delay_pkt_bad_crc(struct rte_mempool *pool, uint32_t *rem_delay,
                      uint32_t min_pkt_size)
{
    // _Thread_local support seems to suck in (older?) gcc versions?
    // this should give us the best compatibility
    static __thread uint32_t target = 0;
    static __thread uint32_t current = 0;
    uint32_t delay = *rem_delay;

    target += delay;
    if (target < current) {
        // don't add a delay
        *rem_delay = 0;
        return NULL;
    }
    // add delay
    target -= current;
    current = 0;
    if (delay < min_pkt_size) {
        *rem_delay = min_pkt_size; // will be set to 0 at the end of the function
        delay = min_pkt_size;
    }
    // calculate the optimum packet size
    if (delay < 1538) {
        // small enough to emit as a single frame; keep as-is
    } else if (delay > 2000) {
        // 2000 is an arbitrarily chosen value as it doesn't really matter;
        // we just need to avoid doing something stupid for packet sizes
        // that are just over 1538 bytes
        delay = 1538;
    } else {
        // delay between 1538 and 2000
        delay = delay / 2;
    }
    *rem_delay -= delay;
    struct rte_mbuf *pkt = rte_pktmbuf_alloc(pool);
    // account for preamble, SFD, and IFG (CRC is disabled)
    pkt->data_len = delay - 20;
    pkt->pkt_len = delay - 20;
    pkt->ol_flags |= PKT_TX_NO_CRC_CSUM;
    current += delay;
    return pkt;
}
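The 20-byte adjustment follows from Ethernet framing overhead: 7 bytes of preamble, 1 byte of SFD, and 12 bytes of inter-frame gap total 20 bytes of wire time per frame that carry no payload; the 4-byte CRC is excluded because PKT_TX_NO_CRC_CSUM disables it here.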
/*
 * Arrange for mbuf to be transmitted.
 *
 * TODO: use bulk transfers. This should not be too difficult and will
 * have a big performance impact.
 */
void
VIFHYPER_SENDMBUF(struct virtif_user *viu, struct mbuf *m0, int pktlen,
                  void *d, int dlen)
{
    struct rte_mbuf *rm;
    struct mbuf *m;
    void *rmdptr;

    rm = rte_pktmbuf_alloc(mbpool_tx);
    if (rm == NULL) {
        /* log error somehow? */
        VIF_MBUF_FREE(m0);
        return;
    }

    for (m = m0; m; ) {
        rmdptr = rte_pktmbuf_append(rm, dlen);
        if (rmdptr == NULL) {
            /* log error somehow? */
            rte_pktmbuf_free(rm);
            VIF_MBUF_FREE(m0);
            return;
        }
        memcpy(rmdptr, d, dlen); /* XXX */
        VIF_MBUF_NEXT(m, &m, &d, &dlen);
    }
    VIF_MBUF_FREE(m0);

    /* tx_burst owns accepted mbufs; free ours if nothing was sent. */
    if (rte_eth_tx_burst(IF_PORTID, 0, &rm, 1) == 0)
        rte_pktmbuf_free(rm);
}
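The TODO above calls for bulk transfers; the sketch below shows one way that could look (vif_tx_flush, TX_BATCH, and the batch array are assumptions for illustration, not part of the original driver): mbufs are collected into an array and flushed with a single rte_eth_tx_burst() call, freeing whatever the driver does not accept.

#define TX_BATCH 32

static struct rte_mbuf *tx_batch[TX_BATCH];
static unsigned int tx_batch_len;

static void
vif_tx_flush(void)
{
    /* Hand the whole batch to the driver in one call. */
    uint16_t sent = rte_eth_tx_burst(IF_PORTID, 0, tx_batch, tx_batch_len);

    /* Free whatever the driver did not accept. */
    while (sent < tx_batch_len)
        rte_pktmbuf_free(tx_batch[sent++]);
    tx_batch_len = 0;
}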
/* Modify packet ipv4 header */
static void
test_action_execute_set_ipv4(int argc, char *argv[])
{
    struct rte_mempool *pktmbuf_pool;
    struct action action_multiple[MAX_ACTIONS] = {0};
    struct ipv4_hdr *pkt_ipv4;

    pktmbuf_pool = rte_mempool_create(
        "MProc_pktmbuf_pool",
        20,                                      /* num mbufs */
        2048 + sizeof(struct rte_mbuf) + 128,    /* pktmbuf size */
        32,                                      /* cache size */
        sizeof(struct rte_pktmbuf_pool_private),
        rte_pktmbuf_pool_init, NULL,
        rte_pktmbuf_init, NULL,
        0, 0);

    struct rte_mbuf *ipv4_buf = rte_pktmbuf_alloc(pktmbuf_pool);

    struct ovs_key_ipv4 set_ipv4;
    set_ipv4.ipv4_tos = 0xFF;

    vport_init();

    action_multiple[0].type = ACTION_SET_IPV4;
    action_multiple[0].data.ipv4 = set_ipv4;
    action_null_build(&action_multiple[1]);

    uint8_t *pktmbuf_data = rte_pktmbuf_mtod(ipv4_buf, uint8_t *);
    pktmbuf_data += sizeof(struct ether_hdr);
    pkt_ipv4 = (struct ipv4_hdr *)(pktmbuf_data);
    pkt_ipv4->type_of_service = 0xaa;

    action_execute(action_multiple, ipv4_buf);

    pktmbuf_data = rte_pktmbuf_mtod(ipv4_buf, uint8_t *);
    pktmbuf_data += sizeof(struct ether_hdr);
    pkt_ipv4 = (struct ipv4_hdr *)(pktmbuf_data);
    assert(pkt_ipv4->type_of_service == set_ipv4.ipv4_tos);

    rte_pktmbuf_free(ipv4_buf);
}