/*
 * Test mbuf clone: allocate an mbuf, clone it as a single segment,
 * then again as a two-segment chain, verifying each step.
 *
 * Returns 0 on success, -1 on failure.  Compiled to a no-op when
 * RTE_MBUF_SCATTER_GATHER is disabled (clone API unavailable).
 */
static int
testclone_testupdate_testdetach(void)
{
#ifndef RTE_MBUF_SCATTER_GATHER
	return 0;
#else
	struct rte_mbuf *mc = NULL;
	struct rte_mbuf *clone = NULL;

	/* alloc a mbuf */
	mc = rte_pktmbuf_alloc(pktmbuf_pool);
	if (mc == NULL)
		GOTO_FAIL("ooops not allocating mbuf");

	if (rte_pktmbuf_pkt_len(mc) != 0)
		GOTO_FAIL("Bad length");

	/* clone the allocated mbuf */
	clone = rte_pktmbuf_clone(mc, pktmbuf_pool);
	if (clone == NULL)
		GOTO_FAIL("cannot clone data\n");
	rte_pktmbuf_free(clone);
	clone = NULL;	/* clear dangling pointer so the fail path is safe */

	/* attach a second segment and clone the chain */
	mc->pkt.next = rte_pktmbuf_alloc(pktmbuf_pool);
	if (mc->pkt.next == NULL)
		GOTO_FAIL("Next Pkt Null\n");

	clone = rte_pktmbuf_clone(mc, pktmbuf_pool);
	if (clone == NULL)
		GOTO_FAIL("cannot clone data\n");

	/* free mbuf (frees the chained segment too) */
	rte_pktmbuf_free(mc);
	rte_pktmbuf_free(clone);
	mc = NULL;
	clone = NULL;
	return 0;

fail:
	if (mc != NULL)
		rte_pktmbuf_free(mc);
	if (clone != NULL)	/* was leaked on failure before */
		rte_pktmbuf_free(clone);
	return -1;
#endif /* RTE_MBUF_SCATTER_GATHER */
}
void counter_register_pkt(void *arg, struct rte_mbuf **buffer, int nb_rx) { if (nb_rx == 0) return; struct counter_t *counter = (struct counter_t *) arg; uint64_t start_a = rte_get_tsc_cycles(), diff_a; if (nb_rx > rte_ring_free_count(counter->ring)) { RTE_LOG(ERR, COUNTER, "Not enough free entries in ring!\n"); } // enqueue packet in ring // this methode must be thread safe struct rte_mbuf *bulk[nb_rx]; unsigned nb_registered = 0; for (unsigned i = 0; i < nb_rx; ++i) { struct ether_hdr *eth = rte_pktmbuf_mtod(buffer[i], struct ether_hdr *); if (!is_same_ether_addr(&counter->rx_register->mac, ð->d_addr)) { continue; } bulk[nb_registered] = rte_pktmbuf_clone(buffer[i], counter->clone_pool); if (bulk[nb_registered] == NULL) { RTE_LOG(ERR, COUNTER, "Could not clone mbuf!\n"); continue; } nb_registered += 1; } int n = rte_ring_enqueue_burst(counter->ring,(void * const*) &bulk, nb_registered); if (n < nb_rx) { RTE_LOG(ERR, COUNTER, "Could not enqueue every new packtes for registration! " "(%"PRIu32"/%"PRIu32") free: %"PRIu32"\n", n, nb_rx, rte_ring_free_count(counter->ring)); } diff_a = rte_get_tsc_cycles() - start_a; counter->aTime += diff_a;//* 1000.0 / rte_get_tsc_hz(); counter->nb_measurements_a += nb_rx; }
/*
 * Test allocation and free of mbufs: exhaust the pool, verify that a
 * further allocation (and a clone, when scatter-gather is enabled)
 * fails, then free everything.  Returns 0 on success, -1 on failure.
 */
static int
test_pktmbuf_pool(void)
{
	unsigned i;
	struct rte_mbuf *m[NB_MBUF];
	int ret = 0;

	for (i = 0; i < NB_MBUF; i++)
		m[i] = NULL;

	/* alloc NB_MBUF mbufs */
	for (i = 0; i < NB_MBUF; i++) {
		m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
		if (m[i] == NULL) {
			printf("rte_pktmbuf_alloc() failed (%u)\n", i);
			ret = -1;
		}
	}

	/* the pool should be exhausted now: one more alloc must fail */
	struct rte_mbuf *extra = NULL;
	extra = rte_pktmbuf_alloc(pktmbuf_pool);
	if (extra != NULL) {
		printf("Error pool not empty\n");
		rte_pktmbuf_free(extra);	/* don't leak the unexpected mbuf */
		ret = -1;
	}
#ifdef RTE_MBUF_SCATTER_GATHER
	/* guard: m[0] may be NULL if the very first alloc failed */
	if (m[0] != NULL) {
		extra = rte_pktmbuf_clone(m[0], pktmbuf_pool);
		if (extra != NULL) {
			printf("Error pool not empty\n");
			rte_pktmbuf_free(extra);
			ret = -1;
		}
	}
#endif
	/* free them */
	for (i = 0; i < NB_MBUF; i++) {
		if (m[i] != NULL)
			rte_pktmbuf_free(m[i]);
	}

	return ret;
}
/*
 * functional description:
 *   packet forward(enqueue) module, we push packet into QoS queue or
 *   TX queue.
 *   input  mod: RX_MOD_FORWARD
 *   output mod: RX_MOD_DROP RX_MOD_IDLE
 *   module stack pos: as 1st action, before drop module, and below any
 *   policy module
 *
 * Validates the packet's L2/L3 source against the learned flow table,
 * resolves the destination (broadcast fan-out or unicast lookup), then
 * enqueues the mbuf (cloning for extra broadcast ports) either directly
 * to a port TX queue or into a QoS scheduler ring, per the per-port
 * policy map.  Any failure routes the packet to RX_MOD_DROP.
 */
dbg_local enum RX_MOD_INDEX rx_module_forward(dbg_local struct rte_mbuf*pktbuf,dbg_local enum RX_MOD_INDEX imodid)
{
    dbg_local int iPortIn;
    dbg_local int iPortOut;
    dbg_local int iFlowIdx;
    dbg_local int iDstIdx;
    dbg_local int uiSIP;
    dbg_local int uiDIP;
    dbg_local int idx;
    dbg_local int rc;
    dbg_local int iUsed;
    dbg_local int iMod;
    dbg_local struct ether_hdr *eh;
    dbg_local struct ether_arp *ea;
    dbg_local struct iphdr *iph;
    /* per-output-port forward flag and mbuf (original or clone) */
    dbg_local int iPortForwardFlag[MAX_PORT_NB];
    dbg_local struct rte_mbuf*rmPortForwardMbuf[MAX_PORT_NB];
    dbg_local enum RX_MOD_INDEX nextmodid=imodid;

    /* not addressed to this module: pass through unchanged */
    if(imodid!=RX_MOD_FORWARD)
        goto local_ret;

    /* phase 1: filter packets with illegal l2/l3 addresses, drop them */
    //printf("input port:%d\n",(int)pktbuf->pkt.in_port);
    eh=rte_pktmbuf_mtod(pktbuf,struct ether_hdr*);
    uiSIP=0xffffffff;
    /* extract source/destination IP from ARP or IP header; anything
     * else is dropped */
    switch(HTONS(eh->ether_type))
    {
    case ETH_TYPE_ARP:
        ea=(struct ether_arp *)(sizeof(struct ether_hdr)+(char*)eh);
        uiSIP=HTONL(MAKEUINT32FROMUINT8ARR(ea->arp_spa));
        uiDIP=HTONL(MAKEUINT32FROMUINT8ARR(ea->arp_tpa));
        break;
    case ETH_TYPE_IP:
        iph=(struct iphdr *)(sizeof(struct ether_hdr)+(char*)eh);
        uiSIP=HTONL(iph->ip_src);
        uiDIP=HTONL(iph->ip_dst);
        break;
    default:
        goto exception_tag;
        break;
    }

    iFlowIdx=find_net_entry(uiSIP,TRUE);
    /* pkt source legality checking: source must be a learned, enabled
     * flow whose ingress port and MAC match the packet */
    if(iFlowIdx==-1)
        goto exception_tag;
    if(gFlow[iFlowIdx].b_mac_learned==FALSE)
        goto exception_tag;
    if(gFlow[iFlowIdx].b_flow_enabled==FALSE)
        goto exception_tag;
    if(gFlow[iFlowIdx].portid!=pktbuf->pkt.in_port)
        goto exception_tag;
    if(!IS_MAC_EQUAL(eh->s_addr.addr_bytes,gFlow[iFlowIdx].eaHostMAC.addr_bytes))
        goto exception_tag;

    iDstIdx=find_net_entry(uiDIP,TRUE);
    /*printf("%02x,%02x,%02x,%02x,%02x,%02x,\n",eh->d_addr.addr_bytes[0]
    ,eh->d_addr.addr_bytes[1]
    ,eh->d_addr.addr_bytes[2]
    ,eh->d_addr.addr_bytes[3]
    ,eh->d_addr.addr_bytes[4]
    ,eh->d_addr.addr_bytes[5]
    );*/

    if(IS_MAC_BROADCAST(eh->d_addr.addr_bytes)){//link broadcast
        /* fan out to every port allowed by the broadcast map; the
         * first port reuses the original mbuf, further ports get a
         * clone (a failed clone just skips that port) */
        iUsed=FALSE;
        for(idx=0;idx<gNBPort;idx++)
            if(port_brdcst_map[(int)pktbuf->pkt.in_port][idx]==TRUE){
                iPortForwardFlag[idx]=1;
                if(iUsed==FALSE){
                    rmPortForwardMbuf[idx]=pktbuf;
                    iUsed=TRUE;
                }else{
                    rmPortForwardMbuf[idx]=rte_pktmbuf_clone(pktbuf,gmempool);
                    if(!rmPortForwardMbuf[idx])
                        iPortForwardFlag[idx]=0;
                }
            }else
                iPortForwardFlag[idx]=0;
    }else{//link uicast
        for(idx=0;idx<gNBPort;idx++)
            iPortForwardFlag[idx]=0;
        if(iDstIdx!=-1){
            /* known destination: must be learned and enabled */
            // printf(".......idstidx!=-1.%08x\n",gFlow[iDstIdx].uiInnerIP);
            if(gFlow[iDstIdx].b_mac_learned==FALSE)
                goto exception_tag;
            // printf("Heror1\n");
            if(gFlow[iDstIdx].b_flow_enabled==FALSE)
                goto exception_tag;
            // printf("Heror2\n");
            iPortForwardFlag[(int)gFlow[iDstIdx].portid]=1;
            rmPortForwardMbuf[(int)gFlow[iDstIdx].portid]=pktbuf;
        } else {
            /* unknown destination: use the ingress port's default
             * forward port, or drop if none is configured */
            // printf(".......idstidx==-1\n");
            if(port_def_frd_map[(int)pktbuf->pkt.in_port]!=-1){
                iPortForwardFlag[port_def_frd_map[(int)pktbuf->pkt.in_port]]=1;
                rmPortForwardMbuf[port_def_frd_map[(int)pktbuf->pkt.in_port]]=pktbuf;
            } else
                goto exception_tag;
        }
    }

    //mirror--pkt--cloning
    /* phase 2: enqueue each flagged mbuf per the src->dst port policy */
    for(idx=0;idx<gNBPort;idx++)
    {
        if(!iPortForwardFlag[idx])continue;
        switch(port_policy_map[(int)pktbuf->pkt.in_port][idx])
        {
        case PORT_POLICY_MAP_DIRECT:
            /* straight to the port TX queue */
            //printf("...direct:%d->%d\n",(int)pktbuf->pkt.in_port,idx);
            rc=EnqueueIntoPortQueue(idx,&rmPortForwardMbuf[idx],1);
            //printf("...rc:%d\n",rc);
            if(!rc)
                goto exception_tag;
            break;
        case PORT_POLICY_MAP_QOS:
            /* via the QoS scheduler; in_port is repacked to carry
             * both ports: high nibble:dst port low nibble :src port */
            iMod=sched_mod_map[(int)rmPortForwardMbuf[idx]->pkt.in_port][idx];
            rmPortForwardMbuf[idx]->pkt.in_port=MAKEBYTE(idx,rmPortForwardMbuf[idx]->pkt.in_port);
            rc=rte_ring_mp_enqueue(sched_mod_list[iMod].rrFirLev,rmPortForwardMbuf[idx]);
            if(rc==-ENOBUFS){
                goto exception_tag;
            }
            break;
        case PORT_POLICY_MAP_UNDEFINE:
            goto exception_tag;
            break;
        }
    }

local_ret:
    return nextmodid;

exception_tag:
    /* any validation or enqueue failure: hand the packet to the drop
     * module */
    nextmodid=RX_MOD_DROP;
    goto local_ret;
}
static int packetsgen_poll(struct pg_brick *brick, uint16_t *pkts_cnt, struct pg_error **errp) { struct pg_packetsgen_state *state; struct rte_mempool *mp = pg_get_mempool(); struct rte_mbuf **pkts; struct pg_brick_side *s; uint64_t pkts_mask; int ret; uint16_t i; state = pg_brick_get_state(brick, struct pg_packetsgen_state); s = &brick->sides[state->output]; pkts = g_new0(struct rte_mbuf*, state->packets_nb); for (i = 0; i < state->packets_nb; i++) { pkts[i] = rte_pktmbuf_clone(state->packets[i], mp); pkts[i]->udata64 = i; } pkts_mask = pg_mask_firsts(state->packets_nb); *pkts_cnt = state->packets_nb; ret = pg_brick_side_forward(s, pg_flip_side(state->output), pkts, pkts_mask, errp); pg_packets_free(pkts, pkts_mask); g_free(pkts); return ret; }