/* Skip all VOID items of the pattern */
void
classify_pattern_skip_void_item(struct rte_flow_item *items,
				const struct rte_flow_item *pattern)
{
	uint32_t cpy_count = 0;
	const struct rte_flow_item *pb = pattern, *pe = pattern;

	for (;;) {
		/* Find a non-void item first */
		pb = classify_find_first_item(pb, false);
		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
			pe = pb;
			break;
		}

		/* Find a void item */
		pe = classify_find_first_item(pb + 1, true);

		cpy_count = pe - pb;
		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
		items += cpy_count;

		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
			pb = pe;
			break;
		}
		pb = pe + 1;
	}
	/* Copy the END item. */
	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
static void
copy_buf_to_pkt_segs(void *buf, unsigned len, struct rte_mbuf *pkt,
		     unsigned offset)
{
	struct rte_mbuf *seg;
	void *seg_buf;
	unsigned copy_len;

	/* Walk the segment chain until we reach the one holding "offset". */
	seg = pkt;
	while (offset >= seg->data_len) {
		offset -= seg->data_len;
		seg = seg->next;
	}
	copy_len = seg->data_len - offset;
	seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset);
	while (len > copy_len) {
		rte_memcpy(seg_buf, buf, (size_t) copy_len);
		len -= copy_len;
		buf = ((char *) buf + copy_len);
		seg = seg->next;
		seg_buf = rte_pktmbuf_mtod(seg, char *);
		copy_len = seg->data_len;
	}
	rte_memcpy(seg_buf, buf, (size_t) len);
}
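/*
 * Companion fast path (a sketch modeled on the wrapper that typically
 * accompanies this helper in DPDK's testpmd; treat the exact name as an
 * assumption): do a single rte_memcpy when the copy fits inside the first
 * segment, and fall back to the segment walk above otherwise.
 */
static void
copy_buf_to_pkt(void *buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
{
	if (offset + len <= pkt->data_len) {
		rte_memcpy(rte_pktmbuf_mtod_offset(pkt, char *, offset),
			   buf, (size_t) len);
		return;
	}
	copy_buf_to_pkt_segs(buf, len, pkt, offset);
}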
int
addr_sa_to_xaddr(struct sockaddr *sa, socklen_t slen, struct xaddr *xa)
{
	struct sockaddr_in *in4 = (struct sockaddr_in *)sa;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)sa;

	memset(xa, '\0', sizeof(*xa));

	switch (sa->sa_family) {
	case AF_INET:
		if (slen < sizeof(*in4))
			return (-1);
		xa->af = AF_INET;
		rte_memcpy(&xa->v4, &in4->sin_addr, sizeof(xa->v4));
		break;
	case AF_INET6:
		if (slen < sizeof(*in6))
			return (-1);
		xa->af = AF_INET6;
		rte_memcpy(&xa->v6, &in6->sin6_addr, sizeof(xa->v6));
		xa->scope_id = in6->sin6_scope_id;
		break;
	default:
		return (-1);
	}
	return (0);
}
static int
file_read_cached(const char *file_name, uint8_t **mem,
		 uint32_t beg, uint32_t len, uint32_t socket,
		 struct hash_set *hs)
{
	if (len == 0) {
		*mem = NULL;
		return 0;
	}

	uint8_t *data_mem;

	/* Since the configuration can reference the same file from
	   multiple places, use prox_shared infrastructure to detect
	   this and return previously loaded data. */
	char name[256];

	snprintf(name, sizeof(name), "%u-%u:%s", beg, len, file_name);
	*mem = prox_sh_find_socket(socket, name);
	if (*mem)
		return 0;

	/* Check if the file has been loaded on the other socket. */
	if (socket == 1 && (data_mem = prox_sh_find_socket(0, name))) {
		uint8_t *data_find = hash_set_find(hs, data_mem, len);
		if (!data_find) {
			data_find = prox_zmalloc(len, socket);
			PROX_PANIC(data_find == NULL, "Failed to allocate memory (%u bytes) to hold header for peer\n", len);

			rte_memcpy(data_find, data_mem, len);
			hash_set_add(hs, data_find, len);
		}
		*mem = data_find;
		prox_sh_add_socket(socket, name, *mem);
		return 0;
	}

	/* It is possible that a file with a different name contains
	   the same data. In that case, search all loaded files and
	   compare the data to reduce memory utilization. */
	data_mem = malloc(len);
	PROX_PANIC(data_mem == NULL, "Failed to allocate temporary memory to hold data\n");

	if (file_read_content(file_name, data_mem, beg, len)) {
		plog_err("%s\n", file_get_error());
		free(data_mem);
		return -1;
	}

	uint8_t *data_find = hash_set_find(hs, data_mem, len);
	if (!data_find) {
		data_find = prox_zmalloc(len, socket);
		PROX_PANIC(data_find == NULL, "Failed to allocate memory (%u bytes) to hold header for peer\n", len);

		rte_memcpy(data_find, data_mem, len);
		hash_set_add(hs, data_find, len);
	}

	free(data_mem);

	*mem = data_find;
	prox_sh_add_socket(socket, name, *mem);
	return 0;
}
/* Sends 'num_pkts' 'packets' and 'request' data to datapath. */
int
dpdk_link_send_bulk(struct dpif_dpdk_message *request,
		    const struct ofpbuf *const *packets, size_t num_pkts)
{
	struct rte_mbuf *mbufs[PKT_BURST_SIZE] = {NULL};
	uint8_t *mbuf_data = NULL;
	int i = 0;
	int ret = 0;

	if (num_pkts > PKT_BURST_SIZE) {
		return EINVAL;
	}

	DPDK_DEBUG()

	for (i = 0; i < num_pkts; i++) {
		mbufs[i] = rte_pktmbuf_alloc(mp);
		if (!mbufs[i]) {
			/* Free whatever was allocated so far. */
			while (i--) {
				rte_pktmbuf_free(mbufs[i]);
			}
			return ENOBUFS;
		}

		mbuf_data = rte_pktmbuf_mtod(mbufs[i], uint8_t *);
		rte_memcpy(mbuf_data, &request[i], sizeof(request[i]));

		if (request->type == DPIF_DPDK_PACKET_FAMILY) {
			mbuf_data = mbuf_data + sizeof(request[i]);
			if (likely(packets[i]->size <=
				   (mbufs[i]->buf_len - sizeof(request[i])))) {
				rte_memcpy(mbuf_data, packets[i]->data,
					   packets[i]->size);
				rte_pktmbuf_data_len(mbufs[i]) =
				    sizeof(request[i]) + packets[i]->size;
				rte_pktmbuf_pkt_len(mbufs[i]) =
				    rte_pktmbuf_data_len(mbufs[i]);
			} else {
				RTE_LOG(ERR, APP, "%s, %d: %s", __FUNCTION__,
					__LINE__,
					"memcpy prevented: packet size "
					"exceeds available mbuf space");
				for (i = 0; i < num_pkts; i++) {
					rte_pktmbuf_free(mbufs[i]);
				}
				return ENOMEM;
			}
		} else {
			rte_pktmbuf_data_len(mbufs[i]) = sizeof(request[i]);
			rte_pktmbuf_pkt_len(mbufs[i]) =
			    rte_pktmbuf_data_len(mbufs[i]);
		}
	}

	ret = rte_ring_sp_enqueue_bulk(message_ring,
				       (void * const *)mbufs, num_pkts);
	if (ret == -ENOBUFS) {
		for (i = 0; i < num_pkts; i++) {
			rte_pktmbuf_free(mbufs[i]);
		}
		ret = ENOBUFS;
	} else if (unlikely(ret == -EDQUOT)) {
		ret = EDQUOT;
	}

	return ret;
}
/**
 * Process the received mbox message.
 */
int
lio_mbox_process_message(struct lio_mbox *mbox)
{
	struct lio_mbox_cmd mbox_cmd;

	if (mbox->state & LIO_MBOX_STATE_ERROR) {
		if (mbox->state & (LIO_MBOX_STATE_RES_PENDING |
				   LIO_MBOX_STATE_RES_RECEIVING)) {
			rte_memcpy(&mbox_cmd, &mbox->mbox_resp,
				   sizeof(struct lio_mbox_cmd));
			mbox->state = LIO_MBOX_STATE_IDLE;
			rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
			mbox_cmd.recv_status = 1;
			if (mbox_cmd.fn)
				mbox_cmd.fn(mbox->lio_dev, &mbox_cmd,
					    mbox_cmd.fn_arg);
			return 0;
		}

		mbox->state = LIO_MBOX_STATE_IDLE;
		rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
		return 0;
	}

	if (mbox->state & LIO_MBOX_STATE_RES_RECEIVED) {
		rte_memcpy(&mbox_cmd, &mbox->mbox_resp,
			   sizeof(struct lio_mbox_cmd));
		mbox->state = LIO_MBOX_STATE_IDLE;
		rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
		mbox_cmd.recv_status = 0;
		if (mbox_cmd.fn)
			mbox_cmd.fn(mbox->lio_dev, &mbox_cmd, mbox_cmd.fn_arg);
		return 0;
	}

	if (mbox->state & LIO_MBOX_STATE_REQ_RECEIVED) {
		rte_memcpy(&mbox_cmd, &mbox->mbox_req,
			   sizeof(struct lio_mbox_cmd));
		if (!mbox_cmd.msg.s.resp_needed) {
			mbox->state &= ~LIO_MBOX_STATE_REQ_RECEIVED;
			if (!(mbox->state & LIO_MBOX_STATE_RES_PENDING))
				mbox->state = LIO_MBOX_STATE_IDLE;
			rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
		}

		lio_mbox_process_cmd(mbox, &mbox_cmd);
		return 0;
	}

	RTE_ASSERT(0);

	return 0;
}
void
pktgen_send_arp(uint32_t pid, uint32_t type, uint8_t seq_idx)
{
	port_info_t *info = &pktgen.info[pid];
	pkt_seq_t *pkt;
	struct rte_mbuf *m;
	struct ether_hdr *eth;
	arpPkt_t *arp;
	uint32_t addr;
	uint8_t qid = 0;

	pkt = &info->seq_pkt[seq_idx];
	m = rte_pktmbuf_alloc(info->q[qid].special_mp);
	if (unlikely(m == NULL)) {
		pktgen_log_warning("No packet buffers found");
		return;
	}
	eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
	arp = (arpPkt_t *)&eth[1];

	/* src and dest addr */
	memset(&eth->d_addr, 0xFF, 6);
	ether_addr_copy(&pkt->eth_src_addr, &eth->s_addr);
	eth->ether_type = htons(ETHER_TYPE_ARP);

	memset(arp, 0, sizeof(arpPkt_t));

	rte_memcpy(&arp->sha, &pkt->eth_src_addr, 6);
	addr = htonl(pkt->ip_src_addr);
	inetAddrCopy(&arp->spa, &addr);

	if (likely(type == GRATUITOUS_ARP)) {
		rte_memcpy(&arp->tha, &pkt->eth_src_addr, 6);
		addr = htonl(pkt->ip_src_addr);
		inetAddrCopy(&arp->tpa, &addr);
	} else {
		memset(&arp->tha, 0, 6);
		addr = htonl(pkt->ip_dst_addr);
		inetAddrCopy(&arp->tpa, &addr);
	}

	/* Fill in the rest of the ARP packet header */
	arp->hrd = htons(ETH_HW_TYPE);
	arp->pro = htons(ETHER_TYPE_IPv4);
	arp->hln = 6;
	arp->pln = 4;
	arp->op = htons(ARP_REQUEST);

	m->pkt.pkt_len = 60;
	m->pkt.data_len = 60;

	pktgen_send_mbuf(m, pid, qid);

	pktgen_set_q_flags(info, qid, DO_TX_FLUSH);
}
/**
 * @brief Copy a buffer to mbuf
 *
 * @param devId port number
 * @param data Data buffer
 * @param dataLen Data buffer length
 * @param pMbuf mbuf
 *
 * @return true on success
 */
bool DPDKAdapter::copyBufToMbuf(uint8_t devId, char* data, unsigned int dataLen, MBuf_t*& pMbuf)
{
    unsigned int offset = 0;

    pMbuf = NULL;

    pMbuf = DPDKAdapter::instance()->txMbufAlloc(devId);
    if (pMbuf == NULL)
    {
        qCritical("No mbuf available");
        return false;
    }

    pMbuf->pkt.data_len = dataLen < MAX_SEG_SIZE ? dataLen : MAX_SEG_SIZE;
    pMbuf->pkt.pkt_len = dataLen;
    pMbuf->pkt.nb_segs = (dataLen / MAX_SEG_SIZE) + ((dataLen % MAX_SEG_SIZE) ? 1 : 0);

    qDebug("pkt_len %u, data_len %u, nb_segs %u", pMbuf->pkt.pkt_len, pMbuf->pkt.data_len, pMbuf->pkt.nb_segs);

    rte_memcpy(pMbuf->pkt.data, data, pMbuf->pkt.data_len);

    if (dataLen <= MAX_SEG_SIZE)
    {
        return true;
    }

    dataLen -= pMbuf->pkt.data_len;
    offset = pMbuf->pkt.data_len;

    MBuf_t* pCurMbuf = pMbuf;

    // Chain additional segments until the whole buffer has been copied.
    while (dataLen > 0)
    {
        qDebug("offset %u, dataLen %u", offset, dataLen);

        pCurMbuf->pkt.next = DPDKAdapter::instance()->txMbufAlloc(devId);
        if (pCurMbuf->pkt.next == NULL)
        {
            qCritical("No mbuf available");
            return false;
        }

        pCurMbuf = pCurMbuf->pkt.next;

        pCurMbuf->pkt.data_len = dataLen < MAX_SEG_SIZE ? dataLen : MAX_SEG_SIZE;

        qDebug("pkt_len %u, data_len %u, nb_segs %u", pCurMbuf->pkt.pkt_len, pCurMbuf->pkt.data_len, pCurMbuf->pkt.nb_segs);

        rte_memcpy(pCurMbuf->pkt.data, data + offset, pCurMbuf->pkt.data_len);

        dataLen -= pCurMbuf->pkt.data_len;
        offset += pCurMbuf->pkt.data_len;
    }

    return true;
}
/*****************************************************************************
 * trace_entry_dump()
 ****************************************************************************/
void trace_entry_dump(trace_printer_cb_t printer, void *printer_arg,
		      trace_printer_fmt_cb_t fmt,
		      const char *comp_name,
		      const char *buffer, uint32_t bufsize,
		      uint32_t *start_id, uint32_t *start_pos,
		      uint32_t end_pos)
{
	uint32_t old_start = *start_pos;
	uint32_t start_raw;
	trace_level_t te_lvl;
	uint32_t te_len;
	uint32_t len_to_copy;

	trace_entry_hdr_peek_buf(buffer, bufsize, old_start, start_id,
				 &te_lvl, &te_len);

	len_to_copy = te_len - offsetof(trace_entry_t, te_buf);
	start_raw = (old_start + offsetof(trace_entry_t, te_buf)) % bufsize;

	if (start_raw + len_to_copy > bufsize) {
		/* The entry wraps around the end of the ring buffer:
		 * reassemble it in a LOCAL copy!
		 */
		char raw_entry[len_to_copy];

		rte_memcpy(raw_entry, &buffer[start_raw],
			   bufsize - start_raw);
		rte_memcpy(&raw_entry[bufsize - start_raw], buffer,
			   len_to_copy - (bufsize - start_raw));

		if (fmt) {
			char *fmt_data = fmt(raw_entry, te_len);

			printer(printer_arg, te_lvl, comp_name, fmt_data,
				strlen(fmt_data));
		} else
			printer(printer_arg, te_lvl, comp_name, raw_entry,
				te_len);

		*start_pos = len_to_copy - (bufsize - start_raw);
	} else {
		if (fmt) {
			char *fmt_data = fmt(&buffer[start_raw], te_len);

			printer(printer_arg, te_lvl, comp_name, fmt_data,
				strlen(fmt_data));
		} else
			printer(printer_arg, te_lvl, comp_name,
				&buffer[start_raw], te_len);

		*start_pos = (old_start + te_len) % bufsize;
	}

	/* Look at the next entry too in order to return the new id. */
	if (*start_pos != end_pos)
		trace_entry_hdr_peek_buf(buffer, bufsize, *start_pos,
					 start_id, NULL, NULL);
}
/* Builds up the correct configuration for vmdq+dcb based on the vlan tags array
 * given above, and the number of traffic classes available for use. */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf)
{
	struct rte_eth_vmdq_dcb_conf conf;
	struct rte_eth_vmdq_rx_conf vmdq_conf;
	struct rte_eth_dcb_rx_conf dcb_conf;
	struct rte_eth_vmdq_dcb_tx_conf tx_conf;
	uint8_t i;

	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	vmdq_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	tx_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	conf.nb_pool_maps = num_pools;
	vmdq_conf.nb_pool_maps = num_pools;
	conf.enable_default_pool = 0;
	vmdq_conf.enable_default_pool = 0;
	conf.default_pool = 0; /* set explicit value, even if not used */
	vmdq_conf.default_pool = 0;

	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		vmdq_conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = 1UL << i;
		vmdq_conf.pool_map[i].pools = 1UL << i;
	}
	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
		conf.dcb_tc[i] = i % num_tcs;
		dcb_conf.dcb_tc[i] = i % num_tcs;
		tx_conf.dcb_tc[i] = i % num_tcs;
	}
	dcb_conf.nb_tcs = (enum rte_eth_nb_tcs)num_tcs;
	(void)(rte_memcpy(eth_conf, &vmdq_dcb_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &conf,
			  sizeof(conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &dcb_conf,
			  sizeof(dcb_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &vmdq_conf,
			  sizeof(vmdq_conf)));
	(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &tx_conf,
			  sizeof(tx_conf)));
	if (rss_enable) {
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
							ETH_RSS_UDP |
							ETH_RSS_TCP |
							ETH_RSS_SCTP;
	}
	return 0;
}
/**
 * @brief Clone mbuf
 *
 * @param devId Port number
 * @param pMbufIn mbuf
 *
 * @return Cloned mbuf on success, NULL otherwise
 */
MBuf_t* DPDKAdapter::cloneMbuf(uint8_t devId, const MBuf_t* pMbufIn)
{
    MBuf_t* pMbufOut = NULL;

    if (pMbufIn == NULL)
    {
        qCritical("No mbuf provided");
        return NULL;
    }

    pMbufOut = DPDKAdapter::instance()->txMbufAlloc(devId);
    if (pMbufOut == NULL)
    {
        qCritical("No mbuf available");
        return NULL;
    }

    rte_memcpy(pMbufOut->pkt.data, pMbufIn->pkt.data, pMbufIn->pkt.data_len);

    pMbufOut->pkt.nb_segs = pMbufIn->pkt.nb_segs;
    pMbufOut->pkt.data_len = pMbufIn->pkt.data_len;
    pMbufOut->pkt.pkt_len = pMbufIn->pkt.pkt_len;

    MBuf_t* pCurMbufOut = pMbufOut;
    MBuf_t* pCurMbufIn = pMbufIn->pkt.next;

    // Clone the remaining segments of the chain.
    while (pCurMbufIn != 0)
    {
        pCurMbufOut->pkt.next = DPDKAdapter::instance()->txMbufAlloc(devId);
        if (pCurMbufOut->pkt.next == NULL)
        {
            qCritical("No mbuf available");
            return NULL;
        }

        pCurMbufOut = pCurMbufOut->pkt.next;

        qDebug("pkt_len %u, data_len %u, nb_segs %u", pCurMbufIn->pkt.pkt_len, pCurMbufIn->pkt.data_len, pCurMbufIn->pkt.nb_segs);

        rte_memcpy(pCurMbufOut->pkt.data, pCurMbufIn->pkt.data, pCurMbufIn->pkt.data_len);

        pCurMbufOut->pkt.nb_segs = pCurMbufIn->pkt.nb_segs;
        pCurMbufOut->pkt.data_len = pCurMbufIn->pkt.data_len;
        pCurMbufOut->pkt.pkt_len = pCurMbufIn->pkt.pkt_len;

        pCurMbufIn = pCurMbufIn->pkt.next;
    }

    return pMbufOut;
}
static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
	rte_memcpy(&qdev->dev_info, info, sizeof(*info));
	qdev->num_tc = qdev->dev_info.num_tc;
	qdev->ops = qed_ops;
}
int addr_pton_cidr(const char* p, struct xaddr* n, int* l)
{
	struct xaddr tmp;
	int masklen = -1;
	char addrbuf[64];
	char* mp;
	char* cp;

	/* Don't modify argument */
	if (p == NULL || strlcpy(addrbuf, p, sizeof(addrbuf)) >= sizeof(addrbuf))
		return (-1);

	if ((mp = strchr(addrbuf, '/')) != NULL) {
		*mp = '\0';
		mp++;
		masklen = (int) strtol(mp, &cp, 10);
		if (*mp == '\0' || *cp != '\0' || masklen > 128)
			return (-1);
	}

	if (addr_pton(addrbuf, &tmp) == -1)
		return (-1);

	if (mp == NULL)
		masklen = addr_unicast_masklen(tmp.af);

	if (addr_masklen_valid(tmp.af, masklen) == -1)
		return (-1);

	if (n != NULL)
		rte_memcpy(n, &tmp, sizeof(*n));

	if (l != NULL)
		*l = masklen;

	return (0);
}
/**
 * @brief Copy all mbuf segments to a buffer
 *
 * Note: every segment is freed once its contents have been copied out.
 *
 * @param devId port number
 * @param pMbuf mbuf
 * @param data Data buffer
 * @param dataLen Data buffer length
 *
 * @return true on success
 */
bool DPDKAdapter::copyMbufToBuf(uint8_t devId, MBuf_t* pMbuf, char* data, unsigned int& dataLen)
{
    qDebug("pkt_len %u, data_len %u, nb_segs %u", pMbuf->pkt.pkt_len, pMbuf->pkt.data_len, pMbuf->pkt.nb_segs);

    unsigned int segCnt = pMbuf->pkt.nb_segs;
    unsigned int offset = 0;

    MBuf_t* pNextMbuf = pMbuf;

    dataLen = pMbuf->pkt.pkt_len;

    while (segCnt > 0)
    {
        MBuf_t* pCurMbuf = pNextMbuf;

        qDebug("segCnt %u, offset %u", segCnt, offset);

        rte_memcpy(data + offset, pCurMbuf->pkt.data, pCurMbuf->pkt.data_len);

        qDebug("pkt_len %u, data_len %u", pCurMbuf->pkt.pkt_len, pCurMbuf->pkt.data_len);

        offset += pCurMbuf->pkt.data_len;

        pNextMbuf = pCurMbuf->pkt.next;

        rte_pktmbuf_free(pCurMbuf);

        segCnt--;
    }

    return true;
}
static int
rte_port_source_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
	struct rte_port_source *p = port;
	uint32_t i;

	if (rte_pktmbuf_alloc_bulk(p->mempool, pkts, n_pkts) != 0)
		return 0;

	if (p->pkt_buff != NULL) {
		for (i = 0; i < n_pkts; i++) {
			uint8_t *pkt_data = rte_pktmbuf_mtod(pkts[i],
							     uint8_t *);

			rte_memcpy(pkt_data, p->pkts[p->pkt_index],
				   p->pkt_len[p->pkt_index]);
			pkts[i]->data_len = p->pkt_len[p->pkt_index];
			pkts[i]->pkt_len = pkts[i]->data_len;

			p->pkt_index++;
			if (p->pkt_index >= p->n_pkts)
				p->pkt_index = 0;
		}
	}

	RTE_PORT_SOURCE_STATS_PKTS_IN_ADD(p, n_pkts);

	return n_pkts;
}
void
pktgen_send_ping4(uint32_t pid, uint8_t seq_idx)
{
	port_info_t *info = &pktgen.info[pid];
	pkt_seq_t *ppkt = &info->seq_pkt[PING_PKT];
	pkt_seq_t *spkt = &info->seq_pkt[seq_idx];
	struct rte_mbuf *m;
	uint8_t qid = 0;

	m = rte_pktmbuf_alloc(info->q[qid].special_mp);
	if (unlikely(m == NULL)) {
		pktgen_log_warning("No packet buffers found");
		return;
	}
	*ppkt = *spkt;	// Copy the sequence setup to the ping setup.
	pktgen_packet_ctor(info, PING_PKT, ICMP4_ECHO);
	rte_memcpy((uint8_t *)m->buf_addr + m->data_off,
		   (uint8_t *)&ppkt->hdr, ppkt->pktSize);

	m->pkt_len = ppkt->pktSize;
	m->data_len = ppkt->pktSize;

	pktgen_send_mbuf(m, pid, qid);

	pktgen_set_q_flags(info, qid, DO_TX_FLUSH);
}
static inline int32_t
__rte_hash_add_key_with_hash(const struct rte_hash *h,
			     const void *key, hash_sig_t sig)
{
	hash_sig_t *sig_bucket;
	uint8_t *key_bucket;
	uint32_t bucket_index, i;
	int32_t pos;

	/* Get the hash signature and bucket index */
	sig |= h->sig_msb;
	bucket_index = sig & h->bucket_bitmask;
	sig_bucket = get_sig_tbl_bucket(h, bucket_index);
	key_bucket = get_key_tbl_bucket(h, bucket_index);

	/* Check if key is already present in the hash */
	for (i = 0; i < h->bucket_entries; i++) {
		if ((sig == sig_bucket[i]) &&
		    likely(memcmp(key, get_key_from_bucket(h, key_bucket, i),
				  h->key_len) == 0)) {
			return bucket_index * h->bucket_entries + i;
		}
	}

	/* Check if any free slot within the bucket to add the new key */
	pos = find_first(NULL_SIGNATURE, sig_bucket, h->bucket_entries);
	if (unlikely(pos < 0))
		return -ENOSPC;

	/* Add the new key to the bucket */
	sig_bucket[pos] = sig;
	rte_memcpy(get_key_from_bucket(h, key_bucket, pos), key, h->key_len);
	return bucket_index * h->bucket_entries + pos;
}
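/*
 * Hedged lookup counterpart (a sketch that follows the same bucket layout
 * as the add path above; not necessarily the library's exact code):
 * compute the bucket from the signature and scan it for a matching
 * signature and key.
 */
static inline int32_t
__rte_hash_lookup_with_hash_sketch(const struct rte_hash *h,
				   const void *key, hash_sig_t sig)
{
	hash_sig_t *sig_bucket;
	uint8_t *key_bucket;
	uint32_t bucket_index, i;

	sig |= h->sig_msb;
	bucket_index = sig & h->bucket_bitmask;
	sig_bucket = get_sig_tbl_bucket(h, bucket_index);
	key_bucket = get_key_tbl_bucket(h, bucket_index);

	for (i = 0; i < h->bucket_entries; i++) {
		if ((sig == sig_bucket[i]) &&
		    memcmp(key, get_key_from_bucket(h, key_bucket, i),
			   h->key_len) == 0)
			return bucket_index * h->bucket_entries + i;
	}

	return -ENOENT;	/* key not found */
}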
int netdpcmd_ring_send(void *buff, int buff_len)
{
	void *msg;

	if (buff_len > NETDP_RING_MSG_SIZE) {
		printf("Message too long, max size is %d \n", NETDP_RING_MSG_SIZE);
		return NETDP_EMSGPOOL;
	}

	if (rte_mempool_get(netdpcmd_message_pool, &msg) < 0) {
		printf("Getting message buffer failed \n");
		return NETDP_EMSGPOOL;
	}

	rte_memcpy(msg, buff, buff_len);

	if (rte_ring_enqueue(netdpcmd_ring_tx, msg) < 0) {
		printf("Sending message to NETDP stack failed \n");
		rte_mempool_put(netdpcmd_message_pool, msg);
		return NETDP_EMSGPOOL;
	}

	return 0;
}
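/*
 * Hypothetical receive counterpart (a sketch; netdpcmd_ring_rx and the
 * reuse of the same message pool are assumptions, not part of the NETDP
 * API): dequeue a message, copy the payload out, and return the buffer to
 * the mempool so the pool does not drain.
 */
int netdpcmd_ring_recv(void *buff, int buff_len)
{
	void *msg;

	if (rte_ring_dequeue(netdpcmd_ring_rx, &msg) < 0)
		return NETDP_EMSGPOOL;

	if (buff_len > NETDP_RING_MSG_SIZE)
		buff_len = NETDP_RING_MSG_SIZE;

	rte_memcpy(buff, msg, buff_len);
	rte_mempool_put(netdpcmd_message_pool, msg);

	return 0;
}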
static void
send_paxos_message(paxos_message *pm)
{
	uint8_t port_id = 0;
	struct rte_mbuf *created_pkt = rte_pktmbuf_alloc(mbuf_pool);

	if (created_pkt == NULL) {
		rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8,
			"Failed to allocate mbuf\n");
		return;
	}

	created_pkt->l2_len = sizeof(struct ether_hdr);
	created_pkt->l3_len = sizeof(struct ipv4_hdr);
	created_pkt->l4_len = sizeof(struct udp_hdr) + sizeof(paxos_message);

	craft_new_packet(&created_pkt, IPv4(192, 168, 4, 99), ACCEPTOR_ADDR,
			 PROPOSER_PORT, ACCEPTOR_PORT,
			 sizeof(paxos_message), port_id);

	//struct udp_hdr *udp;
	size_t udp_offset = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr);
	//udp = rte_pktmbuf_mtod_offset(created_pkt, struct udp_hdr *, udp_offset);

	size_t paxos_offset = udp_offset + sizeof(struct udp_hdr);
	struct paxos_hdr *px = rte_pktmbuf_mtod_offset(created_pkt,
						       struct paxos_hdr *,
						       paxos_offset);

	px->msgtype = rte_cpu_to_be_16(pm->type);
	px->inst = rte_cpu_to_be_32(pm->u.accept.iid);
	px->rnd = rte_cpu_to_be_16(pm->u.accept.ballot);
	px->vrnd = rte_cpu_to_be_16(pm->u.accept.value_ballot);
	px->acptid = 0;
	rte_memcpy(px->paxosval, pm->u.accept.value.paxos_value_val,
		   pm->u.accept.value.paxos_value_len);

	created_pkt->ol_flags = PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
				PKT_TX_UDP_CKSUM;

	const uint16_t nb_tx = rte_eth_tx_burst(port_id, 0, &created_pkt, 1);

	/* The driver owns any mbuf it accepted; free only on failure. */
	if (nb_tx == 0)
		rte_pktmbuf_free(created_pkt);

	rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8, "Sent %d messages\n", nb_tx);
}
int addr_sa_pton(const char* h, const char* s, struct sockaddr* sa,
		 socklen_t slen)
{
	struct addrinfo hints;
	struct addrinfo* ai;

	memset(&hints, '\0', sizeof(hints));
	hints.ai_flags = AI_NUMERICHOST;

	if (h == NULL || getaddrinfo(h, s, &hints, &ai) != 0)
		return (-1);

	if (ai == NULL || ai->ai_addr == NULL) {
		if (ai)
			freeaddrinfo(ai);
		return (-1);
	}

	if (sa != NULL) {
		if (slen < ai->ai_addrlen) {
			freeaddrinfo(ai);
			return (-1);
		}
		/* Copy the sockaddr itself, not the pointer to it. */
		rte_memcpy(sa, ai->ai_addr, ai->ai_addrlen);
	}

	freeaddrinfo(ai);
	return (0);
}
void
pktgen_packet_dump(struct rte_mbuf *m, int pid)
{
	port_info_t *info = &pktgen.info[pid];
	int plen = (m->pkt_len + FCS_SIZE);
	unsigned char *curr_data;
	struct rte_mbuf *curr_mbuf;

	/* Checking if info->dump_tail will not overflow is done in the
	 * caller */
	if (info->dump_list[info->dump_tail].data != NULL)
		rte_free(info->dump_list[info->dump_tail].data);

	info->dump_list[info->dump_tail].data = rte_malloc("Packet data",
							   plen, 0);
	info->dump_list[info->dump_tail].len = plen;

	/* Walk the segment chain; each segment's data starts at its own
	 * data_off. */
	for (curr_data = info->dump_list[info->dump_tail].data, curr_mbuf = m;
	     curr_mbuf != NULL;
	     curr_data += curr_mbuf->data_len, curr_mbuf = curr_mbuf->next)
		rte_memcpy(curr_data,
			   (uint8_t *)curr_mbuf->buf_addr + curr_mbuf->data_off,
			   curr_mbuf->data_len);

	++info->dump_tail;
}
/*
 * Resize allocated memory.
 */
void *
rte_realloc(void *ptr, size_t size, unsigned align)
{
	if (ptr == NULL)
		return rte_malloc(NULL, size, align);

	struct malloc_elem *elem = malloc_elem_from_data(ptr);
	if (elem == NULL)
		rte_panic("Fatal error: memory corruption detected\n");

	size = CACHE_LINE_ROUNDUP(size);
	align = CACHE_LINE_ROUNDUP(align);

	/* check alignment matches first, and if ok, see if we can resize block */
	if (RTE_PTR_ALIGN(ptr, align) == ptr &&
	    malloc_elem_resize(elem, size) == 0)
		return ptr;

	/* either alignment is off, or we have no room to expand,
	 * so move the data. */
	void *new_ptr = rte_malloc(NULL, size, align);
	if (new_ptr == NULL)
		return NULL;
	const unsigned old_size = elem->size - MALLOC_ELEM_OVERHEAD;
	rte_memcpy(new_ptr, ptr, old_size < size ? old_size : size);
	rte_free(ptr);

	return new_ptr;
}
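/*
 * Minimal usage sketch (grow_table and its parameters are hypothetical):
 * double a cache-line-aligned table, relying on rte_realloc() above to
 * preserve the old contents and to attempt in-place expansion before
 * falling back to allocate-copy-free.  Assumes <errno.h> for ENOMEM.
 */
static int
grow_table(uint32_t **tbl, size_t *n_entries)
{
	size_t new_n = *n_entries * 2;
	uint32_t *p = rte_realloc(*tbl, new_n * sizeof(**tbl),
				  CACHE_LINE_SIZE);

	if (p == NULL)
		return -ENOMEM;	/* the old block is still valid on failure */
	*tbl = p;
	*n_entries = new_n;
	return 0;
}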
int anscli_ring_send(void *buff, int buff_len)
{
	void *msg;

	if (buff_len > ANS_RING_MSG_SIZE) {
		printf("Message too long, max size is %d \n", ANS_RING_MSG_SIZE);
		return ANS_EMSGPOOL;
	}

	if (rte_mempool_get(anscli_message_pool, &msg) < 0) {
		printf("Getting message buffer failed \n");
		return ANS_EMSGPOOL;
	}

	rte_memcpy(msg, buff, buff_len);

	if (rte_ring_enqueue(anscli_ring_tx, msg) < 0) {
		printf("Sending message to ANS stack failed \n");
		rte_mempool_put(anscli_message_pool, msg);
		return ANS_EMSGPOOL;
	}

	return 0;
}
/* Add event to buffer, free space check is done prior to calling
 * this function
 */
static inline void
buf_event_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
		  struct rte_event *ev)
{
	struct rte_eth_event_enqueue_buffer *buf =
	    &rx_adapter->event_enqueue_buffer;

	rte_memcpy(&buf->events[buf->count++], ev, sizeof(struct rte_event));
}
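/*
 * Hedged sketch of a flush counterpart (illustrative only, not the rx
 * adapter's actual implementation; dev_id/port_id are assumed to be
 * tracked by the caller): drain the buffered events into the event device
 * with rte_event_enqueue_burst() and keep whatever the device did not
 * accept.  Assumes <string.h> for memmove().
 */
static inline void
buf_event_flush(uint8_t dev_id, uint8_t port_id,
		struct rte_eth_event_enqueue_buffer *buf)
{
	uint16_t n = rte_event_enqueue_burst(dev_id, port_id,
					     buf->events, buf->count);

	/* Compact the unsent tail to the front of the buffer. */
	if (n < buf->count)
		memmove(buf->events, &buf->events[n],
			(buf->count - n) * sizeof(struct rte_event));
	buf->count -= n;
}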
static int
cxgbe_set_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom)
{
	struct port_info *pi = (struct port_info *)(dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->length + (eeprom->offset & 3) + 3) & ~3;

	if (adapter->pf > 0) {
		u32 start = 1024 + adapter->pf * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->length) {
		/* RMW possibly needed for first or last words. */
		buf = rte_zmalloc(NULL, aligned_len, 0);
		if (!buf)
			return -ENOMEM;

		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		rte_memcpy(buf + (eeprom->offset & 3), eeprom->data,
			   eeprom->length);
	} else {
		buf = eeprom->data;
	}

	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != eeprom->data)
		rte_free(buf);
	return err;
}
static int
parse_args(int argc, char **argv)
{
	int ch;
	int r = 0, l = 0, P = 0, H = 0;

	while ((ch = getopt(argc, argv, "q:p:r:l:P:H:s")) != -1) {
		switch (ch) {
		case 'p':
			nb_port = atoi(optarg);
			printf("port number %u\n", nb_port);
			if (nb_port < 2 || (nb_port & 1)) {
				usage();
				return -1;
			}
			break;
		case 'r':
			/* Copy the path including its terminating NUL. */
			rte_memcpy(pc_rule_file, optarg, strlen(optarg) + 1);
			r = 1;
			break;
		case 'l':
			rte_memcpy(pc_fib_file, optarg, strlen(optarg) + 1);
			l = 1;
			break;
		case 'P':
			rte_memcpy(dpi_file, optarg, strlen(optarg) + 1);
			P = 1;
			break;
		case 'H':
			rte_memcpy(hash_file, optarg, strlen(optarg) + 1);
			H = 1;
			break;
		default:
			usage();
			return -1;
		}
	}

	/* All four input files are required. */
	if (!r || !l || !P || !H) {
		usage();
		return -1;
	}
	return 0;
}
static int
avf_init_rss(struct avf_adapter *adapter)
{
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
	struct rte_eth_rss_conf *rss_conf;
	uint8_t i, j, nb_q;
	int ret;

	rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
		       AVF_MAX_NUM_QUEUES);

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
		PMD_DRV_LOG(DEBUG, "RSS is not supported");
		return -ENOTSUP;
	}
	if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
		PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
		/* set all lut items to default queue */
		for (i = 0; i < vf->vf_res->rss_lut_size; i++)
			vf->rss_lut[i] = 0;
		ret = avf_configure_rss_lut(adapter);
		return ret;
	}

	/* In AVF, RSS enablement is set by PF driver. It is not supported
	 * to set based on rss_conf->rss_hf.
	 */

	/* configure RSS key */
	if (!rss_conf->rss_key) {
		/* Calculate the default hash key */
		for (i = 0; i < vf->vf_res->rss_key_size; i++)
			vf->rss_key[i] = (uint8_t)rte_rand();
	} else
		rte_memcpy(vf->rss_key, rss_conf->rss_key,
			   RTE_MIN(rss_conf->rss_key_len,
				   vf->vf_res->rss_key_size));

	/* init RSS LUT table */
	for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
		if (j >= nb_q)
			j = 0;
		vf->rss_lut[i] = j;
	}
	/* send virtchnl ops to configure RSS */
	ret = avf_configure_rss_lut(adapter);
	if (ret)
		return ret;
	ret = avf_configure_rss_key(adapter);
	if (ret)
		return ret;

	return 0;
}
int dpdpcap_transmit_in_loop(pcap_t *p, const u_char *buf, int size, int number)
{
	unsigned transmitLcoreId = 0;
	int i = 0;

	if (p == NULL || buf == NULL ||
	    p->deviceId < 0 || p->deviceId >= RTE_MAX_ETHPORTS) {
		snprintf(errbuf_g, PCAP_ERRBUF_SIZE, "Invalid parameter");
		return DPDKPCAP_FAILURE;
	}

	for (i = 0; i < DEF_PKT_BURST; i++) {
		mbuf_g[i] = rte_pktmbuf_alloc(txPool);
		if (mbuf_g[i] == NULL) {
			snprintf(errbuf_g, PCAP_ERRBUF_SIZE,
				 "Could not allocate buffer on port %d\n",
				 p->deviceId);
			return DPDKPCAP_FAILURE;
		}

		struct rte_mbuf *mbuf = mbuf_g[i];

		if (mbuf->buf_len < size) {
			snprintf(errbuf_g, PCAP_ERRBUF_SIZE,
				 "Cannot copy packet data: packet size %d, "
				 "mbuf length %d, port %d\n",
				 size, mbuf->buf_len, p->deviceId);
			return DPDKPCAP_FAILURE;
		}

		rte_memcpy(mbuf->pkt.data, buf, size);
		mbuf->pkt.data_len = size;
		mbuf->pkt.pkt_len = size;
		mbuf->pkt.nb_segs = 1;

		rte_pktmbuf_refcnt_update(mbuf, 1);
	}

	dpdkpcap_tx_args_t args;
	args.number = number;
	args.portId = p->deviceId;

	transmitLcoreId = p->deviceId + 1;

	debug("Transferring TX loop to the core %u\n", transmitLcoreId);

	if (rte_eal_remote_launch(txLoop, &args, transmitLcoreId) < 0) {
		snprintf(errbuf_g, PCAP_ERRBUF_SIZE,
			 "Cannot run TX on a slave core: transmitLcoreId %u\n",
			 transmitLcoreId);
		return DPDKPCAP_FAILURE;
	}

	rte_eal_wait_lcore(transmitLcoreId);
	return DPDKPCAP_OK;
}
static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
{
	SHA512_CTX ctx;

	if (!SHA512_Init(&ctx))
		return -EFAULT;
	SHA512_Transform(&ctx, data_in);
	/* Export the intermediate digest state (the first
	 * SHA512_DIGEST_LENGTH bytes of the context), not a finalized
	 * hash. */
	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
	return 0;
}
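/*
 * The same partial-hash pattern for SHA-256 (a sketch mirroring
 * partial_hash_sha512 above; assumes OpenSSL's SHA256_Init and
 * SHA256_Transform): run one Transform over a single input block and
 * export the intermediate state, which occupies the first
 * SHA256_DIGEST_LENGTH bytes of SHA256_CTX.
 */
static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
{
	SHA256_CTX ctx;

	if (!SHA256_Init(&ctx))
		return -EFAULT;
	SHA256_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
	return 0;
}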
static inline void
do_data_copy_dequeue(struct vhost_virtqueue *vq)
{
	struct batch_copy_elem *elem = vq->batch_copy_elems;
	uint16_t count = vq->batch_copy_nb_elems;
	int i;

	for (i = 0; i < count; i++)
		rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
}
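/*
 * Hedged sketch of the producer side (illustrative; the field names follow
 * the struct used above, but this is not the vhost library's exact code):
 * instead of copying inline, record dst/src/len so that
 * do_data_copy_dequeue() can replay all copies in one tight loop, batching
 * the rte_memcpy calls and keeping the descriptor-walk loop small.
 */
static inline void
batch_copy_defer(struct vhost_virtqueue *vq, void *dst, void *src,
		 uint32_t len)
{
	struct batch_copy_elem *elem =
		&vq->batch_copy_elems[vq->batch_copy_nb_elems++];

	elem->dst = dst;
	elem->src = src;
	elem->len = len;
}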