/*
 * Resize allocated memory.
 */
void *
rte_realloc(void *ptr, size_t size, unsigned align)
{
	if (ptr == NULL)
		return rte_malloc(NULL, size, align);

	struct malloc_elem *elem = malloc_elem_from_data(ptr);
	if (elem == NULL)
		rte_panic("Fatal error: memory corruption detected\n");

	size = CACHE_LINE_ROUNDUP(size);
	align = CACHE_LINE_ROUNDUP(align);
	/* check alignment matches first, and if ok, see if we can resize block */
	if (RTE_PTR_ALIGN(ptr, align) == ptr &&
			malloc_elem_resize(elem, size) == 0)
		return ptr;

	/* either alignment is off, or we have no room to expand,
	 * so move the data. */
	void *new_ptr = rte_malloc(NULL, size, align);
	if (new_ptr == NULL)
		return NULL;
	const unsigned old_size = elem->size - MALLOC_ELEM_OVERHEAD;
	rte_memcpy(new_ptr, ptr, old_size < size ? old_size : size);
	rte_free(ptr);

	return new_ptr;
}
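/*
 * Usage sketch (not part of the original source): growing a dynamically
 * sized array with rte_realloc(). The element type and helper name are
 * hypothetical; only rte_malloc.h's rte_realloc() is assumed.
 */
#include <rte_malloc.h>

static uint32_t *
grow_table(uint32_t *table, size_t new_n)
{
	/* rte_realloc() preserves min(old, new) bytes and frees the old
	 * block on success; on failure it returns NULL and leaves the old
	 * block intact, so the caller keeps the smaller table. */
	uint32_t *bigger = rte_realloc(table, new_n * sizeof(*table), 0);

	return bigger != NULL ? bigger : table;
}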
static void
metrics_display(int port_id)
{
	struct rte_metric_value *metrics;
	struct rte_metric_name *names;
	int len, ret;
	static const char *nic_stats_border = "########################";

	len = rte_metrics_get_names(NULL, 0);
	if (len < 0) {
		printf("Cannot get metrics count\n");
		return;
	}
	if (len == 0) {
		printf("No metrics to display (none have been registered)\n");
		return;
	}

	metrics = rte_malloc("proc_info_metrics",
		sizeof(struct rte_metric_value) * len, 0);
	if (metrics == NULL) {
		printf("Cannot allocate memory for metrics\n");
		return;
	}

	names = rte_malloc(NULL, sizeof(struct rte_metric_name) * len, 0);
	if (names == NULL) {
		printf("Cannot allocate memory for metrics names\n");
		rte_free(metrics);
		return;
	}

	if (len != rte_metrics_get_names(names, len)) {
		printf("Cannot get metrics names\n");
		rte_free(metrics);
		rte_free(names);
		return;
	}

	if (port_id == RTE_METRICS_GLOBAL)
		printf("###### Non port specific metrics #########\n");
	else
		printf("###### metrics for port %-2d #########\n", port_id);
	printf("%s############################\n", nic_stats_border);

	ret = rte_metrics_get_values(port_id, metrics, len);
	if (ret < 0 || ret > len) {
		printf("Cannot get metrics values\n");
		rte_free(metrics);
		rte_free(names);
		return;
	}

	int i;
	for (i = 0; i < len; i++)
		printf("%s: %"PRIu64"\n", names[i].name, metrics[i].value);

	printf("%s############################\n", nic_stats_border);
	rte_free(metrics);
	rte_free(names);
}
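/*
 * Companion sketch (assumed, not from the original source): the producer
 * side that metrics_display() reads. It uses the public librte_metrics
 * calls rte_metrics_init(), rte_metrics_reg_name() and
 * rte_metrics_update_value(); the metric name and value are made up.
 */
#include <rte_metrics.h>
#include <rte_lcore.h>

static int example_metric_key;

static void
example_register_metric(void)
{
	rte_metrics_init(rte_socket_id());
	example_metric_key = rte_metrics_reg_name("example_rx_events");
}

static void
example_bump_metric(uint64_t value)
{
	if (example_metric_key >= 0)
		rte_metrics_update_value(RTE_METRICS_GLOBAL,
			example_metric_key, value);
}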
void *
cperf_pmd_cyclecount_test_constructor(struct rte_mempool *sess_mp,
		uint8_t dev_id, uint16_t qp_id,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		const struct cperf_op_fns *op_fns)
{
	struct cperf_pmd_cyclecount_ctx *ctx = NULL;

	/* preallocate buffers for crypto ops as they can get quite big */
	size_t alloc_sz = sizeof(struct rte_crypto_op *) *
			options->nb_descriptors;

	ctx = rte_malloc(NULL, sizeof(struct cperf_pmd_cyclecount_ctx), 0);
	if (ctx == NULL)
		goto err;

	ctx->dev_id = dev_id;
	ctx->qp_id = qp_id;

	ctx->populate_ops = op_fns->populate_ops;
	ctx->options = options;
	ctx->test_vector = test_vector;

	/* IV goes at the end of the crypto operation */
	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
			sizeof(struct rte_crypto_sym_op);

	ctx->sess = op_fns->sess_create(sess_mp, dev_id, options, test_vector,
			iv_offset);
	if (ctx->sess == NULL)
		goto err;

	if (cperf_alloc_common_memory(options, test_vector, dev_id, qp_id, 0,
			&ctx->src_buf_offset, &ctx->dst_buf_offset,
			&ctx->pool) < 0)
		goto err;

	ctx->ops = rte_malloc("ops", alloc_sz, 0);
	if (!ctx->ops)
		goto err;

	ctx->ops_processed = rte_malloc("ops_processed", alloc_sz, 0);
	if (!ctx->ops_processed)
		goto err;

	return ctx;

err:
	cperf_pmd_cyclecount_test_free(ctx);
	return NULL;
}
/*
 * Add data to the hash table. Entries in the table are uniquely
 * identified by the (source IP, destination IP) pair.
 */
void
addToHashTable(void *handle, struct hashtable *table, struct ip *iphead,
		struct sk_buff *skb)
{
	printf("2 ");
	fflush(stdout);

	if (table->addr == NULL) {
		table->addr = (struct srcDstAddr *)rte_malloc("srcaddr",
				sizeof(struct srcDstAddr), 0);
		if (table->addr == NULL) {
			printf("Out of Mem5!\n");
			return;
		}
		table->addr->Src = iphead->ip_src;
		table->addr->Dst = iphead->ip_dst;
		table->addr->next = NULL;
		table->addr->packets = NULL;
		addToAddr(handle, table->addr, iphead, skb);
	} else {
		struct srcDstAddr *current, *pre;

		current = table->addr;
		pre = table->addr;
		while (current) {
			/* Fragments of one datagram carry the same
			 * source/destination pair, so compare like with like
			 * (the original compared Src against Dst and could
			 * never match an entry it had just inserted). */
			if (current->Src.s_addr == iphead->ip_src.s_addr &&
			    current->Dst.s_addr == iphead->ip_dst.s_addr) {
				/* hit */
				addToAddr(handle, current, iphead, skb);
				break;
			}
			pre = current;
			current = current->next;
		}
		if (current == NULL) {
			pre->next = (struct srcDstAddr *)rte_malloc("srcdst",
					sizeof(struct srcDstAddr), 0);
			if (pre->next == NULL) {
				printf("Out of Mem6!\n");
				return;
			}
			pre->next->Dst = iphead->ip_dst;
			pre->next->Src = iphead->ip_src;
			pre->next->next = NULL;
			pre->next->packets = NULL;
			addToAddr(handle, pre->next, iphead, skb);
		}
	}
}
/*
 * sft_queue_alloc - allocates a new queue and its internal buffer
 * @size: the size of the internal buffer to be allocated.
 *
 * The size will be rounded up to a power of 2.
 */
struct sft_queue *sft_queue_alloc(unsigned int size)
{
	unsigned char *buffer;
	struct sft_queue *ret;

	/*
	 * round up to the next power of 2, since our 'let the indices
	 * wrap' technique works only in this case.
	 */
	if (!is_power_of_2(size)) {
		BUG_ON(size > 0x80000000);
		size = roundup_pow_of_two(size);
	}

	buffer = rte_malloc(NULL, size, 0);
	if (unlikely(!buffer))
		return NULL;

	ret = sft_queue_init(buffer, size);
	if (unlikely(!ret))
		rte_free(buffer);

	return ret;
}
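/*
 * Usage sketch (assumed): allocating a queue. The requested size is
 * rounded up to the next power of two, so asking for 1000 bytes yields a
 * 1024-byte ring. Only sft_queue_alloc() from above is assumed.
 */
static struct sft_queue *
example_make_queue(void)
{
	struct sft_queue *q = sft_queue_alloc(1000); /* rounds up to 1024 */

	if (q == NULL)
		printf("queue allocation failed\n");
	return q;
}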
void
ipDeFragment(void *handle, struct ip *iphead, struct sk_buff *skb)
{
	printf("1\n");
	IpImpl *impl = (IpImpl *)handle;
	int index = addrtoHash(iphead->ip_src, iphead->ip_dst);
	int offset = ntohs(iphead->ip_off);
	int flags = offset & ~IP_OFFSET;

	offset &= IP_OFFSET;
	if (((flags & IP_MF) == 0) && (offset == 0)) { /* no fragment */
		/* Allocate the full struct, not just a pointer's worth
		 * (the original passed sizeof(struct ring_buf *)). */
		struct ring_buf *ptr = (struct ring_buf *)rte_malloc("rp",
				sizeof(struct ring_buf), 0);
		if (ptr == NULL)
			OUTOFMEM
		ptr->type = 0;
		ptr->ptr = iphead;
		rte_ring_enqueue(impl->r, ptr);
	} else {
		printf("Fragment in %d.\n", index);
		fflush(stdout);
		addToHashTable(handle, &impl->tables[index], iphead, skb);
	}
	/* to do: add ipFragment head */
}
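/*
 * addrtoHash() is referenced above but not shown. A minimal sketch of what
 * it might look like (hypothetical: the table-size constant HASH_TABLE_SIZE
 * and the mixing step are assumptions, not the original implementation):
 */
static int
addrtoHash(struct in_addr src, struct in_addr dst)
{
	uint32_t h = src.s_addr ^ dst.s_addr;

	/* fold the halves so all address octets influence the bucket */
	h ^= h >> 16;
	return (int)(h % HASH_TABLE_SIZE);
}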
/**
 * Set up the DPDK rings which will be used to pass packets, via
 * pointers, between the multi-process distributor and node processes.
 * Each node needs one RX queue.
 */
static int
init_shm_rings(void)
{
	unsigned int i;
	unsigned int socket_id;
	const char *q_name;
	const unsigned int ringsize = NODE_QUEUE_RINGSIZE;

	nodes = rte_malloc("node details",
		sizeof(*nodes) * num_nodes, 0);
	if (nodes == NULL)
		rte_exit(EXIT_FAILURE, "Cannot allocate memory for "
				"node program details\n");

	for (i = 0; i < num_nodes; i++) {
		/* Create an RX queue for each node */
		socket_id = rte_socket_id();
		q_name = get_rx_queue_name(i);
		nodes[i].rx_q = rte_ring_create(q_name,
				ringsize, socket_id,
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (nodes[i].rx_q == NULL)
			rte_exit(EXIT_FAILURE, "Cannot create rx ring queue "
					"for node %u\n", i);
	}
	return 0;
}
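/*
 * Companion sketch (assumed): how a secondary (node) process would attach
 * to a ring created above. rte_ring_lookup() is the standard DPDK call for
 * finding a named ring created by the primary process; get_rx_queue_name()
 * is the same helper used in init_shm_rings().
 */
#include <rte_ring.h>

static struct rte_ring *
node_attach_rx_queue(unsigned int node_id)
{
	struct rte_ring *rx_q = rte_ring_lookup(get_rx_queue_name(node_id));

	if (rx_q == NULL)
		rte_exit(EXIT_FAILURE,
			"Cannot find rx ring for node %u\n", node_id);
	return rx_q;
}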
static int
get_host_identifier(struct nvme_controller *ctrlr)
{
	int ret;
	uint64_t *host_id;
	struct nvme_command cmd = {};

	cmd.opc = NVME_OPC_GET_FEATURES;
	cmd.cdw10 = NVME_FEAT_HOST_IDENTIFIER;

	outstanding_commands = 0;

	host_id = rte_malloc(NULL, 8, 0);
	if (host_id == NULL) {
		fprintf(stderr, "host_id rte_malloc failed\n");
		return -1;
	}

	ret = nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, host_id, 8,
				       get_feature_completion,
				       &features[NVME_FEAT_HOST_IDENTIFIER]);
	if (ret) {
		fprintf(stdout, "Get Feature: Failed\n");
		rte_free(host_id);
		return -1;
	}

	outstanding_commands++;

	while (outstanding_commands) {
		nvme_ctrlr_process_admin_completions(ctrlr);
	}

	if (features[NVME_FEAT_HOST_IDENTIFIER].valid) {
		fprintf(stdout, "Get Feature: Host Identifier 0x%"PRIx64"\n",
			*host_id);
	}

	rte_free(host_id);
	return 0;
}
void
pktgen_packet_dump(struct rte_mbuf *m, int pid)
{
	port_info_t *info = &pktgen.info[pid];
	int plen = (m->pkt_len + FCS_SIZE);
	unsigned char *curr_data;
	struct rte_mbuf *curr_mbuf;

	/* Checking if info->dump_tail will not overflow is done in the caller */
	if (info->dump_list[info->dump_tail].data != NULL)
		rte_free(info->dump_list[info->dump_tail].data);

	info->dump_list[info->dump_tail].data = rte_malloc("Packet data",
			plen, 0);
	if (info->dump_list[info->dump_tail].data == NULL)
		return;
	info->dump_list[info->dump_tail].len = plen;

	/* Copy segment by segment, using each segment's own data_off
	 * (the original indexed every segment with the first mbuf's
	 * m->data_off). */
	for (curr_data = info->dump_list[info->dump_tail].data, curr_mbuf = m;
	     curr_mbuf != NULL;
	     curr_data += curr_mbuf->data_len, curr_mbuf = curr_mbuf->next)
		rte_memcpy(curr_data,
			   (uint8_t *)curr_mbuf->buf_addr + curr_mbuf->data_off,
			   curr_mbuf->data_len);

	++info->dump_tail;
}
static int
vtophys_positive_test(void)
{
	void *p = NULL;
	int i;
	unsigned int size = 1;
	int rc = 0;

	for (i = 0; i < 31; i++) {
		p = rte_malloc("vtophys_test", size, 512);
		if (p == NULL)
			continue;

		if (spdk_vtophys(p) == SPDK_VTOPHYS_ERROR) {
			rc = -1;
			printf("Err: VA=%p is not mapped to a huge page.\n", p);
			rte_free(p);
			break;
		}

		rte_free(p);
		size = size << 1;
	}

	if (!rc)
		printf("vtophys_positive_test passed\n");
	else
		printf("vtophys_positive_test failed\n");

	return rc;
}
/**
 * Set up the DPDK rings which will be used to pass packets, via
 * pointers, between the multi-process server and client processes.
 * Each client needs one RX queue.
 */
static int
init_shm_rings(void)
{
	unsigned i;
	unsigned socket_id;
	const char *q_name;
	const unsigned ringsize = CLIENT_QUEUE_RINGSIZE;

	clients = rte_malloc("client details",
		sizeof(*clients) * num_clients, 0);
	if (clients == NULL)
		rte_exit(EXIT_FAILURE,
			"Cannot allocate memory for client program details\n");

	for (i = 0; i < num_clients; i++) {
		/* Create an RX queue for each client */
		socket_id = rte_socket_id();
		q_name = get_rx_queue_name(i);
		clients[i].rx_q = rte_ring_create(q_name,
				ringsize, socket_id,
				RING_F_SP_ENQ | RING_F_SC_DEQ); /* single prod, single cons */
		if (clients[i].rx_q == NULL)
			rte_exit(EXIT_FAILURE,
				"Cannot create rx ring queue for client %u\n", i);
	}
	return 0;
}
/**
 * Set up the DPDK rings which will be used to pass packets, via
 * pointers, between the multi-process server and client processes.
 * Each client needs one RX queue.
 */
static int
init_shm_rings(void)
{
	unsigned i;
	const unsigned ringsize = CLIENT_QUEUE_RINGSIZE;

	clients = rte_malloc("client details",
		sizeof(*clients) * num_clients, 0);
	if (clients == NULL)
		rte_exit(EXIT_FAILURE,
			"Cannot allocate memory for client program details\n");

	port_queues = rte_malloc("port_txq details",
		sizeof(*port_queues) * ports->num_ports, 0);
	if (port_queues == NULL)
		rte_exit(EXIT_FAILURE,
			"Cannot allocate memory for port tx_q details\n");

	for (i = 0; i < num_clients; i++) {
		/* Create an RX and a TX queue for each client */
		clients[i].rx_q = rte_ring_create(get_rx_queue_name(i),
				ringsize, SOCKET0,
				NO_FLAGS); /* multi producer, multi consumer */
		if (clients[i].rx_q == NULL)
			rte_exit(EXIT_FAILURE,
				"Cannot create rx ring for client %u\n", i);

		clients[i].tx_q = rte_ring_create(get_tx_queue_name(i),
				ringsize, SOCKET0,
				NO_FLAGS); /* multi producer, multi consumer */
		if (clients[i].tx_q == NULL)
			rte_exit(EXIT_FAILURE,
				"Cannot create tx ring for client %u\n", i);
	}

	for (i = 0; i < ports->num_ports; i++) {
		/* Create a TX queue for each port */
		port_queues[i].tx_q = rte_ring_create(get_port_tx_queue_name(i),
				ringsize, SOCKET0,
				RING_F_SC_DEQ); /* multi producer, single consumer */
		if (port_queues[i].tx_q == NULL)
			rte_exit(EXIT_FAILURE,
				"Cannot create tx ring for port %u\n", i);
	}

	vswitch_packet_ring = rte_ring_create(PACKET_RING_NAME,
			DAEMON_PKT_QUEUE_RINGSIZE, SOCKET0, NO_FLAGS);
	if (vswitch_packet_ring == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create packet ring for vswitchd\n");

	return 0;
}
tw_tx_t *
tw_tx_init(tw_loop_t *loop)
{
	tw_tx_t *temp_handle = loop->tx_handle_queue;

	if (temp_handle == NULL) {
		temp_handle = rte_malloc("tw_tx_t *", sizeof(tw_tx_t),
				RTE_CACHE_LINE_SIZE);
		loop->tx_handle_queue = temp_handle;
	} else {
		while (temp_handle->next != NULL)
			temp_handle = temp_handle->next;
		temp_handle->next = rte_malloc("tw_tx_t *", sizeof(tw_tx_t),
				RTE_CACHE_LINE_SIZE);
		temp_handle = temp_handle->next;
	}
	if (temp_handle == NULL)
		return NULL;

	/* Terminate the list: rte_malloc() does not zero the allocation,
	 * so the tail walk above would otherwise follow garbage. */
	temp_handle->next = NULL;
	temp_handle->handle_type = TW_TX_HANDLE;
	loop->active_handles++;
	return temp_handle;
}
void *
control_init(int32_t socket_id, unsigned events)
{
	struct netl_handle *netl_h;
	struct handle_res *res;

	netl_h = netl_create(events);
	if (netl_h == NULL) {
		RTE_LOG(ERR, PKTJ_CTRL1, "Couldn't initialize netlink socket");
		goto err;
	}

	neighbor4_struct[socket_id] = nei_create(socket_id);
	if (neighbor4_struct[socket_id] == NULL) {
		RTE_LOG(ERR, PKTJ_CTRL1, "Couldn't initialize neighbor4 struct");
		goto err;
	}

	neighbor6_struct[socket_id] = nei_create(socket_id);
	if (neighbor6_struct[socket_id] == NULL) {
		RTE_LOG(ERR, PKTJ_CTRL1, "Couldn't initialize neighbor6 struct");
		goto err;
	}

	netl_h->cb.addr4 = addr4;
	netl_h->cb.addr6 = addr6;
	netl_h->cb.neighbor4 = neighbor4;
	netl_h->cb.neighbor6 = neighbor6;
	netl_h->cb.route4 = route4;
	netl_h->cb.route6 = route6;
	netl_h->cb.link = eth_link;

	struct in_addr invalid_ip = {INADDR_ANY};
	struct in6_addr invalid_ip6 = IN6ADDR_ANY_INIT;

	if (add_invalid_neighbor4(neighbor4_struct[socket_id], &invalid_ip,
			BAD_PORT) < 0) {
		RTE_LOG(ERR, PKTJ_CTRL1,
			"Couldn't add drop target in neighbor4 table");
		goto err;
	}

	if (add_invalid_neighbor6(neighbor6_struct[socket_id], &invalid_ip6,
			BAD_PORT) < 0) {
		RTE_LOG(ERR, PKTJ_CTRL1,
			"Couldn't add drop target in neighbor6 table");
		goto err;
	}

	/* The original passed socket_id as rte_malloc()'s align argument;
	 * rte_malloc_socket() is the call that actually takes a socket. */
	res = rte_malloc_socket("handle-res", sizeof(*res), 0, socket_id);
	if (res == NULL) {
		RTE_LOG(ERR, PKTJ_CTRL1, "Couldn't allocate handle resources");
		goto err;
	}
	res->socket_id = socket_id;
	res->netl_h = netl_h;
	return res;

err:
	rte_panic("failed to init control_main");
}
void *
spdk_malloc(size_t size, size_t align, uint64_t *phys_addr)
{
	void *buf = rte_malloc(NULL, size, align);

	if (buf && phys_addr) {
		*phys_addr = rte_malloc_virt2phy(buf);
	}
	return buf;
}
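/*
 * Usage sketch (assumed): allocating a DMA-able buffer and capturing its
 * physical address for a device descriptor. The 4 KiB size and alignment
 * are arbitrary example values; only spdk_malloc() above is assumed.
 */
static void *
example_alloc_dma_buffer(uint64_t *phys_addr)
{
	/* hugepage-backed, physically resolvable, 4 KiB aligned */
	return spdk_malloc(4096, 4096, phys_addr);
}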
tw_timer_t *
tw_timer_init(tw_loop_t *loop)
{
	tw_timer_t *temp_handle = loop->timer_handle_queue;

	if (temp_handle == NULL) {
		temp_handle = rte_malloc("tw_timer_t *", sizeof(tw_timer_t),
				RTE_CACHE_LINE_SIZE);
		loop->timer_handle_queue = temp_handle;
	} else {
		while (temp_handle->next != NULL)
			temp_handle = temp_handle->next;
		temp_handle->next = rte_malloc("tw_timer_t *", sizeof(tw_timer_t),
				RTE_CACHE_LINE_SIZE);
		temp_handle = temp_handle->next;
	}
	if (temp_handle == NULL)
		return NULL;

	/* TODO populate tw_loop_t entries if needed */
	/* Terminate the list: rte_malloc() does not zero the allocation,
	 * so the tail walk above would otherwise follow garbage. */
	temp_handle->next = NULL;
	temp_handle->handle_type = TW_TIMER_HANDLE;
	loop->active_handles++;
	return temp_handle;
}
static void
task_ctor(struct rte_mempool *mp, void *arg, void *__task, unsigned id)
{
	struct perf_task *task = __task;

	task->buf = rte_malloc(NULL, g_io_size_bytes, 0x200);
	if (task->buf == NULL) {
		fprintf(stderr, "task->buf rte_malloc failed\n");
		exit(1);
	}
}
struct malloc_disk *
create_malloc_disk(uint64_t num_blocks, uint32_t block_size)
{
	struct malloc_disk *mdisk;

	if (block_size % 512 != 0) {
		SPDK_ERRLOG("Block size %u is not a multiple of 512.\n",
			    block_size);
		return NULL;
	}

	if (num_blocks == 0) {
		SPDK_ERRLOG("Disk must be more than 0 blocks\n");
		return NULL;
	}

	mdisk = rte_malloc(NULL, sizeof(*mdisk), 0);
	if (!mdisk) {
		perror("mdisk");
		return NULL;
	}

	memset(mdisk, 0, sizeof(*mdisk));

	/*
	 * Allocate the large backend memory buffer using rte_malloc(),
	 * so that we guarantee it is allocated from hugepage memory.
	 *
	 * TODO: need to pass a hint so we know which socket to allocate
	 * from on multi-socket systems.
	 */
	mdisk->malloc_buf = rte_zmalloc(NULL, num_blocks * block_size,
					2 * 1024 * 1024);
	if (!mdisk->malloc_buf) {
		SPDK_ERRLOG("rte_zmalloc failed\n");
		rte_free(mdisk);
		return NULL;
	}

	snprintf(mdisk->disk.name, SPDK_BDEV_MAX_NAME_LENGTH,
		 "Malloc%d", malloc_disk_count);
	snprintf(mdisk->disk.product_name, SPDK_BDEV_MAX_PRODUCT_NAME_LENGTH,
		 "Malloc disk");
	malloc_disk_count++;

	mdisk->disk.write_cache = 1;
	mdisk->disk.blocklen = block_size;
	mdisk->disk.blockcnt = num_blocks;
	mdisk->disk.thin_provisioning = 1;
	mdisk->disk.max_unmap_bdesc_count = MALLOC_MAX_UNMAP_BDESC;

	mdisk->disk.ctxt = mdisk;
	mdisk->disk.fn_table = &malloc_fn_table;

	spdk_bdev_register(&mdisk->disk);

	mdisk->next = g_malloc_disk_head;
	g_malloc_disk_head = mdisk;

	return mdisk;
}
/*
 * The virtio device sends us the size of the descriptor ring.
 */
static int
vhost_user_set_vring_num(struct virtio_net *dev, VhostUserMsg *msg)
{
	struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];

	vq->size = msg->payload.state.num;

	if (dev->dequeue_zero_copy) {
		vq->nr_zmbuf = 0;
		vq->last_zmbuf_idx = 0;
		vq->zmbuf_size = vq->size;
		vq->zmbufs = rte_zmalloc(NULL, vq->zmbuf_size *
					 sizeof(struct zcopy_mbuf), 0);
		if (vq->zmbufs == NULL) {
			RTE_LOG(WARNING, VHOST_CONFIG,
				"failed to allocate mem for zero copy; "
				"zero copy is force disabled\n");
			dev->dequeue_zero_copy = 0;
		}
	}

	vq->shadow_used_ring = rte_malloc(NULL,
			vq->size * sizeof(struct vring_used_elem),
			RTE_CACHE_LINE_SIZE);
	if (!vq->shadow_used_ring) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"failed to allocate memory for shadow used ring.\n");
		return -1;
	}

	vq->batch_copy_elems = rte_malloc(NULL,
			vq->size * sizeof(struct batch_copy_elem),
			RTE_CACHE_LINE_SIZE);
	if (!vq->batch_copy_elems) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"failed to allocate memory for batching copy.\n");
		return -1;
	}

	return 0;
}
static char *
session_hash_cache_build_line_fn(struct h_scalar *he)
{
	struct sft_fdb_entry *dc = container_of(he, struct sft_fdb_entry,
			h_scalar);
	char *line;

	if ((line = (char *)rte_malloc(NULL, 200, 0)) == NULL)
		return NULL;

	snprintf(line, 150, "sess:%u.%u.%u.%u:%u--%u.%u.%u.%u:%u|%u\n",
		RTE_NIPQUAD(dc->sess_key.ip_src), dc->sess_key.port_src,
		RTE_NIPQUAD(dc->sess_key.ip_dst), dc->sess_key.port_dst,
		dc->sess_key.proto);
	return line;
}
/*
 * This function parses the backend config. It takes the filename
 * and fills up the backend_server array. This includes the mac and ip
 * address of the backend servers.
 */
static int
parse_backend_config(void)
{
	int ret, temp, i;
	char ip[32];
	char mac[32];
	FILE *cfg;

	cfg = fopen(lb->cfg_filename, "r");
	if (cfg == NULL) {
		rte_exit(EXIT_FAILURE, "Error opening server '%s' config\n",
			lb->cfg_filename);
	}

	/* Check the fscanf return as well: on a short read, temp would
	 * otherwise be used uninitialized. */
	ret = fscanf(cfg, "%*s %d", &temp);
	if (ret != 1 || temp <= 0) {
		rte_exit(EXIT_FAILURE,
			"Error parsing config, need at least one server configuration\n");
	}
	lb->server_count = temp;

	lb->server = (struct backend_server *)rte_malloc("backend server info",
		sizeof(struct backend_server) * lb->server_count, 0);
	if (lb->server == NULL) {
		rte_exit(EXIT_FAILURE,
			"Malloc failed, can't allocate server information\n");
	}

	for (i = 0; i < lb->server_count; i++) {
		ret = fscanf(cfg, "%s %s", ip, mac);
		if (ret != 2) {
			rte_exit(EXIT_FAILURE, "Invalid backend config structure\n");
		}

		ret = onvm_pkt_parse_ip(ip, &lb->server[i].d_ip);
		if (ret < 0) {
			rte_exit(EXIT_FAILURE,
				"Error parsing config IP address #%d\n", i);
		}

		ret = onvm_pkt_parse_mac(mac, lb->server[i].d_addr_bytes);
		if (ret < 0) {
			rte_exit(EXIT_FAILURE,
				"Error parsing config MAC address #%d\n", i);
		}
	}

	fclose(cfg);
	printf("\nARP config:\n");
	for (i = 0; i < lb->server_count; i++) {
		printf("%" PRIu8 ".%" PRIu8 ".%" PRIu8 ".%" PRIu8 " ",
			lb->server[i].d_ip & 0xFF,
			(lb->server[i].d_ip >> 8) & 0xFF,
			(lb->server[i].d_ip >> 16) & 0xFF,
			(lb->server[i].d_ip >> 24) & 0xFF);
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
			lb->server[i].d_addr_bytes[0],
			lb->server[i].d_addr_bytes[1],
			lb->server[i].d_addr_bytes[2],
			lb->server[i].d_addr_bytes[3],
			lb->server[i].d_addr_bytes[4],
			lb->server[i].d_addr_bytes[5]);
	}

	return ret;
}
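/*
 * Config file layout implied by the parser above (illustrative values,
 * not from the original source): the first line is any label token plus
 * the server count, followed by one "ip mac" pair per server:
 *
 *     LIST_SIZE 2
 *     10.0.0.1 aa:bb:cc:dd:ee:01
 *     10.0.0.2 aa:bb:cc:dd:ee:02
 */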
static int
packet_socket(struct sock *sk, int type, int proto)
{
	struct packet_priv *priv = rte_malloc(NULL, sizeof(*priv), 0);

	if (!priv)
		return sock_errno(sk, ENOMEM);

	priv->type = type;
	sk->priv = priv;
	return sock_errno(sk, 0);
}
/* Initialise data buffers. */
static int
init_buffers(void)
{
	unsigned i;

	large_buf_read = rte_malloc("memcpy", LARGE_BUFFER_SIZE, ALIGNMENT_UNIT);
	if (large_buf_read == NULL)
		goto error_large_buf_read;

	large_buf_write = rte_malloc("memcpy", LARGE_BUFFER_SIZE, ALIGNMENT_UNIT);
	if (large_buf_write == NULL)
		goto error_large_buf_write;

	small_buf_read = rte_malloc("memcpy", SMALL_BUFFER_SIZE, ALIGNMENT_UNIT);
	if (small_buf_read == NULL)
		goto error_small_buf_read;

	small_buf_write = rte_malloc("memcpy", SMALL_BUFFER_SIZE, ALIGNMENT_UNIT);
	if (small_buf_write == NULL)
		goto error_small_buf_write;

	for (i = 0; i < LARGE_BUFFER_SIZE; i++)
		large_buf_read[i] = rte_rand();
	for (i = 0; i < SMALL_BUFFER_SIZE; i++)
		small_buf_read[i] = rte_rand();

	return 0;

error_small_buf_write:
	rte_free(small_buf_read);
error_small_buf_read:
	rte_free(large_buf_write);
error_large_buf_write:
	rte_free(large_buf_read);
error_large_buf_read:
	printf("ERROR: not enough memory\n");
	return -1;
}
tw_rx_t *
tw_rx_init(tw_loop_t *loop)
{
	tw_rx_t *temp_rx_handle = loop->rx_handle_queue;

	if (temp_rx_handle == NULL) {
		/* Allocate a tw_rx_t; the original requested
		 * sizeof(tw_udp_t) for a tw_rx_t handle. */
		temp_rx_handle = rte_malloc("tw_rx_t *", sizeof(tw_rx_t),
				RTE_CACHE_LINE_SIZE);
		loop->rx_handle_queue = temp_rx_handle;
	} else {
		while (temp_rx_handle->next != NULL)
			temp_rx_handle = temp_rx_handle->next;
		temp_rx_handle->next = rte_malloc("tw_rx_t *", sizeof(tw_rx_t),
				RTE_CACHE_LINE_SIZE);
		temp_rx_handle = temp_rx_handle->next;
	}
	if (temp_rx_handle == NULL)
		return NULL;

	/* TODO populate tw_loop_t entries if needed */
	/* Terminate the list: rte_malloc() does not zero the allocation. */
	temp_rx_handle->next = NULL;
	temp_rx_handle->handle_type = TW_UDP_HANDLE;
	/* loop->active_handles++; */
	return temp_rx_handle;
}
ark_pkt_dir_t
ark_pktdir_init(void *base)
{
	struct ark_pkt_dir_inst *inst =
		rte_malloc("ark_pkt_dir_inst",
			   sizeof(struct ark_pkt_dir_inst), 0);

	if (inst == NULL) {
		PMD_DRV_LOG(ERR, "Failed to malloc ark_pkt_dir_inst.\n");
		return inst;
	}
	inst->regs = (struct ark_pkt_dir_regs *)base;
	inst->regs->ctrl = 0x00110110; /* POR state */
	return inst;
}
void
init_receiver(unsigned core_id, unsigned in_port, struct receiver_t *receiver)
{
	receiver->core_id = core_id;
	receiver->in_port = in_port;

	receiver->nb_handler = 0;
	receiver->nb_polls = 0;
	receiver->nb_rec = 0;
	receiver->pkts_received = 0;

	/* 64-byte-aligned buffer holding one burst's worth of mbuf pointers */
	receiver->burst_buffer = rte_malloc(NULL, BURST_SIZE * sizeof(void *), 64);

	rte_eth_macaddr_get(receiver->in_port, &receiver->mac);
}
/**
 * @brief PacketInfo copy constructor
 *
 * @param other Object to be copied
 */
DPDKAdapter::PacketInfo::PacketInfo(const PacketInfo& other)
{
	devId_ = other.devId_;
	mbuf_ = DPDKAdapter::instance()->cloneMbuf(devId_, other.mbuf_);

	data_ = (char*)rte_malloc("packet data", other.dataLen_, 0);
	if (!data_)
	{
		qCritical("Could not allocate memory for a packet");
		/* Don't copy into a NULL buffer (the original fell through
		 * to rte_memcpy() after the failed allocation). */
		dataLen_ = 0;
		return;
	}

	rte_memcpy(data_, other.data_, other.dataLen_);
	dataLen_ = other.dataLen_;

	qDebug("mbuf_ %p", mbuf_);
}
struct memblock_head *
memblock_alloc_block(size_t size)
{
	const size_t head_length = memblock_align(sizeof(struct memblock_head));
	struct memblock_head *block;

	/* Avoid wasting bytes that wouldn't be used due to misalignment. */
	size = memblock_align(size);

	block = rte_malloc("memblock", head_length + size, 0);
	if (unlikely(block == NULL))
		return NULL;

	block->next = ((char *)block) + head_length;
	block->end = block->next + size;
	return block;
}
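/*
 * Companion sketch (assumed): carving pieces out of the block above with a
 * bump pointer. memblock_align() and the next/end fields come from the
 * allocator; the helper itself is hypothetical.
 */
static void *
memblock_carve(struct memblock_head *block, size_t size)
{
	char *p = block->next;

	size = memblock_align(size);
	if (p + size > block->end)
		return NULL; /* block exhausted */
	block->next = p + size;
	return p;
}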
int
channel_monitor_init(void)
{
	global_event_fd = epoll_create1(0);
	if (global_event_fd < 0) {
		/* epoll_create1() returns -1 on error (0 is a valid fd);
		 * the original tested == 0. */
		RTE_LOG(ERR, CHANNEL_MONITOR, "Error creating epoll context with "
				"error %s\n", strerror(errno));
		return -1;
	}

	global_events_list = rte_malloc("epoll_events",
			sizeof(*global_events_list) * MAX_EVENTS,
			RTE_CACHE_LINE_SIZE);
	if (global_events_list == NULL) {
		RTE_LOG(ERR, CHANNEL_MONITOR, "Unable to rte_malloc for "
				"epoll events\n");
		return -1;
	}
	return 0;
}
int
dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev, uint32_t req_dist_set)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_rx_tc_dist_cfg tc_cfg;
	struct dpkg_profile_cfg kg_cfg;
	void *p_params;
	int ret, tc_index = 0;

	p_params = rte_malloc(NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
	if (!p_params) {
		RTE_LOG(ERR, PMD, "Memory unavailable\n");
		return -ENOMEM;
	}
	memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
	memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));

	dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg);
	tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
	tc_cfg.dist_size = eth_dev->data->nb_rx_queues;
	tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;

	ret = dpni_prepare_key_cfg(&kg_cfg, p_params);
	if (ret) {
		RTE_LOG(ERR, PMD, "Unable to prepare extract parameters\n");
		rte_free(p_params);
		return ret;
	}

	ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
			&tc_cfg);
	rte_free(p_params);
	if (ret) {
		RTE_LOG(ERR, PMD,
			"Setting distribution for Rx failed with err: %d\n", ret);
		return ret;
	}

	return 0;
}