void app_main_loop_worker(void) { struct app_mbuf_array *worker_mbuf; uint32_t i; RTE_LOG(INFO, USER1, "Core %u is doing work (no pipeline)\n", rte_lcore_id()); worker_mbuf = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array), RTE_CACHE_LINE_SIZE, rte_socket_id()); if (worker_mbuf == NULL) rte_panic("Worker thread: cannot allocate buffer space\n"); for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) { int ret; ret = rte_ring_sc_dequeue_bulk( app.rings_rx[i], (void **) worker_mbuf->array, app.burst_size_worker_read); if (ret == -ENOENT) continue; do { ret = rte_ring_sp_enqueue_bulk( app.rings_tx[i ^ 1], (void **) worker_mbuf->array, app.burst_size_worker_write); } while (ret < 0); } }
static void app_init_rings(void) { uint32_t n_swq, i; n_swq = app_get_n_swq_in(); RTE_LOG(INFO, USER1, "Initializing %u SW rings ...\n", n_swq); app.rings = rte_malloc_socket(NULL, n_swq * sizeof(struct rte_ring *), RTE_CACHE_LINE_SIZE, rte_socket_id()); if (app.rings == NULL) rte_panic("Cannot allocate memory to store ring pointers\n"); for (i = 0; i < n_swq; i++) { struct rte_ring *ring; char name[32]; snprintf(name, sizeof(name), "app_ring_%u", i); ring = rte_ring_create( name, app.rsz_swq, rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ); if (ring == NULL) rte_panic("Cannot create ring %u\n", i); app.rings[i] = ring; } }
static void udpi_init_rings(void) { uint32_t n_swq, i; n_swq = udpi.n_workers ; RTE_LOG(INFO, USER1, "Initializing %u SW rings for ctrlmsg\n", n_swq); udpi.msg_rings = (struct rte_ring**)rte_malloc_socket(NULL, n_swq * sizeof(struct rte_ring *), RTE_CACHE_LINE_SIZE, rte_socket_id()); if (udpi.msg_rings == NULL) rte_panic("Cannot allocate memory to store ring pointers\n"); for (i = 0; i < n_swq; i++) { struct rte_ring *ring; char name[32]; snprintf(name, sizeof(name), "udpi_ctrlmsg_%u", i); ring = rte_ring_create( name, 16, rte_socket_id(), RING_F_SC_DEQ|RING_F_SP_ENQ); if (ring == NULL) rte_panic("Cannot create ctrlmsg ring %u\n", i); udpi.msg_rings[i] = ring; } }
/*
 * Allocate zero'd memory on specified heap.
 *
 * Thin wrapper over rte_malloc_socket(): on success the returned
 * region is cleared to zero; on failure NULL is returned unchanged.
 */
void *
rte_zmalloc_socket(const char *type, size_t size, unsigned align, int socket)
{
	void *p = rte_malloc_socket(type, size, align, socket);

	if (p == NULL)
		return NULL;

	memset(p, 0, size);
	return p;
}
/**
 * Internal helper to allocate memory once for several disparate objects.
 *
 * The most restrictive alignment constraint for standard objects is assumed
 * to be sizeof(double) and is used as a default value.
 *
 * C11 code would include stdalign.h and use alignof(max_align_t) however
 * we'll stick with C99 for the time being.
 *
 * Two-pass algorithm: the first pass (fill == 0) only sums the aligned
 * sizes of all vector entries; the allocation is then performed and the
 * loop is re-entered via the "fill" label with fill == 1 to store each
 * object's address back through vec[i].addr. If vec[0].addr is NULL on
 * entry, the function is in "size query" mode and returns the total size
 * without allocating (fill starts at 1 and data stays NULL, so the
 * stored offsets are relative to a NULL base).
 *
 * On error (non-power-of-two alignment, or allocation failure) every
 * vec[i].addr is reset to NULL, rte_errno is set (EINVAL / ENOMEM) and
 * 0 is returned.
 */
static inline size_t
mlx4_mallocv_inline(const char *type, const struct mlx4_malloc_vec *vec,
		    unsigned int cnt, int zero, int socket)
{
	unsigned int i;
	size_t size;
	size_t least;
	uint8_t *data = NULL;
	int fill = !vec[0].addr;

fill:
	size = 0;
	least = 0;
	for (i = 0; i < cnt; ++i) {
		size_t align = (uintptr_t)vec[i].align;

		if (!align) {
			/* Default: strictest standard alignment assumed. */
			align = sizeof(double);
		} else if (!rte_is_power_of_2(align)) {
			rte_errno = EINVAL;
			goto error;
		}
		/* Track the largest alignment for the single allocation. */
		if (least < align)
			least = align;
		/* Offset of this object within the combined block. */
		align = RTE_ALIGN_CEIL(size, align);
		size = align + vec[i].size;
		if (fill && vec[i].addr)
			*vec[i].addr = data + align;
	}
	if (fill)
		return size;
	if (!zero)
		data = rte_malloc_socket(type, size, least, socket);
	else
		data = rte_zmalloc_socket(type, size, least, socket);
	if (data) {
		/* Second pass: publish each object's address. */
		fill = 1;
		goto fill;
	}
	rte_errno = ENOMEM;
error:
	/* Leave no dangling pointers behind on failure. */
	for (i = 0; i != cnt; ++i)
		if (vec[i].addr)
			*vec[i].addr = NULL;
	return 0;
}
/*
 * Allocate one TSO header buffer per TX queue entry on the given NUMA
 * socket. On failure, releases every buffer allocated so far and
 * returns ENOMEM (positive errno, per sfc driver convention); returns
 * 0 on success.
 */
int
sfc_efx_tso_alloc_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
			    unsigned int txq_entries, unsigned int socket_id)
{
	unsigned int idx;

	for (idx = 0; idx < txq_entries; ++idx) {
		void *tsoh = rte_malloc_socket("sfc-efx-txq-tsoh-obj",
					       SFC_TSOH_STD_LEN,
					       RTE_CACHE_LINE_SIZE,
					       socket_id);

		if (tsoh == NULL) {
			/* Unwind: free everything allocated so far. */
			while (idx-- > 0)
				rte_free(sw_ring[idx].tsoh);
			return ENOMEM;
		}
		sw_ring[idx].tsoh = tsoh;
	}

	return 0;
}
/*
 * Move a vhost device and/or one of its virtqueues to the NUMA node
 * that backs the virtqueue's descriptor ring.
 *
 * get_mempolicy(MPOL_F_NODE | MPOL_F_ADDR) reports the node a given
 * address resides on; when the vq (or dev) structure lives on a
 * different node than the guest-mapped descriptor ring, the structure
 * is reallocated on the ring's node, copied, and the old one freed.
 * Any mempolicy query failure aborts the move and returns the device
 * unchanged. The caller must use the returned pointer, as both dev
 * and the virtqueue may have been replaced.
 */
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index)
{
	int oldnode, newnode;
	struct virtio_net *old_dev;
	struct vhost_virtqueue *old_vq, *vq;
	int ret;

	old_dev = dev;
	vq = old_vq = dev->virtqueue[index];

	/* Node of the descriptor ring: the target node for the move. */
	ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
			    MPOL_F_NODE | MPOL_F_ADDR);

	/* check if we need to reallocate vq */
	ret |= get_mempolicy(&oldnode, NULL, 0, old_vq,
			     MPOL_F_NODE | MPOL_F_ADDR);
	if (ret) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Unable to get vq numa information.\n");
		return dev;
	}
	if (oldnode != newnode) {
		RTE_LOG(INFO, VHOST_CONFIG,
			"reallocate vq from %d to %d node\n",
			oldnode, newnode);
		vq = rte_malloc_socket(NULL, sizeof(*vq), 0, newnode);
		if (!vq)
			return dev; /* best effort: keep the old vq */

		memcpy(vq, old_vq, sizeof(*vq));
		rte_free(old_vq);
	}

	/* check if we need to reallocate dev */
	ret = get_mempolicy(&oldnode, NULL, 0, old_dev,
			    MPOL_F_NODE | MPOL_F_ADDR);
	if (ret) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Unable to get dev numa information.\n");
		goto out;
	}
	if (oldnode != newnode) {
		RTE_LOG(INFO, VHOST_CONFIG,
			"reallocate dev from %d to %d node\n",
			oldnode, newnode);
		dev = rte_malloc_socket(NULL, sizeof(*dev), 0, newnode);
		if (!dev) {
			dev = old_dev; /* keep the old device on failure */
			goto out;
		}

		memcpy(dev, old_dev, sizeof(*dev));
		rte_free(old_dev);
	}

out:
	/* Re-link the (possibly new) vq and re-register the device. */
	dev->virtqueue[index] = vq;
	vhost_devices[dev->vid] = dev;

	/* A fresh vq needs its IOTLB state re-initialized. */
	if (old_vq != vq)
		vhost_user_iotlb_init(dev, index);

	return dev;
}
void app_main_loop_pipeline_tx(void) { struct rte_pipeline *p; uint32_t port_in_id[APP_MAX_PORTS]; uint32_t port_out_id[APP_MAX_PORTS]; uint32_t table_id[APP_MAX_PORTS]; uint32_t i; uint32_t core_id = rte_lcore_id(); struct app_core_params *core_params = app_get_core_params(core_id); if ((core_params == NULL) || (core_params->core_type != APP_CORE_TX)) rte_panic("Core %u misconfiguration\n", core_id); RTE_LOG(INFO, USER1, "Core %u is doing TX\n", core_id); /* Pipeline configuration */ struct rte_pipeline_params pipeline_params = { .name = "pipeline", .socket_id = rte_socket_id(), }; p = rte_pipeline_create(&pipeline_params); if (p == NULL) rte_panic("%s: Unable to configure the pipeline\n", __func__); /* Input port configuration */ for (i = 0; i < app.n_ports; i++) { struct rte_port_ring_reader_params port_ring_params = { .ring = app.rings[core_params->swq_in[i]], }; struct rte_pipeline_port_in_params port_params = { .ops = &rte_port_ring_reader_ops, .arg_create = (void *) &port_ring_params, .f_action = (app.ether_hdr_pop_push) ? 
app_pipeline_tx_port_in_action_handler : NULL, .arg_ah = NULL, .burst_size = app.bsz_swq_rd, }; if (rte_pipeline_port_in_create(p, &port_params, &port_in_id[i])) { rte_panic("%s: Unable to configure input port for " "ring TX %i\n", __func__, i); } } /* Output port configuration */ for (i = 0; i < app.n_ports; i++) { struct rte_port_ethdev_writer_params port_ethdev_params = { .port_id = app.ports[i], .queue_id = 0, .tx_burst_sz = app.bsz_hwq_wr, }; struct rte_pipeline_port_out_params port_params = { .ops = &rte_port_ethdev_writer_ops, .arg_create = (void *) &port_ethdev_params, .f_action = NULL, .f_action_bulk = NULL, .arg_ah = NULL, }; if (rte_pipeline_port_out_create(p, &port_params, &port_out_id[i])) { rte_panic("%s: Unable to configure output port for " "port %d\n", __func__, app.ports[i]); } } /* Table configuration */ for (i = 0; i < app.n_ports; i++) { struct rte_pipeline_table_params table_params = { .ops = &rte_table_stub_ops, .arg_create = NULL, .f_action_hit = NULL, .f_action_miss = NULL, .arg_ah = NULL, .action_data_size = 0, }; if (rte_pipeline_table_create(p, &table_params, &table_id[i])) { rte_panic("%s: Unable to configure table %u\n", __func__, table_id[i]); } } /* Interconnecting ports and tables */ for (i = 0; i < app.n_ports; i++) if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i], table_id[i])) rte_panic("%s: Unable to connect input port %u to " "table %u\n", __func__, port_in_id[i], table_id[i]); /* Add entries to tables */ for (i = 0; i < app.n_ports; i++) { struct rte_pipeline_table_entry default_entry = { .action = RTE_PIPELINE_ACTION_PORT, {.port_id = port_out_id[i]}, }; struct rte_pipeline_table_entry *default_entry_ptr; if (rte_pipeline_table_default_entry_add(p, table_id[i], &default_entry, &default_entry_ptr)) rte_panic("%s: Unable to add default entry to " "table %u\n", __func__, table_id[i]); } /* Enable input ports */ for (i = 0; i < app.n_ports; i++) if (rte_pipeline_port_in_enable(p, port_in_id[i])) rte_panic("Unable to 
enable input port %u\n", port_in_id[i]); /* Check pipeline consistency */ if (rte_pipeline_check(p) < 0) rte_panic("%s: Pipeline consistency check failed\n", __func__); /* Run-time */ for (i = 0; ; i++) { rte_pipeline_run(p); if ((i & APP_FLUSH) == 0) rte_pipeline_flush(p); } } void app_main_loop_tx(void) { struct app_mbuf_array *m[APP_MAX_PORTS]; uint32_t i; uint32_t core_id = rte_lcore_id(); struct app_core_params *core_params = app_get_core_params(core_id); if ((core_params == NULL) || (core_params->core_type != APP_CORE_TX)) rte_panic("Core %u misconfiguration\n", core_id); RTE_LOG(INFO, USER1, "Core %u is doing TX (no pipeline)\n", core_id); for (i = 0; i < APP_MAX_PORTS; i++) { m[i] = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array), CACHE_LINE_SIZE, rte_socket_id()); if (m[i] == NULL) rte_panic("%s: Cannot allocate buffer space\n", __func__); } for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) { uint32_t n_mbufs, n_pkts; int ret; n_mbufs = m[i]->n_mbufs; ret = rte_ring_sc_dequeue_bulk( app.rings[core_params->swq_in[i]], (void **) &m[i]->array[n_mbufs], app.bsz_swq_rd); if (ret == -ENOENT) continue; n_mbufs += app.bsz_swq_rd; if (n_mbufs < app.bsz_hwq_wr) { m[i]->n_mbufs = n_mbufs; continue; } n_pkts = rte_eth_tx_burst( app.ports[i], 0, m[i]->array, n_mbufs); if (n_pkts < n_mbufs) { uint32_t k; for (k = n_pkts; k < n_mbufs; k++) { struct rte_mbuf *pkt_to_free; pkt_to_free = m[i]->array[k]; rte_pktmbuf_free(pkt_to_free); } } m[i]->n_mbufs = 0; } }
/*
 * Allocate memory on default heap.
 *
 * Convenience front-end: identical to rte_malloc_socket() with no
 * NUMA node preference (SOCKET_ID_ANY).
 */
void *
rte_malloc(const char *type, size_t size, unsigned align)
{
	void *addr = rte_malloc_socket(type, size, align, SOCKET_ID_ANY);

	return addr;
}
/*
 * Pass-through pipeline main loop: one ring-reader input port, one
 * stub (pass-all) table and one ring-writer output port per NIC port,
 * connected 1:1, then run forever with periodic flushes.
 */
void
app_main_loop_pipeline_passthrough(void) {
	struct rte_pipeline_params pipeline_params = {
		.name = "pipeline",
		.socket_id = rte_socket_id(),
	};

	struct rte_pipeline *p;
	uint32_t port_in_id[APP_MAX_PORTS];
	uint32_t port_out_id[APP_MAX_PORTS];
	uint32_t table_id[APP_MAX_PORTS];
	uint32_t i;

	uint32_t core_id = rte_lcore_id();
	struct app_core_params *core_params = app_get_core_params(core_id);

	if ((core_params == NULL) || (core_params->core_type != APP_CORE_PT))
		rte_panic("Core %u misconfiguration\n", core_id);

	RTE_LOG(INFO, USER1, "Core %u is doing pass-through\n", core_id);

	/* Pipeline configuration */
	p = rte_pipeline_create(&pipeline_params);
	if (p == NULL)
		rte_panic("%s: Unable to configure the pipeline\n", __func__);

	/* Input port configuration: read bursts from the input SW rings. */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_port_ring_reader_params port_ring_params = {
			.ring = app.rings[core_params->swq_in[i]],
		};

		struct rte_pipeline_port_in_params port_params = {
			.ops = &rte_port_ring_reader_ops,
			.arg_create = (void *) &port_ring_params,
			.f_action = NULL,
			.arg_ah = NULL,
			.burst_size = app.bsz_swq_rd,
		};

		if (rte_pipeline_port_in_create(p, &port_params,
			&port_in_id[i])) {
			rte_panic("%s: Unable to configure input port for "
				"ring %d\n", __func__, i);
		}
	}

	/* Output port configuration: write bursts to the output SW rings. */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_port_ring_writer_params port_ring_params = {
			.ring = app.rings[core_params->swq_out[i]],
			.tx_burst_sz = app.bsz_swq_wr,
		};

		struct rte_pipeline_port_out_params port_params = {
			.ops = &rte_port_ring_writer_ops,
			.arg_create = (void *) &port_ring_params,
			.f_action = NULL,
			.f_action_bulk = NULL,
			.arg_ah = NULL,
		};

		if (rte_pipeline_port_out_create(p, &port_params,
			&port_out_id[i])) {
			rte_panic("%s: Unable to configure output port for "
				"ring %d\n", __func__, i);
		}
	}

	/* Table configuration: stub tables forward everything. */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_pipeline_table_params table_params = {
			.ops = &rte_table_stub_ops,
			.arg_create = NULL,
			.f_action_hit = NULL,
			.f_action_miss = NULL,
			.arg_ah = NULL,
			.action_data_size = 0,
		};

		if (rte_pipeline_table_create(p, &table_params, &table_id[i]))
			rte_panic("%s: Unable to configure table %u\n",
				__func__, i);
	}

	/* Interconnecting ports and tables (1:1) */
	for (i = 0; i < app.n_ports; i++) {
		if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i],
			table_id[i])) {
			rte_panic("%s: Unable to connect input port %u to "
				"table %u\n", __func__, port_in_id[i],
				table_id[i]);
		}
	}

	/* Add default entries steering each table to its output port. */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_pipeline_table_entry default_entry = {
			.action = RTE_PIPELINE_ACTION_PORT,
			{.port_id = port_out_id[i]},
		};

		struct rte_pipeline_table_entry *default_entry_ptr;

		if (rte_pipeline_table_default_entry_add(p, table_id[i],
			&default_entry, &default_entry_ptr))
			rte_panic("%s: Unable to add default entry to "
				"table %u\n", __func__, table_id[i]);
	}

	/* Enable input ports */
	for (i = 0; i < app.n_ports; i++)
		if (rte_pipeline_port_in_enable(p, port_in_id[i]))
			rte_panic("Unable to enable input port %u\n",
				port_in_id[i]);

	/* Check pipeline consistency */
	if (rte_pipeline_check(p) < 0)
		rte_panic("%s: Pipeline consistency check failed\n",
			__func__);

	/* Run-time: flush every (APP_FLUSH + 1) iterations. */
	for (i = 0; ; i++) {
		rte_pipeline_run(p);

		if ((i & APP_FLUSH) == 0)
			rte_pipeline_flush(p);
	}
}

/*
 * Pass-through main loop (non-pipeline mode): round-robin over the
 * ports forever, moving one burst per iteration from the input SW
 * ring to the matching output SW ring.
 */
void
app_main_loop_passthrough(void) {
	struct app_mbuf_array *m;
	uint32_t i;

	uint32_t core_id = rte_lcore_id();
	struct app_core_params *core_params = app_get_core_params(core_id);

	if ((core_params == NULL) || (core_params->core_type != APP_CORE_PT))
		rte_panic("Core %u misconfiguration\n", core_id);

	RTE_LOG(INFO, USER1, "Core %u is doing pass-through (no pipeline)\n",
		core_id);

	/* Scratch burst buffer on the local NUMA socket. */
	m = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array),
		RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (m == NULL)
		rte_panic("%s: cannot allocate buffer space\n", __func__);

	/* NOTE: the wrap mask assumes app.n_ports is a power of two. */
	for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) {
		int ret;

		ret = rte_ring_sc_dequeue_bulk(
			app.rings[core_params->swq_in[i]],
			(void **) m->array,
			app.bsz_swq_rd);

		if (ret == -ENOENT)
			continue;

		/* Busy-retry until the whole burst fits downstream. */
		do {
			ret = rte_ring_sp_enqueue_bulk(
				app.rings[core_params->swq_out[i]],
				(void **) m->array,
				app.bsz_swq_wr);
		} while (ret < 0);
	}
}
static struct virtio_net* numa_realloc(struct virtio_net *dev, int index) { int oldnode, newnode; struct virtio_net_config_ll *old_ll_dev, *new_ll_dev = NULL; struct vhost_virtqueue *old_vq, *new_vq = NULL; int ret; int realloc_dev = 0, realloc_vq = 0; old_ll_dev = (struct virtio_net_config_ll *)dev; old_vq = dev->virtqueue[index]; ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc, MPOL_F_NODE | MPOL_F_ADDR); ret = ret | get_mempolicy(&oldnode, NULL, 0, old_ll_dev, MPOL_F_NODE | MPOL_F_ADDR); if (ret) { RTE_LOG(ERR, VHOST_CONFIG, "Unable to get vring desc or dev numa information.\n"); return dev; } if (oldnode != newnode) realloc_dev = 1; ret = get_mempolicy(&oldnode, NULL, 0, old_vq, MPOL_F_NODE | MPOL_F_ADDR); if (ret) { RTE_LOG(ERR, VHOST_CONFIG, "Unable to get vq numa information.\n"); return dev; } if (oldnode != newnode) realloc_vq = 1; if (realloc_dev == 0 && realloc_vq == 0) return dev; if (realloc_dev) new_ll_dev = rte_malloc_socket(NULL, sizeof(struct virtio_net_config_ll), 0, newnode); if (realloc_vq) new_vq = rte_malloc_socket(NULL, sizeof(struct vhost_virtqueue), 0, newnode); if (!new_ll_dev && !new_vq) return dev; if (realloc_vq) memcpy(new_vq, old_vq, sizeof(*new_vq)); if (realloc_dev) memcpy(new_ll_dev, old_ll_dev, sizeof(*new_ll_dev)); (new_ll_dev ? new_ll_dev : old_ll_dev)->dev.virtqueue[index] = new_vq ? new_vq : old_vq; if (realloc_vq) rte_free(old_vq); if (realloc_dev) { if (ll_root == old_ll_dev) ll_root = new_ll_dev; else { struct virtio_net_config_ll *prev = ll_root; while (prev->next != old_ll_dev) prev = prev->next; prev->next = new_ll_dev; new_ll_dev->next = old_ll_dev->next; } rte_free(old_ll_dev); } return realloc_dev ? &new_ll_dev->dev : dev; }
/*
 * Move a vhost device and/or a virtqueue PAIR to the NUMA node that
 * backs the virtqueue's descriptor ring (queried via
 * get_mempolicy(MPOL_F_NODE | MPOL_F_ADDR)). Virtqueues are allocated
 * in RX/TX pairs, so reallocation is only attempted on the first
 * queue of a pair and moves both queues at once. Any mempolicy or
 * allocation failure aborts the move and returns the device with its
 * state unchanged (best effort). The caller must use the returned
 * pointer, as both dev and the virtqueues may have been replaced.
 */
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index)
{
	int oldnode, newnode;
	struct virtio_net *old_dev;
	struct vhost_virtqueue *old_vq, *vq;
	int ret;

	/*
	 * vq is allocated on pairs, we should try to do realloc
	 * on first queue of one queue pair only.
	 */
	if (index % VIRTIO_QNUM != 0)
		return dev;

	old_dev = dev;
	vq = old_vq = dev->virtqueue[index];

	/* Node of the descriptor ring: the target node for the move. */
	ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
			    MPOL_F_NODE | MPOL_F_ADDR);

	/* check if we need to reallocate vq */
	ret |= get_mempolicy(&oldnode, NULL, 0, old_vq,
			     MPOL_F_NODE | MPOL_F_ADDR);
	if (ret) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Unable to get vq numa information.\n");
		return dev;
	}
	if (oldnode != newnode) {
		RTE_LOG(INFO, VHOST_CONFIG,
			"reallocate vq from %d to %d node\n",
			oldnode, newnode);
		/* Move the whole RX/TX pair together. */
		vq = rte_malloc_socket(NULL, sizeof(*vq) * VIRTIO_QNUM, 0,
			newnode);
		if (!vq)
			return dev; /* best effort: keep the old pair */

		memcpy(vq, old_vq, sizeof(*vq) * VIRTIO_QNUM);
		rte_free(old_vq);
	}

	/* check if we need to reallocate dev */
	ret = get_mempolicy(&oldnode, NULL, 0, old_dev,
			    MPOL_F_NODE | MPOL_F_ADDR);
	if (ret) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Unable to get dev numa information.\n");
		goto out;
	}
	if (oldnode != newnode) {
		RTE_LOG(INFO, VHOST_CONFIG,
			"reallocate dev from %d to %d node\n",
			oldnode, newnode);
		dev = rte_malloc_socket(NULL, sizeof(*dev), 0, newnode);
		if (!dev) {
			dev = old_dev; /* keep the old device on failure */
			goto out;
		}

		memcpy(dev, old_dev, sizeof(*dev));
		rte_free(old_dev);
	}

out:
	/* Re-link both queues of the pair and re-register the device. */
	dev->virtqueue[index] = vq;
	dev->virtqueue[index + 1] = vq + 1;
	vhost_devices[dev->device_fh] = dev;

	return dev;
}