/* * Initialise the datapath and all associated data structures. */ void datapath_init(void) { int one = 1; vswitchd_packet_ring = rte_ring_create(VSWITCHD_PACKET_RING_NAME, VSWITCHD_RINGSIZE, SOCKET0, NO_FLAGS); if (vswitchd_packet_ring == NULL) rte_exit(EXIT_FAILURE, "Cannot create packet ring for vswitchd"); vswitchd_reply_ring = rte_ring_create(VSWITCHD_REPLY_RING_NAME, VSWITCHD_RINGSIZE, SOCKET0, NO_FLAGS); if (vswitchd_reply_ring == NULL) rte_exit(EXIT_FAILURE, "Cannot create reply ring for vswitchd"); vswitchd_message_ring = rte_ring_create(VSWITCHD_MESSAGE_RING_NAME, VSWITCHD_RINGSIZE, SOCKET0, NO_FLAGS); if (vswitchd_message_ring == NULL) rte_exit(EXIT_FAILURE, "Cannot create message ring for vswitchd"); dpif_socket = socket(AF_UNIX, SOCK_DGRAM, 0); if (dpif_socket < 0) rte_exit(EXIT_FAILURE, "Cannot create socket"); if (ioctl(dpif_socket, FIONBIO, &one) < 0) rte_exit(EXIT_FAILURE, "Cannot make socket non-blocking"); }
/*
 * Initialize a pool of keys
 * These are unique tokens that can be obtained by threads
 * calling lthread_key_create()
 */
void _lthread_key_pool_init(void)
{
	static struct rte_ring *pool;
	struct lthread_key *new_key;
	char name[MAX_LTHREAD_NAME_SIZE];

	/* NOTE(review): every lcore entering this function zeroes the whole
	 * key table, not just the cmpset winner below — safe only if no keys
	 * are in use yet; confirm callers run this before any key is created. */
	bzero(key_table, sizeof(key_table));

	/* only one lcore should do this */
	if (rte_atomic64_cmpset(&key_pool_init, 0, 1)) {
		/* ring name is made unique per process via the pid */
		snprintf(name,
			MAX_LTHREAD_NAME_SIZE,
			"lthread_key_pool_%d",
			getpid());

		pool = rte_ring_create(name,
					LTHREAD_MAX_KEYS, 0, 0);
		LTHREAD_ASSERT(pool);

		int i;

		/* loop starts at 1, so key_table[0] is never placed in the pool */
		for (i = 1; i < LTHREAD_MAX_KEYS; i++) {
			new_key = &key_table[i];
			rte_ring_mp_enqueue((struct rte_ring *)pool,
						(void *)new_key);
		}
		/* publishing key_pool releases the waiters below */
		key_pool = pool;
	}
	/* other lcores wait here till done */
	while (key_pool == NULL) {
		rte_compiler_barrier();
		sched_yield();
	};
}
/** * Set up the DPDK rings which will be used to pass packets, via * pointers, between the multi-process distributor and node processes. * Each node needs one RX queue. */ static int init_shm_rings(void) { unsigned int i; unsigned int socket_id; const char *q_name; const unsigned int ringsize = NODE_QUEUE_RINGSIZE; nodes = rte_malloc("node details", sizeof(*nodes) * num_nodes, 0); if (nodes == NULL) rte_exit(EXIT_FAILURE, "Cannot allocate memory for " "node program details\n"); for (i = 0; i < num_nodes; i++) { /* Create an RX queue for each node */ socket_id = rte_socket_id(); q_name = get_rx_queue_name(i); nodes[i].rx_q = rte_ring_create(q_name, ringsize, socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ); if (nodes[i].rx_q == NULL) rte_exit(EXIT_FAILURE, "Cannot create rx ring queue " "for node %u\n", i); } return 0; }
static void app_init_rings(void) { uint32_t n_swq, i; n_swq = app_get_n_swq_in(); RTE_LOG(INFO, USER1, "Initializing %u SW rings ...\n", n_swq); app.rings = rte_malloc_socket(NULL, n_swq * sizeof(struct rte_ring *), RTE_CACHE_LINE_SIZE, rte_socket_id()); if (app.rings == NULL) rte_panic("Cannot allocate memory to store ring pointers\n"); for (i = 0; i < n_swq; i++) { struct rte_ring *ring; char name[32]; snprintf(name, sizeof(name), "app_ring_%u", i); ring = rte_ring_create( name, app.rsz_swq, rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ); if (ring == NULL) rte_panic("Cannot create ring %u\n", i); app.rings[i] = ring; } }
static void udpi_init_rings(void) { uint32_t n_swq, i; n_swq = udpi.n_workers ; RTE_LOG(INFO, USER1, "Initializing %u SW rings for ctrlmsg\n", n_swq); udpi.msg_rings = (struct rte_ring**)rte_malloc_socket(NULL, n_swq * sizeof(struct rte_ring *), RTE_CACHE_LINE_SIZE, rte_socket_id()); if (udpi.msg_rings == NULL) rte_panic("Cannot allocate memory to store ring pointers\n"); for (i = 0; i < n_swq; i++) { struct rte_ring *ring; char name[32]; snprintf(name, sizeof(name), "udpi_ctrlmsg_%u", i); ring = rte_ring_create( name, 16, rte_socket_id(), RING_F_SC_DEQ|RING_F_SP_ENQ); if (ring == NULL) rte_panic("Cannot create ctrlmsg ring %u\n", i); udpi.msg_rings[i] = ring; } }
/** * Set up the DPDK rings which will be used to pass packets, via * pointers, between the multi-process server and client processes. * Each client needs one RX queue. */ static int init_shm_rings(void) { unsigned i; unsigned socket_id; const char * q_name; const unsigned ringsize = CLIENT_QUEUE_RINGSIZE; clients = rte_malloc("client details", sizeof(*clients) * num_clients, 0); if (clients == NULL) rte_exit(EXIT_FAILURE, "Cannot allocate memory for client program details\n"); for (i = 0; i < num_clients; i++) { /* Create an RX queue for each client */ socket_id = rte_socket_id(); q_name = get_rx_queue_name(i); clients[i].rx_q = rte_ring_create(q_name, ringsize, socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ ); /* single prod, single cons */ if (clients[i].rx_q == NULL) rte_exit(EXIT_FAILURE, "Cannot create rx ring queue for client %u\n", i); } return 0; }
/** * Set up the DPDK rings which will be used to pass packets, via * pointers, between the multi-process server and client processes. * Each client needs one RX queue. */ static int init_shm_rings(void) { unsigned i; const unsigned ringsize = CLIENT_QUEUE_RINGSIZE; clients = rte_malloc("client details", sizeof(*clients) * num_clients, 0); if (clients == NULL) rte_exit(EXIT_FAILURE, "Cannot allocate memory for client program details\n"); port_queues = rte_malloc("port_txq details", sizeof(*port_queues) * ports->num_ports, 0); if (port_queues == NULL) rte_exit(EXIT_FAILURE, "Cannot allocate memory for port tx_q details\n"); for (i = 0; i < num_clients; i++) { /* Create an RX queue for each client */ clients[i].rx_q = rte_ring_create(get_rx_queue_name(i), ringsize, SOCKET0, NO_FLAGS); /* multi producer multi consumer*/ if (clients[i].rx_q == NULL) rte_exit(EXIT_FAILURE, "Cannot create rx ring for client %u\n", i); clients[i].tx_q = rte_ring_create(get_tx_queue_name(i), ringsize, SOCKET0, NO_FLAGS); /* multi producer multi consumer*/ if (clients[i].tx_q == NULL) rte_exit(EXIT_FAILURE, "Cannot create tx ring for client %u\n", i); } for (i = 0; i < ports->num_ports; i++) { /* Create an RX queue for each ports */ port_queues[i].tx_q = rte_ring_create(get_port_tx_queue_name(i), ringsize, SOCKET0, RING_F_SC_DEQ); /* multi producer single consumer*/ if (port_queues[i].tx_q == NULL) rte_exit(EXIT_FAILURE, "Cannot create tx ring for port %u\n", i); } vswitch_packet_ring = rte_ring_create(PACKET_RING_NAME, DAEMON_PKT_QUEUE_RINGSIZE, SOCKET0, NO_FLAGS); if (vswitch_packet_ring == NULL) rte_exit(EXIT_FAILURE, "Cannot create packet ring for vswitchd"); return 0; }
int main(int argc, char **argv) { int c; int ret; int sp_sc; unsigned socket_io; /* initialize EAL first */ ret = rte_eal_init(argc, argv); argc -= ret; argv += ret; sp_sc = 1; bulk_size = 1; while ((c = getopt(argc, argv, "sm:b:w:")) != -1) { switch (c) { case 's': sp_sc = 1; break; case 'm': sp_sc = 0; nb_producers = atoi(optarg); break; case 'b': bulk_size = atoi(optarg); break; case 'w': work_cycles = atoi(optarg); break; case '?': break; } } setlocale(LC_NUMERIC, ""); socket_io = rte_lcore_to_socket_id(rte_get_master_lcore()); ring = rte_ring_create(ring_name, ring_size, socket_io, RING_F_SP_ENQ | RING_F_SC_DEQ); if (ring == NULL) { rte_panic("Cannot create ring"); } if (sp_sc) { printf("[MASTER] Single Producer/Consumer\n"); printf("[MASTER] Bulk size: %d\n", bulk_size); driver_sp_sc(); } else { printf("[MASTER] Number of Producers/Consumers: %d\n", nb_producers); printf("[MASTER] Bulk size: %d\n", bulk_size); driver_mp_mc(); } rte_eal_mp_wait_lcore(); }
void InitIpToEtherRing(void ) { // socket_tcb_ring_send = rte_ring_create(TCB_TO_SOCKET, socket_tcb_ring_size, SOCKET_ID_ANY, 0); int buffer_size = sizeof(struct rte_mbuf *); buffer_message_pool = rte_mempool_create(_MSG_POOL, pool_size, buffer_size, 32, 0, NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0); if(buffer_message_pool == NULL) { printf("ERROR **** ip -- ether Message failed\n"); } else { printf("ip - ether message pool OK.\n"); } ip_to_ether_ring_send = rte_ring_create(IP_ETHER_RING_NAME, ring_size, SOCKET_ID_ANY, 0); if(ip_to_ether_ring_send) { printf("ip_to_ether_ring_send ring OK\n"); } else { printf("ERROR * ring ip_to_ether_ring_send failed\n"); } ether_to_ip_ring_send = rte_ring_create(ETHER_IP_RING_NAME, ring_size, SOCKET_ID_ANY, 0); if(ether_to_ip_ring_send) { printf("ether_to_ip_ring_send ring OK\n"); } else { printf("ERROR * ring ether_to_ip_ring_send failed\n"); } ether_to_ip_ring_recv = rte_ring_lookup(ETHER_IP_RING_NAME); if(ether_to_ip_ring_recv) { printf("ether_to_ip_ring_recv ring OK\n"); } else { printf("ERROR * ring ether_to_ip_ring_recv failed\n"); } ip_to_ether_ring_recv = rte_ring_lookup(IP_ETHER_RING_NAME); if(ip_to_ether_ring_recv) { printf("ip_to_ether_ring_recv ring OK\n"); } else { printf("ERROR * ring ip_to_ether_ring_recv failed\n"); } }
/* Main function */ int main(int argc, char **argv) { int ret; int i; /* Create handler for SIGINT for CTRL + C closing and SIGALRM to print stats*/ signal(SIGINT, sig_handler); signal(SIGALRM, alarm_routine); /* Initialize DPDK enviroment with args, then shift argc and argv to get application parameters */ ret = rte_eal_init(argc, argv); if (ret < 0) FATAL_ERROR("Cannot init EAL\n"); argc -= ret; argv += ret; /* Check if this application can use 1 core*/ ret = rte_lcore_count (); if (ret != 2) FATAL_ERROR("This application needs exactly 2 cores."); /* Parse arguments */ parse_args(argc, argv); if (ret < 0) FATAL_ERROR("Wrong arguments\n"); /* Probe PCI bus for ethernet devices, mandatory only in DPDK < 1.8.0 */ #if RTE_VER_MAJOR == 1 && RTE_VER_MINOR < 8 ret = rte_eal_pci_probe(); if (ret < 0) FATAL_ERROR("Cannot probe PCI\n"); #endif /* Get number of ethernet devices */ nb_sys_ports = rte_eth_dev_count(); if (nb_sys_ports <= 0) FATAL_ERROR("Cannot find ETH devices\n"); /* Create a mempool with per-core cache, initializing every element for be used as mbuf, and allocating on the current NUMA node */ pktmbuf_pool = rte_mempool_create(MEMPOOL_NAME, buffer_size-1, MEMPOOL_ELEM_SZ, MEMPOOL_CACHE_SZ, sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,rte_socket_id(), 0); if (pktmbuf_pool == NULL) FATAL_ERROR("Cannot create cluster_mem_pool. 
Errno: %d [ENOMEM: %d, ENOSPC: %d, E_RTE_NO_TAILQ: %d, E_RTE_NO_CONFIG: %d, E_RTE_SECONDARY: %d, EINVAL: %d, EEXIST: %d]\n", rte_errno, ENOMEM, ENOSPC, E_RTE_NO_TAILQ, E_RTE_NO_CONFIG, E_RTE_SECONDARY, EINVAL, EEXIST ); /* Create a ring for exchanging packets between cores, and allocating on the current NUMA node */ intermediate_ring = rte_ring_create (RING_NAME, buffer_size, rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ ); if (intermediate_ring == NULL ) FATAL_ERROR("Cannot create ring"); /* Operations needed for each ethernet device */ for(i=0; i < nb_sys_ports; i++) init_port(i); /* Start consumer and producer routine on 2 different cores: producer launched first... */ ret = rte_eal_mp_remote_launch (main_loop_producer, NULL, SKIP_MASTER); if (ret != 0) FATAL_ERROR("Cannot start consumer thread\n"); /* ... and then loop in consumer */ main_loop_consumer ( NULL ); return 0; }
/** * Set up the DPDK rings which will be used to pass packets, via * pointers, between the multi-process server and NF processes. * Each NF needs one RX queue. */ static int init_shm_rings(void) { unsigned i; unsigned socket_id; const char * rq_name; const char * tq_name; const char * msg_q_name; const unsigned ringsize = NF_QUEUE_RINGSIZE; const unsigned msgringsize = NF_MSG_QUEUE_SIZE; // use calloc since we allocate for all possible NFs // ensure that all fields are init to 0 to avoid reading garbage // TODO plopreiato, move to creation when a NF starts for (i = 0; i < MAX_NFS; i++) { /* Create an RX queue for each NF */ socket_id = rte_socket_id(); rq_name = get_rx_queue_name(i); tq_name = get_tx_queue_name(i); msg_q_name = get_msg_queue_name(i); nfs[i].instance_id = i; nfs[i].rx_q = rte_ring_create(rq_name, ringsize, socket_id, RING_F_SC_DEQ); /* multi prod, single cons */ nfs[i].tx_q = rte_ring_create(tq_name, ringsize, socket_id, RING_F_SC_DEQ); /* multi prod, single cons */ nfs[i].msg_q = rte_ring_create(msg_q_name, msgringsize, socket_id, RING_F_SC_DEQ); /* multi prod, single cons */ if (nfs[i].rx_q == NULL) rte_exit(EXIT_FAILURE, "Cannot create rx ring queue for NF %u\n", i); if (nfs[i].tx_q == NULL) rte_exit(EXIT_FAILURE, "Cannot create tx ring queue for NF %u\n", i); if (nfs[i].msg_q == NULL) rte_exit(EXIT_FAILURE, "Cannot create msg queue for NF %u\n", i); } return 0; }
/* Alloc KNI Devices for PORT_ID */ static int odp_kni_alloc(uint8_t port_id) { uint8_t i; struct rte_kni *kni; struct rte_kni_conf conf; struct kni_port_params **params = kni_port_params_array; unsigned lcore_id = params[port_id]->lcore_id; unsigned lcore_socket = rte_lcore_to_socket_id(lcore_id); struct rte_mempool * kni_mempool = odp_pktmbuf_pool[lcore_socket]; if (port_id >= RTE_MAX_ETHPORTS || !params[port_id]) return -1; memset(&conf, 0, sizeof(conf)); snprintf(conf.name, RTE_KNI_NAMESIZE,"keth%u", port_id); conf.group_id = (uint16_t)port_id; conf.mbuf_size = MAX_PACKET_SZ; struct rte_kni_ops ops; struct rte_eth_dev_info dev_info; memset(&dev_info, 0, sizeof(dev_info)); rte_eth_dev_info_get(port_id, &dev_info); conf.addr = dev_info.pci_dev->addr; conf.id = dev_info.pci_dev->id; memset(&ops, 0, sizeof(ops)); ops.port_id = port_id; ops.change_mtu = kni_change_mtu; ops.config_network_if = kni_config_network_interface; kni = rte_kni_alloc(kni_mempool, &conf, &ops); if (!kni) rte_exit(EXIT_FAILURE, "Fail to create kni for " "port: %d\n", port_id); params[port_id]->kni = kni; /* Create Ring to recieve the pkts from other cores */ char ring_name[32]; snprintf(ring_name,sizeof(ring_name),"kni_ring_s%u_p%u",lcore_socket,port_id); params[port_id]->ring = rte_ring_create(ring_name,ODP_KNI_RING_SIZE, lcore_socket,RING_F_SC_DEQ); if(!params[port_id]->ring) rte_exit(EXIT_FAILURE, "Fail to create ring for kni %s",ring_name); return 0; }
/*
 * Create the pkt0 packet transport and one event usocket per lcore.
 *
 * Looks up (or creates) the shared multi-producer/single-consumer
 * "pkt0_tx" ring, then creates an event usocket per lcore and binds it
 * to the packet transport. Returns 0 on success, -1 if the transport
 * cannot be created, -ENOMEM on any later failure (errno is preserved
 * across the cleanup calls).
 */
int dpdk_packet_socket_init(void)
{
	unsigned lcore_id;
	struct vr_dpdk_lcore *lcorep;
	void *event_sock = NULL;
	int err;

	vr_dpdk.packet_transport = (void *)vr_usocket(PACKET, RAW);
	if (!vr_dpdk.packet_transport)
		return -1;

	if (!vr_dpdk.packet_ring) {
		/* the ring may already exist, e.g. created by another process */
		vr_dpdk.packet_ring = rte_ring_lookup("pkt0_tx");
		if (!vr_dpdk.packet_ring) {
			/* multi-producers single-consumer ring */
			vr_dpdk.packet_ring = rte_ring_create("pkt0_tx",
					VR_DPDK_TX_RING_SZ, SOCKET_ID_ANY,
					RING_F_SC_DEQ);
			if (!vr_dpdk.packet_ring) {
				RTE_LOG(ERR, VROUTER, " error creating pkt0 ring\n");
				goto error;
			}
		}
	}

	/* socket events to wake up the pkt0 lcore */
	RTE_LCORE_FOREACH(lcore_id) {
		lcorep = vr_dpdk.lcores[lcore_id];
		event_sock = (void *)vr_usocket(EVENT, RAW);
		if (!event_sock) {
			goto error;
		}
		if (vr_usocket_bind_usockets(vr_dpdk.packet_transport,
					event_sock))
			goto error;
		lcorep->lcore_event_sock = event_sock;
	}
	return 0;

error:
	/* NOTE(review): only the most recently created event socket is
	 * closed here; sockets bound in earlier loop iterations are
	 * presumably released via the packet_transport close — confirm. */
	err = errno;
	if (event_sock)
		vr_usocket_close(event_sock);
	vr_usocket_close(vr_dpdk.packet_transport);
	vr_dpdk.packet_transport = NULL;
	errno = err;
	return -ENOMEM;
}
/**
 * Allocate a rte_ring for newly created NFs
 */
static int
init_info_queue(void)
{
	/* MP enqueue (default), SC dequeue */
	incoming_msg_queue = rte_ring_create(_MGR_MSG_QUEUE_NAME, MAX_NFS,
			rte_socket_id(), RING_F_SC_DEQ);
	if (incoming_msg_queue == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create incoming msg queue\n");

	return 0;
}
/*
 * Create the packet transport and its wake-up event usocket.
 *
 * Looks up (or creates) the shared multi-producer/single-consumer
 * "packet_tx" ring, then creates a single event usocket and binds it to
 * the packet transport. Returns 0 on success, -1 if the transport
 * cannot be created, -ENOMEM on any later failure (errno is preserved
 * across the cleanup calls).
 */
int dpdk_packet_socket_init(void)
{
	void *event_sock = NULL;
	int err;

	vr_dpdk.packet_transport = (void *)vr_usocket(PACKET, RAW);
	if (!vr_dpdk.packet_transport)
		return -1;

	if (!vr_dpdk.packet_ring) {
		/* the ring may already exist, e.g. created by another process */
		vr_dpdk.packet_ring = rte_ring_lookup("packet_tx");
		if (!vr_dpdk.packet_ring) {
			/* multi-producers single-consumer ring */
			vr_dpdk.packet_ring = rte_ring_create("packet_tx",
					VR_DPDK_TX_RING_SZ, SOCKET_ID_ANY,
					RING_F_SC_DEQ);
			if (!vr_dpdk.packet_ring) {
				RTE_LOG(ERR, VROUTER, " error creating packet ring\n");
				goto error;
			}
		}
	}

	/* create and bind event usock to wake up the packet lcore */
	event_sock = (void *)vr_usocket(EVENT, RAW);
	if (!event_sock) {
		RTE_LOG(ERR, VROUTER, " error creating packet event\n");
		goto error;
	}
	if (vr_usocket_bind_usockets(vr_dpdk.packet_transport, event_sock)) {
		RTE_LOG(ERR, VROUTER, " error binding packet event\n");
		goto error;
	}
	vr_dpdk.packet_event_sock = event_sock;
	return 0;

error:
	/* preserve errno across the usocket close calls */
	err = errno;
	if (event_sock)
		vr_usocket_close(event_sock);
	vr_usocket_close(vr_dpdk.packet_transport);
	vr_dpdk.packet_transport = NULL;
	errno = err;
	return -ENOMEM;
}
/* Create one SP/SC RX ring and one SP/SC TX ring per port, all on the
 * local NUMA socket; panics on any failure. */
static void
app_init_rings(void)
{
	uint32_t port;

	for (port = 0; port < app.n_ports; port++) {
		char name[32];

		snprintf(name, sizeof(name), "app_ring_rx_%u", port);
		app.rings_rx[port] = rte_ring_create(name, app.ring_rx_size,
			rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (app.rings_rx[port] == NULL)
			rte_panic("Cannot create RX ring %u\n", port);
	}

	for (port = 0; port < app.n_ports; port++) {
		char name[32];

		snprintf(name, sizeof(name), "app_ring_tx_%u", port);
		app.rings_tx[port] = rte_ring_create(name, app.ring_tx_size,
			rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (app.rings_tx[port] == NULL)
			rte_panic("Cannot create TX ring %u\n", port);
	}
}
void create_rings() { // Create ring queues app1 = rte_ring_create("app1", 32768, 0); app2 = rte_ring_create("app2", 32768, 0); app3 = rte_ring_create("app3", 32768, 0); app4 = rte_ring_create("app4", 32768, 0); out1 = rte_ring_create("out1", 32768, 0); out2 = rte_ring_create("out2", 32768, 0); }
void init_ring(int lcore_id, uint8_t port_id) { struct rte_ring *ring; char ring_name[RTE_RING_NAMESIZE]; rte_snprintf(ring_name, RTE_RING_NAMESIZE, "core%d_port%d", lcore_id, port_id); ring = rte_ring_create(ring_name, RING_SIZE, rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ); if (ring == NULL) rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno)); rte_ring_set_water_mark(ring, 80 * RING_SIZE / 100); rings[lcore_id][port_id] = ring; }
static void app_init_msgq(struct app_params *app) { uint32_t i; for (i = 0; i < app->n_msgq; i++) { struct app_msgq_params *p = &app->msgq_params[i]; APP_LOG(app, HIGH, "Initializing %s ...", p->name); app->msgq[i] = rte_ring_create( p->name, p->size, p->cpu_socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ); if (app->msgq[i] == NULL) rte_panic("%s init error\n", p->name); } }
static int update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id) { struct scheduler_ctx *sched_ctx = dev->data->dev_private; struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id]; if (sched_ctx->reordering_enabled) { char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN]; uint32_t buff_size = rte_align32pow2( sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE); if (qp_ctx->order_ring) { rte_ring_free(qp_ctx->order_ring); qp_ctx->order_ring = NULL; } if (!buff_size) return 0; if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), dev->data->dev_id, qp_id) < 0) { CS_LOG_ERR("failed to create unique reorder buffer " "name"); return -ENOMEM; } qp_ctx->order_ring = rte_ring_create(order_ring_name, buff_size, rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ); if (!qp_ctx->order_ring) { CS_LOG_ERR("failed to create order ring"); return -ENOMEM; } } else { if (qp_ctx->order_ring) { rte_ring_free(qp_ctx->order_ring); qp_ctx->order_ring = NULL; } } return 0; }
/** * Set up the DPDK rings which will be used to pass packets, via * pointers, between the multi-process server and client processes. * Each client needs one RX queue. */ static int init_shm_rings(void) { unsigned i; unsigned socket_id; const char * q_name; const unsigned ringsize = CLIENT_QUEUE_RINGSIZE; int retval; clients = rte_malloc("client details", sizeof(*clients) * num_clients, 0); if (clients == NULL) rte_exit(EXIT_FAILURE, "Cannot allocate memory for client program details\n"); for (i = 0; i < num_clients; i++) { /* Create an RX queue for each client */ socket_id = rte_socket_id(); q_name = get_rx_queue_name(i); if (rte_eal_process_type() == RTE_PROC_SECONDARY) { clients[i].rx_q = rte_ring_lookup(get_rx_queue_name(i)); } else { clients[i].rx_q = rte_ring_create(q_name, ringsize, socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ ); /* single prod, single cons */ } if (clients[i].rx_q == NULL) rte_exit(EXIT_FAILURE, "Cannot create rx ring queue for client %u\n", i); /* int port = rte_eth_from_rings("PORT1", &clients[i].rx_q, 1, &clients[i].rx_q, 1, rte_socket_id()); RTE_LOG(INFO, APP, "Ring port Number: %d\n", port); retval = init_port(port); */ } if (retval < 0) return retval; return 0; }
/** Create a ring to place process packets on */ static struct rte_ring * aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp, unsigned ring_size, int socket_id) { struct rte_ring *r; r = rte_ring_lookup(qp->name); if (r) { if (rte_ring_get_size(r) >= ring_size) { GCM_LOG_INFO("Reusing existing ring %s for processed" " packets", qp->name); return r; } GCM_LOG_ERR("Unable to reuse existing ring %s for processed" " packets", qp->name); return NULL; } return rte_ring_create(qp->name, ring_size, socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ); }
/*
 * Initialize the IP-defragmentation stream module: wire up the Stream
 * callbacks, attach (or create) the reassembly ring and the fragment
 * timer-list head, and hand the implementation back through *handle.
 *
 * BUG FIXES: the rte_malloc() result was dereferenced unchecked (a
 * failed allocation crashed on the timer_pre write), and impl/head were
 * leaked when ring creation failed.
 */
void init(Stream * pl, const char *name, void ** handle)
{
	IpImpl *impl = calloc(1, sizeof(IpImpl));

	if (!impl) {
		printf("Out of Mem.\n");
		return;
	}

	/* point to func. */
	pl->timeout = 10; /* timeout value is 10s the same as the default value. */
	impl->timeout = 10;
	pl->init = init;
	pl->addPacket = dpdk_ipDeFragment;
	pl->getPacket = getPacket; /* empty */
	pl->getStream = NULL;
	pl->realsePacket = realsePacket;
	pl->checkTimeOut = checkTimeOut;
	pl->showState = NULL;

	impl->r = rte_ring_lookup(name);
	impl->tail = NULL;

	/* NOTE(review): the element is allocated with sizeof(struct
	 * ipFragment) but used as struct ipPacketHead — confirm the two
	 * types are layout-compatible, or fix the sizeof. */
	impl->head = (struct ipPacketHead *)rte_malloc("tailhead",
			sizeof(struct ipFragment), 0);
	if (impl->head == NULL) {
		printf("Out of Mem.\n");
		free(impl);
		return;
	}
	impl->head->timer_pre = impl->head;
	impl->head->timer_next = NULL;

	if (impl->r == NULL) {
		printf("Ring %s not found ,now creating a new ring.\n", name);
		impl->r = rte_ring_create(name, 4096, -1, 0);
		if (impl->r == NULL) {
			printf("Error in creating ring.\n");
			/* release what was allocated instead of leaking it */
			rte_free(impl->head);
			free(impl);
			return;
		}
		printf("Done in creating ring.\n");
	}

	/* init the module. */
	initIpTable(impl->tables);
	*handle = impl;
	printf("Init ip module done!\n");
}
/** Create a ring to place processed ops on */ static struct rte_ring * zuc_pmd_qp_create_processed_ops_ring(struct zuc_qp *qp, unsigned ring_size, int socket_id) { struct rte_ring *r; r = rte_ring_lookup(qp->name); if (r) { if (rte_ring_get_size(r) >= ring_size) { ZUC_LOG(INFO, "Reusing existing ring %s" " for processed packets", qp->name); return r; } ZUC_LOG(ERR, "Unable to reuse existing ring %s" " for processed packets", qp->name); return NULL; } return rte_ring_create(qp->name, ring_size, socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ); }
/* Create one single-consumer mailbox ring per functional block and per
 * NUMA node (ring memory is taken from that node); stops — but does not
 * abort — on the first failure. */
void fb_mailbox_create(void)
{
	int blk, node;

	for (blk = 0; blk < GATEKEEPER_MAX_FUNC_BLKS; blk++) {
		for (node = 0; node < GATEKEEPER_MAX_NUMA_NODES; node++) {
			char ring_name[256];

			snprintf(ring_name, sizeof(ring_name), "%s_%d",
					mailboxes[blk].fb_name, node);
			printf("%s\n", ring_name);

			mailboxes[blk].func_mailbox[node] =
				rte_ring_create(ring_name, MAILBOX_RING_SIZE,
						node, RING_F_SC_DEQ);
			if (mailboxes[blk].func_mailbox[node] == NULL) {
				fprintf(stderr,
					"failed to allocate %s mailbox %d\n",
					mailboxes[blk].fb_name, node);
				return;
			}
		}
	}
}
/** Create a ring to place processed ops on */ static struct rte_ring * snow3g_pmd_qp_create_processed_ops_ring(struct snow3g_qp *qp, unsigned ring_size, int socket_id) { struct rte_ring *r; r = rte_ring_lookup(qp->name); if (r) { if (r->prod.size >= ring_size) { SNOW3G_LOG_INFO("Reusing existing ring %s" " for processed packets", qp->name); return r; } SNOW3G_LOG_ERR("Unable to reuse existing ring %s" " for processed packets", qp->name); return NULL; } return rte_ring_create(qp->name, ring_size, socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ); }
/** Create a ring to place processed operations on */ static struct rte_ring * openssl_pmd_qp_create_processed_ops_ring(struct openssl_qp *qp, unsigned int ring_size, int socket_id) { struct rte_ring *r; r = rte_ring_lookup(qp->name); if (r) { if (r->prod.size >= ring_size) { OPENSSL_LOG_INFO( "Reusing existing ring %s for processed ops", qp->name); return r; } OPENSSL_LOG_ERR( "Unable to reuse existing ring %s for processed ops", qp->name); return NULL; } return rte_ring_create(qp->name, ring_size, socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ); }
/** Create a ring to place process packets on */ static struct rte_ring * null_crypto_pmd_qp_create_processed_pkts_ring(struct null_crypto_qp *qp, unsigned ring_size, int socket_id) { struct rte_ring *r; r = rte_ring_lookup(qp->name); if (r) { if (rte_ring_get_size(r) >= ring_size) { NULL_CRYPTO_LOG_INFO( "Reusing existing ring %s for processed packets", qp->name); return r; } NULL_CRYPTO_LOG_INFO( "Unable to reuse existing ring %s for processed packets", qp->name); return NULL; } return rte_ring_create(qp->name, ring_size, socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ); }
/*
 * Per-process initialization of the ipaugenblick client library.
 *
 * Initializes the EAL, then attaches to the shared-memory objects the
 * service side has already created (free-client/connection rings and
 * pools, per-connection RX/TX rings, command ring, selector rings),
 * creates a per-process local RX cache ring per connection, and installs
 * signal handlers. Returns 0 on success; on lookup failures it either
 * returns -1 or calls exit(0) — see review notes inline.
 */
/* must be called per process */
int ipaugenblick_app_init(int argc,char **argv,char *app_unique_id)
{
	int i;
	char ringname[1024];

	openlog(NULL, 0, LOG_USER);

	if(rte_eal_init(argc, argv) < 0) {
		syslog(LOG_ERR,"cannot initialize rte_eal");
		return -1;
	}
	syslog(LOG_INFO,"EAL initialized\n");

	free_clients_ring = rte_ring_lookup(FREE_CLIENTS_RING);
	if(!free_clients_ring) {
		syslog(LOG_ERR,"cannot find ring %s %d\n",__FILE__,__LINE__);
		exit(0);
	}

	free_connections_ring = rte_ring_lookup(FREE_CONNECTIONS_RING);
	if(!free_connections_ring) {
		syslog(LOG_ERR,"cannot find free connections ring\n");
		return -1;
	}

	free_connections_pool = rte_mempool_lookup(FREE_CONNECTIONS_POOL_NAME);
	if(!free_connections_pool) {
		syslog(LOG_ERR,"cannot find free connections pool\n");
		return -1;
	}

	memset(local_socket_descriptors,0,sizeof(local_socket_descriptors));

	for(i = 0;i < IPAUGENBLICK_CONNECTION_POOL_SIZE;i++) {
		/* per-connection RX/TX rings created by the service side */
		sprintf(ringname,RX_RING_NAME_BASE"%d",i);
		local_socket_descriptors[i].rx_ring = rte_ring_lookup(ringname);
		if(!local_socket_descriptors[i].rx_ring) {
			syslog(LOG_ERR,"%s %d\n",__FILE__,__LINE__);
			exit(0);
		}
		sprintf(ringname,TX_RING_NAME_BASE"%d",i);
		local_socket_descriptors[i].tx_ring = rte_ring_lookup(ringname);
		if(!local_socket_descriptors[i].tx_ring) {
			syslog(LOG_ERR,"%s %d\n",__FILE__,__LINE__);
			exit(0);
		}
		local_socket_descriptors[i].select = -1;
		local_socket_descriptors[i].socket = NULL;

		/* the local cache ring is per process, hence app_unique_id in
		 * the name; if creation fails (e.g. it already exists) fall
		 * back to looking it up */
		sprintf(ringname,"lrxcache%s_%d",app_unique_id,i);
		syslog(LOG_DEBUG,"local cache name %s\n",ringname);
		local_socket_descriptors[i].local_cache = rte_ring_create(ringname,
				16384,rte_socket_id(),
				RING_F_SC_DEQ|RING_F_SP_ENQ);
		if(!local_socket_descriptors[i].local_cache) {
			syslog(LOG_WARNING,"cannot create local cache\n");
			local_socket_descriptors[i].local_cache = rte_ring_lookup(ringname);
			if(!local_socket_descriptors[i].local_cache) {
				syslog(LOG_ERR,"and cannot find\n");
				exit(0);
			}
		}
		local_socket_descriptors[i].any_event_received = 0;
	}

	tx_bufs_pool = rte_mempool_lookup("mbufs_mempool");
	if(!tx_bufs_pool) {
		syslog(LOG_ERR,"cannot find tx bufs pool\n");
		return -1;
	}

	free_command_pool = rte_mempool_lookup(FREE_COMMAND_POOL_NAME);
	if(!free_command_pool) {
		syslog(LOG_ERR,"cannot find free command pool\n");
		return -1;
	}

	command_ring = rte_ring_lookup(COMMAND_RING_NAME);
	if(!command_ring) {
		syslog(LOG_ERR,"cannot find command ring\n");
		return -1;
	}

	rx_bufs_ring = rte_ring_lookup("rx_mbufs_ring");
	if(!rx_bufs_ring) {
		syslog(LOG_ERR,"cannot find rx bufs ring\n");
		return -1;
	}

	/* NOTE(review): selectors_ring is not checked for NULL here, unlike
	 * every other lookup — confirm whether that is intentional. */
	selectors_ring = rte_ring_lookup(SELECTOR_RING_NAME);

	for(i = 0;i < IPAUGENBLICK_SELECTOR_POOL_SIZE;i++) {
		/* NOTE(review): the literal "SELECTOR_RING_NAME%d" is used
		 * here, not the SELECTOR_RING_NAME macro — verify the
		 * creating side uses the same literal name. */
		sprintf(ringname,"SELECTOR_RING_NAME%d",i);
		selectors[i].ready_connections = rte_ring_lookup(ringname);
		if(!selectors[i].ready_connections) {
			syslog(LOG_ERR,"cannot find ring %s %d\n",__FILE__,__LINE__);
			exit(0);
		}
	}

	signal(SIGHUP, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGILL, sig_handler);
	signal(SIGABRT, sig_handler);
	signal(SIGFPE, sig_handler);
	signal(SIGFPE, sig_handler); /* NOTE(review): duplicate SIGFPE registration */
	signal(SIGSEGV, sig_handler);
	signal(SIGTERM, sig_handler);
	signal(SIGUSR1, sig_handler);
	//	pthread_create(&stats_thread,NULL,print_stats,NULL);

	/* all three operands were already checked above, so this normally
	 * evaluates to 0 */
	return ((tx_bufs_pool == NULL)||(command_ring == NULL)||(free_command_pool == NULL));
}
/*
 * Create the worker → I/O TX rings.
 *
 * First pass: for every WORKER lcore and every NIC port enabled in
 * nic_tx_port_mask, create one SP/SC ring on the NUMA socket of the I/O
 * lcore that handles that port's TX, and register it on both sides
 * (the worker's rings_out and the I/O lcore's tx.rings table).
 * Second pass: sanity-check that every (port, worker) slot of each I/O
 * TX lcore received a ring; panics on any gap.
 */
static void
app_init_rings_tx(void)
{
	unsigned lcore;

	/* Initialize the rings for the TX side */
	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore].worker;
		unsigned port;

		if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
			continue;
		}

		for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
			char name[32];
			struct app_lcore_params_io *lp_io = NULL;
			struct rte_ring *ring;
			uint32_t socket_io, lcore_io;

			/* skip ports not enabled for TX */
			if (app.nic_tx_port_mask[port] == 0) {
				continue;
			}

			if (app_get_lcore_for_nic_tx(port, &lcore_io) < 0) {
				rte_panic("Algorithmic error (no I/O core to handle TX of port %u)\n", port);
			}

			lp_io = &app.lcore_params[lcore_io].io;
			socket_io = rte_lcore_to_socket_id(lcore_io);

			printf("Creating ring to connect worker lcore %u with TX port %u (through I/O lcore %u) (socket %u) ...\n", lcore, port, (unsigned)lcore_io, (unsigned)socket_io);
			snprintf(name, sizeof(name), "app_ring_tx_s%u_w%u_p%u", socket_io, lcore, port);
			/* ring memory is allocated on the I/O lcore's socket */
			ring = rte_ring_create(
				name,
				app.ring_tx_size,
				socket_io,
				RING_F_SP_ENQ | RING_F_SC_DEQ);
			if (ring == NULL) {
				rte_panic("Cannot create ring to connect worker core %u with TX port %u\n", lcore, port);
			}

			/* the same ring is registered on both the worker
			 * (producer) side and the I/O (consumer) side */
			lp_worker->rings_out[port] = ring;
			lp_io->tx.rings[port][lp_worker->worker_id] = ring;
		}
	}

	/* verification pass: every I/O TX lcore must have a ring for each
	 * of its NIC ports and each worker */
	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;
		unsigned i;

		if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
		    (lp_io->tx.n_nic_ports == 0)) {
			continue;
		}

		for (i = 0; i < lp_io->tx.n_nic_ports; i ++){
			unsigned port, j;

			port = lp_io->tx.nic_ports[i];
			for (j = 0; j < app_get_lcores_worker(); j ++) {
				if (lp_io->tx.rings[port][j] == NULL) {
					rte_panic("Algorithmic error (I/O TX rings)\n");
				}
			}
		}
	}
}