Example 1
/* Initialize DPDK link layer.
 *
 * No need to free any memory on shutdown as memory is owned by datapath.
 */
int
dpdk_link_init(void)
{
    DPDK_DEBUG()

    reply_ring = rte_ring_lookup(VSWITCHD_REPLY_RING_NAME);
    if (reply_ring == NULL) {
        rte_exit(EXIT_FAILURE, "Cannot get reply ring - is datapath running?\n");
    }

    message_ring = rte_ring_lookup(VSWITCHD_MESSAGE_RING_NAME);
    if (message_ring == NULL) {
        rte_exit(EXIT_FAILURE, "Cannot get message ring - is datapath running?\n");
    }

    packet_ring = rte_ring_lookup(VSWITCHD_PACKET_RING_NAME);
    if (packet_ring == NULL) {
        rte_exit(EXIT_FAILURE,
                 "Cannot get packet packet ring - is datapath running?\n");
    }

    mp = rte_mempool_lookup(PKTMBUF_POOL_NAME);
    if (mp == NULL) {
        rte_exit(EXIT_FAILURE,
                 "Cannot get mempool for mbufs - is datapath running?\n");
    }

    return 0;
}
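For context, the names looked up above must already have been created by the primary (datapath) process. Below is a minimal, hypothetical sketch of that side, reusing the same name macros; the helper name and sizes are illustrative only, not the actual datapath code.

#include <stdlib.h>
#include <rte_ring.h>
#include <rte_mbuf.h>
#include <rte_lcore.h>
#include <rte_debug.h>

/* Hypothetical primary-process counterpart: create the named objects that
 * dpdk_link_init() later finds via rte_ring_lookup()/rte_mempool_lookup().
 * The message and packet rings would be created the same way. */
static void
datapath_create_shared_objects(void)
{
    /* flags == 0: default multi-producer/multi-consumer ring */
    if (rte_ring_create(VSWITCHD_REPLY_RING_NAME, 512, rte_socket_id(), 0) == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create reply ring\n");

    if (rte_pktmbuf_pool_create(PKTMBUF_POOL_NAME, 8191, 256, 0,
                                RTE_MBUF_DEFAULT_BUF_SIZE,
                                rte_socket_id()) == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
}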
Example 2
int netdpcmd_ring_init(void)  
{

    netdpcmd_ring_rx = rte_ring_lookup(NETDP_PRI_2_SEC);
    if(NULL == netdpcmd_ring_rx)
    {
        printf("Lookup ring(%s) failed \n", NETDP_PRI_2_SEC);
        return NETDP_ECTRLRING;
    }

    netdpcmd_ring_tx = rte_ring_lookup(NETDP_SEC_2_PRI);
    if(NULL == netdpcmd_ring_tx)
    {
        printf("Lookup ring(%s) failed \n", NETDP_SEC_2_PRI);
        return NETDP_ECTRLRING;
    }

    netdpcmd_message_pool = rte_mempool_lookup(NETDP_MSG_POOL_NAME);
    if(NULL == netdpcmd_message_pool)
    {
        printf("Lookup message pool(%s) failed \n", NETDP_MSG_POOL_NAME);
        return NETDP_EMSGPOOL;
    }
        
    return 0;
}
Example 3
int anscli_ring_init(void)
{

    anscli_ring_rx = rte_ring_lookup(ANS_PRI_2_SEC);
    if(NULL == anscli_ring_rx)
    {
        printf("Lookup ring(%s) failed \n", ANS_PRI_2_SEC);
        return ANS_ECTRLRING;
    }

    anscli_ring_tx = rte_ring_lookup(ANS_SEC_2_PRI);
    if(NULL == anscli_ring_tx)
    {
        printf("Lookup ring(%s) failed \n", ANS_SEC_2_PRI);
        return ANS_ECTRLRING;
    }

    anscli_message_pool = rte_mempool_lookup(ANS_MSG_POOL_NAME);
    if(NULL == anscli_message_pool)
    {
        printf("Lookup message pool(%s) failed \n", ANS_MSG_POOL_NAME);
        return ANS_EMSGPOOL;
    }

    return 0;
}
Example 4
void stats_ring_init(void)
{
	uint32_t lcore_id = -1;
	struct lcore_cfg *lconf;
	struct task_args *targ;

	rsm = alloc_stats_ring_manager();
	while(prox_core_next(&lcore_id, 1) == 0) {
		lconf = &lcore_cfg[lcore_id];

		for(uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
			targ = &lconf->targs[task_id];

			for(uint32_t rxring_id = 0; rxring_id < targ->nb_rxrings; ++rxring_id) {
				if (!targ->tx_opt_ring_task)
					init_rings_add(rsm, targ->rx_rings[rxring_id]);
			}

			for (uint32_t txring_id = 0; txring_id < targ->nb_txrings; ++txring_id) {
				if (!targ->tx_opt_ring)
					init_rings_add(rsm, targ->tx_rings[txring_id]);
			}
		}
	}

	struct ring_stats *stats = NULL;

	for (uint8_t port_id = 0; port_id < PROX_MAX_PORTS; ++port_id) {
		if (!prox_port_cfg[port_id].active) {
			continue;
		}

		if (prox_port_cfg[port_id].rx_ring[0] != '\0') {
			stats = init_rings_add(rsm, rte_ring_lookup(prox_port_cfg[port_id].rx_ring));
			stats->port[stats->nb_ports++] = &prox_port_cfg[port_id];
		}

		if (prox_port_cfg[port_id].tx_ring[0] != '\0') {
			stats = init_rings_add(rsm, rte_ring_lookup(prox_port_cfg[port_id].tx_ring));
			stats->port[stats->nb_ports++] = &prox_port_cfg[port_id];
		}
	}

	/* The actual usable space for a ring is size - 1. There is at
	   most one free entry in the ring to distinguish between
	   full/empty. */
	for (uint16_t ring_id = 0; ring_id < rsm->n_rings; ++ring_id)
#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1)
		rsm->ring_stats[ring_id].size = rsm->ring_stats[ring_id].ring->prod.size - 1;
#else
		rsm->ring_stats[ring_id].size = rsm->ring_stats[ring_id].ring->size - 1;
#endif
}
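As the comment near the end of stats_ring_init() notes, a ring's raw size includes one slot reserved to tell full from empty, hence the version-dependent "- 1". On DPDK 17.05 and later the accessor functions hide that detail; a small sketch, assuming a recent DPDK:

#include <stdio.h>
#include <rte_ring.h>

/* Sketch for DPDK >= 17.05: query a ring through its accessors rather than
 * its internal fields. rte_ring_get_capacity() already excludes the
 * reserved slot, so no "- 1" is needed. */
static void
ring_report(const struct rte_ring *r)
{
	unsigned int capacity   = rte_ring_get_capacity(r); /* usable entries   */
	unsigned int used       = rte_ring_count(r);        /* entries enqueued */
	unsigned int free_slots = rte_ring_free_count(r);   /* capacity - used  */

	printf("%s: %u/%u used (%u free)\n", r->name, used, capacity, free_slots);
}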
Example 5
int ipaugenblick_create_client(ipaugenblick_update_cbk_t update_cbk)
{
    int ringset_idx;
    void *ringset_idx_p;
    ipaugenblick_cmd_t *cmd;
    char ringname[1024];

    /* The ring stores indexes encoded as pointers; dequeue into a pointer
     * first so a 64-bit store cannot overrun the 32-bit int. */
    if(rte_ring_dequeue(free_clients_ring, &ringset_idx_p)) {
        syslog(LOG_ERR,"%s %d\n",__FILE__,__LINE__);
        return -1;
    }
    ringset_idx = (int)(intptr_t)ringset_idx_p;
    sprintf(ringname,"%s%d",FREE_CLIENTS_RING,ringset_idx);
    client_ring = rte_ring_lookup(ringname);
    if(!client_ring) {
        syslog(LOG_ERR,"%s %d\n",__FILE__,__LINE__);
        return -2;
    }
    cmd = ipaugenblick_get_free_command_buf();
    if(!cmd) {
       ipaugenblick_stats_cannot_allocate_cmd++;
       return -3;
    }

    cmd->cmd = IPAUGENBLICK_CONNECT_CLIENT;
    cmd->ringset_idx = ringset_idx;
    if(ipaugenblick_enqueue_command_buf(cmd)) {
       ipaugenblick_free_command_buf(cmd);
       return -3;
    }
    g_client_ringset_idx = (uint32_t)ringset_idx;
    ipaugenblick_update_cbk = update_cbk;
    return 0;
}
Example 6
void InitIpToEtherRing(void)
{
 //  socket_tcb_ring_send = rte_ring_create(TCB_TO_SOCKET, socket_tcb_ring_size, SOCKET_ID_ANY, 0);
   int buffer_size = sizeof(struct rte_mbuf *);
   buffer_message_pool = rte_mempool_create(_MSG_POOL, pool_size,
            buffer_size, 32, 0,
            NULL, NULL, NULL, NULL,
            SOCKET_ID_ANY, 0);
   if(buffer_message_pool == NULL) {
      printf("ERROR **** ip -- ether Message failed\n");
   }
   else {
      printf("ip - ether message pool OK.\n");
   }
   ip_to_ether_ring_send = rte_ring_create(IP_ETHER_RING_NAME, ring_size, SOCKET_ID_ANY, 0);
   if(ip_to_ether_ring_send) {
      printf("ip_to_ether_ring_send ring OK\n");
   }
   else {
      printf("ERROR * ring ip_to_ether_ring_send failed\n");
   }
   ether_to_ip_ring_send = rte_ring_create(ETHER_IP_RING_NAME, ring_size, SOCKET_ID_ANY, 0);
   if(ether_to_ip_ring_send) {
      printf("ether_to_ip_ring_send ring OK\n");
   }
   else {
      printf("ERROR * ring ether_to_ip_ring_send failed\n");
   }
   ether_to_ip_ring_recv = rte_ring_lookup(ETHER_IP_RING_NAME);
   if(ether_to_ip_ring_recv) {
      printf("ether_to_ip_ring_recv ring OK\n");
   }
   else {
      printf("ERROR * ring ether_to_ip_ring_recv failed\n");
   }
   ip_to_ether_ring_recv = rte_ring_lookup(IP_ETHER_RING_NAME);
   if(ip_to_ether_ring_recv) {
      printf("ip_to_ether_ring_recv ring OK\n");
   }
   else {
      printf("ERROR * ring ip_to_ether_ring_recv failed\n");
   }
}
Example 7
void init(char * tx_ring_name, char * rx_ring_name)
{
	if ((tx_ring = rte_ring_lookup(tx_ring_name)) == NULL)
	{
		rte_exit(EXIT_FAILURE, "Cannot find TX ring\n");
	}

	if ((rx_ring = rte_ring_lookup(rx_ring_name)) == NULL)
	{
		rte_exit(EXIT_FAILURE, "Cannot find RX ring\n");
	}

#if ALLOC_METHOD == ALLOC_OVS
	if ((alloc_q = rte_ring_lookup("recycling_queue")) == NULL)
	{
		rte_exit(EXIT_FAILURE, "Cannot find alloc ring\n");
	}
#endif
}
Example 8
int
dpdk_packet_socket_init(void)
{
    unsigned lcore_id;
    struct vr_dpdk_lcore *lcorep;
    void *event_sock = NULL;
    int err;

    vr_dpdk.packet_transport = (void *)vr_usocket(PACKET, RAW);
    if (!vr_dpdk.packet_transport)
        return -1;

    if (!vr_dpdk.packet_ring) {
        vr_dpdk.packet_ring = rte_ring_lookup("pkt0_tx");
        if (!vr_dpdk.packet_ring) {
            /* multi-producers single-consumer ring */
            vr_dpdk.packet_ring = rte_ring_create("pkt0_tx", VR_DPDK_TX_RING_SZ,
                    SOCKET_ID_ANY, RING_F_SC_DEQ);
            if (!vr_dpdk.packet_ring) {
                RTE_LOG(ERR, VROUTER, "    error creating pkt0 ring\n");
                goto error;
            }
        }
    }

    /* socket events to wake up the pkt0 lcore */
    RTE_LCORE_FOREACH(lcore_id) {
        lcorep = vr_dpdk.lcores[lcore_id];
        event_sock = (void *)vr_usocket(EVENT, RAW);
        if (!event_sock) {
            goto error;
        }

        if (vr_usocket_bind_usockets(vr_dpdk.packet_transport,
                    event_sock))
            goto error;
        lcorep->lcore_event_sock = event_sock;
    }

    return 0;

error:
    err = errno;
    if (event_sock)
        vr_usocket_close(event_sock);
    vr_usocket_close(vr_dpdk.packet_transport);
    vr_dpdk.packet_transport = NULL;
    errno = err;

    return -ENOMEM;
}
Example 9
int
dpdk_packet_socket_init(void)
{
    void *event_sock = NULL;
    int err;

    vr_dpdk.packet_transport = (void *)vr_usocket(PACKET, RAW);
    if (!vr_dpdk.packet_transport)
        return -1;

    if (!vr_dpdk.packet_ring) {
        vr_dpdk.packet_ring = rte_ring_lookup("packet_tx");
        if (!vr_dpdk.packet_ring) {
            /* multi-producers single-consumer ring */
            vr_dpdk.packet_ring = rte_ring_create("packet_tx", VR_DPDK_TX_RING_SZ,
                    SOCKET_ID_ANY, RING_F_SC_DEQ);
            if (!vr_dpdk.packet_ring) {
                RTE_LOG(ERR, VROUTER, "    error creating packet ring\n");
                goto error;
            }
        }
    }

    /* create and bind event usock to wake up the packet lcore */
    event_sock = (void *)vr_usocket(EVENT, RAW);
    if (!event_sock) {
        RTE_LOG(ERR, VROUTER, "    error creating packet event\n");
        goto error;
    }

    if (vr_usocket_bind_usockets(vr_dpdk.packet_transport,
                event_sock)) {
        RTE_LOG(ERR, VROUTER, "    error binding packet event\n");
        goto error;
    }
    vr_dpdk.packet_event_sock = event_sock;

    return 0;

error:
    err = errno;
    if (event_sock)
        vr_usocket_close(event_sock);
    vr_usocket_close(vr_dpdk.packet_transport);
    vr_dpdk.packet_transport = NULL;
    errno = err;

    return -ENOMEM;
}
Example 10
/**
 * Set up the DPDK rings which will be used to pass packets, via
 * pointers, between the multi-process server and client processes.
 * Each client needs one RX queue.
 */
static int
init_shm_rings(void)
{
	unsigned i;
	unsigned socket_id;
	const char * q_name;
	const unsigned ringsize = CLIENT_QUEUE_RINGSIZE;
	int retval = 0; /* stays 0 because the ring-port init below is commented out */

	clients = rte_malloc("client details",
		sizeof(*clients) * num_clients, 0);
	if (clients == NULL)
		rte_exit(EXIT_FAILURE, "Cannot allocate memory for client program details\n");

	for (i = 0; i < num_clients; i++) {
		/* Create an RX queue for each client */
		socket_id = rte_socket_id();
		q_name = get_rx_queue_name(i);
		if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		{
			clients[i].rx_q = rte_ring_lookup(get_rx_queue_name(i));
		}
		else
		{
			clients[i].rx_q = rte_ring_create(q_name,
					ringsize, socket_id,
					RING_F_SP_ENQ | RING_F_SC_DEQ ); /* single prod, single cons */
		}
		if (clients[i].rx_q == NULL)
			rte_exit(EXIT_FAILURE, "Cannot create rx ring queue for client %u\n", i);
		
		/*
		int port = rte_eth_from_rings("PORT1", &clients[i].rx_q, 1, &clients[i].rx_q, 1, rte_socket_id());
		RTE_LOG(INFO, APP, "Ring port Number: %d\n", port);
		retval = init_port(port);
		*/
	}
	if (retval < 0) return retval;
	
	return 0;
}
Example 11
void init(Stream * pl, const char *name, void ** handle){

	IpImpl * impl = calloc(1,sizeof(IpImpl));
	if (!impl){
		printf("Out of Mem.\n");
		return ;
	}
	// set up the stream's function pointers
	pl -> timeout = 10; // timeout is 10 s, the same as the default value
	impl -> timeout = 10;
	pl -> init = init;
	pl -> addPacket = dpdk_ipDeFragment;
	//empty
	pl -> getPacket = getPacket;
	pl -> getStream = NULL;
	pl -> realsePacket = realsePacket;
	pl -> checkTimeOut = checkTimeOut;
	pl -> showState = NULL;
	impl -> r = rte_ring_lookup(name);
	impl -> tail = NULL;
	impl -> head = (struct ipPacketHead *)rte_malloc("tailhead",sizeof(struct ipFragment),0);
	impl -> head -> timer_pre = impl -> head;
	impl -> head -> timer_next = NULL;
	if(impl -> r == NULL){
		printf("Ring %s not found ,now creating a new ring.\n",name);
		impl -> r = rte_ring_create(name,4096, -1, 0);
		if(impl -> r == NULL){
			printf("Error in creating ring.\n");
			return ;
		}
		else
			printf("Done in creating ring.\n");
	}
	//init the module.
	initIpTable(impl -> tables); 
	*handle = impl; 
	printf("Init ip module done!\n");

}
Example 12
/** Create a ring to place processed packets on */
static struct rte_ring *
aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
		unsigned ring_size, int socket_id)
{
	struct rte_ring *r;

	r = rte_ring_lookup(qp->name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			GCM_LOG_INFO("Reusing existing ring %s for processed"
					" packets", qp->name);
			return r;
		}

		GCM_LOG_ERR("Unable to reuse existing ring %s for processed"
				" packets", qp->name);
		return NULL;
	}

	return rte_ring_create(qp->name, ring_size, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}
Example 13
/** Create a ring to place processed operations on */
static struct rte_ring *
openssl_pmd_qp_create_processed_ops_ring(struct openssl_qp *qp,
		unsigned int ring_size, int socket_id)
{
	struct rte_ring *r;

	r = rte_ring_lookup(qp->name);
	if (r) {
		if (r->prod.size >= ring_size) {
			OPENSSL_LOG_INFO(
				"Reusing existing ring %s for processed ops",
				 qp->name);
			return r;
		}

		OPENSSL_LOG_ERR(
			"Unable to reuse existing ring %s for processed ops",
			 qp->name);
		return NULL;
	}

	return rte_ring_create(qp->name, ring_size, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}
Example 14
/** Create a ring to place processed ops on */
static struct rte_ring *
zuc_pmd_qp_create_processed_ops_ring(struct zuc_qp *qp,
		unsigned ring_size, int socket_id)
{
	struct rte_ring *r;

	r = rte_ring_lookup(qp->name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			ZUC_LOG(INFO, "Reusing existing ring %s"
					" for processed packets",
					 qp->name);
			return r;
		}

		ZUC_LOG(ERR, "Unable to reuse existing ring %s"
				" for processed packets",
				 qp->name);
		return NULL;
	}

	return rte_ring_create(qp->name, ring_size, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}
Example 15
/** Create a ring to place processed packets on */
static struct rte_ring *
null_crypto_pmd_qp_create_processed_pkts_ring(struct null_crypto_qp *qp,
		unsigned ring_size, int socket_id)
{
	struct rte_ring *r;

	r = rte_ring_lookup(qp->name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			NULL_CRYPTO_LOG_INFO(
				"Reusing existing ring %s for processed packets",
				qp->name);
			return r;
		}

		NULL_CRYPTO_LOG_INFO(
			"Unable to reuse existing ring %s for processed packets",
			 qp->name);
		return NULL;
	}

	return rte_ring_create(qp->name, ring_size, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}
Example 16
/** Create a ring to place processed ops on */
static struct rte_ring *
snow3g_pmd_qp_create_processed_ops_ring(struct snow3g_qp *qp,
		unsigned ring_size, int socket_id)
{
	struct rte_ring *r;

	r = rte_ring_lookup(qp->name);
	if (r) {
		if (r->prod.size >= ring_size) {
			SNOW3G_LOG_INFO("Reusing existing ring %s"
					" for processed packets",
					 qp->name);
			return r;
		}

		SNOW3G_LOG_ERR("Unable to reuse existing ring %s"
				" for processed packets",
				 qp->name);
		return NULL;
	}

	return rte_ring_create(qp->name, ring_size, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}
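The lookup-or-create idiom above is repeated almost verbatim across these crypto PMD queue-pair helpers. A generic sketch of the shared logic follows; the helper name is hypothetical, and rte_ring_get_size() assumes DPDK 17.05 or later.

#include <rte_ring.h>

/* Hypothetical helper factoring the repeated pattern: reuse an existing
 * named ring if it is large enough, otherwise create a fresh single
 * producer / single consumer ring. Returns NULL when an undersized ring
 * with the same name already exists. */
static struct rte_ring *
qp_ring_lookup_or_create(const char *name, unsigned int ring_size,
		int socket_id)
{
	struct rte_ring *r = rte_ring_lookup(name);

	if (r != NULL)
		return rte_ring_get_size(r) >= ring_size ? r : NULL;

	return rte_ring_create(name, ring_size, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}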
Example 17
/*
 * Application main function - loops through
 * receiving and processing packets. Never returns
 */
int
main(int argc, char *argv[])
{
    const struct rte_memzone *mz;
    struct rte_ring *rx_ring;
    struct rte_mempool *mp;
    struct port_info *ports;
    int need_flush = 0; /* indicates whether we have unsent packets */
    int retval;
    void *pkts[PKT_READ_SIZE];
    uint16_t sent;

    if ((retval = rte_eal_init(argc, argv)) < 0)
        return -1;
    argc -= retval;
    argv += retval;

    if (parse_app_args(argc, argv) < 0)
        rte_exit(EXIT_FAILURE, "Invalid command-line arguments\n");

    if (rte_eth_dev_count() == 0)
        rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");

    rx_ring = rte_ring_lookup(get_rx_queue_name(client_id));
    if (rx_ring == NULL)
        rte_exit(EXIT_FAILURE, "Cannot get RX ring - is server process running?\n");

    mp = rte_mempool_lookup(PKTMBUF_POOL_NAME);
    if (mp == NULL)
        rte_exit(EXIT_FAILURE, "Cannot get mempool for mbufs\n");

    mz = rte_memzone_lookup(MZ_PORT_INFO);
    if (mz == NULL)
        rte_exit(EXIT_FAILURE, "Cannot get port info structure\n");
    ports = mz->addr;
    tx_stats = &(ports->tx_stats[client_id]);

    configure_output_ports(ports);

    RTE_LOG(INFO, APP, "Finished Process Init.\n");

    printf("\nClient process %d handling packets\n", client_id);
    printf("[Press Ctrl-C to quit ...]\n");

    for (;;) {
        uint16_t i, rx_pkts = PKT_READ_SIZE;
        uint8_t port;

        /* try dequeuing max possible packets first, if that fails, get the
         * most we can. Loop body should only execute once, maximum */
        while (rx_pkts > 0 &&
                unlikely(rte_ring_dequeue_bulk(rx_ring, pkts, rx_pkts) != 0))
            rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring), PKT_READ_SIZE);

        if (unlikely(rx_pkts == 0)) {
            if (need_flush)
                for (port = 0; port < ports->num_ports; port++) {
                    sent = rte_eth_tx_buffer_flush(ports->id[port], client_id,
                                                   tx_buffer[port]);
                    if (unlikely(sent))
                        tx_stats->tx[port] += sent;
                }
            need_flush = 0;
            continue;
        }

        for (i = 0; i < rx_pkts; i++)
            handle_packet(pkts[i]);

        need_flush = 1;
    }
}
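The two-step dequeue above (try a full bulk, then shrink the request to what the ring currently holds) predates the burst API; on DPDK 17.05 and later the same effect is normally a single call, as in this sketch:

#include <stdint.h>
#include <rte_ring.h>

/* Sketch, assuming the DPDK >= 17.05 signature: dequeue up to "max" objects
 * in one call. The trailing out-parameter (entries left behind) is unused. */
static uint16_t
dequeue_up_to(struct rte_ring *rx_ring, void **pkts, unsigned int max)
{
    return (uint16_t)rte_ring_dequeue_burst(rx_ring, pkts, max, NULL);
}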
Example 18
/* must be called per process */
int ipaugenblick_app_init(int argc,char **argv,char *app_unique_id)
{
    int i;
    char ringname[1024];

    openlog(NULL, 0, LOG_USER);

    if(rte_eal_init(argc, argv) < 0) {
        syslog(LOG_ERR,"cannot initialize rte_eal");
	return -1;
    }
    syslog(LOG_INFO,"EAL initialized\n");

    free_clients_ring = rte_ring_lookup(FREE_CLIENTS_RING);
    if(!free_clients_ring) {
        syslog(LOG_ERR,"cannot find ring %s %d\n",__FILE__,__LINE__);
        exit(0);
    }

    free_connections_ring = rte_ring_lookup(FREE_CONNECTIONS_RING);

    if(!free_connections_ring) {
        syslog(LOG_ERR,"cannot find free connections ring\n");
        return -1;
    }

    free_connections_pool = rte_mempool_lookup(FREE_CONNECTIONS_POOL_NAME);

    if(!free_connections_pool) {
        syslog(LOG_ERR,"cannot find free connections pool\n");
        return -1;
    }

    memset(local_socket_descriptors,0,sizeof(local_socket_descriptors));
    for(i = 0;i < IPAUGENBLICK_CONNECTION_POOL_SIZE;i++) {
        sprintf(ringname,RX_RING_NAME_BASE"%d",i);
        local_socket_descriptors[i].rx_ring = rte_ring_lookup(ringname);
        if(!local_socket_descriptors[i].rx_ring) {
            syslog(LOG_ERR,"%s %d\n",__FILE__,__LINE__);
            exit(0);
        }
        sprintf(ringname,TX_RING_NAME_BASE"%d",i);
        local_socket_descriptors[i].tx_ring = rte_ring_lookup(ringname);
        if(!local_socket_descriptors[i].tx_ring) {
            syslog(LOG_ERR,"%s %d\n",__FILE__,__LINE__);
            exit(0);
        }
        local_socket_descriptors[i].select = -1;
        local_socket_descriptors[i].socket = NULL;
        sprintf(ringname,"lrxcache%s_%d",app_unique_id,i);
        syslog(LOG_DEBUG,"local cache name %s\n",ringname);
        local_socket_descriptors[i].local_cache = rte_ring_create(ringname, 16384,rte_socket_id(), RING_F_SC_DEQ|RING_F_SP_ENQ);
        if(!local_socket_descriptors[i].local_cache) {
            syslog(LOG_WARNING,"cannot create local cache\n");
            local_socket_descriptors[i].local_cache = rte_ring_lookup(ringname);
            if(!local_socket_descriptors[i].local_cache) {
                syslog(LOG_ERR,"and cannot find\n");
                exit(0);
            }
        }
        local_socket_descriptors[i].any_event_received = 0;
    }
    tx_bufs_pool = rte_mempool_lookup("mbufs_mempool");
    if(!tx_bufs_pool) {
        syslog(LOG_ERR,"cannot find tx bufs pool\n");
        return -1;
    }
    
    free_command_pool = rte_mempool_lookup(FREE_COMMAND_POOL_NAME);
    if(!free_command_pool) {
        syslog(LOG_ERR,"cannot find free command pool\n");
        return -1;
    }
    
    command_ring = rte_ring_lookup(COMMAND_RING_NAME);
    if(!command_ring) {
        syslog(LOG_ERR,"cannot find command ring\n");
        return -1;
    }
    rx_bufs_ring = rte_ring_lookup("rx_mbufs_ring");
    if(!rx_bufs_ring) {
        syslog(LOG_ERR,"cannot find rx bufs ring\n");
        return -1;
    }
    selectors_ring = rte_ring_lookup(SELECTOR_RING_NAME);
    
    for(i = 0;i < IPAUGENBLICK_SELECTOR_POOL_SIZE;i++) {
        sprintf(ringname,"SELECTOR_RING_NAME%d",i);
        selectors[i].ready_connections = rte_ring_lookup(ringname);
        if(!selectors[i].ready_connections) {
            syslog(LOG_ERR,"cannot find ring %s %d\n",__FILE__,__LINE__);
            exit(0);
        } 
    }
    
    signal(SIGHUP, sig_handler);
    signal(SIGINT, sig_handler);
    signal(SIGILL, sig_handler);
    signal(SIGABRT, sig_handler);
    signal(SIGFPE, sig_handler);
    signal(SIGSEGV, sig_handler);
    signal(SIGTERM, sig_handler);
    signal(SIGUSR1, sig_handler);
//    pthread_create(&stats_thread,NULL,print_stats,NULL);
    return ((tx_bufs_pool == NULL)||(command_ring == NULL)||(free_command_pool == NULL));
}
Example 19
/**
 * CALLED BY NF:
 * Initialises everything we need
 *
 * Returns the number of arguments parsed by both rte_eal_init and
 * parse_nflib_args offset by 1.  This is used by getopt in the NF's
 * code.  The offsetting by one accounts for getopt parsing "--" which
 * increments optind by 1 each time.
 */
int
onvm_nf_init(int argc, char *argv[], const char *nf_tag) {
        const struct rte_memzone *mz;
	const struct rte_memzone *mz_scp;
        struct rte_mempool *mp;
	struct onvm_service_chain **scp;
        int retval_eal, retval_parse, retval_final;

        if ((retval_eal = rte_eal_init(argc, argv)) < 0)
                return -1;

        /* Modify argc and argv to conform to getopt rules for parse_nflib_args */
        argc -= retval_eal; argv += retval_eal;

        /* Reset getopt global variables opterr and optind to their default values */
        opterr = 0; optind = 1;

        if ((retval_parse = parse_nflib_args(argc, argv)) < 0)
                rte_exit(EXIT_FAILURE, "Invalid command-line arguments\n");

        /*
         * Calculate the offset that the nf will use to modify argc and argv for its
         * getopt call. This is the sum of the number of arguments parsed by
         * rte_eal_init and parse_nflib_args. This will be decremented by 1 to assure
         * getopt is looking at the correct index since optind is incremented by 1 each
         * time "--" is parsed.
         * This is the value that will be returned if initialization succeeds.
         */
        retval_final = (retval_eal + retval_parse) - 1;

        /* Reset getopt global variables opterr and optind to their default values */
        opterr = 0; optind = 1;

        /* Lookup mempool for nf_info struct */
        nf_info_mp = rte_mempool_lookup(_NF_MEMPOOL_NAME);
        if (nf_info_mp == NULL)
                rte_exit(EXIT_FAILURE, "No Client Info mempool - bye\n");

        /* Initialize the info struct */
        nf_info = ovnm_nf_info_init(nf_tag);

        mp = rte_mempool_lookup(PKTMBUF_POOL_NAME);
        if (mp == NULL)
                rte_exit(EXIT_FAILURE, "Cannot get mempool for mbufs\n");

        mz = rte_memzone_lookup(MZ_CLIENT_INFO);
        if (mz == NULL)
                rte_exit(EXIT_FAILURE, "Cannot get tx info structure\n");
        tx_stats = mz->addr;

	mz_scp = rte_memzone_lookup(MZ_SCP_INFO);
	if (mz_scp == NULL)
		rte_exit(EXIT_FAILURE, "Cannot get service chain info structre\n");
	scp = mz_scp->addr;
	default_chain = *scp;

	onvm_sc_print(default_chain);

        nf_info_ring = rte_ring_lookup(_NF_QUEUE_NAME);
        if (nf_info_ring == NULL)
                rte_exit(EXIT_FAILURE, "Cannot get nf_info ring");

        /* Put this NF's info struct onto queue for manager to process startup */
        if (rte_ring_enqueue(nf_info_ring, nf_info) < 0) {
                rte_mempool_put(nf_info_mp, nf_info); // give back memory
                rte_exit(EXIT_FAILURE, "Cannot send nf_info to manager");
        }

        /* Wait for a client id to be assigned by the manager */
        RTE_LOG(INFO, APP, "Waiting for manager to assign an ID...\n");
        for (; nf_info->status == (uint16_t)NF_WAITING_FOR_ID ;) {
                sleep(1);
        }

        /* This NF is trying to declare an ID already in use. */
        if (nf_info->status == NF_ID_CONFLICT) {
                rte_mempool_put(nf_info_mp, nf_info);
                rte_exit(NF_ID_CONFLICT, "Selected ID already in use. Exiting...\n");
        } else if(nf_info->status == NF_NO_IDS) {
                rte_mempool_put(nf_info_mp, nf_info);
                rte_exit(NF_NO_IDS, "There are no ids available for this NF\n");
        } else if(nf_info->status != NF_STARTING) {
                rte_mempool_put(nf_info_mp, nf_info);
                rte_exit(EXIT_FAILURE, "Error occurred during manager initialization\n");
        }
        RTE_LOG(INFO, APP, "Using Instance ID %d\n", nf_info->instance_id);
        RTE_LOG(INFO, APP, "Using Service ID %d\n", nf_info->service_id);

        /* Now, map rx and tx rings into client space */
        rx_ring = rte_ring_lookup(get_rx_queue_name(nf_info->instance_id));
        if (rx_ring == NULL)
                rte_exit(EXIT_FAILURE, "Cannot get RX ring - is server process running?\n");

        tx_ring = rte_ring_lookup(get_tx_queue_name(nf_info->instance_id));
        if (tx_ring == NULL)
                rte_exit(EXIT_FAILURE, "Cannot get TX ring - is server process running?\n");

        /* Tell the manager we're ready to receive packets */
        nf_info->status = NF_RUNNING;

        RTE_LOG(INFO, APP, "Finished Process Init.\n");
        return retval_final;
}
Example 20
int app_init(void)
{
	uint32_t i;
	char ring_name[MAX_NAME_LEN];
	char pool_name[MAX_NAME_LEN];

	/* init driver(s) */
	if (rte_pmd_init_all() < 0)
		rte_exit(EXIT_FAILURE, "Cannot init PMD\n");

	if (rte_eal_pci_probe() < 0)
		rte_exit(EXIT_FAILURE, "Cannot probe PCI\n");

	if (rte_eth_dev_count() == 0)
		rte_exit(EXIT_FAILURE, "No Ethernet port - bye\n");

	/* load configuration profile */
	if (app_load_cfg_profile(cfg_profile) != 0)
		rte_exit(EXIT_FAILURE, "Invalid configuration profile\n");
	
	/* Initialize each active flow */
	for(i = 0; i < nb_pfc; i++) {
		uint32_t socket = rte_lcore_to_socket_id(qos_conf[i].rx_core);
		struct rte_ring *ring;

		rte_snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].rx_core);
		ring = rte_ring_lookup(ring_name);
		if (ring == NULL)
			qos_conf[i].rx_ring = rte_ring_create(ring_name, ring_conf.ring_size,
			 	socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
		else
			qos_conf[i].rx_ring = ring;

		rte_snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].tx_core);
		ring = rte_ring_lookup(ring_name);
		if (ring == NULL)
			qos_conf[i].tx_ring = rte_ring_create(ring_name, ring_conf.ring_size,
				socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
		else
			qos_conf[i].tx_ring = ring;


		/* create the mbuf pools for each RX Port */
		rte_snprintf(pool_name, MAX_NAME_LEN, "mbuf_pool%u", i);
		qos_conf[i].mbuf_pool = rte_mempool_create(pool_name, mp_size, MBUF_SIZE,
						burst_conf.rx_burst * 4,
						sizeof(struct rte_pktmbuf_pool_private),
						rte_pktmbuf_pool_init, NULL,
						rte_pktmbuf_init, NULL,
						rte_eth_dev_socket_id(qos_conf[i].rx_port),
						0);
		if (qos_conf[i].mbuf_pool == NULL)
			rte_exit(EXIT_FAILURE, "Cannot init mbuf pool for socket %u\n", i);

		app_init_port(qos_conf[i].rx_port, qos_conf[i].mbuf_pool);
		app_init_port(qos_conf[i].tx_port, qos_conf[i].mbuf_pool);
		
		qos_conf[i].sched_port = app_init_sched_port(qos_conf[i].tx_port, socket);
	}

	RTE_LOG(INFO, APP, "time stamp clock running at %" PRIu64 " Hz\n",
			 rte_get_timer_hz());
	
	RTE_LOG(INFO, APP, "Ring sizes: NIC RX = %u, Mempool = %d SW queue = %u,"
			 "NIC TX = %u\n", ring_conf.rx_size, mp_size, ring_conf.ring_size,
			 ring_conf.tx_size);

	RTE_LOG(INFO, APP, "Burst sizes: RX read = %hu, RX write = %hu,\n"
						  "             Worker read/QoS enqueue = %hu,\n"
						  "             QoS dequeue = %hu, Worker write = %hu\n",
		burst_conf.rx_burst, burst_conf.ring_burst, burst_conf.ring_burst, 
		burst_conf.qos_dequeue, burst_conf.tx_burst);

	RTE_LOG(INFO, APP, "NIC thresholds RX (p = %hhu, h = %hhu, w = %hhu),"
				 "TX (p = %hhu, h = %hhu, w = %hhu)\n",
		rx_thresh.pthresh, rx_thresh.hthresh, rx_thresh.wthresh,
		tx_thresh.pthresh, tx_thresh.hthresh, tx_thresh.wthresh);

	return 0;
}
Example 21
/**
 * CALLED BY NF:
 * Application main function - loops through
 * receiving and processing packets. Never returns
 */
int
onvm_nf_run(struct onvm_nf_info* info, int(*handler)(struct rte_mbuf* pkt, struct onvm_pkt_meta* meta)) {
        void *pkts[PKT_READ_SIZE];
        struct onvm_pkt_meta* meta;

        printf("\nClient process %d handling packets\n", info->instance_id);
        printf("[Press Ctrl-C to quit ...]\n");

        /* Listen for ^C so we can exit gracefully */
        signal(SIGINT, handle_signal);

        for (; keep_running;) {
                uint16_t i, j, nb_pkts = PKT_READ_SIZE;
                void *pktsTX[PKT_READ_SIZE];
                int tx_batch_size = 0;
                int ret_act;

                /* try dequeuing max possible packets first, if that fails, get the
                 * most we can. Loop body should only execute once, maximum */
                while (nb_pkts > 0 &&
                                unlikely(rte_ring_dequeue_bulk(rx_ring, pkts, nb_pkts) != 0))
                        nb_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring), PKT_READ_SIZE);

                if(nb_pkts == 0) {
                        continue;
                }
                /* Give each packet to the user processing function */
                for (i = 0; i < nb_pkts; i++) {
                        meta = onvm_get_pkt_meta((struct rte_mbuf*)pkts[i]);
                        ret_act = (*handler)((struct rte_mbuf*)pkts[i], meta);
                        /* NF returns 0 to return packets or 1 to buffer */
                        if(likely(ret_act == 0)) {
                                pktsTX[tx_batch_size++] = pkts[i];
                        }
                        else {
                                tx_stats->tx_buffer[info->instance_id]++;
                        }
                }

                if (unlikely(tx_batch_size > 0 && rte_ring_enqueue_bulk(tx_ring, pktsTX, tx_batch_size) == -ENOBUFS)) {
                        tx_stats->tx_drop[info->instance_id] += tx_batch_size;
                        for (j = 0; j < tx_batch_size; j++) {
                                rte_pktmbuf_free(pktsTX[j]);
                        }
                } else {
                        tx_stats->tx[info->instance_id] += tx_batch_size;
                }
        }

        nf_info->status = NF_STOPPED;

        /* Put this NF's info struct back into queue for manager to ack shutdown */
        nf_info_ring = rte_ring_lookup(_NF_QUEUE_NAME);
        if (nf_info_ring == NULL) {
                rte_mempool_put(nf_info_mp, nf_info); // give back memory
                rte_exit(EXIT_FAILURE, "Cannot get nf_info ring for shutdown");
        }

        if (rte_ring_enqueue(nf_info_ring, nf_info) < 0) {
                rte_mempool_put(nf_info_mp, nf_info); // give back memory
                rte_exit(EXIT_FAILURE, "Cannot send nf_info to manager for shutdown");
        }
        return 0;
}
Example 22
/*
 * Application main function - loops through
 * receiving and processing packets. Never returns
 */
int
main(int argc, char *argv[])
{
    struct rte_ring *rx_ring = NULL;
    struct rte_ring *tx_ring = NULL;
    int retval = 0;
    void *pkts[PKT_READ_SIZE];
    int rslt = 0;

    if ((retval = rte_eal_init(argc, argv)) < 0) {
        return -1;
    }

    argc -= retval;
    argv += retval;

    if (parse_app_args(argc, argv) < 0) {
        rte_exit(EXIT_FAILURE, "Invalid command-line arguments\n");
    }

    rx_ring = rte_ring_lookup(get_rx_queue_name(client_id));
    if (rx_ring == NULL) {
        rte_exit(EXIT_FAILURE,
            "Cannot get RX ring - is server process running?\n");
    }

    tx_ring = rte_ring_lookup(get_tx_queue_name(client_id));
    if (tx_ring == NULL) {
        rte_exit(EXIT_FAILURE,
            "Cannot get TX ring - is server process running?\n");
    }

    RTE_LOG(INFO, APP, "Finished Process Init.\n");

    printf("\nClient process %d handling packets\n", client_id);
    printf("[Press Ctrl-C to quit ...]\n");

    for (;;) {
        unsigned rx_pkts = PKT_READ_SIZE;

        /* Try dequeuing max possible packets first, if that fails, get the
         * most we can. Loop body should only execute once, maximum.
         */
        while (unlikely(rte_ring_dequeue_bulk(rx_ring, pkts, rx_pkts) != 0) &&
            rx_pkts > 0) {
            rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring), PKT_READ_SIZE);
        }

        if (rx_pkts > 0) {
            pkt++;
            /* blocking enqueue */
            do {
                rslt = rte_ring_enqueue_bulk(tx_ring, pkts, rx_pkts);
            } while (rslt == -ENOBUFS);
        } else {
            no_pkt++;
        }

        if (!(pkt %  100000)) {
            printf("pkt %d %d\n", pkt, no_pkt);
            pkt = no_pkt = 0;
        }
    }
}
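Note that the -ENOBUFS comparisons in the two examples above rely on the pre-17.05 ring API; from DPDK 17.05 onwards rte_ring_enqueue_bulk() returns the number of objects enqueued (0 on failure), so the blocking retry would look roughly like this sketch:

#include <rte_ring.h>
#include <rte_pause.h>

/* Sketch, assuming DPDK >= 17.05: rte_ring_enqueue_bulk() returns the number
 * of objects enqueued (0 on failure) instead of 0/-ENOBUFS. */
static void
blocking_enqueue(struct rte_ring *tx_ring, void **pkts, unsigned int n)
{
    while (rte_ring_enqueue_bulk(tx_ring, pkts, n, NULL) == 0)
        rte_pause(); /* back off until the consumer drains the ring */
}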
Example 23
/*
 * This function is run in the secondary instance to test that creation of
 * objects fails in a secondary
 */
static int
run_object_creation_tests(void)
{
	const unsigned flags = 0;
	const unsigned size = 1024;
	const unsigned elt_size = 64;
	const unsigned cache_size = 64;
	const unsigned priv_data_size = 32;

	printf("### Testing object creation - expect lots of mz reserve errors!\n");

	rte_errno = 0;
	if ((rte_memzone_reserve("test_mz", size, rte_socket_id(),
				 flags) == NULL) &&
	    (rte_memzone_lookup("test_mz") == NULL)) {
		printf("Error: unexpected return value from rte_memzone_reserve\n");
		return -1;
	}
	printf("# Checked rte_memzone_reserve() OK\n");

	rte_errno = 0;
	if ((rte_ring_create(
		     "test_ring", size, rte_socket_id(), flags) == NULL) &&
		    (rte_ring_lookup("test_ring") == NULL)){
		printf("Error: unexpected return value from rte_ring_create()\n");
		return -1;
	}
	printf("# Checked rte_ring_create() OK\n");

	rte_errno = 0;
	if ((rte_mempool_create("test_mp", size, elt_size, cache_size,
				priv_data_size, NULL, NULL, NULL, NULL,
				rte_socket_id(), flags) == NULL) &&
	     (rte_mempool_lookup("test_mp") == NULL)){
		printf("Error: unexpected return value from rte_mempool_create()\n");
		return -1;
	}
	printf("# Checked rte_mempool_create() OK\n");

#ifdef RTE_LIBRTE_HASH
	const struct rte_hash_parameters hash_params = { .name = "test_mp_hash" };
	rte_errno=0;
	if ((rte_hash_create(&hash_params) != NULL) &&
	    (rte_hash_find_existing(hash_params.name) == NULL)){
		printf("Error: unexpected return value from rte_hash_create()\n");
		return -1;
	}
	printf("# Checked rte_hash_create() OK\n");

	const struct rte_fbk_hash_params fbk_params = { .name = "test_fbk_mp_hash" };
	rte_errno=0;
	if ((rte_fbk_hash_create(&fbk_params) != NULL) &&
	    (rte_fbk_hash_find_existing(fbk_params.name) == NULL)){
		printf("Error: unexpected return value from rte_fbk_hash_create()\n");
		return -1;
	}
	printf("# Checked rte_fbk_hash_create() OK\n");
#endif

#ifdef RTE_LIBRTE_LPM
	rte_errno=0;
	struct rte_lpm_config config;

	config.max_rules = rte_socket_id();
	config.number_tbl8s = 256;
	config.flags = 0;
	if ((rte_lpm_create("test_lpm", size, &config) != NULL) &&
	    (rte_lpm_find_existing("test_lpm") == NULL)){
		printf("Error: unexpected return value from rte_lpm_create()\n");
		return -1;
	}
	printf("# Checked rte_lpm_create() OK\n");
#endif

	/* Run a test_pci call */
	if (test_pci() != 0) {
		printf("PCI scan failed in secondary\n");
		if (getuid() == 0) /* pci scans can fail as non-root */
			return -1;
	} else
		printf("PCI scan succeeded in secondary\n");

	return 0;
}

/* if called in a primary process, just spawns off a secondary process to
 * run validation tests - which brings us right back here again...
 * if called in a secondary process, this runs a series of API tests to check
 * how things run in a secondary instance.
 */
int
test_mp_secondary(void)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (!test_pci_run) {
			printf("=== Running pre-requisite test of test_pci\n");
			test_pci();
			printf("=== Requisite test done\n");
		}
		return run_secondary_instances();
	}

	printf("IN SECONDARY PROCESS\n");

	return run_object_creation_tests();
}

static struct test_command multiprocess_cmd = {
	.command = "multiprocess_autotest",
	.callback = test_mp_secondary,
};
REGISTER_TEST_COMMAND(multiprocess_cmd);