Example #1
void
app_print_params(void)
{
	unsigned port, queue, lcore, i, j;

	/* Print NIC RX configuration */
	printf("NIC RX ports: ");
	for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
		uint32_t n_rx_queues = app_get_nic_rx_queues_per_port((uint8_t) port);

		if (n_rx_queues == 0) {
			continue;
		}

		printf("%u (", port);
		for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) {
			if (app.nic_rx_queue_mask[port][queue] == 1) {
				printf("%u ", queue);
			}
		}
		printf(")  ");
	}
	printf(";\n");

	/* Print I/O lcore RX params */
	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		struct app_lcore_params_io *lp = &app.lcore_params[lcore].io;

		if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
		    (lp->rx.n_nic_queues == 0)) {
			continue;
		}

		printf("I/O lcore %u (socket %u): ", lcore, rte_lcore_to_socket_id(lcore));

		printf("RX ports  ");
		for (i = 0; i < lp->rx.n_nic_queues; i ++) {
			printf("(%u, %u)  ",
				(unsigned) lp->rx.nic_queues[i].port,
				(unsigned) lp->rx.nic_queues[i].queue);
		}
		printf("; ");

		printf("Output rings  ");
		for (i = 0; i < lp->rx.n_rings; i ++) {
			printf("%p  ", lp->rx.rings[i]);
		}
		printf(";\n");
	}

	/* Print worker lcore RX params */
	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		struct app_lcore_params_worker *lp = &app.lcore_params[lcore].worker;

		if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
			continue;
		}

		printf("Worker lcore %u (socket %u) ID %u: ",
			lcore,
			rte_lcore_to_socket_id(lcore),
			(unsigned)lp->worker_id);

		printf("Input rings  ");
		for (i = 0; i < lp->n_rings_in; i ++) {
			printf("%p  ", lp->rings_in[i]);
		}

		printf(";\n");
	}

	printf("\n");

	/* Print NIC TX configuration */
	printf("NIC TX ports:  ");
	for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
		if (app.nic_tx_port_mask[port] == 1) {
			printf("%u  ", port);
		}
	}
	printf(";\n");

	/* Print I/O TX lcore params */
	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		struct app_lcore_params_io *lp = &app.lcore_params[lcore].io;
		uint32_t n_workers = app_get_lcores_worker();

		if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
		     (lp->tx.n_nic_ports == 0)) {
			continue;
		}

		printf("I/O lcore %u (socket %u): ", lcore, rte_lcore_to_socket_id(lcore));

		printf("Input rings per TX port  ");
		for (i = 0; i < lp->tx.n_nic_ports; i ++) {
			port = lp->tx.nic_ports[i];

			printf("%u (", port);
			for (j = 0; j < n_workers; j ++) {
				printf("%p  ", lp->tx.rings[port][j]);
			}
			printf(")  ");

		}

		printf(";\n");
	}

	/* Print worker lcore TX params */
	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		struct app_lcore_params_worker *lp = &app.lcore_params[lcore].worker;

		if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
			continue;
		}

		printf("Worker lcore %u (socket %u) ID %u: \n",
			lcore,
			rte_lcore_to_socket_id(lcore),
			(unsigned)lp->worker_id);

		printf("Output rings per TX port  ");
		for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
			if (lp->rings_out[port] != NULL) {
				printf("%u (%p)  ", port, lp->rings_out[port]);
			}
		}

		printf(";\n");
	}

	/* Rings */
	printf("Ring sizes: NIC RX = %u; Worker in = %u; Worker out = %u; NIC TX = %u;\n",
		(unsigned) app.nic_rx_ring_size,
		(unsigned) app.ring_rx_size,
		(unsigned) app.ring_tx_size,
		(unsigned) app.nic_tx_ring_size);

	/* Bursts */
	printf("Burst sizes: I/O RX (rd = %u, wr = %u); Worker (rd = %u, wr = %u); I/O TX (rd = %u, wr = %u)\n",
		(unsigned) app.burst_size_io_rx_read,
		(unsigned) app.burst_size_io_rx_write,
		(unsigned) app.burst_size_worker_read,
		(unsigned) app.burst_size_worker_write,
		(unsigned) app.burst_size_io_tx_read,
		(unsigned) app.burst_size_io_tx_write);
}
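
Every example on this page revolves around the same call: rte_lcore_to_socket_id(), which maps a logical core to the NUMA socket it runs on so that memory, rings and queues can be placed locally. A minimal, self-contained sketch of that shared pattern (illustrative only, not taken from the project above):

#include <stdio.h>
#include <rte_lcore.h>

/* Walk the enabled lcores and report the NUMA socket each one lives on. */
static void
print_lcore_sockets(void)
{
	unsigned lcore_id;

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;
		printf("lcore %u -> socket %u\n",
			lcore_id, rte_lcore_to_socket_id(lcore_id));
	}
}
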
Example #2
File: init.c Project: exuuwen/study
static void udpi_init_ports(void)
{
	/* Init NIC ports, then start the ports */
	int32_t ret = -1;
	uint32_t queue_id = 0;
	uint32_t lcore_id = 0;
	uint32_t i, j;


	/* Init port */
	for (j=0; j<udpi.n_ports; j++)
	{
		RTE_LOG(INFO, PORT, "Initializing NIC port %u ...\n", j);
		fflush(stdout);

		ret = rte_eth_dev_configure(j, udpi.ports[j].n_queues, udpi.ports[j].n_queues, &udpi.port_conf);
		if(0 > ret)
		{
			rte_panic("Cannot init NIC port %u (%d)\n", j, ret);
		}

		for (i=0; i<udpi.n_cores; i++) 
		{
			if (udpi.cores[i].core_type != UDPI_CORE_IPV4_RX || udpi.cores[i].port_id != j)
				continue;
        
			lcore_id = udpi.cores[i].core_id;
			queue_id = udpi.cores[i].id;

			ret = rte_eth_rx_queue_setup(j, queue_id, udpi.rsz_hwq_rx, 
				rte_lcore_to_socket_id(lcore_id), NULL, udpi.pool);
			if (0 > ret)
			{
				rte_panic("Cannot init Rx for port %u with qid %u (%d)\n", j, queue_id, ret);
			}
			
			ret = rte_eth_dev_set_rx_queue_stats_mapping(j, queue_id, queue_id);
			if (ret < 0) 
			{
				rte_panic("Failed to set tx_queue_stats_mapping of p%u_q%u. error = %d.",
					j, queue_id, ret);
			}
                
			ret = rte_eth_tx_queue_setup(j, queue_id, udpi.rsz_hwq_tx, rte_lcore_to_socket_id(lcore_id), NULL);
			if (0 > ret)
			{
				rte_panic("Cannot init Tx for port %u with qid %u (%d)\n", j, queue_id, ret);
			}

			ret = rte_eth_dev_set_tx_queue_stats_mapping(j, queue_id, queue_id);
			if (ret < 0) 
			{
				rte_panic("Failed to set tx_queue_stats_mapping of p%u_q%u. error = %d.", j, queue_id, ret);
			}
		}  

		/* Start port */
		ret = rte_eth_dev_start(j);
		if(0 > ret)
		{
			rte_panic("Cannot start port %u (%d)\n", j, ret);
		}

		rte_eth_promiscuous_enable(j);
	}

	udpi_ports_check_link();

	return;
}
Example #3
/**********************************************************************
*@description:
* Initialize all enabled NIC ports: configure each port, set up one TX
* queue per enabled lcore and the RX queues assigned to each lcore,
* then size and create the per-socket mbuf pools.
*
*@parameters:
* [in]: nb_ports    number of Ethernet ports detected
* [in]: user_conf   user configuration (port mask, NUMA flag, ...)
* [in/out]: lcore_conf  per-lcore RX/TX queue configuration to fill in
*
*@return values: 0 on success; exits the process on failure
*
**********************************************************************/
static int odp_init_ports(unsigned short nb_ports, struct odp_user_config  *user_conf, struct odp_lcore_config *lcore_conf)
{
    int ret;
    uint8_t portid; 
    uint16_t queueid;
    unsigned lcore_id;
    uint8_t nb_rx_queue =0;
    uint8_t max_rx_queue =0;
    uint8_t queue, socketid;
    uint32_t n_tx_queue, nb_lcores, nb_mbuf;
    struct ether_addr eth_addr;
    struct rte_eth_dev_info dev_info;
    struct rte_eth_txconf *txconf;


    nb_lcores = rte_lcore_count();
    n_tx_queue = nb_lcores;
    if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
    	n_tx_queue = MAX_TX_QUEUE_PER_PORT;

    printf("\nStart to Init port \n" );

    /* initialize all ports */
    for (portid = 0; portid < nb_ports; portid++) 
    {
        /* skip ports that are not enabled */
        if ((user_conf->port_mask & (1 << portid)) == 0) 
        {
            printf("\nSkipping disabled port %d\n", portid);
            continue;
        }

        /* init port */
        printf("\t port %d:  \n", portid );

        nb_rx_queue = odp_get_port_rx_queues_nb(portid, user_conf);

        if(max_rx_queue < nb_rx_queue)
            max_rx_queue = nb_rx_queue;
        
        printf("\t Creating queues: rx queue number=%d tx queue number=%u... \n", nb_rx_queue, (unsigned)n_tx_queue );

        ret = rte_eth_dev_configure(portid, nb_rx_queue, (uint16_t)n_tx_queue, &odp_port_conf);
        if (ret < 0)
        	rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n", ret, portid);

        rte_eth_macaddr_get(portid, &eth_addr);

        printf ("\t MAC Address:%02X:%02X:%02X:%02X:%02X:%02X \n", 
        	eth_addr.addr_bytes[0], eth_addr.addr_bytes[1],
        	eth_addr.addr_bytes[2], eth_addr.addr_bytes[3],
        	eth_addr.addr_bytes[4], eth_addr.addr_bytes[5]);

        /* init one TX queue per couple (lcore,port) */
        queueid = 0;
        for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
        {
            if (rte_lcore_is_enabled(lcore_id) == 0)
            	continue;

            if (user_conf->numa_on)
            	socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id);
            else
            	socketid = 0;

            printf("\t lcore id:%u, tx queue id:%d, socket id:%d \n", lcore_id, queueid, socketid);
            
            ret = rte_eth_tx_queue_setup(portid, queueid, ODP_TX_DESC_DEFAULT, socketid, &odp_tx_conf);
            if (ret < 0)
            	rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, " "port=%d\n", ret, portid);

            lcore_conf[lcore_id].tx_queue_id[portid] = queueid;
            
            queueid++;
        }
        
        printf("\n");

    }

    /* size for the busiest port: use max_rx_queue rather than the RX
       queue count of whichever port happened to be initialized last */
    nb_mbuf = RTE_MAX((nb_ports*max_rx_queue*ODP_RX_DESC_DEFAULT +
				nb_ports*nb_lcores*MAX_PKT_BURST +
				nb_ports*n_tx_queue*ODP_TX_DESC_DEFAULT +
				nb_lcores*MEMPOOL_CACHE_SIZE), MAX_MBUF_NB);
				
    /* init memory */
    ret = odp_init_mbuf_pool(nb_mbuf, user_conf);
    if (ret < 0)
    	rte_exit(EXIT_FAILURE, "init_mem failed\n");

    for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) 
    {
        if (rte_lcore_is_enabled(lcore_id) == 0)
            continue;
        
        printf("\nInitializing rx queues on lcore %u ... \n", lcore_id );

        /* init RX queues */
        for(queue = 0; queue < lcore_conf[lcore_id].n_rx_queue; ++queue) 
        {
            portid = lcore_conf[lcore_id].rx_queue_list[queue].port_id;
            queueid = lcore_conf[lcore_id].rx_queue_list[queue].queue_id;

            if (user_conf->numa_on)
                socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id);
            else
                socketid = 0;

            printf("port id:%d, rx queue id: %d, socket id:%d \n", portid, queueid, socketid);

            ret = rte_eth_rx_queue_setup(portid, queueid, ODP_RX_DESC_DEFAULT, socketid, &odp_rx_conf, odp_pktmbuf_pool[socketid]);
            if (ret < 0)
                rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d," "port=%d\n", ret, portid);
        }
    }

    return 0;
}
Example #4
/* Init KNI RX queue */
struct vr_dpdk_queue *
vr_dpdk_kni_rx_queue_init(unsigned lcore_id, struct vr_interface *vif,
    unsigned host_lcore_id)
{
    struct vr_dpdk_lcore *lcore = vr_dpdk.lcores[lcore_id];
    const unsigned socket_id = rte_lcore_to_socket_id(lcore_id);
    uint8_t port_id = 0;
    unsigned vif_idx = vif->vif_idx;
    struct vr_dpdk_queue *rx_queue = &lcore->lcore_rx_queues[vif_idx];
    struct vr_dpdk_queue_params *rx_queue_params
                    = &lcore->lcore_rx_queue_params[vif_idx];

    if (vif->vif_type == VIF_TYPE_HOST) {
        port_id = (((struct vr_dpdk_ethdev *)(vif->vif_bridge->vif_os))->
                ethdev_port_id);
    }

    /* init queue */
    rx_queue->rxq_ops = dpdk_knidev_reader_ops;
    rx_queue->q_queue_h = NULL;
    rx_queue->q_vif = vrouter_get_interface(vif->vif_rid, vif_idx);

    /* create the queue */
    struct dpdk_knidev_reader_params reader_params = {
        .kni = vif->vif_os,
    };
    rx_queue->q_queue_h = rx_queue->rxq_ops.f_create(&reader_params, socket_id);
    if (rx_queue->q_queue_h == NULL) {
        RTE_LOG(ERR, VROUTER, "    error creating KNI device %s RX queue"
            " at eth device %" PRIu8 "\n", vif->vif_name, port_id);
        return NULL;
    }

    /* store queue params */
    rx_queue_params->qp_release_op = &dpdk_kni_rx_queue_release;

    return rx_queue;
}

/* Release KNI TX queue */
static void
dpdk_kni_tx_queue_release(unsigned lcore_id, struct vr_interface *vif)
{
    struct vr_dpdk_lcore *lcore = vr_dpdk.lcores[lcore_id];
    struct vr_dpdk_queue *tx_queue = &lcore->lcore_tx_queues[vif->vif_idx];
    struct vr_dpdk_queue_params *tx_queue_params
                        = &lcore->lcore_tx_queue_params[vif->vif_idx];

    tx_queue->txq_ops.f_tx = NULL;
    rte_wmb();

    /* flush and free the queue */
    if (tx_queue->txq_ops.f_free(tx_queue->q_queue_h)) {
        RTE_LOG(ERR, VROUTER, "    error freeing lcore %u KNI device TX queue\n",
                    lcore_id);
    }

    /* reset the queue */
    vrouter_put_interface(tx_queue->q_vif);
    memset(tx_queue, 0, sizeof(*tx_queue));
    memset(tx_queue_params, 0, sizeof(*tx_queue_params));
}

/* Init KNI TX queue */
struct vr_dpdk_queue *
vr_dpdk_kni_tx_queue_init(unsigned lcore_id, struct vr_interface *vif,
    unsigned host_lcore_id)
{
    struct vr_dpdk_lcore *lcore = vr_dpdk.lcores[lcore_id];
    const unsigned socket_id = rte_lcore_to_socket_id(lcore_id);
    uint8_t port_id = 0;
    unsigned vif_idx = vif->vif_idx;
    struct vr_dpdk_queue *tx_queue = &lcore->lcore_tx_queues[vif_idx];
    struct vr_dpdk_queue_params *tx_queue_params
                    = &lcore->lcore_tx_queue_params[vif_idx];
    struct vr_dpdk_ethdev *ethdev;

    if (vif->vif_type == VIF_TYPE_HOST) {
        ethdev = vif->vif_bridge->vif_os;
        if (ethdev == NULL) {
            RTE_LOG(ERR, VROUTER, "    error creating KNI device %s TX queue:"
                " bridge vif %u ethdev is not initialized\n",
                vif->vif_name, vif->vif_bridge->vif_idx);
            return NULL;
        }
        port_id = ethdev->ethdev_port_id;
    }

    /* init queue */
    tx_queue->txq_ops = dpdk_knidev_writer_ops;
    tx_queue->q_queue_h = NULL;
    tx_queue->q_vif = vrouter_get_interface(vif->vif_rid, vif_idx);

    /* create the queue */
    struct dpdk_knidev_writer_params writer_params = {
        .kni = vif->vif_os,
        .tx_burst_sz = VR_DPDK_TX_BURST_SZ,
    };
    tx_queue->q_queue_h = tx_queue->txq_ops.f_create(&writer_params, socket_id);
    if (tx_queue->q_queue_h == NULL) {
        RTE_LOG(ERR, VROUTER, "    error creating KNI device %s TX queue"
            " at eth device %" PRIu8 "\n", vif->vif_name, port_id);
        return NULL;
    }

    /* store queue params */
    tx_queue_params->qp_release_op = &dpdk_kni_tx_queue_release;

    return tx_queue;
}

/* Change KNI MTU size callback */
static int
dpdk_knidev_change_mtu(uint8_t port_id, unsigned new_mtu)
{
    struct vrouter *router = vrouter_get(0);
    struct vr_interface *vif;
    int i, ret;
    uint8_t ethdev_port_id, slave_port_id;
    struct vr_dpdk_ethdev *ethdev = NULL;

    RTE_LOG(INFO, VROUTER, "Changing eth device %" PRIu8 " MTU to %u\n",
                    port_id, new_mtu);
    if (port_id >= rte_eth_dev_count()) {
        RTE_LOG(ERR, VROUTER, "Error changing eth device %"PRIu8" MTU: invalid eth device\n", port_id);
        return -EINVAL;
    }

    /*
     * TODO: DPDK bond PMD does not implement mtu_set op, so we need to
     * set the MTU manually for all the slaves.
     */
    /* Bond vif uses first slave port ID. */
    if (router->vr_eth_if) {
        ethdev = (struct vr_dpdk_ethdev *)router->vr_eth_if->vif_os;
        if (ethdev && ethdev->ethdev_nb_slaves > 0) {
            for (i = 0; i < ethdev->ethdev_nb_slaves; i++) {
                if (port_id == ethdev->ethdev_slaves[i])
                    break;
            }
            /* Clear ethdev if no port match. */
            if (i >= ethdev->ethdev_nb_slaves)
                ethdev = NULL;
        }
    }
    if (ethdev && ethdev->ethdev_nb_slaves > 0) {
        for (i = 0; i < ethdev->ethdev_nb_slaves; i++) {
            slave_port_id = ethdev->ethdev_slaves[i];
            RTE_LOG(INFO, VROUTER, "    changing bond member eth device %" PRIu8
                " MTU to %u\n", slave_port_id, new_mtu);

            ret =  rte_eth_dev_set_mtu(slave_port_id, new_mtu);
            if (ret < 0) {
                RTE_LOG(ERR, VROUTER, "    error changing bond member eth device %" PRIu8
                    " MTU: %s (%d)\n", slave_port_id, rte_strerror(-ret), -ret);
                return ret;
            }
        }
    } else {
        ret =  rte_eth_dev_set_mtu(port_id, new_mtu);
        if (ret < 0) {
            RTE_LOG(ERR, VROUTER, "Error changing eth device %" PRIu8
                " MTU: %s (%d)\n", port_id, rte_strerror(-ret), -ret);
        }
        return ret;
    }

    /* On success, inform vrouter about new MTU */
    for (i = 0; i < router->vr_max_interfaces; i++) {
        vif = __vrouter_get_interface(router, i);
        if (vif && (vif->vif_type == VIF_TYPE_PHYSICAL)) {
            ethdev_port_id = (((struct vr_dpdk_ethdev *)(vif->vif_os))->
                        ethdev_port_id);
            if (ethdev_port_id == port_id) {
                /* Ethernet header size */
                new_mtu += sizeof(struct vr_eth);
                if (vr_dpdk.vlan_tag != VLAN_ID_INVALID) {
                    /* 802.1q header size */
                    new_mtu += sizeof(uint32_t);
                }
                vif->vif_mtu = new_mtu;
                if (vif->vif_bridge)
                    vif->vif_bridge->vif_mtu = new_mtu;
            }
        }
    }

    return 0;
}


/* Configure KNI state callback */
static int
dpdk_knidev_config_network_if(uint8_t port_id, uint8_t if_up)
{
    int ret = 0;

    RTE_LOG(INFO, VROUTER, "Configuring eth device %" PRIu8 " %s\n",
                    port_id, if_up ? "UP" : "DOWN");
    if (port_id >= rte_eth_dev_count() || port_id >= RTE_MAX_ETHPORTS) {
        RTE_LOG(ERR, VROUTER, "Invalid eth device %" PRIu8 "\n", port_id);
        return -EINVAL;
    }

    if (if_up)
        ret = rte_eth_dev_start(port_id);
    else
        rte_eth_dev_stop(port_id);

    if (ret < 0) {
        RTE_LOG(ERR, VROUTER, "Configuring eth device %" PRIu8 " UP"
                    "failed (%d)\n", port_id, ret);
    }

    return ret;
}

/* Init KNI */
int
vr_dpdk_knidev_init(uint8_t port_id, struct vr_interface *vif)
{
    int i;
    struct rte_eth_dev_info dev_info;
    struct rte_kni_conf kni_conf;
    struct rte_kni_ops kni_ops;
    struct rte_kni *kni;
    struct rte_config *rte_conf = rte_eal_get_configuration();

    if (!vr_dpdk.kni_inited) {
        /*
         * If the host does not support KNIs (i.e. RedHat), we'll get
         * a panic here.
         */
        rte_kni_init(VR_DPDK_MAX_KNI_INTERFACES);
        vr_dpdk.kni_inited = true;
    }

    /* get eth device info */
    memset(&dev_info, 0, sizeof(dev_info));
    rte_eth_dev_info_get(port_id, &dev_info);

    /* create KNI configuration */
    memset(&kni_conf, 0, sizeof(kni_conf));
    strncpy(kni_conf.name, (char *)vif->vif_name, sizeof(kni_conf.name) - 1);

    kni_conf.addr = dev_info.pci_dev->addr;
    kni_conf.id = dev_info.pci_dev->id;
    kni_conf.group_id = port_id;
    kni_conf.mbuf_size = VR_DPDK_MAX_PACKET_SZ;
    /*
     * Due to DPDK commit 41a6ebd, now to prevent packet reordering in KNI
     * we have to bind KNI kernel thread to a first online unused CPU.
     */
    for (i = 0; i < RTE_MAX_LCORE; i++) {
        if (lcore_config[i].detected
                && rte_conf->lcore_role[VR_DPDK_FWD_LCORE_ID + i] == ROLE_OFF) {
            kni_conf.force_bind = 1;
            kni_conf.core_id = i;
            RTE_LOG(INFO, VROUTER, "    bind KNI kernel thread to CPU %d\n", i);
            break;
        }
    }

    /* KNI options
     *
     * Changing state of the KNI interface can change state of the physical
     * interface. This is useful for the vhost, but not for the VLAN
     * forwarding interface.
     */
    if (vif->vif_type == VIF_TYPE_VLAN) {
        memset(&kni_ops, 0, sizeof(kni_ops));
    } else {
        kni_ops.port_id = port_id;
        kni_ops.change_mtu = dpdk_knidev_change_mtu;
        kni_ops.config_network_if = dpdk_knidev_config_network_if;
    }

    /* allocate KNI device */
    kni = rte_kni_alloc(vr_dpdk.rss_mempool, &kni_conf, &kni_ops);
    if (kni == NULL) {
        RTE_LOG(ERR, VROUTER, "    error allocation KNI device %s"
            " at eth device %" PRIu8 "\n", vif->vif_name, port_id);
        return -ENOMEM;
    }

    /* store pointer to KNI for further use */
    vif->vif_os = kni;

    /* add interface to the table of KNIs */
    for (i = 0; i < VR_DPDK_MAX_KNI_INTERFACES; i++) {
        if (vr_dpdk.knis[i] == NULL) {
            vr_dpdk.knis[i] = vif->vif_os;
            break;
        }
    }

    return 0;
}
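
The change_mtu and config_network_if callbacks registered above only run when some lcore services the kernel's KNI requests. A hypothetical sketch of that service step (the helper name is an assumption; the real vRouter drives this from its own lcore loop):

/* Poll each allocated KNI for pending kernel requests (MTU change,
 * link up/down); rte_kni_handle_request() invokes the registered ops. */
static void
dpdk_knis_handle_requests(void)
{
    int i;

    for (i = 0; i < VR_DPDK_MAX_KNI_INTERFACES; i++) {
        if (vr_dpdk.knis[i] != NULL)
            rte_kni_handle_request(vr_dpdk.knis[i]);
    }
}
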
Example #5
int
acl_init(int is_ipv4)
{
	unsigned int i;
	struct rte_acl_rule *acl_base_ipv4 = NULL, *acl_base_ipv6 = NULL;
	unsigned int acl_num_ipv4 = 0, acl_num_ipv6 = 0;
	struct rte_acl_ctx *acl_ctx;

	if (check_acl_config() != 0) {
		acl_log("Failed to get valid ACL options\n");
		return -1;
	}

	dump_acl_config();

	if (is_ipv4) {
		/* Load  rules from the input file */
		if (add_rules(acl_parm_config.rule_ipv4_name, &acl_base_ipv4,
			      &acl_num_ipv4, sizeof(struct acl4_rule),
			      &parse_cb_ipv4vlan_rule) < 0) {
			acl_log("Failed to add ipv4 rules\n");
			return -1;
		}

		acl_log("IPv4 ACL entries %u:\n", acl_num_ipv4);
		dump_ipv4_rules((struct acl4_rule *)acl_base_ipv4, acl_num_ipv4,
				1);
		for (i = 0; i < NB_SOCKETS; i++) {
			if ((acl_ctx = setup_acl(acl_base_ipv4, acl_num_ipv4, 0,
						 i)) != NULL) {
				ipv4_acx[i] = acl_ctx;
			} else if (acl_num_ipv4 == 0) {
				ipv4_acx[i] = NULL;
			} else {
				acl_log("setup_acl failed for ipv4 with "
					"socketid %d, keeping previous rules "
					"for that socket\n",
					i);
			}
		}
#ifdef L3FWDACL_DEBUG
		if (acl_base_ipv4) {
			acl_config.rule_ipv4 =
			    (struct acl4_rule *)acl_base_ipv4;
		}
#else
		free(acl_base_ipv4);
#endif
	} else {
		if (add_rules(acl_parm_config.rule_ipv6_name, &acl_base_ipv6,
			      &acl_num_ipv6, sizeof(struct acl6_rule),
			      &parse_cb_ipv6_rule) < 0) {
			acl_log("Failed to add ipv6 rules\n");
			return -1;
		}

		acl_log("IPv6 ACL entries %u:\n", acl_num_ipv6);
		dump_ipv6_rules((struct acl6_rule *)acl_base_ipv6, acl_num_ipv6,
				1);
		for (i = 0; i < NB_SOCKETS; i++) {
			if ((acl_ctx = setup_acl(acl_base_ipv6, acl_num_ipv6, 1,
						 i)) != NULL) {
				ipv6_acx[i] = acl_ctx;
			} else if (acl_num_ipv6 == 0) {
				ipv6_acx[i] = NULL;
			} else {
				acl_log("setup_acl failed for ipv6 with "
					"socketid %d, keeping previous rules "
					"for that socket\n",
					i);
			}
		}
#ifdef L3FWDACL_DEBUG
		if (acl_base_ipv6) {
			acl_config.rule_ipv6 =
			    (struct acl6_rule *)acl_base_ipv6;
		}
#else
		free(acl_base_ipv6);
#endif
	}

	int socketid, lcore_id;
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		if (numa_on)
			socketid = rte_lcore_to_socket_id(lcore_id);
		else
			socketid = 0;

		rte_atomic64_cmpset(
		    (uintptr_t *)&lcore_conf[lcore_id].new_acx_ipv4,
		    (uintptr_t)lcore_conf[lcore_id].new_acx_ipv4,
		    (uintptr_t)ipv4_acx[socketid]);
		rte_atomic64_cmpset(
		    (uintptr_t *)&lcore_conf[lcore_id].new_acx_ipv6,
		    (uintptr_t)lcore_conf[lcore_id].new_acx_ipv6,
		    (uintptr_t)ipv6_acx[socketid]);
	}

	return 0;
}
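
The rte_atomic64_cmpset() calls above publish the per-socket ACL contexts to the lcores without locking; each worker simply sees the new pointer on its next lookup. A minimal sketch of that publish step in isolation (an illustration only: it assumes a 64-bit target and that the old context is never freed while readers may still hold it):

#include <rte_atomic.h>
#include <rte_acl.h>

/* Atomically swap in a new ACL context pointer; retry if a concurrent
 * update changed the slot between the read and the compare-and-set. */
static void
publish_acx(struct rte_acl_ctx * volatile *slot, struct rte_acl_ctx *new_acx)
{
	uintptr_t old;

	do {
		old = (uintptr_t)*slot;
	} while (rte_atomic64_cmpset((volatile uint64_t *)slot,
			old, (uintptr_t)new_acx) == 0);
}
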
Example #6
void
app_init_rings_tx(void) {
  unsigned lcore;

  /* Initialize the rings for the TX side */
  for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
    struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore].worker;
    unsigned port;

    if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER &&
        app.lcore_params[lcore].type != e_APP_LCORE_IO_WORKER) {
      continue;
    }

    for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
      char name[32];
      struct app_lcore_params_io *lp_io = NULL;
      struct rte_ring *ring;
      uint32_t socket_io, lcore_io;

      if (app.nic_tx_port_mask[port] == 0) {
        continue;
      }

      if (app_get_lcore_for_nic_tx((uint8_t) port, &lcore_io) < 0) {
        rte_panic("Algorithmic error (no I/O core to handle TX of port %u)\n",
                  port);
      }

      lp_io = &app.lcore_params[lcore_io].io;
      socket_io = rte_lcore_to_socket_id(lcore_io);

      lagopus_dprint("Creating ring to connect "
                     "worker lcore %u with "
                     "TX port %u (through I/O lcore %u)"
                     " (socket %u) ...\n",
                     lcore,
                     port,
                     (unsigned)lcore_io,
                     (unsigned)socket_io);
      snprintf(name, sizeof(name),
               "app_ring_tx_s%u_w%u_p%u",
               socket_io, lcore, port);
      ring = rte_ring_create(
               name,
               app.ring_tx_size,
               (int)socket_io,
               RING_F_SP_ENQ | RING_F_SC_DEQ);
      if (ring == NULL) {
        rte_panic("Cannot create ring to connect"
                  " worker core %u with TX port %u\n",
                  lcore,
                  port);
      }

      lp_worker->rings_out[port] = ring;
      lp_io->tx.rings[port][lp_worker->worker_id] = ring;
    }
  }

  for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
    struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;
    unsigned i;

    if ((app.lcore_params[lcore].type != e_APP_LCORE_IO &&
         app.lcore_params[lcore].type != e_APP_LCORE_IO_WORKER) ||
        (lp_io->tx.n_nic_ports == 0)) {
      continue;
    }

    for (i = 0; i < lp_io->tx.n_nic_ports; i ++) {
      unsigned port, j;

      port = lp_io->tx.nic_ports[i];
      for (j = 0; j < app_get_lcores_worker(); j ++) {
        if (lp_io->tx.rings[port][j] == NULL) {
          rte_panic("Algorithmic error (I/O TX rings)\n");
        }
      }
    }
  }
}
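
The rings created here are single-producer/single-consumer: exactly one worker enqueues and one I/O TX lcore dequeues. A minimal sketch of that handoff using the version-stable single-object ring API (worker_send and io_tx_recv are illustrative names):

#include <rte_ring.h>
#include <rte_mbuf.h>

/* Worker side: hand one mbuf to the I/O TX lcore through the ring. */
static void
worker_send(struct rte_ring *ring, struct rte_mbuf *m)
{
  if (rte_ring_enqueue(ring, m) != 0) {
    rte_pktmbuf_free(m);    /* ring full: drop the packet */
  }
}

/* I/O TX side: drain one mbuf; returns NULL if the ring is empty. */
static struct rte_mbuf *
io_tx_recv(struct rte_ring *ring)
{
  void *obj = NULL;

  if (rte_ring_dequeue(ring, &obj) != 0) {
    return NULL;
  }
  return obj;
}
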
Example #7
/*
 * Parses the arguments given on the command line of the application,
 * calculates the mask of used cores and initializes the EAL with the
 * calculated core mask
 */
int
app_parse_args(int argc, char **argv)
{
	int opt, ret;
	int option_index;
	const char *optname;
	char *prgname = argv[0];
	uint32_t i, nb_lcores;

	static struct option lgopts[] = {
		{ "pfc", 1, 0, 0 },
		{ "mst", 1, 0, 0 },
		{ "rsz", 1, 0, 0 },
		{ "bsz", 1, 0, 0 },
		{ "msz", 1, 0, 0 },
		{ "rth", 1, 0, 0 },
		{ "tth", 1, 0, 0 },
		{ "cfg", 1, 0, 0 },
		{ NULL,  0, 0, 0 }
	};

	/* initialize EAL first */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		return -1;

	argc -= ret;
	argv += ret;

	/* set en_US locale to print big numbers with ',' */
	setlocale(LC_NUMERIC, "en_US.utf-8");

	while ((opt = getopt_long(argc, argv, "i",
		lgopts, &option_index)) != EOF) {

			switch (opt) {
			case 'i':
				printf("Interactive-mode selected\n");
				interactive = 1;
				break;
			/* long options */
			case 0:
				optname = lgopts[option_index].name;
				if (str_is(optname, "pfc")) {
					ret = app_parse_flow_conf(optarg);
					if (ret) {
						RTE_LOG(ERR, APP, "Invalid pipe configuration %s\n", optarg);
						return -1;
					}
					break;
				}
				if (str_is(optname, "mst")) {
					app_master_core = (uint32_t)atoi(optarg);
					break;
				}
				if (str_is(optname, "rsz")) {
					ret = app_parse_ring_conf(optarg);
					if (ret) {
						RTE_LOG(ERR, APP, "Invalid ring configuration %s\n", optarg);
						return -1;
					}
					break;
				}
				if (str_is(optname, "bsz")) {
					ret = app_parse_burst_conf(optarg);
					if (ret) {
						RTE_LOG(ERR, APP, "Invalid burst configuration %s\n", optarg);
						return -1;
					}
					break;
				}
				if (str_is(optname, "msz")) {
					mp_size = atoi(optarg);
					if (mp_size <= 0) {
						RTE_LOG(ERR, APP, "Invalid mempool size %s\n", optarg);
						return -1;
					}
					break;
				}
				if (str_is(optname, "rth")) {
					ret = app_parse_rth_conf(optarg);
					if (ret) {
						RTE_LOG(ERR, APP, "Invalid RX threshold configuration %s\n", optarg);
						return -1;
					}
					break;
				}
				if (str_is(optname, "tth")) {
					ret = app_parse_tth_conf(optarg);
					if (ret) {
						RTE_LOG(ERR, APP, "Invalid TX threshold configuration %s\n", optarg);
						return -1;
					}
					break;
				}
				if (str_is(optname, "cfg")) {
					cfg_profile = optarg;
					break;
				}
				break;

			default:
				app_usage(prgname);
				return -1;
			}
	}

	/* check master core index validity: the master core must not be
	 * one of the cores already claimed by a packet flow */
	if (app_used_core_mask & (1u << app_master_core)) {
		RTE_LOG(ERR, APP, "Master core index is not configured properly\n");
		app_usage(prgname);
		return -1;
	}
	app_used_core_mask |= 1u << app_master_core;

	if ((app_used_core_mask != app_eal_core_mask()) ||
			(app_master_core != rte_get_master_lcore())) {
		RTE_LOG(ERR, APP, "EAL core mask not configured properly, must be %" PRIx64
				" instead of %" PRIx64 "\n" , app_used_core_mask, app_eal_core_mask());
		return -1;
	}

	if (nb_pfc == 0) {
		RTE_LOG(ERR, APP, "Packet flow not configured!\n");
		app_usage(prgname);
		return -1;
	}

	/* sanity check for cores assignment */
	nb_lcores = app_cpu_core_count();

	for(i = 0; i < nb_pfc; i++) {
		if (qos_conf[i].rx_core >= nb_lcores) {
			RTE_LOG(ERR, APP, "pfc %u: invalid RX lcore index %u\n", i + 1,
					qos_conf[i].rx_core);
			return -1;
		}
		if (qos_conf[i].wt_core >= nb_lcores) {
			RTE_LOG(ERR, APP, "pfc %u: invalid WT lcore index %u\n", i + 1,
					qos_conf[i].wt_core);
			return -1;
		}
		uint32_t rx_sock = rte_lcore_to_socket_id(qos_conf[i].rx_core);
		uint32_t wt_sock = rte_lcore_to_socket_id(qos_conf[i].wt_core);
		if (rx_sock != wt_sock) {
			RTE_LOG(ERR, APP, "pfc %u: RX and WT must be on the same socket\n", i + 1);
			return -1;
		}
		app_numa_mask |= 1 << rte_lcore_to_socket_id(qos_conf[i].rx_core);
	}

	return 0;
}
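
Since app_parse_args() calls rte_eal_init() itself, the caller hands it the raw argc/argv. A hypothetical main() wiring it up (not part of the quoted source) might look like:

int
main(int argc, char **argv)
{
	/* EAL options come first on the command line; app_parse_args()
	 * consumes them via rte_eal_init(), then parses the app options. */
	if (app_parse_args(argc, argv) < 0)
		return -1;

	/* ... set up ports, then launch lcores with rte_eal_remote_launch() ... */
	return 0;
}
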
Example #8
static void
app_init_nics(void)
{
	unsigned socket;
	uint32_t lcore;
	uint8_t port, queue;
	int ret;
	uint32_t n_rx_queues, n_tx_queues;

	/* Init driver */
	printf("Initializing the PMD driver ...\n");
	if (rte_pmd_init_all() < 0) {
		rte_panic("Cannot init PMD\n");
	}

	if (rte_eal_pci_probe() < 0) {
		rte_panic("Cannot probe PCI\n");
	}

	/* Init NIC ports and queues, then start the ports */
	for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
		struct rte_mempool *pool;

		n_rx_queues = app_get_nic_rx_queues_per_port(port);
		n_tx_queues = app.nic_tx_port_mask[port];

		if ((n_rx_queues == 0) && (n_tx_queues == 0)) {
			continue;
		}

		/* Init port */
		printf("Initializing NIC port %u ...\n", (unsigned) port);
		ret = rte_eth_dev_configure(
			port,
			(uint8_t) n_rx_queues,
			(uint8_t) n_tx_queues,
			&port_conf);
		if (ret < 0) {
			rte_panic("Cannot init NIC port %u (%d)\n", (unsigned) port, ret);
		}
		rte_eth_promiscuous_enable(port);

		/* Init RX queues */
		for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) {
			if (app.nic_rx_queue_mask[port][queue] == 0) {
				continue;
			}

			app_get_lcore_for_nic_rx(port, queue, &lcore);
			socket = rte_lcore_to_socket_id(lcore);
			pool = app.lcore_params[lcore].pool;

			printf("Initializing NIC port %u RX queue %u ...\n",
				(unsigned) port,
				(unsigned) queue);
			ret = rte_eth_rx_queue_setup(
				port,
				queue,
				(uint16_t) app.nic_rx_ring_size,
				socket,
				&rx_conf,
				pool);
			if (ret < 0) {
				rte_panic("Cannot init RX queue %u for port %u (%d)\n",
					(unsigned) queue,
					(unsigned) port,
					ret);
			}
		}

		/* Init TX queues */
		if (app.nic_tx_port_mask[port] == 1) {
			app_get_lcore_for_nic_tx(port, &lcore);
			socket = rte_lcore_to_socket_id(lcore);
			printf("Initializing NIC port %u TX queue 0 ...\n",
				(unsigned) port);
			ret = rte_eth_tx_queue_setup(
				port,
				0,
				(uint16_t) app.nic_tx_ring_size,
				socket,
				&tx_conf);
			if (ret < 0) {
				rte_panic("Cannot init TX queue 0 for port %d (%d)\n",
					port,
					ret);
			}
		}

		/* Start port */
		ret = rte_eth_dev_start(port);
		if (ret < 0) {
			rte_panic("Cannot start port %d (%d)\n", port, ret);
		}
	}

	check_all_ports_link_status(APP_MAX_NIC_PORTS, (~0x0));
}
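
check_all_ports_link_status() itself is not shown; a minimal sketch of the kind of poll such a helper performs (print_port_link is an assumed name, the rte_ethdev calls are standard):

#include <stdio.h>
#include <string.h>
#include <rte_ethdev.h>

/* Report the link state of one port without waiting for autonegotiation. */
static void
print_port_link(uint8_t port)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_link_get_nowait(port, &link);
	if (link.link_status)
		printf("Port %u up, %u Mbps\n",
			(unsigned) port, (unsigned) link.link_speed);
	else
		printf("Port %u down\n", (unsigned) port);
}
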
Example #9
static void
app_init_nics(void)
{
	uint32_t socket, lcore;
	uint8_t	port, queue;
	struct ether_addr mac_addr;
	int ret;

	/* Init driver */
	printf("Initializing the PMD driver ...\n");
#ifdef RTE_LIBRTE_IGB_PMD
	if (rte_igb_pmd_init() < 0) {
		rte_panic("Cannot init IGB PMD\n");
	}
#endif
#ifdef RTE_LIBRTE_IXGBE_PMD
	if (rte_ixgbe_pmd_init() < 0) {
		rte_panic("Cannot init IXGBE PMD\n");
	}
#endif
	if (rte_eal_pci_probe() < 0) {
		rte_panic("Cannot probe PCI\n");
	}

	memset(port_stat,0,sizeof(struct port_stat)*MAX_PORT_NUM);
	/* Init NIC ports and queues, then start the ports */
	for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
		struct rte_eth_link link;
		struct rte_mempool *pool;
		uint32_t n_rx_queues, n_tx_queues;

		n_rx_queues = app_get_nic_rx_queues_per_port(port);
		n_tx_queues = app.nic_tx_port_mask[port];

		if ((n_rx_queues == 0) && (n_tx_queues == 0)) {
			continue;
		}

		/* Init port */
		printf("Initializing NIC port %u ...\n", (uint32_t) port);
		ret = rte_eth_dev_configure(
			port,
			(uint8_t) n_rx_queues,
			(uint8_t) n_tx_queues,
			&port_conf);
		if (ret < 0) {
			rte_panic("Cannot init NIC port %u (%d)\n", (uint32_t) port, ret);
		}
		rte_eth_promiscuous_enable(port);

		/* Init RX queues */
		for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) {
			if (app.nic_rx_queue_mask[port][queue] == 0) {
				continue;
			}

			app_get_lcore_for_nic_rx(port, queue, &lcore);
			socket = rte_lcore_to_socket_id(lcore);
			pool = app.lcore_params[lcore].pool;

			printf("Initializing NIC port %u RX queue %u ...\n",
				(uint32_t) port,
				(uint32_t) queue);
			ret = rte_eth_rx_queue_setup(
				port,
				queue,
				(uint16_t) app.nic_rx_ring_size,
				socket,
				&rx_conf,
				pool);
			if (ret < 0) {
				rte_panic("Cannot init RX queue %u for port %u (%d)\n",
					(uint32_t) queue,
					(uint32_t) port,
					ret);
			}
		}

		/* Init TX queues */
		
		if (app.nic_tx_port_mask[port] == 1) {
			app_get_lcore_for_nic_tx(port, &lcore);
			socket = rte_lcore_to_socket_id(lcore);
			printf("Initializing NIC port %u TX queue 0 ...\n",
				(uint32_t) port);
			ret = rte_eth_tx_queue_setup(
				port,
				0,
				(uint16_t) app.nic_tx_ring_size,
				socket,
				&tx_conf);
			if (ret < 0) {
				rte_panic("Cannot init TX queue 0 for port %d (%d)\n",
					port,
					ret);
			}
		}
		

		/* Start port */
		ret = rte_eth_dev_start(port);
		if (ret < 0) {
			rte_panic("Cannot start port %d (%d)\n", port, ret);
		}

		/* Get link status */
		rte_eth_link_get(port, &link);
		rte_eth_macaddr_get(port,&mac_addr);
		/* print the port MAC address and keep a copy in port_stat */
		int i;
		for (i = 0; i < 5; i++)
			printf("%02x:", mac_addr.addr_bytes[i]);
		printf("%02x\n", mac_addr.addr_bytes[i]);
		memcpy(port_stat[port].mac_addr, mac_addr.addr_bytes, 6);
		for (i = 0; i < 5; i++)
			printf("%02x:", port_stat[port].mac_addr[i]);
		printf("%02x\n", port_stat[port].mac_addr[i]);
		if (link.link_status) {
			printf("Port %u is UP (%u Mbps)\n",
				(uint32_t) port,
				(unsigned) link.link_speed);
			port_stat[port].port_status=1;
			port_stat[port].port_speed=link.link_speed;
		} else {
			printf("Port %u is DOWN\n",
				(uint32_t) port);
			port_stat[port].port_status=0;
		}
	}
}
Example #10
int32_t populateNodeInfo (void)
{
    int32_t i = 0, socketId = -1, lcoreIndex = 0, enable = 0;
    uint8_t coreCount, portCount;
    struct rte_eth_dev_info devInfo;

    /* fetch total lcore count under DPDK */
    coreCount = rte_lcore_count();
    for (i = 0; i < coreCount; i++)
    {
       socketId = rte_lcore_to_socket_id(i);
       lcoreIndex = rte_lcore_index(i);
       enable = rte_lcore_is_enabled(i);

       //printf ("\n Logical %d Physical %d Socket %d Enabled %d \n",
       //        i, lcoreIndex, socketId, enable);

       if (likely(enable)) {
           /* classify the lcore info per NUMA node */
           numaNodeInfo[socketId].lcoreAvail = numaNodeInfo[socketId].lcoreAvail | (1 << lcoreIndex);
           numaNodeInfo[socketId].lcoreTotal += 1;
       }
       else {
            rte_panic("ERROR: Lcore %d Socket %d not enabled\n", lcoreIndex, socketId);
            exit(EXIT_FAILURE);
       }
    }

    /* Create mempool per numa node based on interface available */
    portCount = rte_eth_dev_count();
    for (i =0; i < portCount; i++)
    {
        rte_eth_dev_info_get(i, &devInfo);
        printf("\n Inteface %d", i);
        printf("\n - driver: %s", devInfo.driver_name);
        printf("\n - if_index: %d", devInfo.if_index);
        if (devInfo.pci_dev) {
            printf("\n - PCI INFO ");
            printf("\n -- ADDR - domain:bus:devid:function %x:%x:%x:%x",
                  devInfo.pci_dev->addr.domain,
                  devInfo.pci_dev->addr.bus,
                  devInfo.pci_dev->addr.devid,
                  devInfo.pci_dev->addr.function);
            printf("\n == PCI ID - vendor:device:sub-vendor:sub-device %x:%x:%x:%x",
                  devInfo.pci_dev->id.vendor_id,
                  devInfo.pci_dev->id.device_id,
                  devInfo.pci_dev->id.subsystem_vendor_id,
                  devInfo.pci_dev->id.subsystem_device_id);
            printf("\n -- numa node: %d", devInfo.pci_dev->numa_node);
        }

        socketId = (devInfo.pci_dev == NULL || devInfo.pci_dev->numa_node == -1) ?
                   0 : devInfo.pci_dev->numa_node;
        numaNodeInfo[socketId].intfAvail = numaNodeInfo[socketId].intfAvail | (1 << i);
        numaNodeInfo[socketId].intfTotal += 1;
    }

    /* allocate mempool for numa which has NIC interfaces */
    for (i = 0; i < GTP_MAX_NUMANODE; i++)
    {
        if (likely(numaNodeInfo[i].intfAvail)) {
            /* ToDo: per interface */
            uint8_t portIndex = 0;
            char mempoolName[25];

            /* create mempool for TX */
            sprintf(mempoolName, "mbuf_pool-%d-%d-tx", i, portIndex);
            numaNodeInfo[i].tx[portIndex] = rte_mempool_create(
                        mempoolName, NB_MBUF,
                        MBUF_SIZE, 64,
                        sizeof(struct rte_pktmbuf_pool_private),
                        rte_pktmbuf_pool_init, NULL,
                        rte_pktmbuf_init, NULL,
                        i,/*SOCKET_ID_ANY*/
                         0/*MEMPOOL_F_SP_PUT*/);
            if (unlikely(numaNodeInfo[i].tx[portIndex] == NULL)) {
                rte_panic("\n ERROR: failed to get mem-pool for tx on node %d intf %d\n", i, portIndex);
                exit(EXIT_FAILURE);
            }

            /* create mempool for RX */
            sprintf(mempoolName, "mbuf_pool-%d-%d-rx", i, portIndex);
            numaNodeInfo[i].rx[portIndex] = rte_mempool_create(
                        mempoolName, NB_MBUF,
                        MBUF_SIZE, 64,
                        sizeof(struct rte_pktmbuf_pool_private),
                        rte_pktmbuf_pool_init, NULL,
                        rte_pktmbuf_init, NULL,
                        i,/*SOCKET_ID_ANY*/
                         0/*MEMPOOL_F_SP_PUT*/);
            if (unlikely(numaNodeInfo[i].rx[portIndex] == NULL)) {
                rte_panic("\n ERROR: failed to get mem-pool for rx on node %d intf %d\n", i, portIndex);
                exit(EXIT_FAILURE);
            }

        }
    }

    return 0;
}
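
For comparison, later DPDK releases wrap the same pktmbuf pool setup behind rte_pktmbuf_pool_create(). A sketch of the equivalent call for the TX pool above (RTE_MBUF_DEFAULT_BUF_SIZE is an assumption standing in for MBUF_SIZE):

            /* drop-in alternative to the rte_mempool_create() call above */
            numaNodeInfo[i].tx[portIndex] = rte_pktmbuf_pool_create(
                        mempoolName, NB_MBUF, 64 /* cache */, 0 /* priv */,
                        RTE_MBUF_DEFAULT_BUF_SIZE, i /* socket */);
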
Example #11
static void
app_init_rings_rx(void)
{
	unsigned lcore;
	char record_File_tmp [512];

	/* Initialize the rings for the RX side */
	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;
		unsigned socket_io, lcore_worker;

		if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
		    (lp_io->rx.n_nic_queues == 0)) {
			continue;
		}

		if(record_File[0])
		{
			gettimeofday(&lp_io->rx.end_ewr, NULL);
			sprintf(record_File_tmp,"%s/TimeSeries_Port%d_%lu.txt",record_File,lp_io->rx.nic_queues[0].port,lp_io->rx.end_ewr.tv_sec);
			lp_io->rx.record=fopen(record_File_tmp,"w+");
			if(lp_io->rx.record==NULL)
			{
				perror("record file");
				exit(-1);
			}
		}else
		{
			lp_io->rx.record=NULL;
		}

		socket_io = rte_lcore_to_socket_id(lcore);

		for (lcore_worker = 0; lcore_worker < APP_MAX_LCORES; lcore_worker ++) {
			char name[32];
			struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore_worker].worker;
			struct rte_ring *ring = NULL;

			if (app.lcore_params[lcore_worker].type != e_APP_LCORE_WORKER) {
				continue;
			}

			printf("Creating ring to connect I/O lcore %u (socket %u) with worker lcore %u ...\n",
				lcore,
				socket_io,
				lcore_worker);
			rte_snprintf(name, sizeof(name), "app_ring_rx_s%u_io%u_w%u",
				socket_io,
				lcore,
				lcore_worker);
			ring = rte_ring_create(
				name,
				app.ring_rx_size,
				socket_io,
				RING_F_SP_ENQ | RING_F_SC_DEQ);
			if (ring == NULL) {
				rte_panic("Cannot create ring to connect I/O core %u with worker core %u\n",
					lcore,
					lcore_worker);
			}

			lp_io->rx.rings[lp_io->rx.n_rings] = ring;
			lp_io->rx.n_rings ++;

			lp_worker->rings_in[lp_worker->n_rings_in] = ring;
			lp_worker->n_rings_in ++;
		}
	}

	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;

		if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
		    (lp_io->rx.n_nic_queues == 0)) {
			continue;
		}

		if (lp_io->rx.n_rings != app_get_lcores_worker()) {
			rte_panic("Algorithmic error (I/O RX rings)\n");
		}
	}

	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore].worker;

		if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
			continue;
		}

		if (lp_worker->n_rings_in != app_get_lcores_io_rx()) {
			rte_panic("Algorithmic error (worker input rings)\n");
		}
	}
}
Example #12
/* Initialize cores and allocate mempools */
static void init_lcores(void)
{
	char name[64];
	struct lcore_cfg *lconf = 0;
	static uint8_t *worker_thread_table[MAX_SOCKETS] = {0};
	static uint16_t *user_table[MAX_SOCKETS] = {0};
	struct rte_lpm *ipv4_lpm[MAX_SOCKETS] = {0};
	struct rte_hash *qinq_to_gre_lookup[MAX_SOCKETS] = {0};
	struct next_hop_struct *next_hop[MAX_SOCKETS] = {0};

	/* need to allocate mempools as the first thing to use the lowest possible address range */
	setup_mempools(lcore_cfg_init);

	lcore_cfg = rte_zmalloc_socket("lcore_cfg_hp", RTE_MAX_LCORE * sizeof(struct lcore_cfg), CACHE_LINE_SIZE, rte_socket_id());
	TGEN_PANIC(lcore_cfg == NULL, "Could not allocate memory for core control structures\n");
	rte_memcpy(lcore_cfg, lcore_cfg_init, RTE_MAX_LCORE * sizeof(struct lcore_cfg));

	init_lcore_info();
	check_no_mode_core();

	mprintf("=== Initializing rings on cores ===\n");
	init_rings();

	for (uint8_t socket_id = 0; socket_id < MAX_SOCKETS; ++socket_id) {
		uint16_t data_structs_flags = data_structs_needed(lconf, socket_id);
		if (data_structs_flags & DATA_STRUCTS_NEED_WT_TABLE) {
			worker_thread_table[socket_id] = rte_zmalloc_socket(NULL , 0x1000000, CACHE_LINE_SIZE, socket_id);
			TGEN_PANIC(worker_thread_table[socket_id] == NULL, "Error creating worker thread table\n");
		}

		if (data_structs_flags & DATA_STRUCTS_NEED_GRE_TABLE) {
			mprintf("=== user <-> QinQ table configuration ===\n");
			qinq_to_gre_lookup[socket_id] = read_gre_table_config(config_path, "gre_table.cfg", worker_thread_table[socket_id], lb_nb_txrings, socket_id);
			TGEN_PANIC(NULL == qinq_to_gre_lookup[socket_id], "Failed to allocate qinq to gre lookup table\n");
		}

		if (data_structs_flags & DATA_STRUCTS_NEED_USER_TABLE) {
			mprintf("=== User table configuration ===\n");
			user_table[socket_id] = read_user_table_config(config_path, "user_table.cfg", &qinq_to_gre_lookup[socket_id], socket_id);
			TGEN_PANIC(NULL == user_table[socket_id], "Failed to allocate user lookup table\n");
		}

		if (data_structs_flags & DATA_STRUCTS_NEED_NEXT_HOP) {
			mprintf("=== Next hop configuration ===\n");
			next_hop[socket_id] = read_next_hop_config(config_path, "next_hop.cfg", &tgen_used_port_mask, socket_id);
			init_routing_ports();
		}

		if (data_structs_flags & DATA_STRUCTS_NEED_LPM_V4) {
			mprintf("=== IPv4 routing configuration ===\n");
			ipv4_lpm[socket_id] = read_lpm_v4_config(config_path, "ipv4.cfg", socket_id);
			TGEN_PANIC(NULL == ipv4_lpm[socket_id], "Failed to allocate IPv4 LPM\n");
		}

		if (data_structs_flags & DATA_STRUCTS_NEED_LPM_V6) {
			mprintf("=== IPv6 routing configuration ===\n");
			read_lpm_v6_config(config_path, "ipv6.cfg", socket_id);
		}
	}

	check_consistent_cfg();

	mprintf("=== Initializing tables, mempools and queue numbers on cores ===\n");
	for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
		if (!rte_lcore_is_enabled(lcore_id) || lcore_id == tgen_cfg.master) {
			continue;
		}

		lconf = &lcore_cfg[lcore_id];
		uint8_t socket = rte_lcore_to_socket_id(lcore_id);

		for (uint8_t task_id = 0; task_id < lconf->nb_tasks; ++task_id) {
			struct task_startup_cfg *startup_cfg = &lconf->startup_cfg[task_id];

			if (QOS == startup_cfg->mode) {
				rte_snprintf(name, sizeof(name), "qos_sched_port_%u_%u", lcore_id, task_id);

				startup_cfg->qos_conf.port_params.name = name;
				startup_cfg->qos_conf.port_params.socket = socket;
				startup_cfg->qos_conf.port_params.rate = TEN_GIGABIT;
				startup_cfg->sched_port = rte_sched_port_config(&startup_cfg->qos_conf.port_params);

				TGEN_PANIC(startup_cfg->sched_port == NULL, "failed to create sched_port");

				mprintf("number of pipes: %d\n\n", startup_cfg->qos_conf.port_params.n_pipes_per_subport);
				int err = rte_sched_subport_config(startup_cfg->sched_port, 0, startup_cfg->qos_conf.subport_params);
				TGEN_PANIC(err != 0, "Failed setting up sched_port subport, error: %d", err);

				/* only single subport and single pipe profile is supported */
				for (uint32_t pipe = 0; pipe < startup_cfg->qos_conf.port_params.n_pipes_per_subport; ++pipe) {
					err = rte_sched_pipe_config(startup_cfg->sched_port, 0 , pipe, 0);
					TGEN_PANIC(err != 0, "failed setting up sched port pipe, error: %d", err);
				}
			}
			if (LB_QINQ == startup_cfg->mode) {
				startup_cfg->worker_thread_table = worker_thread_table[rte_socket_id()];
			}
			if (QINQ_DECAP_ARP == startup_cfg->mode || QINQ_DECAP_V4 == startup_cfg->mode) {
				startup_cfg->qinq_gre = qinq_to_gre_lookup[rte_socket_id()];
			}
			if (QOS == startup_cfg->mode || CLASSIFY == startup_cfg->mode || QINQ_DECAP_V6 == startup_cfg->mode) {
				startup_cfg->user_table = user_table[rte_socket_id()];
			}
			if (ROUTING == startup_cfg->mode || FWD == startup_cfg->mode || QINQ_DECAP_V4 == startup_cfg->mode) {
				startup_cfg->next_hop = next_hop[rte_socket_id()];
			}
			if (QINQ_DECAP_V4 == startup_cfg->mode || FWD == startup_cfg->mode || ROUTING == startup_cfg->mode) {
				startup_cfg->ipv4_lpm = ipv4_lpm[rte_socket_id()];
			}

		}

		mprintf("\t*** Initializing core %u ***\n", lcore_id);
		if (lconf->flags & PCFG_CPETABLEv4) {
			sprintf(name, "core_%u_CPEv4Table", lcore_id);

			uint8_t table_part = lconf->startup_cfg[0].nb_slave_threads;
			if (!rte_is_power_of_2(table_part)) {
				table_part = rte_align32pow2(table_part) >> 1;
			}

			struct rte_hash_parameters hash_params = {
				.name = name,
				.entries = MAX_GRE / table_part,
				.bucket_entries = GRE_BUCKET_ENTRIES,
				.key_len = sizeof(struct hash_gre_struct),
				.entry_len = sizeof(struct cpe_table_hash_entry),
				.hash_func_init_val = 0,
				.socket_id = socket
			};
			lconf->cpe_v4_table = rte_hash_ext_create(&hash_params);
			TGEN_PANIC(lconf->cpe_v4_table == NULL, "Unable to allocate memory for IPv4 hash table on core %u\n", lcore_id);

			/* set all entries to expire at MAX_TSC (i.e. never) so that we don't waste cycles at startup going through all the empty entries */
			setup_arp_entries(lconf->cpe_v4_table);

			/* for locality, copy the pointer to the port structure where it is needed at packet handling time */
			for (uint8_t task_id = 0; task_id < lconf->nb_tasks; ++task_id) {
				if (lconf->startup_cfg[task_id].flags & PORT_STARTUP_CPEv4) {
					lconf->startup_cfg[task_id].cpe_table = lconf->cpe_v4_table;
				}
			}
		}

		if (lconf->flags & PCFG_CPETABLEv6) {
			sprintf(name, "core_%u_CPEv6Table", lcore_id);

			uint8_t table_part = lconf->startup_cfg[0].nb_slave_threads;
			if (!rte_is_power_of_2(table_part)) {
				table_part = rte_align32pow2(table_part) >> 1;
			}
Example #13
static void init_rings(void)
{
	struct lcore_cfg *lconf;
	char ring_name[3] = "A";

	for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
		if (!rte_lcore_is_enabled(lcore_id) || lcore_id == tgen_cfg.master) {
			continue;
		}

		lconf = &lcore_cfg[lcore_id];
		uint8_t socket = rte_lcore_to_socket_id(lcore_id);
		mprintf("\t*** Initializing core %u ***\n", lcore_id);
		for (uint8_t task_id = 0; task_id < lconf->nb_tasks; ++task_id) {
			struct task_startup_cfg *sstartup_cfg = &lconf->startup_cfg[task_id];
			if (sstartup_cfg->ring_size == 0) {
				sstartup_cfg->ring_size = RING_RX_SIZE;
			}
			uint8_t tot_nb_txrings = 0;
			for (uint8_t idx = 0; idx < MAX_PROTOCOLS; ++idx) {
				if (!sstartup_cfg->thread_list[idx].active) {
					continue;
				}

				for (uint8_t ring_idx = 0; ring_idx < sstartup_cfg->thread_list[idx].nb_threads; ++ring_idx, ++tot_nb_txrings) {
					TGEN_ASSERT(ring_idx < MAX_WT_PER_LB);
					TGEN_ASSERT(tot_nb_txrings < MAX_RINGS_PER_CORE);

					uint8_t lcore_worker = sstartup_cfg->thread_list[idx].thread_id[ring_idx];
					TGEN_PANIC(!rte_lcore_is_enabled(lcore_worker) || lcore_worker == tgen_cfg.master, "Invalid worker: lcore %u is not enabled\n", lcore_worker);
					struct lcore_cfg *lworker = &lcore_cfg[lcore_worker];
					uint8_t dest_task = sstartup_cfg->thread_list[idx].dest_task;
					struct task_startup_cfg *dstartup_cfg = &lworker->startup_cfg[dest_task];
					TGEN_PANIC(!(dstartup_cfg->flags & PORT_STARTUP_RX_RING), "Invalid worker: lcore %u task %u is not expecting to receive through a ring\n", lcore_worker, dest_task);

					TGEN_PANIC(dest_task >= lworker->nb_tasks, "Invalid worker: lcore %u task %u not configured\n", lcore_worker, dest_task);

					mprintf("\t\tCreating ring (size: %u) to connect core %u (socket %u) with worker core %u worker %u...\n",
					        sstartup_cfg->ring_size, lcore_id, socket, lcore_worker, ring_idx);
					/* socket used is the one that the sending core resides on */
					struct rte_ring *ring = rte_ring_create(ring_name, sstartup_cfg->ring_size, socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
					TGEN_PANIC(ring == NULL, "Cannot create ring to connect I/O core %u with worker core %u\n", lcore_id, lcore_worker);

					ring_name[0]++;
					TGEN_ASSERT(dstartup_cfg->nb_rxrings < MAX_RINGS_PER_CORE);
					/* will skip inactive rings */
					sstartup_cfg->tx_rings[tot_nb_txrings] = ring;
					dstartup_cfg->rx_rings[dstartup_cfg->nb_rxrings] = ring;
					++dstartup_cfg->nb_rxrings;

					dstartup_cfg->nb_slave_threads = sstartup_cfg->thread_list[idx].nb_threads;
					mprintf("\t\tCore %u port %u tx_ring[%u] => core %u task %u rx_ring[%u] %p %s %u WT\n",
					        lcore_id, task_id, ring_idx, lcore_worker, dest_task, dstartup_cfg->nb_rxrings, ring, ring->name,
					        dstartup_cfg->nb_slave_threads);
				}

				if (LB_QINQ == sstartup_cfg->mode || LB_NETWORK == sstartup_cfg->mode) {
					if (lb_nb_txrings == 0xff) {
						lb_nb_txrings = sstartup_cfg->nb_worker_threads;
					}
					else if (lb_nb_txrings != sstartup_cfg->nb_worker_threads) {
							TGEN_PANIC(tot_nb_txrings != 1, "All LB should have same number of tx_rings: %u != %u\n", lb_nb_txrings, sstartup_cfg->nb_txrings);
					}
				}
			}
		}
	}
}
Example #14
static int
init_mem(unsigned nb_mbuf)
{
    int socketid;
    unsigned lcore_id;
    char s[64];

    for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
        if (rte_lcore_is_enabled(lcore_id) == 0)
            continue;

        if (sk.numa_on)
            socketid = rte_lcore_to_socket_id(lcore_id);
        else
            socketid = 0;

        if (socketid >= NB_SOCKETS) {
            rte_exit(EXIT_FAILURE,
                     "Socket %d of lcore %u is out of range %d\n",
                     socketid, lcore_id, NB_SOCKETS);
        }

        if (pktmbuf_pool[socketid] == NULL) {
            snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
            pktmbuf_pool[socketid] =
                rte_pktmbuf_pool_create(s, nb_mbuf,
                                        MEMPOOL_CACHE_SIZE, 0,
                                        RTE_MBUF_DEFAULT_BUF_SIZE, socketid);
            if (pktmbuf_pool[socketid] == NULL)
                rte_exit(EXIT_FAILURE,
                         "Cannot init mbuf pool on socket %d\n",
                         socketid);
            else
                LOG_INFO("Allocated mbuf pool on socket %d.", socketid);

        }

#ifdef IP_FRAG
        struct rte_mempool *mp;

        if (socket_direct_pool[socketid] == NULL) {
            LOG_INFO("Creating direct mempool on socket %i\n",
                    socketid);
            snprintf(s, sizeof(s), "pool_direct_%i", socketid);

            mp = rte_pktmbuf_pool_create(s, IP_FRAG_NB_MBUF, 32,
                                         0, RTE_MBUF_DEFAULT_BUF_SIZE, socketid);
            if (mp == NULL) {
                LOG_ERR("Cannot create direct mempool\n");
                return -1;
            }
            socket_direct_pool[socketid] = mp;
        }

        if (socket_indirect_pool[socketid] == NULL) {
            LOG_INFO("Creating indirect mempool on socket %i\n",
                    socketid);
            snprintf(s, sizeof(s), "pool_indirect_%i", socketid);

            mp = rte_pktmbuf_pool_create(s, IP_FRAG_NB_MBUF, 32, 0, 0,
                                         socketid);
            if (mp == NULL) {
                LOG_ERR("Cannot create indirect mempool\n");
                return -1;
            }
            socket_indirect_pool[socketid] = mp;
        }
#endif

    }
    return 0;
}
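
Once init_mem() has populated pktmbuf_pool[] per socket, a fast-path thread allocates from the pool local to its own socket. A minimal usage sketch (assumes the arrays above are in scope; local_mbuf_alloc is an illustrative name):

#include <rte_lcore.h>
#include <rte_mbuf.h>

/* Allocate an mbuf from the pool local to the calling lcore's socket. */
static struct rte_mbuf *
local_mbuf_alloc(void)
{
    return rte_pktmbuf_alloc(pktmbuf_pool[rte_socket_id()]);
}
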
Example #15
static void init_task_gen(struct task_base *tbase, struct task_args *targ)
{
	struct task_gen_server *task = (struct task_gen_server *)tbase;
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);

	static char name[] = "server_mempool";
	name[0]++;
	task->mempool = rte_mempool_create(name,
					   targ->nb_mbuf - 1, MBUF_SIZE,
					   targ->nb_cache_mbuf,
					   sizeof(struct rte_pktmbuf_pool_private),
					   rte_pktmbuf_pool_init, NULL,
					   rte_pktmbuf_init, 0,
					   socket_id, 0);
	int pop = lua_getfrom(prox_lua(), GLOBAL, targ->streams);
	PROX_PANIC(pop < 0, "Failed to find '%s' in lua\n", targ->streams);

	lua_len(prox_lua(), -1);
	uint32_t n_listen = lua_tointeger(prox_lua(), -1);
	lua_pop(prox_lua(), 1);
	PROX_PANIC(n_listen == 0, "No services specified to listen on\n");

	task->bundle_cfgs = rte_zmalloc_socket(NULL, n_listen * sizeof(task->bundle_cfgs[0]), RTE_CACHE_LINE_SIZE, socket_id);

	plogx_info("n_listen = %d\n", n_listen);
	const struct rte_hash_parameters listen_table = {
		.name = name,
		.entries = n_listen * 4,
		.key_len = sizeof(struct new_tuple),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = socket_id,
	};
	name[0]++;

	task->listen_hash = rte_hash_create(&listen_table);
	task->listen_entries = rte_zmalloc_socket(NULL, listen_table.entries * sizeof(task->listen_entries[0]), RTE_CACHE_LINE_SIZE, socket_id);

	int idx = 0;
	lua_pushnil(prox_lua());
	while (lua_next(prox_lua(), -2)) {
		task->bundle_cfgs[idx].n_stream_cfgs = 1;
		int ret = lua_to_stream_cfg(prox_lua(), STACK, NULL, socket_id, &task->bundle_cfgs[idx].stream_cfgs[0]);
		PROX_PANIC(ret, "Failed to load stream cfg\n");
		struct stream_cfg *stream = task->bundle_cfgs[idx].stream_cfgs[0];

		// TODO: check mask and add to hash for each host
		struct new_tuple nt = {
			.dst_addr = stream->servers.ip,
			.proto_id = stream->proto,
			.dst_port = stream->servers.port,
			.l2_types[0] = 0x0008,
		};

		ret = rte_hash_add_key(task->listen_hash, &nt);
		PROX_PANIC(ret < 0, "Failed to add\n");

		task->listen_entries[ret] = &task->bundle_cfgs[idx];

		plogx_dbg("Server = "IPv4_BYTES_FMT":%d\n", IPv4_BYTES(((uint8_t*)&nt.dst_addr)), rte_bswap16(nt.dst_port));
		++idx;
		lua_pop(prox_lua(), 1);
	}

	static char name2[] = "task_gen_hash2";
	name2[0]++;
	PROX_PANIC(bundle_ctx_pool_create(name2, targ->n_concur_conn*2, &task->bundle_ctx_pool, socket_id), "Failed to create conn_ctx_pool");
	task->heap = heap_create(targ->n_concur_conn*2, socket_id);
	task->seed = rte_rdtsc();
}

static void init_task_gen_client(struct task_base *tbase, struct task_args *targ)
{
	struct task_gen_client *task = (struct task_gen_client *)tbase;
	static char name[] = "gen_pool";
	const uint32_t socket = rte_lcore_to_socket_id(targ->lconf->id);
	name[0]++;
	task->mempool = rte_mempool_create(name,
					   targ->nb_mbuf - 1, MBUF_SIZE,
					   targ->nb_cache_mbuf,
					   sizeof(struct rte_pktmbuf_pool_private),
					   rte_pktmbuf_pool_init, NULL,
					   rte_pktmbuf_init, 0,
					   socket, 0);

	/* streams contains a lua table. Go through it and read each
	   stream with associated imix_fraction. */
	uint32_t imix;
	int i = 0;

	int pop = lua_getfrom(prox_lua(), GLOBAL, targ->streams);
	PROX_PANIC(pop < 0, "Failed to find '%s' in lua\n", targ->streams);

	lua_len(prox_lua(), -1);
	uint32_t n_bundle_cfgs = lua_tointeger(prox_lua(), -1);
	lua_pop(prox_lua(), 1);
	PROX_PANIC(n_bundle_cfgs == 0, "No configs specified\n");
	plogx_info("loading %d bundle_cfgs\n", n_bundle_cfgs);
	task->bundle_cfgs = rte_zmalloc_socket(NULL, n_bundle_cfgs * sizeof(task->bundle_cfgs[0]), RTE_CACHE_LINE_SIZE, socket);
	lua_pushnil(prox_lua());

	int total_imix = 0;
	struct cdf *cdf = cdf_create(n_bundle_cfgs, socket);

	while (lua_next(prox_lua(), -2)) {
		PROX_PANIC(lua_to_int(prox_lua(), TABLE, "imix_fraction", &imix) ||
			   lua_to_bundle_cfg(prox_lua(), TABLE, "bundle", socket, &task->bundle_cfgs[i]),
			   "Failed to load bundle cfg:\n%s\n", get_lua_to_errors());
		cdf_add(cdf, imix);
		total_imix += imix;
		++i;
		lua_pop(prox_lua(), 1);
	}
	lua_pop(prox_lua(), pop);
	cdf_setup(cdf);

	task->tot_imix = total_imix;
	task->cdf = cdf;
	static char name2[] = "task_gen_hash";
	name2[0]++;
	PROX_PANIC(bundle_ctx_pool_create(name2, targ->n_concur_conn, &task->bundle_ctx_pool, socket), "Failed to create conn_ctx_pool");
	task->heap = heap_create(targ->n_concur_conn, socket);
	task->seed = rte_rdtsc();
}
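
/* The cdf built above from the imix fractions is what lets the client
 * pick each new bundle with probability proportional to its
 * imix_fraction. The cdf_* implementation is not part of this snippet;
 * a self-contained sketch of the underlying technique (cumulative
 * weights plus one uniform draw; names are illustrative) might be:
 */
static int weighted_pick(const uint32_t *weight, int n, uint32_t total,
			 uint32_t rnd)
{
	uint32_t r = rnd % total;
	uint32_t acc = 0;

	for (int i = 0; i < n; i++) {
		acc += weight[i];
		if (r < acc)
			return i;
	}
	return n - 1; /* unreachable when total == sum of weights */
}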

static struct task_init task_init_gen1 = {
	.mode_str = "genl4",
	.sub_mode_str = "server",
	.init = init_task_gen,
	.handle = handle_gen_bulk,
	.flag_features = TASK_ZERO_RX,
	.size = sizeof(struct task_gen_server),
	.mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};

static struct task_init task_init_gen2 = {
	.mode_str = "genl4",
	.init = init_task_gen_client,
	.handle = handle_gen_bulk_client,
	.flag_features = TASK_ZERO_RX,
	.size = sizeof(struct task_gen_client),
	.mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};

__attribute__((constructor)) static void reg_task_gen(void)
{
	reg_task(&task_init_gen1);
	reg_task(&task_init_gen2);
}
Example No. 16
/*
 * Initialize a given port using default settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 * FIXME: Starting with assumption of one thread/core per port
 */
static inline int uhd_dpdk_port_init(struct uhd_dpdk_port *port,
                                     struct rte_mempool *rx_mbuf_pool,
                                     unsigned int mtu)
{
    int retval;

    /* Check for a valid port */
    if (port->id >= rte_eth_dev_count())
        return -ENODEV;

    /* Set up Ethernet device with defaults (1 RX ring, 1 TX ring) */
    /* FIXME: Check if hw_ip_checksum is possible */
    struct rte_eth_conf port_conf = {
        .rxmode = {
            .max_rx_pkt_len = mtu,
            .jumbo_frame = 1,
            .hw_ip_checksum = 1,
        }
    };
    retval = rte_eth_dev_configure(port->id, 1, 1, &port_conf);
    if (retval != 0)
        return retval;

    retval = rte_eth_rx_queue_setup(port->id, 0, DEFAULT_RING_SIZE,
                 rte_eth_dev_socket_id(port->id), NULL, rx_mbuf_pool);
    if (retval < 0)
        return retval;

    retval = rte_eth_tx_queue_setup(port->id, 0, DEFAULT_RING_SIZE,
                 rte_eth_dev_socket_id(port->id), NULL);
    if (retval < 0)
        goto port_init_fail;

    /* Create the hash table for the RX sockets */
    char name[32];
    snprintf(name, sizeof(name), "rx_table_%u", port->id);
    struct rte_hash_parameters hash_params = {
        .name = name,
        .entries = UHD_DPDK_MAX_SOCKET_CNT,
        .key_len = sizeof(struct uhd_dpdk_ipv4_5tuple),
        .hash_func = NULL,
        .hash_func_init_val = 0,
    };
    port->rx_table = rte_hash_create(&hash_params);
    if (port->rx_table == NULL) {
        retval = rte_errno;
        goto port_init_fail;
    }

    /* Create ARP table */
    snprintf(name, sizeof(name), "arp_table_%u", port->id);
    hash_params.name = name;
    hash_params.entries = UHD_DPDK_MAX_SOCKET_CNT;
    hash_params.key_len = sizeof(uint32_t);
    hash_params.hash_func = NULL;
    hash_params.hash_func_init_val = 0;
    port->arp_table = rte_hash_create(&hash_params);
    if (port->arp_table == NULL) {
        retval = rte_errno;
        goto free_rx_table;
    }

    /* Set up list for TX queues */
    LIST_INIT(&port->txq_list);

    /* Start the Ethernet port. */
    retval = rte_eth_dev_start(port->id);
    if (retval < 0) {
        goto free_arp_table;
    }

    /* Display the port MAC address. */
    rte_eth_macaddr_get(port->id, &port->mac_addr);
    RTE_LOG(INFO, EAL, "Port %u MAC: %02x %02x %02x %02x %02x %02x\n",
                (unsigned)port->id,
                port->mac_addr.addr_bytes[0], port->mac_addr.addr_bytes[1],
                port->mac_addr.addr_bytes[2], port->mac_addr.addr_bytes[3],
                port->mac_addr.addr_bytes[4], port->mac_addr.addr_bytes[5]);

    struct rte_eth_link link;
    rte_eth_link_get(port->id, &link);
    RTE_LOG(INFO, EAL, "Port %u UP: %d\n", port->id, link.link_status);

    return 0;

free_arp_table:
    rte_hash_free(port->arp_table);
free_rx_table:
    rte_hash_free(port->rx_table);
port_init_fail:
    return retval;
}

static int uhd_dpdk_thread_init(struct uhd_dpdk_thread *thread, unsigned int id)
{
    if (!ctx || !thread)
        return -EINVAL;

    unsigned int socket_id = rte_lcore_to_socket_id(id);
    thread->id = id;
    thread->rx_pktbuf_pool = ctx->rx_pktbuf_pools[socket_id];
    thread->tx_pktbuf_pool = ctx->tx_pktbuf_pools[socket_id];
    LIST_INIT(&thread->port_list);

    char name[32];
    snprintf(name, sizeof(name), "sockreq_ring_%u", id);
    thread->sock_req_ring = rte_ring_create(
                               name,
                               UHD_DPDK_MAX_PENDING_SOCK_REQS,
                               socket_id,
                               RING_F_SC_DEQ
                            );
    if (!thread->sock_req_ring)
        return -ENOMEM;
    return 0;
}


int uhd_dpdk_init(int argc, char **argv, unsigned int num_ports,
                  int *port_thread_mapping, int num_mbufs, int mbuf_cache_size,
                  int mtu)
{
    /* Init context only once */
    if (ctx)
        return 1;

    if ((num_ports == 0) || (port_thread_mapping == NULL)) {
        return -EINVAL;
    }

    /* Grabs arguments intended for DPDK's EAL */
    int ret = rte_eal_init(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

    ctx = (struct uhd_dpdk_ctx *) rte_zmalloc("uhd_dpdk_ctx", sizeof(*ctx), rte_socket_id());
    if (!ctx)
        return -ENOMEM;

    ctx->num_threads = rte_lcore_count();
    if (ctx->num_threads <= 1)
        rte_exit(EXIT_FAILURE, "Error: No worker threads enabled\n");

    /* Check that we have ports to send/receive on */
    ctx->num_ports = rte_eth_dev_count();
    if (ctx->num_ports < 1)
        rte_exit(EXIT_FAILURE, "Error: Found no ports\n");
    if (ctx->num_ports < num_ports)
        rte_exit(EXIT_FAILURE, "Error: User requested more ports than available\n");

    /* Get memory for thread and port data structures */
    ctx->threads = rte_zmalloc("uhd_dpdk_thread", RTE_MAX_LCORE*sizeof(struct uhd_dpdk_thread), 0);
    if (!ctx->threads)
        rte_exit(EXIT_FAILURE, "Error: Could not allocate memory for thread data\n");
    ctx->ports = rte_zmalloc("uhd_dpdk_port", ctx->num_ports*sizeof(struct uhd_dpdk_port), 0);
    if (!ctx->ports)
        rte_exit(EXIT_FAILURE, "Error: Could not allocate memory for port data\n");

    /* Initialize the thread data structures */
    for (int i = rte_get_next_lcore(-1, 1, 0);
        (i < RTE_MAX_LCORE);
        i = rte_get_next_lcore(i, 1, 0))
    {
        /* Do one mempool of RX/TX per socket */
        unsigned int socket_id = rte_lcore_to_socket_id(i);
        /* FIXME Probably want to take into account actual number of ports per socket */
        if (ctx->tx_pktbuf_pools[socket_id] == NULL) {
            /* Creates a new mempool in memory to hold the mbufs.
             * This is done for each CPU socket
             */
            const int mbuf_size = mtu + 2048 + RTE_PKTMBUF_HEADROOM;
            char name[32];
            snprintf(name, sizeof(name), "rx_mbuf_pool_%u", socket_id);
            ctx->rx_pktbuf_pools[socket_id] = rte_pktmbuf_pool_create(
                                               name,
                                               ctx->num_ports*num_mbufs,
                                               mbuf_cache_size,
                                               0,
                                               mbuf_size,
                                               socket_id
                                           );
            snprintf(name, sizeof(name), "tx_mbuf_pool_%u", socket_id);
            ctx->tx_pktbuf_pools[socket_id] = rte_pktmbuf_pool_create(
                                               name,
                                               ctx->num_ports*num_mbufs,
                                               mbuf_cache_size,
                                               0,
                                               mbuf_size,
                                               socket_id
                                           );
            if ((ctx->rx_pktbuf_pools[socket_id]== NULL) ||
                (ctx->tx_pktbuf_pools[socket_id]== NULL))
                rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
        }

        if (uhd_dpdk_thread_init(&ctx->threads[i], i) < 0)
            rte_exit(EXIT_FAILURE, "Error initializing thread %i\n", i);
    }

    unsigned master_lcore = rte_get_master_lcore();

    /* Assign ports to threads and initialize the port data structures */
    for (unsigned int i = 0; i < num_ports; i++) {
        int thread_id = port_thread_mapping[i];
        if (thread_id < 0)
            continue;
        if (((unsigned int) thread_id) == master_lcore)
            RTE_LOG(WARNING, EAL, "User requested master lcore for port %u\n", i);
        if (ctx->threads[thread_id].id != (unsigned int) thread_id)
            rte_exit(EXIT_FAILURE, "Requested inactive lcore %u for port %u\n", (unsigned int) thread_id, i);

        struct uhd_dpdk_port *port = &ctx->ports[i];
        port->id = i;
        port->parent = &ctx->threads[thread_id];
        ctx->threads[thread_id].num_ports++;
        LIST_INSERT_HEAD(&ctx->threads[thread_id].port_list, port, port_entry);

        /* Initialize port. */
        if (uhd_dpdk_port_init(port, port->parent->rx_pktbuf_pool, mtu) != 0)
            rte_exit(EXIT_FAILURE, "Cannot init port %u\n", i);
    }

    RTE_LOG(INFO, EAL, "Init DONE!\n");

    /* FIXME: Create functions to do this */
    RTE_LOG(INFO, EAL, "Starting I/O threads!\n");

    for (int i = rte_get_next_lcore(-1, 1, 0);
        (i < RTE_MAX_LCORE);
        i = rte_get_next_lcore(i, 1, 0))
    {
        struct uhd_dpdk_thread *t = &ctx->threads[i];
        if (!LIST_EMPTY(&t->port_list)) {
            rte_eal_remote_launch(_uhd_dpdk_driver_main, NULL, ctx->threads[i].id);
        }
    }
    return 0;
}

/* FIXME: This will be changed once we have functions to handle the threads */
int uhd_dpdk_destroy(void)
{
    if (!ctx)
        return -ENODEV;

    struct uhd_dpdk_config_req *req = (struct uhd_dpdk_config_req *) rte_zmalloc(NULL, sizeof(*req), 0);
    if (!req)
        return -ENOMEM;

    req->req_type = UHD_DPDK_LCORE_TERM;

    for (int i = rte_get_next_lcore(-1, 1, 0);
        (i < RTE_MAX_LCORE);
        i = rte_get_next_lcore(i, 1, 0))
    {
        struct uhd_dpdk_thread *t = &ctx->threads[i];

        if (LIST_EMPTY(&t->port_list))
            continue;

        if (rte_eal_get_lcore_state(t->id) == FINISHED)
            continue;

        pthread_mutex_init(&req->mutex, NULL);
        pthread_cond_init(&req->cond, NULL);
        pthread_mutex_lock(&req->mutex);
        if (rte_ring_enqueue(t->sock_req_ring, req)) {
            pthread_mutex_unlock(&req->mutex);
            RTE_LOG(ERR, USER2, "Failed to terminate thread %d\n", i);
            rte_free(req);
            return -ENOSPC;
        }
        struct timespec timeout;
        clock_gettime(CLOCK_REALTIME, &timeout);
        timeout.tv_sec += 1; /* pthread_cond_timedwait takes an absolute deadline */
        pthread_cond_timedwait(&req->cond, &req->mutex, &timeout);
        pthread_mutex_unlock(&req->mutex);
    }

    rte_free(req);
    return 0;
}
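
/* uhd_dpdk_destroy() only shows the requesting half of the handshake:
 * it enqueues a UHD_DPDK_LCORE_TERM request and waits (with a timeout)
 * on the request's condition variable. The consuming half lives in
 * _uhd_dpdk_driver_main(), which is not part of this example; assuming
 * that loop polls sock_req_ring, its acknowledgment would look roughly
 * like this sketch:
 */
static void handle_lcore_term(struct uhd_dpdk_thread *t)
{
    struct uhd_dpdk_config_req *req = NULL;

    if (rte_ring_dequeue(t->sock_req_ring, (void **)&req) < 0)
        return; /* nothing pending */

    if (req->req_type == UHD_DPDK_LCORE_TERM) {
        pthread_mutex_lock(&req->mutex);
        pthread_cond_signal(&req->cond); /* wake uhd_dpdk_destroy() */
        pthread_mutex_unlock(&req->mutex);
        /* ...then exit the I/O loop so the lcore reaches FINISHED */
    }
}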
Example No. 17
void
app_init_rings_rx(void) {
  unsigned lcore;

  /* Initialize the rings for the RX side */
  for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
    struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;
    unsigned socket_io, lcore_worker;

    if ((app.lcore_params[lcore].type != e_APP_LCORE_IO &&
         app.lcore_params[lcore].type != e_APP_LCORE_IO_WORKER) ||
        (lp_io->rx.n_nic_queues == 0)) {
      continue;
    }

    socket_io = rte_lcore_to_socket_id(lcore);

    for (lcore_worker = 0; lcore_worker < APP_MAX_LCORES; lcore_worker ++) {
      char name[32];
      struct app_lcore_params_worker *lp_worker;
      struct rte_ring *ring = NULL;

      lp_worker = &app.lcore_params[lcore_worker].worker;
      if (app.lcore_params[lcore_worker].type != e_APP_LCORE_WORKER &&
          app.lcore_params[lcore_worker].type != e_APP_LCORE_IO_WORKER) {
        continue;
      }

      lagopus_dprint(
        "Creating ring to connect I/O "
        "lcore %u (socket %u) with worker lcore %u ...\n",
        lcore,
        socket_io,
        lcore_worker);
      snprintf(name, sizeof(name),
               "app_ring_rx_s%u_io%u_w%u",
               socket_io,
               lcore,
               lcore_worker);
      ring = rte_ring_create(
               name,
               app.ring_rx_size,
               (int)socket_io,
               RING_F_SP_ENQ | RING_F_SC_DEQ);
      if (ring == NULL) {
        rte_panic("Cannot create ring to connect I/O "
                  "core %u with worker core %u\n",
                  lcore,
                  lcore_worker);
      }

      lp_io->rx.rings[lp_io->rx.n_rings] = ring;
      lp_io->rx.n_rings ++;

      lp_worker->rings_in[lp_worker->n_rings_in] = ring;
      lp_worker->n_rings_in ++;
    }
  }

  for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
    struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;

    if ((app.lcore_params[lcore].type != e_APP_LCORE_IO &&
         app.lcore_params[lcore].type != e_APP_LCORE_IO_WORKER) ||
        (lp_io->rx.n_nic_queues == 0)) {
      continue;
    }

    if (lp_io->rx.n_rings != app_get_lcores_worker()) {
      rte_panic("Algorithmic error (I/O RX rings)\n");
    }
  }

  for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
    struct app_lcore_params_worker *lp_worker;

    lp_worker = &app.lcore_params[lcore].worker;
    if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER &&
        app.lcore_params[lcore].type != e_APP_LCORE_IO_WORKER) {
      continue;
    }

    if (lp_worker->n_rings_in != app_get_lcores_io_rx()) {
      rte_panic("Algorithmic error (worker input rings)\n");
    }
  }
}
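
/* The two panic checks above pin down the topology this function
 * builds: a full mesh with one SP/SC ring from every RX-side I/O lcore
 * to every worker. As a worked example (numbers illustrative, not from
 * the source): with 2 I/O lcores and 4 workers, 2 * 4 = 8 rings are
 * created, each I/O lcore ends up with rx.n_rings == 4, and each
 * worker with n_rings_in == 2. */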
Example No. 18
static struct rte_ring *init_ring_between_tasks(struct lcore_cfg *lconf, struct task_args *starg,
				    const struct core_task ct, uint8_t ring_idx, int idx,
				    struct ring_init_stats *ris)
{
	uint8_t socket;
	struct rte_ring *ring = NULL;
	struct lcore_cfg *lworker;
	struct task_args *dtarg;

	PROX_ASSERT(prox_core_active(ct.core, 0));
	lworker = &lcore_cfg[ct.core];

	/* socket used is the one that the sending core resides on */
	socket = rte_lcore_to_socket_id(lconf->id);

	plog_info("\t\tCreating ring on socket %u with size %u\n"
		  "\t\t\tsource core, task and socket = %u, %u, %u\n"
		  "\t\t\tdestination core, task and socket = %u, %u, %u\n"
		  "\t\t\tdestination worker id = %u\n",
		  socket, starg->ring_size,
		  lconf->id, starg->id, socket,
		  ct.core, ct.task, rte_lcore_to_socket_id(ct.core),
		  ring_idx);

	if (ct.type) {
		struct rte_ring **dring = NULL;

		if (ct.type == CTRL_TYPE_MSG)
			dring = &lworker->ctrl_rings_m[ct.task];
		else if (ct.type == CTRL_TYPE_PKT) {
			dring = &lworker->ctrl_rings_p[ct.task];
			starg->flags |= TASK_ARG_CTRL_RINGS_P;
		}

		if (*dring == NULL)
			ring = rte_ring_create(gen_ring_name(), starg->ring_size, socket, RING_F_SC_DEQ);
		else
			ring = *dring;
		PROX_PANIC(ring == NULL, "Cannot create ring to connect I/O core %u with worker core %u\n", lconf->id, ct.core);

		starg->tx_rings[starg->tot_n_txrings_inited] = ring;
		starg->tot_n_txrings_inited++;
		*dring = ring;
		if (lconf->id == prox_cfg.master) {
			ctrl_rings[ct.core*MAX_TASKS_PER_CORE + ct.task] = ring;
		} else if (ct.core == prox_cfg.master) {
			starg->ctrl_plane_ring = ring;
		}

		plog_info("\t\tCore %u task %u to -> core %u task %u ctrl_ring %s %p %s\n",
			  lconf->id, starg->id, ct.core, ct.task, ct.type == CTRL_TYPE_PKT?
			  "pkt" : "msg", ring, ring->name);
		ris->n_ctrl_rings++;
		return ring;
	}

	dtarg = &lworker->targs[ct.task];
	lworker->targs[ct.task].worker_thread_id = ring_idx;
	PROX_ASSERT(dtarg->flags & TASK_ARG_RX_RING);
	PROX_ASSERT(ct.task < lworker->n_tasks_all);

	/* If all the following conditions are met, the ring can be
	   optimized away. */
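	/* Namely: neither endpoint is the master task, both run on the
	   same lcore, the source has exactly one TX ring and this is its
	   first destination, and the destination is not task 0, has
	   exactly one RX ring, and directly follows the source task. */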
	if (!task_is_master(starg) && !task_is_master(dtarg) && starg->lconf->id == dtarg->lconf->id &&
	    starg->nb_txrings == 1 && idx == 0 && dtarg->task &&
	    dtarg->tot_rxrings == 1 && starg->task == dtarg->task - 1) {
		plog_info("\t\tOptimizing away ring on core %u from task %u to task %u\n",
			  dtarg->lconf->id, starg->task, dtarg->task);
		/* No need to set up ws_mbuf. */
		starg->tx_opt_ring = 1;
		/* During init of destination task, the buffer in the
		   source task will be initialized. */
		dtarg->tx_opt_ring_task = starg;
		ris->n_opt_rings++;
		++dtarg->nb_rxrings;
		return NULL;
	}

	int ring_created = 1;
	/* Only create multi-producer rings if configured to do so AND
	   there is more than one task sending to the task */
	if ((prox_cfg.flags & DSF_MP_RINGS && count_incoming_tasks(ct.core, ct.task) > 1)
		|| (prox_cfg.flags & DSF_ENABLE_BYPASS)) {
		ring = get_existing_ring(ct.core, ct.task);

		if (ring) {
			plog_info("\t\tCore %u task %u using existing MP ring %p to core %u task %u\n",
				  lconf->id, starg->id, ring, ct.core, ct.task);
			ring_created = 0;
		}
		else {
			ring = rte_ring_create(gen_ring_name(), starg->ring_size, socket, RING_F_SC_DEQ);
			plog_info("\t\tCore %u task %u creating MP ring %p to core %u task %u\n",
				  lconf->id, starg->id, ring, ct.core, ct.task);
		}
	}
	else
		ring = rte_ring_create(gen_ring_name(), starg->ring_size, socket, RING_F_SP_ENQ | RING_F_SC_DEQ);

	PROX_PANIC(ring == NULL, "Cannot create ring to connect I/O core %u with worker core %u\n", lconf->id, ct.core);

	starg->tx_rings[starg->tot_n_txrings_inited] = ring;
	starg->tot_n_txrings_inited++;

	if (ring_created) {
		PROX_ASSERT(dtarg->nb_rxrings < MAX_RINGS_PER_TASK);
		dtarg->rx_rings[dtarg->nb_rxrings] = ring;
		++dtarg->nb_rxrings;
	}
	dtarg->nb_slave_threads = starg->core_task_set[idx].n_elems;
	dtarg->lb_friend_core = lconf->id;
	dtarg->lb_friend_task = starg->id;
	plog_info("\t\tWorker thread %d has core %d, task %d as a lb friend\n", ct.core, lconf->id, starg->id);
	plog_info("\t\tCore %u task %u tx_ring[%u] -> core %u task %u rx_ring[%u] %p %s %u WT\n",
		  lconf->id, starg->id, ring_idx, ct.core, ct.task, dtarg->nb_rxrings, ring, ring->name,
		  dtarg->nb_slave_threads);
	++ris->n_pkt_rings;
	return ring;
}
Example No. 19
lagopus_result_t
dpdk_configure_interface(struct interface *ifp) {
  unsigned socket;
  uint32_t lcore;
  uint8_t queue;
  int ret;
  uint32_t n_rx_queues, n_tx_queues;
  uint8_t portid;
  struct rte_mempool *pool;

  portid = ifp->info.eth.port_number;

  n_rx_queues = app_get_nic_rx_queues_per_port(portid);
  n_tx_queues = app.nic_tx_port_mask[portid];

  if ((n_rx_queues == 0) && (n_tx_queues == 0)) {
    return LAGOPUS_RESULT_INVALID_ARGS;
  }

  if (ifp->info.eth_dpdk_phy.mtu < 64 ||
      ifp->info.eth_dpdk_phy.mtu > MAX_PACKET_SZ) {
    return LAGOPUS_RESULT_OUT_OF_RANGE;
  }

  rte_eth_dev_info_get(portid, &ifp->devinfo);

  /* Init port */
  printf("Initializing NIC port %u ...\n", (unsigned) portid);

  ret = rte_eth_dev_configure(portid,
                              (uint8_t) n_rx_queues,
                              (uint8_t) n_tx_queues,
                              &port_conf);
  if (ret < 0) {
    rte_panic("Cannot init NIC port %u (%s)\n",
              (unsigned) portid, strerror(-ret));
  }
  ret = rte_eth_dev_set_mtu(portid, ifp->info.eth_dpdk_phy.mtu);
  if (ret < 0) {
    rte_panic("Cannot set MTU(%d) for port %d (%d)\n",
              ifp->info.eth_dpdk_phy.mtu,
              portid,
              ret);
  }
  rte_eth_promiscuous_enable(portid);

  /* Init RX queues */
  for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) {
    struct app_lcore_params_io *lp;
    uint8_t i;

    if (app.nic_rx_queue_mask[portid][queue] == NIC_RX_QUEUE_UNCONFIGURED) {
      continue;
    }
    app_get_lcore_for_nic_rx(portid, queue, &lcore);
    lp = &app.lcore_params[lcore].io;
    socket = rte_lcore_to_socket_id(lcore);
    pool = app.lcore_params[lcore].pool;

    printf("Initializing NIC port %u RX queue %u ...\n",
           (unsigned) portid,
           (unsigned) queue);
    ret = rte_eth_rx_queue_setup(portid,
                                 queue,
                                 (uint16_t) app.nic_rx_ring_size,
                                 socket,
#if defined(RTE_VERSION_NUM) && RTE_VERSION >= RTE_VERSION_NUM(1, 8, 0, 0)
                                 &ifp->devinfo.default_rxconf,
#else
                                 &rx_conf,
#endif /* RTE_VERSION_NUM */
                                 pool);
    if (ret < 0) {
      rte_panic("Cannot init RX queue %u for port %u (%d)\n",
                (unsigned) queue,
                (unsigned) portid,
                ret);
    }
    for (i = 0; i < lp->rx.n_nic_queues; i++) {
      if (lp->rx.nic_queues[i].port != portid ||
          lp->rx.nic_queues[i].queue != queue) {
        continue;
      }
      lp->rx.nic_queues[i].enabled = true;
      break;
    }
  }

  /* Init TX queues */
  if (app.nic_tx_port_mask[portid] == 1) {
    app_get_lcore_for_nic_tx(portid, &lcore);
    socket = rte_lcore_to_socket_id(lcore);
    printf("Initializing NIC port %u TX queue 0 ...\n",
           (unsigned) portid);
    ret = rte_eth_tx_queue_setup(portid,
                                 0,
                                 (uint16_t) app.nic_tx_ring_size,
                                 socket,
#if defined(RTE_VERSION_NUM) && RTE_VERSION >= RTE_VERSION_NUM(1, 8, 0, 0)
                                 &ifp->devinfo.default_txconf
#else
                                 &tx_conf
#endif /* RTE_VERSION_NUM */
                                );
    if (ret < 0) {
      rte_panic("Cannot init TX queue 0 for port %d (%d)\n",
                portid,
                ret);
    }
  }

  ifp->stats = port_stats;
  dpdk_interface_set_index(ifp);

  return LAGOPUS_RESULT_OK;
}
Example No. 20
static void setup_mempools_unique_per_socket(void)
{
	uint32_t flags = 0;
	char name[64];
	struct lcore_cfg *lconf = NULL;
	struct task_args *targ;

	struct rte_mempool     *pool[MAX_SOCKETS];
	uint32_t mbuf_count[MAX_SOCKETS] = {0};
	uint32_t nb_cache_mbuf[MAX_SOCKETS] = {0};
	uint32_t mbuf_size[MAX_SOCKETS] = {0};

	while (core_targ_next_early(&lconf, &targ, 0) == 0) {
		PROX_PANIC(targ->task_init == NULL, "task_init = NULL, is mode specified for core %d, task %d ?\n", lconf->id, targ->id);
		uint8_t socket = rte_lcore_to_socket_id(lconf->id);
		PROX_ASSERT(socket < MAX_SOCKETS);

		if (targ->mbuf_size_set_explicitely)
			flags = MEMPOOL_F_NO_SPREAD;
		if ((!targ->mbuf_size_set_explicitely) && (targ->task_init->mbuf_size != 0)) {
			targ->mbuf_size = targ->task_init->mbuf_size;
		}
		if (targ->rx_port_queue[0].port != OUT_DISCARD) {
			struct prox_port_cfg* port_cfg = &prox_port_cfg[targ->rx_port_queue[0].port];
			PROX_ASSERT(targ->nb_mbuf != 0);
			mbuf_count[socket] += targ->nb_mbuf;
			if (nb_cache_mbuf[socket] == 0)
				nb_cache_mbuf[socket] = targ->nb_cache_mbuf;
			else {
				PROX_PANIC(nb_cache_mbuf[socket] != targ->nb_cache_mbuf,
					   "all mbuf_cache must have the same size if using a unique mempool per socket\n");
			}
			if (mbuf_size[socket] == 0)
				mbuf_size[socket] = targ->mbuf_size;
			else {
				PROX_PANIC(mbuf_size[socket] != targ->mbuf_size,
					   "all mbuf_size must have the same size if using a unique mempool per socket\n");
			}
			if ((!targ->mbuf_size_set_explicitely) && (strcmp(port_cfg->short_name, "vmxnet3") == 0)) {
				if (mbuf_size[socket] < MBUF_SIZE + RTE_PKTMBUF_HEADROOM)
					mbuf_size[socket] = MBUF_SIZE + RTE_PKTMBUF_HEADROOM;
			}
		}
	}
	for (int i = 0 ; i < MAX_SOCKETS; i++) {
		if (mbuf_count[i] != 0) {
			sprintf(name, "socket_%u_pool", i);
			pool[i] = rte_mempool_create(name,
						     mbuf_count[i] - 1, mbuf_size[i],
						     nb_cache_mbuf[i],
						     sizeof(struct rte_pktmbuf_pool_private),
						     rte_pktmbuf_pool_init, NULL,
						     prox_pktmbuf_init, NULL,
						     i, flags);
			PROX_PANIC(pool[i] == NULL, "\t\tError: cannot create mempool for socket %u\n", i);
			plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", pool[i],
				  mbuf_count[i], mbuf_size[i], nb_cache_mbuf[i], i);

			if (prox_cfg.flags & DSF_SHUFFLE) {
				shuffle_mempool(pool[i], mbuf_count[i]);
			}
		}
	}

	lconf = NULL;
	while (core_targ_next_early(&lconf, &targ, 0) == 0) {
		uint8_t socket = rte_lcore_to_socket_id(lconf->id);

		if (targ->rx_port_queue[0].port != OUT_DISCARD) {
			/* use this pool for the interface that the core is receiving from */
			/* If one core receives from multiple ports, all the ports use the same mempool */
			targ->pool = pool[socket];
			/* Set the number of mbuf to the number of the unique mempool, so that the used and free work */
			targ->nb_mbuf = mbuf_count[socket];
			plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", targ->pool,
				  targ->nb_mbuf, mbuf_size[socket], targ->nb_cache_mbuf, socket);
		}
	}
}
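
/* Here, as in the other examples, the element count handed to
 * rte_mempool_create() is "count - 1". The DPDK mempool documentation
 * notes that the memory-optimal pool size is a power of two minus one
 * (2^q - 1), so configs typically specify power-of-two mbuf counts and
 * the code subtracts one. A minimal sketch of the call (names and
 * sizes are illustrative, not from the example):
 */
static struct rte_mempool *create_example_pool(uint32_t nb_mbuf_pow2,
					       uint16_t mbuf_size,
					       int socket_id)
{
	return rte_mempool_create("example_pool",
				  nb_mbuf_pow2 - 1, mbuf_size,
				  256, /* per-lcore cache size */
				  sizeof(struct rte_pktmbuf_pool_private),
				  rte_pktmbuf_pool_init, NULL,
				  rte_pktmbuf_init, NULL,
				  socket_id, 0);
}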
Example No. 21
int app_init(void)
{
	uint32_t i;
	char ring_name[MAX_NAME_LEN];
	char pool_name[MAX_NAME_LEN];

	/* init driver(s) */
	if (rte_pmd_init_all() < 0)
		rte_exit(EXIT_FAILURE, "Cannot init PMD\n");

	if (rte_eal_pci_probe() < 0)
		rte_exit(EXIT_FAILURE, "Cannot probe PCI\n");

	if (rte_eth_dev_count() == 0)
		rte_exit(EXIT_FAILURE, "No Ethernet port - bye\n");

	/* load configuration profile */
	if (app_load_cfg_profile(cfg_profile) != 0)
		rte_exit(EXIT_FAILURE, "Invalid configuration profile\n");
	
	/* Initialize each active flow */
	for(i = 0; i < nb_pfc; i++) {
		uint32_t socket = rte_lcore_to_socket_id(qos_conf[i].rx_core);
		struct rte_ring *ring;

		rte_snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].rx_core);
		ring = rte_ring_lookup(ring_name);
		if (ring == NULL)
			qos_conf[i].rx_ring = rte_ring_create(ring_name, ring_conf.ring_size,
			 	socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
		else
			qos_conf[i].rx_ring = ring;

		rte_snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].tx_core);
		ring = rte_ring_lookup(ring_name);
		if (ring == NULL)
			qos_conf[i].tx_ring = rte_ring_create(ring_name, ring_conf.ring_size,
				socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
		else
			qos_conf[i].tx_ring = ring;


		/* create the mbuf pools for each RX Port */
		rte_snprintf(pool_name, MAX_NAME_LEN, "mbuf_pool%u", i);
		qos_conf[i].mbuf_pool = rte_mempool_create(pool_name, mp_size, MBUF_SIZE,
						burst_conf.rx_burst * 4,
						sizeof(struct rte_pktmbuf_pool_private),
						rte_pktmbuf_pool_init, NULL,
						rte_pktmbuf_init, NULL,
						rte_eth_dev_socket_id(qos_conf[i].rx_port),
						0);
		if (qos_conf[i].mbuf_pool == NULL)
			rte_exit(EXIT_FAILURE, "Cannot init mbuf pool for flow %u\n", i);

		app_init_port(qos_conf[i].rx_port, qos_conf[i].mbuf_pool);
		app_init_port(qos_conf[i].tx_port, qos_conf[i].mbuf_pool);
		
		qos_conf[i].sched_port = app_init_sched_port(qos_conf[i].tx_port, socket);
	}

	RTE_LOG(INFO, APP, "time stamp clock running at %" PRIu64 " Hz\n",
			 rte_get_timer_hz());
	
	RTE_LOG(INFO, APP, "Ring sizes: NIC RX = %u, Mempool = %d, SW queue = %u, "
			 "NIC TX = %u\n", ring_conf.rx_size, mp_size, ring_conf.ring_size,
			 ring_conf.tx_size);

	RTE_LOG(INFO, APP, "Burst sizes: RX read = %hu, RX write = %hu,\n"
						  "             Worker read/QoS enqueue = %hu,\n"
						  "             QoS dequeue = %hu, Worker write = %hu\n",
		burst_conf.rx_burst, burst_conf.ring_burst, burst_conf.ring_burst, 
		burst_conf.qos_dequeue, burst_conf.tx_burst);

	RTE_LOG(INFO, APP, "NIC thresholds RX (p = %hhu, h = %hhu, w = %hhu),"
				 "TX (p = %hhu, h = %hhu, w = %hhu)\n",
		rx_thresh.pthresh, rx_thresh.hthresh, rx_thresh.wthresh,
		tx_thresh.pthresh, tx_thresh.hthresh, tx_thresh.wthresh);

	return 0;
}
Example No. 22
static void setup_mempool_for_rx_task(struct lcore_cfg *lconf, struct task_args *targ)
{
	const uint8_t socket = rte_lcore_to_socket_id(lconf->id);
	struct prox_port_cfg *port_cfg = &prox_port_cfg[targ->rx_port_queue[0].port];
	const struct rte_memzone *mz;
	struct rte_mempool *mp = NULL;
	uint32_t flags = 0;
	char memzone_name[64];
	char name[64];

	/* mbuf size can be set
	 *  - from the config file (highest priority, overriding any other config) - should only be used as a workaround
	 *  - through each 'mode', overriding the default mbuf_size
	 *  - defaulted to MBUF_SIZE, i.e. 1518 bytes
	 * Unless set explicitly, ensure that the size is big enough for the vmxnet3 driver
	 */
	if (targ->mbuf_size_set_explicitely) {
		flags = MEMPOOL_F_NO_SPREAD;
		/* targ->mbuf_size already set */
	}
	else if (targ->task_init->mbuf_size != 0) {
		/* mbuf_size not set through config file but set through mode */
		targ->mbuf_size = targ->task_init->mbuf_size;
	}
	else if (strcmp(port_cfg->short_name, "vmxnet3") == 0) {
		if (targ->mbuf_size < MBUF_SIZE + RTE_PKTMBUF_HEADROOM)
			targ->mbuf_size = MBUF_SIZE + RTE_PKTMBUF_HEADROOM;
	}

	/* allocate memory pool for packets */
	PROX_ASSERT(targ->nb_mbuf != 0);

	if (targ->pool_name[0] == '\0') {
		sprintf(name, "core_%u_port_%u_pool", lconf->id, targ->id);
	} else {
		/* Use the configured pool name, so the rte_mempool_create()
		   call below does not read an uninitialized buffer. */
		snprintf(name, sizeof(name), "%s", targ->pool_name);
	}

	snprintf(memzone_name, sizeof(memzone_name)-1, "MP_%s", targ->pool_name);
	mz = rte_memzone_lookup(memzone_name);

	if (mz != NULL) {
		mp = (struct rte_mempool*)mz->addr;

		targ->nb_mbuf = mp->size;
		targ->pool = mp;
	}

#ifdef RTE_LIBRTE_IVSHMEM_FALSE
	if (mz != NULL && mp != NULL && mp->phys_addr != mz->ioremap_addr) {
		/* Init mbufs with ioremap_addr for dma */
		mp->phys_addr = mz->ioremap_addr;
		mp->elt_pa[0] = mp->phys_addr + (mp->elt_va_start - (uintptr_t)mp);

		struct prox_pktmbuf_reinit_args init_args;
		init_args.mp = mp;
		init_args.lconf = lconf;

		uint32_t elt_sz = mp->elt_size + mp->header_size + mp->trailer_size;
		rte_mempool_obj_iter((void*)mp->elt_va_start, mp->size, elt_sz, 1,
				     mp->elt_pa, mp->pg_num, mp->pg_shift, prox_pktmbuf_reinit, &init_args);
	}
#endif

	/* Use this pool for the interface that the core is
	   receiving from if one core receives from multiple
	   ports, all the ports use the same mempool */
	if (targ->pool == NULL) {
		plog_info("\t\tCreating mempool with name '%s'\n", name);
		targ->pool = rte_mempool_create(name,
						targ->nb_mbuf - 1, targ->mbuf_size,
						targ->nb_cache_mbuf,
						sizeof(struct rte_pktmbuf_pool_private),
						rte_pktmbuf_pool_init, NULL,
						prox_pktmbuf_init, lconf,
						socket, flags);
	}

	PROX_PANIC(targ->pool == NULL,
		   "\t\tError: cannot create mempool for core %u port %u: %s\n", lconf->id, targ->id, rte_strerror(rte_errno));

	plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", targ->pool,
		  targ->nb_mbuf, targ->mbuf_size, targ->nb_cache_mbuf, socket);
	if (prox_cfg.flags & DSF_SHUFFLE) {
		shuffle_mempool(targ->pool, targ->nb_mbuf);
	}
}
Example No. 23
static void init_task_gen(struct task_base *tbase, struct task_args *targ)
{
	struct task_gen_server *task = (struct task_gen_server *)tbase;
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);

	static char name[] = "server_mempool";
	name[0]++;
	task->mempool = rte_mempool_create(name,
					   4*1024 - 1, MBUF_SIZE,
					   targ->nb_cache_mbuf,
					   sizeof(struct rte_pktmbuf_pool_private),
					   rte_pktmbuf_pool_init, NULL,
					   rte_pktmbuf_init, 0,
					   socket_id, 0);
	PROX_PANIC(task->mempool == NULL, "Failed to allocate memory pool with %u elements\n", 4*1024 - 1);
	int pop = lua_getfrom(prox_lua(), GLOBAL, targ->streams);
	PROX_PANIC(pop < 0, "Failed to find '%s' in lua\n", targ->streams);

	lua_len(prox_lua(), -1);
	uint32_t n_listen = lua_tointeger(prox_lua(), -1);
	lua_pop(prox_lua(), 1);
	PROX_PANIC(n_listen == 0, "No services specified to listen on\n");

	task->bundle_cfgs = prox_zmalloc(n_listen * sizeof(task->bundle_cfgs[0]), socket_id);

	plogx_info("n_listen = %d\n", n_listen);

	struct hash_set *hs = prox_sh_find_socket(socket_id, "genl4_streams");
	if (hs == NULL) {
		/* Expected number of streams per bundle = 1, hash_set
		   will grow if full. */
		hs = hash_set_create(n_listen, socket_id);
		prox_sh_add_socket(socket_id, "genl4_streams", hs);
	}

	const struct rte_hash_parameters listen_table = {
		.name = name,
		.entries = n_listen * 4,
		.key_len = sizeof(struct new_tuple),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = socket_id,
	};
	name[0]++;

	task->listen_hash = rte_hash_create(&listen_table);
	task->listen_entries = prox_zmalloc(listen_table.entries * sizeof(task->listen_entries[0]), socket_id);

	int idx = 0;
	lua_pushnil(prox_lua());
	while (lua_next(prox_lua(), -2)) {
		task->bundle_cfgs[idx].n_stream_cfgs = 1;
		task->bundle_cfgs[idx].stream_cfgs = prox_zmalloc(sizeof(*task->bundle_cfgs[idx].stream_cfgs), socket_id);
		int ret = lua_to_stream_cfg(prox_lua(), STACK, NULL, socket_id, &task->bundle_cfgs[idx].stream_cfgs[0], hs);
		PROX_PANIC(ret, "Failed to load stream cfg\n");
		struct stream_cfg *stream = task->bundle_cfgs[idx].stream_cfgs[0];

		// TODO: check mask and add to hash for each host
		struct new_tuple nt = {
			.dst_addr = stream->servers.ip,
			.proto_id = stream->proto,
			.dst_port = stream->servers.port,
			.l2_types[0] = 0x0008,
		};

		ret = rte_hash_add_key(task->listen_hash, &nt);
		PROX_PANIC(ret < 0, "Failed to add\n");

		task->listen_entries[ret] = &task->bundle_cfgs[idx];

		plogx_dbg("Server = "IPv4_BYTES_FMT":%d\n", IPv4_BYTES(((uint8_t*)&nt.dst_addr)), rte_bswap16(nt.dst_port));
		++idx;
		lua_pop(prox_lua(), 1);
	}

	static char name2[] = "task_gen_hash2";

	name2[0]++;
	plogx_dbg("Creating bundle ctx pool\n");
	if (bundle_ctx_pool_create(name2, targ->n_concur_conn * 2, &task->bundle_ctx_pool, NULL, 0, NULL, socket_id)) {
		cmd_mem_stats();
		PROX_PANIC(1, "Failed to create conn_ctx_pool\n");
	}

	task->heap = heap_create(targ->n_concur_conn * 2, socket_id);
	task->seed = rte_rdtsc();

	/* TODO: calculate the CDF of the reply distribution and the
	   number of replies as the number to cover for 99% of the
	   replies. For now, assume that this is number is 2. */
	uint32_t queue_size = rte_align32pow2(targ->n_concur_conn * 2);

	PROX_PANIC(queue_size == 0, "Overflow resulted in queue size 0\n");
	task->fqueue = fqueue_create(queue_size, socket_id);
	PROX_PANIC(task->fqueue == NULL, "Failed to allocate local queue\n");

	uint32_t n_descriptors;

	if (targ->nb_txports) {
		PROX_PANIC(targ->nb_txports != 1, "Need exactly one TX port for L4 generation\n");
		n_descriptors = prox_port_cfg[targ->tx_port_queue[0].port].n_txd;
	} else {
		PROX_PANIC(targ->nb_txrings != 1, "Need exactly one TX ring for L4 generation\n");
		n_descriptors = 256;
	}

	struct token_time_cfg tt_cfg = {
		.bpp = targ->rate_bps,
		.period = rte_get_tsc_hz(),
		.bytes_max = n_descriptors * (ETHER_MIN_LEN + 20),
	};

	token_time_init(&task->token_time, &tt_cfg);
}

static void init_task_gen_client(struct task_base *tbase, struct task_args *targ)
{
	struct task_gen_client *task = (struct task_gen_client *)tbase;
	static char name[] = "gen_pool";
	const uint32_t socket = rte_lcore_to_socket_id(targ->lconf->id);
	name[0]++;
	task->mempool = rte_mempool_create(name,
					   4*1024 - 1, MBUF_SIZE,
					   targ->nb_cache_mbuf,
					   sizeof(struct rte_pktmbuf_pool_private),
					   rte_pktmbuf_pool_init, NULL,
					   rte_pktmbuf_init, 0,
					   socket, 0);
	PROX_PANIC(task->mempool == NULL, "Failed to allocate memory pool with %u elements\n", 4*1024 - 1);

	/* streams contains a lua table. Go through it and read each
	   stream with associated imix_fraction. */
	uint32_t imix;
	uint32_t i = 0;

	int pop = lua_getfrom(prox_lua(), GLOBAL, targ->streams);
	PROX_PANIC(pop < 0, "Failed to find '%s' in lua\n", targ->streams);

	lua_len(prox_lua(), -1);
	uint32_t n_bundle_cfgs = lua_tointeger(prox_lua(), -1);
	lua_pop(prox_lua(), 1);
	PROX_PANIC(n_bundle_cfgs == 0, "No configs specified\n");
	plogx_info("loading %d bundle_cfgs\n", n_bundle_cfgs);

	struct hash_set *hs = prox_sh_find_socket(socket, "genl4_streams");
	if (hs == NULL) {
		/* Expected number of streams per bundle = 8, hash_set
		   will grow if full. */
		hs = hash_set_create(n_bundle_cfgs * 8, socket);
		prox_sh_add_socket(socket, "genl4_streams", hs);
	}

	task->bundle_cfgs = prox_zmalloc(n_bundle_cfgs * sizeof(task->bundle_cfgs[0]), socket);
	lua_pushnil(prox_lua());

	int total_imix = 0;

	uint32_t *occur = prox_zmalloc(n_bundle_cfgs * sizeof(*occur), socket);
	struct cdf *cdf = cdf_create(n_bundle_cfgs, socket);

	while (lua_next(prox_lua(), -2)) {
		PROX_PANIC(lua_to_int(prox_lua(), TABLE, "imix_fraction", &imix) ||
			   lua_to_bundle_cfg(prox_lua(), TABLE, "bundle", socket, &task->bundle_cfgs[i], hs),
			   "Failed to load bundle cfg:\n%s\n", get_lua_to_errors());
		cdf_add(cdf, imix);
		occur[i] = imix;
		total_imix += imix;
		++i;
		lua_pop(prox_lua(), 1);
	}

	lua_pop(prox_lua(), pop);
	cdf_setup(cdf);

	PROX_PANIC(targ->max_setup_rate == 0, "Max setup rate not set\n");

	task->new_conn_cost = rte_get_tsc_hz()/targ->max_setup_rate;

	static char name2[] = "task_gen_hash";
	name2[0]++;
	plogx_dbg("Creating bundle ctx pool\n");
	if (bundle_ctx_pool_create(name2, targ->n_concur_conn, &task->bundle_ctx_pool, occur, n_bundle_cfgs, task->bundle_cfgs, socket)) {
		cmd_mem_stats();
		PROX_PANIC(1, "Failed to create conn_ctx_pool\n");
	}

	task->heap = heap_create(targ->n_concur_conn, socket);
	task->seed = rte_rdtsc();
	/* task->token_time.bytes_max = MAX_PKT_BURST * (ETHER_MAX_LEN + 20); */

	/* To avoid overflowing the tx descriptors, the token bucket
	   size needs to be limited. The descriptors are filled most
	   quickly with the smallest packets. For that reason, the
	   token bucket size is given by "number of tx descriptors" *
	   "smallest Ethernet packet". */
	PROX_ASSERT(targ->nb_txports == 1);

	struct token_time_cfg tt_cfg = {
		.bpp = targ->rate_bps,
		.period = rte_get_tsc_hz(),
		.bytes_max = prox_port_cfg[targ->tx_port_queue[0].port].n_txd * (ETHER_MIN_LEN + 20),
	};

	token_time_init(&task->token_time, &tt_cfg);
}
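
/* The sizing comment above can be made concrete: with ETHER_MIN_LEN
 * (64 bytes) plus 20 bytes of on-wire framing per frame, each TX
 * descriptor accounts for 84 bytes of tokens, so the ring-fed server
 * case above (256 descriptors) caps the bucket at 256 * 84 = 21504
 * bytes. A one-line helper expressing the same bound:
 */
static uint64_t token_bucket_bytes_max(uint32_t n_txd)
{
	return (uint64_t)n_txd * (ETHER_MIN_LEN + 20);
}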

static void start_task_gen_client(struct task_base *tbase)
{
	struct task_gen_client *task = (struct task_gen_client *)tbase;

	token_time_reset(&task->token_time, rte_rdtsc(), 0);

	task->new_conn_tokens = 0;
	task->new_conn_last_tsc = rte_rdtsc();
}

static void stop_task_gen_client(struct task_base *tbase)
{
	struct task_gen_client *task = (struct task_gen_client *)tbase;
	struct bundle_ctx *bundle;

	while (!heap_is_empty(task->heap)) {
		bundle = BUNDLE_CTX_UPCAST(heap_pop(task->heap));
		bundle_expire(bundle, &task->bundle_ctx_pool, &task->l4_stats);
	}
}

static void start_task_gen_server(struct task_base *tbase)
{
	struct task_gen_server *task = (struct task_gen_server *)tbase;

	token_time_reset(&task->token_time, rte_rdtsc(), 0);
}

static void stop_task_gen_server(struct task_base *tbase)
{
	struct task_gen_server *task = (struct task_gen_server *)tbase;
	struct bundle_ctx *bundle;
	uint8_t out[MAX_PKT_BURST];

	while (!heap_is_empty(task->heap)) {
		bundle = BUNDLE_CTX_UPCAST(heap_pop(task->heap));
		bundle_expire(bundle, &task->bundle_ctx_pool, &task->l4_stats);
	}

	if (task->cancelled) {
		struct rte_mbuf *mbuf = task->mbuf_saved;

		out[0] = OUT_DISCARD;
		task->cancelled = 0;
		task->base.tx_pkt(&task->base, &mbuf, 1, out);
	}

	do {
		if (task->cur_mbufs_beg == task->cur_mbufs_end) {
			task->cur_mbufs_end = fqueue_get(task->fqueue, task->cur_mbufs, MAX_PKT_BURST);
			task->cur_mbufs_beg = 0;
			if (task->cur_mbufs_end == 0)
				break;
		}
		uint16_t n_pkts = task->cur_mbufs_end - task->cur_mbufs_beg;
		struct rte_mbuf **mbufs = task->cur_mbufs + task->cur_mbufs_beg;

		if (n_pkts) {
			for (uint16_t j = 0; j < n_pkts; ++j) {
				out[j] = OUT_DISCARD;
			}
			task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
		}
	} while (1);
}

static struct task_init task_init_gen1 = {
	.mode_str = "genl4",
	.sub_mode_str = "server",
	.init = init_task_gen,
	.handle = handle_gen_bulk,
	.start = start_task_gen_server,
	.stop = stop_task_gen_server,
	.flag_features = TASK_FEATURE_ZERO_RX,
	.size = sizeof(struct task_gen_server),
	.mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};

static struct task_init task_init_gen2 = {
	.mode_str = "genl4",
	.init = init_task_gen_client,
	.handle = handle_gen_bulk_client,
	.start = start_task_gen_client,
	.stop = stop_task_gen_client,
	.flag_features = TASK_FEATURE_ZERO_RX,
	.size = sizeof(struct task_gen_client),
	.mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};

__attribute__((constructor)) static void reg_task_gen(void)
{
	reg_task(&task_init_gen1);
	reg_task(&task_init_gen2);
}
Example No. 24
void
pktgen_config_ports(void)
{
	uint32_t lid, pid, i, s, q, sid;
	rxtx_t rt;
	pkt_seq_t   *pkt;
	port_info_t     *info;
	char buff[RTE_MEMZONE_NAMESIZE];
	int32_t ret, cache_size;
	char output_buff[256] = { 0 };

	/* Find out the total number of ports in the system. */
	/* We have already blacklisted the ones we needed to in main routine. */
	pktgen.nb_ports = rte_eth_dev_count();
	if (pktgen.nb_ports > RTE_MAX_ETHPORTS)
		pktgen.nb_ports = RTE_MAX_ETHPORTS;

	if (pktgen.nb_ports == 0)
		pktgen_log_panic("*** Did not find any ports to use ***");

	pktgen.starting_port = 0;

	/* Setup the number of ports to display at a time */
	if (pktgen.nb_ports > pktgen.nb_ports_per_page)
		pktgen.ending_port = pktgen.starting_port + pktgen.nb_ports_per_page;
	else
		pktgen.ending_port = pktgen.starting_port + pktgen.nb_ports;

	wr_port_matrix_dump(pktgen.l2p);

	pktgen_log_info("Configuring %d ports, MBUF Size %d, MBUF Cache Size %d",
	                pktgen.nb_ports, MBUF_SIZE, MBUF_CACHE_SIZE);

	/* For each lcore setup each port that is handled by that lcore. */
	for (lid = 0; lid < RTE_MAX_LCORE; lid++) {

		if (wr_get_map(pktgen.l2p, RTE_MAX_ETHPORTS, lid) == 0)
			continue;

		/* For each port attached or handled by the lcore */
		for (pid = 0; pid < pktgen.nb_ports; pid++) {

			/* If non-zero then this port is handled by this lcore. */
			if (wr_get_map(pktgen.l2p, pid, lid) == 0)
				continue;
			wr_set_port_private(pktgen.l2p, pid, &pktgen.info[pid]);
			pktgen.info[pid].pid = pid;
		}
	}
	wr_dump_l2p(pktgen.l2p);

	pktgen.total_mem_used = 0;

	for (pid = 0; pid < pktgen.nb_ports; pid++) {
		/* Skip if we do not have any lcores attached to a port. */
		if ( (rt.rxtx = wr_get_map(pktgen.l2p, pid, RTE_MAX_LCORE)) == 0)
			continue;

		pktgen.port_cnt++;
		snprintf(output_buff, sizeof(output_buff),
		         "Initialize Port %d -- TxQ %d, RxQ %d", pid, rt.tx, rt.rx);

		info = wr_get_port_private(pktgen.l2p, pid);

		info->fill_pattern_type  = ABC_FILL_PATTERN;
		strncpy(info->user_pattern, "0123456789abcdef", USER_PATTERN_SIZE);

		rte_spinlock_init(&info->port_lock);

		/* Create the pkt header structures for transmitting sequence of packets. */
		snprintf(buff, sizeof(buff), "seq_hdr_%d", pid);
		info->seq_pkt = (pkt_seq_t *)rte_zmalloc_socket(buff, (sizeof(pkt_seq_t) * NUM_TOTAL_PKTS),
														RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (info->seq_pkt == NULL)
			pktgen_log_panic("Unable to allocate %d pkt_seq_t headers", NUM_TOTAL_PKTS);

		info->seqIdx    = 0;
		info->seqCnt    = 0;

		info->nb_mbufs  = MAX_MBUFS_PER_PORT;
		cache_size = (info->nb_mbufs > RTE_MEMPOOL_CACHE_MAX_SIZE) ?
		        RTE_MEMPOOL_CACHE_MAX_SIZE : info->nb_mbufs;

		pktgen_port_conf_setup(pid, &rt, &default_port_conf);

		if ( (ret = rte_eth_dev_configure(pid, rt.rx, rt.tx, &info->port_conf)) < 0)
			pktgen_log_panic("Cannot configure device: port=%d, Num queues %d,%d (%d)%s",
			                 pid, rt.rx, rt.tx, errno, rte_strerror(-ret));

		pkt = &info->seq_pkt[SINGLE_PKT];

		/* Grab the source MAC address */
		rte_eth_macaddr_get(pid, &pkt->eth_src_addr);
		pktgen_log_info("%s,  Src MAC %02x:%02x:%02x:%02x:%02x:%02x", output_buff,
		                pkt->eth_src_addr.addr_bytes[0],
		                pkt->eth_src_addr.addr_bytes[1],
		                pkt->eth_src_addr.addr_bytes[2],
		                pkt->eth_src_addr.addr_bytes[3],
		                pkt->eth_src_addr.addr_bytes[4],
		                pkt->eth_src_addr.addr_bytes[5]);

		/* Copy the first Src MAC address in SINGLE_PKT to the rest of the sequence packets. */
		for (i = 0; i < NUM_SEQ_PKTS; i++)
			ethAddrCopy(&info->seq_pkt[i].eth_src_addr, &pkt->eth_src_addr);

		pktgen.mem_used = 0;

		for (q = 0; q < rt.rx; q++) {
			/* grab the socket id value based on the lcore being used. */
			sid     = rte_lcore_to_socket_id(wr_get_port_lid(pktgen.l2p, pid, q));

			/* Create and initialize the default Receive buffers. */
			info->q[q].rx_mp = pktgen_mbuf_pool_create("Default RX", pid, q, info->nb_mbufs, sid, cache_size);
			if (info->q[q].rx_mp == NULL)
				pktgen_log_panic("Cannot init port %d for Default RX mbufs", pid);

			ret = rte_eth_rx_queue_setup(pid, q, pktgen.nb_rxd, sid, &info->rx_conf, pktgen.info[pid].q[q].rx_mp);
			if (ret < 0)
				pktgen_log_panic("rte_eth_rx_queue_setup: err=%d, port=%d, %s", ret, pid, rte_strerror(-ret));
		}
		pktgen_log_info("");

		for (q = 0; q < rt.tx; q++) {
			/* grab the socket id value based on the lcore being used. */
			sid     = rte_lcore_to_socket_id(wr_get_port_lid(pktgen.l2p, pid, q));

			/* Create and initialize the default Transmit buffers. */
			info->q[q].tx_mp = pktgen_mbuf_pool_create("Default TX", pid, q, MAX_MBUFS_PER_PORT, sid, cache_size);
			if (info->q[q].tx_mp == NULL)
				pktgen_log_panic("Cannot init port %d for Default TX mbufs", pid);

			/* Create and initialize the range Transmit buffers. */
			info->q[q].range_mp = pktgen_mbuf_pool_create("Range TX", pid, q, MAX_MBUFS_PER_PORT,   sid, 0);
			if (info->q[q].range_mp == NULL)
				pktgen_log_panic("Cannot init port %d for Range TX mbufs", pid);

			/* Create and initialize the sequence Transmit buffers. */
			info->q[q].seq_mp = pktgen_mbuf_pool_create("Sequence TX", pid, q, MAX_MBUFS_PER_PORT, sid, cache_size);
			if (info->q[q].seq_mp == NULL)
				pktgen_log_panic("Cannot init port %d for Sequence TX mbufs", pid);

			/* Used for sending special packets like ARP requests */
			info->q[q].special_mp = pktgen_mbuf_pool_create("Special TX", pid, q, MAX_SPECIAL_MBUFS, sid, 0);
			if (info->q[q].special_mp == NULL)
				pktgen_log_panic("Cannot init port %d for Special TX mbufs", pid);

			/* Setup the PCAP file for each port */
			if (pktgen.info[pid].pcap != NULL)
				if (pktgen_pcap_parse(pktgen.info[pid].pcap, info, q) == -1)
					pktgen_log_panic("Cannot load PCAP file for port %d", pid);
			/* Find out the link speed to program the WTHRESH value correctly. */
			pktgen_get_link_status(info, pid, 0);

			ret = rte_eth_tx_queue_setup(pid, q, pktgen.nb_txd, sid, &info->tx_conf);
			if (ret < 0)
				pktgen_log_panic("rte_eth_tx_queue_setup: err=%d, port=%d, %s", ret, pid, rte_strerror(-ret));
			pktgen_log_info("");
		}
		pktgen_log_info("%*sPort memory used = %6lu KB", 71, " ", (pktgen.mem_used + 1023) / 1024);
	}
	pktgen_log_info("%*sTotal memory used = %6lu KB", 70, " ", (pktgen.total_mem_used + 1023) / 1024);

	/* Start up the ports and display the port Link status */
	for (pid = 0; pid < pktgen.nb_ports; pid++) {
		if (wr_get_map(pktgen.l2p, pid, RTE_MAX_LCORE) == 0)
			continue;

		info = wr_get_port_private(pktgen.l2p, pid);

		/* Start device */
		if ( (ret = rte_eth_dev_start(pid)) < 0)
			pktgen_log_panic("rte_eth_dev_start: port=%d, %s", pid, rte_strerror(-ret));

		pktgen_get_link_status(info, pid, 1);

		if (info->link.link_status)
			snprintf(output_buff, sizeof(output_buff), "Port %2d: Link Up - speed %u Mbps - %s", pid,
			         (uint32_t)info->link.link_speed,
			         (info->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
			         ("full-duplex") : ("half-duplex"));
		else
			snprintf(output_buff, sizeof(output_buff), "Port %2d: Link Down", pid);

		/* If enabled, put device in promiscuous mode. */
		if (pktgen.flags & PROMISCUOUS_ON_FLAG) {
			strncatf(output_buff, " <Enable promiscuous mode>");
			rte_eth_promiscuous_enable(pid);
		}

		pktgen_log_info("%s", output_buff);
		pktgen.info[pid].seq_pkt[SINGLE_PKT].pktSize = MIN_PKT_SIZE;

		/* Setup the port and packet defaults. (must be after link speed is found) */
		for (s = 0; s < NUM_TOTAL_PKTS; s++)
			pktgen_port_defaults(pid, s);

		pktgen_range_setup(info);

		rte_eth_stats_get(pid, &info->init_stats);

		pktgen_rnd_bits_init(&pktgen.info[pid].rnd_bitfields);
	}

	/* Clear the log information by putting a blank line */
	pktgen_log_info("");

	/* Setup the packet capture per port if needed. */
	for (sid = 0; sid < wr_coremap_cnt(pktgen.core_info, pktgen.core_cnt, 0); sid++)
		pktgen_packet_capture_init(&pktgen.capture[sid], sid);
}
Example No. 25
static int dpdk_main(int port_id, int argc, char* argv[])
{
    struct rte_eth_dev_info dev_info;
    unsigned nb_queues;
    FILE* lfile;
    uint8_t core_id;
    int ret;

    printf("In dpdk_main\n");

    // Open the log file
    lfile = fopen("./vrouter.log", "w");

    // Program the rte log
    rte_openlog_stream(lfile);

    ret = rte_eal_init(argc, argv);
    if (ret < 0) {
		log_crit( "Invalid EAL parameters\n");
        return -1;
    }

    log_info( "Programming cmd rings now!\n");
    rx_event_fd = (int *) malloc(sizeof(int) * rte_lcore_count());
    if (!rx_event_fd) {
        log_crit("Failed to allocate memory for rx event fd arrays\n");
        return -ENOMEM;
    }

    rte_eth_macaddr_get(port_id, &port_eth_addr);
    log_info("Port%d: MAC Address: ", port_id);
    print_ethaddr(&port_eth_addr);


    /* Determine the number of RX/TX pairs supported by NIC */
    rte_eth_dev_info_get(port_id, &dev_info);

    dev_info.pci_dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_MSIX;
    dev_info.pci_dev->intr_handle.max_intr =
                    dev_info.max_rx_queues + dev_info.max_tx_queues;
    ret = rte_intr_efd_enable(&dev_info.pci_dev->intr_handle,
            dev_info.max_rx_queues);
    if (ret < 0) {
        rte_exit(EXIT_FAILURE, "Failed to enable rx interrupts\n");
    }

    ret = rte_intr_enable(&dev_info.pci_dev->intr_handle);
    if (ret < 0) {
        rte_exit(EXIT_FAILURE, "Failed to enable interrupts\n");
    }

    ret = rte_eth_dev_configure(port_id, dev_info.max_rx_queues,
                dev_info.max_tx_queues, &port_conf);
    if (ret < 0) {
        rte_exit(EXIT_FAILURE, "Failed to configure ethernet device\n");
    }

    /* For each RX/TX pair */
    nb_queues = dev_info.max_tx_queues;
    for (core_id = 0; core_id < nb_queues; core_id++) {
        char s[64];
        if (rte_lcore_is_enabled(core_id) == 0)
            continue;

        /* NUMA socket number */
        unsigned socketid = rte_lcore_to_socket_id(core_id);
        if (socketid >= NB_SOCKETS) {
            log_crit( "Socket %d of lcore %u is out of range %d\n",
				socketid, core_id, NB_SOCKETS);
            return -EBADF;
        }

        /* Create memory pool */
        if (pktmbuf_pool[socketid] == NULL) {
            log_info("Creating mempool on %d of ~%lx bytes\n",
                            socketid, NB_MBUF * MBUF_SIZE);
            printf("Creating mempool on %d of ~%lx bytes\n",
                        socketid, NB_MBUF * MBUF_SIZE);
            snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
            pktmbuf_pool[socketid] = rte_mempool_create(s,
                                                        NB_MBUF,
                                                        MBUF_SIZE,
                                                        MEMPOOL_CACHE_SIZE,
                                                        PKTMBUF_PRIV_SZ,
                                                        rte_pktmbuf_pool_init,
                                                        NULL,
                                                        rte_pktmbuf_init,
                                                        NULL,
                                                        socketid,
                                                        0);
            if (!pktmbuf_pool[socketid]) {
                log_crit( "Cannot init mbuf pool on socket %d\n", socketid);
                return -ENOMEM;
            }
        }

        /* Setup the TX queue */
        ret = rte_eth_tx_queue_setup(port_id,
                                     core_id,
                                     RTE_TX_DESC_DEFAULT,
                                     socketid,
                                     &tx_conf);
        if (ret < 0) {
            log_crit( "Cannot initialize TX queue (%d)\n", core_id);
            return -ENODEV;
        }

        /* Setup the RX queue */
        ret = rte_eth_rx_queue_setup(port_id,
                                     core_id,
                                     RTE_RX_DESC_DEFAULT,
                                     socketid,
                                     &rx_conf,
                                     pktmbuf_pool[socketid]);
        if (ret < 0) {
            log_crit( "Cannot initialize RX queue (%d)\n", core_id);
            return -ENODEV;
        }

        /* Create the event fds for event notification */
        lcore_cmd_event_fd[core_id] = eventfd(0, 0);
    }

    // Start the eth device
    ret = rte_eth_dev_start(port_id);
    if (ret < 0) {
        log_crit( "rte_eth_dev_start: err=%d, port=%d\n", ret, port_id);
        return -ENODEV;
    }

    // Put the device in promiscuous mode
    rte_eth_promiscuous_enable(port_id);

    // Wait for link up
    //check_all_ports_link_status(1, 1u << port_id);

    log_info( "Starting engines on every core\n");

    rte_eal_mp_remote_launch(engine_loop, &dev_info, CALL_MASTER);

    return 0;
}
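
/* The per-core eventfds created above (lcore_cmd_event_fd[core_id])
 * are the wakeup channel between the control path and the engine
 * loops; engine_loop() itself is not part of this example. A minimal
 * sketch of the signal/wait pattern on an eventfd (names assumed,
 * illustrative only):
 */
static void notify_core(int efd)
{
    uint64_t one = 1;

    write(efd, &one, sizeof(one)); /* bump counter, wake the reader */
}

static void wait_for_cmd(int efd)
{
    uint64_t cnt;

    read(efd, &cnt, sizeof(cnt)); /* blocks until notify_core() runs */
}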
Example No. 26
int main(int argc, char ** argv)
{
    int ret, socket;
    unsigned pid, nb_ports, lcore_id, rx_lcore_id;
    struct sock_parameter sk_param;
    struct sock *sk;
    struct txrx_queue *rxq;
    struct port_queue_conf *port_q;
    struct lcore_queue_conf *lcore_q;

    ret = rte_eal_init(argc, argv);
    if (ret < 0)
        return -1;
    argc -= ret;
    argv += ret;

    /* parse gw ip and mac from cmdline */
    if (argc > 1) {
        default_host_addr = argv[1];
        if (argc == 3) {
            default_gw_addr = argv[2];
        } else if (argc == 4) {
            default_gw_addr = argv[2];
            default_gw_mac = argv[3];
        } else if (argc != 2) {
            rte_exit(EXIT_FAILURE, "invalid arguments\n");
        }
    }

    /*config nic*/
    nb_ports = rte_eth_dev_count();
    if (nb_ports == 0)
        rte_exit(EXIT_FAILURE, "No available NIC\n");
    for (pid = 0; pid < nb_ports; pid++) {
        ret = net_device_init(pid);
        if (ret) {
            RTE_LOG(WARNING, LDNS, "fail to initialize port %u\n", pid);
            goto release_net_device;
        }
    }
    pkt_rx_pool = rte_pktmbuf_pool_create("ldns rx pkt pool",
            PKT_RX_NB,
            32,
            0,
            RTE_MBUF_DEFAULT_BUF_SIZE,
            rte_socket_id());
    if (pkt_rx_pool == NULL)
        rte_exit(EXIT_FAILURE, "cannot alloc rx_mbuf_pool");
    
    /*sock create*/
    sk_param.mode = SOCK_MODE_COMPLETE;
    sk_param.func = dns_process;
    sk = create_sock(0, SOCK_PTOTO_IPPROTO_UDP, &sk_param);
    if (sk == NULL)
        rte_exit(EXIT_FAILURE, "cannot create sock\n");
    if (sock_bind(sk, inet_network(default_host_addr), DNS_PORT))
        rte_exit(EXIT_FAILURE, "cannot bind addr:%s port:%u",
                default_host_addr, DNS_PORT);

    /*init ethdev*/
    rx_lcore_id = 0;
    lcore_q = lcore_q_conf_get(rx_lcore_id);
    for (pid = 0; pid < nb_ports; pid++) {
        port_q = port_q_conf_get(pid);
        ret = rte_eth_dev_configure(pid, rx_rings, tx_rings, &default_rte_eth_conf);
        if (ret != 0)
            rte_exit(EXIT_FAILURE, "port %u configure error\n", pid);

        while (rx_lcore_id == rte_get_master_lcore()
                || !rte_lcore_is_enabled(rx_lcore_id)
                || lcore_q->nb_rxq == nb_rx_queue_per_core) {
            rx_lcore_id++;
            if (rx_lcore_id == RTE_MAX_LCORE)
                rte_exit(EXIT_FAILURE, "not enough core for port %u\n", pid);
            lcore_q = lcore_q_conf_get(rx_lcore_id);
        }

        rxq = &lcore_q->rxq[lcore_q->nb_rxq];
        rxq->port = pid;
        rxq->lcore = rx_lcore_id;
        rxq->qid = port_q->nb_rxq;
        lcore_q->nb_rxq++;
        port_q->nb_rxq++;

        socket = rte_lcore_to_socket_id(rx_lcore_id);
        if (socket == SOCKET_ID_ANY)
            socket = 0;

        ret = rte_eth_tx_queue_setup(pid, rxq->qid, nb_txd, socket, NULL);
        if (ret < 0)
            rte_exit(EXIT_FAILURE, "fail to setup txq %u on port %u",
                    rxq->qid, pid);
        ret = rte_eth_rx_queue_setup(pid, rxq->qid, nb_rxd, socket, NULL, pkt_rx_pool);
        if (ret < 0)
            rte_exit(EXIT_FAILURE, "fail to setup rxq %u on port %u",
                    rxq->qid, pid);

        ret = rte_eth_dev_start(pid);
        if (ret < 0)
            rte_exit(EXIT_FAILURE, "fail to start port %u\n", pid);
    }

    if (dns_set_cfg(&default_dns_cfg))
        rte_exit(EXIT_FAILURE, "fail to set dns configuration\n");

    rte_eal_mp_remote_launch(packet_launch_one_lcore, NULL, SKIP_MASTER);
    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
        if (rte_eal_wait_lcore(lcore_id) < 0)
            return -1;
    }

    return 0;

release_net_device:
    for (; pid != 0; pid--) {
        net_device_release(pid - 1);
    }
    return -1;
}