Example #1
/**
 * CALLED BY NF:
 * Initialises everything we need.
 *
 * Returns the total number of arguments parsed by rte_eal_init and
 * parse_nflib_args, offset by 1.  The NF applies this value to argc/argv
 * before its own getopt call (see the usage sketch after this function).
 * The offset of one accounts for getopt parsing "--", which increments
 * optind by 1 each time.
 */
int
onvm_nf_init(int argc, char *argv[], const char *nf_tag) {
        const struct rte_memzone *mz;
        const struct rte_memzone *mz_scp;
        struct rte_mempool *mp;
        struct onvm_service_chain **scp;
        int retval_eal, retval_parse, retval_final;

        if ((retval_eal = rte_eal_init(argc, argv)) < 0)
                return -1;

        /* Modify argc and argv to conform to getopt rules for parse_nflib_args */
        argc -= retval_eal; argv += retval_eal;

        /* Reset getopt global variables opterr and optind to their default values */
        opterr = 0; optind = 1;

        if ((retval_parse = parse_nflib_args(argc, argv)) < 0)
                rte_exit(EXIT_FAILURE, "Invalid command-line arguments\n");

        /*
         * Calculate the offset that the nf will use to modify argc and argv for its
         * getopt call. This is the sum of the number of arguments parsed by
         * rte_eal_init and parse_nflib_args. This will be decremented by 1 to assure
         * getopt is looking at the correct index since optind is incremented by 1 each
         * time "--" is parsed.
         * This is the value that will be returned if initialization succeeds.
         */
        retval_final = (retval_eal + retval_parse) - 1;

        /* Reset getopt global variables opterr and optind to their default values */
        opterr = 0; optind = 1;

        /* Lookup mempool for nf_info struct */
        nf_info_mp = rte_mempool_lookup(_NF_MEMPOOL_NAME);
        if (nf_info_mp == NULL)
                rte_exit(EXIT_FAILURE, "No Client Info mempool - bye\n");

        /* Initialize the info struct */
        nf_info = ovnm_nf_info_init(nf_tag);

        mp = rte_mempool_lookup(PKTMBUF_POOL_NAME);
        if (mp == NULL)
                rte_exit(EXIT_FAILURE, "Cannot get mempool for mbufs\n");

        mz = rte_memzone_lookup(MZ_CLIENT_INFO);
        if (mz == NULL)
                rte_exit(EXIT_FAILURE, "Cannot get tx info structure\n");
        tx_stats = mz->addr;

        mz_scp = rte_memzone_lookup(MZ_SCP_INFO);
        if (mz_scp == NULL)
                rte_exit(EXIT_FAILURE, "Cannot get service chain info structure\n");
        scp = mz_scp->addr;
        default_chain = *scp;

        onvm_sc_print(default_chain);

        nf_info_ring = rte_ring_lookup(_NF_QUEUE_NAME);
        if (nf_info_ring == NULL)
                rte_exit(EXIT_FAILURE, "Cannot get nf_info ring");

        /* Put this NF's info struct onto queue for manager to process startup */
        if (rte_ring_enqueue(nf_info_ring, nf_info) < 0) {
                rte_mempool_put(nf_info_mp, nf_info); /* give back memory */
                rte_exit(EXIT_FAILURE, "Cannot send nf_info to manager\n");
        }

        /* Wait for a client id to be assigned by the manager */
        RTE_LOG(INFO, APP, "Waiting for manager to assign an ID...\n");
        while (nf_info->status == (uint16_t)NF_WAITING_FOR_ID) {
                sleep(1);
        }

        /* This NF is trying to declare an ID already in use. */
        if (nf_info->status == NF_ID_CONFLICT) {
                rte_mempool_put(nf_info_mp, nf_info);
                rte_exit(NF_ID_CONFLICT, "Selected ID already in use. Exiting...\n");
        } else if (nf_info->status == NF_NO_IDS) {
                rte_mempool_put(nf_info_mp, nf_info);
                rte_exit(NF_NO_IDS, "There are no IDs available for this NF\n");
        } else if (nf_info->status != NF_STARTING) {
                rte_mempool_put(nf_info_mp, nf_info);
                rte_exit(EXIT_FAILURE, "Error occurred during manager initialization\n");
        }
        RTE_LOG(INFO, APP, "Using Instance ID %d\n", nf_info->instance_id);
        RTE_LOG(INFO, APP, "Using Service ID %d\n", nf_info->service_id);

        /* Now, map rx and tx rings into client space */
        rx_ring = rte_ring_lookup(get_rx_queue_name(nf_info->instance_id));
        if (rx_ring == NULL)
                rte_exit(EXIT_FAILURE, "Cannot get RX ring - is server process running?\n");

        tx_ring = rte_ring_lookup(get_tx_queue_name(nf_info->instance_id));
        if (tx_ring == NULL)
                rte_exit(EXIT_FAILURE, "Cannot get TX ring - is server process running?\n");

        /* Tell the manager we're ready to receive packets */
        nf_info->status = NF_RUNNING;

        RTE_LOG(INFO, APP, "Finished Process Init.\n");
        return retval_final;
}
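
The value returned by onvm_nf_init() is meant to be applied directly to the NF's own argc/argv before its getopt() loop, as the header comment describes. Below is a minimal usage sketch; the NF tag "example_nf", the "-d" option and the onvm_nflib.h include are illustrative assumptions rather than part of the listing above.

#include <stdlib.h>          /* atoi */
#include <unistd.h>          /* getopt, optind, optarg */
#include "onvm_nflib.h"      /* assumed header declaring onvm_nf_init() */

int
main(int argc, char *argv[]) {
        int arg_offset, c, destination = -1;

        /* Consumes the EAL arguments, the NF-lib arguments and the "--" separators */
        if ((arg_offset = onvm_nf_init(argc, argv, "example_nf")) < 0)
                return -1;

        /* Skip everything onvm_nf_init() already parsed */
        argc -= arg_offset;
        argv += arg_offset;

        /* getopt now starts at the NF-specific options */
        while ((c = getopt(argc, argv, "d:")) != -1) {
                if (c == 'd')
                        destination = atoi(optarg);   /* hypothetical NF option */
        }
        (void)destination;

        /* ... the NF's packet-handling loop would follow here ... */
        return 0;
}
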
Example #2
int
init(int argc, char *argv[]) {
        int retval;
        const struct rte_memzone *mz_nf;
        const struct rte_memzone *mz_port;
        const struct rte_memzone *mz_cores;
        const struct rte_memzone *mz_scp;
        const struct rte_memzone *mz_services;
        const struct rte_memzone *mz_nf_per_service;
        uint8_t i, total_ports, port_id;

        /* init EAL, parsing EAL args */
        retval = rte_eal_init(argc, argv);
        if (retval < 0)
                return -1;
        argc -= retval;
        argv += retval;

#ifdef RTE_LIBRTE_PDUMP
        rte_pdump_init(NULL);
#endif

        /* get total number of ports */
        total_ports = rte_eth_dev_count_avail();

        /* set up array for NF information */
        mz_nf = rte_memzone_reserve(MZ_NF_INFO, sizeof(*nfs) * MAX_NFS,
                                rte_socket_id(), NO_FLAGS);
        if (mz_nf == NULL)
                rte_exit(EXIT_FAILURE, "Cannot reserve memory zone for nf information\n");
        memset(mz_nf->addr, 0, sizeof(*nfs) * MAX_NFS);
        nfs = mz_nf->addr;

        /* set up ports info */
        mz_port = rte_memzone_reserve(MZ_PORT_INFO, sizeof(*ports),
                                    rte_socket_id(), NO_FLAGS);
        if (mz_port == NULL)
                rte_exit(EXIT_FAILURE, "Cannot reserve memory zone for port information\n");
        ports = mz_port->addr;
  
        /* set up core status */
        mz_cores = rte_memzone_reserve(MZ_CORES_STATUS, sizeof(*cores) * onvm_threading_get_num_cores(),
                                    rte_socket_id(), NO_FLAGS);
        if (mz_cores == NULL)
                rte_exit(EXIT_FAILURE, "Cannot reserve memory zone for core information\n");
        memset(mz_cores->addr, 0, sizeof(*cores) * onvm_threading_get_num_cores());
        cores = mz_cores->addr;

        /* set up 2D array mapping each service ID to the NF instances registered for it */
        mz_services = rte_memzone_reserve(MZ_SERVICES_INFO, sizeof(uint16_t *) * num_services, rte_socket_id(), NO_FLAGS);
        if (mz_services == NULL)
                rte_exit(EXIT_FAILURE, "Cannot reserve memory zone for services information\n");
        services = mz_services->addr;
        for (i = 0; i < num_services; i++) {
                services[i] = rte_calloc("one service NFs",
                        MAX_NFS_PER_SERVICE, sizeof(uint16_t), 0);
                if (services[i] == NULL)
                        rte_exit(EXIT_FAILURE, "Cannot allocate memory for services list\n");
        }
        mz_nf_per_service = rte_memzone_reserve(MZ_NF_PER_SERVICE_INFO, sizeof(uint16_t) * num_services, rte_socket_id(), NO_FLAGS);
        if (mz_nf_per_service == NULL) {
                rte_exit(EXIT_FAILURE, "Cannot reserve memory zone for NF per service information.\n");
        }
        nf_per_service_count = mz_nf_per_service->addr;

        /* parse additional, application arguments */
        retval = parse_app_args(total_ports, argc, argv);
        if (retval != 0)
                return -1;

        /* initialise mbuf pools */
        retval = init_mbuf_pools();
        if (retval != 0)
                rte_exit(EXIT_FAILURE, "Cannot create needed mbuf pools\n");

        /* initialise nf info pool */
        retval = init_nf_info_pool();
        if (retval != 0) {
                rte_exit(EXIT_FAILURE, "Cannot create nf info mbuf pool: %s\n", rte_strerror(rte_errno));
        }

        /* initialise pool for NF messages */
        retval = init_nf_msg_pool();
        if (retval != 0) {
                rte_exit(EXIT_FAILURE, "Cannot create nf message pool: %s\n", rte_strerror(rte_errno));
        }

        /* now initialise the ports we will use */
        for (i = 0; i < ports->num_ports; i++) {
                port_id = ports->id[i];
                rte_eth_macaddr_get(port_id, &ports->mac[port_id]);
                retval = init_port(port_id);
                if (retval != 0)
                        rte_exit(EXIT_FAILURE, "Cannot initialise port %u\n", port_id);
                char event_msg_buf[32];
                snprintf(event_msg_buf, sizeof(event_msg_buf), "Port %d initialized", port_id);
                onvm_stats_add_event(event_msg_buf, NULL);
        }

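        /* check link status on the configured ports (waits briefly for links to come up) */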
        check_all_ports_link_status(ports->num_ports, (~0x0));

        /* initialise the NF queues/rings used for communication between the manager and NFs */
        init_shm_rings();

        /* initialise a queue for newly created NFs (the startup handshake it supports is sketched after this function) */
        init_info_queue();

        /* initialize a default service chain */
        default_chain = onvm_sc_create();
        retval = onvm_sc_append_entry(default_chain, ONVM_NF_ACTION_TONF, 1);
        if (retval == ENOSPC) {
                printf("chain length can not be larger than the maximum chain length\n");
                exit(1);
        }
        printf("Default service chain: send to sdn NF\n");

        /* set up the service chain pointer shared with NFs */
        mz_scp = rte_memzone_reserve(MZ_SCP_INFO, sizeof(struct onvm_service_chain *),
                                   rte_socket_id(), NO_FLAGS);
        if (mz_scp == NULL)
                rte_exit(EXIT_FAILURE, "Canot reserve memory zone for service chain pointer\n");
        memset(mz_scp->addr, 0, sizeof(struct onvm_service_chain *));
        default_sc_p = mz_scp->addr;
        *default_sc_p = default_chain;
        onvm_sc_print(default_chain);

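        /* initialise the flow director table that maps flows to service chains */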
        onvm_flow_dir_init();

        return 0;
}
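
For context on how the two listings interact: init_info_queue() in Example #2 creates the _NF_QUEUE_NAME ring that onvm_nf_init() in Example #1 enqueues its nf_info struct onto, and the manager answers by filling in an instance ID and changing the status field that the NF polls. The fragment below is a minimal sketch of the manager-side half of that handshake, assuming the nf_info objects are of type struct onvm_nf_info as in onvm_nflib; handle_nf_startup() and next_free_instance_id() are hypothetical names, not functions from the listings above.

#include <rte_ring.h>
#include "onvm_common.h"     /* assumed header with struct onvm_nf_info and the NF_* statuses */

static uint16_t next_free_instance_id(void);   /* hypothetical ID allocator, not shown */

static void
handle_nf_startup(struct rte_ring *incoming_ring) {
        struct onvm_nf_info *info;
        uint16_t id;

        /* Drain registration requests enqueued by onvm_nf_init() */
        while (rte_ring_dequeue(incoming_ring, (void **)&info) == 0) {
                id = next_free_instance_id();
                if (id >= MAX_NFS) {
                        info->status = NF_NO_IDS;      /* NF sees this and exits */
                        continue;
                }
                info->instance_id = id;
                info->status = NF_STARTING;            /* releases the NF's wait loop */
        }
}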