Example #1
void
pktj_lpm_stats_display(struct cmdline *cl, int is_ipv4, int option)
{
	struct lpm_stats_t *stats;

	stats = is_ipv4 ? &lpm4_stats[RTE_PER_LCORE(g_socket_id)]
			: &lpm6_stats[RTE_PER_LCORE(g_socket_id)];

	if (option == CMD_LPM_STATS_JSON) {
		cmdline_printf(cl, "{\"current\": %lu, \"add\": {\"success\": "
				   "%lu, \"failure\": %lu}, \"del\": "
				   "{\"success\": %lu, \"failure\": %lu}}\n",
			       (stats->nb_add_ok - stats->nb_del_ok),
			       stats->nb_add_ok, stats->nb_add_ko,
			       stats->nb_del_ok, stats->nb_del_ko);
	} else {
		cmdline_printf(
		    cl, "\nLPM statistics ====================================="
			"\nCurrent routes: %lu"
			"\nTotal routes added successfully: %lu"
			"\nTotal route add failures: %lu"
			"\nTotal routes deleted successfully: %lu"
			"\nTotal route delete failures: %lu",
		    (stats->nb_add_ok - stats->nb_del_ok), stats->nb_add_ok,
		    stats->nb_add_ko, stats->nb_del_ok, stats->nb_del_ko);
		cmdline_printf(
		    cl,
		    "\n====================================================\n");
	}
}
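For reference, the JSON branch above prints a single line like the following (counter values are invented; "current" is computed as nb_add_ok - nb_del_ok):

{"current": 12, "add": {"success": 20, "failure": 1}, "del": {"success": 8, "failure": 0}}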
Example #2
/*
 * Generates a log message. The message will be sent to the stream
 * defined by the previous call to rte_openlog_stream().
 */
int
rte_vlog(uint32_t level, uint32_t logtype, const char *format, va_list ap)
{
	int ret;
	FILE *f = rte_logs.file;
	if (f == NULL) {
		f = default_log_stream;
		if (f == NULL) {
			/*
			 * Grab the current value of stderr here, rather than
			 * just initializing default_log_stream to stderr. This
			 * ensures that we will always use the current value
			 * of stderr, even if the application closes and
			 * reopens it.
			 */
			f = stderr;
		}
	}

	if (level > rte_logs.level)
		return 0;
	if (logtype >= rte_logs.dynamic_types_len)
		return -1;
	if (level > rte_logs.dynamic_types[logtype].loglevel)
		return 0;

	/* save loglevel and logtype in a global per-lcore variable */
	RTE_PER_LCORE(log_cur_msg).loglevel = level;
	RTE_PER_LCORE(log_cur_msg).logtype = logtype;

	ret = vfprintf(f, format, ap);
	fflush(f);
	return ret;
}
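rte_vlog() is the va_list core of the logging path; callers normally reach it through a variadic wrapper, as in DPDK's rte_log():

int
rte_log(uint32_t level, uint32_t logtype, const char *format, ...)
{
	va_list ap;
	int ret;

	va_start(ap, format);
	ret = rte_vlog(level, logtype, format, ap);
	va_end(ap);
	return ret;
}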
Example #3
int
rte_thread_set_affinity(rte_cpuset_t *cpusetp)
{
	int s;
	unsigned lcore_id;
	pthread_t tid;

	tid = pthread_self();

	s = pthread_setaffinity_np(tid, sizeof(rte_cpuset_t), cpusetp);
	if (s != 0) {
		RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
		return -1;
	}

	/* store socket_id in TLS for quick access */
	RTE_PER_LCORE(_socket_id) =
		eal_cpuset_socket_id(cpusetp);

	/* store cpuset in TLS for quick access */
	memmove(&RTE_PER_LCORE(_cpuset), cpusetp,
		sizeof(rte_cpuset_t));

	lcore_id = rte_lcore_id();
	if (lcore_id != (unsigned)LCORE_ID_ANY) {
		/* EAL thread will update lcore_config */
		lcore_config[lcore_id].socket_id = RTE_PER_LCORE(_socket_id);
		memmove(&lcore_config[lcore_id].cpuset, cpusetp,
			sizeof(rte_cpuset_t));
	}

	return 0;
}
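A usage sketch for the function above: pin the calling thread to a single CPU so that the per-lcore socket-id and cpuset caches get refreshed (pin_self_to_cpu2() and the CPU number are illustrative, not part of the API):

static int
pin_self_to_cpu2(void)
{
	rte_cpuset_t set;

	CPU_ZERO(&set);
	CPU_SET(2, &set);
	/* also updates the per-lcore _socket_id and _cpuset caches */
	return rte_thread_set_affinity(&set);
}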
Example #4
/*****************************************************************************
 * tlkp_add_ucb()
 ****************************************************************************/
int tlkp_add_ucb(udp_control_block_t *ucb)
{
    int error;

    if (ucb == NULL)
        return -EINVAL;

    TRACE_FMT(TLK, DEBUG,
              "[%s()]: phys_port %u l4_hash %08X ladd/radd %08X/%08X lp/rp %u/%u",
              __func__,
              ucb->ucb_l4.l4cb_interface,
              ucb->ucb_l4.l4cb_rx_hash,
              ucb->ucb_l4.l4cb_dst_addr.ip_v4,
              ucb->ucb_l4.l4cb_src_addr.ip_v4,
              ucb->ucb_l4.l4cb_dst_port,
              ucb->ucb_l4.l4cb_src_port);

    error = tlkp_add_cb(RTE_PER_LCORE(tlkp_ucb_hash_table), &ucb->ucb_l4);
    if (error)
        return error;

    /*
     * If we have UCB trace filters enabled and the new UCB matches then
     * update the ucb_trace flag.
     */
    if (unlikely(RTE_PER_LCORE(trace_filter).tf_enabled)) {
        if (ucb_trace_filter_match(&RTE_PER_LCORE(trace_filter), ucb))
            ucb->ucb_trace = true;
    }

    return 0;
}
Example #5
/*****************************************************************************
 * mem_lcore_init()
 ****************************************************************************/
void mem_lcore_init(uint32_t lcore_id)
{
    RTE_PER_LCORE(mbuf_pool) = mbuf_pool[lcore_id];
    RTE_PER_LCORE(mbuf_pool_tx_hdr) = mbuf_pool_tx_hdr[lcore_id];
    RTE_PER_LCORE(mbuf_pool_clone) = mbuf_pool_clone[lcore_id];
    RTE_PER_LCORE(tcb_pool) = tcb_pool[lcore_id];
    RTE_PER_LCORE(ucb_pool) = ucb_pool[lcore_id];
}
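Caching the pools in TLS lets the datapath allocate without a per-packet lookup or any locking; a minimal sketch (mbuf_alloc() is an illustrative name, not part of the code above):

static inline struct rte_mbuf *
mbuf_alloc(void)
{
    /* allocate from this lcore's cached pool; no locking needed */
    return rte_pktmbuf_alloc(RTE_PER_LCORE(mbuf_pool));
}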
Example #6
/*****************************************************************************
 * trace_get_uniq_id()
 ****************************************************************************/
static uint32_t trace_get_uniq_id(void)
{
    static RTE_DEFINE_PER_LCORE(uint32_t, trace_id);

    /* Reserve 0 as the invalid id. */
    RTE_PER_LCORE(trace_id)++;
    if (unlikely(RTE_PER_LCORE(trace_id) == 0))
        RTE_PER_LCORE(trace_id)++;
    return RTE_PER_LCORE(trace_id);
}
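The function above shows the whole per-lcore pattern: RTE_DEFINE_PER_LCORE creates a thread-local variable, and RTE_PER_LCORE accesses the calling lcore's own copy, so no locking is needed. A minimal self-contained sketch (the names pkt_count and count_packet are illustrative):

#include <stdint.h>
#include <rte_per_lcore.h>

static RTE_DEFINE_PER_LCORE(uint64_t, pkt_count);

static inline void
count_packet(void)
{
    RTE_PER_LCORE(pkt_count)++; /* touches only this lcore's instance */
}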
Example #7
void
rte_thread_get_affinity(rte_cpuset_t *cpusetp)
{
	assert(cpusetp);
	memmove(cpusetp, &RTE_PER_LCORE(_cpuset),
		sizeof(rte_cpuset_t));
}
Example #8
void
dpdk_set_lcore_id(unsigned cpu)
{
    /* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;
}
Example #9
void eal_thread_init_master(unsigned lcore_id)
{
	/* set the lcore ID in per-lcore memory area */
	RTE_PER_LCORE(_lcore_id) = lcore_id;

	/* set CPU affinity */
	if (eal_thread_set_affinity() < 0)
		rte_panic("cannot set affinity\n");
}
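The id stored here is what rte_lcore_id() later reads back; in DPDK that accessor is just the TLS lookup:

static inline unsigned
rte_lcore_id(void)
{
	return RTE_PER_LCORE(_lcore_id);
}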
Example #10
/*****************************************************************************
 * tlkp_find_v4_ucb()
 ****************************************************************************/
udp_control_block_t *tlkp_find_v4_ucb(uint32_t phys_port, uint32_t l4_hash,
                                      uint32_t local_addr, uint32_t remote_addr,
                                      uint16_t local_port, uint16_t remote_port)
{
    l4_control_block_t  *l4_cb;
    udp_control_block_t *ucb;

    TRACE_FMT(TLK, DEBUG,
              "[%s()]: phys_port %u l4_hash %08X ladd/radd %08X/%08X lp/rp %u/%u",
              __func__,
              phys_port,
              l4_hash,
              local_addr,
              remote_addr,
              local_port,
              remote_port);

    l4_cb = tlkp_find_v4_cb(RTE_PER_LCORE(tlkp_ucb_hash_table),
                                          phys_port,
                                          l4_hash,
                                          local_addr,
                                          remote_addr,
                                          local_port,
                                          remote_port);
    if (unlikely(l4_cb == NULL))
        return NULL;

    ucb = container_of(l4_cb, udp_control_block_t, ucb_l4);

    /*
     * If we found a UCB and we have UCB trace filters enabled then
     * check if we should update the ucb_trace flag.
     */
    if (unlikely(RTE_PER_LCORE(trace_filter).tf_enabled)) {
        if (ucb_trace_filter_match(&RTE_PER_LCORE(trace_filter), ucb))
            ucb->ucb_trace = true;
        else
            ucb->ucb_trace = false;
    } else if (unlikely(ucb->ucb_trace == true))
        ucb->ucb_trace = false;

    return ucb;
}
Example #11
/*****************************************************************************
 * tlkp_udp_lcore_init()
 ****************************************************************************/
void tlkp_udp_lcore_init(uint32_t lcore_id)
{
    unsigned int i;

    RTE_PER_LCORE(tlkp_ucb_hash_table) =
        rte_zmalloc_socket("udp_hash_table", rte_eth_dev_count() *
                           TPG_HASH_BUCKET_SIZE *
                           sizeof(tlkp_hash_bucket_t),
                           RTE_CACHE_LINE_SIZE,
                           rte_lcore_to_socket_id(lcore_id));
    if (RTE_PER_LCORE(tlkp_ucb_hash_table) == NULL) {
        TPG_ERROR_ABORT("[%d]: Failed to allocate per lcore udp htable!\n",
                        rte_lcore_index(lcore_id));
    }

    for (i = 0; i < (rte_eth_dev_count() * TPG_HASH_BUCKET_SIZE); i++) {
        /*
         * Initialize all list headers.
         */
        LIST_INIT((&RTE_PER_LCORE(tlkp_ucb_hash_table)[i]));
    }
}
Example #12
const char *
rte_strerror(int errnum)
{
#define RETVAL_SZ 256
	static RTE_DEFINE_PER_LCORE(char[RETVAL_SZ], retval);

	/* since some implementations of strerror_r throw an error
	 * themselves if errnum is too big, we handle that case here */
	if (errnum > RTE_MAX_ERRNO)
		rte_snprintf(RTE_PER_LCORE(retval), RETVAL_SZ,
#ifdef RTE_EXEC_ENV_BSDAPP
				"Unknown error: %d", errnum);
#else
				"Unknown error %d", errnum);
#endif
	else
		switch (errnum) {
		case E_RTE_SECONDARY:
			return "Invalid call in secondary process";
		case E_RTE_NO_CONFIG:
			return "Missing rte_config structure";
		default:
			strerror_r(errnum, RTE_PER_LCORE(retval), RETVAL_SZ);
		}

	return RTE_PER_LCORE(retval);
}
Example #13
int
rte_assign_lcore_id (void)
{
       int ret = -1;
       unsigned lcore_id;
       struct rte_config *config = rte_eal_get_configuration();

       rte_spinlock_lock(&lcore_sl);

       /* See whether this thread already has an lcore ID */
       lcore_id = rte_lcore_id();
       if (lcore_id == (unsigned)-1)
       {
               /* Find the first available lcore, i.e. one whose role is
                * still OFF
                */
               for (lcore_id = 0;
                       (lcore_id < RTE_MAX_LCORE) && (config->lcore_role[lcore_id] != ROLE_OFF);
                       ++lcore_id);

               /* if we found one, assign it */
               if (lcore_id < RTE_MAX_LCORE)
               {
                       config->lcore_role[lcore_id] = ROLE_RTE;

                       /* These are floating lcores - no core id or socket id */
                       lcore_config[lcore_id].core_id = LCORE_ID_ANY;
                       lcore_config[lcore_id].socket_id = SOCKET_ID_ANY;

                       lcore_config[lcore_id].f = NULL;

                       lcore_config[lcore_id].thread_id = pthread_self();
                       lcore_config[lcore_id].detected = 0;                            /* Core was not detected */
                       lcore_config[lcore_id].state = RUNNING;
                       config->lcore_count++;

                       ret = lcore_id;

                       RTE_PER_LCORE(_lcore_id) = lcore_id;
               }
       }

       rte_spinlock_unlock(&lcore_sl);
       return ret;
}
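A hedged usage sketch for the function above: a non-EAL pthread claims a floating lcore id before touching any lcore-indexed state (worker() is illustrative):

static void *
worker(void *arg)
{
       (void)arg;

       if (rte_assign_lcore_id() < 0)
               return NULL; /* no free floating lcore was available */

       /* ... rte_lcore_id() is now valid on this thread ... */
       return NULL;
}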
Example #14
static int
dpaa_mbuf_free_bulk(struct rte_mempool *pool,
		    void *const *obj_table,
		    unsigned int n)
{
	struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);
	int ret;
	unsigned int i = 0;

	DPAA_MEMPOOL_DPDEBUG("Request to free %d buffers in bpid = %d",
			     n, bp_info->bpid);

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
					 ret);
			return 0;
		}
	}

	while (i < n) {
		uint64_t phy = rte_mempool_virt2iova(obj_table[i]);

		if (unlikely(!bp_info->ptov_off)) {
			/* buffers are from single mem segment */
			if (bp_info->flags & DPAA_MPOOL_SINGLE_SEGMENT) {
				bp_info->ptov_off
						= (uint64_t)obj_table[i] - phy;
				rte_dpaa_bpid_info[bp_info->bpid].ptov_off
						= bp_info->ptov_off;
			}
		}

		dpaa_buf_free(bp_info,
			      (uint64_t)phy + bp_info->meta_data_size);
		i = i + 1;
	}

	DPAA_MEMPOOL_DPDEBUG("freed %d buffers in bpid =%d",
			     n, bp_info->bpid);

	return 0;
}
Example #15
const char *
rte_strerror(int errnum)
{
#define RETVAL_SZ 256
	static RTE_DEFINE_PER_LCORE(char[RETVAL_SZ], retval);

	/* since some implementations of strerror_r throw an error
	 * themselves if errnum is too big, we handle that case here */
	if (errnum > RTE_MAX_ERRNO) {
		snprintf(RTE_PER_LCORE(retval), RETVAL_SZ,
#if defined(RTE_EXEC_ENV_BSDAPP)
				"Unknown error: %d", errnum);
#elif defined(RTE_EXEC_ENV_LINUXAPP)
				"Unknown error %d", errnum);
#elif defined(RTE_EXEC_ENV_OSVAPP)
				"No error information");
		(void)errnum;
#endif
	} else {
		switch (errnum) {
		case E_RTE_SECONDARY:
			return "Invalid call in secondary process";
		case E_RTE_NO_CONFIG:
			return "Missing rte_config structure";
		default:
			strerror_r(errnum, RTE_PER_LCORE(retval), RETVAL_SZ);
		}
	}

	return RTE_PER_LCORE(retval);
}
Example #16
void
setUp(void) {
#ifdef HYBRID
  lagopus_result_t ret = LAGOPUS_RESULT_ANY_FAILURES;
  datastore_interp_state_t state = DATASTORE_INTERP_STATE_AUTO_COMMIT;
  char *str = NULL;

#ifdef HAVE_DPDK
  RTE_PER_LCORE(_lcore_id) = 0;
#endif /* HAVE_DPDK */

  /* create interp. */
  INTERP_CREATE(ret, NULL, interp, tbl, ds);

  /* bridge create cmd. */
  TEST_BRIDGE_CREATE(ret, &interp, state, &tbl, &ds, str,
                     "br0", "1", "cha1", "c1",
                     "test_if01", "test_port01", "1");
#else /* HYBRID */
  TEST_IGNORE_MESSAGE("HYBRID is not defined.");
#endif /* HYBRID */
}
Example #17
static void
dpdk_init__(const struct smap *ovs_other_config)
{
    char **argv = NULL, **argv_to_release = NULL;
    int result;
    int argc, argc_tmp;
    bool auto_determine = true;
    int err = 0;
    cpu_set_t cpuset;
    char *sock_dir_subcomponent;

    if (process_vhost_flags("vhost-sock-dir", xstrdup(ovs_rundir()),
                            NAME_MAX, ovs_other_config,
                            &sock_dir_subcomponent)) {
        struct stat s;
        if (!strstr(sock_dir_subcomponent, "..")) {
            vhost_sock_dir = xasprintf("%s/%s", ovs_rundir(),
                                       sock_dir_subcomponent);

            err = stat(vhost_sock_dir, &s);
            if (err) {
                VLOG_ERR("vhost-user sock directory '%s' does not exist.",
                         vhost_sock_dir);
            }
        } else {
            vhost_sock_dir = xstrdup(ovs_rundir());
            VLOG_ERR("vhost-user sock directory request '%s/%s' has invalid"
                     "characters '..' - using %s instead.",
                     ovs_rundir(), sock_dir_subcomponent, ovs_rundir());
        }
        free(sock_dir_subcomponent);
    } else {
        vhost_sock_dir = sock_dir_subcomponent;
    }

    argv = grow_argv(&argv, 0, 1);
    argc = 1;
    argv[0] = xstrdup(ovs_get_program_name());
    argc_tmp = get_dpdk_args(ovs_other_config, &argv, argc);

    while (argc_tmp != argc) {
        if (!strcmp("-c", argv[argc]) || !strcmp("-l", argv[argc])) {
            auto_determine = false;
            break;
        }
        argc++;
    }
    argc = argc_tmp;

    /**
     * NOTE: This is an unsophisticated mechanism for determining the DPDK
     * lcore for the DPDK Master.
     */
    if (auto_determine) {
        int i;
        /* Get the main thread affinity */
        CPU_ZERO(&cpuset);
        err = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
                                     &cpuset);
        if (!err) {
            for (i = 0; i < CPU_SETSIZE; i++) {
                if (CPU_ISSET(i, &cpuset)) {
                    argv = grow_argv(&argv, argc, 2);
                    argv[argc++] = xstrdup("-c");
                    argv[argc++] = xasprintf("0x%08llX", (1ULL<<i));
                    i = CPU_SETSIZE;
                }
            }
        } else {
            VLOG_ERR("Thread getaffinity error %d. Using core 0x1", err);
            /* User did not set dpdk-lcore-mask and unable to get current
             * thread affinity - default to core 0x1 */
            argv = grow_argv(&argv, argc, 2);
            argv[argc++] = xstrdup("-c");
            argv[argc++] = xasprintf("0x%X", 1);
        }
    }

    argv = grow_argv(&argv, argc, 1);
    argv[argc] = NULL;

    optind = 1;

    if (VLOG_IS_INFO_ENABLED()) {
        struct ds eal_args;
        int opt;
        ds_init(&eal_args);
        ds_put_cstr(&eal_args, "EAL ARGS:");
        for (opt = 0; opt < argc; ++opt) {
            ds_put_cstr(&eal_args, " ");
            ds_put_cstr(&eal_args, argv[opt]);
        }
        VLOG_INFO("%s", ds_cstr_ro(&eal_args));
        ds_destroy(&eal_args);
    }

    argv_to_release = grow_argv(&argv_to_release, 0, argc);
    for (argc_tmp = 0; argc_tmp < argc; ++argc_tmp) {
        argv_to_release[argc_tmp] = argv[argc_tmp];
    }

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL");
    }
    argv_release(argv, argv_to_release, argc);

    /* Set the main thread affinity back to pre rte_eal_init() value */
    if (auto_determine && !err) {
        err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t),
                                     &cpuset);
        if (err) {
            VLOG_ERR("Thread setaffinity error %d", err);
        }
    }

    rte_memzone_dump(stdout);

    /* We are called from the main thread here */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;

#ifdef DPDK_PDUMP
    VLOG_INFO("DPDK pdump packet capture enabled");
    err = rte_pdump_init(ovs_rundir());
    if (err) {
        VLOG_INFO("Error initialising DPDK pdump");
        rte_pdump_uninit();
    } else {
        char *server_socket_path;

        server_socket_path = xasprintf("%s/%s", ovs_rundir(),
                                       "pdump_server_socket");
        fatal_signal_add_file_to_unlink(server_socket_path);
        free(server_socket_path);
    }
#endif

    /* Finally, register the dpdk classes */
    netdev_dpdk_register();
}
Example #18
void
pktj_stats_display(struct cmdline *cl, int option, int delay)
{
	uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
	uint64_t total_packets_kni_tx, total_packets_kni_rx,
	    total_packets_kni_dropped;
	uint64_t total_packets_ratel_dropped, total_packets_acl_dropped;
	unsigned lcoreid;
	time_t _time;
	const char *fmt_pre, *fmt_lcore, *fmt_mid, *fmt_total;

	total_packets_dropped = 0;
	total_packets_tx = 0;
	total_packets_rx = 0;
	total_packets_kni_tx = 0;
	total_packets_kni_rx = 0;
	total_packets_kni_dropped = 0;
	total_packets_acl_dropped = 0;
	total_packets_ratel_dropped = 0;

	if (option == CMD_STATS_JSON) { // json
		fmt_pre = STATS_JSON_PRE;
		fmt_lcore = STATS_JSON_LCORE;
		fmt_mid = STATS_JSON_MID;
		fmt_total = STATS_JSON_TOTAL;
	} else if (option == CMD_STATS_CSV) { // csv
		fmt_pre = STATS_CSV_PRE;
		fmt_lcore = STATS_CSV_LCORE;
		fmt_mid = STATS_CSV_MID;
		fmt_total = STATS_CSV_TOTAL;
	} else {
		fmt_pre = STATS_HUM_PRE;
		fmt_lcore = STATS_HUM_LCORE;
		fmt_mid = STATS_HUM_MID;
		fmt_total = STATS_HUM_TOTAL;
	}

	_time = time(NULL);

	for (lcoreid = 0; lcoreid < CMDLINE_MAX_CLIENTS; lcoreid++) {
		if (cmdline_clients[RTE_PER_LCORE(g_socket_id)][lcoreid].cl ==
		    cl) {
			cmdline_clients[RTE_PER_LCORE(g_socket_id)][lcoreid]
			    .csv_delay = delay;
			cmdline_clients[RTE_PER_LCORE(g_socket_id)][lcoreid]
			    .delay_timer = _time;
			break;
		}
	}

	cmdline_printf(cl, "%s", fmt_pre);

	for (lcoreid = 0; lcoreid < RTE_MAX_LCORE; lcoreid++) {
		if (!rte_lcore_is_enabled(lcoreid))
			continue;

		cmdline_printf(
		    cl, fmt_lcore, _time, lcoreid, stats[lcoreid].port_id,
		    stats[lcoreid].nb_iteration_looped, stats[lcoreid].nb_tx,
		    stats[lcoreid].nb_rx, stats[lcoreid].nb_kni_tx,
		    stats[lcoreid].nb_kni_rx, stats[lcoreid].nb_dropped,
		    stats[lcoreid].nb_kni_dropped,
		    stats[lcoreid].nb_acl_dropped,
		    stats[lcoreid].nb_ratel_dropped);

		total_packets_dropped += stats[lcoreid].nb_dropped;
		total_packets_tx += stats[lcoreid].nb_tx;
		total_packets_rx += stats[lcoreid].nb_rx;
		total_packets_kni_tx += stats[lcoreid].nb_kni_tx;
		total_packets_kni_rx += stats[lcoreid].nb_kni_rx;
		total_packets_kni_dropped += stats[lcoreid].nb_kni_dropped;
		total_packets_acl_dropped += stats[lcoreid].nb_acl_dropped;
		total_packets_ratel_dropped += stats[lcoreid].nb_ratel_dropped;
	}

	// add a null object to end the array
	cmdline_printf(cl, "%s", fmt_mid);

	cmdline_printf(cl, fmt_total, total_packets_tx, total_packets_rx,
		       total_packets_kni_tx, total_packets_kni_rx,
		       total_packets_dropped, total_packets_kni_dropped,
		       total_packets_acl_dropped, total_packets_ratel_dropped);
}
Example #19
/* get the current loglevel for the message being processed */
int rte_log_cur_msg_loglevel(void)
{
	return RTE_PER_LCORE(log_cur_msg).loglevel;
}
Example #20
unsigned rte_socket_id(void)
{
	return RTE_PER_LCORE(_socket_id);
}
Example #21
/* get the current logtype for the message being processed */
int rte_log_cur_msg_logtype(void)
{
	return RTE_PER_LCORE(log_cur_msg).logtype;
}
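These two accessors let a custom log sink tag the message currently being emitted. A sketch of such a sink using glibc's fopencookie() (console_log_write and the commented setup call are illustrative, not part of DPDK):

static ssize_t
console_log_write(void *cookie, const char *buf, size_t size)
{
	/* messages at ERR or more severe go to stderr, the rest to stdout */
	FILE *out = rte_log_cur_msg_loglevel() <= RTE_LOG_ERR ? stderr : stdout;

	(void)cookie;
	return fwrite(buf, 1, size, out);
}

/* Hook it up (sketch):
 * rte_openlog_stream(fopencookie(NULL, "w",
 *     (cookie_io_functions_t){ .write = console_log_write }));
 */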
Example #22
/*****************************************************************************
 * tlkp_walk_ucb()
 ****************************************************************************/
void tlkp_walk_ucb(uint32_t phys_port, tlkp_walk_v4_cb_t callback, void *arg)
{
    tlkp_walk_v4(RTE_PER_LCORE(tlkp_ucb_hash_table), phys_port, callback, arg);
}
Example #23
/*****************************************************************************
 * tlkp_delete_ucb()
 ****************************************************************************/
int tlkp_delete_ucb(udp_control_block_t *ucb)
{
    return tlkp_delete_cb(RTE_PER_LCORE(tlkp_ucb_hash_table), &ucb->ucb_l4);
}
Example #24
static int
dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
		     void **obj_table,
		     unsigned int count)
{
	struct rte_mbuf **m = (struct rte_mbuf **)obj_table;
	struct bm_buffer bufs[DPAA_MBUF_MAX_ACQ_REL];
	struct dpaa_bp_info *bp_info;
	void *bufaddr;
	int i, ret;
	unsigned int n = 0;

	bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);

	DPAA_MEMPOOL_DPDEBUG("Request to alloc %d buffers in bpid = %d",
			     count, bp_info->bpid);

	if (unlikely(count >= (RTE_MEMPOOL_CACHE_MAX_SIZE * 2))) {
		DPAA_MEMPOOL_ERR("Unable to allocate requested (%u) buffers",
				 count);
		return -1;
	}

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
					 ret);
			return -1;
		}
	}

	while (n < count) {
		/* Acquire is all-or-nothing, so drain in chunks of at most
		 * DPAA_MBUF_MAX_ACQ_REL buffers, then the remainder.
		 */
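		/* e.g. with DPAA_MBUF_MAX_ACQ_REL == 8, a request for 20
		 * buffers would be served as 8 + 8 + 4 (illustrative numbers)
		 */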
		if ((count - n) > DPAA_MBUF_MAX_ACQ_REL) {
			ret = bman_acquire(bp_info->bp, bufs,
					   DPAA_MBUF_MAX_ACQ_REL, 0);
		} else {
			ret = bman_acquire(bp_info->bp, bufs, count - n, 0);
		}
		/* If the pool cannot supply the requested number of buffers,
		 * bman_acquire() returns 0 or a negative error.
		 */
		if (ret <= 0) {
			DPAA_MEMPOOL_DPDEBUG("Buffer acquire failed (%d)",
					     ret);
			/* The API expects the exact number of requested
			 * buffers, so release everything allocated so far.
			 */
			dpaa_mbuf_free_bulk(pool, obj_table, n);
			return -ENOBUFS;
		}
		/* assigning mbuf from the acquired objects */
		for (i = 0; (i < ret) && bufs[i].addr; i++) {
			/* TODO-errata - observed that bufs may be null,
			 * i.e. first buffer is valid, remaining 6 buffers
			 * may be null.
			 */
			bufaddr = DPAA_MEMPOOL_PTOV(bp_info, bufs[i].addr);
			m[n] = (struct rte_mbuf *)((char *)bufaddr
						- bp_info->meta_data_size);
			DPAA_MEMPOOL_DPDEBUG("Paddr (%p), FD (%p) from BMAN",
					     (void *)bufaddr, (void *)m[n]);
			n++;
		}
	}

	DPAA_MEMPOOL_DPDEBUG("Allocated %d buffers from bpid=%d",
			     n, bp_info->bpid);
	return 0;
}
Example #25
/*
 * Initialize PIOT Module
 */
int
rw_piot_init(int argc, char **argv, void *instance_ptr, f_rw_piot_log_t log_fn)
{

  int rc, ret;
  int i, no_huge = 0, memory_req = 0;
  struct rte_config *cfg = rte_eal_get_configuration();

/*
 * TBD - thread safety
 */

  if (piot_initialized) {
    int num_arg;

    for (num_arg = 0; num_arg < argc; num_arg ++) {
      if (strcmp(argv[num_arg], "--") == 0) {
        break;
      } 
    }
    return num_arg;
  }
  piot_initialized = 1;


  memset(&rw_piot_global_config, 0, sizeof(rw_piot_global_config));
  ASSERT(instance_ptr);
  rw_piot_global_config.instance_ptr = instance_ptr;
  rw_piot_global_config.log_fn = log_fn;

  memset(&(rw_piot_lcore[0]), 0, sizeof(rw_piot_lcore));
  
  for (i=0; i<argc; i++) {
    if (strcmp("--no-huge", argv[i]) == 0) {
      no_huge = 1;
      RW_PIOT_LOG(RTE_LOG_INFO, "PIOT: Huge pages disabled by --no-huge cmdarg\n");
    }
    if (strcmp("-m", argv[i]) == 0) {
      if ((i+1) <argc) {
        memory_req = atoi(argv[i+1]);
        RW_PIOT_LOG(RTE_LOG_INFO, "PIOT: -m cmdarg setting requested memory to %d mb\n", memory_req);
      }
    }
    if (strcmp("--", argv[i]) == 0) {
      break;
    }
  }
  /*
   * setup system environment for DPDK
   */
  rc = dpdk_system_setup(no_huge, memory_req);
  if (rc < 0) {
    return rc;
  }

  rte_set_application_log_hook(rw_piot_log_handler);

  /*
   * Init DPDK EAL without doing thread related inits
   */
  ret = rte_eal_init_no_thread(argc, argv);
  if (ret < 0) {
    return ret;
  }
  if (geteuid() == 0){
    rte_kni_init(RWPIOT_MAX_KNI_DEVICES);
  }
 /*
  * Assign master lcore-id. Should be passed in the init - TBD
  */

  cfg->master_lcore = 0;  /* This will be fixed with RW.Sched integration - TBD */

  /* set the lcore ID in per-lcore memory area */
  RTE_PER_LCORE(_lcore_id) = cfg->master_lcore;

  /* set CPU affinity for master thread? TBD */
  // if (eal_thread_set_affinity() < 0)
  //    rte_panic("cannot set affinity\n");

  rte_timer_subsystem_init();

  return ret; /* number of args consumed by rte_eal_init_no_thread */
}