Example #1
/*
 * Init the memzone subsystem
 */
int
rte_eal_memzone_init(void)
{
	struct rte_mem_config *mcfg;
	const struct rte_memseg *memseg;
	unsigned i = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/* mirror the runtime memsegs from config */
	free_memseg = mcfg->free_memseg;

	/* secondary processes don't need to initialise anything */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	memseg = rte_eal_get_physmem_layout();
	if (memseg == NULL) {
		RTE_LOG(ERR, EAL, "%s(): Cannot get physical layout\n", __func__);
		return -1;
	}

	rte_rwlock_write_lock(&mcfg->mlock);

	/* fill in uninitialized free_memsegs */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		if (memseg[i].addr == NULL)
			break;
		if (free_memseg[i].addr != NULL)
			continue;
		memcpy(&free_memseg[i], &memseg[i], sizeof(struct rte_memseg));
	}

	/* make all zones cache-aligned */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		if (free_memseg[i].addr == NULL)
			break;
		if (memseg_sanitize(&free_memseg[i]) < 0) {
			RTE_LOG(ERR, EAL, "%s(): Sanity check failed\n", __func__);
			rte_rwlock_write_unlock(&mcfg->mlock);
			return -1;
		}
	}

	/* delete all zones */
	mcfg->memzone_idx = 0;
	memset(mcfg->memzone, 0, sizeof(mcfg->memzone));

	rte_rwlock_write_unlock(&mcfg->mlock);

	return 0;
}
Example #2
/*
 * Init the memzone subsystem
 */
int
rte_eal_memzone_init(void)
{
	struct rte_mem_config *mcfg;
	const struct rte_memseg *memseg;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/* secondary processes don't need to initialise anything */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	memseg = rte_eal_get_physmem_layout();
	if (memseg == NULL) {
		RTE_LOG(ERR, EAL, "%s(): Cannot get physical layout\n", __func__);
		return -1;
	}

	rte_rwlock_write_lock(&mcfg->mlock);

	/* delete all zones */
	mcfg->memzone_cnt = 0;
	memset(mcfg->memzone, 0, sizeof(mcfg->memzone));

	rte_rwlock_write_unlock(&mcfg->mlock);

	return rte_eal_malloc_heap_init();
}
Example #3
sflow_socket_t* sflow_socket_create(sflow_key_t* key, ss_frame_t* rx_buf) {
    int is_error = 0;

    sflow_key_dump("create socket for key", key);
    // XXX: should these be allocated from jemalloc or RTE alloc?
    sflow_socket_t* socket = je_calloc(1, sizeof(sflow_socket_t));
    if (socket == NULL) { is_error = 1; goto error_out; }

    sflow_socket_init(key, socket);

    rte_rwlock_write_lock(&sflow_hash_lock);
    int32_t socket_id = rte_hash_add_key(sflow_hash, key);
    socket->id = (uint64_t) socket_id;
    if (socket_id >= 0) {
        sflow_sockets[socket->id] = socket;
    }
    else {
        is_error = 1;
    }
    rte_rwlock_write_unlock(&sflow_hash_lock);

    // XXX: figure out what should be in this
    //RTE_LOG(INFO, L3L4, "new sflow socket: sport: %hu dport: %hu id: %lu is_error: %d\n",
    //    rte_bswap16(key->sport), rte_bswap16(key->dport), socket->id, is_error);

    error_out:
    if (unlikely(is_error)) {
        if (socket) { je_free(socket); socket = NULL; }
        RTE_LOG(ERR, L3L4, "failed to allocate sflow socket\n");
        return NULL;
    }

    return socket;
}
Example #4
/*
 * Return a pointer to a correctly filled memzone descriptor (with a
 * specified alignment and boundary).
 * If the allocation cannot be done, return NULL.
 */
const struct rte_memzone *
rte_memzone_reserve_bounded(const char *name, size_t len,
		int socket_id, unsigned flags, unsigned align, unsigned bound)
{
	struct rte_mem_config *mcfg;
	const struct rte_memzone *mz = NULL;

	/* both sizes cannot be explicitly called for */
	if (((flags & RTE_MEMZONE_1GB) && (flags & RTE_MEMZONE_2MB))
		|| ((flags & RTE_MEMZONE_16MB) && (flags & RTE_MEMZONE_16GB))) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_write_lock(&mcfg->mlock);

	mz = memzone_reserve_aligned_thread_unsafe(
		name, len, socket_id, flags, align, bound);

	rte_rwlock_write_unlock(&mcfg->mlock);

	return mz;
}
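A hedged call-site sketch for the bounded reserve above; the helper name, the zone name, and the size/alignment/boundary values are illustrative assumptions, not taken from the sources quoted here.

#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_log.h>

/* Hypothetical helper: reserve a 64 KB descriptor area that must not
 * cross a 2 MB boundary. All names and values are assumptions. */
static const struct rte_memzone *
example_reserve_desc_zone(int socket_id)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_reserve_bounded("EX_DESC_ZONE", 64 * 1024,
			socket_id, 0 /* flags */,
			4096 /* align */, 2 * 1024 * 1024 /* bound */);
	if (mz == NULL) {
		RTE_LOG(ERR, EAL, "bounded reserve failed: %s\n",
			rte_strerror(rte_errno));
		return NULL;
	}
	return mz;
}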
Example #5
int
rte_memzone_free(const struct rte_memzone *mz)
{
	struct rte_mem_config *mcfg;
	int ret = 0;
	void *addr;
	unsigned idx;

	if (mz == NULL)
		return -EINVAL;

	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_write_lock(&mcfg->mlock);

	idx = ((uintptr_t)mz - (uintptr_t)mcfg->memzone);
	idx = idx / sizeof(struct rte_memzone);

	addr = mcfg->memzone[idx].addr;
	if (addr == NULL)
		ret = -EINVAL;
	else if (mcfg->memzone_cnt == 0) {
		rte_panic("%s(): memzone address not NULL but memzone_cnt is 0!\n",
				__func__);
	} else {
		memset(&mcfg->memzone[idx], 0, sizeof(mcfg->memzone[idx]));
		mcfg->memzone_cnt--;
	}

	rte_rwlock_write_unlock(&mcfg->mlock);

	rte_free(addr);

	return ret;
}
Example #6
int
rte_memzone_free(const struct rte_memzone *mz)
{
	struct rte_mem_config *mcfg;
	int ret = 0;
	void *addr;
	unsigned idx;

	if (mz == NULL)
		return -EINVAL;

	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_write_lock(&mcfg->mlock);

	idx = ((uintptr_t)mz - (uintptr_t)mcfg->memzone);
	idx = idx / sizeof(struct rte_memzone);

#ifdef RTE_LIBRTE_IVSHMEM
	/*
	 * If ioremap_addr is set, it's an IVSHMEM memzone and we cannot
	 * free it.
	 */
	if (mcfg->memzone[idx].ioremap_addr != 0) {
		rte_rwlock_write_unlock(&mcfg->mlock);
		return -EINVAL;
	}
#endif

	addr = mcfg->memzone[idx].addr;

	if (addr == NULL)
		ret = -EINVAL;
	else if (mcfg->memzone_cnt == 0) {
		rte_panic("%s(): memzone address not NULL but memzone_cnt is 0!\n",
				__func__);
	} else {
		memset(&mcfg->memzone[idx], 0, sizeof(mcfg->memzone[idx]));
		mcfg->memzone_cnt--;
	}

	rte_rwlock_write_unlock(&mcfg->mlock);

	rte_free(addr);

	return ret;
}
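For context, a hedged sketch of the matching allocate/release round trip from a caller's point of view; the zone name and length are assumptions for illustration, and rte_memzone_free() takes mcfg->mlock internally as shown above, so the caller never touches the lock.

#include <string.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_errno.h>

/* Hypothetical round trip: reserve a zone, use it, free it. */
static int
example_memzone_round_trip(void)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_reserve("EX_SCRATCH", 4096, SOCKET_ID_ANY, 0);
	if (mz == NULL)
		return -rte_errno;

	memset(mz->addr, 0, mz->len);	/* use the zone */

	return rte_memzone_free(mz);	/* 0 on success, -EINVAL otherwise */
}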
Example #7
/*
 * Allocates memory for LPM object
 */
struct rte_lpm *
rte_lpm_create(const char *name, int socket_id, int max_rules,
		__rte_unused int flags)
{
	char mem_name[RTE_LPM_NAMESIZE];
	struct rte_lpm *lpm = NULL;
	uint32_t mem_size;
	struct rte_lpm_list *lpm_list;

	/* check that we have an initialised tail queue */
	if ((lpm_list = 
	     RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM, rte_lpm_list)) == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;	
	}

	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl24_entry) != 2);
	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl8_entry) != 2);

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (max_rules == 0)){
		rte_errno = EINVAL;
		return NULL;
	}

	rte_snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* guarantee there's no existing */
	TAILQ_FOREACH(lpm, lpm_list, next) {
		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	if (lpm != NULL)
		goto exit;

	/* Allocate memory to store the LPM data structures. */
	lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
			CACHE_LINE_SIZE, socket_id);
	if (lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		goto exit;
	}

	/* Save user arguments. */
	lpm->max_rules = max_rules;
	rte_snprintf(lpm->name, sizeof(lpm->name), "%s", name);

	TAILQ_INSERT_TAIL(lpm_list, lpm, next);

exit:	
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return lpm;
}
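A short, hedged sketch of how a table created this way is typically used; the route, depth, and next-hop values are made up, and the uint8_t next-hop form follows the older LPM API that matches the example above.

#include <rte_lpm.h>
#include <rte_ip.h>

/* Hypothetical setup: create an LPM table, install one route, look up
 * an address. All values are illustrative. */
static int
example_lpm_usage(int socket_id)
{
	struct rte_lpm *lpm;
	uint8_t next_hop = 0;

	lpm = rte_lpm_create("ex_lpm", socket_id, 1024 /* max_rules */, 0);
	if (lpm == NULL)
		return -1;

	/* 10.0.0.0/8 -> next hop 3 */
	if (rte_lpm_add(lpm, IPv4(10, 0, 0, 0), 8, 3) < 0)
		return -1;

	/* returns 0 on hit and fills next_hop */
	if (rte_lpm_lookup(lpm, IPv4(10, 1, 2, 3), &next_hop) == 0)
		return next_hop;

	return -1;
}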
Example #8
struct rte_acl_ctx *
rte_acl_create(const struct rte_acl_param *param)
{
	size_t sz;
	struct rte_acl_ctx *ctx;
	struct rte_acl_list *acl_list;
	char name[sizeof(ctx->name)];

	/* check that we have an initialised tail queue */
	acl_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_ACL, rte_acl_list);
	if (acl_list == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;
	}

	/* check that input parameters are valid. */
	if (param == NULL || param->name == NULL) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(name, sizeof(name), "ACL_%s", param->name);

	/* calculate amount of memory required for pattern set. */
	sz = sizeof(*ctx) + param->max_rule_num * param->rule_size;

	/* get EAL TAILQ lock. */
	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* if we already have one with that name */
	TAILQ_FOREACH(ctx, acl_list, next) {
		if (strncmp(param->name, ctx->name, sizeof(ctx->name)) == 0)
			break;
	}

	/* if ACL with such name doesn't exist, then create a new one. */
	if (ctx == NULL && (ctx = rte_zmalloc_socket(name, sz, CACHE_LINE_SIZE,
			param->socket_id)) != NULL) {

		/* init new allocated context. */
		ctx->rules = ctx + 1;
		ctx->max_rules = param->max_rule_num;
		ctx->rule_sz = param->rule_size;
		ctx->socket_id = param->socket_id;
		snprintf(ctx->name, sizeof(ctx->name), "%s", param->name);

		TAILQ_INSERT_TAIL(acl_list, ctx, next);

	} else if (ctx == NULL) {
		RTE_LOG(ERR, ACL,
			"allocation of %zu bytes on socket %d for %s failed\n",
			sz, param->socket_id, name);
	}

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
	return ctx;
}
Example #9
/* Log the provided message to the log screen and optionally a file. */
void
pktgen_log(int level, const char *file, long line,
           const char *func, const char *fmt, ...)
{
	log_msg_t *curr_msg;
	va_list args;

	rte_rwlock_write_lock(&log_history.lock);

	curr_msg = &log_history.msg[log_history.head];

	/* log message metadata */
	gettimeofday(&curr_msg->tv, NULL);

	curr_msg->level = level;

	if (curr_msg->file != NULL)
		free(curr_msg->file);
	curr_msg->file  = strdup(file);

	curr_msg->line = line;

	if (curr_msg->func != NULL)
		free(curr_msg->func);
	curr_msg->func = strdup(func);

	/* actual log message */
	va_start(args, fmt);
	vsnprintf(curr_msg->msg, LOG_MAX_LINE, fmt, args);
	va_end(args);

	/* Adjust head and tail indexes: head must point one beyond the last valid
	 * entry, tail must move one entry if head has caught up.
	 * The array acts as a circular buffer, so if either head or tail move
	 * beyond the last array element, they are wrapped around.
	 */
	log_history.head = (log_history.head + 1) % LOG_HISTORY;

	if (log_history.head == log_history.tail)
		log_history.tail = (log_history.tail + 1) % LOG_HISTORY;

	/* Log to file if enabled */
	if (log_file != NULL)
		fprintf(log_file, "%s\n", pktgen_format_msg_file(curr_msg));

	/* Print message to screen if its level is high enough. */
	if (level >= log_level_screen)
		fprintf(stdout, "%s\n", pktgen_format_msg_stdout(curr_msg));

	log_history.need_refresh = 1;

	rte_rwlock_write_unlock(&log_history.lock);
}
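Because pktgen_log() expects the caller to pass file, line, and function explicitly, call sites would normally go through a wrapper macro; the sketch below is hypothetical (the macro name and the level constant are assumptions, not taken from pktgen).

/* Hypothetical convenience wrapper: capture the call site automatically.
 * EX_LOG and LOG_LEVEL_INFO are assumed names for illustration. */
#define EX_LOG(level, fmt, ...) \
	pktgen_log((level), __FILE__, __LINE__, __func__, fmt, ##__VA_ARGS__)

/* usage: EX_LOG(LOG_LEVEL_INFO, "port %u link up\n", port_id); */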
Example #10
/* create the ring */
struct rte_ring *
rte_ring_create(const char *name, unsigned count, int socket_id,
		unsigned flags)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	struct rte_ring *r;
	struct rte_tailq_entry *te;
	const struct rte_memzone *mz;
	ssize_t ring_size;
	int mz_flags = 0;
	struct rte_ring_list* ring_list = NULL;

	ring_list = RTE_TAILQ_CAST(rte_ring_tailq.head, rte_ring_list);

	ring_size = rte_ring_get_memsize(count);
	if (ring_size < 0) {
		rte_errno = ring_size;
		return NULL;
	}

	te = rte_zmalloc("RING_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, RING, "Cannot reserve memory for tailq\n");
		rte_errno = ENOMEM;
		return NULL;
	}

	snprintf(mz_name, sizeof(mz_name), "%s%s", RTE_RING_MZ_PREFIX, name);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* reserve a memory zone for this ring. If we can't get rte_config or
	 * we are a secondary process, the memzone_reserve function will set
	 * rte_errno for us appropriately - hence no check in this function */
	mz = rte_memzone_reserve(mz_name, ring_size, socket_id, mz_flags);
	if (mz != NULL) {
		r = mz->addr;
		/* no need to check return value here, we already checked the
		 * arguments above */
		rte_ring_init(r, name, count, flags);

		te->data = (void *) r;

		TAILQ_INSERT_TAIL(ring_list, te, next);
	} else {
		r = NULL;
		RTE_LOG(ERR, RING, "Cannot reserve memory\n");
		rte_free(te);
	}
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return r;
}
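A hedged producer/consumer sketch for a ring created as above; the ring name, size, and the single-producer/single-consumer flags are assumptions chosen for illustration.

#include <stdint.h>
#include <rte_memory.h>
#include <rte_ring.h>
#include <rte_errno.h>

/* Hypothetical single-producer/single-consumer use of a ring. */
static int
example_ring_usage(void)
{
	struct rte_ring *r;
	void *obj = (void *)(uintptr_t)0xdeadbeef;	/* any pointer-sized token */
	void *out = NULL;

	r = rte_ring_create("ex_ring", 1024, SOCKET_ID_ANY,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (r == NULL)
		return -rte_errno;

	if (rte_ring_enqueue(r, obj) != 0)
		return -1;		/* ring full */

	if (rte_ring_dequeue(r, &out) != 0)
		return -1;		/* ring empty */

	return out == obj ? 0 : -1;
}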
Example #11
int sflow_socket_delete(sflow_key_t* key, bool is_locked) {
    sflow_key_dump("delete socket for key", key);

    if (likely(!is_locked)) rte_rwlock_write_lock(&sflow_hash_lock);
    int32_t socket_id = rte_hash_del_key(sflow_hash, key);
    sflow_socket_t* socket = socket_id < 0 ? NULL : sflow_sockets[socket_id];
    if (likely(!is_locked)) rte_rwlock_write_unlock(&sflow_hash_lock);

    if (!socket) return -1;

    sflow_sockets[socket_id] = NULL;
    je_free(socket);

    return 0;
}
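The write lock above pairs naturally with a read-side lookup; the helper below is a hypothetical sketch that reuses the names from these sflow examples (sflow_hash, sflow_sockets, sflow_hash_lock) and is not taken from the original project.

// Hypothetical read-side counterpart: the hash is only read, so a
// reader lock is sufficient.
sflow_socket_t* sflow_socket_lookup(sflow_key_t* key) {
    rte_rwlock_read_lock(&sflow_hash_lock);
    int32_t socket_id = rte_hash_lookup(sflow_hash, key);
    sflow_socket_t* socket = socket_id < 0 ? NULL : sflow_sockets[socket_id];
    rte_rwlock_read_unlock(&sflow_hash_lock);
    return socket;
}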
Example #12
static const struct rte_memzone *
rte_memzone_reserve_thread_safe(const char *name, size_t len,
				int socket_id, unsigned flags, unsigned align,
				unsigned bound)
{
	struct rte_mem_config *mcfg;
	const struct rte_memzone *mz = NULL;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_write_lock(&mcfg->mlock);

	mz = memzone_reserve_aligned_thread_unsafe(
		name, len, socket_id, flags, align, bound);

	rte_rwlock_write_unlock(&mcfg->mlock);

	return mz;
}
Example #13
int sflow_timer_callback() {
    uint64_t expired_ticks = rte_rdtsc() - (rte_get_tsc_hz() * L4_SFLOW_EXPIRED_SECONDS);
    int expired_sockets = 0;
    sflow_socket_t* socket;

    rte_rwlock_write_lock(&sflow_hash_lock);
    for (int i = 0; i < L4_SFLOW_HASH_SIZE; ++i) {
        socket = sflow_sockets[i];
        if (!socket) continue;
        if (socket->rx_ticks < expired_ticks) {
            rte_spinlock_recursive_lock(&socket->lock);
            sflow_socket_delete(&socket->key, 1);
            rte_spinlock_recursive_unlock(&socket->lock);
            ++expired_sockets;
        }
    }
    rte_rwlock_write_unlock(&sflow_hash_lock);

    RTE_LOG(NOTICE, L3L4, "deleted %d expired sflow sockets\n", expired_sockets);
    return 0;
}
Example #14
struct rte_hash *
rte_hash_create(const struct rte_hash_parameters *params)
{
	struct rte_hash *h = NULL;
	uint32_t num_buckets, sig_bucket_size, key_size,
		hash_tbl_size, sig_tbl_size, key_tbl_size, mem_size;
	char hash_name[RTE_HASH_NAMESIZE];
	struct rte_hash_list *hash_list;

	/* check that we have an initialised tail queue */
	if ((hash_list = 
	     RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_HASH, rte_hash_list)) == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;	
	}

	/* Check for valid parameters */
	if ((params == NULL) ||
			(params->entries > RTE_HASH_ENTRIES_MAX) ||
			(params->bucket_entries > RTE_HASH_BUCKET_ENTRIES_MAX) ||
			(params->entries < params->bucket_entries) ||
			!rte_is_power_of_2(params->entries) ||
			!rte_is_power_of_2(params->bucket_entries) ||
			(params->key_len == 0) ||
			(params->key_len > RTE_HASH_KEY_LENGTH_MAX)) {
		rte_errno = EINVAL;
		RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
		return NULL;
	}

	rte_snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);

	/* Calculate hash dimensions */
	num_buckets = params->entries / params->bucket_entries;
	sig_bucket_size = align_size(params->bucket_entries *
				     sizeof(hash_sig_t), SIG_BUCKET_ALIGNMENT);
	key_size =  align_size(params->key_len, KEY_ALIGNMENT);

	hash_tbl_size = align_size(sizeof(struct rte_hash), CACHE_LINE_SIZE);
	sig_tbl_size = align_size(num_buckets * sig_bucket_size,
				  CACHE_LINE_SIZE);
	key_tbl_size = align_size(num_buckets * key_size *
				  params->bucket_entries, CACHE_LINE_SIZE);
	
	/* Total memory required for hash context */
	mem_size = hash_tbl_size + sig_tbl_size + key_tbl_size;

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* guarantee there's no existing */
	TAILQ_FOREACH(h, hash_list, next) {
		if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
			break;
	}
	if (h != NULL)
		goto exit;

	h = (struct rte_hash *)rte_zmalloc_socket(hash_name, mem_size,
					   CACHE_LINE_SIZE, params->socket_id);
	if (h == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto exit;
	}

	/* Setup hash context */
	rte_snprintf(h->name, sizeof(h->name), "%s", params->name);
	h->entries = params->entries;
	h->bucket_entries = params->bucket_entries;
	h->key_len = params->key_len;
	h->hash_func_init_val = params->hash_func_init_val;
	h->num_buckets = num_buckets;
	h->bucket_bitmask = h->num_buckets - 1;
	h->sig_msb = 1 << (sizeof(hash_sig_t) * 8 - 1);
	h->sig_tbl = (uint8_t *)h + hash_tbl_size;
	h->sig_tbl_bucket_size = sig_bucket_size;
	h->key_tbl = h->sig_tbl + sig_tbl_size;
	h->key_tbl_key_size = key_size;
	h->hash_func = (params->hash_func == NULL) ?
		DEFAULT_HASH_FUNC : params->hash_func;

	TAILQ_INSERT_TAIL(hash_list, h, next);

exit:
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return h;
}
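A hedged sketch of creating and using a table through this API; the parameter values are assumptions, and the field set (including bucket_entries) follows the older rte_hash_parameters layout that the example above targets.

#include <stdint.h>
#include <rte_hash.h>
#include <rte_jhash.h>

/* Hypothetical creation and use of a hash table. All values are
 * illustrative only. */
static struct rte_hash *
example_hash_usage(int socket_id)
{
	uint64_t key = 42;
	struct rte_hash_parameters params = {
		.name = "ex_hash",
		.entries = 1024,
		.bucket_entries = 4,
		.key_len = sizeof(key),
		.hash_func = rte_jhash,
		.hash_func_init_val = 0,
		.socket_id = socket_id,
	};
	struct rte_hash *h;
	int32_t pos;

	h = rte_hash_create(&params);
	if (h == NULL)
		return NULL;

	pos = rte_hash_add_key(h, &key);	/* >= 0 on success */
	if (pos < 0)
		return NULL;

	if (rte_hash_lookup(h, &key) != pos)	/* finds the same slot */
		return NULL;

	return h;
}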
Example #15
/* create the mempool */
struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
		   unsigned cache_size, unsigned private_data_size,
		   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		   rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
		   int socket_id, unsigned flags)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	char rg_name[RTE_RING_NAMESIZE];
	struct rte_mempool *mp = NULL;
	struct rte_ring *r;
	const struct rte_memzone *mz;
	size_t mempool_size, total_elt_size;
	int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
	int rg_flags = 0;
	uint32_t header_size, trailer_size;
	unsigned i;
	void *obj;

	/* compilation-time checks */
	RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) &
			  CACHE_LINE_MASK) != 0);
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
	RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_cache) &
			  CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, local_cache) &
			  CACHE_LINE_MASK) != 0);
#endif
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_debug_stats) &
			  CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, stats) &
			  CACHE_LINE_MASK) != 0);
#endif

	/* check that we have an initialised tail queue */
	if (RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL, rte_mempool_list) == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;	
	}
	
	/* requested cache size is too big */
	if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE){
		rte_errno = EINVAL;
		return NULL;
	}

	/* "no cache align" imply "no spread" */
	if (flags & MEMPOOL_F_NO_CACHE_ALIGN)
		flags |= MEMPOOL_F_NO_SPREAD;

	/* ring flags */
	if (flags & MEMPOOL_F_SP_PUT)
		rg_flags |= RING_F_SP_ENQ;
	if (flags & MEMPOOL_F_SC_GET)
		rg_flags |= RING_F_SC_DEQ;

	rte_rwlock_write_lock(RTE_EAL_MEMPOOL_RWLOCK);

	/* allocate the ring that will be used to store objects */
	/* Ring functions will return appropriate errors if we are
	 * running as a secondary process etc., so no checks made
	 * in this function for that condition */
	rte_snprintf(rg_name, sizeof(rg_name), "MP_%s", name);
	r = rte_ring_create(rg_name, rte_align32pow2(n+1), socket_id, rg_flags);
	if (r == NULL)
		goto exit;

	/*
	 * In the header, we have at least the pointer to the pool, and
	 * optionally a 64-bit cookie.
	 */
	header_size = 0;
	header_size += sizeof(struct rte_mempool *); /* ptr to pool */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	header_size += sizeof(uint64_t); /* cookie */
#endif
	if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0)
		header_size = (header_size + CACHE_LINE_MASK) & (~CACHE_LINE_MASK);

	/* trailer contains the cookie in debug mode */
	trailer_size = 0;
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	trailer_size += sizeof(uint64_t); /* cookie */
#endif
	/* element size is 8 bytes-aligned at least */
	elt_size = (elt_size + 7) & (~7);

	/* expand trailer to next cache line */
	if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0) {
		total_elt_size = header_size + elt_size + trailer_size;
		trailer_size += ((CACHE_LINE_SIZE -
				  (total_elt_size & CACHE_LINE_MASK)) &
				 CACHE_LINE_MASK);
	}

	/*
	 * increase trailer to add padding between objects in order to
	 * spread them across memory channels/ranks
	 */
	if ((flags & MEMPOOL_F_NO_SPREAD) == 0) {
		unsigned new_size;
		new_size = optimize_object_size(header_size + elt_size +
						trailer_size);
		trailer_size = new_size - header_size - elt_size;
	}

	/* this is the size of an object, including header and trailer */
	total_elt_size = header_size + elt_size + trailer_size;

	/* reserve a memory zone for this mempool: private data is
	 * cache-aligned */
	private_data_size = (private_data_size +
			     CACHE_LINE_MASK) & (~CACHE_LINE_MASK);
	mempool_size = total_elt_size * n +
		sizeof(struct rte_mempool) + private_data_size;
	rte_snprintf(mz_name, sizeof(mz_name), "MP_%s", name);

	mz = rte_memzone_reserve(mz_name, mempool_size, socket_id, mz_flags);

	/*
	 * no more memory: in this case we lose the previously reserved
	 * space, as we cannot free it
	 */
	if (mz == NULL)
		goto exit;

	/* init the mempool structure */
	mp = mz->addr;
	memset(mp, 0, sizeof(*mp));
	rte_snprintf(mp->name, sizeof(mp->name), "%s", name);
	mp->phys_addr = mz->phys_addr;
	mp->ring = r;
	mp->size = n;
	mp->flags = flags;
	mp->elt_size = elt_size;
	mp->header_size = header_size;
	mp->trailer_size = trailer_size;
	mp->cache_size = cache_size;
	mp->cache_flushthresh = (uint32_t)(cache_size * CACHE_FLUSHTHRESH_MULTIPLIER);
	mp->private_data_size = private_data_size;

	/* call the initializer */
	if (mp_init)
		mp_init(mp, mp_init_arg);

	/* fill the headers and trailers, and add objects in ring */
	obj = (char *)mp + sizeof(struct rte_mempool) + private_data_size;
	for (i = 0; i < n; i++) {
		struct rte_mempool **mpp;
		obj = (char *)obj + header_size;

		/* set mempool ptr in header */
		mpp = __mempool_from_obj(obj);
		*mpp = mp;

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
		__mempool_write_header_cookie(obj, 1);
		__mempool_write_trailer_cookie(obj);
#endif
		/* call the initializer */
		if (obj_init)
			obj_init(mp, obj_init_arg, obj, i);

		/* enqueue in ring */
		rte_ring_sp_enqueue(mp->ring, obj);
		obj = (char *)obj + elt_size + trailer_size;
	}

	RTE_EAL_TAILQ_INSERT_TAIL(RTE_TAILQ_MEMPOOL, rte_mempool_list, mp);

exit:
	rte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK);

	return mp;
}
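Finally, a hedged sketch of how a pool built by this function is consumed; the get/put calls are the standard mempool entry points, while the function name and the zeroing of the element are assumptions for illustration.

#include <string.h>
#include <rte_mempool.h>

/* Hypothetical use of a pool created as above: grab an element, use it,
 * return it to the pool. */
static int
example_mempool_usage(struct rte_mempool *mp)
{
	void *obj = NULL;

	if (rte_mempool_get(mp, &obj) < 0)
		return -1;		/* pool exhausted */

	memset(obj, 0, mp->elt_size);	/* element provides at least elt_size bytes */

	rte_mempool_put(mp, obj);
	return 0;
}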
Example #16
File: kni.c Project: daniel666/dpvs
static int kni_mc_list_cmp_set(struct netif_port *dev,
                               struct ether_addr *addrs, size_t naddr)
{
    int err = EDPVS_INVAL, i, j;
    struct ether_addr addrs_old[NETIF_MAX_HWADDR];
    size_t naddr_old;
    char mac[64];
    struct mc_change_list {
        size_t              naddr;
        struct ether_addr   addrs[NETIF_MAX_HWADDR*2];
        /* state: 0 - unchanged, 1 - added, 2 - deleted. */
        int                 states[NETIF_MAX_HWADDR*2];
    } chg_lst = {0};

    rte_rwlock_write_lock(&dev->dev_lock);

    naddr_old = NELEMS(addrs_old);
    err = __netif_mc_dump(dev, addrs_old, &naddr_old);
    if (err != EDPVS_OK) {
        RTE_LOG(ERR, Kni, "%s: fail to get current mc list\n", __func__);
        goto out;
    }

    /* make sure change list not overflow. */
    if (naddr > NETIF_MAX_HWADDR || naddr_old > NETIF_MAX_HWADDR) {
        err = EDPVS_NOROOM;
        goto out;
    }

    RTE_LOG(DEBUG, Kni, "dev %s link mcast:\n", dev->name);

    /* add all addrs from netlink(linux) to change-list and
     * assume they're all new added by default. */
    for (i = 0; i < naddr; i++) {
        ether_addr_copy(&addrs[i], &chg_lst.addrs[i]);
        chg_lst.states[i] = 1;

        RTE_LOG(DEBUG, Kni, "    new [%02d] %s\n", i,
                eth_addr_dump(&addrs[i], mac, sizeof(mac)));
    }
    chg_lst.naddr = naddr;

    /* now check for old mc list */
    for (i = 0; i < naddr_old; i++) {
        RTE_LOG(DEBUG, Kni, "    old [%02d] %s\n", i,
                eth_addr_dump(&addrs_old[i], mac, sizeof(mac)));

        for (j = 0; j < chg_lst.naddr; j++) {
            if (eth_addr_equal(&addrs_old[i], &chg_lst.addrs[j])) {
                /* already exist */
                chg_lst.states[j] = 0;
                break;
            }
        }
        if (j == chg_lst.naddr) {
            /* deleted */
            assert(chg_lst.naddr < NETIF_MAX_HWADDR * 2);

            ether_addr_copy(&addrs_old[i], &chg_lst.addrs[chg_lst.naddr]);
            chg_lst.states[chg_lst.naddr] = 2;
            chg_lst.naddr++;
        }
    }

    /* config mc list according to change list */
    for (i = 0; i < chg_lst.naddr; i++) {
        switch (chg_lst.states[i]) {
        case 0:
            /* nothing */
            break;
        case 1:
            err = __netif_mc_add(dev, &chg_lst.addrs[i]);

            RTE_LOG(INFO, Kni, "%s: add mc addr: %s %s %s\n", __func__,
                    eth_addr_dump(&chg_lst.addrs[i], mac, sizeof(mac)),
                    dev->name, dpvs_strerror(err));
            break;
        case 2:
            err = __netif_mc_del(dev, &chg_lst.addrs[i]);

            RTE_LOG(INFO, Kni, "%s: del mc addr: %s %s %s\n", __func__,
                    eth_addr_dump(&chg_lst.addrs[i], mac, sizeof(mac)),
                    dev->name, dpvs_strerror(err));
            break;
        default:
            /* should not happen. */
            RTE_LOG(ERR, Kni, "%s: invalid state for mac: %s!\n", __func__,
                    eth_addr_dump(&chg_lst.addrs[i], mac, sizeof(mac)));
            err = EDPVS_INVAL;
            goto out;
        }
    }

    err = __netif_set_mc_list(dev);

out:
    rte_rwlock_write_unlock(&dev->dev_lock);
    return err;
}
Example #17
/* create the ring */
struct rte_ring *
rte_ring_create(const char *name, unsigned count, int socket_id,
                unsigned flags)
{
    char mz_name[RTE_MEMZONE_NAMESIZE];
    struct rte_ring *r;
    const struct rte_memzone *mz;
    size_t ring_size;
    int mz_flags = 0;
    struct rte_ring_list* ring_list = NULL;

    /* compilation-time checks */
    RTE_BUILD_BUG_ON((sizeof(struct rte_ring) &
                      CACHE_LINE_MASK) != 0);
    RTE_BUILD_BUG_ON((offsetof(struct rte_ring, cons) &
                      CACHE_LINE_MASK) != 0);
    RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
                      CACHE_LINE_MASK) != 0);
#ifdef RTE_LIBRTE_RING_DEBUG
    RTE_BUILD_BUG_ON((sizeof(struct rte_ring_debug_stats) &
                      CACHE_LINE_MASK) != 0);
    RTE_BUILD_BUG_ON((offsetof(struct rte_ring, stats) &
                      CACHE_LINE_MASK) != 0);
#endif

    /* check that we have an initialised tail queue */
    if ((ring_list =
                RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_RING, rte_ring_list)) == NULL) {
        rte_errno = E_RTE_NO_TAILQ;
        return NULL;
    }

    /* count must be a power of 2 */
    if ((!POWEROF2(count)) || (count > RTE_RING_SZ_MASK )) {
        rte_errno = EINVAL;
        RTE_LOG(ERR, RING, "Requested size is invalid, must be power of 2, and "
                "do not exceed the size limit %u\n", RTE_RING_SZ_MASK);
        return NULL;
    }

    rte_snprintf(mz_name, sizeof(mz_name), "RG_%s", name);
    ring_size = count * sizeof(void *) + sizeof(struct rte_ring);

    rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

    /* reserve a memory zone for this ring. If we can't get rte_config or
     * we are a secondary process, the memzone_reserve function will set
     * rte_errno for us appropriately - hence no check in this function */
    mz = rte_memzone_reserve(mz_name, ring_size, socket_id, mz_flags);
    if (mz != NULL) {
        r = mz->addr;

        /* init the ring structure */
        memset(r, 0, sizeof(*r));
        rte_snprintf(r->name, sizeof(r->name), "%s", name);
        r->flags = flags;
        r->prod.watermark = count;
        r->prod.sp_enqueue = !!(flags & RING_F_SP_ENQ);
        r->cons.sc_dequeue = !!(flags & RING_F_SC_DEQ);
        r->prod.size = r->cons.size = count;
        r->prod.mask = r->cons.mask = count-1;
        r->prod.head = r->cons.head = 0;
        r->prod.tail = r->cons.tail = 0;

        TAILQ_INSERT_TAIL(ring_list, r, next);
    } else {
        r = NULL;
        RTE_LOG(ERR, RING, "Cannot reserve memory\n");
    }
    rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

    return r;
}