Example #1
/*
 * Allocates memory for LPM object
 */
struct rte_lpm *
rte_lpm_create(const char *name, int socket_id, int max_rules,
		__rte_unused int flags)
{
	char mem_name[RTE_LPM_NAMESIZE];
	struct rte_lpm *lpm = NULL;
	uint32_t mem_size;
	struct rte_lpm_list *lpm_list;

	/* check that we have an initialised tail queue */
	if ((lpm_list = 
	     RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM, rte_lpm_list)) == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;	
	}

	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl24_entry) != 2);
	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl8_entry) != 2);

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
		rte_errno = EINVAL;
		return NULL;
	}

	rte_snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* make sure no LPM table with this name already exists */
	TAILQ_FOREACH(lpm, lpm_list, next) {
		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	if (lpm != NULL)
		goto exit;

	/* Allocate memory to store the LPM data structures. */
	lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
			CACHE_LINE_SIZE, socket_id);
	if (lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		goto exit;
	}

	/* Save user arguments. */
	lpm->max_rules = max_rules;
	rte_snprintf(lpm->name, sizeof(lpm->name), "%s", name);

	TAILQ_INSERT_TAIL(lpm_list, lpm, next);

exit:	
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return lpm;
}
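A minimal usage sketch for this API (the table name and rule count are illustrative assumptions; SOCKET_ID_ANY is assumed available from the EAL headers):

#include <stdio.h>
#include <rte_lpm.h>
#include <rte_errno.h>

/* Hypothetical usage: create a 256-rule LPM table on any NUMA socket. */
static struct rte_lpm *
example_lpm_setup(void)
{
	struct rte_lpm *lpm;

	lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY, 256, 0);
	if (lpm == NULL)
		printf("rte_lpm_create failed: rte_errno = %d\n", rte_errno);
	return lpm;
}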
Example #2
struct rte_acl_ctx *
rte_acl_create(const struct rte_acl_param *param)
{
	size_t sz;
	struct rte_acl_ctx *ctx;
	struct rte_acl_list *acl_list;
	char name[sizeof(ctx->name)];

	/* check that we have an initialised tail queue */
	acl_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_ACL, rte_acl_list);
	if (acl_list == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;
	}

	/* check that input parameters are valid. */
	if (param == NULL || param->name == NULL) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(name, sizeof(name), "ACL_%s", param->name);

	/* calculate amount of memory required for pattern set. */
	sz = sizeof(*ctx) + param->max_rule_num * param->rule_size;

	/* get EAL TAILQ lock. */
	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* if we already have one with that name */
	TAILQ_FOREACH(ctx, acl_list, next) {
		if (strncmp(param->name, ctx->name, sizeof(ctx->name)) == 0)
			break;
	}

	/* if an ACL context with that name doesn't exist, create a new one. */
	if (ctx == NULL && (ctx = rte_zmalloc_socket(name, sz, CACHE_LINE_SIZE,
			param->socket_id)) != NULL) {

		/* init new allocated context. */
		ctx->rules = ctx + 1;
		ctx->max_rules = param->max_rule_num;
		ctx->rule_sz = param->rule_size;
		ctx->socket_id = param->socket_id;
		snprintf(ctx->name, sizeof(ctx->name), "%s", param->name);

		TAILQ_INSERT_TAIL(acl_list, ctx, next);

	} else if (ctx == NULL) {
		RTE_LOG(ERR, ACL,
			"allocation of %zu bytes on socket %d for %s failed\n",
			sz, param->socket_id, name);
	}

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
	return ctx;
}
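A hedged usage sketch for this API; the context name, rule count, and the field count passed to RTE_ACL_RULE_SZ() are illustrative assumptions:

#include <stdio.h>
#include <rte_acl.h>
#include <rte_errno.h>

/* Hypothetical usage: an ACL context sized for 1024 five-field rules. */
static struct rte_acl_ctx *
example_acl_setup(void)
{
	struct rte_acl_param param = {
		.name = "example_acl",
		.socket_id = SOCKET_ID_ANY,
		.rule_size = RTE_ACL_RULE_SZ(5),
		.max_rule_num = 1024,
	};
	struct rte_acl_ctx *ctx = rte_acl_create(&param);

	if (ctx == NULL)
		printf("rte_acl_create failed: rte_errno = %d\n", rte_errno);
	return ctx;
}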
Example #3
static int
test_tailq_lookup(void)
{
    /* run a successful test - check that the result is found */
    struct rte_dummy_head *d_head;
    struct rte_dummy *d_ptr;

    d_head = RTE_TAILQ_LOOKUP_BY_IDX(DEFAULT_TAILQ, rte_dummy_head);
    if (d_head == NULL)
        do_return("Error with tailq lookup\n");

    TAILQ_FOREACH(d_ptr, d_head, next)
        if (d_ptr != &d_elem)
            do_return("Error with tailq returned from lookup - "
                      "expected element not found\n");

    /* now try a bad/error lookup */
    d_head = RTE_TAILQ_LOOKUP_BY_IDX(RTE_MAX_TAILQ, rte_dummy_head);
    if (d_head != NULL)
        do_return("Error, lookup does not return NULL for bad tailq name\n");

    return 0;
}
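The do_return() helper is not shown in this excerpt; judging from its use, it logs a message and makes the test function return non-zero. A plausible minimal definition (an assumption, not the test suite's actual macro):

/* Assumed helper: print the error and fail the enclosing test. */
#define do_return(...) do {                                   \
	printf("Error at %s, line %d: ", __func__, __LINE__); \
	printf(__VA_ARGS__);                                  \
	return 1;                                             \
} while (0)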
Example #4
/* dump the status of all mempools on the console */
void
rte_mempool_list_dump(void)
{
	const struct rte_mempool *mp = NULL;
	struct rte_mempool_list *mempool_list;

	if ((mempool_list = 
	     RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL, rte_mempool_list)) == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return;	
	}

	rte_rwlock_read_lock(RTE_EAL_MEMPOOL_RWLOCK);

	TAILQ_FOREACH(mp, mempool_list, next) {
		rte_mempool_dump(mp);
	}

	rte_rwlock_read_unlock(RTE_EAL_MEMPOOL_RWLOCK);
}
Example #5
/*
 * Dump all ACL contexts to the stdout.
 */
void
rte_acl_list_dump(void)
{
	struct rte_acl_ctx *ctx;
	struct rte_acl_list *acl_list;

	/* check that we have an initialised tail queue */
	acl_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_ACL, rte_acl_list);
	if (acl_list == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return;
	}

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(ctx, acl_list, next) {
		rte_acl_dump(ctx);
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
}
Example #6
void
rte_ring_list_dump(void)
{
    const struct rte_ring *mp;
    struct rte_ring_list *ring_list;

    /* check that we have an initialised tail queue */
    if ((ring_list =
         RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_RING, rte_ring_list)) == NULL) {
        rte_errno = E_RTE_NO_TAILQ;
        return;
    }

    rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);

    TAILQ_FOREACH(mp, ring_list, next) {
        rte_ring_dump(mp);
    }

    rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
}
Example #7
struct rte_hash *
rte_hash_find_existing(const char *name)
{
	struct rte_hash *h = NULL;
	struct rte_tailq_entry *te;
	struct rte_hash_list *hash_list;

	/* check that we have an initialised tail queue */
	if ((hash_list =
			RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_HASH, rte_hash_list)) == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;
	}

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, hash_list, next) {
		h = (struct rte_hash *) te->data;
		if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return h;
}
Example #8
struct rte_acl_ctx *
rte_acl_find_existing(const char *name)
{
	struct rte_acl_ctx *ctx = NULL;
	struct rte_acl_list *acl_list;
	struct rte_tailq_entry *te;

	/* check that we have an initialised tail queue */
	acl_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_ACL, rte_acl_list);
	if (acl_list == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;
	}

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, acl_list, next) {
		ctx = (struct rte_acl_ctx *) te->data;
		if (strncmp(name, ctx->name, sizeof(ctx->name)) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return ctx;
}
Example #9
/*
 * Find an existing lpm table and return a pointer to it.
 */
struct rte_lpm *
rte_lpm_find_existing(const char *name)
{
	struct rte_lpm *l;
	struct rte_lpm_list *lpm_list;

	/* check that we have an initialised tail queue */
	if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM, rte_lpm_list)) == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;
	}

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(l, lpm_list, next) {
		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (l == NULL)
		rte_errno = ENOENT;

	return l;
}
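Combined with rte_lpm_create() from Example #1, this enables a simple lookup-or-create idiom; a sketch, reusing the hypothetical name and size from earlier:

/* Sketch: attach to an existing table by name, else create it. */
struct rte_lpm *lpm = rte_lpm_find_existing("example_lpm");
if (lpm == NULL && rte_errno == ENOENT)
	lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY, 256, 0);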
Example #10
/* create the ring */
struct rte_ring *
rte_ring_create(const char *name, unsigned count, int socket_id,
                unsigned flags)
{
    char mz_name[RTE_MEMZONE_NAMESIZE];
    struct rte_ring *r;
    const struct rte_memzone *mz;
    size_t ring_size;
    int mz_flags = 0;
    struct rte_ring_list* ring_list = NULL;

    /* compilation-time checks */
    RTE_BUILD_BUG_ON((sizeof(struct rte_ring) &
                      CACHE_LINE_MASK) != 0);
    RTE_BUILD_BUG_ON((offsetof(struct rte_ring, cons) &
                      CACHE_LINE_MASK) != 0);
    RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
                      CACHE_LINE_MASK) != 0);
#ifdef RTE_LIBRTE_RING_DEBUG
    RTE_BUILD_BUG_ON((sizeof(struct rte_ring_debug_stats) &
                      CACHE_LINE_MASK) != 0);
    RTE_BUILD_BUG_ON((offsetof(struct rte_ring, stats) &
                      CACHE_LINE_MASK) != 0);
#endif

    /* check that we have an initialised tail queue */
    if ((ring_list =
                RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_RING, rte_ring_list)) == NULL) {
        rte_errno = E_RTE_NO_TAILQ;
        return NULL;
    }

    /* count must be a power of 2 */
    if ((!POWEROF2(count)) || (count > RTE_RING_SZ_MASK)) {
        rte_errno = EINVAL;
        RTE_LOG(ERR, RING, "Requested size is invalid: it must be a power of "
                "2 and must not exceed the size limit %u\n", RTE_RING_SZ_MASK);
        return NULL;
    }

    rte_snprintf(mz_name, sizeof(mz_name), "RG_%s", name);
    ring_size = count * sizeof(void *) + sizeof(struct rte_ring);

    rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

    /* reserve a memory zone for this ring. If we can't get rte_config or
     * we are a secondary process, the memzone_reserve function will set
     * rte_errno for us appropriately - hence no check in this function */
    mz = rte_memzone_reserve(mz_name, ring_size, socket_id, mz_flags);
    if (mz != NULL) {
        r = mz->addr;

        /* init the ring structure */
        memset(r, 0, sizeof(*r));
        rte_snprintf(r->name, sizeof(r->name), "%s", name);
        r->flags = flags;
        r->prod.watermark = count;
        r->prod.sp_enqueue = !!(flags & RING_F_SP_ENQ);
        r->cons.sc_dequeue = !!(flags & RING_F_SC_DEQ);
        r->prod.size = r->cons.size = count;
        r->prod.mask = r->cons.mask = count-1;
        r->prod.head = r->cons.head = 0;
        r->prod.tail = r->cons.tail = 0;

        TAILQ_INSERT_TAIL(ring_list, r, next);
    } else {
        r = NULL;
        RTE_LOG(ERR, RING, "Cannot reserve memory\n");
    }
    rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

    return r;
}
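A usage sketch (the ring name and size are illustrative; the flags are the ones referenced in the initialization code above):

#include <stdio.h>
#include <rte_ring.h>
#include <rte_errno.h>

/* Hypothetical usage: a 1024-slot single-producer/single-consumer ring. */
static struct rte_ring *
example_ring_setup(void)
{
	struct rte_ring *r;

	r = rte_ring_create("example_ring", 1024, SOCKET_ID_ANY,
			    RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (r == NULL)
		printf("rte_ring_create failed: rte_errno = %d\n", rte_errno);
	return r;
}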
Example #11
/* create the mempool */
struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
		   unsigned cache_size, unsigned private_data_size,
		   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		   rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
		   int socket_id, unsigned flags)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	char rg_name[RTE_RING_NAMESIZE];
	struct rte_mempool *mp = NULL;
	struct rte_ring *r;
	const struct rte_memzone *mz;
	size_t mempool_size, total_elt_size;
	int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
	int rg_flags = 0;
	uint32_t header_size, trailer_size;
	unsigned i;
	void *obj;

	/* compilation-time checks */
	RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) &
			  CACHE_LINE_MASK) != 0);
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
	RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_cache) &
			  CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, local_cache) &
			  CACHE_LINE_MASK) != 0);
#endif
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_debug_stats) &
			  CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, stats) &
			  CACHE_LINE_MASK) != 0);
#endif

	/* check that we have an initialised tail queue */
	if (RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL, rte_mempool_list) == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;	
	}
	
	/* requested cache size is too big */
	if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* "no cache align" imply "no spread" */
	if (flags & MEMPOOL_F_NO_CACHE_ALIGN)
		flags |= MEMPOOL_F_NO_SPREAD;

	/* ring flags */
	if (flags & MEMPOOL_F_SP_PUT)
		rg_flags |= RING_F_SP_ENQ;
	if (flags & MEMPOOL_F_SC_GET)
		rg_flags |= RING_F_SC_DEQ;

	rte_rwlock_write_lock(RTE_EAL_MEMPOOL_RWLOCK);

	/* allocate the ring that will be used to store objects */
	/* Ring functions will return appropriate errors if we are
	 * running as a secondary process etc., so no checks are made
	 * in this function for that condition */
	rte_snprintf(rg_name, sizeof(rg_name), "MP_%s", name);
	r = rte_ring_create(rg_name, rte_align32pow2(n+1), socket_id, rg_flags);
	if (r == NULL)
		goto exit;

	/*
	 * In the header, we have at least the pointer to the pool, and
	 * optionally a 64-bit cookie.
	 */
	header_size = 0;
	header_size += sizeof(struct rte_mempool *); /* ptr to pool */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	header_size += sizeof(uint64_t); /* cookie */
#endif
	if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0)
		header_size = (header_size + CACHE_LINE_MASK) & (~CACHE_LINE_MASK);

	/* trailer contains the cookie in debug mode */
	trailer_size = 0;
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	trailer_size += sizeof(uint64_t); /* cookie */
#endif
	/* element size is at least 8-byte aligned */
	elt_size = (elt_size + 7) & (~7);

	/* expand trailer to next cache line */
	if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0) {
		total_elt_size = header_size + elt_size + trailer_size;
		trailer_size += ((CACHE_LINE_SIZE -
				  (total_elt_size & CACHE_LINE_MASK)) &
				 CACHE_LINE_MASK);
	}

	/*
	 * increase the trailer to add padding between objects in order to
	 * spread them across memory channels/ranks
	 */
	if ((flags & MEMPOOL_F_NO_SPREAD) == 0) {
		unsigned new_size;
		new_size = optimize_object_size(header_size + elt_size +
						trailer_size);
		trailer_size = new_size - header_size - elt_size;
	}

	/* this is the size of an object, including header and trailer */
	total_elt_size = header_size + elt_size + trailer_size;

	/* reserve a memory zone for this mempool: private data is
	 * cache-aligned */
	private_data_size = (private_data_size +
			     CACHE_LINE_MASK) & (~CACHE_LINE_MASK);
	mempool_size = total_elt_size * n +
		sizeof(struct rte_mempool) + private_data_size;
	rte_snprintf(mz_name, sizeof(mz_name), "MP_%s", name);

	mz = rte_memzone_reserve(mz_name, mempool_size, socket_id, mz_flags);

	/*
	 * no more memory: in this case we lose the space previously
	 * reserved for the ring, as we cannot free it
	 */
	if (mz == NULL)
		goto exit;

	/* init the mempool structure */
	mp = mz->addr;
	memset(mp, 0, sizeof(*mp));
	rte_snprintf(mp->name, sizeof(mp->name), "%s", name);
	mp->phys_addr = mz->phys_addr;
	mp->ring = r;
	mp->size = n;
	mp->flags = flags;
	mp->elt_size = elt_size;
	mp->header_size = header_size;
	mp->trailer_size = trailer_size;
	mp->cache_size = cache_size;
	mp->cache_flushthresh = (uint32_t)(cache_size * CACHE_FLUSHTHRESH_MULTIPLIER);
	mp->private_data_size = private_data_size;

	/* call the initializer */
	if (mp_init)
		mp_init(mp, mp_init_arg);

	/* fill the headers and trailers, and add the objects to the ring */
	obj = (char *)mp + sizeof(struct rte_mempool) + private_data_size;
	for (i = 0; i < n; i++) {
		struct rte_mempool **mpp;
		obj = (char *)obj + header_size;

		/* set mempool ptr in header */
		mpp = __mempool_from_obj(obj);
		*mpp = mp;

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
		__mempool_write_header_cookie(obj, 1);
		__mempool_write_trailer_cookie(obj);
#endif
		/* call the initializer */
		if (obj_init)
			obj_init(mp, obj_init_arg, obj, i);

		/* enqueue in ring */
		rte_ring_sp_enqueue(mp->ring, obj);
		obj = (char *)obj + elt_size + trailer_size;
	}

	RTE_EAL_TAILQ_INSERT_TAIL(RTE_TAILQ_MEMPOOL, rte_mempool_list, mp);

exit:
	rte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK);

	return mp;
}
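A usage sketch (all sizes are illustrative; n = 8191 is chosen so the internal ring's rounded-up capacity of rte_align32pow2(n+1) wastes no slots):

#include <stdio.h>
#include <rte_mempool.h>
#include <rte_errno.h>

/* Hypothetical usage: 8191 objects of 2 KB each with a 32-object cache,
 * no private data, and no pool/object constructors. */
static struct rte_mempool *
example_mempool_setup(void)
{
	struct rte_mempool *mp;

	mp = rte_mempool_create("example_pool", 8191, 2048, 32, 0,
				NULL, NULL,	/* no pool constructor */
				NULL, NULL,	/* no per-object constructor */
				SOCKET_ID_ANY, 0);
	if (mp == NULL)
		printf("rte_mempool_create failed: rte_errno = %d\n", rte_errno);
	return mp;
}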
Example #12
struct rte_hash *
rte_hash_create(const struct rte_hash_parameters *params)
{
	struct rte_hash *h = NULL;
	uint32_t num_buckets, sig_bucket_size, key_size,
		hash_tbl_size, sig_tbl_size, key_tbl_size, mem_size;
	char hash_name[RTE_HASH_NAMESIZE];
	struct rte_hash_list *hash_list;

	/* check that we have an initialised tail queue */
	if ((hash_list = 
	     RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_HASH, rte_hash_list)) == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;	
	}

	/* Check for valid parameters */
	if ((params == NULL) ||
			(params->entries > RTE_HASH_ENTRIES_MAX) ||
			(params->bucket_entries > RTE_HASH_BUCKET_ENTRIES_MAX) ||
			(params->entries < params->bucket_entries) ||
			!rte_is_power_of_2(params->entries) ||
			!rte_is_power_of_2(params->bucket_entries) ||
			(params->key_len == 0) ||
			(params->key_len > RTE_HASH_KEY_LENGTH_MAX)) {
		rte_errno = EINVAL;
		RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
		return NULL;
	}

	rte_snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);

	/* Calculate hash dimensions */
	num_buckets = params->entries / params->bucket_entries;
	sig_bucket_size = align_size(params->bucket_entries *
				     sizeof(hash_sig_t), SIG_BUCKET_ALIGNMENT);
	key_size =  align_size(params->key_len, KEY_ALIGNMENT);

	hash_tbl_size = align_size(sizeof(struct rte_hash), CACHE_LINE_SIZE);
	sig_tbl_size = align_size(num_buckets * sig_bucket_size,
				  CACHE_LINE_SIZE);
	key_tbl_size = align_size(num_buckets * key_size *
				  params->bucket_entries, CACHE_LINE_SIZE);
	
	/* Total memory required for hash context */
	mem_size = hash_tbl_size + sig_tbl_size + key_tbl_size;

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* make sure no hash table with this name already exists */
	TAILQ_FOREACH(h, hash_list, next) {
		if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
			break;
	}
	if (h != NULL)
		goto exit;

	h = (struct rte_hash *)rte_zmalloc_socket(hash_name, mem_size,
					   CACHE_LINE_SIZE, params->socket_id);
	if (h == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto exit;
	}

	/* Setup hash context */
	rte_snprintf(h->name, sizeof(h->name), "%s", params->name);
	h->entries = params->entries;
	h->bucket_entries = params->bucket_entries;
	h->key_len = params->key_len;
	h->hash_func_init_val = params->hash_func_init_val;
	h->num_buckets = num_buckets;
	h->bucket_bitmask = h->num_buckets - 1;
	h->sig_msb = 1 << (sizeof(hash_sig_t) * 8 - 1);
	h->sig_tbl = (uint8_t *)h + hash_tbl_size;
	h->sig_tbl_bucket_size = sig_bucket_size;
	h->key_tbl = h->sig_tbl + sig_tbl_size;
	h->key_tbl_key_size = key_size;
	h->hash_func = (params->hash_func == NULL) ?
		DEFAULT_HASH_FUNC : params->hash_func;

	TAILQ_INSERT_TAIL(hash_list, h, next);

exit:
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return h;
}
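A usage sketch, exercising only the parameter fields that the code above reads; the concrete values are assumptions that satisfy its validity checks (power-of-2 entry counts, entries >= bucket_entries):

#include <stdio.h>
#include <rte_hash.h>
#include <rte_errno.h>

/* Hypothetical usage: 1024 entries, 4 per bucket, 16-byte keys, and the
 * library's default hash function (selected by passing NULL). */
static struct rte_hash *
example_hash_setup(void)
{
	struct rte_hash_parameters params = {
		.name = "example_hash",
		.entries = 1024,
		.bucket_entries = 4,
		.key_len = 16,
		.hash_func = NULL,
		.hash_func_init_val = 0,
		.socket_id = SOCKET_ID_ANY,
	};
	struct rte_hash *h = rte_hash_create(&params);

	if (h == NULL)
		printf("rte_hash_create failed: rte_errno = %d\n", rte_errno);
	return h;
}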