Example #1
/**
 * Put an Ethernet address and port index into the MAC table
 *
 * @param mac        Pointer to Ethernet address
 * @param port       Port index
 */
static inline void mac_table_put(odph_ethaddr_t *mac, uint8_t port)
{
	mac_tbl_entry_t entry;
	uint16_t idx;

	idx = calc_mac_tbl_idx(mac);

	entry.s.mac = *mac;
	entry.s.port = port;

	odp_atomic_store_u64(&gbl_args->mac_tbl[idx], entry.u64);
}
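A minimal usage sketch (not from the original source) of how this helper might be called when learning the source MAC of a received frame. The function learn_src_mac and its arguments are hypothetical; it assumes gbl_args->mac_tbl and calc_mac_tbl_idx() are set up as in the example above, and uses the standard ODP packet and helper Ethernet header APIs.

/* Hypothetical usage sketch: learn the source MAC of a received frame.
 * Assumes gbl_args->mac_tbl has been initialized elsewhere. */
static inline void learn_src_mac(odp_packet_t pkt, uint8_t in_port)
{
	odph_ethhdr_t *eth = odp_packet_l2_ptr(pkt, NULL);

	if (eth != NULL)
		mac_table_put(&eth->src, in_port);
}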
Example #2
odp_pool_t odp_pool_create(const char *name, odp_pool_param_t *params)
{
	odp_pool_t pool_hdl = ODP_POOL_INVALID;
	pool_entry_t *pool;
	uint32_t i, headroom = 0, tailroom = 0;
	odp_shm_t shm;

	if (params == NULL)
		return ODP_POOL_INVALID;

	/* Default size and align for timeouts */
	if (params->type == ODP_POOL_TIMEOUT) {
		params->buf.size  = 0; /* tmo.__res1 */
		params->buf.align = 0; /* tmo.__res2 */
	}

	/* Default initialization parameters */
	uint32_t p_udata_size = 0;
	uint32_t udata_stride = 0;

	/* Restriction for v1.0: All non-packet buffers are unsegmented */
	int unseg = 1;

	/* Restriction for v1.0: No zeroization support */
	const int zeroized = 0;

	uint32_t blk_size, buf_stride, buf_num, seg_len = 0;
	uint32_t buf_align =
		params->type == ODP_POOL_BUFFER ? params->buf.align : 0;

	/* Validate requested buffer alignment */
	if (buf_align > ODP_CONFIG_BUFFER_ALIGN_MAX ||
	    buf_align != ODP_ALIGN_ROUNDDOWN_POWER_2(buf_align, buf_align))
		return ODP_POOL_INVALID;

	/* Set correct alignment based on input request */
	if (buf_align == 0)
		buf_align = ODP_CACHE_LINE_SIZE;
	else if (buf_align < ODP_CONFIG_BUFFER_ALIGN_MIN)
		buf_align = ODP_CONFIG_BUFFER_ALIGN_MIN;

	/* Calculate space needed for buffer blocks and metadata */
	switch (params->type) {
	case ODP_POOL_BUFFER:
		buf_num  = params->buf.num;
		blk_size = params->buf.size;

		/* Optimize small raw buffers */
		if (blk_size > ODP_MAX_INLINE_BUF || params->buf.align != 0)
			blk_size = ODP_ALIGN_ROUNDUP(blk_size, buf_align);

		buf_stride = sizeof(odp_buffer_hdr_stride);
		break;

	case ODP_POOL_PACKET:
		unseg = 0; /* Packets are always segmented */
		headroom = ODP_CONFIG_PACKET_HEADROOM;
		tailroom = ODP_CONFIG_PACKET_TAILROOM;

		buf_num = params->pkt.num + 1; /* one more for pkt_ctx */

		seg_len = params->pkt.seg_len <= ODP_CONFIG_PACKET_SEG_LEN_MIN ?
			ODP_CONFIG_PACKET_SEG_LEN_MIN :
			(params->pkt.seg_len <= ODP_CONFIG_PACKET_SEG_LEN_MAX ?
			 params->pkt.seg_len : ODP_CONFIG_PACKET_SEG_LEN_MAX);

		seg_len = ODP_ALIGN_ROUNDUP(
			headroom + seg_len + tailroom,
			ODP_CONFIG_BUFFER_ALIGN_MIN);

		blk_size = params->pkt.len <= seg_len ? seg_len :
			ODP_ALIGN_ROUNDUP(params->pkt.len, seg_len);

		/* Reject create if pkt.len needs too many segments */
		if (blk_size / seg_len > ODP_BUFFER_MAX_SEG)
			return ODP_POOL_INVALID;

		p_udata_size = params->pkt.uarea_size;
		udata_stride = ODP_ALIGN_ROUNDUP(p_udata_size,
						 sizeof(uint64_t));

		buf_stride = sizeof(odp_packet_hdr_stride);
		break;

	case ODP_POOL_TIMEOUT:
		blk_size = 0;
		buf_num = params->tmo.num;
		buf_stride = sizeof(odp_timeout_hdr_stride);
		break;

	default:
		return ODP_POOL_INVALID;
	}

	/* Validate requested number of buffers against addressable limits */
	if (buf_num >
	    (ODP_BUFFER_MAX_BUFFERS / (buf_stride / ODP_CACHE_LINE_SIZE)))
		return ODP_POOL_INVALID;

	/* Find an unused buffer pool slot and initialize it as requested */
	for (i = 0; i < ODP_CONFIG_POOLS; i++) {
		pool = get_pool_entry(i);

		POOL_LOCK(&pool->s.lock);
		if (pool->s.pool_shm != ODP_SHM_INVALID) {
			POOL_UNLOCK(&pool->s.lock);
			continue;
		}

		/* found free pool */
		size_t block_size, pad_size, mdata_size, udata_size;

		pool->s.flags.all = 0;

		if (name == NULL) {
			pool->s.name[0] = 0;
		} else {
			strncpy(pool->s.name, name,
				ODP_POOL_NAME_LEN - 1);
			pool->s.name[ODP_POOL_NAME_LEN - 1] = 0;
			pool->s.flags.has_name = 1;
		}

		pool->s.params = *params;
		pool->s.buf_align = buf_align;

		/* Optimize for short buffers: Data stored in buffer hdr */
		if (blk_size <= ODP_MAX_INLINE_BUF) {
			block_size = 0;
			pool->s.buf_align = blk_size == 0 ? 0 : sizeof(void *);
		} else {
			/* Extra space per buffer stores the header back-pointer */
			block_size = buf_num * (blk_size +
						ODP_HDR_BACK_PTR_SIZE);
			pool->s.buf_align = buf_align;
		}

		pad_size = ODP_CACHE_LINE_SIZE_ROUNDUP(block_size) - block_size;
		mdata_size = buf_num * buf_stride;
		udata_size = buf_num * udata_stride;

		pool->s.buf_num  = buf_num;
		pool->s.pool_size = ODP_PAGE_SIZE_ROUNDUP(block_size +
							  pad_size +
							  mdata_size +
							  udata_size);

		shm = odp_shm_reserve(pool->s.name, pool->s.pool_size,
			ODP_PAGE_SIZE, ODP_SHM_MONOPOLIZE_CNTNUS_PHY);
		if (shm == ODP_SHM_INVALID) {
			POOL_UNLOCK(&pool->s.lock);
			return ODP_POOL_INVALID;
		}
		pool->s.pool_base_addr = odp_shm_addr(shm);
		pool->s.pool_shm = shm;

		/* Now safe to unlock since pool entry has been allocated */
		POOL_UNLOCK(&pool->s.lock);

		pool->s.flags.unsegmented = unseg;
		pool->s.flags.zeroized = zeroized;
		pool->s.seg_size = unseg ? blk_size : seg_len;
		pool->s.blk_size = blk_size;

		uint8_t *block_base_addr = pool->s.pool_base_addr;
		uint8_t *mdata_base_addr =
			block_base_addr + block_size + pad_size;
		uint8_t *udata_base_addr = mdata_base_addr + mdata_size;

		uint64_t pool_base_phy = odp_v2p(pool->s.pool_base_addr);

		pool->s.v_p_offset = (uint64_t)pool->s.pool_base_addr -
				pool_base_phy;

		/* Pool mdata addr is used for indexing buffer metadata */
		pool->s.pool_mdata_addr = mdata_base_addr;
		pool->s.udata_size = p_udata_size;

		pool->s.buf_stride = buf_stride;
		pool->s.buf_freelist = NULL;
		pool->s.blk_freelist = NULL;

		/* Initialization will increment these to their target vals */
		odp_atomic_store_u32(&pool->s.bufcount, 0);
		odp_atomic_store_u32(&pool->s.blkcount, 0);

		uint8_t *buf = udata_base_addr - buf_stride;
		uint8_t *udat = udata_stride == 0 ? NULL :
			udata_base_addr + udata_size - udata_stride;

		/* Init buffer common header and add to pool buffer freelist */
		do {
			odp_buffer_hdr_t *tmp =
				(odp_buffer_hdr_t *)(void *)buf;

			/* Initialize buffer metadata */
			tmp->allocator = ODP_FREEBUF;
			tmp->flags.all = 0;
			tmp->flags.zeroized = zeroized;
			tmp->size = 0;
			odp_atomic_init_u32(&tmp->ref_count, 0);
			tmp->type = params->type;
			tmp->event_type = params->type;
			tmp->pool_hdl = pool->s.pool_hdl;
			tmp->uarea_addr = (void *)udat;
			tmp->uarea_size = p_udata_size;
			tmp->segcount = 0;
			tmp->segsize = pool->s.seg_size;
			tmp->handle.handle = odp_buffer_encode_handle(tmp);

			/* Set 1st seg addr for zero-len buffers */
			tmp->addr[0] = NULL;

			/* Special case for short buffer data */
			if (blk_size <= ODP_MAX_INLINE_BUF) {
				tmp->flags.hdrdata = 1;
				if (blk_size > 0) {
					tmp->segcount = 1;
					tmp->addr[0] = &tmp->addr[1];
					tmp->size = blk_size;
				}
			}

			/* Push buffer onto pool's freelist */
			ret_buf(&pool->s, tmp);
			buf  -= buf_stride;
			udat -= udata_stride;
		} while (buf >= mdata_base_addr);

		/* Make sure the block area is divided into segments whose size
		 * is aligned to 8 bytes, as odp_packet_seg_t encodes both an
		 * address and a segment count. pool->s.seg_size is already
		 * 8-byte aligned at this point.
		 */
		pool->s.seg_size = ODP_ALIGN_ROUNDUP(pool->s.seg_size,
							sizeof(uint64_t));
		/* Form block freelist for pool */
		uint8_t *blk =
			block_base_addr + block_size - pool->s.seg_size -
						ODP_HDR_BACK_PTR_SIZE;
		if (blk_size > ODP_MAX_INLINE_BUF)
			do {
				ret_blk(&pool->s, blk + ODP_HDR_BACK_PTR_SIZE);
				blk -= (pool->s.seg_size +
					ODP_HDR_BACK_PTR_SIZE);
			} while (blk >= block_base_addr);

		/* For packet pools, a pre-initialized packet header template
		 * is cached in the pool entry.
		 */
		if (params->type == ODP_POOL_PACKET) {
			odp_buffer_hdr_t *bh = get_buf(&pool->s);
			uint8_t *pkt_ctx = ((uint8_t *)bh +
				ODP_FIELD_SIZEOF(odp_packet_hdr_t, buf_hdr));

			memset(pkt_ctx, 0,  sizeof(odp_packet_hdr_t) -
				ODP_FIELD_SIZEOF(odp_packet_hdr_t, buf_hdr));
			((odp_packet_hdr_t *)bh)->l3_offset =
				ODP_PACKET_OFFSET_INVALID;
			((odp_packet_hdr_t *)bh)->l4_offset =
				ODP_PACKET_OFFSET_INVALID;
			((odp_packet_hdr_t *)bh)->payload_offset =
				ODP_PACKET_OFFSET_INVALID;
			((odp_packet_hdr_t *)bh)->headroom  = headroom;
			pool->s.cache_pkt_hdr = pkt_ctx;
			pool->s.buf_num -= 1;
		}

		/* Every pool type has a max_size limit; at alloc time a
		 * request only needs to be compared against max_size.
		 */
		if (!unseg)
			pool->s.max_size = pool->s.seg_size *
				ODP_BUFFER_MAX_SEG -
				headroom - tailroom;
		else
			pool->s.max_size = pool->s.seg_size;

		/* Initialize pool statistics counters */
		odp_atomic_store_u64(&pool->s.poolstats.bufallocs, 0);
		odp_atomic_store_u64(&pool->s.poolstats.buffrees, 0);
		odp_atomic_store_u64(&pool->s.poolstats.blkallocs, 0);
		odp_atomic_store_u64(&pool->s.poolstats.blkfrees, 0);
		odp_atomic_store_u64(&pool->s.poolstats.bufempty, 0);
		odp_atomic_store_u64(&pool->s.poolstats.blkempty, 0);
		odp_atomic_store_u64(&pool->s.poolstats.high_wm_count, 0);
		odp_atomic_store_u64(&pool->s.poolstats.low_wm_count, 0);

		/* Reset other pool globals to initial state */
		pool->s.low_wm_assert = 0;
		pool->s.quiesced = 0;
		pool->s.headroom = headroom;
		pool->s.tailroom = tailroom;

		pool->s.room_size = headroom + tailroom;

		/* Watermarks are hard-coded for now to control caching */
		pool->s.high_wm = pool->s.buf_num / 2;
		pool->s.low_wm  = pool->s.buf_num / 4;
		pool_hdl = pool->s.pool_hdl;
		break;
	}

	return pool_hdl;
}
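A minimal sketch of how a caller might request a packet pool from this implementation, using the odp_pool_param_t packet fields read in the code above (pkt.num, pkt.len, pkt.seg_len, pkt.uarea_size). The pool name and the sizes are illustrative assumptions, not tuned values.

/* Hypothetical usage sketch: create a packet pool with this implementation. */
odp_pool_param_t params;
odp_pool_t pool;

memset(&params, 0, sizeof(params));
params.type           = ODP_POOL_PACKET;
params.pkt.num        = 1024;  /* number of packets in the pool */
params.pkt.len        = 1518;  /* packet length the pool must support */
params.pkt.seg_len    = 0;     /* let the implementation choose */
params.pkt.uarea_size = 0;     /* no per-packet user area */

pool = odp_pool_create("example_pkt_pool", &params);
if (pool == ODP_POOL_INVALID)
	abort();  /* handle the error as appropriate */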
Example #3
void test_atomic_store(void)
{
	odp_atomic_store_u32(&a32u, U32_INIT_VAL);
	odp_atomic_store_u64(&a64u, U64_INIT_VAL);
}
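A hedged companion sketch that reads the stored values back with odp_atomic_load_u32/u64. The check_atomic_store function is hypothetical; a32u, a64u and the *_INIT_VAL constants are assumed to come from the surrounding test fixture, and CU_ASSERT is the CUnit macro used by the ODP validation tests.

/* Hypothetical companion check: read back the values stored above. */
static void check_atomic_store(void)
{
	CU_ASSERT(odp_atomic_load_u32(&a32u) == U32_INIT_VAL);
	CU_ASSERT(odp_atomic_load_u64(&a64u) == U64_INIT_VAL);
}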