Example #1
int
rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
	unsigned flags)
{
	/* compilation-time checks */
	RTE_BUILD_BUG_ON((sizeof(struct rte_ring) &
			  RTE_CACHE_LINE_MASK) != 0);
#ifdef RTE_RING_SPLIT_PROD_CONS
	RTE_BUILD_BUG_ON((offsetof(struct rte_ring, cons) &
			  RTE_CACHE_LINE_MASK) != 0);
#endif
	RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
			  RTE_CACHE_LINE_MASK) != 0);
#ifdef RTE_LIBRTE_RING_DEBUG
	RTE_BUILD_BUG_ON((sizeof(struct rte_ring_debug_stats) &
			  RTE_CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((offsetof(struct rte_ring, stats) &
			  RTE_CACHE_LINE_MASK) != 0);
#endif

	/* init the ring structure */
	memset(r, 0, sizeof(*r));
	snprintf(r->name, sizeof(r->name), "%s", name);
	r->flags = flags;
	r->prod.watermark = count;
	r->prod.sp_enqueue = !!(flags & RING_F_SP_ENQ);
	r->cons.sc_dequeue = !!(flags & RING_F_SC_DEQ);
	r->prod.size = r->cons.size = count;
	r->prod.mask = r->cons.mask = count-1;
	r->prod.head = r->cons.head = 0;
	r->prod.tail = r->cons.tail = 0;

	return 0;
}
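The RTE_BUILD_BUG_ON calls above turn layout invariants (the ring struct and its prod/cons fields being cache-line sized and aligned) into compile errors instead of runtime surprises. As a sketch, the macro can be written with the classic negative-array-size trick, essentially what older DPDK releases use in rte_common.h (newer releases map it onto C11 static_assert):

/* Compile-time assertion: if `condition` is non-zero, the array type
 * below has size 1 - 2*1 = -1 and the compiler rejects it. */
#define RTE_BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))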
Example #2
/*
 * Allocates memory for LPM object
 */
struct rte_lpm *
rte_lpm_create(const char *name, int socket_id, int max_rules,
		__rte_unused int flags)
{
	char mem_name[RTE_LPM_NAMESIZE];
	struct rte_lpm *lpm = NULL;
	uint32_t mem_size;
	struct rte_lpm_list *lpm_list;

	/* check that we have an initialised tail queue */
	if ((lpm_list = 
	     RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM, rte_lpm_list)) == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;	
	}

	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl24_entry) != 2);
	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl8_entry) != 2);

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
		rte_errno = EINVAL;
		return NULL;
	}

	rte_snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* guarantee there's no existing entry with the same name */
	TAILQ_FOREACH(lpm, lpm_list, next) {
		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	if (lpm != NULL)
		goto exit;

	/* Allocate memory to store the LPM data structures. */
	lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
			CACHE_LINE_SIZE, socket_id);
	if (lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		goto exit;
	}

	/* Save user arguments. */
	lpm->max_rules = max_rules;
	rte_snprintf(lpm->name, sizeof(lpm->name), "%s", name);

	TAILQ_INSERT_TAIL(lpm_list, lpm, next);

exit:	
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return lpm;
}
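For context, a minimal caller sketch for this legacy create API; the table name, socket id and rule count below are made up for illustration:

/* Hypothetical usage: LPM table on NUMA socket 0 with room for 256
 * rules, assuming the EAL is already initialised. */
struct rte_lpm *lpm = rte_lpm_create("example_routes", 0, 256, 0);
if (lpm == NULL)
	rte_panic("cannot create LPM table, rte_errno=%d\n", rte_errno);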
Example #3
/* create the ring */
struct rte_ring *
rte_ring_create(unsigned count, unsigned flags)
{
    struct rte_ring *r;
    size_t ring_size;

#if 0
    /* compilation-time checks */
    RTE_BUILD_BUG_ON((sizeof(struct rte_ring) &
              CACHE_LINE_MASK) != 0);
    RTE_BUILD_BUG_ON((offsetof(struct rte_ring, cons) &
              CACHE_LINE_MASK) != 0);
    RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
              CACHE_LINE_MASK) != 0);
#ifdef RTE_LIBRTE_RING_DEBUG
    RTE_BUILD_BUG_ON((sizeof(struct rte_ring_debug_stats) &
              CACHE_LINE_MASK) != 0);
    RTE_BUILD_BUG_ON((offsetof(struct rte_ring, stats) &
              CACHE_LINE_MASK) != 0);
#endif
#endif

    /* count must be a power of 2 */
    if ((!POWEROF2(count)) || (count > RTE_RING_SZ_MASK )) {
        rte_errno = EINVAL;
        fprintf(stderr, "Requested size is invalid, must be a power of 2 "
                "and must not exceed the size limit %u\n", RTE_RING_SZ_MASK);
        return NULL;
    }

    ring_size = count * sizeof(void *) + sizeof(struct rte_ring);

    r = (struct rte_ring *)malloc(ring_size);
    if (r == NULL) {
        rte_errno = ENOMEM;
        return NULL;
    }

    /* init the ring structure */
    memset(r, 0, sizeof(*r));

    r->flags = flags;
    r->prod.watermark = count;
    r->prod.sp_enqueue = !!(flags & RING_F_SP_ENQ);
    r->cons.sc_dequeue = !!(flags & RING_F_SC_DEQ);
    r->prod.size = r->cons.size = count;
    r->prod.mask = r->cons.mask = count-1;
    r->prod.head = r->cons.head = 0;
    r->prod.tail = r->cons.tail = 0;

    return r;
}
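The power-of-two requirement is what lets the ring index with a simple mask (r->prod.mask = count-1 above). A sketch of the POWEROF2 check, matching the classic bit trick in DPDK's common headers:

/* Non-zero iff x is a power of two (or zero): a power of two has a
 * single bit set, so clearing the lowest set bit via x & (x-1)
 * leaves nothing behind. */
#define POWEROF2(x) ((((x) - 1) & (x)) == 0)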
Example #4
static void
sfc_mcdi_poll(struct sfc_adapter *sa)
{
	efx_nic_t *enp;
	unsigned int delay_total;
	unsigned int delay_us;
	boolean_t aborted __rte_unused;

	delay_total = 0;
	delay_us = SFC_MCDI_POLL_INTERVAL_MIN_US;
	enp = sa->nic;

	do {
		if (efx_mcdi_request_poll(enp))
			return;

		if (delay_total > SFC_MCDI_WATCHDOG_INTERVAL_US) {
			aborted = efx_mcdi_request_abort(enp);
			SFC_ASSERT(aborted);
			sfc_mcdi_timeout(sa);
			return;
		}

		rte_delay_us(delay_us);

		delay_total += delay_us;

		/* Exponentially back off the poll frequency */
		RTE_BUILD_BUG_ON(SFC_MCDI_POLL_INTERVAL_MAX_US > UINT_MAX / 2);
		delay_us *= 2;
		if (delay_us > SFC_MCDI_POLL_INTERVAL_MAX_US)
			delay_us = SFC_MCDI_POLL_INTERVAL_MAX_US;

	} while (1);
}
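The build-time check in this loop carries an overflow argument: delay_us is clamped to SFC_MCDI_POLL_INTERVAL_MAX_US at the end of every iteration, so as long as that maximum is at most UINT_MAX / 2, the next delay_us *= 2 cannot wrap around an unsigned int.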
Example #5
static void
vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
	struct vmxnet3_hw *hw = dev->data->dev_private;

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);

	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
	for (i = 0; i < hw->num_tx_queues; i++) {
		struct UPT1_TxStats *txStats = &hw->tqd_start[i].stats;

		stats->q_opackets[i] = txStats->ucastPktsTxOK +
			txStats->mcastPktsTxOK +
			txStats->bcastPktsTxOK;
		stats->q_obytes[i] = txStats->ucastBytesTxOK +
			txStats->mcastBytesTxOK +
			txStats->bcastBytesTxOK;

		stats->opackets += stats->q_opackets[i];
		stats->obytes += stats->q_obytes[i];
		stats->oerrors += txStats->pktsTxError +
			txStats->pktsTxDiscard;
	}

	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
	for (i = 0; i < hw->num_rx_queues; i++) {
		struct UPT1_RxStats *rxStats = &hw->rqd_start[i].stats;

		stats->q_ipackets[i] = rxStats->ucastPktsRxOK +
			rxStats->mcastPktsRxOK +
			rxStats->bcastPktsRxOK;

		stats->q_ibytes[i] = rxStats->ucastBytesRxOK +
			rxStats->mcastBytesRxOK +
			rxStats->bcastBytesRxOK;

		stats->ipackets += stats->q_ipackets[i];
		stats->ibytes += stats->q_ibytes[i];

		stats->q_errors[i] = rxStats->pktsRxError;
		stats->ierrors += rxStats->pktsRxError;
		stats->rx_nombuf += rxStats->pktsRxOutOfBuf;
	}
}
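The two RTE_BUILD_BUG_ON checks here guarantee that the fixed-size per-queue arrays in struct rte_eth_stats (q_opackets[], q_ipackets[] and friends, dimensioned by RTE_ETHDEV_QUEUE_STAT_CNTRS) are large enough for every queue this device can expose, so the loops above can never index out of bounds.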
Example #6
int
octeontx_mbox_send(struct octeontx_mbox_hdr *hdr, void *txdata,
				 uint16_t txlen, void *rxdata, uint16_t rxlen)
{
	struct mbox *m = &octeontx_mbox;

	RTE_BUILD_BUG_ON(sizeof(struct mbox_ram_hdr) != 8);
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EINVAL;

	return mbox_send(m, hdr, txdata, txlen, rxdata, rxlen);
}
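A sizeof check like this one pins a shared-memory wire format: the mailbox header must stay exactly 8 bytes. A generic sketch of the pattern (the field names are illustrative, not the real octeontx layout):

/* Illustrative only: an 8-byte header overlaid on a uint64_t so the
 * whole header can be read or written in one 64-bit access. */
#include <stdint.h>

union example_ram_hdr {
	uint64_t u64;
	struct {
		uint64_t msg_id  : 16;
		uint64_t version :  8;
		uint64_t flags   :  8;
		uint64_t length  : 32;
	};
};

A sizeof check against such a union then rejects any change that grows it past 8 bytes.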
Example #7
/*
 * vPMD raw receive routine; only called with nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP.
 *
 * Notice:
 * - if nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, no packets are returned
 * - if nb_pkts > RTE_IXGBE_MAX_RX_BURST, only the DD bits of
 *   RTE_IXGBE_MAX_RX_BURST descriptors are scanned
 * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP
 * - ol_flags for RSS and checksum errors are not supported
 */
static inline uint16_t
_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts, uint8_t *split_packet)
{
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct ixgbe_rx_entry *sw_ring;
	uint16_t nb_pkts_recd;
	int pos;
	uint64_t var;
	__m128i shuf_msk;
	__m128i crc_adjust = _mm_set_epi16(
				0, 0, 0,    /* ignore non-length fields */
				-rxq->crc_len, /* sub crc on data_len */
				0,          /* ignore high-16bits of pkt_len */
				-rxq->crc_len, /* sub crc on pkt_len */
				0, 0            /* ignore pkt_type field */
			);
	__m128i dd_check, eop_check;
	uint8_t vlan_flags;

	/* nb_pkts must be less than or equal to RTE_IXGBE_MAX_RX_BURST */
	nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);

	/* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);

	/* Just the act of getting into the function from the application is
	 * going to cost about 7 cycles
	 */
	rxdp = rxq->rx_ring + rxq->rx_tail;

	_mm_prefetch((const void *)rxdp, _MM_HINT_T0);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
		ixgbe_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.upper.status_error &
				rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
		return 0;

	/* 4 packets DD mask */
	dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);

	/* 4 packets EOP mask */
	eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);

	/* mask to shuffle from desc. to mbuf */
	shuf_msk = _mm_set_epi8(
		7, 6, 5, 4,  /* octet 4~7, 32bits rss */
		15, 14,      /* octet 14~15, low 16 bits vlan_macip */
		13, 12,      /* octet 12~13, 16 bits data_len */
		0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
		13, 12,      /* octet 12~13, low 16 bits pkt_len */
		0xFF, 0xFF,  /* skip 32 bit pkt_type */
		0xFF, 0xFF
		);

	/* Cache is empty -> need to scan the buffer rings, but first move
	 * the next 'n' mbufs into the cache
	 */
	sw_ring = &rxq->sw_ring[rxq->rx_tail];

	/* ensure these 2 flags are in the lower 8 bits */
	RTE_BUILD_BUG_ON((PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED) > UINT8_MAX);
	vlan_flags = rxq->vlan_flags & UINT8_MAX;

	/* A. load 4 packets' descriptors in one loop
	 * [A*. mask out 4 unused dirty fields in desc]
	 * B. copy 4 mbuf pointers from sw_ring to rx_pkts
	 * C. calc the number of DD bits among the 4 packets
	 * [C*. extract the end-of-packet bit, if requested]
	 * D. fill info. from desc to mbuf
	 */
	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
			pos += RTE_IXGBE_DESCS_PER_LOOP,
			rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
		__m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
		__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
		__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
		__m128i mbp1, mbp2; /* two mbuf pointers in one XMM reg. */

		/* B.1 load 2 mbuf pointers */
		mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);

		/* Read desc statuses backwards to avoid race condition */
		/* A.1 load 4 pkts desc */
		descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));

		/* B.2 copy 2 mbuf pointers into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);

		/* B.1 load 2 mbuf pointers */
		mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);

		descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
		/* A.1 load 2 pkts desc */
		descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
		descs[0] = _mm_loadu_si128((__m128i *)(rxdp));

		/* B.2 copy 2 mbuf pointers into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);

		if (split_packet) {
			rte_mbuf_prefetch_part2(rx_pkts[pos]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
		}

		/* avoid compiler reorder optimization */
		rte_compiler_barrier();

		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
		pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
		pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);

		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
		pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
		pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);

		/* C.1 4=>2 filter staterr info only */
		sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
		/* C.1 4=>2 filter staterr info only */
		sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);

		/* set ol_flags with vlan packet type */
		desc_to_olflags_v(descs, vlan_flags, &rx_pkts[pos]);

		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
		pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
		pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);

		/* C.2 get 4 pkts staterr value  */
		zero = _mm_xor_si128(dd_check, dd_check);
		staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);

		/* D.3 copy final 3,4 data to rx_pkts */
		_mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
				pkt_mb4);
		_mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
				pkt_mb3);

		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
		pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
		pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);

		/* C* extract and record EOP bit */
		if (split_packet) {
			__m128i eop_shuf_mask = _mm_set_epi8(
					0xFF, 0xFF, 0xFF, 0xFF,
					0xFF, 0xFF, 0xFF, 0xFF,
					0xFF, 0xFF, 0xFF, 0xFF,
					0x04, 0x0C, 0x00, 0x08
					);

			/* and with mask to extract bits, flipping 1-0 */
			__m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
			/* the staterr values are not in order, as the count
			 * of dd bits doesn't care. However, for end of
			 * packet tracking, we do care, so shuffle. This also
			 * compresses the 32-bit values to 8-bit
			 */
			eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
			/* store the resulting 32-bit value */
			*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
			split_packet += RTE_IXGBE_DESCS_PER_LOOP;

			/* zero-out next pointers */
			rx_pkts[pos]->next = NULL;
			rx_pkts[pos + 1]->next = NULL;
			rx_pkts[pos + 2]->next = NULL;
			rx_pkts[pos + 3]->next = NULL;
		}

		/* C.3 calc available number of desc */
		staterr = _mm_and_si128(staterr, dd_check);
		staterr = _mm_packs_epi32(staterr, zero);

		/* D.3 copy final 1,2 data to rx_pkts */
		_mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
				pkt_mb2);
		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
				pkt_mb1);

		/* C.4 calc available number of desc (popcount of DD bits) */
		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
		nb_pkts_recd += var;
		if (likely(var != RTE_IXGBE_DESCS_PER_LOOP))
			break;
	}

	/* Update our internal tail pointer */
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

	return nb_pkts_recd;
}
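RTE_ALIGN_FLOOR above trims the burst to a multiple of RTE_IXGBE_DESCS_PER_LOOP (which is 4, matching the "4 packets" masks). In spirit the macro is the following, with DPDK's real version adding typeof() casts:

/* Round val down to a multiple of align; align must be a power of 2.
 * E.g. ALIGN_FLOOR(35, 4) == 32, so a request for 35 packets is
 * processed as 32 and the rest is left for the next call. */
#define ALIGN_FLOOR(val, align) ((val) & ~((align) - 1))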
Example #8
/* create the ring */
struct rte_ring *
rte_ring_create(const char *name, unsigned count, int socket_id,
                unsigned flags)
{
    char mz_name[RTE_MEMZONE_NAMESIZE];
    struct rte_ring *r;
    const struct rte_memzone *mz;
    size_t ring_size;
    int mz_flags = 0;
    struct rte_ring_list* ring_list = NULL;

    /* compilation-time checks */
    RTE_BUILD_BUG_ON((sizeof(struct rte_ring) &
                      CACHE_LINE_MASK) != 0);
    RTE_BUILD_BUG_ON((offsetof(struct rte_ring, cons) &
                      CACHE_LINE_MASK) != 0);
    RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
                      CACHE_LINE_MASK) != 0);
#ifdef RTE_LIBRTE_RING_DEBUG
    RTE_BUILD_BUG_ON((sizeof(struct rte_ring_debug_stats) &
                      CACHE_LINE_MASK) != 0);
    RTE_BUILD_BUG_ON((offsetof(struct rte_ring, stats) &
                      CACHE_LINE_MASK) != 0);
#endif

    /* check that we have an initialised tail queue */
    if ((ring_list =
                RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_RING, rte_ring_list)) == NULL) {
        rte_errno = E_RTE_NO_TAILQ;
        return NULL;
    }

    /* count must be a power of 2 */
    if ((!POWEROF2(count)) || (count > RTE_RING_SZ_MASK )) {
        rte_errno = EINVAL;
        RTE_LOG(ERR, RING, "Requested size is invalid, must be a power of 2 "
                "and must not exceed the size limit %u\n", RTE_RING_SZ_MASK);
        return NULL;
    }

    rte_snprintf(mz_name, sizeof(mz_name), "RG_%s", name);
    ring_size = count * sizeof(void *) + sizeof(struct rte_ring);

    rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

    /* reserve a memory zone for this ring. If we can't get rte_config or
     * we are a secondary process, the memzone_reserve function will set
     * rte_errno for us appropriately - hence no check in this function */
    mz = rte_memzone_reserve(mz_name, ring_size, socket_id, mz_flags);
    if (mz != NULL) {
        r = mz->addr;

        /* init the ring structure */
        memset(r, 0, sizeof(*r));
        rte_snprintf(r->name, sizeof(r->name), "%s", name);
        r->flags = flags;
        r->prod.watermark = count;
        r->prod.sp_enqueue = !!(flags & RING_F_SP_ENQ);
        r->cons.sc_dequeue = !!(flags & RING_F_SC_DEQ);
        r->prod.size = r->cons.size = count;
        r->prod.mask = r->cons.mask = count-1;
        r->prod.head = r->cons.head = 0;
        r->prod.tail = r->cons.tail = 0;

        TAILQ_INSERT_TAIL(ring_list, r, next);
    } else {
        r = NULL;
        RTE_LOG(ERR, RING, "Cannot reserve memory\n");
    }
    rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

    return r;
}
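A minimal usage sketch for this memzone-backed variant; the ring name and size are illustrative:

/* Hypothetical usage: 1024-slot single-producer/single-consumer ring
 * on any NUMA socket. */
struct rte_ring *r = rte_ring_create("example_ring", 1024,
                                     SOCKET_ID_ANY,
                                     RING_F_SP_ENQ | RING_F_SC_DEQ);
if (r == NULL)
    rte_panic("cannot create ring, rte_errno=%d\n", rte_errno);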
Example #9
/* create the mempool */
struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
		   unsigned cache_size, unsigned private_data_size,
		   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		   rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
		   int socket_id, unsigned flags)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	char rg_name[RTE_RING_NAMESIZE];
	struct rte_mempool *mp = NULL;
	struct rte_ring *r;
	const struct rte_memzone *mz;
	size_t mempool_size, total_elt_size;
	int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
	int rg_flags = 0;
	uint32_t header_size, trailer_size;
	unsigned i;
	void *obj;

	/* compilation-time checks */
	RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) &
			  CACHE_LINE_MASK) != 0);
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
	RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_cache) &
			  CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, local_cache) &
			  CACHE_LINE_MASK) != 0);
#endif
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_debug_stats) &
			  CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, stats) &
			  CACHE_LINE_MASK) != 0);
#endif

	/* check that we have an initialised tail queue */
	if (RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL, rte_mempool_list) == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;	
	}
	
	/* requested cache size is too big */
	if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* "no cache align" imply "no spread" */
	if (flags & MEMPOOL_F_NO_CACHE_ALIGN)
		flags |= MEMPOOL_F_NO_SPREAD;

	/* ring flags */
	if (flags & MEMPOOL_F_SP_PUT)
		rg_flags |= RING_F_SP_ENQ;
	if (flags & MEMPOOL_F_SC_GET)
		rg_flags |= RING_F_SC_DEQ;

	rte_rwlock_write_lock(RTE_EAL_MEMPOOL_RWLOCK);

	/* allocate the ring that will be used to store objects */
	/* Ring functions will return appropriate errors if we are
	 * running as a secondary process etc., so no checks made
	 * in this function for that condition */
	rte_snprintf(rg_name, sizeof(rg_name), "MP_%s", name);
	r = rte_ring_create(rg_name, rte_align32pow2(n+1), socket_id, rg_flags);
	if (r == NULL)
		goto exit;

	/*
	 * In the header, we have at least the pointer to the pool, and
	 * optionally a 64-bit cookie.
	 */
	header_size = 0;
	header_size += sizeof(struct rte_mempool *); /* ptr to pool */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	header_size += sizeof(uint64_t); /* cookie */
#endif
	if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0)
		header_size = (header_size + CACHE_LINE_MASK) & (~CACHE_LINE_MASK);

	/* trailer contains the cookie in debug mode */
	trailer_size = 0;
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	trailer_size += sizeof(uint64_t); /* cookie */
#endif
	/* element size is aligned to at least 8 bytes */
	elt_size = (elt_size + 7) & (~7);

	/* expand trailer to next cache line */
	if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0) {
		total_elt_size = header_size + elt_size + trailer_size;
		trailer_size += ((CACHE_LINE_SIZE -
				  (total_elt_size & CACHE_LINE_MASK)) &
				 CACHE_LINE_MASK);
	}

	/*
	 * increase trailer to add padding between objects in order to
	 * spread them across memory channels/ranks
	 */
	if ((flags & MEMPOOL_F_NO_SPREAD) == 0) {
		unsigned new_size;
		new_size = optimize_object_size(header_size + elt_size +
						trailer_size);
		trailer_size = new_size - header_size - elt_size;
	}

	/* this is the size of an object, including header and trailer */
	total_elt_size = header_size + elt_size + trailer_size;

	/* reserve a memory zone for this mempool: private data is
	 * cache-aligned */
	private_data_size = (private_data_size +
			     CACHE_LINE_MASK) & (~CACHE_LINE_MASK);
	mempool_size = total_elt_size * n +
		sizeof(struct rte_mempool) + private_data_size;
	rte_snprintf(mz_name, sizeof(mz_name), "MP_%s", name);

	mz = rte_memzone_reserve(mz_name, mempool_size, socket_id, mz_flags);

	/*
	 * no more memory: in this case we lose the previously reserved
	 * space for the ring, as we cannot free it
	 */
	if (mz == NULL)
		goto exit;

	/* init the mempool structure */
	mp = mz->addr;
	memset(mp, 0, sizeof(*mp));
	rte_snprintf(mp->name, sizeof(mp->name), "%s", name);
	mp->phys_addr = mz->phys_addr;
	mp->ring = r;
	mp->size = n;
	mp->flags = flags;
	mp->elt_size = elt_size;
	mp->header_size = header_size;
	mp->trailer_size = trailer_size;
	mp->cache_size = cache_size;
	mp->cache_flushthresh = (uint32_t)(cache_size * CACHE_FLUSHTHRESH_MULTIPLIER);
	mp->private_data_size = private_data_size;

	/* call the initializer */
	if (mp_init)
		mp_init(mp, mp_init_arg);

	/* fill the headers and trailers, and add objects in ring */
	obj = (char *)mp + sizeof(struct rte_mempool) + private_data_size;
	for (i = 0; i < n; i++) {
		struct rte_mempool **mpp;
		obj = (char *)obj + header_size;

		/* set mempool ptr in header */
		mpp = __mempool_from_obj(obj);
		*mpp = mp;

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
		__mempool_write_header_cookie(obj, 1);
		__mempool_write_trailer_cookie(obj);
#endif
		/* call the initializer */
		if (obj_init)
			obj_init(mp, obj_init_arg, obj, i);

		/* enqueue in ring */
		rte_ring_sp_enqueue(mp->ring, obj);
		obj = (char *)obj + elt_size + trailer_size;
	}

	RTE_EAL_TAILQ_INSERT_TAIL(RTE_TAILQ_MEMPOOL, rte_mempool_list, mp);

exit:
	rte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK);

	return mp;
}
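To make the header/trailer arithmetic concrete, assume a 64-byte cache line, debug disabled, and elt_size = 100: the 8-byte alignment rounds elt_size up to 104; header_size starts at 8 (the pool back-pointer) and is cache-aligned up to 64; the trailer then grows by (64 - (168 & 63)) & 63 = 24 bytes, giving total_elt_size = 64 + 104 + 24 = 192, exactly three cache lines (before any extra spreading added by optimize_object_size()).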
Example #10
static void *
rte_table_acl_create(
	void *params,
	int socket_id,
	uint32_t entry_size)
{
	struct rte_table_acl_params *p = (struct rte_table_acl_params *) params;
	struct rte_table_acl *acl;
	uint32_t action_table_size, acl_rule_list_size, acl_rule_memory_size;
	uint32_t total_size;

	RTE_BUILD_BUG_ON(((sizeof(struct rte_table_acl) % RTE_CACHE_LINE_SIZE)
		!= 0));

	/* Check input parameters */
	if (p == NULL) {
		RTE_LOG(ERR, TABLE, "%s: Invalid value for params\n", __func__);
		return NULL;
	}
	if (p->name == NULL) {
		RTE_LOG(ERR, TABLE, "%s: Invalid value for name\n", __func__);
		return NULL;
	}
	if (p->n_rules == 0) {
		RTE_LOG(ERR, TABLE, "%s: Invalid value for n_rules\n",
			__func__);
		return NULL;
	}
	if ((p->n_rule_fields == 0) ||
	    (p->n_rule_fields > RTE_ACL_MAX_FIELDS)) {
		RTE_LOG(ERR, TABLE, "%s: Invalid value for n_rule_fields\n",
			__func__);
		return NULL;
	}

	entry_size = RTE_ALIGN(entry_size, sizeof(uint64_t));

	/* Memory allocation */
	action_table_size = RTE_CACHE_LINE_ROUNDUP(p->n_rules * entry_size);
	acl_rule_list_size =
		RTE_CACHE_LINE_ROUNDUP(p->n_rules * sizeof(struct rte_acl_rule *));
	acl_rule_memory_size = RTE_CACHE_LINE_ROUNDUP(p->n_rules *
		RTE_ACL_RULE_SZ(p->n_rule_fields));
	total_size = sizeof(struct rte_table_acl) + action_table_size +
		acl_rule_list_size + acl_rule_memory_size;

	acl = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE,
		socket_id);
	if (acl == NULL) {
		RTE_LOG(ERR, TABLE,
			"%s: Cannot allocate %u bytes for ACL table\n",
			__func__, total_size);
		return NULL;
	}

	acl->action_table = &acl->memory[0];
	acl->acl_rule_list =
		(struct rte_acl_rule **) &acl->memory[action_table_size];
	acl->acl_rule_memory = (uint8_t *)
		&acl->memory[action_table_size + acl_rule_list_size];

	/* Initialization of internal fields */
	snprintf(acl->name[0], RTE_ACL_NAMESIZE, "%s_a", p->name);
	snprintf(acl->name[1], RTE_ACL_NAMESIZE, "%s_b", p->name);
	acl->name_id = 1;

	acl->acl_params.name = acl->name[acl->name_id];
	acl->acl_params.socket_id = socket_id;
	acl->acl_params.rule_size = RTE_ACL_RULE_SZ(p->n_rule_fields);
	acl->acl_params.max_rule_num = p->n_rules;

	acl->cfg.num_categories = 1;
	acl->cfg.num_fields = p->n_rule_fields;
	memcpy(&acl->cfg.defs[0], &p->field_format[0],
		p->n_rule_fields * sizeof(struct rte_acl_field_def));

	acl->ctx = NULL;

	acl->n_rules = p->n_rules;
	acl->entry_size = entry_size;

	return acl;
}