Example 1
void
test_dpif_dpdk_flow_flush(struct dpif *dpif_p)
{
	struct dpif_dpdk_message reply;
	struct dpif_dpdk_message *request;
	struct rte_mbuf *mbuf = NULL;
	void *pktmbuf_data = NULL;
	int result = -1;

	/* It doesn't matter what kind of reply we enqueue here */
	create_dpdk_flow_del_reply(&reply, NO_FLOW);
	result = enqueue_reply_on_reply_ring(reply);
	assert(result == 0);
	assert(rte_ring_count(vswitchd_reply_ring) == 1);

	dpif_p->dpif_class->flow_flush(dpif_p);
	assert(rte_ring_count(vswitchd_message_ring) == 1);
	result = rte_ring_sc_dequeue(vswitchd_message_ring, (void **)&mbuf);
	assert(result == 0);

	/* Just test that the message created and enqueued on the request ring
	 * was correct
	 */
	pktmbuf_data = rte_pktmbuf_mtod(mbuf, void *);
	request = (struct dpif_dpdk_message *)pktmbuf_data;
	assert(request->flow_msg.cmd == OVS_FLOW_CMD_DEL);
	rte_pktmbuf_free(mbuf);
	printf(" %s\n", __FUNCTION__);
}
Example 2
void
test_dpif_dpdk_flow_del(struct dpif *dpif_p)
{
	struct dpif_dpdk_message reply;
	struct dpif_dpdk_message *request;
	struct dpif_flow_del del;
	struct rte_mbuf *mbuf = NULL;
	void *pktmbuf_data = NULL;
	int result = -1;

	create_dpdk_flow_del_reply(&reply, NO_FLOW);
	result = enqueue_reply_on_reply_ring(reply);
	assert(result == 0);
	assert(rte_ring_count(vswitchd_reply_ring) == 1);

	create_dpif_flow_del_message(&del);
	dpif_p->dpif_class->flow_del(dpif_p, &del);
	assert(rte_ring_count(vswitchd_message_ring) == 1);
	result = rte_ring_sc_dequeue(vswitchd_message_ring, (void **)&mbuf);
	assert(result == 0);

	/* Just test that the message created and enqueued on the request ring
	 * was correct
	 */
	pktmbuf_data = rte_pktmbuf_mtod(mbuf, void *);
	request = (struct dpif_dpdk_message *)pktmbuf_data;
	assert(request->flow_msg.actions[0].type == ACTION_NULL);
	assert(request->flow_msg.key.in_port == 5);
	rte_pktmbuf_free(mbuf);
	printf(" %s\n", __FUNCTION__);
}
Example 3
/*
 * Flush packets scheduled for transmit on ports
 */
static void
flush_pkts(unsigned action)
{
	unsigned i = 0;
	uint16_t deq_count = PKT_BURST_SIZE;
	struct rte_mbuf *pkts[PKT_BURST_SIZE] = {0};
	struct port_queue *pq = &port_queues[action & PORT_MASK];
	struct statistics *s = &vport_stats[action];
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
	uint64_t diff_tsc = 0;
	static uint64_t prev_tsc[MAX_PHYPORTS] = {0};
	uint64_t cur_tsc = rte_rdtsc();
	unsigned num_pkts;

	diff_tsc = cur_tsc - prev_tsc[action & PORT_MASK];

	if (unlikely(rte_ring_count(pq->tx_q) >= PKT_BURST_SIZE)) {
		num_pkts = PKT_BURST_SIZE;
	} else if (unlikely(diff_tsc > drain_tsc)) {
		/* If the queue idles with fewer than PKT_BURST_SIZE packets,
		 * drain it */
		num_pkts = rte_ring_count(pq->tx_q);
	} else {
		return;
	}

	if (unlikely(rte_ring_dequeue_bulk(
			      pq->tx_q, (void **)pkts, num_pkts) != 0))
		return;

	const uint16_t sent = rte_eth_tx_burst(
				 ports->id[action & PORT_MASK], 0, pkts, num_pkts);

	prev_tsc[action & PORT_MASK] = cur_tsc;

	if (unlikely(sent < num_pkts)) {
		for (i = sent; i < num_pkts; i++)
			rte_pktmbuf_free(pkts[i]);
		s->tx_drop += (num_pkts - sent);
	} else {
		s->tx += sent;
	}
}
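
Note: a minimal sketch of the burst-or-drain decision used in flush_pkts() above, under the same legacy rte_ring API. PKT_BURST_SIZE and BURST_TX_DRAIN_US stand in for the application's tuning constants and are assumptions here, not DPDK definitions.

#include <stdint.h>

#include <rte_cycles.h>
#include <rte_ring.h>

#define PKT_BURST_SIZE 32        /* assumed burst size */
#define BURST_TX_DRAIN_US 100    /* assumed drain budget in microseconds */

/* Return how many packets to transmit now: a full burst if one is queued,
 * everything left if the queue has idled past the drain budget, else 0. */
static unsigned
tx_quota(struct rte_ring *tx_q, uint64_t last_flush_tsc)
{
	const uint64_t drain_tsc =
		(rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
	unsigned queued = rte_ring_count(tx_q);

	if (queued >= PKT_BURST_SIZE)
		return PKT_BURST_SIZE;
	if (rte_rdtsc() - last_flush_tsc > drain_tsc)
		return queued;
	return 0;
}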
Example 4
/*
 * Function receives messages from the daemon.
 */
static void
receive_request_from_vswitchd(void)
{
	int j = 0;
	uint16_t dq_pkt = PKT_BURST_SIZE;
	struct client *vswd = NULL;
	struct statistics *vswd_stat = NULL;
	struct rte_mbuf *buf[PKT_BURST_SIZE] = {0};

	vswd = &clients[VSWITCHD];
	vswd_stat = &vport_stats[VSWITCHD];

	/* Attempt to dequeue maximum available number of mbufs from ring */
	while (dq_pkt > 0 &&
			unlikely(rte_ring_sc_dequeue_bulk(
					vswd->tx_q, (void **)buf, dq_pkt) != 0))
		dq_pkt = (uint16_t)RTE_MIN(
				rte_ring_count(vswd->tx_q), PKT_BURST_SIZE);

	/* Update count of packets received from the daemon */
	vswd_stat->rx += dq_pkt;

	for (j = 0; j < dq_pkt; j++) {
		handle_vswitchd_cmd(buf[j]);
	}
}
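
Note: the dequeue loop above (also in Examples 5 and 11) is a recurring pattern; here is a sketch of it as a standalone helper, assuming the pre-17.05 rte_ring_sc_dequeue_bulk() that returns 0 on success.

#include <rte_common.h>
#include <rte_ring.h>
#include <rte_branch_prediction.h>

/* Dequeue up to 'burst' objects: try the full burst first; if the ring holds
 * fewer, retry with whatever rte_ring_count() reports. With a single consumer
 * the count cannot shrink between the two calls, so the loop body runs at
 * most twice. Returns the number of objects written to objs. */
static uint16_t
dequeue_up_to(struct rte_ring *r, void **objs, uint16_t burst)
{
	uint16_t n = burst;

	while (n > 0 && unlikely(rte_ring_sc_dequeue_bulk(r, objs, n) != 0))
		n = (uint16_t)RTE_MIN(rte_ring_count(r), (unsigned)burst);

	return n;
}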
Example 5
/*
 * Receive burst of packets from client
 */
static void
receive_from_client(uint16_t client)
{
	int j = 0;
	uint16_t dq_pkt = PKT_BURST_SIZE;
	struct rte_mbuf *buf[PKT_BURST_SIZE] = {0};
	struct client *cl = NULL;
	struct statistics *s = NULL;

	cl = &clients[client];
	s = &vport_stats[client];

	/* Attempt to dequeue maximum available number of mbufs from ring */
	while (dq_pkt > 0 &&
			unlikely(rte_ring_sc_dequeue_bulk(
					cl->tx_q, (void **)buf, dq_pkt) != 0))
		dq_pkt = (uint16_t)RTE_MIN(
				rte_ring_count(cl->tx_q), PKT_BURST_SIZE);

	/* Update number of packets transmitted by client */
	s->tx += dq_pkt;

	for (j = 0; j < dq_pkt; j++) {
		switch_packet(buf[j], client);
	}
}
Example 6
/* Return the number of entries in the mempool */
unsigned
rte_mempool_count(const struct rte_mempool *mp)
{
	unsigned count;

	count = rte_ring_count(mp->ring);

#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
	{
		unsigned lcore_id;
		if (mp->cache_size == 0)
			return count;

		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
			count += mp->local_cache[lcore_id].len;
	}
#endif

	/*
	 * Due to a race condition (access to len is not locked), the
	 * total can be greater than size, so cap the result.
	 */
	if (count > mp->size)
		return mp->size;
	return count;
}
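
Note: a usage sketch. Because the per-lcore caches are read without locking, the value is only a snapshot, so it suits monitoring rather than synchronization; in later DPDK releases this function was renamed rte_mempool_avail_count(). The 10% threshold below is an arbitrary assumption.

#include <stdio.h>

#include <rte_mempool.h>

static void
warn_if_pool_low(const struct rte_mempool *mp)
{
	unsigned avail = rte_mempool_count(mp); /* approximate snapshot */

	if (avail < mp->size / 10)
		printf("mempool <%s> low: %u of %u objects free\n",
		       mp->name, avail, (unsigned)mp->size);
}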
Example 7
void cmd_ringinfo(uint8_t lcore_id, uint8_t task_id)
{
	struct lcore_cfg *lconf;
	struct rte_ring *ring;
	struct task_args *targ;
	uint32_t count;

	if (!dppd_core_active(lcore_id, 0)) {
		plog_info("lcore %u is not active\n", lcore_id);
		return;
	}
	lconf = &lcore_cfg[lcore_id];
	if (task_id >= lconf->nb_tasks) {
		plog_warn("Invalid task index %u: lcore %u has %u tasks\n", task_id, lcore_id, lconf->nb_tasks);
		return;
	}

	targ = &lconf->targs[task_id];
	plog_info("Core %u task %u: %u rings\n", lcore_id, task_id, targ->nb_rxrings);
	for (uint8_t i = 0; i < targ->nb_rxrings; ++i) {
		ring = targ->rx_rings[i];
		count = ring->prod.mask + 1; /* ring size: mask is size - 1 */
		plog_info("\tRing %u:\n", i);
		plog_info("\t\tFlags: %s,%s\n", ring->flags & RING_F_SP_ENQ? "sp":"mp", ring->flags & RING_F_SC_DEQ? "sc":"mc");
		plog_info("\t\tMemory size: %zu bytes\n", rte_ring_get_memsize(count));
		plog_info("\t\tOccupied: %u/%u\n", rte_ring_count(ring), count);
	}
}
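
Note: the capacity computed above as ring->prod.mask + 1 relies on the older public rte_ring layout, where the size is a power of two and one slot is always kept empty (a "full" ring holds size - 1 entries). A sketch of the fill-level arithmetic under that same layout:

#include <rte_ring.h>

static unsigned
ring_fill_percent(const struct rte_ring *r)
{
	unsigned size = r->prod.mask + 1; /* legacy layout: mask is size - 1 */

	return rte_ring_count(r) * 100 / size;
}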
Example 8
/* dump the status of the ring on the console */
void
rte_ring_dump(FILE *f, const struct rte_ring *r)
{
#ifdef RTE_LIBRTE_RING_DEBUG
	struct rte_ring_debug_stats sum;
	unsigned lcore_id;
#endif

	fprintf(f, "ring <%s>@%p\n", r->name, r);
	fprintf(f, "  flags=%x\n", r->flags);
	fprintf(f, "  size=%"PRIu32"\n", r->prod.size);
	fprintf(f, "  ct=%"PRIu32"\n", r->cons.tail);
	fprintf(f, "  ch=%"PRIu32"\n", r->cons.head);
	fprintf(f, "  pt=%"PRIu32"\n", r->prod.tail);
	fprintf(f, "  ph=%"PRIu32"\n", r->prod.head);
	fprintf(f, "  used=%u\n", rte_ring_count(r));
	fprintf(f, "  avail=%u\n", rte_ring_free_count(r));
	if (r->prod.watermark == r->prod.size)
		fprintf(f, "  watermark=0\n");
	else
		fprintf(f, "  watermark=%"PRIu32"\n", r->prod.watermark);

	/* sum and dump statistics */
#ifdef RTE_LIBRTE_RING_DEBUG
	memset(&sum, 0, sizeof(sum));
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		sum.enq_success_bulk += r->stats[lcore_id].enq_success_bulk;
		sum.enq_success_objs += r->stats[lcore_id].enq_success_objs;
		sum.enq_quota_bulk += r->stats[lcore_id].enq_quota_bulk;
		sum.enq_quota_objs += r->stats[lcore_id].enq_quota_objs;
		sum.enq_fail_bulk += r->stats[lcore_id].enq_fail_bulk;
		sum.enq_fail_objs += r->stats[lcore_id].enq_fail_objs;
		sum.deq_success_bulk += r->stats[lcore_id].deq_success_bulk;
		sum.deq_success_objs += r->stats[lcore_id].deq_success_objs;
		sum.deq_fail_bulk += r->stats[lcore_id].deq_fail_bulk;
		sum.deq_fail_objs += r->stats[lcore_id].deq_fail_objs;
	}
	fprintf(f, "  size=%"PRIu32"\n", r->prod.size);
	fprintf(f, "  enq_success_bulk=%"PRIu64"\n", sum.enq_success_bulk);
	fprintf(f, "  enq_success_objs=%"PRIu64"\n", sum.enq_success_objs);
	fprintf(f, "  enq_quota_bulk=%"PRIu64"\n", sum.enq_quota_bulk);
	fprintf(f, "  enq_quota_objs=%"PRIu64"\n", sum.enq_quota_objs);
	fprintf(f, "  enq_fail_bulk=%"PRIu64"\n", sum.enq_fail_bulk);
	fprintf(f, "  enq_fail_objs=%"PRIu64"\n", sum.enq_fail_objs);
	fprintf(f, "  deq_success_bulk=%"PRIu64"\n", sum.deq_success_bulk);
	fprintf(f, "  deq_success_objs=%"PRIu64"\n", sum.deq_success_objs);
	fprintf(f, "  deq_fail_bulk=%"PRIu64"\n", sum.deq_fail_bulk);
	fprintf(f, "  deq_fail_objs=%"PRIu64"\n", sum.deq_fail_objs);
#else
	fprintf(f, "  no statistics available\n");
#endif
}
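
Note: a usage sketch pairing the dump with rte_ring_lookup(); the name passed in is whatever the application registered at rte_ring_create() time.

#include <stdio.h>

#include <rte_ring.h>

static void
dump_ring_by_name(const char *name)
{
	struct rte_ring *r = rte_ring_lookup(name);

	if (r == NULL) {
		fprintf(stderr, "no ring named <%s>\n", name);
		return;
	}
	/* "used" and "avail" in the dump come from rte_ring_count() and
	 * rte_ring_free_count() and sum to the ring's usable capacity. */
	rte_ring_dump(stdout, r);
}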
Example 9
/* dump the status of the mempool on the console */
void
rte_mempool_dump(const struct rte_mempool *mp)
{
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	struct rte_mempool_debug_stats sum;
	unsigned lcore_id;
#endif
	unsigned common_count;
	unsigned cache_count;

	printf("mempool <%s>@%p\n", mp->name, mp);
	printf("  flags=%x\n", mp->flags);
	printf("  ring=<%s>@%p\n", mp->ring->name, mp->ring);
	printf("  size=%"PRIu32"\n", mp->size);
	printf("  header_size=%"PRIu32"\n", mp->header_size);
	printf("  elt_size=%"PRIu32"\n", mp->elt_size);
	printf("  trailer_size=%"PRIu32"\n", mp->trailer_size);
	printf("  total_obj_size=%"PRIu32"\n",
	       mp->header_size + mp->elt_size + mp->trailer_size);

	cache_count = rte_mempool_dump_cache(mp);
	common_count = rte_ring_count(mp->ring);
	if ((cache_count + common_count) > mp->size)
		common_count = mp->size - cache_count;
	printf("  common_pool_count=%u\n", common_count);

	/* sum and dump statistics */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	memset(&sum, 0, sizeof(sum));
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		sum.put_bulk += mp->stats[lcore_id].put_bulk;
		sum.put_objs += mp->stats[lcore_id].put_objs;
		sum.get_success_bulk += mp->stats[lcore_id].get_success_bulk;
		sum.get_success_objs += mp->stats[lcore_id].get_success_objs;
		sum.get_fail_bulk += mp->stats[lcore_id].get_fail_bulk;
		sum.get_fail_objs += mp->stats[lcore_id].get_fail_objs;
	}
	printf("  stats:\n");
	printf("    put_bulk=%"PRIu64"\n", sum.put_bulk);
	printf("    put_objs=%"PRIu64"\n", sum.put_objs);
	printf("    get_success_bulk=%"PRIu64"\n", sum.get_success_bulk);
	printf("    get_success_objs=%"PRIu64"\n", sum.get_success_objs);
	printf("    get_fail_bulk=%"PRIu64"\n", sum.get_fail_bulk);
	printf("    get_fail_objs=%"PRIu64"\n", sum.get_fail_objs);
#else
	printf("  no statistics available\n");
#endif

	rte_mempool_audit(mp);
}
Example 10
void
onvm_nf_check_status(void) {
        int i;
        void *msgs[MAX_NFS];
        struct onvm_nf_msg *msg;
        struct onvm_nf_info *nf;
        int num_msgs = rte_ring_count(incoming_msg_queue);

        if (num_msgs == 0) return;

        if (rte_ring_dequeue_bulk(incoming_msg_queue, msgs, num_msgs, NULL) == 0)
                return;

        for (i = 0; i < num_msgs; i++) {
                msg = (struct onvm_nf_msg*) msgs[i];

                switch (msg->msg_type) {
                case MSG_NF_STARTING:
                        nf = (struct onvm_nf_info*)msg->msg_data;
                        if (onvm_nf_start(nf) == 0) {
                                onvm_stats_add_event("NF Starting", nf);
                        }
                        break;
                case MSG_NF_READY:
                        nf = (struct onvm_nf_info*)msg->msg_data;
                        if (onvm_nf_ready(nf) == 0) {
                                onvm_stats_add_event("NF Ready", nf);
                        }
                        break;
                case MSG_NF_STOPPING:
                        nf = (struct onvm_nf_info*)msg->msg_data;
                        if (onvm_nf_stop(nf) == 0) {
                                onvm_stats_add_event("NF Stopping", nf);
                                num_nfs--;
                        }
                        break;
                }

                rte_mempool_put(nf_msg_pool, (void*)msg);
        }
}
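
Note: this example already uses the post-17.05 bulk API (note the fourth "available" argument and the "number dequeued" return value). With that API, the burst variant can replace the separate rte_ring_count() read entirely; a sketch:

#include <rte_ring.h>

/* Drain up to 'max' pending messages in one call; the return value is the
 * number actually dequeued, so no prior count of the ring is needed. */
static unsigned
drain_messages(struct rte_ring *q, void **msgs, unsigned max)
{
	return rte_ring_dequeue_burst(q, msgs, max, NULL);
}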
Example 11
/*
 * Function handles messages from the daemon.
 */
void
handle_request_from_vswitchd(void)
{
	int j = 0;
	uint16_t dq_pkt = PKT_BURST_SIZE;
	struct rte_mbuf *buf[PKT_BURST_SIZE] = {0};

	/* Attempt to dequeue maximum available number of mbufs from ring */
	while (dq_pkt > 0 &&
	       unlikely(rte_ring_sc_dequeue_bulk(
	       vswitchd_message_ring, (void **)buf, dq_pkt) != 0))
		dq_pkt = (uint16_t)RTE_MIN(
		   rte_ring_count(vswitchd_message_ring), PKT_BURST_SIZE);

	/* Update count of packets received from the daemon */
	stats_vport_rx_increment(VSWITCHD, dq_pkt);

	for (j = 0; j < dq_pkt; j++) {
		handle_vswitchd_cmd(buf[j]);
	}
}
Example 12
void
test_dpif_dpdk_flow_put(struct dpif *dpif_p)
{
	struct dpif_dpdk_message reply;
	struct dpif_dpdk_message *request;
	struct dpif_flow_put put;
	struct rte_mbuf *mbuf = NULL;
	void *pktmbuf_data = NULL;
	int result = -1;
	int num_pkts = 0;

	/* Create a fake reply to put on the reply ring. We don't use
	 * this, but transact will hang until a reply is received so
	 * there has to be something to dequeue.
	 */
	create_dpdk_flow_put_reply(&reply);
	result = enqueue_reply_on_reply_ring(reply);
	assert(result == 0);

	create_dpif_flow_put_message(&put);
	dpif_p->dpif_class->flow_put(dpif_p, &put);
	num_pkts = rte_ring_count(vswitchd_message_ring);
	assert(num_pkts == 1);
	result = rte_ring_sc_dequeue(vswitchd_message_ring, (void **)&mbuf);
	assert(result == 0);

	/* Just test that the message created and enqueued on the request ring
	 * was correct
	 */
	pktmbuf_data = rte_pktmbuf_mtod(mbuf, void *);
	request = (struct dpif_dpdk_message *)pktmbuf_data;
	assert(request->flow_msg.actions[0].type == ACTION_NULL);
	assert(request->flow_msg.key.in_port == 5);
	rte_pktmbuf_free(mbuf);
	printf(" %s\n", __FUNCTION__);
}
Example 13
int QUEUE::Run()
{
	char szBuffer[DEF_MEM_BUF_128];

	map<string, RESOURCE_ATTR *>::iterator it;
	RESOURCE_ATTR *pstRsc = NULL;
	QUEUE_VALUE *pstData = NULL;

	float fUsage = 0;
	struct rte_ring *arrRing[RTE_MAX_MEMZONE];
	memset(arrRing, 0x00, sizeof(arrRing));

	m_pclsCLQ->GetRingList(arrRing);
	for (int i = 1; i < RTE_MAX_MEMZONE; i++)
	{
		if(arrRing[i] == NULL)
			break;
		
		if( strncmp(arrRing[i]->name, "MP", 2) == 0 )
		{
			fUsage = rte_ring_free_count(arrRing[i]) / (float)arrRing[i]->prod.size * 100;
			m_pclsLog->DEBUG("Memory Pool : %25s  / Usage %3.2f", arrRing[i]->name, fUsage);
		}
		else
		{
			fUsage = rte_ring_count(arrRing[i]) / (float)arrRing[i]->prod.size * 100;
			m_pclsLog->DEBUG("Queue       : %25s  / Usage %3.2f", arrRing[i]->name, fUsage);
		}
		
		snprintf(szBuffer, sizeof(szBuffer), "%.2f", fUsage);
		it = m_pmapRsc->find( arrRing[i]->name );
		if(it != m_pmapRsc->end())
		{
			pstRsc = it->second;					
			pstData = (QUEUE_VALUE*)pstRsc->pData;
			pstData->vecStringValue[IDX_QUEUE_USAGE].assign(szBuffer);
		}
		else
		{
			pstRsc = new RESOURCE_ATTR;
			memset(pstRsc, 0x00, sizeof(RESOURCE_ATTR));
			pstRsc->pData = (void*)new QUEUE_VALUE;
			pstData = (QUEUE_VALUE*)pstRsc->pData;
			snprintf(pstRsc->szName, sizeof(pstRsc->szName), "%s", arrRing[i]->name);

			pstData->vecStringValue.assign(MAX_QUEUE_IDX, "");
			pstData->vecStringValue[IDX_QUEUE_USAGE].assign(szBuffer);
			m_pmapRsc->insert( std::pair<string, RESOURCE_ATTR*>(arrRing[i]->name, pstRsc) );
			
		}

		m_pclsEvent->SendTrap(DEF_ALM_CODE_QUEUE_OVER,
				      pstRsc->szName,
				      Rounding(fUsage, 2),
				      NULL, NULL);
	}

	return 0;
}
Example 14
/**
 * CALLED BY NF:
 * Application main function - loops through
 * receiving and processing packets. Never returns
 */
int
onvm_nf_run(struct onvm_nf_info* info, int(*handler)(struct rte_mbuf* pkt, struct onvm_pkt_meta* meta)) {
        void *pkts[PKT_READ_SIZE];
        struct onvm_pkt_meta* meta;

        printf("\nClient process %d handling packets\n", info->instance_id);
        printf("[Press Ctrl-C to quit ...]\n");

        /* Listen for ^C so we can exit gracefully */
        signal(SIGINT, handle_signal);

        for (; keep_running;) {
                uint16_t i, j, nb_pkts = PKT_READ_SIZE;
                void *pktsTX[PKT_READ_SIZE];
                int tx_batch_size = 0;
                int ret_act;

                /* try dequeuing max possible packets first, if that fails, get the
                 * most we can. Loop body should only execute once, maximum */
                while (nb_pkts > 0 &&
                                unlikely(rte_ring_dequeue_bulk(rx_ring, pkts, nb_pkts) != 0))
                        nb_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring), PKT_READ_SIZE);

                if(nb_pkts == 0) {
                        continue;
                }
                /* Give each packet to the user processing function */
                for (i = 0; i < nb_pkts; i++) {
                        meta = onvm_get_pkt_meta((struct rte_mbuf*)pkts[i]);
                        ret_act = (*handler)((struct rte_mbuf*)pkts[i], meta);
                        /* NF returns 0 to return packets or 1 to buffer */
                        if(likely(ret_act == 0)) {
                                pktsTX[tx_batch_size++] = pkts[i];
                        }
                        else {
                                tx_stats->tx_buffer[info->instance_id]++;
                        }
                }

                if (unlikely(tx_batch_size > 0 && rte_ring_enqueue_bulk(tx_ring, pktsTX, tx_batch_size) == -ENOBUFS)) {
                        tx_stats->tx_drop[info->instance_id] += tx_batch_size;
                        for (j = 0; j < tx_batch_size; j++) {
                                rte_pktmbuf_free(pktsTX[j]);
                        }
                } else {
                        tx_stats->tx[info->instance_id] += tx_batch_size;
                }
        }

        nf_info->status = NF_STOPPED;

        /* Put this NF's info struct back into queue for manager to ack shutdown */
        nf_info_ring = rte_ring_lookup(_NF_QUEUE_NAME);
        if (nf_info_ring == NULL) {
                rte_mempool_put(nf_info_mp, nf_info); // give back memory
                rte_exit(EXIT_FAILURE, "Cannot get nf_info ring for shutdown");
        }

        if (rte_ring_enqueue(nf_info_ring, nf_info) < 0) {
                rte_mempool_put(nf_info_mp, nf_info); // give back memory
                rte_exit(EXIT_FAILURE, "Cannot send nf_info to manager for shutdown");
        }
        return 0;
}
Example 15
/**
 * Set up the DPDK rings which will be used to pass packets, via
 * pointers, between the multi-process server and client processes.
 * Each client needs one RX queue.
 */
static int
init_shm_rings(void)
{
    unsigned i;
    unsigned socket_id;
    const char * q_name;

#ifdef INTERRUPT_FIFO
    const char * fifo_name;
#endif

#ifdef INTERRUPT_SEM
    const char * sem_name;
    sem_t *mutex;
#endif

#if defined(INTERRUPT_FIFO) || defined(INTERRUPT_SEM)

#ifdef DPDK_FLAG
    const char *irq_flag_name;
    const unsigned flagsize = 4;
#else
    key_t key;
    int shmid;
    char *shm;
#endif

#endif

    const unsigned ringsize = CLIENT_QUEUE_RINGSIZE;

    clients = rte_malloc("client details",
                         sizeof(*clients) * num_clients, 0);
    if (clients == NULL)
        rte_exit(EXIT_FAILURE, "Cannot allocate memory for client program details\n");

    for (i = 0; i < num_clients; i++) {
        /* Create an RX queue for each client */
        socket_id = rte_socket_id();
        q_name = get_rx_queue_name(i);
        clients[i].rx_q = rte_ring_create(q_name,
                                          ringsize, socket_id,
                                          RING_F_SP_ENQ | RING_F_SC_DEQ ); /* single prod, single cons */
        if (clients[i].rx_q == NULL)
            rte_exit(EXIT_FAILURE, "Cannot create rx ring queue for client %u\n", i);

        // verify the two accounting functions on the freshly created ring
        uint16_t ring_cur_entries = rte_ring_count(clients[i].rx_q);
        uint16_t ring_free_entries = rte_ring_free_count(clients[i].rx_q);
        fprintf(stderr, "ring_cur_entries=%d, ring_free_entries=%d\n", ring_cur_entries, ring_free_entries);

        // added by wei: create FIFO pipe
#ifdef INTERRUPT_FIFO
        umask(0);
        fifo_name = get_fifo_name(i);
        clients[i].fifo_name = fifo_name;
        mknod(fifo_name, S_IFIFO|0666, 0);

        clients[i].fifo_fp = fopen(fifo_name, "w");
        if(clients[i].fifo_fp == NULL) {
            fprintf(stderr, "can not create FIFO for client %d\n", i);
            exit(1);
        }
#endif

#ifdef INTERRUPT_SEM
        sem_name = get_sem_name(i);
        clients[i].sem_name = sem_name;

        fprintf(stderr, "sem_name=%s for client %d\n", sem_name, i);
        mutex = sem_open(sem_name, O_CREAT, 06666, 0);
        if(mutex == SEM_FAILED) {
            fprintf(stderr, "can not create semaphore for client %d\n", i);
            sem_unlink(sem_name);
            exit(1);
        }
        clients[i].mutex = mutex;
#endif

#if defined(INTERRUPT_FIFO) || defined(INTERRUPT_SEM)

#ifdef DPDK_FLAG
        irq_flag_name = get_irq_flag_name(i);
        clients[i].irq_flag = (int *)rte_ring_create(irq_flag_name,
                              flagsize, socket_id,
                              RING_F_SP_ENQ | RING_F_SC_DEQ ); /* single prod, single cons */
#else
        key = get_rx_shmkey(i);
        if ((shmid = shmget(key, SHMSZ, IPC_CREAT | 0666)) < 0) {
            fprintf(stderr, "can not create the shared memory segment for client %d\n", i);
            exit(1);
        }

        if ((shm = shmat(shmid, NULL, 0)) == (char *) -1) {
            fprintf(stderr, "can not attach the shared segment to the server space for client %d\n", i);
            exit(1);
        }

        clients[i].shm_server = (int *)shm;
#endif

#endif
    }
    return 0;
}
Example 16
/*
 * Application main function - loops through
 * receiving and processing packets. Never returns
 */
int
main(int argc, char *argv[])
{
    struct rte_ring *rx_ring = NULL;
    struct rte_ring *tx_ring = NULL;
    int retval = 0;
    void *pkts[PKT_READ_SIZE];
    int rslt = 0;

    if ((retval = rte_eal_init(argc, argv)) < 0) {
        return -1;
    }

    argc -= retval;
    argv += retval;

    if (parse_app_args(argc, argv) < 0) {
        rte_exit(EXIT_FAILURE, "Invalid command-line arguments\n");
    }

    rx_ring = rte_ring_lookup(get_rx_queue_name(client_id));
    if (rx_ring == NULL) {
        rte_exit(EXIT_FAILURE,
            "Cannot get RX ring - is server process running?\n");
    }

    tx_ring = rte_ring_lookup(get_tx_queue_name(client_id));
    if (tx_ring == NULL) {
        rte_exit(EXIT_FAILURE,
            "Cannot get TX ring - is server process running?\n");
    }

    RTE_LOG(INFO, APP, "Finished Process Init.\n");

    printf("\nClient process %d handling packets\n", client_id);
    printf("[Press Ctrl-C to quit ...]\n");

    for (;;) {
        unsigned rx_pkts = PKT_READ_SIZE;

        /* Try dequeuing max possible packets first, if that fails, get the
         * most we can. Loop body should only execute once, maximum.
         */
        while (unlikely(rte_ring_dequeue_bulk(rx_ring, pkts, rx_pkts) != 0) &&
            rx_pkts > 0) {
            rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring), PKT_READ_SIZE);
        }

        if (rx_pkts > 0) {
            pkt++;
            /* blocking enqueue */
            do {
                rslt = rte_ring_enqueue_bulk(tx_ring, pkts, rx_pkts);
            } while (rslt == -ENOBUFS);
        } else {
            no_pkt++;
        }

        if (!(pkt % 100000)) {
            printf("pkt %d %d\n", pkt, no_pkt);
            pkt = no_pkt = 0;
        }
    }
}
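
Note: the blocking enqueue above spins on -ENOBUFS because the old bulk API is all-or-nothing. A sketch of the same "send everything" semantics with the post-17.05 burst API, which enqueues what fits and lets the caller retry only the remainder:

#include <rte_ring.h>

static void
enqueue_all(struct rte_ring *r, void **objs, unsigned n)
{
	unsigned sent = 0;

	/* each call pushes as many objects as the ring can take right now */
	while (sent < n)
		sent += rte_ring_enqueue_burst(r, objs + sent, n - sent, NULL);
}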
Example 17
static inline uint32_t
sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
		uint32_t iq_num, unsigned int count, int keep_order)
{
	uint32_t i;
	uint32_t cq_idx = qid->cq_next_tx;

	/* This is the QID ID. The QID ID is static, hence it can be
	 * used to identify the stage of processing in history lists etc
	 */
	uint32_t qid_id = qid->id;

	if (count > MAX_PER_IQ_DEQUEUE)
		count = MAX_PER_IQ_DEQUEUE;

	if (keep_order)
		/* only schedule as many as we have reorder buffer entries */
		count = RTE_MIN(count,
				rte_ring_count(qid->reorder_buffer_freelist));

	for (i = 0; i < count; i++) {
		const struct rte_event *qe = iq_ring_peek(qid->iq[iq_num]);
		uint32_t cq_check_count = 0;
		uint32_t cq;

		/*
		 * For parallel, just send to the next available CQ in
		 * round-robin fashion. Scan for an available CQ; if all CQs
		 * are full, just return and move on to the next QID.
		 */
		do {
			if (++cq_check_count > qid->cq_num_mapped_cqs)
				goto exit;
			cq = qid->cq_map[cq_idx];
			if (++cq_idx == qid->cq_num_mapped_cqs)
				cq_idx = 0;
		} while (rte_event_ring_free_count(
				sw->ports[cq].cq_worker_ring) == 0 ||
				sw->ports[cq].inflights == SW_PORT_HIST_LIST);

		struct sw_port *p = &sw->ports[cq];
		if (sw->cq_ring_space[cq] == 0 ||
				p->inflights == SW_PORT_HIST_LIST)
			break;

		sw->cq_ring_space[cq]--;

		qid->stats.tx_pkts++;

		const int head = (p->hist_head & (SW_PORT_HIST_LIST-1));
		p->hist_list[head].fid = SW_HASH_FLOWID(qe->flow_id);
		p->hist_list[head].qid = qid_id;

		if (keep_order)
			rte_ring_sc_dequeue(qid->reorder_buffer_freelist,
					(void *)&p->hist_list[head].rob_entry);

		sw->ports[cq].cq_buf[sw->ports[cq].cq_buf_count++] = *qe;
		iq_ring_pop(qid->iq[iq_num]);

		rte_compiler_barrier();
		p->inflights++;
		p->stats.tx_pkts++;
		p->hist_head++;
	}
exit:
	qid->cq_next_tx = cq_idx;
	return i;
}
Example 18
/*
 * Application main function - loops through
 * receiving and processing packets. Never returns
 */
int
main(int argc, char *argv[])
{
    const struct rte_memzone *mz;
    struct rte_ring *rx_ring;
    struct rte_mempool *mp;
    struct port_info *ports;
    int need_flush = 0; /* indicates whether we have unsent packets */
    int retval;
    void *pkts[PKT_READ_SIZE];
    uint16_t sent;

    if ((retval = rte_eal_init(argc, argv)) < 0)
        return -1;
    argc -= retval;
    argv += retval;

    if (parse_app_args(argc, argv) < 0)
        rte_exit(EXIT_FAILURE, "Invalid command-line arguments\n");

    if (rte_eth_dev_count() == 0)
        rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");

    rx_ring = rte_ring_lookup(get_rx_queue_name(client_id));
    if (rx_ring == NULL)
        rte_exit(EXIT_FAILURE, "Cannot get RX ring - is server process running?\n");

    mp = rte_mempool_lookup(PKTMBUF_POOL_NAME);
    if (mp == NULL)
        rte_exit(EXIT_FAILURE, "Cannot get mempool for mbufs\n");

    mz = rte_memzone_lookup(MZ_PORT_INFO);
    if (mz == NULL)
        rte_exit(EXIT_FAILURE, "Cannot get port info structure\n");
    ports = mz->addr;
    tx_stats = &(ports->tx_stats[client_id]);

    configure_output_ports(ports);

    RTE_LOG(INFO, APP, "Finished Process Init.\n");

    printf("\nClient process %d handling packets\n", client_id);
    printf("[Press Ctrl-C to quit ...]\n");

    for (;;) {
        uint16_t i, rx_pkts = PKT_READ_SIZE;
        uint8_t port;

        /* try dequeuing max possible packets first, if that fails, get the
         * most we can. Loop body should only execute once, maximum */
        while (rx_pkts > 0 &&
                unlikely(rte_ring_dequeue_bulk(rx_ring, pkts, rx_pkts) != 0))
            rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring), PKT_READ_SIZE);

        if (unlikely(rx_pkts == 0)) {
            if (need_flush)
                for (port = 0; port < ports->num_ports; port++) {
                    sent = rte_eth_tx_buffer_flush(ports->id[port], client_id,
                                                   tx_buffer[port]);
                    if (unlikely(sent))
                        tx_stats->tx[port] += sent;
                }
            need_flush = 0;
            continue;
        }

        for (i = 0; i < rx_pkts; i++)
            handle_packet(pkts[i]);

        need_flush = 1;
    }
}