int main(int argc, char **argv) {
	// eal args
	int num_args = rte_eal_init(argc, argv);
	if (num_args < 0)
		rte_exit(EXIT_FAILURE, "init failed\n");
	argc -= num_args;
	argv += num_args;

	// our args: [-s] port1 port2
	uint8_t port1, port2;
	int opt = getopt(argc, argv, "s"); // getopt() returns int (-1 when no option is present)
	bool simple_tx = opt == 's';
	if (simple_tx) {
		printf("Requesting simple tx path\n");
		argc--;
		argv++;
	} else {
		printf("Requesting full-featured tx path\n");
	}
	if (argc != 3) {
		printf("usage: [-s] port1 port2\n");
		return -1;
	}
	port1 = atoi(argv[1]);
	port2 = atoi(argv[2]);
	printf("Using ports %d and %d\n", port1, port2);

	if (!config_port(port1, simple_tx)) return -1;
	if (!config_port(port2, simple_tx)) return -1;

	struct rte_mempool* pool = make_mempool();
	
	uint64_t sent = 0;
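	// report once per second; rte_get_tsc_hz() is the number of TSC cycles per second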
	uint64_t next_print = rte_get_tsc_hz();
	uint64_t last_sent = 0;
	while (true) {
		sent += send_pkts(port1, pool);
		sent += send_pkts(port2, pool);
		uint64_t time = rte_rdtsc();
		if (time >= next_print) {
			double elapsed = (double) (time - next_print + rte_get_tsc_hz()) / rte_get_tsc_hz(); // cast before dividing to avoid integer truncation
			uint64_t pkts = sent - last_sent;
			printf("Packet rate: %.2f Mpps\n", (double) pkts / elapsed / 1000000);
			next_print = time + rte_get_tsc_hz();
			last_sent = sent;
		}
	}

	return 0;
}
static void init_task_qinq_encap4(struct task_base *tbase, struct task_args *targ)
{
	struct task_qinq_encap4 *task = (struct task_qinq_encap4 *)(tbase);
	int socket_id = rte_lcore_to_socket_id(targ->lconf->id);

	task->qinq_tag = targ->qinq_tag;
	task->cpe_table = targ->cpe_table;
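	/* CPE entry timeout: TSC cycles per millisecond * timeout in ms */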
	task->cpe_timeout = rte_get_tsc_hz()/1000*targ->cpe_table_timeout_ms;

	if (!strcmp(targ->task_init->sub_mode_str, "pe")) {
		PROX_PANIC(!strcmp(targ->cpe_table_name, ""), "CPE table not configured\n");
		fill_table(targ, task->cpe_table);
	}

#ifdef ENABLE_EXTRA_USER_STATISTICS
	task->n_users = targ->n_users;
	task->stats_per_user = rte_zmalloc_socket(NULL, targ->n_users * sizeof(uint32_t),
						  RTE_CACHE_LINE_SIZE, rte_lcore_to_socket_id(targ->lconf->id));
#endif
	if (targ->runtime_flags & TASK_CLASSIFY) {
		PROX_PANIC(!strcmp(targ->dscp, ""), "DSCP table not specified\n");
		task->dscp = prox_sh_find_socket(socket_id, targ->dscp);
		if (!task->dscp) {
			int ret = lua_to_dscp(prox_lua(), GLOBAL, targ->dscp, socket_id, &task->dscp);
			PROX_PANIC(ret, "Failed to create dscp table from config:\n%s\n",
				   get_lua_to_errors());
			prox_sh_add_socket(socket_id, targ->dscp, task->dscp);
		}
	}

	task->runtime_flags = targ->runtime_flags;

	for (uint32_t i = 0; i < 64; ++i) {
		task->fake_packets[i] = (struct rte_mbuf*)((uint8_t*)&task->keys[i] - sizeof (struct rte_mbuf));
	}

	targ->lconf->ctrl_timeout = rte_get_tsc_hz()/targ->ctrl_freq;
	targ->lconf->ctrl_func_m[targ->task] = arp_msg;

	/* TODO: check whether the reverse mapping should be limited to
	   the elements whose mapping has changed */

	for (uint32_t i = 0; i < sizeof(targ->mapping)/sizeof(targ->mapping[0]); ++i) {
		task->src_mac[targ->mapping[i]] = *(uint64_t*)&prox_port_cfg[i].eth_addr;
	}

	/* task->src_mac[entry->port_idx] = *(uint64_t*)&prox_port_cfg[entry->port_idx].eth_addr; */
}
Example #3
static int
setup_ip_frag_tbl(void)
{
    lcore_conf_t *qconf;
    uint32_t max_flow_num = DEFAULT_FLOW_NUM;
    uint32_t max_flow_ttl = DEFAULT_FLOW_TTL;
    int socket;
    uint64_t frag_cycles;

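    /* per-flow TTL in TSC cycles: cycles per ms (rounded up) * max_flow_ttl in ms */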
    frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) / MS_PER_S *
        max_flow_ttl;

    for (int i = 0; i < sk.nr_lcore_ids; ++i) {
        int lcore_id = sk.lcore_ids[i];
        qconf = &sk.lcore_conf[lcore_id];
        socket = rte_lcore_to_socket_id(lcore_id);
        if (socket == SOCKET_ID_ANY) socket = 0;

        if ((qconf->frag_tbl = rte_ip_frag_table_create(max_flow_num,
                                                        IP_FRAG_TBL_BUCKET_ENTRIES,
                                                        max_flow_num, frag_cycles,
                                                        socket)) == NULL)
        {
            /* RTE_LOG requires a logtype; USER1 is DPDK's generic user logtype */
            RTE_LOG(ERR, USER1, "ip_frag_tbl_create(%u) on "
                    "lcore: %u failed\n",
                    max_flow_num, lcore_id);
            return -1;
        }
    }
    return 0;
}
Example #4
	// FIXME: support packet sizes here
	static inline void main_loop_poisson(struct rte_ring* ring, uint8_t device, uint16_t queue, uint32_t target, uint32_t link_speed) {
		uint64_t tsc_hz = rte_get_tsc_hz();
		// control IPGs instead of IDT as IDTs < packet_time are physically impossible
		std::default_random_engine rand;
		uint64_t next_send = 0;
		struct rte_mbuf* bufs[batch_size];
		while (1) {
			int rc = ring_dequeue(ring, reinterpret_cast<void**>(bufs), batch_size);
			uint64_t cur = rte_get_tsc_cycles();
			// nothing sent for 10 ms, restart rate control
			if (((int64_t) cur - (int64_t) next_send) > (int64_t) tsc_hz / 100) {
				next_send = cur;
			}
			if (rc == 0) {
				uint32_t sent = 0;
				while (sent < batch_size) {
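					// wire time of this frame in ns: payload plus 24 B framing (preamble/SFD, IFG, FCS), assuming link_speed is in Mbit/s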
					uint64_t pkt_time = (bufs[sent]->pkt.pkt_len + 24) * 8 / (link_speed / 1000);
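					// mean idle gap for the target inter-departure time, less the frame's own wire time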
					uint64_t avg = (uint64_t) (tsc_hz / (1000000000 / target) - pkt_time);
					std::exponential_distribution<double> distribution(1.0 / avg);
					while ((cur = rte_get_tsc_cycles()) < next_send);
					next_send += distribution(rand) + pkt_time;
					sent += rte_eth_tx_burst(device, queue, bufs + sent, 1);
				}
			}
		}
	}
Example #5
static void
l2sw_main_process(struct lcore_env *env)
{
    struct rte_mbuf *pkt_burst[MAX_PKT_BURST];
    uint8_t n_ports = rte_eth_dev_count();
    unsigned lcore_id = rte_lcore_id();
    uint64_t prev_tsc, diff_tsc, cur_tsc, timer_tsc;
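    /* flush TX buffers every BURST_TX_DRAIN_US microseconds, expressed in TSC cycles (rounded up) */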
    const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S
                               * BURST_TX_DRAIN_US;

    //RTE_LOG(INFO, MARIO, "[%u] Starting main processing.\n", lcore_id);

    prev_tsc = 0;
    timer_tsc = 0;
    while(1) {
        cur_tsc = rte_rdtsc();

        diff_tsc = cur_tsc - prev_tsc;
        if (unlikely(diff_tsc > drain_tsc)) {
            uint8_t port_id;
            for(port_id = 0; port_id < n_ports; port_id++) {
                if (env->tx_mbufs[port_id].len == 0)
                    continue;
                l2sw_send_burst(env, port_id, env->tx_mbufs[port_id].len);
                env->tx_mbufs[port_id].len = 0;
            }

            /* if timer is enabled */
            if (timer_period > 0) {
                /* advance the timer */
                timer_tsc += diff_tsc;
                /* if timer has reached its timeout */
                if (unlikely(timer_tsc >= (uint64_t) timer_period)) {
                    /* do this only on master core */
                    if (lcore_id == rte_get_master_lcore()) {
                        //print_stats(env);
                        /* reset the timer */
                        timer_tsc = 0;
                    }
                }
            }
            prev_tsc = cur_tsc;
        }

        /* RX */
        uint8_t port_id;
        for (port_id = 0; port_id < n_ports; port_id++) {
            unsigned n_rx = rte_eth_rx_burst(port_id, lcore_id,
                                             pkt_burst, MAX_PKT_BURST);
            if (n_rx != 0) {
                //RTE_LOG(INFO, MARIO, "[%u-%u] %u packet(s) came.\n",
                //        lcore_id, port_id,  n_rx);
                __sync_fetch_and_add(&port_statistics[port_id].rx, n_rx);
            }

            ether_in(env, pkt_burst, n_rx, port_id);
        }
    }
    return;
}
Example #6
int tw_timer_start(tw_timer_t* timer_handle, tw_timer_cb timer_cb, uint64_t timeout) {
    if (timer_handle == NULL)
        return -1;
    timer_handle->timer_cb = timer_cb;
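    /* convert the timeout from milliseconds to TSC cycles */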
    timer_handle->timeout = timeout * (rte_get_tsc_hz() / 1000);
    return 0;
}
Example #7
static void *
rte_port_ring_writer_ras_create(void *params, int socket_id, int is_ipv4)
{
	struct rte_port_ring_writer_ras_params *conf = params;
	struct rte_port_ring_writer_ras *port;
	uint64_t frag_cycles;

	/* Check input parameters */
	if (conf == NULL) {
		RTE_LOG(ERR, PORT, "%s: Parameter conf is NULL\n", __func__);
		return NULL;
	}
	if (conf->ring == NULL) {
		RTE_LOG(ERR, PORT, "%s: Parameter ring is NULL\n", __func__);
		return NULL;
	}
	if ((conf->tx_burst_sz == 0) ||
	    (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX)) {
		RTE_LOG(ERR, PORT, "%s: Parameter tx_burst_sz is invalid\n",
			__func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Failed to allocate socket\n", __func__);
		return NULL;
	}

	/* Create fragmentation table */
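	/* entry TTL of roughly 100 seconds, expressed in TSC cycles */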
	frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) / MS_PER_S * MS_PER_S;
	frag_cycles *= 100;

	port->frag_tbl = rte_ip_frag_table_create(
		RTE_PORT_RAS_N_BUCKETS,
		RTE_PORT_RAS_N_ENTRIES_PER_BUCKET,
		RTE_PORT_RAS_N_ENTRIES,
		frag_cycles,
		socket_id);

	if (port->frag_tbl == NULL) {
		RTE_LOG(ERR, PORT, "%s: rte_ip_frag_table_create failed\n",
			__func__);
		rte_free(port);
		return NULL;
	}

	/* Initialization */
	port->ring = conf->ring;
	port->tx_burst_sz = conf->tx_burst_sz;
	port->tx_buf_count = 0;

	port->f_ras = (is_ipv4 == 1) ? process_ipv4 : process_ipv6;

	return port;
}
Example #8
static uint64_t tsc_extrapolate_backward(uint64_t tsc_from, uint64_t bytes, uint64_t tsc_minimum)
{
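	/* rewind tsc_from by the wire time of "bytes" at 10 Gbit/s (1.25e9 bytes/s) */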
	uint64_t tsc = tsc_from - rte_get_tsc_hz()*bytes/1250000000;
	if (likely(tsc > tsc_minimum))
		return tsc;
	else
		return tsc_minimum;
}
Example #9
/**
 * @brief           StreamInfo copy constructor
 *
 * @param other     Object to be copied
 *
 */
DPDKAdapter::StreamInfo::StreamInfo(const StreamInfo& other)
{
    devId_ = other.devId_;
    delay_ = other.delay_;
    lastTx_ = other.lastTx_;
    numBursts_ = other.numBursts_;
    txBurstSize_ = other.txBurstSize_;
    burstSize_ = other.burstSize_;
    sentPackets_ = other.sentPackets_;
    sentBursts_ = other.sentBursts_;
    curPacketId_ = other.curPacketId_;
    ticksDelay_ = (uint64_t)delay_ * rte_get_tsc_hz() / SEC_TO_NSEC;

    packets_ = other.packets_;

    qDebug("ticksDelay_ %llu, CPU frequency %llu", ticksDelay_, rte_get_tsc_hz());
}
Example #10
/**
 * @brief             StreamInfo constructor
 *
 * @param devId       Port number
 * @param delay       Delay in nano seconds
 * @param burstSize   Stream burst size
 * @param numBursts   Number of bursts
 * @param txBurstSize A minimal packets batch size put on the wire
 */
DPDKAdapter::StreamInfo::StreamInfo(unsigned char devId, unsigned int delay, unsigned int burstSize, unsigned int numBursts, uint8_t txBurstSize) :
    devId_(devId),
    delay_(delay),
    lastTx_(0),
    numBursts_(numBursts),
    burstSize_(burstSize),
    txBurstSize_(txBurstSize),
    sentPackets_(0),
    sentBursts_(0),
    curPacketId_(0)
{
    ticksDelay_ = (uint64_t)delay * rte_get_tsc_hz() / SEC_TO_NSEC;

    qDebug("txBurstSize %u", txBurstSize); 

    qDebug("ticksDelay_ %llu, CPU frequency %llu", ticksDelay_, rte_get_tsc_hz());
}
Example #11
static void
init_per_lcore(void) {
    lcore_conf_t *qconf;
    unsigned lcore_id = rte_lcore_id();
    qconf = &sk.lcore_conf[lcore_id];
    qconf->tsc_hz = rte_get_tsc_hz();
    qconf->start_us = (uint64_t)ustime();
    qconf->start_tsc = rte_rdtsc();
}
Example #12
static void
print_trace(const char *msg, struct rte_keepalive *keepcfg, int idx_core)
{
	RTE_LOG(INFO, EAL, "%sLast seen %" PRId64 "ms ago.\n",
		msg,
		((rte_rdtsc() - keepcfg->last_alive[idx_core])*1000)
		/ rte_get_tsc_hz()
	      );
}
Example #13
void
app_ping(void)
{
	unsigned i;
	uint64_t timestamp, diff_tsc;

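	/* response timeout: APP_PING_TIMEOUT_SEC seconds expressed in TSC cycles */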
	const uint64_t timeout = rte_get_tsc_hz() * APP_PING_TIMEOUT_SEC;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];
		struct rte_ring *ring_req, *ring_resp;
		void *msg;
		struct app_msg_req *req;
		int status;

		if ((p->core_type != APP_CORE_FC) &&
		    (p->core_type != APP_CORE_FW) &&
		    (p->core_type != APP_CORE_RT) &&
		    (p->core_type != APP_CORE_RX))
			continue;

		ring_req = app_get_ring_req(p->core_id);
		ring_resp = app_get_ring_resp(p->core_id);

		/* Fill request message */
		msg = (void *)rte_ctrlmbuf_alloc(app.msg_pool);
		if (msg == NULL)
			rte_panic("Unable to allocate new message\n");

		req = (struct app_msg_req *)
				rte_ctrlmbuf_data((struct rte_mbuf *)msg);
		req->type = APP_MSG_REQ_PING;

		/* Send request */
		do {
			status = rte_ring_sp_enqueue(ring_req, msg);
		} while (status == -ENOBUFS);

		/* Wait for response */
		timestamp = rte_rdtsc();
		do {
			status = rte_ring_sc_dequeue(ring_resp, &msg);
			diff_tsc = rte_rdtsc() - timestamp;

			if (unlikely(diff_tsc > timeout))
				rte_panic("Core %u of type %d does not respond "
					"to requests\n", p->core_id,
					p->core_type);
		} while (status != 0);

		/* Free message buffer */
		rte_ctrlmbuf_free(msg);
	}
}
Example #14
/*
 * Flush packets scheduled for transmit on ports
 */
static void
flush_pkts(unsigned action)
{
	unsigned i = 0;
	uint16_t deq_count = PKT_BURST_SIZE;
	struct rte_mbuf *pkts[PKT_BURST_SIZE] =  {0};
	struct port_queue *pq =  &port_queues[action & PORT_MASK];
	struct statistics *s = &vport_stats[action];
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
	uint64_t diff_tsc = 0;
	static uint64_t prev_tsc[MAX_PHYPORTS] = {0};
	uint64_t cur_tsc = rte_rdtsc();
	unsigned num_pkts;

	diff_tsc = cur_tsc - prev_tsc[action & PORT_MASK];

	if (unlikely(rte_ring_count(pq->tx_q) >= PKT_BURST_SIZE))
	{
		num_pkts = PKT_BURST_SIZE;
	}
	else
	{
		/* If queue idles with less than PKT_BURST packets, drain it*/
		if(unlikely(diff_tsc > drain_tsc)) {
			num_pkts = rte_ring_count(pq->tx_q);
		}
		else {
			return;
		}
	}

	if (unlikely(rte_ring_dequeue_bulk(
			      pq->tx_q, (void **)pkts, num_pkts) != 0))
		return;

	const uint16_t sent = rte_eth_tx_burst(
				 ports->id[action & PORT_MASK], 0, pkts, num_pkts);

	prev_tsc[action & PORT_MASK] = cur_tsc;

	if (unlikely(sent < num_pkts))
	{
		for (i = sent; i < num_pkts; i++)
			rte_pktmbuf_free(pkts[i]);
		s->tx_drop += (num_pkts - sent);
	}
	else
	{
		s->tx += sent;
	}
}
Example #15
/**
 * @brief           DPDKProfiler destructor
 */
DPDKProfiler::~DPDKProfiler()
{
    uint64_t end = rte_get_tsc_cycles();

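    // convert the elapsed cycle count to nanoseconds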
    stats_[coreId_][name_].last_duration = (end - start_) * SEC_TO_NSEC / rte_get_tsc_hz();
    stats_[coreId_][name_].total_duration += stats_[coreId_][name_].last_duration;
    stats_[coreId_][name_].invoke_cnt += 1;

    if (stats_[coreId_][name_].invoke_cnt == 10000000)
    {
        qWarning("%s on core %u : last duration %lu, medium duration %lu", name_.c_str(), coreId_, lastDurationGet(coreId_, name_), medDurationGet(coreId_, name_));
        reset(coreId_, name_);
    }
}
Example #16
/*
 * Function sends unmatched packets to vswitchd.
 */
void
send_packet_to_vswitchd(struct rte_mbuf *mbuf, struct dpdk_upcall *info)
{
	int rslt = 0;
	void *mbuf_ptr = NULL;
	const uint64_t dpif_send_tsc =
		(rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * DPIF_SEND_US;
	uint64_t cur_tsc = 0;
	uint64_t diff_tsc = 0;
	static uint64_t prev_tsc = 0;

	/* send one packet, delete information about segments */
	rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf);

	/* allocate space before the packet for the upcall info */
	mbuf_ptr = rte_pktmbuf_prepend(mbuf, sizeof(*info));

	if (mbuf_ptr == NULL) {
		printf("Cannot prepend upcall info\n");
		rte_pktmbuf_free(mbuf);
		stats_vswitch_tx_drop_increment(INC_BY_1);
		stats_vport_tx_drop_increment(VSWITCHD, INC_BY_1);
		return;
	}

	rte_memcpy(mbuf_ptr, info, sizeof(*info));

	/* send the packet and the upcall info to the daemon */
	rslt = rte_ring_mp_enqueue(vswitchd_packet_ring, mbuf);
	if (rslt < 0) {
		if (rslt == -ENOBUFS) {
			rte_pktmbuf_free(mbuf);
			stats_vswitch_tx_drop_increment(INC_BY_1);
			stats_vport_tx_drop_increment(VSWITCHD, INC_BY_1);
			return;
		} else {
			stats_vport_overrun_increment(VSWITCHD, INC_BY_1);
		}
	}

	stats_vport_tx_increment(VSWITCHD, INC_BY_1);

	cur_tsc = rte_rdtsc();
	diff_tsc = cur_tsc - prev_tsc;
	prev_tsc = cur_tsc;
	/* Only signal the daemon after 100 milliseconds */
	if (unlikely(diff_tsc > dpif_send_tsc))
		send_signal_to_dpif();
}
Example #17
// FIXME: link speed is hardcoded to 10gbit (but not really relevant for this use case where you should have only one packet anyways)
// this is only optimized for latency measurements/timestamping, not packet capture
// packet capturing would benefit from running the whole rx thread in C to avoid gc/jit pauses
uint16_t receive_with_timestamps_software(uint8_t port_id, uint16_t queue_id, struct rte_mbuf* rx_pkts[], uint16_t nb_pkts, uint64_t timestamps[]) {
	// cycles per byte on the wire at 10 Gbit/s (1.25e9 bytes/s); the +24 below adds per-frame overhead
	double cycles_per_byte = rte_get_tsc_hz() / 1250000000.0;
	while (is_running()) {
		uint64_t tsc = read_rdtsc();
		uint16_t rx = rte_eth_rx_burst(port_id, queue_id, rx_pkts, nb_pkts);
		uint16_t prev_pkt_size = 0;
		for (int i = 0; i < rx; i++) {
			timestamps[i] = tsc + prev_pkt_size * cycles_per_byte;
			prev_pkt_size = rx_pkts[i]->pkt_len + 24;
		}
		if (rx > 0) {
			return rx;
		}
	}
	return 0;
}
Example #18
static int tsc_diff_to_tv(uint64_t beg, uint64_t end, struct timeval *tv)
{
    if (end < beg) {
        return -1;
    }

    uint64_t diff = end - beg;
    uint64_t sec_tsc = rte_get_tsc_hz();
    uint64_t sec = diff/sec_tsc;

    tv->tv_sec = sec;
    diff -= sec*sec_tsc;
    tv->tv_usec = diff*1000000/sec_tsc;

    return 0;
}
Example #19
static void
app_init_pipelines(struct app_params *app)
{
	uint32_t p_id;

	for (p_id = 0; p_id < app->n_pipelines; p_id++) {
		struct app_pipeline_params *params =
			&app->pipeline_params[p_id];
		struct app_pipeline_data *data = &app->pipeline_data[p_id];
		struct pipeline_type *ptype;
		struct pipeline_params pp;

		APP_LOG(app, HIGH, "Initializing %s ...", params->name);

		ptype = app_pipeline_type_find(app, params->type);
		if (ptype == NULL)
			rte_panic("Init error: Unknown pipeline type \"%s\"\n",
				params->type);

		app_pipeline_params_get(app, params, &pp);

		/* Back-end */
		data->be = NULL;
		if (ptype->be_ops->f_init) {
			data->be = ptype->be_ops->f_init(&pp, (void *) app);

			if (data->be == NULL)
				rte_panic("Pipeline instance \"%s\" back-end "
					"init error\n", params->name);
		}

		/* Front-end */
		data->fe = NULL;
		if (ptype->fe_ops->f_init) {
			data->fe = ptype->fe_ops->f_init(&pp, (void *) app);

			if (data->fe == NULL)
				rte_panic("Pipeline instance \"%s\" front-end "
				"init error\n", params->name);
		}

		data->ptype = ptype;

		data->timer_period = (rte_get_tsc_hz() *
			params->timer_period) / 100;
	}
}
Example #20
/**
 * functional test for rte_meter_trtcm_color_blind_check
 */
static inline int
tm_test_trtcm_color_blind_check(void)
{
#define TRTCM_BLIND_CHECK_MSG "trtcm_blind_check"

	uint64_t time;
	struct rte_meter_trtcm tm;
	uint64_t hz = rte_get_tsc_hz();

	/* Test green */
	if(rte_meter_trtcm_config(&tm, &tparams) != 0)
		melog(TRTCM_BLIND_CHECK_MSG);
	time = rte_get_tsc_cycles() + hz;
	if(rte_meter_trtcm_color_blind_check(
		&tm, time, TM_TEST_TRTCM_CBS_DF - 1)
		!= e_RTE_METER_GREEN)
		melog(TRTCM_BLIND_CHECK_MSG" GREEN");

	/* Test yellow */
	if(rte_meter_trtcm_config(&tm, &tparams) != 0)
		melog(TRTCM_BLIND_CHECK_MSG);
	time = rte_get_tsc_cycles() + hz;
	if(rte_meter_trtcm_color_blind_check(
		&tm, time, TM_TEST_TRTCM_CBS_DF + 1)
		!= e_RTE_METER_YELLOW)
		melog(TRTCM_BLIND_CHECK_MSG" YELLOW");

	if(rte_meter_trtcm_config(&tm, &tparams) != 0)
		melog(TRTCM_BLIND_CHECK_MSG);
	time = rte_get_tsc_cycles() + hz;
	if(rte_meter_trtcm_color_blind_check(
		&tm, time, TM_TEST_TRTCM_PBS_DF - 1)
		!= e_RTE_METER_YELLOW)
		melog(TRTCM_BLIND_CHECK_MSG" YELLOW");

	/* Test red */
	if(rte_meter_trtcm_config(&tm, &tparams) != 0)
		melog(TRTCM_BLIND_CHECK_MSG);
	time = rte_get_tsc_cycles() + hz;
	if(rte_meter_trtcm_color_blind_check(
		&tm, time, TM_TEST_TRTCM_PBS_DF + 1)
		!= e_RTE_METER_RED)
		melog(TRTCM_BLIND_CHECK_MSG" RED");

	return 0;
}
Example #21
struct rte_keepalive *
rte_keepalive_create(rte_keepalive_failure_callback_t callback,
	void *data)
{
	struct rte_keepalive *keepcfg;

	keepcfg = rte_zmalloc("RTE_EAL_KEEPALIVE",
		sizeof(struct rte_keepalive),
		RTE_CACHE_LINE_SIZE);
	if (keepcfg != NULL) {
		keepcfg->callback = callback;
		keepcfg->callback_data = data;
		keepcfg->tsc_initial = rte_rdtsc();
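		/* TSC cycles per millisecond, despite the "mhz" field name */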
		keepcfg->tsc_mhz = rte_get_tsc_hz() / 1000;
	}
	return keepcfg;
}
Example #22
/*
 * This function must be called by the application to initialize
 * the polling rates for the driver, timers, and the readable/writable socket lists.
 * Parameters: drv_poll_interval, timer_poll_interval, tx_ready_sockets_poll_interval,
 * rx_ready_sockets_poll_interval - all in microseconds
 * Returns: None
 *
 */
void app_glue_init_poll_intervals(int drv_poll_interval,
		                          int timer_poll_interval,
		                          int tx_ready_sockets_poll_interval,
		                          int rx_ready_sockets_poll_interval)
{
	syslog(LOG_INFO,"%s %d %d %d %d %d\n",__func__,__LINE__,
			drv_poll_interval,timer_poll_interval,tx_ready_sockets_poll_interval,
			rx_ready_sockets_poll_interval);
	float cycles_in_micro = rte_get_tsc_hz() / 1000000.0f; /* divide in floating point to keep sub-MHz precision */
	app_glue_drv_poll_interval = cycles_in_micro*(float)drv_poll_interval;
	app_glue_timer_poll_interval = cycles_in_micro*(float)timer_poll_interval;
	app_glue_tx_ready_sockets_poll_interval = cycles_in_micro*(float)tx_ready_sockets_poll_interval;
	app_glue_rx_ready_sockets_poll_interval = cycles_in_micro*(float)rx_ready_sockets_poll_interval;
	syslog(LOG_INFO,"%s %d %"PRIu64" %"PRIu64" %"PRIu64" %"PRIu64"\n",__func__,__LINE__,
			app_glue_drv_poll_interval,app_glue_timer_poll_interval,
			app_glue_tx_ready_sockets_poll_interval,app_glue_rx_ready_sockets_poll_interval);
}
Example #23
/* Send burst of outgoing packet, if timeout expires. */
static inline void
send_timeout_burst(void)
{
	uint64_t cur_tsc;
	uint8_t port;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;

	cur_tsc = rte_rdtsc();
	if (likely (cur_tsc < port_tx_conf.tx_tsc + drain_tsc))
		return;

	for (port = 0; port < MAX_PORTS; port++) {
		if (port_tx_conf.tx_mbufs[port].len != 0)
			send_burst(port);
	}
	port_tx_conf.tx_tsc = cur_tsc;
}
Example #24
/**
 * functional test for rte_meter_srtcm_color_blind_check
 */
static inline int
tm_test_srtcm_color_blind_check(void)
{
#define SRTCM_BLIND_CHECK_MSG "srtcm_blind_check"
	struct rte_meter_srtcm sm;
	uint64_t time;
	uint64_t hz = rte_get_tsc_hz();

	/* Test green */
	if(rte_meter_srtcm_config(&sm, &sparams) != 0)
		melog(SRTCM_BLIND_CHECK_MSG);
	time = rte_get_tsc_cycles() + hz;
	if(rte_meter_srtcm_color_blind_check(
		&sm, time, TM_TEST_SRTCM_CBS_DF - 1)
		!= e_RTE_METER_GREEN)
		melog(SRTCM_BLIND_CHECK_MSG" GREEN");

	/* Test yellow */
	if(rte_meter_srtcm_config(&sm, &sparams) != 0)
		melog(SRTCM_BLIND_CHECK_MSG);
	time = rte_get_tsc_cycles() + hz;
	if(rte_meter_srtcm_color_blind_check(
		&sm, time, TM_TEST_SRTCM_CBS_DF + 1)
		!= e_RTE_METER_YELLOW)
		melog(SRTCM_BLIND_CHECK_MSG" YELLOW");

	if(rte_meter_srtcm_config(&sm, &sparams) != 0)
		melog(SRTCM_BLIND_CHECK_MSG);
	time = rte_get_tsc_cycles() + hz;
	if(rte_meter_srtcm_color_blind_check(
		&sm, time, (uint32_t)sm.ebs - 1) != e_RTE_METER_YELLOW)
		melog(SRTCM_BLIND_CHECK_MSG" YELLOW");

	/* Test red */
	if(rte_meter_srtcm_config(&sm, &sparams) != 0)
		melog(SRTCM_BLIND_CHECK_MSG);
	time = rte_get_tsc_cycles() + hz;
	if(rte_meter_srtcm_color_blind_check(
		&sm, time, TM_TEST_SRTCM_EBS_DF + 1)
		!= e_RTE_METER_RED)
		melog(SRTCM_BLIND_CHECK_MSG" RED");

	return 0;
}
Example #25
static inline int wait_command_handled(struct lcore_cfg *lconf)
{
	uint64_t t1 = rte_rdtsc(), t2;
	while (lconf_is_req(lconf)) {
		t2 = rte_rdtsc();
		if (t2 - t1 > 5 * rte_get_tsc_hz()) {
			// Failed to handle command ...
			for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
				struct task_args *targs = &lconf->targs[task_id];
				if (!(targs->flags & TASK_ARG_DROP)) {
					plogx_err("Failed to handle command - task is in NO_DROP and might be stuck...\n");
					return -1;
				}
			}
			plogx_err("Failed to handle command\n");
			return -1;
		}
	}
	return 0;
}
Example #26
static int setup_prox(int argc, char **argv)
{
	if (prox_read_config_file() != 0 ||
	    prox_setup_rte(argv[0]) != 0) {
		return -1;
	}

	if (prox_cfg.flags & DSF_CHECK_SYNTAX) {
		plog_info("=== Configuration file syntax has been checked ===\n\n");
		exit(EXIT_SUCCESS);
	}

	init_port_activate();
	plog_info("=== Initializing rte devices ===\n");
	if (!(prox_cfg.flags & DSF_USE_DUMMY_DEVICES))
		init_rte_ring_dev();
	init_rte_dev(prox_cfg.flags & DSF_USE_DUMMY_DEVICES);
	plog_info("=== Calibrating TSC overhead ===\n");
	clock_init();
	plog_info("\tTSC running at %"PRIu64" Hz\n", rte_get_tsc_hz());

	init_lcores();
	plog_info("=== Initializing ports ===\n");
	init_port_all();

	if (prox_cfg.logbuf_size) {
		prox_cfg.logbuf = prox_zmalloc(prox_cfg.logbuf_size, rte_socket_id());
		PROX_PANIC(prox_cfg.logbuf == NULL, "Failed to allocate memory for logbuf with size = %d\n", prox_cfg.logbuf_size);
	}

	if (prox_cfg.flags & DSF_CHECK_INIT) {
		plog_info("=== Initialization sequence completed ===\n\n");
		exit(EXIT_SUCCESS);
	}

	/* Current way that works to disable DPDK logging */
	FILE *f = fopen("/dev/null", "r");
	rte_openlog_stream(f);
	plog_info("=== PROX started ===\n");
	return 0;
}
Example #27
int sflow_timer_callback(void) {
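    // TSC timestamp L4_SFLOW_EXPIRED_SECONDS in the past; sockets idle since then are stale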
    uint64_t expired_ticks = rte_rdtsc() - (rte_get_tsc_hz() * L4_SFLOW_EXPIRED_SECONDS);
    int expired_sockets = 0;
    sflow_socket_t* socket;

    rte_rwlock_write_lock(&sflow_hash_lock);
    for (int i = 0; i < L4_SFLOW_HASH_SIZE; ++i) {
        socket = sflow_sockets[i];
        if (!socket) continue;
        if (socket->rx_ticks < expired_ticks) {
            rte_spinlock_recursive_lock(&socket->lock);
            sflow_socket_delete(&socket->key, 1);
            rte_spinlock_recursive_unlock(&socket->lock);
            ++expired_sockets;
        }
    }
    rte_rwlock_write_unlock(&sflow_hash_lock);

    RTE_LOG(NOTICE, L3L4, "deleted %d expired sflow sockets\n", expired_sockets);
    return 0;
}
Example #28
/**
 * @in[4]  : the flags the packets carry.
 * @out[4] : the flags the function is expected to return.
 * The color-aware check is done at a time 1 second from the beginning,
 * using packet lengths of cbs - 1, cbs + 1, pbs - 1 and pbs + 1 with
 * flags in[0], in[1], in[2] and in[3], expecting flags out[0], out[1],
 * out[2] and out[3].
 */
static inline int
tm_test_trtcm_aware_check
(enum rte_meter_color in[4], enum rte_meter_color out[4])
{
#define TRTCM_AWARE_CHECK_MSG "trtcm_aware_check"
	struct rte_meter_trtcm tm;
	uint64_t time;
	uint64_t hz = rte_get_tsc_hz();

	if(rte_meter_trtcm_config(&tm, &tparams) != 0)
		melog(TRTCM_AWARE_CHECK_MSG);
	time = rte_get_tsc_cycles() + hz;
	if(rte_meter_trtcm_color_aware_check(
		&tm, time, TM_TEST_TRTCM_CBS_DF - 1, in[0]) != out[0])
		melog(TRTCM_AWARE_CHECK_MSG" %u:%u", in[0], out[0]);

	if(rte_meter_trtcm_config(&tm, &tparams) != 0)
		melog(TRTCM_AWARE_CHECK_MSG);
	time = rte_get_tsc_cycles() + hz;
	if(rte_meter_trtcm_color_aware_check(
		&tm, time, TM_TEST_TRTCM_CBS_DF + 1, in[1]) != out[1])
		melog(TRTCM_AWARE_CHECK_MSG" %u:%u", in[1], out[1]);

	if(rte_meter_trtcm_config(&tm, &tparams) != 0)
		melog(TRTCM_AWARE_CHECK_MSG);
	time = rte_get_tsc_cycles() + hz;
	if(rte_meter_trtcm_color_aware_check(
		&tm, time, TM_TEST_TRTCM_PBS_DF - 1, in[2]) != out[2])
		melog(TRTCM_AWARE_CHECK_MSG" %u:%u", in[2], out[2]);

	if(rte_meter_trtcm_config(&tm, &tparams) != 0)
		melog(TRTCM_AWARE_CHECK_MSG);
	time = rte_get_tsc_cycles() + hz;
	if(rte_meter_trtcm_color_aware_check(
		&tm, time, TM_TEST_TRTCM_PBS_DF + 1, in[3]) != out[3])
		melog(TRTCM_AWARE_CHECK_MSG" %u:%u", in[3], out[3]);

	return 0;
}
Example #29
	static inline void main_loop_cbr(struct rte_ring* ring, uint8_t device, uint16_t queue, uint32_t target) {
		uint64_t tsc_hz = rte_get_tsc_hz();
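		// target inter-departure time in nanoseconds converted to TSC cycles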
		uint64_t id_cycles = (uint64_t) (target / (1000000000.0 / ((double) tsc_hz)));
		uint64_t next_send = 0;
		struct rte_mbuf* bufs[batch_size];
		while (1) {
			int rc = ring_dequeue(ring, reinterpret_cast<void**>(bufs), batch_size);
			uint64_t cur = rte_get_tsc_cycles();
			// nothing sent for 10 ms, restart rate control
			if (((int64_t) cur - (int64_t) next_send) > (int64_t) tsc_hz / 100) {
				next_send = cur;
			}
			if (rc == 0) {
				uint32_t sent = 0;
				while (sent < batch_size) {
					while ((cur = rte_get_tsc_cycles()) < next_send);
					next_send += id_cycles;
					sent += rte_eth_tx_burst(device, queue, bufs + sent, 1);
				}
			}
		}
	}
Example #30
/**
 * @brief           Save mbuf burst to the capture buffer
 *
 * @param devId     Port number
 * @param burstBuf  mbuf burst
 * @param pktCount  Number of packets in the burst
 */
void DPDKAdapter::saveToBuf(uint8_t devId, MBuf_t** burstBuf, uint8_t pktCount)
{
    MBuf_t* m = NULL;
    DeviceInfo& devInfo = devices[devId];

    uint64_t rxTicksEnd = rte_get_tsc_cycles();
    uint64_t ticksDiff = rxTicksEnd - devInfo.rxTicksStart;
    uint64_t timestamp = (SEC_TO_NSEC * ticksDiff) / rte_get_tsc_hz();

    struct pcap_pkthdr hdr;
    memset(&hdr, 0, sizeof(pcap_pkthdr));

    uint32_t sec = timestamp / SEC_TO_NSEC;
    hdr.ts.tv_sec = sec;
    uint32_t usec = (timestamp - hdr.ts.tv_sec * SEC_TO_NSEC) / 1000; /* remainder ns -> us for tv_usec */
    hdr.ts.tv_usec = usec;

    for(uint8_t pkt = 0; pkt < pktCount; pkt++)
    {
        m = burstBuf[pkt];

        hdr.caplen = m->pkt.data_len;
        hdr.len = hdr.caplen;

        if(devInfo.captureDataSize + sizeof(hdr) + m->pkt.data_len > devInfo.captureDataLength)
        {
            qDebug("Capture buffer is full with %u bytes", devInfo.captureDataSize);
            devInfo.captureDataSize = 0;
        }

        memcpy(devInfo.captureData + devInfo.captureDataSize, &hdr, sizeof(hdr));
        devInfo.captureDataSize += sizeof(hdr);

        memcpy(devInfo.captureData + devInfo.captureDataSize, m->pkt.data, m->pkt.data_len);
        devInfo.captureDataSize += m->pkt.data_len;
    }
}