Code example #1
void
test_dpif_dpdk_flow_flush(struct dpif *dpif_p)
{
	struct dpif_dpdk_message reply;
	struct dpif_dpdk_message *request;
	struct rte_mbuf *mbuf = NULL;
	void *pktmbuf_data = NULL;
	int result = -1;

	/* It doesn't matter what kind of reply we enqueue here */
	create_dpdk_flow_del_reply(&reply, NO_FLOW);
	result = enqueue_reply_on_reply_ring(reply);
	assert(result == 0);
	assert(rte_ring_count(vswitchd_reply_ring) == 1);

	dpif_p->dpif_class->flow_flush(dpif_p);
	assert(rte_ring_count(vswitchd_message_ring) == 1);
	result = rte_ring_sc_dequeue(vswitchd_message_ring, (void **)&mbuf);
	assert(result == 0);

	/* Just test that the message created and enqueued on the request ring
	 * was correct
	 */
	pktmbuf_data = rte_pktmbuf_mtod(mbuf, void *);
	request = (struct dpif_dpdk_message *)pktmbuf_data;
	assert(request->flow_msg.cmd == OVS_FLOW_CMD_DEL);
	rte_pktmbuf_free(mbuf); /* don't leak the dequeued mbuf */
	printf(" %s\n", __FUNCTION__);
}
Code example #2
void
test_dpif_dpdk_flow_del(struct dpif *dpif_p)
{
	struct dpif_dpdk_message reply;
	struct dpif_dpdk_message *request;
	struct dpif_flow_del del;
	struct rte_mbuf *mbuf = NULL;
	void *pktmbuf_data = NULL;
	int result = -1;

	create_dpdk_flow_del_reply(&reply, NO_FLOW);
	result = enqueue_reply_on_reply_ring(reply);
	assert(result == 0);
	assert(rte_ring_count(vswitchd_reply_ring) == 1);

	create_dpif_flow_del_message(&del);
	dpif_p->dpif_class->flow_del(dpif_p, &del);
	assert(rte_ring_count(vswitchd_message_ring) == 1);
	result = rte_ring_sc_dequeue(vswitchd_message_ring, (void **)&mbuf);
	assert(result == 0);

	/* Just test that the message created and enqueued on the request ring
	 * was correct
	 */
	pktmbuf_data = rte_pktmbuf_mtod(mbuf, void *);
	request = (struct dpif_dpdk_message *)pktmbuf_data;
	assert(request->flow_msg.actions[0].type == ACTION_NULL);
	assert(request->flow_msg.key.in_port == 5);
	rte_pktmbuf_free(mbuf); /* don't leak the dequeued mbuf */
	printf(" %s\n", __FUNCTION__);
}
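
Both tests above rely on a helper, enqueue_reply_on_reply_ring(), whose definition is not shown. A hypothetical sketch of what such a helper might look like, assuming a pktmbuf mempool named pktmbuf_pool (the pool name and the mbuf handling are assumptions, not the original code):

/* Hypothetical sketch: the real helper is not shown in these examples.
 * Assumes a mempool `pktmbuf_pool` and the `vswitchd_reply_ring` that
 * the tests above drain. */
static int
enqueue_reply_on_reply_ring(struct dpif_dpdk_message reply)
{
	struct rte_mbuf *mbuf = rte_pktmbuf_alloc(pktmbuf_pool);
	void *data;

	if (mbuf == NULL)
		return -1;

	/* Reserve room in the mbuf and copy the reply into it */
	data = rte_pktmbuf_append(mbuf, sizeof(reply));
	if (data == NULL) {
		rte_pktmbuf_free(mbuf);
		return -1;
	}
	rte_memcpy(data, &reply, sizeof(reply));

	/* Single-producer enqueue onto the reply ring */
	return rte_ring_sp_enqueue(vswitchd_reply_ring, mbuf);
}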
Code example #3
File: main.c Project: 0817/masscan
/***************************************************************************
 * The receive thread doesn't transmit packets. Instead, it queues them
 * up on the transmit thread. Every so often, the transmit thread needs
 * to flush this transmit queue and send everything.
 *
 * This is an inherent tradeoff of sending packets in batches rather than
 * individually: it increases latency but improves throughput, and we
 * don't really care about latency here.
 ***************************************************************************/
void
flush_packets(struct Adapter *adapter,
    PACKET_QUEUE *packet_buffers,
    PACKET_QUEUE *transmit_queue,
    struct Throttler *throttler, uint64_t *packets_sent)
{
    uint64_t batch_size;
    unsigned is_queue_empty = 0;

    while (!is_queue_empty) {
        /*
         * Only send a few packets at a time, throttled according to the max
         * --max-rate set by the user
         */
        batch_size = throttler_next_batch(throttler, *packets_sent);

        /*
         * Send a batch of queued packets
         */
        for ( ; batch_size; batch_size--) {
            int err;
            struct PacketBuffer *p;

            /*
             * Get the next packet from the transmit queue. This packet was 
             * put there by a receive thread, and will contain things like
             * an ACK or an HTTP request
             */
            err = rte_ring_sc_dequeue(transmit_queue, (void**)&p);
            if (err) {
                is_queue_empty = 1;
                break; /* queue is empty, nothing to send */
            }

            /*
             * Actually send the packet
             */
            rawsock_send_packet(adapter, p->px, (unsigned)p->length, 1);

            /*
             * Now that we are done with the packet, put it on the free list
             * of buffers that the transmit thread can reuse
             */
            for (err=1; err; ) {
                err = rte_ring_sp_enqueue(packet_buffers, p);
                if (err) {
                    LOG(0, "packet buffer free-list full (should be impossible)\n");
                    pixie_usleep(10000);
                }
            }

            /*
             * Remember that we sent a packet, which will be used in
             * throttling.
             */
            (*packets_sent)++;
        }
    }
}
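
The producer half of this queue lives in the receive threads, which the comment above describes but this excerpt doesn't show. A minimal illustrative sketch of that side, assuming the receive thread takes a free buffer from packet_buffers, fills it, and enqueues it for transmit (the helper name is hypothetical):

/* Illustrative sketch only; the real receive-thread code is not shown.
 * Take a free buffer, fill it, and queue it for the transmit thread. */
static void
queue_packet_for_transmit(PACKET_QUEUE *packet_buffers,
    PACKET_QUEUE *transmit_queue,
    const unsigned char *px, unsigned length)
{
    struct PacketBuffer *p = NULL;
    int err;

    /* Take a buffer off the free list (single-consumer side) */
    err = rte_ring_sc_dequeue(packet_buffers, (void **)&p);
    if (err)
        return; /* no free buffers; drop or retry at the caller */

    memcpy(p->px, px, length);
    p->length = length;

    /* Hand it to the transmit thread (single-producer side) */
    err = rte_ring_sp_enqueue(transmit_queue, p);
    if (err) {
        /* transmit queue full: return the buffer so it isn't leaked */
        rte_ring_sp_enqueue(packet_buffers, p);
    }
}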
Code example #4
File: init.c Project: Cosios/dpdk
void
app_ping(void)
{
	unsigned i;
	uint64_t timestamp, diff_tsc;

	const uint64_t timeout = rte_get_tsc_hz() * APP_PING_TIMEOUT_SEC;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];
		struct rte_ring *ring_req, *ring_resp;
		void *msg;
		struct app_msg_req *req;
		int status;

		if ((p->core_type != APP_CORE_FC) &&
		    (p->core_type != APP_CORE_FW) &&
		    (p->core_type != APP_CORE_RT) &&
		    (p->core_type != APP_CORE_RX))
			continue;

		ring_req = app_get_ring_req(p->core_id);
		ring_resp = app_get_ring_resp(p->core_id);

		/* Fill request message */
		msg = (void *)rte_ctrlmbuf_alloc(app.msg_pool);
		if (msg == NULL)
			rte_panic("Unable to allocate new message\n");

		req = (struct app_msg_req *)
				rte_ctrlmbuf_data((struct rte_mbuf *)msg);
		req->type = APP_MSG_REQ_PING;

		/* Send request */
		do {
			status = rte_ring_sp_enqueue(ring_req, msg);
		} while (status == -ENOBUFS);

		/* Wait for response */
		timestamp = rte_rdtsc();
		do {
			status = rte_ring_sc_dequeue(ring_resp, &msg);
			diff_tsc = rte_rdtsc() - timestamp;

			if (unlikely(diff_tsc > timeout))
				rte_panic("Core %u of type %d does not respond "
					"to requests\n", p->core_id,
					p->core_type);
		} while (status != 0);

		/* Free message buffer */
		rte_ctrlmbuf_free(msg);
	}
}
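
app_ping() assumes each polled core runs a responder that answers on its response ring; that side is not part of this excerpt. A minimal sketch, reusing the ring pair and message layout from the example above (the function name is hypothetical):

/* Hypothetical responder loop body for a worker core. Assumes the same
 * request/response rings and struct app_msg_req as app_ping() above. */
static void
app_core_handle_msg(struct rte_ring *ring_req, struct rte_ring *ring_resp)
{
	void *msg;
	struct app_msg_req *req;
	int status;

	/* Poll the request ring (single-consumer side) */
	if (rte_ring_sc_dequeue(ring_req, &msg) != 0)
		return; /* nothing pending */

	req = (struct app_msg_req *)
			rte_ctrlmbuf_data((struct rte_mbuf *)msg);
	if (req->type != APP_MSG_REQ_PING)
		return; /* other request types omitted from this sketch */

	/* Echo the same mbuf back as the response; app_ping() frees it */
	do {
		status = rte_ring_sp_enqueue(ring_resp, msg);
	} while (status == -ENOBUFS);
}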
Code example #5
File: dpdk-link.c Project: kikutak/dpdk-ovs
/* Blocking function that waits for 'reply' from datapath. */
int
dpdk_link_recv_reply(struct dpif_dpdk_message *reply)
{
    struct rte_mbuf *mbuf = NULL;
    void *pktmbuf_data = NULL;
    int pktmbuf_len = 0;

    DPDK_DEBUG()

    while (rte_ring_sc_dequeue(reply_ring, (void **)&mbuf) != 0);

    pktmbuf_data = rte_pktmbuf_mtod(mbuf, void *);
    pktmbuf_len = rte_pktmbuf_data_len(mbuf);
    rte_memcpy(reply, pktmbuf_data, pktmbuf_len);

    rte_pktmbuf_free(mbuf);

    return 0;
}
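
The dequeue loop above spins forever if no reply ever arrives. A bounded-wait variant is easy to sketch by borrowing the TSC timeout pattern from the app_ping() example; the function below is an illustration, not part of dpdk-link.c:

/* Sketch of a bounded-wait variant of dpdk_link_recv_reply(). The
 * timeout handling is an assumption, not original dpdk-link.c code. */
int
dpdk_link_recv_reply_timed(struct dpif_dpdk_message *reply,
                           unsigned timeout_sec)
{
    struct rte_mbuf *mbuf = NULL;
    const uint64_t timeout = rte_get_tsc_hz() * timeout_sec;
    const uint64_t start = rte_rdtsc();

    while (rte_ring_sc_dequeue(reply_ring, (void **)&mbuf) != 0) {
        if (rte_rdtsc() - start > timeout)
            return ETIMEDOUT; /* caller decides how to recover */
    }

    rte_memcpy(reply, rte_pktmbuf_mtod(mbuf, void *),
               rte_pktmbuf_data_len(mbuf));
    rte_pktmbuf_free(mbuf);

    return 0;
}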
Code example #6
File: dpdk-link.c Project: kikutak/dpdk-ovs
/* Blocking function that waits for a packet from datapath. 'pkt' will get
 * populated with packet data. */
int
dpdk_link_recv_packet(struct ofpbuf **pkt, struct dpif_dpdk_upcall *info)
{
    struct rte_mbuf *mbuf = NULL;
    uint16_t pktmbuf_len = 0;
    void *pktmbuf_data = NULL;

    DPDK_DEBUG()

    if (rte_ring_sc_dequeue(packet_ring, (void **)&mbuf) != 0) {
        return EAGAIN;
    }

    pktmbuf_data = rte_pktmbuf_mtod(mbuf, void *);
    pktmbuf_len = rte_pktmbuf_data_len(mbuf);
    rte_memcpy(info, pktmbuf_data, sizeof(*info));
    pktmbuf_data = (uint8_t *)pktmbuf_data + sizeof(*info);
    *pkt = ofpbuf_clone_data(pktmbuf_data, pktmbuf_len - sizeof(*info));

    rte_pktmbuf_free(mbuf);

    return 0;
}
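
Since the function returns EAGAIN when the packet ring is empty, callers are expected to poll it. A minimal usage sketch (handle_upcall() is a hypothetical consumer, not part of dpdk-link.c):

/* Illustrative caller: drain all pending upcalls, then return. */
static void
poll_upcalls(void)
{
    struct ofpbuf *pkt = NULL;
    struct dpif_dpdk_upcall info;

    while (dpdk_link_recv_packet(&pkt, &info) == 0) {
        handle_upcall(pkt, &info); /* hypothetical consumer */
        ofpbuf_delete(pkt);        /* free the cloned packet */
    }
    /* dpdk_link_recv_packet() returned EAGAIN: ring is empty */
}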
Code example #7
void
test_dpif_dpdk_flow_put(struct dpif *dpif_p)
{
	struct dpif_dpdk_message reply;
	struct dpif_dpdk_message *request;
	struct dpif_flow_put put;
	struct rte_mbuf *mbuf = NULL;
	void *pktmbuf_data = NULL;
	int result = -1;
	int num_pkts = 0;

	/* Create a fake reply to put on the reply ring. We don't use
	 * this, but transact will hang until a reply is received so
	 * there has to be something to dequeue.
	 */
	create_dpdk_flow_put_reply(&reply);
	result = enqueue_reply_on_reply_ring(reply);
	assert(result == 0);

	create_dpif_flow_put_message(&put);
	dpif_p->dpif_class->flow_put(dpif_p, &put);
	num_pkts = rte_ring_count(vswitchd_message_ring);
	assert(num_pkts == 1);
	result = rte_ring_sc_dequeue(vswitchd_message_ring, (void **)&mbuf);
	assert(result == 0);

	/* Just test that the message created and enqueued on the request ring
	 * was correct
	 */
	pktmbuf_data = rte_pktmbuf_mtod(mbuf, void *);
	request = (struct dpif_dpdk_message *)pktmbuf_data;
	assert(request->flow_msg.actions[0].type == ACTION_NULL);
	assert(request->flow_msg.key.in_port == 5);
	rte_pktmbuf_free(mbuf);
	printf(" %s\n", __FUNCTION__);
}
Code example #8
static inline uint32_t
sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
		uint32_t iq_num, unsigned int count, int keep_order)
{
	uint32_t i;
	uint32_t cq_idx = qid->cq_next_tx;

	/* This is the QID ID. The QID ID is static, hence it can be
	 * used to identify the stage of processing in history lists, etc.
	 */
	uint32_t qid_id = qid->id;

	if (count > MAX_PER_IQ_DEQUEUE)
		count = MAX_PER_IQ_DEQUEUE;

	if (keep_order)
		/* only schedule as many as we have reorder buffer entries */
		count = RTE_MIN(count,
				rte_ring_count(qid->reorder_buffer_freelist));

	for (i = 0; i < count; i++) {
		const struct rte_event *qe = iq_ring_peek(qid->iq[iq_num]);
		uint32_t cq_check_count = 0;
		uint32_t cq;

		/*
		 * For parallel, just send to the next available CQ in
		 * round-robin fashion. So scan for an available CQ; if all
		 * CQs are full, just return and move on to the next QID.
		 */
		do {
			if (++cq_check_count > qid->cq_num_mapped_cqs)
				goto exit;
			cq = qid->cq_map[cq_idx];
			if (++cq_idx == qid->cq_num_mapped_cqs)
				cq_idx = 0;
		} while (rte_event_ring_free_count(
				sw->ports[cq].cq_worker_ring) == 0 ||
				sw->ports[cq].inflights == SW_PORT_HIST_LIST);

		struct sw_port *p = &sw->ports[cq];
		if (sw->cq_ring_space[cq] == 0 ||
				p->inflights == SW_PORT_HIST_LIST)
			break;

		sw->cq_ring_space[cq]--;

		qid->stats.tx_pkts++;

		const int head = (p->hist_head & (SW_PORT_HIST_LIST-1));
		p->hist_list[head].fid = SW_HASH_FLOWID(qe->flow_id);
		p->hist_list[head].qid = qid_id;

		if (keep_order)
			rte_ring_sc_dequeue(qid->reorder_buffer_freelist,
					(void *)&p->hist_list[head].rob_entry);

		sw->ports[cq].cq_buf[sw->ports[cq].cq_buf_count++] = *qe;
		iq_ring_pop(qid->iq[iq_num]);

		rte_compiler_barrier();
		p->inflights++;
		p->stats.tx_pkts++;
		p->hist_head++;
	}
exit:
	qid->cq_next_tx = cq_idx;
	return i;
}