Example #1
int ipaugenblick_read_updates(void)
{
	struct rte_mbuf *mbuf = NULL;
	unsigned char cmd = 0;
	
	if(rte_ring_dequeue(client_ring,(void **)&mbuf)) {
		syslog(LOG_ERR,"%s %d\n",__FILE__,__LINE__);
		return -1;
	}
	unsigned char *p = rte_pktmbuf_mtod(mbuf, unsigned char *);
	switch(*p) {
		case IPAUGENBLICK_NEW_IFACES:
		if(ipaugenblick_update_cbk) {
			cmd = 1;
			p++;
			ipaugenblick_update_cbk(cmd,p,rte_pktmbuf_data_len(mbuf) - 1);
		}
		break;
		case IPAUGENBLICK_NEW_ADDRESSES:
		if(ipaugenblick_update_cbk) {
			cmd = 3;
			p++;
			ipaugenblick_update_cbk(cmd,p,rte_pktmbuf_data_len(mbuf) - 1);
		}
		break;
		case IPAUGENBLICK_END_OF_RECORD:
		rte_pktmbuf_free(mbuf); /* release the command mbuf once processed */
		return 0;
	}
	rte_pktmbuf_free(mbuf); /* the update has been delivered to the callback; release the command mbuf */
	return -1;
}
static void on_client_connect(int client_idx)
{
	struct rte_mbuf *buffer = get_buffer();
	if(!buffer) {
		return;
	}
	unsigned char *data = rte_pktmbuf_mtod(buffer, unsigned char *);
	*data = IPAUGENBLICK_NEW_IFACES;
	data++;
	rte_pktmbuf_data_len(buffer) = get_all_devices(data) + 1;
	rte_ring_enqueue(ipaugenblick_clients[client_idx].client_ring,(void *)buffer);
	buffer = get_buffer();
	if(!buffer) {
		return;
	}
	data = rte_pktmbuf_mtod(buffer, unsigned char *);
	*data = IPAUGENBLICK_NEW_ADDRESSES;
	data++;
	rte_pktmbuf_data_len(buffer) = get_all_addresses(data) + 1;
	rte_ring_enqueue(ipaugenblick_clients[client_idx].client_ring,(void *)buffer);
	buffer = get_buffer();
	if(!buffer) {
		return;
	}
	data = rte_pktmbuf_mtod(buffer, unsigned char *);
	*data = IPAUGENBLICK_END_OF_RECORD;
	rte_ring_enqueue(ipaugenblick_clients[client_idx].client_ring,(void *)buffer);
}
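For context, a minimal client-side callback compatible with the ipaugenblick_update_cbk(cmd, data, len) calls above might look like the following sketch. The function name, the int length type and the treatment of the payload as an opaque byte blob are assumptions made here for illustration, not part of the original library.

/* Hypothetical update callback: cmd is 1 for new interfaces and 3 for new
 * addresses (the values set in ipaugenblick_read_updates above); data points
 * into the command mbuf and is only valid for the duration of the call. */
static void example_update_cbk(unsigned char cmd, unsigned char *data, int len)
{
	switch (cmd) {
	case 1:
		printf("interface update, %d payload bytes\n", len);
		break;
	case 3:
		printf("address update, %d payload bytes\n", len);
		break;
	default:
		printf("unknown update %u, %d bytes\n", cmd, len);
		break;
	}
	(void)data; /* a real callback would parse the records here */
}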
Example #3
/*
 * Allocate mbuf for flow stat (and latency) info sending
 * m - Original mbuf (can be complicated mbuf data structure)
 * fsp_head - return pointer in which the flow stat info should be filled
 * is_const - is the given mbuf const
 * return new mbuf structure in which the fsp_head can be written. If needed, original mbuf is freed.
 */
rte_mbuf_t * CGenNodeStateless::alloc_flow_stat_mbuf(rte_mbuf_t *m, struct flow_stat_payload_header *&fsp_head
                                                     , bool is_const) {
    rte_mbuf_t *m_ret = NULL, *m_lat = NULL;
    uint16_t fsp_head_size = sizeof(struct flow_stat_payload_header);

    if (is_const) {
        // const mbuf case
        if (rte_pktmbuf_data_len(m) > 128) {
            m_ret = CGlobalInfo::pktmbuf_alloc_small(get_socket_id());
            assert(m_ret);
            // alloc mbuf just for the latency header
            m_lat = CGlobalInfo::pktmbuf_alloc( get_socket_id(), fsp_head_size);
            assert(m_lat);
            fsp_head = (struct flow_stat_payload_header *)rte_pktmbuf_append(m_lat, fsp_head_size);
            rte_pktmbuf_attach(m_ret, m);
            rte_pktmbuf_trim(m_ret, sizeof(struct flow_stat_payload_header));
            utl_rte_pktmbuf_add_after2(m_ret, m_lat);
            // The ref count was updated when we took the (const) mbuf, and again in rte_pktmbuf_attach,
            // so we need to decrease it now to avoid a leak.
            rte_pktmbuf_refcnt_update(m, -1);
            return m_ret;
        } else {
            // Short packet. Just copy all bytes.
            m_ret = CGlobalInfo::pktmbuf_alloc( get_socket_id(), rte_pktmbuf_data_len(m) );
            assert(m_ret);
            char *p = rte_pktmbuf_mtod(m, char*);
            char *p_new = rte_pktmbuf_append(m_ret, rte_pktmbuf_data_len(m));
            memcpy(p_new , p, rte_pktmbuf_data_len(m));
            fsp_head = (struct flow_stat_payload_header *)(p_new + rte_pktmbuf_data_len(m) - fsp_head_size);
            rte_pktmbuf_free(m);
            return m_ret;
        }
    } else {
        // Field engine (vm)
        if (rte_pktmbuf_is_contiguous(m)) {
Example #4
inline int ipaugenblick_sendto_bulk(int sock,struct data_and_descriptor *bufs_and_desc,int *offsets,int *lengths,unsigned int *ipaddrs,unsigned short *ports,int buffer_count)
{
    int rc,idx,total_length = 0;
    struct rte_mbuf *mbufs[buffer_count];

    for(idx = 0;idx < buffer_count;idx++) {
        char *p_addr;
        struct sockaddr_in *p_addr_in;
        /* TODO: set offsets */
        mbufs[idx] = (struct rte_mbuf *)bufs_and_desc[idx].pdesc;
        rte_pktmbuf_data_len(mbufs[idx]) = lengths[idx];
        total_length += lengths[idx];
        /* the destination address is written just in front of the payload */
        p_addr = rte_pktmbuf_mtod(mbufs[idx],char *);
        p_addr -= sizeof(struct sockaddr_in);
        p_addr_in = (struct sockaddr_in *)p_addr;
        p_addr_in->sin_family = AF_INET;
        p_addr_in->sin_port = htons(ports[idx]);
        p_addr_in->sin_addr.s_addr = ipaddrs[idx];
    }
    ipaugenblick_stats_send_called++;
    ipaugenblick_stats_buffers_sent += buffer_count;
    rte_atomic16_set(&(local_socket_descriptors[sock & SOCKET_READY_MASK].socket->write_ready_to_app),0);
    rc = ipaugenblick_enqueue_tx_bufs_bulk(sock,mbufs,buffer_count);
    if(rc == 0)
	rte_atomic32_sub(&(local_socket_descriptors[sock & SOCKET_READY_MASK].socket->tx_space),total_length);
    ipaugenblick_stats_send_failure += (rc != 0);
    return rc;
}
Example #5
/* Sends 'num_pkts' 'packets' and 'request' data to datapath. */
int
dpdk_link_send_bulk(struct dpif_dpdk_message *request,
                    const struct ofpbuf *const *packets, size_t num_pkts)
{
    struct rte_mbuf *mbufs[PKT_BURST_SIZE] = {NULL};
    uint8_t *mbuf_data = NULL;
    int i = 0;
    int ret = 0;

    if (num_pkts > PKT_BURST_SIZE) {
        return EINVAL;
    }

    DPDK_DEBUG()

    for (i = 0; i < num_pkts; i++) {
        mbufs[i] = rte_pktmbuf_alloc(mp);

        if (!mbufs[i]) {
            return ENOBUFS;
        }

        mbuf_data = rte_pktmbuf_mtod(mbufs[i], uint8_t *);
        rte_memcpy(mbuf_data, &request[i], sizeof(request[i]));

        if (request->type == DPIF_DPDK_PACKET_FAMILY) {
            mbuf_data = mbuf_data + sizeof(request[i]);
            if (likely(packets[i]->size <= (mbufs[i]->buf_len - sizeof(request[i])))) {
                rte_memcpy(mbuf_data, packets[i]->data, packets[i]->size);
                rte_pktmbuf_data_len(mbufs[i]) =
                    sizeof(request[i]) + packets[i]->size;
                rte_pktmbuf_pkt_len(mbufs[i]) = rte_pktmbuf_data_len(mbufs[i]);
            } else {
                RTE_LOG(ERR, APP, "%s, %d: %s", __FUNCTION__, __LINE__,
                        "memcpy prevented: packet size exceeds available mbuf space");
                for (i = 0; i < num_pkts; i++) {
                    rte_pktmbuf_free(mbufs[i]);
                }
                return ENOMEM;
            }
        } else {
            rte_pktmbuf_data_len(mbufs[i]) = sizeof(request[i]);
            rte_pktmbuf_pkt_len(mbufs[i]) = rte_pktmbuf_data_len(mbufs[i]);
        }
    }

    ret = rte_ring_sp_enqueue_bulk(message_ring, (void * const *)mbufs, num_pkts);
    if (ret == -ENOBUFS) {
        for (i = 0; i < num_pkts; i++) {
            rte_pktmbuf_free(mbufs[i]);
        }
        ret = ENOBUFS;
    } else if (unlikely(ret == -EDQUOT)) {
        ret = EDQUOT;
    }

    return ret;
}
Example #6
static void
deliverframe(struct virtif_user *viu)
{
	struct rte_mbuf *m, *m0;
	struct iovec iov[STACK_IOV];
	struct iovec *iovp, *iovp0;

	assert(viu->viu_nbufpkts > 0 && viu->viu_bufidx < MAX_PKT_BURST);
	m0 = viu->viu_m_pkts[viu->viu_bufidx];
	assert(m0 != NULL);
	viu->viu_bufidx++;
	viu->viu_nbufpkts--;

	if (m0->pkt.nb_segs > STACK_IOV) {
		iovp = malloc(sizeof(*iovp) * m0->pkt.nb_segs);
		if (iovp == NULL) {
			/* drop: free the packet we took ownership of */
			rte_pktmbuf_free(m0);
			return;
		}
	} else {
		iovp = iov;
	}
	iovp0 = iovp;

	for (m = m0; m; m = m->pkt.next, iovp++) {
		iovp->iov_base = rte_pktmbuf_mtod(m, void *);
		iovp->iov_len = rte_pktmbuf_data_len(m);
	}
	VIF_DELIVERPKT(viu->viu_virtifsc, iovp0, iovp-iovp0);

	rte_pktmbuf_free(m0);
	if (iovp0 != iov)
		free(iovp0);
}
Example #7
/*
 * Main thread that does the work, reading from INPUT_PORT
 * and writing to OUTPUT_PORT
 */
static __attribute__((noreturn)) void
lcore_main(void)
{
	uint8_t port = 0;

	if (rte_eth_dev_socket_id(port) > 0 &&
			rte_eth_dev_socket_id(port) !=
			(int)rte_socket_id())
		printf("WARNING, port %u is on remote NUMA node to "
				"polling thread.\n\tPerformance will "
				"not be optimal.\n", port);

	printf("\nCore %u forwarding packets. [Ctrl+C to quit]\n",
			rte_lcore_id());
	for (;;) {
		struct rte_mbuf *bufs[BURST_SIZE];
		const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
				bufs, BURST_SIZE);
		uint16_t buf;

		if (unlikely(nb_rx == 0))
			continue;

		for (buf = 0; buf < nb_rx; buf++) {
			struct rte_mbuf *mbuf = bufs[buf];
			unsigned int len = rte_pktmbuf_data_len(mbuf);
			rte_pktmbuf_dump(stdout, mbuf, len);
			rte_pktmbuf_free(mbuf);
		}
	}
}
Example #8
static inline void
send_burst_nodrop(struct rte_port_fd_writer_nodrop *p)
{
	uint64_t n_retries;
	uint32_t i;

	n_retries = 0;
	for (i = 0; (i < p->tx_buf_count) && (n_retries < p->n_retries); i++) {
		struct rte_mbuf *pkt = p->tx_buf[i];
		void *pkt_data = rte_pktmbuf_mtod(pkt, void*);
		size_t n_bytes = rte_pktmbuf_data_len(pkt);

		for ( ; n_retries < p->n_retries; n_retries++) {
			ssize_t ret;

			ret = write(p->fd, pkt_data, n_bytes);
			if (ret)
				break;
		}
	}

	RTE_PORT_FD_WRITER_NODROP_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - i);

	for (i = 0; i < p->tx_buf_count; i++)
		rte_pktmbuf_free(p->tx_buf[i]);

	p->tx_buf_count = 0;
}
Example #9
/*
 * test data manipulation in mbuf with non-ascii data
 */
static int
test_pktmbuf_with_non_ascii_data(void)
{
	struct rte_mbuf *m = NULL;
	char *data;

	m = rte_pktmbuf_alloc(pktmbuf_pool);
	if (m == NULL)
		GOTO_FAIL("Cannot allocate mbuf");
	if (rte_pktmbuf_pkt_len(m) != 0)
		GOTO_FAIL("Bad length");

	data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN);
	if (data == NULL)
		GOTO_FAIL("Cannot append data");
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad pkt length");
	if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad data length");
	memset(data, 0xff, rte_pktmbuf_pkt_len(m));
	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be continuous");
	rte_pktmbuf_dump(m, MBUF_TEST_DATA_LEN);

	rte_pktmbuf_free(m);

	return 0;

fail:
	if(m) {
		rte_pktmbuf_free(m);
	}
	return -1;
}
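The test above only covers a single-segment mbuf, where data_len and pkt_len coincide. Below is a small sketch (not part of the original test) of the multi-segment case, assuming the same pktmbuf_pool and using the standard rte_pktmbuf_chain() helper; it illustrates that rte_pktmbuf_data_len() reports the first segment only while rte_pktmbuf_pkt_len() covers the whole chain.

static int
test_chained_lengths(void)
{
	struct rte_mbuf *a = rte_pktmbuf_alloc(pktmbuf_pool);
	struct rte_mbuf *b = rte_pktmbuf_alloc(pktmbuf_pool);

	if (a == NULL || b == NULL) {
		rte_pktmbuf_free(a);	/* freeing NULL is a no-op */
		rte_pktmbuf_free(b);
		return -1;
	}
	rte_pktmbuf_append(a, 100);
	rte_pktmbuf_append(b, 50);
	if (rte_pktmbuf_chain(a, b) != 0) {	/* b becomes a's second segment */
		rte_pktmbuf_free(a);
		rte_pktmbuf_free(b);
		return -1;
	}
	/* data_len describes the first segment only, pkt_len the whole chain */
	if (rte_pktmbuf_data_len(a) != 100 || rte_pktmbuf_pkt_len(a) != 150) {
		rte_pktmbuf_free(a);	/* frees both segments */
		return -1;
	}
	rte_pktmbuf_free(a);	/* frees both segments */
	return 0;
}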
Example #10
int
arp_in (struct rte_mbuf *mbuf)
{
    assert(mbuf->buf_len >= sizeof(struct arp));
    struct arp *arp_pkt;
    struct ether_hdr *eth;
    uint32_t ip_add = 0;

    eth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);

    assert(rte_pktmbuf_data_len(mbuf) >= (sizeof(struct arp) + sizeof(struct ether_hdr)));
    arp_pkt = (struct arp *)(rte_pktmbuf_mtod(mbuf, char *) + sizeof(struct ether_hdr));
    switch(ntohs(arp_pkt->opcode)) {
    case ARP_REQ ://
        send_arp_reply(arp_pkt->dst_pr_add, arp_pkt->src_pr_add);
        break;
    /*
       uint32_t ip_add = GetIntAddFromChar(arp_pkt->src_pr_add, 0);
       add_mac((ip_add), arp_pkt->src_hw_add);
       logger(ARP, NORMAL, "seen arp packet\n");
       break;
    */
    case ARP_REPLY ://
        ip_add = GetIntAddFromChar(arp_pkt->src_pr_add, 0);
        add_mac((ip_add), arp_pkt->src_hw_add);
        break;
        //   default : assert(0);
    }
    return 0;
}
Example #11
void dump_ring(CLQManager *a_pclsCLQ, char *a_pszQName, uint32_t a_unStartIdx, uint32_t a_unEndIdx)
{
	struct rte_ring *pstRing = NULL;
	struct rte_mbuf *m = NULL;
	int ret = 0;
	uint32_t unMask = 0;
	uint32_t unStartIdx = 0;
	uint32_t unEndIdx = 0;
	uint32_t unIdx = 0;

	ret = a_pclsCLQ->CreateRing(a_pszQName, &pstRing);
	if(ret != E_Q_EXIST)
	{
		printf("There is no Queue (%s) \n", a_pszQName);
		if(ret == 0)
		{
			a_pclsCLQ->DeleteQueue(a_pszQName);
		}
	}

	if(pstRing != NULL)
	{
		unMask = pstRing->prod.mask;
		unStartIdx = pstRing->cons.tail;
		unEndIdx = pstRing->prod.tail;

		if(a_unEndIdx > unEndIdx)
		{
			printf("Invalid End idx %u\n", a_unEndIdx);
			return ;
		}

		if(a_unEndIdx < a_unStartIdx)
		{
			printf("Invalid Start Idx %u, End Idx %u\n", a_unStartIdx, a_unEndIdx);
			return ;
		}

		if(a_unStartIdx)
			unStartIdx = a_unStartIdx;
		if(a_unEndIdx)
			unEndIdx = a_unEndIdx;

		printf("Start Idx %u, End Idx %u\n", unStartIdx, unEndIdx);

		for(uint32_t i = unStartIdx; i < unEndIdx ; i++)
		{
			unIdx = i & unMask ;
			m = (struct rte_mbuf*)pstRing->ring[unIdx];
			printf("idx : [%8u], total_len : [%5u], data_len : [%5u], seg_cnt : [%2u], Data : %s\n"
							, i
							, rte_pktmbuf_pkt_len(m)
							, rte_pktmbuf_data_len(m)
							, m->nb_segs
							, rte_pktmbuf_mtod(m, char*)
					);
		}
	}
}
Example #12
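/* Retrieve the partially consumed mbuf previously stashed in the socket's
 * shadow slot: advance data_off past the bytes already delivered, shrink
 * data_len to the remainder, recompute pkt_len and re-attach the saved
 * chain tail. Returns NULL when no shadow mbuf is pending. */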
static inline struct rte_mbuf *ipaugenblick_get_from_shadow(int sock)
{
	struct rte_mbuf *mbuf = NULL;

	if(local_socket_descriptors[sock].shadow) {
		mbuf = local_socket_descriptors[sock].shadow;
		local_socket_descriptors[sock].shadow = NULL;
		rte_pktmbuf_data_len(mbuf) = local_socket_descriptors[sock].shadow_len_remainder;
		mbuf->data_off += local_socket_descriptors[sock].shadow_len_delievered;
		if(mbuf->next) {
			rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf) + rte_pktmbuf_pkt_len(mbuf->next);
		}
		else {
			rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf);
		}
		mbuf->next = local_socket_descriptors[sock].shadow_next;
		local_socket_descriptors[sock].shadow_next = NULL;
	}
	return mbuf;
}
Example #13
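/* Walk the chain until *total_len bytes are covered. If the requested amount
 * ends in the middle of a segment, the remainder of that segment (and the rest
 * of the chain) is parked in the socket's shadow slot for a later read; if less
 * data is available, *total_len is reduced to what was actually found. */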
static inline void ipaugenblick_try_read_exact_amount(struct rte_mbuf *mbuf,int sock,int *total_len,int *first_segment_len)
{
	struct rte_mbuf *tmp = mbuf,*prev = NULL;
	int curr_len = 0;
	while((tmp)&&((rte_pktmbuf_data_len(tmp) + curr_len) < *total_len)) {
		curr_len += rte_pktmbuf_data_len(tmp);
//		printf("%s %d %d\n",__FILE__,__LINE__,rte_pktmbuf_data_len(tmp));
		prev = tmp;
		tmp = tmp->next;
	}
	if(tmp) {
//		printf("%s %d %d\n",__FILE__,__LINE__,rte_pktmbuf_data_len(tmp));
		if((curr_len + rte_pktmbuf_data_len(tmp)) > *total_len) { /* more data remains */
			local_socket_descriptors[sock].shadow = tmp;
			local_socket_descriptors[sock].shadow_next = tmp->next;
			local_socket_descriptors[sock].shadow_len_remainder = (curr_len + rte_pktmbuf_data_len(tmp)) - *total_len;
			local_socket_descriptors[sock].shadow_len_delievered = 
				rte_pktmbuf_data_len(tmp) - local_socket_descriptors[sock].shadow_len_remainder;
			rte_pktmbuf_data_len(tmp) = local_socket_descriptors[sock].shadow_len_delievered;
			*first_segment_len = local_socket_descriptors[sock].shadow_len_delievered;
		}
		/* if less data than required, tmp would be NULL; reaching here means the exact amount was read */
		else {
			*first_segment_len = rte_pktmbuf_data_len(mbuf);
			/* store next mbuf, if there is */
			if(tmp->next) {
				local_socket_descriptors[sock].shadow = tmp->next;
				local_socket_descriptors[sock].shadow_next = tmp->next->next;
				if(local_socket_descriptors[sock].shadow_next) {
					local_socket_descriptors[sock].shadow_len_remainder = 
						rte_pktmbuf_data_len(local_socket_descriptors[sock].shadow_next);
					local_socket_descriptors[sock].shadow_len_delievered = 0;
				}
			}
		}
		tmp->next = NULL;
		if(curr_len == *total_len) {
			if(prev)
				prev->next = NULL;
		}	
	}
	else {
		*total_len = curr_len;
		*first_segment_len = rte_pktmbuf_data_len(mbuf);
	}		
}
Example #14
int
ether_out(unsigned char *dst_mac, char *src_mac, uint16_t ether_type, struct rte_mbuf *mbuf)
{
   int i = 0;
   struct ether_hdr *eth;
   eth = (struct ether_hdr *)rte_pktmbuf_prepend (mbuf, sizeof(struct ether_hdr)); 
   eth->ether_type = htons(ether_type);
   for(i=0;i<6;i++) {
      eth->d_addr.addr_bytes[i] = dst_mac[i];
   }
//   for(i=0;i<6;i++) {
   eth->s_addr.addr_bytes[0] = 0x6a;
   eth->s_addr.addr_bytes[1] = 0x9c;
   eth->s_addr.addr_bytes[2] = 0xba;
   eth->s_addr.addr_bytes[3] = 0xa0;
   eth->s_addr.addr_bytes[4] = 0x96;
   eth->s_addr.addr_bytes[5] = 0x24;
//   }
// FIXME: this should automatically detect the network interface id.
   send_packet_out(mbuf, 0);
   static int counter_id = -1;
   if(counter_id == -1) {
      counter_id = create_counter("sent_rate");
   }
   int data_len = rte_pktmbuf_data_len(mbuf);

   counter_abs(counter_id, data_len);
   {
      static int counter_id = -1;
      if(counter_id == -1) {
         counter_id = create_counter("wire_sent");
      }
      int data_len = rte_pktmbuf_data_len(mbuf);
 
      counter_inc(counter_id, data_len);
   }
   (void) src_mac; // just to silence the unused-parameter warning
   src_mac = NULL;
   return 0;
}
Example #15
/**
 * Process a crypto operation and complete a JOB_AES_HMAC job structure for
 * submission to the multi buffer library for processing.
 *
 * @param	qp	queue pair
 * @param	op	crypto operation to process
 * @param	session	session with the cipher and authentication parameters
 *
 * @return
 * - Completed JOB_AES_HMAC structure pointer on success
 * - NULL pointer if completion of JOB_AES_HMAC structure isn't possible
 */
static JOB_AES_HMAC *
process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
		struct aesni_mb_session *session)
{
	JOB_AES_HMAC *job;

	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
	uint16_t m_offset = 0;

	job = (*qp->ops->job.get_next)(&qp->mb_mgr);
	if (unlikely(job == NULL))
		return job;

	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
	job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
	job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.decode;


	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;
	if (job->hash_alg == AES_XCBC) {
		job->_k1_expanded = session->auth.xcbc.k1_expanded;
		job->_k2 = session->auth.xcbc.k2;
		job->_k3 = session->auth.xcbc.k3;
	} else {
		job->hashed_auth_key_xor_ipad = session->auth.pads.inner;
		job->hashed_auth_key_xor_opad = session->auth.pads.outer;
	}

	/* Mutable crypto operation parameters */
	if (op->sym->m_dst) {
		m_src = m_dst = op->sym->m_dst;

		/* append space for output data to mbuf */
		char *odata = rte_pktmbuf_append(m_dst,
				rte_pktmbuf_data_len(op->sym->m_src));
		if (odata == NULL) {
			MB_LOG_ERR("failed to allocate space in destination "
					"mbuf for source data");
			return NULL;
		}

		memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
				rte_pktmbuf_data_len(op->sym->m_src));
	} else {
Example #16
/* TCP or connected UDP */
inline int ipaugenblick_send(int sock,void *pdesc,int offset,int length)
{
    int rc;
    struct rte_mbuf *mbuf = (struct rte_mbuf *)pdesc;
    ipaugenblick_stats_send_called++;
    rte_pktmbuf_data_len(mbuf) = length;
    mbuf->next = NULL;
    rte_atomic16_set(&(local_socket_descriptors[sock & SOCKET_READY_MASK].socket->write_ready_to_app),0);
    rc = ipaugenblick_enqueue_tx_buf(sock,mbuf);
    if(rc == 0)
        rte_atomic32_sub(&(local_socket_descriptors[sock & SOCKET_READY_MASK].socket->tx_space),length);
    ipaugenblick_stats_send_failure += (rc != 0);
    return rc;
}
Example #17
/*
 * Function sends unmatched packets to vswitchd.
 */
void
send_packet_to_vswitchd(struct rte_mbuf *mbuf, struct dpdk_upcall *info)
{
	int rslt = 0;
	void *mbuf_ptr = NULL;
	const uint64_t dpif_send_tsc =
		(rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * DPIF_SEND_US;
	uint64_t cur_tsc = 0;
	uint64_t diff_tsc = 0;
	static uint64_t prev_tsc = 0;

	/* send one packet, delete information about segments */
	rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf);

	/* allocate space before the packet for the upcall info */
	mbuf_ptr = rte_pktmbuf_prepend(mbuf, sizeof(*info));

	if (mbuf_ptr == NULL) {
		printf("Cannot prepend upcall info\n");
		rte_pktmbuf_free(mbuf);
		stats_vswitch_tx_drop_increment(INC_BY_1);
		stats_vport_tx_drop_increment(VSWITCHD, INC_BY_1);
		return;
	}

	rte_memcpy(mbuf_ptr, info, sizeof(*info));

	/* send the packet and the upcall info to the daemon */
	rslt = rte_ring_mp_enqueue(vswitchd_packet_ring, mbuf);
	if (rslt < 0) {
		if (rslt == -ENOBUFS) {
			rte_pktmbuf_free(mbuf);
			stats_vswitch_tx_drop_increment(INC_BY_1);
			stats_vport_tx_drop_increment(VSWITCHD, INC_BY_1);
			return;
		} else {
			stats_vport_overrun_increment(VSWITCHD, INC_BY_1);
		}
	}

	stats_vport_tx_increment(VSWITCHD, INC_BY_1);

	cur_tsc = rte_rdtsc();
	diff_tsc = cur_tsc - prev_tsc;
	prev_tsc = cur_tsc;
	/* Only signal the daemon after 100 milliseconds */
	if (unlikely(diff_tsc > dpif_send_tsc))
		send_signal_to_dpif();
}
Example #18
/**
 * Given a dpdk mbuf, fill in the Netmap slot in ring r and its associated
 * buffer with the data held by the mbuf.
 * Note that mbuf chains are not supported.
 */
static void
mbuf_to_slot(struct rte_mbuf *mbuf, struct netmap_ring *r, uint32_t index)
{
	char *data;
	uint16_t length;

	data   = rte_pktmbuf_mtod(mbuf, char *);
	length = rte_pktmbuf_data_len(mbuf);

	if (length > r->nr_buf_size)
		length = 0;

	r->slot[index].len = length;
	rte_memcpy(NETMAP_BUF(r, r->slot[index].buf_idx), data, length);
}
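A reverse helper for the receive path could follow the same pattern. The sketch below is hypothetical (slot_to_mbuf and its pool parameter are not part of the original code); only the DPDK and netmap calls themselves are real APIs.

/* Copy a Netmap slot into a freshly allocated, single-segment mbuf. */
static struct rte_mbuf *
slot_to_mbuf(struct rte_mempool *pool, struct netmap_ring *r, uint32_t index)
{
	struct rte_mbuf *mbuf = rte_pktmbuf_alloc(pool);
	uint16_t length = r->slot[index].len;
	char *data;

	if (mbuf == NULL)
		return NULL;

	data = rte_pktmbuf_append(mbuf, length);
	if (data == NULL) {
		/* slot larger than the mbuf data room */
		rte_pktmbuf_free(mbuf);
		return NULL;
	}
	rte_memcpy(data, NETMAP_BUF(r, r->slot[index].buf_idx), length);
	return mbuf;
}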
Example #19
static inline void tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb)
{
	uint16_t last_id;
	uint8_t flags;

	/* always set the LAST flag on the last descriptor used to
	 * transmit the packet */
	flags = FM10K_TXD_FLAG_LAST;
	last_id = q->next_free + mb->nb_segs - 1;
	if (last_id >= q->nb_desc)
		last_id = last_id - q->nb_desc;

	/* but only set the RS flag on the last descriptor if rs_thresh
	 * descriptors will be used since the RS flag was last set */
	if ((q->nb_used + mb->nb_segs) >= q->rs_thresh) {
		flags |= FM10K_TXD_FLAG_RS;
		fifo_insert(&q->rs_tracker, last_id);
		q->nb_used = 0;
	} else {
		q->nb_used = q->nb_used + mb->nb_segs;
	}

	q->hw_ring[last_id].flags = flags;
	q->nb_free -= mb->nb_segs;

	/* set checksum flags on first descriptor of packet. SCTP checksum
	 * offload is not supported, but we do not explicitly check for this
	 * case in favor of greatly simplified processing. */
	if (mb->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
		q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_CSUM;

	/* set vlan if requested */
	if (mb->ol_flags & PKT_TX_VLAN_PKT)
		q->hw_ring[q->next_free].vlan = mb->vlan_tci;

	/* fill up the rings */
	for (; mb != NULL; mb = mb->next) {
		q->sw_ring[q->next_free] = mb;
		q->hw_ring[q->next_free].buffer_addr =
				rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
		q->hw_ring[q->next_free].buflen =
				rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));
		if (++q->next_free == q->nb_desc)
			q->next_free = 0;
	}
}
Example #20
/* Blocking function that waits for 'reply' from datapath. */
int
dpdk_link_recv_reply(struct dpif_dpdk_message *reply)
{
    struct rte_mbuf *mbuf = NULL;
    void *pktmbuf_data = NULL;
    int pktmbuf_len = 0;

    DPDK_DEBUG()

    while (rte_ring_sc_dequeue(reply_ring, (void **)&mbuf) != 0);

    pktmbuf_data = rte_pktmbuf_mtod(mbuf, void *);
    pktmbuf_len = rte_pktmbuf_data_len(mbuf);
    rte_memcpy(reply, pktmbuf_data, pktmbuf_len);

    rte_pktmbuf_free(mbuf);

    return 0;
}
Example #21
static int
usock_mbuf_write(struct vr_usocket *usockp, struct rte_mbuf *mbuf)
{
    unsigned int i, pkt_len;
    struct msghdr mhdr;
    struct rte_mbuf *m;
    struct iovec *iov;

    if (!mbuf)
        return 0;

    pkt_len = rte_pktmbuf_pkt_len(mbuf);
    if (!pkt_len)
        return 0;

    iov = usockp->usock_iovec;

    m = mbuf;
    for (i = 0; (m && (i < PKT0_MAX_IOV_LEN)); i++) {
        iov->iov_base = rte_pktmbuf_mtod(m, unsigned char *);
        iov->iov_len = rte_pktmbuf_data_len(m);
        m = m->next;
        iov++;
    }

    if ((i == PKT0_MAX_IOV_LEN) && m)
        usockp->usock_pkt_truncated++;

    mhdr.msg_name = NULL;
    mhdr.msg_namelen = 0;
    mhdr.msg_iov = usockp->usock_iovec;
    mhdr.msg_iovlen = i;
    mhdr.msg_control = NULL;
    mhdr.msg_controllen = 0;
    mhdr.msg_flags = 0;

#ifdef VR_DPDK_USOCK_DUMP
    RTE_LOG(DEBUG, USOCK, "%s[%lx]: FD %d sending message\n", __func__,
            pthread_self(), usockp->usock_fd);
    rte_hexdump(stdout, "usock message dump:", &mhdr, sizeof(mhdr));
#endif
    return sendmsg(usockp->usock_fd, &mhdr, MSG_DONTWAIT);
}
Example #22
static void
deliverframe(struct virtif_user *viu)
{
	struct mbuf *m;
	struct rte_mbuf *rm, *rm0;
	struct vif_mextdata mext[STACK_MEXTDATA];
	struct vif_mextdata *mextp, *mextp0 = NULL;

	assert(viu->viu_nbufpkts > 0 && viu->viu_bufidx < MAX_PKT_BURST);
	rm0 = viu->viu_m_pkts[viu->viu_bufidx];
	assert(rm0 != NULL);
	viu->viu_bufidx++;
	viu->viu_nbufpkts--;

	if (rm0->pkt.nb_segs > STACK_MEXTDATA) {
		mextp = malloc(sizeof(*mextp) * rm0->pkt.nb_segs);
		if (mextp == NULL)
			goto drop;
	} else {
		mextp = mext;
	}
	mextp0 = mextp;

	for (rm = rm0; rm; rm = rm->pkt.next, mextp++) {
		mextp->mext_data = rte_pktmbuf_mtod(rm, void *);
		mextp->mext_dlen = rte_pktmbuf_data_len(rm);
		mextp->mext_arg = rm;
	}
	if (VIF_MBUF_EXTALLOC(mextp0, mextp - mextp0, &m) != 0)
		goto drop;

	VIF_DELIVERMBUF(viu->viu_virtifsc, m);

	if (mextp0 != mext)
		free(mextp0);
	return;

 drop:
	if (mextp0 != mext)
		free(mextp0);
	rte_pktmbuf_free(rm0);
}
Example #23
/* UDP or RAW */
inline int ipaugenblick_sendto(int sock,void *pdesc,int offset,int length,unsigned int ipaddr,unsigned short port)
{
    int rc;
    struct rte_mbuf *mbuf = (struct rte_mbuf *)pdesc;
    char *p_addr = rte_pktmbuf_mtod(mbuf,char *);
    struct sockaddr_in *p_addr_in;
    ipaugenblick_stats_send_called++;
    rte_pktmbuf_data_len(mbuf) = length;
    p_addr -= sizeof(struct sockaddr_in);
    p_addr_in = (struct sockaddr_in *)p_addr;
    p_addr_in->sin_family = AF_INET;
    p_addr_in->sin_port = htons(port);
    p_addr_in->sin_addr.s_addr = ipaddr;
    rte_atomic16_set(&(local_socket_descriptors[sock & SOCKET_READY_MASK].socket->write_ready_to_app),0);
    rc = ipaugenblick_enqueue_tx_buf(sock,mbuf);
    if(rc == 0)
        rte_atomic32_sub(&(local_socket_descriptors[sock & SOCKET_READY_MASK].socket->tx_space),length);
    ipaugenblick_stats_send_failure += (rc != 0);
    return rc;
}
Example #24
inline int ipaugenblick_send_bulk(int sock,struct data_and_descriptor *bufs_and_desc,int *offsets,int *lengths,int buffer_count)
{
    int rc,idx,total_length = 0;
    struct rte_mbuf *mbufs[buffer_count];

    for(idx = 0;idx < buffer_count;idx++) {
        /* TODO: set offsets */
        mbufs[idx] = (struct rte_mbuf *)bufs_and_desc[idx].pdesc;
        rte_pktmbuf_data_len(mbufs[idx]) = lengths[idx];
	total_length += lengths[idx];
    }
    ipaugenblick_stats_send_called++;
    ipaugenblick_stats_buffers_sent += buffer_count;
    rte_atomic16_set(&(local_socket_descriptors[sock & SOCKET_READY_MASK].socket->write_ready_to_app),0);
    rc = ipaugenblick_enqueue_tx_bufs_bulk(sock,mbufs,buffer_count);
    if(rc == 0)
	rte_atomic32_sub(&(local_socket_descriptors[sock & SOCKET_READY_MASK].socket->tx_space),total_length);
    ipaugenblick_stats_send_failure += (rc != 0);
    return rc;
}
Example #25
/*
 * Function sends unmatched packets to vswitchd.
 */
static void
send_packet_to_vswitchd(struct rte_mbuf *mbuf, struct dpdk_upcall *info)
{
	int rslt = 0;
	struct statistics *vswd_stat = NULL;
	void *mbuf_ptr = NULL;

	vswd_stat = &vport_stats[VSWITCHD];

	/* send one packet, delete information about segments */
	rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf);

	/* allocate space before the packet for the upcall info */
	mbuf_ptr = rte_pktmbuf_prepend(mbuf, sizeof(*info));

	if (mbuf_ptr == NULL) {
		printf("Cannot prepend upcall info\n");
		rte_pktmbuf_free(mbuf);
		switch_tx_drop++;
		vswd_stat->tx_drop++;
		return;
	}

	rte_memcpy(mbuf_ptr, info, sizeof(*info));

	/* send the packet and the upcall info to the daemon */
	rslt = rte_ring_sp_enqueue(vswitch_packet_ring, mbuf);
	if (rslt < 0) {
		if (rslt == -ENOBUFS) {
			rte_pktmbuf_free(mbuf);
			switch_tx_drop++;
			vswd_stat->tx_drop++;
			return;
		} else {
			overruns++;
		}
	}

	vswd_stat->tx++;
}
Example #26
static inline void
send_burst(struct rte_port_fd_writer *p)
{
	uint32_t i;

	for (i = 0; i < p->tx_buf_count; i++) {
		struct rte_mbuf *pkt = p->tx_buf[i];
		void *pkt_data = rte_pktmbuf_mtod(pkt, void*);
		size_t n_bytes = rte_pktmbuf_data_len(pkt);
		ssize_t ret;

		ret = write(p->fd, pkt_data, n_bytes);
		if (ret < 0)
			break;
	}

	RTE_PORT_FD_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - i);

	for (i = 0; i < p->tx_buf_count; i++)
		rte_pktmbuf_free(p->tx_buf[i]);

	p->tx_buf_count = 0;
}
Example #27
/* Blocking function that waits for a packet from datapath. 'pkt' will get
 * populated with packet data. */
int
dpdk_link_recv_packet(struct ofpbuf **pkt, struct dpif_dpdk_upcall *info)
{
    struct rte_mbuf *mbuf = NULL;
    uint16_t pktmbuf_len = 0;
    void *pktmbuf_data = NULL;

    DPDK_DEBUG()

    if (rte_ring_sc_dequeue(packet_ring, (void **)&mbuf) != 0) {
        return EAGAIN;
    }

    pktmbuf_data = rte_pktmbuf_mtod(mbuf, void *);
    pktmbuf_len = rte_pktmbuf_data_len(mbuf);
    rte_memcpy(info, pktmbuf_data, sizeof(*info));
    pktmbuf_data = (uint8_t *)pktmbuf_data + sizeof(*info);
    *pkt = ofpbuf_clone_data(pktmbuf_data, pktmbuf_len - sizeof(*info));

    rte_pktmbuf_free(mbuf);

    return 0;
}
Example #28
static uint32_t send_pkts(uint8_t port, struct rte_mempool* pool) {
	static uint64_t seq = 0;
	// alloc bufs
	struct rte_mbuf* bufs[BATCH_SIZE];
	uint32_t i;
	for (i = 0; i < BATCH_SIZE; i++) {
		struct rte_mbuf* buf = rte_pktmbuf_alloc(pool);
		rte_pktmbuf_data_len(buf) = 60;
		rte_pktmbuf_pkt_len(buf) = 60;
		bufs[i] = buf;
		// write seq number
		uint64_t* pkt = rte_pktmbuf_mtod(buf, uint64_t*);
		pkt[0] = seq++;
	}
	// send pkts
	uint32_t sent = 0;
	while (1) {
		sent += rte_eth_tx_burst(port, 0, bufs + sent, BATCH_SIZE - sent);
		if (sent >= BATCH_SIZE) {
			return sent;
		}
	}
}
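As a usage illustration, a matching receive loop might look like the sketch below. recv_pkts, the queue number and the reuse of BATCH_SIZE are assumptions carried over from the example above, not part of the original code.

/* Hypothetical receive counterpart: read back the sequence numbers written
 * by send_pkts() and release the mbufs. */
static uint16_t recv_pkts(uint8_t port) {
	struct rte_mbuf* bufs[BATCH_SIZE];
	uint16_t nb_rx = rte_eth_rx_burst(port, 0, bufs, BATCH_SIZE);
	uint16_t i;
	for (i = 0; i < nb_rx; i++) {
		/* the first 8 bytes of each frame carry the sequence number */
		uint64_t* seq = rte_pktmbuf_mtod(bufs[i], uint64_t*);
		printf("seq %llu, len %u\n", (unsigned long long)seq[0],
		       rte_pktmbuf_data_len(bufs[i]));
		rte_pktmbuf_free(bufs[i]);
	}
	return nb_rx;
}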
Example #29
/*
 * Send a reply message to the vswitchd
 */
static void
send_reply_to_vswitchd(struct dpdk_message *reply)
{
	struct rte_mbuf *mbuf = NULL;
	void *pktmbuf_data = NULL;
	int rslt = 0;

	/* Preparing the buffer to send */
	mbuf = rte_pktmbuf_alloc(pktmbuf_pool);

	if (!mbuf) {
		RTE_LOG(WARNING, APP, "Error : Unable to allocate an mbuf "
		        ": %s : %d", __FUNCTION__, __LINE__);
		stats_vswitch_tx_drop_increment(INC_BY_1);
		stats_vport_rx_drop_increment(VSWITCHD, INC_BY_1);
		return;
	}

	pktmbuf_data = rte_pktmbuf_mtod(mbuf, void *);
	rte_memcpy(pktmbuf_data, reply, sizeof(*reply));
	rte_pktmbuf_data_len(mbuf) = sizeof(*reply);

	/* Sending the buffer to vswitchd */
	rslt = rte_ring_mp_enqueue(vswitchd_reply_ring, (void *)mbuf);
	if (rslt < 0) {
		if (rslt == -ENOBUFS) {
			rte_pktmbuf_free(mbuf);
			stats_vswitch_tx_drop_increment(INC_BY_1);
			stats_vport_rx_drop_increment(VSWITCHD, INC_BY_1);
		} else {
			stats_vport_overrun_increment(VSWITCHD, INC_BY_1);
			stats_vport_rx_increment(VSWITCHD, INC_BY_1);
		}
	} else {
		stats_vport_rx_increment(VSWITCHD, INC_BY_1);
	}
}
Example #30
uint32_t
virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
{
	struct vhost_virtqueue *vq;
	struct vring_desc *desc;
	struct rte_mbuf *buff;
	/* The virtio_hdr is initialised to 0. */
	struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0,0,0,0,0,0},0};
	uint64_t buff_addr = 0;
	uint64_t buff_hdr_addr = 0;
	uint32_t head[MAX_PKT_BURST], packet_len = 0;
	uint32_t head_idx, packet_success = 0;
	uint16_t avail_idx, res_cur_idx;
	uint16_t res_base_idx, res_end_idx;
	uint16_t free_entries;
	uint8_t success = 0;

	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
	vq = dev->virtqueue_rx;
	count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
	/* As many data cores may want access to available buffers, they need to be reserved. */
	do {

		res_base_idx = vq->last_used_idx_res;

		avail_idx = *((volatile uint16_t *)&vq->avail->idx);

		free_entries = (avail_idx - res_base_idx);

		/*check that we have enough buffers*/
		if (unlikely(count > free_entries))
			count = free_entries;

		if (count == 0)
			return 0;

		res_end_idx = res_base_idx + count;
		/* vq->last_used_idx_res is atomically updated. */
		success = rte_atomic16_cmpset(&vq->last_used_idx_res, res_base_idx,
									res_end_idx);
	} while (unlikely(success == 0));
	res_cur_idx = res_base_idx;
	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n", dev->device_fh, res_cur_idx, res_end_idx);

	/* Prefetch available ring to retrieve indexes. */
	rte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]);

	/* Retrieve all of the head indexes first to avoid caching issues. */
	for (head_idx = 0; head_idx < count; head_idx++)
		head[head_idx] = vq->avail->ring[(res_cur_idx + head_idx) & (vq->size - 1)];

	/*Prefetch descriptor index. */
	rte_prefetch0(&vq->desc[head[packet_success]]);

	while (res_cur_idx != res_end_idx) {
		/* Get descriptor from available ring */
		desc = &vq->desc[head[packet_success]];
		/* Prefetch descriptor address. */
		rte_prefetch0(desc);

		buff = pkts[packet_success];

		/* Convert from gpa to vva (guest physical addr -> vhost virtual addr) */
		buff_addr = gpa_to_vva(dev, desc->addr);
		/* Prefetch buffer address. */
		rte_prefetch0((void*)(uintptr_t)buff_addr);

		{
			/* Copy virtio_hdr to packet and increment buffer address */
			buff_hdr_addr = buff_addr;
			packet_len = rte_pktmbuf_data_len(buff) + vq->vhost_hlen;

			/*
			 * If the descriptors are chained the header and data are placed in
			 * separate buffers.
			 */
			if (desc->flags & VRING_DESC_F_NEXT) {
				desc->len = vq->vhost_hlen;
				desc = &vq->desc[desc->next];
				/* Buffer address translation. */
				buff_addr = gpa_to_vva(dev, desc->addr);
				desc->len = rte_pktmbuf_data_len(buff);
			} else {
				buff_addr += vq->vhost_hlen;
				desc->len = packet_len;
			}
		}

		/* Update used ring with desc information */
		vq->used->ring[res_cur_idx & (vq->size - 1)].id = head[packet_success];
		vq->used->ring[res_cur_idx & (vq->size - 1)].len = packet_len;

		/* Copy mbuf data to buffer */
		rte_memcpy((void *)(uintptr_t)buff_addr, (const void*)buff->pkt.data, rte_pktmbuf_data_len(buff));

		res_cur_idx++;
		packet_success++;

		/* mergeable is disabled, so a header is required per buffer. */
		rte_memcpy((void *)(uintptr_t)buff_hdr_addr, (const void*)&virtio_hdr, vq->vhost_hlen);
		if (res_cur_idx < res_end_idx) {
			/* Prefetch descriptor index. */
			rte_prefetch0(&vq->desc[head[packet_success]]);
		}
	}

	rte_compiler_barrier();

	/* Wait until it's our turn to add our buffer to the used ring. */
	while (unlikely(vq->last_used_idx != res_base_idx))
		rte_pause();

	*(volatile uint16_t *)&vq->used->idx += count;

	vq->last_used_idx = res_end_idx;

	return count;
}