static void on_client_connect(int client_idx)
{
	/* Message 1: the list of all interfaces */
	struct rte_mbuf *buffer = get_buffer();
	if (!buffer) {
		return;
	}
	unsigned char *data = rte_pktmbuf_mtod(buffer, unsigned char *);
	*data = IPAUGENBLICK_NEW_IFACES;
	data++;
	rte_pktmbuf_data_len(buffer) = get_all_devices(data) + 1;
	rte_ring_enqueue(ipaugenblick_clients[client_idx].client_ring, (void *)buffer);

	/* Message 2: the list of all addresses */
	buffer = get_buffer();
	if (!buffer) {
		return;
	}
	data = rte_pktmbuf_mtod(buffer, unsigned char *);
	*data = IPAUGENBLICK_NEW_ADDRESSES;
	data++;
	rte_pktmbuf_data_len(buffer) = get_all_addresses(data) + 1;
	rte_ring_enqueue(ipaugenblick_clients[client_idx].client_ring, (void *)buffer);

	/* Message 3: end-of-record marker so the client knows the dump is complete */
	buffer = get_buffer();
	if (!buffer) {
		return;
	}
	data = rte_pktmbuf_mtod(buffer, unsigned char *);
	*data = IPAUGENBLICK_END_OF_RECORD;
	rte_ring_enqueue(ipaugenblick_clients[client_idx].client_ring, (void *)buffer);
}
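
For context, a minimal sketch of the consumer side of this pattern: the client drains its ring until the end-of-record marker arrives. The handle_notification helper is hypothetical; the IPAUGENBLICK_* constants and the per-client ring come from the snippet above.

/* Consumer-side sketch (handle_notification is a hypothetical callback):
 * drain the per-client ring until IPAUGENBLICK_END_OF_RECORD arrives. */
static void poll_client_ring(struct rte_ring *client_ring)
{
	struct rte_mbuf *mbuf;

	while (rte_ring_dequeue(client_ring, (void **)&mbuf) == 0) {
		unsigned char *data = rte_pktmbuf_mtod(mbuf, unsigned char *);
		unsigned char cmd = *data;

		/* byte 0 is the message type; the payload follows it */
		handle_notification(cmd, data + 1, rte_pktmbuf_data_len(mbuf) - 1);
		rte_pktmbuf_free(mbuf);
		if (cmd == IPAUGENBLICK_END_OF_RECORD)
			break;
	}
}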
Example #2
int
urdma_accl_post_recvv(struct ibv_qp *ib_qp, const struct iovec *iov, size_t iov_size,
		void *context)
{
	struct usiw_recv_wqe *wqe;
	struct usiw_qp *qp;
	unsigned int y;
	int x;

	if (iov_size > DPDK_VERBS_IOV_LEN_MAX) {
		return -EINVAL;
	}

	qp = container_of(ib_qp, struct usiw_qp, ib_qp);
	x = qp_get_next_recv_wqe(qp, &wqe);
	if (x < 0)
		return x;

	wqe->wr_context = context;
	memcpy(wqe->iov, iov, iov_size * sizeof(*iov));
	wqe->iov_count = iov_size;
	wqe->total_request_size = 0;
	for (y = 0; y < iov_size; ++y) {
		wqe->total_request_size += iov[y].iov_len;
	}
	wqe->msn = 0;
	wqe->recv_size = 0;
	wqe->input_size = 0;
	wqe->complete = false;
	x = rte_ring_enqueue(qp->rq0.ring, wqe);
	assert(x == 0);

	return 0;
} /* urdma_accl_post_recvv */
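
The snippet relies on container_of to recover the enclosing usiw_qp from the embedded ibv_qp. urdma ships its own definition; the conventional form looks like this (a sketch, not necessarily urdma's exact macro):

#include <stddef.h>

/* Recover a pointer to the enclosing struct from a pointer to one of
 * its members, using the member's offset within the type. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))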
Example #3
void ipDeFragment(void *handle, struct ip *iphead, struct sk_buff *skb)
{
	IpImpl *impl = (IpImpl *)handle;
	int index = addrtoHash(iphead->ip_src, iphead->ip_dst);
	int offset = ntohs(iphead->ip_off);
	int flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	if (((flags & IP_MF) == 0) && (offset == 0)) {
		/* Not a fragment: hand the packet straight to the ring.
		 * Note: allocate sizeof(struct ring_buf), not sizeof(struct ring_buf *). */
		struct ring_buf *ptr = (struct ring_buf *)rte_malloc("rp", sizeof(struct ring_buf), 0);
		if (ptr == NULL) OUTOFMEM
		ptr->type = 0;
		ptr->ptr = iphead;
		rte_ring_enqueue(impl->r, ptr);
	}
	else {
		printf("Fragment in %d.\n", index);
		fflush(stdout);
		addToHashTable(handle, &impl->tables[index], iphead, skb);
	}
	/* TODO: add IP packet info / an ipFragment head here */
}
Example #4
int anscli_ring_send(void *buff, int buff_len)
{
    void *msg;

    if (buff_len > ANS_RING_MSG_SIZE)
    {
        printf("Message too long, max is %d \n", ANS_RING_MSG_SIZE);
        return ANS_EMSGPOOL;
    }

    if (rte_mempool_get(anscli_message_pool, &msg) < 0)
    {
        printf("Getting message failed \n");
        return ANS_EMSGPOOL;
    }

    rte_memcpy(msg, buff, buff_len);

    if (rte_ring_enqueue(anscli_ring_tx, msg) < 0)
    {
        printf("Sending message to ANS stack failed \n");
        rte_mempool_put(anscli_message_pool, msg);
        return ANS_EMSGPOOL;
    }

    return 0;
}
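
The receiving side of this message pattern (used here and in the next example) would dequeue from the ring and hand the buffer back to the shared mempool once consumed. A minimal sketch, assuming the same anscli_ring_tx/anscli_message_pool objects; the function name is hypothetical:

/* Sketch: receive one message sent with anscli_ring_send() and release
 * the buffer back to the shared mempool. Returns 0 on success. */
int anscli_ring_recv(void *buff, int buff_len)
{
    void *msg;

    if (rte_ring_dequeue(anscli_ring_tx, &msg) < 0)
        return -1;                      /* ring is empty */

    rte_memcpy(buff, msg, buff_len);    /* caller sizes buff <= ANS_RING_MSG_SIZE */
    rte_mempool_put(anscli_message_pool, msg);
    return 0;
}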
Example #5
int netdpcmd_ring_send(void *buff, int buff_len)
{
    void *msg;

    if (buff_len > NETDP_RING_MSG_SIZE)
    {
        printf("Message too long, max is %d \n", NETDP_RING_MSG_SIZE);
        return NETDP_EMSGPOOL;
    }

    if (rte_mempool_get(netdpcmd_message_pool, &msg) < 0)
    {
        printf("Getting message failed \n");
        return NETDP_EMSGPOOL;
    }

    rte_memcpy(msg, buff, buff_len);

    if (rte_ring_enqueue(netdpcmd_ring_tx, msg) < 0)
    {
        printf("Sending message to NETDP stack failed \n");
        rte_mempool_put(netdpcmd_message_pool, msg);
        return NETDP_EMSGPOOL;
    }

    return 0;
}
Example #6
/*
 * Return a buffered packet.
 */
int
onvm_nf_return_pkt(struct rte_mbuf* pkt) {
        /* FIXME: should we get a batch of buffered packets and then enqueue? Can we keep stats? */
        if (unlikely(rte_ring_enqueue(tx_ring, pkt) == -ENOBUFS)) {
                rte_pktmbuf_free(pkt);
                tx_stats->tx_drop[nf_info->instance_id]++;
                return -ENOBUFS;
        } else {
                tx_stats->tx_returned[nf_info->instance_id]++;
        }
        return 0;
}
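
The -ENOBUFS comparison matches the classic fixed-size ring API, where rte_ring_enqueue() returns 0 on success and -ENOBUFS when the ring is full. For reference, a ring like tx_ring could be created as follows (a sketch with an assumed name and size, not ONVM's actual manager code):

/* Sketch: create a single-producer/single-consumer ring. The count must
 * be a power of two; usable capacity is count - 1. */
static struct rte_ring *create_tx_ring(void)
{
        struct rte_ring *r = rte_ring_create("example_tx_ring", 1024,
                        rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);
        if (r == NULL)
                rte_exit(EXIT_FAILURE, "Cannot create ring: %s\n",
                                rte_strerror(rte_errno));
        return r;
}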
Example #7
int
urdma_accl_post_sendv(struct ibv_qp *ib_qp, struct iovec *iov, size_t iov_size,
		struct urdma_ah *ah, void *context)
{
	struct usiw_qp *qp;
	struct ee_state *ee;
	struct usiw_send_wqe *wqe;
	unsigned int y;
	int x;

	qp = container_of(ib_qp, struct usiw_qp, ib_qp);
	if (!ah && !qp_connected(qp)) {
		return -EINVAL;
	}

	if (iov_size > DPDK_VERBS_IOV_LEN_MAX) {
		return -EINVAL;
	}

	ee = &qp->remote_ep;
	if (!ee) {
		return -EINVAL;
	}

	x = qp_get_next_send_wqe(qp, &wqe);
	if (x < 0)
		return x;

	wqe->opcode = usiw_wr_send;
	wqe->wr_context = context;
	memcpy(wqe->iov, iov, iov_size * sizeof(*iov));
	wqe->iov_count = iov_size;
	wqe->remote_ep = ee;
	wqe->state = SEND_WQE_INIT;
	wqe->msn = 0; /* will be assigned at send time */
	wqe->total_length = 0;
	for (y = 0; y < iov_size; ++y) {
		wqe->total_length += wqe->iov[y].iov_len;
	}
	wqe->bytes_sent = 0;
	wqe->bytes_acked = 0;
	x = rte_ring_enqueue(qp->sq.ring, wqe);
	assert(x == 0);

	return 0;
} /* urdma_accl_post_sendv */
Example #8
int socket_connect(int identifier, struct sock_addr *client_addr)
{
   /* Using a static destination IP for now; in the future get it from config. */
   int i;
   uint8_t ip[4];
   ip[0] = 192;
   ip[1] = 168;
   ip[2] = 78;
   ip[3] = 2;
   uint32_t DestIp = 0;
   static uint16_t SrcPorts = 0;
   if (SrcPorts == 0) {
      SrcPorts = 10000;
   }
   SrcPorts++;
   for (i = 0; i < 4; i++) {
      DestIp |= ip[i] << i*8;
   }
   printf("opening connection connect call\n");
   Socket_Send_Msg *Msg = NULL;
   struct tcb *ptcb = get_tcb_by_identifier(identifier);
   if (rte_mempool_get(buffer_message_pool, (void **)&Msg) < 0) {
      printf("Failed to get message buffer\n");
      return -1; /* cannot continue: Msg would be NULL below */
   }
   Msg->m_Identifier = identifier;
   Msg->m_Msg_Type = CONNECTION_OPEN;
   if (rte_ring_enqueue(ptcb->tcb_socket_ring_send, Msg) < 0) {
      printf("Failed to send message - message discarded\n");
      rte_mempool_put(buffer_message_pool, Msg);
   }
   ptcb->ipv4_src = htonl(client_addr->ip);
   ptcb->sport = client_addr->port;
   ptcb->ipv4_dst = DestIp;
   ptcb->dport = SrcPorts;
   ptcb->next_seq = 1;
   /* Block until the syn-ack handler signals condAccept. */
   pthread_mutex_lock(&(ptcb->mutex));
   ptcb->WaitingOnConnect = 1;
   pthread_cond_wait(&(ptcb->condAccept), &(ptcb->mutex));
   ptcb->WaitingOnConnect = 0;
   pthread_mutex_unlock(&(ptcb->mutex));
 //  remove_tcb(identifier);
   return 0;
}
Example #9
int
EnqueueMBuf(struct rte_mbuf *mbuf)
{
   struct rte_mbuf **Msg;
   if (rte_mempool_get(buffer_message_pool, (void **)&Msg) < 0) {
      printf("Failed to get rte_mbuf message buffer\n");
      return -1;
   }
   *Msg = mbuf;
   if (rte_ring_enqueue(ip_to_ether_ring_send, Msg) < 0) {
      printf("Failed to send rte_mbuf message - message discarded\n");
      rte_mempool_put(buffer_message_pool, Msg);
   }
   else {
      printf("mbuf enqueue = %p\n", mbuf);
   }
   return 0;
}
Example #10
int
socket_close(int identifier)
{
   printf("closing tcb\n");
   Socket_Send_Msg *Msg = NULL;
   struct tcb *ptcb = get_tcb_by_identifier(identifier);
   if (rte_mempool_get(buffer_message_pool, (void **)&Msg) < 0) {
      printf("Failed to get message buffer\n");
      return -1; /* cannot continue: Msg would be NULL below */
   }
   Msg->m_Identifier = identifier;
   Msg->m_Msg_Type = SOCKET_CLOSE;
   if (rte_ring_enqueue(ptcb->tcb_socket_ring_send, Msg) < 0) {
      printf("Failed to send message - message discarded\n");
      rte_mempool_put(buffer_message_pool, Msg);
   }
 //  remove_tcb(identifier);
   return 0;
}
Example #11
/**
 * Process a completed JOB_AES_HMAC job and keep processing jobs until
 * get_completed_job returns NULL
 *
 * @param qp		Queue Pair to process
 * @param job		JOB_AES_HMAC job
 *
 * @return
 * - Number of processed jobs
 */
static unsigned
handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
	struct rte_mbuf *m = NULL;
	unsigned processed_jobs = 0;

	while (job) {
		processed_jobs++;
		m = post_process_mb_job(qp, job);
		if (m)
			rte_ring_enqueue(qp->processed_pkts, (void *)m);
		else
			qp->qp_stats.dequeue_err_count++;

		job = (*qp->ops->job.get_completed_job)(&qp->mb_mgr);
	}

	return processed_jobs;
}
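
On the other end of qp->processed_pkts, the PMD's dequeue path pulls the finished mbufs back out for the application. A simplified sketch of that drain loop (the qp layout is taken from the snippet above; this is not the driver's actual dequeue_burst code):

/* Sketch: pop up to nb_pkts processed mbufs off the QP's ring. */
static unsigned
drain_processed_pkts(struct aesni_mb_qp *qp, struct rte_mbuf **pkts,
		unsigned nb_pkts)
{
	unsigned i;

	for (i = 0; i < nb_pkts; i++) {
		if (rte_ring_dequeue(qp->processed_pkts, (void **)&pkts[i]) != 0)
			break; /* ring is empty */
	}
	return i;
}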
Example #12
/* TCP */
int ipaugenblick_receive(int sock,void **pbuffer,int *total_len,int *first_segment_len,void **pdesc)
{ 
    struct rte_mbuf *mbuf;

    ipaugenblick_stats_receive_called++;
    /* first check the shadow: a shadow pointer is saved when the last mbuf was delivered partially */
    mbuf = ipaugenblick_get_from_shadow(sock);
    if((mbuf)&&(*total_len > 0)) { /* total_len > 0 means user restricts total read count */
//	printf("%s %d %d\n",__FILE__,__LINE__,*total_len);
	int total_len2 = *total_len;
	/* now find the mbuf (if any) to be delivered partially and save it to the shadow */
	ipaugenblick_try_read_exact_amount(mbuf,sock,&total_len2,first_segment_len);
	*pbuffer = rte_pktmbuf_mtod(mbuf,void *);
    	*pdesc = mbuf;
	if((total_len2 > 0)&&(total_len2 < *total_len)) { /* read less than user requested, try ring */
//		printf("%s %d %d\n",__FILE__,__LINE__,total_len2);
		struct rte_mbuf *mbuf2 = ipaugenblick_dequeue_rx_buf(sock);
		if(!mbuf2) { /* ring is empty */
			*total_len = total_len2;
//			printf("%s %d\n",__FILE__,__LINE__);
		}
		else { /* now try to find an mbuf to be delivered partially in the chain */
			int total_len3 = *total_len - total_len2;
			int first_segment_len_dummy;
			ipaugenblick_try_read_exact_amount(mbuf2,sock,&total_len3,&first_segment_len_dummy);
			struct rte_mbuf *last_mbuf = rte_pktmbuf_lastseg(mbuf);
			last_mbuf->next = mbuf2;
			*total_len = total_len2 + total_len3;
//			printf("%s %d %d\n",__FILE__,__LINE__,total_len3);
		}
	}
	else {
//		printf("%s %d %d\n",__FILE__,__LINE__,*total_len);
		//goto read_from_ring;
	}
	if(local_socket_descriptors[sock].shadow) {
		uint32_t ringidx_ready_mask = sock|(SOCKET_READABLE_BIT << SOCKET_READY_SHIFT);
		if(local_socket_descriptors[sock].select != -1)
			rte_ring_enqueue(selectors[local_socket_descriptors[sock].select].ready_connections,(void *)ringidx_ready_mask);	
	}
	return 0;
    }
Example #13
int
urdma_accl_post_read(struct ibv_qp *ib_qp, void *addr, size_t length,
		struct urdma_ah *ah, uint64_t remote_addr, uint32_t rkey,
		void *context)
{
	struct usiw_send_wqe *wqe;
	struct ee_state *ee;
	struct usiw_qp *qp;
	int x;

	qp = container_of(ib_qp, struct usiw_qp, ib_qp);
	if (!ah && !qp_connected(qp)) {
		return -EINVAL;
	}

	ee = &qp->remote_ep;
	if (!ee) {
		return -EINVAL;
	}

	x = qp_get_next_send_wqe(qp, &wqe);
	if (x < 0)
		return x;

	wqe->opcode = usiw_wr_read;
	wqe->wr_context = context;
	wqe->iov[0].iov_base = addr;
	wqe->iov[0].iov_len = length;
	wqe->iov_count = 1;
	wqe->remote_addr = remote_addr;
	wqe->rkey = rkey;
	wqe->remote_ep = ee;
	wqe->state = SEND_WQE_INIT;
	wqe->msn = 0; /* will be assigned at send time */
	wqe->local_stag = 0;
	wqe->total_length = length;
	wqe->bytes_sent = 0;
	x = rte_ring_enqueue(qp->sq.ring, wqe);
	assert(x == 0);

	return 0;
} /* urdma_accl_post_read */
Example #14
int
socket_send(int ser_id, const unsigned char *message, int len)
{
   Socket_Send_Msg *Msg = NULL;
   struct tcb *ptcb = get_tcb_by_identifier(ser_id);
   if (rte_mempool_get(buffer_message_pool, (void **)&Msg) < 0) {
      printf("Failed to get message buffer\n");
      return -1; /* cannot continue: Msg would be NULL below */
   }
   Msg->m_Identifier = ser_id;
   Msg->m_Len = len;
   Msg->m_Msg_Type = SEND_DATA;
   memcpy(Msg->m_Data, message, len);
   if (rte_ring_enqueue(ptcb->tcb_socket_ring_send, Msg) < 0) {
      printf("Failed to send message - message discarded\n");
      rte_mempool_put(buffer_message_pool, Msg);
   }
   printf("****** Enqued for  %s and len %d and identifier %d\n",(char *)Msg->m_Data, Msg->m_Len, Msg->m_Identifier);
  // sendtcppacket(ptcb, mbuf, message, len);
  // ptcb->send_data(message, len); 
   return 0;
}
Example #15
/**
 * CALLED BY NF:
 * Application main function - loops through
 * receiving and processing packets. Never returns
 */
int
onvm_nf_run(struct onvm_nf_info* info, int(*handler)(struct rte_mbuf* pkt, struct onvm_pkt_meta* meta)) {
        void *pkts[PKT_READ_SIZE];
        struct onvm_pkt_meta* meta;

        printf("\nClient process %d handling packets\n", info->instance_id);
        printf("[Press Ctrl-C to quit ...]\n");

        /* Listen for ^C so we can exit gracefully */
        signal(SIGINT, handle_signal);

        for (; keep_running;) {
                uint16_t i, j, nb_pkts = PKT_READ_SIZE;
                void *pktsTX[PKT_READ_SIZE];
                int tx_batch_size = 0;
                int ret_act;

                /* try dequeuing max possible packets first, if that fails, get the
                 * most we can. Loop body should only execute once, maximum */
                while (nb_pkts > 0 &&
                                unlikely(rte_ring_dequeue_bulk(rx_ring, pkts, nb_pkts) != 0))
                        nb_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring), PKT_READ_SIZE);

                if(nb_pkts == 0) {
                        continue;
                }
                /* Give each packet to the user processing function */
                for (i = 0; i < nb_pkts; i++) {
                        meta = onvm_get_pkt_meta((struct rte_mbuf*)pkts[i]);
                        ret_act = (*handler)((struct rte_mbuf*)pkts[i], meta);
                        /* NF returns 0 to return packets or 1 to buffer */
                        if(likely(ret_act == 0)) {
                                pktsTX[tx_batch_size++] = pkts[i];
                        }
                        else {
                                tx_stats->tx_buffer[info->instance_id]++;
                        }
                }

                if (unlikely(tx_batch_size > 0 && rte_ring_enqueue_bulk(tx_ring, pktsTX, tx_batch_size) == -ENOBUFS)) {
                        tx_stats->tx_drop[info->instance_id] += tx_batch_size;
                        for (j = 0; j < tx_batch_size; j++) {
                                rte_pktmbuf_free(pktsTX[j]);
                        }
                } else {
                        tx_stats->tx[info->instance_id] += tx_batch_size;
                }
        }

        nf_info->status = NF_STOPPED;

        /* Put this NF's info struct back into queue for manager to ack shutdown */
        nf_info_ring = rte_ring_lookup(_NF_QUEUE_NAME);
        if (nf_info_ring == NULL) {
                rte_mempool_put(nf_info_mp, nf_info); // give back memory
                rte_exit(EXIT_FAILURE, "Cannot get nf_info ring for shutdown");
        }

        if (rte_ring_enqueue(nf_info_ring, nf_info) < 0) {
                rte_mempool_put(nf_info_mp, nf_info); // give back memory
                rte_exit(EXIT_FAILURE, "Cannot send nf_info to manager for shutdown");
        }
        return 0;
}
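
A minimal handler to pass into onvm_nf_run() might look like the sketch below. Returning 0 hands the packet back for TX and returning 1 means the NF buffered it, as the loop above expects; the meta action/destination fields follow ONVM conventions, and the specific action and port values are assumptions here.

/* Sketch: a pass-through NF handler. */
static int
packet_handler(struct rte_mbuf *pkt, struct onvm_pkt_meta *meta) {
        (void)pkt;
        meta->action = ONVM_NF_ACTION_OUT; /* send the packet out a port */
        meta->destination = 0;             /* port 0 (assumption) */
        return 0;                          /* 0 = return packet for TX */
}

/* Usage: onvm_nf_run(nf_info, &packet_handler); */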
Example #16
//Add the data to the fragment list. Each fragment should be unique;
//whether duplicates can actually occur is debatable.
void adddToipFra(void *handle, struct srcDstAddr * fa, struct ipPacketHead * table, struct ip *iphead, struct sk_buff * skb)
{
	IpImpl * impl = (IpImpl *)handle;
	if (((ntohs(iphead->ip_off)&~IP_OFFSET) & IP_MF) == 0){
		table->MF = 0;
	}
printf("\n2 in addr\n ");
fflush(stdout);
	if (table->ipFra == NULL){
		table->ipFra = (struct ipFragment *)rte_malloc("ipFra", sizeof(struct ipFragment),0);
		if (table ->ipFra == NULL ){printf("Out of Mem1!\n");return ;}
		else{
			table->ipFra->next = NULL;
			table->ipFra->seq = NULL;
			table->ipFra->skb = skb;
			//table-> ipFra -> myJiffies = getTime();
			table->fraSeq = table->ipFra;
			table->ipFra->length = iphead -> ip_len;
			table->ipFra->offset = ntohs(iphead->ip_off) & IP_OFFSET;
			/*
			if(impl -> tail == NULL){
				impl -> tail = table -> ipFra;
				table -> ipFra -> timer_pre = impl -> head;
				impl -> head -> timer_next = table -> ipFra;
				impl -> tail -> timer_next = NULL;
								
			}else{
			table -> ipFra -> timer_pre = impl -> tail;
			impl -> tail -> timer_next = table -> ipFra;
			impl -> tail = table -> ipFra;
			impl -> tail -> timer_next = NULL;
			}*/
			/*
			table->ipFra->timer_pre -> timer_next = table -> ipFra -> timer_next;
			if(table->ipFra->timer_next)
				table->ipFra->timer_next ->timer_pre = table -> ipFra -> timer_pre;
			impl -> tail -> timer_next = table -> ipFra;
			table -> ipFra -> timer_pre = impl -> tail;
			impl -> tail = table -> ipFra;
			impl -> tail -> timer_next = NULL;
			*/
		}
	}
	else{
		//What we do here:
		//1. record the order in which the fragments arrive.
		//2. sort the fragments by offset.
		//3. after recording, check whether the fragment set is complete.
		//4. if incomplete, stop here; if complete, push the packet to the packet pool.
		//timer_link
		/*
		table -> ipFra -> myJiffies = getTime();//just change the first's myJiffies.
		table->ipFra->timer_pre -> timer_next = table -> ipFra -> timer_next;
		if(table->ipFra->timer_next)
			table->ipFra->timer_next ->timer_pre = table -> ipFra -> timer_pre;
		impl -> tail -> timer_next = table -> ipFra;
		table -> ipFra -> timer_pre = impl -> tail;
		impl -> tail = table -> ipFra;
		impl -> tail -> timer_next = NULL;
		printf("\naddr:%ld %ld %ld\n",(long)table -> ipFra, (long)table -> ipFra -> timer_pre, (long)table -> ipFra->timer_next);	*/
		struct ipFragment * current, *pre,*newFrag;
		newFrag = (struct ipFragment *)rte_malloc("Fra", sizeof(struct ipFragment),0);
		if (newFrag == NULL){printf("Out of Mem2!\n");return ;}
		else{
			//here edit the new fragment
			//to do:edit the info.

			newFrag->skb = skb;
			newFrag->seq = NULL;
			newFrag->next = NULL;
			newFrag->offset = ntohs(iphead->ip_off) & IP_OFFSET;
			newFrag->length = iphead -> ip_len;
		}
		//1. record the arrival order of the packet.
		current = table->fraSeq;
		pre = current;
printf("3 ");
fflush(stdout);
		while (current){
			pre = current;
			current = current->seq;
		}
printf("4 ");
fflush(stdout);
		pre->seq = newFrag;
		//2.sort by offset
		current = table->ipFra;
		pre = current;
		if(current -> offset == newFrag -> offset){
			//if a fragment with the same offset exists, just replace it;
			//the arrival sequence is left unchanged.
			if(current -> length < newFrag -> length)
			{
				newFrag -> next = current -> next;
				table -> ipFra = newFrag;
			}
		}
		else if (current->offset > newFrag->offset){
			newFrag->next = current;
			table->ipFra = newFrag;
		}
		else{
printf("5 ");
fflush(stdout);

			while (current && current->offset < newFrag->offset)
			{

				pre = current;
				current = current -> next;
				if(current -> offset == newFrag -> offset){
				//if find the same packet, just change the next, not change the coming sequence.
					if(current -> length < newFrag -> length)
					{
						newFrag -> next = current -> next;
						pre -> next = newFrag;
					}
				}
			}
printf("6 ");
fflush(stdout);
			pre->next = newFrag;
			newFrag->next = current;
		}
		//3. judge whether the fragment set is complete.
		if (table->MF == 0){//got the last fragment, need to judge now.
//int num = 0;
			printf("MF =0.\n");
			current = table->ipFra;
			pre = current;
printf("7 ");
fflush(stdout);
			while (current -> next){
				if (current->offset + current->length > current->next->offset){
					pre = current;
					current = current->next;
					/*to do :debug!*/
					printf("In loop 7.\n");
 					//if(pre == current){printf("The same.\n");return;}
				}
				else
					break;
			}
			printf("In if.\n");
			if (pre->offset + pre->length >= current->offset){
				//Job done.
				//here has two things to do.
				//1.
				//return;
				
				struct ring_buf * ptr = (struct ring_buf *)rte_malloc("ring_buf",sizeof(struct ring_buf),0);
				//void **obj = rte_malloc("rp",sizeof(void *)*2,0);
				if(ptr == NULL)OUTOFMEM
				ptr -> type = 1;

				ptr -> ptr = table -> ipFra;
				//obj[0] = ptr;
				rte_ring_enqueue(impl -> r, ptr);
			
		//printf("\naddr:%ld %ld %ld\n",(long)table -> ipFra, (long)table -> ipFra -> timer_pre, (long)table -> ipFra->timer_next);	
				if(impl -> tail == table )
				{
//printf("3.1");
//fflush(stdout);
					impl -> tail = table ->timer_pre;	
					impl -> tail ->timer_next = NULL;
				}
				else
				{
//printf("3.2");
//fflush(stdout);
					//printf("%ld %ld", (long)table -> ipFra -> timer_pre, (long)table -> ipFra->timer_next);	
//fflush(stdout);
					table->timer_pre -> timer_next = table  -> timer_next;
//printf("3.3");
//fflush(stdout);					
					if(table -> timer_next)
						table->timer_next ->timer_pre = table ->  timer_pre;
				}
				
				//consider here how to unlink the completed fragment chain
				if(table -> next){
					table -> next -> pre = table -> pre;
				}//here just for test ring
//printf("2");
//fflush(stdout);
				if(table -> pre){
				table -> pre -> next = table -> next;
				}else
				{
					fa -> packets = NULL;
				}
				realsePacket(handle, ptr->ptr);
//printf("3");
//fflush(stdout);
				
//printf("4");
//fflush(stdout);
			//	ptr = NULL;
			//	rte_ring_dequeue(impl -> r, (void **)&ptr);
				//ptr = getPacket(handle);
				//printf("In ring %d IP:%p.\n",ptr -> type, ptr -> ptr);
				
			}else
				printf("Job not done!\n");
			//else: the fragment set is not complete yet, just continue.
		}
printf("8 ");
fflush(stdout);
	}

}
Example #17
void send_loop(void)
{
	RTE_LOG(INFO, APP, "send_loop()\n");
	char pkt[PKT_SIZE] = {0};
	int nreceived;

	int retval = 0;
	(void) retval;
#ifdef CALC_CHECKSUM
	unsigned int kk = 0;
#endif
	srand(time(NULL));

	//Initialize packet contents
	int i;
	for(i = 0; i < PKT_SIZE; i++)
		pkt[i] = rand()%256;

#if ALLOC_METHOD == ALLOC_APP
	struct rte_mempool * packets_pool = rte_mempool_lookup("ovs_mp_1500_0_262144");
	//struct rte_mempool * packets_pool = rte_mempool_lookup("packets");

	//Create mempool
	//struct rte_mempool * packets_pool = rte_mempool_create(
	//	"packets",
	//	NUM_PKTS,
	//	MBUF_SIZE,
	//	CACHE_SIZE,					//This is the size of the mempool cache
	//	sizeof(struct rte_pktmbuf_pool_private),
	//	rte_pktmbuf_pool_init,
	//	NULL,
	//	rte_pktmbuf_init,
	//	NULL,
	//	rte_socket_id(),
	//	0 /*NO_FLAGS*/);


	if(packets_pool == NULL)
	{
		RTE_LOG(INFO, APP, "rte_errno: %s\n", rte_strerror(rte_errno));
		rte_exit(EXIT_FAILURE, "Cannot find memory pool\n");
	}

	RTE_LOG(INFO, APP, "There are %d free packets in the pool\n",
		rte_mempool_count(packets_pool));

#endif

#ifdef USE_BURST
	struct rte_mbuf * packets_array[BURST_SIZE] = {0};
	struct rte_mbuf * packets_array_rx[BURST_SIZE] = {0};
	int ntosend;
	int n;
	(void) n;

	/* prealloc packets */
	do
	{
		n = rte_mempool_get_bulk(packets_pool, (void **) packets_array, BURST_SIZE);
	} while(n != 0 && !stop);
	ntosend = BURST_SIZE;

#else
	struct rte_mbuf * mbuf;
	/* prealloc packet */
	do {
		mbuf = rte_pktmbuf_alloc(packets_pool);
	} while(mbuf == NULL);

#endif

	RTE_LOG(INFO, APP, "Starting sender loop\n");
	signal (SIGINT, crtl_c_handler);
	stop = 0;
	while(likely(!stop))
	{
		while(pause_);
#ifdef USE_BURST

	#if ALLOC_METHOD == ALLOC_OVS
		//Try to get BURS_SIZE free slots
		ntosend = rte_ring_dequeue_burst(alloc_q, (void **) packets_array, BURST_SIZE);
	#elif ALLOC_METHOD == ALLOC_APP
		//do
		//{
		//	n = rte_mempool_get_bulk(packets_pool, (void **) packets_array, BURST_SIZE);
		//} while(n != 0 && !stop);
		//ntosend = BURST_SIZE;
	#else
		#error "No implemented"
	#endif

		//Copy data to the buffers
		for(i = 0; i < ntosend; i++)
		{
			rte_memcpy(packets_array[i]->buf_addr, pkt, PKT_SIZE);
			//fill_packet(packets_array[i]->pkt.data);
			packets_array[i]->next = NULL;
			packets_array[i]->pkt_len = PKT_SIZE;
			packets_array[i]->data_len = PKT_SIZE;

		#ifdef CALC_CHECKSUM
			/* checksum this packet only; reusing the outer counter i
			 * here (as the original did) clobbers the copy loop */
			for(kk = 0; kk < 8; kk++)
				checksum += ((uint64_t *)packets_array[i]->buf_addr)[kk];
		#endif
		}

		//Enqueue data (try until all the allocated packets are enqueue)
		i = 0;
		while(i < ntosend && !stop)
		{
			i += rte_ring_enqueue_burst(tx_ring, (void **) &packets_array[i], ntosend - i);

			/* also dequeue some packets */
			nreceived= rte_ring_dequeue_burst(rx_ring, (void **) packets_array_rx, BURST_SIZE);
			rx += nreceived; /* update statistics */
		}

#else	// [NO] USE_BURST
	#if ALLOC_METHOD  == ALLOC_OVS //Method 1
		//Read a buffer to be used as a buffer for a packet
		retval = rte_ring_dequeue(alloc_q, (void **)&mbuf);
		if(retval != 0)
		{
		#ifdef CALC_ALLOC_STATS
			//stats.alloc_fails++;
		#endif
			continue;
		}
	#elif ALLOC_METHOD  == ALLOC_APP //Method 2
		//mbuf = rte_pktmbuf_alloc(packets_pool);
		//if(mbuf == NULL)
		//{
		//#ifdef CALC_ALLOC_STATS
		//	stats.alloc_fails++;
		//#endif
		//	continue;
		//}
	#else
		#error "ALLOC_METHOD has a non valid value"
	#endif

	#if DELAY_CYCLES > 0
		//This loop increases the number of packets per second (don't ask me why)
		unsigned long long j = 0;
		for(j = 0; j < DELAY_CYCLES; j++)
			asm("");
	#endif

		//Copy packet to the correct buffer
		rte_memcpy(mbuf->buf_addr, pkt, PKT_SIZE);
		//fill_packet(mbuf->pkt.data);
		//mbuf->pkt.next = NULL;
		//mbuf->pkt.pkt_len = PKT_SIZE;
		//mbuf->pkt.data_len = PKT_SIZE;
		(void) pkt;
		mbuf->next = NULL;
		mbuf->pkt_len = PKT_SIZE;
		mbuf->data_len = PKT_SIZE;

	#ifdef CALC_CHECKSUM
		for(kk = 0; kk < 8; kk++)
			checksum += ((uint64_t *)mbuf->buf_addr)[kk];
	#endif

		//this method avoids dropping packets:
		//simply retries until the packet is inserted in the queue
		tryagain:
		retval = rte_ring_enqueue(tx_ring, (void *) mbuf);
		if(retval == -ENOBUFS && !stop)
		{
	#ifdef CALC_TX_TRIES
			//stats.tx_retries++;
	#endif
			goto tryagain;
		}

	#ifdef CALC_TX_STATS
		//stats.tx++;
	#endif

#endif //USE_BURST
	}

#ifdef CALC_CHECKSUM
	printf("Checksum was %" PRIu64 "\n", checksum);
#endif

}
Example #18
File: uhd_dpdk.c, Project: dkozel/uhd
/*
 * Initialize a given port using default settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 * FIXME: Starting with assumption of one thread/core per port
 */
static inline int uhd_dpdk_port_init(struct uhd_dpdk_port *port,
                                     struct rte_mempool *rx_mbuf_pool,
                                     unsigned int mtu)
{
    int retval;

    /* Check for a valid port */
    if (port->id >= rte_eth_dev_count())
        return -ENODEV;

    /* Set up Ethernet device with defaults (1 RX ring, 1 TX ring) */
    /* FIXME: Check if hw_ip_checksum is possible */
    struct rte_eth_conf port_conf = {
        .rxmode = {
            .max_rx_pkt_len = mtu,
            .jumbo_frame = 1,
            .hw_ip_checksum = 1,
        }
    };
    retval = rte_eth_dev_configure(port->id, 1, 1, &port_conf);
    if (retval != 0)
        return retval;

    retval = rte_eth_rx_queue_setup(port->id, 0, DEFAULT_RING_SIZE,
                 rte_eth_dev_socket_id(port->id), NULL, rx_mbuf_pool);
    if (retval < 0)
        return retval;

    retval = rte_eth_tx_queue_setup(port->id, 0, DEFAULT_RING_SIZE,
                 rte_eth_dev_socket_id(port->id), NULL);
    if (retval < 0)
        goto port_init_fail;

    /* Create the hash table for the RX sockets */
    char name[32];
    snprintf(name, sizeof(name), "rx_table_%u", port->id);
    struct rte_hash_parameters hash_params = {
        .name = name,
        .entries = UHD_DPDK_MAX_SOCKET_CNT,
        .key_len = sizeof(struct uhd_dpdk_ipv4_5tuple),
        .hash_func = NULL,
        .hash_func_init_val = 0,
    };
    port->rx_table = rte_hash_create(&hash_params);
    if (port->rx_table == NULL) {
        retval = rte_errno;
        goto port_init_fail;
    }

    /* Create ARP table */
    snprintf(name, sizeof(name), "arp_table_%u", port->id);
    hash_params.name = name;
    hash_params.entries = UHD_DPDK_MAX_SOCKET_CNT;
    hash_params.key_len = sizeof(uint32_t);
    hash_params.hash_func = NULL;
    hash_params.hash_func_init_val = 0;
    port->arp_table = rte_hash_create(&hash_params);
    if (port->arp_table == NULL) {
        retval = rte_errno;
        goto free_rx_table;
    }

    /* Set up list for TX queues */
    LIST_INIT(&port->txq_list);

    /* Start the Ethernet port. */
    retval = rte_eth_dev_start(port->id);
    if (retval < 0) {
        goto free_arp_table;
    }

    /* Display the port MAC address. */
    rte_eth_macaddr_get(port->id, &port->mac_addr);
    RTE_LOG(INFO, EAL, "Port %u MAC: %02x %02x %02x %02x %02x %02x\n",
                (unsigned)port->id,
                port->mac_addr.addr_bytes[0], port->mac_addr.addr_bytes[1],
                port->mac_addr.addr_bytes[2], port->mac_addr.addr_bytes[3],
                port->mac_addr.addr_bytes[4], port->mac_addr.addr_bytes[5]);

    struct rte_eth_link link;
    rte_eth_link_get(port->id, &link);
    RTE_LOG(INFO, EAL, "Port %u UP: %d\n", port->id, link.link_status);

    return 0;

free_arp_table:
    rte_hash_free(port->arp_table);
free_rx_table:
    rte_hash_free(port->rx_table);
port_init_fail:
    return rte_errno;
}

static int uhd_dpdk_thread_init(struct uhd_dpdk_thread *thread, unsigned int id)
{
    if (!ctx || !thread)
        return -EINVAL;

    unsigned int socket_id = rte_lcore_to_socket_id(id);
    thread->id = id;
    thread->rx_pktbuf_pool = ctx->rx_pktbuf_pools[socket_id];
    thread->tx_pktbuf_pool = ctx->tx_pktbuf_pools[socket_id];
    LIST_INIT(&thread->port_list);

    char name[32];
    snprintf(name, sizeof(name), "sockreq_ring_%u", id);
    thread->sock_req_ring = rte_ring_create(
                               name,
                               UHD_DPDK_MAX_PENDING_SOCK_REQS,
                               socket_id,
                               RING_F_SC_DEQ
                            );
    if (!thread->sock_req_ring)
        return -ENOMEM;
    return 0;
}


int uhd_dpdk_init(int argc, char **argv, unsigned int num_ports,
                  int *port_thread_mapping, int num_mbufs, int mbuf_cache_size,
                  int mtu)
{
    /* Init context only once */
    if (ctx)
        return 1;

    if ((num_ports == 0) || (port_thread_mapping == NULL)) {
        return -EINVAL;
    }

    /* Grabs arguments intended for DPDK's EAL */
    int ret = rte_eal_init(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

    ctx = (struct uhd_dpdk_ctx *) rte_zmalloc("uhd_dpdk_ctx", sizeof(*ctx), rte_socket_id());
    if (!ctx)
        return -ENOMEM;

    ctx->num_threads = rte_lcore_count();
    if (ctx->num_threads <= 1)
        rte_exit(EXIT_FAILURE, "Error: No worker threads enabled\n");

    /* Check that we have ports to send/receive on */
    ctx->num_ports = rte_eth_dev_count();
    if (ctx->num_ports < 1)
        rte_exit(EXIT_FAILURE, "Error: Found no ports\n");
    if (ctx->num_ports < num_ports)
        rte_exit(EXIT_FAILURE, "Error: User requested more ports than available\n");

    /* Get memory for thread and port data structures */
    ctx->threads = rte_zmalloc("uhd_dpdk_thread", RTE_MAX_LCORE*sizeof(struct uhd_dpdk_thread), 0);
    if (!ctx->threads)
        rte_exit(EXIT_FAILURE, "Error: Could not allocate memory for thread data\n");
    ctx->ports = rte_zmalloc("uhd_dpdk_port", ctx->num_ports*sizeof(struct uhd_dpdk_port), 0);
    if (!ctx->ports)
        rte_exit(EXIT_FAILURE, "Error: Could not allocate memory for port data\n");

    /* Initialize the thread data structures */
    for (int i = rte_get_next_lcore(-1, 1, 0);
        (i < RTE_MAX_LCORE);
        i = rte_get_next_lcore(i, 1, 0))
    {
        /* Do one mempool of RX/TX per socket */
        unsigned int socket_id = rte_lcore_to_socket_id(i);
        /* FIXME Probably want to take into account actual number of ports per socket */
        if (ctx->tx_pktbuf_pools[socket_id] == NULL) {
            /* Creates a new mempool in memory to hold the mbufs.
             * This is done for each CPU socket
             */
            const int mbuf_size = mtu + 2048 + RTE_PKTMBUF_HEADROOM;
            char name[32];
            snprintf(name, sizeof(name), "rx_mbuf_pool_%u", socket_id);
            ctx->rx_pktbuf_pools[socket_id] = rte_pktmbuf_pool_create(
                                               name,
                                               ctx->num_ports*num_mbufs,
                                               mbuf_cache_size,
                                               0,
                                               mbuf_size,
                                               socket_id
                                           );
            snprintf(name, sizeof(name), "tx_mbuf_pool_%u", socket_id);
            ctx->tx_pktbuf_pools[socket_id] = rte_pktmbuf_pool_create(
                                               name,
                                               ctx->num_ports*num_mbufs,
                                               mbuf_cache_size,
                                               0,
                                               mbuf_size,
                                               socket_id
                                           );
            if ((ctx->rx_pktbuf_pools[socket_id]== NULL) ||
                (ctx->tx_pktbuf_pools[socket_id]== NULL))
                rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
        }

        if (uhd_dpdk_thread_init(&ctx->threads[i], i) < 0)
            rte_exit(EXIT_FAILURE, "Error initializing thread %i\n", i);
    }

    unsigned master_lcore = rte_get_master_lcore();

    /* Assign ports to threads and initialize the port data structures */
    for (unsigned int i = 0; i < num_ports; i++) {
        int thread_id = port_thread_mapping[i];
        if (thread_id < 0)
            continue;
        if (((unsigned int) thread_id) == master_lcore)
            RTE_LOG(WARNING, EAL, "User requested master lcore for port %u\n", i);
        if (ctx->threads[thread_id].id != (unsigned int) thread_id)
            rte_exit(EXIT_FAILURE, "Requested inactive lcore %u for port %u\n", (unsigned int) thread_id, i);

        struct uhd_dpdk_port *port = &ctx->ports[i];
        port->id = i;
        port->parent = &ctx->threads[thread_id];
        ctx->threads[thread_id].num_ports++;
        LIST_INSERT_HEAD(&ctx->threads[thread_id].port_list, port, port_entry);

        /* Initialize port. */
        if (uhd_dpdk_port_init(port, port->parent->rx_pktbuf_pool, mtu) != 0)
            rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
                    i);
    }

    RTE_LOG(INFO, EAL, "Init DONE!\n");

    /* FIXME: Create functions to do this */
    RTE_LOG(INFO, EAL, "Starting I/O threads!\n");

    for (int i = rte_get_next_lcore(-1, 1, 0);
        (i < RTE_MAX_LCORE);
        i = rte_get_next_lcore(i, 1, 0))
    {
        struct uhd_dpdk_thread *t = &ctx->threads[i];
        if (!LIST_EMPTY(&t->port_list)) {
            rte_eal_remote_launch(_uhd_dpdk_driver_main, NULL, ctx->threads[i].id);
        }
    }
    return 0;
}

/* FIXME: This will be changed once we have functions to handle the threads */
int uhd_dpdk_destroy(void)
{
    if (!ctx)
        return -ENODEV;

    struct uhd_dpdk_config_req *req = (struct uhd_dpdk_config_req *) rte_zmalloc(NULL, sizeof(*req), 0);
    if (!req)
        return -ENOMEM;

    req->req_type = UHD_DPDK_LCORE_TERM;

    for (int i = rte_get_next_lcore(-1, 1, 0);
        (i < RTE_MAX_LCORE);
        i = rte_get_next_lcore(i, 1, 0))
    {
        struct uhd_dpdk_thread *t = &ctx->threads[i];

        if (LIST_EMPTY(&t->port_list))
            continue;

        if (rte_eal_get_lcore_state(t->id) == FINISHED)
            continue;

        pthread_mutex_init(&req->mutex, NULL);
        pthread_cond_init(&req->cond, NULL);
        pthread_mutex_lock(&req->mutex);
        if (rte_ring_enqueue(t->sock_req_ring, req)) {
            pthread_mutex_unlock(&req->mutex);
            RTE_LOG(ERR, USER2, "Failed to terminate thread %d\n", i);
            rte_free(req);
            return -ENOSPC;
        }
        /* pthread_cond_timedwait() expects an absolute timestamp,
         * not a relative duration */
        struct timespec timeout;
        clock_gettime(CLOCK_REALTIME, &timeout);
        timeout.tv_sec += 1;
        pthread_cond_timedwait(&req->cond, &req->mutex, &timeout);
        pthread_mutex_unlock(&req->mutex);
    }

    rte_free(req);
    return 0;
}
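
The worker-side handling of these requests is not shown here; a sketch of how the lcore loop might acknowledge UHD_DPDK_LCORE_TERM, matching the mutex/cond handshake above (an assumption, not the actual _uhd_dpdk_driver_main code):

/* Sketch: worker-side handling of one config request dequeued from
 * thread->sock_req_ring. Wakes the waiter blocked in uhd_dpdk_destroy().
 * Returns nonzero if the lcore should exit its loop. */
static int handle_config_req(struct uhd_dpdk_config_req *req)
{
    if (req->req_type == UHD_DPDK_LCORE_TERM) {
        pthread_mutex_lock(&req->mutex);
        pthread_cond_signal(&req->cond);
        pthread_mutex_unlock(&req->mutex);
        return 1;
    }
    return 0;
}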
Example #19
/**
 * CALLED BY NF:
 * Initialises everything we need
 *
 * Returns the number of arguments parsed by both rte_eal_init and
 * parse_nflib_args offset by 1.  This is used by getopt in the NF's
 * code.  The offsetting by one accounts for getopt parsing "--" which
 * increments optind by 1 each time.
 */
int
onvm_nf_init(int argc, char *argv[], const char *nf_tag) {
        const struct rte_memzone *mz;
	const struct rte_memzone *mz_scp;
        struct rte_mempool *mp;
	struct onvm_service_chain **scp;
        int retval_eal, retval_parse, retval_final;

        if ((retval_eal = rte_eal_init(argc, argv)) < 0)
                return -1;

        /* Modify argc and argv to conform to getopt rules for parse_nflib_args */
        argc -= retval_eal; argv += retval_eal;

        /* Reset getopt global variables opterr and optind to their default values */
        opterr = 0; optind = 1;

        if ((retval_parse = parse_nflib_args(argc, argv)) < 0)
                rte_exit(EXIT_FAILURE, "Invalid command-line arguments\n");

        /*
         * Calculate the offset that the nf will use to modify argc and argv for its
         * getopt call. This is the sum of the number of arguments parsed by
         * rte_eal_init and parse_nflib_args. This will be decremented by 1 to assure
         * getopt is looking at the correct index since optind is incremented by 1 each
         * time "--" is parsed.
         * This is the value that will be returned if initialization succeeds.
         */
        retval_final = (retval_eal + retval_parse) - 1;

        /* Reset getopt global variables opterr and optind to their default values */
        opterr = 0; optind = 1;

        /* Lookup mempool for nf_info struct */
        nf_info_mp = rte_mempool_lookup(_NF_MEMPOOL_NAME);
        if (nf_info_mp == NULL)
                rte_exit(EXIT_FAILURE, "No Client Info mempool - bye\n");

        /* Initialize the info struct */
        nf_info = ovnm_nf_info_init(nf_tag);

        mp = rte_mempool_lookup(PKTMBUF_POOL_NAME);
        if (mp == NULL)
                rte_exit(EXIT_FAILURE, "Cannot get mempool for mbufs\n");

        mz = rte_memzone_lookup(MZ_CLIENT_INFO);
        if (mz == NULL)
                rte_exit(EXIT_FAILURE, "Cannot get tx info structure\n");
        tx_stats = mz->addr;

	mz_scp = rte_memzone_lookup(MZ_SCP_INFO);
	if (mz_scp == NULL)
		rte_exit(EXIT_FAILURE, "Cannot get service chain info structre\n");
	scp = mz_scp->addr;
	default_chain = *scp;

	onvm_sc_print(default_chain);

        nf_info_ring = rte_ring_lookup(_NF_QUEUE_NAME);
        if (nf_info_ring == NULL)
                rte_exit(EXIT_FAILURE, "Cannot get nf_info ring");

        /* Put this NF's info struct onto queue for manager to process startup */
        if (rte_ring_enqueue(nf_info_ring, nf_info) < 0) {
                rte_mempool_put(nf_info_mp, nf_info); // give back memory
                rte_exit(EXIT_FAILURE, "Cannot send nf_info to manager");
        }

        /* Wait for a client id to be assigned by the manager */
        RTE_LOG(INFO, APP, "Waiting for manager to assign an ID...\n");
        for (; nf_info->status == (uint16_t)NF_WAITING_FOR_ID ;) {
                sleep(1);
        }

        /* This NF is trying to declare an ID already in use. */
        if (nf_info->status == NF_ID_CONFLICT) {
                rte_mempool_put(nf_info_mp, nf_info);
                rte_exit(NF_ID_CONFLICT, "Selected ID already in use. Exiting...\n");
        } else if(nf_info->status == NF_NO_IDS) {
                rte_mempool_put(nf_info_mp, nf_info);
                rte_exit(NF_NO_IDS, "There are no ids available for this NF\n");
        } else if(nf_info->status != NF_STARTING) {
                rte_mempool_put(nf_info_mp, nf_info);
                rte_exit(EXIT_FAILURE, "Error occurred during manager initialization\n");
        }
        RTE_LOG(INFO, APP, "Using Instance ID %d\n", nf_info->instance_id);
        RTE_LOG(INFO, APP, "Using Service ID %d\n", nf_info->service_id);

        /* Now, map rx and tx rings into client space */
        rx_ring = rte_ring_lookup(get_rx_queue_name(nf_info->instance_id));
        if (rx_ring == NULL)
                rte_exit(EXIT_FAILURE, "Cannot get RX ring - is server process running?\n");

        tx_ring = rte_ring_lookup(get_tx_queue_name(nf_info->instance_id));
        if (tx_ring == NULL)
                rte_exit(EXIT_FAILURE, "Cannot get TX ring - is server process running?\n");

        /* Tell the manager we're ready to receive packets */
        nf_info->status = NF_RUNNING;

        RTE_LOG(INFO, APP, "Finished Process Init.\n");
        return retval_final;
}