Example #1
void freeFlows()
{
	/* free each flow, then the container that tracked them */
	for (int i = 0; i < flows->count; i++)
		flow_free(flows->data[i]);
	free(flows);

	for (int i = 0; i < virtual_flows->count; i++)
		flow_free(virtual_flows->data[i]);
	free(virtual_flows);
}
Example #2
void vxbox_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (flow->mask) {
		struct sw_flow_mask *mask = flow->mask;

		/* vxbox-lock is required to protect mask-refcount and
		 * mask list. */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count) {
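			/* last reference dropped: unlink the mask and free it,
			 * via an RCU callback when the free is deferred */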
			list_del_rcu(&mask->list);
			if (deferred)
				call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
			else
				kfree(mask);
		}
	}

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}
Example #3
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	/* a deferred free waits for an RCU grace period before releasing the flow */
	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}
Example #4
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	int i;

	if (!table)
		return;

	for (i = 0; i < table->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(table->buckets, i);
		struct hlist_node *node, *n;

		hlist_for_each_entry_safe(flow, node, n, head, hash_node) {
			hlist_del_init_rcu(&flow->hash_node);
			flow_free(flow);
		}
	}

	/* release the bucket array and the table itself */
	flex_array_free(table->buckets);
	kfree(table);
}
Example #5
int
flow_queue_clear(void)
{
    pthread_mutex_lock(&mutex_queue);

    flow_t *f;

    while(flow_qlen > 0) {
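        /* detach the head of the queue and free it */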
        f = flow_queue_first;
        flow_queue_first = flow_queue_first->next;
        flow_free(f);
        flow_qlen--;
    }
    flow_queue_first = NULL;
    flow_queue_last = NULL;
    flow_qlen = 0;

    pthread_mutex_unlock(&mutex_queue);
    return 0;
}
Example #6
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	int i;

	if (!table)
		return;

	if (table->keep_flows)
		goto skip_flows;

	for (i = 0; i < table->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(table->buckets, i);
		struct hlist_node *node, *n;
		int ver = table->node_ver;

		hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) {
			hlist_del_rcu(&flow->hash_node[ver]);
			flow_free(flow);
		}
	}

skip_flows:
	/* release the bucket array and the table itself */
	flex_array_free(table->buckets);
	kfree(table);
}
Example #7
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	/* the RCU grace period has elapsed, so no reader can still hold the flow */
	flow_free(flow);
}
Example #8
/* Add a packet_t object to the flow's packet_t chain */
int 
flow_add_packet(flow_t *f, packet_t *packet, register BOOL src)
{
	pthread_mutex_lock(&(f->hmb->mutex));

	if( f->http == FALSE ){
		if( f->pkt_src_n >= 5){
			/* No HTTP payload seen after several packets: the flow
			 * is not an HTTP flow, so remove it */
			packet_free(packet);
			flow_free(flow_hash_delete(f));
			pthread_mutex_unlock(&(f->hmb->mutex));
			return 1;
		}
	}

	/* TH_RST:
	 * the flow was reset by the sender or the receiver */
	if((packet->tcp_flags & TH_RST) == TH_RST){
		if( f->pkts_src < 4){
			// Flow with incomplete information. Drop it.
			packet_free(packet);
			flow_free(flow_hash_delete(f));
			pthread_mutex_unlock(&(f->hmb->mutex));
			return 1;
		}else{
			cal_packet(f, packet, src);
			packet_free(packet);
			f->close = TRUE;
			flow_queue_enq(flow_hash_delete(f));
			pthread_mutex_unlock(&(f->hmb->mutex));
			return 0;
		}
	}

	/* TH_ACK: final ACK of the three-way handshake */
	if(f->pkts_src == 1 && src == TRUE){
		if((packet->tcp_flags & TH_ACK) == TH_ACK){
			f->ack2_sec = packet->cap_sec;
			f->ack2_usec = packet->cap_usec;
			/* round-trip time in microseconds */
			f->rtt = (f->ack2_sec - f->syn_sec) * 1000000 + (f->ack2_usec - f->syn_usec);

			cal_packet(f, packet, src);
			packet_free(packet);
			pthread_mutex_unlock(&(f->hmb->mutex));
			return 0;
		}
	}

	/* TH_FIN:
	 * the flow is closed once FINs from both sides have been seen */
	if( (packet->tcp_flags & TH_FIN) == TH_FIN){
		if( src == TRUE ){
			f->close = CLIENT_CLOSE;
		}else{
			f->close = SERVER_CLOSE;
		}		
		cal_packet(f, packet, src);
		packet_free(packet);

		if(f->close == CLIENT_CLOSE || f->close == SERVER_CLOSE){	/* note: with '||' a single FIN is enough to close the flow */
			/* flow finished and send it to the flow queue */
			f->close = TRUE;
			flow_queue_enq(flow_hash_delete(f));
		}

		pthread_mutex_unlock(&(f->hmb->mutex));
		return 0;
	}

	/* other packets; sequence numbers are not checked */
	if(src == TRUE){
		if( f->pkts_src == 0){
			/* first packet from the source side: record its timestamp as the SYN */
			f->syn_sec = packet->cap_sec;
			f->syn_usec = packet->cap_usec;

			cal_packet(f, packet, src);
			packet_free(packet);
		}else{
			if(packet->tcp_flags == TH_SYN){
				/* repeated SYN */
				flow_reset(f);		// Reset flow
				f->syn_sec = packet->cap_sec;
				f->syn_usec = packet->cap_usec;
				cal_packet(f, packet, src);
				packet_free(packet);
			}else{
				if(packet->http != 0 ){
					f->http = TRUE;
					/*
					 * only packets with HTTP payload
					 * are hooked on the packet chain
					 */
					hook_packet(f, packet, src);
					cal_packet(f, packet, src);
				}else{
					cal_packet(f, packet, src);
					packet_free(packet);
				}
			}
		}
	}else{
		if(packet->http != 0){
			f->http = TRUE;
			/*
			 * only packets with HTTP payload
			 * are hooked on the packet chain
			 */
			hook_packet(f, packet, src);
			cal_packet(f, packet, src);
		}else{
			cal_packet(f, packet, src);
			packet_free(packet);
		}
	}

	pthread_mutex_unlock(&(f->hmb->mutex));
	return 0;
}