// Thread which will handle dequeing and re-enqueing based on the status // and the flags for all ports in the output buffer void *output_monitor_routine(void *arg) { packet_t in_pkt ; ip_address_t address ; int dest_port=0 ; int loop_count= 0 ; element_to_queue * element ; while(!die) { // Only care dequing if there are elements. if((output_buffer->size > 0)) { // This will indeed dequeue the packet, but we may // have to put it back if the port isn't ready. queue_lock(output_buffer) ; dequeue(output_buffer,&in_pkt) ; queue_unlock(output_buffer) ; // Fetch the IP & lookup destination port ip_address_copy(&(in_pkt.address),&address); dest_port = cam_lookup_address(&address) ; if((dest_port != -1) && (dest_port < 4)) { // Wait for the lock port_lock(&(out_port[dest_port])) ; // If the flag is busy from the last write, then // we have to put the packet back in the queue and just // have to wait until we get to it again. if(out_port[dest_port].flag) { element = calloc(1,sizeof(element_to_queue)) ; packet_copy(&in_pkt,&(element->packet)); queue_lock(output_buffer) ; enqueue(element,output_buffer) ; queue_unlock(output_buffer) ; port_unlock(&(out_port[dest_port])) ; continue ; } // Port ready to be written , so go ahead and write. packet_copy(&in_pkt,&(out_port[dest_port].packet)); out_port[dest_port].flag = TRUE ; port_unlock(&(out_port[dest_port])) ; } } // Make sure it tried to at least deque 5 elements, before we // make it sleep. if(loop_count > LOOP_COUNT) { loop_count = 0 ; sleep() ; } else loop_count++ ; } }
/*
 * Copy every field of *source_packet_ptr into *dest_packet_ptr.
 * The address field is copied with ip_address_copy; the payload is
 * copied by assignment.
 *
 * A NULL argument is a caller bug: the error is reported on stderr and
 * the process terminates with a failure status.
 */
void packet_copy(packet_t *source_packet_ptr, packet_t *dest_packet_ptr)
{
    /* Error check on the input parameters. */
    if (source_packet_ptr == NULL || dest_packet_ptr == NULL) {
        /* Diagnostics belong on stderr, not stdout. */
        fprintf(stderr, "Error in packet_copy\n");
        /* exit(0) would report SUCCESS to the parent on a fatal error. */
        exit(EXIT_FAILURE);
    }

    /* Copy the fields; the address uses the dedicated copy routine. */
    ip_address_copy(&(source_packet_ptr->address),
                    &(dest_packet_ptr->address));
    dest_packet_ptr->payload = source_packet_ptr->payload;
}
/*
 * Find the LISP-GPE adjacency for (remote RLOC, VNI) on the L3
 * sub-interface matching the local RLOC, creating it if it does not
 * exist, and take a reference (lock) on it.
 *
 * pair             - local/remote locator pair identifying the adjacency.
 * overlay_table_id - overlay FIB table id used to find/create the L3
 *                    sub-interface.
 * vni              - virtual network identifier.
 *
 * Returns the pool index of the (now locked) adjacency.
 */
index_t
lisp_gpe_adjacency_find_or_create_and_lock (const locator_pair_t * pair,
					    u32 overlay_table_id, u32 vni)
{
  const lisp_gpe_sub_interface_t *l3s;
  const lisp_gpe_tunnel_t *lgt;
  lisp_gpe_adjacency_t *ladj;
  index_t lai, l3si;

  /*
   * first find the L3 sub-interface that corresponds to the local-rloc and vni
   * (this takes a lock on the sub-interface)
   */
  l3si = lisp_gpe_sub_interface_find_or_create_and_lock (&pair->lcl_loc,
							 overlay_table_id,
							 vni);
  l3s = lisp_gpe_sub_interface_get (l3si);

  /*
   * find an existing or create a new adj keyed on (remote-rloc, sw_if_index)
   */
  lai = lisp_adj_find (&pair->rmt_loc, l3s->sw_if_index);

  if (INDEX_INVALID == lai)
    {
      /* No existing adjacency: allocate and initialise a new one. */
      pool_get (lisp_adj_pool, ladj);
      clib_memset (ladj, 0, sizeof (*ladj));
      /* pool index is the pointer offset from the pool base */
      lai = (ladj - lisp_adj_pool);

      ip_address_copy (&ladj->remote_rloc, &pair->rmt_loc);
      ladj->vni = vni;
      /* transfer the sub-interface lock taken above to the adj */
      ladj->lisp_l3_sub_index = l3si;
      ladj->sw_if_index = l3s->sw_if_index;

      /* if vni is non-default, set the Instance-ID-present flag */
      if (ladj->vni)
	ladj->flags = LISP_GPE_FLAGS_I;

      /* work in lisp-gpe not legacy mode */
      ladj->flags |= LISP_GPE_FLAGS_P;

      /*
       * find the tunnel that will provide the underlying transport
       * and hence the rewrite.
       * The RLOC FIB index is default table - always.
       */
      ladj->tunnel_index = lisp_gpe_tunnel_find_or_create_and_lock (pair, 0);

      lgt = lisp_gpe_tunnel_get (ladj->tunnel_index);

      /*
       * become a child of the RLOC FIB entry so we are updated when
       * its reachability changes, allowing us to re-stack the midchains
       */
      ladj->fib_entry_child_index = fib_entry_child_add (lgt->fib_entry_index,
							 FIB_NODE_TYPE_LISP_ADJ,
							 lai);

      /* make the new adj discoverable by subsequent lisp_adj_find calls */
      lisp_adj_insert (&ladj->remote_rloc, ladj->sw_if_index, lai);
    }
  else
    {
      /*
       * Adjacency already exists and already holds a sub-interface lock;
       * unlock the interface from the find above to balance the refcount.
       */
      lisp_gpe_sub_interface_unlock (l3si);

      ladj = lisp_gpe_adjacency_get_i (lai);
    }
  /* take a reference on behalf of the caller */
  ladj->locks++;

  return (lai);
}
/*
 * Add or delete a LISP-GPE IP tunnel, keyed on (dst EID prefix,
 * dst locator, vni).
 *
 * a             - add/del arguments: is_add selects the operation; slocator/
 *                 dlocator, deid, vni and the foreach_copy_field members
 *                 populate the tunnel on add.
 * tun_index_res - if non-NULL, receives the pool index of the newly
 *                 created tunnel on a successful add.
 *
 * Returns 0 on success or a VNET_API_ERROR_* / lisp_gpe_rewrite code
 * otherwise.
 * NOTE(review): the return type is u32 but error paths return
 * VNET_API_ERROR_* values — confirm callers treat the result as signed.
 */
static u32
add_del_ip_tunnel (vnet_lisp_gpe_add_del_fwd_entry_args_t *a,
                   u32 * tun_index_res)
{
  lisp_gpe_main_t * lgm = &lisp_gpe_main;
  lisp_gpe_tunnel_t *t = 0;
  uword * p;
  int rv;
  lisp_gpe_tunnel_key_t key;

  /* prepare tunnel key: zero first so padding doesn't perturb the hash */
  memset(&key, 0, sizeof(key));
  ip_prefix_copy(&key.eid, &gid_address_ippref(&a->deid));
  ip_address_copy(&key.dst_loc, &a->dlocator);
  key.iid = clib_host_to_net_u32 (a->vni);

  /* look up any existing tunnel with this key */
  p = mhash_get (&lgm->lisp_gpe_tunnel_by_key, &key);

  if (a->is_add)
    {
      /* adding a tunnel: tunnel must not already exist */
      if (p)
        return VNET_API_ERROR_INVALID_VALUE;

      if (a->decap_next_index >= LISP_GPE_INPUT_N_NEXT)
        return VNET_API_ERROR_INVALID_DECAP_NEXT;

      pool_get_aligned (lgm->tunnels, t, CLIB_CACHE_LINE_BYTES);
      memset (t, 0, sizeof (*t));

      /* copy the scalar fields listed in foreach_copy_field from arg struct */
#define _(x) t->x = a->x;
      foreach_copy_field;
#undef _

      ip_address_copy(&t->src, &a->slocator);
      ip_address_copy(&t->dst, &a->dlocator);

      /* if vni is non-default, mark the Instance ID bit and record it */
      if (a->vni)
        {
          t->flags = LISP_GPE_FLAGS_I;
          t->vni = a->vni;
        }

      /* work in lisp-gpe not legacy mode */
      t->flags |= LISP_GPE_FLAGS_P;
      /* inner protocol follows the address family of the EID prefix */
      t->next_protocol = ip_prefix_version(&key.eid) == IP4 ?
          LISP_GPE_NEXT_PROTO_IP4 : LISP_GPE_NEXT_PROTO_IP6;

      /* build the encap rewrite string; undo the allocation on failure */
      rv = lisp_gpe_rewrite (t);

      if (rv)
        {
          pool_put(lgm->tunnels, t);
          return rv;
        }

      /* index the new tunnel by key (value is its pool index) */
      mhash_set(&lgm->lisp_gpe_tunnel_by_key, &key, t - lgm->tunnels, 0);

      /* return tunnel index to the caller if requested */
      if (tun_index_res)
        tun_index_res[0] = t - lgm->tunnels;
    }
  else
    {
      /* deleting a tunnel: tunnel must exist */
      if (!p)
        {
          clib_warning("Tunnel for eid %U doesn't exist!",
                       format_gid_address, &a->deid);
          return VNET_API_ERROR_NO_SUCH_ENTRY;
        }

      t = pool_elt_at_index(lgm->tunnels, p[0]);

      /* remove the key mapping, then release the tunnel's resources */
      mhash_unset(&lgm->lisp_gpe_tunnel_by_key, &key, 0);

      vec_free(t->rewrite);
      pool_put(lgm->tunnels, t);
    }

  return 0;
}