Example #1
static inline int
session_send_evt_to_thread (void *data, void *args, u32 thread_index,
			    session_evt_type_t evt_type)
{
  session_event_t *evt;
  svm_msg_q_msg_t msg;
  svm_msg_q_t *mq;
  u32 tries = 0, max_tries;

  mq = session_main_get_vpp_event_queue (thread_index);
  while (svm_msg_q_try_lock (mq))
    {
      max_tries = vlib_get_current_process (vlib_get_main ())? 1e6 : 3;
      if (tries++ == max_tries)
	{
	  SESSION_DBG ("failed to enqueue evt");
	  return -1;
	}
    }
  if (PREDICT_FALSE (svm_msg_q_ring_is_full (mq, SESSION_MQ_IO_EVT_RING)))
    {
      svm_msg_q_unlock (mq);
      return -2;
    }
  msg = svm_msg_q_alloc_msg_w_ring (mq, SESSION_MQ_IO_EVT_RING);
  if (PREDICT_FALSE (svm_msg_q_msg_is_invalid (&msg)))
    {
      svm_msg_q_unlock (mq);
      return -2;
    }
  evt = (session_event_t *) svm_msg_q_msg_data (mq, &msg);
  evt->event_type = evt_type;
  switch (evt_type)
    {
    case SESSION_CTRL_EVT_RPC:
      evt->rpc_args.fp = data;
      evt->rpc_args.arg = args;
      break;
    case SESSION_IO_EVT_TX:
    case SESSION_IO_EVT_TX_FLUSH:
    case SESSION_IO_EVT_BUILTIN_RX:
      evt->session_index = *(u32 *) data;
      break;
    case SESSION_IO_EVT_BUILTIN_TX:
    case SESSION_CTRL_EVT_CLOSE:
      evt->session_handle = session_handle ((session_t *) data);
      break;
    default:
      clib_warning ("evt unhandled!");
      svm_msg_q_unlock (mq);
      return -1;
    }

  svm_msg_q_add_and_unlock (mq, &msg);
  return 0;
}
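Every snippet on this page revolves around the PREDICT_TRUE / PREDICT_FALSE branch-hint macros. For reference, here is a minimal standalone sketch of how such macros are commonly defined on GCC/Clang via __builtin_expect (the exact clib definitions may differ slightly; this is an illustration, not the VPP source):

#include <stdio.h>

/* Branch-hint macros, assuming a GCC/Clang-compatible compiler. */
#define PREDICT_TRUE(x)  __builtin_expect ((x) != 0, 1)
#define PREDICT_FALSE(x) __builtin_expect ((x) != 0, 0)

static int
parse_len (int len)
{
  /* The error path is expected to be rare, so hint the compiler to keep
   * the hot path (a valid length) fall-through and branch-free. */
  if (PREDICT_FALSE (len < 0))
    {
      fprintf (stderr, "invalid length %d\n", len);
      return -1;
    }
  return len;
}

int
main (void)
{
  return parse_len (42) == 42 ? 0 : 1;
}

The macros only provide static branch-prediction and code-layout hints; they do not change program semantics, which is why they appear on error paths, ring-full checks and trace checks throughout the examples below.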
Example #2
File: phash.c Project: vnet/clib
static void init_keys_direct_u32 (phash_main_t * pm)
{
    int n_keys_left, b_mask, a_shift;
    u32 seed;
    phash_key_t * k;

    seed = pm->hash_seed;
    b_mask = (1 << pm->b_bits) - 1;
    a_shift = BITS (seed) - pm->a_bits;

    k = pm->keys;
    n_keys_left = vec_len (pm->keys);

    while (n_keys_left >= 2)
    {
        u32 x0, y0, z0;
        u32 x1, y1, z1;

        x0 = y0 = z0 = seed;
        x1 = y1 = z1 = seed;
        x0 += (u32) k[0].key;
        x1 += (u32) k[1].key;

        hash_mix32 (x0, y0, z0);
        hash_mix32 (x1, y1, z1);

        k[0].b = z0 & b_mask;
        k[1].b = z1 & b_mask;
        k[0].a = z0 >> a_shift;
        k[1].a = z1 >> a_shift;
        if (PREDICT_FALSE (a_shift >= BITS (z0)))
            k[0].a = k[1].a = 0;

        k += 2;
        n_keys_left -= 2;
    }

    if (n_keys_left >= 1)
    {
        u32 x0, y0, z0;

        x0 = y0 = z0 = seed;
        x0 += k[0].key;

        hash_mix32 (x0, y0, z0);

        k[0].b = z0 & b_mask;
        k[0].a = z0 >> a_shift;
        if (PREDICT_FALSE (a_shift >= BITS (z0)))
            k[0].a = 0;

        k += 1;
        n_keys_left -= 1;
    }
}
Example #3
/*
 * Enqueue data for delivery to session peer. Does not notify peer of enqueue
 * event but on request can queue notification events for later delivery by
 * calling stream_server_flush_enqueue_events().
 *
 * @param tc Transport connection for which data is to be enqueued
 * @param b Buffer to be enqueued
 * @param offset Offset at which to start enqueueing if out-of-order
 * @param queue_event Flag to indicate if peer is to be notified or if event
 *                    is to be queued. The former is useful when more data is
 *                    enqueued and only one event is to be generated.
 * @param is_in_order Flag to indicate if data is in order
 * @return Number of bytes enqueued or a negative value if enqueueing failed.
 */
int
session_enqueue_stream_connection (transport_connection_t * tc,
				   vlib_buffer_t * b, u32 offset,
				   u8 queue_event, u8 is_in_order)
{
  session_t *s;
  int enqueued = 0, rv, in_order_off;

  s = session_get (tc->s_index, tc->thread_index);

  if (is_in_order)
    {
      enqueued = svm_fifo_enqueue (s->rx_fifo,
				   b->current_length,
				   vlib_buffer_get_current (b));
      if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_NEXT_PRESENT)
			 && enqueued >= 0))
	{
	  in_order_off = enqueued > b->current_length ? enqueued : 0;
	  rv = session_enqueue_chain_tail (s, b, in_order_off, 1);
	  if (rv > 0)
	    enqueued += rv;
	}
    }
  else
    {
      rv = svm_fifo_enqueue_with_offset (s->rx_fifo, offset,
					 b->current_length,
					 vlib_buffer_get_current (b));
      if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && !rv))
	session_enqueue_chain_tail (s, b, offset + b->current_length, 0);
      /* if something was enqueued, report even this as success for ooo
       * segment handling */
      return rv;
    }

  if (queue_event)
    {
      /* Queue RX event on this fifo. Eventually these will need to be flushed
       * by calling stream_server_flush_enqueue_events () */
      session_worker_t *wrk;

      wrk = session_main_get_worker (s->thread_index);
      if (!(s->flags & SESSION_F_RX_EVT))
	{
	  s->flags |= SESSION_F_RX_EVT;
	  vec_add1 (wrk->session_to_enqueue[tc->proto], s->session_index);
	}
    }

  return enqueued;
}
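The header comment above documents the calling convention; a hypothetical caller could look like the sketch below. The callback name is illustrative and the include path is assumed, so treat this as a usage sketch rather than a verbatim VPP code path:

#include <vnet/session/session.h>

/* Hypothetical transport RX handler: hand received data to the session
 * layer and let it queue a single RX notification for later flushing. */
static int
example_transport_rx (transport_connection_t * tc, vlib_buffer_t * b)
{
  /* In-order data, so offset is ignored; queue_event = 1 asks the session
   * layer to record an RX event that the worker flushes later. */
  int rv = session_enqueue_stream_connection (tc, b, 0 /* offset */,
					      1 /* queue_event */,
					      1 /* is_in_order */);
  if (rv < 0)
    return rv;		/* enqueue failed, e.g. fifo full */
  return rv;		/* number of bytes enqueued */
}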
Example #4
static struct rte_mbuf * dpdk_replicate_packet_mb (vlib_buffer_t * b)
{
  vlib_main_t * vm = vlib_get_main();
  vlib_buffer_main_t * bm = vm->buffer_main;
  struct rte_mbuf * first_mb = 0, * new_mb, * pkt_mb, ** prev_mb_next = 0;
  u8 nb_segs, nb_segs_left;
  u32 copy_bytes;
  unsigned socket_id = rte_socket_id();

  ASSERT (bm->pktmbuf_pools[socket_id]);
  pkt_mb = ((struct rte_mbuf *)b)-1;
  nb_segs = pkt_mb->nb_segs;
  for (nb_segs_left = nb_segs; nb_segs_left; nb_segs_left--)
    {
      if (PREDICT_FALSE(pkt_mb == 0))
	{
	  clib_warning ("Missing %d mbuf chain segment(s):   "
			"(nb_segs = %d, nb_segs_left = %d)!",
			nb_segs - nb_segs_left, nb_segs, nb_segs_left);
	  if (first_mb)
	    rte_pktmbuf_free(first_mb);
	  return NULL;
	}
      new_mb = rte_pktmbuf_alloc (bm->pktmbuf_pools[socket_id]);
      if (PREDICT_FALSE(new_mb == 0))
	{
	  if (first_mb)
	    rte_pktmbuf_free(first_mb);
	  return NULL;
	}
      
      /*
       * Copy packet info into 1st segment.
       */
      if (first_mb == 0)
	{
	  first_mb = new_mb;
	  rte_pktmbuf_pkt_len (first_mb) = pkt_mb->pkt_len;
	  first_mb->nb_segs = pkt_mb->nb_segs;
	  first_mb->port = pkt_mb->port;
#ifdef DAW_FIXME // TX Offload support TBD
	  first_mb->vlan_macip = pkt_mb->vlan_macip;
	  first_mb->hash = pkt_mb->hash;
	  first_mb->ol_flags = pkt_mb->ol_flags
#endif
	}
      else
	{
Example #5
void cnat_port_free_v2 (
         cnat_portmap_v2_t *pm,
    	 int                index,
         port_pair_t        pair_type,
         u16                base_port,
         u16                static_port_range)
{
    cnat_portmap_v2_t *my_pm;
    uword bit;

    /* check for valid portmap */   
    if (PREDICT_FALSE(index > vec_len(pm))) {
        spp_printf(CNAT_INVALID_INDEX_TO_FREE_PORT, 0, 0);
        return;
    }

    my_pm = pm + index;
    bit = port2bit(base_port);

#if DEBUG > 0
    if(clib_bitmap_get_no_check(my_pm->bm, bit))
        ASSERT(clib_bitmap_get_no_check(my_pm->bm, bit) == 0); 
#endif

    cgn_clib_bitmap_set_no_check(my_pm->bm, bit);

    my_pm->inuse -= 1;
    if(base_port >= static_port_range) {
        /* Clear the full flag. we can have a new dynamic session now */
        my_pm->dyn_full = 0;
    }

    return;
}
Example #6
    unsigned int CompressedSet::Iterator::nextDoc(){

        // if the cursor has reached the end
        if(PREDICT_FALSE(++cursor == totalDocIdNum)) {
          lastAccessedDocId = NO_MORE_DOCS;
        } else {
             int iterBlockIndex = cursor >> BLOCK_SIZE_BIT;
             int offset = cursor & BLOCK_SIZE_MODULO;
             if( iterBlockIndex == compBlockNum  ) {
                 lastAccessedDocId = set->currentNoCompBlock[offset];
             } else { 
                 if (PREDICT_TRUE(offset)){
                    //lastAccessedDocId = iterDecompBlock[offset];
                    #ifdef PREFIX_SUM
                       lastAccessedDocId += (iterDecompBlock[offset]);
                    #else
                       lastAccessedDocId = iterDecompBlock[offset];
                    #endif  
                 } else {
                    // (offset==0) must be in one of the compressed blocks
                    Source src = set->sequenceOfCompBlocks.get(iterBlockIndex).getSource();
                    set->codec.Uncompress(src, &iterDecompBlock[0], DEFAULT_BATCH_SIZE);
                    #ifndef PREFIX_SUM
                      // postProcessBlock not needed if using integrated delta coding
                      // postProcessBlock(&iterDecompBlock[0], DEFAULT_BATCH_SIZE);
                    #endif       
                    // assert(uncompSize == DEFAULT_BATCH_SIZE);
                     lastAccessedDocId = iterDecompBlock[0];
                 }
             }
        }
        return lastAccessedDocId;
    }
Example #7
int
session_enqueue_dgram_connection (session_t * s,
				  session_dgram_hdr_t * hdr,
				  vlib_buffer_t * b, u8 proto, u8 queue_event)
{
  int enqueued = 0, rv, in_order_off;

  ASSERT (svm_fifo_max_enqueue_prod (s->rx_fifo)
	  >= b->current_length + sizeof (*hdr));

  svm_fifo_enqueue (s->rx_fifo, sizeof (session_dgram_hdr_t), (u8 *) hdr);
  enqueued = svm_fifo_enqueue (s->rx_fifo, b->current_length,
			       vlib_buffer_get_current (b));
  if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && enqueued >= 0))
    {
      in_order_off = enqueued > b->current_length ? enqueued : 0;
      rv = session_enqueue_chain_tail (s, b, in_order_off, 1);
      if (rv > 0)
	enqueued += rv;
    }
  if (queue_event)
    {
      /* Queue RX event on this fifo. Eventually these will need to be flushed
       * by calling stream_server_flush_enqueue_events () */
      session_worker_t *wrk;

      wrk = session_main_get_worker (s->thread_index);
      if (!(s->flags & SESSION_F_RX_EVT))
	{
	  s->flags |= SESSION_F_RX_EVT;
	  vec_add1 (wrk->session_to_enqueue[proto], s->session_index);
	}
    }
  return enqueued;
}
Example #8
replication_context_t *
replication_prep (vlib_main_t * vm,
                  vlib_buffer_t * b0,
                  u32 recycle_node_index,
                  u32 l2_packet)
{
  replication_main_t * rm = &replication_main;
  replication_context_t * ctx;
  uword cpu_number = vm->cpu_index;
  ip4_header_t * ip;
  u32 ctx_id;

  // Allocate a context, reserve context 0
  if (PREDICT_FALSE(rm->contexts[cpu_number] == 0))
    pool_get_aligned (rm->contexts[cpu_number], ctx, CLIB_CACHE_LINE_BYTES);
      
  pool_get_aligned (rm->contexts[cpu_number], ctx, CLIB_CACHE_LINE_BYTES);
  ctx_id = ctx - rm->contexts[cpu_number];

  // Save state from vlib buffer
  ctx->saved_clone_count = b0->clone_count;
  ctx->saved_free_list_index = b0->free_list_index;
  ctx->current_data = b0->current_data;

  // Set up vlib buffer hooks
  b0->clone_count = ctx_id;
  b0->free_list_index = rm->recycle_list_index;

  // Save feature state
  ctx->recycle_node_index = recycle_node_index;

  // Save vnet state
  memcpy (ctx->vnet_buffer, vnet_buffer(b0), sizeof(vnet_buffer_opaque_t));

  // Save packet contents
  ctx->l2_packet = l2_packet;
  ip = (ip4_header_t *)vlib_buffer_get_current (b0);
  if (l2_packet) {
    // Save ethernet header
    ctx->l2_header[0] = ((u64 *)ip)[0];
    ctx->l2_header[1] = ((u64 *)ip)[1];
    ctx->l2_header[2] = ((u64 *)ip)[2];
    // set ip to the true ip header
    ip = (ip4_header_t *)(((u8 *)ip) + vnet_buffer(b0)->l2.l2_len);
  }

  // Copy L3 fields. 
  // We need to save TOS for ip4 and ip6 packets. Fortunately the TOS field is 
  // in the first two bytes of both the ip4 and ip6 headers.
  ctx->ip_tos = *((u16 *)(ip));

  // Save the ip4 checksum as well. We just blindly save the corresponding two
  // bytes even for ip6 packets. 
  ctx->ip4_checksum = ip->checksum;

  return ctx;
}
Example #9
void cnat_portmap_dump_v2 (cnat_portmap_v2_t *pm, u16 print_limit)
{
    int i;
    u32 inuse =0;

    ASSERT(pm);

    for (i = 0; i < BITS_PER_INST; i++) {
        if (PREDICT_FALSE(clib_bitmap_get_no_check (pm->bm, i) == 0)) {
            if (PREDICT_TRUE(inuse++ < print_limit))
                PLATFORM_DEBUG_PRINT(" %d", bit2port(i));
        }
    }
    if (PREDICT_FALSE(inuse >= print_limit)) {
        PLATFORM_DEBUG_PRINT("%d printed, print limit is %d\n",
                inuse, print_limit);
    }
    PLATFORM_DEBUG_PRINT("\n");
}
Example #10
void vl_msg_api_cleanup_handler (void *the_msg)
{
    api_main_t *am = &api_main;
    u16 id = ntohs(*((u16 *)the_msg));

    if (PREDICT_FALSE(id >= vec_len(am->msg_cleanup_handlers))) {
        clib_warning ("_vl_msg_id too large: %d\n", id);
        return;
    }
    if (am->msg_cleanup_handlers[id])
        (*am->msg_cleanup_handlers[id])(the_msg);

    vl_msg_api_free(the_msg);
}
Example #11
/*
 * vl_msg_api_replay_handler
 */
void vl_msg_api_replay_handler(void *the_msg)
{
    api_main_t *am = &api_main;

    u16 id = ntohs(*((u16 *)the_msg));

    if (PREDICT_FALSE(id >= vec_len(am->msg_handlers))) {
        clib_warning ("_vl_msg_id too large: %d\n", id);
        return;
    }
    /* do NOT trace the message... */
    if (am->msg_handlers[id])
        (*am->msg_handlers[id])(the_msg);
    /* do NOT free the message buffer... */
}
Example #12
    //This method will not work after a call to flush()
    inline bool CompressedSet::find(unsigned int target) const {

        vector<uint32_t,AlignedSTLAllocator<uint32_t, 64>> myDecompBlock(DEFAULT_BATCH_SIZE, 0);
        //unsigned int lastId = lastAdded;
        if(PREDICT_FALSE(totalDocIdNum==0))
              return false;
        if (sizeOfCurrentNoCompBlock!=0){
            //int lastId = currentNoCompBlock[sizeOfCurrentNoCompBlock-1];
            if(sizeOfCurrentNoCompBlock > 0 && target > currentNoCompBlock[sizeOfCurrentNoCompBlock-1])
            {
              return false;
            }

            // first search noComp block
            if(baseListForOnlyCompBlocks.size()==0 || target>baseListForOnlyCompBlocks[baseListForOnlyCompBlocks.size()-1])
            {
              int i;
              for(i=0;i<sizeOfCurrentNoCompBlock;++i)
              {
                if(currentNoCompBlock[i] >= target)
                  break;
              }
              if(i == sizeOfCurrentNoCompBlock)
                return false;
              return currentNoCompBlock[i] == target;
            }
        }

        // if we have some CompBlocks
        // first find which block to decompress by looking into baseListForOnlyCompBlocks
        if(baseListForOnlyCompBlocks.size()>0) {
            // baseListForOnlyCompBlocks.size() must then be > 0
           int index = binarySearchInBaseListForBlockThatMayContainTarget(baseListForOnlyCompBlocks, 0, baseListForOnlyCompBlocks.size()-1, target);
           if(index<0)
             return false; // target is bigger than the biggest value

           ////uncompress block
           Source src = sequenceOfCompBlocks.get(index).getSource();
           size_t uncompSize = codec.Uncompress(src, &myDecompBlock[0] ,DEFAULT_BATCH_SIZE);
           return codec.findInArray(&myDecompBlock[0], uncompSize,target);
        }
        return false;
    }
Example #13
void
vl_msg_api_send_shmem (svm_queue_t * q, u8 * elem)
{
  api_main_t *am = &api_main;
  uword *trace = (uword *) elem;

  if (am->tx_trace && am->tx_trace->enabled)
    vl_msg_api_trace (am, am->tx_trace, (void *) trace[0]);

  /*
   * Announce a probable binary API client bug:
   * some client's input queue is stuffed.
   * The situation may be recoverable, or not.
   */
  if (PREDICT_FALSE
      (am->vl_clients /* vpp side */  && (q->cursize == q->maxsize)))
    clib_warning ("WARNING: client input queue at %llx is stuffed...", q);
  (void) svm_queue_add (q, elem, 0 /* nowait */ );
}
Example #14
static inline u32
is_pcp_pkt(spp_ctx_t *ctx, u32 addr, u16 port)
{
    cnat_vrfmap_t *my_vrfmap = NULL;
    u16  my_vrfmap_index;

    my_vrfmap_index = vrf_map_array[ctx->ru.rx.uidb_index];

    if (PREDICT_TRUE(my_vrfmap_index != VRF_MAP_ENTRY_EMPTY)) {

      my_vrfmap = cnat_map_by_vrf + my_vrfmap_index;

      if (PREDICT_FALSE( port ==  my_vrfmap->pcp_server_port)) {
             if(PREDICT_TRUE(addr == my_vrfmap->pcp_server_addr)) {
               return CNAT_SUCCESS;
             }
      }
    }

    return CNAT_NO_CONFIG;
}
Example #15
/**
 * Notify session peer that new data has been enqueued.
 *
 * @param s 	Stream session for which the event is to be generated.
 * @param lock 	Flag to indicate if call should lock message queue.
 *
 * @return 0 on success or negative number if failed to send notification.
 */
static inline int
session_enqueue_notify_inline (session_t * s)
{
  app_worker_t *app_wrk;
  u32 session_index;
  u8 n_subscribers;

  session_index = s->session_index;
  n_subscribers = svm_fifo_n_subscribers (s->rx_fifo);

  app_wrk = app_worker_get_if_valid (s->app_wrk_index);
  if (PREDICT_FALSE (!app_wrk))
    {
      SESSION_DBG ("invalid s->app_index = %d", s->app_wrk_index);
      return 0;
    }

  /* *INDENT-OFF* */
  SESSION_EVT_DBG(SESSION_EVT_ENQ, s, ({
      ed->data[0] = SESSION_IO_EVT_RX;
      ed->data[1] = svm_fifo_max_dequeue_prod (s->rx_fifo);
  }));
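As the comment suggests, this helper is the delivery half of the queue_event path from Examples #3 and #7: those enqueue functions set SESSION_F_RX_EVT and record the session in the worker's session_to_enqueue list, and the flush step they mention is what eventually produces the per-session RX notification generated here.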
Example #16
session_t *
session_alloc (u32 thread_index)
{
  session_worker_t *wrk = &session_main.wrk[thread_index];
  session_t *s;
  u8 will_expand = 0;
  pool_get_aligned_will_expand (wrk->sessions, will_expand,
				CLIB_CACHE_LINE_BYTES);
  /* If we have peekers, let them finish */
  if (PREDICT_FALSE (will_expand && vlib_num_workers ()))
    {
      clib_rwlock_writer_lock (&wrk->peekers_rw_locks);
      pool_get_aligned (wrk->sessions, s, CLIB_CACHE_LINE_BYTES);
      clib_rwlock_writer_unlock (&wrk->peekers_rw_locks);
    }
  else
    {
      pool_get_aligned (wrk->sessions, s, CLIB_CACHE_LINE_BYTES);
    }
  clib_memset (s, 0, sizeof (*s));
  s->session_index = s - wrk->sessions;
  s->thread_index = thread_index;
  return s;
}
Example #17
/*
 * cnat_port_alloc_v2
 * public ipv4 address/port allocator for dynamic ports
 *
 * 200K users / 20M translations means vec_len(cnat_portmap) will be
 * around 300.
 *
 */
cnat_errno_t
cnat_dynamic_port_alloc_v2 (
                 cnat_portmap_v2_t    *pm,
                 port_alloc_t          atype,
                 port_pair_t           pair_type,
                 u32                  *index,
                 u32                  *o_ipv4_address,
                 u16                  *o_port,
                 u16                  static_port_range
#ifndef NO_BULK_LOGGING
                 , bulk_alloc_size_t    bulk_size,
                  int *nfv9_log_req
#endif
                 , u16                   ip_n_to_1,
                  u32                  *rseed_ip
                 )
{
    int i;
    cnat_errno_t       my_err = CNAT_NO_POOL_ANY;
    cnat_portmap_v2_t *my_pm = 0;
    u16 start_bit;
    u16 new_port;
    uword bit_test_result;
    uword max_trys_to_find_port;

    ASSERT(index);
    ASSERT(o_ipv4_address);
    ASSERT(o_port);

    my_pm = cnat_dynamic_addr_alloc_from_pm(pm, atype, index, &my_err, ip_n_to_1, 
            rseed_ip);

    if (PREDICT_FALSE(my_pm == NULL)) {
        return (my_err);
    }
    if(PREDICT_FALSE(my_pm->dyn_full == 1)) {
        if (atype == PORT_ALLOC_DIRECTED) {
            return (CNAT_NOT_FOUND_DIRECT);
        } else {
            return (CNAT_NOT_FOUND_ANY);
        }
    }

#if DEBUG > 1
    PLATFORM_DEBUG_PRINT("ALLOC_PORT_V2: My_Instance_Number %d: IP addr 0x%x, Inuse %d\n",
           my_instance_number, my_pm->ipv4_address, my_pm->inuse);
#endif

    rseed_port = randq1(rseed_port);

    /*
     * Exclude the static port range for allocating dynamic ports
     */
    start_bit = (rseed_port) % (BITS_PER_INST - static_port_range);
    start_bit = start_bit + static_port_range;

#ifndef NO_BULK_LOGGING
    *nfv9_log_req = BULK_ALLOC_NOT_ATTEMPTED;
    if(BULK_ALLOC_SIZE_NONE != bulk_size)
    {
        /* We need the start port of the range to be aligned on an integer multiple
         * of bulk_size */
        max_trys_to_find_port = BITS_PER_INST/bulk_size;
        start_bit= ((start_bit + bulk_size -1)/bulk_size) * bulk_size;
    }
    else
#endif /* #ifndef NO_BULK_LOGGING */
    max_trys_to_find_port = BITS_PER_INST;

    /* Allocate a random port / port-pair */
    for (i = 0; i < max_trys_to_find_port;  i++) {

    /* start_bit is only a u16, so it can roll over and become zero */
    if (PREDICT_FALSE((start_bit >= BITS_PER_INST) ||
                    (start_bit < static_port_range))) {
                    start_bit = static_port_range;
#ifndef NO_BULK_LOGGING
        if(BULK_ALLOC_SIZE_NONE != bulk_size) {
            start_bit= ((start_bit + bulk_size -1)/bulk_size) * bulk_size;
        }
#endif /* #ifndef NO_BULK_LOGGING */
    }
        /* Scan forward from random position */
#ifndef NO_BULK_LOGGING
        if(BULK_ALLOC_SIZE_NONE != bulk_size) {
            bit_test_result = cgn_clib_bitmap_check_if_all(my_pm->bm,
            start_bit, bulk_size);
        }
        else
#endif /* #ifndef NO_BULK_LOGGING */
        bit_test_result = clib_bitmap_get_no_check(my_pm->bm, start_bit);
        
        if (PREDICT_TRUE(bit_test_result)) {
            new_port = bit2port(start_bit);
#ifndef NO_BULK_LOGGING
            if(BULK_ALLOC_SIZE_NONE != bulk_size)
                *nfv9_log_req = new_port;
#endif
            if ((pair_type == PORT_S_ODD) &&
                       (!(new_port & 0x1))) {
#ifndef NO_BULK_LOGGING
                if(BULK_ALLOC_SIZE_NONE != bulk_size) {
                    start_bit++; /* Just use the next one in the bulk range */
                    new_port++;
                    goto found2;
                }
#endif /* #ifndef NO_BULK_LOGGING */
                        goto notfound;
            } else if ((pair_type == PORT_S_EVEN) &&
                       (new_port & 0x1)) {
                        goto notfound;
            }

            /* OK we got one or two suitable ports */
            goto found2;
        }

    notfound:
#ifndef NO_BULK_LOGGING
    if(BULK_ALLOC_SIZE_NONE != bulk_size)
        start_bit += bulk_size;
    else
#endif /* #ifndef NO_BULK_LOGGING */
    start_bit++;

    } /* end of for loop */

    /* Completely out of ports */

    /* Port allocation failure */
    /* set dyn_full flag. This would be used to verify
     * for further dyn session before searching for port
     */
    if (atype == PORT_ALLOC_DIRECTED) {
        my_pm->dyn_full = 1;
        return (CNAT_NOT_FOUND_DIRECT);
    } else {
        my_pm->dyn_full = 1;
        return (CNAT_NOT_FOUND_ANY);
    }
  

 found2:

    /* Accounting */
    cgn_clib_bitmap_clear_no_check (my_pm->bm, start_bit);
    (my_pm->inuse)++;

    *index = my_pm - pm;
    *o_ipv4_address = my_pm->ipv4_address;

    *o_port = new_port;
    return (CNAT_SUCCESS);
}
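As a rough check on the "around 300" figure in the header comment: assuming each cnat_portmap_v2_t entry represents one public IPv4 address with roughly a 64K dynamic port space, 20M translations / 65,536 ports per address ≈ 305 portmap entries, which matches the stated vec_len(cnat_portmap) estimate.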
Example #18
File: encap.c Project: chrisy/vpp
always_inline uword
geneve_encap_inline (vlib_main_t * vm,
		     vlib_node_runtime_t * node,
		     vlib_frame_t * from_frame, u32 is_ip4)
{
  u32 n_left_from, next_index, *from, *to_next;
  geneve_main_t *vxm = &geneve_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u16 old_l0 = 0, old_l1 = 0;
  u32 thread_index = vm->thread_index;
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u32 sw_if_index0 = ~0, sw_if_index1 = ~0;
  u32 next0 = 0, next1 = 0;
  vnet_hw_interface_t *hi0, *hi1;
  geneve_tunnel_t *t0 = NULL, *t1 = NULL;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  u32 bi0, bi1;
	  vlib_buffer_t *b0, *b1;
	  u32 flow_hash0, flow_hash1;
	  u32 len0, len1;
	  ip4_header_t *ip4_0, *ip4_1;
	  ip6_header_t *ip6_0, *ip6_1;
	  udp_header_t *udp0, *udp1;
	  u64 *copy_src0, *copy_dst0;
	  u64 *copy_src1, *copy_dst1;
	  u32 *copy_src_last0, *copy_dst_last0;
	  u32 *copy_src_last1, *copy_dst_last1;
	  u16 new_l0, new_l1;
	  ip_csum_t sum0, sum1;

	  /* Prefetch next iteration. */
	  {
	    vlib_buffer_t *p2, *p3;

	    p2 = vlib_get_buffer (vm, from[2]);
	    p3 = vlib_get_buffer (vm, from[3]);

	    vlib_prefetch_buffer_header (p2, LOAD);
	    vlib_prefetch_buffer_header (p3, LOAD);

	    CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	    CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	  }

	  bi0 = from[0];
	  bi1 = from[1];
	  to_next[0] = bi0;
	  to_next[1] = bi1;
	  from += 2;
	  to_next += 2;
	  n_left_to_next -= 2;
	  n_left_from -= 2;

	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);

	  flow_hash0 = vnet_l2_compute_flow_hash (b0);
	  flow_hash1 = vnet_l2_compute_flow_hash (b1);

	  /* Get next node index and adj index from tunnel next_dpo */
	  if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
	    {
	      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
	      hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
	      t0 = &vxm->tunnels[hi0->dev_instance];
	      /* Note: change to always set next0 if it may be set to drop */
	      next0 = t0->next_dpo.dpoi_next_node;
	    }

	  ASSERT (t0 != NULL);

	  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

	  /* Get next node index and adj index from tunnel next_dpo */
	  if (sw_if_index1 != vnet_buffer (b1)->sw_if_index[VLIB_TX])
	    {
	      sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
	      hi1 = vnet_get_sup_hw_interface (vnm, sw_if_index1);
	      t1 = &vxm->tunnels[hi1->dev_instance];
	      /* Note: change to always set next1 if it may be set to drop */
	      next1 = t1->next_dpo.dpoi_next_node;
	    }

	  ASSERT (t1 != NULL);

	  vnet_buffer (b1)->ip.adj_index[VLIB_TX] = t1->next_dpo.dpoi_index;

	  /* Apply the rewrite string. $$$$ vnet_rewrite? */
	  vlib_buffer_advance (b0, -(word) _vec_len (t0->rewrite));
	  vlib_buffer_advance (b1, -(word) _vec_len (t1->rewrite));

	  if (is_ip4)
	    {
	      u8 ip4_geneve_base_header_len =
		sizeof (ip4_header_t) + sizeof (udp_header_t) +
		GENEVE_BASE_HEADER_LENGTH;
	      u8 ip4_geneve_header_total_len0 = ip4_geneve_base_header_len;
	      u8 ip4_geneve_header_total_len1 = ip4_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
	      ip4_geneve_header_total_len0 += t0->options_len;
	      ip4_geneve_header_total_len1 += t1->options_len;
#endif
	      ASSERT (vec_len (t0->rewrite) == ip4_geneve_header_total_len0);
	      ASSERT (vec_len (t1->rewrite) == ip4_geneve_header_total_len1);

	      ip4_0 = vlib_buffer_get_current (b0);
	      ip4_1 = vlib_buffer_get_current (b1);

	      /* Copy the fixed header */
	      copy_dst0 = (u64 *) ip4_0;
	      copy_src0 = (u64 *) t0->rewrite;
	      copy_dst1 = (u64 *) ip4_1;
	      copy_src1 = (u64 *) t1->rewrite;
	      /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
	      foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
	      foreach_fixed_header4_offset;
#undef _
	      /* Last 4 octets. Hopefully gcc will be our friend */
	      copy_dst_last0 = (u32 *) (&copy_dst0[4]);
	      copy_src_last0 = (u32 *) (&copy_src0[4]);
	      copy_dst_last0[0] = copy_src_last0[0];
	      copy_dst_last1 = (u32 *) (&copy_dst1[4]);
	      copy_src_last1 = (u32 *) (&copy_src1[4]);
	      copy_dst_last1[0] = copy_src_last1[0];

	      /* Fix the IP4 checksum and length */
	      sum0 = ip4_0->checksum;
	      new_l0 =		/* old_l0 always 0, see the rewrite setup */
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
	      sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
				     length /* changed member */ );
	      ip4_0->checksum = ip_csum_fold (sum0);
	      ip4_0->length = new_l0;
	      sum1 = ip4_1->checksum;
	      new_l1 =		/* old_l1 always 0, see the rewrite setup */
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));
	      sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
				     length /* changed member */ );
	      ip4_1->checksum = ip_csum_fold (sum1);
	      ip4_1->length = new_l1;

	      /* Fix UDP length and set source port */
	      udp0 = (udp_header_t *) (ip4_0 + 1);
	      new_l0 =
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
				      sizeof (*ip4_0));
	      udp0->length = new_l0;
	      udp0->src_port = flow_hash0;
	      udp1 = (udp_header_t *) (ip4_1 + 1);
	      new_l1 =
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1) -
				      sizeof (*ip4_1));
	      udp1->length = new_l1;
	      udp1->src_port = flow_hash1;
	    }
	  else			/* ipv6 */
	    {
	      int bogus = 0;

	      u8 ip6_geneve_base_header_len =
		sizeof (ip6_header_t) + sizeof (udp_header_t) +
		GENEVE_BASE_HEADER_LENGTH;
	      u8 ip6_geneve_header_total_len0 = ip6_geneve_base_header_len;
	      u8 ip6_geneve_header_total_len1 = ip6_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
	      ip6_geneve_header_total_len0 += t0->options_len;
	      ip6_geneve_header_total_len1 += t1->options_len;
#endif
	      ASSERT (vec_len (t0->rewrite) == ip6_geneve_header_total_len0);
	      ASSERT (vec_len (t1->rewrite) == ip6_geneve_header_total_len1);

	      ip6_0 = vlib_buffer_get_current (b0);
	      ip6_1 = vlib_buffer_get_current (b1);

	      /* Copy the fixed header */
	      copy_dst0 = (u64 *) ip6_0;
	      copy_src0 = (u64 *) t0->rewrite;
	      copy_dst1 = (u64 *) ip6_1;
	      copy_src1 = (u64 *) t1->rewrite;
	      /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
	      foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
	      foreach_fixed_header6_offset;
#undef _
	      /* Fix IP6 payload length */
	      new_l0 =
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
				      - sizeof (*ip6_0));
	      ip6_0->payload_length = new_l0;
	      new_l1 =
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
				      - sizeof (*ip6_1));
	      ip6_1->payload_length = new_l1;

	      /* Fix UDP length  and set source port */
	      udp0 = (udp_header_t *) (ip6_0 + 1);
	      udp0->length = new_l0;
	      udp0->src_port = flow_hash0;
	      udp1 = (udp_header_t *) (ip6_1 + 1);
	      udp1->length = new_l1;
	      udp1->src_port = flow_hash1;

	      /* IPv6 UDP checksum is mandatory */
	      udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b0,
								  ip6_0,
								  &bogus);
	      ASSERT (bogus == 0);
	      if (udp0->checksum == 0)
		udp0->checksum = 0xffff;
	      udp1->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b1,
								  ip6_1,
								  &bogus);
	      ASSERT (bogus == 0);
	      if (udp1->checksum == 0)
		udp1->checksum = 0xffff;
	    }

	  pkts_encapsulated += 2;
	  len0 = vlib_buffer_length_in_chain (vm, b0);
	  len1 = vlib_buffer_length_in_chain (vm, b1);
	  stats_n_packets += 2;
	  stats_n_bytes += len0 + len1;

	  /* Batch stats increment on the same geneve tunnel so counter is not
	     incremented per packet. Note stats are still incremented for deleted
	     and admin-down tunnel where packets are dropped. It is not worthwhile
	     to check for this rare case and affect normal path performance. */
	  if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index) ||
			     (sw_if_index1 != stats_sw_if_index)))
	    {
	      stats_n_packets -= 2;
	      stats_n_bytes -= len0 + len1;
	      if (sw_if_index0 == sw_if_index1)
		{
		  if (stats_n_packets)
		    vlib_increment_combined_counter
		      (im->combined_sw_if_counters +
		       VNET_INTERFACE_COUNTER_TX, thread_index,
		       stats_sw_if_index, stats_n_packets, stats_n_bytes);
		  stats_sw_if_index = sw_if_index0;
		  stats_n_packets = 2;
		  stats_n_bytes = len0 + len1;
		}
	      else
		{
		  vlib_increment_combined_counter
		    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		     thread_index, sw_if_index0, 1, len0);
		  vlib_increment_combined_counter
		    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		     thread_index, sw_if_index1, 1, len1);
		}
	    }

	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      geneve_encap_trace_t *tr =
		vlib_add_trace (vm, node, b0, sizeof (*tr));
	      tr->tunnel_index = t0 - vxm->tunnels;
	      tr->vni = t0->vni;
	    }

	  if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      geneve_encap_trace_t *tr =
		vlib_add_trace (vm, node, b1, sizeof (*tr));
	      tr->tunnel_index = t1 - vxm->tunnels;
	      tr->vni = t1->vni;
	    }

	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, bi1, next0, next1);
	}

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0;
	  vlib_buffer_t *b0;
	  u32 flow_hash0;
	  u32 len0;
	  ip4_header_t *ip4_0;
	  ip6_header_t *ip6_0;
	  udp_header_t *udp0;
	  u64 *copy_src0, *copy_dst0;
	  u32 *copy_src_last0, *copy_dst_last0;
	  u16 new_l0;
	  ip_csum_t sum0;

	  bi0 = from[0];
	  to_next[0] = bi0;
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  b0 = vlib_get_buffer (vm, bi0);

	  flow_hash0 = vnet_l2_compute_flow_hash (b0);

	  /* Get next node index and adj index from tunnel next_dpo */
	  if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
	    {
	      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
	      hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
	      t0 = &vxm->tunnels[hi0->dev_instance];
	      /* Note: change to always set next0 if it may be set to drop */
	      next0 = t0->next_dpo.dpoi_next_node;
	    }
	  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

	  /* Apply the rewrite string. $$$$ vnet_rewrite? */
	  vlib_buffer_advance (b0, -(word) _vec_len (t0->rewrite));

	  if (is_ip4)
	    {
	      u8 ip4_geneve_base_header_len =
		sizeof (ip4_header_t) + sizeof (udp_header_t) +
		GENEVE_BASE_HEADER_LENGTH;
	      u8 ip4_geneve_header_total_len0 = ip4_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
	      ip4_geneve_header_total_len0 += t0->options_len;
#endif
	      ASSERT (vec_len (t0->rewrite) == ip4_geneve_header_total_len0);

	      ip4_0 = vlib_buffer_get_current (b0);

	      /* Copy the fixed header */
	      copy_dst0 = (u64 *) ip4_0;
	      copy_src0 = (u64 *) t0->rewrite;
	      /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
	      foreach_fixed_header4_offset;
#undef _
	      /* Last 4 octets. Hopefully gcc will be our friend */
	      copy_dst_last0 = (u32 *) (&copy_dst0[4]);
	      copy_src_last0 = (u32 *) (&copy_src0[4]);
	      copy_dst_last0[0] = copy_src_last0[0];

	      /* Fix the IP4 checksum and length */
	      sum0 = ip4_0->checksum;
	      new_l0 =		/* old_l0 always 0, see the rewrite setup */
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
	      sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
				     length /* changed member */ );
	      ip4_0->checksum = ip_csum_fold (sum0);
	      ip4_0->length = new_l0;

	      /* Fix UDP length and set source port */
	      udp0 = (udp_header_t *) (ip4_0 + 1);
	      new_l0 =
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
				      sizeof (*ip4_0));
	      udp0->length = new_l0;
	      udp0->src_port = flow_hash0;
	    }

	  else			/* ip6 path */
	    {
	      int bogus = 0;

	      u8 ip6_geneve_base_header_len =
		sizeof (ip6_header_t) + sizeof (udp_header_t) +
		GENEVE_BASE_HEADER_LENGTH;
	      u8 ip6_geneve_header_total_len0 = ip6_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
	      ip6_geneve_header_total_len0 += t0->options_len;
#endif
	      ASSERT (vec_len (t0->rewrite) == ip6_geneve_header_total_len0);

	      ip6_0 = vlib_buffer_get_current (b0);
	      /* Copy the fixed header */
	      copy_dst0 = (u64 *) ip6_0;
	      copy_src0 = (u64 *) t0->rewrite;
	      /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
	      foreach_fixed_header6_offset;
#undef _
	      /* Fix IP6 payload length */
	      new_l0 =
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
				      - sizeof (*ip6_0));
	      ip6_0->payload_length = new_l0;

	      /* Fix UDP length  and set source port */
	      udp0 = (udp_header_t *) (ip6_0 + 1);
	      udp0->length = new_l0;
	      udp0->src_port = flow_hash0;

	      /* IPv6 UDP checksum is mandatory */
	      udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b0,
								  ip6_0,
								  &bogus);
	      ASSERT (bogus == 0);
	      if (udp0->checksum == 0)
		udp0->checksum = 0xffff;
	    }

	  pkts_encapsulated++;
	  len0 = vlib_buffer_length_in_chain (vm, b0);
	  stats_n_packets += 1;
	  stats_n_bytes += len0;

	  /* Batch stats increment on the same geneve tunnel so counter is not
	     incremented per packet. Note stats are still incremented for deleted
	     and admin-down tunnel where packets are dropped. It is not worthwhile
	     to check for this rare case and affect normal path performance. */
	  if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
	    {
	      stats_n_packets -= 1;
	      stats_n_bytes -= len0;
	      if (stats_n_packets)
		vlib_increment_combined_counter
		  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		   thread_index, stats_sw_if_index,
		   stats_n_packets, stats_n_bytes);
	      stats_n_packets = 1;
	      stats_n_bytes = len0;
	      stats_sw_if_index = sw_if_index0;
	    }

	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      geneve_encap_trace_t *tr =
		vlib_add_trace (vm, node, b0, sizeof (*tr));
	      tr->tunnel_index = t0 - vxm->tunnels;
	      tr->vni = t0->vni;
	    }
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, node->node_index,
			       GENEVE_ENCAP_ERROR_ENCAPSULATED,
			       pkts_encapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
	(im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
	 thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}
Example #19
always_inline uword
gtpu_encap_inline (vlib_main_t * vm,
		    vlib_node_runtime_t * node,
		    vlib_frame_t * from_frame,
		    u32 is_ip4)
{
  u32 n_left_from, next_index, * from, * to_next;
  gtpu_main_t * gtm = &gtpu_main;
  vnet_main_t * vnm = gtm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u16 old_l0 = 0, old_l1 = 0, old_l2 = 0, old_l3 = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u32 sw_if_index0 = 0, sw_if_index1 = 0, sw_if_index2 = 0, sw_if_index3 = 0;
  u32 next0 = 0, next1 = 0, next2 = 0, next3 = 0;
  vnet_hw_interface_t * hi0, * hi1, * hi2, * hi3;
  gtpu_tunnel_t * t0 = NULL, * t1 = NULL, * t2 = NULL, * t3 = NULL;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
			   to_next, n_left_to_next);

      while (n_left_from >= 8 && n_left_to_next >= 4)
	{
          u32 bi0, bi1, bi2, bi3;
	  vlib_buffer_t * b0, * b1, * b2, * b3;
          u32 flow_hash0, flow_hash1, flow_hash2, flow_hash3;
	  u32 len0, len1, len2, len3;
          ip4_header_t * ip4_0, * ip4_1, * ip4_2, * ip4_3;
          ip6_header_t * ip6_0, * ip6_1, * ip6_2, * ip6_3;
          udp_header_t * udp0, * udp1, * udp2, * udp3;
          gtpu_header_t * gtpu0, * gtpu1, * gtpu2, * gtpu3;
          u64 * copy_src0, * copy_dst0;
          u64 * copy_src1, * copy_dst1;
          u64 * copy_src2, * copy_dst2;
          u64 * copy_src3, * copy_dst3;
          u32 * copy_src_last0, * copy_dst_last0;
          u32 * copy_src_last1, * copy_dst_last1;
          u32 * copy_src_last2, * copy_dst_last2;
          u32 * copy_src_last3, * copy_dst_last3;
          u16 new_l0, new_l1, new_l2, new_l3;
          ip_csum_t sum0, sum1, sum2, sum3;

	  /* Prefetch next iteration. */
	  {
	    vlib_buffer_t * p4, * p5, * p6, * p7;

	    p4 = vlib_get_buffer (vm, from[4]);
	    p5 = vlib_get_buffer (vm, from[5]);
	    p6 = vlib_get_buffer (vm, from[6]);
	    p7 = vlib_get_buffer (vm, from[7]);

	    vlib_prefetch_buffer_header (p4, LOAD);
	    vlib_prefetch_buffer_header (p5, LOAD);
	    vlib_prefetch_buffer_header (p6, LOAD);
	    vlib_prefetch_buffer_header (p7, LOAD);

	    CLIB_PREFETCH (p4->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
	    CLIB_PREFETCH (p5->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
	    CLIB_PREFETCH (p6->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
	    CLIB_PREFETCH (p7->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
	  }

	  bi0 = from[0];
	  bi1 = from[1];
	  bi2 = from[2];
	  bi3 = from[3];
	  to_next[0] = bi0;
	  to_next[1] = bi1;
	  to_next[2] = bi2;
	  to_next[3] = bi3;
	  from += 4;
	  to_next += 4;
	  n_left_to_next -= 4;
	  n_left_from -= 4;

	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);
	  b2 = vlib_get_buffer (vm, bi2);
	  b3 = vlib_get_buffer (vm, bi3);

          flow_hash0 = vnet_l2_compute_flow_hash (b0);
          flow_hash1 = vnet_l2_compute_flow_hash (b1);
          flow_hash2 = vnet_l2_compute_flow_hash (b2);
          flow_hash3 = vnet_l2_compute_flow_hash (b3);

	  /* Get next node index and adj index from tunnel next_dpo */
	  sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
	  sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
	  sw_if_index2 = vnet_buffer(b2)->sw_if_index[VLIB_TX];
	  sw_if_index3 = vnet_buffer(b3)->sw_if_index[VLIB_TX];
	  hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
	  hi1 = vnet_get_sup_hw_interface (vnm, sw_if_index1);
	  hi2 = vnet_get_sup_hw_interface (vnm, sw_if_index2);
	  hi3 = vnet_get_sup_hw_interface (vnm, sw_if_index3);
	  t0 = &gtm->tunnels[hi0->dev_instance];
	  t1 = &gtm->tunnels[hi1->dev_instance];
	  t2 = &gtm->tunnels[hi2->dev_instance];
	  t3 = &gtm->tunnels[hi3->dev_instance];

	  /* Note: change to always set next0 if it may be set to drop */
	  next0 = t0->next_dpo.dpoi_next_node;
          vnet_buffer(b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;
	  next1 = t1->next_dpo.dpoi_next_node;
          vnet_buffer(b1)->ip.adj_index[VLIB_TX] = t1->next_dpo.dpoi_index;
	  next2 = t2->next_dpo.dpoi_next_node;
          vnet_buffer(b2)->ip.adj_index[VLIB_TX] = t2->next_dpo.dpoi_index;
	  next3 = t3->next_dpo.dpoi_next_node;
          vnet_buffer(b3)->ip.adj_index[VLIB_TX] = t3->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
          vlib_buffer_advance (b1, -(word)_vec_len(t1->rewrite));
          vlib_buffer_advance (b2, -(word)_vec_len(t2->rewrite));
          vlib_buffer_advance (b3, -(word)_vec_len(t3->rewrite));

	  if (is_ip4)
	    {
	      ip4_0 = vlib_buffer_get_current(b0);
	      ip4_1 = vlib_buffer_get_current(b1);
	      ip4_2 = vlib_buffer_get_current(b2);
	      ip4_3 = vlib_buffer_get_current(b3);

	      /* Copy the fixed header */
	      copy_dst0 = (u64 *) ip4_0;
	      copy_src0 = (u64 *) t0->rewrite;
	      copy_dst1 = (u64 *) ip4_1;
	      copy_src1 = (u64 *) t1->rewrite;
	      copy_dst2 = (u64 *) ip4_2;
	      copy_src2 = (u64 *) t2->rewrite;
	      copy_dst3 = (u64 *) ip4_3;
	      copy_src3 = (u64 *) t3->rewrite;

	      /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
	      foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
	      foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst2[offs] = copy_src2[offs];
	      foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst3[offs] = copy_src3[offs];
	      foreach_fixed_header4_offset;
#undef _
	      /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *)(&copy_dst0[4]);
              copy_src_last0 = (u32 *)(&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];
              copy_dst_last1 = (u32 *)(&copy_dst1[4]);
              copy_src_last1 = (u32 *)(&copy_src1[4]);
              copy_dst_last1[0] = copy_src_last1[0];
              copy_dst_last2 = (u32 *)(&copy_dst2[4]);
              copy_src_last2 = (u32 *)(&copy_src2[4]);
              copy_dst_last2[0] = copy_src_last2[0];
              copy_dst_last3 = (u32 *)(&copy_dst3[4]);
              copy_src_last3 = (u32 *)(&copy_src3[4]);
              copy_dst_last3[0] = copy_src_last3[0];

	      /* Fix the IP4 checksum and length */
	      sum0 = ip4_0->checksum;
	      new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
				     length /* changed member */);
	      ip4_0->checksum = ip_csum_fold (sum0);
	      ip4_0->length = new_l0;
	      sum1 = ip4_1->checksum;
	      new_l1 = /* old_l1 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));
              sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
				     length /* changed member */);
	      ip4_1->checksum = ip_csum_fold (sum1);
	      ip4_1->length = new_l1;
	      sum2 = ip4_2->checksum;
	      new_l2 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b2));
              sum2 = ip_csum_update (sum2, old_l2, new_l2, ip4_header_t,
				     length /* changed member */);
	      ip4_2->checksum = ip_csum_fold (sum2);
	      ip4_2->length = new_l2;
	      sum3 = ip4_3->checksum;
	      new_l3 = /* old_l1 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b3));
              sum3 = ip_csum_update (sum3, old_l3, new_l3, ip4_header_t,
				     length /* changed member */);
	      ip4_3->checksum = ip_csum_fold (sum3);
	      ip4_3->length = new_l3;

	      /* Fix UDP length and set source port */
	      udp0 = (udp_header_t *)(ip4_0+1);
	      new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
					     - sizeof (*ip4_0));
	      udp0->length = new_l0;
	      udp0->src_port = flow_hash0;
	      udp1 = (udp_header_t *)(ip4_1+1);
	      new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b1)
					     - sizeof (*ip4_1));
	      udp1->length = new_l1;
	      udp1->src_port = flow_hash1;
	      udp2 = (udp_header_t *)(ip4_2+1);
	      new_l2 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b2)
					     - sizeof (*ip4_2));
	      udp2->length = new_l2;
	      udp2->src_port = flow_hash2;
	      udp3 = (udp_header_t *)(ip4_3+1);
	      new_l3 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b3)
					     - sizeof (*ip4_3));
	      udp3->length = new_l3;
	      udp3->src_port = flow_hash3;

	      /* Fix GTPU length */
	      gtpu0 = (gtpu_header_t *)(udp0+1);
	      new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
					     - sizeof (*ip4_0) - sizeof(*udp0)
					     - GTPU_V1_HDR_LEN);
	      gtpu0->length = new_l0;
	      gtpu1 = (gtpu_header_t *)(udp1+1);
	      new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b1)
					     - sizeof (*ip4_1) - sizeof(*udp1)
					     - GTPU_V1_HDR_LEN);
	      gtpu1->length = new_l1;
	      gtpu2 = (gtpu_header_t *)(udp2+1);
	      new_l2 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b2)
					     - sizeof (*ip4_2) - sizeof(*udp2)
					     - GTPU_V1_HDR_LEN);
	      gtpu2->length = new_l2;
	      gtpu3 = (gtpu_header_t *)(udp3+1);
	      new_l3 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b3)
					     - sizeof (*ip4_3) - sizeof(*udp3)
					     - GTPU_V1_HDR_LEN);
	      gtpu3->length = new_l3;
	    }
	  else /* ipv6 */
	    {
              int bogus = 0;

	      ip6_0 = vlib_buffer_get_current(b0);
	      ip6_1 = vlib_buffer_get_current(b1);
	      ip6_2 = vlib_buffer_get_current(b2);
	      ip6_3 = vlib_buffer_get_current(b3);

	      /* Copy the fixed header */
	      copy_dst0 = (u64 *) ip6_0;
	      copy_src0 = (u64 *) t0->rewrite;
	      copy_dst1 = (u64 *) ip6_1;
	      copy_src1 = (u64 *) t1->rewrite;
	      copy_dst2 = (u64 *) ip6_2;
	      copy_src2 = (u64 *) t2->rewrite;
	      copy_dst3 = (u64 *) ip6_3;
	      copy_src3 = (u64 *) t3->rewrite;
	      /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
	      foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
	      foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst2[offs] = copy_src2[offs];
	      foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst3[offs] = copy_src3[offs];
	      foreach_fixed_header6_offset;
#undef _
	      /* Fix IP6 payload length */
	      new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
				      - sizeof(*ip6_0));
	      ip6_0->payload_length = new_l0;
	      new_l1 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
				      - sizeof(*ip6_1));
	      ip6_1->payload_length = new_l1;
	      new_l2 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b2)
				      - sizeof(*ip6_2));
	      ip6_2->payload_length = new_l2;
	      new_l3 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b3)
				      - sizeof(*ip6_3));
	      ip6_3->payload_length = new_l3;

	      /* Fix UDP length  and set source port */
	      udp0 = (udp_header_t *)(ip6_0+1);
	      udp0->length = new_l0;
	      udp0->src_port = flow_hash0;
	      udp1 = (udp_header_t *)(ip6_1+1);
	      udp1->length = new_l1;
	      udp1->src_port = flow_hash1;
	      udp2 = (udp_header_t *)(ip6_2+1);
	      udp2->length = new_l2;
	      udp2->src_port = flow_hash2;
	      udp3 = (udp_header_t *)(ip6_3+1);
	      udp3->length = new_l3;
	      udp3->src_port = flow_hash3;

	      /* Fix GTPU length */
	      gtpu0 = (gtpu_header_t *)(udp0+1);
	      new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
					     - sizeof (*ip6_0) - sizeof(*udp0)
					     - GTPU_V1_HDR_LEN);
	      gtpu0->length = new_l0;
	      gtpu1 = (gtpu_header_t *)(udp1+1);
	      new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b1)
					     - sizeof (*ip6_1) - sizeof(*udp1)
					     - GTPU_V1_HDR_LEN);
	      gtpu1->length = new_l1;
	      gtpu2 = (gtpu_header_t *)(udp2+1);
	      new_l2 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b2)
					     - sizeof (*ip6_2) - sizeof(*udp2)
					     - GTPU_V1_HDR_LEN);
	      gtpu2->length = new_l2;
	      gtpu3 = (gtpu_header_t *)(udp3+1);
	      new_l3 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b3)
					     - sizeof (*ip6_3) - sizeof(*udp3)
					     - GTPU_V1_HDR_LEN);
	      gtpu3->length = new_l3;

	      /* IPv6 UDP checksum is mandatory */
	      udp0->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b0,
								 ip6_0, &bogus);
	      if (udp0->checksum == 0)
		udp0->checksum = 0xffff;
	      udp1->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b1,
								 ip6_1, &bogus);
	      if (udp1->checksum == 0)
		udp1->checksum = 0xffff;
	      udp2->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b2,
								 ip6_2, &bogus);
	      if (udp2->checksum == 0)
		udp2->checksum = 0xffff;
	      udp3->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b3,
								 ip6_3, &bogus);
	      if (udp3->checksum == 0)
		udp3->checksum = 0xffff;

	    }

          pkts_encapsulated += 4;
 	  len0 = vlib_buffer_length_in_chain (vm, b0);
 	  len1 = vlib_buffer_length_in_chain (vm, b1);
 	  len2 = vlib_buffer_length_in_chain (vm, b2);
 	  len3 = vlib_buffer_length_in_chain (vm, b3);
	  stats_n_packets += 4;
	  stats_n_bytes += len0 + len1 + len2 + len3;

	  /* Batch stats increment on the same gtpu tunnel so counter is not
	     incremented per packet. Note stats are still incremented for deleted
	     and admin-down tunnel where packets are dropped. It is not worthwhile
	     to check for this rare case and affect normal path performance. */
	  if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index) ||
			     (sw_if_index1 != stats_sw_if_index) ||
			     (sw_if_index2 != stats_sw_if_index) ||
			     (sw_if_index3 != stats_sw_if_index) ))
	    {
	      stats_n_packets -= 4;
	      stats_n_bytes -= len0 + len1 + len2 + len3;
	      if ( (sw_if_index0 == sw_if_index1 ) &&
		   (sw_if_index1 == sw_if_index2 ) &&
		   (sw_if_index2 == sw_if_index3 ) )
	        {
		  if (stats_n_packets)
		    vlib_increment_combined_counter
		      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		       thread_index, stats_sw_if_index,
		       stats_n_packets, stats_n_bytes);
		  stats_sw_if_index = sw_if_index0;
		  stats_n_packets = 4;
		  stats_n_bytes = len0 + len1 + len2 + len3;
	        }
	      else
	        {
		  vlib_increment_combined_counter
		      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		       thread_index, sw_if_index0, 1, len0);
		  vlib_increment_combined_counter
		      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		       thread_index, sw_if_index1, 1, len1);
		  vlib_increment_combined_counter
		      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		       thread_index, sw_if_index2, 1, len2);
		  vlib_increment_combined_counter
		      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		       thread_index, sw_if_index3, 1, len3);
		}
	    }

	  if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - gtm->tunnels;
              tr->teid = t0->teid;
           }

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_encap_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - gtm->tunnels;
              tr->teid = t1->teid;
            }

	  vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, bi1, bi2, bi3,
					   next0, next1, next2, next3);
	}

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0;
	  vlib_buffer_t * b0;
          u32 flow_hash0;
	  u32 len0;
          ip4_header_t * ip4_0;
          ip6_header_t * ip6_0;
          udp_header_t * udp0;
          gtpu_header_t * gtpu0;
          u64 * copy_src0, * copy_dst0;
          u32 * copy_src_last0, * copy_dst_last0;
          u16 new_l0;
          ip_csum_t sum0;

	  bi0 = from[0];
	  to_next[0] = bi0;
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  b0 = vlib_get_buffer (vm, bi0);

          flow_hash0 = vnet_l2_compute_flow_hash(b0);

	  /* Get next node index and adj index from tunnel next_dpo */
	  sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
	  hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
	  t0 = &gtm->tunnels[hi0->dev_instance];
	  /* Note: change to always set next0 if it may be set to drop */
	  next0 = t0->next_dpo.dpoi_next_node;
	  vnet_buffer(b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));

	  if (is_ip4)
	    {
	      ip4_0 = vlib_buffer_get_current(b0);

	      /* Copy the fixed header */
	      copy_dst0 = (u64 *) ip4_0;
	      copy_src0 = (u64 *) t0->rewrite;
	      /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
	      foreach_fixed_header4_offset;
#undef _
	      /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *)(&copy_dst0[4]);
              copy_src_last0 = (u32 *)(&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];

	      /* Fix the IP4 checksum and length */
	      sum0 = ip4_0->checksum;
	      new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
				     length /* changed member */);
	      ip4_0->checksum = ip_csum_fold (sum0);
	      ip4_0->length = new_l0;

	      /* Fix UDP length and set source port */
	      udp0 = (udp_header_t *)(ip4_0+1);
	      new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
					     - sizeof (*ip4_0));
	      udp0->length = new_l0;
	      udp0->src_port = flow_hash0;

	      /* Fix GTPU length */
	      gtpu0 = (gtpu_header_t *)(udp0+1);
	      new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
					     - sizeof (*ip4_0) - sizeof(*udp0)
					     - GTPU_V1_HDR_LEN);
	      gtpu0->length = new_l0;
	    }

	  else /* ip6 path */
	    {
              int bogus = 0;

	      ip6_0 = vlib_buffer_get_current(b0);
	      /* Copy the fixed header */
	      copy_dst0 = (u64 *) ip6_0;
	      copy_src0 = (u64 *) t0->rewrite;
	      /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
	      foreach_fixed_header6_offset;
#undef _
	      /* Fix IP6 payload length */
	      new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
				      - sizeof(*ip6_0));
	      ip6_0->payload_length = new_l0;

	      /* Fix UDP length  and set source port */
	      udp0 = (udp_header_t *)(ip6_0+1);
	      udp0->length = new_l0;
	      udp0->src_port = flow_hash0;

	      /* Fix GTPU length */
	      gtpu0 = (gtpu_header_t *)(udp0+1);
	      new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
					     - sizeof (*ip6_0) - sizeof(*udp0)
					     - GTPU_V1_HDR_LEN);
	      gtpu0->length = new_l0;

	      /* IPv6 UDP checksum is mandatory */
	      udp0->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b0,
								 ip6_0, &bogus);
	      if (udp0->checksum == 0)
		udp0->checksum = 0xffff;
	    }

          pkts_encapsulated ++;
	  len0 = vlib_buffer_length_in_chain (vm, b0);
	  stats_n_packets += 1;
	  stats_n_bytes += len0;

	  /* Batch stats increment on the same gtpu tunnel so counter is not
	     incremented per packet. Note stats are still incremented for deleted
	     and admin-down tunnel where packets are dropped. It is not worthwhile
	     to check for this rare case and affect normal path performance. */
	  if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
	    {
	      stats_n_packets -= 1;
	      stats_n_bytes -= len0;
	      if (stats_n_packets)
		vlib_increment_combined_counter
		  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		   thread_index, stats_sw_if_index,
		   stats_n_packets, stats_n_bytes);
	      stats_n_packets = 1;
	      stats_n_bytes = len0;
	      stats_sw_if_index = sw_if_index0;
	    }

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - gtm->tunnels;
              tr->teid = t0->teid;
            }
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               GTPU_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
	(im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
	 thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}
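
The IPv4 branch above patches the outer header's length with ip_csum_update()/ip_csum_fold() rather than recomputing the checksum over the whole header (the old length is 0 in the pre-built rewrite, hence the "old_l0 always 0" comment). Below is a minimal standalone sketch of that incremental one's-complement update, using the RFC 1624 formula HC' = ~(~HC + ~m + m'); the header words, indices and helper names are illustrative, not the VPP API.

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit accumulator into a 16-bit one's-complement sum. */
static uint16_t csum_fold (uint32_t sum)
{
  while (sum >> 16)
    sum = (sum & 0xffff) + (sum >> 16);
  return (uint16_t) sum;
}

/* Full recompute: ~(one's-complement sum) over n words, skipping csum_idx. */
static uint16_t csum_compute (const uint16_t *hdr, int n_u16, int csum_idx)
{
  uint32_t sum = 0;
  for (int i = 0; i < n_u16; i++)
    if (i != csum_idx)
      sum += hdr[i];
  return (uint16_t) ~csum_fold (sum);
}

/* Incremental update when one 16-bit field changes from old_val to new_val:
 * HC' = ~(~HC + ~m + m'), per RFC 1624 eqn. 3. */
static uint16_t csum_update_u16 (uint16_t hc, uint16_t old_val, uint16_t new_val)
{
  uint32_t sum = (uint16_t) ~hc;
  sum += (uint16_t) ~old_val;
  sum += new_val;
  return (uint16_t) ~csum_fold (sum);
}

int main (void)
{
  /* Hypothetical 20-byte IPv4 header as ten 16-bit words; the total-length
   * field is word 1 (initially 0, like the pre-computed rewrite string)
   * and the checksum is word 5. */
  uint16_t hdr[10] = { 0x4500, 0x0000, 0x1234, 0x4000,
                       0x4011, 0x0000, 0xc0a8, 0x0101,
                       0xc0a8, 0x0102 };
  hdr[5] = csum_compute (hdr, 10, 5);

  uint16_t old_len = hdr[1];
  uint16_t new_len = 0x05dc;                   /* e.g. 1500 bytes */
  uint16_t inc = csum_update_u16 (hdr[5], old_len, new_len);

  hdr[1] = new_len;
  printf ("incremental 0x%04x, recomputed 0x%04x\n",
          inc, csum_compute (hdr, 10, 5));
  return inc != csum_compute (hdr, 10, 5);     /* 0 when they agree */
}

The payoff is the same as in the node above: touching only the field that changed keeps the per-packet cost to a handful of adds instead of a 20-byte sum.
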
Exemplo n.º 20
0
    // Advances to the first element beyond the current one
    // whose value is greater than or equal to target.
    // We do a linear search inside a block because of delta encoding.
    unsigned int CompressedSet::Iterator::Advance(unsigned int target){
        // if the pointer points past the end
        if( PREDICT_FALSE(cursor == totalDocIdNum || totalDocIdNum <= 0)){
            lastAccessedDocId = NO_MORE_DOCS;
            return NO_MORE_DOCS;
        }
        // if the pointer points to the end
        if(++cursor == totalDocIdNum){
            lastAccessedDocId = NO_MORE_DOCS;
            return NO_MORE_DOCS;
        }

        // the expected behavior is to find the first element AFTER the current cursor
        // that is equal to or larger than target
        if(cursor != 0 && target <= lastAccessedDocId) {
            target = lastAccessedDocId + 1;
        }
        

        int iterBlockIndex = cursor >> BLOCK_SIZE_BIT;
        int offset = cursor & BLOCK_SIZE_MODULO;

        // if there is a noComp block, check it when either
        // the next element is currently in that last block, or
        // the cursor is not in the last block but the target is larger than
        // the last element of the last compressed block
        unsigned int sizeOfCurrentNoCompBlock = set->sizeOfCurrentNoCompBlock;
        size_t baseListForOnlyCompBlocksSize = set->baseListForOnlyCompBlocks.size();
        
        if(sizeOfCurrentNoCompBlock>0) {// if there exists the last decomp block
          if(iterBlockIndex == compBlockNum ||
             (baseListForOnlyCompBlocksSize>0 && target > set->baseListForOnlyCompBlocks[baseListForOnlyCompBlocksSize-1])) {
            offset = binarySearchForFirstElementEqualOrLargerThanTarget(&(set->currentNoCompBlock[0]), 0, sizeOfCurrentNoCompBlock-1, target);

            if(offset>=0){
              iterBlockIndex = compBlockNum;
              lastAccessedDocId = set->currentNoCompBlock[offset];
              cursor = (iterBlockIndex << BLOCK_INDEX_SHIFT_BITS) + offset;
              return lastAccessedDocId;
            } else {
              // hy: avoid a repeated lookup next time once we have reached the end of the sequence
              cursor = totalDocIdNum;
              lastAccessedDocId = NO_MORE_DOCS;
              return lastAccessedDocId;
            }
          }
        }

         // if we cannot find it in the noComp block, we check the comp blocks
         if(baseListForOnlyCompBlocksSize>0 && target <= set->baseListForOnlyCompBlocks[baseListForOnlyCompBlocksSize-1]) {
           // for the following cases, it must exist in one of the comp blocks since target<= the last base in the comp blocks
           if(offset == 0) {
             // searching the right block from the current block to the last block
            #ifdef PREFIX_SUM
                lastAccessedDocId = advanceToTargetInTheFollowingCompBlocksNoPostProcessing(target, iterBlockIndex);
            #else
                lastAccessedDocId = advanceToTargetInTheFollowingCompBlocks(target, iterBlockIndex);
            #endif
             return lastAccessedDocId;
           } else { // offset > 0: the current block has already been decompressed, so first test this block and then proceed as in case 2
             assert(offset > 0);
             if(target <= set->baseListForOnlyCompBlocks[iterBlockIndex]) {
               while(offset < DEFAULT_BATCH_SIZE) {
                 #ifdef PREFIX_SUM
                    lastAccessedDocId += (iterDecompBlock[offset]);
                 #else
                    lastAccessedDocId = (iterDecompBlock[offset]);
                 #endif  
                 if (lastAccessedDocId >= target) {
                   break;
                 }
                 offset++;
               }
               if (offset == DEFAULT_BATCH_SIZE) {
                printf("Error case 3: Impossible, we must be able to find the target %d in the block, lastAccessedDocId: %d , baseListForOnlyCompBlocks[%d]\n",
                target,lastAccessedDocId,iterBlockIndex);
               }
               assert(offset != DEFAULT_BATCH_SIZE);

               cursor = (iterBlockIndex << BLOCK_INDEX_SHIFT_BITS) + offset;
               return lastAccessedDocId;
             } else { // hy: there must exist other comp blocks between the current block and noComp block since target <= baseListForOnlyCompBlocks.get(baseListForOnlyCompBlocks.size()-1)
                #ifdef PREFIX_SUM
                   lastAccessedDocId = advanceToTargetInTheFollowingCompBlocksNoPostProcessing(target, iterBlockIndex);
                #else
                   lastAccessedDocId = advanceToTargetInTheFollowingCompBlocks(target, iterBlockIndex);
                #endif  
                return lastAccessedDocId;
             }
           }
         }

         lastAccessedDocId = NO_MORE_DOCS;
         return lastAccessedDocId;
    }
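
Advance() above leans on two searches: a lower-bound style binary search in the uncompressed tail block (binarySearchForFirstElementEqualOrLargerThanTarget) and a linear, delta-decoded scan inside compressed blocks, which is why the leading comment mentions delta encoding. The sketch below shows both on plain arrays; the function names and the -1 "not found" convention are illustrative assumptions, not the library's exact signatures.

#include <stdio.h>

/* Return the smallest index in sorted[lo..hi] whose value is >= target,
 * or -1 if every element is smaller than target. */
static int first_equal_or_larger (const unsigned int *sorted,
                                  int lo, int hi, unsigned int target)
{
  int result = -1;
  while (lo <= hi)
    {
      int mid = lo + (hi - lo) / 2;
      if (sorted[mid] >= target)
        {
          result = mid;          /* candidate; keep looking to the left */
          hi = mid - 1;
        }
      else
        lo = mid + 1;
    }
  return result;
}

/* With delta encoding a block stores gaps; advancing to 'target' means
 * accumulating gaps (a prefix sum) until the running doc id reaches it,
 * which is inherently a linear scan. */
static unsigned int scan_deltas (const unsigned int *deltas, int n,
                                 unsigned int base, unsigned int target)
{
  unsigned int doc = base;
  for (int i = 0; i < n; i++)
    {
      doc += deltas[i];
      if (doc >= target)
        return doc;
    }
  return 0xffffffffu;            /* NO_MORE_DOCS-style sentinel */
}

int main (void)
{
  unsigned int doc_ids[] = { 3, 7, 7, 12, 19, 25 };
  int n = sizeof (doc_ids) / sizeof (doc_ids[0]);

  printf ("%d\n", first_equal_or_larger (doc_ids, 0, n - 1, 8));   /* 3  -> 12 */
  printf ("%d\n", first_equal_or_larger (doc_ids, 0, n - 1, 7));   /* 1  -> 7  */
  printf ("%d\n", first_equal_or_larger (doc_ids, 0, n - 1, 30));  /* -1       */

  unsigned int deltas[] = { 3, 4, 0, 5, 7, 6 };   /* decodes to 3,7,7,12,19,25 */
  printf ("%u\n", scan_deltas (deltas, 6, 0, 8)); /* 12 */
  return 0;
}
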
Exemplo n.º 21
0
always_inline uword
adj_midchain_tx_inline (vlib_main_t * vm,
			vlib_node_runtime_t * node,
			vlib_frame_t * frame,
			int interface_count)
{
    u32 * from, * to_next, n_left_from, n_left_to_next;
    u32 next_index;
    vnet_main_t *vnm = vnet_get_main ();
    vnet_interface_main_t *im = &vnm->interface_main;
    u32 thread_index = vm->thread_index;

    /* Vector of buffer / pkt indices we're supposed to process */
    from = vlib_frame_vector_args (frame);

    /* Number of buffers / pkts */
    n_left_from = frame->n_vectors;

    /* Speculatively send the first buffer to the last disposition we used */
    next_index = node->cached_next_index;

    while (n_left_from > 0)
    {
	/* set up to enqueue to our disposition with index = next_index */
	vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

	while (n_left_from >= 8 && n_left_to_next > 4)
	{
	    u32 bi0, adj_index0, next0;
	    const ip_adjacency_t * adj0;
	    const dpo_id_t *dpo0;
	    vlib_buffer_t * b0;
	    u32 bi1, adj_index1, next1;
	    const ip_adjacency_t * adj1;
	    const dpo_id_t *dpo1;
	    vlib_buffer_t * b1;
	    u32 bi2, adj_index2, next2;
	    const ip_adjacency_t * adj2;
	    const dpo_id_t *dpo2;
	    vlib_buffer_t * b2;
	    u32 bi3, adj_index3, next3;
	    const ip_adjacency_t * adj3;
	    const dpo_id_t *dpo3;
	    vlib_buffer_t * b3;

	    /* Prefetch next iteration. */
	    {
		vlib_buffer_t * p4, * p5;
		vlib_buffer_t * p6, * p7;

		p4 = vlib_get_buffer (vm, from[4]);
		p5 = vlib_get_buffer (vm, from[5]);
		p6 = vlib_get_buffer (vm, from[6]);
		p7 = vlib_get_buffer (vm, from[7]);

		vlib_prefetch_buffer_header (p4, LOAD);
		vlib_prefetch_buffer_header (p5, LOAD);
		vlib_prefetch_buffer_header (p6, LOAD);
		vlib_prefetch_buffer_header (p7, LOAD);
	    }

	    bi0 = from[0];
	    to_next[0] = bi0;
	    bi1 = from[1];
	    to_next[1] = bi1;
	    bi2 = from[2];
	    to_next[2] = bi2;
	    bi3 = from[3];
	    to_next[3] = bi3;

	    from += 4;
	    to_next += 4;
	    n_left_from -= 4;
	    n_left_to_next -= 4;

	    b0 = vlib_get_buffer(vm, bi0);
	    b1 = vlib_get_buffer(vm, bi1);
	    b2 = vlib_get_buffer(vm, bi2);
	    b3 = vlib_get_buffer(vm, bi3);

	    /* Follow the DPO on which the midchain is stacked */
	    adj_index0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
	    adj_index1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
	    adj_index2 = vnet_buffer(b2)->ip.adj_index[VLIB_TX];
	    adj_index3 = vnet_buffer(b3)->ip.adj_index[VLIB_TX];

	    adj0 = adj_get(adj_index0);
	    adj1 = adj_get(adj_index1);
	    adj2 = adj_get(adj_index2);
	    adj3 = adj_get(adj_index3);

	    dpo0 = &adj0->sub_type.midchain.next_dpo;
	    dpo1 = &adj1->sub_type.midchain.next_dpo;
	    dpo2 = &adj2->sub_type.midchain.next_dpo;
	    dpo3 = &adj3->sub_type.midchain.next_dpo;

	    next0 = dpo0->dpoi_next_node;
	    next1 = dpo1->dpoi_next_node;
	    next2 = dpo2->dpoi_next_node;
	    next3 = dpo3->dpoi_next_node;

            vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
            vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
            vnet_buffer(b2)->ip.adj_index[VLIB_TX] = dpo2->dpoi_index;
            vnet_buffer(b3)->ip.adj_index[VLIB_TX] = dpo3->dpoi_index;

	    if (interface_count)
	    {
		vlib_increment_combined_counter (im->combined_sw_if_counters
						 + VNET_INTERFACE_COUNTER_TX,
						 thread_index,
						 adj0->rewrite_header.sw_if_index,
						 1,
						 vlib_buffer_length_in_chain (vm, b0));
		vlib_increment_combined_counter (im->combined_sw_if_counters
						 + VNET_INTERFACE_COUNTER_TX,
						 thread_index,
						 adj1->rewrite_header.sw_if_index,
						 1,
						 vlib_buffer_length_in_chain (vm, b1));
		vlib_increment_combined_counter (im->combined_sw_if_counters
						 + VNET_INTERFACE_COUNTER_TX,
						 thread_index,
						 adj2->rewrite_header.sw_if_index,
						 1,
						 vlib_buffer_length_in_chain (vm, b2));
		vlib_increment_combined_counter (im->combined_sw_if_counters
						 + VNET_INTERFACE_COUNTER_TX,
						 thread_index,
						 adj3->rewrite_header.sw_if_index,
						 1,
						 vlib_buffer_length_in_chain (vm, b3));
	    }

	    if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
		adj_midchain_tx_trace_t *tr = vlib_add_trace (vm, node,
							      b0, sizeof (*tr));
		tr->ai = adj_index0;
	    }
	    if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
	    {
		adj_midchain_tx_trace_t *tr = vlib_add_trace (vm, node,
							      b1, sizeof (*tr));
		tr->ai = adj_index1;
	    }
	    if (PREDICT_FALSE(b2->flags & VLIB_BUFFER_IS_TRACED))
	    {
		adj_midchain_tx_trace_t *tr = vlib_add_trace (vm, node,
							      b2, sizeof (*tr));
		tr->ai = adj_index2;
	    }
	    if (PREDICT_FALSE(b3->flags & VLIB_BUFFER_IS_TRACED))
	    {
		adj_midchain_tx_trace_t *tr = vlib_add_trace (vm, node,
							      b3, sizeof (*tr));
		tr->ai = adj_index3;
	    }

	    vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
					     to_next, n_left_to_next,
					     bi0, bi1, bi2, bi3,
					     next0, next1, next2, next3);
	}
	while (n_left_from > 0 && n_left_to_next > 0)
	{
	    u32 bi0, adj_index0, next0;
	    const ip_adjacency_t * adj0;
	    const dpo_id_t *dpo0;
	    vlib_buffer_t * b0;

	    bi0 = from[0];
	    to_next[0] = bi0;
	    from += 1;
	    to_next += 1;
	    n_left_from -= 1;
	    n_left_to_next -= 1;

	    b0 = vlib_get_buffer(vm, bi0);

	    /* Follow the DPO on which the midchain is stacked */
	    adj_index0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
	    adj0 = adj_get(adj_index0);
	    dpo0 = &adj0->sub_type.midchain.next_dpo;
	    next0 = dpo0->dpoi_next_node;
            vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

	    if (interface_count)
	    {
		vlib_increment_combined_counter (im->combined_sw_if_counters
						 + VNET_INTERFACE_COUNTER_TX,
						 thread_index,
						 adj0->rewrite_header.sw_if_index,
						 1,
						 vlib_buffer_length_in_chain (vm, b0));
	    }

	    if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
		adj_midchain_tx_trace_t *tr = vlib_add_trace (vm, node,
							      b0, sizeof (*tr));
		tr->ai = adj_index0;
	    }

	    vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					     to_next, n_left_to_next,
					     bi0, next0);
	}

	vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

    return frame->n_vectors;
}
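
adj_midchain_tx_inline() follows the usual vector-dispatch shape: prefetch buffer headers a few slots ahead, process four packets per iteration, then drain the remainder one at a time. The sketch below reproduces that loop structure on a plain array of work items using the GCC/Clang __builtin_prefetch intrinsic; the item type and the process() body are made-up stand-ins for the buffer and rewrite work.

#include <stdio.h>

typedef struct { unsigned int len; unsigned int out; } item_t;

static void process (item_t *it) { it->out = it->len * 2; }

static void run (item_t *items, int n_left)
{
  item_t *cur = items;

  /* Quad loop: keep the next 4 items warm in cache while working on 4. */
  while (n_left >= 8)
    {
      __builtin_prefetch (cur + 4);
      __builtin_prefetch (cur + 5);
      __builtin_prefetch (cur + 6);
      __builtin_prefetch (cur + 7);

      process (cur + 0);
      process (cur + 1);
      process (cur + 2);
      process (cur + 3);

      cur += 4;
      n_left -= 4;
    }

  /* Single loop: whatever is left, including the last prefetched items. */
  while (n_left > 0)
    {
      process (cur);
      cur += 1;
      n_left -= 1;
    }
}

int main (void)
{
  item_t items[11];
  for (int i = 0; i < 11; i++)
    items[i].len = i;
  run (items, 11);
  printf ("%u %u\n", items[0].out, items[10].out);   /* 0 20 */
  return 0;
}

Prefetching one iteration ahead (items 4..7 while handling 0..3) hides the header-load latency, and the scalar tail loop keeps the fast path free of bounds checks.
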
Exemplo n.º 22
0
/*
 * cnat_static_port_alloc_v2
 * public ipv4 address/port allocator for Static Port commands;
 * tries to allocate the same outside port as the inside port
 */
cnat_errno_t
cnat_static_port_alloc_v2 (
                 cnat_portmap_v2_t    *pm,
                 port_alloc_t          atype,
                 port_pair_t           pair_type,
                 u32                   i_ipv4_address,
                 u16                   i_port,
                 u32                  *index,
                 u32                  *o_ipv4_address,
                 u16                  *o_port,
                 u16                   static_port_range
#ifndef NO_BULK_LOGGING
                 , bulk_alloc_size_t    bulk_size,
                 int *nfv9_log_req
#endif 
		 , u16                   ip_n_to_1
                 )
{
    u32 i, hash_value, my_index, found, max_attempts;
    u16 start_bit, new_port;
    cnat_portmap_v2_t *my_pm = 0;
    u32 pm_len = vec_len(pm);
    uword bit_test_result;

#ifndef NO_BULK_LOGGING
    *nfv9_log_req = BULK_ALLOC_NOT_ATTEMPTED;
#endif 

    if (PREDICT_FALSE(pm_len == 0)) {
        return (CNAT_NO_POOL_ANY);
    }

    switch (atype) {

    case PORT_ALLOC_ANY:

        found = 0;

        /*
         * Try to hash the IPv4 address to get an index value to select the pm
         */
        hash_value = (i_ipv4_address & 0xffff) ^
	                ((i_ipv4_address >> 16) & 0xffff);

        /*
         * If pm_len <= 256, compact the hash to 8 bits
         */
        if (PREDICT_TRUE(pm_len <= 256)) {
            hash_value = (hash_value & 0xff) ^ ((hash_value >> 8) & 0xff);
        }

        /*
         * Ensure that the hash value is in the range 0 .. (pm_len-1)
         */
        my_index = hash_value % pm_len;

        for (i = 0; i < PORT_PROBE_LIMIT; i++) {
            my_pm = pm + my_index;
	    if(PREDICT_TRUE(ip_n_to_1)) {
		if(PREDICT_TRUE(my_pm->private_ip_users_count < ip_n_to_1)) {
		    /*
		     * Try to find a PM with at least 33% free and i_port free
		     */
		    if (PREDICT_TRUE((my_pm->inuse < ((BITS_PER_INST*2)/3)) &&
				     clib_bitmap_get_no_check(my_pm->bm, 
							      i_port) == 1) 
#ifndef NO_BULK_LOGGING
			&& check_if_stat_alloc_ok_for_bulk(my_pm, i_port,
							   bulk_size, 
							   static_port_range)
#endif 
			) {
			found = 1;
			break;
		    }
		}
		
	    } else {
		/*
		 * Try to find a PM with at least 33% free and i_port free
		 */
		if (PREDICT_TRUE((my_pm->inuse < ((BITS_PER_INST*2)/3)) &&
				 clib_bitmap_get_no_check(my_pm->bm, 
							  i_port) == 1) 
#ifndef NO_BULK_LOGGING
		    && check_if_stat_alloc_ok_for_bulk(my_pm, i_port,
						       bulk_size, 
						       static_port_range)
#endif 
                    ) {
		    found = 1;
		    break;
		}
	    }
            my_index = (my_index + 1) % pm_len;
        }

        /*
         * If not found, do it the "hard" way: best-fit.
         */
        if (!found) {
            u32 min_inuse_any, min_inuse_myport;
            u32 min_index_any, min_index_myport;

            min_inuse_any = min_inuse_myport = PORTS_PER_ADDR + 1;
            min_index_any = min_index_myport = ~0;
            for (i = 0; i < pm_len; i++) {
                my_pm = pm + i;
		if(PREDICT_TRUE(ip_n_to_1)) {
		    if(PREDICT_TRUE(my_pm->private_ip_users_count < ip_n_to_1))                     {
			if (PREDICT_FALSE(my_pm->inuse < min_inuse_any)) {
			    min_inuse_any = my_pm->inuse;
			    min_index_any = my_pm - pm;
			}
			if (PREDICT_FALSE(my_pm->inuse < min_inuse_myport)) {
			    if (PREDICT_TRUE(clib_bitmap_get_no_check(
					     my_pm->bm,i_port) == 1) 
#ifndef NO_BULK_LOGGING
				&& check_if_stat_alloc_ok_for_bulk(my_pm, 
					    i_port,bulk_size,static_port_range)
#endif 
				) {
				min_inuse_myport = my_pm->inuse;
				min_index_myport = my_pm - pm;
			    }
			}
			
		    } 
		    
		} else {
		    if (PREDICT_FALSE(my_pm->inuse < min_inuse_any)) {
			min_inuse_any = my_pm->inuse;
			min_index_any = my_pm - pm;
		    }
		    if (PREDICT_FALSE(my_pm->inuse < min_inuse_myport)) {
			if (PREDICT_TRUE(clib_bitmap_get_no_check(
					 my_pm->bm, i_port) == 1) 
#ifndef NO_BULK_LOGGING
			    && check_if_stat_alloc_ok_for_bulk(my_pm, i_port,
						 bulk_size, static_port_range)
#endif 
			    ) {
			    min_inuse_myport = my_pm->inuse;
			    min_index_myport = my_pm - pm;
			}
		    }
		}
            }

            /*
             * Check if we have an exactly matching PM that has
             * myport free.  If so use it.  If no such PM is
             * available, use any PM
             */
            if (PREDICT_TRUE(min_inuse_myport < PORTS_PER_ADDR)) {
                my_pm = pm + min_index_myport;
                my_index = min_index_myport;
                found = 1;
            } else if (PREDICT_TRUE(min_inuse_any < PORTS_PER_ADDR)) {
                my_pm = pm + min_index_any;
                my_index = min_index_any;
                found = 1;
            }
        }

        if (!found) {
            return (CNAT_NO_PORT_ANY);
        }
        break;

    case PORT_ALLOC_DIRECTED:
        my_index = *index;
        if (PREDICT_FALSE(my_index >= pm_len)) {
            return (CNAT_INV_PORT_DIRECT);
        }
        my_pm = pm + my_index;
        break;

    default:
        return (CNAT_ERR_PARSER);
    }

    /* Allocate a matching port if possible */
    start_bit = i_port;
    found = 0;
    max_attempts = BITS_PER_INST;
#ifndef NO_BULK_LOGGING
    if((BULK_ALLOC_SIZE_NONE != bulk_size) && 
        (i_port >= static_port_range)) {
        start_bit =  (start_bit/bulk_size) * bulk_size;
        max_attempts = BITS_PER_INST/bulk_size;
    }
#endif /* NO_BULK_LOGGING */

    for (i = 0; i < max_attempts; i++) {
#ifndef NO_BULK_LOGGING
        if((BULK_ALLOC_SIZE_NONE != bulk_size) &&
            (i_port >= static_port_range)) {
            bit_test_result = cgn_clib_bitmap_check_if_all(my_pm->bm, 
                        start_bit, bulk_size);
        }
        else
#endif /* #ifndef NO_BULK_LOGGING */
        bit_test_result = clib_bitmap_get_no_check(my_pm->bm, start_bit);

        if (PREDICT_TRUE(bit_test_result)) {
#ifndef NO_BULK_LOGGING
        if((BULK_ALLOC_SIZE_NONE != bulk_size) && 
            (i_port >= static_port_range)) {
            *nfv9_log_req = start_bit;
            if(i==0) new_port = i_port; /* First go */
            else {
                new_port = bit2port(start_bit);
                if (pair_type == PORT_S_ODD &&  (new_port & 0x1) == 0)
                    new_port++;                    
            }
            found = 1;
            break;
        }
        else {
#endif  /* NO_BULK_LOGGING */
            new_port = bit2port(start_bit);
            if (pair_type == PORT_S_ODD) {
                if ((new_port & 0x1) == 1) {
                    found = 1;
                    break;
                }
            } else if (pair_type == PORT_S_EVEN) {
                if ((new_port & 0x1) == 0) {
                    found = 1;
                    break;
                }
            } else {
                found = 1;
                break;
            }
#ifndef NO_BULK_LOGGING
        }
#endif 
        }
#ifndef NO_BULK_LOGGING
        if((BULK_ALLOC_SIZE_NONE != bulk_size) &&
                (i_port >= static_port_range))
            start_bit = (start_bit + bulk_size) % BITS_PER_INST;
        else {
#endif /* NO_BULK_LOGGING */
            start_bit = (start_bit + 1) % BITS_PER_INST;
            if(PREDICT_FALSE(start_bit == 0)) {
                start_bit = 1; /* Port 0 is invalid, so start from 1 */
            }
#ifndef NO_BULK_LOGGING
        }
#endif 
    } /* End of for loop */

    if (!found) {
        /* Port allocation failure */
        if (atype == PORT_ALLOC_DIRECTED) {
            return (CNAT_NOT_FOUND_DIRECT);
        } else {
            return (CNAT_NOT_FOUND_ANY);
        }
    }

    /* Accounting */
    cgn_clib_bitmap_clear_no_check(my_pm->bm, new_port);
    (my_pm->inuse)++;

    *index = my_pm - pm;
    *o_ipv4_address = my_pm->ipv4_address;

    *o_port = new_port;

    return (CNAT_SUCCESS);
}
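
The PORT_ALLOC_ANY case above spreads inside addresses across portmaps by folding the 32-bit IPv4 address with XOR (32 -> 16 bits, and further to 8 bits when the pool is small) before taking it modulo vec_len(pm). A standalone version of that fold, with a hypothetical pool size:

#include <stdio.h>
#include <stdint.h>

/* Fold a 32-bit value to 16 bits, then optionally to 8 bits, by XOR-ing
 * the halves together, and map it onto [0, pm_len). */
static uint32_t pick_portmap_index (uint32_t ipv4_address, uint32_t pm_len)
{
  uint32_t hash = (ipv4_address & 0xffff) ^ ((ipv4_address >> 16) & 0xffff);

  if (pm_len <= 256)
    hash = (hash & 0xff) ^ ((hash >> 8) & 0xff);

  return hash % pm_len;
}

int main (void)
{
  uint32_t pm_len = 100;              /* hypothetical pool of 100 addresses */
  uint32_t addr = 0x0a000102;         /* 10.0.1.2 */

  printf ("index %u\n", pick_portmap_index (addr, pm_len));   /* 9 */
  return 0;
}
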
Exemplo n.º 23
0
/*
 * Try to allocate a portmap structure based on atype field
 */
cnat_portmap_v2_t *
cnat_dynamic_addr_alloc_from_pm (
                 cnat_portmap_v2_t    *pm,
                 port_alloc_t          atype,
                 u32                  *index,
                 cnat_errno_t         *err,
                 u16                   ip_n_to_1,
                 u32                  *rseed_ip)
{
    u32 i, pm_len;
    int my_index;
    int min_inuse, min_index;

    cnat_portmap_v2_t *my_pm = 0;
    *err = CNAT_NO_POOL_ANY;

    pm_len = vec_len(pm);

    switch(atype) {
    case PORT_ALLOC_ANY:
        if (PREDICT_FALSE(pm_len == 0)) {
	    my_pm = 0;
            *err = CNAT_NO_POOL_ANY;
            goto done;
        }

        /* "Easy" way, first address with at least 200 free ports */
        for (i = 0; i < PORT_PROBE_LIMIT; i++) {
            *rseed_ip = randq1(*rseed_ip);
            my_index = (*rseed_ip) % pm_len;
            my_pm = pm + my_index;
            if (PREDICT_FALSE(ip_n_to_1)) {
		if(PREDICT_TRUE(ip_n_to_1 == 1)) {
		    if (PREDICT_FALSE(0 == my_pm->inuse)) {	
			goto done;		
		    }					    
		} else {
		    if(PREDICT_TRUE(my_pm->private_ip_users_count < ip_n_to_1))                     {
			if (PREDICT_FALSE(my_pm->inuse < ((BITS_PER_INST*2)/3)))                        {
			    goto done;
			}
		    } 
		}
            } else {
                if (PREDICT_FALSE(my_pm->inuse < ((BITS_PER_INST*2)/3))) {
                    goto done;
                }
            }
        }  

        /* "hard" way, best-fit. $$$$ Throttle complaint */
        min_inuse = PORTS_PER_ADDR + 1;
        min_index = ~0;
        for (i = 0; i < pm_len; i++) {
            my_pm = pm + i;
            if (PREDICT_FALSE(ip_n_to_1)) {
	       if(PREDICT_TRUE(ip_n_to_1 == 1)) {
		   if (PREDICT_FALSE(!my_pm->inuse)) {
		       min_inuse = my_pm->inuse;
		       min_index = my_pm - pm;
		   } 
	       } else {
		   if(PREDICT_TRUE(my_pm->private_ip_users_count < ip_n_to_1)) {
		       if (PREDICT_TRUE(my_pm->inuse < min_inuse)) {
			   min_inuse = my_pm->inuse;
			   min_index = my_pm - pm;
		       }

		   } 
	       }

            } else {
                if (PREDICT_TRUE(my_pm->inuse < min_inuse)) {
                    min_inuse = my_pm->inuse;
                    min_index = my_pm - pm;
                }
            }
        }

        if (PREDICT_TRUE(min_inuse < PORTS_PER_ADDR)) {
            my_pm = pm + min_index;
            my_index = min_index;
            goto done;
        }

        /* Completely out of ports */
#ifdef DEBUG_PRINTF_ENABLED
        PLATFORM_DEBUG_PRINT("%s out of ports\n", __FUNCTION__);
#endif

	my_pm = 0;
        *err = CNAT_NO_PORT_ANY;
        break;


    case PORT_ALLOC_DIRECTED:
        //ASSERT(*index < pm_len);
        if (PREDICT_FALSE(*index >= pm_len)) {
	    my_pm = 0;
            *err = CNAT_INV_PORT_DIRECT;
            goto done;
        }
        my_pm = pm + *index;
        my_index = *index;
        break;

    default:
        msg_spp_err("bad allocation type in cnat_port_alloc");
        my_pm = 0;
        *err = CNAT_ERR_PARSER;
        break;
    }

 done:
    if (PREDICT_FALSE(my_pm == NULL)) {
        return (my_pm);
    }

    if (PREDICT_FALSE(my_pm->inuse >= BITS_PER_INST)) {
        my_pm = 0;
        if (atype == PORT_ALLOC_DIRECTED) {
            *err = CNAT_BAD_INUSE_DIRECT;
        } else {
            *err = CNAT_BAD_INUSE_ANY;
        }
    }

    return (my_pm);
}
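
cnat_dynamic_addr_alloc_from_pm() first probes a few randomly chosen portmaps and accepts the first one that is under roughly two-thirds full, and only falls back to a full best-fit scan when the probes fail. The sketch below mirrors that two-phase pick over a plain inuse[] array; the probe limit, the 2/3 threshold constant and the tiny LCG are illustrative stand-ins for PORT_PROBE_LIMIT, BITS_PER_INST and randq1().

#include <stdio.h>

#define PROBE_LIMIT     4
#define PORTS_PER_ENTRY 64512              /* stand-in for BITS_PER_INST */

/* Tiny LCG standing in for randq1(); classic ANSI C constants. */
static unsigned int lcg (unsigned int *seed)
{
  *seed = *seed * 1103515245u + 12345u;
  return *seed >> 16;
}

/* Pick an index into inuse[0..n-1]: first a few random probes that accept
 * anything less than 2/3 full, then a best-fit scan for the least-used
 * entry.  Returns -1 if every entry is completely full. */
static int pick_entry (const unsigned int *inuse, int n, unsigned int *seed)
{
  for (int i = 0; i < PROBE_LIMIT; i++)     /* "easy" way */
    {
      int idx = lcg (seed) % n;
      if (inuse[idx] < (PORTS_PER_ENTRY * 2) / 3)
        return idx;
    }

  unsigned int min_inuse = PORTS_PER_ENTRY; /* "hard" way: best fit */
  int min_idx = -1;
  for (int i = 0; i < n; i++)
    if (inuse[i] < min_inuse)
      {
        min_inuse = inuse[i];
        min_idx = i;
      }
  return min_idx;
}

int main (void)
{
  unsigned int inuse[3] = { PORTS_PER_ENTRY, 60000, 123 };
  unsigned int seed = 42;

  printf ("picked %d\n", pick_entry (inuse, 3, &seed));   /* 2 */
  return 0;
}

Random probing keeps the common case O(1); the exhaustive scan only runs when the pool is nearly full, which is when spreading load evenly matters most.
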
Exemplo n.º 24
0
always_inline uword
bier_disp_dispatch_inline (vlib_main_t * vm,
                         vlib_node_runtime_t * node,
                         vlib_frame_t * from_frame)
{
    u32 n_left_from, next_index, * from, * to_next;

    from = vlib_frame_vector_args (from_frame);
    n_left_from = from_frame->n_vectors;

    next_index = node->cached_next_index;

    while (n_left_from > 0)
    {
        u32 n_left_to_next;

        vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

        while (n_left_from > 0 && n_left_to_next > 0)
        {
            bier_hdr_proto_id_t pproto0;
            bier_disp_entry_t *bde0;
            u32 next0, bi0, bdei0;
            const dpo_id_t *dpo0;
            vlib_buffer_t * b0;
            bier_hdr_t *hdr0;
            u32 entropy0;

            bi0 = from[0];
            to_next[0] = bi0;
            from += 1;
            to_next += 1;
            n_left_from -= 1;
            n_left_to_next -= 1;

            b0 = vlib_get_buffer (vm, bi0);
            bdei0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            hdr0 = vlib_buffer_get_current(b0);
            bde0 = bier_disp_entry_get(bdei0);
            vnet_buffer(b0)->ip.adj_index[VLIB_RX] = BIER_RX_ITF;

            /*
             * header is in network order - flip it, we are about to
             * consume it anyway
             */
            bier_hdr_ntoh(hdr0);
            pproto0 = bier_hdr_get_proto_id(hdr0);
            entropy0 = bier_hdr_get_entropy(hdr0);

            /*
             * strip the header and copy the entropy value into
             * the packets flow-hash field
             * DSCP mumble mumble...
             */
            vlib_buffer_advance(b0, (vnet_buffer(b0)->mpls.bier.n_bytes +
                                     sizeof(*hdr0)));
            vnet_buffer(b0)->ip.flow_hash = entropy0;

            /*
             * use the payload proto to dispatch to the
             * correct stacked DPO.
             */
            dpo0 = &bde0->bde_fwd[pproto0].bde_dpo;
            next0 = dpo0->dpoi_next_node;
            vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
            vnet_buffer(b0)->ip.rpf_id = bde0->bde_fwd[pproto0].bde_rpf_id;

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                bier_disp_dispatch_trace_t *tr =
                    vlib_add_trace (vm, node, b0, sizeof (*tr));
                tr->pproto = pproto0;
                tr->rpf_id = vnet_buffer(b0)->ip.rpf_id;
            }

            vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
                                            n_left_to_next, bi0, next0);
        }
        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
    return from_frame->n_vectors;
}
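
bier_disp_dispatch_inline() flips the BIER header to host order and then reads the payload protocol and entropy out of it via bier_hdr_get_proto_id()/bier_hdr_get_entropy(). Those accessors boil down to shift-and-mask on a 32-bit word; the sketch below shows that pattern with illustrative field positions (the widths and offsets here are assumptions for the example, not the exact RFC 8296 layout).

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>   /* ntohl, htonl */

/* Illustrative sub-field layout of a 32-bit header word:
 * bits 31..26 proto (6 bits), bits 19..0 entropy (20 bits). */
#define HDR_PROTO_SHIFT    26
#define HDR_PROTO_MASK     0x3f
#define HDR_ENTROPY_SHIFT  0
#define HDR_ENTROPY_MASK   0xfffff

static uint32_t hdr_get_proto (uint32_t word_net)
{
  return (ntohl (word_net) >> HDR_PROTO_SHIFT) & HDR_PROTO_MASK;
}

static uint32_t hdr_get_entropy (uint32_t word_net)
{
  return (ntohl (word_net) >> HDR_ENTROPY_SHIFT) & HDR_ENTROPY_MASK;
}

int main (void)
{
  /* Build a word with proto = 5 and entropy = 0xabcde, then read it back. */
  uint32_t word_net = htonl ((5u << HDR_PROTO_SHIFT) | 0xabcde);

  printf ("proto %u entropy 0x%x\n",
          hdr_get_proto (word_net), hdr_get_entropy (word_net));
  return 0;
}
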
Exemplo n.º 25
0
always_inline uword
bier_imp_dpo_inline (vlib_main_t * vm,
                     vlib_node_runtime_t * node,
                     vlib_frame_t * from_frame,
                     fib_protocol_t fproto,
                     bier_hdr_proto_id_t bproto)
{
    u32 n_left_from, next_index, * from, * to_next;

    from = vlib_frame_vector_args (from_frame);
    n_left_from = from_frame->n_vectors;

    next_index = node->cached_next_index;

    while (n_left_from > 0)
    {
        u32 n_left_to_next;

        vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

        while (n_left_from > 0 && n_left_to_next > 0)
        {
            vlib_buffer_t * b0;
            bier_imp_t *bimp0;
            bier_hdr_t *hdr0;
            u32 bi0, bii0;
            u32 next0;

            bi0 = from[0];
            to_next[0] = bi0;
            from += 1;
            to_next += 1;
            n_left_from -= 1;
            n_left_to_next -= 1;

            b0 = vlib_get_buffer (vm, bi0);

            bii0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            bimp0 = bier_imp_get(bii0);

            if (FIB_PROTOCOL_IP4 == fproto)
            {
                /*
                 * decrement the TTL on ingress to the BIER domain
                 */
                ip4_header_t * ip0 = vlib_buffer_get_current(b0);
                u32 checksum0;

                checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100);
                checksum0 += checksum0 >= 0xffff;

                ip0->checksum = checksum0;
                ip0->ttl -= 1;

                /*
                 * calculate an entropy
                 */
                if (0 == vnet_buffer(b0)->ip.flow_hash)
                {
                    vnet_buffer(b0)->ip.flow_hash =
                        ip4_compute_flow_hash (ip0, IP_FLOW_HASH_DEFAULT);
                }
            }
            if (FIB_PROTOCOL_IP6 == fproto)
            {
                /*
                 * decrement the TTL on ingress to the BIER domain
                 */
                ip6_header_t * ip0 = vlib_buffer_get_current(b0);

                ip0->hop_limit -= 1;

                /*
                 * calculate an entropy
                 */
                if (0 == vnet_buffer(b0)->ip.flow_hash)
                {
                    vnet_buffer(b0)->ip.flow_hash =
                        ip6_compute_flow_hash (ip0, IP_FLOW_HASH_DEFAULT);
                }
            }

            /* Paint the BIER header */
            vlib_buffer_advance(b0, -(sizeof(bier_hdr_t) +
                                      bier_hdr_len_id_to_num_bytes(bimp0->bi_tbl.bti_hdr_len)));
            hdr0 = vlib_buffer_get_current(b0);

            /* RPF check */
            if (PREDICT_FALSE(BIER_RX_ITF == vnet_buffer(b0)->ip.adj_index[VLIB_RX]))
            {
                next0 = 0;
            }
            else
            {
                clib_memcpy_fast(hdr0, &bimp0->bi_hdr,
                            (sizeof(bier_hdr_t) +
                             bier_hdr_len_id_to_num_bytes(bimp0->bi_tbl.bti_hdr_len)));
                /*
                 * Fixup the entropy and protocol, both of which have a
                 * zero value post the paint job
                 */
                hdr0->bh_oam_dscp_proto |=
                    clib_host_to_net_u16(bproto << BIER_HDR_PROTO_FIELD_SHIFT);
                hdr0->bh_first_word |=
                    clib_host_to_net_u32((vnet_buffer(b0)->ip.flow_hash &
                                          BIER_HDR_ENTROPY_FIELD_MASK) <<
                                         BIER_HDR_ENTROPY_FIELD_SHIFT);

                /*
                 * use TTL 64 for the post-encap MPLS label/BIFT-ID;
                 * this will be decremented in the bier_output node.
                 */
                vnet_buffer(b0)->mpls.ttl = 65;

                /* next node */
                next0 = bimp0->bi_dpo[fproto].dpoi_next_node;
                vnet_buffer(b0)->ip.adj_index[VLIB_TX] =
                    bimp0->bi_dpo[fproto].dpoi_index;
            }

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                bier_imp_trace_t *tr =
                    vlib_add_trace (vm, node, b0, sizeof (*tr));
                tr->imp = bii0;
                tr->hdr = *hdr0;
            }

            vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
                                            n_left_to_next, bi0, next0);
        }
        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
    return from_frame->n_vectors;
}
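
For IPv4 the code above decrements the TTL and repairs the header checksum in place: the TTL/protocol 16-bit word drops by 0x0100, so the stored checksum must rise by 0x0100 with an end-around carry, which is what the clib_host_to_net_u16 (0x0100) addition achieves. This is the constant-delta special case of the incremental update sketched after the gtpu example; a standalone check over header-order 16-bit words:

#include <stdio.h>
#include <stdint.h>

/* Recompute ~(one's-complement sum) over n 16-bit words, skipping csum_idx. */
static uint16_t csum16 (const uint16_t *w, int n, int csum_idx)
{
  uint32_t sum = 0;
  for (int i = 0; i < n; i++)
    if (i != csum_idx)
      sum += w[i];
  while (sum >> 16)
    sum = (sum & 0xffff) + (sum >> 16);
  return (uint16_t) ~sum;
}

int main (void)
{
  /* Words of a 20-byte IPv4 header; word 4 = (TTL << 8) | protocol,
   * word 5 = header checksum. */
  uint16_t w[10] = { 0x4500, 0x05dc, 0x1234, 0x4000,
                     0x4011, 0x0000, 0x0a00, 0x0001,
                     0x0a00, 0x0002 };
  w[5] = csum16 (w, 10, 5);

  /* Decrement TTL: the TTL/protocol word drops by 0x0100, so the stored
   * checksum rises by 0x0100, with an end-around carry if it wraps. */
  w[4] -= 0x0100;
  uint32_t c = (uint32_t) w[5] + 0x0100;
  c += c >= 0xffff;                 /* end-around carry */
  w[5] = (uint16_t) c;

  printf ("adjusted 0x%04x recomputed 0x%04x\n", w[5], csum16 (w, 10, 5));
  return 0;
}
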
Exemplo n.º 26
0
/*
 * cnat_alloc_port_from_pm
 * Given a portmap structure find port/port_pair that are free
 *
 * The assumption in this function is that bit in bm corresponds
 * to a port number.   This is TRUE and hence there is no call
 * to the function bit2port here, though it is done in other 
 * places in this file.
 *
 */
static u32
cnat_alloc_port_from_pm (
    u32 start_port,
    u32 end_port,
    cnat_portmap_v2_t *my_pm,
    port_pair_t       pair_type
#ifndef NO_BULK_LOGGING
    , bulk_alloc_size_t    bulk_size,
    int                  *nfv9_log_req
#endif /* #ifnded NO_BULK_ALLOCATION */
    )
{
    u32 i;
    u32 start_bit;
    u32 total_ports = end_port - start_port + 1;
    uword bit_test_result;
    uword max_trys_to_find_port;

    rseed_port = randq1(rseed_port);

    start_bit = rseed_port % total_ports;
    start_bit = start_bit + start_port;
#ifndef NO_BULK_LOGGING
    *nfv9_log_req = BULK_ALLOC_NOT_ATTEMPTED;
    if(BULK_ALLOC_SIZE_NONE != bulk_size)
    {
        /* We need the start port of the range to be aligned on an integer multiple
         * of bulk_size */
        max_trys_to_find_port = total_ports/bulk_size;
        start_bit= ((start_bit + bulk_size -1)/bulk_size) * bulk_size;
    }
    else
#endif /* #ifndef NO_BULK_LOGGING */
    max_trys_to_find_port = total_ports;

    /* Allocate a random port / port-pair */
    for (i = 0; i < max_trys_to_find_port; i++) {
        /* start_bit can walk past end_port (or below start_port); wrap it back into the range */
        if (PREDICT_FALSE((start_bit >= end_port) ||
                    (start_bit < start_port))) {
                    start_bit = start_port;
#ifndef NO_BULK_LOGGING
            if(BULK_ALLOC_SIZE_NONE != bulk_size) {
                start_bit= ((start_bit + bulk_size -1)/bulk_size) * bulk_size;
            }
#endif /* #ifndef NO_BULK_LOGGING */
        }

        /* Scan forward from random position */
#ifndef NO_BULK_LOGGING
        if(BULK_ALLOC_SIZE_NONE != bulk_size) {
            bit_test_result = cgn_clib_bitmap_check_if_all(my_pm->bm,
            start_bit, bulk_size);
        }
        else
#endif /* #ifndef NO_BULK_LOGGING */
            bit_test_result = clib_bitmap_get_no_check(my_pm->bm, start_bit);
        
            if (PREDICT_TRUE(bit_test_result)) {
#ifndef NO_BULK_LOGGING
                if(BULK_ALLOC_SIZE_NONE != bulk_size) {
                    /* Got the entire bulk range */
                    *nfv9_log_req = bit2port(start_bit);
                    return start_bit;
                } else { 
#endif /* #ifndef NO_BULK_LOGGING */
		        /*
		         * For PORT_PAIR, first port has to be Even
		         * subsequent port <= end_port
		         * subsequent port should be unallocated
		         */
                if ((start_bit & 0x1) ||
                    ((start_bit + 1) > end_port) ||
		            (clib_bitmap_get_no_check(my_pm->bm,
		                    (start_bit + 1)) == 0)) {
                        goto notfound;
                }
                return (start_bit);
#ifndef NO_BULK_LOGGING
            }
#endif /* #ifndef NO_BULK_LOGGING */
        } /* if( free port found ) */

notfound:
#ifndef NO_BULK_LOGGING
        if(BULK_ALLOC_SIZE_NONE != bulk_size) {
            start_bit += bulk_size;
        } else
#endif /* #ifndef NO_BULK_LOGGING */
            start_bit++;

    }
    return (BITS_PER_INST);
}
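
cnat_alloc_port_from_pm() picks a random starting bit, rounds it up to a bulk_size boundary when bulk allocation is in use, and then walks the bitmap with wraparound until it finds a free port. A simplified sketch of that scan, assuming a byte-per-port free map instead of the clib bitmap and ignoring the port-pair rules:

#include <stdio.h>

#define NUM_PORTS 16

/* Round v up to the next multiple of m (m > 0). */
static unsigned int round_up (unsigned int v, unsigned int m)
{
  return ((v + m - 1) / m) * m;
}

/* Scan free_map[start_port..end_port] starting at a (possibly rounded)
 * random position, wrapping around once; return a free port or 0 if none. */
static unsigned int alloc_port (const unsigned char *free_map,
                                unsigned int start_port, unsigned int end_port,
                                unsigned int random_start, unsigned int bulk)
{
  unsigned int total = end_port - start_port + 1;
  unsigned int bit = start_port + (random_start % total);

  if (bulk > 1)
    bit = round_up (bit, bulk);

  for (unsigned int i = 0; i < total; i++)
    {
      if (bit > end_port || bit < start_port)   /* wrap back into the range */
        bit = bulk > 1 ? round_up (start_port, bulk) : start_port;
      if (free_map[bit])
        return bit;
      bit += bulk > 1 ? bulk : 1;
    }
  return 0;    /* allocation failure */
}

int main (void)
{
  unsigned char free_map[NUM_PORTS] = { 0 };
  free_map[6] = free_map[11] = 1;       /* only ports 6 and 11 are free */

  printf ("%u\n", alloc_port (free_map, 1, NUM_PORTS - 1, 9, 1));   /* 11 */
  printf ("%u\n", alloc_port (free_map, 1, NUM_PORTS - 1, 13, 1));  /* wraps, 6 */
  return 0;
}
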
Exemplo n.º 27
0
/*
 * ip6_sixrd
 */
static uword
ip6_sixrd (vlib_main_t *vm,
	   vlib_node_runtime_t *node,
	   vlib_frame_t *frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip6_sixrd_node.index);
  u32 encap = 0;
  from = vlib_frame_vector_args(frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0) {
    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

    while (n_left_from > 0 && n_left_to_next > 0) {
      u32 pi0;
      vlib_buffer_t *p0;
      sixrd_domain_t *d0;
      u8 error0 = SIXRD_ERROR_NONE;
      ip6_header_t *ip60;
      ip4_header_t *ip4h0;
      u32 next0 = IP6_SIXRD_NEXT_IP4_LOOKUP;
      u32 sixrd_domain_index0 = ~0;

      pi0 = to_next[0] = from[0];
      from += 1;
      n_left_from -= 1;
      to_next +=1;
      n_left_to_next -= 1;

      p0 = vlib_get_buffer(vm, pi0);
      ip60 = vlib_buffer_get_current(p0);
      //      p0->current_length = clib_net_to_host_u16(ip40->length);
      d0 = ip6_sixrd_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX], &sixrd_domain_index0);
      ASSERT(d0);

      /* SIXRD calc */
      u64 dal60 = clib_net_to_host_u64(ip60->dst_address.as_u64[0]);
      u32 da40 = sixrd_get_addr(d0, dal60);
      u16 len = clib_net_to_host_u16(ip60->payload_length) + 60;
      if (da40 == 0) error0 = SIXRD_ERROR_UNKNOWN;

      /* construct ipv4 header */
      vlib_buffer_advance(p0, - (sizeof(ip4_header_t)));
      ip4h0 = vlib_buffer_get_current(p0);
      vnet_buffer(p0)->sw_if_index[VLIB_TX] = (u32)~0;
      ip4h0->ip_version_and_header_length = 0x45;
      ip4h0->tos = 0;
      ip4h0->length = clib_host_to_net_u16(len);
      ip4h0->fragment_id = 0;
      ip4h0->flags_and_fragment_offset = 0;
      ip4h0->ttl = 0x40;
      ip4h0->protocol = IP_PROTOCOL_IPV6;
      ip4h0->src_address = d0->ip4_src;
      ip4h0->dst_address.as_u32 = clib_host_to_net_u32(da40);
      ip4h0->checksum = ip4_header_checksum(ip4h0);

      next0 = error0 == SIXRD_ERROR_NONE ? IP6_SIXRD_NEXT_IP4_LOOKUP : IP6_SIXRD_NEXT_DROP;

      if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
	sixrd_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
	tr->sixrd_domain_index = sixrd_domain_index0;
      }

      p0->error = error_node->errors[error0];
      if (PREDICT_TRUE(error0 == SIXRD_ERROR_NONE)) encap++;

      vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, next0);
    }
    vlib_put_next_frame(vm, node, next_index, n_left_to_next);  
  }
  vlib_node_increment_counter(vm, ip6_sixrd_node.index, SIXRD_ERROR_ENCAPSULATED, encap);

  return frame->n_vectors;
}
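
The step that makes 6RD stateless is sixrd_get_addr(): the IPv4 tunnel destination is carved out of the bits of the IPv6 destination that follow the configured 6RD prefix (RFC 5969), and the new total length is simply the IPv6 payload plus 40 bytes of IPv6 header plus 20 bytes of new IPv4 header, hence the "+ 60". Below is a simplified sketch of the address extraction for the easy case where the whole 32-bit IPv4 address is embedded directly after the prefix; the real function also folds in a shared IPv4 prefix when only part of the address is embedded.

#include <stdio.h>
#include <stdint.h>

/* dal64: the most significant 64 bits of the IPv6 destination, in host
 * byte order.  prefix_len: length of the 6RD IPv6 prefix in bits.
 * Assumes the whole 32-bit IPv4 address is embedded right after the
 * prefix, i.e. prefix_len + 32 <= 64. */
static uint32_t sixrd_embedded_ip4 (uint64_t dal64, unsigned int prefix_len)
{
  return (uint32_t) (dal64 >> (64 - prefix_len - 32));
}

int main (void)
{
  /* 6RD prefix 2001:db8::/32 with 192.0.2.1 embedded:
   * the high 64 bits of the destination are 2001:0db8:c000:0201. */
  uint64_t dal64 = 0x20010db8c0000201ull;
  uint32_t ip4 = sixrd_embedded_ip4 (dal64, 32);

  printf ("%u.%u.%u.%u\n",
          (ip4 >> 24) & 0xff, (ip4 >> 16) & 0xff,
          (ip4 >> 8) & 0xff, ip4 & 0xff);        /* 192.0.2.1 */
  return 0;
}
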
Exemplo n.º 28
0
cnat_errno_t
cnat_dynamic_port_alloc_rtsp (
                            cnat_portmap_v2_t *pm,
                            port_alloc_t       atype,
                            port_pair_t        pair_type,
                            u16                start_range,
                            u16                end_range,
                            u32               *index,
                            u32               *o_ipv4_address,
                            u16               *o_port
#ifndef NO_BULK_LOGGING
                            , bulk_alloc_size_t bulk_size,
                            int *nfv9_log_req
#endif
                            , u32               *rseed_ip
            )
{

    u32 current_timestamp;
    cnat_errno_t       my_err = CNAT_NO_POOL_ANY;
    cnat_portmap_v2_t *my_pm = 0;
    u32 alloc_bit;

    ASSERT(index);
    ASSERT(o_ipv4_address);
    ASSERT(o_port);

    my_pm = cnat_dynamic_addr_alloc_from_pm(pm, atype, index, &my_err, 0,rseed_ip);

    if (PREDICT_FALSE(my_pm == NULL)) {
        return (my_err);
    }

#if DEBUG > 1
    PLATFORM_DEBUG_PRINT("ALLOC_PORT_V2: My_Instance_Number %d: IP addr 0x%x, Inuse %d\n",
           my_instance_number, my_pm->ipv4_address, my_pm->inuse);
#endif

    alloc_bit = 
	cnat_alloc_port_from_pm(start_range, end_range, my_pm, pair_type
#ifndef NO_BULK_LOGGING
         , bulk_size, nfv9_log_req
#endif /* #ifndef NO_BULK_LOGGING */
        );

    if (alloc_bit < BITS_PER_INST) {
	if (pair_type == PORT_PAIR) {
	    /* Accounting */
	    cgn_clib_bitmap_clear_no_check (my_pm->bm, alloc_bit);
	    cgn_clib_bitmap_clear_no_check (my_pm->bm, alloc_bit+1);
	    (my_pm->inuse) += 2;
	} else {
	    /* Accounting */
	    cgn_clib_bitmap_clear_no_check (my_pm->bm, alloc_bit);
	    (my_pm->inuse)++;
	}

	*index = my_pm - pm;
	*o_ipv4_address = my_pm->ipv4_address;

	*o_port = bit2port(alloc_bit);

	return (CNAT_SUCCESS);
    }

    /* Completely out of ports */
    current_timestamp = spp_trace_log_get_unix_time_in_seconds();
    if (PREDICT_FALSE((current_timestamp - my_pm->last_sent_timestamp) >
                1000)) {
        spp_printf(CNAT_NO_EXT_PORT_AVAILABLE, 0, NULL);
        my_pm->last_sent_timestamp = current_timestamp;
    }


    /* Port allocation failure */
    if (atype == PORT_ALLOC_DIRECTED) {
        return (CNAT_NOT_FOUND_DIRECT);
    } else {
        return (CNAT_NOT_FOUND_ANY);
    }
}
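
On pool exhaustion the function above logs CNAT_NO_EXT_PORT_AVAILABLE at most once per 1000 seconds by remembering last_sent_timestamp in the portmap. The throttle itself is tiny; here is a sketch using time(NULL) and a count of suppressed messages as stand-ins for spp_trace_log_get_unix_time_in_seconds() and the real logging call.

#include <stdio.h>
#include <time.h>

typedef struct
{
  time_t last_sent;          /* 0 = never sent */
  unsigned long suppressed;  /* messages dropped since the last one sent */
} throttle_t;

/* Return 1 if the caller should emit the message now, 0 if it is throttled. */
static int throttle_should_log (throttle_t *t, time_t now, time_t interval)
{
  if (t->last_sent == 0 || now - t->last_sent > interval)
    {
      t->last_sent = now;
      t->suppressed = 0;
      return 1;
    }
  t->suppressed++;
  return 0;
}

int main (void)
{
  throttle_t t = { 0, 0 };
  time_t now = time (NULL);

  for (int i = 0; i < 5; i++)
    if (throttle_should_log (&t, now, 1000))
      printf ("no external port available (suppressed so far: %lu)\n",
              t.suppressed);

  printf ("%lu messages suppressed\n", t.suppressed);   /* 4 */
  return 0;
}
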
Exemplo n.º 29
0
static void
lisp_gpe_increment_stats_counters (lisp_cp_main_t * lcm, ip_adjacency_t * adj,
				   vlib_buffer_t * b)
{
  lisp_gpe_main_t *lgm = vnet_lisp_gpe_get_main ();
  lisp_gpe_adjacency_t *ladj;
  ip_address_t rloc;
  index_t lai;
  u32 si, di;
  gid_address_t src, dst;
  uword *feip;

  ip46_address_to_ip_address (&adj->sub_type.nbr.next_hop, &rloc);
  si = vnet_buffer (b)->sw_if_index[VLIB_TX];
  lai = lisp_adj_find (&rloc, si);
  ASSERT (INDEX_INVALID != lai);

  ladj = pool_elt_at_index (lisp_adj_pool, lai);

  u8 *lisp_data = (u8 *) vlib_buffer_get_current (b);

  /* skip IP header */
  if (is_v4_packet (lisp_data))
    lisp_data += sizeof (ip4_header_t);
  else
    lisp_data += sizeof (ip6_header_t);

  /* skip UDP header */
  lisp_data += sizeof (udp_header_t);
  // TODO: skip TCP?

  /* skip LISP GPE header */
  lisp_data += sizeof (lisp_gpe_header_t);

  i16 saved_current_data = b->current_data;
  b->current_data = lisp_data - b->data;

  lisp_afi_e afi = lisp_afi_from_vnet_link_type (adj->ia_link);
  get_src_and_dst_eids_from_buffer (lcm, b, &src, &dst, afi);
  b->current_data = saved_current_data;
  di = gid_dictionary_sd_lookup (&lcm->mapping_index_by_gid, &dst, &src);
  if (PREDICT_FALSE (~0 == di))
    {
      clib_warning ("dst mapping not found (%U, %U)", format_gid_address,
		    &src, format_gid_address, &dst);
      return;
    }

  feip = hash_get (lcm->fwd_entry_by_mapping_index, di);
  if (PREDICT_FALSE (!feip))
    return;

  lisp_stats_key_t key;
  clib_memset (&key, 0, sizeof (key));
  key.fwd_entry_index = feip[0];
  key.tunnel_index = ladj->tunnel_index;

  uword *p = hash_get_mem (lgm->lisp_stats_index_by_key, &key);
  ASSERT (p);

  /* compute payload length starting after GPE */
  u32 bytes = b->current_length - (lisp_data - b->data - b->current_data);
  vlib_increment_combined_counter (&lgm->counters, vlib_get_thread_index (),
				   p[0], 1, bytes);
}
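
The stats helper above advances lisp_data past the outer IP, UDP and LISP-GPE headers and then charges the counters with current_length minus the distance advanced from the packet's current position. Below is a standalone sketch of that pointer arithmetic over a flat buffer; the 20/40, 8 and 8 byte header sizes mirror IPv4/IPv6, UDP and the GPE shim, while the buffer struct is a made-up simplification of vlib_buffer_t.

#include <stdio.h>
#include <stdint.h>

typedef struct
{
  uint8_t  *data;            /* start of buffer memory */
  int16_t   current_data;    /* offset of the current packet start */
  uint16_t  current_length;  /* bytes from current_data to end of packet */
} buf_t;

/* Bytes of payload that follow the outer IP + UDP + 8-byte GPE headers. */
static uint32_t inner_payload_bytes (const buf_t *b, int outer_is_ip4)
{
  const uint8_t *p = b->data + b->current_data;      /* current packet start */

  p += outer_is_ip4 ? 20 : 40;                       /* outer IP header */
  p += 8;                                            /* UDP header */
  p += 8;                                            /* LISP-GPE header */

  /* Same expression as the node: length minus how far we advanced. */
  return b->current_length - (uint32_t) (p - b->data - b->current_data);
}

int main (void)
{
  uint8_t storage[256] = { 0 };
  buf_t b = { storage, 64, 150 };  /* packet starts 64 bytes in, 150 bytes long */

  printf ("%u\n", inner_payload_bytes (&b, 1));   /* 150 - 36 = 114 */
  printf ("%u\n", inner_payload_bytes (&b, 0));   /* 150 - 56 = 94  */
  return 0;
}
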
Exemplo n.º 30
0
/*
 * cnat_mapped_static_port_alloc_v2
 */
cnat_errno_t
cnat_mapped_static_port_alloc_v2 (
             cnat_portmap_v2_t    *pm, 
		     port_alloc_t         atype, 
		     u32                  *index,
		     u32                   ipv4_address,
		     u16                   port
#ifndef NO_BULK_LOGGING
            , int *nfv9_log_req,
            bulk_alloc_size_t bulk_size
#endif
	    , u16                   ip_n_to_1
                )
{
    int i;
    u32 pm_len;
    u16 bm_bit;
    cnat_portmap_v2_t *my_pm = 0;
    u32 my_index;

    ASSERT(index);

    /*
     * Map the port to the bit in the pm bitmap structure.
     * Note that we use ports from 1024..65535, so 
     * port number x corresponds to (x-1024) position in bitmap
     */
    bm_bit = port2bit(port);

    pm_len = vec_len(pm);

    switch(atype) {
    case PORT_ALLOC_ANY:
        if (PREDICT_FALSE(pm_len == 0)) {
            return (CNAT_NO_POOL_ANY);
        }

	    /*
	     * Find the pm that is allocated for this translated IP address
	     */
	    my_index = pm_len;

        for (i = 0; i < pm_len; i++) {
	        my_pm = pm + i;
	        if (PREDICT_FALSE(my_pm->ipv4_address == ipv4_address)) {
		        my_index = i;
		        break;
	        }
	    }

	    if ((PREDICT_FALSE(my_index >= pm_len)) || 
		((PREDICT_FALSE(ip_n_to_1)) && (PREDICT_TRUE(my_pm->private_ip_users_count >= ip_n_to_1)))) {
		return (CNAT_NO_POOL_ANY);
	    }

	    break;

    case PORT_ALLOC_DIRECTED:
        if (PREDICT_FALSE(*index >= pm_len)) {
            return (CNAT_INV_PORT_DIRECT);
        }

        my_index = *index;
        my_pm = pm + my_index;
        if (PREDICT_FALSE(my_pm->ipv4_address != ipv4_address)) {
            if (PREDICT_FALSE(global_debug_flag && CNAT_DEBUG_GLOBAL_ALL)) { 
                PLATFORM_DEBUG_PRINT("Delete all main db entry for that particular in ipv4 address\n");
            }
            return (CNAT_INV_PORT_DIRECT);
        }
        
        break;

    default:
        msg_spp_err("bad allocation type in cnat_port_alloc");
        return (CNAT_ERR_PARSER);
    }


    if (PREDICT_FALSE(my_pm == NULL)) {
	    return (CNAT_NO_POOL_ANY);
    }

    /*
     * Check if the port is already allocated to some other mapping
     */
    if (PREDICT_FALSE(clib_bitmap_get_no_check (my_pm->bm, bm_bit) == 0)) {
	    return (CNAT_NO_POOL_ANY);
    }

#if DEBUG > 1
    PLATFORM_DEBUG_PRINT("ALLOC_PORT_V2: My_Instance_Number %d: IP addr 0x%x, Inuse %d\n",
           my_instance_number, my_pm->ipv4_address, my_pm->inuse);
#endif

    /*
     * Indicate that the port is already allocated
     */
    cgn_clib_bitmap_clear_no_check (my_pm->bm, bm_bit);
    (my_pm->inuse)++;

    *index = my_index;

    return (CNAT_SUCCESS);
}
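
The comment at the top of this last allocator states the convention the whole family relies on: dynamic ports run from 1024 to 65535, port x lives at bit (x - 1024) of the per-address bitmap, and a set bit means the port is free (allocation clears it and bumps inuse). Under that stated convention, minimal port2bit/bit2port helpers and a claim/release pair over a byte-per-bit map might look like this; the real code uses clib bitmaps and also maintains the inuse counter.

#include <stdio.h>

#define CNAT_PORT_BASE 1024
#define CNAT_NUM_PORTS (65536 - CNAT_PORT_BASE)

static unsigned int port2bit (unsigned int port) { return port - CNAT_PORT_BASE; }
static unsigned int bit2port (unsigned int bit)  { return bit + CNAT_PORT_BASE; }

/* free_map[bit] == 1 means the port is available (mirrors the bitmap sense
 * used above, where a set bit is cleared on allocation). */
static int claim_port (unsigned char *free_map, unsigned int port)
{
  unsigned int bit = port2bit (port);
  if (!free_map[bit])
    return -1;              /* already allocated to some other mapping */
  free_map[bit] = 0;
  return 0;
}

static void release_port (unsigned char *free_map, unsigned int port)
{
  free_map[port2bit (port)] = 1;
}

int main (void)
{
  static unsigned char free_map[CNAT_NUM_PORTS];
  for (unsigned int b = 0; b < CNAT_NUM_PORTS; b++)
    free_map[b] = 1;

  printf ("claim 8080: %d\n", claim_port (free_map, 8080));   /* 0  */
  printf ("claim 8080: %d\n", claim_port (free_map, 8080));   /* -1 */
  release_port (free_map, 8080);
  printf ("bit %u is port %u\n", port2bit (8080), bit2port (port2bit (8080)));
  return 0;
}
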