Example no. 1
    unsigned int CompressedSet::Iterator::nextDoc(){

        // if the cursor has moved past the last docId in the set
        if(PREDICT_FALSE(++cursor == totalDocIdNum)) {
          lastAccessedDocId = NO_MORE_DOCS;
        } else {
             int iterBlockIndex = cursor >> BLOCK_SIZE_BIT;
             int offset = cursor & BLOCK_SIZE_MODULO;
             if( iterBlockIndex == compBlockNum  ) {
                 lastAccessedDocId = set->currentNoCompBlock[offset];
             } else { 
                 if (PREDICT_TRUE(offset)){
                    //lastAccessedDocId = iterDecompBlock[offset];
                    #ifdef PREFIX_SUM
                       lastAccessedDocId += (iterDecompBlock[offset]);
                    #else
                       lastAccessedDocId = iterDecompBlock[offset];
                    #endif  
                 } else {
                    // (offset==0) must be in one of the compressed blocks
                    Source src = set->sequenceOfCompBlocks.get(iterBlockIndex).getSource();
                    set->codec.Uncompress(src, &iterDecompBlock[0], DEFAULT_BATCH_SIZE);
                    #ifndef PREFIX_SUM
                      // postProcessBlock not needed if using integrated delta coding
                      // postProcessBlock(&iterDecompBlock[0], DEFAULT_BATCH_SIZE);
                    #endif       
                    // assert(uncompSize == DEFAULT_BATCH_SIZE);
                     lastAccessedDocId = iterDecompBlock[0];
                 }
             }
        }
        return lastAccessedDocId;
    }
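A note on the hint macros used throughout these examples: PREDICT_TRUE / PREDICT_FALSE are conventionally thin wrappers over GCC's __builtin_expect. A minimal sketch (the exact definitions in the original headers may differ):

#ifdef __GNUC__
#define PREDICT_TRUE(x)  __builtin_expect((x), 1)
#define PREDICT_FALSE(x) __builtin_expect((x), 0)
#else
#define PREDICT_TRUE(x)  (x)
#define PREDICT_FALSE(x) (x)
#endif

The hint only steers static branch layout; a wrong hint can cost more than no hint, which is why these examples reserve PREDICT_FALSE for genuinely rare paths (errors, wrap-around, traced buffers).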
Example no. 2
static inline u32
is_pcp_pkt(spp_ctx_t *ctx, u32 addr, u16 port)
{
    cnat_vrfmap_t *my_vrfmap = NULL;
    u16  my_vrfmap_index;

    my_vrfmap_index = vrf_map_array[ctx->ru.rx.uidb_index];

    if (PREDICT_TRUE(my_vrfmap_index != VRF_MAP_ENTRY_EMPTY)) {

      my_vrfmap = cnat_map_by_vrf + my_vrfmap_index;

      if (PREDICT_FALSE( port ==  my_vrfmap->pcp_server_port)) {
             if(PREDICT_TRUE(addr == my_vrfmap->pcp_server_addr)) {
               return CNAT_SUCCESS;
             }
      }
    }

    return CNAT_NO_CONFIG;
}
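A hypothetical call site for the check above (the names here are illustrative, not from the source): the port comparison is the PREDICT_FALSE fast-path filter, so ordinary traffic falls through with a single predicted-not-taken branch.

/* illustrative only */
if (PREDICT_FALSE(is_pcp_pkt(ctx, dst_addr, dst_port) == CNAT_SUCCESS)) {
    /* divert to PCP handling; the common case continues below */
}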
Example no. 3
void cnat_portmap_dump_v2 (cnat_portmap_v2_t *pm, u16 print_limit)
{
    int i;
    u32 inuse =0;

    ASSERT(pm);

    for (i = 0; i < BITS_PER_INST; i++) {
        if (PREDICT_FALSE(clib_bitmap_get_no_check (pm->bm, i) == 0)) {
            if (PREDICT_TRUE(inuse++ < print_limit))
                PLATFORM_DEBUG_PRINT(" %d", bit2port(i));
        }
    }
    if (PREDICT_FALSE(inuse >= print_limit)) {
        PLATFORM_DEBUG_PRINT("%d printed, print limit is %d\n",
                inuse, print_limit);
    }
    PLATFORM_DEBUG_PRINT("\n");
}
Example no. 4
  /**
   * Add document to this set
   * Note that you must set the bits in increasing order:
   * addDoc(1), addDoc(2) is ok;
   * addDoc(2), addDoc(1) is not ok.
   */
  void CompressedSet::addDoc(unsigned int docId) {
    if (PREDICT_TRUE(sizeOfCurrentNoCompBlock != DEFAULT_BATCH_SIZE)) {
       currentNoCompBlock.resize(sizeOfCurrentNoCompBlock+1);
       currentNoCompBlock[sizeOfCurrentNoCompBlock++] = docId;
    } else {
        //the last docId of the block
        baseListForOnlyCompBlocks.push_back(currentNoCompBlock[sizeOfCurrentNoCompBlock-1]);

        // compress currentNoCompBlock[] (excluding the input docId),
        shared_ptr<CompressedDeltaChunk> compRes = PForDeltaCompressCurrentBlock();
        sequenceOfCompBlocks.add(compRes);

        // next block
        sizeOfCurrentNoCompBlock = 1;
        currentNoCompBlock.resize(1);
        currentNoCompBlock[0] = docId;
    }
    totalDocIdNum++;
  }
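A minimal usage sketch of the ordering contract documented above (assuming CompressedSet is default-constructible; illustrative only):

CompressedSet set;
set.addDoc(1);    // ok: docIds strictly increasing
set.addDoc(2);    // ok
// set.addDoc(1); // not ok: smaller than the last docId added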
Example no. 5
/*
 * ip6_sixrd
 */
static uword
ip6_sixrd (vlib_main_t *vm,
	   vlib_node_runtime_t *node,
	   vlib_frame_t *frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip6_sixrd_node.index);
  u32 encap = 0;
  from = vlib_frame_vector_args(frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0) {
    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

    while (n_left_from > 0 && n_left_to_next > 0) {
      u32 pi0;
      vlib_buffer_t *p0;
      sixrd_domain_t *d0;
      u8 error0 = SIXRD_ERROR_NONE;
      ip6_header_t *ip60;
      ip4_header_t *ip4h0;
      u32 next0 = IP6_SIXRD_NEXT_IP4_LOOKUP;
      u32 sixrd_domain_index0 = ~0;

      pi0 = to_next[0] = from[0];
      from += 1;
      n_left_from -= 1;
      to_next += 1;
      n_left_to_next -= 1;

      p0 = vlib_get_buffer(vm, pi0);
      ip60 = vlib_buffer_get_current(p0);
      //      p0->current_length = clib_net_to_host_u16(ip40->length);
      d0 = ip6_sixrd_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX], &sixrd_domain_index0);
      ASSERT(d0);

      /* SIXRD calc */
      u64 dal60 = clib_net_to_host_u64(ip60->dst_address.as_u64[0]);
      u32 da40 = sixrd_get_addr(d0, dal60);
      u16 len = clib_net_to_host_u16(ip60->payload_length) + 60; /* IPv6 hdr (40) + new IPv4 hdr (20) */
      if (da40 == 0) error0 = SIXRD_ERROR_UNKNOWN;

      /* construct ipv4 header */
      vlib_buffer_advance(p0, - (sizeof(ip4_header_t)));
      ip4h0 = vlib_buffer_get_current(p0);
      vnet_buffer(p0)->sw_if_index[VLIB_TX] = (u32)~0;
      ip4h0->ip_version_and_header_length = 0x45;
      ip4h0->tos = 0;
      ip4h0->length = clib_host_to_net_u16(len);
      ip4h0->fragment_id = 0;
      ip4h0->flags_and_fragment_offset = 0;
      ip4h0->ttl = 0x40;
      ip4h0->protocol = IP_PROTOCOL_IPV6;
      ip4h0->src_address = d0->ip4_src;
      ip4h0->dst_address.as_u32 = clib_host_to_net_u32(da40);
      ip4h0->checksum = ip4_header_checksum(ip4h0);

      next0 = error0 == SIXRD_ERROR_NONE ? IP6_SIXRD_NEXT_IP4_LOOKUP : IP6_SIXRD_NEXT_DROP;

      if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
	sixrd_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
	tr->sixrd_domain_index = sixrd_domain_index0;
      }

      p0->error = error_node->errors[error0];
      if (PREDICT_TRUE(error0 == SIXRD_ERROR_NONE)) encap++;

      vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, next0);
    }
    vlib_put_next_frame(vm, node, next_index, n_left_to_next);  
  }
  vlib_node_increment_counter(vm, ip6_sixrd_node.index, SIXRD_ERROR_ENCAPSULATED, encap);

  return frame->n_vectors;
}
Example no. 6
/*
 * cnat_mapped_static_port_alloc_v2
 */
cnat_errno_t
cnat_mapped_static_port_alloc_v2 (
             cnat_portmap_v2_t    *pm,
             port_alloc_t          atype,
             u32                  *index,
             u32                   ipv4_address,
             u16                   port
#ifndef NO_BULK_LOGGING
             , int                *nfv9_log_req,
             bulk_alloc_size_t     bulk_size
#endif
             , u16                 ip_n_to_1
             )
{
    int i;
    u32 pm_len;
    u16 bm_bit;
    cnat_portmap_v2_t *my_pm = 0;
    u32 my_index;

    ASSERT(index);

    /*
     * Map the port to the bit in the pm bitmap structure.
     * Note that we use ports from 1024..65535, so 
     * port number x corresponds to (x-1024) position in bitmap
     */
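    /*
     * Illustrative sketch (not from this file): under the mapping described
     * above, the helpers would reduce to something like
     *     static inline u16 port2bit (u16 port) { return port - 1024; }
     *     static inline u16 bit2port (u16 bit)  { return bit + 1024; }
     * assuming the pool covers ports 1024..65535.
     */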
    bm_bit = port2bit(port);

    pm_len = vec_len(pm);

    switch(atype) {
    case PORT_ALLOC_ANY:
        if (PREDICT_FALSE(pm_len == 0)) {
            return (CNAT_NO_POOL_ANY);
        }

        /*
         * Find the pm that is allocated for this translated IP address
         */
        my_index = pm_len;

        for (i = 0; i < pm_len; i++) {
            my_pm = pm + i;
            if (PREDICT_FALSE(my_pm->ipv4_address == ipv4_address)) {
                my_index = i;
                break;
            }
        }

        if ((PREDICT_FALSE(my_index >= pm_len)) ||
            ((PREDICT_FALSE(ip_n_to_1)) &&
             (PREDICT_TRUE(my_pm->private_ip_users_count >= ip_n_to_1)))) {
            return (CNAT_NO_POOL_ANY);
        }

        break;

    case PORT_ALLOC_DIRECTED:
        if (PREDICT_FALSE(*index >= pm_len)) {
            return (CNAT_INV_PORT_DIRECT);
        }

        my_index = *index;
        my_pm = pm + my_index;
        if (PREDICT_FALSE(my_pm->ipv4_address != ipv4_address)) {
            if (PREDICT_FALSE(global_debug_flag && CNAT_DEBUG_GLOBAL_ALL)) { 
                PLATFORM_DEBUG_PRINT("Delete all main db entry for that particular in ipv4 address\n");
            }
            return (CNAT_INV_PORT_DIRECT);
        }
        
        break;

    default:
        msg_spp_err("bad allocation type in cnat_port_alloc");
        return (CNAT_ERR_PARSER);
    }


    if (PREDICT_FALSE(my_pm == NULL)) {
	    return (CNAT_NO_POOL_ANY);
    }

    /*
     * Check if the port is already allocated to some other mapping
     */
    if (PREDICT_FALSE(clib_bitmap_get_no_check (my_pm->bm, bm_bit) == 0)) {
	    return (CNAT_NO_POOL_ANY);
    }

#if DEBUG > 1
    PLATFORM_DEBUG_PRINT("ALLOC_PORT_V2: My_Instance_Number %d: IP addr 0x%x, Inuse %d\n",
           my_instance_number, my_pm->ipv4_address, my_pm->inuse);
#endif

    /*
     * Indicate that the port is already allocated
     */
    cgn_clib_bitmap_clear_no_check (my_pm->bm, bm_bit);
    (my_pm->inuse)++;

    *index = my_index;

    return (CNAT_SUCCESS);
}
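The bitmap convention implied above is shared by every allocator in this section: a set bit marks a free port, and allocation clears the bit. A sketch of an "is this port free?" probe under that assumption (the helper name is hypothetical):

static inline int port_is_free (cnat_portmap_v2_t *pm, u16 port)
{
    /* set bit == free; cleared bit == allocated (see the alloc path above) */
    return clib_bitmap_get_no_check (pm->bm, port2bit (port)) == 1;
}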
Example no. 7
/*
 * cnat_alloc_port_from_pm
 * Given a portmap structure find port/port_pair that are free
 *
 * The assumption in this function is that bit in bm corresponds
 * to a port number.   This is TRUE and hence there is no call
 * to the function bit2port here, though it is done in other 
 * places in this file.
 *
 */
static u32
cnat_alloc_port_from_pm (
    u32 start_port,
    u32 end_port,
    cnat_portmap_v2_t *my_pm,
    port_pair_t       pair_type
#ifndef NO_BULK_LOGGING
    , bulk_alloc_size_t    bulk_size,
    int                  *nfv9_log_req
#endif /* #ifndef NO_BULK_LOGGING */
    )
{
    u32 i;
    u32 start_bit;
    u32 total_ports = end_port - start_port + 1;
    uword bit_test_result;
    uword max_trys_to_find_port;

    rseed_port = randq1(rseed_port);

    start_bit = rseed_port % total_ports;
    start_bit = start_bit + start_port;
#ifndef NO_BULK_LOGGING
    *nfv9_log_req = BULK_ALLOC_NOT_ATTEMPTED;
    if(BULK_ALLOC_SIZE_NONE != bulk_size)
    {
        /* We need the start port of the range to be aligned on an integer
         * multiple of bulk_size */
        max_trys_to_find_port = total_ports/bulk_size;
        start_bit= ((start_bit + bulk_size -1)/bulk_size) * bulk_size;
    }
    else
#endif /* #ifndef NO_BULK_LOGGING */
    max_trys_to_find_port = total_ports;

    /* Allocate a random port / port-pair */
    for (i = 0; i < max_trys_to_find_port; i++) {
        /* the random start, or a later increment, can step outside the
         * [start_port, end_port] range; wrap back to start_port */
        if (PREDICT_FALSE((start_bit >= end_port) ||
                          (start_bit < start_port))) {
            start_bit = start_port;
#ifndef NO_BULK_LOGGING
            if (BULK_ALLOC_SIZE_NONE != bulk_size) {
                start_bit = ((start_bit + bulk_size - 1)/bulk_size) * bulk_size;
            }
#endif /* #ifndef NO_BULK_LOGGING */
        }

        /* Scan forward from random position */
#ifndef NO_BULK_LOGGING
        if(BULK_ALLOC_SIZE_NONE != bulk_size) {
            bit_test_result = cgn_clib_bitmap_check_if_all(my_pm->bm,
            start_bit, bulk_size);
        }
        else
#endif /* #ifndef NO_BULK_LOGGING */
            bit_test_result = clib_bitmap_get_no_check(my_pm->bm, start_bit);
        
        if (PREDICT_TRUE(bit_test_result)) {
#ifndef NO_BULK_LOGGING
            if (BULK_ALLOC_SIZE_NONE != bulk_size) {
                /* Got the entire bulk range */
                *nfv9_log_req = bit2port(start_bit);
                return start_bit;
            } else {
#endif /* #ifndef NO_BULK_LOGGING */
                /*
                 * For PORT_PAIR, the first port has to be even,
                 * the subsequent port <= end_port,
                 * and the subsequent port should be unallocated
                 */
                if ((start_bit & 0x1) ||
                    ((start_bit + 1) > end_port) ||
                    (clib_bitmap_get_no_check(my_pm->bm,
                            (start_bit + 1)) == 0)) {
                    goto notfound;
                }
                return (start_bit);
#ifndef NO_BULK_LOGGING
            }
#endif /* #ifndef NO_BULK_LOGGING */
        } /* if( free port found ) */

notfound:
#ifndef NO_BULK_LOGGING
        if(BULK_ALLOC_SIZE_NONE != bulk_size) {
            start_bit += bulk_size;
        } else
#endif /* #ifndef NO_BULK_LOGGING */
            start_bit++;

    }
    return (BITS_PER_INST);
}
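The bulk path above repeatedly rounds start_bit up to the next multiple of bulk_size. Worked with illustrative numbers:

/* ((start_bit + bulk_size - 1) / bulk_size) * bulk_size, with bulk_size = 8:
 *   start_bit = 13  ->  ((13 + 7) / 8) * 8 = 16
 *   start_bit = 16  ->  ((16 + 7) / 8) * 8 = 16   (already aligned, unchanged)
 */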
Example no. 8
/*
 * cnat_port_alloc_v2
 * public ipv4 address/port allocator for dynamic ports
 *
 * 200K users / 20M translations means vec_len(cnat_portmap) will be
 * around 300.
 *
 */
cnat_errno_t
cnat_dynamic_port_alloc_v2 (
                 cnat_portmap_v2_t    *pm,
                 port_alloc_t          atype,
                 port_pair_t           pair_type,
                 u32                  *index,
                 u32                  *o_ipv4_address,
                 u16                  *o_port,
                 u16                  static_port_range
#ifndef NO_BULK_LOGGING
                 , bulk_alloc_size_t    bulk_size,
                  int *nfv9_log_req
#endif
                 , u16                   ip_n_to_1,
                  u32                  *rseed_ip
                 )
{
    int i;
    cnat_errno_t       my_err = CNAT_NO_POOL_ANY;
    cnat_portmap_v2_t *my_pm = 0;
    u16 start_bit;
    u16 new_port;
    uword bit_test_result;
    uword max_trys_to_find_port;

    ASSERT(index);
    ASSERT(o_ipv4_address);
    ASSERT(o_port);

    my_pm = cnat_dynamic_addr_alloc_from_pm(pm, atype, index, &my_err, ip_n_to_1, 
            rseed_ip);

    if (PREDICT_FALSE(my_pm == NULL)) {
        return (my_err);
    }
    if(PREDICT_FALSE(my_pm->dyn_full == 1)) {
        if (atype == PORT_ALLOC_DIRECTED) {
            return (CNAT_NOT_FOUND_DIRECT);
        } else {
            return (CNAT_NOT_FOUND_ANY);
        }
    }

#if DEBUG > 1
    PLATFORM_DEBUG_PRINT("ALLOC_PORT_V2: My_Instance_Number %d: IP addr 0x%x, Inuse %d\n",
           my_instance_number, my_pm->ipv4_address, my_pm->inuse);
#endif

    rseed_port = randq1(rseed_port);

    /*
     * Exclude the static port range for allocating dynamic ports
     */
    start_bit = (rseed_port) % (BITS_PER_INST - static_port_range);
    start_bit = start_bit + static_port_range;

#ifndef NO_BULK_LOGGING
    *nfv9_log_req = BULK_ALLOC_NOT_ATTEMPTED;
    if(BULK_ALLOC_SIZE_NONE != bulk_size)
    {
        /* We need the start port of the range to be aligned on an integer
         * multiple of bulk_size */
        max_trys_to_find_port = BITS_PER_INST/bulk_size;
        start_bit= ((start_bit + bulk_size -1)/bulk_size) * bulk_size;
    }
    else
#endif /* #ifndef NO_BULK_LOGGING */
    max_trys_to_find_port = BITS_PER_INST;

    /* Allocate a random port / port-pair */
    for (i = 0; i < max_trys_to_find_port; i++) {

        /* start_bit is only a u16.. so it can rollover and become zero */
        if (PREDICT_FALSE((start_bit >= BITS_PER_INST) ||
                          (start_bit < static_port_range))) {
            start_bit = static_port_range;
#ifndef NO_BULK_LOGGING
            if (BULK_ALLOC_SIZE_NONE != bulk_size) {
                start_bit = ((start_bit + bulk_size - 1)/bulk_size) * bulk_size;
            }
#endif /* #ifndef NO_BULK_LOGGING */
        }
        /* Scan forward from random position */
#ifndef NO_BULK_LOGGING
        if(BULK_ALLOC_SIZE_NONE != bulk_size) {
            bit_test_result = cgn_clib_bitmap_check_if_all(my_pm->bm,
            start_bit, bulk_size);
        }
        else
#endif /* #ifndef NO_BULK_LOGGING */
        bit_test_result = clib_bitmap_get_no_check(my_pm->bm, start_bit);
        
        if (PREDICT_TRUE(bit_test_result)) {
            new_port = bit2port(start_bit);
#ifndef NO_BULK_LOGGING
            if(BULK_ALLOC_SIZE_NONE != bulk_size)
                *nfv9_log_req = new_port;
#endif
            if ((pair_type == PORT_S_ODD) &&
                (!(new_port & 0x1))) {
#ifndef NO_BULK_LOGGING
                if (BULK_ALLOC_SIZE_NONE != bulk_size) {
                    start_bit++; /* Just use the next one in the bulk range */
                    new_port++;
                    goto found2;
                }
#endif /* #ifndef NO_BULK_LOGGING */
                goto notfound;
            } else if ((pair_type == PORT_S_EVEN) &&
                       (new_port & 0x1)) {
                goto notfound;
            }

            /* OK we got one or two suitable ports */
            goto found2;
        }

    notfound:
#ifndef NO_BULK_LOGGING
        if (BULK_ALLOC_SIZE_NONE != bulk_size)
            start_bit += bulk_size;
        else
#endif /* #ifndef NO_BULK_LOGGING */
            start_bit++;

    } /* end of for loop */

    /* Completely out of ports */

    /* Port allocation failure */
    /* set dyn_full flag. This would be used to verify
     * for further dyn session before searching for port
     */
    if (atype == PORT_ALLOC_DIRECTED) {
        my_pm->dyn_full = 1;
        return (CNAT_NOT_FOUND_DIRECT);
    } else {
        my_pm->dyn_full = 1;
        return (CNAT_NOT_FOUND_ANY);
    }
  

 found2:

    /* Accounting */
    cgn_clib_bitmap_clear_no_check (my_pm->bm, start_bit);
    (my_pm->inuse)++;

    *index = my_pm - pm;
    *o_ipv4_address = my_pm->ipv4_address;

    *o_port = new_port;
    return (CNAT_SUCCESS);
}
Example no. 9
/*
 * Try to allocate a portmap structure based on atype field
 */
cnat_portmap_v2_t *
cnat_dynamic_addr_alloc_from_pm (
                 cnat_portmap_v2_t    *pm,
                 port_alloc_t          atype,
                 u32                  *index,
                 cnat_errno_t         *err,
                 u16                   ip_n_to_1,
                 u32                  *rseed_ip)
{
    u32 i, pm_len;
    int my_index;
    int min_inuse, min_index;

    cnat_portmap_v2_t *my_pm = 0;
    *err = CNAT_NO_POOL_ANY;

    pm_len = vec_len(pm);

    switch(atype) {
    case PORT_ALLOC_ANY:
        if (PREDICT_FALSE(pm_len == 0)) {
	    my_pm = 0;
            *err = CNAT_NO_POOL_ANY;
            goto done;
        }

        /* "Easy" way, first address with at least 200 free ports */
        for (i = 0; i < PORT_PROBE_LIMIT; i++) {
            *rseed_ip = randq1(*rseed_ip);
            my_index = (*rseed_ip) % pm_len;
            my_pm = pm + my_index;
            if (PREDICT_FALSE(ip_n_to_1)) {
                if (PREDICT_TRUE(ip_n_to_1 == 1)) {
                    if (PREDICT_FALSE(0 == my_pm->inuse)) {
                        goto done;
                    }
                } else {
                    if (PREDICT_TRUE(my_pm->private_ip_users_count < ip_n_to_1)) {
                        if (PREDICT_FALSE(my_pm->inuse < ((BITS_PER_INST*2)/3))) {
                            goto done;
                        }
                    }
                }
            } else {
                if (PREDICT_FALSE(my_pm->inuse < ((BITS_PER_INST*2)/3))) {
                    goto done;
                }
            }
        }  

        /* "hard" way, best-fit. $$$$ Throttle complaint */
        min_inuse = PORTS_PER_ADDR + 1;
        min_index = ~0;
        for (i = 0; i < pm_len; i++) {
            my_pm = pm + i;
            if (PREDICT_FALSE(ip_n_to_1)) {
                if (PREDICT_TRUE(ip_n_to_1 == 1)) {
                    if (PREDICT_FALSE(!my_pm->inuse)) {
                        min_inuse = my_pm->inuse;
                        min_index = my_pm - pm;
                    }
                } else {
                    if (PREDICT_TRUE(my_pm->private_ip_users_count < ip_n_to_1)) {
                        if (PREDICT_TRUE(my_pm->inuse < min_inuse)) {
                            min_inuse = my_pm->inuse;
                            min_index = my_pm - pm;
                        }
                    }
                }
            } else {
                if (PREDICT_TRUE(my_pm->inuse < min_inuse)) {
                    min_inuse = my_pm->inuse;
                    min_index = my_pm - pm;
                }
            }
        }

        if (PREDICT_TRUE(min_inuse < PORTS_PER_ADDR)) {
            my_pm = pm + min_index;
            my_index = min_index;
            goto done;
        }

        /* Completely out of ports */
#ifdef DEBUG_PRINTF_ENABLED
        PLATFORM_DEBUG_PRINT("%s out of ports\n", __FUNCTION__);
#endif

	my_pm = 0;
        *err = CNAT_NO_PORT_ANY;
        break;


    case PORT_ALLOC_DIRECTED:
        //ASSERT(*index < pm_len);
        if (PREDICT_FALSE(*index >= pm_len)) {
	    my_pm = 0;
            *err = CNAT_INV_PORT_DIRECT;
            goto done;
        }
        my_pm = pm + *index;
        my_index = *index;
        break;

    default:
        msg_spp_err("bad allocation type in cnat_port_alloc");
        my_pm = 0;
        *err = CNAT_ERR_PARSER;
        break;
    }

 done:
    if (PREDICT_FALSE(my_pm == NULL)) {
        return (my_pm);
    }

    if (PREDICT_FALSE(my_pm->inuse >= BITS_PER_INST)) {
        my_pm = 0;
        if (atype == PORT_ALLOC_DIRECTED) {
            *err = CNAT_BAD_INUSE_DIRECT;
        } else {
            *err = CNAT_BAD_INUSE_ANY;
        }
    }

    return (my_pm);
}
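For intuition on the (BITS_PER_INST*2)/3 threshold used in the easy path: an address qualifies only while at most two thirds of its ports are in use. Purely for illustration (the real BITS_PER_INST comes from the platform headers), if BITS_PER_INST were 64512, i.e. ports 1024..65535:

/* illustrative arithmetic only */
/* (64512 * 2) / 3 == 43008, so my_pm->inuse must stay below 43008 */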
Example no. 10
/*
 * cnat_port_alloc_static_v2
 * public ipv4 address/port allocator for Static Port commands
 * tries to allocate same outside port as inside port
 */
cnat_errno_t
cnat_static_port_alloc_v2 (
                 cnat_portmap_v2_t    *pm,
                 port_alloc_t          atype,
                 port_pair_t           pair_type,
                 u32                   i_ipv4_address,
                 u16                   i_port,
                 u32                  *index,
                 u32                  *o_ipv4_address,
                 u16                  *o_port,
                 u16                   static_port_range
#ifndef NO_BULK_LOGGING
                 , bulk_alloc_size_t    bulk_size,
                 int *nfv9_log_req
#endif 
		 , u16                   ip_n_to_1
                 )
{
    u32 i, hash_value, my_index, found, max_attempts;
    u16 start_bit, new_port;
    cnat_portmap_v2_t *my_pm = 0;
    u32 pm_len = vec_len(pm);
    uword bit_test_result;

#ifndef NO_BULK_LOGGING
    *nfv9_log_req = BULK_ALLOC_NOT_ATTEMPTED;
#endif 

    if (PREDICT_FALSE(pm_len == 0)) {
        return (CNAT_NO_POOL_ANY);
    }

    switch (atype) {

    case PORT_ALLOC_ANY:

        found = 0;

        /*
         * Try to hash the IPv4 address to get an index value to select the pm
         */
        hash_value = (i_ipv4_address & 0xffff) ^
                        ((i_ipv4_address >> 16) & 0xffff);

        /*
         * If pm_len <= 256, compact the hash to 8 bits
         */
        if (PREDICT_TRUE(pm_len <= 256)) {
            hash_value = (hash_value & 0xff) ^ ((hash_value >> 8) & 0xff);
        }

        /*
         * Ensure that the hash value is in the range 0 .. (pm_len-1)
         */
        my_index = hash_value % pm_len;
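        /*
         * Worked example (illustrative values): i_ipv4_address = 0x0A010203
         * gives hash_value = 0x0203 ^ 0x0A01 = 0x0802; with pm_len <= 256 it
         * is folded again to 0x02 ^ 0x08 = 0x0A, then reduced mod pm_len.
         */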

        for (i = 0; i < PORT_PROBE_LIMIT; i++) {
            my_pm = pm + my_index;
            if (PREDICT_TRUE(ip_n_to_1)) {
                if (PREDICT_TRUE(my_pm->private_ip_users_count < ip_n_to_1)) {
                    /*
                     * Try to find a PM with at least 33% free and my_port free
                     */
                    if (PREDICT_TRUE((my_pm->inuse < ((BITS_PER_INST*2)/3)) &&
                                     clib_bitmap_get_no_check(my_pm->bm,
                                                              i_port) == 1)
#ifndef NO_BULK_LOGGING
                        && check_if_stat_alloc_ok_for_bulk(my_pm, i_port,
                                                           bulk_size,
                                                           static_port_range)
#endif
                        ) {
                        found = 1;
                        break;
                    }
                }
            } else {
                /*
                 * Try to find a PM with at least 33% free and my_port free
                 */
                if (PREDICT_TRUE((my_pm->inuse < ((BITS_PER_INST*2)/3)) &&
                                 clib_bitmap_get_no_check(my_pm->bm,
                                                          i_port) == 1)
#ifndef NO_BULK_LOGGING
                    && check_if_stat_alloc_ok_for_bulk(my_pm, i_port,
                                                       bulk_size,
                                                       static_port_range)
#endif
                    ) {
                    found = 1;
                    break;
                }
            }
            my_index = (my_index + 1) % pm_len;
        }

        /*
         * If not found do it the hard way .
         * "hard" way, best-fit.
         */
        if (!found) {
            u32 min_inuse_any, min_inuse_myport;
            u32 min_index_any, min_index_myport;

            min_inuse_any = min_inuse_myport = PORTS_PER_ADDR + 1;
            min_index_any = min_index_myport = ~0;
            for (i = 0; i < pm_len; i++) {
                my_pm = pm + i;
                if (PREDICT_TRUE(ip_n_to_1)) {
                    if (PREDICT_TRUE(my_pm->private_ip_users_count < ip_n_to_1)) {
                        if (PREDICT_FALSE(my_pm->inuse < min_inuse_any)) {
                            min_inuse_any = my_pm->inuse;
                            min_index_any = my_pm - pm;
                        }
                        if (PREDICT_FALSE(my_pm->inuse < min_inuse_myport)) {
                            if (PREDICT_TRUE(clib_bitmap_get_no_check(
                                             my_pm->bm, i_port) == 1)
#ifndef NO_BULK_LOGGING
                                && check_if_stat_alloc_ok_for_bulk(my_pm,
                                        i_port, bulk_size, static_port_range)
#endif
                                ) {
                                min_inuse_myport = my_pm->inuse;
                                min_index_myport = my_pm - pm;
                            }
                        }
                    }
                } else {
                    if (PREDICT_FALSE(my_pm->inuse < min_inuse_any)) {
                        min_inuse_any = my_pm->inuse;
                        min_index_any = my_pm - pm;
                    }
                    if (PREDICT_FALSE(my_pm->inuse < min_inuse_myport)) {
                        if (PREDICT_TRUE(clib_bitmap_get_no_check(
                                         my_pm->bm, i_port) == 1)
#ifndef NO_BULK_LOGGING
                            && check_if_stat_alloc_ok_for_bulk(my_pm, i_port,
                                                 bulk_size, static_port_range)
#endif
                            ) {
                            min_inuse_myport = my_pm->inuse;
                            min_index_myport = my_pm - pm;
                        }
                    }
                }
            }

            /*
             * Check if we have an exactly matching PM that has
             * myport free.  If so use it.  If no such PM is
             * available, use any PM
             */
            if (PREDICT_TRUE(min_inuse_myport < PORTS_PER_ADDR)) {
                my_pm = pm + min_index_myport;
                my_index = min_index_myport;
                found = 1;
            } else if (PREDICT_TRUE(min_inuse_any < PORTS_PER_ADDR)) {
                my_pm = pm + min_index_any;
                my_index = min_index_any;
                found = 1;
            }
        }

        if (!found) {
            return (CNAT_NO_PORT_ANY);
        }
        break;

    case PORT_ALLOC_DIRECTED:
        my_index = *index;
        if (PREDICT_FALSE(my_index >= pm_len)) {
            return (CNAT_INV_PORT_DIRECT);
        }
        my_pm = pm + my_index;
        break;

    default:
        return (CNAT_ERR_PARSER);
    }

    /* Allocate a matching port if possible */
    start_bit = i_port;
    found = 0;
    max_attempts = BITS_PER_INST;
#ifndef NO_BULK_LOGGING
    if((BULK_ALLOC_SIZE_NONE != bulk_size) && 
        (i_port >= static_port_range)) {
        start_bit =  (start_bit/bulk_size) * bulk_size;
        max_attempts = BITS_PER_INST/bulk_size;
    }
#endif /* NO_BULK_LOGGING */

    for (i = 0; i < max_attempts; i++) {
#ifndef NO_BULK_LOGGING
        if((BULK_ALLOC_SIZE_NONE != bulk_size) &&
            (i_port >= static_port_range)) {
            bit_test_result = cgn_clib_bitmap_check_if_all(my_pm->bm, 
                        start_bit, bulk_size);
        }
        else
#endif /* #ifndef NO_BULK_LOGGING */
        bit_test_result = clib_bitmap_get_no_check(my_pm->bm, start_bit);

        if (PREDICT_TRUE(bit_test_result)) {
#ifndef NO_BULK_LOGGING
        if((BULK_ALLOC_SIZE_NONE != bulk_size) && 
            (i_port >= static_port_range)) {
            *nfv9_log_req = start_bit;
            if(i==0) new_port = i_port; /* First go */
            else {
                new_port = bit2port(start_bit);
                if (pair_type == PORT_S_ODD &&  (new_port & 0x1) == 0)
                    new_port++;                    
            }
            found = 1;
            break;
        }
        else {
#endif  /* NO_BULK_LOGGING */
            new_port = bit2port(start_bit);
            if (pair_type == PORT_S_ODD) {
                if ((new_port & 0x1) == 1) {
                    found = 1;
                    break;
                }
            } else if (pair_type == PORT_S_EVEN) {
                if ((new_port & 0x1) == 0) {
                    found = 1;
                    break;
                }
            } else {
                found = 1;
                break;
            }
#ifndef NO_BULK_LOGGING
        }
#endif 
        }
#ifndef NO_BULK_LOGGING
        if((BULK_ALLOC_SIZE_NONE != bulk_size) &&
                (i_port >= static_port_range))
            start_bit = (start_bit + bulk_size) % BITS_PER_INST;
        else {
#endif /* NO_BULK_LOGGING */
            start_bit = (start_bit + 1) % BITS_PER_INST;
            if(PREDICT_FALSE(start_bit == 0)) {
                start_bit = 1; /* Port 0 is invalid, so start from 1 */
            }
#ifndef NO_BULK_LOGGING
        }
#endif 
    } /* End of for loop */

    if (!found) {
        /* Port allocation failure */
        if (atype == PORT_ALLOC_DIRECTED) {
            return (CNAT_NOT_FOUND_DIRECT);
        } else {
            return (CNAT_NOT_FOUND_ANY);
        }
    }

    /* Accounting */
    cgn_clib_bitmap_clear_no_check(my_pm->bm, new_port);
    (my_pm->inuse)++;

    *index = my_pm - pm;
    *o_ipv4_address = my_pm->ipv4_address;

    *o_port = new_port;

    return (CNAT_SUCCESS);
}
Example no. 11
/*
 * fish pkts back from the recycle queue/freelist
 * un-flatten the context chains
 */
static void replication_recycle_callback (vlib_main_t *vm, 
                                          vlib_buffer_free_list_t * fl)
{
  vlib_frame_t * f = 0;
  u32 n_left_from;
  u32 n_left_to_next = 0;
  u32 n_this_frame = 0;
  u32 * from;
  u32 * to_next = 0;
  u32 bi0, pi0;
  vlib_buffer_t *b0;
  vlib_buffer_t *bnext0;
  int i;
  replication_main_t * rm = &replication_main;
  replication_context_t * ctx;
  u32 feature_node_index = 0; 
  uword cpu_number = vm->cpu_index;

  // All buffers in the list are destined to the same recycle node.
  // Pull the recycle node index from the first buffer. 
  // Note: this could be sped up if the node index were stuffed into
  // the freelist itself.
  if (vec_len (fl->aligned_buffers) > 0) {
    bi0 = fl->aligned_buffers[0];
    b0 = vlib_get_buffer (vm, bi0);
    ctx = pool_elt_at_index (rm->contexts[cpu_number],
                             b0->clone_count);
    feature_node_index = ctx->recycle_node_index;
  } else if (vec_len (fl->unaligned_buffers) > 0) {
    bi0 = fl->unaligned_buffers[0];
    b0 = vlib_get_buffer (vm, bi0);
    ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->clone_count);
    feature_node_index = ctx->recycle_node_index;
  }

  /* aligned, unaligned buffers */
  for (i = 0; i < 2; i++) 
    {
      if (i == 0)
        {
          from = fl->aligned_buffers;
          n_left_from = vec_len (from);
        }
      else
        {
          from = fl->unaligned_buffers;
          n_left_from = vec_len (from);
        }
    
      while (n_left_from > 0)
        {
          if (PREDICT_FALSE(n_left_to_next == 0)) 
            {
              if (f)
                {
                  f->n_vectors = n_this_frame;
                  vlib_put_frame_to_node (vm, feature_node_index, f);
                }
              
              f = vlib_get_frame_to_node (vm, feature_node_index);
              to_next = vlib_frame_vector_args (f);
              n_left_to_next = VLIB_FRAME_SIZE;
              n_this_frame = 0;
            }
          
          bi0 = from[0];
          if (PREDICT_TRUE(n_left_from > 1))
            {
              pi0 = from[1];
              vlib_prefetch_buffer_with_index(vm,pi0,LOAD);
            }
        
          bnext0 = b0 = vlib_get_buffer (vm, bi0);

          // Mark that this buffer was just recycled
          b0->flags |= VLIB_BUFFER_IS_RECYCLED;

          // If buffer is traced, mark frame as traced
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
              f->flags |= VLIB_FRAME_TRACE;

          while (bnext0->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              from += 1;
              n_left_from -= 1;
              bnext0 = vlib_get_buffer (vm, bnext0->next_buffer);
            }
          to_next[0] = bi0;

          from++;
          to_next++;
          n_this_frame++;
          n_left_to_next--;
          n_left_from--;
        }
    }
  
  vec_reset_length (fl->aligned_buffers);
  vec_reset_length (fl->unaligned_buffers);

  if (f)
    {
      ASSERT(n_this_frame);
      f->n_vectors = n_this_frame;
      vlib_put_frame_to_node (vm, feature_node_index, f);
    }
}
Example no. 12
uword
ssvm_eth_interface_tx (ssvm_private_t * intfc, char *buf_to_send, int len_to_send)
// (remainder of the original signature, since removed: , vlib_frame_t * f)
{
  ssvm_eth_main_t * em = &ssvm_eth_main;
  ssvm_shared_header_t * sh = intfc->sh;
  unix_shared_memory_queue_t * q;
  u32 * from;
  u32 n_left;
  ssvm_eth_queue_elt_t * elts, * elt, * prev_elt;
  u32 my_pid = intfc->my_pid;
  vlib_buffer_t * b0;
  u32 bi0;
  u32 size_this_buffer;
  u32 chunks_this_buffer;
  u8 i_am_master = intfc->i_am_master;
  u32 elt_index;
  int is_ring_full, interface_down;
  int i;
  volatile u32 *queue_lock;
  u32 n_to_alloc = VLIB_FRAME_SIZE;
  u32 n_allocated, n_present_in_cache, n_available;
  u32 * elt_indices;
  
  if (i_am_master)
    q = (unix_shared_memory_queue_t *)sh->opaque [TO_SLAVE_Q_INDEX];
  else
    q = (unix_shared_memory_queue_t *)sh->opaque [TO_MASTER_Q_INDEX];

  queue_lock = (u32 *) q;

  // from = vlib_frame_vector_args (f);
  //n_left = f->n_vectors;
  n_left = 1;

  is_ring_full = 0;
  interface_down = 0;

  n_present_in_cache = vec_len (em->chunk_cache);

#ifdef XXX
  /* admin / link up/down check */
  if (sh->opaque [MASTER_ADMIN_STATE_INDEX] == 0 ||
      sh->opaque [SLAVE_ADMIN_STATE_INDEX] == 0)
    {
      interface_down = 1;
      goto out;
    }
#endif

  ssvm_lock (sh, my_pid, 1);

  elts = (ssvm_eth_queue_elt_t *) (sh->opaque [CHUNK_POOL_INDEX]);
  elt_indices = (u32 *) (sh->opaque [CHUNK_POOL_FREELIST_INDEX]);
  n_available = (u32) pointer_to_uword(sh->opaque [CHUNK_POOL_NFREE]);

  printf("AYXX: n_left: %d, n_present_in_cache: %d\n", n_left, n_present_in_cache);

  if (n_present_in_cache < n_left*2)
    {
      vec_validate (em->chunk_cache, 
                    n_to_alloc + n_present_in_cache - 1);

      n_allocated = n_to_alloc < n_available ? n_to_alloc : n_available;
      printf("AYXX: n_allocated: %d, n_to_alloc: %d, n_available: %d\n", n_allocated, n_to_alloc, n_available);

      if (PREDICT_TRUE(n_allocated > 0))
	{
	  memcpy (&em->chunk_cache[n_present_in_cache],
		  &elt_indices[n_available - n_allocated],
		  sizeof(u32) * n_allocated);
	}

      n_present_in_cache += n_allocated;
      n_available -= n_allocated;
      sh->opaque [CHUNK_POOL_NFREE] = uword_to_pointer(n_available, void*);
      _vec_len (em->chunk_cache) = n_present_in_cache;
    }
Example no. 13
static inline void *
vl_msg_api_alloc_internal (int nbytes, int pool, int may_return_null)
{
  int i;
  msgbuf_t *rv;
  ring_alloc_t *ap;
  svm_queue_t *q;
  void *oldheap;
  vl_shmem_hdr_t *shmem_hdr;
  api_main_t *am = &api_main;

  shmem_hdr = am->shmem_hdr;

#if DEBUG_MESSAGE_BUFFER_OVERRUN > 0
  nbytes += 4;
#endif

  ASSERT (pool == 0 || vlib_get_thread_index () == 0);

  if (shmem_hdr == 0)
    {
      clib_warning ("shared memory header NULL");
      return 0;
    }

  /* account for the msgbuf_t header */
  nbytes += sizeof (msgbuf_t);

  if (shmem_hdr->vl_rings == 0)
    {
      clib_warning ("vl_rings NULL");
      ASSERT (0);
      abort ();
    }

  if (shmem_hdr->client_rings == 0)
    {
      clib_warning ("client_rings NULL");
      ASSERT (0);
      abort ();
    }

  ap = pool ? shmem_hdr->vl_rings : shmem_hdr->client_rings;
  for (i = 0; i < vec_len (ap); i++)
    {
      /* Too big? */
      if (nbytes > ap[i].size)
	{
	  continue;
	}

      q = ap[i].rp;
      if (pool == 0)
	{
	  pthread_mutex_lock (&q->mutex);
	}
      rv = (msgbuf_t *) (&q->data[0] + q->head * q->elsize);
      /*
       * Is this item still in use?
       */
      if (rv->q)
	{
	  u32 now = (u32) time (0);

	  if (PREDICT_TRUE (rv->gc_mark_timestamp == 0))
	    rv->gc_mark_timestamp = now;
	  else
	    {
	      if (now - rv->gc_mark_timestamp > 10)
		{
		  if (CLIB_DEBUG > 0)
		    {
		      u16 *msg_idp, msg_id;
		      clib_warning
			("garbage collect pool %d ring %d index %d", pool, i,
			 q->head);
		      msg_idp = (u16 *) (rv->data);
		      msg_id = clib_net_to_host_u16 (*msg_idp);
		      if (msg_id < vec_len (api_main.msg_names))
			clib_warning ("msg id %d name %s", (u32) msg_id,
				      api_main.msg_names[msg_id]);
		    }
		  shmem_hdr->garbage_collects++;
		  goto collected;
		}
	    }


	  /* yes, loser; try next larger pool */
	  ap[i].misses++;
	  if (pool == 0)
	    pthread_mutex_unlock (&q->mutex);
	  continue;
	}
    collected:

      /* OK, we have a winner */
      ap[i].hits++;
      /*
       * Remember the source queue, although we
       * don't need to know the queue to free the item.
       */
      rv->q = q;
      rv->gc_mark_timestamp = 0;
      q->head++;
      if (q->head == q->maxsize)
	q->head = 0;

      if (pool == 0)
	pthread_mutex_unlock (&q->mutex);
      goto out;
    }

  /*
   * Request too big, or head element of all size-compatible rings
   * still in use. Fall back to shared-memory malloc.
   */
  am->ring_misses++;

  pthread_mutex_lock (&am->vlib_rp->mutex);
  oldheap = svm_push_data_heap (am->vlib_rp);
  if (may_return_null)
    {
      rv = clib_mem_alloc_or_null (nbytes);
      if (PREDICT_FALSE (rv == 0))
	{
	  svm_pop_heap (oldheap);
	  pthread_mutex_unlock (&am->vlib_rp->mutex);
	  return 0;
	}
    }
  else
    rv = clib_mem_alloc (nbytes);

  rv->q = 0;
  rv->gc_mark_timestamp = 0;
  svm_pop_heap (oldheap);
  pthread_mutex_unlock (&am->vlib_rp->mutex);

out:
#if DEBUG_MESSAGE_BUFFER_OVERRUN > 0
  {
    nbytes -= 4;
    u32 *overrun;
    overrun = (u32 *) (rv->data + nbytes - sizeof (msgbuf_t));
    *overrun = 0x1badbabe;
  }
#endif
  rv->data_len = htonl (nbytes - sizeof (msgbuf_t));

  return (rv->data);
}
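For context, callers normally reach this allocator through a thin wrapper that selects the ring pool; a sketch modeled on the corresponding VPP wrapper (details may vary between versions):

void *
vl_msg_api_alloc (int nbytes)
{
  int pool;
  api_main_t *am = &api_main;
  vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;

  /* Clients use pool 0, the vlib process uses pool 1 */
  pool = (am->our_pid == shmem_hdr->vl_pid);
  return vl_msg_api_alloc_internal (nbytes, pool, 0 /* may_return_null */);
}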