Example #1
static clib_error_t * to_relay_socket_read_ready (unix_file_t * uf)
{
  mc_socket_main_t *msm = (mc_socket_main_t *)uf->private_data;
  mc_main_t *mcm = &msm->mc_main;
  vlib_main_t * vm = msm->mc_main.vlib_main;
  mc_multicast_socket_t * ms_to_relay = &msm->multicast_sockets[MC_TRANSPORT_USER_REQUEST_TO_RELAY];
  mc_multicast_socket_t * ms_from_relay = &msm->multicast_sockets[MC_TRANSPORT_USER_REQUEST_FROM_RELAY];
  clib_error_t * error;
  u32 bi;
  u32 is_master = mcm->relay_state == MC_RELAY_STATE_MASTER;

  /* Not the ordering master? Turf the msg */
  error = recvmsg_helper (msm, ms_to_relay->socket, /* rx_addr */ 0, &bi,
			  /* drop_message */ ! is_master);

  /* If we are the master, number and rebroadcast the msg. */
  if (! error && is_master)
    {
      vlib_buffer_t * b = vlib_get_buffer (vm, bi);
      mc_msg_user_request_t * mp = vlib_buffer_get_current (b);
      mp->global_sequence = clib_host_to_net_u32 (mcm->relay_global_sequence);
      mcm->relay_global_sequence++;
      error = sendmsg_helper (msm, ms_from_relay->socket, &ms_from_relay->tx_addr, bi);
      vlib_buffer_free_one (vm, bi);
    }

  return error;
}
Example #2
/**
 * Discards bytes from buffer chain
 *
 * It discards n_bytes_to_drop bytes starting at the first buffer after chain_b
 */
always_inline void
session_enqueue_discard_chain_bytes (vlib_main_t * vm, vlib_buffer_t * b,
				     vlib_buffer_t ** chain_b,
				     u32 n_bytes_to_drop)
{
  vlib_buffer_t *next = *chain_b;
  u32 to_drop = n_bytes_to_drop;
  ASSERT (b->flags & VLIB_BUFFER_NEXT_PRESENT);
  while (to_drop && (next->flags & VLIB_BUFFER_NEXT_PRESENT))
    {
      next = vlib_get_buffer (vm, next->next_buffer);
      if (next->current_length > to_drop)
	{
	  vlib_buffer_advance (next, to_drop);
	  to_drop = 0;
	}
      else
	{
	  to_drop -= next->current_length;
	  next->current_length = 0;
	}
    }
  *chain_b = next;

  if (to_drop == 0)
    b->total_length_not_including_first_buffer -= n_bytes_to_drop;
}
Example #3
static clib_error_t * join_socket_read_ready (unix_file_t * uf)
{
  mc_socket_main_t *msm = (mc_socket_main_t *)uf->private_data;
  mc_main_t * mcm = &msm->mc_main;
  vlib_main_t * vm = mcm->vlib_main;
  mc_multicast_socket_t * ms = &msm->multicast_sockets[MC_TRANSPORT_JOIN];
  clib_error_t * error;
  u32 bi;

  error = recvmsg_helper (msm, ms->socket, /* rx_addr */ 0, &bi, /* drop_message */ 0);
  if (! error)
    {
      vlib_buffer_t * b = vlib_get_buffer (vm, bi);
      mc_msg_join_or_leave_request_t * mp = vlib_buffer_get_current (b);

      switch (clib_host_to_net_u32 (mp->type))
	{
	case MC_MSG_TYPE_join_or_leave_request:
	  msg_handler (mcm, bi, /* handler_frees_buffer */ 0,
		       mc_msg_join_or_leave_request_handler);
	  break;

	case MC_MSG_TYPE_join_reply:
	  msg_handler (mcm, bi, /* handler_frees_buffer */ 0,
		       mc_msg_join_reply_handler);
	  break;

	default:
	  ASSERT (0);
	  break;
	}
    }
  return error;
}
Example #4
uword vlib_buffer_length_in_chain_slow_path (vlib_main_t * vm, vlib_buffer_t * b_first)
{
  vlib_buffer_t * b = b_first;
  uword l_first = b_first->current_length;
  uword l = 0;
  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      l += b->current_length;
    }
  b_first->total_length_not_including_first_buffer = l;
  b_first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  return l + l_first;
}
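
In VPP this slow path is normally fronted by a fast path that trusts the cached total whenever VLIB_BUFFER_TOTAL_LENGTH_VALID is set. A minimal sketch of such a wrapper, assuming only the flag semantics visible in this example (the real vlib_buffer_length_in_chain may differ):

static inline uword
buffer_length_in_chain_sketch (vlib_main_t * vm, vlib_buffer_t * b)
{
  /* Unchained buffer: current_length covers everything. */
  if (! (b->flags & VLIB_BUFFER_NEXT_PRESENT))
    return b->current_length;

  /* Chained buffer with a valid cached total: skip the walk. */
  if (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID)
    return b->current_length + b->total_length_not_including_first_buffer;

  /* Otherwise walk the chain and cache, as in the function above. */
  return vlib_buffer_length_in_chain_slow_path (vm, b);
}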
Example #5
always_inline void msg_handler (mc_main_t * mcm,
				u32 buffer_index,
				u32 handler_frees_buffer,
				void * _h)
{
  vlib_main_t * vm = mcm->vlib_main;
  mc_msg_handler_t * h = _h;
  vlib_buffer_t * b = vlib_get_buffer (vm, buffer_index);
  void * the_msg = vlib_buffer_get_current (b);

  h (mcm, the_msg, buffer_index);
  if (! handler_frees_buffer)
    vlib_buffer_free_one (vm, buffer_index);
}
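
For reference, a hypothetical handler matching the calling convention above. When handler_frees_buffer is 0 the caller frees the buffer itself, so the handler must not:

static void
example_msg_handler (mc_main_t * mcm, void * the_msg, u32 buffer_index)
{
  /* Decode the_msg here; leave buffer_index alone, since msg_handler
     frees it when handler_frees_buffer is 0. */
}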
Example #6
File: cdp_node.c  Project: JehandadKhan/vpp
/*
 * Process a frame of cdp packets 
 * Expect 1 packet / frame 
 */
static uword
cdp_node_fn (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * frame)
{
  u32 n_left_from, * from;
  cdp_input_trace_t * t0;

  from = vlib_frame_vector_args (frame); /* array of buffer indices */
  n_left_from = frame->n_vectors;        /* number of buffer indices */

  while (n_left_from > 0)
    {
      u32 bi0;
      vlib_buffer_t * b0;
      u32 next0, error0;

      bi0 = from[0];
      b0 = vlib_get_buffer (vm, bi0);
      
      next0 = CDP_INPUT_NEXT_NORMAL;

      /* scan this cdp pkt. error0 is the counter index to bump */
      error0 = cdp_input (vm, b0, bi0);
      b0->error = node->errors[error0];

      /* If this pkt is traced, snapshot the data */
      if (b0->flags & VLIB_BUFFER_IS_TRACED) {
          int len;
	  t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
          len = (b0->current_length < sizeof (t0->data)) 
              ? b0->current_length : sizeof (t0->data);
          t0->len = len;
          memcpy (t0->data, vlib_buffer_get_current (b0), len);
      }          
      /* push this pkt to the next graph node, always error-drop */
      vlib_set_next_frame_buffer (vm, node, next0, bi0);

      from += 1;
      n_left_from -= 1;
    }

  return frame->n_vectors;
}
Example #7
static uword
append_buffer_index_to_iovec (vlib_main_t * vm,
			      u32 buffer_index,
			      struct iovec ** iovs_return)
{
  struct iovec * i;
  vlib_buffer_t * b;
  u32 bi = buffer_index;
  u32 l = 0;

  while (1)
    {
      b = vlib_get_buffer (vm, bi);
      vec_add2 (*iovs_return, i, 1);
      i->iov_base = vlib_buffer_get_current (b);
      i->iov_len = b->current_length;
      l += i->iov_len;
      if (! (b->flags & VLIB_BUFFER_NEXT_PRESENT))
	break;
      bi = b->next_buffer;
    }

  return l;
}
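
A hypothetical caller might gather a whole chain into iovecs and hand it to writev (fd is an assumed open descriptor; <sys/uio.h> provides writev):

static ssize_t
write_chain_sketch (vlib_main_t * vm, int fd, u32 buffer_index)
{
  struct iovec * iovs = 0;	/* clib vector, grown by vec_add2 above */
  ssize_t rv;

  append_buffer_index_to_iovec (vm, buffer_index, &iovs);
  rv = writev (fd, iovs, vec_len (iovs));
  vec_free (iovs);
  return rv;
}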
Example #8
File: trajectory.c  Project: chrisy/vpp
/**
 * Dump a trajectory trace, reasonably easy to call from gdb
 */
void
vnet_dump_trajectory_trace (vlib_main_t * vm, u32 bi)
{
#if VLIB_BUFFER_TRACE_TRAJECTORY > 0
  vlib_node_main_t *vnm = &vm->node_main;
  vlib_buffer_t *b;
  u16 *trace;
  u8 i;

  b = vlib_get_buffer (vm, bi);

  trace = vnet_buffer2 (b)->trajectory_trace;

  fformat (stderr, "Context trace for bi %d b 0x%llx, visited %d\n",
	   bi, b, vec_len (trace));

  for (i = 0; i < vec_len (trace); i++)
    {
      u32 node_index;

      node_index = trace[i];

      if (node_index >= vec_len (vnm->nodes))
	{
	  fformat (stderr, "Skip bogus node index %d\n", node_index);
	  continue;
	}

      fformat (stderr, "%v (%d)\n", vnm->nodes[node_index]->name, node_index);
    }
#else
  fformat (stderr, "in vlib/buffers.h, "
	   "#define VLIB_BUFFER_TRACE_TRAJECTORY 1\n");

#endif
}
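
Since the function is meant to be called from gdb, an invocation might look like the following (the buffer index 1234 is a placeholder):

(gdb) call vnet_dump_trajectory_trace (vlib_get_main (), 1234)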
Example #9
static vlib_buffer_t *
create_buffer_for_client_message (vlib_main_t * vm,
				  u32 sw_if_index,
				  dhcp6_pd_client_state_t
				  * client_state, u32 type)
{
  dhcp6_client_common_main_t *ccm = &dhcp6_client_common_main;
  vnet_main_t *vnm = vnet_get_main ();

  vlib_buffer_t *b;
  u32 bi;
  ip6_header_t *ip;
  udp_header_t *udp;
  dhcpv6_header_t *dhcp;
  ip6_address_t src_addr;
  u32 dhcp_opt_len = 0;
  client_state->transaction_start = vlib_time_now (vm);
  u32 n_prefixes;
  u32 i;

  vnet_hw_interface_t *hw = vnet_get_sup_hw_interface (vnm, sw_if_index);
  vnet_sw_interface_t *sup_sw = vnet_get_sup_sw_interface (vnm, sw_if_index);
  vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, sw_if_index);

  /* Interface(s) down? */
  if ((hw->flags & VNET_HW_INTERFACE_FLAG_LINK_UP) == 0)
    return NULL;
  if ((sup_sw->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) == 0)
    return NULL;
  if ((sw->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) == 0)
    return NULL;

  /* Get a link-local address */
  src_addr = ip6_neighbor_get_link_local_address (sw_if_index);

  if (src_addr.as_u8[0] != 0xfe)
    {
      clib_warning ("Could not find source address to send DHCPv6 packet");
      return NULL;
    }

  if (vlib_buffer_alloc (vm, &bi, 1) != 1)
    {
      clib_warning ("Buffer allocation failed");
      return NULL;
    }

  b = vlib_get_buffer (vm, bi);
  vnet_buffer (b)->sw_if_index[VLIB_RX] = sw_if_index;
  vnet_buffer (b)->sw_if_index[VLIB_TX] = sw_if_index;
  client_state->adj_index = adj_mcast_add_or_lock (FIB_PROTOCOL_IP6,
						   VNET_LINK_IP6,
						   sw_if_index);
  vnet_buffer (b)->ip.adj_index[VLIB_TX] = client_state->adj_index;
  b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;

  ip = (ip6_header_t *) vlib_buffer_get_current (b);
  udp = (udp_header_t *) (ip + 1);
  dhcp = (dhcpv6_header_t *) (udp + 1);

  ip->src_address = src_addr;
  ip->hop_limit = 255;
  ip->ip_version_traffic_class_and_flow_label =
    clib_host_to_net_u32 (0x6 << 28);
  ip->payload_length = 0;
  ip->protocol = IP_PROTOCOL_UDP;

  udp->src_port = clib_host_to_net_u16 (DHCPV6_CLIENT_PORT);
  udp->dst_port = clib_host_to_net_u16 (DHCPV6_SERVER_PORT);
  udp->checksum = 0;
  udp->length = 0;

  dhcp->msg_type = type;
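  /* Pack the 24-bit transaction id into xid[], most significant byte first. */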
  dhcp->xid[0] = (client_state->transaction_id & 0x00ff0000) >> 16;
  dhcp->xid[1] = (client_state->transaction_id & 0x0000ff00) >> 8;
  dhcp->xid[2] = (client_state->transaction_id & 0x000000ff) >> 0;

  void *d = (void *) dhcp->data;
  dhcpv6_option_t *duid;
  dhcpv6_elapsed_t *elapsed;
  dhcpv6_ia_header_t *ia_hdr;
  dhcpv6_ia_opt_pd_t *opt_pd;
  if (type == DHCPV6_MSG_SOLICIT || type == DHCPV6_MSG_REQUEST ||
      type == DHCPV6_MSG_RENEW || type == DHCPV6_MSG_REBIND ||
      type == DHCPV6_MSG_RELEASE)
    {
      duid = (dhcpv6_option_t *) d;
      duid->option = clib_host_to_net_u16 (DHCPV6_OPTION_CLIENTID);
      duid->length = clib_host_to_net_u16 (CLIENT_DUID_LENGTH);
      clib_memcpy (duid + 1, client_duid.bin_string, CLIENT_DUID_LENGTH);
      d += sizeof (*duid) + CLIENT_DUID_LENGTH;

      if (client_state->params.server_index != ~0)
	{
	  server_id_t *se =
	    &ccm->server_ids[client_state->params.server_index];

	  duid = (dhcpv6_option_t *) d;
	  duid->option = clib_host_to_net_u16 (DHCPV6_OPTION_SERVERID);
	  duid->length = clib_host_to_net_u16 (se->len);
	  clib_memcpy (duid + 1, se->data, se->len);
	  d += sizeof (*duid) + se->len;
	}

      elapsed = (dhcpv6_elapsed_t *) d;
      elapsed->opt.option = clib_host_to_net_u16 (DHCPV6_OPTION_ELAPSED_TIME);
      elapsed->opt.length =
	clib_host_to_net_u16 (sizeof (*elapsed) - sizeof (elapsed->opt));
      elapsed->elapsed_10ms = 0;
      client_state->elapsed_pos =
	(char *) &elapsed->elapsed_10ms -
	(char *) vlib_buffer_get_current (b);
      d += sizeof (*elapsed);

      ia_hdr = (dhcpv6_ia_header_t *) d;
      ia_hdr->opt.option = clib_host_to_net_u16 (DHCPV6_OPTION_IA_PD);
      ia_hdr->iaid = clib_host_to_net_u32 (DHCPV6_CLIENT_IAID);
      ia_hdr->t1 = clib_host_to_net_u32 (client_state->params.T1);
      ia_hdr->t2 = clib_host_to_net_u32 (client_state->params.T2);
      d += sizeof (*ia_hdr);

      n_prefixes = vec_len (client_state->params.prefixes);

      ia_hdr->opt.length =
	clib_host_to_net_u16 (sizeof (*ia_hdr) +
			      n_prefixes * sizeof (*opt_pd) -
			      sizeof (ia_hdr->opt));

      for (i = 0; i < n_prefixes; i++)
	{
	  dhcp6_pd_send_client_message_params_prefix_t *pref =
	    &client_state->params.prefixes[i];
	  opt_pd = (dhcpv6_ia_opt_pd_t *) d;
	  opt_pd->opt.option = clib_host_to_net_u16 (DHCPV6_OPTION_IAPREFIX);
	  opt_pd->opt.length =
	    clib_host_to_net_u16 (sizeof (*opt_pd) - sizeof (opt_pd->opt));
	  opt_pd->addr = pref->prefix;
	  opt_pd->prefix = pref->prefix_length;
	  opt_pd->valid = clib_host_to_net_u32 (pref->valid_lt);
	  opt_pd->preferred = clib_host_to_net_u32 (pref->preferred_lt);
	  d += sizeof (*opt_pd);
	}
    }
  else
    {
      clib_warning ("State not implemented");
    }

  dhcp_opt_len = ((u8 *) d) - dhcp->data;
  udp->length =
    clib_host_to_net_u16 (sizeof (*udp) + sizeof (*dhcp) + dhcp_opt_len);
  ip->payload_length = udp->length;
  b->current_length =
    sizeof (*ip) + sizeof (*udp) + sizeof (*dhcp) + dhcp_opt_len;

  ip->dst_address = all_dhcp6_relay_agents_and_servers;

  return b;
}
Example #10
File: bier_imp_node.c  Project: vpp-dev/vpp
always_inline uword
bier_imp_dpo_inline (vlib_main_t * vm,
                     vlib_node_runtime_t * node,
                     vlib_frame_t * from_frame,
                     fib_protocol_t fproto,
                     bier_hdr_proto_id_t bproto)
{
    u32 n_left_from, next_index, * from, * to_next;

    from = vlib_frame_vector_args (from_frame);
    n_left_from = from_frame->n_vectors;

    next_index = node->cached_next_index;

    while (n_left_from > 0)
    {
        u32 n_left_to_next;

        vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

        while (n_left_from > 0 && n_left_to_next > 0)
        {
            vlib_buffer_t * b0;
            bier_imp_t *bimp0;
            bier_hdr_t *hdr0;
            u32 bi0, bii0;
            u32 next0;

            bi0 = from[0];
            to_next[0] = bi0;
            from += 1;
            to_next += 1;
            n_left_from -= 1;
            n_left_to_next -= 1;

            b0 = vlib_get_buffer (vm, bi0);

            bii0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            bimp0 = bier_imp_get(bii0);

            if (FIB_PROTOCOL_IP4 == fproto)
            {
                /*
                 * decrement the TTL on ingress to the BIER domain
                 */
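                /* The decrement below uses an RFC 1624-style incremental
                 * checksum update: add 0x0100 (network byte order) to the
                 * sum, then fold the end-around carry. */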
                ip4_header_t * ip0 = vlib_buffer_get_current(b0);
                u32 checksum0;

                checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100);
                checksum0 += checksum0 >= 0xffff;

                ip0->checksum = checksum0;
                ip0->ttl -= 1;

                /*
                 * calculate an entropy
                 */
                if (0 == vnet_buffer(b0)->ip.flow_hash)
                {
                    vnet_buffer(b0)->ip.flow_hash =
                        ip4_compute_flow_hash (ip0, IP_FLOW_HASH_DEFAULT);
                }
            }
            if (FIB_PROTOCOL_IP6 == fproto)
            {
                /*
                 * decrement the TTL on ingress to the BIER domain
                 */
                ip6_header_t * ip0 = vlib_buffer_get_current(b0);

                ip0->hop_limit -= 1;

                /*
                 * calculate an entropy
                 */
                if (0 == vnet_buffer(b0)->ip.flow_hash)
                {
                    vnet_buffer(b0)->ip.flow_hash =
                        ip6_compute_flow_hash (ip0, IP_FLOW_HASH_DEFAULT);
                }
            }

            /* Paint the BIER header */
            vlib_buffer_advance(b0, -(sizeof(bier_hdr_t) +
                                      bier_hdr_len_id_to_num_bytes(bimp0->bi_tbl.bti_hdr_len)));
            hdr0 = vlib_buffer_get_current(b0);

            /* RPF check */
            if (PREDICT_FALSE(BIER_RX_ITF == vnet_buffer(b0)->ip.adj_index[VLIB_RX]))
            {
                next0 = 0;
            }
            else
            {
                clib_memcpy_fast(hdr0, &bimp0->bi_hdr,
                            (sizeof(bier_hdr_t) +
                             bier_hdr_len_id_to_num_bytes(bimp0->bi_tbl.bti_hdr_len)));
                /*
                 * Fixup the entropy and protocol, both of which have a
                 * zero value post the paint job
                 */
                hdr0->bh_oam_dscp_proto |=
                    clib_host_to_net_u16(bproto << BIER_HDR_PROTO_FIELD_SHIFT);
                hdr0->bh_first_word |=
                    clib_host_to_net_u32((vnet_buffer(b0)->ip.flow_hash &
                                          BIER_HDR_ENTROPY_FIELD_MASK) <<
                                         BIER_HDR_ENTROPY_FIELD_SHIFT);

                /*
                 * use TTL 64 for the post-encap MPLS label/BIFT-ID;
                 * this will be decremented in the bier_output node.
                 */
                vnet_buffer(b0)->mpls.ttl = 65;

                /* next node */
                next0 = bimp0->bi_dpo[fproto].dpoi_next_node;
                vnet_buffer(b0)->ip.adj_index[VLIB_TX] =
                    bimp0->bi_dpo[fproto].dpoi_index;
            }

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                bier_imp_trace_t *tr =
                    vlib_add_trace (vm, node, b0, sizeof (*tr));
                tr->imp = bii0;
                tr->hdr = *hdr0;
            }

            vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
                                            n_left_to_next, bi0, next0);
        }
        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
    return from_frame->n_vectors;
}
Example #11
File: trace.c  Project: JehandadKhan/vpp
/* Helper function for nodes which only trace buffer data. */
void
vlib_trace_frame_buffers_only (vlib_main_t * vm,
			       vlib_node_runtime_t * node,
			       u32 * buffers,
			       uword n_buffers,
			       uword next_buffer_stride,
			       uword n_buffer_data_bytes_in_trace)
{
  u32 n_left, * from;

  n_left = n_buffers;
  from = buffers;
  
  while (n_left >= 4)
    {
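      /* Two buffers are handled per pass; requiring at least four
         keeps the from[2]/from[3] prefetches below in bounds. */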
      u32 bi0, bi1;
      vlib_buffer_t * b0, * b1;
      u8 * t0, * t1;

      /* Prefetch next iteration. */
      vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
      vlib_prefetch_buffer_with_index (vm, from[3], LOAD);

      bi0 = from[0];
      bi1 = from[1];

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      if (b0->flags & VLIB_BUFFER_IS_TRACED)
	{
	  t0 = vlib_add_trace (vm, node, b0, n_buffer_data_bytes_in_trace);
	  memcpy (t0, b0->data + b0->current_data,
		  n_buffer_data_bytes_in_trace);
	}
      if (b1->flags & VLIB_BUFFER_IS_TRACED)
	{
	  t1 = vlib_add_trace (vm, node, b1, n_buffer_data_bytes_in_trace);
	  memcpy (t1, b1->data + b1->current_data,
		  n_buffer_data_bytes_in_trace);
	}
      from += 2;
      n_left -= 2;
    }

  while (n_left >= 1)
    {
      u32 bi0;
      vlib_buffer_t * b0;
      u8 * t0;

      bi0 = from[0];

      b0 = vlib_get_buffer (vm, bi0);

      if (b0->flags & VLIB_BUFFER_IS_TRACED)
	{
	  t0 = vlib_add_trace (vm, node, b0, n_buffer_data_bytes_in_trace);
	  memcpy (t0, b0->data + b0->current_data,
		  n_buffer_data_bytes_in_trace);
	}
      from += 1;
      n_left -= 1;
    }
}
Example #12
/**
 * Enqueue buffer chain tail
 */
always_inline int
session_enqueue_chain_tail (session_t * s, vlib_buffer_t * b,
			    u32 offset, u8 is_in_order)
{
  vlib_buffer_t *chain_b;
  u32 chain_bi, len, diff;
  vlib_main_t *vm = vlib_get_main ();
  u8 *data;
  u32 written = 0;
  int rv = 0;

  if (is_in_order && offset)
    {
      diff = offset - b->current_length;
      if (diff > b->total_length_not_including_first_buffer)
	return 0;
      chain_b = b;
      session_enqueue_discard_chain_bytes (vm, b, &chain_b, diff);
      chain_bi = vlib_get_buffer_index (vm, chain_b);
    }
  else
    chain_bi = b->next_buffer;

  do
    {
      chain_b = vlib_get_buffer (vm, chain_bi);
      data = vlib_buffer_get_current (chain_b);
      len = chain_b->current_length;
      if (!len)
	continue;
      if (is_in_order)
	{
	  rv = svm_fifo_enqueue (s->rx_fifo, len, data);
	  if (rv == len)
	    {
	      written += rv;
	    }
	  else if (rv < len)
	    {
	      return (rv > 0) ? (written + rv) : written;
	    }
	  else if (rv > len)
	    {
	      written += rv;

	      /* written more than what was left in chain */
	      if (written > b->total_length_not_including_first_buffer)
		return written;

	      /* drop the bytes that have already been delivered */
	      session_enqueue_discard_chain_bytes (vm, b, &chain_b, rv - len);
	    }
	}
      else
	{
	  rv = svm_fifo_enqueue_with_offset (s->rx_fifo, offset, len, data);
	  if (rv)
	    {
	      clib_warning ("failed to enqueue multi-buffer seg");
	      return -1;
	    }
	  offset += len;
	}
    }
  while ((chain_bi = (chain_b->flags & VLIB_BUFFER_NEXT_PRESENT)
	  ? chain_b->next_buffer : 0));

  if (is_in_order)
    return written;

  return 0;
}
Example #13
/*
 * fish pkts back from the recycle queue/freelist
 * un-flatten the context chains
 */
static void replication_recycle_callback (vlib_main_t *vm, 
                                          vlib_buffer_free_list_t * fl)
{
  vlib_frame_t * f = 0;
  u32 n_left_from;
  u32 n_left_to_next = 0;
  u32 n_this_frame = 0;
  u32 * from;
  u32 * to_next = 0;
  u32 bi0, pi0;
  vlib_buffer_t *b0;
  vlib_buffer_t *bnext0;
  int i;
  replication_main_t * rm = &replication_main;
  replication_context_t * ctx;
  u32 feature_node_index = 0; 
  uword cpu_number = vm->cpu_index;

  // All buffers in the list are destined to the same recycle node.
  // Pull the recycle node index from the first buffer. 
  // Note: this could be sped up if the node index were stuffed into
  // the freelist itself.
  if (vec_len (fl->aligned_buffers) > 0) {
    bi0 = fl->aligned_buffers[0];
    b0 = vlib_get_buffer (vm, bi0);
    ctx = pool_elt_at_index (rm->contexts[cpu_number],
                             b0->clone_count);
    feature_node_index = ctx->recycle_node_index;
  } else if (vec_len (fl->unaligned_buffers) > 0) {
    bi0 = fl->unaligned_buffers[0];
    b0 = vlib_get_buffer (vm, bi0);
    ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->clone_count);
    feature_node_index = ctx->recycle_node_index;
  }

  /* aligned, unaligned buffers */
  for (i = 0; i < 2; i++) 
    {
      if (i == 0)
        {
          from = fl->aligned_buffers;
          n_left_from = vec_len (from);
        }
      else
        {
          from = fl->unaligned_buffers;
          n_left_from = vec_len (from);
        }
    
      while (n_left_from > 0)
        {
          if (PREDICT_FALSE(n_left_to_next == 0)) 
            {
              if (f)
                {
                  f->n_vectors = n_this_frame;
                  vlib_put_frame_to_node (vm, feature_node_index, f);
                }
              
              f = vlib_get_frame_to_node (vm, feature_node_index);
              to_next = vlib_frame_vector_args (f);
              n_left_to_next = VLIB_FRAME_SIZE;
              n_this_frame = 0;
            }
          
          bi0 = from[0];
          if (PREDICT_TRUE(n_left_from > 1))
            {
              pi0 = from[1];
              vlib_prefetch_buffer_with_index(vm,pi0,LOAD);
            }
        
          bnext0 = b0 = vlib_get_buffer (vm, bi0);

          // Mark that this buffer was just recycled
          b0->flags |= VLIB_BUFFER_IS_RECYCLED;

          // If buffer is traced, mark frame as traced
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
              f->flags |= VLIB_FRAME_TRACE;
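          // Chain members follow the head contiguously in the freelist;
          // skip past them below so only the chain head is enqueued.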

          while (bnext0->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              from += 1;
              n_left_from -= 1;
              bnext0 = vlib_get_buffer (vm, bnext0->next_buffer);
            }
          to_next[0] = bi0;

          from++;
          to_next++;
          n_this_frame++;
          n_left_to_next--;
          n_left_from--;
        }
    }
  
  vec_reset_length (fl->aligned_buffers);
  vec_reset_length (fl->unaligned_buffers);

  if (f)
    {
      ASSERT(n_this_frame);
      f->n_vectors = n_this_frame;
      vlib_put_frame_to_node (vm, feature_node_index, f);
    }
}
Example #14
File: ip6_sixrd.c  Project: Venkattk/vpp
/*
 * ip6_sixrd
 */
static uword
ip6_sixrd (vlib_main_t *vm,
	   vlib_node_runtime_t *node,
	   vlib_frame_t *frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip6_sixrd_node.index);
  u32 encap = 0;
  from = vlib_frame_vector_args(frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0) {
    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

    while (n_left_from > 0 && n_left_to_next > 0) {
      u32 pi0;
      vlib_buffer_t *p0;
      sixrd_domain_t *d0;
      u8 error0 = SIXRD_ERROR_NONE;
      ip6_header_t *ip60;
      ip4_header_t *ip4h0;
      u32 next0 = IP6_SIXRD_NEXT_IP4_LOOKUP;
      u32 sixrd_domain_index0 = ~0;

      pi0 = to_next[0] = from[0];
      from += 1;
      n_left_from -= 1;
      to_next +=1;
      n_left_to_next -= 1;

      p0 = vlib_get_buffer(vm, pi0);
      ip60 = vlib_buffer_get_current(p0);
      //      p0->current_length = clib_net_to_host_u16(ip40->length);
      d0 = ip6_sixrd_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX], &sixrd_domain_index0);
      ASSERT(d0);

      /* SIXRD calc */
      u64 dal60 = clib_net_to_host_u64(ip60->dst_address.as_u64[0]);
      u32 da40 = sixrd_get_addr(d0, dal60);
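      /* New IPv4 total length: v6 payload + 40 (v6 header) + 20 (v4 header). */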
      u16 len = clib_net_to_host_u16(ip60->payload_length) + 60;
      if (da40 == 0) error0 = SIXRD_ERROR_UNKNOWN;

      /* construct ipv4 header */
      vlib_buffer_advance(p0, - (sizeof(ip4_header_t)));
      ip4h0 = vlib_buffer_get_current(p0);
      vnet_buffer(p0)->sw_if_index[VLIB_TX] = (u32)~0;
      ip4h0->ip_version_and_header_length = 0x45;
      ip4h0->tos = 0;
      ip4h0->length = clib_host_to_net_u16(len);
      ip4h0->fragment_id = 0;
      ip4h0->flags_and_fragment_offset = 0;
      ip4h0->ttl = 0x40;
      ip4h0->protocol = IP_PROTOCOL_IPV6;
      ip4h0->src_address = d0->ip4_src;
      ip4h0->dst_address.as_u32 = clib_host_to_net_u32(da40);
      ip4h0->checksum = ip4_header_checksum(ip4h0);

      next0 = error0 == SIXRD_ERROR_NONE ? IP6_SIXRD_NEXT_IP4_LOOKUP : IP6_SIXRD_NEXT_DROP;

      if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
	sixrd_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
	tr->sixrd_domain_index = sixrd_domain_index0;
      }

      p0->error = error_node->errors[error0];
      if (PREDICT_TRUE(error0 == SIXRD_ERROR_NONE)) encap++;

      vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, next0);
    }
    vlib_put_next_frame(vm, node, next_index, n_left_to_next);  
  }
  vlib_node_increment_counter(vm, ip6_sixrd_node.index, SIXRD_ERROR_ENCAPSULATED, encap);

  return frame->n_vectors;
}
Example #15
File: encap.c  Project: chrisy/vpp
always_inline uword
geneve_encap_inline (vlib_main_t * vm,
		     vlib_node_runtime_t * node,
		     vlib_frame_t * from_frame, u32 is_ip4)
{
  u32 n_left_from, next_index, *from, *to_next;
  geneve_main_t *vxm = &geneve_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u16 old_l0 = 0, old_l1 = 0;
  u32 thread_index = vm->thread_index;
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u32 sw_if_index0 = ~0, sw_if_index1 = ~0;
  u32 next0 = 0, next1 = 0;
  vnet_hw_interface_t *hi0, *hi1;
  geneve_tunnel_t *t0 = NULL, *t1 = NULL;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  u32 bi0, bi1;
	  vlib_buffer_t *b0, *b1;
	  u32 flow_hash0, flow_hash1;
	  u32 len0, len1;
	  ip4_header_t *ip4_0, *ip4_1;
	  ip6_header_t *ip6_0, *ip6_1;
	  udp_header_t *udp0, *udp1;
	  u64 *copy_src0, *copy_dst0;
	  u64 *copy_src1, *copy_dst1;
	  u32 *copy_src_last0, *copy_dst_last0;
	  u32 *copy_src_last1, *copy_dst_last1;
	  u16 new_l0, new_l1;
	  ip_csum_t sum0, sum1;

	  /* Prefetch next iteration. */
	  {
	    vlib_buffer_t *p2, *p3;

	    p2 = vlib_get_buffer (vm, from[2]);
	    p3 = vlib_get_buffer (vm, from[3]);

	    vlib_prefetch_buffer_header (p2, LOAD);
	    vlib_prefetch_buffer_header (p3, LOAD);

	    CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	    CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	  }

	  bi0 = from[0];
	  bi1 = from[1];
	  to_next[0] = bi0;
	  to_next[1] = bi1;
	  from += 2;
	  to_next += 2;
	  n_left_to_next -= 2;
	  n_left_from -= 2;

	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);

	  flow_hash0 = vnet_l2_compute_flow_hash (b0);
	  flow_hash1 = vnet_l2_compute_flow_hash (b1);

	  /* Get next node index and adj index from tunnel next_dpo */
	  if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
	    {
	      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
	      hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
	      t0 = &vxm->tunnels[hi0->dev_instance];
	      /* Note: change to always set next0 if it may be set to drop */
	      next0 = t0->next_dpo.dpoi_next_node;
	    }

	  ASSERT (t0 != NULL);

	  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

	  /* Get next node index and adj index from tunnel next_dpo */
	  if (sw_if_index1 != vnet_buffer (b1)->sw_if_index[VLIB_TX])
	    {
	      sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
	      hi1 = vnet_get_sup_hw_interface (vnm, sw_if_index1);
	      t1 = &vxm->tunnels[hi1->dev_instance];
	      /* Note: change to always set next1 if it may be set to drop */
	      next1 = t1->next_dpo.dpoi_next_node;
	    }

	  ASSERT (t1 != NULL);

	  vnet_buffer (b1)->ip.adj_index[VLIB_TX] = t1->next_dpo.dpoi_index;

	  /* Apply the rewrite string. $$$$ vnet_rewrite? */
	  vlib_buffer_advance (b0, -(word) _vec_len (t0->rewrite));
	  vlib_buffer_advance (b1, -(word) _vec_len (t1->rewrite));

	  if (is_ip4)
	    {
	      u8 ip4_geneve_base_header_len =
		sizeof (ip4_header_t) + sizeof (udp_header_t) +
		GENEVE_BASE_HEADER_LENGTH;
	      u8 ip4_geneve_header_total_len0 = ip4_geneve_base_header_len;
	      u8 ip4_geneve_header_total_len1 = ip4_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
	      ip4_geneve_header_total_len0 += t0->options_len;
	      ip4_geneve_header_total_len1 += t1->options_len;
#endif
	      ASSERT (vec_len (t0->rewrite) == ip4_geneve_header_total_len0);
	      ASSERT (vec_len (t1->rewrite) == ip4_geneve_header_total_len1);

	      ip4_0 = vlib_buffer_get_current (b0);
	      ip4_1 = vlib_buffer_get_current (b1);

	      /* Copy the fixed header */
	      copy_dst0 = (u64 *) ip4_0;
	      copy_src0 = (u64 *) t0->rewrite;
	      copy_dst1 = (u64 *) ip4_1;
	      copy_src1 = (u64 *) t1->rewrite;
	      /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
	      foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
	      foreach_fixed_header4_offset;
#undef _
	      /* Last 4 octets. Hopefully gcc will be our friend */
	      copy_dst_last0 = (u32 *) (&copy_dst0[4]);
	      copy_src_last0 = (u32 *) (&copy_src0[4]);
	      copy_dst_last0[0] = copy_src_last0[0];
	      copy_dst_last1 = (u32 *) (&copy_dst1[4]);
	      copy_src_last1 = (u32 *) (&copy_src1[4]);
	      copy_dst_last1[0] = copy_src_last1[0];

	      /* Fix the IP4 checksum and length */
	      sum0 = ip4_0->checksum;
	      new_l0 =		/* old_l0 always 0, see the rewrite setup */
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
	      sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
				     length /* changed member */ );
	      ip4_0->checksum = ip_csum_fold (sum0);
	      ip4_0->length = new_l0;
	      sum1 = ip4_1->checksum;
	      new_l1 =		/* old_l1 always 0, see the rewrite setup */
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));
	      sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
				     length /* changed member */ );
	      ip4_1->checksum = ip_csum_fold (sum1);
	      ip4_1->length = new_l1;

	      /* Fix UDP length and set source port */
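	      /* The 32-bit flow hash is truncated to the 16-bit UDP source
	         port, giving downstream ECMP some per-flow entropy. */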
	      udp0 = (udp_header_t *) (ip4_0 + 1);
	      new_l0 =
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
				      sizeof (*ip4_0));
	      udp0->length = new_l0;
	      udp0->src_port = flow_hash0;
	      udp1 = (udp_header_t *) (ip4_1 + 1);
	      new_l1 =
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1) -
				      sizeof (*ip4_1));
	      udp1->length = new_l1;
	      udp1->src_port = flow_hash1;
	    }
	  else			/* ipv6 */
	    {
	      int bogus = 0;

	      u8 ip6_geneve_base_header_len =
		sizeof (ip6_header_t) + sizeof (udp_header_t) +
		GENEVE_BASE_HEADER_LENGTH;
	      u8 ip6_geneve_header_total_len0 = ip6_geneve_base_header_len;
	      u8 ip6_geneve_header_total_len1 = ip6_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
	      ip6_geneve_header_total_len0 += t0->options_len;
	      ip6_geneve_header_total_len1 += t1->options_len;
#endif
	      ASSERT (vec_len (t0->rewrite) == ip6_geneve_header_total_len0);
	      ASSERT (vec_len (t1->rewrite) == ip6_geneve_header_total_len1);

	      ip6_0 = vlib_buffer_get_current (b0);
	      ip6_1 = vlib_buffer_get_current (b1);

	      /* Copy the fixed header */
	      copy_dst0 = (u64 *) ip6_0;
	      copy_src0 = (u64 *) t0->rewrite;
	      copy_dst1 = (u64 *) ip6_1;
	      copy_src1 = (u64 *) t1->rewrite;
	      /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
	      foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
	      foreach_fixed_header6_offset;
#undef _
	      /* Fix IP6 payload length */
	      new_l0 =
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
				      - sizeof (*ip6_0));
	      ip6_0->payload_length = new_l0;
	      new_l1 =
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
				      - sizeof (*ip6_1));
	      ip6_1->payload_length = new_l1;

	      /* Fix UDP length  and set source port */
	      udp0 = (udp_header_t *) (ip6_0 + 1);
	      udp0->length = new_l0;
	      udp0->src_port = flow_hash0;
	      udp1 = (udp_header_t *) (ip6_1 + 1);
	      udp1->length = new_l1;
	      udp1->src_port = flow_hash1;

	      /* IPv6 UDP checksum is mandatory */
	      udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b0,
								  ip6_0,
								  &bogus);
	      ASSERT (bogus == 0);
	      if (udp0->checksum == 0)
		udp0->checksum = 0xffff;
	      udp1->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b1,
								  ip6_1,
								  &bogus);
	      ASSERT (bogus == 0);
	      if (udp1->checksum == 0)
		udp1->checksum = 0xffff;
	    }

	  pkts_encapsulated += 2;
	  len0 = vlib_buffer_length_in_chain (vm, b0);
	  len1 = vlib_buffer_length_in_chain (vm, b1);
	  stats_n_packets += 2;
	  stats_n_bytes += len0 + len1;

	  /* Batch stats increment on the same geneve tunnel so counter is not
	     incremented per packet. Note stats are still incremented for deleted
	     and admin-down tunnel where packets are dropped. It is not worthwhile
	     to check for this rare case and affect normal path performance. */
	  if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index) ||
			     (sw_if_index1 != stats_sw_if_index)))
	    {
	      stats_n_packets -= 2;
	      stats_n_bytes -= len0 + len1;
	      if (sw_if_index0 == sw_if_index1)
		{
		  if (stats_n_packets)
		    vlib_increment_combined_counter
		      (im->combined_sw_if_counters +
		       VNET_INTERFACE_COUNTER_TX, thread_index,
		       stats_sw_if_index, stats_n_packets, stats_n_bytes);
		  stats_sw_if_index = sw_if_index0;
		  stats_n_packets = 2;
		  stats_n_bytes = len0 + len1;
		}
	      else
		{
		  vlib_increment_combined_counter
		    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		     thread_index, sw_if_index0, 1, len0);
		  vlib_increment_combined_counter
		    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		     thread_index, sw_if_index1, 1, len1);
		}
	    }

	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      geneve_encap_trace_t *tr =
		vlib_add_trace (vm, node, b0, sizeof (*tr));
	      tr->tunnel_index = t0 - vxm->tunnels;
	      tr->vni = t0->vni;
	    }

	  if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      geneve_encap_trace_t *tr =
		vlib_add_trace (vm, node, b1, sizeof (*tr));
	      tr->tunnel_index = t1 - vxm->tunnels;
	      tr->vni = t1->vni;
	    }

	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, bi1, next0, next1);
	}

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0;
	  vlib_buffer_t *b0;
	  u32 flow_hash0;
	  u32 len0;
	  ip4_header_t *ip4_0;
	  ip6_header_t *ip6_0;
	  udp_header_t *udp0;
	  u64 *copy_src0, *copy_dst0;
	  u32 *copy_src_last0, *copy_dst_last0;
	  u16 new_l0;
	  ip_csum_t sum0;

	  bi0 = from[0];
	  to_next[0] = bi0;
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  b0 = vlib_get_buffer (vm, bi0);

	  flow_hash0 = vnet_l2_compute_flow_hash (b0);

	  /* Get next node index and adj index from tunnel next_dpo */
	  if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
	    {
	      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
	      hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
	      t0 = &vxm->tunnels[hi0->dev_instance];
	      /* Note: change to always set next0 if it may be set to drop */
	      next0 = t0->next_dpo.dpoi_next_node;
	    }
	  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

	  /* Apply the rewrite string. $$$$ vnet_rewrite? */
	  vlib_buffer_advance (b0, -(word) _vec_len (t0->rewrite));

	  if (is_ip4)
	    {
	      u8 ip4_geneve_base_header_len =
		sizeof (ip4_header_t) + sizeof (udp_header_t) +
		GENEVE_BASE_HEADER_LENGTH;
	      u8 ip4_geneve_header_total_len0 = ip4_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
	      ip4_geneve_header_total_len0 += t0->options_len;
#endif
	      ASSERT (vec_len (t0->rewrite) == ip4_geneve_header_total_len0);

	      ip4_0 = vlib_buffer_get_current (b0);

	      /* Copy the fixed header */
	      copy_dst0 = (u64 *) ip4_0;
	      copy_src0 = (u64 *) t0->rewrite;
	      /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
	      foreach_fixed_header4_offset;
#undef _
	      /* Last 4 octets. Hopefully gcc will be our friend */
	      copy_dst_last0 = (u32 *) (&copy_dst0[4]);
	      copy_src_last0 = (u32 *) (&copy_src0[4]);
	      copy_dst_last0[0] = copy_src_last0[0];

	      /* Fix the IP4 checksum and length */
	      sum0 = ip4_0->checksum;
	      new_l0 =		/* old_l0 always 0, see the rewrite setup */
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
	      sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
				     length /* changed member */ );
	      ip4_0->checksum = ip_csum_fold (sum0);
	      ip4_0->length = new_l0;

	      /* Fix UDP length and set source port */
	      udp0 = (udp_header_t *) (ip4_0 + 1);
	      new_l0 =
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
				      sizeof (*ip4_0));
	      udp0->length = new_l0;
	      udp0->src_port = flow_hash0;
	    }

	  else			/* ip6 path */
	    {
	      int bogus = 0;

	      u8 ip6_geneve_base_header_len =
		sizeof (ip6_header_t) + sizeof (udp_header_t) +
		GENEVE_BASE_HEADER_LENGTH;
	      u8 ip6_geneve_header_total_len0 = ip6_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
	      ip6_geneve_header_total_len0 += t0->options_len;
#endif
	      ASSERT (vec_len (t0->rewrite) == ip6_geneve_header_total_len0);

	      ip6_0 = vlib_buffer_get_current (b0);
	      /* Copy the fixed header */
	      copy_dst0 = (u64 *) ip6_0;
	      copy_src0 = (u64 *) t0->rewrite;
	      /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
	      foreach_fixed_header6_offset;
#undef _
	      /* Fix IP6 payload length */
	      new_l0 =
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
				      - sizeof (*ip6_0));
	      ip6_0->payload_length = new_l0;

	      /* Fix UDP length  and set source port */
	      udp0 = (udp_header_t *) (ip6_0 + 1);
	      udp0->length = new_l0;
	      udp0->src_port = flow_hash0;

	      /* IPv6 UDP checksum is mandatory */
	      udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b0,
								  ip6_0,
								  &bogus);
	      ASSERT (bogus == 0);
	      if (udp0->checksum == 0)
		udp0->checksum = 0xffff;
	    }

	  pkts_encapsulated++;
	  len0 = vlib_buffer_length_in_chain (vm, b0);
	  stats_n_packets += 1;
	  stats_n_bytes += len0;

	  /* Batch stats increment on the same geneve tunnel so counter is not
	     incremented per packet. Note stats are still incremented for deleted
	     and admin-down tunnel where packets are dropped. It is not worthwhile
	     to check for this rare case and affect normal path performance. */
	  if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
	    {
	      stats_n_packets -= 1;
	      stats_n_bytes -= len0;
	      if (stats_n_packets)
		vlib_increment_combined_counter
		  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		   thread_index, stats_sw_if_index,
		   stats_n_packets, stats_n_bytes);
	      stats_n_packets = 1;
	      stats_n_bytes = len0;
	      stats_sw_if_index = sw_if_index0;
	    }

	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      geneve_encap_trace_t *tr =
		vlib_add_trace (vm, node, b0, sizeof (*tr));
	      tr->tunnel_index = t0 - vxm->tunnels;
	      tr->vni = t0->vni;
	    }
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, node->node_index,
			       GENEVE_ENCAP_ERROR_ENCAPSULATED,
			       pkts_encapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
	(im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
	 thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}
Example #16
static clib_error_t *
recvmsg_helper (mc_socket_main_t * msm,
		int socket,
		struct sockaddr_in * rx_addr,
		u32 * buffer_index,
		u32 drop_message)
{
  vlib_main_t * vm = msm->mc_main.vlib_main;
  vlib_buffer_t * b;
  uword n_left, n_alloc, n_mtu, i, i_rx;
  const uword buffer_size = VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES;
  word n_bytes_left;

  /* Make sure we have at least an MTU's worth of buffers. */
  n_mtu = msm->rx_mtu_n_buffers;
  n_left = vec_len (msm->rx_buffers);
  if (n_left < n_mtu)
    {
      uword max_alloc = 8 * n_mtu;
      vec_validate (msm->rx_buffers, max_alloc - 1);
      n_alloc = vlib_buffer_alloc (vm, msm->rx_buffers + n_left, max_alloc - n_left);
      _vec_len (msm->rx_buffers) = n_left + n_alloc;
    }

  ASSERT (vec_len (msm->rx_buffers) >= n_mtu);
  vec_validate (msm->iovecs, n_mtu - 1);

  /* Allocate RX buffers from the end of rx_buffers.
     Turn them into iovecs to pass to recvmsg. */
  i_rx = vec_len (msm->rx_buffers) - 1;
  for (i = 0; i < n_mtu; i++)
    {
      b = vlib_get_buffer (vm, msm->rx_buffers[i_rx - i]);
      msm->iovecs[i].iov_base = b->data;
      msm->iovecs[i].iov_len = buffer_size;
    }
  _vec_len (msm->iovecs) = n_mtu;

  {
    struct msghdr h;

    memset (&h, 0, sizeof (h));
    if (rx_addr)
      {
	h.msg_name = rx_addr;
	h.msg_namelen = sizeof (rx_addr[0]);
      }
    h.msg_iov = msm->iovecs;
    h.msg_iovlen = vec_len (msm->iovecs);

    n_bytes_left = recvmsg (socket, &h, 0);
    if (n_bytes_left < 0)
      return clib_error_return_unix (0, "recvmsg");
  }

  if (drop_message)
    {
      *buffer_index = ~0;
      return 0;
    }

  *buffer_index = msm->rx_buffers[i_rx];
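  /* Build the chain from the tail of rx_buffers downward, giving each
     buffer up to buffer_size bytes until n_bytes_left is exhausted. */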
  while (1)
    {
      b = vlib_get_buffer (vm, msm->rx_buffers[i_rx]);

      b->flags = 0;
      b->current_data = 0;
      b->current_length = n_bytes_left < buffer_size ? n_bytes_left : buffer_size;

      n_bytes_left -= buffer_size;

      if (n_bytes_left <= 0)
	break;

      i_rx--;
      b->flags |= VLIB_BUFFER_NEXT_PRESENT;
      b->next_buffer = msm->rx_buffers[i_rx];
    }

  _vec_len (msm->rx_buffers) = i_rx;

  return 0 /* no error */;
}
Example #17
File: adj_midchain.c  Project: vpp-dev/vpp
always_inline uword
adj_midchain_tx_inline (vlib_main_t * vm,
			vlib_node_runtime_t * node,
			vlib_frame_t * frame,
			int interface_count)
{
    u32 * from, * to_next, n_left_from, n_left_to_next;
    u32 next_index;
    vnet_main_t *vnm = vnet_get_main ();
    vnet_interface_main_t *im = &vnm->interface_main;
    u32 thread_index = vm->thread_index;

    /* Vector of buffer / pkt indices we're supposed to process */
    from = vlib_frame_vector_args (frame);

    /* Number of buffers / pkts */
    n_left_from = frame->n_vectors;

    /* Speculatively send the first buffer to the last disposition we used */
    next_index = node->cached_next_index;

    while (n_left_from > 0)
    {
	/* set up to enqueue to our disposition with index = next_index */
	vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

	while (n_left_from >= 8 && n_left_to_next > 4)
	{
	    u32 bi0, adj_index0, next0;
	    const ip_adjacency_t * adj0;
	    const dpo_id_t *dpo0;
	    vlib_buffer_t * b0;
	    u32 bi1, adj_index1, next1;
	    const ip_adjacency_t * adj1;
	    const dpo_id_t *dpo1;
	    vlib_buffer_t * b1;
	    u32 bi2, adj_index2, next2;
	    const ip_adjacency_t * adj2;
	    const dpo_id_t *dpo2;
	    vlib_buffer_t * b2;
	    u32 bi3, adj_index3, next3;
	    const ip_adjacency_t * adj3;
	    const dpo_id_t *dpo3;
	    vlib_buffer_t * b3;

	    /* Prefetch next iteration. */
	    {
		vlib_buffer_t * p4, * p5;
		vlib_buffer_t * p6, * p7;

		p4 = vlib_get_buffer (vm, from[4]);
		p5 = vlib_get_buffer (vm, from[5]);
		p6 = vlib_get_buffer (vm, from[6]);
		p7 = vlib_get_buffer (vm, from[7]);

		vlib_prefetch_buffer_header (p4, LOAD);
		vlib_prefetch_buffer_header (p5, LOAD);
		vlib_prefetch_buffer_header (p6, LOAD);
		vlib_prefetch_buffer_header (p7, LOAD);
	    }

	    bi0 = from[0];
	    to_next[0] = bi0;
	    bi1 = from[1];
	    to_next[1] = bi1;
	    bi2 = from[2];
	    to_next[2] = bi2;
	    bi3 = from[3];
	    to_next[3] = bi3;

	    from += 4;
	    to_next += 4;
	    n_left_from -= 4;
	    n_left_to_next -= 4;

	    b0 = vlib_get_buffer(vm, bi0);
	    b1 = vlib_get_buffer(vm, bi1);
	    b2 = vlib_get_buffer(vm, bi2);
	    b3 = vlib_get_buffer(vm, bi3);

	    /* Follow the DPO on which the midchain is stacked */
	    adj_index0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
	    adj_index1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
	    adj_index2 = vnet_buffer(b2)->ip.adj_index[VLIB_TX];
	    adj_index3 = vnet_buffer(b3)->ip.adj_index[VLIB_TX];

	    adj0 = adj_get(adj_index0);
	    adj1 = adj_get(adj_index1);
	    adj2 = adj_get(adj_index2);
	    adj3 = adj_get(adj_index3);

	    dpo0 = &adj0->sub_type.midchain.next_dpo;
	    dpo1 = &adj1->sub_type.midchain.next_dpo;
	    dpo2 = &adj2->sub_type.midchain.next_dpo;
	    dpo3 = &adj3->sub_type.midchain.next_dpo;

	    next0 = dpo0->dpoi_next_node;
	    next1 = dpo1->dpoi_next_node;
	    next2 = dpo2->dpoi_next_node;
	    next3 = dpo3->dpoi_next_node;

            vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
            vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
            vnet_buffer(b2)->ip.adj_index[VLIB_TX] = dpo2->dpoi_index;
            vnet_buffer(b3)->ip.adj_index[VLIB_TX] = dpo3->dpoi_index;

	    if (interface_count)
	    {
		vlib_increment_combined_counter (im->combined_sw_if_counters
						 + VNET_INTERFACE_COUNTER_TX,
						 thread_index,
						 adj0->rewrite_header.sw_if_index,
						 1,
						 vlib_buffer_length_in_chain (vm, b0));
		vlib_increment_combined_counter (im->combined_sw_if_counters
						 + VNET_INTERFACE_COUNTER_TX,
						 thread_index,
						 adj1->rewrite_header.sw_if_index,
						 1,
						 vlib_buffer_length_in_chain (vm, b1));
		vlib_increment_combined_counter (im->combined_sw_if_counters
						 + VNET_INTERFACE_COUNTER_TX,
						 thread_index,
						 adj2->rewrite_header.sw_if_index,
						 1,
						 vlib_buffer_length_in_chain (vm, b2));
		vlib_increment_combined_counter (im->combined_sw_if_counters
						 + VNET_INTERFACE_COUNTER_TX,
						 thread_index,
						 adj3->rewrite_header.sw_if_index,
						 1,
						 vlib_buffer_length_in_chain (vm, b3));
	    }

	    if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
		adj_midchain_tx_trace_t *tr = vlib_add_trace (vm, node,
							      b0, sizeof (*tr));
		tr->ai = adj_index0;
	    }
	    if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
	    {
		adj_midchain_tx_trace_t *tr = vlib_add_trace (vm, node,
							      b1, sizeof (*tr));
		tr->ai = adj_index1;
	    }
	    if (PREDICT_FALSE(b2->flags & VLIB_BUFFER_IS_TRACED))
	    {
		adj_midchain_tx_trace_t *tr = vlib_add_trace (vm, node,
							      b2, sizeof (*tr));
		tr->ai = adj_index2;
	    }
	    if (PREDICT_FALSE(b3->flags & VLIB_BUFFER_IS_TRACED))
	    {
		adj_midchain_tx_trace_t *tr = vlib_add_trace (vm, node,
							      b3, sizeof (*tr));
		tr->ai = adj_index3;
	    }

	    vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
					     to_next, n_left_to_next,
					     bi0, bi1, bi2, bi3,
					     next0, next1, next2, next3);
	}
	while (n_left_from > 0 && n_left_to_next > 0)
	{
	    u32 bi0, adj_index0, next0;
	    const ip_adjacency_t * adj0;
	    const dpo_id_t *dpo0;
	    vlib_buffer_t * b0;

	    bi0 = from[0];
	    to_next[0] = bi0;
	    from += 1;
	    to_next += 1;
	    n_left_from -= 1;
	    n_left_to_next -= 1;

	    b0 = vlib_get_buffer(vm, bi0);

	    /* Follow the DPO on which the midchain is stacked */
	    adj_index0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
	    adj0 = adj_get(adj_index0);
	    dpo0 = &adj0->sub_type.midchain.next_dpo;
	    next0 = dpo0->dpoi_next_node;
            vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

	    if (interface_count)
	    {
		vlib_increment_combined_counter (im->combined_sw_if_counters
						 + VNET_INTERFACE_COUNTER_TX,
						 thread_index,
						 adj0->rewrite_header.sw_if_index,
						 1,
						 vlib_buffer_length_in_chain (vm, b0));
	    }

	    if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
		adj_midchain_tx_trace_t *tr = vlib_add_trace (vm, node,
							      b0, sizeof (*tr));
		tr->ai = adj_index0;
	    }

	    vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					     to_next, n_left_to_next,
					     bi0, next0);
	}

	vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

    return frame->n_vectors;
}
Example #18
File: gtpu_encap.c  Project: chrisy/vpp
always_inline uword
gtpu_encap_inline (vlib_main_t * vm,
		    vlib_node_runtime_t * node,
		    vlib_frame_t * from_frame,
		    u32 is_ip4)
{
  u32 n_left_from, next_index, * from, * to_next;
  gtpu_main_t * gtm = &gtpu_main;
  vnet_main_t * vnm = gtm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u16 old_l0 = 0, old_l1 = 0, old_l2 = 0, old_l3 = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u32 sw_if_index0 = 0, sw_if_index1 = 0, sw_if_index2 = 0, sw_if_index3 = 0;
  u32 next0 = 0, next1 = 0, next2 = 0, next3 = 0;
  vnet_hw_interface_t * hi0, * hi1, * hi2, * hi3;
  gtpu_tunnel_t * t0 = NULL, * t1 = NULL, * t2 = NULL, * t3 = NULL;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
			   to_next, n_left_to_next);

      while (n_left_from >= 8 && n_left_to_next >= 4)
	{
          u32 bi0, bi1, bi2, bi3;
	  vlib_buffer_t * b0, * b1, * b2, * b3;
          u32 flow_hash0, flow_hash1, flow_hash2, flow_hash3;
	  u32 len0, len1, len2, len3;
          ip4_header_t * ip4_0, * ip4_1, * ip4_2, * ip4_3;
          ip6_header_t * ip6_0, * ip6_1, * ip6_2, * ip6_3;
          udp_header_t * udp0, * udp1, * udp2, * udp3;
          gtpu_header_t * gtpu0, * gtpu1, * gtpu2, * gtpu3;
          u64 * copy_src0, * copy_dst0;
          u64 * copy_src1, * copy_dst1;
          u64 * copy_src2, * copy_dst2;
          u64 * copy_src3, * copy_dst3;
          u32 * copy_src_last0, * copy_dst_last0;
          u32 * copy_src_last1, * copy_dst_last1;
          u32 * copy_src_last2, * copy_dst_last2;
          u32 * copy_src_last3, * copy_dst_last3;
          u16 new_l0, new_l1, new_l2, new_l3;
          ip_csum_t sum0, sum1, sum2, sum3;

	  /* Prefetch next iteration. */
	  {
	    vlib_buffer_t * p4, * p5, * p6, * p7;

	    p4 = vlib_get_buffer (vm, from[4]);
	    p5 = vlib_get_buffer (vm, from[5]);
	    p6 = vlib_get_buffer (vm, from[6]);
	    p7 = vlib_get_buffer (vm, from[7]);

	    vlib_prefetch_buffer_header (p4, LOAD);
	    vlib_prefetch_buffer_header (p5, LOAD);
	    vlib_prefetch_buffer_header (p6, LOAD);
	    vlib_prefetch_buffer_header (p7, LOAD);

	    CLIB_PREFETCH (p4->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
	    CLIB_PREFETCH (p5->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
	    CLIB_PREFETCH (p6->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
	    CLIB_PREFETCH (p7->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
	  }

	  bi0 = from[0];
	  bi1 = from[1];
	  bi2 = from[2];
	  bi3 = from[3];
	  to_next[0] = bi0;
	  to_next[1] = bi1;
	  to_next[2] = bi2;
	  to_next[3] = bi3;
	  from += 4;
	  to_next += 4;
	  n_left_to_next -= 4;
	  n_left_from -= 4;

	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);
	  b2 = vlib_get_buffer (vm, bi2);
	  b3 = vlib_get_buffer (vm, bi3);

          flow_hash0 = vnet_l2_compute_flow_hash (b0);
          flow_hash1 = vnet_l2_compute_flow_hash (b1);
          flow_hash2 = vnet_l2_compute_flow_hash (b2);
          flow_hash3 = vnet_l2_compute_flow_hash (b3);

	  /* Get next node index and adj index from tunnel next_dpo */
	  sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
	  sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
	  sw_if_index2 = vnet_buffer(b2)->sw_if_index[VLIB_TX];
	  sw_if_index3 = vnet_buffer(b3)->sw_if_index[VLIB_TX];
	  hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
	  hi1 = vnet_get_sup_hw_interface (vnm, sw_if_index1);
	  hi2 = vnet_get_sup_hw_interface (vnm, sw_if_index2);
	  hi3 = vnet_get_sup_hw_interface (vnm, sw_if_index3);
	  t0 = &gtm->tunnels[hi0->dev_instance];
	  t1 = &gtm->tunnels[hi1->dev_instance];
	  t2 = &gtm->tunnels[hi2->dev_instance];
	  t3 = &gtm->tunnels[hi3->dev_instance];

	  /* Note: change to always set next0 if it may be set to drop */
	  next0 = t0->next_dpo.dpoi_next_node;
          vnet_buffer(b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;
	  next1 = t1->next_dpo.dpoi_next_node;
          vnet_buffer(b1)->ip.adj_index[VLIB_TX] = t1->next_dpo.dpoi_index;
	  next2 = t2->next_dpo.dpoi_next_node;
          vnet_buffer(b2)->ip.adj_index[VLIB_TX] = t2->next_dpo.dpoi_index;
	  next3 = t3->next_dpo.dpoi_next_node;
          vnet_buffer(b3)->ip.adj_index[VLIB_TX] = t3->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
          vlib_buffer_advance (b1, -(word)_vec_len(t1->rewrite));
          vlib_buffer_advance (b2, -(word)_vec_len(t2->rewrite));
          vlib_buffer_advance (b3, -(word)_vec_len(t3->rewrite));

	  if (is_ip4)
	    {
	      ip4_0 = vlib_buffer_get_current(b0);
	      ip4_1 = vlib_buffer_get_current(b1);
	      ip4_2 = vlib_buffer_get_current(b2);
	      ip4_3 = vlib_buffer_get_current(b3);

	      /* Copy the fixed header */
	      copy_dst0 = (u64 *) ip4_0;
	      copy_src0 = (u64 *) t0->rewrite;
	      copy_dst1 = (u64 *) ip4_1;
	      copy_src1 = (u64 *) t1->rewrite;
	      copy_dst2 = (u64 *) ip4_2;
	      copy_src2 = (u64 *) t2->rewrite;
	      copy_dst3 = (u64 *) ip4_3;
	      copy_src3 = (u64 *) t3->rewrite;

	      /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
	      foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
	      foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst2[offs] = copy_src2[offs];
	      foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst3[offs] = copy_src3[offs];
	      foreach_fixed_header4_offset;
#undef _
	      /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *)(&copy_dst0[4]);
              copy_src_last0 = (u32 *)(&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];
              copy_dst_last1 = (u32 *)(&copy_dst1[4]);
              copy_src_last1 = (u32 *)(&copy_src1[4]);
              copy_dst_last1[0] = copy_src_last1[0];
              copy_dst_last2 = (u32 *)(&copy_dst2[4]);
              copy_src_last2 = (u32 *)(&copy_src2[4]);
              copy_dst_last2[0] = copy_src_last2[0];
              copy_dst_last3 = (u32 *)(&copy_dst3[4]);
              copy_src_last3 = (u32 *)(&copy_src3[4]);
              copy_dst_last3[0] = copy_src_last3[0];

	      /* Fix the IP4 checksum and length */
	      sum0 = ip4_0->checksum;
	      new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
				     length /* changed member */);
	      ip4_0->checksum = ip_csum_fold (sum0);
	      ip4_0->length = new_l0;
	      sum1 = ip4_1->checksum;
	      new_l1 = /* old_l1 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));
              sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
				     length /* changed member */);
	      ip4_1->checksum = ip_csum_fold (sum1);
	      ip4_1->length = new_l1;
	      sum2 = ip4_2->checksum;
	      new_l2 = /* old_l2 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b2));
              sum2 = ip_csum_update (sum2, old_l2, new_l2, ip4_header_t,
				     length /* changed member */);
	      ip4_2->checksum = ip_csum_fold (sum2);
	      ip4_2->length = new_l2;
	      sum3 = ip4_3->checksum;
	      new_l3 = /* old_l3 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b3));
              sum3 = ip_csum_update (sum3, old_l3, new_l3, ip4_header_t,
				     length /* changed member */);
	      ip4_3->checksum = ip_csum_fold (sum3);
	      ip4_3->length = new_l3;

	      /* Fix UDP length and set source port */
	      udp0 = (udp_header_t *)(ip4_0+1);
	      new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
					     - sizeof (*ip4_0));
	      udp0->length = new_l0;
	      udp0->src_port = flow_hash0;
	      udp1 = (udp_header_t *)(ip4_1+1);
	      new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b1)
					     - sizeof (*ip4_1));
	      udp1->length = new_l1;
	      udp1->src_port = flow_hash1;
	      udp2 = (udp_header_t *)(ip4_2+1);
	      new_l2 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b2)
					     - sizeof (*ip4_2));
	      udp2->length = new_l2;
	      udp2->src_port = flow_hash2;
	      udp3 = (udp_header_t *)(ip4_3+1);
	      new_l3 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b3)
					     - sizeof (*ip4_3));
	      udp3->length = new_l3;
	      udp3->src_port = flow_hash3;

	      /* Fix GTPU length */
	      gtpu0 = (gtpu_header_t *)(udp0+1);
	      new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
					     - sizeof (*ip4_0) - sizeof(*udp0)
					     - GTPU_V1_HDR_LEN);
	      gtpu0->length = new_l0;
	      gtpu1 = (gtpu_header_t *)(udp1+1);
	      new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b1)
					     - sizeof (*ip4_1) - sizeof(*udp1)
					     - GTPU_V1_HDR_LEN);
	      gtpu1->length = new_l1;
	      gtpu2 = (gtpu_header_t *)(udp2+1);
	      new_l2 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b2)
					     - sizeof (*ip4_2) - sizeof(*udp2)
					     - GTPU_V1_HDR_LEN);
	      gtpu2->length = new_l2;
	      gtpu3 = (gtpu_header_t *)(udp3+1);
	      new_l3 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b3)
					     - sizeof (*ip4_3) - sizeof(*udp3)
					     - GTPU_V1_HDR_LEN);
	      gtpu3->length = new_l3;
	    }
	  else /* ipv6 */
	    {
              int bogus = 0;

	      ip6_0 = vlib_buffer_get_current(b0);
	      ip6_1 = vlib_buffer_get_current(b1);
	      ip6_2 = vlib_buffer_get_current(b2);
	      ip6_3 = vlib_buffer_get_current(b3);

	      /* Copy the fixed header */
	      copy_dst0 = (u64 *) ip6_0;
	      copy_src0 = (u64 *) t0->rewrite;
	      copy_dst1 = (u64 *) ip6_1;
	      copy_src1 = (u64 *) t1->rewrite;
	      copy_dst2 = (u64 *) ip6_2;
	      copy_src2 = (u64 *) t2->rewrite;
	      copy_dst3 = (u64 *) ip6_3;
	      copy_src3 = (u64 *) t3->rewrite;
	      /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
	      foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
	      foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst2[offs] = copy_src2[offs];
	      foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst3[offs] = copy_src3[offs];
	      foreach_fixed_header6_offset;
#undef _
	      /* Fix IP6 payload length */
	      new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
				      - sizeof(*ip6_0));
	      ip6_0->payload_length = new_l0;
	      new_l1 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
				      - sizeof(*ip6_1));
	      ip6_1->payload_length = new_l1;
	      new_l2 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b2)
				      - sizeof(*ip6_2));
	      ip6_2->payload_length = new_l2;
	      new_l3 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b3)
				      - sizeof(*ip6_3));
	      ip6_3->payload_length = new_l3;

	      /* Fix UDP length  and set source port */
	      udp0 = (udp_header_t *)(ip6_0+1);
	      udp0->length = new_l0;
	      udp0->src_port = flow_hash0;
	      udp1 = (udp_header_t *)(ip6_1+1);
	      udp1->length = new_l1;
	      udp1->src_port = flow_hash1;
	      udp2 = (udp_header_t *)(ip6_2+1);
	      udp2->length = new_l2;
	      udp2->src_port = flow_hash2;
	      udp3 = (udp_header_t *)(ip6_3+1);
	      udp3->length = new_l3;
	      udp3->src_port = flow_hash3;

	      /* Fix GTPU length */
	      gtpu0 = (gtpu_header_t *)(udp0+1);
	      new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
					     - sizeof (*ip6_0) - sizeof(*udp0)
					     - GTPU_V1_HDR_LEN);
	      gtpu0->length = new_l0;
	      gtpu1 = (gtpu_header_t *)(udp1+1);
	      new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b1)
					     - sizeof (*ip6_1) - sizeof(*udp1)
					     - GTPU_V1_HDR_LEN);
	      gtpu1->length = new_l1;
	      gtpu2 = (gtpu_header_t *)(udp2+1);
	      new_l2 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b2)
					     - sizeof (*ip6_2) - sizeof(*udp2)
					     - GTPU_V1_HDR_LEN);
	      gtpu2->length = new_l2;
	      gtpu3 = (gtpu_header_t *)(udp3+1);
	      new_l3 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b3)
					     - sizeof (*ip6_3) - sizeof(*udp3)
					     - GTPU_V1_HDR_LEN);
	      gtpu3->length = new_l3;

	      /* IPv6 UDP checksum is mandatory */
	      udp0->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b0,
								 ip6_0, &bogus);
	      if (udp0->checksum == 0)
		udp0->checksum = 0xffff;
	      udp1->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b1,
								 ip6_1, &bogus);
	      if (udp1->checksum == 0)
		udp1->checksum = 0xffff;
	      udp2->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b2,
								 ip6_2, &bogus);
	      if (udp2->checksum == 0)
		udp2->checksum = 0xffff;
	      udp3->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b3,
								 ip6_3, &bogus);
	      if (udp3->checksum == 0)
		udp3->checksum = 0xffff;

	    }

          pkts_encapsulated += 4;
	  len0 = vlib_buffer_length_in_chain (vm, b0);
	  len1 = vlib_buffer_length_in_chain (vm, b1);
	  len2 = vlib_buffer_length_in_chain (vm, b2);
	  len3 = vlib_buffer_length_in_chain (vm, b3);
	  stats_n_packets += 4;
	  stats_n_bytes += len0 + len1 + len2 + len3;

	  /* Batch stats increment on the same gtpu tunnel so counter is not
	     incremented per packet. Note stats are still incremented for deleted
	     and admin-down tunnel where packets are dropped. It is not worthwhile
	     to check for this rare case and affect normal path performance. */
	  if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index) ||
			     (sw_if_index1 != stats_sw_if_index) ||
			     (sw_if_index2 != stats_sw_if_index) ||
			     (sw_if_index3 != stats_sw_if_index) ))
	    {
	      stats_n_packets -= 4;
	      stats_n_bytes -= len0 + len1 + len2 + len3;
	      if ( (sw_if_index0 == sw_if_index1 ) &&
		   (sw_if_index1 == sw_if_index2 ) &&
		   (sw_if_index2 == sw_if_index3 ) )
	        {
		  if (stats_n_packets)
		    vlib_increment_combined_counter
		      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		       thread_index, stats_sw_if_index,
		       stats_n_packets, stats_n_bytes);
		  stats_sw_if_index = sw_if_index0;
		  stats_n_packets = 4;
		  stats_n_bytes = len0 + len1 + len2 + len3;
	        }
	      else
	        {
		  vlib_increment_combined_counter
		      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		       thread_index, sw_if_index0, 1, len0);
		  vlib_increment_combined_counter
		      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		       thread_index, sw_if_index1, 1, len1);
		  vlib_increment_combined_counter
		      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		       thread_index, sw_if_index2, 1, len2);
		  vlib_increment_combined_counter
		      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		       thread_index, sw_if_index3, 1, len3);
		}
	    }

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - gtm->tunnels;
              tr->teid = t0->teid;
            }

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_encap_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - gtm->tunnels;
              tr->teid = t1->teid;
            }

          /* b2 and b3 must be traced as well, otherwise two of the four
             packets in this unrolled iteration silently lose their trace. */
          if (PREDICT_FALSE(b2->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_encap_trace_t *tr =
                vlib_add_trace (vm, node, b2, sizeof (*tr));
              tr->tunnel_index = t2 - gtm->tunnels;
              tr->teid = t2->teid;
            }

          if (PREDICT_FALSE(b3->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_encap_trace_t *tr =
                vlib_add_trace (vm, node, b3, sizeof (*tr));
              tr->tunnel_index = t3 - gtm->tunnels;
              tr->teid = t3->teid;
            }

	  vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, bi1, bi2, bi3,
					   next0, next1, next2, next3);
	}

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0;
	  vlib_buffer_t * b0;
          u32 flow_hash0;
	  u32 len0;
          ip4_header_t * ip4_0;
          ip6_header_t * ip6_0;
          udp_header_t * udp0;
          gtpu_header_t * gtpu0;
          u64 * copy_src0, * copy_dst0;
          u32 * copy_src_last0, * copy_dst_last0;
          u16 new_l0;
          ip_csum_t sum0;

	  bi0 = from[0];
	  to_next[0] = bi0;
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  b0 = vlib_get_buffer (vm, bi0);

          flow_hash0 = vnet_l2_compute_flow_hash(b0);

	  /* Get next node index and adj index from tunnel next_dpo */
	  sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
	  hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
	  t0 = &gtm->tunnels[hi0->dev_instance];
	  /* Note: change to always set next0 if it may be set to drop */
	  next0 = t0->next_dpo.dpoi_next_node;
	  vnet_buffer(b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));

	  if (is_ip4)
	    {
	      ip4_0 = vlib_buffer_get_current(b0);

	      /* Copy the fixed header */
	      copy_dst0 = (u64 *) ip4_0;
	      copy_src0 = (u64 *) t0->rewrite;
	      /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
	      foreach_fixed_header4_offset;
#undef _
	      /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *)(&copy_dst0[4]);
              copy_src_last0 = (u32 *)(&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];

	      /* Fix the IP4 checksum and length */
	      sum0 = ip4_0->checksum;
	      new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
				     length /* changed member */);
	      ip4_0->checksum = ip_csum_fold (sum0);
	      ip4_0->length = new_l0;

	      /* Fix UDP length and set source port */
	      udp0 = (udp_header_t *)(ip4_0+1);
	      new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
					     - sizeof (*ip4_0));
	      udp0->length = new_l0;
	      udp0->src_port = flow_hash0;

	      /* Fix GTPU length */
	      gtpu0 = (gtpu_header_t *)(udp0+1);
	      new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
					     - sizeof (*ip4_0) - sizeof(*udp0)
					     - GTPU_V1_HDR_LEN);
	      gtpu0->length = new_l0;
	    }

	  else /* ip6 path */
	    {
              int bogus = 0;

	      ip6_0 = vlib_buffer_get_current(b0);
	      /* Copy the fixed header */
	      copy_dst0 = (u64 *) ip6_0;
	      copy_src0 = (u64 *) t0->rewrite;
	      /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
	      foreach_fixed_header6_offset;
#undef _
	      /* Fix IP6 payload length */
	      new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
				      - sizeof(*ip6_0));
	      ip6_0->payload_length = new_l0;

	      /* Fix UDP length  and set source port */
	      udp0 = (udp_header_t *)(ip6_0+1);
	      udp0->length = new_l0;
	      udp0->src_port = flow_hash0;

	      /* Fix GTPU length */
	      gtpu0 = (gtpu_header_t *)(udp0+1);
	      new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
					     - sizeof (*ip6_0) - sizeof(*udp0)
					     - GTPU_V1_HDR_LEN);
	      gtpu0->length = new_l0;

	      /* IPv6 UDP checksum is mandatory */
	      udp0->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b0,
								 ip6_0, &bogus);
	      if (udp0->checksum == 0)
		udp0->checksum = 0xffff;
	    }

          pkts_encapsulated ++;
	  len0 = vlib_buffer_length_in_chain (vm, b0);
	  stats_n_packets += 1;
	  stats_n_bytes += len0;

	  /* Batch stats increment on the same gtpu tunnel so counter is not
	     incremented per packet. Note stats are still incremented for deleted
	     and admin-down tunnel where packets are dropped. It is not worthwhile
	     to check for this rare case and affect normal path performance. */
	  if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
	    {
	      stats_n_packets -= 1;
	      stats_n_bytes -= len0;
	      if (stats_n_packets)
		vlib_increment_combined_counter
		  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		   thread_index, stats_sw_if_index,
		   stats_n_packets, stats_n_bytes);
	      stats_n_packets = 1;
	      stats_n_bytes = len0;
	      stats_sw_if_index = sw_if_index0;
	    }

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - gtm->tunnels;
              tr->teid = t0->teid;
            }
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               GTPU_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
	(im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
	 thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}
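The IPv4 path above never recomputes the header checksum from scratch: the rewrite template is built with a zero length field, so ip_csum_update only has to fold the new length into the existing sum. Below is a minimal standalone sketch of that RFC 1624 incremental update (HC' = ~(~HC + ~m + m')); VPP's ip_csum_update/ip_csum_fold are the real, optimized versions, so treat this purely as an illustration of the arithmetic.

#include <stdint.h>

/* Incrementally patch a 16-bit ones'-complement checksum when a single
 * 16-bit field changes from old_field to new_field (RFC 1624). */
static uint16_t
csum_update16 (uint16_t old_csum, uint16_t old_field, uint16_t new_field)
{
  uint32_t sum = (uint16_t) ~old_csum;  /* undo the final complement */
  sum += (uint16_t) ~old_field;         /* remove the old field value */
  sum += new_field;                     /* add the new field value */
  sum = (sum & 0xffff) + (sum >> 16);   /* fold carries back in, twice */
  sum = (sum & 0xffff) + (sum >> 16);
  return (uint16_t) ~sum;
}

Because old_l0..old_l3 are always zero here, the update degenerates to adding the new length, which is exactly why the rewrite setup can precompute the template checksum over a zero length.

The batched TX counter logic in both loops is also worth isolating: packets and bytes accumulate as long as traffic keeps hitting the same sw_if_index, and the counter is touched only on a change, trading a per-packet counter update for a compare. A hedged sketch of the idiom, with the hypothetical flush_counter() standing in for vlib_increment_combined_counter:

#include <stdint.h>
typedef uint32_t u32;

/* Hypothetical flush target; the real code updates a per-thread
 * combined (packets + bytes) interface counter. */
static void
flush_counter (u32 sw_if_index, u32 pkts, u32 bytes)
{
  (void) sw_if_index; (void) pkts; (void) bytes;
}

static u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;

static void
count_tx (u32 sw_if_index, u32 len)
{
  stats_n_packets += 1;
  stats_n_bytes += len;
  if (sw_if_index != stats_sw_if_index)  /* rare: tunnel changed */
    {
      /* Back out this packet, flush the finished batch, then restart
       * the batch on the new interface. */
      stats_n_packets -= 1;
      stats_n_bytes -= len;
      if (stats_n_packets)
        flush_counter (stats_sw_if_index, stats_n_packets, stats_n_bytes);
      stats_sw_if_index = sw_if_index;
      stats_n_packets = 1;
      stats_n_bytes = len;
    }
}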
Example #19
always_inline uword
bier_disp_dispatch_inline (vlib_main_t * vm,
                         vlib_node_runtime_t * node,
                         vlib_frame_t * from_frame)
{
    u32 n_left_from, next_index, * from, * to_next;

    from = vlib_frame_vector_args (from_frame);
    n_left_from = from_frame->n_vectors;

    next_index = node->cached_next_index;

    while (n_left_from > 0)
    {
        u32 n_left_to_next;

        vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

        while (n_left_from > 0 && n_left_to_next > 0)
        {
            bier_hdr_proto_id_t pproto0;
            bier_disp_entry_t *bde0;
            u32 next0, bi0, bdei0;
            const dpo_id_t *dpo0;
            vlib_buffer_t * b0;
            bier_hdr_t *hdr0;
            u32 entropy0;

            bi0 = from[0];
            to_next[0] = bi0;
            from += 1;
            to_next += 1;
            n_left_from -= 1;
            n_left_to_next -= 1;

            b0 = vlib_get_buffer (vm, bi0);
            bdei0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            hdr0 = vlib_buffer_get_current(b0);
            bde0 = bier_disp_entry_get(bdei0);
            vnet_buffer(b0)->ip.adj_index[VLIB_RX] = BIER_RX_ITF;

            /*
             * header is in network order - flip it, we are about to
             * consume it anyway
             */
            bier_hdr_ntoh(hdr0);
            pproto0 = bier_hdr_get_proto_id(hdr0);
            entropy0 = bier_hdr_get_entropy(hdr0);

            /*
             * strip the header and copy the entropy value into
             * the packets flow-hash field
             * DSCP mumble mumble...
             */
            vlib_buffer_advance(b0, (vnet_buffer(b0)->mpls.bier.n_bytes +
                                     sizeof(*hdr0)));
            vnet_buffer(b0)->ip.flow_hash = entropy0;

            /*
             * use the payload proto to dispatch to the
             * correct stacked DPO.
             */
            dpo0 = &bde0->bde_fwd[pproto0].bde_dpo;
            next0 = dpo0->dpoi_next_node;
            vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
            vnet_buffer(b0)->ip.rpf_id = bde0->bde_fwd[pproto0].bde_rpf_id;

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                bier_disp_dispatch_trace_t *tr =
                    vlib_add_trace (vm, node, b0, sizeof (*tr));
                tr->pproto = pproto0;
                tr->rpf_id = vnet_buffer(b0)->ip.rpf_id;
            }

            vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
                                            n_left_to_next, bi0, next0);
        }
        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
    return from_frame->n_vectors;
}
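The interesting detail here is hidden behind bier_hdr_ntoh, bier_hdr_get_proto_id and bier_hdr_get_entropy: the header arrives in network order, is flipped in place, and the 20-bit entropy field becomes the packet's flow hash, so downstream ECMP can reuse the sender's load-balancing choice instead of re-hashing the encapsulated payload. A toy sketch of that flip and extraction, using the RFC 8296 layout rather than VPP's actual bier_hdr_t definition (names and field positions are illustrative assumptions):

#include <arpa/inet.h>
#include <stdint.h>

/* Toy two-word BIER header prefix: per RFC 8296 the second word
 * carries nibble (4) | version (4) | BSL (4) | entropy (20). */
typedef struct
{
  uint32_t first_word;   /* BIFT-id / TC / S / TTL */
  uint32_t second_word;  /* nibble / ver / BSL / entropy */
} toy_bier_hdr_t;

/* Flip the header to host order in place, as bier_hdr_ntoh does
 * before any field is consumed. */
static inline void
toy_bier_hdr_ntoh (toy_bier_hdr_t *h)
{
  h->first_word = ntohl (h->first_word);
  h->second_word = ntohl (h->second_word);
}

/* After the flip, entropy is simply the low 20 bits. */
static inline uint32_t
toy_bier_hdr_get_entropy (const toy_bier_hdr_t *h)
{
  return h->second_word & 0xfffff;
}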
Example #20
File: error.c  Project: Venkattk/vpp
uword
vlib_error_drop_buffers (vlib_main_t * vm,
			 vlib_node_runtime_t * node,
			 u32 * buffers,
			 u32 next_buffer_stride,
			 u32 n_buffers,
			 u32 next_index,
			 u32 drop_error_node, u32 drop_error_code)
{
  u32 n_left_this_frame, n_buffers_left, *args, n_args_left;
  vlib_error_t drop_error;

  drop_error = vlib_error_set (drop_error_node, drop_error_code);

  n_buffers_left = n_buffers;
  while (n_buffers_left > 0)
    {
      vlib_get_next_frame (vm, node, next_index, args, n_args_left);

      n_left_this_frame = clib_min (n_buffers_left, n_args_left);
      n_buffers_left -= n_left_this_frame;
      n_args_left -= n_left_this_frame;

      while (n_left_this_frame >= 4)
	{
	  u32 bi0, bi1, bi2, bi3;
	  vlib_buffer_t *b0, *b1, *b2, *b3;

	  args[0] = bi0 = buffers[0];
	  args[1] = bi1 = buffers[1];
	  args[2] = bi2 = buffers[2];
	  args[3] = bi3 = buffers[3];

	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);
	  b2 = vlib_get_buffer (vm, bi2);
	  b3 = vlib_get_buffer (vm, bi3);

	  b0->error = drop_error;
	  b1->error = drop_error;
	  b2->error = drop_error;
	  b3->error = drop_error;

	  buffers += 4;
	  args += 4;
	  n_left_this_frame -= 4;
	}

      while (n_left_this_frame >= 1)
	{
	  u32 bi0;
	  vlib_buffer_t *b0;

	  args[0] = bi0 = buffers[0];

	  b0 = vlib_get_buffer (vm, bi0);
	  b0->error = drop_error;

	  buffers += 1;
	  args += 1;
	  n_left_this_frame -= 1;
	}

      vlib_put_next_frame (vm, node, next_index, n_args_left);
    }

  return n_buffers;
}
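vlib_error_drop_buffers stamps every buffer with one (node, code) error pair and funnels the whole vector toward a drop arc, using the same quad/single unrolling as the nodes above to fill frames. A hedged usage sketch, assuming a node whose arc 0 leads to error-drop and a made-up error enum (neither assumption comes from this source):

#include <vlib/vlib.h>

/* Hypothetical error table for the sketch. */
typedef enum { MY_ERROR_BAD_PACKET, MY_N_ERROR } my_error_t;

/* Drop an entire incoming frame with a single error code. */
static uword
my_drop_all_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
                vlib_frame_t *frame)
{
  u32 *buffers = vlib_frame_vector_args (frame);

  return vlib_error_drop_buffers (vm, node, buffers,
                                  /* next_buffer_stride */ 1,
                                  frame->n_vectors,
                                  /* next_index */ 0 /* assumed drop arc */,
                                  node->node_index, MY_ERROR_BAD_PACKET);
}

Since the function returns n_buffers, a node dispatch function can return its result directly as the processed vector count, as the sketch does.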