Example #1
File: util.c Project: Venkattk/vpp
clib_error_t *
foreach_directory_file (char *dir_name,
			clib_error_t * (*f) (void *arg, u8 * path_name,
					     u8 * file_name), void *arg,
			int scan_dirs)
{
  DIR *d;
  struct dirent *e;
  clib_error_t *error = 0;
  u8 *s, *t;

  d = opendir (dir_name);
  if (!d)
    {
      if (errno == ENOENT)
	return 0;
      return clib_error_return_unix (0, "open `%s'", dir_name);
    }

  s = t = 0;
  while (1)
    {
      e = readdir (d);
      if (!e)
	break;
      if (scan_dirs)
	{
	  if (e->d_type == DT_DIR
	      && (!strcmp (e->d_name, ".") || !strcmp (e->d_name, "..")))
	    continue;
	}
      else
	{
	  if (e->d_type == DT_DIR)
	    continue;
	}

      s = format (s, "%s/%s", dir_name, e->d_name);
      t = format (t, "%s", e->d_name);
      error = f (arg, s, t);
      _vec_len (s) = 0;
      _vec_len (t) = 0;

      if (error)
	break;
    }

  vec_free (s);
  vec_free (t);
  closedir (d);

  return error;
}
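
The idiom worth noting in this example is the pair of _vec_len resets inside the loop: the path vectors keep their allocations, so format () appends into the same storage on every iteration instead of growing a fresh vector per directory entry. Below is a minimal standalone sketch of that reuse pattern, assuming vppinfra's vec.h/format.h are available; build_paths is a hypothetical helper, not part of the example above.

#include <vppinfra/format.h>
#include <vppinfra/vec.h>

static void
build_paths (char *dir_name, char **names, int n_names)
{
  u8 *s = 0;
  int i;

  for (i = 0; i < n_names; i++)
    {
      /* Append into storage retained from the previous iteration. */
      s = format (s, "%s/%s", dir_name, names[i]);
      /* ... use s as a path here ... */
      vec_reset_length (s);	/* same effect as _vec_len (s) = 0 */
    }
  vec_free (s);
}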
Example #2
File: fifo.c Project: chrisy/vpp
void *
_clib_fifo_resize (void *v_old, uword n_new_elts, uword elt_bytes)
{
  void *v_new, *end, *head;
  uword n_old_elts, header_bytes;
  uword n_copy_bytes, n_zero_bytes;
  clib_fifo_header_t *f_new, *f_old;

  n_old_elts = clib_fifo_elts (v_old);
  n_new_elts += n_old_elts;
  if (n_new_elts < 32)
    n_new_elts = 32;
  else
    n_new_elts = max_pow2 (n_new_elts);

  header_bytes = vec_header_bytes (sizeof (clib_fifo_header_t));

  v_new = clib_mem_alloc_no_fail (n_new_elts * elt_bytes + header_bytes);
  v_new += header_bytes;

  f_new = clib_fifo_header (v_new);
  f_new->head_index = 0;
  f_new->tail_index = n_old_elts;
  _vec_len (v_new) = n_new_elts;

  /* Copy old -> new. */
  n_copy_bytes = n_old_elts * elt_bytes;
  if (n_copy_bytes > 0)
    {
      f_old = clib_fifo_header (v_old);
      end = v_old + _vec_len (v_old) * elt_bytes;
      head = v_old + f_old->head_index * elt_bytes;

      if (head + n_copy_bytes >= end)
	{
	  uword n = end - head;
	  clib_memcpy_fast (v_new, head, n);
	  clib_memcpy_fast (v_new + n, v_old, n_copy_bytes - n);
	}
      else
	clib_memcpy_fast (v_new, head, n_copy_bytes);
    }

  /* Zero empty space. */
  n_zero_bytes = (n_new_elts - n_old_elts) * elt_bytes;
  clib_memset (v_new + n_copy_bytes, 0, n_zero_bytes);

  clib_fifo_free (v_old);

  return v_new;
}
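
The sizing rule at the top of the function is easy to miss: the requested growth is added to the current element count and the sum is rounded up to a power of two, with a floor of 32 elements. A hedged restatement of just that rule (fifo_new_size is a hypothetical helper; max_pow2 is vppinfra's round-up-to-power-of-two):

#include <vppinfra/clib.h>

static uword
fifo_new_size (uword n_old_elts, uword n_requested_elts)
{
  uword n = n_old_elts + n_requested_elts;
  return n < 32 ? 32 : max_pow2 (n);	/* e.g. 40 + 30 = 70 -> 128 */
}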
Example #3
static clib_error_t *
sendmsg_helper (mc_socket_main_t * msm,
		int socket,
		struct sockaddr_in * tx_addr,
		u32 buffer_index)
{
  vlib_main_t * vm = msm->mc_main.vlib_main;
  struct msghdr h;
  word n_bytes, n_bytes_tx, n_retries;

  memset (&h, 0, sizeof (h));
  h.msg_name = tx_addr;
  h.msg_namelen = sizeof (tx_addr[0]);

  if (msm->iovecs)
    _vec_len (msm->iovecs) = 0;

  n_bytes = append_buffer_index_to_iovec (vm, buffer_index, &msm->iovecs);
  ASSERT (n_bytes <= msm->mc_main.transport.max_packet_size);
  if (n_bytes > msm->mc_main.transport.max_packet_size)
    clib_error ("sending packet larger than interace MTU %d bytes", n_bytes);

  h.msg_iov = msm->iovecs;
  h.msg_iovlen = vec_len (msm->iovecs);

  n_retries = 0;
  while ((n_bytes_tx = sendmsg (socket, &h, /* flags */ 0)) != n_bytes
         && errno == EAGAIN)
    n_retries++;
  if (n_bytes_tx != n_bytes)
    {
      clib_unix_warning ("sendmsg");
      return 0;
    }
  if (n_retries)
    {
      ELOG_TYPE_DECLARE (e) = {
        .format = "sendmsg-helper: %d retries",
        .format_args = "i4",
      };
      struct { u32 retries; } * ed = 0;

      ed = ELOG_DATA (&vm->elog_main, e);
      ed->retries = n_retries;
    }
  return 0;
}

static clib_error_t *
tx_buffer (void * transport, mc_transport_type_t type, u32 buffer_index)
{
  mc_socket_main_t *msm = (mc_socket_main_t *)transport;
  vlib_main_t * vm = msm->mc_main.vlib_main;
  mc_multicast_socket_t * ms = &msm->multicast_sockets[type];
  clib_error_t * error;
  error = sendmsg_helper (msm, ms->socket, &ms->tx_addr, buffer_index);
  if (type != MC_TRANSPORT_USER_REQUEST_TO_RELAY)
    vlib_buffer_free_one (vm, buffer_index);
  return error;
}
Example #4
static vnet_config_t *
find_config_with_features (vlib_main_t * vm,
			   vnet_config_main_t * cm,
			   vnet_config_feature_t * feature_vector)
{
  u32 last_node_index = ~0;
  vnet_config_feature_t * f;
  u32 * config_string;
  uword * p;
  vnet_config_t * c;

  config_string = cm->config_string_temp;
  cm->config_string_temp = 0;
  if (config_string)
    _vec_len (config_string) = 0;

  vec_foreach (f, feature_vector)
    {
      /* Connect node graph. */
      f->next_index = add_next (vm, cm, last_node_index, f->node_index);
      last_node_index = f->node_index;

      /* Store next index in config string. */
      vec_add1 (config_string, f->next_index);

      /* Store feature config. */
      vec_add (config_string, f->feature_config, vec_len (f->feature_config));
    }
Example #5
/*
 * cdp periodic function 
 */
static uword
cdp_process (vlib_main_t * vm,
                    vlib_node_runtime_t * rt,
                    vlib_frame_t * f)
{
    cdp_main_t * cm = &cdp_main;
    f64 poll_time_remaining;
    uword event_type, * event_data = 0;

    /* So we can send events to the cdp process */
    cm->cdp_process_node_index = cdp_process_node.index;

    /* Dynamically register the cdp input node with the snap classifier */
    snap_register_input_protocol (vm, "cdp-input", 
                                  0xC /* ieee_oui, Cisco */,
                                  0x2000 /* protocol CDP */,
                                  cdp_input_node.index);

    snap_register_input_protocol (vm, "cdp-input", 
                                  0xC /* ieee_oui, Cisco */,
                                  0x2004 /* protocol CDP */,
                                  cdp_input_node.index);

#if 0 /* retain for reference */
    /* with the hdlc classifier */
    hdlc_register_input_protocol (vm, HDLC_PROTOCOL_cdp,
                                  cdp_input_node.index);
#endif

    /* with ethernet input (for SRP) */
    ethernet_register_input_type (vm, ETHERNET_TYPE_CDP /* CDP */,
				  cdp_input_node.index);

    poll_time_remaining = 10.0 /* seconds */;
    while (1) {
        /* sleep until next poll time, or msg serialize event occurs */
        poll_time_remaining = 
            vlib_process_wait_for_event_or_clock (vm, poll_time_remaining);
        
        event_type = vlib_process_get_events (vm, &event_data);
        switch (event_type) {
        case ~0:                /* no events => timeout */
            break;

        default:
            clib_warning ("BUG: event type 0x%wx", event_type);
            break;
        }
        if (event_data)
            _vec_len (event_data) = 0;

        /* peer timeout scan, send announcements */
        if (vlib_process_suspend_time_is_zero (poll_time_remaining)) {
            cdp_periodic (vm);
            poll_time_remaining = 10.0;
        }
    }

    return 0;
}
Example #6
File: heap.c Project: saumzy/clib
static void elt_delete (heap_header_t * h, heap_elt_t * e)
{
  heap_elt_t * l = vec_end (h->elts) - 1;

  ASSERT (e >= h->elts && e <= l);

  /* Update doubly linked pointers. */
  {
    heap_elt_t * p = heap_prev (e);
    heap_elt_t * n = heap_next (e);

    if (p == e)
      {
	n->prev = 0;
	h->head = n - h->elts;
      }
    else if (n == e)
      {
	p->next = 0;
	h->tail = p - h->elts;
      }
    else
      {
	p->next = n - p;
	n->prev = p - n;
      }
  }

  /* Add to index free list or delete from end. */
  if (e < l)
    vec_add1 (h->free_elts, e - h->elts);
  else
    _vec_len (h->elts)--;
}
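
The else branch is the clib "delete from end" idiom: when the element being removed is the last one in h->elts, decrementing _vec_len drops it in place and nothing has to go on the free list. A minimal sketch of the same idiom on a plain u32 vector (pop_last is a hypothetical helper and assumes a non-empty vector):

#include <vppinfra/vec.h>

static u32
pop_last (u32 * v)
{
  u32 last = v[vec_len (v) - 1];	/* caller guarantees vec_len (v) > 0 */
  _vec_len (v) -= 1;			/* shrink in place; capacity is kept */
  return last;
}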
Example #7
void
init_trajectory_trace (vlib_buffer_t * b)
{
  if (!clib_mem_is_vec (vnet_buffer2 (b)->trajectory_trace))
    {
      vnet_buffer2 (b)->trajectory_trace = 0;
      vec_validate (vnet_buffer2 (b)->trajectory_trace, 7);
    }
  _vec_len (vnet_buffer2 (b)->trajectory_trace) = 0;
}
Example #8
static clib_error_t * catchup_socket_read_ready (unix_file_t * uf, int is_server)
{
  unix_main_t * um = &unix_main;
  mc_socket_main_t *msm = (mc_socket_main_t *)uf->private_data;
  mc_main_t *mcm = &msm->mc_main;
  mc_socket_catchup_t * c = find_catchup_from_file_descriptor (msm, uf->file_descriptor);
  word l, n, is_eof;

  l = vec_len (c->input_vector);
  vec_resize (c->input_vector, 4096);
  n = read (uf->file_descriptor, c->input_vector + l, vec_len (c->input_vector) - l);
  is_eof = n == 0;

  if (n < 0)
    {
      if (errno == EAGAIN)
	n = 0;
      else
	{
	  catchup_cleanup (msm, c, um, uf);
	  return clib_error_return_unix (0, "read");
	}
    }

  _vec_len (c->input_vector) = l + n;

  if (is_eof && vec_len (c->input_vector) > 0)
    {
      if (is_server)
	{
	  mc_msg_catchup_request_handler (mcm, (void *) c->input_vector, c - msm->catchups);
	  _vec_len (c->input_vector) = 0;
	}
      else
	{
	  mc_msg_catchup_reply_handler (mcm, (void *) c->input_vector, c - msm->catchups);
	  c->input_vector = 0;	/* reply handler is responsible for freeing vector */
	  catchup_cleanup (msm, c, um, uf);
	}
    }

  return 0 /* no error */;
}
Example #9
File: timer.c Project: chrisy/vpp
/* Arrange for function to be called some time,
   roughly equal to dt seconds, in the future. */
void
timer_call (timer_func_t * func, any arg, f64 dt)
{
  timer_callback_t *t;
  sigset_t save;

  /* Install signal handler on first call. */
  static word signal_installed = 0;

  if (!signal_installed)
    {
      struct sigaction sa;

      /* Initialize time_resolution before first call to timer_interrupt */
      time_resolution = 0.75 / (f64) HZ;

      clib_memset (&sa, 0, sizeof (sa));
      sa.sa_handler = timer_interrupt;

      if (sigaction (TIMER_SIGNAL, &sa, 0) < 0)
	clib_panic ("sigaction");

      signal_installed = 1;
    }

  timer_block (&save);

  /* Add new timer. */
  vec_add2 (timers, t, 1);

  t->time = unix_time_now () + dt;
  t->func = func;
  t->arg = arg;

  {
    word reset_timer = vec_len (timers) == 1;

    if (_vec_len (timers) > 1)
      {
	reset_timer += t->time < (t - 1)->time;
	sort_timers (timers);
      }

    if (reset_timer)
      timer_interrupt (TIMER_SIGNAL);
  }

  timer_unblock (&save);
}
Example #10
File: timer.c Project: chrisy/vpp
/* Interrupt handler.  Call functions for all expired timers.
   Set time for next timer interrupt. */
static void
timer_interrupt (int signum)
{
  f64 now = unix_time_now ();
  f64 dt;
  timer_callback_t *t;

  while (1)
    {
      if (vec_len (timers) <= 0)
	return;

      /* Consider last (earliest) timer in reverse sorted
         vector of pending timers. */
      t = vec_end (timers) - 1;

      ASSERT (now >= 0 && isfinite (now));

      /* Time difference between when timer goes off and now. */
      dt = t->time - now;

      /* If timer is within threshold of going off
         call user's callback. */
      if (dt <= time_resolution && isfinite (dt))
	{
	  _vec_len (timers) -= 1;
	  (*t->func) (t->arg, -dt);
	}
      else
	{
	  /* Set timer to go off in the future. */
	  struct itimerval itv;
	  clib_memset (&itv, 0, sizeof (itv));
	  f64_to_tv (dt, &itv.it_value);
	  if (setitimer (ITIMER_REAL, &itv, 0) < 0)
	    clib_unix_error ("sititmer");
	  return;
	}
    }
}
Example #11
File: util.c Project: Venkattk/vpp
clib_error_t *
vlib_sysfs_read (char *file_name, char *fmt, ...)
{
  unformat_input_t input;
  u8 *s = 0;
  int fd;
  ssize_t sz;
  uword result;

  fd = open (file_name, O_RDONLY);
  if (fd < 0)
    return clib_error_return_unix (0, "open `%s'", file_name);

  vec_validate (s, 4095);

  sz = read (fd, s, vec_len (s));
  if (sz < 0)
    {
      close (fd);
      vec_free (s);
      return clib_error_return_unix (0, "read `%s'", file_name);
    }

  _vec_len (s) = sz;
  unformat_init_vector (&input, s);

  va_list va;
  va_start (va, fmt);
  result = va_unformat (&input, fmt, &va);
  va_end (va);

  vec_free (s);
  close (fd);

  if (result == 0)
    return clib_error_return (0, "unformat error");

  return 0;
}
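
This function shows the usual pairing of vec_validate with a later _vec_len shrink: the vector is sized for the worst case up front, then its length is lowered to the byte count read () actually returned, so the unformat machinery only sees valid data. A compact sketch of just that pairing (read_small_file is a hypothetical helper, assuming vppinfra's vec.h):

#include <vppinfra/vec.h>
#include <fcntl.h>
#include <unistd.h>

static u8 *
read_small_file (char *file_name)
{
  u8 *s = 0;
  ssize_t sz;
  int fd = open (file_name, O_RDONLY);

  if (fd < 0)
    return 0;

  vec_validate (s, 4095);	/* length (and capacity) becomes 4096 */
  sz = read (fd, s, vec_len (s));
  close (fd);

  if (sz < 0)
    {
      vec_free (s);
      return 0;
    }

  _vec_len (s) = sz;		/* expose only the bytes actually read */
  return s;
}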
Example #12
clib_error_t *
unix_proc_file_contents (char *file, u8 ** result)
{
  u8 *rv = 0;
  uword pos;
  int bytes, fd;

  /* Unfortunately, stat(/proc/XXX) returns zero... */
  fd = open (file, O_RDONLY);

  if (fd < 0)
    return clib_error_return_unix (0, "open `%s'", file);

  vec_validate (rv, 4095);
  pos = 0;
  while (1)
    {
      bytes = read (fd, rv + pos, 4096);
      if (bytes < 0)
	{
	  close (fd);
	  vec_free (rv);
	  return clib_error_return_unix (0, "read '%s'", file);
	}

      if (bytes == 0)
	{
	  _vec_len (rv) = pos;
	  break;
	}
      pos += bytes;
      vec_validate (rv, pos + 4095);
    }
  *result = rv;
  close (fd);
  return 0;
}
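
A hypothetical call site for the helper above; the vector handed back through *result has length equal to the file contents, and the caller owns and must free it:

#include <vppinfra/error.h>
#include <vppinfra/format.h>

static void
print_meminfo (void)
{
  u8 *contents = 0;
  clib_error_t *error =
    unix_proc_file_contents ("/proc/meminfo", &contents);

  if (error == 0)
    {
      fformat (stdout, "%v", contents);	/* %v prints a u8 vector */
      vec_free (contents);
    }
  else
    clib_error_report (error);
}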
Example #13
int
vat_load_new_plugins (plugin_main_t * pm)
{
  DIR *dp;
  struct dirent *entry;
  struct stat statb;
  uword *p;
  plugin_info_t *pi;
  u8 **plugin_path;
  int i;

  plugin_path = split_plugin_path (pm);

  for (i = 0; i < vec_len (plugin_path); i++)
    {
      dp = opendir ((char *) plugin_path[i]);

      if (dp == 0)
	continue;

      while ((entry = readdir (dp)))
	{
	  u8 *plugin_name;
	  u8 *file_name;

	  if (pm->plugin_name_filter)
	    {
	      int j;
	      for (j = 0; j < vec_len (pm->plugin_name_filter); j++)
		if (entry->d_name[j] != pm->plugin_name_filter[j])
		  goto next;
	    }

	  file_name = format (0, "%s/%s%c", plugin_path[i], entry->d_name, 0);
	  plugin_name = format (0, "%s%c", entry->d_name, 0);

	  /* unreadable */
	  if (stat ((char *) file_name, &statb) < 0)
	    {
	    ignore:
	      vec_free (file_name);
	      vec_free (plugin_name);
	      continue;
	    }

	  /* a dir or other things which aren't plugins */
	  if (!S_ISREG (statb.st_mode))
	    goto ignore;

	  p = hash_get_mem (pm->plugin_by_name_hash, plugin_name);
	  if (p == 0)
	    {
	      vec_add2 (pm->plugin_info, pi, 1);
	      pi->name = plugin_name;
	      pi->filename = file_name;
	      pi->file_info = statb;

	      if (load_one_vat_plugin (pm, pi))
		{
		  vec_free (file_name);
		  vec_free (plugin_name);
		  _vec_len (pm->plugin_info) = vec_len (pm->plugin_info) - 1;
		  continue;
		}
	      clib_memset (pi, 0, sizeof (*pi));
	      hash_set_mem (pm->plugin_by_name_hash, plugin_name,
			    pi - pm->plugin_info);
	    }
	next:
	  ;
	}
      closedir (dp);
      vec_free (plugin_path[i]);
    }
  vec_free (plugin_path);
  return 0;
}
Example #14
File: encap.c Project: chrisy/vpp
always_inline uword
geneve_encap_inline (vlib_main_t * vm,
		     vlib_node_runtime_t * node,
		     vlib_frame_t * from_frame, u32 is_ip4)
{
  u32 n_left_from, next_index, *from, *to_next;
  geneve_main_t *vxm = &geneve_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u16 old_l0 = 0, old_l1 = 0;
  u32 thread_index = vm->thread_index;
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u32 sw_if_index0 = ~0, sw_if_index1 = ~0;
  u32 next0 = 0, next1 = 0;
  vnet_hw_interface_t *hi0, *hi1;
  geneve_tunnel_t *t0 = NULL, *t1 = NULL;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  u32 bi0, bi1;
	  vlib_buffer_t *b0, *b1;
	  u32 flow_hash0, flow_hash1;
	  u32 len0, len1;
	  ip4_header_t *ip4_0, *ip4_1;
	  ip6_header_t *ip6_0, *ip6_1;
	  udp_header_t *udp0, *udp1;
	  u64 *copy_src0, *copy_dst0;
	  u64 *copy_src1, *copy_dst1;
	  u32 *copy_src_last0, *copy_dst_last0;
	  u32 *copy_src_last1, *copy_dst_last1;
	  u16 new_l0, new_l1;
	  ip_csum_t sum0, sum1;

	  /* Prefetch next iteration. */
	  {
	    vlib_buffer_t *p2, *p3;

	    p2 = vlib_get_buffer (vm, from[2]);
	    p3 = vlib_get_buffer (vm, from[3]);

	    vlib_prefetch_buffer_header (p2, LOAD);
	    vlib_prefetch_buffer_header (p3, LOAD);

	    CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	    CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	  }

	  bi0 = from[0];
	  bi1 = from[1];
	  to_next[0] = bi0;
	  to_next[1] = bi1;
	  from += 2;
	  to_next += 2;
	  n_left_to_next -= 2;
	  n_left_from -= 2;

	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);

	  flow_hash0 = vnet_l2_compute_flow_hash (b0);
	  flow_hash1 = vnet_l2_compute_flow_hash (b1);

	  /* Get next node index and adj index from tunnel next_dpo */
	  if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
	    {
	      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
	      hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
	      t0 = &vxm->tunnels[hi0->dev_instance];
	      /* Note: change to always set next0 if it may be set to drop */
	      next0 = t0->next_dpo.dpoi_next_node;
	    }

	  ASSERT (t0 != NULL);

	  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

	  /* Get next node index and adj index from tunnel next_dpo */
	  if (sw_if_index1 != vnet_buffer (b1)->sw_if_index[VLIB_TX])
	    {
	      sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
	      hi1 = vnet_get_sup_hw_interface (vnm, sw_if_index1);
	      t1 = &vxm->tunnels[hi1->dev_instance];
	      /* Note: change to always set next1 if it may be set to drop */
	      next1 = t1->next_dpo.dpoi_next_node;
	    }

	  ASSERT (t1 != NULL);

	  vnet_buffer (b1)->ip.adj_index[VLIB_TX] = t1->next_dpo.dpoi_index;

	  /* Apply the rewrite string. $$$$ vnet_rewrite? */
	  vlib_buffer_advance (b0, -(word) _vec_len (t0->rewrite));
	  vlib_buffer_advance (b1, -(word) _vec_len (t1->rewrite));

	  if (is_ip4)
	    {
	      u8 ip4_geneve_base_header_len =
		sizeof (ip4_header_t) + sizeof (udp_header_t) +
		GENEVE_BASE_HEADER_LENGTH;
	      u8 ip4_geneve_header_total_len0 = ip4_geneve_base_header_len;
	      u8 ip4_geneve_header_total_len1 = ip4_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
	      ip4_geneve_header_total_len0 += t0->options_len;
	      ip4_geneve_header_total_len1 += t1->options_len;
#endif
	      ASSERT (vec_len (t0->rewrite) == ip4_geneve_header_total_len0);
	      ASSERT (vec_len (t1->rewrite) == ip4_geneve_header_total_len1);

	      ip4_0 = vlib_buffer_get_current (b0);
	      ip4_1 = vlib_buffer_get_current (b1);

	      /* Copy the fixed header */
	      copy_dst0 = (u64 *) ip4_0;
	      copy_src0 = (u64 *) t0->rewrite;
	      copy_dst1 = (u64 *) ip4_1;
	      copy_src1 = (u64 *) t1->rewrite;
	      /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
	      foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
	      foreach_fixed_header4_offset;
#undef _
	      /* Last 4 octets. Hopefully gcc will be our friend */
	      copy_dst_last0 = (u32 *) (&copy_dst0[4]);
	      copy_src_last0 = (u32 *) (&copy_src0[4]);
	      copy_dst_last0[0] = copy_src_last0[0];
	      copy_dst_last1 = (u32 *) (&copy_dst1[4]);
	      copy_src_last1 = (u32 *) (&copy_src1[4]);
	      copy_dst_last1[0] = copy_src_last1[0];

	      /* Fix the IP4 checksum and length */
	      sum0 = ip4_0->checksum;
	      new_l0 =		/* old_l0 always 0, see the rewrite setup */
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
	      sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
				     length /* changed member */ );
	      ip4_0->checksum = ip_csum_fold (sum0);
	      ip4_0->length = new_l0;
	      sum1 = ip4_1->checksum;
	      new_l1 =		/* old_l1 always 0, see the rewrite setup */
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));
	      sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
				     length /* changed member */ );
	      ip4_1->checksum = ip_csum_fold (sum1);
	      ip4_1->length = new_l1;

	      /* Fix UDP length and set source port */
	      udp0 = (udp_header_t *) (ip4_0 + 1);
	      new_l0 =
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
				      sizeof (*ip4_0));
	      udp0->length = new_l0;
	      udp0->src_port = flow_hash0;
	      udp1 = (udp_header_t *) (ip4_1 + 1);
	      new_l1 =
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1) -
				      sizeof (*ip4_1));
	      udp1->length = new_l1;
	      udp1->src_port = flow_hash1;
	    }
	  else			/* ipv6 */
	    {
	      int bogus = 0;

	      u8 ip6_geneve_base_header_len =
		sizeof (ip6_header_t) + sizeof (udp_header_t) +
		GENEVE_BASE_HEADER_LENGTH;
	      u8 ip6_geneve_header_total_len0 = ip6_geneve_base_header_len;
	      u8 ip6_geneve_header_total_len1 = ip6_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
	      ip6_geneve_header_total_len0 += t0->options_len;
	      ip6_geneve_header_total_len1 += t1->options_len;
#endif
	      ASSERT (vec_len (t0->rewrite) == ip6_geneve_header_total_len0);
	      ASSERT (vec_len (t1->rewrite) == ip6_geneve_header_total_len1);

	      ip6_0 = vlib_buffer_get_current (b0);
	      ip6_1 = vlib_buffer_get_current (b1);

	      /* Copy the fixed header */
	      copy_dst0 = (u64 *) ip6_0;
	      copy_src0 = (u64 *) t0->rewrite;
	      copy_dst1 = (u64 *) ip6_1;
	      copy_src1 = (u64 *) t1->rewrite;
	      /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
	      foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
	      foreach_fixed_header6_offset;
#undef _
	      /* Fix IP6 payload length */
	      new_l0 =
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
				      - sizeof (*ip6_0));
	      ip6_0->payload_length = new_l0;
	      new_l1 =
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
				      - sizeof (*ip6_1));
	      ip6_1->payload_length = new_l1;

	      /* Fix UDP length  and set source port */
	      udp0 = (udp_header_t *) (ip6_0 + 1);
	      udp0->length = new_l0;
	      udp0->src_port = flow_hash0;
	      udp1 = (udp_header_t *) (ip6_1 + 1);
	      udp1->length = new_l1;
	      udp1->src_port = flow_hash1;

	      /* IPv6 UDP checksum is mandatory */
	      udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b0,
								  ip6_0,
								  &bogus);
	      ASSERT (bogus == 0);
	      if (udp0->checksum == 0)
		udp0->checksum = 0xffff;
	      udp1->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b1,
								  ip6_1,
								  &bogus);
	      ASSERT (bogus == 0);
	      if (udp1->checksum == 0)
		udp1->checksum = 0xffff;
	    }

	  pkts_encapsulated += 2;
	  len0 = vlib_buffer_length_in_chain (vm, b0);
	  len1 = vlib_buffer_length_in_chain (vm, b1);
	  stats_n_packets += 2;
	  stats_n_bytes += len0 + len1;

	  /* Batch stats increment on the same geneve tunnel so counter is not
	     incremented per packet. Note stats are still incremented for deleted
	     and admin-down tunnel where packets are dropped. It is not worthwhile
	     to check for this rare case and affect normal path performance. */
	  if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index) ||
			     (sw_if_index1 != stats_sw_if_index)))
	    {
	      stats_n_packets -= 2;
	      stats_n_bytes -= len0 + len1;
	      if (sw_if_index0 == sw_if_index1)
		{
		  if (stats_n_packets)
		    vlib_increment_combined_counter
		      (im->combined_sw_if_counters +
		       VNET_INTERFACE_COUNTER_TX, thread_index,
		       stats_sw_if_index, stats_n_packets, stats_n_bytes);
		  stats_sw_if_index = sw_if_index0;
		  stats_n_packets = 2;
		  stats_n_bytes = len0 + len1;
		}
	      else
		{
		  vlib_increment_combined_counter
		    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		     thread_index, sw_if_index0, 1, len0);
		  vlib_increment_combined_counter
		    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		     thread_index, sw_if_index1, 1, len1);
		}
	    }

	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      geneve_encap_trace_t *tr =
		vlib_add_trace (vm, node, b0, sizeof (*tr));
	      tr->tunnel_index = t0 - vxm->tunnels;
	      tr->vni = t0->vni;
	    }

	  if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      geneve_encap_trace_t *tr =
		vlib_add_trace (vm, node, b1, sizeof (*tr));
	      tr->tunnel_index = t1 - vxm->tunnels;
	      tr->vni = t1->vni;
	    }

	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, bi1, next0, next1);
	}

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0;
	  vlib_buffer_t *b0;
	  u32 flow_hash0;
	  u32 len0;
	  ip4_header_t *ip4_0;
	  ip6_header_t *ip6_0;
	  udp_header_t *udp0;
	  u64 *copy_src0, *copy_dst0;
	  u32 *copy_src_last0, *copy_dst_last0;
	  u16 new_l0;
	  ip_csum_t sum0;

	  bi0 = from[0];
	  to_next[0] = bi0;
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  b0 = vlib_get_buffer (vm, bi0);

	  flow_hash0 = vnet_l2_compute_flow_hash (b0);

	  /* Get next node index and adj index from tunnel next_dpo */
	  if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
	    {
	      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
	      hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
	      t0 = &vxm->tunnels[hi0->dev_instance];
	      /* Note: change to always set next0 if it may be set to drop */
	      next0 = t0->next_dpo.dpoi_next_node;
	    }
	  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

	  /* Apply the rewrite string. $$$$ vnet_rewrite? */
	  vlib_buffer_advance (b0, -(word) _vec_len (t0->rewrite));

	  if (is_ip4)
	    {
	      u8 ip4_geneve_base_header_len =
		sizeof (ip4_header_t) + sizeof (udp_header_t) +
		GENEVE_BASE_HEADER_LENGTH;
	      u8 ip4_geneve_header_total_len0 = ip4_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
	      ip4_geneve_header_total_len0 += t0->options_len;
#endif
	      ASSERT (vec_len (t0->rewrite) == ip4_geneve_header_total_len0);

	      ip4_0 = vlib_buffer_get_current (b0);

	      /* Copy the fixed header */
	      copy_dst0 = (u64 *) ip4_0;
	      copy_src0 = (u64 *) t0->rewrite;
	      /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
	      foreach_fixed_header4_offset;
#undef _
	      /* Last 4 octets. Hopefully gcc will be our friend */
	      copy_dst_last0 = (u32 *) (&copy_dst0[4]);
	      copy_src_last0 = (u32 *) (&copy_src0[4]);
	      copy_dst_last0[0] = copy_src_last0[0];

	      /* Fix the IP4 checksum and length */
	      sum0 = ip4_0->checksum;
	      new_l0 =		/* old_l0 always 0, see the rewrite setup */
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
	      sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
				     length /* changed member */ );
	      ip4_0->checksum = ip_csum_fold (sum0);
	      ip4_0->length = new_l0;

	      /* Fix UDP length and set source port */
	      udp0 = (udp_header_t *) (ip4_0 + 1);
	      new_l0 =
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
				      sizeof (*ip4_0));
	      udp0->length = new_l0;
	      udp0->src_port = flow_hash0;
	    }

	  else			/* ip6 path */
	    {
	      int bogus = 0;

	      u8 ip6_geneve_base_header_len =
		sizeof (ip6_header_t) + sizeof (udp_header_t) +
		GENEVE_BASE_HEADER_LENGTH;
	      u8 ip6_geneve_header_total_len0 = ip6_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
	      ip6_geneve_header_total_len0 += t0->options_len;
#endif
	      ASSERT (vec_len (t0->rewrite) == ip6_geneve_header_total_len0);

	      ip6_0 = vlib_buffer_get_current (b0);
	      /* Copy the fixed header */
	      copy_dst0 = (u64 *) ip6_0;
	      copy_src0 = (u64 *) t0->rewrite;
	      /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
	      foreach_fixed_header6_offset;
#undef _
	      /* Fix IP6 payload length */
	      new_l0 =
		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
				      - sizeof (*ip6_0));
	      ip6_0->payload_length = new_l0;

	      /* Fix UDP length  and set source port */
	      udp0 = (udp_header_t *) (ip6_0 + 1);
	      udp0->length = new_l0;
	      udp0->src_port = flow_hash0;

	      /* IPv6 UDP checksum is mandatory */
	      udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b0,
								  ip6_0,
								  &bogus);
	      ASSERT (bogus == 0);
	      if (udp0->checksum == 0)
		udp0->checksum = 0xffff;
	    }

	  pkts_encapsulated++;
	  len0 = vlib_buffer_length_in_chain (vm, b0);
	  stats_n_packets += 1;
	  stats_n_bytes += len0;

	  /* Batch stats increment on the same geneve tunnel so counter is not
	     incremented per packet. Note stats are still incremented for deleted
	     and admin-down tunnel where packets are dropped. It is not worthwhile
	     to check for this rare case and affect normal path performance. */
	  if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
	    {
	      stats_n_packets -= 1;
	      stats_n_bytes -= len0;
	      if (stats_n_packets)
		vlib_increment_combined_counter
		  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		   thread_index, stats_sw_if_index,
		   stats_n_packets, stats_n_bytes);
	      stats_n_packets = 1;
	      stats_n_bytes = len0;
	      stats_sw_if_index = sw_if_index0;
	    }

	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      geneve_encap_trace_t *tr =
		vlib_add_trace (vm, node, b0, sizeof (*tr));
	      tr->tunnel_index = t0 - vxm->tunnels;
	      tr->vni = t0->vni;
	    }
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, node->node_index,
			       GENEVE_ENCAP_ERROR_ENCAPSULATED,
			       pkts_encapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
	(im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
	 thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}
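
In this node _vec_len is only read, not written: the length of the tunnel's precomputed rewrite vector tells the encap path how many header bytes to open up in front of the payload. A hedged sketch of that single step in isolation (prepend_rewrite is a hypothetical helper; the real node copies the headers with the unrolled fixed-header loops shown above):

#include <vlib/vlib.h>

static void
prepend_rewrite (vlib_buffer_t * b, u8 * rewrite)
{
  /* Move current_data back by the rewrite length... */
  vlib_buffer_advance (b, -(word) vec_len (rewrite));
  /* ...then lay the prebuilt ip/udp/tunnel headers in front. */
  clib_memcpy_fast (vlib_buffer_get_current (b), rewrite, vec_len (rewrite));
}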
Example #15
static uword
startup_config_process (vlib_main_t * vm,
			vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  unix_main_t *um = &unix_main;
  u8 *buf = 0;
  uword l, n = 1;

  vlib_process_suspend (vm, 2.0);

  while (um->unix_config_complete == 0)
    vlib_process_suspend (vm, 0.1);

  if (um->startup_config_filename)
    {
      unformat_input_t sub_input;
      int fd;
      struct stat s;
      char *fn = (char *) um->startup_config_filename;

      fd = open (fn, O_RDONLY);
      if (fd < 0)
	{
	  clib_warning ("failed to open `%s'", fn);
	  return 0;
	}

      if (fstat (fd, &s) < 0)
	{
	  clib_warning ("failed to stat `%s'", fn);
	bail:
	  close (fd);
	  return 0;
	}

      if (!(S_ISREG (s.st_mode) || S_ISLNK (s.st_mode)))
	{
	  clib_warning ("not a regular file: `%s'", fn);
	  goto bail;
	}

      while (n > 0)
	{
	  l = vec_len (buf);
	  vec_resize (buf, 4096);
	  n = read (fd, buf + l, 4096);
	  if (n > 0)
	    {
	      _vec_len (buf) = l + n;
	      if (n < 4096)
		break;
	    }
	  else
	    break;
	}
      if (um->log_fd && vec_len (buf))
	{
	  u8 *lv = 0;
	  lv = format (lv, "%U: ***** Startup Config *****\n%v",
		       format_timeval, 0 /* current bat-time */ ,
		       0 /* current bat-format */ ,
		       buf);
	  {
	    int rv __attribute__ ((unused)) =
	      write (um->log_fd, lv, vec_len (lv));
	  }
	  vec_reset_length (lv);
	  lv = format (lv, "%U: ***** End Startup Config *****\n",
		       format_timeval, 0 /* current bat-time */ ,
		       0 /* current bat-format */ );
	  {
	    int rv __attribute__ ((unused)) =
	      write (um->log_fd, lv, vec_len (lv));
	  }
	  vec_free (lv);
	}

      if (vec_len (buf))
	{
	  unformat_init_vector (&sub_input, buf);
	  vlib_cli_input (vm, &sub_input, 0, 0);
	  /* frees buf for us */
	  unformat_free (&sub_input);
	}
      close (fd);
    }
  return 0;
}
Example #16
always_inline uword
gtpu_encap_inline (vlib_main_t * vm,
		    vlib_node_runtime_t * node,
		    vlib_frame_t * from_frame,
		    u32 is_ip4)
{
  u32 n_left_from, next_index, * from, * to_next;
  gtpu_main_t * gtm = &gtpu_main;
  vnet_main_t * vnm = gtm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u16 old_l0 = 0, old_l1 = 0, old_l2 = 0, old_l3 = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u32 sw_if_index0 = 0, sw_if_index1 = 0, sw_if_index2 = 0, sw_if_index3 = 0;
  u32 next0 = 0, next1 = 0, next2 = 0, next3 = 0;
  vnet_hw_interface_t * hi0, * hi1, * hi2, * hi3;
  gtpu_tunnel_t * t0 = NULL, * t1 = NULL, * t2 = NULL, * t3 = NULL;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
			   to_next, n_left_to_next);

      while (n_left_from >= 8 && n_left_to_next >= 4)
	{
          u32 bi0, bi1, bi2, bi3;
	  vlib_buffer_t * b0, * b1, * b2, * b3;
          u32 flow_hash0, flow_hash1, flow_hash2, flow_hash3;
	  u32 len0, len1, len2, len3;
          ip4_header_t * ip4_0, * ip4_1, * ip4_2, * ip4_3;
          ip6_header_t * ip6_0, * ip6_1, * ip6_2, * ip6_3;
          udp_header_t * udp0, * udp1, * udp2, * udp3;
          gtpu_header_t * gtpu0, * gtpu1, * gtpu2, * gtpu3;
          u64 * copy_src0, * copy_dst0;
          u64 * copy_src1, * copy_dst1;
          u64 * copy_src2, * copy_dst2;
          u64 * copy_src3, * copy_dst3;
          u32 * copy_src_last0, * copy_dst_last0;
          u32 * copy_src_last1, * copy_dst_last1;
          u32 * copy_src_last2, * copy_dst_last2;
          u32 * copy_src_last3, * copy_dst_last3;
          u16 new_l0, new_l1, new_l2, new_l3;
          ip_csum_t sum0, sum1, sum2, sum3;

	  /* Prefetch next iteration. */
	  {
	    vlib_buffer_t * p4, * p5, * p6, * p7;

	    p4 = vlib_get_buffer (vm, from[4]);
	    p5 = vlib_get_buffer (vm, from[5]);
	    p6 = vlib_get_buffer (vm, from[6]);
	    p7 = vlib_get_buffer (vm, from[7]);

	    vlib_prefetch_buffer_header (p4, LOAD);
	    vlib_prefetch_buffer_header (p5, LOAD);
	    vlib_prefetch_buffer_header (p6, LOAD);
	    vlib_prefetch_buffer_header (p7, LOAD);

	    CLIB_PREFETCH (p4->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
	    CLIB_PREFETCH (p5->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
	    CLIB_PREFETCH (p6->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
	    CLIB_PREFETCH (p7->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
	  }

	  bi0 = from[0];
	  bi1 = from[1];
	  bi2 = from[2];
	  bi3 = from[3];
	  to_next[0] = bi0;
	  to_next[1] = bi1;
	  to_next[2] = bi2;
	  to_next[3] = bi3;
	  from += 4;
	  to_next += 4;
	  n_left_to_next -= 4;
	  n_left_from -= 4;

	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);
	  b2 = vlib_get_buffer (vm, bi2);
	  b3 = vlib_get_buffer (vm, bi3);

          flow_hash0 = vnet_l2_compute_flow_hash (b0);
          flow_hash1 = vnet_l2_compute_flow_hash (b1);
          flow_hash2 = vnet_l2_compute_flow_hash (b2);
          flow_hash3 = vnet_l2_compute_flow_hash (b3);

	  /* Get next node index and adj index from tunnel next_dpo */
	  sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
	  sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
	  sw_if_index2 = vnet_buffer(b2)->sw_if_index[VLIB_TX];
	  sw_if_index3 = vnet_buffer(b3)->sw_if_index[VLIB_TX];
	  hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
	  hi1 = vnet_get_sup_hw_interface (vnm, sw_if_index1);
	  hi2 = vnet_get_sup_hw_interface (vnm, sw_if_index2);
	  hi3 = vnet_get_sup_hw_interface (vnm, sw_if_index3);
	  t0 = &gtm->tunnels[hi0->dev_instance];
	  t1 = &gtm->tunnels[hi1->dev_instance];
	  t2 = &gtm->tunnels[hi2->dev_instance];
	  t3 = &gtm->tunnels[hi3->dev_instance];

	  /* Note: change to always set next0 if it may be set to drop */
	  next0 = t0->next_dpo.dpoi_next_node;
          vnet_buffer(b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;
	  next1 = t1->next_dpo.dpoi_next_node;
          vnet_buffer(b1)->ip.adj_index[VLIB_TX] = t1->next_dpo.dpoi_index;
	  next2 = t2->next_dpo.dpoi_next_node;
          vnet_buffer(b2)->ip.adj_index[VLIB_TX] = t2->next_dpo.dpoi_index;
	  next3 = t3->next_dpo.dpoi_next_node;
          vnet_buffer(b3)->ip.adj_index[VLIB_TX] = t3->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
          vlib_buffer_advance (b1, -(word)_vec_len(t1->rewrite));
          vlib_buffer_advance (b2, -(word)_vec_len(t2->rewrite));
          vlib_buffer_advance (b3, -(word)_vec_len(t3->rewrite));

	  if (is_ip4)
	    {
	      ip4_0 = vlib_buffer_get_current(b0);
	      ip4_1 = vlib_buffer_get_current(b1);
	      ip4_2 = vlib_buffer_get_current(b2);
	      ip4_3 = vlib_buffer_get_current(b3);

	      /* Copy the fixed header */
	      copy_dst0 = (u64 *) ip4_0;
	      copy_src0 = (u64 *) t0->rewrite;
	      copy_dst1 = (u64 *) ip4_1;
	      copy_src1 = (u64 *) t1->rewrite;
	      copy_dst2 = (u64 *) ip4_2;
	      copy_src2 = (u64 *) t2->rewrite;
	      copy_dst3 = (u64 *) ip4_3;
	      copy_src3 = (u64 *) t3->rewrite;

	      /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
	      foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
	      foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst2[offs] = copy_src2[offs];
	      foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst3[offs] = copy_src3[offs];
	      foreach_fixed_header4_offset;
#undef _
	      /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *)(&copy_dst0[4]);
              copy_src_last0 = (u32 *)(&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];
              copy_dst_last1 = (u32 *)(&copy_dst1[4]);
              copy_src_last1 = (u32 *)(&copy_src1[4]);
              copy_dst_last1[0] = copy_src_last1[0];
              copy_dst_last2 = (u32 *)(&copy_dst2[4]);
              copy_src_last2 = (u32 *)(&copy_src2[4]);
              copy_dst_last2[0] = copy_src_last2[0];
              copy_dst_last3 = (u32 *)(&copy_dst3[4]);
              copy_src_last3 = (u32 *)(&copy_src3[4]);
              copy_dst_last3[0] = copy_src_last3[0];

	      /* Fix the IP4 checksum and length */
	      sum0 = ip4_0->checksum;
	      new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
				     length /* changed member */);
	      ip4_0->checksum = ip_csum_fold (sum0);
	      ip4_0->length = new_l0;
	      sum1 = ip4_1->checksum;
	      new_l1 = /* old_l1 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));
              sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
				     length /* changed member */);
	      ip4_1->checksum = ip_csum_fold (sum1);
	      ip4_1->length = new_l1;
	      sum2 = ip4_2->checksum;
	      new_l2 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b2));
              sum2 = ip_csum_update (sum2, old_l2, new_l2, ip4_header_t,
				     length /* changed member */);
	      ip4_2->checksum = ip_csum_fold (sum2);
	      ip4_2->length = new_l2;
	      sum3 = ip4_3->checksum;
	      new_l3 = /* old_l1 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b3));
              sum3 = ip_csum_update (sum3, old_l3, new_l3, ip4_header_t,
				     length /* changed member */);
	      ip4_3->checksum = ip_csum_fold (sum3);
	      ip4_3->length = new_l3;

	      /* Fix UDP length and set source port */
	      udp0 = (udp_header_t *)(ip4_0+1);
	      new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
					     - sizeof (*ip4_0));
	      udp0->length = new_l0;
	      udp0->src_port = flow_hash0;
	      udp1 = (udp_header_t *)(ip4_1+1);
	      new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b1)
					     - sizeof (*ip4_1));
	      udp1->length = new_l1;
	      udp1->src_port = flow_hash1;
	      udp2 = (udp_header_t *)(ip4_2+1);
	      new_l2 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b2)
					     - sizeof (*ip4_2));
	      udp2->length = new_l2;
	      udp2->src_port = flow_hash2;
	      udp3 = (udp_header_t *)(ip4_3+1);
	      new_l3 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b3)
					     - sizeof (*ip4_3));
	      udp3->length = new_l3;
	      udp3->src_port = flow_hash3;

	      /* Fix GTPU length */
	      gtpu0 = (gtpu_header_t *)(udp0+1);
	      new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
					     - sizeof (*ip4_0) - sizeof(*udp0)
					     - GTPU_V1_HDR_LEN);
	      gtpu0->length = new_l0;
	      gtpu1 = (gtpu_header_t *)(udp1+1);
	      new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b1)
					     - sizeof (*ip4_1) - sizeof(*udp1)
					     - GTPU_V1_HDR_LEN);
	      gtpu1->length = new_l1;
	      gtpu2 = (gtpu_header_t *)(udp2+1);
	      new_l2 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b2)
					     - sizeof (*ip4_2) - sizeof(*udp2)
					     - GTPU_V1_HDR_LEN);
	      gtpu2->length = new_l2;
	      gtpu3 = (gtpu_header_t *)(udp3+1);
	      new_l3 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b3)
					     - sizeof (*ip4_3) - sizeof(*udp3)
					     - GTPU_V1_HDR_LEN);
	      gtpu3->length = new_l3;
	    }
	  else /* ipv6 */
	    {
              int bogus = 0;

	      ip6_0 = vlib_buffer_get_current(b0);
	      ip6_1 = vlib_buffer_get_current(b1);
	      ip6_2 = vlib_buffer_get_current(b2);
	      ip6_3 = vlib_buffer_get_current(b3);

	      /* Copy the fixed header */
	      copy_dst0 = (u64 *) ip6_0;
	      copy_src0 = (u64 *) t0->rewrite;
	      copy_dst1 = (u64 *) ip6_1;
	      copy_src1 = (u64 *) t1->rewrite;
	      copy_dst2 = (u64 *) ip6_2;
	      copy_src2 = (u64 *) t2->rewrite;
	      copy_dst3 = (u64 *) ip6_3;
	      copy_src3 = (u64 *) t3->rewrite;
	      /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
	      foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
	      foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst2[offs] = copy_src2[offs];
	      foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst3[offs] = copy_src3[offs];
	      foreach_fixed_header6_offset;
#undef _
	      /* Fix IP6 payload length */
	      new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
				      - sizeof(*ip6_0));
	      ip6_0->payload_length = new_l0;
	      new_l1 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
				      - sizeof(*ip6_1));
	      ip6_1->payload_length = new_l1;
	      new_l2 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b2)
				      - sizeof(*ip6_2));
	      ip6_2->payload_length = new_l2;
	      new_l3 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b3)
				      - sizeof(*ip6_3));
	      ip6_3->payload_length = new_l3;

	      /* Fix UDP length  and set source port */
	      udp0 = (udp_header_t *)(ip6_0+1);
	      udp0->length = new_l0;
	      udp0->src_port = flow_hash0;
	      udp1 = (udp_header_t *)(ip6_1+1);
	      udp1->length = new_l1;
	      udp1->src_port = flow_hash1;
	      udp2 = (udp_header_t *)(ip6_2+1);
	      udp2->length = new_l2;
	      udp2->src_port = flow_hash2;
	      udp3 = (udp_header_t *)(ip6_3+1);
	      udp3->length = new_l3;
	      udp3->src_port = flow_hash3;

	      /* Fix GTPU length */
	      gtpu0 = (gtpu_header_t *)(udp0+1);
	      new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
					     - sizeof (*ip6_0) - sizeof(*udp0)
					     - GTPU_V1_HDR_LEN);
	      gtpu0->length = new_l0;
	      gtpu1 = (gtpu_header_t *)(udp1+1);
	      new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b1)
					     - sizeof (*ip6_1) - sizeof(*udp1)
					     - GTPU_V1_HDR_LEN);
	      gtpu1->length = new_l1;
	      gtpu2 = (gtpu_header_t *)(udp2+1);
	      new_l2 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b2)
					     - sizeof (*ip6_2) - sizeof(*udp2)
					     - GTPU_V1_HDR_LEN);
	      gtpu2->length = new_l2;
	      gtpu3 = (gtpu_header_t *)(udp3+1);
	      new_l3 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b3)
					     - sizeof (*ip6_3) - sizeof(*udp3)
					     - GTPU_V1_HDR_LEN);
	      gtpu3->length = new_l3;

	      /* IPv6 UDP checksum is mandatory */
	      udp0->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b0,
								 ip6_0, &bogus);
	      if (udp0->checksum == 0)
		udp0->checksum = 0xffff;
	      udp1->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b1,
								 ip6_1, &bogus);
	      if (udp1->checksum == 0)
		udp1->checksum = 0xffff;
	      udp2->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b2,
								 ip6_2, &bogus);
	      if (udp2->checksum == 0)
		udp2->checksum = 0xffff;
	      udp3->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b3,
								 ip6_3, &bogus);
	      if (udp3->checksum == 0)
		udp3->checksum = 0xffff;

	    }

          pkts_encapsulated += 4;
 	  len0 = vlib_buffer_length_in_chain (vm, b0);
 	  len1 = vlib_buffer_length_in_chain (vm, b1);
 	  len2 = vlib_buffer_length_in_chain (vm, b2);
 	  len3 = vlib_buffer_length_in_chain (vm, b3);
	  stats_n_packets += 4;
	  stats_n_bytes += len0 + len1 + len2 + len3;

	  /* Batch stats increment on the same gtpu tunnel so counter is not
	     incremented per packet. Note stats are still incremented for deleted
	     and admin-down tunnel where packets are dropped. It is not worthwhile
	     to check for this rare case and affect normal path performance. */
	  if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index) ||
			     (sw_if_index1 != stats_sw_if_index) ||
			     (sw_if_index2 != stats_sw_if_index) ||
			     (sw_if_index3 != stats_sw_if_index) ))
	    {
	      stats_n_packets -= 4;
	      stats_n_bytes -= len0 + len1 + len2 + len3;
	      if ( (sw_if_index0 == sw_if_index1 ) &&
		   (sw_if_index1 == sw_if_index2 ) &&
		   (sw_if_index2 == sw_if_index3 ) )
	        {
		  if (stats_n_packets)
		    vlib_increment_combined_counter
		      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		       thread_index, stats_sw_if_index,
		       stats_n_packets, stats_n_bytes);
		  stats_sw_if_index = sw_if_index0;
		  stats_n_packets = 4;
		  stats_n_bytes = len0 + len1 + len2 + len3;
	        }
	      else
	        {
		  vlib_increment_combined_counter
		      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		       thread_index, sw_if_index0, 1, len0);
		  vlib_increment_combined_counter
		      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		       thread_index, sw_if_index1, 1, len1);
		  vlib_increment_combined_counter
		      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		       thread_index, sw_if_index2, 1, len2);
		  vlib_increment_combined_counter
		      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		       thread_index, sw_if_index3, 1, len3);
		}
	    }

	  if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - gtm->tunnels;
              tr->teid = t0->teid;
           }

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_encap_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - gtm->tunnels;
              tr->teid = t1->teid;
            }

	  vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, bi1, bi2, bi3,
					   next0, next1, next2, next3);
	}

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0;
	  vlib_buffer_t * b0;
          u32 flow_hash0;
	  u32 len0;
          ip4_header_t * ip4_0;
          ip6_header_t * ip6_0;
          udp_header_t * udp0;
          gtpu_header_t * gtpu0;
          u64 * copy_src0, * copy_dst0;
          u32 * copy_src_last0, * copy_dst_last0;
          u16 new_l0;
          ip_csum_t sum0;

	  bi0 = from[0];
	  to_next[0] = bi0;
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  b0 = vlib_get_buffer (vm, bi0);

          flow_hash0 = vnet_l2_compute_flow_hash(b0);

	  /* Get next node index and adj index from tunnel next_dpo */
	  sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
	  hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
	  t0 = &gtm->tunnels[hi0->dev_instance];
	  /* Note: change to always set next0 if it may be set to drop */
	  next0 = t0->next_dpo.dpoi_next_node;
	  vnet_buffer(b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));

	  if (is_ip4)
	    {
	      ip4_0 = vlib_buffer_get_current(b0);

	      /* Copy the fixed header */
	      copy_dst0 = (u64 *) ip4_0;
	      copy_src0 = (u64 *) t0->rewrite;
	      /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
	      foreach_fixed_header4_offset;
#undef _
	      /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *)(&copy_dst0[4]);
              copy_src_last0 = (u32 *)(&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];

	      /* Fix the IP4 checksum and length */
	      sum0 = ip4_0->checksum;
	      new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
				     length /* changed member */);
	      ip4_0->checksum = ip_csum_fold (sum0);
	      ip4_0->length = new_l0;

	      /* Fix UDP length and set source port */
	      udp0 = (udp_header_t *)(ip4_0+1);
	      new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
					     - sizeof (*ip4_0));
	      udp0->length = new_l0;
	      udp0->src_port = flow_hash0;

	      /* Fix GTPU length */
	      gtpu0 = (gtpu_header_t *)(udp0+1);
	      new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
					     - sizeof (*ip4_0) - sizeof(*udp0)
					     - GTPU_V1_HDR_LEN);
	      gtpu0->length = new_l0;
	    }

	  else /* ip6 path */
	    {
              int bogus = 0;

	      ip6_0 = vlib_buffer_get_current(b0);
	      /* Copy the fixed header */
	      copy_dst0 = (u64 *) ip6_0;
	      copy_src0 = (u64 *) t0->rewrite;
	      /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
	      foreach_fixed_header6_offset;
#undef _
	      /* Fix IP6 payload length */
	      new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
				      - sizeof(*ip6_0));
	      ip6_0->payload_length = new_l0;

	      /* Fix UDP length  and set source port */
	      udp0 = (udp_header_t *)(ip6_0+1);
	      udp0->length = new_l0;
	      udp0->src_port = flow_hash0;

	      /* Fix GTPU length */
	      gtpu0 = (gtpu_header_t *)(udp0+1);
	      new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
					     - sizeof (*ip6_0) - sizeof(*udp0)
					     - GTPU_V1_HDR_LEN);
	      gtpu0->length = new_l0;

	      /* IPv6 UDP checksum is mandatory */
	      udp0->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b0,
								 ip6_0, &bogus);
	      if (udp0->checksum == 0)
		udp0->checksum = 0xffff;
	    }

          pkts_encapsulated ++;
	  len0 = vlib_buffer_length_in_chain (vm, b0);
	  stats_n_packets += 1;
	  stats_n_bytes += len0;

	  /* Batch stats increment on the same gtpu tunnel so counter is not
	     incremented per packet. Note stats are still incremented for deleted
	     and admin-down tunnel where packets are dropped. It is not worthwhile
	     to check for this rare case and affect normal path performance. */
	  if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
	    {
	      stats_n_packets -= 1;
	      stats_n_bytes -= len0;
	      if (stats_n_packets)
		vlib_increment_combined_counter
		  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
		   thread_index, stats_sw_if_index,
		   stats_n_packets, stats_n_bytes);
	      stats_n_packets = 1;
	      stats_n_bytes = len0;
	      stats_sw_if_index = sw_if_index0;
	    }

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - gtm->tunnels;
              tr->teid = t0->teid;
            }
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               GTPU_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
	(im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
	 thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}
Example #17
int test_socket_main (unformat_input_t * input)
{
  clib_socket_t _s = {0}, * s = &_s;
  char * config;
  clib_error_t * error;

  s->config = "localhost:22";
  s->flags = SOCKET_IS_CLIENT;

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "server %s %=", &config,
		    &s->flags, SOCKET_IS_SERVER))
	;
      else if (unformat (input, "client %s %=", &config,
			 &s->flags, SOCKET_IS_CLIENT))
	;
      else
	{
	  error = clib_error_create ("unknown input `%U'\n",
				     format_unformat_error, input);
	  goto done;
	}
    }

  error = clib_socket_init (s);
  if (error)
    goto done;

  if (0)
    {
      struct { int a, b; } * msg;
      msg = clib_socket_tx_add (s, sizeof (msg[0]));
      msg->a = 99;
      msg->b = 100;
    }
  else
    clib_socket_tx_add_formatted (s, "hello there mr server %d\n", 99);

  error = clib_socket_tx (s);
  if (error)
    goto done;

  while (1)
    {
      error = clib_socket_rx (s, 100);
      if (error)
	break;

      if (clib_socket_rx_end_of_file (s))
	break;

      if_verbose   ("%v", s->rx_buffer);
      _vec_len (s->rx_buffer) = 0;
    }

  error = clib_socket_close (s);

 done:
  if (error)
    clib_error_report (error);
  return 0;
}
Example #18
/* The given next hop vector is over-written with a normalized one whose
   weights are sorted and correspond to the number of adjacencies for each
   next hop.  Returns the number of adjacencies in the block. */
static u32 ip_multipath_normalize_next_hops (ip_lookup_main_t * lm,
					     ip_multipath_next_hop_t * raw_next_hops,
					     ip_multipath_next_hop_t ** normalized_next_hops)
{
  ip_multipath_next_hop_t * nhs;
  uword n_nhs, n_adj, n_adj_left, i;
  f64 sum_weight, norm, error;

  n_nhs = vec_len (raw_next_hops);
  ASSERT (n_nhs > 0);
  if (n_nhs == 0)
    return 0;

  /* Allocate enough space for 2 copies; we'll use second copy to save original weights. */
  nhs = *normalized_next_hops;
  vec_validate (nhs, 2*n_nhs - 1);

  /* Fast path: 1 next hop in block. */
  n_adj = n_nhs;
  if (n_nhs == 1)
    {
      nhs[0] = raw_next_hops[0];
      nhs[0].weight = 1;
      _vec_len (nhs) = 1;
      goto done;
    }

  else if (n_nhs == 2)
    {
      int cmp = next_hop_sort_by_weight (&raw_next_hops[0], &raw_next_hops[1]) < 0;

      /* Fast sort. */
      nhs[0] = raw_next_hops[cmp];
      nhs[1] = raw_next_hops[cmp ^ 1];

      /* Fast path: equal cost multipath with 2 next hops. */
      if (nhs[0].weight == nhs[1].weight)
	{
	  nhs[0].weight = nhs[1].weight = 1;
	  _vec_len (nhs) = 2;
	  goto done;
	}
    }
  else
    {
      memcpy (nhs, raw_next_hops, n_nhs * sizeof (raw_next_hops[0]));
      qsort (nhs, n_nhs, sizeof (nhs[0]), (void *) next_hop_sort_by_weight);
    }

  /* Find total weight to normalize weights. */
  sum_weight = 0;
  for (i = 0; i < n_nhs; i++)
    sum_weight += nhs[i].weight;

  /* In the unlikely case that all weights are given as 0, set them all to 1. */
  if (sum_weight == 0)
    {
      for (i = 0; i < n_nhs; i++)
	nhs[i].weight = 1;
      sum_weight = n_nhs;
    }

  /* Save copies of all next hop weights to avoid being overwritten in loop below. */
  for (i = 0; i < n_nhs; i++)
    nhs[n_nhs + i].weight = nhs[i].weight;

  /* Try larger and larger power-of-2 sized adjacency blocks until we
     find one where traffic flows within the configured error tolerance
     of the specified weights. */
  for (n_adj = max_pow2 (n_nhs); ; n_adj *= 2)
    {
      error = 0;

      norm = n_adj / sum_weight;
      n_adj_left = n_adj;
      for (i = 0; i < n_nhs; i++)
	{
	  f64 nf = nhs[n_nhs + i].weight * norm; /* use saved weights */
	  word n = flt_round_nearest (nf);

	  n = n > n_adj_left ? n_adj_left : n;
	  n_adj_left -= n;
	  error += fabs (nf - n);
	  nhs[i].weight = n;
	}
	
      nhs[0].weight += n_adj_left;

      /* Average error per adjacency within the configured tolerance for this block size? */
      if (error <= lm->multipath_next_hop_error_tolerance*n_adj)
	{
	  /* Accept this block size; i == n_nhs here, so all next hops are kept. */
	  _vec_len (nhs) = i;
	  break;
	}
    }

 done:
  /* Save vector for next call. */
  *normalized_next_hops = nhs;
  return n_adj;
}
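Worked example: raw weights {3, 1} sum to 4. The first block size tried is max_pow2(2) = 2, giving 3·(2/4) = 1.5 → 2 adjacencies and 1·(2/4) = 0.5 → 0 (clamped by n_adj_left), an error of 1.0; doubling to 4 gives exactly 3 and 1 adjacencies with zero error, so the block settles at 4 adjacencies. A standalone sketch of that arithmetic (the 1% tolerance is an assumption; the real value comes from lm->multipath_next_hop_error_tolerance):

/* Standalone sketch of the normalization loop above for weights {3, 1};
 * clib types and helpers replaced with plain C equivalents. */
#include <stdio.h>
#include <math.h>

int
main (void)
{
  double weights[2] = { 3, 1 };	/* already sorted descending */
  double sum_weight = 4, tolerance = 0.01;	/* assumed 1% tolerance */

  for (unsigned n_adj = 2;; n_adj *= 2)	/* max_pow2(2) == 2 */
    {
      double norm = n_adj / sum_weight, error = 0;
      long n_adj_left = n_adj, n_per_hop[2];

      for (int i = 0; i < 2; i++)
	{
	  double nf = weights[i] * norm;
	  long n = lround (nf);	/* stand-in for flt_round_nearest */
	  n = n > n_adj_left ? n_adj_left : n;
	  n_adj_left -= n;
	  error += fabs (nf - n);
	  n_per_hop[i] = n;
	}
      n_per_hop[0] += n_adj_left;	/* leftovers go to the biggest hop */

      printf ("n_adj=%u -> {%ld,%ld}, error=%.2f\n",
	      n_adj, n_per_hop[0], n_per_hop[1], error);
      if (error <= tolerance * n_adj)
	break;			/* n_adj=4 gives {3,1} with zero error */
    }
  return 0;
}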
Exemplo n.º 19
0
uword
ssvm_eth_interface_tx (ssvm_private_t * intfc, char *buf_to_send,
		       int len_to_send /* , vlib_frame_t * f */)
{
  ssvm_eth_main_t * em = &ssvm_eth_main;
  ssvm_shared_header_t * sh = intfc->sh;
  unix_shared_memory_queue_t * q;
  u32 * from;
  u32 n_left;
  ssvm_eth_queue_elt_t * elts, * elt, * prev_elt;
  u32 my_pid = intfc->my_pid;
  vlib_buffer_t * b0;
  u32 bi0;
  u32 size_this_buffer;
  u32 chunks_this_buffer;
  u8 i_am_master = intfc->i_am_master;
  u32 elt_index;
  int is_ring_full, interface_down;
  int i;
  volatile u32 *queue_lock;
  u32 n_to_alloc = VLIB_FRAME_SIZE;
  u32 n_allocated, n_present_in_cache, n_available;
  u32 * elt_indices;
  
  if (i_am_master)
    q = (unix_shared_memory_queue_t *)sh->opaque [TO_SLAVE_Q_INDEX];
  else
    q = (unix_shared_memory_queue_t *)sh->opaque [TO_MASTER_Q_INDEX];

  queue_lock = (u32 *) q;

  // from = vlib_frame_vector_args (f);
  //n_left = f->n_vectors;
  n_left = 1;

  is_ring_full = 0;
  interface_down = 0;

  n_present_in_cache = vec_len (em->chunk_cache);

#ifdef XXX
  /* admin / link up/down check */
  if (sh->opaque [MASTER_ADMIN_STATE_INDEX] == 0 ||
      sh->opaque [SLAVE_ADMIN_STATE_INDEX] == 0)
    {
      interface_down = 1;
      goto out;
    }
#endif

  ssvm_lock (sh, my_pid, 1);

  elts = (ssvm_eth_queue_elt_t *) (sh->opaque [CHUNK_POOL_INDEX]);
  elt_indices = (u32 *) (sh->opaque [CHUNK_POOL_FREELIST_INDEX]);
  n_available = (u32) pointer_to_uword(sh->opaque [CHUNK_POOL_NFREE]);

  printf("AYXX: n_left: %d, n_present_in_cache: %d\n", n_left, n_present_in_cache);

  if (n_present_in_cache < n_left*2)
    {
      vec_validate (em->chunk_cache, 
                    n_to_alloc + n_present_in_cache - 1);

      n_allocated = n_to_alloc < n_available ? n_to_alloc : n_available;
      printf("AYXX: n_allocated: %d, n_to_alloc: %d, n_available: %d\n", n_allocated, n_to_alloc, n_available);

      if (PREDICT_TRUE(n_allocated > 0))
	{
	  memcpy (&em->chunk_cache[n_present_in_cache],
		  &elt_indices[n_available - n_allocated],
		  sizeof(u32) * n_allocated);
	}

      n_present_in_cache += n_allocated;
      n_available -= n_allocated;
      sh->opaque [CHUNK_POOL_NFREE] = uword_to_pointer(n_available, void*);
      _vec_len (em->chunk_cache) = n_present_in_cache;
    }
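This truncated TX path refills a private chunk cache from a freelist shared through sh->opaque: indices are copied from the tail of the freelist and the shared free count is decremented, all under the ssvm lock. A minimal sketch of that refill step, with locking elided and illustrative names rather than the ssvm API:

/* Hedged sketch of the chunk-cache refill above: take indices from the
 * tail of a shared freelist into a private cache. Locking elided; names
 * are illustrative, not the ssvm API. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

int
main (void)
{
  uint32_t freelist[8] = { 10, 11, 12, 13, 14, 15, 16, 17 };
  uint32_t n_available = 8;
  uint32_t cache[8];
  uint32_t n_in_cache = 0, n_to_alloc = 3;

  uint32_t n_allocated = n_to_alloc < n_available ? n_to_alloc : n_available;
  /* Copy from the *end* of the freelist, exactly as the code above does. */
  memcpy (&cache[n_in_cache], &freelist[n_available - n_allocated],
	  sizeof (uint32_t) * n_allocated);
  n_in_cache += n_allocated;
  n_available -= n_allocated;

  for (uint32_t i = 0; i < n_in_cache; i++)
    printf ("cached chunk %u\n", cache[i]);	/* prints 15, 16, 17 */
  return 0;
}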
Exemplo n.º 20
0
static clib_error_t *
recvmsg_helper (mc_socket_main_t * msm,
		int socket,
		struct sockaddr_in * rx_addr,
		u32 * buffer_index,
		u32 drop_message)
{
  vlib_main_t * vm = msm->mc_main.vlib_main;
  vlib_buffer_t * b;
  uword n_left, n_alloc, n_mtu, i, i_rx;
  const uword buffer_size = VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES;
  word n_bytes_left;

  /* Make sure we have at least an MTU's worth of buffers. */
  n_mtu = msm->rx_mtu_n_buffers;
  n_left = vec_len (msm->rx_buffers);
  if (n_left < n_mtu)
    {
      uword max_alloc = 8 * n_mtu;
      vec_validate (msm->rx_buffers, max_alloc - 1);
      n_alloc = vlib_buffer_alloc (vm, msm->rx_buffers + n_left, max_alloc - n_left);
      _vec_len (msm->rx_buffers) = n_left + n_alloc;
    }

  ASSERT (vec_len (msm->rx_buffers) >= n_mtu);
  vec_validate (msm->iovecs, n_mtu - 1);

  /* Allocate RX buffers from end of rx_buffers.
     Turn them into iovecs to pass to readv. */
  i_rx = vec_len (msm->rx_buffers) - 1;
  for (i = 0; i < n_mtu; i++)
    {
      b = vlib_get_buffer (vm, msm->rx_buffers[i_rx - i]);
      msm->iovecs[i].iov_base = b->data;
      msm->iovecs[i].iov_len = buffer_size;
    }
  _vec_len (msm->iovecs) = n_mtu;

  {
    struct msghdr h;

    memset (&h, 0, sizeof (h));
    if (rx_addr)
      {
	h.msg_name = rx_addr;
	h.msg_namelen = sizeof (rx_addr[0]);
      }
    h.msg_iov = msm->iovecs;
    h.msg_iovlen = vec_len (msm->iovecs);

    n_bytes_left = recvmsg (socket, &h, 0);
    if (n_bytes_left < 0)
      return clib_error_return_unix (0, "recvmsg");
  }

  if (drop_message)
    {
      *buffer_index = ~0;
      return 0;
    }

  *buffer_index = msm->rx_buffers[i_rx];
  while (1)
    {
      b = vlib_get_buffer (vm, msm->rx_buffers[i_rx]);

      b->flags = 0;
      b->current_data = 0;
      b->current_length = n_bytes_left < buffer_size ? n_bytes_left : buffer_size;

      n_bytes_left -= buffer_size;

      if (n_bytes_left <= 0)
	break;

      i_rx--;
      b->flags |= VLIB_BUFFER_NEXT_PRESENT;
      b->next_buffer = msm->rx_buffers[i_rx];
    }

  _vec_len (msm->rx_buffers) = i_rx;

  return 0 /* no error */;
}
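recvmsg_helper pre-allocates a full MTU's worth of fixed-size buffers, exposes them to the kernel as an iovec array, and afterwards walks the chain assigning each buffer min(remaining, buffer_size) bytes. The same scatter-read mechanics in plain POSIX, using readv on a pipe instead of recvmsg on a socket and with no vlib buffer chaining:

/* Hedged sketch of the scatter-read idea in recvmsg_helper: one byte
 * stream lands across several fixed-size buffers via an iovec array, and
 * per-buffer lengths are derived from the total afterwards. */
#include <stdio.h>
#include <unistd.h>
#include <sys/uio.h>

int
main (void)
{
  int fds[2];
  char buf_a[8], buf_b[8];
  struct iovec iov[2] = {
    { .iov_base = buf_a, .iov_len = sizeof (buf_a) },
    { .iov_base = buf_b, .iov_len = sizeof (buf_b) },
  };

  if (pipe (fds) < 0)
    return 1;
  if (write (fds[1], "hello scatter!", 14) != 14)
    return 1;

  ssize_t n_bytes_left = readv (fds[0], iov, 2);
  /* Walk the buffers like the while(1) loop above: each holds at most
   * its capacity; the last holds the remainder. */
  for (int i = 0; i < 2 && n_bytes_left > 0; i++)
    {
      size_t cap = iov[i].iov_len;
      size_t this_len = (size_t) n_bytes_left < cap ? (size_t) n_bytes_left : cap;
      printf ("buffer %d: %zu bytes: %.*s\n", i, this_len,
	      (int) this_len, (char *) iov[i].iov_base);
      n_bytes_left -= this_len;
    }
  return 0;
}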
Exemplo n.º 21
0
static void
vl_msg_api_process_file (vlib_main_t * vm, u8 * filename,
			 u32 first_index, u32 last_index,
			 vl_api_replay_t which)
{
  vl_api_trace_file_header_t *hp;
  int i, fd;
  struct stat statb;
  size_t file_size;
  u8 *msg;
  u8 endian_swap_needed = 0;
  api_main_t *am = &api_main;
  static u8 *tmpbuf;
  u32 nitems;
  void **saved_print_handlers = 0;

  fd = open ((char *) filename, O_RDONLY);

  if (fd < 0)
    {
      vlib_cli_output (vm, "Couldn't open %s\n", filename);
      return;
    }

  if (fstat (fd, &statb) < 0)
    {
      vlib_cli_output (vm, "Couldn't stat %s\n", filename);
      close (fd);
      return;
    }

  if (!(statb.st_mode & S_IFREG) || (statb.st_size < sizeof (*hp)))
    {
      vlib_cli_output (vm, "File not plausible: %s\n", filename);
      close (fd);
      return;
    }

  file_size = statb.st_size;
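  /* Round the mapping length up to the next 4 KiB page: add (page - 1),
     then mask off the low bits -- the standard align-up idiom. */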
  file_size = (file_size + 4095) & ~(4095);

  hp = mmap (0, file_size, PROT_READ, MAP_PRIVATE, fd, 0);

  if (hp == (vl_api_trace_file_header_t *) MAP_FAILED)
    {
      vlib_cli_output (vm, "mmap failed: %s\n", filename);
      close (fd);
      return;
    }
  close (fd);

  if ((clib_arch_is_little_endian && hp->endian == VL_API_BIG_ENDIAN)
      || (clib_arch_is_big_endian && hp->endian == VL_API_LITTLE_ENDIAN))
    endian_swap_needed = 1;

  if (endian_swap_needed)
    nitems = ntohl (hp->nitems);
  else
    nitems = hp->nitems;

  if (last_index == (u32) ~ 0)
    {
      last_index = nitems - 1;
    }

  if (first_index >= nitems || last_index >= nitems)
    {
      vlib_cli_output (vm, "Range (%d, %d) outside file range (0, %d)\n",
		       first_index, last_index, nitems - 1);
      munmap (hp, file_size);
      return;
    }
  if (hp->wrapped)
    vlib_cli_output (vm,
		     "Note: wrapped/incomplete trace, results may vary\n");

  if (which == CUSTOM_DUMP)
    {
      saved_print_handlers = (void **) vec_dup (am->msg_print_handlers);
      vl_msg_api_custom_dump_configure (am);
    }

  msg = (u8 *) (hp + 1);

  for (i = 0; i < first_index; i++)
    {
      trace_cfg_t *cfgp;
      int size;
      u16 msg_id;

      if (clib_arch_is_little_endian)
	msg_id = ntohs (*((u16 *) msg));
      else
	msg_id = *((u16 *) msg);

      cfgp = am->api_trace_cfg + msg_id;
      if (!cfgp)
	{
	  vlib_cli_output (vm, "Ugh: msg id %d no trace config\n", msg_id);
	  munmap (hp, file_size);
	  return;
	}
      size = cfgp->size;
      msg += size;
    }

  for (; i <= last_index; i++)
    {
      trace_cfg_t *cfgp;
      u16 *msg_idp;
      u16 msg_id;
      int size;

      if (which == DUMP)
	vlib_cli_output (vm, "---------- trace %d -----------\n", i);

      if (clib_arch_is_little_endian)
	msg_id = ntohs (*((u16 *) msg));
      else
	msg_id = *((u16 *) msg);

      cfgp = am->api_trace_cfg + msg_id;
      if (!cfgp)
	{
	  vlib_cli_output (vm, "Ugh: msg id %d no trace config\n", msg_id);
	  munmap (hp, file_size);
	  return;
	}
      size = cfgp->size;

      /* Copy the buffer (from the read-only mmap'ed file) */
      vec_validate (tmpbuf, size - 1 + sizeof (uword));
      clib_memcpy (tmpbuf + sizeof (uword), msg, size);
      memset (tmpbuf, 0xf, sizeof (uword));

      /*
       * Endian swap if needed. All msg data is supposed to be
       * in network byte order. All msg handlers are supposed to
       * know that. The generic message dumpers don't know that.
       * One could fix apigen, I suppose.
       */
      if ((which == DUMP && clib_arch_is_little_endian) || endian_swap_needed)
	{
	  void (*endian_fp) (void *);
	  if (msg_id >= vec_len (am->msg_endian_handlers)
	      || (am->msg_endian_handlers[msg_id] == 0))
	    {
	      vlib_cli_output (vm, "Ugh: msg id %d no endian swap\n", msg_id);
	      munmap (hp, file_size);
	      return;
	    }
	  endian_fp = am->msg_endian_handlers[msg_id];
	  (*endian_fp) (tmpbuf + sizeof (uword));
	}

      /* msg_id always in network byte order */
      if (clib_arch_is_little_endian)
	{
	  msg_idp = (u16 *) (tmpbuf + sizeof (uword));
	  *msg_idp = msg_id;
	}

      switch (which)
	{
	case CUSTOM_DUMP:
	case DUMP:
	  if (msg_id < vec_len (am->msg_print_handlers) &&
	      am->msg_print_handlers[msg_id])
	    {
	      u8 *(*print_fp) (void *, void *);

	      print_fp = (void *) am->msg_print_handlers[msg_id];
	      (*print_fp) (tmpbuf + sizeof (uword), vm);
	    }
	  else
	    {
	      vlib_cli_output (vm, "Skipping msg id %d: no print fcn\n",
			       msg_id);
	      break;
	    }
	  break;

	case INITIALIZERS:
	  if (msg_id < vec_len (am->msg_print_handlers) &&
	      am->msg_print_handlers[msg_id])
	    {
	      u8 *s;
	      int j;
	      u8 *(*print_fp) (void *, void *);

	      print_fp = (void *) am->msg_print_handlers[msg_id];

	      vlib_cli_output (vm, "/*");

	      (*print_fp) (tmpbuf + sizeof (uword), vm);
	      vlib_cli_output (vm, "*/\n");

	      s = format (0, "static u8 * vl_api_%s_%d[%d] = {",
			  am->msg_names[msg_id], i,
			  am->api_trace_cfg[msg_id].size);

	      for (j = 0; j < am->api_trace_cfg[msg_id].size; j++)
		{
		  if ((j & 7) == 0)
		    s = format (s, "\n    ");
		  s = format (s, "0x%02x,", tmpbuf[sizeof (uword) + j]);
		}
	      s = format (s, "\n};\n%c", 0);
	      vlib_cli_output (vm, (char *) s);
	      vec_free (s);
	    }
	  break;

	case REPLAY:
	  if (msg_id < vec_len (am->msg_print_handlers) &&
	      am->msg_print_handlers[msg_id] && cfgp->replay_enable)
	    {
	      void (*handler) (void *);

	      handler = (void *) am->msg_handlers[msg_id];

	      if (!am->is_mp_safe[msg_id])
		vl_msg_api_barrier_sync ();
	      (*handler) (tmpbuf + sizeof (uword));
	      if (!am->is_mp_safe[msg_id])
		vl_msg_api_barrier_release ();
	    }
	  else
	    {
	      if (cfgp->replay_enable)
		vlib_cli_output (vm, "Skipping msg id %d: no handler\n",
				 msg_id);
	      break;
	    }
	  break;
	}

      _vec_len (tmpbuf) = 0;
      msg += size;
    }

  if (saved_print_handlers)
    {
      clib_memcpy (am->msg_print_handlers, saved_print_handlers,
		   vec_len (am->msg_print_handlers) * sizeof (void *));
      vec_free (saved_print_handlers);
    }

  munmap (hp, file_size);
}
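Throughout the loop above, the message id always sits in the first two bytes in network byte order; dispatch means converting it to host order and indexing a handler table, with bounds and null checks before the call. A toy sketch of that pattern; the table, message layout, and show_version_handler are hypothetical, not the vlib API:

/* Toy sketch of the dispatch pattern above: the msg id lives in the first
 * two bytes in network byte order; look it up in a handler table after
 * ntohs. Tables and message layout are hypothetical. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

static void
show_version_handler (void *msg)
{
  (void) msg;			/* payload unused in this toy handler */
  printf ("show_version called\n");
}

typedef void (*msg_handler_t) (void *);
static msg_handler_t handlers[16] = { [3] = show_version_handler };

static void
dispatch (uint8_t * msg)
{
  uint16_t msg_id_net, msg_id;

  memcpy (&msg_id_net, msg, sizeof (msg_id_net));	/* may be unaligned */
  msg_id = ntohs (msg_id_net);	/* id is big-endian on the wire */

  if (msg_id >= 16 || !handlers[msg_id])
    {
      printf ("no handler for msg id %u\n", msg_id);
      return;
    }
  handlers[msg_id] (msg);
}

int
main (void)
{
  uint8_t msg[4] = { 0, 3, 0, 0 };	/* id 3 in network byte order */
  dispatch (msg);
  return 0;
}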
Exemplo n.º 22
0
int
vl_map_shmem (const char *region_name, int is_vlib)
{
  svm_map_region_args_t _a, *a = &_a;
  svm_region_t *vlib_rp, *root_rp;
  api_main_t *am = &api_main;
  int i;
  struct timespec ts, tsrem;
  char *vpe_api_region_suffix = "-vpe-api";

  clib_memset (a, 0, sizeof (*a));

  if (strstr (region_name, vpe_api_region_suffix))
    {
      u8 *root_path = format (0, "%s", region_name);
      _vec_len (root_path) = (vec_len (root_path) -
			      strlen (vpe_api_region_suffix));
      vec_terminate_c_string (root_path);
      a->root_path = (const char *) root_path;
      am->root_path = (const char *) root_path;
    }

  if (is_vlib == 0)
    {
      int tfd;
      u8 *api_name;
      /*
       * Clients wait for vpp to set up the root / API regions
       */
      if (am->root_path)
	api_name = format (0, "/dev/shm/%s-%s%c", am->root_path,
			   region_name + 1, 0);
      else
	api_name = format (0, "/dev/shm%s%c", region_name, 0);

      /* Wait up to 100 seconds... */
      for (i = 0; i < 10000; i++)
	{
	  ts.tv_sec = 0;
	  ts.tv_nsec = 10000 * 1000;	/* 10 ms */
	  while (nanosleep (&ts, &tsrem) < 0)
	    ts = tsrem;
	  tfd = open ((char *) api_name, O_RDWR);
	  if (tfd >= 0)
	    break;
	}
      vec_free (api_name);
      if (tfd < 0)
	{
	  clib_warning ("region init fail");
	  return -2;
	}
      close (tfd);
      svm_region_init_chroot_uid_gid (am->root_path, getuid (), getgid ());
    }

  if (a->root_path != NULL)
    {
      a->name = "/vpe-api";
    }
  else
    a->name = region_name;
  a->size = am->api_size ? am->api_size : (16 << 20);
  a->flags = SVM_FLAGS_MHEAP;
  a->uid = am->api_uid;
  a->gid = am->api_gid;
  a->pvt_heap_size = am->api_pvt_heap_size;

  vlib_rp = svm_region_find_or_create (a);

  if (vlib_rp == 0)
    return (-2);

  pthread_mutex_lock (&vlib_rp->mutex);
  /* Has someone else set up the shared-memory variable table? */
  if (vlib_rp->user_ctx)
    {
      am->shmem_hdr = (void *) vlib_rp->user_ctx;
      am->our_pid = getpid ();
      if (is_vlib)
	{
	  svm_queue_t *q;
	  uword old_msg;
	  /*
	   * application restart. Reset cached pids, API message
	   * rings, list of clients; otherwise, various things
	   * fail. (e.g. queue non-empty notification)
	   */

	  /* ghosts keep the region from disappearing properly */
	  svm_client_scan_this_region_nolock (vlib_rp);
	  am->shmem_hdr->application_restarts++;
	  q = am->shmem_hdr->vl_input_queue;
	  am->shmem_hdr->vl_pid = getpid ();
	  q->consumer_pid = am->shmem_hdr->vl_pid;
	  /* Drain the input queue, freeing msgs */
	  for (i = 0; i < 10; i++)
	    {
	      if (pthread_mutex_trylock (&q->mutex) == 0)
		{
		  pthread_mutex_unlock (&q->mutex);
		  goto mutex_ok;
		}
	      ts.tv_sec = 0;
	      ts.tv_nsec = 10000 * 1000;	/* 10 ms */
	      while (nanosleep (&ts, &tsrem) < 0)
		ts = tsrem;
	    }
	  /* Mutex buggered, "fix" it */
	  clib_memset (&q->mutex, 0, sizeof (q->mutex));
	  clib_warning ("forcibly release main input queue mutex");

	mutex_ok:
	  am->vlib_rp = vlib_rp;
	  while (svm_queue_sub (q, (u8 *) & old_msg, SVM_Q_NOWAIT, 0)
		 != -2 /* queue underflow */ )
	    {
	      vl_msg_api_free_nolock ((void *) old_msg);
	      am->shmem_hdr->restart_reclaims++;
	    }
	  pthread_mutex_unlock (&vlib_rp->mutex);
	  root_rp = svm_get_root_rp ();
	  ASSERT (root_rp);
	  /* Clean up the root region client list */
	  pthread_mutex_lock (&root_rp->mutex);
	  svm_client_scan_this_region_nolock (root_rp);
	  pthread_mutex_unlock (&root_rp->mutex);
	}
      else
	{
	  pthread_mutex_unlock (&vlib_rp->mutex);
	}
      am->vlib_rp = vlib_rp;
      vec_add1 (am->mapped_shmem_regions, vlib_rp);
      return 0;
    }
  /* Clients simply have to wait... */
  if (!is_vlib)
    {
      pthread_mutex_unlock (&vlib_rp->mutex);

      /* Wait up to 100 seconds... */
      for (i = 0; i < 10000; i++)
	{
	  ts.tv_sec = 0;
	  ts.tv_nsec = 10000 * 1000;	/* 10 ms */
	  while (nanosleep (&ts, &tsrem) < 0)
	    ts = tsrem;
	  if (vlib_rp->user_ctx)
	    goto ready;
	}
      /* Clean up and leave... */
      svm_region_unmap (vlib_rp);
      clib_warning ("region init fail");
      return (-2);

    ready:
      am->shmem_hdr = (void *) vlib_rp->user_ctx;
      am->our_pid = getpid ();
      am->vlib_rp = vlib_rp;
      vec_add1 (am->mapped_shmem_regions, vlib_rp);
      return 0;
    }

  /* Nope, it's our problem... */
  vl_init_shmem (vlib_rp, 0 /* default config */ , 1 /* is vlib */ ,
		 0 /* is_private_region */ );

  vec_add1 (am->mapped_shmem_regions, vlib_rp);
  return 0;
}
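Both client-side waits in vl_map_shmem follow the same shape: up to 10 000 iterations of a 10 ms nanosleep (about 100 seconds total), restarting the sleep from the remainder if a signal interrupts it. A self-contained distillation of that idiom; wait_for_file is an illustrative helper, not a vpp function:

/* Hedged distillation of the "wait up to 100 seconds" loops above:
 * 10 000 tries x 10 ms nanosleep = 100 s, resuming the sleep if a
 * signal interrupts it. */
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static int
wait_for_file (const char *path)
{
  struct timespec ts, tsrem;

  for (int i = 0; i < 10000; i++)
    {
      ts.tv_sec = 0;
      ts.tv_nsec = 10000 * 1000;	/* 10 ms */
      while (nanosleep (&ts, &tsrem) < 0)
	ts = tsrem;			/* resume interrupted sleep */
      if (access (path, R_OK | W_OK) == 0)
	return 0;			/* region file has appeared */
    }
  return -1;				/* timed out after ~100 s */
}

int
main (void)
{
  if (wait_for_file ("/dev/shm/vpe-api") < 0)
    fprintf (stderr, "region init fail\n");
  return 0;
}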