Example #1
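Builds the shared-memory API region from a configuration vector: the input queue is allocated directly, while VL_API_VLIB_RING and VL_API_CLIENT_RING entries fall through to a common tail that uses vec_add2 to append and initialize a ring_alloc_t descriptor.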
void
vl_api_mem_config (vl_shmem_hdr_t * hdr, vl_api_shm_elem_config_t * config)
{
  vl_api_shm_elem_config_t *c;
  ring_alloc_t *rp;
  u32 size;

  if (!config)
    {
      vl_api_default_mem_config (hdr);
      return;
    }

  vec_foreach (c, config)
  {
    switch (c->type)
      {
      case VL_API_QUEUE:
	hdr->vl_input_queue = svm_queue_alloc_and_init (c->count, c->size,
							getpid ());
	continue;
      case VL_API_VLIB_RING:
	vec_add2 (hdr->vl_rings, rp, 1);
	break;
      case VL_API_CLIENT_RING:
	vec_add2 (hdr->client_rings, rp, 1);
	break;
      default:
	clib_warning ("unknown config type: %d", c->type);
	continue;
      }

    size = sizeof (ring_alloc_t) + c->size;
    rp->rp = svm_queue_alloc_and_init (c->count, size, 0);
    rp->size = size;
    rp->nitems = c->count;
    rp->hits = 0;
    rp->misses = 0;
  }
}
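All of these examples rely on the same vppinfra idiom: vec_add2 (V, P, N) appends N elements to the vector V and hands back a pointer to the first new element through P. The vector may be reallocated as it grows, so V is updated in place and previously saved element pointers can be invalidated. A minimal sketch of the pattern (rec_t and add_rec are hypothetical names; assumes a vppinfra build with clib_mem_init() already done, as in any VPP process):

#include <vppinfra/vec.h>

typedef struct
{
  u32 code;
  char *name;
} rec_t;

static rec_t *
add_rec (rec_t * recs, u32 code, char *name)
{
  rec_t *r;

  /* Append one element: 'recs' may be reallocated (and is updated),
   * while 'r' points at the freshly added slot. */
  vec_add2 (recs, r, 1);
  r->code = code;
  r->name = name;
  return recs;			/* callers must keep the updated vector */
}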
Example #2
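First pass over a CPEL event-definition section: each previously unseen event code gets a bound_event_t appended with vec_add2, and the element's index (bp - bound_events) is recorded in a hash for later lookup.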
int evtdef_pass1(cpel_section_header_t *sh, int verbose, FILE *ofp)
{
    int i, nevents;
    event_definition_section_header_t *edh;
    event_definition_t *ep;
    u8 *this_strtab;
    u32 event_code;
    uword *p;
    bound_event_t *bp;

    edh = (event_definition_section_header_t *)(sh+1);
    nevents = ntohl(edh->number_of_event_definitions);
    
    if (verbose) {
        fprintf(ofp, "Event Definition Section: %d definitions\n",
                nevents);
    }

    p = hash_get_mem(the_strtab_hash, edh->string_table_name);
    if (!p) {
        fprintf(ofp, "Fatal: couldn't find string table\n");
        return(1);
    }
    this_strtab = (u8 *)p[0];

    initialize_events();

    ep = (event_definition_t *)(edh+1);
    
    for (i = 0; i < nevents; i++) {
        event_code = ntohl(ep->event);
        p = hash_get(the_evtdef_hash, event_code);
        if (p) {
            fprintf(ofp, "Event %d redefined, retain first definition\n",
                    event_code);
            continue;
        }
        vec_add2(bound_events, bp, 1);
        bp->event_code = event_code;
        bp->event_str = this_strtab + ntohl(ep->event_format);
        bp->datum_str = this_strtab + ntohl(ep->datum_format);
        hash_set(the_evtdef_hash, event_code, bp - bound_events);

        add_event_from_cpel_file(event_code, (char *) bp->event_str, 
                                 (char *)bp->datum_str);

        ep++;
    }

    finalize_events();
    return (0);
}
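Note that hash_set records the element's index (bp - bound_events) rather than the pointer bp: a later vec_add2 may reallocate bound_events and move every element, so only the index stays valid. A short sketch of the matching lookup (lookup_bound_event is a hypothetical helper; assumes the globals from Example #2 are in scope):

static bound_event_t *
lookup_bound_event (u32 event_code)
{
  uword *p = hash_get (the_evtdef_hash, event_code);

  if (p == 0)
    return 0;
  /* Convert the stored index back into a pointer; the vector may
   * have moved since the entry was added. */
  return vec_elt_at_index (bound_events, p[0]);
}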
Example #3
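Schedules a one-shot timer callback: with the timer signal blocked, vec_add2 appends a timer_callback_t, the vector is re-sorted if needed, and the interval timer is re-armed when the new entry becomes the earliest deadline.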
/* Arrange for function to be called some time,
   roughly equal to dt seconds, in the future. */
void
timer_call (timer_func_t * func, any arg, f64 dt)
{
  timer_callback_t *t;
  sigset_t save;

  /* Install signal handler on first call. */
  static word signal_installed = 0;

  if (!signal_installed)
    {
      struct sigaction sa;

      /* Initialize time_resolution before first call to timer_interrupt */
      time_resolution = 0.75 / (f64) HZ;

      clib_memset (&sa, 0, sizeof (sa));
      sa.sa_handler = timer_interrupt;

      if (sigaction (TIMER_SIGNAL, &sa, 0) < 0)
	clib_panic ("sigaction");

      signal_installed = 1;
    }

  timer_block (&save);

  /* Add new timer. */
  vec_add2 (timers, t, 1);

  t->time = unix_time_now () + dt;
  t->func = func;
  t->arg = arg;

  {
    word reset_timer = vec_len (timers) == 1;

    if (_vec_len (timers) > 1)
      {
	reset_timer += t->time < (t - 1)->time;
	sort_timers (timers);
      }

    if (reset_timer)
      timer_interrupt (TIMER_SIGNAL);
  }

  timer_unblock (&save);
}
Example #4
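First pass over a CPEL track-definition section, the same pattern as Example #2: append a bound_track_t with vec_add2, hash its index, and remember the widest track-format string.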
int trackdef_pass1(cpel_section_header_t *sh, int verbose, FILE *ofp)
{
    int i, nevents;
    track_definition_section_header_t *tdh;
    track_definition_t *tp;
    u8 *this_strtab;
    u32 track_code;
    uword *p;
    bound_track_t *btp;
    int track_strlen;

    tdh = (track_definition_section_header_t *)(sh+1);
    nevents = ntohl(tdh->number_of_track_definitions);
    
    if (verbose) {
        fprintf(ofp, "Track Definition Section: %d definitions\n",
                nevents);
    }

    p = hash_get_mem(the_strtab_hash, tdh->string_table_name);
    if (!p) {
        fprintf(ofp, "Fatal: couldn't find string table\n");
        return(1);
    }
    this_strtab = (u8 *)p[0];

    tp = (track_definition_t *)(tdh+1);
    
    for (i = 0; i < nevents; i++) {
        track_code = ntohl(tp->track);
        p = hash_get(the_trackdef_hash, track_code);
        if (p) {
            fprintf(ofp, "track %d redefined, retain first definition\n",
                    track_code);
            continue;
        }
        vec_add2(bound_tracks, btp, 1);
        btp->track = track_code;
        btp->track_str = this_strtab + ntohl(tp->track_format);
        hash_set(the_trackdef_hash, track_code, btp - bound_tracks);

        track_strlen = strlen((char *)btp->track_str);
        if (track_strlen > widest_track_format)
            widest_track_format = track_strlen;
        tp++;
    }
    return (0);
}
Example #5
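Refills a random-number buffer: the request is rounded up to a multiple of the ISAAC block size, the buffer is grown once with vec_add2, and the new space is filled block by block.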
/* Fill random buffer. */
void clib_random_buffer_fill (clib_random_buffer_t * b, uword n_words)
{
  uword * w, n = n_words;

  if (n < 256)
    n = 256;

  n = round_pow2 (n, 2 << ISAAC_LOG2_SIZE);

  vec_add2 (b->buffer, w, n);
  do {
    isaac2 (b->ctx, w);
    w += 2 * ISAAC_SIZE;
    n -= 2 * ISAAC_SIZE;
  } while (n > 0);
}
Example #6
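Registers an Ethernet type: vec_add2 appends a type-info record, whose index is then stored in both a by-type and a by-name hash.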
static void
add_type (ethernet_main_t * em, ethernet_type_t type, char *type_name)
{
  ethernet_type_info_t *ti;
  u32 i;

  vec_add2 (em->type_infos, ti, 1);
  i = ti - em->type_infos;

  ti->name = type_name;
  ti->type = type;
  ti->next_index = ti->node_index = ~0;

  hash_set (em->type_info_by_type, type, i);
  hash_set_mem (em->type_info_by_name, ti->name, i);
}
Example #7
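Fixes up a replicate DPO's path list: if the next-hop vector is empty, vec_add2 adds a single drop path so the replicate always has at least one bucket.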
static load_balance_path_t *
replicate_multipath_next_hop_fixup (load_balance_path_t *nhs,
                                    dpo_proto_t drop_proto)
{
    if (0 == vec_len(nhs))
    {
        load_balance_path_t *nh;

        /*
         * we need something for the replicate. so use the drop
         */
        vec_add2(nhs, nh, 1);

        nh->path_weight = 1;
        dpo_copy(&nh->path_dpo, drop_dpo_get(drop_proto));
    }

    return (nhs);
}
Example #8
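Programs a transport close: on the session's own thread (or with the worker barrier held), vec_add2 appends a close event to the worker's pending-disconnects vector; otherwise the event is handed to the owning thread.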
static void
session_program_transport_close (session_t * s)
{
  u32 thread_index = vlib_get_thread_index ();
  session_worker_t *wrk;
  session_event_t *evt;

  /* If we are in the handler thread, or being called with the worker barrier
   * held, just append a new event to pending disconnects vector. */
  if (vlib_thread_is_main_w_barrier () || thread_index == s->thread_index)
    {
      wrk = session_main_get_worker (s->thread_index);
      vec_add2 (wrk->pending_disconnects, evt, 1);
      clib_memset (evt, 0, sizeof (*evt));
      evt->session_handle = session_handle (s);
      evt->event_type = SESSION_CTRL_EVT_CLOSE;
    }
  else
    session_send_ctrl_evt_to_thread (s, SESSION_CTRL_EVT_CLOSE);
}
Example #9
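Converts a vlib buffer chain into an iovec array: one vec_add2 per buffer in the chain, returning the total byte count.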
static uword
append_buffer_index_to_iovec (vlib_main_t * vm,
			      u32 buffer_index,
			      struct iovec ** iovs_return)
{
  struct iovec * i;
  vlib_buffer_t * b;
  u32 bi = buffer_index;
  u32 l = 0;

  while (1)
    {
      b = vlib_get_buffer (vm, bi);
      vec_add2 (*iovs_return, i, 1);
      i->iov_base = vlib_buffer_get_current (b);
      i->iov_len = b->current_length;
      l += i->iov_len;
      if (! (b->flags & VLIB_BUFFER_NEXT_PRESENT))
	break;
      bi = b->next_buffer;
    }

  return l;
}
Example #10
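Scans the plugin path and loads new VAT plugins: vec_add2 reserves a plugin_info_t slot, which is reclaimed (by shrinking the vector length by one) if the plugin fails to load.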
int
vat_load_new_plugins (plugin_main_t * pm)
{
  DIR *dp;
  struct dirent *entry;
  struct stat statb;
  uword *p;
  plugin_info_t *pi;
  u8 **plugin_path;
  int i;

  plugin_path = split_plugin_path (pm);

  for (i = 0; i < vec_len (plugin_path); i++)
    {
      dp = opendir ((char *) plugin_path[i]);

      if (dp == 0)
	continue;

      while ((entry = readdir (dp)))
	{
	  u8 *plugin_name;
	  u8 *file_name;

	  if (pm->plugin_name_filter)
	    {
	      int j;
	      for (j = 0; j < vec_len (pm->plugin_name_filter); j++)
		if (entry->d_name[j] != pm->plugin_name_filter[j])
		  goto next;
	    }

	  file_name = format (0, "%s/%s%c", plugin_path[i], entry->d_name, 0);
	  plugin_name = format (0, "%s%c", entry->d_name, 0);

	  /* unreadable */
	  if (stat ((char *) file_name, &statb) < 0)
	    {
	    ignore:
	      vec_free (file_name);
	      vec_free (plugin_name);
	      continue;
	    }

	  /* a dir or other things which aren't plugins */
	  if (!S_ISREG (statb.st_mode))
	    goto ignore;

	  p = hash_get_mem (pm->plugin_by_name_hash, plugin_name);
	  if (p == 0)
	    {
	      vec_add2 (pm->plugin_info, pi, 1);
	      pi->name = plugin_name;
	      pi->filename = file_name;
	      pi->file_info = statb;

	      if (load_one_vat_plugin (pm, pi))
		{
		  vec_free (file_name);
		  vec_free (plugin_name);
		  _vec_len (pm->plugin_info) = vec_len (pm->plugin_info) - 1;
		  continue;
		}
	      clib_memset (pi, 0, sizeof (*pi));
	      hash_set_mem (pm->plugin_by_name_hash, plugin_name,
			    pi - pm->plugin_info);
	    }
	next:
	  ;
	}
      closedir (dp);
      vec_free (plugin_path[i]);
    }
  vec_free (plugin_path);
  return 0;
}
Example #11
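Adds or deletes an svmdb change notification: after scanning for duplicate registrations, vec_add2 appends a new svmdb_notify_t inside the shared-memory heap.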
int
svmdb_local_add_del_notification (svmdb_client_t * client,
				  svmdb_notification_args_t * a)
{
  uword *h;
  void *oldheap;
  hash_pair_t *hp;
  svmdb_shm_hdr_t *shm;
  u8 *dummy_value = 0;
  svmdb_value_t *value;
  svmdb_notify_t *np;
  int i;
  int rv = 0;

  ASSERT (a->elsize);

  region_lock (client->db_rp, 18);
  shm = client->shm;
  oldheap = svm_push_data_heap (client->db_rp);

  h = shm->namespaces[a->nspace];

  hp = hash_get_pair_mem (h, a->var);
  if (hp == 0)
    {
      local_set_variable_nolock (client, a->nspace, (u8 *) a->var,
				 dummy_value, a->elsize);
      /* might have moved */
      h = shm->namespaces[a->nspace];
      hp = hash_get_pair_mem (h, a->var);
      ASSERT (hp);
    }

  value = pool_elt_at_index (shm->values, hp->value[0]);

  for (i = 0; i < vec_len (value->notifications); i++)
    {
      np = vec_elt_at_index (value->notifications, i);
      if ((np->pid == client->pid)
	  && (np->signum == a->signum)
	  && (np->action == a->action) && (np->opaque == a->opaque))
	{
	  if (a->add_del == 0 /* delete */ )
	    {
	      vec_delete (value->notifications, 1, i);
	      goto out;
	    }
	  else
	    {			/* add */
	      clib_warning
		("%s: ignore dup reg pid %d signum %d action %d opaque %x",
		 a->var, client->pid, a->signum, a->action, a->opaque);
	      rv = -2;
	      goto out;
	    }
	}
    }
  if (a->add_del == 0)
    {
      rv = -3;
      goto out;
    }

  vec_add2 (value->notifications, np, 1);
  np->pid = client->pid;
  np->signum = a->signum;
  np->action = a->action;
  np->opaque = a->opaque;

out:
  svm_pop_heap (oldheap);
  region_unlock (client->db_rp);
  return rv;
}
Example #12
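CLI handler for a virtual IP: each parsed "mac" token is collected into a vector with vec_add2, then a rewrite adjacency and /32 route are added per MAC address.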
static clib_error_t *
virtual_ip_cmd_fn_command_fn (vlib_main_t * vm,
		 unformat_input_t * input,
		 vlib_cli_command_t * cmd)
{
    unformat_input_t _line_input, * line_input = &_line_input;
    vnet_main_t * vnm = vnet_get_main();
    ip4_main_t * im = &ip4_main;
    ip_lookup_main_t * lm = &im->lookup_main;
    ip4_address_t ip_addr, next_hop;
    u8 mac_addr[6];
    mac_addr_t *mac_addrs = 0;
    u32 sw_if_index;
    u32 i, f;

    /* Get a line of input. */
    if (! unformat_user (input, unformat_line_input, line_input))
        return 0;

    if (!unformat(line_input, "%U %U", 
                  unformat_ip4_address, &ip_addr,
                  unformat_vnet_sw_interface, vnm, &sw_if_index))
        goto barf;

    while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) {
	if (unformat (line_input, "mac %U", 
                      unformat_ethernet_address, 
                      &mac_addr))
        {
            mac_addr_t *ma;
            vec_add2 (mac_addrs, ma, 1);
            clib_memcpy(ma, mac_addr, sizeof (mac_addr));
        } else {
        barf:
	    return clib_error_return (0, "unknown input `%U'",
				      format_unformat_error, input);
        }
    }
    if (vec_len (mac_addrs) == 0)
        goto barf;

    /* Create / delete special interface route /32's */
    next_hop.as_u32 = 0;

    for (i = 0; i < vec_len(mac_addrs); i++) {
        ip_adjacency_t adj;
        u32 adj_index;
        
        adj.lookup_next_index = IP_LOOKUP_NEXT_REWRITE;
        
        vnet_rewrite_for_sw_interface
            (vnm,
             VNET_L3_PACKET_TYPE_IP4,
             sw_if_index,
             ip4_rewrite_node.index,
             &mac_addrs[i],     /* destination address */
             &adj.rewrite_header,
             sizeof (adj.rewrite_data));

        ip_add_adjacency (lm, &adj, 1 /* one adj */,
                          &adj_index);
        
        f = (i + 1 < vec_len(mac_addrs)) ? IP4_ROUTE_FLAG_NOT_LAST_IN_GROUP : 0;
        ip4_add_del_route_next_hop (im,
                                    IP4_ROUTE_FLAG_ADD | f,
                                    &ip_addr,
                                    32 /* insert /32's */,
                                    &next_hop,
                                    sw_if_index,
                                    1 /* weight */, 
                                    adj_index, 
                                    (u32)~0 /* explicit fib index */);
    }

    vec_free (mac_addrs);

    return 0;
}
Example #13
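Programs a DPDK rte_flow rule: vec_add2 builds the match-item and action arrays element by element, each terminated by a *_TYPE_END entry; the companion dpdk_flow_ops_fn handles flow add/delete and mark-slot recycling.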
static int
dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
{
  struct rte_flow_item_ipv4 ip4[2] = { };
  struct rte_flow_item_ipv6 ip6[2] = { };
  struct rte_flow_item_udp udp[2] = { };
  struct rte_flow_item_tcp tcp[2] = { };
  struct rte_flow_action_mark mark = { 0 };
  struct rte_flow_item *item, *items = 0;
  struct rte_flow_action *action, *actions = 0;

  enum
  {
    vxlan_hdr_sz = sizeof (vxlan_header_t),
    raw_sz = sizeof (struct rte_flow_item_raw)
  };

  union
  {
    struct rte_flow_item_raw item;
    u8 val[raw_sz + vxlan_hdr_sz];
  } raw[2];

  u16 src_port, dst_port, src_port_mask, dst_port_mask;
  u8 protocol;
  int rv = 0;

  if (f->actions & (~xd->supported_flow_actions))
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

  /* Match items */
  /* Ethernet */
  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_ETH;
  item->spec = any_eth;
  item->mask = any_eth + 1;

  /* VLAN */
  if (f->type != VNET_FLOW_TYPE_IP4_VXLAN)
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_VLAN;
      item->spec = any_vlan;
      item->mask = any_vlan + 1;
    }

  /* IP */
  vec_add2 (items, item, 1);
  if (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE)
    {
      vnet_flow_ip6_n_tuple_t *t6 = &f->ip6_n_tuple;
      clib_memcpy_fast (ip6[0].hdr.src_addr, &t6->src_addr.addr, 16);
      clib_memcpy_fast (ip6[1].hdr.src_addr, &t6->src_addr.mask, 16);
      clib_memcpy_fast (ip6[0].hdr.dst_addr, &t6->dst_addr.addr, 16);
      clib_memcpy_fast (ip6[1].hdr.dst_addr, &t6->dst_addr.mask, 16);
      item->type = RTE_FLOW_ITEM_TYPE_IPV6;
      item->spec = ip6;
      item->mask = ip6 + 1;

      src_port = t6->src_port.port;
      dst_port = t6->dst_port.port;
      src_port_mask = t6->src_port.mask;
      dst_port_mask = t6->dst_port.mask;
      protocol = t6->protocol;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE)
    {
      vnet_flow_ip4_n_tuple_t *t4 = &f->ip4_n_tuple;
      ip4[0].hdr.src_addr = t4->src_addr.addr.as_u32;
      ip4[1].hdr.src_addr = t4->src_addr.mask.as_u32;
      ip4[0].hdr.dst_addr = t4->dst_addr.addr.as_u32;
      ip4[1].hdr.dst_addr = t4->dst_addr.mask.as_u32;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
      item->spec = ip4;
      item->mask = ip4 + 1;

      src_port = t4->src_port.port;
      dst_port = t4->dst_port.port;
      src_port_mask = t4->src_port.mask;
      dst_port_mask = t4->dst_port.mask;
      protocol = t4->protocol;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      vnet_flow_ip4_vxlan_t *v4 = &f->ip4_vxlan;
      ip4[0].hdr.src_addr = v4->src_addr.as_u32;
      ip4[1].hdr.src_addr = -1;
      ip4[0].hdr.dst_addr = v4->dst_addr.as_u32;
      ip4[1].hdr.dst_addr = -1;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
      item->spec = ip4;
      item->mask = ip4 + 1;

      dst_port = v4->dst_port;
      dst_port_mask = -1;
      src_port = 0;
      src_port_mask = 0;
      protocol = IP_PROTOCOL_UDP;
    }
  else
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  /* Layer 4 */
  vec_add2 (items, item, 1);
  if (protocol == IP_PROTOCOL_UDP)
    {
      udp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
      udp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
      udp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
      udp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
      item->type = RTE_FLOW_ITEM_TYPE_UDP;
      item->spec = udp;
      item->mask = udp + 1;
    }
  else if (protocol == IP_PROTOCOL_TCP)
    {
      tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
      tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
      tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
      tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
      item->type = RTE_FLOW_ITEM_TYPE_TCP;
      item->spec = tcp;
      item->mask = tcp + 1;
    }
  else
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  /* Tunnel header match */
  if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      u32 vni = f->ip4_vxlan.vni;
      vxlan_header_t spec_hdr = {
	.flags = VXLAN_FLAGS_I,
	.vni_reserved = clib_host_to_net_u32 (vni << 8)
      };
      vxlan_header_t mask_hdr = {
	.flags = 0xff,
	.vni_reserved = clib_host_to_net_u32 (((u32) - 1) << 8)
      };

      clib_memset (raw, 0, sizeof raw);
      raw[0].item.relative = 1;
      raw[0].item.length = vxlan_hdr_sz;

      clib_memcpy_fast (raw[0].val + raw_sz, &spec_hdr, vxlan_hdr_sz);
      raw[0].item.pattern = raw[0].val + raw_sz;
      clib_memcpy_fast (raw[1].val + raw_sz, &mask_hdr, vxlan_hdr_sz);
      raw[1].item.pattern = raw[1].val + raw_sz;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_RAW;
      item->spec = raw;
      item->mask = raw + 1;
    }

  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_END;

  /* Actions */
  vec_add2 (actions, action, 1);
  action->type = RTE_FLOW_ACTION_TYPE_PASSTHRU;

  vec_add2 (actions, action, 1);
  mark.id = fe->mark;
  action->type = RTE_FLOW_ACTION_TYPE_MARK;
  action->conf = &mark;

  vec_add2 (actions, action, 1);
  action->type = RTE_FLOW_ACTION_TYPE_END;

  fe->handle = rte_flow_create (xd->device_index, &ingress, items, actions,
				&xd->last_flow_error);

  if (!fe->handle)
    rv = VNET_FLOW_ERROR_NOT_SUPPORTED;

done:
  vec_free (items);
  vec_free (actions);
  return rv;
}

int
dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance,
		  u32 flow_index, uword * private_data)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_flow_t *flow = vnet_get_flow (flow_index);
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe;
  dpdk_flow_lookup_entry_t *fle = 0;
  int rv;

  /* recycle old flow lookup entries only after the main loop counter
     increases - i.e. previously DMA'ed packets were handled */
  if (vec_len (xd->parked_lookup_indexes) > 0 &&
      xd->parked_loop_count != dm->vlib_main->main_loop_count)
    {
      u32 *fl_index;

      vec_foreach (fl_index, xd->parked_lookup_indexes)
	pool_put_index (xd->flow_lookup_entries, *fl_index);
      vec_reset_length (xd->parked_lookup_indexes);
    }

  if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
    {
      ASSERT (*private_data < vec_len (xd->flow_entries));

      fe = vec_elt_at_index (xd->flow_entries, *private_data);

      if ((rv = rte_flow_destroy (xd->device_index, fe->handle,
				  &xd->last_flow_error)))
	return VNET_FLOW_ERROR_INTERNAL;

      if (fe->mark)
	{
	  /* make sure no action is taken for in-flight (marked) packets */
	  fle = pool_elt_at_index (xd->flow_lookup_entries, fe->mark);
	  clib_memset (fle, -1, sizeof (*fle));
	  vec_add1 (xd->parked_lookup_indexes, fe->mark);
	  xd->parked_loop_count = dm->vlib_main->main_loop_count;
	}

      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);

      goto disable_rx_offload;
    }

  if (op != VNET_FLOW_DEV_OP_ADD_FLOW)
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

  pool_get (xd->flow_entries, fe);
  fe->flow_index = flow->index;

  if (flow->actions == 0)
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  /* if we need to mark packets, assign one mark */
  if (flow->actions & (VNET_FLOW_ACTION_MARK |
		       VNET_FLOW_ACTION_REDIRECT_TO_NODE |
		       VNET_FLOW_ACTION_BUFFER_ADVANCE))
    {
      /* reserve slot 0 */
      if (xd->flow_lookup_entries == 0)
	pool_get_aligned (xd->flow_lookup_entries, fle,
			  CLIB_CACHE_LINE_BYTES);
      pool_get_aligned (xd->flow_lookup_entries, fle, CLIB_CACHE_LINE_BYTES);
      fe->mark = fle - xd->flow_lookup_entries;

      /* install entry in the lookup table */
      clib_memset (fle, -1, sizeof (*fle));
      if (flow->actions & VNET_FLOW_ACTION_MARK)
	fle->flow_id = flow->mark_flow_id;
      if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
	fle->next_index = flow->redirect_device_input_next_index;
      if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
	fle->buffer_advance = flow->buffer_advance;
    }
  else
    fe->mark = 0;

  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) == 0)
    {
      xd->flags |= DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }

  switch (flow->type)
    {
    case VNET_FLOW_TYPE_IP4_N_TUPLE:
    case VNET_FLOW_TYPE_IP6_N_TUPLE:
    case VNET_FLOW_TYPE_IP4_VXLAN:
      if ((rv = dpdk_flow_add (xd, flow, fe)))
	goto done;
      break;
    default:
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  *private_data = fe - xd->flow_entries;

done:
  if (rv)
    {
      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);
      if (fle)
	{
	  clib_memset (fle, -1, sizeof (*fle));
	  pool_put (xd->flow_lookup_entries, fle);
	}
    }
disable_rx_offload:
  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0
      && pool_elts (xd->flow_entries) == 0)
    {
      xd->flags &= ~DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }

  return rv;
}
Example #14
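Creates a shared-memory Ethernet interface: vec_add2 appends the interface record; the master side then initializes the request queues and preallocates the buffer-chunk pool inside the segment heap.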
int ssvm_eth_create (ssvm_eth_main_t * em, u8 * name, int is_master)
{
  // ssvm_eth_main_t * em = &ssvm_eth_main;
  ssvm_private_t * intfc;
  void * oldheap;
  clib_error_t * e;
  unix_shared_memory_queue_t * q;
  ssvm_shared_header_t * sh;
  ssvm_eth_queue_elt_t * elts;
  u32 * elt_indices;
  u8 enet_addr[6];
  int i, rv;

  vec_add2 (em->intfcs, intfc, 1);

  intfc->ssvm_size = em->segment_size;
  intfc->i_am_master = 1;
  intfc->name = name;
  intfc->my_pid = getpid();
  if (is_master == 0)
    {
      rv = ssvm_slave_init (intfc, 20 /* timeout in seconds */);
      if (rv < 0)
        return rv;
      goto create_vnet_interface;
    }

  intfc->requested_va = em->next_base_va;
  em->next_base_va += em->segment_size;
  rv = ssvm_master_init (intfc, intfc - em->intfcs /* master index */);

  if (rv < 0)
    return rv;
  
  /* OK, segment created, set up queues and so forth.  */
  
  sh = intfc->sh;
  oldheap = ssvm_push_heap (sh);

  q = unix_shared_memory_queue_init (em->queue_elts, sizeof (u32),
                                     0 /* consumer pid not interesting */,
                                     0 /* signal not sent */);
  sh->opaque [TO_MASTER_Q_INDEX] = (void *)q;
  q = unix_shared_memory_queue_init (em->queue_elts, sizeof (u32),
                                     0 /* consumer pid not interesting */,
                                     0 /* signal not sent */);
  sh->opaque [TO_SLAVE_Q_INDEX] = (void *)q;
  
  /* 
   * Preallocate the requested number of buffer chunks
   * There must be a better way to do this, etc.
   * Add some slop to avoid pool reallocation, which will not go well
   */
  elts = 0;
  elt_indices = 0;

  vec_validate_aligned (elts, em->nbuffers - 1, CLIB_CACHE_LINE_BYTES);
  vec_validate_aligned (elt_indices, em->nbuffers - 1, CLIB_CACHE_LINE_BYTES);
  
  for (i = 0; i < em->nbuffers; i++)
    elt_indices[i] = i;

  sh->opaque [CHUNK_POOL_INDEX] = (void *) elts;
  sh->opaque [CHUNK_POOL_FREELIST_INDEX] = (void *) elt_indices;
  sh->opaque [CHUNK_POOL_NFREE] = (void *)(uword) em->nbuffers;
  
  ssvm_pop_heap (oldheap);

 create_vnet_interface:

  sh = intfc->sh;

  memset (enet_addr, 0, sizeof (enet_addr));
  enet_addr[0] = 2;
  enet_addr[1] = 0xFE;
  enet_addr[2] = is_master;
  enet_addr[5] = sh->master_index;
  
  /* Let the games begin... */
  if (is_master)
      sh->ready = 1;
  return 0;
}