Example #1
replication_context_t *
replication_prep (vlib_main_t * vm,
                  vlib_buffer_t * b0,
                  u32 recycle_node_index,
                  u32 l2_packet)
{
  replication_main_t * rm = &replication_main;
  replication_context_t * ctx;
  uword cpu_number = vm->cpu_index;
  ip4_header_t * ip;
  u32 ctx_id;

  // Allocate a context, reserve context 0
  if (PREDICT_FALSE(rm->contexts[cpu_number] == 0))
    pool_get_aligned (rm->contexts[cpu_number], ctx, CLIB_CACHE_LINE_BYTES);
      
  pool_get_aligned (rm->contexts[cpu_number], ctx, CLIB_CACHE_LINE_BYTES);
  ctx_id = ctx - rm->contexts[cpu_number];

  // Save state from vlib buffer
  ctx->saved_clone_count = b0->clone_count;
  ctx->saved_free_list_index = b0->free_list_index;
  ctx->current_data = b0->current_data;

  // Set up vlib buffer hooks
  b0->clone_count = ctx_id;
  b0->free_list_index = rm->recycle_list_index;

  // Save feature state
  ctx->recycle_node_index = recycle_node_index;

  // Save vnet state
  memcpy (ctx->vnet_buffer, vnet_buffer(b0), sizeof(vnet_buffer_opaque_t));

  // Save packet contents
  ctx->l2_packet = l2_packet;
  ip = (ip4_header_t *)vlib_buffer_get_current (b0);
  if (l2_packet) {
    // Save ethernet header
    ctx->l2_header[0] = ((u64 *)ip)[0];
    ctx->l2_header[1] = ((u64 *)ip)[1];
    ctx->l2_header[2] = ((u64 *)ip)[2];
    // set ip to the true ip header
    ip = (ip4_header_t *)(((u8 *)ip) + vnet_buffer(b0)->l2.l2_len);
  }

  // Copy L3 fields. 
  // We need to save TOS for ip4 and ip6 packets. Fortunately the TOS field is 
  // in the first two bytes of both the ip4 and ip6 headers.
  ctx->ip_tos = *((u16 *)(ip));

  // Save the ip4 checksum as well. We just blindly save the corresponding two
  // bytes even for ip6 packets. 
  ctx->ip4_checksum = ip->checksum;

  return ctx;
}
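The double pool_get_aligned above is a common vppinfra idiom: index 0 is burned once so that a zero value can later act as a "no context" sentinel. Below is a minimal sketch of just that idiom, assuming the vppinfra headers are available; my_elt_t, my_pool and my_alloc are hypothetical names, not part of the replication code.

#include <vppinfra/cache.h>
#include <vppinfra/pool.h>

typedef struct { u32 value; } my_elt_t;   /* hypothetical element type */
static my_elt_t *my_pool;                 /* hypothetical pool */

static my_elt_t *
my_alloc (void)
{
  my_elt_t *e;

  /* first call: burn index 0 so that 0 can mean "no element" elsewhere */
  if (PREDICT_FALSE (my_pool == 0))
    pool_get_aligned (my_pool, e, CLIB_CACHE_LINE_BYTES);

  pool_get_aligned (my_pool, e, CLIB_CACHE_LINE_BYTES);
  return e;
}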
Example #2
session_table_t *
session_table_alloc (void)
{
  session_table_t *slt;
  pool_get_aligned (lookup_tables, slt, CLIB_CACHE_LINE_BYTES);
  clib_memset (slt, 0, sizeof (*slt));
  return slt;
}
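A hedged sketch of the index/pointer duality this allocator relies on, assuming access to the same lookup_tables pool; the *_sketch helpers below are hypothetical, not the session layer's API.

static u32
session_table_index_sketch (session_table_t * slt)
{
  /* pool elements live in one contiguous vector, so the index is
     simply the offset of the element from the pool base */
  return (u32) (slt - lookup_tables);
}

static session_table_t *
session_table_get_sketch (u32 table_index)
{
  return pool_elt_at_index (lookup_tables, table_index);
}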
Example #3
session_t *
session_alloc (u32 thread_index)
{
  session_worker_t *wrk = &session_main.wrk[thread_index];
  session_t *s;
  u8 will_expand = 0;
  pool_get_aligned_will_expand (wrk->sessions, will_expand,
				CLIB_CACHE_LINE_BYTES);
  /* If we have peekers, let them finish */
  if (PREDICT_FALSE (will_expand && vlib_num_workers ()))
    {
      clib_rwlock_writer_lock (&wrk->peekers_rw_locks);
      pool_get_aligned (wrk->sessions, s, CLIB_CACHE_LINE_BYTES);
      clib_rwlock_writer_unlock (&wrk->peekers_rw_locks);
    }
  else
    {
      pool_get_aligned (wrk->sessions, s, CLIB_CACHE_LINE_BYTES);
    }
  clib_memset (s, 0, sizeof (*s));
  s->session_index = s - wrk->sessions;
  s->thread_index = thread_index;
  return s;
}
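The will_expand check matters because pool expansion may reallocate wrk->sessions. Below is a hedged sketch of the reader ("peeker") side that the writer lock guards against, reusing the structures above; session_is_valid_sketch is a hypothetical helper.

static u8
session_is_valid_sketch (u32 thread_index, u32 session_index)
{
  session_worker_t *wrk = &session_main.wrk[thread_index];
  u8 valid;

  /* readers hold the rwlock so that a concurrent expansion in
     session_alloc cannot reallocate wrk->sessions underneath them */
  clib_rwlock_reader_lock (&wrk->peekers_rw_locks);
  valid = !pool_is_free_index (wrk->sessions, session_index);
  clib_rwlock_reader_unlock (&wrk->peekers_rw_locks);

  return valid;
}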
Example #4
static replicate_t *
replicate_alloc_i (void)
{
    replicate_t *rep;

    pool_get_aligned(replicate_pool, rep, CLIB_CACHE_LINE_BYTES);
    clib_memset(rep, 0, sizeof(*rep));

    vlib_validate_combined_counter(&(replicate_main.repm_counters),
                                   replicate_get_index(rep));
    vlib_zero_combined_counter(&(replicate_main.repm_counters),
                               replicate_get_index(rep));

    return (rep);
}
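The validate/zero pair above grows the combined-counter vector to cover the new pool index and clears it. A hedged sketch of reading that counter pair back, assuming the same replicate_main layout; the function name is hypothetical.

static void
replicate_counters_sketch (index_t repi, vlib_counter_t * result)
{
  /* fetch the packet/byte pair accumulated for this replicate entry */
  vlib_get_combined_counter (&replicate_main.repm_counters, repi, result);
}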
Example #5
index_t
mfib_itf_create (fib_node_index_t path_index,
                 mfib_itf_flags_t mfi_flags)
{
    mfib_itf_t *mfib_itf;

    pool_get_aligned(mfib_itf_pool, mfib_itf,
                     CLIB_CACHE_LINE_BYTES);

    mfib_itf->mfi_sw_if_index = fib_path_get_resolving_interface(path_index);
    mfib_itf->mfi_si = INDEX_INVALID;

    /*
     * add the path index to the per-path hash
     */
    mfib_itf->mfi_hash = hash_set(mfib_itf->mfi_hash, path_index, mfi_flags);

    /*
     * the combined flags from all the paths is from just the one contributor
     */
    mfib_itf->mfi_flags = mfi_flags;

    return (mfib_itf - mfib_itf_pool);
}
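A hedged sketch of the matching teardown, assuming the same pool and per-path hash; mfib_itf_free_sketch is a hypothetical name, not the mfib API.

static void
mfib_itf_free_sketch (index_t mfi)
{
  mfib_itf_t *mfib_itf = pool_elt_at_index (mfib_itf_pool, mfi);

  /* release the per-path hash before returning the element to the pool */
  hash_free (mfib_itf->mfi_hash);
  pool_put (mfib_itf_pool, mfib_itf);
}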
Example #6
clib_error_t *
policer_add_del (vlib_main_t *vm,
                 u8 * name,
                 sse2_qos_pol_cfg_params_st * cfg,
                 u32 * policer_index,
                 u8 is_add)
{
  vnet_policer_main_t *pm = &vnet_policer_main;
  policer_read_response_type_st test_policer;
  policer_read_response_type_st * policer;
  uword * p;
  u32 pi;
  int rv;

  p = hash_get_mem (pm->policer_config_by_name, name);

  if (is_add == 0)
    {
      if (p == 0)
        {
          vec_free(name);
          return clib_error_return (0, "No such policer configuration");
        }
      hash_unset_mem (pm->policer_config_by_name, name);
      hash_unset_mem (pm->policer_index_by_name, name);
      vec_free(name);
      return 0;
    }

  if (p != 0)
    {
      vec_free(name);
      return clib_error_return (0, "Policer already exists");
    }

  /* Vet the configuration before adding it to the table */
  rv = sse2_pol_logical_2_physical (cfg, &test_policer);

  if (rv == 0)
    {
      policer_read_response_type_st *pp;
      sse2_qos_pol_cfg_params_st *cp;

      pool_get (pm->configs, cp);
      pool_get (pm->policer_templates, pp);

      ASSERT (cp - pm->configs == pp - pm->policer_templates);

      clib_memcpy (cp, cfg, sizeof (*cp));
      clib_memcpy (pp, &test_policer, sizeof (*pp));

      hash_set_mem (pm->policer_config_by_name, name, cp - pm->configs);
      pool_get_aligned (pm->policers, policer, CLIB_CACHE_LINE_BYTES);
      policer[0] = pp[0];
      pi = policer - pm->policers;
      hash_set_mem (pm->policer_index_by_name, name, pi);
      *policer_index = pi;
    }
  else
    {
      vec_free (name);
      return clib_error_return (0, "Config failed sanity check");
    }

  return 0;
}
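A hedged sketch of how the two hashes populated above would typically be consulted later; policer_get_by_name_sketch is a hypothetical helper, not the policer API.

static policer_read_response_type_st *
policer_get_by_name_sketch (vnet_policer_main_t * pm, u8 * name)
{
  uword *p = hash_get_mem (pm->policer_index_by_name, name);

  if (p == 0)
    return 0;
  /* the hash stores the pool index assigned by pool_get_aligned above */
  return pool_elt_at_index (pm->policers, p[0]);
}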
Example #7
static u32
add_del_ip_tunnel (vnet_lisp_gpe_add_del_fwd_entry_args_t *a,
                   u32 * tun_index_res)
{
  lisp_gpe_main_t * lgm = &lisp_gpe_main;
  lisp_gpe_tunnel_t *t = 0;
  uword * p;
  int rv;
  lisp_gpe_tunnel_key_t key;

  /* prepare tunnel key */
  memset(&key, 0, sizeof(key));
  ip_prefix_copy(&key.eid, &gid_address_ippref(&a->deid));
  ip_address_copy(&key.dst_loc, &a->dlocator);
  key.iid = clib_host_to_net_u32 (a->vni);

  p = mhash_get (&lgm->lisp_gpe_tunnel_by_key, &key);

  if (a->is_add)
    {
      /* adding a tunnel: tunnel must not already exist */
      if (p)
        return VNET_API_ERROR_INVALID_VALUE;

      if (a->decap_next_index >= LISP_GPE_INPUT_N_NEXT)
        return VNET_API_ERROR_INVALID_DECAP_NEXT;

      pool_get_aligned (lgm->tunnels, t, CLIB_CACHE_LINE_BYTES);
      memset (t, 0, sizeof (*t));

      /* copy from arg structure */
#define _(x) t->x = a->x;
      foreach_copy_field;
#undef _

      ip_address_copy(&t->src, &a->slocator);
      ip_address_copy(&t->dst, &a->dlocator);

      /* if vni is non-default */
      if (a->vni)
        {
          t->flags = LISP_GPE_FLAGS_I;
          t->vni = a->vni;
        }

      t->flags |= LISP_GPE_FLAGS_P;
      t->next_protocol = ip_prefix_version(&key.eid) == IP4 ?
          LISP_GPE_NEXT_PROTO_IP4 : LISP_GPE_NEXT_PROTO_IP6;

      rv = lisp_gpe_rewrite (t);

      if (rv)
        {
          pool_put(lgm->tunnels, t);
          return rv;
        }

      mhash_set(&lgm->lisp_gpe_tunnel_by_key, &key, t - lgm->tunnels, 0);

      /* return tunnel index */
      if (tun_index_res)
        tun_index_res[0] = t - lgm->tunnels;
    }
  else
    {
      /* deleting a tunnel: tunnel must exist */
      if (!p)
        {
          clib_warning("Tunnel for eid %U doesn't exist!", format_gid_address,
                       &a->deid);
          return VNET_API_ERROR_NO_SUCH_ENTRY;
        }

      t = pool_elt_at_index(lgm->tunnels, p[0]);

      mhash_unset(&lgm->lisp_gpe_tunnel_by_key, &key, 0);

      vec_free(t->rewrite);
      pool_put(lgm->tunnels, t);
    }

  return 0;
}
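A hedged sketch of a read-side lookup using the same key scheme as above; lisp_gpe_tunnel_get_sketch is a hypothetical name.

static lisp_gpe_tunnel_t *
lisp_gpe_tunnel_get_sketch (lisp_gpe_tunnel_key_t * key)
{
  lisp_gpe_main_t *lgm = &lisp_gpe_main;
  uword *p = mhash_get (&lgm->lisp_gpe_tunnel_by_key, key);

  if (p == 0)
    return 0;
  /* the mhash stores the pool index recorded by mhash_set above */
  return pool_elt_at_index (lgm->tunnels, p[0]);
}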
Example #8
File: flow.c Project: vpp-dev/vpp
static int
dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
{
  struct rte_flow_item_ipv4 ip4[2] = { };
  struct rte_flow_item_ipv6 ip6[2] = { };
  struct rte_flow_item_udp udp[2] = { };
  struct rte_flow_item_tcp tcp[2] = { };
  struct rte_flow_action_mark mark = { 0 };
  struct rte_flow_item *item, *items = 0;
  struct rte_flow_action *action, *actions = 0;

  enum
  {
    vxlan_hdr_sz = sizeof (vxlan_header_t),
    raw_sz = sizeof (struct rte_flow_item_raw)
  };

  union
  {
    struct rte_flow_item_raw item;
    u8 val[raw_sz + vxlan_hdr_sz];
  } raw[2];

  u16 src_port, dst_port, src_port_mask, dst_port_mask;
  u8 protocol;
  int rv = 0;

  if (f->actions & (~xd->supported_flow_actions))
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

  /* Match items */
  /* Ethernet */
  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_ETH;
  item->spec = any_eth;
  item->mask = any_eth + 1;

  /* VLAN */
  if (f->type != VNET_FLOW_TYPE_IP4_VXLAN)
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_VLAN;
      item->spec = any_vlan;
      item->mask = any_vlan + 1;
    }

  /* IP */
  vec_add2 (items, item, 1);
  if (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE)
    {
      vnet_flow_ip6_n_tuple_t *t6 = &f->ip6_n_tuple;
      clib_memcpy_fast (ip6[0].hdr.src_addr, &t6->src_addr.addr, 16);
      clib_memcpy_fast (ip6[1].hdr.src_addr, &t6->src_addr.mask, 16);
      clib_memcpy_fast (ip6[0].hdr.dst_addr, &t6->dst_addr.addr, 16);
      clib_memcpy_fast (ip6[1].hdr.dst_addr, &t6->dst_addr.mask, 16);
      item->type = RTE_FLOW_ITEM_TYPE_IPV6;
      item->spec = ip6;
      item->mask = ip6 + 1;

      src_port = t6->src_port.port;
      dst_port = t6->dst_port.port;
      src_port_mask = t6->src_port.mask;
      dst_port_mask = t6->dst_port.mask;
      protocol = t6->protocol;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE)
    {
      vnet_flow_ip4_n_tuple_t *t4 = &f->ip4_n_tuple;
      ip4[0].hdr.src_addr = t4->src_addr.addr.as_u32;
      ip4[1].hdr.src_addr = t4->src_addr.mask.as_u32;
      ip4[0].hdr.dst_addr = t4->dst_addr.addr.as_u32;
      ip4[1].hdr.dst_addr = t4->dst_addr.mask.as_u32;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
      item->spec = ip4;
      item->mask = ip4 + 1;

      src_port = t4->src_port.port;
      dst_port = t4->dst_port.port;
      src_port_mask = t4->src_port.mask;
      dst_port_mask = t4->dst_port.mask;
      protocol = t4->protocol;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      vnet_flow_ip4_vxlan_t *v4 = &f->ip4_vxlan;
      ip4[0].hdr.src_addr = v4->src_addr.as_u32;
      ip4[1].hdr.src_addr = -1;
      ip4[0].hdr.dst_addr = v4->dst_addr.as_u32;
      ip4[1].hdr.dst_addr = -1;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
      item->spec = ip4;
      item->mask = ip4 + 1;

      dst_port = v4->dst_port;
      dst_port_mask = -1;
      src_port = 0;
      src_port_mask = 0;
      protocol = IP_PROTOCOL_UDP;
    }
  else
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  /* Layer 4 */
  vec_add2 (items, item, 1);
  if (protocol == IP_PROTOCOL_UDP)
    {
      udp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
      udp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
      udp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
      udp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
      item->type = RTE_FLOW_ITEM_TYPE_UDP;
      item->spec = udp;
      item->mask = udp + 1;
    }
  else if (protocol == IP_PROTOCOL_TCP)
    {
      tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
      tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
      tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
      tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
      item->type = RTE_FLOW_ITEM_TYPE_TCP;
      item->spec = tcp;
      item->mask = tcp + 1;
    }
  else
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  /* Tunnel header match */
  if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      u32 vni = f->ip4_vxlan.vni;
      vxlan_header_t spec_hdr = {
	.flags = VXLAN_FLAGS_I,
	.vni_reserved = clib_host_to_net_u32 (vni << 8)
      };
      vxlan_header_t mask_hdr = {
	.flags = 0xff,
	.vni_reserved = clib_host_to_net_u32 (((u32) - 1) << 8)
      };

      clib_memset (raw, 0, sizeof raw);
      raw[0].item.relative = 1;
      raw[0].item.length = vxlan_hdr_sz;

      clib_memcpy_fast (raw[0].val + raw_sz, &spec_hdr, vxlan_hdr_sz);
      raw[0].item.pattern = raw[0].val + raw_sz;
      clib_memcpy_fast (raw[1].val + raw_sz, &mask_hdr, vxlan_hdr_sz);
      raw[1].item.pattern = raw[1].val + raw_sz;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_RAW;
      item->spec = raw;
      item->mask = raw + 1;
    }

  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_END;

  /* Actions */
  vec_add2 (actions, action, 1);
  action->type = RTE_FLOW_ACTION_TYPE_PASSTHRU;

  vec_add2 (actions, action, 1);
  mark.id = fe->mark;
  action->type = RTE_FLOW_ACTION_TYPE_MARK;
  action->conf = &mark;

  vec_add2 (actions, action, 1);
  action->type = RTE_FLOW_ACTION_TYPE_END;

  fe->handle = rte_flow_create (xd->device_index, &ingress, items, actions,
				&xd->last_flow_error);

  if (!fe->handle)
    rv = VNET_FLOW_ERROR_NOT_SUPPORTED;

done:
  vec_free (items);
  vec_free (actions);
  return rv;
}

int
dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance,
		  u32 flow_index, uword * private_data)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_flow_t *flow = vnet_get_flow (flow_index);
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe;
  dpdk_flow_lookup_entry_t *fle = 0;
  int rv;

  /* recycle old flow lookup entries only after the main loop counter
     increases - i.e. previously DMA'ed packets were handled */
  if (vec_len (xd->parked_lookup_indexes) > 0 &&
      xd->parked_loop_count != dm->vlib_main->main_loop_count)
    {
      u32 *fl_index;

      vec_foreach (fl_index, xd->parked_lookup_indexes)
	pool_put_index (xd->flow_lookup_entries, *fl_index);
      vec_reset_length (xd->parked_lookup_indexes);
    }

  if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
    {
      ASSERT (*private_data < vec_len (xd->flow_entries));

      fe = vec_elt_at_index (xd->flow_entries, *private_data);

      if ((rv = rte_flow_destroy (xd->device_index, fe->handle,
				  &xd->last_flow_error)))
	return VNET_FLOW_ERROR_INTERNAL;

      if (fe->mark)
	{
	  /* make sure no action is taken for in-flight (marked) packets */
	  fle = pool_elt_at_index (xd->flow_lookup_entries, fe->mark);
	  clib_memset (fle, -1, sizeof (*fle));
	  vec_add1 (xd->parked_lookup_indexes, fe->mark);
	  xd->parked_loop_count = dm->vlib_main->main_loop_count;
	}

      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);

      goto disable_rx_offload;
    }

  if (op != VNET_FLOW_DEV_OP_ADD_FLOW)
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

  pool_get (xd->flow_entries, fe);
  fe->flow_index = flow->index;

  if (flow->actions == 0)
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  /* if we need to mark packets, assign one mark */
  if (flow->actions & (VNET_FLOW_ACTION_MARK |
		       VNET_FLOW_ACTION_REDIRECT_TO_NODE |
		       VNET_FLOW_ACTION_BUFFER_ADVANCE))
    {
      /* reserve slot 0 */
      if (xd->flow_lookup_entries == 0)
	pool_get_aligned (xd->flow_lookup_entries, fle,
			  CLIB_CACHE_LINE_BYTES);
      pool_get_aligned (xd->flow_lookup_entries, fle, CLIB_CACHE_LINE_BYTES);
      fe->mark = fle - xd->flow_lookup_entries;

      /* install entry in the lookup table */
      clib_memset (fle, -1, sizeof (*fle));
      if (flow->actions & VNET_FLOW_ACTION_MARK)
	fle->flow_id = flow->mark_flow_id;
      if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
	fle->next_index = flow->redirect_device_input_next_index;
      if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
	fle->buffer_advance = flow->buffer_advance;
    }
  else
    fe->mark = 0;

  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) == 0)
    {
      xd->flags |= DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }

  switch (flow->type)
    {
    case VNET_FLOW_TYPE_IP4_N_TUPLE:
    case VNET_FLOW_TYPE_IP6_N_TUPLE:
    case VNET_FLOW_TYPE_IP4_VXLAN:
      if ((rv = dpdk_flow_add (xd, flow, fe)))
	goto done;
      break;
    default:
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  *private_data = fe - xd->flow_entries;

done:
  if (rv)
    {
      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);
      if (fle)
	{
	  clib_memset (fle, -1, sizeof (*fle));
	  pool_put (xd->flow_lookup_entries, fle);
	}
    }
disable_rx_offload:
  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0
      && pool_elts (xd->flow_entries) == 0)
    {
      xd->flags &= ~DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }

  return rv;
}
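A hedged sketch of the consumer-side convention implied by reserving slot 0 of flow_lookup_entries: a mark of 0 means "no entry", so only non-zero marks are resolved. The helper below is hypothetical, not the DPDK plugin's API.

static dpdk_flow_lookup_entry_t *
dpdk_flow_lookup_sketch (dpdk_device_t * xd, u32 mark)
{
  /* slot 0 is reserved at flow-add time, so mark == 0 means "no entry" */
  if (mark == 0 || pool_is_free_index (xd->flow_lookup_entries, mark))
    return 0;
  return pool_elt_at_index (xd->flow_lookup_entries, mark);
}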