Example #1
static uword
send_dhcp6_pd_client_message_process (vlib_main_t * vm,
				      vlib_node_runtime_t * rt,
				      vlib_frame_t * f0)
{
  dhcp6_pd_client_main_t *cm = &dhcp6_pd_client_main;
  dhcp6_pd_client_state_t *client_state;
  uword *event_data = 0;
  f64 sleep_time = 1e9;
  f64 current_time;
  f64 due_time;
  f64 dt = 0;
  int i;

  while (true)
    {
      vlib_process_wait_for_event_or_clock (vm, sleep_time);
      vlib_process_get_events (vm, &event_data);
      vec_reset_length (event_data);

      current_time = vlib_time_now (vm);
      do
	{
	  due_time = current_time + 1e9;
	  for (i = 0; i < vec_len (cm->client_state_by_sw_if_index); i++)
	    {
	      client_state = &cm->client_state_by_sw_if_index[i];
	      if (!client_state->entry_valid)
		continue;
	      if (check_pd_send_client_message
		  (vm, client_state, current_time, &dt) && (dt < due_time))
		due_time = dt;
	    }
	  current_time = vlib_time_now (vm);
	}
      while (due_time < current_time);

      sleep_time = due_time - current_time;
    }

  return 0;
}
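This example shows the standard shape of a VPP process node: block in vlib_process_wait_for_event_or_clock(), drain pending events, walk the per-interface client state to find the earliest due time, and sleep until then. A function with this signature is normally attached to the graph as a process-type node; the sketch below only illustrates that registration pattern (the node variable and name are assumptions, not taken from the example).

#include <vlib/vlib.h>

/* Hypothetical registration of the process above as a VPP process node;
   the variable name and node name are illustrative assumptions. */
VLIB_REGISTER_NODE (send_dhcp6_pd_client_message_process_node) = {
  .function = send_dhcp6_pd_client_message_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "send-dhcp6-pd-client-message-process",
};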
Example #2
static uword
perfmon_periodic_process (vlib_main_t * vm,
			  vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  perfmon_main_t *pm = &perfmon_main;
  f64 now;
  uword *event_data = 0;
  uword event_type;
  int i;

  while (1)
    {
      if (pm->state == PERFMON_STATE_RUNNING)
	vlib_process_wait_for_event_or_clock (vm, pm->timeout_interval);
      else
	vlib_process_wait_for_event (vm);

      now = vlib_time_now (vm);

      event_type = vlib_process_get_events (vm, (uword **) & event_data);

      switch (event_type)
	{
	case PERFMON_START:
	  for (i = 0; i < vec_len (event_data); i++)
	    start_event (pm, now, event_data[i]);
	  break;

	  /* Handle timeout */
	case ~0:
	  handle_timeout (vm, pm, now);
	  break;

	default:
	  clib_warning ("Unexpected event %d", event_type);
	  break;
	}
      vec_reset_length (event_data);
    }
  return 0;			/* or not */
}
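Here the same process-node pattern adds an event-type dispatch: ~0 is the timeout pseudo-event, and PERFMON_START carries per-event data. On the sending side such events would be raised with vlib_process_signal_event(); a minimal sketch, assuming the process's registration struct is visible (perfmon_periodic_node is a placeholder name):

/* Assumed registration handle for the process above (placeholder name). */
extern vlib_node_registration_t perfmon_periodic_node;

/* Minimal sketch: wake the periodic process with a PERFMON_START event. */
static void
perfmon_signal_start (vlib_main_t * vm, uword event_data)
{
  vlib_process_signal_event (vm, perfmon_periodic_node.index,
			     PERFMON_START, event_data);
}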
Example #3
static void
unix_signal_handler (int signum, siginfo_t * si, ucontext_t * uc)
{
  uword fatal = 0;

  /* These come in handy when looking at core files from optimized images */
  last_signum = signum;
  last_faulting_address = (uword) si->si_addr;

  syslog_msg = format (syslog_msg, "received signal %U, PC %U",
		       format_signal, signum, format_ucontext_pc, uc);

  if (signum == SIGSEGV)
    syslog_msg = format (syslog_msg, ", faulting address %p", si->si_addr);

  switch (signum)
    {
      /* these (caught) signals cause the application to exit */
    case SIGTERM:
      /*
       * Ignore SIGTERM if it's sent before we're ready.
       */
      if (unix_main.vlib_main && unix_main.vlib_main->main_loop_exit_set)
	{
	  syslog (LOG_ERR | LOG_DAEMON, "received SIGTERM, exiting...");
	  unix_main.vlib_main->main_loop_exit_now = 1;
	}
      else
	syslog (LOG_ERR | LOG_DAEMON, "IGNORE early SIGTERM...");
      break;
    case SIGQUIT:
    case SIGINT:
    case SIGILL:
    case SIGBUS:
    case SIGSEGV:
    case SIGHUP:
    case SIGFPE:
    case SIGABRT:
      fatal = 1;
      break;

      /* by default, print a message and continue */
    default:
      fatal = 0;
      break;
    }

#ifdef CLIB_GCOV
  /*
   * Test framework sends SIGTERM, so we need to flush the
   * code coverage stats here.
   */
  {
    void __gcov_flush (void);
    __gcov_flush ();
  }
#endif

  /* Null terminate. */
  vec_add1 (syslog_msg, 0);

  if (fatal)
    {
      syslog (LOG_ERR | LOG_DAEMON, "%s", syslog_msg);

      /* Address of callers: outer first, inner last. */
      uword callers[15];
      uword n_callers = clib_backtrace (callers, ARRAY_LEN (callers), 0);
      int i;
      for (i = 0; i < n_callers; i++)
	{
	  vec_reset_length (syslog_msg);

	  syslog_msg =
	    format (syslog_msg, "#%-2d 0x%016lx %U%c", i, callers[i],
		    format_clib_elf_symbol_with_address, callers[i], 0);

	  syslog (LOG_ERR | LOG_DAEMON, "%s", syslog_msg);
	}

      /* have to remove SIGABRT to avoid recursion - os_exit calling abort() */
      unsetup_signal_handlers (SIGABRT);

      os_exit (1);
    }
  else
    clib_warning ("%s", syslog_msg);

}
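A handler with this (int, siginfo_t *, ucontext_t *) signature is installed through sigaction() with SA_SIGINFO set; the installation code is not part of the example, so the sketch below is only the standard POSIX pattern (sa_sigaction takes void * as its third argument, hence the cast):

#include <signal.h>
#include <string.h>

/* Illustrative sketch: install unix_signal_handler for one signal number. */
static int
install_signal_handler (int signum)
{
  struct sigaction sa;

  memset (&sa, 0, sizeof (sa));
  sa.sa_sigaction = (void (*) (int, siginfo_t *, void *)) unix_signal_handler;
  sa.sa_flags = SA_SIGINFO;

  return sigaction (signum, &sa, 0);
}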
Example #4
static uword
startup_config_process (vlib_main_t * vm,
			vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  unix_main_t *um = &unix_main;
  u8 *buf = 0;
  uword l;
  word n = 1;

  vlib_process_suspend (vm, 2.0);

  while (um->unix_config_complete == 0)
    vlib_process_suspend (vm, 0.1);

  if (um->startup_config_filename)
    {
      unformat_input_t sub_input;
      int fd;
      struct stat s;
      char *fn = (char *) um->startup_config_filename;

      fd = open (fn, O_RDONLY);
      if (fd < 0)
	{
	  clib_warning ("failed to open `%s'", fn);
	  return 0;
	}

      if (fstat (fd, &s) < 0)
	{
	  clib_warning ("failed to stat `%s'", fn);
	bail:
	  close (fd);
	  return 0;
	}

      if (!(S_ISREG (s.st_mode) || S_ISLNK (s.st_mode)))
	{
	  clib_warning ("not a regular file: `%s'", fn);
	  goto bail;
	}

      while (n > 0)
	{
	  l = vec_len (buf);
	  vec_resize (buf, 4096);
	  n = read (fd, buf + l, 4096);
	  if (n > 0)
	    {
	      _vec_len (buf) = l + n;
	      if (n < 4096)
		break;
	    }
	  else
	    break;
	}
      if (um->log_fd && vec_len (buf))
	{
	  u8 *lv = 0;
	  lv = format (lv, "%U: ***** Startup Config *****\n%v",
		       format_timeval, 0 /* current bat-time */ ,
		       0 /* current bat-format */ ,
		       buf);
	  {
	    int rv __attribute__ ((unused)) =
	      write (um->log_fd, lv, vec_len (lv));
	  }
	  vec_reset_length (lv);
	  lv = format (lv, "%U: ***** End Startup Config *****\n",
		       format_timeval, 0 /* current bat-time */ ,
		       0 /* current bat-format */ );
	  {
	    int rv __attribute__ ((unused)) =
	      write (um->log_fd, lv, vec_len (lv));
	  }
	  vec_free (lv);
	}

      if (vec_len (buf))
	{
	  unformat_init_vector (&sub_input, buf);
	  vlib_cli_input (vm, &sub_input, 0, 0);
	  /* frees buf for us */
	  unformat_free (&sub_input);
	}
      close (fd);
    }
  return 0;
}
Example #5
File: pcap.c  Project: JehandadKhan/vpp
clib_error_t *
pcap_write (pcap_main_t * pm)
{
  clib_error_t * error = 0;

  if (! (pm->flags & PCAP_MAIN_INIT_DONE))
    {
      pcap_file_header_t fh;
      int n;

      if (! pm->file_name)
	pm->file_name = "/tmp/vnet.pcap";

      pm->file_descriptor = open (pm->file_name, O_CREAT | O_TRUNC | O_WRONLY, 0664);
      if (pm->file_descriptor < 0)
	{
	  error = clib_error_return_unix (0, "failed to open `%s'", pm->file_name);
	  goto done;
	}

      pm->flags |= PCAP_MAIN_INIT_DONE;
      pm->n_packets_captured = 0;
      pm->n_pcap_data_written = 0;

      /* Write file header. */
      memset (&fh, 0, sizeof (fh));
      fh.magic = 0xa1b2c3d4;
      fh.major_version = 2;
      fh.minor_version = 4;
      fh.time_zone = 0;
      fh.max_packet_size_in_bytes = 1 << 16;
      fh.packet_type = pm->packet_type;
      n = write (pm->file_descriptor, &fh, sizeof (fh));
      if (n != sizeof (fh))
	{
	  if (n < 0)
	    error = clib_error_return_unix (0, "write file header `%s'", pm->file_name);
	  else
	    error = clib_error_return (0, "short write of file header `%s'", pm->file_name);
	  goto done;
	}
    }

  do {
    int n = vec_len (pm->pcap_data) - pm->n_pcap_data_written;

    if (n > 0)
      {
	n = write (pm->file_descriptor,
		   vec_elt_at_index (pm->pcap_data, pm->n_pcap_data_written),
		   n);
	if (n < 0 && unix_error_is_fatal (errno))
	  {
	    error = clib_error_return_unix (0, "write `%s'", pm->file_name);
	    goto done;
	  }
      }
    if (n > 0)
      pm->n_pcap_data_written += n;
    if (pm->n_pcap_data_written >= vec_len (pm->pcap_data))
      {
        vec_reset_length (pm->pcap_data);
	break;
      }
  } while (pm->n_packets_captured >= pm->n_packets_to_capture);

  if (pm->n_packets_captured >= pm->n_packets_to_capture)
    {
      close (pm->file_descriptor);
      pm->flags &= ~PCAP_MAIN_INIT_DONE;
      pm->file_descriptor = -1;
    }

 done:
  if (error)
    {
      if (pm->file_descriptor >= 0)
	close (pm->file_descriptor);
    }
  return error;
}
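pcap_write() only flushes bytes that callers have already serialized into pm->pcap_data, after emitting the global pcap file header on first use. In the pcap format, each packet in that byte stream is preceded by a per-record header; a sketch of that standard layout is below (the type name and field names are illustrative, and VPP's own packet-header type may differ):

/* Standard libpcap per-packet record header that precedes each packet's
   bytes in the capture stream; names here are illustrative only. */
typedef struct
{
  u32 time_in_sec;		/* timestamp, seconds */
  u32 time_in_usec;		/* timestamp, microseconds */
  u32 n_bytes_in_file;		/* packet bytes stored in the file */
  u32 n_bytes_in_packet;	/* original packet length on the wire */
} pcap_record_header_t;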
Example #6
File: flow.c  Project: vpp-dev/vpp
static int
dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
{
  struct rte_flow_item_ipv4 ip4[2] = { };
  struct rte_flow_item_ipv6 ip6[2] = { };
  struct rte_flow_item_udp udp[2] = { };
  struct rte_flow_item_tcp tcp[2] = { };
  struct rte_flow_action_mark mark = { 0 };
  struct rte_flow_item *item, *items = 0;
  struct rte_flow_action *action, *actions = 0;

  enum
  {
    vxlan_hdr_sz = sizeof (vxlan_header_t),
    raw_sz = sizeof (struct rte_flow_item_raw)
  };

  union
  {
    struct rte_flow_item_raw item;
    u8 val[raw_sz + vxlan_hdr_sz];
  } raw[2];

  u16 src_port, dst_port, src_port_mask, dst_port_mask;
  u8 protocol;
  int rv = 0;

  if (f->actions & (~xd->supported_flow_actions))
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

  /* Match items */
  /* Ethernet */
  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_ETH;
  item->spec = any_eth;
  item->mask = any_eth + 1;

  /* VLAN */
  if (f->type != VNET_FLOW_TYPE_IP4_VXLAN)
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_VLAN;
      item->spec = any_vlan;
      item->mask = any_vlan + 1;
    }

  /* IP */
  vec_add2 (items, item, 1);
  if (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE)
    {
      vnet_flow_ip6_n_tuple_t *t6 = &f->ip6_n_tuple;
      clib_memcpy_fast (ip6[0].hdr.src_addr, &t6->src_addr.addr, 16);
      clib_memcpy_fast (ip6[1].hdr.src_addr, &t6->src_addr.mask, 16);
      clib_memcpy_fast (ip6[0].hdr.dst_addr, &t6->dst_addr.addr, 16);
      clib_memcpy_fast (ip6[1].hdr.dst_addr, &t6->dst_addr.mask, 16);
      item->type = RTE_FLOW_ITEM_TYPE_IPV6;
      item->spec = ip6;
      item->mask = ip6 + 1;

      src_port = t6->src_port.port;
      dst_port = t6->dst_port.port;
      src_port_mask = t6->src_port.mask;
      dst_port_mask = t6->dst_port.mask;
      protocol = t6->protocol;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE)
    {
      vnet_flow_ip4_n_tuple_t *t4 = &f->ip4_n_tuple;
      ip4[0].hdr.src_addr = t4->src_addr.addr.as_u32;
      ip4[1].hdr.src_addr = t4->src_addr.mask.as_u32;
      ip4[0].hdr.dst_addr = t4->dst_addr.addr.as_u32;
      ip4[1].hdr.dst_addr = t4->dst_addr.mask.as_u32;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
      item->spec = ip4;
      item->mask = ip4 + 1;

      src_port = t4->src_port.port;
      dst_port = t4->dst_port.port;
      src_port_mask = t4->src_port.mask;
      dst_port_mask = t4->dst_port.mask;
      protocol = t4->protocol;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      vnet_flow_ip4_vxlan_t *v4 = &f->ip4_vxlan;
      ip4[0].hdr.src_addr = v4->src_addr.as_u32;
      ip4[1].hdr.src_addr = -1;
      ip4[0].hdr.dst_addr = v4->dst_addr.as_u32;
      ip4[1].hdr.dst_addr = -1;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
      item->spec = ip4;
      item->mask = ip4 + 1;

      dst_port = v4->dst_port;
      dst_port_mask = -1;
      src_port = 0;
      src_port_mask = 0;
      protocol = IP_PROTOCOL_UDP;
    }
  else
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  /* Layer 4 */
  vec_add2 (items, item, 1);
  if (protocol == IP_PROTOCOL_UDP)
    {
      udp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
      udp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
      udp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
      udp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
      item->type = RTE_FLOW_ITEM_TYPE_UDP;
      item->spec = udp;
      item->mask = udp + 1;
    }
  else if (protocol == IP_PROTOCOL_TCP)
    {
      tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
      tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
      tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
      tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
      item->type = RTE_FLOW_ITEM_TYPE_TCP;
      item->spec = tcp;
      item->mask = tcp + 1;
    }
  else
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  /* Tunnel header match */
  if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      u32 vni = f->ip4_vxlan.vni;
      vxlan_header_t spec_hdr = {
	.flags = VXLAN_FLAGS_I,
	.vni_reserved = clib_host_to_net_u32 (vni << 8)
      };
      vxlan_header_t mask_hdr = {
	.flags = 0xff,
	.vni_reserved = clib_host_to_net_u32 (((u32) - 1) << 8)
      };

      clib_memset (raw, 0, sizeof raw);
      raw[0].item.relative = 1;
      raw[0].item.length = vxlan_hdr_sz;

      clib_memcpy_fast (raw[0].val + raw_sz, &spec_hdr, vxlan_hdr_sz);
      raw[0].item.pattern = raw[0].val + raw_sz;
      clib_memcpy_fast (raw[1].val + raw_sz, &mask_hdr, vxlan_hdr_sz);
      raw[1].item.pattern = raw[1].val + raw_sz;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_RAW;
      item->spec = raw;
      item->mask = raw + 1;
    }

  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_END;

  /* Actions */
  vec_add2 (actions, action, 1);
  action->type = RTE_FLOW_ACTION_TYPE_PASSTHRU;

  vec_add2 (actions, action, 1);
  mark.id = fe->mark;
  action->type = RTE_FLOW_ACTION_TYPE_MARK;
  action->conf = &mark;

  vec_add2 (actions, action, 1);
  action->type = RTE_FLOW_ACTION_TYPE_END;

  fe->handle = rte_flow_create (xd->device_index, &ingress, items, actions,
				&xd->last_flow_error);

  if (!fe->handle)
    rv = VNET_FLOW_ERROR_NOT_SUPPORTED;

done:
  vec_free (items);
  vec_free (actions);
  return rv;
}
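Every match item in dpdk_flow_add() follows the rte_flow spec/mask convention: a two-element array whose element 0 is the value to match and element 1 is the mask, so item->spec points at the array and item->mask at array + 1. The any_eth and any_vlan identifiers are not shown in the example; they presumably follow the same convention as all-zero (match-anything) pairs, roughly like this sketch:

/* Presumed definitions from elsewhere in flow.c (an assumption): all-zero
   spec/mask pairs, i.e. "match any" Ethernet and VLAN headers. */
static const struct rte_flow_item_eth any_eth[2] = { };
static const struct rte_flow_item_vlan any_vlan[2] = { };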

int
dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance,
		  u32 flow_index, uword * private_data)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_flow_t *flow = vnet_get_flow (flow_index);
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe;
  dpdk_flow_lookup_entry_t *fle = 0;
  int rv;

  /* recycle old flow lookup entries only after the main loop counter
     increases - i.e. previously DMA'ed packets were handled */
  if (vec_len (xd->parked_lookup_indexes) > 0 &&
      xd->parked_loop_count != dm->vlib_main->main_loop_count)
    {
      u32 *fl_index;

      vec_foreach (fl_index, xd->parked_lookup_indexes)
	pool_put_index (xd->flow_lookup_entries, *fl_index);
      vec_reset_length (xd->parked_lookup_indexes);
    }

  if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
    {
      ASSERT (*private_data < vec_len (xd->flow_entries));

      fe = vec_elt_at_index (xd->flow_entries, *private_data);

      if ((rv = rte_flow_destroy (xd->device_index, fe->handle,
				  &xd->last_flow_error)))
	return VNET_FLOW_ERROR_INTERNAL;

      if (fe->mark)
	{
	  /* make sure no action is taken for in-flight (marked) packets */
	  fle = pool_elt_at_index (xd->flow_lookup_entries, fe->mark);
	  clib_memset (fle, -1, sizeof (*fle));
	  vec_add1 (xd->parked_lookup_indexes, fe->mark);
	  xd->parked_loop_count = dm->vlib_main->main_loop_count;
	}

      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);

      goto disable_rx_offload;
    }

  if (op != VNET_FLOW_DEV_OP_ADD_FLOW)
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

  pool_get (xd->flow_entries, fe);
  fe->flow_index = flow->index;

  if (flow->actions == 0)
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  /* if we need to mark packets, assign one mark */
  if (flow->actions & (VNET_FLOW_ACTION_MARK |
		       VNET_FLOW_ACTION_REDIRECT_TO_NODE |
		       VNET_FLOW_ACTION_BUFFER_ADVANCE))
    {
      /* reserve slot 0 */
      if (xd->flow_lookup_entries == 0)
	pool_get_aligned (xd->flow_lookup_entries, fle,
			  CLIB_CACHE_LINE_BYTES);
      pool_get_aligned (xd->flow_lookup_entries, fle, CLIB_CACHE_LINE_BYTES);
      fe->mark = fle - xd->flow_lookup_entries;

      /* install entry in the lookup table */
      clib_memset (fle, -1, sizeof (*fle));
      if (flow->actions & VNET_FLOW_ACTION_MARK)
	fle->flow_id = flow->mark_flow_id;
      if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
	fle->next_index = flow->redirect_device_input_next_index;
      if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
	fle->buffer_advance = flow->buffer_advance;
    }
  else
    fe->mark = 0;

  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) == 0)
    {
      xd->flags |= DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }

  switch (flow->type)
    {
    case VNET_FLOW_TYPE_IP4_N_TUPLE:
    case VNET_FLOW_TYPE_IP6_N_TUPLE:
    case VNET_FLOW_TYPE_IP4_VXLAN:
      if ((rv = dpdk_flow_add (xd, flow, fe)))
	goto done;
      break;
    default:
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  *private_data = fe - xd->flow_entries;

done:
  if (rv)
    {
      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);
      if (fle)
	{
	  clib_memset (fle, -1, sizeof (*fle));
	  pool_put (xd->flow_lookup_entries, fle);
	}
    }
disable_rx_offload:
  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0
      && pool_elts (xd->flow_entries) == 0)
    {
      xd->flags &= ~DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }

  return rv;
}
Example #7
/*
 * fish pkts back from the recycle queue/freelist
 * un-flatten the context chains
 */
static void replication_recycle_callback (vlib_main_t *vm, 
                                          vlib_buffer_free_list_t * fl)
{
  vlib_frame_t * f = 0;
  u32 n_left_from;
  u32 n_left_to_next = 0;
  u32 n_this_frame = 0;
  u32 * from;
  u32 * to_next = 0;
  u32 bi0, pi0;
  vlib_buffer_t *b0;
  vlib_buffer_t *bnext0;
  int i;
  replication_main_t * rm = &replication_main;
  replication_context_t * ctx;
  u32 feature_node_index = 0; 
  uword cpu_number = vm->cpu_index;

  // All buffers in the list are destined to the same recycle node.
  // Pull the recycle node index from the first buffer. 
  // Note: this could be sped up if the node index were stuffed into
  // the freelist itself.
  if (vec_len (fl->aligned_buffers) > 0) {
    bi0 = fl->aligned_buffers[0];
    b0 = vlib_get_buffer (vm, bi0);
    ctx = pool_elt_at_index (rm->contexts[cpu_number],
                             b0->clone_count);
    feature_node_index = ctx->recycle_node_index;
  } else if (vec_len (fl->unaligned_buffers) > 0) {
    bi0 = fl->unaligned_buffers[0];
    b0 = vlib_get_buffer (vm, bi0);
    ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->clone_count);
    feature_node_index = ctx->recycle_node_index;
  }

  /* aligned, unaligned buffers */
  for (i = 0; i < 2; i++) 
    {
      if (i == 0)
        {
          from = fl->aligned_buffers;
          n_left_from = vec_len (from);
        }
      else
        {
          from = fl->unaligned_buffers;
          n_left_from = vec_len (from);
        }
    
      while (n_left_from > 0)
        {
          if (PREDICT_FALSE(n_left_to_next == 0)) 
            {
              if (f)
                {
                  f->n_vectors = n_this_frame;
                  vlib_put_frame_to_node (vm, feature_node_index, f);
                }
              
              f = vlib_get_frame_to_node (vm, feature_node_index);
              to_next = vlib_frame_vector_args (f);
              n_left_to_next = VLIB_FRAME_SIZE;
              n_this_frame = 0;
            }
          
          bi0 = from[0];
          if (PREDICT_TRUE(n_left_from > 1))
            {
              pi0 = from[1];
              vlib_prefetch_buffer_with_index(vm,pi0,LOAD);
            }
        
          bnext0 = b0 = vlib_get_buffer (vm, bi0);

          // Mark that this buffer was just recycled
          b0->flags |= VLIB_BUFFER_IS_RECYCLED;

          // If buffer is traced, mark frame as traced
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
              f->flags |= VLIB_FRAME_TRACE;

          while (bnext0->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              from += 1;
              n_left_from -= 1;
              bnext0 = vlib_get_buffer (vm, bnext0->next_buffer);
            }
          to_next[0] = bi0;

          from++;
          to_next++;
          n_this_frame++;
          n_left_to_next--;
          n_left_from--;
        }
    }
  
  vec_reset_length (fl->aligned_buffers);
  vec_reset_length (fl->unaligned_buffers);

  if (f)
    {
      ASSERT(n_this_frame);
      f->n_vectors = n_this_frame;
      vlib_put_frame_to_node (vm, feature_node_index, f);
    }
}