Example #1
void
ssvm_delete_private (ssvm_private_t * ssvm)
{
  vec_free (ssvm->name);
#if USE_DLMALLOC == 0
  mheap_free (ssvm->sh->heap);
#else
  destroy_mspace (ssvm->sh->heap);
#endif
  clib_mem_free (ssvm->sh);
}
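ssvm_delete_private () tears down whichever heap flavor the build selected. A minimal sketch of the matching allocation side, assuming the vppinfra mheap/dlmalloc headers; the helper name and heap size are illustrative, not part of the example above.

#include <vppinfra/mem.h>
#if USE_DLMALLOC == 0
#include <vppinfra/mheap.h>
#else
#include <vppinfra/dlmalloc.h>
#endif

/* Hypothetical helper mirroring the teardown above: create the per-segment
 * heap that ssvm_delete_private () later destroys. */
static void *
ssvm_heap_create_sketch (void *heap_mem, uword heap_size)
{
#if USE_DLMALLOC == 0
  /* legacy mheap, released with mheap_free () */
  return mheap_alloc (heap_mem, heap_size);
#else
  /* dlmalloc mspace, released with destroy_mspace () */
  return create_mspace (heap_size, 1 /* locked */);
#endif
}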
Example #2
static int
srv6_am_localsid_removal_fn (ip6_sr_localsid_t * localsid)
{
  srv6_am_localsid_t *ls_mem = localsid->plugin_mem;

  /* Remove hardware indirection (from sr_steering.c:137) */
  int ret = vnet_feature_enable_disable ("ip6-unicast", "srv6-am-rewrite",
					 ls_mem->sw_if_index_in, 0, 0, 0);
  if (ret != 0)
    return -1;

  /* Unlock (OIF, NHOP) adjacency (from sr_localsid.c:103) */
  adj_unlock (localsid->nh_adj);

  /* Clean up local SID memory */
  clib_mem_free (localsid->plugin_mem);

  return 0;
}
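The removal function above disables the feature that was turned on when the local SID was created. A hedged sketch of that enable side, reusing the arc and node names from the example; the wrapper name is assumed.

#include <vnet/feature/feature.h>

/* Hypothetical creation-side counterpart: attach the srv6-am rewrite node
 * to the ip6-unicast arc on the inbound interface. */
static int
srv6_am_enable_rewrite_sketch (u32 sw_if_index_in)
{
  return vnet_feature_enable_disable ("ip6-unicast", "srv6-am-rewrite",
				      sw_if_index_in, 1 /* enable */, 0, 0);
}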
Example #3
void
vl_msg_api_free (void *a)
{
  msgbuf_t *rv;
  void *oldheap;
  api_main_t *am = &api_main;

  rv = (msgbuf_t *) (((u8 *) a) - offsetof (msgbuf_t, data));

  /*
   * Here's the beauty of the scheme.  Only one proc/thread has
   * control of a given message buffer. To free a buffer, we just clear the
   * queue field, and leave. No locks, no hits, no errors...
   */
  if (rv->q)
    {
      rv->q = 0;
      rv->gc_mark_timestamp = 0;
#if DEBUG_MESSAGE_BUFFER_OVERRUN > 0
      {
	u32 *overrun;
	overrun = (u32 *) (rv->data + ntohl (rv->data_len));
	ASSERT (*overrun == 0x1badbabe);
      }
#endif
      return;
    }

  pthread_mutex_lock (&am->vlib_rp->mutex);
  oldheap = svm_push_data_heap (am->vlib_rp);

#if DEBUG_MESSAGE_BUFFER_OVERRUN > 0
  {
    u32 *overrun;
    overrun = (u32 *) (rv->data + ntohl (rv->data_len));
    ASSERT (*overrun == 0x1badbabe);
  }
#endif

  clib_mem_free (rv);
  svm_pop_heap (oldheap);
  pthread_mutex_unlock (&am->vlib_rp->mutex);
}
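The comment in the function states the ownership rule: a buffer still attached to a queue is released by clearing rv->q, otherwise it goes back to the shared-memory heap under the region mutex. A minimal caller-side sketch of the allocate/free pairing; the function name and the 64-byte size are illustrative.

#include <vlibmemory/api.h>

/* Allocate a message buffer from the API segment and hand it straight
 * back; vl_msg_api_free () picks the cheap path or the heap path as
 * shown in Example #3. */
static void
msg_roundtrip_sketch (void)
{
  void *msg = vl_msg_api_alloc (64);
  vl_msg_api_free (msg);
}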
Example #4
static void
vl_msg_api_free_nolock (void *a)
{
  msgbuf_t *rv;
  void *oldheap;
  api_main_t *am = &api_main;

  rv = (msgbuf_t *) (((u8 *) a) - offsetof (msgbuf_t, data));
  /*
   * Here's the beauty of the scheme.  Only one proc/thread has
   * control of a given message buffer. To free a buffer, we just clear the
   * queue field, and leave. No locks, no hits, no errors...
   */
  if (rv->q)
    {
      rv->q = 0;
      return;
    }

  oldheap = svm_push_data_heap (am->vlib_rp);
  clib_mem_free (rv);
  svm_pop_heap (oldheap);
}
Example #5
static uword
dhcp6_pd_reply_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
			vlib_frame_t * f)
{
  /* These cross the longjmp  boundary (vlib_process_wait_for_event)
   * and need to be volatile - to prevent them from being optimized into
   * a register - which could change during suspension */

  while (1)
    {
      vlib_process_wait_for_event (vm);
      uword event_type = DHCP6_PD_DP_REPLY_REPORT;
      void *event_data = vlib_process_get_event_data (vm, &event_type);

      int i;
      if (event_type == DHCP6_PD_DP_REPLY_REPORT)
	{
	  prefix_report_t *events = event_data;
	  for (i = 0; i < vec_len (events); i++)
	    {
	      u32 event_size =
		sizeof (vl_api_dhcp6_pd_reply_event_t) +
		vec_len (events[i].prefixes) *
		sizeof (vl_api_dhcp6_pd_prefix_info_t);
	      vl_api_dhcp6_pd_reply_event_t *event =
		clib_mem_alloc (event_size);
	      clib_memset (event, 0, event_size);

	      event->sw_if_index = htonl (events[i].body.sw_if_index);
	      event->server_index = htonl (events[i].body.server_index);
	      event->msg_type = events[i].body.msg_type;
	      event->T1 = htonl (events[i].body.T1);
	      event->T2 = htonl (events[i].body.T2);
	      event->inner_status_code =
		htons (events[i].body.inner_status_code);
	      event->status_code = htons (events[i].body.status_code);
	      event->preference = events[i].body.preference;

	      event->n_prefixes = htonl (vec_len (events[i].prefixes));
	      vl_api_dhcp6_pd_prefix_info_t *prefix =
		(typeof (prefix)) event->prefixes;
	      u32 j;
	      for (j = 0; j < vec_len (events[i].prefixes); j++)
		{
		  dhcp6_prefix_info_t *info = &events[i].prefixes[j];
		  memcpy (prefix->prefix, &info->prefix, 16);
		  prefix->prefix_length = info->prefix_length;
		  prefix->valid_time = htonl (info->valid_time);
		  prefix->preferred_time = htonl (info->preferred_time);
		  prefix++;
		}
	      vec_free (events[i].prefixes);

	      dhcp6_pd_client_public_main_t *dpcpm =
		&dhcp6_pd_client_public_main;
	      call_dhcp6_pd_reply_event_callbacks (event, dpcpm->functions);

	      vpe_client_registration_t *reg;
              /* *INDENT-OFF* */
              pool_foreach(reg, vpe_api_main.dhcp6_pd_reply_events_registrations,
              ({
                vl_api_registration_t *vl_reg;
                vl_reg =
                  vl_api_client_index_to_registration (reg->client_index);
                if (vl_reg && vl_api_can_send_msg (vl_reg))
                  {
                    vl_api_dhcp6_pd_reply_event_t *msg =
                      vl_msg_api_alloc (event_size);
                    clib_memcpy (msg, event, event_size);
                    msg->_vl_msg_id = htons (VL_API_DHCP6_PD_REPLY_EVENT);
                    msg->client_index = reg->client_index;
                    msg->pid = reg->client_pid;
                    vl_api_send_msg (vl_reg, (u8 *) msg);
                  }
              }));
              /* *INDENT-ON* */

	      clib_mem_free (event);
	    }
	}

      vlib_process_put_event_data (vm, event_data);
    }

  return 0;
}
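Each reply event in the loop above is built with clib_mem_alloc, zeroed, handed to the callbacks, then released with clib_mem_free. The same lifecycle stripped of the DHCPv6 payload; the sizes and the function name are placeholders.

#include <vppinfra/mem.h>
#include <vppinfra/string.h>

/* Allocate a variable-sized event, zero it, use it, free it.  The header
 * and per-prefix sizes stand in for the API struct sizes used in
 * Example #5. */
static void
event_lifecycle_sketch (u32 n_prefixes)
{
  u32 event_size = 64 + n_prefixes * 32;
  void *event = clib_mem_alloc (event_size);

  clib_memset (event, 0, event_size);

  /* ... fill in the event and pass it to the registered callbacks ... */

  clib_mem_free (event);
}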
Example #6
int
test_mheap_main (unformat_input_t * input)
{
  int i, j, k, n_iterations;
  void *h, *h_mem;
  uword *objects = 0;
  u32 objects_used, really_verbose, n_objects, max_object_size;
  u32 check_mask, seed, trace, use_vm;
  u32 print_every = 0;
  u32 *data;
  mheap_t *mh;

  /* Validation flags. */
  check_mask = 0;
#define CHECK_VALIDITY 1
#define CHECK_DATA     2
#define CHECK_ALIGN    4
#define TEST1	       8

  n_iterations = 10;
  seed = 0;
  max_object_size = 100;
  n_objects = 1000;
  trace = 0;
  really_verbose = 0;
  use_vm = 0;

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (0 == unformat (input, "iter %d", &n_iterations)
	  && 0 == unformat (input, "count %d", &n_objects)
	  && 0 == unformat (input, "size %d", &max_object_size)
	  && 0 == unformat (input, "seed %d", &seed)
	  && 0 == unformat (input, "print %d", &print_every)
	  && 0 == unformat (input, "validdata %|",
			    &check_mask, CHECK_DATA | CHECK_VALIDITY)
	  && 0 == unformat (input, "valid %|",
			    &check_mask, CHECK_VALIDITY)
	  && 0 == unformat (input, "verbose %=", &really_verbose, 1)
	  && 0 == unformat (input, "trace %=", &trace, 1)
	  && 0 == unformat (input, "vm %=", &use_vm, 1)
	  && 0 == unformat (input, "align %|", &check_mask, CHECK_ALIGN)
	  && 0 == unformat (input, "test1 %|", &check_mask, TEST1))
	{
	  clib_warning ("unknown input `%U'", format_unformat_error, input);
	  return 1;
	}
    }

  /* Zero seed means use default. */
  if (!seed)
    seed = random_default_seed ();

  if (check_mask & TEST1)
    {
      return test1 ();
    }

  if_verbose
    ("testing %d iterations, %d %saligned objects, max. size %d, seed %d",
     n_iterations, n_objects, (check_mask & CHECK_ALIGN) ? "randomly " : "un",
     max_object_size, seed);

  vec_resize (objects, n_objects);
  if (vec_bytes (objects) > 0)	/* stupid warning be gone */
    clib_memset (objects, ~0, vec_bytes (objects));
  objects_used = 0;

  /* Allocate initial heap. */
  {
    uword size =
      max_pow2 (2 * n_objects * max_object_size * sizeof (data[0]));

    h_mem = clib_mem_alloc (size);
    if (!h_mem)
      return 0;

    h = mheap_alloc (h_mem, size);
  }

  if (trace)
    mheap_trace (h, trace);

  mh = mheap_header (h);

  if (use_vm)
    mh->flags &= ~MHEAP_FLAG_DISABLE_VM;
  else
    mh->flags |= MHEAP_FLAG_DISABLE_VM;

  if (check_mask & CHECK_VALIDITY)
    mh->flags |= MHEAP_FLAG_VALIDATE;

  for (i = 0; i < n_iterations; i++)
    {
      while (1)
	{
	  j = random_u32 (&seed) % vec_len (objects);
	  if (objects[j] != ~0 || i + objects_used < n_iterations)
	    break;
	}

      if (objects[j] != ~0)
	{
	  mheap_put (h, objects[j]);
	  objects_used--;
	  objects[j] = ~0;
	}
      else
	{
	  uword size, align, align_offset;

	  size = (random_u32 (&seed) % max_object_size) * sizeof (data[0]);
	  align = align_offset = 0;
	  if (check_mask & CHECK_ALIGN)
	    {
	      align = 1 << (random_u32 (&seed) % 10);
	      align_offset = round_pow2 (random_u32 (&seed) & (align - 1),
					 sizeof (u32));
	    }

	  h = mheap_get_aligned (h, size, align, align_offset, &objects[j]);

	  if (align > 0)
	    ASSERT (0 == ((objects[j] + align_offset) & (align - 1)));

	  ASSERT (objects[j] != ~0);
	  objects_used++;

	  /* Set newly allocated object with test data. */
	  if (check_mask & CHECK_DATA)
	    {
	      uword len;

	      data = (void *) h + objects[j];
	      len = mheap_len (h, data);

	      ASSERT (size <= mheap_data_bytes (h, objects[j]));

	      data[0] = len;
	      for (k = 1; k < len; k++)
		data[k] = objects[j] + k;
	    }
	}

      /* Verify that all used objects have correct test data. */
      if (check_mask & CHECK_DATA)
	{
	  for (j = 0; j < vec_len (objects); j++)
	    if (objects[j] != ~0)
	      {
		u32 *data = h + objects[j];
		uword len = data[0];
		for (k = 1; k < len; k++)
		  ASSERT (data[k] == objects[j] + k);
	      }
	}
      if (print_every != 0 && i > 0 && (i % print_every) == 0)
	fformat (stderr, "iteration %d: %U\n", i, format_mheap, h,
		 really_verbose);
    }

  if (verbose)
    fformat (stderr, "%U\n", format_mheap, h, really_verbose);
  mheap_free (h);
  clib_mem_free (h_mem);
  vec_free (objects);

  return 0;
}
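The test drives the low-level mheap API directly. A minimal round trip through the same calls, with arbitrary sizes and a hypothetical function name.

#include <vppinfra/mem.h>
#include <vppinfra/mheap.h>

/* Create a private heap inside a clib_mem_alloc ()'d buffer, take one
 * aligned object, give it back, then destroy the heap. */
static int
mheap_roundtrip_sketch (void)
{
  uword size = 1 << 20;
  void *h_mem = clib_mem_alloc (size);
  void *h = mheap_alloc (h_mem, size);	/* heap lives inside h_mem */
  uword offset;

  /* 64 bytes, 16-byte aligned; mheap_get_aligned () may move the heap,
   * so always reassign h. */
  h = mheap_get_aligned (h, 64, 16, 0, &offset);
  if (offset == ~0)
    return -1;			/* allocation failed */

  mheap_put (h, offset);	/* return the object */
  mheap_free (h);		/* destroy the heap */
  clib_mem_free (h_mem);	/* release the backing memory */
  return 0;
}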
Example #7
File: smp.c Project: saumzy/clib
void clib_smp_lock_free (clib_smp_lock_t ** pl)
{
  if (*pl)
    clib_mem_free (*pl);
  *pl = 0;
}
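clib_smp_lock_free () takes the address of the caller's pointer so it can both free the lock and clear the pointer. A small usage sketch, assuming the clib/smp.h and clib/error.h headers from that project; the wrapper name is invented.

#include <clib/smp.h>
#include <clib/error.h>

/* Release a lock and rely on the callee to reset the caller's pointer,
 * so calling this twice is a harmless no-op. */
static void
release_lock_sketch (clib_smp_lock_t ** pl)
{
  clib_smp_lock_free (pl);
  ASSERT (*pl == 0);
}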
Example #8
void
scrape_and_clear_counters (perfmon_main_t * pm)
{
  int i, j, k;
  vlib_main_t *vm = pm->vlib_main;
  vlib_main_t *stat_vm;
  vlib_node_main_t *nm;
  vlib_node_t ***node_dups = 0;
  vlib_node_t **nodes;
  vlib_node_t *n;
  perfmon_capture_t *c;
  perfmon_event_config_t *current_event;
  uword *p;
  u8 *counter_name;
  u64 vectors_this_counter;

  /* snapshot the nodes, including pm counters */
  vlib_worker_thread_barrier_sync (vm);

  for (j = 0; j < vec_len (vlib_mains); j++)
    {
      stat_vm = vlib_mains[j];
      if (stat_vm == 0)
	continue;

      nm = &stat_vm->node_main;

      for (i = 0; i < vec_len (nm->nodes); i++)
	{
	  n = nm->nodes[i];
	  vlib_node_sync_stats (stat_vm, n);
	}

      nodes = 0;
      vec_validate (nodes, vec_len (nm->nodes) - 1);
      vec_add1 (node_dups, nodes);

      /* Snapshot and clear the per-node perfmon counters */
      for (i = 0; i < vec_len (nm->nodes); i++)
	{
	  n = nm->nodes[i];
	  nodes[i] = clib_mem_alloc (sizeof (*n));
	  clib_memcpy_fast (nodes[i], n, sizeof (*n));
	  n->stats_total.perf_counter0_ticks = 0;
	  n->stats_total.perf_counter1_ticks = 0;
	  n->stats_total.perf_counter_vectors = 0;
	  n->stats_last_clear.perf_counter0_ticks = 0;
	  n->stats_last_clear.perf_counter1_ticks = 0;
	  n->stats_last_clear.perf_counter_vectors = 0;
	}
    }

  vlib_worker_thread_barrier_release (vm);

  for (j = 0; j < vec_len (vlib_mains); j++)
    {
      stat_vm = vlib_mains[j];
      if (stat_vm == 0)
	continue;

      nodes = node_dups[j];

      for (i = 0; i < vec_len (nodes); i++)
	{
	  u8 *capture_name;

	  n = nodes[i];

	  if (n->stats_total.perf_counter0_ticks == 0 &&
	      n->stats_total.perf_counter1_ticks == 0)
	    goto skip_this_node;

	  for (k = 0; k < 2; k++)
	    {
	      u64 counter_value, counter_last_clear;

	      /*
	       * We collect 2 counters at once, except for the
	       * last counter when the user asks for an odd number of
	       * counters
	       */
	      if ((pm->current_event + k)
		  >= vec_len (pm->single_events_to_collect))
		break;

	      if (k == 0)
		{
		  counter_value = n->stats_total.perf_counter0_ticks;
		  counter_last_clear =
		    n->stats_last_clear.perf_counter0_ticks;
		}
	      else
		{
		  counter_value = n->stats_total.perf_counter1_ticks;
		  counter_last_clear =
		    n->stats_last_clear.perf_counter1_ticks;
		}

	      capture_name = format (0, "t%d-%v%c", j, n->name, 0);

	      p = hash_get_mem (pm->capture_by_thread_and_node_name,
				capture_name);

	      if (p == 0)
		{
		  pool_get (pm->capture_pool, c);
		  memset (c, 0, sizeof (*c));
		  c->thread_and_node_name = capture_name;
		  hash_set_mem (pm->capture_by_thread_and_node_name,
				capture_name, c - pm->capture_pool);
		}
	      else
		{
		  c = pool_elt_at_index (pm->capture_pool, p[0]);
		  vec_free (capture_name);
		}

	      /* Snapshot counters, etc. into the capture */
	      current_event = pm->single_events_to_collect
		+ pm->current_event + k;
	      counter_name = (u8 *) current_event->name;
	      vectors_this_counter = n->stats_total.perf_counter_vectors -
		n->stats_last_clear.perf_counter_vectors;

	      vec_add1 (c->counter_names, counter_name);
	      vec_add1 (c->counter_values,
			counter_value - counter_last_clear);
	      vec_add1 (c->vectors_this_counter, vectors_this_counter);
	    }
	skip_this_node:
	  clib_mem_free (n);
	}
      vec_free (nodes);
    }
  vec_free (node_dups);
}
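Inside the barrier the code duplicates each vlib_node_t with clib_mem_alloc plus clib_memcpy_fast and frees the copies once the captures are recorded. The same copy-then-free idiom in isolation; the helper name is invented.

#include <vlib/vlib.h>

/* Duplicate a node structure into heap memory; the caller must
 * clib_mem_free () the returned copy, as the second loop above does. */
static vlib_node_t *
node_snapshot_sketch (vlib_node_t * n)
{
  vlib_node_t *copy = clib_mem_alloc (sizeof (*copy));

  clib_memcpy_fast (copy, n, sizeof (*copy));
  return copy;
}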