Code example #1
File: svmdb.c  Project: ajvoniq/vpp
static svmdb_client_t *
svmdb_map_internal (char *root_path, uword size)
{
  svmdb_client_t *client = 0;
  svm_map_region_args_t *a = 0;
  svm_region_t *db_rp;
  void *oldheap;
  svmdb_shm_hdr_t *hp = 0;

  vec_validate (client, 0);
  vec_validate (a, 0);

  svm_region_init_chroot (root_path);

  a->root_path = root_path;
  a->name = "/db";
  a->size = size ? size : SVMDB_DEFAULT_SIZE;
  a->flags = SVM_FLAGS_MHEAP;

  db_rp = client->db_rp = svm_region_find_or_create (a);

  ASSERT (db_rp);

  vec_free (a);

  region_lock (client->db_rp, 10);
  /* Has someone else set up the shared-memory variable table? */
  if (db_rp->user_ctx)
    {
      client->shm = (void *) db_rp->user_ctx;
      client->pid = getpid ();
      region_unlock (client->db_rp);
      ASSERT (client->shm->version == SVMDB_SHM_VERSION);
      return (client);
    }
  /* Nope, it's our problem... */

  /* Add a bogus client (pid=0) so the svm won't be deallocated */
  oldheap = svm_push_pvt_heap (db_rp);
  vec_add1 (client->db_rp->client_pids, 0);
  svm_pop_heap (oldheap);

  oldheap = svm_push_data_heap (db_rp);

  vec_validate (hp, 0);
  hp->version = SVMDB_SHM_VERSION;
  hp->namespaces[SVMDB_NAMESPACE_STRING]
    = hash_create_string (0, sizeof (uword));
  hp->namespaces[SVMDB_NAMESPACE_VEC]
    = hash_create_string (0, sizeof (uword));

  db_rp->user_ctx = hp;
  client->shm = hp;

  svm_pop_heap (oldheap);
  region_unlock (client->db_rp);
  client->pid = getpid ();

  return (client);
}
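
Every example on this page follows the same skeleton: serialize access to the region, point the CLIB allocator at the region's data heap with svm_push_data_heap(), allocate or free, then restore the previous heap with svm_pop_heap() before dropping the lock. Below is a minimal sketch of that skeleton, not taken from the project; the payload type shared_counter_t and the include paths are assumptions.

#include <pthread.h>
#include <string.h>
#include <svm/svm.h>		/* assumed include path for svm_region_t */
#include <vppinfra/mem.h>	/* assumed include path for clib_mem_alloc */

/* Hypothetical payload type, for illustration only. */
typedef struct
{
  u64 packets;
  u64 bytes;
} shared_counter_t;

static shared_counter_t *
alloc_counter_in_region (svm_region_t * rp)
{
  shared_counter_t *c;
  void *oldheap;

  /* Serialize against other processes attached to the region. */
  pthread_mutex_lock (&rp->mutex);

  /* Redirect clib_mem_alloc / vec_* onto the region's data heap... */
  oldheap = svm_push_data_heap (rp);
  c = clib_mem_alloc (sizeof (*c));
  memset (c, 0, sizeof (*c));

  /* ...and always restore the previous heap before unlocking. */
  svm_pop_heap (oldheap);
  pthread_mutex_unlock (&rp->mutex);

  return c;
}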
Code example #2
File: memory_shared.c  Project: chrisy/vpp
void
vl_msg_api_free (void *a)
{
  msgbuf_t *rv;
  void *oldheap;
  api_main_t *am = &api_main;

  rv = (msgbuf_t *) (((u8 *) a) - offsetof (msgbuf_t, data));

  /*
   * Here's the beauty of the scheme.  Only one proc/thread has
   * control of a given message buffer. To free a buffer, we just clear the
   * queue field, and leave. No locks, no hits, no errors...
   */
  if (rv->q)
    {
      rv->q = 0;
      rv->gc_mark_timestamp = 0;
#if DEBUG_MESSAGE_BUFFER_OVERRUN > 0
      {
	u32 *overrun;
	overrun = (u32 *) (rv->data + ntohl (rv->data_len));
	ASSERT (*overrun == 0x1badbabe);
      }
#endif
      return;
    }

  pthread_mutex_lock (&am->vlib_rp->mutex);
  oldheap = svm_push_data_heap (am->vlib_rp);

#if DEBUG_MESSAGE_BUFFER_OVERRUN > 0
  {
    u32 *overrun;
    overrun = (u32 *) (rv->data + ntohl (rv->data_len));
    ASSERT (*overrun == 0x1badbabe);
  }
#endif

  clib_mem_free (rv);
  svm_pop_heap (oldheap);
  pthread_mutex_unlock (&am->vlib_rp->mutex);
}
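
A hypothetical caller of the alloc/free pair might look like the sketch below. It assumes vl_msg_api_alloc() is the usual wrapper around the vl_msg_api_alloc_internal() shown in example #6, and the message type my_reply_t is made up for illustration.

/* Made-up message layout, for illustration only. */
typedef struct
{
  u16 _vl_msg_id;
  u32 context;
} my_reply_t;

static void
alloc_then_discard_reply (void)
{
  my_reply_t *mp;

  /* Comes from a ring slot or, as a fallback, the shared-memory heap. */
  mp = vl_msg_api_alloc (sizeof (*mp));

  /* Caller changed its mind: hand the buffer back.  A ring buffer is
     released by clearing its msgbuf_t q field; a heap buffer is freed
     under the region mutex, exactly as vl_msg_api_free() does above. */
  vl_msg_api_free (mp);
}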
Code example #3
File: memory_shared.c  Project: chrisy/vpp
void
vl_init_shmem (svm_region_t * vlib_rp, vl_api_shm_elem_config_t * config,
	       int is_vlib, int is_private_region)
{
  api_main_t *am = &api_main;
  vl_shmem_hdr_t *shmem_hdr = 0;
  void *oldheap;
  ASSERT (vlib_rp);

  /* $$$$ need private region config parameters */

  oldheap = svm_push_data_heap (vlib_rp);

  vec_validate (shmem_hdr, 0);
  shmem_hdr->version = VL_SHM_VERSION;
  shmem_hdr->clib_file_index = VL_API_INVALID_FI;

  /* Set up the queue and msg ring allocator */
  vl_api_mem_config (shmem_hdr, config);

  if (is_private_region == 0)
    {
      am->shmem_hdr = shmem_hdr;
      am->vlib_rp = vlib_rp;
      am->our_pid = getpid ();
      if (is_vlib)
	am->shmem_hdr->vl_pid = am->our_pid;
    }
  else
    shmem_hdr->vl_pid = am->our_pid;

  svm_pop_heap (oldheap);

  /*
   * After absolutely everything that a client might see is set up,
   * declare the shmem region valid
   */
  vlib_rp->user_ctx = shmem_hdr;

  pthread_mutex_unlock (&vlib_rp->mutex);
}
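
The final store to vlib_rp->user_ctx is the publication point that clients poll for, mirroring the user_ctx check in example #1. A hypothetical attach-side counterpart (not from the project) might read:

/* Hypothetical reader: a non-NULL user_ctx means "the header behind it is
   fully initialized", which is why vl_init_shmem() stores it last.
   Needs <unistd.h> for usleep(). */
static vl_shmem_hdr_t *
wait_for_shmem_hdr (svm_region_t * vlib_rp)
{
  vl_shmem_hdr_t *hdr;

  while ((hdr = (vl_shmem_hdr_t *) vlib_rp->user_ctx) == 0)
    usleep (1000);		/* a real client would also give up after a timeout */

  ASSERT (hdr->version == VL_SHM_VERSION);
  return hdr;
}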
Code example #4
File: memory_shared.c  Project: chrisy/vpp
static void
vl_msg_api_free_nolock (void *a)
{
  msgbuf_t *rv;
  void *oldheap;
  api_main_t *am = &api_main;

  rv = (msgbuf_t *) (((u8 *) a) - offsetof (msgbuf_t, data));
  /*
   * Here's the beauty of the scheme.  Only one proc/thread has
   * control of a given message buffer. To free a buffer, we just clear the
   * queue field, and leave. No locks, no hits, no errors...
   */
  if (rv->q)
    {
      rv->q = 0;
      return;
    }

  oldheap = svm_push_data_heap (am->vlib_rp);
  clib_mem_free (rv);
  svm_pop_heap (oldheap);
}
Code example #5
File: svmdb.c  Project: ajvoniq/vpp
int
svmdb_local_add_del_notification (svmdb_client_t * client,
				  svmdb_notification_args_t * a)
{
  uword *h;
  void *oldheap;
  hash_pair_t *hp;
  svmdb_shm_hdr_t *shm;
  u8 *dummy_value = 0;
  svmdb_value_t *value;
  svmdb_notify_t *np;
  int i;
  int rv = 0;

  ASSERT (a->elsize);

  region_lock (client->db_rp, 18);
  shm = client->shm;
  oldheap = svm_push_data_heap (client->db_rp);

  h = shm->namespaces[a->nspace];

  hp = hash_get_pair_mem (h, a->var);
  if (hp == 0)
    {
      local_set_variable_nolock (client, a->nspace, (u8 *) a->var,
				 dummy_value, a->elsize);
      /* might have moved */
      h = shm->namespaces[a->nspace];
      hp = hash_get_pair_mem (h, a->var);
      ASSERT (hp);
    }

  value = pool_elt_at_index (shm->values, hp->value[0]);

  for (i = 0; i < vec_len (value->notifications); i++)
    {
      np = vec_elt_at_index (value->notifications, i);
      if ((np->pid == client->pid)
	  && (np->signum == a->signum)
	  && (np->action == a->action) && (np->opaque == a->opaque))
	{
	  if (a->add_del == 0 /* delete */ )
	    {
	      vec_delete (value->notifications, 1, i);
	      goto out;
	    }
	  else
	    {			/* add */
	      clib_warning
		("%s: ignore dup reg pid %d signum %d action %d opaque %x",
		 a->var, client->pid, a->signum, a->action, a->opaque);
	      rv = -2;
	      goto out;
	    }
	}
    }
  if (a->add_del == 0)
    {
      rv = -3;
      goto out;
    }

  vec_add2 (value->notifications, np, 1);
  np->pid = client->pid;
  np->signum = a->signum;
  np->action = a->action;
  np->opaque = a->opaque;

out:
  svm_pop_heap (oldheap);
  region_unlock (client->db_rp);
  return rv;
}
Code example #6
File: memory_shared.c  Project: chrisy/vpp
static inline void *
vl_msg_api_alloc_internal (int nbytes, int pool, int may_return_null)
{
  int i;
  msgbuf_t *rv;
  ring_alloc_t *ap;
  svm_queue_t *q;
  void *oldheap;
  vl_shmem_hdr_t *shmem_hdr;
  api_main_t *am = &api_main;

  shmem_hdr = am->shmem_hdr;

#if DEBUG_MESSAGE_BUFFER_OVERRUN > 0
  nbytes += 4;
#endif

  ASSERT (pool == 0 || vlib_get_thread_index () == 0);

  if (shmem_hdr == 0)
    {
      clib_warning ("shared memory header NULL");
      return 0;
    }

  /* account for the msgbuf_t header */
  nbytes += sizeof (msgbuf_t);

  if (shmem_hdr->vl_rings == 0)
    {
      clib_warning ("vl_rings NULL");
      ASSERT (0);
      abort ();
    }

  if (shmem_hdr->client_rings == 0)
    {
      clib_warning ("client_rings NULL");
      ASSERT (0);
      abort ();
    }

  ap = pool ? shmem_hdr->vl_rings : shmem_hdr->client_rings;
  for (i = 0; i < vec_len (ap); i++)
    {
      /* Too big? */
      if (nbytes > ap[i].size)
	{
	  continue;
	}

      q = ap[i].rp;
      if (pool == 0)
	{
	  pthread_mutex_lock (&q->mutex);
	}
      rv = (msgbuf_t *) (&q->data[0] + q->head * q->elsize);
      /*
       * Is this item still in use?
       */
      if (rv->q)
	{
	  u32 now = (u32) time (0);

	  if (PREDICT_TRUE (rv->gc_mark_timestamp == 0))
	    rv->gc_mark_timestamp = now;
	  else
	    {
	      if (now - rv->gc_mark_timestamp > 10)
		{
		  if (CLIB_DEBUG > 0)
		    {
		      u16 *msg_idp, msg_id;
		      clib_warning
			("garbage collect pool %d ring %d index %d", pool, i,
			 q->head);
		      msg_idp = (u16 *) (rv->data);
		      msg_id = clib_net_to_host_u16 (*msg_idp);
		      if (msg_id < vec_len (api_main.msg_names))
			clib_warning ("msg id %d name %s", (u32) msg_id,
				      api_main.msg_names[msg_id]);
		    }
		  shmem_hdr->garbage_collects++;
		  goto collected;
		}
	    }


	  /* yes, loser; try next larger pool */
	  ap[i].misses++;
	  if (pool == 0)
	    pthread_mutex_unlock (&q->mutex);
	  continue;
	}
    collected:

      /* OK, we have a winner */
      ap[i].hits++;
      /*
       * Remember the source queue, although we
       * don't need to know the queue to free the item.
       */
      rv->q = q;
      rv->gc_mark_timestamp = 0;
      q->head++;
      if (q->head == q->maxsize)
	q->head = 0;

      if (pool == 0)
	pthread_mutex_unlock (&q->mutex);
      goto out;
    }

  /*
   * Request too big, or head element of all size-compatible rings
   * still in use. Fall back to shared-memory malloc.
   */
  am->ring_misses++;

  pthread_mutex_lock (&am->vlib_rp->mutex);
  oldheap = svm_push_data_heap (am->vlib_rp);
  if (may_return_null)
    {
      rv = clib_mem_alloc_or_null (nbytes);
      if (PREDICT_FALSE (rv == 0))
	{
	  svm_pop_heap (oldheap);
	  pthread_mutex_unlock (&am->vlib_rp->mutex);
	  return 0;
	}
    }
  else
    rv = clib_mem_alloc (nbytes);

  rv->q = 0;
  rv->gc_mark_timestamp = 0;
  svm_pop_heap (oldheap);
  pthread_mutex_unlock (&am->vlib_rp->mutex);

out:
#if DEBUG_MESSAGE_BUFFER_OVERRUN > 0
  {
    nbytes -= 4;
    u32 *overrun;
    overrun = (u32 *) (rv->data + nbytes - sizeof (msgbuf_t));
    *overrun = 0x1badbabe;
  }
#endif
  rv->data_len = htonl (nbytes - sizeof (msgbuf_t));

  return (rv->data);
}
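
Finally, the DEBUG_MESSAGE_BUFFER_OVERRUN blocks in examples #2 and #6 implement a simple trailing canary. Restated as stand-alone sketches (the helper names are hypothetical; the layout and the 0x1badbabe magic are the ones used above):

/* The allocator reserves 4 extra bytes past the payload and stamps them;
   the free path asserts the stamp is intact, catching writes past
   data_len.  Both helpers assume rv->data_len has already been set. */
static void
stamp_overrun_canary (msgbuf_t * rv)
{
  u32 *overrun = (u32 *) (rv->data + ntohl (rv->data_len));
  *overrun = 0x1badbabe;
}

static void
check_overrun_canary (msgbuf_t * rv)
{
  u32 *overrun = (u32 *) (rv->data + ntohl (rv->data_len));
  ASSERT (*overrun == 0x1badbabe);
}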