Example #1
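/* Finds all client entries whose Client ID hash matches the MD5 hash of
   `nickname'.  If `server' is given the result is narrowed to clients on
   that server.  Matches are appended to the `clients' array and
   `clients_count' is updated.  Returns TRUE if at least one client was
   found. */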
int silc_idlist_get_clients_by_hash(SilcIDList id_list,
				    char *nickname, char *server,
				    SilcHash md5hash,
				    SilcClientEntry **clients,
				    SilcUInt32 *clients_count)
{
  SilcList list;
  SilcIDCacheEntry id_cache = NULL;
  unsigned char hash[SILC_HASH_MAXLEN];
  SilcClientID client_id;
  SilcClientEntry client_entry;

  SILC_LOG_DEBUG(("Start"));

  silc_hash_make(md5hash, nickname, strlen(nickname), hash);

  /* Client IDs are stored in the ID cache hashed by only the hash part
     of the Client ID.  We can therefore do a lookup with just that hash,
     without the other parts of the ID, and get all clients with the same
     hash, i.e. with the same nickname, since the hash is computed from
     the nickname. */
  memset(&client_id, 0, sizeof(client_id));
  memcpy(&client_id.hash, hash, sizeof(client_id.hash));
  if (!silc_idcache_find_by_id(id_list->clients, &client_id, &list))
    return FALSE;

  /* If server is specified, narrow the search with it. */
  if (server) {
    silc_list_start(list);
    while ((id_cache = silc_list_get(list))) {
      client_entry = id_cache->context;
      if (!client_entry->servername)
	continue;
      if (!silc_utf8_strcasecmp(client_entry->servername, server))
	silc_list_del(list, id_cache);
    }
  }

  if (!silc_list_count(list))
    return FALSE;

  *clients = silc_realloc(*clients,
			  (silc_list_count(list) + *clients_count) *
			  sizeof(**clients));

  silc_list_start(list);
  while ((id_cache = silc_list_get(list)))
    (*clients)[(*clients_count)++] = id_cache->context;

  SILC_LOG_DEBUG(("Found total %d clients", *clients_count));

  return TRUE;
}
Example #2
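/* Returns an allocated array of channel entries.  If `channel_id' is
   NULL all channels in the ID cache are returned, otherwise only the
   channel matching the ID.  The number of entries is stored to
   `channels_count' if it is non-NULL.  The caller must free the
   returned array. */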
SilcChannelEntry *
silc_idlist_get_channels(SilcIDList id_list, SilcChannelID *channel_id,
			 SilcUInt32 *channels_count)
{
  SilcList list;
  SilcIDCacheEntry id_cache = NULL;
  SilcChannelEntry *channels = NULL;
  int i = 0;

  SILC_LOG_DEBUG(("Start"));

  if (!channel_id) {
    if (!silc_idcache_get_all(id_list->channels, &list))
      return NULL;

    channels = silc_calloc(silc_list_count(list), sizeof(*channels));

    i = 0;
    silc_list_start(list);
    while ((id_cache = silc_list_get(list)))
      channels[i++] = (SilcChannelEntry)id_cache->context;
  } else {
    if (!silc_idcache_find_by_id_one(id_list->channels, channel_id, &id_cache))
      return NULL;

    i = 1;
    channels = silc_calloc(1, sizeof(*channels));
    channels[0] = (SilcChannelEntry)id_cache->context;
  }

  if (channels_count)
    *channels_count = i;

  return channels;
}
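A minimal usage sketch, not from the original source: listing every channel the server knows about. The server->id_list handle and the channel_name field are assumptions about the surrounding server code; the caller releases the array with silc_free.

/* Hypothetical usage (assumes server->id_list and channel_name exist). */
SilcUInt32 count = 0;
SilcChannelEntry *channels =
  silc_idlist_get_channels(server->id_list, NULL, &count);
if (channels) {
  SilcUInt32 i;
  for (i = 0; i < count; i++)
    SILC_LOG_DEBUG(("Channel %s", channels[i]->channel_name));
  silc_free(channels);
}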
Example #3
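/* Finds all client entries from the ID cache by nickname and appends
   them to the `clients' array, updating `clients_count'.  Returns TRUE
   if at least one client was found. */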
int silc_idlist_get_clients_by_nickname(SilcIDList id_list, char *nickname,
					char *server,
					SilcClientEntry **clients,
					SilcUInt32 *clients_count)
{
  SilcList list;
  SilcIDCacheEntry id_cache = NULL;

  SILC_LOG_DEBUG(("Start"));

  if (!silc_idcache_find_by_name(id_list->clients, nickname, &list))
    return FALSE;

  *clients = silc_realloc(*clients,
			  (silc_list_count(list) + *clients_count) *
			  sizeof(**clients));

  silc_list_start(list);
  while ((id_cache = silc_list_get(list)))
    (*clients)[(*clients_count)++] = id_cache->context;

  SILC_LOG_DEBUG(("Found total %d clients", *clients_count));

  return TRUE;
}
Example #4
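/* Frees the thread pool.  Signals all threads to stop and, if
   `wait_unfinished' is TRUE, blocks until every thread has finished
   before releasing the pool reference. */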
void silc_thread_pool_free(SilcThreadPool tp, SilcBool wait_unfinished)
{
  SilcThreadPoolThread t;

  SILC_LOG_DEBUG(("Free thread pool %p", tp));

  silc_mutex_lock(tp->lock);
  tp->destroy = TRUE;

  /* Stop threads */
  silc_list_start(tp->threads);
  while ((t = silc_list_get(tp->threads))) {
    silc_mutex_lock(t->lock);
    t->stop = TRUE;
    silc_cond_signal(t->thread_signal);
    silc_mutex_unlock(t->lock);
  }

  if (wait_unfinished) {
    SILC_LOG_DEBUG(("Wait threads to finish"));
    while (silc_list_count(tp->threads))
      silc_cond_wait(tp->pool_signal, tp->lock);
  }

  /* Release reference.  Releases lock also. */
  silc_thread_pool_unref(tp);
}
Example #5
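/* Returns the number of currently free (idle) threads in the pool. */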
SilcUInt32 silc_thread_pool_num_free_threads(SilcThreadPool tp)
{
  SilcUInt32 free_threads;

  silc_mutex_lock(tp->lock);
  free_threads = silc_list_count(tp->free_threads);
  silc_mutex_unlock(tp->lock);

  return free_threads;
}
Example #6
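/* FSM thread destructor.  Returns a finished packet processor thread
   back to the connection's thread pool for reuse, resetting the list
   iterator if the pool was empty before the add. */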
static void silc_client_packet_destructor(SilcFSMThread thread,
					  void *thread_context,
					  void *destructor_context)
{
  SilcClientConnection conn = thread_context;

  /* Add thread back to thread pool */
  silc_list_add(conn->internal->thread_pool, thread);
  if (silc_list_count(conn->internal->thread_pool) == 1)
    silc_list_start(conn->internal->thread_pool);
}
Example #7
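/* Purges idle threads from the pool so that only `min_threads' free
   threads remain.  Each purged thread is signalled to stop. */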
void silc_thread_pool_purge(SilcThreadPool tp)
{
  SilcThreadPoolThread t;
  int i;

  silc_mutex_lock(tp->lock);

  if (silc_list_count(tp->free_threads) <= tp->min_threads) {
    SILC_LOG_DEBUG(("No threads to purge"));
    silc_mutex_unlock(tp->lock);
    return;
  }

  i = silc_list_count(tp->free_threads) - tp->min_threads;

  SILC_LOG_DEBUG(("Purge %d threads", i));

  silc_list_start(tp->threads);
  while ((t = silc_list_get(tp->threads))) {
    silc_mutex_lock(t->lock);
    if (t->run) {
      silc_mutex_unlock(t->lock);
      continue;
    }

    /* Signal the thread to stop */
    t->stop = TRUE;
    silc_cond_signal(t->thread_signal);
    silc_mutex_unlock(t->lock);

    silc_list_del(tp->free_threads, t);

    i--;
    if (!i)
      break;
  }

  silc_list_start(tp->threads);
  silc_mutex_unlock(tp->lock);
}
Example #8
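/* Returns an allocated array describing the private message keys that
   have been set for clients in the connection's client cache.  The
   number of entries is stored to `key_count' if it is non-NULL. */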
SilcPrivateMessageKeys
silc_client_list_private_message_keys(SilcClient client,
				      SilcClientConnection conn,
				      SilcUInt32 *key_count)
{
  SilcPrivateMessageKeys keys;
  SilcUInt32 count = 0;
  SilcList list;
  SilcIDCacheEntry id_cache;
  SilcClientEntry entry;

  if (!client || !conn)
    return NULL;

  silc_mutex_lock(conn->internal->lock);
  if (!silc_idcache_get_all(conn->internal->client_cache, &list)) {
    silc_mutex_unlock(conn->internal->lock);
    return NULL;
  }

  keys = silc_calloc(silc_list_count(list), sizeof(*keys));
  if (!keys) {
    silc_mutex_unlock(conn->internal->lock);
    return NULL;
  }

  silc_list_start(list);
  while ((id_cache = silc_list_get(list))) {
    entry = id_cache->context;
    if (entry->internal.send_key) {
      keys[count].client_entry = entry;
      keys[count].cipher =
	(char *)silc_cipher_get_name(entry->internal.send_key);
      keys[count].key = (entry->internal.generated == FALSE ?
			 entry->internal.key : NULL);
      keys[count].key_len = (entry->internal.generated == FALSE ?
			     entry->internal.key_len : 0);
      count++;
    }
  }

  silc_mutex_unlock(conn->internal->lock);

  if (key_count)
    *key_count = count;

  return keys;
}
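A minimal usage sketch, assuming the library's counterpart silc_client_free_private_message_keys releases the array and that SilcClientEntry exposes a nickname field:

/* Hypothetical usage: print which clients have a private message key
   set.  Assumes `client' and `conn' are valid handles. */
SilcUInt32 count = 0;
SilcPrivateMessageKeys keys =
  silc_client_list_private_message_keys(client, conn, &count);
if (keys) {
  SilcUInt32 i;
  for (i = 0; i < count; i++)
    SILC_LOG_DEBUG(("%s uses cipher %s",
		    keys[i].client_entry->nickname, keys[i].cipher));
  silc_client_free_private_message_keys(keys, count);
}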
Example #9
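/* Runs `run' in a thread from the pool.  If all threads are busy a new
   thread is created, up to `max_threads'.  When the maximum is reached
   the call is queued if `queuable' is TRUE, otherwise FALSE is returned.
   The `completion' callback is delivered through `schedule' if one is
   given, otherwise it is called directly from the thread. */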
SilcBool silc_thread_pool_run(SilcThreadPool tp,
			      SilcBool queuable,
			      SilcSchedule schedule,
			      SilcThreadPoolFunc run,
			      void *run_context,
			      SilcTaskCallback completion,
			      void *completion_context)
{
  SilcThreadPoolThread t, q;

  silc_mutex_lock(tp->lock);

  if (tp->destroy) {
    silc_mutex_unlock(tp->lock);
    silc_set_errno(SILC_ERR_NOT_VALID);
    return FALSE;
  }

  /* Get free thread */
  silc_list_start(tp->free_threads);
  t = silc_list_get(tp->free_threads);
  if (!t || t->stop) {
    if (silc_list_count(tp->threads) + 1 > tp->max_threads) {
      /* Maximum threads reached */
      if (!queuable) {
	silc_mutex_unlock(tp->lock);
	silc_set_errno(SILC_ERR_LIMIT);
	return FALSE;
      }

      /* User wants to queue this call until a thread becomes free.  Get
	 a thread to assign this call to. */
      t = silc_list_get(tp->threads);
      if (!t) {
	/* List wraps around */
	silc_list_start(tp->threads);
	t = silc_list_get(tp->threads);
      }
      silc_mutex_unlock(tp->lock);

      SILC_LOG_DEBUG(("Queue call %p, context %p in thread %p",
		      run, run_context, t));

      silc_mutex_lock(t->lock);

      /* Get free call context from the list */
      silc_list_start(t->free_queue);
      q = silc_list_get(t->free_queue);
      if (!q) {
	q = silc_scalloc(tp->stack, 1, sizeof(*q));
	if (!q) {
	  silc_mutex_unlock(t->lock);
	  return FALSE;
	}
      } else {
	silc_list_del(t->free_queue, q);
      }

      q->run = run;
      q->run_context = run_context;
      q->completion = completion;
      q->completion_context = completion_context;
      q->schedule = schedule;

      /* Add at the start of the list.  It gets executed first. */
      silc_list_insert(t->queue, NULL, q);
      silc_mutex_unlock(t->lock);
      return TRUE;
    } else {
      /* Create new thread */
      t = silc_thread_pool_new_thread(tp);
      if (!t) {
	silc_mutex_unlock(tp->lock);
	return FALSE;
      }
    }
  }

  silc_list_del(tp->free_threads, t);
  silc_mutex_unlock(tp->lock);

  SILC_LOG_DEBUG(("Run call %p, context %p, thread %p", run, run_context, t));

  silc_mutex_lock(t->lock);

  /* Mark this call to be executed in this thread */
  t->run = run;
  t->run_context = run_context;
  t->completion = completion;
  t->completion_context = completion_context;
  t->schedule = schedule;

  /* Signal the thread */
  silc_cond_signal(t->thread_signal);
  silc_mutex_unlock(t->lock);

  return TRUE;
}
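A minimal usage sketch, assuming the pool is created with silc_thread_pool_alloc(stack, min_threads, max_threads, start_min_threads) as in the runtime toolkit. The callback signatures follow the calls this file makes: run(schedule, context) and completion(schedule, app_context, type, fd, context). A NULL scheduler makes the completion run directly in the pool thread.

/* Hypothetical usage: run one job in the pool and wait for it. */
static void my_job(SilcSchedule schedule, void *context)
{
  /* Heavy work executed inside a pool thread. */
}

static void my_done(SilcSchedule schedule, void *app_context,
		    SilcTaskEvent type, SilcUInt32 fd, void *context)
{
  /* Called after my_job; with a NULL scheduler this runs directly in
     the pool thread. */
}

static void start_job(void)
{
  SilcThreadPool tp = silc_thread_pool_alloc(NULL, 2, 8, TRUE);
  if (!tp)
    return;
  silc_thread_pool_run(tp, TRUE, NULL, my_job, NULL, my_done, NULL);
  silc_thread_pool_free(tp, TRUE);	/* waits for my_job to finish */
}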
Example #10
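/* The main loop of a pool thread.  Waits for a call to execute, runs it
   and its completion, then drains the thread's own queue, attempts to
   steal queued calls from other threads, and finally returns itself to
   the free list.  On stop the thread removes itself from the pool,
   destroys its resources, and signals the pool destructor if it was
   the last thread. */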
static void *silc_thread_pool_run_thread(void *context)
{
  SilcThreadPoolThread t = context, o, q;
  SilcThreadPool tp = t->tp;
  SilcMutex lock = t->lock;
  SilcCond thread_signal = t->thread_signal;

  silc_mutex_lock(lock);

  while (1) {
    /* Wait here for code to execute */
    while (!t->run && !t->stop)
      silc_cond_wait(thread_signal, lock);

    if (t->stop)
      goto stop;

    /* Execute code */
    silc_mutex_unlock(lock);
  execute:
    SILC_LOG_DEBUG(("Execute call %p, context %p, thread %p", t->run,
		    t->run_context, t));
    t->run(t->schedule, t->run_context);

    /* If scheduler is NULL, call completion directly from here.  Otherwise
       it is called through the scheduler in the thread where the scheduler
       is running. */
    if (t->completion) {
      if (t->schedule) {
	SILC_LOG_DEBUG(("Run completion through scheduler %p", t->schedule));
	if (!silc_schedule_task_add_timeout(t->schedule, t->completion,
					    t->completion_context, 0, 0)) {
	  SILC_LOG_DEBUG(("Run completion directly"));
	  t->completion(NULL, NULL, 0, 0, t->completion_context);
	}
	silc_schedule_wakeup(t->schedule);
      } else {
	SILC_LOG_DEBUG(("Run completion directly"));
	t->completion(NULL, NULL, 0, 0, t->completion_context);
      }
    }

    silc_mutex_lock(lock);
    if (t->stop)
      goto stop;

    /* Check if there are calls in queue.  Takes the most recently added
       call since new ones are added at the start of the list. */
    if (silc_list_count(t->queue) > 0) {
    execute_queue:
      silc_list_start(t->queue);
      q = silc_list_get(t->queue);

      SILC_LOG_DEBUG(("Execute call from queue"));

      /* Execute this call now */
      t->run = q->run;
      t->run_context = q->run_context;
      t->completion = q->completion;
      t->completion_context = q->completion_context;
      t->schedule = q->schedule;

      silc_list_del(t->queue, q);
      silc_list_add(t->free_queue, q);
      silc_mutex_unlock(lock);
      goto execute;
    }

    silc_mutex_unlock(lock);
    silc_mutex_lock(tp->lock);

    /* Nothing to do.  Attempt to steal call from some other thread. */
    o = silc_list_get(tp->threads);
    if (!o) {
      /* List wraps around */
      silc_list_start(tp->threads);
      o = silc_list_get(tp->threads);
    }

    /* Check that the other thread is valid and has something to execute. */
    silc_mutex_lock(o->lock);
    if (o == t || o->stop || silc_list_count(o->queue) == 0) {
      silc_mutex_unlock(o->lock);
      o = NULL;
    }

    if (o) {
      silc_mutex_unlock(tp->lock);
      silc_list_start(o->queue);
      q = silc_list_get(o->queue);

      SILC_LOG_DEBUG(("Execute call from queue from thread %p", o));

      /* Execute this call now */
      t->run = q->run;
      t->run_context = q->run_context;
      t->completion = q->completion;
      t->completion_context = q->completion_context;
      t->schedule = q->schedule;

      silc_list_del(o->queue, q);
      silc_list_add(o->free_queue, q);
      silc_mutex_unlock(o->lock);
      goto execute;
    }

    silc_mutex_lock(lock);
    if (t->stop) {
      silc_mutex_unlock(tp->lock);
      goto stop;
    }

    /* Now that we have the lock back, check the queue again. */
    if (silc_list_count(t->queue) > 0) {
      silc_mutex_unlock(tp->lock);
      goto execute_queue;
    }

    /* The thread is now free for use again. */
    t->run = NULL;
    t->completion = NULL;
    t->schedule = NULL;
    silc_list_add(tp->free_threads, t);
    silc_mutex_unlock(tp->lock);
  }

 stop:
  /* Stop the thread.  Remove from threads list. */
  SILC_LOG_DEBUG(("Stop thread %p", t));

  /* We can unlock the thread now.  After we get the thread pool lock
     no one can retrieve the thread anymore. */
  silc_mutex_unlock(lock);
  silc_mutex_lock(tp->lock);

  silc_list_del(tp->threads, t);
  silc_list_start(tp->threads);

  /* Clear thread's call queue. */
  silc_list_start(t->queue);
  silc_list_start(t->free_queue);
  while ((q = silc_list_get(t->queue)))
    silc_sfree(tp->stack, q);
  while ((q = silc_list_get(t->free_queue)))
    silc_sfree(tp->stack, q);

  /* Destroy the thread */
  silc_mutex_free(lock);
  silc_cond_free(thread_signal);
  silc_sfree(tp->stack, t);

  /* If we are last thread, signal the waiting destructor. */
  if (silc_list_count(tp->threads) == 0)
    silc_cond_signal(tp->pool_signal);

  /* Release pool reference.  Releases lock also. */
  silc_thread_pool_unref(tp);

  return NULL;
}