SilcServerCommand silc_server_command_alloc(SilcServerThread thread)
{
  SilcServerCommand cmd;

  silc_mutex_lock(thread->server->lock);

  /* Get command context from freelist or allocate new one. */
  cmd = silc_list_get(thread->server->command_pool);
  if (!cmd) {
    silc_mutex_unlock(thread->server->lock);

    cmd = silc_calloc(1, sizeof(*cmd));
    if (!cmd)
      return NULL;

    SILC_LOG_DEBUG(("Allocating command context %p", cmd));

    cmd->thread = thread;
    return cmd;
  }

  SILC_LOG_DEBUG(("Get command context %p", cmd));

  /* Delete from freelist */
  silc_list_del(thread->server->command_pool, cmd);

  cmd->thread = thread;
  silc_mutex_unlock(thread->server->lock);

  return cmd;
}
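
/* Free-side sketch (assumption, not part of this file): the matching
   routine would return the context to the freelist so the next
   silc_server_command_alloc call can reuse it.  The name
   silc_server_command_free and the memset reset are illustrative; the
   field accesses mirror those in silc_server_command_alloc above. */
void silc_server_command_free(SilcServerCommand cmd)
{
  SilcServerThread thread = cmd->thread;

  /* Reset the context for reuse; the next alloc call sets the back
     pointer again. */
  memset(cmd, 0, sizeof(*cmd));

  silc_mutex_lock(thread->server->lock);
  silc_list_add(thread->server->command_pool, cmd);
  silc_mutex_unlock(thread->server->lock);
}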
int silc_idlist_get_clients_by_hash(SilcIDList id_list,
				    char *nickname, char *server,
				    SilcHash md5hash,
				    SilcClientEntry **clients,
				    SilcUInt32 *clients_count)
{
  SilcList list;
  SilcIDCacheEntry id_cache = NULL;
  unsigned char hash[SILC_HASH_MAXLEN];
  SilcClientID client_id;
  SilcClientEntry client_entry;

  SILC_LOG_DEBUG(("Start"));

  silc_hash_make(md5hash, nickname, strlen(nickname), hash);

  /* As the Client ID is hashed in the ID cache by hashing only the hash
     from the Client ID, we can do a lookup with only the hash, not the
     other parts of the ID, and get all the clients with that hash, i.e.
     with that nickname, as the hash is computed from the nickname. */
  memset(&client_id, 0, sizeof(client_id));
  memcpy(&client_id.hash, hash, sizeof(client_id.hash));
  if (!silc_idcache_find_by_id(id_list->clients, &client_id, &list))
    return FALSE;

  /* If server is specified, narrow the search with it. */
  if (server) {
    silc_list_start(list);
    while ((id_cache = silc_list_get(list))) {
      client_entry = id_cache->context;
      if (!client_entry->servername)
	continue;
      if (!silc_utf8_strcasecmp(client_entry->servername, server))
	silc_list_del(list, id_cache);
    }
  }

  if (!silc_list_count(list))
    return FALSE;

  *clients = silc_realloc(*clients,
			  (silc_list_count(list) + *clients_count) *
			  sizeof(**clients));
  if (!*clients)
    return FALSE;

  silc_list_start(list);
  while ((id_cache = silc_list_get(list)))
    (*clients)[(*clients_count)++] = id_cache->context;

  SILC_LOG_DEBUG(("Found total %d clients", *clients_count));

  return TRUE;
}
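
/* Caller sketch (assumption, not part of this file): resolving all
   clients with nickname "foo" on server "silc.example.com".  The
   id_list and md5hash arguments come from the server context in real
   code; find_clients is an illustrative wrapper. */
static void find_clients(SilcIDList id_list, SilcHash md5hash)
{
  SilcClientEntry *clients = NULL;
  SilcUInt32 clients_count = 0, i;

  if (!silc_idlist_get_clients_by_hash(id_list, "foo", "silc.example.com",
				       md5hash, &clients, &clients_count))
    return;

  for (i = 0; i < clients_count; i++)
    SILC_LOG_DEBUG(("Client %p", clients[i]));

  /* The array is allocated with silc_realloc; the caller frees it. */
  silc_free(clients);
}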
static SilcBool silc_client_packet_receive(SilcPacketEngine engine,
					   SilcPacketStream stream,
					   SilcPacket packet,
					   void *callback_context,
					   void *stream_context)
{
  SilcClientConnection conn = stream_context;
  SilcFSMThread thread;

  /* Packets we do not handle */
  switch (packet->type) {
  case SILC_PACKET_HEARTBEAT:
  case SILC_PACKET_SUCCESS:
  case SILC_PACKET_FAILURE:
  case SILC_PACKET_REJECT:
  case SILC_PACKET_KEY_EXCHANGE:
  case SILC_PACKET_KEY_EXCHANGE_1:
  case SILC_PACKET_KEY_EXCHANGE_2:
  case SILC_PACKET_REKEY_DONE:
  case SILC_PACKET_CONNECTION_AUTH:
    return FALSE;
  default:
    break;
  }

  /* Get packet processing thread from the pool, or allocate a new one
     if the pool is empty. */
  thread = silc_list_get(conn->internal->thread_pool);
  if (!thread) {
    thread = silc_fsm_thread_alloc(&conn->internal->fsm, conn,
				   silc_client_packet_destructor,
				   NULL, FALSE);
    if (!thread)
      return FALSE;
  } else {
    silc_list_del(conn->internal->thread_pool, thread);
    silc_fsm_thread_init(thread, &conn->internal->fsm, conn,
			 silc_client_packet_destructor, NULL, FALSE);
  }

  /* Process packet in thread */
  silc_fsm_set_state_context(thread, packet);
  silc_fsm_start_sync(thread, silc_client_connection_st_packet);

  return TRUE;
}
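
/* Destructor sketch (assumption, not part of this file): the
   silc_client_packet_destructor referenced above would return the
   finished FSM thread to the connection's thread pool for reuse by the
   next received packet.  The body is illustrative; the actual
   implementation may differ. */
static void silc_client_packet_destructor(SilcFSMThread thread,
					  void *fsm_context,
					  void *destructor_context)
{
  SilcClientConnection conn = fsm_context;

  /* Add thread back to the thread pool. */
  silc_list_add(conn->internal->thread_pool, thread);
  if (silc_list_count(conn->internal->thread_pool) == 1)
    silc_list_start(conn->internal->thread_pool);
}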
void silc_thread_pool_purge(SilcThreadPool tp)
{
  SilcThreadPoolThread t;
  int i;

  silc_mutex_lock(tp->lock);

  if (silc_list_count(tp->free_threads) <= tp->min_threads) {
    SILC_LOG_DEBUG(("No threads to purge"));
    silc_mutex_unlock(tp->lock);
    return;
  }

  i = silc_list_count(tp->free_threads) - tp->min_threads;
  SILC_LOG_DEBUG(("Purge %d threads", i));

  silc_list_start(tp->threads);
  while ((t = silc_list_get(tp->threads))) {
    silc_mutex_lock(t->lock);

    if (t->run) {
      silc_mutex_unlock(t->lock);
      continue;
    }

    /* Signal the thread to stop */
    t->stop = TRUE;
    silc_cond_signal(t->thread_signal);
    silc_mutex_unlock(t->lock);

    silc_list_del(tp->free_threads, t);

    i--;
    if (!i)
      break;
  }

  silc_list_start(tp->threads);
  silc_mutex_unlock(tp->lock);
}
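
/* Usage sketch (assumption, not part of this file): calling the purge
   from a periodic scheduler task so idle threads above min_threads are
   reaped.  The task name and the 60 second interval are illustrative;
   silc_thread_pool_purge takes the pool lock itself, so it may be
   called from any thread. */
static void purge_pool_task(SilcSchedule schedule, void *app_context,
			    SilcTaskEvent type, SilcUInt32 fd,
			    void *context)
{
  SilcThreadPool tp = context;

  silc_thread_pool_purge(tp);

  /* Re-arm the task to run again in 60 seconds. */
  silc_schedule_task_add_timeout(schedule, purge_pool_task, tp, 60, 0);
}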
SilcBool silc_thread_pool_run(SilcThreadPool tp,
			      SilcBool queuable,
			      SilcSchedule schedule,
			      SilcThreadPoolFunc run,
			      void *run_context,
			      SilcTaskCallback completion,
			      void *completion_context)
{
  SilcThreadPoolThread t, q;

  silc_mutex_lock(tp->lock);

  if (tp->destroy) {
    silc_mutex_unlock(tp->lock);
    silc_set_errno(SILC_ERR_NOT_VALID);
    return FALSE;
  }

  /* Get free thread */
  silc_list_start(tp->free_threads);
  t = silc_list_get(tp->free_threads);
  if (!t || t->stop) {
    if (silc_list_count(tp->threads) + 1 > tp->max_threads) {
      /* Maximum threads reached */
      if (!queuable) {
	silc_mutex_unlock(tp->lock);
	silc_set_errno(SILC_ERR_LIMIT);
	return FALSE;
      }

      /* User wants to queue this call until a thread becomes free.
	 Get a thread to assign this call to. */
      t = silc_list_get(tp->threads);
      if (!t) {
	/* List wraps around */
	silc_list_start(tp->threads);
	t = silc_list_get(tp->threads);
      }
      silc_mutex_unlock(tp->lock);

      SILC_LOG_DEBUG(("Queue call %p, context %p in thread %p",
		      run, run_context, t));

      silc_mutex_lock(t->lock);

      /* Get free call context from the list */
      silc_list_start(t->free_queue);
      q = silc_list_get(t->free_queue);
      if (!q) {
	q = silc_scalloc(tp->stack, 1, sizeof(*q));
	if (!q) {
	  silc_mutex_unlock(t->lock);
	  return FALSE;
	}
      } else {
	silc_list_del(t->free_queue, q);
      }

      q->run = run;
      q->run_context = run_context;
      q->completion = completion;
      q->completion_context = completion_context;
      q->schedule = schedule;

      /* Add at the start of the list.  It gets executed first. */
      silc_list_insert(t->queue, NULL, q);
      silc_mutex_unlock(t->lock);
      return TRUE;
    } else {
      /* Create new thread */
      t = silc_thread_pool_new_thread(tp);
      if (!t) {
	silc_mutex_unlock(tp->lock);
	return FALSE;
      }
    }
  }

  silc_list_del(tp->free_threads, t);
  silc_mutex_unlock(tp->lock);

  SILC_LOG_DEBUG(("Run call %p, context %p, thread %p", run, run_context, t));

  silc_mutex_lock(t->lock);

  /* Mark this call to be executed in this thread */
  t->run = run;
  t->run_context = run_context;
  t->completion = completion;
  t->completion_context = completion_context;
  t->schedule = schedule;

  /* Signal the thread */
  silc_cond_signal(t->thread_signal);
  silc_mutex_unlock(t->lock);

  return TRUE;
}
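
/* Usage sketch (assumption, not part of this file): dispatching a
   blocking job into the pool with a completion callback delivered
   through the caller's scheduler.  The names blocking_job, job_done
   and start_job are illustrative. */
static void blocking_job(SilcSchedule schedule, void *context)
{
  /* Runs inside a pool thread; it is safe to block here. */
}

static void job_done(SilcSchedule schedule, void *app_context,
		     SilcTaskEvent type, SilcUInt32 fd, void *context)
{
  /* Runs in the scheduler's thread after blocking_job returns. */
}

static void start_job(SilcThreadPool tp, SilcSchedule schedule)
{
  /* queuable = TRUE: if all max_threads are busy, the call is queued
     in one of the threads instead of failing with SILC_ERR_LIMIT. */
  if (!silc_thread_pool_run(tp, TRUE, schedule, blocking_job, NULL,
			    job_done, NULL))
    SILC_LOG_DEBUG(("Pool is being destroyed, call not started"));
}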
static void *silc_thread_pool_run_thread(void *context)
{
  SilcThreadPoolThread t = context, o, q;
  SilcThreadPool tp = t->tp;
  SilcMutex lock = t->lock;
  SilcCond thread_signal = t->thread_signal;

  silc_mutex_lock(lock);

  while (1) {
    /* Wait here for code to execute */
    while (!t->run && !t->stop)
      silc_cond_wait(thread_signal, lock);

    if (t->stop)
      goto stop;

    /* Execute code */
    silc_mutex_unlock(lock);
  execute:
    SILC_LOG_DEBUG(("Execute call %p, context %p, thread %p",
		    t->run, t->run_context, t));
    t->run(t->schedule, t->run_context);

    /* If scheduler is NULL, call completion directly from here.
       Otherwise it is called through the scheduler in the thread where
       the scheduler is running. */
    if (t->completion) {
      if (t->schedule) {
	SILC_LOG_DEBUG(("Run completion through scheduler %p", t->schedule));
	if (!silc_schedule_task_add_timeout(t->schedule, t->completion,
					    t->completion_context, 0, 0)) {
	  SILC_LOG_DEBUG(("Run completion directly"));
	  t->completion(NULL, NULL, 0, 0, t->completion_context);
	}
	silc_schedule_wakeup(t->schedule);
      } else {
	SILC_LOG_DEBUG(("Run completion directly"));
	t->completion(NULL, NULL, 0, 0, t->completion_context);
      }
    }

    silc_mutex_lock(lock);
    if (t->stop)
      goto stop;

    /* Check if there are calls in queue.  Takes the most recently
       added call since new ones are added at the start of the list. */
    if (silc_list_count(t->queue) > 0) {
    execute_queue:
      silc_list_start(t->queue);
      q = silc_list_get(t->queue);

      SILC_LOG_DEBUG(("Execute call from queue"));

      /* Execute this call now */
      t->run = q->run;
      t->run_context = q->run_context;
      t->completion = q->completion;
      t->completion_context = q->completion_context;
      t->schedule = q->schedule;

      silc_list_del(t->queue, q);
      silc_list_add(t->free_queue, q);
      silc_mutex_unlock(lock);
      goto execute;
    }

    silc_mutex_unlock(lock);
    silc_mutex_lock(tp->lock);

    /* Nothing to do.  Attempt to steal a call from some other thread. */
    o = silc_list_get(tp->threads);
    if (!o) {
      /* List wraps around.  It is never empty; this thread itself is
	 on it. */
      silc_list_start(tp->threads);
      o = silc_list_get(tp->threads);
    }

    /* Check that the other thread is valid and has something to
       execute. */
    silc_mutex_lock(o->lock);
    if (o == t || o->stop || silc_list_count(o->queue) == 0) {
      silc_mutex_unlock(o->lock);
      o = NULL;
    }

    if (o) {
      silc_mutex_unlock(tp->lock);

      silc_list_start(o->queue);
      q = silc_list_get(o->queue);

      SILC_LOG_DEBUG(("Execute call from queue from thread %p", o));

      /* Execute this call now */
      t->run = q->run;
      t->run_context = q->run_context;
      t->completion = q->completion;
      t->completion_context = q->completion_context;
      t->schedule = q->schedule;

      silc_list_del(o->queue, q);
      silc_list_add(o->free_queue, q);
      silc_mutex_unlock(o->lock);
      goto execute;
    }

    silc_mutex_lock(lock);
    if (t->stop) {
      silc_mutex_unlock(tp->lock);
      goto stop;
    }

    /* Now that we have the lock back, check the queue again. */
    if (silc_list_count(t->queue) > 0) {
      silc_mutex_unlock(tp->lock);
      goto execute_queue;
    }

    /* The thread is now free for use again. */
    t->run = NULL;
    t->completion = NULL;
    t->schedule = NULL;
    silc_list_add(tp->free_threads, t);
    silc_mutex_unlock(tp->lock);
  }

 stop:
  /* Stop the thread.  Remove from threads list. */
  SILC_LOG_DEBUG(("Stop thread %p", t));

  /* We can unlock the thread now.  After we get the thread pool lock
     no one can retrieve the thread anymore. */
  silc_mutex_unlock(lock);
  silc_mutex_lock(tp->lock);

  silc_list_del(tp->threads, t);
  silc_list_start(tp->threads);

  /* Clear thread's call queue. */
  silc_list_start(t->queue);
  silc_list_start(t->free_queue);
  while ((q = silc_list_get(t->queue)))
    silc_sfree(tp->stack, q);
  while ((q = silc_list_get(t->free_queue)))
    silc_sfree(tp->stack, q);

  /* Destroy the thread */
  silc_mutex_free(lock);
  silc_cond_free(thread_signal);
  silc_sfree(tp->stack, t);

  /* If we are the last thread, signal the waiting destructor. */
  if (silc_list_count(tp->threads) == 0)
    silc_cond_signal(tp->pool_signal);

  /* Release pool reference.  Releases the pool lock also. */
  silc_thread_pool_unref(tp);

  return NULL;
}
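
/* Lifecycle sketch (assumption, not part of this file): allocating a
   pool, running one call in it and freeing the pool.  The signatures
   of silc_thread_pool_alloc and silc_thread_pool_free are assumed from
   the public SilcThread API (silcthread.h); verify before use.
   pool_job and pool_example are illustrative names. */
static void pool_job(SilcSchedule schedule, void *context)
{
  /* Work executed by silc_thread_pool_run_thread above. */
}

static void pool_example(void)
{
  /* NULL stack, keep at least 2 threads, allow at most 8, start the
     minimum number of threads immediately. */
  SilcThreadPool tp = silc_thread_pool_alloc(NULL, 2, 8, TRUE);
  if (!tp)
    return;

  /* No scheduler and no completion callback in this minimal case. */
  silc_thread_pool_run(tp, FALSE, NULL, pool_job, NULL, NULL, NULL);

  /* TRUE: wait for unfinished calls before freeing the pool. */
  silc_thread_pool_free(tp, TRUE);
}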