/* Return the set of hosts assigned to the calling worker thread.
 *
 * Fast paths: if one of the two per-thread queues (unprocessed/processed)
 * is empty, the other queue already holds every host, so it is returned
 * directly without copying. Otherwise a combined queue is built: the
 * previous combined queue (if any) is discarded, the processed hosts are
 * copied, and the unprocessed hosts are appended via concat_queue_iter.
 *
 * Returns NULL if the calling thread has no registered thread data.
 * The returned queue is owned by the thread data; callers must not free it. */
static GQueue* _schedulerpolicyhoststeal_getHosts(SchedulerPolicy* policy) {
    MAGIC_ASSERT(policy);
    HostStealPolicyData* policyData = policy->data;

    /* the thread-data map is shared; take the read lock only for the lookup */
    g_rw_lock_reader_lock(&policyData->lock);
    HostStealThreadData* threadData = g_hash_table_lookup(
            policyData->threadToThreadDataMap, GUINT_TO_POINTER(pthread_self()));
    g_rw_lock_reader_unlock(&policyData->lock);

    if(!threadData) {
        return NULL;
    }

    /* if either queue is empty, the other one is already the full host set */
    if(g_queue_is_empty(threadData->unprocessedHosts)) {
        return threadData->processedHosts;
    }
    if(g_queue_is_empty(threadData->processedHosts)) {
        return threadData->unprocessedHosts;
    }

    /* both queues are non-empty: rebuild the combined view, dropping any
     * previously built combination first */
    if(threadData->allHosts) {
        g_queue_free(threadData->allHosts);
    }
    threadData->allHosts = g_queue_copy(threadData->processedHosts);
    g_queue_foreach(threadData->unprocessedHosts, (GFunc)concat_queue_iter,
            threadData->allHosts);
    return threadData->allHosts;
}
/* Verifies that g_queue_copy produces a structurally independent queue:
 * same element data pointers, freshly linked nodes, correct head/tail. */
TEST_F(GQueueTest, copy)
{
    int testData1 = 42;
    int testData2 = 1337;

    // Build a two-element backing list and wire the fixture queue around it.
    GList *list = NULL;
    list = g_list_append(list, &testData1);
    list = g_list_append(list, &testData2);
    queue->head = list;
    // BUGFIX: the tail of a two-element list is list->next, not
    // list->next->next (which is NULL). With a NULL tail the final
    // ASSERT_NE below passed trivially instead of checking node identity.
    queue->tail = list->next;
    queue->length = 2;

    GQueue *copiedQueue = g_queue_copy(queue);

    ASSERT_EQ(&testData1, copiedQueue->head->data) << "copied first queue element data should be set";
    ASSERT_TRUE(copiedQueue->head->prev == NULL) << "copied first queue element should not have a previous element";
    ASSERT_TRUE(copiedQueue->head->next != NULL) << "copied first queue element should have a next element";
    ASSERT_EQ(copiedQueue->tail, copiedQueue->head->next) << "copied first queue element next should be equal to queue tail";
    ASSERT_EQ(&testData2, copiedQueue->tail->data) << "copied second queue element data should be set";
    ASSERT_EQ(copiedQueue->head, copiedQueue->tail->prev) << "copied second queue element should have the first as previous element";
    ASSERT_TRUE(copiedQueue->tail->next == NULL) << "copied second queue element should not have a next element";
    // The copy must consist of new nodes, not aliases of the originals.
    ASSERT_NE(queue->head, copiedQueue->head) << "copied queue should not be equal to original queue";
    ASSERT_NE(queue->tail, copiedQueue->tail) << "copied queue second element should not be equal to original queue second element";

    // NOTE(review): `list` is left attached to the fixture queue; presumably
    // the fixture's TearDown frees it — confirm to rule out a test leak.
    g_queue_free(copiedQueue);
}
API bool connectClientSocketAsync(Socket *s, int timeout) { if(s->connected) { logError("Cannot connect already connected socket %d", s->fd); return false; } if(s->type != SOCKET_CLIENT) { logError("Cannot asynchronously connect non-client socket"); return false; } struct addrinfo hints; struct addrinfo *server; int ret; memset(&hints, 0, sizeof(struct addrinfo)); hints.ai_family = AF_UNSPEC; // don't care if we use IPv4 or IPv6 to reach our destination hints.ai_socktype = SOCK_STREAM; // TCP stream sockets if((ret = getaddrinfo(s->host, s->port, &hints, &server)) != 0) { logError("Failed to look up address %s:%s: %s", s->host, s->port, gai_strerror(ret)); return false; } if((s->fd = socket(server->ai_family, server->ai_socktype, server->ai_protocol)) == -1) { logSystemError("Failed to create socket"); return false; } if(!setSocketNonBlocking(s->fd)) { logSystemError("Failed to set socket non-blocking"); closeSocket(s); return false; } logNotice("Asynchronously connecting client socket %d to %s:%s...", s->fd, s->host, s->port); if(connect(s->fd, server->ai_addr, server->ai_addrlen) < 0) { // try to connect socket #ifdef WIN32 if(WSAGetLastError() == WSAEINPROGRESS || WSAGetLastError() == WSAEWOULDBLOCK) { #else if(errno == EINPROGRESS) { #endif // free previous timer if present free(s->custom); AsyncConnectionTimer *timer = ALLOCATE_OBJECT(AsyncConnectionTimer); timer->creationTime = getMicroTime(); timer->timeout = timeout; s->custom = timer; g_queue_push_tail(connecting, s); // add to connecting list logInfo("Socket %d delayed connection, queueing...", s->fd); } else { #ifdef WIN32 char *error = g_win32_error_message(WSAGetLastError()); logError("Connection for socket %d failed: %s", s->fd, error); free(error); #else logSystemError("Connection for socket %d failed", s->fd); #endif closeSocket(s); freeaddrinfo(server); return false; } } else { logNotice("Direct response for asynchronous connection on socket %d", s->fd); s->connected = true; triggerEvent(s, 
"connected"); enableSocketPolling(s); } freeaddrinfo(server); return true; } API bool enableSocketPolling(Socket *socket) { if(isSocketPollingEnabled(socket)) { // Socket with that fd is already polled return false; } int *fd = ALLOCATE_OBJECT(int); *fd = socket->fd; g_hash_table_insert(poll_table, fd, socket); return true; } API bool isSocketPollingEnabled(Socket *socket) { return g_hash_table_lookup(poll_table, &socket->fd) != NULL; } API bool disableSocketPolling(Socket *socket) { return g_hash_table_remove(poll_table, &socket->fd) == true; } API void pollSockets() { if(!polling) { polling = true; // set polling flag to lock our poll table in order to make this function reentrancy safe if(!g_queue_is_empty(connecting)) { GQueue *connectingSockets = g_queue_copy(connecting); // copy the connecting socket list so we may modify the list while polling for(GList *iter = connectingSockets->head; iter != NULL; iter = iter->next) { if(pollConnectingSocket(iter->data)) { // poll the connecting socket // The socket should no longer be polled g_queue_remove(connecting, iter->data); // remove it from the original connecting queue } } g_queue_free(connectingSockets); // remove our temporary iteration list } GList *sockets = g_hash_table_get_values(poll_table); // get a static list of sockets so we may modify the hash table while polling for(GList *iter = sockets; iter != NULL; iter = iter->next) { Socket *poll = iter->data; int fd; // storage for the file descriptor that won't be available anymore in case the socket gets freed before we remove it if(pollSocket(poll, &fd)) { // poll the socket // The socket should no longer be polled g_hash_table_remove(poll_table, &fd); // remove it from the polling table } } g_list_free(sockets); // remove our temporary iteration list polling = false; // release pseudo lock on poll table } } API bool isSocketsPolling() { return polling; } API Socket *getPolledSocketByFd(int fd) { return g_hash_table_lookup(poll_table, &fd); } /** * Callback 
to poll all sockets signed up for polling */ TIMER_CALLBACK(poll) { pollSockets(); triggerEvent(NULL, "sockets_polled"); TIMER_ADD_TIMEOUT(pollInterval, poll); } /** * Polls a connecting socket and notifies the caller of whether it should be removed from the connecting polling queue afterwards * * @param socket the connecting socket to poll * @result true if the socket should be removed from the connecting polling queue after polling */ static bool pollConnectingSocket(Socket *socket) { assert(!socket->connected); assert(socket->type == SOCKET_CLIENT); assert(socket->custom != NULL); // Check whether the socket has timed out yet AsyncConnectionTimer *timer = socket->custom; if(getMicroTime() - timer->creationTime > timer->timeout) { // connection timed out logWarning("Asynchronous connection on socket %d timed out", socket->fd); closeSocket(socket); triggerEvent(socket, "disconnect"); return true; } // Initialize timeout struct timeval tv = {0, 0}; // Initialize write fd set fd_set fdset; FD_ZERO(&fdset); FD_SET(socket->fd, &fdset); // Select socket for write flag (connected) int ret; if((ret = select(socket->fd + 1, NULL, &fdset, NULL, &tv)) < 0) { #ifdef WIN32 if(WSAGetLastError() != WSAEINTR) { char *error = g_win32_error_message(WSAGetLastError()); logError("Error selecting socket %d for write flag (connected) while polling: %s", socket->fd, error); free(error); #else if(errno != EINTR) { logSystemError("Error selecting socket %d for write flag (connected) while polling", socket->fd); #endif closeSocket(socket); triggerEvent(socket, "disconnect"); return true; } // EINTR at this point means the socket is just not connected yet, so we can safely return and continue polling another time return false; } else if(ret > 0) { // there is a write flag on the socket // Socket selected for write, check if we're indeed connected int valopt; socklen_t lon = sizeof(int); if(getsockopt(socket->fd, SOL_SOCKET, SO_ERROR, (void*) (&valopt), &lon) < 0) { 
logSystemError("getsockopt() failed on socket %d", socket->fd); closeSocket(socket); triggerEvent(socket, "disconnect"); return true; } else if(valopt != 0) { // There was a connection error logSystemError("Asynchronous connection on socket %d failed", socket->fd); closeSocket(socket); triggerEvent(socket, "disconnect"); return true; } logNotice("Asynchronously connected socket %d", socket->fd); socket->connected = true; triggerEvent(socket, "connected"); enableSocketPolling(socket); return true; } // the socket doesn't have a write flag, so let's just wait until it's connected return false; } /** * Polls a socket and notifies the caller of whether it should be removed from the polling table afterwards * * @param socket the socket to poll * @param fd_p a pointer to an integer field to which the file descriptor of the socket should be written in case the socket should be removed from the polling table and could already be freed at that time * @result true if the socket should be removed from the polling table after polling */ static bool pollSocket(Socket *socket, int *fd_p) { *fd_p = socket->fd; // backup file descriptor if(!socket->connected) { // Socket is disconnected triggerEvent(socket, "disconnect"); return true; } if(socket->type != SOCKET_SERVER && socket->type != SOCKET_SERVER_BLOCK) { int ret; if((ret = socketReadRaw(socket, poll_buffer, SOCKET_POLL_BUFSIZE)) < 0) { if(socket->connected) { // socket is still connected, so the error was not fatal triggerEvent(socket, "error"); return false; } else { // socket was disconnected either by the peer or by a fatal error triggerEvent(socket, "disconnect"); return true; } } else if(ret > 0) { // we actually read something triggerEvent(socket, "read", poll_buffer, ret); } // else nothing to read right now } else { Socket *clientSocket; if((clientSocket = socketAccept(socket)) != NULL) { triggerEvent(socket, "accept", clientSocket); } else { if(socket->connected) { // socket is still connected, so the error was not 
fatal triggerEvent(socket, "error"); return false; } else { // socket was disconnected either by the peer or by a fatal error triggerEvent(socket, "disconnect"); return true; } } } return false; }
static void create_everything(struct main_context *ctx) { struct callmaster_config mc; struct control_tcp *ct; struct control_udp *cu; struct control_ng *cn; struct cli *cl; int kfd = -1; struct timeval tmp_tv; struct timeval redis_start, redis_stop; double redis_diff = 0; if (table < 0) goto no_kernel; if (kernel_create_table(table)) { fprintf(stderr, "FAILED TO CREATE KERNEL TABLE %i, KERNEL FORWARDING DISABLED\n", table); ilog(LOG_CRIT, "FAILED TO CREATE KERNEL TABLE %i, KERNEL FORWARDING DISABLED\n", table); if (no_fallback) exit(-1); goto no_kernel; } kfd = kernel_open_table(table); if (kfd == -1) { fprintf(stderr, "FAILED TO OPEN KERNEL TABLE %i, KERNEL FORWARDING DISABLED\n", table); ilog(LOG_CRIT, "FAILED TO OPEN KERNEL TABLE %i, KERNEL FORWARDING DISABLED\n", table); if (no_fallback) exit(-1); goto no_kernel; } no_kernel: ctx->p = poller_new(); if (!ctx->p) die("poller creation failed"); ctx->m = callmaster_new(ctx->p); if (!ctx->m) die("callmaster creation failed"); dtls_timer(ctx->p); ZERO(mc); rwlock_init(&mc.config_lock); mc.kernelfd = kfd; mc.kernelid = table; if (max_sessions < -1) { max_sessions = -1; } mc.max_sessions = max_sessions; mc.timeout = timeout; mc.silent_timeout = silent_timeout; mc.final_timeout = final_timeout; mc.delete_delay = delete_delay; mc.default_tos = tos; mc.b2b_url = b2b_url; mc.fmt = xmlrpc_fmt; mc.graphite_ep = graphite_ep; mc.graphite_interval = graphite_interval; mc.redis_subscribed_keyspaces = g_queue_copy(&keyspaces); if (redis_num_threads < 1) { #ifdef _SC_NPROCESSORS_ONLN redis_num_threads = sysconf( _SC_NPROCESSORS_ONLN ); #endif if (redis_num_threads < 1) { redis_num_threads = REDIS_RESTORE_NUM_THREADS; } } mc.redis_num_threads = redis_num_threads; ct = NULL; if (tcp_listen_ep.port) { ct = control_tcp_new(ctx->p, &tcp_listen_ep, ctx->m); if (!ct) die("Failed to open TCP control connection port"); } cu = NULL; if (udp_listen_ep.port) { interfaces_exclude_port(udp_listen_ep.port); cu = control_udp_new(ctx->p, 
&udp_listen_ep, ctx->m); if (!cu) die("Failed to open UDP control connection port"); } cn = NULL; if (ng_listen_ep.port) { interfaces_exclude_port(ng_listen_ep.port); cn = control_ng_new(ctx->p, &ng_listen_ep, ctx->m); if (!cn) die("Failed to open UDP control connection port"); } cl = NULL; if (cli_listen_ep.port) { interfaces_exclude_port(cli_listen_ep.port); cl = cli_new(ctx->p, &cli_listen_ep, ctx->m); if (!cl) die("Failed to open UDP CLI connection port"); } if (!is_addr_unspecified(&redis_write_ep.address)) { mc.redis_write = redis_new(&redis_write_ep, redis_write_db, redis_write_auth, ANY_REDIS_ROLE, no_redis_required); if (!mc.redis_write) die("Cannot start up without running Redis %s write database! See also NO_REDIS_REQUIRED paramter.", endpoint_print_buf(&redis_write_ep)); } if (!is_addr_unspecified(&redis_ep.address)) { mc.redis = redis_new(&redis_ep, redis_db, redis_auth, mc.redis_write ? ANY_REDIS_ROLE : MASTER_REDIS_ROLE, no_redis_required); mc.redis_notify = redis_new(&redis_ep, redis_db, redis_auth, mc.redis_write ? ANY_REDIS_ROLE : MASTER_REDIS_ROLE, no_redis_required); if (!mc.redis || !mc.redis_notify) die("Cannot start up without running Redis %s database! 
See also NO_REDIS_REQUIRED paramter.", endpoint_print_buf(&redis_ep)); if (!mc.redis_write) mc.redis_write = mc.redis; } mc.redis_expires_secs = redis_expires; ctx->m->conf = mc; if (!foreground) daemonize(); wpidfile(); ctx->m->homer = homer_sender_new(&homer_ep, homer_protocol, homer_id); if (mc.redis) { // start redis restore timer gettimeofday(&redis_start, NULL); // restore if (redis_restore(ctx->m, mc.redis)) die("Refusing to continue without working Redis database"); // stop redis restore timer gettimeofday(&redis_stop, NULL); // print redis restore duration redis_diff += timeval_diff(&redis_stop, &redis_start) / 1000.0; ilog(LOG_INFO, "Redis restore time = %.0lf ms", redis_diff); } gettimeofday(&ctx->m->latest_graphite_interval_start, NULL); timeval_from_us(&tmp_tv, graphite_interval*1000000); set_graphite_interval_tv(&tmp_tv); }