/*
 *	Exercise the fifo: fill it to capacity, then drain it and
 *	verify strict FIFO ordering.  Exit codes identify the point
 *	of failure.
 */
int main(int argc, char **argv)
{
	int n;
	int array[MAX];
	fr_fifo_t *fi = fr_fifo_create(MAX, NULL);

	if (!fi) exit(1);

	/*
	 *	Push a pointer to every slot, in order.
	 */
	for (n = 0; n < MAX; n++) {
		array[n] = n;
		if (!fr_fifo_push(fi, &array[n])) exit(2);
	}

	/*
	 *	Pop them back out; each must appear in insertion order.
	 */
	for (n = 0; n < MAX; n++) {
		int *popped = fr_fifo_pop(fi);

		if (!popped) {
			fprintf(stderr, "No pop at %d\n", n);
			exit(3);
		}
		if (*popped != n) exit(4);
	}

	exit(0);
}
/*
 *	Callback for freeing a client.
 *
 *	Dynamic clients (dynamic == 2) are not freed immediately: they
 *	are parked in the "deleted_clients" fifo for at least 120
 *	seconds, so that requests still referencing the client don't
 *	use freed memory.  Only the oldest queued client is actually
 *	released, and only once its grace period has expired.
 */
void client_free(RADCLIENT *client)
{
	if (!client) return;

#ifdef WITH_DYNAMIC_CLIENTS
	if (client->dynamic == 2) {
		time_t now;

		if (!deleted_clients) {
			deleted_clients = fr_fifo_create(1024,
							 (void *) client_free);
			/*
			 *	No fifo: the client is neither queued nor
			 *	freed.  Known leak on this path.
			 */
			if (!deleted_clients) return; /* MEMLEAK */
		}

		/*
		 *	Mark it as in the fifo, and remember when we
		 *	pushed it.
		 */
		client->dynamic = 3;
		client->created = now = time(NULL); /* re-set it */
		/*
		 *	NOTE(review): push failure (fifo full) is not
		 *	checked here — the client would be leaked, marked
		 *	dynamic == 3 but never queued.  Confirm whether
		 *	the 1024-slot fifo can fill in practice.
		 */
		fr_fifo_push(deleted_clients, client);

		/*
		 *	Peek at the head of the fifo.  If it might
		 *	still be in use, return.  Otherwise, pop it
		 *	from the queue and delete it.  At most one
		 *	client is reaped per call.
		 */
		client = fr_fifo_peek(deleted_clients);
		if ((client->created + 120) >= now) return;

		client = fr_fifo_pop(deleted_clients);
		rad_assert(client != NULL);
	}
#endif

	free(client->longname);
	free(client->secret);
	free(client->shortname);
	free(client->nastype);
	free(client->login);
	free(client->password);
	free(client->server);

#ifdef WITH_DYNAMIC_CLIENTS
	free(client->client_server);
#endif

	free(client);
}
/*
 *	Queue a request for handling by a worker thread.
 *	Called ONLY from the main handler thread, so request ordering
 *	is preserved by the caller.
 *
 *	Returns 1 on success, 0 when the request had to be discarded.
 */
static int request_enqueue(REQUEST *request, RAD_REQUEST_FUNP fun)
{
	pthread_mutex_t *lock = &thread_pool.queue_mutex;

	rad_assert(request->process == fun);

	pthread_mutex_lock(lock);
	thread_pool.request_count++;

	/*
	 *	Queue is at capacity: discard the request and mark it
	 *	done so it isn't processed further.
	 */
	if (thread_pool.num_queued >= thread_pool.max_queue_size) {
		pthread_mutex_unlock(lock);

		radlog(L_ERR, "!!! ERROR !!! The server is blocked: discarding new request %d", request->number);
		request->child_state = REQUEST_DONE;
		return 0;
	}

	/*
	 *	Insert into the fifo matching the request's priority.
	 *	On failure, give up on this request entirely.
	 */
	if (!fr_fifo_push(thread_pool.fifo[request->priority], request)) {
		pthread_mutex_unlock(lock);

		radlog(L_ERR, "!!! ERROR !!! Failed inserting request %d into the queue", request->number);
		request->child_state = REQUEST_DONE;
		return 0;
	}

	thread_pool.num_queued++;
	pthread_mutex_unlock(lock);

	/*
	 *	Wake one worker.  The semaphore post is deliberately
	 *	outside the mutex: the woken thread won't immediately
	 *	contend for a lock we still hold.
	 */
	sem_post(&thread_pool.semaphore);

	return 1;
}
/*
 *	Queue an incoming request for a worker thread.
 *	Called ONLY from the main handler thread.
 *
 *	Returns 1 on success, 0 when the request was dropped.
 */
int request_enqueue(REQUEST *request)
{
	/*
	 *	Periodically re-balance the thread pool, and always do
	 *	so when every thread is already busy.
	 */
	if ((last_cleaned < request->timestamp) ||
	    (thread_pool.active_threads == thread_pool.total_threads)) {
		thread_pool_manage(request->timestamp);
	}

	pthread_mutex_lock(&thread_pool.queue_mutex);

#ifdef WITH_STATS
#ifdef WITH_ACCOUNTING
	if (thread_pool.auto_limit_acct) {
		struct timeval now;

		/*
		 *	Under load, shed accounting traffic first: drop
		 *	it when accounting packets are already queued,
		 *	the queue is over half full, and input is
		 *	outpacing output.
		 */
		if ((request->packet->code == PW_ACCOUNTING_REQUEST) &&
		    (fr_fifo_num_elements(thread_pool.fifo[RAD_LISTEN_ACCT]) > 0) &&
		    (thread_pool.num_queued > (thread_pool.max_queue_size / 2)) &&
		    (thread_pool.pps_in.pps_now > thread_pool.pps_out.pps_now)) {
			pthread_mutex_unlock(&thread_pool.queue_mutex);
			return 0;
		}

		gettimeofday(&now, NULL);

		thread_pool.pps_in.pps = rad_pps(&thread_pool.pps_in.pps_old,
						 &thread_pool.pps_in.pps_now,
						 &thread_pool.pps_in.time_old,
						 &now);
		thread_pool.pps_in.pps_now++;
	}
#endif	/* WITH_ACCOUNTING */
#endif

	thread_pool.request_count++;

	/*
	 *	Queue full: drop the request, complaining at most once
	 *	per second.
	 */
	if (thread_pool.num_queued >= thread_pool.max_queue_size) {
		time_t now = time(NULL);
		static time_t last_complained = 0;
		int complain = (last_complained != now);

		if (complain) last_complained = now;

		pthread_mutex_unlock(&thread_pool.queue_mutex);

		if (complain) {
			radlog(L_ERR, "Something is blocking the server. There are %d packets in the queue, waiting to be processed. Ignoring the new request.", thread_pool.max_queue_size);
		}
		return 0;
	}

	request->component = "<core>";
	request->module = "<queue>";

	/*
	 *	Insert into the fifo matching the request's priority.
	 */
	if (!fr_fifo_push(thread_pool.fifo[request->priority], request)) {
		pthread_mutex_unlock(&thread_pool.queue_mutex);
		radlog(L_ERR, "!!! ERROR !!! Failed inserting request %d into the queue", request->number);
		return 0;
	}

	thread_pool.num_queued++;
	pthread_mutex_unlock(&thread_pool.queue_mutex);

	/*
	 *	Wake one worker.  The semaphore post happens outside
	 *	the mutex so the woken thread doesn't immediately block
	 *	on a lock we still hold.
	 */
	sem_post(&thread_pool.semaphore);

	return 1;
}
/*
 *	Add a request to the list of waiting requests.
 *	This function gets called ONLY from the main handler thread...
 *
 *	This function should never fail.
 *
 *	Returns 1 on success, 0 when the request was discarded (queue
 *	full or fifo insertion failure); in either failure case the
 *	request is marked REQUEST_DONE.
 */
static int request_enqueue(REQUEST *request, RAD_REQUEST_FUNP fun)
{
	request_queue_t *entry;

	/*
	 *	If we haven't checked the number of child threads
	 *	in a while, OR if the thread pool appears to be full,
	 *	go manage it.
	 */
	if ((last_cleaned < request->timestamp) ||
	    (thread_pool.active_threads == thread_pool.total_threads) ||
	    (thread_pool.exited_threads > 0)) {
		thread_pool_manage(request->timestamp);
	}

	pthread_mutex_lock(&thread_pool.queue_mutex);

	thread_pool.request_count++;

	/*
	 *	Queue full: drop the request, complaining at most once
	 *	per second.
	 */
	if (thread_pool.num_queued >= thread_pool.max_queue_size) {
		int complain = FALSE;
		time_t now;
		static time_t last_complained = 0;

		now = time(NULL);
		if (last_complained != now) {
			last_complained = now;
			complain = TRUE;
		}

		pthread_mutex_unlock(&thread_pool.queue_mutex);

		/*
		 *	Mark the request as done.
		 */
		if (complain) {
			radlog(L_ERR, "Something is blocking the server. There are %d packets in the queue, waiting to be processed. Ignoring the new request.", thread_pool.max_queue_size);
		}
		request->child_state = REQUEST_DONE;
		return 0;
	}

	request->child_state = REQUEST_QUEUED;
	request->component = "<core>";
	request->module = "<queue>";

	/* rad_malloc is presumed to abort on OOM — no NULL check needed */
	entry = rad_malloc(sizeof(*entry));
	entry->request = request;
	entry->fun = fun;

	/*
	 *	Push the request onto the appropriate fifo for that
	 */
	if (!fr_fifo_push(thread_pool.fifo[request->priority], entry)) {
		pthread_mutex_unlock(&thread_pool.queue_mutex);
		radlog(L_ERR, "!!! ERROR !!! Failed inserting request %d into the queue", request->number);
		request->child_state = REQUEST_DONE;
		free(entry);	/* BUG FIX: entry was leaked on this path */
		return 0;
	}

	thread_pool.num_queued++;
	pthread_mutex_unlock(&thread_pool.queue_mutex);

	/*
	 *	There's one more request in the queue.
	 *
	 *	Note that we're not touching the queue any more, so
	 *	the semaphore post is outside of the mutex.  This also
	 *	means that when the thread wakes up and tries to lock
	 *	the mutex, it will be unlocked, and there won't be
	 *	contention.
	 */
	sem_post(&thread_pool.semaphore);

	return 1;
}
/*
 *	Exercise the fifo across several wrap-arounds: five rounds of
 *	pushing MAX/3 entries and then popping them all back, checking
 *	the element count at every single step.
 */
int main(int argc, char **argv)
{
	int i, j, array[MAX];
	fr_fifo_t *fi;

	fi = fr_fifo_create(NULL, MAX, NULL);
	if (!fi) fr_exit(1);

#define SPLIT (MAX/3)

	for (j = 0; j < 5; j++) {
		/*
		 *	Fill phase: SPLIT monotonically increasing
		 *	values, stored modulo MAX so the backing array
		 *	is reused across rounds.
		 */
		for (i = 0; i < SPLIT; i++) {
			int value = (j * SPLIT) + i;

			array[value % MAX] = value;
			if (fr_fifo_push(fi, &array[value % MAX]) < 0) {
				fprintf(stderr, "%d %d\tfailed pushing %d\n", j, i, value);
				fr_exit(2);
			}

			if (fr_fifo_num_elements(fi) != (i + 1)) {
				fprintf(stderr, "%d %d\tgot size %d expected %d\n", j, i, i + 1, fr_fifo_num_elements(fi));
				fr_exit(1);
			}
		}

		if (fr_fifo_num_elements(fi) != SPLIT) {
			fprintf(stderr, "HALF %d %d\n", fr_fifo_num_elements(fi), SPLIT);
			fr_exit(1);
		}

		/*
		 *	Drain phase: values must come back in push order.
		 */
		for (i = 0; i < SPLIT; i++) {
			int value = (j * SPLIT) + i;
			int *p;

			p = fr_fifo_pop(fi);
			if (!p) {
				fprintf(stderr, "No pop at %d\n", i);
				fr_exit(3);
			}

			if (*p != value) {
				fprintf(stderr, "%d %d\tgot %d expected %d\n", j, i, *p, value);
				fr_exit(4);
			}

			if (fr_fifo_num_elements(fi) != SPLIT - (i + 1)) {
				fprintf(stderr, "%d %d\tgot size %d expected %d\n", j, i, SPLIT - (i + 1), fr_fifo_num_elements(fi));
				fr_exit(1);
			}
		}

		if (fr_fifo_num_elements(fi) != 0) {
			fprintf(stderr, "ZERO %d %d\n", fr_fifo_num_elements(fi), 0);
			fr_exit(1);
		}
	}

	talloc_free(fi);

	fr_exit(0);
}
/*
 *	Add a request to the list of waiting requests.
 *	This function gets called ONLY from the main handler thread...
 *
 *	This function should never fail.
 *
 *	Returns 1 on success, 0 when the request was dropped because
 *	the queue is full.
 */
int request_enqueue(REQUEST *request)
{
	static int last_complained = 0;

	almost_now = request->timestamp;

	/*
	 *	If we haven't checked the number of child threads
	 *	in a while, OR if the thread pool appears to be full,
	 *	go manage it.
	 */
	if ((last_cleaned < request->timestamp) ||
	    (thread_pool.active_threads == thread_pool.total_threads)) {
		thread_pool_manage(request->timestamp);
	}

	pthread_mutex_lock(&thread_pool.queue_mutex);

	thread_pool.request_count++;

	/*
	 *	BUG FIX: the queue-full check and the once-per-second
	 *	complaint rate limit were previously merged into a
	 *	single condition, so a full queue was only honoured for
	 *	the first rejected request of each second — subsequent
	 *	requests in the same second were pushed past
	 *	max_queue_size.  Drop the request whenever the queue is
	 *	full, and separately rate-limit only the log message.
	 */
	if (thread_pool.num_queued >= thread_pool.max_queue_size) {
		int complain = (last_complained != almost_now);

		if (complain) last_complained = almost_now;

		pthread_mutex_unlock(&thread_pool.queue_mutex);

		/*
		 *	Complain at most once per second.
		 */
		if (complain) {
			radlog(L_ERR, "Something is blocking the server. There are %d packets in the queue, waiting to be processed. Ignoring the new request.", thread_pool.max_queue_size);
		}
		return 0;
	}

	request->component = "<core>";
	request->module = "<queue>";

	/*
	 *	Push the request onto the appropriate fifo for that
	 */
	if (!fr_fifo_push(thread_pool.fifo[request->priority], request)) {
		pthread_mutex_unlock(&thread_pool.queue_mutex);
		radlog(L_ERR, "!!! ERROR !!! Failed inserting request %d into the queue", request->number);
		return 0;
	}

	thread_pool.num_queued++;
	pthread_mutex_unlock(&thread_pool.queue_mutex);

	/*
	 *	There's one more request in the queue.
	 *
	 *	Note that we're not touching the queue any more, so
	 *	the semaphore post is outside of the mutex.  This also
	 *	means that when the thread wakes up and tries to lock
	 *	the mutex, it will be unlocked, and there won't be
	 *	contention.
	 */
	sem_post(&thread_pool.semaphore);

	return 1;
}