/* Terminate the calling worker thread, exiting with the supplied code.
 *
 * check_restart() has already been registered as the thread's exit hook,
 * so restart handling does not happen here.  Before dying we mark the
 * scoreboard slot as a dead server and clear its thread id.
 *
 * TODO - use the status we set to determine if we need to restart the
 * thread.
 */
static void clean_child_exit(int code, int slot)
{
    ap_update_child_status_from_indexes(0, slot, SERVER_DEAD,
                                        (request_rec *) NULL);
    ap_scoreboard_image->servers[0][slot].tid = 0;
    exit_thread(code);
}
static int make_worker(int slot) { thread_id tid; proc_info *my_info = (proc_info *)malloc(sizeof(proc_info)); /* freed by thread... */ if (my_info == NULL) { ap_log_error(APLOG_MARK, APLOG_ALERT, errno, ap_server_conf, "malloc: out of memory"); clean_child_exit(APEXIT_CHILDFATAL); } my_info->slot = slot; apr_pool_create(&my_info->tpool, pchild); if (slot + 1 > ap_max_child_assigned) ap_max_child_assigned = slot + 1; if (one_process) { set_signals(); ap_scoreboard_image->parent[0].pid = getpid(); return 0; } (void) ap_update_child_status_from_indexes(0, slot, SERVER_STARTING, (request_rec*)NULL); tid = spawn_thread(worker_thread, "apache_worker", B_NORMAL_PRIORITY, my_info); if (tid < B_NO_ERROR) { ap_log_error(APLOG_MARK, APLOG_ERR, errno, NULL, "spawn_thread: Unable to start a new thread"); /* In case system resources are maxxed out, we don't want * Apache running away with the CPU trying to fork over and * over and over again. */ (void) ap_update_child_status_from_indexes(0, slot, SERVER_DEAD, (request_rec*)NULL); sleep(10); free(my_info); return -1; } resume_thread(tid); ap_scoreboard_image->servers[0][slot].tid = tid; return 0; }
/* Update the scoreboard status for the worker identified by the
 * scoreboard handle.  Returns -1 when no handle is supplied, otherwise
 * forwards to ap_update_child_status_from_indexes() and returns its
 * result.
 */
AP_DECLARE(int) ap_update_child_status(ap_sb_handle_t *sbh, int status,
                                       request_rec *r)
{
    if (sbh == NULL) {
        return -1;
    }

    return ap_update_child_status_from_indexes(sbh->child_num,
                                               sbh->thread_num,
                                               status, r);
}
/* Create a worker thread (NetWare) for scoreboard slot 'slot'.
 *
 * Allocates an NKS context, names it after the slot, and binds a thread
 * to it.  On any failure the slot is marked WORKER_DEAD so it doesn't
 * stay SERVER_STARTING forever, and -1 is returned; on success the
 * thread id is recorded in the scoreboard and 0 is returned.
 */
static int make_child(server_rec *s, int slot)
{
    int tid;
    int err = 0;
    NXContext_t ctx;

    if (slot + 1 > ap_max_workers_limit) {
        ap_max_workers_limit = slot + 1;
    }

    ap_update_child_status_from_indexes(0, slot, WORKER_STARTING,
                                        (request_rec *) NULL);

    ctx = NXContextAlloc((void (*)(void *)) worker_main, (void *) slot,
                         NX_PRIO_MED, ap_thread_stacksize,
                         NX_CTX_NORMAL, &err);
    if (ctx) {
        char threadName[32];

        /* snprintf bounds the write; the name cannot overflow the
         * fixed-size buffer even if the format grows. */
        snprintf(threadName, sizeof(threadName), "Apache_Worker %d", slot);
        NXContextSetName(ctx, threadName);
        err = NXThreadCreate(ctx, NX_THR_BIND_CONTEXT, &tid);
        if (err) {
            NXContextFree(ctx);
        }
    }
    else if (!err) {
        /* Bug fix: if NXContextAlloc() returned NULL without reporting
         * an error we previously fell through and published an
         * uninitialized tid in the scoreboard.  Force the error path. */
        err = -1;
    }

    if (err) {
        /* create thread didn't succeed. Fix the scoreboard or else
         * it will say SERVER_STARTING forever and ever
         */
        ap_update_child_status_from_indexes(0, slot, WORKER_DEAD,
                                            (request_rec *) NULL);

        /* In case system resources are maxed out, we don't want
           Apache running away with the CPU trying to fork over and
           over and over again. */
        apr_thread_yield();

        return -1;
    }

    ap_scoreboard_image->servers[0][slot].tid = tid;

    return 0;
}
/* Tear down a NetWare worker thread's per-connection resources and exit
 * the thread with 'code'.
 *
 * worker_num is the scoreboard slot (pass -1 for threads that never
 * owned a slot, in which case the scoreboard is left untouched).
 */
static void clean_child_exit(int code, int worker_num, apr_pool_t *ptrans,
                             apr_bucket_alloc_t *bucket_alloc)
{
    apr_bucket_alloc_destroy(bucket_alloc);
    /* During shutdown the pools are torn down elsewhere; destroying
     * ptrans here as well would double-destroy it. */
    if (!shutdown_pending) {
        apr_pool_destroy(ptrans);
    }
    atomic_dec (&worker_thread_count);
    if (worker_num >=0)
        ap_update_child_status_from_indexes(0, worker_num, WORKER_DEAD,
                                            (request_rec *) NULL);
    /* NOTE(review): this passes the address of a stack parameter as the
     * thread's exit value; that pointer dangles once this frame is gone.
     * Presumably nothing dereferences the exit value — TODO confirm. */
    NXThreadExit((void*)&code);
}
/* Systems without a real waitpid sometimes lose a child's exit while
 * waiting for another.  Walk the scoreboard looking for children that
 * are marked alive but whose process no longer exists (kill(pid, 0)
 * fails).  The first such child is marked SERVER_DEAD, reported as a
 * successful exit, and its pid returned.  Returns 0 when none found.
 */
int reap_children(int *exitcode, apr_exit_why_e *status)
{
    int slot;

    for (slot = 0; slot < ap_max_daemons_limit; ++slot) {
        int pid;

        if (ap_scoreboard_image->servers[slot][0].status == SERVER_DEAD) {
            continue;
        }
        pid = ap_scoreboard_image->parent[slot].pid;
        if (kill(pid, 0) != -1) {
            continue;        /* process still exists */
        }

        ap_update_child_status_from_indexes(slot, 0, SERVER_DEAD, NULL);
        /* just mark it as having a successful exit status */
        *status = APR_PROC_EXIT;
        *exitcode = 0;
        return pid;
    }
    return 0;
}
/* NetWare worker thread body.
 *
 * Each worker owns one scoreboard slot (my_worker_num) and loops:
 * mark itself READY, select() on all listeners, round-robin across the
 * ready listeners, accept a connection (retrying briefly on EAGAIN),
 * and hand it to the protocol engine.  The thread exits through
 * clean_child_exit() on shutdown/restart/idle-kill, after
 * MaxRequestsPerChild connections, or on a fatal accept error.
 */
/*static */ void worker_main(void *arg)
{
    ap_listen_rec *lr, *first_lr, *last_lr = NULL;
    apr_pool_t *ptrans;
    apr_allocator_t *allocator;
    apr_bucket_alloc_t *bucket_alloc;
    conn_rec *current_conn;
    apr_status_t stat = APR_EINIT;
    ap_sb_handle_t *sbh;
    apr_thread_t *thd = NULL;
    apr_os_thread_t osthd;

    int my_worker_num = (int)arg;
    apr_socket_t *csd = NULL;
    int requests_this_child = 0;
    apr_socket_t *sd = NULL;
    fd_set main_fds;

    int sockdes;
    int srv;
    struct timeval tv;
    int wouldblock_retry;

    osthd = apr_os_thread_current();
    apr_os_thread_put(&thd, &osthd, pmain);

    /* 1-second select() timeout so the shutdown/restart flags are
     * re-checked regularly even when no traffic arrives. */
    tv.tv_sec = 1;
    tv.tv_usec = 0;

    /* Per-thread allocator + transaction pool, cleared per connection. */
    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(&ptrans, pmain, NULL, allocator);
    apr_allocator_owner_set(allocator, ptrans);
    apr_pool_tag(ptrans, "transaction");

    bucket_alloc = apr_bucket_alloc_create_ex(allocator);

    atomic_inc (&worker_thread_count);

    while (!die_now) {
        /*
         * (Re)initialize this child to a pre-connection state.
         */
        current_conn = NULL;
        apr_pool_clear(ptrans);

        if ((ap_max_requests_per_child > 0
             && requests_this_child++ >= ap_max_requests_per_child)) {
            DBPRINT1 ("\n**Thread slot %d is shutting down", my_worker_num);
            clean_child_exit(0, my_worker_num, ptrans, bucket_alloc);
        }

        ap_update_child_status_from_indexes(0, my_worker_num, WORKER_READY,
                                            (request_rec *) NULL);

        /*
         * Wait for an acceptable connection to arrive.
         */
        for (;;) {
            if (shutdown_pending || restart_pending
                || (ap_scoreboard_image->servers[0][my_worker_num].status
                    == WORKER_IDLE_KILL)) {
                DBPRINT1 ("\nThread slot %d is shutting down\n",
                          my_worker_num);
                clean_child_exit(0, my_worker_num, ptrans, bucket_alloc);
            }

            /* Check the listen queue on all sockets for requests */
            memcpy(&main_fds, &listenfds, sizeof(fd_set));
            srv = select(listenmaxfd + 1, &main_fds, NULL, NULL, &tv);

            if (srv <= 0) {
                if (srv < 0) {
                    ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
                                 APLOGNO(00217)
                                 "select() failed on listen socket");
                    apr_thread_yield();
                }
                /* timeout or error: loop to re-check shutdown flags */
                continue;
            }

            /* remember the last_lr we searched last time around so that
               we don't end up starving any particular listening socket */
            if (last_lr == NULL) {
                lr = ap_listeners;
            }
            else {
                lr = last_lr->next;
                if (!lr)
                    lr = ap_listeners;
            }
            first_lr = lr;
            do {
                apr_os_sock_get(&sockdes, lr->sd);
                if (FD_ISSET(sockdes, &main_fds))
                    goto got_listener;
                lr = lr->next;
                if (!lr)
                    lr = ap_listeners;
            } while (lr != first_lr);
            /* if we get here, something unexpected happened. Go back
               into the select state and try again.
            */
            continue;
        got_listener:
            last_lr = lr;
            sd = lr->sd;
            wouldblock_retry = MAX_WB_RETRIES;

            while (wouldblock_retry) {
                if ((stat = apr_socket_accept(&csd, sd, ptrans))
                        == APR_SUCCESS) {
                    break;
                }
                else {
                    /* if the error is a wouldblock then maybe we were
                       too quick trying to pull the next request from the
                       listen queue.  Try a few more times then return to
                       our idle listen state. */
                    if (!APR_STATUS_IS_EAGAIN(stat)) {
                        break;
                    }

                    if (wouldblock_retry--) {
                        apr_thread_yield();
                    }
                }
            }

            /* If we got a new socket, set it to non-blocking mode and
               process it.  Otherwise handle the error. */
            if (stat == APR_SUCCESS) {
                apr_socket_opt_set(csd, APR_SO_NONBLOCK, 0);
#ifdef DBINFO_ON
                if (wouldblock_retry < MAX_WB_RETRIES) {
                    retry_success++;
                    avg_retries += (MAX_WB_RETRIES-wouldblock_retry);
                }
#endif
                break;        /* We have a socket ready for reading */
            }
            else {
#ifdef DBINFO_ON
                if (APR_STATUS_IS_EAGAIN(stat)) {
                    would_block++;
                    retry_fail++;
                }
                else if (
#else
                /* Transient network errors: just go back to listening. */
                if (APR_STATUS_IS_EAGAIN(stat) ||
#endif
                    APR_STATUS_IS_ECONNRESET(stat) ||
                    APR_STATUS_IS_ETIMEDOUT(stat) ||
                    APR_STATUS_IS_EHOSTUNREACH(stat) ||
                    APR_STATUS_IS_ENETUNREACH(stat)) {
                    ;
                }
#ifdef USE_WINSOCK
                else if (APR_STATUS_IS_ENETDOWN(stat)) {
                    /*
                     * When the network layer has been shut down, there
                     * is not much use in simply exiting: the parent
                     * would simply re-create us (and we'd fail again).
                     * Use the CHILDFATAL code to tear the server down.
                     * @@@ Martin's idea for possible improvement:
                     * A different approach would be to define
                     * a new APEXIT_NETDOWN exit code, the reception
                     * of which would make the parent shutdown all
                     * children, then idle-loop until it detected that
                     * the network is up again, and restart the children.
                     * Ben Hyde noted that temporary ENETDOWN situations
                     * occur in mobile IP.
                     */
                    ap_log_error(APLOG_MARK, APLOG_EMERG, stat,
                                 ap_server_conf, APLOGNO(00218)
                                 "apr_socket_accept: giving up.");
                    clean_child_exit(APEXIT_CHILDFATAL, my_worker_num,
                                     ptrans, bucket_alloc);
                }
#endif
                else {
                    ap_log_error(APLOG_MARK, APLOG_ERR, stat, ap_server_conf,
                                 APLOGNO(00219)
                                 "apr_socket_accept: (client socket)");
                    clean_child_exit(1, my_worker_num, ptrans, bucket_alloc);
                }
            }
        }

        ap_create_sb_handle(&sbh, ptrans, 0, my_worker_num);
        /*
         * We now have a connection, so set it up with the appropriate
         * socket options, file descriptors, and read/write buffers.
         */
        current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd,
                                                my_worker_num, sbh,
                                                bucket_alloc);
        if (current_conn) {
            current_conn->current_thread = thd;
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }
        request_count++;
    }
    clean_child_exit(0, my_worker_num, ptrans, bucket_alloc);
}
static void server_main_loop(int remaining_threads_to_start) { int child_slot; apr_exit_why_e exitwhy; int status; apr_proc_t pid; int i; while (!restart_pending && !shutdown_pending) { ap_wait_or_timeout(&exitwhy, &status, &pid, pconf); if (pid.pid >= 0) { if (ap_process_child_status(&pid, exitwhy, status) == APEXIT_CHILDFATAL) { shutdown_pending = 1; child_fatal = 1; return; } /* non-fatal death... note that it's gone in the scoreboard. */ child_slot = -1; for (i = 0; i < ap_max_child_assigned; ++i) { if (ap_scoreboard_image->servers[0][i].tid == pid.pid) { child_slot = i; break; } } if (child_slot >= 0) { ap_scoreboard_image->servers[0][child_slot].tid = 0; (void) ap_update_child_status_from_indexes(0, child_slot, SERVER_DEAD, (request_rec*)NULL); if (remaining_threads_to_start && child_slot < ap_thread_limit) { /* we're still doing a 1-for-1 replacement of dead * children with new children */ make_worker(child_slot); --remaining_threads_to_start; } #if APR_HAS_OTHER_CHILD } else if (apr_proc_other_child_read(&pid, status) == 0) { /* handled */ #endif } else if (is_graceful) { /* Great, we've probably just lost a slot in the * scoreboard. Somehow we don't know about this * child. */ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ap_server_conf, "long lost child came home! (pid %ld)", pid.pid); } /* Don't perform idle maintenance when a child dies, * only do it when there's a timeout. Remember only a * finite number of children can die, and it's pretty * pathological for a lot to die suddenly. */ continue; } else if (remaining_threads_to_start) { /* we hit a 1 second timeout in which none of the previous * generation of children needed to be reaped... so assume * they're all done, and pick up the slack if any is left. */ startup_threads(remaining_threads_to_start); remaining_threads_to_start = 0; /* In any event we really shouldn't do the code below because * few of the servers we just started are in the IDLE state * yet, so we'd mistakenly create an extra server. 
*/ continue; } perform_idle_server_maintenance(); } }
/*
 * worker_main()
 * Main entry point for the worker threads. Worker threads block in
 * win*_get_connection() awaiting a connection to service.
 *
 * Loops until the get_connection call returns NULL (the signal to exit),
 * processing one connection per iteration and signalling the
 * max_requests_per_child_event once the connection budget is exceeded.
 */
static unsigned int __stdcall worker_main(void *thread_num_val)
{
    /* NOTE(review): this counter is static, i.e. shared by every worker
     * thread in the process, and is incremented without synchronization —
     * presumably an approximate count is acceptable here; confirm before
     * relying on it being exact. */
    static int requests_this_child = 0;
    PCOMP_CONTEXT context = NULL;
    int thread_num = (int)thread_num_val;
    ap_sb_handle_t *sbh;

    while (1) {
        conn_rec *c;
        apr_int32_t disconnected;

        ap_update_child_status_from_indexes(0, thread_num, SERVER_READY,
                                            NULL);

        /* Grab a connection off the network */
        if (use_acceptex) {
            context = winnt_get_connection(context);
        }
        else {
            context = win9x_get_connection(context);
        }
        if (!context) {
            /* Time for the thread to exit */
            break;
        }

        /* Have we hit MaxRequestPerChild connections? */
        if (ap_max_requests_per_child) {
            requests_this_child++;
            if (requests_this_child > ap_max_requests_per_child) {
                SetEvent(max_requests_per_child_event);
            }
        }

        ap_create_sb_handle(&sbh, context->ptrans, 0, thread_num);
        c = ap_run_create_connection(context->ptrans, ap_server_conf,
                                     context->sock, thread_num, sbh,
                                     context->ba);

        if (c) {
            ap_process_connection(c, context->sock);
            apr_socket_opt_get(context->sock, APR_SO_DISCONNECTED,
                               &disconnected);
            if (!disconnected) {
                context->accept_socket = INVALID_SOCKET;
                ap_lingering_close(c);
            }
            else if (!use_acceptex) {
                /* If the socket is disconnected but we are not using acceptex,
                 * we cannot reuse the socket. Disconnected sockets are removed
                 * from the apr_socket_t struct by apr_sendfile() to prevent the
                 * socket descriptor from being inadvertently closed by a call
                 * to apr_socket_close(), so close it directly. */
                closesocket(context->accept_socket);
                context->accept_socket = INVALID_SOCKET;
            }
        }
        else {
            /* ap_run_create_connection closes the socket on failure */
            context->accept_socket = INVALID_SOCKET;
        }
    }

    ap_update_child_status_from_indexes(0, thread_num, SERVER_DEAD,
                                        (request_rec *) NULL);

    return 0;
}
/* This is the thread that actually does all the work. */ static int32 worker_thread(void *dummy) { int worker_slot = (int)dummy; apr_allocator_t *allocator; apr_bucket_alloc_t *bucket_alloc; apr_status_t rv = APR_EINIT; int last_poll_idx = 0; sigset_t sig_mask; int requests_this_child = 0; apr_pollset_t *pollset = NULL; ap_listen_rec *lr = NULL; ap_sb_handle_t *sbh = NULL; int i; /* each worker thread is in control of its own destiny...*/ int this_worker_should_exit = 0; /* We have 2 pools that we create/use throughout the lifetime of this * worker. The first and longest lived is the pworker pool. From * this we create the ptrans pool, the lifetime of which is the same * as each connection and is reset prior to each attempt to * process a connection. */ apr_pool_t *ptrans = NULL; apr_pool_t *pworker = NULL; mpm_state = AP_MPMQ_STARTING; /* for benefit of any hooks that run as this * child initializes */ on_exit_thread(check_restart, (void*)worker_slot); /* block the signals for this thread only if we're not running as a * single process. */ if (!one_process) { sigfillset(&sig_mask); sigprocmask(SIG_BLOCK, &sig_mask, NULL); } /* Each worker thread is fully in control of it's destinay and so * to allow each thread to handle the lifetime of it's own resources * we create and use a subcontext for every thread. * The subcontext is a child of the pconf pool. */ apr_allocator_create(&allocator); apr_allocator_max_free_set(allocator, ap_max_mem_free); apr_pool_create_ex(&pworker, pconf, NULL, allocator); apr_allocator_owner_set(allocator, pworker); apr_pool_create(&ptrans, pworker); apr_pool_tag(ptrans, "transaction"); ap_create_sb_handle(&sbh, pworker, 0, worker_slot); (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL); /* We add an extra socket here as we add the udp_sock we use for signalling * death. This gets added after the others. 
*/ apr_pollset_create(&pollset, num_listening_sockets + 1, pworker, 0); for (lr = ap_listeners, i = num_listening_sockets; i--; lr = lr->next) { apr_pollfd_t pfd = {0}; pfd.desc_type = APR_POLL_SOCKET; pfd.desc.s = lr->sd; pfd.reqevents = APR_POLLIN; pfd.client_data = lr; apr_pollset_add(pollset, &pfd); } { apr_pollfd_t pfd = {0}; pfd.desc_type = APR_POLL_SOCKET; pfd.desc.s = udp_sock; pfd.reqevents = APR_POLLIN; apr_pollset_add(pollset, &pfd); } bucket_alloc = apr_bucket_alloc_create(pworker); mpm_state = AP_MPMQ_RUNNING; while (!this_worker_should_exit) { conn_rec *current_conn; void *csd; /* (Re)initialize this child to a pre-connection state. */ apr_pool_clear(ptrans); if ((ap_max_requests_per_thread > 0 && requests_this_child++ >= ap_max_requests_per_thread)) clean_child_exit(0, worker_slot); (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL); apr_thread_mutex_lock(accept_mutex); /* We always (presently) have at least 2 sockets we listen on, so * we don't have the ability for a fast path for a single socket * as some MPM's allow :( */ for (;;) { apr_int32_t numdesc = 0; const apr_pollfd_t *pdesc = NULL; rv = apr_pollset_poll(pollset, -1, &numdesc, &pdesc); if (rv != APR_SUCCESS) { if (APR_STATUS_IS_EINTR(rv)) { if (one_process && shutdown_pending) return; continue; } ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, "apr_pollset_poll: (listen)"); clean_child_exit(1, worker_slot); } /* We can always use pdesc[0], but sockets at position N * could end up completely starved of attention in a very * busy server. Therefore, we round-robin across the * returned set of descriptors. While it is possible that * the returned set of descriptors might flip around and * continue to starve some sockets, we happen to know the * internal pollset implementation retains ordering * stability of the sockets. Thus, the round-robin should * ensure that a socket will eventually be serviced. 
*/ if (last_poll_idx >= numdesc) last_poll_idx = 0; /* Grab a listener record from the client_data of the poll * descriptor, and advance our saved index to round-robin * the next fetch. * * ### hmm... this descriptor might have POLLERR rather * ### than POLLIN */ lr = pdesc[last_poll_idx++].client_data; /* The only socket we add without client_data is the first, the UDP socket * we listen on for restart signals. If we've therefore gotten a hit on that * listener lr will be NULL here and we know we've been told to die. * Before we jump to the end of the while loop with this_worker_should_exit * set to 1 (causing us to exit normally we hope) we release the accept_mutex * as we want every thread to go through this same routine :) * Bit of a hack, but compared to what I had before... */ if (lr == NULL) { this_worker_should_exit = 1; apr_thread_mutex_unlock(accept_mutex); goto got_a_black_spot; } goto got_fd; } got_fd: /* Run beos_accept to accept the connection and set things up to * allow us to process it. We always release the accept_lock here, * even if we failt o accept as otherwise we'll starve other workers * which would be bad. */ rv = beos_accept(&csd, lr, ptrans); apr_thread_mutex_unlock(accept_mutex); if (rv == APR_EGENERAL) { /* resource shortage or should-not-occur occured */ clean_child_exit(1, worker_slot); } else if (rv != APR_SUCCESS) continue; current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd, worker_slot, sbh, bucket_alloc); if (current_conn) { ap_process_connection(current_conn, csd); ap_lingering_close(current_conn); } if (ap_my_generation != ap_scoreboard_image->global->running_generation) { /* restart? */ /* yeah, this could be non-graceful restart, in which case the * parent will kill us soon enough, but why bother checking? 
*/ this_worker_should_exit = 1; } got_a_black_spot: } apr_pool_destroy(ptrans); apr_pool_destroy(pworker); clean_child_exit(0, worker_slot); } static int make_worker(int slot) { thread_id tid; if (slot + 1 > ap_max_child_assigned) ap_max_child_assigned = slot + 1; (void) ap_update_child_status_from_indexes(0, slot, SERVER_STARTING, (request_rec*)NULL); if (one_process) { set_signals(); ap_scoreboard_image->parent[0].pid = getpid(); ap_scoreboard_image->servers[0][slot].tid = find_thread(NULL); return 0; } tid = spawn_thread(worker_thread, "apache_worker", B_NORMAL_PRIORITY, (void *)slot); if (tid < B_NO_ERROR) { ap_log_error(APLOG_MARK, APLOG_ERR, errno, NULL, "spawn_thread: Unable to start a new thread"); /* In case system resources are maxed out, we don't want * Apache running away with the CPU trying to fork over and * over and over again. */ (void) ap_update_child_status_from_indexes(0, slot, SERVER_DEAD, (request_rec*)NULL); sleep(10); return -1; } resume_thread(tid); ap_scoreboard_image->servers[0][slot].tid = tid; return 0; }
/* Prefork MPM main loop.
 *
 * Sets up the cross-process accept mutex and scoreboard, starts the
 * initial children, then loops reaping dead children (replacing them
 * 1-for-1 during startup) and performing idle maintenance on the
 * 1-second wait timeout.  Returns 1 on fatal error or shutdown, 0 when
 * a restart (graceful or not) has been prepared.
 */
int ap_mpm_run(apr_pool_t *_pconf, apr_pool_t *plog, server_rec *s)
{
    int index;
    int remaining_children_to_start;
    apr_status_t rv;

    ap_log_pid(pconf, ap_pid_fname);

    first_server_limit = server_limit;
    if (changed_limit_at_restart) {
        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
                     "WARNING: Attempt to change ServerLimit "
                     "ignored during restart");
        changed_limit_at_restart = 0;
    }

    /* Initialize cross-process accept lock */
    ap_lock_fname = apr_psprintf(_pconf, "%s.%" APR_PID_T_FMT,
                                 ap_server_root_relative(_pconf, ap_lock_fname),
                                 ap_my_pid);

    rv = apr_proc_mutex_create(&accept_mutex, ap_lock_fname,
                               ap_accept_lock_mech, _pconf);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s,
                     "Couldn't create accept lock");
        mpm_state = AP_MPMQ_STOPPING;
        return 1;
    }

#if APR_USE_SYSVSEM_SERIALIZE
    if (ap_accept_lock_mech == APR_LOCK_DEFAULT ||
        ap_accept_lock_mech == APR_LOCK_SYSVSEM) {
#else
    if (ap_accept_lock_mech == APR_LOCK_SYSVSEM) {
#endif
        /* SysV semaphores survive the owner; children need access after
         * the setuid, so fix up the permissions now. */
        rv = unixd_set_proc_mutex_perms(accept_mutex);
        if (rv != APR_SUCCESS) {
            ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s,
                         "Couldn't set permissions on cross-process lock; "
                         "check User and Group directives");
            mpm_state = AP_MPMQ_STOPPING;
            return 1;
        }
    }

    if (!is_graceful) {
        if (ap_run_pre_mpm(s->process->pool, SB_SHARED) != OK) {
            mpm_state = AP_MPMQ_STOPPING;
            return 1;
        }
        /* fix the generation number in the global score; we just got a new,
         * cleared scoreboard
         */
        ap_scoreboard_image->global->running_generation = ap_my_generation;
    }

    set_signals();

    if (one_process) {
        AP_MONCONTROL(1);
    }

    if (ap_daemons_max_free < ap_daemons_min_free + 1) /* Don't thrash... */
        ap_daemons_max_free = ap_daemons_min_free + 1;

    /* If we're doing a graceful_restart then we're going to see a lot
     * of children exiting immediately when we get into the main loop
     * below (because we just sent them AP_SIG_GRACEFUL).  This happens
     * pretty rapidly... and for each one that exits we'll start a new one
     * until we reach at least daemons_min_free.  But we may be permitted
     * to start more than that, so we'll just keep track of how many we're
     * supposed to start up without the 1 second penalty between each fork.
     */
    remaining_children_to_start = ap_daemons_to_start;
    if (remaining_children_to_start > ap_daemons_limit) {
        remaining_children_to_start = ap_daemons_limit;
    }
    if (!is_graceful) {
        startup_children(remaining_children_to_start);
        remaining_children_to_start = 0;
    }
    else {
        /* give the system some time to recover before kicking into
         * exponential mode */
        hold_off_on_exponential_spawning = 10;
    }

    ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
                 "%s configured -- resuming normal operations",
                 ap_get_server_version());
    ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf,
                 "Server built: %s", ap_get_server_built());
#ifdef AP_MPM_WANT_SET_ACCEPT_LOCK_MECH
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
                 "AcceptMutex: %s (default: %s)",
                 apr_proc_mutex_name(accept_mutex),
                 apr_proc_mutex_defname());
#endif
    restart_pending = shutdown_pending = 0;

    mpm_state = AP_MPMQ_RUNNING;

    while (!restart_pending && !shutdown_pending) {
        int child_slot;
        apr_exit_why_e exitwhy;
        int status, processed_status;
        /* this is a memory leak, but I'll fix it later. */
        apr_proc_t pid;

        ap_wait_or_timeout(&exitwhy, &status, &pid, pconf);

        /* XXX: if it takes longer than 1 second for all our children
         * to start up and get into IDLE state then we may spawn an
         * extra child
         */
        if (pid.pid != -1) {
            processed_status = ap_process_child_status(&pid, exitwhy, status);
            if (processed_status == APEXIT_CHILDFATAL) {
                mpm_state = AP_MPMQ_STOPPING;
                return 1;
            }

            /* non-fatal death... note that it's gone in the scoreboard. */
            child_slot = find_child_by_pid(&pid);
            if (child_slot >= 0) {
                (void) ap_update_child_status_from_indexes(child_slot, 0,
                                                           SERVER_DEAD,
                                                           (request_rec *) NULL);
                if (processed_status == APEXIT_CHILDSICK) {
                    /* child detected a resource shortage (E[NM]FILE,
                     * ENOBUFS, etc) cut the fork rate to the minimum
                     */
                    idle_spawn_rate = 1;
                }
                else if (remaining_children_to_start
                         && child_slot < ap_daemons_limit) {
                    /* we're still doing a 1-for-1 replacement of dead
                     * children with new children
                     */
                    make_child(ap_server_conf, child_slot);
                    --remaining_children_to_start;
                }
#if APR_HAS_OTHER_CHILD
            }
            else if (apr_proc_other_child_read(&pid, status) == 0) {
                /* handled */
#endif
            }
            else if (is_graceful) {
                /* Great, we've probably just lost a slot in the
                 * scoreboard.  Somehow we don't know about this
                 * child.
                 */
                ap_log_error(APLOG_MARK, APLOG_WARNING, 0,
                             ap_server_conf,
                             "long lost child came home! (pid %ld)",
                             (long)pid.pid);
            }
            /* Don't perform idle maintenance when a child dies,
             * only do it when there's a timeout.  Remember only a
             * finite number of children can die, and it's pretty
             * pathological for a lot to die suddenly.
             */
            continue;
        }
        else if (remaining_children_to_start) {
            /* we hit a 1 second timeout in which none of the previous
             * generation of children needed to be reaped... so assume
             * they're all done, and pick up the slack if any is left.
             */
            startup_children(remaining_children_to_start);
            remaining_children_to_start = 0;
            /* In any event we really shouldn't do the code below because
             * few of the servers we just started are in the IDLE state
             * yet, so we'd mistakenly create an extra server.
             */
            continue;
        }

        perform_idle_server_maintenance(pconf);
#ifdef TPF
        shutdown_pending = os_check_server(tpf_server_name);
        ap_check_signals();
        sleep(1);
#endif /*TPF */
    }

    mpm_state = AP_MPMQ_STOPPING;

    if (shutdown_pending) {
        /* Time to gracefully shut down:
         * Kill child processes, tell them to call child_exit, etc...
         */
        if (unixd_killpg(getpgrp(), SIGTERM) < 0) {
            ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
                         "killpg SIGTERM");
        }
        ap_reclaim_child_processes(1);                /* Start with SIGTERM */

        /* cleanup pid file on normal shutdown */
        {
            const char *pidfile = NULL;
            pidfile = ap_server_root_relative (pconf, ap_pid_fname);
            if ( pidfile != NULL && unlink(pidfile) == 0)
                ap_log_error(APLOG_MARK, APLOG_INFO, 0,
                             ap_server_conf,
                             "removed PID file %s (pid=%ld)",
                             pidfile, (long)getpid());
        }

        ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
                     "caught SIGTERM, shutting down");
        return 1;
    }

    /* we've been told to restart */
    apr_signal(SIGHUP, SIG_IGN);
    if (one_process) {
        /* not worth thinking about */
        return 1;
    }

    /* advance to the next generation */
    /* XXX: we really need to make sure this new generation number isn't in
     * use by any of the children.
     */
    ++ap_my_generation;
    ap_scoreboard_image->global->running_generation = ap_my_generation;

    if (is_graceful) {
        ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
                     "Graceful restart requested, doing restart");

        /* kill off the idle ones */
        ap_mpm_pod_killpg(pod, ap_max_daemons_limit);

        /* This is mostly for debugging... so that we know what is still
         * gracefully dealing with existing request.  This will break
         * in a very nasty way if we ever have the scoreboard totally
         * file-based (no shared memory)
         */
        for (index = 0; index < ap_daemons_limit; ++index) {
            if (ap_scoreboard_image->servers[index][0].status != SERVER_DEAD) {
                ap_scoreboard_image->servers[index][0].status = SERVER_GRACEFUL;
            }
        }
    }
    else {
        /* Kill 'em off */
        if (unixd_killpg(getpgrp(), SIGHUP) < 0) {
            ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
                         "killpg SIGHUP");
        }
        ap_reclaim_child_processes(0);          /* Not when just starting up */
        ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
                     "SIGHUP received.  Attempting to restart");
    }

    return 0;
}

/* This really should be a post_config hook, but the error log is already
 * redirected by that point, so we need to do this in the open_logs phase.
 */
static int prefork_open_logs(apr_pool_t *p, apr_pool_t *plog,
                             apr_pool_t *ptemp, server_rec *s)
{
    apr_status_t rv;

    pconf = p;
    ap_server_conf = s;

    if ((num_listensocks = ap_setup_listeners(ap_server_conf)) < 1) {
        ap_log_error(APLOG_MARK, APLOG_ALERT|APLOG_STARTUP, 0,
                     NULL, "no listening sockets available, shutting down");
        return DONE;
    }

    if ((rv = ap_mpm_pod_open(pconf, &pod))) {
        ap_log_error(APLOG_MARK, APLOG_CRIT|APLOG_STARTUP, rv, NULL,
                     "Could not open pipe-of-death.");
        return DONE;
    }
    return OK;
}
/* Fork one child process for scoreboard slot 'slot' (prefork MPM).
 *
 * In one_process mode child_main() runs inline instead of forking.
 * On fork failure the slot is reset to SERVER_DEAD, a 10s back-off is
 * applied, and -1 is returned.  On success the child runs child_main()
 * and the parent records the pid in the scoreboard and returns 0.
 */
static int make_child(server_rec *s, int slot)
{
    int pid;

    if (slot + 1 > ap_max_daemons_limit) {
        ap_max_daemons_limit = slot + 1;
    }

    if (one_process) {
        apr_signal(SIGHUP, just_die);
        /* Don't catch AP_SIG_GRACEFUL in ONE_PROCESS mode :) */
        apr_signal(SIGINT, just_die);
#ifdef SIGQUIT
        apr_signal(SIGQUIT, SIG_DFL);
#endif
        apr_signal(SIGTERM, just_die);
        child_main(slot);
    }

    (void) ap_update_child_status_from_indexes(slot, 0, SERVER_STARTING,
                                               (request_rec *) NULL);

#ifdef _OSD_POSIX
    /* BS2000 requires a "special" version of fork() before a setuid() call */
    if ((pid = os_fork(unixd_config.user_name)) == -1) {
#elif defined(TPF)
    if ((pid = os_fork(s, slot)) == -1) {
#else
    if ((pid = fork()) == -1) {
#endif
        ap_log_error(APLOG_MARK, APLOG_ERR, errno, s,
                     "fork: Unable to fork new process");

        /* fork didn't succeed. Fix the scoreboard or else
         * it will say SERVER_STARTING forever and ever
         */
        (void) ap_update_child_status_from_indexes(slot, 0, SERVER_DEAD,
                                                   (request_rec *) NULL);

        /* In case system resources are maxed out, we don't want
           Apache running away with the CPU trying to fork over and
           over and over again. */
        sleep(10);

        return -1;
    }

    if (!pid) {
        /* --- child process from here on --- */
#ifdef HAVE_BINDPROCESSOR
        /* by default AIX binds to a single processor
         * this bit unbinds children which will then bind to another cpu
         */
        int status = bindprocessor(BINDPROCESS, (int)getpid(),
                                   PROCESSOR_CLASS_ANY);
        if (status != OK) {
            ap_log_error(APLOG_MARK, APLOG_WARNING, errno,
                         ap_server_conf, "processor unbind failed %d",
                         status);
        }
#endif
        RAISE_SIGSTOP(MAKE_CHILD);
        AP_MONCONTROL(1);
        /* Disable the parent's signal handlers and set up proper handling
         * in the child.
         */
        apr_signal(SIGHUP, just_die);
        apr_signal(SIGTERM, just_die);
        /* The child process doesn't do anything for AP_SIG_GRACEFUL.
         * Instead, the pod is used for signalling graceful restart.
         */
        apr_signal(AP_SIG_GRACEFUL, SIG_IGN);
        child_main(slot);
    }

    /* parent: remember the child's pid */
    ap_scoreboard_image->parent[slot].pid = pid;

    return 0;
}

/* start up a bunch of children */
static void startup_children(int number_to_start)
{
    int i;

    for (i = 0; number_to_start && i < ap_daemons_limit; ++i) {
        /* skip slots that already hold a live child */
        if (ap_scoreboard_image->servers[i][0].status != SERVER_DEAD) {
            continue;
        }
        if (make_child(ap_server_conf, i) < 0) {
            break;
        }
        --number_to_start;
    }
}

/*
 * idle_spawn_rate is the number of children that will be spawned on the
 * next maintenance cycle if there aren't enough idle servers.  It is
 * doubled up to MAX_SPAWN_RATE, and reset only when a cycle goes by
 * without the need to spawn.
 */
static int idle_spawn_rate = 1;
#ifndef MAX_SPAWN_RATE
#define MAX_SPAWN_RATE (32)
#endif
static int hold_off_on_exponential_spawning;

/* Once-per-second housekeeping: count idle children, kill the
 * highest-numbered one via the pod when there are too many idle, and
 * spawn up to idle_spawn_rate new ones (exponential back-off capped at
 * MAX_SPAWN_RATE) when there are too few.
 */
static void perform_idle_server_maintenance(apr_pool_t *p)
{
    int i;
    int to_kill;
    int idle_count;
    worker_score *ws;
    int free_length;
    int free_slots[MAX_SPAWN_RATE];
    int last_non_dead;
    int total_non_dead;

    /* initialize the free_list */
    free_length = 0;

    to_kill = -1;
    idle_count = 0;
    last_non_dead = -1;
    total_non_dead = 0;

    for (i = 0; i < ap_daemons_limit; ++i) {
        int status;

        /* past the highest slot ever used and the free list is full:
         * nothing more to learn from this scan */
        if (i >= ap_max_daemons_limit && free_length == idle_spawn_rate)
            break;
        ws = &ap_scoreboard_image->servers[i][0];
        status = ws->status;
        if (status == SERVER_DEAD) {
            /* try to keep children numbers as low as possible */
            if (free_length < idle_spawn_rate) {
                free_slots[free_length] = i;
                ++free_length;
            }
        }
        else {
            /* We consider a starting server as idle because we started it
             * at least a cycle ago, and if it still hasn't finished starting
             * then we're just going to swamp things worse by forking more.
             * So we hopefully won't need to fork more if we count it.
             * This depends on the ordering of SERVER_READY and
             * SERVER_STARTING.
             */
            if (status <= SERVER_READY) {
                ++ idle_count;
                /* always kill the highest numbered child if we have to...
                 * no really well thought out reason ... other than observing
                 * the server behaviour under linux where lower numbered
                 * children tend to service more hits (and hence are more
                 * likely to have their data in cpu caches).
                 */
                to_kill = i;
            }

            ++total_non_dead;
            last_non_dead = i;
        }
    }
    ap_max_daemons_limit = last_non_dead + 1;
    if (idle_count > ap_daemons_max_free) {
        /* kill off one child... we use the pod because that'll cause it to
         * shut down gracefully, in case it happened to pick up a request
         * while we were counting
         */
        ap_mpm_pod_signal(pod);
        idle_spawn_rate = 1;
    }
    else if (idle_count < ap_daemons_min_free) {
        /* terminate the free list */
        if (free_length == 0) {
            /* only report this condition once */
            static int reported = 0;

            if (!reported) {
                ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf,
                             "server reached MaxClients setting, consider"
                             " raising the MaxClients setting");
                reported = 1;
            }
            idle_spawn_rate = 1;
        }
        else {
            if (idle_spawn_rate >= 8) {
                ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf,
                             "server seems busy, (you may need "
                             "to increase StartServers, or Min/MaxSpareServers), "
                             "spawning %d children, there are %d idle, and "
                             "%d total children", idle_spawn_rate,
                             idle_count, total_non_dead);
            }
            for (i = 0; i < free_length; ++i) {
#ifdef TPF
                if (make_child(ap_server_conf, free_slots[i]) == -1) {
                    if(free_length == 1) {
                        shutdown_pending = 1;
                        ap_log_error(APLOG_MARK, APLOG_EMERG, 0, ap_server_conf,
                                     "No active child processes: shutting down");
                    }
                }
#else
                make_child(ap_server_conf, free_slots[i]);
#endif /* TPF */
            }
            /* the next time around we want to spawn twice as many if this
             * wasn't good enough, but not if we've just done a graceful
             */
            if (hold_off_on_exponential_spawning) {
                --hold_off_on_exponential_spawning;
            }
            else if (idle_spawn_rate < MAX_SPAWN_RATE) {
                idle_spawn_rate *= 2;
            }
        }
    }
    else {
        idle_spawn_rate = 1;
    }
}
/* Main routine of a Windows NT child process: create the worker thread
 * pool and the listener, wait for an exit/restart trigger, then shut the
 * workers down (gracefully if possible, TerminateThread as a last resort).
 * Never returns to the caller's control flow; ends when the child is done. */
void child_main(apr_pool_t *pconf)
{
    apr_status_t status;
    apr_hash_t *ht;
    ap_listen_rec *lr;
    HANDLE child_events[2];
    HANDLE *child_handles;
    int listener_started = 0;
    int threads_created = 0;
    int watch_thread;
    int time_remains;
    int cld;
    unsigned tid;
    int rv;
    int i;

    apr_pool_create(&pchild, pconf);
    apr_pool_tag(pchild, "pchild");
    ap_run_child_init(pchild, ap_server_conf);
    /* ht maps worker thread HANDLEs -> scoreboard index, used to reset
     * entries for threads we have to terminate during shutdown. */
    ht = apr_hash_make(pchild);

    /* Initialize the child_events */
    max_requests_per_child_event = CreateEvent(NULL, TRUE, FALSE, NULL);
    if (!max_requests_per_child_event) {
        ap_log_error(APLOG_MARK, APLOG_CRIT, apr_get_os_error(), ap_server_conf,
                     "Child %lu: Failed to create a max_requests event.", my_pid);
        exit(APEXIT_CHILDINIT);
    }
    child_events[0] = exit_event;
    child_events[1] = max_requests_per_child_event;

    allowed_globals.jobsemaphore = CreateSemaphore(NULL, 0, 1000000, NULL);
    apr_thread_mutex_create(&allowed_globals.jobmutex,
                            APR_THREAD_MUTEX_DEFAULT, pchild);

    /*
     * Wait until we have permission to start accepting connections.
     * start_mutex is used to ensure that only one child ever
     * goes into the listen/accept loop at once.
     */
    status = apr_proc_mutex_lock(start_mutex);
    if (status != APR_SUCCESS) {
        ap_log_error(APLOG_MARK,APLOG_ERR, status, ap_server_conf,
                     "Child %lu: Failed to acquire the start_mutex. Process will exit.", my_pid);
        exit(APEXIT_CHILDINIT);
    }
    ap_log_error(APLOG_MARK,APLOG_NOTICE, APR_SUCCESS, ap_server_conf,
                 "Child %lu: Acquired the start mutex.", my_pid);

    /*
     * Create the worker thread dispatch IOCompletionPort
     * on Windows NT/2000
     */
    if (use_acceptex) {
        /* Create the worker thread dispatch IOCP */
        ThreadDispatchIOCP = CreateIoCompletionPort(INVALID_HANDLE_VALUE,
                                                    NULL, 0, 0); /* CONCURRENT ACTIVE THREADS */
        apr_thread_mutex_create(&qlock, APR_THREAD_MUTEX_DEFAULT, pchild);
        qwait_event = CreateEvent(NULL, TRUE, FALSE, NULL);
        if (!qwait_event) {
            ap_log_error(APLOG_MARK, APLOG_CRIT, apr_get_os_error(), ap_server_conf,
                         "Child %lu: Failed to create a qwait event.", my_pid);
            exit(APEXIT_CHILDINIT);
        }
    }

    /*
     * Create the pool of worker threads
     */
    ap_log_error(APLOG_MARK,APLOG_NOTICE, APR_SUCCESS, ap_server_conf,
                 "Child %lu: Starting %d worker threads.", my_pid, ap_threads_per_child);
    /* NOTE(review): cast looks like it should be (HANDLE *) -- the result
     * is an array of HANDLEs, not a single HANDLE.  Harmless in practice
     * since HANDLE is a pointer type, but worth confirming upstream. */
    child_handles = (HANDLE) apr_pcalloc(pchild, ap_threads_per_child * sizeof(HANDLE));
    apr_thread_mutex_create(&child_lock, APR_THREAD_MUTEX_DEFAULT, pchild);

    /* Keep looping until all ThreadsPerChild workers exist; scoreboard
     * slots still occupied by the previous generation are retried each
     * pass after a one second sleep. */
    while (1) {
        for (i = 0; i < ap_threads_per_child; i++) {
            int *score_idx;
            int status = ap_scoreboard_image->servers[0][i].status;
            /* Only claim slots the previous generation has released. */
            if (status != SERVER_GRACEFUL && status != SERVER_DEAD) {
                continue;
            }
            ap_update_child_status_from_indexes(0, i, SERVER_STARTING, NULL);
            /* NOTE(review): the slot index is smuggled to the thread by
             * casting int -> void*; worker_main casts it back. */
            child_handles[i] = (HANDLE) _beginthreadex(NULL, (unsigned)ap_thread_stacksize,
                                                       worker_main, (void *) i, 0, &tid);
            if (child_handles[i] == 0) {
                ap_log_error(APLOG_MARK, APLOG_CRIT, apr_get_os_error(), ap_server_conf,
                             "Child %lu: _beginthreadex failed. Unable to create all worker threads. "
                             "Created %d of the %d threads requested with the ThreadsPerChild configuration directive.",
                             my_pid, threads_created, ap_threads_per_child);
                ap_signal_parent(SIGNAL_PARENT_SHUTDOWN);
                goto shutdown;
            }
            threads_created++;
            /* Save the score board index in ht keyed to the thread handle. We need this
             * when cleaning up threads down below...
             */
            apr_thread_mutex_lock(child_lock);
            score_idx = apr_pcalloc(pchild, sizeof(int));
            *score_idx = i;
            apr_hash_set(ht, &child_handles[i], sizeof(HANDLE), score_idx);
            apr_thread_mutex_unlock(child_lock);
        }
        /* Start the listener only when workers are available */
        if (!listener_started && threads_created) {
            create_listener_thread();
            listener_started = 1;
            winnt_mpm_state = AP_MPMQ_RUNNING;
        }
        if (threads_created == ap_threads_per_child) {
            break;
        }
        /* Check to see if the child has been told to exit */
        if (WaitForSingleObject(exit_event, 0) != WAIT_TIMEOUT) {
            break;
        }
        /* wait for previous generation to clean up an entry in the scoreboard */
        apr_sleep(1 * APR_USEC_PER_SEC);
    }

    /* Wait for one of three events:
     * exit_event:
     *    The exit_event is signaled by the parent process to notify
     *    the child that it is time to exit.
     *
     * max_requests_per_child_event:
     *    This event is signaled by the worker threads to indicate that
     *    the process has handled MaxRequestsPerChild connections.
     *
     * TIMEOUT:
     *    To do periodic maintenance on the server (check for thread exits,
     *    number of completion contexts, etc.)
     *
     * XXX: thread exits *aren't* being checked.
     *
     * XXX: other_child - we need the process handles to the other children
     *      in order to map them to apr_proc_other_child_read (which is not
     *      named well, it's more like a_p_o_c_died.)
     *
     * XXX: however - if we get a_p_o_c handle inheritance working, and
     *      the parent process creates other children and passes the pipes
     *      to our worker processes, then we have no business doing such
     *      things in the child_main loop, but should happen in master_main.
     */
    while (1) {
#if !APR_HAS_OTHER_CHILD
        rv = WaitForMultipleObjects(2, (HANDLE *) child_events, FALSE, INFINITE);
        cld = rv - WAIT_OBJECT_0;
#else
        /* Poll every second so other-child (a_p_o_c) maintenance runs. */
        rv = WaitForMultipleObjects(2, (HANDLE *) child_events, FALSE, 1000);
        cld = rv - WAIT_OBJECT_0;
        if (rv == WAIT_TIMEOUT) {
            apr_proc_other_child_refresh_all(APR_OC_REASON_RUNNING);
        }
        else
#endif
        if (rv == WAIT_FAILED) {
            /* Something serious is wrong */
            ap_log_error(APLOG_MARK, APLOG_CRIT, apr_get_os_error(), ap_server_conf,
                         "Child %lu: WAIT_FAILED -- shutting down server", my_pid);
            break;
        }
        else if (cld == 0) {
            /* Exit event was signaled */
            ap_log_error(APLOG_MARK, APLOG_NOTICE, APR_SUCCESS, ap_server_conf,
                         "Child %lu: Exit event signaled. Child process is ending.", my_pid);
            break;
        }
        else {
            /* MaxRequestsPerChild event set by the worker threads.
             * Signal the parent to restart
             */
            ap_log_error(APLOG_MARK, APLOG_NOTICE, APR_SUCCESS, ap_server_conf,
                         "Child %lu: Process exiting because it reached "
                         "MaxRequestsPerChild. Signaling the parent to "
                         "restart a new child process.", my_pid);
            ap_signal_parent(SIGNAL_PARENT_RESTART);
            break;
        }
    }

    /*
     * Time to shutdown the child process
     */
 shutdown:
    winnt_mpm_state = AP_MPMQ_STOPPING;
    /* Setting is_graceful will cause threads handling keep-alive connections
     * to close the connection after handling the current request.
     */
    is_graceful = 1;

    /* Close the listening sockets. Note, we must close the listeners
     * before closing any accept sockets pending in AcceptEx to prevent
     * memory leaks in the kernel.
     */
    for (lr = ap_listeners; lr ; lr = lr->next) {
        apr_socket_close(lr->sd);
    }

    /* Shutdown listener threads and pending AcceptEx socksts
     * but allow the worker threads to continue consuming from
     * the queue of accepted connections.
     */
    shutdown_in_progress = 1;

    Sleep(1000);

    /* Tell the worker threads to exit */
    workers_may_exit = 1;

    /* Release the start_mutex to let the new process (in the restart
     * scenario) a chance to begin accepting and servicing requests
     */
    rv = apr_proc_mutex_unlock(start_mutex);
    if (rv == APR_SUCCESS) {
        ap_log_error(APLOG_MARK,APLOG_NOTICE, rv, ap_server_conf,
                     "Child %lu: Released the start mutex", my_pid);
    }
    else {
        ap_log_error(APLOG_MARK,APLOG_ERR, rv, ap_server_conf,
                     "Child %lu: Failure releasing the start mutex", my_pid);
    }

    /* Shutdown the worker threads */
    if (!use_acceptex) {
        /* One poison job per worker unblocks each add_job consumer. */
        for (i = 0; i < threads_created; i++) {
            add_job(INVALID_SOCKET);
        }
    }
    else { /* Windows NT/2000 */
        /* Post worker threads blocked on the ThreadDispatch IOCompletion port */
        while (g_blocked_threads > 0) {
            ap_log_error(APLOG_MARK,APLOG_INFO, APR_SUCCESS, ap_server_conf,
                         "Child %lu: %d threads blocked on the completion port", my_pid, g_blocked_threads);
            for (i=g_blocked_threads; i > 0; i--) {
                PostQueuedCompletionStatus(ThreadDispatchIOCP, 0, IOCP_SHUTDOWN, NULL);
            }
            Sleep(1000);
        }
        /* Empty the accept queue of completion contexts */
        apr_thread_mutex_lock(qlock);
        while (qhead) {
            CloseHandle(qhead->Overlapped.hEvent);
            closesocket(qhead->accept_socket);
            qhead = qhead->next;
        }
        apr_thread_mutex_unlock(qlock);
    }

    /* Give busy threads a chance to service their connections,
     * (no more than the global server timeout period which
     * we track in msec remaining).
     */
    watch_thread = 0;
    time_remains = (int)(ap_server_conf->timeout / APR_TIME_C(1000));

    while (threads_created)
    {
        int nFailsafe = MAXIMUM_WAIT_OBJECTS;
        DWORD dwRet;

        /* Every time we roll over to wait on the first group
         * of MAXIMUM_WAIT_OBJECTS threads, take a breather,
         * and infrequently update the error log.
         */
        if (watch_thread >= threads_created) {
            if ((time_remains -= 100) < 0)
                break;

            /* Every 30 seconds give an update */
            if ((time_remains % 30000) == 0) {
                ap_log_error(APLOG_MARK, APLOG_NOTICE, APR_SUCCESS,
                             ap_server_conf,
                             "Child %lu: Waiting %d more seconds "
                             "for %d worker threads to finish.",
                             my_pid, time_remains / 1000, threads_created);
            }
            /* We'll poll from the top, 10 times per second */
            Sleep(100);
            watch_thread = 0;
        }

        /* Fairness, on each iteration we will pick up with the thread
         * after the one we just removed, even if it's a single thread.
         * We don't block here.
         */
        dwRet = WaitForMultipleObjects(min(threads_created - watch_thread,
                                           MAXIMUM_WAIT_OBJECTS),
                                       child_handles + watch_thread, 0, 0);

        if (dwRet == WAIT_FAILED) {
            break;
        }
        if (dwRet == WAIT_TIMEOUT) {
            /* none ready */
            watch_thread += MAXIMUM_WAIT_OBJECTS;
            continue;
        }
        else if (dwRet >= WAIT_ABANDONED_0) {
            /* We just got the ownership of the object, which
             * should happen at most MAXIMUM_WAIT_OBJECTS times.
             * It does NOT mean that the object is signaled.
             */
            if ((nFailsafe--) < 1)
                break;
        }
        else {
            watch_thread += (dwRet - WAIT_OBJECT_0);
            if (watch_thread >= threads_created)
                break;
            /* cleanup_thread closes the handle and compacts the array,
             * so threads_created shrinks as workers finish. */
            cleanup_thread(child_handles, &threads_created, watch_thread);
        }
    }

    /* Kill remaining threads off the hard way */
    if (threads_created) {
        ap_log_error(APLOG_MARK,APLOG_NOTICE, APR_SUCCESS, ap_server_conf,
                     "Child %lu: Terminating %d threads that failed to exit.",
                     my_pid, threads_created);
    }
    for (i = 0; i < threads_created; i++) {
        int *score_idx;
        TerminateThread(child_handles[i], 1);
        CloseHandle(child_handles[i]);
        /* Reset the scoreboard entry for the thread we just whacked */
        score_idx = apr_hash_get(ht, &child_handles[i], sizeof(HANDLE));
        if (score_idx) {
            ap_update_child_status_from_indexes(0, *score_idx, SERVER_DEAD, NULL);
        }
    }
    ap_log_error(APLOG_MARK,APLOG_NOTICE, APR_SUCCESS, ap_server_conf,
                 "Child %lu: All worker threads have exited.", my_pid);

    CloseHandle(allowed_globals.jobsemaphore);
    apr_thread_mutex_destroy(allowed_globals.jobmutex);
    apr_thread_mutex_destroy(child_lock);

    if (use_acceptex) {
        apr_thread_mutex_destroy(qlock);
        CloseHandle(qwait_event);
    }

    apr_pool_destroy(pchild);
    CloseHandle(exit_event);
}
/* NetWare/threaded variant of idle maintenance: workers are threads in a
 * single process, so an excess idle worker is retired by marking its
 * scoreboard slot WORKER_IDLE_KILL (the worker notices and exits) rather
 * than by signaling a pipe-of-death.  New workers are spawned into free
 * slots when too few are idle. */
static void perform_idle_server_maintenance(apr_pool_t *p)
{
    int i;
    int idle_count;
    worker_score *ws;
    int free_length;
    int free_slots[MAX_SPAWN_RATE];
    int last_non_dead;
    int total_non_dead;

    /* initialize the free_list */
    free_length = 0;
    idle_count = 0;
    last_non_dead = -1;
    total_non_dead = 0;

    for (i = 0; i < ap_threads_limit; ++i) {
        int status;

        /* Stop scanning past the high-water mark once we have enough
         * free slots for this cycle. */
        if (i >= ap_max_workers_limit && free_length == idle_spawn_rate)
            break;
        ws = &ap_scoreboard_image->servers[0][i];
        status = ws->status;
        if (status == WORKER_DEAD) {
            /* try to keep children numbers as low as possible */
            if (free_length < idle_spawn_rate) {
                free_slots[free_length] = i;
                ++free_length;
            }
        }
        else if (status == WORKER_IDLE_KILL) {
            /* If it is already marked to die, skip it */
            continue;
        }
        else {
            /* We consider a starting server as idle because we started it
             * at least a cycle ago, and if it still hasn't finished starting
             * then we're just going to swamp things worse by forking more.
             * So we hopefully won't need to fork more if we count it.
             * This depends on the ordering of SERVER_READY and SERVER_STARTING.
             */
            if (status <= WORKER_READY) {
                ++ idle_count;
            }
            ++total_non_dead;
            last_non_dead = i;
        }
    }
    DBPRINT2("Total: %d Idle Count: %d \r", total_non_dead, idle_count);
    /* Shrink the scan window for the next cycle. */
    ap_max_workers_limit = last_non_dead + 1;
    if (idle_count > ap_threads_max_free) {
        /* kill off one child... we use the pod because that'll cause it to
         * shut down gracefully, in case it happened to pick up a request
         * while we were counting
         */
        /* NOTE(review): comment mentions the pod, but this variant actually
         * retires the worker by marking its slot WORKER_IDLE_KILL below --
         * likely copied from the process-based MPM. */
        idle_spawn_rate = 1;
        ap_update_child_status_from_indexes(0, last_non_dead, WORKER_IDLE_KILL,
                                            (request_rec *) NULL);
        DBPRINT1("\nKilling idle thread: %d\n", last_non_dead);
    }
    else if (idle_count < ap_threads_min_free) {
        /* terminate the free list */
        if (free_length == 0) {
            /* only report this condition once */
            static int reported = 0;

            if (!reported) {
                ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(00220)
                             "server reached MaxRequestWorkers setting, consider"
                             " raising the MaxRequestWorkers setting");
                reported = 1;
            }
            idle_spawn_rate = 1;
        }
        else {
            if (idle_spawn_rate >= 8) {
                ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf, APLOGNO(00221)
                             "server seems busy, (you may need "
                             "to increase StartServers, or Min/MaxSpareServers), "
                             "spawning %d children, there are %d idle, and "
                             "%d total children", idle_spawn_rate,
                             idle_count, total_non_dead);
            }
            DBPRINT0("\n");
            for (i = 0; i < free_length; ++i) {
                DBPRINT1("Spawning additional thread slot: %d\n", free_slots[i]);
                make_child(ap_server_conf, free_slots[i]);
            }
            /* the next time around we want to spawn twice as many if this
             * wasn't good enough, but not if we've just done a graceful
             */
            if (hold_off_on_exponential_spawning) {
                --hold_off_on_exponential_spawning;
            }
            else if (idle_spawn_rate < MAX_SPAWN_RATE) {
                idle_spawn_rate *= 2;
            }
        }
    }
    else {
        /* Comfortable number of idle workers: reset the spawn rate. */
        idle_spawn_rate = 1;
    }
}
/* BeOS worker thread main loop.  Each worker competes for accept_mutex,
 * polls the listening sockets (slot 0 is a special UDP wakeup socket used
 * to tell workers to shut down), accepts one connection, services it via
 * process_socket(), and repeats until told to exit or until it has served
 * ap_max_requests_per_thread requests.  Returns 0; restart handling is
 * done by the on_exit_thread(check_restart, ...) hook. */
static int32 worker_thread(void * dummy)
{
    proc_info * ti = dummy;
    int child_slot = ti->slot;
    apr_pool_t *tpool = ti->tpool;
    apr_allocator_t *allocator;
    apr_socket_t *csd = NULL;
    apr_pool_t *ptrans;                /* Pool for per-transaction stuff */
    apr_bucket_alloc_t *bucket_alloc;
    apr_socket_t *sd = NULL;
    apr_status_t rv = APR_EINIT;
    int srv , n;
    int curr_pollfd = 0, last_pollfd = 0;
    sigset_t sig_mask;
    int requests_this_child = ap_max_requests_per_thread;
    apr_pollfd_t *pollset;
    /* each worker thread is in control of its own destiny...*/
    int this_worker_should_exit = 0;
    /* ti was malloc'd by make_worker; the thread owns and frees it. */
    free(ti);

    mpm_state = AP_MPMQ_STARTING;

    on_exit_thread(check_restart, (void*)child_slot);

    /* block the signals for this thread */
    sigfillset(&sig_mask);
    sigprocmask(SIG_BLOCK, &sig_mask, NULL);

    /* Per-transaction pool with its own allocator, cleared per request. */
    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(&ptrans, tpool, NULL, allocator);
    apr_allocator_owner_set(allocator, ptrans);
    apr_pool_tag(ptrans, "transaction");

    bucket_alloc = apr_bucket_alloc_create_ex(allocator);

    apr_thread_mutex_lock(worker_thread_count_mutex);
    worker_thread_count++;
    apr_thread_mutex_unlock(worker_thread_count_mutex);

    (void) ap_update_child_status_from_indexes(0, child_slot, SERVER_STARTING,
                                               (request_rec*)NULL);

    /* Pollset holds the UDP wakeup socket (index 0) plus all listeners,
     * hence the num_listening_sockets + 1 size and the <= loop bound. */
    apr_poll_setup(&pollset, num_listening_sockets + 1, tpool);
    for(n=0 ; n <= num_listening_sockets ; n++)
        apr_poll_socket_add(pollset, listening_sockets[n], APR_POLLIN);

    mpm_state = AP_MPMQ_RUNNING;

    while (1) {
        /* If we're here, then chances are (unless we're the first thread created)
         * we're going to be held up in the accept mutex, so doing this here
         * shouldn't hurt performance.
         */
        this_worker_should_exit |= (ap_max_requests_per_thread != 0)
                                   && (requests_this_child <= 0);

        if (this_worker_should_exit) break;

        (void) ap_update_child_status_from_indexes(0, child_slot, SERVER_READY,
                                                   (request_rec*)NULL);

        /* Only one worker polls/accepts at a time. */
        apr_thread_mutex_lock(accept_mutex);

        while (!this_worker_should_exit) {
            apr_int16_t event;
            apr_status_t ret;

            ret = apr_poll(pollset, num_listening_sockets + 1, &srv, -1);

            if (ret != APR_SUCCESS) {
                if (APR_STATUS_IS_EINTR(ret)) {
                    continue;
                }
                /* poll() will only return errors in catastrophic
                 * circumstances. Let's try exiting gracefully, for now. */
                ap_log_error(APLOG_MARK, APLOG_ERR, ret, (const server_rec *)
                             ap_server_conf, "apr_poll: (listen)");
                this_worker_should_exit = 1;
            }
            else {
                /* if we've bailed in apr_poll what's the point of trying to use the data? */
                apr_poll_revents_get(&event, listening_sockets[0], pollset);

                if (event & APR_POLLIN){
                    /* Activity on socket 0 is the shutdown wakeup datagram:
                     * drain it and mark this worker for exit.
                     * NOTE(review): 127.0.0.1:7772 is presumably the MPM's
                     * internal wakeup port -- confirm against the sender. */
                    apr_sockaddr_t *rec_sa;
                    apr_size_t len = 5;
                    char *tmpbuf = apr_palloc(ptrans, sizeof(char) * 5);
                    apr_sockaddr_info_get(&rec_sa, "127.0.0.1", APR_UNSPEC, 7772, 0, ptrans);

                    if ((ret = apr_recvfrom(rec_sa, listening_sockets[0], 0, tmpbuf, &len)) != APR_SUCCESS){
                        ap_log_error(APLOG_MARK, APLOG_ERR, ret, NULL,
                                     "error getting data from UDP!!");
                    }else {
                        /* add checking??? */
                    }
                    this_worker_should_exit = 1;
                }
            }

            if (this_worker_should_exit) break;

            if (num_listening_sockets == 1) {
                sd = ap_listeners->sd;
                goto got_fd;
            }
            else {
                /* find a listener */
                /* Round-robin over listener slots 1..num_listening_sockets,
                 * starting just after the last one we serviced. */
                curr_pollfd = last_pollfd;
                do {
                    curr_pollfd++;
                    if (curr_pollfd > num_listening_sockets)
                        curr_pollfd = 1;
                    /* Get the revent...
                     */
                    apr_poll_revents_get(&event, listening_sockets[curr_pollfd], pollset);

                    if (event & APR_POLLIN) {
                        last_pollfd = curr_pollfd;
                        sd = listening_sockets[curr_pollfd];
                        goto got_fd;
                    }
                } while (curr_pollfd != last_pollfd);
            }
        }
    got_fd:
        if (!this_worker_should_exit) {
            rv = apr_accept(&csd, sd, ptrans);
            /* Release the mutex as soon as we have (or failed) the accept. */
            apr_thread_mutex_unlock(accept_mutex);
            if (rv != APR_SUCCESS) {
                ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
                             "apr_accept");
            } else {
                process_socket(ptrans, csd, child_slot, bucket_alloc);
                requests_this_child--;
            }
        }
        else {
            apr_thread_mutex_unlock(accept_mutex);
            break;
        }
        apr_pool_clear(ptrans);
    }

    ap_update_child_status_from_indexes(0, child_slot, SERVER_DEAD,
                                        (request_rec*)NULL);

    apr_bucket_alloc_destroy(bucket_alloc);

    ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, NULL,
                 "worker_thread %ld exiting", find_thread(NULL));

    apr_thread_mutex_lock(worker_thread_count_mutex);
    worker_thread_count--;
    apr_thread_mutex_unlock(worker_thread_count_mutex);

    return (0);
}
static void worker_main(void *vpArg) { long conn_id; conn_rec *current_conn; apr_pool_t *pconn; apr_allocator_t *allocator; apr_bucket_alloc_t *bucket_alloc; worker_args_t *worker_args; HQUEUE workq; PID owner; int rc; REQUESTDATA rd; ULONG len; BYTE priority; int thread_slot = (int)vpArg; EXCEPTIONREGISTRATIONRECORD reg_rec = { NULL, thread_exception_handler }; ap_sb_handle_t *sbh; /* Trap exceptions in this thread so we don't take down the whole process */ DosSetExceptionHandler( ®_rec ); rc = DosOpenQueue(&owner, &workq, apr_psprintf(pchild, "/queues/httpd/work.%d", getpid())); if (rc) { ap_log_error(APLOG_MARK, APLOG_ERR, APR_FROM_OS_ERROR(rc), ap_server_conf, "unable to open work queue, exiting"); ap_scoreboard_image->servers[child_slot][thread_slot].tid = 0; } conn_id = ID_FROM_CHILD_THREAD(child_slot, thread_slot); ap_update_child_status_from_indexes(child_slot, thread_slot, SERVER_READY, NULL); apr_allocator_create(&allocator); apr_allocator_max_free_set(allocator, ap_max_mem_free); bucket_alloc = apr_bucket_alloc_create_ex(allocator); while (rc = DosReadQueue(workq, &rd, &len, (PPVOID)&worker_args, 0, DCWW_WAIT, &priority, NULLHANDLE), rc == 0 && rd.ulData != WORKTYPE_EXIT) { pconn = worker_args->pconn; ap_create_sb_handle(&sbh, pconn, child_slot, thread_slot); current_conn = ap_run_create_connection(pconn, ap_server_conf, worker_args->conn_sd, conn_id, sbh, bucket_alloc); if (current_conn) { ap_process_connection(current_conn, worker_args->conn_sd); ap_lingering_close(current_conn); } apr_pool_destroy(pconn); ap_update_child_status_from_indexes(child_slot, thread_slot, SERVER_READY, NULL); } ap_update_child_status_from_indexes(child_slot, thread_slot, SERVER_DEAD, NULL); apr_bucket_alloc_destroy(bucket_alloc); apr_allocator_destroy(allocator); }
/*
 * worker_main()
 * Main entry point for the worker threads. Worker threads block in
 * win*_get_connection() awaiting a connection to service.
 */
static DWORD __stdcall worker_main(void *thread_num_val)
{
    apr_thread_t *thd;
    apr_os_thread_t osthd;
    /* NOTE(review): static => one counter shared by every worker thread in
     * this child, incremented without synchronization below.  Presumably
     * intentional (MaxConnectionsPerChild is a per-process limit and the
     * count only needs to be approximate) -- confirm before changing. */
    static int requests_this_child = 0;
    winnt_conn_ctx_t *context = NULL;
    int thread_num = (int)thread_num_val;   /* slot index smuggled via void* */
    ap_sb_handle_t *sbh;
    apr_bucket *e;
    int rc;
    conn_rec *c;
    apr_int32_t disconnected;

    osthd = apr_os_thread_current();

    while (1) {
        ap_update_child_status_from_indexes(0, thread_num, SERVER_READY, NULL);

        /* Grab a connection off the network */
        context = winnt_get_connection(context);

        if (!context) {
            /* Time for the thread to exit */
            break;
        }

        /* Have we hit MaxConnectionsPerChild connections? */
        if (ap_max_requests_per_child) {
            requests_this_child++;
            if (requests_this_child > ap_max_requests_per_child) {
                /* Ask child_main to recycle this process; we keep serving
                 * the current connection meanwhile. */
                SetEvent(max_requests_per_child_event);
            }
        }

        /* First data bucket read ahead by AcceptEx, if any. */
        e = context->overlapped.Pointer;

        ap_create_sb_handle(&sbh, context->ptrans, 0, thread_num);
        c = ap_run_create_connection(context->ptrans, ap_server_conf,
                                     context->sock, thread_num, sbh,
                                     context->ba);

        if (!c) {
            /* ap_run_create_connection closes the socket on failure */
            context->accept_socket = INVALID_SOCKET;
            if (e)
                apr_bucket_free(e);
            continue;
        }

        thd = NULL;
        apr_os_thread_put(&thd, &osthd, context->ptrans);
        c->current_thread = thd;

        /* follow ap_process_connection(c, context->sock) logic
         * as it left us no chance to reinject our first data bucket.
         */
        ap_update_vhost_given_ip(c);

        rc = ap_run_pre_connection(c, context->sock);
        if (rc != OK && rc != DONE) {
            c->aborted = 1;
        }

        if (e && c->aborted) {
            apr_bucket_free(e);
        }
        else {
            /* Stash the context so the input filter can reinject the
             * read-ahead bucket e. */
            ap_set_module_config(c->conn_config, &mpm_winnt_module, context);
        }

        if (!c->aborted) {
            ap_run_process_connection(c);

            apr_socket_opt_get(context->sock, APR_SO_DISCONNECTED,
                               &disconnected);

            if (!disconnected) {
                /* Socket still live: prevent context reuse from recycling
                 * it, and close it down gracefully ourselves. */
                context->accept_socket = INVALID_SOCKET;
                ap_lingering_close(c);
            }
        }
    }

    ap_update_child_status_from_indexes(0, thread_num, SERVER_DEAD,
                                        (request_rec *) NULL);

    return 0;
}