void accept_mutex_off(void)
{
    int err = 0;

    /* Have to block alarms here, or else we might have a double-unlock,
     * which is possible with pthread mutexes, since they are designed to
     * be fast, and hence not necessarily make checks for ownership or
     * multiple unlocks.
     */
    ap_block_alarms();
    if ((err = pthread_mutex_unlock(accept_mutex))) {
        errno = err;
        perror("pthread_mutex_unlock");
        clean_child_exit(APEXIT_CHILDFATAL);
    }
    have_accept_mutex = 0;
    ap_unblock_alarms();

    if (sigprocmask(SIG_SETMASK, &accept_previous_mask, NULL)) {
        perror("sigprocmask(SIG_SETMASK)");
        clean_child_exit(1);
    }
}
void accept_mutex_on(void)
{
    int err = 0;

    if (sigprocmask(SIG_BLOCK, &accept_block_mask, &accept_previous_mask)) {
        perror("sigprocmask(SIG_BLOCK)");
        clean_child_exit(APEXIT_CHILDFATAL);
    }
    /* We need to block alarms here, since if we get killed *right* after
     * locking the mutex, have_accept_mutex will not be set, and our
     * child cleanup will not work.
     */
    ap_block_alarms();
    if ((err = pthread_mutex_lock(accept_mutex))) {
        errno = err;
        perror("pthread_mutex_lock");
        clean_child_exit(APEXIT_CHILDFATAL);
    }
    have_accept_mutex = 1;
    ap_unblock_alarms();
}
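/* A minimal usage sketch (names below are placeholders, not from this
 * file): the two wrappers above are meant to bracket only the accept()
 * call itself, so exactly one child sleeps in accept() at a time and the
 * mutex is never held while a request is being processed. */
static void serialized_accept_loop_sketch(int listen_fd)
{
    for (;;) {
        int csd;

        accept_mutex_on();                   /* contend with sibling children */
        csd = accept(listen_fd, NULL, NULL); /* wait for a connection */
        accept_mutex_off();                  /* release before real work */

        if (csd < 0) {
            continue;                        /* e.g. EINTR: just retry */
        }
        handle_connection_sketch(csd);       /* hypothetical request handler */
        close(csd);
    }
}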
static int make_worker(int slot)
{
    thread_id tid;
    proc_info *my_info = (proc_info *)malloc(sizeof(proc_info)); /* freed by thread... */

    if (my_info == NULL) {
        ap_log_error(APLOG_MARK, APLOG_ALERT, errno, ap_server_conf,
                     "malloc: out of memory");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    my_info->slot = slot;
    apr_pool_create(&my_info->tpool, pchild);

    if (slot + 1 > ap_max_child_assigned)
        ap_max_child_assigned = slot + 1;

    if (one_process) {
        set_signals();
        ap_scoreboard_image->parent[0].pid = getpid();
        return 0;
    }

    (void) ap_update_child_status_from_indexes(0, slot, SERVER_STARTING,
                                               (request_rec *)NULL);

    tid = spawn_thread(worker_thread, "apache_worker",
                       B_NORMAL_PRIORITY, my_info);
    if (tid < B_NO_ERROR) {
        ap_log_error(APLOG_MARK, APLOG_ERR, errno, NULL,
                     "spawn_thread: Unable to start a new thread");
        /* In case system resources are maxed out, we don't want
         * Apache running away with the CPU trying to fork over and
         * over and over again.
         */
        (void) ap_update_child_status_from_indexes(0, slot, SERVER_DEAD,
                                                   (request_rec *)NULL);
        sleep(10);
        free(my_info);
        return -1;
    }
    resume_thread(tid);

    ap_scoreboard_image->servers[0][slot].tid = tid;
    return 0;
}
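/* A hypothetical caller sketch (not from this file) showing how a startup
 * loop might use make_worker()'s return value: on failure the function has
 * already slept 10 seconds and marked the slot SERVER_DEAD, so the caller
 * can simply move on to the next slot.  "desired_threads" is an assumed
 * parameter name. */
static void startup_workers_sketch(int desired_threads)
{
    int slot;

    for (slot = 0; slot < desired_threads; slot++) {
        if (make_worker(slot) < 0) {
            continue;   /* slot stays SERVER_DEAD; retry on a later pass */
        }
    }
}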
static void accept_mutex_on(void)
{
    apr_status_t rv = apr_proc_mutex_lock(accept_mutex);

    if (rv != APR_SUCCESS) {
        const char *msg = "couldn't grab the accept mutex";

        if (ap_my_generation !=
            ap_scoreboard_image->global->running_generation) {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, NULL, msg);
            clean_child_exit(0);
        }
        else {
            ap_log_error(APLOG_MARK, APLOG_EMERG, rv, NULL, msg);
            exit(APEXIT_CHILDFATAL);
        }
    }
}
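/* A hedged sketch (assumed wrapper name) of the parent-side setup this
 * pairs with: the parent creates the cross-process mutex once at startup,
 * each child reattaches with apr_proc_mutex_child_init(), and then
 * brackets accept with accept_mutex_on()/accept_mutex_off().  The two APR
 * calls are real APR API; the wrapper itself is illustrative. */
static apr_status_t setup_accept_mutex_sketch(apr_pool_t *pconf,
                                              const char *lock_fname)
{
    /* APR_LOCK_DEFAULT lets APR choose the best mechanism (SysV semaphore,
     * fcntl, pthread mutex, ...) available on the platform. */
    return apr_proc_mutex_create(&accept_mutex, lock_fname,
                                 APR_LOCK_DEFAULT, pconf);
}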
void ap_mpm_child_main(apr_pool_t *pconf)
{
    ap_listen_rec *lr = NULL;
    int requests_this_child = 0;
    int rv = 0;
    unsigned long ulTimes;
    int my_pid = getpid();
    ULONG rc, c;
    HQUEUE workq;
    apr_pollset_t *pollset;
    int num_listeners;
    TID server_maint_tid;
    void *sb_mem;
    int last_poll_idx = 0;

    /* Stop Ctrl-C/Ctrl-Break signals going to child processes */
    DosSetSignalExceptionFocus(0, &ulTimes);
    set_signals();

    /* Create pool for child */
    apr_pool_create(&pchild, pconf);

    ap_run_child_init(pchild, ap_server_conf);

    /* Create an event semaphore used to trigger other threads to shutdown */
    rc = DosCreateEventSem(NULL, &shutdown_event, 0, FALSE);

    if (rc) {
        ap_log_error(APLOG_MARK, APLOG_ERR, APR_FROM_OS_ERROR(rc),
                     ap_server_conf,
                     "unable to create shutdown semaphore, exiting");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    /* Gain access to the scoreboard. */
    rc = DosGetNamedSharedMem(&sb_mem, ap_scoreboard_fname,
                              PAG_READ|PAG_WRITE);

    if (rc) {
        ap_log_error(APLOG_MARK, APLOG_ERR, APR_FROM_OS_ERROR(rc),
                     ap_server_conf,
                     "scoreboard not readable in child, exiting");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_calc_scoreboard_size();
    ap_init_scoreboard(sb_mem);

    /* Gain access to the accept mutex */
    rc = DosOpenMutexSem(NULL, &ap_mpm_accept_mutex);

    if (rc) {
        ap_log_error(APLOG_MARK, APLOG_ERR, APR_FROM_OS_ERROR(rc),
                     ap_server_conf,
                     "accept mutex couldn't be accessed in child, exiting");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    /* Find our pid in the scoreboard so we know what slot our parent
     * allocated us.  (Bounds check first so we never index past the
     * parent[] array.)
     */
    for (child_slot = 0;
         child_slot < HARD_SERVER_LIMIT &&
             ap_scoreboard_image->parent[child_slot].pid != my_pid;
         child_slot++);

    if (child_slot == HARD_SERVER_LIMIT) {
        ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf,
                     "child pid not found in scoreboard, exiting");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_my_generation = ap_scoreboard_image->parent[child_slot].generation;
    memset(ap_scoreboard_image->servers[child_slot], 0,
           sizeof(worker_score) * HARD_THREAD_LIMIT);

    /* Set up an OS/2 queue for passing connections & termination requests
     * to worker threads
     */
    rc = DosCreateQueue(&workq, QUE_FIFO,
                        apr_psprintf(pchild, "/queues/httpd/work.%d", my_pid));

    if (rc) {
        ap_log_error(APLOG_MARK, APLOG_ERR, APR_FROM_OS_ERROR(rc),
                     ap_server_conf,
                     "unable to create work queue, exiting");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    /* Create initial pool of worker threads */
    for (c = 0; c < ap_min_spare_threads; c++) {
        ap_scoreboard_image->servers[child_slot][c].tid =
            _beginthread(worker_main, NULL, 128*1024, (void *)c);
    }

    /* Start maintenance thread */
    server_maint_tid = _beginthread(server_maintenance, NULL, 32768, NULL);

    /* Set up poll */
    for (num_listeners = 0, lr = ap_listeners; lr; lr = lr->next) {
        num_listeners++;
    }

    apr_pollset_create(&pollset, num_listeners, pchild, 0);

    for (lr = ap_listeners; lr != NULL; lr = lr->next) {
        apr_pollfd_t pfd = { 0 };

        pfd.desc_type = APR_POLL_SOCKET;
        pfd.desc.s = lr->sd;
        pfd.reqevents = APR_POLLIN;
        pfd.client_data = lr;
        apr_pollset_add(pollset, &pfd);
    }

    /* Main connection accept loop */
    do {
        apr_pool_t *pconn;
        worker_args_t *worker_args;

        apr_pool_create(&pconn, pchild);
        worker_args = apr_palloc(pconn, sizeof(worker_args_t));
        worker_args->pconn = pconn;

        if (num_listeners == 1) {
            rv = apr_socket_accept(&worker_args->conn_sd,
                                   ap_listeners->sd, pconn);
        }
        else {
            const apr_pollfd_t *poll_results;
            apr_int32_t num_poll_results;

            rc = DosRequestMutexSem(ap_mpm_accept_mutex, SEM_INDEFINITE_WAIT);

            if (shutdown_pending) {
                DosReleaseMutexSem(ap_mpm_accept_mutex);
                break;
            }

            rv = APR_FROM_OS_ERROR(rc);

            if (rv == APR_SUCCESS) {
                rv = apr_pollset_poll(pollset, -1, &num_poll_results,
                                      &poll_results);
                DosReleaseMutexSem(ap_mpm_accept_mutex);
            }

            if (rv == APR_SUCCESS) {
                /* Round-robin across the ready descriptors so no listener
                 * is starved; advance the saved index exactly once per
                 * accepted descriptor. */
                if (last_poll_idx >= num_listeners) {
                    last_poll_idx = 0;
                }

                lr = poll_results[last_poll_idx++].client_data;
                rv = apr_socket_accept(&worker_args->conn_sd, lr->sd, pconn);
            }
        }

        if (rv != APR_SUCCESS) {
            if (!APR_STATUS_IS_EINTR(rv)) {
                ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
                             "apr_socket_accept");
                clean_child_exit(APEXIT_CHILDFATAL);
            }
        }
        else {
            DosWriteQueue(workq, WORKTYPE_CONN, sizeof(worker_args_t),
                          worker_args, 0);
            requests_this_child++;
        }

        if (ap_max_requests_per_child != 0
            && requests_this_child >= ap_max_requests_per_child)
            break;
    } while (!shutdown_pending
             && ap_my_generation ==
                ap_scoreboard_image->global->running_generation);

    ap_scoreboard_image->parent[child_slot].quiescing = 1;
    DosPostEventSem(shutdown_event);
    DosWaitThread(&server_maint_tid, DCWW_WAIT);

    if (is_graceful) {
        char someleft;

        /* tell our worker threads to exit */
        for (c = 0; c < HARD_THREAD_LIMIT; c++) {
            if (ap_scoreboard_image->servers[child_slot][c].status != SERVER_DEAD) {
                DosWriteQueue(workq, WORKTYPE_EXIT, 0, NULL, 0);
            }
        }

        do {
            someleft = 0;

            for (c = 0; c < HARD_THREAD_LIMIT; c++) {
                if (ap_scoreboard_image->servers[child_slot][c].status != SERVER_DEAD) {
                    someleft = 1;
                    DosSleep(1000);
                    break;
                }
            }
        } while (someleft);
    }
    else {
        DosPurgeQueue(workq);

        for (c = 0; c < HARD_THREAD_LIMIT; c++) {
            if (ap_scoreboard_image->servers[child_slot][c].status != SERVER_DEAD) {
                DosKillThread(ap_scoreboard_image->servers[child_slot][c].tid);
            }
        }
    }

    apr_pool_destroy(pchild);
}
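/* A hedged sketch of the consumer side of the work queue above: a worker
 * thread blocks in DosReadQueue() until ap_mpm_child_main() posts either a
 * connection (WORKTYPE_CONN) or a termination request (WORKTYPE_EXIT).
 * The exact DosReadQueue() parameter usage and the shared "workq" handle
 * are assumptions for illustration; the real worker_main() is not shown
 * in this section. */
static void worker_main_sketch(void *arg)
{
    REQUESTDATA rd;     /* rd.ulData carries the WORKTYPE_* request code */
    ULONG len;
    PVOID data;
    BYTE priority;

    for (;;) {
        if (DosReadQueue(workq, &rd, &len, &data, 0 /* first element */,
                         0 /* block until available */, &priority,
                         NULLHANDLE)) {
            break;                      /* queue closed or purged: exit */
        }
        if (rd.ulData == WORKTYPE_EXIT) {
            break;                      /* graceful shutdown requested */
        }
        /* rd.ulData == WORKTYPE_CONN: "data" is the queued worker_args_t;
         * processing of worker_args->conn_sd would go here. */
    }
}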
/*static */ void worker_main(void *arg)
{
    ap_listen_rec *lr, *first_lr, *last_lr = NULL;
    apr_pool_t *ptrans;
    apr_allocator_t *allocator;
    apr_bucket_alloc_t *bucket_alloc;
    conn_rec *current_conn;
    apr_status_t stat = APR_EINIT;
    ap_sb_handle_t *sbh;
    apr_thread_t *thd = NULL;
    apr_os_thread_t osthd;

    int my_worker_num = (int)arg;
    apr_socket_t *csd = NULL;
    int requests_this_child = 0;
    apr_socket_t *sd = NULL;
    fd_set main_fds;

    int sockdes;
    int srv;
    struct timeval tv;
    int wouldblock_retry;

    osthd = apr_os_thread_current();
    apr_os_thread_put(&thd, &osthd, pmain);

    tv.tv_sec = 1;
    tv.tv_usec = 0;

    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(&ptrans, pmain, NULL, allocator);
    apr_allocator_owner_set(allocator, ptrans);
    apr_pool_tag(ptrans, "transaction");

    bucket_alloc = apr_bucket_alloc_create_ex(allocator);

    atomic_inc(&worker_thread_count);

    while (!die_now) {
        /*
         * (Re)initialize this child to a pre-connection state.
         */
        current_conn = NULL;
        apr_pool_clear(ptrans);

        if ((ap_max_requests_per_child > 0
             && requests_this_child++ >= ap_max_requests_per_child)) {
            DBPRINT1("\n**Thread slot %d is shutting down", my_worker_num);
            clean_child_exit(0, my_worker_num, ptrans, bucket_alloc);
        }

        ap_update_child_status_from_indexes(0, my_worker_num, WORKER_READY,
                                            (request_rec *) NULL);

        /*
         * Wait for an acceptable connection to arrive.
         */
        for (;;) {
            if (shutdown_pending || restart_pending
                || (ap_scoreboard_image->servers[0][my_worker_num].status
                    == WORKER_IDLE_KILL)) {
                DBPRINT1("\nThread slot %d is shutting down\n", my_worker_num);
                clean_child_exit(0, my_worker_num, ptrans, bucket_alloc);
            }

            /* Check the listen queue on all sockets for requests */
            memcpy(&main_fds, &listenfds, sizeof(fd_set));
            srv = select(listenmaxfd + 1, &main_fds, NULL, NULL, &tv);

            if (srv <= 0) {
                if (srv < 0) {
                    ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
                                 APLOGNO(00217)
                                 "select() failed on listen socket");
                    apr_thread_yield();
                }
                continue;
            }

            /* remember the last_lr we searched last time around so that
               we don't end up starving any particular listening socket */
            if (last_lr == NULL) {
                lr = ap_listeners;
            }
            else {
                lr = last_lr->next;
                if (!lr)
                    lr = ap_listeners;
            }

            first_lr = lr;

            do {
                apr_os_sock_get(&sockdes, lr->sd);
                if (FD_ISSET(sockdes, &main_fds))
                    goto got_listener;
                lr = lr->next;
                if (!lr)
                    lr = ap_listeners;
            } while (lr != first_lr);

            /* if we get here, something unexpected happened. Go back
               into the select state and try again. */
            continue;

        got_listener:
            last_lr = lr;
            sd = lr->sd;
            wouldblock_retry = MAX_WB_RETRIES;

            while (wouldblock_retry) {
                if ((stat = apr_socket_accept(&csd, sd, ptrans)) == APR_SUCCESS) {
                    break;
                }
                else {
                    /* if the error is a wouldblock then maybe we were too
                       quick to try to pull the next request from the listen
                       queue. Try a few more times then return to our idle
                       listen state. */
                    if (!APR_STATUS_IS_EAGAIN(stat)) {
                        break;
                    }

                    if (wouldblock_retry--) {
                        apr_thread_yield();
                    }
                }
            }

            /* If we got a new socket, set it to blocking mode (the 0
               argument clears APR_SO_NONBLOCK) and process it.
               Otherwise handle the error. */
            if (stat == APR_SUCCESS) {
                apr_socket_opt_set(csd, APR_SO_NONBLOCK, 0);
#ifdef DBINFO_ON
                if (wouldblock_retry < MAX_WB_RETRIES) {
                    retry_success++;
                    avg_retries += (MAX_WB_RETRIES - wouldblock_retry);
                }
#endif
                break;          /* We have a socket ready for reading */
            }
            else {
#ifdef DBINFO_ON
                if (APR_STATUS_IS_EAGAIN(stat)) {
                    would_block++;
                    retry_fail++;
                }
                else if (
#else
                if (APR_STATUS_IS_EAGAIN(stat) ||
#endif
                    APR_STATUS_IS_ECONNRESET(stat) ||
                    APR_STATUS_IS_ETIMEDOUT(stat) ||
                    APR_STATUS_IS_EHOSTUNREACH(stat) ||
                    APR_STATUS_IS_ENETUNREACH(stat)) {
                    ;
                }
#ifdef USE_WINSOCK
                else if (APR_STATUS_IS_ENETDOWN(stat)) {
                    /*
                     * When the network layer has been shut down, there
                     * is not much use in simply exiting: the parent
                     * would simply re-create us (and we'd fail again).
                     * Use the CHILDFATAL code to tear the server down.
                     * @@@ Martin's idea for possible improvement:
                     * A different approach would be to define
                     * a new APEXIT_NETDOWN exit code, the reception
                     * of which would make the parent shutdown all
                     * children, then idle-loop until it detected that
                     * the network is up again, and restart the children.
                     * Ben Hyde noted that temporary ENETDOWN situations
                     * occur in mobile IP.
                     */
                    ap_log_error(APLOG_MARK, APLOG_EMERG, stat, ap_server_conf,
                                 APLOGNO(00218)
                                 "apr_socket_accept: giving up.");
                    clean_child_exit(APEXIT_CHILDFATAL, my_worker_num,
                                     ptrans, bucket_alloc);
                }
#endif
                else {
                    ap_log_error(APLOG_MARK, APLOG_ERR, stat, ap_server_conf,
                                 APLOGNO(00219)
                                 "apr_socket_accept: (client socket)");
                    clean_child_exit(1, my_worker_num, ptrans, bucket_alloc);
                }
            }
        }

        ap_create_sb_handle(&sbh, ptrans, 0, my_worker_num);
        /*
         * We now have a connection, so set it up with the appropriate
         * socket options, file descriptors, and read/write buffers.
         */
        current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd,
                                                my_worker_num, sbh,
                                                bucket_alloc);
        if (current_conn) {
            current_conn->current_thread = thd;
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }
        request_count++;
    }
    clean_child_exit(0, my_worker_num, ptrans, bucket_alloc);
}
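/* A minimal sketch distilling the retry idiom above: treat EAGAIN from a
 * non-blocking accept as "another thread won the race", yield, and retry a
 * bounded number of times.  The helper name and parameters are
 * illustrative; only apr_socket_accept(), APR_STATUS_IS_EAGAIN() and
 * apr_thread_yield() are real APR API. */
static apr_status_t accept_with_retry_sketch(apr_socket_t **csd,
                                             apr_socket_t *sd,
                                             apr_pool_t *p, int max_retries)
{
    apr_status_t rv = APR_EGENERAL;

    while (max_retries-- > 0) {
        rv = apr_socket_accept(csd, sd, p);
        if (rv == APR_SUCCESS || !APR_STATUS_IS_EAGAIN(rv)) {
            break;              /* got a connection, or a real error */
        }
        apr_thread_yield();     /* lost the race; give other threads a turn */
    }
    return rv;
}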
// static void child_main(int child_num_arg)
void body()
{
    mpm_state = AP_MPMQ_STARTING;  /* for benefit of any hooks that run as this
                                    * child initializes */

    my_child_num = child_num_arg;
    ap_my_pid = getpid();
    requests_this_child = 0;

    ap_fatal_signal_child_setup(ap_server_conf);

    /* Get a sub context for global allocations in this child, so that
     * we can have cleanups occur when the child exits.
     */
    apr_allocator_create(allocator);                    //// removed deref
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(pchild, pconf, NULL, allocator); //// removed deref
    apr_allocator_owner_set(allocator, pchild);

    apr_pool_create(ptrans, pchild);                    //// removed deref
    apr_pool_tag(ptrans, 65); // "transaction");

    /* needs to be done before we switch UIDs so we have permissions */
    ap_reopen_scoreboard(pchild, NULL, 0);
    status = apr_proc_mutex_child_init(accept_mutex, ap_lock_fname, pchild); //// removed deref
    if (status != APR_SUCCESS) {
        /* ap_log_error(APLOG_MARK, APLOG_EMERG, status, ap_server_conf, */
        /*              "Couldn't initialize cross-process lock in child " */
        /*              "%s %d", ap_lock_fname, ap_accept_lock_mech); */
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    if (unixd_setup_child() > 0) {
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_run_child_init(pchild, ap_server_conf);

    ap_create_sb_handle(sbh, pchild, my_child_num, 0); //// removed deref

    ap_update_child_status(sbh, SERVER_READY, NULL);

    /* Set up the pollfd array */
    /* ### check the status */
    (void) apr_pollset_create(pollset, num_listensocks, pchild, 0); //// removed deref

    num_listensocks = nondet();
    assume(num_listensocks > 0);

    lr = ap_listeners;
    i = num_listensocks;
    while (1) {
        if (i <= 0)
            break;
        int pfd = 0;

        pfd_desc_type = APR_POLL_SOCKET;
        pfd_desc_s = 1; // lr->sd;
        pfd_reqevents = APR_POLLIN;
        pfd_client_data = lr;

        /* ### check the status */
        (void) apr_pollset_add(pollset, pfd); //// removed deref
        i--;
    }

    mpm_state = AP_MPMQ_RUNNING;

    bucket_alloc = apr_bucket_alloc_create(pchild);

    while (1 > 0) {
        if (die_now > 0)
            break;
        conn_rec *current_conn;
        void *csd;

        /*
         * (Re)initialize this child to a pre-connection state.
         */
        apr_pool_clear(ptrans);

        if ((ap_max_requests_per_child > 0
             && requests_this_child++ >= ap_max_requests_per_child)) {
            clean_child_exit(0);
        }

        (void) ap_update_child_status(sbh, SERVER_READY, NULL);

        /*
         * Wait for an acceptable connection to arrive.
         */

        /* Lock around "accept", if necessary */
        SAFE_ACCEPT(accept_mutex_on());
        do_ACCEPT = 1;
        do_ACCEPT = 0;

        dummy = nondet();
        if (dummy > 0) {
            /* goto loc_return; */
            while (1 > 0) { int ddd; ddd = ddd; }
        }

        if (num_listensocks == 1) {
            /* There is only one listener record, so refer to that one. */
            lr = ap_listeners;
        }
        else {
            /* multiple listening sockets - need to poll */
            while (1) {
                int numdesc;
                const void *pdesc;

                /* timeout == -1 == wait forever */
                status = apr_pollset_poll(pollset, -1, numdesc, pdesc); //// removed deref
                if (status != APR_SUCCESS) {
                    if (APR_STATUS_IS_EINTR(status) > 0) {
                        if (one_process > 0 && shutdown_pending > 0) {
                            /* goto loc_return; */
                            while (1 > 0) { int ddd; ddd = ddd; }
                        }
                        goto loc_continueA;
                    }

                    /* Single Unix documents select as returning errnos
                     * EBADF, EINTR, and EINVAL... and in none of those
                     * cases does it make sense to continue.  In fact
                     * on Linux 2.0.x we seem to end up with EFAULT
                     * occasionally, and we'd loop forever due to it.
                     */
                    /* ap_log_error5(APLOG_MARK, APLOG_ERR, status, */
                    /*               ap_server_conf, "apr_pollset_poll: (listen)"); */
                    clean_child_exit(1);
                }

                /* We can always use pdesc[0], but sockets at position N
                 * could end up completely starved of attention in a very
                 * busy server.  Therefore, we round-robin across the
                 * returned set of descriptors.  While it is possible that
                 * the returned set of descriptors might flip around and
                 * continue to starve some sockets, we happen to know the
                 * internal pollset implementation retains ordering
                 * stability of the sockets.  Thus, the round-robin should
                 * ensure that a socket will eventually be serviced.
                 */
                if (last_poll_idx >= numdesc)
                    last_poll_idx = 0;

                /* Grab a listener record from the client_data of the poll
                 * descriptor, and advance our saved index to round-robin
                 * the next fetch.
                 *
                 * ### hmm... this descriptor might have POLLERR rather
                 * ### than POLLIN
                 */
                lr = 1; // pdesc[last_poll_idx++].client_data;
                break;

            loc_continueA:
                { int yyy2; yyy2 = yyy2; }
            }
        }

        /* if we accept() something we don't want to die, so we have to
         * defer the exit
         */
        status = nondet(); // lr->accept_func(&csd, lr, ptrans);

        SAFE_ACCEPT(accept_mutex_off());  /* unlock after "accept" */

        if (status == APR_EGENERAL) {
            /* resource shortage or should-not-occur occurred */
            clean_child_exit(1);
        }
        else if (status != APR_SUCCESS) {
            goto loc_continueB;
        }

        /*
         * We now have a connection, so set it up with the appropriate
         * socket options, file descriptors, and read/write buffers.
         */
        current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd,
                                                my_child_num, sbh,
                                                bucket_alloc);
        if (current_conn > 0) {
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }

        /* Check the pod and the generation number after processing a
         * connection so that we'll go away if a graceful restart occurred
         * while we were processing the connection or we are the lucky
         * idle server process that gets to die.
         */
        dummy = nondet();
        if (ap_mpm_pod_check(pod) == APR_SUCCESS) { /* selected as idle? */
            die_now = 1;
        }
        else if (ap_my_generation != dummy) {
            // ap_scoreboard_image->global->running_generation) { /* restart? */
            /* yeah, this could be non-graceful restart, in which case the
             * parent will kill us soon enough, but why bother checking?
             */
            die_now = 1;
        }

    loc_continueB:
        { int uuu; uuu = uuu; }
    }
    clean_child_exit(0);

    /* loc_return: */
    while (1 > 0) { int ddd; ddd = ddd; }
}
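/* A hedged sketch of the declarations this verification harness implies:
 * nondet() models an arbitrary value supplied by the environment, and
 * assume() prunes infeasible paths.  The exact spellings are guesses at
 * the harness's conventions; SV-COMP-style harnesses typically name these
 * __VERIFIER_nondet_int() and __VERIFIER_assume(). */
extern int nondet(void);      /* model checker: may return any int */
extern void assume(int cond); /* model checker: discard paths where cond is
                               * false, e.g. assume(num_listensocks > 0) */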
/* This is the thread that actually does all the work. */
static int32 worker_thread(void *dummy)
{
    int worker_slot = (int)dummy;
    apr_allocator_t *allocator;
    apr_bucket_alloc_t *bucket_alloc;
    apr_status_t rv = APR_EINIT;
    int last_poll_idx = 0;
    sigset_t sig_mask;
    int requests_this_child = 0;
    apr_pollset_t *pollset = NULL;
    ap_listen_rec *lr = NULL;
    ap_sb_handle_t *sbh = NULL;
    int i;
    /* each worker thread is in control of its own destiny... */
    int this_worker_should_exit = 0;
    /* We have 2 pools that we create/use throughout the lifetime of this
     * worker.  The first and longest lived is the pworker pool.  From
     * this we create the ptrans pool, the lifetime of which is the same
     * as each connection and is reset prior to each attempt to
     * process a connection.
     */
    apr_pool_t *ptrans = NULL;
    apr_pool_t *pworker = NULL;

    mpm_state = AP_MPMQ_STARTING;  /* for benefit of any hooks that run as this
                                    * child initializes */

    on_exit_thread(check_restart, (void *)worker_slot);

    /* block the signals for this thread only if we're not running as a
     * single process.
     */
    if (!one_process) {
        sigfillset(&sig_mask);
        sigprocmask(SIG_BLOCK, &sig_mask, NULL);
    }

    /* Each worker thread is fully in control of its destiny, and so to
     * allow each thread to handle the lifetime of its own resources we
     * create and use a subcontext for every thread.
     * The subcontext is a child of the pconf pool.
     */
    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(&pworker, pconf, NULL, allocator);
    apr_allocator_owner_set(allocator, pworker);

    apr_pool_create(&ptrans, pworker);
    apr_pool_tag(ptrans, "transaction");

    ap_create_sb_handle(&sbh, pworker, 0, worker_slot);
    (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

    /* We add an extra socket here as we add the udp_sock we use for
     * signalling death.  This gets added after the others.
     */
    apr_pollset_create(&pollset, num_listening_sockets + 1, pworker, 0);

    for (lr = ap_listeners, i = num_listening_sockets; i--; lr = lr->next) {
        apr_pollfd_t pfd = {0};

        pfd.desc_type = APR_POLL_SOCKET;
        pfd.desc.s = lr->sd;
        pfd.reqevents = APR_POLLIN;
        pfd.client_data = lr;
        apr_pollset_add(pollset, &pfd);
    }
    {
        apr_pollfd_t pfd = {0};

        pfd.desc_type = APR_POLL_SOCKET;
        pfd.desc.s = udp_sock;
        pfd.reqevents = APR_POLLIN;
        apr_pollset_add(pollset, &pfd);
    }

    bucket_alloc = apr_bucket_alloc_create(pworker);

    mpm_state = AP_MPMQ_RUNNING;

    while (!this_worker_should_exit) {
        conn_rec *current_conn;
        void *csd;

        /* (Re)initialize this child to a pre-connection state. */
        apr_pool_clear(ptrans);

        if ((ap_max_requests_per_thread > 0
             && requests_this_child++ >= ap_max_requests_per_thread))
            clean_child_exit(0, worker_slot);

        (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

        apr_thread_mutex_lock(accept_mutex);

        /* We always (presently) have at least 2 sockets we listen on, so
         * we don't have the ability for a fast path for a single socket
         * as some MPM's allow :(
         */
        for (;;) {
            apr_int32_t numdesc = 0;
            const apr_pollfd_t *pdesc = NULL;

            rv = apr_pollset_poll(pollset, -1, &numdesc, &pdesc);
            if (rv != APR_SUCCESS) {
                if (APR_STATUS_IS_EINTR(rv)) {
                    if (one_process && shutdown_pending)
                        return 0;
                    continue;
                }
                ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
                             "apr_pollset_poll: (listen)");
                clean_child_exit(1, worker_slot);
            }

            /* We can always use pdesc[0], but sockets at position N
             * could end up completely starved of attention in a very
             * busy server.  Therefore, we round-robin across the
             * returned set of descriptors.  While it is possible that
             * the returned set of descriptors might flip around and
             * continue to starve some sockets, we happen to know the
             * internal pollset implementation retains ordering
             * stability of the sockets.  Thus, the round-robin should
             * ensure that a socket will eventually be serviced.
             */
            if (last_poll_idx >= numdesc)
                last_poll_idx = 0;

            /* Grab a listener record from the client_data of the poll
             * descriptor, and advance our saved index to round-robin
             * the next fetch.
             *
             * ### hmm... this descriptor might have POLLERR rather
             * ### than POLLIN
             */
            lr = pdesc[last_poll_idx++].client_data;

            /* The only socket we add without client_data is the UDP socket
             * we listen on for restart signals.  If we've therefore gotten
             * a hit on that listener, lr will be NULL here and we know
             * we've been told to die.  Before we jump to the end of the
             * while loop with this_worker_should_exit set to 1 (causing us
             * to exit normally, we hope) we release the accept_mutex, as
             * we want every thread to go through this same routine :)
             * Bit of a hack, but compared to what I had before...
             */
            if (lr == NULL) {
                this_worker_should_exit = 1;
                apr_thread_mutex_unlock(accept_mutex);
                goto got_a_black_spot;
            }
            goto got_fd;
        }

    got_fd:
        /* Run beos_accept to accept the connection and set things up to
         * allow us to process it.  We always release the accept_lock here,
         * even if we fail to accept, as otherwise we'll starve other
         * workers, which would be bad.
         */
        rv = beos_accept(&csd, lr, ptrans);
        apr_thread_mutex_unlock(accept_mutex);

        if (rv == APR_EGENERAL) {
            /* resource shortage or should-not-occur occurred */
            clean_child_exit(1, worker_slot);
        }
        else if (rv != APR_SUCCESS)
            continue;

        current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd,
                                                worker_slot, sbh,
                                                bucket_alloc);
        if (current_conn) {
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }

        if (ap_my_generation !=
            ap_scoreboard_image->global->running_generation) { /* restart? */
            /* yeah, this could be non-graceful restart, in which case the
             * parent will kill us soon enough, but why bother checking?
             */
            this_worker_should_exit = 1;
        }

    got_a_black_spot:
        ;
    }

    apr_pool_destroy(ptrans);
    apr_pool_destroy(pworker);

    clean_child_exit(0, worker_slot);
}

static int make_worker(int slot)
{
    thread_id tid;

    if (slot + 1 > ap_max_child_assigned)
        ap_max_child_assigned = slot + 1;

    (void) ap_update_child_status_from_indexes(0, slot, SERVER_STARTING,
                                               (request_rec *)NULL);

    if (one_process) {
        set_signals();
        ap_scoreboard_image->parent[0].pid = getpid();
        ap_scoreboard_image->servers[0][slot].tid = find_thread(NULL);
        return 0;
    }

    tid = spawn_thread(worker_thread, "apache_worker", B_NORMAL_PRIORITY,
                       (void *)slot);
    if (tid < B_NO_ERROR) {
        ap_log_error(APLOG_MARK, APLOG_ERR, errno, NULL,
                     "spawn_thread: Unable to start a new thread");
        /* In case system resources are maxed out, we don't want
         * Apache running away with the CPU trying to fork over and
         * over and over again.
         */
        (void) ap_update_child_status_from_indexes(0, slot, SERVER_DEAD,
                                                   (request_rec *)NULL);
        sleep(10);
        return -1;
    }
    resume_thread(tid);

    ap_scoreboard_image->servers[0][slot].tid = tid;
    return 0;
}
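/* A minimal sketch isolating the round-robin idiom used above: keep the
 * next index across poll calls so descriptors at position 0 can't starve
 * the rest of the ready set.  pick_listener_sketch() and its parameter
 * names are illustrative, not part of the MPM code. */
static ap_listen_rec *pick_listener_sketch(const apr_pollfd_t *pdesc,
                                           apr_int32_t numdesc,
                                           int *last_poll_idx)
{
    if (*last_poll_idx >= numdesc) {
        *last_poll_idx = 0;             /* wrap around the ready set */
    }
    /* client_data was set to the ap_listen_rec when the socket was added
     * to the pollset, so this maps the ready descriptor back to its
     * listener record. */
    return pdesc[(*last_poll_idx)++].client_data;
}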
static void child_main(int child_num_arg)
{
    apr_pool_t *ptrans;
    apr_allocator_t *allocator;
    conn_rec *current_conn;
    apr_status_t status = APR_EINIT;
    int i;
    ap_listen_rec *lr;
    int curr_pollfd, last_pollfd = 0;
    apr_pollfd_t *pollset;
    int offset;
    void *csd;
    ap_sb_handle_t *sbh;
    apr_status_t rv;
    apr_bucket_alloc_t *bucket_alloc;

    mpm_state = AP_MPMQ_STARTING;  /* for benefit of any hooks that run as this
                                    * child initializes */

    my_child_num = child_num_arg;
    ap_my_pid = getpid();
    csd = NULL;
    requests_this_child = 0;

    ap_fatal_signal_child_setup(ap_server_conf);

    /* Get a sub context for global allocations in this child, so that
     * we can have cleanups occur when the child exits.
     */
    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(&pchild, pconf, NULL, allocator);
    apr_allocator_owner_set(allocator, pchild);

    apr_pool_create(&ptrans, pchild);
    apr_pool_tag(ptrans, "transaction");

    /* needs to be done before we switch UIDs so we have permissions */
    ap_reopen_scoreboard(pchild, NULL, 0);
    rv = apr_proc_mutex_child_init(&accept_mutex, ap_lock_fname, pchild);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf,
                     "Couldn't initialize cross-process lock in child");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    if (unixd_setup_child()) {
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_run_child_init(pchild, ap_server_conf);

    ap_create_sb_handle(&sbh, pchild, my_child_num, 0);

    (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

    /* Set up the pollfd array */
    listensocks = apr_pcalloc(pchild,
                              sizeof(*listensocks) * (num_listensocks));
    for (lr = ap_listeners, i = 0; i < num_listensocks; lr = lr->next, i++) {
        listensocks[i].accept_func = lr->accept_func;
        listensocks[i].sd = lr->sd;
    }

    pollset = apr_palloc(pchild, sizeof(*pollset) * num_listensocks);
    pollset[0].p = pchild;
    for (i = 0; i < num_listensocks; i++) {
        pollset[i].desc.s = listensocks[i].sd;
        pollset[i].desc_type = APR_POLL_SOCKET;
        pollset[i].reqevents = APR_POLLIN;
    }

    mpm_state = AP_MPMQ_RUNNING;

    bucket_alloc = apr_bucket_alloc_create(pchild);

    while (!die_now) {
        /*
         * (Re)initialize this child to a pre-connection state.
         */
        current_conn = NULL;

        apr_pool_clear(ptrans);

        if ((ap_max_requests_per_child > 0
             && requests_this_child++ >= ap_max_requests_per_child)) {
            clean_child_exit(0);
        }

        (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

        /*
         * Wait for an acceptable connection to arrive.
         */

        /* Lock around "accept", if necessary */
        SAFE_ACCEPT(accept_mutex_on());

        if (num_listensocks == 1) {
            offset = 0;
        }
        else {
            /* multiple listening sockets - need to poll */
            for (;;) {
                apr_status_t ret;
                apr_int32_t n;

                ret = apr_poll(pollset, num_listensocks, &n, -1);
                if (ret != APR_SUCCESS) {
                    if (APR_STATUS_IS_EINTR(ret)) {
                        continue;
                    }
                    /* Single Unix documents select as returning errnos
                     * EBADF, EINTR, and EINVAL... and in none of those
                     * cases does it make sense to continue.  In fact
                     * on Linux 2.0.x we seem to end up with EFAULT
                     * occasionally, and we'd loop forever due to it.
                     */
                    ap_log_error(APLOG_MARK, APLOG_ERR, ret, ap_server_conf,
                                 "apr_poll: (listen)");
                    clean_child_exit(1);
                }
                /* find a listener */
                curr_pollfd = last_pollfd;
                do {
                    curr_pollfd++;
                    if (curr_pollfd >= num_listensocks) {
                        curr_pollfd = 0;
                    }
                    /* XXX: Should we check for POLLERR? */
                    if (pollset[curr_pollfd].rtnevents & APR_POLLIN) {
                        last_pollfd = curr_pollfd;
                        offset = curr_pollfd;
                        goto got_fd;
                    }
                } while (curr_pollfd != last_pollfd);

                continue;
            }
        }

    got_fd:
        /* if we accept() something we don't want to die, so we have to
         * defer the exit
         */
        status = listensocks[offset].accept_func(&csd, &listensocks[offset],
                                                 ptrans);
        SAFE_ACCEPT(accept_mutex_off());  /* unlock after "accept" */

        if (status == APR_EGENERAL) {
            /* resource shortage or should-not-occur occurred */
            clean_child_exit(1);
        }
        else if (status != APR_SUCCESS) {
            continue;
        }

        /*
         * We now have a connection, so set it up with the appropriate
         * socket options, file descriptors, and read/write buffers.
         */
        current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd,
                                                my_child_num, sbh,
                                                bucket_alloc);
        if (current_conn) {
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }

        /* Check the pod and the generation number after processing a
         * connection so that we'll go away if a graceful restart occurred
         * while we were processing the connection or we are the lucky
         * idle server process that gets to die.
         */
        if (ap_mpm_pod_check(pod) == APR_SUCCESS) { /* selected as idle? */
            die_now = 1;
        }
        else if (ap_my_generation !=
                 ap_scoreboard_image->global->running_generation) { /* restart? */
            /* yeah, this could be non-graceful restart, in which case the
             * parent will kill us soon enough, but why bother checking?
             */
            die_now = 1;
        }
    }
    clean_child_exit(0);
}
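/* For context, a hedged sketch of what SAFE_ACCEPT() expands to in
 * httpd's prefork MPM: when the platform can safely accept() on a single
 * listener without serialization, the mutex calls are compiled away
 * unless there is more than one listener.  Treat the exact spelling as an
 * approximation of the upstream macro. */
#ifdef SINGLE_LISTEN_UNSERIALIZED_ACCEPT
#define SAFE_ACCEPT(stmt) do { if (ap_listeners->next) { stmt; } } while (0)
#else
#define SAFE_ACCEPT(stmt) do { stmt; } while (0)
#endif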
static void just_die(int sig)
{
    clean_child_exit(0);
}
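/* A hedged sketch of how a handler like just_die() is typically installed:
 * apr_signal() is APR's portable sigaction() wrapper and AP_SIG_GRACEFUL
 * is httpd's graceful-stop signal.  The exact set of signals wired to
 * just_die() varies by MPM, so treat this pairing as illustrative. */
static void install_just_die_sketch(void)
{
    apr_signal(SIGTERM, just_die);          /* hard stop: exit cleanly */
    apr_signal(AP_SIG_GRACEFUL, just_die);  /* graceful stop request */
}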
static void child_main(int child_num_arg)
{
    apr_pool_t *ptrans;
    apr_allocator_t *allocator;
    apr_status_t status;
    int i;
    ap_listen_rec *lr;
    apr_pollset_t *pollset;
    ap_sb_handle_t *sbh;
    apr_bucket_alloc_t *bucket_alloc;
    int last_poll_idx = 0;

    mpm_state = AP_MPMQ_STARTING;  /* for benefit of any hooks that run as this
                                    * child initializes */

    my_child_num = child_num_arg;
    ap_my_pid = getpid();
    requests_this_child = 0;

    ap_fatal_signal_child_setup(ap_server_conf);

    /* Get a sub context for global allocations in this child, so that
     * we can have cleanups occur when the child exits.
     */
    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(&pchild, pconf, NULL, allocator);
    apr_allocator_owner_set(allocator, pchild);

    apr_pool_create(&ptrans, pchild);
    apr_pool_tag(ptrans, "transaction");

    /* needs to be done before we switch UIDs so we have permissions */
    ap_reopen_scoreboard(pchild, NULL, 0);
    status = apr_proc_mutex_child_init(&accept_mutex, ap_lock_fname, pchild);
    if (status != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, status, ap_server_conf,
                     "Couldn't initialize cross-process lock in child "
                     "(%s) (%d)", ap_lock_fname, ap_accept_lock_mech);
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    if (unixd_setup_child()) {
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_run_child_init(pchild, ap_server_conf);

    ap_create_sb_handle(&sbh, pchild, my_child_num, 0);

    (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

    /* Set up the pollfd array */
    status = apr_pollset_create(&pollset, num_listensocks, pchild, 0);
    if (status != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, status, ap_server_conf,
                     "Couldn't create pollset in child; "
                     "check system or user limits");
        clean_child_exit(APEXIT_CHILDSICK); /* assume temporary resource issue */
    }

    for (lr = ap_listeners, i = num_listensocks; i--; lr = lr->next) {
        apr_pollfd_t pfd = { 0 };

        pfd.desc_type = APR_POLL_SOCKET;
        pfd.desc.s = lr->sd;
        pfd.reqevents = APR_POLLIN;
        pfd.client_data = lr;

        /* ### check the status */
        (void) apr_pollset_add(pollset, &pfd);
    }

    mpm_state = AP_MPMQ_RUNNING;

    bucket_alloc = apr_bucket_alloc_create(pchild);

    /* die_now is set when AP_SIG_GRACEFUL is received in the child;
     * shutdown_pending is set when SIGTERM is received when running
     * in single process mode.
     */
    while (!die_now && !shutdown_pending) {
        conn_rec *current_conn;
        void *csd;

        /*
         * (Re)initialize this child to a pre-connection state.
         */
        apr_pool_clear(ptrans);

        if ((ap_max_requests_per_child > 0
             && requests_this_child++ >= ap_max_requests_per_child)) {
            clean_child_exit(0);
        }

        (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

        /*
         * Wait for an acceptable connection to arrive.
         */

        /* Lock around "accept", if necessary */
        SAFE_ACCEPT(accept_mutex_on());

        if (num_listensocks == 1) {
            /* There is only one listener record, so refer to that one. */
            lr = ap_listeners;
        }
        else {
            /* multiple listening sockets - need to poll */
            for (;;) {
                apr_int32_t numdesc;
                const apr_pollfd_t *pdesc;

                /* check for termination first so we don't sleep for a while in
                 * poll if already signalled
                 */
                if (one_process && shutdown_pending) {
                    SAFE_ACCEPT(accept_mutex_off());
                    return;
                }
                else if (die_now) {
                    /* In graceful stop/restart; drop the mutex
                     * and terminate the child. */
                    SAFE_ACCEPT(accept_mutex_off());
                    clean_child_exit(0);
                }
                /* timeout == 10 seconds to avoid a hang at graceful restart/stop
                 * caused by the closing of sockets by the signal handler
                 */
                status = apr_pollset_poll(pollset, apr_time_from_sec(10),
                                          &numdesc, &pdesc);
                if (status != APR_SUCCESS) {
                    if (APR_STATUS_IS_TIMEUP(status) ||
                        APR_STATUS_IS_EINTR(status)) {
                        continue;
                    }
                    /* Single Unix documents select as returning errnos
                     * EBADF, EINTR, and EINVAL... and in none of those
                     * cases does it make sense to continue.  In fact
                     * on Linux 2.0.x we seem to end up with EFAULT
                     * occasionally, and we'd loop forever due to it.
                     */
                    ap_log_error(APLOG_MARK, APLOG_ERR, status, ap_server_conf,
                                 "apr_pollset_poll: (listen)");
                    SAFE_ACCEPT(accept_mutex_off());
                    clean_child_exit(1);
                }

                /* We can always use pdesc[0], but sockets at position N
                 * could end up completely starved of attention in a very
                 * busy server.  Therefore, we round-robin across the
                 * returned set of descriptors.  While it is possible that
                 * the returned set of descriptors might flip around and
                 * continue to starve some sockets, we happen to know the
                 * internal pollset implementation retains ordering
                 * stability of the sockets.  Thus, the round-robin should
                 * ensure that a socket will eventually be serviced.
                 */
                if (last_poll_idx >= numdesc)
                    last_poll_idx = 0;

                /* Grab a listener record from the client_data of the poll
                 * descriptor, and advance our saved index to round-robin
                 * the next fetch.
                 *
                 * ### hmm... this descriptor might have POLLERR rather
                 * ### than POLLIN
                 */
                lr = pdesc[last_poll_idx++].client_data;
                goto got_fd;
            }
        }

    got_fd:
        /* if we accept() something we don't want to die, so we have to
         * defer the exit
         */
        status = lr->accept_func(&csd, lr, ptrans);

        SAFE_ACCEPT(accept_mutex_off());  /* unlock after "accept" */

        if (status == APR_EGENERAL) {
            /* resource shortage or should-not-occur occurred */
            clean_child_exit(1);
        }
        else if (status != APR_SUCCESS) {
            continue;
        }

        /*
         * We now have a connection, so set it up with the appropriate
         * socket options, file descriptors, and read/write buffers.
         */
        current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd,
                                                my_child_num, sbh,
                                                bucket_alloc);
        if (current_conn) {
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }

        /* Check the pod and the generation number after processing a
         * connection so that we'll go away if a graceful restart occurred
         * while we were processing the connection or we are the lucky
         * idle server process that gets to die.
         */
        if (ap_mpm_pod_check(pod) == APR_SUCCESS) { /* selected as idle? */
            die_now = 1;
        }
        else if (ap_my_generation !=
                 ap_scoreboard_image->global->running_generation) { /* restart? */
            /* yeah, this could be non-graceful restart, in which case the
             * parent will kill us soon enough, but why bother checking?
             */
            die_now = 1;
        }
    }
    clean_child_exit(0);
}
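/* A hedged sketch of the "pipe of death" idea behind ap_mpm_pod_check():
 * the parent writes one byte per child it wants to retire; each child does
 * a non-blocking read and treats a successful read as "you were selected
 * to die".  This is plain POSIX written to illustrate the concept, not
 * httpd's actual ap_mpm_pod_* implementation. */
#include <unistd.h>

static int pod_check_sketch(int pod_read_fd)  /* fd assumed O_NONBLOCK */
{
    char one;

    /* returns nonzero only if the parent queued a termination byte */
    return read(pod_read_fd, &one, 1) == 1;
}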