static void multi_event_pollset(abts_case *tc, void *data)
{
    apr_status_t rv;
    apr_pollfd_t socket_pollfd;
    int lrv;
    const apr_pollfd_t *descs = NULL;

    ABTS_PTR_NOTNULL(tc, s[0]);
    socket_pollfd.desc_type = APR_POLL_SOCKET;
    socket_pollfd.reqevents = APR_POLLIN | APR_POLLOUT;
    socket_pollfd.desc.s = s[0];
    socket_pollfd.client_data = s[0];
    rv = apr_pollset_add(pollset, &socket_pollfd);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

    send_msg(s, sa, 0, tc);

    rv = apr_pollset_poll(pollset, -1, &lrv, &descs);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

    if (lrv == 1) {
        int ev = descs[0].rtnevents;

        ABTS_PTR_EQUAL(tc, s[0], descs[0].desc.s);
        ABTS_PTR_EQUAL(tc, s[0], descs[0].client_data);
        ABTS_ASSERT(tc, "either or both of APR_POLLIN, APR_POLLOUT returned",
                    ((ev & APR_POLLIN) != 0) || ((ev & APR_POLLOUT) != 0));
    }
    else if (lrv == 2) {
        ABTS_PTR_EQUAL(tc, s[0], descs[0].desc.s);
        ABTS_PTR_EQUAL(tc, s[0], descs[0].client_data);
        ABTS_PTR_EQUAL(tc, s[0], descs[1].desc.s);
        ABTS_PTR_EQUAL(tc, s[0], descs[1].client_data);
        ABTS_ASSERT(tc, "returned events incorrect",
                    ((descs[0].rtnevents | descs[1].rtnevents)
                     == (APR_POLLIN | APR_POLLOUT))
                    && descs[0].rtnevents != descs[1].rtnevents);
    }
    else {
        ABTS_ASSERT(tc, "either one or two events returned",
                    lrv == 1 || lrv == 2);
    }

    recv_msg(s, 0, p, tc);

    rv = apr_pollset_poll(pollset, 0, &lrv, &descs);
    ABTS_INT_EQUAL(tc, 0, APR_STATUS_IS_TIMEUP(rv));
    ABTS_INT_EQUAL(tc, 1, lrv);
    ABTS_PTR_EQUAL(tc, s[0], descs[0].desc.s);
    ABTS_INT_EQUAL(tc, APR_POLLOUT, descs[0].rtnevents);
    ABTS_PTR_EQUAL(tc, s[0], descs[0].client_data);

    rv = apr_pollset_remove(pollset, &socket_pollfd);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);
}
static apr_status_t pass_data_to_filter(ap_filter_t *f, const char *data,
                                        apr_size_t len,
                                        apr_bucket_brigade *bb)
{
    ef_ctx_t *ctx = f->ctx;
    ef_dir_t *dc = ctx->dc;
    apr_status_t rv;
    apr_size_t bytes_written = 0;
    apr_size_t tmplen;

    do {
        tmplen = len - bytes_written;
        rv = apr_file_write(ctx->proc->in,
                            (const char *)data + bytes_written,
                            &tmplen);
        bytes_written += tmplen;
        if (rv != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(rv)) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, f->r,
                          "apr_file_write(child input), len %" APR_SIZE_T_FMT,
                          tmplen);
            return rv;
        }
        if (APR_STATUS_IS_EAGAIN(rv)) {
            /* XXX handle blocking conditions here... if we block, we need
             * to read data from the child process and pass it down to the
             * next filter!
             */
            rv = drain_available_output(f, bb);
            if (APR_STATUS_IS_EAGAIN(rv)) {
#if APR_FILES_AS_SOCKETS
                int num_events;
                const apr_pollfd_t *pdesc;

                rv = apr_pollset_poll(ctx->pollset, f->r->server->timeout,
                                      &num_events, &pdesc);
                if (rv || dc->debug >= DBGLVL_GORY) {
                    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, f->r,
                                  "apr_pollset_poll()");
                }
                if (rv != APR_SUCCESS && !APR_STATUS_IS_EINTR(rv)) {
                    /* some error such as APR_TIMEUP */
                    return rv;
                }
#else /* APR_FILES_AS_SOCKETS */
                /* Yuck... I'd really like to wait until I can read
                 * or write, but instead I have to sleep and try again
                 */
                apr_sleep(100000); /* 100 milliseconds */
                if (dc->debug >= DBGLVL_GORY) {
                    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
                                  "apr_sleep()");
                }
#endif /* APR_FILES_AS_SOCKETS */
            }
            else if (rv != APR_SUCCESS) {
                return rv;
            }
        }
    } while (bytes_written < len);
    return rv;
}
/** Block for activity on the descriptor(s) in a pollset */
APT_DECLARE(apr_status_t) apt_pollset_poll(
                                apt_pollset_t *pollset,
                                apr_interval_time_t timeout,
                                apr_int32_t *num,
                                const apr_pollfd_t **descriptors)
{
    return apr_pollset_poll(pollset->base, timeout, num, descriptors);
}
/* Read method of CGI bucket: polls on stderr and stdout of the child,
 * sending any stderr output immediately away to the error log. */
static apr_status_t cgi_bucket_read(apr_bucket *b, const char **str,
                                    apr_size_t *len, apr_read_type_e block)
{
    struct cgi_bucket_data *data = b->data;
    apr_interval_time_t timeout;
    apr_status_t rv;
    int gotdata = 0;

    timeout = block == APR_NONBLOCK_READ ? 0 : data->r->server->timeout;

    do {
        const apr_pollfd_t *results;
        apr_int32_t num;

        rv = apr_pollset_poll(data->pollset, timeout, &num, &results);
        if (APR_STATUS_IS_TIMEUP(rv)) {
            if (timeout) {
                ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, data->r,
                              "Timeout waiting for output from CGI script %s",
                              data->r->filename);
                return rv;
            }
            else {
                return APR_EAGAIN;
            }
        }
        else if (APR_STATUS_IS_EINTR(rv)) {
            continue;
        }
        else if (rv != APR_SUCCESS) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, data->r,
                          "poll failed waiting for CGI child");
            return rv;
        }

        for (; num; num--, results++) {
            if (results[0].client_data == (void *)1) {
                /* stdout */
                rv = cgi_read_stdout(b, results[0].desc.f, str, len);
                if (APR_STATUS_IS_EOF(rv)) {
                    rv = APR_SUCCESS;
                }
                gotdata = 1;
            }
            else {
                /* stderr */
                apr_status_t rv2 = log_script_err(data->r, results[0].desc.f);
                if (APR_STATUS_IS_EOF(rv2)) {
                    apr_pollset_remove(data->pollset, &results[0]);
                }
            }
        }
    } while (!gotdata);

    return rv;
}
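/* Companion sketch for the CGI bucket read above: a minimal pollset setup
 * that registers the script's stdout and stderr pipes, tagging stdout with
 * client_data == (void *)1, which is the convention the read loop checks.
 * This is an illustration under assumed names (cgi_pollset_setup and the
 * two apr_file_t pipe arguments are hypothetical), not mod_cgi's own code;
 * note that APR_POLL_FILE descriptors require APR_FILES_AS_SOCKETS support
 * on some platforms. */
#include <apr_poll.h>

static apr_status_t cgi_pollset_setup(apr_pollset_t **pollset,
                                      apr_file_t *script_out,
                                      apr_file_t *script_err,
                                      apr_pool_t *pool)
{
    apr_pollfd_t fd;
    apr_status_t rv = apr_pollset_create(pollset, 2, pool, 0);

    if (rv != APR_SUCCESS) {
        return rv;
    }

    fd.p = pool;
    fd.desc_type = APR_POLL_FILE;
    fd.reqevents = APR_POLLIN;

    fd.desc.f = script_out;
    fd.client_data = (void *)1;  /* stdout: delivered to the brigade */
    rv = apr_pollset_add(*pollset, &fd);
    if (rv != APR_SUCCESS) {
        return rv;
    }

    fd.desc.f = script_err;
    fd.client_data = (void *)2;  /* anything else is treated as stderr */
    return apr_pollset_add(*pollset, &fd);
}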
apr_status_t apr_wait_for_io_or_timeout(apr_file_t *f, apr_socket_t *s,
                                        int for_read)
{
    apr_interval_time_t timeout;
    apr_pollfd_t pfd;
    int type = for_read ? APR_POLLIN : APR_POLLOUT;
    apr_pollset_t *pollset;
    apr_status_t status;

    /* TODO - timeout should be less each time through this loop */

    if (f) {
        pfd.desc_type = APR_POLL_FILE;
        pfd.desc.f = f;

        pollset = f->pollset;
        if (pollset == NULL) {
            status = apr_pollset_create(&(f->pollset), 1, f->pool, 0);
            if (status != APR_SUCCESS) {
                return status;
            }
            pollset = f->pollset;
        }
        timeout = f->timeout;
    }
    else {
        pfd.desc_type = APR_POLL_SOCKET;
        pfd.desc.s = s;

        pollset = s->pollset;
        timeout = s->timeout;
    }
    pfd.reqevents = type;

    /* Remove the object if it was in the pollset, then add in the new
     * object with the correct reqevents value. Ignore the status result
     * on the remove, because it might not be in there (yet).
     */
    (void) apr_pollset_remove(pollset, &pfd);

    /* ### check status code */
    (void) apr_pollset_add(pollset, &pfd);

    do {
        int numdesc;
        const apr_pollfd_t *pdesc;

        status = apr_pollset_poll(pollset, timeout, &numdesc, &pdesc);

        if (numdesc == 1 && (pdesc[0].rtnevents & type) != 0) {
            return APR_SUCCESS;
        }
    } while (APR_STATUS_IS_EINTR(status));

    return status;
}
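/* A minimal sketch of the "decreasing timeout" fix the TODO above alludes
 * to: track a deadline and shrink the poll timeout on each EINTR retry, so
 * repeated signal interruptions cannot extend the total wait. This is an
 * illustration with a hypothetical name (wait_with_deadline), not the
 * shipped apr_wait_for_io_or_timeout(); a negative timeout keeps APR's
 * "wait forever" meaning. */
#include <apr_poll.h>
#include <apr_time.h>

static apr_status_t wait_with_deadline(apr_pollset_t *pollset,
                                       apr_interval_time_t timeout,
                                       apr_int16_t type)
{
    apr_time_t deadline = (timeout >= 0) ? apr_time_now() + timeout : 0;
    apr_status_t status;

    do {
        apr_int32_t numdesc;
        const apr_pollfd_t *pdesc;
        apr_interval_time_t remaining = timeout;

        if (timeout >= 0) {
            remaining = deadline - apr_time_now();
            if (remaining < 0) {
                return APR_TIMEUP;  /* interrupted past the deadline */
            }
        }

        status = apr_pollset_poll(pollset, remaining, &numdesc, &pdesc);

        if (numdesc == 1 && (pdesc[0].rtnevents & type) != 0) {
            return APR_SUCCESS;
        }
    } while (APR_STATUS_IS_EINTR(status));

    return status;
}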
static void nomessage_pollset(CuTest *tc)
{
    apr_status_t rv;
    int lrv;
    const apr_pollfd_t *descs = NULL;

    rv = apr_pollset_poll(pollset, 0, &lrv, &descs);
    CuAssertIntEquals(tc, 1, APR_STATUS_IS_TIMEUP(rv));
    CuAssertIntEquals(tc, 0, lrv);
    CuAssertPtrEquals(tc, NULL, descs);
}
static void nomessage_pollset(abts_case *tc, void *data)
{
    apr_status_t rv;
    int lrv;
    const apr_pollfd_t *descs = NULL;

    rv = apr_pollset_poll(pollset, 0, &lrv, &descs);
    ABTS_INT_EQUAL(tc, 1, APR_STATUS_IS_TIMEUP(rv));
    ABTS_INT_EQUAL(tc, 0, lrv);
    ABTS_PTR_EQUAL(tc, NULL, descs);
}
static void pollset_wakeup(abts_case *tc, void *data)
{
    apr_status_t rv;
    apr_pollfd_t socket_pollfd;
    apr_pollset_t *pollset;
    apr_int32_t num;
    const apr_pollfd_t *descriptors;

    rv = apr_pollset_create_ex(&pollset, 1, p, APR_POLLSET_WAKEABLE,
                               default_pollset_impl);
    if (rv == APR_ENOTIMPL) {
        ABTS_NOT_IMPL(tc, "apr_pollset_wakeup() not supported");
        return;
    }
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

    /* send wakeup but no data; apr_pollset_poll() should return APR_EINTR */
    rv = apr_pollset_wakeup(pollset);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

    rv = apr_pollset_poll(pollset, -1, &num, &descriptors);
    ABTS_INT_EQUAL(tc, APR_EINTR, rv);

    /* send wakeup and data; apr_pollset_poll() should return APR_SUCCESS */
    socket_pollfd.desc_type = APR_POLL_SOCKET;
    socket_pollfd.reqevents = APR_POLLIN;
    socket_pollfd.desc.s = s[0];
    socket_pollfd.client_data = s[0];
    rv = apr_pollset_add(pollset, &socket_pollfd);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

    send_msg(s, sa, 0, tc);
    rv = apr_pollset_wakeup(pollset);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

    rv = apr_pollset_poll(pollset, -1, &num, &descriptors);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);
    ABTS_INT_EQUAL(tc, 1, num);
}
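/* Hedged sketch of the wakeup pattern the test above exercises: one thread
 * blocks in apr_pollset_poll() with an infinite timeout, another thread
 * calls apr_pollset_wakeup() to force an APR_EINTR return. The pollset must
 * be created with APR_POLLSET_WAKEABLE. The thread function, shutdown flag,
 * and dispatch placeholder are assumptions for illustration. */
#include <apr_poll.h>
#include <apr_thread_proc.h>

static volatile int shutting_down = 0;

static void *APR_THREAD_FUNC poller(apr_thread_t *thd, void *arg)
{
    apr_pollset_t *pollset = arg;
    apr_int32_t num;
    const apr_pollfd_t *descs;

    while (!shutting_down) {
        apr_status_t rv = apr_pollset_poll(pollset, -1, &num, &descs);
        if (rv == APR_EINTR) {
            continue;   /* woken by apr_pollset_wakeup(); re-check the flag */
        }
        /* ... dispatch the num ready descriptors ... */
    }
    apr_thread_exit(thd, APR_SUCCESS);
    return NULL;
}

/* Elsewhere, to stop the poller thread:
 *     shutting_down = 1;
 *     apr_pollset_wakeup(pollset);
 */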
void* APR_THREAD_FUNC poll_worker_run(apr_thread_t *th, void *arg)
{
    poll_worker_t *worker = arg;
    LOG_TRACE("poll worker loop starts");

    while (!worker->fg_exit) {
        int num = 0;
        const apr_pollfd_t *ret_pfd;

        // wait 100 * 1000 micro seconds.
        apr_status_t status = apr_pollset_poll(worker->ps, 100 * 1000,
                                               &num, &ret_pfd);
        if (status == APR_SUCCESS) {
            LOG_TRACE("poll worker loop: found something");
            SAFE_ASSERT(num > 0);
            for (int i = 0; i < num; i++) {
                int rtne = ret_pfd[i].rtnevents;
                SAFE_ASSERT(rtne & (APR_POLLIN | APR_POLLOUT));
                poll_job_t *job = ret_pfd[i].client_data;
                SAFE_ASSERT(job != NULL);
                if (rtne & APR_POLLIN) {
                    LOG_TRACE("poll worker loop: found something to read");
                    SAFE_ASSERT(job->do_read != NULL);
                    (*job->do_read)(job->holder);
                }
                if (rtne & APR_POLLOUT) {
                    LOG_TRACE("poll worker loop: found something to write");
                    SAFE_ASSERT(job->do_write);
                    (*job->do_write)(job->holder);
                }
            }
        } else if (status == APR_EINTR) {
            // the signal we get when process exit,
            // wakeup, or add in and write.
            LOG_WARN("the receiver epoll exits?");
            continue;
        } else if (status == APR_TIMEUP) {
            //LOG_DEBUG("poll time out");
            continue;
        } else {
            LOG_ERROR("unknown poll error. %s",
                      apr_strerror(status, calloc(1, 100), 100));
            SAFE_ASSERT(0);
        }
    }
    LOG_TRACE("poll worker loop stops");
    SAFE_ASSERT(apr_thread_exit(th, APR_SUCCESS) == APR_SUCCESS);
    return NULL;
}
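/* The job structure implied by the worker loop above, reconstructed as a
 * hedged sketch: client_data carries read/write callbacks plus an opaque
 * holder. Field names follow the usage visible in the loop; the actual
 * definition lives elsewhere in that project and may differ. */
typedef struct poll_job {
    void (*do_read)(void *holder);   /* invoked on APR_POLLIN */
    void (*do_write)(void *holder);  /* invoked on APR_POLLOUT */
    void *holder;                    /* user context passed to the callbacks */
} poll_job_t;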
static void clear_last_pollset(CuTest *tc)
{
    apr_status_t rv;
    int lrv;
    const apr_pollfd_t *descs = NULL;

    recv_msg(s, LARGE_NUM_SOCKETS - 1, p, tc);

    rv = apr_pollset_poll(pollset, 0, &lrv, &descs);
    CuAssertIntEquals(tc, 1, APR_STATUS_IS_TIMEUP(rv));
    CuAssertIntEquals(tc, 0, lrv);
    CuAssertPtrEquals(tc, NULL, descs);
}
static void clear_last_pollset(abts_case *tc, void *data)
{
    apr_status_t rv;
    int lrv;
    const apr_pollfd_t *descs = NULL;

    recv_msg(s, LARGE_NUM_SOCKETS - 1, p, tc);

    rv = apr_pollset_poll(pollset, 0, &lrv, &descs);
    ABTS_INT_EQUAL(tc, 1, APR_STATUS_IS_TIMEUP(rv));
    ABTS_INT_EQUAL(tc, 0, lrv);
    ABTS_PTR_EQUAL(tc, NULL, descs);
}
apr_status_t serf_context_run(
    serf_context_t *ctx,
    apr_short_interval_time_t duration,
    apr_pool_t *pool)
{
    apr_status_t status;
    apr_int32_t num;
    const apr_pollfd_t *desc;
    serf_pollset_t *ps = (serf_pollset_t*)ctx->pollset_baton;

    if ((status = serf_context_prerun(ctx)) != APR_SUCCESS) {
        return status;
    }

    if ((status = apr_pollset_poll(ps->pollset, duration, &num,
                                   &desc)) != APR_SUCCESS) {
        /* EINTR indicates a handled signal happened during the poll call,
           ignore, the application can safely retry. */
        if (APR_STATUS_IS_EINTR(status))
            return APR_SUCCESS;

        /* ### do we still need to dispatch stuff here?
           ### look at the potential return codes. map to our defined
           ### return values? ... */

        /* Use the strict documented error for poll timeouts, to allow
           proper handling of the other timeout types when returned from
           serf_event_trigger */
        if (APR_STATUS_IS_TIMEUP(status))
            return APR_TIMEUP; /* Return the documented error */

        return status;
    }

    while (num--) {
        serf_io_baton_t *io = desc->client_data;

        status = serf_event_trigger(ctx, io, desc);
        if (status) {
            /* Don't return APR_TIMEUP as a connection error, as our caller
               will use that as a trigger to call us again */
            if (APR_STATUS_IS_TIMEUP(status))
                status = SERF_ERROR_CONNECTION_TIMEDOUT;

            return status;
        }

        desc++;
    }

    return APR_SUCCESS;
}
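/* Hedged sketch of a typical driver loop around serf_context_run() above:
 * keep calling it until the application decides it is done, treating poll
 * timeouts as a signal to go around again. SERF_DURATION_FOREVER is the
 * usual serf idiom; run_until_done and the `done` flag are assumptions for
 * illustration, not serf API. */
#include <serf.h>

static apr_status_t run_until_done(serf_context_t *ctx, apr_pool_t *pool,
                                   volatile int *done)
{
    while (!*done) {
        apr_status_t status = serf_context_run(ctx, SERF_DURATION_FOREVER,
                                               pool);
        if (APR_STATUS_IS_TIMEUP(status)) {
            continue;       /* poll timeout only; keep running */
        }
        if (status) {
            return status;  /* real error surfaced by an event handler */
        }
    }
    return APR_SUCCESS;
}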
SWITCH_DECLARE(switch_status_t) switch_pollset_poll(switch_pollset_t *pollset,
                                                    switch_interval_time_t timeout,
                                                    int32_t *num,
                                                    const switch_pollfd_t **descriptors)
{
    apr_status_t st = SWITCH_STATUS_FALSE;

    if (pollset) {
        st = apr_pollset_poll((apr_pollset_t *) pollset, timeout, num,
                              (const apr_pollfd_t **) descriptors);

        if (st == APR_TIMEUP) {
            st = SWITCH_STATUS_TIMEOUT;
        }
    }

    return st;
}
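/* Hedged caller sketch for the FreeSWITCH wrapper above: one poll pass with
 * a 20ms tick, treating the mapped SWITCH_STATUS_TIMEOUT as "idle" rather
 * than an error. The pollset is assumed to have been built elsewhere via
 * the corresponding switch_apr shims; poll_once is a hypothetical name for
 * illustration. */
#include <switch.h>

static switch_status_t poll_once(switch_pollset_t *pollset)
{
    int32_t num = 0;
    const switch_pollfd_t *fds = NULL;
    switch_status_t st = switch_pollset_poll(pollset, 20000, &num, &fds);

    if (st == SWITCH_STATUS_TIMEOUT) {
        return SWITCH_STATUS_SUCCESS;  /* idle tick, nothing signalled */
    }
    if (st != SWITCH_STATUS_SUCCESS) {
        return st;                     /* real pollset error */
    }

    /* ... dispatch the num signalled descriptors in fds ... */
    return SWITCH_STATUS_SUCCESS;
}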
static apt_bool_t mrcp_server_agent_task_run(apt_task_t *task)
{
    mrcp_connection_agent_t *agent = apt_task_object_get(task);
    apt_bool_t running = TRUE;
    apr_status_t status;
    apr_int32_t num;
    const apr_pollfd_t *ret_pfd;
    int i;

    if (!agent) {
        apt_log(APT_PRIO_WARNING, "Failed to Start MRCPv2 Agent");
        return FALSE;
    }

    if (mrcp_server_agent_pollset_create(agent) == FALSE) {
        apt_log(APT_PRIO_WARNING, "Failed to Create Pollset");
        return FALSE;
    }

    while (running) {
        status = apr_pollset_poll(agent->pollset, -1, &num, &ret_pfd);
        if (status != APR_SUCCESS) {
            continue;
        }
        for (i = 0; i < num; i++) {
            if (ret_pfd[i].desc.s == agent->listen_sock) {
                apt_log(APT_PRIO_DEBUG, "Accept MRCPv2 Connection");
                mrcp_server_agent_connection_accept(agent);
                continue;
            }
            if (ret_pfd[i].desc.s == agent->control_sock) {
                apt_log(APT_PRIO_DEBUG, "Process Control Message");
                if (mrcp_server_agent_control_pocess(agent) == FALSE) {
                    running = FALSE;
                    break;
                }
                continue;
            }
            apt_log(APT_PRIO_DEBUG, "Process MRCPv2 Message");
            mrcp_server_agent_messsage_receive(agent, ret_pfd[i].client_data);
        }
    }

    mrcp_server_agent_pollset_destroy(agent);
    apt_task_child_terminate(agent->task);
    return TRUE;
}
static void send_last_pollset(abts_case *tc, void *data)
{
    apr_status_t rv;
    const apr_pollfd_t *descs = NULL;
    int num;

    send_msg(s, sa, LARGE_NUM_SOCKETS - 1, tc);

    rv = apr_pollset_poll(pollset, -1, &num, &descs);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);
    ABTS_INT_EQUAL(tc, 1, num);
    ABTS_PTR_NOTNULL(tc, descs);
    ABTS_PTR_EQUAL(tc, s[LARGE_NUM_SOCKETS - 1], descs[0].desc.s);
    ABTS_PTR_EQUAL(tc, s[LARGE_NUM_SOCKETS - 1], descs[0].client_data);
}
static void send_last_pollset(CuTest *tc)
{
    apr_status_t rv;
    const apr_pollfd_t *descs = NULL;
    int num;

    send_msg(s, sa, LARGE_NUM_SOCKETS - 1, tc);

    rv = apr_pollset_poll(pollset, 0, &num, &descs);
    CuAssertIntEquals(tc, APR_SUCCESS, rv);
    CuAssertIntEquals(tc, 1, num);
    CuAssertPtrNotNull(tc, descs);
    CuAssertPtrEquals(tc, s[LARGE_NUM_SOCKETS - 1], descs[0].desc.s);
    CuAssertPtrEquals(tc, s[LARGE_NUM_SOCKETS - 1], descs[0].client_data);
}
static void send_middle_pollset(CuTest *tc)
{
    apr_status_t rv;
    const apr_pollfd_t *descs = NULL;
    int num;

    send_msg(s, sa, 2, tc);
    send_msg(s, sa, 5, tc);

    rv = apr_pollset_poll(pollset, 0, &num, &descs);
    CuAssertIntEquals(tc, APR_SUCCESS, rv);
    CuAssertIntEquals(tc, 2, num);
    CuAssertPtrNotNull(tc, descs);
    CuAssert(tc, "Incorrect socket in result set",
             ((descs[0].desc.s == s[2]) && (descs[1].desc.s == s[5])) ||
             ((descs[0].desc.s == s[5]) && (descs[1].desc.s == s[2])));
}
static void send_middle_pollset(abts_case *tc, void *data)
{
    apr_status_t rv;
    const apr_pollfd_t *descs = NULL;
    int num;

    send_msg(s, sa, 2, tc);
    send_msg(s, sa, 5, tc);

    rv = apr_pollset_poll(pollset, 0, &num, &descs);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);
    ABTS_INT_EQUAL(tc, 2, num);
    ABTS_PTR_NOTNULL(tc, descs);
    ABTS_ASSERT(tc, "Incorrect socket in result set",
                ((descs[0].desc.s == s[2]) && (descs[1].desc.s == s[5])) ||
                ((descs[0].desc.s == s[5]) && (descs[1].desc.s == s[2])));
}
apr_status_t serf_context_run(
    serf_context_t *ctx,
    apr_short_interval_time_t duration,
    apr_pool_t *pool)
{
    apr_status_t status;
    apr_int32_t num;
    const apr_pollfd_t *desc;
    serf_pollset_t *ps = (serf_pollset_t*)ctx->pollset_baton;

    if ((status = serf_context_prerun(ctx)) != APR_SUCCESS) {
        return status;
    }

    if ((status = apr_pollset_poll(ps->pollset, duration, &num,
                                   &desc)) != APR_SUCCESS) {
        /* EINTR indicates a handled signal happened during the poll call,
           ignore, the application can safely retry. */
        if (APR_STATUS_IS_EINTR(status))
            return APR_SUCCESS;

        /* ### do we still need to dispatch stuff here?
           ### look at the potential return codes. map to our defined
           ### return values? ... */
        return status;
    }

    while (num--) {
        serf_connection_t *conn = desc->client_data;

        status = serf_event_trigger(ctx, conn, desc);
        if (status) {
            return status;
        }

        desc++;
    }

    return APR_SUCCESS;
}
static void send_middle_pollset(abts_case *tc, void *data)
{
    apr_status_t rv;
    const apr_pollfd_t *descs = NULL;
    int num;

    send_msg(s, sa, 2, tc);
    send_msg(s, sa, 5, tc);

    rv = apr_pollset_poll(pollset, -1, &num, &descs);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);
    ABTS_PTR_NOTNULL(tc, descs);
    ABTS_ASSERT(tc, "either one or two events returned",
                num == 1 || num == 2);

    /* The poll might only see the first sent message, in which
     * case we just don't bother checking this assertion */
    if (num == 2) {
        ABTS_ASSERT(tc, "Incorrect socket in result set",
                    ((descs[0].desc.s == s[2]) && (descs[1].desc.s == s[5])) ||
                    ((descs[0].desc.s == s[5]) && (descs[1].desc.s == s[2])));
    }
}
int sock_connection_request(net_sock_t *nsock, int timeout)
{
    apr_int32_t n;
    apr_interval_time_t dt;
    const apr_pollfd_t *ret_fd;
    network_sock_t *sock = (network_sock_t *)nsock;

    //dt = apr_time_make(0, 100*1000);
    //apr_sleep(dt);

    if (sock == NULL) return(-1);

    dt = apr_time_make(timeout, 0);
    n = 0;
    apr_pollset_poll(sock->pollset, dt, &n, &ret_fd);
    //int i = n;
    //log_printf(15, "sock_connection_request: err=%d n=%d APR_SUCCESS=%d\n", err, i, APR_SUCCESS);

    if (n == 1) {
        return(1);
    } else {
        return(0);
    }

    return(-1);
}
void ap_mpm_child_main(apr_pool_t *pconf)
{
    ap_listen_rec *lr = NULL;
    int requests_this_child = 0;
    int rv = 0;
    unsigned long ulTimes;
    int my_pid = getpid();
    ULONG rc, c;
    HQUEUE workq;
    apr_pollset_t *pollset;
    int num_listeners;
    TID server_maint_tid;
    void *sb_mem;
    int last_poll_idx = 0;

    /* Stop Ctrl-C/Ctrl-Break signals going to child processes */
    DosSetSignalExceptionFocus(0, &ulTimes);
    set_signals();

    /* Create pool for child */
    apr_pool_create(&pchild, pconf);

    ap_run_child_init(pchild, ap_server_conf);

    /* Create an event semaphore used to trigger other threads to shutdown */
    rc = DosCreateEventSem(NULL, &shutdown_event, 0, FALSE);

    if (rc) {
        ap_log_error(APLOG_MARK, APLOG_ERR, APR_FROM_OS_ERROR(rc),
                     ap_server_conf,
                     "unable to create shutdown semaphore, exiting");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    /* Gain access to the scoreboard. */
    rc = DosGetNamedSharedMem(&sb_mem, ap_scoreboard_fname,
                              PAG_READ|PAG_WRITE);

    if (rc) {
        ap_log_error(APLOG_MARK, APLOG_ERR, APR_FROM_OS_ERROR(rc),
                     ap_server_conf,
                     "scoreboard not readable in child, exiting");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_calc_scoreboard_size();
    ap_init_scoreboard(sb_mem);

    /* Gain access to the accept mutex */
    rc = DosOpenMutexSem(NULL, &ap_mpm_accept_mutex);

    if (rc) {
        ap_log_error(APLOG_MARK, APLOG_ERR, APR_FROM_OS_ERROR(rc),
                     ap_server_conf,
                     "accept mutex couldn't be accessed in child, exiting");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    /* Find our pid in the scoreboard so we know what slot our parent
     * allocated us */
    for (child_slot = 0;
         ap_scoreboard_image->parent[child_slot].pid != my_pid
             && child_slot < HARD_SERVER_LIMIT;
         child_slot++);

    if (child_slot == HARD_SERVER_LIMIT) {
        ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf,
                     "child pid not found in scoreboard, exiting");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_my_generation = ap_scoreboard_image->parent[child_slot].generation;
    memset(ap_scoreboard_image->servers[child_slot], 0,
           sizeof(worker_score) * HARD_THREAD_LIMIT);

    /* Set up an OS/2 queue for passing connections & termination requests
     * to worker threads */
    rc = DosCreateQueue(&workq, QUE_FIFO,
                        apr_psprintf(pchild, "/queues/httpd/work.%d", my_pid));

    if (rc) {
        ap_log_error(APLOG_MARK, APLOG_ERR, APR_FROM_OS_ERROR(rc),
                     ap_server_conf,
                     "unable to create work queue, exiting");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    /* Create initial pool of worker threads */
    for (c = 0; c < ap_min_spare_threads; c++) {
//        ap_scoreboard_image->servers[child_slot][c].tid = _beginthread(worker_main, NULL, 128*1024, (void *)c);
    }

    /* Start maintenance thread */
    server_maint_tid = _beginthread(server_maintenance, NULL, 32768, NULL);

    /* Set up poll */
    for (num_listeners = 0, lr = ap_listeners; lr; lr = lr->next) {
        num_listeners++;
    }

    apr_pollset_create(&pollset, num_listeners, pchild, 0);

    for (lr = ap_listeners; lr != NULL; lr = lr->next) {
        apr_pollfd_t pfd = { 0 };

        pfd.desc_type = APR_POLL_SOCKET;
        pfd.desc.s = lr->sd;
        pfd.reqevents = APR_POLLIN;
        pfd.client_data = lr;
        apr_pollset_add(pollset, &pfd);
    }

    /* Main connection accept loop */
    do {
        apr_pool_t *pconn;
        worker_args_t *worker_args;

        apr_pool_create(&pconn, pchild);
        worker_args = apr_palloc(pconn, sizeof(worker_args_t));
        worker_args->pconn = pconn;

        if (num_listeners == 1) {
            rv = apr_socket_accept(&worker_args->conn_sd, ap_listeners->sd,
                                   pconn);
        }
        else {
            const apr_pollfd_t *poll_results;
            apr_int32_t num_poll_results;

            rc = DosRequestMutexSem(ap_mpm_accept_mutex, SEM_INDEFINITE_WAIT);

            if (shutdown_pending) {
                DosReleaseMutexSem(ap_mpm_accept_mutex);
                break;
            }

            rv = APR_FROM_OS_ERROR(rc);

            if (rv == APR_SUCCESS) {
                rv = apr_pollset_poll(pollset, -1, &num_poll_results,
                                      &poll_results);
                DosReleaseMutexSem(ap_mpm_accept_mutex);
            }

            if (rv == APR_SUCCESS) {
                /* Round-robin across the returned descriptors */
                if (last_poll_idx >= num_listeners) {
                    last_poll_idx = 0;
                }

                lr = poll_results[last_poll_idx++].client_data;
                rv = apr_socket_accept(&worker_args->conn_sd, lr->sd, pconn);
            }
        }

        if (rv != APR_SUCCESS) {
            if (!APR_STATUS_IS_EINTR(rv)) {
                ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
                             "apr_socket_accept");
                clean_child_exit(APEXIT_CHILDFATAL);
            }
        }
        else {
            DosWriteQueue(workq, WORKTYPE_CONN, sizeof(worker_args_t),
                          worker_args, 0);
            requests_this_child++;
        }

        if (ap_max_requests_per_child != 0
            && requests_this_child >= ap_max_requests_per_child)
            break;
    } while (!shutdown_pending
             && ap_my_generation == ap_scoreboard_image->global->running_generation);

    ap_scoreboard_image->parent[child_slot].quiescing = 1;
    DosPostEventSem(shutdown_event);
    DosWaitThread(&server_maint_tid, DCWW_WAIT);

    if (is_graceful) {
        char someleft;

        /* tell our worker threads to exit */
        for (c = 0; c < HARD_THREAD_LIMIT; c++) {
            if (ap_scoreboard_image->servers[child_slot][c].status != SERVER_DEAD) {
                DosWriteQueue(workq, WORKTYPE_EXIT, 0, NULL, 0);
            }
        }

        do {
            someleft = 0;

            for (c = 0; c < HARD_THREAD_LIMIT; c++) {
                if (ap_scoreboard_image->servers[child_slot][c].status != SERVER_DEAD) {
                    someleft = 1;
                    DosSleep(1000);
                    break;
                }
            }
        } while (someleft);
    }
    else {
        DosPurgeQueue(workq);

        for (c = 0; c < HARD_THREAD_LIMIT; c++) {
            if (ap_scoreboard_image->servers[child_slot][c].status != SERVER_DEAD) {
                DosKillThread(ap_scoreboard_image->servers[child_slot][c].tid);
            }
        }
    }

    apr_pool_destroy(pchild);
}
int jtagHostLoop()
{
    apr_status_t rv;
    apr_pool_t *mp;
    apr_pollset_t *pollset;
    apr_int32_t num;
    const apr_pollfd_t *ret_pfd;

    apr_initialize();
    apr_pool_create(&mp, NULL);

    serv_ctx_t *channel_contexts[IPDBG_CHANNELS];

    apr_pollset_create(&pollset, DEF_POLLSET_NUM, mp, 0);

    for (uint8_t ch = 0; ch < IPDBG_CHANNELS; ++ch) {
        serv_ctx_t *serv_ctx = apr_palloc(mp, sizeof(serv_ctx_t));
        channel_contexts[ch] = serv_ctx;
        serv_ctx->channel_number = ch;
        serv_ctx->channel_state = listening;
        serv_ctx->up_buf_level = 0;
        serv_ctx->down_buf_level = 0;
        if (ch == 0) serv_ctx->valid_mask = IPDBG_LA_VALID_MASK;
        if (ch == 1) serv_ctx->valid_mask = IPDBG_IOVIEW_VALID_MASK;
        if (ch == 2) serv_ctx->valid_mask = IPDBG_GDB_VALID_MASK;
        if (ch == 3) serv_ctx->valid_mask = IPDBG_WFG_VALID_MASK;

        apr_socket_t *listening_sock = create_listen_sock(mp, ch);
        assert(listening_sock);

        apr_pollfd_t pfd = { mp, APR_POLL_SOCKET, APR_POLLIN, 0, { NULL }, serv_ctx };
        pfd.desc.s = listening_sock;
        apr_pollset_add(pollset, &pfd);
    }

    // reset JtagCDC
    uint16_t val;
    ipdbgJTAGtransfer(&val, 0xf00);

    while (1) {
        size_t transfers = 0;
        for (size_t ch = 0; ch < IPDBG_CHANNELS; ++ch) {
            for (size_t idx = 0; idx < channel_contexts[ch]->down_buf_level; ++idx) {
                uint16_t val;
                ipdbgJTAGtransfer(&val, channel_contexts[ch]->down_buf[idx] |
                                        channel_contexts[ch]->valid_mask);
                transfers++;
                distribute_to_up_buffer(val, channel_contexts);
            }
            channel_contexts[ch]->down_buf_level = 0;
        }
        for (size_t k = transfers; k < MIN_TRANSFERS; ++k) {
            uint16_t val;
            ipdbgJTAGtransfer(&val, 0x000);
            distribute_to_up_buffer(val, channel_contexts);
        }

        rv = apr_pollset_poll(pollset, DEF_POLL_TIMEOUT, &num, &ret_pfd);
        if (rv == APR_SUCCESS) {
            int i;

            /* scan the active sockets */
            for (i = 0; i < num; i++) {
                serv_ctx_t *serv_ctx = ret_pfd[i].client_data;
                if (serv_ctx) {
                    if (serv_ctx->channel_state == listening) {
                        apr_socket_t *listening_sock = ret_pfd[i].desc.s;
                        /* the listen socket is readable. that indicates
                         * we accepted a new connection */
                        do_accept(serv_ctx, pollset, listening_sock, mp);
                        apr_socket_close(listening_sock);
                        apr_pollset_remove(pollset, &ret_pfd[i]);
                    }
                    else {
                        int ret = TRUE;
                        if (ret_pfd[i].rtnevents & (APR_POLLIN | APR_POLLHUP)) {
                            ret = connection_rx_cb(serv_ctx, pollset, ret_pfd[i].desc.s);
                        }
                        else // (ret_pfd[i].rtnevents & APR_POLLOUT)
                        {
                            ret = connection_tx_cb(serv_ctx, pollset, ret_pfd[i].desc.s);
                        }
                        if (ret == FALSE) {
                            //printf("closing connection %d", serv_ctx->channel_number);
                            apr_socket_t *sock = ret_pfd[i].desc.s;
                            apr_socket_close(sock);
                            apr_pollset_remove(pollset, &ret_pfd[i]);

                            apr_socket_t *listening_sock =
                                create_listen_sock(mp, serv_ctx->channel_number);
                            serv_ctx->channel_state = listening;
                            apr_pollfd_t pfd = { mp, APR_POLL_SOCKET, APR_POLLIN,
                                                 0, { NULL }, serv_ctx };
                            pfd.desc.s = listening_sock;
                            apr_pollset_add(pollset, &pfd);
                        }
                    }
                }
            }
        }
    }

    return 0;
}
void* thread_socket_pipe_receiver(apr_thread_t* thd, void* data)
{
    frl_socket_pipe* pipe = (frl_socket_pipe*)data;
    apr_status_t state;
    apr_socket_t* listen_sock;

    apr_socket_create(&listen_sock, pipe->sock_addr->family, SOCK_STREAM,
                      APR_PROTO_TCP, pipe->sockpool);
    apr_socket_opt_set(listen_sock, APR_SO_NONBLOCK, 1);
    apr_socket_timeout_set(listen_sock, 0);
    apr_socket_opt_set(listen_sock, APR_SO_REUSEADDR, 1);

    pipe->recv_state = apr_socket_bind(listen_sock, pipe->sock_addr);
    F_ERROR_IF_RUN(APR_SUCCESS != pipe->recv_state, return NULL,
                   "[frl_socket_pipe::thread_socket_pipe_receiver]: Socket Binding Error: %d\n",
                   pipe->recv_state);
    pipe->recv_state = apr_socket_listen(listen_sock, SOMAXCONN);
    F_ERROR_IF_RUN(APR_SUCCESS != pipe->recv_state, return NULL,
                   "[frl_socket_pipe::thread_socket_pipe_receiver]: Socket Listen Error: %d\n",
                   pipe->recv_state);

    apr_uint32_t hash;
    apr_pollset_t* pollset;
    apr_pollset_create(&pollset, pipe->replicate+2, pipe->sockpool, 0);
    apr_pollfd_t pfd = { pipe->sockpool, APR_POLL_SOCKET, APR_POLLIN, 0, { NULL }, NULL };
    pfd.desc.s = listen_sock;
    apr_pollset_add(pollset, &pfd);

    do { // the fun loop
        apr_int32_t total;
        const apr_pollfd_t* ret_pfd;
        pipe->recv_state = apr_pollset_poll(pollset, SOCKET_PIPE_POLL_TIMEOUT,
                                            &total, &ret_pfd);
        if (APR_SUCCESS == pipe->recv_state) {
            for (int i = 0; i < total; i++) {
                if (ret_pfd[i].desc.s == listen_sock) {
                    apr_socket_t* accept_sock;
                    state = apr_socket_accept(&accept_sock, listen_sock, pipe->sockpool);
                    F_ERROR_IF_RUN(APR_SUCCESS != state, continue,
                                   "[frl_socket_pipe::thread_socket_pipe_receiver]: Socket Accept Error: %d\n",
                                   state);
                    // accept connection, initiate recv
                    frl_pipe_state_t* pipestate = (frl_pipe_state_t*)frl_slab_palloc(pipe->statepool);
                    apr_pollfd_t pfd = { pipe->sockpool, APR_POLL_SOCKET, APR_POLLIN, 0, { NULL }, pipestate };
                    pipestate->state = FRL_PIPE_READ_HEADER_START;
                    pipestate->reader = (char*)&pipestate->header;
                    pipestate->offset = 0;
                    pipestate->size = SIZEOF_FRL_PIPE_HEADER_T;
                    pfd.desc.s = accept_sock;
                    apr_socket_opt_set(accept_sock, APR_SO_NONBLOCK, 1);
                    apr_socket_timeout_set(accept_sock, 0);
                    apr_pollset_add(pollset, &pfd);
                } else {
                    if (ret_pfd[i].rtnevents & APR_POLLIN) {
                        frl_pipe_state_t* pipestate = (frl_pipe_state_t*)ret_pfd[i].client_data;
                        apr_size_t len_a = pipestate->size - pipestate->offset;
                        state = apr_socket_recv(ret_pfd[i].desc.s, pipestate->reader, &len_a);
                        pipestate->offset += len_a;
                        pipestate->reader += len_a; // read buffer to reader
                        if ((pipestate->offset >= pipestate->size) || (APR_STATUS_IS_EAGAIN(state))) {
                            pipestate->offset = pipestate->size;
                            PIPE_STATE_TO_COMPLETE(pipestate->state); // read complete, move state to complete
                        } else if ((APR_STATUS_IS_EOF(state)) || (len_a == 0)) {
                            apr_pollfd_t pfd = { pipe->sockpool, APR_POLL_SOCKET, APR_POLLIN, 0, { NULL }, pipestate };
                            pfd.desc.s = ret_pfd[i].desc.s;
                            apr_pollset_remove(pollset, &pfd);
                            frl_slab_pfree(pipestate);
                            apr_socket_close(ret_pfd[i].desc.s); // remote error, close connection
                            continue;
                        }
                        switch (pipestate->state) {
                        case FRL_PIPE_READ_HEADER_COMPLETE:
                        {
                            // recv header (hash & size)
                            pipestate->data.offset = 0;
                            pipestate->data.size = pipestate->header.size;
                            state = pipe->recv_before(&pipestate->data.buf, &pipestate->data.size);
                            if (FRL_PROGRESS_IS_INTERRUPT(state)) {
                                apr_pollfd_t pfd = { pipe->sockpool, APR_POLL_SOCKET, APR_POLLIN, 0, { NULL }, pipestate };
                                pfd.desc.s = ret_pfd[i].desc.s;
                                apr_pollset_remove(pollset, &pfd);
                                frl_slab_pfree(pipestate);
                                apr_socket_close(ret_pfd[i].desc.s);
                                continue;
                            }
                            pipestate->state = FRL_PIPE_READ_BLOCK_START; // start to read block (<= 4092 bytes each)
                            pipestate->reader = pipestate->buffer;
                            pipestate->offset = 0;
                            if (pipestate->data.size < SIZEOF_FRL_PIPE_BLOCK_BUFFER)
                                pipestate->size = pipestate->data.size + SIZEOF_FRL_PIPE_HEADER_T;
                            else
                                pipestate->size = SOCKET_PACKAGE_SIZE;
                            break;
                        }
                        case FRL_PIPE_READ_BLOCK_COMPLETE:
                        {
                            // a block complete, move to data
                            memcpy(pipestate->data.buf + pipestate->data.offset,
                                   &pipestate->block.start, pipestate->block.header.size);
                            hash = hashlittle(&pipestate->block.start,
                                              pipestate->size - SIZEOF_FRL_PIPE_HEADER_T);
                            if (hash != pipestate->block.header.hash) {
                                // check the hash fingerprint of the block
                                apr_pollfd_t pfd = { pipe->sockpool, APR_POLL_SOCKET, APR_POLLIN, 0, { NULL }, pipestate };
                                pfd.desc.s = ret_pfd[i].desc.s;
                                apr_pollset_remove(pollset, &pfd);
                                frl_slab_pfree(pipestate);
                                apr_socket_close(ret_pfd[i].desc.s);
                                continue;
                            }
                            pipestate->data.offset += pipestate->block.header.size;
                            if (pipestate->data.offset >= pipestate->data.size) {
                                // finish read, report state to remote
                                apr_pollfd_t pfd = { pipe->sockpool, APR_POLL_SOCKET, APR_POLLIN, 0, { NULL }, pipestate };
                                pfd.desc.s = ret_pfd[i].desc.s;
                                apr_pollset_remove(pollset, &pfd);
                                hash = hashlittle(pipestate->data.buf, pipestate->data.size);
                                if (hash != pipestate->header.hash) {
                                    // check hash fingerprint of all data
                                    frl_slab_pfree(pipestate);
                                    apr_socket_close(ret_pfd[i].desc.s);
                                } else {
                                    pfd.reqevents = APR_POLLOUT;
                                    state = pipe->recv_after(pipestate->data.buf, pipestate->data.size);
                                    if (FRL_PROGRESS_IS_INTERRUPT(state)) {
                                        frl_slab_pfree(pipestate);
                                        apr_socket_close(ret_pfd[i].desc.s);
                                    } else {
                                        pipestate->state = FRL_PIPE_SEND_HEADER_START;
                                        pipestate->reader = (char*)&pipestate->header;
                                        pipestate->offset = 0;
                                        pipestate->size = SIZEOF_FRL_PIPE_HEADER_T;
                                        apr_pollset_add(pollset, &pfd);
                                    }
                                }
                                continue;
                            }
                            // to start read successor block
                            pipestate->state = FRL_PIPE_READ_BLOCK_START;
                            pipestate->reader = pipestate->buffer;
                            pipestate->offset = 0;
                            if (pipestate->data.size - pipestate->data.offset < SIZEOF_FRL_PIPE_BLOCK_BUFFER)
                                pipestate->size = pipestate->data.size - pipestate->data.offset + SIZEOF_FRL_PIPE_HEADER_T;
                            else
                                pipestate->size = SOCKET_PACKAGE_SIZE;
                            break;
                        }
                        default:
                            break;
                        }
                    } else if (ret_pfd[i].rtnevents & APR_POLLOUT) {
                        // send report information, basic header
                        frl_pipe_state_t* pipestate = (frl_pipe_state_t*)ret_pfd[i].client_data;
                        apr_size_t len_a = pipestate->size - pipestate->offset;
                        state = apr_socket_send(ret_pfd[i].desc.s, pipestate->reader, &len_a);
                        pipestate->offset += len_a;
                        pipestate->reader += len_a;
                        if ((pipestate->offset >= pipestate->size) || (APR_STATUS_IS_EAGAIN(state))) {
                            pipestate->offset = pipestate->size;
                            PIPE_STATE_TO_COMPLETE(pipestate->state);
                        } else if ((APR_STATUS_IS_EOF(state)) || (len_a == 0)) {
                            apr_pollfd_t pfd = { pipe->sockpool, APR_POLL_SOCKET, APR_POLLOUT, 0, { NULL }, pipestate };
                            pfd.desc.s = ret_pfd[i].desc.s;
                            apr_pollset_remove(pollset, &pfd);
                            frl_slab_pfree(pipestate);
                            apr_socket_close(ret_pfd[i].desc.s);
                            continue;
                        }
                        switch (pipestate->state) {
                        case FRL_PIPE_SEND_HEADER_COMPLETE:
                        {
                            // complete, return to listen state
                            apr_pollfd_t pfd = { pipe->sockpool, APR_POLL_SOCKET, APR_POLLOUT, 0, { NULL }, pipestate };
                            pfd.desc.s = ret_pfd[i].desc.s;
                            apr_pollset_remove(pollset, &pfd);
                            pfd.reqevents = APR_POLLIN;
                            pipestate->state = FRL_PIPE_DISABLED;
                            pipestate->reader = 0;
                            pipestate->offset = 0;
                            pipestate->size = 0;
                            apr_pollset_add(pollset, &pfd);
                            break;
                        }
                        default:
                            break;
                        }
                    } else {
                        // other errors, close connection
                        frl_pipe_state_t* pipestate = (frl_pipe_state_t*)ret_pfd[i].client_data;
                        apr_pollfd_t pfd = { pipe->sockpool, APR_POLL_SOCKET, APR_POLLIN | APR_POLLOUT, 0, { NULL }, pipestate };
                        pfd.desc.s = ret_pfd[i].desc.s;
                        apr_pollset_remove(pollset, &pfd);
                        frl_slab_pfree(pipestate);
                        apr_socket_close(ret_pfd[i].desc.s);
                    }
                }
            }
        } else if (!APR_STATUS_IS_TIMEUP(pipe->recv_state)) {
/* CONNECT handler */
static int proxy_connect_handler(request_rec *r, proxy_worker *worker,
                                 proxy_server_conf *conf,
                                 char *url, const char *proxyname,
                                 apr_port_t proxyport)
{
    apr_pool_t *p = r->pool;
    apr_socket_t *sock;
    apr_status_t err, rv;
    apr_size_t i, o, nbytes;
    char buffer[HUGE_STRING_LEN];
    apr_socket_t *client_socket = ap_get_module_config(r->connection->conn_config,
                                                       &core_module);
    int failed;
    apr_pollset_t *pollset;
    apr_pollfd_t pollfd;
    const apr_pollfd_t *signalled;
    apr_int32_t pollcnt, pi;
    apr_int16_t pollevent;
    apr_sockaddr_t *uri_addr, *connect_addr;
    apr_uri_t uri;
    const char *connectname;
    int connectport = 0;

    /* is this for us? */
    if (r->method_number != M_CONNECT) {
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "proxy: CONNECT: declining URL %s", url);
        return DECLINED;
    }
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                 "proxy: CONNECT: serving URL %s", url);

    /*
     * Step One: Determine Who To Connect To
     *
     * Break up the URL to determine the host to connect to
     */

    /* we break the URL into host, port, uri */
    if (APR_SUCCESS != apr_uri_parse_hostinfo(p, url, &uri)) {
        return ap_proxyerror(r, HTTP_BAD_REQUEST,
                             apr_pstrcat(p, "URI cannot be parsed: ", url,
                                         NULL));
    }

    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                 "proxy: CONNECT: connecting %s to %s:%d", url, uri.hostname,
                 uri.port);

    /* do a DNS lookup for the destination host */
    err = apr_sockaddr_info_get(&uri_addr, uri.hostname, APR_UNSPEC,
                                uri.port, 0, p);

    /* are we connecting directly, or via a proxy? */
    if (proxyname) {
        connectname = proxyname;
        connectport = proxyport;
        err = apr_sockaddr_info_get(&connect_addr, proxyname, APR_UNSPEC,
                                    proxyport, 0, p);
    }
    else {
        connectname = uri.hostname;
        connectport = uri.port;
        connect_addr = uri_addr;
    }
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                 "proxy: CONNECT: connecting to remote proxy %s on port %d",
                 connectname, connectport);

    /* check if ProxyBlock directive on this host */
    if (OK != ap_proxy_checkproxyblock(r, conf, uri_addr)) {
        return ap_proxyerror(r, HTTP_FORBIDDEN,
                             "Connect to remote machine blocked");
    }

    /* Check if it is an allowed port */
    if (conf->allowed_connect_ports->nelts == 0) {
        /* Default setting if not overridden by AllowCONNECT */
        switch (uri.port) {
        case APR_URI_HTTPS_DEFAULT_PORT:
        case APR_URI_SNEWS_DEFAULT_PORT:
            break;
        default:
            /* XXX can we call ap_proxyerror() here to get a nice log message? */
            return HTTP_FORBIDDEN;
        }
    }
    else if (!allowed_port(conf, uri.port)) {
        /* XXX can we call ap_proxyerror() here to get a nice log message? */
        return HTTP_FORBIDDEN;
    }

    /*
     * Step Two: Make the Connection
     *
     * We have determined who to connect to. Now make the connection.
     */

    /* get all the possible IP addresses for the destname and loop through
     * them until we get a successful connection
     */
    if (APR_SUCCESS != err) {
        return ap_proxyerror(r, HTTP_BAD_GATEWAY,
                             apr_pstrcat(p, "DNS lookup failure for: ",
                                         connectname, NULL));
    }

    /*
     * At this point we have a list of one or more IP addresses of
     * the machine to connect to. If configured, reorder this
     * list so that the "best candidate" is first try. "best
     * candidate" could mean the least loaded server, the fastest
     * responding server, whatever.
     *
     * For now we do nothing, ie we get DNS round robin.
     * XXX FIXME
     */
    failed = ap_proxy_connect_to_backend(&sock, "CONNECT", connect_addr,
                                         connectname, conf, r->server,
                                         r->pool);

    /* handle a permanent error from the above loop */
    if (failed) {
        if (proxyname) {
            return DECLINED;
        }
        else {
            return HTTP_BAD_GATEWAY;
        }
    }

    /*
     * Step Three: Send the Request
     *
     * Send the HTTP/1.1 CONNECT request to the remote server
     */

    /* we are acting as a tunnel - the output filter stack should
     * be completely empty, because when we are done here we are done
     * completely. We add the NULL filter to the stack to do this...
     */
    r->output_filters = NULL;
    r->connection->output_filters = NULL;

    /* If we are connecting through a remote proxy, we need to pass
     * the CONNECT request on to it.
     */
    if (proxyport) {
        /* FIXME: Error checking ignored. */
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "proxy: CONNECT: sending the CONNECT request to the remote proxy");
        nbytes = apr_snprintf(buffer, sizeof(buffer),
                              "CONNECT %s HTTP/1.0" CRLF, r->uri);
        apr_socket_send(sock, buffer, &nbytes);
        nbytes = apr_snprintf(buffer, sizeof(buffer),
                              "Proxy-agent: %s" CRLF CRLF,
                              ap_get_server_banner());
        apr_socket_send(sock, buffer, &nbytes);
    }
    else {
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "proxy: CONNECT: Returning 200 OK Status");
        nbytes = apr_snprintf(buffer, sizeof(buffer),
                              "HTTP/1.0 200 Connection Established" CRLF);
        ap_xlate_proto_to_ascii(buffer, nbytes);
        apr_socket_send(client_socket, buffer, &nbytes);
        nbytes = apr_snprintf(buffer, sizeof(buffer),
                              "Proxy-agent: %s" CRLF CRLF,
                              ap_get_server_banner());
        ap_xlate_proto_to_ascii(buffer, nbytes);
        apr_socket_send(client_socket, buffer, &nbytes);
#if 0
        /* This is safer code, but it doesn't work yet. I'm leaving it
         * here so that I can fix it later. */
        r->status = HTTP_OK;
        r->header_only = 1;
        apr_table_set(r->headers_out, "Proxy-agent: %s",
                      ap_get_server_banner());
        ap_rflush(r);
#endif
    }

    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                 "proxy: CONNECT: setting up poll()");

    /*
     * Step Four: Handle Data Transfer
     *
     * Handle two way transfer of data over the socket (this is a tunnel).
     */

    /* r->sent_bodyct = 1; */

    if ((rv = apr_pollset_create(&pollset, 2, r->pool, 0)) != APR_SUCCESS) {
        apr_socket_close(sock);
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                      "proxy: CONNECT: error apr_pollset_create()");
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /* Add client side to the poll */
    pollfd.p = r->pool;
    pollfd.desc_type = APR_POLL_SOCKET;
    pollfd.reqevents = APR_POLLIN;
    pollfd.desc.s = client_socket;
    pollfd.client_data = NULL;
    apr_pollset_add(pollset, &pollfd);

    /* Add the server side to the poll */
    pollfd.desc.s = sock;
    apr_pollset_add(pollset, &pollfd);

    while (1) { /* Infinite loop until error (one side closes the connection) */
        if ((rv = apr_pollset_poll(pollset, -1, &pollcnt, &signalled))
            != APR_SUCCESS) {
            apr_socket_close(sock);
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                          "proxy: CONNECT: error apr_poll()");
            return HTTP_INTERNAL_SERVER_ERROR;
        }
#ifdef DEBUGGING
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "proxy: CONNECT: woke from select(), i=%d", pollcnt);
#endif

        for (pi = 0; pi < pollcnt; pi++) {
            const apr_pollfd_t *cur = &signalled[pi];

            if (cur->desc.s == sock) {
                pollevent = cur->rtnevents;
                if (pollevent & APR_POLLIN) {
#ifdef DEBUGGING
                    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                                 "proxy: CONNECT: sock was set");
#endif
                    nbytes = sizeof(buffer);
                    rv = apr_socket_recv(sock, buffer, &nbytes);
                    if (rv == APR_SUCCESS) {
                        o = 0;
                        i = nbytes;
                        while (i > 0) {
                            nbytes = i;
                            /* This is just plain wrong. No module should
                             * ever write directly to the client. For now,
                             * this works, but this is high on my list of
                             * things to fix. The correct line is:
                             * if ((nbytes = ap_rwrite(buffer + o, nbytes, r)) < 0)
                             * rbb
                             */
                            rv = apr_socket_send(client_socket, buffer + o,
                                                 &nbytes);
                            if (rv != APR_SUCCESS)
                                break;
                            o += nbytes;
                            i -= nbytes;
                        }
                    }
                    else
                        break;
                }
                else if ((pollevent & APR_POLLERR) || (pollevent & APR_POLLHUP))
                    break;
            }
            else if (cur->desc.s == client_socket) {
                pollevent = cur->rtnevents;
                if (pollevent & APR_POLLIN) {
#ifdef DEBUGGING
                    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                                 "proxy: CONNECT: client was set");
#endif
                    nbytes = sizeof(buffer);
                    rv = apr_socket_recv(client_socket, buffer, &nbytes);
                    if (rv == APR_SUCCESS) {
                        o = 0;
                        i = nbytes;
#ifdef DEBUGGING
                        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                                     "proxy: CONNECT: read %d from client", i);
#endif
                        while (i > 0) {
                            nbytes = i;
                            rv = apr_socket_send(sock, buffer + o, &nbytes);
                            if (rv != APR_SUCCESS)
                                break;
                            o += nbytes;
                            i -= nbytes;
                        }
                    }
                    else
                        break;
                }
                else if ((pollevent & APR_POLLERR) || (pollevent & APR_POLLHUP)) {
                    rv = APR_EOF;
                    break;
                }
            }
            else
                break;
        }

        if (rv != APR_SUCCESS) {
            break;
        }
    }

    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                 "proxy: CONNECT: finished with poll() - cleaning up");

    /*
     * Step Five: Clean Up
     *
     * Close the socket and clean up
     */

    apr_socket_close(sock);

    return OK;
}
//timeout is in microseconds
void LLPumpIO::pump(const S32& poll_timeout)
{
    LLMemType m1(LLMemType::MTYPE_IO_PUMP);
    LLFastTimer t1(LLFastTimer::FTM_PUMP);
    //llinfos << "LLPumpIO::pump()" << llendl;

    // Run any pending runners.
    mRunner.run();

    // We need to move all of the pending heads over to the running
    // chains.
    PUMP_DEBUG;
    if(true)
    {
#if LL_THREADS_APR
        LLScopedLock lock(mChainsMutex);
#endif
        // bail if this pump is paused.
        if(PAUSING == mState)
        {
            mState = PAUSED;
        }
        if(PAUSED == mState)
        {
            return;
        }

        PUMP_DEBUG;
        // Move the pending chains over to the running chains
        if(!mPendingChains.empty())
        {
            PUMP_DEBUG;
            //lldebugs << "Pushing " << mPendingChains.size() << "." << llendl;
            std::copy(
                mPendingChains.begin(),
                mPendingChains.end(),
                std::back_insert_iterator<running_chains_t>(mRunningChains));
            mPendingChains.clear();
            PUMP_DEBUG;
        }

        // Clear any locks. This needs to be done here so that we do
        // not clash during a call to clearLock().
        if(!mClearLocks.empty())
        {
            PUMP_DEBUG;
            running_chains_t::iterator it = mRunningChains.begin();
            running_chains_t::iterator end = mRunningChains.end();
            std::set<S32>::iterator not_cleared = mClearLocks.end();
            for(; it != end; ++it)
            {
                if((*it).mLock && mClearLocks.find((*it).mLock) != not_cleared)
                {
                    (*it).mLock = 0;
                }
            }
            PUMP_DEBUG;
            mClearLocks.clear();
        }
    }

    PUMP_DEBUG;
    // rebuild the pollset if necessary
    if(mRebuildPollset)
    {
        PUMP_DEBUG;
        rebuildPollset();
        mRebuildPollset = false;
    }

    // Poll based on the last known pollset
    // *TODO: may want to pass in a poll timeout so it works correctly
    // in single and multi threaded processes.
    PUMP_DEBUG;
    typedef std::map<S32, S32> signal_client_t;
    signal_client_t signalled_client;
    const apr_pollfd_t* poll_fd = NULL;
    if(mPollset)
    {
        PUMP_DEBUG;
        //llinfos << "polling" << llendl;
        S32 count = 0;
        S32 client_id = 0;
        {
            LLPerfBlock polltime("pump_poll");
            apr_pollset_poll(mPollset, poll_timeout, &count, &poll_fd);
        }
        PUMP_DEBUG;
        for(S32 ii = 0; ii < count; ++ii)
        {
            ll_debug_poll_fd("Signalled pipe", &poll_fd[ii]);
            client_id = *((S32*)poll_fd[ii].client_data);
            signalled_client[client_id] = ii;
        }
        PUMP_DEBUG;
    }

    PUMP_DEBUG;
    // set up for a check to see if each one was signalled
    signal_client_t::iterator not_signalled = signalled_client.end();

    // Process everything as appropriate
    //lldebugs << "Running chain count: " << mRunningChains.size() << llendl;
    running_chains_t::iterator run_chain = mRunningChains.begin();
    bool process_this_chain = false;
    for(; run_chain != mRunningChains.end(); )
    {
        PUMP_DEBUG;
        if((*run_chain).mInit
           && (*run_chain).mTimer.getStarted()
           && (*run_chain).mTimer.hasExpired())
        {
            PUMP_DEBUG;
            if(handleChainError(*run_chain, LLIOPipe::STATUS_EXPIRED))
            {
                // the pipe probably handled the error. If the handler
                // forgot to reset the expiration then we need to do
                // that here.
                if((*run_chain).mTimer.getStarted()
                   && (*run_chain).mTimer.hasExpired())
                {
                    PUMP_DEBUG;
                    llinfos << "Error handler forgot to reset timeout. "
                            << "Resetting to " << DEFAULT_CHAIN_EXPIRY_SECS
                            << " seconds." << llendl;
                    (*run_chain).setTimeoutSeconds(DEFAULT_CHAIN_EXPIRY_SECS);
                }
            }
            else
            {
                PUMP_DEBUG;
                // it timed out and no one handled it, so we need to
                // retire the chain
#if LL_DEBUG_PIPE_TYPE_IN_PUMP
                lldebugs << "Removing chain "
                         << (*run_chain).mChainLinks[0].mPipe
                         << " '"
                         << typeid(*((*run_chain).mChainLinks[0].mPipe)).name()
                         << "' because it timed out." << llendl;
#else
//              lldebugs << "Removing chain "
//                       << (*run_chain).mChainLinks[0].mPipe
//                       << " because we reached the end." << llendl;
#endif
                run_chain = mRunningChains.erase(run_chain);
                continue;
            }
        }
        PUMP_DEBUG;
        if((*run_chain).mLock)
        {
            ++run_chain;
            continue;
        }
        PUMP_DEBUG;
        mCurrentChain = run_chain;
        if((*run_chain).mDescriptors.empty())
        {
            // if there are no conditionals, just process this chain.
            process_this_chain = true;
            //lldebugs << "no conditionals - processing" << llendl;
        }
        else
        {
            PUMP_DEBUG;
            //lldebugs << "checking conditionals" << llendl;
            // Check if this run chain was signalled. If any file
            // descriptor is ready for something, then go ahead and
            // process this chain.
            process_this_chain = false;
            if(!signalled_client.empty())
            {
                PUMP_DEBUG;
                LLChainInfo::conditionals_t::iterator it;
                it = (*run_chain).mDescriptors.begin();
                LLChainInfo::conditionals_t::iterator end;
                end = (*run_chain).mDescriptors.end();
                S32 client_id = 0;
                signal_client_t::iterator signal;
                for(; it != end; ++it)
                {
                    PUMP_DEBUG;
                    client_id = *((S32*)((*it).second.client_data));
                    signal = signalled_client.find(client_id);
                    if (signal == not_signalled) continue;
                    static const apr_int16_t POLL_CHAIN_ERROR =
                        APR_POLLHUP | APR_POLLNVAL | APR_POLLERR;
                    const apr_pollfd_t* poll = &(poll_fd[(*signal).second]);
                    if(poll->rtnevents & POLL_CHAIN_ERROR)
                    {
                        // A potential error condition has been
                        // returned. If HUP was one of them, we pass
                        // that as the error even though there may be
                        // more. If there are in fact more errors,
                        // we'll just wait for that detection until
                        // the next pump() cycle to catch it so that
                        // the logic here gets no more strained than
                        // it already is.
                        LLIOPipe::EStatus error_status;
                        if(poll->rtnevents & APR_POLLHUP)
                            error_status = LLIOPipe::STATUS_LOST_CONNECTION;
                        else
                            error_status = LLIOPipe::STATUS_ERROR;
                        if(handleChainError(*run_chain, error_status)) break;
                        ll_debug_poll_fd("Removing pipe", poll);
                        llwarns << "Removing pipe "
                                << (*run_chain).mChainLinks[0].mPipe
                                << " '"
#if LL_DEBUG_PIPE_TYPE_IN_PUMP
                                << typeid(
                                    *((*run_chain).mChainLinks[0].mPipe)).name()
#endif
                                << "' because: "
                                << events_2_string(poll->rtnevents)
                                << llendl;
                        (*run_chain).mHead = (*run_chain).mChainLinks.end();
                        break;
                    }

                    // at least 1 fd got signalled, and there were no
                    // errors. That means we process this chain.
                    process_this_chain = true;
                    break;
                }
            }
        }
        if(process_this_chain)
        {
            PUMP_DEBUG;
            if(!((*run_chain).mInit))
            {
                (*run_chain).mHead = (*run_chain).mChainLinks.begin();
                (*run_chain).mInit = true;
            }
            PUMP_DEBUG;
            processChain(*run_chain);
        }

        PUMP_DEBUG;
        if((*run_chain).mHead == (*run_chain).mChainLinks.end())
        {
#if LL_DEBUG_PIPE_TYPE_IN_PUMP
            lldebugs << "Removing chain "
                     << (*run_chain).mChainLinks[0].mPipe
                     << " '"
                     << typeid(*((*run_chain).mChainLinks[0].mPipe)).name()
                     << "' because we reached the end." << llendl;
#else
//          lldebugs << "Removing chain " << (*run_chain).mChainLinks[0].mPipe
//                   << " because we reached the end." << llendl;
#endif

            PUMP_DEBUG;
            // This chain is done. Clean up any allocated memory and
            // erase the chain info.
            std::for_each(
                (*run_chain).mDescriptors.begin(),
                (*run_chain).mDescriptors.end(),
                ll_delete_apr_pollset_fd_client_data());
            run_chain = mRunningChains.erase(run_chain);

            // *NOTE: may not always need to rebuild the pollset.
            mRebuildPollset = true;
        }
        else
        {
            PUMP_DEBUG;
            // this chain needs more processing - just go to the next
            // chain.
            ++run_chain;
        }
    }

    PUMP_DEBUG;
    // null out the chain
    mCurrentChain = mRunningChains.end();
    END_PUMP_DEBUG;
}
void LLPluginProcessParent::poll(F64 timeout)
{
    if(sPollsetNeedsRebuild || !sUseReadThread)
    {
        sPollsetNeedsRebuild = false;
        updatePollset();
    }

    if(sPollSet)
    {
        apr_status_t status;
        apr_int32_t count;
        const apr_pollfd_t *descriptors;
        status = apr_pollset_poll(sPollSet,
                                  (apr_interval_time_t)(timeout * 1000000),
                                  &count, &descriptors);
        if(status == APR_SUCCESS)
        {
            // One or more of the descriptors signalled. Call them.
            for(int i = 0; i < count; i++)
            {
                LLPluginProcessParent *self = (LLPluginProcessParent *)(descriptors[i].client_data);
                // NOTE: the descriptor returned here is actually a COPY of the
                // original (even though we create the pollset with
                // APR_POLLSET_NOCOPY). This means that even if the parent has
                // set its mPollFD.client_data to NULL, the old pointer may
                // still be there in this descriptor. It's even possible that
                // the old pointer no longer points to a valid
                // LLPluginProcessParent. This means that we can't safely
                // dereference the 'self' pointer here without some extra
                // steps...
                if(self)
                {
                    // Make sure this pointer is still in the instances list
                    bool valid = false;
                    {
                        LLMutexLock lock(sInstancesMutex);
                        for(std::list<LLPluginProcessParent*>::iterator iter = sInstances.begin();
                            iter != sInstances.end(); ++iter)
                        {
                            if(*iter == self)
                            {
                                // Lock the instance's mutex before unlocking
                                // the global mutex. This avoids a possible
                                // race condition where the instance gets
                                // deleted between this check and the
                                // servicePoll() call.
                                self->mIncomingQueueMutex.lock();
                                valid = true;
                                break;
                            }
                        }
                    }

                    if(valid)
                    {
                        // The instance is still valid.
                        // Pull incoming messages off the socket
                        self->servicePoll();
                        self->mIncomingQueueMutex.unlock();
                    }
                    else
                    {
                        LL_DEBUGS("PluginPoll") << "detected deleted instance " << self << LL_ENDL;
                    }
                }
            }
        }
        else if(APR_STATUS_IS_TIMEUP(status))
        {
            // timed out with no incoming data. Just return.
        }
        else if(status == EBADF)
        {
            // This happens when one of the file descriptors in the pollset
            // is destroyed, which happens whenever a plugin's socket is
            // closed. The pollset has been or will be recreated, so just
            // return.
            LL_DEBUGS("PluginPoll") << "apr_pollset_poll returned EBADF" << LL_ENDL;
        }
        else if(status != APR_SUCCESS)
        {
            LL_WARNS("PluginPoll") << "apr_pollset_poll failed with status " << status << LL_ENDL;
        }
    }
}
//static void child_main(int child_num_arg)
void body()
{
    mpm_state = AP_MPMQ_STARTING; /* for benefit of any hooks that run as this
                                   * child initializes */

    my_child_num = child_num_arg;
    ap_my_pid = getpid();
    requests_this_child = 0;

    ap_fatal_signal_child_setup(ap_server_conf);

    /* Get a sub context for global allocations in this child, so that
     * we can have cleanups occur when the child exits.
     */
    apr_allocator_create(allocator); //// removed deref
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(pchild, pconf, NULL, allocator); //// removed deref
    apr_allocator_owner_set(allocator, pchild);

    apr_pool_create(ptrans, pchild); //// removed deref
    apr_pool_tag(ptrans, 65); // "transaction");

    /* needs to be done before we switch UIDs so we have permissions */
    ap_reopen_scoreboard(pchild, NULL, 0);
    status = apr_proc_mutex_child_init(accept_mutex, ap_lock_fname, pchild); //// removed deref
    if (status != APR_SUCCESS) {
        /* ap_log_error(APLOG_MARK, APLOG_EMERG, status, ap_server_conf, */
        /*              "Couldnt initialize crossprocess lock in child " */
        /*              "%s %d", ap_lock_fname, ap_accept_lock_mech); */
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    if (unixd_setup_child() > 0) {
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_run_child_init(pchild, ap_server_conf);

    ap_create_sb_handle(sbh, pchild, my_child_num, 0); //// removed deref

    ap_update_child_status(sbh, SERVER_READY, NULL);

    /* Set up the pollfd array */
    /* ### check the status */
    (void) apr_pollset_create(pollset, num_listensocks, pchild, 0); //// removed deref

    num_listensocks = nondet();
    assume(num_listensocks > 0);

    lr = ap_listeners;
    i = num_listensocks;
    while (1) {
        if (i <= 0) break;
        int pfd = 0;

        pfd_desc_type = APR_POLL_SOCKET;
        pfd_desc_s = 1; // lr->sd;
        pfd_reqevents = APR_POLLIN;
        pfd_client_data = lr;

        /* ### check the status */
        (void) apr_pollset_add(pollset, pfd); //// removed deref
        i--;
    }

    mpm_state = AP_MPMQ_RUNNING;

    bucket_alloc = apr_bucket_alloc_create(pchild);

    while (1 > 0) {
        if (die_now > 0) break;
        conn_rec *current_conn;
        void *csd;

        /*
         * (Re)initialize this child to a pre-connection state.
         */
        apr_pool_clear(ptrans);

        if ((ap_max_requests_per_child > 0
             && requests_this_child++ >= ap_max_requests_per_child)) {
            clean_child_exit(0);
        }

        (void) ap_update_child_status(sbh, SERVER_READY, NULL);

        /*
         * Wait for an acceptable connection to arrive.
         */

        /* Lock around "accept", if necessary */
        SAFE_ACCEPT(accept_mutex_on());
        do_ACCEPT = 1;
        do_ACCEPT = 0;

        dummy = nondet();
        if (dummy > 0) {
            /* goto loc_return; */
            while (1 > 0) { int ddd; ddd = ddd; }
        }

        if (num_listensocks == 1) {
            /* There is only one listener record, so refer to that one. */
            lr = ap_listeners;
        }
        else {
            /* multiple listening sockets - need to poll */
            while (1) {
                int numdesc;
                const void *pdesc;

                /* timeout == -1 == wait forever */
                status = apr_pollset_poll(pollset, -1, numdesc, pdesc); //// removed deref
                if (status != APR_SUCCESS) {
                    if (APR_STATUS_IS_EINTR(status) > 0) {
                        if (one_process > 0 && shutdown_pending > 0) {
                            /* goto loc_return; */
                            while (1 > 0) { int ddd; ddd = ddd; }
                        }
                        goto loc_continueA;
                    }

                    /* Single Unix documents select as returning errnos
                     * EBADF, EINTR, and EINVAL... and in none of those
                     * cases does it make sense to continue. In fact
                     * on Linux 2.0.x we seem to end up with EFAULT
                     * occasionally, and we'd loop forever due to it.
                     */
                    /* ap_log_error5(APLOG_MARK, APLOG_ERR, status, */
                    /*               ap_server_conf, "apr_pollset_poll: (listen)"); */
                    clean_child_exit(1);
                }

                /* We can always use pdesc[0], but sockets at position N
                 * could end up completely starved of attention in a very
                 * busy server. Therefore, we round-robin across the
                 * returned set of descriptors. While it is possible that
                 * the returned set of descriptors might flip around and
                 * continue to starve some sockets, we happen to know the
                 * internal pollset implementation retains ordering
                 * stability of the sockets. Thus, the round-robin should
                 * ensure that a socket will eventually be serviced.
                 */
                if (last_poll_idx >= numdesc)
                    last_poll_idx = 0;

                /* Grab a listener record from the client_data of the poll
                 * descriptor, and advance our saved index to round-robin
                 * the next fetch.
                 *
                 * ### hmm... this descriptor might have POLLERR rather
                 * ### than POLLIN
                 */
                lr = 1; // pdesc[last_poll_idx++].client_data;
                break;

            loc_continueA:
                { int yyy2; yyy2 = yyy2; }
            }
        }

        /* if we accept() something we don't want to die, so we have to
         * defer the exit
         */
        status = nondet(); // lr->accept_func(&csd, lr, ptrans);

        SAFE_ACCEPT(accept_mutex_off()); /* unlock after "accept" */

        if (status == APR_EGENERAL) {
            /* resource shortage or should-not-occur occurred */
            clean_child_exit(1);
        }
        else if (status != APR_SUCCESS) {
            goto loc_continueB;
        }

        /*
         * We now have a connection, so set it up with the appropriate
         * socket options, file descriptors, and read/write buffers.
         */
        current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd,
                                                my_child_num, sbh,
                                                bucket_alloc);
        if (current_conn > 0) {
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }

        /* Check the pod and the generation number after processing a
         * connection so that we'll go away if a graceful restart occurred
         * while we were processing the connection or we are the lucky
         * idle server process that gets to die.
         */
        dummy = nondet();
        if (ap_mpm_pod_check(pod) == APR_SUCCESS) { /* selected as idle? */
            die_now = 1;
        }
        else if (ap_my_generation != dummy) {
            // ap_scoreboard_image->global->running_generation) { /* restart? */
            /* yeah, this could be non-graceful restart, in which case the
             * parent will kill us soon enough, but why bother checking?
             */
            die_now = 1;
        }

    loc_continueB:
        { int uuu; uuu = uuu; }
    }
    clean_child_exit(0);

/* loc_return: */
    while (1 > 0) { int ddd; ddd = ddd; }
}
static void justsleep(abts_case *tc, void *data)
{
    apr_int32_t nsds;
    const apr_pollfd_t *hot_files;
    apr_pollset_t *pollset;
    apr_pollcb_t *pollcb;
    apr_status_t rv;
    apr_time_t t1, t2;
    int i;
    apr_pollset_method_e methods[] = {
        APR_POLLSET_DEFAULT,
        APR_POLLSET_SELECT,
        APR_POLLSET_KQUEUE,
        APR_POLLSET_PORT,
        APR_POLLSET_EPOLL,
        APR_POLLSET_POLL};

    nsds = 1;
    t1 = apr_time_now();
    rv = apr_poll(NULL, 0, &nsds, apr_time_from_msec(200));
    t2 = apr_time_now();
    ABTS_INT_EQUAL(tc, 1, APR_STATUS_IS_TIMEUP(rv));
    ABTS_INT_EQUAL(tc, 0, nsds);
    ABTS_ASSERT(tc, "apr_poll() didn't sleep",
                (t2 - t1) > apr_time_from_msec(100));

    for (i = 0; i < sizeof methods / sizeof methods[0]; i++) {
        rv = apr_pollset_create_ex(&pollset, 5, p, 0, methods[i]);
        if (rv != APR_ENOTIMPL) {
            ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

            nsds = 1;
            t1 = apr_time_now();
            rv = apr_pollset_poll(pollset, apr_time_from_msec(200), &nsds,
                                  &hot_files);
            t2 = apr_time_now();
            ABTS_INT_EQUAL(tc, 1, APR_STATUS_IS_TIMEUP(rv));
            ABTS_INT_EQUAL(tc, 0, nsds);
            ABTS_ASSERT(tc, "apr_pollset_poll() didn't sleep",
                        (t2 - t1) > apr_time_from_msec(100));

            rv = apr_pollset_destroy(pollset);
            ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);
        }

        rv = apr_pollcb_create_ex(&pollcb, 5, p, 0, methods[i]);
        if (rv != APR_ENOTIMPL) {
            ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

            t1 = apr_time_now();
            rv = apr_pollcb_poll(pollcb, apr_time_from_msec(200), NULL, NULL);
            t2 = apr_time_now();
            ABTS_INT_EQUAL(tc, 1, APR_STATUS_IS_TIMEUP(rv));
            ABTS_ASSERT(tc, "apr_pollcb_poll() didn't sleep",
                        (t2 - t1) > apr_time_from_msec(100));

            /* no apr_pollcb_destroy() */
        }
    }
}
static void pollset_remove(abts_case *tc, void *data)
{
    apr_status_t rv;
    apr_pollset_t *pollset;
    const apr_pollfd_t *hot_files;
    apr_pollfd_t pfd;
    apr_int32_t num;

    rv = apr_pollset_create(&pollset, 5, p, 0);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

    pfd.p = p;
    pfd.desc_type = APR_POLL_SOCKET;
    pfd.reqevents = APR_POLLOUT;

    pfd.desc.s = s[0];
    pfd.client_data = (void *)1;
    rv = apr_pollset_add(pollset, &pfd);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

    pfd.desc.s = s[1];
    pfd.client_data = (void *)2;
    rv = apr_pollset_add(pollset, &pfd);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

    pfd.desc.s = s[2];
    pfd.client_data = (void *)3;
    rv = apr_pollset_add(pollset, &pfd);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

    pfd.desc.s = s[3];
    pfd.client_data = (void *)4;
    rv = apr_pollset_add(pollset, &pfd);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

    rv = apr_pollset_poll(pollset, 1000, &num, &hot_files);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);
    ABTS_INT_EQUAL(tc, 4, num);

    /* now remove the pollset element referring to desc s[1] */
    pfd.desc.s = s[1];
    pfd.client_data = (void *)999; /* not used on this call */
    rv = apr_pollset_remove(pollset, &pfd);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

    /* this time only three should match */
    rv = apr_pollset_poll(pollset, 1000, &num, &hot_files);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);
    ABTS_INT_EQUAL(tc, 3, num);
    ABTS_PTR_EQUAL(tc, (void *)1, hot_files[0].client_data);
    ABTS_PTR_EQUAL(tc, s[0], hot_files[0].desc.s);
    ABTS_PTR_EQUAL(tc, (void *)3, hot_files[1].client_data);
    ABTS_PTR_EQUAL(tc, s[2], hot_files[1].desc.s);
    ABTS_PTR_EQUAL(tc, (void *)4, hot_files[2].client_data);
    ABTS_PTR_EQUAL(tc, s[3], hot_files[2].desc.s);

    /* now remove the pollset elements referring to desc s[2] */
    pfd.desc.s = s[2];
    pfd.client_data = (void *)999; /* not used on this call */
    rv = apr_pollset_remove(pollset, &pfd);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

    /* this time only two should match */
    rv = apr_pollset_poll(pollset, 1000, &num, &hot_files);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);
    ABTS_INT_EQUAL(tc, 2, num);
    ABTS_ASSERT(tc, "Incorrect socket in result set",
                ((hot_files[0].desc.s == s[0]) && (hot_files[1].desc.s == s[3])) ||
                ((hot_files[0].desc.s == s[3]) && (hot_files[1].desc.s == s[0])));
    ABTS_ASSERT(tc, "Incorrect client data in result set",
                ((hot_files[0].client_data == (void *)1) &&
                 (hot_files[1].client_data == (void *)4)) ||
                ((hot_files[0].client_data == (void *)4) &&
                 (hot_files[1].client_data == (void *)1)));
}
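/* A minimal, self-contained sketch of the canonical pollset lifecycle the
 * tests above exercise: create, add, poll with a timeout, remove, destroy.
 * Error handling is abbreviated, and the loopback UDP socket is an
 * assumption chosen only so the program has something pollable; with no
 * traffic the poll returns APR_TIMEUP. */
#include <apr_general.h>
#include <apr_network_io.h>
#include <apr_poll.h>

int main(void)
{
    apr_pool_t *pool;
    apr_pollset_t *pollset;
    apr_socket_t *sock;
    apr_sockaddr_t *addr;
    apr_pollfd_t pfd;
    const apr_pollfd_t *ret;
    apr_int32_t num;
    apr_status_t rv;

    apr_initialize();
    apr_pool_create(&pool, NULL);

    /* UDP socket bound to an ephemeral loopback port */
    apr_sockaddr_info_get(&addr, "127.0.0.1", APR_INET, 0, 0, pool);
    apr_socket_create(&sock, addr->family, SOCK_DGRAM, APR_PROTO_UDP, pool);
    apr_socket_bind(sock, addr);

    apr_pollset_create(&pollset, 1, pool, 0);

    pfd.p = pool;
    pfd.desc_type = APR_POLL_SOCKET;
    pfd.reqevents = APR_POLLIN;
    pfd.desc.s = sock;
    pfd.client_data = NULL;
    apr_pollset_add(pollset, &pfd);

    /* 100ms timeout; nothing was sent, so expect APR_TIMEUP */
    rv = apr_pollset_poll(pollset, apr_time_from_msec(100), &num, &ret);
    if (APR_STATUS_IS_TIMEUP(rv)) {
        /* nothing readable within the timeout */
    }

    apr_pollset_remove(pollset, &pfd);
    apr_pollset_destroy(pollset);
    apr_terminate();
    return 0;
}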