static apr_status_t send_brigade_blocking(apr_socket_t *s,
                                          apr_bucket_brigade *bb,
                                          apr_size_t *bytes_written,
                                          conn_rec *c)
{
    apr_status_t rv;

    rv = APR_SUCCESS;
    while (!APR_BRIGADE_EMPTY(bb)) {
        rv = send_brigade_nonblocking(s, bb, bytes_written, c);
        if (rv != APR_SUCCESS) {
            if (APR_STATUS_IS_EAGAIN(rv)) {
                /* Wait until we can send more data */
                apr_int32_t nsds;
                apr_interval_time_t timeout;
                apr_pollfd_t pollset;

                pollset.p = c->pool;
                pollset.desc_type = APR_POLL_SOCKET;
                pollset.reqevents = APR_POLLOUT;
                pollset.desc.s = s;
                apr_socket_timeout_get(s, &timeout);
                rv = apr_poll(&pollset, 1, &nsds, timeout);
                if (rv != APR_SUCCESS) {
                    break;
                }
            }
            else {
                break;
            }
        }
    }
    return rv;
}
static apr_status_t pass_data_to_filter(ap_filter_t *f, const char *data,
                                        apr_size_t len)
{
    ef_ctx_t *ctx = f->ctx;
    ef_dir_t *dc = ctx->dc;
    apr_status_t rv;
    apr_size_t bytes_written = 0;
    apr_size_t tmplen;

    do {
        tmplen = len - bytes_written;
        rv = apr_file_write(ctx->proc->in,
                            (const char *)data + bytes_written,
                            &tmplen);
        bytes_written += tmplen;
        if (rv != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(rv)) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, f->r,
                          "apr_file_write(child input), len %" APR_SIZE_T_FMT,
                          tmplen);
            return rv;
        }
        if (APR_STATUS_IS_EAGAIN(rv)) {
            /* XXX handle blocking conditions here... if we block, we need
             * to read data from the child process and pass it down to the
             * next filter!
             */
            rv = drain_available_output(f);
            if (APR_STATUS_IS_EAGAIN(rv)) {
#if APR_FILES_AS_SOCKETS
                int num_events;

                rv = apr_poll(ctx->pollset, 2, &num_events,
                              f->r->server->timeout);
                if (rv || dc->debug >= DBGLVL_GORY) {
                    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, f->r,
                                  "apr_poll()");
                }
                if (rv != APR_SUCCESS && !APR_STATUS_IS_EINTR(rv)) {
                    /* some error such as APR_TIMEUP */
                    return rv;
                }
#else /* APR_FILES_AS_SOCKETS */
                /* Yuck... I'd really like to wait until I can read
                 * or write, but instead I have to sleep and try again
                 */
                apr_sleep(100000); /* 100 milliseconds */
                if (dc->debug >= DBGLVL_GORY) {
                    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
                                  "apr_sleep()");
                }
#endif /* APR_FILES_AS_SOCKETS */
            }
            else if (rv != APR_SUCCESS) {
                return rv;
            }
        }
    } while (bytes_written < len);
    return rv;
}
/* Returns TRUE if PFD has pending data, FALSE otherwise. */
static svn_boolean_t pending(apr_pollfd_t *pfd, apr_pool_t *pool)
{
    apr_status_t status;
    int n;

    pfd->p = pool;
    pfd->reqevents = APR_POLLIN;
    status = apr_poll(pfd, 1, &n, 0);
    return (status == APR_SUCCESS && n);
}
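/* Illustrative sketch (not from the original source): pending() above is
 * a zero-timeout apr_poll() probe, so a caller can test for buffered
 * input without blocking.  `sock`, `pool`, and consume_input() are
 * hypothetical names used only for this example. */
static void example_drain_if_pending(apr_socket_t *sock, apr_pool_t *pool)
{
    apr_pollfd_t pfd;

    /* pending() fills in p and reqevents itself; only the descriptor
     * needs to be set up here. */
    pfd.desc_type = APR_POLL_SOCKET;
    pfd.desc.s = sock;

    if (pending(&pfd, pool)) {
        consume_input(sock); /* hypothetical handler */
    }
}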
APR_DECLARE(apr_status_t) apr_socket_atreadeof(apr_socket_t *sock,
                                               int *atreadeof)
{
    apr_pollfd_t pfds[1];
    apr_status_t rv;
    apr_int32_t nfds;

    /* The purpose here is to return APR_SUCCESS only in cases in
     * which it can be unambiguously determined whether or not the
     * socket will return EOF on next read.  In case of an unexpected
     * error, return that. */

    pfds[0].reqevents = APR_POLLIN;
    pfds[0].desc_type = APR_POLL_SOCKET;
    pfds[0].desc.s = sock;

    do {
        rv = apr_poll(&pfds[0], 1, &nfds, 0);
    } while (APR_STATUS_IS_EINTR(rv));

    if (APR_STATUS_IS_TIMEUP(rv)) {
        /* Read buffer empty -> subsequent reads would block, so,
         * definitely not at EOF. */
        *atreadeof = 0;
        return APR_SUCCESS;
    }
    else if (rv) {
        /* Some other error -> unexpected error. */
        return rv;
    }
    else if (nfds == 1 && pfds[0].rtnevents == APR_POLLIN) {
        apr_sockaddr_t unused;
        apr_size_t len = 1;
        char buf;

        /* The socket is readable - peek to see whether it returns EOF
         * without consuming bytes from the socket buffer. */
        rv = apr_socket_recvfrom(&unused, sock, MSG_PEEK, &buf, &len);
        if (rv == APR_EOF) {
            *atreadeof = 1;
            return APR_SUCCESS;
        }
        else if (rv) {
            /* Read error -> unexpected error. */
            return rv;
        }
        else {
            *atreadeof = 0;
            return APR_SUCCESS;
        }
    }

    /* Should not fall through here. */
    return APR_EGENERAL;
}
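/* Illustrative sketch (not from the original source): a typical caller
 * uses apr_socket_atreadeof() to decide whether a kept-alive connection
 * is still usable before reusing it.  `sock` is a hypothetical
 * connected socket. */
static int example_connection_reusable(apr_socket_t *sock)
{
    int ateof = 0;

    if (apr_socket_atreadeof(sock, &ateof) != APR_SUCCESS) {
        return 0; /* unexpected error: treat the connection as unusable */
    }
    return !ateof; /* peer already sent EOF -> do not reuse */
}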
static void nomessage(CuTest *tc)
{
    apr_status_t rv;
    int srv = SMALL_NUM_SOCKETS;

    rv = apr_poll(pollarray, SMALL_NUM_SOCKETS, &srv, 2 * APR_USEC_PER_SEC);
    CuAssertIntEquals(tc, 1, APR_STATUS_IS_TIMEUP(rv));
    check_sockets(pollarray, s, 0, 0, tc);
    check_sockets(pollarray, s, 1, 0, tc);
    check_sockets(pollarray, s, 2, 0, tc);
}
static void nomessage(abts_case *tc, void *data)
{
    apr_status_t rv;
    int srv = SMALL_NUM_SOCKETS;

    rv = apr_poll(pollarray, SMALL_NUM_SOCKETS, &srv, 2 * APR_USEC_PER_SEC);
    ABTS_INT_EQUAL(tc, 1, APR_STATUS_IS_TIMEUP(rv));
    check_sockets(pollarray, s, 0, 0, tc);
    check_sockets(pollarray, s, 1, 0, tc);
    check_sockets(pollarray, s, 2, 0, tc);
}
static void send_2_signaled_1(CuTest *tc)
{
    apr_status_t rv;
    int srv = SMALL_NUM_SOCKETS;

    send_msg(s, sa, 2, tc);
    rv = apr_poll(pollarray, SMALL_NUM_SOCKETS, &srv, 2 * APR_USEC_PER_SEC);
    CuAssertIntEquals(tc, APR_SUCCESS, rv);
    check_sockets(pollarray, s, 0, 0, tc);
    check_sockets(pollarray, s, 1, 1, tc);
    check_sockets(pollarray, s, 2, 1, tc);
}
static void send_2_signaled_1(abts_case *tc, void *data)
{
    apr_status_t rv;
    int srv = SMALL_NUM_SOCKETS;

    send_msg(s, sa, 2, tc);
    rv = apr_poll(pollarray, SMALL_NUM_SOCKETS, &srv, 2 * APR_USEC_PER_SEC);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);
    check_sockets(pollarray, s, 0, 0, tc);
    check_sockets(pollarray, s, 1, 1, tc);
    check_sockets(pollarray, s, 2, 1, tc);
}
static void clear_all_signalled(abts_case *tc, void *data)
{
    apr_status_t rv;
    int srv = SMALL_NUM_SOCKETS;

    recv_msg(s, 0, p, tc);
    recv_msg(s, 2, p, tc);
    rv = apr_poll(pollarray, SMALL_NUM_SOCKETS, &srv, 2 * APR_USEC_PER_SEC);
    ABTS_INT_EQUAL(tc, 1, APR_STATUS_IS_TIMEUP(rv));
    check_sockets(pollarray, s, 0, 0, tc);
    check_sockets(pollarray, s, 1, 0, tc);
    check_sockets(pollarray, s, 2, 0, tc);
}
static void clear_all_signalled(CuTest *tc)
{
    apr_status_t rv;
    int srv = SMALL_NUM_SOCKETS;

    recv_msg(s, 0, p, tc);
    recv_msg(s, 2, p, tc);
    rv = apr_poll(pollarray, SMALL_NUM_SOCKETS, &srv, 2 * APR_USEC_PER_SEC);
    CuAssertIntEquals(tc, 1, APR_STATUS_IS_TIMEUP(rv));
    check_sockets(pollarray, s, 0, 0, tc);
    check_sockets(pollarray, s, 1, 0, tc);
    check_sockets(pollarray, s, 2, 0, tc);
}
SWITCH_DECLARE(switch_status_t) switch_poll(switch_pollfd_t *aprset,
                                            int32_t numsock, int32_t *nsds,
                                            switch_interval_time_t timeout)
{
    apr_status_t st = SWITCH_STATUS_FALSE;

    if (aprset) {
        st = apr_poll((apr_pollfd_t *) aprset, numsock, nsds, timeout);

        if (st == APR_TIMEUP) {
            st = SWITCH_STATUS_TIMEOUT;
        }
    }

    return st;
}
static void recv_large_pollarray(CuTest *tc)
{
    apr_status_t rv;
    int lrv = LARGE_NUM_SOCKETS;
    int i;

    recv_msg(s, LARGE_NUM_SOCKETS - 1, p, tc);
    rv = apr_poll(pollarray_large, LARGE_NUM_SOCKETS, &lrv,
                  2 * APR_USEC_PER_SEC);
    CuAssertIntEquals(tc, 1, APR_STATUS_IS_TIMEUP(rv));

    for (i = 0; i < LARGE_NUM_SOCKETS; i++) {
        check_sockets(pollarray_large, s, i, 0, tc);
    }
}
static void recv_large_pollarray(abts_case *tc, void *data)
{
    apr_status_t rv;
    int lrv = LARGE_NUM_SOCKETS;
    int i;

    recv_msg(s, LARGE_NUM_SOCKETS - 1, p, tc);
    rv = apr_poll(pollarray_large, LARGE_NUM_SOCKETS, &lrv,
                  2 * APR_USEC_PER_SEC);
    ABTS_INT_EQUAL(tc, 1, APR_STATUS_IS_TIMEUP(rv));

    for (i = 0; i < LARGE_NUM_SOCKETS; i++) {
        check_sockets(pollarray_large, s, i, 0, tc);
    }
}
static void send_large_pollarray(abts_case *tc, void *data)
{
    apr_status_t rv;
    int lrv = LARGE_NUM_SOCKETS;
    int i;

    send_msg(s, sa, LARGE_NUM_SOCKETS - 1, tc);
    rv = apr_poll(pollarray_large, LARGE_NUM_SOCKETS, &lrv,
                  2 * APR_USEC_PER_SEC);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

    for (i = 0; i < LARGE_NUM_SOCKETS; i++) {
        if (i == (LARGE_NUM_SOCKETS - 1)) {
            check_sockets(pollarray_large, s, i, 1, tc);
        }
        else {
            check_sockets(pollarray_large, s, i, 0, tc);
        }
    }
}
static void send_large_pollarray(CuTest *tc)
{
    apr_status_t rv;
    int lrv = LARGE_NUM_SOCKETS;
    int i;

    send_msg(s, sa, LARGE_NUM_SOCKETS - 1, tc);
    rv = apr_poll(pollarray_large, LARGE_NUM_SOCKETS, &lrv,
                  2 * APR_USEC_PER_SEC);
    CuAssertIntEquals(tc, APR_SUCCESS, rv);

    for (i = 0; i < LARGE_NUM_SOCKETS; i++) {
        if (i == (LARGE_NUM_SOCKETS - 1)) {
            check_sockets(pollarray_large, s, i, 1, tc);
        }
        else {
            check_sockets(pollarray_large, s, i, 0, tc);
        }
    }
}
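/* Hedged sketch of the fixture the tests above assume (the real setup
 * lives elsewhere in the test suite and may differ): `pollarray` /
 * `pollarray_large` hold one apr_pollfd_t per UDP socket in `s`, each
 * watching for input; apr_poll() then rewrites rtnevents in place. */
static void example_make_pollarray(apr_pollfd_t *pollarray,
                                   apr_socket_t **s, int num, apr_pool_t *p)
{
    int i;

    for (i = 0; i < num; i++) {
        pollarray[i].p = p;
        pollarray[i].desc_type = APR_POLL_SOCKET;
        pollarray[i].reqevents = APR_POLLIN;
        pollarray[i].desc.s = s[i];
        pollarray[i].client_data = NULL;
    }
}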
void ap_mpm_child_main(apr_pool_t *pconf)
{
    ap_listen_rec *lr = NULL;
    ap_listen_rec *first_lr = NULL;
    int requests_this_child = 0;
    apr_socket_t *sd = ap_listeners->sd;
    int nsds, rv = 0;
    unsigned long ulTimes;
    int my_pid = getpid();
    ULONG rc, c;
    HQUEUE workq;
    apr_pollfd_t *pollset;
    int num_listeners;
    TID server_maint_tid;
    void *sb_mem;

    /* Stop Ctrl-C/Ctrl-Break signals going to child processes */
    DosSetSignalExceptionFocus(0, &ulTimes);
    set_signals();

    /* Create pool for child */
    apr_pool_create(&pchild, pconf);

    ap_run_child_init(pchild, ap_server_conf);

    /* Create an event semaphore used to trigger other threads to shutdown */
    rc = DosCreateEventSem(NULL, &shutdown_event, 0, FALSE);

    if (rc) {
        ap_log_error(APLOG_MARK, APLOG_ERR, APR_FROM_OS_ERROR(rc),
                     ap_server_conf,
                     "unable to create shutdown semaphore, exiting");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    /* Gain access to the scoreboard. */
    rc = DosGetNamedSharedMem(&sb_mem, ap_scoreboard_fname,
                              PAG_READ|PAG_WRITE);

    if (rc) {
        ap_log_error(APLOG_MARK, APLOG_ERR, APR_FROM_OS_ERROR(rc),
                     ap_server_conf,
                     "scoreboard not readable in child, exiting");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_calc_scoreboard_size();
    ap_init_scoreboard(sb_mem);

    /* Gain access to the accept mutex */
    rc = DosOpenMutexSem(NULL, &ap_mpm_accept_mutex);

    if (rc) {
        ap_log_error(APLOG_MARK, APLOG_ERR, APR_FROM_OS_ERROR(rc),
                     ap_server_conf,
                     "accept mutex couldn't be accessed in child, exiting");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    /* Find our pid in the scoreboard so we know what slot our parent
     * allocated us (check the bound before dereferencing the slot) */
    for (child_slot = 0;
         child_slot < HARD_SERVER_LIMIT &&
             ap_scoreboard_image->parent[child_slot].pid != my_pid;
         child_slot++);

    if (child_slot == HARD_SERVER_LIMIT) {
        ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf,
                     "child pid not found in scoreboard, exiting");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_my_generation = ap_scoreboard_image->parent[child_slot].generation;
    memset(ap_scoreboard_image->servers[child_slot], 0,
           sizeof(worker_score) * HARD_THREAD_LIMIT);

    /* Set up an OS/2 queue for passing connections & termination requests
     * to worker threads */
    rc = DosCreateQueue(&workq, QUE_FIFO,
                        apr_psprintf(pchild, "/queues/httpd/work.%d", my_pid));

    if (rc) {
        ap_log_error(APLOG_MARK, APLOG_ERR, APR_FROM_OS_ERROR(rc),
                     ap_server_conf,
                     "unable to create work queue, exiting");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    /* Create initial pool of worker threads */
    for (c = 0; c < ap_min_spare_threads; c++) {
//        ap_scoreboard_image->servers[child_slot][c].tid = _beginthread(worker_main, NULL, 128*1024, (void *)c);
    }

    /* Start maintenance thread */
    server_maint_tid = _beginthread(server_maintenance, NULL, 32768, NULL);

    /* Set up poll */
    for (num_listeners = 0, lr = ap_listeners; lr; lr = lr->next) {
        num_listeners++;
    }

    apr_poll_setup(&pollset, num_listeners, pchild);

    for (lr = ap_listeners; lr; lr = lr->next) {
        apr_poll_socket_add(pollset, lr->sd, APR_POLLIN);
    }

    /* Main connection accept loop */
    do {
        apr_pool_t *pconn;
        worker_args_t *worker_args;

        apr_pool_create(&pconn, pchild);
        worker_args = apr_palloc(pconn, sizeof(worker_args_t));
        worker_args->pconn = pconn;

        if (num_listeners == 1) {
            rv = apr_accept(&worker_args->conn_sd, ap_listeners->sd, pconn);
        }
        else {
            rc = DosRequestMutexSem(ap_mpm_accept_mutex, SEM_INDEFINITE_WAIT);

            if (shutdown_pending) {
                DosReleaseMutexSem(ap_mpm_accept_mutex);
                break;
            }

            rv = APR_FROM_OS_ERROR(rc);

            if (rv == APR_SUCCESS) {
                rv = apr_poll(pollset, num_listeners, &nsds, -1);
                DosReleaseMutexSem(ap_mpm_accept_mutex);
            }

            if (rv == APR_SUCCESS) {
                if (first_lr == NULL) {
                    first_lr = ap_listeners;
                }

                lr = first_lr;

                do {
                    apr_int16_t event;

                    apr_poll_revents_get(&event, lr->sd, pollset);

                    if (event == APR_POLLIN) {
                        apr_sockaddr_t *sa;
                        apr_port_t port;

                        apr_socket_addr_get(&sa, APR_LOCAL, lr->sd);
                        apr_sockaddr_port_get(&port, sa);
                        first_lr = lr->next;
                        break;
                    }

                    lr = lr->next;

                    if (!lr) {
                        lr = ap_listeners;
                    }
                } while (lr != first_lr);

                if (lr == first_lr) {
                    continue;
                }

                sd = lr->sd;
                rv = apr_accept(&worker_args->conn_sd, sd, pconn);
            }
        }

        if (rv != APR_SUCCESS) {
            if (!APR_STATUS_IS_EINTR(rv)) {
                ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
                             "apr_accept");
                clean_child_exit(APEXIT_CHILDFATAL);
            }
        }
        else {
            DosWriteQueue(workq, WORKTYPE_CONN, sizeof(worker_args_t),
                          worker_args, 0);
            requests_this_child++;
        }

        if (ap_max_requests_per_child != 0 &&
            requests_this_child >= ap_max_requests_per_child)
            break;
    } while (!shutdown_pending &&
             ap_my_generation ==
                 ap_scoreboard_image->global->running_generation);

    ap_scoreboard_image->parent[child_slot].quiescing = 1;
    DosPostEventSem(shutdown_event);
    DosWaitThread(&server_maint_tid, DCWW_WAIT);

    if (is_graceful) {
        char someleft;

        /* tell our worker threads to exit */
        for (c = 0; c < HARD_THREAD_LIMIT; c++) {
            if (ap_scoreboard_image->servers[child_slot][c].status
                != SERVER_DEAD) {
                DosWriteQueue(workq, WORKTYPE_EXIT, 0, NULL, 0);
            }
        }

        do {
            someleft = 0;

            for (c = 0; c < HARD_THREAD_LIMIT; c++) {
                if (ap_scoreboard_image->servers[child_slot][c].status
                    != SERVER_DEAD) {
                    someleft = 1;
                    DosSleep(1000);
                    break;
                }
            }
        } while (someleft);
    }
    else {
        DosPurgeQueue(workq);

        for (c = 0; c < HARD_THREAD_LIMIT; c++) {
            if (ap_scoreboard_image->servers[child_slot][c].status
                != SERVER_DEAD) {
                DosKillThread(ap_scoreboard_image->servers[child_slot][c].tid);
            }
        }
    }

    apr_pool_destroy(pchild);
}
static void test_get_addr(abts_case *tc, void *data)
{
    apr_status_t rv;
    apr_socket_t *ld, *sd, *cd;
    apr_sockaddr_t *sa, *ca;
    char a[128], b[128];

    ld = setup_socket(tc);

    APR_ASSERT_SUCCESS(tc, "get local address of bound socket",
                       apr_socket_addr_get(&sa, APR_LOCAL, ld));

    rv = apr_socket_create(&cd, sa->family, SOCK_STREAM, APR_PROTO_TCP, p);
    APR_ASSERT_SUCCESS(tc, "create client socket", rv);

    APR_ASSERT_SUCCESS(tc, "enable non-block mode",
                       apr_socket_opt_set(cd, APR_SO_NONBLOCK, 1));

    /* It is valid for a connect() on a socket with NONBLOCK set to
     * succeed (if the connection can be established synchronously),
     * but if it does, this test cannot proceed.  */
    rv = apr_socket_connect(cd, sa);
    if (rv == APR_SUCCESS) {
        apr_socket_close(ld);
        apr_socket_close(cd);
        ABTS_NOT_IMPL(tc, "Cannot test if connect completes "
                      "synchronously");
        return;
    }

    if (!APR_STATUS_IS_EINPROGRESS(rv)) {
        apr_socket_close(ld);
        apr_socket_close(cd);
        APR_ASSERT_SUCCESS(tc, "connect to listener", rv);
        return;
    }

    APR_ASSERT_SUCCESS(tc, "accept connection",
                       apr_socket_accept(&sd, ld, p));

    {
        /* wait for writability */
        apr_pollfd_t pfd;
        int n;

        pfd.p = p;
        pfd.desc_type = APR_POLL_SOCKET;
        pfd.reqevents = APR_POLLOUT|APR_POLLHUP;
        pfd.desc.s = cd;
        pfd.client_data = NULL;

        APR_ASSERT_SUCCESS(tc, "poll for connect completion",
                           apr_poll(&pfd, 1, &n, 5 * APR_USEC_PER_SEC));
    }

    APR_ASSERT_SUCCESS(tc, "get local address of server socket",
                       apr_socket_addr_get(&sa, APR_LOCAL, sd));
    APR_ASSERT_SUCCESS(tc, "get remote address of client socket",
                       apr_socket_addr_get(&ca, APR_REMOTE, cd));

    apr_snprintf(a, sizeof(a), "%pI", sa);
    apr_snprintf(b, sizeof(b), "%pI", ca);
    ABTS_STR_EQUAL(tc, a, b);

    apr_socket_close(cd);
    apr_socket_close(sd);
    apr_socket_close(ld);
}
void lfd_listen(apr_pool_t *mp)
{
    apr_pool_t *thd_pool = NULL;
    apr_socket_t *listen_sock;
    apr_socket_t *client_sock;
    apr_thread_t *thd;
    apr_threadattr_t *thattr;
    apr_pollfd_t pfd;
    apr_interval_time_t timeout = lfd_config_max_acceptloop_timeout;
    apr_int32_t nsds;
    apr_status_t rc;

    create_listen_socket(&listen_sock, mp);
    if (NULL == listen_sock) {
        lfd_log(LFD_ERROR, "lfd_listen: could not create listen socket");
        return;
    }

    rc = apr_threadattr_create(&thattr, mp);
    if (APR_SUCCESS != rc) {
        lfd_log_apr_err(rc, "apr_threadattr_create failed");
        return;
    }

    while (1) {
        //###: Should I allocate the pool as a subpool of the root pool?
        //What is the amount allocated per pool and is it freed when the
        //child pool is destroyed?
        //rc = apr_pool_create(&thd_pool, mp);
        if (NULL == thd_pool) {
            rc = apr_pool_create(&thd_pool, NULL);
            if (APR_SUCCESS != rc) {
                lfd_log_apr_err(rc, "apr_pool_create of thd_pool failed");
                continue;
            }
        }

        create_pollfd_from_socket(&pfd, listen_sock, mp);

        rc = apr_poll(&pfd, 1, &nsds, timeout);
        if ((APR_SUCCESS != rc) && (!APR_STATUS_IS_TIMEUP(rc))
            && (!APR_STATUS_IS_EINTR(rc))) {
            //break - an unrecoverable error occurred
            lfd_log_apr_err(rc, "apr_poll failed");
            break;
        }

        if (apr_atomic_read32(&ftp_must_exit)) {
            //if the flag says we must exit, we comply, so bye bye!
            return;
        }

        if (APR_STATUS_IS_TIMEUP(rc) || APR_STATUS_IS_EINTR(rc)
            || (APR_POLLIN != pfd.rtnevents)) {
            continue;
        }

        rc = apr_socket_accept(&client_sock, listen_sock, thd_pool);
        if (APR_SUCCESS != rc) {
            //###: For which errorcode must we break out of the loop?
            lfd_log_apr_err(rc, "apr_socket_accept failed");
            if (APR_STATUS_IS_EAGAIN(rc)) {
                lfd_log(LFD_ERROR, "lfd_listen: APR_STATUS_IS_EAGAIN");
            }
            continue;
        }

        rc = apr_thread_create(&thd, thattr, &lfd_worker_protocol_main,
                               (void *)client_sock, thd_pool);
        if (APR_SUCCESS != rc) {
            lfd_log_apr_err(rc, "apr_thread_create failed");
            apr_socket_close(client_sock);
            continue;
        }
        thd_pool = NULL;
    }
}
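/* Hedged sketch of the helper called above (the project's real
 * definition is not shown here and may differ): populate a single
 * apr_pollfd_t that watches the listen socket for input. */
static void example_create_pollfd_from_socket(apr_pollfd_t *pfd,
                                              apr_socket_t *sock,
                                              apr_pool_t *mp)
{
    pfd->p = mp;
    pfd->desc_type = APR_POLL_SOCKET;
    pfd->reqevents = APR_POLLIN;
    pfd->rtnevents = 0;
    pfd->desc.s = sock;
    pfd->client_data = NULL;
}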
bool LLAres::process(U64 timeout)
{
    if (!gAPRPoolp) {
        ll_init_apr();
    }

    ares_socket_t socks[ARES_GETSOCK_MAXNUM];
    apr_pollfd_t aprFds[ARES_GETSOCK_MAXNUM];
    apr_int32_t nsds = 0;
    int nactive = 0;
    int bitmask;

    bitmask = ares_getsock(chan_, socks, ARES_GETSOCK_MAXNUM);

    if (bitmask == 0) {
        return nsds > 0;
    }

    apr_status_t status;
    LLAPRPool pool;
    status = pool.getStatus();
    ll_apr_assert_status(status);

    for (int i = 0; i < ARES_GETSOCK_MAXNUM; i++) {
        if (ARES_GETSOCK_READABLE(bitmask, i)) {
            aprFds[nactive].reqevents = APR_POLLIN | APR_POLLERR;
        }
        else if (ARES_GETSOCK_WRITABLE(bitmask, i)) {
            aprFds[nactive].reqevents = APR_POLLOUT | APR_POLLERR;
        }
        else {
            continue;
        }

        apr_socket_t *aprSock = NULL;
        status = apr_os_sock_put(&aprSock, (apr_os_sock_t *) &socks[i],
                                 pool.getAPRPool());
        if (status != APR_SUCCESS) {
            ll_apr_warn_status(status);
            return nsds > 0;
        }

        aprFds[nactive].desc.s = aprSock;
        aprFds[nactive].desc_type = APR_POLL_SOCKET;
        aprFds[nactive].p = pool.getAPRPool();
        aprFds[nactive].rtnevents = 0;
        aprFds[nactive].client_data = &socks[i];

        nactive++;
    }

    if (nactive > 0) {
        status = apr_poll(aprFds, nactive, &nsds, timeout);

        if (status != APR_SUCCESS && status != APR_TIMEUP) {
            ll_apr_warn_status(status);
        }

        for (int i = 0; i < nactive; i++) {
            int evts = aprFds[i].rtnevents;
            int ifd = (evts & (APR_POLLIN | APR_POLLERR))
                ? *((int *) aprFds[i].client_data) : ARES_SOCKET_BAD;
            int ofd = (evts & (APR_POLLOUT | APR_POLLERR))
                ? *((int *) aprFds[i].client_data) : ARES_SOCKET_BAD;

            ares_process_fd(chan_, ifd, ofd);
        }
    }

    return nsds > 0;
}
static void justsleep(abts_case *tc, void *data)
{
    apr_int32_t nsds;
    const apr_pollfd_t *hot_files;
    apr_pollset_t *pollset;
    apr_pollcb_t *pollcb; /* declaration was missing in the flattened copy */
    apr_status_t rv;
    apr_time_t t1, t2;
    int i;
    apr_pollset_method_e methods[] = {
        APR_POLLSET_DEFAULT,
        APR_POLLSET_SELECT,
        APR_POLLSET_KQUEUE,
        APR_POLLSET_PORT,
        APR_POLLSET_EPOLL,
        APR_POLLSET_POLL};

    nsds = 1;
    t1 = apr_time_now();
    rv = apr_poll(NULL, 0, &nsds, apr_time_from_msec(200));
    t2 = apr_time_now();
    ABTS_INT_EQUAL(tc, 1, APR_STATUS_IS_TIMEUP(rv));
    ABTS_INT_EQUAL(tc, 0, nsds);
    ABTS_ASSERT(tc, "apr_poll() didn't sleep",
                (t2 - t1) > apr_time_from_msec(100));

    for (i = 0; i < sizeof methods / sizeof methods[0]; i++) {
        rv = apr_pollset_create_ex(&pollset, 5, p, 0, methods[i]);
        if (rv != APR_ENOTIMPL) {
            ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

            nsds = 1;
            t1 = apr_time_now();
            rv = apr_pollset_poll(pollset, apr_time_from_msec(200), &nsds,
                                  &hot_files);
            t2 = apr_time_now();
            ABTS_INT_EQUAL(tc, 1, APR_STATUS_IS_TIMEUP(rv));
            ABTS_INT_EQUAL(tc, 0, nsds);
            ABTS_ASSERT(tc, "apr_pollset_poll() didn't sleep",
                        (t2 - t1) > apr_time_from_msec(100));

            rv = apr_pollset_destroy(pollset);
            ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);
        }

        /* use the method under test, not methods[0], so every backend
         * is exercised */
        rv = apr_pollcb_create_ex(&pollcb, 5, p, 0, methods[i]);
        if (rv != APR_ENOTIMPL) {
            ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

            t1 = apr_time_now();
            rv = apr_pollcb_poll(pollcb, apr_time_from_msec(200), NULL, NULL);
            t2 = apr_time_now();
            ABTS_INT_EQUAL(tc, 1, APR_STATUS_IS_TIMEUP(rv));
            ABTS_ASSERT(tc, "apr_pollcb_poll() didn't sleep",
                        (t2 - t1) > apr_time_from_msec(100));

            /* no apr_pollcb_destroy() */
        }
    }
}
static void child_main(int child_num_arg)
{
    apr_pool_t *ptrans;
    apr_allocator_t *allocator;
    conn_rec *current_conn;
    apr_status_t status = APR_EINIT;
    int i;
    ap_listen_rec *lr;
    int curr_pollfd, last_pollfd = 0;
    apr_pollfd_t *pollset;
    int offset;
    void *csd;
    ap_sb_handle_t *sbh;
    apr_status_t rv;
    apr_bucket_alloc_t *bucket_alloc;

    mpm_state = AP_MPMQ_STARTING; /* for benefit of any hooks that run as this
                                   * child initializes
                                   */

    my_child_num = child_num_arg;
    ap_my_pid = getpid();
    csd = NULL;
    requests_this_child = 0;

    ap_fatal_signal_child_setup(ap_server_conf);

    /* Get a sub context for global allocations in this child, so that
     * we can have cleanups occur when the child exits.
     */
    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(&pchild, pconf, NULL, allocator);
    apr_allocator_owner_set(allocator, pchild);

    apr_pool_create(&ptrans, pchild);
    apr_pool_tag(ptrans, "transaction");

    /* needs to be done before we switch UIDs so we have permissions */
    ap_reopen_scoreboard(pchild, NULL, 0);
    rv = apr_proc_mutex_child_init(&accept_mutex, ap_lock_fname, pchild);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf,
                     "Couldn't initialize cross-process lock in child");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    if (unixd_setup_child()) {
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_run_child_init(pchild, ap_server_conf);

    ap_create_sb_handle(&sbh, pchild, my_child_num, 0);

    (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

    /* Set up the pollfd array */
    listensocks = apr_pcalloc(pchild,
                              sizeof(*listensocks) * (num_listensocks));
    for (lr = ap_listeners, i = 0; i < num_listensocks; lr = lr->next, i++) {
        listensocks[i].accept_func = lr->accept_func;
        listensocks[i].sd = lr->sd;
    }

    pollset = apr_palloc(pchild, sizeof(*pollset) * num_listensocks);
    pollset[0].p = pchild;
    for (i = 0; i < num_listensocks; i++) {
        pollset[i].desc.s = listensocks[i].sd;
        pollset[i].desc_type = APR_POLL_SOCKET;
        pollset[i].reqevents = APR_POLLIN;
    }

    mpm_state = AP_MPMQ_RUNNING;

    bucket_alloc = apr_bucket_alloc_create(pchild);

    while (!die_now) {
        /*
         * (Re)initialize this child to a pre-connection state.
         */
        current_conn = NULL;

        apr_pool_clear(ptrans);

        if ((ap_max_requests_per_child > 0
             && requests_this_child++ >= ap_max_requests_per_child)) {
            clean_child_exit(0);
        }

        (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

        /*
         * Wait for an acceptable connection to arrive.
         */

        /* Lock around "accept", if necessary */
        SAFE_ACCEPT(accept_mutex_on());

        if (num_listensocks == 1) {
            offset = 0;
        }
        else {
            /* multiple listening sockets - need to poll */
            for (;;) {
                apr_status_t ret;
                apr_int32_t n;

                ret = apr_poll(pollset, num_listensocks, &n, -1);
                if (ret != APR_SUCCESS) {
                    if (APR_STATUS_IS_EINTR(ret)) {
                        continue;
                    }
                    /* Single Unix documents select as returning errnos
                     * EBADF, EINTR, and EINVAL... and in none of those
                     * cases does it make sense to continue.  In fact
                     * on Linux 2.0.x we seem to end up with EFAULT
                     * occasionally, and we'd loop forever due to it.
                     */
                    ap_log_error(APLOG_MARK, APLOG_ERR, ret, ap_server_conf,
                                 "apr_poll: (listen)");
                    clean_child_exit(1);
                }

                /* find a listener */
                curr_pollfd = last_pollfd;
                do {
                    curr_pollfd++;
                    if (curr_pollfd >= num_listensocks) {
                        curr_pollfd = 0;
                    }
                    /* XXX: Should we check for POLLERR? */
                    if (pollset[curr_pollfd].rtnevents & APR_POLLIN) {
                        last_pollfd = curr_pollfd;
                        offset = curr_pollfd;
                        goto got_fd;
                    }
                } while (curr_pollfd != last_pollfd);

                continue;
            }
        }
    got_fd:
        /* if we accept() something we don't want to die, so we have to
         * defer the exit
         */
        status = listensocks[offset].accept_func(&csd,
                                                 &listensocks[offset], ptrans);

        SAFE_ACCEPT(accept_mutex_off()); /* unlock after "accept" */

        if (status == APR_EGENERAL) {
            /* resource shortage or should-not-occur occurred */
            clean_child_exit(1);
        }
        else if (status != APR_SUCCESS) {
            continue;
        }

        /*
         * We now have a connection, so set it up with the appropriate
         * socket options, file descriptors, and read/write buffers.
         */
        current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd,
                                                my_child_num, sbh,
                                                bucket_alloc);
        if (current_conn) {
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }

        /* Check the pod and the generation number after processing a
         * connection so that we'll go away if a graceful restart occurred
         * while we were processing the connection or we are the lucky
         * idle server process that gets to die.
         */
        if (ap_mpm_pod_check(pod) == APR_SUCCESS) { /* selected as idle? */
            die_now = 1;
        }
        else if (ap_my_generation !=
                 ap_scoreboard_image->global->running_generation) {
            /* restart? */
            /* yeah, this could be non-graceful restart, in which case the
             * parent will kill us soon enough, but why bother checking?
             */
            die_now = 1;
        }
    }
    clean_child_exit(0);
}
static void test_get_addr(abts_case *tc, void *data)
{
    apr_status_t rv;
    apr_socket_t *ld, *sd, *cd;
    apr_sockaddr_t *sa, *ca;
    apr_pool_t *subp;
    char *a, *b;

    APR_ASSERT_SUCCESS(tc, "create subpool", apr_pool_create(&subp, p));

    /* setup_socket() returns a socket, not a status; bail out if it
     * failed to produce one (the flattened copy wrongly compared the
     * pointer against APR_SUCCESS). */
    ld = setup_socket(tc);
    if (ld == NULL)
        return;

    APR_ASSERT_SUCCESS(tc, "get local address of bound socket",
                       apr_socket_addr_get(&sa, APR_LOCAL, ld));

    rv = apr_socket_create(&cd, sa->family, SOCK_STREAM,
                           APR_PROTO_TCP, subp);
    APR_ASSERT_SUCCESS(tc, "create client socket", rv);

    APR_ASSERT_SUCCESS(tc, "enable non-block mode",
                       apr_socket_opt_set(cd, APR_SO_NONBLOCK, 1));

    /* It is valid for a connect() on a socket with NONBLOCK set to
     * succeed (if the connection can be established synchronously),
     * but if it does, this test cannot proceed.  */
    rv = apr_socket_connect(cd, sa);
    if (rv == APR_SUCCESS) {
        apr_socket_close(ld);
        apr_socket_close(cd);
        ABTS_NOT_IMPL(tc, "Cannot test if connect completes "
                      "synchronously");
        return;
    }

    if (!APR_STATUS_IS_EINPROGRESS(rv)) {
        apr_socket_close(ld);
        apr_socket_close(cd);
        APR_ASSERT_SUCCESS(tc, "connect to listener", rv);
        return;
    }

    APR_ASSERT_SUCCESS(tc, "accept connection",
                       apr_socket_accept(&sd, ld, subp));

    {
        /* wait for writability */
        apr_pollfd_t pfd;
        int n;

        pfd.p = p;
        pfd.desc_type = APR_POLL_SOCKET;
        pfd.reqevents = APR_POLLOUT|APR_POLLHUP;
        pfd.desc.s = cd;
        pfd.client_data = NULL;

        APR_ASSERT_SUCCESS(tc, "poll for connect completion",
                           apr_poll(&pfd, 1, &n, 5 * APR_USEC_PER_SEC));
    }

    APR_ASSERT_SUCCESS(tc, "get local address of server socket",
                       apr_socket_addr_get(&sa, APR_LOCAL, sd));
    APR_ASSERT_SUCCESS(tc, "get remote address of client socket",
                       apr_socket_addr_get(&ca, APR_REMOTE, cd));

    /* Test that the pool of the returned sockaddr objects exactly
     * match the socket. */
    ABTS_PTR_EQUAL(tc, subp, sa->pool);
    ABTS_PTR_EQUAL(tc, subp, ca->pool);

    /* Check equivalence. */
    a = apr_psprintf(p, "%pI fam=%d", sa, sa->family);
    b = apr_psprintf(p, "%pI fam=%d", ca, ca->family);
    ABTS_STR_EQUAL(tc, a, b);

    /* Check pool of returned sockaddr, as above. */
    APR_ASSERT_SUCCESS(tc, "get local address of client socket",
                       apr_socket_addr_get(&sa, APR_LOCAL, cd));
    APR_ASSERT_SUCCESS(tc, "get remote address of server socket",
                       apr_socket_addr_get(&ca, APR_REMOTE, sd));

    /* Check equivalence. */
    a = apr_psprintf(p, "%pI fam=%d", sa, sa->family);
    b = apr_psprintf(p, "%pI fam=%d", ca, ca->family);
    ABTS_STR_EQUAL(tc, a, b);

    ABTS_PTR_EQUAL(tc, subp, sa->pool);
    ABTS_PTR_EQUAL(tc, subp, ca->pool);

    apr_socket_close(cd);
    apr_socket_close(sd);
    apr_socket_close(ld);

    apr_pool_destroy(subp);
}
static apr_status_t dispatch(proxy_conn_rec *conn, proxy_dir_conf *conf,
                             request_rec *r, apr_pool_t *setaside_pool,
                             apr_uint16_t request_id, const char **err,
                             int *bad_request, int *has_responded)
{
    apr_bucket_brigade *ib, *ob;
    int seen_end_of_headers = 0, done = 0, ignore_body = 0;
    apr_status_t rv = APR_SUCCESS;
    int script_error_status = HTTP_OK;
    conn_rec *c = r->connection;
    struct iovec vec[2];
    ap_fcgi_header header;
    unsigned char farray[AP_FCGI_HEADER_LEN];
    apr_pollfd_t pfd;
    int header_state = HDR_STATE_READING_HEADERS;
    char stack_iobuf[AP_IOBUFSIZE];
    apr_size_t iobuf_size = AP_IOBUFSIZE;
    char *iobuf = stack_iobuf;

    *err = NULL;
    if (conn->worker->s->io_buffer_size_set) {
        iobuf_size = conn->worker->s->io_buffer_size;
        iobuf = apr_palloc(r->pool, iobuf_size);
    }

    pfd.desc_type = APR_POLL_SOCKET;
    pfd.desc.s = conn->sock;
    pfd.p = r->pool;
    pfd.reqevents = APR_POLLIN | APR_POLLOUT;

    ib = apr_brigade_create(r->pool, c->bucket_alloc);
    ob = apr_brigade_create(r->pool, c->bucket_alloc);

    while (!done) {
        apr_interval_time_t timeout;
        apr_size_t len;
        int n;

        /* We need SOME kind of timeout here, or virtually anything will
         * cause timeout errors. */
        apr_socket_timeout_get(conn->sock, &timeout);

        rv = apr_poll(&pfd, 1, &n, timeout);
        if (rv != APR_SUCCESS) {
            if (APR_STATUS_IS_EINTR(rv)) {
                continue;
            }
            *err = "polling";
            break;
        }

        if (pfd.rtnevents & APR_POLLOUT) {
            apr_size_t to_send, writebuflen;
            int last_stdin = 0;
            char *iobuf_cursor;

            rv = ap_get_brigade(r->input_filters, ib,
                                AP_MODE_READBYTES, APR_BLOCK_READ,
                                iobuf_size);
            if (rv != APR_SUCCESS) {
                *err = "reading input brigade";
                *bad_request = 1;
                break;
            }

            if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(ib))) {
                last_stdin = 1;
            }

            writebuflen = iobuf_size;

            rv = apr_brigade_flatten(ib, iobuf, &writebuflen);

            apr_brigade_cleanup(ib);

            if (rv != APR_SUCCESS) {
                *err = "flattening brigade";
                break;
            }

            to_send = writebuflen;
            iobuf_cursor = iobuf;
            while (to_send > 0) {
                int nvec = 0;
                apr_size_t write_this_time;

                write_this_time =
                    to_send < AP_FCGI_MAX_CONTENT_LEN
                        ? to_send : AP_FCGI_MAX_CONTENT_LEN;

                ap_fcgi_fill_in_header(&header, AP_FCGI_STDIN, request_id,
                                       (apr_uint16_t)write_this_time, 0);
                ap_fcgi_header_to_array(&header, farray);

                vec[nvec].iov_base = (void *)farray;
                vec[nvec].iov_len = sizeof(farray);
                ++nvec;
                if (writebuflen) {
                    vec[nvec].iov_base = iobuf_cursor;
                    vec[nvec].iov_len = write_this_time;
                    ++nvec;
                }

                rv = send_data(conn, vec, nvec, &len);
                if (rv != APR_SUCCESS) {
                    *err = "sending stdin";
                    break;
                }

                to_send -= write_this_time;
                iobuf_cursor += write_this_time;
            }
            if (rv != APR_SUCCESS) {
                break;
            }

            if (last_stdin) {
                pfd.reqevents = APR_POLLIN; /* Done with input data */

                /* signal EOF (empty FCGI_STDIN) */
                ap_fcgi_fill_in_header(&header, AP_FCGI_STDIN, request_id,
                                       0, 0);
                ap_fcgi_header_to_array(&header, farray);

                vec[0].iov_base = (void *)farray;
                vec[0].iov_len = sizeof(farray);

                rv = send_data(conn, vec, 1, &len);
                if (rv != APR_SUCCESS) {
                    *err = "sending empty stdin";
                    break;
                }
            }
        }

        if (pfd.rtnevents & APR_POLLIN) {
            apr_size_t readbuflen;
            apr_uint16_t clen, rid;
            apr_bucket *b;
            unsigned char plen;
            unsigned char type, version;

            /* First, we grab the header... */
            rv = get_data_full(conn, (char *) farray, AP_FCGI_HEADER_LEN);
            if (rv != APR_SUCCESS) {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01067)
                              "Failed to read FastCGI header");
                break;
            }

            ap_log_rdata(APLOG_MARK, APLOG_TRACE8, r, "FastCGI header",
                         farray, AP_FCGI_HEADER_LEN, 0);

            ap_fcgi_header_fields_from_array(&version, &type, &rid,
                                             &clen, &plen, farray);

            if (version != AP_FCGI_VERSION_1) {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01068)
                              "Got bogus version %d", (int)version);
                rv = APR_EINVAL;
                break;
            }

            if (rid != request_id) {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01069)
                              "Got bogus rid %d, expected %d",
                              rid, request_id);
                rv = APR_EINVAL;
                break;
            }

recv_again:
            if (clen > iobuf_size) {
                readbuflen = iobuf_size;
            } else {
                readbuflen = clen;
            }

            /* Now get the actual data.  Yes it sucks to do this in a second
             * recv call, this will eventually change when we move to real
             * nonblocking recv calls. */
            if (readbuflen != 0) {
                rv = get_data(conn, iobuf, &readbuflen);
                if (rv != APR_SUCCESS) {
                    *err = "reading response body";
                    break;
                }
            }

            switch (type) {
            case AP_FCGI_STDOUT:
                if (clen != 0) {
                    b = apr_bucket_transient_create(iobuf,
                                                    readbuflen,
                                                    c->bucket_alloc);

                    APR_BRIGADE_INSERT_TAIL(ob, b);

                    if (!seen_end_of_headers) {
                        int st = handle_headers(r, &header_state,
                                                iobuf, readbuflen);

                        if (st == 1) {
                            int status;
                            seen_end_of_headers = 1;

                            status = ap_scan_script_header_err_brigade_ex(
                                    r, ob, NULL, APLOG_MODULE_INDEX);

                            /* suck in all the rest */
                            if (status != OK) {
                                apr_bucket *tmp_b;
                                apr_brigade_cleanup(ob);
                                tmp_b = apr_bucket_eos_create(c->bucket_alloc);
                                APR_BRIGADE_INSERT_TAIL(ob, tmp_b);

                                *has_responded = 1;
                                r->status = status;
                                rv = ap_pass_brigade(r->output_filters, ob);
                                if (rv != APR_SUCCESS) {
                                    *err = "passing headers brigade to "
                                           "output filters";
                                }
                                else if (status == HTTP_NOT_MODIFIED) {
                                    /* The 304 response MUST NOT contain
                                     * a message-body, ignore it. */
                                    ignore_body = 1;
                                }
                                else {
                                    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
                                                  APLOGNO(01070)
                                                  "Error parsing script "
                                                  "headers");
                                    rv = APR_EINVAL;
                                }
                                break;
                            }

                            if (conf->error_override &&
                                ap_is_HTTP_ERROR(r->status)) {
                                /*
                                 * set script_error_status to discard
                                 * everything after the headers
                                 */
                                script_error_status = r->status;
                                /*
                                 * prevent ap_die() from treating this as a
                                 * recursive error, initially:
                                 */
                                r->status = HTTP_OK;
                            }

                            if (script_error_status == HTTP_OK
                                && !APR_BRIGADE_EMPTY(ob) && !ignore_body) {
                                /* Send the part of the body that we read
                                 * while reading the headers. */
                                *has_responded = 1;
                                rv = ap_pass_brigade(r->output_filters, ob);
                                if (rv != APR_SUCCESS) {
                                    *err = "passing brigade to output filters";
                                    break;
                                }
                            }
                            apr_brigade_cleanup(ob);

                            apr_pool_clear(setaside_pool);
                        }
                        else {
                            /* We're still looking for the end of the
                             * headers, so this part of the data will need
                             * to persist. */
                            apr_bucket_setaside(b, setaside_pool);
                        }
                    } else {
                        /* we've already passed along the headers, so now pass
                         * through the content.  we could simply continue to
                         * setaside the content and not pass until we see the
                         * 0 content-length (below, where we append the EOS),
                         * but that could be a huge amount of data; so we pass
                         * along smaller chunks
                         */
                        if (script_error_status == HTTP_OK && !ignore_body) {
                            *has_responded = 1;
                            rv = ap_pass_brigade(r->output_filters, ob);
                            if (rv != APR_SUCCESS) {
                                *err = "passing brigade to output filters";
                                break;
                            }
                        }
                        apr_brigade_cleanup(ob);
                    }

                    /* If we didn't read all the data, go back and get the
                     * rest of it. */
                    if (clen > readbuflen) {
                        clen -= readbuflen;
                        goto recv_again;
                    }
                } else {
                    /* XXX what if we haven't seen end of the headers yet? */

                    if (script_error_status == HTTP_OK) {
                        b = apr_bucket_eos_create(c->bucket_alloc);
                        APR_BRIGADE_INSERT_TAIL(ob, b);

                        *has_responded = 1;
                        rv = ap_pass_brigade(r->output_filters, ob);
                        if (rv != APR_SUCCESS) {
                            *err = "passing brigade to output filters";
                            break;
                        }
                    }

                    /* XXX Why don't we cleanup here?  (logic from AJP) */
                }
                break;

            case AP_FCGI_STDERR:
                /* TODO: Should probably clean up this logging a bit... */
                if (clen) {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01071)
                                  "Got error '%.*s'", (int)readbuflen, iobuf);
                }

                if (clen > readbuflen) {
                    clen -= readbuflen;
                    goto recv_again;
                }
                break;

            case AP_FCGI_END_REQUEST:
                done = 1;
                break;

            default:
                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01072)
                              "Got bogus record %d", type);
                break;
            }
            /* Leave on above switch's inner error. */
            if (rv != APR_SUCCESS) {
                break;
            }

            if (plen) {
                rv = get_data_full(conn, iobuf, plen);
                if (rv != APR_SUCCESS) {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                                  APLOGNO(02537)
                                  "Error occurred reading padding");
                    break;
                }
            }
        }
    }

    apr_brigade_destroy(ib);
    apr_brigade_destroy(ob);

    if (script_error_status != HTTP_OK) {
        ap_die(script_error_status, r); /* send ErrorDocument */
        *has_responded = 1;
    }

    return rv;
}
static int32 worker_thread(void *dummy)
{
    proc_info *ti = dummy;
    int child_slot = ti->slot;
    apr_pool_t *tpool = ti->tpool;
    apr_allocator_t *allocator;
    apr_socket_t *csd = NULL;
    apr_pool_t *ptrans;          /* Pool for per-transaction stuff */
    apr_bucket_alloc_t *bucket_alloc;
    apr_socket_t *sd = NULL;
    apr_status_t rv = APR_EINIT;
    int srv, n;
    int curr_pollfd = 0, last_pollfd = 0;
    sigset_t sig_mask;
    int requests_this_child = ap_max_requests_per_thread;
    apr_pollfd_t *pollset;
    /* each worker thread is in control of its own destiny... */
    int this_worker_should_exit = 0;
    free(ti);

    mpm_state = AP_MPMQ_STARTING;

    on_exit_thread(check_restart, (void *)child_slot);

    /* block the signals for this thread */
    sigfillset(&sig_mask);
    sigprocmask(SIG_BLOCK, &sig_mask, NULL);

    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(&ptrans, tpool, NULL, allocator);
    apr_allocator_owner_set(allocator, ptrans);
    apr_pool_tag(ptrans, "transaction");

    bucket_alloc = apr_bucket_alloc_create_ex(allocator);

    apr_thread_mutex_lock(worker_thread_count_mutex);
    worker_thread_count++;
    apr_thread_mutex_unlock(worker_thread_count_mutex);

    (void) ap_update_child_status_from_indexes(0, child_slot, SERVER_STARTING,
                                               (request_rec*)NULL);

    apr_poll_setup(&pollset, num_listening_sockets + 1, tpool);
    for (n = 0; n <= num_listening_sockets; n++)
        apr_poll_socket_add(pollset, listening_sockets[n], APR_POLLIN);

    mpm_state = AP_MPMQ_RUNNING;

    while (1) {
        /* If we're here, then chances are (unless we're the first thread
         * created) we're going to be held up in the accept mutex, so doing
         * this here shouldn't hurt performance.
         */

        this_worker_should_exit |= (ap_max_requests_per_thread != 0)
                                   && (requests_this_child <= 0);

        if (this_worker_should_exit)
            break;

        (void) ap_update_child_status_from_indexes(0, child_slot,
                                                   SERVER_READY,
                                                   (request_rec*)NULL);

        apr_thread_mutex_lock(accept_mutex);

        while (!this_worker_should_exit) {
            apr_int16_t event;
            apr_status_t ret;

            ret = apr_poll(pollset, num_listening_sockets + 1, &srv, -1);

            if (ret != APR_SUCCESS) {
                if (APR_STATUS_IS_EINTR(ret)) {
                    continue;
                }

                /* poll() will only return errors in catastrophic
                 * circumstances.  Let's try exiting gracefully, for now. */
                ap_log_error(APLOG_MARK, APLOG_ERR, ret,
                             (const server_rec *) ap_server_conf,
                             "apr_poll: (listen)");
                this_worker_should_exit = 1;
            }
            else {
                /* if we've bailed in apr_poll what's the point of trying
                 * to use the data? */
                apr_poll_revents_get(&event, listening_sockets[0], pollset);

                if (event & APR_POLLIN) {
                    apr_sockaddr_t *rec_sa;
                    apr_size_t len = 5;
                    char *tmpbuf = apr_palloc(ptrans, sizeof(char) * 5);
                    apr_sockaddr_info_get(&rec_sa, "127.0.0.1", APR_UNSPEC,
                                          7772, 0, ptrans);

                    if ((ret = apr_recvfrom(rec_sa, listening_sockets[0], 0,
                                            tmpbuf, &len)) != APR_SUCCESS) {
                        ap_log_error(APLOG_MARK, APLOG_ERR, ret, NULL,
                                     "error getting data from UDP!!");
                    }
                    else {
                        /* add checking??? */
                    }
                    this_worker_should_exit = 1;
                }
            }

            if (this_worker_should_exit)
                break;

            if (num_listening_sockets == 1) {
                sd = ap_listeners->sd;
                goto got_fd;
            }
            else {
                /* find a listener */
                curr_pollfd = last_pollfd;
                do {
                    curr_pollfd++;

                    if (curr_pollfd > num_listening_sockets)
                        curr_pollfd = 1;

                    /* Get the revent... */
                    apr_poll_revents_get(&event,
                                         listening_sockets[curr_pollfd],
                                         pollset);

                    if (event & APR_POLLIN) {
                        last_pollfd = curr_pollfd;
                        sd = listening_sockets[curr_pollfd];
                        goto got_fd;
                    }
                } while (curr_pollfd != last_pollfd);
            }
        }

    got_fd:
        if (!this_worker_should_exit) {
            rv = apr_accept(&csd, sd, ptrans);
            apr_thread_mutex_unlock(accept_mutex);

            if (rv != APR_SUCCESS) {
                ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
                             "apr_accept");
            }
            else {
                process_socket(ptrans, csd, child_slot, bucket_alloc);
                requests_this_child--;
            }
        }
        else {
            apr_thread_mutex_unlock(accept_mutex);
            break;
        }
        apr_pool_clear(ptrans);
    }

    ap_update_child_status_from_indexes(0, child_slot, SERVER_DEAD,
                                        (request_rec*)NULL);

    apr_bucket_alloc_destroy(bucket_alloc);

    ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, NULL,
                 "worker_thread %ld exiting", find_thread(NULL));

    apr_thread_mutex_lock(worker_thread_count_mutex);
    worker_thread_count--;
    apr_thread_mutex_unlock(worker_thread_count_mutex);

    return (0);
}
/*
 * process the request and write the response.
 */
static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
                                proxy_conn_rec *conn,
                                conn_rec *origin,
                                proxy_dir_conf *conf,
                                apr_uri_t *uri,
                                char *url, char *server_portstr)
{
    apr_status_t status;
    int result;
    apr_bucket *e;
    apr_bucket_brigade *input_brigade;
    apr_bucket_brigade *output_brigade;
    ajp_msg_t *msg;
    apr_size_t bufsiz = 0;
    char *buff;
    char *send_body_chunk_buff;
    apr_uint16_t size;
    apr_byte_t conn_reuse = 0;
    const char *tenc;
    int havebody = 1;
    int output_failed = 0;
    int backend_failed = 0;
    apr_off_t bb_len;
    int data_sent = 0;
    int request_ended = 0;
    int headers_sent = 0;
    int rv = 0;
    apr_int32_t conn_poll_fd;
    apr_pollfd_t *conn_poll;
    proxy_server_conf *psf =
        ap_get_module_config(r->server->module_config, &proxy_module);
    apr_size_t maxsize = AJP_MSG_BUFFER_SZ;
    int send_body = 0;
    apr_off_t content_length = 0;
    int original_status = r->status;
    const char *original_status_line = r->status_line;

    if (psf->io_buffer_size_set)
        maxsize = psf->io_buffer_size;
    if (maxsize > AJP_MAX_BUFFER_SZ)
        maxsize = AJP_MAX_BUFFER_SZ;
    else if (maxsize < AJP_MSG_BUFFER_SZ)
        maxsize = AJP_MSG_BUFFER_SZ;
    maxsize = APR_ALIGN(maxsize, 1024);

    /*
     * Send the AJP request to the remote server
     */

    /* send request headers */
    status = ajp_send_header(conn->sock, r, maxsize, uri);
    if (status != APR_SUCCESS) {
        conn->close++;
        ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
                     "proxy: AJP: request failed to %pI (%s)",
                     conn->worker->cp->addr,
                     conn->worker->hostname);
        if (status == AJP_EOVERFLOW)
            return HTTP_BAD_REQUEST;
        else if (status == AJP_EBAD_METHOD) {
            return HTTP_NOT_IMPLEMENTED;
        } else {
            /*
             * This is only non fatal when the method is idempotent. In this
             * case we can dare to retry it with a different worker if we are
             * a balancer member.
             */
            if (is_idempotent(r) == METHOD_IDEMPOTENT) {
                return HTTP_SERVICE_UNAVAILABLE;
            }
            return HTTP_INTERNAL_SERVER_ERROR;
        }
    }

    /* allocate an AJP message to store the data of the buckets */
    bufsiz = maxsize;
    status = ajp_alloc_data_msg(r->pool, &buff, &bufsiz, &msg);
    if (status != APR_SUCCESS) {
        /* We had a failure: Close connection to backend */
        conn->close++;
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "proxy: ajp_alloc_data_msg failed");
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /* read the first block of data */
    input_brigade = apr_brigade_create(p, r->connection->bucket_alloc);
    tenc = apr_table_get(r->headers_in, "Transfer-Encoding");
    if (tenc && (strcasecmp(tenc, "chunked") == 0)) {
        /* The AJP protocol does not want body data yet */
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "proxy: request is chunked");
    } else {
        /* Get client provided Content-Length header */
        content_length = get_content_length(r);
        status = ap_get_brigade(r->input_filters, input_brigade,
                                AP_MODE_READBYTES, APR_BLOCK_READ,
                                maxsize - AJP_HEADER_SZ);
        if (status != APR_SUCCESS) {
            /* We had a failure: Close connection to backend */
            conn->close++;
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                         "proxy: ap_get_brigade failed");
            apr_brigade_destroy(input_brigade);
            return HTTP_BAD_REQUEST;
        }

        /* have something */
        if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                         "proxy: APR_BUCKET_IS_EOS");
        }

        /* Try to send something */
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "proxy: data to read (max %" APR_SIZE_T_FMT
                     " at %" APR_SIZE_T_FMT ")", bufsiz, msg->pos);

        status = apr_brigade_flatten(input_brigade, buff, &bufsiz);
        if (status != APR_SUCCESS) {
            /* We had a failure: Close connection to backend */
            conn->close++;
            apr_brigade_destroy(input_brigade);
            ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
                         "proxy: apr_brigade_flatten");
            return HTTP_INTERNAL_SERVER_ERROR;
        }
        apr_brigade_cleanup(input_brigade);

        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "proxy: got %" APR_SIZE_T_FMT " bytes of data", bufsiz);
        if (bufsiz > 0) {
            status = ajp_send_data_msg(conn->sock, msg, bufsiz);
            if (status != APR_SUCCESS) {
                /* We had a failure: Close connection to backend */
                conn->close++;
                apr_brigade_destroy(input_brigade);
                ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
                             "proxy: send failed to %pI (%s)",
                             conn->worker->cp->addr,
                             conn->worker->hostname);
                /*
                 * It is fatal when we failed to send a (part) of the request
                 * body.
                 */
                return HTTP_INTERNAL_SERVER_ERROR;
            }
            conn->worker->s->transferred += bufsiz;
            send_body = 1;
        }
        else if (content_length > 0) {
            ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
                         "proxy: read zero bytes, expecting"
                         " %" APR_OFF_T_FMT " bytes",
                         content_length);
            /*
             * We can only get here if the client closed the connection
             * to us without sending the body.
             * Now the connection is in the wrong state on the backend.
             * Sending an empty data msg doesn't help either as it does
             * not move this connection to the correct state on the backend
             * for later reuse by the next request again.
             * Close it to clean things up.
             */
            conn->close++;
            return HTTP_BAD_REQUEST;
        }
    }

    /* read the response */
    conn->data = NULL;
    status = ajp_read_header(conn->sock, r, maxsize,
                             (ajp_msg_t **)&(conn->data));
    if (status != APR_SUCCESS) {
        /* We had a failure: Close connection to backend */
        conn->close++;
        apr_brigade_destroy(input_brigade);
        ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
                     "proxy: read response failed from %pI (%s)",
                     conn->worker->cp->addr,
                     conn->worker->hostname);

        /* If we had a successful cping/cpong and then a timeout
         * we assume it is a request that caused a back-end timeout,
         * but doesn't affect the whole worker.
         */
        if (APR_STATUS_IS_TIMEUP(status) && conn->worker->ping_timeout_set) {
            return HTTP_GATEWAY_TIME_OUT;
        }

        /*
         * This is only non fatal when we have not sent (parts) of a possible
         * request body so far (we do not store it and thus cannot send it
         * again) and the method is idempotent. In this case we can dare to
         * retry it with a different worker if we are a balancer member.
         */
        if (!send_body && (is_idempotent(r) == METHOD_IDEMPOTENT)) {
            return HTTP_SERVICE_UNAVAILABLE;
        }
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /* parse the response */
    result = ajp_parse_type(r, conn->data);
    output_brigade = apr_brigade_create(p, r->connection->bucket_alloc);

    /*
     * Prepare apr_pollfd_t struct for possible later check if there is
     * currently data available from the backend (do not flush response to
     * client) or not (flush response to client)
     */
    conn_poll = apr_pcalloc(p, sizeof(apr_pollfd_t));
    conn_poll->reqevents = APR_POLLIN;
    conn_poll->desc_type = APR_POLL_SOCKET;
    conn_poll->desc.s = conn->sock;

    bufsiz = maxsize;
    for (;;) {
        switch (result) {
            case CMD_AJP13_GET_BODY_CHUNK:
                if (havebody) {
                    if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
                        /* This is the end */
                        bufsiz = 0;
                        havebody = 0;
                        ap_log_error(APLOG_MARK, APLOG_DEBUG, status,
                                     r->server, "proxy: APR_BUCKET_IS_EOS");
                    } else {
                        status = ap_get_brigade(r->input_filters,
                                                input_brigade,
                                                AP_MODE_READBYTES,
                                                APR_BLOCK_READ,
                                                maxsize - AJP_HEADER_SZ);
                        if (status != APR_SUCCESS) {
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, status,
                                         r->server,
                                         "ap_get_brigade failed");
                            output_failed = 1;
                            break;
                        }
                        bufsiz = maxsize;
                        status = apr_brigade_flatten(input_brigade, buff,
                                                     &bufsiz);
                        apr_brigade_cleanup(input_brigade);
                        if (status != APR_SUCCESS) {
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, status,
                                         r->server,
                                         "apr_brigade_flatten failed");
                            output_failed = 1;
                            break;
                        }
                    }

                    ajp_msg_reset(msg);
                    /* will go in ajp_send_data_msg */
                    status = ajp_send_data_msg(conn->sock, msg, bufsiz);
                    if (status != APR_SUCCESS) {
                        ap_log_error(APLOG_MARK, APLOG_DEBUG, status,
                                     r->server,
                                     "ajp_send_data_msg failed");
                        backend_failed = 1;
                        break;
                    }
                    conn->worker->s->transferred += bufsiz;
                } else {
                    /*
                     * something is wrong TC asks for more body but we are
                     * already at the end of the body data
                     */
                    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                                 "ap_proxy_ajp_request error read after end");
                    backend_failed = 1;
                }
                break;
            case CMD_AJP13_SEND_HEADERS:
                if (headers_sent) {
                    /* Do not send anything to the client.
                     * Backend already sent us the headers.
                     */
                    backend_failed = 1;
                    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                                 "proxy: Backend sent headers twice.");
                    break;
                }
                /* AJP13_SEND_HEADERS: process them */
                status = ajp_parse_header(r, conf, conn->data);
                if (status != APR_SUCCESS) {
                    backend_failed = 1;
                }
                else if ((r->status == 401) && psf->error_override) {
                    const char *buf;
                    const char *wa = "WWW-Authenticate";
                    if ((buf = apr_table_get(r->headers_out, wa))) {
                        apr_table_set(r->err_headers_out, wa, buf);
                    } else {
                        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                                     "ap_proxy_ajp_request: origin server "
                                     "sent 401 without WWW-Authenticate "
                                     "header");
                    }
                }
                headers_sent = 1;
                break;
            case CMD_AJP13_SEND_BODY_CHUNK:
                /* AJP13_SEND_BODY_CHUNK: piece of data */
                status = ajp_parse_data(r, conn->data, &size,
                                        &send_body_chunk_buff);
                if (status == APR_SUCCESS) {
                    /* If we are overriding the errors, we can't put the
                     * content of the page into the brigade. */
                    if (!psf->error_override
                        || !ap_is_HTTP_ERROR(r->status)) {
                        /* AJP13_SEND_BODY_CHUNK with zero length
                         * is explicit flush message */
                        if (size == 0) {
                            if (headers_sent) {
                                e = apr_bucket_flush_create(
                                        r->connection->bucket_alloc);
                                APR_BRIGADE_INSERT_TAIL(output_brigade, e);
                            }
                            else {
                                ap_log_error(APLOG_MARK, APLOG_DEBUG, 0,
                                             r->server,
                                             "Ignoring flush message "
                                             "received before headers");
                            }
                        }
                        else {
                            apr_status_t rv;

                            /* Handle the case where the error document is
                             * itself reverse proxied and was successful. We
                             * must maintain any previous error status so
                             * that an underlying error (eg HTTP_NOT_FOUND)
                             * doesn't become an HTTP_OK.
                             */
                            if (psf->error_override
                                && !ap_is_HTTP_ERROR(r->status)
                                && ap_is_HTTP_ERROR(original_status)) {
                                r->status = original_status;
                                r->status_line = original_status_line;
                            }

                            e = apr_bucket_transient_create(
                                    send_body_chunk_buff, size,
                                    r->connection->bucket_alloc);
                            APR_BRIGADE_INSERT_TAIL(output_brigade, e);

                            if ((conn->worker->flush_packets == flush_on) ||
                                ((conn->worker->flush_packets == flush_auto)
                                 && ((rv = apr_poll(conn_poll, 1,
                                                    &conn_poll_fd,
                                                    conn->worker->flush_wait))
                                     != APR_SUCCESS)
                                 && APR_STATUS_IS_TIMEUP(rv))) {
                                e = apr_bucket_flush_create(
                                        r->connection->bucket_alloc);
                                APR_BRIGADE_INSERT_TAIL(output_brigade, e);
                            }
                            apr_brigade_length(output_brigade, 0, &bb_len);
                            if (bb_len != -1)
                                conn->worker->s->read += bb_len;
                        }
                        if (headers_sent) {
                            if (ap_pass_brigade(r->output_filters,
                                                output_brigade)
                                != APR_SUCCESS) {
                                ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
                                              "proxy: error processing "
                                              "body.%s",
                                              r->connection->aborted ?
                                              " Client aborted connection."
                                              : "");
                                output_failed = 1;
                            }
                            data_sent = 1;
                            apr_brigade_cleanup(output_brigade);
                        }
                    }
                }
                else {
                    backend_failed = 1;
                }
                break;
            case CMD_AJP13_END_RESPONSE:
                status = ajp_parse_reuse(r, conn->data, &conn_reuse);
                if (status != APR_SUCCESS) {
                    backend_failed = 1;
                }
                /* If we are overriding the errors, we must not send anything
                 * to the client, especially as the brigade already contains
                 * headers. So do nothing here, and it will be cleaned up
                 * below.
                 */
                if (!psf->error_override || !ap_is_HTTP_ERROR(r->status)) {
                    e = apr_bucket_eos_create(r->connection->bucket_alloc);
                    APR_BRIGADE_INSERT_TAIL(output_brigade, e);
                    if (ap_pass_brigade(r->output_filters,
                                        output_brigade) != APR_SUCCESS) {
                        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
                                      "proxy: error processing end");
                        output_failed = 1;
                    }
                    /* XXX: what about flush here? See mod_jk */
                    data_sent = 1;
                }
                request_ended = 1;
                break;
            default:
                backend_failed = 1;
                break;
        }

        /*
         * If connection has been aborted by client: Stop working.
         * Nevertheless, we regard our operation so far as a success:
         * So reset output_failed to 0 and set result to
         * CMD_AJP13_END_RESPONSE. But: Close this connection to the backend.
         */
        if (r->connection->aborted) {
            conn->close++;
            output_failed = 0;
            result = CMD_AJP13_END_RESPONSE;
            request_ended = 1;
        }

        /*
         * We either have finished successfully or we failed.
         * So bail out
         */
        if ((result == CMD_AJP13_END_RESPONSE) || backend_failed
            || output_failed)
            break;

        /* read the response */
        status = ajp_read_header(conn->sock, r, maxsize,
                                 (ajp_msg_t **)&(conn->data));
        if (status != APR_SUCCESS) {
            backend_failed = 1;
            ap_log_error(APLOG_MARK, APLOG_DEBUG, status, r->server,
                         "ajp_read_header failed");
            break;
        }
        result = ajp_parse_type(r, conn->data);
    }
    apr_brigade_destroy(input_brigade);

    /*
     * Clear output_brigade to remove possible buckets that remained there
     * after an error.
     */
    apr_brigade_cleanup(output_brigade);

    if (backend_failed || output_failed) {
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "proxy: Processing of request failed backend: %i, "
                     "output: %i", backend_failed, output_failed);
        /* We had a failure: Close connection to backend */
        conn->close++;
        /* Return DONE to avoid error messages being added to the stream */
        if (data_sent) {
            rv = DONE;
        }
    }
    else if (!request_ended) {
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "proxy: Processing of request didn't terminate cleanly");
        /* We had a failure: Close connection to backend */
        conn->close++;
        backend_failed = 1;
        /* Return DONE to avoid error messages being added to the stream */
        if (data_sent) {
            rv = DONE;
        }
    }
    else if (!conn_reuse) {
        /* Our backend signalled connection close */
        conn->close++;
    }
    else {
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "proxy: got response from %pI (%s)",
                     conn->worker->cp->addr,
                     conn->worker->hostname);

        if (psf->error_override && ap_is_HTTP_ERROR(r->status)) {
            /* clear r->status for override error, otherwise ErrorDocument
             * thinks that this is a recursive error, and doesn't find the
             * custom error page
             */
            rv = r->status;
            r->status = HTTP_OK;
        } else {
            rv = OK;
        }
    }

    if (backend_failed) {
        ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
                     "proxy: dialog to %pI (%s) failed",
                     conn->worker->cp->addr,
                     conn->worker->hostname);
        /*
         * If we already sent data, signal a broken backend connection
         * upwards in the chain.
         */
        if (data_sent) {
            ap_proxy_backend_broke(r, output_brigade);
        } else if (!send_body && (is_idempotent(r) == METHOD_IDEMPOTENT)) {
            /*
             * This is only non fatal when we have not sent (parts) of a
             * possible request body so far (we do not store it and thus
             * cannot send it again) and the method is idempotent. In this
             * case we can dare to retry it with a different worker if we
             * are a balancer member.
             */
            rv = HTTP_SERVICE_UNAVAILABLE;
        } else {
            rv = HTTP_INTERNAL_SERVER_ERROR;
        }
    }

    /*
     * Ensure that we sent an EOS bucket thru the filter chain, if we already
     * have sent some data. Maybe ap_proxy_backend_broke was called and added
     * one to the brigade already (no longer making it empty). So we should
     * not do this in this case.
     */
    if (data_sent && !r->eos_sent && APR_BRIGADE_EMPTY(output_brigade)) {
        e = apr_bucket_eos_create(r->connection->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(output_brigade, e);
    }

    /* If we have added something to the brigade above, send it */
    if (!APR_BRIGADE_EMPTY(output_brigade))
        ap_pass_brigade(r->output_filters, output_brigade);

    apr_brigade_destroy(output_brigade);

    return rv;
}
int main(int argc, const char * const argv[])
{
    char buf[BUFSIZE];
    apr_size_t nRead, nWrite;
    apr_file_t *f_stdin;
    apr_file_t *f_stdout;
    apr_getopt_t *opt;
    apr_status_t rv;
    char c;
    const char *opt_arg;
    const char *err = NULL;
#if APR_FILES_AS_SOCKETS
    apr_pollfd_t pollfd = { 0 };
    apr_status_t pollret = APR_SUCCESS;
    apr_int32_t nsds;
    int polltimeout;
#endif

    apr_app_initialize(&argc, &argv, NULL);
    atexit(apr_terminate);

    memset(&config, 0, sizeof config);
    memset(&status, 0, sizeof status);
    status.rotateReason = ROTATE_NONE;

    apr_pool_create(&status.pool, NULL);
    apr_getopt_init(&opt, status.pool, argc, argv);
#if APR_FILES_AS_SOCKETS
    while ((rv = apr_getopt(opt, "lL:p:ftvecn:", &c, &opt_arg))
           == APR_SUCCESS) {
#else
    while ((rv = apr_getopt(opt, "lL:p:ftven:", &c, &opt_arg))
           == APR_SUCCESS) {
#endif
        switch (c) {
        case 'l':
            config.use_localtime = 1;
            break;
        case 'L':
            config.linkfile = opt_arg;
            break;
        case 'p':
            config.postrotate_prog = opt_arg;
            break;
        case 'f':
            config.force_open = 1;
            break;
        case 't':
            config.truncate = 1;
            break;
        case 'v':
            config.verbose = 1;
            break;
        case 'e':
            config.echo = 1;
            break;
#if APR_FILES_AS_SOCKETS
        case 'c':
            config.create_empty = 1;
            break;
#endif
        case 'n':
            config.num_files = atoi(opt_arg);
            status.fileNum = -1;
            break;
        }
    }

    if (rv != APR_EOF) {
        usage(argv[0], NULL /* specific error message already issued */ );
    }

    /*
     * After the initial flags we need 2 to 4 arguments,
     * the file name, either the rotation interval time or size
     * or both of them, and optionally the UTC offset.
     */
    if ((argc - opt->ind < 2) || (argc - opt->ind > 4) ) {
        usage(argv[0], "Incorrect number of arguments");
    }

    config.szLogRoot = argv[opt->ind++];

    /* Read in the remaining flags, namely time, size and UTC offset. */
    for (; opt->ind < argc; opt->ind++) {
        if ((err = get_time_or_size(&config, argv[opt->ind],
                                    opt->ind < argc - 1 ? 0 : 1)) != NULL) {
            usage(argv[0], err);
        }
    }

    config.use_strftime = (strchr(config.szLogRoot, '%') != NULL);

    if (config.use_strftime && config.num_files > 0) {
        fprintf(stderr, "Cannot use -n with %% in filename\n");
        exit(1);
    }

    if (status.fileNum == -1 && config.num_files < 1) {
        fprintf(stderr, "Invalid -n argument\n");
        exit(1);
    }

    if (apr_file_open_stdin(&f_stdin, status.pool) != APR_SUCCESS) {
        fprintf(stderr, "Unable to open stdin\n");
        exit(1);
    }

    if (apr_file_open_stdout(&f_stdout, status.pool) != APR_SUCCESS) {
        fprintf(stderr, "Unable to open stdout\n");
        exit(1);
    }

    /*
     * Write out result of config parsing if verbose is set.
     */
    if (config.verbose) {
        dumpConfig(&config);
    }

#if APR_FILES_AS_SOCKETS
    if (config.create_empty && config.tRotation) {
        pollfd.p = status.pool;
        pollfd.desc_type = APR_POLL_FILE;
        pollfd.reqevents = APR_POLLIN;
        pollfd.desc.f = f_stdin;
    }
#endif

    /*
     * Immediately open the logfile as we start, if we were forced
     * to do so via '-f'.
     */
    if (config.force_open) {
        doRotate(&config, &status);
    }

    for (;;) {
        nRead = sizeof(buf);
#if APR_FILES_AS_SOCKETS
        if (config.create_empty && config.tRotation) {
            polltimeout = status.tLogEnd
                ? status.tLogEnd - get_now(&config)
                : config.tRotation;
            if (polltimeout <= 0) {
                pollret = APR_TIMEUP;
            }
            else {
                /* Use a separate count variable rather than aliasing the
                 * status: apr_poll()'s third argument receives the number
                 * of signalled descriptors. */
                pollret = apr_poll(&pollfd, 1, &nsds,
                                   apr_time_from_sec(polltimeout));
            }
        }
        if (pollret == APR_SUCCESS) {
            rv = apr_file_read(f_stdin, buf, &nRead);
            if (APR_STATUS_IS_EOF(rv)) {
                break;
            }
            else if (rv != APR_SUCCESS) {
                exit(3);
            }
        }
        else if (pollret == APR_TIMEUP) {
            *buf = 0;
            nRead = 0;
        }
        else {
            fprintf(stderr, "Unable to poll stdin\n");
            exit(5);
        }
#else /* APR_FILES_AS_SOCKETS */
        rv = apr_file_read(f_stdin, buf, &nRead);
        if (APR_STATUS_IS_EOF(rv)) {
            break;
        }
        else if (rv != APR_SUCCESS) {
            exit(3);
        }
#endif /* APR_FILES_AS_SOCKETS */
        checkRotate(&config, &status);
        if (status.rotateReason != ROTATE_NONE) {
            doRotate(&config, &status);
        }

        nWrite = nRead;
        rv = apr_file_write_full(status.current.fd, buf, nWrite, &nWrite);
        if (nWrite != nRead) {
            apr_off_t cur_offset;

            cur_offset = 0;
            if (apr_file_seek(status.current.fd, APR_CUR, &cur_offset)
                != APR_SUCCESS) {
                cur_offset = -1;
            }
            status.nMessCount++;
            apr_snprintf(status.errbuf, sizeof status.errbuf,
                         "Error %d writing to log file at offset %"
                         APR_OFF_T_FMT ". %10d messages lost (%pm)\n",
                         rv, cur_offset, status.nMessCount, &rv);
            truncate_and_write_error(&status);
        }
        else {
            status.nMessCount++;
        }
        if (config.echo) {
            if (apr_file_write_full(f_stdout, buf, nRead, &nWrite)) {
                fprintf(stderr, "Unable to write to stdout\n");
                exit(4);
            }
        }
    }

    return 0; /* reached only at stdin EOF. */
}
double write_allocs(ibp_capset_t *caps, int qlen, int n, int asize,
                    int block_size)
{
    int count, i, j, nleft, nblocks, rem, len, err, block_start, alloc_start;
    int *slot;
    op_status_t status;
    int64_t nbytes, last_bytes, print_bytes, delta_bytes;
    apr_int32_t nfds, finished;
    double dbytes, r1, r2;
    apr_pool_t *pool;
    apr_file_t *fd_in;
    opque_t *q;
    op_generic_t *op;
    char *buffer = (char *)malloc(block_size);
    apr_interval_time_t dt = 10;
    apr_pollfd_t pfd;
    apr_time_t stime, dtime;
    int *tbuf_index;
    tbuffer_t *buf;
    Stack_t *tbuf_free;

    tbuf_free = new_stack();
    type_malloc_clear(tbuf_index, int, qlen);
    type_malloc_clear(buf, tbuffer_t, qlen);
    for (i = 0; i < qlen; i++) {
        tbuf_index[i] = i;
        push(tbuf_free, &(tbuf_index[i]));
    }

    //** Make the stuff to capture the kbd
    apr_pool_create(&pool, NULL);

    nfds = 1;
    apr_file_open_stdin(&fd_in, pool);

    pfd.p = pool;
    pfd.desc_type = APR_POLL_FILE;
    pfd.reqevents = APR_POLLIN|APR_POLLHUP;
    pfd.desc.f = fd_in;
    pfd.client_data = NULL;

    //** Init the ibp stuff
    init_buffer(buffer, 'W', block_size);
    q = new_opque();
    opque_start_execution(q);

    nblocks = asize / block_size;
    rem = asize % block_size;
    if (rem > 0) nblocks++;
    block_start = 0;
    alloc_start = 0;

    finished = 0;
    apr_poll(&pfd, nfds, &finished, dt);

    count = 0;
    nbytes = 0;
    last_bytes = 0;
    delta_bytes = 1024 * 1024 * 1024;
    print_bytes = delta_bytes;
    stime = apr_time_now();

    while (finished == 0) {
//        nleft = qlen - opque_tasks_left(q);
        nleft = stack_size(tbuf_free);
//        printf("\nLOOP: nleft=%d qlen=%d\n", nleft, qlen);
        if (nleft > 0) {
            for (j = block_start; j < nblocks; j++) {
                for (i = alloc_start; i < n; i++) {
                    nleft--;
                    if (nleft <= 0) {
                        block_start = j;
                        alloc_start = i;
                        goto skip_submit;
                    }

                    slot = (int *)pop(tbuf_free);

                    if ((j == (nblocks-1)) && (rem > 0)) {
                        len = rem;
                    } else {
                        len = block_size;
                    }
//                    printf("%d=(%d,%d) ", count, j, i);
                    tbuffer_single(&(buf[*slot]), len, buffer);
                    op = new_ibp_write_op(ic,
                                          get_ibp_cap(&(caps[i]),
                                                      IBP_WRITECAP),
                                          j*block_size, &(buf[*slot]), 0,
                                          len, ibp_timeout);
                    gop_set_id(op, *slot);
                    ibp_op_set_cc(ibp_get_iop(op), cc);
                    ibp_op_set_ncs(ibp_get_iop(op), ncs);
                    opque_add(q, op);
                }

                alloc_start = 0;
            }

            block_start = 0;
        }

skip_submit:
        finished = 1;
        apr_poll(&pfd, nfds, &finished, dt);

        //** Empty the finished queue. Always wait for at least 1 to complete
        do {
            op = opque_waitany(q);
            status = gop_get_status(op);
            if (status.error_code != IBP_OK) {
                printf("ERROR: Aborting with error code %d\n",
                       status.error_code);
                finished = 0;
            }

            count++;
            i = gop_get_id(op);
            nbytes = nbytes + tbuffer_size(&(buf[i]));
            if (nbytes > print_bytes) {
                dbytes = nbytes / (1.0*1024*1024*1024);
                dtime = apr_time_now() - stime;
                r2 = dtime / (1.0 * APR_USEC_PER_SEC);
                r1 = nbytes - last_bytes;
                r1 = r1 / (r2 * 1024.0 * 1024.0);
                printf("%.2lfGB written (%.2lfMB/s : %.2lf secs)\n",
                       dbytes, r1, r2);
                print_bytes = print_bytes + delta_bytes;
                last_bytes = nbytes;
                stime = apr_time_now();
            }

            push(tbuf_free, &(tbuf_index[i]));
            gop_free(op, OP_DESTROY);
        } while (opque_tasks_finished(q) > 0);
    }

    err = opque_waitall(q);
    if (err != OP_STATE_SUCCESS) {
        printf("write_allocs: At least 1 error occurred! * ibp_errno=%d * "
               "nfailed=%d\n", err, opque_tasks_failed(q));
    }
    opque_free(q, OP_DESTROY);

    free_stack(tbuf_free, 0);
    free(tbuf_index);
    free(buf);
    free(buffer);

    apr_pool_destroy(pool);

    dbytes = nbytes;
    return (dbytes);
}