/* Exercise apr_socket_atreadeof() across the lifetime of two child
 * connections: it must report "not at EOF" while the peer process is
 * alive, and "at EOF" once the peer has exited or closed its end. */
static void test_atreadeof(abts_case *tc, void *data)
{
    apr_status_t rv;
    apr_socket_t *sock;       /* listening socket */
    apr_socket_t *sock2;      /* accepted connection */
    apr_proc_t proc;          /* child process used as the peer */
    apr_size_t length = STRLEN;
    char datastr[STRLEN];
    int atreadeof = -1;

    sock = setup_socket(tc);
    if (!sock) return;

    /* First child: writes DATASTR then exits. */
    launch_child(tc, &proc, "write", p);

    rv = apr_socket_accept(&sock2, sock, p);
    APR_ASSERT_SUCCESS(tc, "Problem with receiving connection", rv);

    /* Check that the remote socket is still open */
    rv = apr_socket_atreadeof(sock2, &atreadeof);
    APR_ASSERT_SUCCESS(tc, "Determine whether at EOF, #1", rv);
    ABTS_INT_EQUAL(tc, 0, atreadeof);

    memset(datastr, 0, STRLEN);
    apr_socket_recv(sock2, datastr, &length);

    /* Make sure that the server received the data we sent */
    ABTS_STR_EQUAL(tc, DATASTR, datastr);
    ABTS_SIZE_EQUAL(tc, strlen(datastr), wait_child(tc, &proc));

    /* The child is dead, so should be the remote socket */
    rv = apr_socket_atreadeof(sock2, &atreadeof);
    APR_ASSERT_SUCCESS(tc, "Determine whether at EOF, #2", rv);
    ABTS_INT_EQUAL(tc, 1, atreadeof);

    rv = apr_socket_close(sock2);
    APR_ASSERT_SUCCESS(tc, "Problem closing connected socket", rv);

    /* Second child: closes its end of the connection immediately. */
    launch_child(tc, &proc, "close", p);

    rv = apr_socket_accept(&sock2, sock, p);
    APR_ASSERT_SUCCESS(tc, "Problem with receiving connection", rv);

    /* The child closed the socket as soon as it could... */
    rv = apr_socket_atreadeof(sock2, &atreadeof);
    APR_ASSERT_SUCCESS(tc, "Determine whether at EOF, #3", rv);
    if (!atreadeof) {
        /* ... but perhaps not yet; wait a moment */
        apr_sleep(apr_time_from_msec(5));
        rv = apr_socket_atreadeof(sock2, &atreadeof);
        APR_ASSERT_SUCCESS(tc, "Determine whether at EOF, #4", rv);
    }
    ABTS_INT_EQUAL(tc, 1, atreadeof);

    wait_child(tc, &proc);

    rv = apr_socket_close(sock2);
    APR_ASSERT_SUCCESS(tc, "Problem closing connected socket", rv);
    rv = apr_socket_close(sock);
    APR_ASSERT_SUCCESS(tc, "Problem closing socket", rv);
}
/* Accept an incoming TCP/MRCPv2 connection on the agent's listening
 * socket.  When the agent has no "null connection" to anchor new
 * connections, the pending connection is accepted into a throwaway
 * pool and immediately refused.  Returns TRUE only when the
 * connection is fully set up and registered with the pollset. */
static apt_bool_t mrcp_server_agent_connection_accept(mrcp_connection_agent_t *agent)
{
    char *local_ip = NULL;
    char *remote_ip = NULL;
    apr_socket_t *sock;
    apr_pool_t *pool;
    mrcp_connection_t *connection;

    if(!agent->null_connection) {
        /* Reject path: accept just to be able to close the socket. */
        pool = apt_pool_create();
        if(apr_socket_accept(&sock,agent->listen_sock,pool) != APR_SUCCESS) {
            return FALSE;
        }
        apt_log(APT_LOG_MARK,APT_PRIO_NOTICE,"Rejected TCP/MRCPv2 Connection");
        apr_socket_close(sock);
        apr_pool_destroy(pool);
        return FALSE;
    }

    pool = agent->null_connection->pool;
    if(apr_socket_accept(&sock,agent->listen_sock,pool) != APR_SUCCESS) {
        return FALSE;
    }

    connection = mrcp_connection_create();
    connection->sock = sock;

    /* Resolve both endpoint addresses; failure means the socket is
     * unusable, so tear the connection back down. */
    if(apr_socket_addr_get(&connection->r_sockaddr,APR_REMOTE,sock) != APR_SUCCESS ||
       apr_socket_addr_get(&connection->l_sockaddr,APR_LOCAL,sock) != APR_SUCCESS) {
        apr_socket_close(sock);
        mrcp_connection_destroy(connection);
        return FALSE;
    }

    apr_sockaddr_ip_get(&local_ip,connection->l_sockaddr);
    apr_sockaddr_ip_get(&remote_ip,connection->r_sockaddr);
    apt_string_set(&connection->remote_ip,remote_ip);
    /* Human-readable id used in log messages: "local <-> remote". */
    connection->id = apr_psprintf(connection->pool,"%s:%hu <-> %s:%hu",
        local_ip,connection->l_sockaddr->port,
        remote_ip,connection->r_sockaddr->port);

    /* Register the socket for read events with the agent's pollset. */
    memset(&connection->sock_pfd,0,sizeof(apr_pollfd_t));
    connection->sock_pfd.desc_type = APR_POLL_SOCKET;
    connection->sock_pfd.reqevents = APR_POLLIN;
    connection->sock_pfd.desc.s = connection->sock;
    connection->sock_pfd.client_data = connection;
    if(apt_pollset_add(agent->pollset, &connection->sock_pfd) != TRUE) {
        apt_log(APT_LOG_MARK,APT_PRIO_WARNING,"Failed to Add to Pollset");
        apr_socket_close(sock);
        mrcp_connection_destroy(connection);
        return FALSE;
    }

    apt_log(APT_LOG_MARK,APT_PRIO_NOTICE,"Accepted TCP/MRCPv2 Connection %s",connection->id);
    connection->agent = agent;
    connection->it = apt_list_push_back(agent->connection_list,connection,connection->pool);
    connection->parser = mrcp_parser_create(agent->resource_factory,connection->pool);
    connection->generator = mrcp_generator_create(agent->resource_factory,connection->pool);
    return TRUE;
}
/* Accept an incoming TCP/MRCPv2 connection (legacy variant: records
 * the remote address only and registers with apr_pollset_add
 * directly).  Returns TRUE only on a fully established connection. */
static apt_bool_t mrcp_server_agent_connection_accept(mrcp_connection_agent_t *agent)
{
    apr_socket_t *sock;
    apr_pool_t *pool;
    mrcp_connection_t *connection;

    if(!agent->null_connection) {
        /* Reject path: accept into a temporary pool just so the
         * pending connection can be closed and refused. */
        apr_pool_create(&pool,NULL);
        if(apr_socket_accept(&sock,agent->listen_sock,pool) != APR_SUCCESS) {
            return FALSE;
        }
        apt_log(APT_LOG_MARK,APT_PRIO_NOTICE,"Rejected TCP/MRCPv2 Connection");
        apr_socket_close(sock);
        apr_pool_destroy(pool);
        return FALSE;
    }

    pool = agent->null_connection->pool;
    if(apr_socket_accept(&sock,agent->listen_sock,pool) != APR_SUCCESS) {
        return FALSE;
    }

    connection = mrcp_connection_create();
    connection->sock = sock;

    /* Register the socket for read events with the agent's pollset.
     * NOTE(review): sock_pfd is not zeroed before use here (a sibling
     * variant memsets it first) — confirm every field read by the
     * pollset implementation is assigned. */
    connection->sock_pfd.desc_type = APR_POLL_SOCKET;
    connection->sock_pfd.reqevents = APR_POLLIN;
    connection->sock_pfd.desc.s = connection->sock;
    connection->sock_pfd.client_data = connection;
    if(apr_pollset_add(agent->pollset, &connection->sock_pfd) != APR_SUCCESS) {
        apt_log(APT_LOG_MARK,APT_PRIO_WARNING,"Failed to Add to Pollset");
        apr_socket_close(sock);
        mrcp_connection_destroy(connection);
        return FALSE;
    }

    connection->agent = agent;
    connection->it = apt_list_push_back(agent->connection_list,connection);
    connection->parser = mrcp_parser_create(agent->resource_factory,connection->pool);
    connection->generator = mrcp_generator_create(agent->resource_factory,connection->pool);

    /* Record the peer address for the log message below. */
    apr_socket_addr_get(&connection->sockaddr,APR_REMOTE,sock);
    if(apr_sockaddr_ip_get(&connection->remote_ip.buf,connection->sockaddr) == APR_SUCCESS) {
        connection->remote_ip.length = strlen(connection->remote_ip.buf);
    }
    apt_log(APT_LOG_MARK,APT_PRIO_NOTICE,"Accepted TCP/MRCPv2 Connection %s:%d",
        connection->remote_ip.buf,
        connection->sockaddr->port);
    return TRUE;
}
/* Accept RTSP connection.
 * Allocate a per-connection pool and connection object, accept the
 * pending socket, resolve both endpoint addresses, register the
 * socket with the server's poller task, and link the connection into
 * the server's connection ring.  On any failure the pool (and with it
 * the connection object) is destroyed and FALSE is returned. */
static apt_bool_t rtsp_server_connection_accept(rtsp_server_t *server)
{
    rtsp_server_connection_t *rtsp_connection;
    char *local_ip = NULL;
    char *remote_ip = NULL;
    apr_sockaddr_t *l_sockaddr = NULL;
    apr_sockaddr_t *r_sockaddr = NULL;
    apr_pool_t *pool = apt_pool_create();
    if(!pool) {
        return FALSE;
    }

    rtsp_connection = apr_palloc(pool,sizeof(rtsp_server_connection_t));
    rtsp_connection->pool = pool;
    rtsp_connection->sock = NULL;
    rtsp_connection->client_ip = NULL;
    APR_RING_ELEM_INIT(rtsp_connection,link);

    if(apr_socket_accept(&rtsp_connection->sock,server->listen_sock,rtsp_connection->pool) != APR_SUCCESS) {
        apt_log(RTSP_LOG_MARK,APT_PRIO_WARNING,"Failed to Accept RTSP Connection");
        apr_pool_destroy(pool);
        return FALSE;
    }

    if(apr_socket_addr_get(&l_sockaddr,APR_LOCAL,rtsp_connection->sock) != APR_SUCCESS ||
       apr_socket_addr_get(&r_sockaddr,APR_REMOTE,rtsp_connection->sock) != APR_SUCCESS) {
        apt_log(RTSP_LOG_MARK,APT_PRIO_WARNING,"Failed to Get RTSP Socket Address");
        apr_pool_destroy(pool);
        return FALSE;
    }

    apr_sockaddr_ip_get(&local_ip,l_sockaddr);
    apr_sockaddr_ip_get(&remote_ip,r_sockaddr);
    rtsp_connection->client_ip = remote_ip;
    /* Human-readable id used in log messages: "local <-> remote". */
    rtsp_connection->id = apr_psprintf(pool,"%s:%hu <-> %s:%hu",
        local_ip,l_sockaddr->port,
        remote_ip,r_sockaddr->port);

    /* Register the socket for read events with the server's poller. */
    memset(&rtsp_connection->sock_pfd,0,sizeof(apr_pollfd_t));
    rtsp_connection->sock_pfd.desc_type = APR_POLL_SOCKET;
    rtsp_connection->sock_pfd.reqevents = APR_POLLIN;
    rtsp_connection->sock_pfd.desc.s = rtsp_connection->sock;
    rtsp_connection->sock_pfd.client_data = rtsp_connection;
    if(apt_poller_task_descriptor_add(server->task,&rtsp_connection->sock_pfd) != TRUE) {
        apt_log(RTSP_LOG_MARK,APT_PRIO_WARNING,"Failed to Add to Pollset %s",rtsp_connection->id);
        apr_socket_close(rtsp_connection->sock);
        apr_pool_destroy(pool);
        return FALSE;
    }

    apt_log(RTSP_LOG_MARK,APT_PRIO_NOTICE,"Accepted TCP Connection %s",rtsp_connection->id);
    rtsp_connection->session_table = apr_hash_make(rtsp_connection->pool);
    /* Streams are sized one byte short of the buffers — presumably to
     * leave room for a NUL terminator; confirm against the parser. */
    apt_text_stream_init(&rtsp_connection->rx_stream,rtsp_connection->rx_buffer,sizeof(rtsp_connection->rx_buffer)-1);
    apt_text_stream_init(&rtsp_connection->tx_stream,rtsp_connection->tx_buffer,sizeof(rtsp_connection->tx_buffer)-1);
    rtsp_connection->parser = rtsp_parser_create(rtsp_connection->pool);
    rtsp_connection->generator = rtsp_generator_create(rtsp_connection->pool);
    rtsp_connection->server = server;
    APR_RING_INSERT_TAIL(&server->connection_list,rtsp_connection,rtsp_server_connection_t,link);
    return TRUE;
}
/* This accepts a connection and allows us to handle the error codes better than
 * the previous code, while also making it more obvious. */
static apr_status_t beos_accept(void **accepted, ap_listen_rec *lr, apr_pool_t *ptrans)
{
    apr_socket_t *csd;
    apr_status_t status;
    int sockdes;

    *accepted = NULL;
    status = apr_socket_accept(&csd, lr->sd, ptrans);
    if (status == APR_SUCCESS) {
        *accepted = csd;
        apr_os_sock_get(&sockdes, csd);
        return status;
    }

    /* Interrupted accepts are returned as-is so the caller can retry. */
    if (APR_STATUS_IS_EINTR(status)) {
        return status;
    }
    /* This switch statement provides us with better error details.
     * NOTE(review): status (apr_status_t) is compared directly against
     * raw errno constants; this relies on APR returning errno values
     * unchanged on this platform — confirm before porting. */
    switch (status) {
#ifdef ECONNABORTED
    case ECONNABORTED:
#endif
#ifdef ETIMEDOUT
    case ETIMEDOUT:
#endif
#ifdef EHOSTUNREACH
    case EHOSTUNREACH:
#endif
#ifdef ENETUNREACH
    case ENETUNREACH:
#endif
        /* Transient per-connection errors: return the status below. */
        break;
#ifdef ENETDOWN
    case ENETDOWN:
        /*
         * When the network layer has been shut down, there
         * is not much use in simply exiting: the parent
         * would simply re-create us (and we'd fail again).
         * Use the CHILDFATAL code to tear the server down.
         * @@@ Martin's idea for possible improvement:
         * A different approach would be to define
         * a new APEXIT_NETDOWN exit code, the reception
         * of which would make the parent shutdown all
         * children, then idle-loop until it detected that
         * the network is up again, and restart the children.
         * Ben Hyde noted that temporary ENETDOWN situations
         * occur in mobile IP.
         */
        ap_log_error(APLOG_MARK, APLOG_EMERG, status, ap_server_conf,
                     "apr_socket_accept: giving up.");
        return APR_EGENERAL;
#endif /*ENETDOWN*/
    default:
        ap_log_error(APLOG_MARK, APLOG_ERR, status, ap_server_conf,
                     "apr_socket_accept: (client socket)");
        return APR_EGENERAL;
    }
    return status;
}
/* Verify that data written by a child process over an accepted TCP
 * connection arrives intact and with the expected length. */
static void test_recv(abts_case *tc, void *data)
{
    apr_status_t rv;
    apr_socket_t *listener;
    apr_socket_t *conn;
    apr_proc_t child;
    int proto;
    apr_size_t len = STRLEN;
    char buf[STRLEN];

    listener = setup_socket(tc);
    if (!listener)
        return;

    launch_child(tc, &child, "write", p);

    rv = apr_socket_accept(&conn, listener, p);
    APR_ASSERT_SUCCESS(tc, "Problem with receiving connection", rv);

    apr_socket_protocol_get(conn, &proto);
    ABTS_INT_EQUAL(tc, APR_PROTO_TCP, proto);

    memset(buf, 0, STRLEN);
    apr_socket_recv(conn, buf, &len);

    /* Make sure that the server received the data we sent */
    ABTS_STR_EQUAL(tc, DATASTR, buf);
    ABTS_SIZE_EQUAL(tc, strlen(buf), wait_child(tc, &child));

    rv = apr_socket_close(conn);
    APR_ASSERT_SUCCESS(tc, "Problem closing connected socket", rv);
    rv = apr_socket_close(listener);
    APR_ASSERT_SUCCESS(tc, "Problem closing socket", rv);
}
/* Send DATASTR to a child running in "read" mode and verify the child
 * reports having received exactly that many bytes. */
static void test_send(abts_case *tc, void *data)
{
    apr_status_t rv;
    apr_socket_t *listener;
    apr_socket_t *conn;
    apr_proc_t child;
    int proto;
    apr_size_t len;

    listener = setup_socket(tc);
    if (!listener)
        return;

    launch_child(tc, &child, "read", p);

    rv = apr_socket_accept(&conn, listener, p);
    APR_ASSERT_SUCCESS(tc, "Problem with receiving connection", rv);

    apr_socket_protocol_get(conn, &proto);
    ABTS_INT_EQUAL(tc, APR_PROTO_TCP, proto);

    len = strlen(DATASTR);
    apr_socket_send(conn, DATASTR, &len);

    /* Make sure that the client received the data we sent */
    ABTS_SIZE_EQUAL(tc, strlen(DATASTR), wait_child(tc, &child));

    rv = apr_socket_close(conn);
    APR_ASSERT_SUCCESS(tc, "Problem closing connected socket", rv);
    rv = apr_socket_close(listener);
    APR_ASSERT_SUCCESS(tc, "Problem closing socket", rv);
}
int main() { apr_initialize(); apr_pool_t *mempool; apr_sockaddr_t *socket_addr; apr_socket_t *socket; apr_pool_create( &mempool, NULL ); apr_sockaddr_info_get( &socket_addr, NULL, APR_INET, REPLY_PORT, 0, mempool ); apr_socket_create( &socket, socket_addr->family, SOCK_STREAM, APR_PROTO_TCP, mempool ); apr_socket_bind( socket, socket_addr ); apr_socket_listen( socket, SOMAXCONN ); apr_socket_t *accepted; apr_socket_accept( &accepted, socket, mempool ); int *replies = (int*)malloc( frl_reply_size ); apr_size_t len = frl_reply_size; do { apr_socket_recv( accepted, (char*)replies, &len ); int *iter_replies = replies+2; for ( int i = 0; i < 100; i++, iter_replies+=2 ) { std::cout<<*iter_replies<<" "<<*(iter_replies+1)<<std::endl; } std::cout<<"The End."<<std::endl; } while (1); apr_terminate(); return 0; }
/* Accept a pending connection on NSOCK.  Returns a heap-allocated
 * network_sock_t wrapper (as net_sock_t*) with its own APR pool,
 * mutex, and a short default timeout, or NULL on failure.  The caller
 * owns the returned object. */
net_sock_t *sock_accept(net_sock_t *nsock)
{
    int err;
    Net_timeout_t tm;
    network_sock_t *psock = (network_sock_t *)nsock;

    network_sock_t *sock = (network_sock_t *)malloc(sizeof(network_sock_t));
    assert(sock != NULL);
    memset(sock, 0, sizeof(network_sock_t));

    /* Each accepted socket gets its own pool so it can be destroyed
     * independently of the listener. */
    if (apr_pool_create(&(sock->mpool), NULL) != APR_SUCCESS) {
        free(sock);
        return(NULL);
    }

    /* Inherit the listener's TCP buffer sizing. */
    sock->tcpsize = psock->tcpsize;

    err = apr_socket_accept(&(sock->fd), psock->fd, sock->mpool);
    if (err != APR_SUCCESS) {
        apr_pool_destroy(sock->mpool);
        free(sock);
        sock = NULL;
        log_printf(0, "ERROR with apr_socket_accept err=%d\n", err);
    } else {
        apr_thread_mutex_create(&(sock->lock), APR_THREAD_MUTEX_DEFAULT,sock->mpool);

        //** Set the with a minimal timeout of 10ms
        set_net_timeout(&tm, 0, SOCK_DEFAULT_TIMEOUT);
        apr_socket_timeout_set(sock->fd, tm);
    }

    return(sock);
}
/* Thread entry point for the test server: listen on the configured
 * port, then loop accepting connections and dispatching each one to
 * request_process_callback until test_server_running is cleared.
 *
 * Fixes vs. the previous version:
 *  - bare `return;` statements in a function returning void* (the
 *    thread's return value would be indeterminate) now return NULL;
 *  - the magic status value 11 (Linux EAGAIN) is tested portably with
 *    APR_STATUS_IS_EAGAIN().
 */
static void* APR_THREAD_FUNC test_server_run(apr_thread_t *thd, void *data)
{
    server_thread_data *server_data = (server_thread_data*)data;
    apr_status_t rv;
    apr_pool_t *mp;
    apr_socket_t *s;/* listening socket */

    apr_pool_create(&mp, NULL);

    rv = do_listen(&s, mp, server_data->port);
    if (rv != APR_SUCCESS) {
        char errbuf[256];
        apr_strerror(rv, errbuf, sizeof(errbuf));
        printf("test server error listening: %d, %s\n", rv, errbuf);
        apr_pool_destroy(mp);
        apr_thread_exit(thd, rv);
        return NULL;
    }

    server_data->handle->test_server_running = 1;

    while (server_data->handle->test_server_running == 1) {
        apr_socket_t *ns;/* accepted socket */

        rv = apr_socket_accept(&ns, s, mp);
        if (APR_STATUS_IS_EAGAIN(rv)) {
            /* Non-blocking listener had nothing to accept; poll again. */
            continue;
        }
        if (rv != APR_SUCCESS) {
            char errbuf[256];
            apr_strerror(rv, errbuf, sizeof(errbuf));
            printf("test server error accepting: %d, %s\n", rv, errbuf);
            apr_pool_destroy(mp);
            apr_thread_exit(thd, rv);
            return NULL;
        }

        /* it is a good idea to specify socket options for the newly
         * accepted socket explicitly */
        apr_socket_opt_set(ns, APR_SO_NONBLOCK, 0);
        apr_socket_timeout_set(ns, 5000);

        rv = server_data->request_process_callback(ns, mp, server_data);
        apr_socket_close(ns);
        if (rv != APR_SUCCESS) {
            char errbuf[256];
            apr_strerror(rv, errbuf, sizeof(errbuf));
            printf("test server error processing: %d, %s\n", rv, errbuf);
            apr_pool_destroy(mp);
            apr_thread_exit(thd, rv);
            return NULL;
        }
    }

    apr_pool_destroy(mp);
    apr_thread_exit(thd, APR_SUCCESS);
    return NULL;
}
/* Accept one pending client on LSOCK and register it, in non-blocking
 * mode, with POLLSET using the caller-provided SERV_CTX as per-fd
 * client data.  Always returns TRUE; accept failures are silently
 * ignored.  NOTE(review): callers cannot distinguish success from a
 * failed accept — confirm this is intentional. */
static int do_accept(serv_ctx_t *serv_ctx, apr_pollset_t *pollset, apr_socket_t *lsock, apr_pool_t *mp)
{
    apr_socket_t *ns;/* accepted socket */
    apr_status_t rv = apr_socket_accept(&ns, lsock, mp);
    if (rv == APR_SUCCESS) {
        //serv_ctx_t *serv_ctx = apr_palloc(mp, sizeof(serv_ctx_t));
        /* Fresh connection: reset buffer levels and mark connected. */
        serv_ctx->up_buf_level = 0;
        serv_ctx->down_buf_level = 0;
        serv_ctx->channel_state = connected;
        /* Watch for read, write and hangup events on the new socket. */
        apr_pollfd_t pfd = { mp, APR_POLL_SOCKET, APR_POLLIN|APR_POLLOUT|APR_POLLHUP, 0, { NULL }, serv_ctx };
        pfd.desc.s = ns;
        serv_ctx->mp = mp;
        /* non-blocking socket. We can't expect that @ns inherits non-blocking mode from @lsock */
        apr_socket_opt_set(ns, APR_SO_NONBLOCK, 1);
        apr_socket_timeout_set(ns, 0);
        apr_pollset_add(pollset, &pfd);
        //printf("connected client to channel %d\n", serv_ctx->channel_number);
    }
    return TRUE;
}
/* Accept a pending connection on LSOCK, allocate a fresh per-client
 * context that starts out waiting for a request line, and register
 * the socket with POLLSET in non-blocking mode.  Always returns TRUE. */
int do_accept(apr_pollset_t *pollset, apr_socket_t *lsock, apr_pool_t *mp)
{
    apr_socket_t *ns;  /* accepted socket */
    apr_status_t rv = apr_socket_accept(&ns, lsock, mp);

    if (rv != APR_SUCCESS)
        return TRUE;

    serv_ctx_t *ctx = apr_palloc(mp, sizeof(serv_ctx_t));
    apr_pollfd_t pfd = { mp, APR_POLL_SOCKET, APR_POLLIN, 0, { NULL }, ctx };
    pfd.desc.s = ns;

    /* at first, we expect requests, so we poll APR_POLLIN event */
    ctx->status = SERV_RECV_REQUEST;
    ctx->cb_func = recv_req_cb;
    ctx->recv.is_firstline = TRUE;
    ctx->mp = mp;

    /*
     * non-blocking socket. We can't expect that @ns
     * inherits non-blocking mode from @lsock
     */
    apr_socket_opt_set(ns, APR_SO_NONBLOCK, 1);
    apr_socket_timeout_set(ns, 0);
    apr_pollset_add(pollset, &pfd);

    return TRUE;
}
/* The child runs in "read" mode but we never send anything, so it
 * must hit its receive timeout; verify the timeout exit code. */
static void test_timeout(abts_case *tc, void *data)
{
    apr_status_t rv;
    apr_socket_t *listener;
    apr_socket_t *conn;
    apr_proc_t child;
    int proto;
    int exitcode;

    listener = setup_socket(tc);
    if (!listener)
        return;

    launch_child(tc, &child, "read", p);

    rv = apr_socket_accept(&conn, listener, p);
    APR_ASSERT_SUCCESS(tc, "Problem with receiving connection", rv);

    apr_socket_protocol_get(conn, &proto);
    ABTS_INT_EQUAL(tc, APR_PROTO_TCP, proto);

    /* We didn't write any data, so make sure the child program returns
     * an error. */
    exitcode = wait_child(tc, &child);
    ABTS_INT_EQUAL(tc, SOCKET_TIMEOUT, exitcode);

    rv = apr_socket_close(conn);
    APR_ASSERT_SUCCESS(tc, "Problem closing connected socket", rv);
    rv = apr_socket_close(listener);
    APR_ASSERT_SUCCESS(tc, "Problem closing socket", rv);
}
/* Wait for the next client connection to come in from SOCK.  Allocate
 * the connection in a root pool from CONNECTION_POOLS and assign PARAMS.
 * Return the connection object in *CONNECTION.
 *
 * Use HANDLING_MODE for proper internal cleanup. */
static svn_error_t *
accept_connection(connection_t **connection, apr_socket_t *sock,
                  serve_params_t *params,
                  enum connection_handling_mode handling_mode,
                  apr_pool_t *pool)
{
    apr_status_t status;

    /* Non-standard pool handling.  The main thread never blocks to join
     * the connection threads so it cannot clean up after each one.  So
     * separate pools that can be cleared at thread exit are used. */
    apr_pool_t *connection_pool = svn_pool_create(pool);
    *connection = apr_pcalloc(connection_pool, sizeof(**connection));
    (*connection)->pool = connection_pool;
    (*connection)->params = params;
    (*connection)->ref_count = 1;

    do
    {
#ifdef WIN32
        if (winservice_is_stopping())
            exit(0);
#endif

        status = apr_socket_accept(&(*connection)->usock, sock,
                                   connection_pool);
        if (handling_mode == connection_mode_fork)
        {
            apr_proc_t proc;

            /* Collect any zombie child processes. */
            while (apr_proc_wait_all_procs(&proc, NULL, NULL, APR_NOWAIT,
                                           connection_pool) == APR_CHILD_DONE)
                ;
        }
    /* Retry when interrupted by a signal or when the client dropped
     * the connection before it could be accepted. */
    } while (APR_STATUS_IS_EINTR(status) ||
             APR_STATUS_IS_ECONNABORTED(status) ||
             APR_STATUS_IS_ECONNRESET(status));

    return status ? svn_error_wrap_apr(status,
                                       _("Can't accept client connection"))
                  : SVN_NO_ERROR;
}
/* Lua binding: accept a connection on the server socket at stack
 * index 1 and push a new client socket userdata (or nil plus error on
 * failure).
 *
 * Fix: the previous version dereferenced CLIENT before checking the
 * status returned by socket_alloc(), risking an invalid access when
 * allocation failed.  The client object is now only touched once
 * allocation is known to have succeeded. */
static int socket_accept(lua_State *L)
{
  lua_apr_socket *server, *client = NULL;
  apr_status_t status;

  server = socket_check(L, 1, 1);
  status = socket_alloc(L, &client);
  if (status == APR_SUCCESS) {
    /* The accepted socket inherits the listener's family/protocol. */
    client->family = server->family;
    client->protocol = server->protocol;
    status = apr_socket_accept(&client->handle, server->handle, client->pool);
  }
  socket_init(L, client);
  if (status != APR_SUCCESS)
    return push_error_status(L, status);
  return 1;
}
// Poll the listen socket for a pending plugin connection.  Returns
// true when a connection was accepted and a message pipe attached;
// false when nothing was pending or an error occurred.
bool LLPluginProcessParent::accept()
{
    bool result = false;
    apr_status_t status = APR_EGENERAL;
    apr_socket_t *new_socket = NULL;

    status = apr_socket_accept(
        &new_socket,
        mListenSocket->getSocket(),
        gAPRPoolp);

    if(status == APR_SUCCESS)
    {
//      llinfos << "SUCCESS" << llendl;
        // Success.  Create a message pipe on the new socket

        // we MUST create a new pool for the LLSocket, since it will take ownership of it and delete it in its destructor!
        apr_pool_t* new_pool = NULL;
        status = apr_pool_create(&new_pool, gAPRPoolp);
        // NOTE(review): the pool-creation status is not checked before
        // new_pool is used — confirm this cannot fail here.

        mSocket = LLSocket::create(new_socket, new_pool);
        new LLPluginMessagePipe(this, mSocket);

        result = true;
    }
    else if(APR_STATUS_IS_EAGAIN(status))
    {
//      llinfos << "EAGAIN" << llendl;
        // No incoming connections.  This is not an error.
        status = APR_SUCCESS;
    }
    else
    {
//      llinfos << "Error:" << llendl;
        // Some other error.
        ll_apr_warn_status(status);
        errorState();
    }

    return result;
}
/* Listen on 127.0.0.1:PORT and serve connections one at a time with
 * talkTalk() until an error occurs.
 *
 * Fix: the function previously returned APR_SUCCESS unconditionally
 * after the accept loop, masking accept and conversation failures
 * from the caller; it now propagates the last status. */
static apr_status_t glassToWall(apr_int16_t port, apr_pool_t *parent)
{
    apr_sockaddr_t *sockAddr;
    apr_socket_t *listener, *accepted;
    apr_status_t rv;

    rv = apr_socket_create(&listener, APR_INET, SOCK_STREAM, APR_PROTO_TCP,
                           parent);
    if (rv != APR_SUCCESS) {
        reportError("Unable to create socket", rv, parent);
        return rv;
    }

    rv = apr_sockaddr_info_get(&sockAddr, "127.0.0.1", APR_UNSPEC, port, 0,
                               parent);
    if (rv != APR_SUCCESS) {
        reportError("Unable to get socket info", rv, parent);
        apr_socket_close(listener);
        return rv;
    }

    if ((rv = apr_socket_bind(listener, sockAddr)) != APR_SUCCESS ||
        (rv = apr_socket_listen(listener, 5)) != APR_SUCCESS) {
        reportError("Unable to bind or listen to socket", rv, parent);
        apr_socket_close(listener);
        return rv;
    }

    for (;;) {
        rv = apr_socket_accept(&accepted, listener, parent);
        if (rv != APR_SUCCESS) {
            reportError("Error accepting on socket", rv, parent);
            break;
        }
        printf("\tAnswering connection\n");
        rv = talkTalk(accepted, parent);
        apr_socket_close(accepted);
        printf("\tConnection closed\n");
        if (rv != APR_SUCCESS)
            break;
    }

    apr_socket_close(listener);
    /* Was unconditionally APR_SUCCESS; report why the loop ended. */
    return rv;
}
/* Accept a client connection on ACC, switch it to non-blocking /
 * zero-timeout mode, allocate a per-client context, and run the
 * client state machine once with a synthetic pollfd so it can
 * register the socket with POLLSET itself. */
static void do_client_accept(apr_socket_t *acc, apr_pollset_t *pollset,
                             apr_pool_t *pool, lmSQL *lmdb)
{
    apr_socket_t *client = NULL;
    apr_status_t acc_err = apr_socket_accept(&client, acc, pool);
    if (acc_err) {
        APR_FAIL(acc_err);
        return;
    }

    apr_status_t opt_err = apr_socket_opt_set(client, APR_SO_NONBLOCK, 1);
    if (opt_err) {
        APR_FAIL(opt_err);
        apr_maybe_fail(apr_socket_close(client));
        return;
    }

    apr_status_t timeout_err = apr_socket_timeout_set(client, 0);
    if (timeout_err) {
        APR_FAIL(timeout_err);
        apr_maybe_fail(apr_socket_close(client));
        return;
    }

    /* Heap-allocated per-client state; presumably released by the
     * state machine when the connection ends — TODO confirm. */
    struct per_client *ctx = malloc(sizeof *ctx);
    if (ctx == NULL) {
        apr_maybe_fail(apr_socket_close(client));
        FAIL("can't malloc ctx!\n");
        return;
    }
    ctx->state = LM_S_INIT_CLIENT;
    ctx->lmdb = lmdb;

    /* Synthetic pollfd describing the new client; reqevents is 0 —
     * the state machine decides which events to request. */
    apr_pollfd_t fake_s;
    memset(&fake_s, 0, sizeof fake_s);
    fake_s.p = pool;
    fake_s.desc_type = APR_POLL_SOCKET;
    fake_s.desc.s = client;
    fake_s.reqevents = 0;
    fake_s.client_data = ctx;
    do_client_state_machine(&fake_s, pollset);
}
void handle_server_accept(void* arg) { server_t *svr = (server_t *) arg; apr_status_t status = APR_SUCCESS; apr_socket_t *ns = NULL; LOG_TRACE("on new connection"); status = apr_socket_accept(&ns, svr->comm->s, svr->comm->mp); // apr_socket_t *sock_listen = r->com.s; // LOG_INFO("accept on fd %x", sock_listen->socketdes); LOG_INFO("accept new connection"); if (status == APR_EMFILE) { LOG_ERROR("cannot open more file handles, please check system configurations."); SAFE_ASSERT(0); } else if (status == APR_ENFILE) { LOG_ERROR("cannot open more file handles, please check system configurations."); SAFE_ASSERT(0); } if (status != APR_SUCCESS) { LOG_ERROR("recvr accept error."); LOG_ERROR("%s", apr_strerror(status, calloc(100, 1), 100)); SAFE_ASSERT(status == APR_SUCCESS); } apr_socket_opt_set(ns, APR_SO_NONBLOCK, 1); apr_socket_opt_set(ns, APR_TCP_NODELAY, 1); // apr_socket_opt_set(ns, APR_SO_REUSEADDR, 1); sconn_t *sconn; sconn_create(&sconn, svr); apr_pollfd_t pfd = {svr->comm->mp, APR_POLL_SOCKET, APR_POLLIN, 0, {NULL}, NULL}; pfd.desc.s = ns; pfd.client_data = sconn->pjob; sconn->pjob->pfd = pfd; sconn->pjob->mgr = svr->pjob->mgr; poll_mgr_add_job(sconn->pjob->mgr, sconn->pjob); }
/* Handle a readable event on a listening outlet: accept the pending
 * connection, wrap it in a new socket outlet, and notify the process
 * that requested the accept with a {tcp_accepted, Listener, Socket}
 * tuple.  Clears the outlet's accepting flag in all cases. */
apr_status_t ol_listener_do_readable(outlet_t *self)
{
    apr_status_t rs;
    ol_listener_data_t *data = self->data;
    proc_t *proc;

    /* Only deliver if the process that called accept still exists. */
    proc = scheduler_lookup(data->teevm->scheduler, pid_serial(data->reply_to_pid));
    if (proc)
    {
        apr_socket_t *new_sock;
        outlet_t *new_outlet;
        term_t msg;
        apr_pool_t *p;

        apr_pool_create(&p, 0);
        rs = apr_socket_accept(&new_sock, data->sock, p);
        if (rs == 0)
            rs = apr_socket_opt_set(new_sock, APR_SO_NONBLOCK, 1);
        if (rs != 0)
        {
            apr_pool_destroy(p);
            return rs;
        }

        new_outlet = ol_socket_make(new_sock, 0);

        //put to poll ring
        outlet_mall_allot(data->teevm->mall, new_outlet);

        //messages delivered to caller of accept
        new_outlet->owner_in = new_outlet->owner_out = proc;

        msg = heap_tuple3(proc->heap, A_TCP_ACCEPTED, outlet_id(self), outlet_id(new_outlet));
        scheduler_new_local_mail(data->teevm->scheduler, proc, msg);
    }
    data->is_accepting = 0;
    return APR_SUCCESS;
}
// static LLSocket::ptr_t LLSocket::create(apr_status_t& status, LLSocket::ptr_t& listen_socket) { if (!listen_socket->getSocket()) { status = APR_ENOSOCKET; return LLSocket::ptr_t(); } LLSocket::ptr_t rv(new LLSocket); LL_DEBUGS() << "accepting socket" << LL_ENDL; status = apr_socket_accept(&rv->mSocket, listen_socket->getSocket(), rv->mPool()); if (status != APR_SUCCESS) { rv->mSocket = NULL; rv.reset(); return rv; } rv->mPort = PORT_EPHEMERAL; rv->setNonBlocking(); return rv; }
// static LLSocket::ptr_t LLSocket::create(apr_status_t& status, LLSocket::ptr_t& listen_socket) { LLMemType m1(LLMemType::MTYPE_IO_TCP); if (!listen_socket->getSocket()) { status = APR_ENOSOCKET; return LLSocket::ptr_t(); } LLSocket::ptr_t rv(new LLSocket); lldebugs << "accepting socket" << llendl; status = apr_socket_accept(&rv->mSocket, listen_socket->getSocket(), rv->mPool()); if (status != APR_SUCCESS) { rv->mSocket = NULL; rv.reset(); return rv; } rv->mPort = PORT_EPHEMERAL; rv->setNonBlocking(); return rv; }
/*
 *
 * Function: maintenance
 * Description: Receive commands from clients, or fall through once per
 *              listen timeout to perform periodic system maintenance.
 * Input:
 * OutPut:
 *
 * Return:
 * Other:
 *
 */
void CommandServer::maintenance( void )
{
    apr_status_t tStatus;
    apr_socket_t *pstCommand;
    apr_interval_time_t tTimeOut = 5000000; /* 5-second receive timeout */
    apr_size_t tRecvLen;

    apr_pool_create( &m_pstLocal, m_pstRoot );
    /* 1-second accept timeout so the loop can do maintenance work. */
    apr_socket_timeout_set( m_pstListen, 1000000 );
    while( true )
    {
        /* Recycle the per-iteration pool and command buffer. */
        apr_pool_clear( m_pstLocal );
        memset( m_aczCommand, 0, sizeof(m_aczCommand) );
        checkExitedSystem( );
        tStatus = apr_socket_accept( &pstCommand, m_pstListen, m_pstLocal );
        if( tStatus != APR_SUCCESS )
        {
            /* No client within the accept timeout; back off and retry. */
            apr_sleep( 1000000 );
            continue;
        }
        apr_socket_timeout_set( pstCommand, tTimeOut );
        tRecvLen = 2047;
        tStatus = apr_socket_recv( pstCommand, m_aczCommand, &tRecvLen );
        if( tStatus != APR_SUCCESS )
        {
            /* Log: "failed to receive command from client". */
            LOG4C(( LOG_WARN, "从客户端接收命令失败 %s\n", strerror(errno) ));
            apr_socket_close( pstCommand );
            continue;
        }
        doCommand( pstCommand );
        apr_socket_close( pstCommand );
    }
}
/* Accept one pending connection on the listener's socket and hand it
 * to the listener's accept callback.  A fresh pool is created for the
 * connection; it is destroyed again if either step fails. */
apr_status_t serf__process_listener(serf_listener_t *l)
{
    apr_socket_t *in;
    apr_pool_t *pool;
    apr_status_t rv;

    /* THIS IS NOT OPTIMAL */
    apr_pool_create(&pool, l->pool);

    rv = apr_socket_accept(&in, l->skt, pool);
    if (rv == APR_SUCCESS)
        rv = l->accept_func(l->ctx, l, l->accept_baton, in, pool);

    /* On any failure the connection pool is torn down; on success the
     * callback has taken ownership of it. */
    if (rv)
        apr_pool_destroy(pool);

    return rv;
}
/* OS/2 MPM child process main loop: attaches to the scoreboard and accept
 * mutex, spins up worker threads and a maintenance thread, then accepts
 * connections and hands them to workers via an OS/2 queue until shutdown,
 * max-requests, or a generation change.
 *
 * Fixes applied in review:
 *  - scoreboard pid scan checked the array element BEFORE the bounds check,
 *    reading one slot past parent[] when the pid was absent; conditions swapped.
 *  - last_poll_idx was reset every loop iteration and incremented twice per
 *    accept, breaking listener round-robin; it now persists across iterations
 *    and advances once per accepted connection.
 */
void ap_mpm_child_main(apr_pool_t *pconf)
{
    ap_listen_rec *lr = NULL;
    int requests_this_child = 0;
    int rv = 0;
    unsigned long ulTimes;
    int my_pid = getpid();
    ULONG rc, c;
    HQUEUE workq;
    apr_pollset_t *pollset;
    int num_listeners;
    TID server_maint_tid;
    void *sb_mem;

    /* Stop Ctrl-C/Ctrl-Break signals going to child processes */
    DosSetSignalExceptionFocus(0, &ulTimes);
    set_signals();

    /* Create pool for child */
    apr_pool_create(&pchild, pconf);
    ap_run_child_init(pchild, ap_server_conf);

    /* Create an event semaphore used to trigger other threads to shutdown */
    rc = DosCreateEventSem(NULL, &shutdown_event, 0, FALSE);
    if (rc) {
        ap_log_error(APLOG_MARK, APLOG_ERR, APR_FROM_OS_ERROR(rc),
                     ap_server_conf,
                     "unable to create shutdown semaphore, exiting");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    /* Gain access to the scoreboard. */
    rc = DosGetNamedSharedMem(&sb_mem, ap_scoreboard_fname,
                              PAG_READ|PAG_WRITE);
    if (rc) {
        ap_log_error(APLOG_MARK, APLOG_ERR, APR_FROM_OS_ERROR(rc),
                     ap_server_conf,
                     "scoreboard not readable in child, exiting");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_calc_scoreboard_size();
    ap_init_scoreboard(sb_mem);

    /* Gain access to the accpet mutex */
    rc = DosOpenMutexSem(NULL, &ap_mpm_accept_mutex);
    if (rc) {
        ap_log_error(APLOG_MARK, APLOG_ERR, APR_FROM_OS_ERROR(rc),
                     ap_server_conf,
                     "accept mutex couldn't be accessed in child, exiting");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    /* Find our pid in the scoreboard so we know what slot our parent
     * allocated us.
     * FIX: bounds check must precede the array access, otherwise we read
     * parent[HARD_SERVER_LIMIT] before terminating. */
    for (child_slot = 0;
         child_slot < HARD_SERVER_LIMIT &&
             ap_scoreboard_image->parent[child_slot].pid != my_pid;
         child_slot++);

    if (child_slot == HARD_SERVER_LIMIT) {
        ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf,
                     "child pid not found in scoreboard, exiting");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_my_generation = ap_scoreboard_image->parent[child_slot].generation;
    memset(ap_scoreboard_image->servers[child_slot], 0,
           sizeof(worker_score) * HARD_THREAD_LIMIT);

    /* Set up an OS/2 queue for passing connections & termination requests
     * to worker threads */
    rc = DosCreateQueue(&workq, QUE_FIFO,
                        apr_psprintf(pchild, "/queues/httpd/work.%d", my_pid));
    if (rc) {
        ap_log_error(APLOG_MARK, APLOG_ERR, APR_FROM_OS_ERROR(rc),
                     ap_server_conf,
                     "unable to create work queue, exiting");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    /* Create initial pool of worker threads */
    for (c = 0; c < ap_min_spare_threads; c++) {
        /* NOTE(review): worker creation is commented out in this snapshot;
         * confirm whether workers are started elsewhere. */
//        ap_scoreboard_image->servers[child_slot][c].tid = _beginthread(worker_main, NULL, 128*1024, (void *)c);
    }

    /* Start maintenance thread */
    server_maint_tid = _beginthread(server_maintenance, NULL, 32768, NULL);

    /* Set up poll */
    for (num_listeners = 0, lr = ap_listeners; lr; lr = lr->next) {
        num_listeners++;
    }

    apr_pollset_create(&pollset, num_listeners, pchild, 0);

    for (lr = ap_listeners; lr != NULL; lr = lr->next) {
        apr_pollfd_t pfd = { 0 };

        pfd.desc_type = APR_POLL_SOCKET;
        pfd.desc.s = lr->sd;
        pfd.reqevents = APR_POLLIN;
        pfd.client_data = lr;
        apr_pollset_add(pollset, &pfd);
    }

    /* Main connection accept loop.
     * FIX: last_poll_idx lives outside the loop so listener round-robin
     * state survives across accepted connections. */
    {
        int last_poll_idx = 0;

        do {
            apr_pool_t *pconn;
            worker_args_t *worker_args;

            apr_pool_create(&pconn, pchild);
            worker_args = apr_palloc(pconn, sizeof(worker_args_t));
            worker_args->pconn = pconn;

            if (num_listeners == 1) {
                rv = apr_socket_accept(&worker_args->conn_sd,
                                       ap_listeners->sd, pconn);
            }
            else {
                const apr_pollfd_t *poll_results;
                apr_int32_t num_poll_results;

                rc = DosRequestMutexSem(ap_mpm_accept_mutex,
                                        SEM_INDEFINITE_WAIT);

                if (shutdown_pending) {
                    DosReleaseMutexSem(ap_mpm_accept_mutex);
                    break;
                }

                rv = APR_FROM_OS_ERROR(rc);

                if (rv == APR_SUCCESS) {
                    rv = apr_pollset_poll(pollset, -1, &num_poll_results,
                                          &poll_results);
                    DosReleaseMutexSem(ap_mpm_accept_mutex);
                }

                if (rv == APR_SUCCESS) {
                    if (last_poll_idx >= num_listeners) {
                        last_poll_idx = 0;
                    }

                    /* FIX: advance the rotation exactly once (a stray second
                     * last_poll_idx++ previously skipped every other slot). */
                    lr = poll_results[last_poll_idx++].client_data;
                    rv = apr_socket_accept(&worker_args->conn_sd, lr->sd,
                                           pconn);
                }
            }

            if (rv != APR_SUCCESS) {
                if (!APR_STATUS_IS_EINTR(rv)) {
                    ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
                                 "apr_socket_accept");
                    clean_child_exit(APEXIT_CHILDFATAL);
                }
            }
            else {
                /* Hand the accepted connection to a worker thread. */
                DosWriteQueue(workq, WORKTYPE_CONN, sizeof(worker_args_t),
                              worker_args, 0);
                requests_this_child++;
            }

            if (ap_max_requests_per_child != 0 &&
                requests_this_child >= ap_max_requests_per_child)
                break;
        } while (!shutdown_pending &&
                 ap_my_generation ==
                     ap_scoreboard_image->global->running_generation);
    }

    ap_scoreboard_image->parent[child_slot].quiescing = 1;
    DosPostEventSem(shutdown_event);
    DosWaitThread(&server_maint_tid, DCWW_WAIT);

    if (is_graceful) {
        char someleft;

        /* tell our worker threads to exit */
        for (c = 0; c < HARD_THREAD_LIMIT; c++) {
            if (ap_scoreboard_image->servers[child_slot][c].status
                    != SERVER_DEAD) {
                DosWriteQueue(workq, WORKTYPE_EXIT, 0, NULL, 0);
            }
        }

        /* Poll until every worker slot reports SERVER_DEAD. */
        do {
            someleft = 0;

            for (c = 0; c < HARD_THREAD_LIMIT; c++) {
                if (ap_scoreboard_image->servers[child_slot][c].status
                        != SERVER_DEAD) {
                    someleft = 1;
                    DosSleep(1000);
                    break;
                }
            }
        } while (someleft);
    }
    else {
        /* Hard shutdown: drop queued work and kill remaining workers. */
        DosPurgeQueue(workq);

        for (c = 0; c < HARD_THREAD_LIMIT; c++) {
            if (ap_scoreboard_image->servers[child_slot][c].status
                    != SERVER_DEAD) {
                DosKillThread(
                    ap_scoreboard_image->servers[child_slot][c].tid);
            }
        }
    }

    apr_pool_destroy(pchild);
}
void* thread_socket_pipe_receiver(apr_thread_t* thd, void* data) { frl_socket_pipe* pipe = (frl_socket_pipe*)data; apr_status_t state; apr_socket_t* listen_sock; apr_socket_create(&listen_sock, pipe->sock_addr->family, SOCK_STREAM, APR_PROTO_TCP, pipe->sockpool); apr_socket_opt_set(listen_sock, APR_SO_NONBLOCK, 1); apr_socket_timeout_set(listen_sock, 0); apr_socket_opt_set(listen_sock, APR_SO_REUSEADDR, 1); pipe->recv_state = apr_socket_bind(listen_sock, pipe->sock_addr); F_ERROR_IF_RUN(APR_SUCCESS != pipe->recv_state, return NULL, "[frl_socket_pipe::thread_socket_pipe_receiver]: Socket Binding Error: %d\n", pipe->recv_state); pipe->recv_state = apr_socket_listen(listen_sock, SOMAXCONN); F_ERROR_IF_RUN(APR_SUCCESS != pipe->recv_state, return NULL, "[frl_socket_pipe::thread_socket_pipe_receiver]: Socket Listen Error: %d\n", pipe->recv_state); apr_uint32_t hash; apr_pollset_t* pollset; apr_pollset_create(&pollset, pipe->replicate+2, pipe->sockpool, 0); apr_pollfd_t pfd = { pipe->sockpool, APR_POLL_SOCKET, APR_POLLIN, 0, { NULL }, NULL }; pfd.desc.s = listen_sock; apr_pollset_add(pollset, &pfd); do { // the fun loop apr_int32_t total; const apr_pollfd_t* ret_pfd; pipe->recv_state = apr_pollset_poll(pollset, SOCKET_PIPE_POLL_TIMEOUT, &total, &ret_pfd); if (APR_SUCCESS == pipe->recv_state) { for (int i = 0; i < total; i++) { if (ret_pfd[i].desc.s == listen_sock) { apr_socket_t* accept_sock; state = apr_socket_accept(&accept_sock, listen_sock, pipe->sockpool); F_ERROR_IF_RUN(APR_SUCCESS != state, continue, "[frl_socket_pipe::thread_socket_pipe_receiver]: Socket Accept Error: %d\n", state); // accept connection, initiate recv frl_pipe_state_t* pipestate = (frl_pipe_state_t*)frl_slab_palloc(pipe->statepool); apr_pollfd_t pfd = { pipe->sockpool, APR_POLL_SOCKET, APR_POLLIN, 0, { NULL }, pipestate }; pipestate->state = FRL_PIPE_READ_HEADER_START; pipestate->reader = (char*)&pipestate->header; pipestate->offset = 0; pipestate->size = SIZEOF_FRL_PIPE_HEADER_T; pfd.desc.s = 
accept_sock; apr_socket_opt_set(accept_sock, APR_SO_NONBLOCK, 1); apr_socket_timeout_set(accept_sock, 0); apr_pollset_add(pollset, &pfd); } else { if (ret_pfd[i].rtnevents & APR_POLLIN) { frl_pipe_state_t* pipestate = (frl_pipe_state_t*)ret_pfd[i].client_data; apr_size_t len_a = pipestate->size-pipestate->offset; state = apr_socket_recv(ret_pfd[i].desc.s, pipestate->reader, &len_a); pipestate->offset += len_a; pipestate->reader += len_a; // read buffer to reader if ((pipestate->offset >= pipestate->size)||(APR_STATUS_IS_EAGAIN(state))) { pipestate->offset = pipestate->size; PIPE_STATE_TO_COMPLETE(pipestate->state); // read complete, move state to complete } else if ((APR_STATUS_IS_EOF(state))||(len_a == 0)) { apr_pollfd_t pfd = { pipe->sockpool, APR_POLL_SOCKET, APR_POLLIN, 0, { NULL }, pipestate }; pfd.desc.s = ret_pfd[i].desc.s; apr_pollset_remove(pollset, &pfd); frl_slab_pfree(pipestate); apr_socket_close(ret_pfd[i].desc.s); // remote error, close connection continue; } switch (pipestate->state) { case FRL_PIPE_READ_HEADER_COMPLETE: { // recv header (hash & size) pipestate->data.offset = 0; pipestate->data.size = pipestate->header.size; state = pipe->recv_before(&pipestate->data.buf, &pipestate->data.size); if (FRL_PROGRESS_IS_INTERRUPT(state)) { apr_pollfd_t pfd = { pipe->sockpool, APR_POLL_SOCKET, APR_POLLIN, 0, { NULL }, pipestate }; pfd.desc.s = ret_pfd[i].desc.s; apr_pollset_remove(pollset, &pfd); frl_slab_pfree(pipestate); apr_socket_close(ret_pfd[i].desc.s); continue; } pipestate->state = FRL_PIPE_READ_BLOCK_START; // start to read block (<= 4092 bytes each) pipestate->reader = pipestate->buffer; pipestate->offset = 0; if (pipestate->data.size < SIZEOF_FRL_PIPE_BLOCK_BUFFER) pipestate->size = pipestate->data.size+SIZEOF_FRL_PIPE_HEADER_T; else pipestate->size = SOCKET_PACKAGE_SIZE; break; } case FRL_PIPE_READ_BLOCK_COMPLETE: { // a block complete, move to data memcpy(pipestate->data.buf+pipestate->data.offset, &pipestate->block.start, 
pipestate->block.header.size); hash = hashlittle(&pipestate->block.start, pipestate->size-SIZEOF_FRL_PIPE_HEADER_T); if (hash != pipestate->block.header.hash) { // check the hash fingerprint of the block apr_pollfd_t pfd = { pipe->sockpool, APR_POLL_SOCKET, APR_POLLIN, 0, { NULL }, pipestate }; pfd.desc.s = ret_pfd[i].desc.s; apr_pollset_remove(pollset, &pfd); frl_slab_pfree(pipestate); apr_socket_close(ret_pfd[i].desc.s); continue; } pipestate->data.offset += pipestate->block.header.size; if (pipestate->data.offset >= pipestate->data.size) { // finish read, report state to remote apr_pollfd_t pfd = { pipe->sockpool, APR_POLL_SOCKET, APR_POLLIN, 0, { NULL }, pipestate }; pfd.desc.s = ret_pfd[i].desc.s; apr_pollset_remove(pollset, &pfd); hash = hashlittle(pipestate->data.buf, pipestate->data.size); if (hash != pipestate->header.hash) { // check hash fingerprint of all data frl_slab_pfree(pipestate); apr_socket_close(ret_pfd[i].desc.s); } else { pfd.reqevents = APR_POLLOUT; state = pipe->recv_after(pipestate->data.buf, pipestate->data.size); if (FRL_PROGRESS_IS_INTERRUPT(state)) { frl_slab_pfree(pipestate); apr_socket_close(ret_pfd[i].desc.s); } else { pipestate->state = FRL_PIPE_SEND_HEADER_START; pipestate->reader = (char*)&pipestate->header; pipestate->offset = 0; pipestate->size = SIZEOF_FRL_PIPE_HEADER_T; apr_pollset_add(pollset, &pfd); } } continue; } // to start read successor block pipestate->state = FRL_PIPE_READ_BLOCK_START; pipestate->reader = pipestate->buffer; pipestate->offset = 0; if (pipestate->data.size-pipestate->data.offset < SIZEOF_FRL_PIPE_BLOCK_BUFFER) pipestate->size = pipestate->data.size-pipestate->data.offset+SIZEOF_FRL_PIPE_HEADER_T; else pipestate->size = SOCKET_PACKAGE_SIZE; break; } default: break; } } else if (ret_pfd[i].rtnevents & APR_POLLOUT) { // send report information, basic header frl_pipe_state_t* pipestate = (frl_pipe_state_t*)ret_pfd[i].client_data; apr_size_t len_a = pipestate->size-pipestate->offset; state = 
apr_socket_send(ret_pfd[i].desc.s, pipestate->reader, &len_a); pipestate->offset += len_a; pipestate->reader += len_a; if ((pipestate->offset >= pipestate->size)||(APR_STATUS_IS_EAGAIN(state))) { pipestate->offset = pipestate->size; PIPE_STATE_TO_COMPLETE(pipestate->state); } else if ((APR_STATUS_IS_EOF(state))||(len_a == 0)) { apr_pollfd_t pfd = { pipe->sockpool, APR_POLL_SOCKET, APR_POLLOUT, 0, { NULL }, pipestate }; pfd.desc.s = ret_pfd[i].desc.s; apr_pollset_remove(pollset, &pfd); frl_slab_pfree(pipestate); apr_socket_close(ret_pfd[i].desc.s); continue; } switch (pipestate->state) { case FRL_PIPE_SEND_HEADER_COMPLETE: { // complete, return to listen state apr_pollfd_t pfd = { pipe->sockpool, APR_POLL_SOCKET, APR_POLLOUT, 0, { NULL }, pipestate }; pfd.desc.s = ret_pfd[i].desc.s; apr_pollset_remove(pollset, &pfd); pfd.reqevents = APR_POLLIN; pipestate->state = FRL_PIPE_DISABLED; pipestate->reader = 0; pipestate->offset = 0; pipestate->size = 0; apr_pollset_add(pollset, &pfd); break; } default: break; } } else { // other errors, close connection frl_pipe_state_t* pipestate = (frl_pipe_state_t*)ret_pfd[i].client_data; apr_pollfd_t pfd = { pipe->sockpool, APR_POLL_SOCKET, APR_POLLIN | APR_POLLOUT, 0, { NULL }, pipestate }; pfd.desc.s = ret_pfd[i].desc.s; apr_pollset_remove(pollset, &pfd); frl_slab_pfree(pipestate); apr_socket_close(ret_pfd[i].desc.s); } } } } else if (!APR_STATUS_IS_TIMEUP(pipe->recv_state)) {
/* Accept one pending connection from listener `lr` into *accepted (an
 * apr_socket_t*), allocated from `ptrans`.  Returns APR_SUCCESS with
 * *accepted set, APR_EINTR for retryable conditions, the raw status for
 * "benign" network errors (see switch below), or APR_EGENERAL for errors
 * considered fatal so the caller exits rather than spinning. */
AP_DECLARE(apr_status_t) ap_unixd_accept(void **accepted, ap_listen_rec *lr,
                                         apr_pool_t *ptrans)
{
    apr_socket_t *csd;
    apr_status_t status;
#ifdef _OSD_POSIX
    int sockdes;
#endif

    *accepted = NULL;
    status = apr_socket_accept(&csd, lr->sd, ptrans);
    if (status == APR_SUCCESS) {
        *accepted = csd;
#ifdef _OSD_POSIX
        /* BS2000: select()-based code elsewhere cannot handle fds beyond
         * FD_SETSIZE; refuse the connection and report EINTR so the caller
         * simply retries. */
        apr_os_sock_get(&sockdes, csd);
        if (sockdes >= FD_SETSIZE) {
            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ap_server_conf, APLOGNO(02176)
                         "new file descriptor %d is too large; you probably need "
                         "to rebuild Apache with a larger FD_SETSIZE "
                         "(currently %d)",
                         sockdes, FD_SETSIZE);
            apr_socket_close(csd);
            return APR_EINTR;
        }
#endif
        return APR_SUCCESS;
    }

    if (APR_STATUS_IS_EINTR(status)) {
        return status;
    }
    /* Our old behaviour here was to continue after accept()
     * errors. But this leads us into lots of troubles
     * because most of the errors are quite fatal. For
     * example, EMFILE can be caused by slow descriptor
     * leaks (say in a 3rd party module, or libc). It's
     * foolish for us to continue after an EMFILE. We also
     * seem to tickle kernel bugs on some platforms which
     * lead to never-ending loops here. So it seems best
     * to just exit in most cases.
     */
    switch (status) {
#if defined(HPUX11) && defined(ENOBUFS)
        /* On HPUX 11.x, the 'ENOBUFS, No buffer space available'
         * error occurs because the accept() cannot complete.
         * You will not see ENOBUFS with 10.20 because the kernel
         * hides any occurrence from being returned to user space.
         * ENOBUFS with 11.x's TCP/IP stack is possible, and could
         * occur intermittently. As a work-around, we are going to
         * ignore ENOBUFS.
         */
        case ENOBUFS:
#endif

#ifdef EPROTO
        /* EPROTO on certain older kernels really means
         * ECONNABORTED, so we need to ignore it for them.
         * See discussion in new-httpd archives nh.9701
         * search for EPROTO.
         *
         * Also see nh.9603, search for EPROTO:
         * There is potentially a bug in Solaris 2.x x<6,
         * and other boxes that implement tcp sockets in
         * userland (i.e. on top of STREAMS). On these
         * systems, EPROTO can actually result in a fatal
         * loop. See PR#981 for example. It's hard to
         * handle both uses of EPROTO.
         */
        case EPROTO:
#endif
#ifdef ECONNABORTED
        case ECONNABORTED:
#endif
        /* Linux generates the rest of these, other tcp
         * stacks (i.e. bsd) tend to hide them behind
         * getsockopt() interfaces. They occur when
         * the net goes sour or the client disconnects
         * after the three-way handshake has been done
         * in the kernel but before userland has picked
         * up the socket.
         */
#ifdef ECONNRESET
        case ECONNRESET:
#endif
#ifdef ETIMEDOUT
        case ETIMEDOUT:
#endif
#ifdef EHOSTUNREACH
        case EHOSTUNREACH:
#endif
#ifdef ENETUNREACH
        case ENETUNREACH:
#endif
        /* EAGAIN/EWOULDBLOCK can be returned on BSD-derived
         * TCP stacks when the connection is aborted before
         * we call connect, but only because our listener
         * sockets are non-blocking (AP_NONBLOCK_WHEN_MULTI_LISTEN)
         */
#ifdef EAGAIN
        case EAGAIN:
#endif
#ifdef EWOULDBLOCK
#if !defined(EAGAIN) || EAGAIN != EWOULDBLOCK
        case EWOULDBLOCK:
#endif
#endif
            /* Benign/transient: fall out of the switch and return the raw
             * status so the caller retries. */
            break;
#ifdef ENETDOWN
        case ENETDOWN:
            /*
             * When the network layer has been shut down, there
             * is not much use in simply exiting: the parent
             * would simply re-create us (and we'd fail again).
             * Use the CHILDFATAL code to tear the server down.
             * @@@ Martin's idea for possible improvement:
             * A different approach would be to define
             * a new APEXIT_NETDOWN exit code, the reception
             * of which would make the parent shutdown all
             * children, then idle-loop until it detected that
             * the network is up again, and restart the children.
             * Ben Hyde noted that temporary ENETDOWN situations
             * occur in mobile IP.
             */
            ap_log_error(APLOG_MARK, APLOG_EMERG, status, ap_server_conf, APLOGNO(02177)
                         "apr_socket_accept: giving up.");
            return APR_EGENERAL;
#endif /*ENETDOWN*/

        default:
            /* If the socket has been closed in ap_close_listeners()
             * by the restart/stop action, we may get EBADF.
             * Do not print an error in this case.
             */
            if (!lr->active) {
                ap_log_error(APLOG_MARK, APLOG_DEBUG, status, ap_server_conf, APLOGNO(02178)
                             "apr_socket_accept failed for inactive listener");
                return status;
            }
            ap_log_error(APLOG_MARK, APLOG_ERR, status, ap_server_conf, APLOGNO(02179)
                         "apr_socket_accept: (client socket)");
            return APR_EGENERAL;
    }
    return status;
}
/* Run the single-threaded HTTP file server loop: accept a client, send a
 * fixed 200 response with the configured file's contents via sendfile,
 * drain any request bytes the client sent, then shut the connection down.
 * Loops until server->stoprequested is set.
 *
 * Fix applied in review: the request-drain loop only exited on TIMEUP/EOF,
 * so any other recv error (e.g. connection reset) spun forever; it now
 * exits on any non-success status.
 *
 * Returns LT_HTTP_INVALID_ARG on bad input/setup, LT_HTTP_SUCCESS on a
 * requested stop. */
lt_http_status_t lt_http_server_run( lt_http_server_t * server )
{
    apr_pool_t * pool = NULL;
    apr_socket_t * client = NULL;
    char error[1025];
    memset( error, 0, 1025 );

    if( server == NULL ) return LT_HTTP_INVALID_ARG;

    /* prepare connection pool */
    apr_pool_create( &pool, server->pool );

    /* make the socket non-blocking */
    if( APR_SUCCESS != apr_socket_opt_set( server->socket,
                APR_SO_NONBLOCK, 1 ) ) {
        my_perror( "ERROR: apr_socket_opt_set failed with: " );
        return LT_HTTP_INVALID_ARG;
    }

    while( 1 ) {
        apr_status_t rv;

        /* bool reading should be atomic operation so no locking is needed */
        if( server->stoprequested ) {
            break;
        }

        /* clear pool memory (per-connection scratch space) */
        apr_pool_clear( pool );

        /* accept new connection; listener is non-blocking so EAGAIN just
         * means "nobody there yet" */
        rv = apr_socket_accept( &client, server->socket, pool );
        if( APR_STATUS_IS_EAGAIN( rv ) || APR_STATUS_IS_EINTR( rv ) ) {
            /* sleep for 100ms before accepting new client */
            apr_sleep( 100 * 1000 );
            continue;
        }
        if( APR_SUCCESS != rv ) {
            my_perror( "ERROR: apr_socket_accept failed with: " );
            continue;
        }

        /* determine client address (result currently unused beyond
         * validating the peer) */
        {
            apr_sockaddr_t * sa = NULL;
            char * ip = NULL;
            if( APR_SUCCESS != apr_socket_addr_get( &sa, APR_REMOTE,
                        client ) ) {
                my_perror( "ERROR: apr_socket_addr_get failed with: " );
                apr_socket_close( client );
                continue;
            }
            if( APR_SUCCESS != apr_sockaddr_ip_get( &ip, sa ) ) {
                my_perror( "ERROR: apr_sockaddr_ip_get failed with: " );
                apr_socket_close( client );
                continue;
            }
        }

        /* immediatelly start sending HTTP response headers */
        {
            /* NOTE(review): apr_ltoa takes a long; if finfo.size (apr_off_t)
             * can exceed LONG_MAX on this platform, consider apr_off_t_toa. */
            char * headers = apr_pstrcat( pool,
                    "HTTP/1.0 200 OK\r\n"
                    "Content-Length: ", apr_ltoa( pool, server->finfo.size ),
                    "\r\n",
                    "Content-Type: application/octet-stream;"
                    " charset=utf-8\r\n",
                    "Connection: Close\r\n",
                    "\r\n", NULL );
            apr_size_t headers_size = strlen( headers );

            if( APR_SUCCESS != apr_socket_send( client, headers,
                        &headers_size ) ) {
                my_perror( "ERROR: apr_socket_send failed with: " );
                apr_socket_close( client );
                continue;
            }
        }

        /* send file contents */
        {
            apr_off_t offset = 0;
            apr_size_t len = server->finfo.size;

            if( APR_SUCCESS != apr_socket_sendfile( client, server->file,
                        NULL, &offset, &len, 0 ) ) {
                my_perror( "ERROR: apr_socket_sendfile failed with: " );
                apr_socket_close( client );
                continue;
            }
        }

        /* read and discard all headers */
        {
            apr_status_t rv;

            /* set a 2 second timeout on the client socket for the drain */
            if( APR_SUCCESS != apr_socket_timeout_set( client,
                        2 * 1000 * 1000 ) ) {
                my_perror( "ERROR: apr_socket_timeout_set failed with: " );
                apr_socket_close( client );
                continue;
            }

            /* read all data until timeout, eof, or error, then proceed to
             * close.  FIX: break on ANY non-success status — previously only
             * TIMEUP/EOF exited, so e.g. ECONNRESET looped forever. */
            do {
                char buffer[1024];
                apr_size_t len = 1024;

                rv = apr_socket_recv( client, buffer, &len );
                if( APR_SUCCESS != rv ) {
                    break;
                }
            } while( 1 );
        }

        /* close our side of connection */
        if( APR_SUCCESS != apr_socket_shutdown( client,
                    APR_SHUTDOWN_WRITE ) ) {
            /* we actually don't care about errors arriving during shutdown
             * phase */
            apr_socket_close( client );
            continue;
        }

        /* close other side of connection */
        if( APR_SUCCESS != apr_socket_shutdown( client,
                    APR_SHUTDOWN_READ ) ) {
            /* we actually don't care about errors arriving during shutdown
             * phase */
            apr_socket_close( client );
            continue;
        }

        /* close socket */
        if( APR_SUCCESS != apr_socket_close( client ) ) {
            /* we actually don't care about errors arriving during shutdown
             * phase */
            continue;
        }
    }

    return LT_HTTP_SUCCESS;
}
/* Accept a pending TLS client connection on the module's listening socket,
 * make the new socket non-blocking, wrap it in an SSL object in server
 * (accept) mode, register it as a module input, and add it to the pollset.
 * On EAGAIN the listener is simply re-armed; on other errors the per-
 * connection pool is destroyed and the error rethrown.
 *
 * Fix applied in review: the SO_NONBLOCK/timeout pair was set twice with
 * identical arguments; the redundant second pair (and a commented-out
 * SSL_accept call) have been removed. */
static void im_ssl_accept(nx_module_t *module)
{
    nx_im_ssl_conf_t *imconf;
    apr_socket_t *sock;
    apr_sockaddr_t *sa;
    char *ipstr;
    nx_module_input_t *input;
    SSL *ssl;
    apr_pool_t *pool = NULL;
    apr_status_t rv;
    nx_exception_t e;

    log_debug("im_ssl_accept");

    imconf = (nx_im_ssl_conf_t *) module->config;

    /* Per-connection pool; owns the accepted socket and its input. */
    pool = nx_pool_create_child(module->pool);

    try
    {
        if ( (rv = apr_socket_accept(&sock, imconf->listensock, pool)) != APR_SUCCESS )
        {
            if ( APR_STATUS_IS_EAGAIN(rv) )
            {
                /* Spurious wakeup: re-arm the poll event and drop the pool. */
                nx_module_add_poll_event(module);
                apr_pool_destroy(pool);
            }
            else
            {
                throw(rv, "couldn't accept connection on %s:%u (statuscode: %d)",
                      imconf->host, imconf->port, rv);
            }
        }

        if ( rv == APR_SUCCESS )
        {
            /* Non-blocking with zero timeout: all I/O driven by the pollset. */
            CHECKERR_MSG(apr_socket_opt_set(sock, APR_SO_NONBLOCK, 1),
                         "couldn't set SO_NONBLOCK on accepted socket");
            CHECKERR_MSG(apr_socket_timeout_set(sock, 0),
                         "couldn't set socket timeout on accepted socket");
            CHECKERR_MSG(apr_socket_addr_get(&sa, APR_REMOTE, sock),
                         "couldn't get info on accepted socket");
            CHECKERR_MSG(apr_sockaddr_ip_get(&ipstr, sa),
                         "couldn't get IP of accepted socket");

            /* Keep listening for further clients. */
            nx_module_pollset_add_socket(module, imconf->listensock,
                                         APR_POLLIN | APR_POLLHUP);

            /* Wrap the socket; handshake completes lazily on first I/O. */
            ssl = nx_ssl_from_socket(&(imconf->ssl_ctx), sock);
            ASSERT(ssl != NULL);
            SSL_set_accept_state(ssl);

            input = nx_module_input_new(module, pool);
            input->desc_type = APR_POLL_SOCKET;
            input->desc.s = sock;
            input->inputfunc = imconf->inputfunc;
            ASSERT(input->inputfunc != NULL);

            nx_module_input_data_set(input, "ssl", ssl);
            CHECKERR_MSG(apr_socket_data_set(sock, input, "input", NULL),
                         "couldn't set data on socket");
            NX_DLIST_INSERT_TAIL(imconf->connections, input, link);
            nx_module_input_data_set(input, "recv_from_str", ipstr);
            nx_module_pollset_add_socket(module, sock, APR_POLLIN | APR_POLLHUP);
            log_info("SSL connection accepted from %s:%u", ipstr, sa->port);
        }
    }
    catch(e)
    {
        apr_pool_destroy(pool);
        rethrow(e);
    }
}
/* Verify that the local address of the accepted (server-side) socket and
 * the remote address seen by the connecting client render identically. */
static void test_get_addr(abts_case *tc, void *data)
{
    apr_status_t rv;
    apr_socket_t *listener, *server, *client;
    apr_sockaddr_t *server_addr, *client_addr;
    char local_str[128], remote_str[128];

    listener = setup_socket(tc);

    APR_ASSERT_SUCCESS(tc, "get local address of bound socket",
                       apr_socket_addr_get(&server_addr, APR_LOCAL, listener));

    rv = apr_socket_create(&client, server_addr->family, SOCK_STREAM,
                           APR_PROTO_TCP, p);
    APR_ASSERT_SUCCESS(tc, "create client socket", rv);

    APR_ASSERT_SUCCESS(tc, "enable non-block mode",
                       apr_socket_opt_set(client, APR_SO_NONBLOCK, 1));

    /* It is valid for a connect() on a socket with NONBLOCK set to
     * succeed (if the connection can be established synchronously),
     * but if it does, this test cannot proceed. */
    rv = apr_socket_connect(client, server_addr);
    if (rv == APR_SUCCESS) {
        apr_socket_close(listener);
        apr_socket_close(client);
        ABTS_NOT_IMPL(tc, "Cannot test if connect completes "
                      "synchronously");
        return;
    }
    if (!APR_STATUS_IS_EINPROGRESS(rv)) {
        apr_socket_close(listener);
        apr_socket_close(client);
        APR_ASSERT_SUCCESS(tc, "connect to listener", rv);
        return;
    }

    APR_ASSERT_SUCCESS(tc, "accept connection",
                       apr_socket_accept(&server, listener, p));

    {
        /* wait for writability */
        apr_pollfd_t pfd;
        int n;

        pfd.p = p;
        pfd.desc_type = APR_POLL_SOCKET;
        pfd.reqevents = APR_POLLOUT|APR_POLLHUP;
        pfd.desc.s = client;
        pfd.client_data = NULL;

        APR_ASSERT_SUCCESS(tc, "poll for connect completion",
                           apr_poll(&pfd, 1, &n, 5 * APR_USEC_PER_SEC));
    }

    APR_ASSERT_SUCCESS(tc, "get local address of server socket",
                       apr_socket_addr_get(&server_addr, APR_LOCAL, server));
    APR_ASSERT_SUCCESS(tc, "get remote address of client socket",
                       apr_socket_addr_get(&client_addr, APR_REMOTE, client));

    /* Both ends should format to the same "ip:port" string. */
    apr_snprintf(local_str, sizeof local_str, "%pI", server_addr);
    apr_snprintf(remote_str, sizeof remote_str, "%pI", client_addr);
    ABTS_STR_EQUAL(tc, local_str, remote_str);

    apr_socket_close(client);
    apr_socket_close(server);
    apr_socket_close(listener);
}