/*
 * Read up to len bytes from an SSL connection into buf.
 *
 * Thin wrapper around SSL_read() that logs diagnostics for the error
 * classes we care about and otherwise stays silent.  The raw SSL_read()
 * result is returned unchanged, so callers interpret it exactly as they
 * would a direct SSL_read() return value.
 */
int _SSL_recv (SSL * ssl, char *buf, int len)
{
	int num = SSL_read (ssl, buf, len);
	int code = SSL_get_error (ssl, num);

	if (code == SSL_ERROR_SSL) {
		/* Protocol-level failure: capture the OpenSSL error queue
		 * into err_buf and report it. */
		__SSL_fill_err_buf ("SSL_read");
		fprintf (stderr, "%s\n", err_buf);
	} else if (code == SSL_ERROR_SYSCALL) {
		/* I/O error from the underlying socket; EAGAIN-style
		 * conditions are expected on non-blocking sockets and are
		 * not worth reporting. */
		if (!would_block ())
			perror ("SSL_read/read");
	}
	/* SSL_ERROR_ZERO_RETURN (clean shutdown by the peer) and all other
	 * codes are intentionally ignored here. */

	return num;
}
/*
 * Accept one pending connection on a simple_server_socket.
 *
 * Scans the listening fds that have been flagged ready; on success the
 * accepted fd is made non-blocking and wrapped in a new socket object.
 * Returns NULL either on error (reported through e) or after registering
 * read interest and blocking the tasklet on the accepting wait list —
 * NOTE(review): callers presumably distinguish those two outcomes by
 * inspecting e; confirm against call sites.
 */
static struct socket *simple_server_socket_accept(struct server_socket *gs, struct tasklet *t, struct error *e)
{
	int fd;
	int i;
	struct socket *res = NULL;
	struct simple_server_socket *s = (struct simple_server_socket *)gs;
	assert(gs->ops == &simple_server_socket_ops);

	mutex_lock(&s->mutex);
	do {
		/* Try each listening fd that the event loop marked ready. */
		for (i = 0; i < s->n_fds; i++) {
			struct server_fd *sfd = &s->fds[i];
			if (!sfd->ready)
				continue;

			/* Clear sfd->ready before trying accept, to avoid a race with accept_handle_events. */
			sfd->ready = FALSE;
			fd = accept(sfd->fd, NULL, NULL);
			if (fd >= 0) {
				/* Got one! But there might be more to come. */
				sfd->ready = TRUE;
				if (fcntl(fd, F_SETFL, O_NONBLOCK) < 0) {
					/* Could not make the fd non-blocking; give
					 * it up rather than hand back a blocking
					 * socket. */
					error_errno(e, "fcntl");
					close(fd);
				}
				else {
					res = simple_socket_create(fd);
				}
				goto out;
			}
			else if (!would_block()) {
				/* Real accept failure (not EAGAIN-style). */
				error_errno(e, "accept");
				goto out;
			}
			/* accept would block: fall through and keep sfd->ready
			 * cleared — the event loop will set it again. */
		}

		/* No sockets ready, so wait. */
		for (i = 0; i < s->n_fds; i++)
			if (!watched_fd_set_interest(s->fds[i].watched_fd, WATCHED_FD_IN, e))
				goto out;
	} while (wait_list_down(&s->accepting, 1, t));
	/* wait_list_down returning false means the tasklet is now parked on
	 * the accepting list; resume here when woken. */
out:
	mutex_unlock(&s->mutex);
	return res;
}
// wait until it is possible to grab slot in monitor, // update last entered void pre_enter(C& obj, gu::Lock& lock) { assert(last_left_ <= last_entered_); const wsrep_seqno_t obj_seqno(obj.seqno()); while (would_block (obj_seqno)) // TODO: exit on error { obj.unlock(); lock.wait(cond_); obj.lock(); } if (last_entered_ < obj_seqno) last_entered_ = obj_seqno; }
// Begin an asynchronous connect to ep, invoking h on completion.
//
// The connect is attempted immediately; if it is merely in progress
// (would-block), completion is deferred until the socket becomes
// writeable.  Otherwise h is posted right away with the final result.
void stream_socket::async_connect(endpoint const &ep,event_handler const &h)
{
	if(!dont_block(h))
		return;

	system::error_code err;
	connect(ep,err);

	bool const in_progress = err && would_block(err);
	if(in_progress) {
		// Writeability will signal that the connection completed.
		async_connector::pointer waiter(new async_connector(h,this));
		on_writeable(waiter);
		return;
	}

	// Either connected synchronously or failed hard: report via the
	// event loop so h always runs from a posted context.
	event_binder::pointer completion(new event_binder( h,err ));
	get_io_service().post(completion);
}
/* write buffer to designated socket/file */ ssize_t write_buf( int fd, const char* data, const ssize_t len, FILE* log ) { ssize_t n = 0, nwr = 0, error = IO_ERR; int err = 0; for( n = 0; errno = 0, n < len ; ) { nwr = write( fd, &(data[n]), len - n ); if( nwr <= 0 ) { err = errno; if( EINTR == err ) { TRACE( (void)tmfprintf( log, "%s interrupted\n", __func__ ) ); continue; } else { if( would_block(err) ) error = IO_BLK; break; } } n += nwr; if( nwr != len ) { if( NULL != log ) { TRACE( (void)tmfprintf( log, "Fragment written %s[%ld:%ld]/[%ld] bytes\n", (len > n ? "P" : "F"), (long)nwr, (long)n, (long)len ) ); } } } if( nwr <= 0 ) { if( log ) { if (IO_BLK == error) (void)tmfprintf( log, "%s: socket time-out on write", __func__); else if( !no_fault(err) || g_uopt.is_verbose ) mperror( log, errno, "%s: write", __func__ ); } return error; } return n; }
/* read data chunk of designated size (or less) into buffer * (will *NOT* attempt to re-read if read less than expected * w/o interruption) */ ssize_t read_buf( int fd, char* data, const ssize_t len, FILE* log ) { ssize_t n = 0, nrd = 0, err = 0; for( n = 0; errno = 0, n < len ; ) { nrd = read( fd, &(data[n]), len - n ); if( nrd <= 0 ) { err = errno; if( EINTR == err ) { TRACE( (void)tmfprintf( log, "%s interrupted\n", __func__ ) ); errno = 0; continue; } else { break; } } n += nrd; /* if( nrd != len ) { if( NULL != log ) { TRACE( (void)tmfprintf( log, "Fragment read [%ld]/[%ld] bytes\n", (long)nrd, (long)len ) ); } } */ /* we only read as much as we can read at once (uninterrupted) */ break; } if( nrd < 0 ) { if( log ) { if( would_block(err) ) (void)tmfprintf( log, "%s: socket time-out on read", __func__); else if( !no_fault(err) || g_uopt.is_verbose ) mperror( log, errno, "%s: read", __func__ ); } } return n; }
/*
 * Walk the connector's remaining addrinfo list, starting a non-blocking
 * connect() attempt on each address until one is pending or succeeds.
 *
 * On an immediately-established connection the connecting wait list is
 * signalled.  On a would-block connect, write interest is registered so
 * connector_handle_events fires when the connection resolves.  When all
 * addresses are exhausted (or interest registration fails), the
 * accumulated error is left in c->err and all waiters are woken.
 */
static void start_connecting(struct connector *c)
{
	for (;;) {
		struct addrinfo *ai = c->next_addrinfo;
		if (!ai)
			break;

		/* If we have an existing connecting socket, dispose of it. */
		connector_close(c);

		c->next_addrinfo = ai->ai_next;
		/* Forget any error from the previous address attempt. */
		error_reset(&c->err);
		c->fd = make_socket(ai->ai_family, ai->ai_socktype, &c->err);
		if (c->fd < 0)
			continue;

		c->watched_fd = watched_fd_create(c->fd, connector_handle_events, c);
		if (connect(c->fd, ai->ai_addr, ai->ai_addrlen) >= 0) {
			/* Immediately connected. Not sure this can
			   actually happen. */
			wait_list_up(&c->connecting, 1);
			return;
		}
		else if (would_block()) {
			/* Writeability will indicate that the connection has
			 * been established. */
			if (!watched_fd_set_interest(c->watched_fd, WATCHED_FD_OUT, &c->err))
				/* Give up and propagate the error */
				break;

			return;
		}
		else {
			/* Hard connect failure; record it and try the
			 * next address. */
			error_errno(&c->err, "connect");
		}
	}

	/* Ran out of addresses to try, so we are done.  We should
	   have an error to report. */
	assert(!error_ok(&c->err));
	simple_socket_wake_all(&c->socket->base);
}
// Begin an asynchronous read of at most buffer's size, invoking h with
// the byte count and status.
//
// Unless forced to poll, an immediate read is attempted first; only a
// would-block result defers completion to readability notification.
void stream_socket::async_read_some(mutable_buffer const &buffer,io_handler const &h)
{
	if(!dont_block(h))
		return;
#ifdef BOOSTER_AIO_FORCE_POLL
	// Poll-only mode: always wait for readability before reading.
	reader_some::pointer op(new reader_some(h,buffer,this));
	on_readable(op);
#else
	system::error_code err;
	size_t count = read_some(buffer,err);

	if(err && would_block(err)) {
		// Nothing available yet: retry when the fd becomes readable.
		reader_some::pointer op(new reader_some(h,buffer,this));
		on_readable(op);
	}
	else {
		// Data (or a hard error) is ready now; post the completion so
		// h always runs from the event loop.
		io_binder::pointer completion(new io_binder( h,count,err ));
		get_io_service().post(completion);
	}
#endif
}
/*
 * Read up to len bytes from a simple_socket into buf.
 *
 * Returns the positive byte count on success, STREAM_END at EOF (or 0
 * for a zero-length request at EOF), STREAM_WAITING after parking the
 * tasklet on the reading wait list and registering read interest, or
 * STREAM_ERROR with e populated.
 */
static ssize_t simple_socket_read(struct stream *gs, void *buf, size_t len, struct tasklet *t, struct error *e)
{
	struct simple_socket *s = (struct simple_socket *)gs;
	ssize_t res;
	assert(((struct socket *)gs)->ops == &simple_socket_ops);

	mutex_lock(&s->mutex);
	/* fd < 0 means the socket has already been closed. */
	if (s->fd >= 0) {
		res = read(s->fd, buf, len);
		if (likely(res > 0))
			goto out;

		if (res == 0) {
			/* read() returning 0 is EOF, unless the caller asked
			 * for 0 bytes in the first place. */
			res = len != 0 ? STREAM_END : 0;
			goto out;
		}

		if (would_block()) {
			/* Park the tasklet first, then arm interest; if
			 * arming fails, fall through to the error path. */
			wait_list_wait(&s->reading, t);
			if (!watched_fd_set_interest(s->watched_fd, WATCHED_FD_IN, e))
				goto error;

			res = STREAM_WAITING;
			goto out;
		}

		error_errno(e, "read");
	}
	else {
		error_set(e, ERROR_INVALID, "socket_read: closed socket");
	}

error:
	res = STREAM_ERROR;
out:
	mutex_unlock(&s->mutex);
	return res;
}
/*
 * libevent callback: drain and dispatch incoming UDP packets on the
 * listener fd.
 *
 * Reads one datagram per pass into a freshly allocated network buffer,
 * hands it to handle_udp_packet(), then loops (via start_udp_cycle) up
 * to MAX_SINGLE_UDP_BATCH times to batch-process queued packets before
 * returning to the event loop.
 */
static void udp_server_input_handler(evutil_socket_t fd, short what, void* arg)
{
	int cycle = 0;

	dtls_listener_relay_server_type* server = (dtls_listener_relay_server_type*) arg;

	FUNCSTART;

	if (!(what & EV_READ)) {
		return;
	}

	ioa_network_buffer_handle *elem = NULL;

start_udp_cycle:

	/* Fresh buffer per datagram; ownership is parked in nd.nbh until
	 * it is consumed or deleted below. */
	elem = (ioa_network_buffer_handle *)ioa_network_buffer_allocate(server->e);

	server->sm.m.sm.nd.nbh = elem;
	server->sm.m.sm.nd.recv_ttl = TTL_IGNORE;
	server->sm.m.sm.nd.recv_tos = TOS_IGNORE;

	int slen = server->slen0;
	ssize_t bsize = 0;
	int flags = 0;

	/* Retry the recvfrom if interrupted by a signal. */
	do {
		bsize = recvfrom(fd, ioa_network_buffer_data(elem), ioa_network_buffer_get_capacity(), flags, (struct sockaddr*) &(server->sm.m.sm.nd.src_addr), (socklen_t*) &slen);
	} while (bsize < 0 && (errno == EINTR));

	/* Snapshot errno-derived conditions before anything else can
	 * clobber errno. */
	int conn_reset = is_connreset();
	int to_block = would_block();

	if (bsize < 0) {

		if(to_block) {
			/* Queue drained: free the unused buffer and return
			 * to the event loop. */
			ioa_network_buffer_delete(server->e, server->sm.m.sm.nd.nbh);
			server->sm.m.sm.nd.nbh = NULL;
			FUNCEND;
			return;
		}

#if defined(MSG_ERRQUEUE)
		//Linux
		/* Drain the socket error queue (e.g. pending ICMP errors)
		 * so the next recvfrom can succeed, then retry once. */
		int eflags = MSG_ERRQUEUE | MSG_DONTWAIT;
		static s08bits buffer[65535];
		u32bits errcode = 0;
		ioa_addr orig_addr;
		int ttl = 0;
		int tos = 0;

		udp_recvfrom(fd, &orig_addr, &(server->addr), buffer, (int) sizeof(buffer), &ttl, &tos, server->e->cmsg, eflags, &errcode);

		//try again...

		do {
			bsize = recvfrom(fd, ioa_network_buffer_data(elem), ioa_network_buffer_get_capacity(), flags, (struct sockaddr*) &(server->sm.m.sm.nd.src_addr), (socklen_t*) &slen);
		} while (bsize < 0 && (errno == EINTR));

		conn_reset = is_connreset();
		to_block = would_block();
#endif

		if(conn_reset) {
			/* Peer reset: drop the buffer and rebuild the
			 * listening socket. */
			ioa_network_buffer_delete(server->e, server->sm.m.sm.nd.nbh);
			server->sm.m.sm.nd.nbh = NULL;
			reopen_server_socket(server);
			FUNCEND;
			return;
		}
	}

	if(bsize<0) {
		/* Unexpected receive failure: log it (would-block and
		 * conn-reset cases were already handled above). */
		if(!to_block && !conn_reset) {
			int ern=errno;
			perror(__FUNCTION__);
			TURN_LOG_FUNC(TURN_LOG_LEVEL_ERROR, "%s: recvfrom error %d\n",__FUNCTION__,ern);
		}
		ioa_network_buffer_delete(server->e, server->sm.m.sm.nd.nbh);
		server->sm.m.sm.nd.nbh = NULL;
		FUNCEND;
		return;
	}

	if (bsize > 0) {
		ioa_network_buffer_set_size(elem, (size_t)bsize);
		server->sm.m.sm.s = server->udp_listen_s;
		int rc = handle_udp_packet(server, &(server->sm), server->e, server->ts);
		if(rc < 0) {
			if(eve(server->e->verbose)) {
				TURN_LOG_FUNC(TURN_LOG_LEVEL_ERROR, "Cannot handle UDP event\n");
			}
		}
	}

	/* Buffer lifetime ends here whether or not the packet was handled;
	 * NOTE(review): handle_udp_packet presumably does not retain nbh —
	 * confirm against its implementation. */
	ioa_network_buffer_delete(server->e, server->sm.m.sm.nd.nbh);
	server->sm.m.sm.nd.nbh = NULL;

	/* Batch more datagrams in this callback, bounded to keep the event
	 * loop responsive. */
	if(cycle++<MAX_SINGLE_UDP_BATCH)
		goto start_udp_cycle;

	FUNCEND;
}
/*
 * Predicate: is err a benign error that should not be reported as a
 * fault?  Covers peer-gone conditions (EPIPE, ECONNRESET) and any
 * errno that would_block() classifies as a would-block condition.
 * Returns 1 when benign, 0 otherwise.
 */
int no_fault(int err)
{
    switch (err) {
        case EPIPE:
        case ECONNRESET:
            return 1;
        default:
            return would_block(err) ? 1 : 0;
    }
}