int nn_efd_wait (struct nn_efd *self, int timeout) { int rc; struct timeval tv; int fd = self->r; if (nn_slow (fd < 0)) { return -EBADF; } FD_SET (fd, &self->fds); if (timeout >= 0) { tv.tv_sec = timeout / 1000; tv.tv_usec = timeout % 1000 * 1000; } rc = select (0, &self->fds, NULL, NULL, timeout >= 0 ? &tv : NULL); if (nn_slow (rc == SOCKET_ERROR)) { rc = nn_err_wsa_to_posix (WSAGetLastError ()); errno = rc; /* Treat these as a non-fatal errors, typically occuring when the socket is being closed from a separate thread during a blocking I/O operation. */ if (nn_fast (rc == EINTR || rc == ENOTSOCK)) return -EINTR; } wsa_assert (rc >= 0); if (nn_slow (rc == 0)) return -ETIMEDOUT; return 0; }
int nn_efd_wait (struct nn_efd *self, int timeout) { int rc; struct timeval tv; FD_SET (self->r, &self->fds); if (timeout >= 0) { tv.tv_sec = timeout / 1000; tv.tv_usec = timeout % 1000 * 1000; } rc = select (0, &self->fds, NULL, NULL, timeout >= 0 ? &tv : NULL); if (nn_slow (rc == SOCKET_ERROR)) { rc = nn_err_wsa_to_posix (WSAGetLastError ()); errno = rc; if (nn_slow (rc == EINTR || rc == ENOTSOCK)) return -EINTR; } errno_assert (rc >= 0); if (nn_slow (rc == 0)) return -ETIMEDOUT; return 0; }
int nn_poll (struct nn_pollfd *fds, int nfds, int timeout) { int rc; int i; fd_set fdset; SOCKET fd; int res; size_t sz; struct timeval tv; /* Fill in the fdset, as appropriate. */ FD_ZERO (&fdset); for (i = 0; i != nfds; ++i) { if (fds [i].events & NN_POLLIN) { sz = sizeof (fd); rc = nn_getsockopt (fds [i].fd, NN_SOL_SOCKET, NN_RCVFD, &fd, &sz); if (nn_slow (rc < 0)) { errno = -rc; return -1; } nn_assert (sz == sizeof (fd)); FD_SET (fd, &fdset); } if (fds [i].events & NN_POLLOUT) { sz = sizeof (fd); rc = nn_getsockopt (fds [i].fd, NN_SOL_SOCKET, NN_SNDFD, &fd, &sz); if (nn_slow (rc < 0)) { errno = -rc; return -1; } nn_assert (sz == sizeof (fd)); FD_SET (fd, &fdset); } } /* Do the polling itself. */ tv.tv_sec = timeout / 1000; tv.tv_usec = timeout % 1000 * 1000; rc = select (-1, &fdset, NULL, NULL, &tv); if (nn_slow (rc == 0)) return 0; if (nn_slow (rc == SOCKET_ERROR)) { errno = nn_err_wsa_to_posix (WSAGetLastError ()); return -1; } /* Move the results from fdset to the nanomsg pollset. */ res = 0; for (i = 0; i != nfds; ++i) { fds [i].revents = 0; if (fds [i].events & NN_POLLIN) { sz = sizeof (fd); rc = nn_getsockopt (fds [i].fd, NN_SOL_SOCKET, NN_RCVFD, &fd, &sz); if (nn_slow (rc < 0)) { errno = -rc; return -1; } nn_assert (sz == sizeof (fd)); if (FD_ISSET (fd, &fdset)) fds [i].revents |= NN_POLLIN; } if (fds [i].events & NN_POLLOUT) { sz = sizeof (fd); rc = nn_getsockopt (fds [i].fd, NN_SOL_SOCKET, NN_SNDFD, &fd, &sz); if (nn_slow (rc < 0)) { errno = -rc; return -1; } nn_assert (sz == sizeof (fd)); if (FD_ISSET (fd, &fdset)) fds [i].revents |= NN_POLLOUT; } if (fds [i].revents) ++res; } return res; }
/*  Wait until the efd is signaled or the timeout (in ms) expires.
    Returns 0 when signaled, -ETIMEDOUT on timeout, -EBADF if the efd
    was closed, and -EINTR if the socket is closed concurrently.

    Unlike the simpler variants, this one never blocks in select for
    more than 100 ms at a time: it loops with short select timeouts so
    that a concurrent close of self->r (which select would otherwise
    sleep through) is noticed within 100 ms. */
int nn_efd_wait (struct nn_efd *self, int timeout)
{
    int rc;
    struct timeval tv;
    SOCKET fd = self->r;
    uint64_t expire;

    /*  Translate the relative timeout into an absolute deadline.
        For timeout <= 0 the timeout value itself is stored in expire:
        0 means "poll once and return", and -1 becomes (uint64_t)-1,
        the "wait forever" sentinel handled in the switch below.
        NOTE(review): other negative values also land in the default
        branch as huge deadlines — presumably callers only pass -1;
        verify against call sites. */
    if (timeout > 0) {
        expire = nn_clock_ms() + timeout;
        tv.tv_sec = timeout / 1000;
        tv.tv_usec = timeout % 1000 * 1000;
    } else {
        expire = timeout;
    }

    for (;;) {
        /*  Re-check on every iteration: another thread may close the
            efd while we are looping. */
        if (nn_slow (fd == INVALID_SOCKET)) {
            return -EBADF;
        }
        FD_SET (fd, &self->fds);

        /*  Choose the select timeout for this iteration. */
        switch (expire) {
        case 0:
            /*  Non-blocking poll. */
            tv.tv_sec = 0;
            tv.tv_usec = 100000;
            break;
        case (uint64_t)-1:
            /*  Infinite wait: still wake every 100 ms to notice a
                concurrent close. */
            tv.tv_sec = 0;
            tv.tv_usec = 100000;
            break;
        default:
            /*  Finite deadline: wait for the remaining time, capped at
                100 ms per iteration. */
            timeout = (int)(expire - nn_clock_ms());
            if (timeout < 0) {
                return -ETIMEDOUT;
            }
            if (timeout > 100) {
                tv.tv_sec = 0;
                tv.tv_usec = 100000;
            } else {
                tv.tv_sec = timeout / 1000;
                tv.tv_usec = timeout % 1000 * 1000;
            }
        }

        /*  On Windows the first argument to select is ignored. */
        rc = select (0, &self->fds, NULL, NULL, &tv);

        if (nn_slow (rc == SOCKET_ERROR)) {
            rc = nn_err_wsa_to_posix (WSAGetLastError ());
            errno = rc;

            /*  Treat these as a non-fatal errors, typically occuring when the
                socket is being closed from a separate thread during a blocking
                I/O operation. */
            if (rc == EINTR || rc == ENOTSOCK)
                return -EINTR;
        } else if (rc == 0) {
            /*  select timed out: either the caller's deadline passed or
                this was just the 100 ms re-check tick — loop again. */
            if (expire == 0)
                return -ETIMEDOUT;
            if ((expire != (uint64_t)-1) && (expire < nn_clock_ms())) {
                return -ETIMEDOUT;
            }
            continue;
        }

        wsa_assert (rc >= 0);
        return 0;
    }
}