zmq::pollset_t::handle_t zmq::pollset_t::add_fd (fd_t fd_,
                                                 i_poll_events *events_)
{
    poll_entry_t *pe = new (std::nothrow) poll_entry_t;
    alloc_assert (pe);

    pe->fd = fd_;
    pe->flag_pollin = false;
    pe->flag_pollout = false;
    pe->events = events_;

    struct poll_ctl pc;
    pc.fd = fd_;
    pc.cmd = PS_ADD;
    pc.events = 0;

    int rc = pollset_ctl (pollset_fd, &pc, 1);
    errno_assert (rc != -1);

    // Increase the load metric of the thread.
    adjust_load (1);

    if (fd_ >= fd_table.size ()) {
        fd_table.resize (fd_ + 1, NULL);
    }

    fd_table [fd_] = pe;
    return pe;
}

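Note that PS_ADD registers the fd with no events; the handle is armed later. A minimal sketch of how that arming can look on AIX, assuming the pollset_fd and poll_entry_t members used above and the pollset PS_MOD command, which ORs new events into an existing registration (this is an illustration, not verbatim library code):

// Sketch only: arm the handle for input via the AIX pollset API.
// Assumes the members shown in add_fd above.
void zmq::pollset_t::set_pollin (handle_t handle_)
{
    poll_entry_t *pe = (poll_entry_t *) handle_;
    if (!pe->flag_pollin) {
        struct poll_ctl pc;
        pc.fd = pe->fd;
        pc.cmd = PS_MOD;     // modify the registration made with PS_ADD
        pc.events = POLLIN;
        int rc = pollset_ctl (pollset_fd, &pc, 1);
        errno_assert (rc != -1);
        pe->flag_pollin = true;
    }
}
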
zmq::poll_t::handle_t zmq::poll_t::add_fd (fd_t fd_, i_poll_events *events_)
{
    check_thread ();
    zmq_assert (fd_ != retired_fd);

    // If the file descriptor table is too small, expand it.
    fd_table_t::size_type sz = fd_table.size ();
    if (sz <= (fd_table_t::size_type) fd_) {
        fd_table.resize (fd_ + 1);
        while (sz != (fd_table_t::size_type) (fd_ + 1)) {
            fd_table [sz].index = retired_fd;
            ++sz;
        }
    }

    pollfd pfd = {fd_, 0, 0};
    pollset.push_back (pfd);
    zmq_assert (fd_table [fd_].index == retired_fd);

    fd_table [fd_].index = pollset.size () - 1;
    fd_table [fd_].events = events_;

    // Increase the load metric of the thread.
    adjust_load (1);

    return fd_;
}

zmq::devpoll_t::handle_t zmq::devpoll_t::add_fd (fd_t fd_,
                                                 i_poll_events *reactor_)
{
    // If the file descriptor table is too small, expand it.
    fd_table_t::size_type sz = fd_table.size ();
    if (sz <= (fd_table_t::size_type) fd_) {
        fd_table.resize (fd_ + 1);
        while (sz != (fd_table_t::size_type) (fd_ + 1)) {
            fd_table [sz].valid = false;
            ++sz;
        }
    }

    zmq_assert (!fd_table [fd_].valid);

    fd_table [fd_].events = 0;
    fd_table [fd_].reactor = reactor_;
    fd_table [fd_].valid = true;
    fd_table [fd_].accepted = false;

    devpoll_ctl (fd_, 0);
    pending_list.push_back (fd_);

    // Increase the load metric of the thread.
    adjust_load (1);

    return fd_;
}

void zmq::select_t::rm_fd (handle_t handle_)
{
    // Mark the descriptor as retired.
    fd_set_t::iterator it;
    for (it = fds.begin (); it != fds.end (); ++it)
        if (it->fd == handle_)
            break;
    zmq_assert (it != fds.end ());
    it->fd = retired_fd;
    retired = true;

    // Stop polling on the descriptor.
    FD_CLR (handle_, &source_set_in);
    FD_CLR (handle_, &source_set_out);
    FD_CLR (handle_, &source_set_err);

    // Discard all events generated on this file descriptor.
    FD_CLR (handle_, &readfds);
    FD_CLR (handle_, &writefds);
    FD_CLR (handle_, &exceptfds);

    // Adjust the maxfd attribute if we have removed the
    // highest-numbered file descriptor.
    if (handle_ == maxfd) {
        maxfd = retired_fd;
        for (fd_set_t::iterator it = fds.begin (); it != fds.end (); ++it)
            if (it->fd > maxfd)
                maxfd = it->fd;
    }

    // Decrease the load metric of the thread.
    adjust_load (-1);
}

void zmq::select_t::rm_fd (handle_t handle_)
{
#if defined ZMQ_HAVE_WINDOWS
    u_short family = get_fd_family (handle_);
    wsa_assert (family != AF_UNSPEC);
    family_entries_t::iterator family_entry_it = family_entries.find (family);
    family_entry_t &family_entry = family_entry_it->second;

    if (family_entry_it != current_family_entry_it) {
        // This family is not currently being iterated over, so its entry
        // can safely be erased in place; the loop will not have to
        // re-verify its content later.
        fd_entries_t::iterator fd_entry_it;
        for (fd_entry_it = family_entry.fd_entries.begin ();
             fd_entry_it != family_entry.fd_entries.end (); ++fd_entry_it)
            if (fd_entry_it->fd == handle_)
                break;
        zmq_assert (fd_entry_it != family_entry.fd_entries.end ());

        family_entry.fd_entries.erase (fd_entry_it);
        family_entry.fds_set.remove_fd (handle_);
    } else {
        // Otherwise, mark the removed entry as retired. It will be cleaned
        // up at the end of the iteration. See zmq::select_t::loop.
        fd_entries_t::iterator fd_entry_it;
        for (fd_entry_it = family_entry.fd_entries.begin ();
             fd_entry_it != family_entry.fd_entries.end (); ++fd_entry_it)
            if (fd_entry_it->fd == handle_)
                break;
        zmq_assert (fd_entry_it != family_entry.fd_entries.end ());

        fd_entry_it->fd = retired_fd;
        family_entry.fds_set.remove_fd (handle_);
        family_entry.retired = true;
    }
#else
    fd_entries_t::iterator fd_entry_it;
    for (fd_entry_it = fd_entries.begin (); fd_entry_it != fd_entries.end ();
         ++fd_entry_it)
        if (fd_entry_it->fd == handle_)
            break;
    zmq_assert (fd_entry_it != fd_entries.end ());

    fd_entry_it->fd = retired_fd;
    fds_set.remove_fd (handle_);

    if (handle_ == maxfd) {
        maxfd = retired_fd;
        for (fd_entry_it = fd_entries.begin ();
             fd_entry_it != fd_entries.end (); ++fd_entry_it)
            if (fd_entry_it->fd > maxfd)
                maxfd = fd_entry_it->fd;
    }

    retired = true;
#endif
    adjust_load (-1);
}

void zmq::devpoll_t::rm_fd (handle_t handle_)
{
    zmq_assert (fd_table [handle_].valid);

    devpoll_ctl (handle_, POLLREMOVE);
    fd_table [handle_].valid = false;

    // Decrease the load metric of the thread.
    adjust_load (-1);
}

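Both devpoll functions funnel through devpoll_ctl. A minimal sketch of it, assuming a devpoll_fd member opened on /dev/poll: the driver takes registration changes as pollfd records written to that descriptor, with POLLREMOVE dropping the fd (illustration only, not verbatim library code):

// Sketch only: register or deregister an fd by writing a pollfd
// record to the /dev/poll driver. Assumes a devpoll_fd member.
void zmq::devpoll_t::devpoll_ctl (fd_t fd_, short events_)
{
    struct pollfd pfd = {fd_, events_, 0};
    ssize_t rc = write (devpoll_fd, &pfd, sizeof pfd);
    zmq_assert (rc == (ssize_t) sizeof pfd);
}
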
void zmq::epoll_t::rm_fd (handle_t handle_)
{
    poll_entry_t *pe = (poll_entry_t *) handle_;
    int rc = epoll_ctl (epoll_fd, EPOLL_CTL_DEL, pe->fd, &pe->ev);
    errno_assert (rc != -1);
    pe->fd = retired_fd;
    retired.push_back (pe);

    // Decrease the load metric of the thread.
    adjust_load (-1);
}

void zmq::kqueue_t::rm_fd (handle_t handle_)
{
    poll_entry_t *pe = (poll_entry_t *) handle_;
    if (pe->flag_pollin)
        kevent_delete (pe->fd, EVFILT_READ);
    if (pe->flag_pollout)
        kevent_delete (pe->fd, EVFILT_WRITE);
    pe->fd = retired_fd;
    retired.push_back (pe);

    // Decrease the load metric of the thread.
    adjust_load (-1);
}

void zmq::poll_t::rm_fd (handle_t handle_)
{
    fd_t index = fd_table [handle_].index;
    zmq_assert (index != retired_fd);

    // Mark the fd as unused.
    pollset [index].fd = retired_fd;
    fd_table [handle_].index = retired_fd;
    retired = true;

    // Decrease the load metric of the thread.
    adjust_load (-1);
}

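rm_fd only marks entries as retired; compaction happens later, outside the poll () dispatch, so that the indices stored in fd_table stay valid while events are being delivered. A sketch of that cleanup pass, assuming pollset is a std::vector<pollfd> typedef'd pollset_t, together with the fd_table and retired flag used above:

// Sketch only: compact the pollset after a dispatch pass and re-point
// the fd_table indices at the surviving entries.
if (retired) {
    pollset_t::size_type i = 0;
    while (i < pollset.size ()) {
        if (pollset [i].fd == retired_fd)
            pollset.erase (pollset.begin () + i);
        else {
            fd_table [pollset [i].fd].index = i;
            i++;
        }
    }
    retired = false;
}
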
zmq::kqueue_t::handle_t zmq::kqueue_t::add_fd (fd_t fd_,
                                               i_poll_events *reactor_)
{
    poll_entry_t *pe = new (std::nothrow) poll_entry_t;
    zmq_assert (pe != NULL);

    pe->fd = fd_;
    pe->flag_pollin = false;
    pe->flag_pollout = false;
    pe->reactor = reactor_;

    // Increase the load metric of the thread.
    adjust_load (1);

    return pe;
}

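Unlike the epoll variant, kqueue's add_fd makes no kernel call: registration is deferred until the handle is armed via set_pollin/set_pollout. The kevent_delete used by rm_fd above, and its kevent_add counterpart, can look roughly like this (a sketch assuming a kqueue_fd member, not verbatim library code):

// Sketch only: one-shot changelist submissions to the kqueue.
void zmq::kqueue_t::kevent_add (fd_t fd_, short filter_, void *udata_)
{
    struct kevent ev;
    EV_SET (&ev, fd_, filter_, EV_ADD, 0, 0, udata_);
    int rc = kevent (kqueue_fd, &ev, 1, NULL, 0, NULL);
    errno_assert (rc != -1);
}

void zmq::kqueue_t::kevent_delete (fd_t fd_, short filter_)
{
    struct kevent ev;
    EV_SET (&ev, fd_, filter_, EV_DELETE, 0, 0, 0);
    int rc = kevent (kqueue_fd, &ev, 1, NULL, 0, NULL);
    errno_assert (rc != -1);
}
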
void zmq::pollset_t::rm_fd (handle_t handle_)
{
    poll_entry_t *pe = (poll_entry_t *) handle_;

    struct poll_ctl pc;
    pc.fd = pe->fd;
    pc.cmd = PS_DELETE;
    pc.events = 0;
    int rc = pollset_ctl (pollset_fd, &pc, 1);
    errno_assert (rc != -1);

    fd_table [pe->fd] = NULL;

    pe->fd = retired_fd;
    retired.push_back (pe);

    // Decrease the load metric of the thread.
    adjust_load (-1);
}

zmq::devpoll_t::handle_t zmq::devpoll_t::add_fd (fd_t fd_,
                                                 i_poll_events *reactor_)
{
    zmq_assert (!fd_table [fd_].valid);

    fd_table [fd_].events = 0;
    fd_table [fd_].reactor = reactor_;
    fd_table [fd_].valid = true;
    fd_table [fd_].accepted = false;

    devpoll_ctl (fd_, 0);
    pending_list.push_back (fd_);

    // Increase the load metric of the thread.
    adjust_load (1);

    return fd_;
}

zmq::epoll_t::handle_t zmq::epoll_t::add_fd (fd_t fd_, i_poll_events *events_)
{
    poll_entry_t *pe = new (std::nothrow) poll_entry_t;
    zmq_assert (pe != NULL);

    // The memset is not strictly needed; it stops debugging tools from
    // complaining about the use of uninitialised memory.
    memset (pe, 0, sizeof (poll_entry_t));

    pe->fd = fd_;
    pe->ev.events = 0;
    pe->ev.data.ptr = pe;
    pe->events = events_;

    int rc = epoll_ctl (epoll_fd, EPOLL_CTL_ADD, fd_, &pe->ev);
    errno_assert (rc != -1);

    // Increase the load metric of the thread.
    adjust_load (1);

    return pe;
}

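Storing the entry itself in ev.data.ptr is what makes dispatch O(1): every event returned by epoll_wait carries its poll_entry_t back. A sketch of the consuming side, assuming a max_io_events constant, a timeout variable, and the retired list populated by rm_fd above (names are illustrative, not verbatim):

// Sketch only: dispatch one batch of epoll events back to the reactors.
struct epoll_event ev_buf [max_io_events];
int n = epoll_wait (epoll_fd, ev_buf, max_io_events, timeout);

for (int i = 0; i < n; i++) {
    poll_entry_t *pe = (poll_entry_t *) ev_buf [i].data.ptr;

    if (pe->fd == retired_fd)
        continue;          // removed by rm_fd during this batch
    if (ev_buf [i].events & (EPOLLERR | EPOLLHUP))
        pe->events->in_event ();
    if (pe->fd != retired_fd && (ev_buf [i].events & EPOLLOUT))
        pe->events->out_event ();
    if (pe->fd != retired_fd && (ev_buf [i].events & EPOLLIN))
        pe->events->in_event ();
}

// Entries removed via rm_fd are only freed once the batch is done,
// so data.ptr never dangles while events are being delivered.
for (retired_t::iterator it = retired.begin (); it != retired.end (); ++it)
    delete *it;
retired.clear ();
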
zmq::select_t::handle_t zmq::select_t::add_fd (fd_t fd_,
                                               i_poll_events *events_)
{
    // Store the file descriptor.
    fd_entry_t entry = {fd_, events_};
    fds.push_back (entry);

    // Ensure we do not attempt to select () on more than FD_SETSIZE
    // file descriptors.
    zmq_assert (fds.size () <= FD_SETSIZE);

    // Start polling on errors.
    FD_SET (fd_, &source_set_err);

    // Adjust maxfd if necessary.
    if (fd_ > maxfd)
        maxfd = fd_;

    // Increase the load metric of the thread.
    adjust_load (1);

    return fd_;
}

zmq::select_t::handle_t zmq::select_t::add_fd (fd_t fd_,
                                               i_poll_events *events_)
{
    fd_entry_t fd_entry;
    fd_entry.fd = fd_;
    fd_entry.events = events_;

#if defined ZMQ_HAVE_WINDOWS
    u_short family = get_fd_family (fd_);
    wsa_assert (family != AF_UNSPEC);
    family_entry_t &family_entry = family_entries [family];
    family_entry.fd_entries.push_back (fd_entry);
    FD_SET (fd_, &family_entry.fds_set.error);
#else
    fd_entries.push_back (fd_entry);
    FD_SET (fd_, &fds_set.error);

    if (fd_ > maxfd)
        maxfd = fd_;
#endif

    // Increase the load metric of the thread.
    adjust_load (1);

    return fd_;
}

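In both select_t variants, add_fd only edits source sets (source_set_err in the older code, fds_set.error in the newer one). The reason is that select () overwrites the sets passed to it, so each pass of the loop has to work on fresh copies of the registrations. A sketch of that step, using the older variant's member names, where readfds/writefds/exceptfds are the working copies that rm_fd clears above (the timeout value is illustrative):

// Sketch only: refresh the working copies, then let select ()
// overwrite them with the ready events.
memcpy (&readfds, &source_set_in, sizeof source_set_in);
memcpy (&writefds, &source_set_out, sizeof source_set_out);
memcpy (&exceptfds, &source_set_err, sizeof source_set_err);

struct timeval timeout = {0, 100000};   // illustrative 100 ms tick
int rc = select (maxfd + 1, &readfds, &writefds, &exceptfds, &timeout);
errno_assert (rc != -1);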