/* Drain the counter of a wakeup eventfd, retrying reads that are
 * interrupted by a signal. Any other failure (e.g. EAGAIN on a
 * non-blocking fd whose counter is already zero) is deliberately
 * ignored: the fd is simply considered drained. */
static void eventfd_consume(grpc_wakeup_fd* fd_info) {
  eventfd_t drained;
  int rc;
  for (;;) {
    rc = eventfd_read(fd_info->read_fd, &drained);
    if (rc >= 0 || errno != EINTR) break;
  }
}
// Consume a cross-thread wake-up notification after select() reported
// readability on thread_pipe[0], so the next select() blocks again.
// Returns 1 when a wake-up was consumed, 0 otherwise.
int QEventDispatcherUNIXPrivate::processThreadWakeUp(int nsel)
{
    if (nsel > 0 && FD_ISSET(thread_pipe[0], &sn_vec[0].select_fds)) {
        // some other thread woke us up... consume the data on the thread pipe so that
        // select doesn't immediately return next time
#if defined(Q_OS_VXWORKS)
        char c[16];
        ::read(thread_pipe[0], c, sizeof(c));
        ::ioctl(thread_pipe[0], FIOFLUSH, 0);
#else
# ifndef QT_NO_EVENTFD
        // thread_pipe[1] == -1 is used as the marker that thread_pipe[0]
        // is an eventfd rather than a real pipe; a single read resets it.
        if (thread_pipe[1] == -1) {
            // eventfd
            eventfd_t value;
            eventfd_read(thread_pipe[0], &value);
        } else
# endif
        {
            // Real pipe: loop until it is fully drained.
            char c[16];
            while (::read(thread_pipe[0], c, sizeof(c)) > 0) {
            }
        }
#endif
        // Clear the "wake-up pending" flag; it should always be 1 here.
        if (!wakeUps.testAndSetRelease(1, 0)) {
            // hopefully, this is dead code
            qWarning("QEventDispatcherUNIX: internal error, wakeUps.testAndSetRelease(1, 0) failed!");
        }
        return 1;
    }
    return 0;
}
// Called when user wants to update the fds list for a engine loop. // This happens when a virtio/vm is created/destroyed. static void engine_cmd_callback(uint16_t lcore_id, void* data) { struct cmd_event_info* info = (struct cmd_event_info*)data; struct engine_cmd_msg* msg; eventfd_read(lcore_cmd_event_fd[lcore_id], (eventfd_t*) &msg); if (msg->cmd == ENGINE_CMD_FD_ADD) { memset(&info->fds[msg->slot], 0, sizeof(struct pollfd)); info->event_handlers[msg->slot].data = msg->handler.data; info->event_handlers[msg->slot].fn = msg->handler.fn; info->fds[msg->slot].events = POLLIN|POLLERR; info->fds[msg->slot].fd = msg->fd; (*info->nb_fd)++; free(msg); } else if (msg->cmd == ENGINE_CMD_FD_DEL) { void* arg = info->event_handlers[msg->slot].data; memset(&info->fds[msg->slot], 0, sizeof(struct pollfd)); (*info->nb_fd)--; free(arg); free(msg); } else { log_crit("Unrecongnized command received\n"); free(msg); } }
/* Reset a WinPR event backed by an eventfd or pipe: drain it until
 * WaitForSingleObject() reports the handle as non-signaled.
 * Returns FALSE if the handle is invalid or a read fails. */
BOOL ResetEvent(HANDLE hEvent)
{
	ULONG Type;
	WINPR_HANDLE* Object;
	int length;
	BOOL status = TRUE;
	WINPR_EVENT* event;

	if (!winpr_Handle_GetInfo(hEvent, &Type, &Object))
		return FALSE;

	event = (WINPR_EVENT*) Object;

	/* Keep consuming values while the handle still polls as signaled. */
	while (status && WaitForSingleObject(hEvent, 0) == WAIT_OBJECT_0)
	{
		do
		{
#ifdef HAVE_SYS_EVENTFD_H
			eventfd_t value;
			length = eventfd_read(event->pipe_fd[0], &value);
#else
			/* Reads one byte into 'length' itself; the byte is then
			 * discarded when 'length' is overwritten by read()'s return. */
			length = read(event->pipe_fd[0], &length, 1);
#endif
		}
		while ((length < 0) && (errno == EINTR));

		/* A non-EINTR read failure aborts the drain and reports FALSE. */
		if (length < 0)
			status = FALSE;
	}

	return status;
}
/* Drain one notification from the thread's wake eventfd/pipe.
 * Returns TRUE when a value was successfully consumed. */
static BOOL reset_event(WINPR_THREAD* thread)
{
	int rc;
	BOOL consumed = FALSE;
#ifdef HAVE_EVENTFD_H
	eventfd_t counter;

	/* Retry the read if it is interrupted by a signal. */
	do
	{
		rc = eventfd_read(thread->pipe_fd[0], &counter);
	}
	while ((rc < 0) && (errno == EINTR));

	if (rc > 0)
		consumed = TRUE;
#else
	/* Reads one byte into 'rc' itself; the byte is immediately replaced
	 * by read()'s return value, which is all we check. */
	rc = read(thread->pipe_fd[0], &rc, 1);

	if (rc == 1)
		consumed = TRUE;
#endif
	return consumed;
}
int EventNotifier::handleEvents(short int event) { int count = 0; if (event & POLLIN) { #ifdef LSEFD_AVAIL uint64_t ret; if (eventfd_read(getfd(), &ret) < 0) return LS_FAIL; if (ret > INT_MAX) count = INT_MAX; else count = (int)ret; #else char achBuf[50]; int len = 0; while ((len = read(getfd(), achBuf, sizeof(achBuf) / sizeof(char))) > 0) count += len; #endif onNotified(count); } return 0; }
// Blocking decrement of the eventfd-backed semaphore; logs on failure.
void semaphore_wait(semaphore_t *semaphore) {
  assert(semaphore != NULL);
  assert(semaphore->fd != -1);

  eventfd_t unused;
  // A blocking eventfd read returns once the counter is non-zero.
  if (eventfd_read(semaphore->fd, &unused) == -1)
    ALOGE("%s unable to wait on semaphore: %s", __func__, strerror(errno));
}
/*
 * JNI binding: drain the eventfd counter of |fd|; throws a Java
 * RuntimeException when the read fails.
 */
JNIEXPORT void JNICALL Java_io_netty_channel_epoll_Native_eventFdRead(JNIEnv * env, jclass clazz, jint fd) {
    // Fix: the original declared a uint64_t local *named* eventfd_t,
    // shadowing the libc typedef. Use the proper type with a sane name.
    eventfd_t value;
    if (eventfd_read(fd, &value) != 0) {
        // something is serious wrong
        throwRuntimeException(env, "Error calling eventfd_read(...)");
    }
}
/* Drain the eventfd counter of |fd|; throws a Java RuntimeException when
 * the read fails. */
static void netty_epoll_native_eventFdRead(JNIEnv* env, jclass clazz, jint fd) {
    // Fix: the original declared a uint64_t local *named* eventfd_t,
    // shadowing the libc typedef. Use the proper type with a sane name.
    eventfd_t value;
    if (eventfd_read(fd, &value) != 0) {
        // something is serious wrong
        netty_unix_errors_throwRuntimeException(env, "eventfd_read() failed");
    }
}
// Ask the event loop to exit asynchronously.
// NOTE(review): eventfd_read() here *consumes* the wakeup counter and
// overwrites |quit| — the `= 1` initializer is discarded. If the intent
// was to signal the wakeup fd, eventfd_write() would be the matching
// call; confirm Wakeup_'s contract before changing.
void EventLoopL::quitAsync(int cond) {
    _DBG("quitAsync!");
    eventfd_t quit = 1;
    eventfd_read(Wakeup_->Fd(), &quit);
    event_base_loopexit(EventBase_, NULL);
    //or event_base_loopbreak(EventBase_);
}
// Static-analysis fixture (ACSL/Frama-C-style annotations): checks that
// eventfd_read() is modeled as leaving the global f untouched, then asserts
// \false tagged "vacuous" — the analyser is expected to flag this assertion
// rather than prove it, demonstrating the path is reachable.
void testValues() {
  f = 2;
  eventfd_t v;
  eventfd_read(anyint(), &v);
  //@ assert f == 2;
  //@ assert vacuous: \false;
}
/* Acknowledge a write notification: when the writable space has dropped
 * below the trigger threshold, clear the write eventfd so the writer is
 * not woken again until space frees up. Caller must hold the lock. */
static inline void maru_fifo_write_notify_ack_nolock(maru_fifo *fifo)
{
   if (maru_fifo_write_avail_nolock(fifo) >= fifo->write_trigger)
      return;

   eventfd_t discard;
   eventfd_read(fifo->write_fd, &discard);
}
/* Drain one command value from the loop's control eventfd and act on it.
 * Only _STOP_CMD is recognised: it flags the loop to stop. */
static void ezApiDoEventfdCmd(ezEventLoop * eventLoop)
{
    ezApiState *state = (ezApiState *) eventLoop->apidata;
    eventfd_t cmd = 0;

    eventfd_read(state->evfd, &cmd);
    if (cmd == _STOP_CMD)
        eventLoop->stop = 1;
}
/* Syscall-wrapper smoke test (the /tmp/valgrind-* paths suggest a
 * Valgrind regression test): exercises signalfd/eventfd/ppoll,
 * utimensat and epoll_pwait once each so their wrappers get covered. */
int main (void)
{
#if defined(HAVE_SIGNALFD) && defined(HAVE_EVENTFD) \
    && defined(HAVE_EVENTFD_READ) && defined(HAVE_PPOLL)
  {
    sigset_t mask;
    int fd, fd2;
    eventfd_t ev;
    struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
    struct pollfd pfd[2];
    sigemptyset (&mask);
    sigaddset (&mask, SIGUSR1);
    fd = signalfd (-1, &mask, 0);
    sigaddset (&mask, SIGUSR2);
    /* Second call re-associates the existing signalfd with the widened mask. */
    fd = signalfd (fd, &mask, 0);
    /* eventfd starts at 5, so this read returns immediately. */
    fd2 = eventfd (5, 0);
    eventfd_read (fd2, &ev);
    pfd[0].fd = fd;
    pfd[0].events = POLLIN|POLLOUT;
    pfd[1].fd = fd2;
    pfd[1].events = POLLIN|POLLOUT;
    ppoll (pfd, 2, &ts, &mask);
  }
#endif
#if defined(HAVE_UTIMENSAT)
  unlink("/tmp/valgrind-utimensat-test");
  close (creat ("/tmp/valgrind-utimensat-test", S_IRUSR | S_IWUSR));
  {
    struct timespec ts2[2] = { [0].tv_sec = 10000000, [1].tv_sec = 20000000 };
    utimensat (AT_FDCWD, "/tmp/valgrind-utimensat-test", ts2, 0);
  }
  unlink("/tmp/valgrind-utimensat-test");
#endif
#if defined(HAVE_EPOLL_CREATE) && defined(HAVE_EPOLL_PWAIT)
  {
    int fd3;
    struct epoll_event evs[10];
    sigset_t mask;
    sigemptyset (&mask);
    sigaddset (&mask, SIGUSR1);
    sigaddset (&mask, SIGUSR2);
    fd3 = epoll_create (10);
    epoll_pwait (fd3, evs, 10, 0, &mask);
  }
#endif
  return 0;
}
// Readable-event callback: drain the eventfd and invoke the registered
// handler, keeping this object alive for the call via a shared_ptr.
void Notifier::onEvent(IOLoop* loop, int events)
{
    NotifierPtr_t self = shared_from_this();

    eventfd_t counter;
    if (eventfd_read(m_fd, &counter) < 0) {
        LOG_ERROR("eventfd read error");
        return;
    }

    m_handler(self);
}
/*
 * Receive an eventfd state on the eventfd file descriptor.
 *
 * If the third argument is set to a value other than zero, then this
 * function will compare the received value with this argument and set
 * the return value.
 *
 * On success return 0. On error, -1 will be returned, and errno will
 * be set appropriately.
 */
int eventfd_recv_state(int efd, eventfd_t *e, eventfd_t s)
{
	int rc = eventfd_read(efd, e);

	if (rc < 0)
		return rc;

	/* A non-zero expected state turns a mismatch into EINVAL. */
	if (s != 0 && *e != s) {
		errno = EINVAL;
		return -1;
	}

	return 0;
}
// Socket-engine callback: on a read event, drain the eventfd and notify
// the parent; any other event marks this object for deferred deletion.
void HandleEvent(EventType et, int errornum)
{
    if (et != EVENT_READ) {
        ServerInstance->GlobalCulls.AddItem(this);
        return;
    }

    eventfd_t discard;
    eventfd_read(fd, &discard);
    parent->OnNotify();
}
void EventDispatcherEPollPrivate::wake_up_handler() { eventfd_t value; int res; do { res = eventfd_read(m_event_fd, &value); } while (Q_UNLIKELY(-1 == res && EINTR == errno)); if (Q_UNLIKELY(-1 == res)) { qErrnoWarning("%s: eventfd_read() failed", Q_FUNC_INFO); } if (Q_UNLIKELY(!m_wakeups.testAndSetRelease(1, 0))) { qCritical("%s: internal error, testAndSetRelease(1, 0) failed!", Q_FUNC_INFO); } }
/* nbio read callback for an eventfd: consume the counter and pass the
 * value to the registered callback. EAGAIN just parks the fd; any other
 * error is reported and the event is dropped. */
static void efd_read(struct iothread *t, struct nbio *n)
{
	struct nb_efd *efd = (struct nb_efd *)n;
	eventfd_t val;

	if ( !eventfd_read(efd->e_nbio.fd, &val) ) {
		efd->e_cb(t, efd->e_priv, val);
		return;
	}

	if ( errno == EAGAIN ) {
		nbio_inactive(t, &efd->e_nbio, NBIO_READ);
		return;
	}

	fprintf(stderr, "eventfd_read: %s\n", os_err());
}
/*
 * Return the read value on success, or -1 if efd has been made nonblocking and
 * errno is EAGAIN. If efd has been marked blocking or the eventfd counter is
 * not zero, this function doesn't return error.
 */
int eventfd_xread(int efd)
{
	eventfd_t value = 0;
	int ret;

	/* Restart reads that are interrupted by a signal. */
	for (;;) {
		ret = eventfd_read(efd, &value);
		if (ret >= 0 || errno != EINTR)
			break;
	}

	if (ret == 0)
		return value;

	/* Only EAGAIN is tolerated; anything else is fatal. */
	if (unlikely(errno != EAGAIN))
		panic("eventfd_read() failed, %m");

	return ret;
}
/* Thread: main (pairing) */
/* libevent callback fired when the pairing thread signals new work:
 * drain the notification (eventfd or pipe), pair every remote that has
 * both its mDNS data and a pin, then re-arm the event. */
static void pairing_cb(int fd, short event, void *arg)
{
  struct remote_info *ri;
#ifdef USE_EVENTFD
  eventfd_t count;
  int ret;
  ret = eventfd_read(pairing_efd, &count);
  if (ret < 0)
    {
      DPRINTF(E_LOG, L_REMOTE, "Could not read event counter: %s\n", strerror(errno));
      return;
    }
#else
  int dummy;
  /* Drain the pipe */
  while (read(pairing_pipe[0], &dummy, sizeof(dummy)) >= 0)
    ; /* EMPTY */
#endif
  /* Repeat full scans of the list until no complete (paircode + pin)
   * entry remains; each found entry is unlinked under the lock, then
   * paired outside it. */
  for (;;)
    {
      pthread_mutex_lock(&remote_lck);
      for (ri = remote_list; ri; ri = ri->next)
        {
          /* We've got both the mDNS data and the pin */
          if (ri->paircode && ri->pin)
            {
              unlink_remote(ri);
              break;
            }
        }
      pthread_mutex_unlock(&remote_lck);
      /* ri == NULL means the scan completed without a match: done. */
      if (!ri)
        break;
      do_pairing(ri);
    }
  /* Re-register the event for the next notification. */
  event_add(&pairingev, NULL);
}
// New packet from SPI stack is send into brickd event loop static void red_stack_dispatch_from_spi(void *opaque) { eventfd_t ev; (void)opaque; if (eventfd_read(_red_stack_notification_event, &ev) < 0) { log_error("Could not read from SPI notification event: %s (%d)", get_errno_name(errno), errno); return; } // Send message into brickd dispatcher // and allow SPI thread to run again. network_dispatch_response(&_red_stack.packet_from_spi); semaphore_release(&_red_stack_dispatch_packet_from_spi_semaphore); }
// SEE HEADER FOR THREAD SAFETY NOTE size_t eager_reader_read(eager_reader_t *reader, uint8_t *buffer, size_t max_size, bool block) { assert(reader != NULL); assert(buffer != NULL); // If the caller wants nonblocking behavior, poll to see if we have // any bytes available before reading. if (!block && !has_byte(reader)) return 0; // Find out how many bytes we have available in our various buffers. eventfd_t bytes_available; if (eventfd_read(reader->bytes_available_fd, &bytes_available) == -1) { LOG_ERROR(LOG_TAG, "%s unable to read semaphore for output data.", __func__); return 0; } if (max_size > bytes_available) max_size = bytes_available; size_t bytes_consumed = 0; while (bytes_consumed < max_size) { if (!reader->current_buffer) reader->current_buffer = fixed_queue_dequeue(reader->buffers); size_t bytes_to_copy = reader->current_buffer->length - reader->current_buffer->offset; if (bytes_to_copy > (max_size - bytes_consumed)) bytes_to_copy = max_size - bytes_consumed; memcpy(&buffer[bytes_consumed], &reader->current_buffer->data[reader->current_buffer->offset], bytes_to_copy); bytes_consumed += bytes_to_copy; reader->current_buffer->offset += bytes_to_copy; if (reader->current_buffer->offset >= reader->current_buffer->length) { reader->allocator->free(reader->current_buffer); reader->current_buffer = NULL; } } bytes_available -= bytes_consumed; if (eventfd_write(reader->bytes_available_fd, bytes_available) == -1) { LOG_ERROR(LOG_TAG, "%s unable to write back bytes available for output data.", __func__); } return bytes_consumed; }
/* Critical-section variant of ResetEvent: under the global lock, drain the
 * event's pipe/eventfd while the handle stays signaled. Returns TRUE once
 * at least one value was consumed, FALSE for an invalid handle. */
BOOL ResetEvent(HANDLE hEvent)
{
	ULONG Type;
	PVOID Object;
	int length;
	BOOL status;
	WINPR_EVENT* event;
	status = FALSE;
	EnterCriticalSection(&cs);

	if (winpr_Handle_GetInfo(hEvent, &Type, &Object))
	{
		event = (WINPR_EVENT*) Object;

		/* NOTE(review): a persistent read error other than EINTR would
		 * spin here forever while the handle keeps polling signaled. */
		while (WaitForSingleObject(hEvent, 0) == WAIT_OBJECT_0)
		{
#ifdef HAVE_EVENTFD_H
			eventfd_t value;

			do
			{
				length = eventfd_read(event->pipe_fd[0], &value);
			}
			while ((length < 0) && (errno == EINTR));

			if ((length > 0) && (!status))
				status = TRUE;
#else
			/* Reads one byte into 'length' itself; the byte is then
			 * discarded when 'length' takes read()'s return value. */
			length = read(event->pipe_fd[0], &length, 1);

			if ((length == 1) && (!status))
				status = TRUE;
#endif
		}
	}

	LeaveCriticalSection(&cs);
	return status;
}
void join_widget_threads (struct bar *bar) { unsigned short i; struct timespec timeout; if (widgets_active && (widgets_len > 0)) { LOG_DEBUG("gracefully shutting down widget threads..."); for (i = 0; i < widgets_len; i++) { /* make all threads wait until we're ready to receive the cond signal below */ pthread_mutex_lock(&widgets_active[i]->exit_mutex); } /* send exit signal */ eventfd_write(bar->efd, 1); for (i = 0; i < widgets_len; i++) { /* update cond timeout */ clock_gettime(CLOCK_REALTIME, &timeout); timeout.tv_sec += 2; /* wait until thread times out or sends an exit confirmation signal */ int ret = pthread_cond_timedwait(&widgets_active[i]->exit_cond, &widgets_active[i]->exit_mutex, &timeout); if (ret == ETIMEDOUT) { LOG_WARN("timed out waiting for widget %s to exit", widgets_active[i]->name); pthread_cancel(widgets_active[i]->thread); } else { pthread_join(widgets_active[i]->thread, NULL); } } /* read any data from the efd so it blocks on epoll_wait */ eventfd_read(bar->efd, NULL); free(widgets_active); } else { LOG_DEBUG("no widget threads have been spawned"); } }
/* Kick-eventfd callback for virtqueue |data| (the index is smuggled in
 * the pointer value): drain the eventfd and run the queue's handler.
 * A failed read panics the device and removes the watch. */
static void vu_kick_cb(VuDev *dev, int condition, void *data)
{
    int index = (intptr_t)data;
    VuVirtq *vq = &dev->vq[index];
    eventfd_t kick_data;
    ssize_t rc = eventfd_read(vq->kick_fd, &kick_data);

    if (rc == -1) {
        vu_panic(dev, "kick eventfd_read(): %s", strerror(errno));
        dev->remove_watch(dev, dev->vq[index].kick_fd);
        return;
    }

    DPRINT("Got kick_data: %016"PRIx64" handler:%p idx:%d\n",
           kick_data, vq->handler, index);
    if (vq->handler) {
        vq->handler(dev, index);
    }
}
// Non-blocking decrement of the eventfd-backed semaphore: temporarily
// switch the fd to O_NONBLOCK, attempt one read, then restore the
// original flags on every exit path. Returns true when the decrement
// succeeded, false otherwise.
bool semaphore_try_wait(semaphore_t *semaphore) {
  assert(semaphore != NULL);
  assert(semaphore->fd != -1);

  int flags = fcntl(semaphore->fd, F_GETFL);
  if (flags == -1) {
    ALOGE("%s unable to get flags for semaphore fd: %s", __func__, strerror(errno));
    return false;
  }
  if (fcntl(semaphore->fd, F_SETFL, flags | O_NONBLOCK) == -1) {
    ALOGE("%s unable to set O_NONBLOCK for semaphore fd: %s", __func__, strerror(errno));
    return false;
  }

  eventfd_t value;
  bool success = (eventfd_read(semaphore->fd, &value) != -1);

  // Bug fix: restore the original flags even when the read fails;
  // previously a failed read returned early and left the fd
  // permanently non-blocking.
  if (fcntl(semaphore->fd, F_SETFL, flags) == -1)
    ALOGE("%s unable to restore flags for semaphore fd: %s", __func__, strerror(errno));

  return success;
}
// Returns 0 for success, -errno for error int Device::EnsureOpen() const { int rv; if (mFd >= 0) return 0; if (mDeviceName.length()) { unsigned mode = 0; if (mDeviceMode & (std::ios::in | std::ios::out)) mode = O_RDWR; else if (mDeviceMode & std::ios::in) mode = O_WRONLY; else if (mDeviceMode & std::ios::out) mode = O_RDONLY; else return Error::DeviceModeUnset; rv = OpenKsockByName(mDeviceName.c_str(), mode); } else { mFd = -1; return Error::DeviceHasNoName; } if (rv < 0) { mFd = -1; return rv; } else { mFd = rv; eventfd_t eventFdVal; eventfd_read(mEventFd, &eventFdVal); return 0; } }
/* Demo/benchmark thread: creates a non-blocking eventfd in the global
 * evfd and, when READ_EVENT_FD is defined, drains it via epoll forever;
 * otherwise just sleeps so another thread can write to evfd. */
void * wakeupThreadLoop(void * null)
{
  evfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
  if (evfd == -1) {
    perror("eventfd failed");
    exit(-1);
  }
#ifdef READ_EVENT_FD
  int epfd;
  struct epoll_event event;
  struct epoll_event *events;
  uint64_t val;
  int n;
  epfd = epoll_create1(0);
  /* NOTE(review): this calloc (and epfd) are never released; the loop
   * below never exits, so that is tolerable for a demo. */
  events = calloc (1, sizeof event);
  event.data.fd = evfd;
  event.events = EPOLLIN;
  if (epoll_ctl (epfd, EPOLL_CTL_ADD, evfd, &event)) {
    perror("epoll_ctl");
    exit(-1);
  }
  while(1) {
    n = epoll_wait(epfd, events, 1, -1);
    if (n>0) {
      /* Drain the counter so epoll_wait blocks again. */
      if (eventfd_read(evfd, &val)) {
        perror("eventfd_read");
        exit(-1);
      }
    }
  }
#else
  sleep(20);
#endif
  pthread_exit(NULL);
}
/* Child side of the userns test: announce readiness on efd, wait for the
 * parent's go-ahead on efd_userns_child, shift uid/gid, then exec a
 * shell. Returns a negative value only on failure (exec does not return
 * on success). */
static int outer_child(void)
{
	int ret;
	eventfd_t event_status = 0;

	/* We entered the child we are ready */
	ret = eventfd_write(efd, 1);
	if (ret < 0) {
		ret = -errno;
		printf("error eventfd_write(): %d (%m)\n", ret);
		return ret;
	}

	/* Block until the parent signals that the namespace is set up. */
	ret = eventfd_read(efd_userns_child, &event_status);
	if (ret < 0 || event_status != 1) {
		printf("error eventfd_read() ***\n");
		return -1;
	}

	ret = update_uid_gid();
	if (ret < 0)
		return ret;

	/*
	ret = child_test_filesystems();
	if (ret < 0) {
		printf("failed at filesystems test\n");
		return ret;
	}
	*/

	/* TODO: test here stats and other uidshift results */

	/* Bug fix: execle() needs a non-NULL argv[0] and a (possibly empty
	 * but valid) envp after the argv terminator; the original passed
	 * NULL for all three, giving bash an empty argument vector. */
	char *const envp[] = { NULL };
	execle("/bin/bash", "bash", (char *)NULL, envp);
	return -1;
}