/*
 * Drain the queue of descriptors whose epoll registration needs changing.
 *
 * Runs on the IO thread. For each dequeued descriptor the snapshot of
 * (flags, requested) taken inside mod_dequeue() decides one of three
 * actions:
 *   - not WAITING: the wait was completed/cancelled elsewhere, so just
 *     run the completion task;
 *   - WAITING with an empty request mask: a cancellation — remove the fd
 *     from epoll (if registered), clear its ready mask and re-queue it so
 *     the next pass dispatches the completion;
 *   - WAITING with a non-empty mask: (re)register with epoll, using MOD
 *     vs ADD depending on whether the fd is already registered.
 *
 * NOTE(review): epoll_ctl() is called without q->lock held; f->flags is
 * only ever touched under q->lock. Keep that ordering if modifying.
 */
static void dispatch_mods(struct ioq *q)
{
	for (;;) {
		ioq_fd_mask_t requested;
		int flags;
		struct ioq_fd *f = mod_dequeue(q, &flags, &requested);

		if (!f)
			break;

		if (!(flags & IOQ_FLAG_WAITING)) {
			/* No wait in progress: dispatch the completion callback. */
			runq_task_exec(&f->task, f->task.func);
		} else if (!requested) {
			/* Cancellation: drop the epoll registration, then
			 * re-queue so the completion runs on a later pass. */
			if (flags & IOQ_FLAG_EPOLL)
				epoll_ctl(q->epoll_fd, EPOLL_CTL_DEL, f->fd, NULL);
			f->ready = 0;
			thr_mutex_lock(&q->lock);
			f->flags &= ~(IOQ_FLAG_EPOLL | IOQ_FLAG_WAITING);
			mod_enqueue_nolock(q, f);
			thr_mutex_unlock(&q->lock);
		} else {
			/* Arm (or re-arm) the epoll registration with the
			 * requested event mask. */
			struct epoll_event evt;

			memset(&evt, 0, sizeof(evt));
			evt.events = requested;
			evt.data.ptr = f;

			if (epoll_ctl(q->epoll_fd,
				      (flags & IOQ_FLAG_EPOLL) ?
					EPOLL_CTL_MOD : EPOLL_CTL_ADD,
				      f->fd, &evt) < 0) {
				/* Registration failed: record the error and
				 * re-queue with requested cleared so the wait
				 * completes with f->err set. */
				f->err = syserr_last();
				thr_mutex_lock(&q->lock);
				f->requested = 0;
				mod_enqueue_nolock(q, f);
				thr_mutex_unlock(&q->lock);
			} else {
				thr_mutex_lock(&q->lock);
				f->flags |= IOQ_FLAG_EPOLL;
				thr_mutex_unlock(&q->lock);
			}
		}
	}
}
/*
 * Block in epoll_wait() until IO is ready, the interrupt pipe is written,
 * or the next waitq deadline expires.
 *
 * Each ready descriptor is treated as one-shot: it is removed from epoll,
 * its ready mask is recorded, and it is pushed onto the mod queue so that
 * dispatch_mods() runs its completion. Events with a NULL data pointer
 * belong to the interrupt pipe and are skipped (intr_ack() drains it).
 *
 * Returns 0 on success or benign EINTR, -1 on any other epoll_wait()
 * failure.
 */
static int do_wait(struct ioq *q)
{
	struct epoll_event evts[32];
	const int timeout = waitq_next_deadline(&q->wait);
	int ret;
	int i;

	/* This can't fail for any reason but signal interruption */
	ret = epoll_wait(q->epoll_fd, evts, lengthof(evts), timeout);
	if (ret < 0) {
		if (syserr_last() == EINTR)
			return 0;
		return -1;
	}

	intr_ack(q);

	for (i = 0; i < ret; i++) {
		const struct epoll_event *e = &evts[i];
		struct ioq_fd *f = e->data.ptr;

		/* NULL data.ptr marks the interrupt pipe — nothing to do. */
		if (!f)
			continue;

		/* One-shot: unregister, then hand off to dispatch_mods()
		 * via the mod queue. f->ready is written before WAITING is
		 * cleared under the lock. */
		epoll_ctl(q->epoll_fd, EPOLL_CTL_DEL, f->fd, NULL);
		f->ready = e->events;
		thr_mutex_lock(&q->lock);
		f->flags &= ~(IOQ_FLAG_EPOLL | IOQ_FLAG_WAITING);
		mod_enqueue_nolock(q, f);
		thr_mutex_unlock(&q->lock);
	}

	return 0;
}
/*
 * Producer side of the event/mutex test.
 *
 * Spawns the consumer thread (work_func), then increments the shared
 * counter five times — each increment done under the mutex, followed by
 * raising the event and a short delay so the consumer gets a chance to
 * run. Finally joins the worker, runs the timed-wait test and tears the
 * primitives down.
 *
 * Returns 0 on success, -1 if the event can't be initialized.
 */
int main(void)
{
	int remaining;

	thr_mutex_init(&mutex);
	if (thr_event_init(&event) < 0) {
		report_syserr("thr_event_init");
		return -1;
	}

	thr_start(&worker, work_func, NULL);

	for (remaining = 5; remaining > 0; remaining--) {
		thr_mutex_lock(&mutex);
		counter++;
		printf("producer: counter++\n");
		thr_mutex_unlock(&mutex);
		thr_event_raise(&event);
		/* Give the consumer a window to drain the counter. */
		clock_wait(10);
	}

	thr_join(worker);
	test_timedwait();
	thr_event_destroy(&event);
	thr_mutex_destroy(&mutex);
	return 0;
}
/*
 * Request cancellation of any pending asynchronous operation on the file.
 *
 * Cancels the underlying ioq wait and records F_WANT_CANCEL so that
 * end_wait() reports the cancellation to the pending callbacks. Both
 * steps are done under a->lock so they appear atomic to the IO callback.
 */
void afile_cancel(struct afile *a)
{
	thr_mutex_lock(&a->lock);
	ioq_fd_cancel(&a->fd);
	a->flags |= F_WANT_CANCEL;
	thr_mutex_unlock(&a->lock);
}
/*
 * Acknowledge a wakeup: drain the interrupt self-pipe, then clear the
 * interrupt-pending flag under the queue lock.
 *
 * NOTE(review): the drain loop terminates only when read() fails —
 * this assumes q->intr[0] is non-blocking (EAGAIN once empty). A read()
 * return of 0 would loop forever, but that can't happen while the write
 * end stays open. Confirm the pipe is created with O_NONBLOCK.
 */
static void intr_ack(struct ioq *q)
{
	char discard[128];

	while (read(q->intr[0], discard, sizeof(discard)) >= 0);

	thr_mutex_lock(&q->lock);
	q->intr_state = 0;
	thr_mutex_unlock(&q->lock);
}
/*
 * Wake the IO thread out of epoll_wait().
 *
 * Sets the interrupt-pending flag under the lock; only the transition
 * from clear to set actually writes to the self-pipe, so repeated
 * notifications before the IO thread acknowledges cost nothing extra.
 */
void ioq_notify(struct ioq *q)
{
	int was_pending;

	thr_mutex_lock(&q->lock);
	was_pending = q->intr_state;
	q->intr_state = 1;
	thr_mutex_unlock(&q->lock);

	if (was_pending)
		return;

	/* First notification since the last ack: poke the self-pipe. */
	write(q->intr[1], "", 1);
}
/*
 * Called from the ioq completion callback: decide which queued operations
 * (F_WANT_READ/F_WANT_WRITE) can be performed now, and re-arm the wait
 * for whatever is still outstanding.
 *
 * Returns a bitmask of operations to perform. On error or cancellation
 * all pending flags are returned with F_WANT_CANCEL OR'd in, and *err is
 * set (0 on cancellation). ERR/HUP events release both readers and
 * writers so they can observe the condition via their own syscall.
 */
static int end_wait(struct afile *a, syserr_t *err)
{
	ioq_fd_mask_t wait_mask = 0;
	int perform = 0;

	thr_mutex_lock(&a->lock);

	/* Check for an error */
	if (ioq_fd_error(&a->fd)) {
		/* Release everything that was pending, flagged as cancelled. */
		perform = a->flags | F_WANT_CANCEL;
		a->flags = 0;
		*err = ioq_fd_error(&a->fd);
		goto out;
	}

	*err = 0;

	/* Check for cancellation request */
	if (a->flags & F_WANT_CANCEL) {
		perform = a->flags | F_WANT_CANCEL;
		a->flags = 0;
		goto out;
	}

	/* Choose actions to perform, and new things to wait for */
	if (a->flags & F_WANT_WRITE) {
		if (ioq_fd_ready(&a->fd) &
		    (IOQ_EVENT_OUT | IOQ_EVENT_ERR | IOQ_EVENT_HUP)) {
			perform |= F_WANT_WRITE;
			a->flags &= ~F_WANT_WRITE;
		} else {
			wait_mask |= IOQ_EVENT_OUT;
		}
	}

	if (a->flags & F_WANT_READ) {
		if (ioq_fd_ready(&a->fd) &
		    (IOQ_EVENT_IN | IOQ_EVENT_ERR | IOQ_EVENT_HUP)) {
			perform |= F_WANT_READ;
			a->flags &= ~F_WANT_READ;
		} else {
			wait_mask |= IOQ_EVENT_IN;
		}
	}

	/* Wait for more IO events */
	if (wait_mask)
		ioq_fd_wait(&a->fd, wait_mask, ioq_cb);

out:
	thr_mutex_unlock(&a->lock);
	return perform;
}
/*
 * Change the event mask of a wait that is already in progress.
 *
 * If the descriptor is no longer WAITING (the wait already completed or
 * was cancelled) this is a no-op. Otherwise the new mask is recorded and
 * the descriptor is queued for dispatch_mods(); the IO thread is woken
 * only if the queue transitioned from empty (mod_enqueue_nolock()'s
 * return value).
 */
void ioq_fd_rewait(struct ioq_fd *f, ioq_fd_mask_t set)
{
	struct ioq *q = f->owner;
	int need_wakeup = 0;

	thr_mutex_lock(&q->lock);
	if (f->flags & IOQ_FLAG_WAITING) {
		f->requested = set;
		need_wakeup = mod_enqueue_nolock(q, f);
	}
	thr_mutex_unlock(&q->lock);

	/* Notify outside the lock to avoid waking into contention. */
	if (need_wakeup)
		ioq_notify(q);
}
/*
 * Consumer side of the event/mutex test.
 *
 * Repeatedly waits for the producer's event, then drains the shared
 * counter under the mutex, logging one line per decrement. Exits after
 * five decrements in total have been observed.
 */
static void work_func(void *arg)
{
	int consumed = 0;

	(void)arg;

	while (consumed < 5) {
		thr_event_wait(&event);
		thr_event_clear(&event);

		thr_mutex_lock(&mutex);
		while (counter != 0) {
			counter--;
			consumed++;
			printf("consumer: counter--\n");
		}
		thr_mutex_unlock(&mutex);
	}
}
/*
 * Begin an asynchronous read of up to len bytes into data; func is
 * invoked on completion.
 *
 * The read request fields are written before taking the lock —
 * NOTE(review): this presumes the caller never issues two reads
 * concurrently on the same afile; confirm against the API contract.
 *
 * If another operation is already pending (a->flags non-zero), the
 * existing wait is re-armed for both IN and OUT; otherwise a fresh wait
 * for IN is started.
 */
void afile_read(struct afile *a, void *data, size_t len, afile_func_t func)
{
	a->read.buffer = data;
	a->read.size = len;
	a->read.func = func;

	thr_mutex_lock(&a->lock);
	if (a->flags)
		ioq_fd_rewait(&a->fd, IOQ_EVENT_IN | IOQ_EVENT_OUT);
	else
		ioq_fd_wait(&a->fd, IOQ_EVENT_IN, ioq_cb);
	a->flags |= F_WANT_READ;
	thr_mutex_unlock(&a->lock);
}
/*
 * Begin an asynchronous write of len bytes from data; func is invoked on
 * completion. Mirror image of afile_read().
 *
 * The const is cast away only for storage — the buffer is not modified
 * by a write operation.
 *
 * If another operation is already pending, the existing wait is re-armed
 * for both IN and OUT; otherwise a fresh wait for OUT is started.
 */
void afile_write(struct afile *a, const void *data, size_t len, afile_func_t func)
{
	a->write.buffer = (void *)data;
	a->write.size = len;
	a->write.func = func;

	thr_mutex_lock(&a->lock);
	if (a->flags)
		ioq_fd_rewait(&a->fd, IOQ_EVENT_IN | IOQ_EVENT_OUT);
	else
		ioq_fd_wait(&a->fd, IOQ_EVENT_OUT, ioq_cb);
	a->flags |= F_WANT_WRITE;
	thr_mutex_unlock(&a->lock);
}
/*
 * Pop one descriptor from the modification queue.
 *
 * Returns NULL when the queue is empty. On success, the MOD_LIST flag is
 * cleared and a consistent snapshot of the descriptor's flags and
 * requested mask is copied out while q->lock is still held, so the
 * caller can act on them without re-locking.
 */
static struct ioq_fd *mod_dequeue(struct ioq *q, int *flags,
				  ioq_fd_mask_t *requested)
{
	struct ioq_fd *item = NULL;
	struct slist_node *node;

	thr_mutex_lock(&q->lock);
	node = slist_pop(&q->mod_list);
	if (node != NULL) {
		item = container_of(node, struct ioq_fd, mod_list);
		item->flags &= ~IOQ_FLAG_MOD_LIST;
		*flags = item->flags;
		*requested = item->requested;
	}
	thr_mutex_unlock(&q->lock);

	return item;
}
/*
 * Begin waiting for the events in set on this descriptor; func runs when
 * any of them (or an error) occurs.
 *
 * An empty mask completes immediately by executing the callback inline.
 * Otherwise the descriptor is queued for the IO thread, which performs
 * the actual epoll registration in dispatch_mods().
 *
 * NOTE(review): the per-fd fields (task.func, requested, ready, err,
 * flags) are initialized *before* taking q->lock — this is safe only if
 * no wait is currently in progress on f; confirm that's the documented
 * precondition for this call.
 */
void ioq_fd_wait(struct ioq_fd *f, ioq_fd_mask_t set, ioq_fd_func_t func)
{
	struct ioq *q = f->owner;
	int need_wakeup = 0;

	f->task.func = (runq_task_func_t)func;
	f->requested = set;
	f->ready = 0;
	f->err = 0;
	f->flags = IOQ_FLAG_WAITING;

	/* Nothing to wait for: complete synchronously. */
	if (!set) {
		runq_task_exec(&f->task, (runq_task_func_t)func);
		return;
	}

	thr_mutex_lock(&q->lock);
	need_wakeup = mod_enqueue_nolock(q, f);
	thr_mutex_unlock(&q->lock);

	if (need_wakeup)
		ioq_notify(q);
}
/* Main routine for the server threads */
/*
 * Bidirectional pump between a serial port (pipe->sio) and a TCP socket
 * (pipe->sock), driven by select(). One buffer per direction; data is
 * only read into a buffer when it is empty, and a write drains (part of)
 * it. The loop exits on EOF, IO error, or timeout.
 *
 * Timeout detection differs by platform: on UNIX the select() timeout is
 * reset each iteration and res == 0 means timed out; on WIN32 select()
 * uses a short fixed poll interval and a GetTickCount() delta detects
 * the idle timeout instead.
 *
 * Ownership: takes pipe->mutex via trylock (refusing to run if the
 * resource is busy) and, on exit, cleans up the sio/sock handles and
 * frees the pipe_s itself — the caller must not touch it afterwards.
 * pipe_cleanup() is deliberately NOT used because it would also destroy
 * the mutex we still hold a reference to.
 */
thr_startfunc_t serve_pipe(void *data)
{
	char sio_buf[BUFSIZ], sock_buf[BUFSIZ];
	int fd_max, sio_fd, sock_fd;
	int sio_count, sock_count;	/* bytes pending in each buffer */
	int res, port;
	fd_set rfds, wfds;
	pipe_s *pipe = (pipe_s *)data;
#if defined(__UNIX__)
	struct timeval tv = {pipe->timeout, 0};
	struct timeval *ptv = &tv;
#elif defined(__WIN32__)
	/* Short poll interval; real timeout handled via GetTickCount(). */
	struct timeval tv = {0,10000};
	struct timeval *ptv = &tv;
	DWORD msecs = 0, timeout = pipe->timeout * 1000;
#endif

	port = pipe->sio.info.port;

	/* Only proceed if we can lock the mutex */
	if (thr_mutex_trylock(pipe->mutex)) {
		error("server(%d) - resource is locked", port);
	} else {
		sio_count = 0;
		sock_count = 0;
		sio_fd = pipe->sio.fd;
		sock_fd = pipe->sock.fd;
#if defined(__UNIX__)
		fd_max = sio_fd > sock_fd ? sio_fd : sock_fd;
#elif defined(__WIN32__)
		/* On WIN32 only the socket is select()able. */
		fd_max = sock_fd;
		msecs = GetTickCount();
#endif
		fprintf(stderr, "server(%d) - thread started\n", port);
		while (1) {
			FD_ZERO(&rfds);
			FD_ZERO(&wfds);
#if defined(__UNIX__)
			/* Always ask for read notification to check for EOF */
			FD_SET(sio_fd, &rfds);
			/* Only ask for write notification if we have something to write */
			if (sock_count > 0)
				FD_SET(sio_fd, &wfds);
			/* Reset timeout values */
			tv.tv_sec = pipe->timeout;
			tv.tv_usec = 0;
#endif
			/* Always ask for read notification to check for EOF */
			FD_SET(sock_fd, &rfds);
			/* Only ask for write notification if we have something to write */
			if (sio_count > 0)
				FD_SET(sock_fd, &wfds);

			//DBG_MSG2("server(%d) waiting for events", port);

			/* Wait for read/write events */
			res = select(fd_max + 1, &rfds, &wfds, NULL, ptv);
			if (res == -1) {
				perror2("server(%d) - select()", port);
				break;
			}
#if defined(__UNIX__)
			/* Use the select result for timeout detection */
			if (res == 0) {
				fprintf(stderr, "server(%d) - timed out\n", port);
				break;
			}

			/* Input from serial port? */
			if (FD_ISSET(sio_fd, &rfds))
#elif defined(__WIN32__)
			/* Serial port not in the fd sets: always poll it. */
			if (1)
#endif
			{
				/* Only read input if buffer is empty */
				if (sio_count == 0) {
					sio_count = sio_read(&pipe->sio, sio_buf, sizeof(sio_buf));
					if (sio_count <= 0) {
						if (sio_count == 0) {
#if defined(__UNIX__)
							fprintf(stderr, "server(%d) - EOF from sio\n", port);
							break;
#endif
						} else {
							perror2("server(%d) - read(sio)", port);
							break;
						}
					} else {
						DBG_MSG3("server(%d) - read %d bytes from sio", port, sio_count);
					}
				}
			}

			/* Write to socket possible? */
			if (FD_ISSET(sock_fd, &wfds)) {
				if (sio_count > 0) {
					if ((res = tcp_write(&pipe->sock, sio_buf, sio_count)) < 0) {
						perror2("server(%d) - write(sock)", port);
						break;
					}
					DBG_MSG3("server(%d) - Wrote %d bytes to sock", port, res);
					/* NOTE(review): partial write keeps the
					 * remainder counted, but the buffer is
					 * not compacted — the next tcp_write()
					 * resends from sio_buf[0]; confirm
					 * whether partial writes can occur. */
					sio_count -= res;
				}
			}

			/* Input from socket? */
			if (FD_ISSET(sock_fd, &rfds)) {
				/* Only read input if buffer is empty */
				if (sock_count == 0) {
					sock_count = tcp_read(&pipe->sock, sock_buf, sizeof(sock_buf));
					if (sock_count <= 0) {
						if (sock_count == 0) {
							fprintf(stderr, "server(%d) - EOF from sock\n", port);
							break;
						} else {
							perror2("server(%d) - read(sock)", port);
							break;
						}
					}
					DBG_MSG3("server(%d) - read %d bytes from sock", port, sock_count);
				}
			}
#if defined(__UNIX__)
			/* Write to serial port possible? */
			if (FD_ISSET(sio_fd, &wfds))
#elif defined(__WIN32__)
			/* No socket IO performed? */
			if ((!FD_ISSET(sock_fd, &rfds)) && (!FD_ISSET(sock_fd, &wfds))) {
				/* Break on a time out */
				if (GetTickCount() - msecs > timeout) {
					fprintf(stderr, "server(%d) - timed out\n", port);
					break;
				}
			} else {
				msecs = GetTickCount();
			}
			if (1)
#endif
			{
				if (sock_count > 0) {
					if ((res = sio_write(&pipe->sio, sock_buf, sock_count)) < 0) {
						perror2("server(%d) - write(sio)", port);
						break;
					}
					DBG_MSG3("server(%d) - wrote %d bytes to sio", port, res);
					sock_count -= res;
				}
			}
		}
		/* Unlock our mutex */
		thr_mutex_unlock(pipe->mutex);
	}

	fprintf(stderr, "server(%d) exiting\n", port);

	/* Clean up - don't call pipe_cleanup() as that would nuke our mutex */
	sio_cleanup(&pipe->sio);
	tcp_cleanup(&pipe->sock);
	free(pipe);

	thr_exit((thr_exitcode_t)0);
	return (thr_exitcode_t)0;
}