static int
worker_start(void)
{
    struct worker *wkr;

    dbg_puts("Spawning another worker");

    wkr = calloc(1, sizeof(*wkr));
    if (wkr == NULL) {
        dbg_perror("calloc(3)");
        return (-1);
    }

    scoreboard.count++;
    atomic_inc(&scoreboard.idle);

    if (pthread_create(&wkr->tid, &detached_attr, worker_main, wkr) != 0) {
        dbg_perror("pthread_create(3)");
        atomic_dec(&scoreboard.idle);
        scoreboard.count--;
        free(wkr);
        return (-1);
    }

    pthread_mutex_lock(&workers_mtx);
    LIST_INSERT_HEAD(&workers, wkr, entries);
    pthread_mutex_unlock(&workers_mtx);

    return (0);
}
int
put_string(int fd, const char *fmt, ...)
{
    char *buf, *buf_ptr;
    va_list ap;
    ssize_t to_write;

    va_start(ap, fmt);
    if (vasprintf(&buf, fmt, ap) < 0) {
        va_end(ap);
        dbg_perror("vasprintf");
        return -1;
    }
    va_end(ap);

    buf_ptr = buf;
    to_write = strlen(buf);
    while (to_write) {
        ssize_t written = write(fd, buf_ptr, to_write);

        if (written < 0) {
            if (errno == EINTR)
                continue;
            dbg_perror("write");
            free(buf);
            return -1;
        }
        to_write -= written;
        buf_ptr += written;
    }

    free(buf);
    return 0;
}
static int
_timer_create(struct filter *filt, struct knote *kn)
{
    pthread_attr_t attr;
    pthread_t tid;
    struct sleepreq *req;

    kn->kev.flags |= EV_CLEAR;

    req = malloc(sizeof(*req));
    if (req == NULL) {
        dbg_perror("malloc");
        return (-1);
    }
    req->pfd = filt->kf_pfd;
    req->wfd = filt->kf_wfd;
    req->ident = kn->kev.ident;
    req->interval = kn->kev.data;
    kn->data.sleepreq = req;
    pthread_cond_init(&req->cond, NULL);
    pthread_mutex_init(&req->mtx, NULL);

    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    if (pthread_create(&tid, &attr, sleeper_thread, req) != 0) {
        dbg_perror("pthread_create");
        pthread_attr_destroy(&attr);
        kn->data.sleepreq = NULL;   /* don't leave a dangling pointer behind */
        free(req);
        return (-1);
    }
    pthread_attr_destroy(&attr);

    return (0);
}
int
seek_and_read(int fd, off_t offset, void *buf, size_t count)
{
    if (lseek(fd, offset, SEEK_SET) < 0) {
        dbg_perror("lseek");
        return -1;
    }
    if (read(fd, buf, count) != (ssize_t)count) {
        dbg_perror("read");
        return -1;
    }
    return 0;
}
int
evfilt_read_knote_create(struct filter *filt, struct knote *kn)
{
    struct epoll_event ev;

    if (linux_get_descriptor_type(kn) < 0)
        return (-1);

    /* Convert the kevent into an epoll_event */
#if defined(HAVE_EPOLLRDHUP)
    kn->data.events = EPOLLIN | EPOLLRDHUP;
#else
    kn->data.events = EPOLLIN;
#endif
    if (kn->kev.flags & EV_ONESHOT || kn->kev.flags & EV_DISPATCH)
        kn->data.events |= EPOLLONESHOT;
    if (kn->kev.flags & EV_CLEAR)
        kn->data.events |= EPOLLET;

    memset(&ev, 0, sizeof(ev));
    ev.events = kn->data.events;
    ev.data.ptr = kn;

    /* Special case: for regular files, add a surrogate eventfd that is always readable */
    if (kn->kn_flags & KNFL_FILE) {
        int evfd;

        kn->kn_epollfd = filter_epfd(filt);
        evfd = eventfd(0, 0);
        if (evfd < 0) {
            dbg_perror("eventfd(2)");
            return (-1);
        }
        if (eventfd_write(evfd, 1) < 0) {
            dbg_perror("eventfd_write(3)");
            (void) close(evfd);
            return (-1);
        }

        kn->kdata.kn_eventfd = evfd;

        if (epoll_ctl(kn->kn_epollfd, EPOLL_CTL_ADD, kn->kdata.kn_eventfd, &ev) < 0) {
            dbg_printf("epoll_ctl(2): %s", strerror(errno));
            (void) close(evfd);
            kn->kdata.kn_eventfd = -1;
            return (-1);
        }

        kn->kn_registered = 1;
        return (0);
    }

    return epoll_update(EPOLL_CTL_ADD, filt, kn, &ev);
}
static int
do_accept(EpmdVars *g, int listensock)
{
    int msgsock;
    struct EPMD_SOCKADDR_IN icli_addr; /* workaround for QNX bug - cannot   */
    int icli_addr_len;                 /* handle NULL pointers to accept.   */

    icli_addr_len = sizeof(icli_addr);

    msgsock = accept(listensock, (struct sockaddr*) &icli_addr,
                     (unsigned int*) &icli_addr_len);

    if (msgsock < 0) {
        dbg_perror(g, "error in accept");
        switch (errno) {
        case EAGAIN:
        case ECONNABORTED:
        case EINTR:
            return EPMD_FALSE;
        default:
            epmd_cleanup_exit(g, 1);
        }
    }

    return conn_open(g, msgsock);
}
int
evfilt_read_knote_delete(struct filter *filt, struct knote *kn)
{
    if (kn->kev.flags & EV_DISABLE)
        return (0);

    if ((kn->kn_flags & KNFL_REGULAR_FILE) && kn->kdata.kn_eventfd != -1) {
        if (epoll_ctl(kn->kn_epollfd, EPOLL_CTL_DEL, kn->kdata.kn_eventfd, NULL) < 0) {
            dbg_perror("epoll_ctl(2)");
            return (-1);
        }
        (void) close(kn->kdata.kn_eventfd);
        kn->kdata.kn_eventfd = -1;
        return (0);
    }

    return epoll_update(EPOLL_CTL_DEL, filt, kn, NULL);
}
static VOID CALLBACK
evfilt_timer_callback(void *param, BOOLEAN fired)
{
    struct knote *kn;
    struct kqueue *kq;

    if (!fired) {
        dbg_puts("called, but timer did not fire - this case should never be reached");
        return;
    }

    assert(param);
    kn = (struct knote *) param;

    if (kn->kn_flags & KNFL_KNOTE_DELETED) {
        dbg_puts("knote marked for deletion, skipping event");
        return;
    } else {
        kq = kn->kn_kq;
        assert(kq);

        if (!PostQueuedCompletionStatus(kq->kq_iocp, 1, (ULONG_PTR) 0, (LPOVERLAPPED) kn)) {
            dbg_lasterror("PostQueuedCompletionStatus()");
            return; /* FIXME: need more extreme action */
        }
#if DEADWOOD
        evt_signal(kq->kq_loop, EVT_WAKEUP, kn);
#endif
    }

    if (kn->kev.flags & EV_ONESHOT) {
        struct filter *filt;

        if (filter_lookup(&filt, kq, kn->kev.filter))
            dbg_perror("filter_lookup()");
        knote_release(kn);
    }
}
/* TODO: USE this to get events with name field */
int
get_one_event(struct inotify_event *dst, int inofd)
{
    ssize_t n;

    dbg_puts("reading one inotify event");
    for (;;) {
        n = read(inofd, dst, sizeof(*dst));
        if (n < 0) {
            if (errno == EINTR)
                continue;
            dbg_perror("read");
            return (-1);
        } else {
            break;
        }
    }
    dbg_printf("read(2) from inotify wd: %ld bytes", (long) n);

    /* FIXME-TODO: if len > 0, read(len) */
    if (dst->len != 0)
        abort();

    return (0);
}
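/*
 * The FIXME-TODO above concerns events that carry a name (dst->len > 0),
 * which the current helper cannot handle. Below is a minimal sketch of one
 * way to do it; the helper name get_one_event_with_name and its dstsz
 * parameter are hypothetical, not part of the existing code. inotify(7)
 * does not allow partial reads, so the buffer must be large enough for the
 * largest possible single event, and one read(2) may return several queued
 * events - this sketch only copies out the first one.
 */
#include <sys/inotify.h>
#include <limits.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>

int
get_one_event_with_name(struct inotify_event *dst, size_t dstsz, int inofd)
{
    char buf[sizeof(struct inotify_event) + NAME_MAX + 1]
        __attribute__ ((aligned(__alignof__(struct inotify_event))));
    struct inotify_event *ev;
    size_t evlen;
    ssize_t n;

    for (;;) {
        n = read(inofd, buf, sizeof(buf));
        if (n >= 0)
            break;
        if (errno == EINTR)
            continue;
        return (-1);
    }

    ev = (struct inotify_event *) buf;
    evlen = sizeof(*ev) + ev->len;      /* header plus NUL-padded name */
    if (evlen > dstsz)
        return (-1);                    /* caller's buffer cannot hold the name */
    memcpy(dst, ev, evlen);
    return (0);
}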
static int
fclose_dst_src(FILE ** const dstf, FILE ** const srcf)
{
    int r, r1, r2, errsv1, errsv2;

    r1 = fclose(*dstf);
    if (r1) {
        errsv1 = errno; // LCOV_EXCL_LINE
    } else {
        *dstf = NULL;
    }
    r2 = fclose(*srcf);
    if (r2) {
        errsv2 = errno; // LCOV_EXCL_LINE
    } else {
        *srcf = NULL;
    }
    if (r1 && r2) {
        // LCOV_EXCL_START
        errno = errsv2;
        dbg_perror("fclose_dst_src: failed to close srcf, can't express this error"); // LCOV_EXCL_LINE
        errno = errsv1;
        r = r1;
        // LCOV_EXCL_STOP
    } else if (r1) {
        r = r1;         // LCOV_EXCL_LINE
        errno = errsv1; // LCOV_EXCL_LINE
    } else if (r2) {
        r = r2;         // LCOV_EXCL_LINE
        errno = errsv2; // LCOV_EXCL_LINE
    } else {
        r = 0;
    }
    return r;
}
void
ptwq_set_current_thread_priority(int priority)
{
    long retval = 0;

    dbg_printf("reconfiguring thread for priority level=%d", priority);

    switch (priority) {
    case WORKQ_LOW_PRIOQUEUE:
        /* run low priority queues in the time-sharing class */
        retval = priocntl(P_LWPID, P_MYID, PC_SETXPARMS, "TS", 0);
        break;
    case WORKQ_DEFAULT_PRIOQUEUE:
        /* run default priority queues in the time-sharing class */
        retval = priocntl(P_LWPID, P_MYID, PC_SETXPARMS, "TS", 0);
        // retval = priocntl(P_LWPID, P_MYID, PC_SETXPARMS, "RT", RT_KY_PRI, WORKQ_NUM_PRIOQUEUE - priority - 1, 0);
        break;
    case WORKQ_HIGH_PRIOQUEUE:
        /* run high priority queues in the fixed-priority class */
        retval = priocntl(P_LWPID, P_MYID, PC_SETXPARMS, "FX", 0);
        // retval = priocntl(P_LWPID, P_MYID, PC_SETXPARMS, "RT", RT_KY_PRI, WORKQ_NUM_PRIOQUEUE - priority - 1, 0);
        break;
    default:
        dbg_printf("Unknown priority level = %d", priority);
        break;
    }

    if (retval != 0)
        dbg_perror("priocntl()");
}
int
kqueue_validate(struct kqueue *kq)
{
    int rv;
    char buf[1];
    struct pollfd pfd;

    pfd.fd = kq->kq_sockfd[0];
    pfd.events = POLLIN | POLLHUP;
    pfd.revents = 0;

    rv = poll(&pfd, 1, 0);
    if (rv == 0)
        return (1);
    if (rv < 0) {
        dbg_perror("poll(2)");
        return (-1);
    }
    if (rv > 0) {
        /* NOTE: If the caller accidentally writes to the kqfd, it will be
           considered invalid. */
        rv = recv(kq->kq_sockfd[0], buf, sizeof(buf), MSG_PEEK | MSG_DONTWAIT);
        if (rv == 0)
            return (0);
        else
            return (-1);
    }

    return (0);
}
static void *
overcommit_worker_main(void *arg)
{
    struct timespec ts;
    pthread_workqueue_t workq;
    void (*func)(void *);
    void *func_arg;
    struct work *witem;
    int rv, idx;

    (void) arg;

    pthread_mutex_lock(&ocwq_mtx);

    for (;;) {
        /* Find the highest priority workqueue that is non-empty */
        idx = ffs(ocwq_mask);
        if (idx > 0) {
            workq = ocwq[idx - 1];
            witem = STAILQ_FIRST(&workq->item_listhead);
            if (witem != NULL) {
                /* Remove the first work item */
                STAILQ_REMOVE_HEAD(&workq->item_listhead, item_entry);
                if (STAILQ_EMPTY(&workq->item_listhead))
                    ocwq_mask &= ~(0x1 << workq->wqlist_index);

                /* Execute the work item */
                pthread_mutex_unlock(&ocwq_mtx);
                func = witem->func;
                func_arg = witem->func_arg;
                witem_free(witem);
                func(func_arg);
                pthread_mutex_lock(&ocwq_mtx);
                continue;
            }
        }

        /* Wait for more work to be available. */
        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += 15;
        ocwq_idle_threads++;
        dbg_printf("waiting for work (idle=%d)", ocwq_idle_threads);
        rv = pthread_cond_timedwait(&ocwq_has_work, &ocwq_mtx, &ts);
        if (rv != 0) {
            /* pthread_cond_timedwait(3) returns an error number directly and
               does not set errno. Normally, the signaler will decrement the
               idle counter, but this path is not taken in response to a
               signaler. */
            ocwq_idle_threads--;

            if (rv == ETIMEDOUT) {
                dbg_puts("timeout, no work available");
                break;
            } else {
                dbg_perror("pthread_cond_timedwait");
                //TODO: some kind of crash mechanism
                break;
            }
        }
    }

    pthread_mutex_unlock(&ocwq_mtx);
    pthread_exit(NULL);
}
int
threads_runnable(unsigned int *threads_running, unsigned int *threads_total)
{
    DIR *dip;
    struct dirent *dit;
    const char *task_path = "/proc/self/task";
    char thread_path[1024];
    char thread_data[MAX_RESULT_SIZE+1];
    char dummy[MAX_RESULT_SIZE+1];
    char state;
    int pid;
    unsigned int running_count = 0, total_count = 0;

    dbg_puts("Checking threads_runnable()");

    if ((dip = opendir(task_path)) == NULL) {
        dbg_perror("opendir");
        return -1;
    }

    while ((dit = readdir(dip)) != NULL) {
        memset(thread_data, 0, sizeof(thread_data));
        snprintf(thread_path, sizeof(thread_path), "%s/%s/stat", task_path, dit->d_name);
        if (_read_file(thread_path, thread_data) == 0) {
            if (sscanf(thread_data, "%d %s %c", &pid, dummy, &state) == 3) {
                total_count++;
                dbg_printf("The state for thread %s is %c", dit->d_name, state);
                switch (state) {
                case 'R':
                    running_count++;
                    break;
                default:
                    break;
                }
            } else {
                dbg_printf("Failed to scan state for thread %s (%s)", dit->d_name, thread_data);
            }
        }
    }

    if (closedir(dip) == -1) {
        perror("closedir");
    }

    dbg_printf("Running count is %u", running_count);

    *threads_running = running_count;
    *threads_total = total_count;

    return 0;
}
static int
fdopen_dst_src(int dst, int src, FILE ** const dstfp, FILE ** const srcfp)
{
    int r;

    int newdst = dup(dst);
    if (newdst < 0) {
        r = errno;
        goto exit;
    }
    FILE *dstf = fdopen(newdst, "a");
    if (!dstf) {
        r = errno;
        int cr = close(newdst);
        if (cr) {
            dbg_perror("fdopen_dst_src: close"); // LCOV_EXCL_LINE
        }
        goto exit;
    }
    int newsrc = dup(src);
    if (newsrc < 0) {
        r = errno;
        int cr = fclose(dstf);
        if (cr) {
            dbg_perror("fdopen_dst_src: fclose"); // LCOV_EXCL_LINE
        }
        goto exit;
    }
    FILE *srcf = fdopen(newsrc, "r");
    if (!srcf) {
        r = errno;
        int cr = close(newsrc);
        if (cr) {
            dbg_perror("fdopen_dst_src: close"); // LCOV_EXCL_LINE
        }
        cr = fclose(dstf);
        if (cr) {
            dbg_perror("fdopen_dst_src: fclose"); // LCOV_EXCL_LINE
        }
        goto exit;
    }
    *dstfp = dstf;
    *srcfp = srcf;
    r = 0;
exit:
    return r;
}
/* TODO: This entire function is copy+pasted from socket.c with minor changes
   for timerfds. Perhaps it could be refactored into a generic epoll_copyout()
   that calls custom per-filter actions. */
int
evfilt_timer_copyout(struct filter *filt, struct kevent *dst, int nevents)
{
    struct epoll_event epevt[MAX_KEVENT];
    struct epoll_event *ev;
    struct knote *kn;
    uint64_t expired;
    int i, nret;
    ssize_t n;

    for (;;) {
        nret = epoll_wait(filt->kf_pfd, &epevt[0], nevents, 0);
        if (nret < 0) {
            if (errno == EINTR)
                continue;
            dbg_perror("epoll_wait");
            return (-1);
        } else {
            break;
        }
    }

    for (i = 0, nevents = 0; i < nret; i++) {
        ev = &epevt[i];
        /* TODO: put in generic debug.c: epoll_event_dump(ev); */
        kn = ev->data.ptr;
        memcpy(dst, &kn->kev, sizeof(*dst));
        if (ev->events & EPOLLERR)
            dst->fflags = 1; /* FIXME: Return the actual timer error */

        /* On return, data contains the number of times the
           timer has been triggered. */
        n = read(kn->data.pfd, &expired, sizeof(expired));
        if (n < 0 || n < (ssize_t) sizeof(expired)) {
            dbg_puts("invalid read from timerfd");
            expired = 1;  /* Fail gracefully */
        }
        dst->data = expired;

        if (kn->kev.flags & EV_DISPATCH) {
            KNOTE_DISABLE(kn);
            ktimer_delete(filt, kn);
        } else if (kn->kev.flags & EV_ONESHOT) {
            ktimer_delete(filt, kn);
            knote_free(filt, kn);
        }

        nevents++;
        dst++;
    }

    return (nevents);
}
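/*
 * A possible shape for the refactor mentioned in the TODO above: the shared
 * epoll_wait(2) loop lives in one place, and a per-filter callback turns each
 * epoll_event into a kevent (the timer filter's callback would do the timerfd
 * read, the socket filter's would not). The names kevent_copy_fn and
 * epoll_copyout_generic are hypothetical; this is only a sketch, not the
 * library's actual API.
 */
typedef int (*kevent_copy_fn)(struct filter *filt, struct kevent *dst,
                              struct knote *kn, struct epoll_event *ev);

static int
epoll_copyout_generic(struct filter *filt, struct kevent *dst, int nevents,
                      kevent_copy_fn copy_one)
{
    struct epoll_event epevt[MAX_KEVENT];
    int i, nret;

    /* Retry the wait if interrupted by a signal */
    do {
        nret = epoll_wait(filt->kf_pfd, &epevt[0], nevents, 0);
    } while (nret < 0 && errno == EINTR);
    if (nret < 0) {
        dbg_perror("epoll_wait");
        return (-1);
    }

    for (i = 0, nevents = 0; i < nret; i++) {
        struct knote *kn = epevt[i].data.ptr;

        /* Delegate the per-filter conversion to the callback */
        if (copy_one(filt, dst, kn, &epevt[i]) < 0)
            return (-1);
        nevents++;
        dst++;
    }

    return (nevents);
}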
/*
 * Return the offset from the current position to end of file.
 */
static intptr_t
get_eof_offset(int fd)
{
    off_t curpos;
    struct stat sb;

    curpos = lseek(fd, 0, SEEK_CUR);
    if (curpos == (off_t) -1) {
        dbg_perror("lseek(2)");
        curpos = 0;
    }
    if (fstat(fd, &sb) < 0) {
        dbg_perror("fstat(2)");
        sb.st_size = 1;
    }

    dbg_printf("curpos=%zu size=%zu\n", (size_t) curpos, (size_t) sb.st_size);
    return (sb.st_size - curpos); //FIXME: can overflow
}
static int
_read_file(const char *path, char *result)
{
    int read_fd, retval = -1;
    ssize_t actual_read;

    read_fd = open(path, O_RDONLY);
    if (read_fd == -1) {
        dbg_perror("open()");
        return retval;
    }

    if (fcntl(read_fd, F_SETFL, O_NONBLOCK) != 0) {
        dbg_perror("fcntl()");
        goto errout;
    }

    actual_read = read(read_fd, result, MAX_RESULT_SIZE);
#ifdef __ia64__
    dbg_printf("read %ld from %s", actual_read, path);
#else
    dbg_printf("read %zd from %s", actual_read, path);
#endif
    /* Treat both a read error and an empty read as failure */
    if (actual_read <= 0) {
        goto errout;
    }

    retval = 0;

errout:
    if (close(read_fd) != 0) {
        dbg_perror("close()");
    }

    return retval;
}
static int
_timer_delete(struct knote *kn)
{
    int rv;

    rv = pthread_cancel(kn->data.tid);
    if (rv != 0) {
        /* Race condition: sleeper_thread exits before it is cancelled.
           pthread_cancel(3) returns the error number directly rather than
           setting errno. */
        if (rv == ESRCH)
            return (0);
        dbg_perror("pthread_cancel(3)");
        return (-1);
    }

    return (0);
}
int
linux_kqueue_init(struct kqueue *kq)
{
    kq->kq_id = epoll_create(1);
    if (kq->kq_id < 0) {
        dbg_perror("epoll_create(2)");
        return (-1);
    }

    if (filter_register_all(kq) < 0) {
        close(kq->kq_id);
        return (-1);
    }

#if DEADWOOD
    //might be useful in posix

    /* Add each filter's pollable descriptor to the epollset */
    for (i = 0; i < EVFILT_SYSCOUNT; i++) {
        filt = &kq->kq_filt[i];

        if (filt->kf_id == 0)
            continue;

        memset(&ev, 0, sizeof(ev));
        ev.events = EPOLLIN;
        ev.data.ptr = filt;

        if (epoll_ctl(kq->kq_id, EPOLL_CTL_ADD, filt->kf_pfd, &ev) < 0) {
            dbg_perror("epoll_ctl(2)");
            close(kq->kq_id);
            return (-1);
        }
    }
#endif

    return (0);
}
int
evfilt_timer_init(struct filter *filt)
{
    int fd[2];

    if (socketpair(AF_UNIX, SOCK_STREAM, 0, fd) < 0) {
        dbg_perror("socketpair(2)");
        return (-1);
    }
    if (fcntl(fd[0], F_SETFL, O_NONBLOCK) < 0
        || fcntl(fd[1], F_SETFL, O_NONBLOCK) < 0) {
        dbg_perror("fcntl(2)");
        close(fd[0]);
        close(fd[1]);
        return (-1);
    }

    filt->kf_wfd = fd[0];
    filt->kf_pfd = fd[1];

    return (0);
}
int
evfilt_read_knote_disable(struct filter *filt, struct knote *kn)
{
    if (kn->kn_flags & KNFL_REGULAR_FILE) {
        if (epoll_ctl(kn->kn_epollfd, EPOLL_CTL_DEL, kn->kdata.kn_eventfd, NULL) < 0) {
            dbg_perror("epoll_ctl(2)");
            return (-1);
        }
        return (0);
    } else {
        return epoll_update(EPOLL_CTL_DEL, filt, kn, NULL);
    }
}
static int
delete_watch(struct filter *filt, struct knote *kn)
{
    int ifd = kn->kdata.kn_inotifyfd;

    if (ifd < 0)
        return (0);
    if (epoll_ctl(filter_epfd(filt), EPOLL_CTL_DEL, ifd, NULL) < 0) {
        dbg_perror("epoll_ctl(2)");
        return (-1);
    }
    (void) close(ifd);
    kn->kdata.kn_inotifyfd = -1;

    return (0);
}
static int
do_accept(EpmdVars *g, int listensock)
{
    int msgsock;
    struct EPMD_SOCKADDR_IN icli_addr; /* workaround for QNX bug - cannot   */
    int icli_addr_len;                 /* handle NULL pointers to accept.   */

    icli_addr_len = sizeof(icli_addr);

    msgsock = accept(listensock, (struct sockaddr*) &icli_addr,
                     (unsigned int*) &icli_addr_len);

    if (msgsock < 0) {
        dbg_perror(g, "error in accept");
        return EPMD_FALSE;
    }

    return conn_open(g, msgsock);
}
/* Non-portable kqueue initialization code. */
static int
kqueue_sys_init(struct kqueue *kq)
{
#if defined(__sun__)
    port_event_t *pe;

    if ((kq->kq_port = port_create()) < 0) {
        dbg_perror("port_create(2)");
        return (-1);
    }
    if (pthread_key_create(&kq->kq_port_event, NULL) != 0)
        abort();
    if ((pe = calloc(1, sizeof(*pe))) == NULL)
        abort();
    if (pthread_setspecific(kq->kq_port_event, pe) != 0)
        abort();
#endif
    return (0);
}
static unsigned int
get_fd_limit(void)
{
#ifdef _WIN32
    /* Windows should actually be able to hold far more, as it uses HANDLEs
       for everything. Still, this number should be sufficient for the
       provided number of kqueue fds. */
    return 65536;
#else
    struct rlimit rlim;

    if (getrlimit(RLIMIT_NOFILE, &rlim) < 0) {
        dbg_perror("getrlimit(2)");
        return (65536);
    } else {
        return (rlim.rlim_max);
    }
#endif
}
int
manager_init(void)
{
    wqlist_has_manager = 0;
    pthread_cond_init(&wqlist_has_work, NULL);

    LIST_INIT(&workers);
    pthread_mutex_init(&workers_mtx, NULL);
    pthread_mutex_init(&wqlist_mtx, NULL);
    wqlist_mask = 0;

    pthread_cond_init(&ocwq_has_work, NULL);
    pthread_mutex_init(&ocwq_mtx, NULL);
    ocwq_mask = 0;
    ocwq_idle_threads = 0;

    witem_cache_init();

    cpu_count = (unsigned int) sysconf(_SC_NPROCESSORS_ONLN);

    pthread_attr_init(&detached_attr);
    pthread_attr_setdetachstate(&detached_attr, PTHREAD_CREATE_DETACHED);

    /* Initialize the scoreboard */
    pthread_cond_init(&scoreboard.sb_wake_cond, NULL);
    pthread_mutex_init(&scoreboard.sb_wake_mtx, NULL);

    /* Determine the initial thread pool constraints */
    worker_min = 2; // we can start with a small amount, worker_idle_threshold will be used as new dynamic low watermark
    worker_idle_threshold = worker_idle_threshold_per_cpu();

    /* pthread_atfork(3) returns an error number, not -1 with errno set */
    if (pthread_atfork(NULL, NULL, manager_reinit) != 0) {
        dbg_perror("pthread_atfork()");
        return (-1);
    }

    return (0);
}
static const char *
detect_fs(const char *path)
{
    int fd;
    const char *ret;

    fd = open(path, O_RDONLY);
    if (fd < 0) {
        dbg_perror("open");
        return NULL;
    }

    if (is_vfat(fd))
        ret = "vfat";
    else if (is_iso9660(fd))
        ret = "iso9660";
    else
        ret = is_extN(fd);

    close(fd);
    return ret;
}
int
evfilt_read_knote_enable(struct filter *filt, struct knote *kn)
{
    struct epoll_event ev;

    memset(&ev, 0, sizeof(ev));
    ev.events = kn->data.events;
    ev.data.ptr = kn;

    if (kn->kn_flags & KNFL_REGULAR_FILE) {
        if (epoll_ctl(kn->kn_epollfd, EPOLL_CTL_ADD, kn->kdata.kn_eventfd, &ev) < 0) {
            dbg_perror("epoll_ctl(2)");
            return (-1);
        }
        return (0);
    } else {
        return epoll_update(EPOLL_CTL_ADD, filt, kn, &ev);
    }
}
static int
add_watch(struct filter *filt, struct knote *kn)
{
    struct epoll_event ev;
    int ifd;
    char path[PATH_MAX];
    uint32_t mask;

    /* Convert the fd to a pathname */
    if (linux_fd_to_path(&path[0], sizeof(path), kn->kev.ident) < 0)
        return (-1);

    /* Convert the fflags to the inotify mask */
    mask = IN_CLOSE;
    if (kn->kev.fflags & NOTE_DELETE)
        mask |= IN_ATTRIB | IN_DELETE_SELF;
    if (kn->kev.fflags & NOTE_WRITE)
        mask |= IN_MODIFY | IN_ATTRIB;
    if (kn->kev.fflags & NOTE_EXTEND)
        mask |= IN_MODIFY | IN_ATTRIB;
    if ((kn->kev.fflags & NOTE_ATTRIB) || (kn->kev.fflags & NOTE_LINK))
        mask |= IN_ATTRIB;
    if (kn->kev.fflags & NOTE_RENAME)
        mask |= IN_MOVE_SELF;
    if (kn->kev.flags & EV_ONESHOT)
        mask |= IN_ONESHOT;

    /* Create an inotify descriptor */
    ifd = inotify_init();
    if (ifd < 0) {
        dbg_perror("inotify_init(2)");
        return (-1);
    }

    /* Add the watch */
    dbg_printf("inotify_add_watch(2); inofd=%d, %s, path=%s",
        ifd, inotify_mask_dump(mask), path);
    kn->kev.data = inotify_add_watch(ifd, path, mask);
    if (kn->kev.data < 0) {
        dbg_perror("inotify_add_watch(2)");
        goto errout;
    }

    /* Add the inotify fd to the epoll set */
    memset(&ev, 0, sizeof(ev));
    ev.events = EPOLLIN;
    ev.data.ptr = kn;
    if (epoll_ctl(filter_epfd(filt), EPOLL_CTL_ADD, ifd, &ev) < 0) {
        dbg_perror("epoll_ctl(2)");
        goto errout;
    }
    kn->kdata.kn_inotifyfd = ifd;

    return (0);

errout:
    kn->kdata.kn_inotifyfd = -1;
    (void) close(ifd);
    return (-1);
}
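/*
 * For context, add_watch() is what ultimately services a caller registering
 * an EVFILT_VNODE kevent. A hypothetical caller using the standard kevent(2)
 * API might look like the sketch below; the helper name, the chosen fflags,
 * and the open(2) mode are illustrative only.
 */
#include <sys/event.h>
#include <fcntl.h>

static int
watch_file_for_changes(int kqfd, const char *path)
{
    struct kevent kev;
    int fd;

    fd = open(path, O_RDONLY);
    if (fd < 0)
        return (-1);

    /* Ask for notification when the file is written to or deleted;
       add_watch() translates these NOTE_* flags into an inotify mask. */
    EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
           NOTE_WRITE | NOTE_DELETE, 0, NULL);

    return kevent(kqfd, &kev, 1, NULL, 0, NULL);
}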