int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
  int r;

  r = pthread_rwlock_trywrlock(rwlock);
  if (r && r != EBUSY && r != EAGAIN)
    JXABORT("11");

  if (r)
    return -1;
  else
    return 0;
}
int uv_mutex_init(uv_mutex_t* mutex) {
#if defined(NDEBUG) || !defined(PTHREAD_MUTEX_ERRORCHECK)
  if (pthread_mutex_init(mutex, NULL))
    return -1;
  else
    return 0;
#else
  pthread_mutexattr_t attr;
  int r;

  if (pthread_mutexattr_init(&attr))
    JXABORT("1");

  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK))
    JXABORT("1");

  r = pthread_mutex_init(mutex, &attr);

  if (pthread_mutexattr_destroy(&attr))
    JXABORT("1");

  return r ? -1 : 0;
#endif
}
int uv_mutex_trylock(uv_mutex_t* mutex) {
  int r;

  r = pthread_mutex_trylock(mutex);
  if (r && r != EBUSY && r != EAGAIN)
    JXABORT("4");

  if (r)
    return -1;
  else
    return 0;
}
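/* Hedged usage sketch for the mutex wrappers above, illustration only.
 * Assumptions not shown in this section: uv_mutex_lock() is the blocking
 * counterpart of uv_mutex_trylock() and is defined elsewhere in this file;
 * the counter_* names and the guard are hypothetical.
 */
static uv_mutex_t counter_guard;
static unsigned long counter;

static int counter_setup(void) {
  /* uv_mutex_init() returns 0 on success, -1 on failure (see above). */
  return uv_mutex_init(&counter_guard);
}

static void counter_bump(void) {
  /* uv_mutex_trylock() returns -1 when the mutex is already held (EBUSY /
   * EAGAIN); any other pthread error aborts inside the wrapper itself. */
  if (uv_mutex_trylock(&counter_guard) != 0)
    return;

  counter++;
  uv_mutex_unlock(&counter_guard);
}

static void counter_teardown(void) {
  uv_mutex_destroy(&counter_guard);
}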
void uv_barrier_wait(uv_barrier_t* barrier) {
  int r = pthread_barrier_wait(barrier);
  if (r && r != PTHREAD_BARRIER_SERIAL_THREAD)
    JXABORT("22");
}
void uv_barrier_destroy(uv_barrier_t* barrier) {
  if (pthread_barrier_destroy(barrier))
    JXABORT("21");
}
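/* Hedged sketch of how the barrier wrappers above are typically used:
 * N worker threads rendezvous once before doing real work. Assumptions:
 * uv_barrier_init(), uv_thread_create() and uv_thread_join() are provided
 * by uv.h / elsewhere in this file; worker() and run_workers() are
 * illustrative names only.
 */
static uv_barrier_t start_barrier;

static void worker(void* arg) {
  (void) arg;
  /* Every thread blocks here until all participants have arrived; a
   * pthread_barrier_wait() error aborts inside uv_barrier_wait(). */
  uv_barrier_wait(&start_barrier);
  /* ... real work starts here, with all threads released together ... */
}

static void run_workers(void) {
  uv_thread_t threads[4];
  int i;

  if (uv_barrier_init(&start_barrier, 4))  /* one slot per worker */
    return;

  for (i = 0; i < 4; i++)
    uv_thread_create(&threads[i], worker, NULL);

  for (i = 0; i < 4; i++)
    uv_thread_join(&threads[i]);

  uv_barrier_destroy(&start_barrier);
}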
void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
  if (pthread_cond_wait(cond, mutex))
    JXABORT("18");
}
void uv_cond_broadcast(uv_cond_t* cond) {
  if (pthread_cond_broadcast(cond))
    JXABORT("17");
}
static int read_times(unsigned int numcpus, uv_cpu_info_t* ci) {
  unsigned long clock_ticks;
  struct uv_cpu_times_s ts;
  unsigned long user;
  unsigned long nice;
  unsigned long sys;
  unsigned long idle;
  unsigned long dummy;
  unsigned long irq;
  unsigned int num;
  unsigned int len;
  char buf[1024];
  FILE* fp;

  clock_ticks = sysconf(_SC_CLK_TCK);
  assert(clock_ticks != (unsigned long) -1);
  assert(clock_ticks != 0);

  fp = fopen("/proc/stat", "r");
  if (fp == NULL)
    return -1;

  if (!fgets(buf, sizeof(buf), fp))
    JXABORT("A4");

  num = 0;

  while (fgets(buf, sizeof(buf), fp)) {
    if (num >= numcpus)
      break;

    if (strncmp(buf, "cpu", 3))
      break;

    /* skip "cpu<num> " marker */
    {
      unsigned int n;
      unsigned int m;
      int r;

      /* Parse outside assert(): asserts compile out under NDEBUG but the
       * CPU number is needed below. */
      r = sscanf(buf, "cpu%u ", &n);
      assert(r == 1 && n >= num);
      (void) r;

      /* Length of the "cpu<n> " prefix we skip before the counters. */
      for (len = sizeof("cpu0"), m = n; m /= 10; len++);

      while (n > num) {
        /* OS has a bug and we couldn't read the previous CPU's values;
         * zero-fill the missing entries. */
        ts.user = 0;
        ts.nice = 0;
        ts.sys = 0;
        ts.idle = 0;
        ts.irq = 0;
        ci[num].cpu_times = ts;
        num++;
      }
    }

    /* Line contains user, nice, system, idle, iowait, irq, softirq, steal,
     * guest, guest_nice but we're only interested in the first four + irq.
     *
     * Don't use %*s to skip fields or %ll to read straight into the uint64_t
     * fields, they're not allowed in C89 mode.
     */
    if (6 != sscanf(buf + len,
                    "%lu %lu %lu %lu %lu %lu",
                    &user,
                    &nice,
                    &sys,
                    &idle,
                    &dummy,
                    &irq))
      JXABORT("A5");

    ts.user = clock_ticks * user;
    ts.nice = clock_ticks * nice;
    ts.sys = clock_ticks * sys;
    ts.idle = clock_ticks * idle;
    ts.irq = clock_ticks * irq;
    ci[num++].cpu_times = ts;
  }
  fclose(fp);

  return 0;
}
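/* Hedged, standalone sketch of the /proc/stat line format that read_times()
 * parses above. The sample line and parse_stat_line() are hypothetical and
 * for illustration only; <stdio.h> is assumed to be included already since
 * this file uses fopen()/fgets().
 *
 * Example per-CPU line (all fields in clock ticks):
 *   cpu0  4705 356 584 3699 23 23 0 0 0 0
 *         user nice sys  idle iowait irq softirq steal guest guest_nice
 */
static int parse_stat_line(const char* line) {
  unsigned long user, nice, sys, idle, iowait, irq;
  unsigned int cpu;

  /* Same constraint as read_times(): no %*s or %llu, to stay C89-friendly. */
  if (7 != sscanf(line,
                  "cpu%u %lu %lu %lu %lu %lu %lu",
                  &cpu, &user, &nice, &sys, &idle, &iowait, &irq))
    return -1;

  printf("cpu%u: user=%lu nice=%lu sys=%lu idle=%lu irq=%lu\n",
         cpu, user, nice, sys, idle, irq);
  return 0;
}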
void uv_mutex_unlock(uv_mutex_t* mutex) {
  if (pthread_mutex_unlock(mutex))
    JXABORT("5");
}
void uv_once(uv_once_t* guard, void (*callback)(void)) {
  if (pthread_once(guard, callback))
    JXABORT("13");
}
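/* Hedged sketch for uv_once() above: run one-time initialization exactly
 * once even when racing threads call init_lazily() concurrently. The
 * init_* names are illustrative only; UV_ONCE_INIT is the static
 * initializer provided by uv.h.
 */
static uv_once_t init_guard = UV_ONCE_INIT;

static void init_once(void) {
  /* Runs at most once, in exactly one of the calling threads. */
}

static void init_lazily(void) {
  uv_once(&init_guard, init_once);
}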
void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_unlock(rwlock))
    JXABORT("12");
}
void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_wrlock(rwlock))
    JXABORT("10");
}
void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_unlock(rwlock))
    JXABORT("9");
}
void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_rdlock(rwlock))
    JXABORT("7");
}
void uv_mutex_destroy(uv_mutex_t* mutex) {
  if (pthread_mutex_destroy(mutex))
    JXABORT("2");
}
void uv_cond_destroy(uv_cond_t* cond) {
  if (pthread_cond_destroy(cond))
    JXABORT("15");
}
void uv_cond_signal(uv_cond_t* cond) {
  if (pthread_cond_signal(cond))
    JXABORT("16");
}
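/* Hedged sketch of the canonical wait loop for the condition-variable
 * wrappers above. Assumptions: uv_mutex_lock(), uv_mutex_init() and
 * uv_cond_init() are defined elsewhere in this file / uv.h and have been
 * called on q_lock / q_nonempty; the q_* names are illustrative only.
 * uv_cond_wait() must be called with the mutex held and the predicate
 * re-checked in a loop, because pthread condition variables allow
 * spurious wakeups.
 */
static uv_mutex_t q_lock;
static uv_cond_t q_nonempty;
static int q_items;

static void q_pop_blocking(void) {
  uv_mutex_lock(&q_lock);
  while (q_items == 0)            /* re-check: wakeups may be spurious */
    uv_cond_wait(&q_nonempty, &q_lock);
  q_items--;
  uv_mutex_unlock(&q_lock);
}

static void q_push(void) {
  uv_mutex_lock(&q_lock);
  q_items++;
  uv_cond_signal(&q_nonempty);    /* wake one waiter; broadcast wakes all */
  uv_mutex_unlock(&q_lock);
}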
void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_destroy(rwlock))
    JXABORT("6");
}
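/* Hedged reader/writer sketch for the rwlock wrappers above. Assumptions:
 * uv_rwlock_init() is provided by uv.h and has been called on config_lock;
 * the config_* names are illustrative only.
 */
static uv_rwlock_t config_lock;
static int config_value;

static int config_get(void) {
  int v;
  uv_rwlock_rdlock(&config_lock);    /* many readers may hold this at once */
  v = config_value;
  uv_rwlock_rdunlock(&config_lock);
  return v;
}

static void config_set(int v) {
  uv_rwlock_wrlock(&config_lock);    /* writers get exclusive access */
  config_value = v;
  uv_rwlock_wrunlock(&config_lock);
}

static int config_try_set(int v) {
  /* uv_rwlock_trywrlock() returns -1 if the lock is busy (EBUSY/EAGAIN). */
  if (uv_rwlock_trywrlock(&config_lock) != 0)
    return -1;
  config_value = v;
  uv_rwlock_wrunlock(&config_lock);
  return 0;
}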
void uv__io_poll_jx(uv_loop_t* loop, int timeout, const int tid) {
  static int no_epoll_pwait;
  static int no_epoll_wait;
  struct uv__epoll_event events[1024];
  struct uv__epoll_event* pe;
  struct uv__epoll_event e;
  QUEUE* q;
  uv__io_t* w;
  sigset_t sigset;
  uint64_t sigmask;
  uint64_t base;
  uint64_t diff;
  int nevents;
  int count;
  int nfds;
  int fd;
  int op;
  int i;

  if (loop->nfds == 0) {
    assert(QUEUE_EMPTY(&loop->watcher_queue));
    return;
  }

  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
    q = QUEUE_HEAD(&loop->watcher_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);

    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);
    assert(w->fd >= 0);
    assert(w->fd < (int) loop->nwatchers);

    e.events = w->pevents;
    e.data = w->fd;

    if (w->events == 0)
      op = UV__EPOLL_CTL_ADD;
    else
      op = UV__EPOLL_CTL_MOD;

    /* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
     * events, skip the syscall and squelch the events after epoll_wait().
     */
    if (uv__epoll_ctl(loop->backend_fd, op, w->fd, &e)) {
      if (errno != EEXIST)
        JXABORT("A1");

      assert(op == UV__EPOLL_CTL_ADD);

      /* We've reactivated a file descriptor that's been watched before. */
      if (uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_MOD, w->fd, &e))
        JXABORT("A2");
    }

    w->events = w->pevents;
  }

  sigmask = 0;
  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
    sigemptyset(&sigset);
    sigaddset(&sigset, SIGPROF);
    sigmask |= 1 << (SIGPROF - 1);
  }

  assert(timeout >= -1);
  base = loop->time;
  count = 48; /* Benchmarks suggest this gives the best throughput. */

  for (;;) {
    if (sigmask != 0 && no_epoll_pwait != 0)
      if (pthread_sigmask(SIG_BLOCK, &sigset, NULL))
        abort();

    if (sigmask != 0 && no_epoll_pwait == 0) {
      nfds = uv__epoll_pwait(loop->backend_fd,
                             events,
                             ARRAY_SIZE(events),
                             timeout,
                             sigmask);
      if (nfds == -1 && errno == ENOSYS)
        no_epoll_pwait = 1;
    } else {
      nfds = uv__epoll_wait(loop->backend_fd,
                            events,
                            ARRAY_SIZE(events),
                            timeout);
      if (nfds == -1 && errno == ENOSYS)
        no_epoll_wait = 1;
    }

    if (sigmask != 0 && no_epoll_pwait != 0)
      if (pthread_sigmask(SIG_UNBLOCK, &sigset, NULL))
        abort();

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

    if (nfds == 0) {
      assert(timeout != -1);
      return;
    }

    if (nfds == -1) {
      if (errno == ENOSYS) {
        /* epoll_wait() or epoll_pwait() failed, try the other system call. */
        assert(no_epoll_wait == 0 || no_epoll_pwait == 0);
        continue;
      }

      if (errno != EINTR && loop->stop_flag != 1)
        JXABORT("A3");
      else if (loop->stop_flag == 1)
        return;

      if (timeout == -1)
        continue;

      if (timeout == 0)
        return;

      /* Interrupted by a signal. Update timeout and poll again. */
      goto update_timeout;
    }

    nevents = 0;

    assert(loop->watchers != NULL);
    loop->watchers[loop->nwatchers] = (void*) events;
    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;

    for (i = 0; i < nfds; i++) {
      pe = events + i;
      fd = pe->data;

      /* Skip invalidated events, see uv__platform_invalidate_fd */
      if (fd == -1)
        continue;

      assert(fd >= 0);
      assert((unsigned) fd < loop->nwatchers);

      w = loop->watchers[fd];

      if (w == NULL) {
        /* File descriptor that we've stopped watching, disarm it.
         *
         * Ignore all errors because we may be racing with another thread
         * when the file descriptor is closed.
         */
        uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_DEL, fd, pe);
        continue;
      }

      /* Give users only events they're interested in. Prevents spurious
       * callbacks when previous callback invocation in this loop has stopped
       * the current watcher. Also filters out events that the user has not
       * requested us to watch.
       */
      pe->events &= w->pevents | UV__POLLERR | UV__POLLHUP;

      /* Work around an epoll quirk where it sometimes reports just the
       * EPOLLERR or EPOLLHUP event. In order to force the event loop to
       * move forward, we merge in the read/write events that the watcher
       * is interested in; uv__read() and uv__write() will then deal with
       * the error or hangup in the usual fashion.
       *
       * Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user
       * reads the available data, calls uv_read_stop(), then sometime later
       * calls uv_read_start() again. By then, libuv has forgotten about the
       * hangup and the kernel won't report EPOLLIN again because there's
       * nothing left to read. If anything, libuv is to blame here. The
       * current hack is just a quick bandaid; to properly fix it, libuv
       * needs to remember the error/hangup event. We should get that for
       * free when we switch over to edge-triggered I/O.
       */
      if (pe->events == UV__EPOLLERR || pe->events == UV__EPOLLHUP)
        pe->events |= w->pevents & (UV__EPOLLIN | UV__EPOLLOUT);

      if (pe->events != 0) {
        w->cb(loop, w, pe->events);
        nevents++;
      }
    }

    loop->watchers[loop->nwatchers] = NULL;
    loop->watchers[loop->nwatchers + 1] = NULL;

    if (nevents != 0) {
      if (nfds == ARRAY_SIZE(events) && --count != 0) {
        /* Poll for more events but don't block this time. */
        timeout = 0;
        continue;
      }
      return;
    }

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);

    diff = loop->time - base;
    if (diff >= (uint64_t) timeout)
      return;

    timeout -= diff;
  }
}
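/* Hedged, standalone sketch (not part of libuv/JXcore) of the epoll behavior
 * that the EPOLLERR/EPOLLHUP workaround above handles: once a pipe's write
 * end is closed and the pending data has been read, a level-triggered epoll
 * keeps reporting only EPOLLHUP, so a poller that masked it out would never
 * make progress. Assumes a Linux host and uses the raw <sys/epoll.h> API
 * instead of the uv__epoll_* wrappers in this file; epollhup_demo() is an
 * illustrative name only.
 */
#include <sys/epoll.h>
#include <unistd.h>
#include <stdio.h>

static void epollhup_demo(void) {
  struct epoll_event ev, out;
  char buf[16];
  int fds[2];
  int epfd;

  if (pipe(fds))
    return;
  epfd = epoll_create1(0);
  if (epfd == -1)
    return;

  ev.events = EPOLLIN;
  ev.data.fd = fds[0];
  epoll_ctl(epfd, EPOLL_CTL_ADD, fds[0], &ev);

  if (write(fds[1], "x", 1) != 1)
    return;
  close(fds[1]);                       /* the peer "hangs up" */

  epoll_wait(epfd, &out, 1, -1);       /* typically EPOLLIN | EPOLLHUP */
  printf("events before drain: 0x%x\n", out.events);

  if (read(fds[0], buf, sizeof(buf)) < 0)   /* drain the pending byte */
    return;

  epoll_wait(epfd, &out, 1, -1);       /* now only EPOLLHUP remains */
  printf("events after drain:  0x%x\n", out.events);

  close(fds[0]);
  close(epfd);
}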