void mill_list_insert(struct mill_list *self, struct mill_list_item *item,
      struct mill_list_item *it) {
    item->prev = it ? it->prev : self->last;
    item->next = it;
    if(mill_fast(item->prev))
        item->prev->next = item;
    if(mill_fast(item->next))
        item->next->prev = item;
    if(mill_slow(!self->first || self->first == it))
        self->first = item;
    if(mill_slow(!it))
        self->last = item;
}
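A minimal usage sketch, built only on the fields visible above and assuming zero-initialised structures stand in for a proper constructor: passing NULL as the third argument appends at the tail, while passing an existing item inserts in front of it.

/* Sketch: append two items, then insert one between them. */
struct mill_list list = {0};
struct mill_list_item a = {0}, b = {0}, c = {0};
mill_list_insert(&list, &a, NULL); /* list: a */
mill_list_insert(&list, &c, NULL); /* list: a, c */
mill_list_insert(&list, &b, &c);   /* list: a, b, c -- insert before c */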
void mill_trace_(const char *location, const char *format, ...) {
    if(mill_fast(mill_tracelevel <= 0))
        return;
    char buf[256];
    /* First print the timestamp. */
    struct timeval nw;
    gettimeofday(&nw, NULL);
    struct tm *nwtm = localtime(&nw.tv_sec);
    snprintf(buf, sizeof(buf), "%02d:%02d:%02d",
        (int)nwtm->tm_hour, (int)nwtm->tm_min, (int)nwtm->tm_sec);
    fprintf(stderr, "==> %s.%06d ", buf, (int)nw.tv_usec);
    /* Coroutine ID. */
    snprintf(buf, sizeof(buf), "{%d}", (int)mill_running->debug.id);
    fprintf(stderr, "%-8s ", buf);
    va_list va;
    va_start(va, format);
    vfprintf(stderr, format, va);
    va_end(va);
    if(location)
        fprintf(stderr, " at %s\n", location);
    else
        fprintf(stderr, "\n");
    fflush(stderr);
}
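The tracer is normally reached through a wrapper macro that stamps the call site into the location argument; that macro is not shown here, so the stringification helpers below are assumptions for illustration only.

/* Hypothetical wrapper that passes the call site as the location string. */
#define MY_STR_(x) #x
#define MY_STR(x) MY_STR_(x)
#define my_trace(...) mill_trace_(__FILE__ ":" MY_STR(__LINE__), __VA_ARGS__)

/* Example: my_trace("<%d>: value sent to channel", (int)chid); */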
int64_t now(void) {
#if (defined __GNUC__ || defined __clang__) && \
      (defined __i386__ || defined __x86_64__)
    /* Get the timestamp counter. This is time since startup, expressed
       in CPU cycles. Unlike gettimeofday() and similar functions it is
       extremely fast - evaluating it takes only a few CPU cycles. */
    uint32_t low;
    uint32_t high;
    __asm__ volatile("rdtsc" : "=a" (low), "=d" (high));
    int64_t tsc = (int64_t)((uint64_t)high << 32 | low);
    /* These static variables hold the last seen timestamp counter and the
       last time measurement. They are initialised the first time this
       function is called. */
    static int64_t last_tsc = -1;
    static int64_t last_now = -1;
    if(mill_slow(last_tsc < 0)) {
        last_tsc = tsc;
        last_now = mill_now();
    }
    /* If the TSC hasn't jumped backwards and hasn't progressed by more
       than 1/2 ms, the cached time value can be reused. */
    if(mill_fast(tsc - last_tsc <= (MILL_CLOCK_PRECISION / 2) &&
          tsc >= last_tsc))
        return last_now;
    /* It's more than 1/2 ms since the time was last measured. Do a new
       measurement now. */
    last_tsc = tsc;
    last_now = mill_now();
    return last_now;
#else
    return mill_now();
#endif
}
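Because of the TSC fast path, now() is cheap enough to poll in a tight loop. A sketch, where do_work() is a hypothetical stand-in for a small unit of work:

/* Spin for roughly 100 ms; each now() call costs a few cycles unless the
   1/2 ms cache window has expired and a real measurement is taken. */
int64_t deadline = now() + 100;
while(now() < deadline)
    do_work(); /* hypothetical unit of work */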
/* Get the memory page size. The query is done only once; the result is
   cached. */
static size_t mill_page_size(void) {
    static long pgsz = 0;
    if(mill_fast(pgsz))
        return (size_t)pgsz;
    pgsz = sysconf(_SC_PAGE_SIZE);
    mill_assert(pgsz > 0);
    return (size_t)pgsz;
}
struct mill_list_item *mill_list_erase(struct mill_list *self,
      struct mill_list_item *item) {
    struct mill_list_item *next;
    if(mill_fast(item->prev))
        item->prev->next = item->next;
    else
        self->first = item->next;
    if(mill_fast(item->next))
        item->next->prev = item->prev;
    else
        self->last = item->prev;
    next = item->next;
    item->prev = NULL;
    item->next = NULL;
    return next;
}
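Because mill_list_erase() returns the successor, a caller can remove items while iterating without ever touching cleared links. A sketch, where should_remove() is a hypothetical predicate:

struct mill_list_item *it = list.first;
while(it) {
    if(should_remove(it))
        it = mill_list_erase(&list, it); /* returns the next item */
    else
        it = it->next;
}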
void udpsend(udpsock s, ipaddr addr, const void *buf, size_t len) {
    struct sockaddr *saddr = (struct sockaddr*)&addr;
    ssize_t ss = sendto(s->fd, buf, len, 0, saddr,
        saddr->sa_family == AF_INET ?
        sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6));
    if(mill_fast(ss == (ssize_t)len)) {
        errno = 0;
        return;
    }
    mill_assert(ss < 0);
    if(errno == EAGAIN || errno == EWOULDBLOCK)
        errno = 0;
}
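A usage sketch against libmill's public UDP API (udplisten, iplocal and ipremote are real libmill calls; the port numbers are arbitrary). Since udpsend() reports failure through errno rather than a return value, errno is checked after the call:

udpsock s = udplisten(iplocal(NULL, 5555, 0));
ipaddr peer = ipremote("192.168.0.1", 5556, 0, -1);
udpsend(s, peer, "hello", 5);
if(errno != 0)
    perror("udpsend");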
static void *mill_getvalbuf(struct mill_cr *cr, size_t size) {
    /* Small valbufs don't require dynamic allocation. Note that the main
       coroutine doesn't have a stack allocated on the heap like other
       coroutines do, so its valbuf has to be handled specially. */
    if(mill_fast(cr != &mill_main)) {
        if(mill_fast(size <= mill_valbuf_size))
            return (void*)(((char*)cr) - mill_valbuf_size);
    }
    else {
        if(mill_fast(size <= sizeof(mill_main_valbuf)))
            return (void*)mill_main_valbuf;
    }
    /* Large valbufs are simply allocated on the heap. Reuse the cached
       buffer if it is already large enough. */
    if(mill_fast(cr->valbuf && cr->valbuf_sz >= size))
        return cr->valbuf;
    void *ptr = realloc(cr->valbuf, size);
    if(!ptr)
        return NULL;
    cr->valbuf = ptr;
    cr->valbuf_sz = size;
    return cr->valbuf;
}
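The implied contract, as a sketch: the returned buffer belongs to the coroutine and is only valid until the next mill_getvalbuf() call on it, so the caller copies the value in or out immediately. The error handling shown is an assumption for illustration:

void *buf = mill_getvalbuf(mill_running, sz);
if(!buf)
    mill_panic("out of memory for channel value"); /* assumed handling */
memcpy(buf, val, sz); /* copy the value in right away */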
static int pipe_write(struct mill_pipe_s *mp, void *ptr) {
    while(1) {
        int n = (int)write(mp->fd[1], ptr, mp->sz);
        if(mill_fast(n == (int)mp->sz))
            break;
        mill_assert(n < 0);
        if(errno == EINTR)
            continue;
        /* EAGAIN -- pipe capacity exceeded? */
        if(errno != EAGAIN)
            return -1;
        mill_fdevent(mp->fd[1], FDW_OUT, -1);
    }
    return mp->sz;
}
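A producer-side sketch: items are fixed-size (mp->sz bytes), and on a full pipe pipe_write() parks the coroutine via mill_fdevent() instead of spinning. The variable mp stands for an already-created pipe:

int val = 42; /* assumes the pipe was created with sz == sizeof(int) */
if(pipe_write(mp, &val) < 0)
    perror("pipe_write");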
static size_t mill_get_stack_size(void) {
#if defined HAVE_POSIX_MEMALIGN && HAVE_MPROTECT
    /* If sanitisation was already done, return the precomputed size. */
    if(mill_fast(mill_sanitised_stack_size))
        return mill_sanitised_stack_size;
    mill_assert(mill_stack_size > mill_page_size());
    /* The amount of memory allocated must be a multiple of the page size,
       otherwise the behaviour of posix_memalign() is undefined. */
    size_t sz = (mill_stack_size + mill_page_size() - 1) &
        ~(mill_page_size() - 1);
    /* Allocate one additional guard page. */
    mill_sanitised_stack_size = sz + mill_page_size();
    return mill_sanitised_stack_size;
#else
    return mill_stack_size;
#endif
}
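A sketch of how the sanitised size is typically consumed, pairing posix_memalign() with mprotect() so the lowest page becomes a guard page. The function name is hypothetical; libmill keeps this logic in its own stack allocator:

static void *alloc_guarded_stack(void) {
    void *ptr;
    int rc = posix_memalign(&ptr, mill_page_size(), mill_get_stack_size());
    if(rc != 0)
        return NULL;
    /* Any write into the bottom page now faults instead of silently
       corrupting adjacent memory. */
    rc = mprotect(ptr, mill_page_size(), PROT_NONE);
    mill_assert(rc == 0);
    /* Stacks grow downwards; hand out the address just past the top. */
    return (char*)ptr + mill_get_stack_size();
}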
static int pipe_read(struct mill_pipe_s *mp, void *ptr) {
    unsigned size = mp->sz;
    int n, total = 0;
    while(1) {
        if(trylock(mp)) {
again:
            n = (int)read(mp->fd[0], (char*)ptr + total, size - total);
            if(mill_slow(n == 0)) {
                /* Done. */
                mill_assert(total == 0);
                unlock(mp);
                return 0;
            }
            if(n > 0) {
                total += n;
                if(mill_fast(total == size)) {
                    unlock(mp);
                    return total;
                }
                goto again;
            }
            /* n == -1 */
            if(errno == EINTR)
                goto again;
            if(errno == EAGAIN) {
                mill_fdevent(mp->fd[0], FDW_IN, -1);
                goto again;
            }
            unlock(mp);
            break;
        }
        mill_fdevent(mp->fd[0], FDW_IN, -1);
        /* Multiple threads may receive the notification. Race for the
           lock. */
    }
    return -1;
}
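A consumer-side sketch matching the return convention above: mp->sz bytes on success, 0 once the write end is closed, -1 on error. Here process() is a hypothetical consumer and mp an already-created pipe:

int val;
while(1) {
    int rc = pipe_read(mp, &val);
    if(rc == 0)
        break; /* writer closed the pipe */
    if(rc < 0) {
        perror("pipe_read");
        break;
    }
    process(val);
}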